-rw-r--r--CREDITS9
-rw-r--r--Documentation/ABI/stable/sysfs-bus-xen-backend75
-rw-r--r--Documentation/ABI/stable/sysfs-devices-system-xen_memory77
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci18
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd7
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb25
-rw-r--r--Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration12
-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-docg334
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff2
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-multitouch9
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-isku135
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-wiimote12
-rw-r--r--Documentation/ABI/testing/sysfs-driver-wacom17
-rw-r--r--Documentation/DocBook/debugobjects.tmpl50
-rw-r--r--Documentation/HOWTO4
-rw-r--r--Documentation/RCU/checklist.txt6
-rw-r--r--Documentation/RCU/rcu.txt10
-rw-r--r--Documentation/RCU/stallwarn.txt16
-rw-r--r--Documentation/RCU/torture.txt13
-rw-r--r--Documentation/RCU/trace.txt4
-rw-r--r--Documentation/RCU/whatisRCU.txt19
-rw-r--r--Documentation/arm/memory.txt11
-rw-r--r--Documentation/atomic_ops.txt87
-rw-r--r--Documentation/cgroups/cgroups.txt51
-rw-r--r--Documentation/cgroups/memory.txt28
-rw-r--r--Documentation/cgroups/net_prio.txt53
-rw-r--r--Documentation/cpu-freq/governors.txt4
-rw-r--r--Documentation/development-process/5.Posting8
-rw-r--r--Documentation/devices.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/gic.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/insignal-boards.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/samsung-boards.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/tegra.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/vic.txt29
-rw-r--r--Documentation/devicetree/bindings/c6x/clocks.txt40
-rw-r--r--Documentation/devicetree/bindings/c6x/dscr.txt127
-rw-r--r--Documentation/devicetree/bindings/c6x/emifa.txt62
-rw-r--r--Documentation/devicetree/bindings/c6x/interrupt.txt104
-rw-r--r--Documentation/devicetree/bindings/c6x/soc.txt28
-rw-r--r--Documentation/devicetree/bindings/c6x/timer64.txt26
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl330.txt30
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-samsung.txt40
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-designware.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt58
-rw-r--r--Documentation/devicetree/bindings/input/samsung-keypad.txt88
-rw-r--r--Documentation/devicetree/bindings/input/tegra-kbc.txt18
-rw-r--r--Documentation/devicetree/bindings/mtd/gpio-control-nand.txt44
-rw-r--r--Documentation/devicetree/bindings/net/calxeda-xgmac.txt15
-rw-r--r--Documentation/devicetree/bindings/net/can/cc770.txt53
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt25
-rw-r--r--Documentation/devicetree/bindings/nvec/nvec_nvidia.txt9
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt163
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/srio.txt103
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.txt29
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt54
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt20
-rw-r--r--Documentation/devicetree/bindings/rtc/twl-rtc.txt12
-rw-r--r--Documentation/devicetree/bindings/serial/omap_serial.txt10
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.txt14
-rw-r--r--Documentation/devicetree/bindings/usb/tegra-usb.txt13
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--Documentation/dma-buf-sharing.txt224
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/driver-model/devres.txt1
-rw-r--r--Documentation/feature-removal-schedule.txt37
-rw-r--r--Documentation/filesystems/Locking8
-rw-r--r--Documentation/filesystems/btrfs.txt4
-rw-r--r--Documentation/filesystems/configfs/configfs.txt2
-rw-r--r--Documentation/filesystems/debugfs.txt56
-rw-r--r--Documentation/filesystems/ext4.txt7
-rw-r--r--Documentation/filesystems/proc.txt39
-rw-r--r--Documentation/filesystems/sysfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt8
-rw-r--r--Documentation/hwmon/pmbus5
-rw-r--r--Documentation/hwmon/zl610015
-rw-r--r--Documentation/input/alps.txt188
-rw-r--r--Documentation/input/gpio-tilt.txt103
-rw-r--r--Documentation/input/sentelic.txt364
-rw-r--r--Documentation/kdump/kdump.txt35
-rw-r--r--Documentation/kernel-parameters.txt70
-rw-r--r--Documentation/lockdep-design.txt63
-rw-r--r--Documentation/md.txt22
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/batman-adv.txt7
-rw-r--r--Documentation/networking/bonding.txt17
-rw-r--r--Documentation/networking/ieee802154.txt27
-rw-r--r--Documentation/networking/ifenslave.c2
-rw-r--r--Documentation/networking/ip-sysctl.txt23
-rw-r--r--Documentation/networking/openvswitch.txt195
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/networking/scaling.txt8
-rw-r--r--Documentation/networking/stmmac.txt16
-rw-r--r--Documentation/networking/team.txt2
-rw-r--r--Documentation/pinctrl.txt258
-rw-r--r--Documentation/power/devices.txt118
-rw-r--r--Documentation/power/freezing-of-tasks.txt39
-rw-r--r--Documentation/power/regulator/regulator.txt2
-rw-r--r--Documentation/power/runtime_pm.txt158
-rw-r--r--Documentation/s390/Debugging390.txt34
-rw-r--r--Documentation/scsi/53c700.txt21
-rw-r--r--Documentation/serial/driver2
-rw-r--r--Documentation/sound/alsa/soc/machine.txt6
-rw-r--r--Documentation/trace/events-kmem.txt12
-rw-r--r--Documentation/trace/events.txt2
-rw-r--r--Documentation/trace/postprocess/trace-pagealloc-postprocess.pl20
-rw-r--r--Documentation/trace/tracepoint-analysis.txt40
-rw-r--r--Documentation/usb/linux-cdc-acm.inf4
-rw-r--r--Documentation/usb/usbmon.txt14
-rw-r--r--Documentation/vgaarbiter.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt41
-rw-r--r--Documentation/watchdog/00-INDEX2
-rw-r--r--Documentation/watchdog/convert_drivers_to_kernel_api.txt19
-rw-r--r--Documentation/watchdog/watchdog-kernel-api.txt10
-rw-r--r--MAINTAINERS248
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/ipcbuf.h29
-rw-r--r--arch/alpha/include/asm/socket.h3
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/alpha/include/asm/types.h5
-rw-r--r--arch/arm/Kconfig96
-rw-r--r--arch/arm/Kconfig.debug45
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/Makefile3
-rw-r--r--arch/arm/boot/compressed/head.S1
-rw-r--r--arch/arm/boot/dts/at91sam9g20.dtsi7
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi7
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts5
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts137
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts182
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi397
-rw-r--r--arch/arm/boot/dts/highbank.dts12
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts17
-rw-r--r--arch/arm/boot/dts/imx51.dtsi20
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts18
-rw-r--r--arch/arm/boot/dts/imx53-evk.dts17
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts18
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts19
-rw-r--r--arch/arm/boot/dts/imx53.dtsi34
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts (renamed from arch/arm/boot/dts/imx6q-sabreauto.dts)12
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts49
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi34
-rw-r--r--arch/arm/boot/dts/omap2.dtsi67
-rw-r--r--arch/arm/boot/dts/omap3.dtsi31
-rw-r--r--arch/arm/boot/dts/omap4.dtsi28
-rw-r--r--arch/arm/boot/dts/tegra-cardhu.dts36
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts29
-rw-r--r--arch/arm/boot/dts/tegra-paz00.dts77
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts74
-rw-r--r--arch/arm/boot/dts/tegra-trimslice.dts65
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts45
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi71
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi127
-rw-r--r--arch/arm/boot/dts/testcases/tests-phandle.dtsi37
-rw-r--r--arch/arm/boot/dts/testcases/tests.dtsi1
-rw-r--r--arch/arm/boot/dts/usb_a9g20.dts5
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/common/Kconfig6
-rw-r--r--arch/arm/common/gic.c179
-rw-r--r--arch/arm/common/pl330.c136
-rw-r--r--arch/arm/common/timer-sp.c7
-rw-r--r--arch/arm/common/vic.c148
-rw-r--r--arch/arm/configs/at91cap9_defconfig (renamed from arch/arm/configs/at91cap9adk_defconfig)7
-rw-r--r--arch/arm/configs/at91rm9200_defconfig47
-rw-r--r--arch/arm/configs/at91sam9260_defconfig (renamed from arch/arm/configs/at91sam9260ek_defconfig)16
-rw-r--r--arch/arm/configs/at91sam9g20_defconfig (renamed from arch/arm/configs/at91sam9g20ek_defconfig)23
-rw-r--r--arch/arm/configs/at91sam9g45_defconfig7
-rw-r--r--arch/arm/configs/at91sam9rl_defconfig (renamed from arch/arm/configs/at91sam9rlek_defconfig)5
-rw-r--r--arch/arm/configs/ezx_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig2
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig13
-rw-r--r--arch/arm/configs/magician_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig7
-rw-r--r--arch/arm/configs/pcontrol_g20_defconfig175
-rw-r--r--arch/arm/configs/tegra_defconfig9
-rw-r--r--arch/arm/configs/u300_defconfig13
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/assembler.h11
-rw-r--r--arch/arm/include/asm/bug.h1
-rw-r--r--arch/arm/include/asm/cti.h179
-rw-r--r--arch/arm/include/asm/edac.h48
-rw-r--r--arch/arm/include/asm/entry-macro-vic2.S57
-rw-r--r--arch/arm/include/asm/gpio.h4
-rw-r--r--arch/arm/include/asm/hardirq.h17
-rw-r--r--arch/arm/include/asm/hardware/entry-macro-gic.S60
-rw-r--r--arch/arm/include/asm/hardware/gic.h26
-rw-r--r--arch/arm/include/asm/hardware/iop3xx.h1
-rw-r--r--arch/arm/include/asm/hardware/vic.h10
-rw-r--r--arch/arm/include/asm/idmap.h14
-rw-r--r--arch/arm/include/asm/ipcbuf.h30
-rw-r--r--arch/arm/include/asm/mach/arch.h11
-rw-r--r--arch/arm/include/asm/mach/time.h2
-rw-r--r--arch/arm/include/asm/opcodes.h20
-rw-r--r--arch/arm/include/asm/page.h4
-rw-r--r--arch/arm/include/asm/perf_event.h3
-rw-r--r--arch/arm/include/asm/pgalloc.h26
-rw-r--r--arch/arm/include/asm/pgtable-2level.h41
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h77
-rw-r--r--arch/arm/include/asm/pgtable-3level-types.h70
-rw-r--r--arch/arm/include/asm/pgtable-3level.h155
-rw-r--r--arch/arm/include/asm/pgtable-hwdef.h4
-rw-r--r--arch/arm/include/asm/pgtable.h55
-rw-r--r--arch/arm/include/asm/pmu.h25
-rw-r--r--arch/arm/include/asm/proc-fns.h21
-rw-r--r--arch/arm/include/asm/processor.h2
-rw-r--r--arch/arm/include/asm/prom.h1
-rw-r--r--arch/arm/include/asm/sched_clock.h108
-rw-r--r--arch/arm/include/asm/setup.h6
-rw-r--r--arch/arm/include/asm/socket.h3
-rw-r--r--arch/arm/include/asm/swab.h5
-rw-r--r--arch/arm/include/asm/system.h10
-rw-r--r--arch/arm/include/asm/thread_info.h2
-rw-r--r--arch/arm/include/asm/tlb.h12
-rw-r--r--arch/arm/include/asm/topology.h2
-rw-r--r--arch/arm/include/asm/types.h6
-rw-r--r--arch/arm/include/asm/unwind.h16
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/entry-armv.S9
-rw-r--r--arch/arm/kernel/head.S65
-rw-r--r--arch/arm/kernel/hw_breakpoint.c8
-rw-r--r--arch/arm/kernel/kprobes-arm.c4
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c27
-rw-r--r--arch/arm/kernel/kprobes-test-thumb.c16
-rw-r--r--arch/arm/kernel/kprobes-test.c66
-rw-r--r--arch/arm/kernel/kprobes-test.h100
-rw-r--r--arch/arm/kernel/leds.c21
-rw-r--r--arch/arm/kernel/machine_kexec.c15
-rw-r--r--arch/arm/kernel/opcodes.c72
-rw-r--r--arch/arm/kernel/perf_event.c39
-rw-r--r--arch/arm/kernel/perf_event_v6.c32
-rw-r--r--arch/arm/kernel/perf_event_v7.c401
-rw-r--r--arch/arm/kernel/perf_event_xscale.c16
-rw-r--r--arch/arm/kernel/pmu.c1
-rw-r--r--arch/arm/kernel/process.c86
-rw-r--r--arch/arm/kernel/sched_clock.c118
-rw-r--r--arch/arm/kernel/setup.c30
-rw-r--r--arch/arm/kernel/sleep.S4
-rw-r--r--arch/arm/kernel/smp.c36
-rw-r--r--arch/arm/kernel/smp_twd.c95
-rw-r--r--arch/arm/kernel/suspend.c18
-rw-r--r--arch/arm/kernel/swp_emulate.c16
-rw-r--r--arch/arm/kernel/tcm.c22
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kernel/unwind.c129
-rw-r--r--arch/arm/kernel/vmlinux.lds.S7
-rw-r--r--arch/arm/lib/Makefile3
-rw-r--r--arch/arm/lib/bitops.h26
-rw-r--r--arch/arm/lib/call_with_stack.S44
-rw-r--r--arch/arm/lib/changebit.S4
-rw-r--r--arch/arm/lib/clearbit.S4
-rw-r--r--arch/arm/lib/setbit.S4
-rw-r--r--arch/arm/lib/testchangebit.S4
-rw-r--r--arch/arm/lib/testclearbit.S4
-rw-r--r--arch/arm/lib/testsetbit.S4
-rw-r--r--arch/arm/mach-at91/Kconfig24
-rw-r--r--arch/arm/mach-at91/at91cap9.c44
-rw-r--r--arch/arm/mach-at91/at91cap9_devices.c49
-rw-r--r--arch/arm/mach-at91/at91rm9200.c28
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c50
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c8
-rw-r--r--arch/arm/mach-at91/at91sam9260.c44
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c57
-rw-r--r--arch/arm/mach-at91/at91sam9261.c34
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c35
-rw-r--r--arch/arm/mach-at91/at91sam9263.c47
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c61
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c38
-rw-r--r--arch/arm/mach-at91/at91sam9_alt_reset.S9
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c48
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c69
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c38
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c43
-rw-r--r--arch/arm/mach-at91/board-1arm.c4
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c10
-rw-r--r--arch/arm/mach-at91/board-cam60.c8
-rw-r--r--arch/arm/mach-at91/board-cap9adk.c21
-rw-r--r--arch/arm/mach-at91/board-carmeva.c9
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c14
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c7
-rw-r--r--arch/arm/mach-at91/board-csb337.c7
-rw-r--r--arch/arm/mach-at91/board-csb637.c4
-rw-r--r--arch/arm/mach-at91/board-dt.c3
-rw-r--r--arch/arm/mach-at91/board-eb9200.c11
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c7
-rw-r--r--arch/arm/mach-at91/board-eco920.c7
-rw-r--r--arch/arm/mach-at91/board-flexibity.c5
-rw-r--r--arch/arm/mach-at91/board-foxg20.c9
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c7
-rw-r--r--arch/arm/mach-at91/board-kafa.c4
-rw-r--r--arch/arm/mach-at91/board-kb9202.c8
-rw-r--r--arch/arm/mach-at91/board-neocore926.c9
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c8
-rw-r--r--arch/arm/mach-at91/board-picotux200.c5
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c18
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c13
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c5
-rw-r--r--arch/arm/mach-at91/board-rsi-ews.c4
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c12
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c16
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c12
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c13
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c8
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c9
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c10
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c16
-rw-r--r--arch/arm/mach-at91/board-usb-a926x.c14
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c9
-rw-r--r--arch/arm/mach-at91/generic.h10
-rw-r--r--arch/arm/mach-at91/gpio.c85
-rw-r--r--arch/arm/mach-at91/include/mach/at91_aic.h48
-rw-r--r--arch/arm/mach-at91/include/mach/at91_dbgu.h2
-rw-r--r--arch/arm/mach-at91/include/mach/at91_pit.h8
-rw-r--r--arch/arm/mach-at91/include/mach/at91_rtc.h24
-rw-r--r--arch/arm/mach-at91/include/mach/at91_shdwc.h16
-rw-r--r--arch/arm/mach-at91/include/mach/at91cap9.h27
-rw-r--r--arch/arm/mach-at91/include/mach/at91rm9200.h14
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9260.h23
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9261.h20
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9263.h33
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9_smc.h17
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9g45.h30
-rw-r--r--arch/arm/mach-at91/include/mach/at91sam9rl.h29
-rw-r--r--arch/arm/mach-at91/include/mach/at91x40.h1
-rw-r--r--arch/arm/mach-at91/include/mach/board.h42
-rw-r--r--arch/arm/mach-at91/include/mach/debug-macro.S10
-rw-r--r--arch/arm/mach-at91/include/mach/entry-macro.S11
-rw-r--r--arch/arm/mach-at91/include/mach/gpio.h336
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h12
-rw-r--r--arch/arm/mach-at91/include/mach/io.h8
-rw-r--r--arch/arm/mach-at91/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-at91/include/mach/system.h9
-rw-r--r--arch/arm/mach-at91/include/mach/system_rev.h2
-rw-r--r--arch/arm/mach-at91/include/mach/timex.h65
-rw-r--r--arch/arm/mach-at91/include/mach/uncompress.h6
-rw-r--r--arch/arm/mach-at91/irq.c38
-rw-r--r--arch/arm/mach-at91/pm.c11
-rw-r--r--arch/arm/mach-at91/sam9_smc.c62
-rw-r--r--arch/arm/mach-at91/sam9_smc.h3
-rw-r--r--arch/arm/mach-at91/setup.c44
-rw-r--r--arch/arm/mach-at91/soc.h1
-rw-r--r--arch/arm/mach-bcmring/arch.c25
-rw-r--r--arch/arm/mach-bcmring/core.c1
-rw-r--r--arch/arm/mach-bcmring/dma.c2
-rw-r--r--arch/arm/mach-bcmring/include/mach/system.h26
-rw-r--r--arch/arm/mach-bcmring/include/mach/vmalloc.h25
-rw-r--r--arch/arm/mach-clps711x/Makefile2
-rw-r--r--arch/arm/mach-clps711x/autcpu12.c1
-rw-r--r--arch/arm/mach-clps711x/cdb89712.c1
-rw-r--r--arch/arm/mach-clps711x/ceiva.c1
-rw-r--r--arch/arm/mach-clps711x/clep7312.c1
-rw-r--r--arch/arm/mach-clps711x/common.c (renamed from arch/arm/mach-clps711x/irq.c)100
-rw-r--r--arch/arm/mach-clps711x/common.h1
-rw-r--r--arch/arm/mach-clps711x/edb7211-arch.c1
-rw-r--r--arch/arm/mach-clps711x/fortunet.c1
-rw-r--r--arch/arm/mach-clps711x/include/mach/system.h5
-rw-r--r--arch/arm/mach-clps711x/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-clps711x/mm.c48
-rw-r--r--arch/arm/mach-clps711x/p720t.c1
-rw-r--r--arch/arm/mach-clps711x/time.c84
-rw-r--r--arch/arm/mach-cns3xxx/cns3420vb.c3
-rw-r--r--arch/arm/mach-cns3xxx/core.h1
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/system.h3
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/vmalloc.h11
-rw-r--r--arch/arm/mach-cns3xxx/pm.c4
-rw-r--r--arch/arm/mach-davinci/Makefile2
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c5
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c3
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c1
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c1
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c1
-rw-r--r--arch/arm/mach-davinci/clock.c13
-rw-r--r--arch/arm/mach-davinci/clock.h10
-rw-r--r--arch/arm/mach-davinci/common.c3
-rw-r--r--arch/arm/mach-davinci/da830.c1
-rw-r--r--arch/arm/mach-davinci/da850.c1
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c5
-rw-r--r--arch/arm/mach-davinci/devices.c5
-rw-r--r--arch/arm/mach-davinci/dm355.c1
-rw-r--r--arch/arm/mach-davinci/dm365.c1
-rw-r--r--arch/arm/mach-davinci/dm644x.c5
-rw-r--r--arch/arm/mach-davinci/dm646x.c2
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h3
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/dm646x.h53
-rw-r--r--arch/arm/mach-davinci/include/mach/io.h8
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h5
-rw-r--r--arch/arm/mach-davinci/include/mach/system.h6
-rw-r--r--arch/arm/mach-davinci/include/mach/tnetv107x.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/vmalloc.h14
-rw-r--r--arch/arm/mach-davinci/io.c48
-rw-r--r--arch/arm/mach-davinci/psc.c18
-rw-r--r--arch/arm/mach-davinci/tnetv107x.c7
-rw-r--r--arch/arm/mach-dove/addr-map.c121
-rw-r--r--arch/arm/mach-dove/cm-a510.c1
-rw-r--r--arch/arm/mach-dove/common.c32
-rw-r--r--arch/arm/mach-dove/common.h2
-rw-r--r--arch/arm/mach-dove/dove-db-setup.c1
-rw-r--r--arch/arm/mach-dove/include/mach/dove.h2
-rw-r--r--arch/arm/mach-dove/include/mach/system.h19
-rw-r--r--arch/arm/mach-dove/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-dove/pcie.c4
-rw-r--r--arch/arm/mach-ebsa110/core.c8
-rw-r--r--arch/arm/mach-ebsa110/include/mach/system.h2
-rw-r--r--arch/arm/mach-ebsa110/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c3
-rw-r--r--arch/arm/mach-ep93xx/core.c12
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c17
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c3
-rw-r--r--arch/arm/mach-ep93xx/include/mach/entry-macro.S42
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/system.h17
-rw-r--r--arch/arm/mach-ep93xx/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-ep93xx/micro9.c9
-rw-r--r--arch/arm/mach-ep93xx/simone.c3
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c3
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c3
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig38
-rw-r--r--arch/arm/mach-exynos/Makefile15
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c3
-rw-r--r--arch/arm/mach-exynos/clock.c305
-rw-r--r--arch/arm/mach-exynos/common.c699
-rw-r--r--arch/arm/mach-exynos/common.h41
-rw-r--r--arch/arm/mach-exynos/cpu.c298
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-exynos/dev-ohci.c52
-rw-r--r--arch/arm/mach-exynos/dma.c229
-rw-r--r--arch/arm/mach-exynos/include/mach/entry-macro.S75
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h11
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h20
-rw-r--r--arch/arm/mach-exynos/include/mach/ohci.h21
-rw-r--r--arch/arm/mach-exynos/include/mach/spi-clocks.h16
-rw-r--r--arch/arm/mach-exynos/include/mach/system.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/vmalloc.h22
-rw-r--r--arch/arm/mach-exynos/init.c42
-rw-r--r--arch/arm/mach-exynos/irq-combiner.c124
-rw-r--r--arch/arm/mach-exynos/irq-eint.c237
-rw-r--r--arch/arm/mach-exynos/mach-armlex4210.c8
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c85
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c17
-rw-r--r--arch/arm/mach-exynos/mach-origen.c21
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c10
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c27
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c18
-rw-r--r--arch/arm/mach-exynos/mct.c13
-rw-r--r--arch/arm/mach-exynos/platsmp.c28
-rw-r--r--arch/arm/mach-exynos/pm.c34
-rw-r--r--arch/arm/mach-exynos/setup-sdhci.c22
-rw-r--r--arch/arm/mach-exynos/setup-spi.c72
-rw-r--r--arch/arm/mach-exynos/setup-usb-phy.c15
-rw-r--r--arch/arm/mach-footbridge/cats-hw.c3
-rw-r--r--arch/arm/mach-footbridge/common.c27
-rw-r--r--arch/arm/mach-footbridge/common.h1
-rw-r--r--arch/arm/mach-footbridge/ebsa285.c1
-rw-r--r--arch/arm/mach-footbridge/include/mach/system.h56
-rw-r--r--arch/arm/mach-footbridge/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-footbridge/netwinder-hw.c27
-rw-r--r--arch/arm/mach-footbridge/personal.c1
-rw-r--r--arch/arm/mach-gemini/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-h720x/common.c5
-rw-r--r--arch/arm/mach-h720x/common.h1
-rw-r--r--arch/arm/mach-h720x/h7201-eval.c1
-rw-r--r--arch/arm/mach-h720x/h7202-eval.c1
-rw-r--r--arch/arm/mach-h720x/include/mach/system.h6
-rw-r--r--arch/arm/mach-h720x/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-highbank/core.h1
-rw-r--r--arch/arm/mach-highbank/highbank.c6
-rw-r--r--arch/arm/mach-highbank/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-highbank/include/mach/system.h2
-rw-r--r--arch/arm/mach-highbank/include/mach/vmalloc.h1
-rw-r--r--arch/arm/mach-highbank/system.c2
-rw-r--r--arch/arm/mach-imx/Kconfig25
-rw-r--r--arch/arm/mach-imx/Makefile10
-rw-r--r--arch/arm/mach-imx/Makefile.boot3
-rw-r--r--arch/arm/mach-imx/clock-imx35.c20
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c9
-rw-r--r--arch/arm/mach-imx/head-v7.S17
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c11
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c1
-rw-r--r--arch/arm/mach-imx/mach-bug.c1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c9
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c1
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c1
-rw-r--r--arch/arm/mach-imx/mach-imx27ipcam.c1
-rw-r--r--arch/arm/mach-imx/mach-imx27lite.c1
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c68
-rw-r--r--arch/arm/mach-imx/mach-kzm_arm11_01.c1
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c1
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c1
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c3
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c1
-rw-r--r--arch/arm/mach-imx/mach-mx31lilly.c1
-rw-r--r--arch/arm/mach-imx/mach-mx31lite.c1
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c1
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c1
-rw-r--r--arch/arm/mach-imx/mach-pca100.c1
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c1
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c1
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c1
-rw-r--r--arch/arm/mach-imx/mach-qong.c1
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c1
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c109
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c2
-rw-r--r--arch/arm/mach-imx/src.c30
-rw-r--r--arch/arm/mach-integrator/Kconfig4
-rw-r--r--arch/arm/mach-integrator/common.h1
-rw-r--r--arch/arm/mach-integrator/core.c27
-rw-r--r--arch/arm/mach-integrator/include/mach/system.h11
-rw-r--r--arch/arm/mach-integrator/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c1
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c3
-rw-r--r--arch/arm/mach-iop13xx/include/mach/iop13xx.h1
-rw-r--r--arch/arm/mach-iop13xx/include/mach/system.h14
-rw-r--r--arch/arm/mach-iop13xx/include/mach/vmalloc.h4
-rw-r--r--arch/arm/mach-iop13xx/iq81340mc.c1
-rw-r--r--arch/arm/mach-iop13xx/iq81340sc.c1
-rw-r--r--arch/arm/mach-iop13xx/setup.c11
-rw-r--r--arch/arm/mach-iop32x/em7210.c1
-rw-r--r--arch/arm/mach-iop32x/glantank.c1
-rw-r--r--arch/arm/mach-iop32x/include/mach/io.h7
-rw-r--r--arch/arm/mach-iop32x/include/mach/system.h21
-rw-r--r--arch/arm/mach-iop32x/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-iop32x/iq31244.c2
-rw-r--r--arch/arm/mach-iop32x/iq80321.c1
-rw-r--r--arch/arm/mach-iop32x/n2100.c9
-rw-r--r--arch/arm/mach-iop33x/include/mach/io.h7
-rw-r--r--arch/arm/mach-iop33x/include/mach/system.h10
-rw-r--r--arch/arm/mach-iop33x/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-iop33x/iq80331.c1
-rw-r--r--arch/arm/mach-iop33x/iq80332.c1
-rw-r--r--arch/arm/mach-ixp2000/core.c4
-rw-r--r--arch/arm/mach-ixp2000/enp2611.c1
-rw-r--r--arch/arm/mach-ixp2000/include/mach/platform.h1
-rw-r--r--arch/arm/mach-ixp2000/include/mach/system.h35
-rw-r--r--arch/arm/mach-ixp2000/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-ixp2000/ixdp2400.c1
-rw-r--r--arch/arm/mach-ixp2000/ixdp2800.c1
-rw-r--r--arch/arm/mach-ixp2000/ixdp2x01.c32
-rw-r--r--arch/arm/mach-ixp23xx/core.c6
-rw-r--r--arch/arm/mach-ixp23xx/espresso.c1
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/io.h29
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/platform.h1
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/system.h17
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-ixp23xx/ixdp2351.c12
-rw-r--r--arch/arm/mach-ixp23xx/roadrunner.c1
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c36
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c1
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/platform.h1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/system.h25
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/omixp-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/addr-map.c137
-rw-r--r--arch/arm/mach-kirkwood/common.c35
-rw-r--r--arch/arm/mach-kirkwood/common.h2
-rw-r--r--arch/arm/mach-kirkwood/d2net_v2-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/db88f6281-bp-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/dockstar-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/guruplug-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/io.h25
-rw-r--r--arch/arm/mach-kirkwood/include/mach/kirkwood.h1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/system.h19
-rw-r--r--arch/arm/mach-kirkwood/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-kirkwood/mpp.c1
-rw-r--r--arch/arm/mach-kirkwood/mpp.h1
-rw-r--r--arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/netspace_v2-setup.c3
-rw-r--r--arch/arm/mach-kirkwood/netxbig_v2-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/openrd-setup.c3
-rw-r--r--arch/arm/mach-kirkwood/pcie.c4
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/sheevaplug-setup.c8
-rw-r--r--arch/arm/mach-kirkwood/t5325-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/ts219-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/ts41x-setup.c1
-rw-r--r--arch/arm/mach-ks8695/board-acs5k.c1
-rw-r--r--arch/arm/mach-ks8695/board-dsm320.c1
-rw-r--r--arch/arm/mach-ks8695/board-micrel.c1
-rw-r--r--arch/arm/mach-ks8695/generic.h1
-rw-r--r--arch/arm/mach-ks8695/include/mach/system.h21
-rw-r--r--arch/arm/mach-ks8695/include/mach/vmalloc.h19
-rw-r--r--arch/arm/mach-ks8695/irq.c2
-rw-r--r--arch/arm/mach-ks8695/time.c18
-rw-r--r--arch/arm/mach-lpc32xx/common.c20
-rw-r--r--arch/arm/mach-lpc32xx/common.h2
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/system.h25
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/vmalloc.h24
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c3
-rw-r--r--arch/arm/mach-mmp/aspenite.c7
-rw-r--r--arch/arm/mach-mmp/avengers_lite.c2
-rw-r--r--arch/arm/mach-mmp/brownstone.c2
-rw-r--r--arch/arm/mach-mmp/common.c5
-rw-r--r--arch/arm/mach-mmp/common.h1
-rw-r--r--arch/arm/mach-mmp/flint.c6
-rw-r--r--arch/arm/mach-mmp/gplugd.c4
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h5
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio.h7
-rw-r--r--arch/arm/mach-mmp/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-mmp/include/mach/mmp2.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/pxa168.h3
-rw-r--r--arch/arm/mach-mmp/include/mach/pxa910.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/system.h10
-rw-r--r--arch/arm/mach-mmp/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-mmp/jasper.c1
-rw-r--r--arch/arm/mach-mmp/mmp2.c39
-rw-r--r--arch/arm/mach-mmp/pxa168.c45
-rw-r--r--arch/arm/mach-mmp/pxa910.c40
-rw-r--r--arch/arm/mach-mmp/tavorevb.c7
-rw-r--r--arch/arm/mach-mmp/teton_bga.c4
-rw-r--r--arch/arm/mach-mmp/time.c16
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c9
-rw-r--r--arch/arm/mach-msm/Kconfig36
-rw-r--r--arch/arm/mach-msm/board-msm8960.c2
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c4
-rw-r--r--arch/arm/mach-msm/board-sapphire.c2
-rw-r--r--arch/arm/mach-msm/devices-iommu.c1
-rw-r--r--arch/arm/mach-msm/include/mach/debug-macro.S51
-rw-r--r--arch/arm/mach-msm/include/mach/entry-macro-qgic.S17
-rw-r--r--arch/arm/mach-msm/include/mach/entry-macro-vic.S37
-rw-r--r--arch/arm/mach-msm/include/mach/entry-macro.S27
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h12
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h12
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8960.h5
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h12
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x60.h5
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h12
-rw-r--r--arch/arm/mach-msm/include/mach/system.h8
-rw-r--r--arch/arm/mach-msm/include/mach/uncompress.h39
-rw-r--r--arch/arm/mach-msm/io.c15
-rw-r--r--arch/arm/mach-msm/platsmp.c2
-rw-r--r--arch/arm/mach-msm/smd_debug.c2
-rw-r--r--arch/arm/mach-msm/timer.c347
-rw-r--r--arch/arm/mach-mv78xx0/addr-map.c102
-rw-r--r--arch/arm/mach-mv78xx0/buffalo-wxl-setup.c1
-rw-r--r--arch/arm/mach-mv78xx0/common.c38
-rw-r--r--arch/arm/mach-mv78xx0/common.h2
-rw-r--r--arch/arm/mach-mv78xx0/db78x00-bp-setup.c1
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/system.h19
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-mv78xx0/mpp.c1
-rw-r--r--arch/arm/mach-mv78xx0/pcie.c4
-rw-r--r--arch/arm/mach-mv78xx0/rd78x00-masa-setup.c1
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51.c1
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51sd.c1
-rw-r--r--arch/arm/mach-mx5/board-mx50_rdp.c1
-rw-r--r--arch/arm/mach-mx5/board-mx51_3ds.c1
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c3
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikamx.c3
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikasb.c1
-rw-r--r--arch/arm/mach-mx5/board-mx53_ard.c1
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c3
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c3
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c3
-rw-r--r--arch/arm/mach-mx5/cpu.c5
-rw-r--r--arch/arm/mach-mx5/imx51-dt.c13
-rw-r--r--arch/arm/mach-mx5/imx53-dt.c13
-rw-r--r--arch/arm/mach-mx5/mm.c23
-rw-r--r--arch/arm/mach-mx5/system.c3
-rw-r--r--arch/arm/mach-mxs/clock-mx23.c10
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c50
-rw-r--r--arch/arm/mach-mxs/clock.c33
-rw-r--r--arch/arm/mach-mxs/devices-mx28.h3
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-saif.c5
-rw-r--r--arch/arm/mach-mxs/include/mach/common.h2
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h4
-rw-r--r--arch/arm/mach-mxs/include/mach/digctl.h21
-rw-r--r--arch/arm/mach-mxs/include/mach/mx28.h4
-rw-r--r--arch/arm/mach-mxs/include/mach/mxs.h1
-rw-r--r--arch/arm/mach-mxs/include/mach/system.h2
-rw-r--r--arch/arm/mach-mxs/include/mach/vmalloc.h22
-rw-r--r--arch/arm/mach-mxs/mach-m28evk.c3
-rw-r--r--arch/arm/mach-mxs/mach-mx23evk.c1
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c21
-rw-r--r--arch/arm/mach-mxs/mach-stmp378x_devb.c3
-rw-r--r--arch/arm/mach-mxs/mach-tx28.c1
-rw-r--r--arch/arm/mach-mxs/module-tx28.c4
-rw-r--r--arch/arm/mach-mxs/system.c6
-rw-r--r--arch/arm/mach-mxs/timer.c2
-rw-r--r--arch/arm/mach-netx/generic.c5
-rw-r--r--arch/arm/mach-netx/generic.h1
-rw-r--r--arch/arm/mach-netx/include/mach/entry-macro.S13
-rw-r--r--arch/arm/mach-netx/include/mach/system.h10
-rw-r--r--arch/arm/mach-netx/nxdb500.c3
-rw-r--r--arch/arm/mach-netx/nxdkn.c3
-rw-r--r--arch/arm/mach-netx/nxeb500hmi.c3
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c5
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c12
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.h4
-rw-r--r--arch/arm/mach-nomadik/include/mach/entry-macro.S30
-rw-r--r--arch/arm/mach-nomadik/include/mach/setup.h3
-rw-r--r--arch/arm/mach-nomadik/include/mach/system.h13
-rw-r--r--arch/arm/mach-nomadik/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-omap1/Kconfig72
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c13
-rw-r--r--arch/arm/mach-omap1/board-fsample.c3
-rw-r--r--arch/arm/mach-omap1/board-generic.c3
-rw-r--r--arch/arm/mach-omap1/board-h2.c3
-rw-r--r--arch/arm/mach-omap1/board-h3.c3
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c3
-rw-r--r--arch/arm/mach-omap1/board-innovator.c3
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c3
-rw-r--r--arch/arm/mach-omap1/board-osk.c3
-rw-r--r--arch/arm/mach-omap1/board-palmte.c3
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c3
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c3
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c3
-rw-r--r--arch/arm/mach-omap1/board-sx1.c3
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c8
-rw-r--r--arch/arm/mach-omap1/clock.c14
-rw-r--r--arch/arm/mach-omap1/clock.h6
-rw-r--r--arch/arm/mach-omap1/clock_data.c64
-rw-r--r--arch/arm/mach-omap1/common.h62
-rw-r--r--arch/arm/mach-omap1/devices.c5
-rw-r--r--arch/arm/mach-omap1/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-omap1/io.c1
-rw-r--r--arch/arm/mach-omap1/opp.h1
-rw-r--r--arch/arm/mach-omap1/opp_data.c63
-rw-r--r--arch/arm/mach-omap1/reset.c5
-rw-r--r--arch/arm/mach-omap1/time.c60
-rw-r--r--arch/arm/mach-omap1/timer32k.c2
-rw-r--r--arch/arm/mach-omap2/Kconfig42
-rw-r--r--arch/arm/mach-omap2/Makefile25
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c104
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c81
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c4
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c26
-rw-r--r--arch/arm/mach-omap2/board-apollon.c4
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c88
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c4
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c4
-rw-r--r--arch/arm/mach-omap2/board-generic.c9
-rw-r--r--arch/arm/mach-omap2/board-h4.c4
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c6
-rw-r--r--arch/arm/mach-omap2/board-ldp.c4
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c16
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c6
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c4
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c74
-rw-r--r--arch/arm/mach-omap2/board-overo.c4
-rw-r--r--arch/arm/mach-omap2/board-rm680.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c50
-rw-r--r--arch/arm/mach-omap2/board-rx51.c4
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c46
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom.c6
-rw-r--r--arch/arm/mach-omap2/clock.c2
-rw-r--r--arch/arm/mach-omap2/clock.h2
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c43
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c19
-rw-r--r--arch/arm/mach-omap2/cm2xxx_3xxx.c2
-rw-r--r--arch/arm/mach-omap2/cm44xx.c2
-rw-r--r--arch/arm/mach-omap2/cminst44xx.c2
-rw-r--r--arch/arm/mach-omap2/common.c50
-rw-r--r--arch/arm/mach-omap2/common.h239
-rw-r--r--arch/arm/mach-omap2/control.c2
-rw-r--r--arch/arm/mach-omap2/control.h8
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c23
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c245
-rw-r--r--arch/arm/mach-omap2/devices.c29
-rw-r--r--arch/arm/mach-omap2/display.c160
-rw-r--r--arch/arm/mach-omap2/display.h29
-rw-r--r--arch/arm/mach-omap2/hsmmc.c59
-rw-r--r--arch/arm/mach-omap2/hsmmc.h1
-rw-r--r--arch/arm/mach-omap2/i2c.c2
-rw-r--r--arch/arm/mach-omap2/id.c54
-rw-r--r--arch/arm/mach-omap2/include/mach/barriers.h (renamed from arch/arm/mach-netx/include/mach/vmalloc.h)24
-rw-r--r--arch/arm/mach-omap2/include/mach/debug-macro.S12
-rw-r--r--arch/arm/mach-omap2/include/mach/entry-macro.S137
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-secure.h57
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-wakeupgen.h39
-rw-r--r--arch/arm/mach-omap2/include/mach/omap4-common.h43
-rw-r--r--arch/arm/mach-omap2/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-omap2/io.c55
-rw-r--r--arch/arm/mach-omap2/io.h0
-rw-r--r--arch/arm/mach-omap2/irq.c53
-rw-r--r--arch/arm/mach-omap2/mcbsp.c6
-rw-r--r--arch/arm/mach-omap2/mux.c89
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S5
-rw-r--r--arch/arm/mach-omap2/omap-hotplug.c17
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c398
-rw-r--r--arch/arm/mach-omap2/omap-secure.c81
-rw-r--r--arch/arm/mach-omap2/omap-smc.S (renamed from arch/arm/mach-omap2/omap44xx-smc.S)23
-rw-r--r--arch/arm/mach-omap2/omap-smp.c48
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c389
-rw-r--r--arch/arm/mach-omap2/omap4-common.c99
-rw-r--r--arch/arm/mach-omap2/omap4-sar-layout.h50
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c231
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c17
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c429
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c241
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h4
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c2
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c35
-rw-r--r--arch/arm/mach-omap2/opp2xxx.h2
-rw-r--r--arch/arm/mach-omap2/pm.c8
-rw-r--r--arch/arm/mach-omap2/pm.h1
-rw-r--r--arch/arm/mach-omap2/pm24xx.c21
-rw-r--r--arch/arm/mach-omap2/pm34xx.c159
-rw-r--r--arch/arm/mach-omap2/pm44xx.c155
-rw-r--r--arch/arm/mach-omap2/prcm-common.h77
-rw-r--r--arch/arm/mach-omap2/prcm.c7
-rw-r--r--arch/arm/mach-omap2/prcm_mpu44xx.c2
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.c99
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.h9
-rw-r--r--arch/arm/mach-omap2/prm44xx.c118
-rw-r--r--arch/arm/mach-omap2/prm44xx.h8
-rw-r--r--arch/arm/mach-omap2/prm_common.c320
-rw-r--r--arch/arm/mach-omap2/prminst44xx.c2
-rw-r--r--arch/arm/mach-omap2/sdram-nokia.c27
-rw-r--r--arch/arm/mach-omap2/sdrc.c2
-rw-r--r--arch/arm/mach-omap2/sdrc2xxx.c2
-rw-r--r--arch/arm/mach-omap2/serial.c909
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S379
-rw-r--r--arch/arm/mach-omap2/smartreflex.c4
-rw-r--r--arch/arm/mach-omap2/timer.c22
-rw-r--r--arch/arm/mach-omap2/twl-common.c11
-rw-r--r--arch/arm/mach-omap2/twl-common.h3
-rw-r--r--arch/arm/mach-omap2/usb-host.c100
-rw-r--r--arch/arm/mach-omap2/usb-musb.c3
-rw-r--r--arch/arm/mach-omap2/vc3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/vc44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/voltage.c2
-rw-r--r--arch/arm/mach-omap2/voltagedomains3xxx_data.c42
-rw-r--r--arch/arm/mach-omap2/voltagedomains44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/vp.c2
-rw-r--r--arch/arm/mach-omap2/vp3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/vp44xx_data.c2
-rw-r--r--arch/arm/mach-orion5x/addr-map.c146
-rw-r--r--arch/arm/mach-orion5x/common.c35
-rw-r--r--arch/arm/mach-orion5x/common.h4
-rw-r--r--arch/arm/mach-orion5x/d2net-setup.c2
-rw-r--r--arch/arm/mach-orion5x/db88f5281-setup.c1
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c1
-rw-r--r--arch/arm/mach-orion5x/edmini_v2-setup.c1
-rw-r--r--arch/arm/mach-orion5x/include/mach/io.h25
-rw-r--r--arch/arm/mach-orion5x/include/mach/orion5x.h2
-rw-r--r--arch/arm/mach-orion5x/include/mach/system.h14
-rw-r--r--arch/arm/mach-orion5x/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-orion5x/kurobox_pro-setup.c2
-rw-r--r--arch/arm/mach-orion5x/ls-chl-setup.c3
-rw-r--r--arch/arm/mach-orion5x/ls_hgl-setup.c3
-rw-r--r--arch/arm/mach-orion5x/lsmini-setup.c3
-rw-r--r--arch/arm/mach-orion5x/mpp.c1
-rw-r--r--arch/arm/mach-orion5x/mss2-setup.c3
-rw-r--r--arch/arm/mach-orion5x/mv2120-setup.c3
-rw-r--r--arch/arm/mach-orion5x/net2big-setup.c1
-rw-r--r--arch/arm/mach-orion5x/pci.c5
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c1
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c1
-rw-r--r--arch/arm/mach-orion5x/rd88f5182-setup.c1
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c1
-rw-r--r--arch/arm/mach-orion5x/terastation_pro2-setup.c1
-rw-r--r--arch/arm/mach-orion5x/ts209-setup.c3
-rw-r--r--arch/arm/mach-orion5x/ts409-setup.c1
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c1
-rw-r--r--arch/arm/mach-orion5x/wnr854t-setup.c1
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c1
-rw-r--r--arch/arm/mach-picoxcell/Makefile1
-rw-r--r--arch/arm/mach-picoxcell/common.c61
-rw-r--r--arch/arm/mach-picoxcell/common.h1
-rw-r--r--arch/arm/mach-picoxcell/include/mach/entry-macro.S11
-rw-r--r--arch/arm/mach-picoxcell/include/mach/irqs.h9
-rw-r--r--arch/arm/mach-picoxcell/include/mach/memory.h1
-rw-r--r--arch/arm/mach-picoxcell/include/mach/system.h5
-rw-r--r--arch/arm/mach-picoxcell/include/mach/vmalloc.h14
-rw-r--r--arch/arm/mach-picoxcell/io.c32
-rw-r--r--arch/arm/mach-picoxcell/time.c17
-rw-r--r--arch/arm/mach-pnx4008/core.c6
-rw-r--r--arch/arm/mach-pnx4008/include/mach/system.h9
-rw-r--r--arch/arm/mach-pnx4008/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-prima2/common.h1
-rw-r--r--arch/arm/mach-prima2/include/mach/map.h6
-rw-r--r--arch/arm/mach-prima2/include/mach/system.h12
-rw-r--r--arch/arm/mach-prima2/include/mach/vmalloc.h16
-rw-r--r--arch/arm/mach-prima2/pm.c1
-rw-r--r--arch/arm/mach-prima2/prima2.c2
-rw-r--r--arch/arm/mach-prima2/rstc.c7
-rw-r--r--arch/arm/mach-pxa/am200epd.c4
-rw-r--r--arch/arm/mach-pxa/am300epd.c4
-rw-r--r--arch/arm/mach-pxa/balloon3.c6
-rw-r--r--arch/arm/mach-pxa/capc7117.c15
-rw-r--r--arch/arm/mach-pxa/cm-x270.c4
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c5
-rw-r--r--arch/arm/mach-pxa/cm-x300.c7
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270-income.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c8
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c5
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c7
-rw-r--r--arch/arm/mach-pxa/corgi.c10
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c21
-rw-r--r--arch/arm/mach-pxa/csb726.c1
-rw-r--r--arch/arm/mach-pxa/devices.c50
-rw-r--r--arch/arm/mach-pxa/devices.h1
-rw-r--r--arch/arm/mach-pxa/em-x270.c8
-rw-r--r--arch/arm/mach-pxa/eseries.c10
-rw-r--r--arch/arm/mach-pxa/ezx.c6
-rw-r--r--arch/arm/mach-pxa/generic.h2
-rw-r--r--arch/arm/mach-pxa/gumstix.c3
-rw-r--r--arch/arm/mach-pxa/h5000.c1
-rw-r--r--arch/arm/mach-pxa/himalaya.c1
-rw-r--r--arch/arm/mach-pxa/hx4700.c19
-rw-r--r--arch/arm/mach-pxa/icontrol.c11
-rw-r--r--arch/arm/mach-pxa/idp.c5
-rw-r--r--arch/arm/mach-pxa/include/mach/balloon3.h6
-rw-r--r--arch/arm/mach-pxa/include/mach/corgi.h26
-rw-r--r--arch/arm/mach-pxa/include/mach/csb726.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/entry-macro.S36
-rw-r--r--arch/arm/mach-pxa/include/mach/gpio-pxa.h133
-rw-r--r--arch/arm/mach-pxa/include/mach/gpio.h20
-rw-r--r--arch/arm/mach-pxa/include/mach/gumstix.h20
-rw-r--r--arch/arm/mach-pxa/include/mach/hx4700.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/idp.h16
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/littleton.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/magician.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palm27x.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/palmld.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/palmt5.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtc.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtx.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm027.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm990_baseboard.h14
-rw-r--r--arch/arm/mach-pxa/include/mach/poodle.h26
-rw-r--r--arch/arm/mach-pxa/include/mach/spitz.h40
-rw-r--r--arch/arm/mach-pxa/include/mach/system.h8
-rw-r--r--arch/arm/mach-pxa/include/mach/tosa.h54
-rw-r--r--arch/arm/mach-pxa/include/mach/trizeps4.h16
-rw-r--r--arch/arm/mach-pxa/include/mach/vmalloc.h11
-rw-r--r--arch/arm/mach-pxa/irq.c61
-rw-r--r--arch/arm/mach-pxa/littleton.c7
-rw-r--r--arch/arm/mach-pxa/lpd270.c5
-rw-r--r--arch/arm/mach-pxa/lubbock.c5
-rw-r--r--arch/arm/mach-pxa/magician.c9
-rw-r--r--arch/arm/mach-pxa/mainstone.c5
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c6
-rw-r--r--arch/arm/mach-pxa/mioa701.c44
-rw-r--r--arch/arm/mach-pxa/mp900.c1
-rw-r--r--arch/arm/mach-pxa/mxm8x10.c4
-rw-r--r--arch/arm/mach-pxa/palm27x.c4
-rw-r--r--arch/arm/mach-pxa/palmld.c3
-rw-r--r--arch/arm/mach-pxa/palmt5.c3
-rw-r--r--arch/arm/mach-pxa/palmtc.c5
-rw-r--r--arch/arm/mach-pxa/palmte2.c3
-rw-r--r--arch/arm/mach-pxa/palmtreo.c2
-rw-r--r--arch/arm/mach-pxa/palmtx.c3
-rw-r--r--arch/arm/mach-pxa/palmz72.c3
-rw-r--r--arch/arm/mach-pxa/pcm027.c1
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c6
-rw-r--r--arch/arm/mach-pxa/poodle.c15
-rw-r--r--arch/arm/mach-pxa/pxa25x.c7
-rw-r--r--arch/arm/mach-pxa/pxa27x.c7
-rw-r--r--arch/arm/mach-pxa/pxa300.c1
-rw-r--r--arch/arm/mach-pxa/pxa320.c1
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c9
-rw-r--r--arch/arm/mach-pxa/pxa95x.c6
-rw-r--r--arch/arm/mach-pxa/raumfeld.c11
-rw-r--r--arch/arm/mach-pxa/reset.c7
-rw-r--r--arch/arm/mach-pxa/saar.c7
-rw-r--r--arch/arm/mach-pxa/saarb.c3
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c24
-rw-r--r--arch/arm/mach-pxa/spitz.c11
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c11
-rw-r--r--arch/arm/mach-pxa/stargate2.c28
-rw-r--r--arch/arm/mach-pxa/tavorevb.c5
-rw-r--r--arch/arm/mach-pxa/tavorevb3.c3
-rw-r--r--arch/arm/mach-pxa/time.c15
-rw-r--r--arch/arm/mach-pxa/tosa.c9
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-pxa/viper.c13
-rw-r--r--arch/arm/mach-pxa/vpac270.c15
-rw-r--r--arch/arm/mach-pxa/xcep.c1
-rw-r--r--arch/arm/mach-pxa/z2.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c21
-rw-r--r--arch/arm/mach-pxa/zylonite.c5
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa300.c4
-rw-r--r--arch/arm/mach-realview/Kconfig9
-rw-r--r--arch/arm/mach-realview/core.c2
-rw-r--r--arch/arm/mach-realview/core.h1
-rw-r--r--arch/arm/mach-realview/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-realview/include/mach/system.h17
-rw-r--r--arch/arm/mach-realview/include/mach/vmalloc.h21
-rw-r--r--arch/arm/mach-realview/realview_eb.c12
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c8
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c8
-rw-r--r--arch/arm/mach-realview/realview_pba8.c8
-rw-r--r--arch/arm/mach-realview/realview_pbx.c12
-rw-r--r--arch/arm/mach-rpc/include/mach/system.h14
-rw-r--r--arch/arm/mach-rpc/include/mach/vmalloc.h10
-rw-r--r--arch/arm/mach-rpc/riscpc.c12
-rw-r--r--arch/arm/mach-s3c2410/bast-irq.c2
-rw-r--r--arch/arm/mach-s3c2410/common.h17
-rw-r--r--arch/arm/mach-s3c2410/cpu-freq.c26
-rw-r--r--arch/arm/mach-s3c2410/dma.c28
-rw-r--r--arch/arm/mach-s3c2410/include/mach/dma.h4
-rw-r--r--arch/arm/mach-s3c2410/include/mach/reset.h22
-rw-r--r--arch/arm/mach-s3c2410/include/mach/system-reset.h32
-rw-r--r--arch/arm/mach-s3c2410/include/mach/system.h4
-rw-r--r--arch/arm/mach-s3c2410/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-s3c2410/mach-amlm5900.c3
-rw-r--r--arch/arm/mach-s3c2410/mach-bast.c24
-rw-r--r--arch/arm/mach-s3c2410/mach-h1940.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-n30.c4
-rw-r--r--arch/arm/mach-s3c2410/mach-otom.c3
-rw-r--r--arch/arm/mach-s3c2410/mach-qt2410.c7
-rw-r--r--arch/arm/mach-s3c2410/mach-smdk2410.c5
-rw-r--r--arch/arm/mach-s3c2410/mach-tct_hammer.c3
-rw-r--r--arch/arm/mach-s3c2410/mach-vr1000.c26
-rw-r--r--arch/arm/mach-s3c2410/pll.c20
-rw-r--r--arch/arm/mach-s3c2410/pm.c36
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.c43
-rw-r--r--arch/arm/mach-s3c2412/clock.c9
-rw-r--r--arch/arm/mach-s3c2412/cpu-freq.c13
-rw-r--r--arch/arm/mach-s3c2412/dma.c12
-rw-r--r--arch/arm/mach-s3c2412/irq.c12
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c2
-rw-r--r--arch/arm/mach-s3c2412/mach-smdk2413.c3
-rw-r--r--arch/arm/mach-s3c2412/mach-vstms.c1
-rw-r--r--arch/arm/mach-s3c2412/pm.c12
-rw-r--r--arch/arm/mach-s3c2412/s3c2412.c25
-rw-r--r--arch/arm/mach-s3c2416/Makefile1
-rw-r--r--arch/arm/mach-s3c2416/clock.c68
-rw-r--r--arch/arm/mach-s3c2416/irq.c12
-rw-r--r--arch/arm/mach-s3c2416/mach-smdk2416.c2
-rw-r--r--arch/arm/mach-s3c2416/pm.c12
-rw-r--r--arch/arm/mach-s3c2416/s3c2416.c22
-rw-r--r--arch/arm/mach-s3c2416/setup-sdhci.c24
-rw-r--r--arch/arm/mach-s3c2440/clock.c59
-rw-r--r--arch/arm/mach-s3c2440/common.h17
-rw-r--r--arch/arm/mach-s3c2440/dma.c12
-rw-r--r--arch/arm/mach-s3c2440/irq.c12
-rw-r--r--arch/arm/mach-s3c2440/mach-anubis.c25
-rw-r--r--arch/arm/mach-s3c2440/mach-at2440evb.c25
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c2
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c21
-rw-r--r--arch/arm/mach-s3c2440/mach-nexcoder.c3
-rw-r--r--arch/arm/mach-s3c2440/mach-osiris.c27
-rw-r--r--arch/arm/mach-s3c2440/mach-rx1950.c23
-rw-r--r--arch/arm/mach-s3c2440/mach-rx3715.c24
-rw-r--r--arch/arm/mach-s3c2440/mach-smdk2440.c3
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-cpufreq.c22
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-pll-12000000.c20
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-pll-16934400.c24
-rw-r--r--arch/arm/mach-s3c2440/s3c2440.c21
-rw-r--r--arch/arm/mach-s3c2440/s3c2442.c17
-rw-r--r--arch/arm/mach-s3c2440/s3c244x-clock.c19
-rw-r--r--arch/arm/mach-s3c2440/s3c244x-irq.c20
-rw-r--r--arch/arm/mach-s3c2440/s3c244x.c16
-rw-r--r--arch/arm/mach-s3c2443/clock.c2
-rw-r--r--arch/arm/mach-s3c2443/dma.c12
-rw-r--r--arch/arm/mach-s3c2443/irq.c12
-rw-r--r--arch/arm/mach-s3c2443/mach-smdk2443.c1
-rw-r--r--arch/arm/mach-s3c2443/s3c2443.c23
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig15
-rw-r--r--arch/arm/mach-s3c64xx/Makefile65
-rw-r--r--arch/arm/mach-s3c64xx/clock.c247
-rw-r--r--arch/arm/mach-s3c64xx/common.c385
-rw-r--r--arch/arm/mach-s3c64xx/common.h56
-rw-r--r--arch/arm/mach-s3c64xx/cpu.c161
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c179
-rw-r--r--arch/arm/mach-s3c64xx/dma.c23
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/crag6410.h6
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/entry-macro.S7
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/map.h2
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/system.h11
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-s3c64xx/irq-eint.c213
-rw-r--r--arch/arm/mach-s3c64xx/irq.c47
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c58
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c47
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c5
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c5
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c6
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c6
-rw-r--r--arch/arm/mach-s3c64xx/pm.c189
-rw-r--r--arch/arm/mach-s3c64xx/s3c6400.c20
-rw-r--r--arch/arm/mach-s3c64xx/s3c6410.c21
-rw-r--r--arch/arm/mach-s3c64xx/setup-fb-24bpp.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-sdhci.c24
-rw-r--r--arch/arm/mach-s3c64xx/setup-spi.c45
-rw-r--r--arch/arm/mach-s5p64x0/Kconfig31
-rw-r--r--arch/arm/mach-s5p64x0/Makefile12
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6440.c170
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6450.c158
-rw-r--r--arch/arm/mach-s5p64x0/clock.c6
-rw-r--r--arch/arm/mach-s5p64x0/common.c447
-rw-r--r--arch/arm/mach-s5p64x0/common.h57
-rw-r--r--arch/arm/mach-s5p64x0/cpu.c215
-rw-r--r--arch/arm/mach-s5p64x0/dev-spi.c224
-rw-r--r--arch/arm/mach-s5p64x0/dma.c227
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/entry-macro.S7
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/map.h2
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/system.h2
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-s5p64x0/init.c73
-rw-r--r--arch/arm/mach-s5p64x0/irq-eint.c155
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6440.c33
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6450.c34
-rw-r--r--arch/arm/mach-s5p64x0/pm.c10
-rw-r--r--arch/arm/mach-s5p64x0/setup-sdhci-gpio.c104
-rw-r--r--arch/arm/mach-s5p64x0/setup-spi.c55
-rw-r--r--arch/arm/mach-s5pc100/Kconfig5
-rw-r--r--arch/arm/mach-s5pc100/Makefile28
-rw-r--r--arch/arm/mach-s5pc100/clock.c290
-rw-r--r--arch/arm/mach-s5pc100/common.c (renamed from arch/arm/mach-s5pc100/cpu.c)118
-rw-r--r--arch/arm/mach-s5pc100/common.h37
-rw-r--r--arch/arm/mach-s5pc100/dev-spi.c227
-rw-r--r--arch/arm/mach-s5pc100/dma.c247
-rw-r--r--arch/arm/mach-s5pc100/include/mach/entry-macro.S25
-rw-r--r--arch/arm/mach-s5pc100/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-s5pc100/include/mach/map.h3
-rw-r--r--arch/arm/mach-s5pc100/include/mach/system.h2
-rw-r--r--arch/arm/mach-s5pc100/include/mach/vmalloc.h17
-rw-r--r--arch/arm/mach-s5pc100/init.c24
-rw-r--r--arch/arm/mach-s5pc100/mach-smdkc100.c8
-rw-r--r--arch/arm/mach-s5pc100/setup-sdhci.c23
-rw-r--r--arch/arm/mach-s5pc100/setup-spi.c65
-rw-r--r--arch/arm/mach-s5pv210/Kconfig5
-rw-r--r--arch/arm/mach-s5pv210/Makefile20
-rw-r--r--arch/arm/mach-s5pv210/clock.c329
-rw-r--r--arch/arm/mach-s5pv210/common.c (renamed from arch/arm/mach-s5pv210/cpu.c)107
-rw-r--r--arch/arm/mach-s5pv210/common.h37
-rw-r--r--arch/arm/mach-s5pv210/dev-spi.c175
-rw-r--r--arch/arm/mach-s5pv210/dma.c241
-rw-r--r--arch/arm/mach-s5pv210/include/mach/entry-macro.S37
-rw-r--r--arch/arm/mach-s5pv210/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-s5pv210/include/mach/map.h2
-rw-r--r--arch/arm/mach-s5pv210/include/mach/system.h2
-rw-r--r--arch/arm/mach-s5pv210/include/mach/vmalloc.h22
-rw-r--r--arch/arm/mach-s5pv210/init.c44
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c11
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c14
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkc110.c10
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c21
-rw-r--r--arch/arm/mach-s5pv210/mach-torbreck.c8
-rw-r--r--arch/arm/mach-s5pv210/pm.c10
-rw-r--r--arch/arm/mach-s5pv210/setup-sdhci.c22
-rw-r--r--arch/arm/mach-s5pv210/setup-spi.c51
-rw-r--r--arch/arm/mach-sa1100/Makefile.boot4
-rw-r--r--arch/arm/mach-sa1100/assabet.c1
-rw-r--r--arch/arm/mach-sa1100/badge4.c1
-rw-r--r--arch/arm/mach-sa1100/cerf.c1
-rw-r--r--arch/arm/mach-sa1100/clock.c91
-rw-r--r--arch/arm/mach-sa1100/collie.c1
-rw-r--r--arch/arm/mach-sa1100/generic.c31
-rw-r--r--arch/arm/mach-sa1100/generic.h1
-rw-r--r--arch/arm/mach-sa1100/h3100.c1
-rw-r--r--arch/arm/mach-sa1100/h3600.c1
-rw-r--r--arch/arm/mach-sa1100/hackkit.c1
-rw-r--r--arch/arm/mach-sa1100/include/mach/system.h13
-rw-r--r--arch/arm/mach-sa1100/include/mach/vmalloc.h4
-rw-r--r--arch/arm/mach-sa1100/jornada720.c1
-rw-r--r--arch/arm/mach-sa1100/lart.c1
-rw-r--r--arch/arm/mach-sa1100/nanoengine.c2
-rw-r--r--arch/arm/mach-sa1100/pleb.c1
-rw-r--r--arch/arm/mach-sa1100/shannon.c1
-rw-r--r--arch/arm/mach-sa1100/simpad.c1
-rw-r--r--arch/arm/mach-sa1100/time.c28
-rw-r--r--arch/arm/mach-shark/core.c4
-rw-r--r--arch/arm/mach-shark/include/mach/system.h3
-rw-r--r--arch/arm/mach-shark/include/mach/vmalloc.h4
-rw-r--r--arch/arm/mach-shmobile/Makefile1
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c5
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c2
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c2
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c2
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c141
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c18
-rw-r--r--arch/arm/mach-shmobile/entry-gic.S18
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h5
-rw-r--r--arch/arm/mach-shmobile/include/mach/entry-macro.S9
-rw-r--r--arch/arm/mach-shmobile/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h6
-rw-r--r--arch/arm/mach-shmobile/include/mach/system.h2
-rw-r--r--arch/arm/mach-shmobile/include/mach/vmalloc.h7
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c50
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c196
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c6
-rw-r--r--arch/arm/mach-shmobile/sleep-sh7372.S21
-rw-r--r--arch/arm/mach-spear3xx/include/mach/entry-macro.S27
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/vmalloc.h19
-rw-r--r--arch/arm/mach-spear3xx/spear300_evb.c3
-rw-r--r--arch/arm/mach-spear3xx/spear310_evb.c3
-rw-r--r--arch/arm/mach-spear3xx/spear320_evb.c3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/entry-macro.S36
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/vmalloc.h19
-rw-r--r--arch/arm/mach-spear6xx/spear600_evb.c3
-rw-r--r--arch/arm/mach-tcc8k/Kconfig11
-rw-r--r--arch/arm/mach-tcc8k/Makefile9
-rw-r--r--arch/arm/mach-tcc8k/Makefile.boot3
-rw-r--r--arch/arm/mach-tcc8k/board-tcc8000-sdk.c81
-rw-r--r--arch/arm/mach-tcc8k/clock.c580
-rw-r--r--arch/arm/mach-tcc8k/common.h15
-rw-r--r--arch/arm/mach-tcc8k/devices.c239
-rw-r--r--arch/arm/mach-tcc8k/io.c62
-rw-r--r--arch/arm/mach-tcc8k/irq.c111
-rw-r--r--arch/arm/mach-tcc8k/time.c134
-rw-r--r--arch/arm/mach-tegra/Kconfig31
-rw-r--r--arch/arm/mach-tegra/Makefile36
-rw-r--r--arch/arm/mach-tegra/Makefile.boot3
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c (renamed from arch/arm/mach-tegra/board-dt.c)65
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c63
-rw-r--r--arch/arm/mach-tegra/board-harmony-pcie.c9
-rw-r--r--arch/arm/mach-tegra/board-harmony-pinmux.c23
-rw-r--r--arch/arm/mach-tegra/board-harmony.c5
-rw-r--r--arch/arm/mach-tegra/board-paz00-pinmux.c25
-rw-r--r--arch/arm/mach-tegra/board-paz00.c32
-rw-r--r--arch/arm/mach-tegra/board-paz00.h3
-rw-r--r--arch/arm/mach-tegra/board-pinmux.c104
-rw-r--r--arch/arm/mach-tegra/board-pinmux.h38
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c122
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c13
-rw-r--r--arch/arm/mach-tegra/board-trimslice-pinmux.c27
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c5
-rw-r--r--arch/arm/mach-tegra/board.h5
-rw-r--r--arch/arm/mach-tegra/clock.c25
-rw-r--r--arch/arm/mach-tegra/clock.h4
-rw-r--r--arch/arm/mach-tegra/common.c55
-rw-r--r--arch/arm/mach-tegra/include/mach/clk.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/entry-macro.S36
-rw-r--r--arch/arm/mach-tegra/include/mach/io.h6
-rw-r--r--arch/arm/mach-tegra/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/kbc.h1
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux-tegra20.h (renamed from arch/arm/mach-tegra/include/mach/pinmux-t2.h)6
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux-tegra30.h320
-rw-r--r--arch/arm/mach-tegra/include/mach/pinmux.h88
-rw-r--r--arch/arm/mach-tegra/include/mach/system.h4
-rw-r--r--arch/arm/mach-tegra/io.c21
-rw-r--r--arch/arm/mach-tegra/irq.c14
-rw-r--r--arch/arm/mach-tegra/pcie.c1
-rw-r--r--arch/arm/mach-tegra/pinmux-tegra20-tables.c (renamed from arch/arm/mach-tegra/pinmux-t2-tables.c)24
-rw-r--r--arch/arm/mach-tegra/pinmux-tegra30-tables.c376
-rw-r--r--arch/arm/mach-tegra/pinmux.c153
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c19
-rw-r--r--arch/arm/mach-tegra/timer.c42
-rw-r--r--arch/arm/mach-u300/Kconfig4
-rw-r--r--arch/arm/mach-u300/core.c34
-rw-r--r--arch/arm/mach-u300/include/mach/entry-macro.S24
-rw-r--r--arch/arm/mach-u300/include/mach/gpio-u300.h115
-rw-r--r--arch/arm/mach-u300/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-u300/include/mach/memory.h19
-rw-r--r--arch/arm/mach-u300/include/mach/platform.h1
-rw-r--r--arch/arm/mach-u300/include/mach/system.h28
-rw-r--r--arch/arm/mach-u300/include/mach/vmalloc.h12
-rw-r--r--arch/arm/mach-u300/mmc.c2
-rw-r--r--arch/arm/mach-u300/timer.c15
-rw-r--r--arch/arm/mach-u300/u300-gpio.h114
-rw-r--r--arch/arm/mach-u300/u300.c5
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c88
-rw-r--r--arch/arm/mach-ux500/board-mop500.c6
-rw-r--r--arch/arm/mach-ux500/board-mop500.h63
-rw-r--r--arch/arm/mach-ux500/board-u5500.c2
-rw-r--r--arch/arm/mach-ux500/clock.c207
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c46
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c35
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c10
-rw-r--r--arch/arm/mach-ux500/id.c6
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h4
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-ux500/include/mach/gpio.h5
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h10
-rw-r--r--arch/arm/mach-ux500/include/mach/id.h24
-rw-r--r--arch/arm/mach-ux500/include/mach/system.h5
-rw-r--r--arch/arm/mach-ux500/include/mach/vmalloc.h18
-rw-r--r--arch/arm/mach-versatile/core.c19
-rw-r--r--arch/arm/mach-versatile/core.h1
-rw-r--r--arch/arm/mach-versatile/include/mach/entry-macro.S30
-rw-r--r--arch/arm/mach-versatile/include/mach/system.h16
-rw-r--r--arch/arm/mach-versatile/include/mach/vmalloc.h21
-rw-r--r--arch/arm/mach-versatile/versatile_ab.c4
-rw-r--r--arch/arm/mach-versatile/versatile_dt.c3
-rw-r--r--arch/arm/mach-versatile/versatile_pb.c4
-rw-r--r--arch/arm/mach-vexpress/Kconfig2
-rw-r--r--arch/arm/mach-vexpress/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-vexpress/include/mach/system.h4
-rw-r--r--arch/arm/mach-vexpress/include/mach/vmalloc.h21
-rw-r--r--arch/arm/mach-vexpress/v2m.c6
-rw-r--r--arch/arm/mach-vt8500/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mach-w90x900/cpu.c16
-rw-r--r--arch/arm/mach-w90x900/include/mach/system.h22
-rw-r--r--arch/arm/mach-w90x900/include/mach/vmalloc.h23
-rw-r--r--arch/arm/mach-w90x900/irq.c4
-rw-r--r--arch/arm/mach-w90x900/mach-nuc910evb.c1
-rw-r--r--arch/arm/mach-w90x900/mach-nuc950evb.c1
-rw-r--r--arch/arm/mach-w90x900/mach-nuc960evb.c1
-rw-r--r--arch/arm/mach-w90x900/nuc910.h9
-rw-r--r--arch/arm/mach-w90x900/nuc950.h9
-rw-r--r--arch/arm/mach-w90x900/nuc960.h9
-rw-r--r--arch/arm/mach-w90x900/nuc9xx.h24
-rw-r--r--arch/arm/mach-w90x900/time.c2
-rw-r--r--arch/arm/mach-zynq/common.c1
-rw-r--r--arch/arm/mach-zynq/include/mach/entry-macro.S3
-rw-r--r--arch/arm/mach-zynq/include/mach/system.h5
-rw-r--r--arch/arm/mach-zynq/include/mach/vmalloc.h20
-rw-r--r--arch/arm/mm/Kconfig40
-rw-r--r--arch/arm/mm/alignment.c2
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/context.c19
-rw-r--r--arch/arm/mm/dma-mapping.c11
-rw-r--r--arch/arm/mm/fault.c169
-rw-r--r--arch/arm/mm/fault.h27
-rw-r--r--arch/arm/mm/fsr-2level.c78
-rw-r--r--arch/arm/mm/fsr-3level.c68
-rw-r--r--arch/arm/mm/idmap.c93
-rw-r--r--arch/arm/mm/init.c44
-rw-r--r--arch/arm/mm/ioremap.c119
-rw-r--r--arch/arm/mm/mm.h14
-rw-r--r--arch/arm/mm/mmap.c196
-rw-r--r--arch/arm/mm/mmu.c97
-rw-r--r--arch/arm/mm/nommu.c4
-rw-r--r--arch/arm/mm/pgd.c51
-rw-r--r--arch/arm/mm/proc-arm1020.S3
-rw-r--r--arch/arm/mm/proc-arm1020e.S3
-rw-r--r--arch/arm/mm/proc-arm1022.S3
-rw-r--r--arch/arm/mm/proc-arm1026.S3
-rw-r--r--arch/arm/mm/proc-arm6_7.S4
-rw-r--r--arch/arm/mm/proc-arm720.S3
-rw-r--r--arch/arm/mm/proc-arm740.S3
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S3
-rw-r--r--arch/arm/mm/proc-arm920.S3
-rw-r--r--arch/arm/mm/proc-arm922.S3
-rw-r--r--arch/arm/mm/proc-arm925.S3
-rw-r--r--arch/arm/mm/proc-arm926.S3
-rw-r--r--arch/arm/mm/proc-arm940.S3
-rw-r--r--arch/arm/mm/proc-arm946.S3
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S3
-rw-r--r--arch/arm/mm/proc-fa526.S3
-rw-r--r--arch/arm/mm/proc-feroceon.S3
-rw-r--r--arch/arm/mm/proc-macros.S5
-rw-r--r--arch/arm/mm/proc-mohawk.S3
-rw-r--r--arch/arm/mm/proc-sa110.S3
-rw-r--r--arch/arm/mm/proc-sa1100.S3
-rw-r--r--arch/arm/mm/proc-v6.S3
-rw-r--r--arch/arm/mm/proc-v7-2level.S171
-rw-r--r--arch/arm/mm/proc-v7-3level.S150
-rw-r--r--arch/arm/mm/proc-v7.S196
-rw-r--r--arch/arm/mm/proc-xsc3.S3
-rw-r--r--arch/arm/mm/proc-xscale.S3
-rw-r--r--arch/arm/nwfpe/entry.S8
-rw-r--r--arch/arm/nwfpe/fpopcode.c26
-rw-r--r--arch/arm/nwfpe/fpopcode.h3
-rw-r--r--arch/arm/oprofile/common.c2
-rw-r--r--arch/arm/plat-iop/Makefile4
-rw-r--r--arch/arm/plat-iop/io.c59
-rw-r--r--arch/arm/plat-iop/restart.c19
-rw-r--r--arch/arm/plat-iop/time.c16
-rw-r--r--arch/arm/plat-mxc/Kconfig1
-rw-r--r--arch/arm/plat-mxc/Makefile1
-rw-r--r--arch/arm/plat-mxc/cpufreq.c3
-rw-r--r--arch/arm/plat-mxc/gic.c41
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h12
-rw-r--r--arch/arm/plat-mxc/include/mach/entry-macro.S11
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx25.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mx1.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h16
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h9
-rw-r--r--arch/arm/plat-mxc/include/mach/uncompress.h1
-rw-r--r--arch/arm/plat-mxc/include/mach/vmalloc.h22
-rw-r--r--arch/arm/plat-mxc/pwm.c16
-rw-r--r--arch/arm/plat-mxc/system.c14
-rw-r--r--arch/arm/plat-mxc/time.c15
-rw-r--r--arch/arm/plat-mxc/tzic.c40
-rw-r--r--arch/arm/plat-nomadik/timer.c20
-rw-r--r--arch/arm/plat-omap/Makefile3
-rw-r--r--arch/arm/plat-omap/common.c11
-rw-r--r--arch/arm/plat-omap/counter_32k.c40
-rw-r--r--arch/arm/plat-omap/dma.c22
-rw-r--r--arch/arm/plat-omap/include/plat/am33xx.h25
-rw-r--r--arch/arm/plat-omap/include/plat/clkdev_omap.h1
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h6
-rw-r--r--arch/arm/plat-omap/include/plat/common.h84
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h56
-rw-r--r--arch/arm/plat-omap/include/plat/hardware.h3
-rw-r--r--arch/arm/plat-omap/include/plat/io.h88
-rw-r--r--arch/arm/plat-omap/include/plat/iommu.h31
-rw-r--r--arch/arm/plat-omap/include/plat/iovmm.h12
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h16
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h1
-rw-r--r--arch/arm/plat-omap/include/plat/omap-secure.h13
-rw-r--r--arch/arm/plat-omap/include/plat/omap-serial.h37
-rw-r--r--arch/arm/plat-omap/include/plat/omap34xx.h2
-rw-r--r--arch/arm/plat-omap/include/plat/omap44xx.h1
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h6
-rw-r--r--arch/arm/plat-omap/include/plat/serial.h27
-rw-r--r--arch/arm/plat-omap/include/plat/sram.h6
-rw-r--r--arch/arm/plat-omap/include/plat/system.h2
-rw-r--r--arch/arm/plat-omap/include/plat/ti81xx.h (renamed from arch/arm/plat-omap/include/plat/ti816x.h)18
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h11
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h35
-rw-r--r--arch/arm/plat-omap/io.c159
-rw-r--r--arch/arm/plat-omap/sram.c17
-rw-r--r--arch/arm/plat-orion/Makefile2
-rw-r--r--arch/arm/plat-orion/addr-map.c174
-rw-r--r--arch/arm/plat-orion/common.c43
-rw-r--r--arch/arm/plat-orion/gpio.c6
-rw-r--r--arch/arm/plat-orion/include/plat/addr-map.h53
-rw-r--r--arch/arm/plat-orion/include/plat/audio.h3
-rw-r--r--arch/arm/plat-orion/include/plat/common.h17
-rw-r--r--arch/arm/plat-orion/include/plat/ehci-orion.h1
-rw-r--r--arch/arm/plat-orion/include/plat/mv_xor.h6
-rw-r--r--arch/arm/plat-orion/include/plat/mvsdio.h1
-rw-r--r--arch/arm/plat-orion/include/plat/pcie.h3
-rw-r--r--arch/arm/plat-orion/pcie.c6
-rw-r--r--arch/arm/plat-orion/time.c21
-rw-r--r--arch/arm/plat-pxa/include/plat/gpio-pxa.h44
-rw-r--r--arch/arm/plat-pxa/include/plat/gpio.h30
-rw-r--r--arch/arm/plat-s3c24xx/common-smdk.c2
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq-debugfs.c2
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq.c2
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c23
-rw-r--r--arch/arm/plat-s3c24xx/dma.c3
-rw-r--r--arch/arm/plat-s3c24xx/irq.c2
-rw-r--r--arch/arm/plat-s3c24xx/pm-simtec.c1
-rw-r--r--arch/arm/plat-s3c24xx/s3c2410-clock.c2
-rw-r--r--arch/arm/plat-s3c24xx/s3c2412-iotiming.c2
-rw-r--r--arch/arm/plat-s3c24xx/s3c2443-clock.c39
-rw-r--r--arch/arm/plat-s5p/Kconfig1
-rw-r--r--arch/arm/plat-s5p/Makefile1
-rw-r--r--arch/arm/plat-s5p/clock.c2
-rw-r--r--arch/arm/plat-s5p/cpu.c144
-rw-r--r--arch/arm/plat-s5p/irq-eint.c2
-rw-r--r--arch/arm/plat-s5p/s5p-time.c19
-rw-r--r--arch/arm/plat-s5p/sysmmu.c1
-rw-r--r--arch/arm/plat-samsung/Kconfig24
-rw-r--r--arch/arm/plat-samsung/clock-clksrc.c2
-rw-r--r--arch/arm/plat-samsung/clock.c2
-rw-r--r--arch/arm/plat-samsung/dev-backlight.c1
-rw-r--r--arch/arm/plat-samsung/devs.c128
-rw-r--r--arch/arm/plat-samsung/dma-ops.c15
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu-freq-core.h25
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h34
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h9
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-ops.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-pl330.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-s3c24xx.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/exynos4.h35
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/irqs.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/keypad.h27
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h8
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h45
-rw-r--r--arch/arm/plat-samsung/include/plat/reset.h16
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c2412.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c2416.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c2443.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c6400.h36
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c6410.h29
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h24
-rw-r--r--arch/arm/plat-samsung/include/plat/s5p6440.h36
-rw-r--r--arch/arm/plat-samsung/include/plat/s5p6450.h36
-rw-r--r--arch/arm/plat-samsung/include/plat/s5pc100.h33
-rw-r--r--arch/arm/plat-samsung/include/plat/s5pv210.h33
-rw-r--r--arch/arm/plat-samsung/include/plat/sdhci.h75
-rw-r--r--arch/arm/plat-samsung/include/plat/system-reset.h31
-rw-r--r--arch/arm/plat-samsung/include/plat/udc.h15
-rw-r--r--arch/arm/plat-samsung/include/plat/watchdog-reset.h1
-rw-r--r--arch/arm/plat-samsung/pd.c2
-rw-r--r--arch/arm/plat-samsung/pm-gpio.c2
-rw-r--r--arch/arm/plat-samsung/pwm.c2
-rw-r--r--arch/arm/plat-samsung/wakeup-mask.c2
-rw-r--r--arch/arm/plat-spear/Makefile2
-rw-r--r--arch/arm/plat-spear/include/plat/system.h15
-rw-r--r--arch/arm/plat-spear/include/plat/vmalloc.h19
-rw-r--r--arch/arm/plat-spear/restart.c27
-rw-r--r--arch/arm/plat-tcc/Kconfig20
-rw-r--r--arch/arm/plat-tcc/Makefile3
-rw-r--r--arch/arm/plat-tcc/clock.c179
-rw-r--r--arch/arm/plat-tcc/include/mach/clock.h48
-rw-r--r--arch/arm/plat-tcc/include/mach/debug-macro.S32
-rw-r--r--arch/arm/plat-tcc/include/mach/entry-macro.S68
-rw-r--r--arch/arm/plat-tcc/include/mach/hardware.h43
-rw-r--r--arch/arm/plat-tcc/include/mach/io.h23
-rw-r--r--arch/arm/plat-tcc/include/mach/irqs.h83
-rw-r--r--arch/arm/plat-tcc/include/mach/system.h31
-rw-r--r--arch/arm/plat-tcc/include/mach/tcc8k-regs.h807
-rw-r--r--arch/arm/plat-tcc/include/mach/timex.h5
-rw-r--r--arch/arm/plat-tcc/include/mach/uncompress.h34
-rw-r--r--arch/arm/plat-tcc/include/mach/vmalloc.h10
-rw-r--r--arch/arm/plat-tcc/system.c25
-rw-r--r--arch/arm/plat-versatile/sched-clock.c29
-rw-r--r--arch/arm/tools/mach-types714
-rw-r--r--arch/avr32/boards/atngw100/setup.c2
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c2
-rw-r--r--arch/avr32/boards/favr-32/setup.c2
-rw-r--r--arch/avr32/boards/hammerhead/setup.c2
-rw-r--r--arch/avr32/boards/merisc/merisc_sysfs.c1
-rw-r--r--arch/avr32/boards/merisc/setup.c2
-rw-r--r--arch/avr32/boards/mimc200/setup.c2
-rw-r--r--arch/avr32/include/asm/ipcbuf.h30
-rw-r--r--arch/avr32/include/asm/socket.h3
-rw-r--r--arch/avr32/include/asm/thread_info.h2
-rw-r--r--arch/avr32/include/asm/types.h6
-rw-r--r--arch/avr32/kernel/cpu.c74
-rw-r--r--arch/avr32/kernel/irq.c2
-rw-r--r--arch/avr32/kernel/process.c6
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c8
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h7
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig2
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-AD7160-EVAL_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-EZKIT-V2_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-ACVILON_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT-SMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig2
-rw-r--r--arch/blackfin/configs/DNP5370_defconfig2
-rw-r--r--arch/blackfin/configs/H8606_defconfig2
-rw-r--r--arch/blackfin/configs/IP0X_defconfig2
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig2
-rw-r--r--arch/blackfin/configs/SRV1_defconfig2
-rw-r--r--arch/blackfin/configs/TCM-BF518_defconfig2
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig2
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h3
-rw-r--r--arch/blackfin/include/asm/cpu.h3
-rw-r--r--arch/blackfin/include/asm/smp.h5
-rw-r--r--arch/blackfin/include/asm/thread_info.h2
-rw-r--r--arch/blackfin/kernel/process.c6
-rw-r--r--arch/blackfin/kernel/setup.c16
-rw-r--r--arch/blackfin/kernel/time-ts.c8
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c10
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c6
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c6
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c4
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c179
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c4
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c6
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c78
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c6
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c6
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c6
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c73
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c4
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c6
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c101
-rw-r--r--arch/blackfin/mach-bf561/include/mach/pll.h4
-rw-r--r--arch/blackfin/mach-bf561/smp.c5
-rw-r--r--arch/blackfin/mach-common/smp.c61
-rw-r--r--arch/c6x/Kconfig174
-rw-r--r--arch/c6x/Makefile60
-rw-r--r--arch/c6x/boot/Makefile30
-rw-r--r--arch/c6x/boot/dts/dsk6455.dts62
-rw-r--r--arch/c6x/boot/dts/evmc6457.dts48
-rw-r--r--arch/c6x/boot/dts/evmc6472.dts73
-rw-r--r--arch/c6x/boot/dts/evmc6474.dts58
-rw-r--r--arch/c6x/boot/dts/tms320c6455.dtsi96
-rw-r--r--arch/c6x/boot/dts/tms320c6457.dtsi68
-rw-r--r--arch/c6x/boot/dts/tms320c6472.dtsi134
-rw-r--r--arch/c6x/boot/dts/tms320c6474.dtsi89
-rw-r--r--arch/c6x/boot/linked_dtb.S2
-rw-r--r--arch/c6x/configs/dsk6455_defconfig44
-rw-r--r--arch/c6x/configs/evmc6457_defconfig41
-rw-r--r--arch/c6x/configs/evmc6472_defconfig42
-rw-r--r--arch/c6x/configs/evmc6474_defconfig42
-rw-r--r--arch/c6x/include/asm/Kbuild54
-rw-r--r--arch/c6x/include/asm/asm-offsets.h1
-rw-r--r--arch/c6x/include/asm/bitops.h105
-rw-r--r--arch/c6x/include/asm/byteorder.h12
-rw-r--r--arch/c6x/include/asm/cache.h90
-rw-r--r--arch/c6x/include/asm/cacheflush.h65
-rw-r--r--arch/c6x/include/asm/checksum.h34
-rw-r--r--arch/c6x/include/asm/clkdev.h22
-rw-r--r--arch/c6x/include/asm/clock.h148
-rw-r--r--arch/c6x/include/asm/delay.h67
-rw-r--r--arch/c6x/include/asm/dma-mapping.h91
-rw-r--r--arch/c6x/include/asm/dscr.h34
-rw-r--r--arch/c6x/include/asm/elf.h113
-rw-r--r--arch/c6x/include/asm/ftrace.h6
-rw-r--r--arch/c6x/include/asm/hardirq.h20
-rw-r--r--arch/c6x/include/asm/irq.h302
-rw-r--r--arch/c6x/include/asm/irqflags.h72
-rw-r--r--arch/c6x/include/asm/linkage.h30
-rw-r--r--arch/c6x/include/asm/megamod-pic.h9
-rw-r--r--arch/c6x/include/asm/mmu.h18
-rw-r--r--arch/c6x/include/asm/module.h33
-rw-r--r--arch/c6x/include/asm/mutex.h6
-rw-r--r--arch/c6x/include/asm/page.h11
-rw-r--r--arch/c6x/include/asm/pgtable.h81
-rw-r--r--arch/c6x/include/asm/processor.h132
-rw-r--r--arch/c6x/include/asm/procinfo.h28
-rw-r--r--arch/c6x/include/asm/prom.h1
-rw-r--r--arch/c6x/include/asm/ptrace.h174
-rw-r--r--arch/c6x/include/asm/sections.h12
-rw-r--r--arch/c6x/include/asm/setup.h32
-rw-r--r--arch/c6x/include/asm/sigcontext.h80
-rw-r--r--arch/c6x/include/asm/signal.h17
-rw-r--r--arch/c6x/include/asm/soc.h35
-rw-r--r--arch/c6x/include/asm/string.h21
-rw-r--r--arch/c6x/include/asm/swab.h54
-rw-r--r--arch/c6x/include/asm/syscall.h123
-rw-r--r--arch/c6x/include/asm/syscalls.h55
-rw-r--r--arch/c6x/include/asm/system.h168
-rw-r--r--arch/c6x/include/asm/thread_info.h121
-rw-r--r--arch/c6x/include/asm/timer64.h6
-rw-r--r--arch/c6x/include/asm/timex.h33
-rw-r--r--arch/c6x/include/asm/tlb.h8
-rw-r--r--arch/c6x/include/asm/traps.h36
-rw-r--r--arch/c6x/include/asm/uaccess.h107
-rw-r--r--arch/c6x/include/asm/unaligned.h170
-rw-r--r--arch/c6x/include/asm/unistd.h26
-rw-r--r--arch/c6x/kernel/Makefile12
-rw-r--r--arch/c6x/kernel/asm-offsets.c123
-rw-r--r--arch/c6x/kernel/c6x_ksyms.c66
-rw-r--r--arch/c6x/kernel/devicetree.c53
-rw-r--r--arch/c6x/kernel/dma.c153
-rw-r--r--arch/c6x/kernel/entry.S803
-rw-r--r--arch/c6x/kernel/head.S84
-rw-r--r--arch/c6x/kernel/irq.c728
-rw-r--r--arch/c6x/kernel/module.c123
-rw-r--r--arch/c6x/kernel/process.c265
-rw-r--r--arch/c6x/kernel/ptrace.c187
-rw-r--r--arch/c6x/kernel/setup.c510
-rw-r--r--arch/c6x/kernel/signal.c377
-rw-r--r--arch/c6x/kernel/soc.c91
-rw-r--r--arch/c6x/kernel/switch_to.S74
-rw-r--r--arch/c6x/kernel/sys_c6x.c74
-rw-r--r--arch/c6x/kernel/time.c65
-rw-r--r--arch/c6x/kernel/traps.c423
-rw-r--r--arch/c6x/kernel/vectors.S81
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S162
-rw-r--r--arch/c6x/lib/Makefile7
-rw-r--r--arch/c6x/lib/checksum.c36
-rw-r--r--arch/c6x/lib/csum_64plus.S419
-rw-r--r--arch/c6x/lib/divi.S53
-rw-r--r--arch/c6x/lib/divremi.S46
-rw-r--r--arch/c6x/lib/divremu.S87
-rw-r--r--arch/c6x/lib/divu.S98
-rw-r--r--arch/c6x/lib/llshl.S37
-rw-r--r--arch/c6x/lib/llshr.S38
-rw-r--r--arch/c6x/lib/llshru.S38
-rw-r--r--arch/c6x/lib/memcpy_64plus.S46
-rw-r--r--arch/c6x/lib/mpyll.S49
-rw-r--r--arch/c6x/lib/negll.S31
-rw-r--r--arch/c6x/lib/pop_rts.S32
-rw-r--r--arch/c6x/lib/push_rts.S31
-rw-r--r--arch/c6x/lib/remi.S64
-rw-r--r--arch/c6x/lib/remu.S82
-rw-r--r--arch/c6x/lib/strasgi.S89
-rw-r--r--arch/c6x/lib/strasgi_64plus.S39
-rw-r--r--arch/c6x/mm/Makefile5
-rw-r--r--arch/c6x/mm/dma-coherent.c143
-rw-r--r--arch/c6x/mm/init.c113
-rw-r--r--arch/c6x/platforms/Kconfig16
-rw-r--r--arch/c6x/platforms/Makefile12
-rw-r--r--arch/c6x/platforms/cache.c445
-rw-r--r--arch/c6x/platforms/dscr.c598
-rw-r--r--arch/c6x/platforms/emif.c87
-rw-r--r--arch/c6x/platforms/megamod-pic.c349
-rw-r--r--arch/c6x/platforms/platform.c17
-rw-r--r--arch/c6x/platforms/pll.c444
-rw-r--r--arch/c6x/platforms/plldata.c404
-rw-r--r--arch/c6x/platforms/timer64.c244
-rw-r--r--arch/cris/Kconfig.debug1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c7
-rw-r--r--arch/cris/arch-v32/kernel/time.c4
-rw-r--r--arch/cris/include/asm/ipcbuf.h30
-rw-r--r--arch/cris/include/asm/socket.h3
-rw-r--r--arch/cris/include/asm/thread_info.h2
-rw-r--r--arch/cris/include/asm/types.h6
-rw-r--r--arch/frv/Kconfig10
-rw-r--r--arch/frv/include/asm/ipcbuf.h31
-rw-r--r--arch/frv/include/asm/socket.h3
-rw-r--r--arch/frv/include/asm/thread_info.h2
-rw-r--r--arch/frv/include/asm/types.h6
-rw-r--r--arch/h8300/include/asm/ipcbuf.h30
-rw-r--r--arch/h8300/include/asm/socket.h3
-rw-r--r--arch/h8300/include/asm/thread_info.h2
-rw-r--r--arch/h8300/include/asm/types.h17
-rw-r--r--arch/ia64/Kconfig6
-rw-r--r--arch/ia64/include/asm/cputime.h71
-rw-r--r--arch/ia64/include/asm/iommu.h2
-rw-r--r--arch/ia64/include/asm/ipcbuf.h29
-rw-r--r--arch/ia64/include/asm/socket.h3
-rw-r--r--arch/ia64/include/asm/thread_info.h2
-rw-r--r--arch/ia64/include/asm/types.h2
-rw-r--r--arch/ia64/include/asm/xen/interface.h2
-rw-r--r--arch/ia64/kernel/err_inject.c52
-rw-r--r--arch/ia64/kernel/pci-dma.c1
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kernel/setup.c19
-rw-r--r--arch/ia64/kernel/topology.c10
-rw-r--r--arch/ia64/kvm/kvm-ia64.c12
-rw-r--r--arch/ia64/mm/contig.c3
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/ia64/sn/kernel/irq.c5
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c3
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c4
-rw-r--r--arch/m32r/include/asm/ipcbuf.h30
-rw-r--r--arch/m32r/include/asm/socket.h3
-rw-r--r--arch/m32r/include/asm/thread_info.h2
-rw-r--r--arch/m32r/include/asm/types.h6
-rw-r--r--arch/m68k/Kconfig17
-rw-r--r--arch/m68k/Kconfig.cpu78
-rw-r--r--arch/m68k/Kconfig.debug27
-rw-r--r--arch/m68k/Kconfig.devices46
-rw-r--r--arch/m68k/Kconfig.machine4
-rw-r--r--arch/m68k/atari/ataints.c2
-rw-r--r--arch/m68k/atari/debug.c1
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig3
-rw-r--r--arch/m68k/configs/multi_defconfig5
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/emu/nfeth.c2
-rw-r--r--arch/m68k/hp300/config.c3
-rw-r--r--arch/m68k/include/asm/anchor.h112
-rw-r--r--arch/m68k/include/asm/atarihw.h2
-rw-r--r--arch/m68k/include/asm/atomic.h10
-rw-r--r--arch/m68k/include/asm/blinken.h8
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h88
-rw-r--r--arch/m68k/include/asm/checksum.h31
-rw-r--r--arch/m68k/include/asm/div64.h8
-rw-r--r--arch/m68k/include/asm/elf.h6
-rw-r--r--arch/m68k/include/asm/entry.h10
-rw-r--r--arch/m68k/include/asm/fpu.h2
-rw-r--r--arch/m68k/include/asm/gpio.h3
-rw-r--r--arch/m68k/include/asm/ipcbuf.h30
-rw-r--r--arch/m68k/include/asm/irq.h5
-rw-r--r--arch/m68k/include/asm/m54xxacr.h32
-rw-r--r--arch/m68k/include/asm/mac_baboon.h6
-rw-r--r--arch/m68k/include/asm/mac_iop.h2
-rw-r--r--arch/m68k/include/asm/mac_oss.h23
-rw-r--r--arch/m68k/include/asm/mac_psc.h4
-rw-r--r--arch/m68k/include/asm/mac_via.h9
-rw-r--r--arch/m68k/include/asm/macintosh.h10
-rw-r--r--arch/m68k/include/asm/macints.h6
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h102
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h425
-rw-r--r--arch/m68k/include/asm/mcfmmu.h112
-rw-r--r--arch/m68k/include/asm/mmu_context.h250
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h1
-rw-r--r--arch/m68k/include/asm/page.h10
-rw-r--r--arch/m68k/include/asm/page_no.h3
-rw-r--r--arch/m68k/include/asm/page_offset.h10
-rw-r--r--arch/m68k/include/asm/pgalloc.h4
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h30
-rw-r--r--arch/m68k/include/asm/processor.h18
-rw-r--r--arch/m68k/include/asm/segment.h30
-rw-r--r--arch/m68k/include/asm/serial.h2
-rw-r--r--arch/m68k/include/asm/setup.h14
-rw-r--r--arch/m68k/include/asm/sigcontext.h4
-rw-r--r--arch/m68k/include/asm/socket.h3
-rw-r--r--arch/m68k/include/asm/thread_info.h34
-rw-r--r--arch/m68k/include/asm/tlbflush.h23
-rw-r--r--arch/m68k/include/asm/traps.h1
-rw-r--r--arch/m68k/include/asm/types.h6
-rw-r--r--arch/m68k/include/asm/uaccess_mm.h42
-rw-r--r--arch/m68k/include/asm/ucontext.h4
-rw-r--r--arch/m68k/include/asm/unistd.h14
-rw-r--r--arch/m68k/kernel/Makefile21
-rw-r--r--arch/m68k/kernel/asm-offsets.c3
-rw-r--r--arch/m68k/kernel/entry.S2
-rw-r--r--arch/m68k/kernel/entry_mm.S31
-rw-r--r--arch/m68k/kernel/entry_no.S9
-rw-r--r--arch/m68k/kernel/head.S117
-rw-r--r--arch/m68k/kernel/init_task.c3
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c2
-rw-r--r--arch/m68k/kernel/process_mm.c75
-rw-r--r--arch/m68k/kernel/ptrace_mm.c18
-rw-r--r--arch/m68k/kernel/setup_mm.c22
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/signal_mm.c204
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/m68k/kernel/time_no.c3
-rw-r--r--arch/m68k/kernel/traps.c104
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds (renamed from arch/m68k/kernel/vmlinux.lds_no.S)8
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds2
-rw-r--r--arch/m68k/kernel/vmlinux.lds.S15
-rw-r--r--arch/m68k/kernel/vmlinux.lds_mm.S10
-rw-r--r--arch/m68k/lib/Makefile10
-rw-r--r--arch/m68k/lib/checksum.c (renamed from arch/m68k/lib/checksum_mm.c)0
-rw-r--r--arch/m68k/lib/checksum_no.c156
-rw-r--r--arch/m68k/lib/uaccess.c22
-rw-r--r--arch/m68k/mac/baboon.c41
-rw-r--r--arch/m68k/mac/config.c93
-rw-r--r--arch/m68k/mac/iop.c8
-rw-r--r--arch/m68k/mac/macints.c197
-rw-r--r--arch/m68k/mac/oss.c157
-rw-r--r--arch/m68k/mac/psc.c17
-rw-r--r--arch/m68k/mac/via.c255
-rw-r--r--arch/m68k/mm/Makefile8
-rw-r--r--arch/m68k/mm/cache.c24
-rw-r--r--arch/m68k/mm/init_mm.c36
-rw-r--r--arch/m68k/mm/kmap.c3
-rw-r--r--arch/m68k/mm/mcfmmu.c198
-rw-r--r--arch/m68k/mm/memory.c8
-rw-r--r--arch/m68k/mvme16x/config.c160
-rw-r--r--arch/m68k/platform/54xx/config.c47
-rw-r--r--arch/m68k/platform/68328/Makefile6
-rw-r--r--arch/m68k/platform/68328/bootlogo.h2
-rw-r--r--arch/m68k/platform/68328/bootlogo.pl10
-rw-r--r--arch/m68k/platform/68328/config.c3
-rw-r--r--arch/m68k/platform/68328/head-pilot.S19
-rw-r--r--arch/m68k/platform/68328/head-rom.S9
-rw-r--r--arch/m68k/platform/68328/timers.c4
-rw-r--r--arch/m68k/platform/coldfire/dma_timer.c5
-rw-r--r--arch/m68k/platform/coldfire/entry.S7
-rw-r--r--arch/m68k/platform/coldfire/gpio.c9
-rw-r--r--arch/m68k/platform/coldfire/head.S53
-rw-r--r--arch/m68k/platform/coldfire/pit.c4
-rw-r--r--arch/m68k/platform/coldfire/sltimers.c13
-rw-r--r--arch/m68k/platform/coldfire/timers.c4
-rw-r--r--arch/microblaze/include/asm/irq.h11
-rw-r--r--arch/microblaze/include/asm/memblock.h14
-rw-r--r--arch/microblaze/include/asm/page.h11
-rw-r--r--arch/microblaze/include/asm/setup.h6
-rw-r--r--arch/microblaze/include/asm/thread_info.h2
-rw-r--r--arch/microblaze/include/asm/unistd.h5
-rw-r--r--arch/microblaze/kernel/early_printk.c4
-rw-r--r--arch/microblaze/kernel/entry.S2
-rw-r--r--arch/microblaze/kernel/intc.c52
-rw-r--r--arch/microblaze/kernel/irq.c11
-rw-r--r--arch/microblaze/kernel/module.c2
-rw-r--r--arch/microblaze/kernel/process.c6
-rw-r--r--arch/microblaze/kernel/prom.c3
-rw-r--r--arch/microblaze/kernel/reset.c43
-rw-r--r--arch/microblaze/kernel/setup.c18
-rw-r--r--arch/microblaze/kernel/syscall_table.S3
-rw-r--r--arch/microblaze/kernel/timer.c21
-rw-r--r--arch/microblaze/lib/Makefile1
-rw-r--r--arch/microblaze/lib/cmpdi2.c26
-rw-r--r--arch/microblaze/pci/pci-common.c4
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c3
-rw-r--r--arch/mips/include/asm/ip32/mace.h2
-rw-r--r--arch/mips/include/asm/ipcbuf.h29
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h11
-rw-r--r--arch/mips/include/asm/socket.h3
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/types.h6
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c8
-rw-r--r--arch/mips/kernel/process.c6
-rw-r--r--arch/mips/kernel/setup.c3
-rw-r--r--arch/mips/sgi-ip27/Kconfig6
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c5
-rw-r--r--arch/mips/sibyte/Kconfig1
-rw-r--r--arch/mips/txx9/generic/7segled.c44
-rw-r--r--arch/mips/txx9/generic/setup.c34
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mn10300/include/asm/ipcbuf.h30
-rw-r--r--arch/mn10300/include/asm/socket.h3
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/include/asm/types.h6
-rw-r--r--arch/openrisc/include/asm/memblock.h24
-rw-r--r--arch/openrisc/kernel/idle.c6
-rw-r--r--arch/openrisc/kernel/prom.c3
-rw-r--r--arch/parisc/hpux/sys_hpux.c9
-rw-r--r--arch/parisc/include/asm/socket.h3
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/types.h6
-rw-r--r--arch/parisc/kernel/time.c6
-rw-r--r--arch/powerpc/Kconfig58
-rw-r--r--arch/powerpc/Kconfig.debug12
-rw-r--r--arch/powerpc/Makefile11
-rw-r--r--arch/powerpc/boot/Makefile12
-rw-r--r--arch/powerpc/boot/dcr.h6
-rw-r--r--arch/powerpc/boot/div64.S52
-rw-r--r--arch/powerpc/boot/dts/asp834x-redboot.dts4
-rw-r--r--arch/powerpc/boot/dts/currituck.dts237
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi248
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi63
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi191
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi63
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi143
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi62
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi270
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi65
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi304
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi64
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi196
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi70
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi198
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi64
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-post.dtsi174
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-post.dtsi225
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-post.dtsi235
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-post.dtsi224
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi76
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi194
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi69
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi325
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi111
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi352
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi112
-rw-r--r--arch/powerpc/boot/dts/fsl/p3060si-post.dtsi296
-rw-r--r--arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi125
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi350
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi143
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi355
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi96
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi66
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi66
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi51
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi39
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi60
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi60
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi59
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi42
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi42
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi42
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi43
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi43
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi42
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi66
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi40
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi40
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi43
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi45
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi45
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi45
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi65
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi66
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi66
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi51
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi51
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi40
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi53
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi106
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi68
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi39
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi39
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi100
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi109
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi109
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi41
-rw-r--r--arch/powerpc/boot/dts/gef_ppc9a.dts4
-rw-r--r--arch/powerpc/boot/dts/gef_sbc310.dts4
-rw-r--r--arch/powerpc/boot/dts/gef_sbc610.dts4
-rw-r--r--arch/powerpc/boot/dts/klondike.dts227
-rw-r--r--arch/powerpc/boot/dts/kmeter1.dts2
-rw-r--r--arch/powerpc/boot/dts/kuroboxHD.dts4
-rw-r--r--arch/powerpc/boot/dts/kuroboxHG.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8308_p1m.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8308rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc832x_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc832x_rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitxgp.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc834x_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc836x_rdk.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8377_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8377_wlan.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8378_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8379_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dts456
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dtsi141
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds_36b.dts410
-rw-r--r--arch/powerpc/boot/dts/mpc8540ads.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8541cds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dts473
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dtsi161
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds.dts505
-rw-r--r--arch/powerpc/boot/dts/mpc8555cds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts482
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts414
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dts757
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dtsi397
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds_36b.dts746
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts487
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts245
-rw-r--r--arch/powerpc/boot/dts/mpc8610_hpcd.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn.dts73
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts4
-rw-r--r--arch/powerpc/boot/dts/obs600.dts314
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dts228
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dtsi234
-rw-r--r--arch/powerpc/boot/dts/p1010rdb_36b.dts89
-rw-r--r--arch/powerpc/boot/dts/p1010si.dtsi374
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dts262
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dtsi247
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_36b.dts66
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core0.dts154
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core1.dts11
-rw-r--r--arch/powerpc/boot/dts/p1020si.dtsi377
-rw-r--r--arch/powerpc/boot/dts/p1021mds.dts425
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts459
-rw-r--r--arch/powerpc/boot/dts/p1023rds.dts383
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dts353
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dtsi316
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts71
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core0.dts141
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core1.dts107
-rw-r--r--arch/powerpc/boot/dts/p2020si.dtsi382
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts22
-rw-r--r--arch/powerpc/boot/dts/p2041si.dtsi692
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts23
-rw-r--r--arch/powerpc/boot/dts/p3041si.dtsi729
-rw-r--r--arch/powerpc/boot/dts/p3060qds.dts12
-rw-r--r--arch/powerpc/boot/dts/p3060si.dtsi719
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts24
-rw-r--r--arch/powerpc/boot/dts/p4080si.dtsi755
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts24
-rw-r--r--arch/powerpc/boot/dts/p5020si.dtsi716
-rw-r--r--arch/powerpc/boot/dts/sbc8349.dts4
-rw-r--r--arch/powerpc/boot/dts/sbc8548.dts4
-rw-r--r--arch/powerpc/boot/dts/sbc8641d.dts4
-rw-r--r--arch/powerpc/boot/dts/socrates.dts4
-rw-r--r--arch/powerpc/boot/dts/storcenter.dts4
-rw-r--r--arch/powerpc/boot/dts/stxssa8555.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8540.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8541.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8548-bigflash.dts23
-rw-r--r--arch/powerpc/boot/dts/tqm8548.dts23
-rw-r--r--arch/powerpc/boot/dts/tqm8555.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8xx.dts25
-rw-r--r--arch/powerpc/boot/dts/xcalibur1501.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5200.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5200_xmon.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5301.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5330.dts4
-rw-r--r--arch/powerpc/boot/dts/xpedite5370.dts4
-rw-r--r--arch/powerpc/boot/treeboot-currituck.c119
-rwxr-xr-xarch/powerpc/boot/wrapper45
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig55
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig83
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig110
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig3
-rw-r--r--arch/powerpc/configs/chroma_defconfig307
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig11
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig4
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig17
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig18
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig5
-rw-r--r--arch/powerpc/configs/ps3_defconfig39
-rw-r--r--arch/powerpc/configs/pseries_defconfig5
-rw-r--r--arch/powerpc/include/asm/Kbuild2
-rw-r--r--arch/powerpc/include/asm/cputable.h5
-rw-r--r--arch/powerpc/include/asm/cputime.h76
-rw-r--r--arch/powerpc/include/asm/fsl_ifc.h834
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h7
-rw-r--r--arch/powerpc/include/asm/hugetlb.h38
-rw-r--r--arch/powerpc/include/asm/io.h2
-rw-r--r--arch/powerpc/include/asm/kdump.h4
-rw-r--r--arch/powerpc/include/asm/kexec.h7
-rw-r--r--arch/powerpc/include/asm/keylargo.h2
-rw-r--r--arch/powerpc/include/asm/kvm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h33
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h33
-rw-r--r--arch/powerpc/include/asm/lv1call.h10
-rw-r--r--arch/powerpc/include/asm/machdep.h3
-rw-r--r--arch/powerpc/include/asm/memblock.h8
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h11
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h7
-rw-r--r--arch/powerpc/include/asm/mpic.h14
-rw-r--r--arch/powerpc/include/asm/opal.h131
-rw-r--r--arch/powerpc/include/asm/paca.h1
-rw-r--r--arch/powerpc/include/asm/page.h90
-rw-r--r--arch/powerpc/include/asm/page_64.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h6
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/include/asm/reg_booke.h4
-rw-r--r--arch/powerpc/include/asm/rtas.h18
-rw-r--r--arch/powerpc/include/asm/rwsem.h132
-rw-r--r--arch/powerpc/include/asm/socket.h3
-rw-r--r--arch/powerpc/include/asm/spu.h14
-rw-r--r--arch/powerpc/include/asm/system.h11
-rw-r--r--arch/powerpc/include/asm/tce.h10
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/time.h2
-rw-r--r--arch/powerpc/include/asm/topology.h10
-rw-r--r--arch/powerpc/include/asm/types.h11
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cacheinfo.c10
-rw-r--r--arch/powerpc/kernel/cpu_setup_a2.S10
-rw-r--r--arch/powerpc/kernel/cputable.c27
-rw-r--r--arch/powerpc/kernel/crash.c220
-rw-r--r--arch/powerpc/kernel/crash_dump.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/head_44x.S107
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/idle.c42
-rw-r--r--arch/powerpc/kernel/idle_power7.S4
-rw-r--r--arch/powerpc/kernel/irq.c28
-rw-r--r--arch/powerpc/kernel/legacy_serial.c3
-rw-r--r--arch/powerpc/kernel/lparcfg.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec.c5
-rw-r--r--arch/powerpc/kernel/pci-common.c78
-rw-r--r--arch/powerpc/kernel/pci_dn.c3
-rw-r--r--arch/powerpc/kernel/process.c22
-rw-r--r--arch/powerpc/kernel/prom.c20
-rw-r--r--arch/powerpc/kernel/prom_init.c37
-rw-r--r--arch/powerpc/kernel/reloc_32.S208
-rw-r--r--arch/powerpc/kernel/rtas_flash.c6
-rw-r--r--arch/powerpc/kernel/rtasd.c7
-rw-r--r--arch/powerpc/kernel/setup_64.c10
-rw-r--r--arch/powerpc/kernel/smp.c5
-rw-r--r--arch/powerpc/kernel/sysfs.c271
-rw-r--r--arch/powerpc/kernel/time.c105
-rw-r--r--arch/powerpc/kernel/traps.c173
-rw-r--r--arch/powerpc/kernel/vio.c1
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S8
-rw-r--r--arch/powerpc/kvm/book3s.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S3
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/e500.c1
-rw-r--r--arch/powerpc/lib/Makefile4
-rw-r--r--arch/powerpc/lib/copyuser_64.S6
-rw-r--r--arch/powerpc/lib/copyuser_power7.S683
-rw-r--r--arch/powerpc/lib/copyuser_power7_vmx.c50
-rw-r--r--arch/powerpc/mm/44x_mmu.c6
-rw-r--r--arch/powerpc/mm/Makefile2
-rw-r--r--arch/powerpc/mm/fault.c17
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c21
-rw-r--r--arch/powerpc/mm/hugetlbpage.c117
-rw-r--r--arch/powerpc/mm/icswx.c273
-rw-r--r--arch/powerpc/mm/icswx.h62
-rw-r--r--arch/powerpc/mm/icswx_pid.c87
-rw-r--r--arch/powerpc/mm/init_32.c11
-rw-r--r--arch/powerpc/mm/mem.c25
-rw-r--r--arch/powerpc/mm/mmap_64.c14
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c195
-rw-r--r--arch/powerpc/mm/numa.c74
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S36
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/powerpc/platforms/40x/Kconfig55
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c4
-rw-r--r--arch/powerpc/platforms/44x/Kconfig32
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/currituck.c204
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c2
-rw-r--r--arch/powerpc/platforms/512x/Kconfig1
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c35
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c58
-rw-r--r--arch/powerpc/platforms/83xx/misc.c77
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c40
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c43
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c53
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c56
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c30
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c42
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c53
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c48
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c46
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c47
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h16
-rw-r--r--arch/powerpc/platforms/83xx/sbc834x.c49
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/Makefile2
-rw-r--r--arch/powerpc/platforms/85xx/common.c66
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c28
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c69
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c38
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx.h11
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c74
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c42
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c50
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c73
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c49
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c38
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c37
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rds.c47
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c2
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c43
-rw-r--r--arch/powerpc/platforms/85xx/sbc8560.c74
-rw-r--r--arch/powerpc/platforms/85xx/smp.c1
-rw-r--r--arch/powerpc/platforms/85xx/smp.h15
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c33
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c73
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c73
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c48
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c2
-rw-r--r--arch/powerpc/platforms/86xx/pic.c18
-rw-r--r--arch/powerpc/platforms/Kconfig6
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype25
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c144
-rw-r--r--arch/powerpc/platforms/cell/iommu.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c25
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c61
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c17
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h4
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c23
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c20
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c24
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c26
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c23
-rw-r--r--arch/powerpc/platforms/iseries/setup.c12
-rw-r--r--arch/powerpc/platforms/iseries/smp.c2
-rw-r--r--arch/powerpc/platforms/maple/pci.c55
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c4
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c80
-rw-r--r--arch/powerpc/platforms/powermac/setup.c8
-rw-r--r--arch/powerpc/platforms/powermac/smp.c6
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S8
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1330
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c228
-rw-r--r--arch/powerpc/platforms/powernv/pci.h100
-rw-r--r--arch/powerpc/platforms/powernv/smp.c2
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c103
-rw-r--r--arch/powerpc/platforms/ps3/mm.c1
-rw-r--r--arch/powerpc/platforms/ps3/repository.c137
-rw-r--r--arch/powerpc/platforms/ps3/setup.c4
-rw-r--r--arch/powerpc/platforms/ps3/smp.c2
-rw-r--r--arch/powerpc/platforms/ps3/spu.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig9
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c67
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c4
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c61
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c6
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c7
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c329
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h3
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c71
-rw-r--r--arch/powerpc/platforms/pseries/setup.c109
-rw-r--r--arch/powerpc/platforms/pseries/smp.c3
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c33
-rw-r--r--arch/powerpc/platforms/wsp/Kconfig12
-rw-r--r--arch/powerpc/platforms/wsp/Makefile8
-rw-r--r--arch/powerpc/platforms/wsp/chroma.c56
-rw-r--r--arch/powerpc/platforms/wsp/h8.c134
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c3
-rw-r--r--arch/powerpc/platforms/wsp/psr2.c56
-rw-r--r--arch/powerpc/platforms/wsp/wsp.c115
-rw-r--r--arch/powerpc/platforms/wsp/wsp.h16
-rwxr-xr-xarch/powerpc/relocs_check.pl14
-rw-r--r--arch/powerpc/sysdev/Makefile3
-rw-r--r--arch/powerpc/sysdev/axonram.c1
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_ifc.c310
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c37
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c99
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h10
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c84
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c1519
-rw-r--r--arch/powerpc/sysdev/fsl_rio.h135
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c1104
-rw-r--r--arch/powerpc/sysdev/mpic.c201
-rw-r--r--arch/powerpc/sysdev/ppc4xx_cpm.c6
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c85
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.h7
-rw-r--r--arch/powerpc/sysdev/qe_lib/gpio.c42
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c12
-rw-r--r--arch/powerpc/sysdev/uic.c1
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c47
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c2
-rw-r--r--arch/powerpc/xmon/xmon.c16
-rw-r--r--arch/s390/Kbuild13
-rw-r--r--arch/s390/Kconfig18
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/appldata/appldata_os.c16
-rw-r--r--arch/s390/boot/Makefile2
-rw-r--r--arch/s390/hypfs/inode.c14
-rw-r--r--arch/s390/include/asm/cputime.h142
-rw-r--r--arch/s390/include/asm/debug.h4
-rw-r--r--arch/s390/include/asm/kdebug.h2
-rw-r--r--arch/s390/include/asm/lowcore.h142
-rw-r--r--arch/s390/include/asm/percpu.h44
-rw-r--r--arch/s390/include/asm/pgtable.h31
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/ptrace.h3
-rw-r--r--arch/s390/include/asm/qdio.h5
-rw-r--r--arch/s390/include/asm/sigp.h1
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/socket.h3
-rw-r--r--arch/s390/include/asm/sparsemem.h4
-rw-r--r--arch/s390/include/asm/syscall.h2
-rw-r--r--arch/s390/include/asm/system.h2
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/topology.h40
-rw-r--r--arch/s390/include/asm/types.h2
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/kernel/Makefile3
-rw-r--r--arch/s390/kernel/asm-offsets.c8
-rw-r--r--arch/s390/kernel/base.S16
-rw-r--r--arch/s390/kernel/compat_linux.c3
-rw-r--r--arch/s390/kernel/compat_signal.c12
-rw-r--r--arch/s390/kernel/debug.c8
-rw-r--r--arch/s390/kernel/dis.c9
-rw-r--r--arch/s390/kernel/early.c20
-rw-r--r--arch/s390/kernel/entry.S1103
-rw-r--r--arch/s390/kernel/entry.h10
-rw-r--r--arch/s390/kernel/entry64.S976
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/mem_detect.c122
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/ptrace.c30
-rw-r--r--arch/s390/kernel/reipl64.S4
-rw-r--r--arch/s390/kernel/setup.c75
-rw-r--r--arch/s390/kernel/signal.c28
-rw-r--r--arch/s390/kernel/smp.c229
-rw-r--r--arch/s390/kernel/sys_s390.c76
-rw-r--r--arch/s390/kernel/time.c260
-rw-r--r--arch/s390/kernel/topology.c281
-rw-r--r--arch/s390/kernel/traps.c170
-rw-r--r--arch/s390/mm/fault.c107
-rw-r--r--arch/s390/mm/init.c16
-rw-r--r--arch/s390/mm/pgtable.c14
-rw-r--r--arch/s390/oprofile/hwsampler.c7
-rw-r--r--arch/s390/oprofile/init.c375
-rw-r--r--arch/s390/oprofile/op_counter.h23
-rw-r--r--arch/score/Kconfig6
-rw-r--r--arch/score/kernel/setup.c4
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/board-sh7757lcr.c16
-rw-r--r--arch/sh/drivers/dma/dma-sysfs.c81
-rw-r--r--arch/sh/include/asm/dma.h4
-rw-r--r--arch/sh/include/asm/memblock.h4
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/kernel/cpu/sh4/sq.c24
-rw-r--r--arch/sh/kernel/idle.c6
-rw-r--r--arch/sh/kernel/machine_kexec.c3
-rw-r--r--arch/sh/kernel/setup.c3
-rw-r--r--arch/sh/mm/Kconfig3
-rw-r--r--arch/sh/mm/init.c3
-rw-r--r--arch/sh/oprofile/common.c4
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/include/asm/atomic_32.h104
-rw-r--r--arch/sparc/include/asm/memblock.h8
-rw-r--r--arch/sparc/include/asm/page_32.h10
-rw-r--r--arch/sparc/include/asm/pgtsun4.h171
-rw-r--r--arch/sparc/include/asm/posix_types.h2
-rw-r--r--arch/sparc/include/asm/signal.h3
-rw-r--r--arch/sparc/include/asm/socket.h3
-rw-r--r--arch/sparc/include/asm/thread_info_32.h4
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/types.h6
-rw-r--r--arch/sparc/kernel/ds.c6
-rw-r--r--arch/sparc/kernel/pci_sun4v.c4
-rw-r--r--arch/sparc/kernel/process_64.c6
-rw-r--r--arch/sparc/kernel/prom_common.c4
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/smp_64.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c6
-rw-r--r--arch/sparc/kernel/sysfs.c122
-rw-r--r--arch/sparc/lib/atomic_32.S55
-rw-r--r--arch/sparc/lib/ksyms.c6
-rw-r--r--arch/sparc/mm/btfixup.c3
-rw-r--r--arch/sparc/mm/init_64.c32
-rw-r--r--arch/tile/include/asm/irq.h10
-rw-r--r--arch/tile/kernel/irq.c16
-rw-r--r--arch/tile/kernel/pci-dma.c1
-rw-r--r--arch/tile/kernel/pci.c1
-rw-r--r--arch/tile/kernel/process.c6
-rw-r--r--arch/tile/kernel/sysfs.c62
-rw-r--r--arch/tile/lib/exports.c3
-rw-r--r--arch/tile/mm/fault.c4
-rw-r--r--arch/tile/mm/homecache.c9
-rw-r--r--arch/um/include/asm/thread_info.h2
-rw-r--r--arch/um/kernel/process.c6
-rw-r--r--arch/um/kernel/time.c6
-rw-r--r--arch/unicore32/include/asm/io.h8
-rw-r--r--arch/unicore32/include/asm/thread_info.h2
-rw-r--r--arch/unicore32/kernel/process.c6
-rw-r--r--arch/unicore32/kernel/puv3-core.c1
-rw-r--r--arch/unicore32/kernel/puv3-nb0916.c5
-rw-r--r--arch/unicore32/kernel/setup.c3
-rw-r--r--arch/unicore32/kernel/signal.c15
-rw-r--r--arch/unicore32/kernel/time.c2
-rw-r--r--arch/unicore32/mm/init.c4
-rw-r--r--arch/unicore32/mm/mmu.c1
-rw-r--r--arch/x86/Kconfig29
-rw-r--r--arch/x86/ia32/ia32entry.S43
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/apic.h6
-rw-r--r--arch/x86/include/asm/apic_flat_64.h7
-rw-r--r--arch/x86/include/asm/apicdef.h1
-rw-r--r--arch/x86/include/asm/bitops.h76
-rw-r--r--arch/x86/include/asm/cmpxchg.h163
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h46
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h43
-rw-r--r--arch/x86/include/asm/cpufeature.h3
-rw-r--r--arch/x86/include/asm/div64.h22
-rw-r--r--arch/x86/include/asm/e820.h2
-rw-r--r--arch/x86/include/asm/hardirq.h1
-rw-r--r--arch/x86/include/asm/i387.h2
-rw-r--r--arch/x86/include/asm/insn.h7
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h14
-rw-r--r--arch/x86/include/asm/iommu.h1
-rw-r--r--arch/x86/include/asm/kvm_emulate.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h90
-rw-r--r--arch/x86/include/asm/mach_timer.h2
-rw-r--r--arch/x86/include/asm/mc146818rtc.h4
-rw-r--r--arch/x86/include/asm/mce.h14
-rw-r--r--arch/x86/include/asm/memblock.h23
-rw-r--r--arch/x86/include/asm/microcode.h2
-rw-r--r--arch/x86/include/asm/mrst.h11
-rw-r--r--arch/x86/include/asm/msr.h9
-rw-r--r--arch/x86/include/asm/numachip/numachip_csr.h167
-rw-r--r--arch/x86/include/asm/percpu.h77
-rw-r--r--arch/x86/include/asm/perf_event.h44
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/processor-flags.h1
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/spinlock.h15
-rw-r--r--arch/x86/include/asm/system.h1
-rw-r--r--arch/x86/include/asm/thread_info.h11
-rw-r--r--arch/x86/include/asm/timer.h23
-rw-r--r--arch/x86/include/asm/topology.h2
-rw-r--r--arch/x86/include/asm/tsc.h2
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h1
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/kernel/acpi/boot.c10
-rw-r--r--arch/x86/kernel/amd_nb.c8
-rw-r--r--arch/x86/kernel/aperture_64.c4
-rw-r--r--arch/x86/kernel/apic/Makefile1
-rw-r--r--arch/x86/kernel/apic/apic.c113
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c9
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c294
-rw-r--r--arch/x86/kernel/apic/io_apic.c6
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/check.c34
-rw-r--r--arch/x86/kernel/cpu/amd.c17
-rw-r--r--arch/x86/kernel/cpu/centaur.c2
-rw-r--r--arch/x86/kernel/cpu/common.c14
-rw-r--r--arch/x86/kernel/cpu/cpu.h5
-rw-r--r--arch/x86/kernel/cpu/intel.c2
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c25
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c34
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c194
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c18
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c94
-rw-r--r--arch/x86/kernel/cpu/mcheck/threshold.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c278
-rw-r--r--arch/x86/kernel/cpu/perf_event.h51
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c29
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c94
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/cpu/powerflags.c3
-rw-r--r--arch/x86/kernel/cpu/proc.c4
-rw-r--r--arch/x86/kernel/cpuid.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c8
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/e820.c58
-rw-r--r--arch/x86/kernel/early_printk.c2
-rw-r--r--arch/x86/kernel/entry_32.S4
-rw-r--r--arch/x86/kernel/entry_64.S31
-rw-r--r--arch/x86/kernel/head.c2
-rw-r--r--arch/x86/kernel/head32.c7
-rw-r--r--arch/x86/kernel/head64.c7
-rw-r--r--arch/x86/kernel/hpet.c29
-rw-r--r--arch/x86/kernel/irq.c11
-rw-r--r--arch/x86/kernel/irq_64.c3
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/jump_label.c2
-rw-r--r--arch/x86/kernel/kvm.c181
-rw-r--r--arch/x86/kernel/microcode_amd.c209
-rw-r--r--arch/x86/kernel/microcode_core.c91
-rw-r--r--arch/x86/kernel/mpparse.c14
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/pci-dma.c11
-rw-r--r--arch/x86/kernel/process.c10
-rw-r--r--arch/x86/kernel/process_32.c6
-rw-r--r--arch/x86/kernel/process_64.c15
-rw-r--r--arch/x86/kernel/ptrace.c3
-rw-r--r--arch/x86/kernel/quirks.c13
-rw-r--r--arch/x86/kernel/reboot.c21
-rw-r--r--arch/x86/kernel/rtc.c5
-rw-r--r--arch/x86/kernel/setup.c21
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/trampoline.c4
-rw-r--r--arch/x86/kernel/traps.c7
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kernel/tsc_sync.c4
-rw-r--r--arch/x86/kernel/vsyscall_64.c77
-rw-r--r--arch/x86/kernel/x86_init.c1
-rw-r--r--arch/x86/kvm/Kconfig3
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/cpuid.c670
-rw-r--r--arch/x86/kvm/cpuid.h46
-rw-r--r--arch/x86/kvm/emulate.c436
-rw-r--r--arch/x86/kvm/i8254.c14
-rw-r--r--arch/x86/kvm/i8259.c24
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/lapic.h1
-rw-r--r--arch/x86/kvm/mmu.c545
-rw-r--r--arch/x86/kvm/mmu_audit.c29
-rw-r--r--arch/x86/kvm/mmutrace.h19
-rw-r--r--arch/x86/kvm/paging_tmpl.h86
-rw-r--r--arch/x86/kvm/pmu.c533
-rw-r--r--arch/x86/kvm/svm.c15
-rw-r--r--arch/x86/kvm/timer.c26
-rw-r--r--arch/x86/kvm/vmx.c45
-rw-r--r--arch/x86/kvm/x86.c1008
-rw-r--r--arch/x86/kvm/x86.h5
-rw-r--r--arch/x86/lib/inat.c9
-rw-r--r--arch/x86/lib/insn.c4
-rw-r--r--arch/x86/lib/string_32.c8
-rw-r--r--arch/x86/lib/x86-opcode-map.txt606
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/extable.c2
-rw-r--r--arch/x86/mm/fault.c22
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/mm/init.c8
-rw-r--r--arch/x86/mm/init_32.c36
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/memblock.c348
-rw-r--r--arch/x86/mm/memtest.c33
-rw-r--r--arch/x86/mm/numa.c37
-rw-r--r--arch/x86/mm/numa_32.c10
-rw-r--r--arch/x86/mm/numa_64.c2
-rw-r--r--arch/x86/mm/numa_emulation.c36
-rw-r--r--arch/x86/mm/pageattr.c2
-rw-r--r--arch/x86/mm/srat.c7
-rw-r--r--arch/x86/net/bpf_jit_comp.c4
-rw-r--r--arch/x86/oprofile/Makefile3
-rw-r--r--arch/x86/oprofile/init.c25
-rw-r--r--arch/x86/oprofile/nmi_int.c27
-rw-r--r--arch/x86/oprofile/nmi_timer_int.c50
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--arch/x86/platform/efi/efi.c12
-rw-r--r--arch/x86/platform/efi/efi_32.c48
-rw-r--r--arch/x86/platform/mrst/early_printk_mrst.c16
-rw-r--r--arch/x86/platform/mrst/mrst.c68
-rw-r--r--arch/x86/platform/uv/uv_sysfs.c2
-rw-r--r--arch/x86/tools/Makefile11
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk21
-rw-r--r--arch/x86/tools/insn_sanity.c275
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/debugfs.c2
-rw-r--r--arch/x86/xen/debugfs.h2
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/grant-table.c44
-rw-r--r--arch/x86/xen/mmu.c12
-rw-r--r--arch/x86/xen/setup.c27
-rw-r--r--arch/xtensa/include/asm/socket.h3
-rw-r--r--arch/xtensa/include/asm/thread_info.h2
-rw-r--r--arch/xtensa/include/asm/types.h2
-rw-r--r--arch/xtensa/kernel/time.c13
-rw-r--r--block/Kconfig6
-rw-r--r--block/Makefile3
-rw-r--r--block/blk-cgroup.c45
-rw-r--r--block/blk-core.c23
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-tag.c13
-rw-r--r--block/bsg.c2
-rw-r--r--block/cfq-iosched.c28
-rw-r--r--block/genhd.c5
-rw-r--r--block/ioctl.c28
-rw-r--r--block/partition-generic.c (renamed from fs/partitions/check.c)152
-rw-r--r--block/partitions/Kconfig (renamed from fs/partitions/Kconfig)0
-rw-r--r--block/partitions/Makefile (renamed from fs/partitions/Makefile)0
-rw-r--r--block/partitions/acorn.c (renamed from fs/partitions/acorn.c)0
-rw-r--r--block/partitions/acorn.h (renamed from fs/partitions/acorn.h)0
-rw-r--r--block/partitions/amiga.c (renamed from fs/partitions/amiga.c)0
-rw-r--r--block/partitions/amiga.h (renamed from fs/partitions/amiga.h)0
-rw-r--r--block/partitions/atari.c (renamed from fs/partitions/atari.c)0
-rw-r--r--block/partitions/atari.h (renamed from fs/partitions/atari.h)0
-rw-r--r--block/partitions/check.c166
-rw-r--r--block/partitions/check.h (renamed from fs/partitions/check.h)3
-rw-r--r--block/partitions/efi.c (renamed from fs/partitions/efi.c)0
-rw-r--r--block/partitions/efi.h (renamed from fs/partitions/efi.h)0
-rw-r--r--block/partitions/ibm.c (renamed from fs/partitions/ibm.c)0
-rw-r--r--block/partitions/ibm.h (renamed from fs/partitions/ibm.h)0
-rw-r--r--block/partitions/karma.c (renamed from fs/partitions/karma.c)0
-rw-r--r--block/partitions/karma.h (renamed from fs/partitions/karma.h)0
-rw-r--r--block/partitions/ldm.c (renamed from fs/partitions/ldm.c)0
-rw-r--r--block/partitions/ldm.h (renamed from fs/partitions/ldm.h)0
-rw-r--r--block/partitions/mac.c (renamed from fs/partitions/mac.c)0
-rw-r--r--block/partitions/mac.h (renamed from fs/partitions/mac.h)0
-rw-r--r--block/partitions/msdos.c (renamed from fs/partitions/msdos.c)0
-rw-r--r--block/partitions/msdos.h (renamed from fs/partitions/msdos.h)0
-rw-r--r--block/partitions/osf.c (renamed from fs/partitions/osf.c)0
-rw-r--r--block/partitions/osf.h (renamed from fs/partitions/osf.h)0
-rw-r--r--block/partitions/sgi.c (renamed from fs/partitions/sgi.c)0
-rw-r--r--block/partitions/sgi.h (renamed from fs/partitions/sgi.h)0
-rw-r--r--block/partitions/sun.c (renamed from fs/partitions/sun.c)0
-rw-r--r--block/partitions/sun.h (renamed from fs/partitions/sun.h)0
-rw-r--r--block/partitions/sysv68.c (renamed from fs/partitions/sysv68.c)0
-rw-r--r--block/partitions/sysv68.h (renamed from fs/partitions/sysv68.h)0
-rw-r--r--block/partitions/ultrix.c (renamed from fs/partitions/ultrix.c)0
-rw-r--r--block/partitions/ultrix.h (renamed from fs/partitions/ultrix.h)0
-rw-r--r--crypto/Kconfig2
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/apei/erst.c37
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/pci_root.c7
-rw-r--r--drivers/acpi/processor_driver.c6
-rw-r--r--drivers/acpi/processor_thermal.c1
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/amba/bus.c140
-rw-r--r--drivers/ata/ahci.c26
-rw-r--r--drivers/ata/ahci_platform.c68
-rw-r--r--drivers/ata/libahci.c5
-rw-r--r--drivers/ata/libata-core.c188
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/libata-transport.c5
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_arasan_cf.c12
-rw-r--r--drivers/ata/pata_at91.c21
-rw-r--r--drivers/ata/pata_bf54x.c20
-rw-r--r--drivers/ata/pata_cs5536.c99
-rw-r--r--drivers/ata/pata_imx.c12
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c13
-rw-r--r--drivers/ata/pata_mpc52xx.c21
-rw-r--r--drivers/ata/pata_of_platform.c27
-rw-r--r--drivers/ata/pata_palmld.c13
-rw-r--r--drivers/ata/pata_platform.c12
-rw-r--r--drivers/ata/pata_pxa.c13
-rw-r--r--drivers/ata/pata_rb532_cf.c21
-rw-r--r--drivers/ata/sata_dwc_460ex.c13
-rw-r--r--drivers/ata/sata_fsl.c14
-rw-r--r--drivers/ata/sata_mv.c19
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/Kconfig11
-rw-r--r--drivers/base/Makefile5
-rw-r--r--drivers/base/base.h12
-rw-r--r--drivers/base/bus.c293
-rw-r--r--drivers/base/class.c14
-rw-r--r--drivers/base/core.c95
-rw-r--r--drivers/base/cpu.c153
-rw-r--r--drivers/base/devres.c2
-rw-r--r--drivers/base/devtmpfs.c11
-rw-r--r--drivers/base/dma-buf.c291
-rw-r--r--drivers/base/firmware_class.c18
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/memory.c160
-rw-r--r--drivers/base/node.c154
-rw-r--r--drivers/base/platform.c117
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/domain.c539
-rw-r--r--drivers/base/power/domain_governor.c156
-rw-r--r--drivers/base/power/generic_ops.c91
-rw-r--r--drivers/base/power/main.c375
-rw-r--r--drivers/base/power/qos.c49
-rw-r--r--drivers/base/power/runtime.c157
-rw-r--r--drivers/base/regmap/Kconfig3
-rw-r--r--drivers/base/regmap/Makefile4
-rw-r--r--drivers/base/regmap/internal.h6
-rw-r--r--drivers/base/regmap/regcache-indexed.c64
-rw-r--r--drivers/base/regmap/regcache-lzo.c21
-rw-r--r--drivers/base/regmap/regcache-rbtree.c61
-rw-r--r--drivers/base/regmap/regcache.c87
-rw-r--r--drivers/base/regmap/regmap-irq.c302
-rw-r--r--drivers/base/regmap/regmap.c179
-rw-r--r--drivers/base/sys.c10
-rw-r--r--drivers/base/topology.c51
-rw-r--r--drivers/bcma/bcma_private.h3
-rw-r--r--drivers/bcma/host_pci.c69
-rw-r--r--drivers/bcma/main.c16
-rw-r--r--drivers/bcma/sprom.c61
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoechr.c2
-rw-r--r--drivers/block/brd.c9
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/block/xen-blkback/xenbus.c11
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/block/xsysace.c10
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/ath3k.c29
-rw-r--r--drivers/bluetooth/bcm203x.c21
-rw-r--r--drivers/bluetooth/bfusb.c25
-rw-r--r--drivers/bluetooth/bluecard_cs.c4
-rw-r--r--drivers/bluetooth/bpa10x.c15
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c44
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/bluetooth/hci_vhci.c13
-rw-r--r--drivers/cdrom/cdrom.c1
-rw-r--r--drivers/char/agp/generic.c8
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c41
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/random.c8
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tile-srom.c2
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clocksource/acpi_pm.c2
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c16
-rw-r--r--drivers/clocksource/i8253.c6
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/cpufreq/cpufreq.c79
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c50
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c54
-rw-r--r--drivers/cpufreq/cpufreq_stats.c6
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h10
-rw-r--r--drivers/cpuidle/sysfs.c74
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/devfreq/Kconfig13
-rw-r--r--drivers/devfreq/Makefile3
-rw-r--r--drivers/devfreq/devfreq.c15
-rw-r--r--drivers/devfreq/exynos4_bus.c1135
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/dmatest.c46
-rw-r--r--drivers/dma/mv_xor.c11
-rw-r--r--drivers/dma/mxs-dma.c8
-rw-r--r--drivers/dma/pl330.c101
-rw-r--r--drivers/edac/edac_core.h7
-rw-r--r--drivers/edac/edac_device.c1
-rw-r--r--drivers/edac/edac_device_sysfs.c20
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_mc_sysfs.c16
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c1
-rw-r--r--drivers/edac/edac_pci_sysfs.c16
-rw-r--r--drivers/edac/edac_stub.c27
-rw-r--r--drivers/edac/i7core_edac.c4
-rw-r--r--drivers/edac/i82975x_edac.c30
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/mce_amd_inj.c13
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/sb_edac.c8
-rw-r--r--drivers/firmware/efivars.c20
-rw-r--r--drivers/firmware/google/gsmi.c3
-rw-r--r--drivers/firmware/iscsi_ibft.c54
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c81
-rw-r--r--drivers/gpio/Kconfig30
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-adp5520.c12
-rw-r--r--drivers/gpio/gpio-adp5588.c5
-rw-r--r--drivers/gpio/gpio-bt8xx.c3
-rw-r--r--drivers/gpio/gpio-cs5535.c14
-rw-r--r--drivers/gpio/gpio-da9052.c33
-rw-r--r--drivers/gpio/gpio-generic.c12
-rw-r--r--drivers/gpio/gpio-janz-ttl.c15
-rw-r--r--drivers/gpio/gpio-ml-ioh.c32
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-nomadik.c4
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pcf857x.c5
-rw-r--r--drivers/gpio/gpio-pch.c7
-rw-r--r--drivers/gpio/gpio-pl061.c6
-rw-r--r--drivers/gpio/gpio-pxa.c377
-rw-r--r--drivers/gpio/gpio-rdc321x.c13
-rw-r--r--drivers/gpio/gpio-samsung.c105
-rw-r--r--drivers/gpio/gpio-sch.c13
-rw-r--r--drivers/gpio/gpio-timberdale.c13
-rw-r--r--drivers/gpio/gpio-ucb1400.c13
-rw-r--r--drivers/gpio/gpio-vr41xx.c13
-rw-r--r--drivers/gpio/gpio-vx855.c12
-rw-r--r--drivers/gpio/gpio-wm8994.c79
-rw-r--r--drivers/gpio/gpio-xilinx.c1
-rw-r--r--drivers/gpio/gpiolib.c6
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_context.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c608
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c77
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_edid.c103
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h284
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c7
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c15
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_sman.c351
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig7
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c139
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c168
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c190
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c126
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c255
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c248
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h75
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c439
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h73
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c163
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h14
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1176
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h87
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1070
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.h92
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h147
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h141
-rw-r--r--drivers/gpu/drm/exynos/regs-vp.h91
-rw-r--r--drivers/gpu/drm/gma500/Kconfig27
-rw-r--r--drivers/gpu/drm/gma500/Makefile40
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c364
-rw-r--r--drivers/gpu/drm/gma500/backlight.c49
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c351
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h36
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c333
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1508
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c394
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c732
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c831
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h47
-rw-r--r--drivers/gpu/drm/gma500/gem.c292
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.c89
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.h2
-rw-r--r--drivers/gpu/drm/gma500/gtt.c553
-rw-r--r--drivers/gpu/drm/gma500/gtt.h64
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c303
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h430
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c493
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c169
-rw-r--r--drivers/gpu/drm/gma500/intel_opregion.c81
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c263
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.h21
-rw-r--r--drivers/gpu/drm/gma500/mmu.c858
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h252
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c604
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c512
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c859
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c328
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c449
-rw-r--r--drivers/gpu/drm/gma500/power.c316
-rw-r--r--drivers/gpu/drm/gma500/power.h67
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c328
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c703
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h956
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1446
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.h28
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h289
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c868
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h1309
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2617
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h723
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c564
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h45
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c88
-rw-r--r--drivers/gpu/drm/gma500/psb_reg.h582
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c19
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c24
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h6
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c87
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c75
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h22
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c82
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h216
-rw-r--r--drivers/gpu/drm/i915/intel_display.c445
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c174
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h52
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c19
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c30
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c668
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c29
-rw-r--r--drivers/gpu/drm/nouveau/Makefile9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c904
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c403
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c243
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h139
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c258
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwsq.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c556
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c677
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c382
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c176
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c197
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c109
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c117
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c347
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c144
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c272
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c783
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c78
-rw-r--r--drivers/gpu/drm/nouveau/nv98_ppp.c78
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc262
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc56
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c127
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc217
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc311
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h96
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c237
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c835
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c30
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atom.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c41
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c35
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c256
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c242
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c395
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h42
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h96
-rw-r--r--drivers/gpu/drm/radeon/ni.c395
-rw-r--r--drivers/gpu/drm/radeon/nid.h35
-rw-r--r--drivers/gpu/drm/radeon/r100.c239
-rw-r--r--drivers/gpu/drm/radeon/r200.c21
-rw-r--r--drivers/gpu/drm/radeon/r300.c160
-rw-r--r--drivers/gpu/drm/radeon/r420.c49
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c25
-rw-r--r--drivers/gpu/drm/radeon/r600.c273
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c57
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c235
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h367
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c197
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h48
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c302
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c307
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c425
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c147
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h32
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c465
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c189
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c178
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c269
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c355
-rw-r--r--drivers/gpu/drm/radeon/rs400.c27
-rw-r--r--drivers/gpu/drm/radeon/rs600.c45
-rw-r--r--drivers/gpu/drm/radeon/rs690.c30
-rw-r--r--drivers/gpu/drm/radeon/rv515.c106
-rw-r--r--drivers/gpu/drm/radeon/rv770.c70
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c23
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c56
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h7
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c199
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c23
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c105
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c90
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c32
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c184
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c1143
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c324
-rw-r--r--drivers/gpu/drm/via/via_drv.c48
-rw-r--r--drivers/gpu/drm/via/via_drv.h7
-rw-r--r--drivers/gpu/drm/via/via_map.c10
-rw-r--r--drivers/gpu/drm/via/via_mm.c135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c427
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c58
-rw-r--r--drivers/hid/Kconfig38
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c84
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-emsff.c2
-rw-r--r--drivers/hid/hid-hyperv.c (renamed from drivers/staging/hv/hv_mouse.c)303
-rw-r--r--drivers/hid/hid-ids.h45
-rw-r--r--drivers/hid/hid-input.c228
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-multitouch.c213
-rw-r--r--drivers/hid/hid-picolcd.c4
-rw-r--r--drivers/hid/hid-pl.c4
-rw-r--r--drivers/hid/hid-quanta.c261
-rw-r--r--drivers/hid/hid-roccat-common.c4
-rw-r--r--drivers/hid/hid-roccat-isku.c487
-rw-r--r--drivers/hid/hid-roccat-isku.h147
-rw-r--r--drivers/hid/hid-roccat-kone.c4
-rw-r--r--drivers/hid/hid-twinhan.c2
-rw-r--r--drivers/hid/hid-wacom.c179
-rw-r--r--drivers/hid/hid-wiimote-core.c (renamed from drivers/hid/hid-wiimote.c)246
-rw-r--r--drivers/hid/hid-wiimote-debug.c227
-rw-r--r--drivers/hid/hid-wiimote-ext.c752
-rw-r--r--drivers/hid/hid-wiimote.h208
-rw-r--r--drivers/hid/usbhid/hid-core.c241
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h3
-rw-r--r--drivers/hid/usbhid/usbkbd.c81
-rw-r--r--drivers/hid/usbhid/usbmouse.c17
-rw-r--r--drivers/hv/Kconfig4
-rw-r--r--drivers/hv/channel_mgmt.c12
-rw-r--r--drivers/hv/hv.c8
-rw-r--r--drivers/hv/hv_kvp.c10
-rw-r--r--drivers/hv/hyperv_vmbus.h1
-rw-r--r--drivers/hv/vmbus_drv.c30
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/acpi_power_meter.c6
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/adcxx.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c28
-rw-r--r--drivers/hwmon/adt7470.c26
-rw-r--r--drivers/hwmon/adt7475.c20
-rw-r--r--drivers/hwmon/amc6821.c14
-rw-r--r--drivers/hwmon/applesmc.c6
-rw-r--r--drivers/hwmon/asc7621.c24
-rw-r--r--drivers/hwmon/coretemp.c7
-rw-r--r--drivers/hwmon/dme1737.c6
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc1403.c6
-rw-r--r--drivers/hwmon/emc2103.c10
-rw-r--r--drivers/hwmon/emc6w201.c6
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c309
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gpio-fan.c19
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/it87.c34
-rw-r--r--drivers/hwmon/jc42.c6
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c25
-rw-r--r--drivers/hwmon/lm75.h5
-rw-r--r--drivers/hwmon/lm80.c70
-rw-r--r--drivers/hwmon/lm90.c10
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/lm95245.c8
-rw-r--r--drivers/hwmon/ltc4261.c1
-rw-r--r--drivers/hwmon/max1111.c2
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max6639.c8
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/pc87427.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/adm1275.c71
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c43
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp401.c10
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/w83627ehf.c22
-rw-r--r--drivers/hwmon/w83791d.c8
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwmon/w83795.c32
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c12
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c15
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c22
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-puv3.c16
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c10
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c15
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/ide/at91_ide.c2
-rw-r--r--drivers/ieee802154/fakehard.c2
-rw-r--r--drivers/infiniband/core/addr.c50
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cm_msgs.h1
-rw-r--r--drivers/infiniband/core/cma.c20
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c27
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c218
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c6
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c20
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c13
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c58
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c43
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/input/evdev.c20
-rw-r--r--drivers/input/input-polldev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c13
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c13
-rw-r--r--drivers/input/keyboard/amikbd.c15
-rw-r--r--drivers/input/keyboard/atkbd.c40
-rw-r--r--drivers/input/keyboard/bf54x-keys.c16
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c13
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c14
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c14
-rw-r--r--drivers/input/keyboard/imx_keypad.c14
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c14
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c14
-rw-r--r--drivers/input/keyboard/lm8323.c11
-rw-r--r--drivers/input/keyboard/matrix_keypad.c14
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c15
-rw-r--r--drivers/input/keyboard/omap-keypad.c15
-rw-r--r--drivers/input/keyboard/omap4-keypad.c13
-rw-r--r--drivers/input/keyboard/opencores-kbd.c13
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c13
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c14
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c13
-rw-r--r--drivers/input/keyboard/samsung-keypad.c278
-rw-r--r--drivers/input/keyboard/sh_keysc.c14
-rw-r--r--drivers/input/keyboard/spear-keyboard.c13
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c13
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c15
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c430
-rw-r--r--drivers/input/keyboard/tegra-kbc.c132
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c14
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c13
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c14
-rw-r--r--drivers/input/misc/88pm860x_onkey.c13
-rw-r--r--drivers/input/misc/Kconfig25
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c13
-rw-r--r--drivers/input/misc/adxl34x-spi.c1
-rw-r--r--drivers/input/misc/adxl34x.c16
-rw-r--r--drivers/input/misc/ati_remote2.c40
-rw-r--r--drivers/input/misc/bfin_rotary.c13
-rw-r--r--drivers/input/misc/cma3000_d0x.c4
-rw-r--r--drivers/input/misc/cobalt_btns.c14
-rw-r--r--drivers/input/misc/dm355evm_keys.c13
-rw-r--r--drivers/input/misc/gp2ap002a00f.c299
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c213
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c13
-rw-r--r--drivers/input/misc/keyspan_remote.c21
-rw-r--r--drivers/input/misc/max8925_onkey.c13
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c14
-rw-r--r--drivers/input/misc/mpu3050.c128
-rw-r--r--drivers/input/misc/pcap_keys.c14
-rw-r--r--drivers/input/misc/pcf50633-input.c13
-rw-r--r--drivers/input/misc/pcspkr.c14
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c13
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c13
-rw-r--r--drivers/input/misc/powermate.c13
-rw-r--r--drivers/input/misc/pwm-beeper.c13
-rw-r--r--drivers/input/misc/rb532_button.c14
-rw-r--r--drivers/input/misc/rotary_encoder.c14
-rw-r--r--drivers/input/misc/sgi_btns.c13
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c15
-rw-r--r--drivers/input/misc/twl4030-vibra.c14
-rw-r--r--drivers/input/misc/twl6040-vibra.c13
-rw-r--r--drivers/input/misc/wm831x-on.c13
-rw-r--r--drivers/input/misc/xen-kbdfront.c7
-rw-r--r--drivers/input/misc/yealink.c17
-rw-r--r--drivers/input/mouse/alps.c1036
-rw-r--r--drivers/input/mouse/alps.h19
-rw-r--r--drivers/input/mouse/amimouse.c16
-rw-r--r--drivers/input/mouse/appletouch.c13
-rw-r--r--drivers/input/mouse/bcm5974.c14
-rw-r--r--drivers/input/mouse/elantech.c80
-rw-r--r--drivers/input/mouse/elantech.h2
-rw-r--r--drivers/input/mouse/gpio_mouse.c13
-rw-r--r--drivers/input/mouse/hgpk.c18
-rw-r--r--drivers/input/mouse/logips2pp.c9
-rw-r--r--drivers/input/mouse/psmouse-base.c229
-rw-r--r--drivers/input/mouse/psmouse.h3
-rw-r--r--drivers/input/mouse/pxa930_trkball.c14
-rw-r--r--drivers/input/mouse/sentelic.c51
-rw-r--r--drivers/input/mouse/sentelic.h3
-rw-r--r--drivers/input/mouse/synaptics.c208
-rw-r--r--drivers/input/mouse/synaptics.h5
-rw-r--r--drivers/input/mouse/trackpoint.c17
-rw-r--r--drivers/input/serio/altera_ps2.c13
-rw-r--r--drivers/input/serio/ambakmi.c2
-rw-r--r--drivers/input/serio/at32psif.c14
-rw-r--r--drivers/input/serio/i8042.c23
-rw-r--r--drivers/input/serio/rpckbd.c14
-rw-r--r--drivers/input/serio/xilinx_ps2.c16
-rw-r--r--drivers/input/tablet/acecad.c17
-rw-r--r--drivers/input/tablet/aiptek.c53
-rw-r--r--drivers/input/tablet/gtco.c28
-rw-r--r--drivers/input/tablet/hanwang.c13
-rw-r--r--drivers/input/tablet/kbtab.c20
-rw-r--r--drivers/input/tablet/wacom_sys.c120
-rw-r--r--drivers/input/tablet/wacom_wac.c191
-rw-r--r--drivers/input/tablet/wacom_wac.h5
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c13
-rw-r--r--drivers/input/touchscreen/Kconfig41
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c21
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c31
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c27
-rw-r--r--drivers/input/touchscreen/ad7879.c23
-rw-r--r--drivers/input/touchscreen/ad7879.h4
-rw-r--r--drivers/input/touchscreen/ads7846.c9
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c13
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c15
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c652
-rw-r--r--drivers/input/touchscreen/da9034-ts.c13
-rw-r--r--drivers/input/touchscreen/egalax_ts.c303
-rw-r--r--drivers/input/touchscreen/htcpen.c7
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c13
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c14
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c13
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c14
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c13
-rw-r--r--drivers/input/touchscreen/migor_ts.c117
-rw-r--r--drivers/input/touchscreen/pcap_ts.c14
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c239
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c14
-rw-r--r--drivers/input/touchscreen/st1232.c13
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c15
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c14
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c13
-rw-r--r--drivers/input/touchscreen/tsc2005.c4
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c287
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c49
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c14
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c13
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c19
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c883
-rw-r--r--drivers/iommu/amd_iommu_init.c133
-rw-r--r--drivers/iommu/amd_iommu_proto.h24
-rw-r--r--drivers/iommu/amd_iommu_types.h118
-rw-r--r--drivers/iommu/amd_iommu_v2.c994
-rw-r--r--drivers/iommu/intel-iommu.c111
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/iommu/iommu.c179
-rw-r--r--drivers/iommu/msm_iommu.c25
-rw-r--r--drivers/iommu/omap-iommu.c80
-rw-r--r--drivers/iommu/omap-iovmm.c48
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/gigaset/i4l.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c28
-rw-r--r--drivers/isdn/hisax/enternow_pci.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-triggers.c1
-rw-r--r--drivers/leds/leds-88pm860x.c12
-rw-r--r--drivers/leds/leds-adp5520.c12
-rw-r--r--drivers/leds/leds-ams-delta.c13
-rw-r--r--drivers/leds/leds-asic3.c16
-rw-r--r--drivers/leds/leds-atmel-pwm.c17
-rw-r--r--drivers/leds/leds-bd2802.c15
-rw-r--r--drivers/leds/leds-cobalt-qube.c17
-rw-r--r--drivers/leds/leds-da903x.c12
-rw-r--r--drivers/leds/leds-dac124s085.c13
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c16
-rw-r--r--drivers/leds/leds-hp6xx.c17
-rw-r--r--drivers/leds/leds-lm3530.c13
-rw-r--r--drivers/leds/leds-lp3944.c13
-rw-r--r--drivers/leds/leds-lp5521.c20
-rw-r--r--drivers/leds/leds-lp5523.c22
-rw-r--r--drivers/leds/leds-lt3593.c16
-rw-r--r--drivers/leds/leds-mc13783.c14
-rw-r--r--drivers/leds/leds-netxbig.c39
-rw-r--r--drivers/leds/leds-ns2.c15
-rw-r--r--drivers/leds/leds-pca9532.c14
-rw-r--r--drivers/leds/leds-pca955x.c13
-rw-r--r--drivers/leds/leds-pwm.c13
-rw-r--r--drivers/leds/leds-rb532.c16
-rw-r--r--drivers/leds/leds-regulator.c12
-rw-r--r--drivers/leds/leds-renesas-tpu.c13
-rw-r--r--drivers/leds/leds-s3c24xx.c13
-rw-r--r--drivers/leds/leds-tca6507.c779
-rw-r--r--drivers/leds/leds-wm831x-status.c17
-rw-r--r--drivers/leds/leds-wm8350.c19
-rw-r--r--drivers/lguest/lguest_device.c6
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c138
-rw-r--r--drivers/md/md.h82
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid1.c174
-rw-r--r--drivers/md/raid1.h7
-rw-r--r--drivers/md/raid10.c582
-rw-r--r--drivers/md/raid10.h61
-rw-r--r--drivers/md/raid5.c577
-rw-r--r--drivers/md/raid5.h98
-rw-r--r--drivers/media/common/tuners/mxl5007t.c3
-rw-r--r--drivers/media/common/tuners/tda18218.c2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-usb.c20
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c21
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c20
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c21
-rw-r--r--drivers/media/dvb/dvb-usb/au6610.c21
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c23
-rw-r--r--drivers/media/dvb/dvb-usb/ce6230.c22
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c20
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c22
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c21
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dtv5100.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c17
-rw-r--r--drivers/media/dvb/dvb-usb/ec168.c22
-rw-r--r--drivers/media/dvb/dvb-usb/friio.c23
-rw-r--r--drivers/media/dvb/dvb-usb/gl861.c21
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c21
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c21
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c21
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c22
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c19
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c21
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c17
-rw-r--r--drivers/media/dvb/dvb-usb/pctv452e.c17
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c20
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c21
-rw-r--r--drivers/media/dvb/dvb-usb/umt-010.c21
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c21
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c21
-rw-r--r--drivers/media/dvb/siano/smsusb.c21
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c21
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c21
-rw-r--r--drivers/media/radio/dsbr100.c16
-rw-r--r--drivers/media/radio/radio-mr800.c23
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c28
-rw-r--r--drivers/media/rc/ati_remote.c144
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/imon.c21
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c96
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c128
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c114
-rw-r--r--drivers/media/rc/mceusb.c20
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/rc/redrat3.c20
-rw-r--r--drivers/media/rc/streamzap.c28
-rw-r--r--drivers/media/rc/winbond-cir.c2
-rw-r--r--drivers/media/video/au0828/au0828-cards.c7
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c24
-rw-r--r--drivers/media/video/davinci/vpif.h1
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c24
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c25
-rw-r--r--drivers/media/video/gspca/benq.c13
-rw-r--r--drivers/media/video/gspca/conex.c13
-rw-r--r--drivers/media/video/gspca/cpia1.c13
-rw-r--r--drivers/media/video/gspca/etoms.c14
-rw-r--r--drivers/media/video/gspca/finepix.c14
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c17
-rw-r--r--drivers/media/video/gspca/gspca.c6
-rw-r--r--drivers/media/video/gspca/jeilinj.c14
-rw-r--r--drivers/media/video/gspca/kinect.c14
-rw-r--r--drivers/media/video/gspca/konica.c13
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c14
-rw-r--r--drivers/media/video/gspca/mars.c13
-rw-r--r--drivers/media/video/gspca/mr97310a.c13
-rw-r--r--drivers/media/video/gspca/nw80x.c13
-rw-r--r--drivers/media/video/gspca/ov519.c13
-rw-r--r--drivers/media/video/gspca/ov534.c14
-rw-r--r--drivers/media/video/gspca/ov534_9.c14
-rw-r--r--drivers/media/video/gspca/pac207.c13
-rw-r--r--drivers/media/video/gspca/pac7302.c13
-rw-r--r--drivers/media/video/gspca/pac7311.c13
-rw-r--r--drivers/media/video/gspca/se401.c13
-rw-r--r--drivers/media/video/gspca/sn9c2028.c14
-rw-r--r--drivers/media/video/gspca/sn9c20x.c13
-rw-r--r--drivers/media/video/gspca/sonixb.c13
-rw-r--r--drivers/media/video/gspca/sonixj.c13
-rw-r--r--drivers/media/video/gspca/spca1528.c13
-rw-r--r--drivers/media/video/gspca/spca500.c13
-rw-r--r--drivers/media/video/gspca/spca501.c13
-rw-r--r--drivers/media/video/gspca/spca505.c13
-rw-r--r--drivers/media/video/gspca/spca506.c19
-rw-r--r--drivers/media/video/gspca/spca508.c13
-rw-r--r--drivers/media/video/gspca/spca561.c13
-rw-r--r--drivers/media/video/gspca/sq905.c14
-rw-r--r--drivers/media/video/gspca/sq905c.c14
-rw-r--r--drivers/media/video/gspca/sq930x.c13
-rw-r--r--drivers/media/video/gspca/stk014.c13
-rw-r--r--drivers/media/video/gspca/stv0680.c13
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c13
-rw-r--r--drivers/media/video/gspca/sunplus.c13
-rw-r--r--drivers/media/video/gspca/t613.c13
-rw-r--r--drivers/media/video/gspca/topro.c13
-rw-r--r--drivers/media/video/gspca/tv8532.c14
-rw-r--r--drivers/media/video/gspca/vc032x.c13
-rw-r--r--drivers/media/video/gspca/vicam.c14
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c13
-rw-r--r--drivers/media/video/gspca/zc3xx.c13
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c21
-rw-r--r--drivers/media/video/m5mols/m5mols.h2
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c22
-rw-r--r--drivers/media/video/mt9m111.c1
-rw-r--r--drivers/media/video/mt9t112.c4
-rw-r--r--drivers/media/video/omap/omap_vout.c9
-rw-r--r--drivers/media/video/omap1_camera.c1
-rw-r--r--drivers/media/video/omap24xxcam-dma.c2
-rw-r--r--drivers/media/video/omap3isp/isp.c30
-rw-r--r--drivers/media/video/omap3isp/isp.h2
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c20
-rw-r--r--drivers/media/video/omap3isp/ispstat.c10
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c5
-rw-r--r--drivers/media/video/ov6650.c2
-rw-r--r--drivers/media/video/s2255drv.c20
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c14
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c24
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c43
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c15
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c34
-rw-r--r--drivers/media/video/sh_mobile_csi2.c4
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c25
-rw-r--r--drivers/media/video/soc_camera.c3
-rw-r--r--drivers/media/video/stk-webcam.c23
-rw-r--r--drivers/media/video/tm6000/tm6000-cards.c26
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/media/video/zr364xx.c23
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h2
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptbase.h1
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/i2o/i2o_proc.c2
-rw-r--r--drivers/mfd/Kconfig28
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/ab5500-debugfs.c2
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/da903x.c3
-rw-r--r--drivers/mfd/da9052-core.c694
-rw-r--r--drivers/mfd/da9052-i2c.c140
-rw-r--r--drivers/mfd/da9052-spi.c115
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/jz4740-adc.c1
-rw-r--r--drivers/mfd/omap-usb-host.c755
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/tps65910.c2
-rw-r--r--drivers/mfd/twl-core.c16
-rw-r--r--drivers/mfd/twl4030-irq.c18
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/mfd/wm8994-core.c1
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c10
-rw-r--r--drivers/misc/ad525x_dpot-spi.c97
-rw-r--r--drivers/misc/ad525x_dpot.c24
-rw-r--r--drivers/misc/ad525x_dpot.h8
-rw-r--r--drivers/misc/bmp085.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c88
-rw-r--r--drivers/misc/ibmasm/command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c2
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h2
-rw-r--r--drivers/misc/ibmasm/ibmasm.h2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h2
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/remote.h2
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/ti-st/st_core.c18
-rw-r--r--drivers/misc/ti-st/st_kim.c84
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/card/mmc_test.c2
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/at91_mci.c30
-rw-r--r--drivers/mmc/host/mmci.c16
-rw-r--r--drivers/mmc/host/mvsdio.c13
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/mxs-mmc.c10
-rw-r--r--drivers/mmc/host/omap.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c33
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c6
-rw-r--r--drivers/mmc/host/sdhci-dove.c5
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c5
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c5
-rw-r--r--drivers/mmc/host/sdhci-pci.c26
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c18
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c5
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c30
-rw-r--r--drivers/mmc/host/sdhci-tegra.c5
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/ushc.c12
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/Kconfig8
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c4
-rw-r--r--drivers/mtd/ar7part.c15
-rw-r--r--drivers/mtd/bcm63xxpart.c222
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c13
-rw-r--r--drivers/mtd/devices/Kconfig12
-rw-r--r--drivers/mtd/devices/block2mtd.c3
-rw-r--r--drivers/mtd/devices/doc2000.c9
-rw-r--r--drivers/mtd/devices/doc2001.c8
-rw-r--r--drivers/mtd/devices/doc2001plus.c9
-rw-r--r--drivers/mtd/devices/docg3.c1441
-rw-r--r--drivers/mtd/devices/docg3.h65
-rw-r--r--drivers/mtd/devices/docprobe.c7
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c3
-rw-r--r--drivers/mtd/ftl.c81
-rw-r--r--drivers/mtd/inftlcore.c25
-rw-r--r--drivers/mtd/inftlmount.c19
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c7
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c277
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c12
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ixp2000.c12
-rw-r--r--drivers/mtd/maps/ixp4xx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c12
-rw-r--r--drivers/mtd/maps/physmap.c10
-rw-r--r--drivers/mtd/maps/physmap_of.c13
-rw-r--r--drivers/mtd/maps/plat-ram.c12
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c19
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c18
-rw-r--r--drivers/mtd/maps/sa1100-flash.c17
-rw-r--r--drivers/mtd/maps/scb2_flash.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c13
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/mtdblock.c21
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c203
-rw-r--r--drivers/mtd/mtdconcat.c53
-rw-r--r--drivers/mtd/mtdcore.c126
-rw-r--r--drivers/mtd/mtdoops.c44
-rw-r--r--drivers/mtd/mtdpart.c63
-rw-r--r--drivers/mtd/mtdswap.c29
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/alauda.c13
-rw-r--r--drivers/mtd/nand/ams-delta.c12
-rw-r--r--drivers/mtd/nand/atmel_nand.c8
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c13
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c75
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/gpio.c115
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c12
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c14
-rw-r--r--drivers/mtd/nand/nand_base.c7
-rw-r--r--drivers/mtd/nand/nand_bbt.c14
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c2
-rw-r--r--drivers/mtd/nand/ndfc.c15
-rw-r--r--drivers/mtd/nand/nomadik_nand.c18
-rw-r--r--drivers/mtd/nand/nuc900_nand.c13
-rw-r--r--drivers/mtd/nand/omap2.c15
-rw-r--r--drivers/mtd/nand/pasemi_nand.c12
-rw-r--r--drivers/mtd/nand/plat_nand.c13
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c16
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c13
-rw-r--r--drivers/mtd/nand/tmio_nand.c13
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c6
-rw-r--r--drivers/mtd/nftlcore.c25
-rw-r--r--drivers/mtd/nftlmount.c13
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/onenand_base.c3
-rw-r--r--drivers/mtd/onenand/samsung.c13
-rw-r--r--drivers/mtd/redboot.c12
-rw-r--r--drivers/mtd/rfd_ftl.c46
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/ssfdc.c12
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c28
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c57
-rw-r--r--drivers/mtd/tests/mtd_readtest.c11
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c37
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c22
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c32
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c15
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/eba.c6
-rw-r--r--drivers/mtd/ubi/io.c19
-rw-r--r--drivers/mtd/ubi/kapi.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/wl.c12
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_ipv6.c225
-rw-r--r--drivers/net/bonding/bond_main.c116
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/caif/caif_hsi.c13
-rw-r--r--drivers/net/caif/caif_serial.c10
-rw-r--r--drivers/net/caif/caif_shmcore.c27
-rw-r--r--drivers/net/caif/caif_spi.c178
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c13
-rw-r--r--drivers/net/can/bfin_can.c12
-rw-r--r--drivers/net/can/c_can/c_can_platform.c12
-rw-r--r--drivers/net/can/cc770/Kconfig21
-rw-r--r--drivers/net/can/cc770/Makefile9
-rw-r--r--drivers/net/can/cc770/cc770.c881
-rw-r--r--drivers/net/can/cc770/cc770.h203
-rw-r--r--drivers/net/can/cc770/cc770_isa.c367
-rw-r--r--drivers/net/can/cc770/cc770_platform.c272
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c25
-rw-r--r--drivers/net/can/janz-ican3.c13
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c12
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c118
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c12
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c13
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c16
-rw-r--r--drivers/net/can/ti_hecc.c15
-rw-r--r--drivers/net/can/usb/ems_usb.c26
-rw-r--r--drivers/net/can/usb/esd_usb2.c23
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/dsa/Kconfig36
-rw-r--r--drivers/net/dsa/Makefile9
-rw-r--r--drivers/net/dsa/mv88e6060.c (renamed from net/dsa/mv88e6060.c)7
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c (renamed from net/dsa/mv88e6123_61_65.c)19
-rw-r--r--drivers/net/dsa/mv88e6131.c (renamed from net/dsa/mv88e6131.c)20
-rw-r--r--drivers/net/dsa/mv88e6xxx.c (renamed from net/dsa/mv88e6xxx.c)29
-rw-r--r--drivers/net/dsa/mv88e6xxx.h (renamed from net/dsa/mv88e6xxx.h)3
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c7
-rw-r--r--drivers/net/ethernet/3com/3c59x.c12
-rw-r--r--drivers/net/ethernet/3com/typhoon.c16
-rw-r--r--drivers/net/ethernet/8390/8390.h2
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c18
-rw-r--r--drivers/net/ethernet/8390/es3210.c2
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c2
-rw-r--r--drivers/net/ethernet/8390/hp.c2
-rw-r--r--drivers/net/ethernet/8390/hydra.c2
-rw-r--r--drivers/net/ethernet/8390/lne390.c4
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/8390/ne3210.c2
-rw-r--r--drivers/net/ethernet/8390/stnic.c2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c15
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h5
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c15
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c19
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c23
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c13
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c196
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c366
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h116
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h217
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1099
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c478
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c130
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c707
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h44
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h98
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c493
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h54
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h97
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c70
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c623
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c157
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c1
-rw-r--r--drivers/net/ethernet/cadence/Kconfig16
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c26
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c416
-rw-r--r--drivers/net/ethernet/cadence/macb.h152
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig7
-rw-r--r--drivers/net/ethernet/calxeda/Makefile1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1928
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c56
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c14
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c10
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dlink/de600.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c19
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h39
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c237
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h84
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c120
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c588
-rw-r--r--drivers/net/ethernet/ethoc.c13
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/fec.c89
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c69
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c28
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h7
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c7
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c27
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c61
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c465
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c37
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c359
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c96
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c83
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c49
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c40
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h42
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c129
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/korina.c13
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.c15
-rw-r--r--drivers/net/ethernet/marvell/sky2.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1332
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c430
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c415
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c902
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h670
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c486
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c616
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3104
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c132
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c13
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c513
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h15
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c16
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c77
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c20
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c13
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c503
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c5
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c48
-rw-r--r--drivers/net/ethernet/rdc/r6040.c42
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c20
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c89
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c19
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c18
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c221
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c310
-rw-r--r--drivers/net/ethernet/sfc/filter.h12
-rw-r--r--drivers/net/ethernet/sfc/mtd.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c9
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c29
-rw-r--r--drivers/net/ethernet/sgi/meth.c67
-rw-r--r--drivers/net/ethernet/sis/sis190.c15
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/sis/sis900.h2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c6
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c203
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c455
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c198
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c42
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c13
-rw-r--r--drivers/net/ethernet/sun/sungem.c6
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/tile/tilepro.c15
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c20
-rw-r--r--drivers/net/ethernet/via/via-rhine.c21
-rw-r--r--drivers/net/ethernet/via/via-velocity.c12
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c30
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/hyperv/Kconfig5
-rw-r--r--drivers/net/hyperv/Makefile3
-rw-r--r--drivers/net/hyperv/hyperv_net.h (renamed from drivers/staging/hv/hyperv_net.h)139
-rw-r--r--drivers/net/hyperv/netvsc.c (renamed from drivers/staging/hv/netvsc.c)164
-rw-r--r--drivers/net/hyperv/netvsc_drv.c (renamed from drivers/staging/hv/netvsc_drv.c)162
-rw-r--r--drivers/net/hyperv/rndis_filter.c (renamed from drivers/staging/hv/rndis_filter.c)44
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/bfin_sir.c13
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c35
-rw-r--r--drivers/net/irda/kingsun-sir.c19
-rw-r--r--drivers/net/irda/ks959-sir.c21
-rw-r--r--drivers/net/irda/ksdazzle-sir.c21
-rw-r--r--drivers/net/irda/mcs7780.c23
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c13
-rw-r--r--drivers/net/irda/sh_irda.c13
-rw-r--r--drivers/net/irda/sh_sir.c13
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c21
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/macvtap.c24
-rw-r--r--drivers/net/mii.c53
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/phy_device.c20
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/phy/spi_ks8995.c375
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/team/Kconfig43
-rw-r--r--drivers/net/team/Makefile7
-rw-r--r--drivers/net/team/team.c1684
-rw-r--r--drivers/net/team/team_mode_activebackup.c136
-rw-r--r--drivers/net/team/team_mode_roundrobin.c107
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/asix.c26
-rw-r--r--drivers/net/usb/catc.c17
-rw-r--r--drivers/net/usb/cdc-phonet.c23
-rw-r--r--drivers/net/usb/cdc_eem.c13
-rw-r--r--drivers/net/usb/cdc_ether.c19
-rw-r--r--drivers/net/usb/cdc_ncm.c23
-rw-r--r--drivers/net/usb/cdc_subset.c12
-rw-r--r--drivers/net/usb/cx82310_eth.c12
-rw-r--r--drivers/net/usb/dm9601.c13
-rw-r--r--drivers/net/usb/gl620a.c12
-rw-r--r--drivers/net/usb/int51x1.c12
-rw-r--r--drivers/net/usb/ipheth.c22
-rw-r--r--drivers/net/usb/kalmia.c12
-rw-r--r--drivers/net/usb/kaweth.c30
-rw-r--r--drivers/net/usb/lg-vl600.c12
-rw-r--r--drivers/net/usb/mcs7830.c12
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/pegasus.c8
-rw-r--r--drivers/net/usb/plusb.c12
-rw-r--r--drivers/net/usb/rndis_host.c12
-rw-r--r--drivers/net/usb/rtl8150.c15
-rw-r--r--drivers/net/usb/sierra_net.c21
-rw-r--r--drivers/net/usb/smsc75xx.c17
-rw-r--r--drivers/net/usb/smsc95xx.c17
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c48
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wimax/i2400m/tx.c8
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c7
-rw-r--r--drivers/net/wireless/Makefile8
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h571
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c293
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c217
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h124
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c370
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c81
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c222
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c853
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c234
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h59
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h22
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h5
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c244
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1695
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h32
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h17
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h309
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c849
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h79
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c (renamed from drivers/net/wireless/ath/ath6kl/htc_hif.c)150
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h66
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c725
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h18
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h92
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1013
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c771
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c660
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h19
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c219
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c834
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h386
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig31
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c155
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c223
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1493
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h102
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h60
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c135
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h41
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c215
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c93
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c285
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h243
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c338
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c668
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h134
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h321
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c359
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c97
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c13
-rw-r--r--drivers/net/wireless/ath/key.c8
-rw-r--r--drivers/net/wireless/ath/regd.c77
-rw-r--r--drivers/net/wireless/b43/b43.h20
-rw-r--r--drivers/net/wireless/b43/dma.c27
-rw-r--r--drivers/net/wireless/b43/leds.c16
-rw-r--r--drivers/net/wireless/b43/lo.c8
-rw-r--r--drivers/net/wireless/b43/main.c155
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_g.c34
-rw-r--r--drivers/net/wireless/b43/phy_lp.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c4112
-rw-r--r--drivers/net/wireless/b43/phy_n.h14
-rw-r--r--drivers/net/wireless/b43/pio.c22
-rw-r--r--drivers/net/wireless/b43/radio_2056.c25
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c183
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h31
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h20
-rw-r--r--drivers/net/wireless/b43legacy/dma.c81
-rw-r--r--drivers/net/wireless/b43legacy/dma.h5
-rw-r--r--drivers/net/wireless/b43legacy/leds.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c150
-rw-r--r--drivers/net/wireless/b43legacy/radio.c20
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c153
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c218
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h140
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c711
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1292
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c607
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h136
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h41
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c1251
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h224
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c118
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c469
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c328
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c1408
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c101
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c122
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c271
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c503
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c218
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h30
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h12
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c15
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h12
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c25
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3976
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c986
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2743
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h607
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6515
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2402
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1301
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5867
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3246
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1314
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c550
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c817
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c659
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c436
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c103
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c365
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1685
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h111
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c403
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c252
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c1601
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c (renamed from drivers/net/wireless/iwlwifi/iwl-sv-open.c)265
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c140
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c)418
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-wifi.h (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.h)21
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c12
-rw-r--r--drivers/net/wireless/libertas/cfg.c35
-rw-r--r--drivers/net/wireless/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/libertas/ethtool.c7
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c24
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c22
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c27
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c323
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h1
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c38
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h5
-rw-r--r--drivers/net/wireless/mwifiex/init.c46
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/mwifiex/join.c106
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c27
-rw-r--r--drivers/net/wireless/mwifiex/scan.c28
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c43
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c23
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c76
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/mwl8k.c162
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c27
-rw-r--r--drivers/net/wireless/orinoco/scan.c16
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54spi.c12
-rw-r--r--drivers/net/wireless/p54/p54usb.c13
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c335
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c5
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/rayctl.h4
-rw-r--r--drivers/net/wireless/rndis_wlan.c121
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c13
-rw-r--r--drivers/net/wireless/rtlwifi/base.c10
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c42
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c53
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c55
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h14
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/wl12xx/acx.c172
-rw-r--r--drivers/net/wireless/wl12xx/acx.h91
-rw-r--r--drivers/net/wireless/wl12xx/boot.c15
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c371
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h50
-rw-r--r--drivers/net/wireless/wl12xx/conf.h4
-rw-r--r--drivers/net/wireless/wl12xx/debug.h101
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c157
-rw-r--r--drivers/net/wireless/wl12xx/event.c216
-rw-r--r--drivers/net/wireless/wl12xx/event.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c491
-rw-r--r--drivers/net/wireless/wl12xx/init.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.c12
-rw-r--r--drivers/net/wireless/wl12xx/io.h23
-rw-r--r--drivers/net/wireless/wl12xx/main.c2058
-rw-r--r--drivers/net/wireless/wl12xx/ps.c62
-rw-r--r--drivers/net/wireless/wl12xx/ps.h9
-rw-r--r--drivers/net/wireless/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.c120
-rw-r--r--drivers/net/wireless/wl12xx/scan.h8
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c259
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c543
-rw-r--r--drivers/net/wireless/wl12xx/spi.c215
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c77
-rw-r--r--drivers/net/wireless/wl12xx/tx.c382
-rw-r--r--drivers/net/wireless/wl12xx/tx.h11
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h386
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c25
-rw-r--r--drivers/net/wireless/zd1201.c13
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c12
-rw-r--r--drivers/net/xen-netback/xenbus.c9
-rw-r--r--drivers/net/xen-netfront.c21
-rw-r--r--drivers/nfc/pn533.c189
-rw-r--r--drivers/of/Kconfig9
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c156
-rw-r--r--drivers/of/fdt.c6
-rw-r--r--drivers/of/gpio.c45
-rw-r--r--drivers/of/irq.c15
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/of/selftest.c139
-rw-r--r--drivers/oprofile/nmi_timer_int.c173
-rw-r--r--drivers/oprofile/oprof.c21
-rw-r--r--drivers/oprofile/oprof.h10
-rw-r--r--drivers/oprofile/oprofile_files.c7
-rw-r--r--drivers/oprofile/oprofilefs.c11
-rw-r--r--drivers/oprofile/timer_int.c29
-rw-r--r--drivers/parisc/Kconfig7
-rw-r--r--drivers/parport/parport_ax88796.c13
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_sunbpp.c13
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/ats.c91
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c11
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c1
-rw-r--r--drivers/pci/ioapic.c15
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/msi.c121
-rw-r--r--drivers/pci/pci-acpi.c13
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/pci/pcie/aspm.c58
-rw-r--r--drivers/pci/xen-pcifront.c11
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c16
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c9
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c2
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c6
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c5
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c4
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/core.c143
-rw-r--r--drivers/pinctrl/core.h13
-rw-r--r--drivers/pinctrl/pinconf.c326
-rw-r--r--drivers/pinctrl/pinconf.h36
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c (renamed from drivers/gpio/gpio-u300.c)21
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c (renamed from drivers/pinctrl/pinmux-sirf.c)9
-rw-r--r--drivers/pinctrl/pinctrl-u300.c (renamed from drivers/pinctrl/pinmux-u300.c)47
-rw-r--r--drivers/pinctrl/pinmux.c265
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c4
-rw-r--r--drivers/platform/x86/ibm_rtl.c34
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c19
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/power/intel_mid_battery.c12
-rw-r--r--drivers/power/power_supply_sysfs.c4
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/88pm8607.c2
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c6
-rw-r--r--drivers/regulator/ab3100.c2
-rw-r--r--drivers/regulator/ab8500.c2
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/bq24022.c2
-rw-r--r--drivers/regulator/core.c180
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/da9052-regulator.c606
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fixed.c85
-rw-r--r--drivers/regulator/gpio-regulator.c2
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max8649.c157
-rw-r--r--drivers/regulator/max8660.c2
-rw-r--r--drivers/regulator/max8925-regulator.c35
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8997.c2
-rw-r--r--drivers/regulator/max8998.c2
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c47
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c57
-rw-r--r--drivers/regulator/mc13xxx.h26
-rw-r--r--drivers/regulator/of_regulator.c87
-rw-r--r--drivers/regulator/pcap-regulator.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c3
-rw-r--r--drivers/regulator/tps65023-regulator.c89
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/tps6586x-regulator.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c39
-rw-r--r--drivers/regulator/tps65912-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c48
-rw-r--r--drivers/regulator/userspace-consumer.c13
-rw-r--r--drivers/regulator/virtual.c12
-rw-r--r--drivers/regulator/wm831x-dcdc.c18
-rw-r--r--drivers/regulator/wm831x-isink.c7
-rw-r--r--drivers/regulator/wm831x-ldo.c18
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/regulator/wm8994-regulator.c2
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c12
-rw-r--r--drivers/rtc/rtc-ab8500.c136
-rw-r--r--drivers/rtc/rtc-at91rm9200.c101
-rw-r--r--drivers/rtc/rtc-bfin.c13
-rw-r--r--drivers/rtc/rtc-bq4802.c13
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-dm355evm.c12
-rw-r--r--drivers/rtc/rtc-ds1286.c13
-rw-r--r--drivers/rtc/rtc-ds1511.c15
-rw-r--r--drivers/rtc/rtc-ds1553.c13
-rw-r--r--drivers/rtc/rtc-ds1742.c13
-rw-r--r--drivers/rtc/rtc-jz4740.c14
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t80.c9
-rw-r--r--drivers/rtc/rtc-m41t93.c1
-rw-r--r--drivers/rtc/rtc-m41t94.c1
-rw-r--r--drivers/rtc/rtc-m48t35.c13
-rw-r--r--drivers/rtc/rtc-m48t59.c13
-rw-r--r--drivers/rtc/rtc-m48t86.c13
-rw-r--r--drivers/rtc/rtc-max6902.c1
-rw-r--r--drivers/rtc/rtc-max8925.c12
-rw-r--r--drivers/rtc/rtc-max8998.c12
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c12
-rw-r--r--drivers/rtc/rtc-mrst.c13
-rw-r--r--drivers/rtc/rtc-mxc.c123
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/rtc/rtc-pcf50633.c12
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c12
-rw-r--r--drivers/rtc/rtc-puv3.c22
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-s3c.c39
-rw-r--r--drivers/rtc/rtc-sa1100.c313
-rw-r--r--drivers/rtc/rtc-spear.c12
-rw-r--r--drivers/rtc/rtc-stk17ta8.c13
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c13
-rw-r--r--drivers/rtc/rtc-twl.c10
-rw-r--r--drivers/rtc/rtc-v3020.c13
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/rtc/rtc-vt8500.c12
-rw-r--r--drivers/rtc/rtc-wm831x.c36
-rw-r--r--drivers/rtc/rtc-wm8350.c12
-rw-r--r--drivers/s390/block/dasd.c5
-rw-r--r--drivers/s390/block/dasd_3990_erp.c4
-rw-r--r--drivers/s390/block/dasd_alias.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c411
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/sclp_config.c8
-rw-r--r--drivers/s390/char/tape_class.h1
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c32
-rw-r--r--drivers/s390/kvm/kvm_virtio.c6
-rw-r--r--drivers/s390/net/netiucv.c217
-rw-r--r--drivers/s390/net/qeth_core_main.c47
-rw-r--r--drivers/s390/net/qeth_l2_main.c19
-rw-r--r--drivers/s390/net/qeth_l3_main.c32
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/aacraid/commctrl.c1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c17
-rw-r--r--drivers/scsi/bfa/bfa_defs.h4
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h437
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c6
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c5
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c58
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c19
-rw-r--r--drivers/scsi/fcoe/fcoe.c116
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c14
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c436
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c432
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c172
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c214
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c157
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c338
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c704
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c6
-rw-r--r--drivers/scsi/mac_esp.c3
-rw-r--r--drivers/scsi/mac_scsi.c3
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h10
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h28
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h49
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h67
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h9
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c205
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h26
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c170
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c9
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c314
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c98
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c655
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c255
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c554
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c449
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h56
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h16
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c243
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c30
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c23
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1221
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_pm.c27
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sg.c7
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/sh/intc/core.c37
-rw-r--r--drivers/sh/intc/internals.h7
-rw-r--r--drivers/sh/intc/userimask.c16
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c51
-rw-r--r--drivers/spi/spi-pl022.c143
-rw-r--r--drivers/spi/spi-s3c64xx.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c13
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/ssb/driver_pcicore.c8
-rw-r--r--drivers/ssb/pci.c23
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/Kconfig110
-rw-r--r--drivers/staging/android/Makefile9
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/android_pmem.h93
-rw-r--r--drivers/staging/android/ashmem.c752
-rw-r--r--drivers/staging/android/ashmem.h48
-rw-r--r--drivers/staging/android/binder.c3600
-rw-r--r--drivers/staging/android/binder.h330
-rw-r--r--drivers/staging/android/logger.c616
-rw-r--r--drivers/staging/android/logger.h49
-rw-r--r--drivers/staging/android/lowmemorykiller.c219
-rw-r--r--drivers/staging/android/pmem.c1345
-rw-r--r--drivers/staging/android/ram_console.c443
-rw-r--r--drivers/staging/android/ram_console.h (renamed from arch/arm/mach-tegra/include/mach/vmalloc.h)18
-rw-r--r--drivers/staging/android/switch/Kconfig11
-rw-r--r--drivers/staging/android/switch/Makefile4
-rw-r--r--drivers/staging/android/switch/switch.h53
-rw-r--r--drivers/staging/android/switch/switch_class.c174
-rw-r--r--drivers/staging/android/switch/switch_gpio.c172
-rw-r--r--drivers/staging/android/timed_gpio.c176
-rw-r--r--drivers/staging/android/timed_gpio.h (renamed from arch/arm/mach-msm/include/mach/vmalloc.h)25
-rw-r--r--drivers/staging/android/timed_output.c123
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/asus_oled/asus_oled.c4
-rw-r--r--drivers/staging/bcm/Bcmchar.c376
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c323
-rw-r--r--drivers/staging/bcm/InterfaceDld.c12
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c33
-rw-r--r--drivers/staging/bcm/InterfaceInit.c12
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c24
-rw-r--r--drivers/staging/bcm/Misc.c32
-rw-r--r--drivers/staging/bcm/hostmibs.c178
-rw-r--r--drivers/staging/bcm/led_control.c1131
-rw-r--r--drivers/staging/bcm/nvm.c95
-rw-r--r--drivers/staging/bcm/target_params.h4
-rw-r--r--drivers/staging/comedi/comedi_fops.c158
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c137
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7296.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7432.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c32
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c23
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c20
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c42
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c26
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c18
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c23
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c50
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c30
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c19
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c73
-rw-r--r--drivers/staging/comedi/drivers/das08.c6
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c8
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c37
-rw-r--r--drivers/staging/comedi/drivers/das1800.c65
-rw-r--r--drivers/staging/comedi/drivers/das6402.c23
-rw-r--r--drivers/staging/comedi/drivers/das800.c43
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c38
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c81
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c6
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c29
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c4
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c108
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c26
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c40
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c7
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c23
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h262
-rw-r--r--drivers/staging/cxt1e1/comet.h22
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c24
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h24
-rw-r--r--drivers/staging/cxt1e1/libsbew.h34
-rw-r--r--drivers/staging/cxt1e1/musycc.c49
-rw-r--r--drivers/staging/cxt1e1/musycc.h31
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c10
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h21
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h42
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h33
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h14
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c70
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h16
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h14
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h27
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h20
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h20
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h55
-rw-r--r--drivers/staging/et131x/et131x.c370
-rw-r--r--drivers/staging/frontier/alphatrack.c28
-rw-r--r--drivers/staging/frontier/tranzport.c29
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c22
-rw-r--r--drivers/staging/gma500/Kconfig2
-rw-r--r--drivers/staging/gma500/accel_2d.c2
-rw-r--r--drivers/staging/gma500/cdv_intel_display.c4
-rw-r--r--drivers/staging/gma500/framebuffer.c41
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c4
-rw-r--r--drivers/staging/gma500/mrst_crtc.c4
-rw-r--r--drivers/staging/gma500/power.c2
-rw-r--r--drivers/staging/gma500/psb_drv.c23
-rw-r--r--drivers/staging/gma500/psb_intel_display.c4
-rw-r--r--drivers/staging/hv/Kconfig12
-rw-r--r--drivers/staging/hv/Makefile3
-rw-r--r--drivers/staging/hv/TODO4
-rw-r--r--drivers/staging/hv/storvsc_drv.c310
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c8
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h26
-rw-r--r--drivers/staging/iio/Documentation/ring.txt10
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio10
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c49
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c37
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c48
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c45
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c9
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c40
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c45
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c9
-rw-r--r--drivers/staging/iio/accel/kxsd9.c23
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h12
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c48
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c61
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c37
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c17
-rw-r--r--drivers/staging/iio/adc/ad7192.c97
-rw-r--r--drivers/staging/iio/adc/ad7280a.c41
-rw-r--r--drivers/staging/iio/adc/ad7291.c53
-rw-r--r--drivers/staging/iio/adc/ad7298.h5
-rw-r--r--drivers/staging/iio/adc/ad7298_core.c80
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c45
-rw-r--r--drivers/staging/iio/adc/ad7476.h5
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c38
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c31
-rw-r--r--drivers/staging/iio/adc/ad7606.h1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c10
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c1
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c30
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c16
-rw-r--r--drivers/staging/iio/adc/ad7780.c21
-rw-r--r--drivers/staging/iio/adc/ad7793.c108
-rw-r--r--drivers/staging/iio/adc/ad7816.c16
-rw-r--r--drivers/staging/iio/adc/ad7887.h5
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c26
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c46
-rw-r--r--drivers/staging/iio/adc/ad799x.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c23
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c47
-rw-r--r--drivers/staging/iio/adc/adt7310.c17
-rw-r--r--drivers/staging/iio/adc/adt7410.c15
-rw-r--r--drivers/staging/iio/adc/max1363.h14
-rw-r--r--drivers/staging/iio/adc/max1363_core.c92
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c92
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c14
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c15
-rw-r--r--drivers/staging/iio/addac/adt7316.c1
-rw-r--r--drivers/staging/iio/buffer.h (renamed from drivers/staging/iio/buffer_generic.h)87
-rw-r--r--drivers/staging/iio/cdc/ad7150.c22
-rw-r--r--drivers/staging/iio/cdc/ad7152.c52
-rw-r--r--drivers/staging/iio/cdc/ad7746.c64
-rw-r--r--drivers/staging/iio/chrdev.h25
-rw-r--r--drivers/staging/iio/dac/Kconfig39
-rw-r--r--drivers/staging/iio/dac/Makefile3
-rw-r--r--drivers/staging/iio/dac/ad5064.c19
-rw-r--r--drivers/staging/iio/dac/ad5360.c37
-rw-r--r--drivers/staging/iio/dac/ad5380.c676
-rw-r--r--drivers/staging/iio/dac/ad5421.c555
-rw-r--r--drivers/staging/iio/dac/ad5421.h32
-rw-r--r--drivers/staging/iio/dac/ad5446.c201
-rw-r--r--drivers/staging/iio/dac/ad5446.h10
-rw-r--r--drivers/staging/iio/dac/ad5504.c145
-rw-r--r--drivers/staging/iio/dac/ad5504.h5
-rw-r--r--drivers/staging/iio/dac/ad5624r.h4
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c140
-rw-r--r--drivers/staging/iio/dac/ad5686.c18
-rw-r--r--drivers/staging/iio/dac/ad5764.c393
-rw-r--r--drivers/staging/iio/dac/ad5791.c28
-rw-r--r--drivers/staging/iio/dac/max517.c14
-rw-r--r--drivers/staging/iio/dds/ad5930.c14
-rw-r--r--drivers/staging/iio/dds/ad9832.c18
-rw-r--r--drivers/staging/iio/dds/ad9834.c24
-rw-r--r--drivers/staging/iio/dds/ad9850.c14
-rw-r--r--drivers/staging/iio/dds/ad9852.c14
-rw-r--r--drivers/staging/iio/dds/ad9910.c14
-rw-r--r--drivers/staging/iio/dds/ad9951.c14
-rw-r--r--drivers/staging/iio/events.h103
-rw-r--r--drivers/staging/iio/gyro/Kconfig6
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c14
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c14
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c58
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c9
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h5
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c101
-rw-r--r--drivers/staging/iio/iio.h144
-rw-r--r--drivers/staging/iio/iio_core.h11
-rw-r--r--drivers/staging/iio/iio_core_trigger.h3
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c49
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c12
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c43
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c282
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c23
-rw-r--r--drivers/staging/iio/industrialio-buffer.c346
-rw-r--r--drivers/staging/iio/industrialio-core.c89
-rw-r--r--drivers/staging/iio/industrialio-trigger.c36
-rw-r--r--drivers/staging/iio/kfifo_buf.c72
-rw-r--r--drivers/staging/iio/kfifo_buf.h2
-rw-r--r--drivers/staging/iio/light/isl29018.c21
-rw-r--r--drivers/staging/iio/light/tsl2563.c25
-rw-r--r--drivers/staging/iio/light/tsl2583.c31
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c18
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c19
-rw-r--r--drivers/staging/iio/meter/ade7753.c14
-rw-r--r--drivers/staging/iio/meter/ade7754.c14
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c50
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c10
-rw-r--r--drivers/staging/iio/meter/ade7759.c14
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c14
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c14
-rw-r--r--drivers/staging/iio/ring_sw.c151
-rw-r--r--drivers/staging/iio/ring_sw.h2
-rw-r--r--drivers/staging/iio/sysfs.h43
-rw-r--r--drivers/staging/iio/trigger.h2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c1
-rw-r--r--drivers/staging/iio/types.h49
-rw-r--r--drivers/staging/intel_sst/Kconfig19
-rw-r--r--drivers/staging/intel_sst/Makefile7
-rw-r--r--drivers/staging/intel_sst/TODO13
-rw-r--r--drivers/staging/intel_sst/intel_sst.c649
-rw-r--r--drivers/staging/intel_sst/intel_sst.h162
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c1460
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h623
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c564
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c496
-rw-r--r--drivers/staging/intel_sst/intel_sst_fw_ipc.h416
-rw-r--r--drivers/staging/intel_sst/intel_sst_ioctl.h440
-rw-r--r--drivers/staging/intel_sst/intel_sst_ipc.c774
-rw-r--r--drivers/staging/intel_sst/intel_sst_pvt.c313
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream.c583
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c1273
-rw-r--r--drivers/staging/intel_sst/intelmid.c1022
-rw-r--r--drivers/staging/intel_sst/intelmid.h209
-rw-r--r--drivers/staging/intel_sst/intelmid_adc_control.h193
-rw-r--r--drivers/staging/intel_sst/intelmid_ctrl.c921
-rw-r--r--drivers/staging/intel_sst/intelmid_msic_control.c1047
-rw-r--r--drivers/staging/intel_sst/intelmid_pvt.c173
-rw-r--r--drivers/staging/intel_sst/intelmid_snd_control.h123
-rw-r--r--drivers/staging/intel_sst/intelmid_v0_control.c866
-rw-r--r--drivers/staging/intel_sst/intelmid_v1_control.c978
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c1156
-rw-r--r--drivers/staging/keucr/smilmain.c4
-rw-r--r--drivers/staging/keucr/usb.c24
-rw-r--r--drivers/staging/line6/Makefile3
-rw-r--r--drivers/staging/line6/capture.c44
-rw-r--r--drivers/staging/line6/capture.h2
-rw-r--r--drivers/staging/line6/driver.c74
-rw-r--r--drivers/staging/line6/driver.h10
-rw-r--r--drivers/staging/line6/midi.c37
-rw-r--r--drivers/staging/line6/midi.h4
-rw-r--r--drivers/staging/line6/pcm.c115
-rw-r--r--drivers/staging/line6/pcm.h8
-rw-r--r--drivers/staging/line6/playback.c53
-rw-r--r--drivers/staging/line6/playback.h2
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/podhd.c154
-rw-r--r--drivers/staging/line6/podhd.h30
-rw-r--r--drivers/staging/line6/revision.h2
-rw-r--r--drivers/staging/line6/toneport.c6
-rw-r--r--drivers/staging/line6/usbdefs.h91
-rw-r--r--drivers/staging/line6/variax.c6
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c15
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c21
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c24
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c25
-rw-r--r--drivers/staging/media/lirc/lirc_ttusbir.c22
-rw-r--r--drivers/staging/mei/init.c49
-rw-r--r--drivers/staging/mei/interface.c8
-rw-r--r--drivers/staging/mei/interface.h11
-rw-r--r--drivers/staging/mei/interrupt.c322
-rw-r--r--drivers/staging/mei/iorw.c77
-rw-r--r--drivers/staging/mei/main.c576
-rw-r--r--drivers/staging/mei/mei.txt226
-rw-r--r--drivers/staging/mei/mei_dev.h13
-rw-r--r--drivers/staging/mei/wd.c32
-rw-r--r--drivers/staging/nvec/nvec.c30
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c18
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c10
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c10
-rw-r--r--drivers/staging/omapdrm/Kconfig25
-rw-r--r--drivers/staging/omapdrm/Makefile21
-rw-r--r--drivers/staging/omapdrm/TODO38
-rw-r--r--drivers/staging/omapdrm/omap_connector.c371
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c326
-rw-r--r--drivers/staging/omapdrm/omap_debugfs.c42
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h187
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c830
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.h135
-rw-r--r--drivers/staging/omapdrm/omap_drm.h123
-rw-r--r--drivers/staging/omapdrm/omap_drv.c821
-rw-r--r--drivers/staging/omapdrm/omap_drv.h135
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c171
-rw-r--r--drivers/staging/omapdrm/omap_fb.c243
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c372
-rw-r--r--drivers/staging/omapdrm/omap_gem.c1231
-rw-r--r--drivers/staging/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/staging/omapdrm/omap_priv.h47
-rw-r--r--drivers/staging/omapdrm/tcm-sita.c703
-rw-r--r--drivers/staging/omapdrm/tcm-sita.h95
-rw-r--r--drivers/staging/omapdrm/tcm.h326
-rw-r--r--drivers/staging/phison/phison.c2
-rw-r--r--drivers/staging/pohmelfs/dir.c11
-rw-r--r--drivers/staging/pohmelfs/inode.c9
-rw-r--r--drivers/staging/pohmelfs/netfs.h2
-rw-r--r--drivers/staging/rtl8192e/Kconfig50
-rw-r--r--drivers/staging/rtl8192e/Makefile46
-rw-r--r--drivers/staging/rtl8192e/dot11d.c4
-rw-r--r--drivers/staging/rtl8192e/dot11d.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h (renamed from drivers/staging/rtl8192e/r8190P_def.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c (renamed from drivers/staging/rtl8192e/r8192E_dev.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h (renamed from drivers/staging/rtl8192e/r8192E_dev.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c (renamed from drivers/staging/rtl8192e/r8192E_firmware.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h (renamed from drivers/staging/rtl8192e/r8192E_firmware.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h (renamed from drivers/staging/rtl8192e/r8192E_hw.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c (renamed from drivers/staging/rtl8192e/r8192E_hwimg.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h (renamed from drivers/staging/rtl8192e/r8192E_hwimg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c (renamed from drivers/staging/rtl8192e/r8192E_phy.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h (renamed from drivers/staging/rtl8192e/r8192E_phy.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h (renamed from drivers/staging/rtl8192e/r8192E_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h (renamed from drivers/staging/rtl8192e/r819xE_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c (renamed from drivers/staging/rtl8192e/rtl_cam.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h (renamed from drivers/staging/rtl8192e/rtl_cam.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c (renamed from drivers/staging/rtl8192e/rtl_core.c)68
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h (renamed from drivers/staging/rtl8192e/rtl_core.h)53
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h (renamed from drivers/staging/rtl8192e/rtl_crypto.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_debug.c (renamed from drivers/staging/rtl8192e/rtl_debug.c)79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c (renamed from drivers/staging/rtl8192e/rtl_dm.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h (renamed from drivers/staging/rtl8192e/rtl_dm.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c (renamed from drivers/staging/rtl8192e/rtl_eeprom.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h (renamed from drivers/staging/rtl8192e/rtl_eeprom.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c (renamed from drivers/staging/rtl8192e/rtl_ethtool.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c (renamed from drivers/staging/rtl8192e/rtl_pci.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h (renamed from drivers/staging/rtl8192e/rtl_pci.h)1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c (renamed from drivers/staging/rtl8192e/rtl_pm.c)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h (renamed from drivers/staging/rtl8192e/rtl_pm.h)4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c (renamed from drivers/staging/rtl8192e/rtl_ps.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h (renamed from drivers/staging/rtl8192e/rtl_ps.h)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c (renamed from drivers/staging/rtl8192e/rtl_wx.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h (renamed from drivers/staging/rtl8192e/rtl_wx.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl_debug.h299
-rw-r--r--drivers/staging/rtl8192e/rtllib.h113
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c80
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.h64
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c26
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c25
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h86
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c96
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c109
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c244
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts5139/rts51x.c32
-rw-r--r--drivers/staging/rts5139/rts51x.h1
-rw-r--r--drivers/staging/rts5139/rts51x_transport.h1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c3
-rw-r--r--drivers/staging/sep/sep_driver.c4
-rw-r--r--drivers/staging/serial/68360serial.c4
-rw-r--r--drivers/staging/sm7xx/smtcfb.c6
-rw-r--r--drivers/staging/speakup/kobjects.c3
-rw-r--r--drivers/staging/speakup/main.c5
-rw-r--r--drivers/staging/spectra/Kconfig41
-rw-r--r--drivers/staging/spectra/Makefile11
-rw-r--r--drivers/staging/spectra/README29
-rw-r--r--drivers/staging/spectra/ffsdefs.h58
-rw-r--r--drivers/staging/spectra/ffsport.c834
-rw-r--r--drivers/staging/spectra/ffsport.h85
-rw-r--r--drivers/staging/spectra/flash.c4305
-rw-r--r--drivers/staging/spectra/flash.h198
-rw-r--r--drivers/staging/spectra/lld.c339
-rw-r--r--drivers/staging/spectra/lld.h111
-rw-r--r--drivers/staging/spectra/lld_cdma.c910
-rw-r--r--drivers/staging/spectra/lld_cdma.h123
-rw-r--r--drivers/staging/spectra/lld_emu.c776
-rw-r--r--drivers/staging/spectra/lld_emu.h51
-rw-r--r--drivers/staging/spectra/lld_mtd.c683
-rw-r--r--drivers/staging/spectra/lld_mtd.h51
-rw-r--r--drivers/staging/spectra/lld_nand.c2619
-rw-r--r--drivers/staging/spectra/lld_nand.h131
-rw-r--r--drivers/staging/spectra/nand_regs.h619
-rw-r--r--drivers/staging/spectra/spectraswconfig.h82
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/stub_dev.c5
-rw-r--r--drivers/staging/usbip/stub_rx.c2
-rw-r--r--drivers/staging/usbip/usbip_common.c61
-rw-r--r--drivers/staging/usbip/usbip_common.h17
-rw-r--r--drivers/staging/usbip/vhci_rx.c12
-rw-r--r--drivers/staging/vme/TODO67
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c31
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c36
-rw-r--r--drivers/staging/vme/devices/Kconfig13
-rw-r--r--drivers/staging/vme/devices/Makefile3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h249
-rw-r--r--drivers/staging/vme/devices/vme_pio2_cntr.c71
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c524
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c232
-rw-r--r--drivers/staging/vme/devices/vme_user.h10
-rw-r--r--drivers/staging/vme/vme.c69
-rw-r--r--drivers/staging/vme/vme.h38
-rw-r--r--drivers/staging/vme/vme_api.txt61
-rw-r--r--drivers/staging/vme/vme_bridge.h38
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/ioctl.c8
-rw-r--r--drivers/staging/vt6655/iwctl.c12
-rw-r--r--drivers/staging/vt6655/iwctl.h5
-rw-r--r--drivers/staging/vt6656/80211mgr.c35
-rw-r--r--drivers/staging/vt6656/baseband.c61
-rw-r--r--drivers/staging/vt6656/bssdb.c100
-rw-r--r--drivers/staging/vt6656/card.c14
-rw-r--r--drivers/staging/vt6656/card.h4
-rw-r--r--drivers/staging/vt6656/int.c5
-rw-r--r--drivers/staging/vt6656/int.h2
-rw-r--r--drivers/staging/vt6656/ioctl.c8
-rw-r--r--drivers/staging/vt6656/iwctl.c12
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/mac.c4
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c26
-rw-r--r--drivers/staging/winbond/wbusb.c13
-rw-r--r--drivers/staging/wlags49_h2/debug.h2
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h2
-rw-r--r--drivers/staging/wlags49_h2/hcf.c6
-rw-r--r--drivers/staging/wlags49_h2/hcf.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfcfg.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfdef.h6
-rw-r--r--drivers/staging/wlags49_h2/mdd.h6
-rw-r--r--drivers/staging/wlags49_h2/mmd.c2
-rw-r--r--drivers/staging/wlags49_h2/mmd.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_if.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c17
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h4
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c14
-rw-r--r--drivers/staging/xgifb/Makefile2
-rw-r--r--drivers/staging/xgifb/XGI_main.h13
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c158
-rw-r--r--drivers/staging/xgifb/XGIfb.h3
-rw-r--r--drivers/staging/xgifb/vb_ext.c444
-rw-r--r--drivers/staging/xgifb/vb_ext.h9
-rw-r--r--drivers/staging/xgifb/vb_init.c276
-rw-r--r--drivers/staging/xgifb/vb_setmode.c1003
-rw-r--r--drivers/staging/xgifb/vb_setmode.h52
-rw-r--r--drivers/staging/xgifb/vb_struct.h4
-rw-r--r--drivers/staging/xgifb/vb_table.h27
-rw-r--r--drivers/staging/xgifb/vgatypes.h2
-rw-r--r--drivers/staging/zcache/zcache-main.c6
-rw-r--r--drivers/staging/zram/zram_drv.c3
-rw-r--r--drivers/staging/zram/zram_sysfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c41
-rw-r--r--drivers/target/target_core_alua.c27
-rw-r--r--drivers/target/target_core_cdb.c20
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c30
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_pr.c240
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c258
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c260
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c3
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/pty.c26
-rw-r--r--drivers/tty/serial/8250.c98
-rw-r--r--drivers/tty/serial/8250.h26
-rw-r--r--drivers/tty/serial/8250_dw.c12
-rw-r--r--drivers/tty/serial/8250_fsl.c63
-rw-r--r--drivers/tty/serial/8250_pci.c47
-rw-r--r--drivers/tty/serial/Kconfig83
-rw-r--r--drivers/tty/serial/Makefile7
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/apbuart.c4
-rw-r--r--drivers/tty/serial/atmel_serial.c12
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c22
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h5
-rw-r--r--drivers/tty/serial/bfin_uart.c83
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/imx.c148
-rw-r--r--drivers/tty/serial/m32r_sio.c7
-rw-r--r--drivers/tty/serial/max3100.c1
-rw-r--r--drivers/tty/serial/max3107-aava.c1
-rw-r--r--drivers/tty/serial/max3107.c1
-rw-r--r--drivers/tty/serial/mfd.c18
-rw-r--r--drivers/tty/serial/mrst_max3110.c1
-rw-r--r--drivers/tty/serial/msm_serial_hs.c23
-rw-r--r--drivers/tty/serial/mxs-auart.c13
-rw-r--r--drivers/tty/serial/omap-serial.c430
-rw-r--r--drivers/tty/serial/pch_uart.c160
-rw-r--r--drivers/tty/serial/pmac_zilog.c423
-rw-r--r--drivers/tty/serial/pmac_zilog.h19
-rw-r--r--drivers/tty/serial/s3c2410.c115
-rw-r--r--drivers/tty/serial/s3c2412.c149
-rw-r--r--drivers/tty/serial/s3c2440.c178
-rw-r--r--drivers/tty/serial/s3c6400.c149
-rw-r--r--drivers/tty/serial/s5pv210.c158
-rw-r--r--drivers/tty/serial/samsung.c639
-rw-r--r--drivers/tty/serial/samsung.h32
-rw-r--r--drivers/tty/serial/sc26xx.c14
-rw-r--r--drivers/tty/serial/serial_core.c325
-rw-r--r--drivers/tty/serial/serial_cs.c8
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c783
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h185
-rw-r--r--drivers/tty/serial/timbuart.c15
-rw-r--r--drivers/tty/serial/ucc_uart.c3
-rw-r--r--drivers/tty/serial/vr41xx_siu.c13
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_io.c311
-rw-r--r--drivers/tty/tty_ldisc.c22
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/uio/uio_pdrv.c12
-rw-r--r--drivers/uio/uio_pdrv_genirq.c13
-rw-r--r--drivers/uio/uio_pruss.c14
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/atm/speedtch.c17
-rw-r--r--drivers/usb/atm/ueagle-atm.c31
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c15
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c1
-rw-r--r--drivers/usb/class/cdc-acm.c348
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/cdc-wdm.c19
-rw-r--r--drivers/usb/class/usblp.c15
-rw-r--r--drivers/usb/class/usbtmc.c17
-rw-r--r--drivers/usb/core/devio.c189
-rw-r--r--drivers/usb/core/driver.c36
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd-pci.c4
-rw-r--r--drivers/usb/core/hcd.c31
-rw-r--r--drivers/usb/core/hub.c89
-rw-r--r--drivers/usb/core/inode.c31
-rw-r--r--drivers/usb/core/quirks.c5
-rw-r--r--drivers/usb/core/sysfs.c4
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h14
-rw-r--r--drivers/usb/dwc3/Kconfig5
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c211
-rw-r--r--drivers/usb/dwc3/core.h62
-rw-r--r--drivers/usb/dwc3/debugfs.c99
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c43
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c51
-rw-r--r--drivers/usb/dwc3/ep0.c160
-rw-r--r--drivers/usb/dwc3/gadget.c440
-rw-r--r--drivers/usb/dwc3/gadget.h29
-rw-r--r--drivers/usb/dwc3/host.c102
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/gadget/Kconfig29
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/amd5536udc.c4
-rw-r--r--drivers/usb/gadget/at91_udc.c16
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c36
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h2
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/dbgp.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c15
-rw-r--r--drivers/usb/gadget/epautoconf.c9
-rw-r--r--drivers/usb/gadget/f_fs.c33
-rw-r--r--drivers/usb/gadget/f_mass_storage.c61
-rw-r--r--drivers/usb/gadget/f_phonet.c11
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/file_storage.c64
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c20
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c77
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/goku_udc.c3
-rw-r--r--drivers/usb/gadget/imx_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c32
-rw-r--r--drivers/usb/gadget/langwell_udc.c2
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/mv_udc.h7
-rw-r--r--drivers/usb/gadget/mv_udc_core.c344
-rw-r--r--drivers/usb/gadget/net2272.c4
-rw-r--r--drivers/usb/gadget/net2280.c4
-rw-r--r--drivers/usb/gadget/omap_udc.c3
-rw-r--r--drivers/usb/gadget/pch_udc.c4
-rw-r--r--drivers/usb/gadget/printer.c6
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c19
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c137
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c8
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h2
-rw-r--r--drivers/usb/gadget/udc-core.c26
-rw-r--r--drivers/usb/gadget/usbstring.c73
-rw-r--r--drivers/usb/host/Kconfig15
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c69
-rw-r--r--drivers/usb/host/ehci-mv.c391
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c19
-rw-r--r--drivers/usb/host/ehci-orion.c10
-rw-r--r--drivers/usb/host/ehci-ps3.c30
-rw-r--r--drivers/usb/host/ehci-pxa168.c2
-rw-r--r--drivers/usb/host/ehci-q.c13
-rw-r--r--drivers/usb/host/ehci-s5p.c4
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/ehci-tegra.c71
-rw-r--r--drivers/usb/host/ehci-vt8500.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c12
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c12
-rw-r--r--drivers/usb/host/hwa-hc.c16
-rw-r--r--drivers/usb/host/imx21-hcd.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c74
-rw-r--r--drivers/usb/host/isp1760-if.c27
-rw-r--r--drivers/usb/host/ohci-at91.c12
-rw-r--r--drivers/usb/host/ohci-au1xxx.c5
-rw-r--r--drivers/usb/host/ohci-dbg.c18
-rw-r--r--drivers/usb/host/ohci-ep93xx.c2
-rw-r--r--drivers/usb/host/ohci-exynos.c274
-rw-r--r--drivers/usb/host/ohci-hcd.c33
-rw-r--r--drivers/usb/host/ohci-hub.c7
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-omap3.c18
-rw-r--r--drivers/usb/host/ohci-pci.c5
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-q.c8
-rw-r--r--drivers/usb/host/ohci-s3c2410.c55
-rw-r--r--drivers/usb/host/ohci-sh.c1
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/ohci-xls.c2
-rw-r--r--drivers/usb/host/ohci.h14
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c19
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/whci/qset.c6
-rw-r--r--drivers/usb/host/xhci-hub.c18
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-ring.c113
-rw-r--r--drivers/usb/host/xhci.c36
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/image/microtek.c14
-rw-r--r--drivers/usb/misc/adutux.c35
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c22
-rw-r--r--drivers/usb/misc/cytherm.c26
-rw-r--r--drivers/usb/misc/emi26.c13
-rw-r--r--drivers/usb/misc/emi62.c17
-rw-r--r--drivers/usb/misc/idmouse.c24
-rw-r--r--drivers/usb/misc/iowarrior.c15
-rw-r--r--drivers/usb/misc/isight_firmware.c19
-rw-r--r--drivers/usb/misc/ldusb.c27
-rw-r--r--drivers/usb/misc/legousbtower.c48
-rw-r--r--drivers/usb/misc/rio500.c28
-rw-r--r--drivers/usb/misc/trancevibrator.c21
-rw-r--r--drivers/usb/misc/usblcd.c20
-rw-r--r--drivers/usb/misc/usbled.c20
-rw-r--r--drivers/usb/misc/usbsevseg.c18
-rw-r--r--drivers/usb/misc/usbtest.c1
-rw-r--r--drivers/usb/misc/yurex.c22
-rw-r--r--drivers/usb/musb/Kconfig61
-rw-r--r--drivers/usb/musb/Makefile26
-rw-r--r--drivers/usb/musb/cppi_dma.c2
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/musb/musb_core.h4
-rw-r--r--drivers/usb/musb/musb_debug.h4
-rw-r--r--drivers/usb/musb/musb_debugfs.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c1
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/musb/omap2430.c61
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/ux500_dma.c39
-rw-r--r--drivers/usb/otg/Kconfig32
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/fsl_otg.c15
-rw-r--r--drivers/usb/otg/mv_otg.c957
-rw-r--r--drivers/usb/otg/mv_otg.h165
-rw-r--r--drivers/usb/renesas_usbhs/common.c52
-rw-r--r--drivers/usb/renesas_usbhs/common.h9
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c9
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h3
-rw-r--r--drivers/usb/renesas_usbhs/mod.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c238
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c953
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c31
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h1
-rw-r--r--drivers/usb/serial/ChangeLog.history730
-rw-r--r--drivers/usb/serial/belkin_sa.c43
-rw-r--r--drivers/usb/serial/ch341.c3
-rw-r--r--drivers/usb/serial/cp210x.c59
-rw-r--r--drivers/usb/serial/cyberjack.c33
-rw-r--r--drivers/usb/serial/cypress_m8.c29
-rw-r--r--drivers/usb/serial/digi_acceleport.c227
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/garmin_gps.c9
-rw-r--r--drivers/usb/serial/generic.c83
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_ti.c28
-rw-r--r--drivers/usb/serial/ipaq.c34
-rw-r--r--drivers/usb/serial/ir-usb.c32
-rw-r--r--drivers/usb/serial/iuu_phoenix.c3
-rw-r--r--drivers/usb/serial/keyspan.c90
-rw-r--r--drivers/usb/serial/keyspan_pda.c66
-rw-r--r--drivers/usb/serial/kobil_sct.c25
-rw-r--r--drivers/usb/serial/mct_u232.c46
-rw-r--r--drivers/usb/serial/mos7720.c18
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/omninet.c51
-rw-r--r--drivers/usb/serial/opticon.c1
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/serial/oti6858.c23
-rw-r--r--drivers/usb/serial/pl2303.c17
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/symbolserial.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c13
-rw-r--r--drivers/usb/serial/usb-serial.c98
-rw-r--r--drivers/usb/serial/usb_debug.c13
-rw-r--r--drivers/usb/serial/whiteheat.c58
-rw-r--r--drivers/usb/storage/alauda.c15
-rw-r--r--drivers/usb/storage/cypress_atacb.c15
-rw-r--r--drivers/usb/storage/datafab.c15
-rw-r--r--drivers/usb/storage/ene_ub6250.c25
-rw-r--r--drivers/usb/storage/freecom.c15
-rw-r--r--drivers/usb/storage/isd200.c17
-rw-r--r--drivers/usb/storage/jumpshot.c15
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c15
-rw-r--r--drivers/usb/storage/realtek_cr.c27
-rw-r--r--drivers/usb/storage/sddr09.c15
-rw-r--r--drivers/usb/storage/sddr55.c15
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/uas.c13
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/usb.c14
-rw-r--r--drivers/usb/usb-skeleton.c61
-rw-r--r--drivers/usb/wusbcore/Kconfig1
-rw-r--r--drivers/usb/wusbcore/cbaf.c12
-rw-r--r--drivers/usb/wusbcore/security.c2
-rw-r--r--drivers/uwb/est.c2
-rw-r--r--drivers/uwb/hwa-rc.c12
-rw-r--r--drivers/uwb/i1480/dfu/usb.c22
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/backlight/88pm860x_bl.c12
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c12
-rw-r--r--drivers/video/backlight/adx_bl.c182
-rw-r--r--drivers/video/backlight/backlight.c6
-rw-r--r--drivers/video/backlight/da903x_bl.c12
-rw-r--r--drivers/video/backlight/ep93xx_bl.c13
-rw-r--r--drivers/video/backlight/generic_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_lcd.c13
-rw-r--r--drivers/video/backlight/lcd.c26
-rw-r--r--drivers/video/backlight/ld9040.c71
-rw-r--r--drivers/video/backlight/max8925_bl.c12
-rw-r--r--drivers/video/backlight/omap1_bl.c13
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c12
-rw-r--r--drivers/video/backlight/platform_lcd.c22
-rw-r--r--drivers/video/backlight/pwm_bl.c33
-rw-r--r--drivers/video/backlight/wm831x_bl.c12
-rw-r--r--drivers/video/bf54x-lq043fb.c2
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/mxsfb.c8
-rw-r--r--drivers/video/offb.c71
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/smscufx.c19
-rw-r--r--drivers/video/udlfb.c19
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/video/xen-fbfront.c9
-rw-r--r--drivers/virtio/virtio_mmio.c6
-rw-r--r--drivers/virtio/virtio_pci.c8
-rw-r--r--drivers/w1/masters/ds2490.c21
-rw-r--r--drivers/w1/slaves/w1_therm.c36
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/watchdog/Kconfig13
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c17
-rw-r--r--drivers/watchdog/at91sam9_wdt.c22
-rw-r--r--drivers/watchdog/at91sam9_wdt.h6
-rw-r--r--drivers/watchdog/ath79_wdt.c6
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c13
-rw-r--r--drivers/watchdog/coh901327_wdt.c6
-rw-r--r--drivers/watchdog/cpu5wdt.c3
-rw-r--r--drivers/watchdog/cpwd.c13
-rw-r--r--drivers/watchdog/davinci_wdt.c13
-rw-r--r--drivers/watchdog/dw_wdt.c12
-rw-r--r--drivers/watchdog/eurotechwdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c5
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/ibmasr.c4
-rw-r--r--drivers/watchdog/indydog.c4
-rw-r--r--drivers/watchdog/iop_wdt.c5
-rw-r--r--drivers/watchdog/ixp2000_wdt.c3
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c13
-rw-r--r--drivers/watchdog/ks8695_wdt.c3
-rw-r--r--drivers/watchdog/lantiq_wdt.c3
-rw-r--r--drivers/watchdog/max63xx_wdt.c13
-rw-r--r--drivers/watchdog/mtx-1_wdt.c13
-rw-r--r--drivers/watchdog/nuc900_wdt.c13
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c13
-rw-r--r--drivers/watchdog/omap_wdt.c17
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/watchdog/pcwd_usb.c35
-rw-r--r--drivers/watchdog/pnx4008_wdt.c13
-rw-r--r--drivers/watchdog/rc32434_wdt.c13
-rw-r--r--drivers/watchdog/rdc321x_wdt.c13
-rw-r--r--drivers/watchdog/riowd.c13
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c13
-rw-r--r--drivers/watchdog/ts72xx_wdt.c12
-rw-r--r--drivers/watchdog/twl4030_wdt.c12
-rw-r--r--drivers/watchdog/via_wdt.c267
-rw-r--r--drivers/watchdog/w83627hf_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c23
-rw-r--r--drivers/watchdog/wm8350_wdt.c12
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events.c77
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntalloc.c121
-rw-r--r--drivers/xen/gntdev.c34
-rw-r--r--drivers/xen/grant-table.c518
-rw-r--r--drivers/xen/privcmd.c (renamed from drivers/xen/xenfs/privcmd.c)41
-rw-r--r--drivers/xen/privcmd.h3
-rw-r--r--drivers/xen/swiotlb-xen.c6
-rw-r--r--drivers/xen/xen-balloon.c86
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c4
-rw-r--r--drivers/xen/xen-pciback/xenbus.c19
-rw-r--r--drivers/xen/xen-selfballoon.c76
-rw-r--r--drivers/xen/xenbus/Makefile2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c193
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h4
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c90
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c (renamed from drivers/xen/xenfs/xenbus.c)40
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h6
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c36
-rw-r--r--drivers/xen/xenfs/Makefile2
-rw-r--r--drivers/xen/xenfs/super.c6
-rw-r--r--drivers/xen/xenfs/xenfs.h2
-rw-r--r--drivers/zorro/zorro.ids2
-rw-r--r--firmware/README.AddingFirmware3
-rw-r--r--fs/9p/cache.c64
-rw-r--r--fs/9p/fid.c8
-rw-r--r--fs/9p/v9fs.c59
-rw-r--r--fs/9p/v9fs_vfs.h4
-rw-r--r--fs/9p/vfs_addr.c13
-rw-r--r--fs/9p/vfs_dentry.c12
-rw-r--r--fs/9p/vfs_dir.c13
-rw-r--r--fs/9p/vfs_file.c34
-rw-r--r--fs/9p/vfs_inode.c203
-rw-r--r--fs/9p/vfs_inode_dotl.c137
-rw-r--r--fs/9p/vfs_super.c14
-rw-r--r--fs/9p/xattr.c16
-rw-r--r--fs/Kconfig10
-rw-r--r--fs/Kconfig.binfmt3
-rw-r--r--fs/Makefile3
-rw-r--r--fs/adfs/super.c4
-rw-r--r--fs/affs/affs.h6
-rw-r--r--fs/affs/amigaffs.c6
-rw-r--r--fs/affs/namei.c8
-rw-r--r--fs/affs/super.c1
-rw-r--r--fs/afs/dir.c12
-rw-r--r--fs/afs/mntpt.c4
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/attr.c4
-rw-r--r--fs/autofs4/autofs_i.h2
-rw-r--r--fs/autofs4/dev-ioctl.c10
-rw-r--r--fs/autofs4/inode.c8
-rw-r--r--fs/autofs4/root.c4
-rw-r--r--fs/bad_inode.c6
-rw-r--r--fs/befs/linuxvfs.c1
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/bfs/inode.c1
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_misc.c6
-rw-r--r--fs/block_dev.c38
-rw-r--r--fs/btrfs/async-thread.c122
-rw-r--r--fs/btrfs/async-thread.h4
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/delayed-inode.c4
-rw-r--r--fs/btrfs/disk-io.c42
-rw-r--r--fs/btrfs/extent-tree.c171
-rw-r--r--fs/btrfs/extent_io.c51
-rw-r--r--fs/btrfs/file.c13
-rw-r--r--fs/btrfs/free-space-cache.c4
-rw-r--r--fs/btrfs/inode.c204
-rw-r--r--fs/btrfs/ioctl.c30
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c13
-rw-r--r--fs/btrfs/super.c43
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/buffer.c50
-rw-r--r--fs/cachefiles/interface.c1
-rw-r--r--fs/ceph/addr.c8
-rw-r--r--fs/ceph/caps.c191
-rw-r--r--fs/ceph/dir.c65
-rw-r--r--fs/ceph/file.c23
-rw-r--r--fs/ceph/inode.c54
-rw-r--r--fs/ceph/ioctl.c4
-rw-r--r--fs/ceph/mds_client.c33
-rw-r--r--fs/ceph/mds_client.h2
-rw-r--r--fs/ceph/snap.c16
-rw-r--r--fs/ceph/super.c23
-rw-r--r--fs/ceph/super.h33
-rw-r--r--fs/ceph/xattr.c42
-rw-r--r--fs/char_dev.c6
-rw-r--r--fs/cifs/cifs_fs_sb.h4
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/cifsfs.h6
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/connect.c8
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/cifs/inode.c4
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smbencrypt.c6
-rw-r--r--fs/coda/dir.c8
-rw-r--r--fs/coda/inode.c1
-rw-r--r--fs/compat.c13
-rw-r--r--fs/compat_ioctl.c38
-rw-r--r--fs/configfs/configfs_internal.h4
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/configfs/inode.c8
-rw-r--r--fs/configfs/mount.c36
-rw-r--r--fs/cramfs/inode.c3
-rw-r--r--fs/dcache.c186
-rw-r--r--fs/debugfs/file.c117
-rw-r--r--fs/debugfs/inode.c16
-rw-r--r--fs/devpts/inode.c8
-rw-r--r--fs/dlm/config.c130
-rw-r--r--fs/dlm/config.h17
-rw-r--r--fs/dlm/debug_fs.c28
-rw-r--r--fs/dlm/dir.c1
-rw-r--r--fs/dlm/dlm_internal.h60
-rw-r--r--fs/dlm/lock.c87
-rw-r--r--fs/dlm/lockspace.c71
-rw-r--r--fs/dlm/lowcomms.c2
-rw-r--r--fs/dlm/member.c486
-rw-r--r--fs/dlm/member.h10
-rw-r--r--fs/dlm/rcom.c99
-rw-r--r--fs/dlm/rcom.h2
-rw-r--r--fs/dlm/recover.c87
-rw-r--r--fs/dlm/recoverd.c53
-rw-r--r--fs/dlm/user.c5
-rw-r--r--fs/ecryptfs/inode.c29
-rw-r--r--fs/ecryptfs/super.c5
-rw-r--r--fs/efs/super.c1
-rw-r--r--fs/exec.c6
-rw-r--r--fs/exofs/Kconfig11
-rw-r--r--fs/exofs/Kconfig.ore12
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/exofs/exofs.h2
-rw-r--r--fs/exofs/inode.c2
-rw-r--r--fs/exofs/namei.c6
-rw-r--r--fs/exofs/ore.c8
-rw-r--r--fs/exofs/ore_raid.c78
-rw-r--r--fs/exofs/super.c3
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h2
-rw-r--r--fs/ext2/ialloc.c9
-rw-r--r--fs/ext2/inode.c5
-rw-r--r--fs/ext2/ioctl.c12
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext2/super.c8
-rw-r--r--fs/ext2/xattr.c1
-rw-r--r--fs/ext2/xattr_security.c1
-rw-r--r--fs/ext2/xattr_trusted.c1
-rw-r--r--fs/ext2/xattr_user.c1
-rw-r--r--fs/ext3/ialloc.c10
-rw-r--r--fs/ext3/inode.c45
-rw-r--r--fs/ext3/ioctl.c26
-rw-r--r--fs/ext3/namei.c17
-rw-r--r--fs/ext3/super.c22
-rw-r--r--fs/ext3/xattr_security.c1
-rw-r--r--fs/ext3/xattr_trusted.c1
-rw-r--r--fs/ext3/xattr_user.c1
-rw-r--r--fs/ext4/balloc.c4
-rw-r--r--fs/ext4/block_validity.c1
-rw-r--r--fs/ext4/ext4.h31
-rw-r--r--fs/ext4/extents.c14
-rw-r--r--fs/ext4/ialloc.c26
-rw-r--r--fs/ext4/indirect.c1
-rw-r--r--fs/ext4/inode.c202
-rw-r--r--fs/ext4/ioctl.c116
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/migrate.c1
-rw-r--r--fs/ext4/namei.c8
-rw-r--r--fs/ext4/page-io.c13
-rw-r--r--fs/ext4/resize.c1175
-rw-r--r--fs/ext4/super.c51
-rw-r--r--fs/ext4/xattr_security.c6
-rw-r--r--fs/ext4/xattr_trusted.c1
-rw-r--r--fs/ext4/xattr_user.c1
-rw-r--r--fs/fat/fat.h6
-rw-r--r--fs/fat/file.c8
-rw-r--r--fs/fat/inode.c33
-rw-r--r--fs/fat/namei_msdos.c4
-rw-r--r--fs/fat/namei_vfat.c7
-rw-r--r--fs/fhandle.c8
-rw-r--r--fs/file_table.c23
-rw-r--r--fs/filesystems.c1
-rw-r--r--fs/freevxfs/vxfs_inode.c5
-rw-r--r--fs/fs-writeback.c37
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/fuse/dir.c12
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c35
-rw-r--r--fs/gfs2/acl.c14
-rw-r--r--fs/gfs2/aops.c18
-rw-r--r--fs/gfs2/bmap.c26
-rw-r--r--fs/gfs2/dir.c64
-rw-r--r--fs/gfs2/dir.h2
-rw-r--r--fs/gfs2/export.c3
-rw-r--r--fs/gfs2/file.c38
-rw-r--r--fs/gfs2/incore.h20
-rw-r--r--fs/gfs2/inode.c88
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/log.c6
-rw-r--r--fs/gfs2/main.c3
-rw-r--r--fs/gfs2/meta_io.c4
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c91
-rw-r--r--fs/gfs2/rgrp.c293
-rw-r--r--fs/gfs2/rgrp.h16
-rw-r--r--fs/gfs2/super.c23
-rw-r--r--fs/gfs2/trans.h6
-rw-r--r--fs/gfs2/xattr.c48
-rw-r--r--fs/hfs/dir.c4
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfs/super.c5
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/hfsplus_fs.h4
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hfsplus/ioctl.c4
-rw-r--r--fs/hfsplus/options.c4
-rw-r--r--fs/hfsplus/super.c1
-rw-r--r--fs/hostfs/hostfs.h2
-rw-r--r--fs/hostfs/hostfs_kern.c11
-rw-r--r--fs/hpfs/namei.c6
-rw-r--r--fs/hpfs/super.c1
-rw-r--r--fs/hppfs/hppfs.c3
-rw-r--r--fs/hugetlbfs/inode.c66
-rw-r--r--fs/inode.c94
-rw-r--r--fs/internal.h30
-rw-r--r--fs/ioctl.c2
-rw-r--r--fs/isofs/inode.c12
-rw-r--r--fs/isofs/isofs.h6
-rw-r--r--fs/jbd/checkpoint.c2
-rw-r--r--fs/jbd/commit.c6
-rw-r--r--fs/jbd/journal.c3
-rw-r--r--fs/jbd/revoke.c34
-rw-r--r--fs/jbd/transaction.c38
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/revoke.c34
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/jffs2/dir.c14
-rw-r--r--fs/jffs2/erase.c17
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jffs2/readinode.c22
-rw-r--r--fs/jffs2/scan.c12
-rw-r--r--fs/jffs2/super.c9
-rw-r--r--fs/jffs2/wbuf.c38
-rw-r--r--fs/jffs2/writev.c32
-rw-r--r--fs/jfs/ioctl.c4
-rw-r--r--fs/jfs/jfs_logmgr.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c4
-rw-r--r--fs/jfs/namei.c6
-rw-r--r--fs/jfs/super.c5
-rw-r--r--fs/libfs.c2
-rw-r--r--fs/lockd/svcsubs.c2
-rw-r--r--fs/locks.c11
-rw-r--r--fs/logfs/dev_mtd.c78
-rw-r--r--fs/logfs/dir.c6
-rw-r--r--fs/logfs/inode.c3
-rw-r--r--fs/logfs/logfs.h2
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/minix/inode.c35
-rw-r--r--fs/minix/minix.h2
-rw-r--r--fs/minix/namei.c6
-rw-r--r--fs/mount.h76
-rw-r--r--fs/namei.c55
-rw-r--r--fs/namespace.c837
-rw-r--r--fs/ncpfs/dir.c18
-rw-r--r--fs/ncpfs/inode.c15
-rw-r--r--fs/ncpfs/ioctl.c2
-rw-r--r--fs/ncpfs/ncplib_kernel.h2
-rw-r--r--fs/ncpfs/symlink.c2
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/client.c12
-rw-r--r--fs/nfs/dir.c33
-rw-r--r--fs/nfs/file.c6
-rw-r--r--fs/nfs/idmap.c83
-rw-r--r--fs/nfs/inode.c46
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs3proc.c3
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4filelayout.c9
-rw-r--r--fs/nfs/nfs4proc.c206
-rw-r--r--fs/nfs/nfs4state.c137
-rw-r--r--fs/nfs/nfs4xdr.c137
-rw-r--r--fs/nfs/objlayout/objio_osd.c3
-rw-r--r--fs/nfs/objlayout/objlayout.c4
-rw-r--r--fs/nfs/pnfs.c42
-rw-r--r--fs/nfs/pnfs.h1
-rw-r--r--fs/nfs/proc.c3
-rw-r--r--fs/nfs/super.c92
-rw-r--r--fs/nfs/write.c27
-rw-r--r--fs/nfsd/nfs4callback.c2
-rw-r--r--fs/nfsd/nfs4proc.c4
-rw-r--r--fs/nfsd/nfs4recover.c12
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nfsd/nfsfh.c4
-rw-r--r--fs/nfsd/nfsfh.h2
-rw-r--r--fs/nfsd/vfs.c38
-rw-r--r--fs/nfsd/vfs.h12
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/nilfs2/ioctl.c38
-rw-r--r--fs/nilfs2/namei.c6
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/nilfs2/super.c8
-rw-r--r--fs/nls/nls_base.c73
-rw-r--r--fs/notify/fanotify/fanotify_user.c6
-rw-r--r--fs/notify/fsnotify.c9
-rw-r--r--fs/notify/vfsmount_mark.c19
-rw-r--r--fs/ntfs/inode.c9
-rw-r--r--fs/ntfs/inode.h2
-rw-r--r--fs/ntfs/super.c6
-rw-r--r--fs/ntfs/volume.h4
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c69
-rw-r--r--fs/ocfs2/aops.h14
-rw-r--r--fs/ocfs2/cluster/heartbeat.c194
-rw-r--r--fs/ocfs2/cluster/netdebug.c102
-rw-r--r--fs/ocfs2/cluster/tcp.c138
-rw-r--r--fs/ocfs2/cluster/tcp.h2
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h56
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c44
-rw-r--r--fs/ocfs2/dlm/dlmlock.c54
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c175
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c164
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c24
-rw-r--r--fs/ocfs2/dlmglue.c21
-rw-r--r--fs/ocfs2/extent_map.c96
-rw-r--r--fs/ocfs2/extent_map.h2
-rw-r--r--fs/ocfs2/file.c98
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h3
-rw-r--r--fs/ocfs2/ioctl.c15
-rw-r--r--fs/ocfs2/journal.c23
-rw-r--r--fs/ocfs2/journal.h5
-rw-r--r--fs/ocfs2/mmap.c53
-rw-r--r--fs/ocfs2/move_extents.c6
-rw-r--r--fs/ocfs2/namei.c8
-rw-r--r--fs/ocfs2/ocfs2.h51
-rw-r--r--fs/ocfs2/quota_local.c23
-rw-r--r--fs/ocfs2/slot_map.c4
-rw-r--r--fs/ocfs2/stack_o2cb.c71
-rw-r--r--fs/ocfs2/stack_user.c4
-rw-r--r--fs/ocfs2/super.c35
-rw-r--r--fs/ocfs2/xattr.c12
-rw-r--r--fs/ocfs2/xattr.h2
-rw-r--r--fs/omfs/dir.c6
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/omfs/omfs.h2
-rw-r--r--fs/open.c22
-rw-r--r--fs/openpromfs/inode.c1
-rw-r--r--fs/pipe.c7
-rw-r--r--fs/pnode.c120
-rw-r--r--fs/pnode.h36
-rw-r--r--fs/proc/array.c8
-rw-r--r--fs/proc/base.c543
-rw-r--r--fs/proc/generic.c8
-rw-r--r--fs/proc/inode.c19
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/proc/meminfo.c7
-rw-r--r--fs/proc/namespaces.c1
-rw-r--r--fs/proc/proc_net.c2
-rw-r--r--fs/proc/root.c78
-rw-r--r--fs/proc/stat.c67
-rw-r--r--fs/proc/uptime.c11
-rw-r--r--fs/proc_namespace.c333
-rw-r--r--fs/pstore/inode.c3
-rw-r--r--fs/pstore/platform.c49
-rw-r--r--fs/qnx4/inode.c8
-rw-r--r--fs/quota/dquot.c3
-rw-r--r--fs/quota/quota.c1
-rw-r--r--fs/ramfs/inode.c8
-rw-r--r--fs/reiserfs/bitmap.c94
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/reiserfs/ioctl.c8
-rw-r--r--fs/reiserfs/journal.c64
-rw-r--r--fs/reiserfs/namei.c8
-rw-r--r--fs/reiserfs/super.c200
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/romfs/mmap-nommu.c28
-rw-r--r--fs/romfs/super.c1
-rw-r--r--fs/seq_file.c16
-rw-r--r--fs/splice.c1
-rw-r--r--fs/squashfs/super.c1
-rw-r--r--fs/statfs.c21
-rw-r--r--fs/super.c70
-rw-r--r--fs/sync.c1
-rw-r--r--fs/sysfs/file.c4
-rw-r--r--fs/sysfs/group.c2
-rw-r--r--fs/sysfs/inode.c2
-rw-r--r--fs/sysfs/sysfs.h4
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/inode.c1
-rw-r--r--fs/sysv/itree.c2
-rw-r--r--fs/sysv/namei.c6
-rw-r--r--fs/sysv/sysv.h2
-rw-r--r--fs/ubifs/dir.c14
-rw-r--r--fs/ubifs/ioctl.c4
-rw-r--r--fs/ubifs/lpt.c6
-rw-r--r--fs/ubifs/super.c23
-rw-r--r--fs/ubifs/tnc.c3
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c6
-rw-r--r--fs/udf/file.c6
-rw-r--r--fs/udf/ialloc.c2
-rw-r--r--fs/udf/inode.c63
-rw-r--r--fs/udf/namei.c6
-rw-r--r--fs/udf/super.c25
-rw-r--r--fs/udf/symlink.c14
-rw-r--r--fs/udf/udf_sb.h8
-rw-r--r--fs/udf/udfdecl.h2
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c4
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/ufs/super.c5
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--fs/xattr.c4
-rw-r--r--fs/xfs/xfs_acl.c4
-rw-r--r--fs/xfs/xfs_attr_leaf.c64
-rw-r--r--fs/xfs/xfs_bmap.c20
-rw-r--r--fs/xfs/xfs_buf.c10
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_discard.c4
-rw-r--r--fs/xfs/xfs_dquot.c500
-rw-r--r--fs/xfs/xfs_dquot.h39
-rw-r--r--fs/xfs/xfs_dquot_item.c5
-rw-r--r--fs/xfs/xfs_export.c8
-rw-r--r--fs/xfs/xfs_file.c6
-rw-r--r--fs/xfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/xfs_ialloc.h2
-rw-r--r--fs/xfs/xfs_iget.c1
-rw-r--r--fs/xfs/xfs_inode.c25
-rw-r--r--fs/xfs/xfs_inode.h3
-rw-r--r--fs/xfs/xfs_inode_item.c2
-rw-r--r--fs/xfs/xfs_ioctl.c8
-rw-r--r--fs/xfs/xfs_ioctl32.c8
-rw-r--r--fs/xfs/xfs_iops.c8
-rw-r--r--fs/xfs/xfs_log.c427
-rw-r--r--fs/xfs/xfs_log.h8
-rw-r--r--fs/xfs/xfs_log_cil.c98
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_qm.c464
-rw-r--r--fs/xfs/xfs_qm.h6
-rw-r--r--fs/xfs/xfs_quota.h12
-rw-r--r--fs/xfs/xfs_super.c70
-rw-r--r--fs/xfs/xfs_sync.c51
-rw-r--r--fs/xfs/xfs_sync.h2
-rw-r--r--fs/xfs/xfs_trace.h14
-rw-r--r--fs/xfs/xfs_trans.c475
-rw-r--r--fs/xfs/xfs_trans.h3
-rw-r--r--fs/xfs/xfs_utils.c2
-rw-r--r--fs/xfs/xfs_utils.h2
-rw-r--r--fs/xfs/xfs_vnodeops.c4
-rw-r--r--fs/xfs/xfs_vnodeops.h4
-rw-r--r--include/asm-generic/cputime.h64
-rw-r--r--include/asm-generic/gpio.h6
-rw-r--r--include/asm-generic/io.h2
-rw-r--r--include/asm-generic/page.h10
-rw-r--r--include/asm-generic/socket.h3
-rw-r--r--include/asm-generic/types.h6
-rw-r--r--include/asm-generic/uaccess.h7
-rw-r--r--include/asm-generic/unistd.h8
-rw-r--r--include/drm/Kbuild1
-rw-r--r--include/drm/drm.h4
-rw-r--r--include/drm/drmP.h12
-rw-r--r--include/drm/drm_crtc.h212
-rw-r--r--include/drm/drm_crtc_helper.h5
-rw-r--r--include/drm/drm_fourcc.h137
-rw-r--r--include/drm/drm_mode.h74
-rw-r--r--include/drm/drm_pciids.h18
-rw-r--r--include/drm/drm_sman.h176
-rw-r--r--include/drm/exynos_drm.h46
-rw-r--r--include/drm/gma_drm.h91
-rw-r--r--include/drm/i915_drm.h40
-rw-r--r--include/drm/radeon_drm.h36
-rw-r--r--include/drm/sis_drm.h4
-rw-r--r--include/drm/ttm/ttm_bo_api.h24
-rw-r--r--include/drm/ttm/ttm_bo_driver.h203
-rw-r--r--include/drm/ttm/ttm_page_alloc.h77
-rw-r--r--include/drm/via_drm.h4
-rw-r--r--include/linux/Kbuild3
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/ahci_platform.h2
-rw-r--r--include/linux/amba/bus.h7
-rw-r--r--include/linux/amba/pl022.h4
-rw-r--r--include/linux/amba/pl330.h15
-rw-r--r--include/linux/amd-iommu.h138
-rw-r--r--include/linux/ata_platform.h3
-rw-r--r--include/linux/atmdev.h10
-rw-r--r--include/linux/audit.h8
-rw-r--r--include/linux/bcma/bcma.h55
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/bitops.h10
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/bootmem.h2
-rw-r--r--include/linux/can/platform/cc770.h33
-rw-r--r--include/linux/cgroup.h33
-rw-r--r--include/linux/cgroup_subsys.h8
-rw-r--r--include/linux/clk.h22
-rw-r--r--include/linux/clocksource.h12
-rw-r--r--include/linux/compat.h13
-rw-r--r--include/linux/compiler-gcc4.h1
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/configfs.h2
-rw-r--r--include/linux/cordic.h4
-rw-r--r--include/linux/cpu.h19
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/debugfs.h72
-rw-r--r--include/linux/debugobjects.h6
-rw-r--r--include/linux/device.h114
-rw-r--r--include/linux/dlm.h71
-rw-r--r--include/linux/dma-buf.h176
-rw-r--r--include/linux/dma_remapping.h2
-rw-r--r--include/linux/dynamic_queue_limits.h97
-rw-r--r--include/linux/edac.h8
-rw-r--r--include/linux/eeprom_93cx6.h8
-rw-r--r--include/linux/elf-em.h1
-rw-r--r--include/linux/errqueue.h7
-rw-r--r--include/linux/ethtool.h116
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/freezer.h159
-rw-r--r--include/linux/fs.h110
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/genetlink.h24
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h23
-rw-r--r--include/linux/gpio-pxa.h16
-rw-r--r--include/linux/hardirq.h21
-rw-r--r--include/linux/hid.h21
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c.h13
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h32
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/if_team.h242
-rw-r--r--include/linux/if_vlan.h80
-rw-r--r--include/linux/inet_diag.h43
-rw-r--r--include/linux/init_task.h13
-rw-r--r--include/linux/input/auo-pixcir-ts.h56
-rw-r--r--include/linux/input/gp2ap002a00f.h22
-rw-r--r--include/linux/input/gpio_tilt.h73
-rw-r--r--include/linux/input/pixcir_ts.h10
-rw-r--r--include/linux/input/samsung-keypad.h43
-rw-r--r--include/linux/input/tca8418_keypad.h44
-rw-r--r--include/linux/iommu.h33
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/irqdomain.h3
-rw-r--r--include/linux/iscsi_boot_sysfs.h8
-rw-r--r--include/linux/jbd.h5
-rw-r--r--include/linux/jbd2.h1
-rw-r--r--include/linux/jump_label.h27
-rw-r--r--include/linux/kernel.h16
-rw-r--r--include/linux/kernel_stat.h36
-rw-r--r--include/linux/kmod.h2
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kref.h77
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/kvm_host.h39
-rw-r--r--include/linux/kvm_para.h1
-rw-r--r--include/linux/latencytop.h3
-rw-r--r--include/linux/leds-tca6507.h34
-rw-r--r--include/linux/lglock.h36
-rw-r--r--include/linux/lockd/lockd.h6
-rw-r--r--include/linux/lockdep.h4
-rw-r--r--include/linux/log2.h1
-rw-r--r--include/linux/mbus.h13
-rw-r--r--include/linux/mdio-bitbang.h2
-rw-r--r--include/linux/mdio-gpio.h2
-rw-r--r--include/linux/memblock.h170
-rw-r--r--include/linux/memcontrol.h21
-rw-r--r--include/linux/memory.h3
-rw-r--r--include/linux/mempolicy.h10
-rw-r--r--include/linux/mfd/da9052/da9052.h131
-rw-r--r--include/linux/mfd/da9052/pdata.h (renamed from arch/arm/mach-at91/include/mach/vmalloc.h)26
-rw-r--r--include/linux/mfd/da9052/reg.h749
-rw-r--r--include/linux/mfd/mc13xxx.h1
-rw-r--r--include/linux/mfd/tps65910.h30
-rw-r--r--include/linux/mii.h200
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h51
-rw-r--r--include/linux/mlx4/device.h80
-rw-r--r--include/linux/mlx4/qp.h28
-rw-r--r--include/linux/mm.h64
-rw-r--r--include/linux/mmc/card.h6
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mnt_namespace.h31
-rw-r--r--include/linux/mod_devicetable.h18
-rw-r--r--include/linux/mount.h39
-rw-r--r--include/linux/msi.h3
-rw-r--r--include/linux/mtd/cfi.h16
-rw-r--r--include/linux/mtd/cfi_endian.h76
-rw-r--r--include/linux/mtd/map.h3
-rw-r--r--include/linux/mtd/mtd.h334
-rw-r--r--include/linux/mtd/nand.h1
-rw-r--r--include/linux/mtd/physmap.h1
-rw-r--r--include/linux/neighbour.h1
-rw-r--r--include/linux/netdev_features.h146
-rw-r--r--include/linux/netdevice.h354
-rw-r--r--include/linux/netfilter.h26
-rw-r--r--include/linux/netfilter/Kbuild4
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_tuple_common.h27
-rw-r--r--include/linux/netfilter/nf_nat.h25
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h36
-rw-r--r--include/linux/netfilter/x_tables.h4
-rw-r--r--include/linux/netfilter/xt_CT.h3
-rw-r--r--include/linux/netfilter/xt_ecn.h35
-rw-r--r--include/linux/netfilter/xt_nfacct.h13
-rw-r--r--include/linux/netfilter/xt_rpfilter.h23
-rw-r--r--include/linux/netfilter_ipv4/Kbuild1
-rw-r--r--include/linux/netfilter_ipv4/ipt_ecn.h38
-rw-r--r--include/linux/netfilter_ipv4/nf_nat.h58
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfc.h31
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_idmap.h8
-rw-r--r--include/linux/nfs_xdr.h22
-rw-r--r--include/linux/nl80211.h171
-rw-r--r--include/linux/nls.h5
-rw-r--r--include/linux/node.h6
-rw-r--r--include/linux/of.h44
-rw-r--r--include/linux/of_fdt.h4
-rw-r--r--include/linux/of_gpio.h10
-rw-r--r--include/linux/openvswitch.h452
-rw-r--r--include/linux/page-debug-flags.h4
-rw-r--r--include/linux/pagevec.h7
-rw-r--r--include/linux/pci-aspm.h4
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/pci_regs.h34
-rw-r--r--include/linux/percpu.h190
-rw-r--r--include/linux/perf_event.h9
-rw-r--r--include/linux/phonet.h2
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/pinctrl/machine.h30
-rw-r--r--include/linux/pinctrl/pinconf.h97
-rw-r--r--include/linux/pinctrl/pinctrl.h15
-rw-r--r--include/linux/pinctrl/pinmux.h29
-rw-r--r--include/linux/pkt_sched.h38
-rw-r--r--include/linux/platform_data/macb.h17
-rw-r--r--include/linux/platform_data/mv_usb.h18
-rw-r--r--include/linux/platform_data/s3c-hsudc.h34
-rw-r--r--include/linux/platform_device.h44
-rw-r--r--include/linux/pm.h244
-rw-r--r--include/linux/pm_domain.h103
-rw-r--r--include/linux/pm_qos.h8
-rw-r--r--include/linux/pm_runtime.h5
-rw-r--r--include/linux/poison.h6
-rw-r--r--include/linux/proc_fs.h26
-rw-r--r--include/linux/pstore.h16
-rw-r--r--include/linux/raid/md_p.h7
-rw-r--r--include/linux/raid/pq.h2
-rw-r--r--include/linux/ramfs.h2
-rw-r--r--include/linux/rcupdate.h115
-rw-r--r--include/linux/regmap.h59
-rw-r--r--include/linux/regulator/consumer.h13
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/regulator/of_regulator.h22
-rw-r--r--include/linux/reiserfs_fs.h9
-rw-r--r--include/linux/reiserfs_fs_sb.h4
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/sched.h109
-rw-r--r--include/linux/security.h68
-rw-r--r--include/linux/seq_file.h10
-rw-r--r--include/linux/serial_8250.h5
-rw-r--r--include/linux/serial_core.h100
-rw-r--r--include/linux/sh_intc.h1
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/sigma.h13
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/skbuff.h86
-rw-r--r--include/linux/slab_def.h2
-rw-r--r--include/linux/smscphy.h25
-rw-r--r--include/linux/sock_diag.h48
-rw-r--r--include/linux/spi/spi.h11
-rw-r--r--include/linux/srcu.h87
-rw-r--r--include/linux/ssb/ssb.h9
-rw-r--r--include/linux/ssb/ssb_regs.h17
-rw-r--r--include/linux/sunrpc/auth.h3
-rw-r--r--include/linux/sunrpc/auth_gss.h2
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/clnt.h8
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h2
-rw-r--r--include/linux/sunrpc/xdr.h2
-rw-r--r--include/linux/suspend.h35
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/swiotlb.h2
-rw-r--r--include/linux/syscalls.h24
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h8
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/linux/tick.h11
-rw-r--r--include/linux/types.h3
-rw-r--r--include/linux/ucb1400.h6
-rw-r--r--include/linux/unix_diag.h54
-rw-r--r--include/linux/usb.h28
-rw-r--r--include/linux/usb/ch11.h31
-rw-r--r--include/linux/usb/ch9.h20
-rw-r--r--include/linux/usb/gadget.h26
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/renesas_usbhs.h10
-rw-r--r--include/linux/usb/serial.h11
-rw-r--r--include/linux/virtio_config.h14
-rw-r--r--include/linux/vmalloc.h1
-rw-r--r--include/linux/wait.h4
-rw-r--r--include/linux/wanrouter.h2
-rw-r--r--include/linux/watchdog.h21
-rw-r--r--include/linux/wl12xx.h5
-rw-r--r--include/linux/workqueue.h47
-rw-r--r--include/linux/writeback.h12
-rw-r--r--include/media/davinci/vpif_types.h71
-rw-r--r--include/media/soc_camera.h7
-rw-r--r--include/mtd/mtd-abi.h3
-rw-r--r--include/net/9p/9p.h28
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_unix.h6
-rw-r--r--include/net/arp.h2
-rw-r--r--include/net/atmclip.h7
-rw-r--r--include/net/bluetooth/bluetooth.h56
-rw-r--r--include/net/bluetooth/hci.h83
-rw-r--r--include/net/bluetooth/hci_core.h366
-rw-r--r--include/net/bluetooth/l2cap.h457
-rw-r--r--include/net/bluetooth/mgmt.h251
-rw-r--r--include/net/bluetooth/smp.h6
-rw-r--r--include/net/caif/caif_dev.h21
-rw-r--r--include/net/caif/caif_layer.h4
-rw-r--r--include/net/caif/caif_spi.h4
-rw-r--r--include/net/caif/cfcnfg.h23
-rw-r--r--include/net/caif/cfserl.h4
-rw-r--r--include/net/cfg80211.h269
-rw-r--r--include/net/dsa.h144
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/flow.h6
-rw-r--r--include/net/flow_keys.h16
-rw-r--r--include/net/genetlink.h2
-rw-r--r--include/net/icmp.h4
-rw-r--r--include/net/ieee80211_radiotap.h8
-rw-r--r--include/net/ieee802154.h6
-rw-r--r--include/net/inet6_hashtables.h4
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_sock.h8
-rw-r--r--include/net/inet_timewait_sock.h12
-rw-r--r--include/net/inetpeer.h3
-rw-r--r--include/net/ip.h8
-rw-r--r--include/net/ip6_fib.h7
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_vs.h10
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/mac80211.h64
-rw-r--r--include/net/ndisc.h45
-rw-r--r--include/net/neighbour.h17
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h4
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h19
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h1
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h1
-rw-r--r--include/net/netfilter/nf_nat.h10
-rw-r--r--include/net/netfilter/nf_nat_core.h2
-rw-r--r--include/net/netfilter/nf_nat_protocol.h17
-rw-r--r--include/net/netfilter/nf_tproxy_core.h2
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/mib.h6
-rw-r--r--include/net/netns/xfrm.h2
-rw-r--r--include/net/netprio_cgroup.h57
-rw-r--r--include/net/nfc/nci.h178
-rw-r--r--include/net/nfc/nci_core.h13
-rw-r--r--include/net/nfc/nfc.h24
-rw-r--r--include/net/protocol.h12
-rw-r--r--include/net/red.h194
-rw-r--r--include/net/regulatory.h6
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h9
-rw-r--r--include/net/snmp.h18
-rw-r--r--include/net/sock.h281
-rw-r--r--include/net/tcp.h25
-rw-r--r--include/net/tcp_memcontrol.h19
-rw-r--r--include/net/udp.h13
-rw-r--r--include/net/xfrm.h12
-rw-r--r--include/rdma/ib_addr.h2
-rw-r--r--include/rdma/ib_cm.h3
-rw-r--r--include/scsi/libfcoe.h3
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_host.h3
-rw-r--r--include/scsi/scsi_transport_iscsi.h7
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/saif.h4
-rw-r--r--include/target/target_core_base.h46
-rw-r--r--include/target/target_core_transport.h24
-rw-r--r--include/trace/events/ext4.h6
-rw-r--r--include/trace/events/kmem.h4
-rw-r--r--include/trace/events/oom.h33
-rw-r--r--include/trace/events/rcu.h122
-rw-r--r--include/trace/events/regmap.h9
-rw-r--r--include/trace/events/sched.h57
-rw-r--r--include/trace/events/task.h61
-rw-r--r--include/trace/events/writeback.h29
-rw-r--r--include/video/omapdss.h7
-rw-r--r--include/xen/balloon.h6
-rw-r--r--include/xen/events.h7
-rw-r--r--include/xen/grant_table.h37
-rw-r--r--include/xen/interface/grant_table.h167
-rw-r--r--include/xen/interface/io/xs_wire.h6
-rw-r--r--include/xen/interface/xen.h2
-rw-r--r--include/xen/xenbus.h31
-rw-r--r--include/xen/xenbus_dev.h41
-rw-r--r--init/Kconfig21
-rw-r--r--init/do_mounts.c45
-rw-r--r--init/initramfs.c8
-rw-r--r--init/main.c3
-rw-r--r--ipc/mqueue.c24
-rw-r--r--ipc/msgutil.c5
-rw-r--r--kernel/Makefile20
-rw-r--r--kernel/acct.c46
-rw-r--r--kernel/audit.c5
-rw-r--r--kernel/auditsc.c20
-rw-r--r--kernel/cgroup.c428
-rw-r--r--kernel/cgroup_freezer.c86
-rw-r--r--kernel/cpu.c8
-rw-r--r--kernel/cpuset.c134
-rw-r--r--kernel/debug/kdb/kdb_support.c2
-rw-r--r--kernel/events/Makefile2
-rw-r--r--kernel/events/callchain.c191
-rw-r--r--kernel/events/core.c408
-rw-r--r--kernel/events/internal.h42
-rw-r--r--kernel/events/ring_buffer.c5
-rw-r--r--kernel/exit.c37
-rw-r--r--kernel/fork.c30
-rw-r--r--kernel/freezer.c203
-rw-r--r--kernel/futex.c28
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/hung_task.c14
-rw-r--r--kernel/irq/irqdomain.c15
-rw-r--r--kernel/irq/manage.c9
-rw-r--r--kernel/irq/spurious.c4
-rw-r--r--kernel/itimer.c15
-rw-r--r--kernel/jump_label.c54
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/kmod.c27
-rw-r--r--kernel/kthread.c27
-rw-r--r--kernel/lockdep.c91
-rw-r--r--kernel/panic.c17
-rw-r--r--kernel/posix-cpu-timers.c132
-rw-r--r--kernel/power/hibernate.c104
-rw-r--r--kernel/power/main.c10
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/process.c77
-rw-r--r--kernel/power/snapshot.c6
-rw-r--r--kernel/power/suspend.c12
-rw-r--r--kernel/power/swap.c1
-rw-r--r--kernel/power/user.c184
-rw-r--r--kernel/printk.c14
-rw-r--r--kernel/ptrace.c13
-rw-r--r--kernel/rcu.h7
-rw-r--r--kernel/rcupdate.c12
-rw-r--r--kernel/rcutiny.c149
-rw-r--r--kernel/rcutiny_plugin.h29
-rw-r--r--kernel/rcutorture.c225
-rw-r--r--kernel/rcutree.c290
-rw-r--r--kernel/rcutree.h26
-rw-r--r--kernel/rcutree_plugin.h289
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/res_counter.c3
-rw-r--r--kernel/rtmutex-debug.c1
-rw-r--r--kernel/rtmutex-tester.c37
-rw-r--r--kernel/rtmutex.c8
-rw-r--r--kernel/sched/Makefile20
-rw-r--r--kernel/sched/auto_group.c (renamed from kernel/sched_autogroup.c)33
-rw-r--r--kernel/sched/auto_group.h (renamed from kernel/sched_autogroup.h)26
-rw-r--r--kernel/sched/clock.c (renamed from kernel/sched_clock.c)0
-rw-r--r--kernel/sched/core.c (renamed from kernel/sched.c)2277
-rw-r--r--kernel/sched/cpupri.c (renamed from kernel/sched_cpupri.c)4
-rw-r--r--kernel/sched/cpupri.h (renamed from kernel/sched_cpupri.h)0
-rw-r--r--kernel/sched/debug.c (renamed from kernel/sched_debug.c)6
-rw-r--r--kernel/sched/fair.c (renamed from kernel/sched_fair.c)1133
-rw-r--r--kernel/sched/features.h (renamed from kernel/sched_features.h)29
-rw-r--r--kernel/sched/idle_task.c (renamed from kernel/sched_idletask.c)4
-rw-r--r--kernel/sched/rt.c (renamed from kernel/sched_rt.c)221
-rw-r--r--kernel/sched/sched.h1166
-rw-r--r--kernel/sched/stats.c111
-rw-r--r--kernel/sched/stats.h (renamed from kernel/sched_stats.h)109
-rw-r--r--kernel/sched/stop_task.c (renamed from kernel/sched_stoptask.c)4
-rw-r--r--kernel/signal.c82
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/sys.c6
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/time/Kconfig2
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/clockevents.c1
-rw-r--r--kernel/time/clocksource.c111
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/tick-sched.c105
-rw-r--r--kernel/time/timekeeping.c94
-rw-r--r--kernel/timer.c64
-rw-r--r--kernel/trace/blktrace.c2
-rw-r--r--kernel/trace/ftrace.c5
-rw-r--r--kernel/trace/trace.c108
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_filter.c39
-rw-r--r--kernel/trace/trace_irqsoff.c13
-rw-r--r--kernel/trace/trace_output.c16
-rw-r--r--kernel/trace/trace_sched_wakeup.c13
-rw-r--r--kernel/tsacct.c2
-rw-r--r--kernel/wait.c4
-rw-r--r--kernel/workqueue.c32
-rw-r--r--lib/Kconfig10
-rw-r--r--lib/Makefile4
-rw-r--r--lib/btree.c1
-rw-r--r--lib/cordic.c2
-rw-r--r--lib/crc32.c21
-rw-r--r--lib/debugobjects.c54
-rw-r--r--lib/decompress_bunzip2.c5
-rw-r--r--lib/decompress_unlzma.c2
-rw-r--r--lib/devres.c55
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/dynamic_queue_limits.c133
-rw-r--r--lib/fault-inject.c8
-rw-r--r--lib/kobject.c37
-rw-r--r--lib/kobject_uevent.c3
-rw-r--r--lib/kref.c97
-rw-r--r--lib/reciprocal_div.c2
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/vsprintf.c19
-rw-r--r--mm/Kconfig6
-rw-r--r--mm/Kconfig.debug5
-rw-r--r--mm/backing-dev.c8
-rw-r--r--mm/bootmem.c24
-rw-r--r--mm/compaction.c14
-rw-r--r--mm/fadvise.c3
-rw-r--r--mm/failslab.c2
-rw-r--r--mm/filemap.c20
-rw-r--r--mm/huge_memory.c16
-rw-r--r--mm/hugetlb.c56
-rw-r--r--mm/memblock.c961
-rw-r--r--mm/memcontrol.c128
-rw-r--r--mm/mempolicy.c25
-rw-r--r--mm/mempool.c104
-rw-r--r--mm/migrate.c16
-rw-r--r--mm/mmap.c60
-rw-r--r--mm/mremap.c9
-rw-r--r--mm/nobootmem.c45
-rw-r--r--mm/oom_kill.c10
-rw-r--r--mm/page-writeback.c568
-rw-r--r--mm/page_alloc.c773
-rw-r--r--mm/percpu-vm.c17
-rw-r--r--mm/percpu.c68
-rw-r--r--mm/rmap.c45
-rw-r--r--mm/shmem.c17
-rw-r--r--mm/slab.c7
-rw-r--r--mm/slub.c51
-rw-r--r--mm/swap.c14
-rw-r--r--mm/swap_state.c1
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/vmalloc.c41
-rw-r--r--mm/vmscan.c82
-rw-r--r--net/8021q/vlan.c126
-rw-r--r--net/8021q/vlan.h43
-rw-r--r--net/8021q/vlan_core.c270
-rw-r--r--net/8021q/vlan_dev.c140
-rw-r--r--net/8021q/vlan_gvrp.c4
-rw-r--r--net/8021q/vlan_netlink.c10
-rw-r--r--net/8021q/vlanproc.c42
-rw-r--r--net/9p/client.c242
-rw-r--r--net/9p/error.c6
-rw-r--r--net/9p/mod.c31
-rw-r--r--net/9p/protocol.c8
-rw-r--r--net/9p/trans_fd.c113
-rw-r--r--net/9p/trans_rdma.c26
-rw-r--r--net/9p/trans_virtio.c34
-rw-r--r--net/9p/util.c4
-rw-r--r--net/Kconfig14
-rw-r--r--net/Makefile1
-rw-r--r--net/atm/atm_misc.c2
-rw-r--r--net/atm/br2684.c38
-rw-r--r--net/atm/clip.c159
-rw-r--r--net/atm/common.c34
-rw-r--r--net/atm/common.h1
-rw-r--r--net/atm/pppoatm.c4
-rw-r--r--net/ax25/af_ax25.c26
-rw-r--r--net/batman-adv/bat_sysfs.c4
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/gateway_client.c153
-rw-r--r--net/batman-adv/gateway_client.h5
-rw-r--r--net/batman-adv/gateway_common.c4
-rw-r--r--net/batman-adv/hash.c4
-rw-r--r--net/batman-adv/hash.h13
-rw-r--r--net/batman-adv/icmp_socket.c14
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/originator.c13
-rw-r--r--net/batman-adv/originator.h2
-rw-r--r--net/batman-adv/routing.c21
-rw-r--r--net/batman-adv/soft-interface.c45
-rw-r--r--net/batman-adv/translation-table.c456
-rw-r--r--net/batman-adv/types.h14
-rw-r--r--net/batman-adv/vis.c23
-rw-r--r--net/bluetooth/Kconfig37
-rw-r--r--net/bluetooth/Makefile5
-rw-r--r--net/bluetooth/af_bluetooth.c11
-rw-r--r--net/bluetooth/bnep/Kconfig2
-rw-r--r--net/bluetooth/bnep/core.c25
-rw-r--r--net/bluetooth/cmtp/Kconfig2
-rw-r--r--net/bluetooth/cmtp/core.c18
-rw-r--r--net/bluetooth/hci_conn.c188
-rw-r--r--net/bluetooth/hci_core.c709
-rw-r--r--net/bluetooth/hci_event.c474
-rw-r--r--net/bluetooth/hci_sock.c23
-rw-r--r--net/bluetooth/hci_sysfs.c131
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/hidp/core.c157
-rw-r--r--net/bluetooth/l2cap_core.c1470
-rw-r--r--net/bluetooth/l2cap_sock.c168
-rw-r--r--net/bluetooth/mgmt.c1730
-rw-r--r--net/bluetooth/rfcomm/Kconfig2
-rw-r--r--net/bluetooth/rfcomm/core.c37
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c45
-rw-r--r--net/bluetooth/sco.c44
-rw-r--r--net/bluetooth/smp.c268
-rw-r--r--net/bridge/br.c4
-rw-r--r--net/bridge/br_device.c14
-rw-r--r--net/bridge/br_fdb.c102
-rw-r--r--net/bridge/br_forward.c2
-rw-r--r--net/bridge/br_if.c5
-rw-r--r--net/bridge/br_multicast.c45
-rw-r--r--net/bridge/br_netfilter.c12
-rw-r--r--net/bridge/br_netlink.c6
-rw-r--r--net/bridge/br_private.h8
-rw-r--r--net/bridge/br_stp.c29
-rw-r--r--net/bridge/netfilter/ebt_ip6.c3
-rw-r--r--net/bridge/netfilter/ebt_log.c5
-rw-r--r--net/caif/Kconfig11
-rw-r--r--net/caif/Makefile1
-rw-r--r--net/caif/caif_dev.c277
-rw-r--r--net/caif/caif_usb.c208
-rw-r--r--net/caif/cfcnfg.c47
-rw-r--r--net/caif/cffrml.c15
-rw-r--r--net/caif/cfpkt_skbuff.c15
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/caif/cfserl.c3
-rw-r--r--net/ceph/crush/mapper.c35
-rw-r--r--net/core/Makefile6
-rw-r--r--net/core/dev.c316
-rw-r--r--net/core/dev_addr_lists.c19
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/ethtool.c712
-rw-r--r--net/core/flow.c12
-rw-r--r--net/core/flow_dissector.c143
-rw-r--r--net/core/neighbour.c227
-rw-r--r--net/core/net-sysfs.c324
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/netprio_cgroup.c344
-rw-r--r--net/core/pktgen.c21
-rw-r--r--net/core/request_sock.c7
-rw-r--r--net/core/rtnetlink.c25
-rw-r--r--net/core/secure_seq.c8
-rw-r--r--net/core/skbuff.c91
-rw-r--r--net/core/sock.c212
-rw-r--r--net/core/sock_diag.c192
-rw-r--r--net/core/sysctl_net_core.c9
-rw-r--r--net/dccp/ccids/ccid2.c4
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.h2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/diag.c20
-rw-r--r--net/dccp/feat.c16
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/ipv6.c42
-rw-r--r--net/dccp/minisocks.c8
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/dccp/probe.c14
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/dn_neigh.c8
-rw-r--r--net/decnet/dn_route.c18
-rw-r--r--net/decnet/dn_timer.c17
-rw-r--r--net/dsa/Kconfig38
-rw-r--r--net/dsa/Makefile19
-rw-r--r--net/dsa/dsa.c51
-rw-r--r--net/dsa/dsa_priv.h127
-rw-r--r--net/dsa/tag_dsa.c15
-rw-r--r--net/dsa/tag_edsa.c15
-rw-r--r--net/dsa/tag_trailer.c15
-rw-r--r--net/econet/af_econet.c7
-rw-r--r--net/ieee802154/6lowpan.c374
-rw-r--r--net/ieee802154/6lowpan.h23
-rw-r--r--net/ieee802154/dgram.c7
-rw-r--r--net/ieee802154/raw.c7
-rw-r--r--net/ipv4/Kconfig10
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/af_inet.c13
-rw-r--r--net/ipv4/arp.c40
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/fib_rules.c1
-rw-r--r--net/ipv4/fib_trie.c1
-rw-r--r--net/ipv4/igmp.c22
-rw-r--r--net/ipv4/inet_connection_sock.c19
-rw-r--r--net/ipv4/inet_diag.c478
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_gre.c14
-rw-r--r--net/ipv4/ip_options.c5
-rw-r--r--net/ipv4/ip_output.c23
-rw-r--r--net/ipv4/ip_sockglue.c41
-rw-r--r--net/ipv4/ipconfig.c19
-rw-r--r--net/ipv4/ipip.c9
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/Kconfig19
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/netfilter/ip_queue.c6
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c16
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c14
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c16
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c2
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c127
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c141
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c98
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c20
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c16
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c14
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_common.c36
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_dccp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_sctp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_tcp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_udp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_udplite.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_unknown.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c22
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c10
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c2
-rw-r--r--net/ipv4/proc.c15
-rw-r--r--net/ipv4/raw.c10
-rw-r--r--net/ipv4/route.c218
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c69
-rw-r--r--net/ipv4/tcp.c57
-rw-r--r--net/ipv4/tcp_cong.c2
-rw-r--r--net/ipv4/tcp_diag.c20
-rw-r--r--net/ipv4/tcp_input.c66
-rw-r--r--net/ipv4/tcp_ipv4.c15
-rw-r--r--net/ipv4/tcp_memcontrol.c272
-rw-r--r--net/ipv4/tcp_minisocks.c12
-rw-r--r--net/ipv4/tcp_output.c27
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/tunnel4.c10
-rw-r--r--net/ipv4/udp.c24
-rw-r--r--net/ipv4/udp_diag.c201
-rw-r--r--net/ipv4/xfrm4_tunnel.c6
-rw-r--r--net/ipv6/addrconf.c78
-rw-r--r--net/ipv6/af_inet6.c29
-rw-r--r--net/ipv6/ah6.c12
-rw-r--r--net/ipv6/anycast.c8
-rw-r--r--net/ipv6/datagram.c36
-rw-r--r--net/ipv6/exthdrs.c18
-rw-r--r--net/ipv6/exthdrs_core.c11
-rw-r--r--net/ipv6/fib6_rules.c2
-rw-r--r--net/ipv6/icmp.c25
-rw-r--r--net/ipv6/inet6_connection_sock.c14
-rw-r--r--net/ipv6/ip6_fib.c234
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c37
-rw-r--r--net/ipv6/ip6_tunnel.c24
-rw-r--r--net/ipv6/ip6mr.c12
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/mcast.c44
-rw-r--r--net/ipv6/mip6.c4
-rw-r--r--net/ipv6/ndisc.c56
-rw-r--r--net/ipv6/netfilter/Kconfig11
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/ip6_queue.c5
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c11
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c133
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/proc.c15
-rw-r--r--net/ipv6/raw.c19
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c334
-rw-r--r--net/ipv6/sit.c17
-rw-r--r--net/ipv6/syncookies.c8
-rw-r--r--net/ipv6/tcp_ipv6.c67
-rw-r--r--net/ipv6/udp.c32
-rw-r--r--net/ipv6/xfrm6_mode_beet.c8
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c4
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/ipv6/xfrm6_state.c4
-rw-r--r--net/irda/af_irda.c4
-rw-r--r--net/irda/irlan/irlan_common.c2
-rw-r--r--net/irda/irttp.c4
-rw-r--r--net/iucv/af_iucv.c103
-rw-r--r--net/key/af_key.c20
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/llc/af_llc.c14
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c16
-rw-r--r--net/mac80211/agg-tx.c228
-rw-r--r--net/mac80211/cfg.c293
-rw-r--r--net/mac80211/debugfs.c37
-rw-r--r--net/mac80211/debugfs_netdev.c5
-rw-r--r--net/mac80211/debugfs_sta.c8
-rw-r--r--net/mac80211/driver-ops.h90
-rw-r--r--net/mac80211/driver-trace.h11
-rw-r--r--net/mac80211/ht.c92
-rw-r--r--net/mac80211/ibss.c258
-rw-r--r--net/mac80211/ieee80211_i.h146
-rw-r--r--net/mac80211/iface.c66
-rw-r--r--net/mac80211/key.c9
-rw-r--r--net/mac80211/main.c108
-rw-r--r--net/mac80211/mesh.c71
-rw-r--r--net/mac80211/mesh.h9
-rw-r--r--net/mac80211/mesh_hwmp.c195
-rw-r--r--net/mac80211/mesh_pathtbl.c59
-rw-r--r--net/mac80211/mesh_plink.c58
-rw-r--r--net/mac80211/mlme.c139
-rw-r--r--net/mac80211/offchannel.c34
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c7
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c62
-rw-r--r--net/mac80211/rc80211_pid_algo.c4
-rw-r--r--net/mac80211/rx.c279
-rw-r--r--net/mac80211/scan.c201
-rw-r--r--net/mac80211/sta_info.c373
-rw-r--r--net/mac80211/sta_info.h60
-rw-r--r--net/mac80211/status.c110
-rw-r--r--net/mac80211/tx.c407
-rw-r--r--net/mac80211/util.c329
-rw-r--r--net/mac80211/wep.c5
-rw-r--r--net/mac80211/wme.c38
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/work.c153
-rw-r--r--net/mac80211/wpa.c34
-rw-r--r--net/netfilter/Kconfig39
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/core.c15
-rw-r--r--net/netfilter/ipset/ip_set_getport.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c2
-rw-r--r--net/netfilter/ipvs/Kconfig16
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c18
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c20
-rw-r--r--net/netfilter/nf_conntrack_acct.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c20
-rw-r--r--net/netfilter/nf_conntrack_ecache.c37
-rw-r--r--net/netfilter/nf_conntrack_expect.c75
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c9
-rw-r--r--net/netfilter/nf_conntrack_helper.c12
-rw-r--r--net/netfilter/nf_conntrack_netlink.c177
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c4
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c2
-rw-r--r--net/netfilter/nfnetlink_acct.c361
-rw-r--r--net/netfilter/xt_AUDIT.c3
-rw-r--r--net/netfilter/xt_CT.c8
-rw-r--r--net/netfilter/xt_NFQUEUE.c4
-rw-r--r--net/netfilter/xt_TCPMSS.c11
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c7
-rw-r--r--net/netfilter/xt_TEE.c11
-rw-r--r--net/netfilter/xt_TPROXY.c2
-rw-r--r--net/netfilter/xt_addrtype.c10
-rw-r--r--net/netfilter/xt_connbytes.c38
-rw-r--r--net/netfilter/xt_ecn.c179
-rw-r--r--net/netfilter/xt_hashlimit.c19
-rw-r--r--net/netfilter/xt_nfacct.c76
-rw-r--r--net/netfilter/xt_socket.c8
-rw-r--r--net/netlabel/netlabel_addrlist.c8
-rw-r--r--net/netlabel/netlabel_addrlist.h2
-rw-r--r--net/netlabel/netlabel_domainhash.c20
-rw-r--r--net/netlabel/netlabel_domainhash.h2
-rw-r--r--net/netlabel/netlabel_kapi.c42
-rw-r--r--net/netlabel/netlabel_mgmt.c10
-rw-r--r--net/netlabel/netlabel_unlabeled.c30
-rw-r--r--net/netlink/af_netlink.c30
-rw-r--r--net/netlink/genetlink.c32
-rw-r--r--net/netrom/af_netrom.c17
-rw-r--r--net/netrom/nr_route.c11
-rw-r--r--net/nfc/Kconfig1
-rw-r--r--net/nfc/Makefile1
-rw-r--r--net/nfc/core.c201
-rw-r--r--net/nfc/llcp/Kconfig7
-rw-r--r--net/nfc/llcp/commands.c399
-rw-r--r--net/nfc/llcp/llcp.c971
-rw-r--r--net/nfc/llcp/llcp.h193
-rw-r--r--net/nfc/llcp/sock.c675
-rw-r--r--net/nfc/nci/core.c126
-rw-r--r--net/nfc/nci/data.c35
-rw-r--r--net/nfc/nci/lib.c11
-rw-r--r--net/nfc/nci/ntf.c209
-rw-r--r--net/nfc/nci/rsp.c131
-rw-r--r--net/nfc/netlink.c179
-rw-r--r--net/nfc/nfc.h70
-rw-r--r--net/nfc/rawsock.c37
-rw-r--r--net/openvswitch/Kconfig28
-rw-r--r--net/openvswitch/Makefile14
-rw-r--r--net/openvswitch/actions.c415
-rw-r--r--net/openvswitch/datapath.c1912
-rw-r--r--net/openvswitch/datapath.h125
-rw-r--r--net/openvswitch/dp_notify.c66
-rw-r--r--net/openvswitch/flow.c1346
-rw-r--r--net/openvswitch/flow.h199
-rw-r--r--net/openvswitch/vport-internal_dev.c241
-rw-r--r--net/openvswitch/vport-internal_dev.h28
-rw-r--r--net/openvswitch/vport-netdev.c198
-rw-r--r--net/openvswitch/vport-netdev.h42
-rw-r--r--net/openvswitch/vport.c398
-rw-r--r--net/openvswitch/vport.h205
-rw-r--r--net/packet/af_packet.c33
-rw-r--r--net/phonet/pep.c106
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rfkill/rfkill-gpio.c15
-rw-r--r--net/rfkill/rfkill-regulator.c18
-rw-r--r--net/rxrpc/ar-ack.c14
-rw-r--r--net/rxrpc/ar-key.c6
-rw-r--r--net/rxrpc/ar-output.c4
-rw-r--r--net/sched/cls_flow.c182
-rw-r--r--net/sched/sch_api.c14
-rw-r--r--net/sched/sch_choke.c161
-rw-r--r--net/sched/sch_generic.c9
-rw-r--r--net/sched/sch_gred.c81
-rw-r--r--net/sched/sch_hfsc.c10
-rw-r--r--net/sched/sch_mqprio.c2
-rw-r--r--net/sched/sch_multiq.c6
-rw-r--r--net/sched/sch_netem.c285
-rw-r--r--net/sched/sch_qfq.c21
-rw-r--r--net/sched/sch_red.c59
-rw-r--r--net/sched/sch_sfb.c17
-rw-r--r--net/sched/sch_sfq.c369
-rw-r--r--net/sched/sch_tbf.c1
-rw-r--r--net/sched/sch_teql.c37
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/ipv6.c40
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/outqueue.c6
-rw-r--r--net/sctp/protocol.c5
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/sctp/sysctl.c13
-rw-r--r--net/sctp/transport.c16
-rw-r--r--net/socket.c38
-rw-r--r--net/sunrpc/addr.c8
-rw-r--r--net/sunrpc/auth_generic.c6
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c40
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/rpc_pipe.c3
-rw-r--r--net/sunrpc/sched.c33
-rw-r--r--net/sunrpc/svc.c8
-rw-r--r--net/sunrpc/svc_xprt.c8
-rw-r--r--net/sunrpc/svcauth_unix.c8
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xdr.c3
-rw-r--r--net/sunrpc/xprt.c10
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/tipc/bcast.c161
-rw-r--r--net/tipc/bcast.h16
-rw-r--r--net/tipc/bearer.c190
-rw-r--r--net/tipc/bearer.h77
-rw-r--r--net/tipc/config.c13
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/discover.c27
-rw-r--r--net/tipc/discover.h8
-rw-r--r--net/tipc/eth_media.c159
-rw-r--r--net/tipc/link.c346
-rw-r--r--net/tipc/link.h63
-rw-r--r--net/tipc/msg.c9
-rw-r--r--net/tipc/msg.h16
-rw-r--r--net/tipc/name_distr.c14
-rw-r--r--net/tipc/name_table.c15
-rw-r--r--net/tipc/name_table.h10
-rw-r--r--net/tipc/net.c7
-rw-r--r--net/tipc/node.c25
-rw-r--r--net/tipc/node.h12
-rw-r--r--net/tipc/port.c8
-rw-r--r--net/tipc/port.h4
-rw-r--r--net/tipc/ref.c3
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tipc/subscr.c46
-rw-r--r--net/tipc/subscr.h10
-rw-r--r--net/unix/Kconfig7
-rw-r--r--net/unix/Makefile3
-rw-r--r--net/unix/af_unix.c74
-rw-r--r--net/unix/diag.c329
-rw-r--r--net/wireless/Kconfig7
-rw-r--r--net/wireless/chan.c12
-rw-r--r--net/wireless/core.c4
-rw-r--r--net/wireless/core.h20
-rw-r--r--net/wireless/genregdb.awk13
-rw-r--r--net/wireless/mesh.c2
-rw-r--r--net/wireless/mlme.c72
-rw-r--r--net/wireless/nl80211.c859
-rw-r--r--net/wireless/nl80211.h5
-rw-r--r--net/wireless/reg.c190
-rw-r--r--net/wireless/reg.h16
-rw-r--r--net/wireless/regdb.h16
-rw-r--r--net/wireless/scan.c117
-rw-r--r--net/wireless/sme.c68
-rw-r--r--net/wireless/util.c198
-rw-r--r--net/wireless/wext-compat.c12
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/x25/x25_dev.c6
-rw-r--r--net/x25/x25_route.c2
-rw-r--r--net/xfrm/xfrm_policy.c40
-rw-r--r--net/xfrm/xfrm_state.c12
-rw-r--r--net/xfrm/xfrm_user.c12
-rwxr-xr-xscripts/checkpatch.pl236
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--scripts/kconfig/Makefile5
-rw-r--r--scripts/mod/file2alias.c72
-rw-r--r--scripts/package/Makefile2
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/lsm.c11
-rw-r--r--security/apparmor/path.c66
-rw-r--r--security/capability.c13
-rw-r--r--security/device_cgroup.c7
-rw-r--r--security/inode.c16
-rw-r--r--security/integrity/evm/evm_crypto.c19
-rw-r--r--security/lsm_audit.c7
-rw-r--r--security/security.c19
-rw-r--r--security/selinux/hooks.c21
-rw-r--r--security/selinux/netnode.c2
-rw-r--r--security/selinux/netport.c4
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--security/tomoyo/audit.c4
-rw-r--r--security/tomoyo/common.h2
-rw-r--r--security/tomoyo/realpath.c22
-rw-r--r--security/tomoyo/securityfs_if.c2
-rw-r--r--security/tomoyo/tomoyo.c15
-rw-r--r--sound/aoa/codecs/Kconfig8
-rw-r--r--sound/arm/aaci.c2
-rw-r--r--sound/atmel/ac97c.c4
-rw-r--r--sound/core/Kconfig1
-rw-r--r--sound/oss/Kconfig2
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c2
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_eld.c28
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/patch_cirrus.c32
-rw-r--r--sound/pci/hda/patch_hdmi.c16
-rw-r--r--sound/pci/hda/patch_realtek.c99
-rw-r--r--sound/pci/hda/patch_sigmatel.c75
-rw-r--r--sound/pci/hda/patch_via.c76
-rw-r--r--sound/pci/lx6464es/lx_core.c23
-rw-r--r--sound/pci/lx6464es/lx_core.h3
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c64
-rw-r--r--sound/soc/atmel/Kconfig21
-rw-r--r--sound/soc/atmel/Makefile4
-rw-r--r--sound/soc/atmel/playpaq_wm8510.c473
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/ad1836.h2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/cs4270.c10
-rw-r--r--sound/soc/codecs/cs4271.c8
-rw-r--r--sound/soc/codecs/cs42l51.c2
-rw-r--r--sound/soc/codecs/jz4740.c1
-rw-r--r--sound/soc/codecs/max9877.c10
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c4
-rw-r--r--sound/soc/codecs/sta32x.c63
-rw-r--r--sound/soc/codecs/sta32x.h1
-rw-r--r--sound/soc/codecs/uda1380.c4
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8776.c1
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c2
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c19
-rw-r--r--sound/soc/codecs/wm8996.c1
-rw-r--r--sound/soc/codecs/wm9081.c10
-rw-r--r--sound/soc/codecs/wm9090.c6
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c1
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c24
-rw-r--r--sound/soc/imx/Kconfig2
-rw-r--r--sound/soc/kirkwood/Kconfig3
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c15
-rw-r--r--sound/soc/mxs/mxs-pcm.c3
-rw-r--r--sound/soc/mxs/mxs-saif.c24
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c1
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rw-r--r--sound/soc/pxa/Kconfig3
-rw-r--r--sound/soc/pxa/hx4700.c5
-rw-r--r--sound/soc/samsung/jive_wm8750.c3
-rw-r--r--sound/soc/samsung/smdk2443_wm9710.c1
-rw-r--r--sound/soc/samsung/smdk_wm8994.c1
-rw-r--r--sound/soc/samsung/speyside.c2
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-utils.c31
-rw-r--r--sound/sound_core.c2
-rw-r--r--sound/usb/6fire/chip.c15
-rw-r--r--sound/usb/caiaq/device.c13
-rw-r--r--sound/usb/misc/ua101.c14
-rw-r--r--sound/usb/quirks-table.h31
-rw-r--r--sound/usb/usx2y/us122l.c14
-rw-r--r--sound/usb/usx2y/usbusx2y.c13
-rw-r--r--tools/perf/Documentation/examples.txt34
-rw-r--r--tools/perf/Documentation/perf-annotate.txt4
-rw-r--r--tools/perf/Documentation/perf-buildid-list.txt2
-rw-r--r--tools/perf/Documentation/perf-evlist.txt2
-rw-r--r--tools/perf/Documentation/perf-kmem.txt2
-rw-r--r--tools/perf/Documentation/perf-lock.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt2
-rw-r--r--tools/perf/Documentation/perf-report.txt11
-rw-r--r--tools/perf/Documentation/perf-sched.txt2
-rw-r--r--tools/perf/Documentation/perf-script.txt9
-rw-r--r--tools/perf/Documentation/perf-test.txt8
-rw-r--r--tools/perf/Documentation/perf-timechart.txt2
-rw-r--r--tools/perf/Makefile1
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c3
-rw-r--r--tools/perf/builtin-annotate.c132
-rw-r--r--tools/perf/builtin-buildid-list.c53
-rw-r--r--tools/perf/builtin-diff.c21
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-inject.c118
-rw-r--r--tools/perf/builtin-kmem.c16
-rw-r--r--tools/perf/builtin-kvm.c2
-rw-r--r--tools/perf/builtin-lock.c12
-rw-r--r--tools/perf/builtin-probe.c1
-rw-r--r--tools/perf/builtin-record.c603
-rw-r--r--tools/perf/builtin-report.c236
-rw-r--r--tools/perf/builtin-sched.c200
-rw-r--r--tools/perf/builtin-script.c130
-rw-r--r--tools/perf/builtin-stat.c137
-rw-r--r--tools/perf/builtin-test.c545
-rw-r--r--tools/perf/builtin-timechart.c38
-rw-r--r--tools/perf/builtin-top.c558
-rw-r--r--tools/perf/perf.c33
-rw-r--r--tools/perf/perf.h24
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/annotate.h5
-rw-r--r--tools/perf/util/build-id.c26
-rw-r--r--tools/perf/util/build-id.h2
-rw-r--r--tools/perf/util/callchain.h3
-rw-r--r--tools/perf/util/cgroup.c15
-rw-r--r--tools/perf/util/config.c5
-rw-r--r--tools/perf/util/debugfs.c35
-rw-r--r--tools/perf/util/debugfs.h31
-rw-r--r--tools/perf/util/event.c360
-rw-r--r--tools/perf/util/event.h68
-rw-r--r--tools/perf/util/evlist.c299
-rw-r--r--tools/perf/util/evlist.h43
-rw-r--r--tools/perf/util/evsel.c164
-rw-r--r--tools/perf/util/evsel.h8
-rw-r--r--tools/perf/util/header.c743
-rw-r--r--tools/perf/util/header.h51
-rw-r--r--tools/perf/util/hist.c10
-rw-r--r--tools/perf/util/hist.h5
-rw-r--r--tools/perf/util/include/linux/bitops.h118
-rw-r--r--tools/perf/util/map.c4
-rw-r--r--tools/perf/util/map.h19
-rw-r--r--tools/perf/util/parse-events.c30
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/probe-finder.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c75
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c4
-rw-r--r--tools/perf/util/session.c346
-rw-r--r--tools/perf/util/session.h72
-rw-r--r--tools/perf/util/setup.py3
-rw-r--r--tools/perf/util/symbol.c11
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/thread.c6
-rw-r--r--tools/perf/util/thread.h14
-rw-r--r--tools/perf/util/tool.h50
-rw-r--r--tools/perf/util/top.h20
-rw-r--r--tools/perf/util/trace-event-info.c28
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/perf/util/trace-event-scripting.c2
-rw-r--r--tools/perf/util/trace-event.h8
-rw-r--r--tools/perf/util/ui/browsers/annotate.c16
-rw-r--r--tools/perf/util/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/ui/progress.c3
-rw-r--r--tools/perf/util/usage.c5
-rw-r--r--tools/perf/util/util.h11
-rw-r--r--tools/perf/util/values.c1
-rw-r--r--tools/power/x86/turbostat/turbostat.88
-rw-r--r--tools/testing/ktest/sample.conf2
-rw-r--r--virt/kvm/assigned-dev.c93
-rw-r--r--virt/kvm/coalesced_mmio.c12
-rw-r--r--virt/kvm/ioapic.c17
-rw-r--r--virt/kvm/iommu.c25
-rw-r--r--virt/kvm/kvm_main.c204
7462 files changed, 326886 insertions, 205666 deletions
diff --git a/CREDITS b/CREDITS
index 07e32a87d95..44fce988eaa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
N: Kees Cook
E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
S: (ask for current address)
+S: Portland, Oregon
S: USA
N: Robin Cornelius
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
new file mode 100644
index 00000000000..3d5951c8bf5
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -0,0 +1,75 @@
+What: /sys/bus/xen-backend/devices/*/devtype
+Date: Feb 2009
+KernelVersion: 2.6.38
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The type of the device, e.g. one of 'vbd' (block),
+ 'vif' (network), or 'vfb' (framebuffer).
+
+What: /sys/bus/xen-backend/devices/*/nodename
+Date: Feb 2009
+KernelVersion: 2.6.38
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ XenStore node (under /local/domain/NNN/) for this
+ backend device.
+
+What: /sys/bus/xen-backend/devices/vbd-*/physical_device
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The major:minor number (in hexadecimal) of the
+ physical device providing the storage for this backend
+ block device.
+
+What: /sys/bus/xen-backend/devices/vbd-*/mode
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Whether the block device is read-only ('r') or
+ read-write ('w').
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/f_req
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of flush requests from the frontend.
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/oo_req
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of requests delayed because the backend was too
+ busy processing previous requests.
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/rd_req
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of read requests from the frontend.
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/rd_sect
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of sectors read by the frontend.
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/wr_req
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of write requests from the frontend.
+
+What: /sys/bus/xen-backend/devices/vbd-*/statistics/wr_sect
+Date: April 2011
+KernelVersion: 3.0
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Number of sectors written by the frontend.
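A minimal usage sketch, assuming a block backend named vbd-1-51712 exists in dom0 (the actual name depends on the frontend domain ID and virtual device number):

	$ cat /sys/bus/xen-backend/devices/vbd-1-51712/mode
	$ cat /sys/bus/xen-backend/devices/vbd-1-51712/statistics/rd_req
	$ cat /sys/bus/xen-backend/devices/vbd-1-51712/statistics/wr_sect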
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
new file mode 100644
index 00000000000..caa311d59ac
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -0,0 +1,77 @@
+What: /sys/devices/system/xen_memory/xen_memory0/max_retry_count
+Date: May 2011
+KernelVersion: 2.6.39
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The maximum number of times the balloon driver will
+ attempt to increase the balloon before giving up. See
+ also 'retry_count' below.
+ A value of zero means retry forever and is the default.
+
+What: /sys/devices/system/xen_memory/xen_memory0/max_schedule_delay
+Date: May 2011
+KernelVersion: 2.6.39
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The limit that 'schedule_delay' (see below) will be
+ increased to. The default value is 32 seconds.
+
+What: /sys/devices/system/xen_memory/xen_memory0/retry_count
+Date: May 2011
+KernelVersion: 2.6.39
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The current number of times that the balloon driver
+ has attempted to increase the size of the balloon.
+ The default value is one. With max_retry_count set to
+ zero (unlimited), the driver will keep retrying, waiting
+ 'schedule_delay' between attempts.
+
+What: /sys/devices/system/xen_memory/xen_memory0/schedule_delay
+Date: May 2011
+KernelVersion: 2.6.39
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The time (in seconds) to wait between attempts to
+ increase the balloon. Each time the balloon cannot be
+ increased, 'schedule_delay' is increased (until
+ 'max_schedule_delay' is reached, at which point the
+ max value is used).
+
+What: /sys/devices/system/xen_memory/xen_memory0/target
+Date: April 2008
+KernelVersion: 2.6.26
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ The target number of pages to adjust this domain's
+ memory reservation to.
+
+What: /sys/devices/system/xen_memory/xen_memory0/target_kb
+Date: April 2008
+KernelVersion: 2.6.26
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Same as 'target' above, except the value is in KiB.
+
+What: /sys/devices/system/xen_memory/xen_memory0/info/current_kb
+Date: April 2008
+KernelVersion: 2.6.26
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Current size (in KiB) of this domain's memory
+ reservation.
+
+What: /sys/devices/system/xen_memory/xen_memory0/info/high_kb
+Date: April 2008
+KernelVersion: 2.6.26
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Amount (in KiB) of high memory in the balloon.
+
+What: /sys/devices/system/xen_memory/xen_memory0/info/low_kb
+Date: April 2008
+KernelVersion: 2.6.26
+Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Description:
+ Amount (in KiB) of low (or normal) memory in the
+ balloon.
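A brief sketch of how these attributes might be used from within a guest; the target of 524288 KiB is an arbitrary example value:

	$ cat /sys/devices/system/xen_memory/xen_memory0/info/current_kb
	# echo 524288 > /sys/devices/system/xen_memory/xen_memory0/target_kb
	$ cat /sys/devices/system/xen_memory/xen_memory0/retry_count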
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 349ecf26ce1..34f51100f02 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -66,6 +66,24 @@ Description:
re-discover previously removed devices.
Depends on CONFIG_HOTPLUG.
+What: /sys/bus/pci/devices/.../msi_irqs/
+Date: September, 2011
+Contact: Neil Horman <nhorman@tuxdriver.com>
+Description:
+ The /sys/devices/.../msi_irqs directory contains a variable set
+ of sub-directories, with each sub-directory being named after a
+ corresponding msi irq vector allocated to that device. Each
+ numbered sub-directory N contains attributes of that irq.
+ Note that this directory is not created for device drivers which
+ do not support MSI irqs.
+
+What: /sys/bus/pci/devices/.../msi_irqs/<N>/mode
+Date: September 2011
+Contact: Neil Horman <nhorman@tuxdriver.com>
+Description:
+ This attribute indicates the mode that the irq vector named by
+ the parent directory is in (msi vs. msix).
+
What: /sys/bus/pci/devices/.../remove
Date: January 2009
Contact: Linux PCI developers <linux-pci@vger.kernel.org>
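For illustration, the per-vector attributes could be inspected as below; the PCI address 0000:00:19.0 is only a placeholder for a device whose driver uses MSI:

	$ ls /sys/bus/pci/devices/0000:00:19.0/msi_irqs/
	$ cat /sys/bus/pci/devices/0000:00:19.0/msi_irqs/*/mode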
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index fa72ccb2282..dbedafb095e 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -57,13 +57,6 @@ create_snap
$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
-rollback_snap
-
- Rolls back data to the specified snapshot. This goes over the entire
- list of rados blocks and sends a rollback command to each.
-
- $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
snap_*
A directory per each snapshot
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index e647378e9e8..b4f548792e3 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -119,6 +119,31 @@ Description:
Write a 1 to force the device to disconnect
(equivalent to unplugging a wired USB device).
+What: /sys/bus/usb/drivers/.../new_id
+Date: October 2011
+Contact: linux-usb@vger.kernel.org
+Description:
+ Writing a device ID to this file will attempt to
+ dynamically add a new device ID to a USB device driver.
+ This may allow the driver to support more hardware than
+ was included in the driver's static device ID support
+ table at compile time. The format for the device ID is:
+ idVendor idProduct bInterfaceClass.
+ The vendor ID and device ID fields are required; the
+ interface class is optional.
+ Upon successfully adding an ID, the driver will probe
+ for the device and attempt to bind to it. For example:
+ # echo "8086 10f5" > /sys/bus/usb/drivers/foo/new_id
+
+What: /sys/bus/usb-serial/drivers/.../new_id
+Date: October 2011
+Contact: linux-usb@vger.kernel.org
+Description:
+ For serial USB drivers, this attribute appears under the
+ extra bus folder "usb-serial" in sysfs; apart from that
+ difference, all descriptions from the entry
+ "/sys/bus/usb/drivers/.../new_id" apply.
+
What: /sys/bus/usb/drivers/.../remove_id
Date: November 2009
Contact: CHENG Renquan <rqcheng@smu.edu.sg>
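By analogy with the new_id example above, a hypothetical usb-serial driver 'foo_serial' could be handed an extra ID the same way (the IDs shown are placeholders):

	# echo "1234 5678" > /sys/bus/usb-serial/drivers/foo_serial/new_id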
diff --git a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
new file mode 100644
index 00000000000..4cf1e72222d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
@@ -0,0 +1,12 @@
+What: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock
+Date: Oct 2011
+KernelVersion: 3.0
+Contact: Mark Godfrey <mark.godfrey@stericsson.com>
+Description: The rtc_calibration attribute allows userspace to
+ calibrate the AB8500's 32KHz Real Time Clock.
+ Every 60 seconds the AB8500 will correct the RTC's value
+ by adding to it the value of this attribute.
+ The range of the attribute is -127 to +127 in units of
+ 30.5 micro-seconds (half-parts-per-million of the 32KHz clock)
+Users: The /vendor/st-ericsson/base_utilities/core/rtc_calibration
+ daemon uses this interface.
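A minimal sketch, assuming the AB8500 RTC is registered as rtc0 and using an arbitrary correction value of -3:

	# echo -3 > /sys/class/rtc/rtc0/device/rtc_calibration
	$ cat /sys/class/rtc/rtc0/device/rtc_calibration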
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-docg3 b/Documentation/ABI/testing/sysfs-devices-platform-docg3
new file mode 100644
index 00000000000..8aa36716882
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-docg3
@@ -0,0 +1,34 @@
+What: /sys/devices/platform/docg3/f[0-3]_dps[01]_is_keylocked
+Date: November 2011
+KernelVersion: 3.3
+Contact: Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+ Show whether protection area (0 or 1) of floor (0 to 3) is
+ keylocked. Each docg3 chip (or floor) has 2 block-aligned
+ protection areas, called DPS, which can cover any part of it.
+ Each DPS embeds information on whether it blocks reads,
+ writes, or both.
+ The result is:
+ 0 -> the DPS is not keylocked
+ 1 -> the DPS is keylocked
+Users: None identified so far.
+
+What: /sys/devices/platform/docg3/f[0-3]_dps[01]_protection_key
+Date: November 2011
+KernelVersion: 3.3
+Contact: Robert Jarzmik <robert.jarzmik@free.fr>
+Description:
+ Enter the protection key for protection area (0 or 1) of floor
+ (0 to 3). Each docg3 chip (or floor) has 2 block-aligned
+ protection areas, called DPS, which can cover any part of it.
+ Each DPS embeds information on whether it blocks reads,
+ writes, or both.
+ The protection key is a string of 8 bytes (value 0-255).
+ Entering the correct value toggles the lock, which can be
+ observed through f[0-3]_dps[01]_is_keylocked.
+ Possible values are:
+ - 8 bytes
+ Typical values are:
+ - "00000000"
+ - "12345678"
+Users: None identified so far.
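As a sketch, locking and checking floor 0, protection area 0 with the typical key from above:

	# echo 00000000 > /sys/devices/platform/docg3/f0_dps0_protection_key
	$ cat /sys/devices/platform/docg3/f0_dps0_is_keylocked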
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
index 9aec8ef228b..167d9032b97 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -1,7 +1,7 @@
What: /sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
Date: July 2011
KernelVersion: 3.2
-Contact: Michal Malý <madcatxster@gmail.com>
+Contact: Michal Malý <madcatxster@gmail.com>
Description: Display minimum, maximum and current range of the steering
wheel. Writing a value within min and max boundaries sets the
range of the wheel.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-multitouch b/Documentation/ABI/testing/sysfs-driver-hid-multitouch
new file mode 100644
index 00000000000..f79839d1af3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-multitouch
@@ -0,0 +1,9 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/quirks
+Date: November 2011
+Contact: Benjamin Tissoires <benjamin.tissoires@gmail.com>
+Description: The integer value of this attribute corresponds to the
+ quirks actually in place to handle the device's protocol.
+ When read, this attribute returns the current settings (see
+ MT_QUIRKS_* in hid-multitouch.c).
+ When written, this attribute changes the quirks on the fly, and
+ thus the protocol used to handle the device.
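+ A hypothetical session ($DEV stands for the full sysfs directory
+ of the hid device; the value written is only an example):
+ # cat $DEV/quirks
+ # echo 4 > $DEV/quirks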
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku b/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
new file mode 100644
index 00000000000..189dc43891b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-isku
@@ -0,0 +1,135 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/actual_profile
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 0-4.
+ When read, this attribute returns the number of the actual
+ profile. This value is persistent, so it's equivalent to the
+ profile that's active when the device is powered on next time.
+ When written, this file sets the number of the startup profile
+ and the device activates this profile immediately.
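+ A hypothetical example, assuming the attribute accepts a plain
+ decimal profile number as described above ($DEV stands for the
+ roccatisku<minor> directory):
+ # echo 2 > $DEV/actual_profile
+ # cat $DEV/actual_profile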
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/info
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns general data like firmware version.
+ The data is 6 bytes long.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/key_mask
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one deactivate certain keys like
+ windows and application keys, to prevent accidental presses.
+ The profile number for which these settings apply is included
+ in the written data. The data has to be 6 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_capslock
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ capslock key for a specific profile. Profile number is included
+ in written data. The data has to be 6 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_easyzone
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ easyzone keys for a specific profile. Profile number is included
+ in written data. The data has to be 65 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_function
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ function keys for a specific profile. Profile number is included
+ in written data. The data has to be 41 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_macro
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the macro
+ keys for a specific profile. Profile number is included in
+ written data. The data has to be 35 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_media
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the media
+ keys for a specific profile. Profile number is included in
+ written data. The data has to be 29 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/keys_thumbster
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the function of the
+ thumbster keys for a specific profile. Profile number is included
+ in written data. The data has to be 23 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/last_set
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the time, in seconds since
+ the epoch, at which the last configuration took place.
+ The data has to be 20 bytes long.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/light
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one set the backlight intensity for
+ a specific profile. Profile number is included in written data.
+ The data has to be 10 bytes long.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/macro
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one store macros with max 500
+ keystrokes for a specific button for a specific profile.
+ Button and profile numbers are included in written data.
+ The data has to be 2083 bytes long.
+ Before reading this file, control has to be written to select
+ which profile and key to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/control
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one select which data from which
+ profile will be read next. The data has to be 3 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/isku/roccatisku<minor>/talk
+Date: June 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one trigger easyshift functionality
+ from the host.
+ The data has to be 16 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-wiimote b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
index 5d5a16ea57c..3d98009f447 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-wiimote
+++ b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
@@ -8,3 +8,15 @@ Contact: David Herrmann <dh.herrmann@googlemail.com>
Description: Make it possible to set/get current led state. Reading from it
returns 0 if led is off and 1 if it is on. Writing 0 to it
disables the led, writing 1 enables it.
+
+What: /sys/bus/hid/drivers/wiimote/<dev>/extension
+Date: August 2011
+KernelVersion: 3.2
+Contact: David Herrmann <dh.herrmann@googlemail.com>
+Description: This file contains the currently connected and initialized
+ extensions. It can be one of: none, motionp, nunchuck, classic,
+ motionp+nunchuck or motionp+classic.
+ motionp is the official Nintendo Motion+ extension, nunchuck is
+ the official Nintendo Nunchuck extension and classic is the
+ Nintendo Classic Controller extension. The motionp extension can
+ be combined with the other two.
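+ For example (the <dev> placeholder is the HID device name; the
+ output shown is just one of the possible values listed above):
+ # cat /sys/bus/hid/drivers/wiimote/<dev>/extension
+ nunchuck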
diff --git a/Documentation/ABI/testing/sysfs-driver-wacom b/Documentation/ABI/testing/sysfs-driver-wacom
index 82d4df13644..0130d6683c1 100644
--- a/Documentation/ABI/testing/sysfs-driver-wacom
+++ b/Documentation/ABI/testing/sysfs-driver-wacom
@@ -15,9 +15,9 @@ Contact: linux-input@vger.kernel.org
Description:
Attribute group for control of the status LEDs and the OLEDs.
This attribute group is only available for Intuos 4 M, L,
- and XL (with LEDs and OLEDs) and Cintiq 21UX2 (LEDs only).
- Therefore its presence implicitly signifies the presence of
- said LEDs and OLEDs on the tablet device.
+ and XL (with LEDs and OLEDs) and Cintiq 21UX2 and Cintiq 24HD
+ (LEDs only). Therefore its presence implicitly signifies the
+ presence of said LEDs and OLEDs on the tablet device.
What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status0_luminance
Date: August 2011
@@ -41,16 +41,17 @@ Date: August 2011
Contact: linux-input@vger.kernel.org
Description:
Writing to this file sets which one of the four (for Intuos 4)
- or of the right four (for Cintiq 21UX2) status LEDs is active (0..3).
- The other three LEDs on the same side are always inactive.
+ or of the right four (for Cintiq 21UX2 and Cintiq 24HD) status
+ LEDs is active (0..3). The other three LEDs on the same side are
+ always inactive.
What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/status_led1_select
Date: September 2011
Contact: linux-input@vger.kernel.org
Description:
- Writing to this file sets which one of the left four (for Cintiq 21UX2)
- status LEDs is active (0..3). The other three LEDs on the left are always
- inactive.
+ Writing to this file sets which one of the left four (for Cintiq 21UX2
+ and Cintiq 24HD) status LEDs is active (0..3). The other three LEDs on
+ the left are always inactive.
What: /sys/bus/usb/devices/<busnum>-<devnum>:<cfg>.<intf>/wacom_led/buttons_luminance
Date: August 2011
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
index 08ff908aa7a..24979f691e3 100644
--- a/Documentation/DocBook/debugobjects.tmpl
+++ b/Documentation/DocBook/debugobjects.tmpl
@@ -96,6 +96,7 @@
<listitem><para>debug_object_deactivate</para></listitem>
<listitem><para>debug_object_destroy</para></listitem>
<listitem><para>debug_object_free</para></listitem>
+ <listitem><para>debug_object_assert_init</para></listitem>
</itemizedlist>
Each of these functions takes the address of the real object and
a pointer to the object type specific debug description
@@ -273,6 +274,26 @@
debug checks.
</para>
</sect1>
+
+ <sect1 id="debug_object_assert_init">
+ <title>debug_object_assert_init</title>
+ <para>
+ This function is called to assert that an object has been
+ initialized.
+ </para>
+ <para>
+ When the real object is not tracked by debugobjects, it calls
+ fixup_assert_init of the object type description structure
+ provided by the caller, with the hardcoded object state
+ ODEBUG_STATE_NOTAVAILABLE. The fixup function can correct the problem
+ by calling debug_object_init and other specific initializing
+ functions.
+ </para>
+ <para>
+ When the real object is already tracked by debugobjects it is
+ ignored.
+ </para>
+ </sect1>
</chapter>
<chapter id="fixupfunctions">
<title>Fixup functions</title>
@@ -381,6 +402,35 @@
statistics.
</para>
</sect1>
+ <sect1 id="fixup_assert_init">
+ <title>fixup_assert_init</title>
+ <para>
+ This function is called from the debug code whenever a problem
+ in debug_object_assert_init is detected.
+ </para>
+ <para>
+ Called from debug_object_assert_init() with a hardcoded state
+ ODEBUG_STATE_NOTAVAILABLE when the object is not found in the
+ debug bucket.
+ </para>
+ <para>
+ The function returns 1 when the fixup was successful,
+ otherwise 0. The return value is used to update the
+ statistics.
+ </para>
+ <para>
+ Note, this function should make sure debug_object_init() is
+ called before returning.
+ </para>
+ <para>
+ The handling of statically initialized objects is a special
+ case. The fixup function should check if this is a legitimate
+ case of a statically initialized object or not. In this case only
+ debug_object_init() should be called to make the object known to
+ the tracker. Then the function should return 0 because this is not
+ a real fixup.
+ </para>
+ </sect1>
</chapter>
<chapter id="bugs">
<title>Known Bugs And Assumptions</title>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 81bc1a9ab9d..f7ade3b3b40 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -275,8 +275,8 @@ versions.
If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
kernel is the current stable kernel.
-2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
-released as needs dictate. The normal release period is approximately
+2.6.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+are released as needs dictate. The normal release period is approximately
two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
instantly.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 0c134f8afc6..bff2d8be1e1 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -328,6 +328,12 @@ over a rather long period of time, but improvements are always welcome!
RCU rather than SRCU, because RCU is almost always faster and
easier to use than is SRCU.
+ If you need to enter your read-side critical section in a
+ hardirq or exception handler, and then exit that same read-side
+ critical section in the task that was interrupted, then you need
+ to use srcu_read_lock_raw() and srcu_read_unlock_raw(), which avoid
+ the lockdep checking that would otherwise make this practice illegal.
+
Also unlike other forms of RCU, explicit initialization
and cleanup is required via init_srcu_struct() and
cleanup_srcu_struct(). These are passed a "struct srcu_struct"
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 31852705b58..bf778332a28 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -38,11 +38,11 @@ o How can the updater tell when a grace period has completed
Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
same effect, but require that the readers manipulate CPU-local
- counters. These counters allow limited types of blocking
- within RCU read-side critical sections. SRCU also uses
- CPU-local counters, and permits general blocking within
- RCU read-side critical sections. These two variants of
- RCU detect grace periods by sampling these counters.
+ counters. These counters allow limited types of blocking within
+ RCU read-side critical sections. SRCU also uses CPU-local
+ counters, and permits general blocking within RCU read-side
+ critical sections. These variants of RCU detect grace periods
+ by sampling these counters.
o If I am running on a uniprocessor kernel, which can only do one
thing at a time, why should I wait for a grace period?
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4e959208f73..083d88cbc08 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -101,6 +101,11 @@ o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
messages.
+o A hardware or software issue shuts off the scheduler-clock
+ interrupt on a CPU that is not in dyntick-idle mode. This
+ problem really has happened, and seems to be most likely to
+ result in RCU CPU stall warnings for CONFIG_NO_HZ=n kernels.
+
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
@@ -109,12 +114,11 @@ o A hardware failure. This is quite unlikely, but has occurred
This resulted in a series of RCU CPU stall warnings, eventually
leading the realization that the CPU had failed.
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall
-warning. SRCU does not have its own CPU stall warnings, but its
-calls to synchronize_sched() will result in RCU-sched detecting
-RCU-sched-related CPU stalls. Please note that RCU only detects
-CPU stalls when there is a grace period in progress. No grace period,
-no CPU stall warnings.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall warnings.
+SRCU does not have its own CPU stall warnings, but its calls to
+synchronize_sched() will result in RCU-sched detecting RCU-sched-related
+CPU stalls. Please note that RCU only detects CPU stalls when there is
+a grace period in progress. No grace period, no CPU stall warnings.
To diagnose the cause of the stall, inspect the stack traces.
The offending function will usually be near the top of the stack.
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 783d6c134d3..d67068d0d2b 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -61,11 +61,24 @@ nreaders This is the number of RCU reading threads supported.
To properly exercise RCU implementations with preemptible
read-side critical sections.
+onoff_interval
+ The number of seconds between each attempt to execute a
+ randomly selected CPU-hotplug operation. Defaults to
+ zero, which disables CPU hotplugging. In HOTPLUG_CPU=n
+ kernels, rcutorture will silently refuse to do any
+ CPU-hotplug operations regardless of what value is
+ specified for onoff_interval.
+
shuffle_interval
The number of seconds to keep the test threads affinitied
to a particular subset of the CPUs, defaults to 3 seconds.
Used in conjunction with test_no_idle_hz.
+shutdown_secs The number of seconds to run the test before terminating
+ the test and powering off the system. The default is
+ zero, which disables test termination and system shutdown.
+ This capability is useful for automated testing.
+
stat_interval The number of seconds between output of torture
statistics (via printk()). Regardless of the interval,
statistics are printed when the module is unloaded.
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index aaf65f6c6cd..49587abfc2f 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -105,14 +105,10 @@ o "dt" is the current value of the dyntick counter that is incremented
or one greater than the interrupt-nesting depth otherwise.
The number after the second "/" is the NMI nesting depth.
- This field is displayed only for CONFIG_NO_HZ kernels.
-
o "df" is the number of times that some other CPU has forced a
quiescent state on behalf of this CPU due to this CPU being in
dynticks-idle state.
- This field is displayed only for CONFIG_NO_HZ kernels.
-
o "of" is the number of times that some other CPU has forced a
quiescent state on behalf of this CPU due to this CPU being
offline. In a perfect world, this might never happen, but it
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 6ef692667e2..6bbe8dcdc3d 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -4,6 +4,7 @@ to start learning about RCU:
1. What is RCU, Fundamentally? http://lwn.net/Articles/262464/
2. What is RCU? Part 2: Usage http://lwn.net/Articles/263130/
3. RCU part 3: the RCU API http://lwn.net/Articles/264090/
+4. The RCU API, 2010 Edition http://lwn.net/Articles/418853/
What is RCU?
@@ -834,6 +835,8 @@ SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu N/A
srcu_read_unlock synchronize_srcu_expedited
+ srcu_read_lock_raw
+ srcu_read_unlock_raw
srcu_dereference
SRCU: Initialization/cleanup
@@ -855,27 +858,33 @@ list can be helpful:
a. Will readers need to block? If so, you need SRCU.
-b. What about the -rt patchset? If readers would need to block
+b. Is it necessary to start a read-side critical section in a
+ hardirq handler or exception handler, and then to complete
+ this read-side critical section in the task that was
+ interrupted? If so, you need SRCU's srcu_read_lock_raw() and
+ srcu_read_unlock_raw() primitives.
+
+c. What about the -rt patchset? If readers would need to block
in an non-rt kernel, you need SRCU. If readers would block
in a -rt kernel, but not in a non-rt kernel, SRCU is not
necessary.
-c. Do you need to treat NMI handlers, hardirq handlers,
+d. Do you need to treat NMI handlers, hardirq handlers,
and code segments with preemption disabled (whether
via preempt_disable(), local_irq_save(), local_bh_disable(),
or some other mechanism) as if they were explicit RCU readers?
If so, you need RCU-sched.
-d. Do you need RCU grace periods to complete even in the face
+e. Do you need RCU grace periods to complete even in the face
of softirq monopolization of one or more of the CPUs? For
example, is your code subject to network-based denial-of-service
attacks? If so, you need RCU-bh.
-e. Is your workload too update-intensive for normal use of
+f. Is your workload too update-intensive for normal use of
RCU, but inappropriate for other synchronization mechanisms?
If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
-f. Otherwise, use RCU.
+g. Otherwise, use RCU.
Of course, this all assumes that you have determined that RCU is in fact
the right tool for your job.
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 771d48d3b33..208a2d465b9 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -51,15 +51,14 @@ ffc00000 ffefffff DMA memory mapping region. Memory returned
ff000000 ffbfffff Reserved for future expansion of DMA
mapping region.
-VMALLOC_END feffffff Free for platform use, recommended.
- VMALLOC_END must be aligned to a 2MB
- boundary.
-
VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
Memory returned by vmalloc/ioremap will
be dynamically placed in this region.
- VMALLOC_START may be based upon the value
- of the high_memory variable.
+ Machine specific static mappings are also
+ located here through iotable_init().
+ VMALLOC_START is based upon the value
+ of the high_memory variable, and VMALLOC_END
+ is equal to 0xff000000.
PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
This maps the platforms RAM, and typically
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 3bd585b4492..27f2b21a9d5 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -84,6 +84,93 @@ compiler optimizes the section accessing atomic_t variables.
*** YOU HAVE BEEN WARNED! ***
+Properly aligned pointers, longs, ints, and chars (and unsigned
+equivalents) may be atomically loaded from and stored to in the same
+sense as described for atomic_read() and atomic_set(). The ACCESS_ONCE()
+macro should be used to prevent the compiler from using optimizations
+that might otherwise optimize accesses out of existence on the one hand,
+or that might create unsolicited accesses on the other.
+
+For example consider the following code:
+
+ while (a > 0)
+ do_something();
+
+If the compiler can prove that do_something() does not store to the
+variable a, then the compiler is within its rights to transform this into
+the following:
+
+ tmp = a;
+ if (tmp > 0)
+ for (;;)
+ do_something();
+
+If you don't want the compiler to do this (and you probably don't), then
+you should use something like the following:
+
+ while (ACCESS_ONCE(a) > 0)
+ do_something();
+
+Alternatively, you could place a barrier() call in the loop.
+
+For another example, consider the following code:
+
+ tmp_a = a;
+ do_something_with(tmp_a);
+ do_something_else_with(tmp_a);
+
+If the compiler can prove that do_something_with() does not store to the
+variable a, then the compiler is within its rights to manufacture an
+additional load as follows:
+
+ tmp_a = a;
+ do_something_with(tmp_a);
+ tmp_a = a;
+ do_something_else_with(tmp_a);
+
+This could fatally confuse your code if it expected the same value
+to be passed to do_something_with() and do_something_else_with().
+
+The compiler would be likely to manufacture this additional load if
+do_something_with() was an inline function that made very heavy use
+of registers: reloading from variable a could save a flush to the
+stack and later reload. To prevent the compiler from attacking your
+code in this manner, write the following:
+
+ tmp_a = ACCESS_ONCE(a);
+ do_something_with(tmp_a);
+ do_something_else_with(tmp_a);
+
+For a final example, consider the following code, assuming that the
+variable a is set at boot time before the second CPU is brought online
+and never changed later, so that memory barriers are not needed:
+
+ if (a)
+ b = 9;
+ else
+ b = 42;
+
+The compiler is within its rights to manufacture an additional store
+by transforming the above code into the following:
+
+ b = 42;
+ if (a)
+ b = 9;
+
+This could come as a fatal surprise to other code running concurrently
+that expected b to never have the value 42 if a was zero. To prevent
+the compiler from doing this, write something like:
+
+ if (a)
+ ACCESS_ONCE(b) = 9;
+ else
+ ACCESS_ONCE(b) = 42;
+
+Don't even -think- about doing this without proper use of memory barriers,
+locks, or atomic operations if variable a can change at runtime!
+
+*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! ***
+
Now, we move onto the atomic operation interfaces typically implemented with
the help of assembly code.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 9c452ef2328..a7c96ae5557 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -594,53 +594,44 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
called multiple times against a cgroup.
int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *task)
+ struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
-Called prior to moving a task into a cgroup; if the subsystem
-returns an error, this will abort the attach operation. If a NULL
-task is passed, then a successful result indicates that *any*
-unspecified task can be moved into the cgroup. Note that this isn't
-called on a fork. If this method returns 0 (success) then this should
-remain valid while the caller holds cgroup_mutex and it is ensured that either
+Called prior to moving one or more tasks into a cgroup; if the
+subsystem returns an error, this will abort the attach operation.
+@tset contains the tasks to be attached and is guaranteed to have at
+least one task in it.
+
+If there are multiple tasks in the taskset, then:
+ - it's guaranteed that all are from the same thread group
+ - @tset contains all tasks from the thread group whether or not
+ they're switching cgroups
+ - the first task is the leader
+
+Each @tset entry also contains the task's old cgroup, and tasks which
+aren't switching cgroups can be skipped easily using the
+cgroup_taskset_for_each() iterator. Note that this isn't called on a
+fork. If this method returns 0 (success) then this should remain valid
+while the caller holds cgroup_mutex and it is ensured that either
attach() or cancel_attach() will be called in future.
-int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
-(cgroup_mutex held by caller)
-
-As can_attach, but for operations that must be run once per task to be
-attached (possibly many when using cgroup_attach_proc). Called after
-can_attach.
-
void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *task, bool threadgroup)
+ struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
Called when a task attach operation has failed after can_attach() has succeeded.
A subsystem whose can_attach() has some side-effects should provide this
function, so that the subsystem can implement a rollback. If not, not necessary.
This will be called only about subsystems whose can_attach() operation have
-succeeded.
-
-void pre_attach(struct cgroup *cgrp);
-(cgroup_mutex held by caller)
-
-For any non-per-thread attachment work that needs to happen before
-attach_task. Needed by cpuset.
+succeeded. The parameters are identical to can_attach().
void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cgrp, struct task_struct *task)
+ struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
Called after the task has been attached to the cgroup, to allow any
post-attachment activity that requires memory allocations or blocking.
-
-void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
-(cgroup_mutex held by caller)
-
-As attach, but for operations that must be run once per task to be attached,
-like can_attach_task. Called before attach. Currently does not support any
-subsystem that might need the old_cgrp for every thread in the group.
+The parameters are identical to can_attach().
void fork(struct cgroup_subsy *ss, struct task_struct *task)
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index cc0ebc5241b..4d8774f6f48 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -44,8 +44,8 @@ Features:
- oom-killer disable knob and oom-notifier
- Root cgroup has no limit controls.
- Kernel memory and Hugepages are not under control yet. We just manage
- pages on LRU. To add more controls, we have to take care of performance.
+ Kernel memory support is work in progress, and the current version provides
+ basic functionality. (See Section 2.7)
Brief summary of control files.
@@ -72,6 +72,9 @@ Brief summary of control files.
memory.oom_control # set/show oom controls.
memory.numa_stat # show the number of memory usage per numa node
+ memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory
+ memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation
+
1. History
The memory controller has a long history. A request for comments for the memory
@@ -255,6 +258,27 @@ When oom event notifier is registered, event will be delivered.
per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
zone->lru_lock, it has no lock of its own.
+2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+
+With the Kernel memory extension, the Memory Controller is able to limit
+the amount of kernel memory used by the system. Kernel memory is fundamentally
+different from user memory, since it can't be swapped out, which makes it
+possible to DoS the system by consuming too much of this precious resource.
+
+Kernel memory limits are not imposed for the root cgroup. Usage for the root
+cgroup may or may not be accounted.
+
+Currently no soft limit is implemented for kernel memory. It is future work
+to trigger slab reclaim when those limits are reached.
+
+2.7.1 Current Kernel Memory resources accounted
+
+* sockets memory pressure: some sockets protocols have memory pressure
+thresholds. The Memory Controller allows them to be controlled individually
+per cgroup, instead of globally.
+
+* tcp memory pressure: sockets memory pressure for the tcp protocol.
+
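+A hypothetical example, assuming the memory controller is mounted at
+/sys/fs/cgroup/memory and that the usual size suffixes of the other
+limit files are accepted:
+
+# echo 16M > /sys/fs/cgroup/memory/foo/memory.kmem.tcp.limit_in_bytes
+# cat /sys/fs/cgroup/memory/foo/memory.kmem.tcp.usage_in_bytes
+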
3. User Interface
0. Configuration
diff --git a/Documentation/cgroups/net_prio.txt b/Documentation/cgroups/net_prio.txt
new file mode 100644
index 00000000000..01b32263559
--- /dev/null
+++ b/Documentation/cgroups/net_prio.txt
@@ -0,0 +1,53 @@
+Network priority cgroup
+-------------------------
+
+The Network priority cgroup provides an interface to allow an administrator to
+dynamically set the priority of network traffic generated by various
+applications.
+
+Nominally, an application would set the priority of its traffic via the
+SO_PRIORITY socket option. This, however, is not always possible because:
+
+1) The application may not have been coded to set this value
+2) The priority of application traffic is often a site-specific administrative
+ decision rather than an application defined one.
+
+This cgroup allows an administrator to assign a process to a group which defines
+the priority of egress traffic on a given interface. Network priority groups can
+be created by first mounting the cgroup filesystem.
+
+# mount -t cgroup -onet_prio none /sys/fs/cgroup/net_prio
+
+With the above step, the initial group acting as the parent accounting group
+becomes visible at '/sys/fs/cgroup/net_prio'. This group includes all tasks in
+the system. '/sys/fs/cgroup/net_prio/tasks' lists the tasks in this cgroup.
+
+Each net_prio cgroup contains two files that are subsystem specific
+
+net_prio.prioidx
+This file is read-only, and is simply informative. It contains a unique integer
+value that the kernel uses as an internal representation of this cgroup.
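+For example (the cgroup name and the value shown are illustrative):
+
+# cat /sys/fs/cgroup/net_prio/iscsi/net_prio.prioidx
+3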
+
+net_prio.ifpriomap
+This file contains a map of the priorities assigned to traffic originating from
+processes in this group and egressing the system on various interfaces. It
+contains a list of tuples in the form <ifname priority>. Contents of this file
+can be modified by echoing a string into the file using the same tuple format.
+for example:
+
+echo "eth0 5" > /sys/fs/cgroups/net_prio/iscsi/net_prio.ifpriomap
+
+This command would set the priority of any traffic originating from processes
+belonging to the iscsi net_prio cgroup and egressing on interface eth0 to the
+value 5. The parent accounting group also has a
+writeable 'net_prio.ifpriomap' file that can be used to set a system default
+priority.
+
+Priorities are set immediately prior to queueing a frame to the device
+queueing discipline (qdisc), so priorities will be assigned prior to the hardware
+queue selection being made.
+
+One usage for the net_prio cgroup is with the mqprio qdisc, allowing application
+traffic to be steered to hardware/driver based traffic classes. These mappings
+can then be managed by administrators or other networking protocols such as
+DCBX.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index d221781daba..c7a2eb8450c 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -127,7 +127,7 @@ in the bash (as said, 1000 is default), do:
echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
>ondemand/sampling_rate
-show_sampling_rate_min:
+sampling_rate_min:
The sampling rate is limited by the HW transition latency:
transition_latency * 100
Or by kernel restrictions:
@@ -140,8 +140,6 @@ HZ=100: min=200000us (200ms)
The highest value of kernel and HW latency restrictions is shown and
used as the minimum sampling rate.
-show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
-
up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
index 903a2546f13..8a48c9b6286 100644
--- a/Documentation/development-process/5.Posting
+++ b/Documentation/development-process/5.Posting
@@ -271,10 +271,10 @@ copies should go to:
the linux-kernel list.
- If you are fixing a bug, think about whether the fix should go into the
- next stable update. If so, stable@kernel.org should get a copy of the
- patch. Also add a "Cc: stable@kernel.org" to the tags within the patch
- itself; that will cause the stable team to get a notification when your
- fix goes into the mainline.
+ next stable update. If so, stable@vger.kernel.org should get a copy of
+ the patch. Also add a "Cc: stable@vger.kernel.org" to the tags within
+ the patch itself; that will cause the stable team to get a notification
+ when your fix goes into the mainline.
When selecting recipients for a patch, it is good to have an idea of who
you think will eventually accept the patch and get it merged. While it
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index eccffe71522..cec8864ce4e 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -379,7 +379,7 @@ Your cooperation is appreciated.
162 = /dev/smbus System Management Bus
163 = /dev/lik Logitech Internet Keyboard
164 = /dev/ipmo Intel Intelligent Platform Management
- 165 = /dev/vmmon VMWare virtual machine monitor
+ 165 = /dev/vmmon VMware virtual machine monitor
166 = /dev/i2o/ctl I2O configuration manager
167 = /dev/specialix_sxctl Specialix serial control
168 = /dev/tcldrv Technology Concepts serial control
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index c9848ad0e2e..54bdddadf1c 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -21,6 +21,10 @@ i.MX53 Smart Mobile Reference Design Board
Required root node properties:
- compatible = "fsl,imx53-smd", "fsl,imx53";
-i.MX6 Quad SABRE Automotive Board
+i.MX6 Quad Armadillo2 Board
Required root node properties:
- - compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+ - compatible = "fsl,imx6q-arm2", "fsl,imx6q";
+
+i.MX6 Quad SABRE Lite Board
+Required root node properties:
+ - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt
index 52916b4aa1f..9b4b82a721b 100644
--- a/Documentation/devicetree/bindings/arm/gic.txt
+++ b/Documentation/devicetree/bindings/arm/gic.txt
@@ -42,6 +42,10 @@ Optional
- interrupts : Interrupt source of the parent interrupt controller. Only
present on secondary GICs.
+- cpu-offset : per-cpu offset within the distributor and cpu interface
+ regions, used when the GIC doesn't have banked registers. The offset is
+ cpu-offset * cpu-nr.
+
Example:
intc: interrupt-controller@fff11000 {
diff --git a/Documentation/devicetree/bindings/arm/insignal-boards.txt b/Documentation/devicetree/bindings/arm/insignal-boards.txt
new file mode 100644
index 00000000000..524c3dc5d80
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/insignal-boards.txt
@@ -0,0 +1,8 @@
+* Insignal's Exynos4210 based Origen evaluation board
+
+Origen low-cost evaluation board is based on Samsung's Exynos4210 SoC.
+
+Required root node properties:
+ - compatible = should be one or more of the following.
+ (a) "samsung,smdkv310" - for Samsung's SMDKV310 eval board.
+ (b) "samsung,exynos4210" - for boards based on Exynos4210 SoC.
diff --git a/Documentation/devicetree/bindings/arm/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung-boards.txt
new file mode 100644
index 00000000000..0bf68be56fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung-boards.txt
@@ -0,0 +1,8 @@
+* Samsung's Exynos4210 based SMDKV310 evaluation board
+
+SMDKV310 evaluation board is based on Samsung's Exynos4210 SoC.
+
+Required root node properties:
+ - compatible = should be one or more of the following.
+ (a) "samsung,smdkv310" - for Samsung's SMDKV310 eval board.
+ (b) "samsung,exynos4210" - for boards based on Exynos4210 SoC.
diff --git a/Documentation/devicetree/bindings/arm/tegra.txt b/Documentation/devicetree/bindings/arm/tegra.txt
new file mode 100644
index 00000000000..6e69d2e5e76
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra.txt
@@ -0,0 +1,14 @@
+NVIDIA Tegra device tree bindings
+-------------------------------------------
+
+Boards with the tegra20 SoC shall have the following properties:
+
+Required root node property:
+
+compatible = "nvidia,tegra20";
+
+Boards with the tegra30 SoC shall have the following properties:
+
+Required root node property:
+
+compatible = "nvidia,tegra30";
diff --git a/Documentation/devicetree/bindings/arm/vic.txt b/Documentation/devicetree/bindings/arm/vic.txt
new file mode 100644
index 00000000000..266716b2343
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/vic.txt
@@ -0,0 +1,29 @@
+* ARM Vectored Interrupt Controller
+
+One or more Vectored Interrupt Controllers (VICs) can be connected in an ARM
+system for interrupt routing. When there are multiple controllers, they can
+either be nested or have their outputs wire-OR'd together.
+
+Required properties:
+
+- compatible : should be one of
+ "arm,pl190-vic"
+ "arm,pl192-vic"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : The number of cells to define the interrupts. Must be 1 as
+ the VIC has no configuration options for interrupt sources. The cell is a u32
+ and defines the interrupt number.
+- reg : The register bank for the VIC.
+
+Optional properties:
+
+- interrupts : Interrupt source for parent controllers if the VIC is nested.
+
+Example:
+
+ vic0: interrupt-controller@60000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x60000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/c6x/clocks.txt b/Documentation/devicetree/bindings/c6x/clocks.txt
new file mode 100644
index 00000000000..a04f5fd3012
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/clocks.txt
@@ -0,0 +1,40 @@
+C6X PLL Clock Controllers
+-------------------------
+
+This is a first-cut support for the SoC clock controllers. This is still
+under development and will probably change as the common device tree
+clock support is added to the kernel.
+
+Required properties:
+
+- compatible: "ti,c64x+pll"
+ May also have SoC-specific value to support SoC-specific initialization
+ in the driver. One of:
+ "ti,c6455-pll"
+ "ti,c6457-pll"
+ "ti,c6472-pll"
+ "ti,c6474-pll"
+
+- reg: base address and size of register area
+- clock-frequency: input clock frequency in hz
+
+
+Optional properties:
+
+- ti,c64x+pll-bypass-delay: CPU cycles to delay when entering bypass mode
+
+- ti,c64x+pll-reset-delay: CPU cycles to delay after PLL reset
+
+- ti,c64x+pll-lock-delay: CPU cycles to delay after PLL frequency change
+
+Example:
+
+ clock-controller@29a0000 {
+ compatible = "ti,c6472-pll", "ti,c64x+pll";
+ reg = <0x029a0000 0x200>;
+ clock-frequency = <25000000>;
+
+ ti,c64x+pll-bypass-delay = <200>;
+ ti,c64x+pll-reset-delay = <12000>;
+ ti,c64x+pll-lock-delay = <80000>;
+ };
diff --git a/Documentation/devicetree/bindings/c6x/dscr.txt b/Documentation/devicetree/bindings/c6x/dscr.txt
new file mode 100644
index 00000000000..d847758f2b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/dscr.txt
@@ -0,0 +1,127 @@
+Device State Configuration Registers
+------------------------------------
+
+TI C6X SoCs contain a region of miscellaneous registers which provide various
+functions for SoC control or status. Details vary considerably from SoC
+to SoC, with no two being alike.
+
+In general, the Device State Configuration Registers (DSCR) provide one or
+more configuration registers, often protected by a lock register, where one or
+more key values must be written to the lock register in order to unlock the
+configuration register for writes. These configuration registers may be used to
+enable (and in some cases disable) SoC pin drivers, select peripheral clock
+sources (internal or pin), etc. In some cases, a configuration register is
+write-once or its individual bits are write-once. In addition to device
+configuration, the DSCR block may provide registers which are used to reset
+peripherals, provide device ID information, provide ethernet MAC addresses, as
+well as other miscellaneous functions.
+
+For device state control (enable/disable), each device control is assigned an
+id which is used by individual device drivers to control the state as needed.
+
+Required properties:
+
+- compatible: must be "ti,c64x+dscr"
+- reg: register area base and size
+
+Optional properties:
+
+ NOTE: These are optional in that not all SoCs will have all properties. For
+ SoCs which do support a given property, leaving the property out of the
+ device tree will result in reduced functionality or possibly driver
+ failure.
+
+- ti,dscr-devstat
+ offset of the devstat register
+
+- ti,dscr-silicon-rev
+ offset, start bit, and bitsize of silicon revision field
+
+- ti,dscr-rmii-resets
+ offset and bitmask of RMII reset field. May have multiple tuples if more
+ than one ethernet port is available.
+
+- ti,dscr-locked-regs
+ possibly multiple tuples describing registers which are write protected by
+ a lock register. Each tuple consists of the register offset, lock register
+ offset, and the key value used to unlock the register.
+
+- ti,dscr-kick-regs
+ offset and key values of two "kick" registers used to write protect other
+ registers in DSCR. On SoCs using kick registers, the first key must be
+ written to the first kick register and the second key must be written to
+ the second register before other registers in the area are write-enabled.
+
+- ti,dscr-mac-fuse-regs
+ MAC addresses are contained in two registers. Each element of a MAC address
+ is contained in a single byte. This property has two tuples. Each tuple has
+ a register offset and four cells representing bytes in the register from
+ most significant to least. The value of these four cells is the MAC byte
+ index (1-6) of the byte within the register. A value of 0 means the byte
+ is unused in the MAC address.
+
+- ti,dscr-devstate-ctl-regs
+ This property describes the bitfields used to control the state of devices.
+ Each tuple describes a range of identical bitfields used to control one or
+ more devices (one bitfield per device). The layout of each tuple is:
+
+ start_id num_ids reg enable disable start_bit nbits
+
+ Where:
+ start_id is device id for the first device control in the range
+ num_ids is the number of device controls in the range
+ reg is the offset of the register holding the control bits
+ enable is the value to enable a device
+ disable is the value to disable a device (0xffffffff if cannot disable)
+ start_bit is the bit number of the first bit in the range
+ nbits is the number of bits per device control
+
+- ti,dscr-devstate-stat-regs
+ This property describes the bitfields used to provide device state status
+ for device states controlled by the DSCR. Each tuple describes a range of
+ identical bitfields used to provide status for one or more devices (one
+ bitfield per device). The layout of each tuple is:
+
+ start_id num_ids reg enable disable start_bit nbits
+
+ Where:
+ start_id is device id for the first device status in the range
+ num_ids is the number of devices covered by the range
+ reg is the offset of the register holding the status bits
+ enable is the value indicating device is enabled
+ disable is the value indicating device is disabled
+ start_bit is the bit number of the first bit in the range
+ nbits is the number of bits per device status
+
+- ti,dscr-privperm
+ Offset and default value for register used to set access privilege for
+ some SoC devices.
+
+
+Example:
+
+ device-state-config-regs@2a80000 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02a80000 0x41000>;
+
+ ti,dscr-devstat = <0>;
+ ti,dscr-silicon-rev = <8 28 0xf>;
+ ti,dscr-rmii-resets = <0x40020 0x00040000>;
+
+ ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
+ ti,dscr-devstate-ctl-regs =
+ <0 12 0x40008 1 0 0 2
+ 12 1 0x40008 3 0 30 2
+ 13 2 0x4002c 1 0xffffffff 0 1>;
+ ti,dscr-devstate-stat-regs =
+ <0 10 0x40014 1 0 0 3
+ 10 2 0x40018 1 0 0 3>;
+
+ ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
+ 0x704 5 6 0 0>;
+
+ ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
+
+ ti,dscr-kick-regs = <0x38 0x83E70B13
+ 0x3c 0x95A4F1E0>;
+ };
diff --git a/Documentation/devicetree/bindings/c6x/emifa.txt b/Documentation/devicetree/bindings/c6x/emifa.txt
new file mode 100644
index 00000000000..0ff6e9b9a13
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/emifa.txt
@@ -0,0 +1,62 @@
+External Memory Interface
+-------------------------
+
+The emifa node describes a simple external bus controller found on some C6X
+SoCs. This interface provides external busses with a number of chip selects.
+
+Required properties:
+
+- compatible: must be "ti,c64x+emifa", "simple-bus"
+- reg: register area base and size
+- #address-cells: must be 2 (chip-select + offset)
+- #size-cells: must be 1
+- ranges: mapping from EMIFA space to parent space
+
+
+Optional properties:
+
+- ti,dscr-dev-enable: Device ID if EMIF is enabled/disabled from DSCR
+
+- ti,emifa-burst-priority:
+ Number of memory transfers after which the EMIF will elevate the priority
+ of the oldest command in the command FIFO. Setting this field to 255
+ disables this feature, thereby allowing old commands to stay in the FIFO
+ indefinitely.
+
+- ti,emifa-ce-config:
+ Configuration values for each of the supported chip selects.
+
+Example:
+
+ emifa@70000000 {
+ compatible = "ti,c64x+emifa", "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0x70000000 0x100>;
+ ranges = <0x2 0x0 0xa0000000 0x00000008
+ 0x3 0x0 0xb0000000 0x00400000
+ 0x4 0x0 0xc0000000 0x10000000
+ 0x5 0x0 0xD0000000 0x10000000>;
+
+ ti,dscr-dev-enable = <13>;
+ ti,emifa-burst-priority = <255>;
+ ti,emifa-ce-config = <0x00240120
+ 0x00240120
+ 0x00240122
+ 0x00240122>;
+
+ flash@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x3 0x0 0x400000>;
+ bank-width = <1>;
+ device-width = <1>;
+ partition@0 {
+ reg = <0x0 0x400000>;
+ label = "NOR";
+ };
+ };
+ };
+
+This shows a flash chip attached to chip select 3.
diff --git a/Documentation/devicetree/bindings/c6x/interrupt.txt b/Documentation/devicetree/bindings/c6x/interrupt.txt
new file mode 100644
index 00000000000..42bb796cc4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/interrupt.txt
@@ -0,0 +1,104 @@
+C6X Interrupt Chips
+-------------------
+
+* C64X+ Core Interrupt Controller
+
+ The core interrupt controller provides 16 prioritized interrupts to the
+ C64X+ core. Priority 0 and 1 are used for reset and NMI respectively.
+ Priority 2 and 3 are reserved. Priority 4-15 are used for interrupt
+ sources coming from outside the core.
+
+ Required properties:
+ --------------------
+ - compatible: Should be "ti,c64x+core-pic";
+ - #interrupt-cells: <1>
+
+ Interrupt Specifier Definition
+ ------------------------------
+ Single cell specifying the core interrupt priority level (4-15) where
+ 4 is highest priority and 15 is lowest priority.
+
+ Example
+ -------
+ core_pic: interrupt-controller@0 {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ compatible = "ti,c64x+core-pic";
+ };
+
+
+
+* C64x+ Megamodule Interrupt Controller
+
+ The megamodule PIC consists of four interrupt multiplexers, each of which
+ combine up to 32 interrupt inputs into a single interrupt output which
+ may be cascaded into the core interrupt controller. The megamodule PIC
+ has a total of 12 outputs cascading into the core interrupt controller.
+ One for each core interrupt priority level. In addition to the combined
+ interrupt sources, individual megamodule interrupts may be cascaded to
+ the core interrupt controller. When an individual interrupt is cascaded,
+ it is no longer handled through a megamodule interrupt combiner and is
+ considered to have the core interrupt controller as the parent.
+
+ Required properties:
+ --------------------
+ - compatible: "ti,c64x+megamod-pic"
+ - interrupt-controller
+ - #interrupt-cells: <1>
+ - reg: base address and size of register area
+ - interrupt-parent: must be core interrupt controller
+ - interrupts: This should have four cells; one for each interrupt combiner.
+ The cells contain the core priority interrupt to which the
+ corresponding combiner output is wired.
+
+ Optional properties:
+ --------------------
+ - ti,c64x+megamod-pic-mux: Array of 12 cells corresponding to the 12 core
+ priority interrupts. The first cell corresponds to
+ core priority 4 and the last cell corresponds to
+ core priority 15. The value of each cell is the
+ megamodule interrupt source which is MUXed to
+ the core interrupt corresponding to the cell
+ position. Allowed values are 4 - 127. Mapping for
+ interrupts 0 - 3 (combined interrupt sources) are
+ ignored.
+
+ Interrupt Specifier Definition
+ ------------------------------
+ Single cell specifying the megamodule interrupt source (4-127). Note that
+ interrupts mapped directly to the core with "ti,c64x+megamod-pic-mux" will
+ use the core interrupt controller as their parent and the specifier will
+ be the core priority level, not the megamodule interrupt number.
+
+ Examples
+ --------
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ interrupts = < 12 13 14 15 >;
+ };
+
+ This is a minimal example where all individual interrupts go through a
+ combiner. Combiner-0 is mapped to core interrupt 12, combiner-1 is mapped
+ to interrupt 13, etc.
+
+
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ interrupts = < 12 13 14 15 >;
+ ti,c64x+megamod-pic-mux = < 0 0 0 0
+ 32 0 0 0
+ 0 0 0 0 >;
+ };
+
+ This is the same as the first example except that megamodule interrupt 32 is
+ mapped directly to core priority interrupt 8. The node using this interrupt
+ must set the core controller as its interrupt parent and use 8 in the
+ interrupt specifier value.
diff --git a/Documentation/devicetree/bindings/c6x/soc.txt b/Documentation/devicetree/bindings/c6x/soc.txt
new file mode 100644
index 00000000000..b1e4973b576
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/soc.txt
@@ -0,0 +1,28 @@
+C6X System-on-Chip
+------------------
+
+Required properties:
+
+- compatible: "simple-bus"
+- #address-cells: must be 1
+- #size-cells: must be 1
+- ranges
+
+Optional properties:
+
+- model: specific SoC model
+
+- nodes for IP blocks within SoC
+
+
+Example:
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6455";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/c6x/timer64.txt b/Documentation/devicetree/bindings/c6x/timer64.txt
new file mode 100644
index 00000000000..95911fe7022
--- /dev/null
+++ b/Documentation/devicetree/bindings/c6x/timer64.txt
@@ -0,0 +1,26 @@
+Timer64
+-------
+
+The timer64 node describes C6X event timers.
+
+Required properties:
+
+- compatible: must be "ti,c64x+timer64"
+- reg: base address and size of register region
+- interrupt-parent: interrupt controller
+- interrupts: interrupt id
+
+Optional properties:
+
+- ti,dscr-dev-enable: Device ID used to enable timer IP through DSCR interface.
+
+- ti,core-mask: on multi-core SoCs, bitmask of cores allowed to use this timer.
+
+Example:
+ timer0: timer@25e0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x01 >;
+ reg = <0x25e0000 0x40>;
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
new file mode 100644
index 00000000000..a4cd273b2a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -0,0 +1,30 @@
+* ARM PrimeCell PL330 DMA Controller
+
+The ARM PrimeCell PL330 DMA controller can move blocks of memory contents
+between memory and peripherals or memory to memory.
+
+Required properties:
+ - compatible: should include both "arm,pl330" and "arm,primecell".
+ - reg: physical base address of the controller and length of memory mapped
+ region.
+ - interrupts: interrupt number to the cpu.
+
+Example:
+
+ pdma0: pdma@12680000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x12680000 0x1000>;
+ interrupts = <99>;
+ };
+
+Client drivers (device nodes requiring dma transfers from dev-to-mem or
+mem-to-dev) should specify the DMA channel numbers using a two-value pair
+as shown below.
+
+ [property name] = <[phandle of the dma controller] [dma request id]>;
+
+ where 'dma request id' is the dma request number which is connected
+ to the client controller. The 'property name' is recommended to be
+ of the form <name>-dma-channel.
+
+ Example: tx-dma-channel = <&pdma0 12>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-samsung.txt b/Documentation/devicetree/bindings/gpio/gpio-samsung.txt
new file mode 100644
index 00000000000..8f50fe5e6c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-samsung.txt
@@ -0,0 +1,40 @@
+Samsung Exynos4 GPIO Controller
+
+Required properties:
+- compatible: Compatible property value should be "samsung,exynos4-gpio".
+
+- reg: Physical base address of the controller and length of memory mapped
+ region.
+
+- #gpio-cells: Should be 4. The syntax of the gpio specifier used by client nodes
+ should be the following with values derived from the SoC user manual.
+ <[phandle of the gpio controller node]
+ [pin number within the gpio controller]
+ [mux function]
+ [pull up/down]
+ [drive strength]>
+
+ Values for gpio specifier:
+ - Pin number: a value between 0 and 7.
+ - Pull Up/Down: 0 - Pull Up/Down Disabled.
+ 1 - Pull Down Enabled.
+ 3 - Pull Up Enabled.
+ - Drive Strength: 0 - 1x,
+ 1 - 3x,
+ 2 - 2x,
+ 3 - 4x
+
+- gpio-controller: Specifies that the node is a gpio controller.
+- #address-cells: should be 1.
+- #size-cells: should be 1.
+
+Example:
+
+ gpa0: gpio-controller@11400000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400000 0x20>;
+ #gpio-cells = <4>;
+ gpio-controller;
+ };
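+
+A client node referencing a pin on this controller then uses the four-cell
+specifier described above, for example (the property name and the pin,
+function, pull and drive values below are illustrative and must be taken
+from the SoC user manual):
+
+    device {
+        ...
+        gpios = <&gpa0 3 2 3 0>; /* pin 3, function 2, pull up, 1x drive */
+    };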
diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
new file mode 100644
index 00000000000..e42a2ee233e
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
@@ -0,0 +1,22 @@
+* Synopsys DesignWare I2C
+
+Required properties :
+
+ - compatible : should be "snps,designware-i2c"
+ - reg : Offset and length of the register set for the device
+ - interrupts : <IRQ> where IRQ is the interrupt number.
+
+Recommended properties :
+
+ - clock-frequency : desired I2C bus clock frequency in Hz.
+
+Example :
+
+ i2c@f0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xf0000 0x1000>;
+ interrupts = <11>;
+ clock-frequency = <400000>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
new file mode 100644
index 00000000000..1a85f986961
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -0,0 +1,58 @@
+This is a list of trivial i2c devices that have simple device tree
+bindings, consisting only of a compatible field, an address and
+possibly an interrupt line.
+
+If a device needs more specific bindings, such as properties to
+describe some aspect of it, there needs to be a specific binding
+document for it just like any other device.
+
+
+Compatible Vendor / Chip
+========== =============
+ad,ad7414 SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin
+ad,adm9240 ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems
+adi,adt7461 +/-1C TDM Extended Temp Range I.C
+adt7461 +/-1C TDM Extended Temp Range I.C
+at,24c08 i2c serial eeprom (24cxx)
+atmel,24c02 i2c serial eeprom (24cxx)
+catalyst,24c32 i2c serial eeprom
+dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
+dallas,ds1338 I2C RTC with 56-Byte NV RAM
+dallas,ds1339 I2C Serial Real-Time Clock
+dallas,ds1340 I2C RTC with Trickle Charger
+dallas,ds1374 I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output
+dallas,ds1631 High-Precision Digital Thermometer
+dallas,ds1682 Total-Elapsed-Time Recorder with Alarm
+dallas,ds1775 Tiny Digital Thermometer and Thermostat
+dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
+dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
+dallas,ds75 Digital Thermometer and Thermostat
+dialog,da9053 DA9053: flexible system level PMIC with multicore support
+epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
+epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
+fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
+fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
+fsl,mpr121 MPR121: Proximity Capacitive Touch Sensor Controller
+fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec
+maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
+maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
+maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
+mc,rv3029c2 Real Time Clock Module with I2C-Bus
+national,lm75 I2C TEMP SENSOR
+national,lm80 Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
+national,lm92 ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator with Two-Wire Interface
+nxp,pca9556 Octal SMBus and I2C registered interface
+nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
+nxp,pcf8563 Real-time clock/calendar
+ovti,ov5642 OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI and Embedded TrueFocus
+pericom,pt7c4338 Real-time Clock Module
+plx,pex8648 48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
+ramtron,24c64 i2c serial eeprom (24cxx)
+ricoh,rs5c372a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
+st-micro,24c256 i2c serial eeprom (24cxx)
+stm,m41t00 Serial Access TIMEKEEPER
+stm,m41t62 Serial real-time clock (RTC) with alarm
+stm,m41t80 M41T80 - SERIAL ACCESS RTC WITH ALARMS
+ti,tsc2003 I2C Touch-Screen Controller
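+
+For example, an RTC from the list above needs nothing more than a node under
+its parent i2c bus (0x68 is the DS1307's fixed i2c slave address):
+
+    rtc@68 {
+        compatible = "dallas,ds1307";
+        reg = <0x68>;
+    };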
diff --git a/Documentation/devicetree/bindings/input/samsung-keypad.txt b/Documentation/devicetree/bindings/input/samsung-keypad.txt
new file mode 100644
index 00000000000..ce3e394c0e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/samsung-keypad.txt
@@ -0,0 +1,88 @@
+* Samsung's Keypad Controller device tree bindings
+
+Samsung's Keypad controller is used to interface a SoC with a matrix-type
+keypad device. The keypad controller supports multiple row and column lines.
+A key can be placed at each intersection of a unique row and a unique column.
+The keypad controller can sense a key-press and key-release and report the
+event using an interrupt to the cpu.
+
+Required SoC Specific Properties:
+- compatible: should be one of the following
+ - "samsung,s3c6410-keypad": For controllers compatible with s3c6410 keypad
+ controller.
+ - "samsung,s5pv210-keypad": For controllers compatible with s5pv210 keypad
+ controller.
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- interrupts: The interrupt number to the cpu.
+
+Required Board Specific Properties:
+- samsung,keypad-num-rows: Number of row lines connected to the keypad
+ controller.
+
+- samsung,keypad-num-columns: Number of column lines connected to the
+ keypad controller.
+
+- row-gpios: List of gpios used as row lines. The gpio specifier for
+ this property depends on the gpio controller to which these row lines
+ are connected.
+
+- col-gpios: List of gpios used as column lines. The gpio specifier for
+ this property depends on the gpio controller to which these column
+ lines are connected.
+
+- Keys represented as child nodes: Each key connected to the keypad
+ controller is represented as a child node to the keypad controller
+ device node and should include the following properties.
+ - keypad,row: the row number to which the key is connected.
+ - keypad,column: the column number to which the key is connected.
+ - linux,code: the key-code to be reported when the key is pressed
+ and released.
+
+Optional Properties specific to linux:
+- linux,input-no-autorepeat: do not enable the autorepeat feature.
+- linux,input-wakeup: use any event on the keypad as a wakeup event.
+
+
+Example:
+ keypad@100A0000 {
+ compatible = "samsung,s5pv210-keypad";
+ reg = <0x100A0000 0x100>;
+ interrupts = <173>;
+ samsung,keypad-num-rows = <2>;
+ samsung,keypad-num-columns = <8>;
+ linux,input-no-autorepeat;
+ linux,input-wakeup;
+
+ row-gpios = <&gpx2 0 3 3 0
+ &gpx2 1 3 3 0>;
+
+ col-gpios = <&gpx1 0 3 0 0
+ &gpx1 1 3 0 0
+ &gpx1 2 3 0 0
+ &gpx1 3 3 0 0
+ &gpx1 4 3 0 0
+ &gpx1 5 3 0 0
+ &gpx1 6 3 0 0
+ &gpx1 7 3 0 0>;
+
+ key_1 {
+ keypad,row = <0>;
+ keypad,column = <3>;
+ linux,code = <2>;
+ };
+
+ key_2 {
+ keypad,row = <0>;
+ keypad,column = <4>;
+ linux,code = <3>;
+ };
+
+ key_3 {
+ keypad,row = <0>;
+ keypad,column = <5>;
+ linux,code = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/tegra-kbc.txt b/Documentation/devicetree/bindings/input/tegra-kbc.txt
new file mode 100644
index 00000000000..5ecfa99089b
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/tegra-kbc.txt
@@ -0,0 +1,18 @@
+* Tegra keyboard controller
+
+Required properties:
+- compatible: "nvidia,tegra20-kbc"
+
+Optional properties:
+- debounce-delay: delay in milliseconds per row scan for debouncing
+- repeat-delay: delay in milliseconds before repeat starts
+- ghost-filter: enable ghost filtering for this device
+- wakeup-source: configure keyboard as a wakeup source for suspend/resume
+
+Example:
+
+keyboard: keyboard {
+ compatible = "nvidia,tegra20-kbc";
+ reg = <0x7000e200 0x100>;
+ ghost-filter;
+};
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
new file mode 100644
index 00000000000..719f4dc58df
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -0,0 +1,44 @@
+GPIO assisted NAND flash
+
+The GPIO assisted NAND flash uses a memory mapped interface to
+read/write the NAND commands and data and GPIO pins for the control
+signals.
+
+Required properties:
+- compatible : "gpio-control-nand"
+- reg : should specify localbus chip select and size used for the chip. The
+ resource describes the data bus connected to the NAND flash and all accesses
+ are made in native endianness.
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+ representing partitions.
+- gpios : specifies the gpio pins to control the NAND device. nwp is an
+ optional gpio and may be set to 0 if not present.
+
+Optional properties:
+- bank-width : Width (in bytes) of the device. If not present, the width
+ defaults to 1 byte.
+- chip-delay : chip dependent delay for transferring data from array to
+ read registers (tR). If not present then a default of 20us is used.
+- gpio-control-nand,io-sync-reg : A 64-bit physical address for a read
+ location used to guard against bus reordering with regard to accesses to
+ the GPIOs and the NAND flash data bus. If present, then after changing
+ GPIO state and before and after command byte writes, this register will be
+ read to ensure that the GPIO accesses have completed.
+
+Examples:
+
+gpio-nand@1,0 {
+ compatible = "gpio-control-nand";
+ reg = <1 0x0000 0x2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ gpios = <&banka 1 0 /* rdy */
+ &banka 2 0 /* nce */
+ &banka 3 0 /* ale */
+ &banka 4 0 /* cle */
+ 0 /* nwp */>;
+
+ partition@0 {
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
new file mode 100644
index 00000000000..411727a3f82
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
@@ -0,0 +1,15 @@
+* Calxeda Highbank 10Gb XGMAC Ethernet
+
+Required properties:
+- compatible : Should be "calxeda,hb-xgmac"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain 3 xgmac interrupts: the first is the main
+  interrupt, the second is the power management interrupt and the third is
+  the low power state interrupt.
+
+Example:
+
+ethernet@fff50000 {
+ compatible = "calxeda,hb-xgmac";
+ reg = <0xfff50000 0x1000>;
+ interrupts = <0 77 4 0 78 4 0 79 4>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/cc770.txt b/Documentation/devicetree/bindings/net/can/cc770.txt
new file mode 100644
index 00000000000..77027bf6460
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/cc770.txt
@@ -0,0 +1,53 @@
+Memory mapped Bosch CC770 and Intel AN82527 CAN controller
+
+Note: The CC770 is a CAN controller from Bosch, which is 100%
+compatible with the old AN82527 from Intel, but with "bugs" being fixed.
+
+Required properties:
+
+- compatible : should be "bosch,cc770" for the CC770 and "intc,82527"
+ for the AN82527.
+
+- reg : should specify the chip select, address offset and size required
+ to map the registers of the controller. The size is usually 0x80.
+
+- interrupts : property with a value describing the interrupt source
+ (number and sensitivity) required for the controller.
+
+Optional properties:
+
+- bosch,external-clock-frequency : frequency of the external oscillator
+ clock in Hz. Note that the internal clock frequency used by the
+ controller is half of that value. If not specified, a default
+ value of 16000000 (16 MHz) is used.
+
+- bosch,clock-out-frequency : clock frequency in Hz on the CLKOUT pin.
+ If not specified or if the specified value is 0, the CLKOUT pin
+ will be disabled.
+
+- bosch,slew-rate : slew rate of the CLKOUT signal. If not specified,
+ a reasonable value will be calculated.
+
+- bosch,disconnect-rx0-input : see data sheet.
+
+- bosch,disconnect-rx1-input : see data sheet.
+
+- bosch,disconnect-tx1-output : see data sheet.
+
+- bosch,polarity-dominant : see data sheet.
+
+- bosch,divide-memory-clock : see data sheet.
+
+- bosch,iso-low-speed-mux : see data sheet.
+
+For further information, please have a look at the CC770 or AN82527 data sheets.
+
+Examples:
+
+can@3,100 {
+ compatible = "bosch,cc770";
+ reg = <3 0x100 0x80>;
+ interrupts = <2 0>;
+ interrupt-parent = <&mpic>;
+ bosch,external-clock-frequency = <16000000>;
+};
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
new file mode 100644
index 00000000000..44afa0e5057
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -0,0 +1,25 @@
+* Cadence MACB/GEM Ethernet controller
+
+Required properties:
+- compatible: Should be "cdns,[<chip>-]{macb|gem}"
+ Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
+ Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
+ Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+ the Cadence GEM, or the generic form: "cdns,gem".
+- reg: Address and length of the register set for the device
+- interrupts: Should contain macb interrupt
+- phy-mode: String, operation mode of the PHY interface.
+ Supported values are: "mii", "rmii", "gmii", "rgmii".
+
+Optional properties:
+- local-mac-address: 6 bytes, mac address
+
+Examples:
+
+ macb0: ethernet@fffc4000 {
+ compatible = "cdns,at32ap7000-macb";
+ reg = <0xfffc4000 0x4000>;
+ interrupts = <21>;
+ phy-mode = "rmii";
+ local-mac-address = [3a 0e 03 04 05 06];
+ };
diff --git a/Documentation/devicetree/bindings/nvec/nvec_nvidia.txt b/Documentation/devicetree/bindings/nvec/nvec_nvidia.txt
new file mode 100644
index 00000000000..5aeee53ff9f
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvec/nvec_nvidia.txt
@@ -0,0 +1,9 @@
+NVIDIA compliant embedded controller
+
+Required properties:
+- compatible : should be "nvidia,nvec".
+- reg : the iomem of the i2c slave controller
+- interrupts : the interrupt line of the i2c slave controller
+- clock-frequency : the frequency of the i2c bus
+- gpios : the gpio used for ec request
+- slave-addr: the i2c address of the slave controller
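+
+Example (a minimal sketch; the register address, interrupt, clock, gpio and
+slave address values below are illustrative assumptions):
+
+    nvec@7000c500 {
+        compatible = "nvidia,nvec";
+        reg = <0x7000c500 0x100>;
+        interrupts = <92>;
+        clock-frequency = <80000>;
+        gpios = <&gpio 170 0>; /* gpio used for the ec request line */
+        slave-addr = <138>;
+    };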
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
new file mode 100644
index 00000000000..b9a8a2bcfae
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
@@ -0,0 +1,163 @@
+Message unit node:
+
+For SRIO controllers that implement the message unit as part of the controller
+this node is required. For devices with RMAN this node should NOT exist. The
+node is composed of three types of sub-nodes ("fsl,srio-msg-unit",
+"fsl,srio-dbell-unit" and "fsl,srio-port-write-unit").
+
+See srio.txt for more details about generic SRIO controller details.
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include "fsl,srio-rmu-vX.Y", "fsl,srio-rmu".
+
+ The version X.Y should match the general SRIO controller's IP Block
+ revision register's Major(X) and Minor (Y) value.
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the physical address and
+ length of the SRIO configuration registers for message units
+ and doorbell units.
+
+ - fsl,liodn
+ Usage: optional-but-recommended (for devices with PAMU)
+ Value type: <prop-encoded-array>
+ Definition: The logical I/O device number for the PAMU (IOMMU) to be
+ correctly configured for SRIO accesses. The property should
+ not exist on devices that do not support PAMU.
+
+ The LIODN value is associated with all RMU transactions
+ (msg-unit, doorbell, port-write).
+
+Sub-Nodes for RMU: The RMU node is composed of multiple sub-nodes that
+correspond to the actual sub-controllers in the RMU. The manual for a given
+SoC will detail which and how many of these sub-controllers are implemented.
+
+Message Unit:
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include "fsl,srio-msg-unit-vX.Y", "fsl,srio-msg-unit".
+
+ The version X.Y should match the general SRIO controller's IP Block
+ revision register's Major(X) and Minor (Y) value.
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the physical address and
+ length of the SRIO configuration registers for message units
+ and doorbell units.
+
+ - interrupts
+ Usage: required
+ Value type: <prop_encoded-array>
+ Definition: Specifies the interrupts generated by this device. The
+ value of the interrupts property consists of one interrupt
+ specifier. The format of the specifier is defined by the
+ binding document describing the node's interrupt parent.
+
+ A pair of IRQs are specified in this property. The first
+ element is associated with the transmit (TX) interrupt and the
+ second element is associated with the receive (RX) interrupt.
+
+Doorbell Unit:
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include:
+ "fsl,srio-dbell-unit-vX.Y", "fsl,srio-dbell-unit"
+
+ The version X.Y should match the general SRIO controller's IP Block
+ revision register's Major(X) and Minor (Y) value.
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the physical address and
+ length of the SRIO configuration registers for message units
+ and doorbell units.
+
+ - interrupts
+ Usage: required
+ Value type: <prop_encoded-array>
+ Definition: Specifies the interrupts generated by this device. The
+ value of the interrupts property consists of one interrupt
+ specifier. The format of the specifier is defined by the
+ binding document describing the node's interrupt parent.
+
+ A pair of IRQs are specified in this property. The first
+ element is associated with the transmit (TX) interrupt and the
+ second element is associated with the receive (RX) interrupt.
+
+Port-Write Unit:
+
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include:
+ "fsl,srio-port-write-unit-vX.Y", "fsl,srio-port-write-unit"
+
+ The version X.Y should match the general SRIO controller's IP Block
+ revision register's Major(X) and Minor (Y) value.
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the physical address and
+ length of the SRIO configuration registers for message units
+ and doorbell units.
+
+ - interrupts
+ Usage: required
+ Value type: <prop_encoded-array>
+ Definition: Specifies the interrupts generated by this device. The
+ value of the interrupts property consists of one interrupt
+ specifier. The format of the specifier is defined by the
+ binding document describing the node's interrupt parent.
+
+ A single IRQ that handles port-write conditions is
+ specified by this property. (Typically shared with error).
+
+ Note: All other standard properties (see the ePAPR) are allowed
+ but are optional.
+
+Example:
+ rmu: rmu@d3000 {
+ compatible = "fsl,srio-rmu";
+ reg = <0xd3000 0x400>;
+ ranges = <0x0 0xd3000 0x400>;
+ fsl,liodn = <0xc8>;
+
+ message-unit@0 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x0 0x100>;
+ interrupts = <
+ 60 2 0 0 /* msg1_tx_irq */
+ 61 2 0 0>;/* msg1_rx_irq */
+ };
+ message-unit@100 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x100 0x100>;
+ interrupts = <
+ 62 2 0 0 /* msg2_tx_irq */
+ 63 2 0 0>;/* msg2_rx_irq */
+ };
+ doorbell-unit@400 {
+ compatible = "fsl,srio-dbell-unit";
+ reg = <0x400 0x80>;
+ interrupts = <
+ 56 2 0 0 /* bell_outb_irq */
+ 57 2 0 0>;/* bell_inb_irq */
+ };
+ port-write-unit@4e0 {
+ compatible = "fsl,srio-port-write-unit";
+ reg = <0x4e0 0x20>;
+ interrupts = <16 2 1 11>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
new file mode 100644
index 00000000000..b039bcbee13
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
@@ -0,0 +1,103 @@
+* Freescale Serial RapidIO (SRIO) Controller
+
+RapidIO port node:
+Properties:
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must include "fsl,srio" for IP blocks with IP Block
+ Revision Register (SRIO IPBRR1) Major ID equal to 0x01c0.
+
+ Optionally, a compatible string of "fsl,srio-vX.Y" where X is the Major
+ version in the IP Block Revision Register and Y is the Minor version. If this
+ compatible is provided it should be ordered before "fsl,srio".
+
+ - reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Specifies the physical address and
+ length of the SRIO configuration registers. The size should
+ be set to 0x11000.
+
+ - interrupts
+ Usage: required
+ Value type: <prop_encoded-array>
+ Definition: Specifies the interrupts generated by this device. The
+ value of the interrupts property consists of one interrupt
+ specifier. The format of the specifier is defined by the
+ binding document describing the node's interrupt parent.
+
+ A single IRQ that handles error conditions is specified by this
+ property. (Typically shared with port-write).
+
+ - fsl,srio-rmu-handle:
+ Usage: required if rmu node is defined
+ Value type: <phandle>
+ Definition: A single <phandle> value that points to the RMU.
+ (See srio-rmu.txt for more details on RMU node binding)
+
+Port Child Nodes: There should be a port child node for each port that exists in
+the controller. The ports are numbered starting at one (1) and should have
+the following properties:
+
+ - cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: A standard property. Matches the port id.
+
+ - ranges
+ Usage: required if local access windows are present
+ Value type: <prop-encoded-array>
+ Definition: A standard property. Utilized to describe the memory mapped
+ IO space utilized by the controller. This corresponds to the
+ setting of the local access windows that are targeted to this
+ SRIO port.
+
+ - fsl,liodn
+ Usage: optional-but-recommended (for devices with PAMU)
+ Value type: <prop-encoded-array>
+ Definition: The logical I/O device number for the PAMU (IOMMU) to be
+ correctly configured for SRIO accesses. The property should
+ not exist on devices that do not support PAMU.
+
+ For HW (e.g. the P4080) that only supports a single LIODN for both
+ memory and maintenance transactions, one LIODN is
+ represented in the property for both transactions.
+
+ For HW (e.g. the P304x/P5020, etc.) that supports one LIODN for
+ memory transactions and a separate LIODN for maintenance
+ transactions, a pair of LIODNs is represented in the
+ property. Within the pair, the first element represents the
+ LIODN associated with memory transactions and the second element
+ represents the LIODN associated with maintenance transactions
+ for the port.
+
+Note: All other standard properties (see ePAPR) are allowed but are optional.
+
+Example:
+
+ rapidio: rapidio@ffe0c0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>; /* err_irq */
+ fsl,srio-rmu-handle = <&rmu>;
+ ranges;
+
+ port1 {
+ cell-index = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,liodn = <34>;
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+
+ port2 {
+ cell-index = <2>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,liodn = <48>;
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
new file mode 100644
index 00000000000..9cf57fd042d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
@@ -0,0 +1,29 @@
+Fixed Voltage regulators
+
+Required properties:
+- compatible: Must be "regulator-fixed";
+
+Optional properties:
+- gpio: gpio to use for enable control
+- startup-delay-us: startup time in microseconds
+- enable-active-high: Polarity of the GPIO is active high. If this property
+  is missing, the default assumed is active low.
+
+Any property defined as part of the core regulator
+binding, defined in regulator.txt, can also be used.
+However, a fixed voltage regulator is expected to have
+regulator-min-microvolt and regulator-max-microvolt
+set to the same value.
+
+Example:
+
+ abc: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-supply";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio1 16 0>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ regulator-boot-on;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
new file mode 100644
index 00000000000..5b7a408acda
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -0,0 +1,54 @@
+Voltage/Current Regulators
+
+Optional properties:
+- regulator-name: A string used as a descriptive name for regulator outputs
+- regulator-min-microvolt: smallest voltage consumers may set
+- regulator-max-microvolt: largest voltage consumers may set
+- regulator-microvolt-offset: Offset applied to voltages to compensate for voltage drops
+- regulator-min-microamp: smallest current consumers may set
+- regulator-max-microamp: largest current consumers may set
+- regulator-always-on: boolean, regulator should never be disabled
+- regulator-boot-on: bootloader/firmware enabled regulator
+- <name>-supply: phandle to the parent supply/regulator node
+
+Example:
+
+ xyzreg: regulator@0 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ vin-supply = <&vin>;
+ };
+
+Regulator Consumers:
+Consumer nodes can reference one or more of their supplies/
+regulators using the bindings below.
+
+- <name>-supply: phandle to the regulator node
+
+These are the same bindings that a regulator in the above
+example used to reference its own supply; that case is
+simply a special case of a regulator being a
+consumer itself.
+
+Example of a consumer device node (mmc) referencing two
+regulators (twl_reg1 and twl_reg2):
+
+ twl_reg1: regulator@0 {
+ ...
+ ...
+ ...
+ };
+
+ twl_reg2: regulator@1 {
+ ...
+ ...
+ ...
+ };
+
+ mmc: mmc@0x0 {
+ ...
+ ...
+ vmmc-supply = <&twl_reg1>;
+ vmmcaux-supply = <&twl_reg2>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
new file mode 100644
index 00000000000..90ec45fd33e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -0,0 +1,20 @@
+* Samsung's S3C Real Time Clock controller
+
+Required properties:
+- compatible: should be one of the following.
+ * "samsung,s3c2410-rtc" - for controllers compatible with s3c2410 rtc.
+ * "samsung,s3c6410-rtc" - for controllers compatible with s3c6410 rtc.
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: Two interrupt numbers to the cpu should be specified. The first
+  interrupt number is the rtc alarm interrupt and the second interrupt number
+  is the rtc tick interrupt. The number of cells representing an interrupt
+  depends on the parent interrupt controller.
+
+Example:
+
+ rtc@10070000 {
+ compatible = "samsung,s3c6410-rtc";
+ reg = <0x10070000 0x100>;
+ interrupts = <44 0 45 0>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/twl-rtc.txt b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
new file mode 100644
index 00000000000..596e0c97be7
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
@@ -0,0 +1,12 @@
+* TI twl RTC
+
+The TWL family (twl4030/6030) contains a RTC.
+
+Required properties:
+- compatible : Should be "ti,twl4030-rtc"
+
+Examples:
+
+rtc@0 {
+ compatible = "ti,twl4030-rtc";
+};
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
new file mode 100644
index 00000000000..342eedd1005
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -0,0 +1,10 @@
+OMAP UART controller
+
+Required properties:
+- compatible : should be "ti,omap2-uart" for OMAP2 controllers
+- compatible : should be "ti,omap3-uart" for OMAP3 controllers
+- compatible : should be "ti,omap4-uart" for OMAP4 controllers
+- ti,hwmods : Must be "uart<n>", n being the instance number (1-based)
+
+Optional properties:
+- clock-frequency : frequency of the clock input to the UART
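+
+Example (a minimal sketch; the clock frequency shown is an assumption and
+should match the UART functional clock of the SoC):
+
+    uart1: serial {
+        compatible = "ti,omap3-uart";
+        ti,hwmods = "uart1";
+        clock-frequency = <48000000>;
+    };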
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.txt b/Documentation/devicetree/bindings/serial/samsung_uart.txt
new file mode 100644
index 00000000000..2c8a17cf5cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.txt
@@ -0,0 +1,14 @@
+* Samsung's UART Controller
+
+Samsung's UART controller is used for interfacing a SoC with serial
+communication devices.
+
+Required properties:
+- compatible: should be
+ - "samsung,exynos4210-uart", for UART's compatible with Exynos4210 uart ports.
+
+- reg: base physical address of the controller and length of memory mapped
+ region.
+
+- interrupts: interrupt number to the cpu. The interrupt specifier format depends
+ on the interrupt controller parent.
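+
+Example (a minimal sketch; the base address and interrupt number are
+illustrative assumptions and depend on the SoC):
+
+    uart0: serial@13800000 {
+        compatible = "samsung,exynos4210-uart";
+        reg = <0x13800000 0x100>;
+        interrupts = <52>;
+    };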
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/tegra-usb.txt
new file mode 100644
index 00000000000..035d63d5646
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/tegra-usb.txt
@@ -0,0 +1,13 @@
+Tegra SOC USB controllers
+
+The device node for a USB controller that is part of a Tegra
+SOC is as described in the document "Open Firmware Recommended
+Practice : Universal Serial Bus" with the following modifications
+and additions :
+
+Required properties :
+ - compatible : Should be "nvidia,tegra20-ehci" for USB controllers
+ used in host mode.
+ - phy_type : Should be one of "ulpi" or "utmi".
+ - nvidia,vbus-gpio : If present, specifies a gpio that needs to be
+ activated for the bus to be powered.
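+
+Example (a minimal sketch; the register address and the vbus gpio specifier
+below are illustrative assumptions):
+
+    usb@c5000000 {
+        compatible = "nvidia,tegra20-ehci";
+        reg = <0xc5000000 0x4000>;
+        phy_type = "utmi";
+        nvidia,vbus-gpio = <&gpio 22 0>;
+    };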
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index e8552782b44..18626965159 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,7 +8,9 @@ amcc Applied Micro Circuits Corporation (APM, formally AMCC)
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
atmel Atmel Corporation
+cavium Cavium, Inc.
chrp Common Hardware Reference Platform
+cortina Cortina Systems, Inc.
dallas Maxim Integrated Products (formerly Dallas Semiconductor)
denx Denx Software Engineering
epson Seiko Epson Corp.
@@ -33,8 +35,10 @@ qcom Qualcomm, Inc.
ramtron Ramtron International
samsung Samsung Semiconductor
schindler Schindler
+sil Silicon Image
simtek
sirf SiRF Technology, Inc.
+st STMicroelectronics
stericsson ST-Ericsson
ti Texas Instruments
xlnx Xilinx
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
new file mode 100644
index 00000000000..510eab32f39
--- /dev/null
+++ b/Documentation/dma-buf-sharing.txt
@@ -0,0 +1,224 @@
+ DMA Buffer Sharing API Guide
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Sumit Semwal
+ <sumit dot semwal at linaro dot org>
+ <sumit dot semwal at ti dot com>
+
+This document serves as a guide to device-driver writers on what the dma-buf
+buffer sharing API is and how to use it for exporting and using shared buffers.
+
+Any device driver which wishes to be a part of DMA buffer sharing can do so as
+either the 'exporter' of buffers or the 'user' of buffers.
+
+Say a driver A wants to use buffers created by driver B; then we call B the
+exporter, and A the buffer-user.
+
+The exporter
+- implements and manages operations[1] for the buffer
+- allows other users to share the buffer by using dma_buf sharing APIs,
+- manages the details of buffer allocation,
+- decides about the actual backing storage where this allocation happens,
+- takes care of any migration of scatterlist - for all (shared) users of this
+ buffer,
+
+The buffer-user
+- is one of (many) sharing users of the buffer.
+- doesn't need to worry about how the buffer is allocated, or where.
+- needs a mechanism to get access to the scatterlist that makes up this buffer
+ in memory, mapped into its own address space, so it can access the same area
+ of memory.
+
+*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
+For this first version, a buffer shared using the dma_buf sharing API:
+- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
+ this framework.
+- may be used *ONLY* by importers that do not need CPU access to the buffer.
+
+The dma_buf buffer sharing API usage contains the following steps:
+
+1. Exporter announces that it wishes to export a buffer
+2. Userspace gets the file descriptor associated with the exported buffer, and
+ passes it around to potential buffer-users based on use case
+3. Each buffer-user 'connects' itself to the buffer
+4. When needed, buffer-user requests access to the buffer from exporter
+5. When finished with its use, the buffer-user notifies end-of-DMA to exporter
+6. When the buffer-user is done using this buffer completely, it 'disconnects'
+ itself from the buffer.
+
+
+1. Exporter's announcement of buffer export
+
+ The buffer exporter announces its wish to export a buffer. In this, it
+ connects its own private buffer data, provides implementation for operations
+ that can be performed on the exported dma_buf, and flags for the file
+ associated with this buffer.
+
+ Interface:
+ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+ size_t size, int flags)
+
+ If this succeeds, dma_buf_export allocates a dma_buf structure, and returns a
+ pointer to the same. It also associates an anonymous file with this buffer,
+ so it can be exported. On failure to allocate the dma_buf object, it returns
+ NULL.
+
+2. Userspace gets a handle to pass around to potential buffer-users
+
+ The userspace entity requests a file descriptor (fd) which is a handle to the
+ anonymous file associated with the buffer. It can then share the fd with other
+ drivers and/or processes.
+
+ Interface:
+ int dma_buf_fd(struct dma_buf *dmabuf)
+
+ This API installs an fd for the anonymous file associated with this buffer;
+ returns either 'fd', or error.
+
+3. Each buffer-user 'connects' itself to the buffer
+
+ Each buffer-user now gets a reference to the buffer, using the fd passed to
+ it.
+
+ Interface:
+ struct dma_buf *dma_buf_get(int fd)
+
+ This API will return a reference to the dma_buf, and increment refcount for
+ it.
+
+ After this, the buffer-user needs to attach its device with the buffer, which
+ helps the exporter to know of device buffer constraints.
+
+ Interface:
+ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+
+ This API returns reference to an attachment structure, which is then used
+ for scatterlist operations. It will optionally call the 'attach' dma_buf
+ operation, if provided by the exporter.
+
+ The dma-buf sharing framework does the bookkeeping bits related to managing
+ the list of all attachments to a buffer.
+
+Until this stage, the buffer-exporter has the option to choose not to actually
+allocate the backing storage for this buffer, but to wait for the first
+buffer-user's request to use the buffer before allocating it.
+
+
+4. When needed, buffer-user requests access to the buffer
+
+ Whenever a buffer-user wants to use the buffer for any DMA, it asks for
+ access to the buffer using dma_buf_map_attachment API. At least one attach to
+ the buffer must have happened before map_dma_buf can be called.
+
+ Interface:
+ struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *,
+ enum dma_data_direction);
+
+ This is a wrapper to dma_buf->ops->map_dma_buf operation, which hides the
+ "dma_buf->ops->" indirection from the users of this interface.
+
+ In struct dma_buf_ops, map_dma_buf is defined as
+ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+ enum dma_data_direction);
+
+ It is one of the buffer operations that must be implemented by the exporter.
+ It should return the sg_table containing scatterlist for this buffer, mapped
+ into caller's address space.
+
+ If this is being called for the first time, the exporter can now choose to
+ scan through the list of attachments for this buffer, collate the requirements
+ of the attached devices, and choose an appropriate backing storage for the
+ buffer.
+
+ Based on enum dma_data_direction, it might be possible to have multiple users
+ accessing at the same time (for reading, maybe), or any other kind of sharing
+ that the exporter might wish to make available to buffer-users.
+
+ map_dma_buf() operation can return -EINTR if it is interrupted by a signal.
+
+
+5. When finished, the buffer-user notifies end-of-DMA to exporter
+
+ Once the DMA for the current buffer-user is over, it signals 'end-of-DMA' to
+ the exporter using the dma_buf_unmap_attachment API.
+
+ Interface:
+ void dma_buf_unmap_attachment(struct dma_buf_attachment *,
+ struct sg_table *);
+
+ This is a wrapper to dma_buf->ops->unmap_dma_buf() operation, which hides the
+ "dma_buf->ops->" indirection from the users of this interface.
+
+ In struct dma_buf_ops, unmap_dma_buf is defined as
+ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *);
+
+ unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like
+ map_dma_buf, this API also must be implemented by the exporter.
+
+
+6. When the buffer-user is done using this buffer, it 'disconnects' itself from the
+ buffer.
+
+ After the buffer-user has no more interest in using this buffer, it should
+ disconnect itself from the buffer:
+
+ - it first detaches itself from the buffer.
+
+ Interface:
+ void dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *dmabuf_attach);
+
+ This API removes the attachment from the list in dmabuf, and optionally calls
+ dma_buf->ops->detach(), if provided by exporter, for any housekeeping bits.
+
+ - Then, the buffer-user returns the buffer reference to exporter.
+
+ Interface:
+ void dma_buf_put(struct dma_buf *dmabuf);
+
+ This API then reduces the refcount for this buffer.
+
+ If, as a result of this call, the refcount becomes 0, the 'release' file
+ operation related to this fd is called. It calls the dmabuf->ops->release()
+ operation in turn, and frees the memory allocated for dmabuf when exported.
+
+NOTES:
+- Importance of attach-detach and {map,unmap}_dma_buf operation pairs
+ The attach-detach calls allow the exporter to figure out backing-storage
+ constraints for the currently-interested devices. This allows preferential
+ allocation, and/or migration of pages across different types of storage
+ available, if possible.
+
+ Bracketing of DMA access with {map,unmap}_dma_buf operations is essential
+ to allow just-in-time backing of storage, and migration mid-way through a
+ use-case.
+
+- Migration of backing storage if needed
+ If after
+ - at least one map_dma_buf has happened,
+ - and the backing storage has been allocated for this buffer,
+ another new buffer-user intends to attach itself to this buffer, it might
+ be allowed, if possible for the exporter.
+
+ In case it is allowed by the exporter:
+ if the new buffer-user has stricter 'backing-storage constraints', and the
+ exporter can handle these constraints, the exporter can just stall on the
+ map_dma_buf until all outstanding access is completed (as signalled by
+ unmap_dma_buf).
+ Once all users have finished accessing and have unmapped this buffer, the
+ exporter could potentially move the buffer to the stricter backing-storage,
+ and then allow further {map,unmap}_dma_buf operations from any buffer-user
+ from the migrated backing-storage.
+
+ If the exporter cannot fulfil the backing-storage constraints of the new
+ buffer-user device as requested, dma_buf_attach() would return an error to
+ denote non-compatibility of the new buffer-sharing request with the current
+ buffer.
+
+ If the exporter chooses not to allow an attach() operation once a
+ map_dma_buf() API has been called, it simply returns an error.
+
+References:
+[1] struct dma_buf_ops in include/linux/dma-buf.h
+[2] All interfaces mentioned above defined in include/linux/dma-buf.h
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index dfa6fc6e4b2..0c083c5c2fa 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -66,7 +66,6 @@ GRTAGS
GSYMS
GTAGS
Image
-Kerntypes
Module.markers
Module.symvers
PENDING
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index d79aead9418..10c64c8a13d 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -262,6 +262,7 @@ IOMAP
devm_ioremap()
devm_ioremap_nocache()
devm_iounmap()
+ devm_request_and_ioremap() : checks resource, requests region, ioremaps
pcim_iomap()
pcim_iounmap()
pcim_iomap_table() : array of mapped addresses indexed by BAR
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 3d849122b5b..5575759b84e 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
---------------------------
-What: Deprecated snapshot ioctls
-When: 2.6.36
-
-Why: The ioctls in kernel/power/user.c were marked as deprecated long time
- ago. Now they notify users about that so that they need to replace
- their userspace. After some more time, remove them completely.
-
-Who: Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
@@ -263,8 +252,7 @@ Who: Ravikiran Thirumalai <kiran@scalex86.org>
What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS
(in net/core/net-sysfs.c)
-When: After the only user (hal) has seen a release with the patches
- for enough time, probably some time in 2010.
+When: 3.5
Why: Over 1K .text/.data size reduction, data is available in other
ways (ioctls)
Who: Johannes Berg <johannes@sipsolutions.net>
@@ -362,15 +350,6 @@ Who: anybody or Florian Mickler <florian@mickler.org>
----------------------------
-What: KVM paravirt mmu host support
-When: January 2011
-Why: The paravirt mmu host support is slower than non-paravirt mmu, both
- on newer and older hardware. It is already not exposed to the guest,
- and kept only for live migration purposes.
-Who: Avi Kivity <avi@redhat.com>
-
-----------------------------
-
What: iwlwifi 50XX module parameters
When: 3.0
Why: The "..50" modules parameters were used to configure 5000 series and
@@ -535,6 +514,20 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
information log when acer-wmi initial.
Who: Lee, Chun-Yi <jlee@novell.com>
+---------------------------
+
+What: /sys/devices/platform/_UDC_/udc/_UDC_/is_dualspeed file and
+ is_dualspeed line in /sys/devices/platform/ci13xxx_*/udc/device file.
+When: 3.8
+Why: The is_dualspeed file is superseded by maximum_speed in the same
+ directory and is_dualspeed line in device file is superseded by
+ max_speed line in the same file.
+
+ The maximum_speed/max_speed specifies maximum speed supported by UDC.
+ To check if dual speed is supported, check if the value is >= 3.
+ Various possible speeds are defined in <linux/usb/ch9.h>.
+Who: Michal Nazarewicz <mina86@mina86.com>
+
----------------------------
What: The XFS nodelaylog mount option
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index d819ba16a0c..4fca82e5276 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -37,15 +37,15 @@ d_manage: no no yes (ref-walk) maybe
--------------------------- inode_operations ---------------------------
prototypes:
- int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+ int (*create) (struct inode *,struct dentry *,umode_t, struct nameidata *);
struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,int);
+ int (*mkdir) (struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+ int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char __user *,int);
@@ -117,7 +117,7 @@ prototypes:
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);
- int (*show_options)(struct seq_file *, struct vfsmount *);
+ int (*show_options)(struct seq_file *, struct dentry *);
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 64087c34327..7671352216f 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -63,8 +63,8 @@ IRC network.
Userspace tools for creating and manipulating Btrfs file systems are
available from the git repository at the following location:
- http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
- git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs.git
These include the following tools:
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index dd57bb6bb39..b40fec9d3f5 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -192,7 +192,7 @@ attribute value uses the store_attribute() method.
struct configfs_attribute {
char *ca_name;
struct module *ca_owner;
- mode_t ca_mode;
+ umode_t ca_mode;
};
When a config_item wants an attribute to appear as a file in the item's
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index 742cc06e138..6872c91bce3 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -35,7 +35,7 @@ described below will work.
The most general way to create a file within a debugfs directory is with:
- struct dentry *debugfs_create_file(const char *name, mode_t mode,
+ struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -53,13 +53,13 @@ actually necessary; the debugfs code provides a number of helper functions
for simple situations. Files containing a single integer value can be
created with any of:
- struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+ struct dentry *debugfs_create_u8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
- struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+ struct dentry *debugfs_create_u16(const char *name, umode_t mode,
struct dentry *parent, u16 *value);
- struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+ struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
- struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+ struct dentry *debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value);
These files support both reading and writing the given value; if a specific
@@ -67,13 +67,13 @@ file should not be written to, simply set the mode bits accordingly. The
values in these files are in decimal; if hexadecimal is more appropriate,
the following functions can be used instead:
- struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+ struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
- struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+ struct dentry *debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value);
- struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+ struct dentry *debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
- struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+ struct dentry *debugfs_create_x64(const char *name, umode_t mode,
struct dentry *parent, u64 *value);
These functions are useful as long as the developer knows the size of the
@@ -81,7 +81,7 @@ value to be exported. Some types can have different widths on different
architectures, though, complicating the situation somewhat. There is a
function meant to help out in one special case:
- struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+ struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent,
size_t *value);
@@ -90,21 +90,22 @@ a variable of type size_t.
Boolean values can be placed in debugfs with:
- struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+ struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
A read on the resulting file will yield either Y (for non-zero values) or
N, followed by a newline. If written to, it will accept either upper- or
lower-case values, or 1 or 0. Any other input will be silently ignored.
-Finally, a block of arbitrary binary data can be exported with:
+Another option is exporting a block of arbitrary binary data, with
+this structure and function:
struct debugfs_blob_wrapper {
void *data;
unsigned long size;
};
- struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
@@ -115,6 +116,35 @@ can be used to export binary information, but there does not appear to be
any code which does so in the mainline. Note that all files created with
debugfs_create_blob() are read-only.
+If you want to dump a block of registers (something that happens quite
+often during development, even if little such code reaches mainline),
+debugfs offers two functions: one to make a registers-only file, and
+another to insert a register block in the middle of another sequential
+file.
+
+ struct debugfs_reg32 {
+ char *name;
+ unsigned long offset;
+ };
+
+ struct debugfs_regset32 {
+ struct debugfs_reg32 *regs;
+ int nregs;
+ void __iomem *base;
+ };
+
+ struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
+
+ int debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix);
+
+The "base" argument may be 0, but you may want to build the reg32 array
+using __stringify, and a number of register names (macros) are actually
+byte offsets over a base for the register block.
+
+
There are a couple of other directory-oriented helper functions:
struct dentry *debugfs_rename(struct dentry *old_dir,
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 4917cf24a5e..10ec4639f15 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -581,6 +581,13 @@ Table of Ext4 specific ioctls
behaviour may change in the future as it is
not necessary and has been done this way only
for sake of simplicity.
+
+ EXT4_IOC_RESIZE_FS Resize the filesystem to a new size. The number
+ of blocks of the resized filesystem is passed in via
+ a 64-bit integer argument. The kernel allocates the
+ bitmaps and inode table; the userspace tool thus
+ just passes the new number of blocks.
+
..............................................................................
References
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 0ec91f03422..12fee132fbe 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -41,6 +41,8 @@ Table of Contents
3.5 /proc/<pid>/mountinfo - Information about mounts
3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
+ 4 Configuring procfs
+ 4.1 Mount options
------------------------------------------------------------------------------
Preface
@@ -1542,3 +1544,40 @@ a task to set its own or one of its thread siblings comm value. The comm value
is limited in size compared to the cmdline value, so writing anything longer
then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
comm value.
+
+
+------------------------------------------------------------------------------
+Configuring procfs
+------------------------------------------------------------------------------
+
+4.1 Mount options
+---------------------
+
+The following mount options are supported:
+
+ hidepid= Set /proc/<pid>/ access mode.
+ gid= Set the group authorized to learn process information.
+
+hidepid=0 means classic mode - everybody may access all /proc/<pid>/ directories
+(default).
+
+hidepid=1 means users may not access any /proc/<pid>/ directories but their
+own. Sensitive files like cmdline, sched*, status are now protected against
+other users. This makes it impossible to learn whether any user runs a
+specific program (given the program doesn't reveal itself by its behaviour).
+As an additional bonus, as /proc/<pid>/cmdline is inaccessible to other users,
+poorly written programs passing sensitive information via program arguments are
+now protected against local eavesdroppers.
+
+hidepid=2 means hidepid=1 plus all /proc/<pid>/ will be fully invisible to other
+users. It doesn't hide the fact that a process with a specific pid value
+exists (that can be learned by other means, e.g. by "kill -0 $PID"),
+but it hides the process' uid and gid, which may otherwise be learned by
+stat()'ing /proc/<pid>/. It greatly complicates an intruder's task of gathering
+information about running processes, whether some daemon runs with elevated
+privileges, whether another user runs some sensitive program, whether other users
+run any program at all, etc.
+
+gid= defines a group authorized to learn process information otherwise
+prohibited by hidepid=. If you use some daemon like identd which needs to learn
+information about processes, just add identd to this group.
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 07235caec22..a6619b7064b 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -70,7 +70,7 @@ An attribute definition is simply:
struct attribute {
char * name;
struct module *owner;
- mode_t mode;
+ umode_t mode;
};
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 43cbd082172..3d9393b845b 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -225,7 +225,7 @@ struct super_operations {
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
- int (*show_options)(struct seq_file *, struct vfsmount *);
+ int (*show_options)(struct seq_file *, struct dentry *);
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
@@ -341,14 +341,14 @@ This describes how the VFS can manipulate an inode in your
filesystem. As of kernel 2.6.22, the following members are defined:
struct inode_operations {
- int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+ int (*create) (struct inode *,struct dentry *, umode_t, struct nameidata *);
struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,int);
+ int (*mkdir) (struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+ int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char __user *,int);
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index 15ac911ce51..d28b591753d 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -2,9 +2,8 @@ Kernel driver pmbus
====================
Supported chips:
- * Ericsson BMR45X series
- DC/DC Converter
- Prefixes: 'bmr450', 'bmr451', 'bmr453', 'bmr454'
+ * Ericsson BMR453, BMR454
+ Prefixes: 'bmr453', 'bmr454'
Addresses scanned: -
Datasheet:
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
index 7617798b5c9..51f76a189fe 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100
@@ -6,6 +6,10 @@ Supported chips:
Prefix: 'zl2004'
Addresses scanned: -
Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+ * Intersil / Zilker Labs ZL2005
+ Prefix: 'zl2005'
+ Addresses scanned: -
+ Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
* Intersil / Zilker Labs ZL2006
Prefix: 'zl2006'
Addresses scanned: -
@@ -30,6 +34,17 @@ Supported chips:
Prefix: 'zl6105'
Addresses scanned: -
Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+ * Ericsson BMR450, BMR451
+ Prefixes: 'bmr450', 'bmr451'
+ Addresses scanned: -
+ Datasheet:
+http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+ * Ericsson BMR462, BMR463, BMR464
+ Prefixes: 'bmr462', 'bmr463', 'bmr464'
+ Addresses scanned: -
+ Datasheet:
+http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+
Author: Guenter Roeck <guenter.roeck@ericsson.com>
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
new file mode 100644
index 00000000000..f274c28b510
--- /dev/null
+++ b/Documentation/input/alps.txt
@@ -0,0 +1,188 @@
+ALPS Touchpad Protocol
+----------------------
+
+Introduction
+------------
+
+Currently the ALPS touchpad driver supports four protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
+protocol versions is contained in the following sections.
+
+Detection
+---------
+
+All ALPS touchpads should respond to the "E6 report" command sequence:
+E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
+00-00-64.
+
+If the E6 report is successful, the touchpad model is identified using the "E7
+report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
+matched against known models in the alps_model_data_array.
+
+With protocol versions 3 and 4, the E7 report model signature is always
+73-02-64. To differentiate between these versions, the response from the
+"Enter Command Mode" sequence must be inspected as described below.
+
+Command Mode
+------------
+
+Protocol versions 3 and 4 have a command mode that is used to read and write
+one-byte device registers in a 16-bit address space. The command sequence
+EC-EC-EC-E9 places the device in command mode, and the device will respond
+with 88-07 followed by a third byte. This third byte can be used to determine
+whether the device uses the version 3 or 4 protocol.
+
+To exit command mode, PSMOUSE_CMD_SETSTREAM (EA) is sent to the touchpad.
+
+While in command mode, register addresses can be set by first sending a
+specific command, either EC for v3 devices or F5 for v4 devices. Then the
+address is sent one nibble at a time, where each nibble is encoded as a
+command with optional data. This encoding differs slightly between the v3 and
+v4 protocols.
+
+Once an address has been set, the addressed register can be read by sending
+PSMOUSE_CMD_GETINFO (E9). The first two bytes of the response contain the
+address of the register being read, and the third contains the value of the
+register. Registers are written by writing the value one nibble at a time
+using the same encoding used for addresses.
+
+Packet Format
+-------------
+
+In the following tables, the following notation is used.
+
+ CAPITALS = stick, minuscules = touchpad
+
+?'s can have different meanings on different models, such as wheel rotation,
+extra buttons, stick buttons on a dualpoint, etc.
+
+PS/2 packet format
+------------------
+
+ byte 0: 0 0 YSGN XSGN 1 M R L
+ byte 1: X7 X6 X5 X4 X3 X2 X1 X0
+ byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+
+Note that the device never signals an overflow condition.
+
+ALPS Absolute Mode - Protocol Version 1
+---------------------------------------
+
+ byte 0: 1 0 0 0 1 x9 x8 x7
+ byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ byte 2: 0 ? ? l r ? fin ges
+ byte 3: 0 ? ? ? ? y9 y8 y7
+ byte 4: 0 y6 y5 y4 y3 y2 y1 y0
+ byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+
+ALPS Absolute Mode - Protocol Version 2
+---------------------------------------
+
+ byte 0: 1 ? ? ? 1 ? ? ?
+ byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ byte 2: 0 x10 x9 x8 x7 ? fin ges
+ byte 3: 0 y9 y8 y7 1 M R L
+ byte 4: 0 y6 y5 y4 y3 y2 y1 y0
+ byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+
+Dualpoint device -- interleaved packet format
+---------------------------------------------
+
+ byte 0: 1 1 0 0 1 1 1 1
+ byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ byte 2: 0 x10 x9 x8 x7 0 fin ges
+ byte 3: 0 0 YSGN XSGN 1 1 1 1
+ byte 4: X7 X6 X5 X4 X3 X2 X1 X0
+ byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ byte 6: 0 y9 y8 y7 1 m r l
+ byte 7: 0 y6 y5 y4 y3 y2 y1 y0
+ byte 8: 0 z6 z5 z4 z3 z2 z1 z0
+
+ALPS Absolute Mode - Protocol Version 3
+---------------------------------------
+
+ALPS protocol version 3 has three different packet formats. The first two are
+associated with touchpad events, and the third is associated with trackstick
+events.
+
+The first type is the touchpad position packet.
+
+ byte 0: 1 ? x1 x0 1 1 1 1
+ byte 1: 0 x10 x9 x8 x7 x6 x5 x4
+ byte 2: 0 y10 y9 y8 y7 y6 y5 y4
+ byte 3: 0 M R L 1 m r l
+ byte 4: 0 mt x3 x2 y3 y2 y1 y0
+ byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+
+Note that for some devices the trackstick buttons are reported in this packet,
+and on others they are reported in the trackstick packets.
+
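+As a non-authoritative illustration of the table above (field extraction
+only; the struct and function names are invented for this sketch, and p[0]
+corresponds to byte 0):
+
+ /* Sketch: unpack a v3 touchpad position packet per the table above. */
+ struct alps_v3_pos {
+ 	unsigned int x, y, z;
+ 	unsigned int left, right, middle;   /* touchpad buttons l/r/m */
+ };
+
+ static void alps_v3_decode_pos(const unsigned char p[6], struct alps_v3_pos *f)
+ {
+ 	f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) | ((p[0] & 0x30) >> 4);
+ 	f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
+ 	f->z = p[5] & 0x7f;
+ 	f->left   = p[3] & 0x01;
+ 	f->right  = (p[3] >> 1) & 0x01;
+ 	f->middle = (p[3] >> 2) & 0x01;
+ }
+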
+The second packet type contains bitmaps representing the x and y axes. In the
+bitmaps a given bit is set if there is a finger covering that position on the
+given axis. Thus the bitmap packet can be used for low-resolution multi-touch
+data, although finger tracking is not possible. This packet also encodes the
+number of contacts (f1 and f0 in the table below).
+
+ byte 0: 1 1 x1 x0 1 1 1 1
+ byte 1: 0 x8 x7 x6 x5 x4 x3 x2
+ byte 2: 0 y7 y6 y5 y4 y3 y2 y1
+ byte 3: 0 y10 y9 y8 1 1 1 1
+ byte 4: 0 x14 x13 x12 x11 x10 x9 y0
+ byte 5: 0 1 ? ? ? ? f1 f0
+
+This packet only appears after a position packet with the mt bit set, and
+usually only appears when there are two or more contacts (although
+occasionally it's seen with only a single contact).
+
+The final v3 packet type is the trackstick packet.
+
+ byte 0: 1 1 x7 y7 1 1 1 1
+ byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ byte 2: 0 y6 y5 y4 y3 y2 y1 y0
+ byte 3: 0 1 0 0 1 0 0 0
+ byte 4: 0 z4 z3 z2 z1 z0 ? ?
+ byte 5: 0 0 1 1 1 1 1 1
+
+ALPS Absolute Mode - Protocol Version 4
+---------------------------------------
+
+Protocol version 4 has an 8-byte packet format.
+
+ byte 0: 1 ? x1 x0 1 1 1 1
+ byte 1: 0 x10 x9 x8 x7 x6 x5 x4
+ byte 2: 0 y10 y9 y8 y7 y6 y5 y4
+ byte 3: 0 1 x3 x2 y3 y2 y1 y0
+ byte 4: 0 ? ? ? 1 ? r l
+ byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+ byte 6: bitmap data (described below)
+ byte 7: bitmap data (described below)
+
+The last two bytes represent a partial bitmap packet, with 3 full packets
+required to construct a complete bitmap packet. Once assembled, the 6-byte
+bitmap packet has the following format:
+
+ byte 0: 0 1 x7 x6 x5 x4 x3 x2
+ byte 1: 0 x1 x0 y4 y3 y2 y1 y0
+ byte 2: 0 0 ? x14 x13 x12 x11 x10
+ byte 3: 0 x9 x8 y9 y8 y7 y6 y5
+ byte 4: 0 0 0 0 0 0 0 0
+ byte 5: 0 0 0 0 0 0 0 y10
+
+There are several things worth noting here.
+
+ 1) In the bitmap data, bit 6 of byte 0 serves as a sync byte to
+ identify the first fragment of a bitmap packet.
+
+ 2) The bitmaps represent the same data as in the v3 bitmap packets, although
+ the packet layout is different.
+
+ 3) There doesn't seem to be a count of the contact points anywhere in the v4
+ protocol packets. Deriving a count of contact points must be done by
+ analyzing the bitmaps.
+
+ 4) There is a 3 to 1 ratio of position packets to bitmap packets. Therefore
+ MT position can only be updated for every third ST position update, and
+ the count of contact points can only be updated every third packet as
+ well.
+
+So far no v4 devices with tracksticks have been encountered.
diff --git a/Documentation/input/gpio-tilt.txt b/Documentation/input/gpio-tilt.txt
new file mode 100644
index 00000000000..06d60c3ff5e
--- /dev/null
+++ b/Documentation/input/gpio-tilt.txt
@@ -0,0 +1,103 @@
+Driver for tilt-switches connected via GPIOs
+============================================
+
+Generic driver to read data from tilt switches connected via gpios.
+Orientation can be provided by one or more tilt switches, i.e. each
+tilt switch providing one axis, and the number of axes is not
+limited.
+
+
+Data structures:
+----------------
+
+The array of struct gpio in the gpios field is used to list the gpios
+that represent the current tilt state.
+
+The array of struct gpio_tilt_axis describes the axes that are reported
+to the input system. The values set therein are used for the
+input_set_abs_params calls needed to init the axes.
+
+The array of struct gpio_tilt_state maps gpio states to the corresponding
+values to report. The gpio state is represented as a bitfield where the
+bit-index corresponds to the index of the gpio in the struct gpio array.
+In the same manner the values stored in the axes array correspond to
+the elements of the gpio_tilt_axis-array.
+
+
+Example:
+--------
+
+Example configuration for a single TS1003 tilt switch that rotates around
+one axis in 4 steps and emits the current tilt via two GPIOs.
+
+static int sg060_tilt_enable(struct device *dev)
+{
+	/* code to enable the sensors */
+	return 0;
+}
+
+static void sg060_tilt_disable(struct device *dev)
+{
+	/* code to disable the sensors */
+}
+
+static struct gpio sg060_tilt_gpios[] = {
+ { SG060_TILT_GPIO_SENSOR1, GPIOF_IN, "tilt_sensor1" },
+ { SG060_TILT_GPIO_SENSOR2, GPIOF_IN, "tilt_sensor2" },
+};
+
+static struct gpio_tilt_state sg060_tilt_states[] = {
+ {
+ .gpios = (0 << 1) | (0 << 0),
+ .axes = (int[]) {
+ 0,
+ },
+ }, {
+ .gpios = (0 << 1) | (1 << 0),
+ .axes = (int[]) {
+ 1, /* 90 degrees */
+ },
+ }, {
+ .gpios = (1 << 1) | (1 << 0),
+ .axes = (int[]) {
+ 2, /* 180 degrees */
+ },
+ }, {
+ .gpios = (1 << 1) | (0 << 0),
+ .axes = (int[]) {
+ 3, /* 270 degrees */
+ },
+ },
+};
+
+static struct gpio_tilt_axis sg060_tilt_axes[] = {
+ {
+ .axis = ABS_RY,
+ .min = 0,
+ .max = 3,
+ .fuzz = 0,
+ .flat = 0,
+ },
+};
+
+static struct gpio_tilt_platform_data sg060_tilt_pdata = {
+ .gpios = sg060_tilt_gpios,
+ .nr_gpios = ARRAY_SIZE(sg060_tilt_gpios),
+
+ .axes = sg060_tilt_axes,
+ .nr_axes = ARRAY_SIZE(sg060_tilt_axes),
+
+ .states = sg060_tilt_states,
+ .nr_states = ARRAY_SIZE(sg060_tilt_states),
+
+ .debounce_interval = 100,
+
+ .poll_interval = 1000,
+ .enable = sg060_tilt_enable,
+ .disable = sg060_tilt_disable,
+};
+
+static struct platform_device sg060_device_tilt = {
+ .name = "gpio-tilt-polled",
+ .id = -1,
+ .dev = {
+ .platform_data = &sg060_tilt_pdata,
+ },
+};
diff --git a/Documentation/input/sentelic.txt b/Documentation/input/sentelic.txt
index b2ef125b71f..89251e2a3eb 100644
--- a/Documentation/input/sentelic.txt
+++ b/Documentation/input/sentelic.txt
@@ -1,5 +1,5 @@
-Copyright (C) 2002-2010 Sentelic Corporation.
-Last update: Jan-13-2010
+Copyright (C) 2002-2011 Sentelic Corporation.
+Last update: Dec-07-2011
==============================================================================
* Finger Sensing Pad Intellimouse Mode(scrolling wheel, 4th and 5th buttons)
@@ -140,6 +140,7 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordination packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
When both fingers are up, the last two reports have zero valid
bit.
@@ -164,6 +165,7 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordinates packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
When both fingers are up, the last two reports have zero valid
bit.
@@ -188,6 +190,7 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordinates packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => 1
Bit4 => when in absolute coordinates mode (valid when EN_PKT_GO is 1):
0: left button is generated by the on-pad command
@@ -205,7 +208,7 @@ Byte 4: Bit7 => scroll right button
Bit6 => scroll left button
Bit5 => scroll down button
Bit4 => scroll up button
- * Note that if gesture and additional buttoni (Bit4~Bit7)
+ * Note that if gesture and additional button (Bit4~Bit7)
happen at the same time, the button information will not
be sent.
Bit3~Bit0 => Reserved
@@ -227,6 +230,7 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordinates packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
When both fingers are up, the last two reports have zero valid
bit.
@@ -253,6 +257,7 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordination packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => Valid bit, 0 means that the coordinate is invalid or finger up.
When both fingers are up, the last two reports have zero valid
bit.
@@ -279,8 +284,9 @@ BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------
Byte 1: Bit7~Bit6 => 00, Normal data packet
=> 01, Absolute coordination packet
=> 10, Notify packet
+ => 11, Normal data packet with on-pad click
Bit5 => 1
- Bit4 => when in absolute coordinate mode (valid when EN_PKT_GO is 1):
+ Bit4 => when in absolute coordinates mode (valid when EN_PKT_GO is 1):
0: left button is generated by the on-pad command
1: left button is generated by the external button
Bit3 => 1
@@ -307,6 +313,110 @@ Sample sequence of Multi-finger, Multi-coordinate mode:
abs pkt 2, ..., notify packet (valid bit == 0)
==============================================================================
+* Absolute position for STL3888-Cx and STL3888-Dx.
+==============================================================================
+Single Finger, Absolute Coordinate Mode (SFAC)
+ Bit 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+ 1 |0|1|0|P|1|M|R|L| 2 |X|X|X|X|X|X|X|X| 3 |Y|Y|Y|Y|Y|Y|Y|Y| 4 |r|l|B|F|X|X|Y|Y|
+ |---------------| |---------------| |---------------| |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+ => 01, Absolute coordinates packet
+ => 10, Notify packet
+ Bit5 => Coordinate mode (always 0 in SFAC mode):
+ 0: single-finger absolute coordinates (SFAC) mode
+ 1: multi-finger, multiple coordinates (MFMC) mode
+ Bit4 => 0: The LEFT button is generated by on-pad command (OPC)
+ 1: The LEFT button is generated by external button
+ Default is 1 even if the LEFT button is not pressed.
+ Bit3 => Always 1, as specified by PS/2 protocol.
+ Bit2 => Middle Button, 1 is pressed, 0 is not pressed.
+ Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+ Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: X coordinate (xpos[9:2])
+Byte 3: Y coordinate (ypos[9:2])
+Byte 4: Bit1~Bit0 => Y coordinate (ypos[1:0])
+ Bit3~Bit2 => X coordinate (xpos[1:0])
+ Bit4 => 4th mouse button (forward one page)
+ Bit5 => 5th mouse button (backward one page)
+ Bit6 => scroll left button
+ Bit7 => scroll right button
+
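+As a non-authoritative sketch of the SFAC layout above (the names are
+invented; byte 1 of the table is p[0] below, and the 10-bit coordinates
+are built from the [9:2] and [1:0] fields):
+
+ /* Sketch: extract coordinates and buttons from an SFAC packet. */
+ struct fsp_sfac {
+ 	unsigned int x, y;
+ 	unsigned int left, right, middle;
+ };
+
+ static void fsp_sfac_decode(const unsigned char p[4], struct fsp_sfac *out)
+ {
+ 	out->x = (p[1] << 2) | ((p[3] >> 2) & 0x03);  /* xpos[9:2] | xpos[1:0] */
+ 	out->y = (p[2] << 2) | (p[3] & 0x03);         /* ypos[9:2] | ypos[1:0] */
+ 	out->left   = p[0] & 0x01;
+ 	out->right  = (p[0] >> 1) & 0x01;
+ 	out->middle = (p[0] >> 2) & 0x01;
+ }
+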
+Multi Finger, Multiple Coordinates Mode (MFMC):
+ Bit 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+ 1 |0|1|1|P|1|F|R|L| 2 |X|X|X|X|X|X|X|X| 3 |Y|Y|Y|Y|Y|Y|Y|Y| 4 |r|l|B|F|X|X|Y|Y|
+ |---------------| |---------------| |---------------| |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+ => 01, Absolute coordination packet
+ => 10, Notify packet
+ Bit5 => Coordinate mode (always 1 in MFMC mode):
+ 0: single-finger absolute coordinates (SFAC) mode
+ 1: multi-finger, multiple coordinates (MFMC) mode
+ Bit4 => 0: The LEFT button is generated by on-pad command (OPC)
+ 1: The LEFT button is generated by external button
+ Default is 1 even if the LEFT button is not pressed.
+ Bit3 => Always 1, as specified by PS/2 protocol.
+ Bit2 => Finger index, 0 is the first finger, 1 is the second finger.
+ If bits 1 and 0 are both 1 and bit 4 is 0, the middle external
+ button is pressed.
+ Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+ Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: X coordinate (xpos[9:2])
+Byte 3: Y coordinate (ypos[9:2])
+Byte 4: Bit1~Bit0 => Y coordinate (ypos[1:0])
+ Bit3~Bit2 => X coordinate (xpos[1:0])
+ Bit4 => 4th mouse button (forward one page)
+ Bit5 => 5th mouse button (backward one page)
+ Bit6 => scroll left button
+ Bit7 => scroll right button
+
+ When one of the two fingers is up, the device will output four consecutive
+MFMC#0 report packets with zero X and Y to indicate that the 1st finger is up,
+or four consecutive MFMC#1 report packets with zero X and Y to indicate that
+the 2nd finger is up. On the other hand, if both fingers are up, the device
+will output four consecutive single-finger, absolute coordinate (SFAC) packets
+with zero X and Y.
+
+Notify Packet for STL3888-Cx/Dx
+ Bit 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+BYTE |---------------|BYTE |---------------|BYTE|---------------|BYTE|---------------|
+ 1 |1|0|0|P|1|M|R|L| 2 |C|C|C|C|C|C|C|C| 3 |0|0|F|F|0|0|0|i| 4 |r|l|u|d|0|0|0|0|
+ |---------------| |---------------| |---------------| |---------------|
+
+Byte 1: Bit7~Bit6 => 00, Normal data packet
+ => 01, Absolute coordinates packet
+ => 10, Notify packet
+ Bit5 => Always 0
+ Bit4 => 0: The LEFT button is generated by on-pad command (OPC)
+ 1: The LEFT button is generated by external button
+ Default is 1 even if the LEFT button is not pressed.
+ Bit3 => 1
+ Bit2 => Middle Button, 1 is pressed, 0 is not pressed.
+ Bit1 => Right Button, 1 is pressed, 0 is not pressed.
+ Bit0 => Left Button, 1 is pressed, 0 is not pressed.
+Byte 2: Message type:
+ 0xba => gesture information
+ 0xc0 => one finger hold-rotating gesture
+Byte 3: The first parameter for the received message:
+ 0xba => gesture ID (refer to the 'Gesture ID' section)
+ 0xc0 => region ID
+Byte 4: The second parameter for the received message:
+ 0xba => N/A
+ 0xc0 => finger up/down information
+
+Sample sequence of Multi-finger, Multi-coordinates mode:
+
+ notify packet (valid bit == 1), MFMC packet 1 (byte 1, bit 2 == 0),
+ MFMC packet 2 (byte 1, bit 2 == 1), MFMC packet 1, MFMC packet 2,
+ ..., notify packet (valid bit == 0)
+
+ That is, when the device is in MFMC mode, the host will receive
+ interleaved absolute coordinate packets for each finger.
+
+==============================================================================
* FSP Enable/Disable packet
==============================================================================
Bit 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
@@ -348,9 +458,10 @@ http://www.computer-engineering.org/ps2mouse/
==============================================================================
1. Identify FSP by reading device ID(0x00) and version(0x01) register
-2. Determine number of buttons by reading status2 (0x0b) register
+2a. For FSP version < STL3888 Cx, determine number of buttons by reading
+ the 'test mode status' (0x20) register:
- buttons = reg[0x0b] & 0x30
+ buttons = reg[0x20] & 0x30
if buttons == 0x30 or buttons == 0x20:
# two/four buttons
@@ -365,6 +476,10 @@ http://www.computer-engineering.org/ps2mouse/
Refer to 'Finger Sensing Pad PS/2 Mouse Intellimouse'
section A for packet parsing detail
+2b. For FSP version >= STL3888 Cx:
+ Refer to 'Finger Sensing Pad PS/2 Mouse Intellimouse'
+ section A for packet parsing detail (ignore byte 4, bit ~ 7)
+
==============================================================================
* Programming Sequence for Register Reading/Writing
==============================================================================
@@ -374,7 +489,7 @@ Register inversion requirement:
Following values needed to be inverted(the '~' operator in C) before being
sent to FSP:
- 0xe9, 0xee, 0xf2 and 0xff.
+ 0xe8, 0xe9, 0xee, 0xf2, 0xf3 and 0xff.
Register swapping requirement:
@@ -415,7 +530,18 @@ Register reading sequence:
8. send 0xe9(status request) PS/2 command to FSP;
- 9. the response read from FSP should be the requested register value.
+ 9. the 4th byte of the response read from FSP should be the
+ requested register value (?? indicates don't care byte):
+
+ host: 0xe9
+ 3888: 0xfa (??) (??) (val)
+
+ * Note that since the Cx release, the hardware will return 1's
+ complement of the register value at the 3rd byte of status request
+ result:
+
+ host: 0xe9
+ 3888: 0xfa (??) (~val) (val)
Register writing sequence:
@@ -465,71 +591,194 @@ Register writing sequence:
9. the register writing sequence is completed.
+ * Note that since the Cx release, the hardware will return 1's
+ complement of the register value at the 3rd byte of status request
+ result. Host can optionally send another 0xe9 (status request) PS/2
+ command to FSP at the end of register writing to verify that the
+ register writing operation is successful (?? indicates don't care
+ byte):
+
+ host: 0xe9
+ 3888: 0xfa (??) (~val) (val)
+
+==============================================================================
+* Programming Sequence for Page Register Reading/Writing
+==============================================================================
+
+ In order to overcome the limitation on the maximum number of registers
+supported, the hardware separates registers into different groups called
+'pages.' Each page is able to include up to 255 registers.
+
+ The default page after power up is 0x82; therefore, if one has to get
+access to register 0x8301, one has to use the following sequence to switch
+to page 0x83, then start reading/writing from/to offset 0x01 by using
+the register read/write sequence described in the previous section.
+
+Page register reading sequence:
+
+ 1. send 0xf3 PS/2 command to FSP;
+
+ 2. send 0x66 PS/2 command to FSP;
+
+ 3. send 0x88 PS/2 command to FSP;
+
+ 4. send 0xf3 PS/2 command to FSP;
+
+ 5. send 0x83 PS/2 command to FSP;
+
+ 6. send 0x88 PS/2 command to FSP;
+
+ 7. send 0xe9(status request) PS/2 command to FSP;
+
+ 8. the response read from FSP should be the requested page value.
+
+Page register writing sequence:
+
+ 1. send 0xf3 PS/2 command to FSP;
+
+ 2. send 0x38 PS/2 command to FSP;
+
+ 3. send 0x88 PS/2 command to FSP;
+
+ 4. send 0xf3 PS/2 command to FSP;
+
+ 5. if the page address being written is not required to be
+ inverted (refer to the 'Register inversion requirement' section),
+ goto step 6
+
+ 5a. send 0x47 PS/2 command to FSP;
+
+ 5b. send the inverted page address to FSP and goto step 9;
+
+ 6. if the page address being written is not required to be
+ swapped (refer to the 'Register swapping requirement' section),
+ goto step 7
+
+ 6a. send 0x44 PS/2 command to FSP;
+
+ 6b. send the swapped page address to FSP and goto step 9;
+
+ 7. send 0x33 PS/2 command to FSP;
+
+ 8. send the page address to FSP;
+
+ 9. the page register writing sequence is completed.
+
+==============================================================================
+* Gesture ID
+==============================================================================
+
+ Unlike other devices which send multiple fingers' coordinates to the host,
+FSP processes multiple fingers' coordinates internally and converts them
+into an 8-bit integer, namely the 'Gesture ID.' The following is a list of
+supported gesture IDs:
+
+ ID Description
+ 0x86 2 finger straight up
+ 0x82 2 finger straight down
+ 0x80 2 finger straight right
+ 0x84 2 finger straight left
+ 0x8f 2 finger zoom in
+ 0x8b 2 finger zoom out
+ 0xc0 2 finger curve, counter clockwise
+ 0xc4 2 finger curve, clockwise
+ 0x2e 3 finger straight up
+ 0x2a 3 finger straight down
+ 0x28 3 finger straight right
+ 0x2c 3 finger straight left
+ 0x38 palm
+
==============================================================================
* Register Listing
==============================================================================
+ Registers are represented as 16-bit values. The higher 8 bits represent
+the page address and the lower 8 bits represent the relative offset within
+that particular page. Refer to the 'Programming Sequence for Page Register
+Reading/Writing' section for instructions on how to change the current page
+address.
+
offset width default r/w name
-0x00 bit7~bit0 0x01 RO device ID
+0x8200 bit7~bit0 0x01 RO device ID
-0x01 bit7~bit0 0xc0 RW version ID
+0x8201 bit7~bit0 RW version ID
+ 0xc1: STL3888 Ax
+ 0xd0 ~ 0xd2: STL3888 Bx
+ 0xe0 ~ 0xe1: STL3888 Cx
+ 0xe2 ~ 0xe3: STL3888 Dx
-0x02 bit7~bit0 0x01 RO vendor ID
+0x8202 bit7~bit0 0x01 RO vendor ID
-0x03 bit7~bit0 0x01 RO product ID
+0x8203 bit7~bit0 0x01 RO product ID
-0x04 bit3~bit0 0x01 RW revision ID
+0x8204 bit3~bit0 0x01 RW revision ID
-0x0b RO test mode status 1
- bit3 1 RO 0: rotate 180 degree, 1: no rotation
+0x820b test mode status 1
+ bit3 1 RO 0: rotate 180 degree
+ 1: no rotation
+ *only supported by H/W prior to Cx
- bit5~bit4 RO number of buttons
- 11 => 2, lbtn/rbtn
- 10 => 4, lbtn/rbtn/scru/scrd
- 01 => 6, lbtn/rbtn/scru/scrd/scrl/scrr
- 00 => 6, lbtn/rbtn/scru/scrd/fbtn/bbtn
+0x820f register file page control
+ bit2 0 RW 1: rotate 180 degree
+ 0: no rotation
+ *supported since Cx
-0x0f RW register file page control
bit0 0 RW 1 to enable page 1 register files
+ *only supported by H/W prior to Cx
-0x10 RW system control 1
+0x8210 RW system control 1
bit0 1 RW Reserved, must be 1
bit1 0 RW Reserved, must be 0
- bit4 1 RW Reserved, must be 0
- bit5 0 RW register clock gating enable
+ bit4 0 RW Reserved, must be 0
+ bit5 1 RW register clock gating enable
0: read only, 1: read/write enable
(Note that following registers does not require clock gating being
enabled prior to write: 05 06 07 08 09 0c 0f 10 11 12 16 17 18 23 2e
40 41 42 43. In addition to that, this bit must be 1 when gesture
mode is enabled)
-0x31 RW on-pad command detection
+0x8220 test mode status
+ bit5~bit4 RO number of buttons
+ 11 => 2, lbtn/rbtn
+ 10 => 4, lbtn/rbtn/scru/scrd
+ 01 => 6, lbtn/rbtn/scru/scrd/scrl/scrr
+ 00 => 6, lbtn/rbtn/scru/scrd/fbtn/bbtn
+ *only supported by H/W prior to Cx
+
+0x8231 RW on-pad command detection
bit7 0 RW on-pad command left button down tag
enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
-0x34 RW on-pad command control 5
+0x8234 RW on-pad command control 5
bit4~bit0 0x05 RW XLO in 0s/4/1, so 03h = 0010.1b = 2.5
(Note that position unit is in 0.5 scanline)
+ *only supported by H/W prior to Cx
bit7 0 RW on-pad tap zone enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
-0x35 RW on-pad command control 6
+0x8235 RW on-pad command control 6
bit4~bit0 0x1d RW XHI in 0s/4/1, so 19h = 1100.1b = 12.5
(Note that position unit is in 0.5 scanline)
+ *only supported by H/W prior to Cx
-0x36 RW on-pad command control 7
+0x8236 RW on-pad command control 7
bit4~bit0 0x04 RW YLO in 0s/4/1, so 03h = 0010.1b = 2.5
(Note that position unit is in 0.5 scanline)
+ *only supported by H/W prior to Cx
-0x37 RW on-pad command control 8
+0x8237 RW on-pad command control 8
bit4~bit0 0x13 RW YHI in 0s/4/1, so 11h = 1000.1b = 8.5
(Note that position unit is in 0.5 scanline)
+ *only supported by H/W prior to Cx
-0x40 RW system control 5
+0x8240 RW system control 5
bit1 0 RW FSP Intellimouse mode enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
bit2 0 RW movement + abs. coordinate mode enable
0: disable, 1: enable
@@ -537,6 +786,7 @@ offset width default r/w name
bit 1 is not set. However, the format is different from that of bit 1.
In addition, when bit 1 and bit 2 are set at the same time, bit 2 will
override bit 1.)
+ *only supported by H/W prior to Cx
bit3 0 RW abs. coordinate only mode enable
0: disable, 1: enable
@@ -544,9 +794,11 @@ offset width default r/w name
bit 1 is not set. However, the format is different from that of bit 1.
In addition, when bit 1, bit 2 and bit 3 are set at the same time,
bit 3 will override bit 1 and 2.)
+ *only supported by H/W prior to Cx
bit5 0 RW auto switch enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
bit6 0 RW G0 abs. + notify packet format enable
0: disable, 1: enable
@@ -554,18 +806,68 @@ offset width default r/w name
bit 2 and 3. That is, if any of those bit is 1, host will receive
absolute coordinates; otherwise, host only receives packets with
relative coordinate.)
+ *only supported by H/W prior to Cx
bit7 0 RW EN_PS2_F2: PS/2 gesture mode 2nd
finger packet enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
-0x43 RW on-pad control
+0x8243 RW on-pad control
bit0 0 RW on-pad control enable
0: disable, 1: enable
(Note that if this bit is cleared, bit 3/5 will be ineffective)
+ *only supported by H/W prior to Cx
bit3 0 RW on-pad fix vertical scrolling enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
bit5 0 RW on-pad fix horizontal scrolling enable
0: disable, 1: enable
+ *only supported by H/W prior to Cx
+
+0x8290 RW software control register 1
+ bit0 0 RW absolute coordination mode
+ 0: disable, 1: enable
+ *supported since Cx
+
+ bit1 0 RW gesture ID output
+ 0: disable, 1: enable
+ *supported since Cx
+
+ bit2 0 RW two fingers' coordinates output
+ 0: disable, 1: enable
+ *supported since Cx
+
+ bit3 0 RW finger up one packet output
+ 0: disable, 1: enable
+ *supported since Cx
+
+ bit4 0 RW absolute coordination continuous mode
+ 0: disable, 1: enable
+ *supported since Cx
+
+ bit6~bit5 00 RW gesture group selection
+ 00: basic
+ 01: suite
+ 10: suite pro
+ 11: advanced
+ *supported since Cx
+
+ bit7 0 RW Bx packet output compatible mode
+ 0: disable, 1: enable
+ *supported since Cx
+
+
+0x833d RW on-pad command control 1
+ bit7 1 RW on-pad command detection enable
+ 0: disable, 1: enable
+ *supported since Cx
+
+0x833e RW on-pad command detection
+ bit7 0 RW on-pad command left button down tag
+ enable. Works only in H/W based PS/2
+ data packet mode.
+ 0: disable, 1: enable
+ *supported since Cx
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 7a9e0b4b290..506c7390c2b 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -17,8 +17,8 @@ You can use common commands, such as cp and scp, to copy the
memory image to a dump file on the local disk, or across the network to
a remote system.
-Kdump and kexec are currently supported on the x86, x86_64, ppc64 and ia64
-architectures.
+Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
+and s390x architectures.
When the system kernel boots, it reserves a small section of memory for
the dump-capture kernel. This ensures that ongoing Direct Memory Access
@@ -34,11 +34,18 @@ Similarly on PPC64 machines first 32KB of physical memory is needed for
booting regardless of where the kernel is loaded and to support 64K page
size kexec backs up the first 64KB memory.
+For s390x, when kdump is triggered, the crashkernel region is exchanged
+with the region [0, crashkernel region size] and then the kdump kernel
+runs in [0, crashkernel region size]. Therefore no relocatable kernel is
+needed for s390x.
+
All of the necessary information about the system kernel's core image is
encoded in the ELF format, and stored in a reserved area of memory
before a crash. The physical address of the start of the ELF header is
passed to the dump-capture kernel through the elfcorehdr= boot
-parameter.
+parameter. Optionally the size of the ELF header can also be passed
+when using the elfcorehdr=[size[KMG]@]offset[KMG] syntax.
+
With the dump-capture kernel, you can access the memory image, or "old
memory," in two ways:
@@ -291,6 +298,10 @@ Boot into System Kernel
The region may be automatically placed on ia64, see the
dump-capture kernel config option notes above.
+ On s390x, typically use "crashkernel=xxM". The value of xx is dependent
+ on the memory consumption of the kdump system. In general this is not
+ dependent on the memory size of the production system.
+
Load the Dump-capture Kernel
============================
@@ -308,6 +319,8 @@ For ppc64:
- Use vmlinux
For ia64:
- Use vmlinux or vmlinuz.gz
+For s390x:
+ - Use image or bzImage
If you are using a uncompressed vmlinux image then use following command
@@ -337,6 +350,8 @@ For i386, x86_64 and ia64:
For ppc64:
"1 maxcpus=1 noirqdistrib reset_devices"
+For s390x:
+ "1 maxcpus=1 cgroup_disable=memory"
Notes on loading the dump-capture kernel:
@@ -362,6 +377,20 @@ Notes on loading the dump-capture kernel:
dump. Hence generally it is useful either to build a UP dump-capture
kernel or specify maxcpus=1 option while loading dump-capture kernel.
+* For s390x there are two kdump modes: If an ELF header is specified with
+ the elfcorehdr= kernel parameter, it is used by the kdump kernel as it
+ is done on all other architectures. If no elfcorehdr= kernel parameter is
+ specified, the s390x kdump kernel dynamically creates the header. The
+ second mode has the advantage that for CPU and memory hotplug, kdump does
+ not have to be reloaded with kexec_load().
+
+* For s390x systems with many attached devices the "cio_ignore" kernel
+ parameter should be used for the kdump kernel in order to prevent allocation
+ of kernel memory for devices that are not relevant for kdump. The same
+ applies to systems that use SCSI/FCP devices. In that case the
+ "allow_lun_scan" zfcp module parameter should be set to zero before
+ setting FCP devices online.
+
Kernel Panic
============
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a0c5c5f4fce..c92b1532f05 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CPU-intensive style benchmark, and it can vary highly in
a microbenchmark depending on workload and compiler.
- 1: only for 32-bit processes
- 2: only for 64-bit processes
+ 32: only for 32-bit processes
+ 64: only for 64-bit processes
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes
- amd_iommu= [HW,X86-84]
+ amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
fullflush - enable flushing of IO/TLB entries when
@@ -329,6 +329,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
is a lot of faster
off - do not initialize any AMD IOMMU found in
the system
+ force_isolation - Force device isolation for all
+ devices. The IOMMU driver is no
+ longer allowed to lift isolation
+ requirements as needed. This option
+ does not override iommu=pt
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
@@ -623,6 +628,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
no_debug_objects
[KNL] Disable object debugging
+ debug_guardpage_minorder=
+ [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+ parameter allows control of the order of pages that will
+ be intentionally kept free (and hence protected) by the
+ buddy allocator. A bigger value increases the probability
+ of catching random memory corruption, but reduces the
+ amount of memory available for normal system use. The
+ maximum possible value is MAX_ORDER/2. Setting this
+ parameter to 1 or 2 should be enough to identify most
+ random memory corruption problems caused by bugs in
+ kernel or driver code when a CPU writes to (or reads
+ from) a random memory location. Note that there exists
+ a class of memory corruption problems caused by buggy
+ H/W or F/W or by drivers badly programming DMA
+ (basically when memory is written at bus level and the
+ CPU MMU is bypassed) which are not detectable by
+ CONFIG_DEBUG_PAGEALLOC, hence this option will not help
+ to track down these problems.
+
debugpat [X86] Enable PAT debugging
decnet.addr= [HW,NET]
@@ -1059,7 +1083,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nomerge
forcesac
soft
- pt [x86, IA-64]
+ pt [x86, IA-64]
+ group_mf [x86, IA-64]
+
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -1178,9 +1204,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
- kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
- Default is 1 (enabled)
-
kvm.mmu_audit= [KVM] This is a R/W parameter which allows audit
KVM MMU at runtime.
Default is 0 (off)
@@ -1630,12 +1653,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The default is to return 64-bit inode numbers.
nfs.nfs4_disable_idmapping=
- [NFSv4] When set, this option disables the NFSv4
- idmapper on the client, but only if the mount
- is using the 'sec=sys' security flavour. This may
- make migration from legacy NFSv2/v3 systems easier
- provided that the server has the appropriate support.
- The default is to always enable NFSv4 idmapping.
+ [NFSv4] When set to the default of '1', this option
+ ensures that both the RPC level authentication
+ scheme and the NFS level operations agree to use
+ numeric uids/gids if the mount is using the
+ 'sec=sys' security flavour. In effect it is
+ disabling idmapping, which can make migration from
+ legacy NFSv2/v3 systems to NFSv4 easier.
+ Servers that do not support this mode of operation
+ will be autodetected by the client, and it will fall
+ back to using the idmapper.
+ To turn off this behaviour, set the value to '0'.
nmi_debug= [KNL,AVR32,SH] Specify one or more actions to take
when a NMI is triggered.
@@ -1885,6 +1913,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
arch_perfmon: [X86] Force use of architectural
perfmon on Intel CPUs instead of the
CPU specific event set.
+ timer: [X86] Force use of architectural NMI
+ timer mode (see also oprofile.timer
+ for generic hr timer mode)
+ [s390] Force legacy basic mode sampling
+ (report cpu_type "timer")
oops=panic Always panic on oopses. Default is to just kill the
process, but there is a small probability of
@@ -2632,6 +2665,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[USB] Start with the old device initialization
scheme (default 0 = off).
+ usbcore.usbfs_memory_mb=
+ [USB] Memory limit (in MB) for buffers allocated by
+ usbfs (default = 16, 0 = max = 2047).
+
usbcore.use_both_schemes=
[USB] Try the other device initialization scheme
if the first one fails (default 1 = enabled).
@@ -2750,11 +2787,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
- emulate Vsyscalls turn into traps and are emulated
- reasonably safely.
+ emulate [default] Vsyscalls turn into traps and are
+ emulated reasonably safely.
- native [default] Vsyscalls are native syscall
- instructions.
+ native Vsyscalls are native syscall instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index abf768c681e..5dbc99c04f6 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/lockdep-design.txt
@@ -221,3 +221,66 @@ when the chain is validated for the first time, is then put into a hash
table, which hash-table can be checked in a lockfree manner. If the
locking chain occurs again later on, the hash table tells us that we
dont have to validate the chain again.
+
+Troubleshooting:
+----------------
+
+The validator tracks a maximum of MAX_LOCKDEP_KEYS lock classes.
+Exceeding this number will trigger the following lockdep warning:
+
+ (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+
+By default, MAX_LOCKDEP_KEYS is currently set to 8191, and typical
+desktop systems have fewer than 1,000 lock classes, so this warning
+normally results from lock-class leakage or failure to properly
+initialize locks. These two problems are illustrated below:
+
+1. Repeated module loading and unloading while running the validator
+ will result in lock-class leakage. The issue here is that each
+ load of the module will create a new set of lock classes for
+ that module's locks, but module unloading does not remove old
+ classes (see below discussion of reuse of lock classes for why).
+ Therefore, if that module is loaded and unloaded repeatedly,
+ the number of lock classes will eventually reach the maximum.
+
+2. Using structures such as arrays that have large numbers of
+ locks that are not explicitly initialized. For example,
+ a hash table with 8192 buckets where each bucket has its own
+ spinlock_t will consume 8192 lock classes -unless- each spinlock
+ is explicitly initialized at runtime, for example, using the
+ run-time spin_lock_init() as opposed to compile-time initializers
+ such as __SPIN_LOCK_UNLOCKED(). Failure to properly initialize
+ the per-bucket spinlocks would guarantee lock-class overflow.
+ In contrast, a loop that called spin_lock_init() on each lock
+ would place all 8192 locks into a single lock class (see the
+ sketch after this list).
+
+ The moral of this story is that you should always explicitly
+ initialize your locks.
+
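+For illustration, a minimal sketch of the recommended run-time
+initialization pattern (the structure and names below are invented for
+the example; only spin_lock_init() and INIT_HLIST_HEAD() are real APIs):
+
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+
+ #define MY_HASH_BUCKETS 8192
+
+ struct my_bucket {
+ 	spinlock_t lock;
+ 	struct hlist_head chain;
+ };
+
+ static struct my_bucket my_table[MY_HASH_BUCKETS];
+
+ static void my_table_init(void)
+ {
+ 	int i;
+
+ 	/* One lock class is shared by all 8192 per-bucket locks. */
+ 	for (i = 0; i < MY_HASH_BUCKETS; i++) {
+ 		spin_lock_init(&my_table[i].lock);
+ 		INIT_HLIST_HEAD(&my_table[i].chain);
+ 	}
+ }
+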
+One might argue that the validator should be modified to allow
+lock classes to be reused. However, if you are tempted to make this
+argument, first review the code and think through the changes that would
+be required, keeping in mind that the lock classes to be removed are
+likely to be linked into the lock-dependency graph. This turns out to
+be harder to do than to say.
+
+Of course, if you do run out of lock classes, the next thing to do is
+to find the offending lock classes. First, the following command gives
+you the number of lock classes currently in use along with the maximum:
+
+ grep "lock-classes" /proc/lockdep_stats
+
+This command produces the following output on a modest system:
+
+ lock-classes: 748 [max: 8191]
+
+If the number allocated (748 above) increases continually over time,
+then there is likely a leak. The following command can be used to
+identify the leaking lock classes:
+
+ grep "BD" /proc/lockdep
+
+Run the command and save the output, then compare against the output from
+a later run of this command to identify the leakers. This same output
+can also help you find situations where runtime lock initialization has
+been omitted.
diff --git a/Documentation/md.txt b/Documentation/md.txt
index fc94770f44a..993fba37b7d 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -357,14 +357,14 @@ Each directory contains:
written to, that device.
state
- A file recording the current state of the device in the array
+ A file recording the current state of the device in the array
which can be a comma separated list of
faulty - device has been kicked from active use due to
- a detected fault or it has unacknowledged bad
- blocks
+ a detected fault, or it has unacknowledged bad
+ blocks
in_sync - device is a fully in-sync member of the array
writemostly - device will only be subject to read
- requests if there are no other options.
+ requests if there are no other options.
This applies only to raid1 arrays.
blocked - device has failed, and the failure hasn't been
acknowledged yet by the metadata handler.
@@ -374,6 +374,13 @@ Each directory contains:
This includes spares that are in the process
of being recovered to
write_error - device has ever seen a write error.
+ want_replacement - device is (mostly) working but probably
+ should be replaced, either due to errors or
+ due to user request.
+ replacement - device is a replacement for another active
+ device with the same raid_disk.
+
+
This list may grow in future.
This can be written to.
Writing "faulty" simulates a failure on the device.
@@ -386,6 +393,13 @@ Each directory contains:
Writing "in_sync" sets the in_sync flag.
Writing "write_error" sets writeerrorseen flag.
Writing "-write_error" clears writeerrorseen flag.
+ Writing "want_replacement" is allowed at any time except to a
+ replacement device or a spare. It sets the flag.
+ Writing "-want_replacement" is allowed at any time. It clears
+ the flag.
+ Writing "replacement" or "-replacement" is only allowed before
+ starting the array. It sets or clears the flag.
+
This file responds to select/poll. Any change to 'faulty'
or 'blocked' causes an event.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index bbce1215434..9ad9ddeb384 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -144,6 +144,8 @@ nfc.txt
- The Linux Near Field Communication (NFS) subsystem.
olympic.txt
- IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
+openvswitch.txt
+ - Open vSwitch developer documentation.
operstates.txt
- Overview of network interface operational states.
packet_mmap.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c86d03f18a5..221ad0cdf11 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -200,15 +200,16 @@ abled during run time. Following log_levels are defined:
0 - All debug output disabled
1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or tt entry added / changed / deleted
-3 - Enable all messages
+2 - Enable messages related to routes being added / changed / deleted
+4 - Enable messages related to translation table operations
+7 - Enable all messages
The debug output can be changed at runtime using the file
/sys/class/net/bat0/mesh/log_level. e.g.
# echo 2 > /sys/class/net/bat0/mesh/log_level
-will enable debug messages for when routes or TTs change.
+will enable debug messages for when routes change.
BATCTL
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 91df678fb7f..080ad26690a 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -196,6 +196,23 @@ or, for backwards compatibility, the option value. E.g.,
The parameters are as follows:
+active_slave
+
+ Specifies the new active slave for modes that support it
+ (active-backup, balance-alb and balance-tlb). Possible values
+ are the name of any currently enslaved interface, or an empty
+ string. If a name is given, the slave and its link must be up in order
+ to be selected as the new active slave. If an empty string is
+ specified, the current active slave is cleared, and a new active
+ slave is selected automatically.
+
+ Note that this is only available through the sysfs interface. No module
+ parameter by this name exists.
+
+ The normal value of this option is the name of the currently
+ active slave, or the empty string if there is no active slave or
+ the current mode does not use an active slave.
+
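+ For example (an illustrative sketch; bond0 and eth1 are placeholder
+ names), the active slave can be selected through sysfs like this:
+
+ /* Sketch: make eth1 the active slave of bond0 via sysfs. */
+ #include <stdio.h>
+
+ int main(void)
+ {
+ 	FILE *f = fopen("/sys/class/net/bond0/bonding/active_slave", "w");
+
+ 	/* Writing an empty string instead would clear the active slave. */
+ 	if (!f || fputs("eth1\n", f) == EOF)
+ 		perror("active_slave");
+ 	if (f)
+ 		fclose(f);
+ 	return 0;
+ }
+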
ad_select
Specifies the 802.3ad aggregation selection logic to use. The
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index f41ea240522..1dc1c24a754 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -78,3 +78,30 @@ in software. This is currently WIP.
See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
+6LoWPAN Linux implementation
+============================
+
+The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+octets of actual MAC payload once security is turned on, on a wireless link
+with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format
+[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
+taking into account limited bandwidth, memory, or energy resources that are
+expected in applications such as wireless sensor networks. [RFC4944] defines
+a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
+to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
+compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
+relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+
+In September 2011 an update to the standard was published: [RFC6282].
+It deprecates HC1 and HC2 compression and defines the IPHC encoding format,
+which is used in this Linux implementation.
+
+All the code related to 6lowpan can be found in net/ieee802154/6lowpan.*
+
+To set up a 6lowpan interface you need (busybox release > 1.17.0):
+1. Add an IEEE 802.15.4 interface and initialize the PANid;
+2. Add a 6lowpan interface by a command like:
+ # ip link add link wpan0 name lowpan0 type lowpan
+3. Set the MAC address (if needed):
+ # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
+4. Bring up the 'lowpan0' interface
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 65968fbf1e4..ac5debb2f16 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -539,12 +539,14 @@ static int if_getconfig(char *ifname)
metric = 0;
} else
metric = ifr.ifr_metric;
+ printf("The result of SIOCGIFMETRIC is %d\n", metric);
strcpy(ifr.ifr_name, ifname);
if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0)
mtu = 0;
else
mtu = ifr.ifr_mtu;
+ printf("The result of SIOCGIFMTU is %d\n", mtu);
strcpy(ifr.ifr_name, ifname);
if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f049a1ca186..ad3e80e17b4 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -31,6 +31,16 @@ neigh/default/gc_thresh3 - INTEGER
when using large numbers of interfaces and when communicating
with large numbers of directly-connected peers.
+neigh/default/unres_qlen_bytes - INTEGER
+ The maximum number of bytes which may be used by packets
+ queued for each unresolved address by other network layers.
+ (added in linux 3.3)
+
+neigh/default/unres_qlen - INTEGER
+ The maximum number of packets which may be queued for each
+ unresolved address by other network layers.
+ (deprecated in linux 3.3) : use unres_qlen_bytes instead.
+
mtu_expires - INTEGER
Time, in seconds, that cached PMTU information is kept.
@@ -165,6 +175,9 @@ tcp_congestion_control - STRING
connections. The algorithm "reno" is always available, but
additional choices may be available based on kernel configuration.
Default is set as part of kernel configuration.
+ For passive connections, the listener congestion control choice
+ is inherited.
+ [see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
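+
+ As an illustrative sketch (the algorithm name "reno" is just an
+ example), the listener's choice can be made with setsockopt() before
+ accepting connections:
+
+ /* Sketch: pick the congestion control used by accepted connections. */
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <sys/socket.h>
+
+ int main(void)
+ {
+ 	const char name[] = "reno";
+ 	int fd = socket(AF_INET, SOCK_STREAM, 0);
+
+ 	if (fd < 0 || setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
+ 				 name, strlen(name)) < 0)
+ 		perror("TCP_CONGESTION");
+ 	/* ... bind(), listen(), accept() as usual ... */
+ 	return 0;
+ }
+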
tcp_cookie_size - INTEGER
Default size of TCP Cookie Transactions (TCPCT) option, that may be
@@ -282,11 +295,11 @@ tcp_max_ssthresh - INTEGER
Default: 0 (off)
tcp_max_syn_backlog - INTEGER
- Maximal number of remembered connection requests, which are
- still did not receive an acknowledgment from connecting client.
- Default value is 1024 for systems with more than 128Mb of memory,
- and 128 for low memory machines. If server suffers of overload,
- try to increase this number.
+ Maximal number of remembered connection requests, which have not
+ received an acknowledgment from the connecting client.
+ The minimal value is 128 for low memory machines, and it will
+ increase in proportion to the memory of the machine.
+ If the server suffers from overload, try increasing this number.
tcp_max_tw_buckets - INTEGER
Maximal number of timewait sockets held by system simultaneously.
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
new file mode 100644
index 00000000000..b8a048b8df3
--- /dev/null
+++ b/Documentation/networking/openvswitch.txt
@@ -0,0 +1,195 @@
+Open vSwitch datapath developer documentation
+=============================================
+
+The Open vSwitch kernel module allows flexible userspace control over
+flow-level packet processing on selected network devices. It can be
+used to implement a plain Ethernet switch, network device bonding,
+VLAN processing, network access control, flow-based network control,
+and so on.
+
+The kernel module implements multiple "datapaths" (analogous to
+bridges), each of which can have multiple "vports" (analogous to ports
+within a bridge). Each datapath also has associated with it a "flow
+table" that userspace populates with "flows" that map from keys based
+on packet headers and metadata to sets of actions. The most common
+action forwards the packet to another vport; other actions are also
+implemented.
+
+When a packet arrives on a vport, the kernel module processes it by
+extracting its flow key and looking it up in the flow table. If there
+is a matching flow, it executes the associated actions. If there is
+no match, it queues the packet to userspace for processing (as part of
+its processing, userspace will likely set up a flow to handle further
+packets of the same type entirely in-kernel).
+
+
+Flow key compatibility
+----------------------
+
+Network protocols evolve over time. New protocols become important
+and existing protocols lose their prominence. For the Open vSwitch
+kernel module to remain relevant, it must be possible for newer
+versions to parse additional protocols as part of the flow key. It
+might even be desirable, someday, to drop support for parsing
+protocols that have become obsolete. Therefore, the Netlink interface
+to Open vSwitch is designed to allow carefully written userspace
+applications to work with any version of the flow key, past or future.
+
+To support this forward and backward compatibility, whenever the
+kernel module passes a packet to userspace, it also passes along the
+flow key that it parsed from the packet. Userspace then extracts its
+own notion of a flow key from the packet and compares it against the
+kernel-provided version:
+
+ - If userspace's notion of the flow key for the packet matches the
+ kernel's, then nothing special is necessary.
+
+ - If the kernel's flow key includes more fields than the userspace
+ version of the flow key, for example if the kernel decoded IPv6
+ headers but userspace stopped at the Ethernet type (because it
+ does not understand IPv6), then again nothing special is
+ necessary. Userspace can still set up a flow in the usual way,
+ as long as it uses the kernel-provided flow key to do it.
+
+ - If the userspace flow key includes more fields than the
+ kernel's, for example if userspace decoded an IPv6 header but
+ the kernel stopped at the Ethernet type, then userspace can
+ forward the packet manually, without setting up a flow in the
+ kernel. This case is bad for performance because every packet
+ that the kernel considers part of the flow must go to userspace,
+ but the forwarding behavior is correct. (If userspace can
+ determine that the values of the extra fields would not affect
+ forwarding behavior, then it could set up a flow anyway.)
+
+How flow keys evolve over time is important to making this work, so
+the following sections go into detail.
+
+
+Flow key format
+---------------
+
+A flow key is passed over a Netlink socket as a sequence of Netlink
+attributes. Some attributes represent packet metadata, defined as any
+information about a packet that cannot be extracted from the packet
+itself, e.g. the vport on which the packet was received. Most
+attributes, however, are extracted from headers within the packet,
+e.g. source and destination addresses from Ethernet, IP, or TCP
+headers.
+
+The <linux/openvswitch.h> header file defines the exact format of the
+flow key attributes. For informal explanatory purposes here, we write
+them as comma-separated strings, with parentheses indicating arguments
+and nesting. For example, the following could represent a flow key
+corresponding to a TCP packet that arrived on vport 1:
+
+ in_port(1), eth(src=e0:91:f5:21:d0:b2, dst=00:02:e3:0f:80:a4),
+    eth_type(0x0800), ipv4(src=172.16.0.20, dst=172.18.0.52, proto=6, tos=0,
+ frag=no), tcp(src=49163, dst=80)
+
+Often we ellipsize arguments not important to the discussion, e.g.:
+
+ in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
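+
+As a hedged illustration of the attribute encoding (the OVS_KEY_ATTR_* names
+come from <linux/openvswitch.h>; the wrapper function below is made up), two
+of the attributes above could be emitted with the generic netlink attribute
+helpers:
+
+    /* Illustration only: emit the in_port and eth_type attributes. */
+    static int example_put_partial_key(struct sk_buff *skb, u32 in_port,
+                                       __be16 eth_type)
+    {
+            if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, in_port))
+                    return -EMSGSIZE;
+            if (nla_put(skb, OVS_KEY_ATTR_ETHERTYPE, sizeof(eth_type),
+                        &eth_type))
+                    return -EMSGSIZE;
+            return 0;
+    }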
+
+
+Basic rule for evolving flow keys
+---------------------------------
+
+Some care is needed to really maintain forward and backward
+compatibility for applications that follow the rules listed under
+"Flow key compatibility" above.
+
+The basic rule is obvious:
+
+ ------------------------------------------------------------------
+ New network protocol support must only supplement existing flow
+ key attributes. It must not change the meaning of already defined
+ flow key attributes.
+ ------------------------------------------------------------------
+
+This rule does have less-obvious consequences so it is worth working
+through a few examples. Suppose, for example, that the kernel module
+did not already implement VLAN parsing. Instead, it just interpreted
+the 802.1Q TPID (0x8100) as the Ethertype then stopped parsing the
+packet. The flow key for any packet with an 802.1Q header would look
+essentially like this, ignoring metadata:
+
+ eth(...), eth_type(0x8100)
+
+Naively, to add VLAN support, it makes sense to add a new "vlan" flow
+key attribute to contain the VLAN tag, then continue to decode the
+encapsulated headers beyond the VLAN tag using the existing field
+definitions. With this change, a TCP packet in VLAN 10 would have a
+flow key much like this:
+
+ eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...)
+
+But this change would negatively affect a userspace application that
+has not been updated to understand the new "vlan" flow key attribute.
+The application could, following the flow compatibility rules above,
+ignore the "vlan" attribute that it does not understand and therefore
+assume that the flow contained IP packets. This is a bad assumption
+(the flow only contains IP packets if one parses and skips over the
+802.1Q header) and it could cause the application's behavior to change
+across kernel versions even though it follows the compatibility rules.
+
+The solution is to use a set of nested attributes. This is, for
+example, why 802.1Q support uses nested attributes. A TCP packet in
+VLAN 10 is actually expressed as:
+
+ eth(...), eth_type(0x8100), vlan(vid=10, pcp=0), encap(eth_type(0x0800),
+    ip(proto=6, ...), tcp(...))
+
+Notice how the "eth_type", "ip", and "tcp" flow key attributes are
+nested inside the "encap" attribute. Thus, an application that does
+not understand the "vlan" key will not see either of those attributes
+and therefore will not misinterpret them. (Also, the outer eth_type
+is still 0x8100, not changed to 0x0800.)
+
+Handling malformed packets
+--------------------------
+
+Don't drop packets in the kernel for malformed protocol headers, bad
+checksums, etc. This would prevent userspace from implementing a
+simple Ethernet switch that forwards every packet.
+
+Instead, in such a case, include an attribute with "empty" content.
+It doesn't matter if the empty content could be valid protocol values, as
+long as those values are rarely seen in practice, because userspace can
+always arrange for all packets with those values to be sent to userspace,
+where it can handle them individually.
+
+For example, consider a packet that contains an IP header that
+indicates protocol 6 for TCP, but which is truncated just after the IP
+header, so that the TCP header is missing. The flow key for this
+packet would include a tcp attribute with all-zero src and dst, like
+this:
+
+ eth(...), eth_type(0x0800), ip(proto=6, ...), tcp(src=0, dst=0)
+
+As another example, consider a packet with an Ethernet type of 0x8100,
+indicating that a VLAN TCI should follow, but which is truncated just
+after the Ethernet type. The flow key for this packet would include
+an all-zero-bits vlan and an empty encap attribute, like this:
+
+ eth(...), eth_type(0x8100), vlan(0), encap()
+
+Unlike a TCP packet with source and destination ports 0, an
+all-zero-bits VLAN TCI is not that rare, so the CFI bit (aka
+VLAN_TAG_PRESENT inside the kernel) is ordinarily set in a vlan
+attribute expressly to allow this situation to be distinguished.
+Thus, the flow key in this second example unambiguously indicates a
+missing or malformed VLAN TCI.
+
+Other rules
+-----------
+
+The other rules for flow keys are much less subtle:
+
+ - Duplicate attributes are not allowed at a given nesting level.
+
+ - Ordering of attributes is not significant.
+
+ - When the kernel sends a given flow key to userspace, it always
+ composes it the same way. This allows userspace to hash and
+ compare entire flow keys that it may not be able to fully
+ interpret.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4acea660372..1c08a4b0981 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -155,7 +155,7 @@ As capture, each frame contains two parts:
/* fill sockaddr_ll struct to prepare binding */
my_addr.sll_family = AF_PACKET;
- my_addr.sll_protocol = ETH_P_ALL;
+ my_addr.sll_protocol = htons(ETH_P_ALL);
my_addr.sll_ifindex = s_ifr.ifr_ifindex;
/* bind socket to eth0 */
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index a177de21d28..579994afbe0 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -208,7 +208,7 @@ The counter in rps_dev_flow_table values records the length of the current
CPU's backlog when a packet in this flow was last enqueued. Each backlog
queue has a head counter that is incremented on dequeue. A tail counter
is computed as head counter + queue length. In other words, the counter
-in rps_dev_flow_table[i] records the last element in flow i that has
+in rps_dev_flow[i] records the last element in flow i that has
been enqueued onto the currently designated CPU for flow i (of course,
entry i is actually selected by hash and multiple flows may hash to the
same entry i).
@@ -224,7 +224,7 @@ following is true:
- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
-- The current CPU is unset (equal to NR_CPUS)
+- The current CPU is unset (equal to RPS_NO_CPU)
- The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
@@ -235,7 +235,7 @@ CPU.
==== RFS Configuration
-RFS is only available if the kconfig symbol CONFIG_RFS is enabled (on
+RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
by default for SMP). The functionality remains disabled until explicitly
configured. The number of entries in the global flow table is set through:
@@ -258,7 +258,7 @@ For a single queue device, the rps_flow_cnt value for the single queue
would normally be configured to the same value as rps_sock_flow_entries.
For a multi-queue device, the rps_flow_cnt for each queue might be
configured as rps_sock_flow_entries / N, where N is the number of
-queues. So for instance, if rps_flow_entries is set to 32768 and there
+queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
are 16 configured receive queues, rps_flow_cnt for each queue might be
configured as 2048.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 8d67980fabe..d0aeeadd264 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -4,14 +4,16 @@ Copyright (C) 2007-2010 STMicroelectronics Ltd
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
-(Synopsys IP blocks); it has been fully tested on STLinux platforms.
+(Synopsys IP blocks).
Currently this network device driver is for all STM embedded MAC/GMAC
-(i.e. 7xxx/5xxx SoCs) and it's known working on other platforms i.e. ARM SPEAr.
+(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XILINX XC2V3000
+FF1152AMT0221 D1215994A VIRTEX FPGA board.
-DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing the first code
-implementation.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
+Universal version 4.0 have been used for developing this driver.
+
+This driver supports both the platform bus and PCI.
Please, for more information also visit: www.stlinux.com
@@ -277,5 +279,5 @@ In fact, these can generate an huge amount of debug messages.
6) TODO:
o XGMAC is not supported.
- o Review the timer optimisation code to use an embedded device that will be
- available in new chip generations.
+ o Add the EEE - Energy Efficient Ethernet
+ o Add the PTP - Precision Time Protocol
diff --git a/Documentation/networking/team.txt b/Documentation/networking/team.txt
new file mode 100644
index 00000000000..5a013686b9e
--- /dev/null
+++ b/Documentation/networking/team.txt
@@ -0,0 +1,2 @@
+Team devices are driven from userspace via libteam library which is here:
+ https://github.com/jpirko/libteam
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index b04cb7d45a1..6727b92bc2f 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -7,12 +7,9 @@ This subsystem deals with:
- Multiplexing of pins, pads, fingers (etc) see below for details
-The intention is to also deal with:
-
-- Software-controlled biasing and driving mode specific pins, such as
- pull-up/down, open drain etc, load capacitance configuration when controlled
- by software, etc.
-
+- Configuration of pins, pads, fingers (etc), such as software-controlled
+  biasing and driving mode of specific pins: pull-up/down, open drain,
+  load capacitance etc.
Top-level interface
===================
@@ -32,7 +29,7 @@ Definition of PIN:
be sparse - i.e. there may be gaps in the space with numbers where no
pin exists.
-When a PIN CONTROLLER is instatiated, it will register a descriptor to the
+When a PIN CONTROLLER is instantiated, it will register a descriptor to the
pin control framework, and this descriptor contains an array of pin descriptors
describing the pins handled by this specific pin controller.
@@ -61,14 +58,14 @@ this in our driver:
#include <linux/pinctrl/pinctrl.h>
-const struct pinctrl_pin_desc __refdata foo_pins[] = {
- PINCTRL_PIN(0, "A1"),
- PINCTRL_PIN(1, "A2"),
- PINCTRL_PIN(2, "A3"),
+const struct pinctrl_pin_desc foo_pins[] = {
+ PINCTRL_PIN(0, "A8"),
+ PINCTRL_PIN(1, "B8"),
+ PINCTRL_PIN(2, "C8"),
...
- PINCTRL_PIN(61, "H6"),
- PINCTRL_PIN(62, "H7"),
- PINCTRL_PIN(63, "H8"),
+ PINCTRL_PIN(61, "F1"),
+ PINCTRL_PIN(62, "G1"),
+ PINCTRL_PIN(63, "H1"),
};
static struct pinctrl_desc foo_desc = {
@@ -88,11 +85,16 @@ int __init foo_probe(void)
pr_err("could not register foo pin driver\n");
}
+To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
+selected drivers, you need to select them from your machine's Kconfig entry,
+since these are so tightly integrated with the machines they are used on.
+See arch/arm/mach-u300/Kconfig for an example.
+
Pins usually have fancier names than this. You can find these in the datasheet
for your chip. Notice that the core pinctrl.h file provides a fancy macro
called PINCTRL_PIN() to create the struct entries. As you can see I enumerated
-the pins from 0 in the upper left corner to 63 in the lower right corner,
-this enumeration was arbitrarily chosen, in practice you need to think
+the pins from 0 in the upper left corner to 63 in the lower right corner.
+This enumeration was arbitrarily chosen, in practice you need to think
through your numbering system so that it matches the layout of registers
and such things in your driver, or the code may become complicated. You must
also consider matching of offsets to the GPIO ranges that may be handled by
@@ -133,8 +135,8 @@ struct foo_group {
const unsigned num_pins;
};
-static unsigned int spi0_pins[] = { 0, 8, 16, 24 };
-static unsigned int i2c0_pins[] = { 24, 25 };
+static const unsigned int spi0_pins[] = { 0, 8, 16, 24 };
+static const unsigned int i2c0_pins[] = { 24, 25 };
static const struct foo_group foo_groups[] = {
{
@@ -193,6 +195,88 @@ structure, for example specific register ranges associated with each group
and so on.
+Pin configuration
+=================
+
+Pins can sometimes be software-configured in various ways, mostly related
+to their electronic properties when used as inputs or outputs. For example you
+may be able to make an output pin high impedance, or "tristate", meaning it is
+effectively disconnected. You may be able to connect an input pin to VDD or GND
+using a certain resistor value - pull up and pull down - so that the pin has a
+stable value when nothing is driving the rail it is connected to, or when it's
+unconnected.
+
+For example, a platform may do this:
+
+ret = pin_config_set("foo-dev", "FOO_GPIO_PIN", PLATFORM_X_PULL_UP);
+
+This pulls the pin up to VDD. The pin configuration driver implements
+callbacks for changing pin configuration in the pin controller ops like this:
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "platform_x_pindefs.h"
+
+static int foo_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned offset,
+ unsigned long *config)
+{
+ struct my_conftype conf;
+
+ ... Find setting for pin @ offset ...
+
+	*config = (unsigned long) conf;
+
+	return 0;
+}
+
+static int foo_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned offset,
+ unsigned long config)
+{
+ struct my_conftype *conf = (struct my_conftype *) config;
+
+	switch (conf) {
+	case PLATFORM_X_PULL_UP:
+		...
+		break;
+	}
+
+	return 0;
+}
+
+static int foo_pin_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *config)
+{
+ ...
+}
+
+static int foo_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long config)
+{
+ ...
+}
+
+static struct pinconf_ops foo_pconf_ops = {
+ .pin_config_get = foo_pin_config_get,
+ .pin_config_set = foo_pin_config_set,
+ .pin_config_group_get = foo_pin_config_group_get,
+ .pin_config_group_set = foo_pin_config_group_set,
+};
+
+/* Pin config operations are handled by some pin controller */
+static struct pinctrl_desc foo_desc = {
+ ...
+ .confops = &foo_pconf_ops,
+};
+
+Since some controllers have special logic for handling entire groups of pins,
+they can exploit the special whole-group pin control function. The
+pin_config_group_set() callback is allowed to return the error code -EAGAIN
+for groups it does not want to handle, or if it just wants to do some
+group-level handling and then fall through to iterate over all pins, in which
+case each individual pin will be treated by separate pin_config_set() calls as
+well.
+
+
Interaction with the GPIO subsystem
===================================
@@ -214,19 +298,20 @@ static struct pinctrl_gpio_range gpio_range_a = {
.name = "chip a",
.id = 0,
.base = 32,
+ .pin_base = 32,
.npins = 16,
.gc = &chip_a;
};
-static struct pinctrl_gpio_range gpio_range_a = {
+static struct pinctrl_gpio_range gpio_range_b = {
.name = "chip b",
.id = 0,
.base = 48,
+ .pin_base = 64,
.npins = 8,
.gc = &chip_b;
};
-
{
struct pinctrl_dev *pctl;
...
@@ -235,42 +320,39 @@ static struct pinctrl_gpio_range gpio_range_a = {
}
So this complex system has one pin controller handling two different
-GPIO chips. Chip a has 16 pins and chip b has 8 pins. They are mapped in
-the global GPIO pin space at:
+GPIO chips. "chip a" has 16 pins and "chip b" has 8 pins. The "chip a" and
+"chip b" have different .pin_base, which means a start pin number of the
+GPIO range.
+
+The GPIO range of "chip a" starts from the GPIO base of 32 and actual
+pin range also starts from 32. However "chip b" has different starting
+offset for the GPIO range and pin range. The GPIO range of "chip b" starts
+from GPIO number 48, while the pin range of "chip b" starts from 64.
+
+We can convert a gpio number to the actual pin number using this "pin_base".
+They are mapped in the global GPIO pin space at:
-chip a: [32 .. 47]
-chip b: [48 .. 55]
+chip a:
+ - GPIO range : [32 .. 47]
+ - pin range : [32 .. 47]
+chip b:
+ - GPIO range : [48 .. 55]
+ - pin range : [64 .. 71]
When GPIO-specific functions in the pin control subsystem are called, these
-ranges will be used to look up the apropriate pin controller by inspecting
+ranges will be used to look up the appropriate pin controller by inspecting
and matching the pin to the pin ranges across all controllers. When a
pin controller handling the matching range is found, GPIO-specific functions
will be called on that specific pin controller.
For all functionalities dealing with pin biasing, pin muxing etc, the pin
controller subsystem will subtract the range's .base offset from the passed
-in gpio pin number, and pass that on to the pin control driver, so the driver
-will get an offset into its handled number range. Further it is also passed
+in gpio number, and add the range's .pin_base offset to retrieve a pin number.
+After that, the subsystem passes it on to the pin control driver, so the driver
+will get a pin number within its handled number range. Further it is also passed
the range ID value, so that the pin controller knows which range it should
deal with.
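+
+As a minimal sketch of that conversion (the helper name here is made up; the
+fields are the pinctrl_gpio_range members described above):
+
+static unsigned example_gpio_to_pin(struct pinctrl_gpio_range *range,
+				    unsigned gpio)
+{
+	/* offset within the GPIO range, rebased onto the pin number space */
+	return gpio - range->base + range->pin_base;
+}
+
+For "chip b" above, GPIO number 50 would map to pin 64 + (50 - 48) = 66.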
-For example: if a user issues pinctrl_gpio_set_foo(50), the pin control
-subsystem will find that the second range on this pin controller matches,
-subtract the base 48 and call the
-pinctrl_driver_gpio_set_foo(pinctrl, range, 2) where the latter function has
-this signature:
-
-int pinctrl_driver_gpio_set_foo(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *rangeid,
- unsigned offset);
-
-Now the driver knows that we want to do some GPIO-specific operation on the
-second GPIO range handled by "chip b", at offset 2 in that specific range.
-
-(If the GPIO subsystem is ever refactored to use a local per-GPIO controller
-pin space, this mapping will need to be augmented accordingly.)
-
-
PINMUX interfaces
=================
@@ -438,7 +520,7 @@ you. Define enumerators only for the pins you can control if that makes sense.
Assumptions:
-We assume that the number possible function maps to pin groups is limited by
+We assume that the number of possible function maps to pin groups is limited by
the hardware. I.e. we assume that there is no system where any function can be
mapped to any pin, like in a phone exchange. So the available pins groups for
a certain function will be limited to a few choices (say up to eight or so),
@@ -585,7 +667,7 @@ int foo_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
{
- return myfuncs[selector].name;
+ return foo_functions[selector].name;
}
static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
@@ -600,16 +682,16 @@ static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
int foo_enable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
- u8 regbit = (1 << group);
+	u8 regbit = (1 << (selector + group));
writeb((readb(MUX)|regbit), MUX)
return 0;
}
-int foo_disable(struct pinctrl_dev *pctldev, unsigned selector,
+void foo_disable(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
- u8 regbit = (1 << group);
+	u8 regbit = (1 << (selector + group));
writeb((readb(MUX) & ~(regbit)), MUX)
return 0;
@@ -647,6 +729,17 @@ All the above functions are mandatory to implement for a pinmux driver.
Pinmux interaction with the GPIO subsystem
==========================================
+The public pinmux API contains two functions named pinmux_request_gpio()
+and pinmux_free_gpio(). These two functions shall *ONLY* be called from
+gpiolib-based drivers as part of their gpio_request() and
+gpio_free() semantics. Likewise the pinmux_gpio_direction_[input|output]
+shall only be called from within respective gpio_direction_[input|output]
+gpiolib implementation.
+
+NOTE that platforms and individual drivers shall *NOT* request GPIO pins to be
+muxed in. Instead, implement a proper gpiolib driver and have that driver
+request proper muxing for its pins.
+
The function list could become long, especially if you can convert every
individual pin into a GPIO pin independent of any other pins, and then try
the approach to define every pin as a function.
@@ -654,19 +747,24 @@ the approach to define every pin as a function.
In this case, the function array would become 64 entries for each GPIO
setting and then the device functions.
-For this reason there is an additional function a pinmux driver can implement
-to enable only GPIO on an individual pin: .gpio_request_enable(). The same
-.free() function as for other functions is assumed to be usable also for
-GPIO pins.
+For this reason there are two functions a pinmux driver can implement
+to enable only GPIO on an individual pin: .gpio_request_enable() and
+.gpio_disable_free().
This function will pass in the affected GPIO range identified by the pin
controller core, so you know which GPIO pins are being affected by the request
operation.
-Alternatively it is fully allowed to use named functions for each GPIO
-pin, the pinmux_request_gpio() will attempt to obtain the function "gpioN"
-where "N" is the global GPIO pin number if no special GPIO-handler is
-registered.
+If your driver needs to have an indication from the framework of whether the
+GPIO pin shall be used for input or output, you can implement the
+.gpio_set_direction() function. As described, this shall be called from the
+gpiolib driver and the affected GPIO range, pin offset and desired direction
+will be passed along to this function.
+
+As an alternative to using these special functions, it is fully allowed to use
+named functions for each GPIO pin; pinmux_request_gpio() will attempt to
+obtain the function "gpioN", where "N" is the global GPIO pin number, if no
+special GPIO handler is registered.
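+
+A hedged sketch of how these optional GPIO handlers are wired up (the foo_*
+callback names are made up; the .gpio_* members are the ones named above):
+
+static struct pinmux_ops foo_pmxops = {
+	...
+	.gpio_request_enable = foo_gpio_request_enable,
+	.gpio_disable_free = foo_gpio_disable_free,
+	.gpio_set_direction = foo_gpio_set_direction,
+};
+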
Pinmux board/machine configuration
@@ -683,19 +781,19 @@ spi on the second function mapping:
#include <linux/pinctrl/machine.h>
-static struct pinmux_map pmx_mapping[] = {
+static const struct pinmux_map __initdata pmx_mapping[] = {
{
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "spi0",
.dev_name = "foo-spi.0",
},
{
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "i2c0",
.dev_name = "foo-i2c.0",
},
{
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
.dev_name = "foo-mmc.0",
},
@@ -714,14 +812,14 @@ for example if they are not yet instantiated or cumbersome to obtain.
You register this pinmux mapping to the pinmux subsystem by simply:
- ret = pinmux_register_mappings(&pmx_mapping, ARRAY_SIZE(pmx_mapping));
+ ret = pinmux_register_mappings(pmx_mapping, ARRAY_SIZE(pmx_mapping));
Since the above construct is pretty common there is a helper macro to make
-it even more compact which assumes you want to use pinctrl.0 and position
+it even more compact which assumes you want to use pinctrl-foo and position
0 for mapping, for example:
-static struct pinmux_map pmx_mapping[] = {
- PINMUX_MAP_PRIMARY("I2CMAP", "i2c0", "foo-i2c.0"),
+static struct pinmux_map __initdata pmx_mapping[] = {
+ PINMUX_MAP("I2CMAP", "pinctrl-foo", "i2c0", "foo-i2c.0"),
};
@@ -734,14 +832,14 @@ As it is possible to map a function to different groups of pins an optional
...
{
.name = "spi0-pos-A",
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "spi0",
.group = "spi0_0_grp",
.dev_name = "foo-spi.0",
},
{
.name = "spi0-pos-B",
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "spi0",
.group = "spi0_1_grp",
.dev_name = "foo-spi.0",
@@ -760,44 +858,44 @@ case), we define a mapping like this:
...
{
.name "2bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_0_grp",
+ .group = "mmc0_1_grp",
.dev_name = "foo-mmc.0",
},
{
.name "4bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_0_grp",
+ .group = "mmc0_1_grp",
.dev_name = "foo-mmc.0",
},
{
.name "4bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_1_grp",
+ .group = "mmc0_2_grp",
.dev_name = "foo-mmc.0",
},
{
.name "8bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_0_grp",
+ .group = "mmc0_1_grp",
.dev_name = "foo-mmc.0",
},
{
.name "8bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_1_grp",
+ .group = "mmc0_2_grp",
.dev_name = "foo-mmc.0",
},
{
.name "8bit"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "mmc0",
- .group = "mmc0_2_grp",
+ .group = "mmc0_3_grp",
.dev_name = "foo-mmc.0",
},
...
@@ -898,7 +996,7 @@ like this:
{
.name "POWERMAP"
- .ctrl_dev_name = "pinctrl.0",
+ .ctrl_dev_name = "pinctrl-foo",
.function = "power_func",
.hog_on_boot = true,
},
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 646a89e0c07..20af7def23c 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -123,9 +123,12 @@ please refer directly to the source code for more information about it.
Subsystem-Level Methods
-----------------------
The core methods to suspend and resume devices reside in struct dev_pm_ops
-pointed to by the pm member of struct bus_type, struct device_type and
-struct class. They are mostly of interest to the people writing infrastructure
-for buses, like PCI or USB, or device type and device class drivers.
+pointed to by the ops member of struct dev_pm_domain, or by the pm member of
+struct bus_type, struct device_type and struct class. They are mostly of
+interest to the people writing infrastructure for platforms and buses, like PCI
+or USB, or device type and device class drivers. They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people
@@ -139,41 +142,57 @@ sequencing in the driver model tree.
/sys/devices/.../power/wakeup files
-----------------------------------
-All devices in the driver model have two flags to control handling of wakeup
-events (hardware signals that can force the device and/or system out of a low
-power state). These flags are initialized by bus or device driver code using
+All device objects in the driver model contain fields that control the handling
+of system wakeup events (hardware signals that can force the system out of a
+sleep state). These fields are initialized by bus or device driver code using
device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
include/linux/pm_wakeup.h.
-The "can_wakeup" flag just records whether the device (and its driver) can
+The "power.can_wakeup" flag just records whether the device (and its driver) can
physically support wakeup events. The device_set_wakeup_capable() routine
-affects this flag. The "should_wakeup" flag controls whether the device should
-try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
-for the most part drivers should not change its value. The initial value of
-should_wakeup is supposed to be false for the majority of devices; the major
-exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool. It should also default
-to true for devices that don't generate wakeup requests on their own but merely
-forward wakeup requests from one bus to another (like PCI bridges).
+affects this flag. The "power.wakeup" field is a pointer to an object of type
+struct wakeup_source used for controlling whether or not the device should use
+its system wakeup mechanism and for notifying the PM core of system wakeup
+events signaled by the device. This object is only present for wakeup-capable
+devices (i.e. devices whose "can_wakeup" flags are set) and is created (or
+removed) by device_set_wakeup_capable().
Whether or not a device is capable of issuing wakeup events is a hardware
matter, and the kernel is responsible for keeping track of it. By contrast,
whether or not a wakeup-capable device should issue wakeup events is a policy
decision, and it is managed by user space through a sysfs attribute: the
-power/wakeup file. User space can write the strings "enabled" or "disabled" to
-set or clear the "should_wakeup" flag, respectively. This file is only present
-for wakeup-capable devices (i.e. devices whose "can_wakeup" flags are set)
-and is created (or removed) by device_set_wakeup_capable(). Reads from the
-file will return the corresponding string.
-
-The device_may_wakeup() routine returns true only if both flags are set.
+"power/wakeup" file. User space can write the strings "enabled" or "disabled"
+to it to indicate whether or not, respectively, the device is supposed to signal
+system wakeup. This file is only present if the "power.wakeup" object exists
+for the given device and is created (or removed) along with that object, by
+device_set_wakeup_capable(). Reads from the file will return the corresponding
+string.
+
+The "power/wakeup" file is supposed to contain the "disabled" string initially
+for the majority of devices; the major exceptions are power buttons, keyboards,
+and Ethernet adapters whose WoL (wake-on-LAN) feature has been set up with
+ethtool. It should also default to "enabled" for devices that don't generate
+wakeup requests on their own but merely forward wakeup requests from one bus to
+another (like PCI Express ports).
+
+The device_may_wakeup() routine returns true only if the "power.wakeup" object
+exists and the corresponding "power/wakeup" file contains the string "enabled".
This information is used by subsystems, like the PCI bus type code, to see
whether or not to enable the devices' wakeup mechanisms. If device wakeup
mechanisms are enabled or disabled directly by drivers, they also should use
device_may_wakeup() to decide what to do during a system sleep transition.
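+
+A hedged sketch of how a driver typically pairs these calls (the device and
+IRQ variables are made up):
+
+	/* at probe time: the hardware can physically wake up the system */
+	device_set_wakeup_capable(dev, true);
+
+	/* at system suspend time: honour the user's power/wakeup policy */
+	if (device_may_wakeup(dev))
+		enable_irq_wake(irq);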
-However for runtime power management, wakeup events should be enabled whenever
-the device and driver both support them, regardless of the should_wakeup flag.
-
+Device drivers, however, are not supposed to call device_set_wakeup_enable()
+directly in any case.
+
+It ought to be noted that system wakeup is conceptually different from "remote
+wakeup" used by runtime power management, although it may be supported by the
+same physical mechanism. Remote wakeup is a feature allowing devices in
+low-power states to trigger specific interrupts to signal conditions in which
+they should be put into the full-power state. Those interrupts may or may not
+be used to signal system wakeup events, depending on the hardware design. On
+some systems it is impossible to trigger them from system sleep states. In any
+case, remote wakeup should always be enabled for runtime power management for
+all devices and drivers that support it.
/sys/devices/.../power/control files
------------------------------------
@@ -249,23 +268,37 @@ for every device before the next phase begins. Not all busses or classes
support all these callbacks and not all drivers use all the callbacks. The
various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
-been disabled (except for those marked with the IRQ_WAKEUP flag).
+been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
+
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm). These callbacks are regarded by the PM core as mutually
+exclusive. Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks. To be precise, the following rules are used to determine
+which callback to execute in the given phase:
+
+ 1. If dev->pm_domain is present, the PM core will choose the callback
+ included in dev->pm_domain->ops for execution.
+
+ 2. Otherwise, if both dev->type and dev->type->pm are present, the callback
+ included in dev->type->pm will be chosen for execution.
+
+ 3. Otherwise, if both dev->class and dev->class->pm are present, the
+ callback included in dev->class->pm will be chosen for execution.
+
+ 4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
+ included in dev->bus->pm will be chosen for execution.
+
+This allows PM domains and device types to override callbacks provided by bus
+types or device classes if necessary.
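+
+Expressed as a hedged C sketch (an illustration of the rules above, not the
+PM core's actual helper):
+
+	static const struct dev_pm_ops *example_subsys_pm_ops(struct device *dev)
+	{
+		if (dev->pm_domain)
+			return &dev->pm_domain->ops;
+		if (dev->type && dev->type->pm)
+			return dev->type->pm;
+		if (dev->class && dev->class->pm)
+			return dev->class->pm;
+		if (dev->bus && dev->bus->pm)
+			return dev->bus->pm;
+		return NULL;	/* fall back to dev->driver->pm */
+	}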
-All phases use bus, type, or class callbacks (that is, methods defined in
-dev->bus->pm, dev->type->pm, or dev->class->pm). These callbacks are mutually
-exclusive, so if the device type provides a struct dev_pm_ops object pointed to
-by its pm field (i.e. both dev->type and dev->type->pm are defined), the
-callbacks included in that object (i.e. dev->type->pm) will be used. Otherwise,
-if the class provides a struct dev_pm_ops object pointed to by its pm field
-(i.e. both dev->class and dev->class->pm are defined), the PM core will use the
-callbacks from that object (i.e. dev->class->pm). Finally, if the pm fields of
-both the device type and class objects are NULL (or those objects do not exist),
-the callbacks provided by the bus (that is, the callbacks from dev->bus->pm)
-will be used (this allows device types to override callbacks provided by bus
-types or classes if necessary).
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
Entering System Suspend
@@ -283,9 +316,8 @@ When the system goes into the standby or memory sleep state, the phases are:
After the prepare callback method returns, no new children may be
registered below the device. The method may also prepare the device or
- driver in some way for the upcoming system power transition (for
- example, by allocating additional memory required for this purpose), but
- it should not put the device into a low-power state.
+ driver in some way for the upcoming system power transition, but it
+ should not put the device into a low-power state.
2. The suspend methods should quiesce the device to stop it from performing
I/O. They also may save the device registers and put it into the
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba187f..6ccb68f68da 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
either wakes them up, if they are kernel threads, or sends fake signals to them,
if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
User space processes are generally frozen before kernel threads.
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
For user space processes try_to_freeze() is called automatically from the
signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
After the system memory state has been restored from a hibernation image and
devices have been reinitialized, the function thaw_processes() is called in
order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
III. Which kernel threads are freezable?
Kernel threads are not freezable by default. However, a kernel thread may clear
PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged). From this point it is regarded as freezable
+directly is not allowed). From this point it is regarded as freezable
and must call try_to_freeze() in a suitable place.
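+
+A hedged sketch of such a freezable kernel thread (all names made up):
+
+static int example_thread(void *data)
+{
+	set_freezable();	/* clear PF_NOFREEZE for this thread */
+
+	while (!kthread_should_stop()) {
+		try_to_freeze();	/* may enter __refrigerator() here */
+		/* ... do the actual work ... */
+	}
+	return 0;
+}
+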
IV. Why do we do that?
@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
A driver must have all firmwares it may need in RAM before suspend() is called.
If keeping them is not practical, for example due to their size, they must be
requested early enough using the suspend notifier API described in notifiers.txt.
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
+it is strongly discouraged to call mutex_[un]lock(&pm_mutex) directly, since
+that could lead to freezing failures: if the suspend/hibernate code has
+already acquired 'pm_mutex', the other task fails to acquire the lock and
+gets blocked in TASK_UNINTERRUPTIBLE state. As a consequence, the freezer
+would not be able to freeze that task, leading to freezing failure.
+
+The [un]lock_system_sleep() APIs, however, are safe to use in this scenario,
+since they ask the freezer to skip freezing this task: it is anyway "frozen
+enough" while blocked on 'pm_mutex', which will be released only after the
+entire suspend/hibernation sequence is complete.
+So, to summarize, use [un]lock_system_sleep() instead of directly using
+mutex_[un]lock(&pm_mutex). That will prevent freezing failures.
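+
+A hedged sketch of the recommended pattern:
+
+	lock_system_sleep();	/* instead of mutex_lock(&pm_mutex) */
+
+	/* ... code that must not run concurrently with suspend/hibernation ... */
+
+	unlock_system_sleep();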
diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
index 3f8b528f237..e272d9909e3 100644
--- a/Documentation/power/regulator/regulator.txt
+++ b/Documentation/power/regulator/regulator.txt
@@ -12,7 +12,7 @@ Drivers can register a regulator by calling :-
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
struct device *dev, struct regulator_init_data *init_data,
- void *driver_data);
+ void *driver_data, struct device_node *of_node);
This will register the regulator's capabilities and operations to the regulator
core.
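+
+A hedged usage sketch with the updated signature (all of the names below are
+made up):
+
+	rdev = regulator_register(&foo_regulator_desc, &pdev->dev,
+				  init_data, driver_data, pdev->dev.of_node);
+	if (IS_ERR(rdev))
+		return PTR_ERR(rdev);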
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 5336149f831..4abe83e1045 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -44,98 +44,112 @@ struct dev_pm_ops {
};
The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
-are executed by the PM core for either the power domain, or the device type
-(if the device power domain's struct dev_pm_ops does not exist), or the class
-(if the device power domain's and type's struct dev_pm_ops object does not
-exist), or the bus type (if the device power domain's, type's and class'
-struct dev_pm_ops objects do not exist) of the given device, so the priority
-order of callbacks from high to low is that power domain callbacks, device
-type callbacks, class callbacks and bus type callbacks, and the high priority
-one will take precedence over low priority one. The bus type, device type and
-class callbacks are referred to as subsystem-level callbacks in what follows,
-and generally speaking, the power domain callbacks are used for representing
-power domains within a SoC.
+are executed by the PM core for the device's subsystem that may be either of
+the following:
+
+ 1. PM domain of the device, if the device's PM domain object, dev->pm_domain,
+ is present.
+
+ 2. Device type of the device, if both dev->type and dev->type->pm are present.
+
+ 3. Device class of the device, if both dev->class and dev->class->pm are
+ present.
+
+ 4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
+
+The PM core always checks which callback to use in the order given above, so the
+priority order of callbacks from high to low is: PM domain, device type, class
+and bus type. Moreover, the high-priority one will always take precedence over
+a low-priority one. The PM domain, bus type, device type and class callbacks
+are referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
-enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
-callbacks should be invoked in atomic context with interrupts disabled.
-This implies that these callback routines must not block or sleep, but it also
-means that the synchronous helper functions listed at the end of Section 4 can
-be used within an interrupt handler or in an atomic context.
-
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled. This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
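+
+For example (a hedged sketch; the driver name is made up), a driver whose
+runtime PM callbacks are safe to run with interrupts off could declare that
+during probe:
+
+	static int foo_probe(struct platform_device *pdev)
+	{
+		...
+		pm_runtime_irq_safe(&pdev->dev); /* callbacks may run atomically */
+		pm_runtime_enable(&pdev->dev);
+		return 0;
+	}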
+
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_suspend()
callback in a device driver as long as the subsystem-level suspend callback
knows what to do to handle the device).
- * Once the subsystem-level suspend callback has completed successfully
- for given device, the PM core regards the device as suspended, which need
- not mean that the device has been put into a low power state. It is
- supposed to mean, however, that the device will not process data and will
- not communicate with the CPU(s) and RAM until the subsystem-level resume
- callback is executed for it. The runtime PM status of a device after
- successful execution of the subsystem-level suspend callback is 'suspended'.
-
- * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
- the device's runtime PM status is 'active', which means that the device
- _must_ be fully operational afterwards.
-
- * If the subsystem-level suspend callback returns an error code different
- from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
- refuse to run the helper functions described in Section 4 for the device,
- until the status of it is directly set either to 'active', or to 'suspended'
- (the PM core provides special helper functions for this purpose).
-
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+ * Once the subsystem-level suspend callback (or the driver suspend callback,
+ if invoked directly) has completed successfully for the given device, the PM
+ core regards the device as suspended, which need not mean that it has been
+ put into a low power state. It is supposed to mean, however, that the
+ device will not process data and will not communicate with the CPU(s) and
+ RAM until the appropriate resume callback is executed for it. The runtime
+ PM status of a device after successful execution of the suspend callback is
+ 'suspended'.
+
+ * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+ status remains 'active', which means that the device _must_ be fully
+ operational afterwards.
+
+ * If the suspend callback returns an error code different from -EBUSY and
+ -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+ the helper functions described in Section 4 for the device until its status
+ is directly set to either 'active' or 'suspended' (the PM core provides
+ special helper functions for this purpose).
+
+In particular, if the driver requires remote wakeup capability (i.e. hardware
mechanism allowing the device to request a change of its power state, such as
PCI PME) for proper functioning and device_run_wake() returns 'false' for the
device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device. Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
-
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
-
- * Once the subsystem-level resume callback has completed successfully, the PM
- core regards the device as fully operational, which means that the device
- _must_ be able to complete I/O operations as needed. The runtime PM status
- of the device is then 'active'.
-
- * If the subsystem-level resume callback returns an error code, the PM core
- regards this as a fatal error and will refuse to run the helper functions
- described in Section 4 for the device, until its status is directly set
- either to 'active' or to 'suspended' (the PM core provides special helper
- functions for this purpose).
-
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device. Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
+
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
+
+ * Once the subsystem-level resume callback (or the driver resume callback, if
+ invoked directly) has completed successfully, the PM core regards the device
+ as fully operational, which means that the device _must_ be able to complete
+ I/O operations as needed. The runtime PM status of the device is then
+ 'active'.
+
+ * If the resume callback returns an error code, the PM core regards this as a
+ fatal error and will refuse to run the helper functions described in Section
+ 4 for the device, until its status is directly set to either 'active', or
+ 'suspended' (by means of special helper functions provided by the PM core
+ for this purpose).
+
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
* If any of these counters is decreased using a helper function provided by
the PM core and it turns out to be equal to zero, the other counter is
checked. If that counter also is equal to zero, the PM core executes the
- subsystem-level idle callback with the device as an argument.
+ idle callback with the device as its argument.
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
if the device can be suspended (i.e. if all of the conditions necessary for
suspending the device are satisfied) and to queue up a suspend request for the
device in that case. The value returned by this callback is ignored by the PM
core.
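+
+A hedged sketch of such an idle callback (the choice of helper is an
+assumption; the return value is ignored by the PM core):
+
+	static int foo_runtime_idle(struct device *dev)
+	{
+		/* nothing blocks suspend, so queue up an asynchronous request */
+		pm_schedule_suspend(dev, 0);
+		return 0;
+	}
+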
The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
->runtime_suspend() in parallel with ->runtime_resume() or with another
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index efe998becc5..462321c1aee 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -41,7 +41,6 @@ ldd
Debugging modules
The proc file system
Starting points for debugging scripting languages etc.
-Dumptool & Lcrash
SysRq
References
Special Thanks
@@ -2455,39 +2454,6 @@ jdb <filename> another fully interactive gdb style debugger.
-Dumptool & Lcrash ( lkcd )
-==========================
-Michael Holzheu & others here at IBM have a fairly mature port of
-SGI's lcrash tool which allows one to look at kernel structures in a
-running kernel.
-
-It also complements a tool called dumptool which dumps all the kernel's
-memory pages & registers to either a tape or a disk.
-This can be used by tech support or an ambitious end user do
-post mortem debugging of a machine like gdb core dumps.
-
-Going into how to use this tool in detail will be explained
-in other documentation supplied by IBM with the patches & the
-lcrash homepage http://oss.sgi.com/projects/lkcd/ & the lcrash manpage.
-
-How they work
--------------
-Lcrash is a perfectly normal program,however, it requires 2
-additional files, Kerntypes which is built using a patch to the
-linux kernel sources in the linux root directory & the System.map.
-
-Kerntypes is an objectfile whose sole purpose in life
-is to provide stabs debug info to lcrash, to do this
-Kerntypes is built from kerntypes.c which just includes the most commonly
-referenced header files used when debugging, lcrash can then read the
-.stabs section of this file.
-
-Debugging a live system it uses /dev/mem
-alternatively for post mortem debugging it uses the data
-collected by dumptool.
-
-
-
SysRq
=====
This is now supported by linux for s/390 & z/Architecture.
diff --git a/Documentation/scsi/53c700.txt b/Documentation/scsi/53c700.txt
index 0da681d497a..e31aceb6df1 100644
--- a/Documentation/scsi/53c700.txt
+++ b/Documentation/scsi/53c700.txt
@@ -16,32 +16,13 @@ fill in to get the driver working.
Compile Time Flags
==================
-The driver may be either io mapped or memory mapped. This is
-selectable by configuration flags:
-
-CONFIG_53C700_MEM_MAPPED
-
-define if the driver is memory mapped.
-
-CONFIG_53C700_IO_MAPPED
-
-define if the driver is to be io mapped.
-
-One or other of the above flags *must* be defined.
-
-Other flags are:
+A compile time flag is:
CONFIG_53C700_LE_ON_BE
define if the chipset must be supported in little endian mode on a big
endian architecture (used for the 700 on parisc).
-CONFIG_53C700_USE_CONSISTENT
-
-allocate consistent memory (should only be used if your architecture
-has a mixture of consistent and inconsistent memory). Fully
-consistent or fully inconsistent architectures should not define this.
-
Using the Chip Core Driver
==========================
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index 77ba0afbe4d..0a25a919186 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -101,7 +101,7 @@ hardware.
Returns the current state of modem control inputs. The state
of the outputs should not be returned, since the core keeps
track of their state. The state information should include:
- - TIOCM_DCD state of DCD signal
+ - TIOCM_CAR state of DCD signal
- TIOCM_CTS state of CTS signal
- TIOCM_DSR state of DSR signal
- TIOCM_RI state of RI signal
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9cbf39..d50c14df341 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -50,8 +50,7 @@ Machine DAI Configuration
The machine DAI configuration glues all the codec and CPU DAIs together. It can
also be used to set up the DAI system clock and for any machine related DAI
initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
@@ -83,8 +82,7 @@ Machine Power Map
The machine driver can optionally extend the codec power map and to become an
audio power map of the audio subsystem. This allows for automatic power up/down
of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
Machine Controls
diff --git a/Documentation/trace/events-kmem.txt b/Documentation/trace/events-kmem.txt
index aa82ee4a5a8..19480041006 100644
--- a/Documentation/trace/events-kmem.txt
+++ b/Documentation/trace/events-kmem.txt
@@ -40,8 +40,8 @@ but the call_site can usually be used to extrapolate that information.
==================
mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
-mm_page_free_direct page=%p pfn=%lu order=%d
-mm_pagevec_free page=%p pfn=%lu order=%d cold=%d
+mm_page_free page=%p pfn=%lu order=%d
+mm_page_free_batched page=%p pfn=%lu order=%d cold=%d
These four events deal with page allocation and freeing. mm_page_alloc is
a simple indicator of page allocator activity. Pages may be allocated from
@@ -53,13 +53,13 @@ amounts of activity imply high activity on the zone->lock. Taking this lock
impairs performance by disabling interrupts, dirtying cache lines between
CPUs and serialising many CPUs.
-When a page is freed directly by the caller, the mm_page_free_direct event
+When a page is freed directly by the caller, only the mm_page_free event
is triggered. Significant amounts of activity here could indicate that the
callers should be batching their activities.
-When pages are freed using a pagevec, the mm_pagevec_free is
-triggered. Broadly speaking, pages are taken off the LRU lock in bulk and
-freed in batch with a pagevec. Significant amounts of activity here could
+When pages are freed in batch, the mm_page_free_batched event is triggered.
+Broadly speaking, pages are taken off the LRU list in bulk and
+freed in batch with a page list. Significant amounts of activity here could
indicate that the system is under memory pressure and can also indicate
contention on the zone->lru_lock.
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index b510564aac7..bb24c2a0e87 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -191,8 +191,6 @@ And for string fields they are:
Currently, only exact string matches are supported.
-Currently, the maximum number of predicates in a filter is 16.
-
5.2 Setting filters
-------------------
diff --git a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
index 7df50e8cf4d..0a120aae33c 100644
--- a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
@@ -17,8 +17,8 @@ use Getopt::Long;
# Tracepoint events
use constant MM_PAGE_ALLOC => 1;
-use constant MM_PAGE_FREE_DIRECT => 2;
-use constant MM_PAGEVEC_FREE => 3;
+use constant MM_PAGE_FREE => 2;
+use constant MM_PAGE_FREE_BATCHED => 3;
use constant MM_PAGE_PCPU_DRAIN => 4;
use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5;
use constant MM_PAGE_ALLOC_EXTFRAG => 6;
@@ -223,10 +223,10 @@ EVENT_PROCESS:
# Perl Switch() sucks majorly
if ($tracepoint eq "mm_page_alloc") {
$perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++;
- } elsif ($tracepoint eq "mm_page_free_direct") {
- $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++;
- } elsif ($tracepoint eq "mm_pagevec_free") {
- $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++;
+ } elsif ($tracepoint eq "mm_page_free") {
+ $perprocesspid{$process_pid}->{MM_PAGE_FREE}++;
+ } elsif ($tracepoint eq "mm_page_free_batched") {
+ $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}++;
} elsif ($tracepoint eq "mm_page_pcpu_drain") {
$perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++;
$perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++;
@@ -336,8 +336,8 @@ sub dump_stats {
$process_pid,
$stats{$process_pid}->{MM_PAGE_ALLOC},
$stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED},
- $stats{$process_pid}->{MM_PAGE_FREE_DIRECT},
- $stats{$process_pid}->{MM_PAGEVEC_FREE},
+ $stats{$process_pid}->{MM_PAGE_FREE},
+ $stats{$process_pid}->{MM_PAGE_FREE_BATCHED},
$stats{$process_pid}->{MM_PAGE_PCPU_DRAIN},
$stats{$process_pid}->{HIGH_PCPU_DRAINS},
$stats{$process_pid}->{HIGH_PCPU_REFILLS},
@@ -364,8 +364,8 @@ sub aggregate_perprocesspid() {
$perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC};
$perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED};
- $perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT};
- $perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE};
+ $perprocess{$process}->{MM_PAGE_FREE} += $perprocesspid{$process_pid}->{MM_PAGE_FREE};
+ $perprocess{$process}->{MM_PAGE_FREE_BATCHED} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED};
$perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN};
$perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS};
$perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};
diff --git a/Documentation/trace/tracepoint-analysis.txt b/Documentation/trace/tracepoint-analysis.txt
index 87bee3c129b..058cc6c9dc5 100644
--- a/Documentation/trace/tracepoint-analysis.txt
+++ b/Documentation/trace/tracepoint-analysis.txt
@@ -93,14 +93,14 @@ By specifying the -a switch and analysing sleep, the system-wide events
for a duration of time can be examined.
$ perf stat -a \
- -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
- -e kmem:mm_pagevec_free \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free \
+ -e kmem:mm_page_free_batched \
sleep 10
Performance counter stats for 'sleep 10':
9630 kmem:mm_page_alloc
- 2143 kmem:mm_page_free_direct
- 7424 kmem:mm_pagevec_free
+ 2143 kmem:mm_page_free
+ 7424 kmem:mm_page_free_batched
10.002577764 seconds time elapsed
@@ -119,15 +119,15 @@ basis using set_ftrace_pid.
Events can be activated and tracked for the duration of a process on a local
basis using PCL such as follows.
- $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
- -e kmem:mm_pagevec_free ./hackbench 10
+ $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free \
+ -e kmem:mm_page_free_batched ./hackbench 10
Time: 0.909
Performance counter stats for './hackbench 10':
17803 kmem:mm_page_alloc
- 12398 kmem:mm_page_free_direct
- 4827 kmem:mm_pagevec_free
+ 12398 kmem:mm_page_free
+ 4827 kmem:mm_page_free_batched
0.973913387 seconds time elapsed
@@ -146,8 +146,8 @@ to know what the standard deviation is. By and large, this is left to the
performance analyst to do it by hand. In the event that the discrete event
occurrences are useful to the performance analyst, then perf can be used.
- $ perf stat --repeat 5 -e kmem:mm_page_alloc -e kmem:mm_page_free_direct
- -e kmem:mm_pagevec_free ./hackbench 10
+ $ perf stat --repeat 5 -e kmem:mm_page_alloc -e kmem:mm_page_free
+ -e kmem:mm_page_free_batched ./hackbench 10
Time: 0.890
Time: 0.895
Time: 0.915
@@ -157,8 +157,8 @@ occurrences are useful to the performance analyst, then perf can be used.
Performance counter stats for './hackbench 10' (5 runs):
16630 kmem:mm_page_alloc ( +- 3.542% )
- 11486 kmem:mm_page_free_direct ( +- 4.771% )
- 4730 kmem:mm_pagevec_free ( +- 2.325% )
+ 11486 kmem:mm_page_free ( +- 4.771% )
+ 4730 kmem:mm_page_free_batched ( +- 2.325% )
0.982653002 seconds time elapsed ( +- 1.448% )
@@ -168,15 +168,15 @@ aggregation of discrete events, then a script would need to be developed.
Using --repeat, it is also possible to view how events are fluctuating over
time on a system-wide basis using -a and sleep.
- $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
- -e kmem:mm_pagevec_free \
+ $ perf stat -e kmem:mm_page_alloc -e kmem:mm_page_free \
+ -e kmem:mm_page_free_batched \
-a --repeat 10 \
sleep 1
Performance counter stats for 'sleep 1' (10 runs):
1066 kmem:mm_page_alloc ( +- 26.148% )
- 182 kmem:mm_page_free_direct ( +- 5.464% )
- 890 kmem:mm_pagevec_free ( +- 30.079% )
+ 182 kmem:mm_page_free ( +- 5.464% )
+ 890 kmem:mm_page_free_batched ( +- 30.079% )
1.002251757 seconds time elapsed ( +- 0.005% )
@@ -220,8 +220,8 @@ were generating events within the kernel. To begin this sort of analysis, the
data must be recorded. At the time of writing, this required root:
$ perf record -c 1 \
- -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
- -e kmem:mm_pagevec_free \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free \
+ -e kmem:mm_page_free_batched \
./hackbench 10
Time: 0.894
[ perf record: Captured and wrote 0.733 MB perf.data (~32010 samples) ]
@@ -260,8 +260,8 @@ noticed that X was generating an insane amount of page allocations so let's look
at it:
$ perf record -c 1 -f \
- -e kmem:mm_page_alloc -e kmem:mm_page_free_direct \
- -e kmem:mm_pagevec_free \
+ -e kmem:mm_page_alloc -e kmem:mm_page_free \
+ -e kmem:mm_page_free_batched \
-p `pidof X`
This was interrupted after a few seconds and
diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
index 37a02ce5484..f0ffc27d4c0 100644
--- a/Documentation/usb/linux-cdc-acm.inf
+++ b/Documentation/usb/linux-cdc-acm.inf
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
[SourceDisksFiles]
[SourceDisksNames]
[DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
[DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
;------------------------------------------------------------------------------
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index a4efa0462f0..5335fa8b06e 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously.
2. Find which bus connects to the desired device
-Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to
-the device. Usually you do it by looking for the vendor string. If you have
-many similar devices, unplug one and compare two /proc/bus/usb/devices outputs.
-The T-line will have a bus number. Example:
+Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
+to the device. Usually you do it by looking for the vendor string. If you have
+many similar devices, unplug one and compare the two
+/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
+Example:
T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
@@ -58,7 +59,10 @@ P: Vendor=0557 ProdID=2004 Rev= 1.00
S: Manufacturer=ATEN
S: Product=UC100KM V2.00
-Bus=03 means it's bus 3.
+"Bus=03" means it's bus 3. Alternatively, you can look at the output from
+"lsusb" and get the bus number from the appropriate line. Example:
+
+Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
3. Start 'cat'
diff --git a/Documentation/vgaarbiter.txt b/Documentation/vgaarbiter.txt
index b7d401e0eae..014423e2824 100644
--- a/Documentation/vgaarbiter.txt
+++ b/Documentation/vgaarbiter.txt
@@ -177,7 +177,7 @@ II. Credits
Benjamin Herrenschmidt (IBM?) started this work when he discussed such design
with the Xorg community in 2005 [1, 2]. In the end of 2007, Paulo Zanoni and
-Tiago Vignatti (both of C3SL/Federal University of Paraná) proceeded his work
+Tiago Vignatti (both of C3SL/Federal University of Paraná) proceeded his work
enhancing the kernel code to adapt as a kernel module and also did the
implementation of the user space side [3]. Now (2009) Tiago Vignatti and Dave
Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7945b0bd35e..e1d94bf4056 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
eax, ebx, ecx, edx: the values returned by the cpuid instruction for
this function/index combination
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support. Instead it is reported via
+
+ ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+If that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
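A hedged illustration of that check (kvm_fd, the open /dev/kvm descriptor, and
the surrounding VM setup are assumptions for this sketch, not part of the patch):

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Sketch: non-zero means the TSC deadline timer may be exposed to the
   * guest via KVM_SET_CPUID2, provided KVM_CREATE_IRQCHIP is also used. */
  static int tsc_deadline_usable(int kvm_fd)
  {
          return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                       KVM_CAP_TSC_DEADLINE_TIMER) > 0;
  }

Userspace would then set CPUID leaf 1 ecx bit 24 in the entries passed to
KVM_SET_CPUID2 only when this returns true (or when it emulates the feature
itself).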
4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
/* Depends on KVM_CAP_IOMMU */
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device. Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment. The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
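A rough sketch of an assignment request honouring the mandatory IOMMU flag (the
PCI address 01:00.0, the id value and vm_fd are placeholder assumptions; the
structure follows the kvm_assigned_pci_dev layout in linux/kvm.h, and error
handling is omitted):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Sketch: assign host PCI device 0000:01:00.0 with IOMMU isolation. */
  static int assign_pci_device(int vm_fd)
  {
          struct kvm_assigned_pci_dev dev;

          memset(&dev, 0, sizeof(dev));
          dev.assigned_dev_id = 1;                  /* arbitrary userspace-chosen id */
          dev.busnr = 0x01;
          dev.devfn = (0x00 << 3) | 0x0;            /* slot 0, function 0 */
          dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;  /* mandatory, see above */

          return ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
  }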
4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
@@ -1450,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
+4.64 KVM_NMI
+
+Capability: KVM_CAP_USER_NMI
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an NMI on the thread's vcpu. Note this is well defined only
+when KVM_CREATE_IRQCHIP has not been called, since this is an interface
+between the virtual cpu core and virtual local APIC. After KVM_CREATE_IRQCHIP
+has been called, this interface is completely emulated within the kernel.
+
+To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
+following algorithm:
+
+ - pause the vcpu
+ - read the local APIC's state (KVM_GET_LAPIC)
+ - check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
+ - if so, issue KVM_NMI
+ - resume the vcpu
+
+Some guests configure the LINT1 NMI input to cause a panic, aiding in
+debugging.
+
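A rough userspace sketch of that algorithm (the APIC register offset, mask bit
and delivery-mode field come from the local APIC layout rather than this patch;
vcpu_fd names an already-paused vcpu descriptor and error handling is omitted):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define APIC_LVT_LINT1  0x360   /* LVT LINT1 register offset in the APIC page */
  #define APIC_DM_NMI     0x4     /* delivery mode field value for NMI */

  /* Sketch: emulate a LINT1 NMI while the in-kernel irqchip is in use. */
  static void inject_lint1_nmi(int vcpu_fd)
  {
          struct kvm_lapic_state lapic;
          unsigned int lvt;

          ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic);
          memcpy(&lvt, lapic.regs + APIC_LVT_LINT1, sizeof(lvt));

          /* Queue the NMI only if LINT1 is unmasked and programmed for NMI. */
          if (!(lvt & (1 << 16)) && ((lvt >> 8) & 0x7) == APIC_DM_NMI)
                  ioctl(vcpu_fd, KVM_NMI, 0);
  }

The caller pauses the vcpu before this and resumes it afterwards, as the list
above describes.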
5. The kvm_run structure
Application code obtains a pointer to the kvm_run structure by
diff --git a/Documentation/watchdog/00-INDEX b/Documentation/watchdog/00-INDEX
index fc51128071c..fc9082a1477 100644
--- a/Documentation/watchdog/00-INDEX
+++ b/Documentation/watchdog/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file.
+convert_drivers_to_kernel_api.txt
+ - how-to for converting old watchdog drivers to the new kernel API.
hpwdt.txt
- information on the HP iLO2 NMI watchdog
pcwd-watchdog.txt
diff --git a/Documentation/watchdog/convert_drivers_to_kernel_api.txt b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
index ae1e90036d0..be8119bb15d 100644
--- a/Documentation/watchdog/convert_drivers_to_kernel_api.txt
+++ b/Documentation/watchdog/convert_drivers_to_kernel_api.txt
@@ -163,6 +163,25 @@ Here is a simple example for a watchdog device:
+};
+Handle the 'nowayout' feature
+-----------------------------
+
+A few drivers use nowayout statically, i.e. there is no module parameter for it
+and only CONFIG_WATCHDOG_NOWAYOUT determines if the feature is going to be
+used. This needs to be converted by initializing the status variable of the
+watchdog_device like this:
+
+ .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+
+Most drivers, however, also allow runtime configuration of nowayout, usually
+by adding a module parameter. The conversion for this would be something like:
+
+ watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
+The module parameter itself needs to stay; everything else related to nowayout
+can go, though. This will likely be some code in open(), close() or write().
+
+
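A minimal sketch of that conversion for a hypothetical my_wdt driver (only the
nowayout-related pieces are shown; the rest of the watchdog_device setup and
the module boilerplate are elided):

  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <linux/watchdog.h>

  static int nowayout = WATCHDOG_NOWAYOUT;  /* default follows CONFIG_WATCHDOG_NOWAYOUT */
  module_param(nowayout, int, 0);
  MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");

  static struct watchdog_device my_wdd = {
          /* .info and .ops omitted in this sketch */
          .timeout = 30,
          /* static alternative: .status = WATCHDOG_NOWAYOUT_INIT_STATUS, */
  };

  static int __init my_wdt_init(void)
  {
          /* replaces the hand-rolled nowayout code in open()/close()/write() */
          watchdog_set_nowayout(&my_wdd, nowayout);
          return watchdog_register_device(&my_wdd);
  }

Either way the WDOG_NO_WAY_OUT status bit ends up set before registration,
which is all the watchdog core needs.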
Register the watchdog device
----------------------------
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 4f7c894244d..4b93c28e35c 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -1,6 +1,6 @@
The Linux WatchDog Timer Driver Core kernel API.
===============================================
-Last reviewed: 22-Jul-2011
+Last reviewed: 29-Nov-2011
Wim Van Sebroeck <wim@iguana.be>
@@ -142,6 +142,14 @@ bit-operations. The status bits that are defined are:
* WDOG_NO_WAY_OUT: this bit stores the nowayout setting for the watchdog.
If this bit is set then the watchdog timer will not be able to stop.
+ To set the WDOG_NO_WAY_OUT status bit (before registering your watchdog
+ timer device) you can either:
+ * set it statically in your watchdog_device struct with
+ .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+ (this will set the value the same as CONFIG_WATCHDOG_NOWAYOUT) or
+ * use the following helper function:
+ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, int nowayout)
+
Note: The WatchDog Timer Driver Core supports the magic close feature and
the nowayout feature. To use the magic close feature you must set the
WDIOF_MAGICCLOSE bit in the options field of the watchdog's info structure.
diff --git a/MAINTAINERS b/MAINTAINERS
index 3523ab000f1..311b0c40557 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -184,11 +184,6 @@ S: Maintained
F: Documentation/filesystems/9p.txt
F: fs/9p/
-A2232 SERIAL BOARD DRIVER
-L: linux-m68k@lists.linux-m68k.org
-S: Orphan
-F: drivers/staging/generic_serial/ser_a2232*
-
AACRAID SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -347,7 +342,7 @@ S: Supported
F: drivers/mfd/adp5520.c
F: drivers/video/backlight/adp5520_bl.c
F: drivers/leds/leds-adp5520.c
-F: drivers/gpio/adp5520-gpio.c
+F: drivers/gpio/gpio-adp5520.c
F: drivers/input/keyboard/adp5520-keys.c
ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
@@ -356,7 +351,7 @@ L: device-drivers-devel@blackfin.uclinux.org
W: http://wiki.analog.com/ADP5588
S: Supported
F: drivers/input/keyboard/adp5588-keys.c
-F: drivers/gpio/adp5588-gpio.c
+F: drivers/gpio/gpio-adp5588.c
ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
M: Michael Hennerich <michael.hennerich@analog.com>
@@ -511,8 +506,8 @@ M: Joerg Roedel <joerg.roedel@amd.com>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
-F: arch/x86/kernel/amd_iommu*.c
-F: arch/x86/include/asm/amd_iommu*.h
+F: drivers/iommu/amd_iommu*.[ch]
+F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -789,6 +784,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.pengutronix.de/git/imx/linux-2.6.git
F: arch/arm/mach-mx*/
+F: arch/arm/mach-imx/
F: arch/arm/plat-mxc/
ARM/FREESCALE IMX51
@@ -804,6 +800,13 @@ S: Maintained
T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
F: arch/arm/mach-imx/*imx6*
+ARM/FREESCALE MXS ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+F: arch/arm/mach-mxs/
+
ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -911,7 +914,6 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
M: Nicolas Pitre <nico@fluxnic.net>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Odd Fixes
-F: arch/arm/mach-loki/
F: arch/arm/mach-kirkwood/
F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
@@ -1046,35 +1048,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: arch/arm/mach-s3c24*/
+F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2440/
-F: arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c64xx/
+F: drivers/spi/spi-s3c*
+F: sound/soc/samsung/*
ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -1090,8 +1075,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5pv210/mach-aquila.c
F: arch/arm/mach-s5pv210/mach-goni.c
-F: arch/arm/mach-exynos4/mach-universal_c210.c
-F: arch/arm/mach-exynos4/mach-nuri.c
+F: arch/arm/mach-exynos/mach-universal_c210.c
+F: arch/arm/mach-exynos/mach-nuri.c
ARM/SAMSUNG S5P SERIES FIMC SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
@@ -1119,7 +1104,6 @@ M: Tomasz Stanislawski <t.stanislaws@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
-F: arch/arm/plat-s5p/dev-tv.c
F: drivers/media/video/s5p-tv/
ARM/SHMOBILE ARM ARCHITECTURE
@@ -1133,13 +1117,6 @@ S: Supported
F: arch/arm/mach-shmobile/
F: drivers/sh/
-ARM/TELECHIPS ARM ARCHITECTURE
-M: "Hans J. Koch" <hjk@hansjkoch.de>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/plat-tcc/
-F: arch/arm/mach-tcc8k/
-
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1161,14 +1138,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.mcuos.com
S: Maintained
F: arch/arm/mach-w90x900/
-F: arch/arm/mach-nuc93x/
F: drivers/input/keyboard/w90p910_keypad.c
F: drivers/input/touchscreen/w90p910_ts.c
F: drivers/watchdog/nuc900_wdt.c
F: drivers/net/ethernet/nuvoton/w90p910_ether.c
F: drivers/mtd/nand/nuc900_nand.c
F: drivers/rtc/rtc-nuc900.c
-F: drivers/spi/spi_nuc900.c
+F: drivers/spi/spi-nuc900.c
F: drivers/usb/host/ehci-w90x900.c
F: drivers/video/nuc900fb.c
@@ -1193,7 +1169,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ux500/
F: drivers/dma/ste_dma40*
-F: drivers/mfd/ab3550*
F: drivers/mfd/abx500*
F: drivers/mfd/ab8500*
F: drivers/mfd/stmpe*
@@ -1373,7 +1348,7 @@ F: drivers/net/ethernet/cadence/
ATMEL SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
-F: drivers/spi/atmel_spi.*
+F: drivers/spi/spi-atmel.*
ATMEL USBA UDC DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1512,7 +1487,7 @@ M: Sonic Zhang <sonic.zhang@analog.com>
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
-F: drivers/tty/serial/bfin_5xx.c
+F: drivers/tty/serial/bfin_uart.c
BLACKFIN WATCHDOG DRIVER
M: Mike Frysinger <vapier.adi@gmail.com>
@@ -1603,7 +1578,7 @@ M: Franky (Zhenhui) Lin <frankyl@broadcom.com>
M: Kan Yan <kanyan@broadcom.com>
L: linux-wireless@vger.kernel.org
S: Supported
-F: drivers/staging/brcm80211/
+F: drivers/net/wireless/brcm80211/
BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
M: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
@@ -1642,7 +1617,7 @@ BT8XXGPIO DRIVER
M: Michael Buesch <m@bues.ch>
W: http://bu3sch.de/btgpio.php
S: Maintained
-F: drivers/gpio/bt8xxgpio.c
+F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <chris.mason@oracle.com>
@@ -1670,6 +1645,14 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/pci/oxygen/
+C6X ARCHITECTURE
+M: Mark Salter <msalter@redhat.com>
+M: Aurelien Jacquiot <a-jacquiot@ti.com>
+L: linux-c6x-dev@linux-c6x.org
+W: http://www.linux-c6x.org/wiki/index.php/Main_Page
+S: Maintained
+F: arch/c6x/
+
CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com
@@ -1683,7 +1666,7 @@ L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/video4linux/cafe_ccic
-F: drivers/media/video/cafe_ccic*
+F: drivers/media/video/marvell-ccic/
CAIF NETWORK LAYER
M: Sjur Braendeland <sjur.brandeland@stericsson.com>
@@ -1707,11 +1690,9 @@ F: arch/x86/include/asm/tce.h
CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
-M: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M: Urs Thuermann <urs.thuermann@volkswagen.de>
L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
S: Maintained
F: net/can/
F: include/linux/can.h
@@ -1722,9 +1703,10 @@ F: include/linux/can/gw.h
CAN NETWORK DRIVERS
M: Wolfgang Grandegger <wg@grandegger.com>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
S: Maintained
F: drivers/net/can/
F: include/linux/can/dev.h
@@ -1908,12 +1890,6 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/compal-laptop.c
-COMPUTONE INTELLIPORT MULTIPORT CARD
-W: http://www.wittsend.com/computone.html
-S: Orphan
-F: Documentation/serial/computone.txt
-F: drivers/staging/tty/ip2/
-
CONEXANT ACCESSRUNNER USB DRIVER
M: Simon Arlott <cxacru@fire.lp0.eu>
L: accessrunner-general@lists.sourceforge.net
@@ -2128,7 +2104,7 @@ DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
L: netdev@vger.kernel.org
S: Orphan
F: Documentation/networking/dmfe.txt
-F: drivers/net/ethernet/tulip/dmfe.c
+F: drivers/net/ethernet/dec/tulip/dmfe.c
DC390/AM53C974 SCSI driver
M: Kurt Garloff <garloff@suse.de>
@@ -2201,6 +2177,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/dwc3/
+DEVICE FREQUENCY (DEVFREQ)
+M: MyungJoo Ham <myungjoo.ham@samsung.com>
+M: Kyungmin Park <kyungmin.park@samsung.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/devfreq/
+
DEVICE NUMBER REGISTRY
M: Torben Mathiasen <device@lanana.org>
W: http://lanana.org/docs/device-list/index.html
@@ -2217,15 +2200,6 @@ F: drivers/md/dm*
F: include/linux/device-mapper.h
F: include/linux/dm-*.h
-DIGI INTL. EPCA DRIVER
-M: "Digi International, Inc" <Eng.Linux@digi.com>
-L: Eng.Linux@digi.com
-W: http://www.digi.com
-S: Orphan
-F: Documentation/serial/digiepca.txt
-F: drivers/staging/tty/epca*
-F: drivers/staging/tty/digi*
-
DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <guenter.roeck@ericsson.com>
L: linux-i2c@vger.kernel.org
@@ -2709,7 +2683,7 @@ FIREWIRE SUBSYSTEM
M: Stefan Richter <stefanr@s5r6.in-berlin.de>
L: linux1394-devel@lists.sourceforge.net
W: http://ieee1394.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
S: Maintained
F: drivers/firewire/
F: include/linux/firewire*.h
@@ -2929,6 +2903,7 @@ F: include/linux/gigaset_dev.h
GPIO SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
+M: Linus Walleij <linus.walleij@stericsson.com>
S: Maintained
T: git git://git.secretlab.ca/git/linux-2.6.git
F: Documentation/gpio.txt
@@ -2946,7 +2921,7 @@ GRETH 10/100/1G Ethernet MAC device driver
M: Kristoffer Glembo <kristoffer@gaisler.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/greth*
+F: drivers/net/ethernet/aeroflex/
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
@@ -3110,6 +3085,7 @@ F: include/linux/hid*
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@@ -3197,6 +3173,16 @@ M: William Irwin <wli@holomorphy.com>
S: Maintained
F: fs/hugetlbfs/
+Hyper-V CORE AND DRIVERS
+M: K. Y. Srinivasan <kys@microsoft.com>
+M: Haiyang Zhang <haiyangz@microsoft.com>
+L: devel@linuxdriverproject.org
+S: Maintained
+F: drivers/hv/
+F: drivers/hid/hid-hyperv.c
+F: drivers/net/hyperv/
+F: drivers/staging/hv/
+
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
@@ -3592,8 +3578,7 @@ F: net/netfilter/ipvs/
IPWIRELESS DRIVER
M: Jiri Kosina <jkosina@suse.cz>
M: David Sterba <dsterba@suse.cz>
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git
+S: Odd Fixes
F: drivers/tty/ipwireless/
IPX NETWORK LAYER
@@ -3619,7 +3604,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
ISAPNP
@@ -3886,8 +3871,7 @@ L: keyrings@linux-nfs.org
S: Supported
F: Documentation/security/keys-trusted-encrypted.txt
F: include/keys/encrypted-type.h
-F: security/keys/encrypted.c
-F: security/keys/encrypted.h
+F: security/keys/encrypted-keys/
KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
@@ -4020,7 +4004,7 @@ M: Josh Boyer <jwboyer@gmail.com>
M: Matt Porter <mporter@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
+T: git git://git.infradead.org/users/jwboyer/powerpc-4xx.git
S: Maintained
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
@@ -4107,7 +4091,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@@ -4289,7 +4273,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
+F: Documentation/DocBook/media/
F: drivers/media/
+F: drivers/staging/media/
F: include/media/
F: include/linux/dvb/
F: include/linux/videodev*.h
@@ -4311,8 +4297,9 @@ F: include/linux/mm.h
F: mm/
MEMORY RESOURCE CONTROLLER
+M: Johannes Weiner <hannes@cmpxchg.org>
+M: Michal Hocko <mhocko@suse.cz>
M: Balbir Singh <bsingharora@gmail.com>
-M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
@@ -4860,6 +4847,14 @@ S: Maintained
T: git git://openrisc.net/~jonas/linux
F: arch/openrisc
+OPENVSWITCH
+M: Jesse Gross <jesse@nicira.com>
+L: dev@openvswitch.org
+W: http://openvswitch.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+S: Maintained
+F: net/openvswitch/
+
OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5094,6 +5089,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@@ -5125,10 +5121,19 @@ L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/devices/phram.c
+PICOXCELL SUPPORT
+M: Jamie Iles <jamie@jamieiles.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://github.com/jamieiles/linux-2.6-ji.git
+S: Supported
+F: arch/arm/mach-picoxcell
+F: drivers/*/picoxcell*
+F: drivers/*/*/picoxcell*
+
PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
-F: drivers/pinmux/
+F: drivers/pinctrl/
PKTCDVD DRIVER
M: Peter Osterlund <petero2@telia.com>
@@ -5173,6 +5178,7 @@ F: drivers/scsi/pm8001/
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@@ -5310,35 +5316,27 @@ F: drivers/media/video/pvrusb2/
PXA2xx/PXA3xx SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
M: Russell King <linux@arm.linux.org.uk>
+M: Haojian Zhuang <haojian.zhuang@marvell.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://github.com/hzhuang1/linux.git
+T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
S: Maintained
F: arch/arm/mach-pxa/
F: drivers/pcmcia/pxa2xx*
-F: drivers/spi/pxa2xx*
+F: drivers/spi/spi-pxa2xx*
F: drivers/usb/gadget/pxa2*
F: include/sound/pxa2xx-lib.h
F: sound/arm/pxa*
F: sound/soc/pxa
-PXA168 SUPPORT
-M: Eric Miao <eric.y.miao@gmail.com>
-M: Jason Chagas <jason.chagas@marvell.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git
-S: Maintained
-
-PXA910 SUPPORT
+MMP SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git
-S: Maintained
-
-MMP2 SUPPORT (aka ARMADA610)
M: Haojian Zhuang <haojian.zhuang@marvell.com>
-M: Eric Miao <eric.y.miao@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git
+T: git git://github.com/hzhuang1/linux.git
+T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
S: Maintained
+F: arch/arm/mach-mmp/
PXA MMCI DRIVER
S: Orphan
@@ -5377,6 +5375,7 @@ S: Supported
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
+M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
@@ -5547,11 +5546,6 @@ M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/memstick/host/r592.*
-RISCOM8 DRIVER
-S: Orphan
-F: Documentation/serial/riscom8.txt
-F: drivers/staging/tty/riscom8*
-
ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
@@ -5667,7 +5661,6 @@ F: drivers/media/video/*7146*
F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar <jassisinghbrar@gmail.com>
M: Sangbeom Kim <sbkim73@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
@@ -5689,6 +5682,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@@ -5713,6 +5707,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@@ -5810,13 +5805,14 @@ L: linux-mmc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
S: Maintained
F: drivers/mmc/host/sdhci.*
+F: drivers/mmc/host/sdhci-pltfm.[ch]
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
M: Anton Vorontsov <avorontsov@ru.mvista.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-mmc@vger.kernel.org
S: Maintained
-F: drivers/mmc/host/sdhci-of.*
+F: drivers/mmc/host/sdhci-pltfm.[ch]
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M: Ben Dooks <ben-linux@fluff.org>
@@ -5895,7 +5891,6 @@ F: drivers/net/ethernet/emulex/benet/
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Steve Hodgson <shodgson@solarflare.com>
M: Ben Hutchings <bhutchings@solarflare.com>
L: netdev@vger.kernel.org
S: Supported
@@ -6196,9 +6191,7 @@ M: Viresh Kumar <viresh.kumar@st.com>
W: http://www.st.com/spear
S: Maintained
F: arch/arm/mach-spear*/clock.c
-F: arch/arm/mach-spear*/include/mach/clkdev.h
F: arch/arm/plat-spear/clock.c
-F: arch/arm/plat-spear/include/plat/clkdev.h
F: arch/arm/plat-spear/include/plat/clock.h
SPEAR PAD MULTIPLEXING SUPPORT
@@ -6214,11 +6207,6 @@ F: arch/arm/mach-spear3xx/spear3*0_evb.c
F: arch/arm/mach-spear6xx/spear600.c
F: arch/arm/mach-spear6xx/spear600_evb.c
-SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-S: Orphan
-F: Documentation/serial/specialix.txt
-F: drivers/staging/tty/specialix*
-
SPI SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
L: spi-devel-general@lists.sourceforge.net
@@ -6261,7 +6249,7 @@ F: arch/alpha/kernel/srm_env.c
STABLE BRANCH
M: Greg Kroah-Hartman <greg@kroah.com>
-L: stable@kernel.org
+L: stable@vger.kernel.org
S: Maintained
STAGING SUBSYSTEM
@@ -6296,11 +6284,6 @@ M: Manu Abraham <abraham.manu@gmail.com>
S: Odd Fixes
F: drivers/staging/crystalhd/
-STAGING - CYPRESS WESTBRIDGE SUPPORT
-M: David Cross <david.cross@cypress.com>
-S: Odd Fixes
-F: drivers/staging/westbridge/
-
STAGING - ECHO CANCELLER
M: Steve Underwood <steveu@coppice.org>
M: David Rowe <david@rowetel.com>
@@ -6322,12 +6305,6 @@ M: David Täht <d@teklibre.com>
S: Odd Fixes
F: drivers/staging/frontier/
-STAGING - HYPER-V (MICROSOFT)
-M: Hank Janssen <hjanssen@microsoft.com>
-M: Haiyang Zhang <haiyangz@microsoft.com>
-S: Odd Fixes
-F: drivers/staging/hv/
-
STAGING - INDUSTRIAL IO
M: Jonathan Cameron <jic23@cam.ac.uk>
L: linux-iio@vger.kernel.org
@@ -6338,7 +6315,7 @@ STAGING - LIRC (LINUX INFRARED REMOTE CONTROL) DRIVERS
M: Jarod Wilson <jarod@wilsonet.com>
W: http://www.lirc.org/
S: Odd Fixes
-F: drivers/staging/lirc/
+F: drivers/staging/media/lirc/
STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
M: Julian Andres Klode <jak@jak-linux.org>
@@ -6374,7 +6351,7 @@ F: drivers/staging/sm7xx/
STAGING - SOFTLOGIC 6x10 MPEG CODEC
M: Ben Collins <bcollins@bluecherry.net>
S: Odd Fixes
-F: drivers/staging/solo6x10/
+F: drivers/staging/media/solo6x10/
STAGING - SPEAKUP CONSOLE SPEECH DRIVER
M: William Hubbs <w.d.hubbs@gmail.com>
@@ -6412,7 +6389,7 @@ S: Odd Fixes
F: drivers/staging/winbond/
STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER
-M: Arnaud Patard <apatard@mandriva.com>
+M: Arnaud Patard <arnaud.patard@rtp-net.org>
S: Odd Fixes
F: drivers/staging/xgifb/
@@ -6503,6 +6480,13 @@ W: http://tcp-lp-mod.sourceforge.net/
S: Maintained
F: net/ipv4/tcp_lp.c
+TEAM DRIVER
+M: Jiri Pirko <jpirko@redhat.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/team/
+F: include/linux/if_team.h
+
TEGRA SUPPORT
M: Colin Cross <ccross@android.com>
M: Olof Johansson <olof@lixom.net>
@@ -6640,7 +6624,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@@ -6670,7 +6654,7 @@ TULIP NETWORK DRIVERS
M: Grant Grundler <grundler@parisc-linux.org>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ethernet/tulip/
+F: drivers/net/ethernet/dec/tulip/
TUN/TAP driver
M: Maxim Krasnyansky <maxk@qualcomm.com>
@@ -7390,7 +7374,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/
diff --git a/Makefile b/Makefile
index 3a8f0640cda..adddd11c3b3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 4b0669cbb3b..2505740b81d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,10 @@ config OPROFILE_EVENT_MULTIPLEX
config HAVE_OPROFILE
bool
+config OPROFILE_NMI_TIMER
+ def_bool y
+ depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+
config KPROBES
bool "Kprobes"
depends on MODULES
diff --git a/arch/alpha/include/asm/ipcbuf.h b/arch/alpha/include/asm/ipcbuf.h
index d9c0e1a5070..84c7e51cb6d 100644
--- a/arch/alpha/include/asm/ipcbuf.h
+++ b/arch/alpha/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ALPHA_IPCBUF_H
-#define _ALPHA_IPCBUF_H
-
-/*
- * The ipc64_perm structure for alpha architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned short seq;
- unsigned short __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ALPHA_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 06edfefc337..082355f159e 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -69,6 +69,9 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db02234..28335bd40e4 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
-#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
/* Work to do on interrupt/exception return. */
#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 881544339c2..0a0579076f4 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -15,9 +15,4 @@
#include <asm-generic/int-l64.h>
#endif
-#ifndef __ASSEMBLY__
-
-typedef unsigned int umode_t;
-
-#endif /* __ASSEMBLY__ */
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2ebf66b8008..24626b0419e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -16,6 +16,7 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
+ select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
@@ -221,8 +222,9 @@ config NEED_MACH_MEMORY_H
be avoided when possible.
config PHYS_OFFSET
- hex "Physical address of main memory"
+ hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+ default DRAM_BASE if !MMU
help
Please provide the physical address corresponding to the
location of main memory in your system.
@@ -258,6 +260,7 @@ config ARCH_INTEGRATOR
select ARCH_HAS_CPUFREQ
select CLKDEV_LOOKUP
select HAVE_MACH_CLKDEV
+ select HAVE_TCM
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
@@ -341,10 +344,12 @@ config ARCH_HIGHBANK
select ARM_AMBA
select ARM_GIC
select ARM_TIMER_SP804
+ select CACHE_L2X0
select CLKDEV_LOOKUP
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
+ select HAVE_SMP
select USE_OF
help
Support for the Calxeda Highbank SoC based boards.
@@ -362,6 +367,7 @@ config ARCH_CNS3XXX
select CPU_V6K
select GENERIC_CLOCKEVENTS
select ARM_GIC
+ select MIGHT_HAVE_CACHE_L2X0
select MIGHT_HAVE_PCI
select PCI_DOMAINS if PCI
help
@@ -382,6 +388,7 @@ config ARCH_PRIMA2
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select GENERIC_IRQ_CHIP
+ select MIGHT_HAVE_CACHE_L2X0
select USE_OF
select ZONE_DMA
help
@@ -442,6 +449,7 @@ config ARCH_MXS
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select HAVE_CLK_PREPARE
help
Support for Freescale MXS-based family of processors
@@ -592,6 +600,7 @@ config ARCH_MMP
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
+ select GPIO_PXA
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select PLAT_PXA
@@ -634,6 +643,8 @@ config ARCH_TEGRA
select GENERIC_GPIO
select HAVE_CLK
select HAVE_SCHED_CLOCK
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
select ARCH_HAS_CPUFREQ
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -651,6 +662,7 @@ config ARCH_PICOXCELL
select HAVE_SCHED_CLOCK
select HAVE_TCM
select NO_IOPORT
+ select SPARSE_IRQ
select USE_OF
help
This enables support for systems based on the Picochip picoXcell
@@ -674,6 +686,7 @@ config ARCH_PXA
select CLKSRC_MMIO
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ select GPIO_PXA
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select PLAT_PXA
@@ -703,7 +716,9 @@ config ARCH_SHMOBILE
select HAVE_CLK
select CLKDEV_LOOKUP
select HAVE_MACH_CLKDEV
+ select HAVE_SMP
select GENERIC_CLOCKEVENTS
+ select MIGHT_HAVE_CACHE_L2X0
select NO_IOPORT
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
@@ -739,7 +754,7 @@ config ARCH_SA1100
select ARCH_HAS_CPUFREQ
select CPU_FREQ
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
+ select CLKDEV_LOOKUP
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
@@ -868,16 +883,6 @@ config ARCH_SHARK
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
-config ARCH_TCC_926
- bool "Telechips TCC ARM926-based systems"
- select CLKSRC_MMIO
- select CPU_ARM926T
- select HAVE_CLK
- select CLKDEV_LOOKUP
- select GENERIC_CLOCKEVENTS
- help
- Support for Telechips TCC ARM926-based systems.
-
config ARCH_U300
bool "ST-Ericsson U300 Series"
depends on MMU
@@ -893,7 +898,6 @@ config ARCH_U300
select HAVE_MACH_CLKDEV
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select NEED_MACH_MEMORY_H
help
Support for ST-Ericsson U300 series mobile platforms.
@@ -905,6 +909,8 @@ config ARCH_U8500
select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_CPUFREQ
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
help
Support for ST-Ericsson's Ux500 architecture
@@ -915,6 +921,7 @@ config ARCH_NOMADIK
select CPU_ARM926T
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
+ select MIGHT_HAVE_CACHE_L2X0
select ARCH_REQUIRE_GPIOLIB
help
Support for the Nomadik platform by ST-Ericsson
@@ -974,6 +981,7 @@ config ARCH_ZYNQ
select ARM_GIC
select ARM_AMBA
select ICST
+ select MIGHT_HAVE_CACHE_L2X0
select USE_OF
help
Support for Xilinx Zynq ARM Cortex A9 Platform
@@ -1060,8 +1068,6 @@ source "arch/arm/plat-s5p/Kconfig"
source "arch/arm/plat-spear/Kconfig"
-source "arch/arm/plat-tcc/Kconfig"
-
if ARCH_S3C2410
source "arch/arm/mach-s3c2410/Kconfig"
source "arch/arm/mach-s3c2412/Kconfig"
@@ -1126,6 +1132,11 @@ config ARM_TIMER_SP804
source arch/arm/mm/Kconfig
+config ARM_NR_BANKS
+ int
+ default 16 if ARCH_EP93XX
+ default 8
+
config IWMMXT
bool "Enable iWMMXt support"
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
@@ -1134,10 +1145,9 @@ config IWMMXT
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
-# bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
config XSCALE_PMU
bool
- depends on CPU_XSCALE && !XSCALE_PMU_TIMER
+ depends on CPU_XSCALE
default y
config CPU_HAS_PMU
@@ -1232,7 +1242,7 @@ config ARM_ERRATA_742231
capabilities of the processor.
config PL310_ERRATA_588369
- bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+ bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0
help
The PL310 L2 cache controller implements three types of Clean &
@@ -1246,7 +1256,7 @@ config PL310_ERRATA_588369
config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
- depends on CPU_V7 && SMP
+ depends on CPU_V7
help
This option enables the workaround for the 720789 Cortex-A9 (prior to
r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1257,7 +1267,7 @@ config ARM_ERRATA_720789
entries regardless of the ASID.
config PL310_ERRATA_727915
- bool "Background Clean & Invalidate by Way operation can cause data corruption"
+ bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
depends on CACHE_L2X0
help
PL310 implements the Clean & Invalidate by Way L2 cache maintenance
@@ -1282,7 +1292,7 @@ config ARM_ERRATA_743622
config ARM_ERRATA_751472
bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
- depends on CPU_V7 && SMP
+ depends on CPU_V7
help
This option enables the workaround for the 751472 Cortex-A9 (prior
to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
@@ -1290,8 +1300,8 @@ config ARM_ERRATA_751472
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
-config ARM_ERRATA_753970
- bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+ bool "PL310 errata: cache sync operation may be faulty"
depends on CACHE_PL310
help
This option enables the workaround for the 753970 PL310 (r3p0) erratum.
@@ -1353,6 +1363,18 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1423,14 +1445,20 @@ menu "Kernel Features"
source "kernel/time/Kconfig"
+config HAVE_SMP
+ bool
+ help
+ This option should be selected by machines which have an SMP-
+ capable CPU.
+
+ The only effect of this option is to make the SMP-related
+ options available to the user for configuration.
+
config SMP
bool "Symmetric Multi-Processing"
depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
- depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
- ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
- ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE || ARCH_HIGHBANK || SOC_IMX6Q
+ depends on HAVE_SMP
depends on MMU
select USE_GENERIC_SMP_HELPERS
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
@@ -1548,6 +1576,16 @@ config LOCAL_TIMERS
accounting to be spread across the timer interval, preventing a
"thundering herd" at every timer tick.
+config ARCH_NR_GPIO
+ int
+ default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
+ default 350 if ARCH_U8500
+ default 0
+ help
+ Maximum number of GPIOs in the system.
+
+ If unsure, leave the default value.
+
source kernel/Kconfig.preempt
config HZ
@@ -1960,7 +1998,7 @@ endchoice
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
- depends on !ZBOOT_ROM
+ depends on !ZBOOT_ROM && !ARM_LPAE
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
@@ -1990,7 +2028,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index c5213e78606..e0d236d7ff7 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -100,6 +100,14 @@ choice
Note that the system will appear to hang during boot if there
is nothing connected to read from the DCC.
+ config AT91_DEBUG_LL_DBGU0
+ bool "Kernel low-level debugging on rm9200, 9260/9g20, 9261/9g10 and 9rl"
+ depends on HAVE_AT91_DBGU0
+
+ config AT91_DEBUG_LL_DBGU1
+ bool "Kernel low-level debugging on 9263, 9g45 and cap9"
+ depends on HAVE_AT91_DBGU1
+
config DEBUG_FOOTBRIDGE_COM1
bool "Kernel low-level debugging messages via footbridge 8250 at PCI COM1"
depends on FOOTBRIDGE
@@ -247,6 +255,43 @@ choice
their output to the standard serial port on the RealView
PB1176 platform.
+ config DEBUG_MSM_UART1
+ bool "Kernel low-level debugging messages via MSM UART1"
+ depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the first serial port on MSM devices.
+
+ config DEBUG_MSM_UART2
+ bool "Kernel low-level debugging messages via MSM UART2"
+ depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the second serial port on MSM devices.
+
+ config DEBUG_MSM_UART3
+ bool "Kernel low-level debugging messages via MSM UART3"
+ depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the third serial port on MSM devices.
+
+ config DEBUG_MSM8660_UART
+ bool "Kernel low-level debugging messages via MSM 8660 UART"
+ depends on ARCH_MSM8X60
+ select MSM_HAS_DEBUG_UART_HS
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8660 devices.
+
+ config DEBUG_MSM8960_UART
+ bool "Kernel low-level debugging messages via MSM 8960 UART"
+ depends on ARCH_MSM8960
+ select MSM_HAS_DEBUG_UART_HS
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port on MSM 8960 devices.
+
endchoice
config EARLY_PRINTK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dfcf3b033e1..40319d91bb7 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -184,7 +184,6 @@ machine-$(CONFIG_ARCH_EXYNOS4) := exynos
machine-$(CONFIG_ARCH_SA1100) := sa1100
machine-$(CONFIG_ARCH_SHARK) := shark
machine-$(CONFIG_ARCH_SHMOBILE) := shmobile
-machine-$(CONFIG_ARCH_TCC8K) := tcc8k
machine-$(CONFIG_ARCH_TEGRA) := tegra
machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
@@ -204,7 +203,6 @@ machine-$(CONFIG_ARCH_ZYNQ) := zynq
plat-$(CONFIG_ARCH_MXC) := mxc
plat-$(CONFIG_ARCH_OMAP) := omap
plat-$(CONFIG_ARCH_S3C64XX) := samsung
-plat-$(CONFIG_ARCH_TCC_926) := tcc
plat-$(CONFIG_ARCH_ZYNQ) := versatile
plat-$(CONFIG_PLAT_IOP) := iop
plat-$(CONFIG_PLAT_NOMADIK) := nomadik
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 21f56ff3279..cf0a64ce4b8 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -126,7 +126,8 @@ ccflags-y := -fpic -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all
# Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
+KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
+ awk 'END{print $$3}')
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c2effc91725..c5d60250d43 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,6 +659,7 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
#endif
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi
index aeef04269cf..07603b8c950 100644
--- a/arch/arm/boot/dts/at91sam9g20.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20.dtsi
@@ -114,6 +114,13 @@
atmel,use-dma-tx;
status = "disabled";
};
+
+ macb0: ethernet@fffc4000 {
+ compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ reg = <0xfffc4000 0x100>;
+ interrupts = <21>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index db6a45202f2..fffa005300a 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -101,6 +101,13 @@
atmel,use-dma-tx;
status = "disabled";
};
+
+ macb0: ethernet@fffbc000 {
+ compatible = "cdns,at32ap7000-macb", "cdns,macb";
+ reg = <0xfffbc000 0x100>;
+ interrupts = <25>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 85b34f59cd8..a387e7704ce 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -30,6 +30,11 @@
usart1: serial@fff90000 {
status = "okay";
};
+
+ macb0: ethernet@fffbc000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
};
};
};
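The three AT91 hunks above follow the usual SoC/board split: the SoC .dtsi describes the macb Ethernet controller once and leaves it disabled, and each board .dts that actually wires it up overrides only the board-specific properties and the status. A minimal sketch of that pairing, stitched together from the nodes added above and shown without the enclosing ahb/apb hierarchy:

	/* SoC .dtsi: controller described once, disabled by default */
	macb0: ethernet@fffc4000 {
		compatible = "cdns,at32ap7000-macb", "cdns,macb";
		reg = <0xfffc4000 0x100>;
		interrupts = <21>;
		status = "disabled";
	};

	/* Board .dts: a node at the same path is merged over the .dtsi one,
	 * so only the board-specific overrides need to appear here */
	macb0: ethernet@fffc4000 {
		phy-mode = "rmii";
		status = "okay";
	};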
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
new file mode 100644
index 00000000000..b8c476384ee
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -0,0 +1,137 @@
+/*
+ * Samsung's Exynos4210 based Origen board device tree source
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2010-2011 Linaro Ltd.
+ * www.linaro.org
+ *
+ * Device tree source file for Insignal's Origen board which is based on
+ * Samsung's Exynos4210 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+/include/ "exynos4210.dtsi"
+
+/ {
+ model = "Insignal Origen evaluation board based on Exynos4210";
+ compatible = "insignal,origen", "samsung,exynos4210";
+
+ memory {
+ reg = <0x40000000 0x40000000>;
+ };
+
+ chosen {
+ bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
+ };
+
+ sdhci@12530000 {
+ samsung,sdhci-bus-width = <4>;
+ linux,mmc_cap_4_bit_data;
+ samsung,sdhci-cd-internal;
+ gpio-cd = <&gpk2 2 2 3 3>;
+ gpios = <&gpk2 0 2 0 3>,
+ <&gpk2 1 2 0 3>,
+ <&gpk2 3 2 3 3>,
+ <&gpk2 4 2 3 3>,
+ <&gpk2 5 2 3 3>,
+ <&gpk2 6 2 3 3>;
+ };
+
+ sdhci@12510000 {
+ samsung,sdhci-bus-width = <4>;
+ linux,mmc_cap_4_bit_data;
+ samsung,sdhci-cd-internal;
+ gpio-cd = <&gpk0 2 2 3 3>;
+ gpios = <&gpk0 0 2 0 3>,
+ <&gpk0 1 2 0 3>,
+ <&gpk0 3 2 3 3>,
+ <&gpk0 4 2 3 3>,
+ <&gpk0 5 2 3 3>,
+ <&gpk0 6 2 3 3>;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ up {
+ label = "Up";
+ gpios = <&gpx2 0 0 0 2>;
+ linux,code = <103>;
+ };
+
+ down {
+ label = "Down";
+ gpios = <&gpx2 1 0 0 2>;
+ linux,code = <108>;
+ };
+
+ back {
+ label = "Back";
+ gpios = <&gpx1 7 0 0 2>;
+ linux,code = <158>;
+ };
+
+ home {
+ label = "Home";
+ gpios = <&gpx1 6 0 0 2>;
+ linux,code = <102>;
+ };
+
+ menu {
+ label = "Menu";
+ gpios = <&gpx1 5 0 0 2>;
+ linux,code = <139>;
+ };
+ };
+
+ keypad@100A0000 {
+ status = "disabled";
+ };
+
+ sdhci@12520000 {
+ status = "disabled";
+ };
+
+ sdhci@12540000 {
+ status = "disabled";
+ };
+
+ i2c@13860000 {
+ status = "disabled";
+ };
+
+ i2c@13870000 {
+ status = "disabled";
+ };
+
+ i2c@13880000 {
+ status = "disabled";
+ };
+
+ i2c@13890000 {
+ status = "disabled";
+ };
+
+ i2c@138A0000 {
+ status = "disabled";
+ };
+
+ i2c@138B0000 {
+ status = "disabled";
+ };
+
+ i2c@138C0000 {
+ status = "disabled";
+ };
+
+ i2c@138D0000 {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
new file mode 100644
index 00000000000..27afc8e535c
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -0,0 +1,182 @@
+/*
+ * Samsung's Exynos4210 based SMDKV310 board device tree source
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2010-2011 Linaro Ltd.
+ * www.linaro.org
+ *
+ * Device tree source file for Samsung's SMDKV310 board which is based on
+ * Samsung's Exynos4210 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+/include/ "exynos4210.dtsi"
+
+/ {
+ model = "Samsung smdkv310 evaluation board based on Exynos4210";
+ compatible = "samsung,smdkv310", "samsung,exynos4210";
+
+ memory {
+ reg = <0x40000000 0x80000000>;
+ };
+
+ chosen {
+ bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
+ };
+
+ sdhci@12530000 {
+ samsung,sdhci-bus-width = <4>;
+ linux,mmc_cap_4_bit_data;
+ samsung,sdhci-cd-internal;
+ gpio-cd = <&gpk2 2 2 3 3>;
+ gpios = <&gpk2 0 2 0 3>,
+ <&gpk2 1 2 0 3>,
+ <&gpk2 3 2 3 3>,
+ <&gpk2 4 2 3 3>,
+ <&gpk2 5 2 3 3>,
+ <&gpk2 6 2 3 3>;
+ };
+
+ keypad@100A0000 {
+ samsung,keypad-num-rows = <2>;
+ samsung,keypad-num-columns = <8>;
+ linux,keypad-no-autorepeat;
+ linux,keypad-wakeup;
+
+ row-gpios = <&gpx2 0 3 3 0>,
+ <&gpx2 1 3 3 0>;
+
+ col-gpios = <&gpx1 0 3 0 0>,
+ <&gpx1 1 3 0 0>,
+ <&gpx1 2 3 0 0>,
+ <&gpx1 3 3 0 0>,
+ <&gpx1 4 3 0 0>,
+ <&gpx1 5 3 0 0>,
+ <&gpx1 6 3 0 0>,
+ <&gpx1 7 3 0 0>;
+
+ key_1 {
+ keypad,row = <0>;
+ keypad,column = <3>;
+ linux,code = <2>;
+ };
+
+ key_2 {
+ keypad,row = <0>;
+ keypad,column = <4>;
+ linux,code = <3>;
+ };
+
+ key_3 {
+ keypad,row = <0>;
+ keypad,column = <5>;
+ linux,code = <4>;
+ };
+
+ key_4 {
+ keypad,row = <0>;
+ keypad,column = <6>;
+ linux,code = <5>;
+ };
+
+ key_5 {
+ keypad,row = <0>;
+ keypad,column = <7>;
+ linux,code = <6>;
+ };
+
+ key_a {
+ keypad,row = <1>;
+ keypad,column = <3>;
+ linux,code = <30>;
+ };
+
+ key_b {
+ keypad,row = <1>;
+ keypad,column = <4>;
+ linux,code = <48>;
+ };
+
+ key_c {
+ keypad,row = <1>;
+ keypad,column = <5>;
+ linux,code = <46>;
+ };
+
+ key_d {
+ keypad,row = <1>;
+ keypad,column = <6>;
+ linux,code = <32>;
+ };
+
+ key_e {
+ keypad,row = <1>;
+ keypad,column = <7>;
+ linux,code = <18>;
+ };
+ };
+
+ i2c@13860000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <20000>;
+ gpios = <&gpd1 0 2 3 0>,
+ <&gpd1 1 2 3 0>;
+
+ eeprom@50 {
+ compatible = "samsung,24ad0xd1";
+ reg = <0x50>;
+ };
+
+ eeprom@52 {
+ compatible = "samsung,24ad0xd1";
+ reg = <0x52>;
+ };
+ };
+
+ sdhci@12510000 {
+ status = "disabled";
+ };
+
+ sdhci@12520000 {
+ status = "disabled";
+ };
+
+ sdhci@12540000 {
+ status = "disabled";
+ };
+
+ i2c@13870000 {
+ status = "disabled";
+ };
+
+ i2c@13880000 {
+ status = "disabled";
+ };
+
+ i2c@13890000 {
+ status = "disabled";
+ };
+
+ i2c@138A0000 {
+ status = "disabled";
+ };
+
+ i2c@138B0000 {
+ status = "disabled";
+ };
+
+ i2c@138C0000 {
+ status = "disabled";
+ };
+
+ i2c@138D0000 {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
new file mode 100644
index 00000000000..63d7578856c
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -0,0 +1,397 @@
+/*
+ * Samsung's Exynos4210 SoC device tree source
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2010-2011 Linaro Ltd.
+ * www.linaro.org
+ *
+ * Samsung's Exynos4210 SoC device nodes are listed in this file. Exynos4210
+ * based board files can include this file and provide values for board specific
+ * bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in the
+ * Exynos4210 SoC. As device tree coverage for Exynos4210 increases, additional
+ * nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "samsung,exynos4210";
+ interrupt-parent = <&gic>;
+
+ gic:interrupt-controller@10490000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+ };
+
+ watchdog@10060000 {
+ compatible = "samsung,s3c2410-wdt";
+ reg = <0x10060000 0x100>;
+ interrupts = <0 43 0>;
+ };
+
+ rtc@10070000 {
+ compatible = "samsung,s3c6410-rtc";
+ reg = <0x10070000 0x100>;
+ interrupts = <0 44 0>, <0 45 0>;
+ };
+
+ keypad@100A0000 {
+ compatible = "samsung,s5pv210-keypad";
+ reg = <0x100A0000 0x100>;
+ interrupts = <0 109 0>;
+ };
+
+ sdhci@12510000 {
+ compatible = "samsung,exynos4210-sdhci";
+ reg = <0x12510000 0x100>;
+ interrupts = <0 73 0>;
+ };
+
+ sdhci@12520000 {
+ compatible = "samsung,exynos4210-sdhci";
+ reg = <0x12520000 0x100>;
+ interrupts = <0 74 0>;
+ };
+
+ sdhci@12530000 {
+ compatible = "samsung,exynos4210-sdhci";
+ reg = <0x12530000 0x100>;
+ interrupts = <0 75 0>;
+ };
+
+ sdhci@12540000 {
+ compatible = "samsung,exynos4210-sdhci";
+ reg = <0x12540000 0x100>;
+ interrupts = <0 76 0>;
+ };
+
+ serial@13800000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x13800000 0x100>;
+ interrupts = <0 52 0>;
+ };
+
+ serial@13810000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x13810000 0x100>;
+ interrupts = <0 53 0>;
+ };
+
+ serial@13820000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x13820000 0x100>;
+ interrupts = <0 54 0>;
+ };
+
+ serial@13830000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x13830000 0x100>;
+ interrupts = <0 55 0>;
+ };
+
+ i2c@13860000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x13860000 0x100>;
+ interrupts = <0 58 0>;
+ };
+
+ i2c@13870000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x13870000 0x100>;
+ interrupts = <0 59 0>;
+ };
+
+ i2c@13880000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x13880000 0x100>;
+ interrupts = <0 60 0>;
+ };
+
+ i2c@13890000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x13890000 0x100>;
+ interrupts = <0 61 0>;
+ };
+
+ i2c@138A0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x138A0000 0x100>;
+ interrupts = <0 62 0>;
+ };
+
+ i2c@138B0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x138B0000 0x100>;
+ interrupts = <0 63 0>;
+ };
+
+ i2c@138C0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x138C0000 0x100>;
+ interrupts = <0 64 0>;
+ };
+
+ i2c@138D0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x138D0000 0x100>;
+ interrupts = <0 65 0>;
+ };
+
+ amba {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "arm,amba-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ pdma0: pdma@12680000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x12680000 0x1000>;
+ interrupts = <0 35 0>;
+ };
+
+ pdma1: pdma@12690000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x12690000 0x1000>;
+ interrupts = <0 36 0>;
+ };
+ };
+
+ gpio-controllers {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ gpio-controller;
+ ranges;
+
+ gpa0: gpio-controller@11400000 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400000 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpa1: gpio-controller@11400020 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400020 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpb: gpio-controller@11400040 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400040 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpc0: gpio-controller@11400060 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400060 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpc1: gpio-controller@11400080 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400080 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpd0: gpio-controller@114000A0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114000A0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpd1: gpio-controller@114000C0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114000C0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpe0: gpio-controller@114000E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114000E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpe1: gpio-controller@11400100 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400100 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpe2: gpio-controller@11400120 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400120 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpe3: gpio-controller@11400140 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400140 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpe4: gpio-controller@11400160 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400160 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpf0: gpio-controller@11400180 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11400180 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpf1: gpio-controller@114001A0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114001A0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpf2: gpio-controller@114001C0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114001C0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpf3: gpio-controller@114001E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114001E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpj0: gpio-controller@11000000 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000000 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpj1: gpio-controller@11000020 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000020 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpk0: gpio-controller@11000040 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000040 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpk1: gpio-controller@11000060 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000060 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpk2: gpio-controller@11000080 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000080 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpk3: gpio-controller@110000A0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110000A0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpl0: gpio-controller@110000C0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110000C0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpl1: gpio-controller@110000E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110000E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpl2: gpio-controller@11000100 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000100 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy0: gpio-controller@11000120 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000120 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy1: gpio-controller@11000140 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000140 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy2: gpio-controller@11000160 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000160 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy3: gpio-controller@11000180 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000180 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy4: gpio-controller@110001A0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110001A0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy5: gpio-controller@110001C0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110001C0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpy6: gpio-controller@110001E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x110001E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpx0: gpio-controller@11000C00 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000C00 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpx1: gpio-controller@11000C20 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000C20 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpx2: gpio-controller@11000C40 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000C40 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpx3: gpio-controller@11000C60 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x11000C60 0x20>;
+ #gpio-cells = <4>;
+ };
+
+ gpz: gpio-controller@03860000 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x03860000 0x20>;
+ #gpio-cells = <4>;
+ };
+ };
+};
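As the header comment in exynos4210.dtsi says, the .dtsi only enumerates the SoC's controllers; a board file includes it and fills in the board-specific values, exactly as the Origen and SMDKV310 files earlier in this patch do. A stripped-down sketch of such a board file follows; the model string and the first compatible entry are placeholders, not a board in this patch:

	/dts-v1/;
	/include/ "exynos4210.dtsi"

	/ {
		model = "Example Exynos4210 board";			/* placeholder */
		compatible = "vendor,example-board", "samsung,exynos4210";	/* first entry is a placeholder */

		memory {
			reg = <0x40000000 0x40000000>;
		};

		/* Nodes are matched by path against exynos4210.dtsi, so only
		 * board-specific properties and status overrides appear here. */
		sdhci@12530000 {
			samsung,sdhci-bus-width = <4>;
			/* card-detect and bus GPIOs from the relevant gpk bank go here */
		};

		i2c@13870000 {
			status = "disabled";
		};
	};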
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index aeb1a7578fa..305635bd45c 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -194,5 +194,17 @@
reg = <0xfff3d000 0x1000>;
interrupts = <0 92 4>;
};
+
+ ethernet@fff50000 {
+ compatible = "calxeda,hb-xgmac";
+ reg = <0xfff50000 0x1000>;
+ interrupts = <0 77 4 0 78 4 0 79 4>;
+ };
+
+ ethernet@fff51000 {
+ compatible = "calxeda,hb-xgmac";
+ reg = <0xfff51000 0x1000>;
+ interrupts = <0 80 4 0 81 4 0 82 4>;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index f8766af1121..564cb8c19f1 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -35,20 +35,19 @@
};
esdhc@70008000 { /* ESDHC2 */
- cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
- wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+ cd-gpios = <&gpio1 6 0>;
+ wp-gpios = <&gpio1 5 0>;
status = "okay";
};
- uart2: uart@7000c000 { /* UART3 */
+ uart3: uart@7000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
ecspi@70010000 { /* ECSPI1 */
fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
- <&gpio3 25 0>; /* GPIO4_25 */
+ cs-gpios = <&gpio4 24 0>, <&gpio4 25 0>;
status = "okay";
pmic: mc13892@0 {
@@ -57,7 +56,7 @@
compatible = "fsl,mc13892";
spi-max-frequency = <6000000>;
reg = <0>;
- mc13xxx-irq-gpios = <&gpio0 8 0>; /* GPIO1_8 */
+ mc13xxx-irq-gpios = <&gpio1 8 0>;
fsl,mc13xxx-uses-regulator;
};
@@ -91,12 +90,12 @@
reg = <0x73fa8000 0x4000>;
};
- uart0: uart@73fbc000 {
+ uart1: uart@73fbc000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart1: uart@73fc0000 {
+ uart2: uart@73fc0000 {
status = "okay";
};
};
@@ -127,7 +126,7 @@
power {
label = "Power Button";
- gpios = <&gpio1 21 0>;
+ gpios = <&gpio2 21 0>;
linux,code = <116>; /* KEY_POWER */
gpio-key,wakeup;
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 327ab8e3a4c..6663986fe1c 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -14,9 +14,9 @@
/ {
aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
};
tzic: tz-interrupt-controller@e0000000 {
@@ -86,7 +86,7 @@
status = "disabled";
};
- uart2: uart@7000c000 { /* UART3 */
+ uart3: uart@7000c000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x7000c000 0x4000>;
interrupts = <33>;
@@ -117,7 +117,7 @@
};
};
- gpio0: gpio@73f84000 { /* GPIO1 */
+ gpio1: gpio@73f84000 {
compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
reg = <0x73f84000 0x4000>;
interrupts = <50 51>;
@@ -127,7 +127,7 @@
#interrupt-cells = <1>;
};
- gpio1: gpio@73f88000 { /* GPIO2 */
+ gpio2: gpio@73f88000 {
compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
reg = <0x73f88000 0x4000>;
interrupts = <52 53>;
@@ -137,7 +137,7 @@
#interrupt-cells = <1>;
};
- gpio2: gpio@73f8c000 { /* GPIO3 */
+ gpio3: gpio@73f8c000 {
compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
reg = <0x73f8c000 0x4000>;
interrupts = <54 55>;
@@ -147,7 +147,7 @@
#interrupt-cells = <1>;
};
- gpio3: gpio@73f90000 { /* GPIO4 */
+ gpio4: gpio@73f90000 {
compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
reg = <0x73f90000 0x4000>;
interrupts = <56 57>;
@@ -171,14 +171,14 @@
status = "disabled";
};
- uart0: uart@73fbc000 {
+ uart1: uart@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart1: uart@73fc0000 {
+ uart2: uart@73fc0000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fc0000 0x4000>;
interrupts = <32>;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 2ab7f80a0a3..2dccce46ed8 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -29,8 +29,8 @@
aips@50000000 { /* AIPS1 */
spba@50000000 {
esdhc@50004000 { /* ESDHC1 */
- cd-gpios = <&gpio0 1 0>; /* GPIO1_1 */
- wp-gpios = <&gpio0 9 0>; /* GPIO1_9 */
+ cd-gpios = <&gpio1 1 0>;
+ wp-gpios = <&gpio1 9 0>;
status = "okay";
};
};
@@ -44,7 +44,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart0: uart@53fbc000 { /* UART1 */
+ uart1: uart@53fbc000 {
status = "okay";
};
};
@@ -67,7 +67,7 @@
compatible = "smsc,lan9220", "smsc,lan9115";
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
- interrupt-parent = <&gpio1>;
+ interrupt-parent = <&gpio2>;
interrupts = <31>;
reg-io-width = <4>;
smsc,irq-push-pull;
@@ -79,34 +79,34 @@
home {
label = "Home";
- gpios = <&gpio4 10 0>; /* GPIO5_10 */
+ gpios = <&gpio5 10 0>;
linux,code = <102>; /* KEY_HOME */
gpio-key,wakeup;
};
back {
label = "Back";
- gpios = <&gpio4 11 0>; /* GPIO5_11 */
+ gpios = <&gpio5 11 0>;
linux,code = <158>; /* KEY_BACK */
gpio-key,wakeup;
};
program {
label = "Program";
- gpios = <&gpio4 12 0>; /* GPIO5_12 */
+ gpios = <&gpio5 12 0>;
linux,code = <362>; /* KEY_PROGRAM */
gpio-key,wakeup;
};
volume-up {
label = "Volume Up";
- gpios = <&gpio4 13 0>; /* GPIO5_13 */
+ gpios = <&gpio5 13 0>;
linux,code = <115>; /* KEY_VOLUMEUP */
};
volume-down {
label = "Volume Down";
- gpios = <&gpio3 0 0>; /* GPIO4_0 */
+ gpios = <&gpio4 0 0>;
linux,code = <114>; /* KEY_VOLUMEDOWN */
};
};
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index 3f3a88185ff..5bac4aa4800 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -29,15 +29,14 @@
aips@50000000 { /* AIPS1 */
spba@50000000 {
esdhc@50004000 { /* ESDHC1 */
- cd-gpios = <&gpio2 13 0>; /* GPIO3_13 */
- wp-gpios = <&gpio2 14 0>; /* GPIO3_14 */
+ cd-gpios = <&gpio3 13 0>;
+ wp-gpios = <&gpio3 14 0>;
status = "okay";
};
ecspi@50010000 { /* ECSPI1 */
fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio1 30 0>, /* GPIO2_30 */
- <&gpio2 19 0>; /* GPIO3_19 */
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
status = "okay";
flash: at45db321d@1 {
@@ -61,8 +60,8 @@
};
esdhc@50020000 { /* ESDHC3 */
- cd-gpios = <&gpio2 11 0>; /* GPIO3_11 */
- wp-gpios = <&gpio2 12 0>; /* GPIO3_12 */
+ cd-gpios = <&gpio3 11 0>;
+ wp-gpios = <&gpio3 12 0>;
status = "okay";
};
};
@@ -76,7 +75,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart0: uart@53fbc000 { /* UART1 */
+ uart1: uart@53fbc000 {
status = "okay";
};
};
@@ -102,7 +101,7 @@
fec@63fec000 {
phy-mode = "rmii";
- phy-reset-gpios = <&gpio6 6 0>; /* GPIO7_6 */
+ phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
};
};
@@ -113,7 +112,7 @@
green {
label = "Heartbeat";
- gpios = <&gpio6 7 0>; /* GPIO7_7 */
+ gpios = <&gpio7 7 0>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index ae6de6d0c3f..5c57c8672c3 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -29,13 +29,13 @@
aips@50000000 { /* AIPS1 */
spba@50000000 {
esdhc@50004000 { /* ESDHC1 */
- cd-gpios = <&gpio2 13 0>; /* GPIO3_13 */
+ cd-gpios = <&gpio3 13 0>;
status = "okay";
};
esdhc@50020000 { /* ESDHC3 */
- cd-gpios = <&gpio2 11 0>; /* GPIO3_11 */
- wp-gpios = <&gpio2 12 0>; /* GPIO3_12 */
+ cd-gpios = <&gpio3 11 0>;
+ wp-gpios = <&gpio3 12 0>;
status = "okay";
};
};
@@ -49,7 +49,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart0: uart@53fbc000 { /* UART1 */
+ uart1: uart@53fbc000 {
status = "okay";
};
};
@@ -84,7 +84,7 @@
fec@63fec000 {
phy-mode = "rmii";
- phy-reset-gpios = <&gpio6 6 0>; /* GPIO7_6 */
+ phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
};
};
@@ -95,20 +95,20 @@
power {
label = "Power Button";
- gpios = <&gpio0 8 0>; /* GPIO1_8 */
+ gpios = <&gpio1 8 0>;
linux,code = <116>; /* KEY_POWER */
gpio-key,wakeup;
};
volume-up {
label = "Volume Up";
- gpios = <&gpio1 14 0>; /* GPIO2_14 */
+ gpios = <&gpio2 14 0>;
linux,code = <115>; /* KEY_VOLUMEUP */
};
volume-down {
label = "Volume Down";
- gpios = <&gpio1 15 0>; /* GPIO2_15 */
+ gpios = <&gpio2 15 0>;
linux,code = <114>; /* KEY_VOLUMEDOWN */
};
};
@@ -118,7 +118,7 @@
user {
label = "Heartbeat";
- gpios = <&gpio6 7 0>; /* GPIO7_7 */
+ gpios = <&gpio7 7 0>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index b1c062eea71..c7ee86c2dfb 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -29,8 +29,8 @@
aips@50000000 { /* AIPS1 */
spba@50000000 {
esdhc@50004000 { /* ESDHC1 */
- cd-gpios = <&gpio2 13 0>; /* GPIO3_13 */
- wp-gpios = <&gpio3 11 0>; /* GPIO4_11 */
+ cd-gpios = <&gpio3 13 0>;
+ wp-gpios = <&gpio4 11 0>;
status = "okay";
};
@@ -39,15 +39,14 @@
status = "okay";
};
- uart2: uart@5000c000 { /* UART3 */
+ uart3: uart@5000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
ecspi@50010000 { /* ECSPI1 */
fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio1 30 0>, /* GPIO2_30 */
- <&gpio2 19 0>; /* GPIO3_19 */
+ cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
status = "okay";
zigbee: mc1323@0 {
@@ -91,11 +90,11 @@
reg = <0x53fa8000 0x4000>;
};
- uart0: uart@53fbc000 { /* UART1 */
+ uart1: uart@53fbc000 {
status = "okay";
};
- uart1: uart@53fc0000 { /* UART2 */
+ uart2: uart@53fc0000 {
status = "okay";
};
};
@@ -145,7 +144,7 @@
fec@63fec000 {
phy-mode = "rmii";
- phy-reset-gpios = <&gpio6 6 0>; /* GPIO7_6 */
+ phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
};
};
@@ -156,13 +155,13 @@
volume-up {
label = "Volume Up";
- gpios = <&gpio1 14 0>; /* GPIO2_14 */
+ gpios = <&gpio2 14 0>;
linux,code = <115>; /* KEY_VOLUMEUP */
};
volume-down {
label = "Volume Down";
- gpios = <&gpio1 15 0>; /* GPIO2_15 */
+ gpios = <&gpio2 15 0>;
linux,code = <114>; /* KEY_VOLUMEDOWN */
};
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 099cd84ee37..5dd91b942c9 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -14,11 +14,11 @@
/ {
aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
};
tzic: tz-interrupt-controller@0fffc000 {
@@ -88,7 +88,7 @@
status = "disabled";
};
- uart2: uart@5000c000 { /* UART3 */
+ uart3: uart@5000c000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x5000c000 0x4000>;
interrupts = <33>;
@@ -119,7 +119,7 @@
};
};
- gpio0: gpio@53f84000 { /* GPIO1 */
+ gpio1: gpio@53f84000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53f84000 0x4000>;
interrupts = <50 51>;
@@ -129,7 +129,7 @@
#interrupt-cells = <1>;
};
- gpio1: gpio@53f88000 { /* GPIO2 */
+ gpio2: gpio@53f88000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53f88000 0x4000>;
interrupts = <52 53>;
@@ -139,7 +139,7 @@
#interrupt-cells = <1>;
};
- gpio2: gpio@53f8c000 { /* GPIO3 */
+ gpio3: gpio@53f8c000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53f8c000 0x4000>;
interrupts = <54 55>;
@@ -149,7 +149,7 @@
#interrupt-cells = <1>;
};
- gpio3: gpio@53f90000 { /* GPIO4 */
+ gpio4: gpio@53f90000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53f90000 0x4000>;
interrupts = <56 57>;
@@ -173,21 +173,21 @@
status = "disabled";
};
- uart0: uart@53fbc000 { /* UART1 */
+ uart1: uart@53fbc000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart1: uart@53fc0000 { /* UART2 */
+ uart2: uart@53fc0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fc0000 0x4000>;
interrupts = <32>;
status = "disabled";
};
- gpio4: gpio@53fdc000 { /* GPIO5 */
+ gpio5: gpio@53fdc000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53fdc000 0x4000>;
interrupts = <103 104>;
@@ -197,7 +197,7 @@
#interrupt-cells = <1>;
};
- gpio5: gpio@53fe0000 { /* GPIO6 */
+ gpio6: gpio@53fe0000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53fe0000 0x4000>;
interrupts = <105 106>;
@@ -207,7 +207,7 @@
#interrupt-cells = <1>;
};
- gpio6: gpio@53fe4000 { /* GPIO7 */
+ gpio7: gpio@53fe4000 {
compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
reg = <0x53fe4000 0x4000>;
interrupts = <107 108>;
@@ -226,7 +226,7 @@
status = "disabled";
};
- uart3: uart@53ff0000 { /* UART4 */
+ uart4: uart@53ff0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53ff0000 0x4000>;
interrupts = <13>;
@@ -241,7 +241,7 @@
reg = <0x60000000 0x10000000>;
ranges;
- uart4: uart@63f90000 { /* UART5 */
+ uart5: uart@63f90000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x63f90000 0x4000>;
interrupts = <86>;
diff --git a/arch/arm/boot/dts/imx6q-sabreauto.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 072974e443f..c3977e0478b 100644
--- a/arch/arm/boot/dts/imx6q-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -14,8 +14,8 @@
/include/ "imx6q.dtsi"
/ {
- model = "Freescale i.MX6 Quad SABRE Automotive Board";
- compatible = "fsl,imx6q-sabreauto", "fsl,imx6q";
+ model = "Freescale i.MX6 Quad Armadillo2 Board";
+ compatible = "fsl,imx6q-arm2", "fsl,imx6q";
chosen {
bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
@@ -34,8 +34,8 @@
};
usdhc@02198000 { /* uSDHC3 */
- cd-gpios = <&gpio5 11 0>; /* GPIO6_11 */
- wp-gpios = <&gpio5 14 0>; /* GPIO6_14 */
+ cd-gpios = <&gpio6 11 0>;
+ wp-gpios = <&gpio6 14 0>;
status = "okay";
};
@@ -44,7 +44,7 @@
status = "okay";
};
- uart3: uart@021f0000 { /* UART4 */
+ uart4: uart@021f0000 {
status = "okay";
};
};
@@ -55,7 +55,7 @@
debug-led {
label = "Heartbeat";
- gpios = <&gpio2 25 0>; /* GPIO3_25 */
+ gpios = <&gpio3 25 0>;
linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
new file mode 100644
index 00000000000..08d920de728
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+ model = "Freescale i.MX6 Quad SABRE Lite Board";
+ compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ soc {
+ aips-bus@02100000 { /* AIPS2 */
+ enet@02188000 {
+ phy-mode = "rgmii";
+ phy-reset-gpios = <&gpio3 23 0>;
+ status = "okay";
+ };
+
+ usdhc@02198000 { /* uSDHC3 */
+ cd-gpios = <&gpio7 0 0>;
+ wp-gpios = <&gpio7 1 0>;
+ status = "okay";
+ };
+
+ usdhc@0219c000 { /* uSDHC4 */
+ cd-gpios = <&gpio2 6 0>;
+ wp-gpios = <&gpio2 7 0>;
+ status = "okay";
+ };
+
+ uart2: uart@021e8000 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 7dda599558c..263e8f3664b 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -14,11 +14,11 @@
/ {
aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
};
cpus {
@@ -165,7 +165,7 @@
status = "disabled";
};
- uart0: uart@02020000 { /* UART1 */
+ uart1: uart@02020000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x02020000 0x4000>;
interrupts = <0 26 0x04>;
@@ -247,7 +247,7 @@
interrupts = <0 55 0x04>;
};
- gpio0: gpio@0209c000 { /* GPIO1 */
+ gpio1: gpio@0209c000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x0209c000 0x4000>;
interrupts = <0 66 0x04 0 67 0x04>;
@@ -257,7 +257,7 @@
#interrupt-cells = <1>;
};
- gpio1: gpio@020a0000 { /* GPIO2 */
+ gpio2: gpio@020a0000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020a0000 0x4000>;
interrupts = <0 68 0x04 0 69 0x04>;
@@ -267,7 +267,7 @@
#interrupt-cells = <1>;
};
- gpio2: gpio@020a4000 { /* GPIO3 */
+ gpio3: gpio@020a4000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020a4000 0x4000>;
interrupts = <0 70 0x04 0 71 0x04>;
@@ -277,7 +277,7 @@
#interrupt-cells = <1>;
};
- gpio3: gpio@020a8000 { /* GPIO4 */
+ gpio4: gpio@020a8000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020a8000 0x4000>;
interrupts = <0 72 0x04 0 73 0x04>;
@@ -287,7 +287,7 @@
#interrupt-cells = <1>;
};
- gpio4: gpio@020ac000 { /* GPIO5 */
+ gpio5: gpio@020ac000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020ac000 0x4000>;
interrupts = <0 74 0x04 0 75 0x04>;
@@ -297,7 +297,7 @@
#interrupt-cells = <1>;
};
- gpio5: gpio@020b0000 { /* GPIO6 */
+ gpio6: gpio@020b0000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020b0000 0x4000>;
interrupts = <0 76 0x04 0 77 0x04>;
@@ -307,7 +307,7 @@
#interrupt-cells = <1>;
};
- gpio6: gpio@020b4000 { /* GPIO7 */
+ gpio7: gpio@020b4000 {
compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
reg = <0x020b4000 0x4000>;
interrupts = <0 78 0x04 0 79 0x04>;
@@ -543,28 +543,28 @@
interrupts = <0 18 0x04>;
};
- uart1: uart@021e8000 { /* UART2 */
+ uart2: uart@021e8000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021e8000 0x4000>;
interrupts = <0 27 0x04>;
status = "disabled";
};
- uart2: uart@021ec000 { /* UART3 */
+ uart3: uart@021ec000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021ec000 0x4000>;
interrupts = <0 28 0x04>;
status = "disabled";
};
- uart3: uart@021f0000 { /* UART4 */
+ uart4: uart@021f0000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f0000 0x4000>;
interrupts = <0 29 0x04>;
status = "disabled";
};
- uart4: uart@021f4000 { /* UART5 */
+ uart5: uart@021f4000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f4000 0x4000>;
interrupts = <0 30 0x04>;
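The i.MX51, i.MX53 and i.MX6Q hunks above all make the same change: the GPIO and UART labels are renumbered to match the reference-manual names (GPIO1..GPIO7, UART1..UART5) instead of starting at 0, and the serial aliases are rewritten in step so the alias order, and hence the ttymxc numbering, is unchanged. Board references therefore no longer need translation comments. For example, taking two lines from the hunks above:

	aliases {
		serial0 = &uart1;	/* the first UART keeps alias 0 */
	};

	esdhc@70008000 {
		/* <&bank pin flags>: the label now names the hardware bank directly,
		 * so the old "GPIO1_6" comment becomes redundant */
		cd-gpios = <&gpio1 6 0>;
	};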
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
new file mode 100644
index 00000000000..f2ab4ea7cc0
--- /dev/null
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -0,0 +1,67 @@
+/*
+ * Device Tree Source for OMAP2 SoC
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "ti,omap2430", "ti,omap2420", "ti,omap2";
+
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,arm1136jf-s";
+ };
+ };
+
+ soc {
+ compatible = "ti,omap-infra";
+ mpu {
+ compatible = "ti,omap2-mpu";
+ ti,hwmods = "mpu";
+ };
+ };
+
+ ocp {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "l3_main";
+
+ intc: interrupt-controller@1 {
+ compatible = "ti,omap2-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart1: serial@4806a000 {
+ compatible = "ti,omap2-uart";
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ };
+
+ uart2: serial@4806c000 {
+ compatible = "ti,omap2-uart";
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ };
+
+ uart3: serial@4806e000 {
+ compatible = "ti,omap2-uart";
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index d202bb5ec7e..216c3317461 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -13,6 +13,13 @@
/ {
compatible = "ti,omap3430", "ti,omap3";
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ };
+
cpus {
cpu@0 {
compatible = "arm,cortex-a8";
@@ -59,5 +66,29 @@
interrupt-controller;
#interrupt-cells = <1>;
};
+
+ uart1: serial@0x4806a000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ };
+
+ uart2: serial@0x4806c000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ };
+
+ uart3: serial@0x49020000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ };
+
+ uart4: serial@0x49042000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 4c61c829043..e8fe75fac7c 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -21,6 +21,10 @@
interrupt-parent = <&gic>;
aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
};
cpus {
@@ -99,5 +103,29 @@
reg = <0x48241000 0x1000>,
<0x48240100 0x0100>;
};
+
+ uart1: serial@0x4806a000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ };
+
+ uart2: serial@0x4806c000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ };
+
+ uart3: serial@0x48020000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ };
+
+ uart4: serial@0x4806e000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
};
};
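The OMAP2/3/4 serial nodes added above carry no reg or interrupts properties; the ti,hwmods string ties each node to the corresponding hwmod entry, so the register ranges and IRQs are taken from the kernel's hwmod tables rather than from the device tree, and the new aliases pin down the serial port order. Decoded, one of the nodes above reads:

	uart3: serial@4806e000 {
		compatible = "ti,omap2-uart";
		ti,hwmods = "uart3";		/* resources are looked up from the hwmod data */
		clock-frequency = <48000000>;	/* UART functional clock rate in Hz */
	};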
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts
new file mode 100644
index 00000000000..70c41fc897d
--- /dev/null
+++ b/arch/arm/boot/dts/tegra-cardhu.dts
@@ -0,0 +1,36 @@
+/dts-v1/;
+
+/include/ "tegra30.dtsi"
+
+/ {
+ model = "NVIDIA Tegra30 Cardhu evaluation board";
+ compatible = "nvidia,cardhu", "nvidia,tegra30";
+
+ memory {
+ reg = < 0x80000000 0x40000000 >;
+ };
+
+ serial@70006000 {
+ clock-frequency = < 408000000 >;
+ };
+
+ i2c@7000c000 {
+ clock-frequency = <100000>;
+ };
+
+ i2c@7000c400 {
+ clock-frequency = <100000>;
+ };
+
+ i2c@7000c500 {
+ clock-frequency = <100000>;
+ };
+
+ i2c@7000c700 {
+ clock-frequency = <100000>;
+ };
+
+ i2c@7000d000 {
+ clock-frequency = <100000>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 0e225b86b65..80afa1b70b8 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -1,16 +1,11 @@
/dts-v1/;
-/memreserve/ 0x1c000000 0x04000000;
/include/ "tegra20.dtsi"
/ {
model = "NVIDIA Tegra2 Harmony evaluation board";
compatible = "nvidia,harmony", "nvidia,tegra20";
- chosen {
- bootargs = "vmalloc=192M video=tegrafb console=ttyS0,115200n8 root=/dev/mmcblk0p2 rw rootwait";
- };
-
memory@0 {
reg = < 0x00000000 0x40000000 >;
};
@@ -52,16 +47,40 @@
ext-mic-en-gpios = <&gpio 185 0>;
};
+ serial@70006000 {
+ status = "disable";
+ };
+
+ serial@70006040 {
+ status = "disable";
+ };
+
+ serial@70006200 {
+ status = "disable";
+ };
+
serial@70006300 {
clock-frequency = < 216000000 >;
};
+ serial@70006400 {
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ status = "disable";
+ };
+
sdhci@c8000200 {
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 155 0>; /* gpio PT3 */
};
+ sdhci@c8000400 {
+ status = "disable";
+ };
+
sdhci@c8000600 {
cd-gpios = <&gpio 58 0>; /* gpio PH2 */
wp-gpios = <&gpio 59 0>; /* gpio PH3 */
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
new file mode 100644
index 00000000000..1a1d7023b69
--- /dev/null
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -0,0 +1,77 @@
+/dts-v1/;
+
+/include/ "tegra20.dtsi"
+
+/ {
+ model = "Toshiba AC100 / Dynabook AZ";
+ compatible = "compal,paz00", "nvidia,tegra20";
+
+ memory@0 {
+ reg = <0x00000000 0x20000000>;
+ };
+
+ i2c@7000c000 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c400 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c500 {
+ status = "disable";
+ };
+
+ nvec@7000c500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,nvec";
+ reg = <0x7000C500 0x100>;
+ interrupts = <0 92 0x04>;
+ clock-frequency = <80000>;
+ request-gpios = <&gpio 170 0>;
+ slave-addr = <138>;
+ };
+
+ i2c@7000d000 {
+ clock-frequency = <400000>;
+ };
+
+ serial@70006000 {
+ clock-frequency = <216000000>;
+ };
+
+ serial@70006040 {
+ status = "disable";
+ };
+
+ serial@70006200 {
+ status = "disable";
+ };
+
+ serial@70006300 {
+ clock-frequency = <216000000>;
+ };
+
+ serial@70006400 {
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ cd-gpios = <&gpio 173 0>; /* gpio PV5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 155 0>; /* gpio PT3 */
+ };
+
+ sdhci@c8000200 {
+ status = "disable";
+ };
+
+ sdhci@c8000400 {
+ status = "disable";
+ };
+
+ sdhci@c8000600 {
+ support-8bit;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index a72299b8e66..b55a02e34ba 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -1,25 +1,65 @@
/dts-v1/;
-/memreserve/ 0x1c000000 0x04000000;
/include/ "tegra20.dtsi"
/ {
model = "NVIDIA Seaboard";
compatible = "nvidia,seaboard", "nvidia,tegra20";
- chosen {
- bootargs = "vmalloc=192M video=tegrafb console=ttyS0,115200n8 root=/dev/mmcblk1p3 rw rootwait";
- };
-
memory {
device_type = "memory";
reg = < 0x00000000 0x40000000 >;
};
+ i2c@7000c000 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c400 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c500 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000d000 {
+ clock-frequency = <400000>;
+
+ adt7461@4c {
+ compatible = "adt7461";
+ reg = <0x4c>;
+ };
+ };
+
+ serial@70006000 {
+ status = "disable";
+ };
+
+ serial@70006040 {
+ status = "disable";
+ };
+
+ serial@70006200 {
+ status = "disable";
+ };
+
serial@70006300 {
clock-frequency = < 216000000 >;
};
+ serial@70006400 {
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ status = "disable";
+ };
+
+ sdhci@c8000200 {
+ status = "disable";
+ };
+
sdhci@c8000400 {
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
@@ -29,4 +69,28 @@
sdhci@c8000600 {
support-8bit;
};
+
+ usb@c5000000 {
+ nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ label = "Power";
+ gpios = <&gpio 170 1>; /* gpio PV2, active low */
+ linux,code = <116>; /* KEY_POWER */
+ gpio-key,wakeup;
+ };
+
+ lid {
+ label = "Lid";
+ gpios = <&gpio 23 0>; /* gpio PC7 */
+ linux,input-type = <5>; /* EV_SW */
+ linux,code = <0>; /* SW_LID */
+ debounce-interval = <1>;
+ gpio-key,wakeup;
+ };
+ };
};
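In the Seaboard gpio-keys node above, the second cell of each gpios entry is the flags cell defined by the Tegra GPIO controller's #gpio-cells = <2>; bit 0 set marks the line active-low, which is why the power button uses <&gpio 170 1> (PV2, pulled low when pressed) while the lid switch uses flag 0. Spelled out:

	power {
		label = "Power";
		/* <controller gpio-number flags>; flags bit 0 = active low */
		gpios = <&gpio 170 1>;		/* gpio PV2, active low */
		linux,code = <116>;		/* KEY_POWER */
		gpio-key,wakeup;
	};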
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
new file mode 100644
index 00000000000..3b3ee7db99f
--- /dev/null
+++ b/arch/arm/boot/dts/tegra-trimslice.dts
@@ -0,0 +1,65 @@
+/dts-v1/;
+
+/include/ "tegra20.dtsi"
+
+/ {
+ model = "Compulab TrimSlice board";
+ compatible = "compulab,trimslice", "nvidia,tegra20";
+
+ memory@0 {
+ reg = < 0x00000000 0x40000000 >;
+ };
+
+ i2c@7000c000 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c400 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c500 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000d000 {
+ status = "disable";
+ };
+
+ serial@70006000 {
+ clock-frequency = < 216000000 >;
+ };
+
+ serial@70006040 {
+ status = "disable";
+ };
+
+ serial@70006200 {
+ status = "disable";
+ };
+
+ serial@70006300 {
+ status = "disable";
+ };
+
+ serial@70006400 {
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ status = "disable";
+ };
+
+ sdhci@c8000200 {
+ status = "disable";
+ };
+
+ sdhci@c8000400 {
+ status = "disable";
+ };
+
+ sdhci@c8000600 {
+ cd-gpios = <&gpio 121 0>;
+ wp-gpios = <&gpio 122 0>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 3f9abd6b696..c7d3b87f29d 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -1,24 +1,59 @@
/dts-v1/;
-/memreserve/ 0x1c000000 0x04000000;
/include/ "tegra20.dtsi"
/ {
model = "NVIDIA Tegra2 Ventana evaluation board";
compatible = "nvidia,ventana", "nvidia,tegra20";
- chosen {
- bootargs = "vmalloc=192M video=tegrafb console=ttyS0,115200n8 root=/dev/ram rdinit=/sbin/init";
- };
-
memory {
reg = < 0x00000000 0x40000000 >;
};
+ i2c@7000c000 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c400 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000c500 {
+ clock-frequency = <400000>;
+ };
+
+ i2c@7000d000 {
+ clock-frequency = <400000>;
+ };
+
+ serial@70006000 {
+ status = "disable";
+ };
+
+ serial@70006040 {
+ status = "disable";
+ };
+
+ serial@70006200 {
+ status = "disable";
+ };
+
serial@70006300 {
clock-frequency = < 216000000 >;
};
+ serial@70006400 {
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ status = "disable";
+ };
+
+ sdhci@c8000200 {
+ status = "disable";
+ };
+
sdhci@c8000400 {
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 65d7e6a333e..3da7afd4532 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -5,9 +5,9 @@
interrupt-parent = <&intc>;
intc: interrupt-controller@50041000 {
- compatible = "nvidia,tegra20-gic";
+ compatible = "arm,cortex-a9-gic";
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <3>;
reg = < 0x50041000 0x1000 >,
< 0x50040100 0x0100 >;
};
@@ -17,7 +17,7 @@
#size-cells = <0>;
compatible = "nvidia,tegra20-i2c";
reg = <0x7000C000 0x100>;
- interrupts = < 70 >;
+ interrupts = < 0 38 0x04 >;
};
i2c@7000c400 {
@@ -25,7 +25,7 @@
#size-cells = <0>;
compatible = "nvidia,tegra20-i2c";
reg = <0x7000C400 0x100>;
- interrupts = < 116 >;
+ interrupts = < 0 84 0x04 >;
};
i2c@7000c500 {
@@ -33,38 +33,32 @@
#size-cells = <0>;
compatible = "nvidia,tegra20-i2c";
reg = <0x7000C500 0x100>;
- interrupts = < 124 >;
+ interrupts = < 0 92 0x04 >;
};
i2c@7000d000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
+ compatible = "nvidia,tegra20-i2c-dvc";
reg = <0x7000D000 0x200>;
- interrupts = < 85 >;
+ interrupts = < 0 53 0x04 >;
};
i2s@70002800 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "nvidia,tegra20-i2s";
reg = <0x70002800 0x200>;
- interrupts = < 45 >;
+ interrupts = < 0 13 0x04 >;
dma-channel = < 2 >;
};
i2s@70002a00 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "nvidia,tegra20-i2s";
reg = <0x70002a00 0x200>;
- interrupts = < 35 >;
+ interrupts = < 0 3 0x04 >;
dma-channel = < 1 >;
};
das@70000c00 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "nvidia,tegra20-das";
reg = <0x70000c00 0x80>;
};
@@ -72,7 +66,13 @@
gpio: gpio@6000d000 {
compatible = "nvidia,tegra20-gpio";
reg = < 0x6000d000 0x1000 >;
- interrupts = < 64 65 66 67 87 119 121 >;
+ interrupts = < 0 32 0x04
+ 0 33 0x04
+ 0 34 0x04
+ 0 35 0x04
+ 0 55 0x04
+ 0 87 0x04
+ 0 89 0x04 >;
#gpio-cells = <2>;
gpio-controller;
};
@@ -89,59 +89,80 @@
compatible = "nvidia,tegra20-uart";
reg = <0x70006000 0x40>;
reg-shift = <2>;
- interrupts = < 68 >;
+ interrupts = < 0 36 0x04 >;
};
serial@70006040 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006040 0x40>;
reg-shift = <2>;
- interrupts = < 69 >;
+ interrupts = < 0 37 0x04 >;
};
serial@70006200 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006200 0x100>;
reg-shift = <2>;
- interrupts = < 78 >;
+ interrupts = < 0 46 0x04 >;
};
serial@70006300 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006300 0x100>;
reg-shift = <2>;
- interrupts = < 122 >;
+ interrupts = < 0 90 0x04 >;
};
serial@70006400 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006400 0x100>;
reg-shift = <2>;
- interrupts = < 123 >;
+ interrupts = < 0 91 0x04 >;
};
sdhci@c8000000 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000000 0x200>;
- interrupts = < 46 >;
+ interrupts = < 0 14 0x04 >;
};
sdhci@c8000200 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000200 0x200>;
- interrupts = < 47 >;
+ interrupts = < 0 15 0x04 >;
};
sdhci@c8000400 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000400 0x200>;
- interrupts = < 51 >;
+ interrupts = < 0 19 0x04 >;
};
sdhci@c8000600 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000600 0x200>;
- interrupts = < 63 >;
+ interrupts = < 0 31 0x04 >;
+ };
+
+ usb@c5000000 {
+ compatible = "nvidia,tegra20-ehci", "usb-ehci";
+ reg = <0xc5000000 0x4000>;
+ interrupts = < 0 20 0x04 >;
+ phy_type = "utmi";
+ };
+
+ usb@c5004000 {
+ compatible = "nvidia,tegra20-ehci", "usb-ehci";
+ reg = <0xc5004000 0x4000>;
+ interrupts = < 0 21 0x04 >;
+ phy_type = "ulpi";
+ };
+
+ usb@c5008000 {
+ compatible = "nvidia,tegra20-ehci", "usb-ehci";
+ reg = <0xc5008000 0x4000>;
+ interrupts = < 0 97 0x04 >;
+ phy_type = "utmi";
};
};
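The tegra20.dtsi changes above switch the interrupt controller to the generic arm,cortex-a9-gic binding, so #interrupt-cells goes from 1 to 3 and every interrupts property becomes a <type number flags> triplet: the first cell selects SPI (0) or PPI (1), the second is the interrupt number within that space, and the third is the trigger type (0x04 = level triggered, active high). The old flat values were absolute GIC IDs, so SPIs simply lose the 32-interrupt offset; the first I2C controller's 70 becomes SPI 38, for instance:

	i2c@7000c000 {
		compatible = "nvidia,tegra20-i2c";
		reg = <0x7000C000 0x100>;
		/* <0 38 0x04>: SPI, number 38 (legacy 70 - 32), level triggered, active high */
		interrupts = < 0 38 0x04 >;
	};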
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
new file mode 100644
index 00000000000..ee7db9892e0
--- /dev/null
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -0,0 +1,127 @@
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "nvidia,tegra30";
+ interrupt-parent = <&intc>;
+
+ intc: interrupt-controller@50041000 {
+ compatible = "arm,cortex-a9-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = < 0x50041000 0x1000 >,
+ < 0x50040100 0x0100 >;
+ };
+
+ i2c@7000c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000C000 0x100>;
+ interrupts = < 0 38 0x04 >;
+ };
+
+ i2c@7000c400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000C400 0x100>;
+ interrupts = < 0 84 0x04 >;
+ };
+
+ i2c@7000c500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000C500 0x100>;
+ interrupts = < 0 92 0x04 >;
+ };
+
+ i2c@7000c700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c700 0x100>;
+ interrupts = < 0 120 0x04 >;
+ };
+
+ i2c@7000d000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000D000 0x100>;
+ interrupts = < 0 53 0x04 >;
+ };
+
+ gpio: gpio@6000d000 {
+ compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
+ reg = < 0x6000d000 0x1000 >;
+ interrupts = < 0 32 0x04 0 33 0x04 0 34 0x04 0 35 0x04 0 55 0x04 0 87 0x04 0 89 0x04 >;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ serial@70006000 {
+ compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+ reg = <0x70006000 0x40>;
+ reg-shift = <2>;
+ interrupts = < 0 36 0x04 >;
+ };
+
+ serial@70006040 {
+ compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+ reg = <0x70006040 0x40>;
+ reg-shift = <2>;
+ interrupts = < 0 37 0x04 >;
+ };
+
+ serial@70006200 {
+ compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+ reg = <0x70006200 0x100>;
+ reg-shift = <2>;
+ interrupts = < 0 46 0x04 >;
+ };
+
+ serial@70006300 {
+ compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+ reg = <0x70006300 0x100>;
+ reg-shift = <2>;
+ interrupts = < 0 90 0x04 >;
+ };
+
+ serial@70006400 {
+ compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
+ reg = <0x70006400 0x100>;
+ reg-shift = <2>;
+ interrupts = < 0 91 0x04 >;
+ };
+
+ sdhci@78000000 {
+ compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+ reg = <0x78000000 0x200>;
+ interrupts = < 0 14 0x04 >;
+ };
+
+ sdhci@78000200 {
+ compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+ reg = <0x78000200 0x200>;
+ interrupts = < 0 15 0x04 >;
+ };
+
+ sdhci@78000400 {
+ compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+ reg = <0x78000400 0x200>;
+ interrupts = < 0 19 0x04 >;
+ };
+
+ sdhci@78000600 {
+ compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
+ reg = <0x78000600 0x200>;
+ interrupts = < 0 31 0x04 >;
+ };
+
+ pinmux: pinmux@70000000 {
+ compatible = "nvidia,tegra30-pinmux";
+ reg = < 0x70000868 0xd0 /* Pad control registers */
+ 0x70003000 0x3e0 >; /* Mux registers */
+ };
+};
diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/arch/arm/boot/dts/testcases/tests-phandle.dtsi
new file mode 100644
index 00000000000..ec0c4e6212c
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests-phandle.dtsi
@@ -0,0 +1,37 @@
+
+/ {
+ testcase-data {
+ phandle-tests {
+ provider0: provider0 {
+ #phandle-cells = <0>;
+ };
+
+ provider1: provider1 {
+ #phandle-cells = <1>;
+ };
+
+ provider2: provider2 {
+ #phandle-cells = <2>;
+ };
+
+ provider3: provider3 {
+ #phandle-cells = <3>;
+ };
+
+ consumer-a {
+ phandle-list = <&provider1 1>,
+ <&provider2 2 0>,
+ <0>,
+ <&provider3 4 4 3>,
+ <&provider2 5 100>,
+ <&provider0>,
+ <&provider1 7>;
+ phandle-list-names = "first", "second", "third";
+
+ phandle-list-bad-phandle = <12345678 0 0>;
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider3 0>;
+ };
+ };
+ };
+};
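The testcase data above exercises phandle-plus-arguments parsing: each provider's #phandle-cells states how many argument cells follow a reference to it, consumer-a's phandle-list mixes providers accordingly, and phandle-list-bad-phandle and phandle-list-bad-args hold deliberately malformed entries, presumably to exercise the error paths. Reading one pair from the list above:

	provider2: provider2 {
		#phandle-cells = <2>;		/* two argument cells follow each reference */
	};

	consumer-a {
		/* &provider2 is followed by exactly two cells; the bare <0> entry is an empty slot */
		phandle-list = <&provider2 2 0>, <0>;
	};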
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
new file mode 100644
index 00000000000..a7c5067622e
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests.dtsi
@@ -0,0 +1 @@
+/include/ "tests-phandle.dtsi"
diff --git a/arch/arm/boot/dts/usb_a9g20.dts b/arch/arm/boot/dts/usb_a9g20.dts
index d66e2c00ac3..f04b535477f 100644
--- a/arch/arm/boot/dts/usb_a9g20.dts
+++ b/arch/arm/boot/dts/usb_a9g20.dts
@@ -25,6 +25,11 @@
dbgu: serial@fffff200 {
status = "okay";
};
+
+ macb0: ethernet@fffc4000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 8a614e39800..166461073b7 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -46,3 +46,5 @@
};
};
};
+
+/include/ "testcases/tests.dtsi"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 74df9ca2be3..81a933eb090 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,8 +1,14 @@
config ARM_GIC
select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+ bool
+
+config GIC_NON_BANKED
bool
config ARM_VIC
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
bool
config ARM_VIC_NR
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 0e6ae470c94..b2dc2dd7f1d 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -40,13 +40,36 @@
#include <linux/slab.h>
#include <asm/irq.h>
+#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+union gic_base {
+ void __iomem *common_base;
+ void __percpu __iomem **percpu_base;
+};
+
+struct gic_chip_data {
+ unsigned int irq_offset;
+ union gic_base dist_base;
+ union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+#endif
+#ifdef CONFIG_IRQ_DOMAIN
+ struct irq_domain domain;
+#endif
+ unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+ void __iomem *(*get_base)(union gic_base *);
+#endif
+};
-/* Address of GIC 0 CPU interface */
-void __iomem *gic_cpu_base_addr __read_mostly;
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
* Supported arch specific GIC irq extension.
@@ -67,16 +90,48 @@ struct irq_chip gic_arch_extn = {
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+ return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+ return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+ void __iomem *(*f)(union gic_base *))
+{
+ data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d) ((d)->dist_base.common_base)
+#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
- return gic_data->dist_base;
+ return gic_data_dist_base(gic_data);
}
static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
- return gic_data->cpu_base;
+ return gic_data_cpu_base(gic_data);
}
static inline unsigned int gic_irq(struct irq_data *d)
@@ -215,6 +270,32 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
#define gic_set_wake NULL
#endif
+asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ struct gic_chip_data *gic = &gic_data[0];
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & ~0x1c00;
+
+ if (likely(irqnr > 15 && irqnr < 1021)) {
+ irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ if (irqnr < 16) {
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+ handle_IPI(irqnr, regs);
+#endif
+ continue;
+ }
+ break;
+ } while (1);
+}
+
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
struct gic_chip_data *chip_data = irq_get_handler_data(irq);
@@ -225,7 +306,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
raw_spin_lock(&irq_controller_lock);
- status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
raw_spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
@@ -270,7 +351,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs;
struct irq_domain *domain = &gic->domain;
- void __iomem *base = gic->dist_base;
+ void __iomem *base = gic_data_dist_base(gic);
u32 cpu = 0;
#ifdef CONFIG_SMP
@@ -330,8 +411,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
- void __iomem *dist_base = gic->dist_base;
- void __iomem *base = gic->cpu_base;
+ void __iomem *dist_base = gic_data_dist_base(gic);
+ void __iomem *base = gic_data_cpu_base(gic);
int i;
/*
@@ -368,7 +449,7 @@ static void gic_dist_save(unsigned int gic_nr)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data[gic_nr].dist_base;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
if (!dist_base)
return;
@@ -403,7 +484,7 @@ static void gic_dist_restore(unsigned int gic_nr)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data[gic_nr].dist_base;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
if (!dist_base)
return;
@@ -439,8 +520,8 @@ static void gic_cpu_save(unsigned int gic_nr)
if (gic_nr >= MAX_GIC_NR)
BUG();
- dist_base = gic_data[gic_nr].dist_base;
- cpu_base = gic_data[gic_nr].cpu_base;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
if (!dist_base || !cpu_base)
return;
@@ -465,8 +546,8 @@ static void gic_cpu_restore(unsigned int gic_nr)
if (gic_nr >= MAX_GIC_NR)
BUG();
- dist_base = gic_data[gic_nr].dist_base;
- cpu_base = gic_data[gic_nr].cpu_base;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
if (!dist_base || !cpu_base)
return;
@@ -491,6 +572,11 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
int i;
for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+ /* Skip over unused GICs */
+ if (!gic_data[i].get_base)
+ continue;
+#endif
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(i);
@@ -526,7 +612,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
- cpu_pm_register_notifier(&gic_notifier_block);
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
@@ -563,8 +650,9 @@ const struct irq_domain_ops gic_irq_domain_ops = {
#endif
};
-void __init gic_init(unsigned int gic_nr, int irq_start,
- void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ void __iomem *dist_base, void __iomem *cpu_base,
+ u32 percpu_offset)
{
struct gic_chip_data *gic;
struct irq_domain *domain;
@@ -574,26 +662,55 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
gic = &gic_data[gic_nr];
domain = &gic->domain;
- gic->dist_base = dist_base;
- gic->cpu_base = cpu_base;
+#ifdef CONFIG_GIC_NON_BANKED
+ if (percpu_offset) { /* Franken-GIC without banked registers... */
+ unsigned int cpu;
+
+ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+ if (WARN_ON(!gic->dist_base.percpu_base ||
+ !gic->cpu_base.percpu_base)) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ }
+
+ gic_set_base_accessor(gic, gic_get_percpu_base);
+ } else
+#endif
+ { /* Normal, sane GIC... */
+ WARN(percpu_offset,
+ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+ percpu_offset);
+ gic->dist_base.common_base = dist_base;
+ gic->cpu_base.common_base = cpu_base;
+ gic_set_base_accessor(gic, gic_get_common_base);
+ }
/*
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
+ domain->hwirq_base = 32;
if (gic_nr == 0) {
- gic_cpu_base_addr = cpu_base;
- domain->hwirq_base = 16;
- if (irq_start > 0)
- irq_start = (irq_start & ~31) + 16;
- } else
- domain->hwirq_base = 32;
+ if ((irq_start & 31) > 0) {
+ domain->hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ }
+ }
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
- gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
gic_irqs = (gic_irqs + 1) * 32;
if (gic_irqs > 1020)
gic_irqs = 1020;
@@ -641,7 +758,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
dsb();
/* this always happens on GIC0 */
- writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
@@ -652,6 +769,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
+ u32 percpu_offset;
int irq;
struct irq_domain *domain = &gic_data[gic_cnt].domain;
@@ -664,9 +782,12 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
+ if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
domain->of_node = of_node_get(node);
- gic_init(gic_cnt, -1, dist_base, cpu_base);
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
if (parent) {
irq = irq_of_parse_and_map(node, 0);
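For orientation, a minimal sketch (not part of the patch) of how board code might adopt the reworked entry points: a conventional banked GIC keeps calling gic_init(), which the header change later in this series turns into a wrapper around gic_init_bases() with a zero per-CPU offset, while a non-banked GIC passes its per-CPU register stride explicitly. The IRQ start of 29 and the 0x8000 stride are made-up example values.

#include <linux/init.h>
#include <asm/hardware/gic.h>

/* Hypothetical board init; dist/cpu are already-mapped register bases. */
static void __init example_gic_init_irq(void __iomem *dist, void __iomem *cpu)
{
	/* Banked GIC: legacy call, now forwarded to gic_init_bases(..., 0). */
	gic_init(0, 29, dist, cpu);
}

static void __init example_nonbanked_gic_init_irq(void __iomem *dist, void __iomem *cpu)
{
	/* Non-banked GIC: 0x8000 is an assumed per-CPU register stride. */
	gic_init_bases(0, 29, dist, cpu, 0x8000);
}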
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 7129cfbdacd..d8e44a43047 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -221,17 +221,6 @@
*/
#define MCODE_BUFF_PER_REQ 256
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
- _emit_END(0, (req)->mc_cpu); \
- (req)->mc_len = 0; \
- } while (0)
-
/* If the _pl330_req is available to the client */
#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
@@ -301,8 +290,10 @@ struct pl330_thread {
struct pl330_dmac *dmac;
/* Only two at a time */
struct _pl330_req req[2];
- /* Index of the last submitted request */
+ /* Index of the last enqueued request */
unsigned lstenq;
+ /* Index of the last submitted request or -1 if the DMA is stopped */
+ int req_running;
};
enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
writel(0, regs + DBGCMD);
}
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+ struct _pl330_req *req = &thrd->req[idx];
+
+ _emit_END(0, req->mc_cpu);
+ req->mc_len = 0;
+
+ thrd->req_running = -1;
+}
+
static inline u32 _state(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
}
}
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
- struct _pl330_req *req)
-{
- void __iomem *regs = thrd->dmac->pinfo->base;
- u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
- if (IS_FREE(req))
- return false;
-
- return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
- if (_req_active(thrd, &thrd->req[0]))
- return 1; /* First req active */
-
- if (_req_active(thrd, &thrd->req[1]))
- return 2; /* Second req active */
-
- return 0;
-}
-
static void _stop(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
struct _arg_GO go;
unsigned ns;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ int idx;
/* Return if already ACTIVE */
if (_state(thrd) != PL330_STATE_STOPPED)
return true;
- if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
- req = &thrd->req[1 - thrd->lstenq];
- else if (!IS_FREE(&thrd->req[thrd->lstenq]))
- req = &thrd->req[thrd->lstenq];
- else
- req = NULL;
+ idx = 1 - thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else {
+ idx = thrd->lstenq;
+ if (!IS_FREE(&thrd->req[idx]))
+ req = &thrd->req[idx];
+ else
+ req = NULL;
+ }
/* Return if no request */
if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
/* Only manager can execute GO */
_execute_DBGINSN(thrd, insn, true);
+ thrd->req_running = idx;
+
return true;
}
@@ -1211,8 +1200,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
- ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
- ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+ ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+ ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
ccr |= (rqc->swap << CC_SWAP_SHFT);
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
thrd->req[0].r = NULL;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[0]);
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
/* Clear the reset flag */
pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
thrd = &pl330->channels[id];
- active = _thrd_active(thrd);
- if (!active) /* Aborted */
+ active = thrd->req_running;
+ if (active == -1) /* Aborted */
continue;
- active -= 1;
-
rqdone = &thrd->req[active];
- MARK_FREE(rqdone);
+ mark_free(thrd, active);
/* Get going again ASAP */
_start(thrd);
@@ -1480,13 +1467,19 @@ int pl330_update(const struct pl330_info *pi)
/* Now that we are in no hurry, do the callbacks */
while (!list_empty(&pl330->req_done)) {
+ struct pl330_req *r;
+
rqdone = container_of(pl330->req_done.next,
struct _pl330_req, rqd);
list_del_init(&rqdone->rqd);
+ /* Detach the req */
+ r = rqdone->r;
+ rqdone->r = NULL;
+
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(rqdone->r, PL330_ERR_NONE);
+ _callback(r, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
@@ -1509,7 +1502,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
struct pl330_thread *thrd = ch_id;
struct pl330_dmac *pl330;
unsigned long flags;
- int ret = 0, active;
+ int ret = 0, active = thrd->req_running;
if (!thrd || thrd->free || thrd->dmac->state == DYING)
return -EINVAL;
@@ -1525,28 +1518,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
thrd->req[0].r = NULL;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[0]);
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 0);
+ mark_free(thrd, 1);
break;
case PL330_OP_ABORT:
- active = _thrd_active(thrd);
-
/* Make sure the channel is stopped */
_stop(thrd);
/* ABORT is only for the active req */
- if (!active)
+ if (active == -1)
break;
- active--;
-
thrd->req[active].r = NULL;
- MARK_FREE(&thrd->req[active]);
+ mark_free(thrd, active);
/* Start the next */
case PL330_OP_START:
- if (!_thrd_active(thrd) && !_start(thrd))
+ if ((active == -1) && !_start(thrd))
ret = -EIO;
break;
@@ -1587,14 +1576,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
else
pstatus->faulting = false;
- active = _thrd_active(thrd);
+ active = thrd->req_running;
- if (!active) {
+ if (active == -1) {
/* Indicate that the thread is not running */
pstatus->top_req = NULL;
pstatus->wait_req = NULL;
} else {
- active--;
pstatus->top_req = thrd->req[active].r;
pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
? thrd->req[1 - active].r : NULL;
@@ -1623,6 +1611,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
return -1;
}
+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+ return pi->pcfg.irq_ns & (1 << i);
+}
+
/* Upon success, returns IdentityToken for the
* allocated channel, NULL otherwise.
*/
@@ -1647,15 +1640,16 @@ void *pl330_request_channel(const struct pl330_info *pi)
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
- if (thrd->free) {
+ if ((thrd->free) && (!_manager_ns(thrd) ||
+ _chan_ns(pi, i))) {
thrd->ev = _alloc_event(thrd);
if (thrd->ev >= 0) {
thrd->free = false;
thrd->lstenq = 1;
thrd->req[0].r = NULL;
- MARK_FREE(&thrd->req[0]);
+ mark_free(thrd, 0);
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 1);
break;
}
}
@@ -1761,14 +1755,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
thrd->req[0].mc_bus = pl330->mcode_bus
+ (thrd->id * pi->mcbufsz);
thrd->req[0].r = NULL;
- MARK_FREE(&thrd->req[0]);
+ mark_free(thrd, 0);
thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
+ pi->mcbufsz / 2;
thrd->req[1].mc_bus = thrd->req[0].mc_bus
+ pi->mcbufsz / 2;
thrd->req[1].r = NULL;
- MARK_FREE(&thrd->req[1]);
+ mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
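As a reading aid (assumptions noted in the comments), a small sketch of the scheduling convention the reworked _trigger()/mark_free() pair records in req_running: the slot enqueued earlier is tried first, then the most recently enqueued one, and -1 marks an idle thread.

#include <linux/types.h>

/*
 * Illustration only: mirrors the slot-selection order in _trigger().
 * slot_free[] stands in for IS_FREE() on thrd->req[], and the return
 * value matches what ends up in thrd->req_running (-1 when idle).
 */
static int example_pick_slot(const bool slot_free[2], int lstenq)
{
	int idx = 1 - lstenq;

	if (!slot_free[idx])
		return idx;
	if (!slot_free[lstenq])
		return lstenq;
	return -1;
}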
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index 2393b5bc96f..8794a34eae6 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -143,7 +143,6 @@ static int sp804_set_next_event(unsigned long next,
}
static struct clock_event_device sp804_clockevent = {
- .shift = 32,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sp804_set_mode,
.set_next_event = sp804_set_next_event,
@@ -169,13 +168,9 @@ void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
-
evt->name = name;
evt->irq = irq;
- evt->mult = div_sc(rate, NSEC_PER_SEC, evt->shift);
- evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
- evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
setup_irq(irq, &sp804_timer_irq);
- clockevents_register_device(evt);
+ clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
}
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 01f18a421b1..dcb004a804c 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -19,17 +19,22 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
+#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
-#ifdef CONFIG_PM
/**
* struct vic_device - VIC PM device
* @irq: The IRQ number for the base of the VIC.
@@ -40,6 +45,7 @@
* @int_enable: Save for VIC_INT_ENABLE.
* @soft_int: Save for VIC_INT_SOFT.
* @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
*/
struct vic_device {
void __iomem *base;
@@ -50,13 +56,13 @@ struct vic_device {
u32 int_enable;
u32 soft_int;
u32 protect;
+ struct irq_domain domain;
};
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
static int vic_id;
-#endif /* CONFIG_PM */
/**
* vic_init2 - common initialisation code
@@ -156,39 +162,50 @@ static int __init vic_pm_init(void)
return 0;
}
late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
/**
- * vic_pm_register - Register a VIC for later power management control
+ * vic_register() - Register a VIC.
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
*
* Register the VIC with the system device tree so that it can be notified
* of suspend and resume requests and ensure that the correct actions are
* taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
*/
-static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
+static void __init vic_register(void __iomem *base, unsigned int irq,
+ u32 resume_sources, struct device_node *node)
{
struct vic_device *v;
- if (vic_id >= ARRAY_SIZE(vic_devices))
+ if (vic_id >= ARRAY_SIZE(vic_devices)) {
printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
- else {
- v = &vic_devices[vic_id];
- v->base = base;
- v->resume_sources = resume_sources;
- v->irq = irq;
- vic_id++;
+ return;
}
+
+ v = &vic_devices[vic_id];
+ v->base = base;
+ v->resume_sources = resume_sources;
+ v->irq = irq;
+ vic_id++;
+
+ v->domain.irq_base = irq;
+ v->domain.nr_irq = 32;
+#ifdef CONFIG_OF_IRQ
+ v->domain.of_node = of_node_get(node);
+#endif /* CONFIG_OF_IRQ */
+ v->domain.ops = &irq_domain_simple_ops;
+ irq_domain_add(&v->domain);
}
-#else
-static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
-#endif /* CONFIG_PM */
static void vic_ack_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq & 31;
+ unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
/* moreover, clear the soft-triggered, in case it was the reason */
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
@@ -197,14 +214,14 @@ static void vic_ack_irq(struct irq_data *d)
static void vic_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq & 31;
+ unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
}
static void vic_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq & 31;
+ unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE);
}
@@ -226,7 +243,7 @@ static struct vic_device *vic_from_irq(unsigned int irq)
static int vic_set_wake(struct irq_data *d, unsigned int on)
{
struct vic_device *v = vic_from_irq(d->irq);
- unsigned int off = d->irq & 31;
+ unsigned int off = d->hwirq;
u32 bit = 1 << off;
if (!v)
@@ -301,7 +318,7 @@ static void __init vic_set_irq_sources(void __iomem *base,
* and 020 within the page. We call this "second block".
*/
static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
- u32 vic_sources)
+ u32 vic_sources, struct device_node *node)
{
unsigned int i;
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
@@ -328,17 +345,12 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
}
vic_set_irq_sources(base, irq_start, vic_sources);
+ vic_register(base, irq_start, 0, node);
}
-/**
- * vic_init - initialise a vectored interrupt controller
- * @base: iomem base address
- * @irq_start: starting interrupt number, must be muliple of 32
- * @vic_sources: bitmask of interrupt sources to allow
- * @resume_sources: bitmask of interrupt sources to allow for resume
- */
-void __init vic_init(void __iomem *base, unsigned int irq_start,
- u32 vic_sources, u32 resume_sources)
+static void __init __vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources,
+ struct device_node *node)
{
unsigned int i;
u32 cellid = 0;
@@ -356,7 +368,7 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
switch(vendor) {
case AMBA_VENDOR_ST:
- vic_init_st(base, irq_start, vic_sources);
+ vic_init_st(base, irq_start, vic_sources, node);
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
@@ -375,5 +387,81 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
vic_set_irq_sources(base, irq_start, vic_sources);
- vic_pm_register(base, irq_start, resume_sources);
+ vic_register(base, irq_start, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be a multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources)
+{
+ __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *regs;
+ int irq_base;
+
+ if (WARN(parent, "non-root VICs are not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+ if (WARN_ON(irq_base < 0))
+ goto out_unmap;
+
+ __vic_init(regs, irq_base, ~0, ~0, node);
+
+ return 0;
+
+ out_unmap:
+ iounmap(regs);
+
+ return -EIO;
+}
+#endif /* CONFIG_OF */
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This does a single read of the
+ * status register and handles all interrupts in order from LSB first.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+ u32 stat, irq;
+ int handled = 0;
+
+ stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
+ while (stat) {
+ irq = ffs(stat) - 1;
+ handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs);
+ stat &= ~(1 << irq);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered VICs until there are no pending
+ * interrupts.
+ */
+asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < vic_id; ++i)
+ handled |= handle_one_vic(&vic_devices[i], regs);
+ } while (handled);
}
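For context, a hedged sketch (not part of the patch) of how a platform could hand the new vic_of_init() to the generic DT IRQ-controller probing helper; the compatible string and the of_irq_init() wiring are assumptions based on kernels of this era, not something this diff adds.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/hardware/vic.h>

/* Hypothetical match table: vic_of_init() has the callback signature
 * of_irq_init() expects (node, parent). */
static const struct of_device_id example_vic_of_match[] __initconst = {
	{ .compatible = "arm,pl192-vic", .data = vic_of_init },
	{ /* sentinel */ }
};

static void __init example_dt_init_irq(void)
{
	of_irq_init(example_vic_of_match);
}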
diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9_defconfig
index ffb1edd9336..8826eb218e7 100644
--- a/arch/arm/configs/at91cap9adk_defconfig
+++ b/arch/arm/configs/at91cap9_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -52,16 +51,12 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -81,7 +76,6 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
@@ -99,7 +93,6 @@ CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
index 38cb7c98542..bbe4e1a1f5d 100644
--- a/arch/arm/configs/at91rm9200_defconfig
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@@ -56,7 +55,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -75,18 +73,8 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
CONFIG_MTD_CHAR=y
@@ -108,8 +96,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_LEGACY=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
@@ -119,14 +105,23 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
+CONFIG_ARM_AT91_ETHER=y
CONFIG_PHYLIB=y
CONFIG_DAVICOM_PHY=y
CONFIG_SMSC_PHY=y
CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=y
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -139,18 +134,6 @@ CONFIG_USB_NET_RNDIS_HOST=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
@@ -158,9 +141,9 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LEGACY_PTY_COUNT=32
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -290,7 +273,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
@@ -335,7 +317,6 @@ CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260_defconfig
index f8a9226413b..505b3765f87 100644
--- a/arch/arm/configs/at91sam9260ek_defconfig
+++ b/arch/arm/configs/at91sam9260_defconfig
@@ -12,11 +12,23 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9260=y
+CONFIG_ARCH_AT91SAM9260_SAM9XE=y
CONFIG_MACH_AT91SAM9260EK=y
+CONFIG_MACH_CAM60=y
+CONFIG_MACH_SAM9_L9260=y
+CONFIG_MACH_AFEB9260=y
+CONFIG_MACH_USB_A9260=y
+CONFIG_MACH_QIL_A9260=y
+CONFIG_MACH_CPU9260=y
+CONFIG_MACH_FLEXIBITY=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
@@ -33,12 +45,10 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -55,7 +65,6 @@ CONFIG_I2C_GPIO=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
@@ -71,7 +80,6 @@ CONFIG_USB_G_SERIAL=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20_defconfig
index 9e90e6d7929..9123568d9a8 100644
--- a/arch/arm/configs/at91sam9g20ek_defconfig
+++ b/arch/arm/configs/at91sam9g20_defconfig
@@ -14,6 +14,15 @@ CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G20=y
CONFIG_MACH_AT91SAM9G20EK=y
CONFIG_MACH_AT91SAM9G20EK_2MMC=y
+CONFIG_MACH_CPU9G20=y
+CONFIG_MACH_ACMENETUSFOXG20=y
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+CONFIG_MACH_PCONTROL_G20=y
+CONFIG_MACH_GSIA18S=y
+CONFIG_MACH_USB_A9G20=y
+CONFIG_MACH_SNAPPER_9260=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
# CONFIG_ARM_THUMB is not set
CONFIG_AEABI=y
@@ -21,9 +30,10 @@ CONFIG_LEDS=y
CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,8 +47,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -48,17 +56,13 @@ CONFIG_MTD_NAND_ATMEL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -66,15 +70,14 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
CONFIG_HW_RANDOM=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
@@ -82,7 +85,6 @@ CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_SND_AT73C213=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
@@ -105,7 +107,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
index c5876d244f4..606d48f3b8f 100644
--- a/arch/arm/configs/at91sam9g45_defconfig
+++ b/arch/arm/configs/at91sam9g45_defconfig
@@ -18,6 +18,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91SAM9G45=y
CONFIG_MACH_AT91SAM9M10G45EK=y
+CONFIG_MACH_AT91SAM_DT=y
CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
CONFIG_AT91_SLOW_CLOCK=y
CONFIG_AEABI=y
@@ -73,11 +74,8 @@ CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_MII=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_MACB=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_DAVICOM_PHY=y
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
CONFIG_AT76C50X_USB=m
@@ -131,7 +129,6 @@ CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
CONFIG_FB=y
CONFIG_FB_ATMEL=y
CONFIG_FB_UDL=m
diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rl_defconfig
index 75621e4d03f..ad562ee6420 100644
--- a/arch/arm/configs/at91sam9rlek_defconfig
+++ b/arch/arm/configs/at91sam9rl_defconfig
@@ -23,8 +23,6 @@ CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -35,7 +33,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
CONFIG_BLK_DEV_RAM_SIZE=24576
-CONFIG_ATMEL_SSC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
@@ -62,13 +59,11 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_ATMEL=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_MMC=y
CONFIG_MMC_AT91=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477346e..d95763d5f0d 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22af03..fd996bb1302 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 11a4192197c..a22e9307906 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
CONFIG_ARCH_IMX_V4_V5=y
CONFIG_ARCH_MX1ADS=y
CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
CONFIG_MACH_MX21ADS=y
CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
CONFIG_MACH_MX27ADS=y
CONFIG_MACH_PCM038=y
CONFIG_MACH_CPUIMX27=y
@@ -67,22 +68,20 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
# CONFIG_MTD_CFI_I2 is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
CONFIG_MTD_UBI=y
CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
CONFIG_DM9000=y
+CONFIG_SMC91X=y
CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +99,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
CONFIG_SPI=y
CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +139,7 @@ CONFIG_MMC=y
CONFIG_MMC_MXC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
CONFIG_LEDS_MC13783=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d4e9a..443675d317e 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7b63462b349..dde2a1af7b3 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,13 +48,6 @@ CONFIG_MACH_SX1=y
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
-CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
# CONFIG_ARM_THUMB is not set
CONFIG_PCCARD=y
CONFIG_OMAP_CF=y
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
deleted file mode 100644
index c75c9fcede5..00000000000
--- a/arch/arm/configs/pcontrol_g20_defconfig
+++ /dev/null
@@ -1,175 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="/opt/arm-2010q1/bin/arm-none-linux-gnueabi-"
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_TREE_PREEMPT_RCU=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_NAMESPACES=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91SAM9G20=y
-CONFIG_MACH_PCONTROL_G20=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200 mem=128M mtdparts=atmel_nand:128k(bootstrap)ro,256k(uboot)ro,128k(env1)ro,128k(env2)ro,2M(linux),-(root) root=/dev/mmcblk0p1 rootwait rw"
-CONFIG_VFP=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_VLAN_8021Q=y
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FW_LOADER is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_AT24=m
-CONFIG_SCSI=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_MACVLAN=m
-CONFIG_TUN=m
-CONFIG_SMSC_PHY=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MACB=y
-CONFIG_SMSC911X=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_INPUT_POLLDEV=y
-CONFIG_INPUT_SPARSEKMAP=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=m
-CONFIG_KEYBOARD_MATRIX=m
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=m
-CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
-# CONFIG_SERIO is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_SERIAL_MAX3100=m
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_R3964=m
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_I2C_GPIO=m
-CONFIG_SPI=y
-CONFIG_SPI_ATMEL=m
-CONFIG_SPI_SPIDEV=m
-CONFIG_GPIO_SYSFS=y
-CONFIG_W1=m
-CONFIG_W1_MASTER_GPIO=m
-CONFIG_W1_SLAVE_DS2431=m
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_AT91SAM9X_WATCHDOG=y
-# CONFIG_MFD_SUPPORT is not set
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=m
-CONFIG_USB_LIBUSUAL=y
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_GADGET=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_G_HID=m
-CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
-CONFIG_MMC_ATMELMCI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91SAM9=y
-CONFIG_AUXDISPLAY=y
-CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
-CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
-CONFIG_IIO=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 195729760ae..fd5d3041d71 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -9,9 +9,8 @@ CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-# CONFIG_SYSCTL_SYSCALL is not set
# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -20,6 +19,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
CONFIG_MACH_HARMONY=y
CONFIG_MACH_KAEN=y
CONFIG_MACH_PAZ00=y
@@ -78,14 +79,12 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
CONFIG_R8169=y
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
CONFIG_USB_PEGASUS=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 4a5a12681be..374000ec4e4 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U300=y
CONFIG_MACH_U300=y
CONFIG_MACH_U300_BS335=y
-CONFIG_MACH_U300_DUAL_RAM=y
-CONFIG_U300_DEBUG=y
CONFIG_MACH_U300_SPIDUMMY=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -26,19 +24,21 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
CONFIG_CPU_IDLE=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
# CONFIG_SUSPEND is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_MISC_DEVICES is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_HWMON is not set
@@ -51,6 +51,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
@@ -65,10 +66,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 97d31a4663d..2d7b6e7b727 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -10,7 +10,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_U8500=y
CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
-CONFIG_MACH_U8500=y
+CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
CONFIG_MACH_U5500=y
CONFIG_NO_HZ=y
@@ -24,6 +24,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -41,11 +42,8 @@ CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -72,15 +70,12 @@ CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
-# CONFIG_HWMON is not set
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
+CONFIG_AB5500_CORE=y
CONFIG_AB8500_CORE=y
CONFIG_REGULATOR_AB8500=y
# CONFIG_HID_SUPPORT is not set
-CONFIG_USB_MUSB_HDRC=y
-CONFIG_USB_GADGET_MUSB_HDRC=y
-CONFIG_MUSB_PIO_ONLY=y
CONFIG_USB_GADGET=y
CONFIG_AB8500_USB=y
CONFIG_MMC=y
@@ -97,6 +92,7 @@ CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_HSEM_U8500=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad3f4e..547a3c1e59d 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 29035e86a59..b6e65dedfd7 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -187,6 +187,17 @@
#endif
/*
+ * Instruction barrier
+ */
+ .macro instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+ isb
+#elif __LINUX_ARM_ARCH__ == 6
+ mcr p15, 0, r0, c7, c5, 4
+#endif
+ .endm
+
+/*
* SMP data memory barrier
*/
.macro smp_dmb mode
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 9abe7a07d5a..fac79dceb73 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -32,7 +32,6 @@
#define __BUG(__file, __line, __value) \
do { \
- BUILD_BUG_ON(sizeof(struct bug_entry) != 12); \
asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 00000000000..a0ada3ea435
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include <asm/io.h>
+
+/* The register definitions are from section 3.2 of
+ * the Embedded Cross Trigger documentation, revision r0p0
+ */
+#define CTICONTROL 0x000
+#define CTISTATUS 0x004
+#define CTILOCK 0x008
+#define CTIPROTECTION 0x00C
+#define CTIINTACK 0x010
+#define CTIAPPSET 0x014
+#define CTIAPPCLEAR 0x018
+#define CTIAPPPULSE 0x01c
+#define CTIINEN 0x020
+#define CTIOUTEN 0x0A0
+#define CTITRIGINSTATUS 0x130
+#define CTITRIGOUTSTATUS 0x134
+#define CTICHINSTATUS 0x138
+#define CTICHOUTSTATUS 0x13c
+#define CTIPERIPHID0 0xFE0
+#define CTIPERIPHID1 0xFE4
+#define CTIPERIPHID2 0xFE8
+#define CTIPERIPHID3 0xFEC
+#define CTIPCELLID0 0xFF0
+#define CTIPCELLID1 0xFF4
+#define CTIPCELLID2 0xFF8
+#define CTIPCELLID3 0xFFC
+
+/* The below are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define LOCKACCESS 0xFB0
+#define LOCKSTATUS 0xFB4
+
+/* Writing this value to LOCKACCESS unlocks the module; writing any
+ * other value locks it.
+ */
+#define LOCKCODE 0xC5ACCE55
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ * @irq to happen
+ *
+ * cti struct used to operate cti registers.
+ */
+struct cti {
+ void __iomem *base;
+ int irq;
+ int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ * @irq to happen
+ *
+ * Called by machine code to pass the board-dependent
+ * @base, @irq and @trig_out to the cti instance.
+ */
+static inline void cti_init(struct cti *cti,
+ void __iomem *base, int irq, int trig_out)
+{
+ cti->base = base;
+ cti->irq = irq;
+ cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use channel @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @chan: channel number
+ *
+ * This function maps trigger in @trig_in to trigger out @trig_out
+ * using channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+ int trig_in, int trig_out, int chan)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + CTIINEN + trig_in * 4);
+ val |= BIT(chan);
+ __raw_writel(val, base + CTIINEN + trig_in * 4);
+
+ val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+ val |= BIT(chan);
+ __raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+ __raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * disable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+ __raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + CTIINTACK);
+ val |= BIT(cti->trig_out_for_irq);
+ __raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * unlock the cti module; otherwise any writes to the cti
+ * module are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + LOCKSTATUS);
+
+ if (val & 1) {
+ val = LOCKCODE;
+ __raw_writel(val, base + LOCKACCESS);
+ }
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * lock the cti module, so that any writes to the cti
+ * module will not be allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+ void __iomem *base = cti->base;
+ unsigned long val;
+
+ val = __raw_readl(base + LOCKSTATUS);
+
+ if (!(val & 1)) {
+ val = ~LOCKCODE;
+ __raw_writel(val, base + LOCKACCESS);
+ }
+}
+#endif
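A short usage sketch of the helpers this header introduces; the base address, IRQ, trigger and channel numbers below are illustrative assumptions, not values from the patch.

#include <linux/init.h>
#include <asm/cti.h>

static struct cti example_cti;

/* Hypothetical setup: route trigger-in 1 to trigger-out 6 over channel 2,
 * where trigger-out 6 is assumed to be the output wired to raise @irq. */
static void __init example_cti_setup(void __iomem *base, int irq)
{
	cti_init(&example_cti, base, irq, 6);
	cti_unlock(&example_cti);
	cti_map_trigger(&example_cti, 1, 6, 2);
	cti_enable(&example_cti);
}

/* In the interrupt handler, acknowledge the CTI before returning. */
static void example_cti_irq(void)
{
	cti_irq_ack(&example_cti);
}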
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644
index 00000000000..0df7a2c1fc3
--- /dev/null
+++ b/arch/arm/include/asm/edac.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+ unsigned int *virt_addr = va;
+ unsigned int temp, temp2;
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+ /* Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %1, %0, [%2]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+ : "=&r"(temp), "=&r"(temp2)
+ : "r"(virt_addr)
+ : "cc");
+ }
+#endif
+}
+
+#endif
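A minimal sketch of how the new helper is meant to be used; the caller and the 64-byte length are assumptions for illustration (in mainline the EDAC core invokes atomic_scrub() on the region reported for a corrected error).

#include <asm/edac.h>

/*
 * Illustration only: rewrite one (assumed) 64-byte region word by word
 * so the hardware re-checks and corrects its ECC on the way through.
 */
static void example_scrub_region(void *vaddr)
{
	atomic_scrub(vaddr, 64);
}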
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
deleted file mode 100644
index 3ceb85e4385..00000000000
--- a/arch/arm/include/asm/entry-macro-vic2.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/* arch/arm/include/asm/entry-macro-vic2.S
- *
- * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Low-level IRQ helper macros for a device with two VICs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
-*/
-
-/* This should be included from <mach/entry-macro.S> with the necessary
- * defines for virtual addresses and IRQ bases for the two vics.
- *
- * The code needs the following defined:
- * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ
- * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ
- * VA_VIC0 Virtual address of VIC0
- * VA_VIC1 Virtual address of VIC1
- *
- * Note, code assumes VIC0's virtual address is an ARM immediate constant
- * away from VIC1.
-*/
-
-#include <asm/hardware/vic.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =VA_VIC0
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, #IRQ_VIC0_BASE + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
- addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
- .endm
diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h
index 11ad0bfbb0a..7151753b098 100644
--- a/arch/arm/include/asm/gpio.h
+++ b/arch/arm/include/asm/gpio.h
@@ -1,6 +1,10 @@
#ifndef _ARCH_ARM_GPIO_H
#define _ARCH_ARM_GPIO_H
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIO CONFIG_ARCH_NR_GPIO
+#endif
+
/* not all ARM platforms necessarily support this API ... */
#include <mach/gpio.h>
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index ddf07a92a6c..436e60b2cf7 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -27,23 +27,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu smp_irq_stat_cpu
-#if NR_IRQS > 512
-#define HARDIRQ_BITS 10
-#elif NR_IRQS > 256
-#define HARDIRQ_BITS 9
-#else
-#define HARDIRQ_BITS 8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have space
- * for potentially all IRQ sources in the system nesting
- * on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
deleted file mode 100644
index 74ebc803904..00000000000
--- a/arch/arm/include/asm/hardware/entry-macro-gic.S
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-gic.S
- *
- * Low-level IRQ helper macros for GIC
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/hardware/gic.h>
-
-#ifndef HAVE_GET_IRQNR_PREAMBLE
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =gic_cpu_base_addr
- ldr \base, [\base]
- .endm
-#endif
-
-/*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec. To wit:
- *
- * Interrupts 0-15 are IPI
- * 16-31 are local. We allow 30 to be used for the watchdog.
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * A simple read from the controller will tell us the number of the highest
- * priority enabled interrupt. We then just need to check whether it is in the
- * valid range for an IRQ (30-1020 inclusive).
- */
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- ldr \irqstat, [\base, #GIC_CPU_INTACK]
- /* bits 12-10 = src CPU, 9-0 = int # */
-
- ldr \tmp, =1021
- bic \irqnr, \irqstat, #0x1c00
- cmp \irqnr, #15
- cmpcc \irqnr, \irqnr
- cmpne \irqnr, \tmp
- cmpcs \irqnr, \irqnr
- .endm
-
-/* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt on the
- * controller, since this requires the original irqstat value which
- * we won't easily be able to recreate later.
- */
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- bic \irqnr, \irqstat, #0x1c00
- cmp \irqnr, #16
- strcc \irqstat, [\base, #GIC_CPU_EOI]
- cmpcs \irqnr, \irqnr
- .endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3e91f22046f..4bdfe001869 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,30 +36,22 @@
#include <linux/irqdomain.h>
struct device_node;
-extern void __iomem *gic_cpu_base_addr;
extern struct irq_chip gic_arch_extn;
-void gic_init(unsigned int, int, void __iomem *, void __iomem *);
+void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+ u32 offset);
int gic_of_init(struct device_node *node, struct device_node *parent);
void gic_secondary_init(unsigned int);
+void gic_handle_irq(struct pt_regs *regs);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-struct gic_chip_data {
- void __iomem *dist_base;
- void __iomem *cpu_base;
-#ifdef CONFIG_CPU_PM
- u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
- u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
- u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
- u32 __percpu *saved_ppi_enable;
- u32 __percpu *saved_ppi_conf;
-#endif
-#ifdef CONFIG_IRQ_DOMAIN
- struct irq_domain domain;
-#endif
- unsigned int gic_irqs;
-};
+static inline void gic_init(unsigned int nr, int start,
+ void __iomem *dist , void __iomem *cpu)
+{
+ gic_init_bases(nr, start, dist, cpu, 0);
+}
+
#endif
#endif
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 5daea2961d4..077c32326c6 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -234,6 +234,7 @@ extern int iop3xx_get_init_atu(void);
void iop3xx_map_io(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
+void iop3xx_restart(char, const char *);
static inline u32 read_tmr0(void)
{
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index 5d72550a809..f42ebd61959 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -41,7 +41,15 @@
#define VIC_PL192_VECT_ADDR 0xF00
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct device_node;
+struct pt_regs;
+
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-#endif
+int vic_of_init(struct device_node *node, struct device_node *parent);
+void vic_handle_irq(struct pt_regs *regs);
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644
index 00000000000..bf863edb517
--- /dev/null
+++ b/arch/arm/include/asm/idmap.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <asm/pgtable.h>
+
+/* Tag a function as requiring to be executed via an identity mapping. */
+#define __idmap __section(.idmap.text) noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif /* __ASM_IDMAP_H */
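
The __idmap annotation above is just a section-placement attribute plus noinline/notrace. As a hedged illustration of the underlying mechanism (not taken from the patch, and using a made-up section name), the same effect in plain C:

#include <stdio.h>

/* Place the function in a dedicated linker section, the way __idmap groups
 * reboot/MMU-switch helpers into .idmap.text so the identity mapping behind
 * idmap_pgd can cover them. */
__attribute__((section(".demo.text"), noinline))
static int tagged_function(int x)
{
	return x + 1;
}

int main(void)
{
	/* "objdump -h" or "nm" on the binary shows tagged_function in .demo.text */
	printf("%d\n", tagged_function(41));
	return 0;
}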
diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h
index 97683975f7d..84c7e51cb6d 100644
--- a/arch/arm/include/asm/ipcbuf.h
+++ b/arch/arm/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __ASMARM_IPCBUF_H
-#define __ASMARM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for arm architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __ASMARM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 2b0efc3104a..d7692cafde7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -19,7 +19,7 @@ struct machine_desc {
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long atag_offset; /* tagged list (relative) */
- const char **dt_compat; /* array of device tree
+ const char *const *dt_compat; /* array of device tree
* 'compatible' strings */
unsigned int nr_irqs; /* number of IRQs */
@@ -31,10 +31,10 @@ struct machine_desc {
unsigned int video_start; /* start of video RAM */
unsigned int video_end; /* end of video RAM */
- unsigned int reserve_lp0 :1; /* never has lp0 */
- unsigned int reserve_lp1 :1; /* never has lp1 */
- unsigned int reserve_lp2 :1; /* never has lp2 */
- unsigned int soft_reboot :1; /* soft reboot */
+ unsigned char reserve_lp0 :1; /* never has lp0 */
+ unsigned char reserve_lp1 :1; /* never has lp1 */
+ unsigned char reserve_lp2 :1; /* never has lp2 */
+ char restart_mode; /* default restart mode */
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
@@ -46,6 +46,7 @@ struct machine_desc {
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
+ void (*restart)(char, const char *);
};
/*
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index d5adaae5ee2..f73c908b7fa 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -10,8 +10,6 @@
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-#include <linux/sysdev.h>
-
/*
* This is our kernel timer structure.
*
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
new file mode 100644
index 00000000000..c0efdd60966
--- /dev/null
+++ b/arch/arm/include/asm/opcodes.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL 0
+#define ARM_OPCODE_CONDTEST_PASS 1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+#endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ca94653f1ec..97b440c25c5 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,7 +151,11 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
#include <asm/pgtable-2level-types.h>
+#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e3827a89..99cfe360798 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
extern enum arm_perf_pmu_ids
armpmu_get_pmu_id(void);
-extern int
-armpmu_get_max_events(void);
-
#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 3e08fd3fbb6..943504f53f5 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -25,12 +25,34 @@
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
+#ifdef CONFIG_ARM_LPAE
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ free_page((unsigned long)pmd);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+}
+
+#else /* !CONFIG_ARM_LPAE */
+
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
-#define pgd_populate(mm,pmd,pte) BUG()
+#define pud_populate(mm,pmd,pte) BUG()
+
+#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
{
pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
pmdp[0] = __pmd(pmdval);
+#ifndef CONFIG_ARM_LPAE
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+#endif
flush_pmd_entry(pmdp);
}
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 470457e1cfc..2317a71c8f8 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,4 +140,45 @@
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+#define pud_none(pud) (0)
+#define pud_bad(pud) (0)
+#define pud_present(pud) (1)
+#define pud_clear(pudp) do { } while (0)
+#define set_pud(pud,pudp) do { } while (0)
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud;
+}
+
+#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps) \
+ do { \
+ pmdpd[0] = pmdps[0]; \
+ pmdpd[1] = pmdps[1]; \
+ flush_pmd_entry(pmdpd); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ pmdp[0] = __pmd(0); \
+ pmdp[1] = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
new file mode 100644
index 00000000000..d7952824c5c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-hwdef.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ * - common
+ */
+#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_BIT4 (_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+
+/*
+ * - section
+ */
+#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0) << 2) /* strongly ordered */
+#define PMD_SECT_BUFFERED (_AT(pmdval_t, 1) << 2) /* normal non-cacheable */
+#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
+#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
+#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
+#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
+#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
+#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT (40)
+#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
new file mode 100644
index 00000000000..921aa30259c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-types.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
+#define _ASM_PGTABLE_3LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pgdval_t pgd_t;
+typedef pteval_t pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_3LEVEL_TYPES_H */
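
To show why the STRICT_MM_TYPECHECKS variant exists, a standalone sketch (not part of the patch) using the same wrapper idiom: with the struct form, raw 64-bit values must go through __pte()/pte_val(), so mixing up table levels becomes a compile error rather than a silent bug.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;	/* same idiom as above */
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t){ (x) })

int main(void)
{
	pte_t pte = __pte((0x12345ULL << 12) | 3);	/* must be built explicitly */
	printf("pte = 0x%llx\n", (unsigned long long)pte_val(pte));
	/* pte_t bad = 0x12345003ULL;  <-- would not compile: no implicit conversion */
	return 0;
}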
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
new file mode 100644
index 00000000000..759af70f9a0
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/include/asm/pgtable-3level.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_H
+#define _ASM_PGTABLE_3LEVEL_H
+
+/*
+ * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
+ * 8 bytes each, occupying a 4K page. The first level table covers a range of
+ * 512GB, each entry representing 1GB. Since we are limited to 4GB input
+ * address range, only 4 entries in the PGD are used.
+ *
+ * There are enough spare bits in a page table entry for the kernel specific
+ * state.
+ */
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 512
+#define PTRS_PER_PGD 4
+
+#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF (0)
+#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT 30
+
+/*
+ * PMD_SHIFT determines the size a middle-level page table entry can map.
+ */
+#define PMD_SHIFT 21
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 21
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions for LPAE.
+ *
+ * These bits overlap with the hardware bits but the naming is preserved for
+ * consistency with the classic page table format.
+ */
+#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */
+#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
+#define L_PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
+#define L_PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
+#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
+#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
+#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
+#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
+
+/*
+ * To be used in assembly code with the upper page attributes.
+ */
+#define L_PTE_XN_HIGH (1 << (54 - 32))
+#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0) << 2) /* strongly ordered */
+#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 2) << 2) /* normal inner write-through */
+#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 7) << 2) /* normal inner write-alloc */
+#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 4) << 2) /* device */
+#define L_PTE_MT_DEV_WC (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
+#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 3) << 2) /* normal inner write-back */
+#define L_PTE_MT_MASK (_AT(pteval_t, 7) << 2)
+
+/*
+ * Software PGD flags.
+ */
+#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
+
+#ifndef __ASSEMBLY__
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_present(pud) (pud_val(pud))
+
+#define pud_clear(pudp) \
+ do { \
+ *pudp = __pud(0); \
+ clean_pmd_entry(pudp); \
+ } while (0)
+
+#define set_pud(pudp, pud) \
+ do { \
+ *pudp = pud; \
+ flush_pmd_entry(pudp); \
+ } while (0)
+
+static inline pmd_t *pud_page_vaddr(pud_t pud)
+{
+ return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the second-level page table.. */
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
+
+#define copy_pmd(pmdpd,pmdps) \
+ do { \
+ *pmdpd = *pmdps; \
+ flush_pmd_entry(pmdpd); \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ *pmdp = __pmd(0); \
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
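
To make the layout described in the header comment concrete, a small standalone C sketch (not part of the patch) that splits a 32-bit virtual address into the three LPAE table indices using the same shift values; the example address is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	30	/* mirrors the definitions above */
#define PMD_SHIFT	21
#define PAGE_SHIFT	12
#define PTRS_PER_PMD	512
#define PTRS_PER_PTE	512

int main(void)
{
	uint32_t addr = 0xc0123456;	/* arbitrary kernel-space address */

	unsigned pgd = addr >> PGDIR_SHIFT;			 /* 0..3, one of the 4 used PGD entries */
	unsigned pmd = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); /* 0..511 */
	unsigned pte = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);/* 0..511 */

	printf("0x%08x -> pgd %u, pmd %u, pte %u, page offset 0x%03x\n",
	       addr, pgd, pmd, pte, addr & 0xfff);
	return 0;
}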
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index 183111164ce..8426229ba29 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,6 +10,10 @@
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-hwdef.h>
+#else
#include <asm/pgtable-2level-hwdef.h>
+#endif
#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9451dce3a55..f66626d71e7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,20 +11,24 @@
#define _ASMARM_PGTABLE_H
#include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>
#ifndef CONFIG_MMU
+#include <asm-generic/4level-fixup.h>
#include "pgtable-nommu.h"
#else
+#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
-#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
#include <asm/pgtable-2level.h>
+#endif
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -33,15 +37,10 @@
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
*/
-#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END 0xff000000UL
#define LIBRARY_TEXT_START 0x0c000000
@@ -163,39 +162,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd) (0)
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (1)
-#define pgd_clear(pgdp) do { } while (0)
-#define set_pgd(pgd,pgdp) do { } while (0)
-#define set_pud(pud,pudp) do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr) ((pmd_t *)(dir))
-
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps) \
- do { \
- pmdpd[0] = pmdps[0]; \
- pmdpd[1] = pmdps[1]; \
- flush_pmd_entry(pmdpd); \
- } while (0)
-
-#define pmd_clear(pmdp) \
- do { \
- pmdp[0] = __pmd(0); \
- pmdp[1] = __pmd(0); \
- clean_pmd_entry(pmdp); \
- } while (0)
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
@@ -204,10 +172,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end) (end)
-
-
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
@@ -229,7 +193,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
#if __LINUX_ARM_ARCH__ < 6
@@ -336,6 +299,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
@@ -346,9 +310,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgtable_cache_init() do { } while (0)
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 71d99b83cdb..b5a5be2536c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@ enum arm_pmu_type {
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
- * @handle_irq: an optional handler which will be called from the interrupt and
- * passed the address of the low level handler, and can be used to implement
- * any platform specific handling before or after calling it.
+ * @handle_irq: an optional handler which will be called from the
+ * interrupt and passed the address of the low level handler,
+ * and can be used to implement any platform specific handling
+ * before or after calling it.
+ * @enable_irq: an optional handler which will be called after
+ * request_irq and be used to handle some platform specific
+ * irq enablement
+ * @disable_irq: an optional handler which will be called before
+ * free_irq and be used to handle some platform specific
+ * irq disablement
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
+ void (*enable_irq)(int irq);
+ void (*disable_irq)(int irq);
};
#ifdef CONFIG_CPU_HAS_PMU
@@ -55,16 +64,6 @@ reserve_pmu(enum arm_pmu_type type);
extern void
release_pmu(enum arm_pmu_type type);
-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
#else /* CONFIG_CPU_HAS_PMU */
#include <linux/err.h>
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 9e92cb205e6..f3628fb3d2b 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -65,7 +65,11 @@ extern struct processor {
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
+#ifdef CONFIG_ARM_LPAE
+ void (*set_pte_ext)(pte_t *ptep, pte_t pte);
+#else
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
/* Suspend/resume */
unsigned int suspend_size;
@@ -79,7 +83,11 @@ extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+#ifdef CONFIG_ARM_LPAE
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
@@ -107,6 +115,18 @@ extern void cpu_resume(void);
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#ifdef CONFIG_ARM_LPAE
+#define cpu_get_pgd() \
+ ({ \
+ unsigned long pg, pg2; \
+ __asm__("mrrc p15, 0, %0, %1, c2" \
+ : "=r" (pg), "=r" (pg2) \
+ : \
+ : "cc"); \
+ pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
+ (pgd_t *)phys_to_virt(pg); \
+ })
+#else
#define cpu_get_pgd() \
({ \
unsigned long pg; \
@@ -115,6 +135,7 @@ extern void cpu_resume(void);
pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \
})
+#endif
#endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b2d9df5667a..ce280b8d613 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -123,6 +123,8 @@ static inline void prefetch(const void *ptr)
#endif
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
#endif
#endif /* __ASM_ARM_PROCESSOR_H */
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 6f65ca86a5e..ee036330791 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -13,7 +13,6 @@
#ifdef CONFIG_OF
-#include <asm/setup.h>
#include <asm/irq.h>
extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index c8e6ddf3e86..e3f75726343 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -8,113 +8,7 @@
#ifndef ASM_SCHED_CLOCK
#define ASM_SCHED_CLOCK
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct clock_data {
- u64 epoch_ns;
- u32 epoch_cyc;
- u32 epoch_cyc_copy;
- u32 mult;
- u32 shift;
-};
-
-#define DEFINE_CLOCK_DATA(name) struct clock_data name
-
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
-{
- return (cyc * mult) >> shift;
-}
-
-/*
- * Atomically update the sched_clock epoch. Your update callback will
- * be called from a timer before the counter wraps - read the current
- * counter value, and call this function to safely move the epochs
- * forward. Only use this from the update callback.
- */
-static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
-{
- unsigned long flags;
- u64 ns = cd->epoch_ns +
- cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
-
- /*
- * Write epoch_cyc and epoch_ns in a way that the update is
- * detectable in cyc_to_fixed_sched_clock().
- */
- raw_local_irq_save(flags);
- cd->epoch_cyc = cyc;
- smp_wmb();
- cd->epoch_ns = ns;
- smp_wmb();
- cd->epoch_cyc_copy = cyc;
- raw_local_irq_restore(flags);
-}
-
-/*
- * If your clock rate is known at compile time, using this will allow
- * you to optimize the mult/shift loads away. This is paired with
- * init_fixed_sched_clock() to ensure that your mult/shift are correct.
- */
-static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
- u32 cyc, u32 mask, u32 mult, u32 shift)
-{
- u64 epoch_ns;
- u32 epoch_cyc;
-
- /*
- * Load the epoch_cyc and epoch_ns atomically. We do this by
- * ensuring that we always write epoch_cyc, epoch_ns and
- * epoch_cyc_copy in strict order, and read them in strict order.
- * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
- * the middle of an update, and we should repeat the load.
- */
- do {
- epoch_cyc = cd->epoch_cyc;
- smp_rmb();
- epoch_ns = cd->epoch_ns;
- smp_rmb();
- } while (epoch_cyc != cd->epoch_cyc_copy);
-
- return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
-}
-
-/*
- * Otherwise, you need to use this, which will obtain the mult/shift
- * from the clock_data structure. Use init_sched_clock() with this.
- */
-static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
- u32 cyc, u32 mask)
-{
- return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
-}
-
-/*
- * Initialize the clock data - calculate the appropriate multiplier
- * and shift. Also setup a timer to ensure that the epoch is refreshed
- * at the appropriate time interval, which will call your update
- * handler.
- */
-void init_sched_clock(struct clock_data *, void (*)(void),
- unsigned int, unsigned long);
-
-/*
- * Use this initialization function rather than init_sched_clock() if
- * you're using cyc_to_fixed_sched_clock, which will warn if your
- * constants are incorrect.
- */
-static inline void init_fixed_sched_clock(struct clock_data *cd,
- void (*update)(void), unsigned int bits, unsigned long rate,
- u32 mult, u32 shift)
-{
- init_sched_clock(cd, update, bits, rate);
- if (cd->mult != mult || cd->shift != shift) {
- pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
- "sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
- mult, shift, cd->mult, cd->shift);
- }
-}
-
extern void sched_clock_postinit(void);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
#endif
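
The replacement interface boils the old per-platform clock_data bookkeeping down to one call. A hedged usage sketch (not from the patch) of how a platform timer driver would register its counter, assuming a made-up memory-mapped 32-bit up-counter running at 24 MHz; the ioremap of counter_base is omitted.

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *counter_base;	/* mapped elsewhere by the platform code */

static u32 example_read_sched_clock(void)
{
	return readl_relaxed(counter_base);	/* raw free-running counter value */
}

/* called from the machine's timer init (not shown) */
static void __init example_sched_clock_init(void)
{
	/* 32 valid counter bits, assumed 24 MHz tick rate */
	setup_sched_clock(example_read_sched_clock, 32, 24000000);
}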
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 915696dd9c7..23ebc0c82a3 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -192,11 +192,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn }
/*
* Memory map description
*/
-#ifdef CONFIG_ARCH_EP93XX
-# define NR_BANKS 16
-#else
-# define NR_BANKS 8
-#endif
+#define NR_BANKS CONFIG_ARM_NR_BANKS
struct membank {
phys_addr_t start;
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h
index 90ffd04b8e7..dec6f9afb3c 100644
--- a/arch/arm/include/asm/socket.h
+++ b/arch/arm/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index 9997ad20eff..32ee164a2f6 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -24,12 +24,13 @@
#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u32 __arch_swahb32(__u32 x)
{
__asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
return x;
}
-#define __arch_swab16 __arch_swab16
+#define __arch_swahb32 __arch_swahb32
+#define __arch_swab16(x) ((__u16)__arch_swahb32(x))
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 984014b9264..e4c96cc6ec0 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -80,6 +80,14 @@ struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
unsigned long err, unsigned long trap);
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT 33
+#define FAULT_CODE_DEBUG 34
+#else
+#define FAULT_CODE_ALIGNMENT 1
+#define FAULT_CODE_DEBUG 2
+#endif
+
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
@@ -100,7 +108,7 @@ extern void __show_regs(struct pt_regs *);
extern int __pure cpu_architecture(void);
extern void cpu_init(void);
-void arm_machine_restart(char mode, const char *cmd);
+void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
#define UDBG_UNDEFINED (1 << 0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06..0f30c3a78fc 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
#define TIF_SECCOMP 21
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 265f908c4a6..5d3ed7e3856 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -202,8 +202,18 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb_remove_page(tlb, pte);
}
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ unsigned long addr)
+{
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+ tlb_remove_page(tlb, virt_to_page(pmdp));
+#endif
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
+#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index a7e457ed27c..58b8b84adcd 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
#else
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
index 48192ac3a23..28beab917ff 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/asm/types.h
@@ -3,12 +3,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index a5edf421005..d1c3f3a71c9 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -30,14 +30,15 @@ enum unwind_reason_code {
};
struct unwind_idx {
- unsigned long addr;
+ unsigned long addr_offset;
unsigned long insn;
};
struct unwind_table {
struct list_head list;
- struct unwind_idx *start;
- struct unwind_idx *stop;
+ const struct unwind_idx *start;
+ const struct unwind_idx *origin;
+ const struct unwind_idx *stop;
unsigned long begin_addr;
unsigned long end_addr;
};
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
extern void unwind_table_del(struct unwind_table *tab);
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
- return 0;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 16eed6aebfa..43b740d0e37 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -13,7 +13,7 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
-obj-y := elf.o entry-armv.o entry-common.o irq.o \
+obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9ad50c4208a..3a456c6c700 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -36,12 +36,11 @@
#ifdef CONFIG_MULTI_IRQ_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
- ldr r1, [r1]
adr lr, BSYM(9997f)
- teq r1, #0
- movne pc, r1
-#endif
+ ldr pc, [r1]
+#else
arch_irq_handler_default
+#endif
9997:
.endm
@@ -497,7 +496,7 @@ ENDPROC(__und_usr)
.popsection
.pushsection __ex_table,"a"
.long 1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
.long 2b, 4b
.long 3b, 4b
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 08c82fd844a..14e277d2ff9 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -39,8 +39,14 @@
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
+#ifdef CONFIG_ARM_LPAE
+ /* LPAE requires an additional page for the PGD */
+#define PG_DIR_SIZE 0x5000
+#define PMD_ORDER 3
+#else
#define PG_DIR_SIZE 0x4000
#define PMD_ORDER 2
+#endif
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
@@ -164,17 +170,36 @@ __create_page_tables:
teq r0, r6
bne 1b
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Build the PGD table (first level) to point to the PMD table. A PGD
+ * entry is 64-bit wide.
+ */
+ mov r0, r4
+ add r3, r4, #0x1000 @ first PMD table address
+ orr r3, r3, #3 @ PGD block type
+ mov r6, #4 @ PTRS_PER_PGD
+ mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
+1: str r3, [r0], #4 @ set bottom PGD entry bits
+ str r7, [r0], #4 @ set top PGD entry bits
+ add r3, r3, #0x1000 @ next PMD table
+ subs r6, r6, #1
+ bne 1b
+
+ add r4, r4, #0x1000 @ point to the PMD tables
+#endif
+
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
/*
* Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init().
*/
- adr r0, __enable_mmu_loc
+ adr r0, __turn_mmu_on_loc
ldmia r0, {r3, r5, r6}
sub r0, r0, r3 @ virt->phys offset
- add r5, r5, r0 @ phys __enable_mmu
- add r6, r6, r0 @ phys __enable_mmu_end
+ add r5, r5, r0 @ phys __turn_mmu_on
+ add r6, r6, r0 @ phys __turn_mmu_on_end
mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT
@@ -219,8 +244,8 @@ __create_page_tables:
#endif
/*
- * Then map boot params address in r2 or
- * the first 1MB of ram if boot params address is not specified.
+ * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
+ * of ram if boot params address is not specified.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
@@ -251,7 +276,15 @@ __create_page_tables:
mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
orr r3, r7, r3, lsl #SECTION_SHIFT
+#ifdef CONFIG_ARM_LPAE
+ mov r7, #1 << (54 - 32) @ XN
+#else
+ orr r3, r3, #PMD_SECT_XN
+#endif
1: str r3, [r0], #4
+#ifdef CONFIG_ARM_LPAE
+ str r7, [r0], #4
+#endif
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
blo 1b
@@ -283,14 +316,17 @@ __create_page_tables:
str r3, [r0]
#endif
#endif
+#ifdef CONFIG_ARM_LPAE
+ sub r4, r4, #0x1000 @ point to the PGD table
+#endif
mov pc, lr
ENDPROC(__create_page_tables)
.ltorg
.align
-__enable_mmu_loc:
+__turn_mmu_on_loc:
.long .
- .long __enable_mmu
- .long __enable_mmu_end
+ .long __turn_mmu_on
+ .long __turn_mmu_on_end
#if defined(CONFIG_SMP)
__CPUINIT
@@ -374,12 +410,17 @@ __enable_mmu:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
+#ifdef CONFIG_ARM_LPAE
+ mov r5, #0
+ mcrr p15, 0, r4, r5, c2 @ load TTBR0
+#else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
+#endif
b __turn_mmu_on
ENDPROC(__enable_mmu)
@@ -398,15 +439,19 @@ ENDPROC(__enable_mmu)
* other registers depend on the function called upon completion
*/
.align 5
-__turn_mmu_on:
+ .pushsection .idmap.text, "ax"
+ENTRY(__turn_mmu_on)
mov r0, r0
+ instr_sync
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
+ instr_sync
mov r3, r3
mov r3, r13
mov pc, r3
-__enable_mmu_end:
+__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
+ .popsection
#ifdef CONFIG_SMP_ON_UP
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 814a52a9dc3..d6a95ef9131 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1016,10 +1016,10 @@ static int __init arch_hw_breakpoint_init(void)
}
/* Register debug fault handler. */
- hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
- "watchpoint debug exception");
- hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
- "breakpoint debug exception");
+ hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+ TRAP_HWBKPT, "watchpoint debug exception");
+ hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+ TRAP_HWBKPT, "breakpoint debug exception");
/* Register hotplug notifier. */
register_cpu_notifier(&dbg_reset_nb);
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 9fe8910308a..8a30c89da70 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
static const union decode_item arm_cccc_0001_____1001_table[] = {
/* Synchronization primitives */
+#if __LINUX_ARM_ARCH__ < 6
+ /* Deprecated on ARMv6 and may be UNDEFINED on v7 */
/* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
-
+#endif
/* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
/* And unallocated instructions... */
DECODE_END
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index fc82de8bdcc..ba32b393b3f 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Synchronization primitives")
- /*
- * Use hard coded constants for SWP instructions to avoid warnings
- * about deprecated instructions.
- */
- TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]")
- TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
+ TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+ TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
+ TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
+#endif
TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
- TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+ TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
+ TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
+#endif
TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
- TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+ TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
index 5e726c31c45..5d8b8579222 100644
--- a/arch/arm/kernel/kprobes-test-thumb.c
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
DONT_TEST_IN_ITBLOCK(
TEST_BF_R( "cbnz r",0,0, ", 2f")
TEST_BF_R( "cbz r",2,-1,", 2f")
- TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20)
- TEST_BF_RX( "cbz r",7,0, ", 2f",0x40)
+ TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
+ TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
)
TEST_R("sxth r0, r",7, HH1,"")
TEST_R("sxth r7, r",0, HH2,"")
@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
TESTCASE_START(code) \
TEST_ARG_PTR(13, offset) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code,0) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
TEST("push {r0}")
@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
TEST_BF( "b 2f")
TEST_BB( "b 2b")
- TEST_BF_X("b 2f", 0x400)
- TEST_BB_X("b 2b", 0x400)
+ TEST_BF_X("b 2f", SPACE_0x400)
+ TEST_BB_X("b 2b", SPACE_0x400)
TEST_GROUP("Testing instructions in IT blocks")
@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
TEST_BB("bne.w 2b")
TEST_BF("bgt.w 2f")
TEST_BB("blt.w 2b")
- TEST_BF_X("bpl.w 2f",0x1000)
+ TEST_BF_X("bpl.w 2f", SPACE_0x1000)
)
TEST_UNSUPPORTED("msr cpsr, r0")
@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
TEST_BF( "b.w 2f")
TEST_BB( "b.w 2b")
- TEST_BF_X("b.w 2f", 0x1000)
+ TEST_BF_X("b.w 2f", SPACE_0x1000)
TEST_BF( "bl.w 2f")
TEST_BB( "bl.w 2b")
- TEST_BB_X("bl.w 2b", 0x1000)
+ TEST_BB_X("bl.w 2b", SPACE_0x1000)
TEST_X( "blx __dummy_arm_subroutine",
".arm \n\t"
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index e17cdd6d90d..1862d8f2fd4 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -202,6 +202,8 @@
#include <linux/slab.h>
#include <linux/kprobes.h>
+#include <asm/opcodes.h>
+
#include "kprobes.h"
#include "kprobes-test.h"
@@ -1050,65 +1052,9 @@ static int test_instance;
static unsigned long test_check_cc(int cc, unsigned long cpsr)
{
- unsigned long temp;
-
- switch (cc) {
- case 0x0: /* eq */
- return cpsr & PSR_Z_BIT;
-
- case 0x1: /* ne */
- return (~cpsr) & PSR_Z_BIT;
-
- case 0x2: /* cs */
- return cpsr & PSR_C_BIT;
-
- case 0x3: /* cc */
- return (~cpsr) & PSR_C_BIT;
-
- case 0x4: /* mi */
- return cpsr & PSR_N_BIT;
-
- case 0x5: /* pl */
- return (~cpsr) & PSR_N_BIT;
-
- case 0x6: /* vs */
- return cpsr & PSR_V_BIT;
-
- case 0x7: /* vc */
- return (~cpsr) & PSR_V_BIT;
+ int ret = arm_check_condition(cc << 28, cpsr);
- case 0x8: /* hi */
- cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return cpsr & PSR_C_BIT;
-
- case 0x9: /* ls */
- cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (~cpsr) & PSR_C_BIT;
-
- case 0xa: /* ge */
- cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (~cpsr) & PSR_N_BIT;
-
- case 0xb: /* lt */
- cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return cpsr & PSR_N_BIT;
-
- case 0xc: /* gt */
- temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
- return (~temp) & PSR_N_BIT;
-
- case 0xd: /* le */
- temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
- return temp & PSR_N_BIT;
-
- case 0xe: /* al */
- case 0xf: /* unconditional */
- return true;
- }
- BUG();
- return false;
+ return (ret != ARM_OPCODE_CONDTEST_FAIL);
}
static int is_last_scenario;
@@ -1128,7 +1074,9 @@ static unsigned long test_context_cpsr(int scenario)
if (!test_case_is_thumb) {
/* Testing ARM code */
- probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0;
+ int cc = current_instruction >> 28;
+
+ probe_should_run = test_check_cc(cc, cpsr) != 0;
if (scenario == 15)
is_last_scenario = true;
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index 0dc5d77b935..e28a869b1ae 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -149,23 +149,31 @@ struct test_arg_end {
"1: "instruction" \n\t" \
" nop \n\t"
-#define TEST_BRANCH_F(instruction, xtra_dist) \
+#define TEST_BRANCH_F(instruction) \
TEST_INSTRUCTION(instruction) \
- ".if "#xtra_dist" \n\t" \
" b 99f \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ "2: nop \n\t"
+
+#define TEST_BRANCH_B(instruction) \
+ " b 50f \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t" \
+ " b 99f \n\t" \
+ TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex) \
+ TEST_INSTRUCTION(instruction) \
+ " b 99f \n\t" \
+ codex" \n\t" \
" b 99f \n\t" \
"2: nop \n\t"
-#define TEST_BRANCH_B(instruction, xtra_dist) \
+#define TEST_BRANCH_BX(instruction, codex) \
" b 50f \n\t" \
" b 99f \n\t" \
"2: nop \n\t" \
" b 99f \n\t" \
- ".if "#xtra_dist" \n\t" \
- ".space "#xtra_dist" \n\t" \
- ".endif \n\t" \
+ codex" \n\t" \
TEST_INSTRUCTION(instruction)
#define TESTCASE_END \
@@ -301,47 +309,60 @@ struct test_arg_end {
TESTCASE_START(code1 #reg1 code2) \
TEST_ARG_PTR(reg1, val1) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2) \
TESTCASE_END
-#define TEST_BF_X(code, xtra_dist) \
+#define TEST_BF(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code, xtra_dist) \
+ TEST_BRANCH_F(code) \
TESTCASE_END
-#define TEST_BB_X(code, xtra_dist) \
+#define TEST_BB(code) \
TESTCASE_START(code) \
TEST_ARG_END("") \
- TEST_BRANCH_B(code, xtra_dist) \
+ TEST_BRANCH_B(code) \
TESTCASE_END
-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
+#define TEST_BF_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg code2) \
TESTCASE_END
-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
- TESTCASE_START(code1 #reg code2) \
- TEST_ARG_REG(reg, val) \
- TEST_ARG_END("") \
- TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
+#define TEST_BB_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_B(code1 #reg code2) \
TESTCASE_END
-#define TEST_BF(code) TEST_BF_X(code, 0)
-#define TEST_BB(code) TEST_BB_X(code, 0)
-
-#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
-
#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
TEST_ARG_REG(reg1, val1) \
TEST_ARG_REG(reg2, val2) \
TEST_ARG_END("") \
- TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
+ TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_BF_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BB_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_BX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code1 #reg code2, codex) \
TESTCASE_END
#define TEST_X(code, codex) \
@@ -372,6 +393,25 @@ struct test_arg_end {
TESTCASE_END
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler guesses better the length of inline asm
+ * code and will spill the literal pool early enough to avoid generating PC
+ * relative loads with out of range offsets.
+ */
+#define TWICE(x) x x
+#define SPACE_0x8 TWICE(".space 4\n\t")
+#define SPACE_0x10 TWICE(SPACE_0x8)
+#define SPACE_0x20 TWICE(SPACE_0x10)
+#define SPACE_0x40 TWICE(SPACE_0x20)
+#define SPACE_0x80 TWICE(SPACE_0x40)
+#define SPACE_0x100 TWICE(SPACE_0x80)
+#define SPACE_0x200 TWICE(SPACE_0x100)
+#define SPACE_0x400 TWICE(SPACE_0x200)
+#define SPACE_0x800 TWICE(SPACE_0x400)
+#define SPACE_0x1000 TWICE(SPACE_0x800)
+
+
/* Various values used in test cases... */
#define N(val) (val ^ 0xffffffff)
#define VAL1 0x12345678
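
Each SPACE_* macro above doubles the previous one, so SPACE_0x8 is two ".space 4" directives (8 bytes) and SPACE_0x1000 ends up as 1024 of them (4096 bytes). A standalone sketch (not part of the patch) that checks the arithmetic for SPACE_0x40:

#include <stdio.h>
#include <string.h>

#define TWICE(x)	x x
#define SPACE_0x8	TWICE(".space 4\n\t")
#define SPACE_0x10	TWICE(SPACE_0x8)
#define SPACE_0x20	TWICE(SPACE_0x10)
#define SPACE_0x40	TWICE(SPACE_0x20)

int main(void)
{
	const char *s = SPACE_0x40;
	int copies = 0;

	for (const char *p = s; (p = strstr(p, ".space 4")) != NULL; p++)
		copies++;
	/* 16 directives of 4 bytes each = 0x40 bytes of padding */
	printf("SPACE_0x40 = %d directives = %d (0x%x) bytes\n",
	       copies, copies * 4, copies * 4);
	return 0;
}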
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0bcd3834157..1911dae19e4 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,7 +9,7 @@
*/
#include <linux/export.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/string.h>
@@ -34,8 +34,8 @@ static const struct leds_evt_name evt_names[] = {
{ "red", led_red_on, led_red_off },
};
-static ssize_t leds_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t leds_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
int ret = -EINVAL, len = strcspn(buf, " ");
@@ -69,15 +69,16 @@ static ssize_t leds_store(struct sys_device *dev,
return ret;
}
-static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+static DEVICE_ATTR(event, 0200, NULL, leds_store);
-static struct sysdev_class leds_sysclass = {
+static struct bus_type leds_subsys = {
.name = "leds",
+ .dev_name = "leds",
};
-static struct sys_device leds_device = {
+static struct device leds_device = {
.id = 0,
- .cls = &leds_sysclass,
+ .bus = &leds_subsys,
};
static int leds_suspend(void)
@@ -105,11 +106,11 @@ static struct syscore_ops leds_syscore_ops = {
static int __init leds_init(void)
{
int ret;
- ret = sysdev_class_register(&leds_sysclass);
+ ret = subsys_system_register(&leds_subsys, NULL);
if (ret == 0)
- ret = sysdev_register(&leds_device);
+ ret = device_register(&leds_device);
if (ret == 0)
- ret = sysdev_create_file(&leds_device, &attr_event);
+ ret = device_create_file(&leds_device, &dev_attr_event);
if (ret == 0)
register_syscore_ops(&leds_syscore_ops);
return ret;
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd496c3..764bd456d84 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -12,12 +12,11 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
+#include <asm/system.h>
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
-extern void setup_mm_for_reboot(char mode);
-
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
@@ -111,14 +110,6 @@ void machine_kexec(struct kimage *image)
if (kexec_reinit)
kexec_reinit();
- local_irq_disable();
- local_fiq_disable();
- setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
- flush_cache_all();
- outer_flush_all();
- outer_disable();
- cpu_proc_fin();
- outer_inv_all();
- flush_cache_all();
- cpu_reset(reboot_code_buffer_phys);
+
+ soft_restart(reboot_code_buffer_phys);
}
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
new file mode 100644
index 00000000000..f8179c6a817
--- /dev/null
+++ b/arch/arm/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ * linux/arch/arm/kernel/opcodes.c
+ *
+ * A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is the condition code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * the bit position within each 16-bit entry is selected by the current NZCV flags
+ */
+static const unsigned short cc_map[16] = {
+ 0xF0F0, /* EQ == Z set */
+ 0x0F0F, /* NE */
+ 0xCCCC, /* CS == C set */
+ 0x3333, /* CC */
+ 0xFF00, /* MI == N set */
+ 0x00FF, /* PL */
+ 0xAAAA, /* VS == V set */
+ 0x5555, /* VC */
+ 0x0C0C, /* HI == C set && Z clear */
+ 0xF3F3, /* LS == C clear || Z set */
+ 0xAA55, /* GE == (N==V) */
+ 0x55AA, /* LT == (N!=V) */
+ 0x0A05, /* GT == (!Z && (N==V)) */
+ 0xF5FA, /* LE == (Z || (N!=V)) */
+ 0xFFFF, /* AL always */
+ 0 /* NV */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ * opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+ u32 cc_bits = opcode >> 28;
+ u32 psr_cond = psr >> 28;
+ unsigned int ret;
+
+ if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+ if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+ ret = ARM_OPCODE_CONDTEST_PASS;
+ else
+ ret = ARM_OPCODE_CONDTEST_FAIL;
+ } else {
+ ret = ARM_OPCODE_CONDTEST_UNCOND;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
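A minimal usage sketch (the handler name is hypothetical; the SWP emulation change later in this diff follows the same pattern): a trap handler checks the saved CPSR before emulating a conditional instruction.

/* Sketch only: decide whether a trapped ARM instruction should be emulated. */
static int my_emulate_insn(struct pt_regs *regs, u32 instr)
{
	switch (arm_check_condition(instr, regs->ARM_cpsr)) {
	case ARM_OPCODE_CONDTEST_FAIL:
		regs->ARM_pc += 4;	/* condition false: just skip it */
		return 0;
	case ARM_OPCODE_CONDTEST_UNCOND:
		return -EFAULT;		/* unconditional encoding space */
	case ARM_OPCODE_CONDTEST_PASS:
	default:
		break;
	}
	/* ...emulate the instruction here... */
	return 0;
}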
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 24e2347be6b..5bb91bf3d47 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
-int
-armpmu_get_max_events(void)
+int perf_num_counters(void)
{
int max_events = 0;
@@ -69,12 +68,6 @@ armpmu_get_max_events(void)
return max_events;
}
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
- return armpmu_get_max_events();
-}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED 0xFFFF
@@ -343,19 +336,25 @@ validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
+ DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
- memset(&fake_pmu, 0, sizeof(fake_pmu));
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ memset(fake_used_mask, 0, sizeof(fake_used_mask));
+ fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
@@ -374,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
+ struct arm_pmu_platdata *plat =
+ dev_get_platdata(&pmu_device->dev);
irqs = min(pmu_device->num_resources, num_possible_cpus());
@@ -381,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
+ if (irq >= 0) {
+ if (plat && plat->disable_irq)
+ plat->disable_irq(irq);
free_irq(irq, armpmu);
+ }
}
release_pmu(armpmu->type);
@@ -396,6 +400,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
int i, err, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
+ if (!pmu_device)
+ return -ENODEV;
+
err = reserve_pmu(armpmu->type);
if (err) {
pr_warning("unable to reserve pmu\n");
@@ -439,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
irq);
armpmu_release_hardware(armpmu);
return err;
- }
+ } else if (plat && plat->enable_irq)
+ plat->enable_irq(irq);
cpumask_set_cpu(i, &armpmu->active_irqs);
}
@@ -631,6 +639,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
+ if (!cpu_pmu)
+ return -ENODEV;
+
cpu_pmu->plat_device = pdev;
return 0;
}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index e63d8115c01..533be9930ec 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -65,13 +65,15 @@ enum armv6_counters {
* accesses/misses in hardware.
*/
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
* accesses/misses in hardware.
*/
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1ef6d0034b8..460bbbb6b88 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
* they are not available.
*/
enum armv7_perf_types {
- ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
- ARMV7_PERFCTR_IFETCH_MISS = 0x01,
- ARMV7_PERFCTR_ITLB_MISS = 0x02,
- ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
- ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
- ARMV7_PERFCTR_DTLB_REFILL = 0x05,
- ARMV7_PERFCTR_DREAD = 0x06,
- ARMV7_PERFCTR_DWRITE = 0x07,
- ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
- ARMV7_PERFCTR_EXC_TAKEN = 0x09,
- ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
- ARMV7_PERFCTR_CID_WRITE = 0x0B,
- /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+ ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
+ ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
+ ARMV7_PERFCTR_ITLB_REFILL = 0x02,
+ ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
+ ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
+ ARMV7_PERFCTR_DTLB_REFILL = 0x05,
+ ARMV7_PERFCTR_MEM_READ = 0x06,
+ ARMV7_PERFCTR_MEM_WRITE = 0x07,
+ ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
+ ARMV7_PERFCTR_EXC_TAKEN = 0x09,
+ ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
+ ARMV7_PERFCTR_CID_WRITE = 0x0B,
+
+ /*
+ * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
- * - all branch instructions,
+ * - all (taken) branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
- ARMV7_PERFCTR_PC_WRITE = 0x0C,
- ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
- ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
- ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
+ ARMV7_PERFCTR_PC_WRITE = 0x0C,
+ ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
+ ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
+ ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
+ ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
+ ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
- ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
- ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
- ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
- ARMV7_PERFCTR_MEM_ACCESS = 0x13,
- ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
- ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
- ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
- ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
- ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
- ARMV7_PERFCTR_BUS_ACCESS = 0x19,
- ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
- ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
- ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
- ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
-
- ARMV7_PERFCTR_CPU_CYCLES = 0xFF
+ ARMV7_PERFCTR_MEM_ACCESS = 0x13,
+ ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+ ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
+ ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
+ ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
+ ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
+ ARMV7_PERFCTR_BUS_ACCESS = 0x19,
+ ARMV7_PERFCTR_MEM_ERROR = 0x1A,
+ ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
+ ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
+ ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
+
+ ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
- ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
- ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
- ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
- ARMV7_PERFCTR_L2_ACCESS = 0x43,
- ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
- ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
- ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
- ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
- ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
- ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
- ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
- ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
- ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
- ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
- ARMV7_PERFCTR_L2_NEON = 0x4E,
- ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
- ARMV7_PERFCTR_L1_INST = 0x50,
- ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
- ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
- ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
- ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
- ARMV7_PERFCTR_OP_EXECUTED = 0x55,
- ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
- ARMV7_PERFCTR_CYCLES_INST = 0x57,
- ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
- ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
- ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
-
- ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
- ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
- ARMV7_PERFCTR_PMU_EVENTS = 0x72,
+ ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
+ ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
+ ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
+ ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
- ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
- ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
- ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
-
- ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
- ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
-
- ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
- ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
- ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
- ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
- ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
- ARMV7_PERFCTR_DATA_EVICTION = 0x65,
- ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
- ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
- ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
-
- ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
-
- ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
- ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
- ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
- ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
- ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
-
- ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
- ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
- ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
- ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
- ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
- ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
- ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
-
- ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
- ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
-
- ARMV7_PERFCTR_ISB_INST = 0x90,
- ARMV7_PERFCTR_DSB_INST = 0x91,
- ARMV7_PERFCTR_DMB_INST = 0x92,
- ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
-
- ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
- ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
- ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
- ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
- ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
- ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
+ ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
+ ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
+ ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
- ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
- ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
-
- ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
- ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
- ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
- ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
- ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
- ARMV7_PERFCTR_READ_ALLOC = 0xc5,
-
- ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
+ ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
+ ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
- ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
- ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
- ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
- ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
+ ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
+ ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
+ ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
+ ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
- ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
- ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
+ ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
+ ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
- ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
- ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
- ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
- ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
+ ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
+ ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
+ ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
+ ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
- ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
+ ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/*
@@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* combined.
*/
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(L1I)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
+ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
+ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(LL)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
+ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
+ [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(BPU)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] =
- ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* combined.
*/
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(BPU)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A5 HW events mapping
*/
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_DCACHE_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_DCACHE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_PREFETCH_LINEFILL,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
/*
* The prefetch counters don't differentiate between the I
* side and the D side.
*/
[C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_PREFETCH_LINEFILL,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(LL)] = {
@@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A15 HW events mapping
*/
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(LL)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)]
- = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+ [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+ [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)]
- = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e0cca10a841..3b99d826982 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -48,13 +48,15 @@ enum xscale_counters {
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
- [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
- [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
- [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c3407ee857..2334bf8a650 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
{
clear_bit_unlock(type, pmu_lock);
}
+EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 75316f0dd02..971d65c253a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,7 +57,7 @@ static const char *isa_modes[] = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
-extern void setup_mm_for_reboot(char mode);
+extern void setup_mm_for_reboot(void);
static volatile int hlt_counter;
@@ -92,18 +92,24 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
-void arm_machine_restart(char mode, const char *cmd)
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
{
- /* Disable interrupts first */
- local_irq_disable();
- local_fiq_disable();
+ phys_reset_t phys_reset;
- /*
- * Tell the mm system that we are going to reboot -
- * we may need it to insert some 1:1 mappings so that
- * soft boot works.
- */
- setup_mm_for_reboot(mode);
+ /* Take out a flat memory mapping. */
+ setup_mm_for_reboot();
/* Clean and invalidate caches */
flush_cache_all();
@@ -114,18 +120,35 @@ void arm_machine_restart(char mode, const char *cmd)
/* Push out any further dirty data, and ensure cache is empty */
flush_cache_all();
- /*
- * Now call the architecture specific reboot code.
- */
- arch_reset(mode, cmd);
+ /* Switch to the identity mapping. */
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset((unsigned long)addr);
- /*
- * Whoops - the architecture was unable to reboot.
- * Tell the user!
- */
- mdelay(1000);
- printk("Reboot failed -- System halted\n");
- while (1);
+ /* Should never get here. */
+ BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+ /* Disable interrupts first */
+ local_irq_disable();
+ local_fiq_disable();
+
+ /* Disable the L2 if we're the last man standing. */
+ if (num_online_cpus() == 1)
+ outer_disable();
+
+ /* Change to the new stack and continue with the reset. */
+ call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+ /* Should never get here. */
+ BUG();
+}
+
+static void null_restart(char mode, const char *cmd)
+{
}
/*
@@ -134,7 +157,7 @@ void arm_machine_restart(char mode, const char *cmd)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
+void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
static void do_nothing(void *unused)
@@ -183,7 +206,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
leds_event(led_idle_start);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
@@ -192,6 +216,9 @@ void cpu_idle(void)
#endif
local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+ wmb();
+#endif
if (hlt_counter) {
local_irq_enable();
cpu_relax();
@@ -210,7 +237,8 @@ void cpu_idle(void)
}
}
leds_event(led_idle_end);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -250,7 +278,15 @@ void machine_power_off(void)
void machine_restart(char *cmd)
{
machine_shutdown();
+
arm_pm_restart(reboot_mode, cmd);
+
+ /* Give the platform 1s to restart before declaring failure */
+ mdelay(1000);
+
+ /* Whoops - the platform was unable to reboot. Tell the user! */
+ printk("Reboot failed -- System halted\n");
+ while (1);
}
void __show_regs(struct pt_regs *regs)
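With arm_machine_restart() gone, callers that need to branch to physically addressed reset code go through soft_restart() instead (machine_kexec() above is one such caller). A hedged sketch of a hypothetical platform restart hook, which setup_arch() would now install via the machine descriptor's restart field:

/* Sketch only: my_reboot_stub and my_restart are made-up names. */
extern void my_reboot_stub(void);

static void my_restart(char mode, const char *cmd)
{
	/*
	 * soft_restart() masks IRQ/FIQ, switches to a private stack,
	 * sets up the 1:1 mapping, cleans the caches and finally calls
	 * cpu_reset() with the physical address we pass in.
	 */
	soft_restart(virt_to_phys((void *)my_reboot_stub));
}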
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 9a46370fe9d..5416c7c1252 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -14,61 +14,153 @@
#include <asm/sched_clock.h>
+struct clock_data {
+ u64 epoch_ns;
+ u32 epoch_cyc;
+ u32 epoch_cyc_copy;
+ u32 mult;
+ u32 shift;
+};
+
static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
-static void (*sched_clock_update_fn)(void);
+
+static struct clock_data cd = {
+ .mult = NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+ return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+ return (cyc * mult) >> shift;
+}
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+ u64 epoch_ns;
+ u32 epoch_cyc;
+
+ /*
+ * Load the epoch_cyc and epoch_ns atomically. We do this by
+ * ensuring that we always write epoch_cyc, epoch_ns and
+ * epoch_cyc_copy in strict order, and read them in strict order.
+ * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+ * the middle of an update, and we should repeat the load.
+ */
+ do {
+ epoch_cyc = cd.epoch_cyc;
+ smp_rmb();
+ epoch_ns = cd.epoch_ns;
+ smp_rmb();
+ } while (epoch_cyc != cd.epoch_cyc_copy);
+
+ return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+ unsigned long flags;
+ u32 cyc;
+ u64 ns;
+
+ cyc = read_sched_clock();
+ ns = cd.epoch_ns +
+ cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+ cd.mult, cd.shift);
+ /*
+ * Write epoch_cyc and epoch_ns in a way that the update is
+ * detectable in cyc_to_sched_clock().
+ */
+ raw_local_irq_save(flags);
+ cd.epoch_cyc = cyc;
+ smp_wmb();
+ cd.epoch_ns = ns;
+ smp_wmb();
+ cd.epoch_cyc_copy = cyc;
+ raw_local_irq_restore(flags);
+}
static void sched_clock_poll(unsigned long wrap_ticks)
{
mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
- sched_clock_update_fn();
+ update_sched_clock();
}
-void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
- unsigned int clock_bits, unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
u64 res, wrap;
char r_unit;
- sched_clock_update_fn = update;
+ BUG_ON(bits > 32);
+ WARN_ON(!irqs_disabled());
+ WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+ read_sched_clock = read;
+ sched_clock_mask = (1 << bits) - 1;
/* calculate the mult/shift to convert counter ticks to ns. */
- clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
+ clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
r = rate;
if (r >= 4000000) {
r /= 1000000;
r_unit = 'M';
- } else {
+ } else if (r >= 1000) {
r /= 1000;
r_unit = 'k';
- }
+ } else
+ r_unit = ' ';
/* calculate how many ns until we wrap */
- wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
+ wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
do_div(wrap, NSEC_PER_MSEC);
w = wrap;
/* calculate the ns resolution of this counter */
- res = cyc_to_ns(1ULL, cd->mult, cd->shift);
+ res = cyc_to_ns(1ULL, cd.mult, cd.shift);
pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
- clock_bits, r, r_unit, res, w);
+ bits, r, r_unit, res, w);
/*
* Start the timer to keep sched_clock() properly updated and
* sets the initial epoch.
*/
sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
- update();
+ update_sched_clock();
/*
* Ensure that sched_clock() starts off at 0ns
*/
- cd->epoch_ns = 0;
+ cd.epoch_ns = 0;
+
+ pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc = read_sched_clock();
+ return cyc_to_sched_clock(cyc, sched_clock_mask);
}
void __init sched_clock_postinit(void)
{
+ /*
+ * If no sched_clock function has been provided by this point,
+ * make the jiffy-based one the final one.
+ */
+ if (read_sched_clock == jiffy_sched_clock_read)
+ setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
sched_clock_poll(sched_clock_timer.data);
}
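A minimal registration sketch under assumed names (my_counter_base and the 24 MHz rate are made up): with the new interface a platform timer driver only supplies a read callback, the counter width and its rate.

/* Sketch only: register a free-running 32-bit up-counter as sched_clock. */
static void __iomem *my_counter_base;		/* hypothetical ioremap()ed base */

static u32 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_counter_base);
}

static void __init my_timer_init(void)
{
	/* Must run early, with IRQs still off, before sched_clock_postinit(). */
	setup_sched_clock(my_read_sched_clock, 32, 24000000);
}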
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3448a3f9cc8..129fbd55bde 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,6 +31,7 @@
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/sort.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -52,6 +53,7 @@
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
+#include <asm/memblock.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
@@ -890,13 +892,17 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
return mdesc;
}
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
void __init setup_arch(char **cmdline_p)
{
struct machine_desc *mdesc;
- unwind_init();
-
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
@@ -904,8 +910,14 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- if (mdesc->soft_reboot)
- reboot_setup("s");
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ extern unsigned long arm_dma_zone_size;
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ }
+#endif
+ if (mdesc->restart_mode)
+ reboot_setup(&mdesc->restart_mode);
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
@@ -918,12 +930,16 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
paging_init(mdesc);
request_standard_resources(mdesc);
+ if (mdesc->restart)
+ arm_pm_restart = mdesc->restart;
+
unflatten_device_tree();
#ifdef CONFIG_SMP
@@ -934,12 +950,6 @@ void __init setup_arch(char **cmdline_p)
tcm_init();
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 020e99c845e..1f268bda455 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -54,14 +54,18 @@ ENDPROC(cpu_suspend_abort)
* r0 = control register value
*/
.align 5
+ .pushsection .idmap.text,"ax"
ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu
+ instr_sync
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r0, c0, c0, 0 @ read id reg
+ instr_sync
mov r0, r0
mov r0, r0
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu)
+ .popsection
cpu_resume_after_mmu:
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ef5640b9e21..57db122a4f6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
+#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
- pgd_t *pgd;
int ret;
/*
@@ -84,29 +84,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
}
/*
- * Allocate initial page tables to allow the new CPU to
- * enable the MMU safely. This essentially means a set
- * of our "standard" page tables, with the addition of
- * a 1:1 mapping for the physical address of the kernel.
- */
- pgd = pgd_alloc(&init_mm);
- if (!pgd)
- return -ENOMEM;
-
- if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
- identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
- identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
- identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
- }
-
- /*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
- secondary_data.pgdir = virt_to_phys(pgd);
+ secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.stack = NULL;
secondary_data.pgdir = 0;
- if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
- identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
- identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
- identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
- }
-
- pgd_free(&init_mm, pgd);
-
return ret;
}
@@ -550,6 +522,10 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
+#ifdef CONFIG_HOTPLUG_CPU
+ platform_cpu_kill(cpu);
+#endif
+
while (1)
cpu_relax();
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a8a6682d6b5..c8e938553d4 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,8 +10,11 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
@@ -25,6 +28,7 @@
/* set up by the platform code */
void __iomem *twd_base;
+static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static struct clock_event_device __percpu **twd_evt;
@@ -89,6 +93,52 @@ void twd_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(clk->irq);
}
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+ twd_timer_rate = clk_get_rate(twd_clk);
+
+ clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+ smp_call_function_single(freqs->cpu, twd_update_frequency,
+ NULL, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+ .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+ if (!IS_ERR(twd_clk))
+ return cpufreq_register_notifier(&twd_cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+#endif
+
static void __cpuinit twd_calibrate_rate(void)
{
unsigned long count;
@@ -140,6 +190,35 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+static struct clk *twd_get_clock(void)
+{
+ struct clk *clk;
+ int err;
+
+ clk = clk_get_sys("smp_twd", NULL);
+ if (IS_ERR(clk)) {
+ pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+ return clk;
+ }
+
+ err = clk_prepare(clk);
+ if (err) {
+ pr_err("smp_twd: clock failed to prepare: %d\n", err);
+ clk_put(clk);
+ return ERR_PTR(err);
+ }
+
+ err = clk_enable(clk);
+ if (err) {
+ pr_err("smp_twd: clock failed to enable: %d\n", err);
+ clk_unprepare(clk);
+ clk_put(clk);
+ return ERR_PTR(err);
+ }
+
+ return clk;
+}
+
/*
* Setup the local clock events for a CPU.
*/
@@ -165,7 +244,13 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
}
}
- twd_calibrate_rate();
+ if (!twd_clk)
+ twd_clk = twd_get_clock();
+
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -173,15 +258,11 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
- clk->shift = 20;
- clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
- clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
- clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
this_cpu_clk = __this_cpu_ptr(twd_evt);
*this_cpu_clk = clk;
- clockevents_register_device(clk);
-
+ clockevents_config_and_register(clk, twd_timer_rate,
+ 0xf, 0xffffffff);
enable_percpu_irq(clk->irq, 0);
}
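For reference, the open-coded mult/shift/min/max setup that the hunk above deletes is what clockevents_config_and_register() now derives internally from the rate and the delta limits; a rough equivalence sketch, assuming the same 0xf/0xffffffff bounds:

/*
 * Roughly what the removed lines did by hand (illustrative only):
 *
 *	clk->shift        = 20;
 *	clk->mult         = div_sc(rate, NSEC_PER_SEC, clk->shift);
 *	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 *	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 *	clockevents_register_device(clk);
 *
 * versus the consolidated call, which also allows the rate to be
 * refreshed later via clockevents_update_freq() from the cpufreq
 * notifier above:
 */
clockevents_config_and_register(clk, rate, 0xf, 0xffffffff);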
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 93a22d282c1..1794cc3b0f1 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,13 +1,12 @@
#include <linux/init.h>
+#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-static pgd_t *suspend_pgd;
-
extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
extern void cpu_resume_mmu(void);
@@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
- *ptr++ = virt_to_phys(suspend_pgd);
+ *ptr++ = virt_to_phys(idmap_pgd);
*ptr++ = sp;
*ptr++ = virt_to_phys(cpu_do_resume);
@@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct mm_struct *mm = current->active_mm;
int ret;
- if (!suspend_pgd)
+ if (!idmap_pgd)
return -EINVAL;
/*
@@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
-
-static int __init cpu_suspend_init(void)
-{
- suspend_pgd = pgd_alloc(&init_mm);
- if (suspend_pgd) {
- unsigned long addr = virt_to_phys(cpu_resume_mmu);
- identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
- }
- return suspend_pgd ? 0 : -ENOMEM;
-}
-core_initcall(cpu_suspend_init);
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5f452f8fde0..df745188f5d 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -25,6 +25,7 @@
#include <linux/syscalls.h>
#include <linux/perf_event.h>
+#include <asm/opcodes.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -185,6 +186,21 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
+ res = arm_check_condition(instr, regs->ARM_cpsr);
+ switch (res) {
+ case ARM_OPCODE_CONDTEST_PASS:
+ break;
+ case ARM_OPCODE_CONDTEST_FAIL:
+ /* Condition failed - return to next instruction */
+ regs->ARM_pc += 4;
+ return 0;
+ case ARM_OPCODE_CONDTEST_UNCOND:
+ /* If unconditional encoding - not a SWP, undef */
+ return -EFAULT;
+ default:
+ return -EINVAL;
+ }
+
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30e302d33e0..01ec453bb92 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -180,9 +180,9 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
*/
void __init tcm_init(void)
{
- u32 tcm_status = read_cpuid_tcmstatus();
- u8 dtcm_banks = (tcm_status >> 16) & 0x03;
- u8 itcm_banks = (tcm_status & 0x03);
+ u32 tcm_status;
+ u8 dtcm_banks;
+ u8 itcm_banks;
size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
char *start;
@@ -191,6 +191,22 @@ void __init tcm_init(void)
int ret;
int i;
+ /*
+ * Prior to ARMv5 there is no TCM, and trying to read the status
+ * register will hang the processor.
+ */
+ if (cpu_architecture() < CPU_ARCH_ARMv5) {
+ if (dtcm_code_sz || itcm_code_sz)
+ pr_info("CPU TCM: %u bytes of DTCM and %u bytes of "
+ "ITCM code compiled in, but no TCM present "
+ "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
+ return;
+ }
+
+ tcm_status = read_cpuid_tcmstatus();
+ dtcm_banks = (tcm_status >> 16) & 0x03;
+ itcm_banks = (tcm_status & 0x03);
+
/* Values greater than 2 for D/ITCM banks are "reserved" */
if (dtcm_banks > 2)
dtcm_banks = 0;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 1040c00405d..8200deaa14f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -43,7 +43,7 @@
struct cputopo_arm cpu_topology[NR_CPUS];
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e7e8365795c..00df012c467 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
- unsigned long *insn; /* pointer to the current instructions word */
+ const unsigned long *insn; /* pointer to the current instructions word */
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
@@ -83,8 +83,9 @@ enum regs {
PC = 15
};
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
})
/*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
* guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
*/
-static struct unwind_idx *search_index(unsigned long addr,
- struct unwind_idx *first,
- struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+ const struct unwind_idx *start,
+ const struct unwind_idx *origin,
+ const struct unwind_idx *stop)
{
- pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+ unsigned long addr_prel31;
+
+ pr_debug("%s(%08lx, %p, %p, %p)\n",
+ __func__, addr, start, origin, stop);
+
+ /*
+ * only search in the section with the matching sign. This way the
+ * prel31 numbers can be compared as unsigned longs.
+ */
+ if (addr < (unsigned long)start)
+ /* negative offsets: [start; origin) */
+ stop = origin;
+ else
+ /* positive offsets: [origin; stop) */
+ start = origin;
+
+ /* prel31 for address relative to start */
+ addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
- if (addr < first->addr) {
+ while (start < stop - 1) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+ /*
+ * As addr_prel31 is relative to start, an offset is needed to
+ * make it relative to mid.
+ */
+ if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+ mid->addr_offset)
+ stop = mid;
+ else {
+ /* keep addr_prel31 relative to start */
+ addr_prel31 -= ((unsigned long)mid -
+ (unsigned long)start);
+ start = mid;
+ }
+ }
+
+ if (likely(start->addr_offset <= addr_prel31))
+ return start;
+ else {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
- } else if (addr >= last->addr)
- return last;
+ }
+}
- while (first < last - 1) {
- struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+ const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+ pr_debug("%s(%p, %p)\n", __func__, start, stop);
+ while (start < stop) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
- if (addr < mid->addr)
- last = mid;
+ if (mid->addr_offset >= 0x40000000)
+ /* negative offset */
+ start = mid + 1;
else
- first = mid;
+ /* positive offset */
+ stop = mid;
}
-
- return first;
+ pr_debug("%s -> %p\n", __func__, stop);
+ return stop;
}
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
- struct unwind_idx *idx = NULL;
+ const struct unwind_idx *idx = NULL;
unsigned long flags;
pr_debug("%s(%08lx)\n", __func__, addr);
- if (core_kernel_text(addr))
+ if (core_kernel_text(addr)) {
+ if (unlikely(!__origin_unwind_idx))
+ __origin_unwind_idx =
+ unwind_find_origin(__start_unwind_idx,
+ __stop_unwind_idx);
+
/* main unwind table */
idx = search_index(addr, __start_unwind_idx,
- __stop_unwind_idx - 1);
- else {
+ __origin_unwind_idx,
+ __stop_unwind_idx);
+ } else {
/* module unwind tables */
struct unwind_table *table;
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
if (addr >= table->begin_addr &&
addr < table->end_addr) {
idx = search_index(addr, table->start,
- table->stop - 1);
+ table->origin,
+ table->stop);
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
- struct unwind_idx *idx;
+ const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
unsigned long text_size)
{
unsigned long flags;
- struct unwind_idx *idx;
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
if (!tab)
return tab;
- tab->start = (struct unwind_idx *)start;
- tab->stop = (struct unwind_idx *)(start + size);
+ tab->start = (const struct unwind_idx *)start;
+ tab->stop = (const struct unwind_idx *)(start + size);
+ tab->origin = unwind_find_origin(tab->start, tab->stop);
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- /* Convert the symbol addresses to absolute values */
- for (idx = tab->start; idx < tab->stop; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
kfree(tab);
}
-
-int __init unwind_init(void)
-{
- struct unwind_idx *idx;
-
- /* Convert the symbol addresses to absolute values */
- for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
- pr_debug("unwind: ARM stack unwinding initialised\n");
-
- return 0;
-}
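For readers not steeped in the ARM EHABI format: the rewrite above stops converting every index entry to an absolute address at boot (the deleted unwind_init()) and instead binary-searches the table in its native prel31 form, split at an origin between negative and positive offsets. The stand-alone C sketch below only shows what a prel31 field encodes: a 31-bit signed offset relative to the word that holds it. It is not the kernel helper, and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Decode a prel31 word: sign-extend bit 30, then add the field's own address. */
static uintptr_t prel31_to_abs(const uint32_t *field)
{
        int32_t offset = ((int32_t)(*field << 1)) >> 1;
        return (uintptr_t)field + offset;
}

int main(void)
{
        uint32_t words[2] = { 0x7ffffff0u /* -16 */, 0x00000010u /* +16 */ };

        printf("entry 0: %p -> %#lx\n", (void *)&words[0],
               (unsigned long)prel31_to_abs(&words[0]));
        printf("entry 1: %p -> %#lx\n", (void *)&words[1],
               (unsigned long)prel31_to_abs(&words[1]));
        return 0;
}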
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 20b3041e086..f76e7554867 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__idmap_text_start) = .; \
+ *(.idmap.text) \
+ VMLINUX_SYMBOL(__idmap_text_end) = .;
+
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
@@ -92,6 +98,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IDMAP_TEXT
#ifdef CONFIG_MMU
*(.fixup)
#endif
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index cf73a7f742d..0ade0acc1ed 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -13,7 +13,8 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o \
- io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+ io-readsb.o io-writesb.o io-readsl.o io-writesl.o \
+ call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 10d868a5a48..d6408d1ee54 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,9 @@
+#include <asm/unwind.h>
+
#if __LINUX_ARM_ARCH__ >= 6
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -13,9 +17,13 @@
cmp r0, #0
bne 1b
bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1
@@ -34,9 +42,13 @@
cmp r0, #0
movne r0, #1
2: bx lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#else
- .macro bitop, instr
+ .macro bitop, name, instr
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r2, r0, #31
@@ -49,6 +61,8 @@
str r2, [r1, r0, lsl #2]
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
/**
@@ -59,7 +73,9 @@
* Note: we can trivially conditionalise the store instruction
* to avoid dirtying the data cache.
*/
- .macro testop, instr, store
+ .macro testop, name, instr, store
+ENTRY( \name )
+UNWIND( .fnstart )
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r3, r0, #31
@@ -73,5 +89,7 @@
moveq r0, #0
restore_irqs ip
mov pc, lr
+UNWIND( .fnend )
+ENDPROC(\name )
.endm
#endif
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
new file mode 100644
index 00000000000..916c80f13ae
--- /dev/null
+++ b/arch/arm/lib/call_with_stack.S
@@ -0,0 +1,44 @@
+/*
+ * arch/arm/lib/call_with_stack.S
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Written by Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
+ *
+ * Change the stack to that pointed at by sp, then invoke fn(arg) with
+ * the new stack.
+ */
+ENTRY(call_with_stack)
+ str sp, [r2, #-4]!
+ str lr, [r2, #-4]!
+
+ mov sp, r2
+ mov r2, r0
+ mov r0, r1
+
+ adr lr, BSYM(1f)
+ mov pc, r2
+
+1: ldr lr, [sp]
+ ldr sp, [sp, #4]
+ mov pc, lr
+ENDPROC(call_with_stack)
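The new routine stashes the caller's sp and lr just below the supplied stack pointer, switches sp, calls fn(arg), and restores both on return. A hypothetical C caller, using the prototype documented in the file header above, could look like the fragment below; the buffer name and size are invented for illustration, and the stack top is passed because ARM stacks grow downwards.

#include <stdio.h>

/* Prototype as documented in call_with_stack.S above. */
void call_with_stack(void (*fn)(void *), void *arg, void *sp);

static void work(void *arg)
{
        printf("running on a private stack: %s\n", (const char *)arg);
}

/* Invented scratch area; call_with_stack() is handed a pointer to its top. */
static unsigned long scratch_stack[1024];

void run_on_scratch_stack(void)
{
        call_with_stack(work, "hello", &scratch_stack[1024]);
}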
diff --git a/arch/arm/lib/changebit.S b/arch/arm/lib/changebit.S
index 68ed5b62e83..f4027862172 100644
--- a/arch/arm/lib/changebit.S
+++ b/arch/arm/lib/changebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_change_bit)
- bitop eor
-ENDPROC(_change_bit)
+bitop _change_bit, eor
diff --git a/arch/arm/lib/clearbit.S b/arch/arm/lib/clearbit.S
index 4c04c3b51ee..f6b75fb64d3 100644
--- a/arch/arm/lib/clearbit.S
+++ b/arch/arm/lib/clearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_clear_bit)
- bitop bic
-ENDPROC(_clear_bit)
+bitop _clear_bit, bic
diff --git a/arch/arm/lib/setbit.S b/arch/arm/lib/setbit.S
index bbee5c66a23..618fedae4b3 100644
--- a/arch/arm/lib/setbit.S
+++ b/arch/arm/lib/setbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_set_bit)
- bitop orr
-ENDPROC(_set_bit)
+bitop _set_bit, orr
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 15a4d431f22..4becdc3a59c 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_change_bit)
- testop eor, str
-ENDPROC(_test_and_change_bit)
+testop _test_and_change_bit, eor, str
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 521b66b5b95..918841dcce7 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_clear_bit)
- testop bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop _test_and_clear_bit, bicne, strne
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index 1c98cc2185b..8d1b2fe9e48 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -12,6 +12,4 @@
#include "bitops.h"
.text
-ENTRY(_test_and_set_bit)
- testop orreq, streq
-ENDPROC(_test_and_set_bit)
+testop _test_and_set_bit, orreq, streq
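With ENTRY/ENDPROC and the unwind annotations folded into the macros, each wrapper file above shrinks to a single bitop/testop invocation carrying the symbol name. As a rough functional picture only (not the kernel implementation), the C fragment below expresses what the ARMv6+ bodies do with their ldrex/strex retry loop, using GCC's __atomic builtins instead.

#include <stdint.h>
#include <stdio.h>

/* Word-and-mask split as in the assembly: bit nr lives in word nr/32. */
static void sketch_set_bit(unsigned int nr, uint32_t *addr)
{
        uint32_t *word = addr + (nr >> 5);
        uint32_t mask = 1u << (nr & 31);
        uint32_t old = __atomic_load_n(word, __ATOMIC_RELAXED);

        /* A failed strex in the assembly simply retries; CAS does the same. */
        while (!__atomic_compare_exchange_n(word, &old, old | mask, 0,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED))
                ;
}

static int sketch_test_and_set_bit(unsigned int nr, uint32_t *addr)
{
        uint32_t *word = addr + (nr >> 5);
        uint32_t mask = 1u << (nr & 31);

        return (__atomic_fetch_or(word, mask, __ATOMIC_RELAXED) & mask) != 0;
}

int main(void)
{
        uint32_t bitmap[2] = { 0, 0 };

        sketch_set_bit(3, bitmap);
        printf("bit 3 already set: %d, word0 = %#x\n",
               sketch_test_and_set_bit(3, bitmap), bitmap[0]);
        return 0;
}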
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index d111c3e9924..4f991f29528 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -3,6 +3,12 @@ if ARCH_AT91
config HAVE_AT91_DATAFLASH_CARD
bool
+config HAVE_AT91_DBGU0
+ bool
+
+config HAVE_AT91_DBGU1
+ bool
+
config HAVE_AT91_USART3
bool
@@ -21,12 +27,14 @@ config ARCH_AT91RM9200
bool "AT91RM9200"
select CPU_ARM920T
select GENERIC_CLOCKEVENTS
+ select HAVE_AT91_DBGU0
select HAVE_AT91_USART3
config ARCH_AT91SAM9260
bool "AT91SAM9260 or AT91SAM9XE"
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select HAVE_AT91_DBGU0
select HAVE_AT91_USART3
select HAVE_AT91_USART4
select HAVE_AT91_USART5
@@ -37,11 +45,13 @@ config ARCH_AT91SAM9261
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
+ select HAVE_AT91_DBGU0
config ARCH_AT91SAM9G10
bool "AT91SAM9G10"
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select HAVE_AT91_DBGU0
select HAVE_FB_ATMEL
config ARCH_AT91SAM9263
@@ -50,6 +60,7 @@ config ARCH_AT91SAM9263
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
select HAVE_NET_MACB
+ select HAVE_AT91_DBGU1
config ARCH_AT91SAM9RL
bool "AT91SAM9RL"
@@ -57,11 +68,13 @@ config ARCH_AT91SAM9RL
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
select HAVE_FB_ATMEL
+ select HAVE_AT91_DBGU0
config ARCH_AT91SAM9G20
bool "AT91SAM9G20"
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select HAVE_AT91_DBGU0
select HAVE_AT91_USART3
select HAVE_AT91_USART4
select HAVE_AT91_USART5
@@ -74,6 +87,7 @@ config ARCH_AT91SAM9G45
select HAVE_AT91_USART3
select HAVE_FB_ATMEL
select HAVE_NET_MACB
+ select HAVE_AT91_DBGU1
config ARCH_AT91CAP9
bool "AT91CAP9"
@@ -81,6 +95,7 @@ config ARCH_AT91CAP9
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
select HAVE_NET_MACB
+ select HAVE_AT91_DBGU1
config ARCH_AT91X40
bool "AT91x40"
@@ -510,8 +525,13 @@ config AT91_TIMER_HZ
choice
prompt "Select a UART for early kernel messages"
-config AT91_EARLY_DBGU
- bool "DBGU"
+config AT91_EARLY_DBGU0
+ bool "DBGU on rm9200, 9260/9g20, 9261/9g10 and 9rl"
+ depends on HAVE_AT91_DBGU0
+
+config AT91_EARLY_DBGU1
+ bool "DBGU on 9263, 9g45 and cap9"
+ depends on HAVE_AT91_DBGU1
config AT91_EARLY_USART0
bool "USART0"
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index ecdd54dd68c..edb879ac04c 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -13,7 +13,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -23,11 +22,11 @@
#include <mach/at91cap9.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -137,7 +136,7 @@ static struct clk pwm_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk macb_clk = {
- .name = "macb_clk",
+ .name = "pclk",
.pmc_mask = 1 << AT91CAP9_ID_EMAC,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -210,6 +209,8 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for macb_hclk */
+ CLKDEV_CON_ID("hclk", &macb_clk),
CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk),
CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk),
CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
@@ -221,6 +222,10 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
+ CLKDEV_CON_ID("pioA", &pioABCD_clk),
+ CLKDEV_CON_ID("pioB", &pioABCD_clk),
+ CLKDEV_CON_ID("pioC", &pioABCD_clk),
+ CLKDEV_CON_ID("pioD", &pioABCD_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -293,37 +298,27 @@ void __init at91cap9_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91cap9_gpio[] = {
+static struct at91_gpio_bank at91cap9_gpio[] __initdata = {
{
.id = AT91CAP9_ID_PIOABCD,
- .offset = AT91_PIOA,
- .clock = &pioABCD_clk,
+ .regbase = AT91CAP9_BASE_PIOA,
}, {
.id = AT91CAP9_ID_PIOABCD,
- .offset = AT91_PIOB,
- .clock = &pioABCD_clk,
+ .regbase = AT91CAP9_BASE_PIOB,
}, {
.id = AT91CAP9_ID_PIOABCD,
- .offset = AT91_PIOC,
- .clock = &pioABCD_clk,
+ .regbase = AT91CAP9_BASE_PIOC,
}, {
.id = AT91CAP9_ID_PIOABCD,
- .offset = AT91_PIOD,
- .clock = &pioABCD_clk,
+ .regbase = AT91CAP9_BASE_PIOD,
}
};
-static void at91cap9_reset(void)
+static void at91cap9_restart(char mode, const char *cmd)
{
at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
}
-static void at91cap9_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91CAP9 processor initialization
* -------------------------------------------------------------------- */
@@ -333,10 +328,16 @@ static void __init at91cap9_map_io(void)
at91_init_sram(0, AT91CAP9_SRAM_BASE, AT91CAP9_SRAM_SIZE);
}
+static void __init at91cap9_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91CAP9_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91CAP9_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91CAP9_BASE_SMC);
+}
+
static void __init at91cap9_initialize(void)
{
- at91_arch_reset = at91cap9_reset;
- pm_power_off = at91cap9_poweroff;
+ arm_pm_restart = at91cap9_restart;
at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
/* Register GPIO subsystem */
@@ -394,6 +395,7 @@ static unsigned int at91cap9_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91cap9_soc = {
.map_io = at91cap9_map_io,
.default_irq_priority = at91cap9_default_irq_priority,
+ .ioremap_registers = at91cap9_ioremap_registers,
.register_clocks = at91cap9_register_clocks,
.init = at91cap9_initialize,
};
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
index adad70db70e..d298fb7cb21 100644
--- a/arch/arm/mach-at91/at91cap9_devices.c
+++ b/arch/arm/mach-at91/at91cap9_devices.c
@@ -76,7 +76,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable VBus control for UHP ports */
for (i = 0; i < data->ports; i++) {
- if (data->vbus_pin[i])
+ if (gpio_is_valid(data->vbus_pin[i]))
at91_set_gpio_output(data->vbus_pin[i], 0);
}
@@ -179,7 +179,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));
- if (data && data->vbus_pin > 0) {
+ if (data && gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
usba_udc_data.pdata.vbus_pin = data->vbus_pin;
@@ -200,7 +200,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data) {}
#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
+static struct macb_platform_data eth_data;
static struct resource eth_resources[] = {
[0] = {
@@ -227,12 +227,12 @@ static struct platform_device at91cap9_eth_device = {
.num_resources = ARRAY_SIZE(eth_resources),
};
-void __init at91_add_device_eth(struct at91_eth_data *data)
+void __init at91_add_device_eth(struct macb_platform_data *data)
{
if (!data)
return;
- if (data->phy_irq_pin) {
+ if (gpio_is_valid(data->phy_irq_pin)) {
at91_set_gpio_input(data->phy_irq_pin, 0);
at91_set_deglitch(data->phy_irq_pin, 1);
}
@@ -264,7 +264,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
platform_device_register(&at91cap9_eth_device);
}
#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
+void __init at91_add_device_eth(struct macb_platform_data *data) {}
#endif
@@ -332,13 +332,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
if (mmc_id == 0) { /* MCI0 */
@@ -398,8 +398,8 @@ static struct resource nand_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_BASE_SYS + AT91_ECC,
- .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
+ .start = AT91CAP9_BASE_ECC,
+ .end = AT91CAP9_BASE_ECC + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -425,15 +425,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
nand_data = *data;
@@ -670,8 +670,8 @@ static void __init at91_add_device_tc(void) { }
static struct resource rtt_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .start = AT91CAP9_BASE_RTT,
+ .end = AT91CAP9_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -694,10 +694,19 @@ static void __init at91_add_device_rtt(void)
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91CAP9_BASE_WDT,
+ .end = AT91CAP9_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91cap9_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -807,7 +816,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data)
at91_set_A_periph(AT91_PIN_PA9, 0); /* AC97RX */
/* reset */
- if (data->reset_pin)
+ if (gpio_is_valid(data->reset_pin))
at91_set_gpio_output(data->reset_pin, 0);
ac97_data = *data;
@@ -1021,8 +1030,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91CAP9_BASE_DBGU,
+ .end = AT91CAP9_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
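A recurring change in the device files above and below is replacing zero tests such as "if (data->det_pin)" with gpio_is_valid(): GPIO 0 is a perfectly legal line, and "not connected" is conveyed by a negative number instead. A minimal stand-in with the same shape is sketched below; the real helper comes from <linux/gpio.h>, and the ARCH_NR_GPIOS value here is just a placeholder.

#include <stdio.h>

#define ARCH_NR_GPIOS   256     /* placeholder limit, platform-dependent */

static int gpio_is_valid(int number)
{
        return number >= 0 && number < ARCH_NR_GPIOS;
}

int main(void)
{
        /* GPIO 0 is valid; a negative pin means "not wired up". */
        printf("gpio 0: %d, gpio -1: %d, gpio 17: %d\n",
               gpio_is_valid(0), gpio_is_valid(-1), gpio_is_valid(17));
        return 0;
}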
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 713d3bdbd28..99c3174e24a 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -23,6 +23,7 @@
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
static struct map_desc at91rm9200_io_desc[] __initdata = {
{
@@ -195,6 +196,10 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioC_clk),
+ CLKDEV_CON_ID("pioD", &pioD_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -268,27 +273,23 @@ void __init at91rm9200_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91rm9200_gpio[] = {
+static struct at91_gpio_bank at91rm9200_gpio[] __initdata = {
{
.id = AT91RM9200_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91RM9200_BASE_PIOA,
}, {
.id = AT91RM9200_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91RM9200_BASE_PIOB,
}, {
.id = AT91RM9200_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
+ .regbase = AT91RM9200_BASE_PIOC,
}, {
.id = AT91RM9200_ID_PIOD,
- .offset = AT91_PIOD,
- .clock = &pioD_clk,
+ .regbase = AT91RM9200_BASE_PIOD,
}
};
-static void at91rm9200_reset(void)
+static void at91rm9200_restart(char mode, const char *cmd)
{
/*
* Perform a hardware reset with the use of the Watchdog timer.
@@ -307,9 +308,13 @@ static void __init at91rm9200_map_io(void)
iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
}
+static void __init at91rm9200_ioremap_registers(void)
+{
+}
+
static void __init at91rm9200_initialize(void)
{
- at91_arch_reset = at91rm9200_reset;
+ arm_pm_restart = at91rm9200_restart;
at91_extern_irq = (1 << AT91RM9200_ID_IRQ0) | (1 << AT91RM9200_ID_IRQ1)
| (1 << AT91RM9200_ID_IRQ2) | (1 << AT91RM9200_ID_IRQ3)
| (1 << AT91RM9200_ID_IRQ4) | (1 << AT91RM9200_ID_IRQ5)
@@ -366,6 +371,7 @@ static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91rm9200_soc = {
.map_io = at91rm9200_map_io,
.default_irq_priority = at91rm9200_default_irq_priority,
+ .ioremap_registers = at91rm9200_ioremap_registers,
.register_clocks = at91rm9200_register_clocks,
.init = at91rm9200_initialize,
};
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 66591fa53e0..18bacec2b09 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
@@ -114,11 +114,11 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
if (!data)
return;
- if (data->vbus_pin) {
+ if (gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
}
- if (data->pullup_pin)
+ if (gpio_is_valid(data->pullup_pin))
at91_set_gpio_output(data->pullup_pin, 0);
udc_data = *data;
@@ -135,7 +135,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
#if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
+static struct macb_platform_data eth_data;
static struct resource eth_resources[] = {
[0] = {
@@ -162,12 +162,12 @@ static struct platform_device at91rm9200_eth_device = {
.num_resources = ARRAY_SIZE(eth_resources),
};
-void __init at91_add_device_eth(struct at91_eth_data *data)
+void __init at91_add_device_eth(struct macb_platform_data *data)
{
if (!data)
return;
- if (data->phy_irq_pin) {
+ if (gpio_is_valid(data->phy_irq_pin)) {
at91_set_gpio_input(data->phy_irq_pin, 0);
at91_set_deglitch(data->phy_irq_pin, 1);
}
@@ -199,7 +199,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
platform_device_register(&at91rm9200_eth_device);
}
#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
+void __init at91_add_device_eth(struct macb_platform_data *data) {}
#endif
@@ -260,7 +260,7 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
);
/* input/irq */
- if (data->irq_pin) {
+ if (gpio_is_valid(data->irq_pin)) {
at91_set_gpio_input(data->irq_pin, 1);
at91_set_deglitch(data->irq_pin, 1);
}
@@ -268,7 +268,7 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
at91_set_deglitch(data->det_pin, 1);
/* outputs, initially off */
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
at91_set_gpio_output(data->rst_pin, 0);
@@ -328,13 +328,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
/* CLK */
@@ -419,15 +419,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
at91_set_A_periph(AT91_PIN_PC1, 0); /* SMOE */
@@ -665,10 +665,24 @@ static void __init at91_add_device_tc(void) { }
* -------------------------------------------------------------------- */
#if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE)
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = AT91RM9200_BASE_RTC,
+ .end = AT91RM9200_BASE_RTC + SZ_256 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91_ID_SYS,
+ .end = AT91_ID_SYS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct platform_device at91rm9200_rtc_device = {
.name = "at91_rtc",
.id = -1,
- .num_resources = 0,
+ .resource = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
};
static void __init at91_add_device_rtc(void)
@@ -877,8 +891,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91RM9200_BASE_DBGU,
+ .end = AT91RM9200_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 1dd69c85dfe..a028cdf8f97 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -32,6 +32,8 @@ static unsigned long last_crtr;
static u32 irqmask;
static struct clock_event_device clkevt;
+#define RM9200_TIMER_LATCH ((AT91_SLOW_CLOCK + HZ/2) / HZ)
+
/*
* The ST_CRTR is updated asynchronously to the master clock ... but
* the updates as seen by the CPU don't seem to be strictly monotonic.
@@ -74,8 +76,8 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
if (sr & AT91_ST_PITS) {
u32 crtr = read_CRTR();
- while (((crtr - last_crtr) & AT91_ST_CRTV) >= LATCH) {
- last_crtr += LATCH;
+ while (((crtr - last_crtr) & AT91_ST_CRTV) >= RM9200_TIMER_LATCH) {
+ last_crtr += RM9200_TIMER_LATCH;
clkevt.event_handler(&clkevt);
}
return IRQ_HANDLED;
@@ -116,7 +118,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
case CLOCK_EVT_MODE_PERIODIC:
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
- at91_sys_write(AT91_ST_PIMR, LATCH);
+ at91_sys_write(AT91_ST_PIMR, RM9200_TIMER_LATCH);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* ALM for oneshot irqs, set by next_event()
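The new RM9200_TIMER_LATCH replaces the generic LATCH (defined in terms of CLOCK_TICK_RATE) with a value derived directly from the 32.768 kHz slow clock that drives the system timer. A worked example follows, with HZ = 100 assumed purely for illustration; the tick rate is a kernel configuration choice, not something fixed by this patch.

#include <stdio.h>

#define AT91_SLOW_CLOCK         32768   /* 32.768 kHz slow-clock crystal */
#define HZ                      100     /* assumed tick rate for the example */
#define RM9200_TIMER_LATCH      ((AT91_SLOW_CLOCK + HZ/2) / HZ)

int main(void)
{
        /* (32768 + 50) / 100 = 328 slow-clock cycles per tick, rounded to
         * the nearest integer rather than truncated. */
        printf("latch = %d slow-clock cycles\n", RM9200_TIMER_LATCH);
        return 0;
}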
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f5..5e46e4a9643 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -21,11 +20,11 @@
#include <mach/at91sam9260.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -120,7 +119,7 @@ static struct clk ohci_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk macb_clk = {
- .name = "macb_clk",
+ .name = "pclk",
.pmc_mask = 1 << AT91SAM9260_ID_EMAC,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -190,14 +189,16 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for macb_hclk */
+ CLKDEV_CON_ID("hclk", &macb_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
- CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
- CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
@@ -209,6 +210,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "fffd8000.serial", &usart5_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioC_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -270,28 +274,19 @@ void __init at91sam9260_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91sam9260_gpio[] = {
+static struct at91_gpio_bank at91sam9260_gpio[] __initdata = {
{
.id = AT91SAM9260_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91SAM9260_BASE_PIOA,
}, {
.id = AT91SAM9260_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91SAM9260_BASE_PIOB,
}, {
.id = AT91SAM9260_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
+ .regbase = AT91SAM9260_BASE_PIOC,
}
};
-static void at91sam9260_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91SAM9260 processor initialization
* -------------------------------------------------------------------- */
@@ -325,10 +320,16 @@ static void __init at91sam9260_map_io(void)
}
}
+static void __init at91sam9260_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91SAM9260_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
+}
+
static void __init at91sam9260_initialize(void)
{
- at91_arch_reset = at91sam9_alt_reset;
- pm_power_off = at91sam9260_poweroff;
+ arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
| (1 << AT91SAM9260_ID_IRQ2);
@@ -381,6 +382,7 @@ static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91sam9260_soc = {
.map_io = at91sam9260_map_io,
.default_irq_priority = at91sam9260_default_irq_priority,
+ .ioremap_registers = at91sam9260_ioremap_registers,
.register_clocks = at91sam9260_register_clocks,
.init = at91sam9260_initialize,
};
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 25e3464fb07..642ccb6d26b 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
@@ -115,7 +115,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
if (!data)
return;
- if (data->vbus_pin) {
+ if (gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
}
@@ -136,7 +136,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
+static struct macb_platform_data eth_data;
static struct resource eth_resources[] = {
[0] = {
@@ -163,12 +163,12 @@ static struct platform_device at91sam9260_eth_device = {
.num_resources = ARRAY_SIZE(eth_resources),
};
-void __init at91_add_device_eth(struct at91_eth_data *data)
+void __init at91_add_device_eth(struct macb_platform_data *data)
{
if (!data)
return;
- if (data->phy_irq_pin) {
+ if (gpio_is_valid(data->phy_irq_pin)) {
at91_set_gpio_input(data->phy_irq_pin, 0);
at91_set_deglitch(data->phy_irq_pin, 1);
}
@@ -200,7 +200,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
platform_device_register(&at91sam9260_eth_device);
}
#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
+void __init at91_add_device_eth(struct macb_platform_data *data) {}
#endif
@@ -243,13 +243,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
/* CLK */
@@ -330,11 +330,11 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (data->slot[i].bus_width) {
/* input/irq */
- if (data->slot[i].detect_pin) {
+ if (gpio_is_valid(data->slot[i].detect_pin)) {
at91_set_gpio_input(data->slot[i].detect_pin, 1);
at91_set_deglitch(data->slot[i].detect_pin, 1);
}
- if (data->slot[i].wp_pin)
+ if (gpio_is_valid(data->slot[i].wp_pin))
at91_set_gpio_input(data->slot[i].wp_pin, 1);
switch (i) {
@@ -399,8 +399,8 @@ static struct resource nand_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_BASE_SYS + AT91_ECC,
- .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
+ .start = AT91SAM9260_BASE_ECC,
+ .end = AT91SAM9260_BASE_ECC + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -426,15 +426,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
nand_data = *data;
@@ -714,8 +714,8 @@ static void __init at91_add_device_tc(void) { }
static struct resource rtt_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .start = AT91SAM9260_BASE_RTT,
+ .end = AT91SAM9260_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -738,10 +738,19 @@ static void __init at91_add_device_rtt(void)
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91SAM9260_BASE_WDT,
+ .end = AT91SAM9260_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91sam9260_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -837,8 +846,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91SAM9260_BASE_DBGU,
+ .end = AT91SAM9260_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -1281,17 +1290,17 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa);
- if (data->rst_pin) {
+ if (gpio_is_valid(data->rst_pin)) {
at91_set_multi_drive(data->rst_pin, 0);
at91_set_gpio_output(data->rst_pin, 1);
}
- if (data->irq_pin) {
+ if (gpio_is_valid(data->irq_pin)) {
at91_set_gpio_input(data->irq_pin, 0);
at91_set_deglitch(data->irq_pin, 1);
}
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 0);
at91_set_deglitch(data->det_pin, 1);
}
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 658a5185abf..b85b9ea6017 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -20,11 +19,11 @@
#include <mach/at91sam9261.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -176,6 +175,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &hck0),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioC_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -251,28 +253,19 @@ void __init at91sam9261_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91sam9261_gpio[] = {
+static struct at91_gpio_bank at91sam9261_gpio[] __initdata = {
{
.id = AT91SAM9261_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91SAM9261_BASE_PIOA,
}, {
.id = AT91SAM9261_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91SAM9261_BASE_PIOB,
}, {
.id = AT91SAM9261_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
+ .regbase = AT91SAM9261_BASE_PIOC,
}
};
-static void at91sam9261_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91SAM9261 processor initialization
* -------------------------------------------------------------------- */
@@ -285,10 +278,16 @@ static void __init at91sam9261_map_io(void)
at91_init_sram(0, AT91SAM9261_SRAM_BASE, AT91SAM9261_SRAM_SIZE);
}
+static void __init at91sam9261_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91SAM9261_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
+}
+
static void __init at91sam9261_initialize(void)
{
- at91_arch_reset = at91sam9_alt_reset;
- pm_power_off = at91sam9261_poweroff;
+ arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
| (1 << AT91SAM9261_ID_IRQ2);
@@ -341,6 +340,7 @@ static unsigned int at91sam9261_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91sam9261_soc = {
.map_io = at91sam9261_map_io,
.default_irq_priority = at91sam9261_default_irq_priority,
+ .ioremap_registers = at91sam9261_ioremap_registers,
.register_clocks = at91sam9261_register_clocks,
.init = at91sam9261_initialize,
};
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index ae78f4d03b7..fc59cbdb0e3 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
@@ -118,7 +118,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
if (!data)
return;
- if (data->vbus_pin) {
+ if (gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
}
@@ -171,13 +171,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
/* CLK */
@@ -240,15 +240,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
at91_set_A_periph(AT91_PIN_PC0, 0); /* NANDOE */
@@ -600,8 +600,8 @@ static void __init at91_add_device_tc(void) { }
static struct resource rtt_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .start = AT91SAM9261_BASE_RTT,
+ .end = AT91SAM9261_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -624,10 +624,19 @@ static void __init at91_add_device_rtt(void)
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91SAM9261_BASE_WDT,
+ .end = AT91SAM9261_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91sam9261_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -816,8 +825,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91SAM9261_BASE_DBGU,
+ .end = AT91SAM9261_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index f83fbb0ee0c..79e3669b111 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -19,11 +18,11 @@
#include <mach/at91sam9263.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -118,7 +117,7 @@ static struct clk pwm_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk macb_clk = {
- .name = "macb_clk",
+ .name = "pclk",
.pmc_mask = 1 << AT91SAM9263_ID_EMAC,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -182,6 +181,8 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for macb_hclk */
+ CLKDEV_CON_ID("hclk", &macb_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk),
@@ -191,6 +192,11 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioCDE_clk),
+ CLKDEV_CON_ID("pioD", &pioCDE_clk),
+ CLKDEV_CON_ID("pioE", &pioCDE_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -263,36 +269,25 @@ void __init at91sam9263_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91sam9263_gpio[] = {
+static struct at91_gpio_bank at91sam9263_gpio[] __initdata = {
{
.id = AT91SAM9263_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91SAM9263_BASE_PIOA,
}, {
.id = AT91SAM9263_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91SAM9263_BASE_PIOB,
}, {
.id = AT91SAM9263_ID_PIOCDE,
- .offset = AT91_PIOC,
- .clock = &pioCDE_clk,
+ .regbase = AT91SAM9263_BASE_PIOC,
}, {
.id = AT91SAM9263_ID_PIOCDE,
- .offset = AT91_PIOD,
- .clock = &pioCDE_clk,
+ .regbase = AT91SAM9263_BASE_PIOD,
}, {
.id = AT91SAM9263_ID_PIOCDE,
- .offset = AT91_PIOE,
- .clock = &pioCDE_clk,
+ .regbase = AT91SAM9263_BASE_PIOE,
}
};
-static void at91sam9263_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91SAM9263 processor initialization
* -------------------------------------------------------------------- */
@@ -303,10 +298,17 @@ static void __init at91sam9263_map_io(void)
at91_init_sram(1, AT91SAM9263_SRAM1_BASE, AT91SAM9263_SRAM1_SIZE);
}
+static void __init at91sam9263_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91SAM9263_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91SAM9263_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
+ at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
+}
+
static void __init at91sam9263_initialize(void)
{
- at91_arch_reset = at91sam9_alt_reset;
- pm_power_off = at91sam9263_poweroff;
+ arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1);
/* Register GPIO subsystem */
@@ -358,6 +360,7 @@ static unsigned int at91sam9263_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91sam9263_soc = {
.map_io = at91sam9263_map_io,
.default_irq_priority = at91sam9263_default_irq_priority,
+ .ioremap_registers = at91sam9263_ioremap_registers,
.register_clocks = at91sam9263_register_clocks,
.init = at91sam9263_initialize,
};
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index ad017eb1f8d..7b46b278702 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -70,7 +70,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable VBus control for UHP ports */
for (i = 0; i < data->ports; i++) {
- if (data->vbus_pin[i])
+ if (gpio_is_valid(data->vbus_pin[i]))
at91_set_gpio_output(data->vbus_pin[i], 0);
}
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
@@ -123,7 +123,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
if (!data)
return;
- if (data->vbus_pin) {
+ if (gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
}
@@ -144,7 +144,7 @@ void __init at91_add_device_udc(struct at91_udc_data *data) {}
#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
+static struct macb_platform_data eth_data;
static struct resource eth_resources[] = {
[0] = {
@@ -171,12 +171,12 @@ static struct platform_device at91sam9263_eth_device = {
.num_resources = ARRAY_SIZE(eth_resources),
};
-void __init at91_add_device_eth(struct at91_eth_data *data)
+void __init at91_add_device_eth(struct macb_platform_data *data)
{
if (!data)
return;
- if (data->phy_irq_pin) {
+ if (gpio_is_valid(data->phy_irq_pin)) {
at91_set_gpio_input(data->phy_irq_pin, 0);
at91_set_deglitch(data->phy_irq_pin, 1);
}
@@ -208,7 +208,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
platform_device_register(&at91sam9263_eth_device);
}
#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
+void __init at91_add_device_eth(struct macb_platform_data *data) {}
#endif
@@ -276,13 +276,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
if (mmc_id == 0) { /* MCI0 */
@@ -430,17 +430,17 @@ void __init at91_add_device_cf(struct at91_cf_data *data)
}
at91_sys_write(AT91_MATRIX_EBI0CSA, ebi0_csa);
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->irq_pin) {
+ if (gpio_is_valid(data->irq_pin)) {
at91_set_gpio_input(data->irq_pin, 1);
at91_set_deglitch(data->irq_pin, 1);
}
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
/* initially off */
at91_set_gpio_output(data->vcc_pin, 0);
@@ -473,8 +473,8 @@ static struct resource nand_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_BASE_SYS + AT91_ECC0,
- .end = AT91_BASE_SYS + AT91_ECC0 + SZ_512 - 1,
+ .start = AT91SAM9263_BASE_ECC0,
+ .end = AT91SAM9263_BASE_ECC0 + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -500,15 +500,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBI0CSA, csa | AT91_MATRIX_EBI0_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
nand_data = *data;
@@ -749,7 +749,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data)
at91_set_A_periph(AT91_PIN_PB3, 0); /* AC97RX */
/* reset */
- if (data->reset_pin)
+ if (gpio_is_valid(data->reset_pin))
at91_set_gpio_output(data->reset_pin, 0);
ac97_data = *data;
@@ -956,8 +956,8 @@ static void __init at91_add_device_tc(void) { }
static struct resource rtt0_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT0,
- .end = AT91_BASE_SYS + AT91_RTT0 + SZ_16 - 1,
+ .start = AT91SAM9263_BASE_RTT0,
+ .end = AT91SAM9263_BASE_RTT0 + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -971,8 +971,8 @@ static struct platform_device at91sam9263_rtt0_device = {
static struct resource rtt1_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT1,
- .end = AT91_BASE_SYS + AT91_RTT1 + SZ_16 - 1,
+ .start = AT91SAM9263_BASE_RTT1,
+ .end = AT91SAM9263_BASE_RTT1 + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -996,10 +996,19 @@ static void __init at91_add_device_rtt(void)
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91SAM9263_BASE_WDT,
+ .end = AT91SAM9263_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91sam9263_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -1196,8 +1205,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91SAM9263_BASE_DBGU,
+ .end = AT91SAM9263_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 4ba85499fa9..d89ead740a9 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -25,7 +25,17 @@
static u32 pit_cycle; /* write-once */
static u32 pit_cnt; /* access only w/system irq blocked */
+static void __iomem *pit_base_addr __read_mostly;
+static inline unsigned int pit_read(unsigned int reg_offset)
+{
+ return __raw_readl(pit_base_addr + reg_offset);
+}
+
+static inline void pit_write(unsigned int reg_offset, unsigned long value)
+{
+ __raw_writel(value, pit_base_addr + reg_offset);
+}
/*
* Clocksource: just a monotonic counter of MCK/16 cycles.
@@ -39,7 +49,7 @@ static cycle_t read_pit_clk(struct clocksource *cs)
raw_local_irq_save(flags);
elapsed = pit_cnt;
- t = at91_sys_read(AT91_PIT_PIIR);
+ t = pit_read(AT91_PIT_PIIR);
raw_local_irq_restore(flags);
elapsed += PIT_PICNT(t) * pit_cycle;
@@ -64,8 +74,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/* update clocksource counter */
- pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
| AT91_PIT_PITIEN);
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -74,7 +84,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
/* disable irq, leaving the clocksource active */
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
break;
case CLOCK_EVT_MODE_RESUME:
break;
@@ -103,11 +113,11 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/* The PIT interrupt may be disabled, and is shared */
if ((pit_clkevt.mode == CLOCK_EVT_MODE_PERIODIC)
- && (at91_sys_read(AT91_PIT_SR) & AT91_PIT_PITS)) {
+ && (pit_read(AT91_PIT_SR) & AT91_PIT_PITS)) {
unsigned nr_ticks;
/* Get number of ticks performed before irq, and ack it */
- nr_ticks = PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
+ nr_ticks = PIT_PICNT(pit_read(AT91_PIT_PIVR));
do {
pit_cnt += pit_cycle;
pit_clkevt.event_handler(&pit_clkevt);
@@ -129,14 +139,14 @@ static struct irqaction at91sam926x_pit_irq = {
static void at91sam926x_pit_reset(void)
{
/* Disable timer and irqs */
- at91_sys_write(AT91_PIT_MR, 0);
+ pit_write(AT91_PIT_MR, 0);
/* Clear any pending interrupts, wait for PIT to stop counting */
- while (PIT_CPIV(at91_sys_read(AT91_PIT_PIVR)) != 0)
+ while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0)
cpu_relax();
/* Start PIT but don't enable IRQ */
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
}
/*
@@ -178,7 +188,15 @@ static void __init at91sam926x_pit_init(void)
static void at91sam926x_pit_suspend(void)
{
/* Disable timer */
- at91_sys_write(AT91_PIT_MR, 0);
+ pit_write(AT91_PIT_MR, 0);
+}
+
+void __init at91sam926x_ioremap_pit(u32 addr)
+{
+ pit_base_addr = ioremap(addr, 16);
+
+ if (!pit_base_addr)
+ panic("Impossible to ioremap PIT\n");
}
struct sys_timer at91sam926x_timer = {
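The hunks above replace the global at91_sys_read()/at91_sys_write() helpers with small per-block accessors built on an ioremap()'d base pointer. A minimal sketch of that pattern, using a hypothetical register block and placeholder names rather than code taken from the patch:

	#include <linux/cache.h>
	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/kernel.h>

	/* Hypothetical peripheral: one base pointer, filled in once at boot. */
	static void __iomem *example_base __read_mostly;

	static inline u32 example_read(unsigned int reg)
	{
		return __raw_readl(example_base + reg);
	}

	static inline void example_write(unsigned int reg, u32 value)
	{
		__raw_writel(value, example_base + reg);
	}

	static void __init example_ioremap_registers(u32 phys_addr)
	{
		/* 16 bytes covers the small register window used here. */
		example_base = ioremap(phys_addr, 16);
		if (!example_base)
			panic("unable to ioremap example registers\n");
	}

The SoC init code would call example_ioremap_registers() once, early, and every later register access goes through the two inline helpers instead of a fixed system-controller virtual address.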
diff --git a/arch/arm/mach-at91/at91sam9_alt_reset.S b/arch/arm/mach-at91/at91sam9_alt_reset.S
index e0256deb91f..d3f931c5942 100644
--- a/arch/arm/mach-at91/at91sam9_alt_reset.S
+++ b/arch/arm/mach-at91/at91sam9_alt_reset.S
@@ -14,20 +14,15 @@
*/
#include <linux/linkage.h>
-#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/at91sam9_sdramc.h>
#include <mach/at91_rstc.h>
.arm
- .globl at91sam9_alt_reset
+ .globl at91sam9_alt_restart
-at91sam9_alt_reset: mrc p15, 0, r0, c1, c0, 0
- orr r0, r0, #CR_I
- mcr p15, 0, r0, c1, c0, 0 @ enable I-cache
-
- ldr r0, .at91_va_base_sdramc @ preload constants
+at91sam9_alt_restart: ldr r0, .at91_va_base_sdramc @ preload constants
ldr r1, .at91_va_base_rstc_cr
mov r2, #1
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 318b0407ea0..7032dd32cdf 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
@@ -20,12 +19,12 @@
#include <mach/at91sam9g45.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include <mach/cpu.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -150,7 +149,7 @@ static struct clk ac97_clk = {
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk macb_clk = {
- .name = "macb_clk",
+ .name = "pclk",
.pmc_mask = 1 << AT91SAM9G45_ID_EMAC,
.type = CLK_TYPE_PERIPHERAL,
};
@@ -209,6 +208,8 @@ static struct clk *periph_clocks[] __initdata = {
};
static struct clk_lookup periph_clocks_lookups[] = {
+ /* One additional fake clock for macb_hclk */
+ CLKDEV_CON_ID("hclk", &macb_clk),
/* One additional fake clock for ohci */
CLKDEV_CON_ID("ohci_clk", &uhphs_clk),
CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk),
@@ -231,6 +232,11 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "fff98000.serial", &usart3_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioC_clk),
+ CLKDEV_CON_ID("pioD", &pioDE_clk),
+ CLKDEV_CON_ID("pioE", &pioDE_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -293,41 +299,30 @@ void __init at91sam9g45_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91sam9g45_gpio[] = {
+static struct at91_gpio_bank at91sam9g45_gpio[] __initdata = {
{
.id = AT91SAM9G45_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91SAM9G45_BASE_PIOA,
}, {
.id = AT91SAM9G45_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91SAM9G45_BASE_PIOB,
}, {
.id = AT91SAM9G45_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
+ .regbase = AT91SAM9G45_BASE_PIOC,
}, {
.id = AT91SAM9G45_ID_PIODE,
- .offset = AT91_PIOD,
- .clock = &pioDE_clk,
+ .regbase = AT91SAM9G45_BASE_PIOD,
}, {
.id = AT91SAM9G45_ID_PIODE,
- .offset = AT91_PIOE,
- .clock = &pioDE_clk,
+ .regbase = AT91SAM9G45_BASE_PIOE,
}
};
-static void at91sam9g45_reset(void)
+static void at91sam9g45_restart(char mode, const char *cmd)
{
at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
}
-static void at91sam9g45_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91SAM9G45 processor initialization
* -------------------------------------------------------------------- */
@@ -338,10 +333,16 @@ static void __init at91sam9g45_map_io(void)
init_consistent_dma_size(SZ_4M);
}
+static void __init at91sam9g45_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91SAM9G45_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
+}
+
static void __init at91sam9g45_initialize(void)
{
- at91_arch_reset = at91sam9g45_reset;
- pm_power_off = at91sam9g45_poweroff;
+ arm_pm_restart = at91sam9g45_restart;
at91_extern_irq = (1 << AT91SAM9G45_ID_IRQ0);
/* Register GPIO subsystem */
@@ -393,6 +394,7 @@ static unsigned int at91sam9g45_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91sam9g45_soc = {
.map_io = at91sam9g45_map_io,
.default_irq_priority = at91sam9g45_default_irq_priority,
+ .ioremap_registers = at91sam9g45_ioremap_registers,
.register_clocks = at91sam9g45_register_clocks,
.init = at91sam9g45_initialize,
};
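The extra CLKDEV_CON_ID() entries register the PIO clocks under plain connection ids, which lets the gpio setup code drop the struct clk pointers from its bank table (removed above) and resolve the clocks by name instead. A minimal consumer-side sketch under that assumption; the function name is illustrative, not from the patch:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Grab and enable a clock registered as CLKDEV_CON_ID("pioA", ...). */
	static int example_enable_pio_clock(void)
	{
		struct clk *clk = clk_get(NULL, "pioA");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_enable(clk);
	}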
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 09a16d6bd5c..b7582dd10dc 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -44,8 +44,8 @@ static struct at_dma_platform_data atdma_pdata = {
static struct resource hdmac_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DMA,
- .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
+ .start = AT91SAM9G45_BASE_DMA,
+ .end = AT91SAM9G45_BASE_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -120,7 +120,7 @@ void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data)
/* Enable VBus control for UHP ports */
for (i = 0; i < data->ports; i++) {
- if (data->vbus_pin[i])
+ if (gpio_is_valid(data->vbus_pin[i]))
at91_set_gpio_output(data->vbus_pin[i], 0);
}
@@ -181,7 +181,7 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data)
/* Enable VBus control for UHP ports */
for (i = 0; i < data->ports; i++) {
- if (data->vbus_pin[i])
+ if (gpio_is_valid(data->vbus_pin[i]))
at91_set_gpio_output(data->vbus_pin[i], 0);
}
@@ -263,7 +263,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));
- if (data && data->vbus_pin > 0) {
+ if (data && gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
usba_udc_data.pdata.vbus_pin = data->vbus_pin;
@@ -284,7 +284,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data) {}
#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
-static struct at91_eth_data eth_data;
+static struct macb_platform_data eth_data;
static struct resource eth_resources[] = {
[0] = {
@@ -311,12 +311,12 @@ static struct platform_device at91sam9g45_eth_device = {
.num_resources = ARRAY_SIZE(eth_resources),
};
-void __init at91_add_device_eth(struct at91_eth_data *data)
+void __init at91_add_device_eth(struct macb_platform_data *data)
{
if (!data)
return;
- if (data->phy_irq_pin) {
+ if (gpio_is_valid(data->phy_irq_pin)) {
at91_set_gpio_input(data->phy_irq_pin, 0);
at91_set_deglitch(data->phy_irq_pin, 1);
}
@@ -348,7 +348,7 @@ void __init at91_add_device_eth(struct at91_eth_data *data)
platform_device_register(&at91sam9g45_eth_device);
}
#else
-void __init at91_add_device_eth(struct at91_eth_data *data) {}
+void __init at91_add_device_eth(struct macb_platform_data *data) {}
#endif
@@ -449,11 +449,11 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
/* input/irq */
- if (data->slot[0].detect_pin) {
+ if (gpio_is_valid(data->slot[0].detect_pin)) {
at91_set_gpio_input(data->slot[0].detect_pin, 1);
at91_set_deglitch(data->slot[0].detect_pin, 1);
}
- if (data->slot[0].wp_pin)
+ if (gpio_is_valid(data->slot[0].wp_pin))
at91_set_gpio_input(data->slot[0].wp_pin, 1);
if (mmc_id == 0) { /* MCI0 */
@@ -529,8 +529,8 @@ static struct resource nand_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_BASE_SYS + AT91_ECC,
- .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
+ .start = AT91SAM9G45_BASE_ECC,
+ .end = AT91SAM9G45_BASE_ECC + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -556,15 +556,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
nand_data = *data;
@@ -859,7 +859,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data)
at91_set_A_periph(AT91_PIN_PD6, 0); /* AC97RX */
/* reset */
- if (data->reset_pin)
+ if (gpio_is_valid(data->reset_pin))
at91_set_gpio_output(data->reset_pin, 0);
ac97_data = *data;
@@ -1009,10 +1009,24 @@ static void __init at91_add_device_tc(void) { }
* -------------------------------------------------------------------- */
#if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE)
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_RTC,
+ .end = AT91SAM9G45_BASE_RTC + SZ_256 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91_ID_SYS,
+ .end = AT91_ID_SYS,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct platform_device at91sam9g45_rtc_device = {
.name = "at91_rtc",
.id = -1,
- .num_resources = 0,
+ .resource = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
};
static void __init at91_add_device_rtc(void)
@@ -1081,8 +1095,8 @@ void __init at91_add_device_tsadcc(struct at91_tsadcc_data *data) {}
static struct resource rtt_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .start = AT91SAM9G45_BASE_RTT,
+ .end = AT91SAM9G45_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -1133,10 +1147,19 @@ static void __init at91_add_device_trng(void) {}
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91SAM9G45_BASE_WDT,
+ .end = AT91SAM9G45_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91sam9g45_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -1332,8 +1355,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91SAM9G45_BASE_DBGU,
+ .end = AT91SAM9G45_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index a238105d2c1..d6bcb1da11d 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -20,11 +19,11 @@
#include <mach/at91sam9rl.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
-#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
+#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
@@ -184,6 +183,10 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
+ CLKDEV_CON_ID("pioA", &pioA_clk),
+ CLKDEV_CON_ID("pioB", &pioB_clk),
+ CLKDEV_CON_ID("pioC", &pioC_clk),
+ CLKDEV_CON_ID("pioD", &pioD_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
@@ -243,32 +246,22 @@ void __init at91sam9rl_set_console_clock(int id)
* GPIO
* -------------------------------------------------------------------- */
-static struct at91_gpio_bank at91sam9rl_gpio[] = {
+static struct at91_gpio_bank at91sam9rl_gpio[] __initdata = {
{
.id = AT91SAM9RL_ID_PIOA,
- .offset = AT91_PIOA,
- .clock = &pioA_clk,
+ .regbase = AT91SAM9RL_BASE_PIOA,
}, {
.id = AT91SAM9RL_ID_PIOB,
- .offset = AT91_PIOB,
- .clock = &pioB_clk,
+ .regbase = AT91SAM9RL_BASE_PIOB,
}, {
.id = AT91SAM9RL_ID_PIOC,
- .offset = AT91_PIOC,
- .clock = &pioC_clk,
+ .regbase = AT91SAM9RL_BASE_PIOC,
}, {
.id = AT91SAM9RL_ID_PIOD,
- .offset = AT91_PIOD,
- .clock = &pioD_clk,
+ .regbase = AT91SAM9RL_BASE_PIOD,
}
};
-static void at91sam9rl_poweroff(void)
-{
- at91_sys_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-
/* --------------------------------------------------------------------
* AT91SAM9RL processor initialization
* -------------------------------------------------------------------- */
@@ -290,10 +283,16 @@ static void __init at91sam9rl_map_io(void)
at91_init_sram(0, AT91SAM9RL_SRAM_BASE, sram_size);
}
+static void __init at91sam9rl_ioremap_registers(void)
+{
+ at91_ioremap_shdwc(AT91SAM9RL_BASE_SHDWC);
+ at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
+ at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
+}
+
static void __init at91sam9rl_initialize(void)
{
- at91_arch_reset = at91sam9_alt_reset;
- pm_power_off = at91sam9rl_poweroff;
+ arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9RL_ID_IRQ0);
/* Register GPIO subsystem */
@@ -345,6 +344,7 @@ static unsigned int at91sam9rl_default_irq_priority[NR_AIC_IRQS] __initdata = {
struct at91_init_soc __initdata at91sam9rl_soc = {
.map_io = at91sam9rl_map_io,
.default_irq_priority = at91sam9rl_default_irq_priority,
+ .ioremap_registers = at91sam9rl_ioremap_registers,
.register_clocks = at91sam9rl_register_clocks,
.init = at91sam9rl_initialize,
};
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 628eb566d60..61908dce978 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -39,8 +39,8 @@ static struct at_dma_platform_data atdma_pdata = {
static struct resource hdmac_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DMA,
- .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
+ .start = AT91SAM9RL_BASE_DMA,
+ .end = AT91SAM9RL_BASE_DMA + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
@@ -147,7 +147,7 @@ void __init at91_add_device_usba(struct usba_platform_data *data)
usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));
- if (data && data->vbus_pin > 0) {
+ if (data && gpio_is_valid(data->vbus_pin)) {
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
usba_udc_data.pdata.vbus_pin = data->vbus_pin;
@@ -201,13 +201,13 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
return;
/* input/irq */
- if (data->det_pin) {
+ if (gpio_is_valid(data->det_pin)) {
at91_set_gpio_input(data->det_pin, 1);
at91_set_deglitch(data->det_pin, 1);
}
- if (data->wp_pin)
+ if (gpio_is_valid(data->wp_pin))
at91_set_gpio_input(data->wp_pin, 1);
- if (data->vcc_pin)
+ if (gpio_is_valid(data->vcc_pin))
at91_set_gpio_output(data->vcc_pin, 0);
/* CLK */
@@ -248,8 +248,8 @@ static struct resource nand_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_BASE_SYS + AT91_ECC,
- .end = AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
+ .start = AT91SAM9RL_BASE_ECC,
+ .end = AT91SAM9RL_BASE_ECC + SZ_512 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -275,15 +275,15 @@ void __init at91_add_device_nand(struct atmel_nand_data *data)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
/* enable pin */
- if (data->enable_pin)
+ if (gpio_is_valid(data->enable_pin))
at91_set_gpio_output(data->enable_pin, 1);
/* ready/busy pin */
- if (data->rdy_pin)
+ if (gpio_is_valid(data->rdy_pin))
at91_set_gpio_input(data->rdy_pin, 1);
/* card detect pin */
- if (data->det_pin)
+ if (gpio_is_valid(data->det_pin))
at91_set_gpio_input(data->det_pin, 1);
at91_set_A_periph(AT91_PIN_PB4, 0); /* NANDOE */
@@ -483,7 +483,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data)
at91_set_A_periph(AT91_PIN_PD4, 0); /* AC97RX */
/* reset */
- if (data->reset_pin)
+ if (gpio_is_valid(data->reset_pin))
at91_set_gpio_output(data->reset_pin, 0);
ac97_data = *data;
@@ -685,8 +685,8 @@ static void __init at91_add_device_rtc(void) {}
static struct resource rtt_resources[] = {
{
- .start = AT91_BASE_SYS + AT91_RTT,
- .end = AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+ .start = AT91SAM9RL_BASE_RTT,
+ .end = AT91SAM9RL_BASE_RTT + SZ_16 - 1,
.flags = IORESOURCE_MEM,
}
};
@@ -709,10 +709,19 @@ static void __init at91_add_device_rtt(void)
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct resource wdt_resources[] = {
+ {
+ .start = AT91SAM9RL_BASE_WDT,
+ .end = AT91SAM9RL_BASE_WDT + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
static struct platform_device at91sam9rl_wdt_device = {
.name = "at91_wdt",
.id = -1,
- .num_resources = 0,
+ .resource = wdt_resources,
+ .num_resources = ARRAY_SIZE(wdt_resources),
};
static void __init at91_add_device_watchdog(void)
@@ -908,8 +917,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#if defined(CONFIG_SERIAL_ATMEL)
static struct resource dbgu_resources[] = {
[0] = {
- .start = AT91_BASE_SYS + AT91_DBGU,
- .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ .start = AT91SAM9RL_BASE_DBGU,
+ .end = AT91SAM9RL_BASE_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 367d5cd5e36..2628384aaae 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -63,13 +63,15 @@ static void __init onearm_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata onearm_eth_data = {
+static struct macb_platform_data __initdata onearm_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata onearm_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata onearm_udc_data = {
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index 4282d96dffa..3bb40694b02 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -75,6 +75,8 @@ static void __init afeb9260_init_early(void)
*/
static struct at91_usbh_data __initdata afeb9260_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -82,7 +84,7 @@ static struct at91_usbh_data __initdata afeb9260_usbh_data = {
*/
static struct at91_udc_data __initdata afeb9260_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -103,7 +105,7 @@ static struct spi_board_info afeb9260_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata afeb9260_macb_data = {
+static struct macb_platform_data __initdata afeb9260_macb_data = {
.phy_irq_pin = AT91_PIN_PA9,
.is_rmii = 0,
};
@@ -138,6 +140,7 @@ static struct atmel_nand_data __initdata afeb9260_nand_data = {
.bus_width_16 = 0,
.parts = afeb9260_nand_partition,
.num_parts = ARRAY_SIZE(afeb9260_nand_partition),
+ .det_pin = -EINVAL,
};
@@ -149,6 +152,7 @@ static struct at91_mmc_data __initdata afeb9260_mmc_data = {
.wp_pin = AT91_PIN_PC4,
.slot_b = 1,
.wire4 = 1,
+ .vcc_pin = -EINVAL,
};
@@ -169,6 +173,8 @@ static struct i2c_board_info __initdata afeb9260_i2c_devices[] = {
static struct at91_cf_data afeb9260_cf_data = {
.chipselect = 4,
.irq_pin = AT91_PIN_PA6,
+ .det_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
.rst_pin = AT91_PIN_PA7,
.flags = AT91_CF_TRUE_IDE,
};
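Across the board files, pins that are not wired are now initialised to -EINVAL instead of being left at 0, and the device-registration helpers test gpio_is_valid() rather than a bare non-zero check, so GPIO 0 can be used like any other line. A minimal, self-contained sketch of the same idea using the generic gpiolib calls (the AT91-specific helpers in the patch differ; the names below are placeholders):

	#include <linux/errno.h>
	#include <linux/gpio.h>

	struct example_board_data {
		int detect_pin;		/* set to -EINVAL when the signal is absent */
	};

	static struct example_board_data example_data = {
		.detect_pin = -EINVAL,
	};

	static int example_setup(const struct example_board_data *data)
	{
		/* Negative values (including -EINVAL) are simply skipped. */
		if (!gpio_is_valid(data->detect_pin))
			return 0;

		return gpio_request_one(data->detect_pin, GPIOF_IN, "detect");
	}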
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index f90cfb32bad..8510e9e5498 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -62,6 +62,8 @@ static void __init cam60_init_early(void)
*/
static struct at91_usbh_data __initdata cam60_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
@@ -115,7 +117,7 @@ static struct spi_board_info cam60_spi_devices[] __initdata = {
/*
* MACB Ethernet device
*/
-static struct __initdata at91_eth_data cam60_macb_data = {
+static struct __initdata macb_platform_data cam60_macb_data = {
.phy_irq_pin = AT91_PIN_PB5,
.is_rmii = 0,
};
@@ -135,7 +137,7 @@ static struct mtd_partition __initdata cam60_nand_partition[] = {
static struct atmel_nand_data __initdata cam60_nand_data = {
.ale = 21,
.cle = 22,
- // .det_pin = ... not there
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PA9,
.enable_pin = AT91_PIN_PA7,
.parts = cam60_nand_partition,
@@ -163,7 +165,7 @@ static struct sam9_smc_config __initdata cam60_nand_smc_config = {
static void __init cam60_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &cam60_nand_smc_config);
+ sam9_smc_configure(0, 3, &cam60_nand_smc_config);
at91_add_device_nand(&cam60_nand_data);
}
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index 5dffd3be62d..ac3de4f7c31 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -70,6 +70,8 @@ static void __init cap9adk_init_early(void)
*/
static struct at91_usbh_data __initdata cap9adk_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -144,16 +146,17 @@ static struct spi_board_info cap9adk_spi_devices[] = {
*/
static struct at91_mmc_data __initdata cap9adk_mmc_data = {
.wire4 = 1,
-// .det_pin = ... not connected
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata cap9adk_macb_data = {
+static struct macb_platform_data __initdata cap9adk_macb_data = {
+ .phy_irq_pin = -EINVAL,
.is_rmii = 1,
};
@@ -172,8 +175,8 @@ static struct mtd_partition __initdata cap9adk_nand_partitions[] = {
static struct atmel_nand_data __initdata cap9adk_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
-// .rdy_pin = ... not connected
+ .det_pin = -EINVAL,
+ .rdy_pin = -EINVAL,
.enable_pin = AT91_PIN_PD15,
.parts = cap9adk_nand_partitions,
.num_parts = ARRAY_SIZE(cap9adk_nand_partitions),
@@ -212,7 +215,7 @@ static void __init cap9adk_add_device_nand(void)
cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &cap9adk_nand_smc_config);
+ sam9_smc_configure(0, 3, &cap9adk_nand_smc_config);
at91_add_device_nand(&cap9adk_nand_data);
}
@@ -282,7 +285,7 @@ static __init void cap9adk_add_device_nor(void)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V);
/* configure chip-select 0 (NOR) */
- sam9_smc_configure(0, &cap9adk_nor_smc_config);
+ sam9_smc_configure(0, 0, &cap9adk_nor_smc_config);
platform_device_register(&cap9adk_nor_flash);
}
@@ -351,7 +354,7 @@ static struct atmel_lcdfb_info __initdata cap9adk_lcdc_data;
* AC97
*/
static struct ac97c_platform_data cap9adk_ac97_data = {
-// .reset_pin = ... not connected
+ .reset_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 774c87fcbd5..59d9cf99753 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -57,13 +57,15 @@ static void __init carmeva_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata carmeva_eth_data = {
+static struct macb_platform_data __initdata carmeva_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata carmeva_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata carmeva_udc_data = {
@@ -75,8 +77,8 @@ static struct at91_udc_data __initdata carmeva_udc_data = {
// static struct at91_cf_data __initdata carmeva_cf_data = {
// .det_pin = AT91_PIN_PB0,
// .rst_pin = AT91_PIN_PC5,
- // .irq_pin = ... not connected
- // .vcc_pin = ... always powered
+ // .irq_pin = -EINVAL,
+ // .vcc_pin = -EINVAL,
// };
static struct at91_mmc_data __initdata carmeva_mmc_data = {
@@ -84,6 +86,7 @@ static struct at91_mmc_data __initdata carmeva_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PB10,
.wp_pin = AT91_PIN_PC14,
+ .vcc_pin = -EINVAL,
};
static struct spi_board_info carmeva_spi_devices[] = {
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index fc885a4ce24..9ab3d1ea326 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -86,6 +86,8 @@ static void __init cpu9krea_init_early(void)
*/
static struct at91_usbh_data __initdata cpu9krea_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -93,13 +95,14 @@ static struct at91_usbh_data __initdata cpu9krea_usbh_data = {
*/
static struct at91_udc_data __initdata cpu9krea_udc_data = {
.vbus_pin = AT91_PIN_PC8,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata cpu9krea_macb_data = {
+static struct macb_platform_data __initdata cpu9krea_macb_data = {
+ .phy_irq_pin = -EINVAL,
.is_rmii = 1,
};
@@ -112,6 +115,7 @@ static struct atmel_nand_data __initdata cpu9krea_nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.bus_width_16 = 0,
+ .det_pin = -EINVAL,
};
#ifdef CONFIG_MACH_CPU9260
@@ -156,7 +160,7 @@ static struct sam9_smc_config __initdata cpu9krea_nand_smc_config = {
static void __init cpu9krea_add_device_nand(void)
{
- sam9_smc_configure(3, &cpu9krea_nand_smc_config);
+ sam9_smc_configure(0, 3, &cpu9krea_nand_smc_config);
at91_add_device_nand(&cpu9krea_nand_data);
}
@@ -238,7 +242,7 @@ static __init void cpu9krea_add_device_nor(void)
at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_VDDIOMSEL_3_3V);
/* configure chip-select 0 (NOR) */
- sam9_smc_configure(0, &cpu9krea_nor_smc_config);
+ sam9_smc_configure(0, 0, &cpu9krea_nor_smc_config);
platform_device_register(&cpu9krea_nor_flash);
}
@@ -337,6 +341,8 @@ static struct at91_mmc_data __initdata cpu9krea_mmc_data = {
.slot_b = 0,
.wire4 = 1,
.det_pin = AT91_PIN_PA29,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
static void __init cpu9krea_board_init(void)
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index d35e65b08cc..368e1427ad9 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -82,12 +82,15 @@ static void __init cpuat91_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata cpuat91_eth_data = {
+static struct macb_platform_data __initdata cpuat91_eth_data = {
+ .phy_irq_pin = -EINVAL,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata cpuat91_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata cpuat91_udc_data = {
@@ -98,6 +101,8 @@ static struct at91_udc_data __initdata cpuat91_udc_data = {
static struct at91_mmc_data __initdata cpuat91_mmc_data = {
.det_pin = AT91_PIN_PC2,
.wire4 = 1,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
static struct physmap_flash_data cpuat91_flash_data = {
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index c3936665e64..1a1547b1ce4 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -58,18 +58,20 @@ static void __init csb337_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata csb337_eth_data = {
+static struct macb_platform_data __initdata csb337_eth_data = {
.phy_irq_pin = AT91_PIN_PC2,
.is_rmii = 0,
};
static struct at91_usbh_data __initdata csb337_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata csb337_udc_data = {
- // this has no VBUS sensing pin
.pullup_pin = AT91_PIN_PA24,
+ .vbus_pin = -EINVAL,
};
static struct i2c_board_info __initdata csb337_i2c_devices[] = {
@@ -98,6 +100,7 @@ static struct at91_mmc_data __initdata csb337_mmc_data = {
.slot_b = 0,
.wire4 = 1,
.wp_pin = AT91_PIN_PD6,
+ .vcc_pin = -EINVAL,
};
static struct spi_board_info csb337_spi_devices[] = {
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 586100e2acb..f650bf39455 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -52,13 +52,15 @@ static void __init csb637_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata csb637_eth_data = {
+static struct macb_platform_data __initdata csb637_eth_data = {
.phy_irq_pin = AT91_PIN_PC0,
.is_rmii = 0,
};
static struct at91_usbh_data __initdata csb637_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata csb637_udc_data = {
diff --git a/arch/arm/mach-at91/board-dt.c b/arch/arm/mach-at91/board-dt.c
index 0b7d3277821..bb6b434ec0c 100644
--- a/arch/arm/mach-at91/board-dt.c
+++ b/arch/arm/mach-at91/board-dt.c
@@ -50,6 +50,7 @@ static void __init ek_init_early(void)
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC8,
.enable_pin = AT91_PIN_PC14,
};
@@ -82,7 +83,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index 45db7a3dbef..d302ca3eeb6 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -60,13 +60,15 @@ static void __init eb9200_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata eb9200_eth_data = {
+static struct macb_platform_data __initdata eb9200_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata eb9200_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata eb9200_udc_data = {
@@ -75,15 +77,18 @@ static struct at91_udc_data __initdata eb9200_udc_data = {
};
static struct at91_cf_data __initdata eb9200_cf_data = {
+ .irq_pin = -EINVAL,
.det_pin = AT91_PIN_PB0,
+ .vcc_pin = -EINVAL,
.rst_pin = AT91_PIN_PC5,
- // .irq_pin = ... not connected
- // .vcc_pin = ... always powered
};
static struct at91_mmc_data __initdata eb9200_mmc_data = {
.slot_b = 0,
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
static struct i2c_board_info __initdata eb9200_i2c_devices[] = {
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 2f9c16d2921..69966ce4d77 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -64,18 +64,23 @@ static void __init ecb_at91init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata ecb_at91eth_data = {
+static struct macb_platform_data __initdata ecb_at91eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 0,
};
static struct at91_usbh_data __initdata ecb_at91usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_mmc_data __initdata ecb_at91mmc_data = {
.slot_b = 0,
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index 8252c722607..07ef35b0ec2 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -47,13 +47,15 @@ static void __init eco920_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata eco920_eth_data = {
+static struct macb_platform_data __initdata eco920_eth_data = {
.phy_irq_pin = AT91_PIN_PC2,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata eco920_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata eco920_udc_data = {
@@ -64,6 +66,9 @@ static struct at91_udc_data __initdata eco920_udc_data = {
static struct at91_mmc_data __initdata eco920_mmc_data = {
.slot_b = 0,
.wire4 = 0,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
static struct physmap_flash_data eco920_flash_data = {
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index 4c3f65d9c59..eec02cd57ce 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -52,12 +52,14 @@ static void __init flexibity_init_early(void)
/* USB Host port */
static struct at91_usbh_data __initdata flexibity_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/* USB Device port */
static struct at91_udc_data __initdata flexibity_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
/* SPI devices */
@@ -76,6 +78,7 @@ static struct at91_mmc_data __initdata flexibity_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PC9,
.wp_pin = AT91_PIN_PC4,
+ .vcc_pin = -EINVAL,
};
/* LEDs */
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index f27d1a780cf..caf017f0f4e 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -106,6 +106,8 @@ static void __init foxg20_init_early(void)
*/
static struct at91_usbh_data __initdata foxg20_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -113,7 +115,7 @@ static struct at91_usbh_data __initdata foxg20_usbh_data = {
*/
static struct at91_udc_data __initdata foxg20_udc_data = {
.vbus_pin = AT91_PIN_PC6,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -135,7 +137,7 @@ static struct spi_board_info foxg20_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata foxg20_macb_data = {
+static struct macb_platform_data __initdata foxg20_macb_data = {
.phy_irq_pin = AT91_PIN_PA7,
.is_rmii = 1,
};
@@ -147,6 +149,9 @@ static struct at91_eth_data __initdata foxg20_macb_data = {
static struct at91_mmc_data __initdata foxg20_mmc_data = {
.slot_b = 1,
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index 2e95949737e..230e71969fb 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -80,6 +80,8 @@ static void __init gsia18s_init_early(void)
*/
static struct at91_usbh_data __initdata usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -87,13 +89,13 @@ static struct at91_usbh_data __initdata usbh_data = {
*/
static struct at91_udc_data __initdata udc_data = {
.vbus_pin = AT91_PIN_PA22,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata macb_data = {
+static struct macb_platform_data __initdata macb_data = {
.phy_irq_pin = AT91_PIN_PA28,
.is_rmii = 1,
};
@@ -530,6 +532,7 @@ static struct i2c_board_info __initdata gsia18s_i2c_devices[] = {
static struct at91_cf_data __initdata gsia18s_cf1_data = {
.irq_pin = AT91_PIN_PA27,
.det_pin = AT91_PIN_PB30,
+ .vcc_pin = -EINVAL,
.rst_pin = AT91_PIN_PB31,
.chipselect = 5,
.flags = AT91_CF_TRUE_IDE,
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index 3bae73e6363..efde1b2327c 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -61,13 +61,15 @@ static void __init kafa_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata kafa_eth_data = {
+static struct macb_platform_data __initdata kafa_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 0,
};
static struct at91_usbh_data __initdata kafa_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata kafa_udc_data = {
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index e61351ffad5..d75a4a2ad9c 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -69,13 +69,15 @@ static void __init kb9202_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata kb9202_eth_data = {
+static struct macb_platform_data __initdata kb9202_eth_data = {
.phy_irq_pin = AT91_PIN_PB29,
.is_rmii = 0,
};
static struct at91_usbh_data __initdata kb9202_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata kb9202_udc_data = {
@@ -87,6 +89,8 @@ static struct at91_mmc_data __initdata kb9202_mmc_data = {
.det_pin = AT91_PIN_PB2,
.slot_b = 0,
.wire4 = 1,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
static struct mtd_partition __initdata kb9202_nand_partition[] = {
@@ -100,7 +104,7 @@ static struct mtd_partition __initdata kb9202_nand_partition[] = {
static struct atmel_nand_data __initdata kb9202_nand_data = {
.ale = 22,
.cle = 21,
- // .det_pin = ... not there
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC29,
.enable_pin = AT91_PIN_PC28,
.parts = kb9202_nand_partition,
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index ef816c17dc6..3f8617c0e04 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -72,6 +72,7 @@ static void __init neocore926_init_early(void)
static struct at91_usbh_data __initdata neocore926_usbh_data = {
.ports = 2,
.vbus_pin = { AT91_PIN_PA24, AT91_PIN_PA21 },
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -79,7 +80,7 @@ static struct at91_usbh_data __initdata neocore926_usbh_data = {
*/
static struct at91_udc_data __initdata neocore926_udc_data = {
.vbus_pin = AT91_PIN_PA25,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -149,13 +150,14 @@ static struct at91_mmc_data __initdata neocore926_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PE18,
.wp_pin = AT91_PIN_PE19,
+ .vcc_pin = -EINVAL,
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata neocore926_macb_data = {
+static struct macb_platform_data __initdata neocore926_macb_data = {
.phy_irq_pin = AT91_PIN_PE31,
.is_rmii = 1,
};
@@ -190,6 +192,7 @@ static struct atmel_nand_data __initdata neocore926_nand_data = {
.enable_pin = AT91_PIN_PD15,
.parts = neocore926_nand_partition,
.num_parts = ARRAY_SIZE(neocore926_nand_partition),
+ .det_pin = -EINVAL,
};
static struct sam9_smc_config __initdata neocore926_nand_smc_config = {
@@ -213,7 +216,7 @@ static struct sam9_smc_config __initdata neocore926_nand_smc_config = {
static void __init neocore926_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &neocore926_nand_smc_config);
+ sam9_smc_configure(0, 3, &neocore926_nand_smc_config);
at91_add_device_nand(&neocore926_nand_data);
}
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index 49e3f699b48..b4a12fc184c 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -96,9 +96,9 @@ static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
static void __init add_device_pcontrol(void)
{
/* configure chip-select 4 (IO compatible to 8051 X4 ) */
- sam9_smc_configure(4, &pcontrol_smc_config[0]);
+ sam9_smc_configure(0, 4, &pcontrol_smc_config[0]);
/* configure chip-select 7 (FerroRAM 256KiBx16bit MR2A16A D4 ) */
- sam9_smc_configure(7, &pcontrol_smc_config[1]);
+ sam9_smc_configure(0, 7, &pcontrol_smc_config[1]);
}
@@ -107,6 +107,8 @@ static void __init add_device_pcontrol(void)
*/
static struct at91_usbh_data __initdata usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
@@ -122,7 +124,7 @@ static struct at91_udc_data __initdata pcontrol_g20_udc_data = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata macb_data = {
+static struct macb_platform_data __initdata macb_data = {
.phy_irq_pin = AT91_PIN_PA28,
.is_rmii = 1,
};
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 0a8fe6a1b7c..ab024fa11d5 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -60,13 +60,15 @@ static void __init picotux200_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata picotux200_eth_data = {
+static struct macb_platform_data __initdata picotux200_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata picotux200_usbh_data = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_mmc_data __initdata picotux200_mmc_data = {
@@ -74,6 +76,7 @@ static struct at91_mmc_data __initdata picotux200_mmc_data = {
.slot_b = 0,
.wire4 = 1,
.wp_pin = AT91_PIN_PA17,
+ .vcc_pin = -EINVAL,
};
#define PICOTUX200_FLASH_BASE AT91_CHIPSELECT_0
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 07421bdb88e..e029d220cb8 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -77,6 +77,8 @@ static void __init ek_init_early(void)
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -84,7 +86,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
/*
@@ -104,7 +106,7 @@ static struct spi_board_info ek_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA31,
.is_rmii = 1,
};
@@ -133,7 +135,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.parts = ek_nand_partition,
@@ -161,7 +163,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -172,9 +174,9 @@ static void __init ek_add_device_nand(void)
static struct at91_mmc_data __initdata ek_mmc_data = {
.slot_b = 0,
.wire4 = 1,
-// .det_pin = ... not connected
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
/*
@@ -251,7 +253,7 @@ static void __init ek_board_init(void)
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
/* shutdown controller, wakeup button (5 msec low) */
- at91_sys_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10) | AT91_SHDW_WKMODE0_LOW
+ at91_shdwc_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10) | AT91_SHDW_WKMODE0_LOW
| AT91_SHDW_RTTWKEN);
}
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index 80a8c9c6e92..782f37946af 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -65,13 +65,15 @@ static void __init dk_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata dk_eth_data = {
+static struct macb_platform_data __initdata dk_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata dk_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata dk_udc_data = {
@@ -80,16 +82,19 @@ static struct at91_udc_data __initdata dk_udc_data = {
};
static struct at91_cf_data __initdata dk_cf_data = {
+ .irq_pin = -EINVAL,
.det_pin = AT91_PIN_PB0,
+ .vcc_pin = -EINVAL,
.rst_pin = AT91_PIN_PC5,
- // .irq_pin = ... not connected
- // .vcc_pin = ... always powered
};
#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
static struct at91_mmc_data __initdata dk_mmc_data = {
.slot_b = 0,
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
#endif
@@ -143,7 +148,7 @@ static struct atmel_nand_data __initdata dk_nand_data = {
.cle = 21,
.det_pin = AT91_PIN_PB1,
.rdy_pin = AT91_PIN_PC2,
- // .enable_pin = ... not there
+ .enable_pin = -EINVAL,
.parts = dk_nand_partition,
.num_parts = ARRAY_SIZE(dk_nand_partition),
};
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 99fd7f8aee0..ef7c12a9224 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -65,13 +65,15 @@ static void __init ek_init_early(void)
at91_set_serial_console(0);
}
-static struct at91_eth_data __initdata ek_eth_data = {
+static struct macb_platform_data __initdata ek_eth_data = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata ek_udc_data = {
@@ -85,6 +87,7 @@ static struct at91_mmc_data __initdata ek_mmc_data = {
.slot_b = 0,
.wire4 = 1,
.wp_pin = AT91_PIN_PA17,
+ .vcc_pin = -EINVAL,
};
#endif
diff --git a/arch/arm/mach-at91/board-rsi-ews.c b/arch/arm/mach-at91/board-rsi-ews.c
index e927df0175d..af0750fafa2 100644
--- a/arch/arm/mach-at91/board-rsi-ews.c
+++ b/arch/arm/mach-at91/board-rsi-ews.c
@@ -60,7 +60,7 @@ static void __init rsi_ews_init_early(void)
/*
* Ethernet
*/
-static struct at91_eth_data rsi_ews_eth_data __initdata = {
+static struct macb_platform_data rsi_ews_eth_data __initdata = {
.phy_irq_pin = AT91_PIN_PC4,
.is_rmii = 1,
};
@@ -70,6 +70,8 @@ static struct at91_eth_data rsi_ews_eth_data __initdata = {
*/
static struct at91_usbh_data rsi_ews_usbh_data __initdata = {
.ports = 1,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 072d53af98d..84bce587735 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -72,6 +72,8 @@ static void __init ek_init_early(void)
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -79,7 +81,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -109,7 +111,7 @@ static struct spi_board_info ek_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA7,
.is_rmii = 0,
};
@@ -134,7 +136,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.parts = ek_nand_partition,
@@ -162,7 +164,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -176,7 +178,7 @@ static struct at91_mmc_data __initdata ek_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PC8,
.wp_pin = AT91_PIN_PC4,
-// .vcc_pin = ... not connected
+ .vcc_pin = -EINVAL,
};
static void __init ek_board_init(void)
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 4f10181a078..be8233bcabd 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -75,6 +75,8 @@ static void __init ek_init_early(void)
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -82,7 +84,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -151,7 +153,7 @@ static struct spi_board_info ek_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA7,
.is_rmii = 1,
};
@@ -176,7 +178,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.parts = ek_nand_partition,
@@ -211,7 +213,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -223,9 +225,9 @@ static void __init ek_add_device_nand(void)
static struct at91_mmc_data __initdata ek_mmc_data = {
.slot_b = 1,
.wire4 = 1,
-// .det_pin = ... not connected
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index b005b738e8f..40895072a1a 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -131,7 +131,7 @@ static struct sam9_smc_config __initdata dm9000_smc_config = {
static void __init ek_add_device_dm9000(void)
{
/* Configure chip-select 2 (DM9000) */
- sam9_smc_configure(2, &dm9000_smc_config);
+ sam9_smc_configure(0, 2, &dm9000_smc_config);
/* Configure Reset signal as output */
at91_set_gpio_output(AT91_PIN_PC10, 0);
@@ -151,6 +151,8 @@ static void __init ek_add_device_dm9000(void) {}
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
@@ -159,7 +161,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PB29,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -182,7 +184,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 22,
.cle = 21,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC15,
.enable_pin = AT91_PIN_PC14,
.parts = ek_nand_partition,
@@ -217,7 +219,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -345,6 +347,9 @@ static struct spi_board_info ek_spi_devices[] = {
*/
static struct at91_mmc_data __initdata ek_mmc_data = {
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
#endif /* CONFIG_SPI_ATMEL_* */
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index bccdcf23caa..29f66052fe6 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -74,6 +74,7 @@ static void __init ek_init_early(void)
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
.vbus_pin = { AT91_PIN_PA24, AT91_PIN_PA21 },
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -81,7 +82,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PA25,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -151,14 +152,14 @@ static struct at91_mmc_data __initdata ek_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PE18,
.wp_pin = AT91_PIN_PE19,
-// .vcc_pin = ... not connected
+ .vcc_pin = -EINVAL,
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PE31,
.is_rmii = 1,
};
@@ -183,7 +184,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PA22,
.enable_pin = AT91_PIN_PD15,
.parts = ek_nand_partition,
@@ -218,7 +219,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -353,6 +354,7 @@ static void __init ek_add_device_buttons(void) {}
* reset_pin is not connected: NRST
*/
static struct ac97c_platform_data ek_ac97_data = {
+ .reset_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 64fc75c9d0a..843d6286c6f 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -86,6 +86,8 @@ static void __init ek_init_early(void)
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -93,7 +95,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PC5,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
@@ -123,7 +125,7 @@ static struct spi_board_info ek_spi_devices[] = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PA7,
.is_rmii = 1,
};
@@ -163,6 +165,7 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.cle = 22,
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
+ .det_pin = -EINVAL,
.parts = ek_nand_partition,
.num_parts = ARRAY_SIZE(ek_nand_partition),
};
@@ -195,7 +198,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -210,6 +213,7 @@ static struct mci_platform_data __initdata ek_mmc_data = {
.slot[1] = {
.bus_width = 4,
.detect_pin = AT91_PIN_PC9,
+ .wp_pin = -EINVAL,
},
};
@@ -218,6 +222,8 @@ static struct at91_mmc_data __initdata ek_mmc_data = {
.slot_b = 1, /* Only one slot so use slot B */
.wire4 = 1,
.det_pin = AT91_PIN_PC9,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
#endif
@@ -227,6 +233,7 @@ static void __init ek_add_device_mmc(void)
if (ek_have_2mmc()) {
ek_mmc_data.slot[0].bus_width = 4;
ek_mmc_data.slot[0].detect_pin = AT91_PIN_PC2;
+ ek_mmc_data.slot[0].wp_pin = -1;
}
at91_add_device_mci(0, &ek_mmc_data);
#else
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 92de9127923..ea0d1b9c2b7 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -69,6 +69,7 @@ static void __init ek_init_early(void)
static struct at91_usbh_data __initdata ek_usbh_hs_data = {
.ports = 2,
.vbus_pin = {AT91_PIN_PD1, AT91_PIN_PD3},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
@@ -100,6 +101,7 @@ static struct mci_platform_data __initdata mci0_data = {
.slot[0] = {
.bus_width = 4,
.detect_pin = AT91_PIN_PD10,
+ .wp_pin = -EINVAL,
},
};
@@ -115,7 +117,7 @@ static struct mci_platform_data __initdata mci1_data = {
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PD5,
.is_rmii = 1,
};
@@ -143,6 +145,7 @@ static struct atmel_nand_data __initdata ek_nand_data = {
.cle = 22,
.rdy_pin = AT91_PIN_PC8,
.enable_pin = AT91_PIN_PC14,
+ .det_pin = -EINVAL,
.parts = ek_nand_partition,
.num_parts = ARRAY_SIZE(ek_nand_partition),
};
@@ -175,7 +178,7 @@ static void __init ek_add_device_nand(void)
ek_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -330,6 +333,7 @@ static void __init ek_add_device_buttons(void) {}
* reset_pin is not connected: NRST
*/
static struct ac97c_platform_data ek_ac97_data = {
+ .reset_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index b2b748239f3..c1366d0032b 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -67,8 +67,8 @@ static struct usba_platform_data __initdata ek_usba_udc_data = {
static struct at91_mmc_data __initdata ek_mmc_data = {
.wire4 = 1,
.det_pin = AT91_PIN_PA15,
-// .wp_pin = ... not connected
-// .vcc_pin = ... not connected
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
@@ -91,7 +91,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PD17,
.enable_pin = AT91_PIN_PB6,
.parts = ek_nand_partition,
@@ -119,7 +119,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = {
static void __init ek_add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &ek_nand_smc_config);
+ sam9_smc_configure(0, 3, &ek_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -204,6 +204,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data;
* reset_pin is not connected: NRST
*/
static struct ac97c_platform_data ek_ac97_data = {
+ .reset_pin = -EINVAL,
};
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 0df01c6e2d0..4770db08e5a 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -57,15 +57,19 @@ static void __init snapper9260_init_early(void)
static struct at91_usbh_data __initdata snapper9260_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
static struct at91_udc_data __initdata snapper9260_udc_data = {
.vbus_pin = SNAPPER9260_IO_EXP_GPIO(5),
.vbus_active_low = 1,
.vbus_polled = 1,
+ .pullup_pin = -EINVAL,
};
-static struct at91_eth_data snapper9260_macb_data = {
+static struct macb_platform_data snapper9260_macb_data = {
+ .phy_irq_pin = -EINVAL,
.is_rmii = 1,
};
@@ -104,6 +108,8 @@ static struct atmel_nand_data __initdata snapper9260_nand_data = {
.parts = snapper9260_nand_partitions,
.num_parts = ARRAY_SIZE(snapper9260_nand_partitions),
.bus_width_16 = 0,
+ .enable_pin = -EINVAL,
+ .det_pin = -EINVAL,
};
static struct sam9_smc_config __initdata snapper9260_nand_smc_config = {
@@ -149,7 +155,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = {
static void __init snapper9260_add_device_nand(void)
{
at91_set_A_periph(AT91_PIN_PC14, 0);
- sam9_smc_configure(3, &snapper9260_nand_smc_config);
+ sam9_smc_configure(0, 3, &snapper9260_nand_smc_config);
at91_add_device_nand(&snapper9260_nand_data);
}
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index 936e5fd7f40..72eb3b4d9ab 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -85,6 +85,7 @@ static struct atmel_nand_data __initdata nand_data = {
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
.bus_width_16 = 0,
+ .det_pin = -EINVAL,
};
static struct sam9_smc_config __initdata nand_smc_config = {
@@ -108,7 +109,7 @@ static struct sam9_smc_config __initdata nand_smc_config = {
static void __init add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
- sam9_smc_configure(3, &nand_smc_config);
+ sam9_smc_configure(0, 3, &nand_smc_config);
at91_add_device_nand(&nand_data);
}
@@ -122,12 +123,17 @@ static void __init add_device_nand(void)
static struct mci_platform_data __initdata mmc_data = {
.slot[0] = {
.bus_width = 4,
+ .detect_pin = -1,
+ .wp_pin = -1,
},
};
#else
static struct at91_mmc_data __initdata mmc_data = {
.slot_b = 0,
.wire4 = 1,
+ .det_pin = -EINVAL,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
#endif
@@ -137,6 +143,8 @@ static struct at91_mmc_data __initdata mmc_data = {
*/
static struct at91_usbh_data __initdata usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
@@ -145,19 +153,19 @@ static struct at91_usbh_data __initdata usbh_data = {
*/
static struct at91_udc_data __initdata portuxg20_udc_data = {
.vbus_pin = AT91_PIN_PC7,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
static struct at91_udc_data __initdata stamp9g20evb_udc_data = {
.vbus_pin = AT91_PIN_PA22,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata macb_data = {
+static struct macb_platform_data __initdata macb_data = {
.phy_irq_pin = AT91_PIN_PA28,
.is_rmii = 1,
};
diff --git a/arch/arm/mach-at91/board-usb-a926x.c b/arch/arm/mach-at91/board-usb-a926x.c
index 0a20bab21f9..26c36fc2d1e 100644
--- a/arch/arm/mach-at91/board-usb-a926x.c
+++ b/arch/arm/mach-at91/board-usb-a926x.c
@@ -66,6 +66,8 @@ static void __init ek_init_early(void)
*/
static struct at91_usbh_data __initdata ek_usbh_data = {
.ports = 2,
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -73,7 +75,7 @@ static struct at91_usbh_data __initdata ek_usbh_data = {
*/
static struct at91_udc_data __initdata ek_udc_data = {
.vbus_pin = AT91_PIN_PB11,
- .pullup_pin = 0, /* pull-up driven by UDC */
+ .pullup_pin = -EINVAL, /* pull-up driven by UDC */
};
static void __init ek_add_device_udc(void)
@@ -146,7 +148,7 @@ static void __init ek_add_device_spi(void)
/*
* MACB Ethernet device
*/
-static struct at91_eth_data __initdata ek_macb_data = {
+static struct macb_platform_data __initdata ek_macb_data = {
.phy_irq_pin = AT91_PIN_PE31,
.is_rmii = 1,
};
@@ -193,7 +195,7 @@ static struct mtd_partition __initdata ek_nand_partition[] = {
static struct atmel_nand_data __initdata ek_nand_data = {
.ale = 21,
.cle = 22,
-// .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PA22,
.enable_pin = AT91_PIN_PD15,
.parts = ek_nand_partition,
@@ -245,9 +247,9 @@ static void __init ek_add_device_nand(void)
/* configure chip-select 3 (NAND) */
if (machine_is_usb_a9g20())
- sam9_smc_configure(3, &usb_a9g20_nand_smc_config);
+ sam9_smc_configure(0, 3, &usb_a9g20_nand_smc_config);
else
- sam9_smc_configure(3, &usb_a9260_nand_smc_config);
+ sam9_smc_configure(0, 3, &usb_a9260_nand_smc_config);
at91_add_device_nand(&ek_nand_data);
}
@@ -344,7 +346,7 @@ static void __init ek_board_init(void)
/* I2C */
at91_add_device_i2c(NULL, 0);
/* shutdown controller, wakeup button (5 msec low) */
- at91_sys_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10)
+ at91_shdwc_write(AT91_SHDW_MR, AT91_SHDW_CPTWK0_(10)
| AT91_SHDW_WKMODE0_LOW
| AT91_SHDW_RTTWKEN);
}
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 12a3f955162..bbd553e1cd9 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -110,7 +110,7 @@ static struct gpio_led yl9200_leds[] = {
/*
* Ethernet
*/
-static struct at91_eth_data __initdata yl9200_eth_data = {
+static struct macb_platform_data __initdata yl9200_eth_data = {
.phy_irq_pin = AT91_PIN_PB28,
.is_rmii = 1,
};
@@ -120,6 +120,8 @@ static struct at91_eth_data __initdata yl9200_eth_data = {
*/
static struct at91_usbh_data __initdata yl9200_usbh_data = {
.ports = 1, /* PQFP version of AT91RM9200 */
+ .vbus_pin = {-EINVAL, -EINVAL},
+ .overcurrent_pin= {-EINVAL, -EINVAL},
};
/*
@@ -137,8 +139,9 @@ static struct at91_udc_data __initdata yl9200_udc_data = {
*/
static struct at91_mmc_data __initdata yl9200_mmc_data = {
.det_pin = AT91_PIN_PB9,
- // .wp_pin = ... not connected
.wire4 = 1,
+ .wp_pin = -EINVAL,
+ .vcc_pin = -EINVAL,
};
/*
@@ -175,7 +178,7 @@ static struct mtd_partition __initdata yl9200_nand_partition[] = {
static struct atmel_nand_data __initdata yl9200_nand_data = {
.ale = 6,
.cle = 7,
- // .det_pin = ... not connected
+ .det_pin = -EINVAL,
.rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */
.enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */
.parts = yl9200_nand_partition,
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 938b34f5774..4866b8180d6 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -29,6 +29,7 @@ extern void __init at91_aic_init(unsigned int priority[]);
/* Timer */
struct sys_timer;
extern struct sys_timer at91rm9200_timer;
+extern void at91sam926x_ioremap_pit(u32 addr);
extern struct sys_timer at91sam926x_timer;
extern struct sys_timer at91x40_timer;
@@ -57,7 +58,10 @@ extern void at91_irq_suspend(void);
extern void at91_irq_resume(void);
/* reset */
-extern void at91sam9_alt_reset(void);
+extern void at91sam9_alt_restart(char, const char *);
+
+/* shutdown */
+extern void at91_ioremap_shdwc(u32 base_addr);
/* GPIO */
#define AT91RM9200_PQFP 3 /* AT91RM9200 PQFP package has 3 banks */
@@ -65,11 +69,9 @@ extern void at91sam9_alt_reset(void);
struct at91_gpio_bank {
unsigned short id; /* peripheral ID */
- unsigned long offset; /* offset from system peripheral base */
- struct clk *clock; /* associated clock */
+ unsigned long regbase; /* offset from system peripheral base */
};
extern void __init at91_gpio_init(struct at91_gpio_bank *, int nr_banks);
extern void __init at91_gpio_irq_setup(void);
-extern void (*at91_arch_reset)(void);
extern int at91_extern_irq;
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index 224e9e2f867..74d6783eeab 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -29,8 +29,9 @@
struct at91_gpio_chip {
struct gpio_chip chip;
struct at91_gpio_chip *next; /* Bank sharing same clock */
- struct at91_gpio_bank *bank; /* Bank definition */
+ int id; /* ID of register bank */
void __iomem *regbase; /* Base of register bank */
+ struct clk *clock; /* associated clock */
};
#define to_at91_gpio_chip(c) container_of(c, struct at91_gpio_chip, chip)
@@ -58,18 +59,17 @@ static int at91_gpiolib_direction_input(struct gpio_chip *chip,
}
static struct at91_gpio_chip gpio_chip[] = {
- AT91_GPIO_CHIP("A", 0x00 + PIN_BASE, 32),
- AT91_GPIO_CHIP("B", 0x20 + PIN_BASE, 32),
- AT91_GPIO_CHIP("C", 0x40 + PIN_BASE, 32),
- AT91_GPIO_CHIP("D", 0x60 + PIN_BASE, 32),
- AT91_GPIO_CHIP("E", 0x80 + PIN_BASE, 32),
+ AT91_GPIO_CHIP("pioA", 0x00, 32),
+ AT91_GPIO_CHIP("pioB", 0x20, 32),
+ AT91_GPIO_CHIP("pioC", 0x40, 32),
+ AT91_GPIO_CHIP("pioD", 0x60, 32),
+ AT91_GPIO_CHIP("pioE", 0x80, 32),
};
static int gpio_banks;
static inline void __iomem *pin_to_controller(unsigned pin)
{
- pin -= PIN_BASE;
pin /= 32;
if (likely(pin < gpio_banks))
return gpio_chip[pin].regbase;
@@ -79,7 +79,6 @@ static inline void __iomem *pin_to_controller(unsigned pin)
static inline unsigned pin_to_mask(unsigned pin)
{
- pin -= PIN_BASE;
return 1 << (pin % 32);
}
@@ -274,8 +273,9 @@ static u32 backups[MAX_GPIO_BANKS];
static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
{
- unsigned mask = pin_to_mask(d->irq);
- unsigned bank = (d->irq - PIN_BASE) / 32;
+ unsigned pin = irq_to_gpio(d->irq);
+ unsigned mask = pin_to_mask(pin);
+ unsigned bank = pin / 32;
if (unlikely(bank >= MAX_GPIO_BANKS))
return -EINVAL;
@@ -285,7 +285,7 @@ static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
else
wakeups[bank] &= ~mask;
- irq_set_irq_wake(gpio_chip[bank].bank->id, state);
+ irq_set_irq_wake(gpio_chip[bank].id, state);
return 0;
}
@@ -302,7 +302,7 @@ void at91_gpio_suspend(void)
__raw_writel(wakeups[i], pio + PIO_IER);
if (!wakeups[i])
- clk_disable(gpio_chip[i].bank->clock);
+ clk_disable(gpio_chip[i].clock);
else {
#ifdef CONFIG_PM_DEBUG
printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", 'A'+i, wakeups[i]);
@@ -319,7 +319,7 @@ void at91_gpio_resume(void)
void __iomem *pio = gpio_chip[i].regbase;
if (!wakeups[i])
- clk_enable(gpio_chip[i].bank->clock);
+ clk_enable(gpio_chip[i].clock);
__raw_writel(wakeups[i], pio + PIO_IDR);
__raw_writel(backups[i], pio + PIO_IER);
@@ -344,8 +344,9 @@ void at91_gpio_resume(void)
static void gpio_irq_mask(struct irq_data *d)
{
- void __iomem *pio = pin_to_controller(d->irq);
- unsigned mask = pin_to_mask(d->irq);
+ unsigned pin = irq_to_gpio(d->irq);
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
if (pio)
__raw_writel(mask, pio + PIO_IDR);
@@ -353,8 +354,9 @@ static void gpio_irq_mask(struct irq_data *d)
static void gpio_irq_unmask(struct irq_data *d)
{
- void __iomem *pio = pin_to_controller(d->irq);
- unsigned mask = pin_to_mask(d->irq);
+ unsigned pin = irq_to_gpio(d->irq);
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
if (pio)
__raw_writel(mask, pio + PIO_IER);
@@ -382,7 +384,7 @@ static struct irq_chip gpio_irqchip = {
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- unsigned pin;
+ unsigned irq_pin;
struct irq_data *idata = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(idata);
struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
@@ -405,12 +407,12 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
continue;
}
- pin = at91_gpio->chip.base;
+ irq_pin = gpio_to_irq(at91_gpio->chip.base);
while (isr) {
if (isr & 1)
- generic_handle_irq(pin);
- pin++;
+ generic_handle_irq(irq_pin);
+ irq_pin++;
isr >>= 1;
}
}
@@ -438,7 +440,7 @@ static int at91_gpio_show(struct seq_file *s, void *unused)
seq_printf(s, "%i:\t", j);
for (bank = 0; bank < gpio_banks; bank++) {
- unsigned pin = PIN_BASE + (32 * bank) + j;
+ unsigned pin = (32 * bank) + j;
void __iomem *pio = pin_to_controller(pin);
unsigned mask = pin_to_mask(pin);
@@ -491,27 +493,28 @@ static struct lock_class_key gpio_lock_class;
*/
void __init at91_gpio_irq_setup(void)
{
- unsigned pioc, pin;
+ unsigned pioc, irq = gpio_to_irq(0);
struct at91_gpio_chip *this, *prev;
- for (pioc = 0, pin = PIN_BASE, this = gpio_chip, prev = NULL;
+ for (pioc = 0, this = gpio_chip, prev = NULL;
pioc++ < gpio_banks;
prev = this, this++) {
- unsigned id = this->bank->id;
+ unsigned id = this->id;
unsigned i;
__raw_writel(~0, this->regbase + PIO_IDR);
- for (i = 0, pin = this->chip.base; i < 32; i++, pin++) {
- irq_set_lockdep_class(pin, &gpio_lock_class);
+ for (i = 0, irq = gpio_to_irq(this->chip.base); i < 32;
+ i++, irq++) {
+ irq_set_lockdep_class(irq, &gpio_lock_class);
/*
* Can use the "simple" and not "edge" handler since it's
* shorter, and the AIC handles interrupts sanely.
*/
- irq_set_chip_and_handler(pin, &gpio_irqchip,
+ irq_set_chip_and_handler(irq, &gpio_irqchip,
handle_simple_irq);
- set_irq_flags(pin, IRQF_VALID);
+ set_irq_flags(irq, IRQF_VALID);
}
/* The toplevel handler handles one bank of GPIOs, except
@@ -524,7 +527,7 @@ void __init at91_gpio_irq_setup(void)
irq_set_chip_data(id, this);
irq_set_chained_handler(id, gpio_irq_handler);
}
- pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks);
+ pr_info("AT91: %d gpio irqs in %d banks\n", irq - gpio_to_irq(0), gpio_banks);
}
/* gpiolib support */
@@ -612,16 +615,26 @@ void __init at91_gpio_init(struct at91_gpio_bank *data, int nr_banks)
for (i = 0; i < nr_banks; i++) {
at91_gpio = &gpio_chip[i];
- at91_gpio->bank = &data[i];
- at91_gpio->chip.base = PIN_BASE + i * 32;
- at91_gpio->regbase = at91_gpio->bank->offset +
- (void __iomem *)AT91_VA_BASE_SYS;
+ at91_gpio->id = data[i].id;
+ at91_gpio->chip.base = i * 32;
+
+ at91_gpio->regbase = ioremap(data[i].regbase, 512);
+ if (!at91_gpio->regbase) {
+ pr_err("at91_gpio.%d, failed to map registers, ignoring.\n", i);
+ continue;
+ }
+
+ at91_gpio->clock = clk_get_sys(NULL, at91_gpio->chip.label);
+ if (!at91_gpio->clock) {
+ pr_err("at91_gpio.%d, failed to get clock, ignoring.\n", i);
+ continue;
+ }
/* enable PIO controller's clock */
- clk_enable(at91_gpio->bank->clock);
+ clk_enable(at91_gpio->clock);
/* AT91SAM9263_ID_PIOCDE groups PIOC, PIOD, PIOE */
- if (last && last->bank->id == at91_gpio->bank->id)
+ if (last && last->id == at91_gpio->id)
last->next = at91_gpio;
last = at91_gpio;
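Since pin numbers no longer double as IRQ numbers once PIN_BASE is gone, the handlers above translate with irq_to_gpio()/gpio_to_irq(). Board or driver code that used to hand an AT91_PIN_* value straight to request_irq() has to do the same translation; a small sketch under that assumption (function and signal names are hypothetical):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_cd_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* card-detect change handled elsewhere */
}

static int example_hook_card_detect(int detect_pin)
{
	int ret = gpio_request_one(detect_pin, GPIOF_IN, "cd");

	if (ret)
		return ret;
	/* the IRQ number is now derived from the GPIO number, not equal to it */
	return request_irq(gpio_to_irq(detect_pin), example_cd_isr,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "cd", NULL);
}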
diff --git a/arch/arm/mach-at91/include/mach/at91_aic.h b/arch/arm/mach-at91/include/mach/at91_aic.h
index 03566799d3b..3045781c473 100644
--- a/arch/arm/mach-at91/include/mach/at91_aic.h
+++ b/arch/arm/mach-at91/include/mach/at91_aic.h
@@ -16,7 +16,19 @@
#ifndef AT91_AIC_H
#define AT91_AIC_H
-#define AT91_AIC_SMR(n) (AT91_AIC + ((n) * 4)) /* Source Mode Registers 0-31 */
+#ifndef __ASSEMBLY__
+extern void __iomem *at91_aic_base;
+
+#define at91_aic_read(field) \
+ __raw_readl(at91_aic_base + field)
+
+#define at91_aic_write(field, value) \
+ __raw_writel(value, at91_aic_base + field);
+#else
+.extern at91_aic_base
+#endif
+
+#define AT91_AIC_SMR(n) ((n) * 4) /* Source Mode Registers 0-31 */
#define AT91_AIC_PRIOR (7 << 0) /* Priority Level */
#define AT91_AIC_SRCTYPE (3 << 5) /* Interrupt Source Type */
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
@@ -24,30 +36,30 @@
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
#define AT91_AIC_SRCTYPE_RISING (3 << 5)
-#define AT91_AIC_SVR(n) (AT91_AIC + 0x80 + ((n) * 4)) /* Source Vector Registers 0-31 */
-#define AT91_AIC_IVR (AT91_AIC + 0x100) /* Interrupt Vector Register */
-#define AT91_AIC_FVR (AT91_AIC + 0x104) /* Fast Interrupt Vector Register */
-#define AT91_AIC_ISR (AT91_AIC + 0x108) /* Interrupt Status Register */
+#define AT91_AIC_SVR(n) (0x80 + ((n) * 4)) /* Source Vector Registers 0-31 */
+#define AT91_AIC_IVR 0x100 /* Interrupt Vector Register */
+#define AT91_AIC_FVR 0x104 /* Fast Interrupt Vector Register */
+#define AT91_AIC_ISR 0x108 /* Interrupt Status Register */
#define AT91_AIC_IRQID (0x1f << 0) /* Current Interrupt Identifier */
-#define AT91_AIC_IPR (AT91_AIC + 0x10c) /* Interrupt Pending Register */
-#define AT91_AIC_IMR (AT91_AIC + 0x110) /* Interrupt Mask Register */
-#define AT91_AIC_CISR (AT91_AIC + 0x114) /* Core Interrupt Status Register */
+#define AT91_AIC_IPR 0x10c /* Interrupt Pending Register */
+#define AT91_AIC_IMR 0x110 /* Interrupt Mask Register */
+#define AT91_AIC_CISR 0x114 /* Core Interrupt Status Register */
#define AT91_AIC_NFIQ (1 << 0) /* nFIQ Status */
#define AT91_AIC_NIRQ (1 << 1) /* nIRQ Status */
-#define AT91_AIC_IECR (AT91_AIC + 0x120) /* Interrupt Enable Command Register */
-#define AT91_AIC_IDCR (AT91_AIC + 0x124) /* Interrupt Disable Command Register */
-#define AT91_AIC_ICCR (AT91_AIC + 0x128) /* Interrupt Clear Command Register */
-#define AT91_AIC_ISCR (AT91_AIC + 0x12c) /* Interrupt Set Command Register */
-#define AT91_AIC_EOICR (AT91_AIC + 0x130) /* End of Interrupt Command Register */
-#define AT91_AIC_SPU (AT91_AIC + 0x134) /* Spurious Interrupt Vector Register */
-#define AT91_AIC_DCR (AT91_AIC + 0x138) /* Debug Control Register */
+#define AT91_AIC_IECR 0x120 /* Interrupt Enable Command Register */
+#define AT91_AIC_IDCR 0x124 /* Interrupt Disable Command Register */
+#define AT91_AIC_ICCR 0x128 /* Interrupt Clear Command Register */
+#define AT91_AIC_ISCR 0x12c /* Interrupt Set Command Register */
+#define AT91_AIC_EOICR 0x130 /* End of Interrupt Command Register */
+#define AT91_AIC_SPU 0x134 /* Spurious Interrupt Vector Register */
+#define AT91_AIC_DCR 0x138 /* Debug Control Register */
#define AT91_AIC_DCR_PROT (1 << 0) /* Protection Mode */
#define AT91_AIC_DCR_GMSK (1 << 1) /* General Mask */
-#define AT91_AIC_FFER (AT91_AIC + 0x140) /* Fast Forcing Enable Register [SAM9 only] */
-#define AT91_AIC_FFDR (AT91_AIC + 0x144) /* Fast Forcing Disable Register [SAM9 only] */
-#define AT91_AIC_FFSR (AT91_AIC + 0x148) /* Fast Forcing Status Register [SAM9 only] */
+#define AT91_AIC_FFER 0x140 /* Fast Forcing Enable Register [SAM9 only] */
+#define AT91_AIC_FFDR 0x144 /* Fast Forcing Disable Register [SAM9 only] */
+#define AT91_AIC_FFSR 0x148 /* Fast Forcing Status Register [SAM9 only] */
#endif
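The AIC offsets are now relative to the ioremapped at91_aic_base, so C code is expected to go through the at91_aic_read()/at91_aic_write() helpers added above instead of adding AT91_AIC to AT91_VA_BASE_SYS. An illustrative use with a hypothetical function name:

static void example_mask_all_aic_sources(void)
{
	at91_aic_write(AT91_AIC_IDCR, 0xffffffff);	/* disable every source */
	at91_aic_write(AT91_AIC_ICCR, 0xffffffff);	/* clear anything pending */
}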
diff --git a/arch/arm/mach-at91/include/mach/at91_dbgu.h b/arch/arm/mach-at91/include/mach/at91_dbgu.h
index dbfe455a4c4..2aa0c5e1349 100644
--- a/arch/arm/mach-at91/include/mach/at91_dbgu.h
+++ b/arch/arm/mach-at91/include/mach/at91_dbgu.h
@@ -19,7 +19,7 @@
#define dbgu_readl(dbgu, field) \
__raw_readl(AT91_VA_BASE_SYS + dbgu + AT91_DBGU_ ## field)
-#ifdef AT91_DBGU
+#if !defined(CONFIG_ARCH_AT91X40)
#define AT91_DBGU_CR (0x00) /* Control Register */
#define AT91_DBGU_MR (0x04) /* Mode Register */
#define AT91_DBGU_IER (0x08) /* Interrupt Enable Register */
diff --git a/arch/arm/mach-at91/include/mach/at91_pit.h b/arch/arm/mach-at91/include/mach/at91_pit.h
index 974d0bd05b5..d1f80ad7f4d 100644
--- a/arch/arm/mach-at91/include/mach/at91_pit.h
+++ b/arch/arm/mach-at91/include/mach/at91_pit.h
@@ -16,16 +16,16 @@
#ifndef AT91_PIT_H
#define AT91_PIT_H
-#define AT91_PIT_MR (AT91_PIT + 0x00) /* Mode Register */
+#define AT91_PIT_MR 0x00 /* Mode Register */
#define AT91_PIT_PITIEN (1 << 25) /* Timer Interrupt Enable */
#define AT91_PIT_PITEN (1 << 24) /* Timer Enabled */
#define AT91_PIT_PIV (0xfffff) /* Periodic Interval Value */
-#define AT91_PIT_SR (AT91_PIT + 0x04) /* Status Register */
+#define AT91_PIT_SR 0x04 /* Status Register */
#define AT91_PIT_PITS (1 << 0) /* Timer Status */
-#define AT91_PIT_PIVR (AT91_PIT + 0x08) /* Periodic Interval Value Register */
-#define AT91_PIT_PIIR (AT91_PIT + 0x0c) /* Periodic Interval Image Register */
+#define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */
+#define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */
#define AT91_PIT_PICNT (0xfff << 20) /* Interval Counter */
#define AT91_PIT_CPIV (0xfffff) /* Interval Value */
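As with the AIC, the PIT registers are now addressed relative to a base obtained from at91sam926x_ioremap_pit(), declared in the generic.h hunk above. A hedged sketch, with a hypothetical base pointer, of the kind of read the PIT clocksource code performs:

#include <linux/io.h>

static void __iomem *example_pit_base;	/* hypothetical; would come from the ioremap helper */

static unsigned int example_pit_elapsed_periods(void)
{
	/* PIVR: PICNT (periods elapsed since the last read) sits in bits 20..31 */
	return (__raw_readl(example_pit_base + AT91_PIT_PIVR) & AT91_PIT_PICNT) >> 20;
}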
diff --git a/arch/arm/mach-at91/include/mach/at91_rtc.h b/arch/arm/mach-at91/include/mach/at91_rtc.h
index e56f4701a3e..da1945e5f71 100644
--- a/arch/arm/mach-at91/include/mach/at91_rtc.h
+++ b/arch/arm/mach-at91/include/mach/at91_rtc.h
@@ -16,7 +16,7 @@
#ifndef AT91_RTC_H
#define AT91_RTC_H
-#define AT91_RTC_CR (AT91_RTC + 0x00) /* Control Register */
+#define AT91_RTC_CR 0x00 /* Control Register */
#define AT91_RTC_UPDTIM (1 << 0) /* Update Request Time Register */
#define AT91_RTC_UPDCAL (1 << 1) /* Update Request Calendar Register */
#define AT91_RTC_TIMEVSEL (3 << 8) /* Time Event Selection */
@@ -29,44 +29,44 @@
#define AT91_RTC_CALEVSEL_MONTH (1 << 16)
#define AT91_RTC_CALEVSEL_YEAR (2 << 16)
-#define AT91_RTC_MR (AT91_RTC + 0x04) /* Mode Register */
+#define AT91_RTC_MR 0x04 /* Mode Register */
#define AT91_RTC_HRMOD (1 << 0) /* 12/24 Hour Mode */
-#define AT91_RTC_TIMR (AT91_RTC + 0x08) /* Time Register */
+#define AT91_RTC_TIMR 0x08 /* Time Register */
#define AT91_RTC_SEC (0x7f << 0) /* Current Second */
#define AT91_RTC_MIN (0x7f << 8) /* Current Minute */
#define AT91_RTC_HOUR (0x3f << 16) /* Current Hour */
#define AT91_RTC_AMPM (1 << 22) /* Ante Meridiem Post Meridiem Indicator */
-#define AT91_RTC_CALR (AT91_RTC + 0x0c) /* Calendar Register */
+#define AT91_RTC_CALR 0x0c /* Calendar Register */
#define AT91_RTC_CENT (0x7f << 0) /* Current Century */
#define AT91_RTC_YEAR (0xff << 8) /* Current Year */
#define AT91_RTC_MONTH (0x1f << 16) /* Current Month */
#define AT91_RTC_DAY (7 << 21) /* Current Day */
#define AT91_RTC_DATE (0x3f << 24) /* Current Date */
-#define AT91_RTC_TIMALR (AT91_RTC + 0x10) /* Time Alarm Register */
+#define AT91_RTC_TIMALR 0x10 /* Time Alarm Register */
#define AT91_RTC_SECEN (1 << 7) /* Second Alarm Enable */
#define AT91_RTC_MINEN (1 << 15) /* Minute Alarm Enable */
#define AT91_RTC_HOUREN (1 << 23) /* Hour Alarm Enable */
-#define AT91_RTC_CALALR (AT91_RTC + 0x14) /* Calendar Alarm Register */
+#define AT91_RTC_CALALR 0x14 /* Calendar Alarm Register */
#define AT91_RTC_MTHEN (1 << 23) /* Month Alarm Enable */
#define AT91_RTC_DATEEN (1 << 31) /* Date Alarm Enable */
-#define AT91_RTC_SR (AT91_RTC + 0x18) /* Status Register */
+#define AT91_RTC_SR 0x18 /* Status Register */
#define AT91_RTC_ACKUPD (1 << 0) /* Acknowledge for Update */
#define AT91_RTC_ALARM (1 << 1) /* Alarm Flag */
#define AT91_RTC_SECEV (1 << 2) /* Second Event */
#define AT91_RTC_TIMEV (1 << 3) /* Time Event */
#define AT91_RTC_CALEV (1 << 4) /* Calendar Event */
-#define AT91_RTC_SCCR (AT91_RTC + 0x1c) /* Status Clear Command Register */
-#define AT91_RTC_IER (AT91_RTC + 0x20) /* Interrupt Enable Register */
-#define AT91_RTC_IDR (AT91_RTC + 0x24) /* Interrupt Disable Register */
-#define AT91_RTC_IMR (AT91_RTC + 0x28) /* Interrupt Mask Register */
+#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
+#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
+#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
+#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
-#define AT91_RTC_VER (AT91_RTC + 0x2c) /* Valid Entry Register */
+#define AT91_RTC_VER 0x2c /* Valid Entry Register */
#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
#define AT91_RTC_NVCAL (1 << 1) /* Non valid Calendar */
#define AT91_RTC_NVTIMALR (1 << 2) /* Non valid Time Alarm */
diff --git a/arch/arm/mach-at91/include/mach/at91_shdwc.h b/arch/arm/mach-at91/include/mach/at91_shdwc.h
index c4ce07e8a8f..1d4fe822c77 100644
--- a/arch/arm/mach-at91/include/mach/at91_shdwc.h
+++ b/arch/arm/mach-at91/include/mach/at91_shdwc.h
@@ -16,11 +16,21 @@
#ifndef AT91_SHDWC_H
#define AT91_SHDWC_H
-#define AT91_SHDW_CR (AT91_SHDWC + 0x00) /* Shut Down Control Register */
+#ifndef __ASSEMBLY__
+extern void __iomem *at91_shdwc_base;
+
+#define at91_shdwc_read(field) \
+ __raw_readl(at91_shdwc_base + field)
+
+#define at91_shdwc_write(field, value) \
+ __raw_writel(value, at91_shdwc_base + field);
+#endif
+
+#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
#define AT91_SHDW_SHDW (1 << 0) /* Shut Down command */
#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
-#define AT91_SHDW_MR (AT91_SHDWC + 0x04) /* Shut Down Mode Register */
+#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
#define AT91_SHDW_WKMODE0 (3 << 0) /* Wake-up 0 Mode Selection */
#define AT91_SHDW_WKMODE0_NONE 0
#define AT91_SHDW_WKMODE0_HIGH 1
@@ -30,7 +40,7 @@
#define AT91_SHDW_CPTWK0_(x) ((x) << 4)
#define AT91_SHDW_RTTWKEN (1 << 16) /* Real Time Timer Wake-up Enable */
-#define AT91_SHDW_SR (AT91_SHDWC + 0x08) /* Shut Down Status Register */
+#define AT91_SHDW_SR 0x08 /* Shut Down Status Register */
#define AT91_SHDW_WAKEUP0 (1 << 0) /* Wake-up 0 Status */
#define AT91_SHDW_RTTWK (1 << 16) /* Real-time Timer Wake-up */
#define AT91_SHDW_RTCWK (1 << 17) /* Real-time Clock Wake-up [SAM9RL] */
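With the shutdown controller likewise reached through at91_shdwc_base (see the at91_shdwc_write() call in the board-usb-a926x.c hunk above), a power-off hook reduces to one keyed write; a minimal sketch, assuming a hypothetical function name:

static void example_poweroff(void)
{
	/* the KEY password must accompany the shutdown command */
	at91_shdwc_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
}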
diff --git a/arch/arm/mach-at91/include/mach/at91cap9.h b/arch/arm/mach-at91/include/mach/at91cap9.h
index c5df1e8f195..4c0e2f6011d 100644
--- a/arch/arm/mach-at91/include/mach/at91cap9.h
+++ b/arch/arm/mach-at91/include/mach/at91cap9.h
@@ -79,29 +79,28 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_ECC (0xffffe200 - AT91_BASE_SYS)
#define AT91_BCRAMC (0xffffe400 - AT91_BASE_SYS)
#define AT91_DDRSDRC0 (0xffffe600 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffe800 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffea00 - AT91_BASE_SYS)
-#define AT91_CCFG (0xffffeb10 - AT91_BASE_SYS)
-#define AT91_DMA (0xffffec00 - AT91_BASE_SYS)
-#define AT91_DBGU (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOD (0xfffff800 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
#define AT91_GPBR (cpu_is_at91cap9_revB() ? \
(0xfffffd50 - AT91_BASE_SYS) : \
(0xfffffd60 - AT91_BASE_SYS))
+#define AT91CAP9_BASE_ECC 0xffffe200
+#define AT91CAP9_BASE_DMA 0xffffec00
+#define AT91CAP9_BASE_SMC 0xffffe800
+#define AT91CAP9_BASE_DBGU AT91_BASE_DBGU1
+#define AT91CAP9_BASE_PIOA 0xfffff200
+#define AT91CAP9_BASE_PIOB 0xfffff400
+#define AT91CAP9_BASE_PIOC 0xfffff600
+#define AT91CAP9_BASE_PIOD 0xfffff800
+#define AT91CAP9_BASE_SHDWC 0xfffffd10
+#define AT91CAP9_BASE_RTT 0xfffffd20
+#define AT91CAP9_BASE_PIT 0xfffffd30
+#define AT91CAP9_BASE_WDT 0xfffffd40
+
#define AT91_USART0 AT91CAP9_BASE_US0
#define AT91_USART1 AT91CAP9_BASE_US1
#define AT91_USART2 AT91CAP9_BASE_US2
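The AT91CAP9_BASE_* values above are plain physical addresses rather than offsets from AT91_BASE_SYS, which is what the new ioremap helpers declared in generic.h expect. A sketch of how SoC setup code might feed them in (hypothetical function; the real wiring presumably lives in the SoC setup file, at91cap9.c):

#include <linux/init.h>

static void __init example_cap9_map_sysregs(void)
{
	at91_ioremap_shdwc(AT91CAP9_BASE_SHDWC);
	at91sam926x_ioremap_pit(AT91CAP9_BASE_PIT);
}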
diff --git a/arch/arm/mach-at91/include/mach/at91rm9200.h b/arch/arm/mach-at91/include/mach/at91rm9200.h
index e4037b50030..bacb5114181 100644
--- a/arch/arm/mach-at91/include/mach/at91rm9200.h
+++ b/arch/arm/mach-at91/include/mach/at91rm9200.h
@@ -79,17 +79,17 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS) /* Advanced Interrupt Controller */
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS) /* Debug Unit */
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS) /* PIO Controller A */
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS) /* PIO Controller B */
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS) /* PIO Controller C */
-#define AT91_PIOD (0xfffffa00 - AT91_BASE_SYS) /* PIO Controller D */
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS) /* Power Management Controller */
#define AT91_ST (0xfffffd00 - AT91_BASE_SYS) /* System Timer */
-#define AT91_RTC (0xfffffe00 - AT91_BASE_SYS) /* Real-Time Clock */
#define AT91_MC (0xffffff00 - AT91_BASE_SYS) /* Memory Controllers */
+#define AT91RM9200_BASE_DBGU AT91_BASE_DBGU0 /* Debug Unit */
+#define AT91RM9200_BASE_PIOA 0xfffff400 /* PIO Controller A */
+#define AT91RM9200_BASE_PIOB 0xfffff600 /* PIO Controller B */
+#define AT91RM9200_BASE_PIOC 0xfffff800 /* PIO Controller C */
+#define AT91RM9200_BASE_PIOD 0xfffffa00 /* PIO Controller D */
+#define AT91RM9200_BASE_RTC 0xfffffe00 /* Real-Time Clock */
+
#define AT91_USART0 AT91RM9200_BASE_US0
#define AT91_USART1 AT91RM9200_BASE_US1
#define AT91_USART2 AT91RM9200_BASE_US2
diff --git a/arch/arm/mach-at91/include/mach/at91sam9260.h b/arch/arm/mach-at91/include/mach/at91sam9260.h
index 9a791165913..f937c476bb6 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9260.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9260.h
@@ -80,24 +80,23 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_ECC (0xffffe800 - AT91_BASE_SYS)
#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
-#define AT91_CCFG (0xffffef10 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
#define AT91_GPBR (0xfffffd50 - AT91_BASE_SYS)
+#define AT91SAM9260_BASE_ECC 0xffffe800
+#define AT91SAM9260_BASE_SMC 0xffffec00
+#define AT91SAM9260_BASE_DBGU AT91_BASE_DBGU0
+#define AT91SAM9260_BASE_PIOA 0xfffff400
+#define AT91SAM9260_BASE_PIOB 0xfffff600
+#define AT91SAM9260_BASE_PIOC 0xfffff800
+#define AT91SAM9260_BASE_SHDWC 0xfffffd10
+#define AT91SAM9260_BASE_RTT 0xfffffd20
+#define AT91SAM9260_BASE_PIT 0xfffffd30
+#define AT91SAM9260_BASE_WDT 0xfffffd40
+
#define AT91_USART0 AT91SAM9260_BASE_US0
#define AT91_USART1 AT91SAM9260_BASE_US1
#define AT91_USART2 AT91SAM9260_BASE_US2
diff --git a/arch/arm/mach-at91/include/mach/at91sam9261.h b/arch/arm/mach-at91/include/mach/at91sam9261.h
index ce596204cef..175604e261b 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9261.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9261.h
@@ -66,21 +66,21 @@
* System Peripherals (offset from AT91_BASE_SYS)
*/
#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
#define AT91_GPBR (0xfffffd50 - AT91_BASE_SYS)
+#define AT91SAM9261_BASE_SMC 0xffffec00
+#define AT91SAM9261_BASE_DBGU AT91_BASE_DBGU0
+#define AT91SAM9261_BASE_PIOA 0xfffff400
+#define AT91SAM9261_BASE_PIOB 0xfffff600
+#define AT91SAM9261_BASE_PIOC 0xfffff800
+#define AT91SAM9261_BASE_SHDWC 0xfffffd10
+#define AT91SAM9261_BASE_RTT 0xfffffd20
+#define AT91SAM9261_BASE_PIT 0xfffffd30
+#define AT91SAM9261_BASE_WDT 0xfffffd40
+
#define AT91_USART0 AT91SAM9261_BASE_US0
#define AT91_USART1 AT91SAM9261_BASE_US1
#define AT91_USART2 AT91SAM9261_BASE_US2
diff --git a/arch/arm/mach-at91/include/mach/at91sam9263.h b/arch/arm/mach-at91/include/mach/at91sam9263.h
index f1b92961a2b..80c915002d8 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9263.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9263.h
@@ -74,30 +74,29 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_ECC0 (0xffffe000 - AT91_BASE_SYS)
#define AT91_SDRAMC0 (0xffffe200 - AT91_BASE_SYS)
-#define AT91_SMC0 (0xffffe400 - AT91_BASE_SYS)
-#define AT91_ECC1 (0xffffe600 - AT91_BASE_SYS)
#define AT91_SDRAMC1 (0xffffe800 - AT91_BASE_SYS)
-#define AT91_SMC1 (0xffffea00 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffec00 - AT91_BASE_SYS)
-#define AT91_CCFG (0xffffed10 - AT91_BASE_SYS)
-#define AT91_DBGU (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOD (0xfffff800 - AT91_BASE_SYS)
-#define AT91_PIOE (0xfffffa00 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT0 (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
-#define AT91_RTT1 (0xfffffd50 - AT91_BASE_SYS)
#define AT91_GPBR (0xfffffd60 - AT91_BASE_SYS)
+#define AT91SAM9263_BASE_ECC0 0xffffe000
+#define AT91SAM9263_BASE_SMC0 0xffffe400
+#define AT91SAM9263_BASE_ECC1 0xffffe600
+#define AT91SAM9263_BASE_SMC1 0xffffea00
+#define AT91SAM9263_BASE_DBGU AT91_BASE_DBGU1
+#define AT91SAM9263_BASE_PIOA 0xfffff200
+#define AT91SAM9263_BASE_PIOB 0xfffff400
+#define AT91SAM9263_BASE_PIOC 0xfffff600
+#define AT91SAM9263_BASE_PIOD 0xfffff800
+#define AT91SAM9263_BASE_PIOE 0xfffffa00
+#define AT91SAM9263_BASE_SHDWC 0xfffffd10
+#define AT91SAM9263_BASE_RTT0 0xfffffd20
+#define AT91SAM9263_BASE_PIT 0xfffffd30
+#define AT91SAM9263_BASE_WDT 0xfffffd40
+#define AT91SAM9263_BASE_RTT1 0xfffffd50
+
#define AT91_USART0 AT91SAM9263_BASE_US0
#define AT91_USART1 AT91SAM9263_BASE_US1
#define AT91_USART2 AT91SAM9263_BASE_US2
diff --git a/arch/arm/mach-at91/include/mach/at91sam9_smc.h b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
index 57de6207e57..eb18a70fa64 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9_smc.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9_smc.h
@@ -16,7 +16,9 @@
#ifndef AT91SAM9_SMC_H
#define AT91SAM9_SMC_H
-#define AT91_SMC_SETUP(n) (AT91_SMC + 0x00 + ((n)*0x10)) /* Setup Register for CS n */
+#include <mach/cpu.h>
+
+#define AT91_SMC_SETUP 0x00 /* Setup Register for CS n */
#define AT91_SMC_NWESETUP (0x3f << 0) /* NWE Setup Length */
#define AT91_SMC_NWESETUP_(x) ((x) << 0)
#define AT91_SMC_NCS_WRSETUP (0x3f << 8) /* NCS Setup Length in Write Access */
@@ -26,7 +28,7 @@
#define AT91_SMC_NCS_RDSETUP (0x3f << 24) /* NCS Setup Length in Read Access */
#define AT91_SMC_NCS_RDSETUP_(x) ((x) << 24)
-#define AT91_SMC_PULSE(n) (AT91_SMC + 0x04 + ((n)*0x10)) /* Pulse Register for CS n */
+#define AT91_SMC_PULSE 0x04 /* Pulse Register for CS n */
#define AT91_SMC_NWEPULSE (0x7f << 0) /* NWE Pulse Length */
#define AT91_SMC_NWEPULSE_(x) ((x) << 0)
#define AT91_SMC_NCS_WRPULSE (0x7f << 8) /* NCS Pulse Length in Write Access */
@@ -36,13 +38,13 @@
#define AT91_SMC_NCS_RDPULSE (0x7f << 24) /* NCS Pulse Length in Read Access */
#define AT91_SMC_NCS_RDPULSE_(x)((x) << 24)
-#define AT91_SMC_CYCLE(n) (AT91_SMC + 0x08 + ((n)*0x10)) /* Cycle Register for CS n */
+#define AT91_SMC_CYCLE 0x08 /* Cycle Register for CS n */
#define AT91_SMC_NWECYCLE (0x1ff << 0 ) /* Total Write Cycle Length */
#define AT91_SMC_NWECYCLE_(x) ((x) << 0)
#define AT91_SMC_NRDCYCLE (0x1ff << 16) /* Total Read Cycle Length */
#define AT91_SMC_NRDCYCLE_(x) ((x) << 16)
-#define AT91_SMC_MODE(n) (AT91_SMC + 0x0c + ((n)*0x10)) /* Mode Register for CS n */
+#define AT91_SMC_MODE 0x0c /* Mode Register for CS n */
#define AT91_SMC_READMODE (1 << 0) /* Read Mode */
#define AT91_SMC_WRITEMODE (1 << 1) /* Write Mode */
#define AT91_SMC_EXNWMODE (3 << 4) /* NWAIT Mode */
@@ -66,11 +68,4 @@
#define AT91_SMC_PS_16 (2 << 28)
#define AT91_SMC_PS_32 (3 << 28)
-#if defined(AT91_SMC1) /* The AT91SAM9263 has 2 Static Memory contollers */
-#define AT91_SMC1_SETUP(n) (AT91_SMC1 + 0x00 + ((n)*0x10)) /* Setup Register for CS n */
-#define AT91_SMC1_PULSE(n) (AT91_SMC1 + 0x04 + ((n)*0x10)) /* Pulse Register for CS n */
-#define AT91_SMC1_CYCLE(n) (AT91_SMC1 + 0x08 + ((n)*0x10)) /* Cycle Register for CS n */
-#define AT91_SMC1_MODE(n) (AT91_SMC1 + 0x0c + ((n)*0x10)) /* Mode Register for CS n */
-#endif
-
#endif
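The removed AT91_SMC_*(n) macros show that each chip select owns a 0x10-byte register window; with the offsets now bare, that stride has to be applied by whatever code owns the ioremapped SMC base, which is also what lets sam9_smc_configure() take a controller id for the SAM9263's second SMC. A hedged sketch of the arithmetic (the real helper lives in sam9_smc.c and may differ):

#include <linux/io.h>
#include <linux/types.h>

static void example_smc_write_mode(void __iomem *smc_base, int cs, u32 mode)
{
	/* per-chip-select block: SETUP/PULSE/CYCLE/MODE, 0x10 bytes apart */
	__raw_writel(mode, smc_base + 0x10 * cs + AT91_SMC_MODE);
}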
diff --git a/arch/arm/mach-at91/include/mach/at91sam9g45.h b/arch/arm/mach-at91/include/mach/at91sam9g45.h
index 406bb649680..f0c23c960de 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9g45.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9g45.h
@@ -86,27 +86,27 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_ECC (0xffffe200 - AT91_BASE_SYS)
#define AT91_DDRSDRC1 (0xffffe400 - AT91_BASE_SYS)
#define AT91_DDRSDRC0 (0xffffe600 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffe800 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffea00 - AT91_BASE_SYS)
-#define AT91_DMA (0xffffec00 - AT91_BASE_SYS)
-#define AT91_DBGU (0xffffee00 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOD (0xfffff800 - AT91_BASE_SYS)
-#define AT91_PIOE (0xfffffa00 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
#define AT91_GPBR (0xfffffd60 - AT91_BASE_SYS)
-#define AT91_RTC (0xfffffdb0 - AT91_BASE_SYS)
+
+#define AT91SAM9G45_BASE_ECC 0xffffe200
+#define AT91SAM9G45_BASE_DMA 0xffffec00
+#define AT91SAM9G45_BASE_SMC 0xffffe800
+#define AT91SAM9G45_BASE_DBGU AT91_BASE_DBGU1
+#define AT91SAM9G45_BASE_PIOA 0xfffff200
+#define AT91SAM9G45_BASE_PIOB 0xfffff400
+#define AT91SAM9G45_BASE_PIOC 0xfffff600
+#define AT91SAM9G45_BASE_PIOD 0xfffff800
+#define AT91SAM9G45_BASE_PIOE 0xfffffa00
+#define AT91SAM9G45_BASE_SHDWC 0xfffffd10
+#define AT91SAM9G45_BASE_RTT 0xfffffd20
+#define AT91SAM9G45_BASE_PIT 0xfffffd30
+#define AT91SAM9G45_BASE_WDT 0xfffffd40
+#define AT91SAM9G45_BASE_RTC 0xfffffdb0
#define AT91_USART0 AT91SAM9G45_BASE_US0
#define AT91_USART1 AT91SAM9G45_BASE_US1
diff --git a/arch/arm/mach-at91/include/mach/at91sam9rl.h b/arch/arm/mach-at91/include/mach/at91sam9rl.h
index 1aabacd315d..2bb359e60b9 100644
--- a/arch/arm/mach-at91/include/mach/at91sam9rl.h
+++ b/arch/arm/mach-at91/include/mach/at91sam9rl.h
@@ -69,27 +69,26 @@
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_DMA (0xffffe600 - AT91_BASE_SYS)
-#define AT91_ECC (0xffffe800 - AT91_BASE_SYS)
#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
-#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
-#define AT91_CCFG (0xffffef10 - AT91_BASE_SYS)
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
-#define AT91_DBGU (0xfffff200 - AT91_BASE_SYS)
-#define AT91_PIOA (0xfffff400 - AT91_BASE_SYS)
-#define AT91_PIOB (0xfffff600 - AT91_BASE_SYS)
-#define AT91_PIOC (0xfffff800 - AT91_BASE_SYS)
-#define AT91_PIOD (0xfffffa00 - AT91_BASE_SYS)
#define AT91_PMC (0xfffffc00 - AT91_BASE_SYS)
#define AT91_RSTC (0xfffffd00 - AT91_BASE_SYS)
-#define AT91_SHDWC (0xfffffd10 - AT91_BASE_SYS)
-#define AT91_RTT (0xfffffd20 - AT91_BASE_SYS)
-#define AT91_PIT (0xfffffd30 - AT91_BASE_SYS)
-#define AT91_WDT (0xfffffd40 - AT91_BASE_SYS)
#define AT91_SCKCR (0xfffffd50 - AT91_BASE_SYS)
#define AT91_GPBR (0xfffffd60 - AT91_BASE_SYS)
-#define AT91_RTC (0xfffffe00 - AT91_BASE_SYS)
+
+#define AT91SAM9RL_BASE_DMA 0xffffe600
+#define AT91SAM9RL_BASE_ECC 0xffffe800
+#define AT91SAM9RL_BASE_SMC 0xffffec00
+#define AT91SAM9RL_BASE_DBGU AT91_BASE_DBGU0
+#define AT91SAM9RL_BASE_PIOA 0xfffff400
+#define AT91SAM9RL_BASE_PIOB 0xfffff600
+#define AT91SAM9RL_BASE_PIOC 0xfffff800
+#define AT91SAM9RL_BASE_PIOD 0xfffffa00
+#define AT91SAM9RL_BASE_SHDWC 0xfffffd10
+#define AT91SAM9RL_BASE_RTT 0xfffffd20
+#define AT91SAM9RL_BASE_PIT 0xfffffd30
+#define AT91SAM9RL_BASE_WDT 0xfffffd40
+#define AT91SAM9RL_BASE_RTC 0xfffffe00
#define AT91_USART0 AT91SAM9RL_BASE_US0
#define AT91_USART1 AT91SAM9RL_BASE_US1
diff --git a/arch/arm/mach-at91/include/mach/at91x40.h b/arch/arm/mach-at91/include/mach/at91x40.h
index a152ff87e68..a57829f4fd1 100644
--- a/arch/arm/mach-at91/include/mach/at91x40.h
+++ b/arch/arm/mach-at91/include/mach/at91x40.h
@@ -40,7 +40,6 @@
#define AT91_PIOA (0xffff0000 - AT91_BASE_SYS) /* PIO Controller A */
#define AT91_PS (0xffff4000 - AT91_BASE_SYS) /* Power Save */
#define AT91_WD (0xffff8000 - AT91_BASE_SYS) /* Watchdog Timer */
-#define AT91_AIC (0xfffff000 - AT91_BASE_SYS) /* Advanced Interrupt Controller */
/*
* The AT91x40 series doesn't have a debug unit like the other AT91 parts.
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index eac92e995bb..d0b377b21bd 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -40,13 +40,14 @@
#include <linux/atmel-mci.h>
#include <sound/atmel-ac97c.h>
#include <linux/serial.h>
+#include <linux/platform_data/macb.h>
/* USB Device */
struct at91_udc_data {
- u8 vbus_pin; /* high == host powering us */
+ int vbus_pin; /* high == host powering us */
u8 vbus_active_low; /* vbus polarity */
u8 vbus_polled; /* Use polling, not interrupt */
- u8 pullup_pin; /* active == D+ pulled up */
+ int pullup_pin; /* active == D+ pulled up */
u8 pullup_active_low; /* true == pullup_pin is active low */
};
extern void __init at91_add_device_udc(struct at91_udc_data *data);
@@ -56,10 +57,10 @@ extern void __init at91_add_device_usba(struct usba_platform_data *data);
/* Compact Flash */
struct at91_cf_data {
- u8 irq_pin; /* I/O IRQ */
- u8 det_pin; /* Card detect */
- u8 vcc_pin; /* power switching */
- u8 rst_pin; /* card reset */
+ int irq_pin; /* I/O IRQ */
+ int det_pin; /* Card detect */
+ int vcc_pin; /* power switching */
+ int rst_pin; /* card reset */
u8 chipselect; /* EBI Chip Select number */
u8 flags;
#define AT91_CF_TRUE_IDE 0x01
@@ -70,37 +71,26 @@ extern void __init at91_add_device_cf(struct at91_cf_data *data);
/* MMC / SD */
/* at91_mci platform config */
struct at91_mmc_data {
- u8 det_pin; /* card detect IRQ */
+ int det_pin; /* card detect IRQ */
unsigned slot_b:1; /* uses Slot B */
unsigned wire4:1; /* (SD) supports DAT0..DAT3 */
- u8 wp_pin; /* (SD) writeprotect detect */
- u8 vcc_pin; /* power switching (high == on) */
+ int wp_pin; /* (SD) writeprotect detect */
+ int vcc_pin; /* power switching (high == on) */
};
extern void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data);
/* atmel-mci platform config */
extern void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data);
- /* Ethernet (EMAC & MACB) */
-struct at91_eth_data {
- u32 phy_mask;
- u8 phy_irq_pin; /* PHY IRQ */
- u8 is_rmii; /* using RMII interface? */
-};
-extern void __init at91_add_device_eth(struct at91_eth_data *data);
-
-#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91SAM9G20) || defined(CONFIG_ARCH_AT91CAP9) \
- || defined(CONFIG_ARCH_AT91SAM9G45)
-#define eth_platform_data at91_eth_data
-#endif
+extern void __init at91_add_device_eth(struct macb_platform_data *data);
/* USB Host */
struct at91_usbh_data {
u8 ports; /* number of ports on root hub */
- u8 vbus_pin[2]; /* port power-control pin */
+ int vbus_pin[2]; /* port power-control pin */
u8 vbus_pin_inverted;
u8 overcurrent_supported;
- u8 overcurrent_pin[2];
+ int overcurrent_pin[2];
u8 overcurrent_status[2];
u8 overcurrent_changed[2];
};
@@ -110,9 +100,9 @@ extern void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data);
/* NAND / SmartMedia */
struct atmel_nand_data {
- u8 enable_pin; /* chip enable */
- u8 det_pin; /* card detect */
- u8 rdy_pin; /* ready/busy */
+ int enable_pin; /* chip enable */
+ int det_pin; /* card detect */
+ int rdy_pin; /* ready/busy */
u8 rdy_pin_active_low; /* rdy_pin value is inverted */
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/arch/arm/mach-at91/include/mach/debug-macro.S b/arch/arm/mach-at91/include/mach/debug-macro.S
index 0ed8648c645..c6bb9e2d9ba 100644
--- a/arch/arm/mach-at91/include/mach/debug-macro.S
+++ b/arch/arm/mach-at91/include/mach/debug-macro.S
@@ -14,9 +14,15 @@
#include <mach/hardware.h>
#include <mach/at91_dbgu.h>
+#if defined(CONFIG_AT91_DEBUG_LL_DBGU0)
+#define AT91_DBGU AT91_BASE_DBGU0
+#else
+#define AT91_DBGU AT91_BASE_DBGU1
+#endif
+
.macro addruart, rp, rv, tmp
- ldr \rp, =(AT91_BASE_SYS + AT91_DBGU) @ System peripherals (phys address)
- ldr \rv, =(AT91_VA_BASE_SYS + AT91_DBGU) @ System peripherals (virt address)
+ ldr \rp, =AT91_DBGU @ System peripherals (phys address)
+ ldr \rv, =AT91_IO_P2V(AT91_DBGU) @ System peripherals (virt address)
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/mach-at91/include/mach/entry-macro.S b/arch/arm/mach-at91/include/mach/entry-macro.S
index 7ab68f97222..423eea0ed74 100644
--- a/arch/arm/mach-at91/include/mach/entry-macro.S
+++ b/arch/arm/mach-at91/include/mach/entry-macro.S
@@ -17,16 +17,17 @@
.endm
.macro get_irqnr_preamble, base, tmp
- ldr \base, =(AT91_VA_BASE_SYS + AT91_AIC) @ base virtual address of AIC peripheral
+ ldr \base, =at91_aic_base @ base virtual address of AIC peripheral
+ ldr \base, [\base]
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base, #(AT91_AIC_IVR - AT91_AIC)] @ read IRQ vector register: de-asserts nIRQ to processor (and clears interrupt)
- ldr \irqstat, [\base, #(AT91_AIC_ISR - AT91_AIC)] @ read interrupt source number
- teq \irqstat, #0 @ ISR is 0 when no current interrupt, or spurious interrupt
- streq \tmp, [\base, #(AT91_AIC_EOICR - AT91_AIC)] @ not going to be handled further, then ACK it now.
+ ldr \irqnr, [\base, #AT91_AIC_IVR] @ read IRQ vector register: de-asserts nIRQ to processor (and clears interrupt)
+ ldr \irqstat, [\base, #AT91_AIC_ISR] @ read interrupt source number
+ teq \irqstat, #0 @ ISR is 0 when no current interrupt, or spurious interrupt
+ streq \tmp, [\base, #AT91_AIC_EOICR] @ not going to be handled further, then ACK it now.
.endm
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index 2b9a1f51210..e3fd225121c 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -16,177 +16,175 @@
#include <linux/kernel.h>
#include <asm/irq.h>
-#define PIN_BASE NR_AIC_IRQS
-
#define MAX_GPIO_BANKS 5
-#define NR_BUILTIN_GPIO (PIN_BASE + (MAX_GPIO_BANKS * 32))
+#define NR_BUILTIN_GPIO (MAX_GPIO_BANKS * 32)
/* these pin numbers double as IRQ numbers, like AT91xxx_ID_* values */
-#define AT91_PIN_PA0 (PIN_BASE + 0x00 + 0)
-#define AT91_PIN_PA1 (PIN_BASE + 0x00 + 1)
-#define AT91_PIN_PA2 (PIN_BASE + 0x00 + 2)
-#define AT91_PIN_PA3 (PIN_BASE + 0x00 + 3)
-#define AT91_PIN_PA4 (PIN_BASE + 0x00 + 4)
-#define AT91_PIN_PA5 (PIN_BASE + 0x00 + 5)
-#define AT91_PIN_PA6 (PIN_BASE + 0x00 + 6)
-#define AT91_PIN_PA7 (PIN_BASE + 0x00 + 7)
-#define AT91_PIN_PA8 (PIN_BASE + 0x00 + 8)
-#define AT91_PIN_PA9 (PIN_BASE + 0x00 + 9)
-#define AT91_PIN_PA10 (PIN_BASE + 0x00 + 10)
-#define AT91_PIN_PA11 (PIN_BASE + 0x00 + 11)
-#define AT91_PIN_PA12 (PIN_BASE + 0x00 + 12)
-#define AT91_PIN_PA13 (PIN_BASE + 0x00 + 13)
-#define AT91_PIN_PA14 (PIN_BASE + 0x00 + 14)
-#define AT91_PIN_PA15 (PIN_BASE + 0x00 + 15)
-#define AT91_PIN_PA16 (PIN_BASE + 0x00 + 16)
-#define AT91_PIN_PA17 (PIN_BASE + 0x00 + 17)
-#define AT91_PIN_PA18 (PIN_BASE + 0x00 + 18)
-#define AT91_PIN_PA19 (PIN_BASE + 0x00 + 19)
-#define AT91_PIN_PA20 (PIN_BASE + 0x00 + 20)
-#define AT91_PIN_PA21 (PIN_BASE + 0x00 + 21)
-#define AT91_PIN_PA22 (PIN_BASE + 0x00 + 22)
-#define AT91_PIN_PA23 (PIN_BASE + 0x00 + 23)
-#define AT91_PIN_PA24 (PIN_BASE + 0x00 + 24)
-#define AT91_PIN_PA25 (PIN_BASE + 0x00 + 25)
-#define AT91_PIN_PA26 (PIN_BASE + 0x00 + 26)
-#define AT91_PIN_PA27 (PIN_BASE + 0x00 + 27)
-#define AT91_PIN_PA28 (PIN_BASE + 0x00 + 28)
-#define AT91_PIN_PA29 (PIN_BASE + 0x00 + 29)
-#define AT91_PIN_PA30 (PIN_BASE + 0x00 + 30)
-#define AT91_PIN_PA31 (PIN_BASE + 0x00 + 31)
-
-#define AT91_PIN_PB0 (PIN_BASE + 0x20 + 0)
-#define AT91_PIN_PB1 (PIN_BASE + 0x20 + 1)
-#define AT91_PIN_PB2 (PIN_BASE + 0x20 + 2)
-#define AT91_PIN_PB3 (PIN_BASE + 0x20 + 3)
-#define AT91_PIN_PB4 (PIN_BASE + 0x20 + 4)
-#define AT91_PIN_PB5 (PIN_BASE + 0x20 + 5)
-#define AT91_PIN_PB6 (PIN_BASE + 0x20 + 6)
-#define AT91_PIN_PB7 (PIN_BASE + 0x20 + 7)
-#define AT91_PIN_PB8 (PIN_BASE + 0x20 + 8)
-#define AT91_PIN_PB9 (PIN_BASE + 0x20 + 9)
-#define AT91_PIN_PB10 (PIN_BASE + 0x20 + 10)
-#define AT91_PIN_PB11 (PIN_BASE + 0x20 + 11)
-#define AT91_PIN_PB12 (PIN_BASE + 0x20 + 12)
-#define AT91_PIN_PB13 (PIN_BASE + 0x20 + 13)
-#define AT91_PIN_PB14 (PIN_BASE + 0x20 + 14)
-#define AT91_PIN_PB15 (PIN_BASE + 0x20 + 15)
-#define AT91_PIN_PB16 (PIN_BASE + 0x20 + 16)
-#define AT91_PIN_PB17 (PIN_BASE + 0x20 + 17)
-#define AT91_PIN_PB18 (PIN_BASE + 0x20 + 18)
-#define AT91_PIN_PB19 (PIN_BASE + 0x20 + 19)
-#define AT91_PIN_PB20 (PIN_BASE + 0x20 + 20)
-#define AT91_PIN_PB21 (PIN_BASE + 0x20 + 21)
-#define AT91_PIN_PB22 (PIN_BASE + 0x20 + 22)
-#define AT91_PIN_PB23 (PIN_BASE + 0x20 + 23)
-#define AT91_PIN_PB24 (PIN_BASE + 0x20 + 24)
-#define AT91_PIN_PB25 (PIN_BASE + 0x20 + 25)
-#define AT91_PIN_PB26 (PIN_BASE + 0x20 + 26)
-#define AT91_PIN_PB27 (PIN_BASE + 0x20 + 27)
-#define AT91_PIN_PB28 (PIN_BASE + 0x20 + 28)
-#define AT91_PIN_PB29 (PIN_BASE + 0x20 + 29)
-#define AT91_PIN_PB30 (PIN_BASE + 0x20 + 30)
-#define AT91_PIN_PB31 (PIN_BASE + 0x20 + 31)
-
-#define AT91_PIN_PC0 (PIN_BASE + 0x40 + 0)
-#define AT91_PIN_PC1 (PIN_BASE + 0x40 + 1)
-#define AT91_PIN_PC2 (PIN_BASE + 0x40 + 2)
-#define AT91_PIN_PC3 (PIN_BASE + 0x40 + 3)
-#define AT91_PIN_PC4 (PIN_BASE + 0x40 + 4)
-#define AT91_PIN_PC5 (PIN_BASE + 0x40 + 5)
-#define AT91_PIN_PC6 (PIN_BASE + 0x40 + 6)
-#define AT91_PIN_PC7 (PIN_BASE + 0x40 + 7)
-#define AT91_PIN_PC8 (PIN_BASE + 0x40 + 8)
-#define AT91_PIN_PC9 (PIN_BASE + 0x40 + 9)
-#define AT91_PIN_PC10 (PIN_BASE + 0x40 + 10)
-#define AT91_PIN_PC11 (PIN_BASE + 0x40 + 11)
-#define AT91_PIN_PC12 (PIN_BASE + 0x40 + 12)
-#define AT91_PIN_PC13 (PIN_BASE + 0x40 + 13)
-#define AT91_PIN_PC14 (PIN_BASE + 0x40 + 14)
-#define AT91_PIN_PC15 (PIN_BASE + 0x40 + 15)
-#define AT91_PIN_PC16 (PIN_BASE + 0x40 + 16)
-#define AT91_PIN_PC17 (PIN_BASE + 0x40 + 17)
-#define AT91_PIN_PC18 (PIN_BASE + 0x40 + 18)
-#define AT91_PIN_PC19 (PIN_BASE + 0x40 + 19)
-#define AT91_PIN_PC20 (PIN_BASE + 0x40 + 20)
-#define AT91_PIN_PC21 (PIN_BASE + 0x40 + 21)
-#define AT91_PIN_PC22 (PIN_BASE + 0x40 + 22)
-#define AT91_PIN_PC23 (PIN_BASE + 0x40 + 23)
-#define AT91_PIN_PC24 (PIN_BASE + 0x40 + 24)
-#define AT91_PIN_PC25 (PIN_BASE + 0x40 + 25)
-#define AT91_PIN_PC26 (PIN_BASE + 0x40 + 26)
-#define AT91_PIN_PC27 (PIN_BASE + 0x40 + 27)
-#define AT91_PIN_PC28 (PIN_BASE + 0x40 + 28)
-#define AT91_PIN_PC29 (PIN_BASE + 0x40 + 29)
-#define AT91_PIN_PC30 (PIN_BASE + 0x40 + 30)
-#define AT91_PIN_PC31 (PIN_BASE + 0x40 + 31)
-
-#define AT91_PIN_PD0 (PIN_BASE + 0x60 + 0)
-#define AT91_PIN_PD1 (PIN_BASE + 0x60 + 1)
-#define AT91_PIN_PD2 (PIN_BASE + 0x60 + 2)
-#define AT91_PIN_PD3 (PIN_BASE + 0x60 + 3)
-#define AT91_PIN_PD4 (PIN_BASE + 0x60 + 4)
-#define AT91_PIN_PD5 (PIN_BASE + 0x60 + 5)
-#define AT91_PIN_PD6 (PIN_BASE + 0x60 + 6)
-#define AT91_PIN_PD7 (PIN_BASE + 0x60 + 7)
-#define AT91_PIN_PD8 (PIN_BASE + 0x60 + 8)
-#define AT91_PIN_PD9 (PIN_BASE + 0x60 + 9)
-#define AT91_PIN_PD10 (PIN_BASE + 0x60 + 10)
-#define AT91_PIN_PD11 (PIN_BASE + 0x60 + 11)
-#define AT91_PIN_PD12 (PIN_BASE + 0x60 + 12)
-#define AT91_PIN_PD13 (PIN_BASE + 0x60 + 13)
-#define AT91_PIN_PD14 (PIN_BASE + 0x60 + 14)
-#define AT91_PIN_PD15 (PIN_BASE + 0x60 + 15)
-#define AT91_PIN_PD16 (PIN_BASE + 0x60 + 16)
-#define AT91_PIN_PD17 (PIN_BASE + 0x60 + 17)
-#define AT91_PIN_PD18 (PIN_BASE + 0x60 + 18)
-#define AT91_PIN_PD19 (PIN_BASE + 0x60 + 19)
-#define AT91_PIN_PD20 (PIN_BASE + 0x60 + 20)
-#define AT91_PIN_PD21 (PIN_BASE + 0x60 + 21)
-#define AT91_PIN_PD22 (PIN_BASE + 0x60 + 22)
-#define AT91_PIN_PD23 (PIN_BASE + 0x60 + 23)
-#define AT91_PIN_PD24 (PIN_BASE + 0x60 + 24)
-#define AT91_PIN_PD25 (PIN_BASE + 0x60 + 25)
-#define AT91_PIN_PD26 (PIN_BASE + 0x60 + 26)
-#define AT91_PIN_PD27 (PIN_BASE + 0x60 + 27)
-#define AT91_PIN_PD28 (PIN_BASE + 0x60 + 28)
-#define AT91_PIN_PD29 (PIN_BASE + 0x60 + 29)
-#define AT91_PIN_PD30 (PIN_BASE + 0x60 + 30)
-#define AT91_PIN_PD31 (PIN_BASE + 0x60 + 31)
-
-#define AT91_PIN_PE0 (PIN_BASE + 0x80 + 0)
-#define AT91_PIN_PE1 (PIN_BASE + 0x80 + 1)
-#define AT91_PIN_PE2 (PIN_BASE + 0x80 + 2)
-#define AT91_PIN_PE3 (PIN_BASE + 0x80 + 3)
-#define AT91_PIN_PE4 (PIN_BASE + 0x80 + 4)
-#define AT91_PIN_PE5 (PIN_BASE + 0x80 + 5)
-#define AT91_PIN_PE6 (PIN_BASE + 0x80 + 6)
-#define AT91_PIN_PE7 (PIN_BASE + 0x80 + 7)
-#define AT91_PIN_PE8 (PIN_BASE + 0x80 + 8)
-#define AT91_PIN_PE9 (PIN_BASE + 0x80 + 9)
-#define AT91_PIN_PE10 (PIN_BASE + 0x80 + 10)
-#define AT91_PIN_PE11 (PIN_BASE + 0x80 + 11)
-#define AT91_PIN_PE12 (PIN_BASE + 0x80 + 12)
-#define AT91_PIN_PE13 (PIN_BASE + 0x80 + 13)
-#define AT91_PIN_PE14 (PIN_BASE + 0x80 + 14)
-#define AT91_PIN_PE15 (PIN_BASE + 0x80 + 15)
-#define AT91_PIN_PE16 (PIN_BASE + 0x80 + 16)
-#define AT91_PIN_PE17 (PIN_BASE + 0x80 + 17)
-#define AT91_PIN_PE18 (PIN_BASE + 0x80 + 18)
-#define AT91_PIN_PE19 (PIN_BASE + 0x80 + 19)
-#define AT91_PIN_PE20 (PIN_BASE + 0x80 + 20)
-#define AT91_PIN_PE21 (PIN_BASE + 0x80 + 21)
-#define AT91_PIN_PE22 (PIN_BASE + 0x80 + 22)
-#define AT91_PIN_PE23 (PIN_BASE + 0x80 + 23)
-#define AT91_PIN_PE24 (PIN_BASE + 0x80 + 24)
-#define AT91_PIN_PE25 (PIN_BASE + 0x80 + 25)
-#define AT91_PIN_PE26 (PIN_BASE + 0x80 + 26)
-#define AT91_PIN_PE27 (PIN_BASE + 0x80 + 27)
-#define AT91_PIN_PE28 (PIN_BASE + 0x80 + 28)
-#define AT91_PIN_PE29 (PIN_BASE + 0x80 + 29)
-#define AT91_PIN_PE30 (PIN_BASE + 0x80 + 30)
-#define AT91_PIN_PE31 (PIN_BASE + 0x80 + 31)
+#define AT91_PIN_PA0 (0x00 + 0)
+#define AT91_PIN_PA1 (0x00 + 1)
+#define AT91_PIN_PA2 (0x00 + 2)
+#define AT91_PIN_PA3 (0x00 + 3)
+#define AT91_PIN_PA4 (0x00 + 4)
+#define AT91_PIN_PA5 (0x00 + 5)
+#define AT91_PIN_PA6 (0x00 + 6)
+#define AT91_PIN_PA7 (0x00 + 7)
+#define AT91_PIN_PA8 (0x00 + 8)
+#define AT91_PIN_PA9 (0x00 + 9)
+#define AT91_PIN_PA10 (0x00 + 10)
+#define AT91_PIN_PA11 (0x00 + 11)
+#define AT91_PIN_PA12 (0x00 + 12)
+#define AT91_PIN_PA13 (0x00 + 13)
+#define AT91_PIN_PA14 (0x00 + 14)
+#define AT91_PIN_PA15 (0x00 + 15)
+#define AT91_PIN_PA16 (0x00 + 16)
+#define AT91_PIN_PA17 (0x00 + 17)
+#define AT91_PIN_PA18 (0x00 + 18)
+#define AT91_PIN_PA19 (0x00 + 19)
+#define AT91_PIN_PA20 (0x00 + 20)
+#define AT91_PIN_PA21 (0x00 + 21)
+#define AT91_PIN_PA22 (0x00 + 22)
+#define AT91_PIN_PA23 (0x00 + 23)
+#define AT91_PIN_PA24 (0x00 + 24)
+#define AT91_PIN_PA25 (0x00 + 25)
+#define AT91_PIN_PA26 (0x00 + 26)
+#define AT91_PIN_PA27 (0x00 + 27)
+#define AT91_PIN_PA28 (0x00 + 28)
+#define AT91_PIN_PA29 (0x00 + 29)
+#define AT91_PIN_PA30 (0x00 + 30)
+#define AT91_PIN_PA31 (0x00 + 31)
+
+#define AT91_PIN_PB0 (0x20 + 0)
+#define AT91_PIN_PB1 (0x20 + 1)
+#define AT91_PIN_PB2 (0x20 + 2)
+#define AT91_PIN_PB3 (0x20 + 3)
+#define AT91_PIN_PB4 (0x20 + 4)
+#define AT91_PIN_PB5 (0x20 + 5)
+#define AT91_PIN_PB6 (0x20 + 6)
+#define AT91_PIN_PB7 (0x20 + 7)
+#define AT91_PIN_PB8 (0x20 + 8)
+#define AT91_PIN_PB9 (0x20 + 9)
+#define AT91_PIN_PB10 (0x20 + 10)
+#define AT91_PIN_PB11 (0x20 + 11)
+#define AT91_PIN_PB12 (0x20 + 12)
+#define AT91_PIN_PB13 (0x20 + 13)
+#define AT91_PIN_PB14 (0x20 + 14)
+#define AT91_PIN_PB15 (0x20 + 15)
+#define AT91_PIN_PB16 (0x20 + 16)
+#define AT91_PIN_PB17 (0x20 + 17)
+#define AT91_PIN_PB18 (0x20 + 18)
+#define AT91_PIN_PB19 (0x20 + 19)
+#define AT91_PIN_PB20 (0x20 + 20)
+#define AT91_PIN_PB21 (0x20 + 21)
+#define AT91_PIN_PB22 (0x20 + 22)
+#define AT91_PIN_PB23 (0x20 + 23)
+#define AT91_PIN_PB24 (0x20 + 24)
+#define AT91_PIN_PB25 (0x20 + 25)
+#define AT91_PIN_PB26 (0x20 + 26)
+#define AT91_PIN_PB27 (0x20 + 27)
+#define AT91_PIN_PB28 (0x20 + 28)
+#define AT91_PIN_PB29 (0x20 + 29)
+#define AT91_PIN_PB30 (0x20 + 30)
+#define AT91_PIN_PB31 (0x20 + 31)
+
+#define AT91_PIN_PC0 (0x40 + 0)
+#define AT91_PIN_PC1 (0x40 + 1)
+#define AT91_PIN_PC2 (0x40 + 2)
+#define AT91_PIN_PC3 (0x40 + 3)
+#define AT91_PIN_PC4 (0x40 + 4)
+#define AT91_PIN_PC5 (0x40 + 5)
+#define AT91_PIN_PC6 (0x40 + 6)
+#define AT91_PIN_PC7 (0x40 + 7)
+#define AT91_PIN_PC8 (0x40 + 8)
+#define AT91_PIN_PC9 (0x40 + 9)
+#define AT91_PIN_PC10 (0x40 + 10)
+#define AT91_PIN_PC11 (0x40 + 11)
+#define AT91_PIN_PC12 (0x40 + 12)
+#define AT91_PIN_PC13 (0x40 + 13)
+#define AT91_PIN_PC14 (0x40 + 14)
+#define AT91_PIN_PC15 (0x40 + 15)
+#define AT91_PIN_PC16 (0x40 + 16)
+#define AT91_PIN_PC17 (0x40 + 17)
+#define AT91_PIN_PC18 (0x40 + 18)
+#define AT91_PIN_PC19 (0x40 + 19)
+#define AT91_PIN_PC20 (0x40 + 20)
+#define AT91_PIN_PC21 (0x40 + 21)
+#define AT91_PIN_PC22 (0x40 + 22)
+#define AT91_PIN_PC23 (0x40 + 23)
+#define AT91_PIN_PC24 (0x40 + 24)
+#define AT91_PIN_PC25 (0x40 + 25)
+#define AT91_PIN_PC26 (0x40 + 26)
+#define AT91_PIN_PC27 (0x40 + 27)
+#define AT91_PIN_PC28 (0x40 + 28)
+#define AT91_PIN_PC29 (0x40 + 29)
+#define AT91_PIN_PC30 (0x40 + 30)
+#define AT91_PIN_PC31 (0x40 + 31)
+
+#define AT91_PIN_PD0 (0x60 + 0)
+#define AT91_PIN_PD1 (0x60 + 1)
+#define AT91_PIN_PD2 (0x60 + 2)
+#define AT91_PIN_PD3 (0x60 + 3)
+#define AT91_PIN_PD4 (0x60 + 4)
+#define AT91_PIN_PD5 (0x60 + 5)
+#define AT91_PIN_PD6 (0x60 + 6)
+#define AT91_PIN_PD7 (0x60 + 7)
+#define AT91_PIN_PD8 (0x60 + 8)
+#define AT91_PIN_PD9 (0x60 + 9)
+#define AT91_PIN_PD10 (0x60 + 10)
+#define AT91_PIN_PD11 (0x60 + 11)
+#define AT91_PIN_PD12 (0x60 + 12)
+#define AT91_PIN_PD13 (0x60 + 13)
+#define AT91_PIN_PD14 (0x60 + 14)
+#define AT91_PIN_PD15 (0x60 + 15)
+#define AT91_PIN_PD16 (0x60 + 16)
+#define AT91_PIN_PD17 (0x60 + 17)
+#define AT91_PIN_PD18 (0x60 + 18)
+#define AT91_PIN_PD19 (0x60 + 19)
+#define AT91_PIN_PD20 (0x60 + 20)
+#define AT91_PIN_PD21 (0x60 + 21)
+#define AT91_PIN_PD22 (0x60 + 22)
+#define AT91_PIN_PD23 (0x60 + 23)
+#define AT91_PIN_PD24 (0x60 + 24)
+#define AT91_PIN_PD25 (0x60 + 25)
+#define AT91_PIN_PD26 (0x60 + 26)
+#define AT91_PIN_PD27 (0x60 + 27)
+#define AT91_PIN_PD28 (0x60 + 28)
+#define AT91_PIN_PD29 (0x60 + 29)
+#define AT91_PIN_PD30 (0x60 + 30)
+#define AT91_PIN_PD31 (0x60 + 31)
+
+#define AT91_PIN_PE0 (0x80 + 0)
+#define AT91_PIN_PE1 (0x80 + 1)
+#define AT91_PIN_PE2 (0x80 + 2)
+#define AT91_PIN_PE3 (0x80 + 3)
+#define AT91_PIN_PE4 (0x80 + 4)
+#define AT91_PIN_PE5 (0x80 + 5)
+#define AT91_PIN_PE6 (0x80 + 6)
+#define AT91_PIN_PE7 (0x80 + 7)
+#define AT91_PIN_PE8 (0x80 + 8)
+#define AT91_PIN_PE9 (0x80 + 9)
+#define AT91_PIN_PE10 (0x80 + 10)
+#define AT91_PIN_PE11 (0x80 + 11)
+#define AT91_PIN_PE12 (0x80 + 12)
+#define AT91_PIN_PE13 (0x80 + 13)
+#define AT91_PIN_PE14 (0x80 + 14)
+#define AT91_PIN_PE15 (0x80 + 15)
+#define AT91_PIN_PE16 (0x80 + 16)
+#define AT91_PIN_PE17 (0x80 + 17)
+#define AT91_PIN_PE18 (0x80 + 18)
+#define AT91_PIN_PE19 (0x80 + 19)
+#define AT91_PIN_PE20 (0x80 + 20)
+#define AT91_PIN_PE21 (0x80 + 21)
+#define AT91_PIN_PE22 (0x80 + 22)
+#define AT91_PIN_PE23 (0x80 + 23)
+#define AT91_PIN_PE24 (0x80 + 24)
+#define AT91_PIN_PE25 (0x80 + 25)
+#define AT91_PIN_PE26 (0x80 + 26)
+#define AT91_PIN_PE27 (0x80 + 27)
+#define AT91_PIN_PE28 (0x80 + 28)
+#define AT91_PIN_PE29 (0x80 + 29)
+#define AT91_PIN_PE30 (0x80 + 30)
+#define AT91_PIN_PE31 (0x80 + 31)
#ifndef __ASSEMBLY__
/* setup routines, called from board init or driver probe() */
@@ -215,8 +213,8 @@ extern void at91_gpio_resume(void);
#include <asm/errno.h>
-#define gpio_to_irq(gpio) (gpio)
-#define irq_to_gpio(irq) (irq)
+#define gpio_to_irq(gpio) (gpio + NR_AIC_IRQS)
+#define irq_to_gpio(irq) (irq - NR_AIC_IRQS)
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index 483478d8be6..2d0e4e99856 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -16,6 +16,12 @@
#include <asm/sizes.h>
+/* DBGU base */
+/* rm9200, 9260/9g20, 9261/9g10, 9rl */
+#define AT91_BASE_DBGU0 0xfffff200
+/* 9263, 9g45, cap9 */
+#define AT91_BASE_DBGU1 0xffffee00
+
#if defined(CONFIG_ARCH_AT91RM9200)
#include <mach/at91rm9200.h>
#elif defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20)
@@ -52,6 +58,12 @@
#endif
/*
+ * On all at91 SoCs, the Advanced Interrupt Controller starts at address
+ * 0xfffff000
+ */
+#define AT91_AIC 0xfffff000
+
+/*
* Peripheral identifiers/interrupts.
*/
#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
index 4298e7806c7..4ca09ef7ca2 100644
--- a/arch/arm/mach-at91/include/mach/io.h
+++ b/arch/arm/mach-at91/include/mach/io.h
@@ -30,14 +30,6 @@
#ifndef __ASSEMBLY__
-#ifndef CONFIG_ARCH_AT91X40
-#define __arch_ioremap at91_ioremap
-#define __arch_iounmap at91_iounmap
-#endif
-
-void __iomem *at91_ioremap(unsigned long phys, size_t size, unsigned int type);
-void at91_iounmap(volatile void __iomem *addr);
-
static inline unsigned int at91_sys_read(unsigned int reg_offset)
{
void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;
diff --git a/arch/arm/mach-at91/include/mach/irqs.h b/arch/arm/mach-at91/include/mach/irqs.h
index 36bd55f3fc6..ac8b7dfc85e 100644
--- a/arch/arm/mach-at91/include/mach/irqs.h
+++ b/arch/arm/mach-at91/include/mach/irqs.h
@@ -31,7 +31,7 @@
* Acknowledge interrupt with AIC after interrupt has been handled.
* (by kernel/irq.c)
*/
-#define irq_finish(irq) do { at91_sys_write(AT91_AIC_EOICR, 0); } while (0)
+#define irq_finish(irq) do { at91_aic_write(AT91_AIC_EOICR, 0); } while (0)
/*
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index 36af14bc13b..cbd64f3bcec 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -47,13 +47,4 @@ static inline void arch_idle(void)
#endif
}
-void (*at91_arch_reset)(void);
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- /* call the CPU-specific reset function */
- if (at91_arch_reset)
- (at91_arch_reset)();
-}
-
#endif
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index 8f4866045b4..ec164a4124c 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -19,7 +19,7 @@
#define BOARD_HAVE_NAND_16BIT (1 << 31)
static inline int board_have_nand_16bit(void)
{
- return system_rev & BOARD_HAVE_NAND_16BIT;
+ return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
}
#endif /* __ARCH_SYSTEM_REV_H__ */
diff --git a/arch/arm/mach-at91/include/mach/timex.h b/arch/arm/mach-at91/include/mach/timex.h
index 85820ad801c..5e917a66edd 100644
--- a/arch/arm/mach-at91/include/mach/timex.h
+++ b/arch/arm/mach-at91/include/mach/timex.h
@@ -23,70 +23,15 @@
#include <mach/hardware.h>
-#if defined(CONFIG_ARCH_AT91RM9200)
+#ifdef CONFIG_ARCH_AT91X40
-#define CLOCK_TICK_RATE (AT91_SLOW_CLOCK)
-
-#elif defined(CONFIG_ARCH_AT91SAM9260)
-
-#if defined(CONFIG_MACH_USB_A9260) || defined(CONFIG_MACH_QIL_A9260)
-#define AT91SAM9_MASTER_CLOCK 90000000
-#else
-#define AT91SAM9_MASTER_CLOCK 99300000
-#endif
-
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9261)
-
-#define AT91SAM9_MASTER_CLOCK 99300000
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9G10)
-
-#define AT91SAM9_MASTER_CLOCK 133000000
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9263)
-
-#if defined(CONFIG_MACH_USB_A9263)
-#define AT91SAM9_MASTER_CLOCK 90000000
-#else
-#define AT91SAM9_MASTER_CLOCK 99959500
-#endif
-
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9RL)
-
-#define AT91SAM9_MASTER_CLOCK 100000000
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9G20)
+#define AT91X40_MASTER_CLOCK 40000000
+#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK)
-#if defined(CONFIG_MACH_USB_A9G20)
-#define AT91SAM9_MASTER_CLOCK 133000000
#else
-#define AT91SAM9_MASTER_CLOCK 132096000
-#endif
-
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91SAM9G45)
-#define AT91SAM9_MASTER_CLOCK 133333333
-#define CLOCK_TICK_RATE (AT91SAM9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91CAP9)
-
-#define AT91CAP9_MASTER_CLOCK 100000000
-#define CLOCK_TICK_RATE (AT91CAP9_MASTER_CLOCK/16)
-
-#elif defined(CONFIG_ARCH_AT91X40)
-
-#define AT91X40_MASTER_CLOCK 40000000
-#define CLOCK_TICK_RATE (AT91X40_MASTER_CLOCK)
+#define CLOCK_TICK_RATE 12345678
#endif
-#endif
+#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-at91/include/mach/uncompress.h b/arch/arm/mach-at91/include/mach/uncompress.h
index 18bdcdeb474..0234fd9d20d 100644
--- a/arch/arm/mach-at91/include/mach/uncompress.h
+++ b/arch/arm/mach-at91/include/mach/uncompress.h
@@ -24,8 +24,10 @@
#include <linux/io.h>
#include <linux/atmel_serial.h>
-#if defined(CONFIG_AT91_EARLY_DBGU)
-#define UART_OFFSET (AT91_DBGU + AT91_BASE_SYS)
+#if defined(CONFIG_AT91_EARLY_DBGU0)
+#define UART_OFFSET AT91_BASE_DBGU0
+#elif defined(CONFIG_AT91_EARLY_DBGU1)
+#define UART_OFFSET AT91_BASE_DBGU1
#elif defined(CONFIG_AT91_EARLY_USART0)
#define UART_OFFSET AT91_USART0
#elif defined(CONFIG_AT91_EARLY_USART1)
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 9665265ec75..be6b639ecd7 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -33,17 +33,18 @@
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
+void __iomem *at91_aic_base;
static void at91_aic_mask_irq(struct irq_data *d)
{
/* Disable interrupt on AIC */
- at91_sys_write(AT91_AIC_IDCR, 1 << d->irq);
+ at91_aic_write(AT91_AIC_IDCR, 1 << d->irq);
}
static void at91_aic_unmask_irq(struct irq_data *d)
{
/* Enable interrupt on AIC */
- at91_sys_write(AT91_AIC_IECR, 1 << d->irq);
+ at91_aic_write(AT91_AIC_IECR, 1 << d->irq);
}
unsigned int at91_extern_irq;
@@ -77,8 +78,8 @@ static int at91_aic_set_type(struct irq_data *d, unsigned type)
return -EINVAL;
}
- smr = at91_sys_read(AT91_AIC_SMR(d->irq)) & ~AT91_AIC_SRCTYPE;
- at91_sys_write(AT91_AIC_SMR(d->irq), smr | srctype);
+ smr = at91_aic_read(AT91_AIC_SMR(d->irq)) & ~AT91_AIC_SRCTYPE;
+ at91_aic_write(AT91_AIC_SMR(d->irq), smr | srctype);
return 0;
}
@@ -102,15 +103,15 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
void at91_irq_suspend(void)
{
- backups = at91_sys_read(AT91_AIC_IMR);
- at91_sys_write(AT91_AIC_IDCR, backups);
- at91_sys_write(AT91_AIC_IECR, wakeups);
+ backups = at91_aic_read(AT91_AIC_IMR);
+ at91_aic_write(AT91_AIC_IDCR, backups);
+ at91_aic_write(AT91_AIC_IECR, wakeups);
}
void at91_irq_resume(void)
{
- at91_sys_write(AT91_AIC_IDCR, wakeups);
- at91_sys_write(AT91_AIC_IECR, backups);
+ at91_aic_write(AT91_AIC_IDCR, wakeups);
+ at91_aic_write(AT91_AIC_IECR, backups);
}
#else
@@ -133,34 +134,39 @@ void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
{
unsigned int i;
+ at91_aic_base = ioremap(AT91_AIC, 512);
+
+ if (!at91_aic_base)
+ panic("Impossible to ioremap AT91_AIC\n");
+
/*
* The IVR is used by macro get_irqnr_and_base to read and verify.
* The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
*/
for (i = 0; i < NR_AIC_IRQS; i++) {
/* Put irq number in Source Vector Register: */
- at91_sys_write(AT91_AIC_SVR(i), i);
+ at91_aic_write(AT91_AIC_SVR(i), i);
/* Active Low interrupt, with the specified priority */
- at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
+ at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
/* Perform 8 End Of Interrupt Command to make sure AIC will not Lock out nIRQ */
if (i < 8)
- at91_sys_write(AT91_AIC_EOICR, 0);
+ at91_aic_write(AT91_AIC_EOICR, 0);
}
/*
* Spurious Interrupt ID in Spurious Vector Register is NR_AIC_IRQS
* When there is no current interrupt, the IRQ Vector Register reads the value stored in AIC_SPU
*/
- at91_sys_write(AT91_AIC_SPU, NR_AIC_IRQS);
+ at91_aic_write(AT91_AIC_SPU, NR_AIC_IRQS);
/* No debugging in AIC: Debug (Protect) Control Register */
- at91_sys_write(AT91_AIC_DCR, 0);
+ at91_aic_write(AT91_AIC_DCR, 0);
/* Disable and clear all interrupts initially */
- at91_sys_write(AT91_AIC_IDCR, 0xFFFFFFFF);
- at91_sys_write(AT91_AIC_ICCR, 0xFFFFFFFF);
+ at91_aic_write(AT91_AIC_IDCR, 0xFFFFFFFF);
+ at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 7046158109d..62ad95556c3 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -34,7 +34,7 @@
/*
* Show the reason for the previous system reset.
*/
-#if defined(AT91_SHDWC)
+#if defined(AT91_RSTC)
#include <mach/at91_rstc.h>
#include <mach/at91_shdwc.h>
@@ -58,8 +58,11 @@ static void __init show_reset_status(void)
char *reason, *r2 = reset;
u32 reset_type, wake_type;
+ if (!at91_shdwc_base)
+ return;
+
reset_type = at91_sys_read(AT91_RSTC_SR) & AT91_RSTC_RSTTYP;
- wake_type = at91_sys_read(AT91_SHDW_SR);
+ wake_type = at91_shdwc_read(AT91_SHDW_SR);
switch (reset_type) {
case AT91_RSTC_RSTTYP_GENERAL:
@@ -215,7 +218,7 @@ static int at91_pm_enter(suspend_state_t state)
| (1 << AT91_ID_FIQ)
| (1 << AT91_ID_SYS)
| (at91_extern_irq))
- & at91_sys_read(AT91_AIC_IMR),
+ & at91_aic_read(AT91_AIC_IMR),
state);
switch (state) {
@@ -283,7 +286,7 @@ static int at91_pm_enter(suspend_state_t state)
}
pr_debug("AT91: PM - wakeup %08x\n",
- at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
+ at91_aic_read(AT91_AIC_IPR) & at91_aic_read(AT91_AIC_IMR));
error:
target_state = PM_SUSPEND_ON;
diff --git a/arch/arm/mach-at91/sam9_smc.c b/arch/arm/mach-at91/sam9_smc.c
index 5eab6aa621d..8294783b679 100644
--- a/arch/arm/mach-at91/sam9_smc.c
+++ b/arch/arm/mach-at91/sam9_smc.c
@@ -10,38 +10,58 @@
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
-void __init sam9_smc_configure(int cs, struct sam9_smc_config* config)
+
+#define AT91_SMC_CS(id, n) (smc_base_addr[id] + ((n) * 0x10))
+
+static void __iomem *smc_base_addr[2];
+
+static void __init sam9_smc_cs_configure(void __iomem *base, struct sam9_smc_config* config)
{
+
/* Setup register */
- at91_sys_write(AT91_SMC_SETUP(cs),
- AT91_SMC_NWESETUP_(config->nwe_setup)
- | AT91_SMC_NCS_WRSETUP_(config->ncs_write_setup)
- | AT91_SMC_NRDSETUP_(config->nrd_setup)
- | AT91_SMC_NCS_RDSETUP_(config->ncs_read_setup)
- );
+ __raw_writel(AT91_SMC_NWESETUP_(config->nwe_setup)
+ | AT91_SMC_NCS_WRSETUP_(config->ncs_write_setup)
+ | AT91_SMC_NRDSETUP_(config->nrd_setup)
+ | AT91_SMC_NCS_RDSETUP_(config->ncs_read_setup),
+ base + AT91_SMC_SETUP);
/* Pulse register */
- at91_sys_write(AT91_SMC_PULSE(cs),
- AT91_SMC_NWEPULSE_(config->nwe_pulse)
- | AT91_SMC_NCS_WRPULSE_(config->ncs_write_pulse)
- | AT91_SMC_NRDPULSE_(config->nrd_pulse)
- | AT91_SMC_NCS_RDPULSE_(config->ncs_read_pulse)
- );
+ __raw_writel(AT91_SMC_NWEPULSE_(config->nwe_pulse)
+ | AT91_SMC_NCS_WRPULSE_(config->ncs_write_pulse)
+ | AT91_SMC_NRDPULSE_(config->nrd_pulse)
+ | AT91_SMC_NCS_RDPULSE_(config->ncs_read_pulse),
+ base + AT91_SMC_PULSE);
/* Cycle register */
- at91_sys_write(AT91_SMC_CYCLE(cs),
- AT91_SMC_NWECYCLE_(config->write_cycle)
- | AT91_SMC_NRDCYCLE_(config->read_cycle)
- );
+ __raw_writel(AT91_SMC_NWECYCLE_(config->write_cycle)
+ | AT91_SMC_NRDCYCLE_(config->read_cycle),
+ base + AT91_SMC_CYCLE);
/* Mode register */
- at91_sys_write(AT91_SMC_MODE(cs),
- config->mode
- | AT91_SMC_TDF_(config->tdf_cycles)
- );
+ __raw_writel(config->mode
+ | AT91_SMC_TDF_(config->tdf_cycles),
+ base + AT91_SMC_MODE);
+}
+
+void __init sam9_smc_configure(int id, int cs, struct sam9_smc_config* config)
+{
+ sam9_smc_cs_configure(AT91_SMC_CS(id, cs), config);
+}
+
+void __init at91sam9_ioremap_smc(int id, u32 addr)
+{
+ if (id > 1) {
+ pr_warn("%s: id > 2\n", __func__);
+ return;
+ }
+ smc_base_addr[id] = ioremap(addr, 512);
+ if (!smc_base_addr[id])
+ pr_warn("Impossible to ioremap smc.%d 0x%x\n", id, addr);
}
diff --git a/arch/arm/mach-at91/sam9_smc.h b/arch/arm/mach-at91/sam9_smc.h
index bf72cfb3455..039c5ce17ae 100644
--- a/arch/arm/mach-at91/sam9_smc.h
+++ b/arch/arm/mach-at91/sam9_smc.h
@@ -30,4 +30,5 @@ struct sam9_smc_config {
u8 tdf_cycles:4;
};
-extern void __init sam9_smc_configure(int cs, struct sam9_smc_config* config);
+extern void __init sam9_smc_configure(int id, int cs, struct sam9_smc_config* config);
+extern void __init at91sam9_ioremap_smc(int id, u32 addr);
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index aa64294c7db..8bdcc3cb601 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/pm.h>
#include <asm/mach/map.h>
@@ -15,6 +16,7 @@
#include <mach/cpu.h>
#include <mach/at91_dbgu.h>
#include <mach/at91_pmc.h>
+#include <mach/at91_shdwc.h>
#include "soc.h"
#include "generic.h"
@@ -73,27 +75,6 @@ static struct map_desc at91_io_desc __initdata = {
.type = MT_DEVICE,
};
-void __iomem *at91_ioremap(unsigned long p, size_t size, unsigned int type)
-{
- if (p >= AT91_BASE_SYS && p <= (AT91_BASE_SYS + SZ_16K - 1))
- return (void __iomem *)AT91_IO_P2V(p);
-
- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(at91_ioremap);
-
-void at91_iounmap(volatile void __iomem *addr)
-{
- unsigned long virt = (unsigned long)addr;
-
- if (virt >= VMALLOC_START && virt < VMALLOC_END)
- __iounmap(addr);
-}
-EXPORT_SYMBOL(at91_iounmap);
-
-#define AT91_DBGU0 0xfffff200
-#define AT91_DBGU1 0xffffee00
-
static void __init soc_detect(u32 dbgu_base)
{
u32 cidr, socid;
@@ -266,9 +247,9 @@ void __init at91_map_io(void)
at91_soc_initdata.type = AT91_SOC_NONE;
at91_soc_initdata.subtype = AT91_SOC_SUBTYPE_NONE;
- soc_detect(AT91_DBGU0);
+ soc_detect(AT91_BASE_DBGU0);
if (!at91_soc_is_detected())
- soc_detect(AT91_DBGU1);
+ soc_detect(AT91_BASE_DBGU1);
if (!at91_soc_is_detected())
panic("AT91: Impossible to detect the SOC type");
@@ -285,8 +266,25 @@ void __init at91_map_io(void)
at91_boot_soc.map_io();
}
+void __iomem *at91_shdwc_base = NULL;
+
+static void at91sam9_poweroff(void)
+{
+ at91_shdwc_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
+}
+
+void __init at91_ioremap_shdwc(u32 base_addr)
+{
+ at91_shdwc_base = ioremap(base_addr, 16);
+ if (!at91_shdwc_base)
+ panic("Impossible to ioremap at91_shdwc_base\n");
+ pm_power_off = at91sam9_poweroff;
+}
+
void __init at91_initialize(unsigned long main_clock)
{
+ at91_boot_soc.ioremap_registers();
+
/* Init clock subsystem */
at91_clock_init(main_clock);
diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h
index 21ed8816e6f..4588ae6f7ac 100644
--- a/arch/arm/mach-at91/soc.h
+++ b/arch/arm/mach-at91/soc.h
@@ -7,6 +7,7 @@
struct at91_init_soc {
unsigned int *default_irq_priority;
void (*map_io)(void);
+ void (*ioremap_registers)(void);
void (*register_clocks)(void);
void (*init)(void);
};
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 31a143592c8..9e5e7552498 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -49,7 +49,29 @@ HW_DECLARE_SPINLOCK(gpio)
#endif
/* sysctl */
-int bcmring_arch_warm_reboot; /* do a warm reboot on hard reset */
+static int bcmring_arch_warm_reboot; /* do a warm reboot on hard reset */
+
+static void bcmring_restart(char mode, const char *cmd)
+{
+ printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
+
+ if (mode == 'h') {
+ /* Reboot configured in proc entry */
+ if (bcmring_arch_warm_reboot) {
+ printk("warm reset\n");
+ /* Issue Warm reset (do not reset ethernet switch, keep alive) */
+ chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_WARM);
+ } else {
+ /* Force reset of everything */
+ printk("force reset\n");
+ chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
+ }
+ } else {
+ /* Force reset of everything */
+ printk("force reset\n");
+ chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
+ }
+}
static struct ctl_table_header *bcmring_sysctl_header;
@@ -173,4 +195,5 @@ MACHINE_START(BCMRING, "BCMRING")
.init_irq = bcmring_init_irq,
.timer = &bcmring_timer,
- .init_machine = bcmring_init_machine
+ .init_machine = bcmring_init_machine,
+ .restart = bcmring_restart,
MACHINE_END
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 430da120a29..6b67b7e8426 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -25,7 +25,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/clkdev.h>
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index f4d4d6d174d..1a1a27dd565 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -1615,7 +1615,7 @@ DMA_MemType_t dma_mem_type(void *addr)
{
unsigned long addrVal = (unsigned long)addr;
- if (addrVal >= VMALLOC_END) {
+ if (addrVal >= CONSISTENT_BASE) {
/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
/* dma_alloc_xxx pages are physically and virtually contiguous */
diff --git a/arch/arm/mach-bcmring/include/mach/system.h b/arch/arm/mach-bcmring/include/mach/system.h
index 38b37060d42..cb78250db64 100644
--- a/arch/arm/mach-bcmring/include/mach/system.h
+++ b/arch/arm/mach-bcmring/include/mach/system.h
@@ -20,35 +20,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/csp/chipcHw_inline.h>
-
-extern int bcmring_arch_warm_reboot;
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);
-
- if (mode == 'h') {
- /* Reboot configured in proc entry */
- if (bcmring_arch_warm_reboot) {
- printk("warm reset\n");
- /* Issue Warm reset (do not reset ethernet switch, keep alive) */
- chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_WARM);
- } else {
- /* Force reset of everything */
- printk("force reset\n");
- chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
- }
- } else {
- /* Force reset of everything */
- printk("force reset\n");
- chipcHw_reset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
- }
-}
-
#endif
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
deleted file mode 100644
index 7397bd7817d..00000000000
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/*
- * Move VMALLOC_END to 0xf0000000 so that the vm space can range from
- * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
- * larger physical memory designs better.
- */
-#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-clps711x/Makefile b/arch/arm/mach-clps711x/Makefile
index 4a197315f0c..f2f0256232e 100644
--- a/arch/arm/mach-clps711x/Makefile
+++ b/arch/arm/mach-clps711x/Makefile
@@ -4,7 +4,7 @@
# Object file lists.
-obj-y := irq.o mm.o time.o
+obj-y := common.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-clps711x/autcpu12.c b/arch/arm/mach-clps711x/autcpu12.c
index 0276091b7f8..3fb79a1d0bd 100644
--- a/arch/arm/mach-clps711x/autcpu12.c
+++ b/arch/arm/mach-clps711x/autcpu12.c
@@ -68,5 +68,6 @@ MACHINE_START(AUTCPU12, "autronix autcpu12")
.map_io = autcpu12_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/cdb89712.c b/arch/arm/mach-clps711x/cdb89712.c
index 25b3bfd0e85..c314f49d6ef 100644
--- a/arch/arm/mach-clps711x/cdb89712.c
+++ b/arch/arm/mach-clps711x/cdb89712.c
@@ -59,4 +59,5 @@ MACHINE_START(CDB89712, "Cirrus-CDB89712")
.map_io = cdb89712_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/ceiva.c b/arch/arm/mach-clps711x/ceiva.c
index 1df9ec67aa9..a70147e347a 100644
--- a/arch/arm/mach-clps711x/ceiva.c
+++ b/arch/arm/mach-clps711x/ceiva.c
@@ -60,4 +60,5 @@ MACHINE_START(CEIVA, "CEIVA/Polaroid Photo MAX Digital Picture Frame")
.map_io = ceiva_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/clep7312.c b/arch/arm/mach-clps711x/clep7312.c
index 80496c09ac5..dbc7842639d 100644
--- a/arch/arm/mach-clps711x/clep7312.c
+++ b/arch/arm/mach-clps711x/clep7312.c
@@ -41,5 +41,6 @@ MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312")
.map_io = clps711x_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/irq.c b/arch/arm/mach-clps711x/common.c
index c2eceee645e..ab1711b9b4d 100644
--- a/arch/arm/mach-clps711x/irq.c
+++ b/arch/arm/mach-clps711x/common.c
@@ -1,7 +1,9 @@
/*
- * linux/arch/arm/mach-clps711x/irq.c
+ * linux/arch/arm/mach-clps711x/common.c
*
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ * Core support for the CLPS711x-based machines.
+ *
+ * Copyright (C) 2001,2011 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,16 +19,42 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/init.h>
-#include <linux/list.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
-#include <asm/mach/irq.h>
+#include <asm/sizes.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-
+#include <asm/leds.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
#include <asm/hardware/clps7111.h>
+/*
+ * This maps the generic CLPS711x registers
+ */
+static struct map_desc clps711x_io_desc[] __initdata = {
+ {
+ .virtual = CLPS7111_VIRT_BASE,
+ .pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ }
+};
+
+void __init clps711x_map_io(void)
+{
+ iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
+}
+
static void int1_mask(struct irq_data *d)
{
u32 intmr1;
@@ -112,15 +140,15 @@ void __init clps711x_init_irq(void)
for (i = 0; i < NR_IRQS; i++) {
if (INT1_IRQS & (1 << i)) {
- irq_set_chip_and_handler(i, &int1_chip,
+ irq_set_chip_and_handler(i, &int1_chip,
handle_level_irq);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
if (INT2_IRQS & (1 << i)) {
irq_set_chip_and_handler(i, &int2_chip,
handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
+ }
}
/*
@@ -141,3 +169,59 @@ void __init clps711x_init_irq(void)
clps_writel(0, SYNCIO);
clps_writel(0, KBDEOI);
}
+
+/*
+ * gettimeoffset() returns time since last timer tick, in usecs.
+ *
+ * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
+ * 'tick' is usecs per jiffy.
+ */
+static unsigned long clps711x_gettimeoffset(void)
+{
+ unsigned long hwticks;
+ hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
+ return (hwticks * (tick_nsec / 1000)) / LATCH;
+}
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t p720t_timer_interrupt(int irq, void *dev_id)
+{
+ timer_tick();
+ return IRQ_HANDLED;
+}
+
+static struct irqaction clps711x_timer_irq = {
+ .name = "CLPS711x Timer Tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = p720t_timer_interrupt,
+};
+
+static void __init clps711x_timer_init(void)
+{
+ struct timespec tv;
+ unsigned int syscon;
+
+ syscon = clps_readl(SYSCON1);
+ syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
+ clps_writel(syscon, SYSCON1);
+
+ clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
+
+ setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
+
+ tv.tv_nsec = 0;
+ tv.tv_sec = clps_readl(RTCDR);
+ do_settimeofday(&tv);
+}
+
+struct sys_timer clps711x_timer = {
+ .init = clps711x_timer_init,
+ .offset = clps711x_gettimeoffset,
+};
+
+void clps711x_restart(char mode, const char *cmd)
+{
+ soft_restart(0);
+}
diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h
index 2b8b801f1dc..fc0f0650dcb 100644
--- a/arch/arm/mach-clps711x/common.h
+++ b/arch/arm/mach-clps711x/common.h
@@ -9,3 +9,4 @@ struct sys_timer;
extern void clps711x_map_io(void);
extern void clps711x_init_irq(void);
extern struct sys_timer clps711x_timer;
+extern void clps711x_restart(char mode, const char *cmd);
diff --git a/arch/arm/mach-clps711x/edb7211-arch.c b/arch/arm/mach-clps711x/edb7211-arch.c
index 9721f6111dc..5fad0b4f40a 100644
--- a/arch/arm/mach-clps711x/edb7211-arch.c
+++ b/arch/arm/mach-clps711x/edb7211-arch.c
@@ -62,4 +62,5 @@ MACHINE_START(EDB7211, "CL-EDB7211 (EP7211 eval board)")
.reserve = edb7211_reserve,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index d9925668729..3a3f0b702cb 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -78,4 +78,5 @@ MACHINE_START(FORTUNET, "ARM-FortuNet")
.map_io = clps711x_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
diff --git a/arch/arm/mach-clps711x/include/mach/system.h b/arch/arm/mach-clps711x/include/mach/system.h
index f916cd7a477..23d6ef8c84d 100644
--- a/arch/arm/mach-clps711x/include/mach/system.h
+++ b/arch/arm/mach-clps711x/include/mach/system.h
@@ -32,9 +32,4 @@ static inline void arch_idle(void)
mov r0, r0");
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- cpu_reset(0);
-}
-
#endif
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
deleted file mode 100644
index 467b96137e4..00000000000
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-clps711x/include/mach/vmalloc.h
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-clps711x/mm.c b/arch/arm/mach-clps711x/mm.c
deleted file mode 100644
index 98659217676..00000000000
--- a/arch/arm/mach-clps711x/mm.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/mm.c
- *
- * Generic MM setup for the CLPS711x-based machines.
- *
- * Copyright (C) 2001 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/sizes.h>
-#include <mach/hardware.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/mach/map.h>
-#include <asm/hardware/clps7111.h>
-
-/*
- * This maps the generic CLPS711x registers
- */
-static struct map_desc clps711x_io_desc[] __initdata = {
- {
- .virtual = CLPS7111_VIRT_BASE,
- .pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
- .length = SZ_1M,
- .type = MT_DEVICE
- }
-};
-
-void __init clps711x_map_io(void)
-{
- iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
-}
diff --git a/arch/arm/mach-clps711x/p720t.c b/arch/arm/mach-clps711x/p720t.c
index 6ecea95f38b..42ee8f33eaf 100644
--- a/arch/arm/mach-clps711x/p720t.c
+++ b/arch/arm/mach-clps711x/p720t.c
@@ -93,6 +93,7 @@ MACHINE_START(P720T, "ARM-Prospector720T")
.map_io = p720t_map_io,
.init_irq = clps711x_init_irq,
.timer = &clps711x_timer,
+ .restart = clps711x_restart,
MACHINE_END
static int p720t_hw_init(void)
diff --git a/arch/arm/mach-clps711x/time.c b/arch/arm/mach-clps711x/time.c
deleted file mode 100644
index d581ef0bcd2..00000000000
--- a/arch/arm/mach-clps711x/time.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/time.c
- *
- * Copyright (C) 2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/timex.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/leds.h>
-#include <asm/hardware/clps7111.h>
-
-#include <asm/mach/time.h>
-
-
-/*
- * gettimeoffset() returns time since last timer tick, in usecs.
- *
- * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
- * 'tick' is usecs per jiffy.
- */
-static unsigned long clps711x_gettimeoffset(void)
-{
- unsigned long hwticks;
- hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
- return (hwticks * (tick_nsec / 1000)) / LATCH;
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-p720t_timer_interrupt(int irq, void *dev_id)
-{
- timer_tick();
- return IRQ_HANDLED;
-}
-
-static struct irqaction clps711x_timer_irq = {
- .name = "CLPS711x Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = p720t_timer_interrupt,
-};
-
-static void __init clps711x_timer_init(void)
-{
- struct timespec tv;
- unsigned int syscon;
-
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
- clps_writel(syscon, SYSCON1);
-
- clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
-
- setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
-
- tv.tv_nsec = 0;
- tv.tv_sec = clps_readl(RTCDR);
- do_settimeofday(&tv);
-}
-
-struct sys_timer clps711x_timer = {
- .init = clps711x_timer_init,
- .offset = clps711x_gettimeoffset,
-};
diff --git a/arch/arm/mach-cns3xxx/cns3420vb.c b/arch/arm/mach-cns3xxx/cns3420vb.c
index 55f7b4b08ab..2c5fb4c7e50 100644
--- a/arch/arm/mach-cns3xxx/cns3420vb.c
+++ b/arch/arm/mach-cns3xxx/cns3420vb.c
@@ -26,6 +26,7 @@
#include <linux/mtd/partitions.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
+#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -201,5 +202,7 @@ MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
.map_io = cns3420_map_io,
.init_irq = cns3xxx_init_irq,
.timer = &cns3xxx_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = cns3420_init,
+ .restart = cns3xxx_restart,
MACHINE_END
diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h
index fcd225343c6..4894b8c1715 100644
--- a/arch/arm/mach-cns3xxx/core.h
+++ b/arch/arm/mach-cns3xxx/core.h
@@ -22,5 +22,6 @@ static inline void cns3xxx_l2x0_init(void) {}
void __init cns3xxx_map_io(void);
void __init cns3xxx_init_irq(void);
void cns3xxx_power_off(void);
+void cns3xxx_restart(char, const char *);
#endif /* __CNS3XXX_CORE_H */
diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
index d87bfc397d3..01c57df5f71 100644
--- a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
+++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
@@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <asm/hardware/entry-macro-gic.S>
-
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h
index 4f16c9b79f7..9e56b7dc133 100644
--- a/arch/arm/mach-cns3xxx/include/mach/system.h
+++ b/arch/arm/mach-cns3xxx/include/mach/system.h
@@ -11,7 +11,6 @@
#ifndef __MACH_SYSTEM_H
#define __MACH_SYSTEM_H
-#include <linux/io.h>
#include <asm/proc-fns.h>
static inline void arch_idle(void)
@@ -23,6 +22,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-void arch_reset(char mode, const char *cmd);
-
#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
deleted file mode 100644
index 1dd231d2f77..00000000000
--- a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright 2000 Russell King.
- * Copyright 2003 ARM Limited
- * Copyright 2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c
index 0c04678615c..36458080332 100644
--- a/arch/arm/mach-cns3xxx/pm.c
+++ b/arch/arm/mach-cns3xxx/pm.c
@@ -11,9 +11,9 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/atomic.h>
-#include <mach/system.h>
#include <mach/cns3xxx.h>
#include <mach/pm.h>
+#include "core.h"
void cns3xxx_pwr_clk_en(unsigned int block)
{
@@ -89,7 +89,7 @@ void cns3xxx_pwr_soft_rst(unsigned int block)
}
EXPORT_SYMBOL(cns3xxx_pwr_soft_rst);
-void arch_reset(char mode, const char *cmd)
+void cns3xxx_restart(char mode, const char *cmd)
{
/*
* To reset, we hit the on-board reset register
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 495e31306fc..2db78bd5c83 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -4,7 +4,7 @@
#
# Common objects
-obj-y := time.o clock.o serial.o io.o psc.o \
+obj-y := time.o clock.o serial.o psc.o \
dma.o usb.o common.o sram.o aemif.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 11c3db98528..dc1afe5be20 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -682,4 +682,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.timer = &davinci_timer,
.init_machine = da830_evm_init,
.dma_zone_size = SZ_128M,
+ .restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1d7d2499522..6b22b543a83 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -127,7 +127,7 @@ static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
size_t retlen;
if (!strcmp(mtd->name, "MAC-Address")) {
- mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+ mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
if (retlen == ETH_ALEN)
pr_info("Read MAC addr from SPI Flash: %pM\n",
mac_addr);
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
.tdm_slots = 2,
.serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_1,
+ .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
.txnumevt = 1,
.rxnumevt = 1,
@@ -1411,4 +1411,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.timer = &davinci_timer,
.init_machine = da850_evm_init,
.dma_zone_size = SZ_128M,
+ .restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 4e0e707c313..275341f159f 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,4 +357,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index ff2d2413279..e99db28181a 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,4 +276,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.timer = &davinci_timer,
.init_machine = dm355_leopard_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 1918ae71142..346e1de2f5a 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
- .size = 28 * NAND_BLOCK_SIZE,
+ .size = 30 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
@@ -618,5 +618,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 0cf8abf78d3..a64b49cfedc 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -719,4 +719,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
.timer = &davinci_timer,
.init_machine = davinci_evm_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index e574d7f837a..64017558860 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
int val;
u32 value;
- if (!vpif_vsclkdis_reg || !cpld_client)
+ if (!vpif_vidclkctl_reg || !cpld_client)
return -ENXIO;
val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
return val;
spin_lock_irqsave(&vpif_reg_lock, flags);
- value = __raw_readl(vpif_vsclkdis_reg);
+ value = __raw_readl(vpif_vidclkctl_reg);
if (mux_mode) {
val &= VPIF_INPUT_TWO_CHANNEL;
value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
val |= VPIF_INPUT_ONE_CHANNEL;
value &= ~VIDCH1CLK;
}
- __raw_writel(value, vpif_vsclkdis_reg);
+ __raw_writel(value, vpif_vidclkctl_reg);
spin_unlock_irqrestore(&vpif_reg_lock, flags);
err = i2c_smbus_write_byte(cpld_client, val);
@@ -799,6 +799,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.timer = &davinci_timer,
.init_machine = evm_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -808,5 +809,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.timer = &davinci_timer,
.init_machine = evm_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 3cfff555e8f..672d820e2aa 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -573,4 +573,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.timer = &davinci_timer,
.init_machine = mityomapl138_init,
.dma_zone_size = SZ_128M,
+ .restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index e5f231aefee..6c4a16415d4 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,4 +278,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
.timer = &davinci_timer,
.init_machine = davinci_ntosd2_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index c6701e4a795..e7c0c7c5349 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -344,4 +344,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.timer = &davinci_timer,
.init_machine = omapl138_hawk_init,
.dma_zone_size = SZ_128M,
+ .restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 5dd4da9d230..0b136a831c5 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,4 +157,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
.dma_zone_size = SZ_128M,
+ .restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index f69e40a29e0..5f14e30b00d 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -283,4 +283,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
.timer = &davinci_timer,
.init_machine = tnetv107x_evm_board_init,
.dma_zone_size = SZ_128M,
+ .restart = tnetv107x_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 00861139101..008772e3b84 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -31,19 +31,12 @@ static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
-static unsigned psc_domain(struct clk *clk)
-{
- return (clk->flags & PSC_DSP)
- ? DAVINCI_GPSC_DSPDOMAIN
- : DAVINCI_GPSC_ARMDOMAIN;
-}
-
static void __clk_enable(struct clk *clk)
{
if (clk->parent)
__clk_enable(clk->parent);
if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
true, clk->flags);
}
@@ -53,7 +46,7 @@ static void __clk_disable(struct clk *clk)
return;
if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
(clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
false, clk->flags);
if (clk->parent)
__clk_disable(clk->parent);
@@ -237,7 +230,7 @@ static int __init clk_disable_unused(void)
pr_debug("Clocks: disable unused %s\n", ck->name);
- davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
+ davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
false, ck->flags);
}
spin_unlock_irq(&clockfw_lock);
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index a705f367a84..46f0f1bf1a4 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -93,6 +93,7 @@ struct clk {
u8 usecount;
u8 lpsc;
u8 gpsc;
+ u8 domain;
u32 flags;
struct clk *parent;
struct list_head children; /* list of children */
@@ -107,11 +108,10 @@ struct clk {
/* Clock flags: SoC-specific flags start at BIT(16) */
#define ALWAYS_ENABLED BIT(1)
#define CLK_PSC BIT(2)
-#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
-#define CLK_PLL BIT(4) /* PLL-derived clock */
-#define PRE_PLL BIT(5) /* source is before PLL mult/div */
-#define PSC_SWRSTDISABLE BIT(6) /* Disable state is SwRstDisable */
-#define PSC_FORCE BIT(7) /* Force module state transtition */
+#define CLK_PLL BIT(3) /* PLL-derived clock */
+#define PRE_PLL BIT(4) /* source is before PLL mult/div */
+#define PSC_SWRSTDISABLE BIT(5) /* Disable state is SwRstDisable */
+#define PSC_FORCE BIT(6) /* Force module state transition */
#define CLK(dev, con, ck) \
{ \
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 865ffe5899a..cb9b2e47510 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -97,9 +97,6 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
local_flush_tlb_all();
flush_cache_all();
- if (!davinci_soc_info.reset)
- davinci_soc_info.reset = davinci_watchdog_reset;
-
/*
* We want to check CPU revision early for cpu_is_xxxx() macros.
* IO space mapping must be initialized before we can do that.
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index a6bf5dcaef1..deee5c2da75 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -1201,7 +1201,6 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
- .reset_device = &da8xx_wdt_device,
};
void __init da830_init(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index b047f870227..0ed7fdb64ef 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -1121,7 +1121,6 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
.emac_pdata = &da8xx_emac_pdata,
.sram_dma = DA8XX_ARM_RAM_BASE,
.sram_len = SZ_8K,
- .reset_device = &da8xx_wdt_device,
};
void __init da850_init(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 68def718886..42dbf3dc11a 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -363,6 +363,11 @@ struct platform_device da8xx_wdt_device = {
.resource = da8xx_watchdog_resources,
};
+void da8xx_restart(char mode, const char *cmd)
+{
+ davinci_watchdog_reset(&da8xx_wdt_device);
+}
+
int __init da8xx_register_watchdog(void)
{
return platform_device_register(&da8xx_wdt_device);
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 806a2f02b98..50c0156b426 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -291,6 +291,11 @@ struct platform_device davinci_wdt_device = {
.resource = wdt_resources,
};
+void davinci_restart(char mode, const char *cmd)
+{
+ davinci_watchdog_reset(&davinci_wdt_device);
+}
+
static void davinci_init_wdt(void)
{
platform_device_register(&davinci_wdt_device);
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index fe520d4167a..19667cfc5de 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -853,7 +853,6 @@ static struct davinci_soc_info davinci_soc_info_dm355 = {
.serial_dev = &dm355_serial_device,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
- .reset_device = &davinci_wdt_device,
};
void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 679e168dce3..f15b435cc65 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1083,7 +1083,6 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.emac_pdata = &dm365_emac_pdata,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
- .reset_device = &davinci_wdt_device,
};
void __init dm365_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 3470983aa34..43a48ee1917 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -130,7 +130,7 @@ static struct clk dsp_clk = {
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DAVINCI_LPSC_GEM,
- .flags = PSC_DSP,
+ .domain = DAVINCI_GPSC_DSPDOMAIN,
.usecount = 1, /* REVISIT how to disable? */
};
@@ -145,7 +145,7 @@ static struct clk vicp_clk = {
.name = "vicp",
.parent = &pll1_sysclk2,
.lpsc = DAVINCI_LPSC_IMCOP,
- .flags = PSC_DSP,
+ .domain = DAVINCI_GPSC_DSPDOMAIN,
.usecount = 1, /* REVISIT how to disable? */
};
@@ -767,7 +767,6 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
.emac_pdata = &dm644x_emac_pdata,
.sram_dma = 0x00008000,
.sram_len = SZ_16K,
- .reset_device = &davinci_wdt_device,
};
void __init dm644x_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0b68ed534f8..00f774394b1 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DM646X_LPSC_C64X_CPU,
- .flags = PSC_DSP,
.usecount = 1, /* REVISIT how to disable? */
};
@@ -855,7 +854,6 @@ static struct davinci_soc_info davinci_soc_info_dm646x = {
.emac_pdata = &dm646x_emac_pdata,
.sram_dma = 0x10010000,
.sram_len = SZ_32K,
- .reset_device = &davinci_wdt_device,
};
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index a57cba21e21..5cd39a4e0c9 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -77,14 +77,13 @@ struct davinci_soc_info {
struct emac_platform_data *emac_pdata;
dma_addr_t sram_dma;
unsigned sram_len;
- struct platform_device *reset_device;
- void (*reset)(struct platform_device *);
};
extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(struct davinci_soc_info *soc_info);
extern void davinci_init_ide(void);
+void davinci_restart(char mode, const char *cmd);
/* standard place to map on-chip SRAMs; they *may* support DMA */
#define SRAM_VIRT 0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index eaca7d8b9d6..ee3461d7ec1 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -91,6 +91,7 @@ int da8xx_register_cpuidle(void);
void __iomem * __init da8xx_get_mem_ctlr(void);
int da850_register_pm(struct platform_device *pdev);
int __init da850_register_sata(unsigned long refclkpn);
+void da8xx_restart(char mode, const char *cmd);
extern struct platform_device da8xx_serial_device;
extern struct emac_platform_data da8xx_emac_pdata;
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 2a00fe5ac25..a8ee6c9f0bb 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -16,6 +16,7 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/davinci_emac.h>
+#include <media/davinci/vpif_types.h>
#define DM646X_EMAC_BASE (0x01C80000)
#define DM646X_EMAC_MDIO_BASE (DM646X_EMAC_BASE + 0x4000)
@@ -34,58 +35,6 @@ int __init dm646x_init_edma(struct edma_rsv_info *rsv);
void dm646x_video_init(void);
-enum vpif_if_type {
- VPIF_IF_BT656,
- VPIF_IF_BT1120,
- VPIF_IF_RAW_BAYER
-};
-
-struct vpif_interface {
- enum vpif_if_type if_type;
- unsigned hd_pol:1;
- unsigned vd_pol:1;
- unsigned fid_pol:1;
-};
-
-struct vpif_subdev_info {
- const char *name;
- struct i2c_board_info board_info;
- u32 input;
- u32 output;
- unsigned can_route:1;
- struct vpif_interface vpif_if;
-};
-
-struct vpif_display_config {
- int (*set_clock)(int, int);
- struct vpif_subdev_info *subdevinfo;
- int subdev_count;
- const char **output;
- int output_count;
- const char *card_name;
-};
-
-struct vpif_input {
- struct v4l2_input input;
- const char *subdev_name;
-};
-
-#define VPIF_CAPTURE_MAX_CHANNELS 2
-
-struct vpif_capture_chan_config {
- const struct vpif_input *inputs;
- int input_count;
-};
-
-struct vpif_capture_config {
- int (*setup_input_channel_mode)(int);
- int (*setup_input_path)(int, const char *);
- struct vpif_capture_chan_config chan_config[VPIF_CAPTURE_MAX_CHANNELS];
- struct vpif_subdev_info *subdev_info;
- int subdev_count;
- const char *card_name;
-};
-
void dm646x_setup_vpif(struct vpif_display_config *,
struct vpif_capture_config *);
diff --git a/arch/arm/mach-davinci/include/mach/io.h b/arch/arm/mach-davinci/include/mach/io.h
index d1b954955c1..b2267d1e1a7 100644
--- a/arch/arm/mach-davinci/include/mach/io.h
+++ b/arch/arm/mach-davinci/include/mach/io.h
@@ -21,12 +21,4 @@
#define __mem_pci(a) (a)
#define __mem_isa(a) (a)
-#ifndef __ASSEMBLER__
-#define __arch_ioremap davinci_ioremap
-#define __arch_iounmap davinci_iounmap
-
-void __iomem *davinci_ioremap(unsigned long phys, size_t size,
- unsigned int type);
-void davinci_iounmap(volatile void __iomem *addr);
-#endif
#endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index fa59c097223..8bc3fc25617 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -233,7 +233,7 @@
#define PTCMD 0x120
#define PTSTAT 0x128
#define PDSTAT 0x200
-#define PDCTL1 0x304
+#define PDCTL 0x300
#define MDSTAT 0x800
#define MDCTL 0xA00
@@ -244,7 +244,10 @@
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x3f
+#define PDSTAT_STATE_MASK 0x1f
#define MDCTL_FORCE BIT(31)
+#define PDCTL_NEXT BIT(0)
+#define PDCTL_EPCGOOD BIT(8)
#ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-davinci/include/mach/system.h b/arch/arm/mach-davinci/include/mach/system.h
index e65629c2076..fcb7a015aba 100644
--- a/arch/arm/mach-davinci/include/mach/system.h
+++ b/arch/arm/mach-davinci/include/mach/system.h
@@ -18,10 +18,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (davinci_soc_info.reset)
- davinci_soc_info.reset(davinci_soc_info.reset_device);
-}
-
#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h
index 89c1fdc63c0..83e5926f3c4 100644
--- a/arch/arm/mach-davinci/include/mach/tnetv107x.h
+++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h
@@ -54,6 +54,7 @@ extern struct platform_device tnetv107x_serial_device;
extern void __init tnetv107x_init(void);
extern void __init tnetv107x_devices_init(struct tnetv107x_device_info *);
extern void __init tnetv107x_irq_init(void);
+void tnetv107x_restart(char mode, const char *cmd);
#endif
diff --git a/arch/arm/mach-davinci/include/mach/vmalloc.h b/arch/arm/mach-davinci/include/mach/vmalloc.h
deleted file mode 100644
index d49646a8e20..00000000000
--- a/arch/arm/mach-davinci/include/mach/vmalloc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * DaVinci vmalloc definitions
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <mach/hardware.h>
-
-/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
-#define VMALLOC_END (IO_VIRT - (2<<20))
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
deleted file mode 100644
index 8ea60a8b249..00000000000
--- a/arch/arm/mach-davinci/io.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * DaVinci I/O mapping code
- *
- * Copyright (C) 2005-2006 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <asm/tlb.h>
-#include <asm/mach/map.h>
-
-#include <mach/common.h>
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
-{
- struct map_desc *desc = davinci_soc_info.io_desc;
- int desc_num = davinci_soc_info.io_desc_num;
- int i;
-
- for (i = 0; i < desc_num; i++, desc++) {
- unsigned long iophys = __pfn_to_phys(desc->pfn);
- unsigned long iosize = desc->length;
-
- if (p >= iophys && (p + size) <= (iophys + iosize))
- return __io(desc->virtual + p - iophys);
- }
-
- return __arm_ioremap_caller(p, size, type,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(davinci_ioremap);
-
-void davinci_iounmap(volatile void __iomem *addr)
-{
- unsigned long virt = (unsigned long)addr;
-
- if (virt >= VMALLOC_START && virt < VMALLOC_END)
- __iounmap(addr);
-}
-EXPORT_SYMBOL(davinci_iounmap);
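With the __arch_ioremap/__arch_iounmap hooks and davinci_ioremap() removed, drivers are expected to go through the generic ARM ioremap path; requests that fall inside the SoC's static io_desc mappings are assumed to be handled by the core code rather than intercepted per platform. A hedged sketch of the resulting driver-side usage under that assumption; the physical base macro is hypothetical:

	/* Sketch only: EXAMPLE_REG_PHYS is a placeholder, not a real define. */
	void __iomem *base = ioremap(EXAMPLE_REG_PHYS, SZ_4K);
	if (!base)
		return -ENOMEM;
	/* ... register accesses via readl()/writel() ... */
	iounmap(base);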
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 1fb6bdff38c..d7e210f4b55 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
unsigned int id, bool enable, u32 flags)
{
- u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+ u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
mdctl |= MDCTL_FORCE;
__raw_writel(mdctl, psc_base + MDCTL + 4 * id);
- pdstat = __raw_readl(psc_base + PDSTAT);
- if ((pdstat & 0x00000001) == 0) {
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x1;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_NEXT;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
epcpr = __raw_readl(psc_base + EPCPR);
} while ((((epcpr >> domain) & 1) == 0));
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x100;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_EPCGOOD;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
} else {
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
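The power-domain handling above switches from the single hard-coded PDCTL1 register to per-domain PDSTAT/PDCTL registers indexed by the domain number, with the transition-request and external-power-control bits named explicitly. Roughly, the addressing works out as in this sketch, which restates the hunk rather than adding new behaviour:

	/* Per-domain register addressing used above (restated). */
	void __iomem *pdstat_reg = psc_base + PDSTAT + 4 * domain;	/* 0x200 + 4*n */
	void __iomem *pdctl_reg  = psc_base + PDCTL  + 4 * domain;	/* 0x300 + 4*n */

	if ((__raw_readl(pdstat_reg) & PDSTAT_STATE_MASK) == 0) {
		u32 pdctl = __raw_readl(pdctl_reg);
		pdctl |= PDCTL_NEXT;		/* request the next-state transition (bit 0) */
		__raw_writel(pdctl, pdctl_reg);
	}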
diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c
index 409bb869c7c..dc1a209b9b6 100644
--- a/arch/arm/mach-davinci/tnetv107x.c
+++ b/arch/arm/mach-davinci/tnetv107x.c
@@ -730,6 +730,11 @@ static void tnetv107x_watchdog_reset(struct platform_device *pdev)
__raw_writel(1, &regs->kick);
}
+void tnetv107x_restart(char mode, const char *cmd)
+{
+ tnetv107x_watchdog_reset(&tnetv107x_wdt_device);
+}
+
static struct davinci_soc_info tnetv107x_soc_info = {
.io_desc = io_desc,
.io_desc_num = ARRAY_SIZE(io_desc),
@@ -752,8 +757,6 @@ static struct davinci_soc_info tnetv107x_soc_info = {
.gpio_num = TNETV107X_N_GPIO,
.timer_info = &timer_info,
.serial_dev = &tnetv107x_serial_device,
- .reset = tnetv107x_watchdog_reset,
- .reset_device = &tnetv107x_wdt_device,
};
void __init tnetv107x_init(void)
diff --git a/arch/arm/mach-dove/addr-map.c b/arch/arm/mach-dove/addr-map.c
index 00be4fc26dd..98b8c83b09a 100644
--- a/arch/arm/mach-dove/addr-map.c
+++ b/arch/arm/mach-dove/addr-map.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
+#include <plat/addr-map.h>
#include "common.h"
/*
@@ -34,98 +35,72 @@
#define ATTR_PCIE_MEM 0xe8
#define ATTR_SCRATCHPAD 0x0
-/*
- * CPU Address Decode Windows registers
- */
-#define WIN_CTRL(n) (BRIDGE_VIRT_BASE + ((n) << 4) + 0x0)
-#define WIN_BASE(n) (BRIDGE_VIRT_BASE + ((n) << 4) + 0x4)
-#define WIN_REMAP_LO(n) (BRIDGE_VIRT_BASE + ((n) << 4) + 0x8)
-#define WIN_REMAP_HI(n) (BRIDGE_VIRT_BASE + ((n) << 4) + 0xc)
-
-struct mbus_dram_target_info dove_mbus_dram_info;
-
static inline void __iomem *ddr_map_sc(int i)
{
return (void __iomem *)(DOVE_MC_VIRT_BASE + 0x100 + ((i) << 4));
}
-static int cpu_win_can_remap(int win)
-{
- if (win < 4)
- return 1;
-
- return 0;
-}
-
-static void __init setup_cpu_win(int win, u32 base, u32 size,
- u8 target, u8 attr, int remap)
-{
- u32 ctrl;
-
- base &= 0xffff0000;
- ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1;
-
- writel(base, WIN_BASE(win));
- writel(ctrl, WIN_CTRL(win));
- if (cpu_win_can_remap(win)) {
- if (remap < 0)
- remap = base;
- writel(remap & 0xffff0000, WIN_REMAP_LO(win));
- writel(0, WIN_REMAP_HI(win));
- }
-}
-
-void __init dove_setup_cpu_mbus(void)
-{
- int i;
- int cs;
+/*
+ * Description of the windows needed by the platform code
+ */
+static struct __initdata orion_addr_map_cfg addr_map_cfg = {
+ .num_wins = 8,
+ .remappable_wins = 4,
+ .bridge_virt_base = BRIDGE_VIRT_BASE,
+};
+static const struct __initdata orion_addr_map_info addr_map_info[] = {
/*
- * First, disable and clear windows.
+ * Windows for PCIe IO+MEM space.
*/
- for (i = 0; i < 8; i++) {
- writel(0, WIN_BASE(i));
- writel(0, WIN_CTRL(i));
- if (cpu_win_can_remap(i)) {
- writel(0, WIN_REMAP_LO(i));
- writel(0, WIN_REMAP_HI(i));
- }
- }
-
+ { 0, DOVE_PCIE0_IO_PHYS_BASE, DOVE_PCIE0_IO_SIZE,
+ TARGET_PCIE0, ATTR_PCIE_IO, DOVE_PCIE0_IO_BUS_BASE
+ },
+ { 1, DOVE_PCIE1_IO_PHYS_BASE, DOVE_PCIE1_IO_SIZE,
+ TARGET_PCIE1, ATTR_PCIE_IO, DOVE_PCIE1_IO_BUS_BASE
+ },
+ { 2, DOVE_PCIE0_MEM_PHYS_BASE, DOVE_PCIE0_MEM_SIZE,
+ TARGET_PCIE0, ATTR_PCIE_MEM, -1
+ },
+ { 3, DOVE_PCIE1_MEM_PHYS_BASE, DOVE_PCIE1_MEM_SIZE,
+ TARGET_PCIE1, ATTR_PCIE_MEM, -1
+ },
/*
- * Setup windows for PCIe IO+MEM space.
+ * Window for CESA engine.
*/
- setup_cpu_win(0, DOVE_PCIE0_IO_PHYS_BASE, DOVE_PCIE0_IO_SIZE,
- TARGET_PCIE0, ATTR_PCIE_IO, DOVE_PCIE0_IO_BUS_BASE);
- setup_cpu_win(1, DOVE_PCIE1_IO_PHYS_BASE, DOVE_PCIE1_IO_SIZE,
- TARGET_PCIE1, ATTR_PCIE_IO, DOVE_PCIE1_IO_BUS_BASE);
- setup_cpu_win(2, DOVE_PCIE0_MEM_PHYS_BASE, DOVE_PCIE0_MEM_SIZE,
- TARGET_PCIE0, ATTR_PCIE_MEM, -1);
- setup_cpu_win(3, DOVE_PCIE1_MEM_PHYS_BASE, DOVE_PCIE1_MEM_SIZE,
- TARGET_PCIE1, ATTR_PCIE_MEM, -1);
-
+ { 4, DOVE_CESA_PHYS_BASE, DOVE_CESA_SIZE,
+ TARGET_CESA, ATTR_CESA, -1
+ },
/*
- * Setup window for CESA engine.
+ * Window to the BootROM for Standby and Sleep Resume
*/
- setup_cpu_win(4, DOVE_CESA_PHYS_BASE, DOVE_CESA_SIZE,
- TARGET_CESA, ATTR_CESA, -1);
-
+ { 5, DOVE_BOOTROM_PHYS_BASE, DOVE_BOOTROM_SIZE,
+ TARGET_BOOTROM, ATTR_BOOTROM, -1
+ },
/*
- * Setup the Window to the BootROM for Standby and Sleep Resume
+ * Window to the PMU Scratch Pad space
*/
- setup_cpu_win(5, DOVE_BOOTROM_PHYS_BASE, DOVE_BOOTROM_SIZE,
- TARGET_BOOTROM, ATTR_BOOTROM, -1);
+ { 6, DOVE_SCRATCHPAD_PHYS_BASE, DOVE_SCRATCHPAD_SIZE,
+ TARGET_SCRATCHPAD, ATTR_SCRATCHPAD, -1
+ },
+ /* End marker */
+ { -1, 0, 0, 0, 0, 0 }
+};
+
+void __init dove_setup_cpu_mbus(void)
+{
+ int i;
+ int cs;
/*
- * Setup the Window to the PMU Scratch Pad space
+ * Disable, clear and configure windows.
*/
- setup_cpu_win(6, DOVE_SCRATCHPAD_PHYS_BASE, DOVE_SCRATCHPAD_SIZE,
- TARGET_SCRATCHPAD, ATTR_SCRATCHPAD, -1);
+ orion_config_wins(&addr_map_cfg, addr_map_info);
/*
* Setup MBUS dram target info.
*/
- dove_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+ orion_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
for (i = 0, cs = 0; i < 2; i++) {
u32 map = readl(ddr_map_sc(i));
@@ -136,7 +111,7 @@ void __init dove_setup_cpu_mbus(void)
if (map & 1) {
struct mbus_dram_window *w;
- w = &dove_mbus_dram_info.cs[cs++];
+ w = &orion_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0; /* CS address decoding done inside */
/* the DDR controller, no need to */
@@ -145,5 +120,5 @@ void __init dove_setup_cpu_mbus(void)
w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
}
}
- dove_mbus_dram_info.num_cs = cs;
+ orion_mbus_dram_info.num_cs = cs;
}
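dove_setup_cpu_mbus() no longer programs the CPU address-decode windows by hand; it describes them in an orion_addr_map_info[] table, terminated by a window number of -1, and hands that table plus an orion_addr_map_cfg to the shared plat-orion helper. Conceptually, a walker over such a table stops at the end marker, roughly as in this sketch; the real helper lives in plat-orion/addr-map.c and may differ, and the field name "win" is an assumption based on the positional initializers above:

	/* Illustrative only: walking a -1-terminated window table. */
	static void __init example_config_wins(const struct orion_addr_map_cfg *cfg,
					       const struct orion_addr_map_info *info)
	{
		while (info->win != -1)
			example_setup_win(cfg, info++);	/* hypothetical per-window setup */
	}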
diff --git a/arch/arm/mach-dove/cm-a510.c b/arch/arm/mach-dove/cm-a510.c
index c8a406f7e94..792b4e2e24f 100644
--- a/arch/arm/mach-dove/cm-a510.c
+++ b/arch/arm/mach-dove/cm-a510.c
@@ -93,4 +93,5 @@ MACHINE_START(CM_A510, "Compulab CM-A510 Board")
.init_early = dove_init_early,
.init_irq = dove_init_irq,
.timer = &dove_timer,
+ .restart = dove_restart,
MACHINE_END
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index a9e0dae86a2..dd1429ae640 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/clk.h>
-#include <linux/mbus.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <asm/page.h>
@@ -30,6 +29,7 @@
#include <linux/irq.h>
#include <plat/time.h>
#include <plat/common.h>
+#include <plat/addr-map.h>
#include "common.h"
static int get_tclk(void);
@@ -71,8 +71,7 @@ void __init dove_map_io(void)
****************************************************************************/
void __init dove_ehci0_init(void)
{
- orion_ehci_init(&dove_mbus_dram_info,
- DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0);
+ orion_ehci_init(DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0);
}
/*****************************************************************************
@@ -80,8 +79,7 @@ void __init dove_ehci0_init(void)
****************************************************************************/
void __init dove_ehci1_init(void)
{
- orion_ehci_1_init(&dove_mbus_dram_info,
- DOVE_USB1_PHYS_BASE, IRQ_DOVE_USB1);
+ orion_ehci_1_init(DOVE_USB1_PHYS_BASE, IRQ_DOVE_USB1);
}
/*****************************************************************************
@@ -89,7 +87,7 @@ void __init dove_ehci1_init(void)
****************************************************************************/
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
- orion_ge00_init(eth_data, &dove_mbus_dram_info,
+ orion_ge00_init(eth_data,
DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
0, get_tclk());
}
@@ -107,8 +105,7 @@ void __init dove_rtc_init(void)
****************************************************************************/
void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
{
- orion_sata_init(sata_data, &dove_mbus_dram_info,
- DOVE_SATA_PHYS_BASE, IRQ_DOVE_SATA);
+ orion_sata_init(sata_data, DOVE_SATA_PHYS_BASE, IRQ_DOVE_SATA);
}
@@ -198,8 +195,7 @@ struct sys_timer dove_timer = {
****************************************************************************/
void __init dove_xor0_init(void)
{
- orion_xor0_init(&dove_mbus_dram_info,
- DOVE_XOR0_PHYS_BASE, DOVE_XOR0_HIGH_PHYS_BASE,
+ orion_xor0_init(DOVE_XOR0_PHYS_BASE, DOVE_XOR0_HIGH_PHYS_BASE,
IRQ_DOVE_XOR_00, IRQ_DOVE_XOR_01);
}
@@ -292,3 +288,19 @@ void __init dove_init(void)
dove_xor0_init();
dove_xor1_init();
}
+
+void dove_restart(char mode, const char *cmd)
+{
+ /*
+ * Enable soft reset to assert RSTOUTn.
+ */
+ writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+ /*
+ * Assert soft reset.
+ */
+ writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+ while (1)
+ ;
+}
diff --git a/arch/arm/mach-dove/common.h b/arch/arm/mach-dove/common.h
index 6a2046e4470..6432a3ba864 100644
--- a/arch/arm/mach-dove/common.h
+++ b/arch/arm/mach-dove/common.h
@@ -15,7 +15,6 @@ struct mv643xx_eth_platform_data;
struct mv_sata_platform_data;
extern struct sys_timer dove_timer;
-extern struct mbus_dram_target_info dove_mbus_dram_info;
/*
* Basic Dove init functions used early by machine-setup.
@@ -39,5 +38,6 @@ void dove_spi1_init(void);
void dove_i2c_init(void);
void dove_sdio0_init(void);
void dove_sdio1_init(void);
+void dove_restart(char, const char *);
#endif
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index 11ea34e4fc7..ea77ae430b2 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -100,4 +100,5 @@ MACHINE_START(DOVE_DB, "Marvell DB-MV88AP510-BP Development Board")
.init_early = dove_init_early,
.init_irq = dove_init_irq,
.timer = &dove_timer,
+ .restart = dove_restart,
MACHINE_END
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
index b20ec9af788..ad1165d488c 100644
--- a/arch/arm/mach-dove/include/mach/dove.h
+++ b/arch/arm/mach-dove/include/mach/dove.h
@@ -11,8 +11,6 @@
#ifndef __ASM_ARCH_DOVE_H
#define __ASM_ARCH_DOVE_H
-#include <mach/vmalloc.h>
-
/*
* Marvell Dove address maps.
*
diff --git a/arch/arm/mach-dove/include/mach/system.h b/arch/arm/mach-dove/include/mach/system.h
index 356afda5685..3027954f616 100644
--- a/arch/arm/mach-dove/include/mach/system.h
+++ b/arch/arm/mach-dove/include/mach/system.h
@@ -9,28 +9,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/bridge-regs.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Enable soft reset to assert RSTOUTn.
- */
- writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
- /*
- * Assert soft reset.
- */
- writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
- while (1)
- ;
-}
-
-
#endif
diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h
deleted file mode 100644
index a28792cf761..00000000000
--- a/arch/arm/mach-dove/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfd800000UL
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index aa2b3a09a51..6c11a4df717 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/mach/pci.h>
#include <asm/mach/arch.h>
@@ -19,6 +18,7 @@
#include <plat/pcie.h>
#include <mach/irqs.h>
#include <mach/bridge-regs.h>
+#include <plat/addr-map.h>
#include "common.h"
struct pcie_port {
@@ -50,7 +50,7 @@ static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
*/
orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
- orion_pcie_setup(pp->base, &dove_mbus_dram_info);
+ orion_pcie_setup(pp->base);
/*
* IORESOURCE_IO
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index d0ce8abdd4b..294aad07f7a 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -278,13 +278,19 @@ static int __init ebsa110_init(void)
arch_initcall(ebsa110_init);
+static void ebsa110_restart(char mode, const char *cmd)
+{
+ soft_restart(0x80000000);
+}
+
MACHINE_START(EBSA110, "EBSA110")
/* Maintainer: Russell King */
.atag_offset = 0x400,
.reserve_lp0 = 1,
.reserve_lp2 = 1,
- .soft_reboot = 1,
+ .restart_mode = 's',
.map_io = ebsa110_map_io,
.init_irq = ebsa110_init_irq,
.timer = &ebsa110_timer,
+ .restart = ebsa110_restart,
MACHINE_END
diff --git a/arch/arm/mach-ebsa110/include/mach/system.h b/arch/arm/mach-ebsa110/include/mach/system.h
index 9a26245bf1f..2e4af65edb6 100644
--- a/arch/arm/mach-ebsa110/include/mach/system.h
+++ b/arch/arm/mach-ebsa110/include/mach/system.h
@@ -34,6 +34,4 @@ static inline void arch_idle(void)
asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
}
-#define arch_reset(mode, cmd) cpu_reset(0x80000000)
-
#endif
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
deleted file mode 100644
index ea141b7a3e0..00000000000
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-ebsa110/include/mach/vmalloc.h
- *
- * Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END 0xdf000000UL
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 0713448206a..681e939407d 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -16,6 +16,7 @@
#include <mach/hardware.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -36,6 +37,8 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = adssphere_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 2432a6b7dca..24203f9a679 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -906,3 +906,15 @@ void __init ep93xx_init_devices(void)
platform_device_register(&ep93xx_ohci_device);
platform_device_register(&ep93xx_leds);
}
+
+void ep93xx_restart(char mode, const char *cmd)
+{
+ /*
+ * Set then clear the SWRST bit to initiate a software reset
+ */
+ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
+ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
+
+ while (1)
+ ;
+}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 70ef8c527d2..d115653edca 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -39,6 +39,7 @@
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -250,8 +251,10 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -261,8 +264,10 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -272,8 +277,10 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -283,8 +290,10 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -294,8 +303,10 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -305,8 +316,10 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -316,8 +329,10 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -327,7 +342,9 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 45ee205856f..af46970dc58 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -16,6 +16,7 @@
#include <mach/hardware.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -36,6 +37,8 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = gesbc9312_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/entry-macro.S b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
index 96b85e2c2c0..9be6edcf904 100644
--- a/arch/arm/mach-ep93xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-ep93xx/include/mach/entry-macro.S
@@ -9,51 +9,9 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
-#include <mach/ep93xx-regs.h>
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \base, =(EP93XX_AHB_VIRT_BASE)
- orr \base, \base, #0x000b0000
- mov \irqnr, #0
- ldr \irqstat, [\base] @ lower 32 interrupts
- cmp \irqstat, #0
- bne 1001f
-
- eor \base, \base, #0x00070000
- ldr \irqstat, [\base] @ upper 32 interrupts
- cmp \irqstat, #0
- beq 1002f
- mov \irqnr, #0x20
-
-1001:
- movs \tmp, \irqstat, lsl #16
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #16
-
- movs \tmp, \irqstat, lsl #8
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #8
-
- movs \tmp, \irqstat, lsl #4
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #4
-
- movs \tmp, \irqstat, lsl #2
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #2
-
- movs \tmp, \irqstat, lsl #1
- addeq \irqnr, \irqnr, #1
- orrs \base, \base, #1
-
-1002:
- .endm
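The hand-rolled get_irqnr_and_base priority scan of the two VIC status registers is dropped; EP93xx machines instead route IRQ entry through the common VIC C handler by setting .handle_irq in their machine descriptors, as the surrounding board hunks show. For reference, the wiring looks like this sketch, with board-specific fields as placeholders:

	/* Sketch: generic VIC entry replaces the assembly decode above. */
	MACHINE_START(EXAMPLE_EP93XX, "EP93xx example board")	/* hypothetical board */
		.atag_offset	= 0x100,
		.map_io		= ep93xx_map_io,
		.init_irq	= ep93xx_init_irq,
		.handle_irq	= vic_handle_irq,
		.timer		= &ep93xx_timer,
		.init_machine	= example_init_machine,		/* hypothetical */
		.restart	= ep93xx_restart,
	MACHINE_END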
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 50660455b1d..d4c934931f9 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -66,4 +66,6 @@ void ep93xx_register_ac97(void);
void ep93xx_init_devices(void);
extern struct sys_timer ep93xx_timer;
+void ep93xx_restart(char, const char *);
+
#endif
diff --git a/arch/arm/mach-ep93xx/include/mach/system.h b/arch/arm/mach-ep93xx/include/mach/system.h
index 6d661fe9d66..b5bec7cb9b5 100644
--- a/arch/arm/mach-ep93xx/include/mach/system.h
+++ b/arch/arm/mach-ep93xx/include/mach/system.h
@@ -1,24 +1,7 @@
/*
* arch/arm/mach-ep93xx/include/mach/system.h
*/
-
-#include <mach/hardware.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- local_irq_disable();
-
- /*
- * Set then clear the SWRST bit to initiate a software reset
- */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_SWRST);
- ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_SWRST);
-
- while (1)
- ;
-}
diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h
deleted file mode 100644
index 1b3f25d03d3..00000000000
--- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe800000UL
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index e72f7368876..7b98084f0c9 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -18,6 +18,7 @@
#include <mach/hardware.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -80,8 +81,10 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -91,8 +94,10 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -102,8 +107,10 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
@@ -113,7 +120,9 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index 52e090dc9d2..f4e553eca21 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -25,6 +25,7 @@
#include <mach/fb.h>
#include <mach/gpio-ep93xx.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -80,6 +81,8 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = simone_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 8121e3aedc0..fd846331ddf 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -31,6 +31,7 @@
#include <mach/fb.h>
#include <mach/gpio-ep93xx.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -177,6 +178,8 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = snappercl15_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 8b2f1435bca..79f8ecf07a1 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -23,6 +23,7 @@
#include <mach/hardware.h>
#include <mach/ts72xx.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
@@ -247,6 +248,8 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
.atag_offset = 0x100,
.map_io = ts72xx_map_io,
.init_irq = ep93xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = ts72xx_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index d96e4dbec6a..03dd4012043 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -361,4 +361,5 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
.init_irq = ep93xx_init_irq,
.timer = &ep93xx_timer,
.init_machine = vision_init_machine,
+ .restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 724ec0f3560..5d602f68a0e 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -17,6 +17,8 @@ choice
config ARCH_EXYNOS4
bool "SAMSUNG EXYNOS4"
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
help
Samsung EXYNOS4 SoCs based systems
@@ -57,6 +59,11 @@ config EXYNOS4_MCT
help
Use MCT (Multi Core Timer) as kernel timers
+config EXYNOS4_DEV_DMA
+ bool
+ help
+ Compile in amba device definitions for DMA controller
+
config EXYNOS4_DEV_AHCI
bool
help
@@ -82,6 +89,11 @@ config EXYNOS4_DEV_DWMCI
help
Compile in platform device definitions for DWMCI
+config EXYNOS4_DEV_USB_OHCI
+ bool
+ help
+ Compile in platform device definition for USB OHCI
+
config EXYNOS4_SETUP_I2C1
bool
help
@@ -143,6 +155,11 @@ config EXYNOS4_SETUP_USB_PHY
help
Common setup code for USB PHY controller
+config EXYNOS4_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations.
+
# machine support
if ARCH_EXYNOS4
@@ -177,8 +194,10 @@ config MACH_SMDKV310
select SAMSUNG_DEV_BACKLIGHT
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
+ select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select SAMSUNG_DEV_PWM
+ select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
@@ -197,6 +216,7 @@ config MACH_ARMLEX4210
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select EXYNOS4_DEV_AHCI
+ select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_SDHCI
help
@@ -222,6 +242,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_MFC
select S5P_DEV_ONENAND
select S5P_DEV_TV
+ select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
@@ -255,6 +276,7 @@ config MACH_NURI
select S5P_DEV_MFC
select S5P_DEV_USB_EHCI
select S5P_SETUP_MIPIPHY
+ select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select EXYNOS4_SETUP_FIMC
select EXYNOS4_SETUP_FIMD0
@@ -287,7 +309,9 @@ config MACH_ORIGEN
select S5P_DEV_USB_EHCI
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
+ select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
+ select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_SDHCI
select EXYNOS4_SETUP_USB_PHY
@@ -327,6 +351,20 @@ config MACH_SMDK4412
Machine support for Samsung SMDK4412
endif
+comment "Flattened Device Tree based board for Exynos4 based SoC"
+
+config MACH_EXYNOS4_DT
+ bool "Samsung Exynos4 Machine using device tree"
+ select CPU_EXYNOS4210
+ select USE_OF
+ select ARM_AMBA
+ select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
+ help
+ Machine support for Samsung Exynos4 machine with device tree enabled.
+ Select this if a fdt blob is available for the Exynos4 SoC based board.
+ Note: This is under development and not all peripherals can be supported
+ with this machine file.
+
if ARCH_EXYNOS4
comment "Configuration for HSMMC 8-bit bus width"
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 59069a35e40..5fc202cdfdb 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -10,15 +10,17 @@ obj-m :=
obj-n :=
obj- :=
-# Core support for EXYNOS4 system
+# Core
-obj-$(CONFIG_ARCH_EXYNOS4) += cpu.o init.o clock.o irq-combiner.o setup-i2c0.o
-obj-$(CONFIG_ARCH_EXYNOS4) += irq-eint.o dma.o pmu.o
+obj-$(CONFIG_ARCH_EXYNOS4) += common.o clock.o
obj-$(CONFIG_CPU_EXYNOS4210) += clock-exynos4210.o
obj-$(CONFIG_SOC_EXYNOS4212) += clock-exynos4212.o
+
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_ARCH_EXYNOS4) += pmu.o
+
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_EXYNOS4_MCT) += mct.o
@@ -37,6 +39,8 @@ obj-$(CONFIG_MACH_ORIGEN) += mach-origen.o
obj-$(CONFIG_MACH_SMDK4212) += mach-smdk4x12.o
obj-$(CONFIG_MACH_SMDK4412) += mach-smdk4x12.o
+obj-$(CONFIG_MACH_EXYNOS4_DT) += mach-exynos4-dt.o
+
# device support
obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
@@ -44,7 +48,10 @@ obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o
obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
+obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o
+obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
+obj-$(CONFIG_ARCH_EXYNOS4) += setup-i2c0.o
obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o
obj-$(CONFIG_EXYNOS4_SETUP_FIMD0) += setup-fimd0.o
obj-$(CONFIG_EXYNOS4_SETUP_I2C1) += setup-i2c1.o
@@ -55,6 +62,6 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C5) += setup-i2c5.o
obj-$(CONFIG_EXYNOS4_SETUP_I2C6) += setup-i2c6.o
obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
-obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
+obj-$(CONFIG_EXYNOS4_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index b9d5ef670eb..a5823a7f249 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -23,7 +23,6 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
#include <plat/pm.h>
#include <mach/hardware.h>
@@ -31,6 +30,8 @@
#include <mach/regs-clock.h>
#include <mach/exynos4-clock.h>
+#include "common.h"
+
static struct sleep_save exynos4210_clock_save[] = {
SAVE_ITEM(S5P_CLKSRC_IMAGE),
SAVE_ITEM(S5P_CLKSRC_LCD1),
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 77d5decb34f..26a668b0d10 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -23,7 +23,6 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
#include <plat/pm.h>
#include <mach/hardware.h>
@@ -31,6 +30,8 @@
#include <mach/regs-clock.h>
#include <mach/exynos4-clock.h>
+#include "common.h"
+
static struct sleep_save exynos4212_clock_save[] = {
SAVE_ITEM(S5P_CLKSRC_IMAGE),
SAVE_ITEM(S5P_CLKDIV_IMAGE),
diff --git a/arch/arm/mach-exynos/clock.c b/arch/arm/mach-exynos/clock.c
index 2894f0adef5..5a8c42e9000 100644
--- a/arch/arm/mach-exynos/clock.c
+++ b/arch/arm/mach-exynos/clock.c
@@ -21,7 +21,6 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/exynos4.h>
#include <plat/pm.h>
#include <mach/map.h>
@@ -29,6 +28,8 @@
#include <mach/sysmmu.h>
#include <mach/exynos4-clock.h>
+#include "common.h"
+
static struct sleep_save exynos4_clock_save[] = {
SAVE_ITEM(S5P_CLKDIV_LEFTBUS),
SAVE_ITEM(S5P_CLKGATE_IP_LEFTBUS),
@@ -553,16 +554,6 @@ static struct clk init_clocks_off[] = {
.enable = exynos4_clk_dac_ctrl,
.ctrlbit = (1 << 0),
}, {
- .name = "dma",
- .devname = "dma-pl330.0",
- .enable = exynos4_clk_ip_fsys_ctrl,
- .ctrlbit = (1 << 0),
- }, {
- .name = "dma",
- .devname = "dma-pl330.1",
- .enable = exynos4_clk_ip_fsys_ctrl,
- .ctrlbit = (1 << 1),
- }, {
.name = "adc",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 15),
@@ -778,6 +769,20 @@ static struct clk init_clocks[] = {
}
};
+static struct clk clk_pdma0 = {
+ .name = "dma",
+ .devname = "dma-pl330.0",
+ .enable = exynos4_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 0),
+};
+
+static struct clk clk_pdma1 = {
+ .name = "dma",
+ .devname = "dma-pl330.1",
+ .enable = exynos4_clk_ip_fsys_ctrl,
+ .ctrlbit = (1 << 1),
+};
+
struct clk *clkset_group_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_xusbxti,
@@ -1009,46 +1014,6 @@ static struct clksrc_clk clk_dout_mmc4 = {
static struct clksrc_clk clksrcs[] = {
{
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.0",
- .enable = exynos4_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 0),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.1",
- .enable = exynos4_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 4),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.2",
- .enable = exynos4_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 8),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 8, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.3",
- .enable = exynos4_clksrc_mask_peril0_ctrl,
- .ctrlbit = (1 << 12),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 12, .size = 4 },
- }, {
.clk = {
.name = "sclk_pwm",
.enable = exynos4_clksrc_mask_peril0_ctrl,
@@ -1147,36 +1112,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_LCD0, .shift = 0, .size = 4 },
}, {
.clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
- .enable = exynos4_clksrc_mask_peril1_ctrl,
- .ctrlbit = (1 << 16),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 16, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
- .enable = exynos4_clksrc_mask_peril1_ctrl,
- .ctrlbit = (1 << 20),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 20, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.2",
- .enable = exynos4_clksrc_mask_peril1_ctrl,
- .ctrlbit = (1 << 24),
- },
- .sources = &clkset_group,
- .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 24, .size = 4 },
- .reg_div = { .reg = S5P_CLKDIV_PERIL2, .shift = 0, .size = 4 },
- }, {
- .clk = {
.name = "sclk_fimg2d",
},
.sources = &clkset_mout_g2d,
@@ -1192,42 +1127,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_MFC, .shift = 0, .size = 4 },
}, {
.clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.0",
- .parent = &clk_dout_mmc0.clk,
- .enable = exynos4_clksrc_mask_fsys_ctrl,
- .ctrlbit = (1 << 0),
- },
- .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 8, .size = 8 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_dout_mmc1.clk,
- .enable = exynos4_clksrc_mask_fsys_ctrl,
- .ctrlbit = (1 << 4),
- },
- .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 24, .size = 8 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.2",
- .parent = &clk_dout_mmc2.clk,
- .enable = exynos4_clksrc_mask_fsys_ctrl,
- .ctrlbit = (1 << 8),
- },
- .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 8, .size = 8 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.3",
- .parent = &clk_dout_mmc3.clk,
- .enable = exynos4_clksrc_mask_fsys_ctrl,
- .ctrlbit = (1 << 12),
- },
- .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 },
- }, {
- .clk = {
.name = "sclk_dwmmc",
.parent = &clk_dout_mmc4.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
@@ -1237,6 +1136,134 @@ static struct clksrc_clk clksrcs[] = {
}
};
+static struct clksrc_clk clk_sclk_uart0 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "exynos4210-uart.0",
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart1 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "exynos4210-uart.1",
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 4),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart2 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "exynos4210-uart.2",
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart3 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "exynos4210-uart.3",
+ .enable = exynos4_clksrc_mask_peril0_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL0, .shift = 12, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.0",
+ .parent = &clk_dout_mmc0.clk,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 8, .size = 8 },
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.1",
+ .parent = &clk_dout_mmc1.clk,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 4),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS1, .shift = 24, .size = 8 },
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.2",
+ .parent = &clk_dout_mmc2.clk,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 8, .size = 8 },
+};
+
+static struct clksrc_clk clk_sclk_mmc3 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.3",
+ .parent = &clk_dout_mmc3.clk,
+ .enable = exynos4_clksrc_mask_fsys_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 },
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.0",
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.1",
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 20),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL1, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi2 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.2",
+ .enable = exynos4_clksrc_mask_peril1_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &clkset_group,
+ .reg_src = { .reg = S5P_CLKSRC_PERIL1, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLKDIV_PERIL2, .shift = 0, .size = 4 },
+};
+
/* Clock initialization code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
@@ -1271,6 +1298,42 @@ static struct clksrc_clk *sysclks[] = {
&clk_mout_mfc1,
};
+static struct clk *clk_cdev[] = {
+ &clk_pdma0,
+ &clk_pdma1,
+};
+
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uart0,
+ &clk_sclk_uart1,
+ &clk_sclk_uart2,
+ &clk_sclk_uart3,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2,
+ &clk_sclk_mmc3,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+ &clk_sclk_spi2,
+
+};
+
+static struct clk_lookup exynos4_clk_lookup[] = {
+ CLKDEV_INIT("exynos4210-uart.0", "clk_uart_baud0", &clk_sclk_uart0.clk),
+ CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &clk_sclk_uart1.clk),
+ CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &clk_sclk_uart2.clk),
+ CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &clk_sclk_uart3.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk),
+ CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+ CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk0", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk0", &clk_sclk_spi2.clk),
+};
+
static int xtal_rate;
static unsigned long exynos4_fout_apll_get_rate(struct clk *clk)
@@ -1478,11 +1541,19 @@ void __init exynos4_register_clocks(void)
for (ptr = 0; ptr < ARRAY_SIZE(sclk_tv); ptr++)
s3c_register_clksrc(sclk_tv[ptr], 1);
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
+ s3c_register_clksrc(clksrc_cdev[ptr], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
+ s3c_disable_clocks(clk_cdev[ptr], 1);
+
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(exynos4_clk_lookup, ARRAY_SIZE(exynos4_clk_lookup));
register_syscore_ops(&exynos4_clock_syscore_ops);
s3c24xx_register_clock(&dummy_apb_pclk);
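The UART, SDHCI, SPI and PDMA clocks move out of the anonymous clksrcs[] and init_clocks_off[] arrays into named clksrc_clk/clk objects registered through a clk_lookup table, so consumers bind to them by device name and connection id via clkdev rather than by the clock's devname alone. A hedged sketch of the consumer side, using one of the connection ids from the table above; pdev stands for the driver's platform device:

	/* Sketch only: a driver looking up its baud clock through clkdev.
	 * "clk_uart_baud0" matches the CLKDEV_INIT() entries added above. */
	struct clk *baud_clk;
	int ret;

	baud_clk = clk_get(&pdev->dev, "clk_uart_baud0");
	if (IS_ERR(baud_clk))
		return PTR_ERR(baud_clk);

	ret = clk_enable(baud_clk);
	if (ret) {
		clk_put(baud_clk);
		return ret;
	}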
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
new file mode 100644
index 00000000000..c59e1887100
--- /dev/null
+++ b/arch/arm/mach-exynos/common.c
@@ -0,0 +1,699 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Codes for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/serial_core.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <asm/proc-fns.h>
+#include <asm/exception.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-pmu.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/sdhci.h>
+#include <plat/gpio-cfg.h>
+#include <plat/adc-core.h>
+#include <plat/fb-core.h>
+#include <plat/fimc-core.h>
+#include <plat/iic-core.h>
+#include <plat/tv-core.h>
+#include <plat/regs-serial.h>
+
+#include "common.h"
+
+static const char name_exynos4210[] = "EXYNOS4210";
+static const char name_exynos4212[] = "EXYNOS4212";
+static const char name_exynos4412[] = "EXYNOS4412";
+
+static struct cpu_table cpu_ids[] __initdata = {
+ {
+ .idcode = EXYNOS4210_CPU_ID,
+ .idmask = EXYNOS4_CPU_MASK,
+ .map_io = exynos4_map_io,
+ .init_clocks = exynos4_init_clocks,
+ .init_uarts = exynos4_init_uarts,
+ .init = exynos_init,
+ .name = name_exynos4210,
+ }, {
+ .idcode = EXYNOS4212_CPU_ID,
+ .idmask = EXYNOS4_CPU_MASK,
+ .map_io = exynos4_map_io,
+ .init_clocks = exynos4_init_clocks,
+ .init_uarts = exynos4_init_uarts,
+ .init = exynos_init,
+ .name = name_exynos4212,
+ }, {
+ .idcode = EXYNOS4412_CPU_ID,
+ .idmask = EXYNOS4_CPU_MASK,
+ .map_io = exynos4_map_io,
+ .init_clocks = exynos4_init_clocks,
+ .init_uarts = exynos4_init_uarts,
+ .init = exynos_init,
+ .name = name_exynos4412,
+ },
+};
+
+/* Initial IO mappings */
+
+static struct map_desc exynos_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_CHIPID,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_CHIPID),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_TIMER,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_WATCHDOG,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SROMC,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SYSTIMER,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_PMU,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GIC_CPU,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GIC_DIST,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_UART,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
+ .length = SZ_512K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct map_desc exynos4_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_CMU,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
+ .length = SZ_128K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
+ .length = SZ_8K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_L2CC,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GPIO1,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO1),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GPIO2,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO2),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GPIO3,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO3),
+ .length = SZ_256,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_DMC0,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_USB_HSPHY,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct map_desc exynos4_iodesc0[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_SYSRAM,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct map_desc exynos4_iodesc1[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_SYSRAM,
+ .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void exynos_idle(void)
+{
+ if (!need_resched())
+ cpu_do_idle();
+
+ local_irq_enable();
+}
+
+void exynos4_restart(char mode, const char *cmd)
+{
+ __raw_writel(0x1, S5P_SWRESET);
+}
+
+/*
+ * exynos_map_io
+ *
+ * register the standard cpu IO areas
+ */
+
+void __init exynos_init_io(struct map_desc *mach_desc, int size)
+{
+ /* initialize the io descriptors we need for initialization */
+ iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
+ if (mach_desc)
+ iotable_init(mach_desc, size);
+
+ /* detect cpu id and rev. */
+ s5p_init_cpu(S5P_VA_CHIPID);
+
+ s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init exynos4_map_io(void)
+{
+ iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
+
+ if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
+ iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
+ else
+ iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
+
+ /* initialize device information early */
+ exynos4_default_sdhci0();
+ exynos4_default_sdhci1();
+ exynos4_default_sdhci2();
+ exynos4_default_sdhci3();
+
+ s3c_adc_setname("samsung-adc-v3");
+
+ s3c_fimc_setname(0, "exynos4-fimc");
+ s3c_fimc_setname(1, "exynos4-fimc");
+ s3c_fimc_setname(2, "exynos4-fimc");
+ s3c_fimc_setname(3, "exynos4-fimc");
+
+ /* The I2C bus controllers are directly compatible with s3c2440 */
+ s3c_i2c0_setname("s3c2440-i2c");
+ s3c_i2c1_setname("s3c2440-i2c");
+ s3c_i2c2_setname("s3c2440-i2c");
+
+ s5p_fb_setname(0, "exynos4-fb");
+ s5p_hdmi_setname("exynos4-hdmi");
+}
+
+void __init exynos4_init_clocks(int xtal)
+{
+ printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+ s3c24xx_register_baseclocks(xtal);
+ s5p_register_clocks(xtal);
+
+ if (soc_is_exynos4210())
+ exynos4210_register_clocks();
+ else if (soc_is_exynos4212() || soc_is_exynos4412())
+ exynos4212_register_clocks();
+
+ exynos4_register_clocks();
+ exynos4_setup_clocks();
+}
+
+#define COMBINER_ENABLE_SET 0x0
+#define COMBINER_ENABLE_CLEAR 0x4
+#define COMBINER_INT_STATUS 0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+ unsigned int irq_offset;
+ unsigned int irq_mask;
+ void __iomem *base;
+};
+
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+ struct combiner_chip_data *combiner_data =
+ irq_data_get_irq_chip_data(data);
+
+ return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->irq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->irq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, combiner_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ spin_lock(&irq_controller_lock);
+ status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ spin_unlock(&irq_controller_lock);
+ status &= chip_data->irq_mask;
+
+ if (status == 0)
+ goto out;
+
+ combiner_irq = __ffs(status);
+
+ cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+ if (unlikely(cascade_irq >= NR_IRQS))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+ if (combiner_nr >= MAX_COMBINER_NR)
+ BUG();
+ if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
+ unsigned int irq_start)
+{
+ unsigned int i;
+
+ if (combiner_nr >= MAX_COMBINER_NR)
+ BUG();
+
+ combiner_data[combiner_nr].base = base;
+ combiner_data[combiner_nr].irq_offset = irq_start;
+ combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+ /* Disable all interrupts */
+
+ __raw_writel(combiner_data[combiner_nr].irq_mask,
+ base + COMBINER_ENABLE_CLEAR);
+
+ /* Set up the Linux IRQ subsystem */
+
+ for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
+ + MAX_IRQ_IN_COMBINER; i++) {
+ irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(i, &combiner_data[combiner_nr]);
+ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+ }
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos4_dt_irq_match[] = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+ {},
+};
+#endif
+
+void __init exynos4_init_irq(void)
+{
+ int irq;
+ unsigned int gic_bank_offset;
+
+ gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
+
+ if (!of_have_populated_dt())
+ gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset);
+#ifdef CONFIG_OF
+ else
+ of_irq_init(exynos4_dt_irq_match);
+#endif
+
+ for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
+
+ combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
+ COMBINER_IRQ(irq, 0));
+ combiner_cascade_irq(irq, IRQ_SPI(irq));
+ }
+
+ /*
+ * The parameters of s5p_init_irq() are for VIC init.
+ * These parameters should be NULL and 0 because EXYNOS4
+ * uses GIC instead of VIC.
+ */
+ s5p_init_irq(NULL, 0);
+}
+
+struct bus_type exynos4_subsys = {
+ .name = "exynos4-core",
+ .dev_name = "exynos4-core",
+};
+
+static struct device exynos4_dev = {
+ .bus = &exynos4_subsys,
+};
+
+static int __init exynos4_core_init(void)
+{
+ return subsys_system_register(&exynos4_subsys, NULL);
+}
+core_initcall(exynos4_core_init);
+
+#ifdef CONFIG_CACHE_L2X0
+static int __init exynos4_l2x0_cache_init(void)
+{
+ /* TAG, Data Latency Control: 2cycle */
+ __raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
+
+ if (soc_is_exynos4210())
+ __raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
+ else if (soc_is_exynos4212() || soc_is_exynos4412())
+ __raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
+
+ /* L2X0 Prefetch Control */
+ __raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
+
+ /* L2X0 Power Control */
+ __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
+ S5P_VA_L2CC + L2X0_POWER_CTRL);
+
+ l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
+
+ return 0;
+}
+
+early_initcall(exynos4_l2x0_cache_init);
+#endif
+
+int __init exynos_init(void)
+{
+ printk(KERN_INFO "EXYNOS: Initializing architecture\n");
+
+ /* set idle function */
+ pm_idle = exynos_idle;
+
+ return device_register(&exynos4_dev);
+}
+
+/* uart registration process */
+
+void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ struct s3c2410_uartcfg *tcfg = cfg;
+ u32 ucnt;
+
+ for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
+ tcfg->has_fracval = 1;
+
+ s3c24xx_init_uartdevs("exynos4210-uart", s5p_uart_resources, cfg, no);
+}
+
+static DEFINE_SPINLOCK(eint_lock);
+
+static unsigned int eint0_15_data[16];
+
+static unsigned int exynos4_get_irq_nr(unsigned int number)
+{
+ u32 ret = 0;
+
+ switch (number) {
+ case 0 ... 3:
+ ret = (number + IRQ_EINT0);
+ break;
+ case 4 ... 7:
+ ret = (number + (IRQ_EINT4 - 4));
+ break;
+ case 8 ... 15:
+ ret = (number + (IRQ_EINT8 - 8));
+ break;
+ default:
+ printk(KERN_ERR "Invalid EINT number: %d\n", number);
+ }
+
+ return ret;
+}
+
+static inline void exynos4_irq_eint_mask(struct irq_data *data)
+{
+ u32 mask;
+
+ spin_lock(&eint_lock);
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+ mask |= eint_irq_to_bit(data->irq);
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+ spin_unlock(&eint_lock);
+}
+
+static void exynos4_irq_eint_unmask(struct irq_data *data)
+{
+ u32 mask;
+
+ spin_lock(&eint_lock);
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+ mask &= ~(eint_irq_to_bit(data->irq));
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
+ spin_unlock(&eint_lock);
+}
+
+static inline void exynos4_irq_eint_ack(struct irq_data *data)
+{
+ __raw_writel(eint_irq_to_bit(data->irq),
+ S5P_EINT_PEND(EINT_REG_NR(data->irq)));
+}
+
+static void exynos4_irq_eint_maskack(struct irq_data *data)
+{
+ exynos4_irq_eint_mask(data);
+ exynos4_irq_eint_ack(data);
+}
+
+static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+ int offs = EINT_OFFSET(data->irq);
+ int shift;
+ u32 ctrl, mask;
+ u32 newvalue = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S5P_IRQ_TYPE_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
+ break;
+
+ default:
+ printk(KERN_ERR "No such irq type %d", type);
+ return -EINVAL;
+ }
+
+ shift = (offs & 0x7) * 4;
+ mask = 0x7 << shift;
+
+ spin_lock(&eint_lock);
+ ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
+ ctrl &= ~mask;
+ ctrl |= newvalue << shift;
+ __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
+ spin_unlock(&eint_lock);
+
+ switch (offs) {
+ case 0 ... 7:
+ s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
+ break;
+ case 8 ... 15:
+ s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
+ break;
+ case 16 ... 23:
+ s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
+ break;
+ case 24 ... 31:
+ s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
+ break;
+ default:
+ printk(KERN_ERR "No such irq number %d", offs);
+ }
+
+ return 0;
+}
+
+static struct irq_chip exynos4_irq_eint = {
+ .name = "exynos4-eint",
+ .irq_mask = exynos4_irq_eint_mask,
+ .irq_unmask = exynos4_irq_eint_unmask,
+ .irq_mask_ack = exynos4_irq_eint_maskack,
+ .irq_ack = exynos4_irq_eint_ack,
+ .irq_set_type = exynos4_irq_eint_set_type,
+#ifdef CONFIG_PM
+ .irq_set_wake = s3c_irqext_wake,
+#endif
+};
+
+/*
+ * exynos4_irq_demux_eint
+ *
+ * This function demuxes the IRQs from EINTs 16 to 31.
+ * It is designed to be inlined into the specific handler
+ * s5p_irq_demux_eintX_Y.
+ *
+ * Each EINT pend/mask registers handle eight of them.
+ */
+static inline void exynos4_irq_demux_eint(unsigned int start)
+{
+ unsigned int irq;
+
+ u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
+ u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
+
+ status &= ~mask;
+ status &= 0xff;
+
+ while (status) {
+ irq = fls(status) - 1;
+ generic_handle_irq(irq + start);
+ status &= ~(1 << irq);
+ }
+}
+
+static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
+ exynos4_irq_demux_eint(IRQ_EINT(16));
+ exynos4_irq_demux_eint(IRQ_EINT(24));
+ chained_irq_exit(chip, desc);
+}
+
+static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+{
+ u32 *irq_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+
+ generic_handle_irq(*irq_data);
+
+ chip->irq_unmask(&desc->irq_data);
+ chained_irq_exit(chip, desc);
+}
+
+int __init exynos4_init_irq_eint(void)
+{
+ int irq;
+
+ for (irq = 0 ; irq <= 31 ; irq++) {
+ irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
+ handle_level_irq);
+ set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
+ }
+
+ irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
+
+ for (irq = 0 ; irq <= 15 ; irq++) {
+ eint0_15_data[irq] = IRQ_EINT(irq);
+
+ irq_set_handler_data(exynos4_get_irq_nr(irq),
+ &eint0_15_data[irq]);
+ irq_set_chained_handler(exynos4_get_irq_nr(irq),
+ exynos4_irq_eint0_15);
+ }
+
+ return 0;
+}
+arch_initcall(exynos4_init_irq_eint);
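
For orientation only (not part of the patch): once exynos4_init_irq_eint() has registered the exynos4-eint chip above, a board or driver claims one of these lines through the normal request_irq() path, which in turn exercises exynos4_irq_eint_set_type() and exynos4_irq_eint_unmask(). A minimal sketch; the IRQ line, handler and label below are placeholders.

#include <linux/interrupt.h>
#include <mach/irqs.h>

static irqreturn_t example_eint_handler(int irq, void *dev_id)
{
	/* pending state is handled by the chip's mask/ack callbacks */
	return IRQ_HANDLED;
}

static int example_claim_eint(void *dev)
{
	/* EINT11 is an arbitrary example line */
	return request_irq(IRQ_EINT(11), example_eint_handler,
			   IRQF_TRIGGER_FALLING, "example-eint", dev);
}
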
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
new file mode 100644
index 00000000000..1ac49de0f39
--- /dev/null
+++ b/arch/arm/mach-exynos/common.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for EXYNOS machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H
+#define __ARCH_ARM_MACH_EXYNOS_COMMON_H
+
+void exynos_init_io(struct map_desc *mach_desc, int size);
+void exynos4_init_irq(void);
+
+void exynos4_register_clocks(void);
+void exynos4_setup_clocks(void);
+
+void exynos4210_register_clocks(void);
+void exynos4212_register_clocks(void);
+
+void exynos4_restart(char mode, const char *cmd);
+
+extern struct sys_timer exynos4_timer;
+
+#ifdef CONFIG_ARCH_EXYNOS
+extern int exynos_init(void);
+extern void exynos4_map_io(void);
+extern void exynos4_init_clocks(int xtal);
+extern void exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define exynos4_init_clocks NULL
+#define exynos4_init_uarts NULL
+#define exynos4_map_io NULL
+#define exynos_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
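
A hedged sketch of how a board file is expected to consume this header after the refactor; the machine name and the init_machine body are placeholders, while the hook values mirror the real board files updated later in this diff (mach-nuri.c, mach-origen.c, mach-smdkv310.c):

#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>

#include <plat/cpu.h>

#include "common.h"

static void __init example_board_map_io(void)
{
	exynos_init_io(NULL, 0);	/* static EXYNOS mappings + chip ID probe */
	s3c24xx_init_clocks(24000000);	/* 24 MHz xtal, as on the SMDK boards */
}

static void __init example_board_init(void)
{
	/* platform devices would be registered here */
}

MACHINE_START(EXAMPLE_EXYNOS4, "EXAMPLE-EXYNOS4")
	.atag_offset	= 0x100,
	.init_irq	= exynos4_init_irq,
	.map_io		= example_board_map_io,
	.handle_irq	= gic_handle_irq,
	.init_machine	= example_board_init,
	.timer		= &exynos4_timer,
	.restart	= exynos4_restart,
MACHINE_END
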
diff --git a/arch/arm/mach-exynos/cpu.c b/arch/arm/mach-exynos/cpu.c
deleted file mode 100644
index 90ec247f3b3..00000000000
--- a/arch/arm/mach-exynos/cpu.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* linux/arch/arm/mach-exynos/cpu.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/sched.h>
-#include <linux/sysdev.h>
-
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-
-#include <asm/proc-fns.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/hardware/gic.h>
-
-#include <plat/cpu.h>
-#include <plat/clock.h>
-#include <plat/devs.h>
-#include <plat/exynos4.h>
-#include <plat/adc-core.h>
-#include <plat/sdhci.h>
-#include <plat/fb-core.h>
-#include <plat/fimc-core.h>
-#include <plat/iic-core.h>
-#include <plat/reset.h>
-#include <plat/tv-core.h>
-
-#include <mach/regs-irq.h>
-#include <mach/regs-pmu.h>
-
-unsigned int gic_bank_offset __read_mostly;
-
-extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
- unsigned int irq_start);
-extern void combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq);
-
-/* Initial IO mappings */
-static struct map_desc exynos_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_SYSTIMER,
- .pfn = __phys_to_pfn(EXYNOS_PA_SYSTIMER),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_PMU,
- .pfn = __phys_to_pfn(EXYNOS_PA_PMU),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
- .pfn = __phys_to_pfn(EXYNOS_PA_COMBINER),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GIC_CPU,
- .pfn = __phys_to_pfn(EXYNOS_PA_GIC_CPU),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GIC_DIST,
- .pfn = __phys_to_pfn(EXYNOS_PA_GIC_DIST),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(S3C_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc exynos4_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_CMU,
- .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
- .length = SZ_128K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
- .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
- .length = SZ_8K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_L2CC,
- .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GPIO1,
- .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO1),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GPIO2,
- .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO2),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_GPIO3,
- .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO3),
- .length = SZ_256,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_DMC0,
- .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_SROMC,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_USB_HSPHY,
- .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc exynos4_iodesc0[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_SYSRAM,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc exynos4_iodesc1[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_SYSRAM,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static void exynos_idle(void)
-{
- if (!need_resched())
- cpu_do_idle();
-
- local_irq_enable();
-}
-
-static void exynos4_sw_reset(void)
-{
- __raw_writel(0x1, S5P_SWRESET);
-}
-
-/*
- * exynos_map_io
- *
- * register the standard cpu IO areas
- */
-void __init exynos4_map_io(void)
-{
- iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
- iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
-
- if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
- iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
- else
- iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
-
- /* initialize device information early */
- exynos4_default_sdhci0();
- exynos4_default_sdhci1();
- exynos4_default_sdhci2();
- exynos4_default_sdhci3();
-
- s3c_adc_setname("samsung-adc-v3");
-
- s3c_fimc_setname(0, "exynos4-fimc");
- s3c_fimc_setname(1, "exynos4-fimc");
- s3c_fimc_setname(2, "exynos4-fimc");
- s3c_fimc_setname(3, "exynos4-fimc");
-
- /* The I2C bus controllers are directly compatible with s3c2440 */
- s3c_i2c0_setname("s3c2440-i2c");
- s3c_i2c1_setname("s3c2440-i2c");
- s3c_i2c2_setname("s3c2440-i2c");
-
- s5p_fb_setname(0, "exynos4-fb");
- s5p_hdmi_setname("exynos4-hdmi");
-}
-
-void __init exynos4_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
- s3c24xx_register_baseclocks(xtal);
- s5p_register_clocks(xtal);
-
- if (soc_is_exynos4210())
- exynos4210_register_clocks();
- else if (soc_is_exynos4212() || soc_is_exynos4412())
- exynos4212_register_clocks();
-
- exynos4_register_clocks();
- exynos4_setup_clocks();
-}
-
-static void exynos4_gic_irq_fix_base(struct irq_data *d)
-{
- struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-
- gic_data->cpu_base = S5P_VA_GIC_CPU +
- (gic_bank_offset * smp_processor_id());
-
- gic_data->dist_base = S5P_VA_GIC_DIST +
- (gic_bank_offset * smp_processor_id());
-}
-
-void __init exynos4_init_irq(void)
-{
- int irq;
-
- gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
-
- gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
- gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
- gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
- gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
-
- for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
-
- combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
- COMBINER_IRQ(irq, 0));
- combiner_cascade_irq(irq, IRQ_SPI(irq));
- }
-
- /* The parameters of s5p_init_irq() are for VIC init.
- * Theses parameters should be NULL and 0 because EXYNOS4
- * uses GIC instead of VIC.
- */
- s5p_init_irq(NULL, 0);
-}
-
-struct sysdev_class exynos4_sysclass = {
- .name = "exynos4-core",
-};
-
-static struct sys_device exynos4_sysdev = {
- .cls = &exynos4_sysclass,
-};
-
-static int __init exynos4_core_init(void)
-{
- return sysdev_class_register(&exynos4_sysclass);
-}
-core_initcall(exynos4_core_init);
-
-#ifdef CONFIG_CACHE_L2X0
-static int __init exynos4_l2x0_cache_init(void)
-{
- /* TAG, Data Latency Control: 2cycle */
- __raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
-
- if (soc_is_exynos4210())
- __raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
- else if (soc_is_exynos4212() || soc_is_exynos4412())
- __raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
-
- /* L2X0 Prefetch Control */
- __raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);
-
- /* L2X0 Power Control */
- __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
- S5P_VA_L2CC + L2X0_POWER_CTRL);
-
- l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);
-
- return 0;
-}
-
-early_initcall(exynos4_l2x0_cache_init);
-#endif
-
-int __init exynos_init(void)
-{
- printk(KERN_INFO "EXYNOS: Initializing architecture\n");
-
- /* set idle function */
- pm_idle = exynos_idle;
-
- /* set sw_reset function */
- if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos4412())
- s5p_reset_hook = exynos4_sw_reset;
-
- return sysdev_register(&exynos4_sysdev);
-}
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 35f6502144a..4ebb382c597 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -12,6 +12,8 @@
#include <linux/init.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/time.h>
#include <asm/proc-fns.h>
diff --git a/arch/arm/mach-exynos/dev-ohci.c b/arch/arm/mach-exynos/dev-ohci.c
new file mode 100644
index 00000000000..b8e75300c77
--- /dev/null
+++ b/arch/arm/mach-exynos/dev-ohci.c
@@ -0,0 +1,52 @@
+/* linux/arch/arm/mach-exynos/dev-ohci.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - OHCI support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+#include <mach/ohci.h>
+
+#include <plat/devs.h>
+#include <plat/usb-phy.h>
+
+static struct resource exynos4_ohci_resource[] = {
+ [0] = DEFINE_RES_MEM(EXYNOS4_PA_OHCI, SZ_256),
+ [1] = DEFINE_RES_IRQ(IRQ_USB_HOST),
+};
+
+static u64 exynos4_ohci_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device exynos4_device_ohci = {
+ .name = "exynos-ohci",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(exynos4_ohci_resource),
+ .resource = exynos4_ohci_resource,
+ .dev = {
+ .dma_mask = &exynos4_ohci_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
+void __init exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd)
+{
+ struct exynos4_ohci_platdata *npd;
+
+ npd = s3c_set_platdata(pd, sizeof(struct exynos4_ohci_platdata),
+ &exynos4_device_ohci);
+
+ if (!npd->phy_init)
+ npd->phy_init = s5p_usb_phy_init;
+ if (!npd->phy_exit)
+ npd->phy_exit = s5p_usb_phy_exit;
+}
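
The new device is meant to be hooked up from board code; a minimal usage sketch, mirroring what mach-origen.c and mach-smdkv310.c do further down in this diff (the pdata name here is a placeholder):

#include <mach/ohci.h>
#include <plat/devs.h>

static struct exynos4_ohci_platdata example_ohci_pdata;	/* left empty on purpose */

static void __init example_ohci_init(void)
{
	/* phy_init/phy_exit stay NULL, so the setter fills in the S5P defaults */
	exynos4_ohci_set_platdata(&example_ohci_pdata);
}

/* ...and &exynos4_device_ohci is added to the board's platform_device list. */
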
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 9667c61e64f..b10fcd270f0 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
+#include <linux/of.h>
#include <asm/irq.h>
#include <plat/devs.h>
@@ -35,95 +36,42 @@
static u64 dma_dmamask = DMA_BIT_MASK(32);
-struct dma_pl330_peri pdma0_peri[28] = {
- {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ0,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ2,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART4_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART4_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS4_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS4_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_AC97_MICIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMOUT,
- .rqtype = MEMTODEV,
- },
+u8 pdma0_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM2_RX,
+ DMACH_PCM2_TX,
+ DMACH_MSM_REQ0,
+ DMACH_MSM_REQ2,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART4_RX,
+ DMACH_UART4_TX,
+ DMACH_SLIMBUS0_RX,
+ DMACH_SLIMBUS0_TX,
+ DMACH_SLIMBUS2_RX,
+ DMACH_SLIMBUS2_TX,
+ DMACH_SLIMBUS4_RX,
+ DMACH_SLIMBUS4_TX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
};
struct dma_pl330_platdata exynos4_pdma0_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
- .peri = pdma0_peri,
+ .peri_id = pdma0_peri,
};
struct amba_device exynos4_device_pdma0 = {
@@ -142,86 +90,37 @@ struct amba_device exynos4_device_pdma0 = {
.periphid = 0x00041330,
};
-struct dma_pl330_peri pdma1_peri[25] = {
- {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ1,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ3,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS5_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SLIMBUS5_TX,
- .rqtype = MEMTODEV,
- },
+u8 pdma1_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM1_TX,
+ DMACH_MSM_REQ1,
+ DMACH_MSM_REQ3,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_SLIMBUS1_RX,
+ DMACH_SLIMBUS1_TX,
+ DMACH_SLIMBUS3_RX,
+ DMACH_SLIMBUS3_TX,
+ DMACH_SLIMBUS5_RX,
+ DMACH_SLIMBUS5_TX,
};
struct dma_pl330_platdata exynos4_pdma1_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
- .peri = pdma1_peri,
+ .peri_id = pdma1_peri,
};
struct amba_device exynos4_device_pdma1 = {
@@ -242,7 +141,15 @@ struct amba_device exynos4_device_pdma1 = {
static int __init exynos4_dma_init(void)
{
+ if (of_have_populated_dt())
+ return 0;
+
+ dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
amba_device_register(&exynos4_device_pdma0, &iomem_resource);
+
+ dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
amba_device_register(&exynos4_device_pdma1, &iomem_resource);
return 0;
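
With the platform data reduced to a bare peri_id table and the capabilities set via dma_cap_set() above, clients go through the generic dmaengine API rather than the old per-peripheral descriptors. A minimal, hedged sketch (a real client would pass a filter function that matches the peri_id it needs; none of the names below come from this patch):

#include <linux/dmaengine.h>

static struct dma_chan *example_get_slave_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* matches the capability set on pdma0/pdma1 */

	/* NULL filter: take any channel advertising DMA_SLAVE */
	return dma_request_channel(mask, NULL, NULL);
}
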
diff --git a/arch/arm/mach-exynos/include/mach/entry-macro.S b/arch/arm/mach-exynos/include/mach/entry-macro.S
index f5e9fd8e37b..3ba4f547534 100644
--- a/arch/arm/mach-exynos/include/mach/entry-macro.S
+++ b/arch/arm/mach-exynos/include/mach/entry-macro.S
@@ -9,83 +9,8 @@
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <asm/hardware/gic.h>
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- mov \tmp, #0
-
- mrc p15, 0, \base, c0, c0, 5
- and \base, \base, #3
- cmp \base, #0
- beq 1f
-
- ldr \tmp, =gic_bank_offset
- ldr \tmp, [\tmp]
- cmp \base, #1
- beq 1f
-
- cmp \base, #2
- addeq \tmp, \tmp, \tmp
- addne \tmp, \tmp, \tmp, LSL #1
-
-1: ldr \base, =gic_cpu_base_addr
- ldr \base, [\base]
- add \base, \base, \tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- /*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec. To wit:
- *
- * Interrupts 0-15 are IPI
- * 16-28 are reserved
- * 29-31 are local. We allow 30 to be used for the watchdog.
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * For now, we ignore all local interrupts so only return an interrupt if it's
- * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
- *
- * A simple read from the controller will tell us the number of the highest
- * priority enabled interrupt. We then just need to check whether it is in the
- * valid range for an IRQ (30-1020 inclusive).
- */
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
-
- ldr \tmp, =1021
-
- bic \irqnr, \irqstat, #0x1c00
-
- cmp \irqnr, #15
- cmpcc \irqnr, \irqnr
- cmpne \irqnr, \tmp
- cmpcs \irqnr, \irqnr
- addne \irqnr, \irqnr, #32
-
- .endm
-
- /* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt on the
- * controller, since this requires the original irqstat value which
- * we won't easily be able to recreate later.
- */
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- bic \irqnr, \irqstat, #0x1c00
- cmp \irqnr, #16
- strcc \irqstat, [\base, #GIC_CPU_EOI]
- cmpcs \irqnr, \irqnr
- .endm
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index dfd4b7eecb9..f77bce04789 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -17,13 +17,13 @@
/* PPI: Private Peripheral Interrupt */
-#define IRQ_PPI(x) S5P_IRQ(x+16)
+#define IRQ_PPI(x) (x+16)
#define IRQ_MCT_LOCALTIMER IRQ_PPI(12)
/* SPI: Shared Peripheral Interrupt */
-#define IRQ_SPI(x) S5P_IRQ(x+32)
+#define IRQ_SPI(x) (x+32)
#define IRQ_EINT0 IRQ_SPI(16)
#define IRQ_EINT1 IRQ_SPI(17)
@@ -72,6 +72,9 @@
#define IRQ_IIC5 IRQ_SPI(63)
#define IRQ_IIC6 IRQ_SPI(64)
#define IRQ_IIC7 IRQ_SPI(65)
+#define IRQ_SPI0 IRQ_SPI(66)
+#define IRQ_SPI1 IRQ_SPI(67)
+#define IRQ_SPI2 IRQ_SPI(68)
#define IRQ_USB_HOST IRQ_SPI(70)
#define IRQ_USB_HSOTG IRQ_SPI(71)
@@ -163,7 +166,9 @@
#define IRQ_GPIO2_NR_GROUPS 9
#define IRQ_GPIO_END (S5P_GPIOINT_BASE + S5P_GPIOINT_COUNT)
+#define IRQ_TIMER_BASE (IRQ_GPIO_END + 64)
+
/* Set the default NR_IRQS */
-#define NR_IRQS (IRQ_GPIO_END + 64)
+#define NR_IRQS (IRQ_TIMER_BASE + IRQ_TIMER_COUNT)
#endif /* __ASM_ARCH_IRQS_H */
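
To make the new flat numbering concrete (a worked expansion, not part of the patch): with the S5P_IRQ() offset dropped, IRQ_PPI(12) for the MCT local timer is simply 12 + 16 = 28, IRQ_SPI(16) puts IRQ_EINT0 at 16 + 32 = 48, and NR_IRQS now extends to IRQ_TIMER_BASE + IRQ_TIMER_COUNT, i.e. 64 slots past IRQ_GPIO_END plus the system timer range, instead of stopping at IRQ_GPIO_END + 64.
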
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 058541d45af..c754a22a2bb 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -87,6 +87,10 @@
#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
+#define EXYNOS4_PA_SPI0 0x13920000
+#define EXYNOS4_PA_SPI1 0x13930000
+#define EXYNOS4_PA_SPI2 0x13940000
+
#define EXYNOS4_PA_GPIO1 0x11400000
#define EXYNOS4_PA_GPIO2 0x11000000
@@ -107,6 +111,7 @@
#define EXYNOS4_PA_SROMC 0x12570000
#define EXYNOS4_PA_EHCI 0x12580000
+#define EXYNOS4_PA_OHCI 0x12590000
#define EXYNOS4_PA_HSPHY 0x125B0000
#define EXYNOS4_PA_MFC 0x13400000
@@ -148,8 +153,10 @@
#define S3C_PA_RTC EXYNOS4_PA_RTC
#define S3C_PA_WDT EXYNOS4_PA_WATCHDOG
#define S3C_PA_UART EXYNOS4_PA_UART
+#define S3C_PA_SPI0 EXYNOS4_PA_SPI0
+#define S3C_PA_SPI1 EXYNOS4_PA_SPI1
+#define S3C_PA_SPI2 EXYNOS4_PA_SPI2
-#define S5P_PA_CHIPID EXYNOS4_PA_CHIPID
#define S5P_PA_EHCI EXYNOS4_PA_EHCI
#define S5P_PA_FIMC0 EXYNOS4_PA_FIMC0
#define S5P_PA_FIMC1 EXYNOS4_PA_FIMC1
@@ -166,26 +173,17 @@
#define S5P_PA_ONENAND_DMA EXYNOS4_PA_ONENAND_DMA
#define S5P_PA_SDO EXYNOS4_PA_SDO
#define S5P_PA_SDRAM EXYNOS4_PA_SDRAM
-#define S5P_PA_SROMC EXYNOS4_PA_SROMC
-#define S5P_PA_SYSCON EXYNOS4_PA_SYSCON
-#define S5P_PA_TIMER EXYNOS4_PA_TIMER
#define S5P_PA_VP EXYNOS4_PA_VP
#define SAMSUNG_PA_ADC EXYNOS4_PA_ADC
#define SAMSUNG_PA_ADC1 EXYNOS4_PA_ADC1
#define SAMSUNG_PA_KEYPAD EXYNOS4_PA_KEYPAD
-#define EXYNOS_PA_COMBINER EXYNOS4_PA_COMBINER
-#define EXYNOS_PA_GIC_CPU EXYNOS4_PA_GIC_CPU
-#define EXYNOS_PA_GIC_DIST EXYNOS4_PA_GIC_DIST
-#define EXYNOS_PA_PMU EXYNOS4_PA_PMU
-#define EXYNOS_PA_SYSTIMER EXYNOS4_PA_SYSTIMER
-
/* Compatibility UART */
#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART(x) (EXYNOS4_PA_UART + ((x) * S3C_UART_OFFSET))
#define S5P_PA_UART0 S5P_PA_UART(0)
#define S5P_PA_UART1 S5P_PA_UART(1)
#define S5P_PA_UART2 S5P_PA_UART(2)
diff --git a/arch/arm/mach-exynos/include/mach/ohci.h b/arch/arm/mach-exynos/include/mach/ohci.h
new file mode 100644
index 00000000000..c256c595be5
--- /dev/null
+++ b/arch/arm/mach-exynos/include/mach/ohci.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MACH_EXYNOS_OHCI_H
+#define __MACH_EXYNOS_OHCI_H
+
+struct exynos4_ohci_platdata {
+ int (*phy_init)(struct platform_device *pdev, int type);
+ int (*phy_exit)(struct platform_device *pdev, int type);
+};
+
+extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
+
+#endif /* __MACH_EXYNOS_OHCI_H */
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
new file mode 100644
index 00000000000..576efdf6d09
--- /dev/null
+++ b/arch/arm/mach-exynos/include/mach/spi-clocks.h
@@ -0,0 +1,16 @@
+/* linux/arch/arm/mach-exynos4/include/mach/spi-clocks.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_SPI_CLKS_H
+#define __ASM_ARCH_SPI_CLKS_H __FILE__
+
+/* Must source from SCLK_SPI */
+#define EXYNOS4_SPI_SRCCLK_SCLK 0
+
+#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/include/mach/system.h b/arch/arm/mach-exynos/include/mach/system.h
index 5e3220c18fc..0063a6de3dc 100644
--- a/arch/arm/mach-exynos/include/mach/system.h
+++ b/arch/arm/mach-exynos/include/mach/system.h
@@ -13,8 +13,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H __FILE__
-#include <plat/system-reset.h>
-
static void arch_idle(void)
{
/* nothing here yet */
diff --git a/arch/arm/mach-exynos/include/mach/vmalloc.h b/arch/arm/mach-exynos/include/mach/vmalloc.h
deleted file mode 100644
index 284330e571d..00000000000
--- a/arch/arm/mach-exynos/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Based on arch/arm/mach-s5p6440/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * EXYNOS4 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-exynos/init.c b/arch/arm/mach-exynos/init.c
deleted file mode 100644
index a8a83e3881a..00000000000
--- a/arch/arm/mach-exynos/init.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-exynos4/init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
- [0] = {
- .name = "uclk1",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
-/* uart registration process */
-void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- struct s3c2410_uartcfg *tcfg = cfg;
- u32 ucnt;
-
- for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
- if (!tcfg->clocks) {
- tcfg->has_fracval = 1;
- tcfg->clocks = exynos4_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
- }
- tcfg->flags |= NO_NEED_CHECK_CLKSRC;
- }
-
- s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-exynos/irq-combiner.c b/arch/arm/mach-exynos/irq-combiner.c
deleted file mode 100644
index 5a2758ab055..00000000000
--- a/arch/arm/mach-exynos/irq-combiner.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/* linux/arch/arm/mach-exynos4/irq-combiner.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Based on arch/arm/common/gic.c
- *
- * IRQ COMBINER support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/io.h>
-
-#include <asm/mach/irq.h>
-
-#define COMBINER_ENABLE_SET 0x0
-#define COMBINER_ENABLE_CLEAR 0x4
-#define COMBINER_INT_STATUS 0xC
-
-static DEFINE_SPINLOCK(irq_controller_lock);
-
-struct combiner_chip_data {
- unsigned int irq_offset;
- unsigned int irq_mask;
- void __iomem *base;
-};
-
-static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
-
-static inline void __iomem *combiner_base(struct irq_data *data)
-{
- struct combiner_chip_data *combiner_data =
- irq_data_get_irq_chip_data(data);
-
- return combiner_data->base;
-}
-
-static void combiner_mask_irq(struct irq_data *data)
-{
- u32 mask = 1 << (data->irq % 32);
-
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
-}
-
-static void combiner_unmask_irq(struct irq_data *data)
-{
- u32 mask = 1 << (data->irq % 32);
-
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
-}
-
-static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
-{
- struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
- unsigned int cascade_irq, combiner_irq;
- unsigned long status;
-
- chained_irq_enter(chip, desc);
-
- spin_lock(&irq_controller_lock);
- status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
- spin_unlock(&irq_controller_lock);
- status &= chip_data->irq_mask;
-
- if (status == 0)
- goto out;
-
- combiner_irq = __ffs(status);
-
- cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
- if (unlikely(cascade_irq >= NR_IRQS))
- do_bad_IRQ(cascade_irq, desc);
- else
- generic_handle_irq(cascade_irq);
-
- out:
- chained_irq_exit(chip, desc);
-}
-
-static struct irq_chip combiner_chip = {
- .name = "COMBINER",
- .irq_mask = combiner_mask_irq,
- .irq_unmask = combiner_unmask_irq,
-};
-
-void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
-{
- if (combiner_nr >= MAX_COMBINER_NR)
- BUG();
- if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
- BUG();
- irq_set_chained_handler(irq, combiner_handle_cascade_irq);
-}
-
-void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
- unsigned int irq_start)
-{
- unsigned int i;
-
- if (combiner_nr >= MAX_COMBINER_NR)
- BUG();
-
- combiner_data[combiner_nr].base = base;
- combiner_data[combiner_nr].irq_offset = irq_start;
- combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
-
- /* Disable all interrupts */
-
- __raw_writel(combiner_data[combiner_nr].irq_mask,
- base + COMBINER_ENABLE_CLEAR);
-
- /* Setup the Linux IRQ subsystem */
-
- for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
- + MAX_IRQ_IN_COMBINER; i++) {
- irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
- irq_set_chip_data(i, &combiner_data[combiner_nr]);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
-}
diff --git a/arch/arm/mach-exynos/irq-eint.c b/arch/arm/mach-exynos/irq-eint.c
deleted file mode 100644
index badb8c66fc9..00000000000
--- a/arch/arm/mach-exynos/irq-eint.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* linux/arch/arm/mach-exynos4/irq-eint.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4 - IRQ EINT support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/gpio.h>
-
-#include <plat/pm.h>
-#include <plat/cpu.h>
-#include <plat/gpio-cfg.h>
-
-#include <mach/regs-gpio.h>
-
-#include <asm/mach/irq.h>
-
-static DEFINE_SPINLOCK(eint_lock);
-
-static unsigned int eint0_15_data[16];
-
-static unsigned int exynos4_get_irq_nr(unsigned int number)
-{
- u32 ret = 0;
-
- switch (number) {
- case 0 ... 3:
- ret = (number + IRQ_EINT0);
- break;
- case 4 ... 7:
- ret = (number + (IRQ_EINT4 - 4));
- break;
- case 8 ... 15:
- ret = (number + (IRQ_EINT8 - 8));
- break;
- default:
- printk(KERN_ERR "number available : %d\n", number);
- }
-
- return ret;
-}
-
-static inline void exynos4_irq_eint_mask(struct irq_data *data)
-{
- u32 mask;
-
- spin_lock(&eint_lock);
- mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
- mask |= eint_irq_to_bit(data->irq);
- __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
- spin_unlock(&eint_lock);
-}
-
-static void exynos4_irq_eint_unmask(struct irq_data *data)
-{
- u32 mask;
-
- spin_lock(&eint_lock);
- mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
- mask &= ~(eint_irq_to_bit(data->irq));
- __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
- spin_unlock(&eint_lock);
-}
-
-static inline void exynos4_irq_eint_ack(struct irq_data *data)
-{
- __raw_writel(eint_irq_to_bit(data->irq),
- S5P_EINT_PEND(EINT_REG_NR(data->irq)));
-}
-
-static void exynos4_irq_eint_maskack(struct irq_data *data)
-{
- exynos4_irq_eint_mask(data);
- exynos4_irq_eint_ack(data);
-}
-
-static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
- int offs = EINT_OFFSET(data->irq);
- int shift;
- u32 ctrl, mask;
- u32 newvalue = 0;
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- newvalue = S5P_IRQ_TYPE_EDGE_RISING;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
- break;
-
- default:
- printk(KERN_ERR "No such irq type %d", type);
- return -EINVAL;
- }
-
- shift = (offs & 0x7) * 4;
- mask = 0x7 << shift;
-
- spin_lock(&eint_lock);
- ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
- ctrl &= ~mask;
- ctrl |= newvalue << shift;
- __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
- spin_unlock(&eint_lock);
-
- switch (offs) {
- case 0 ... 7:
- s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
- break;
- case 8 ... 15:
- s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
- break;
- case 16 ... 23:
- s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
- break;
- case 24 ... 31:
- s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
- break;
- default:
- printk(KERN_ERR "No such irq number %d", offs);
- }
-
- return 0;
-}
-
-static struct irq_chip exynos4_irq_eint = {
- .name = "exynos4-eint",
- .irq_mask = exynos4_irq_eint_mask,
- .irq_unmask = exynos4_irq_eint_unmask,
- .irq_mask_ack = exynos4_irq_eint_maskack,
- .irq_ack = exynos4_irq_eint_ack,
- .irq_set_type = exynos4_irq_eint_set_type,
-#ifdef CONFIG_PM
- .irq_set_wake = s3c_irqext_wake,
-#endif
-};
-
-/* exynos4_irq_demux_eint
- *
- * This function demuxes the IRQ from from EINTs 16 to 31.
- * It is designed to be inlined into the specific handler
- * s5p_irq_demux_eintX_Y.
- *
- * Each EINT pend/mask registers handle eight of them.
- */
-static inline void exynos4_irq_demux_eint(unsigned int start)
-{
- unsigned int irq;
-
- u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
- u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
-
- status &= ~mask;
- status &= 0xff;
-
- while (status) {
- irq = fls(status) - 1;
- generic_handle_irq(irq + start);
- status &= ~(1 << irq);
- }
-}
-
-static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_get_chip(irq);
- chained_irq_enter(chip, desc);
- exynos4_irq_demux_eint(IRQ_EINT(16));
- exynos4_irq_demux_eint(IRQ_EINT(24));
- chained_irq_exit(chip, desc);
-}
-
-static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
-{
- u32 *irq_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
-
- chained_irq_enter(chip, desc);
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_ack)
- chip->irq_ack(&desc->irq_data);
-
- generic_handle_irq(*irq_data);
-
- chip->irq_unmask(&desc->irq_data);
- chained_irq_exit(chip, desc);
-}
-
-int __init exynos4_init_irq_eint(void)
-{
- int irq;
-
- for (irq = 0 ; irq <= 31 ; irq++) {
- irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
- handle_level_irq);
- set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
- }
-
- irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
-
- for (irq = 0 ; irq <= 15 ; irq++) {
- eint0_15_data[irq] = IRQ_EINT(irq);
-
- irq_set_handler_data(exynos4_get_irq_nr(irq),
- &eint0_15_data[irq]);
- irq_set_chained_handler(exynos4_get_irq_nr(irq),
- exynos4_irq_eint0_15);
- }
-
- return 0;
-}
-
-arch_initcall(exynos4_init_irq_eint);
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index f0ca6c157d2..d726fcd3acf 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -16,11 +16,11 @@
#include <linux/smsc911x.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/cpu.h>
#include <plat/devs.h>
-#include <plat/exynos4.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
@@ -28,6 +28,8 @@
#include <mach/map.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define ARMLEX4210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -187,7 +189,7 @@ static void __init armlex4210_smsc911x_init(void)
static void __init armlex4210_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(armlex4210_uartcfgs,
ARRAY_SIZE(armlex4210_uartcfgs));
@@ -210,6 +212,8 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = armlex4210_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = armlex4210_machine_init,
.timer = &exynos4_timer,
+ .restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
new file mode 100644
index 00000000000..85fa02767d6
--- /dev/null
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -0,0 +1,85 @@
+/*
+ * Samsung's Exynos4210 flattened device tree enabled machine
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2010-2011 Linaro Ltd.
+ * www.linaro.org
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/of_platform.h>
+#include <linux/serial_core.h>
+
+#include <asm/mach/arch.h>
+#include <mach/map.h>
+
+#include <plat/cpu.h>
+#include <plat/regs-serial.h>
+#include <plat/exynos4.h>
+
+/*
+ * The following lookup table is used to override device names when devices
+ * are registered from the device tree. This is a temporary addition to
+ * enable device tree support for the Exynos4 architecture.
+ *
+ * For drivers that require platform data to be provided from the machine
+ * file, a platform data pointer can also be supplied along with the
+ * devices names. Usually, the platform data elements that cannot be parsed
+ * from the device tree by the drivers (example: function pointers) are
+ * supplied. But it should be noted that this is a temporary mechanism and
+ * at some point, the drivers should be capable of parsing all the platform
+ * data from the device tree.
+ */
+static const struct of_dev_auxdata exynos4210_auxdata_lookup[] __initconst = {
+ OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART0,
+ "exynos4210-uart.0", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART1,
+ "exynos4210-uart.1", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART2,
+ "exynos4210-uart.2", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART3,
+ "exynos4210-uart.3", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(0),
+ "exynos4-sdhci.0", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(1),
+ "exynos4-sdhci.1", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(2),
+ "exynos4-sdhci.2", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(3),
+ "exynos4-sdhci.3", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS4_PA_IIC(0),
+ "s3c2440-i2c.0", NULL),
+ OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA0, "dma-pl330.0", NULL),
+ OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA1, "dma-pl330.1", NULL),
+ {},
+};
+
+static void __init exynos4210_dt_map_io(void)
+{
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s3c24xx_init_clocks(24000000);
+}
+
+static void __init exynos4210_dt_machine_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ exynos4210_auxdata_lookup, NULL);
+}
+
+static char const *exynos4210_dt_compat[] __initdata = {
+ "samsung,exynos4210",
+ NULL
+};
+
+DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
+ /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
+ .init_irq = exynos4_init_irq,
+ .map_io = exynos4210_dt_map_io,
+ .init_machine = exynos4210_dt_machine_init,
+ .timer = &exynos4_timer,
+ .dt_compat = exynos4210_dt_compat,
+MACHINE_END
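
The comment above the lookup table notes that platform data can be attached to an auxdata entry when a driver cannot yet parse everything from the device tree. A hedged illustration of what such an entry could look like (the second I2C controller and its pdata are placeholders, not part of this patch):

#include <linux/of_platform.h>
#include <mach/map.h>
#include <plat/iic.h>

static struct s3c2410_platform_i2c example_i2c1_pdata = {
	.frequency	= 400 * 1000,	/* a value the driver cannot yet read from DT */
};

static const struct of_dev_auxdata example_auxdata[] __initconst = {
	OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS4_PA_IIC(1),
		       "s3c2440-i2c.1", &example_i2c1_pdata),
	{},
};
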
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 236bbe18716..b895ec03110 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -32,12 +32,12 @@
#include <media/v4l2-mediabus.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/adc.h>
#include <plat/regs-fb-v4.h>
#include <plat/regs-serial.h>
-#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
@@ -54,6 +54,8 @@
#include <mach/map.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define NURI_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -247,13 +249,8 @@ static void nuri_lcd_power_on(struct plat_lcd_data *pd, unsigned int power)
static int nuri_bl_init(struct device *dev)
{
- int ret, gpio = EXYNOS4_GPE2(3);
-
- ret = gpio_request(gpio, "LCD_LDO_EN");
- if (!ret)
- gpio_direction_output(gpio, 0);
-
- return ret;
+ return gpio_request_one(EXYNOS4_GPE2(3), GPIOF_OUT_INIT_LOW,
+ "LCD_LDO_EN");
}
static int nuri_bl_notify(struct device *dev, int brightness)
@@ -1283,7 +1280,7 @@ static struct platform_device *nuri_devices[] __initdata = {
static void __init nuri_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
}
@@ -1333,7 +1330,9 @@ MACHINE_START(NURI, "NURI")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = nuri_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = nuri_machine_init,
.timer = &exynos4_timer,
.reserve = &nuri_reserve,
+ .restart = exynos4_restart,
MACHINE_END
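
Several boards in this series fold the gpio_request()/gpio_direction_output() pair into a single gpio_request_one() call, as in the LCD_LDO_EN hunk above. For reference, a generic gpiolib sketch of the equivalence (nothing here is specific to this patch):

#include <linux/gpio.h>

static int example_claim_output(unsigned int gpio)
{
	/* old style: two calls, easy to lose the error from the second one
	 *	gpio_request(gpio, "EXAMPLE");
	 *	gpio_direction_output(gpio, 1);
	 * new style: one call that drives the line high as soon as it is claimed
	 */
	return gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "EXAMPLE");
}
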
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index f80b563f2be..2b11e046d39 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -22,13 +22,13 @@
#include <linux/lcd.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <video/platform_lcd.h>
#include <plat/regs-serial.h>
#include <plat/regs-fb-v4.h>
-#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
@@ -41,8 +41,11 @@
#include <plat/fb.h>
#include <plat/mfc.h>
+#include <mach/ohci.h>
#include <mach/map.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define ORIGEN_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -483,6 +486,16 @@ static void __init origen_ehci_init(void)
s5p_ehci_set_platdata(pdata);
}
+/* USB OHCI */
+static struct exynos4_ohci_platdata origen_ohci_pdata;
+
+static void __init origen_ohci_init(void)
+{
+ struct exynos4_ohci_platdata *pdata = &origen_ohci_pdata;
+
+ exynos4_ohci_set_platdata(pdata);
+}
+
static struct gpio_keys_button origen_gpio_keys_table[] = {
{
.code = KEY_MENU,
@@ -606,6 +619,7 @@ static struct platform_device *origen_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&s5p_device_mixer,
+ &exynos4_device_ohci,
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_TV],
&exynos4_device_pd[PD_G3D],
@@ -638,7 +652,7 @@ static void s5p_tv_setup(void)
static void __init origen_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(origen_uartcfgs, ARRAY_SIZE(origen_uartcfgs));
}
@@ -670,6 +684,7 @@ static void __init origen_machine_init(void)
s3c_sdhci0_set_platdata(&origen_hsmmc0_pdata);
origen_ehci_init();
+ origen_ohci_init();
clk_xusbxti.rate = 24000000;
s5p_tv_setup();
@@ -694,7 +709,9 @@ MACHINE_START(ORIGEN, "ORIGEN")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = origen_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = origen_machine_init,
.timer = &exynos4_timer,
.reserve = &origen_reserve,
+ .restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fcf2e0e23d5..d00e4f016a6 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -21,13 +21,13 @@
#include <linux/serial_core.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/backlight.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
-#include <plat/exynos4.h>
#include <plat/gpio-cfg.h>
#include <plat/iic.h>
#include <plat/keypad.h>
@@ -36,6 +36,8 @@
#include <mach/map.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDK4X12_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -249,7 +251,7 @@ static void __init smdk4x12_map_io(void)
{
clk_xusbxti.rate = 24000000;
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(smdk4x12_uartcfgs, ARRAY_SIZE(smdk4x12_uartcfgs));
}
@@ -287,8 +289,10 @@ MACHINE_START(SMDK4212, "SMDK4212")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdk4x12_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
.timer = &exynos4_timer,
+ .restart = exynos4_restart,
MACHINE_END
MACHINE_START(SMDK4412, "SMDK4412")
@@ -297,6 +301,8 @@ MACHINE_START(SMDK4412, "SMDK4412")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdk4x12_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
.timer = &exynos4_timer,
+ .restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index cec2afabe7b..b2c5557f50e 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -21,13 +21,13 @@
#include <linux/pwm_backlight.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <video/platform_lcd.h>
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
#include <plat/regs-fb-v4.h>
-#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
@@ -42,6 +42,9 @@
#include <plat/clock.h>
#include <mach/map.h>
+#include <mach/ohci.h>
+
+#include "common.h"
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV310_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -129,9 +132,7 @@ static void lcd_lte480wv_set_power(struct plat_lcd_data *pd,
gpio_free(EXYNOS4_GPD0(1));
#endif
/* fire nRESET on power up */
- gpio_request(EXYNOS4_GPX0(6), "GPX0");
-
- gpio_direction_output(EXYNOS4_GPX0(6), 1);
+ gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0");
mdelay(100);
gpio_set_value(EXYNOS4_GPX0(6), 0);
@@ -245,6 +246,16 @@ static void __init smdkv310_ehci_init(void)
s5p_ehci_set_platdata(pdata);
}
+/* USB OHCI */
+static struct exynos4_ohci_platdata smdkv310_ohci_pdata;
+
+static void __init smdkv310_ohci_init(void)
+{
+ struct exynos4_ohci_platdata *pdata = &smdkv310_ohci_pdata;
+
+ exynos4_ohci_set_platdata(pdata);
+}
+
static struct platform_device *smdkv310_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
@@ -261,6 +272,7 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_fimc3,
&exynos4_device_ac97,
&exynos4_device_i2s0,
+ &exynos4_device_ohci,
&samsung_device_keypad,
&s5p_device_mfc,
&s5p_device_mfc_l,
@@ -332,7 +344,7 @@ static void s5p_tv_setup(void)
static void __init smdkv310_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(smdkv310_uartcfgs, ARRAY_SIZE(smdkv310_uartcfgs));
}
@@ -363,6 +375,7 @@ static void __init smdkv310_machine_init(void)
s5p_fimd0_set_platdata(&smdkv310_lcd0_pdata);
smdkv310_ehci_init();
+ smdkv310_ohci_init();
clk_xusbxti.rate = 24000000;
platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
@@ -375,9 +388,11 @@ MACHINE_START(SMDKV310, "SMDKV310")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdkv310_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
.timer = &exynos4_timer,
.reserve = &smdkv310_reserve,
+ .restart = exynos4_restart,
MACHINE_END
MACHINE_START(SMDKC210, "SMDKC210")
@@ -385,6 +400,8 @@ MACHINE_START(SMDKC210, "SMDKC210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdkv310_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
.timer = &exynos4_timer,
+ .restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index a2a177ff4b4..37ac93e8d6d 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -24,10 +24,10 @@
#include <linux/i2c/atmel_mxt_ts.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
-#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/iic.h>
@@ -47,6 +47,8 @@
#include <media/s5p_fimc.h>
#include <media/m5mols.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define UNIVERSAL_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -608,8 +610,7 @@ static void __init universal_tsp_init(void)
/* TSP_LDO_ON: XMDMADDR_11 */
gpio = EXYNOS4_GPE2(3);
- gpio_request(gpio, "TSP_LDO_ON");
- gpio_direction_output(gpio, 1);
+ gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "TSP_LDO_ON");
gpio_export(gpio, 0);
/* TSP_INT: XMDMADDR_7 */
@@ -669,8 +670,7 @@ static void __init universal_touchkey_init(void)
i2c_gpio12_devs[0].irq = gpio_to_irq(gpio);
gpio = EXYNOS4_GPE3(3); /* XMDMDATA_3 */
- gpio_request(gpio, "3_TOUCH_EN");
- gpio_direction_output(gpio, 1);
+ gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "3_TOUCH_EN");
}
static struct s3c2410_platform_i2c universal_i2c0_platdata __initdata = {
@@ -992,7 +992,7 @@ static struct platform_device *universal_devices[] __initdata = {
static void __init universal_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
}
@@ -1000,9 +1000,7 @@ static void __init universal_map_io(void)
void s5p_tv_setup(void)
{
/* direct HPD to HDMI chip */
- gpio_request(EXYNOS4_GPX3(7), "hpd-plug");
-
- gpio_direction_input(EXYNOS4_GPX3(7));
+ gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug");
s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
@@ -1058,7 +1056,9 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = universal_map_io,
+ .handle_irq = gic_handle_irq,
.init_machine = universal_machine_init,
.timer = &exynos4_timer,
.reserve = &universal_reserve,
+ .restart = exynos4_restart,
MACHINE_END
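
The Exynos board updates above repeatedly collapse a gpio_request() plus gpio_direction_output()/_input() pair into a single gpio_request_one() call. The equivalence, shown on a hypothetical pin and label rather than a real board line:

#include <linux/gpio.h>

#define DEMO_GPIO	100	/* placeholder pin number */

static int demo_gpio_setup(void)
{
	int err;

	/* old style: request the line, then set direction and level */
	err = gpio_request(DEMO_GPIO, "demo-pin");
	if (err)
		return err;
	gpio_direction_output(DEMO_GPIO, 1);
	gpio_free(DEMO_GPIO);

	/* new style: one call requests, configures and sets the level */
	return gpio_request_one(DEMO_GPIO, GPIOF_OUT_INIT_HIGH, "demo-pin");
}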
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 97343df8f13..85b5527d091 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
char name[10];
};
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
static void exynos4_mct_write(unsigned int value, void *addr)
{
void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
}
#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
void local_timer_stop(struct clock_event_device *evt)
{
+ unsigned int cpu = smp_processor_id();
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
if (mct_int_type == MCT_INT_SPI)
- disable_irq(evt->irq);
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
else
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
clk_rate = clk_get_rate(mct_clk);
+#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
int err;
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
WARN(err, "MCT: can't request IRQ %d (%d)\n",
IRQ_MCT_LOCALTIMER, err);
}
+#endif /* CONFIG_LOCAL_TIMERS */
}
static void __init exynos4_timer_init(void)
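
The local_timer_stop() change above tears down the statically installed tick irqaction with remove_irq() instead of merely disabling the line. For reference, the setup_irq()/remove_irq() pairing it relies on looks roughly like this; the IRQ number, names and handler are placeholders:

#include <linux/interrupt.h>
#include <linux/irq.h>

#define DEMO_TICK_IRQ	42		/* placeholder IRQ number */

static irqreturn_t demo_tick_isr(int irq, void *dev_id)
{
	/* acknowledge the timer and run the tick here */
	return IRQ_HANDLED;
}

static struct irqaction demo_tick_irqaction = {
	.name		= "demo_tick",
	.flags		= IRQF_TIMER,
	.handler	= demo_tick_isr,
};

static void demo_tick_start(void)
{
	/* install a statically allocated irqaction on the line */
	setup_irq(DEMO_TICK_IRQ, &demo_tick_irqaction);
}

static void demo_tick_stop(void)
{
	/* and detach it again, as local_timer_stop() now does */
	remove_irq(DEMO_TICK_IRQ, &demo_tick_irqaction);
}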
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 69ffb2fb387..60bc45e3e70 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -32,7 +32,6 @@
#include <plat/cpu.h>
-extern unsigned int gic_bank_offset;
extern void exynos4_secondary_startup(void);
#define CPU1_BOOT_REG (samsung_rev() == EXYNOS4210_REV_1_1 ? \
@@ -65,31 +64,6 @@ static void __iomem *scu_base_addr(void)
static DEFINE_SPINLOCK(boot_lock);
-static void __cpuinit exynos4_gic_secondary_init(void)
-{
- void __iomem *dist_base = S5P_VA_GIC_DIST +
- (gic_bank_offset * smp_processor_id());
- void __iomem *cpu_base = S5P_VA_GIC_CPU +
- (gic_bank_offset * smp_processor_id());
- int i;
-
- /*
- * Deal with the banked PPI and SGI interrupts - disable all
- * PPI interrupts, ensure all SGI interrupts are enabled.
- */
- __raw_writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
- __raw_writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
- /*
- * Set priority on PPI and SGI interrupts
- */
- for (i = 0; i < 32; i += 4)
- __raw_writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
-
- __raw_writel(0xf0, cpu_base + GIC_CPU_PRIMASK);
- __raw_writel(1, cpu_base + GIC_CPU_CTRL);
-}
-
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/*
@@ -97,7 +71,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
- exynos4_gic_secondary_init();
+ gic_secondary_init(0);
/*
* let the primary processor know we're out of the
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 509a435afd4..a4f61a43c7b 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -23,6 +23,7 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/smp_scu.h>
#include <plat/cpu.h>
#include <plat/pm.h>
@@ -205,7 +206,7 @@ static void exynos4_pm_prepare(void)
}
-static int exynos4_pm_add(struct sys_device *sysdev)
+static int exynos4_pm_add(struct device *dev)
{
pm_cpu_prep = exynos4_pm_prepare;
pm_cpu_sleep = exynos4_cpu_suspend;
@@ -213,27 +214,6 @@ static int exynos4_pm_add(struct sys_device *sysdev)
return 0;
}
-/* This function copy from linux/arch/arm/kernel/smp_scu.c */
-
-void exynos4_scu_enable(void __iomem *scu_base)
-{
- u32 scu_ctrl;
-
- scu_ctrl = __raw_readl(scu_base);
- /* already enabled? */
- if (scu_ctrl & 1)
- return;
-
- scu_ctrl |= 1;
- __raw_writel(scu_ctrl, scu_base);
-
- /*
- * Ensure that the data accessed by CPU0 before the SCU was
- * initialised is visible to the other CPUs.
- */
- flush_cache_all();
-}
-
static unsigned long pll_base_rate;
static void exynos4_restore_pll(void)
@@ -301,8 +281,10 @@ static void exynos4_restore_pll(void)
} while (epll_wait || vpll_wait);
}
-static struct sysdev_driver exynos4_pm_driver = {
- .add = exynos4_pm_add,
+static struct subsys_interface exynos4_pm_interface = {
+ .name = "exynos4_pm",
+ .subsys = &exynos4_subsys,
+ .add_dev = exynos4_pm_add,
};
static __init int exynos4_pm_drvinit(void)
@@ -325,7 +307,7 @@ static __init int exynos4_pm_drvinit(void)
clk_put(pll_base);
}
- return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver);
+ return subsys_interface_register(&exynos4_pm_interface);
}
arch_initcall(exynos4_pm_drvinit);
@@ -402,7 +384,7 @@ static void exynos4_pm_resume(void)
exynos4_restore_pll();
- exynos4_scu_enable(S5P_VA_SCU);
+ scu_enable(S5P_VA_SCU);
#ifdef CONFIG_CACHE_L2X0
s3c_pm_do_restore_core(exynos4_l2cc_save, ARRAY_SIZE(exynos4_l2cc_save));
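
The pm.c hunks convert from the old sysdev_driver registration to the driver-core subsys_interface: an interface is registered against a subsystem's bus_type and its add_dev callback runs for each device on that bus. A stripped-down sketch of the same shape, with placeholder names and the callback prototype mirroring the patch above:

#include <linux/device.h>
#include <linux/init.h>

/* Placeholder bus_type; the SoC code is assumed to export it, just as
 * exynos4_subsys is made available to the pm code above. */
extern struct bus_type demo_subsys;

static int demo_pm_add(struct device *dev)
{
	/* install the per-SoC suspend/resume callbacks here */
	return 0;
}

static struct subsys_interface demo_pm_interface = {
	.name		= "demo_pm",
	.subsys		= &demo_subsys,
	.add_dev	= demo_pm_add,
};

static __init int demo_pm_drvinit(void)
{
	return subsys_interface_register(&demo_pm_interface);
}
arch_initcall(demo_pm_drvinit);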
diff --git a/arch/arm/mach-exynos/setup-sdhci.c b/arch/arm/mach-exynos/setup-sdhci.c
deleted file mode 100644
index 92937b41090..00000000000
--- a/arch/arm/mach-exynos/setup-sdhci.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-exynos4/setup-sdhci.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4 - Helper functions for setting up SDHCI device(s) (HSMMC)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-
-/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-
-char *exynos4_hsmmc_clksrcs[4] = {
- [0] = NULL,
- [1] = NULL,
- [2] = "sclk_mmc", /* mmc_bus */
- [3] = NULL,
-};
diff --git a/arch/arm/mach-exynos/setup-spi.c b/arch/arm/mach-exynos/setup-spi.c
new file mode 100644
index 00000000000..833ff40ee0e
--- /dev/null
+++ b/arch/arm/mach-exynos/setup-spi.c
@@ -0,0 +1,72 @@
+/* linux/arch/arm/mach-exynos4/setup-spi.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+ .clk_from_cmu = true,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+ .clk_from_cmu = true,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(EXYNOS4_GPB(4), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(EXYNOS4_GPB(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(EXYNOS4_GPB(6), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI2
+struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+ .clk_from_cmu = true,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi2_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(EXYNOS4_GPC1(1), S3C_GPIO_SFN(5));
+ s3c_gpio_setpull(EXYNOS4_GPC1(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2,
+ S3C_GPIO_SFN(5), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c
index 39aca045f66..41743d21e8c 100644
--- a/arch/arm/mach-exynos/setup-usb-phy.c
+++ b/arch/arm/mach-exynos/setup-usb-phy.c
@@ -19,6 +19,13 @@
#include <plat/cpu.h>
#include <plat/usb-phy.h>
+static atomic_t host_usage;
+
+static int exynos4_usb_host_phy_is_on(void)
+{
+ return (readl(EXYNOS4_PHYPWR) & PHY1_STD_ANALOG_POWERDOWN) ? 0 : 1;
+}
+
static int exynos4_usb_phy1_init(struct platform_device *pdev)
{
struct clk *otg_clk;
@@ -27,6 +34,8 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
u32 rstcon;
int err;
+ atomic_inc(&host_usage);
+
otg_clk = clk_get(&pdev->dev, "otg");
if (IS_ERR(otg_clk)) {
dev_err(&pdev->dev, "Failed to get otg clock\n");
@@ -39,6 +48,9 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
return err;
}
+ if (exynos4_usb_host_phy_is_on())
+ return 0;
+
writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE,
S5P_USBHOST_PHY_CONTROL);
@@ -95,6 +107,9 @@ static int exynos4_usb_phy1_exit(struct platform_device *pdev)
struct clk *otg_clk;
int err;
+ if (atomic_dec_return(&host_usage) > 0)
+ return 0;
+
otg_clk = clk_get(&pdev->dev, "otg");
if (IS_ERR(otg_clk)) {
dev_err(&pdev->dev, "Failed to get otg clock\n");
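
The host_usage counter introduced above lets the EHCI and OHCI drivers share PHY1: the first caller powers the PHY up and only the last caller powers it down. The underlying reference-counting idea, with the register accesses replaced by a stub:

#include <linux/atomic.h>
#include <linux/kernel.h>

static atomic_t demo_phy_users;		/* implicitly starts at 0 */

static void demo_phy_power(bool on)
{
	/* stands in for the PHYPWR/PHY_CONTROL register writes above */
	pr_info("demo phy power %s\n", on ? "on" : "off");
}

static int demo_phy_init(void)
{
	/* the first user powers the PHY up, later users only take a ref */
	if (atomic_inc_return(&demo_phy_users) == 1)
		demo_phy_power(true);
	return 0;
}

static int demo_phy_exit(void)
{
	/* only the last user actually powers the PHY back down */
	if (atomic_dec_return(&demo_phy_users) > 0)
		return 0;

	demo_phy_power(false);
	return 0;
}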
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index d5f17854092..25b453601ac 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -86,9 +86,10 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi)
MACHINE_START(CATS, "Chalice-CATS")
/* Maintainer: Philip Blundell */
.atag_offset = 0x100,
- .soft_reboot = 1,
+ .restart_mode = 's',
.fixup = fixup_cats,
.map_io = footbridge_map_io,
.init_irq = footbridge_init_irq,
.timer = &isa_timer,
+ .restart = footbridge_restart,
MACHINE_END
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 38a44f9b9da..41978ee4f9d 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -199,6 +199,33 @@ void __init footbridge_map_io(void)
iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
}
+void footbridge_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* Jump into the ROM */
+ soft_restart(0x41000000);
+ } else {
+ /*
+ * Force the watchdog to do a CPU reset.
+ *
+ * After making sure that the watchdog is disabled
+ * (so we can change the timer registers) we first
+ * enable the timer to autoreload itself. Next, the
+ * timer interval is set really short and any
+ * current interrupt request is cleared (so we can
+ * see an edge transition). Finally, TIMER4 is
+ * enabled as the watchdog.
+ */
+ *CSR_SA110_CNTL &= ~(1 << 13);
+ *CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE |
+ TIMER_CNTL_AUTORELOAD |
+ TIMER_CNTL_DIV16;
+ *CSR_TIMER4_LOAD = 0x2;
+ *CSR_TIMER4_CLR = 0;
+ *CSR_SA110_CNTL |= (1 << 13);
+ }
+}
+
#ifdef CONFIG_FOOTBRIDGE_ADDIN
static inline unsigned long fb_bus_sdram_offset(void)
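
footbridge_restart() is one instance of the pattern running through this series: board reset code moves out of arch_reset() in mach/system.h and into a handler referenced by the machine descriptor's .restart field. A minimal sketch of such a handler for a hypothetical board:

#include <linux/kernel.h>
#include <asm/system.h>		/* soft_restart() lives here at this point */

static void demo_board_restart(char mode, const char *cmd)
{
	if (mode == 's') {
		/* soft reset: re-enter through physical address 0 */
		soft_restart(0);
	} else {
		/* hard reset: poke the board watchdog or reset controller */
	}
}

/* The handler is then wired up from the machine descriptor:
 *
 *	MACHINE_START(DEMO, "Demo board")
 *		...
 *		.restart	= demo_board_restart,
 *	MACHINE_END
 */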
diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h
index b05e662d21a..c9767b892cb 100644
--- a/arch/arm/mach-footbridge/common.h
+++ b/arch/arm/mach-footbridge/common.h
@@ -8,3 +8,4 @@ extern void footbridge_map_io(void);
extern void footbridge_init_irq(void);
extern void isa_init_irq(unsigned int irq);
+extern void footbridge_restart(char, const char *);
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index 012210cf7d1..27716a7e5fc 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -21,5 +21,6 @@ MACHINE_START(EBSA285, "EBSA285")
.map_io = footbridge_map_io,
.init_irq = footbridge_init_irq,
.timer = &footbridge_timer,
+ .restart = footbridge_restart,
MACHINE_END
diff --git a/arch/arm/mach-footbridge/include/mach/system.h b/arch/arm/mach-footbridge/include/mach/system.h
index 0b293156620..a174a5841bc 100644
--- a/arch/arm/mach-footbridge/include/mach/system.h
+++ b/arch/arm/mach-footbridge/include/mach/system.h
@@ -7,63 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/io.h>
-#include <asm/hardware/dec21285.h>
-#include <mach/hardware.h>
-#include <asm/leds.h>
-#include <asm/mach-types.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (mode == 's') {
- /*
- * Jump into the ROM
- */
- cpu_reset(0x41000000);
- } else {
- if (machine_is_netwinder()) {
- /* open up the SuperIO chip
- */
- outb(0x87, 0x370);
- outb(0x87, 0x370);
-
- /* aux function group 1 (logical device 7)
- */
- outb(0x07, 0x370);
- outb(0x07, 0x371);
-
- /* set GP16 for WD-TIMER output
- */
- outb(0xe6, 0x370);
- outb(0x00, 0x371);
-
- /* set a RED LED and toggle WD_TIMER for rebooting
- */
- outb(0xc4, 0x338);
- } else {
- /*
- * Force the watchdog to do a CPU reset.
- *
- * After making sure that the watchdog is disabled
- * (so we can change the timer registers) we first
- * enable the timer to autoreload itself. Next, the
- * timer interval is set really short and any
- * current interrupt request is cleared (so we can
- * see an edge transition). Finally, TIMER4 is
- * enabled as the watchdog.
- */
- *CSR_SA110_CNTL &= ~(1 << 13);
- *CSR_TIMER4_CNTL = TIMER_CNTL_ENABLE |
- TIMER_CNTL_AUTORELOAD |
- TIMER_CNTL_DIV16;
- *CSR_TIMER4_LOAD = 0x2;
- *CSR_TIMER4_CLR = 0;
- *CSR_SA110_CNTL |= (1 << 13);
- }
- }
-}
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
deleted file mode 100644
index 40ba78e5782..00000000000
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-footbridge/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 0d3846f3b60..80a1c5cc907 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -645,6 +645,32 @@ fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi)
#endif
}
+static void netwinder_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* Jump into the ROM */
+ soft_restart(0x41000000);
+ } else {
+ local_irq_disable();
+ local_fiq_disable();
+
+ /* open up the SuperIO chip */
+ outb(0x87, 0x370);
+ outb(0x87, 0x370);
+
+ /* aux function group 1 (logical device 7) */
+ outb(0x07, 0x370);
+ outb(0x07, 0x371);
+
+ /* set GP16 for WD-TIMER output */
+ outb(0xe6, 0x370);
+ outb(0x00, 0x371);
+
+ /* set a RED LED and toggle WD_TIMER for rebooting */
+ outb(0xc4, 0x338);
+ }
+}
+
MACHINE_START(NETWINDER, "Rebel-NetWinder")
/* Maintainer: Russell King/Rebel.com */
.atag_offset = 0x100,
@@ -656,4 +682,5 @@ MACHINE_START(NETWINDER, "Rebel-NetWinder")
.map_io = footbridge_map_io,
.init_irq = footbridge_init_irq,
.timer = &isa_timer,
+ .restart = netwinder_restart,
MACHINE_END
diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c
index f41dba39b32..e1e9990fa95 100644
--- a/arch/arm/mach-footbridge/personal.c
+++ b/arch/arm/mach-footbridge/personal.c
@@ -19,5 +19,6 @@ MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer")
.map_io = footbridge_map_io,
.init_irq = footbridge_init_irq,
.timer = &footbridge_timer,
+ .restart = footbridge_restart,
MACHINE_END
diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h
deleted file mode 100644
index 45371eb86fc..00000000000
--- a/arch/arm/mach-gemini/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index 51d4e44ab97..f8a2f6bb548 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -242,3 +242,8 @@ void __init h720x_map_io(void)
{
iotable_init(h720x_io_desc,ARRAY_SIZE(h720x_io_desc));
}
+
+void h720x_restart(char mode, const char *cmd)
+{
+ CPU_REG (PMU_BASE, PMU_STAT) |= PMU_WARMRESET;
+}
diff --git a/arch/arm/mach-h720x/common.h b/arch/arm/mach-h720x/common.h
index 7dd5fa604ef..2489537d33d 100644
--- a/arch/arm/mach-h720x/common.h
+++ b/arch/arm/mach-h720x/common.h
@@ -16,6 +16,7 @@
extern unsigned long h720x_gettimeoffset(void);
extern void __init h720x_init_irq(void);
extern void __init h720x_map_io(void);
+extern void h720x_restart(char, const char *);
#ifdef CONFIG_ARCH_H7202
extern struct sys_timer h7202_timer;
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 9886f19805f..5fdb20c855e 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -34,4 +34,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201")
.init_irq = h720x_init_irq,
.timer = &h7201_timer,
.dma_zone_size = SZ_256M,
+ .restart = h720x_restart,
MACHINE_END
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index 284a134819e..169673036c5 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -77,4 +77,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202")
.timer = &h7202_timer,
.init_machine = init_eval_h7202,
.dma_zone_size = SZ_256M,
+ .restart = h720x_restart,
MACHINE_END
diff --git a/arch/arm/mach-h720x/include/mach/system.h b/arch/arm/mach-h720x/include/mach/system.h
index a708d24ee46..16ac46e239a 100644
--- a/arch/arm/mach-h720x/include/mach/system.h
+++ b/arch/arm/mach-h720x/include/mach/system.h
@@ -24,10 +24,4 @@ static void arch_idle(void)
nop();
}
-
-static __inline__ void arch_reset(char mode, const char *cmd)
-{
- CPU_REG (PMU_BASE, PMU_STAT) |= PMU_WARMRESET;
-}
-
#endif
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
deleted file mode 100644
index 8520b4a4d4e..00000000000
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-h720x/include/mach/vmalloc.h
- */
-
-#ifndef __ARCH_ARM_VMALLOC_H
-#define __ARCH_ARM_VMALLOC_H
-
-#define VMALLOC_END 0xd0000000UL
-
-#endif
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index 7e33fc94cd1..d8e2d0be64a 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -1,5 +1,6 @@
extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
extern void highbank_clocks_init(void);
+extern void highbank_restart(char, const char *);
extern void __iomem *scu_base_addr;
#ifdef CONFIG_DEBUG_HIGHBANK_UART
extern void highbank_lluart_map_io(void);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b82dcf08e74..804c4a55f80 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/unified.h>
@@ -72,6 +73,9 @@ static void __init highbank_map_io(void)
void highbank_set_cpu_jump(int cpu, void *jump_addr)
{
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(cpu);
+#endif
writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
@@ -140,6 +144,8 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
.map_io = highbank_map_io,
.init_irq = highbank_init_irq,
.timer = &highbank_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = highbank_init,
.dt_compat = highbank_match,
+ .restart = highbank_restart,
MACHINE_END
diff --git a/arch/arm/mach-highbank/include/mach/entry-macro.S b/arch/arm/mach-highbank/include/mach/entry-macro.S
index 73c11297509..a14f9e62ca9 100644
--- a/arch/arm/mach-highbank/include/mach/entry-macro.S
+++ b/arch/arm/mach-highbank/include/mach/entry-macro.S
@@ -1,5 +1,3 @@
-#include <asm/hardware/entry-macro-gic.S>
-
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-highbank/include/mach/system.h b/arch/arm/mach-highbank/include/mach/system.h
index 7e8192296ca..b1d8b5fbe37 100644
--- a/arch/arm/mach-highbank/include/mach/system.h
+++ b/arch/arm/mach-highbank/include/mach/system.h
@@ -21,6 +21,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-extern void arch_reset(char mode, const char *cmd);
-
#endif
diff --git a/arch/arm/mach-highbank/include/mach/vmalloc.h b/arch/arm/mach-highbank/include/mach/vmalloc.h
deleted file mode 100644
index 1969e954277..00000000000
--- a/arch/arm/mach-highbank/include/mach/vmalloc.h
+++ /dev/null
@@ -1 +0,0 @@
-#define VMALLOC_END 0xFEE00000UL
diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c
index 53f0c4c5ef1..82c27230d4a 100644
--- a/arch/arm/mach-highbank/system.c
+++ b/arch/arm/mach-highbank/system.c
@@ -20,7 +20,7 @@
#include "core.h"
#include "sysregs.h"
-void arch_reset(char mode, const char *cmd)
+void highbank_restart(char mode, const char *cmd)
{
if (mode == 'h')
hignbank_set_pwr_hard_reset();
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5f7f9c2a34a..0e6de366c64 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -10,11 +10,6 @@ config HAVE_IMX_MMDC
config HAVE_IMX_SRC
bool
-#
-# ARCH_MX31 and ARCH_MX35 are left for compatibility
-# Some usages assume that having one of them implies not having (e.g.) ARCH_MX2.
-# To easily distinguish good and reviewed from unreviewed usages new (and IMHO
-# more sensible) names are used: SOC_IMX31 and SOC_IMX35
config ARCH_MX1
bool
@@ -27,12 +22,6 @@ config ARCH_MX25
config MACH_MX27
bool
-config ARCH_MX31
- bool
-
-config ARCH_MX35
- bool
-
config SOC_IMX1
bool
select ARCH_MX1
@@ -72,7 +61,6 @@ config SOC_IMX31
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select ARCH_MXC_AUDMUX_V2
- select ARCH_MX31
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -82,7 +70,6 @@ config SOC_IMX35
select ARCH_MXC_IOMUX_V3
select ARCH_MXC_AUDMUX_V2
select HAVE_EPIT
- select ARCH_MX35
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -111,6 +98,7 @@ config MACH_SCB9328
config MACH_APF9328
bool "APF9328"
select SOC_IMX1
+ select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
help
Say Yes here if you are using the Armadeus APF9328 development board
@@ -145,7 +133,7 @@ config MACH_MX25_3DS
select IMX_HAVE_PLATFORM_MXC_NAND
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
bool "Support Eukrea CPUIMX25 Platform"
select SOC_IMX25
select IMX_HAVE_PLATFORM_FLEXCAN
@@ -161,7 +149,7 @@ config MACH_EUKREA_CPUIMX25
choice
prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX25
+ depends on MACH_EUKREA_CPUIMX25SD
default MACH_EUKREA_MBIMXSD25_BASEBOARD
config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -555,7 +543,7 @@ config MACH_MX35_3DS
Include support for MX35PDK platform. This includes specific
configurations for the board and its peripherals.
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
bool "Support Eukrea CPUIMX35 Platform"
select SOC_IMX35
select IMX_HAVE_PLATFORM_FLEXCAN
@@ -573,7 +561,7 @@ config MACH_EUKREA_CPUIMX35
choice
prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX35
+ depends on MACH_EUKREA_CPUIMX35SD
default MACH_EUKREA_MBIMXSD35_BASEBOARD
config MACH_EUKREA_MBIMXSD35_BASEBOARD
@@ -608,13 +596,14 @@ comment "i.MX6 family:"
config SOC_IMX6Q
bool "i.MX6 Quad support"
+ select ARM_CPU_SUSPEND if PM
select ARM_GIC
- select CACHE_L2X0
select CPU_V7
select HAVE_ARM_SCU
select HAVE_IMX_GPC
select HAVE_IMX_MMDC
select HAVE_IMX_SRC
+ select HAVE_SMP
select USE_OF
help
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index aba73214c2a..f5920c24f7d 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
# i.MX25 based machines
obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
# i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
# i.MX35 based machines
obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
@@ -70,4 +70,8 @@ AFLAGS_head-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
+
+ifeq ($(CONFIG_PM),y)
+obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
+endif
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index cfede5768aa..5f4d06af491 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -25,3 +25,6 @@ initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000
zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000
params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100
initrd_phys-$(CONFIG_SOC_IMX6Q) := 0x10800000
+
+dtb-$(CONFIG_SOC_IMX6Q) += imx6q-arm2.dtb \
+ imx6q-sabrelite.dtb
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
index 8116f119517..ac8238caecb 100644
--- a/arch/arm/mach-imx/clock-imx35.c
+++ b/arch/arm/mach-imx/clock-imx35.c
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
int __init mx35_clocks_init()
{
- unsigned int cgr2 = 3 << 26, cgr3 = 0;
+ unsigned int cgr2 = 3 << 26;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
CCM_BASE + CCM_CGR1);
+ __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+ __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+ clk_enable(&iim_clk);
+ imx_print_silicon_rev("i.MX35", mx35_revision());
+ clk_disable(&iim_clk);
/*
* Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
*/
if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
/* Additionally turn on UART1, SCC, and IIM clocks */
- cgr2 |= 3 << 16 | 3 << 4;
- cgr3 |= 3 << 2;
+ clk_enable(&iim_clk);
+ clk_enable(&uart1_clk);
+ clk_enable(&scc_clk);
}
- __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
- __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX35", mx35_revision());
- clk_disable(&iim_clk);
-
#ifdef CONFIG_MXC_USE_EPIT
epit_timer_init(&epit1_clk,
MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
index 613a1b993bf..9273c2a24b5 100644
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ b/arch/arm/mach-imx/clock-imx6q.c
@@ -1931,14 +1931,12 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
val |= 0x1 << BP_CLPCR_LPM;
val &= ~BM_CLPCR_VSTBY;
val &= ~BM_CLPCR_SBYOS;
- val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
break;
case STOP_POWER_OFF:
val |= 0x2 << BP_CLPCR_LPM;
val |= 0x3 << BP_CLPCR_STBY_COUNT;
val |= BM_CLPCR_VSTBY;
val |= BM_CLPCR_SBYOS;
- val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
break;
default:
return -EINVAL;
@@ -1953,14 +1951,17 @@ static struct map_desc imx6q_clock_desc[] = {
imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
};
+void __init imx6q_clock_map_io(void)
+{
+ iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
+}
+
int __init mx6q_clocks_init(void)
{
struct device_node *np;
void __iomem *base;
int i, irq;
- iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-
/* retrieve the frequency of fixed clocks from device tree */
for_each_compatible_node(np, NULL, "fixed-clock") {
u32 rate;
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/head-v7.S
index 6229efbc70c..7e49deb128a 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/head-v7.S
@@ -16,7 +16,6 @@
#include <asm/hardware/cache-l2x0.h>
.section ".text.head", "ax"
- __CPUINIT
/*
* The secondary kernel init calls v7_flush_dcache_all before it enables
@@ -33,6 +32,7 @@
*/
ENTRY(v7_invalidate_l1)
mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 2, r0, c0, c0, 0
mrc p15, 1, r0, c0, c0, 0
@@ -71,6 +71,7 @@ ENTRY(v7_secondary_startup)
ENDPROC(v7_secondary_startup)
#endif
+#ifdef CONFIG_PM
/*
* The following code is located in the .data section. This is to
* allow phys_l2x0_saved_regs to be accessed with a relative load
@@ -79,6 +80,7 @@ ENDPROC(v7_secondary_startup)
.data
.align
+#ifdef CONFIG_CACHE_L2X0
.macro pl310_resume
ldr r2, phys_l2x0_saved_regs
ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
@@ -88,12 +90,17 @@ ENDPROC(v7_secondary_startup)
str r1, [r0, #L2X0_CTRL] @ re-enable L2
.endm
+ .globl phys_l2x0_saved_regs
+phys_l2x0_saved_regs:
+ .long 0
+#else
+ .macro pl310_resume
+ .endm
+#endif
+
ENTRY(v7_cpu_resume)
bl v7_invalidate_l1
pl310_resume
b cpu_resume
ENDPROC(v7_cpu_resume)
-
- .globl phys_l2x0_saved_regs
-phys_l2x0_saved_regs:
- .long 0
+#endif
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 1e486e67dab..f4a63ee9e21 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/dm9000.h>
+#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -41,6 +42,9 @@ static const int apf9328_pins[] __initconst = {
PB29_PF_UART2_RTS,
PB30_PF_UART2_TXD,
PB31_PF_UART2_RXD,
+ /* I2C */
+ PA15_PF_I2C_SDA,
+ PA16_PF_I2C_SCL,
};
/*
@@ -103,6 +107,10 @@ static const struct imxuart_platform_data uart1_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
+static const struct imxi2c_platform_data apf9328_i2c_data __initconst = {
+ .bitrate = 100000,
+};
+
static struct platform_device *devices[] __initdata = {
&apf9328_flash_device,
&dm9000x_device,
@@ -119,6 +127,8 @@ static void __init apf9328_init(void)
imx1_add_imx_uart0(NULL);
imx1_add_imx_uart1(&uart1_pdata);
+ imx1_add_imx_i2c(&apf9328_i2c_data);
+
platform_add_devices(devices, ARRAY_SIZE(devices));
}
@@ -139,4 +149,5 @@ MACHINE_START(APF9328, "Armadeus APF9328")
.handle_irq = imx1_handle_irq,
.timer = &apf9328_timer,
.init_machine = apf9328_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index c9a9cf67755..e4f426a0989 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -561,4 +561,5 @@ MACHINE_START(ARMADILLO5X0, "Armadillo-500")
.handle_irq = imx31_handle_irq,
.timer = &armadillo5x0_timer,
.init_machine = armadillo5x0_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 313f62ddc1e..9a9897749dd 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -65,4 +65,5 @@ MACHINE_START(BUG, "BugLabs BUGBase")
.handle_irq = imx31_handle_irq,
.timer = &bug_timer,
.init_machine = bug_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index edb37305257..d085aea0870 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -318,4 +318,5 @@ MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
.handle_irq = imx27_handle_irq,
.timer = &eukrea_cpuimx27_timer,
.init_machine = eukrea_cpuimx27_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 66af2e8f7e5..8ecc872b254 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data
.bitrate = 100000,
};
+#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+ return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
static struct tsc2007_platform_data tsc2007_info = {
.model = 2007,
.x_plate_ohms = 180,
+ .get_pendown_state = tsc2007_get_pendown_state,
};
-#define TSC2007_IRQGPIO IMX_GPIO_NR(3, 2)
static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
{
I2C_BOARD_INFO("pcf8563", 0x51),
@@ -201,4 +207,5 @@ MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
.handle_irq = imx35_handle_irq,
.timer = &eukrea_cpuimx35_timer,
.init_machine = eukrea_cpuimx35_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index ab8fbcc472b..76a97a598b9 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -170,4 +170,5 @@ MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
.handle_irq = imx25_handle_irq,
.timer = &eukrea_cpuimx25_timer,
.init_machine = eukrea_cpuimx25_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 38eb9e45110..c2766ae02b4 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -282,4 +282,5 @@ MACHINE_START(IMX27_VISSTRIM_M10, "Vista Silicon Visstrim_M10")
.handle_irq = imx27_handle_irq,
.timer = &visstrim_m10_timer,
.init_machine = visstrim_m10_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 7052155d055..c9d350c5dcc 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -78,4 +78,5 @@ MACHINE_START(IMX27IPCAM, "Freescale IMX27IPCAM")
.handle_irq = imx27_handle_irq,
.timer = &mx27ipcam_timer,
.init_machine = mx27ipcam_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 8d6a63521f1..1f45b918922 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -84,4 +84,5 @@ MACHINE_START(IMX27LITE, "LogicPD i.MX27LITE")
.handle_irq = imx27_handle_irq,
.timer = &mx27lite_timer,
.init_machine = mx27lite_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 8bf5fa34948..c2572810691 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,12 +10,17 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/micrel_phy.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
@@ -23,8 +28,57 @@
#include <mach/common.h>
#include <mach/hardware.h>
+void imx6q_restart(char mode, const char *cmd)
+{
+ struct device_node *np;
+ void __iomem *wdog_base;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-wdt");
+ wdog_base = of_iomap(np, 0);
+ if (!wdog_base)
+ goto soft;
+
+ imx_src_prepare_restart();
+
+ /* enable wdog */
+ writew_relaxed(1 << 2, wdog_base);
+ /* write twice to ensure the request will not get ignored */
+ writew_relaxed(1 << 2, wdog_base);
+
+ /* wait for reset to assert ... */
+ mdelay(500);
+
+ pr_err("Watchdog reset failed to assert reset\n");
+
+ /* delay to allow the serial port to show the message */
+ mdelay(50);
+
+soft:
+ /* we'll take a jump through zero as a poor second */
+ soft_restart(0);
+}
+
+/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
+static int ksz9021rn_phy_fixup(struct phy_device *phydev)
+{
+ /* min rx data delay */
+ phy_write(phydev, 0x0b, 0x8105);
+ phy_write(phydev, 0x0c, 0x0000);
+
+ /* max rx/tx clock delay, min rx/tx control delay */
+ phy_write(phydev, 0x0b, 0x8104);
+ phy_write(phydev, 0x0c, 0xf0f0);
+ phy_write(phydev, 0x0b, 0x104);
+
+ return 0;
+}
+
static void __init imx6q_init_machine(void)
{
+ if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
+ phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
+ ksz9021rn_phy_fixup);
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
imx6q_pm_init();
@@ -34,16 +88,18 @@ static void __init imx6q_map_io(void)
{
imx_lluart_map_io();
imx_scu_map_io();
+ imx6q_clock_map_io();
}
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx6q gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx6q_irq_match[] __initconst = {
@@ -70,7 +126,8 @@ static struct sys_timer imx6q_timer = {
};
static const char *imx6q_dt_compat[] __initdata = {
- "fsl,imx6q-sabreauto",
+ "fsl,imx6q-arm2",
+ "fsl,imx6q-sabrelite",
NULL,
};
@@ -81,4 +138,5 @@ DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad (Device Tree)")
.timer = &imx6q_timer,
.init_machine = imx6q_init_machine,
.dt_compat = imx6q_dt_compat,
+ .restart = imx6q_restart,
MACHINE_END
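
The sabrelite hunk uses phylib's fixup mechanism: a callback is registered against a PHY ID and mask, and phylib invokes it whenever a matching PHY device is bound. A condensed sketch; the register writes are illustrative, only the registration call follows the patch:

#include <linux/init.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>

static int demo_ksz9021_fixup(struct phy_device *phydev)
{
	/* illustrative extended-register write; values are not the board's */
	phy_write(phydev, 0x0b, 0x8105);
	phy_write(phydev, 0x0c, 0x0000);
	return 0;
}

static void __init demo_board_phy_init(void)
{
	/* run the fixup on every PHY whose ID matches the KSZ9021 */
	phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
				   demo_ksz9021_fixup);
}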
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 5f37f89e40f..fc78e8071cd 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -279,4 +279,5 @@ MACHINE_START(KZM_ARM11_01, "Kyoto Microcomputer Co., Ltd. KZM-ARM11-01")
.handle_irq = imx31_handle_irq,
.timer = &kzm_timer,
.init_machine = kzm_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index fc49785e734..97046088ff1 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -147,6 +147,7 @@ MACHINE_START(MX1ADS, "Freescale MX1ADS")
.handle_irq = imx1_handle_irq,
.timer = &mx1ads_timer,
.init_machine = mx1ads_init,
+ .restart = mxc_restart,
MACHINE_END
MACHINE_START(MXLADS, "Freescale MXLADS")
@@ -157,4 +158,5 @@ MACHINE_START(MXLADS, "Freescale MXLADS")
.handle_irq = imx1_handle_irq,
.timer = &mx1ads_timer,
.init_machine = mx1ads_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 25f84028d05..8d9f95514b1 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -312,4 +312,5 @@ MACHINE_START(MX21ADS, "Freescale i.MX21ADS")
.handle_irq = imx21_handle_irq,
.timer = &mx21ads_timer,
.init_machine = mx21ads_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 88dccf12224..f26734298aa 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -270,4 +270,5 @@ MACHINE_START(MX25_3DS, "Freescale MX25PDK (3DS)")
.handle_irq = imx25_handle_irq,
.timer = &mx25pdk_timer,
.init_machine = mx25pdk_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index ba232d79fa8..18f35816706 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -425,4 +425,5 @@ MACHINE_START(MX27_3DS, "Freescale MX27PDK")
.handle_irq = imx27_handle_irq,
.timer = &mx27pdk_timer,
.init_machine = mx27pdk_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 74dd5731eb6..0228d2e07fe 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -351,4 +351,5 @@ MACHINE_START(MX27ADS, "Freescale i.MX27ADS")
.handle_irq = imx27_handle_irq,
.timer = &mx27ads_timer,
.init_machine = mx27ads_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index b8c54b84018..89c33258639 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -492,7 +492,7 @@ static struct mc13xxx_platform_data mc13783_pdata = {
.regulators = mx31_3ds_regulators,
.num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
},
- .flags = MC13XXX_USE_TOUCHSCREEN,
+ .flags = MC13XXX_USE_TOUCHSCREEN | MC13XXX_USE_RTC,
};
/* SPI */
@@ -770,4 +770,5 @@ MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
.timer = &mx31_3ds_timer,
.init_machine = mx31_3ds_init,
.reserve = mx31_3ds_reserve,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 9cc1a49053b..4917aab0e25 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -542,4 +542,5 @@ MACHINE_START(MX31ADS, "Freescale MX31ADS")
.handle_irq = imx31_handle_irq,
.timer = &mx31ads_timer,
.init_machine = mx31ads_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 102ec99357c..02401bbd6d5 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -303,4 +303,5 @@ MACHINE_START(LILLY1131, "INCO startec LILLY-1131")
.handle_irq = imx31_handle_irq,
.timer = &mx31lilly_timer,
.init_machine = mx31lilly_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index 5366d2de18f..ef80751712e 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -287,4 +287,5 @@ MACHINE_START(MX31LITE, "LogicPD i.MX31 SOM")
.handle_irq = imx31_handle_irq,
.timer = &mx31lite_timer,
.init_machine = mx31lite_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index 93269150309..b95981dacb2 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -600,4 +600,5 @@ MACHINE_START(MX31MOBOARD, "EPFL Mobots mx31moboard")
.handle_irq = imx31_handle_irq,
.timer = &mx31moboard_timer,
.init_machine = mx31moboard_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 7a462025a0f..0af6c9c5b3f 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -224,4 +224,5 @@ MACHINE_START(MX35_3DS, "Freescale MX35PDK")
.handle_irq = imx35_handle_irq,
.timer = &mx35pdk_timer,
.init_machine = mx35_3ds_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 125c19643b0..8b3d3f07d89 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -274,4 +274,5 @@ MACHINE_START(MXT_TD60, "Maxtrack i-MXT TD60")
.handle_irq = imx27_handle_irq,
.timer = &mxt_td60_timer,
.init_machine = mxt_td60_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 26072f4b02e..d3b9c6b5edd 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -442,4 +442,5 @@ MACHINE_START(PCA100, "phyCARD-i.MX27")
.handle_irq = imx27_handle_irq,
.init_machine = pca100_init,
.timer = &pca100_timer,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index efd6b536ef6..d7e151669ed 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -696,4 +696,5 @@ MACHINE_START(PCM037, "Phytec Phycore pcm037")
.handle_irq = imx31_handle_irq,
.timer = &pcm037_timer,
.init_machine = pcm037_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index a17e9c7dfca..16f126da9f8 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -357,4 +357,5 @@ MACHINE_START(PCM038, "phyCORE-i.MX27")
.handle_irq = imx27_handle_irq,
.timer = &pcm038_timer,
.init_machine = pcm038_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 7366c2ae3ea..06dc106519a 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -425,4 +425,5 @@ MACHINE_START(PCM043, "Phytec Phycore pcm043")
.handle_irq = imx35_handle_irq,
.timer = &pcm043_timer,
.init_machine = pcm043_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index 4ff5faf102a..260621055b6 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -273,4 +273,5 @@ MACHINE_START(QONG, "Dave/DENX QongEVB-LITE")
.handle_irq = imx31_handle_irq,
.timer = &qong_timer,
.init_machine = qong_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index bb6e5b25d8d..cb9ceae2f64 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -144,4 +144,5 @@ MACHINE_START(SCB9328, "Synertronixx scb9328")
.handle_irq = imx1_handle_irq,
.timer = &scb9328_timer,
.init_machine = scb9328_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index 69092458f2d..033257e553e 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -322,4 +322,5 @@ MACHINE_START(VPR200, "VPR200")
.handle_irq = imx35_handle_irq,
.timer = &vpr200_timer,
.init_machine = vpr200_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9f0e82ec339..31807d2a8b7 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -33,29 +33,32 @@
static void imx3_idle(void)
{
unsigned long reg = 0;
- __asm__ __volatile__(
- /* disable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "bic %0, %0, #0x00001000\n"
- "bic %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- /* invalidate I cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c5, 0\n"
- /* clear and invalidate D cache */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c14, 0\n"
- /* WFI */
- "mov %0, #0\n"
- "mcr p15, 0, %0, c7, c0, 4\n"
- "nop\n" "nop\n" "nop\n" "nop\n"
- "nop\n" "nop\n" "nop\n"
- /* enable I and D cache */
- "mrc p15, 0, %0, c1, c0, 0\n"
- "orr %0, %0, #0x00001000\n"
- "orr %0, %0, #0x00000004\n"
- "mcr p15, 0, %0, c1, c0, 0\n"
- : "=r" (reg));
+
+ if (!need_resched())
+ __asm__ __volatile__(
+ /* disable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "bic %0, %0, #0x00001000\n"
+ "bic %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ /* invalidate I cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c5, 0\n"
+ /* clear and invalidate D cache */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c14, 0\n"
+ /* WFI */
+ "mov %0, #0\n"
+ "mcr p15, 0, %0, c7, c0, 4\n"
+ "nop\n" "nop\n" "nop\n" "nop\n"
+ "nop\n" "nop\n" "nop\n"
+ /* enable I and D cache */
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ "orr %0, %0, #0x00001000\n"
+ "orr %0, %0, #0x00000004\n"
+ "mcr p15, 0, %0, c1, c0, 0\n"
+ : "=r" (reg));
+ local_irq_enable();
}
static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
@@ -108,6 +111,7 @@ void imx3_init_l2x0(void)
l2x0_init(l2x0_base, 0x00030024, 0x00000000);
}
+#ifdef CONFIG_SOC_IMX31
static struct map_desc mx31_io_desc[] __initdata = {
imx_map_entry(MX31, X_MEMC, MT_DEVICE),
imx_map_entry(MX31, AVIC, MT_DEVICE_NONSHARED),
@@ -126,33 +130,11 @@ void __init mx31_map_io(void)
iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}
-static struct map_desc mx35_io_desc[] __initdata = {
- imx_map_entry(MX35, X_MEMC, MT_DEVICE),
- imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
- imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
-};
-
-void __init mx35_map_io(void)
-{
- iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
-}
-
void __init imx31_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX31);
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
- imx_ioremap = imx3_ioremap;
-}
-
-void __init imx35_init_early(void)
-{
- mxc_set_cpu_type(MXC_CPU_MX35);
- mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
- mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
- imx_idle = imx3_idle;
+ pm_idle = imx3_idle;
imx_ioremap = imx3_ioremap;
}
@@ -161,11 +143,6 @@ void __init mx31_init_irq(void)
mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
}
-void __init mx35_init_irq(void)
-{
- mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
-}
-
static struct sdma_script_start_addrs imx31_to1_sdma_script __initdata = {
.per_2_per_addr = 1677,
};
@@ -199,6 +176,35 @@ void __init imx31_soc_init(void)
imx_add_imx_sdma("imx31-sdma", MX31_SDMA_BASE_ADDR, MX31_INT_SDMA, &imx31_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX31 */
+
+#ifdef CONFIG_SOC_IMX35
+static struct map_desc mx35_io_desc[] __initdata = {
+ imx_map_entry(MX35, X_MEMC, MT_DEVICE),
+ imx_map_entry(MX35, AVIC, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS1, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, AIPS2, MT_DEVICE_NONSHARED),
+ imx_map_entry(MX35, SPBA0, MT_DEVICE_NONSHARED),
+};
+
+void __init mx35_map_io(void)
+{
+ iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
+}
+
+void __init imx35_init_early(void)
+{
+ mxc_set_cpu_type(MXC_CPU_MX35);
+ mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
+ mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
+ pm_idle = imx3_idle;
+ imx_ioremap = imx3_ioremap;
+}
+
+void __init mx35_init_irq(void)
+{
+ mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
+}
static struct sdma_script_start_addrs imx35_to1_sdma_script __initdata = {
.ap_2_ap_addr = 642,
@@ -254,3 +260,4 @@ void __init imx35_soc_init(void)
imx_add_imx_sdma("imx35-sdma", MX35_SDMA_BASE_ADDR, MX35_INT_SDMA, &imx35_sdma_pdata);
}
+#endif /* ifdef CONFIG_SOC_IMX35 */
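
Hooking imx3_idle() into pm_idle also makes the idle contract visible: the hook is entered with interrupts disabled, should do nothing if a reschedule is already pending, and must re-enable interrupts before returning. A minimal idle hook following that contract, using cpu_do_idle() as a stand-in for the SoC-specific low-power sequence:

#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <asm/proc-fns.h>

static void demo_idle(void)
{
	/* entered with IRQs off; skip low-power entry if work is pending */
	if (!need_resched())
		cpu_do_idle();		/* SoC-specific sequence goes here */
	local_irq_enable();		/* the contract: return with IRQs on */
}

static void __init demo_init_early(void)
{
	pm_idle = demo_idle;
}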
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index f20f191d7cc..f7b0c2b1b90 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -64,7 +64,9 @@ void __init imx6q_pm_init(void)
* address of the data structure used by l2x0 core to save registers,
* and later restore the necessary ones in imx6q resume entry.
*/
+#ifdef CONFIG_CACHE_L2X0
phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
+#endif
suspend_set_ops(&imx6q_pm_ops);
}
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index 36cacbd0dcc..4bde04f99e3 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -14,19 +14,26 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/smp.h>
#include <asm/unified.h>
#define SRC_SCR 0x000
#define SRC_GPR1 0x020
+#define BP_SRC_SCR_WARM_RESET_ENABLE 0
#define BP_SRC_SCR_CORE1_RST 14
#define BP_SRC_SCR_CORE1_ENABLE 22
static void __iomem *src_base;
+#ifndef CONFIG_SMP
+#define cpu_logical_map(cpu) 0
+#endif
+
void imx_enable_cpu(int cpu, bool enable)
{
u32 mask, val;
+ cpu = cpu_logical_map(cpu);
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
@@ -35,15 +42,38 @@ void imx_enable_cpu(int cpu, bool enable)
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
+ cpu = cpu_logical_map(cpu);
writel_relaxed(BSYM(virt_to_phys(jump_addr)),
src_base + SRC_GPR1 + cpu * 8);
}
+void imx_src_prepare_restart(void)
+{
+ u32 val;
+
+ /* clear enable bits of secondary cores */
+ val = readl_relaxed(src_base + SRC_SCR);
+ val &= ~(0x7 << BP_SRC_SCR_CORE1_ENABLE);
+ writel_relaxed(val, src_base + SRC_SCR);
+
+ /* clear persistent entry register of primary core */
+ writel_relaxed(0, src_base + SRC_GPR1);
+}
+
void __init imx_src_init(void)
{
struct device_node *np;
+ u32 val;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-src");
src_base = of_iomap(np, 0);
WARN_ON(!src_base);
+
+ /*
+ * force warm reset sources to generate cold reset
+ * for a more reliable restart
+ */
+ val = readl_relaxed(src_base + SRC_SCR);
+ val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
+ writel_relaxed(val, src_base + SRC_SCR);
}
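For context, a sketch of how the SRC helpers above are typically consumed when bringing up a secondary core; the entry point and calling context are assumptions, not taken from this patch:

/* Sketch only: releasing a secondary core through the SRC helpers. */
extern void v7_secondary_startup(void);	/* assumed secondary entry point */

static int example_boot_secondary(unsigned int cpu)	/* hypothetical */
{
	/* record where the core should jump once it leaves reset */
	imx_set_cpu_jump(cpu, v7_secondary_startup);

	/* release the core; cpu_logical_map() is applied inside the helper */
	imx_enable_cpu(cpu, true);

	return 0;
}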
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index dfd18f3b50e..350e26636a0 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -6,6 +6,8 @@ config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
select CLKSRC_MMIO
select MIGHT_HAVE_PCI
+ select SERIAL_AMBA_PL010
+ select SERIAL_AMBA_PL010_CONSOLE
help
Include support for the ARM(R) Integrator/AP and
Integrator/PP2 platforms.
@@ -15,6 +17,8 @@ config ARCH_INTEGRATOR_CP
select ARCH_CINTEGRATOR
select ARM_TIMER_SP804
select PLAT_VERSATILE_CLCD
+ select SERIAL_AMBA_PL011
+ select SERIAL_AMBA_PL011_CONSOLE
help
Include support for the ARM(R) Integrator CP platform.
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h
index a08f9b0299d..899561d8db2 100644
--- a/arch/arm/mach-integrator/common.h
+++ b/arch/arm/mach-integrator/common.h
@@ -1,2 +1,3 @@
void integrator_init_early(void);
void integrator_reserve(void);
+void integrator_restart(char, const char *);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 4b38e13667a..019f0ab08f6 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -29,6 +29,7 @@
#include <mach/cm.h>
#include <asm/system.h>
#include <asm/leds.h>
+#include <asm/mach-types.h>
#include <asm/mach/time.h>
#include <asm/pgtable.h>
@@ -44,7 +45,6 @@ static struct amba_device rtc_device = {
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_RTCINT, NO_IRQ },
- .periphid = 0x00041030,
};
static struct amba_device uart0_device = {
@@ -58,7 +58,6 @@ static struct amba_device uart0_device = {
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_UARTINT0, NO_IRQ },
- .periphid = 0x0041010,
};
static struct amba_device uart1_device = {
@@ -72,7 +71,6 @@ static struct amba_device uart1_device = {
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_UARTINT1, NO_IRQ },
- .periphid = 0x0041010,
};
static struct amba_device kmi0_device = {
@@ -85,7 +83,6 @@ static struct amba_device kmi0_device = {
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_KMIINT0, NO_IRQ },
- .periphid = 0x00041050,
};
static struct amba_device kmi1_device = {
@@ -98,7 +95,6 @@ static struct amba_device kmi1_device = {
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_KMIINT1, NO_IRQ },
- .periphid = 0x00041050,
};
static struct amba_device *amba_devs[] __initdata = {
@@ -157,6 +153,19 @@ static int __init integrator_init(void)
{
int i;
+ /*
+ * The Integrator/AP lacks the necessary AMBA PrimeCell IDs, so we need to
+ * hard-code them. The Integrator/CP and later have proper cell IDs.
+ * Else we leave them undefined so the bus driver can autoprobe them.
+ */
+ if (machine_is_integrator()) {
+ rtc_device.periphid = 0x00041030;
+ uart0_device.periphid = 0x00041010;
+ uart1_device.periphid = 0x00041010;
+ kmi0_device.periphid = 0x00041050;
+ kmi1_device.periphid = 0x00041050;
+ }
+
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
amba_device_register(d, &iomem_resource);
@@ -238,3 +247,11 @@ void __init integrator_reserve(void)
{
memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
}
+
+/*
+ * To reset, we hit the on-board reset register in the system FPGA
+ */
+void integrator_restart(char mode, const char *cmd)
+{
+ cm_control(CM_CTRL_RESET, CM_CTRL_RESET);
+}
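For context, a sketch of the .restart hook pattern this change introduces: the reset logic moves out of mach/system.h and into a handler wired through the machine descriptor. Board and function names below are hypothetical; only cm_control() is taken from the code above:

/* Sketch only: a board supplies its own restart handler. */
static void example_board_restart(char mode, const char *cmd)	/* hypothetical */
{
	/* hit whatever board/SoC register forces a reset */
	cm_control(CM_CTRL_RESET, CM_CTRL_RESET);
}

MACHINE_START(EXAMPLE_BOARD, "Example board")	/* hypothetical entry */
	/* ... the board's usual .map_io/.init_irq/.timer fields ... */
	.restart	= example_board_restart,
MACHINE_END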
diff --git a/arch/arm/mach-integrator/include/mach/system.h b/arch/arm/mach-integrator/include/mach/system.h
index e1551b8dab7..901514eba4a 100644
--- a/arch/arm/mach-integrator/include/mach/system.h
+++ b/arch/arm/mach-integrator/include/mach/system.h
@@ -21,8 +21,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/cm.h>
-
static inline void arch_idle(void)
{
/*
@@ -32,13 +30,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- cm_control(CM_CTRL_RESET, CM_CTRL_RESET);
-}
-
#endif
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
deleted file mode 100644
index 2f5a2bafb11..00000000000
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-integrator/include/mach/vmalloc.h
- *
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index a1769f35a86..21a1d6cbef4 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -472,4 +472,5 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator")
.init_irq = ap_init_irq,
.timer = &ap_timer,
.init_machine = ap_init,
+ .restart = integrator_restart,
MACHINE_END
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 5de49c33e4d..a8b6aa6003f 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
#include <linux/amba/clcd.h>
@@ -499,4 +499,5 @@ MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
.init_irq = intcp_init_irq,
.timer = &cp_timer,
.init_machine = intcp_init,
+ .restart = integrator_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 52b7fab7ef6..07e9ff7adaf 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -10,6 +10,7 @@ void iop13xx_map_io(void);
void iop13xx_platform_init(void);
void iop13xx_add_tpmi_devices(void);
void iop13xx_init_irq(void);
+void iop13xx_restart(char, const char *);
/* CPUID CP6 R0 Page 0 */
static inline int iop13xx_cpu_id(void)
diff --git a/arch/arm/mach-iop13xx/include/mach/system.h b/arch/arm/mach-iop13xx/include/mach/system.h
index d0c66ef450a..1f31ed3f8ae 100644
--- a/arch/arm/mach-iop13xx/include/mach/system.h
+++ b/arch/arm/mach-iop13xx/include/mach/system.h
@@ -7,21 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <mach/iop13xx.h>
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Reset the internal bus (warning both cores are reset)
- */
- write_wdtcr(IOP_WDTCR_EN_ARM);
- write_wdtcr(IOP_WDTCR_EN);
- write_wdtsr(IOP13XX_WDTSR_WRITE_EN | IOP13XX_WDTCR_IB_RESET);
- write_wdtcr(0x1000);
-
- for(;;);
-}
diff --git a/arch/arm/mach-iop13xx/include/mach/vmalloc.h b/arch/arm/mach-iop13xx/include/mach/vmalloc.h
deleted file mode 100644
index c5345674034..00000000000
--- a/arch/arm/mach-iop13xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _VMALLOC_H_
-#define _VMALLOC_H_
-#define VMALLOC_END 0xfa000000UL
-#endif
diff --git a/arch/arm/mach-iop13xx/iq81340mc.c b/arch/arm/mach-iop13xx/iq81340mc.c
index 4cf2cc477ea..abaee883358 100644
--- a/arch/arm/mach-iop13xx/iq81340mc.c
+++ b/arch/arm/mach-iop13xx/iq81340mc.c
@@ -96,4 +96,5 @@ MACHINE_START(IQ81340MC, "Intel IQ81340MC")
.init_irq = iop13xx_init_irq,
.timer = &iq81340mc_timer,
.init_machine = iq81340mc_init,
+ .restart = iop13xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop13xx/iq81340sc.c b/arch/arm/mach-iop13xx/iq81340sc.c
index cd9e27499a1..690916a09dc 100644
--- a/arch/arm/mach-iop13xx/iq81340sc.c
+++ b/arch/arm/mach-iop13xx/iq81340sc.c
@@ -98,4 +98,5 @@ MACHINE_START(IQ81340SC, "Intel IQ81340SC")
.init_irq = iop13xx_init_irq,
.timer = &iq81340sc_timer,
.init_machine = iq81340sc_init,
+ .restart = iop13xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index a5b989728b9..daabb1fa6c2 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -606,3 +606,14 @@ static int __init iop13xx_init_adma_setup(char *str)
__setup("iop13xx_init_adma", iop13xx_init_adma_setup);
__setup("iop13xx_init_uart", iop13xx_init_uart_setup);
__setup("iop13xx_init_i2c", iop13xx_init_i2c_setup);
+
+void iop13xx_restart(char mode, const char *cmd)
+{
+ /*
+ * Reset the internal bus (warning both cores are reset)
+ */
+ write_wdtcr(IOP_WDTCR_EN_ARM);
+ write_wdtcr(IOP_WDTCR_EN);
+ write_wdtsr(IOP13XX_WDTSR_WRITE_EN | IOP13XX_WDTCR_IB_RESET);
+ write_wdtcr(0x1000);
+}
diff --git a/arch/arm/mach-iop32x/em7210.c b/arch/arm/mach-iop32x/em7210.c
index 4325055d4e1..24069e03fdc 100644
--- a/arch/arm/mach-iop32x/em7210.c
+++ b/arch/arm/mach-iop32x/em7210.c
@@ -208,4 +208,5 @@ MACHINE_START(EM7210, "Lanner EM7210")
.init_irq = iop32x_init_irq,
.timer = &em7210_timer,
.init_machine = em7210_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop32x/glantank.c b/arch/arm/mach-iop32x/glantank.c
index 0edc8802057..204e1d1cd76 100644
--- a/arch/arm/mach-iop32x/glantank.c
+++ b/arch/arm/mach-iop32x/glantank.c
@@ -212,4 +212,5 @@ MACHINE_START(GLANTANK, "GLAN Tank")
.init_irq = iop32x_init_irq,
.timer = &glantank_timer,
.init_machine = glantank_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop32x/include/mach/io.h b/arch/arm/mach-iop32x/include/mach/io.h
index 059c783ce0b..2d88264b986 100644
--- a/arch/arm/mach-iop32x/include/mach/io.h
+++ b/arch/arm/mach-iop32x/include/mach/io.h
@@ -13,15 +13,8 @@
#include <asm/hardware/iop3xx.h>
-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
- unsigned int mtype);
-extern void __iop3xx_iounmap(void __iomem *addr);
-
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
#define __mem_pci(a) (a)
-#define __arch_ioremap __iop3xx_ioremap
-#define __arch_iounmap __iop3xx_iounmap
-
#endif
diff --git a/arch/arm/mach-iop32x/include/mach/system.h b/arch/arm/mach-iop32x/include/mach/system.h
index a4b808fe0d8..4a88727bca9 100644
--- a/arch/arm/mach-iop32x/include/mach/system.h
+++ b/arch/arm/mach-iop32x/include/mach/system.h
@@ -7,28 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <asm/mach-types.h>
-#include <asm/hardware/iop3xx.h>
-#include <mach/n2100.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- local_irq_disable();
-
- if (machine_is_n2100()) {
- gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
- gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
- while (1)
- ;
- }
-
- *IOP3XX_PCSR = 0x30;
-
- /* Jump into ROM at address 0 */
- cpu_reset(0);
-}
diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h
deleted file mode 100644
index c4862d48e58..00000000000
--- a/arch/arm/mach-iop32x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe000000UL
diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c
index 9e7aaccfeba..3eb642af1cd 100644
--- a/arch/arm/mach-iop32x/iq31244.c
+++ b/arch/arm/mach-iop32x/iq31244.c
@@ -318,6 +318,7 @@ MACHINE_START(IQ31244, "Intel IQ31244")
.init_irq = iop32x_init_irq,
.timer = &iq31244_timer,
.init_machine = iq31244_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
/* There should have been an ep80219 machine identifier from the beginning.
@@ -332,4 +333,5 @@ MACHINE_START(EP80219, "Intel EP80219")
.init_irq = iop32x_init_irq,
.timer = &iq31244_timer,
.init_machine = iq31244_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c
index 53ea86f649d..2ec724b58a2 100644
--- a/arch/arm/mach-iop32x/iq80321.c
+++ b/arch/arm/mach-iop32x/iq80321.c
@@ -191,4 +191,5 @@ MACHINE_START(IQ80321, "Intel IQ80321")
.init_irq = iop32x_init_irq,
.timer = &iq80321_timer,
.init_machine = iq80321_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index d7269279968..6b6d5591244 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -291,6 +291,14 @@ static void n2100_power_off(void)
;
}
+static void n2100_restart(char mode, const char *cmd)
+{
+ gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
+ gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
+ while (1)
+ ;
+}
+
static struct timer_list power_button_poll_timer;
@@ -332,4 +340,5 @@ MACHINE_START(N2100, "Thecus N2100")
.init_irq = iop32x_init_irq,
.timer = &n2100_timer,
.init_machine = n2100_init_machine,
+ .restart = n2100_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop33x/include/mach/io.h b/arch/arm/mach-iop33x/include/mach/io.h
index 39e893e97c2..a8a66fc8fbd 100644
--- a/arch/arm/mach-iop33x/include/mach/io.h
+++ b/arch/arm/mach-iop33x/include/mach/io.h
@@ -13,15 +13,8 @@
#include <asm/hardware/iop3xx.h>
-extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
- unsigned int mtype);
-extern void __iop3xx_iounmap(void __iomem *addr);
-
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
#define __mem_pci(a) (a)
-#define __arch_ioremap __iop3xx_ioremap
-#define __arch_iounmap __iop3xx_iounmap
-
#endif
diff --git a/arch/arm/mach-iop33x/include/mach/system.h b/arch/arm/mach-iop33x/include/mach/system.h
index f192a34be07..4f98e765397 100644
--- a/arch/arm/mach-iop33x/include/mach/system.h
+++ b/arch/arm/mach-iop33x/include/mach/system.h
@@ -7,17 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <asm/hardware/iop3xx.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- *IOP3XX_PCSR = 0x30;
-
- /* Jump into ROM at address 0 */
- cpu_reset(0);
-}
diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h
deleted file mode 100644
index 48331dc2370..00000000000
--- a/arch/arm/mach-iop33x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe000000UL
diff --git a/arch/arm/mach-iop33x/iq80331.c b/arch/arm/mach-iop33x/iq80331.c
index 9e14ccc56f8..abce934f381 100644
--- a/arch/arm/mach-iop33x/iq80331.c
+++ b/arch/arm/mach-iop33x/iq80331.c
@@ -146,4 +146,5 @@ MACHINE_START(IQ80331, "Intel IQ80331")
.init_irq = iop33x_init_irq,
.timer = &iq80331_timer,
.init_machine = iq80331_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-iop33x/iq80332.c b/arch/arm/mach-iop33x/iq80332.c
index 09c899a2523..7513559e25b 100644
--- a/arch/arm/mach-iop33x/iq80332.c
+++ b/arch/arm/mach-iop33x/iq80332.c
@@ -146,4 +146,5 @@ MACHINE_START(IQ80332, "Intel IQ80332")
.init_irq = iop33x_init_irq,
.timer = &iq80332_timer,
.init_machine = iq80332_init_machine,
+ .restart = iop3xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index 24f0fe35f4a..81c45370a4e 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -515,3 +515,7 @@ void __init ixp2000_init_irq(void)
}
}
+void ixp2000_restart(char mode, const char *cmd)
+{
+ ixp2000_reg_wrb(IXP2000_RESET0, RSTALL);
+}
diff --git a/arch/arm/mach-ixp2000/enp2611.c b/arch/arm/mach-ixp2000/enp2611.c
index af9994537e0..ee525416f0d 100644
--- a/arch/arm/mach-ixp2000/enp2611.c
+++ b/arch/arm/mach-ixp2000/enp2611.c
@@ -259,6 +259,7 @@ MACHINE_START(ENP2611, "Radisys ENP-2611 PCI network processor board")
.init_irq = ixp2000_init_irq,
.timer = &enp2611_timer,
.init_machine = enp2611_init_machine,
+ .restart = ixp2000_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp2000/include/mach/platform.h b/arch/arm/mach-ixp2000/include/mach/platform.h
index 42182c79ed9..bb0f8dcf9ee 100644
--- a/arch/arm/mach-ixp2000/include/mach/platform.h
+++ b/arch/arm/mach-ixp2000/include/mach/platform.h
@@ -122,6 +122,7 @@ void ixp2000_map_io(void);
void ixp2000_uart_init(void);
void ixp2000_init_irq(void);
void ixp2000_init_time(unsigned long);
+void ixp2000_restart(char, const char *);
unsigned long ixp2000_gettimeoffset(void);
struct pci_sys_data;
diff --git a/arch/arm/mach-ixp2000/include/mach/system.h b/arch/arm/mach-ixp2000/include/mach/system.h
index de370992c84..a7fb08b2b8e 100644
--- a/arch/arm/mach-ixp2000/include/mach/system.h
+++ b/arch/arm/mach-ixp2000/include/mach/system.h
@@ -8,42 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- local_irq_disable();
-
- /*
- * Reset flash banking register so that we are pointing at
- * RedBoot bank.
- */
- if (machine_is_ixdp2401()) {
- ixp2000_reg_write(IXDP2X01_CPLD_FLASH_REG,
- ((0 >> IXDP2X01_FLASH_WINDOW_BITS)
- | IXDP2X01_CPLD_FLASH_INTERN));
- ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0xffffffff);
- }
-
- /*
- * On IXDP2801 we need to write this magic sequence to the CPLD
- * to cause a complete reset of the CPU and all external devices
- * and move the flash bank register back to 0.
- */
- if (machine_is_ixdp2801() || machine_is_ixdp28x5()) {
- unsigned long reset_reg = *IXDP2X01_CPLD_RESET_REG;
-
- reset_reg = 0x55AA0000 | (reset_reg & 0x0000FFFF);
- ixp2000_reg_write(IXDP2X01_CPLD_RESET_REG, reset_reg);
- ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0x80000000);
- }
-
- ixp2000_reg_wrb(IXP2000_RESET0, RSTALL);
-}
diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h
deleted file mode 100644
index 61c8dae24f9..00000000000
--- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-ixp2000/include/mach/vmalloc.h
- *
- * Author: Naeem Afzal <naeem.m.afzal@intel.com>
- *
- * Copyright 2002 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END 0xfb000000UL
diff --git a/arch/arm/mach-ixp2000/ixdp2400.c b/arch/arm/mach-ixp2000/ixdp2400.c
index f7dfd970014..f53e911ec94 100644
--- a/arch/arm/mach-ixp2000/ixdp2400.c
+++ b/arch/arm/mach-ixp2000/ixdp2400.c
@@ -176,5 +176,6 @@ MACHINE_START(IXDP2400, "Intel IXDP2400 Development Platform")
.init_irq = ixdp2400_init_irq,
.timer = &ixdp2400_timer,
.init_machine = ixdp2x00_init_machine,
+ .restart = ixp2000_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c
index d33bcac1ec9..a2e7c393e74 100644
--- a/arch/arm/mach-ixp2000/ixdp2800.c
+++ b/arch/arm/mach-ixp2000/ixdp2800.c
@@ -291,5 +291,6 @@ MACHINE_START(IXDP2800, "Intel IXDP2800 Development Platform")
.init_irq = ixdp2800_init_irq,
.timer = &ixdp2800_timer,
.init_machine = ixdp2x00_init_machine,
+ .restart = ixp2000_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c
index 61a28676b5b..7632beadabf 100644
--- a/arch/arm/mach-ixp2000/ixdp2x01.c
+++ b/arch/arm/mach-ixp2000/ixdp2x01.c
@@ -413,6 +413,35 @@ static void __init ixdp2x01_init_machine(void)
ixdp2x01_uart_init();
}
+static void ixdp2401_restart(char mode, const char *cmd)
+{
+ /*
+ * Reset flash banking register so that we are pointing at
+ * RedBoot bank.
+ */
+ ixp2000_reg_write(IXDP2X01_CPLD_FLASH_REG,
+ ((0 >> IXDP2X01_FLASH_WINDOW_BITS)
+ | IXDP2X01_CPLD_FLASH_INTERN));
+ ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0xffffffff);
+
+ ixp2000_restart(mode, cmd);
+}
+
+static void ixdp280x_restart(char mode, const char *cmd)
+{
+ /*
+ * On IXDP2801 we need to write this magic sequence to the CPLD
+ * to cause a complete reset of the CPU and all external devices
+ * and move the flash bank register back to 0.
+ */
+ unsigned long reset_reg = *IXDP2X01_CPLD_RESET_REG;
+
+ reset_reg = 0x55AA0000 | (reset_reg & 0x0000FFFF);
+ ixp2000_reg_write(IXDP2X01_CPLD_RESET_REG, reset_reg);
+ ixp2000_reg_wrb(IXDP2X01_CPLD_RESET_REG, 0x80000000);
+
+ ixp2000_restart(mode, cmd);
+}
#ifdef CONFIG_ARCH_IXDP2401
MACHINE_START(IXDP2401, "Intel IXDP2401 Development Platform")
@@ -422,6 +451,7 @@ MACHINE_START(IXDP2401, "Intel IXDP2401 Development Platform")
.init_irq = ixdp2x01_init_irq,
.timer = &ixdp2x01_timer,
.init_machine = ixdp2x01_init_machine,
+ .restart = ixdp2401_restart,
MACHINE_END
#endif
@@ -433,6 +463,7 @@ MACHINE_START(IXDP2801, "Intel IXDP2801 Development Platform")
.init_irq = ixdp2x01_init_irq,
.timer = &ixdp2x01_timer,
.init_machine = ixdp2x01_init_machine,
+ .restart = ixdp280x_restart,
MACHINE_END
/*
@@ -446,6 +477,7 @@ MACHINE_START(IXDP28X5, "Intel IXDP2805/2855 Development Platform")
.init_irq = ixdp2x01_init_irq,
.timer = &ixdp2x01_timer,
.init_machine = ixdp2x01_init_machine,
+ .restart = ixdp280x_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index a1bee33d183..0923bb905cc 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -444,3 +444,9 @@ void __init ixp23xx_sys_init(void)
*IXP23XX_EXP_UNIT_FUSE |= 0xf;
platform_add_devices(ixp23xx_devices, ARRAY_SIZE(ixp23xx_devices));
}
+
+void ixp23xx_restart(char mode, const char *cmd)
+{
+ /* Use on-chip reset capability */
+ *IXP23XX_RESET0 |= IXP23XX_RST_ALL;
+}
diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c
index 30dd31652e9..8f2487e1fc4 100644
--- a/arch/arm/mach-ixp23xx/espresso.c
+++ b/arch/arm/mach-ixp23xx/espresso.c
@@ -90,4 +90,5 @@ MACHINE_START(ESPRESSO, "IP Fabrics Double Espresso")
.timer = &ixp23xx_timer,
.atag_offset = 0x100,
.init_machine = espresso_init,
+ .restart = ixp23xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp23xx/include/mach/io.h b/arch/arm/mach-ixp23xx/include/mach/io.h
index a1749d0fd89..4ce4353b9f7 100644
--- a/arch/arm/mach-ixp23xx/include/mach/io.h
+++ b/arch/arm/mach-ixp23xx/include/mach/io.h
@@ -20,33 +20,4 @@
#define __io(p) ((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT))
#define __mem_pci(a) (a)
-static inline void __iomem *
-ixp23xx_ioremap(unsigned long addr, unsigned long size, unsigned int mtype)
-{
- if (addr >= IXP23XX_PCI_MEM_START &&
- addr <= IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE) {
- if (addr + size > IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE)
- return NULL;
-
- return (void __iomem *)
- ((addr - IXP23XX_PCI_MEM_START) + IXP23XX_PCI_MEM_VIRT);
- }
-
- return __arm_ioremap(addr, size, mtype);
-}
-
-static inline void
-ixp23xx_iounmap(void __iomem *addr)
-{
- if ((((u32)addr) >= IXP23XX_PCI_MEM_VIRT) &&
- (((u32)addr) < IXP23XX_PCI_MEM_VIRT + IXP23XX_PCI_MEM_SIZE))
- return;
-
- __iounmap(addr);
-}
-
-#define __arch_ioremap ixp23xx_ioremap
-#define __arch_iounmap ixp23xx_iounmap
-
-
#endif
diff --git a/arch/arm/mach-ixp23xx/include/mach/platform.h b/arch/arm/mach-ixp23xx/include/mach/platform.h
index db9d9416e5e..50de558e722 100644
--- a/arch/arm/mach-ixp23xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp23xx/include/mach/platform.h
@@ -34,6 +34,7 @@ struct pci_sys_data;
void ixp23xx_map_io(void);
void ixp23xx_init_irq(void);
void ixp23xx_sys_init(void);
+void ixp23xx_restart(char, const char *);
int ixp23xx_pci_setup(int, struct pci_sys_data *);
void ixp23xx_pci_preinit(void);
struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*);
diff --git a/arch/arm/mach-ixp23xx/include/mach/system.h b/arch/arm/mach-ixp23xx/include/mach/system.h
index 8920ff2dff1..277dda7334b 100644
--- a/arch/arm/mach-ixp23xx/include/mach/system.h
+++ b/arch/arm/mach-ixp23xx/include/mach/system.h
@@ -7,10 +7,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
static inline void arch_idle(void)
{
#if 0
@@ -18,16 +14,3 @@ static inline void arch_idle(void)
cpu_do_idle();
#endif
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- /* First try machine specific support */
- if (machine_is_ixdp2351()) {
- *IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_MAGIC;
- (void) *IXDP2351_CPLD_RESET1_REG;
- *IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_ENABLE;
- }
-
- /* Use on-chip reset capability */
- *IXP23XX_RESET0 |= IXP23XX_RST_ALL;
-}
diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
deleted file mode 100644
index 896c56a1c00..00000000000
--- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-ixp23xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2005 MontaVista Software, Inc.
- *
- * NPU mappings end at 0xf0000000 and we allocate 64MB for board
- * specific static I/O.
- */
-
-#define VMALLOC_END (0xec000000UL)
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index b3a57e0f341..5d5dd3e8d06 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -326,6 +326,17 @@ static void __init ixdp2351_init(void)
ixp23xx_sys_init();
}
+static void ixdp2351_restart(char mode, const char *cmd)
+{
+ /* Assert the board-level CPLD reset, then fall back to the on-chip reset */
+
+ *IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_MAGIC;
+ (void) *IXDP2351_CPLD_RESET1_REG;
+ *IXDP2351_CPLD_RESET1_REG = IXDP2351_CPLD_RESET1_ENABLE;
+
+ ixp23xx_restart(mode, cmd);
+}
+
MACHINE_START(IXDP2351, "Intel IXDP2351 Development Platform")
/* Maintainer: MontaVista Software, Inc. */
.map_io = ixdp2351_map_io,
@@ -333,4 +344,5 @@ MACHINE_START(IXDP2351, "Intel IXDP2351 Development Platform")
.timer = &ixp23xx_timer,
.atag_offset = 0x100,
.init_machine = ixdp2351_init,
+ .restart = ixdp2351_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c
index 8f4dcbba902..377283fc658 100644
--- a/arch/arm/mach-ixp23xx/roadrunner.c
+++ b/arch/arm/mach-ixp23xx/roadrunner.c
@@ -177,4 +177,5 @@ MACHINE_START(ROADRUNNER, "ADI Engineering RoadRunner Development Platform")
.timer = &ixp23xx_timer,
.atag_offset = 0x100,
.init_machine = roadrunner_init,
+ .restart = ixp23xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 37609a22c45..a7277ad470a 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -172,6 +172,7 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
/*
@@ -190,6 +191,7 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index b86a0055ab9..3841ab4146b 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/serial.h>
-#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
@@ -403,18 +402,9 @@ void __init ixp4xx_sys_init(void)
/*
* sched_clock()
*/
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
-{
- u32 cyc = *IXP4XX_OSTS;
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace ixp4xx_update_sched_clock(void)
+static u32 notrace ixp4xx_read_sched_clock(void)
{
- u32 cyc = *IXP4XX_OSTS;
- update_sched_clock(&cd, cyc, (u32)~0);
+ return *IXP4XX_OSTS;
}
/*
@@ -430,7 +420,7 @@ unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
static void __init ixp4xx_clocksource_init(void)
{
- init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
+ setup_sched_clock(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);
clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
ixp4xx_clocksource_read);
@@ -501,3 +491,23 @@ static void __init ixp4xx_clockevent_init(void)
clockevents_register_device(&clockevent_ixp4xx);
}
+
+void ixp4xx_restart(char mode, const char *cmd)
+{
+ if ( 1 && mode == 's') {
+ /* Jump into ROM at address 0 */
+ soft_restart(0);
+ } else {
+ /* Use on-chip reset capability */
+
+ /* set the "key" register to enable access to
+ * "timer" and "enable" registers
+ */
+ *IXP4XX_OSWK = IXP4XX_WDT_KEY;
+
+ /* write 0 to the timer register for an immediate reset */
+ *IXP4XX_OSWT = 0;
+
+ *IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
+ }
+}
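For context, a sketch of the setup_sched_clock() conversion pattern used in the hunk above: the open-coded clock data and update callback are replaced by a single read callback that returns the raw counter. The register and frequency names below are placeholders, not real ixp4xx symbols:

/* Sketch only: feed a free-running 32-bit counter to the sched_clock core. */
static u32 notrace example_read_sched_clock(void)
{
	return readl_relaxed(EXAMPLE_COUNTER_REG);	/* hypothetical register */
}

static void __init example_clocksource_init(void)
{
	/* counter width in bits and its rate in Hz */
	setup_sched_clock(example_read_sched_clock, 32, EXAMPLE_TIMER_FREQ);
}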
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 81dfec31842..a74f86ce8bc 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -117,6 +117,7 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
@@ -132,6 +133,7 @@ MACHINE_START(IXDPG425, "Intel IXDPG425")
.timer = &ixp4xx_timer,
.atag_offset = 0x100,
.init_machine = coyote_init,
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 8837fbca27c..67be177b336 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -286,4 +286,5 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 2887c3578c1..6d5818285af 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -277,5 +277,6 @@ MACHINE_START(FSG, "Freecom FSG-3")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index d69d1b053bb..7ecf9b28f1c 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -104,5 +104,6 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index bf6678d1a92..c0e3d69a8ae 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -504,4 +504,5 @@ MACHINE_START(GORAMO_MLR, "MultiLink")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index aa029fc1914..a23f8939145 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -172,6 +172,7 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index e824c02c825..df9250bbf13 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -125,6 +125,7 @@ extern void ixp4xx_init_irq(void);
extern void ixp4xx_sys_init(void);
extern void ixp4xx_timer_init(void);
extern struct sys_timer ixp4xx_timer;
+extern void ixp4xx_restart(char, const char *);
extern void ixp4xx_pci_preinit(void);
struct pci_sys_data;
extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
diff --git a/arch/arm/mach-ixp4xx/include/mach/system.h b/arch/arm/mach-ixp4xx/include/mach/system.h
index 54c0af7fa2d..140a9bef446 100644
--- a/arch/arm/mach-ixp4xx/include/mach/system.h
+++ b/arch/arm/mach-ixp4xx/include/mach/system.h
@@ -8,9 +8,6 @@
* published by the Free Software Foundation.
*
*/
-
-#include <mach/hardware.h>
-
static inline void arch_idle(void)
{
/* ixp4xx does not implement the XScale PWRMODE register,
@@ -20,25 +17,3 @@ static inline void arch_idle(void)
cpu_do_idle();
#endif
}
-
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- if ( 1 && mode == 's') {
- /* Jump into ROM at address 0 */
- cpu_reset(0);
- } else {
- /* Use on-chip reset capability */
-
- /* set the "key" register to enable access to
- * "timer" and "enable" registers
- */
- *IXP4XX_OSWK = IXP4XX_WDT_KEY;
-
- /* write 0 to the timer register for an immediate reset */
- *IXP4XX_OSWT = 0;
-
- *IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
- }
-}
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
deleted file mode 100644
index 9bcd64d5985..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/vmalloc.h
- */
-#define VMALLOC_END (0xff000000UL)
-
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index f235f829dfa..8a38b39999f 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -261,6 +261,7 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index de716fa1aab..1010eb7b008 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -321,4 +321,5 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index ac81ccb26bf..aa355c360d5 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -307,4 +307,5 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c
index 3b6a81a696f..0940869fcfd 100644
--- a/arch/arm/mach-ixp4xx/omixp-setup.c
+++ b/arch/arm/mach-ixp4xx/omixp-setup.c
@@ -246,6 +246,7 @@ MACHINE_START(DEVIXP, "Omicron DEVIXP")
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = omixp_init,
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
@@ -259,6 +260,7 @@ MACHINE_START(MICCPT, "Omicron MICCPT")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
@@ -269,5 +271,6 @@ MACHINE_START(MIC256, "Omicron MIC256")
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = omixp_init,
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 27e469ef452..9dec2068329 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -244,4 +244,5 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index b14144b967a..5ac0f0a0fd8 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -105,5 +105,6 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
#if defined(CONFIG_PCI)
.dma_zone_size = SZ_64M,
#endif
+ .restart = ixp4xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/addr-map.c b/arch/arm/mach-kirkwood/addr-map.c
index 8d03bcef518..e9a7180863d 100644
--- a/arch/arm/mach-kirkwood/addr-map.c
+++ b/arch/arm/mach-kirkwood/addr-map.c
@@ -13,12 +13,12 @@
#include <linux/mbus.h>
#include <linux/io.h>
#include <mach/hardware.h>
+#include <plat/addr-map.h>
#include "common.h"
/*
* Generic Address Decode Windows bit settings
*/
-#define TARGET_DDR 0
#define TARGET_DEV_BUS 1
#define TARGET_SRAM 3
#define TARGET_PCIE 4
@@ -36,118 +36,55 @@
#define ATTR_SRAM 0x01
/*
- * Helpers to get DDR bank info
+ * Description of the windows needed by the platform code
*/
-#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
-#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
-
-/*
- * CPU Address Decode Windows registers
- */
-#define WIN_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
-#define WIN_CTRL_OFF 0x0000
-#define WIN_BASE_OFF 0x0004
-#define WIN_REMAP_LO_OFF 0x0008
-#define WIN_REMAP_HI_OFF 0x000c
-
-
-struct mbus_dram_target_info kirkwood_mbus_dram_info;
-
-static int __init cpu_win_can_remap(int win)
-{
- if (win < 4)
- return 1;
-
- return 0;
-}
-
-static void __init setup_cpu_win(int win, u32 base, u32 size,
- u8 target, u8 attr, int remap)
-{
- void __iomem *addr = (void __iomem *)WIN_OFF(win);
- u32 ctrl;
-
- base &= 0xffff0000;
- ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1;
-
- writel(base, addr + WIN_BASE_OFF);
- writel(ctrl, addr + WIN_CTRL_OFF);
- if (cpu_win_can_remap(win)) {
- if (remap < 0)
- remap = base;
-
- writel(remap & 0xffff0000, addr + WIN_REMAP_LO_OFF);
- writel(0, addr + WIN_REMAP_HI_OFF);
- }
-}
-
-void __init kirkwood_setup_cpu_mbus(void)
-{
- void __iomem *addr;
- int i;
- int cs;
+static struct __initdata orion_addr_map_cfg addr_map_cfg = {
+ .num_wins = 8,
+ .remappable_wins = 4,
+ .bridge_virt_base = BRIDGE_VIRT_BASE,
+};
+static const struct __initdata orion_addr_map_info addr_map_info[] = {
/*
- * First, disable and clear windows.
+ * Windows for PCIe IO+MEM space.
*/
- for (i = 0; i < 8; i++) {
- addr = (void __iomem *)WIN_OFF(i);
-
- writel(0, addr + WIN_BASE_OFF);
- writel(0, addr + WIN_CTRL_OFF);
- if (cpu_win_can_remap(i)) {
- writel(0, addr + WIN_REMAP_LO_OFF);
- writel(0, addr + WIN_REMAP_HI_OFF);
- }
- }
-
+ { 0, KIRKWOOD_PCIE_IO_PHYS_BASE, KIRKWOOD_PCIE_IO_SIZE,
+ TARGET_PCIE, ATTR_PCIE_IO, KIRKWOOD_PCIE_IO_BUS_BASE
+ },
+ { 1, KIRKWOOD_PCIE_MEM_PHYS_BASE, KIRKWOOD_PCIE_MEM_SIZE,
+ TARGET_PCIE, ATTR_PCIE_MEM, KIRKWOOD_PCIE_MEM_BUS_BASE
+ },
+ { 2, KIRKWOOD_PCIE1_IO_PHYS_BASE, KIRKWOOD_PCIE1_IO_SIZE,
+ TARGET_PCIE, ATTR_PCIE1_IO, KIRKWOOD_PCIE1_IO_BUS_BASE
+ },
+ { 3, KIRKWOOD_PCIE1_MEM_PHYS_BASE, KIRKWOOD_PCIE1_MEM_SIZE,
+ TARGET_PCIE, ATTR_PCIE1_MEM, KIRKWOOD_PCIE1_MEM_BUS_BASE
+ },
/*
- * Setup windows for PCIe IO+MEM space.
+ * Window for NAND controller.
*/
- setup_cpu_win(0, KIRKWOOD_PCIE_IO_PHYS_BASE, KIRKWOOD_PCIE_IO_SIZE,
- TARGET_PCIE, ATTR_PCIE_IO, KIRKWOOD_PCIE_IO_BUS_BASE);
- setup_cpu_win(1, KIRKWOOD_PCIE_MEM_PHYS_BASE, KIRKWOOD_PCIE_MEM_SIZE,
- TARGET_PCIE, ATTR_PCIE_MEM, KIRKWOOD_PCIE_MEM_BUS_BASE);
- setup_cpu_win(2, KIRKWOOD_PCIE1_IO_PHYS_BASE, KIRKWOOD_PCIE1_IO_SIZE,
- TARGET_PCIE, ATTR_PCIE1_IO, KIRKWOOD_PCIE1_IO_BUS_BASE);
- setup_cpu_win(3, KIRKWOOD_PCIE1_MEM_PHYS_BASE, KIRKWOOD_PCIE1_MEM_SIZE,
- TARGET_PCIE, ATTR_PCIE1_MEM, KIRKWOOD_PCIE1_MEM_BUS_BASE);
-
+ { 4, KIRKWOOD_NAND_MEM_PHYS_BASE, KIRKWOOD_NAND_MEM_SIZE,
+ TARGET_DEV_BUS, ATTR_DEV_NAND, -1
+ },
/*
- * Setup window for NAND controller.
+ * Window for SRAM.
*/
- setup_cpu_win(4, KIRKWOOD_NAND_MEM_PHYS_BASE, KIRKWOOD_NAND_MEM_SIZE,
- TARGET_DEV_BUS, ATTR_DEV_NAND, -1);
+ { 5, KIRKWOOD_SRAM_PHYS_BASE, KIRKWOOD_SRAM_SIZE,
+ TARGET_SRAM, ATTR_SRAM, -1
+ },
+ /* End marker */
+ { -1, 0, 0, 0, 0, 0 }
+};
+void __init kirkwood_setup_cpu_mbus(void)
+{
/*
- * Setup window for SRAM.
+ * Disable, clear and configure windows.
*/
- setup_cpu_win(5, KIRKWOOD_SRAM_PHYS_BASE, KIRKWOOD_SRAM_SIZE,
- TARGET_SRAM, ATTR_SRAM, -1);
+ orion_config_wins(&addr_map_cfg, addr_map_info);
/*
* Setup MBUS dram target info.
*/
- kirkwood_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
-
- addr = (void __iomem *)DDR_WINDOW_CPU_BASE;
-
- for (i = 0, cs = 0; i < 4; i++) {
- u32 base = readl(addr + DDR_BASE_CS_OFF(i));
- u32 size = readl(addr + DDR_SIZE_CS_OFF(i));
-
- /*
- * Chip select enabled?
- */
- if (size & 1) {
- struct mbus_dram_window *w;
-
- w = &kirkwood_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- w->base = base & 0xffff0000;
- w->size = (size | 0x0000ffff) + 1;
- }
- }
- kirkwood_mbus_dram_info.num_cs = cs;
+ orion_setup_cpu_mbus_target(&addr_map_cfg, DDR_WINDOW_CPU_BASE);
}
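For context, a sketch of the declarative window-table pattern the conversion above adopts: the platform describes its address-decode windows once and lets the shared orion helpers program and tear them down. Window numbers, bases and sizes below are placeholders; only the field order and the -1 end marker follow the code above:

/* Sketch only: describe mbus windows, then let the orion helper program them. */
static struct __initdata orion_addr_map_cfg example_map_cfg = {
	.num_wins	  = 8,
	.remappable_wins  = 4,
	.bridge_virt_base = BRIDGE_VIRT_BASE,
};

static const struct __initdata orion_addr_map_info example_map_info[] = {
	/* win, base, size, target, attribute, remap (-1 = no remap) */
	{ 0, EXAMPLE_IO_PHYS_BASE, EXAMPLE_IO_SIZE,
	  TARGET_PCIE, ATTR_PCIE_IO, EXAMPLE_IO_BUS_BASE },
	{ -1, 0, 0, 0, 0, 0 },		/* end marker */
};

static void __init example_setup_cpu_mbus(void)
{
	orion_config_wins(&example_map_cfg, example_map_info);
	orion_setup_cpu_mbus_target(&example_map_cfg, DDR_WINDOW_CPU_BASE);
}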
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index f3248cfbe51..cc15426787b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
-#include <linux/mbus.h>
#include <linux/ata_platform.h>
#include <linux/mtd/nand.h>
#include <linux/dma-mapping.h>
@@ -30,6 +29,7 @@
#include <plat/orion_nand.h>
#include <plat/common.h>
#include <plat/time.h>
+#include <plat/addr-map.h>
#include "common.h"
/*****************************************************************************
@@ -73,8 +73,7 @@ unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
void __init kirkwood_ehci_init(void)
{
kirkwood_clk_ctrl |= CGC_USB0;
- orion_ehci_init(&kirkwood_mbus_dram_info,
- USB_PHYS_BASE, IRQ_KIRKWOOD_USB);
+ orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB);
}
@@ -85,7 +84,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
kirkwood_clk_ctrl |= CGC_GE0;
- orion_ge00_init(eth_data, &kirkwood_mbus_dram_info,
+ orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
}
@@ -99,7 +98,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
kirkwood_clk_ctrl |= CGC_GE1;
- orion_ge01_init(eth_data, &kirkwood_mbus_dram_info,
+ orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
}
@@ -178,8 +177,7 @@ void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
if (sata_data->n_ports > 1)
kirkwood_clk_ctrl |= CGC_SATA1;
- orion_sata_init(sata_data, &kirkwood_mbus_dram_info,
- SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
+ orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
}
@@ -221,7 +219,6 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
mvsdio_data->clock = 100000000;
else
mvsdio_data->clock = 200000000;
- mvsdio_data->dram = &kirkwood_mbus_dram_info;
kirkwood_clk_ctrl |= CGC_SDIO;
kirkwood_sdio.dev.platform_data = mvsdio_data;
platform_device_register(&kirkwood_sdio);
@@ -285,8 +282,7 @@ static void __init kirkwood_xor0_init(void)
{
kirkwood_clk_ctrl |= CGC_XOR0;
- orion_xor0_init(&kirkwood_mbus_dram_info,
- XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
+ orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
}
@@ -364,7 +360,6 @@ static struct resource kirkwood_i2s_resources[] = {
};
static struct kirkwood_asoc_platform_data kirkwood_i2s_data = {
- .dram = &kirkwood_mbus_dram_info,
.burst = 128,
};
@@ -430,6 +425,8 @@ static char * __init kirkwood_id(void)
} else if (dev == MV88F6282_DEV_ID) {
if (rev == MV88F6282_REV_A0)
return "MV88F6282-Rev-A0";
+ else if (rev == MV88F6282_REV_A1)
+ return "MV88F6282-Rev-A1";
else
return "MV88F6282-Rev-Unsupported";
} else {
@@ -534,3 +531,19 @@ static int __init kirkwood_clock_gate(void)
return 0;
}
late_initcall(kirkwood_clock_gate);
+
+void kirkwood_restart(char mode, const char *cmd)
+{
+ /*
+ * Enable soft reset to assert RSTOUTn.
+ */
+ writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+ /*
+ * Assert soft reset.
+ */
+ writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+ while (1)
+ ;
+}
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index b9b0f0968a3..9071a397136 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -30,7 +30,6 @@ void kirkwood_init(void);
void kirkwood_init_early(void);
void kirkwood_init_irq(void);
-extern struct mbus_dram_target_info kirkwood_mbus_dram_info;
void kirkwood_setup_cpu_mbus(void);
void kirkwood_enable_pcie(void);
@@ -50,6 +49,7 @@ void kirkwood_uart1_init(void);
void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
void kirkwood_audio_init(void);
+void kirkwood_restart(char, const char *);
extern int kirkwood_tclk;
extern struct sys_timer kirkwood_timer;
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index f457e07a65f..6e1bac929ab 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -227,4 +227,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index ff4c21c1f92..d9335937959 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -103,4 +103,5 @@ MACHINE_START(DB88F6281_BP, "Marvell DB-88F6281-BP Development Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/dockstar-setup.c b/arch/arm/mach-kirkwood/dockstar-setup.c
index e4d199b2b1e..61d9a552a05 100644
--- a/arch/arm/mach-kirkwood/dockstar-setup.c
+++ b/arch/arm/mach-kirkwood/dockstar-setup.c
@@ -108,4 +108,5 @@ MACHINE_START(DOCKSTAR, "Seagate FreeAgent DockStar")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
index 6c40f784b51..bdaed3867d1 100644
--- a/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -127,4 +127,5 @@ MACHINE_START(GURUPLUG, "Marvell GuruPlug Reference Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/include/mach/io.h b/arch/arm/mach-kirkwood/include/mach/io.h
index 1aaddc364f2..49dd0cb5e16 100644
--- a/arch/arm/mach-kirkwood/include/mach/io.h
+++ b/arch/arm/mach-kirkwood/include/mach/io.h
@@ -19,31 +19,6 @@ static inline void __iomem *__io(unsigned long addr)
+ KIRKWOOD_PCIE_IO_VIRT_BASE);
}
-static inline void __iomem *
-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
-{
- void __iomem *retval;
- unsigned long offs = paddr - KIRKWOOD_REGS_PHYS_BASE;
- if (mtype == MT_DEVICE && size && offs < KIRKWOOD_REGS_SIZE &&
- size <= KIRKWOOD_REGS_SIZE && offs + size <= KIRKWOOD_REGS_SIZE) {
- retval = (void __iomem *)KIRKWOOD_REGS_VIRT_BASE + offs;
- } else {
- retval = __arm_ioremap(paddr, size, mtype);
- }
-
- return retval;
-}
-
-static inline void
-__arch_iounmap(void __iomem *addr)
-{
- if (addr < (void __iomem *)KIRKWOOD_REGS_VIRT_BASE ||
- addr >= (void __iomem *)(KIRKWOOD_REGS_VIRT_BASE + KIRKWOOD_REGS_SIZE))
- __iounmap(addr);
-}
-
-#define __arch_ioremap __arch_ioremap
-#define __arch_iounmap __arch_iounmap
#define __io(a) __io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 010bdeb4ac5..fede3d503ef 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -135,4 +135,5 @@
#define MV88F6282_DEV_ID 0x6282
#define MV88F6282_REV_A0 0
+#define MV88F6282_REV_A1 1
#endif
diff --git a/arch/arm/mach-kirkwood/include/mach/system.h b/arch/arm/mach-kirkwood/include/mach/system.h
index 7568e95d279..5fddde002b5 100644
--- a/arch/arm/mach-kirkwood/include/mach/system.h
+++ b/arch/arm/mach-kirkwood/include/mach/system.h
@@ -9,28 +9,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/bridge-regs.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Enable soft reset to assert RSTOUTn.
- */
- writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
- /*
- * Assert soft reset.
- */
- writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
- while (1)
- ;
-}
-
-
#endif
diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h
deleted file mode 100644
index bf162ca3d2c..00000000000
--- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe800000UL
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index cc431fa22cc..0c6ad63f10c 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -10,7 +10,6 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/mbus.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <plat/mpp.h>
diff --git a/arch/arm/mach-kirkwood/mpp.h b/arch/arm/mach-kirkwood/mpp.h
index ac787957e2d..e8fda45c073 100644
--- a/arch/arm/mach-kirkwood/mpp.h
+++ b/arch/arm/mach-kirkwood/mpp.h
@@ -102,6 +102,7 @@
#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 1, 0, 1, 1, 1, 1 )
#define MPP12_GPO MPP( 12, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+#define MPP12_GPIO MPP( 12, 0x0, 1, 1, 0, 0, 0, 1, 0 )
#define MPP12_SD_CLK MPP( 12, 0x1, 0, 1, 1, 1, 1, 1, 1 )
#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 1, 0, 0, 0, 0, 1 )
#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 1, 0, 0, 0, 0, 1 )
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 9a1e917352f..85f6169c248 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -169,4 +169,5 @@ MACHINE_START(MV88F6281GTW_GE, "Marvell 88F6281 GTW GE Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 8849bcc7328..e6bba01bae3 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -264,6 +264,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -275,6 +276,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -286,5 +288,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index 1ba12c4dff8..31ae8de34e9 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -405,6 +405,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -416,5 +417,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index 5660ca6c3d8..01f8c899288 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -220,6 +220,7 @@ MACHINE_START(OPENRD_BASE, "Marvell OpenRD Base Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -232,6 +233,7 @@ MACHINE_START(OPENRD_CLIENT, "Marvell OpenRD Client Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -244,5 +246,6 @@ MACHINE_START(OPENRD_ULTIMATE, "Marvell OpenRD Ultimate Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 74b992d810e..fb451bfe478 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,12 +11,12 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include <mach/bridge-regs.h>
+#include <plat/addr-map.h>
#include "common.h"
void kirkwood_enable_pcie(void)
@@ -208,7 +208,7 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
*/
orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
- orion_pcie_setup(pp->base, &kirkwood_mbus_dram_info);
+ orion_pcie_setup(pp->base);
return 1;
}
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index 6663869773a..fd2c9c8b683 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -85,4 +85,5 @@ MACHINE_START(RD88F6192_NAS, "Marvell RD-88F6192-NAS Development Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index 66b3c05e37a..ef922079348 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -121,4 +121,5 @@ MACHINE_START(RD88F6281, "Marvell RD-88F6281 Reference Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/sheevaplug-setup.c b/arch/arm/mach-kirkwood/sheevaplug-setup.c
index 8b102d62e82..4ea70e5f713 100644
--- a/arch/arm/mach-kirkwood/sheevaplug-setup.c
+++ b/arch/arm/mach-kirkwood/sheevaplug-setup.c
@@ -107,7 +107,7 @@ static void __init sheevaplug_init(void)
kirkwood_init();
/* setup gpio pin select */
- if (machine_is_sheeva_esata())
+ if (machine_is_esata_sheevaplug())
kirkwood_mpp_conf(sheeva_esata_mpp_config);
else
kirkwood_mpp_conf(sheevaplug_mpp_config);
@@ -123,11 +123,11 @@ static void __init sheevaplug_init(void)
kirkwood_ge00_init(&sheevaplug_ge00_data);
/* honor lower power consumption for plugs with out eSATA */
- if (machine_is_sheeva_esata())
+ if (machine_is_esata_sheevaplug())
kirkwood_sata_init(&sheeva_esata_sata_data);
/* enable sd wp and sd cd on plugs with esata */
- if (machine_is_sheeva_esata())
+ if (machine_is_esata_sheevaplug())
kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
else
kirkwood_sdio_init(&sheevaplug_mvsdio_data);
@@ -144,6 +144,7 @@ MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
@@ -155,5 +156,6 @@ MACHINE_START(ESATA_SHEEVAPLUG, "Marvell eSATA SheevaPlug Reference Board")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index ea104fb5ec3..966b2b3bb81 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -207,4 +207,5 @@ MACHINE_START(T5325, "HP t5325 Thin Client")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 262c034836d..73e2b6ca956 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -138,4 +138,5 @@ MACHINE_START(TS219, "QNAP TS-119/TS-219")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index b68f5b4a9ec..5bbca268044 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -182,4 +182,5 @@ MACHINE_START(TS41X, "QNAP TS-41x")
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
+ .restart = kirkwood_restart,
MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c
index a91f99d265a..255502ddd87 100644
--- a/arch/arm/mach-ks8695/board-acs5k.c
+++ b/arch/arm/mach-ks8695/board-acs5k.c
@@ -228,4 +228,5 @@ MACHINE_START(ACS5K, "Brivo Systems LLC ACS-5000 Master board")
.init_irq = ks8695_init_irq,
.init_machine = acs5k_init,
.timer = &ks8695_timer,
+ .restart = ks8695_restart,
MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-dsm320.c b/arch/arm/mach-ks8695/board-dsm320.c
index d24bcef2e2d..e0d36cef2c5 100644
--- a/arch/arm/mach-ks8695/board-dsm320.c
+++ b/arch/arm/mach-ks8695/board-dsm320.c
@@ -126,4 +126,5 @@ MACHINE_START(DSM320, "D-Link DSM-320 Wireless Media Player")
.init_irq = ks8695_init_irq,
.init_machine = dsm320_init,
.timer = &ks8695_timer,
+ .restart = ks8695_restart,
MACHINE_END
diff --git a/arch/arm/mach-ks8695/board-micrel.c b/arch/arm/mach-ks8695/board-micrel.c
index 16c95657f8f..a8270725b76 100644
--- a/arch/arm/mach-ks8695/board-micrel.c
+++ b/arch/arm/mach-ks8695/board-micrel.c
@@ -58,4 +58,5 @@ MACHINE_START(KS8695, "KS8695 Centaur Development Board")
.init_irq = ks8695_init_irq,
.init_machine = micrel_init,
.timer = &ks8695_timer,
+ .restart = ks8695_restart,
MACHINE_END
diff --git a/arch/arm/mach-ks8695/generic.h b/arch/arm/mach-ks8695/generic.h
index 2fbfab8d5fa..f8bdb11a9c3 100644
--- a/arch/arm/mach-ks8695/generic.h
+++ b/arch/arm/mach-ks8695/generic.h
@@ -12,4 +12,5 @@
extern __init void ks8695_map_io(void);
extern __init void ks8695_init_irq(void);
+extern void ks8695_restart(char, const char *);
extern struct sys_timer ks8695_timer;
diff --git a/arch/arm/mach-ks8695/include/mach/system.h b/arch/arm/mach-ks8695/include/mach/system.h
index fb1dda9be2d..59fe992395b 100644
--- a/arch/arm/mach-ks8695/include/mach/system.h
+++ b/arch/arm/mach-ks8695/include/mach/system.h
@@ -14,9 +14,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/regs-timer.h>
-
static void arch_idle(void)
{
/*
@@ -27,22 +24,4 @@ static void arch_idle(void)
}
-static void arch_reset(char mode, const char *cmd)
-{
- unsigned int reg;
-
- if (mode == 's')
- cpu_reset(0);
-
- /* disable timer0 */
- reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
- __raw_writel(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
-
- /* enable watchdog mode */
- __raw_writel((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);
-
- /* re-enable timer0 */
- __raw_writel(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
-}
-
#endif
diff --git a/arch/arm/mach-ks8695/include/mach/vmalloc.h b/arch/arm/mach-ks8695/include/mach/vmalloc.h
deleted file mode 100644
index 744ac66be3a..00000000000
--- a/arch/arm/mach-ks8695/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-ks8695/include/mach/vmalloc.h
- *
- * Copyright (C) 2006 Ben Dooks
- * Copyright (C) 2006 Simtec Electronics <linux@simtec.co.uk>
- *
- * KS8695 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END (KS8695_IO_VA & PGDIR_MASK)
-
-#endif
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c
index a78092dcd6f..76802aac0f4 100644
--- a/arch/arm/mach-ks8695/irq.c
+++ b/arch/arm/mach-ks8695/irq.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c
index 69c072c2c0f..37dfcd5bd2a 100644
--- a/arch/arm/mach-ks8695/time.c
+++ b/arch/arm/mach-ks8695/time.c
@@ -109,3 +109,21 @@ struct sys_timer ks8695_timer = {
.offset = ks8695_gettimeoffset,
.resume = ks8695_timer_setup,
};
+
+void ks8695_restart(char mode, const char *cmd)
+{
+ unsigned int reg;
+
+ if (mode == 's')
+ soft_restart(0);
+
+ /* disable timer0 */
+ reg = __raw_readl(KS8695_TMR_VA + KS8695_TMCON);
+ __raw_writel(reg & ~TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
+
+ /* enable watchdog mode */
+ __raw_writel((10 << 8) | T0TC_WATCHDOG, KS8695_TMR_VA + KS8695_T0TC);
+
+ /* re-enable timer0 */
+ __raw_writel(reg | TMCON_T0EN, KS8695_TMR_VA + KS8695_TMCON);
+}
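
The same shape repeats across every board touched in this series: the reset logic that used to live in each mach/system.h arch_reset() becomes a named SoC-level restart handler (ks8695_restart() above), and each MACHINE_START descriptor gains a .restart pointer to it. A minimal, self-contained model of that dispatch, written as plain userspace C with made-up names rather than real kernel code:

```c
#include <stdio.h>

/* Model of the machine descriptor: the reboot path calls a per-board
 * restart hook instead of a global arch_reset(). */
struct machine_desc {
	const char *name;
	void (*restart)(char mode, const char *cmd);
};

/* Stand-in for a SoC helper such as ks8695_restart(), which on real
 * hardware would reprogram the watchdog timer. */
static void demo_soc_restart(char mode, const char *cmd)
{
	printf("%s restart requested (mode '%c')\n",
	       cmd ? cmd : "default", mode);
}

static const struct machine_desc demo_board = {
	.name    = "demo board",
	.restart = demo_soc_restart,
};

int main(void)
{
	/* What the reboot code effectively does once the descriptor
	 * carries the hook: call it if the board provided one. */
	if (demo_board.restart)
		demo_board.restart('h', NULL);
	return 0;
}
```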
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c
index 205b2dbb565..369b152896c 100644
--- a/arch/arm/mach-lpc32xx/common.c
+++ b/arch/arm/mach-lpc32xx/common.c
@@ -164,7 +164,7 @@ int clk_is_sysclk_mainosc(void)
/*
* System reset via the watchdog timer
*/
-void lpc32xx_watchdog_reset(void)
+static void lpc32xx_watchdog_reset(void)
{
/* Make sure WDT clocks are enabled */
__raw_writel(LPC32XX_CLKPWR_PWMCLK_WDOG_EN,
@@ -311,3 +311,21 @@ void __init lpc32xx_map_io(void)
{
iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc));
}
+
+void lpc23xx_restart(char mode, const char *cmd)
+{
+ switch (mode) {
+ case 's':
+ case 'h':
+ lpc32xx_watchdog_reset();
+ break;
+
+ default:
+ /* Do nothing */
+ break;
+ }
+
+ /* Wait for watchdog to reset system */
+ while (1)
+ ;
+}
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h
index 5583f52662b..4b4e700343c 100644
--- a/arch/arm/mach-lpc32xx/common.h
+++ b/arch/arm/mach-lpc32xx/common.h
@@ -39,6 +39,8 @@ extern void __init lpc32xx_init_irq(void);
extern void __init lpc32xx_map_io(void);
extern void __init lpc32xx_serial_init(void);
extern void __init lpc32xx_gpio_init(void);
+extern void lpc23xx_restart(char, const char *);
+
/*
* Structure used for setting up and querying the PLLS
diff --git a/arch/arm/mach-lpc32xx/include/mach/system.h b/arch/arm/mach-lpc32xx/include/mach/system.h
index df3b0dea4d7..bf176c99152 100644
--- a/arch/arm/mach-lpc32xx/include/mach/system.h
+++ b/arch/arm/mach-lpc32xx/include/mach/system.h
@@ -24,29 +24,4 @@ static void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- extern void lpc32xx_watchdog_reset(void);
-
- switch (mode) {
- case 's':
- case 'h':
- printk(KERN_CRIT "RESET: Rebooting system\n");
-
- /* Disable interrupts */
- local_irq_disable();
-
- lpc32xx_watchdog_reset();
- break;
-
- default:
- /* Do nothing */
- break;
- }
-
- /* Wait for watchdog to reset system */
- while (1)
- ;
-}
-
#endif
diff --git a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
deleted file mode 100644
index 720fa43a60b..00000000000
--- a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/vmalloc.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF0000000UL
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 6d2f0d1b937..bfee5b45510 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
@@ -388,4 +388,5 @@ MACHINE_START(PHY3250, "Phytec 3250 board with the LPC3250 Microcontroller")
.init_irq = lpc32xx_init_irq,
.timer = &lpc32xx_timer,
.init_machine = phy3250_board_init,
+ .restart = lpc23xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 7a60bbbce7a..17cb7606012 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -120,8 +120,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(27),
- .end = gpio_to_irq(27),
+ .start = MMP_GPIO_TO_IRQ(27),
+ .end = MMP_GPIO_TO_IRQ(27),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -232,6 +232,7 @@ static void __init common_init(void)
pxa168_add_nand(&aspenite_nand_info);
pxa168_add_fb(&aspenite_lcd_info);
pxa168_add_keypad(&aspenite_keypad_info);
+ platform_device_register(&pxa168_device_gpio);
/* off-chip devices */
platform_device_register(&smc91x_device);
@@ -243,6 +244,7 @@ MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform")
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = common_init,
+ .restart = pxa168_restart,
MACHINE_END
MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
@@ -251,4 +253,5 @@ MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = common_init,
+ .restart = pxa168_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/avengers_lite.c b/arch/arm/mach-mmp/avengers_lite.c
index 39f0878d64a..b148a9dc5a4 100644
--- a/arch/arm/mach-mmp/avengers_lite.c
+++ b/arch/arm/mach-mmp/avengers_lite.c
@@ -38,6 +38,7 @@ static void __init avengers_lite_init(void)
/* on-chip devices */
pxa168_add_uart(2);
+ platform_device_register(&pxa168_device_gpio);
}
MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform")
@@ -45,4 +46,5 @@ MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform")
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = avengers_lite_init,
+ .restart = pxa168_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index 983cfb15fbd..d839fe6421e 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -202,6 +202,7 @@ static void __init brownstone_init(void)
/* on-chip devices */
mmp2_add_uart(1);
mmp2_add_uart(3);
+ platform_device_register(&mmp2_device_gpio);
mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
@@ -219,4 +220,5 @@ MACHINE_START(BROWNSTONE, "Brownstone Development Platform")
.init_irq = mmp2_init_irq,
.timer = &mmp2_timer,
.init_machine = brownstone_init,
+ .restart = mmp_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c
index 5720674739f..062b5b93c50 100644
--- a/arch/arm/mach-mmp/common.c
+++ b/arch/arm/mach-mmp/common.c
@@ -45,3 +45,8 @@ void __init mmp_map_io(void)
/* this is early, initialize mmp_chip_id here */
mmp_chip_id = __raw_readl(MMP_CHIPID);
}
+
+void mmp_restart(char mode, const char *cmd)
+{
+ soft_restart(0);
+}
diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h
index ec8d65ded25..1c9d6c1ea97 100644
--- a/arch/arm/mach-mmp/common.h
+++ b/arch/arm/mach-mmp/common.h
@@ -6,3 +6,4 @@ extern void timer_init(int irq);
extern void __init icu_init_irq(void);
extern void __init mmp_map_io(void);
+extern void mmp_restart(char, const char *);
diff --git a/arch/arm/mach-mmp/flint.c b/arch/arm/mach-mmp/flint.c
index c4fd806b15b..2ee8cd7829d 100644
--- a/arch/arm/mach-mmp/flint.c
+++ b/arch/arm/mach-mmp/flint.c
@@ -87,8 +87,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(155),
- .end = gpio_to_irq(155),
+ .start = MMP_GPIO_TO_IRQ(155),
+ .end = MMP_GPIO_TO_IRQ(155),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -110,6 +110,7 @@ static void __init flint_init(void)
/* on-chip devices */
mmp2_add_uart(1);
mmp2_add_uart(2);
+ platform_device_register(&mmp2_device_gpio);
/* off-chip devices */
platform_device_register(&smc91x_device);
@@ -121,4 +122,5 @@ MACHINE_START(FLINT, "Flint Development Platform")
.init_irq = mmp2_init_irq,
.timer = &mmp2_timer,
.init_machine = flint_init,
+ .restart = mmp_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 69156568bc4..87765467de6 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -182,8 +182,9 @@ static void __init gplugd_init(void)
/* on-chip devices */
pxa168_add_uart(3);
- pxa168_add_ssp(0);
+ pxa168_add_ssp(1);
pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(gplugd_i2c_board_info));
+ platform_device_register(&pxa168_device_gpio);
pxa168_add_eth(&gplugd_eth_platform_data);
}
@@ -194,4 +195,5 @@ MACHINE_START(GPLUGD, "PXA168-based GuruPlug Display (gplugD) Platform")
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = gplugd_init,
+ .restart = pxa168_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
index d14eeaf1632..0e135a599f3 100644
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
@@ -2,14 +2,13 @@
#define __ASM_MACH_GPIO_PXA_H
#include <mach/addr-map.h>
+#include <mach/cputype.h>
#include <mach/irqs.h>
#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (GPIO_REGS_VIRT + (x))
-
-#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
+#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
#define gpio_to_bank(gpio) ((gpio) >> 5)
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index 681262359d1..13219ebf512 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -3,11 +3,6 @@
#include <asm-generic/gpio.h>
-#define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio))
-#define irq_to_gpio(irq) ((irq) - IRQ_GPIO_START)
+#include <mach/cputype.h>
-#define __gpio_is_inverted(gpio) (0)
-#define __gpio_is_occupied(gpio) (0)
-
-#include <plat/gpio.h>
#endif /* __ASM_MACH_GPIO_H */
diff --git a/arch/arm/mach-mmp/include/mach/irqs.h b/arch/arm/mach-mmp/include/mach/irqs.h
index a09d328e2dd..34635a0bbb5 100644
--- a/arch/arm/mach-mmp/include/mach/irqs.h
+++ b/arch/arm/mach-mmp/include/mach/irqs.h
@@ -219,10 +219,10 @@
#define IRQ_MMP2_MUX_END (IRQ_MMP2_SSP_BASE + 2)
#define IRQ_GPIO_START 128
-#define IRQ_GPIO_NUM 192
-#define IRQ_GPIO(x) (IRQ_GPIO_START + (x))
+#define MMP_NR_BUILTIN_GPIO 192
+#define MMP_GPIO_TO_IRQ(gpio) (IRQ_GPIO_START + (gpio))
-#define IRQ_BOARD_START (IRQ_GPIO_START + IRQ_GPIO_NUM)
+#define IRQ_BOARD_START (IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO)
#define NR_IRQS (IRQ_BOARD_START)
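
The board files earlier in this series switch from the generic gpio_to_irq() to MMP_GPIO_TO_IRQ() because on MMP the mapping is a fixed linear offset: the interrupt number is simply IRQ_GPIO_START plus the GPIO index, and board interrupts start after the 192 built-in GPIO lines. A standalone check of that arithmetic, with the constants copied from the hunk above:

```c
#include <assert.h>
#include <stdio.h>

#define IRQ_GPIO_START		128
#define MMP_NR_BUILTIN_GPIO	192
#define MMP_GPIO_TO_IRQ(gpio)	(IRQ_GPIO_START + (gpio))
#define IRQ_BOARD_START		(IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO)

int main(void)
{
	/* GPIO 27 (aspenite smc91x) and GPIO 80 (tavorevb/ttc_dkb) */
	assert(MMP_GPIO_TO_IRQ(27) == 155);
	assert(MMP_GPIO_TO_IRQ(80) == 208);

	/* Board IRQs (e.g. the max7312 expander) start after the 192
	 * built-in GPIO interrupts. */
	printf("IRQ_BOARD_START = %d\n", IRQ_BOARD_START);	/* 320 */
	return 0;
}
```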
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 2f7b2d3c2b1..cba22fed226 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -32,6 +32,8 @@ extern struct pxa_device_desc mmp2_device_sdh3;
extern struct pxa_device_desc mmp2_device_asram;
extern struct pxa_device_desc mmp2_device_isram;
+extern struct platform_device mmp2_device_gpio;
+
static inline int mmp2_add_uart(int id)
{
struct pxa_device_desc *d = NULL;
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 7fb568d2845..dc03d580a06 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -5,6 +5,7 @@ struct sys_timer;
extern struct sys_timer pxa168_timer;
extern void __init pxa168_init_irq(void);
+extern void pxa168_restart(char, const char *);
extern void pxa168_clear_keypad_wakeup(void);
#include <linux/i2c.h>
@@ -42,6 +43,8 @@ struct pxa168_usb_pdata {
/* pdata can be NULL */
int __init pxa168_add_usb_host(struct pxa168_usb_pdata *pdata);
+extern struct platform_device pxa168_device_gpio;
+
static inline int pxa168_add_uart(int id)
{
struct pxa_device_desc *d = NULL;
diff --git a/arch/arm/mach-mmp/include/mach/pxa910.h b/arch/arm/mach-mmp/include/mach/pxa910.h
index 91be7559139..4de13abef7b 100644
--- a/arch/arm/mach-mmp/include/mach/pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/pxa910.h
@@ -21,6 +21,8 @@ extern struct pxa_device_desc pxa910_device_pwm3;
extern struct pxa_device_desc pxa910_device_pwm4;
extern struct pxa_device_desc pxa910_device_nand;
+extern struct platform_device pxa910_device_gpio;
+
static inline int pxa910_add_uart(int id)
{
struct pxa_device_desc *d = NULL;
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 1a8a25edb1b..1d001eab81e 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,18 +9,8 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
-#include <mach/cputype.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (cpu_is_pxa168())
- cpu_reset(0xffff0000);
- else
- cpu_reset(0);
-}
#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h
deleted file mode 100644
index 1d0bac003ad..00000000000
--- a/arch/arm/mach-mmp/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * linux/arch/arm/mach-mmp/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe000000UL
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 8bfac661262..96cf5c8fe47 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -175,4 +175,5 @@ MACHINE_START(MARVELL_JASPER, "Jasper Development Platform")
.init_irq = mmp2_init_irq,
.timer = &mmp2_timer,
.init_machine = jasper_init,
+ .restart = mmp_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 5dd1d4a6aeb..617c60a170a 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <asm/hardware/cache-tauros2.h>
@@ -24,7 +25,6 @@
#include <mach/irqs.h>
#include <mach/dma.h>
#include <mach/mfp.h>
-#include <mach/gpio-pxa.h>
#include <mach/devices.h>
#include <mach/mmp2.h>
@@ -33,8 +33,6 @@
#define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000)
-#define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x9c)
-
static struct mfp_addr_map mmp2_addr_map[] __initdata = {
MFP_ADDR_X(GPIO0, GPIO58, 0x54),
@@ -95,24 +93,9 @@ void mmp2_clear_pmic_int(void)
__raw_writel(data, mfpr_pmic);
}
-static void __init mmp2_init_gpio(void)
-{
- int i;
-
- /* enable GPIO clock */
- __raw_writel(APBC_APBCLK | APBC_FNCLK, APBC_MMP2_GPIO);
-
- /* unmask GPIO edge detection for all 6 banks -- APMASKx */
- for (i = 0; i < 6; i++)
- __raw_writel(0xffffffff, APMASK(i));
-
- pxa_init_gpio(IRQ_MMP2_GPIO, 0, 167, NULL);
-}
-
void __init mmp2_init_irq(void)
{
mmp2_init_icu();
- mmp2_init_gpio();
}
static void sdhc_clk_enable(struct clk *clk)
@@ -149,6 +132,7 @@ static APBC_CLK(twsi3, MMP2_TWSI3, 0, 26000000);
static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000);
static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000);
static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000);
+static APBC_CLK(gpio, MMP2_GPIO, 0, 26000000);
static APMU_CLK(nand, NAND, 0xbf, 100000000);
static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops);
@@ -168,6 +152,7 @@ static struct clk_lookup mmp2_clkregs[] = {
INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
+ INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
@@ -230,3 +215,21 @@ MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000);
/* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */
MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000);
+struct resource mmp2_resource_gpio[] = {
+ {
+ .start = 0xd4019000,
+ .end = 0xd4019fff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_MMP2_GPIO,
+ .end = IRQ_MMP2_GPIO,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mmp2_device_gpio = {
+ .name = "pxa-gpio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mmp2_resource_gpio),
+ .resource = mmp2_resource_gpio,
+};
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 76ca15c00e4..7bc17eaa12e 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
#include <asm/mach/time.h>
#include <mach/addr-map.h>
@@ -20,7 +21,6 @@
#include <mach/regs-apbc.h>
#include <mach/regs-apmu.h>
#include <mach/irqs.h>
-#include <mach/gpio-pxa.h>
#include <mach/dma.h>
#include <mach/devices.h>
#include <mach/mfp.h>
@@ -43,26 +43,9 @@ static struct mfp_addr_map pxa168_mfp_addr_map[] __initdata =
MFP_ADDR_END,
};
-#define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x09c)
-
-static void __init pxa168_init_gpio(void)
-{
- int i;
-
- /* enable GPIO clock */
- __raw_writel(APBC_APBCLK | APBC_FNCLK, APBC_PXA168_GPIO);
-
- /* unmask GPIO edge detection for all 4 banks - APMASKx */
- for (i = 0; i < 4; i++)
- __raw_writel(0xffffffff, APMASK(i));
-
- pxa_init_gpio(IRQ_PXA168_GPIOX, 0, 127, NULL);
-}
-
void __init pxa168_init_irq(void)
{
icu_init_irq();
- pxa168_init_gpio();
}
/* APB peripheral clocks */
@@ -80,6 +63,7 @@ static APBC_CLK(ssp2, PXA168_SSP2, 4, 0);
static APBC_CLK(ssp3, PXA168_SSP3, 4, 0);
static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
+static APBC_CLK(gpio, PXA168_GPIO, 0, 13000000);
static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
static APMU_CLK(nand, NAND, 0x19b, 156000000);
@@ -105,6 +89,7 @@ static struct clk_lookup pxa168_clkregs[] = {
INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
+ INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
INIT_CLKREG(&clk_usb, "pxa168-ehci", "PXA168-USBCLK"),
@@ -174,6 +159,25 @@ PXA168_DEVICE(fb, "pxa168-fb", -1, LCD, 0xd420b000, 0x1c8);
PXA168_DEVICE(keypad, "pxa27x-keypad", -1, KEYPAD, 0xd4012000, 0x4c);
PXA168_DEVICE(eth, "pxa168-eth", -1, MFU, 0xc0800000, 0x0fff);
+struct resource pxa168_resource_gpio[] = {
+ {
+ .start = 0xd4019000,
+ .end = 0xd4019fff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PXA168_GPIOX,
+ .end = IRQ_PXA168_GPIOX,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device pxa168_device_gpio = {
+ .name = "pxa-gpio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pxa168_resource_gpio),
+ .resource = pxa168_resource_gpio,
+};
+
struct resource pxa168_usb_host_resources[] = {
/* USB Host controller register base */
[0] = {
@@ -214,3 +218,8 @@ int __init pxa168_add_usb_host(struct pxa168_usb_pdata *pdata)
pxa168_device_usb_host.dev.platform_data = pdata;
return platform_device_register(&pxa168_device_usb_host);
}
+
+void pxa168_restart(char mode, const char *cmd)
+{
+ soft_restart(0xffff0000);
+}
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 4ebbfbba39f..3241a25784d 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <asm/mach/time.h>
#include <mach/addr-map.h>
@@ -19,7 +20,6 @@
#include <mach/regs-apmu.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
-#include <mach/gpio-pxa.h>
#include <mach/dma.h>
#include <mach/mfp.h>
#include <mach/devices.h>
@@ -77,26 +77,9 @@ static struct mfp_addr_map pxa910_mfp_addr_map[] __initdata =
MFP_ADDR_END,
};
-#define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x09c)
-
-static void __init pxa910_init_gpio(void)
-{
- int i;
-
- /* enable GPIO clock */
- __raw_writel(APBC_APBCLK | APBC_FNCLK, APBC_PXA910_GPIO);
-
- /* unmask GPIO edge detection for all 4 banks - APMASKx */
- for (i = 0; i < 4; i++)
- __raw_writel(0xffffffff, APMASK(i));
-
- pxa_init_gpio(IRQ_PXA910_AP_GPIO, 0, 127, NULL);
-}
-
void __init pxa910_init_irq(void)
{
icu_init_irq();
- pxa910_init_gpio();
}
/* APB peripheral clocks */
@@ -108,6 +91,7 @@ static APBC_CLK(pwm1, PXA910_PWM1, 1, 13000000);
static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
+static APBC_CLK(gpio, PXA910_GPIO, 0, 13000000);
static APMU_CLK(nand, NAND, 0x19b, 156000000);
static APMU_CLK(u2o, USB, 0x1b, 480000000);
@@ -123,6 +107,7 @@ static struct clk_lookup pxa910_clkregs[] = {
INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
+ INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL),
INIT_CLKREG(&clk_u2o, "pxa-u2o", "U2OCLK"),
};
@@ -179,3 +164,22 @@ PXA910_DEVICE(pwm2, "pxa910-pwm", 1, NONE, 0xd401a400, 0x10);
PXA910_DEVICE(pwm3, "pxa910-pwm", 2, NONE, 0xd401a800, 0x10);
PXA910_DEVICE(pwm4, "pxa910-pwm", 3, NONE, 0xd401ac00, 0x10);
PXA910_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x80, 97, 99);
+
+struct resource pxa910_resource_gpio[] = {
+ {
+ .start = 0xd4019000,
+ .end = 0xd4019fff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_PXA910_AP_GPIO,
+ .end = IRQ_PXA910_AP_GPIO,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device pxa910_device_gpio = {
+ .name = "pxa-gpio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pxa910_resource_gpio),
+ .resource = pxa910_resource_gpio,
+};
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
index eb5be879fd8..8e3b5af04a5 100644
--- a/arch/arm/mach-mmp/tavorevb.c
+++ b/arch/arm/mach-mmp/tavorevb.c
@@ -19,6 +19,7 @@
#include <mach/addr-map.h>
#include <mach/mfp-pxa910.h>
#include <mach/pxa910.h>
+#include <mach/irqs.h>
#include "common.h"
@@ -71,8 +72,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(80),
- .end = gpio_to_irq(80),
+ .start = MMP_GPIO_TO_IRQ(80),
+ .end = MMP_GPIO_TO_IRQ(80),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -93,6 +94,7 @@ static void __init tavorevb_init(void)
/* on-chip devices */
pxa910_add_uart(1);
+ platform_device_register(&pxa910_device_gpio);
/* off-chip devices */
platform_device_register(&smc91x_device);
@@ -103,4 +105,5 @@ MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)")
.init_irq = pxa910_init_irq,
.timer = &pxa910_timer,
.init_machine = tavorevb_init,
+ .restart = mmp_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c
index bbe4727b96c..0523e422990 100644
--- a/arch/arm/mach-mmp/teton_bga.c
+++ b/arch/arm/mach-mmp/teton_bga.c
@@ -66,7 +66,7 @@ static struct pxa27x_keypad_platform_data teton_bga_keypad_info __initdata = {
static struct i2c_board_info teton_bga_i2c_info[] __initdata = {
{
I2C_BOARD_INFO("ds1337", 0x68),
- .irq = gpio_to_irq(RTC_INT_GPIO)
+ .irq = MMP_GPIO_TO_IRQ(RTC_INT_GPIO)
},
};
@@ -78,6 +78,7 @@ static void __init teton_bga_init(void)
pxa168_add_uart(1);
pxa168_add_keypad(&teton_bga_keypad_info);
pxa168_add_twsi(0, NULL, ARRAY_AND_SIZE(teton_bga_i2c_info));
+ platform_device_register(&pxa168_device_gpio);
}
MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform")
@@ -86,4 +87,5 @@ MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform")
.init_irq = pxa168_init_irq,
.timer = &pxa168_timer,
.init_machine = teton_bga_init,
+ .restart = pxa168_restart,
MACHINE_END
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 4e91ee6e27c..71fc4ee4602 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/sched.h>
#include <asm/sched_clock.h>
#include <mach/addr-map.h>
@@ -42,8 +41,6 @@
#define MAX_DELTA (0xfffffffe)
#define MIN_DELTA (16)
-static DEFINE_CLOCK_DATA(cd);
-
/*
* FIXME: the timer needs some delay to stabilize the counter capture
*/
@@ -59,16 +56,9 @@ static inline uint32_t timer_read(void)
return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1));
}
-unsigned long long notrace sched_clock(void)
+static u32 notrace mmp_read_sched_clock(void)
{
- u32 cyc = timer_read();
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mmp_update_sched_clock(void)
-{
- u32 cyc = timer_read();
- update_sched_clock(&cd, cyc, (u32)~0);
+ return timer_read();
}
static irqreturn_t timer_interrupt(int irq, void *dev_id)
@@ -201,7 +191,7 @@ void __init timer_init(int irq)
{
timer_config();
- init_sched_clock(&cd, mmp_update_sched_clock, 32, CLOCK_TICK_RATE);
+ setup_sched_clock(mmp_read_sched_clock, 32, CLOCK_TICK_RATE);
ckevt.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, ckevt.shift);
ckevt.max_delta_ns = clockevent_delta2ns(MAX_DELTA, &ckevt);
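
With the open-coded DEFINE_CLOCK_DATA plumbing gone, the rewritten time.c just hands its free-running 32-bit counter to setup_sched_clock(), and the generic layer scales cycles to nanoseconds with a fixed-point mult/shift pair derived from the clock rate. A rough standalone illustration of that scaling follows; the helper and the example rate are assumptions, and only the cycles * mult >> shift arithmetic mirrors what the generic code does (the kernel derives mult/shift more carefully via clocks_calc_mult_shift()):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * ns = cycles * mult >> shift, with mult / 2^shift ~= NSEC_PER_SEC / rate.
 * A fixed shift of 20 is an arbitrary choice that keeps mult within
 * 32 bits for the example rate below.
 */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift, uint32_t rate)
{
	*shift = 20;
	*mult = (uint32_t)(((uint64_t)1000000000ULL << *shift) / rate);
}

int main(void)
{
	uint32_t mult, shift;
	uint32_t rate = 3250000;	/* example tick rate in Hz (assumption) */
	uint32_t cycles = 32500;	/* roughly 10 ms worth of counter ticks */

	calc_mult_shift(&mult, &shift, rate);
	printf("%u cycles @ %u Hz -> %llu ns\n", cycles, rate,
	       (unsigned long long)(((uint64_t)cycles * mult) >> shift));
	return 0;
}
```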
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 176515a7698..5ac5d5832e4 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -24,12 +24,13 @@
#include <mach/addr-map.h>
#include <mach/mfp-pxa910.h>
#include <mach/pxa910.h>
+#include <mach/irqs.h>
#include "common.h"
-#define TTCDKB_GPIO_EXT0(x) (NR_BUILTIN_GPIO + ((x < 0) ? 0 : \
+#define TTCDKB_GPIO_EXT0(x) (MMP_NR_BUILTIN_GPIO + ((x < 0) ? 0 : \
((x < 16) ? x : 15)))
-#define TTCDKB_GPIO_EXT1(x) (NR_BUILTIN_GPIO + 16 + ((x < 0) ? 0 : \
+#define TTCDKB_GPIO_EXT1(x) (MMP_NR_BUILTIN_GPIO + 16 + ((x < 0) ? 0 : \
((x < 16) ? x : 15)))
/*
@@ -122,6 +123,7 @@ static struct platform_device ttc_dkb_device_onenand = {
};
static struct platform_device *ttc_dkb_devices[] = {
+ &pxa910_device_gpio,
&ttc_dkb_device_onenand,
};
@@ -136,7 +138,7 @@ static struct i2c_board_info ttc_dkb_i2c_info[] = {
{
.type = "max7312",
.addr = 0x23,
- .irq = IRQ_GPIO(80),
+ .irq = MMP_GPIO_TO_IRQ(80),
.platform_data = &max7312_data,
},
};
@@ -159,4 +161,5 @@ MACHINE_START(TTC_DKB, "PXA910-based TTC_DKB Development Platform")
.init_irq = pxa910_init_irq,
.timer = &pxa910_timer,
.init_machine = ttc_dkb_init,
+ .restart = mmp_restart,
MACHINE_END
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index ebde97f5d5f..1cd40ad301d 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -13,7 +13,6 @@ config ARCH_MSM7X00A
select CPU_V6
select GPIO_MSM_V1
select MSM_PROC_COMM
- select HAS_MSM_DEBUG_UART_PHYS
config ARCH_MSM7X30
bool "MSM7x30"
@@ -25,7 +24,6 @@ config ARCH_MSM7X30
select MSM_GPIOMUX
select GPIO_MSM_V1
select MSM_PROC_COMM
- select HAS_MSM_DEBUG_UART_PHYS
config ARCH_QSD8X50
bool "QSD8X50"
@@ -37,7 +35,6 @@ config ARCH_QSD8X50
select MSM_GPIOMUX
select GPIO_MSM_V1
select MSM_PROC_COMM
- select HAS_MSM_DEBUG_UART_PHYS
config ARCH_MSM8X60
bool "MSM8X60"
@@ -63,19 +60,20 @@ config ARCH_MSM8960
endchoice
+config MSM_HAS_DEBUG_UART_HS
+ bool
+
config MSM_SOC_REV_A
bool
config ARCH_MSM_SCORPIONMP
bool
+ select HAVE_SMP
config ARCH_MSM_ARM11
bool
config ARCH_MSM_SCORPION
bool
-config HAS_MSM_DEBUG_UART_PHYS
- bool
-
config MSM_VIC
bool
@@ -152,32 +150,6 @@ config MACH_MSM8960_RUMI3
endmenu
-config MSM_DEBUG_UART
- int
- default 1 if MSM_DEBUG_UART1
- default 2 if MSM_DEBUG_UART2
- default 3 if MSM_DEBUG_UART3
-
-if HAS_MSM_DEBUG_UART_PHYS
-choice
- prompt "Debug UART"
-
- default MSM_DEBUG_UART_NONE
-
- config MSM_DEBUG_UART_NONE
- bool "None"
-
- config MSM_DEBUG_UART1
- bool "UART1"
-
- config MSM_DEBUG_UART2
- bool "UART2"
-
- config MSM_DEBUG_UART3
- bool "UART3"
-endchoice
-endif
-
config MSM_SMD_PKG3
bool
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index 6dc1cbd2a59..ed359812853 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -99,6 +99,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8960_sim_init,
MACHINE_END
@@ -108,6 +109,7 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8960_rumi3_init,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 44bf7168837..0a113424632 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -108,6 +108,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
.reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
.timer = &msm_timer,
MACHINE_END
@@ -117,6 +118,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
.reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
.timer = &msm_timer,
MACHINE_END
@@ -126,6 +128,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
.reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
.timer = &msm_timer,
MACHINE_END
@@ -135,6 +138,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
.reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 32b465763db..97b8191d9d3 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -18,7 +18,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/delay.h>
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 24030d0da6e..0fb7a17df39 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
#include <mach/irqs.h>
#include <mach/iommu.h>
diff --git a/arch/arm/mach-msm/include/mach/debug-macro.S b/arch/arm/mach-msm/include/mach/debug-macro.S
index 2dc73ccddb1..3ffd8668c9a 100644
--- a/arch/arm/mach-msm/include/mach/debug-macro.S
+++ b/arch/arm/mach-msm/include/mach/debug-macro.S
@@ -1,6 +1,7 @@
-/* arch/arm/mach-msm7200/include/mach/debug-macro.S
+/*
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -14,40 +15,52 @@
*
*/
-
-
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
-#if defined(CONFIG_HAS_MSM_DEBUG_UART_PHYS) && !defined(CONFIG_MSM_DEBUG_UART_NONE)
.macro addruart, rp, rv, tmp
+#ifdef MSM_DEBUG_UART_PHYS
ldr \rp, =MSM_DEBUG_UART_PHYS
ldr \rv, =MSM_DEBUG_UART_BASE
+#endif
.endm
- .macro senduart,rd,rx
+ .macro senduart, rd, rx
+#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS
+ @ Write the 1 character to UARTDM_TF
+ str \rd, [\rx, #0x70]
+#else
teq \rx, #0
strne \rd, [\rx, #0x0C]
+#endif
.endm
- .macro waituart,rd,rx
+ .macro waituart, rd, rx
+#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS
+ @ check for TX_EMT in UARTDM_SR
+ ldr \rd, [\rx, #0x08]
+ tst \rd, #0x08
+ bne 1002f
+ @ wait for TXREADY in UARTDM_ISR
+1001: ldr \rd, [\rx, #0x14]
+ tst \rd, #0x80
+ beq 1001b
+1002:
+ @ Clear TX_READY by writing to the UARTDM_CR register
+ mov \rd, #0x300
+ str \rd, [\rx, #0x10]
+ @ Write 0x1 to NCF register
+ mov \rd, #0x1
+ str \rd, [\rx, #0x40]
+ @ UARTDM reg. Read to induce delay
+ ldr \rd, [\rx, #0x08]
+#else
@ wait for TX_READY
1001: ldr \rd, [\rx, #0x08]
tst \rd, #0x04
beq 1001b
- .endm
-#else
- .macro addruart, rp, rv, tmp
- mov \rv, #0xff000000
- orr \rv, \rv, #0x00f00000
- .endm
-
- .macro senduart,rd,rx
- .endm
-
- .macro waituart,rd,rx
- .endm
#endif
+ .endm
- .macro busyuart,rd,rx
+ .macro busyuart, rd, rx
.endm
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
deleted file mode 100644
index 717076f3ca7..00000000000
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Low-level IRQ helper macros
- *
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/hardware/entry-macro-gic.S>
-
- .macro disable_fiq
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-vic.S b/arch/arm/mach-msm/include/mach/entry-macro-vic.S
deleted file mode 100644
index 70563ed11b3..00000000000
--- a/arch/arm/mach-msm/include/mach/entry-macro-vic.S
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <mach/msm_iomap.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- @ enable imprecise aborts
- cpsie a
- mov \base, #MSM_VIC_BASE
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- @ 0xD0 has irq# or old irq# if the irq has been handled
- @ 0xD4 has irq# or -1 if none pending *but* if you just
- @ read 0xD4 you never get the first irq for some reason
- ldr \irqnr, [\base, #0xD0]
- ldr \irqnr, [\base, #0xD4]
- cmp \irqnr, #0xffffffff
- .endm
diff --git a/arch/arm/mach-msm/include/mach/entry-macro.S b/arch/arm/mach-msm/include/mach/entry-macro.S
index b16f082eeb6..41f7003ef34 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro.S
@@ -16,8 +16,27 @@
*
*/
-#if defined(CONFIG_ARM_GIC)
-#include <mach/entry-macro-qgic.S>
-#else
-#include <mach/entry-macro-vic.S>
+ .macro disable_fiq
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+#if !defined(CONFIG_ARM_GIC)
+#include <mach/msm_iomap.h>
+
+ .macro get_irqnr_preamble, base, tmp
+ @ enable imprecise aborts
+ cpsie a
+ mov \base, #MSM_VIC_BASE
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ @ 0xD0 has irq# or old irq# if the irq has been handled
+ @ 0xD4 has irq# or -1 if none pending *but* if you just
+ @ read 0xD4 you never get the first irq for some reason
+ ldr \irqnr, [\base, #0xD0]
+ ldr \irqnr, [\base, #0xD4]
+ cmp \irqnr, #0xffffffff
+ .endm
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
index 94fe9fe6feb..8af46123dab 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -78,18 +78,6 @@
#define MSM_UART3_PHYS 0xA9C00000
#define MSM_UART3_SIZE SZ_4K
-#ifdef CONFIG_MSM_DEBUG_UART
-#define MSM_DEBUG_UART_BASE 0xE1000000
-#if CONFIG_MSM_DEBUG_UART == 1
-#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 2
-#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 3
-#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
-#endif
-#define MSM_DEBUG_UART_SIZE SZ_4K
-#endif
-
#define MSM_SDC1_PHYS 0xA0400000
#define MSM_SDC1_SIZE SZ_4K
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 37694442d1b..198202c267c 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -89,18 +89,6 @@
#define MSM_UART3_PHYS 0xACC00000
#define MSM_UART3_SIZE SZ_4K
-#ifdef CONFIG_MSM_DEBUG_UART
-#define MSM_DEBUG_UART_BASE 0xE1000000
-#if CONFIG_MSM_DEBUG_UART == 1
-#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 2
-#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 3
-#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
-#endif
-#define MSM_DEBUG_UART_SIZE SZ_4K
-#endif
-
#define MSM_MDC_BASE IOMEM(0xE0200000)
#define MSM_MDC_PHYS 0xAA500000
#define MSM_MDC_SIZE SZ_1M
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
index 3c9d9602a31..800b55767e6 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
@@ -45,4 +45,9 @@
#define MSM8960_TMR0_PHYS 0x0208A000
#define MSM8960_TMR0_SIZE SZ_4K
+#ifdef CONFIG_DEBUG_MSM8960_UART
+#define MSM_DEBUG_UART_BASE 0xE1040000
+#define MSM_DEBUG_UART_PHYS 0x16440000
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
index d67cd73316f..0faa894729b 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -83,18 +83,6 @@
#define MSM_UART3_PHYS 0xA9C00000
#define MSM_UART3_SIZE SZ_4K
-#ifdef CONFIG_MSM_DEBUG_UART
-#define MSM_DEBUG_UART_BASE 0xE1000000
-#if CONFIG_MSM_DEBUG_UART == 1
-#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 2
-#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 3
-#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
-#endif
-#define MSM_DEBUG_UART_SIZE SZ_4K
-#endif
-
#define MSM_MDC_BASE IOMEM(0xE0200000)
#define MSM_MDC_PHYS 0xAA500000
#define MSM_MDC_SIZE SZ_1M
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index 3b19b8f244b..54e12caa8d8 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -62,4 +62,9 @@
#define MSM8X60_TMR0_PHYS 0x02040000
#define MSM8X60_TMR0_SIZE SZ_4K
+#ifdef CONFIG_DEBUG_MSM8660_UART
+#define MSM_DEBUG_UART_BASE 0xE1040000
+#define MSM_DEBUG_UART_PHYS 0x19C40000
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 4ded15238b6..90682f4599d 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -55,6 +55,18 @@
#include "msm_iomap-8960.h"
+#define MSM_DEBUG_UART_SIZE SZ_4K
+#if defined(CONFIG_DEBUG_MSM_UART1)
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
+#elif defined(CONFIG_DEBUG_MSM_UART2)
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
+#elif defined(CONFIG_DEBUG_MSM_UART3)
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
+#endif
+
/* Virtual addresses shared across all MSM targets. */
#define MSM_CSR_BASE IOMEM(0xE0001000)
#define MSM_QGIC_DIST_BASE IOMEM(0xF0000000)
diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h
index d2e83f42ba1..311db2b35da 100644
--- a/arch/arm/mach-msm/include/mach/system.h
+++ b/arch/arm/mach-msm/include/mach/system.h
@@ -12,16 +12,8 @@
* GNU General Public License for more details.
*
*/
-
-#include <mach/hardware.h>
-
void arch_idle(void);
-static inline void arch_reset(char mode, const char *cmd)
-{
- for (;;) ; /* depends on IPC w/ other core */
-}
-
/* low level hardware reset hook -- for example, hitting the
* PSHOLD line on the PMIC to hard reset the system
*/
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index d94292c29d8..169a8400745 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/uncompress.h
- *
+/*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -14,17 +14,40 @@
*/
#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
+#define __ASM_ARCH_MSM_UNCOMPRESS_H
+
+#include <asm/processor.h>
+#include <mach/msm_iomap.h>
+
+#define UART_CSR (*(volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x08))
+#define UART_TF (*(volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x0c))
-#include "hardware.h"
-#include "linux/io.h"
-#include "mach/msm_iomap.h"
+#define UART_DM_SR (*((volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x08)))
+#define UART_DM_CR (*((volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x10)))
+#define UART_DM_ISR (*((volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x14)))
+#define UART_DM_NCHAR (*((volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x40)))
+#define UART_DM_TF (*((volatile uint32_t *)(MSM_DEBUG_UART_PHYS + 0x70)))
static void putc(int c)
{
#if defined(MSM_DEBUG_UART_PHYS)
- unsigned base = MSM_DEBUG_UART_PHYS;
- while (!(readl(base + 0x08) & 0x04)) ;
- writel(c, base + 0x0c);
+#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS
+ /*
+ * Wait for TX_READY to be set; but skip it if we have a
+ * TX underrun.
+ */
+ if (UART_DM_SR & 0x08)
+ while (!(UART_DM_ISR & 0x80))
+ cpu_relax();
+
+ UART_DM_CR = 0x300;
+ UART_DM_NCHAR = 0x1;
+ UART_DM_TF = c;
+#else
+ while (!(UART_CSR & 0x04))
+ cpu_relax();
+ UART_TF = c;
+#endif
#endif
}
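
The rewritten putc() above reaches the UART through dereferenced-pointer macros, polling the status register for TX-ready before storing the character into the transmit FIFO. A self-contained sketch of that access pattern, with the "hardware" replaced by a local array and only the 0x08/0x0c offsets taken from the non-UARTDM path in the hunk; everything else is a stand-in for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for the memory-mapped UART. */
static volatile uint32_t fake_uart[0x20];

#define UART_BASE	((uintptr_t)fake_uart)
#define UART_CSR	(*(volatile uint32_t *)(UART_BASE + 0x08))
#define UART_TF		(*(volatile uint32_t *)(UART_BASE + 0x0c))

static void demo_putc(int c)
{
	while (!(UART_CSR & 0x04))	/* wait for TX ready */
		;
	UART_TF = (uint32_t)c;
}

int main(void)
{
	UART_CSR = 0x04;		/* pretend the FIFO is always ready */
	demo_putc('A');
	printf("wrote 0x%02x to the fake TX FIFO\n", fake_uart[0x0c / 4]);
	return 0;
}
```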
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 8759ecf7454..578b04e42de 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -47,7 +47,8 @@ static struct map_desc msm_io_desc[] __initdata = {
MSM_CHIP_DEVICE(GPIO1, MSM7X00),
MSM_CHIP_DEVICE(GPIO2, MSM7X00),
MSM_DEVICE(CLK_CTL),
-#ifdef CONFIG_MSM_DEBUG_UART
+#if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \
+ defined(CONFIG_DEBUG_MSM_UART3)
MSM_DEVICE(DEBUG_UART),
#endif
#ifdef CONFIG_ARCH_MSM7X30
@@ -84,7 +85,8 @@ static struct map_desc qsd8x50_io_desc[] __initdata = {
MSM_DEVICE(SCPLL),
MSM_DEVICE(AD5),
MSM_DEVICE(MDC),
-#ifdef CONFIG_MSM_DEBUG_UART
+#if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \
+ defined(CONFIG_DEBUG_MSM_UART3)
MSM_DEVICE(DEBUG_UART),
#endif
{
@@ -109,6 +111,9 @@ static struct map_desc msm8x60_io_desc[] __initdata = {
MSM_CHIP_DEVICE(TMR0, MSM8X60),
MSM_DEVICE(ACC),
MSM_DEVICE(GCC),
+#ifdef CONFIG_DEBUG_MSM8660_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
};
void __init msm_map_msm8x60_io(void)
@@ -123,6 +128,9 @@ static struct map_desc msm8960_io_desc[] __initdata = {
MSM_CHIP_DEVICE(QGIC_CPU, MSM8960),
MSM_CHIP_DEVICE(TMR, MSM8960),
MSM_CHIP_DEVICE(TMR0, MSM8960),
+#ifdef CONFIG_DEBUG_MSM8960_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
};
void __init msm_map_msm8960_io(void)
@@ -146,7 +154,8 @@ static struct map_desc msm7x30_io_desc[] __initdata = {
MSM_DEVICE(SAW),
MSM_DEVICE(GCC),
MSM_DEVICE(TCSR),
-#ifdef CONFIG_MSM_DEBUG_UART
+#if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \
+ defined(CONFIG_DEBUG_MSM_UART3)
MSM_DEVICE(DEBUG_UART),
#endif
{
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index fdec58aaa35..0b3e357c4c8 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -79,7 +79,7 @@ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
SCM_FLAG_COLDBOOT_CPU1);
if (ret == 0) {
- void *sc1_base_ptr;
+ void __iomem *sc1_base_ptr;
sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
if (sc1_base_ptr) {
writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index 8736afff82f..0c56a5aaf58 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -215,7 +215,7 @@ static const struct file_operations debug_ops = {
.llseek = default_llseek,
};
-static void debug_create(const char *name, mode_t mode,
+static void debug_create(const char *name, umode_t mode,
struct dentry *dent,
int (*fill)(char *buf, int max))
{
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index afeeca52fc6..11d0d8f2656 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -1,6 +1,7 @@
-/* linux/arch/arm/mach-msm/timer.c
+/*
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -13,306 +14,207 @@
*
*/
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/init.h>
-#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <asm/mach/time.h>
#include <asm/hardware/gic.h>
+#include <asm/localtimer.h>
#include <mach/msm_iomap.h>
#include <mach/cpu.h>
+#include <mach/board.h>
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
-#define TIMER_ENABLE_CLR_ON_MATCH_EN 2
-#define TIMER_ENABLE_EN 1
+#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
+#define TIMER_ENABLE_EN BIT(0)
#define TIMER_CLEAR 0x000C
#define DGT_CLK_CTL 0x0034
-enum {
- DGT_CLK_CTL_DIV_1 = 0,
- DGT_CLK_CTL_DIV_2 = 1,
- DGT_CLK_CTL_DIV_3 = 2,
- DGT_CLK_CTL_DIV_4 = 3,
-};
-#define CSR_PROTECTION 0x0020
-#define CSR_PROTECTION_EN 1
+#define DGT_CLK_CTL_DIV_4 0x3
#define GPT_HZ 32768
-enum timer_location {
- LOCAL_TIMER = 0,
- GLOBAL_TIMER = 1,
-};
-
-#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT
-
-/* TODO: Remove these ifdefs */
-#if defined(CONFIG_ARCH_QSD8X50)
-#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
-#define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM7X30)
-#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
-#define MSM_DGT_SHIFT (0)
-#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
-#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
-#define MSM_DGT_SHIFT (0)
-#else
-#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
-#define MSM_DGT_SHIFT (5)
-#endif
+#define MSM_DGT_SHIFT 5
-struct msm_clock {
- struct clock_event_device clockevent;
- struct clocksource clocksource;
- unsigned int irq;
- void __iomem *regbase;
- uint32_t freq;
- uint32_t shift;
- void __iomem *global_counter;
- void __iomem *local_counter;
- union {
- struct clock_event_device *evt;
- struct clock_event_device __percpu **percpu_evt;
- };
-};
-
-enum {
- MSM_CLOCK_GPT,
- MSM_CLOCK_DGT,
- NR_TIMERS,
-};
-
-
-static struct msm_clock msm_clocks[];
+static void __iomem *event_base;
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
- if (evt->event_handler == NULL)
- return IRQ_HANDLED;
+ /* Stop the timer tick */
+ if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+ ctrl &= ~TIMER_ENABLE_EN;
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+ }
evt->event_handler(evt);
return IRQ_HANDLED;
}
-static cycle_t msm_read_timer_count(struct clocksource *cs)
-{
- struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
-
- /*
- * Shift timer count down by a constant due to unreliable lower bits
- * on some targets.
- */
- return readl(clk->global_counter) >> clk->shift;
-}
-
-static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
-{
-#ifdef CONFIG_SMP
- int i;
- for (i = 0; i < NR_TIMERS; i++)
- if (evt == &(msm_clocks[i].clockevent))
- return &msm_clocks[i];
- return &msm_clocks[MSM_GLOBAL_TIMER];
-#else
- return container_of(evt, struct msm_clock, clockevent);
-#endif
-}
-
static int msm_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- struct msm_clock *clock = clockevent_to_clock(evt);
- uint32_t now = readl(clock->local_counter);
- uint32_t alarm = now + (cycles << clock->shift);
+ u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
- writel(alarm, clock->regbase + TIMER_MATCH_VAL);
+ writel_relaxed(0, event_base + TIMER_CLEAR);
+ writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
+ writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
}
static void msm_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- struct msm_clock *clock = clockevent_to_clock(evt);
+ u32 ctrl;
+
+ ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+ ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
break;
case CLOCK_EVT_MODE_ONESHOT:
- writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+ /* Timer is enabled in set_next_event */
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- writel(0, clock->regbase + TIMER_ENABLE);
break;
}
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}
-static struct msm_clock msm_clocks[] = {
- [MSM_CLOCK_GPT] = {
- .clockevent = {
- .name = "gp_timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
- .rating = 200,
- .set_next_event = msm_timer_set_next_event,
- .set_mode = msm_timer_set_mode,
- },
- .clocksource = {
- .name = "gp_timer",
- .rating = 200,
- .read = msm_read_timer_count,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- },
- .irq = INT_GP_TIMER_EXP,
- .freq = GPT_HZ,
- },
- [MSM_CLOCK_DGT] = {
- .clockevent = {
- .name = "dg_timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32 + MSM_DGT_SHIFT,
- .rating = 300,
- .set_next_event = msm_timer_set_next_event,
- .set_mode = msm_timer_set_mode,
- },
- .clocksource = {
- .name = "dg_timer",
- .rating = 300,
- .read = msm_read_timer_count,
- .mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- },
- .irq = INT_DEBUG_TIMER_EXP,
- .freq = DGT_HZ >> MSM_DGT_SHIFT,
- .shift = MSM_DGT_SHIFT,
- }
+static struct clock_event_device msm_clockevent = {
+ .name = "gp_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = msm_timer_set_next_event,
+ .set_mode = msm_timer_set_mode,
+};
+
+static union {
+ struct clock_event_device *evt;
+ struct clock_event_device __percpu **percpu_evt;
+} msm_evt;
+
+static void __iomem *source_base;
+
+static cycle_t msm_read_timer_count(struct clocksource *cs)
+{
+ return readl_relaxed(source_base + TIMER_COUNT_VAL);
+}
+
+static cycle_t msm_read_timer_count_shift(struct clocksource *cs)
+{
+ /*
+ * Shift timer count down by a constant due to unreliable lower bits
+ * on some targets.
+ */
+ return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
+}
+
+static struct clocksource msm_clocksource = {
+ .name = "dg_timer",
+ .rating = 300,
+ .read = msm_read_timer_count,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init msm_timer_init(void)
{
- int i;
+ struct clock_event_device *ce = &msm_clockevent;
+ struct clocksource *cs = &msm_clocksource;
int res;
- int global_offset = 0;
+ u32 dgt_hz;
if (cpu_is_msm7x01()) {
- msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
- msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+ event_base = MSM_CSR_BASE;
+ source_base = MSM_CSR_BASE + 0x10;
+ dgt_hz = 19200000 >> MSM_DGT_SHIFT; /* 600 KHz */
+ cs->read = msm_read_timer_count_shift;
+ cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
} else if (cpu_is_msm7x30()) {
- msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE + 0x04;
- msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x24;
+ event_base = MSM_CSR_BASE + 0x04;
+ source_base = MSM_CSR_BASE + 0x24;
+ dgt_hz = 24576000 / 4;
} else if (cpu_is_qsd8x50()) {
- msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
- msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
+ event_base = MSM_CSR_BASE;
+ source_base = MSM_CSR_BASE + 0x10;
+ dgt_hz = 19200000 / 4;
} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
- msm_clocks[MSM_CLOCK_GPT].regbase = MSM_TMR_BASE + 0x04;
- msm_clocks[MSM_CLOCK_DGT].regbase = MSM_TMR_BASE + 0x24;
-
- /* Use CPU0's timer as the global timer. */
- global_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+ event_base = MSM_TMR_BASE + 0x04;
+ /* Use CPU0's timer as the global clock source. */
+ source_base = MSM_TMR0_BASE + 0x24;
+ dgt_hz = 27000000 / 4;
+ writel_relaxed(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
} else
BUG();
-#ifdef CONFIG_ARCH_MSM_SCORPIONMP
- writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
-#endif
-
- for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
- struct msm_clock *clock = &msm_clocks[i];
- struct clock_event_device *ce = &clock->clockevent;
- struct clocksource *cs = &clock->clocksource;
-
- clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
- clock->global_counter = clock->local_counter + global_offset;
-
- writel(0, clock->regbase + TIMER_ENABLE);
- writel(0, clock->regbase + TIMER_CLEAR);
- writel(~0, clock->regbase + TIMER_MATCH_VAL);
-
- ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
- /* allow at least 10 seconds to notice that the timer wrapped */
- ce->max_delta_ns =
- clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
- /* 4 gets rounded down to 3 */
- ce->min_delta_ns = clockevent_delta2ns(4, ce);
- ce->cpumask = cpumask_of(0);
-
- res = clocksource_register_hz(cs, clock->freq);
- if (res)
- printk(KERN_ERR "msm_timer_init: clocksource_register "
- "failed for %s\n", cs->name);
-
- ce->irq = clock->irq;
- if (cpu_is_msm8x60() || cpu_is_msm8960()) {
- clock->percpu_evt = alloc_percpu(struct clock_event_device *);
- if (!clock->percpu_evt) {
- pr_err("msm_timer_init: memory allocation "
- "failed for %s\n", ce->name);
- continue;
- }
-
- *__this_cpu_ptr(clock->percpu_evt) = ce;
- res = request_percpu_irq(ce->irq, msm_timer_interrupt,
- ce->name, clock->percpu_evt);
- if (!res)
- enable_percpu_irq(ce->irq, 0);
- } else {
- clock->evt = ce;
- res = request_irq(ce->irq, msm_timer_interrupt,
- IRQF_TIMER | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
- ce->name, &clock->evt);
+ writel_relaxed(0, event_base + TIMER_ENABLE);
+ writel_relaxed(0, event_base + TIMER_CLEAR);
+ writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
+ ce->cpumask = cpumask_of(0);
+
+ ce->irq = INT_GP_TIMER_EXP;
+ clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
+ if (cpu_is_msm8x60() || cpu_is_msm8960()) {
+ msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
+ if (!msm_evt.percpu_evt) {
+ pr_err("memory allocation failed for %s\n", ce->name);
+ goto err;
}
-
- if (res)
- pr_err("msm_timer_init: request_irq failed for %s\n",
- ce->name);
-
- clockevents_register_device(ce);
+ *__this_cpu_ptr(msm_evt.percpu_evt) = ce;
+ res = request_percpu_irq(ce->irq, msm_timer_interrupt,
+ ce->name, msm_evt.percpu_evt);
+ if (!res)
+ enable_percpu_irq(ce->irq, 0);
+ } else {
+ msm_evt.evt = ce;
+ res = request_irq(ce->irq, msm_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING |
+ IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
}
+
+ if (res)
+ pr_err("request_irq failed for %s\n", ce->name);
+err:
+ writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
+ res = clocksource_register_hz(cs, dgt_hz);
+ if (res)
+ pr_err("clocksource_register failed\n");
}
-#ifdef CONFIG_SMP
+#ifdef CONFIG_LOCAL_TIMERS
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
- static bool local_timer_inited;
- struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
-
/* Use existing clock_event for cpu 0 */
if (!smp_processor_id())
return 0;
- writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
-
- if (!local_timer_inited) {
- writel(0, clock->regbase + TIMER_ENABLE);
- writel(0, clock->regbase + TIMER_CLEAR);
- writel(~0, clock->regbase + TIMER_MATCH_VAL);
- local_timer_inited = true;
- }
- evt->irq = clock->irq;
+ writel_relaxed(0, event_base + TIMER_ENABLE);
+ writel_relaxed(0, event_base + TIMER_CLEAR);
+ writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
+ evt->irq = msm_clockevent.irq;
evt->name = "local_timer";
- evt->features = CLOCK_EVT_FEAT_ONESHOT;
- evt->rating = clock->clockevent.rating;
+ evt->features = msm_clockevent.features;
+ evt->rating = msm_clockevent.rating;
evt->set_mode = msm_timer_set_mode;
evt->set_next_event = msm_timer_set_next_event;
- evt->shift = clock->clockevent.shift;
- evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift);
- evt->max_delta_ns =
- clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
+ evt->shift = msm_clockevent.shift;
+ evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
evt->min_delta_ns = clockevent_delta2ns(4, evt);
- *__this_cpu_ptr(clock->percpu_evt) = evt;
- enable_percpu_irq(evt->irq, 0);
-
+ *__this_cpu_ptr(msm_evt.percpu_evt) = evt;
clockevents_register_device(evt);
+ enable_percpu_irq(evt->irq, 0);
return 0;
}
@@ -321,8 +223,7 @@ void local_timer_stop(struct clock_event_device *evt)
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
disable_percpu_irq(evt->irq);
}
-
-#endif
+#endif /* CONFIG_LOCAL_TIMERS */
struct sys_timer msm_timer = {
.init = msm_timer_init
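
The msm timer conversion above is also where the hand-rolled clockevent math goes away: instead of filling in ce->mult with div_sc() and deriving min/max_delta_ns from a 0xf0000000 >> shift cap, the driver now calls clockevents_config_and_register(), which computes those fields from the timer frequency and the cycle limits it is given (4 and 0xffffffff here). A minimal sketch of that registration style, assuming an invented driver — my_timer, MY_TIMER_HZ, my_set_next_event() and my_set_mode() are placeholders, not part of this patch:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

#define MY_TIMER_HZ     19200000        /* placeholder frequency */

static int my_set_next_event(unsigned long cycles,
                             struct clock_event_device *evt)
{
        /* program the hardware match register 'cycles' ticks from now */
        return 0;
}

static void my_set_mode(enum clock_event_mode mode,
                        struct clock_event_device *evt)
{
        /* enable or gate the hardware timer as requested */
}

static struct clock_event_device my_evt = {
        .name           = "my_timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 200,
        .set_next_event = my_set_next_event,
        .set_mode       = my_set_mode,
};

static void __init my_timer_init(void)
{
        my_evt.cpumask = cpumask_of(0);
        /*
         * Derives mult/shift and min/max_delta_ns from MY_TIMER_HZ and the
         * 4..0xffffffff cycle programming range, then registers the device.
         */
        clockevents_config_and_register(&my_evt, MY_TIMER_HZ, 4, 0xffffffff);
}

Note that the patch advertises the full 32-bit match range, so the old 0xf0000000 headroom (kept so a wrap would be noticed) is no longer reserved; within this file only local_timer_setup() still computes mult/shift by hand for the secondary-CPU copies of the clockevent.
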
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 311d5b0e9bc..62b53d710ef 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -12,12 +12,12 @@
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
+#include <plat/addr-map.h>
#include "common.h"
/*
* Generic Address Decode Windows bit settings
*/
-#define TARGET_DDR 0
#define TARGET_DEV_BUS 1
#define TARGET_PCIE0 4
#define TARGET_PCIE1 8
@@ -32,23 +32,10 @@
#define ATTR_PCIE_MEM(l) (0xf8 & ~(0x10 << (l)))
/*
- * Helpers to get DDR bank info
- */
-#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
-#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
-
-/*
* CPU Address Decode Windows registers
*/
#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
-#define WIN_CTRL_OFF 0x0000
-#define WIN_BASE_OFF 0x0004
-#define WIN_REMAP_LO_OFF 0x0008
-#define WIN_REMAP_HI_OFF 0x000c
-
-
-struct mbus_dram_target_info mv78xx0_mbus_dram_info;
static void __init __iomem *win_cfg_base(int win)
{
@@ -63,94 +50,43 @@ static void __init __iomem *win_cfg_base(int win)
return (void __iomem *)((win < 8) ? WIN0_OFF(win) : WIN8_OFF(win));
}
-static int __init cpu_win_can_remap(int win)
-{
- if (win < 8)
- return 1;
-
- return 0;
-}
-
-static void __init setup_cpu_win(int win, u32 base, u32 size,
- u8 target, u8 attr, int remap)
-{
- void __iomem *addr = win_cfg_base(win);
- u32 ctrl;
-
- base &= 0xffff0000;
- ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1;
-
- writel(base, addr + WIN_BASE_OFF);
- writel(ctrl, addr + WIN_CTRL_OFF);
- if (cpu_win_can_remap(win)) {
- if (remap < 0)
- remap = base;
-
- writel(remap & 0xffff0000, addr + WIN_REMAP_LO_OFF);
- writel(0, addr + WIN_REMAP_HI_OFF);
- }
-}
+/*
+ * Description of the windows needed by the platform code
+ */
+static struct __initdata orion_addr_map_cfg addr_map_cfg = {
+ .num_wins = 14,
+ .remappable_wins = 8,
+ .win_cfg_base = win_cfg_base,
+};
void __init mv78xx0_setup_cpu_mbus(void)
{
- void __iomem *addr;
- int i;
- int cs;
-
/*
- * First, disable and clear windows.
+ * Disable, clear and configure windows.
*/
- for (i = 0; i < 14; i++) {
- addr = win_cfg_base(i);
-
- writel(0, addr + WIN_BASE_OFF);
- writel(0, addr + WIN_CTRL_OFF);
- if (cpu_win_can_remap(i)) {
- writel(0, addr + WIN_REMAP_LO_OFF);
- writel(0, addr + WIN_REMAP_HI_OFF);
- }
- }
+ orion_config_wins(&addr_map_cfg, NULL);
/*
* Setup MBUS dram target info.
*/
- mv78xx0_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
-
if (mv78xx0_core_index() == 0)
- addr = (void __iomem *)DDR_WINDOW_CPU0_BASE;
+ orion_setup_cpu_mbus_target(&addr_map_cfg,
+ DDR_WINDOW_CPU0_BASE);
else
- addr = (void __iomem *)DDR_WINDOW_CPU1_BASE;
-
- for (i = 0, cs = 0; i < 4; i++) {
- u32 base = readl(addr + DDR_BASE_CS_OFF(i));
- u32 size = readl(addr + DDR_SIZE_CS_OFF(i));
-
- /*
- * Chip select enabled?
- */
- if (size & 1) {
- struct mbus_dram_window *w;
-
- w = &mv78xx0_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- w->base = base & 0xffff0000;
- w->size = (size | 0x0000ffff) + 1;
- }
- }
- mv78xx0_mbus_dram_info.num_cs = cs;
+ orion_setup_cpu_mbus_target(&addr_map_cfg,
+ DDR_WINDOW_CPU1_BASE);
}
void __init mv78xx0_setup_pcie_io_win(int window, u32 base, u32 size,
int maj, int min)
{
- setup_cpu_win(window, base, size, TARGET_PCIE(maj),
- ATTR_PCIE_IO(min), -1);
+ orion_setup_cpu_win(&addr_map_cfg, window, base, size,
+ TARGET_PCIE(maj), ATTR_PCIE_IO(min), -1);
}
void __init mv78xx0_setup_pcie_mem_win(int window, u32 base, u32 size,
int maj, int min)
{
- setup_cpu_win(window, base, size, TARGET_PCIE(maj),
- ATTR_PCIE_MEM(min), -1);
+ orion_setup_cpu_win(&addr_map_cfg, window, base, size,
+ TARGET_PCIE(maj), ATTR_PCIE_MEM(min), -1);
}
diff --git a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
index 0e94268d6e6..ee74ec97c14 100644
--- a/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
+++ b/arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
@@ -151,4 +151,5 @@ MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL")
.init_early = mv78xx0_init_early,
.init_irq = mv78xx0_init_irq,
.timer = &mv78xx0_timer,
+ .restart = mv78xx0_restart,
MACHINE_END
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 23d3980ef59..0cdd41004ad 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
-#include <linux/mbus.h>
#include <linux/ata_platform.h>
#include <linux/ethtool.h>
#include <asm/mach/map.h>
@@ -23,6 +22,7 @@
#include <plat/orion_nand.h>
#include <plat/time.h>
#include <plat/common.h>
+#include <plat/addr-map.h>
#include "common.h"
static int get_tclk(void);
@@ -169,8 +169,7 @@ void __init mv78xx0_map_io(void)
****************************************************************************/
void __init mv78xx0_ehci0_init(void)
{
- orion_ehci_init(&mv78xx0_mbus_dram_info,
- USB0_PHYS_BASE, IRQ_MV78XX0_USB_0);
+ orion_ehci_init(USB0_PHYS_BASE, IRQ_MV78XX0_USB_0);
}
@@ -179,8 +178,7 @@ void __init mv78xx0_ehci0_init(void)
****************************************************************************/
void __init mv78xx0_ehci1_init(void)
{
- orion_ehci_1_init(&mv78xx0_mbus_dram_info,
- USB1_PHYS_BASE, IRQ_MV78XX0_USB_1);
+ orion_ehci_1_init(USB1_PHYS_BASE, IRQ_MV78XX0_USB_1);
}
@@ -189,8 +187,7 @@ void __init mv78xx0_ehci1_init(void)
****************************************************************************/
void __init mv78xx0_ehci2_init(void)
{
- orion_ehci_2_init(&mv78xx0_mbus_dram_info,
- USB2_PHYS_BASE, IRQ_MV78XX0_USB_2);
+ orion_ehci_2_init(USB2_PHYS_BASE, IRQ_MV78XX0_USB_2);
}
@@ -199,7 +196,7 @@ void __init mv78xx0_ehci2_init(void)
****************************************************************************/
void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
- orion_ge00_init(eth_data, &mv78xx0_mbus_dram_info,
+ orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
IRQ_MV78XX0_GE_ERR, get_tclk());
}
@@ -210,7 +207,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
****************************************************************************/
void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
- orion_ge01_init(eth_data, &mv78xx0_mbus_dram_info,
+ orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
NO_IRQ, get_tclk());
}
@@ -234,7 +231,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
eth_data->duplex = DUPLEX_FULL;
}
- orion_ge10_init(eth_data, &mv78xx0_mbus_dram_info,
+ orion_ge10_init(eth_data,
GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
NO_IRQ, get_tclk());
}
@@ -258,7 +255,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
eth_data->duplex = DUPLEX_FULL;
}
- orion_ge11_init(eth_data, &mv78xx0_mbus_dram_info,
+ orion_ge11_init(eth_data,
GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
NO_IRQ, get_tclk());
}
@@ -277,8 +274,7 @@ void __init mv78xx0_i2c_init(void)
****************************************************************************/
void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
{
- orion_sata_init(sata_data, &mv78xx0_mbus_dram_info,
- SATA_PHYS_BASE, IRQ_MV78XX0_SATA);
+ orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_MV78XX0_SATA);
}
@@ -401,3 +397,19 @@ void __init mv78xx0_init(void)
feroceon_l2_init(is_l2_writethrough());
#endif
}
+
+void mv78xx0_restart(char mode, const char *cmd)
+{
+ /*
+ * Enable soft reset to assert RSTOUTn.
+ */
+ writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
+
+ /*
+ * Assert soft reset.
+ */
+ writel(SOFT_RESET, SYSTEM_SOFT_RESET);
+
+ while (1)
+ ;
+}
diff --git a/arch/arm/mach-mv78xx0/common.h b/arch/arm/mach-mv78xx0/common.h
index 632e63d65e7..507c767d49e 100644
--- a/arch/arm/mach-mv78xx0/common.h
+++ b/arch/arm/mach-mv78xx0/common.h
@@ -23,7 +23,6 @@ void mv78xx0_init(void);
void mv78xx0_init_early(void);
void mv78xx0_init_irq(void);
-extern struct mbus_dram_target_info mv78xx0_mbus_dram_info;
void mv78xx0_setup_cpu_mbus(void);
void mv78xx0_setup_pcie_io_win(int window, u32 base, u32 size,
int maj, int min);
@@ -46,6 +45,7 @@ void mv78xx0_uart1_init(void);
void mv78xx0_uart2_init(void);
void mv78xx0_uart3_init(void);
void mv78xx0_i2c_init(void);
+void mv78xx0_restart(char, const char *);
extern struct sys_timer mv78xx0_timer;
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index 50b85ae2da5..4d6d48bf51e 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -99,4 +99,5 @@ MACHINE_START(DB78X00_BP, "Marvell DB-78x00-BP Development Board")
.init_early = mv78xx0_init_early,
.init_irq = mv78xx0_init_irq,
.timer = &mv78xx0_timer,
+ .restart = mv78xx0_restart,
MACHINE_END
diff --git a/arch/arm/mach-mv78xx0/include/mach/system.h b/arch/arm/mach-mv78xx0/include/mach/system.h
index 66e7ce4e90b..8c3a5387cec 100644
--- a/arch/arm/mach-mv78xx0/include/mach/system.h
+++ b/arch/arm/mach-mv78xx0/include/mach/system.h
@@ -9,28 +9,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/bridge-regs.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Enable soft reset to assert RSTOUTn.
- */
- writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
- /*
- * Assert soft reset.
- */
- writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
- while (1)
- ;
-}
-
-
#endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
deleted file mode 100644
index ba26fe98e64..00000000000
--- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe000000UL
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index cf4e494d44b..df50342179e 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -10,7 +10,6 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/mbus.h>
#include <linux/io.h>
#include <plat/mpp.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index c51af1cac30..12fcb108b0e 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -10,11 +10,11 @@
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
+#include <plat/addr-map.h>
#include "common.h"
struct pcie_port {
@@ -153,7 +153,7 @@ static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
* Generic PCIe unit setup.
*/
orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
- orion_pcie_setup(pp->base, &mv78xx0_mbus_dram_info);
+ orion_pcie_setup(pp->base);
sys->resource[0] = &pp->res[0];
sys->resource[1] = &pp->res[1];
diff --git a/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c b/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
index e85222e5357..9a882706e13 100644
--- a/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
+++ b/arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
@@ -84,4 +84,5 @@ MACHINE_START(RD78X00_MASA, "Marvell RD-78x00-MASA Development Board")
.init_early = mv78xx0_init_early,
.init_irq = mv78xx0_init_irq,
.timer = &mv78xx0_timer,
+ .restart = mv78xx0_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index 1fc11034804..944025da833 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -297,4 +297,5 @@ MACHINE_START(EUKREA_CPUIMX51, "Eukrea CPUIMX51 Module")
.handle_irq = imx51_handle_irq,
.timer = &mxc_timer,
.init_machine = eukrea_cpuimx51_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index 52a11c1898e..9fbe923c8b0 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -335,4 +335,5 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
.handle_irq = imx51_handle_irq,
.timer = &mxc_timer,
.init_machine = eukrea_cpuimx51sd_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index fc3621d90bd..42b66e8d961 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -222,4 +222,5 @@ MACHINE_START(MX50_RDP, "Freescale MX50 Reference Design Platform")
.handle_irq = imx50_handle_irq,
.timer = &mx50_rdp_timer,
.init_machine = mx50_rdp_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 05783906db2..83eab4176ca 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -175,4 +175,5 @@ MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_3ds_timer,
.init_machine = mx51_3ds_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 5c837603ff0..e4b822e9f71 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
- PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
imx51_soc_init();
@@ -426,4 +426,5 @@ MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_babbage_timer,
.init_machine = mx51_babbage_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index a9e48662cf7..3a5ed2dd885 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -182,7 +182,7 @@ static const struct gpio_keys_platform_data mx51_efikamx_powerkey_data __initcon
.nbuttons = ARRAY_SIZE(mx51_efikamx_powerkey),
};
-void mx51_efikamx_reset(void)
+static void mx51_efikamx_restart(char mode, const char *cmd)
{
if (system_rev == 0x11)
gpio_direction_output(EFIKAMX_RESET1_1, 0);
@@ -292,4 +292,5 @@ MACHINE_START(MX51_EFIKAMX, "Genesi EfikaMX nettop")
.handle_irq = imx51_handle_irq,
.timer = &mx51_efikamx_timer,
.init_machine = mx51_efikamx_init,
+ .restart = mx51_efikamx_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 38c4a3e28d3..ea5f65b0381 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -287,4 +287,5 @@ MACHINE_START(MX51_EFIKASB, "Genesi Efika Smartbook")
.handle_irq = imx51_handle_irq,
.init_machine = efikasb_board_init,
.timer = &mx51_efikasb_timer,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_ard.c b/arch/arm/mach-mx5/board-mx53_ard.c
index 0d7f0fffb23..5f224f1c3eb 100644
--- a/arch/arm/mach-mx5/board-mx53_ard.c
+++ b/arch/arm/mach-mx5/board-mx53_ard.c
@@ -257,4 +257,5 @@ MACHINE_START(MX53_ARD, "Freescale MX53 ARD Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_ard_timer,
.init_machine = mx53_ard_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index 6bea31ab8f8..d6ce137896d 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
@@ -175,4 +175,5 @@ MACHINE_START(MX53_EVK, "Freescale MX53 EVK Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_evk_timer,
.init_machine = mx53_evk_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 7678f7734db..fd8b524e1c5 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
gpio_set_value(LOCO_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
@@ -317,4 +317,5 @@ MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_loco_timer,
.init_machine = mx53_loco_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 59c0845eb4a..22c53c9b18a 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
gpio_set_value(SMD_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
@@ -164,4 +164,5 @@ MACHINE_START(MX53_SMD, "Freescale MX53 SMD Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_smd_timer,
.init_machine = mx53_smd_board_init,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 5c5328257dc..5e2e7a84386 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <mach/hardware.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int mx5_cpu_rev = -1;
@@ -67,7 +67,8 @@ static int __init mx51_neon_fixup(void)
if (!cpu_is_mx51())
return 0;
- if (mx51_revision() < IMX_CHIP_REVISION_3_0 && (elf_hwcap & HWCAP_NEON)) {
+ if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
+ (elf_hwcap & HWCAP_NEON)) {
elf_hwcap &= ~HWCAP_NEON;
pr_info("Turning off NEON support, detected broken NEON implementation\n");
}
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c
index ccc61585659..e6bad17b908 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-mx5/imx51-dt.c
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 4; /* imx51 gets 4 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx51_irq_match[] __initconst = {
@@ -113,4 +115,5 @@ DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
.timer = &imx51_timer,
.init_machine = imx51_dt_init,
.dt_compat = imx51_dt_board_compat,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c
index ccaa0b81b76..05ebb3e6867 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-mx5/imx53-dt.c
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx53 gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx53_irq_match[] __initconst = {
@@ -123,4 +125,5 @@ DT_MACHINE_START(IMX53_DT, "Freescale i.MX53 (Device Tree Support)")
.timer = &imx53_timer,
.init_machine = imx53_dt_init,
.dt_compat = imx53_dt_board_compat,
+ .restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index 26eacc9d0d9..bc17dfea381 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <asm/mach/map.h>
@@ -21,9 +22,27 @@
#include <mach/devices-common.h>
#include <mach/iomux-v3.h>
+static struct clk *gpc_dvfs_clk;
+
static void imx5_idle(void)
{
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ if (!need_resched()) {
+ /* gpc clock is needed for SRPG */
+ if (gpc_dvfs_clk == NULL) {
+ gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
+ if (IS_ERR(gpc_dvfs_clk))
+ goto err0;
+ }
+ clk_enable(gpc_dvfs_clk);
+ mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ if (tzic_enable_wake())
+ goto err1;
+ cpu_do_idle();
+err1:
+ clk_disable(gpc_dvfs_clk);
+ }
+err0:
+ local_irq_enable();
}
/*
@@ -89,7 +108,7 @@ void __init imx51_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX51);
mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
- imx_idle = imx5_idle;
+ pm_idle = imx5_idle;
}
void __init imx53_init_early(void)
diff --git a/arch/arm/mach-mx5/system.c b/arch/arm/mach-mx5/system.c
index 144ebebc4a6..5eebfaad122 100644
--- a/arch/arm/mach-mx5/system.c
+++ b/arch/arm/mach-mx5/system.c
@@ -55,9 +55,6 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
stop_mode = 1;
}
arm_srpgcr |= MXC_SRPGCR_PCR;
-
- if (tzic_enable_wake(1) != 0)
- return;
break;
case STOP_POWER_ON:
ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
index 0163b6d8377..e12e11231dc 100644
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ b/arch/arm/mach-mxs/clock-mx23.c
@@ -545,11 +545,11 @@ int __init mx23_clocks_init(void)
*/
clk_set_parent(&ssp_clk, &ref_io_clk);
- clk_enable(&cpu_clk);
- clk_enable(&hbus_clk);
- clk_enable(&xbus_clk);
- clk_enable(&emi_clk);
- clk_enable(&uart_clk);
+ clk_prepare_enable(&cpu_clk);
+ clk_prepare_enable(&hbus_clk);
+ clk_prepare_enable(&xbus_clk);
+ clk_prepare_enable(&emi_clk);
+ clk_prepare_enable(&uart_clk);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 229ae349421..5d68e415222 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/clkdev.h>
+#include <linux/spinlock.h>
#include <asm/clkdev.h>
#include <asm/div64.h>
@@ -29,6 +30,7 @@
#include <mach/mx28.h>
#include <mach/common.h>
#include <mach/clock.h>
+#include <mach/digctl.h>
#include "regs-clkctrl-mx28.h"
@@ -43,6 +45,33 @@ static struct clk emi_clk;
static struct clk saif0_clk;
static struct clk saif1_clk;
static struct clk clk32k_clk;
+static DEFINE_SPINLOCK(clkmux_lock);
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ * clock pins selected for SAIF1 input clocks.
+ * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ * SAIF0 clock inputs selected for SAIF1 input clocks.
+ * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+ if (clkmux > 0x3)
+ return -EINVAL;
+
+ spin_lock(&clkmux_lock);
+ __raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
+ DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
+ __raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
+ DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
+ spin_unlock(&clkmux_lock);
+
+ return 0;
+}
static int _raw_clk_enable(struct clk *clk)
{
@@ -404,7 +433,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg | (1 << clk->enable_shift)) { \
+ if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
@@ -775,16 +804,25 @@ int __init mx28_clocks_init(void)
clk_set_parent(&ssp0_clk, &ref_io0_clk);
clk_set_parent(&ssp1_clk, &ref_io0_clk);
- clk_enable(&cpu_clk);
- clk_enable(&hbus_clk);
- clk_enable(&xbus_clk);
- clk_enable(&emi_clk);
- clk_enable(&uart_clk);
+ clk_prepare_enable(&cpu_clk);
+ clk_prepare_enable(&hbus_clk);
+ clk_prepare_enable(&xbus_clk);
+ clk_prepare_enable(&emi_clk);
+ clk_prepare_enable(&uart_clk);
clk_set_parent(&lcdif_clk, &ref_pix_clk);
clk_set_parent(&saif0_clk, &pll0_clk);
clk_set_parent(&saif1_clk, &pll0_clk);
+ /*
+ * Set an initial clock rate for the saif internal logic to work
+ * properly. This is important when working in EXTMASTER mode that
+ * uses the other saif's BITCLK&LRCLK but it still needs a basic
+ * clock which should be fast enough for the internal logic.
+ */
+ clk_set_rate(&saif0_clk, 24000000);
+ clk_set_rate(&saif1_clk, 24000000);
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
index a7093c88e6a..97a6f4acc6c 100644
--- a/arch/arm/mach-mxs/clock.c
+++ b/arch/arm/mach-mxs/clock.c
@@ -74,10 +74,15 @@ static int __clk_enable(struct clk *clk)
return 0;
}
-/* This function increments the reference count on the clock and enables the
- * clock if not already enabled. The parent clock tree is recursively enabled
+/*
+ * The clk_enable/clk_disable could be called by drivers in atomic context,
+ * so they should not really hold mutex. Instead, clk_prepare/clk_unprepare
+ * can hold a mutex, as the pair will only be called in non-atomic context.
+ * Before migrating to common clk framework, we can have __clk_enable and
+ * __clk_disable called in clk_prepare/clk_unprepare with mutex held and
+ * leave clk_enable/clk_disable as the dummy functions.
*/
-int clk_enable(struct clk *clk)
+int clk_prepare(struct clk *clk)
{
int ret = 0;
@@ -90,13 +95,9 @@ int clk_enable(struct clk *clk)
return ret;
}
-EXPORT_SYMBOL(clk_enable);
+EXPORT_SYMBOL(clk_prepare);
-/* This function decrements the reference count on the clock and disables
- * the clock when reference count is 0. The parent clock tree is
- * recursively disabled
- */
-void clk_disable(struct clk *clk)
+void clk_unprepare(struct clk *clk)
{
if (clk == NULL || IS_ERR(clk))
return;
@@ -105,6 +106,18 @@ void clk_disable(struct clk *clk)
__clk_disable(clk);
mutex_unlock(&clocks_mutex);
}
+EXPORT_SYMBOL(clk_unprepare);
+
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ /* nothing to do */
+}
EXPORT_SYMBOL(clk_disable);
/* Retrieve the *current* clock rate. If the clock itself
@@ -166,7 +179,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return ret;
if (clk->usecount)
- clk_enable(parent);
+ clk_prepare_enable(parent);
mutex_lock(&clocks_mutex);
ret = clk->set_parent(clk, parent);
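
The replacement comment in the clock.c hunk above is the heart of this change: clk_prepare()/clk_unprepare() take over the mutex-protected work formerly done in clk_enable()/clk_disable(), which become no-ops so that drivers may keep calling them from atomic context. Viewed from a driver, the contract looks roughly like the sketch below — a hedged illustration only; the "my-clk" id, my_probe() and my_stop() are invented, not part of this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *my_clk;

static int my_probe(struct device *dev)
{
        my_clk = clk_get(dev, "my-clk");
        if (IS_ERR(my_clk))
                return PTR_ERR(my_clk);

        /* process context: clk_prepare() may sleep, so pair it with enable */
        return clk_prepare_enable(my_clk);
}

static void my_stop(struct device *dev)
{
        /* clk_disable() stays safe in atomic context... */
        clk_disable(my_clk);
        /* ...while clk_unprepare() may sleep and belongs in process context */
        clk_unprepare(my_clk);
        clk_put(my_clk);
}

The in-code comment calls this an interim step before mxs moves to the common clk framework; the calling-context rule (prepare/unprepare may sleep, enable/disable may not) carries over unchanged there, which is why the call sites converted to clk_prepare_enable() throughout this series already match that API.
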
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index c8887103f0e..4f50094e293 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -47,6 +47,7 @@ struct platform_device *__init mx28_add_mxsfb(
const struct mxsfb_platform_data *pdata);
extern const struct mxs_saif_data mx28_saif_data[] __initconst;
-#define mx28_add_saif(id) mxs_add_saif(&mx28_saif_data[id])
+#define mx28_add_saif(id, pdata) \
+ mxs_add_saif(&mx28_saif_data[id], pdata)
struct platform_device *__init mx28_add_rtc_stmp3xxx(void);
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-saif.c b/arch/arm/mach-mxs/devices/platform-mxs-saif.c
index 1ec965e9fe9..f6e3a60b420 100644
--- a/arch/arm/mach-mxs/devices/platform-mxs-saif.c
+++ b/arch/arm/mach-mxs/devices/platform-mxs-saif.c
@@ -32,7 +32,8 @@ const struct mxs_saif_data mx28_saif_data[] __initconst = {
};
#endif
-struct platform_device *__init mxs_add_saif(const struct mxs_saif_data *data)
+struct platform_device *__init mxs_add_saif(const struct mxs_saif_data *data,
+ const struct mxs_saif_platform_data *pdata)
{
struct resource res[] = {
{
@@ -56,5 +57,5 @@ struct platform_device *__init mxs_add_saif(const struct mxs_saif_data *data)
};
return mxs_add_platform_device("mxs-saif", data->id, res,
- ARRAY_SIZE(res), NULL, 0);
+ ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 635bb5d9a20..e1237ab2586 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -16,6 +16,8 @@ struct clk;
extern const u32 *mxs_get_ocotp(void);
extern int mxs_reset_block(void __iomem *);
extern void mxs_timer_init(struct clk *, int);
+extern void mxs_restart(char, const char *);
+extern int mxs_saif_clkmux_select(unsigned int clkmux);
extern int mx23_register_gpios(void);
extern int mx23_clocks_init(void);
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index a8080f44c03..dc369c1239f 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -94,6 +94,7 @@ struct platform_device *__init mxs_add_mxs_pwm(
resource_size_t iobase, int id);
/* saif */
+#include <sound/saif.h>
struct mxs_saif_data {
int id;
resource_size_t iobase;
@@ -103,4 +104,5 @@ struct mxs_saif_data {
};
struct platform_device *__init mxs_add_saif(
- const struct mxs_saif_data *data);
+ const struct mxs_saif_data *data,
+ const struct mxs_saif_platform_data *pdata);
diff --git a/arch/arm/mach-mxs/include/mach/digctl.h b/arch/arm/mach-mxs/include/mach/digctl.h
new file mode 100644
index 00000000000..49a888c65d6
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/digctl.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_DIGCTL_H__
+#define __MACH_DIGCTL_H__
+
+/* MXS DIGCTL SAIF CLKMUX */
+#define MXS_DIGCTL_SAIF_CLKMUX_DIRECT 0x0
+#define MXS_DIGCTL_SAIF_CLKMUX_CROSSINPUT 0x1
+#define MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0 0x2
+#define MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR1 0x3
+
+#define HW_DIGCTL_CTRL 0x0
+#define BP_DIGCTL_CTRL_SAIF_CLKMUX 10
+#define BM_DIGCTL_CTRL_SAIF_CLKMUX (0x3 << 10)
+#endif
diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h
index 75d86118b76..30c7990f3c0 100644
--- a/arch/arm/mach-mxs/include/mach/mx28.h
+++ b/arch/arm/mach-mxs/include/mach/mx28.h
@@ -104,8 +104,8 @@
#define MX28_INT_CAN1 9
#define MX28_INT_LRADC_TOUCH 10
#define MX28_INT_HSADC 13
-#define MX28_INT_IRADC_THRESH0 14
-#define MX28_INT_IRADC_THRESH1 15
+#define MX28_INT_LRADC_THRESH0 14
+#define MX28_INT_LRADC_THRESH1 15
#define MX28_INT_LRADC_CH0 16
#define MX28_INT_LRADC_CH1 17
#define MX28_INT_LRADC_CH2 18
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index 0d2d2b47099..bde5f663474 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -30,6 +30,7 @@
*/
#define cpu_is_mx23() ( \
machine_is_mx23evk() || \
+ machine_is_stmp378x() || \
0)
#define cpu_is_mx28() ( \
machine_is_mx28evk() || \
diff --git a/arch/arm/mach-mxs/include/mach/system.h b/arch/arm/mach-mxs/include/mach/system.h
index 0e428239b43..e7ad1bb2942 100644
--- a/arch/arm/mach-mxs/include/mach/system.h
+++ b/arch/arm/mach-mxs/include/mach/system.h
@@ -22,6 +22,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-void arch_reset(char mode, const char *cmd);
-
#endif /* __MACH_MXS_SYSTEM_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/vmalloc.h b/arch/arm/mach-mxs/include/mach/vmalloc.h
deleted file mode 100644
index 103b0165ed0..00000000000
--- a/arch/arm/mach-mxs/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000 Russell King.
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_MXS_VMALLOC_H__
-#define __MACH_MXS_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END 0xf4000000UL
-
-#endif /* __MACH_MXS_VMALLOC_H__ */
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c
index 3b1681e4f49..2f2758230ed 100644
--- a/arch/arm/mach-mxs/mach-m28evk.c
+++ b/arch/arm/mach-mxs/mach-m28evk.c
@@ -361,6 +361,7 @@ static struct sys_timer m28evk_timer = {
MACHINE_START(M28EVK, "DENX M28 EVK")
.map_io = mx28_map_io,
.init_irq = mx28_init_irq,
- .init_machine = m28evk_init,
.timer = &m28evk_timer,
+ .init_machine = m28evk_init,
+ .restart = mxs_restart,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-mx23evk.c b/arch/arm/mach-mxs/mach-mx23evk.c
index c325fbe4e4c..5ea1c57d260 100644
--- a/arch/arm/mach-mxs/mach-mx23evk.c
+++ b/arch/arm/mach-mxs/mach-mx23evk.c
@@ -184,4 +184,5 @@ MACHINE_START(MX23EVK, "Freescale MX23 EVK")
.init_irq = mx23_init_irq,
.timer = &mx23evk_timer,
.init_machine = mx23evk_init,
+ .restart = mxs_restart,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index 064ec5abaa5..fdb0a5664dd 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -27,6 +27,7 @@
#include <mach/common.h>
#include <mach/iomux-mx28.h>
+#include <mach/digctl.h>
#include "devices-mx28.h"
@@ -228,7 +229,7 @@ static void __init mx28evk_fec_reset(void)
/* Enable fec phy clock */
clk = clk_get_sys("pll2", NULL);
if (!IS_ERR(clk))
- clk_enable(clk);
+ clk_prepare_enable(clk);
/* Power up fec phy */
ret = gpio_request(MX28EVK_FEC_PHY_POWER, "fec-phy-power");
@@ -421,6 +422,18 @@ static struct gpio mx28evk_lcd_gpios[] = {
{ MX28EVK_BL_ENABLE, GPIOF_OUT_INIT_HIGH, "bl-enable" },
};
+static const struct mxs_saif_platform_data
+ mx28evk_mxs_saif_pdata[] __initconst = {
+ /* working on EXTMSTR0 mode (saif0 master, saif1 slave) */
+ {
+ .master_mode = 1,
+ .master_id = 0,
+ }, {
+ .master_mode = 0,
+ .master_id = 0,
+ },
+};
+
static void __init mx28evk_init(void)
{
int ret;
@@ -454,8 +467,9 @@ static void __init mx28evk_init(void)
else
mx28_add_mxsfb(&mx28evk_mxsfb_pdata);
- mx28_add_saif(0);
- mx28_add_saif(1);
+ mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
+ mx28_add_saif(0, &mx28evk_mxs_saif_pdata[0]);
+ mx28_add_saif(1, &mx28evk_mxs_saif_pdata[1]);
mx28_add_mxs_i2c(0);
i2c_register_board_info(0, mxs_i2c0_board_info,
@@ -501,4 +515,5 @@ MACHINE_START(MX28EVK, "Freescale MX28 EVK")
.init_irq = mx28_init_irq,
.timer = &mx28evk_timer,
.init_machine = mx28evk_init,
+ .restart = mxs_restart,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c
index 177e53123a0..a626c07b871 100644
--- a/arch/arm/mach-mxs/mach-stmp378x_devb.c
+++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c
@@ -115,6 +115,7 @@ static struct sys_timer stmp378x_dvb_timer = {
MACHINE_START(STMP378X, "STMP378X")
.map_io = mx23_map_io,
.init_irq = mx23_init_irq,
- .init_machine = stmp378x_dvb_init,
.timer = &stmp378x_dvb_timer,
+ .init_machine = stmp378x_dvb_init,
+ .restart = mxs_restart,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-tx28.c b/arch/arm/mach-mxs/mach-tx28.c
index 9a1f0e7a338..2c0862e655e 100644
--- a/arch/arm/mach-mxs/mach-tx28.c
+++ b/arch/arm/mach-mxs/mach-tx28.c
@@ -178,4 +178,5 @@ MACHINE_START(TX28, "Ka-Ro electronics TX28 module")
.init_irq = mx28_init_irq,
.timer = &tx28_timer,
.init_machine = tx28_stk5v3_init,
+ .restart = mxs_restart,
MACHINE_END
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
index 0fcff47009c..9a7b08b2a92 100644
--- a/arch/arm/mach-mxs/module-tx28.c
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
MX28_PAD_ENET0_CRS__ENET1_RX_EN,
};
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 20ec3bddf7c..54f91ad1c96 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -42,7 +42,7 @@ static void __iomem *mxs_clkctrl_reset_addr;
/*
* Reset the system. It is called by machine_restart().
*/
-void arch_reset(char mode, const char *cmd)
+void mxs_restart(char mode, const char *cmd)
{
/* reset the chip */
__mxs_setl(MXS_CLKCTRL_RESET_CHIP, mxs_clkctrl_reset_addr);
@@ -53,7 +53,7 @@ void arch_reset(char mode, const char *cmd)
mdelay(50);
/* We'll take a jump through zero as a poor second */
- cpu_reset(0);
+ soft_restart(0);
}
static int __init mxs_arch_reset_init(void)
@@ -66,7 +66,7 @@ static int __init mxs_arch_reset_init(void)
clk = clk_get_sys("rtc", NULL);
if (!IS_ERR(clk))
- clk_enable(clk);
+ clk_prepare_enable(clk);
return 0;
}
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index cace0d2e5a5..564a63279f1 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -245,7 +245,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
void __init mxs_timer_init(struct clk *timer_clk, int irq)
{
- clk_enable(timer_clk);
+ clk_prepare_enable(timer_clk);
/*
* Initialize timers to a known state
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 00023b5cf12..59e67979f19 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -187,3 +187,8 @@ static int __init netx_init(void)
subsys_initcall(netx_init);
+void netx_restart(char mode, const char *cmd)
+{
+ writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
+ NETX_SYSTEM_RES_CR);
+}
diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h
index ede2d35341c..9b915119b8d 100644
--- a/arch/arm/mach-netx/generic.h
+++ b/arch/arm/mach-netx/generic.h
@@ -19,6 +19,7 @@
extern void __init netx_map_io(void);
extern void __init netx_init_irq(void);
+extern void netx_restart(char, const char *);
struct sys_timer;
extern struct sys_timer netx_timer;
diff --git a/arch/arm/mach-netx/include/mach/entry-macro.S b/arch/arm/mach-netx/include/mach/entry-macro.S
index 844f1f9acbd..6e9f1cbe163 100644
--- a/arch/arm/mach-netx/include/mach/entry-macro.S
+++ b/arch/arm/mach-netx/include/mach/entry-macro.S
@@ -18,22 +18,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <mach/hardware.h>
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =io_p2v(0x001ff000)
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, [\base, #0]
- clz \irqnr, \irqstat
- rsb \irqnr, \irqnr, #31
- cmp \irqstat, #0
- .endm
-
diff --git a/arch/arm/mach-netx/include/mach/system.h b/arch/arm/mach-netx/include/mach/system.h
index dc7b4bc003c..b38fa36d58c 100644
--- a/arch/arm/mach-netx/include/mach/system.h
+++ b/arch/arm/mach-netx/include/mach/system.h
@@ -19,20 +19,10 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include "netx-regs.h"
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
- NETX_SYSTEM_RES_CR);
-}
-
#endif
diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c
index 90903dd44cb..180ea899a48 100644
--- a/arch/arm/mach-netx/nxdb500.c
+++ b/arch/arm/mach-netx/nxdb500.c
@@ -28,6 +28,7 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
@@ -203,6 +204,8 @@ MACHINE_START(NXDB500, "Hilscher nxdb500")
.atag_offset = 0x100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &netx_timer,
.init_machine = nxdb500_init,
+ .restart = netx_restart,
MACHINE_END
diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c
index c63384aba50..58009e29b20 100644
--- a/arch/arm/mach-netx/nxdkn.c
+++ b/arch/arm/mach-netx/nxdkn.c
@@ -28,6 +28,7 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
@@ -96,6 +97,8 @@ MACHINE_START(NXDKN, "Hilscher nxdkn")
.atag_offset = 0x100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &netx_timer,
.init_machine = nxdkn_init,
+ .restart = netx_restart,
MACHINE_END
diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c
index 8f548ec83ad..122e99826ef 100644
--- a/arch/arm/mach-netx/nxeb500hmi.c
+++ b/arch/arm/mach-netx/nxeb500hmi.c
@@ -28,6 +28,7 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/vic.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
@@ -180,6 +181,8 @@ MACHINE_START(NXEB500HMI, "Hilscher nxeb500hmi")
.atag_offset = 0x100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &netx_timer,
.init_machine = nxeb500hmi_init,
+ .restart = netx_restart,
MACHINE_END
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 0cbb74c96ef..7c878bf0034 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -21,6 +21,7 @@
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
+#include <asm/hardware/vic.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -34,6 +35,8 @@
#include <mach/nand.h>
#include <mach/fsmc.h>
+#include "cpu-8815.h"
+
/* Initial value for SRC control register: all timers use MXTAL/8 source */
#define SRC_CR_INIT_MASK 0x00007fff
#define SRC_CR_INIT_VAL 0x2aaa8000
@@ -280,6 +283,8 @@ MACHINE_START(NOMADIK, "NHK8815")
.atag_offset = 0x100,
.map_io = cpu8815_map_io,
.init_irq = cpu8815_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &nomadik_timer,
.init_machine = nhk8815_platform_init,
+ .restart = cpu8815_restart,
MACHINE_END
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index dc67717db6f..65df7b4fdd3 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -21,6 +21,7 @@
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
@@ -32,6 +33,7 @@
#include <asm/hardware/cache-l2x0.h>
#include "clock.h"
+#include "cpu-8815.h"
#define __MEM_4K_RESOURCE(x) \
.res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
@@ -164,3 +166,13 @@ void __init cpu8815_init_irq(void)
#endif
return;
}
+
+void cpu8815_restart(char mode, const char *cmd)
+{
+ void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18);
+
+ /* FIXME: use egpio when implemented */
+
+ /* Write anything to Reset status register */
+ writel(1, src_rstsr);
+}
diff --git a/arch/arm/mach-nomadik/cpu-8815.h b/arch/arm/mach-nomadik/cpu-8815.h
new file mode 100644
index 00000000000..71c21e8a11d
--- /dev/null
+++ b/arch/arm/mach-nomadik/cpu-8815.h
@@ -0,0 +1,4 @@
+extern void cpu8815_map_io(void);
+extern void cpu8815_platform_init(void);
+extern void cpu8815_init_irq(void);
+extern void cpu8815_restart(char, const char *);
diff --git a/arch/arm/mach-nomadik/include/mach/entry-macro.S b/arch/arm/mach-nomadik/include/mach/entry-macro.S
index 49f1aa3bb42..98ea1c1fbba 100644
--- a/arch/arm/mach-nomadik/include/mach/entry-macro.S
+++ b/arch/arm/mach-nomadik/include/mach/entry-macro.S
@@ -6,38 +6,8 @@
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =IO_ADDRESS(NOMADIK_IC_BASE)
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- /* This stanza gets the irq mask from one of two status registers */
- mov \irqnr, #0
- ldr \irqstat, [\base, #VIC_REG_IRQSR0] @ get masked status
- cmp \irqstat, #0
- bne 1001f
- add \irqnr, \irqnr, #32
- ldr \irqstat, [\base, #VIC_REG_IRQSR1] @ get masked status
-
-1001: tst \irqstat, #15
- bne 1002f
- add \irqnr, \irqnr, #4
- movs \irqstat, \irqstat, lsr #4
- bne 1001b
-1002: tst \irqstat, #1
- bne 1003f
- add \irqnr, \irqnr, #1
- movs \irqstat, \irqstat, lsr #1
- bne 1002b
-1003: /* EQ will be set if no irqs pending */
- .endm
diff --git a/arch/arm/mach-nomadik/include/mach/setup.h b/arch/arm/mach-nomadik/include/mach/setup.h
index b7897edf1f3..bcaeaf41c05 100644
--- a/arch/arm/mach-nomadik/include/mach/setup.h
+++ b/arch/arm/mach-nomadik/include/mach/setup.h
@@ -12,9 +12,6 @@
#ifdef CONFIG_NOMADIK_8815
-extern void cpu8815_map_io(void);
-extern void cpu8815_platform_init(void);
-extern void cpu8815_init_irq(void);
extern void nmdk_timer_init(void);
#endif /* NOMADIK_8815 */
diff --git a/arch/arm/mach-nomadik/include/mach/system.h b/arch/arm/mach-nomadik/include/mach/system.h
index 7119f688116..25e198b8976 100644
--- a/arch/arm/mach-nomadik/include/mach/system.h
+++ b/arch/arm/mach-nomadik/include/mach/system.h
@@ -20,9 +20,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-
static inline void arch_idle(void)
{
/*
@@ -32,14 +29,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- void __iomem *src_rstsr = io_p2v(NOMADIK_SRC_BASE + 0x18);
-
- /* FIXME: use egpio when implemented */
-
- /* Write anything to Reset status register */
- writel(1, src_rstsr);
-}
-
#endif
diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h
deleted file mode 100644
index f83d574d944..00000000000
--- a/arch/arm/mach-nomadik/include/mach/vmalloc.h
+++ /dev/null
@@ -1,2 +0,0 @@
-
-#define VMALLOC_END 0xe8000000UL
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index e0a028161dd..4f8d66f044e 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -168,78 +168,6 @@ config MACH_OMAP_GENERIC
custom OMAP boards. Say Y here if you have a custom
board.
-comment "OMAP CPU Speed"
- depends on ARCH_OMAP1
-
-config OMAP_CLOCKS_SET_BY_BOOTLOADER
- bool "OMAP clocks set by bootloader"
- depends on ARCH_OMAP1
- help
- Enable this option to prevent the kernel from overriding the clock
- frequencies programmed by bootloader for MPU, DSP, MMUs, TC,
- internal LCD controller and MPU peripherals.
-
-config OMAP_ARM_216MHZ
- bool "OMAP ARM 216 MHz CPU (1710 only)"
- depends on ARCH_OMAP1 && ARCH_OMAP16XX
- help
- Enable 216 MHz clock for OMAP1710 CPU. If unsure, say N.
-
-config OMAP_ARM_195MHZ
- bool "OMAP ARM 195 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 195MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_192MHZ
- bool "OMAP ARM 192 MHz CPU"
- depends on ARCH_OMAP1 && ARCH_OMAP16XX
- help
- Enable 192MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_182MHZ
- bool "OMAP ARM 182 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 182MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_168MHZ
- bool "OMAP ARM 168 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 168MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_150MHZ
- bool "OMAP ARM 150 MHz CPU"
- depends on ARCH_OMAP1 && ARCH_OMAP15XX
- help
- Enable 150MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_120MHZ
- bool "OMAP ARM 120 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 120MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_96MHZ
- bool "OMAP ARM 96 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 96MHz clock for OMAP CPU. If unsure, say N.
-
-config OMAP_ARM_60MHZ
- bool "OMAP ARM 60 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
- default y
- help
- Enable 60MHz clock for OMAP CPU. If unsure, say Y.
-
-config OMAP_ARM_30MHZ
- bool "OMAP ARM 30 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_OMAP850)
- help
- Enable 30MHz clock for OMAP CPU. If unsure, say N.
-
endmenu
endif
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 51bae31cf36..88909cc0b25 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -35,7 +35,7 @@
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <mach/camera.h>
#include <mach/ams-delta-fiq.h>
@@ -302,8 +302,6 @@ static void __init ams_delta_init(void)
omap_cfg_reg(J19_1610_CAM_D6);
omap_cfg_reg(J18_1610_CAM_D7);
- iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
-
omap_board_config = ams_delta_config;
omap_board_config_size = ARRAY_SIZE(ams_delta_config);
omap_serial_init();
@@ -373,15 +371,22 @@ static int __init ams_delta_modem_init(void)
}
arch_initcall(ams_delta_modem_init);
+static void __init ams_delta_map_io(void)
+{
+ omap15xx_map_io();
+ iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
+}
+
MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
/* Maintainer: Jonathan McDowell <noodles@earth.li> */
.atag_offset = 0x100,
- .map_io = omap15xx_map_io,
+ .map_io = ams_delta_map_io,
.init_early = omap1_init_early,
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
EXPORT_SYMBOL(ams_delta_latch1_write);
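
Taken together, the board hunks above settle on one shape: the per-board iotable_init() moves into a small map_io wrapper that runs after the SoC-level mapping, and the reset path is wired through the machine descriptor's .restart hook instead of the old arch_reset pointer. A minimal sketch of that board-file shape, using the mach-omap1 helpers from this patch (MY_BOARD, board_map_io and board_init are hypothetical names; a real board would also need a machine-type entry):

    #include <linux/init.h>
    #include <asm/mach/arch.h>
    #include <asm/mach/map.h>
    #include "common.h"

    static void __init board_map_io(void)
    {
            omap15xx_map_io();      /* SoC-level static mappings first */
            /* board-specific iotable_init() entries would follow here */
    }

    static void __init board_init(void)
    {
            /* board devices, pin mux, etc. */
    }

    MACHINE_START(MY_BOARD, "Hypothetical OMAP15xx board")
            .atag_offset    = 0x100,
            .map_io         = board_map_io,
            .init_early     = omap1_init_early,
            .init_irq       = omap1_init_irq,
            .init_machine   = board_init,
            .timer          = &omap1_timer,
            .restart        = omap1_restart,  /* replaces the old arch_reset pointer */
    MACHINE_END
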
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 23178275f96..0b9464b4121 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -32,7 +32,7 @@
#include <plat/flash.h>
#include <plat/fpga.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
/* fsample is pretty close to p2-sample */
@@ -390,4 +390,5 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
.init_irq = omap1_init_irq,
.init_machine = omap_fsample_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index dc5b75de531..9a5fe581bc1 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -25,7 +25,7 @@
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
/* assume no Mini-AB port */
@@ -89,4 +89,5 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
.init_irq = omap1_init_irq,
.init_machine = omap_generic_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index b334b148167..00ad6b22d60 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -43,7 +43,7 @@
#include <plat/irda.h>
#include <plat/usb.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/flash.h>
#include "board-h2.h"
@@ -456,4 +456,5 @@ MACHINE_START(OMAP_H2, "TI-H2")
.init_irq = omap1_init_irq,
.init_machine = h2_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 74ebe72c984..4a7f2514970 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -45,7 +45,7 @@
#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/dma.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/flash.h>
#include "board-h3.h"
@@ -444,4 +444,5 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
.init_irq = omap1_init_irq,
.init_machine = h3_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 3e91baab1a8..731cc3db7ab 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -41,7 +41,7 @@
#include <asm/mach/arch.h>
#include <plat/omap7xx.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
#include <plat/keypad.h>
#include <plat/usb.h>
@@ -610,4 +610,5 @@ MACHINE_START(HERALD, "HTC Herald")
.init_irq = omap1_init_irq,
.init_machine = htcherald_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 273153dba15..309369ea697 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -37,7 +37,7 @@
#include <plat/tc.h>
#include <plat/usb.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/mmc.h>
/* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
@@ -460,4 +460,5 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
.init_irq = omap1_init_irq,
.init_machine = innovator_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 6798b848831..f9efc036ba9 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -30,7 +30,7 @@
#include <plat/usb.h>
#include <plat/board.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/hwa742.h>
#include <plat/lcd_mipid.h>
#include <plat/mmc.h>
@@ -259,4 +259,5 @@ MACHINE_START(NOKIA770, "Nokia 770")
.init_irq = omap1_init_irq,
.init_machine = omap_nokia770_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index c3859278d25..675de06557a 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -51,7 +51,7 @@
#include <plat/usb.h>
#include <plat/mux.h>
#include <plat/tc.h>
-#include <plat/common.h>
+#include "common.h"
/* At OMAP5912 OSK the Ethernet is directly connected to CS1 */
#define OMAP_OSK_ETHR_START 0x04800300
@@ -578,4 +578,5 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
.init_irq = omap1_init_irq,
.init_machine = osk_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index f9c44cb15b4..81fa27f8836 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -41,7 +41,7 @@
#include <plat/board.h>
#include <plat/irda.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#define PALMTE_USBDETECT_GPIO 0
#define PALMTE_USB_OR_DC_GPIO 1
@@ -270,4 +270,5 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
.init_irq = omap1_init_irq,
.init_machine = omap_palmte_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 11a98539f7b..81cb8217838 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -39,7 +39,7 @@
#include <plat/board.h>
#include <plat/irda.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -317,4 +317,5 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
.init_irq = omap1_init_irq,
.init_machine = omap_palmtt_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 42061573e38..e881945ce8e 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -41,7 +41,7 @@
#include <plat/board.h>
#include <plat/irda.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -334,4 +334,5 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
.init_irq = omap1_init_irq,
.init_machine = omap_palmz71_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 203ae07550d..c000bed7627 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -32,7 +32,7 @@
#include <plat/fpga.h>
#include <plat/flash.h>
#include <plat/keypad.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
static const unsigned int p2_keymap[] = {
@@ -352,4 +352,5 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
.init_irq = omap1_init_irq,
.init_machine = omap_perseus2_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 092a4c04640..7bcd82ab0fd 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -40,7 +40,7 @@
#include <plat/usb.h>
#include <plat/tc.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/keypad.h>
#include <plat/board-sx1.h>
@@ -416,4 +416,5 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
.init_irq = omap1_init_irq,
.init_machine = omap_sx1_init,
.timer = &omap1_timer,
+ .restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 61ed6cdab2b..f83a502dc93 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -28,13 +28,12 @@
#include <linux/export.h>
#include <mach/hardware.h>
-#include <mach/system.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/board-voiceblue.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/tc.h>
@@ -221,7 +220,7 @@ void voiceblue_wdt_ping(void)
gpio_set_value(0, wdt_gpio_state);
}
-static void voiceblue_reset(char mode, const char *cmd)
+static void voiceblue_restart(char mode, const char *cmd)
{
/*
* Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
@@ -285,8 +284,6 @@ static void __init voiceblue_init(void)
 * (it is connected through inverter) */
omap_writeb(0x00, OMAP_LPG1_LCR);
omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
-
- arch_reset = voiceblue_reset;
}
MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
@@ -298,4 +295,5 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
.init_irq = omap1_init_irq,
.init_machine = voiceblue_init,
.timer = &omap1_timer,
+ .restart = voiceblue_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 84ef70476b5..0c50df05d13 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -197,11 +197,10 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
ref_rate = ck_ref_p->rate;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
- if (ptr->xtal != ref_rate)
+ if (!(ptr->flags & cpu_mask))
continue;
- /* DPLL1 cannot be reprogrammed without risking system crash */
- if (likely(dpll1_rate != 0) && ptr->pll_rate != dpll1_rate)
+ if (ptr->xtal != ref_rate)
continue;
/* Can check only after xtal frequency check */
@@ -215,12 +214,8 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
/*
* In most cases we should not need to reprogram DPLL.
* Reprogramming the DPLL is tricky, it must be done from SRAM.
- * (on 730, bit 13 must always be 1)
*/
- if (cpu_is_omap7xx())
- omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val | 0x2000);
- else
- omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
+ omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
ck_dpll1_p->rate = ptr->pll_rate;
@@ -290,6 +285,9 @@ long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
highest_rate = -EINVAL;
for (ptr = omap1_rate_table; ptr->rate; ptr++) {
+ if (!(ptr->flags & cpu_mask))
+ continue;
+
if (ptr->xtal != ref_rate)
continue;
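
The two clock.c hunks above replace the compile-time CONFIG_OMAP_ARM_*MHZ selection with a runtime test of each rate-table entry's flags against the cpu_mask built in omap1_clk_init(). Roughly, the selection loop now behaves like the condensed sketch below (a simplification of omap1_select_table_rate(); the struct fields follow opp.h as changed in this patch, and omap1_pick_rate() is only an illustrative name):

    #include "clock.h"      /* extern u32 cpu_mask */
    #include "opp.h"        /* struct mpu_rate, omap1_rate_table[] */

    static struct mpu_rate *omap1_pick_rate(unsigned long ref_rate,
                                            unsigned long wanted)
    {
            struct mpu_rate *ptr;

            for (ptr = omap1_rate_table; ptr->rate; ptr++) {
                    if (!(ptr->flags & cpu_mask))   /* entry not valid on this SoC */
                            continue;
                    if (ptr->xtal != ref_rate)      /* wrong reference crystal */
                            continue;
                    if (ptr->rate <= wanted)        /* table is sorted descending */
                            return ptr;
            }
            return NULL;
    }
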
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index eaf09efb91c..3d04f4f6767 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -17,7 +17,8 @@
#include <plat/clock.h>
-extern int __init omap1_clk_init(void);
+int omap1_clk_init(void);
+void omap1_clk_late_init(void);
extern int omap1_clk_enable(struct clk *clk);
extern void omap1_clk_disable(struct clk *clk);
extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
@@ -110,4 +111,7 @@ extern const struct clkops clkops_dummy;
extern const struct clkops clkops_uart_16xx;
extern const struct clkops clkops_generic;
+/* used for passing SoC type to omap1_{select,round_to}_table_rate() */
+extern u32 cpu_mask;
+
#endif
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 92400b9eb69..94699a82a73 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -16,6 +16,8 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <asm/mach-types.h> /* for machine_is_* */
@@ -23,6 +25,7 @@
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/clkdev_omap.h>
+#include <plat/sram.h> /* for omap_sram_reprogram_clock() */
#include <plat/usb.h> /* for OTG_BASE */
#include "clock.h"
@@ -767,12 +770,23 @@ static struct clk_functions omap1_clk_functions = {
.clk_disable_unused = omap1_clk_disable_unused,
};
+static void __init omap1_show_rates(void)
+{
+ pr_notice("Clocking rate (xtal/DPLL1/MPU): "
+ "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+ ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+ ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+}
+
+u32 cpu_mask;
+
int __init omap1_clk_init(void)
{
struct omap_clk *c;
const struct omap_clock_config *info;
int crystal_type = 0; /* Default 12 MHz */
- u32 reg, cpu_mask;
+ u32 reg;
#ifdef CONFIG_DEBUG_LL
/*
@@ -797,6 +811,8 @@ int __init omap1_clk_init(void)
clk_preinit(c->lk.clk);
cpu_mask = 0;
+ if (cpu_is_omap1710())
+ cpu_mask |= CK_1710;
if (cpu_is_omap16xx())
cpu_mask |= CK_16XX;
if (cpu_is_omap1510())
@@ -835,9 +851,12 @@ int __init omap1_clk_init(void)
 /* We want to be in synchronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
- /* Use values set by bootloader. Determine PLL rate and recalculate
- * dependent clocks as if kernel had changed PLL or divisors.
+
+ /*
+ * Initially use the values set by the bootloader. Determine the PLL
+ * rate and recalculate dependent clocks as if the kernel had changed
+ * the PLL or divisors. See also omap1_clk_late_init(), which can
+ * reprogram dpll1 after the SRAM is initialized.
*/
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
@@ -862,25 +881,10 @@ int __init omap1_clk_init(void)
}
}
}
-#else
- /* Find the highest supported frequency and enable it */
- if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
- printk(KERN_ERR "System frequencies not set. Check your config.\n");
- /* Guess sane values (60MHz) */
- omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
- ck_dpll1.rate = 60000000;
- }
-#endif
propagate_rate(&ck_dpll1);
/* Cache rates for clocks connected to ck_ref (not dpll1) */
propagate_rate(&ck_ref);
- printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
- "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
- ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
- ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
- arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
+ omap1_show_rates();
if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
@@ -925,3 +929,23 @@ int __init omap1_clk_init(void)
return 0;
}
+
+#define OMAP1_DPLL1_SANE_VALUE 60000000
+
+void __init omap1_clk_late_init(void)
+{
+ unsigned long rate = ck_dpll1.rate;
+
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ pr_err("System frequencies not set, using default. Check your config.\n");
+ /*
+ * Reprogramming the DPLL is tricky, it must be done from SRAM.
+ */
+ omap_sram_reprogram_clock(0x2290, 0x0005);
+ ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
+ }
+ propagate_rate(&ck_dpll1);
+ omap1_show_rates();
+ loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
+}
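
omap1_clk_late_init() replaces the deleted #else branch, but it can only run once the SRAM is mapped, because omap_sram_reprogram_clock() executes from SRAM; it also rescales loops_per_jiffy since udelay() was calibrated against the bootloader's MPU rate. The devices.c hunk further down hooks it in right after omap_sram_init(); schematically, the ordering looks like the condensed sketch below (not the full function body):

    static int __init omap1_init_devices(void)
    {
            if (!cpu_class_is_omap1())
                    return -ENODEV;

            omap_sram_init();       /* map SRAM first ...                       */
            omap1_clk_late_init();  /* ... so DPLL1 can be reprogrammed from it,
                                     * falling back to a sane 60 MHz setting and
                                     * rescaling loops_per_jiffy afterwards     */

            /* platform device registration continues as before */
            return 0;
    }
    arch_initcall(omap1_init_devices);
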
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
new file mode 100644
index 00000000000..a9a5146dd2d
--- /dev/null
+++ b/arch/arm/mach-omap1/common.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * Header for code common to all OMAP1 machines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP1_COMMON_H
+#define __ARCH_ARM_MACH_OMAP1_COMMON_H
+
+#include <plat/common.h>
+
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+void omap7xx_map_io(void);
+#else
+static inline void omap7xx_map_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP15XX
+void omap15xx_map_io(void);
+#else
+static inline void omap15xx_map_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP16XX
+void omap16xx_map_io(void);
+#else
+static inline void omap16xx_map_io(void)
+{
+}
+#endif
+
+void omap1_init_early(void);
+void omap1_init_irq(void);
+void omap1_restart(char, const char *);
+
+extern struct sys_timer omap1_timer;
+extern bool omap_32k_timer_init(void);
+
+#endif /* __ARCH_ARM_MACH_OMAP1_COMMON_H */
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 48ef9888e82..1d76a63c098 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,7 +22,7 @@
#include <mach/hardware.h>
#include <asm/mach/map.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/mux.h>
@@ -30,6 +30,8 @@
#include <plat/omap7xx.h>
#include <plat/mcbsp.h>
+#include "clock.h"
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -293,6 +295,7 @@ static int __init omap1_init_devices(void)
return -ENODEV;
omap_sram_init();
+ omap1_clk_late_init();
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
deleted file mode 100644
index 22ec4a47957..00000000000
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-omap1/include/mach/vmalloc.h
- *
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 7969cfda445..8e55b6fb347 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -121,7 +121,6 @@ void __init omap16xx_map_io(void)
void omap1_init_early(void)
{
omap_check_revision();
- omap_ioremap_init();
/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
* on a Posted Write in the TIPB Bridge".
diff --git a/arch/arm/mach-omap1/opp.h b/arch/arm/mach-omap1/opp.h
index 07074d79adc..79a683864a5 100644
--- a/arch/arm/mach-omap1/opp.h
+++ b/arch/arm/mach-omap1/opp.h
@@ -21,6 +21,7 @@ struct mpu_rate {
unsigned long pll_rate;
__u16 ckctl_val;
__u16 dpllctl_val;
+ u32 flags;
};
extern struct mpu_rate omap1_rate_table[];
diff --git a/arch/arm/mach-omap1/opp_data.c b/arch/arm/mach-omap1/opp_data.c
index 75a54651499..9cd4ddb5139 100644
--- a/arch/arm/mach-omap1/opp_data.c
+++ b/arch/arm/mach-omap1/opp_data.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <plat/clkdev_omap.h>
#include "opp.h"
/*-------------------------------------------------------------------------
@@ -20,40 +21,34 @@ struct mpu_rate omap1_rate_table[] = {
* NOTE: Comment order here is different from bits in CKCTL value:
* armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
*/
-#if defined(CONFIG_OMAP_ARM_216MHZ)
- { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_195MHZ)
- { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_192MHZ)
- { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
- { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
- { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
- { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
- { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_182MHZ)
- { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_168MHZ)
- { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_150MHZ)
- { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_120MHZ)
- { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_96MHZ)
- { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_60MHZ)
- { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_30MHZ)
- { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
-#endif
+ { 216000000, 12000000, 216000000, 0x050d, 0x2910, /* 1/1/2/2/2/8 */
+ CK_1710 },
+ { 195000000, 13000000, 195000000, 0x050e, 0x2790, /* 1/1/2/2/4/8 */
+ CK_7XX },
+ { 192000000, 19200000, 192000000, 0x050f, 0x2510, /* 1/1/2/2/8/8 */
+ CK_16XX },
+ { 192000000, 12000000, 192000000, 0x050f, 0x2810, /* 1/1/2/2/8/8 */
+ CK_16XX },
+ { 96000000, 12000000, 192000000, 0x055f, 0x2810, /* 2/2/2/2/8/8 */
+ CK_16XX },
+ { 48000000, 12000000, 192000000, 0x0baf, 0x2810, /* 4/4/4/8/8/8 */
+ CK_16XX },
+ { 24000000, 12000000, 192000000, 0x0fff, 0x2810, /* 8/8/8/8/8/8 */
+ CK_16XX },
+ { 182000000, 13000000, 182000000, 0x050e, 0x2710, /* 1/1/2/2/4/8 */
+ CK_7XX },
+ { 168000000, 12000000, 168000000, 0x010f, 0x2710, /* 1/1/1/2/8/8 */
+ CK_16XX|CK_7XX },
+ { 150000000, 12000000, 150000000, 0x010a, 0x2cb0, /* 1/1/1/2/4/4 */
+ CK_1510 },
+ { 120000000, 12000000, 120000000, 0x010a, 0x2510, /* 1/1/1/2/4/4 */
+ CK_16XX|CK_1510|CK_310|CK_7XX },
+ { 96000000, 12000000, 96000000, 0x0005, 0x2410, /* 1/1/1/1/2/2 */
+ CK_16XX|CK_1510|CK_310|CK_7XX },
+ { 60000000, 12000000, 60000000, 0x0005, 0x2290, /* 1/1/1/1/2/2 */
+ CK_16XX|CK_1510|CK_310|CK_7XX },
+ { 30000000, 12000000, 60000000, 0x0555, 0x2290, /* 2/2/2/2/2/2 */
+ CK_16XX|CK_1510|CK_310|CK_7XX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c
index ad951ee6920..91d199b6497 100644
--- a/arch/arm/mach-omap1/reset.c
+++ b/arch/arm/mach-omap1/reset.c
@@ -5,10 +5,9 @@
#include <linux/io.h>
#include <mach/hardware.h>
-#include <mach/system.h>
#include <plat/prcm.h>
-void omap1_arch_reset(char mode, const char *cmd)
+void omap1_restart(char mode, const char *cmd)
{
/*
* Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
@@ -21,5 +20,3 @@ void omap1_arch_reset(char mode, const char *cmd)
omap_writew(1, ARM_RSTCT1);
}
-
-void (*arch_reset)(char, const char *) = omap1_arch_reset;
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index a1837771e03..b8faffa44f9 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -37,7 +37,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -54,7 +53,7 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-#include <plat/common.h>
+#include "common.h"
#ifdef CONFIG_OMAP_MPU_TIMER
@@ -190,30 +189,9 @@ static __init void omap_init_mpu_timer(unsigned long rate)
* ---------------------------------------------------------------------------
*/
-static DEFINE_CLOCK_DATA(cd);
-
-static inline unsigned long long notrace _omap_mpu_sched_clock(void)
-{
- u32 cyc = ~omap_mpu_timer_read(1);
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-#ifndef CONFIG_OMAP_32K_TIMER
-unsigned long long notrace sched_clock(void)
-{
- return _omap_mpu_sched_clock();
-}
-#else
-static unsigned long long notrace omap_mpu_sched_clock(void)
-{
- return _omap_mpu_sched_clock();
-}
-#endif
-
-static void notrace mpu_update_sched_clock(void)
+static u32 notrace omap_mpu_read_sched_clock(void)
{
- u32 cyc = ~omap_mpu_timer_read(1);
- update_sched_clock(&cd, cyc, (u32)~0);
+ return ~omap_mpu_timer_read(1);
}
static void __init omap_init_clocksource(unsigned long rate)
@@ -223,7 +201,7 @@ static void __init omap_init_clocksource(unsigned long rate)
"%s: can't register clocksource!\n";
omap_mpu_timer_start(1, ~0, 1);
- init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
+ setup_sched_clock(omap_mpu_read_sched_clock, 32, rate);
if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate,
300, 32, clocksource_mmio_readl_down))
@@ -254,30 +232,6 @@ static inline void omap_mpu_timer_init(void)
}
#endif /* CONFIG_OMAP_MPU_TIMER */
-#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
-static unsigned long long (*preferred_sched_clock)(void);
-
-unsigned long long notrace sched_clock(void)
-{
- if (!preferred_sched_clock)
- return 0;
-
- return preferred_sched_clock();
-}
-
-static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
-{
- if (use_32k_sched_clock)
- preferred_sched_clock = omap_32k_sched_clock;
- else
- preferred_sched_clock = omap_mpu_sched_clock;
-}
-#else
-static inline void preferred_sched_clock_init(bool use_32k_sched_clcok)
-{
-}
-#endif
-
static inline int omap_32k_timer_usable(void)
{
int res = false;
@@ -299,12 +253,8 @@ static inline int omap_32k_timer_usable(void)
*/
static void __init omap1_timer_init(void)
{
- if (omap_32k_timer_usable()) {
- preferred_sched_clock_init(1);
- } else {
+ if (!omap_32k_timer_usable())
omap_mpu_timer_init();
- preferred_sched_clock_init(0);
- }
}
struct sys_timer omap1_timer = {
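
The time.c hunks drop the open-coded DEFINE_CLOCK_DATA()/sched_clock() plumbing in favour of the generic setup_sched_clock() helper, which takes a 32-bit counter read callback plus the counter width and rate and handles wraparound internally. A minimal sketch of that pattern, reusing the MPU timer read function introduced above (omap_sched_clock_init() is an illustrative wrapper, not a function from this patch):

    #include <linux/init.h>
    #include <asm/sched_clock.h>

    /* Return the raw, up-counting 32-bit timer value. */
    static u32 notrace omap_mpu_read_sched_clock(void)
    {
            return ~omap_mpu_timer_read(1); /* timer counts down, so invert */
    }

    static void __init omap_sched_clock_init(unsigned long rate)
    {
            /* 32-bit counter running at 'rate' Hz; the core handles wrapping */
            setup_sched_clock(omap_mpu_read_sched_clock, 32, rate);
    }
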
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 96604a50c4f..9a54ef4dcf5 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -52,7 +52,7 @@
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/dmtimer.h>
/*
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 50341471890..904bd1dfcd2 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -25,6 +25,7 @@ config ARCH_OMAP2
depends on ARCH_OMAP2PLUS
default y
select CPU_V6
+ select MULTI_IRQ_HANDLER
config ARCH_OMAP3
bool "TI OMAP3"
@@ -36,13 +37,16 @@ config ARCH_OMAP3
select ARCH_HAS_OPP
select PM_OPP if PM
select ARM_CPU_SUSPEND if PM
+ select MULTI_IRQ_HANDLER
config ARCH_OMAP4
bool "TI OMAP4"
default y
depends on ARCH_OMAP2PLUS
+ select CACHE_L2X0
select CPU_V7
select ARM_GIC
+ select HAVE_SMP
select LOCAL_TIMERS if SMP
select PL310_ERRATA_588369
select PL310_ERRATA_727915
@@ -74,8 +78,13 @@ config SOC_OMAP3430
default y
select ARCH_OMAP_OTG
-config SOC_OMAPTI816X
- bool "TI816X support"
+config SOC_OMAPTI81XX
+ bool "TI81XX support"
+ depends on ARCH_OMAP3
+ default y
+
+config SOC_OMAPAM33XX
+ bool "AM33XX support"
depends on ARCH_OMAP3
default y
@@ -312,7 +321,12 @@ config MACH_OMAP_3630SDP
config MACH_TI8168EVM
bool "TI8168 Evaluation Module"
- depends on SOC_OMAPTI816X
+ depends on SOC_OMAPTI81XX
+ default y
+
+config MACH_TI8148EVM
+ bool "TI8148 Evaluation Module"
+ depends on SOC_OMAPTI81XX
default y
config MACH_OMAP_4430SDP
@@ -334,6 +348,7 @@ config MACH_OMAP4_PANDA
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
depends on ARCH_OMAP3
+ select ARM_AMBA
select OC_ETM
help
Say Y here to enable debugging hardware of omap3
@@ -350,6 +365,27 @@ config OMAP3_SDRC_AC_TIMING
wish to say no. Selecting yes without understanding what is
going on could result in system crashes;
+config OMAP4_ERRATA_I688
+ bool "OMAP4 errata: Async Bridge Corruption"
+ depends on ARCH_OMAP4
+ select ARCH_HAS_BARRIERS
+ help
+ If data is stalled inside an asynchronous bridge because of back
+ pressure, it may be accepted multiple times, creating a pointer
+ misalignment that corrupts subsequent transfers on that data path
+ until the next reset of the system (there is no recovery once the
+ issue is hit; the path remains consistently broken). Async bridges
+ sit on the paths from the MPU to the EMIF and from the MPU to the
+ L3 interconnect. This situation can only happen when idle is
+ initiated by a Master Request Disconnection, which is triggered by
+ software executing WFI on the CPU.
+ The workaround for this erratum requires every initiator connected
+ through an async bridge to ensure that its data path is properly
+ drained before issuing WFI. This condition is met if one strongly
+ ordered access is performed to the target right before executing
+ WFI. In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO
+ need to be drained. An I/O barrier ensures there is no
+ synchronisation loss for initiators operating on both interconnect
+ ports simultaneously.
endmenu
endif
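
The help text above describes the workaround only in prose: before a CPU executes WFI, one strongly-ordered access must be issued so the async bridges between the MPU and the EMIF/L3 targets are drained. The real implementation belongs to the OMAP4 low-power code added elsewhere in this series (sleep44xx.S and the barrier support selected via ARCH_HAS_BARRIERS); the fragment below is only a rough illustration of the idea, assuming sram_sync is a scratch location already mapped with strongly-ordered attributes (both the name and the mapping are assumptions here):

    static void __iomem *sram_sync; /* assumed: strongly-ordered scratch mapping */

    static void omap4_drain_and_wfi(void)
    {
            /* One strongly-ordered write drains the MPU->EMIF/L3 async FIFOs */
            writel_relaxed(0, sram_sync);
            dsb();                  /* ensure the write has completed */
            wfi();                  /* now safe to request idle */
    }
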
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 69ab1c06913..fc9b238cbc1 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,17 +4,18 @@
# Common support
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
- common.o gpio.o dma.o wd_timer.o
+ common.o gpio.o dma.o wd_timer.o display.o
omap-2-3-common = irq.o sdrc.o
hwmod-common = omap_hwmod.o \
omap_hwmod_common_data.o
clock-common = clock.o clock_common_data.o \
clkt_dpll.o clkt_clksel.o
+secure-common = omap-smc.o omap-secure.o
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
@@ -24,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
+obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \
+ sleep44xx.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
-AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec)
# Functions loaded to SRAM
obj-$(CONFIG_SOC_OMAP2420) += sram242x.o
@@ -62,7 +65,8 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \
+ cpuidle44xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
@@ -77,6 +81,7 @@ endif
endif
# PRCM
+obj-y += prm_common.o
obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
vc3xxx_data.o vp3xxx_data.o
@@ -86,7 +91,7 @@ obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
cm44xx.o prcm_mpu44xx.o \
prminst44xx.o vc44xx_data.o \
- vp44xx_data.o
+ vp44xx_data.o prm44xx.o
# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
@@ -232,6 +237,7 @@ obj-$(CONFIG_MACH_CRANEBOARD) += board-am3517crane.o
obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o
obj-$(CONFIG_MACH_TI8168EVM) += board-ti8168evm.o
+obj-$(CONFIG_MACH_TI8148EVM) += board-ti8168evm.o
# Platform specific device init code
@@ -264,7 +270,4 @@ smsc911x-$(CONFIG_SMSC911X) := gpmc-smsc911x.o
obj-y += $(smsc911x-m) $(smsc911x-y)
obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
-disp-$(CONFIG_OMAP2_DSS) := display.o
-obj-y += $(disp-m) $(disp-y)
-
obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index d704f0ac328..7370983f809 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -34,7 +34,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <plat/usb.h>
#include <plat/gpmc-smc91x.h>
@@ -301,6 +301,8 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
.map_io = omap243x_map_io,
.init_early = omap2430_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = omap_2430sdp_init,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 77142c13fa1..383717ba63b 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -33,7 +33,7 @@
#include <plat/mcspi.h>
#include <plat/board.h>
#include <plat/usb.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <video/omapdss.h>
@@ -475,106 +475,8 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-
-static struct omap_device_pad serial1_pads[] __initdata = {
- /*
- * Note that off output enable is an active low
- * signal. So setting this means pin is a
- * input enabled in off mode
- */
- OMAP_MUX_STATIC("uart1_cts.uart1_cts",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_rts.uart1_rts",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_rx.uart1_rx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart1_tx.uart1_tx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLDOWN |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT |
- OMAP_PIN_OFF_INPUT_PULLDOWN |
- OMAP_OFFOUT_EN |
- OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT |
- OMAP_OFF_EN |
- OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial1_data __initdata = {
- .id = 0,
- .pads = serial1_pads,
- .pads_cnt = ARRAY_SIZE(serial1_pads),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static inline void board_serial_init(void)
-{
- omap_serial_init_port(&serial1_data);
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
-}
#else
#define board_mux NULL
-
-static inline void board_serial_init(void)
-{
- omap_serial_init();
-}
#endif
/*
@@ -711,7 +613,7 @@ static void __init omap_3430sdp_init(void)
else
gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
omap_ads7846_init(1, gpio_pendown, 310, NULL);
- board_serial_init();
+ omap_serial_init();
omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL);
usb_musb_init(NULL);
board_smc91x_init();
@@ -728,6 +630,8 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap_3430sdp_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index f552305162f..6ef350d1ae4 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -16,7 +16,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
#include <plat/gpmc-smc91x.h>
#include <plat/usb.h>
@@ -215,6 +215,8 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
.map_io = omap3_map_io,
.init_early = omap3630_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap_sdp_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 515646886b5..2ceb75d21eb 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -27,13 +27,13 @@
#include <linux/leds_pwm.h>
#include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include <plat/mmc.h>
#include <plat/omap4-keypad.h>
@@ -372,11 +372,17 @@ static struct platform_device sdp4430_vbat = {
},
};
+static struct platform_device sdp4430_dmic_codec = {
+ .name = "dmic-codec",
+ .id = -1,
+};
+
static struct platform_device *sdp4430_devices[] __initdata = {
&sdp4430_gpio_keys_device,
&sdp4430_leds_gpio,
&sdp4430_leds_pwm,
&sdp4430_vbat,
+ &sdp4430_dmic_codec,
};
static struct omap_musb_board_data musb_board_data = {
@@ -404,6 +410,7 @@ static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 5,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
+ .pm_caps = MMC_PM_KEEP_POWER,
.gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
.ocr_mask = MMC_VDD_165_195,
@@ -837,74 +844,8 @@ static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial4_pads[] __initdata = {
- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static struct omap_board_data serial4_data __initdata = {
- .id = 3,
- .pads = serial4_pads,
- .pads_cnt = ARRAY_SIZE(serial4_pads),
-};
-
-static inline void board_serial_init(void)
-{
- struct omap_board_data bdata;
- bdata.flags = 0;
- bdata.pads = NULL;
- bdata.pads_cnt = 0;
- bdata.id = 0;
- /* pass dummy data for UART1 */
- omap_serial_init_port(&bdata);
-
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
- omap_serial_init_port(&serial4_data);
-}
#else
#define board_mux NULL
-
-static inline void board_serial_init(void)
-{
- omap_serial_init();
-}
#endif
static void omap4_sdp4430_wifi_mux_init(void)
@@ -954,7 +895,7 @@ static void __init omap_4430sdp_init(void)
omap4_i2c_init();
omap_sfh7741prox_init();
platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
- board_serial_init();
+ omap_serial_init();
omap_sdrc_init(NULL, NULL);
omap4_sdp4430_wifi_init();
omap4_twl6030_hsmmc_init(mmc);
@@ -984,6 +925,8 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
.map_io = omap4_map_io,
.init_early = omap4430_init_early,
.init_irq = gic_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = omap_4430sdp_init,
.timer = &omap4_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 7834536ab41..c3851e8de28 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -27,7 +27,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include "mux.h"
@@ -98,6 +98,8 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
.map_io = omap3_map_io,
.init_early = am35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_crane_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index d314f033c9d..4b1cfe32e6b 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -24,6 +24,7 @@
#include <linux/i2c/pca953x.h>
#include <linux/can/platform/ti_hecc.h>
#include <linux/davinci_emac.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <mach/am35xx.h>
@@ -32,7 +33,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include <video/omapdss.h>
#include <video/omap-panel-generic-dpi.h>
@@ -40,6 +41,7 @@
#include "mux.h"
#include "control.h"
+#include "hsmmc.h"
#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
@@ -455,6 +457,23 @@ static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
static struct omap_board_config_kernel am3517_evm_config[] __initdata = {
};
+static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = 127,
+ .gpio_wp = 126,
+ },
+ {
+ .mmc = 2,
+ .caps = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = 128,
+ .gpio_wp = 129,
+ },
+ {} /* Terminator */
+};
+
+
static void __init am3517_evm_init(void)
{
omap_board_config = am3517_evm_config;
@@ -483,6 +502,9 @@ static void __init am3517_evm_init(void)
/* MUSB */
am3517_evm_musb_init();
+
+ /* MMC init function */
+ omap2_hsmmc_init(mmc);
}
MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
@@ -491,6 +513,8 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.map_io = omap3_map_io,
.init_early = am35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_evm_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index de8134b7f58..ac773829941 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -37,7 +37,7 @@
#include <plat/led.h>
#include <plat/usb.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <video/omapdss.h>
@@ -354,6 +354,8 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = omap_apollon_init,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index bd1bcacb40f..e921e3be24a 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -37,7 +37,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/nand.h>
#include <plat/gpmc.h>
#include <plat/usb.h>
@@ -53,7 +53,8 @@
#include "hsmmc.h"
#include "common-board-devices.h"
-#define CM_T35_GPIO_PENDOWN 57
+#define CM_T35_GPIO_PENDOWN 57
+#define SB_T35_USB_HUB_RESET_GPIO 167
#define CM_T35_SMSC911X_CS 5
#define CM_T35_SMSC911X_GPIO 163
@@ -339,8 +340,10 @@ static struct regulator_consumer_supply cm_t35_vsim_supply[] = {
REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply cm_t35_vdvi_supply[] = {
- REGULATOR_SUPPLY("vdvi", "omapdss"),
+static struct regulator_consumer_supply cm_t35_vio_supplies[] = {
+ REGULATOR_SUPPLY("vcc", "spi1.0"),
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -373,6 +376,19 @@ static struct regulator_init_data cm_t35_vsim = {
.consumer_supplies = cm_t35_vsim_supply,
};
+static struct regulator_init_data cm_t35_vio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(cm_t35_vio_supplies),
+ .consumer_supplies = cm_t35_vio_supplies,
+};
+
static uint32_t cm_t35_keymap[] = {
KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_LEFT),
KEY(1, 0, KEY_UP), KEY(1, 1, KEY_ENTER), KEY(1, 2, KEY_DOWN),
@@ -421,6 +437,23 @@ static struct usbhs_omap_board_data usbhs_bdata __initdata = {
.reset_gpio_port[2] = -EINVAL
};
+static void cm_t35_init_usbh(void)
+{
+ int err;
+
+ err = gpio_request_one(SB_T35_USB_HUB_RESET_GPIO,
+ GPIOF_OUT_INIT_LOW, "usb hub rst");
+ if (err) {
+ pr_err("SB-T35: usb hub rst gpio request failed: %d\n", err);
+ } else {
+ udelay(10);
+ gpio_set_value(SB_T35_USB_HUB_RESET_GPIO, 1);
+ msleep(1);
+ }
+
+ usbhs_init(&usbhs_bdata);
+}
+
static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
unsigned ngpio)
{
@@ -456,17 +489,14 @@ static struct twl4030_platform_data cm_t35_twldata = {
.gpio = &cm_t35_gpio_data,
.vmmc1 = &cm_t35_vmmc1,
.vsim = &cm_t35_vsim,
+ .vio = &cm_t35_vio,
};
static void __init cm_t35_init_i2c(void)
{
omap3_pmic_get_config(&cm_t35_twldata, TWL_COMMON_PDATA_USB,
- TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
- cm_t35_twldata.vpll2->constraints.name = "VDVI";
- cm_t35_twldata.vpll2->num_consumer_supplies =
- ARRAY_SIZE(cm_t35_vdvi_supply);
- cm_t35_twldata.vpll2->consumer_supplies = cm_t35_vdvi_supply;
+ TWL_COMMON_REGULATOR_VDAC |
+ TWL_COMMON_PDATA_AUDIO);
omap3_pmic_init("tps65930", &cm_t35_twldata);
}
@@ -570,24 +600,28 @@ static void __init cm_t3x_common_dss_mux_init(int mux_mode)
static void __init cm_t35_init_mux(void)
{
- omap_mux_init_signal("gpio_70", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("gpio_71", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("gpio_72", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("gpio_73", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("gpio_74", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("gpio_75", OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
- cm_t3x_common_dss_mux_init(OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT);
+ int mux_mode = OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT;
+
+ omap_mux_init_signal("dss_data0.dss_data0", mux_mode);
+ omap_mux_init_signal("dss_data1.dss_data1", mux_mode);
+ omap_mux_init_signal("dss_data2.dss_data2", mux_mode);
+ omap_mux_init_signal("dss_data3.dss_data3", mux_mode);
+ omap_mux_init_signal("dss_data4.dss_data4", mux_mode);
+ omap_mux_init_signal("dss_data5.dss_data5", mux_mode);
+ cm_t3x_common_dss_mux_init(mux_mode);
}
static void __init cm_t3730_init_mux(void)
{
- omap_mux_init_signal("sys_boot0", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sys_boot1", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sys_boot3", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sys_boot4", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sys_boot5", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- omap_mux_init_signal("sys_boot6", OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
- cm_t3x_common_dss_mux_init(OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT);
+ int mux_mode = OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT;
+
+ omap_mux_init_signal("sys_boot0", mux_mode);
+ omap_mux_init_signal("sys_boot1", mux_mode);
+ omap_mux_init_signal("sys_boot3", mux_mode);
+ omap_mux_init_signal("sys_boot4", mux_mode);
+ omap_mux_init_signal("sys_boot5", mux_mode);
+ omap_mux_init_signal("sys_boot6", mux_mode);
+ cm_t3x_common_dss_mux_init(mux_mode);
}
#else
static inline void cm_t35_init_mux(void) {}
@@ -612,7 +646,7 @@ static void __init cm_t3x_common_init(void)
cm_t35_init_display();
usb_musb_init(NULL);
- usbhs_init(&usbhs_bdata);
+ cm_t35_init_usbh();
}
static void __init cm_t35_init(void)
@@ -634,8 +668,10 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t35_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(CM_T3730, "Compulab CM-T3730")
@@ -644,6 +680,8 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
.map_io = omap3_map_io,
.init_early = omap3630_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3730_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 3f4dc662684..f36d694d215 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -39,7 +39,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include <plat/nand.h>
#include <plat/gpmc.h>
@@ -299,6 +299,8 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
.map_io = omap3_map_io,
.init_early = am35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3517_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 90154e411da..e873063f4fd 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -41,7 +41,7 @@
#include <asm/mach/flash.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
@@ -660,6 +660,8 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = devkit8000_init,
.timer = &omap3_secure_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index fb55fa3dad5..d5875606048 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,8 +20,7 @@
#include <asm/mach/arch.h>
#include <plat/board.h>
-#include <plat/common.h>
-#include <mach/omap4-common.h>
+#include "common.h"
#include "common-board-devices.h"
/*
@@ -70,7 +69,6 @@ static void __init omap_generic_init(void)
if (node)
irq_domain_add_simple(node, 0);
- omap_serial_init();
omap_sdrc_init(NULL, NULL);
of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
@@ -107,6 +105,7 @@ DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
.init_machine = omap_generic_init,
.timer = &omap2_timer,
.dt_compat = omap242x_boards_compat,
+ .restart = omap_prcm_restart,
MACHINE_END
#endif
@@ -122,9 +121,11 @@ DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
.map_io = omap243x_map_io,
.init_early = omap2430_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = omap_generic_init,
.timer = &omap2_timer,
.dt_compat = omap243x_boards_compat,
+ .restart = omap_prcm_restart,
MACHINE_END
#endif
@@ -143,6 +144,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
.init_machine = omap3_init,
.timer = &omap3_timer,
.dt_compat = omap3_boards_compat,
+ .restart = omap_prcm_restart,
MACHINE_END
#endif
@@ -161,5 +163,6 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.init_machine = omap4_init,
.timer = &omap4_timer,
.dt_compat = omap4_boards_compat,
+ .restart = omap_prcm_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 8b351d92a1c..54af800d143 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -34,7 +34,7 @@
#include <plat/usb.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/menelaus.h>
#include <plat/dma.h>
#include <plat/gpmc.h>
@@ -396,6 +396,8 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = omap_h4_init,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index d0a3f78a9b6..a59ace0ed56 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -28,7 +28,7 @@
#include <asm/mach/arch.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <plat/usb.h>
#include <video/omapdss.h>
@@ -672,8 +672,10 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(IGEP0030, "IGEP OMAP3 module")
@@ -682,6 +684,8 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index e179da0c4da..2d2a61f7dcb 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -36,7 +36,7 @@
#include <plat/mcspi.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <mach/board-zoom.h>
@@ -434,6 +434,8 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap_ldp_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index e9d5f4a3d06..42a4d11fad2 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -26,7 +26,7 @@
#include <asm/mach-types.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/menelaus.h>
#include <mach/irqs.h>
#include <plat/mcspi.h>
@@ -46,7 +46,7 @@ static struct device *mmc_device;
#define TUSB6010_GPIO_ENABLE 0
#define TUSB6010_DMACHAN 0x3f
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
* 1.5 V voltage regulators of PM companion chip. Companion chip will then
@@ -644,15 +644,15 @@ static inline void board_serial_init(void)
bdata.pads_cnt = 0;
bdata.id = 0;
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
bdata.id = 1;
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
bdata.id = 2;
bdata.pads = serial2_pads;
bdata.pads_cnt = ARRAY_SIZE(serial2_pads);
- omap_serial_init_port(&bdata);
+ omap_serial_init_port(&bdata, NULL);
}
#else
@@ -689,8 +689,10 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(NOKIA_N810, "Nokia N810")
@@ -699,8 +701,10 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
@@ -709,6 +713,8 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_irq = omap2_init_irq,
+ .handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
.timer = &omap2_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 4a71cb7e42d..7ffcd2839e7 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -40,7 +40,7 @@
#include <asm/mach/flash.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <video/omapdss.h>
#include <video/omap-panel-dvi.h>
#include <plat/gpmc.h>
@@ -559,6 +559,8 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
.map_io = omap3_map_io,
.init_early = omap3_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_beagle_init,
.timer = &omap3_secure_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ec00b2ec702..003fe34c934 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -43,7 +43,7 @@
#include <plat/board.h>
#include <plat/usb.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/mcspi.h>
#include <video/omapdss.h>
#include <video/omap-panel-dvi.h>
@@ -681,6 +681,8 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_evm_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 7c0f193f246..4198dd017d8 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -40,7 +40,7 @@
#include <plat/mux.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc-smsc911x.h>
#include <plat/gpmc.h>
#include <plat/sdrc.h>
@@ -208,8 +208,10 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
@@ -217,6 +219,8 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index f7811f4cfc3..1644b73017f 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -41,7 +41,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <mach/hardware.h>
#include <plat/mcspi.h>
#include <plat/usb.h>
@@ -606,6 +606,8 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3pandora_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index ddb7d6663c6..cb089a46f62 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -35,7 +35,7 @@
#include <asm/mach/flash.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
@@ -454,6 +454,8 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_stalker_init,
.timer = &omap3_secure_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index a2d0d1971e2..a0b851aafcc 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -44,7 +44,7 @@
#include <asm/mach/flash.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
@@ -381,6 +381,8 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_touchbook_init,
.timer = &omap3_secure_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index a8c2c4263e3..e96a2e7ad36 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -30,14 +30,14 @@
#include <linux/wl12xx.h>
#include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <video/omapdss.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include <plat/mmc.h>
#include <video/omap-panel-dvi.h>
@@ -364,74 +364,8 @@ static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
-static struct omap_device_pad serial2_pads[] __initdata = {
- OMAP_MUX_STATIC("uart2_cts.uart2_cts",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rts.uart2_rts",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_rx.uart2_rx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart2_tx.uart2_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial3_pads[] __initdata = {
- OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
- OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_device_pad serial4_pads[] __initdata = {
- OMAP_MUX_STATIC("uart4_rx.uart4_rx",
- OMAP_PIN_INPUT | OMAP_MUX_MODE0),
- OMAP_MUX_STATIC("uart4_tx.uart4_tx",
- OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
-};
-
-static struct omap_board_data serial2_data __initdata = {
- .id = 1,
- .pads = serial2_pads,
- .pads_cnt = ARRAY_SIZE(serial2_pads),
-};
-
-static struct omap_board_data serial3_data __initdata = {
- .id = 2,
- .pads = serial3_pads,
- .pads_cnt = ARRAY_SIZE(serial3_pads),
-};
-
-static struct omap_board_data serial4_data __initdata = {
- .id = 3,
- .pads = serial4_pads,
- .pads_cnt = ARRAY_SIZE(serial4_pads),
-};
-
-static inline void board_serial_init(void)
-{
- struct omap_board_data bdata;
- bdata.flags = 0;
- bdata.pads = NULL;
- bdata.pads_cnt = 0;
- bdata.id = 0;
- /* pass dummy data for UART1 */
- omap_serial_init_port(&bdata);
-
- omap_serial_init_port(&serial2_data);
- omap_serial_init_port(&serial3_data);
- omap_serial_init_port(&serial4_data);
-}
#else
#define board_mux NULL
-
-static inline void board_serial_init(void)
-{
- omap_serial_init();
-}
#endif
/* Display DVI */
@@ -562,7 +496,7 @@ static void __init omap4_panda_init(void)
omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
platform_device_register(&omap_vwlan_device);
- board_serial_init();
+ omap_serial_init();
omap_sdrc_init(NULL, NULL);
omap4_twl6030_hsmmc_init(mmc);
omap4_ehci_init();
@@ -577,6 +511,8 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
.map_io = omap4_map_io,
.init_early = omap4430_init_early,
.init_irq = gic_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = omap4_panda_init,
.timer = &omap4_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 4cf7aeabab8..52c0cef7716 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -43,7 +43,7 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <video/omapdss.h>
#include <video/omap-panel-generic-dpi.h>
#include <video/omap-panel-dvi.h>
@@ -562,6 +562,8 @@ MACHINE_START(OVERO, "Gumstix Overo")
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = overo_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 616fb39763b..8678b386c6a 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -25,7 +25,7 @@
#include <plat/mmc.h>
#include <plat/usb.h>
#include <plat/gpmc.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/onenand.h>
#include "mux.h"
@@ -149,6 +149,8 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
.map_io = omap3_map_io,
.init_early = omap3630_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = rm680_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ba1aa07bdb2..d67bcdf724d 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -15,6 +15,7 @@
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/spi/tsc2005.h>
#include <linux/i2c.h>
#include <linux/i2c/twl.h>
#include <linux/clk.h>
@@ -27,7 +28,7 @@
#include <plat/mcspi.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
@@ -58,6 +59,9 @@
#define RX51_USB_TRANSCEIVER_RST_GPIO 67
+#define RX51_TSC2005_RESET_GPIO 104
+#define RX51_TSC2005_IRQ_GPIO 100
+
/* list all spi devices here */
enum {
RX51_SPI_WL1251,
@@ -66,6 +70,7 @@ enum {
};
static struct wl12xx_platform_data wl1251_pdata;
+static struct tsc2005_platform_data tsc2005_pdata;
#if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
static struct tsl2563_platform_data rx51_tsl2563_platform_data = {
@@ -167,10 +172,10 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.modalias = "tsc2005",
.bus_num = 1,
.chip_select = 0,
- /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
+ .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
.max_speed_hz = 6000000,
.controller_data = &tsc2005_mcspi_config,
- /* .platform_data = &tsc2005_config,*/
+ .platform_data = &tsc2005_pdata,
},
};
@@ -193,7 +198,7 @@ static struct platform_device rx51_charger_device = {
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}
@@ -1086,6 +1091,42 @@ error:
*/
}
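+/* TSC2005 touchscreen tuning: axis ranges, pressure limits and ESD watchdog period */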
+static struct tsc2005_platform_data tsc2005_pdata = {
+ .ts_pressure_max = 2048,
+ .ts_pressure_fudge = 2,
+ .ts_x_max = 4096,
+ .ts_x_fudge = 4,
+ .ts_y_max = 4096,
+ .ts_y_fudge = 7,
+ .ts_x_plate_ohm = 280,
+ .esd_timeout_ms = 8000,
+};
+
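+/* Called back by the tsc2005 driver to toggle the controller's reset GPIO */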
+static void rx51_tsc2005_set_reset(bool enable)
+{
+ gpio_set_value(RX51_TSC2005_RESET_GPIO, enable);
+}
+
+static void __init rx51_init_tsc2005(void)
+{
+ int r;
+
+ r = gpio_request_one(RX51_TSC2005_IRQ_GPIO, GPIOF_IN, "tsc2005 IRQ");
+ if (r < 0) {
+ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 IRQ");
+ rx51_peripherals_spi_board_info[RX51_SPI_TSC2005].irq = 0;
+ }
+
+ r = gpio_request_one(RX51_TSC2005_RESET_GPIO, GPIOF_OUT_INIT_HIGH,
+ "tsc2005 reset");
+ if (r >= 0) {
+ tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
+ } else {
+ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 reset");
+ tsc2005_pdata.esd_timeout_ms = 0;
+ }
+}
+
void __init rx51_peripherals_init(void)
{
rx51_i2c_init();
@@ -1094,6 +1135,7 @@ void __init rx51_peripherals_init(void)
board_smc91x_init();
rx51_add_gpio_keys();
rx51_init_wl1251();
+ rx51_init_tsc2005();
rx51_init_si4713();
spi_register_board_info(rx51_peripherals_spi_board_info,
ARRAY_SIZE(rx51_peripherals_spi_board_info));
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 4af7c4b2881..27f01f051df 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -25,7 +25,7 @@
#include <plat/mcspi.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/usb.h>
@@ -127,6 +127,8 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = rx51_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index e6ee8842285..ab9a7a9e9d6 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -1,5 +1,5 @@
/*
- * Code for TI8168 EVM.
+ * Code for TI8168/TI8148 EVM.
*
* Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
*
@@ -22,30 +22,46 @@
#include <plat/irqs.h>
#include <plat/board.h>
-#include <plat/common.h>
+#include "common.h"
+#include <plat/usb.h>
-static struct omap_board_config_kernel ti8168_evm_config[] __initdata = {
+static struct omap_musb_board_data musb_board_data = {
+ .set_phy_power = ti81xx_musb_phy_power,
+ .interface_type = MUSB_INTERFACE_ULPI,
+ .mode = MUSB_OTG,
+ .power = 500,
};
-static void __init ti8168_evm_init(void)
+static struct omap_board_config_kernel ti81xx_evm_config[] __initdata = {
+};
+
+static void __init ti81xx_evm_init(void)
{
omap_serial_init();
omap_sdrc_init(NULL, NULL);
- omap_board_config = ti8168_evm_config;
- omap_board_config_size = ARRAY_SIZE(ti8168_evm_config);
-}
-
-static void __init ti8168_evm_map_io(void)
-{
- omapti816x_map_common_io();
+ omap_board_config = ti81xx_evm_config;
+ omap_board_config_size = ARRAY_SIZE(ti81xx_evm_config);
+ usb_musb_init(&musb_board_data);
}
MACHINE_START(TI8168EVM, "ti8168evm")
/* Maintainer: Texas Instruments */
.atag_offset = 0x100,
- .map_io = ti8168_evm_map_io,
- .init_early = ti816x_init_early,
- .init_irq = ti816x_init_irq,
+ .map_io = ti81xx_map_io,
+ .init_early = ti81xx_init_early,
+ .init_irq = ti81xx_init_irq,
+ .timer = &omap3_timer,
+ .init_machine = ti81xx_evm_init,
+ .restart = omap_prcm_restart,
+MACHINE_END
+
+MACHINE_START(TI8148EVM, "ti8148evm")
+ /* Maintainer: Texas Instruments */
+ .atag_offset = 0x100,
+ .map_io = ti81xx_map_io,
+ .init_early = ti81xx_init_early,
+ .init_irq = ti81xx_init_irq,
.timer = &omap3_timer,
- .init_machine = ti8168_evm_init,
+ .init_machine = ti81xx_evm_init,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 6d0aa4fcb7c..8d7ce11cfea 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -24,7 +24,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/usb.h>
#include <mach/board-zoom.h>
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index be6684dc4f5..5c20bcc57f2 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -21,7 +21,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
#include <plat/usb.h>
@@ -135,8 +135,10 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
@@ -145,6 +147,8 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
.map_io = omap3_map_io,
.init_early = omap3630_init_early,
.init_irq = omap3_init_irq,
+ .handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
.timer = &omap3_timer,
+ .restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 1f3481f8d69..f57ed5baecc 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -35,7 +35,7 @@
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
-u8 cpu_mask;
+u16 cpu_mask;
/*
* clkdm_control: if true, then when a clock is enabled in the
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 2311bc21722..b8c2a686481 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -132,7 +132,7 @@ void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
const char *core_ck_name,
const char *mpu_ck_name);
-extern u8 cpu_mask;
+extern u16 cpu_mask;
extern const struct clkops clkops_omap2_dflt_wait;
extern const struct clkops clkops_dummy;
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 5d0064a4fb5..d75e5f6b8a0 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -2480,6 +2480,16 @@ static struct clk uart4_fck = {
.recalc = &followparent_recalc,
};
+static struct clk uart4_fck_am35xx = {
+ .name = "uart4_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_UART4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
static struct clk gpt2_fck = {
.name = "gpt2_fck",
.ops = &clkops_omap2_dflt_wait,
@@ -3287,7 +3297,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
@@ -3323,7 +3333,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "pka_ick", &pka_ick, CK_34XX | CK_36XX),
CLK(NULL, "core_l4_ick", &core_l4_ick, CK_3XXX),
CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "usbtll_ick", &usbtll_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("omap_hsmmc.2", "ick", &mmchs3_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "icr_ick", &icr_ick, CK_34XX | CK_36XX),
CLK("omap-aes", "ick", &aes2_ick, CK_34XX | CK_36XX),
@@ -3369,20 +3379,18 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "cam_ick", &cam_ick, CK_34XX | CK_36XX),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_34XX | CK_36XX),
CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("usbhs-omap.0", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
- CLK("usbhs-omap.0", "init_60m_fclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usbhost_ick", &usbhost_ick, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+ CLK("usbhs_omap", "utmi_p1_gfclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "utmi_p2_gfclk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "xclk60mhsp1_ck", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "xclk60mhsp2_ck", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_host_hs_utmi_p1_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_host_hs_utmi_p2_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch0_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "usb_tll_hs_usb_ch1_clk", &dummy_ck, CK_3XXX),
+ CLK("usbhs_omap", "init_60m_fclk", &dummy_ck, CK_3XXX),
CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2PLUS | CK_36XX),
CLK(NULL, "gpt1_fck", &gpt1_fck, CK_3XXX),
CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_3XXX),
@@ -3403,6 +3411,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX),
+ CLK(NULL, "uart4_fck", &uart4_fck_am35xx, CK_3505 | CK_3517),
CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX),
CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX),
CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX),
@@ -3517,6 +3526,10 @@ int __init omap3xxx_clk_init(void)
} else if (cpu_is_ti816x()) {
cpu_mask = RATE_IN_TI816X;
cpu_clkflg = CK_TI816X;
+ } else if (cpu_is_am33xx()) {
+ cpu_mask = RATE_IN_AM33XX;
+ } else if (cpu_is_ti814x()) {
+ cpu_mask = RATE_IN_TI814X;
} else if (cpu_is_omap34xx()) {
if (omap_rev() == OMAP3430_REV_ES1_0) {
cpu_mask = RATE_IN_3430ES1;
@@ -3600,7 +3613,7 @@ int __init omap3xxx_clk_init(void)
* Lock DPLL5 -- here only until other device init code can
* handle this
*/
- if (!cpu_is_ti816x() && (omap_rev() >= OMAP3430_REV_ES2_0))
+ if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0))
omap3_clk_lock_dpll5();
/* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 0798a802497..08e86d793a1 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -1206,6 +1206,14 @@ static const struct clksel ocp_abe_iclk_div[] = {
{ .parent = NULL },
};
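+/* PERIPHCLK for the Cortex-A9 private timers (smp_twd): fixed at MPU DPLL rate / 2 */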
+static struct clk mpu_periphclk = {
+ .name = "mpu_periphclk",
+ .parent = &dpll_mpu_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
static struct clk ocp_abe_iclk = {
.name = "ocp_abe_iclk",
.parent = &aess_fclk,
@@ -3189,6 +3197,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
+ CLK("smp_twd", NULL, &mpu_periphclk, CK_443X),
CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
@@ -3295,7 +3304,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
- CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X),
+ CLK("usbhs_omap", "fs_fck", &usb_host_fs_fck, CK_443X),
CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
CLK(NULL, "utmi_p2_gfclk", &utmi_p2_gfclk, CK_443X),
@@ -3306,7 +3315,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
- CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X),
+ CLK("usbhs_omap", "hs_fck", &usb_host_hs_fck, CK_443X),
CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
@@ -3314,7 +3323,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
- CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
+ CLK("usbhs_omap", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
CLK(NULL, "usim_ck", &usim_ck, CK_443X),
CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
CLK(NULL, "usim_fck", &usim_fck, CK_443X),
@@ -3374,8 +3383,8 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
- CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
- CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
+ CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_443X),
+ CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_443X),
CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_443X),
CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_443X),
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.c b/arch/arm/mach-omap2/cm2xxx_3xxx.c
index 38830d8d478..04d39cdd211 100644
--- a/arch/arm/mach-omap2/cm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include "cm.h"
#include "cm2xxx_3xxx.h"
diff --git a/arch/arm/mach-omap2/cm44xx.c b/arch/arm/mach-omap2/cm44xx.c
index e96f53ea01a..6a836303252 100644
--- a/arch/arm/mach-omap2/cm44xx.c
+++ b/arch/arm/mach-omap2/cm44xx.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include "cm.h"
#include "cm1_44xx.h"
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index eb2a472bbf4..6204deaf85b 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -20,7 +20,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include "cm.h"
#include "cm1_44xx.h"
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 110e5b9db14..aaf421178c9 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
#include <plat/mux.h>
@@ -110,23 +110,49 @@ void __init omap3_map_io(void)
/*
* Adjust TAP register base such that omap3_check_revision accesses the correct
- * TI816X register for checking device ID (it adds 0x204 to tap base while
- * TI816X DEVICE ID register is at offset 0x600 from control base).
+ * TI81XX register for checking device ID (it adds 0x204 to tap base while
+ * TI81XX DEVICE ID register is at offset 0x600 from control base).
*/
-#define TI816X_TAP_BASE (TI816X_CTRL_BASE + \
- TI816X_CONTROL_DEVICE_ID - 0x204)
+#define TI81XX_TAP_BASE (TI81XX_CTRL_BASE + \
+ TI81XX_CONTROL_DEVICE_ID - 0x204)
-static struct omap_globals ti816x_globals = {
+static struct omap_globals ti81xx_globals = {
.class = OMAP343X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(TI816X_TAP_BASE),
- .ctrl = OMAP2_L4_IO_ADDRESS(TI816X_CTRL_BASE),
- .prm = OMAP2_L4_IO_ADDRESS(TI816X_PRCM_BASE),
- .cm = OMAP2_L4_IO_ADDRESS(TI816X_PRCM_BASE),
+ .tap = OMAP2_L4_IO_ADDRESS(TI81XX_TAP_BASE),
+ .ctrl = OMAP2_L4_IO_ADDRESS(TI81XX_CTRL_BASE),
+ .prm = OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE),
+ .cm = OMAP2_L4_IO_ADDRESS(TI81XX_PRCM_BASE),
};
-void __init omap2_set_globals_ti816x(void)
+void __init omap2_set_globals_ti81xx(void)
{
- __omap2_set_globals(&ti816x_globals);
+ __omap2_set_globals(&ti81xx_globals);
+}
+
+void __init ti81xx_map_io(void)
+{
+ omapti81xx_map_common_io();
+}
+
+#define AM33XX_TAP_BASE (AM33XX_CTRL_BASE + \
+ TI81XX_CONTROL_DEVICE_ID - 0x204)
+
+static struct omap_globals am33xx_globals = {
+ .class = AM335X_CLASS,
+ .tap = AM33XX_L4_WK_IO_ADDRESS(AM33XX_TAP_BASE),
+ .ctrl = AM33XX_L4_WK_IO_ADDRESS(AM33XX_CTRL_BASE),
+ .prm = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE),
+ .cm = AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRCM_BASE),
+};
+
+void __init omap2_set_globals_am33xx(void)
+{
+ __omap2_set_globals(&am33xx_globals);
+}
+
+void __init am33xx_map_io(void)
+{
+ omapam33xx_map_common_io();
}
#endif
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
new file mode 100644
index 00000000000..febffde2ff1
--- /dev/null
+++ b/arch/arm/mach-omap2/common.h
@@ -0,0 +1,239 @@
+/*
+ * Header for code common to all OMAP2+ machines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+#define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+#ifndef __ASSEMBLER__
+
+#include <linux/delay.h>
+#include <plat/common.h>
+#include <asm/proc-fns.h>
+
+#ifdef CONFIG_SOC_OMAP2420
+extern void omap242x_map_common_io(void);
+#else
+static inline void omap242x_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAP2430
+extern void omap243x_map_common_io(void);
+#else
+static inline void omap243x_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+extern void omap34xx_map_common_io(void);
+#else
+static inline void omap34xx_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAPTI81XX
+extern void omapti81xx_map_common_io(void);
+#else
+static inline void omapti81xx_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAPAM33XX
+extern void omapam33xx_map_common_io(void);
+#else
+static inline void omapam33xx_map_common_io(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+extern void omap44xx_map_common_io(void);
+#else
+static inline void omap44xx_map_common_io(void)
+{
+}
+#endif
+
+extern void omap2_init_common_infrastructure(void);
+
+extern struct sys_timer omap2_timer;
+extern struct sys_timer omap3_timer;
+extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap4_timer;
+
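+/* SoC-specific early init hooks, wired into each board's MACHINE_START .init_early */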
+void omap2420_init_early(void);
+void omap2430_init_early(void);
+void omap3430_init_early(void);
+void omap35xx_init_early(void);
+void omap3630_init_early(void);
+void omap3_init_early(void); /* Do not use this one */
+void am35xx_init_early(void);
+void ti81xx_init_early(void);
+void omap4430_init_early(void);
+void omap_prcm_restart(char, const char *);
+
+/*
+ * IO bases for various OMAP processors
+ * Except for the tap base, all of the I/O bases
+ * listed are physical addresses.
+ */
+struct omap_globals {
+ u32 class; /* OMAP class to detect */
+ void __iomem *tap; /* Control module ID code */
+ void __iomem *sdrc; /* SDRAM Controller */
+ void __iomem *sms; /* SDRAM Memory Scheduler */
+ void __iomem *ctrl; /* System Control Module */
+ void __iomem *ctrl_pad; /* PAD Control Module */
+ void __iomem *prm; /* Power and Reset Management */
+ void __iomem *cm; /* Clock Management */
+ void __iomem *cm2;
+};
+
+void omap2_set_globals_242x(void);
+void omap2_set_globals_243x(void);
+void omap2_set_globals_3xxx(void);
+void omap2_set_globals_443x(void);
+void omap2_set_globals_ti81xx(void);
+void omap2_set_globals_am33xx(void);
+
+/* These get called from omap2_set_globals_xxxx(), do not call these */
+void omap2_set_globals_tap(struct omap_globals *);
+void omap2_set_globals_sdrc(struct omap_globals *);
+void omap2_set_globals_control(struct omap_globals *);
+void omap2_set_globals_prcm(struct omap_globals *);
+
+void omap242x_map_io(void);
+void omap243x_map_io(void);
+void omap3_map_io(void);
+void am33xx_map_io(void);
+void omap4_map_io(void);
+void ti81xx_map_io(void);
+
+/**
+ * omap_test_timeout - busy-loop, testing a condition
+ * @cond: condition to test until it evaluates to true
+ * @timeout: maximum number of microseconds in the timeout
+ * @index: loop index (integer)
+ *
+ * Loop waiting for @cond to become true or until at least @timeout
+ * microseconds have passed. To use, define some integer @index in the
+ * calling code. After running, if @index == @timeout, then the loop has
+ * timed out.
+ */
+#define omap_test_timeout(cond, timeout, index) \
+({ \
+ for (index = 0; index < timeout; index++) { \
+ if (cond) \
+ break; \
+ udelay(1); \
+ } \
+})
+
+extern struct device *omap2_get_mpuss_device(void);
+extern struct device *omap2_get_iva_device(void);
+extern struct device *omap2_get_l3_device(void);
+extern struct device *omap4_get_dsp_device(void);
+
+void omap2_init_irq(void);
+void omap3_init_irq(void);
+void ti81xx_init_irq(void);
+extern int omap_irq_pending(void);
+void omap_intc_save_context(void);
+void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
+void omap2_intc_handle_irq(struct pt_regs *regs);
+void omap3_intc_handle_irq(struct pt_regs *regs);
+
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *omap4_get_l2cache_base(void);
+#endif
+
+#ifdef CONFIG_SMP
+extern void __iomem *omap4_get_scu_base(void);
+#else
+static inline void __iomem *omap4_get_scu_base(void)
+{
+ return NULL;
+}
+#endif
+
+extern void __init gic_init_irq(void);
+extern void omap_smc1(u32 fn, u32 arg);
+extern void __iomem *omap4_get_sar_ram_base(void);
+extern void omap_do_wfi(void);
+
+#ifdef CONFIG_SMP
+/* Needed for secondary core boot */
+extern void omap_secondary_startup(void);
+extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
+extern void omap_auxcoreboot_addr(u32 cpu_addr);
+extern u32 omap_read_auxcoreboot0(void);
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PM)
+extern int omap4_mpuss_init(void);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern int omap4_finish_suspend(unsigned long cpu_state);
+extern void omap4_cpu_resume(void);
+extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_mpuss_read_prev_context_state(void);
+#else
+static inline int omap4_enter_lowpower(unsigned int cpu,
+ unsigned int power_state)
+{
+ cpu_do_idle();
+ return 0;
+}
+
+static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+{
+ cpu_do_idle();
+ return 0;
+}
+
+static inline int omap4_mpuss_init(void)
+{
+ return 0;
+}
+
+static inline int omap4_finish_suspend(unsigned long cpu_state)
+{
+ return 0;
+}
+
+static inline void omap4_cpu_resume(void)
+{}
+
+static inline u32 omap4_mpuss_read_prev_context_state(void)
+{
+ return 0;
+}
+#endif
+#endif /* __ASSEMBLER__ */
+#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index e34d27f8c49..114c037e433 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/sdrc.h>
#include "cm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index d4ef75d5a38..0ba68d3764b 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -52,8 +52,8 @@
#define OMAP343X_CONTROL_PADCONFS_WKUP 0xa00
#define OMAP343X_CONTROL_GENERAL_WKUP 0xa60
-/* TI816X spefic control submodules */
-#define TI816X_CONTROL_DEVCONF 0x600
+/* TI81XX specific control submodules */
+#define TI81XX_CONTROL_DEVCONF 0x600
/* Control register offsets - read/write with omap_ctrl_{read,write}{bwl}() */
@@ -244,8 +244,8 @@
#define OMAP3_PADCONF_SAD2D_MSTANDBY 0x250
#define OMAP3_PADCONF_SAD2D_IDLEACK 0x254
-/* TI816X CONTROL_DEVCONF register offsets */
-#define TI816X_CONTROL_DEVICE_ID (TI816X_CONTROL_DEVCONF + 0x000)
+/* TI81XX CONTROL_DEVCONF register offsets */
+#define TI81XX_CONTROL_DEVICE_ID (TI81XX_CONTROL_DEVCONF + 0x000)
/*
* REVISIT: This list of registers is not comprehensive - there are more
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1fe35c24fba..464cffde58f 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -24,15 +24,17 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/export.h>
+#include <linux/cpu_pm.h>
#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"
-#include <plat/serial.h>
#include "pm.h"
#include "control.h"
+#include "common.h"
#ifdef CONFIG_CPU_IDLE
@@ -122,9 +124,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
}
+ /*
+ * Call idle CPU PM enter notifier chain so that
+ * VFP context is saved.
+ */
+ if (mpu_state == PWRDM_POWER_OFF)
+ cpu_pm_enter();
+
/* Execute ARM wfi */
omap_sram_idle();
+ /*
+ * Call idle CPU PM enter notifier chain to restore
+ * VFP context.
+ */
+ if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+ cpu_pm_exit();
+
/* Re-allow idle for C1 */
if (index == 0) {
pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
@@ -243,11 +259,6 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
struct omap3_idle_statedata *cx;
int ret;
- if (!omap3_can_sleep()) {
- new_state_idx = drv->safe_state_index;
- goto select_state;
- }
-
/*
* Prevent idle completely if CAM is active.
* CAM does not have wakeup capability in OMAP3.
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
new file mode 100644
index 00000000000..cfdbb86bc84
--- /dev/null
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -0,0 +1,245 @@
+/*
+ * OMAP4 CPU idle Routines
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/clockchips.h>
+
+#include <asm/proc-fns.h>
+
+#include "common.h"
+#include "pm.h"
+#include "prm.h"
+
+#ifdef CONFIG_CPU_IDLE
+
+/* Machine specific information to be recorded in the C-state driver_data */
+struct omap4_idle_statedata {
+ u32 cpu_state;
+ u32 mpu_logic_state;
+ u32 mpu_state;
+ u8 valid;
+};
+
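+/* Latencies and residencies below are in microseconds, as expected by cpuidle */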
+static struct cpuidle_params cpuidle_params_table[] = {
+	/* C1 - CPU0 ON + CPU1 ON + MPU ON */
+	{.exit_latency = 2 + 2, .target_residency = 5, .valid = 1},
+	/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+	{.exit_latency = 328 + 440, .target_residency = 960, .valid = 1},
+	/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+	{.exit_latency = 460 + 518, .target_residency = 1100, .valid = 1},
+};
+
+#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
+
+struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
+static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+
+/**
+ * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified low power state selected by the governor.
+ * Returns the amount of time spent in the low power state.
+ */
+static int omap4_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ struct omap4_idle_statedata *cx =
+ cpuidle_get_statedata(&dev->states_usage[index]);
+ struct timespec ts_preidle, ts_postidle, ts_idle;
+ u32 cpu1_state;
+ int idle_time;
+ int new_state_idx;
+ int cpu_id = smp_processor_id();
+
+ /* Used to keep track of the total time in idle */
+ getnstimeofday(&ts_preidle);
+
+ local_irq_disable();
+ local_fiq_disable();
+
+ /*
+	 * CPU0 has to stay ON (i.e. in C1) until CPU1 is in the OFF state.
+	 * This is necessary to honour the hardware recommendation of
+	 * triggering all the possible low power modes once CPU1 is
+	 * out of coherency and in OFF mode.
+	 * Update dev->last_state so that the governor stats reflect the
+	 * right data.
+ */
+ cpu1_state = pwrdm_read_pwrst(cpu1_pd);
+ if (cpu1_state != PWRDM_POWER_OFF) {
+ new_state_idx = drv->safe_state_index;
+ cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
+ }
+
+ if (index > 0)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
+ /*
+ * Call idle CPU PM enter notifier chain so that
+ * VFP and per CPU interrupt context is saved.
+ */
+ if (cx->cpu_state == PWRDM_POWER_OFF)
+ cpu_pm_enter();
+
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+
+ /*
+ * Call idle CPU cluster PM enter notifier chain
+ * to save GIC and wakeupgen context.
+ */
+ if ((cx->mpu_state == PWRDM_POWER_RET) &&
+ (cx->mpu_logic_state == PWRDM_POWER_OFF))
+ cpu_cluster_pm_enter();
+
+ omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+
+ /*
+ * Call idle CPU PM exit notifier chain to restore
+ * VFP and per CPU IRQ context. Only CPU0 state is
+ * considered since CPU1 is managed by CPU hotplug.
+ */
+ if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
+ cpu_pm_exit();
+
+ /*
+ * Call idle CPU cluster PM exit notifier chain
+ * to restore GIC and wakeupgen context.
+ */
+ if (omap4_mpuss_read_prev_context_state())
+ cpu_cluster_pm_exit();
+
+ if (index > 0)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
+ getnstimeofday(&ts_postidle);
+ ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+ local_irq_enable();
+ local_fiq_enable();
+
+ idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
+ USEC_PER_SEC;
+
+ /* Update cpuidle counters */
+ dev->last_residency = idle_time;
+
+ return index;
+}
+
+DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+
+struct cpuidle_driver omap4_idle_driver = {
+ .name = "omap4_idle",
+ .owner = THIS_MODULE,
+};
+
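+/* Copy one entry of the static latency table into the cpuidle driver state */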
+static inline void _fill_cstate(struct cpuidle_driver *drv,
+ int idx, const char *descr)
+{
+ struct cpuidle_state *state = &drv->states[idx];
+
+ state->exit_latency = cpuidle_params_table[idx].exit_latency;
+ state->target_residency = cpuidle_params_table[idx].target_residency;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ state->enter = omap4_enter_idle;
+ sprintf(state->name, "C%d", idx + 1);
+ strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
+}
+
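+/* Attach per-device state data, later read back via cpuidle_get_statedata() */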
+static inline struct omap4_idle_statedata *_fill_cstate_usage(
+ struct cpuidle_device *dev,
+ int idx)
+{
+ struct omap4_idle_statedata *cx = &omap4_idle_data[idx];
+ struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
+
+ cx->valid = cpuidle_params_table[idx].valid;
+ cpuidle_set_statedata(state_usage, cx);
+
+ return cx;
+}
+
+
+
+/**
+ * omap4_idle_init - Init routine for OMAP4 idle
+ *
+ * Registers the OMAP4 specific cpuidle driver to the cpuidle
+ * framework with the valid set of states.
+ */
+int __init omap4_idle_init(void)
+{
+ struct omap4_idle_statedata *cx;
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv = &omap4_idle_driver;
+ unsigned int cpu_id = 0;
+
+ mpu_pd = pwrdm_lookup("mpu_pwrdm");
+ cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
+ cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+ if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+ return -ENODEV;
+
+
+ drv->safe_state_index = -1;
+ dev = &per_cpu(omap4_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+
+ /* C1 - CPU0 ON + CPU1 ON + MPU ON */
+ _fill_cstate(drv, 0, "MPUSS ON");
+ drv->safe_state_index = 0;
+ cx = _fill_cstate_usage(dev, 0);
+ cx->valid = 1; /* C1 is always valid */
+ cx->cpu_state = PWRDM_POWER_ON;
+ cx->mpu_state = PWRDM_POWER_ON;
+ cx->mpu_logic_state = PWRDM_POWER_RET;
+
+ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+ _fill_cstate(drv, 1, "MPUSS CSWR");
+ cx = _fill_cstate_usage(dev, 1);
+ cx->cpu_state = PWRDM_POWER_OFF;
+ cx->mpu_state = PWRDM_POWER_RET;
+ cx->mpu_logic_state = PWRDM_POWER_RET;
+
+ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+ _fill_cstate(drv, 2, "MPUSS OSWR");
+ cx = _fill_cstate_usage(dev, 2);
+ cx->cpu_state = PWRDM_POWER_OFF;
+ cx->mpu_state = PWRDM_POWER_RET;
+ cx->mpu_logic_state = PWRDM_POWER_OFF;
+
+ drv->state_count = OMAP4_NUM_STATES;
+ cpuidle_register_driver(&omap4_idle_driver);
+
+ dev->state_count = OMAP4_NUM_STATES;
+ if (cpuidle_register_device(dev)) {
+ pr_err("%s: CPUidle register device failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+#else
+int __init omap4_idle_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_CPU_IDLE */
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index c15cfada5f1..46dfd1ae8f7 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -28,6 +28,7 @@
#include <plat/board.h>
#include <plat/mcbsp.h>
#include <plat/mmc.h>
+#include <plat/iommu.h>
#include <plat/dma.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
@@ -211,9 +212,15 @@ static struct platform_device omap3isp_device = {
.resource = omap3isp_resources,
};
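+/* Names the IOMMU instance ("isp") that the ISP device is bound to via dev->archdata */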
+static struct omap_iommu_arch_data omap3_isp_iommu = {
+ .name = "isp",
+};
+
int omap3_init_camera(struct isp_platform_data *pdata)
{
omap3isp_device.dev.platform_data = pdata;
+ omap3isp_device.dev.archdata.iommu = &omap3_isp_iommu;
+
return platform_device_register(&omap3isp_device);
}
@@ -336,6 +343,27 @@ static void omap_init_mcpdm(void)
static inline void omap_init_mcpdm(void) {}
#endif
+#if defined(CONFIG_SND_OMAP_SOC_DMIC) || \
+ defined(CONFIG_SND_OMAP_SOC_DMIC_MODULE)
+
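+/* Build and register the omap-dmic device from its hwmod entry */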
+static void omap_init_dmic(void)
+{
+ struct omap_hwmod *oh;
+ struct platform_device *pdev;
+
+ oh = omap_hwmod_lookup("dmic");
+ if (!oh) {
+		printk(KERN_ERR "Could not look up dmic hw_mod\n");
+ return;
+ }
+
+ pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0, NULL, 0, 0);
+ WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n");
+}
+#else
+static inline void omap_init_dmic(void) {}
+#endif
+
#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
#include <plat/mcspi.h>
@@ -681,6 +709,7 @@ static int __init omap2_init_devices(void)
*/
omap_init_audio();
omap_init_mcpdm();
+ omap_init_dmic();
omap_init_camera();
omap_init_mbox();
omap_init_mcspi();
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index adb2756e242..bc6cf863a56 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -22,13 +22,41 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <video/omapdss.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
+#include "common.h"
#include "control.h"
+#include "display.h"
+
+#define DISPC_CONTROL 0x0040
+#define DISPC_CONTROL2 0x0238
+#define DISPC_IRQSTATUS 0x0018
+
+#define DSS_SYSCONFIG 0x10
+#define DSS_SYSSTATUS 0x14
+#define DSS_CONTROL 0x40
+#define DSS_SDI_CONTROL 0x44
+#define DSS_PLL_CONTROL 0x48
+
+#define LCD_EN_MASK (0x1 << 0)
+#define DIGIT_EN_MASK (0x1 << 1)
+
+#define FRAMEDONE_IRQ_SHIFT 0
+#define EVSYNC_EVEN_IRQ_SHIFT 2
+#define EVSYNC_ODD_IRQ_SHIFT 3
+#define FRAMEDONE2_IRQ_SHIFT 22
+#define FRAMEDONETV_IRQ_SHIFT 24
+
+/*
+ * FRAMEDONE_IRQ_TIMEOUT: how long (in milliseconds) to wait during DISPC
+ * reset before deciding that something has gone wrong
+ */
+#define FRAMEDONE_IRQ_TIMEOUT 100
static struct platform_device omap_display_device = {
.name = "omapdss",
@@ -172,3 +200,135 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
return r;
}
+
+static void dispc_disable_outputs(void)
+{
+ u32 v, irq_mask = 0;
+ bool lcd_en, digit_en, lcd2_en = false;
+ int i;
+ struct omap_dss_dispc_dev_attr *da;
+ struct omap_hwmod *oh;
+
+ oh = omap_hwmod_lookup("dss_dispc");
+ if (!oh) {
+ WARN(1, "display: could not disable outputs during reset - could not find dss_dispc hwmod\n");
+ return;
+ }
+
+ if (!oh->dev_attr) {
+ pr_err("display: could not disable outputs during reset due to missing dev_attr\n");
+ return;
+ }
+
+ da = (struct omap_dss_dispc_dev_attr *)oh->dev_attr;
+
+ /* store value of LCDENABLE and DIGITENABLE bits */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ lcd_en = v & LCD_EN_MASK;
+ digit_en = v & DIGIT_EN_MASK;
+
+ /* store value of LCDENABLE for LCD2 */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ lcd2_en = v & LCD_EN_MASK;
+ }
+
+ if (!(lcd_en | digit_en | lcd2_en))
+ return; /* no managers currently enabled */
+
+ /*
+ * If any manager was enabled, we need to disable it before
+ * DSS clocks are disabled or DISPC module is reset
+ */
+ if (lcd_en)
+ irq_mask |= 1 << FRAMEDONE_IRQ_SHIFT;
+
+ if (digit_en) {
+ if (da->has_framedonetv_irq) {
+ irq_mask |= 1 << FRAMEDONETV_IRQ_SHIFT;
+ } else {
+ irq_mask |= 1 << EVSYNC_EVEN_IRQ_SHIFT |
+ 1 << EVSYNC_ODD_IRQ_SHIFT;
+ }
+ }
+
+ if (lcd2_en)
+ irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+
+ /*
+ * clear any previous FRAMEDONE, FRAMEDONETV,
+ * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+ */
+ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
+
+ /* disable LCD and TV managers */
+ v = omap_hwmod_read(oh, DISPC_CONTROL);
+ v &= ~(LCD_EN_MASK | DIGIT_EN_MASK);
+ omap_hwmod_write(v, oh, DISPC_CONTROL);
+
+ /* disable LCD2 manager */
+ if (da->manager_count > 2) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL2);
+ v &= ~LCD_EN_MASK;
+ omap_hwmod_write(v, oh, DISPC_CONTROL2);
+ }
+
+ i = 0;
+ while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
+ irq_mask) {
+ i++;
+ if (i > FRAMEDONE_IRQ_TIMEOUT) {
+ pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+int omap_dss_reset(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_opt_clk *oc;
+ int c = 0;
+ int i, r;
+
+ if (!(oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)) {
+ pr_err("dss_core: hwmod data doesn't contain reset data\n");
+ return -EINVAL;
+ }
+
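+	/* Keep the DSS optional clocks running while outputs are disabled and reset completes */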
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_enable(oc->_clk);
+
+ dispc_disable_outputs();
+
+ /* clear SDI registers */
+ if (cpu_is_omap3430()) {
+ omap_hwmod_write(0x0, oh, DSS_SDI_CONTROL);
+ omap_hwmod_write(0x0, oh, DSS_PLL_CONTROL);
+ }
+
+ /*
+ * clear DSS_CONTROL register to switch DSS clock sources to
+ * PRCM clock, if any
+ */
+ omap_hwmod_write(0x0, oh, DSS_CONTROL);
+
+ omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+
+ if (c == MAX_MODULE_SOFTRESET_WAIT)
+ pr_warning("dss_core: waiting for reset to finish failed\n");
+ else
+ pr_debug("dss_core: softreset done\n");
+
+ for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+ if (oc->_clk)
+ clk_disable(oc->_clk);
+
+ r = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
+
+ return r;
+}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
new file mode 100644
index 00000000000..b871b017b35
--- /dev/null
+++ b/arch/arm/mach-omap2/display.h
@@ -0,0 +1,29 @@
+/*
+ * display.h - OMAP2+ integration-specific DSS header
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+#define __ARCH_ARM_MACH_OMAP2_DISPLAY_H
+
+#include <linux/kernel.h>
+
+struct omap_dss_dispc_dev_attr {
+ u8 manager_count;
+ bool has_framedonetv_irq;
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index f4a1020559a..bd844af13af 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -171,6 +171,17 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
}
}
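+/* Only MMC2 has a CLKIN mux: select the internal input clock when the board requests it */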
+static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
+{
+ u32 reg;
+
+ if (mmc->slots[0].internal_clock) {
+ reg = omap_ctrl_readl(control_devconf1_offset);
+ reg |= OMAP2_MMCSDIO2ADPCLKISEL;
+ omap_ctrl_writel(reg, control_devconf1_offset);
+ }
+}
+
static void hsmmc23_before_set_reg(struct device *dev, int slot,
int power_on, int vdd)
{
@@ -179,16 +190,19 @@ static void hsmmc23_before_set_reg(struct device *dev, int slot,
if (mmc->slots[0].remux)
mmc->slots[0].remux(dev, slot, power_on);
- if (power_on) {
- /* Only MMC2 supports a CLKIN */
- if (mmc->slots[0].internal_clock) {
- u32 reg;
+ if (power_on)
+ hsmmc2_select_input_clk_src(mmc);
+}
- reg = omap_ctrl_readl(control_devconf1_offset);
- reg |= OMAP2_MMCSDIO2ADPCLKISEL;
- omap_ctrl_writel(reg, control_devconf1_offset);
- }
- }
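+/* AM35xx MMC2 power switching is a no-op apart from selecting the input clock source */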
+static int am35x_hsmmc2_set_power(struct device *dev, int slot,
+ int power_on, int vdd)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ if (power_on)
+ hsmmc2_select_input_clk_src(mmc);
+
+ return 0;
}
static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
@@ -200,10 +214,12 @@ static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller,
int controller_nr)
{
- if (gpio_is_valid(mmc_controller->slots[0].switch_pin))
+ if (gpio_is_valid(mmc_controller->slots[0].switch_pin) &&
+ (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
OMAP_PIN_INPUT_PULLUP);
- if (gpio_is_valid(mmc_controller->slots[0].gpio_wp))
+ if (gpio_is_valid(mmc_controller->slots[0].gpio_wp) &&
+ (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
OMAP_PIN_INPUT_PULLUP);
if (cpu_is_omap34xx()) {
@@ -296,6 +312,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
mmc->slots[0].name = hc_name;
mmc->nr_slots = 1;
mmc->slots[0].caps = c->caps;
+ mmc->slots[0].pm_caps = c->pm_caps;
mmc->slots[0].internal_clock = !c->ext_clock;
mmc->dma_mask = 0xffffffff;
if (cpu_is_omap44xx())
@@ -336,11 +353,17 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
*
* temporary HACK: ocr_mask instead of fixed supply
*/
- mmc->slots[0].ocr_mask = c->ocr_mask;
-
- if (cpu_is_omap3517() || cpu_is_omap3505())
- mmc->slots[0].set_power = nop_mmc_set_power;
+ if (cpu_is_omap3505() || cpu_is_omap3517())
+ mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
+ MMC_VDD_26_27 |
+ MMC_VDD_27_28 |
+ MMC_VDD_29_30 |
+ MMC_VDD_30_31 |
+ MMC_VDD_31_32;
else
+ mmc->slots[0].ocr_mask = c->ocr_mask;
+
+ if (!cpu_is_omap3517() && !cpu_is_omap3505())
mmc->slots[0].features |= HSMMC_HAS_PBIAS;
if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -363,6 +386,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
}
+ if (cpu_is_omap3517() || cpu_is_omap3505())
+ mmc->slots[0].set_power = nop_mmc_set_power;
+
/* OMAP3630 HSMMC1 supports only 4-bit */
if (cpu_is_omap3630() &&
(c->caps & MMC_CAP_8_BIT_DATA)) {
@@ -372,6 +398,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
break;
case 2:
+ if (cpu_is_omap3517() || cpu_is_omap3505())
+ mmc->slots[0].set_power = am35x_hsmmc2_set_power;
+
if (c->ext_clock)
c->transceiver = 1;
if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index f757e78d4d4..c4409730c4b 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -12,6 +12,7 @@ struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
* capabilities OR'd (ref. linux/mmc/host.h) */
+ u32 pm_caps; /* PM capabilities */
bool transceiver; /* MMC-2 option */
bool ext_clock; /* use external pin for input clock */
bool cover_only; /* No card detect - just cover switch */
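
The new pm_caps field lets a board pass MMC power-management capability flags (from linux/mmc/host.h) straight through to the slot; a hypothetical board entry using it might look like the sketch below (the slot layout and flag choice are illustrative, not taken from this patch).

#include <linux/mmc/host.h>
#include "hsmmc.h"

/* Sketch: a board that keeps an SDIO card powered across suspend. */
static struct omap2_hsmmc_info example_mmc[] __initdata = {
	{
		.mmc		= 2,
		.caps		= MMC_CAP_4_BIT_DATA,
		.pm_caps	= MMC_PM_KEEP_POWER,	/* assumed requirement */
		.gpio_cd	= -EINVAL,		/* no card detect */
		.gpio_wp	= -EINVAL,		/* no write protect */
	},
	{}	/* terminator */
};

/* Registered from board init code: omap2_hsmmc_init(example_mmc); */
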
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index ace99944e96..a12e224eb97 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -21,7 +21,7 @@
#include <plat/cpu.h>
#include <plat/i2c.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/omap_hwmod.h>
#include "mux.h"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 7f47092a193..6c5826605ea 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -21,7 +21,7 @@
#include <asm/cputype.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/cpu.h>
#include <mach/id.h>
@@ -226,7 +226,7 @@ static void __init omap4_check_features(void)
}
}
-static void __init ti816x_check_features(void)
+static void __init ti81xx_check_features(void)
{
omap_features = OMAP3_HAS_NEON;
}
@@ -340,6 +340,29 @@ static void __init omap3_check_revision(const char **cpu_rev)
break;
}
break;
+ case 0xb944:
+ omap_revision = AM335X_REV_ES1_0;
+ *cpu_rev = "1.0";
+ break;
+ case 0xb8f2:
+ switch (rev) {
+ case 0:
+ /* FALLTHROUGH */
+ case 1:
+ omap_revision = TI8148_REV_ES1_0;
+ *cpu_rev = "1.0";
+ break;
+ case 2:
+ omap_revision = TI8148_REV_ES2_0;
+ *cpu_rev = "2.0";
+ break;
+ case 3:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = TI8148_REV_ES2_1;
+ *cpu_rev = "2.1";
+ break;
+ }
+ break;
default:
/* Unknown default to latest silicon rev as default */
omap_revision = OMAP3630_REV_ES1_2;
@@ -367,7 +390,7 @@ static void __init omap4_check_revision(void)
* Few initial 4430 ES2.0 samples IDCODE is same as ES1.0
* Use ARM register to detect the correct ES version
*/
- if (!rev && (hawkeye != 0xb94e)) {
+ if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
idcode = read_cpuid(CPUID_ID);
rev = (idcode & 0xf) - 1;
}
@@ -389,8 +412,11 @@ static void __init omap4_check_revision(void)
omap_revision = OMAP4430_REV_ES2_1;
break;
case 4:
- default:
omap_revision = OMAP4430_REV_ES2_2;
+ break;
+ case 6:
+ default:
+ omap_revision = OMAP4430_REV_ES2_3;
}
break;
case 0xb94e:
@@ -401,9 +427,17 @@ static void __init omap4_check_revision(void)
break;
}
break;
+ case 0xb975:
+ switch (rev) {
+ case 0:
+ default:
+ omap_revision = OMAP4470_REV_ES1_0;
+ break;
+ }
+ break;
default:
/* Unknown default to latest silicon rev as default */
- omap_revision = OMAP4430_REV_ES2_2;
+ omap_revision = OMAP4430_REV_ES2_3;
}
pr_info("OMAP%04x ES%d.%d\n", omap_rev() >> 16,
@@ -432,6 +466,10 @@ static void __init omap3_cpuinfo(const char *cpu_rev)
cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
} else if (cpu_is_ti816x()) {
cpu_name = "TI816X";
+ } else if (cpu_is_am335x()) {
+ cpu_name = "AM335X";
+ } else if (cpu_is_ti814x()) {
+ cpu_name = "TI814X";
} else if (omap3_has_iva() && omap3_has_sgx()) {
/* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
cpu_name = "OMAP3430/3530";
@@ -472,11 +510,11 @@ void __init omap2_check_revision(void)
} else if (cpu_is_omap34xx()) {
omap3_check_revision(&cpu_rev);
- /* TI816X doesn't have feature register */
- if (!cpu_is_ti816x())
+ /* TI81XX doesn't have feature register */
+ if (!cpu_is_ti81xx())
omap3_check_features();
else
- ti816x_check_features();
+ ti81xx_check_features();
omap3_cpuinfo(cpu_rev);
return;
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/barriers.h
index 871f1ef7bff..4fa72c7cc7c 100644
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap2/include/mach/barriers.h
@@ -1,11 +1,13 @@
/*
- * arch/arm/mach-netx/include/mach/vmalloc.h
+ * OMAP memory barrier header.
*
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -14,6 +16,16 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END 0xd0000000UL
+
+#ifndef __MACH_BARRIERS_H
+#define __MACH_BARRIERS_H
+
+extern void omap_bus_sync(void);
+
+#define rmb() dsb()
+#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
+#define mb() wmb()
+
+#endif /* __MACH_BARRIERS_H */
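
The point of folding outer_sync() and omap_bus_sync() into wmb() is that a write must have left the CPU, the PL310 and the interconnect before a posted MMIO write can overtake it; a minimal, hypothetical driver fragment that depends on exactly that ordering:

#include <linux/io.h>

/* Sketch (not from this patch): publish a buffer address, then ring a
 * doorbell.  The strengthened wmb() above makes the first write visible
 * on the bus before the doorbell write reaches the device.
 */
static void example_ring_doorbell(void __iomem *regs, u32 buf_phys)
{
	writel_relaxed(buf_phys, regs + 0x00);	/* assumed buffer register */
	wmb();					/* dsb + outer_sync + omap_bus_sync */
	writel_relaxed(0x1, regs + 0x04);	/* assumed "go" register */
}
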
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index 13f98e59cfe..cdfc2a1f0e7 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -66,11 +66,11 @@ omap_uart_lsr: .word 0
beq 34f @ configure OMAP3UART4
cmp \rp, #OMAP4UART4 @ only on 44xx
beq 44f @ configure OMAP4UART4
- cmp \rp, #TI816XUART1 @ ti816x UART offsets different
+ cmp \rp, #TI81XXUART1 @ ti81Xx UART offsets different
beq 81f @ configure UART1
- cmp \rp, #TI816XUART2 @ ti816x UART offsets different
+ cmp \rp, #TI81XXUART2 @ ti81Xx UART offsets different
beq 82f @ configure UART2
- cmp \rp, #TI816XUART3 @ ti816x UART offsets different
+ cmp \rp, #TI81XXUART3 @ ti81Xx UART offsets different
beq 83f @ configure UART3
cmp \rp, #ZOOM_UART @ only on zoom2/3
beq 95f @ configure ZOOM_UART
@@ -94,11 +94,11 @@ omap_uart_lsr: .word 0
b 98f
44: mov \rp, #UART_OFFSET(OMAP4_UART4_BASE)
b 98f
-81: mov \rp, #UART_OFFSET(TI816X_UART1_BASE)
+81: mov \rp, #UART_OFFSET(TI81XX_UART1_BASE)
b 98f
-82: mov \rp, #UART_OFFSET(TI816X_UART2_BASE)
+82: mov \rp, #UART_OFFSET(TI81XX_UART2_BASE)
b 98f
-83: mov \rp, #UART_OFFSET(TI816X_UART3_BASE)
+83: mov \rp, #UART_OFFSET(TI81XX_UART3_BASE)
b 98f
95: ldr \rp, =ZOOM_UART_BASE
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index feb90a10945..56964a0c4c7 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -10,146 +10,9 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/io.h>
-#include <mach/irqs.h>
-#include <asm/hardware/gic.h>
-
-#include <plat/omap24xx.h>
-#include <plat/omap34xx.h>
-#include <plat/omap44xx.h>
-
-#include <plat/multi.h>
-
-#define OMAP2_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
-#define OMAP3_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
-#define OMAP4_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE)
-#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* omap2/3 active interrupt offset */
-#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
.macro disable_fiq
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
-/*
- * Unoptimized irq functions for multi-omap2, 3 and 4
- */
-
-#ifdef MULTI_OMAP2
- /*
- * Configure the interrupt base on the first interrupt.
- * See also omap_irq_base_init for setting omap_irq_base.
- */
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =omap_irq_base @ irq base address
- ldr \base, [\base, #0] @ irq base value
- .endm
-
- /* Check the pending interrupts. Note that base already set */
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- tst \base, #0x100 @ gic address?
- bne 4401f @ found gic
-
- /* Handle omap2 and omap3 */
- ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
- cmp \irqnr, #0x0
- bne 9998f
- ldr \irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
- cmp \irqnr, #0x0
- bne 9998f
- ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
- cmp \irqnr, #0x0
- bne 9998f
-
- /*
- * ti816x has additional IRQ pending register. Checking this
- * register on omap2 & omap3 has no effect (read as 0).
- */
- ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
- cmp \irqnr, #0x0
-9998:
- ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
- and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
- b 9999f
-
- /* Handle omap4 */
-4401: ldr \irqstat, [\base, #GIC_CPU_INTACK]
- ldr \tmp, =1021
- bic \irqnr, \irqstat, #0x1c00
- cmp \irqnr, #15
- cmpcc \irqnr, \irqnr
- cmpne \irqnr, \tmp
- cmpcs \irqnr, \irqnr
-9999:
- .endm
-
-#ifdef CONFIG_SMP
- /* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt
- * on the controller, since this requires the original irqstat
- * value which we won't easily be able to recreate later.
- */
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- bic \irqnr, \irqstat, #0x1c00
- cmp \irqnr, #16
- it cc
- strcc \irqstat, [\base, #GIC_CPU_EOI]
- it cs
- cmpcs \irqnr, \irqnr
- .endm
-#endif /* CONFIG_SMP */
-
-#else /* MULTI_OMAP2 */
-
-
-/*
- * Optimized irq functions for omap2, 3 and 4
- */
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- .macro get_irqnr_preamble, base, tmp
-#ifdef CONFIG_ARCH_OMAP2
- ldr \base, =OMAP2_IRQ_BASE
-#else
- ldr \base, =OMAP3_IRQ_BASE
-#endif
- .endm
-
- /* Check the pending interrupts. Note that base already set */
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
- cmp \irqnr, #0x0
- bne 9999f
- ldr \irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
- cmp \irqnr, #0x0
- bne 9999f
- ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
- cmp \irqnr, #0x0
-#ifdef CONFIG_SOC_OMAPTI816X
- bne 9999f
- ldr \irqnr, [\base, #0xf8] /* IRQ pending reg 4 */
- cmp \irqnr, #0x0
-#endif
-9999:
- ldrne \irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
- and \irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
-
- .endm
-#endif
-
-
-#ifdef CONFIG_ARCH_OMAP4
-#define HAVE_GET_IRQNR_PREAMBLE
-#include <asm/hardware/entry-macro-gic.S>
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =OMAP4_IRQ_BASE
- .endm
-
-#endif
-
-#endif /* MULTI_OMAP2 */
diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h
new file mode 100644
index 00000000000..c90a43589ab
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap-secure.h
@@ -0,0 +1,57 @@
+/*
+ * omap-secure.h: OMAP Secure infrastructure header.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP_SECURE_H
+#define OMAP_ARCH_OMAP_SECURE_H
+
+/* Monitor error code */
+#define API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR 0xFFFFFFFE
+#define API_HAL_RET_VALUE_SERVICE_UNKNWON 0xFFFFFFFF
+
+/* HAL API error codes */
+#define API_HAL_RET_VALUE_OK 0x00
+#define API_HAL_RET_VALUE_FAIL 0x01
+
+/* Secure HAL API flags */
+#define FLAG_START_CRITICAL 0x4
+#define FLAG_IRQFIQ_MASK 0x3
+#define FLAG_IRQ_ENABLE 0x2
+#define FLAG_FIQ_ENABLE 0x1
+#define NO_FLAG 0x0
+
+/* Maximum Secure memory storage size */
+#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)
+
+/* Secure low power HAL API index */
+#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
+#define OMAP4_HAL_SAVEHW_INDEX 0x1b
+#define OMAP4_HAL_SAVEALL_INDEX 0x1c
+#define OMAP4_HAL_SAVEGIC_INDEX 0x1d
+
+/* Secure Monitor mode APIs */
+#define OMAP4_MON_SCU_PWR_INDEX 0x108
+#define OMAP4_MON_L2X0_DBG_CTRL_INDEX 0x100
+#define OMAP4_MON_L2X0_CTRL_INDEX 0x102
+#define OMAP4_MON_L2X0_AUXCTRL_INDEX 0x109
+#define OMAP4_MON_L2X0_PREFETCH_INDEX 0x113
+
+/* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_L2_POR_INDEX 0x23
+#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
+
+#ifndef __ASSEMBLER__
+
+extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+extern u32 omap_smc2(u32 id, u32 flag, u32 pargs);
+extern phys_addr_t omap_secure_ram_mempool_base(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* OMAP_ARCH_OMAP_SECURE_H */
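
How the indices above are consumed is easiest to see from a caller; the sketch below mirrors the way the low-power code invokes the dispatcher on HS devices, but it is illustrative only and not part of this patch.

#include <linux/errno.h>
#include <mach/omap-secure.h>

/* Sketch: ask the secure ROM to save its RAM before a deep sleep state. */
static int example_save_secure_ram(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVESECURERAM_INDEX,
				     FLAG_START_CRITICAL, 1,
				     omap_secure_ram_mempool_base(), 0, 0, 0);

	return (ret == API_HAL_RET_VALUE_OK) ? 0 : -EIO;
}
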
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
new file mode 100644
index 00000000000..d79321b0f2a
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -0,0 +1,39 @@
+/*
+ * OMAP WakeupGen header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_WAKEUPGEN_H
+#define OMAP_ARCH_WAKEUPGEN_H
+
+#define OMAP_WKG_CONTROL_0 0x00
+#define OMAP_WKG_ENB_A_0 0x10
+#define OMAP_WKG_ENB_B_0 0x14
+#define OMAP_WKG_ENB_C_0 0x18
+#define OMAP_WKG_ENB_D_0 0x1c
+#define OMAP_WKG_ENB_SECURE_A_0 0x20
+#define OMAP_WKG_ENB_SECURE_B_0 0x24
+#define OMAP_WKG_ENB_SECURE_C_0 0x28
+#define OMAP_WKG_ENB_SECURE_D_0 0x2c
+#define OMAP_WKG_ENB_A_1 0x410
+#define OMAP_WKG_ENB_B_1 0x414
+#define OMAP_WKG_ENB_C_1 0x418
+#define OMAP_WKG_ENB_D_1 0x41c
+#define OMAP_WKG_ENB_SECURE_A_1 0x420
+#define OMAP_WKG_ENB_SECURE_B_1 0x424
+#define OMAP_WKG_ENB_SECURE_C_1 0x428
+#define OMAP_WKG_ENB_SECURE_D_1 0x42c
+#define OMAP_AUX_CORE_BOOT_0 0x800
+#define OMAP_AUX_CORE_BOOT_1 0x804
+#define OMAP_PTMSYNCREQ_MASK 0xc00
+#define OMAP_PTMSYNCREQ_EN 0xc04
+#define OMAP_TIMESTAMPCYCLELO 0xc08
+#define OMAP_TIMESTAMPCYCLEHI 0xc0c
+
+extern int __init omap_wakeupgen_init(void);
+#endif
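
The enable banks repeat with a 0x400 stride per CPU (ENB_A_0 at 0x10, ENB_A_1 at 0x410), four 32-bit registers covering SPIs 0-127; a tiny sketch of the address arithmetic the WakeupGen driver relies on:

/* Sketch: offset of the WakeupGen enable register for a given CPU/bank.
 * bank 0..3, 32 interrupts per register, CPU1 registers start 0x400 later.
 */
static inline u32 example_wkg_enable_offset(unsigned int cpu, unsigned int bank)
{
	return OMAP_WKG_ENB_A_0 + (cpu * 0x400) + (bank * 4);
}
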
diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
deleted file mode 100644
index e4bd8761973..00000000000
--- a/arch/arm/mach-omap2/include/mach/omap4-common.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * omap4-common.h: OMAP4 specific common header file
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * Author:
- * Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OMAP_ARCH_OMAP4_COMMON_H
-#define OMAP_ARCH_OMAP4_COMMON_H
-
-/*
- * wfi used in low power code. Directly opcode is used instead
- * of instruction to avoid mulit-omap build break
- */
-#ifdef CONFIG_THUMB2_KERNEL
-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
-#else
-#define do_wfi() \
- __asm__ __volatile__ (".word 0xe320f003" : : : "memory")
-#endif
-
-#ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
-#endif
-
-extern void __iomem *gic_dist_base_addr;
-
-extern void __init gic_init_irq(void);
-extern void omap_smc1(u32 fn, u32 arg);
-
-#ifdef CONFIG_SMP
-/* Needed for secondary core boot */
-extern void omap_secondary_startup(void);
-extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
-extern void omap_auxcoreboot_addr(u32 cpu_addr);
-extern u32 omap_read_auxcoreboot0(void);
-#endif
-#endif
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
deleted file mode 100644
index 86631994776..00000000000
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/plat-omap/include/mach/vmalloc.h
- *
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 25d20ced03e..3f174d51f67 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -35,7 +35,7 @@
#include "clock3xxx.h"
#include "clock44xx.h"
-#include <plat/common.h>
+#include "common.h"
#include <plat/omap-pm.h>
#include "voltage.h"
#include "powerdomain.h"
@@ -43,7 +43,7 @@
#include "clockdomain.h"
#include <plat/omap_hwmod.h>
#include <plat/multi.h>
-#include <plat/common.h>
+#include "common.h"
/*
* The machine specific code may provide the extra mapping besides the
@@ -176,14 +176,31 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_SOC_OMAPTI816X
-static struct map_desc omapti816x_io_desc[] __initdata = {
+#ifdef CONFIG_SOC_OMAPTI81XX
+static struct map_desc omapti81xx_io_desc[] __initdata = {
+ {
+ .virtual = L4_34XX_VIRT,
+ .pfn = __phys_to_pfn(L4_34XX_PHYS),
+ .length = L4_34XX_SIZE,
+ .type = MT_DEVICE
+ }
+};
+#endif
+
+#ifdef CONFIG_SOC_OMAPAM33XX
+static struct map_desc omapam33xx_io_desc[] __initdata = {
{
.virtual = L4_34XX_VIRT,
.pfn = __phys_to_pfn(L4_34XX_PHYS),
.length = L4_34XX_SIZE,
.type = MT_DEVICE
},
+ {
+ .virtual = L4_WK_AM33XX_VIRT,
+ .pfn = __phys_to_pfn(L4_WK_AM33XX_PHYS),
+ .length = L4_WK_AM33XX_SIZE,
+ .type = MT_DEVICE
+ }
};
#endif
@@ -237,6 +254,15 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
.length = L4_EMU_44XX_SIZE,
.type = MT_DEVICE,
},
+#ifdef CONFIG_OMAP4_ERRATA_I688
+ {
+ .virtual = OMAP4_SRAM_VA,
+ .pfn = __phys_to_pfn(OMAP4_SRAM_PA),
+ .length = PAGE_SIZE,
+ .type = MT_MEMORY_SO,
+ },
+#endif
+
};
#endif
@@ -263,10 +289,17 @@ void __init omap34xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPTI816X
-void __init omapti816x_map_common_io(void)
+#ifdef CONFIG_SOC_OMAPTI81XX
+void __init omapti81xx_map_common_io(void)
{
- iotable_init(omapti816x_io_desc, ARRAY_SIZE(omapti816x_io_desc));
+ iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
+}
+#endif
+
+#ifdef CONFIG_SOC_OMAPAM33XX
+void __init omapam33xx_map_common_io(void)
+{
+ iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
}
#endif
@@ -316,13 +349,9 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}
-/* See irq.c, omap4-common.c and entry-macro.S */
-void __iomem *omap_irq_base;
-
static void __init omap_common_init_early(void)
{
omap2_check_revision();
- omap_ioremap_init();
omap_init_consistent_dma_size();
}
@@ -422,9 +451,9 @@ void __init am35xx_init_early(void)
omap3_init_early();
}
-void __init ti816x_init_early(void)
+void __init ti81xx_init_early(void)
{
- omap2_set_globals_ti816x();
+ omap2_set_globals_ti81xx();
omap_common_init_early();
omap3xxx_voltagedomains_init();
omap3xxx_powerdomains_init();
diff --git a/arch/arm/mach-omap2/io.h b/arch/arm/mach-omap2/io.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/arch/arm/mach-omap2/io.h
+++ /dev/null
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 65f1be6a182..1fef061f792 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <mach/hardware.h>
+#include <asm/exception.h>
#include <asm/mach/irq.h>
@@ -35,6 +36,11 @@
/* Number of IRQ state bits in each MIR register */
#define IRQ_BITS_PER_REG 32
+#define OMAP2_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE)
+#define OMAP3_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
+#define INTCPS_SIR_IRQ_OFFSET 0x0040 /* omap2/3 active interrupt offset */
+#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
+
/*
* OMAP2 has a number of different interrupt controllers, each interrupt
* controller is identified as its own "bank". Register definitions are
@@ -143,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
static void __init omap_init_irq(u32 base, int nr_irqs)
{
+ void __iomem *omap_irq_base;
unsigned long nr_of_irqs = 0;
unsigned int nr_banks = 0;
int i, j;
@@ -186,11 +193,49 @@ void __init omap3_init_irq(void)
omap_init_irq(OMAP34XX_IC_BASE, 96);
}
-void __init ti816x_init_irq(void)
+void __init ti81xx_init_irq(void)
{
omap_init_irq(OMAP34XX_IC_BASE, 128);
}
+static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
+{
+ u32 irqnr;
+
+ do {
+ irqnr = readl_relaxed(base_addr + 0x98);
+ if (irqnr)
+ goto out;
+
+ irqnr = readl_relaxed(base_addr + 0xb8);
+ if (irqnr)
+ goto out;
+
+ irqnr = readl_relaxed(base_addr + 0xd8);
+#ifdef CONFIG_SOC_OMAPTI816X
+ if (irqnr)
+ goto out;
+ irqnr = readl_relaxed(base_addr + 0xf8);
+#endif
+
+out:
+ if (!irqnr)
+ break;
+
+ irqnr = readl_relaxed(base_addr + INTCPS_SIR_IRQ_OFFSET);
+ irqnr &= ACTIVEIRQ_MASK;
+
+ if (irqnr)
+ handle_IRQ(irqnr, regs);
+ } while (irqnr);
+}
+
+asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
+{
+ void __iomem *base_addr = OMAP2_IRQ_BASE;
+ omap_intc_handle_irq(base_addr, regs);
+}
+
#ifdef CONFIG_ARCH_OMAP3
static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
@@ -263,4 +308,10 @@ void omap3_intc_resume_idle(void)
/* Re-enable autoidle */
intc_bank_write_reg(1, &irq_banks[0], INTC_SYSCONFIG);
}
+
+asmlinkage void __exception_irq_entry omap3_intc_handle_irq(struct pt_regs *regs)
+{
+ void __iomem *base_addr = OMAP3_IRQ_BASE;
+ omap_intc_handle_irq(base_addr, regs);
+}
#endif /* CONFIG_ARCH_OMAP3 */
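
With the INTC entry point now in C, boards select it through the handle_irq field of their machine descriptor instead of the old assembly macros; a sketch of such a descriptor follows (the board name and init function are placeholders, not from this patch).

#include <linux/init.h>
#include <asm/mach/arch.h>
#include "common.h"

static void __init example_board_init(void)
{
	/* board devices would be registered here */
}

/* Sketch: wiring the new C INTC entry point into a machine record. */
MACHINE_START(EXAMPLE_OMAP3_BOARD, "Example OMAP3 board")
	.atag_offset	= 0x100,
	.map_io		= omap3_map_io,
	.init_early	= omap3430_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= example_board_init,
	.timer		= &omap3_timer,
MACHINE_END
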
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 292eee3be15..28fcb27005d 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
pdata->reg_size = 4;
pdata->has_ccr = true;
}
+ pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+ if (id == 1)
+ pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
name, oh->name);
return PTR_ERR(pdev);
}
- pdata->set_clk_src = omap2_mcbsp_set_clk_src;
- if (id == 1)
- pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
omap_mcbsp_count++;
return 0;
}
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 655e9480eb9..e1cc75d1a57 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -32,6 +32,8 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
@@ -39,6 +41,7 @@
#include "control.h"
#include "mux.h"
+#include "prm.h"
#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
#define OMAP_MUX_BASE_SZ 0x5ca
@@ -306,7 +309,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
pad->idle = bpad->idle;
pad->off = bpad->off;
- if (pad->flags & OMAP_DEVICE_PAD_REMUX)
+ if (pad->flags &
+ (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP))
nr_pads_dynamic++;
pr_debug("%s: Initialized %s\n", __func__, pad->name);
@@ -331,7 +335,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
for (i = 0; i < hmux->nr_pads; i++) {
struct omap_device_pad *pad = &hmux->pads[i];
- if (pad->flags & OMAP_DEVICE_PAD_REMUX) {
+ if (pad->flags &
+ (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) {
pr_debug("%s: pad %s tagged dynamic\n",
__func__, pad->name);
hmux->pads_dynamic[nr_pads_dynamic] = pad;
@@ -351,6 +356,78 @@ err1:
return NULL;
}
+/**
+ * omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads
+ * @hmux: Pads for a hwmod
+ * @mpu_irqs: MPU irq array for a hwmod
+ *
+ * Scans the wakeup status of pads for a single hwmod. If an irq
+ * array is defined for this mux, the parser will call the registered
+ * ISRs for corresponding pads, otherwise the parser will stop at the
+ * first wakeup active pad and return. Returns true if there is a
+ * pending and non-served wakeup event for the mux, otherwise false.
+ */
+static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux,
+ struct omap_hwmod_irq_info *mpu_irqs)
+{
+ int i, irq;
+ unsigned int val;
+ u32 handled_irqs = 0;
+
+ for (i = 0; i < hmux->nr_pads_dynamic; i++) {
+ struct omap_device_pad *pad = hmux->pads_dynamic[i];
+
+ if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) ||
+ !(pad->idle & OMAP_WAKEUP_EN))
+ continue;
+
+ val = omap_mux_read(pad->partition, pad->mux->reg_offset);
+ if (!(val & OMAP_WAKEUP_EVENT))
+ continue;
+
+ if (!hmux->irqs)
+ return true;
+
+ irq = hmux->irqs[i];
+ /* make sure we only handle each irq once */
+ if (handled_irqs & 1 << irq)
+ continue;
+
+ handled_irqs |= 1 << irq;
+
+ generic_handle_irq(mpu_irqs[irq].irq);
+ }
+
+ return false;
+}
+
+/**
+ * _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod
+ *
+ * Checks a single hwmod for every wakeup capable pad to see if there is an
+ * active wakeup event. If this is the case, call the corresponding ISR.
+ */
+static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data)
+{
+ if (!oh->mux || !oh->mux->enabled)
+ return 0;
+ if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs))
+ generic_handle_irq(oh->mpu_irqs[0].irq);
+ return 0;
+}
+
+/**
+ * omap_hwmod_mux_handle_irq - Process pad wakeup irqs.
+ *
+ * Calls a function for each registered omap_hwmod to check
+ * pad wakeup statuses.
+ */
+static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused)
+{
+ omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL);
+ return IRQ_HANDLED;
+}
+
/* Assumes the calling function takes care of locking */
void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
{
@@ -715,6 +792,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
static int __init omap_mux_late_init(void)
{
struct omap_mux_partition *partition;
+ int ret;
list_for_each_entry(partition, &mux_partitions, node) {
struct omap_mux_entry *e, *tmp;
@@ -735,6 +813,13 @@ static int __init omap_mux_late_init(void)
}
}
+ ret = request_irq(omap_prcm_event_to_irq("io"),
+ omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND,
+ "hwmod_io", omap_mux_late_init);
+
+ if (ret)
+ pr_warning("mux: Failed to setup hwmod io irq %d\n", ret);
+
omap_mux_dbg_init();
return 0;
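
Only pads tagged OMAP_DEVICE_PAD_WAKEUP (and currently idled with OMAP_WAKEUP_EN) are scanned by the handler above; for illustration, a hwmod pad entry that would be treated as a dynamic wakeup pad could look like the following sketch (the signal name is a placeholder, not from this patch).

#include <plat/omap_hwmod.h>
#include "mux.h"

/* Sketch: a pad remuxed to a wakeup-enabled state while the hwmod idles. */
static struct omap_device_pad example_uart_pads[] __initdata = {
	{
		.name	= "uart1_rx.uart1_rx",		/* placeholder signal */
		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
		.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
		.idle	= OMAP_PIN_INPUT | OMAP_WAKEUP_EN | OMAP_MUX_MODE0,
	},
};
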
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4ee6aeca885..b13ef7ef5ef 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -18,11 +18,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
-/* Physical address needed since MMU not enabled yet on secondary core */
-#define OMAP4_AUX_CORE_BOOT1_PA 0x48281804
-
- __INIT
-
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 4976b9393e4..adbe4d8c7ca 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -19,7 +19,10 @@
#include <linux/smp.h>
#include <asm/cacheflush.h>
-#include <mach/omap4-common.h>
+
+#include "common.h"
+
+#include "powerdomain.h"
int platform_cpu_kill(unsigned int cpu)
{
@@ -32,6 +35,8 @@ int platform_cpu_kill(unsigned int cpu)
*/
void platform_cpu_die(unsigned int cpu)
{
+ unsigned int this_cpu;
+
flush_cache_all();
dsb();
@@ -39,15 +44,15 @@ void platform_cpu_die(unsigned int cpu)
* we're ready for shutdown now, so do it
*/
if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
- printk(KERN_CRIT "Secure clear status failed\n");
+ pr_err("Secure clear status failed\n");
for (;;) {
/*
- * Execute WFI
+ * Enter into low power state
*/
- do_wfi();
-
- if (omap_read_auxcoreboot0() == cpu) {
+ omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
+ this_cpu = smp_processor_id();
+ if (omap_read_auxcoreboot0() == this_cpu) {
/*
* OK, proper wakeup, we're done
*/
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
new file mode 100644
index 00000000000..1d5d0105655
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -0,0 +1,398 @@
+/*
+ * OMAP MPUSS low power code
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
+ * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
+ * CPU0 and CPU1 LPRM modules.
+ * CPU0, CPU1 and MPUSS each have their own power domain, and
+ * hence multiple low power combinations of MPUSS are possible.
+ *
+ * CPU0 and CPU1 can't support Closed Switch Retention (CSWR) because
+ * of the hardware constraints of dormant mode: while waking up from
+ * dormant mode, a reset signal to the Cortex-A9 processor must be
+ * asserted by the external power controller.
+ *
+ * Based on architectural inputs and hardware recommendations, only the
+ * modes below are supported from a power gain vs. latency point of view.
+ *
+ * CPU0 CPU1 MPUSS
+ * ----------------------------------------------
+ * ON ON ON
+ * ON(Inactive) OFF ON(Inactive)
+ * OFF OFF CSWR
+ * OFF OFF OSWR
+ * OFF OFF OFF(Device OFF *TBD)
+ * ----------------------------------------------
+ *
+ * Note: CPU0 is the master core; it is the last CPU to go down and
+ * the first to wake up when MPUSS low power states are exercised.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/smp_scu.h>
+#include <asm/system.h>
+#include <asm/pgalloc.h>
+#include <asm/suspend.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+
+#include "common.h"
+#include "omap4-sar-layout.h"
+#include "pm.h"
+#include "prcm_mpu44xx.h"
+#include "prminst44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prm-regbits-44xx.h"
+
+#ifdef CONFIG_SMP
+
+struct omap4_cpu_pm_info {
+ struct powerdomain *pwrdm;
+ void __iomem *scu_sar_addr;
+ void __iomem *wkup_sar_addr;
+ void __iomem *l2x0_sar_addr;
+};
+
+static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+static struct powerdomain *mpuss_pd;
+static void __iomem *sar_base;
+
+/*
+ * Program the wakeup routine address for CPU0 and CPU1,
+ * used for OFF or DORMANT wakeup.
+ */
+static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ __raw_writel(addr, pm_info->wkup_sar_addr);
+}
+
+/*
+ * Set the CPUx powerdomain's next power state
+ */
+static inline void set_cpu_next_pwrst(unsigned int cpu_id,
+ unsigned int power_state)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
+}
+
+/*
+ * Read CPU's previous power state
+ */
+static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ return pwrdm_read_prev_pwrst(pm_info->pwrdm);
+}
+
+/*
+ * Clear the CPUx powerdomain's previous power state
+ */
+static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+}
+
+/*
+ * Store the SCU power status value to scratchpad memory
+ */
+static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+ u32 scu_pwr_st;
+
+ switch (cpu_state) {
+ case PWRDM_POWER_RET:
+ scu_pwr_st = SCU_PM_DORMANT;
+ break;
+ case PWRDM_POWER_OFF:
+ scu_pwr_st = SCU_PM_POWEROFF;
+ break;
+ case PWRDM_POWER_ON:
+ case PWRDM_POWER_INACTIVE:
+ default:
+ scu_pwr_st = SCU_PM_NORMAL;
+ break;
+ }
+
+ __raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
+}
+
+/* Helper functions for MPUSS OSWR */
+static inline void mpuss_clear_prev_logic_pwrst(void)
+{
+ u32 reg;
+
+ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+ omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+}
+
+static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
+{
+ u32 reg;
+
+ if (cpu_id) {
+ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
+ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
+ OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+ } else {
+ reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
+ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+ omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
+ OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+ }
+}
+
+/**
+ * omap4_mpuss_read_prev_context_state:
+ * Function returns the MPUSS previous context state
+ */
+u32 omap4_mpuss_read_prev_context_state(void)
+{
+ u32 reg;
+
+ reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+ OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+ reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
+ return reg;
+}
+
+/*
+ * Store the CPU cluster state for L2X0 low power operations.
+ */
+static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
+{
+ struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+ __raw_writel(save_state, pm_info->l2x0_sar_addr);
+}
+
+/*
+ * Save the L2X0 AUXCTRL and POR values to SAR memory; they are used
+ * in every MPUSS OFF restore path.
+ */
+#ifdef CONFIG_CACHE_L2X0
+static void save_l2x0_context(void)
+{
+ u32 val;
+ void __iomem *l2x0_base = omap4_get_l2cache_base();
+
+ val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+ __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+ val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+ __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+}
+#else
+static void save_l2x0_context(void)
+{}
+#endif
+
+/**
+ * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
+ * The purpose of this function is to manage low power programming
+ * of OMAP4 MPUSS subsystem
+ * @cpu : CPU ID
+ * @power_state: Low power state.
+ *
+ * MPUSS states for the context save:
+ * save_state =
+ * 0 - Nothing lost and no need to save: MPUSS INACTIVE
+ * 1 - CPUx L1 and logic lost: MPUSS CSWR
+ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
+ */
+int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
+{
+ unsigned int save_state = 0;
+ unsigned int wakeup_cpu;
+
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return -ENXIO;
+
+ switch (power_state) {
+ case PWRDM_POWER_ON:
+ case PWRDM_POWER_INACTIVE:
+ save_state = 0;
+ break;
+ case PWRDM_POWER_OFF:
+ save_state = 1;
+ break;
+ case PWRDM_POWER_RET:
+ default:
+ /*
+ * CPUx CSWR is an invalid hardware state. CPUx OSWR doesn't
+ * make much sense either, since logic is lost and the L1 cache
+ * needs to be cleaned for coherency. This makes CPUx OSWR
+ * equivalent to CPUx OFF, and hence it is not supported.
+ */
+ WARN_ON(1);
+ return -ENXIO;
+ }
+
+ pwrdm_pre_transition();
+
+ /*
+ * Check MPUSS next state and save interrupt controller if needed.
+ * In MPUSS OSWR or device OFF, the interrupt controller context is lost.
+ */
+ mpuss_clear_prev_logic_pwrst();
+ pwrdm_clear_all_prev_pwrst(mpuss_pd);
+ if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
+ (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
+ save_state = 2;
+
+ clear_cpu_prev_pwrst(cpu);
+ cpu_clear_prev_logic_pwrst(cpu);
+ set_cpu_next_pwrst(cpu, power_state);
+ set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
+ scu_pwrst_prepare(cpu, power_state);
+ l2x0_pwrst_prepare(cpu, save_state);
+
+ /*
+ * Call low level function with targeted low power state.
+ */
+ cpu_suspend(save_state, omap4_finish_suspend);
+
+ /*
+ * Restore the CPUx power state to ON, otherwise the CPUx
+ * power domain can transition to the programmed low power
+ * state while doing WFI outside the low power code. On
+ * secure devices, CPUx does WFI which can result in a
+ * power domain transition.
+ */
+ wakeup_cpu = smp_processor_id();
+ set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
+
+ pwrdm_post_transition();
+
+ return 0;
+}
+
+/**
+ * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
+ * @cpu : CPU ID
+ * @power_state: CPU low power state.
+ */
+int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+{
+ unsigned int cpu_state = 0;
+
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return -ENXIO;
+
+ if (power_state == PWRDM_POWER_OFF)
+ cpu_state = 1;
+
+ clear_cpu_prev_pwrst(cpu);
+ set_cpu_next_pwrst(cpu, power_state);
+ set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup));
+ scu_pwrst_prepare(cpu, power_state);
+
+ /*
+ * The CPU never returns if the targeted power state is OFF mode.
+ * CPU online follows the normal CPU online path via
+ * omap_secondary_startup().
+ */
+ omap4_finish_suspend(cpu_state);
+
+ set_cpu_next_pwrst(cpu, PWRDM_POWER_ON);
+ return 0;
+}
+
+
+/*
+ * Initialise OMAP4 MPUSS
+ */
+int __init omap4_mpuss_init(void)
+{
+ struct omap4_cpu_pm_info *pm_info;
+
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+ return -ENODEV;
+ }
+
+ sar_base = omap4_get_sar_ram_base();
+
+ /* Initialise per-CPU PM information */
+ pm_info = &per_cpu(omap4_pm_info, 0x0);
+ pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
+ pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
+ pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
+ pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
+ if (!pm_info->pwrdm) {
+ pr_err("Lookup failed for CPU0 pwrdm\n");
+ return -ENODEV;
+ }
+
+ /* Clear CPU previous power domain state */
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+ cpu_clear_prev_logic_pwrst(0);
+
+ /* Initialise CPU0 power domain state to ON */
+ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+ pm_info = &per_cpu(omap4_pm_info, 0x1);
+ pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
+ pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+ pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
+ pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
+ if (!pm_info->pwrdm) {
+ pr_err("Lookup failed for CPU1 pwrdm\n");
+ return -ENODEV;
+ }
+
+ /* Clear CPU previous power domain state */
+ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+ cpu_clear_prev_logic_pwrst(1);
+
+ /* Initialise CPU1 power domain state to ON */
+ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+ mpuss_pd = pwrdm_lookup("mpu_pwrdm");
+ if (!mpuss_pd) {
+ pr_err("Failed to lookup MPUSS power domain\n");
+ return -ENODEV;
+ }
+ pwrdm_clear_all_prev_pwrst(mpuss_pd);
+ mpuss_clear_prev_logic_pwrst();
+
+ /* Save device type on scratchpad for low level code to use */
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+ __raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
+ else
+ __raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
+
+ save_l2x0_context();
+
+ return 0;
+}
+
+#endif
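
These entry points are meant to be driven from the CPU-idle/suspend code, which is added separately; as a rough sketch of the calling convention only (the wrapper below is hypothetical):

#include <asm/proc-fns.h>
#include "common.h"
#include "powerdomain.h"

/* Sketch: how an idle path would pick between plain WFI and MPUSS OFF. */
static void example_enter_idle(unsigned int cpu, bool deep)
{
	if (deep)
		omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);
	else
		cpu_do_idle();		/* simple WFI, nothing saved */
}
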
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
new file mode 100644
index 00000000000..69f3c72d959
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -0,0 +1,81 @@
+/*
+ * OMAP Secure API infrastructure.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/omap-secure.h>
+
+static phys_addr_t omap_secure_memblock_base;
+
+/**
+ * omap_secure_dispatcher: Routine to dispatch low power secure
+ * service routines
+ * @idx: The HAL API index
+ * @flag: The flag indicating criticality of operation
+ * @nargs: Number of valid arguments out of four.
+ * @arg1, arg2, arg3, arg4: Parameters passed to the secure API
+ *
+ * Returns a non-zero error value on failure.
+ */
+u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ u32 ret;
+ u32 param[5];
+
+ param[0] = nargs;
+ param[1] = arg1;
+ param[2] = arg2;
+ param[3] = arg3;
+ param[4] = arg4;
+
+ /*
+ * Secure API needs physical address
+ * pointer for the parameters
+ */
+ flush_cache_all();
+ outer_clean_range(__pa(param), __pa(param + 5));
+ ret = omap_smc2(idx, flag, __pa(param));
+
+ return ret;
+}
+
+/* Allocate the memory to save secure ram */
+int __init omap_secure_ram_reserve_memblock(void)
+{
+ phys_addr_t paddr;
+ u32 size = OMAP_SECURE_RAM_STORAGE;
+
+ size = ALIGN(size, SZ_1M);
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %x bytes\n",
+ __func__, size);
+ return -ENOMEM;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+
+ omap_secure_memblock_base = paddr;
+
+ return 0;
+}
+
+phys_addr_t omap_secure_ram_mempool_base(void)
+{
+ return omap_secure_memblock_base;
+}
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap-smc.S
index e69d37d9520..f6441c13cd8 100644
--- a/arch/arm/mach-omap2/omap44xx-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -31,6 +31,29 @@ ENTRY(omap_smc1)
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_smc1)
+/**
+ * u32 omap_smc2(u32 id, u32 flag, u32 pargs)
+ * Low level common routine for secure HAL and PPA APIs.
+ * @id: Application ID of HAL APIs
+ * @flag: Flag to indicate the criticality of operation
+ * @pargs: Physical address of the parameter list, starting
+ * with the number of parameters
+ */
+ENTRY(omap_smc2)
+ stmfd sp!, {r4-r12, lr}
+ mov r3, r2
+ mov r2, r1
+ mov r1, #0x0 @ Process ID
+ mov r6, #0xff
+ mov r12, #0x00 @ Secure Service ID
+ mov r7, #0
+ mcr p15, 0, r7, c7, c5, 6
+ dsb
+ dmb
+ smc #0
+ ldmfd sp!, {r4-r12, pc}
+ENDPROC(omap_smc2)
+
ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr}
ldr r12, =0x104
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 4412ddb7b3f..c1bf3ef0ba0 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -24,16 +24,37 @@
#include <asm/hardware/gic.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <mach/omap-secure.h>
+
+#include "common.h"
+
+#include "clockdomain.h"
/* SCU base address */
static void __iomem *scu_base;
static DEFINE_SPINLOCK(boot_lock);
+void __iomem *omap4_get_scu_base(void)
+{
+ return scu_base;
+}
+
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/*
+ * Configure ACTRL and enable NS SMP bit access on CPU1 on HS devices.
+ * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
+ * init, and for CPU1 a secure PPA API is provided. CPU0 must be ON
+ * while executing the NS_SMP API on CPU1, and the PPA version must be 1.4.0+.
+ * OMAP443X GP devices - SMP bit isn't accessible.
+ * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
+ */
+ if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+ omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
+ 4, 0, 0, 0, 0, 0);
+
+ /*
* If any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
@@ -49,6 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ static struct clockdomain *cpu1_clkdm;
+ static bool booted;
/*
* Set synchronisation state between this boot processor
* and the secondary one
@@ -64,6 +87,29 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
flush_cache_all();
smp_wmb();
+
+ if (!cpu1_clkdm)
+ cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
+
+ /*
+ * SGIs (Software Generated Interrupts) are not wakeup capable
+ * from low power states. This is a known limitation on OMAP4 and
+ * needs to be worked around by using a software forced clockdomain
+ * wake-up. To wake up CPU1, CPU0 forces the CPU1 clockdomain into
+ * software forced wake-up. The clockdomain is then put back into
+ * hardware supervised mode.
+ * More details can be found in OMAP4430 TRM - Version J
+ * Section :
+ * 4.3.4.2 Power States of CPU0 and CPU1
+ */
+ if (booted) {
+ clkdm_wakeup(cpu1_clkdm);
+ clkdm_allow_idle(cpu1_clkdm);
+ } else {
+ dsb_sev();
+ booted = true;
+ }
+
gic_raise_softirq(cpumask_of(cpu), 1);
/*
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
new file mode 100644
index 00000000000..d3d8971d7f3
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -0,0 +1,389 @@
+/*
+ * OMAP WakeupGen Source file
+ *
+ * OMAP WakeupGen is the interrupt controller extension used along
+ * with ARM GIC to wake the CPU out from low power states on
+ * external interrupts. It is responsible for generating wakeup
+ * event from the incoming interrupts and enable bits. It is
+ * implemented in MPU always ON power domain. During normal operation,
+ * WakeupGen delivers external interrupts directly to the GIC.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu_pm.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/omap-wakeupgen.h>
+#include <mach/omap-secure.h>
+
+#include "omap4-sar-layout.h"
+#include "common.h"
+
+#define NR_REG_BANKS 4
+#define MAX_IRQS 128
+#define WKG_MASK_ALL 0x00000000
+#define WKG_UNMASK_ALL 0xffffffff
+#define CPU_ENA_OFFSET 0x400
+#define CPU0_ID 0x0
+#define CPU1_ID 0x1
+
+static void __iomem *wakeupgen_base;
+static void __iomem *sar_base;
+static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks);
+static DEFINE_SPINLOCK(wakeupgen_lock);
+static unsigned int irq_target_cpu[NR_IRQS];
+
+/*
+ * Static helper functions.
+ */
+static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
+{
+ return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
+ (cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
+{
+ __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
+ (cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void sar_writel(u32 val, u32 offset, u8 idx)
+{
+ __raw_writel(val, sar_base + offset + (idx * 4));
+}
+
+static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
+{
+ u8 i;
+
+ for (i = 0; i < NR_REG_BANKS; i++)
+ wakeupgen_writel(reg, i, cpu);
+}
+
+static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
+{
+ unsigned int spi_irq;
+
+ /*
+ * PPIs and SGIs are not supported.
+ */
+ if (irq < OMAP44XX_IRQ_GIC_START)
+ return -EINVAL;
+
+ /*
+ * Subtract the GIC offset.
+ */
+ spi_irq = irq - OMAP44XX_IRQ_GIC_START;
+ if (spi_irq > MAX_IRQS) {
+ pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
+ return -EINVAL;
+ }
+
+ /*
+ * Each WakeupGen register controls 32 interrupts,
+ * i.e. one bit per SPI IRQ.
+ */
+ *reg_index = spi_irq >> 5;
+ *bit_posn = spi_irq %= 32;
+
+ return 0;
+}
+
+static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
+{
+ u32 val, bit_number;
+ u8 i;
+
+ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+ return;
+
+ val = wakeupgen_readl(i, cpu);
+ val &= ~BIT(bit_number);
+ wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
+{
+ u32 val, bit_number;
+ u8 i;
+
+ if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+ return;
+
+ val = wakeupgen_readl(i, cpu);
+ val |= BIT(bit_number);
+ wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_save_masks(unsigned int cpu)
+{
+ u8 i;
+
+ for (i = 0; i < NR_REG_BANKS; i++)
+ per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
+}
+
+static void _wakeupgen_restore_masks(unsigned int cpu)
+{
+ u8 i;
+
+ for (i = 0; i < NR_REG_BANKS; i++)
+ wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
+}
+
+/*
+ * Architecture specific Mask extension
+ */
+static void wakeupgen_mask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeupgen_lock, flags);
+ _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
+ spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+/*
+ * Architecture specific Unmask extension
+ */
+static void wakeupgen_unmask(struct irq_data *d)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeupgen_lock, flags);
+ _wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
+ spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+/*
+ * Mask or unmask all interrupts on a given CPU.
+ * 1 = Mask all interrupts on the 'cpu'
+ * 0 = Unmask all interrupts (restore the saved masks)
+ * Ensure that the initial mask is maintained. This is faster than
+ * iterating through GIC registers to arrive at the correct masks.
+ */
+static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeupgen_lock, flags);
+ if (set) {
+ _wakeupgen_save_masks(cpu);
+ _wakeupgen_set_all(cpu, WKG_MASK_ALL);
+ } else {
+ _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
+ _wakeupgen_restore_masks(cpu);
+ }
+ spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
+ * ROM code. WakeupGen IP is integrated along with GIC to manage the
+ * interrupt wakeups from CPU low power states. It manages
+ * masking/unmasking of Shared peripheral interrupts(SPI). So the
+ * interrupt enable/disable control should be in sync and consistent
+ * at WakeupGen and GIC so that interrupts are not lost.
+ */
+static void irq_save_context(void)
+{
+ u32 i, val;
+
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ return;
+
+ if (!sar_base)
+ sar_base = omap4_get_sar_ram_base();
+
+ for (i = 0; i < NR_REG_BANKS; i++) {
+ /* Save the CPUx interrupt mask for IRQ 0 to 127 */
+ val = wakeupgen_readl(i, 0);
+ sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
+ val = wakeupgen_readl(i, 1);
+ sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
+
+ /*
+ * Disable the secure interrupts for CPUx. The restore
+ * code blindly restores secure and non-secure interrupt
+ * masks from SAR RAM. Secure interrupts are not supposed
+ * to be enabled from the HLOS, so overwrite the SAR location
+ * so that the secure interrupts remain disabled.
+ */
+ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
+ sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
+ }
+
+ /* Save AuxBoot* registers */
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+ __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
+ __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);
+
+ /* Save SyncReq generation logic */
+ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
+ __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
+ val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
+ __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);
+
+ /* Set the Backup Bit Mask status */
+ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+ val |= SAR_BACKUP_STATUS_WAKEUPGEN;
+ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
+
+/*
+ * Clear WakeupGen SAR backup status.
+ */
+void irq_sar_clear(void)
+{
+ u32 val;
+ val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+ val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
+ __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
+
+/*
+ * Save GIC and Wakeupgen interrupt context using secure API
+ * for HS/EMU devices.
+ */
+static void irq_save_secure_context(void)
+{
+ u32 ret;
+ ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ if (ret != API_HAL_RET_VALUE_OK)
+ pr_err("GIC and Wakeupgen context save failed\n");
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ wakeupgen_irqmask_all(cpu, 0);
+ break;
+ case CPU_DEAD:
+ wakeupgen_irqmask_all(cpu, 1);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata irq_hotplug_notifier = {
+ .notifier_call = irq_cpu_hotplug_notify,
+};
+
+static void __init irq_hotplug_init(void)
+{
+ register_hotcpu_notifier(&irq_hotplug_notifier);
+}
+#else
+static void __init irq_hotplug_init(void)
+{}
+#endif
+
+#ifdef CONFIG_CPU_PM
+static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ irq_save_context();
+ else
+ irq_save_secure_context();
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ irq_sar_clear();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block irq_notifier_block = {
+ .notifier_call = irq_notifier,
+};
+
+static void __init irq_pm_init(void)
+{
+ cpu_pm_register_notifier(&irq_notifier_block);
+}
+#else
+static void __init irq_pm_init(void)
+{}
+#endif
+
+/*
+ * Initialise the wakeupgen module.
+ */
+int __init omap_wakeupgen_init(void)
+{
+ int i;
+ unsigned int boot_cpu = smp_processor_id();
+
+ /* Not supported on OMAP4 ES1.0 silicon */
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
+ return -EPERM;
+ }
+
+ /* Static mapping, never released */
+ wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
+ if (WARN_ON(!wakeupgen_base))
+ return -ENOMEM;
+
+ /* Clear all IRQ bitmasks at wakeupGen level */
+ for (i = 0; i < NR_REG_BANKS; i++) {
+ wakeupgen_writel(0, i, CPU0_ID);
+ wakeupgen_writel(0, i, CPU1_ID);
+ }
+
+ /*
+ * Override GIC architecture specific functions to add
+ * OMAP WakeupGen interrupt controller along with GIC
+ */
+ gic_arch_extn.irq_mask = wakeupgen_mask;
+ gic_arch_extn.irq_unmask = wakeupgen_unmask;
+ gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+ /*
+ * FIXME: Add support to set_smp_affinity() once the core
+ * GIC code has necessary hooks in place.
+ */
+
+ /* Associate all the IRQs to boot CPU like GIC init does. */
+ for (i = 0; i < NR_IRQS; i++)
+ irq_target_cpu[i] = boot_cpu;
+
+ irq_hotplug_init();
+ irq_pm_init();
+
+ return 0;
+}
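For context, the WakeupGen save path above is driven entirely through the generic CPU PM notifier chain: the last CPU entering cluster low power raises CPU_CLUSTER_PM_ENTER, and the exit event undoes the backup flag. Below is a minimal sketch of that same pattern; the driver_save_context()/driver_restore_context() helpers are hypothetical stand-ins for irq_save_context()/irq_sar_clear(), not part of this patch.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical helpers standing in for irq_save_context()/irq_sar_clear() */
static void driver_save_context(void) { /* save IP context to retained RAM */ }
static void driver_restore_context(void) { /* undo/refresh after wakeup */ }

static int driver_cpu_pm_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:	/* last CPU of the cluster going down */
		driver_save_context();
		break;
	case CPU_CLUSTER_PM_EXIT:	/* cluster coming back from low power */
		driver_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block driver_cpu_pm_nb = {
	.notifier_call = driver_cpu_pm_notifier,
};

static int __init driver_pm_init(void)
{
	return cpu_pm_register_notifier(&driver_cpu_pm_nb);
}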
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 35ac3e5f6e9..bc16c818c6b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -15,24 +15,80 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/memblock.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
#include <plat/irqs.h>
+#include <plat/sram.h>
#include <mach/hardware.h>
-#include <mach/omap4-common.h>
+#include <mach/omap-wakeupgen.h>
+
+#include "common.h"
+#include "omap4-sar-layout.h"
#ifdef CONFIG_CACHE_L2X0
-void __iomem *l2cache_base;
+static void __iomem *l2cache_base;
#endif
-void __iomem *gic_dist_base_addr;
+static void __iomem *sar_ram_base;
+
+#ifdef CONFIG_OMAP4_ERRATA_I688
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA 0xfe600000
+
+void __iomem *dram_sync, *sram_sync;
+
+void omap_bus_sync(void)
+{
+ if (dram_sync && sram_sync) {
+ writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+ writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+ isb();
+ }
+}
+
+static int __init omap_barriers_init(void)
+{
+ struct map_desc dram_io_desc[1];
+ phys_addr_t paddr;
+ u32 size;
+
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+ size = ALIGN(PAGE_SIZE, SZ_1M);
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve 4 Kbytes\n", __func__);
+ return -ENOMEM;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+ dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+ dram_io_desc[0].pfn = __phys_to_pfn(paddr);
+ dram_io_desc[0].length = size;
+ dram_io_desc[0].type = MT_MEMORY_SO;
+ iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+ dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+ sram_sync = (void __iomem *) OMAP4_SRAM_VA;
+
+ pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
+ (long long) paddr, dram_io_desc[0].virtual);
+
+ return 0;
+}
+core_initcall(omap_barriers_init);
+#endif
void __init gic_init_irq(void)
{
+ void __iomem *omap_irq_base;
+ void __iomem *gic_dist_base_addr;
+
/* Static mapping, never released */
gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
BUG_ON(!gic_dist_base_addr);
@@ -41,11 +97,18 @@ void __init gic_init_irq(void)
omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
BUG_ON(!omap_irq_base);
+ omap_wakeupgen_init();
+
gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
}
#ifdef CONFIG_CACHE_L2X0
+void __iomem *omap4_get_l2cache_base(void)
+{
+ return l2cache_base;
+}
+
static void omap4_l2x0_disable(void)
{
/* Disable PL310 L2 Cache controller */
@@ -71,7 +134,8 @@ static int __init omap_l2_cache_init(void)
/* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
- BUG_ON(!l2cache_base);
+ if (WARN_ON(!l2cache_base))
+ return -ENOMEM;
/*
* 16-way associativity, parity disabled
@@ -111,3 +175,30 @@ static int __init omap_l2_cache_init(void)
}
early_initcall(omap_l2_cache_init);
#endif
+
+void __iomem *omap4_get_sar_ram_base(void)
+{
+ return sar_ram_base;
+}
+
+/*
+ * SAR RAM used to save and restore the HW
+ * context in low power modes
+ */
+static int __init omap4_sar_ram_init(void)
+{
+ /*
+ * To avoid code running on other OMAPs in
+ * multi-omap builds
+ */
+ if (!cpu_is_omap44xx())
+ return -ENOMEM;
+
+ /* Static mapping, never released */
+ sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
+ if (WARN_ON(!sar_ram_base))
+ return -ENOMEM;
+
+ return 0;
+}
+early_initcall(omap4_sar_ram_init);
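The strongly-ordered dram_sync/sram_sync mappings above give the CPU a place to issue dummy read-backs on both memory paths so posted writes are drained (errata i688). The following is only a hedged illustration of the intended call pattern: struct my_desc and kick_dma() are invented for the example, while omap_bus_sync() is the helper added in this hunk.

#include <linux/types.h>

struct my_desc {
	u32 addr;
	u32 len;
};

static void kick_dma(struct my_desc *desc);	/* hypothetical doorbell helper */

/* Sketch: make a CPU-written descriptor visible to an external master */
static void publish_descriptor(struct my_desc *desc, u32 addr, u32 len)
{
	desc->addr = addr;
	desc->len = len;

	omap_bus_sync();	/* drain interconnect write buffers (i688) */

	kick_dma(desc);		/* hypothetical: only now start the master */
}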
diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
new file mode 100644
index 00000000000..fe5b545ad44
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-sar-layout.h
@@ -0,0 +1,50 @@
+/*
+ * omap4-sar-layout.h: OMAP4 SAR RAM layout header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+
+/*
+ * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
+ */
+#define SAR_BANK1_OFFSET 0x0000
+#define SAR_BANK2_OFFSET 0x1000
+#define SAR_BANK3_OFFSET 0x2000
+#define SAR_BANK4_OFFSET 0x3000
+
+/* Scratch pad memory offsets from SAR_BANK1 */
+#define SCU_OFFSET0 0xd00
+#define SCU_OFFSET1 0xd04
+#define OMAP_TYPE_OFFSET 0xd10
+#define L2X0_SAVE_OFFSET0 0xd14
+#define L2X0_SAVE_OFFSET1 0xd18
+#define L2X0_AUXCTRL_OFFSET 0xd1c
+#define L2X0_PREFETCH_CTRL_OFFSET 0xd20
+
+/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
+#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04
+#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08
+
+#define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500)
+#define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504)
+#define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508)
+
+/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */
+#define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694)
+#define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4)
+#define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4)
+#define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8)
+#define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc)
+#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0)
+#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10
+
+#endif
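The WakeupGen save areas above are consecutive 32-bit words per CPU (0x684.. for CPU0, 0x6a4.. for CPU1, with the secure copies 0x10 bytes further on), so per-CPU, per-bank indexing reduces to offset + bank * 4. A small hedged sketch using only the offsets defined in this header; the bank count of 4 and the wakeupgen_context[] source array are assumptions made for the example.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: copy four banked enable words for one CPU into SAR RAM */
static void sar_save_wakeupgen_cpu(void __iomem *sar_base,
				   const u32 *wakeupgen_context,
				   unsigned int cpu)
{
	u32 offset = cpu ? WAKEUPGENENB_OFFSET_CPU1 : WAKEUPGENENB_OFFSET_CPU0;
	int bank;

	for (bank = 0; bank < 4; bank++)
		__raw_writel(wakeupgen_context[bank],
			     sar_base + offset + bank * 4);
}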
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6b3088db83b..5192cabb40e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -136,8 +136,9 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/slab.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/cpu.h>
#include "clockdomain.h"
#include "powerdomain.h"
@@ -381,6 +382,51 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
}
/**
+ * _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux
+ * @oh: struct omap_hwmod *
+ * @set_wake: bool value indicating to set (true) or clear (false) wakeup enable
+ *
+ * Set or clear the I/O pad wakeup flag in the mux entries for the
+ * hwmod @oh. This function changes the @oh->mux->pads_dynamic array
+ * in memory. If the hwmod is currently idled, and the new idle
+ * values don't match the previous ones, this function will also
+ * update the SCM PADCTRL registers. Otherwise, if the hwmod is not
+ * currently idled, this function won't touch the hardware: the new
+ * mux settings are written to the SCM PADCTRL registers when the
+ * hwmod is idled. No return value.
+ */
+static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake)
+{
+ struct omap_device_pad *pad;
+ bool change = false;
+ u16 prev_idle;
+ int j;
+
+ if (!oh->mux || !oh->mux->enabled)
+ return;
+
+ for (j = 0; j < oh->mux->nr_pads_dynamic; j++) {
+ pad = oh->mux->pads_dynamic[j];
+
+ if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP))
+ continue;
+
+ prev_idle = pad->idle;
+
+ if (set_wake)
+ pad->idle |= OMAP_WAKEUP_EN;
+ else
+ pad->idle &= ~OMAP_WAKEUP_EN;
+
+ if (prev_idle != pad->idle)
+ change = true;
+ }
+
+ if (change && oh->_state == _HWMOD_STATE_IDLE)
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+}
+
+/**
* _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
* @oh: struct omap_hwmod *
*
@@ -706,27 +752,65 @@ static void _enable_module(struct omap_hwmod *oh)
}
/**
- * _disable_module - enable CLKCTRL modulemode on OMAP4
+ * _omap4_wait_target_disable - wait for a module to be disabled on OMAP4
+ * @oh: struct omap_hwmod *
+ *
+ * Wait for a module @oh to enter slave idle. Returns 0 if the module
+ * does not have an IDLEST bit or if the module successfully enters
+ * slave idle; otherwise, pass along the return value of the
+ * appropriate *_cm*_wait_module_idle() function.
+ */
+static int _omap4_wait_target_disable(struct omap_hwmod *oh)
+{
+ if (!cpu_is_omap44xx())
+ return 0;
+
+ if (!oh)
+ return -EINVAL;
+
+ if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ return 0;
+
+ if (oh->flags & HWMOD_NO_IDLEST)
+ return 0;
+
+ return omap4_cminst_wait_module_idle(oh->clkdm->prcm_partition,
+ oh->clkdm->cm_inst,
+ oh->clkdm->clkdm_offs,
+ oh->prcm.omap4.clkctrl_offs);
+}
+
+/**
+ * _omap4_disable_module - disable CLKCTRL modulemode on OMAP4
* @oh: struct omap_hwmod *
*
* Disable the PRCM module mode related to the hwmod @oh.
- * No return value.
+ * Returns -EINVAL if the modulemode is not supported, or 0 on success.
*/
-static void _disable_module(struct omap_hwmod *oh)
+static int _omap4_disable_module(struct omap_hwmod *oh)
{
+ int v;
+
/* The module mode does not exist prior OMAP4 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- return;
+ if (!cpu_is_omap44xx())
+ return -EINVAL;
if (!oh->clkdm || !oh->prcm.omap4.modulemode)
- return;
+ return -EINVAL;
- pr_debug("omap_hwmod: %s: _disable_module\n", oh->name);
+ pr_debug("omap_hwmod: %s: %s\n", oh->name, __func__);
omap4_cminst_module_disable(oh->clkdm->prcm_partition,
oh->clkdm->cm_inst,
oh->clkdm->clkdm_offs,
oh->prcm.omap4.clkctrl_offs);
+
+ v = _omap4_wait_target_disable(oh);
+ if (v)
+ pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
+ oh->name);
+
+ return 0;
}
/**
@@ -749,7 +833,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
ohii = &oh->mpu_irqs[i++];
} while (ohii->irq != -1);
- return i;
+ return i-1;
}
/**
@@ -772,7 +856,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
ohdi = &oh->sdma_reqs[i++];
} while (ohdi->dma_req != -1);
- return i;
+ return i-1;
}
/**
@@ -795,7 +879,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
mem = &os->addr[i++];
} while (mem->pa_start != mem->pa_end);
- return i;
+ return i-1;
}
/**
@@ -1153,36 +1237,6 @@ static int _wait_target_ready(struct omap_hwmod *oh)
}
/**
- * _wait_target_disable - wait for a module to be disabled
- * @oh: struct omap_hwmod *
- *
- * Wait for a module @oh to enter slave idle. Returns 0 if the module
- * does not have an IDLEST bit or if the module successfully enters
- * slave idle; otherwise, pass along the return value of the
- * appropriate *_cm*_wait_module_idle() function.
- */
-static int _wait_target_disable(struct omap_hwmod *oh)
-{
- /* TODO: For now just handle OMAP4+ */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- return 0;
-
- if (!oh)
- return -EINVAL;
-
- if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
- return 0;
-
- if (oh->flags & HWMOD_NO_IDLEST)
- return 0;
-
- return omap4_cminst_wait_module_idle(oh->clkdm->prcm_partition,
- oh->clkdm->cm_inst,
- oh->clkdm->clkdm_offs,
- oh->prcm.omap4.clkctrl_offs);
-}
-
-/**
* _lookup_hardreset - fill register bit info for this hwmod/reset line
* @oh: struct omap_hwmod *
* @name: name of the reset line in the context of this hwmod
@@ -1441,6 +1495,25 @@ static int _enable(struct omap_hwmod *oh)
pr_debug("omap_hwmod: %s: enabling\n", oh->name);
+ /*
+ * hwmods with HWMOD_INIT_NO_IDLE flag set are left
+ * in enabled state at init.
+ * Now that someone is really trying to enable them,
+ * just ensure that the hwmod mux is set.
+ */
+ if (oh->_int_flags & _HWMOD_SKIP_ENABLE) {
+ /*
+ * If the caller has mux data populated, do the mux'ing
+ * which wouldn't have been done as part of the _enable()
+ * done during setup.
+ */
+ if (oh->mux)
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+
+ oh->_int_flags &= ~_HWMOD_SKIP_ENABLE;
+ return 0;
+ }
+
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) {
@@ -1524,8 +1597,6 @@ static int _enable(struct omap_hwmod *oh)
*/
static int _idle(struct omap_hwmod *oh)
{
- int ret;
-
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
@@ -1537,11 +1608,9 @@ static int _idle(struct omap_hwmod *oh)
if (oh->class->sysc)
_idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
- _disable_module(oh);
- ret = _wait_target_disable(oh);
- if (ret)
- pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
- oh->name);
+
+ _omap4_disable_module(oh);
+
/*
* The module must be in idle mode before disabling any parents
* clocks. Otherwise, the parent clock might be disabled before
@@ -1642,11 +1711,7 @@ static int _shutdown(struct omap_hwmod *oh)
if (oh->_state == _HWMOD_STATE_ENABLED) {
_del_initiator_dep(oh, mpu_oh);
/* XXX what about the other system initiators here? dma, dsp */
- _disable_module(oh);
- ret = _wait_target_disable(oh);
- if (ret)
- pr_warn("omap_hwmod: %s: _wait_target_disable failed\n",
- oh->name);
+ _omap4_disable_module(oh);
_disable_clocks(oh);
if (oh->clkdm)
clkdm_hwmod_disable(oh->clkdm, oh);
@@ -1744,8 +1809,10 @@ static int _setup(struct omap_hwmod *oh, void *data)
* it should be set by the core code as a runtime flag during startup
*/
if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
- (postsetup_state == _HWMOD_STATE_IDLE))
+ (postsetup_state == _HWMOD_STATE_IDLE)) {
+ oh->_int_flags |= _HWMOD_SKIP_ENABLE;
postsetup_state = _HWMOD_STATE_ENABLED;
+ }
if (postsetup_state == _HWMOD_STATE_IDLE)
_idle(oh);
@@ -2416,6 +2483,7 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
v = oh->_sysc_cache;
_enable_wakeup(oh, &v);
_write_sysconfig(v, oh);
+ _set_idle_ioring_wakeup(oh, true);
spin_unlock_irqrestore(&oh->_lock, flags);
return 0;
@@ -2446,6 +2514,7 @@ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
v = oh->_sysc_cache;
_disable_wakeup(oh, &v);
_write_sysconfig(v, oh);
+ _set_idle_ioring_wakeup(oh, false);
spin_unlock_irqrestore(&oh->_lock, flags);
return 0;
@@ -2662,3 +2731,57 @@ int omap_hwmod_no_setup_reset(struct omap_hwmod *oh)
return 0;
}
+
+/**
+ * omap_hwmod_pad_route_irq - route an I/O pad wakeup to a particular MPU IRQ
+ * @oh: struct omap_hwmod * containing hwmod mux entries
+ * @pad_idx: array index in oh->mux of the hwmod mux entry to route wakeup
+ * @irq_idx: the hwmod mpu_irqs array index of the IRQ to trigger on wakeup
+ *
+ * When an I/O pad wakeup arrives for the dynamic or wakeup hwmod mux
+ * entry number @pad_idx for the hwmod @oh, trigger the interrupt
+ * service routine for the hwmod's mpu_irqs array index @irq_idx. If
+ * this function is not called for a given pad_idx, then the ISR
+ * associated with @oh's first MPU IRQ will be triggered when an I/O
+ * pad wakeup occurs on that pad. Note that @pad_idx is the index of
+ * the _dynamic or wakeup_ entry: if there are other entries not
+ * marked with OMAP_DEVICE_PAD_WAKEUP or OMAP_DEVICE_PAD_REMUX, these
+ * entries are NOT COUNTED in the dynamic pad index. This function
+ * must be called separately for each pad that requires its interrupt
+ * to be re-routed this way. Returns -EINVAL if there is an argument
+ * problem or if @oh does not have hwmod mux entries or MPU IRQs;
+ * returns -ENOMEM if memory cannot be allocated; or 0 upon success.
+ *
+ * XXX This function interface is fragile. Rather than using array
+ * indexes, which are subject to unpredictable change, it should be
+ * using hwmod IRQ names, and some other stable key for the hwmod mux
+ * pad records.
+ */
+int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx)
+{
+ int nr_irqs;
+
+ might_sleep();
+
+ if (!oh || !oh->mux || !oh->mpu_irqs || pad_idx < 0 ||
+ pad_idx >= oh->mux->nr_pads_dynamic)
+ return -EINVAL;
+
+ /* Check the number of available mpu_irqs */
+ for (nr_irqs = 0; oh->mpu_irqs[nr_irqs].irq >= 0; nr_irqs++)
+ ;
+
+ if (irq_idx >= nr_irqs)
+ return -EINVAL;
+
+ if (!oh->mux->irqs) {
+ /* XXX What frees this? */
+ oh->mux->irqs = kzalloc(sizeof(int) * oh->mux->nr_pads_dynamic,
+ GFP_KERNEL);
+ if (!oh->mux->irqs)
+ return -ENOMEM;
+ }
+ oh->mux->irqs[pad_idx] = irq_idx;
+
+ return 0;
+}
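Taken together with the omap_hwmod_enable_wakeup() change above (which now also flags the dynamic pads via _set_idle_ioring_wakeup()), board or integration code could wire an I/O pad wakeup to a specific IRQ roughly as sketched below. The "uart3" name and the index values are examples only, not something this patch mandates.

#include <linux/errno.h>
#include <linux/init.h>
#include <plat/omap_hwmod.h>

/* Sketch: let UART3's first dynamic wakeup pad fire its first MPU IRQ */
static int __init example_uart3_io_wakeup(void)
{
	struct omap_hwmod *oh;
	int r;

	oh = omap_hwmod_lookup("uart3");
	if (!oh)
		return -ENODEV;

	r = omap_hwmod_enable_wakeup(oh);	/* also sets the idle-time pad flags */
	if (r)
		return r;

	/* route dynamic pad 0 to mpu_irqs[0] when an I/O wakeup arrives */
	return omap_hwmod_pad_route_irq(oh, 0, 0);
}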
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index 6d720621352..a5409ce3f32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -875,6 +875,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -899,7 +903,7 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2420_dss_slaves),
.masters = omap2420_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2420_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -939,6 +943,7 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.slaves = omap2420_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -961,6 +966,10 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
&omap2420_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -972,6 +981,8 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2420_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -981,7 +992,7 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1001,7 +1012,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
static struct omap_hwmod omap2420_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index a2580d01c3f..c4f56cb60d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -942,6 +942,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "tv_clk", .clk = "dss_54m_fck" },
{ .role = "sys_clk", .clk = "dss2_fck" },
};
@@ -966,7 +970,7 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap2430_dss_slaves),
.masters = omap2430_dss_masters,
.masters_cnt = ARRAY_SIZE(omap2430_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
/* l4_core -> dss_dispc */
@@ -1000,6 +1004,7 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.slaves = omap2430_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/* l4_core -> dss_rfbi */
@@ -1016,6 +1021,10 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
&omap2430_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1027,6 +1036,8 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.module_offs = CORE_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap2430_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1036,7 +1047,7 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_venc_hwmod,
- .clk = "dss_54m_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.flags = OCPIF_SWSUP_IDLE,
.user = OCP_USER_MPU | OCP_USER_SDMA,
@@ -1050,7 +1061,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
static struct omap_hwmod omap2430_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_fck",
+ .main_clk = "dss_54m_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c451729d289..c11273da5dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -11,6 +11,7 @@
#include <plat/omap_hwmod.h>
#include <plat/serial.h>
#include <plat/dma.h>
+#include <plat/common.h>
#include <mach/irqs.h>
@@ -43,13 +44,15 @@ static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dss_hwmod_class = {
.name = "dss",
.sysc = &omap2_dss_sysc,
+ .reset = omap_dss_reset,
};
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index bc9035ec87f..5324e8d93bc 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -84,6 +84,8 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod;
static struct omap_hwmod omap3xxx_mcbsp5_hwmod;
static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod;
static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod;
+static struct omap_hwmod omap3xxx_usb_host_hs_hwmod;
+static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
@@ -164,6 +166,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod;
static struct omap_hwmod omap3xxx_uart2_hwmod;
static struct omap_hwmod omap3xxx_uart3_hwmod;
static struct omap_hwmod omap3xxx_uart4_hwmod;
+static struct omap_hwmod am35xx_uart4_hwmod;
static struct omap_hwmod omap3xxx_usbhsotg_hwmod;
/* l3_core -> usbhsotg interface */
@@ -299,6 +302,23 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* AM35xx: L4 CORE -> UART4 interface */
+static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = {
+ {
+ .pa_start = OMAP3_UART4_AM35XX_BASE,
+ .pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+};
+
+static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &am35xx_uart4_hwmod,
+ .clk = "uart4_ick",
+ .addr = am35xx_uart4_addr_space,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
/* L4 CORE -> I2C1 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
.master = &omap3xxx_l4_core_hwmod,
@@ -1162,6 +1182,7 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = {
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -1309,6 +1330,39 @@ static struct omap_hwmod omap3xxx_uart4_hwmod = {
.class = &omap2_uart_class,
};
+static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = {
+ { .irq = INT_35XX_UART4_IRQ, },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = {
+ { .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, },
+ { .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, },
+};
+
+static struct omap_hwmod_ocp_if *am35xx_uart4_slaves[] = {
+ &am35xx_l4_core__uart4,
+};
+
+static struct omap_hwmod am35xx_uart4_hwmod = {
+ .name = "uart4",
+ .mpu_irqs = am35xx_uart4_mpu_irqs,
+ .sdma_reqs = am35xx_uart4_sdma_reqs,
+ .main_clk = "uart4_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_UART4_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_EN_UART4_SHIFT,
+ },
+ },
+ .slaves = am35xx_uart4_slaves,
+ .slaves_cnt = ARRAY_SIZE(am35xx_uart4_slaves),
+ .class = &omap2_uart_class,
+};
+
static struct omap_hwmod_class i2c_class = {
.name = "i2c",
.sysc = &i2c_sysc,
@@ -1369,9 +1423,14 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
- { .role = "tv_clk", .clk = "dss_tv_fck" },
- { .role = "video_clk", .clk = "dss_96m_fck" },
+ /*
+ * The DSS HW needs all DSS clocks enabled during reset. The dss_core
+ * driver does not use these clocks.
+ */
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
+ { .role = "tv_clk", .clk = "dss_tv_fck" },
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
@@ -1394,11 +1453,12 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
.masters = omap3xxx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
- .flags = HWMOD_NO_IDLEST,
+ .flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
@@ -1456,6 +1516,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.slaves = omap3xxx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
+ .dev_attr = &omap2_3_dss_dispc_dev_attr
};
/*
@@ -1486,6 +1547,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dsi1_hwmod,
+ .clk = "dss_ick",
.addr = omap3xxx_dss_dsi1_addrs,
.fw = {
.omap2 = {
@@ -1502,6 +1564,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
&omap3xxx_l4_core__dss_dsi1,
};
+static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
+ { .role = "sys_clk", .clk = "dss2_alwon_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
@@ -1514,6 +1580,8 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_dsi1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
.slaves = omap3xxx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1540,6 +1608,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
&omap3xxx_l4_core__dss_rfbi,
};
+static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
+ { .role = "ick", .clk = "dss_ick" },
+};
+
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
@@ -1551,6 +1623,8 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_rfbi_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap3xxx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1560,7 +1634,7 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_venc_hwmod,
- .clk = "dss_tv_fck",
+ .clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
@@ -1578,10 +1652,15 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
&omap3xxx_l4_core__dss_venc,
};
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+ /* required only on OMAP3430 */
+ { .role = "tv_dac_clk", .clk = "dss_96m_fck" },
+};
+
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
- .main_clk = "dss1_alwon_fck",
+ .main_clk = "dss_tv_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1589,6 +1668,8 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.module_offs = OMAP3430_DSS_MOD,
},
},
+ .opt_clks = dss_venc_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
.slaves = omap3xxx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
.flags = HWMOD_NO_IDLEST,
@@ -1609,7 +1690,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
static struct omap_hwmod omap3xxx_i2c1_hwmod = {
.name = "i2c1",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap2_i2c1_mpu_irqs,
.sdma_reqs = omap2_i2c1_sdma_reqs,
.main_clk = "i2c1_fck",
@@ -1643,7 +1724,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
static struct omap_hwmod omap3xxx_i2c2_hwmod = {
.name = "i2c2",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap2_i2c2_mpu_irqs,
.sdma_reqs = omap2_i2c2_sdma_reqs,
.main_clk = "i2c2_fck",
@@ -1688,7 +1769,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
static struct omap_hwmod omap3xxx_i2c3_hwmod = {
.name = "i2c3",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = i2c3_mpu_irqs,
.sdma_reqs = i2c3_sdma_reqs,
.main_clk = "i2c3_fck",
@@ -3045,7 +3126,35 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
-static struct omap_hwmod omap3xxx_mmc1_hwmod = {
+/* See 35xx errata 2.1.1.128 in SPRZ278F */
+static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = {
+ .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT |
+ OMAP_HSMMC_BROKEN_MULTIBLOCK_READ),
+};
+
+static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
+ .name = "mmc1",
+ .mpu_irqs = omap34xx_mmc1_mpu_irqs,
+ .sdma_reqs = omap34xx_mmc1_sdma_reqs,
+ .opt_clks = omap34xx_mmc1_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
+ .main_clk = "mmchs1_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MMC1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
+ },
+ },
+ .dev_attr = &mmc1_pre_es3_dev_attr,
+ .slaves = omap3xxx_mmc1_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves),
+ .class = &omap34xx_mmc_class,
+};
+
+static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
.name = "mmc1",
.mpu_irqs = omap34xx_mmc1_mpu_irqs,
.sdma_reqs = omap34xx_mmc1_sdma_reqs,
@@ -3088,7 +3197,34 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
&omap3xxx_l4_core__mmc2,
};
-static struct omap_hwmod omap3xxx_mmc2_hwmod = {
+/* See 35xx errata 2.1.1.128 in SPRZ278F */
+static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = {
+ .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
+};
+
+static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
+ .name = "mmc2",
+ .mpu_irqs = omap34xx_mmc2_mpu_irqs,
+ .sdma_reqs = omap34xx_mmc2_sdma_reqs,
+ .opt_clks = omap34xx_mmc2_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
+ .main_clk = "mmchs2_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_EN_MMC2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
+ },
+ },
+ .dev_attr = &mmc2_pre_es3_dev_attr,
+ .slaves = omap3xxx_mmc2_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves),
+ .class = &omap34xx_mmc_class,
+};
+
+static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
.name = "mmc2",
.mpu_irqs = omap34xx_mmc2_mpu_irqs,
.sdma_reqs = omap34xx_mmc2_sdma_reqs,
@@ -3150,13 +3286,223 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
.class = &omap34xx_mmc_class,
};
+/*
+ * 'usb_host_hs' class
+ * high-speed multi-port usb host controller
+ */
+static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = {
+ .master = &omap3xxx_usb_host_hs_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "core_l3_ick",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = {
+ .name = "usb_host_hs",
+ .sysc = &omap3xxx_usb_host_hs_sysc,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_masters[] = {
+ &omap3xxx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = {
+ {
+ .name = "uhh",
+ .pa_start = 0x48064000,
+ .pa_end = 0x480643ff,
+ .flags = ADDR_TYPE_RT
+ },
+ {
+ .name = "ohci",
+ .pa_start = 0x48064400,
+ .pa_end = 0x480647ff,
+ },
+ {
+ .name = "ehci",
+ .pa_start = 0x48064800,
+ .pa_end = 0x48064cff,
+ },
+ {}
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_usb_host_hs_hwmod,
+ .clk = "usbhost_ick",
+ .addr = omap3xxx_usb_host_hs_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_slaves[] = {
+ &omap3xxx_l4_core__usb_host_hs,
+};
+
+static struct omap_hwmod_opt_clk omap3xxx_usb_host_hs_opt_clks[] = {
+ { .role = "ehci_logic_fck", .clk = "usbhost_120m_fck", },
+};
+
+static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
+ { .name = "ohci-irq", .irq = 76 },
+ { .name = "ehci-irq", .irq = 77 },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
+ .name = "usb_host_hs",
+ .class = &omap3xxx_usb_host_hs_hwmod_class,
+ .clkdm_name = "l3_init_clkdm",
+ .mpu_irqs = omap3xxx_usb_host_hs_irqs,
+ .main_clk = "usbhost_48m_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430ES2_USBHOST_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT,
+ .idlest_stdby_bit = OMAP3430ES2_ST_USBHOST_STDBY_SHIFT,
+ },
+ },
+ .opt_clks = omap3xxx_usb_host_hs_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_opt_clks),
+ .slaves = omap3xxx_usb_host_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_slaves),
+ .masters = omap3xxx_usb_host_hs_masters,
+ .masters_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_masters),
+
+ /*
+ * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
+ * id: i660
+ *
+ * Description:
+ * In the following configuration :
+ * - USBHOST module is set to smart-idle mode
+ * - PRCM asserts idle_req to the USBHOST module (this typically
+ * happens when the system is going to a low power mode: all ports
+ * have been suspended, the master part of the USBHOST module has
+ * entered the standby state, and SW has cut the functional clocks)
+ * - a USBHOST interrupt occurs before the module is able to answer
+ * idle_ack, typically a remote wakeup IRQ.
+ * Then the USBHOST module enters a deadlock situation where it
+ * is no longer accessible or functional.
+ *
+ * Workaround:
+ * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
+ */
+
+ /*
+ * Errata: USB host EHCI may stall when entering smart-standby mode
+ * Id: i571
+ *
+ * Description:
+ * When the USBHOST module is set to smart-standby mode, and when it is
+ * ready to enter the standby state (i.e. all ports are suspended and
+ * all attached devices are in suspend mode), then it can wrongly assert
+ * the Mstandby signal too early while there are still some residual OCP
+ * transactions ongoing. If this condition occurs, the internal state
+ * machine may go to an undefined state and the USB link may be stuck
+ * upon the next resume.
+ *
+ * Workaround:
+ * Don't use smart standby; use only force standby,
+ * hence HWMOD_SWSUP_MSTANDBY
+ */
+
+ /*
+ * During system boot, if the hwmod framework resets the module,
+ * the module is left with smart-idle settings, which can lead to the
+ * deadlock described above (Errata Id: i660); so don't reset the
+ * module during boot. Use HWMOD_INIT_NO_RESET.
+ */
+
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
+ HWMOD_INIT_NO_RESET,
+};
+
+/*
+ * 'usb_tll_hs' class
+ * usb_tll_hs module is the adapter on the usb_host_hs ports
+ */
+static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = {
+ .name = "usb_tll_hs",
+ .sysc = &omap3xxx_usb_tll_hs_sysc,
+};
+
+static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
+ { .name = "tll-irq", .irq = 78 },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = {
+ {
+ .name = "tll",
+ .pa_start = 0x48062000,
+ .pa_end = 0x48062fff,
+ .flags = ADDR_TYPE_RT
+ },
+ {}
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &omap3xxx_usb_tll_hs_hwmod,
+ .clk = "usbtll_ick",
+ .addr = omap3xxx_usb_tll_hs_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_usb_tll_hs_slaves[] = {
+ &omap3xxx_l4_core__usb_tll_hs,
+};
+
+static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
+ .name = "usb_tll_hs",
+ .class = &omap3xxx_usb_tll_hs_hwmod_class,
+ .clkdm_name = "l3_init_clkdm",
+ .mpu_irqs = omap3xxx_usb_tll_hs_irqs,
+ .main_clk = "usbtll_fck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = CORE_MOD,
+ .prcm_reg_id = 3,
+ .module_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
+ .idlest_reg_id = 3,
+ .idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT,
+ },
+ },
+ .slaves = omap3xxx_usb_tll_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_tll_hs_slaves),
+};
+
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l3_main_hwmod,
&omap3xxx_l4_core_hwmod,
&omap3xxx_l4_per_hwmod,
&omap3xxx_l4_wkup_hwmod,
- &omap3xxx_mmc1_hwmod,
- &omap3xxx_mmc2_hwmod,
&omap3xxx_mmc3_hwmod,
&omap3xxx_mpu_hwmod,
@@ -3171,12 +3517,12 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_timer9_hwmod,
&omap3xxx_timer10_hwmod,
&omap3xxx_timer11_hwmod,
- &omap3xxx_timer12_hwmod,
&omap3xxx_wd_timer2_hwmod,
&omap3xxx_uart1_hwmod,
&omap3xxx_uart2_hwmod,
&omap3xxx_uart3_hwmod,
+
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
@@ -3218,20 +3564,38 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
NULL,
};
+/* GP-only hwmods */
+static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = {
+ &omap3xxx_timer12_hwmod,
+ NULL
+};
+
/* 3430ES1-only hwmods */
static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
- &omap3xxx_iva_hwmod,
&omap3430es1_dss_core_hwmod,
- &omap3xxx_mailbox_hwmod,
NULL
};
/* 3430ES2+-only hwmods */
static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
- &omap3xxx_iva_hwmod,
&omap3xxx_dss_core_hwmod,
&omap3xxx_usbhsotg_hwmod,
- &omap3xxx_mailbox_hwmod,
+ &omap3xxx_usb_host_hs_hwmod,
+ &omap3xxx_usb_tll_hs_hwmod,
+ NULL
+};
+
+/* <= 3430ES3-only hwmods */
+static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = {
+ &omap3xxx_pre_es3_mmc1_hwmod,
+ &omap3xxx_pre_es3_mmc2_hwmod,
+ NULL
+};
+
+/* 3430ES3+-only hwmods */
+static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = {
+ &omap3xxx_es3plus_mmc1_hwmod,
+ &omap3xxx_es3plus_mmc2_hwmod,
NULL
};
@@ -3253,12 +3617,21 @@ static __initdata struct omap_hwmod *omap36xx_hwmods[] = {
&omap36xx_sr2_hwmod,
&omap3xxx_usbhsotg_hwmod,
&omap3xxx_mailbox_hwmod,
+ &omap3xxx_usb_host_hs_hwmod,
+ &omap3xxx_usb_tll_hs_hwmod,
+ &omap3xxx_es3plus_mmc1_hwmod,
+ &omap3xxx_es3plus_mmc2_hwmod,
NULL
};
static __initdata struct omap_hwmod *am35xx_hwmods[] = {
&omap3xxx_dss_core_hwmod, /* XXX ??? */
&am35xx_usbhsotg_hwmod,
+ &am35xx_uart4_hwmod,
+ &omap3xxx_usb_host_hs_hwmod,
+ &omap3xxx_usb_tll_hs_hwmod,
+ &omap3xxx_es3plus_mmc1_hwmod,
+ &omap3xxx_es3plus_mmc2_hwmod,
NULL
};
@@ -3273,6 +3646,13 @@ int __init omap3xxx_hwmod_init(void)
if (r < 0)
return r;
+ /* Register GP-only hwmods. */
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ r = omap_hwmod_register(omap3xxx_gp_hwmods);
+ if (r < 0)
+ return r;
+ }
+
rev = omap_rev();
/*
@@ -3311,6 +3691,21 @@ int __init omap3xxx_hwmod_init(void)
h = omap3430es2plus_hwmods;
};
+ if (h) {
+ r = omap_hwmod_register(h);
+ if (r < 0)
+ return r;
+ }
+
+ h = NULL;
+ if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 ||
+ rev == OMAP3430_REV_ES2_1) {
+ h = omap3430_pre_es3_hwmods;
+ } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 ||
+ rev == OMAP3430_REV_ES3_1_2) {
+ h = omap3430_es3plus_hwmods;
+ };
+
if (h)
r = omap_hwmod_register(h);
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7695e5d4331..f9f15108176 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -30,6 +30,7 @@
#include <plat/mmc.h>
#include <plat/i2c.h>
#include <plat/dmtimer.h>
+#include <plat/common.h>
#include "omap_hwmod_common_data.h"
@@ -69,6 +70,8 @@ static struct omap_hwmod omap44xx_mmc2_hwmod;
static struct omap_hwmod omap44xx_mpu_hwmod;
static struct omap_hwmod omap44xx_mpu_private_hwmod;
static struct omap_hwmod omap44xx_usb_otg_hs_hwmod;
+static struct omap_hwmod omap44xx_usb_host_hs_hwmod;
+static struct omap_hwmod omap44xx_usb_tll_hs_hwmod;
/*
* Interconnects omap_hwmod structures
@@ -1187,6 +1190,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
.name = "dss",
.sysc = &omap44xx_dss_sysc,
+ .reset = omap_dss_reset,
};
/* dss */
@@ -1240,12 +1244,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
{ .role = "sys_clk", .clk = "dss_sys_clk" },
{ .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "dss_clk", .clk = "dss_dss_clk" },
- { .role = "video_clk", .clk = "dss_48mhz_clk" },
+ { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
};
static struct omap_hwmod omap44xx_dss_hwmod = {
.name = "dss_core",
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap44xx_dss_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.main_clk = "dss_dss_clk",
@@ -1325,6 +1329,11 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
{ }
};
+static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
+ .manager_count = 3,
+ .has_framedonetv_irq = 1
+};
+
/* l4_per -> dss_dispc */
static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
.master = &omap44xx_l4_per_hwmod,
@@ -1340,12 +1349,6 @@ static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
&omap44xx_l4_per__dss_dispc,
};
-static struct omap_hwmod_opt_clk dss_dispc_opt_clks[] = {
- { .role = "sys_clk", .clk = "dss_sys_clk" },
- { .role = "tv_clk", .clk = "dss_tv_clk" },
- { .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
-};
-
static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap44xx_dispc_hwmod_class,
@@ -1359,10 +1362,9 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
},
},
- .opt_clks = dss_dispc_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(dss_dispc_opt_clks),
.slaves = omap44xx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves),
+ .dev_attr = &omap44xx_dss_dispc_dev_attr
};
/*
@@ -1624,7 +1626,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_hdmi_irqs,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_48mhz_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -1785,7 +1787,7 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap44xx_venc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
- .main_clk = "dss_dss_clk",
+ .main_clk = "dss_tv_clk",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -2246,6 +2248,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = {
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
+ .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -2300,7 +2303,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
.name = "i2c1",
.class = &omap44xx_i2c_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap44xx_i2c1_irqs,
.sdma_reqs = omap44xx_i2c1_sdma_reqs,
.main_clk = "i2c1_fck",
@@ -2356,7 +2359,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
.name = "i2c2",
.class = &omap44xx_i2c_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap44xx_i2c2_irqs,
.sdma_reqs = omap44xx_i2c2_sdma_reqs,
.main_clk = "i2c2_fck",
@@ -2412,7 +2415,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
.name = "i2c3",
.class = &omap44xx_i2c_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap44xx_i2c3_irqs,
.sdma_reqs = omap44xx_i2c3_sdma_reqs,
.main_clk = "i2c3_fck",
@@ -2468,7 +2471,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {
.name = "i2c4",
.class = &omap44xx_i2c_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG,
+ .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap44xx_i2c4_irqs,
.sdma_reqs = omap44xx_i2c4_sdma_reqs,
.main_clk = "i2c4_fck",
@@ -5276,6 +5279,207 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
.slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer3_slaves),
};
+/*
+ * 'usb_host_hs' class
+ * high-speed multi-port usb host controller
+ */
+static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = {
+ .master = &omap44xx_usb_host_hs_hwmod,
+ .slave = &omap44xx_l3_main_2_hwmod,
+ .clk = "l3_div_ck",
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = {
+ .name = "usb_host_hs",
+ .sysc = &omap44xx_usb_host_hs_sysc,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = {
+ &omap44xx_usb_host_hs__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = {
+ {
+ .name = "uhh",
+ .pa_start = 0x4a064000,
+ .pa_end = 0x4a0647ff,
+ .flags = ADDR_TYPE_RT
+ },
+ {
+ .name = "ohci",
+ .pa_start = 0x4a064800,
+ .pa_end = 0x4a064bff,
+ },
+ {
+ .name = "ehci",
+ .pa_start = 0x4a064c00,
+ .pa_end = 0x4a064fff,
+ },
+ {}
+};
+
+static struct omap_hwmod_irq_info omap44xx_usb_host_hs_irqs[] = {
+ { .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START },
+ { .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usb_host_hs_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usb_host_hs_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = {
+ &omap44xx_l4_cfg__usb_host_hs,
+};
+
+static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
+ .name = "usb_host_hs",
+ .class = &omap44xx_usb_host_hs_hwmod_class,
+ .clkdm_name = "l3_init_clkdm",
+ .main_clk = "usb_host_hs_fck",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET,
+ .context_offs = OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .mpu_irqs = omap44xx_usb_host_hs_irqs,
+ .slaves = omap44xx_usb_host_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_slaves),
+ .masters = omap44xx_usb_host_hs_masters,
+ .masters_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_masters),
+
+ /*
+ * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
+ * id: i660
+ *
+ * Description:
+ * In the following configuration :
+ * - USBHOST module is set to smart-idle mode
+ * - PRCM asserts idle_req to the USBHOST module (this typically
+ * happens when the system is going to a low power mode: all ports
+ * have been suspended, the master part of the USBHOST module has
+ * entered the standby state, and SW has cut the functional clocks)
+ * - a USBHOST interrupt occurs before the module is able to answer
+ * idle_ack, typically a remote wakeup IRQ.
+ * Then the USBHOST module enters a deadlock situation where it
+ * is no longer accessible or functional.
+ *
+ * Workaround:
+ * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
+ */
+
+ /*
+ * Errata: USB host EHCI may stall when entering smart-standby mode
+ * Id: i571
+ *
+ * Description:
+ * When the USBHOST module is set to smart-standby mode, and when it is
+ * ready to enter the standby state (i.e. all ports are suspended and
+ * all attached devices are in suspend mode), then it can wrongly assert
+ * the Mstandby signal too early while there are still some residual OCP
+ * transactions ongoing. If this condition occurs, the internal state
+ * machine may go to an undefined state and the USB link may be stuck
+ * upon the next resume.
+ *
+ * Workaround:
+ * Don't use smart standby; use only force standby,
+ * hence HWMOD_SWSUP_MSTANDBY
+ */
+
+ /*
+ * During system boot, if the hwmod framework resets the module,
+ * the module is left with smart-idle settings, which can lead to the
+ * deadlock described above (Errata Id: i660); so don't reset the
+ * module during boot. Use HWMOD_INIT_NO_RESET.
+ */
+
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
+ HWMOD_INIT_NO_RESET,
+};
+
+/*
+ * 'usb_tll_hs' class
+ * usb_tll_hs module is the adapter on the usb_host_hs ports
+ */
+static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = {
+ .name = "usb_tll_hs",
+ .sysc = &omap44xx_usb_tll_hs_sysc,
+};
+
+static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = {
+ { .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = {
+ {
+ .name = "tll",
+ .pa_start = 0x4a062000,
+ .pa_end = 0x4a063fff,
+ .flags = ADDR_TYPE_RT
+ },
+ {}
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = {
+ .master = &omap44xx_l4_cfg_hwmod,
+ .slave = &omap44xx_usb_tll_hs_hwmod,
+ .clk = "l4_div_ck",
+ .addr = omap44xx_usb_tll_hs_addrs,
+ .user = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = {
+ &omap44xx_l4_cfg__usb_tll_hs,
+};
+
+static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = {
+ .name = "usb_tll_hs",
+ .class = &omap44xx_usb_tll_hs_hwmod_class,
+ .clkdm_name = "l3_init_clkdm",
+ .main_clk = "usb_tll_hs_ick",
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET,
+ .context_offs = OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET,
+ .modulemode = MODULEMODE_HWCTRL,
+ },
+ },
+ .mpu_irqs = omap44xx_usb_tll_hs_irqs,
+ .slaves = omap44xx_usb_tll_hs_slaves,
+ .slaves_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_slaves),
+};
+
static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
/* dmm class */
@@ -5415,13 +5619,16 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
&omap44xx_uart3_hwmod,
&omap44xx_uart4_hwmod,
+ /* usb host class */
+ &omap44xx_usb_host_hs_hwmod,
+ &omap44xx_usb_tll_hs_hwmod,
+
/* usb_otg_hs class */
&omap44xx_usb_otg_hs_hwmod,
/* wd_timer class */
&omap44xx_wd_timer2_hwmod,
&omap44xx_wd_timer3_hwmod,
-
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index de832ebc93a..51e5418899f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,3 +49,7 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
};
+struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
+ .manager_count = 2,
+ .has_framedonetv_irq = 0
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 39a7c37f458..ad5d8f04c0b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -16,6 +16,8 @@
#include <plat/omap_hwmod.h>
+#include "display.h"
+
/* Common address space across OMAP2xxx */
extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
@@ -111,4 +113,6 @@ extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
extern struct omap_hwmod_class omap2xxx_mcspi_class;
+extern struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr;
+
#endif
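The new omap2_3_dss_dispc_dev_attr (and its OMAP4 counterpart in the 44xx data) travels with the hwmod through the .dev_attr pointer, so a consumer only needs a cast. A hedged sketch follows; the printing function is an example of use, not anything added by this series.

#include <linux/kernel.h>
#include <plat/omap_hwmod.h>
#include "display.h"

/* Sketch: read back the DISPC capabilities attached via .dev_attr */
static void example_print_dispc_caps(struct omap_hwmod *oh)
{
	struct omap_dss_dispc_dev_attr *attr = oh->dev_attr;

	if (!attr)
		return;

	pr_info("dispc: %d managers, framedone-tv irq: %s\n",
		attr->manager_count,
		attr->has_framedonetv_irq ? "yes" : "no");
}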
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 6a66aa5e2a5..d15225ff5c4 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -237,7 +237,7 @@ static int __devexit omap4_l3_remove(struct platform_device *pdev)
static const struct of_device_id l3_noc_match[] = {
{.compatible = "ti,omap4-l3-noc", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, l3_noc_match);
#else
#define l3_noc_match NULL
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 58775e3c847..4c90477e6f8 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -260,3 +260,38 @@ void am35x_set_mode(u8 musb_mode)
omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
}
+
+void ti81xx_musb_phy_power(u8 on)
+{
+ void __iomem *scm_base = NULL;
+ u32 usbphycfg;
+
+ scm_base = ioremap(TI81XX_SCM_BASE, SZ_2K);
+ if (!scm_base) {
+ pr_err("system control module ioremap failed\n");
+ return;
+ }
+
+ usbphycfg = __raw_readl(scm_base + USBCTRL0);
+
+ if (on) {
+ if (cpu_is_ti816x()) {
+ usbphycfg |= TI816X_USBPHY0_NORMAL_MODE;
+ usbphycfg &= ~TI816X_USBPHY_REFCLK_OSC;
+ } else if (cpu_is_ti814x()) {
+ usbphycfg &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN
+ | USBPHY_DPINPUT | USBPHY_DMINPUT);
+ usbphycfg |= (USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN
+ | USBPHY_DPOPBUFCTL | USBPHY_DMOPBUFCTL);
+ }
+ } else {
+ if (cpu_is_ti816x())
+ usbphycfg &= ~TI816X_USBPHY0_NORMAL_MODE;
+ else if (cpu_is_ti814x())
+ usbphycfg |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
+
+ }
+ __raw_writel(usbphycfg, scm_base + USBCTRL0);
+
+ iounmap(scm_base);
+}
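ti81xx_musb_phy_power() above is a plain on/off helper; below is a minimal hedged sketch of how MUSB glue code might bracket controller setup with it. The example_* function names are hypothetical, and the controller init itself is elided.

/* Sketch: power the TI81xx internal USB PHY around controller bring-up */
static int example_ti81xx_musb_enable(void)
{
	ti81xx_musb_phy_power(1);	/* take the PHY out of power-down */

	/* hypothetical MUSB controller init would follow here */

	return 0;
}

static void example_ti81xx_musb_disable(void)
{
	ti81xx_musb_phy_power(0);	/* put the PHY back into power-down */
}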
diff --git a/arch/arm/mach-omap2/opp2xxx.h b/arch/arm/mach-omap2/opp2xxx.h
index 8affc66a92c..8fae534eb15 100644
--- a/arch/arm/mach-omap2/opp2xxx.h
+++ b/arch/arm/mach-omap2/opp2xxx.h
@@ -51,7 +51,7 @@ struct prcm_config {
unsigned long cm_clksel2_pll; /* dpllx1 or x2 out */
unsigned long cm_clksel_mdm; /* modem dividers 2430 only */
unsigned long base_sdrc_rfr; /* base refresh timing for a set */
- unsigned char flags;
+ unsigned short flags;
};
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 1e79bdf313e..1881fe91514 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -18,12 +18,13 @@
#include <plat/omap-pm.h>
#include <plat/omap_device.h>
-#include <plat/common.h>
+#include "common.h"
#include "voltage.h"
#include "powerdomain.h"
#include "clockdomain.h"
#include "pm.h"
+#include "twl-common.h"
static struct omap_device_pm_latency *pm_lats;
@@ -226,11 +227,8 @@ postcore_initcall(omap2_common_pm_init);
static int __init omap2_common_pm_late_init(void)
{
- /* Init the OMAP TWL parameters */
- omap3_twl_init();
- omap4_twl_init();
-
/* Init the voltage layer */
+ omap_pmic_late_init();
omap_voltage_late_init();
/* Initialize the voltages */
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 4e166add2f3..b737b11e449 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -21,6 +21,7 @@ extern void omap_sram_idle(void);
extern int omap3_can_sleep(void);
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
extern int omap3_idle_init(void);
+extern int omap4_idle_init(void);
#if defined(CONFIG_PM_OPP)
extern int omap3_opp_init(void);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index cf0c216132a..b8822f8b289 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -30,7 +30,6 @@
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
-#include <linux/console.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
@@ -42,6 +41,7 @@
#include <plat/dma.h>
#include <plat/board.h>
+#include "common.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
#include "cm2xxx_3xxx.h"
@@ -126,27 +126,11 @@ static void omap2_enter_full_retention(void)
if (omap_irq_pending())
goto no_sleep;
- /* Block console output in case it is on one of the OMAP UARTs */
- if (!is_suspending())
- if (!console_trylock())
- goto no_sleep;
-
- omap_uart_prepare_idle(0);
- omap_uart_prepare_idle(1);
- omap_uart_prepare_idle(2);
-
/* Jump to SRAM suspend code */
omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_POWER));
- omap_uart_resume_idle(2);
- omap_uart_resume_idle(1);
- omap_uart_resume_idle(0);
-
- if (!is_suspending())
- console_unlock();
-
no_sleep:
omap2_gpio_resume_after_idle();
@@ -238,8 +222,6 @@ static int omap2_can_sleep(void)
{
if (omap2_fclks_active())
return 0;
- if (!omap_uart_can_sleep())
- return 0;
if (osc_ck->usecount > 1)
return 0;
if (omap_dma_running())
@@ -290,7 +272,6 @@ static int omap2_pm_suspend(void)
mir1 = omap_readl(0x480fe0a4);
omap_writel(1 << 5, 0x480fe0ac);
- omap_uart_prepare_suspend();
omap2_enter_full_retention();
omap_writel(mir1, 0x480fe0a4);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index efa66494c1e..fc698757892 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -28,7 +28,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/console.h>
#include <trace/events/power.h>
#include <asm/suspend.h>
@@ -36,12 +35,12 @@
#include <plat/sram.h>
#include "clockdomain.h"
#include "powerdomain.h"
-#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>
+#include "common.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
@@ -53,15 +52,6 @@
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
-static inline bool is_suspending(void)
-{
- return (suspend_state != PM_SUSPEND_ON) && console_suspend_enabled;
-}
-#else
-static inline bool is_suspending(void)
-{
- return false;
-}
#endif
/* pm34xx errata defined in pm.h */
@@ -194,7 +184,7 @@ static void omap3_save_secure_ram_context(void)
* that any peripheral wake-up events occurring while attempting to
* clear the PM_WKST_x are detected and cleared.
*/
-static int prcm_clear_mod_irqs(s16 module, u8 regs)
+static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
{
u32 wkst, fclk, iclk, clken;
u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
@@ -206,6 +196,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
wkst = omap2_prm_read_mod_reg(module, wkst_off);
wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
+ wkst &= ~ignore_bits;
if (wkst) {
iclk = omap2_cm_read_mod_reg(module, iclk_off);
fclk = omap2_cm_read_mod_reg(module, fclk_off);
@@ -221,6 +212,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
omap2_prm_write_mod_reg(wkst, module, wkst_off);
wkst = omap2_prm_read_mod_reg(module, wkst_off);
+ wkst &= ~ignore_bits;
c++;
}
omap2_cm_write_mod_reg(iclk, module, iclk_off);
@@ -230,76 +222,35 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
return c;
}
-static int _prcm_int_handle_wakeup(void)
+static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
int c;
- c = prcm_clear_mod_irqs(WKUP_MOD, 1);
- c += prcm_clear_mod_irqs(CORE_MOD, 1);
- c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
- if (omap_rev() > OMAP3430_REV_ES1_0) {
- c += prcm_clear_mod_irqs(CORE_MOD, 3);
- c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
- }
+ c = prcm_clear_mod_irqs(WKUP_MOD, 1,
+ ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));
- return c;
+ return c ? IRQ_HANDLED : IRQ_NONE;
}
-/*
- * PRCM Interrupt Handler
- *
- * The PRM_IRQSTATUS_MPU register indicates if there are any pending
- * interrupts from the PRCM for the MPU. These bits must be cleared in
- * order to clear the PRCM interrupt. The PRCM interrupt handler is
- * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
- * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
- * register indicates that a wake-up event is pending for the MPU and
- * this bit can only be cleared if the all the wake-up events latched
- * in the various PM_WKST_x registers have been cleared. The interrupt
- * handler is implemented using a do-while loop so that if a wake-up
- * event occurred during the processing of the prcm interrupt handler
- * (setting a bit in the corresponding PM_WKST_x register and thus
- * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
- * this would be handled.
- */
-static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
- u32 irqenable_mpu, irqstatus_mpu;
- int c = 0;
-
- irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
- OMAP3_PRM_IRQENABLE_MPU_OFFSET);
- irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- irqstatus_mpu &= irqenable_mpu;
-
- do {
- if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
- OMAP3430_IO_ST_MASK)) {
- c = _prcm_int_handle_wakeup();
-
- /*
- * Is the MPU PRCM interrupt handler racing with the
- * IVA2 PRCM interrupt handler ?
- */
- WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
- "but no wakeup sources are marked\n");
- } else {
- /* XXX we need to expand our PRCM interrupt handler */
- WARN(1, "prcm: WARNING: PRCM interrupt received, but "
- "no code to handle it (%08x)\n", irqstatus_mpu);
- }
-
- omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
-
- irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- irqstatus_mpu &= irqenable_mpu;
+ int c;
- } while (irqstatus_mpu);
+ /*
+ * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
+ * these are handled in a separate handler to avoid acking
+ * I/O events before the mux code has parsed them.
+ */
+ c = prcm_clear_mod_irqs(WKUP_MOD, 1,
+ OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
+ c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
+ c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
+ if (omap_rev() > OMAP3430_REV_ES1_0) {
+ c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
+ c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
+ }
- return IRQ_HANDLED;
+ return c ? IRQ_HANDLED : IRQ_NONE;
}
static void omap34xx_save_context(u32 *save)
@@ -375,20 +326,11 @@ void omap_sram_idle(void)
omap3_enable_io_chain();
}
- /* Block console output in case it is on one of the OMAP UARTs */
- if (!is_suspending())
- if (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)
- if (!console_trylock())
- goto console_still_active;
-
pwrdm_pre_transition();
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
- omap_uart_prepare_idle(2);
- omap_uart_prepare_idle(3);
omap2_gpio_prepare_for_idle(per_going_off);
if (per_next_state == PWRDM_POWER_OFF)
omap3_per_save_context();
@@ -396,8 +338,6 @@ void omap_sram_idle(void)
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
- omap_uart_prepare_idle(0);
- omap_uart_prepare_idle(1);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -446,8 +386,6 @@ void omap_sram_idle(void)
omap3_sram_restore_context();
omap2_sms_restore_context();
}
- omap_uart_resume_idle(0);
- omap_uart_resume_idle(1);
if (core_next_state == PWRDM_POWER_OFF)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
@@ -463,14 +401,8 @@ void omap_sram_idle(void)
omap2_gpio_resume_after_idle();
if (per_prev_state == PWRDM_POWER_OFF)
omap3_per_restore_context();
- omap_uart_resume_idle(2);
- omap_uart_resume_idle(3);
}
- if (!is_suspending())
- console_unlock();
-
-console_still_active:
/* Disable IO-PAD and IO-CHAIN wakeup */
if (omap3_has_io_wakeup() &&
(per_next_state < PWRDM_POWER_ON ||
@@ -484,21 +416,11 @@ console_still_active:
clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}
-int omap3_can_sleep(void)
-{
- if (!omap_uart_can_sleep())
- return 0;
- return 1;
-}
-
static void omap3_pm_idle(void)
{
local_irq_disable();
local_fiq_disable();
- if (!omap3_can_sleep())
- goto out;
-
if (omap_irq_pending() || need_resched())
goto out;
@@ -532,7 +454,6 @@ static int omap3_pm_suspend(void)
goto restore;
}
- omap_uart_prepare_suspend();
omap3_intc_suspend();
omap_sram_idle();
@@ -579,22 +500,27 @@ static int omap3_pm_begin(suspend_state_t state)
{
disable_hlt();
suspend_state = state;
- omap_uart_enable_irqs(0);
+ omap_prcm_irq_prepare();
return 0;
}
static void omap3_pm_end(void)
{
suspend_state = PM_SUSPEND_ON;
- omap_uart_enable_irqs(1);
enable_hlt();
return;
}
+static void omap3_pm_finish(void)
+{
+ omap_prcm_irq_complete();
+}
+
static const struct platform_suspend_ops omap_pm_ops = {
.begin = omap3_pm_begin,
.end = omap3_pm_end,
.enter = omap3_pm_enter,
+ .finish = omap3_pm_finish,
.valid = suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */
@@ -700,10 +626,6 @@ static void __init prcm_setup_regs(void)
OMAP3430_GRPSEL_GPT1_MASK |
OMAP3430_GRPSEL_GPT12_MASK,
WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
- /* For some reason IO doesn't generate wakeup event even if
- * it is selected to mpu wakeup goup */
- omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
- OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
/* Enable PM_WKEN to support DSS LPR */
omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
@@ -880,12 +802,21 @@ static int __init omap3_pm_init(void)
* supervised mode for powerdomains */
prcm_setup_regs();
- ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
- (irq_handler_t)prcm_interrupt_handler,
- IRQF_DISABLED, "prcm", NULL);
+ ret = request_irq(omap_prcm_event_to_irq("wkup"),
+ _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
+
+ if (ret) {
+ pr_err("pm: Failed to request pm_wkup irq\n");
+ goto err1;
+ }
+
+ /* IO interrupt is shared with mux code */
+ ret = request_irq(omap_prcm_event_to_irq("io"),
+ _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
+ omap3_pm_init);
+
if (ret) {
- printk(KERN_ERR "request_irq failed to register for 0x%x\n",
- INT_34XX_PRCM_MPU_IRQ);
+ pr_err("pm: Failed to request pm_io irq\n");
goto err1;
}
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 59a870be839..c264ef7219c 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -1,8 +1,9 @@
/*
* OMAP4 Power Management Routines
*
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
* Rajendra Nayak <rnayak@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,14 +17,17 @@
#include <linux/err.h>
#include <linux/slab.h>
+#include "common.h"
+#include "clockdomain.h"
#include "powerdomain.h"
-#include <mach/omap4-common.h>
+#include "pm.h"
struct power_state {
struct powerdomain *pwrdm;
u32 next_state;
#ifdef CONFIG_SUSPEND
u32 saved_state;
+ u32 saved_logic_state;
#endif
struct list_head node;
};
@@ -33,7 +37,50 @@ static LIST_HEAD(pwrst_list);
#ifdef CONFIG_SUSPEND
static int omap4_pm_suspend(void)
{
- do_wfi();
+ struct power_state *pwrst;
+ int state, ret = 0;
+ u32 cpu_id = smp_processor_id();
+
+ /* Save current powerdomain state */
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
+ pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
+ }
+
+ /* Set targeted power domain states by suspend */
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+ pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
+ }
+
+ /*
+ * For MPUSS to hit power domain retention (CSWR or OSWR),
+ * the CPU0 and CPU1 power domains need to be in OFF or DORMANT
+ * state, since CPU power domain CSWR is not supported by the
+ * hardware on OMAP4. Only the master CPU follows the suspend path;
+ * all other CPUs follow the CPU hotplug path in system-wide suspend.
+ * More details can be found in OMAP4430 TRM section 4.3.4.2.
+ */
+ omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
+
+ /* Restore next powerdomain state */
+ list_for_each_entry(pwrst, &pwrst_list, node) {
+ state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
+ if (state > pwrst->next_state) {
+ pr_info("Powerdomain (%s) didn't enter "
+ "target state %d\n",
+ pwrst->pwrdm->name, pwrst->next_state);
+ ret = -1;
+ }
+ omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
+ pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
+ }
+ if (ret)
+ pr_crit("Could not enter target state in pm_suspend\n");
+ else
+ pr_info("Successfully put all powerdomains to target state\n");
+
return 0;
}
@@ -73,6 +120,22 @@ static const struct platform_suspend_ops omap_pm_ops = {
};
#endif /* CONFIG_SUSPEND */
+/*
+ * Enable hardware supervised mode for all clockdomains if it's
+ * supported. Initiate sleep transition for other clockdomains, if
+ * they are not used
+ */
+static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
+{
+ if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
+ clkdm_allow_idle(clkdm);
+ else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
+ atomic_read(&clkdm->usecount) == 0)
+ clkdm_sleep(clkdm);
+ return 0;
+}
+
+
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
struct power_state *pwrst;
@@ -80,14 +143,48 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
if (!pwrdm->pwrsts)
return 0;
+ /*
+ * Skip CPU0 and CPU1 power domains. CPU1 is programmed
+ * through hotplug path and CPU0 explicitly programmed
+ * further down in the code path
+ */
+ if (!strncmp(pwrdm->name, "cpu", 3))
+ return 0;
+
+ /*
+ * FIXME: Remove this check when core retention is supported
+ * Only MPUSS power domain is added in the list.
+ */
+ if (strcmp(pwrdm->name, "mpu_pwrdm"))
+ return 0;
+
pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
if (!pwrst)
return -ENOMEM;
+
pwrst->pwrdm = pwrdm;
- pwrst->next_state = PWRDM_POWER_ON;
+ pwrst->next_state = PWRDM_POWER_RET;
list_add(&pwrst->node, &pwrst_list);
- return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state);
+ return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+}
+
+/**
+ * omap_default_idle - OMAP4 default idle routine
+ *
+ * Implements the OMAP4 memory and I/O ordering requirements which can't be
+ * addressed with the default arch_idle() hook. Used by all CPUs when
+ * CONFIG_CPUIDLE is disabled, and by the secondary CPU when it is enabled.
+ */
+static void omap_default_idle(void)
+{
+ local_irq_disable();
+ local_fiq_disable();
+
+ omap_do_wfi();
+
+ local_fiq_enable();
+ local_irq_enable();
}
/**
@@ -99,10 +196,17 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
static int __init omap4_pm_init(void)
{
int ret;
+ struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
+ struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
if (!cpu_is_omap44xx())
return -ENODEV;
+ if (omap_rev() == OMAP4430_REV_ES1_0) {
+ WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+ return -ENODEV;
+ }
+
pr_err("Power Management for TI OMAP4.\n");
ret = pwrdm_for_each(pwrdms_setup, NULL);
@@ -111,10 +215,51 @@ static int __init omap4_pm_init(void)
goto err2;
}
+ /*
+ * The dynamic dependency between MPUSS -> MEMIF and
+ * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
+ * expected. The hardware recommendation is to enable static
+ * dependencies for these to avoid system lock ups or random crashes.
+ */
+ mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
+ emif_clkdm = clkdm_lookup("l3_emif_clkdm");
+ l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
+ l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
+ l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
+ ducati_clkdm = clkdm_lookup("ducati_clkdm");
+ if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
+ (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
+ goto err2;
+
+ ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
+ ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
+ ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
+ if (ret) {
+ pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
+ "wakeup dependency\n");
+ goto err2;
+ }
+
+ ret = omap4_mpuss_init();
+ if (ret) {
+ pr_err("Failed to initialise OMAP4 MPUSS\n");
+ goto err2;
+ }
+
+ (void) clkdm_for_each(clkdms_setup, NULL);
+
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */
+ /* Overwrite the default arch_idle() */
+ pm_idle = omap_default_idle;
+
+ omap4_idle_init();
+
err2:
return ret;
}
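clkdms_setup() above relies on the clockdomain iterator, which invokes the callback once per registered clockdomain and, by the same convention as the powerdomain iterator, stops the walk on a non-zero return (that stop condition is an assumption, not spelled out in this patch). A small sketch of the same pattern, counting clockdomains that support hardware-supervised idle:

/* Illustrative only: walk all clockdomains and count the ones that
 * can be put into hardware-supervised (auto) idle. A real user would
 * wire example_report_auto_clkdms() up as an initcall. */
static int __init example_count_auto_clkdm(struct clockdomain *clkdm,
					   void *user)
{
	int *count = user;

	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		(*count)++;

	return 0;	/* keep iterating */
}

static int __init example_report_auto_clkdms(void)
{
	int count = 0;

	(void) clkdm_for_each(example_count_auto_clkdm, &count);
	pr_info("%d clockdomains support HW-supervised idle\n", count);

	return 0;
}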
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 0363dcb0ef9..5aa5435e3ff 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -4,7 +4,7 @@
/*
* OMAP2/3 PRCM base and module definitions
*
- * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
* Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
@@ -201,6 +201,8 @@
#define OMAP3430_EN_MMC2_SHIFT 25
#define OMAP3430_EN_MMC1_MASK (1 << 24)
#define OMAP3430_EN_MMC1_SHIFT 24
+#define OMAP3430_EN_UART4_MASK (1 << 23)
+#define OMAP3430_EN_UART4_SHIFT 23
#define OMAP3430_EN_MCSPI4_MASK (1 << 21)
#define OMAP3430_EN_MCSPI4_SHIFT 21
#define OMAP3430_EN_MCSPI3_MASK (1 << 20)
@@ -408,6 +410,79 @@
extern void __iomem *prm_base;
extern void __iomem *cm_base;
extern void __iomem *cm2_base;
+
+/**
+ * struct omap_prcm_irq - describes a PRCM interrupt bit
+ * @name: a short name describing the interrupt type, e.g. "wkup" or "io"
+ * @offset: the bit shift of the interrupt inside the IRQ{ENABLE,STATUS} regs
+ * @priority: should this interrupt be handled before @priority=false IRQs?
+ *
+ * Describes interrupt bits inside the PRM_IRQ{ENABLE,STATUS}_MPU* registers.
+ * On systems with multiple PRM MPU IRQ registers, the bitfields read from
+ * the registers are concatenated, so @offset could be > 31 on these systems -
+ * see omap_prcm_irq_handler() for more details. I/O ring interrupts should
+ * have @priority set to true.
+ */
+struct omap_prcm_irq {
+ const char *name;
+ unsigned int offset;
+ bool priority;
+};
+
+/**
+ * struct omap_prcm_irq_setup - PRCM interrupt controller details
+ * @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register
+ * @mask: PRM register offset for the first PRM_IRQENABLE_MPU register
+ * @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers
+ * @nr_irqs: number of entries in the @irqs array
+ * @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs)
+ * @irq: MPU IRQ asserted when a PRCM interrupt arrives
+ * @read_pending_irqs: fn ptr to determine if any PRCM IRQs are pending
+ * @ocp_barrier: fn ptr to force buffered PRM writes to complete
+ * @save_and_clear_irqen: fn ptr to save and clear IRQENABLE regs
+ * @restore_irqen: fn ptr to restore the IRQENABLE regs from a saved mask
+ * @saved_mask: IRQENABLE regs are saved here during suspend
+ * @priority_mask: 1 bit per IRQ, set to 1 if omap_prcm_irq.priority = true
+ * @base_irq: base dynamic IRQ number, returned from irq_alloc_descs() in init
+ * @suspended: set to true after Linux suspend code has called our ->prepare()
+ * @suspend_save_flag: set to true after IRQ masks have been saved and disabled
+ *
+ * @saved_mask, @priority_mask, @base_irq, @suspended, and
+ * @suspend_save_flag are populated dynamically, and are not to be
+ * specified in static initializers.
+ */
+struct omap_prcm_irq_setup {
+ u16 ack;
+ u16 mask;
+ u8 nr_regs;
+ u8 nr_irqs;
+ const struct omap_prcm_irq *irqs;
+ int irq;
+ void (*read_pending_irqs)(unsigned long *events);
+ void (*ocp_barrier)(void);
+ void (*save_and_clear_irqen)(u32 *saved_mask);
+ void (*restore_irqen)(u32 *saved_mask);
+ u32 *saved_mask;
+ u32 *priority_mask;
+ int base_irq;
+ bool suspended;
+ bool suspend_save_flag;
+};
+
+/* OMAP_PRCM_IRQ: convenience macro for creating struct omap_prcm_irq records */
+#define OMAP_PRCM_IRQ(_name, _offset, _priority) { \
+ .name = _name, \
+ .offset = _offset, \
+ .priority = _priority \
+ }
+
+extern void omap_prcm_irq_cleanup(void);
+extern int omap_prcm_register_chain_handler(
+ struct omap_prcm_irq_setup *irq_setup);
+extern int omap_prcm_event_to_irq(const char *event);
+extern void omap_prcm_irq_prepare(void);
+extern void omap_prcm_irq_complete(void);
+
# endif
#endif
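OMAP_PRCM_IRQ() is just a designated-initializer shorthand for filling the array referenced by struct omap_prcm_irq_setup. As a quick sketch, a one-entry table written with the macro next to its open-coded equivalent (values illustrative; the real OMAP3/OMAP4 tables appear later in this series):

/* Illustrative only: the two declarations below are equivalent. */
static const struct omap_prcm_irq example_prcm_irqs[] = {
	OMAP_PRCM_IRQ("io", 9, 1),
};

static const struct omap_prcm_irq example_prcm_irqs_open_coded[] = {
	{ .name = "io", .offset = 9, .priority = 1 },
};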
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 597e2da831b..626acfad719 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -25,8 +25,7 @@
#include <linux/delay.h>
#include <linux/export.h>
-#include <mach/system.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/prcm.h>
#include <plat/irqs.h>
@@ -59,7 +58,7 @@ u32 omap_prcm_get_reset_sources(void)
EXPORT_SYMBOL(omap_prcm_get_reset_sources);
/* Resets clock rates and reboots the system. Only called from system.h */
-static void omap_prcm_arch_reset(char mode, const char *cmd)
+void omap_prcm_restart(char mode, const char *cmd)
{
s16 prcm_offs = 0;
@@ -110,8 +109,6 @@ static void omap_prcm_arch_reset(char mode, const char *cmd)
omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
}
-void (*arch_reset)(char, const char *) = omap_prcm_arch_reset;
-
/**
* omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
* @reg: physical address of module IDLEST register
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.c b/arch/arm/mach-omap2/prcm_mpu44xx.c
index 171fe171a74..ca669b50f39 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.c
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.c
@@ -15,7 +15,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include "prcm_mpu44xx.h"
#include "cm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index f02d87f68e5..c1c4d86a79a 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -1,7 +1,7 @@
/*
* OMAP2/3 PRM module functions
*
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
* Copyright (C) 2010 Nokia Corporation
* Benoît Cousson
* Paul Walmsley
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/cpu.h>
#include <plat/prcm.h>
@@ -27,6 +27,24 @@
#include "prm-regbits-24xx.h"
#include "prm-regbits-34xx.h"
+static const struct omap_prcm_irq omap3_prcm_irqs[] = {
+ OMAP_PRCM_IRQ("wkup", 0, 0),
+ OMAP_PRCM_IRQ("io", 9, 1),
+};
+
+static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
+ .ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
+ .mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET,
+ .nr_regs = 1,
+ .irqs = omap3_prcm_irqs,
+ .nr_irqs = ARRAY_SIZE(omap3_prcm_irqs),
+ .irq = INT_34XX_PRCM_MPU_IRQ,
+ .read_pending_irqs = &omap3xxx_prm_read_pending_irqs,
+ .ocp_barrier = &omap3xxx_prm_ocp_barrier,
+ .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen,
+ .restore_irqen = &omap3xxx_prm_restore_irqen,
+};
+
u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
{
return __raw_readl(prm_base + module + idx);
@@ -212,3 +230,80 @@ u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
{
return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset);
}
+
+/**
+ * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
+ * @events: ptr to a u32, preallocated by caller
+ *
+ * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM
+ * MPU IRQs, and store the result into the u32 pointed to by @events.
+ * No return value.
+ */
+void omap3xxx_prm_read_pending_irqs(unsigned long *events)
+{
+ u32 mask, st;
+
+ /* XXX Can the mask read be avoided (e.g., can it come from RAM?) */
+ mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+ st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
+ events[0] = mask & st;
+}
+
+/**
+ * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
+ *
+ * Force any buffered writes to the PRM IP block to complete. Needed
+ * by the PRM IRQ handler, which reads and writes directly to the IP
+ * block, to avoid race conditions after acknowledging or clearing IRQ
+ * bits. No return value.
+ */
+void omap3xxx_prm_ocp_barrier(void)
+{
+ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
+}
+
+/**
+ * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg
+ * @saved_mask: ptr to a u32 array to save IRQENABLE bits
+ *
+ * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask
+ * must be allocated by the caller. Intended to be used in the PRM
+ * interrupt handler suspend callback. The OCP barrier is needed to
+ * ensure the write to disable PRM interrupts reaches the PRM before
+ * returning; otherwise, spurious interrupts might occur. No return
+ * value.
+ */
+void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask)
+{
+ saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+ omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+
+ /* OCP barrier */
+ omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
+}
+
+/**
+ * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args
+ * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
+ *
+ * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended
+ * to be used in the PRM interrupt handler resume callback to restore
+ * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP
+ * barrier should be needed here; any pending PRM interrupts will fire
+ * once the writes reach the PRM. No return value.
+ */
+void omap3xxx_prm_restore_irqen(u32 *saved_mask)
+{
+ omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD,
+ OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+}
+
+static int __init omap3xxx_prcm_init(void)
+{
+ if (cpu_is_omap34xx())
+ return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+ return 0;
+}
+subsys_initcall(omap3xxx_prcm_init);
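The save/clear and restore hooks registered above are meant to be called as a pair around the suspended window by the common chain-handler code added later in this series. A hedged sketch of that ordering; the wrapper function here is illustrative, only the two omap3xxx_prm_* helpers come from this patch:

/* Illustrative only: how the OMAP3 hooks bracket a suspend cycle. */
static u32 example_saved_mask[1];

static void example_suspend_window(void)
{
	/* entering suspend: save the enable bits, then mask all PRM IRQs */
	omap3xxx_prm_save_and_clear_irqen(example_saved_mask);

	/* ... system suspended; PRM events stay latched but masked ... */

	/* resuming: restore the enable bits; latched events fire now */
	omap3xxx_prm_restore_irqen(example_saved_mask);
}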
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index cef533df086..70ac2a19dc5 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -1,7 +1,7 @@
/*
* OMAP2/3 Power/Reset Management (PRM) register definitions
*
- * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
* Copyright (C) 2008-2010 Nokia Corporation
* Paul Walmsley
*
@@ -314,6 +314,13 @@ void omap3_prm_vp_clear_txdone(u8 vp_id);
extern u32 omap3_prm_vcvp_read(u8 offset);
extern void omap3_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
+
+/* PRM interrupt-related functions */
+extern void omap3xxx_prm_read_pending_irqs(unsigned long *events);
+extern void omap3xxx_prm_ocp_barrier(void);
+extern void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask);
+extern void omap3xxx_prm_restore_irqen(u32 *saved_mask);
+
#endif /* CONFIG_ARCH_OMAP4 */
#endif
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 495a31a7e8a..33dd655e6aa 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/cpu.h>
#include <plat/prcm.h>
@@ -27,6 +27,24 @@
#include "prcm44xx.h"
#include "prminst44xx.h"
+static const struct omap_prcm_irq omap4_prcm_irqs[] = {
+ OMAP_PRCM_IRQ("wkup", 0, 0),
+ OMAP_PRCM_IRQ("io", 9, 1),
+};
+
+static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
+ .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
+ .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET,
+ .nr_regs = 2,
+ .irqs = omap4_prcm_irqs,
+ .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
+ .irq = OMAP44XX_IRQ_PRCM,
+ .read_pending_irqs = &omap44xx_prm_read_pending_irqs,
+ .ocp_barrier = &omap44xx_prm_ocp_barrier,
+ .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
+ .restore_irqen = &omap44xx_prm_restore_irqen,
+};
+
/* PRM low-level functions */
/* Read a register in a CM/PRM instance in the PRM module */
@@ -121,3 +139,101 @@ u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
OMAP4430_PRM_DEVICE_INST,
offset);
}
+
+static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs)
+{
+ u32 mask, st;
+
+ /* XXX read mask from RAM? */
+ mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs);
+ st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs);
+
+ return mask & st;
+}
+
+/**
+ * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
+ * @events: ptr to two consecutive u32s, preallocated by caller
+ *
+ * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM
+ * MPU IRQs, and store the result into the two u32s pointed to by @events.
+ * No return value.
+ */
+void omap44xx_prm_read_pending_irqs(unsigned long *events)
+{
+ events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET,
+ OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+
+ events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET,
+ OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+}
+
+/**
+ * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
+ *
+ * Force any buffered writes to the PRM IP block to complete. Needed
+ * by the PRM IRQ handler, which reads and writes directly to the IP
+ * block, to avoid race conditions after acknowledging or clearing IRQ
+ * bits. No return value.
+ */
+void omap44xx_prm_ocp_barrier(void)
+{
+ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_REVISION_PRM_OFFSET);
+}
+
+/**
+ * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs
+ * @saved_mask: ptr to a u32 array to save IRQENABLE bits
+ *
+ * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to
+ * @saved_mask. @saved_mask must be allocated by the caller.
+ * Intended to be used in the PRM interrupt handler suspend callback.
+ * The OCP barrier is needed to ensure the write to disable PRM
+ * interrupts reaches the PRM before returning; otherwise, spurious
+ * interrupts might occur. No return value.
+ */
+void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
+{
+ saved_mask[0] =
+ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+ saved_mask[1] =
+ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+
+ omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+ omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+
+ /* OCP barrier */
+ omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_REVISION_PRM_OFFSET);
+}
+
+/**
+ * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args
+ * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
+ *
+ * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from
+ * @saved_mask. Intended to be used in the PRM interrupt handler resume
+ * callback to restore values saved by omap44xx_prm_save_and_clear_irqen().
+ * No OCP barrier should be needed here; any pending PRM interrupts will fire
+ * once the writes reach the PRM. No return value.
+ */
+void omap44xx_prm_restore_irqen(u32 *saved_mask)
+{
+ omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_OFFSET);
+ omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+}
+
+static int __init omap4xxx_prcm_init(void)
+{
+ if (cpu_is_omap44xx())
+ return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
+ return 0;
+}
+subsys_initcall(omap4xxx_prcm_init);
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 3d66ccd849d..7978092946d 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -1,7 +1,7 @@
/*
* OMAP44xx PRM instance offset macros
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
@@ -763,6 +763,12 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
+/* PRM interrupt-related functions */
+extern void omap44xx_prm_read_pending_irqs(unsigned long *events);
+extern void omap44xx_prm_ocp_barrier(void);
+extern void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask);
+extern void omap44xx_prm_restore_irqen(u32 *saved_mask);
+
# endif
#endif
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
new file mode 100644
index 00000000000..860118ab43e
--- /dev/null
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -0,0 +1,320 @@
+/*
+ * OMAP2+ common Power & Reset Management (PRM) IP block functions
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * For historical reasons, the API used to configure the PRM
+ * interrupt handler refers to it as the "PRCM interrupt." The
+ * underlying registers are located in the PRM on OMAP3/4.
+ *
+ * XXX This code should eventually be moved to a PRM driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <mach/system.h>
+#include <plat/common.h>
+#include <plat/prcm.h>
+#include <plat/irqs.h>
+
+#include "prm2xxx_3xxx.h"
+#include "prm44xx.h"
+
+/*
+ * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
+ * XXX this is technically not needed, since
+ * omap_prcm_register_chain_handler() could allocate this based on the
+ * actual amount of memory needed for the SoC
+ */
+#define OMAP_PRCM_MAX_NR_PENDING_REG 2
+
+/*
+ * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
+ * by the PRCM interrupt handler code. There will be one 'chip' per
+ * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have
+ * one "chip" and OMAP4 will have two.)
+ */
+static struct irq_chip_generic **prcm_irq_chips;
+
+/*
+ * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
+ * is currently running on. Defined and passed by initialization code
+ * that calls omap_prcm_register_chain_handler().
+ */
+static struct omap_prcm_irq_setup *prcm_irq_setup;
+
+/* Private functions */
+
+/*
+ * Move priority events from events to priority_events array
+ */
+static void omap_prcm_events_filter_priority(unsigned long *events,
+ unsigned long *priority_events)
+{
+ int i;
+
+ for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
+ priority_events[i] =
+ events[i] & prcm_irq_setup->priority_mask[i];
+ events[i] ^= priority_events[i];
+ }
+}
+
+/*
+ * PRCM Interrupt Handler
+ *
+ * This is a common handler for the OMAP PRCM interrupts. Pending
+ * interrupts are detected through the SoC-specific read_pending_irqs()
+ * hook and dispatched accordingly. Clearing of the wakeup events should be
+ * done by the SoC specific individual handlers.
+ */
+static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
+ unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int virtirq;
+ int nr_irqs = prcm_irq_setup->nr_regs * 32;
+
+ /*
+ * If we are suspended, mask all interrupts from PRCM level,
+ * this does not ack them, and they will be pending until we
+ * re-enable the interrupts, at which point the
+ * omap_prcm_irq_handler will be executed again. The
+ * _save_and_clear_irqen() function must ensure that the PRM
+ * write to disable all IRQs has reached the PRM before
+ * returning, or spurious PRCM interrupts may occur during
+ * suspend.
+ */
+ if (prcm_irq_setup->suspended) {
+ prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
+ prcm_irq_setup->suspend_save_flag = true;
+ }
+
+ /*
+ * Loop until all pending irqs are handled, since
+ * generic_handle_irq() can cause new irqs to come
+ */
+ while (!prcm_irq_setup->suspended) {
+ prcm_irq_setup->read_pending_irqs(pending);
+
+ /* No bit set, then all IRQs are handled */
+ if (find_first_bit(pending, nr_irqs) >= nr_irqs)
+ break;
+
+ omap_prcm_events_filter_priority(pending, priority_pending);
+
+ /*
+ * Loop on all currently pending irqs so that new irqs
+ * cannot starve previously pending irqs
+ */
+
+ /* Serve priority events first */
+ for_each_set_bit(virtirq, priority_pending, nr_irqs)
+ generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
+
+ /* Serve normal events next */
+ for_each_set_bit(virtirq, pending, nr_irqs)
+ generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
+ }
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ chip->irq_unmask(&desc->irq_data);
+
+ prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
+}
+
+/* Public functions */
+
+/**
+ * omap_prcm_event_to_irq - given a PRCM event name, returns the
+ * corresponding IRQ on which the handler should be registered
+ * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
+ *
+ * Returns the Linux internal IRQ ID corresponding to @name upon success,
+ * or -ENOENT upon failure.
+ */
+int omap_prcm_event_to_irq(const char *name)
+{
+ int i;
+
+ if (!prcm_irq_setup || !name)
+ return -ENOENT;
+
+ for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
+ if (!strcmp(prcm_irq_setup->irqs[i].name, name))
+ return prcm_irq_setup->base_irq +
+ prcm_irq_setup->irqs[i].offset;
+
+ return -ENOENT;
+}
+
+/**
+ * omap_prcm_irq_cleanup - reverses memory allocated and other steps
+ * done by omap_prcm_register_chain_handler()
+ *
+ * No return value.
+ */
+void omap_prcm_irq_cleanup(void)
+{
+ int i;
+
+ if (!prcm_irq_setup) {
+ pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
+ return;
+ }
+
+ if (prcm_irq_chips) {
+ for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
+ if (prcm_irq_chips[i])
+ irq_remove_generic_chip(prcm_irq_chips[i],
+ 0xffffffff, 0, 0);
+ prcm_irq_chips[i] = NULL;
+ }
+ kfree(prcm_irq_chips);
+ prcm_irq_chips = NULL;
+ }
+
+ kfree(prcm_irq_setup->saved_mask);
+ prcm_irq_setup->saved_mask = NULL;
+
+ kfree(prcm_irq_setup->priority_mask);
+ prcm_irq_setup->priority_mask = NULL;
+
+ irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+
+ if (prcm_irq_setup->base_irq > 0)
+ irq_free_descs(prcm_irq_setup->base_irq,
+ prcm_irq_setup->nr_regs * 32);
+ prcm_irq_setup->base_irq = 0;
+}
+
+void omap_prcm_irq_prepare(void)
+{
+ prcm_irq_setup->suspended = true;
+}
+
+void omap_prcm_irq_complete(void)
+{
+ prcm_irq_setup->suspended = false;
+
+ /* If we have not saved the masks, do not attempt to restore */
+ if (!prcm_irq_setup->suspend_save_flag)
+ return;
+
+ prcm_irq_setup->suspend_save_flag = false;
+
+ /*
+ * Re-enable all masked PRCM irq sources, this causes the PRCM
+ * interrupt to fire immediately if the events were masked
+ * previously in the chain handler
+ */
+ prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
+}
+
+/**
+ * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
+ * handler based on provided parameters
+ * @irq_setup: hardware data about the underlying PRM/PRCM
+ *
+ * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
+ * one generic IRQ chip per PRM interrupt status/enable register pair.
+ * Returns 0 upon success, -EINVAL if called twice or if invalid
+ * arguments are passed, or -ENOMEM on any other error.
+ */
+int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
+{
+ int nr_regs;
+ u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
+ int offset, i;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ if (!irq_setup)
+ return -EINVAL;
+
+ nr_regs = irq_setup->nr_regs;
+
+ if (prcm_irq_setup) {
+ pr_err("PRCM: already initialized; won't reinitialize\n");
+ return -EINVAL;
+ }
+
+ if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
+ pr_err("PRCM: nr_regs too large\n");
+ return -EINVAL;
+ }
+
+ prcm_irq_setup = irq_setup;
+
+ prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
+ prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
+ prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
+ GFP_KERNEL);
+
+ if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
+ !prcm_irq_setup->priority_mask) {
+ pr_err("PRCM: kzalloc failed\n");
+ goto err;
+ }
+
+ memset(mask, 0, sizeof(mask));
+
+ for (i = 0; i < irq_setup->nr_irqs; i++) {
+ offset = irq_setup->irqs[i].offset;
+ mask[offset >> 5] |= 1 << (offset & 0x1f);
+ if (irq_setup->irqs[i].priority)
+ irq_setup->priority_mask[offset >> 5] |=
+ 1 << (offset & 0x1f);
+ }
+
+ irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+
+ irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
+ 0);
+
+ if (irq_setup->base_irq < 0) {
+ pr_err("PRCM: failed to allocate irq descs: %d\n",
+ irq_setup->base_irq);
+ goto err;
+ }
+
+ for (i = 0; i < irq_setup->nr_regs; i++) {
+ gc = irq_alloc_generic_chip("PRCM", 1,
+ irq_setup->base_irq + i * 32, prm_base,
+ handle_level_irq);
+
+ if (!gc) {
+ pr_err("PRCM: failed to allocate generic chip\n");
+ goto err;
+ }
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+
+ ct->regs.ack = irq_setup->ack + i * 4;
+ ct->regs.mask = irq_setup->mask + i * 4;
+
+ irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
+ prcm_irq_chips[i] = gc;
+ }
+
+ return 0;
+
+err:
+ omap_prcm_irq_cleanup();
+ return -ENOMEM;
+}
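Putting the pieces of prm_common.c together, the intended lifecycle is: a per-SoC initcall registers the chain handler, clients look up their event IRQ by name and request it, and the PM code wraps suspend with prepare()/complete(). A condensed sketch of that flow; the client names are illustrative, the real users are the pm34xx.c and mux code elsewhere in this series:

#include <linux/interrupt.h>
#include <linux/suspend.h>

/* Illustrative client only: hook the "wkup" PRCM event. */
static irqreturn_t example_wkup_handler(int irq, void *data)
{
	/* clear the SoC-specific wake-up status bits here */
	return IRQ_HANDLED;
}

static int __init example_prcm_client_init(void)
{
	int irq = omap_prcm_event_to_irq("wkup");

	if (irq < 0)
		return irq;	/* chain handler not registered (yet) */

	return request_irq(irq, example_wkup_handler,
			   IRQF_NO_SUSPEND, "example_wkup", NULL);
}

/* Around system suspend, the platform_suspend_ops glue is expected to do: */
static int example_pm_begin(suspend_state_t state)
{
	omap_prcm_irq_prepare();	/* mask + save PRM IRQs on next event */
	return 0;
}

static void example_pm_finish(void)
{
	omap_prcm_irq_complete();	/* restore the saved PRM IRQ enables */
}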
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 3a7bab16edd..f6de5bc6b12 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm44xx.h"
#include "prminst44xx.h"
diff --git a/arch/arm/mach-omap2/sdram-nokia.c b/arch/arm/mach-omap2/sdram-nokia.c
index 14caa228bc0..7479d7ea137 100644
--- a/arch/arm/mach-omap2/sdram-nokia.c
+++ b/arch/arm/mach-omap2/sdram-nokia.c
@@ -1,7 +1,7 @@
/*
* SDRC register values for Nokia boards
*
- * Copyright (C) 2008, 2010 Nokia Corporation
+ * Copyright (C) 2008, 2010-2011 Nokia Corporation
*
* Lauri Leukkunen <lauri.leukkunen@nokia.com>
*
@@ -18,7 +18,7 @@
#include <linux/io.h>
#include <plat/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/clock.h>
#include <plat/sdrc.h>
@@ -107,14 +107,37 @@ static const struct sdram_timings nokia_195dot2mhz_timings[] = {
},
};
+static const struct sdram_timings nokia_200mhz_timings[] = {
+ {
+ .casl = 3,
+ .tDAL = 30000,
+ .tDPL = 15000,
+ .tRRD = 10000,
+ .tRCD = 20000,
+ .tRP = 15000,
+ .tRAS = 40000,
+ .tRC = 55000,
+ .tRFC = 140000,
+ .tXSR = 200000,
+
+ .tREF = 7800,
+
+ .tXP = 2,
+ .tCKE = 4,
+ .tWTR = 2
+ },
+};
+
static const struct {
long rate;
struct sdram_timings const *data;
} nokia_timings[] = {
{ 83000000, nokia_166mhz_timings },
{ 97600000, nokia_97dot6mhz_timings },
+ { 100000000, nokia_200mhz_timings },
{ 166000000, nokia_166mhz_timings },
{ 195200000, nokia_195dot2mhz_timings },
+ { 200000000, nokia_200mhz_timings },
};
static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1];
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 8f278287477..e3d345f4640 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -23,7 +23,7 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/clock.h>
#include <plat/sram.h>
diff --git a/arch/arm/mach-omap2/sdrc2xxx.c b/arch/arm/mach-omap2/sdrc2xxx.c
index ccdb010f169..791a63cdceb 100644
--- a/arch/arm/mach-omap2/sdrc2xxx.c
+++ b/arch/arm/mach-omap2/sdrc2xxx.c
@@ -24,7 +24,7 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/clock.h>
#include <plat/sram.h>
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 9992dbfdfdb..247d89478f2 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -19,26 +19,21 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/serial_reg.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/serial_8250.h>
#include <linux/pm_runtime.h>
#include <linux/console.h>
-#ifdef CONFIG_SERIAL_OMAP
#include <plat/omap-serial.h>
-#endif
-
-#include <plat/common.h>
+#include "common.h"
#include <plat/board.h>
-#include <plat/clock.h>
#include <plat/dma.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
+#include <plat/omap-pm.h>
#include "prm2xxx_3xxx.h"
#include "pm.h"
@@ -47,603 +42,226 @@
#include "control.h"
#include "mux.h"
-#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
-#define UART_OMAP_WER 0x17 /* Wake-up enable register */
-
-#define UART_ERRATA_FIFO_FULL_ABORT (0x1 << 0)
-#define UART_ERRATA_i202_MDR1_ACCESS (0x1 << 1)
-
/*
- * NOTE: By default the serial timeout is disabled as it causes lost characters
- * over the serial ports. This means that the UART clocks will stay on until
- * disabled via sysfs. This also causes that any deeper omap sleep states are
- * blocked.
+ * NOTE: By default the serial auto_suspend timeout is disabled as it causes
+ * lost characters over the serial ports. This means that the UART clocks will
+ * stay on until power/autosuspend_delay is set for the uart from sysfs.
+ * This also means that any deeper OMAP sleep states are blocked.
*/
-#define DEFAULT_TIMEOUT 0
+#define DEFAULT_AUTOSUSPEND_DELAY -1
#define MAX_UART_HWMOD_NAME_LEN 16
struct omap_uart_state {
int num;
int can_sleep;
- struct timer_list timer;
- u32 timeout;
-
- void __iomem *wk_st;
- void __iomem *wk_en;
- u32 wk_mask;
- u32 padconf;
- u32 dma_enabled;
-
- struct clk *ick;
- struct clk *fck;
- int clocked;
-
- int irq;
- int regshift;
- int irqflags;
- void __iomem *membase;
- resource_size_t mapbase;
struct list_head node;
struct omap_hwmod *oh;
struct platform_device *pdev;
-
- u32 errata;
-#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
- int context_valid;
-
- /* Registers to be saved/restored for OFF-mode */
- u16 dll;
- u16 dlh;
- u16 ier;
- u16 sysc;
- u16 scr;
- u16 wer;
- u16 mcr;
-#endif
};
static LIST_HEAD(uart_list);
static u8 num_uarts;
+static u8 console_uart_id = -1;
+static u8 no_console_suspend;
+static u8 uart_debug;
+
+#define DEFAULT_RXDMA_POLLRATE 1 /* RX DMA polling rate (us) */
+#define DEFAULT_RXDMA_BUFSIZE 4096 /* RX DMA buffer size */
+#define DEFAULT_RXDMA_TIMEOUT (3 * HZ) /* RX DMA timeout (jiffies) */
+
+static struct omap_uart_port_info omap_serial_default_info[] __initdata = {
+ {
+ .dma_enabled = false,
+ .dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE,
+ .dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE,
+ .dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT,
+ .autosuspend_timeout = DEFAULT_AUTOSUSPEND_DELAY,
+ },
+};
-static inline unsigned int __serial_read_reg(struct uart_port *up,
- int offset)
-{
- offset <<= up->regshift;
- return (unsigned int)__raw_readb(up->membase + offset);
-}
-
-static inline unsigned int serial_read_reg(struct omap_uart_state *uart,
- int offset)
+#ifdef CONFIG_PM
+static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
{
- offset <<= uart->regshift;
- return (unsigned int)__raw_readb(uart->membase + offset);
-}
+ struct omap_device *od = to_omap_device(pdev);
-static inline void __serial_write_reg(struct uart_port *up, int offset,
- int value)
-{
- offset <<= up->regshift;
- __raw_writeb(value, up->membase + offset);
-}
+ if (!od)
+ return;
-static inline void serial_write_reg(struct omap_uart_state *uart, int offset,
- int value)
-{
- offset <<= uart->regshift;
- __raw_writeb(value, uart->membase + offset);
+ if (enable)
+ omap_hwmod_enable_wakeup(od->hwmods[0]);
+ else
+ omap_hwmod_disable_wakeup(od->hwmods[0]);
}
/*
- * Internal UARTs need to be initialized for the 8250 autoconfig to work
- * properly. Note that the TX watermark initialization may not be needed
- * once the 8250.c watermark handling code is merged.
+ * Errata i291: [UART]:Cannot Acknowledge Idle Requests
+ * in Smartidle Mode When Configured for DMA Operations.
+ * WA: configure uart in force idle mode.
*/
-
-static inline void __init omap_uart_reset(struct omap_uart_state *uart)
+static void omap_uart_set_noidle(struct platform_device *pdev)
{
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
- serial_write_reg(uart, UART_OMAP_SCR, 0x08);
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
-}
-
-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+ struct omap_device *od = to_omap_device(pdev);
-/*
- * Work Around for Errata i202 (3430 - 1.12, 3630 - 1.6)
- * The access to uart register after MDR1 Access
- * causes UART to corrupt data.
- *
- * Need a delay =
- * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
- * give 10 times as much
- */
-static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val,
- u8 fcr_val)
-{
- u8 timeout = 255;
-
- serial_write_reg(uart, UART_OMAP_MDR1, mdr1_val);
- udelay(2);
- serial_write_reg(uart, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT |
- UART_FCR_CLEAR_RCVR);
- /*
- * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
- * TX_FIFO_E bit is 1.
- */
- while (UART_LSR_THRE != (serial_read_reg(uart, UART_LSR) &
- (UART_LSR_THRE | UART_LSR_DR))) {
- timeout--;
- if (!timeout) {
- /* Should *never* happen. we warn and carry on */
- dev_crit(&uart->pdev->dev, "Errata i202: timedout %x\n",
- serial_read_reg(uart, UART_LSR));
- break;
- }
- udelay(1);
- }
+ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
}
-static void omap_uart_save_context(struct omap_uart_state *uart)
+static void omap_uart_set_forceidle(struct platform_device *pdev)
{
- u16 lcr = 0;
+ struct omap_device *od = to_omap_device(pdev);
- if (!enable_off_mode)
- return;
-
- lcr = serial_read_reg(uart, UART_LCR);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- uart->dll = serial_read_reg(uart, UART_DLL);
- uart->dlh = serial_read_reg(uart, UART_DLM);
- serial_write_reg(uart, UART_LCR, lcr);
- uart->ier = serial_read_reg(uart, UART_IER);
- uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
- uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
- uart->wer = serial_read_reg(uart, UART_OMAP_WER);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
- uart->mcr = serial_read_reg(uart, UART_MCR);
- serial_write_reg(uart, UART_LCR, lcr);
-
- uart->context_valid = 1;
+ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE);
}
-static void omap_uart_restore_context(struct omap_uart_state *uart)
-{
- u16 efr = 0;
-
- if (!enable_off_mode)
- return;
-
- if (!uart->context_valid)
- return;
-
- uart->context_valid = 0;
-
- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0);
- else
- serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
-
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- efr = serial_read_reg(uart, UART_EFR);
- serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(uart, UART_IER, 0x0);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_write_reg(uart, UART_DLL, uart->dll);
- serial_write_reg(uart, UART_DLM, uart->dlh);
- serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
- serial_write_reg(uart, UART_IER, uart->ier);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_write_reg(uart, UART_MCR, uart->mcr);
- serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_write_reg(uart, UART_EFR, efr);
- serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
- serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
- serial_write_reg(uart, UART_OMAP_WER, uart->wer);
- serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
-
- if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
- omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1);
- else
- /* UART 16x mode */
- serial_write_reg(uart, UART_OMAP_MDR1,
- UART_OMAP_MDR1_16X_MODE);
-}
#else
-static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
-static inline void omap_uart_restore_context(struct omap_uart_state *uart) {}
-#endif /* CONFIG_PM && CONFIG_ARCH_OMAP3 */
-
-static inline void omap_uart_enable_clocks(struct omap_uart_state *uart)
-{
- if (uart->clocked)
- return;
-
- omap_device_enable(uart->pdev);
- uart->clocked = 1;
- omap_uart_restore_context(uart);
-}
-
-#ifdef CONFIG_PM
-
-static inline void omap_uart_disable_clocks(struct omap_uart_state *uart)
-{
- if (!uart->clocked)
- return;
-
- omap_uart_save_context(uart);
- uart->clocked = 0;
- omap_device_idle(uart->pdev);
-}
-
-static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
-{
- /* Set wake-enable bit */
- if (uart->wk_en && uart->wk_mask) {
- u32 v = __raw_readl(uart->wk_en);
- v |= uart->wk_mask;
- __raw_writel(v, uart->wk_en);
- }
-
- /* Ensure IOPAD wake-enables are set */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 v = omap_ctrl_readw(uart->padconf);
- v |= OMAP3_PADCONF_WAKEUPENABLE0;
- omap_ctrl_writew(v, uart->padconf);
- }
-}
-
-static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
-{
- /* Clear wake-enable bit */
- if (uart->wk_en && uart->wk_mask) {
- u32 v = __raw_readl(uart->wk_en);
- v &= ~uart->wk_mask;
- __raw_writel(v, uart->wk_en);
- }
-
- /* Ensure IOPAD wake-enables are cleared */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 v = omap_ctrl_readw(uart->padconf);
- v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
- omap_ctrl_writew(v, uart->padconf);
- }
-}
-
-static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
- int enable)
-{
- u8 idlemode;
-
- if (enable) {
- /**
- * Errata 2.15: [UART]:Cannot Acknowledge Idle Requests
- * in Smartidle Mode When Configured for DMA Operations.
- */
- if (uart->dma_enabled)
- idlemode = HWMOD_IDLEMODE_FORCE;
- else
- idlemode = HWMOD_IDLEMODE_SMART;
- } else {
- idlemode = HWMOD_IDLEMODE_NO;
- }
-
- omap_hwmod_set_slave_idlemode(uart->oh, idlemode);
-}
-
-static void omap_uart_block_sleep(struct omap_uart_state *uart)
-{
- omap_uart_enable_clocks(uart);
-
- omap_uart_smart_idle_enable(uart, 0);
- uart->can_sleep = 0;
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- else
- del_timer(&uart->timer);
-}
-
-static void omap_uart_allow_sleep(struct omap_uart_state *uart)
-{
- if (device_may_wakeup(&uart->pdev->dev))
- omap_uart_enable_wakeup(uart);
- else
- omap_uart_disable_wakeup(uart);
-
- if (!uart->clocked)
- return;
-
- omap_uart_smart_idle_enable(uart, 1);
- uart->can_sleep = 1;
- del_timer(&uart->timer);
-}
-
-static void omap_uart_idle_timer(unsigned long data)
-{
- struct omap_uart_state *uart = (struct omap_uart_state *)data;
-
- omap_uart_allow_sleep(uart);
-}
-
-void omap_uart_prepare_idle(int num)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (num == uart->num && uart->can_sleep) {
- omap_uart_disable_clocks(uart);
- return;
- }
- }
-}
-
-void omap_uart_resume_idle(int num)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (num == uart->num && uart->can_sleep) {
- omap_uart_enable_clocks(uart);
-
- /* Check for IO pad wakeup */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 p = omap_ctrl_readw(uart->padconf);
-
- if (p & OMAP3_PADCONF_WAKEUPEVENT0)
- omap_uart_block_sleep(uart);
- }
-
- /* Check for normal UART wakeup */
- if (__raw_readl(uart->wk_st) & uart->wk_mask)
- omap_uart_block_sleep(uart);
- return;
- }
- }
-}
-
-void omap_uart_prepare_suspend(void)
-{
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- omap_uart_allow_sleep(uart);
- }
-}
-
-int omap_uart_can_sleep(void)
-{
- struct omap_uart_state *uart;
- int can_sleep = 1;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (!uart->clocked)
- continue;
-
- if (!uart->can_sleep) {
- can_sleep = 0;
- continue;
- }
-
- /* This UART can now safely sleep. */
- omap_uart_allow_sleep(uart);
- }
-
- return can_sleep;
-}
+static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
+{}
+static void omap_uart_set_noidle(struct platform_device *pdev) {}
+static void omap_uart_set_forceidle(struct platform_device *pdev) {}
+#endif /* CONFIG_PM */
-/**
- * omap_uart_interrupt()
- *
- * This handler is used only to detect that *any* UART interrupt has
- * occurred. It does _nothing_ to handle the interrupt. Rather,
- * any UART interrupt will trigger the inactivity timer so the
- * UART will not idle or sleep for its timeout period.
- *
- **/
-/* static int first_interrupt; */
-static irqreturn_t omap_uart_interrupt(int irq, void *dev_id)
-{
- struct omap_uart_state *uart = dev_id;
+#ifdef CONFIG_OMAP_MUX
+static struct omap_device_pad default_uart1_pads[] __initdata = {
+ {
+ .name = "uart1_cts.uart1_cts",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_rts.uart1_rts",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_tx.uart1_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart1_rx.uart1_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+};
- omap_uart_block_sleep(uart);
+static struct omap_device_pad default_uart2_pads[] __initdata = {
+ {
+ .name = "uart2_cts.uart2_cts",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_rts.uart2_rts",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_tx.uart2_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart2_rx.uart2_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+};
- return IRQ_NONE;
-}
+static struct omap_device_pad default_uart3_pads[] __initdata = {
+ {
+ .name = "uart3_cts_rctx.uart3_cts_rctx",
+ .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_rts_sd.uart3_rts_sd",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_tx_irtx.uart3_tx_irtx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart3_rx_irrx.uart3_rx_irrx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ },
+};
-static void omap_uart_idle_init(struct omap_uart_state *uart)
-{
- int ret;
-
- uart->can_sleep = 0;
- uart->timeout = DEFAULT_TIMEOUT;
- setup_timer(&uart->timer, omap_uart_idle_timer,
- (unsigned long) uart);
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- omap_uart_smart_idle_enable(uart, 0);
-
- if (cpu_is_omap34xx() && !cpu_is_ti816x()) {
- u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD;
- u32 wk_mask = 0;
- u32 padconf = 0;
-
- /* XXX These PRM accesses do not belong here */
- uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
- uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1);
- switch (uart->num) {
- case 0:
- wk_mask = OMAP3430_ST_UART1_MASK;
- padconf = 0x182;
- break;
- case 1:
- wk_mask = OMAP3430_ST_UART2_MASK;
- padconf = 0x17a;
- break;
- case 2:
- wk_mask = OMAP3430_ST_UART3_MASK;
- padconf = 0x19e;
- break;
- case 3:
- wk_mask = OMAP3630_ST_UART4_MASK;
- padconf = 0x0d2;
- break;
- }
- uart->wk_mask = wk_mask;
- uart->padconf = padconf;
- } else if (cpu_is_omap24xx()) {
- u32 wk_mask = 0;
- u32 wk_en = PM_WKEN1, wk_st = PM_WKST1;
-
- switch (uart->num) {
- case 0:
- wk_mask = OMAP24XX_ST_UART1_MASK;
- break;
- case 1:
- wk_mask = OMAP24XX_ST_UART2_MASK;
- break;
- case 2:
- wk_en = OMAP24XX_PM_WKEN2;
- wk_st = OMAP24XX_PM_WKST2;
- wk_mask = OMAP24XX_ST_UART3_MASK;
- break;
- }
- uart->wk_mask = wk_mask;
- if (cpu_is_omap2430()) {
- uart->wk_en = OMAP2430_PRM_REGADDR(CORE_MOD, wk_en);
- uart->wk_st = OMAP2430_PRM_REGADDR(CORE_MOD, wk_st);
- } else if (cpu_is_omap2420()) {
- uart->wk_en = OMAP2420_PRM_REGADDR(CORE_MOD, wk_en);
- uart->wk_st = OMAP2420_PRM_REGADDR(CORE_MOD, wk_st);
- }
- } else {
- uart->wk_en = NULL;
- uart->wk_st = NULL;
- uart->wk_mask = 0;
- uart->padconf = 0;
- }
+static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = {
+ {
+ .name = "gpmc_wait2.uart4_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "gpmc_wait3.uart4_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2,
+ },
+};
- uart->irqflags |= IRQF_SHARED;
- ret = request_threaded_irq(uart->irq, NULL, omap_uart_interrupt,
- IRQF_SHARED, "serial idle", (void *)uart);
- WARN_ON(ret);
-}
+static struct omap_device_pad default_omap4_uart4_pads[] __initdata = {
+ {
+ .name = "uart4_tx.uart4_tx",
+ .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
+ },
+ {
+ .name = "uart4_rx.uart4_rx",
+ .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+ .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0,
+ },
+};
-void omap_uart_enable_irqs(int enable)
+static void omap_serial_fill_default_pads(struct omap_board_data *bdata)
{
- int ret;
- struct omap_uart_state *uart;
-
- list_for_each_entry(uart, &uart_list, node) {
- if (enable) {
- pm_runtime_put_sync(&uart->pdev->dev);
- ret = request_threaded_irq(uart->irq, NULL,
- omap_uart_interrupt,
- IRQF_SHARED,
- "serial idle",
- (void *)uart);
- } else {
- pm_runtime_get_noresume(&uart->pdev->dev);
- free_irq(uart->irq, (void *)uart);
+ switch (bdata->id) {
+ case 0:
+ bdata->pads = default_uart1_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads);
+ break;
+ case 1:
+ bdata->pads = default_uart2_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads);
+ break;
+ case 2:
+ bdata->pads = default_uart3_pads;
+ bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads);
+ break;
+ case 3:
+ if (cpu_is_omap44xx()) {
+ bdata->pads = default_omap4_uart4_pads;
+ bdata->pads_cnt =
+ ARRAY_SIZE(default_omap4_uart4_pads);
+ } else if (cpu_is_omap3630()) {
+ bdata->pads = default_omap36xx_uart4_pads;
+ bdata->pads_cnt =
+ ARRAY_SIZE(default_omap36xx_uart4_pads);
}
+ break;
+ default:
+ break;
}
}
-
-static ssize_t sleep_timeout_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *odev = to_omap_device(pdev);
- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
-
- return sprintf(buf, "%u\n", uart->timeout / HZ);
-}
-
-static ssize_t sleep_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_device *odev = to_omap_device(pdev);
- struct omap_uart_state *uart = odev->hwmods[0]->dev_attr;
- unsigned int value;
-
- if (sscanf(buf, "%u", &value) != 1) {
- dev_err(dev, "sleep_timeout_store: Invalid value\n");
- return -EINVAL;
- }
-
- uart->timeout = value * HZ;
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- else
- /* A zero value means disable timeout feature */
- omap_uart_block_sleep(uart);
-
- return n;
-}
-
-static DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show,
- sleep_timeout_store);
-#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
#else
-static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
-static void omap_uart_block_sleep(struct omap_uart_state *uart)
-{
- /* Needed to enable UART clocks when built without CONFIG_PM */
- omap_uart_enable_clocks(uart);
-}
-#define DEV_CREATE_FILE(dev, attr)
-#endif /* CONFIG_PM */
-
-#ifndef CONFIG_SERIAL_OMAP
-/*
- * Override the default 8250 read handler: mem_serial_in()
- * Empty RX fifo read causes an abort on omap3630 and omap4
- * This function makes sure that an empty rx fifo is not read on these silicons
- * (OMAP1/2/3430 are not affected)
- */
-static unsigned int serial_in_override(struct uart_port *up, int offset)
-{
- if (UART_RX == offset) {
- unsigned int lsr;
- lsr = __serial_read_reg(up, UART_LSR);
- if (!(lsr & UART_LSR_DR))
- return -EPERM;
- }
-
- return __serial_read_reg(up, offset);
-}
+static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {}
+#endif
-static void serial_out_override(struct uart_port *up, int offset, int value)
+char *cmdline_find_option(char *str)
{
- unsigned int status, tmout = 10000;
+ extern char *saved_command_line;
- status = __serial_read_reg(up, UART_LSR);
- while (!(status & UART_LSR_THRE)) {
- /* Wait up to 10ms for the character(s) to be sent. */
- if (--tmout == 0)
- break;
- udelay(1);
- status = __serial_read_reg(up, UART_LSR);
- }
- __serial_write_reg(up, offset, value);
+ return strstr(saved_command_line, str);
}
-#endif
static int __init omap_serial_early_init(void)
{
- int i = 0;
-
do {
char oh_name[MAX_UART_HWMOD_NAME_LEN];
struct omap_hwmod *oh;
struct omap_uart_state *uart;
+ char uart_name[MAX_UART_HWMOD_NAME_LEN];
snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN,
- "uart%d", i + 1);
+ "uart%d", num_uarts + 1);
oh = omap_hwmod_lookup(oh_name);
if (!oh)
break;
@@ -653,21 +271,35 @@ static int __init omap_serial_early_init(void)
return -ENODEV;
uart->oh = oh;
- uart->num = i++;
+ uart->num = num_uarts++;
list_add_tail(&uart->node, &uart_list);
- num_uarts++;
-
- /*
- * NOTE: omap_hwmod_setup*() has not yet been called,
- * so no hwmod functions will work yet.
- */
-
- /*
- * During UART early init, device need to be probed
- * to determine SoC specific init before omap_device
- * is ready. Therefore, don't allow idle here
- */
- uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
+ snprintf(uart_name, MAX_UART_HWMOD_NAME_LEN,
+ "%s%d", OMAP_SERIAL_NAME, uart->num);
+
+ if (cmdline_find_option(uart_name)) {
+ console_uart_id = uart->num;
+
+ if (console_loglevel >= 10) {
+ uart_debug = true;
+ pr_info("%s used as console in debug mode"
+ " uart%d clocks will not be"
+ " gated", uart_name, uart->num);
+ }
+
+ if (cmdline_find_option("no_console_suspend"))
+ no_console_suspend = true;
+
+ /*
+ * omap-uart can be used for earlyprintk logs.
+ * So if omap-uart is used as the console, prevent
+ * UART reset and idle so that logs keep coming out
+ * of omap-uart until the UART console driver is
+ * available to take over console messages.
+ * Idling or resetting omap-uart while early boot
+ * logs are being printed can stall the boot-up.
+ */
+ oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
+ }
} while (1);
return 0;
@@ -677,6 +309,7 @@ core_initcall(omap_serial_early_init);
/**
* omap_serial_init_port() - initialize single serial port
* @bdata: port specific board data pointer
+ * @info: platform specific data pointer
*
* This function initializes the serial driver for the given port only.
* Platforms can call this function instead of omap_serial_init()
@@ -685,7 +318,8 @@ core_initcall(omap_serial_early_init);
* Don't mix calls to omap_serial_init_port() and omap_serial_init(),
* use only one of the two.
*/
-void __init omap_serial_init_port(struct omap_board_data *bdata)
+void __init omap_serial_init_port(struct omap_board_data *bdata,
+ struct omap_uart_port_info *info)
{
struct omap_uart_state *uart;
struct omap_hwmod *oh;
@@ -693,15 +327,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
void *pdata = NULL;
u32 pdata_size = 0;
char *name;
-#ifndef CONFIG_SERIAL_OMAP
- struct plat_serial8250_port ports[2] = {
- {},
- {.flags = 0},
- };
- struct plat_serial8250_port *p = &ports[0];
-#else
struct omap_uart_port_info omap_up;
-#endif
if (WARN_ON(!bdata))
return;
@@ -713,66 +339,34 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
list_for_each_entry(uart, &uart_list, node)
if (bdata->id == uart->num)
break;
+ if (!info)
+ info = omap_serial_default_info;
oh = uart->oh;
- uart->dma_enabled = 0;
-#ifndef CONFIG_SERIAL_OMAP
- name = "serial8250";
-
- /*
- * !! 8250 driver does not use standard IORESOURCE* It
- * has it's own custom pdata that can be taken from
- * the hwmod resource data. But, this needs to be
- * done after the build.
- *
- * ?? does it have to be done before the register ??
- * YES, because platform_device_data_add() copies
- * pdata, it does not use a pointer.
- */
- p->flags = UPF_BOOT_AUTOCONF;
- p->iotype = UPIO_MEM;
- p->regshift = 2;
- p->uartclk = OMAP24XX_BASE_BAUD * 16;
- p->irq = oh->mpu_irqs[0].irq;
- p->mapbase = oh->slaves[0]->addr->pa_start;
- p->membase = omap_hwmod_get_mpu_rt_va(oh);
- p->irqflags = IRQF_SHARED;
- p->private_data = uart;
-
- /*
- * omap44xx, ti816x: Never read empty UART fifo
- * omap3xxx: Never read empty UART fifo on UARTs
- * with IP rev >=0x52
- */
- uart->regshift = p->regshift;
- uart->membase = p->membase;
- if (cpu_is_omap44xx() || cpu_is_ti816x())
- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
- else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF)
- >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
- uart->errata |= UART_ERRATA_FIFO_FULL_ABORT;
-
- if (uart->errata & UART_ERRATA_FIFO_FULL_ABORT) {
- p->serial_in = serial_in_override;
- p->serial_out = serial_out_override;
- }
-
- pdata = &ports[0];
- pdata_size = 2 * sizeof(struct plat_serial8250_port);
-#else
-
name = DRIVER_NAME;
- omap_up.dma_enabled = uart->dma_enabled;
+ omap_up.dma_enabled = info->dma_enabled;
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
- omap_up.mapbase = oh->slaves[0]->addr->pa_start;
- omap_up.membase = omap_hwmod_get_mpu_rt_va(oh);
- omap_up.irqflags = IRQF_SHARED;
- omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ omap_up.flags = UPF_BOOT_AUTOCONF;
+ omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
+ omap_up.set_forceidle = omap_uart_set_forceidle;
+ omap_up.set_noidle = omap_uart_set_noidle;
+ omap_up.enable_wakeup = omap_uart_enable_wakeup;
+ omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
+ omap_up.dma_rx_timeout = info->dma_rx_timeout;
+ omap_up.dma_rx_poll_rate = info->dma_rx_poll_rate;
+ omap_up.autosuspend_timeout = info->autosuspend_timeout;
+
+ /* Enable the MDR1 Errata i202 for OMAP2430/3xxx/44xx */
+ if (!cpu_is_omap2420() && !cpu_is_ti816x())
+ omap_up.errata |= UART_ERRATA_i202_MDR1_ACCESS;
+
+ /* Enable DMA Mode Force Idle Errata i291 for omap34xx/3630 */
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ omap_up.errata |= UART_ERRATA_i291_DMA_FORCEIDLE;
pdata = &omap_up;
pdata_size = sizeof(struct omap_uart_port_info);
-#endif
if (WARN_ON(!oh))
return;
@@ -782,64 +376,29 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
WARN(IS_ERR(pdev), "Could not build omap_device for %s: %s.\n",
name, oh->name);
- omap_device_disable_idle_on_suspend(pdev);
+ if ((console_uart_id == bdata->id) && no_console_suspend)
+ omap_device_disable_idle_on_suspend(pdev);
+
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
- uart->irq = oh->mpu_irqs[0].irq;
- uart->regshift = 2;
- uart->mapbase = oh->slaves[0]->addr->pa_start;
- uart->membase = omap_hwmod_get_mpu_rt_va(oh);
uart->pdev = pdev;
oh->dev_attr = uart;
- console_lock(); /* in case the earlycon is on the UART */
-
- /*
- * Because of early UART probing, UART did not get idled
- * on init. Now that omap_device is ready, ensure full idle
- * before doing omap_device_enable().
- */
- omap_hwmod_idle(uart->oh);
-
- omap_device_enable(uart->pdev);
- omap_uart_idle_init(uart);
- omap_uart_reset(uart);
- omap_hwmod_enable_wakeup(uart->oh);
- omap_device_idle(uart->pdev);
-
- /*
- * Need to block sleep long enough for interrupt driven
- * driver to start. Console driver is in polling mode
- * so device needs to be kept enabled while polling driver
- * is in use.
- */
- if (uart->timeout)
- uart->timeout = (30 * HZ);
- omap_uart_block_sleep(uart);
- uart->timeout = DEFAULT_TIMEOUT;
-
- console_unlock();
-
- if ((cpu_is_omap34xx() && uart->padconf) ||
- (uart->wk_en && uart->wk_mask)) {
+ if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads)
+ && !uart_debug)
device_init_wakeup(&pdev->dev, true);
- DEV_CREATE_FILE(&pdev->dev, &dev_attr_sleep_timeout);
- }
-
- /* Enable the MDR1 errata for OMAP3 */
- if (cpu_is_omap34xx() && !cpu_is_ti816x())
- uart->errata |= UART_ERRATA_i202_MDR1_ACCESS;
}
/**
- * omap_serial_init() - initialize all supported serial ports
+ * omap_serial_board_init() - initialize all supported serial ports
+ * @info: platform specific data pointer
*
* Initializes all available UARTs as serial ports. Platforms
* can call this function when they want to have default behaviour
* for serial ports (e.g initialize them all as serial ports).
*/
-void __init omap_serial_init(void)
+void __init omap_serial_board_init(struct omap_uart_port_info *info)
{
struct omap_uart_state *uart;
struct omap_board_data bdata;
@@ -849,7 +408,25 @@ void __init omap_serial_init(void)
bdata.flags = 0;
bdata.pads = NULL;
bdata.pads_cnt = 0;
- omap_serial_init_port(&bdata);
+ if (cpu_is_omap44xx() || cpu_is_omap34xx())
+ omap_serial_fill_default_pads(&bdata);
+
+ if (!info)
+ omap_serial_init_port(&bdata, NULL);
+ else
+ omap_serial_init_port(&bdata, &info[uart->num]);
}
}
+
+/**
+ * omap_serial_init() - initialize all supported serial ports
+ *
+ * Initializes all available UARTs.
+ * Platforms can call this function when they want to have default behaviour
+ * for serial ports (e.g. initialize them all as serial ports).
+ */
+void __init omap_serial_init(void)
+{
+ omap_serial_board_init(NULL);
+}
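
As a quick orientation to the serial rework above: boards that are happy with the defaults keep calling omap_serial_init(), while boards that want per-port DMA or autosuspend tuning can pass an array of omap_uart_port_info through omap_serial_board_init(). A minimal, hypothetical sketch follows -- the field names come from this patch, but the values and the board_* identifiers are made up and not part of the change:

	/* Illustrative only -- one entry per UART on the SoC. */
	static struct omap_uart_port_info board_uart_info[] __initdata = {
		[0] = { .dma_enabled = 0, .autosuspend_timeout = 3000 },
		[1] = { .dma_enabled = 0, .autosuspend_timeout = 3000 },
		[2] = { .dma_enabled = 1, .dma_rx_buf_size = 4096,
			.dma_rx_timeout = 3000, .autosuspend_timeout = 3000 },
		[3] = { .dma_enabled = 0, .autosuspend_timeout = 3000 },
	};

	static void __init board_serial_init(void)
	{
		/* Either keep the plain default ... */
		omap_serial_init();
		/* ... or, instead, pass per-port tuning (don't mix the two):
		 * omap_serial_board_init(board_uart_info);
		 */
	}
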
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
new file mode 100644
index 00000000000..abd28340049
--- /dev/null
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -0,0 +1,379 @@
+/*
+ * OMAP44xx sleep code.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <asm/smp_scu.h>
+#include <asm/memory.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+#include <mach/omap-secure.h>
+
+#include "common.h"
+#include "omap4-sar-layout.h"
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PM)
+
+.macro DO_SMC
+ dsb
+ smc #0
+ dsb
+.endm
+
+ppa_zero_params:
+ .word 0x0
+
+ppa_por_params:
+ .word 1, 0
+
+/*
+ * =============================
+ * == CPU suspend finisher ==
+ * =============================
+ *
+ * void omap4_finish_suspend(unsigned long cpu_state)
+ *
+ * This function saves the CPU context and performs the CPU
+ * power down sequence. Calling WFI effectively changes the CPU
+ * power domain states to the desired target power state.
+ *
+ * @cpu_state : contains context save state (r0)
+ * 0 - No context lost
+ * 1 - CPUx L1 and logic lost: MPUSS CSWR
+ * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
+ * @return: This function never returns for CPU OFF and DORMANT power states.
+ * Post WFI, the CPU transitions to the DORMANT or OFF power state; on wake-up
+ * it follows a full CPU reset path via ROM code to the CPU restore code.
+ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
+ * The function returns to the caller for the CPU INACTIVE and ON power states,
+ * or if the CPU failed to transition to the targeted OFF/DORMANT state.
+ */
+ENTRY(omap4_finish_suspend)
+ stmfd sp!, {lr}
+ cmp r0, #0x0
+ beq do_WFI @ No lowpower state, jump to WFI
+
+ /*
+ * Flush all data from the L1 data cache before disabling
+ * SCTLR.C bit.
+ */
+ bl omap4_get_sar_ram_base
+ ldr r9, [r0, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne skip_secure_l1_clean
+ mov r0, #SCU_PM_NORMAL
+ mov r1, #0xFF @ clean secure L1
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
+ DO_SMC
+ ldmfd r13!, {r4-r12, r14}
+skip_secure_l1_clean:
+ bl v7_flush_dcache_all
+
+ /*
+ * Clear the SCTLR.C bit to prevent further data cache
+ * allocation. Clearing SCTLR.C makes all data accesses
+ * strongly ordered, so they no longer hit the cache.
+ */
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #(1 << 2) @ Disable the C bit
+ mcr p15, 0, r0, c1, c0, 0
+ isb
+
+ /*
+ * Invalidate the L1 data cache. Even though only an invalidate is
+ * necessary, the exported flush API is used here. Doing a clean
+ * on an already-clean cache is almost a NOP.
+ */
+ bl v7_flush_dcache_all
+
+ /*
+ * Switch the CPU from Symmetric Multiprocessing (SMP) mode
+ * to Asymmetric Multiprocessing (AMP) mode by programming
+ * the SCU power status to DORMANT or OFF mode.
+ * This enables the CPU to be taken out of coherency by
+ * preventing the CPU from receiving cache, TLB, or BTB
+ * maintenance operations broadcast by other CPUs in the cluster.
+ */
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne scu_gp_set
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ ldreq r0, [r8, #SCU_OFFSET0]
+ ldrne r0, [r8, #SCU_OFFSET1]
+ mov r1, #0x00
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
+ DO_SMC
+ ldmfd r13!, {r4-r12, r14}
+ b skip_scu_gp_set
+scu_gp_set:
+ mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
+ ands r0, r0, #0x0f
+ ldreq r1, [r8, #SCU_OFFSET0]
+ ldrne r1, [r8, #SCU_OFFSET1]
+ bl omap4_get_scu_base
+ bl scu_power_mode
+skip_scu_gp_set:
+ mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
+ tst r0, #(1 << 18)
+ mrcne p15, 0, r0, c1, c0, 1
+ bicne r0, r0, #(1 << 6) @ Disable SMP bit
+ mcrne p15, 0, r0, c1, c0, 1
+ isb
+ dsb
+#ifdef CONFIG_CACHE_L2X0
+ /*
+ * Clean and invalidate the L2 cache.
+ * Common cache-l2x0.c functions can't be used here since it
+ * uses spinlocks. We are out of coherency here with data cache
+ * disabled. The spinlock implementation uses exclusive load/store
+ * instructions, which can fail when the data cache is disabled.
+ * OMAP4 hardware doesn't provide an external exclusive monitor
+ * that could work around this exclusive-access issue, so taking
+ * the spinlock here could deadlock the CPU.
+ */
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
+ ands r5, r5, #0x0f
+ ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
+ ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
+ cmp r0, #3
+ bne do_WFI
+#ifdef CONFIG_PL310_ERRATA_727915
+ mov r0, #0x03
+ mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+ DO_SMC
+#endif
+ bl omap4_get_l2cache_base
+ mov r2, r0
+ ldr r0, =0xffff
+ str r0, [r2, #L2X0_CLEAN_INV_WAY]
+wait:
+ ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
+ ldr r1, =0xffff
+ ands r0, r0, r1
+ bne wait
+#ifdef CONFIG_PL310_ERRATA_727915
+ mov r0, #0x00
+ mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+ DO_SMC
+#endif
+l2x_sync:
+ bl omap4_get_l2cache_base
+ mov r2, r0
+ mov r0, #0x0
+ str r0, [r2, #L2X0_CACHE_SYNC]
+sync:
+ ldr r0, [r2, #L2X0_CACHE_SYNC]
+ ands r0, r0, #0x1
+ bne sync
+#endif
+
+do_WFI:
+ bl omap_do_wfi
+
+ /*
+ * CPU is here when it failed to enter OFF/DORMANT or
+ * no low power state was attempted.
+ */
+ mrc p15, 0, r0, c1, c0, 0
+ tst r0, #(1 << 2) @ Check C bit enabled?
+ orreq r0, r0, #(1 << 2) @ Enable the C bit
+ mcreq p15, 0, r0, c1, c0, 0
+ isb
+
+ /*
+ * Ensure the CPU power state is set to NORMAL in
+ * SCU power state so that CPU is back in coherency.
+ * In non-coherent mode CPU can lock-up and lead to
+ * system deadlock.
+ */
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ Check SMP bit enabled?
+ orreq r0, r0, #(1 << 6)
+ mcreq p15, 0, r0, c1, c0, 1
+ isb
+ bl omap4_get_sar_ram_base
+ mov r8, r0
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Check for HS device
+ bne scu_gp_clear
+ mov r0, #SCU_PM_NORMAL
+ mov r1, #0x00
+ stmfd r13!, {r4-r12, r14}
+ ldr r12, =OMAP4_MON_SCU_PWR_INDEX
+ DO_SMC
+ ldmfd r13!, {r4-r12, r14}
+ b skip_scu_gp_clear
+scu_gp_clear:
+ bl omap4_get_scu_base
+ mov r1, #SCU_PM_NORMAL
+ bl scu_power_mode
+skip_scu_gp_clear:
+ isb
+ dsb
+ ldmfd sp!, {pc}
+ENDPROC(omap4_finish_suspend)
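
For context only (the caller below is not part of this patch): omap4_finish_suspend() is written to be handed to the generic ARM cpu_suspend() path together with the context-loss level documented in the block comment above. A hedged sketch, with the prototype declared locally as an assumption:

	/*
	 * Illustrative sketch. Assumes the generic ARM helper of this
	 * kernel generation: int cpu_suspend(unsigned long arg,
	 * int (*fn)(unsigned long)).
	 */
	#include <asm/suspend.h>

	extern int omap4_finish_suspend(unsigned long cpu_state);	/* assumed prototype */

	static int example_enter_lowpower(unsigned long save_state)
	{
		/*
		 * cpu_suspend() saves the core registers and calls the
		 * finisher; it returns either immediately (state not
		 * entered) or after wake-up via omap4_cpu_resume().
		 */
		return cpu_suspend(save_state, omap4_finish_suspend);
	}
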
+
+/*
+ * ============================
+ * == CPU resume entry point ==
+ * ============================
+ *
+ * void omap4_cpu_resume(void)
+ *
+ * ROM code jumps to this function while waking up from CPU
+ * OFF or DORMANT state. Physical address of the function is
+ * stored in the SAR RAM while entering OFF or DORMANT mode.
+ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
+ */
+ENTRY(omap4_cpu_resume)
+ /*
+ * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
+ * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
+ * init; for CPU1, a secure PPA API is provided. CPU0 must be ON
+ * while executing the NS_SMP API on CPU1, and the PPA version must be 1.4.0+.
+ * OMAP443X GP devices - SMP bit isn't accessible.
+ * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
+ */
+ ldr r8, =OMAP44XX_SAR_RAM_BASE
+ ldr r9, [r8, #OMAP_TYPE_OFFSET]
+ cmp r9, #0x1 @ Skip if GP device
+ bne skip_ns_smp_enable
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0x0f
+ beq skip_ns_smp_enable
+ppa_actrl_retry:
+ mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
+ adr r3, ppa_zero_params @ Pointer to parameters
+ mov r1, #0x0 @ Process ID
+ mov r2, #0x4 @ Flag
+ mov r6, #0xff
+ mov r12, #0x00 @ Secure Service ID
+ DO_SMC
+ cmp r0, #0x0 @ API returns 0 on success.
+ beq enable_smp_bit
+ b ppa_actrl_retry
+enable_smp_bit:
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ Check SMP bit enabled?
+ orreq r0, r0, #(1 << 6)
+ mcreq p15, 0, r0, c1, c0, 1
+ isb
+skip_ns_smp_enable:
+#ifdef CONFIG_CACHE_L2X0
+ /*
+ * Restore the L2 AUXCTRL and enable the L2 cache.
+ * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
+ * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
+ * register r0 contains value to be programmed.
+ * L2 cache is already invalidated by ROM code as part
+ * of the MPUSS OFF wakeup path.
+ */
+ ldr r2, =OMAP44XX_L2CACHE_BASE
+ ldr r0, [r2, #L2X0_CTRL]
+ and r0, #0x0f
+ cmp r0, #1
+ beq skip_l2en @ Skip if already enabled
+ ldr r3, =OMAP44XX_SAR_RAM_BASE
+ ldr r1, [r3, #OMAP_TYPE_OFFSET]
+ cmp r1, #0x1 @ Check for HS device
+ bne set_gp_por
+ ldr r0, =OMAP4_PPA_L2_POR_INDEX
+ ldr r1, =OMAP44XX_SAR_RAM_BASE
+ ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+ adr r3, ppa_por_params
+ str r4, [r3, #0x04]
+ mov r1, #0x0 @ Process ID
+ mov r2, #0x4 @ Flag
+ mov r6, #0xff
+ mov r12, #0x00 @ Secure Service ID
+ DO_SMC
+ b set_aux_ctrl
+set_gp_por:
+ ldr r1, =OMAP44XX_SAR_RAM_BASE
+ ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+ ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
+ DO_SMC
+set_aux_ctrl:
+ ldr r1, =OMAP44XX_SAR_RAM_BASE
+ ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
+ ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
+ DO_SMC
+ mov r0, #0x1
+ ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache
+ DO_SMC
+skip_l2en:
+#endif
+
+ b cpu_resume @ Jump to generic resume
+ENDPROC(omap4_cpu_resume)
+#endif
+
+#ifndef CONFIG_OMAP4_ERRATA_I688
+ENTRY(omap_bus_sync)
+ mov pc, lr
+ENDPROC(omap_bus_sync)
+#endif
+
+ENTRY(omap_do_wfi)
+ stmfd sp!, {lr}
+ /* Drain interconnect write buffers. */
+ bl omap_bus_sync
+
+ /*
+ * Execute an ISB instruction to ensure that all of the
+ * CP15 register changes have been committed.
+ */
+ isb
+
+ /*
+ * Execute a barrier instruction to ensure that all cache,
+ * TLB and branch predictor maintenance operations issued
+ * by any CPU in the cluster have completed.
+ */
+ dsb
+ dmb
+
+ /*
+ * Execute a WFI instruction and wait until the
+ * STANDBYWFI output is asserted to indicate that the
+ * CPU is in the idle and low power state. The CPU can speculatively
+ * prefetch instructions, so add NOPs after WFI. Sixteen
+ * NOPs as per the Cortex-A9 pipeline depth.
+ */
+ wfi @ Wait For Interrupt
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ldmfd sp!, {pc}
+ENDPROC(omap_do_wfi)
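
One further illustrative note on the resume path: the ROM code can only reach omap4_cpu_resume() if its physical address has been published in the SAR RAM slot named in the comment above (CPUx_WAKEUP_NS_PA_ADDR_OFFSET). A hedged sketch of that registration on the C side -- the helper name and the simplified base/offset handling are hypothetical:

	/* Illustrative only -- not part of this patch. */
	#include <linux/io.h>

	extern void omap4_cpu_resume(void);	/* assumed prototype */

	static void example_set_cpu_wakeup_addr(void __iomem *sar_base, u32 offset)
	{
		/* ROM code reads this slot and jumps there on OFF/DORMANT wake-up */
		__raw_writel(virt_to_phys(omap4_cpu_resume), sar_base + offset);
	}
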
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 6a4f6839a7d..9dd93453e56 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include <plat/common.h>
+#include "common.h"
#include "pm.h"
#include "smartreflex.h"
@@ -139,7 +139,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
sr_write_reg(sr_info, ERRCONFIG_V1, status);
} else if (sr_info->ip_type == SR_TYPE_V2) {
/* Read the status bits */
- sr_read_reg(sr_info, IRQSTATUS);
+ status = sr_read_reg(sr_info, IRQSTATUS);
/* Clear them by writing back */
sr_write_reg(sr_info, IRQSTATUS, status);
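
The one-line smartreflex.c change above is a real bug fix: on SR IP v2 the pending interrupt bits are cleared by writing the just-read IRQSTATUS value back, so discarding the read meant a stale 'status' value was written instead. A generic sketch of the read-then-ack pattern, with hypothetical names:

	/* Illustrative only -- generic "read, handle, write back to ack" pattern. */
	#include <linux/io.h>
	#include <linux/interrupt.h>

	static irqreturn_t example_handle_status(void __iomem *irqstatus_reg)
	{
		u32 status = readl_relaxed(irqstatus_reg);	/* capture pending bits */

		if (!status)
			return IRQ_NONE;

		/* ... act on the events indicated by 'status' ... */

		writel_relaxed(status, irqstatus_reg);		/* ack exactly what was seen */
		return IRQ_HANDLED;
	}
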
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 037b0d7d4e0..6eeff0e0ae0 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -41,7 +41,7 @@
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
#include <asm/sched_clock.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
#include <plat/omap-pm.h>
@@ -254,7 +254,6 @@ static struct omap_dm_timer clksrc;
/*
* clocksource
*/
-static DEFINE_CLOCK_DATA(cd);
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
return (cycle_t)__omap_dm_timer_read_counter(&clksrc, 1);
@@ -268,23 +267,12 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void notrace dmtimer_update_sched_clock(void)
+static u32 notrace dmtimer_read_sched_clock(void)
{
- u32 cyc;
-
- cyc = __omap_dm_timer_read_counter(&clksrc, 1);
-
- update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-unsigned long long notrace sched_clock(void)
-{
- u32 cyc = 0;
-
if (clksrc.reserved)
- cyc = __omap_dm_timer_read_counter(&clksrc, 1);
+ return __omap_dm_timer_read_counter(clksrc.io_base, 1);
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+ return 0;
}
/* Setup free-running counter for clocksource */
@@ -301,7 +289,7 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
__omap_dm_timer_load_start(&clksrc,
OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
- init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
+ setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
pr_err("Could not register clocksource %s\n",
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 52243577216..10b20c652e5 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -30,6 +30,7 @@
#include <plat/usb.h>
#include "twl-common.h"
+#include "pm.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+void __init omap_pmic_late_init(void)
+{
+ /* Init the OMAP TWL parameters (if the PMIC has been registered) */
+ if (!pmic_i2c_board_info.irq)
+ return;
+
+ omap3_twl_init();
+ omap4_twl_init();
+}
+
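
omap_pmic_late_init() is meant to run only after the PMIC registration above has happened; purely as a hypothetical illustration, a board's late-init path could chain to it like this (the hook name is made up):

	/* Hypothetical illustration only. */
	static void __init example_board_init_late(void)
	{
		omap_pmic_late_init();
		/* ... any other deferred board setup ... */
	}
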
#if defined(CONFIG_ARCH_OMAP3)
static struct twl4030_usb_data omap3_usb_pdata = {
.usb_mode = T2_USB_MODE_ULPI,
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
index 5e83a5bd37f..275dde8cb27 100644
--- a/arch/arm/mach-omap2/twl-common.h
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -1,6 +1,8 @@
#ifndef __OMAP_PMIC_COMMON__
#define __OMAP_PMIC_COMMON__
+#include <plat/irqs.h>
+
#define TWL_COMMON_PDATA_USB (1 << 0)
#define TWL_COMMON_PDATA_BCI (1 << 1)
#define TWL_COMMON_PDATA_MADC (1 << 2)
@@ -30,6 +32,7 @@ struct twl4030_platform_data;
void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data);
+void omap_pmic_late_init(void);
static inline void omap2_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data)
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
index 89ae29847c5..771dc781b74 100644
--- a/arch/arm/mach-omap2/usb-host.c
+++ b/arch/arm/mach-omap2/usb-host.c
@@ -28,51 +28,28 @@
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <plat/usb.h>
+#include <plat/omap_device.h>
#include "mux.h"
#ifdef CONFIG_MFD_OMAP_USB_HOST
-#define OMAP_USBHS_DEVICE "usbhs-omap"
-
-static struct resource usbhs_resources[] = {
- {
- .name = "uhh",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "tll",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ehci",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ehci-irq",
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "ohci",
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ohci-irq",
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device usbhs_device = {
- .name = OMAP_USBHS_DEVICE,
- .id = 0,
- .num_resources = ARRAY_SIZE(usbhs_resources),
- .resource = usbhs_resources,
-};
+#define OMAP_USBHS_DEVICE "usbhs_omap"
+#define USBHS_UHH_HWMODNAME "usb_host_hs"
+#define USBHS_TLL_HWMODNAME "usb_tll_hs"
static struct usbhs_omap_platform_data usbhs_data;
static struct ehci_hcd_omap_platform_data ehci_data;
static struct ohci_hcd_omap_platform_data ohci_data;
+static struct omap_device_pm_latency omap_uhhtll_latency[] = {
+ {
+ .deactivate_func = omap_device_idle_hwmods,
+ .activate_func = omap_device_enable_hwmods,
+ .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+ },
+};
+
/* MUX settings for EHCI pins */
/*
* setup_ehci_io_mux - initialize IO pad mux for USBHOST
@@ -508,7 +485,10 @@ static void setup_4430ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
{
- int i;
+ struct omap_hwmod *oh[2];
+ struct omap_device *od;
+ int bus_id = -1;
+ int i;
for (i = 0; i < OMAP3_HS_USB_PORTS; i++) {
usbhs_data.port_mode[i] = pdata->port_mode[i];
@@ -523,44 +503,34 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
usbhs_data.ohci_data = &ohci_data;
if (cpu_is_omap34xx()) {
- usbhs_resources[0].start = OMAP34XX_UHH_CONFIG_BASE;
- usbhs_resources[0].end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
- usbhs_resources[1].start = OMAP34XX_USBTLL_BASE;
- usbhs_resources[1].end = OMAP34XX_USBTLL_BASE + SZ_4K - 1;
- usbhs_resources[2].start = OMAP34XX_EHCI_BASE;
- usbhs_resources[2].end = OMAP34XX_EHCI_BASE + SZ_1K - 1;
- usbhs_resources[3].start = INT_34XX_EHCI_IRQ;
- usbhs_resources[4].start = OMAP34XX_OHCI_BASE;
- usbhs_resources[4].end = OMAP34XX_OHCI_BASE + SZ_1K - 1;
- usbhs_resources[5].start = INT_34XX_OHCI_IRQ;
setup_ehci_io_mux(pdata->port_mode);
setup_ohci_io_mux(pdata->port_mode);
} else if (cpu_is_omap44xx()) {
- usbhs_resources[0].start = OMAP44XX_UHH_CONFIG_BASE;
- usbhs_resources[0].end = OMAP44XX_UHH_CONFIG_BASE + SZ_1K - 1;
- usbhs_resources[1].start = OMAP44XX_USBTLL_BASE;
- usbhs_resources[1].end = OMAP44XX_USBTLL_BASE + SZ_4K - 1;
- usbhs_resources[2].start = OMAP44XX_HSUSB_EHCI_BASE;
- usbhs_resources[2].end = OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
- usbhs_resources[3].start = OMAP44XX_IRQ_EHCI;
- usbhs_resources[4].start = OMAP44XX_HSUSB_OHCI_BASE;
- usbhs_resources[4].end = OMAP44XX_HSUSB_OHCI_BASE + SZ_1K - 1;
- usbhs_resources[5].start = OMAP44XX_IRQ_OHCI;
setup_4430ehci_io_mux(pdata->port_mode);
setup_4430ohci_io_mux(pdata->port_mode);
}
- if (platform_device_add_data(&usbhs_device,
- &usbhs_data, sizeof(usbhs_data)) < 0) {
- printk(KERN_ERR "USBHS platform_device_add_data failed\n");
- goto init_end;
+ oh[0] = omap_hwmod_lookup(USBHS_UHH_HWMODNAME);
+ if (!oh[0]) {
+ pr_err("Could not look up %s\n", USBHS_UHH_HWMODNAME);
+ return;
}
- if (platform_device_register(&usbhs_device) < 0)
- printk(KERN_ERR "USBHS platform_device_register failed\n");
+ oh[1] = omap_hwmod_lookup(USBHS_TLL_HWMODNAME);
+ if (!oh[1]) {
+ pr_err("Could not look up %s\n", USBHS_TLL_HWMODNAME);
+ return;
+ }
-init_end:
- return;
+ od = omap_device_build_ss(OMAP_USBHS_DEVICE, bus_id, oh, 2,
+ (void *)&usbhs_data, sizeof(usbhs_data),
+ omap_uhhtll_latency,
+ ARRAY_SIZE(omap_uhhtll_latency), false);
+ if (IS_ERR(od)) {
+ pr_err("Could not build hwmod devices %s,%s\n",
+ USBHS_UHH_HWMODNAME, USBHS_TLL_HWMODNAME);
+ return;
+ }
}
#else
@@ -570,5 +540,3 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata)
}
#endif
-
-
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 267975086a7..8d5ed775dd5 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -93,6 +93,9 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
if (cpu_is_omap3517() || cpu_is_omap3505()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
+ } else if (cpu_is_ti81xx()) {
+ oh_name = "usb_otg_hs";
+ name = "musb-ti81xx";
} else {
oh_name = "usb_otg_hs";
name = "musb-omap2430";
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index cfe348e1af0..a5ec7f8f2ea 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm-regbits-34xx.h"
#include "voltage.h"
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index 2740a968145..d70b930f273 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index 1f8fdf736e6..8a36342e60d 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 071101debbb..c005e2f5e38 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include <plat/cpu.h>
#include "prm-regbits-34xx.h"
@@ -31,6 +31,14 @@
* VDD data
*/
+/* OMAP3-common voltagedomain data */
+
+static struct voltagedomain omap3_voltdm_wkup = {
+ .name = "wakeup",
+};
+
+/* 34xx/36xx voltagedomain data */
+
static const struct omap_vfsm_instance omap3_vdd1_vfsm = {
.voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET,
.voltsetup_mask = OMAP3430_SETUP_TIME1_MASK,
@@ -63,10 +71,6 @@ static struct voltagedomain omap3_voltdm_core = {
.vp = &omap3_vp_core,
};
-static struct voltagedomain omap3_voltdm_wkup = {
- .name = "wakeup",
-};
-
static struct voltagedomain *voltagedomains_omap3[] __initdata = {
&omap3_voltdm_mpu,
&omap3_voltdm_core,
@@ -74,11 +78,30 @@ static struct voltagedomain *voltagedomains_omap3[] __initdata = {
NULL,
};
+/* AM35xx voltagedomain data */
+
+static struct voltagedomain am35xx_voltdm_mpu = {
+ .name = "mpu_iva",
+};
+
+static struct voltagedomain am35xx_voltdm_core = {
+ .name = "core",
+};
+
+static struct voltagedomain *voltagedomains_am35xx[] __initdata = {
+ &am35xx_voltdm_mpu,
+ &am35xx_voltdm_core,
+ &omap3_voltdm_wkup,
+ NULL,
+};
+
+
static const char *sys_clk_name __initdata = "sys_ck";
void __init omap3xxx_voltagedomains_init(void)
{
struct voltagedomain *voltdm;
+ struct voltagedomain **voltdms;
int i;
/*
@@ -93,8 +116,13 @@ void __init omap3xxx_voltagedomains_init(void)
omap3_voltdm_core.volt_data = omap34xx_vddcore_volt_data;
}
- for (i = 0; voltdm = voltagedomains_omap3[i], voltdm; i++)
+ if (cpu_is_omap3517() || cpu_is_omap3505())
+ voltdms = voltagedomains_am35xx;
+ else
+ voltdms = voltagedomains_omap3;
+
+ for (i = 0; voltdm = voltdms[i], voltdm; i++)
voltdm->sys_clk.name = sys_clk_name;
- voltdm_init(voltagedomains_omap3);
+ voltdm_init(voltdms);
};
diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
index c4584e9ac71..4e11d022595 100644
--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
@@ -21,7 +21,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
index 66bd700a2b9..807391d84a9 100644
--- a/arch/arm/mach-omap2/vp.c
+++ b/arch/arm/mach-omap2/vp.c
@@ -1,7 +1,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "voltage.h"
#include "vp.h"
diff --git a/arch/arm/mach-omap2/vp3xxx_data.c b/arch/arm/mach-omap2/vp3xxx_data.c
index 260c554b154..bd89f80089f 100644
--- a/arch/arm/mach-omap2/vp3xxx_data.c
+++ b/arch/arm/mach-omap2/vp3xxx_data.c
@@ -19,7 +19,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm-regbits-34xx.h"
#include "voltage.h"
diff --git a/arch/arm/mach-omap2/vp44xx_data.c b/arch/arm/mach-omap2/vp44xx_data.c
index b4e77044891..8c031d16879 100644
--- a/arch/arm/mach-omap2/vp44xx_data.c
+++ b/arch/arm/mach-omap2/vp44xx_data.c
@@ -19,7 +19,7 @@
#include <linux/err.h>
#include <linux/init.h>
-#include <plat/common.h>
+#include "common.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index 5ceafdccc45..3638e5c12b7 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -14,8 +14,8 @@
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
-#include <linux/errno.h>
#include <mach/hardware.h>
+#include <plat/addr-map.h>
#include "common.h"
/*
@@ -41,7 +41,6 @@
/*
* Generic Address Decode Windows bit settings
*/
-#define TARGET_DDR 0
#define TARGET_DEV_BUS 1
#define TARGET_PCI 3
#define TARGET_PCIE 4
@@ -57,27 +56,10 @@
#define ATTR_DEV_BOOT 0xf
#define ATTR_SRAM 0x0
-/*
- * Helpers to get DDR bank info
- */
-#define ORION5X_DDR_REG(x) (ORION5X_DDR_VIRT_BASE | (x))
-#define DDR_BASE_CS(n) ORION5X_DDR_REG(0x1500 + ((n) << 3))
-#define DDR_SIZE_CS(n) ORION5X_DDR_REG(0x1504 + ((n) << 3))
-
-/*
- * CPU Address Decode Windows registers
- */
-#define ORION5X_BRIDGE_REG(x) (ORION5X_BRIDGE_VIRT_BASE | (x))
-#define CPU_WIN_CTRL(n) ORION5X_BRIDGE_REG(0x000 | ((n) << 4))
-#define CPU_WIN_BASE(n) ORION5X_BRIDGE_REG(0x004 | ((n) << 4))
-#define CPU_WIN_REMAP_LO(n) ORION5X_BRIDGE_REG(0x008 | ((n) << 4))
-#define CPU_WIN_REMAP_HI(n) ORION5X_BRIDGE_REG(0x00c | ((n) << 4))
-
-
-struct mbus_dram_target_info orion5x_mbus_dram_info;
static int __initdata win_alloc_count;
-static int __init orion5x_cpu_win_can_remap(int win)
+static int __init cpu_win_can_remap(const struct orion_addr_map_cfg *cfg,
+ const int win)
{
u32 dev, rev;
@@ -91,116 +73,82 @@ static int __init orion5x_cpu_win_can_remap(int win)
return 0;
}
-static int __init setup_cpu_win(int win, u32 base, u32 size,
- u8 target, u8 attr, int remap)
-{
- if (win >= 8) {
- printk(KERN_ERR "setup_cpu_win: trying to allocate "
- "window %d\n", win);
- return -ENOSPC;
- }
-
- writel(base & 0xffff0000, CPU_WIN_BASE(win));
- writel(((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1,
- CPU_WIN_CTRL(win));
-
- if (orion5x_cpu_win_can_remap(win)) {
- if (remap < 0)
- remap = base;
-
- writel(remap & 0xffff0000, CPU_WIN_REMAP_LO(win));
- writel(0, CPU_WIN_REMAP_HI(win));
- }
- return 0;
-}
-
-void __init orion5x_setup_cpu_mbus_bridge(void)
-{
- int i;
- int cs;
+/*
+ * Description of the windows needed by the platform code
+ */
+static struct __initdata orion_addr_map_cfg addr_map_cfg = {
+ .num_wins = 8,
+ .cpu_win_can_remap = cpu_win_can_remap,
+ .bridge_virt_base = ORION5X_BRIDGE_VIRT_BASE,
+};
+static const struct __initdata orion_addr_map_info addr_map_info[] = {
/*
- * First, disable and clear windows.
+ * Setup windows for PCI+PCIe IO+MEM space.
*/
- for (i = 0; i < 8; i++) {
- writel(0, CPU_WIN_BASE(i));
- writel(0, CPU_WIN_CTRL(i));
- if (orion5x_cpu_win_can_remap(i)) {
- writel(0, CPU_WIN_REMAP_LO(i));
- writel(0, CPU_WIN_REMAP_HI(i));
- }
- }
+ { 0, ORION5X_PCIE_IO_PHYS_BASE, ORION5X_PCIE_IO_SIZE,
+ TARGET_PCIE, ATTR_PCIE_IO, ORION5X_PCIE_IO_BUS_BASE
+ },
+ { 1, ORION5X_PCI_IO_PHYS_BASE, ORION5X_PCI_IO_SIZE,
+ TARGET_PCI, ATTR_PCI_IO, ORION5X_PCI_IO_BUS_BASE
+ },
+ { 2, ORION5X_PCIE_MEM_PHYS_BASE, ORION5X_PCIE_MEM_SIZE,
+ TARGET_PCIE, ATTR_PCIE_MEM, -1
+ },
+ { 3, ORION5X_PCI_MEM_PHYS_BASE, ORION5X_PCI_MEM_SIZE,
+ TARGET_PCI, ATTR_PCI_MEM, -1
+ },
+ /* End marker */
+ { -1, 0, 0, 0, 0, 0 }
+};
+void __init orion5x_setup_cpu_mbus_bridge(void)
+{
/*
- * Setup windows for PCI+PCIe IO+MEM space.
+ * Disable, clear and configure windows.
*/
- setup_cpu_win(0, ORION5X_PCIE_IO_PHYS_BASE, ORION5X_PCIE_IO_SIZE,
- TARGET_PCIE, ATTR_PCIE_IO, ORION5X_PCIE_IO_BUS_BASE);
- setup_cpu_win(1, ORION5X_PCI_IO_PHYS_BASE, ORION5X_PCI_IO_SIZE,
- TARGET_PCI, ATTR_PCI_IO, ORION5X_PCI_IO_BUS_BASE);
- setup_cpu_win(2, ORION5X_PCIE_MEM_PHYS_BASE, ORION5X_PCIE_MEM_SIZE,
- TARGET_PCIE, ATTR_PCIE_MEM, -1);
- setup_cpu_win(3, ORION5X_PCI_MEM_PHYS_BASE, ORION5X_PCI_MEM_SIZE,
- TARGET_PCI, ATTR_PCI_MEM, -1);
+ orion_config_wins(&addr_map_cfg, addr_map_info);
win_alloc_count = 4;
/*
* Setup MBUS dram target info.
*/
- orion5x_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
-
- for (i = 0, cs = 0; i < 4; i++) {
- u32 base = readl(DDR_BASE_CS(i));
- u32 size = readl(DDR_SIZE_CS(i));
-
- /*
- * Chip select enabled?
- */
- if (size & 1) {
- struct mbus_dram_window *w;
-
- w = &orion5x_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- w->base = base & 0xffff0000;
- w->size = (size | 0x0000ffff) + 1;
- }
- }
- orion5x_mbus_dram_info.num_cs = cs;
+ orion_setup_cpu_mbus_target(&addr_map_cfg, ORION5X_DDR_WINDOW_CPU_BASE);
}
void __init orion5x_setup_dev_boot_win(u32 base, u32 size)
{
- setup_cpu_win(win_alloc_count++, base, size,
- TARGET_DEV_BUS, ATTR_DEV_BOOT, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++, base, size,
+ TARGET_DEV_BUS, ATTR_DEV_BOOT, -1);
}
void __init orion5x_setup_dev0_win(u32 base, u32 size)
{
- setup_cpu_win(win_alloc_count++, base, size,
- TARGET_DEV_BUS, ATTR_DEV_CS0, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++, base, size,
+ TARGET_DEV_BUS, ATTR_DEV_CS0, -1);
}
void __init orion5x_setup_dev1_win(u32 base, u32 size)
{
- setup_cpu_win(win_alloc_count++, base, size,
- TARGET_DEV_BUS, ATTR_DEV_CS1, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++, base, size,
+ TARGET_DEV_BUS, ATTR_DEV_CS1, -1);
}
void __init orion5x_setup_dev2_win(u32 base, u32 size)
{
- setup_cpu_win(win_alloc_count++, base, size,
- TARGET_DEV_BUS, ATTR_DEV_CS2, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++, base, size,
+ TARGET_DEV_BUS, ATTR_DEV_CS2, -1);
}
void __init orion5x_setup_pcie_wa_win(u32 base, u32 size)
{
- setup_cpu_win(win_alloc_count++, base, size,
- TARGET_PCIE, ATTR_PCIE_WA, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++, base, size,
+ TARGET_PCIE, ATTR_PCIE_WA, -1);
}
-int __init orion5x_setup_sram_win(void)
+void __init orion5x_setup_sram_win(void)
{
- return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
- ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
+ orion_setup_cpu_win(&addr_map_cfg, win_alloc_count++,
+ ORION5X_SRAM_PHYS_BASE, ORION5X_SRAM_SIZE,
+ TARGET_SRAM, ATTR_SRAM, -1);
}
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 22ace0bf2f9..0e28bae20bd 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -15,9 +15,9 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
-#include <linux/mbus.h>
#include <linux/mv643xx_i2c.h>
#include <linux/ata_platform.h>
+#include <linux/delay.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
@@ -31,6 +31,7 @@
#include <plat/orion_nand.h>
#include <plat/time.h>
#include <plat/common.h>
+#include <plat/addr-map.h>
#include "common.h"
/*****************************************************************************
@@ -71,8 +72,7 @@ void __init orion5x_map_io(void)
****************************************************************************/
void __init orion5x_ehci0_init(void)
{
- orion_ehci_init(&orion5x_mbus_dram_info,
- ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL);
+ orion_ehci_init(ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL);
}
@@ -81,8 +81,7 @@ void __init orion5x_ehci0_init(void)
****************************************************************************/
void __init orion5x_ehci1_init(void)
{
- orion_ehci_1_init(&orion5x_mbus_dram_info,
- ORION5X_USB1_PHYS_BASE, IRQ_ORION5X_USB1_CTRL);
+ orion_ehci_1_init(ORION5X_USB1_PHYS_BASE, IRQ_ORION5X_USB1_CTRL);
}
@@ -91,7 +90,7 @@ void __init orion5x_ehci1_init(void)
****************************************************************************/
void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
- orion_ge00_init(eth_data, &orion5x_mbus_dram_info,
+ orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
IRQ_ORION5X_ETH_ERR, orion5x_tclk);
}
@@ -121,8 +120,7 @@ void __init orion5x_i2c_init(void)
****************************************************************************/
void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
{
- orion_sata_init(sata_data, &orion5x_mbus_dram_info,
- ORION5X_SATA_PHYS_BASE, IRQ_ORION5X_SATA);
+ orion_sata_init(sata_data, ORION5X_SATA_PHYS_BASE, IRQ_ORION5X_SATA);
}
@@ -158,8 +156,7 @@ void __init orion5x_uart1_init(void)
****************************************************************************/
void __init orion5x_xor_init(void)
{
- orion_xor0_init(&orion5x_mbus_dram_info,
- ORION5X_XOR_PHYS_BASE,
+ orion_xor0_init(ORION5X_XOR_PHYS_BASE,
ORION5X_XOR_PHYS_BASE + 0x200,
IRQ_ORION5X_XOR0, IRQ_ORION5X_XOR1);
}
@@ -169,12 +166,7 @@ void __init orion5x_xor_init(void)
****************************************************************************/
static void __init orion5x_crypto_init(void)
{
- int ret;
-
- ret = orion5x_setup_sram_win();
- if (ret)
- return;
-
+ orion5x_setup_sram_win();
orion_crypto_init(ORION5X_CRYPTO_PHYS_BASE, ORION5X_SRAM_PHYS_BASE,
SZ_8K, IRQ_ORION5X_CESA);
}
@@ -304,6 +296,17 @@ void __init orion5x_init(void)
orion5x_wdt_init();
}
+void orion5x_restart(char mode, const char *cmd)
+{
+ /*
+ * Enable and issue soft reset
+ */
+ orion5x_setbits(RSTOUTn_MASK, (1 << 2));
+ orion5x_setbits(CPU_SOFT_RESET, 1);
+ mdelay(200);
+ orion5x_clrbits(CPU_SOFT_RESET, 1);
+}
+
/*
* Many orion-based systems have buggy bootloader implementations.
* This is a common fixup for bogus memory tags.
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 909489f4d23..d2513ac79ff 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -20,14 +20,13 @@ extern struct sys_timer orion5x_timer;
* functions to map its interfaces and by the machine-setup to map its on-
* board devices. Details in /mach-orion/addr-map.c
*/
-extern struct mbus_dram_target_info orion5x_mbus_dram_info;
void orion5x_setup_cpu_mbus_bridge(void);
void orion5x_setup_dev_boot_win(u32 base, u32 size);
void orion5x_setup_dev0_win(u32 base, u32 size);
void orion5x_setup_dev1_win(u32 base, u32 size);
void orion5x_setup_dev2_win(u32 base, u32 size);
void orion5x_setup_pcie_wa_win(u32 base, u32 size);
-int orion5x_setup_sram_win(void);
+void orion5x_setup_sram_win(void);
void orion5x_ehci0_init(void);
void orion5x_ehci1_init(void);
@@ -39,6 +38,7 @@ void orion5x_spi_init(void);
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
void orion5x_xor_init(void);
+void orion5x_restart(char, const char *);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-orion5x/d2net-setup.c b/arch/arm/mach-orion5x/d2net-setup.c
index 8c8300951f4..d75dcfa0f01 100644
--- a/arch/arm/mach-orion5x/d2net-setup.c
+++ b/arch/arm/mach-orion5x/d2net-setup.c
@@ -343,6 +343,7 @@ MACHINE_START(D2NET, "LaCie d2 Network")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
#endif
@@ -355,6 +356,7 @@ MACHINE_START(BIGDISK, "LaCie Big Disk Network")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index 4b79a80d5e1..a104d5a80e1 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -364,4 +364,5 @@ MACHINE_START(DB88F5281, "Marvell Orion-2 Development Board")
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 343f60e9639..91b0f478859 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -736,4 +736,5 @@ MACHINE_START(DNS323, "D-Link DNS-323")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/edmini_v2-setup.c b/arch/arm/mach-orion5x/edmini_v2-setup.c
index 70a4e9265f0..355e962137c 100644
--- a/arch/arm/mach-orion5x/edmini_v2-setup.c
+++ b/arch/arm/mach-orion5x/edmini_v2-setup.c
@@ -258,4 +258,5 @@ MACHINE_START(EDMINI_V2, "LaCie Ethernet Disk mini V2")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
index c5196101a23..e9d9afdc265 100644
--- a/arch/arm/mach-orion5x/include/mach/io.h
+++ b/arch/arm/mach-orion5x/include/mach/io.h
@@ -15,31 +15,6 @@
#define IO_SPACE_LIMIT 0xffffffff
-static inline void __iomem *
-__arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
-{
- void __iomem *retval;
- unsigned long offs = paddr - ORION5X_REGS_PHYS_BASE;
- if (mtype == MT_DEVICE && size && offs < ORION5X_REGS_SIZE &&
- size <= ORION5X_REGS_SIZE && offs + size <= ORION5X_REGS_SIZE) {
- retval = (void __iomem *)ORION5X_REGS_VIRT_BASE + offs;
- } else {
- retval = __arm_ioremap(paddr, size, mtype);
- }
-
- return retval;
-}
-
-static inline void
-__arch_iounmap(void __iomem *addr)
-{
- if (addr < (void __iomem *)ORION5X_REGS_VIRT_BASE ||
- addr >= (void __iomem *)(ORION5X_REGS_VIRT_BASE + ORION5X_REGS_SIZE))
- __iounmap(addr);
-}
-
-#define __arch_ioremap __arch_ioremap
-#define __arch_iounmap __arch_iounmap
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index 0a28bbc7689..2745f5d95b3 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -69,7 +69,7 @@
******************************************************************************/
#define ORION5X_DDR_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x00000)
-
+#define ORION5X_DDR_WINDOW_CPU_BASE (ORION5X_DDR_VIRT_BASE | 0x1500)
#define ORION5X_DEV_BUS_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x10000)
#define ORION5X_DEV_BUS_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x10000)
#define ORION5X_DEV_BUS_REG(x) (ORION5X_DEV_BUS_VIRT_BASE | (x))
diff --git a/arch/arm/mach-orion5x/include/mach/system.h b/arch/arm/mach-orion5x/include/mach/system.h
index a1d6e46ab03..825a2650cef 100644
--- a/arch/arm/mach-orion5x/include/mach/system.h
+++ b/arch/arm/mach-orion5x/include/mach/system.h
@@ -11,23 +11,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <mach/bridge-regs.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Enable and issue soft reset
- */
- orion5x_setbits(RSTOUTn_MASK, (1 << 2));
- orion5x_setbits(CPU_SOFT_RESET, 1);
- mdelay(200);
- orion5x_clrbits(CPU_SOFT_RESET, 1);
-}
-
-
#endif
diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h
deleted file mode 100644
index 06b50aeff7b..00000000000
--- a/arch/arm/mach-orion5x/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfd800000UL
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index d3cd3f63258..47587b83284 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -386,6 +386,7 @@ MACHINE_START(KUROBOX_PRO, "Buffalo/Revogear Kurobox Pro")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
#endif
@@ -399,5 +400,6 @@ MACHINE_START(LINKSTATION_PRO, "Buffalo Linkstation Pro/Live")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c
index 9503fff404e..527213169db 100644
--- a/arch/arm/mach-orion5x/ls-chl-setup.c
+++ b/arch/arm/mach-orion5x/ls-chl-setup.c
@@ -140,7 +140,7 @@ static struct mv_sata_platform_data lschl_sata_data = {
static void lschl_power_off(void)
{
- arm_machine_restart('h', NULL);
+ orion5x_restart('h', NULL);
}
/*****************************************************************************
@@ -325,4 +325,5 @@ MACHINE_START(LINKSTATION_LSCHL, "Buffalo Linkstation LiveV3 (LS-CHL)")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c
index ed6d772f4a2..9a8697b97dd 100644
--- a/arch/arm/mach-orion5x/ls_hgl-setup.c
+++ b/arch/arm/mach-orion5x/ls_hgl-setup.c
@@ -186,7 +186,7 @@ static struct mv_sata_platform_data ls_hgl_sata_data = {
static void ls_hgl_power_off(void)
{
- arm_machine_restart('h', NULL);
+ orion5x_restart('h', NULL);
}
@@ -272,4 +272,5 @@ MACHINE_START(LINKSTATION_LS_HGL, "Buffalo Linkstation LS-HGL")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c
index 743f7f1db18..09c73659f46 100644
--- a/arch/arm/mach-orion5x/lsmini-setup.c
+++ b/arch/arm/mach-orion5x/lsmini-setup.c
@@ -186,7 +186,7 @@ static struct mv_sata_platform_data lsmini_sata_data = {
static void lsmini_power_off(void)
{
- arm_machine_restart('h', NULL);
+ orion5x_restart('h', NULL);
}
@@ -274,5 +274,6 @@ MACHINE_START(LINKSTATION_MINI, "Buffalo Linkstation Mini")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index b6ddd7a5db6..5b70026f478 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/mbus.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <plat/mpp.h>
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 6020e26b1c7..65faaa34de6 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -267,5 +267,6 @@ MACHINE_START(MSS2, "Maxtor Shared Storage II")
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
- .fixup = tag_fixup_mem32
+ .fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 201ae367628..c87fde4deec 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -234,5 +234,6 @@ MACHINE_START(MV2120, "HP Media Vault mv2120")
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
- .fixup = tag_fixup_mem32
+ .fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c
index 6197c79a2ec..0180c393c71 100644
--- a/arch/arm/mach-orion5x/net2big-setup.c
+++ b/arch/arm/mach-orion5x/net2big-setup.c
@@ -426,5 +426,6 @@ MACHINE_START(NET2BIG, "LaCie 2Big Network")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index bc4a920e26e..a494c470e3e 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -18,6 +18,7 @@
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
+#include <plat/addr-map.h>
#include "common.h"
/*****************************************************************************
@@ -145,7 +146,7 @@ static int __init pcie_setup(struct pci_sys_data *sys)
/*
* Generic PCIe unit setup.
*/
- orion_pcie_setup(PCIE_BASE, &orion5x_mbus_dram_info);
+ orion_pcie_setup(PCIE_BASE);
/*
* Check whether to apply Orion-1/Orion-NAS PCIe config
@@ -477,7 +478,7 @@ static int __init pci_setup(struct pci_sys_data *sys)
/*
* Point PCI unit MBUS decode windows to DRAM space.
*/
- orion5x_setup_pci_wins(&orion5x_mbus_dram_info);
+ orion5x_setup_pci_wins(&orion_mbus_dram_info);
/*
* Master + Slave enable
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index ebd6767d8e8..292038fc59f 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -175,4 +175,5 @@ MACHINE_START(RD88F5181L_FXO, "Marvell Orion-VoIP FXO Reference Design")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index 05db2d336b0..c44eabaabc1 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -187,4 +187,5 @@ MACHINE_START(RD88F5181L_GE, "Marvell Orion-VoIP GE Reference Design")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index e47fa0578ae..96438b6b202 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -311,4 +311,5 @@ MACHINE_START(RD88F5182, "Marvell Orion-NAS Reference Design")
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 64317251ec0..2c5fab00d20 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -128,4 +128,5 @@ MACHINE_START(RD88F6183AP_GE, "Marvell Orion-1-90 AP GE Reference Design")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c
index 29f1526f7b7..632a861ef82 100644
--- a/arch/arm/mach-orion5x/terastation_pro2-setup.c
+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c
@@ -364,4 +364,5 @@ MACHINE_START(TERASTATION_PRO2, "Buffalo Terastation Pro II/Live")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 31e51f9b4b6..5d640874558 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -178,7 +178,7 @@ static struct hw_pci qnap_ts209_pci __initdata = {
static int __init qnap_ts209_pci_init(void)
{
- if (machine_is_ts_x09())
+ if (machine_is_ts209())
pci_common_init(&qnap_ts209_pci);
return 0;
@@ -329,4 +329,5 @@ MACHINE_START(TS209, "QNAP TS-109/TS-209")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 0fbcc14e09d..4e6ff759cd3 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -318,4 +318,5 @@ MACHINE_START(TS409, "QNAP TS-409")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index b35e2005a34..c96f37472ed 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -627,4 +627,5 @@ MACHINE_START(TS78XX, "Technologic Systems TS-78xx SBC")
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index b8be7d8d0cf..078c03f7cd5 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -179,4 +179,5 @@ MACHINE_START(WNR854T, "Netgear WNR854T")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index faf81a03936..46a9778171c 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -267,4 +267,5 @@ MACHINE_START(WRT350N_V2, "Linksys WRT350N v2")
.init_irq = orion5x_init_irq,
.timer = &orion5x_timer,
.fixup = tag_fixup_mem32,
+ .restart = orion5x_restart,
MACHINE_END
diff --git a/arch/arm/mach-picoxcell/Makefile b/arch/arm/mach-picoxcell/Makefile
index c550b636348..e5ec4a8d9bc 100644
--- a/arch/arm/mach-picoxcell/Makefile
+++ b/arch/arm/mach-picoxcell/Makefile
@@ -1,3 +1,2 @@
obj-y := common.o
obj-y += time.o
-obj-y += io.o
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
index 34d08347be5..a2e8ae8b582 100644
--- a/arch/arm/mach-picoxcell/common.c
+++ b/arch/arm/mach-picoxcell/common.c
@@ -7,23 +7,59 @@
*
* All enquiries to support@picochip.com
*/
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
#include <asm/hardware/vic.h>
+#include <asm/mach/map.h>
#include <mach/map.h>
#include <mach/picoxcell_soc.h>
#include "common.h"
+#define WDT_CTRL_REG_EN_MASK (1 << 0)
+#define WDT_CTRL_REG_OFFS (0x00)
+#define WDT_TIMEOUT_REG_OFFS (0x04)
+static void __iomem *wdt_regs;
+
+/*
+ * The machine restart method can be called from an atomic context so we won't
+ * be able to ioremap the regs then.
+ */
+static void picoxcell_setup_restart(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, NULL,
+ "snps,dw-apb-wdg");
+ if (WARN(!np, "unable to setup watchdog restart"))
+ return;
+
+ wdt_regs = of_iomap(np, 0);
+ WARN(!wdt_regs, "failed to remap watchdog regs");
+}
+
+static struct map_desc io_map __initdata = {
+ .virtual = PHYS_TO_IO(PICOXCELL_PERIPH_BASE),
+ .pfn = __phys_to_pfn(PICOXCELL_PERIPH_BASE),
+ .length = PICOXCELL_PERIPH_LENGTH,
+ .type = MT_DEVICE,
+};
+
+static void __init picoxcell_map_io(void)
+{
+ iotable_init(&io_map, 1);
+}
+
static void __init picoxcell_init_machine(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ picoxcell_setup_restart();
}
static const char *picoxcell_dt_match[] = {
@@ -33,23 +69,36 @@ static const char *picoxcell_dt_match[] = {
};
static const struct of_device_id vic_of_match[] __initconst = {
- { .compatible = "arm,pl192-vic" },
+ { .compatible = "arm,pl192-vic", .data = vic_of_init, },
{ /* Sentinel */ }
};
static void __init picoxcell_init_irq(void)
{
- vic_init(IO_ADDRESS(PICOXCELL_VIC0_BASE), 0, ~0, 0);
- vic_init(IO_ADDRESS(PICOXCELL_VIC1_BASE), 32, ~0, 0);
- irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC0_BASE, 0);
- irq_domain_generate_simple(vic_of_match, PICOXCELL_VIC1_BASE, 32);
+ of_irq_init(vic_of_match);
+}
+
+static void picoxcell_wdt_restart(char mode, const char *cmd)
+{
+ /*
+ * Configure the watchdog to reset with the shortest possible timeout
+ * and give it a chance to do the reset.
+ */
+ if (wdt_regs) {
+ writel_relaxed(WDT_CTRL_REG_EN_MASK, wdt_regs + WDT_CTRL_REG_OFFS);
+ writel_relaxed(0, wdt_regs + WDT_TIMEOUT_REG_OFFS);
+ /* No sleeping, possibly atomic. */
+ mdelay(500);
+ }
}
DT_MACHINE_START(PICOXCELL, "Picochip picoXcell")
.map_io = picoxcell_map_io,
- .nr_irqs = ARCH_NR_IRQS,
+ .nr_irqs = NR_IRQS_LEGACY,
.init_irq = picoxcell_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &picoxcell_timer,
.init_machine = picoxcell_init_machine,
.dt_compat = picoxcell_dt_match,
+ .restart = picoxcell_wdt_restart,
MACHINE_END
diff --git a/arch/arm/mach-picoxcell/common.h b/arch/arm/mach-picoxcell/common.h
index 5263f0fa095..83d55ab956a 100644
--- a/arch/arm/mach-picoxcell/common.h
+++ b/arch/arm/mach-picoxcell/common.h
@@ -13,6 +13,5 @@
#include <asm/mach/time.h>
extern struct sys_timer picoxcell_timer;
-extern void picoxcell_map_io(void);
#endif /* __PICOXCELL_COMMON_H__ */
diff --git a/arch/arm/mach-picoxcell/include/mach/entry-macro.S b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
index a6b09f75d9d..9b505ac00be 100644
--- a/arch/arm/mach-picoxcell/include/mach/entry-macro.S
+++ b/arch/arm/mach-picoxcell/include/mach/entry-macro.S
@@ -9,11 +9,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/map.h>
+ .macro disable_fiq
+ .endm
-#define VA_VIC0 IO_ADDRESS(PICOXCELL_VIC0_BASE)
-#define VA_VIC1 IO_ADDRESS(PICOXCELL_VIC1_BASE)
-
-#include <asm/entry-macro-vic2.S>
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
diff --git a/arch/arm/mach-picoxcell/include/mach/irqs.h b/arch/arm/mach-picoxcell/include/mach/irqs.h
index 4d13ed97091..59eac1ee282 100644
--- a/arch/arm/mach-picoxcell/include/mach/irqs.h
+++ b/arch/arm/mach-picoxcell/include/mach/irqs.h
@@ -1,8 +1,6 @@
/*
* Copyright (c) 2011 Picochip Ltd., Jamie Iles
*
- * This file contains the hardware definitions of the picoXcell SoC devices.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -16,10 +14,7 @@
#ifndef __MACH_IRQS_H
#define __MACH_IRQS_H
-#define ARCH_NR_IRQS 64
-#define NR_IRQS (128 + ARCH_NR_IRQS)
-
-#define IRQ_VIC0_BASE 0
-#define IRQ_VIC1_BASE 32
+/* We dynamically allocate our irq_desc's. */
+#define NR_IRQS 0
#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-picoxcell/include/mach/memory.h b/arch/arm/mach-picoxcell/include/mach/memory.h
deleted file mode 100644
index 40a8c178f10..00000000000
--- a/arch/arm/mach-picoxcell/include/mach/memory.h
+++ /dev/null
@@ -1 +0,0 @@
-/* empty */
diff --git a/arch/arm/mach-picoxcell/include/mach/system.h b/arch/arm/mach-picoxcell/include/mach/system.h
index 67c589b0c1b..1a5d8cb57df 100644
--- a/arch/arm/mach-picoxcell/include/mach/system.h
+++ b/arch/arm/mach-picoxcell/include/mach/system.h
@@ -23,9 +23,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(int mode, const char *cmd)
-{
- /* Watchdog reset to go here. */
-}
-
#endif /* __ASM_ARCH_SYSTEM_H */
diff --git a/arch/arm/mach-picoxcell/include/mach/vmalloc.h b/arch/arm/mach-picoxcell/include/mach/vmalloc.h
deleted file mode 100644
index 0216cc4b1f0..00000000000
--- a/arch/arm/mach-picoxcell/include/mach/vmalloc.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#define VMALLOC_END 0xfe000000UL
diff --git a/arch/arm/mach-picoxcell/io.c b/arch/arm/mach-picoxcell/io.c
deleted file mode 100644
index 39e9b9e8cc3..00000000000
--- a/arch/arm/mach-picoxcell/io.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * All enquiries to support@picochip.com
- */
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/map.h>
-#include <mach/picoxcell_soc.h>
-
-#include "common.h"
-
-void __init picoxcell_map_io(void)
-{
- struct map_desc io_map = {
- .virtual = PHYS_TO_IO(PICOXCELL_PERIPH_BASE),
- .pfn = __phys_to_pfn(PICOXCELL_PERIPH_BASE),
- .length = PICOXCELL_PERIPH_LENGTH,
- .type = MT_DEVICE,
- };
-
- iotable_init(&io_map, 1);
-}
diff --git a/arch/arm/mach-picoxcell/time.c b/arch/arm/mach-picoxcell/time.c
index 90a554ff449..6c89cf8ab22 100644
--- a/arch/arm/mach-picoxcell/time.c
+++ b/arch/arm/mach-picoxcell/time.c
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/sched.h>
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
@@ -66,21 +65,11 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
dw_apb_clocksource_register(cs);
}
-static DEFINE_CLOCK_DATA(cd);
static void __iomem *sched_io_base;
-unsigned long long notrace sched_clock(void)
+static u32 notrace picoxcell_read_sched_clock(void)
{
- cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace picoxcell_update_sched_clock(void)
-{
- cycle_t cyc = sched_io_base ? __raw_readl(sched_io_base) : 0;
-
- update_sched_clock(&cd, cyc, (u32)~0);
+ return __raw_readl(sched_io_base);
}
static const struct of_device_id picoxcell_rtc_ids[] __initconst = {
@@ -100,7 +89,7 @@ static void picoxcell_init_sched_clock(void)
timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
of_node_put(sched_timer);
- init_sched_clock(&cd, picoxcell_update_sched_clock, 32, rate);
+ setup_sched_clock(picoxcell_read_sched_clock, 32, rate);
}
static const struct of_device_id picoxcell_timer_ids[] __initconst = {
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index cdb95e726f5..4cfb40b2ec1 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -260,6 +260,11 @@ void __init pnx4008_map_io(void)
iotable_init(pnx4008_io_desc, ARRAY_SIZE(pnx4008_io_desc));
}
+static void pnx4008_restart(char mode, const char *cmd)
+{
+ soft_restart(0);
+}
+
extern struct sys_timer pnx4008_timer;
MACHINE_START(PNX4008, "Philips PNX4008")
@@ -269,4 +274,5 @@ MACHINE_START(PNX4008, "Philips PNX4008")
.init_irq = pnx4008_init_irq,
.init_machine = pnx4008_init,
.timer = &pnx4008_timer,
+ .restart = pnx4008_restart,
MACHINE_END
diff --git a/arch/arm/mach-pnx4008/include/mach/system.h b/arch/arm/mach-pnx4008/include/mach/system.h
index 5dda2bb55f8..60cfe718809 100644
--- a/arch/arm/mach-pnx4008/include/mach/system.h
+++ b/arch/arm/mach-pnx4008/include/mach/system.h
@@ -21,18 +21,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
static void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- cpu_reset(0);
-}
-
#endif
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
deleted file mode 100644
index 184913c7114..00000000000
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-pnx4008/include/mach/vmalloc.h
- *
- * Author: Vitaly Wool <source@mvista.com>
- *
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index 83e5d212811..b28a930d4f8 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -16,6 +16,7 @@ extern struct sys_timer sirfsoc_timer;
extern void __init sirfsoc_of_irq_init(void);
extern void __init sirfsoc_of_clk_init(void);
+extern void sirfsoc_restart(char, const char *);
#ifndef CONFIG_DEBUG_LL
static inline void sirfsoc_map_lluart(void) {}
diff --git a/arch/arm/mach-prima2/include/mach/map.h b/arch/arm/mach-prima2/include/mach/map.h
index 66b1ae2e553..6f243532570 100644
--- a/arch/arm/mach-prima2/include/mach/map.h
+++ b/arch/arm/mach-prima2/include/mach/map.h
@@ -9,8 +9,10 @@
#ifndef __MACH_PRIMA2_MAP_H__
#define __MACH_PRIMA2_MAP_H__
-#include <mach/vmalloc.h>
+#include <linux/const.h>
-#define SIRFSOC_VA(x) (VMALLOC_END + ((x) & 0x00FFF000))
+#define SIRFSOC_VA_BASE _AC(0xFEC00000, UL)
+
+#define SIRFSOC_VA(x) (SIRFSOC_VA_BASE + ((x) & 0x00FFF000))
#endif
diff --git a/arch/arm/mach-prima2/include/mach/system.h b/arch/arm/mach-prima2/include/mach/system.h
index 0dbd257ad16..2c7d2a9d0c9 100644
--- a/arch/arm/mach-prima2/include/mach/system.h
+++ b/arch/arm/mach-prima2/include/mach/system.h
@@ -9,21 +9,9 @@
#ifndef __MACH_SYSTEM_H__
#define __MACH_SYSTEM_H__
-#include <linux/bitops.h>
-#include <mach/hardware.h>
-
-#define SIRFSOC_SYS_RST_BIT BIT(31)
-
-extern void __iomem *sirfsoc_rstc_base;
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
-}
-
#endif
diff --git a/arch/arm/mach-prima2/include/mach/vmalloc.h b/arch/arm/mach-prima2/include/mach/vmalloc.h
deleted file mode 100644
index c9f90fec78e..00000000000
--- a/arch/arm/mach-prima2/include/mach/vmalloc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * arch/arm/ach-prima2/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 – 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <linux/const.h>
-
-#define VMALLOC_END _AC(0xFEC00000, UL)
-
-#endif
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index cb53160f6c5..26ebb57719d 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index ef555c04196..02b9c05ff99 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
@@ -39,4 +40,5 @@ MACHINE_START(PRIMA2_EVB, "prima2cb")
.dma_zone_size = SZ_256M,
.init_machine = sirfsoc_mach_init,
.dt_compat = prima2cb_dt_match,
+ .restart = sirfsoc_restart,
MACHINE_END
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 492cfa8d261..762adb73ab7 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -68,3 +68,10 @@ int sirfsoc_reset_device(struct device *dev)
return 0;
}
+
+#define SIRFSOC_SYS_RST_BIT BIT(31)
+
+void sirfsoc_restart(char mode, const char *cmd)
+{
+ writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
+}
diff --git a/arch/arm/mach-pxa/am200epd.c b/arch/arm/mach-pxa/am200epd.c
index 4cb069fd9af..ccdac4b6a46 100644
--- a/arch/arm/mach-pxa/am200epd.c
+++ b/arch/arm/mach-pxa/am200epd.c
@@ -138,7 +138,7 @@ static void am200_cleanup(struct metronomefb_par *par)
{
int i;
- free_irq(IRQ_GPIO(RDY_GPIO_PIN), par);
+ free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), par);
for (i = 0; i < ARRAY_SIZE(gpios); i++)
gpio_free(gpios[i]);
@@ -292,7 +292,7 @@ static int am200_setup_irq(struct fb_info *info)
{
int ret;
- ret = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am200_handle_irq,
+ ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am200_handle_irq,
IRQF_DISABLED|IRQF_TRIGGER_FALLING,
"AM200", info->par);
if (ret)
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index fa8bad235d9..76c4b949403 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -176,7 +176,7 @@ static void am300_cleanup(struct broadsheetfb_par *par)
{
int i;
- free_irq(IRQ_GPIO(RDY_GPIO_PIN), par);
+ free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), par);
for (i = 0; i < ARRAY_SIZE(gpios); i++)
gpio_free(gpios[i]);
@@ -240,7 +240,7 @@ static int am300_setup_irq(struct fb_info *info)
int ret;
struct broadsheetfb_par *par = info->par;
- ret = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am300_handle_irq,
+ ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am300_handle_irq,
IRQF_DISABLED|IRQF_TRIGGER_RISING,
"AM300", par);
if (ret)
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index fc0b8544e17..c35456f02ac 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -179,7 +180,7 @@ static unsigned long balloon3_ac97_pin_config[] __initdata = {
};
static struct ucb1400_pdata vpac270_ucb1400_pdata = {
- .irq = IRQ_GPIO(BALLOON3_GPIO_CODEC_IRQ),
+ .irq = PXA_GPIO_TO_IRQ(BALLOON3_GPIO_CODEC_IRQ),
};
@@ -307,7 +308,7 @@ static inline void balloon3_mmc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static void balloon3_udc_command(int cmd)
{
if (cmd == PXA2XX_UDC_CMD_CONNECT)
@@ -829,4 +830,5 @@ MACHINE_START(BALLOON3, "Balloon3")
.timer = &pxa_timer,
.init_machine = balloon3_init,
.atag_offset = 0x100,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index 4efc16d39c7..c91727d1fe0 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -50,8 +50,8 @@ static struct resource capc7117_ide_resources[] = {
.flags = IORESOURCE_MEM
},
[2] = {
- .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO76)),
- .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO76)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO76)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO76)),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING
}
};
@@ -80,7 +80,7 @@ static void __init capc7117_ide_init(void)
static struct plat_serial8250_port ti16c752_platform_data[] = {
[0] = {
.mapbase = 0x14000000,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO78)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO78)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
@@ -89,7 +89,7 @@ static struct plat_serial8250_port ti16c752_platform_data[] = {
},
[1] = {
.mapbase = 0x14000040,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO79)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO79)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
@@ -98,7 +98,7 @@ static struct plat_serial8250_port ti16c752_platform_data[] = {
},
[2] = {
.mapbase = 0x14000080,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO80)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO80)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
@@ -107,7 +107,7 @@ static struct plat_serial8250_port ti16c752_platform_data[] = {
},
[3] = {
.mapbase = 0x140000c0,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO81)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO81)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
@@ -153,5 +153,6 @@ MACHINE_START(CAPC7117,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
- .init_machine = capc7117_init
+ .init_machine = capc7117_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index 13518a70539..431ef56700c 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -33,7 +33,7 @@
/* GPIO IRQ usage */
#define GPIO83_MMC_IRQ (83)
-#define CMX270_MMC_IRQ IRQ_GPIO(GPIO83_MMC_IRQ)
+#define CMX270_MMC_IRQ PXA_GPIO_TO_IRQ(GPIO83_MMC_IRQ)
/* MMC power enable */
#define GPIO105_MMC_POWER (105)
@@ -380,7 +380,7 @@ static struct spi_board_info cm_x270_spi_devices[] __initdata = {
.modalias = "libertas_spi",
.max_speed_hz = 13000000,
.bus_num = 2,
- .irq = gpio_to_irq(95),
+ .irq = PXA_GPIO_TO_IRQ(95),
.chip_select = 0,
.controller_data = &cm_x270_libertas_chip,
.platform_data = &cm_x270_libertas_pdata,
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index f2e4190080c..8fa4ad27edf 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -58,8 +58,8 @@ extern void cmx270_init(void);
#define CMX255_GPIO_IT8152_IRQ (0)
#define CMX270_GPIO_IT8152_IRQ (22)
-#define CMX255_ETHIRQ IRQ_GPIO(GPIO22_ETHIRQ)
-#define CMX270_ETHIRQ IRQ_GPIO(GPIO10_ETHIRQ)
+#define CMX255_ETHIRQ PXA_GPIO_TO_IRQ(GPIO22_ETHIRQ)
+#define CMX270_ETHIRQ PXA_GPIO_TO_IRQ(GPIO10_ETHIRQ)
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct resource cmx255_dm9000_resource[] = {
@@ -524,4 +524,5 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX")
#ifdef CONFIG_PCI
.dma_zone_size = SZ_64M,
#endif
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index e096bba8fd5..4b981b82d2a 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -64,7 +64,7 @@
#define GPIO82_MMC_IRQ (82)
#define GPIO85_MMC_WP (85)
-#define CM_X300_MMC_IRQ IRQ_GPIO(GPIO82_MMC_IRQ)
+#define CM_X300_MMC_IRQ PXA_GPIO_TO_IRQ(GPIO82_MMC_IRQ)
#define GPIO95_RTC_CS (95)
#define GPIO96_RTC_WR (96)
@@ -229,8 +229,8 @@ static struct resource dm9000_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO99)),
- .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO99)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO99)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO99)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -858,4 +858,5 @@ MACHINE_START(CM_X300, "CM-X300 module")
.timer = &pxa_timer,
.init_machine = cm_x300_init,
.fixup = cm_x300_fixup,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 80538b8806e..248804bb2c9 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -183,7 +183,7 @@ static inline void income_lcd_init(void) {}
/******************************************************************************
* Backlight
******************************************************************************/
-#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM__MODULE)
+#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
static struct platform_pwm_backlight_data income_backlight_data = {
.pwm_id = 0,
.max_brightness = 0x3ff,
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 05bfa1b1c00..29d5d541f60 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -218,8 +218,8 @@ static struct resource colibri_pxa270_dm9000_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = gpio_to_irq(GPIO114_COLIBRI_PXA270_ETH_IRQ),
- .end = gpio_to_irq(GPIO114_COLIBRI_PXA270_ETH_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO114_COLIBRI_PXA270_ETH_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO114_COLIBRI_PXA270_ETH_IRQ),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
},
};
@@ -249,7 +249,7 @@ static pxa2xx_audio_ops_t colibri_pxa270_ac97_pdata = {
};
static struct ucb1400_pdata colibri_pxa270_ucb1400_pdata = {
- .irq = gpio_to_irq(GPIO113_COLIBRI_PXA270_TS_IRQ),
+ .irq = PXA_GPIO_TO_IRQ(GPIO113_COLIBRI_PXA270_TS_IRQ),
};
static struct platform_device colibri_pxa270_ucb1400_device = {
@@ -313,6 +313,7 @@ MACHINE_START(COLIBRI, "Toradex Colibri PXA270")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
@@ -322,5 +323,6 @@ MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index c825e8bf2db..0846d210cb0 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -78,8 +78,8 @@ static struct resource colibri_asix_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
- .end = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
+ .start = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
+ .end = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
}
};
@@ -189,5 +189,6 @@ MACHINE_START(COLIBRI300, "Toradex Colibri PXA300")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 692e1ffc558..6ad3359063a 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -115,8 +115,8 @@ static struct resource colibri_asix_resource[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
- .end = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
+ .start = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
+ .end = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
}
};
@@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
static inline void __init colibri_pxa320_init_eth(void) {}
#endif /* CONFIG_AX88796 */
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
.gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
.gpio_pullup = -1,
@@ -259,5 +259,6 @@ MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 549468d088b..66600f05e43 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -531,7 +531,7 @@ static struct spi_board_info corgi_spi_devices[] = {
.chip_select = 0,
.platform_data = &corgi_ads7846_info,
.controller_data= &corgi_ads7846_chip,
- .irq = gpio_to_irq(CORGI_GPIO_TP_INT),
+ .irq = PXA_GPIO_TO_IRQ(CORGI_GPIO_TP_INT),
}, {
.modalias = "corgi-lcd",
.max_speed_hz = 50000,
@@ -655,7 +655,7 @@ static void corgi_poweroff(void)
/* Green LED off tells the bootloader to halt */
gpio_set_value(CORGI_GPIO_LED_GREEN, 0);
- arm_machine_restart('h', NULL);
+ pxa_restart('h', NULL);
}
static void corgi_restart(char mode, const char *cmd)
@@ -664,13 +664,12 @@ static void corgi_restart(char mode, const char *cmd)
/* Green LED on tells the bootloader to reboot */
gpio_set_value(CORGI_GPIO_LED_GREEN, 1);
- arm_machine_restart('h', cmd);
+ pxa_restart('h', cmd);
}
static void __init corgi_init(void)
{
pm_power_off = corgi_poweroff;
- arm_pm_restart = corgi_restart;
/* Stop 3.6MHz and drive HIGH to PCMCIA and CS */
PCFR |= PCFR_OPDE;
@@ -726,6 +725,7 @@ MACHINE_START(CORGI, "SHARP Corgi")
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
.timer = &pxa_timer,
+ .restart = corgi_restart,
MACHINE_END
#endif
@@ -737,6 +737,7 @@ MACHINE_START(SHEPHERD, "SHARP Shepherd")
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
.timer = &pxa_timer,
+ .restart = corgi_restart,
MACHINE_END
#endif
@@ -748,6 +749,7 @@ MACHINE_START(HUSKY, "SHARP Husky")
.handle_irq = pxa25x_handle_irq,
.init_machine = corgi_init,
.timer = &pxa_timer,
+ .restart = corgi_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index 29034778bfd..39e265cfc86 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
@@ -40,7 +41,9 @@ static struct gpio charger_gpios[] = {
{ CORGI_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" },
{ CORGI_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" },
{ CORGI_GPIO_CHRG_UKN, GPIOF_OUT_INIT_LOW, "Charger Unknown" },
+ { CORGI_GPIO_AC_IN, GPIOF_IN, "Charger Detection" },
{ CORGI_GPIO_KEY_INT, GPIOF_IN, "Key Interrupt" },
+ { CORGI_GPIO_WAKEUP, GPIOF_IN, "System wakeup notification" },
};
static void corgi_charger_init(void)
@@ -90,7 +93,12 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
{
int is_resume = 0;
- dev_dbg(sharpsl_pm.dev, "GPLR0 = %x,%x\n", GPLR0, PEDR);
+ dev_dbg(sharpsl_pm.dev, "PEDR = %x, GPIO_AC_IN = %d, "
+ "GPIO_CHRG_FULL = %d, GPIO_KEY_INT = %d, GPIO_WAKEUP = %d\n",
+ PEDR, gpio_get_value(CORGI_GPIO_AC_IN),
+ gpio_get_value(CORGI_GPIO_CHRG_FULL),
+ gpio_get_value(CORGI_GPIO_KEY_INT),
+ gpio_get_value(CORGI_GPIO_WAKEUP));
if ((PEDR & GPIO_bit(CORGI_GPIO_AC_IN))) {
if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
@@ -124,14 +132,21 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
static unsigned long corgi_charger_wakeup(void)
{
- return ~GPLR0 & ( GPIO_bit(CORGI_GPIO_AC_IN) | GPIO_bit(CORGI_GPIO_KEY_INT) | GPIO_bit(CORGI_GPIO_WAKEUP) );
+ unsigned long ret;
+
+ ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN))
+ | (!gpio_get_value(CORGI_GPIO_KEY_INT)
+ << GPIO_bit(CORGI_GPIO_KEY_INT))
+ | (!gpio_get_value(CORGI_GPIO_WAKEUP)
+ << GPIO_bit(CORGI_GPIO_WAKEUP));
+ return ret;
}
unsigned long corgipm_read_devdata(int type)
{
switch(type) {
case SHARPSL_STATUS_ACIN:
- return ((GPLR(CORGI_GPIO_AC_IN) & GPIO_bit(CORGI_GPIO_AC_IN)) != 0);
+ return !gpio_get_value(CORGI_GPIO_AC_IN);
case SHARPSL_STATUS_LOCK:
return gpio_get_value(sharpsl_pm.machinfo->gpio_batlock);
case SHARPSL_STATUS_CHRGFULL:
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index 5e2cf39e9e4..fb5a51d834e 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -278,4 +278,5 @@ MACHINE_START(CSB726, "Cogent CSB726")
.handle_irq = pxa27x_handle_irq,
.init_machine = csb726_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 2e0425404de..18fd177073f 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -415,9 +415,29 @@ static struct resource pxa_rtc_resources[] = {
},
};
+static struct resource sa1100_rtc_resources[] = {
+ [0] = {
+ .start = 0x40900000,
+ .end = 0x409000ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RTC1Hz,
+ .end = IRQ_RTC1Hz,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_RTCAlrm,
+ .end = IRQ_RTCAlrm,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
struct platform_device sa1100_device_rtc = {
.name = "sa1100-rtc",
.id = -1,
+ .num_resources = ARRAY_SIZE(sa1100_rtc_resources),
+ .resource = sa1100_rtc_resources,
};
struct platform_device pxa_device_rtc = {
@@ -1051,6 +1071,36 @@ struct platform_device pxa3xx_device_ssp4 = {
};
#endif /* CONFIG_PXA3xx || CONFIG_PXA95x */
+struct resource pxa_resource_gpio[] = {
+ {
+ .start = 0x40e00000,
+ .end = 0x40e0ffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_GPIO0,
+ .end = IRQ_GPIO0,
+ .name = "gpio0",
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = IRQ_GPIO1,
+ .end = IRQ_GPIO1,
+ .name = "gpio1",
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = IRQ_GPIO_2_x,
+ .end = IRQ_GPIO_2_x,
+ .name = "gpio_mux",
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device pxa_device_gpio = {
+ .name = "pxa-gpio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pxa_resource_gpio),
+ .resource = pxa_resource_gpio,
+};
+
/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */
void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h
index 2fd5a8b3575..1475db10725 100644
--- a/arch/arm/mach-pxa/devices.h
+++ b/arch/arm/mach-pxa/devices.h
@@ -16,6 +16,7 @@ extern struct platform_device pxa_device_ficp;
extern struct platform_device sa1100_device_rtc;
extern struct platform_device pxa_device_rtc;
extern struct platform_device pxa_device_ac97;
+extern struct platform_device pxa_device_gpio;
extern struct platform_device pxa27x_device_i2c_power;
extern struct platform_device pxa27x_device_ohci;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 94acc0b01dd..d80c0ba9a09 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -70,7 +70,7 @@
/* common GPIOs */
#define GPIO11_NAND_CS (11)
#define GPIO41_ETHIRQ (41)
-#define EM_X270_ETHIRQ IRQ_GPIO(GPIO41_ETHIRQ)
+#define EM_X270_ETHIRQ PXA_GPIO_TO_IRQ(GPIO41_ETHIRQ)
#define GPIO115_WLAN_PWEN (115)
#define GPIO19_WLAN_STRAP (19)
#define GPIO9_USB_VBUS_EN (9)
@@ -805,7 +805,7 @@ static struct spi_board_info em_x270_spi_devices[] __initdata = {
.modalias = "libertas_spi",
.max_speed_hz = 13000000,
.bus_num = 2,
- .irq = IRQ_GPIO(116),
+ .irq = PXA_GPIO_TO_IRQ(116),
.chip_select = 0,
.controller_data = &em_x270_libertas_chip,
.platform_data = &em_x270_libertas_pdata,
@@ -1203,7 +1203,7 @@ static struct da903x_platform_data em_x270_da9030_info = {
static struct i2c_board_info em_x270_i2c_pmic_info = {
I2C_BOARD_INFO("da9030", 0x49),
- .irq = IRQ_GPIO(0),
+ .irq = PXA_GPIO_TO_IRQ(0),
.platform_data = &em_x270_da9030_info,
};
@@ -1305,6 +1305,7 @@ MACHINE_START(EM_X270, "Compulab EM-X270")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = em_x270_init,
+ .restart = pxa_restart,
MACHINE_END
MACHINE_START(EXEDA, "Compulab eXeda")
@@ -1314,4 +1315,5 @@ MACHINE_START(EXEDA, "Compulab eXeda")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = em_x270_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index d82b7aa3c09..f79a610c62f 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -119,8 +119,8 @@ struct resource eseries_tmio_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_GPIO(GPIO_ESERIES_TMIO_IRQ),
- .end = IRQ_GPIO(GPIO_ESERIES_TMIO_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO_ESERIES_TMIO_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO_ESERIES_TMIO_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -196,6 +196,7 @@ MACHINE_START(E330, "Toshiba e330")
.fixup = eseries_fixup,
.init_machine = e330_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -246,6 +247,7 @@ MACHINE_START(E350, "Toshiba e350")
.fixup = eseries_fixup,
.init_machine = e350_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -369,6 +371,7 @@ MACHINE_START(E400, "Toshiba e400")
.fixup = eseries_fixup,
.init_machine = e400_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -558,6 +561,7 @@ MACHINE_START(E740, "Toshiba e740")
.fixup = eseries_fixup,
.init_machine = e740_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -750,6 +754,7 @@ MACHINE_START(E750, "Toshiba e750")
.fixup = eseries_fixup,
.init_machine = e750_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -955,5 +960,6 @@ MACHINE_START(E800, "Toshiba e800")
.fixup = eseries_fixup,
.init_machine = e800_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 8308eee5a92..15ab2533667 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -804,6 +804,7 @@ MACHINE_START(EZX_A780, "Motorola EZX A780")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = a780_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -870,6 +871,7 @@ MACHINE_START(EZX_E680, "Motorola EZX E680")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = e680_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -936,6 +938,7 @@ MACHINE_START(EZX_A1200, "Motorola EZX A1200")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = a1200_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1127,6 +1130,7 @@ MACHINE_START(EZX_A910, "Motorola EZX A910")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = a910_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1193,6 +1197,7 @@ MACHINE_START(EZX_E6, "Motorola EZX E6")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = e6_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1233,5 +1238,6 @@ MACHINE_START(EZX_E2, "Motorola EZX E2")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = e2_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 92a2e85ab02..0d729e6619d 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -57,3 +57,5 @@ void __init pxa_set_ffuart_info(void *info);
void __init pxa_set_btuart_info(void *info);
void __init pxa_set_stuart_info(void *info);
void __init pxa_set_hwuart_info(void *info);
+
+void pxa_restart(char, const char *);
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 9c8208ca041..ac3b1cef475 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
}
#endif
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
static struct gpio_vbus_mach_info gumstix_udc_info = {
.gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
.gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
@@ -239,4 +239,5 @@ MACHINE_START(GUMSTIX, "Gumstix")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = gumstix_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c
index 4b5e110640b..fde6b4c873c 100644
--- a/arch/arm/mach-pxa/h5000.c
+++ b/arch/arm/mach-pxa/h5000.c
@@ -209,4 +209,5 @@ MACHINE_START(H5400, "HP iPAQ H5000")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = h5000_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/himalaya.c b/arch/arm/mach-pxa/himalaya.c
index f2c32457084..26d069a9f90 100644
--- a/arch/arm/mach-pxa/himalaya.c
+++ b/arch/arm/mach-pxa/himalaya.c
@@ -164,4 +164,5 @@ MACHINE_START(HIMALAYA, "HTC Himalaya")
.handle_irq = pxa25x_handle_irq,
.init_machine = himalaya_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 6f6368ece9b..fb9b62dcf4c 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -252,8 +252,8 @@ static struct resource asic3_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(GPIO12_HX4700_ASIC3_IRQ),
- .end = gpio_to_irq(GPIO12_HX4700_ASIC3_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ),
.flags = IORESOURCE_IRQ,
},
/* SD part */
@@ -263,8 +263,8 @@ static struct resource asic3_resources[] = {
.flags = IORESOURCE_MEM,
},
[3] = {
- .start = gpio_to_irq(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
- .end = gpio_to_irq(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -587,7 +587,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = {
.modalias = "ads7846",
.bus_num = 2,
.max_speed_hz = 2600000, /* 100 kHz sample rate */
- .irq = gpio_to_irq(GPIO58_HX4700_TSC2046_nPENIRQ),
+ .irq = PXA_GPIO_TO_IRQ(GPIO58_HX4700_TSC2046_nPENIRQ),
.platform_data = &tsc2046_info,
.controller_data = &tsc2046_chip,
},
@@ -635,15 +635,15 @@ static struct resource power_supply_resources[] = {
.name = "ac",
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
IORESOURCE_IRQ_LOWEDGE,
- .start = gpio_to_irq(GPIOD9_nAC_IN),
- .end = gpio_to_irq(GPIOD9_nAC_IN),
+ .start = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN),
+ .end = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN),
},
[1] = {
.name = "usb",
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
IORESOURCE_IRQ_LOWEDGE,
- .start = gpio_to_irq(GPIOD14_nUSBC_DETECT),
- .end = gpio_to_irq(GPIOD14_nUSBC_DETECT),
+ .start = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT),
+ .end = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT),
},
};
@@ -845,4 +845,5 @@ MACHINE_START(H4700, "HP iPAQ HX4700")
.handle_irq = pxa27x_handle_irq,
.init_machine = hx4700_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index f78d5db758d..67400192ed3 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -86,7 +86,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.chip_select = 0,
.platform_data = &mcp251x_info,
.controller_data = &mcp251x_chip_info1,
- .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ1)
+ .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1)
},
{
.modalias = "mcp2515",
@@ -95,7 +95,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.chip_select = 1,
.platform_data = &mcp251x_info,
.controller_data = &mcp251x_chip_info2,
- .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ2)
+ .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ2)
},
{
.modalias = "mcp2515",
@@ -104,7 +104,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.chip_select = 0,
.platform_data = &mcp251x_info,
.controller_data = &mcp251x_chip_info3,
- .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ3)
+ .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ3)
},
{
.modalias = "mcp2515",
@@ -113,7 +113,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.chip_select = 1,
.platform_data = &mcp251x_info,
.controller_data = &mcp251x_chip_info4,
- .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ4)
+ .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ4)
}
};
@@ -196,5 +196,6 @@ MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
- .init_machine = icontrol_init
+ .init_machine = icontrol_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index ddf20e5c376..8af1840e12c 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -75,8 +75,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_GPIO(4),
- .end = IRQ_GPIO(4),
+ .start = PXA_GPIO_TO_IRQ(4),
+ .end = PXA_GPIO_TO_IRQ(4),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -199,4 +199,5 @@ MACHINE_START(PXA_IDP, "Vibren PXA255 IDP")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = idp_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 6d7eab3d086..f02fa1e6ba8 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -172,9 +172,9 @@ enum balloon3_features {
/* Balloon3 Interrupts */
#define BALLOON3_IRQ(x) (IRQ_BOARD_START + (x))
-#define BALLOON3_AUX_NIRQ IRQ_GPIO(BALLOON3_GPIO_AUX_NIRQ)
-#define BALLOON3_CODEC_IRQ IRQ_GPIO(BALLOON3_GPIO_CODEC_IRQ)
-#define BALLOON3_S0_CD_IRQ IRQ_GPIO(BALLOON3_GPIO_S0_CD)
+#define BALLOON3_AUX_NIRQ PXA_GPIO_TO_IRQ(BALLOON3_GPIO_AUX_NIRQ)
+#define BALLOON3_CODEC_IRQ PXA_GPIO_TO_IRQ(BALLOON3_GPIO_CODEC_IRQ)
+#define BALLOON3_S0_CD_IRQ PXA_GPIO_TO_IRQ(BALLOON3_GPIO_S0_CD)
#define BALLOON3_NR_IRQS (IRQ_BOARD_START + 16)
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index 5dfd1195a5a..f3c3493b468 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -66,18 +66,18 @@
/*
* Corgi Interrupts
*/
-#define CORGI_IRQ_GPIO_KEY_INT IRQ_GPIO(0)
-#define CORGI_IRQ_GPIO_AC_IN IRQ_GPIO(1)
-#define CORGI_IRQ_GPIO_WAKEUP IRQ_GPIO(3)
-#define CORGI_IRQ_GPIO_AK_INT IRQ_GPIO(4)
-#define CORGI_IRQ_GPIO_TP_INT IRQ_GPIO(5)
-#define CORGI_IRQ_GPIO_nSD_DETECT IRQ_GPIO(9)
-#define CORGI_IRQ_GPIO_nSD_INT IRQ_GPIO(10)
-#define CORGI_IRQ_GPIO_MAIN_BAT_LOW IRQ_GPIO(11)
-#define CORGI_IRQ_GPIO_CF_CD IRQ_GPIO(14)
-#define CORGI_IRQ_GPIO_CHRG_FULL IRQ_GPIO(16) /* Battery fully charged */
-#define CORGI_IRQ_GPIO_CF_IRQ IRQ_GPIO(17)
-#define CORGI_IRQ_GPIO_KEY_SENSE(a) IRQ_GPIO(58+(a)) /* Keyboard Sense lines */
+#define CORGI_IRQ_GPIO_KEY_INT PXA_GPIO_TO_IRQ(0)
+#define CORGI_IRQ_GPIO_AC_IN PXA_GPIO_TO_IRQ(1)
+#define CORGI_IRQ_GPIO_WAKEUP PXA_GPIO_TO_IRQ(3)
+#define CORGI_IRQ_GPIO_AK_INT PXA_GPIO_TO_IRQ(4)
+#define CORGI_IRQ_GPIO_TP_INT PXA_GPIO_TO_IRQ(5)
+#define CORGI_IRQ_GPIO_nSD_DETECT PXA_GPIO_TO_IRQ(9)
+#define CORGI_IRQ_GPIO_nSD_INT PXA_GPIO_TO_IRQ(10)
+#define CORGI_IRQ_GPIO_MAIN_BAT_LOW PXA_GPIO_TO_IRQ(11)
+#define CORGI_IRQ_GPIO_CF_CD PXA_GPIO_TO_IRQ(14)
+#define CORGI_IRQ_GPIO_CHRG_FULL PXA_GPIO_TO_IRQ(16) /* Battery fully charged */
+#define CORGI_IRQ_GPIO_CF_IRQ PXA_GPIO_TO_IRQ(17)
+#define CORGI_IRQ_GPIO_KEY_SENSE(a) PXA_GPIO_TO_IRQ(58+(a)) /* Keyboard Sense lines */
/*
@@ -98,7 +98,7 @@
CORGI_SCP_MIC_BIAS )
#define CORGI_SCOOP_IO_OUT ( CORGI_SCP_MUTE_L | CORGI_SCP_MUTE_R )
-#define CORGI_SCOOP_GPIO_BASE (NR_BUILTIN_GPIO)
+#define CORGI_SCOOP_GPIO_BASE (PXA_NR_BUILTIN_GPIO)
#define CORGI_GPIO_LED_GREEN (CORGI_SCOOP_GPIO_BASE + 0)
#define CORGI_GPIO_SWA (CORGI_SCOOP_GPIO_BASE + 1) /* Hinge Switch A */
#define CORGI_GPIO_SWB (CORGI_SCOOP_GPIO_BASE + 2) /* Hinge Switch B */
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 747ab1a71f2..2628e7b7211 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -19,8 +19,8 @@
#define CSB726_FLASH_SIZE (64 * 1024 * 1024)
#define CSB726_FLASH_uMON (8 * 1024 * 1024)
-#define CSB726_IRQ_LAN gpio_to_irq(CSB726_GPIO_IRQ_LAN)
-#define CSB726_IRQ_SM501 gpio_to_irq(CSB726_GPIO_IRQ_SM501)
+#define CSB726_IRQ_LAN PXA_GPIO_TO_IRQ(CSB726_GPIO_IRQ_LAN)
+#define CSB726_IRQ_SM501 PXA_GPIO_TO_IRQ(CSB726_GPIO_IRQ_SM501)
#endif
diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S
index a73bc86a3c2..260c0c17692 100644
--- a/arch/arm/mach-pxa/include/mach/entry-macro.S
+++ b/arch/arm/mach-pxa/include/mach/entry-macro.S
@@ -7,45 +7,9 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/irqs.h>
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mrc p15, 0, \tmp, c0, c0, 0 @ CPUID
- mov \tmp, \tmp, lsr #13
- and \tmp, \tmp, #0x7 @ Core G
- cmp \tmp, #1
- bhi 1002f
-
- @ Core Generation 1 (PXA25x)
- mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000
- add \base, \base, #0x00d00000
- ldr \irqstat, [\base, #0] @ ICIP
- ldr \irqnr, [\base, #4] @ ICMR
-
- ands \irqnr, \irqstat, \irqnr
- beq 1001f
- rsb \irqstat, \irqnr, #0
- and \irqstat, \irqstat, \irqnr
- clz \irqnr, \irqstat
- rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0))
- b 1001f
-1002:
- @ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx)
- mrc p6, 0, \irqstat, c5, c0, 0 @ ICHP
- tst \irqstat, #0x80000000
- beq 1001f
- bic \irqstat, \irqstat, #0x80000000
- mov \irqnr, \irqstat, lsr #16
- add \irqnr, \irqnr, #(PXA_IRQ(0))
-1001:
- .endm
diff --git a/arch/arm/mach-pxa/include/mach/gpio-pxa.h b/arch/arm/mach-pxa/include/mach/gpio-pxa.h
deleted file mode 100644
index 41b4c93a96c..00000000000
--- a/arch/arm/mach-pxa/include/mach/gpio-pxa.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef __MACH_PXA_GPIO_PXA_H
-#define __MACH_PXA_GPIO_PXA_H
-
-#include <mach/irqs.h>
-#include <mach/hardware.h>
-
-#define GPIO_REGS_VIRT io_p2v(0x40E00000)
-
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-/* GPIO Pin Level Registers */
-#define GPLR0 GPIO_REG(BANK_OFF(0) + 0x00)
-#define GPLR1 GPIO_REG(BANK_OFF(1) + 0x00)
-#define GPLR2 GPIO_REG(BANK_OFF(2) + 0x00)
-#define GPLR3 GPIO_REG(BANK_OFF(3) + 0x00)
-
-/* GPIO Pin Direction Registers */
-#define GPDR0 GPIO_REG(BANK_OFF(0) + 0x0c)
-#define GPDR1 GPIO_REG(BANK_OFF(1) + 0x0c)
-#define GPDR2 GPIO_REG(BANK_OFF(2) + 0x0c)
-#define GPDR3 GPIO_REG(BANK_OFF(3) + 0x0c)
-
-/* GPIO Pin Output Set Registers */
-#define GPSR0 GPIO_REG(BANK_OFF(0) + 0x18)
-#define GPSR1 GPIO_REG(BANK_OFF(1) + 0x18)
-#define GPSR2 GPIO_REG(BANK_OFF(2) + 0x18)
-#define GPSR3 GPIO_REG(BANK_OFF(3) + 0x18)
-
-/* GPIO Pin Output Clear Registers */
-#define GPCR0 GPIO_REG(BANK_OFF(0) + 0x24)
-#define GPCR1 GPIO_REG(BANK_OFF(1) + 0x24)
-#define GPCR2 GPIO_REG(BANK_OFF(2) + 0x24)
-#define GPCR3 GPIO_REG(BANK_OFF(3) + 0x24)
-
-/* GPIO Rising Edge Detect Registers */
-#define GRER0 GPIO_REG(BANK_OFF(0) + 0x30)
-#define GRER1 GPIO_REG(BANK_OFF(1) + 0x30)
-#define GRER2 GPIO_REG(BANK_OFF(2) + 0x30)
-#define GRER3 GPIO_REG(BANK_OFF(3) + 0x30)
-
-/* GPIO Falling Edge Detect Registers */
-#define GFER0 GPIO_REG(BANK_OFF(0) + 0x3c)
-#define GFER1 GPIO_REG(BANK_OFF(1) + 0x3c)
-#define GFER2 GPIO_REG(BANK_OFF(2) + 0x3c)
-#define GFER3 GPIO_REG(BANK_OFF(3) + 0x3c)
-
-/* GPIO Edge Detect Status Registers */
-#define GEDR0 GPIO_REG(BANK_OFF(0) + 0x48)
-#define GEDR1 GPIO_REG(BANK_OFF(1) + 0x48)
-#define GEDR2 GPIO_REG(BANK_OFF(2) + 0x48)
-#define GEDR3 GPIO_REG(BANK_OFF(3) + 0x48)
-
-/* GPIO Alternate Function Select Registers */
-#define GAFR0_L GPIO_REG(0x0054)
-#define GAFR0_U GPIO_REG(0x0058)
-#define GAFR1_L GPIO_REG(0x005C)
-#define GAFR1_U GPIO_REG(0x0060)
-#define GAFR2_L GPIO_REG(0x0064)
-#define GAFR2_U GPIO_REG(0x0068)
-#define GAFR3_L GPIO_REG(0x006C)
-#define GAFR3_U GPIO_REG(0x0070)
-
-/* More handy macros. The argument is a literal GPIO number. */
-
-#define GPIO_bit(x) (1 << ((x) & 0x1f))
-
-#define GPLR(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x00)
-#define GPDR(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x0c)
-#define GPSR(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x18)
-#define GPCR(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x24)
-#define GRER(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x30)
-#define GFER(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x3c)
-#define GEDR(x) GPIO_REG(BANK_OFF((x) >> 5) + 0x48)
-#define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2))
-
-
-#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM
-
-#define gpio_to_bank(gpio) ((gpio) >> 5)
-
-#ifdef CONFIG_CPU_PXA26x
-/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
- * as well as their Alternate Function value being '1' for GPIO in GAFRx.
- */
-static inline int __gpio_is_inverted(unsigned gpio)
-{
- return cpu_is_pxa25x() && gpio > 85;
-}
-#else
-static inline int __gpio_is_inverted(unsigned gpio) { return 0; }
-#endif
-
-/*
- * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
- * function of a GPIO, and GPDRx cannot be altered once configured. It
- * is attributed as "occupied" here (I know this terminology isn't
- * accurate, you are welcome to propose a better one :-)
- */
-static inline int __gpio_is_occupied(unsigned gpio)
-{
- if (cpu_is_pxa27x() || cpu_is_pxa25x()) {
- int af = (GAFR(gpio) >> ((gpio & 0xf) * 2)) & 0x3;
- int dir = GPDR(gpio) & GPIO_bit(gpio);
-
- if (__gpio_is_inverted(gpio))
- return af != 1 || dir == 0;
- else
- return af != 0 || dir != 0;
- } else
- return GPDR(gpio) & GPIO_bit(gpio);
-}
-
-#include <plat/gpio-pxa.h>
-#endif /* __MACH_PXA_GPIO_PXA_H */
diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h
index 004cade7bb1..0248e433bc9 100644
--- a/arch/arm/mach-pxa/include/mach/gpio.h
+++ b/arch/arm/mach-pxa/include/mach/gpio.h
@@ -25,24 +25,8 @@
#define __ASM_ARCH_PXA_GPIO_H
#include <asm-generic/gpio.h>
-/* The defines for the driver are needed for the accelerated accessors */
-#include "gpio-pxa.h"
-#define gpio_to_irq(gpio) IRQ_GPIO(gpio)
+#include <mach/irqs.h>
+#include <mach/hardware.h>
-static inline int irq_to_gpio(unsigned int irq)
-{
- int gpio;
-
- if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
- return irq - IRQ_GPIO0;
-
- gpio = irq - PXA_GPIO_IRQ_BASE;
- if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
- return gpio;
-
- return -1;
-}
-
-#include <plat/gpio.h>
#endif
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index 9b898680b20..dba14b6503a 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -24,7 +24,7 @@ has detected a cable insertion; driven low otherwise. */
#define GPIO_GUMSTIX_USB_GPIOx 41
/* usb state change */
-#define GUMSTIX_USB_INTR_IRQ IRQ_GPIO(GPIO_GUMSTIX_USB_GPIOn)
+#define GUMSTIX_USB_INTR_IRQ PXA_GPIO_TO_IRQ(GPIO_GUMSTIX_USB_GPIOn)
#define GPIO_GUMSTIX_USB_GPIOn_MD (GPIO_GUMSTIX_USB_GPIOn | GPIO_IN)
#define GPIO_GUMSTIX_USB_GPIOx_CON_MD (GPIO_GUMSTIX_USB_GPIOx | GPIO_OUT)
@@ -35,7 +35,7 @@ has detected a cable insertion; driven low otherwise. */
*/
#define GUMSTIX_GPIO_nSD_WP 22 /* SD Write Protect */
#define GUMSTIX_GPIO_nSD_DETECT 11 /* MMC/SD Card Detect */
-#define GUMSTIX_IRQ_GPIO_nSD_DETECT IRQ_GPIO(GUMSTIX_GPIO_nSD_DETECT)
+#define GUMSTIX_IRQ_GPIO_nSD_DETECT PXA_GPIO_TO_IRQ(GUMSTIX_GPIO_nSD_DETECT)
/*
* SMC Ethernet definitions
@@ -49,10 +49,10 @@ has detected a cable insertion; driven low otherwise. */
#define GPIO_GUMSTIX_ETH0 36
#define GPIO_GUMSTIX_ETH0_MD (GPIO_GUMSTIX_ETH0 | GPIO_IN)
-#define GUMSTIX_ETH0_IRQ IRQ_GPIO(GPIO_GUMSTIX_ETH0)
+#define GUMSTIX_ETH0_IRQ PXA_GPIO_TO_IRQ(GPIO_GUMSTIX_ETH0)
#define GPIO_GUMSTIX_ETH1 27
#define GPIO_GUMSTIX_ETH1_MD (GPIO_GUMSTIX_ETH1 | GPIO_IN)
-#define GUMSTIX_ETH1_IRQ IRQ_GPIO(GPIO_GUMSTIX_ETH1)
+#define GUMSTIX_ETH1_IRQ PXA_GPIO_TO_IRQ(GPIO_GUMSTIX_ETH1)
/* CF reset line */
@@ -63,18 +63,18 @@ has detected a cable insertion; driven low otherwise. */
#define GPIO4_nSTSCHG GPIO4_nBVD1
#define GPIO11_nCD 11
#define GPIO26_PRDY_nBSY 26
-#define GUMSTIX_S0_nSTSCHG_IRQ IRQ_GPIO(GPIO4_nSTSCHG)
-#define GUMSTIX_S0_nCD_IRQ IRQ_GPIO(GPIO11_nCD)
-#define GUMSTIX_S0_PRDY_nBSY_IRQ IRQ_GPIO(GPIO26_PRDY_nBSY)
+#define GUMSTIX_S0_nSTSCHG_IRQ PXA_GPIO_TO_IRQ(GPIO4_nSTSCHG)
+#define GUMSTIX_S0_nCD_IRQ PXA_GPIO_TO_IRQ(GPIO11_nCD)
+#define GUMSTIX_S0_PRDY_nBSY_IRQ PXA_GPIO_TO_IRQ(GPIO26_PRDY_nBSY)
/* CF slot 1 */
#define GPIO18_nBVD1 18
#define GPIO18_nSTSCHG GPIO18_nBVD1
#define GPIO36_nCD 36
#define GPIO27_PRDY_nBSY 27
-#define GUMSTIX_S1_nSTSCHG_IRQ IRQ_GPIO(GPIO18_nSTSCHG)
-#define GUMSTIX_S1_nCD_IRQ IRQ_GPIO(GPIO36_nCD)
-#define GUMSTIX_S1_PRDY_nBSY_IRQ IRQ_GPIO(GPIO27_PRDY_nBSY)
+#define GUMSTIX_S1_nSTSCHG_IRQ PXA_GPIO_TO_IRQ(GPIO18_nSTSCHG)
+#define GUMSTIX_S1_nCD_IRQ PXA_GPIO_TO_IRQ(GPIO36_nCD)
+#define GUMSTIX_S1_PRDY_nBSY_IRQ PXA_GPIO_TO_IRQ(GPIO27_PRDY_nBSY)
/* CF GPIO line modes */
#define GPIO4_nSTSCHG_MD (GPIO4_nSTSCHG | GPIO_IN)
diff --git a/arch/arm/mach-pxa/include/mach/hx4700.h b/arch/arm/mach-pxa/include/mach/hx4700.h
index 37408449ec2..8bc02913517 100644
--- a/arch/arm/mach-pxa/include/mach/hx4700.h
+++ b/arch/arm/mach-pxa/include/mach/hx4700.h
@@ -15,7 +15,7 @@
#include <linux/gpio.h>
#include <linux/mfd/asic3.h>
-#define HX4700_ASIC3_GPIO_BASE NR_BUILTIN_GPIO
+#define HX4700_ASIC3_GPIO_BASE PXA_NR_BUILTIN_GPIO
#define HX4700_EGPIO_BASE (HX4700_ASIC3_GPIO_BASE + ASIC3_NUM_GPIOS)
#define HX4700_NR_IRQS (IRQ_BOARD_START + 70)
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 5eff96fcc94..22a96f87232 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -131,28 +131,26 @@
#define PCC_VS2 (1 << 1)
#define PCC_VS1 (1 << 0)
-#define PCC_DETECT(x) (GPLR(7 + (x)) & GPIO_bit(7 + (x)))
-
/* A listing of interrupts used by external hardware devices */
-#define TOUCH_PANEL_IRQ IRQ_GPIO(5)
-#define IDE_IRQ IRQ_GPIO(21)
+#define TOUCH_PANEL_IRQ PXA_GPIO_TO_IRQ(5)
+#define IDE_IRQ PXA_GPIO_TO_IRQ(21)
#define TOUCH_PANEL_IRQ_EDGE IRQ_TYPE_EDGE_FALLING
-#define ETHERNET_IRQ IRQ_GPIO(4)
+#define ETHERNET_IRQ PXA_GPIO_TO_IRQ(4)
#define ETHERNET_IRQ_EDGE IRQ_TYPE_EDGE_RISING
#define IDE_IRQ_EDGE IRQ_TYPE_EDGE_RISING
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(7)
+#define PCMCIA_S0_CD_VALID PXA_GPIO_TO_IRQ(7)
#define PCMCIA_S0_CD_VALID_EDGE IRQ_TYPE_EDGE_BOTH
-#define PCMCIA_S1_CD_VALID IRQ_GPIO(8)
+#define PCMCIA_S1_CD_VALID PXA_GPIO_TO_IRQ(8)
#define PCMCIA_S1_CD_VALID_EDGE IRQ_TYPE_EDGE_BOTH
-#define PCMCIA_S0_RDYINT IRQ_GPIO(19)
-#define PCMCIA_S1_RDYINT IRQ_GPIO(22)
+#define PCMCIA_S0_RDYINT PXA_GPIO_TO_IRQ(19)
+#define PCMCIA_S1_RDYINT PXA_GPIO_TO_IRQ(22)
/*
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 7cc5a781e99..32975adf3ca 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -88,10 +88,8 @@
#define IRQ_U2P PXA_IRQ(93) /* USB PHY D+/D- Lines (PXA935) */
#define PXA_GPIO_IRQ_BASE PXA_IRQ(96)
-#define PXA_GPIO_IRQ_NUM (192)
-
-#define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x))
-#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
+#define PXA_NR_BUILTIN_GPIO (192)
+#define PXA_GPIO_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x))
/*
* The following interrupts are for board specific purposes. Since
@@ -100,7 +98,7 @@
* By default, no board IRQ is reserved. It should be finished in
* custom board since sparse IRQ is already enabled.
*/
-#define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_GPIO_IRQ_NUM)
+#define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_NR_BUILTIN_GPIO)
#define NR_IRQS (IRQ_BOARD_START)
diff --git a/arch/arm/mach-pxa/include/mach/littleton.h b/arch/arm/mach-pxa/include/mach/littleton.h
index b6238cbd8ae..8066be54e9f 100644
--- a/arch/arm/mach-pxa/include/mach/littleton.h
+++ b/arch/arm/mach-pxa/include/mach/littleton.h
@@ -1,13 +1,11 @@
#ifndef __ASM_ARCH_LITTLETON_H
#define __ASM_ARCH_LITTLETON_H
-#include <mach/gpio-pxa.h>
-
#define LITTLETON_ETH_PHYS 0x30000000
#define LITTLETON_GPIO_LCD_CS (17)
-#define EXT0_GPIO_BASE (NR_BUILTIN_GPIO)
+#define EXT0_GPIO_BASE (PXA_NR_BUILTIN_GPIO)
#define EXT0_GPIO(x) (EXT0_GPIO_BASE + (x))
#define LITTLETON_NR_IRQS (IRQ_BOARD_START + 8)
diff --git a/arch/arm/mach-pxa/include/mach/magician.h b/arch/arm/mach-pxa/include/mach/magician.h
index 7cbfc5d3f9d..ba6a6e1d29e 100644
--- a/arch/arm/mach-pxa/include/mach/magician.h
+++ b/arch/arm/mach-pxa/include/mach/magician.h
@@ -78,7 +78,7 @@
* CPLD EGPIOs
*/
-#define MAGICIAN_EGPIO_BASE NR_BUILTIN_GPIO
+#define MAGICIAN_EGPIO_BASE PXA_NR_BUILTIN_GPIO
#define MAGICIAN_EGPIO(reg,bit) \
(MAGICIAN_EGPIO_BASE + 8*reg + bit)
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index f80bbe246af..d4eac3d6ffb 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
#define palm27x_lcd_init(power, mode) do {} while (0)
#endif
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
extern void __init palm27x_udc_init(int vbus, int pullup,
int vbus_inverted);
#else
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h
index ae536e86d8e..2c447133657 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/include/mach/palmld.h
@@ -68,10 +68,10 @@
/* 20, 53 and 86 are usb related too */
/* INTERRUPTS */
-#define IRQ_GPIO_PALMLD_GPIO_RESET IRQ_GPIO(GPIO_NR_PALMLD_GPIO_RESET)
-#define IRQ_GPIO_PALMLD_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMLD_SD_DETECT_N)
-#define IRQ_GPIO_PALMLD_WM9712_IRQ IRQ_GPIO(GPIO_NR_PALMLD_WM9712_IRQ)
-#define IRQ_GPIO_PALMLD_IDE_IRQ IRQ_GPIO(GPIO_NR_PALMLD_IDE_IRQ)
+#define IRQ_GPIO_PALMLD_GPIO_RESET PXA_GPIO_TO_IRQ(GPIO_NR_PALMLD_GPIO_RESET)
+#define IRQ_GPIO_PALMLD_SD_DETECT_N PXA_GPIO_TO_IRQ(GPIO_NR_PALMLD_SD_DETECT_N)
+#define IRQ_GPIO_PALMLD_WM9712_IRQ PXA_GPIO_TO_IRQ(GPIO_NR_PALMLD_WM9712_IRQ)
+#define IRQ_GPIO_PALMLD_IDE_IRQ PXA_GPIO_TO_IRQ(GPIO_NR_PALMLD_IDE_IRQ)
/** HERE ARE INIT VALUES **/
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h
index 6baf7469d4e..0bd4f036c72 100644
--- a/arch/arm/mach-pxa/include/mach/palmt5.h
+++ b/arch/arm/mach-pxa/include/mach/palmt5.h
@@ -48,10 +48,10 @@
#define GPIO_NR_PALMT5_BT_RESET 83
/* INTERRUPTS */
-#define IRQ_GPIO_PALMT5_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMT5_SD_DETECT_N)
-#define IRQ_GPIO_PALMT5_WM9712_IRQ IRQ_GPIO(GPIO_NR_PALMT5_WM9712_IRQ)
-#define IRQ_GPIO_PALMT5_USB_DETECT IRQ_GPIO(GPIO_NR_PALMT5_USB_DETECT)
-#define IRQ_GPIO_PALMT5_GPIO_RESET IRQ_GPIO(GPIO_NR_PALMT5_GPIO_RESET)
+#define IRQ_GPIO_PALMT5_SD_DETECT_N PXA_GPIO_TO_IRQ(GPIO_NR_PALMT5_SD_DETECT_N)
+#define IRQ_GPIO_PALMT5_WM9712_IRQ PXA_GPIO_TO_IRQ(GPIO_NR_PALMT5_WM9712_IRQ)
+#define IRQ_GPIO_PALMT5_USB_DETECT PXA_GPIO_TO_IRQ(GPIO_NR_PALMT5_USB_DETECT)
+#define IRQ_GPIO_PALMT5_GPIO_RESET PXA_GPIO_TO_IRQ(GPIO_NR_PALMT5_GPIO_RESET)
/** HERE ARE INIT VALUES **/
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
index 3f9dd3fd463..c383a21680b 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -52,8 +52,8 @@
#define GPIO_NR_PALMTC_IR_DISABLE 45
/* IRQs */
-#define IRQ_GPIO_PALMTC_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMTC_SD_DETECT_N)
-#define IRQ_GPIO_PALMTC_WLAN_READY IRQ_GPIO(GPIO_NR_PALMTC_WLAN_READY)
+#define IRQ_GPIO_PALMTC_SD_DETECT_N PXA_GPIO_TO_IRQ(GPIO_NR_PALMTC_SD_DETECT_N)
+#define IRQ_GPIO_PALMTC_WLAN_READY PXA_GPIO_TO_IRQ(GPIO_NR_PALMTC_WLAN_READY)
/* UCB1400 GPIOs */
#define GPIO_NR_PALMTC_POWER_DETECT (0x80 | 0x00)
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index 7074a6ed46c..f2e53038025 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -62,10 +62,10 @@
#define GPIO_NR_PALMTX_NAND_BUFFER_DIR 79
/* INTERRUPTS */
-#define IRQ_GPIO_PALMTX_SD_DETECT_N IRQ_GPIO(GPIO_NR_PALMTX_SD_DETECT_N)
-#define IRQ_GPIO_PALMTX_WM9712_IRQ IRQ_GPIO(GPIO_NR_PALMTX_WM9712_IRQ)
-#define IRQ_GPIO_PALMTX_USB_DETECT IRQ_GPIO(GPIO_NR_PALMTX_USB_DETECT)
-#define IRQ_GPIO_PALMTX_GPIO_RESET IRQ_GPIO(GPIO_NR_PALMTX_GPIO_RESET)
+#define IRQ_GPIO_PALMTX_SD_DETECT_N PXA_GPIO_TO_IRQ(GPIO_NR_PALMTX_SD_DETECT_N)
+#define IRQ_GPIO_PALMTX_WM9712_IRQ PXA_GPIO_TO_IRQ(GPIO_NR_PALMTX_WM9712_IRQ)
+#define IRQ_GPIO_PALMTX_USB_DETECT PXA_GPIO_TO_IRQ(GPIO_NR_PALMTX_USB_DETECT)
+#define IRQ_GPIO_PALMTX_GPIO_RESET PXA_GPIO_TO_IRQ(GPIO_NR_PALMTX_GPIO_RESET)
/** HERE ARE INIT VALUES **/
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h
index 4bac588478a..6bf28de228b 100644
--- a/arch/arm/mach-pxa/include/mach/pcm027.h
+++ b/arch/arm/mach-pxa/include/mach/pcm027.h
@@ -34,7 +34,7 @@
/* I2C RTC */
#define PCM027_RTC_IRQ_GPIO 0
-#define PCM027_RTC_IRQ IRQ_GPIO(PCM027_RTC_IRQ_GPIO)
+#define PCM027_RTC_IRQ PXA_GPIO_TO_IRQ(PCM027_RTC_IRQ_GPIO)
#define PCM027_RTC_IRQ_EDGE IRQ_TYPE_EDGE_FALLING
#define ADR_PCM027_RTC 0x51 /* I2C address */
@@ -43,21 +43,21 @@
/* Ethernet chip (SMSC91C111) */
#define PCM027_ETH_IRQ_GPIO 52
-#define PCM027_ETH_IRQ IRQ_GPIO(PCM027_ETH_IRQ_GPIO)
+#define PCM027_ETH_IRQ PXA_GPIO_TO_IRQ(PCM027_ETH_IRQ_GPIO)
#define PCM027_ETH_IRQ_EDGE IRQ_TYPE_EDGE_RISING
#define PCM027_ETH_PHYS PXA_CS5_PHYS
#define PCM027_ETH_SIZE (1*1024*1024)
/* CAN controller SJA1000 (unsupported yet) */
#define PCM027_CAN_IRQ_GPIO 114
-#define PCM027_CAN_IRQ IRQ_GPIO(PCM027_CAN_IRQ_GPIO)
+#define PCM027_CAN_IRQ PXA_GPIO_TO_IRQ(PCM027_CAN_IRQ_GPIO)
#define PCM027_CAN_IRQ_EDGE IRQ_TYPE_EDGE_FALLING
#define PCM027_CAN_PHYS 0x22000000
#define PCM027_CAN_SIZE 0x100
/* SPI GPIO expander (unsupported yet) */
#define PCM027_EGPIO_IRQ_GPIO 27
-#define PCM027_EGPIO_IRQ IRQ_GPIO(PCM027_EGPIO_IRQ_GPIO)
+#define PCM027_EGPIO_IRQ PXA_GPIO_TO_IRQ(PCM027_EGPIO_IRQ_GPIO)
#define PCM027_EGPIO_IRQ_EDGE IRQ_TYPE_EDGE_FALLING
#define PCM027_EGPIO_CS 24
/*
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
index 8a4383b776d..d72791695b2 100644
--- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
@@ -28,14 +28,14 @@
/* CPLD's interrupt controller is connected to PCM-027 GPIO 9 */
#define PCM990_CTRL_INT_IRQ_GPIO 9
-#define PCM990_CTRL_INT_IRQ IRQ_GPIO(PCM990_CTRL_INT_IRQ_GPIO)
+#define PCM990_CTRL_INT_IRQ PXA_GPIO_TO_IRQ(PCM990_CTRL_INT_IRQ_GPIO)
#define PCM990_CTRL_INT_IRQ_EDGE IRQ_TYPE_EDGE_RISING
#define PCM990_CTRL_PHYS PXA_CS1_PHYS /* 16-Bit */
#define PCM990_CTRL_BASE 0xea000000
#define PCM990_CTRL_SIZE (1*1024*1024)
#define PCM990_CTRL_PWR_IRQ_GPIO 14
-#define PCM990_CTRL_PWR_IRQ IRQ_GPIO(PCM990_CTRL_PWR_IRQ_GPIO)
+#define PCM990_CTRL_PWR_IRQ PXA_GPIO_TO_IRQ(PCM990_CTRL_PWR_IRQ_GPIO)
#define PCM990_CTRL_PWR_IRQ_EDGE IRQ_TYPE_EDGE_RISING
/* visible CPLD (U7) registers */
@@ -132,7 +132,7 @@
* IDE
*/
#define PCM990_IDE_IRQ_GPIO 13
-#define PCM990_IDE_IRQ IRQ_GPIO(PCM990_IDE_IRQ_GPIO)
+#define PCM990_IDE_IRQ PXA_GPIO_TO_IRQ(PCM990_IDE_IRQ_GPIO)
#define PCM990_IDE_IRQ_EDGE IRQ_TYPE_EDGE_RISING
#define PCM990_IDE_PLD_PHYS 0x20000000 /* 16 bit wide */
#define PCM990_IDE_PLD_BASE 0xee000000
@@ -188,11 +188,11 @@
* Compact Flash
*/
#define PCM990_CF_IRQ_GPIO 11
-#define PCM990_CF_IRQ IRQ_GPIO(PCM990_CF_IRQ_GPIO)
+#define PCM990_CF_IRQ PXA_GPIO_TO_IRQ(PCM990_CF_IRQ_GPIO)
#define PCM990_CF_IRQ_EDGE IRQ_TYPE_EDGE_RISING
#define PCM990_CF_CD_GPIO 12
-#define PCM990_CF_CD IRQ_GPIO(PCM990_CF_CD_GPIO)
+#define PCM990_CF_CD PXA_GPIO_TO_IRQ(PCM990_CF_CD_GPIO)
#define PCM990_CF_CD_EDGE IRQ_TYPE_EDGE_RISING
#define PCM990_CF_PLD_PHYS 0x30000000 /* 16 bit wide */
@@ -258,14 +258,14 @@
* Wolfson AC97 Touch
*/
#define PCM990_AC97_IRQ_GPIO 10
-#define PCM990_AC97_IRQ IRQ_GPIO(PCM990_AC97_IRQ_GPIO)
+#define PCM990_AC97_IRQ PXA_GPIO_TO_IRQ(PCM990_AC97_IRQ_GPIO)
#define PCM990_AC97_IRQ_EDGE IRQ_TYPE_EDGE_RISING
/*
* MMC phyCORE
*/
#define PCM990_MMC0_IRQ_GPIO 9
-#define PCM990_MMC0_IRQ IRQ_GPIO(PCM990_MMC0_IRQ_GPIO)
+#define PCM990_MMC0_IRQ PXA_GPIO_TO_IRQ(PCM990_MMC0_IRQ_GPIO)
#define PCM990_MMC0_IRQ_EDGE IRQ_TYPE_EDGE_FALLING
/*
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index 83d1cfd00fc..f32ff75dcca 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -47,18 +47,18 @@
#define POODLE_GPIO_DISCHARGE_ON (42) /* Enable battery discharge */
/* PXA GPIOs */
-#define POODLE_IRQ_GPIO_ON_KEY IRQ_GPIO(0)
-#define POODLE_IRQ_GPIO_AC_IN IRQ_GPIO(1)
-#define POODLE_IRQ_GPIO_HP_IN IRQ_GPIO(4)
-#define POODLE_IRQ_GPIO_CO IRQ_GPIO(16)
-#define POODLE_IRQ_GPIO_TP_INT IRQ_GPIO(5)
-#define POODLE_IRQ_GPIO_WAKEUP IRQ_GPIO(11)
-#define POODLE_IRQ_GPIO_GA_INT IRQ_GPIO(10)
-#define POODLE_IRQ_GPIO_CF_IRQ IRQ_GPIO(17)
-#define POODLE_IRQ_GPIO_CF_CD IRQ_GPIO(14)
-#define POODLE_IRQ_GPIO_nSD_INT IRQ_GPIO(8)
-#define POODLE_IRQ_GPIO_nSD_DETECT IRQ_GPIO(9)
-#define POODLE_IRQ_GPIO_MAIN_BAT_LOW IRQ_GPIO(13)
+#define POODLE_IRQ_GPIO_ON_KEY PXA_GPIO_TO_IRQ(0)
+#define POODLE_IRQ_GPIO_AC_IN PXA_GPIO_TO_IRQ(1)
+#define POODLE_IRQ_GPIO_HP_IN PXA_GPIO_TO_IRQ(4)
+#define POODLE_IRQ_GPIO_CO PXA_GPIO_TO_IRQ(16)
+#define POODLE_IRQ_GPIO_TP_INT PXA_GPIO_TO_IRQ(5)
+#define POODLE_IRQ_GPIO_WAKEUP PXA_GPIO_TO_IRQ(11)
+#define POODLE_IRQ_GPIO_GA_INT PXA_GPIO_TO_IRQ(10)
+#define POODLE_IRQ_GPIO_CF_IRQ PXA_GPIO_TO_IRQ(17)
+#define POODLE_IRQ_GPIO_CF_CD PXA_GPIO_TO_IRQ(14)
+#define POODLE_IRQ_GPIO_nSD_INT PXA_GPIO_TO_IRQ(8)
+#define POODLE_IRQ_GPIO_nSD_DETECT PXA_GPIO_TO_IRQ(9)
+#define POODLE_IRQ_GPIO_MAIN_BAT_LOW PXA_GPIO_TO_IRQ(13)
/* SCOOP GPIOs */
#define POODLE_SCOOP_CHARGE_ON SCOOP_GPCR_PA11
@@ -71,7 +71,7 @@
#define POODLE_SCOOP_IO_DIR ( POODLE_SCOOP_VPEN | POODLE_SCOOP_HS_OUT )
#define POODLE_SCOOP_IO_OUT ( 0 )
-#define POODLE_SCOOP_GPIO_BASE (NR_BUILTIN_GPIO)
+#define POODLE_SCOOP_GPIO_BASE (PXA_NR_BUILTIN_GPIO)
#define POODLE_GPIO_CHARGE_ON (POODLE_SCOOP_GPIO_BASE + 0)
#define POODLE_GPIO_CP401 (POODLE_SCOOP_GPIO_BASE + 2)
#define POODLE_GPIO_VPEN (POODLE_SCOOP_GPIO_BASE + 7)
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 685749a51c4..0bfe6507c95 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -108,7 +108,7 @@
#define SPITZ_SCP_SUS_CLR (SPITZ_SCP_MUTE_L | SPITZ_SCP_MUTE_R | SPITZ_SCP_JK_A | SPITZ_SCP_ADC_TEMP_ON)
#define SPITZ_SCP_SUS_SET 0
-#define SPITZ_SCP_GPIO_BASE (NR_BUILTIN_GPIO)
+#define SPITZ_SCP_GPIO_BASE (PXA_NR_BUILTIN_GPIO)
#define SPITZ_GPIO_LED_GREEN (SPITZ_SCP_GPIO_BASE + 0)
#define SPITZ_GPIO_JK_B (SPITZ_SCP_GPIO_BASE + 1)
#define SPITZ_GPIO_CHRG_ON (SPITZ_SCP_GPIO_BASE + 2)
@@ -140,7 +140,7 @@
SPITZ_SCP2_BACKLIGHT_CONT | SPITZ_SCP2_BACKLIGHT_ON | SPITZ_SCP2_MIC_BIAS)
#define SPITZ_SCP2_SUS_SET (SPITZ_SCP2_IR_ON | SPITZ_SCP2_RESERVED_1)
-#define SPITZ_SCP2_GPIO_BASE (NR_BUILTIN_GPIO + 12)
+#define SPITZ_SCP2_GPIO_BASE (PXA_NR_BUILTIN_GPIO + 12)
#define SPITZ_GPIO_IR_ON (SPITZ_SCP2_GPIO_BASE + 0)
#define SPITZ_GPIO_AKIN_PULLUP (SPITZ_SCP2_GPIO_BASE + 1)
#define SPITZ_GPIO_RESERVED_1 (SPITZ_SCP2_GPIO_BASE + 2)
@@ -152,7 +152,7 @@
#define SPITZ_GPIO_MIC_BIAS (SPITZ_SCP2_GPIO_BASE + 8)
/* Akita IO Expander GPIOs */
-#define AKITA_IOEXP_GPIO_BASE (NR_BUILTIN_GPIO + 12)
+#define AKITA_IOEXP_GPIO_BASE (PXA_NR_BUILTIN_GPIO + 12)
#define AKITA_GPIO_RESERVED_0 (AKITA_IOEXP_GPIO_BASE + 0)
#define AKITA_GPIO_RESERVED_1 (AKITA_IOEXP_GPIO_BASE + 1)
#define AKITA_GPIO_MIC_BIAS (AKITA_IOEXP_GPIO_BASE + 2)
@@ -164,23 +164,23 @@
/* Spitz IRQ Definitions */
-#define SPITZ_IRQ_GPIO_KEY_INT IRQ_GPIO(SPITZ_GPIO_KEY_INT)
-#define SPITZ_IRQ_GPIO_AC_IN IRQ_GPIO(SPITZ_GPIO_AC_IN)
-#define SPITZ_IRQ_GPIO_AK_INT IRQ_GPIO(SPITZ_GPIO_AK_INT)
-#define SPITZ_IRQ_GPIO_HP_IN IRQ_GPIO(SPITZ_GPIO_HP_IN)
-#define SPITZ_IRQ_GPIO_TP_INT IRQ_GPIO(SPITZ_GPIO_TP_INT)
-#define SPITZ_IRQ_GPIO_SYNC IRQ_GPIO(SPITZ_GPIO_SYNC)
-#define SPITZ_IRQ_GPIO_ON_KEY IRQ_GPIO(SPITZ_GPIO_ON_KEY)
-#define SPITZ_IRQ_GPIO_SWA IRQ_GPIO(SPITZ_GPIO_SWA)
-#define SPITZ_IRQ_GPIO_SWB IRQ_GPIO(SPITZ_GPIO_SWB)
-#define SPITZ_IRQ_GPIO_BAT_COVER IRQ_GPIO(SPITZ_GPIO_BAT_COVER)
-#define SPITZ_IRQ_GPIO_FATAL_BAT IRQ_GPIO(SPITZ_GPIO_FATAL_BAT)
-#define SPITZ_IRQ_GPIO_CO IRQ_GPIO(SPITZ_GPIO_CO)
-#define SPITZ_IRQ_GPIO_CF_IRQ IRQ_GPIO(SPITZ_GPIO_CF_IRQ)
-#define SPITZ_IRQ_GPIO_CF_CD IRQ_GPIO(SPITZ_GPIO_CF_CD)
-#define SPITZ_IRQ_GPIO_CF2_IRQ IRQ_GPIO(SPITZ_GPIO_CF2_IRQ)
-#define SPITZ_IRQ_GPIO_nSD_INT IRQ_GPIO(SPITZ_GPIO_nSD_INT)
-#define SPITZ_IRQ_GPIO_nSD_DETECT IRQ_GPIO(SPITZ_GPIO_nSD_DETECT)
+#define SPITZ_IRQ_GPIO_KEY_INT PXA_GPIO_TO_IRQ(SPITZ_GPIO_KEY_INT)
+#define SPITZ_IRQ_GPIO_AC_IN PXA_GPIO_TO_IRQ(SPITZ_GPIO_AC_IN)
+#define SPITZ_IRQ_GPIO_AK_INT PXA_GPIO_TO_IRQ(SPITZ_GPIO_AK_INT)
+#define SPITZ_IRQ_GPIO_HP_IN PXA_GPIO_TO_IRQ(SPITZ_GPIO_HP_IN)
+#define SPITZ_IRQ_GPIO_TP_INT PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT)
+#define SPITZ_IRQ_GPIO_SYNC PXA_GPIO_TO_IRQ(SPITZ_GPIO_SYNC)
+#define SPITZ_IRQ_GPIO_ON_KEY PXA_GPIO_TO_IRQ(SPITZ_GPIO_ON_KEY)
+#define SPITZ_IRQ_GPIO_SWA PXA_GPIO_TO_IRQ(SPITZ_GPIO_SWA)
+#define SPITZ_IRQ_GPIO_SWB PXA_GPIO_TO_IRQ(SPITZ_GPIO_SWB)
+#define SPITZ_IRQ_GPIO_BAT_COVER PXA_GPIO_TO_IRQ(SPITZ_GPIO_BAT_COVER)
+#define SPITZ_IRQ_GPIO_FATAL_BAT PXA_GPIO_TO_IRQ(SPITZ_GPIO_FATAL_BAT)
+#define SPITZ_IRQ_GPIO_CO PXA_GPIO_TO_IRQ(SPITZ_GPIO_CO)
+#define SPITZ_IRQ_GPIO_CF_IRQ PXA_GPIO_TO_IRQ(SPITZ_GPIO_CF_IRQ)
+#define SPITZ_IRQ_GPIO_CF_CD PXA_GPIO_TO_IRQ(SPITZ_GPIO_CF_CD)
+#define SPITZ_IRQ_GPIO_CF2_IRQ PXA_GPIO_TO_IRQ(SPITZ_GPIO_CF2_IRQ)
+#define SPITZ_IRQ_GPIO_nSD_INT PXA_GPIO_TO_IRQ(SPITZ_GPIO_nSD_INT)
+#define SPITZ_IRQ_GPIO_nSD_DETECT PXA_GPIO_TO_IRQ(SPITZ_GPIO_nSD_DETECT)
/*
* Shared data structures
diff --git a/arch/arm/mach-pxa/include/mach/system.h b/arch/arm/mach-pxa/include/mach/system.h
index d1fce8b6d10..c5afacd3cc0 100644
--- a/arch/arm/mach-pxa/include/mach/system.h
+++ b/arch/arm/mach-pxa/include/mach/system.h
@@ -9,15 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#include <asm/proc-fns.h>
-#include "hardware.h"
-#include "pxa2xx-regs.h"
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-
-void arch_reset(char mode, const char *cmd);
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 1272c4b56ce..2bb0e862598 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -24,7 +24,7 @@
/*
* SCOOP2 internal GPIOs
*/
-#define TOSA_SCOOP_GPIO_BASE NR_BUILTIN_GPIO
+#define TOSA_SCOOP_GPIO_BASE PXA_NR_BUILTIN_GPIO
#define TOSA_SCOOP_PXA_VCORE1 SCOOP_GPCR_PA11
#define TOSA_GPIO_TC6393XB_REST_IN (TOSA_SCOOP_GPIO_BASE + 1)
#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2)
@@ -42,7 +42,7 @@
/*
* SCOOP2 jacket GPIOs
*/
-#define TOSA_SCOOP_JC_GPIO_BASE (NR_BUILTIN_GPIO + 12)
+#define TOSA_SCOOP_JC_GPIO_BASE (PXA_NR_BUILTIN_GPIO + 12)
#define TOSA_GPIO_BT_LED (TOSA_SCOOP_JC_GPIO_BASE + 0)
#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1)
#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2)
@@ -59,7 +59,7 @@
/*
* TC6393XB GPIOs
*/
-#define TOSA_TC6393XB_GPIO_BASE (NR_BUILTIN_GPIO + 2 * 12)
+#define TOSA_TC6393XB_GPIO_BASE (PXA_NR_BUILTIN_GPIO + 2 * 12)
#define TOSA_GPIO_TG_ON (TOSA_TC6393XB_GPIO_BASE + 0)
#define TOSA_GPIO_L_MUTE (TOSA_TC6393XB_GPIO_BASE + 1)
@@ -141,30 +141,30 @@
/*
* Interrupts
*/
-#define TOSA_IRQ_GPIO_WAKEUP IRQ_GPIO(TOSA_GPIO_WAKEUP)
-#define TOSA_IRQ_GPIO_AC_IN IRQ_GPIO(TOSA_GPIO_AC_IN)
-#define TOSA_IRQ_GPIO_RECORD_BTN IRQ_GPIO(TOSA_GPIO_RECORD_BTN)
-#define TOSA_IRQ_GPIO_SYNC IRQ_GPIO(TOSA_GPIO_SYNC)
-#define TOSA_IRQ_GPIO_USB_IN IRQ_GPIO(TOSA_GPIO_USB_IN)
-#define TOSA_IRQ_GPIO_JACKET_DETECT IRQ_GPIO(TOSA_GPIO_JACKET_DETECT)
-#define TOSA_IRQ_GPIO_nSD_INT IRQ_GPIO(TOSA_GPIO_nSD_INT)
-#define TOSA_IRQ_GPIO_nSD_DETECT IRQ_GPIO(TOSA_GPIO_nSD_DETECT)
-#define TOSA_IRQ_GPIO_BAT1_CRG IRQ_GPIO(TOSA_GPIO_BAT1_CRG)
-#define TOSA_IRQ_GPIO_CF_CD IRQ_GPIO(TOSA_GPIO_CF_CD)
-#define TOSA_IRQ_GPIO_BAT0_CRG IRQ_GPIO(TOSA_GPIO_BAT0_CRG)
-#define TOSA_IRQ_GPIO_TC6393XB_INT IRQ_GPIO(TOSA_GPIO_TC6393XB_INT)
-#define TOSA_IRQ_GPIO_BAT0_LOW IRQ_GPIO(TOSA_GPIO_BAT0_LOW)
-#define TOSA_IRQ_GPIO_EAR_IN IRQ_GPIO(TOSA_GPIO_EAR_IN)
-#define TOSA_IRQ_GPIO_CF_IRQ IRQ_GPIO(TOSA_GPIO_CF_IRQ)
-#define TOSA_IRQ_GPIO_ON_KEY IRQ_GPIO(TOSA_GPIO_ON_KEY)
-#define TOSA_IRQ_GPIO_VGA_LINE IRQ_GPIO(TOSA_GPIO_VGA_LINE)
-#define TOSA_IRQ_GPIO_TP_INT IRQ_GPIO(TOSA_GPIO_TP_INT)
-#define TOSA_IRQ_GPIO_JC_CF_IRQ IRQ_GPIO(TOSA_GPIO_JC_CF_IRQ)
-#define TOSA_IRQ_GPIO_BAT_LOCKED IRQ_GPIO(TOSA_GPIO_BAT_LOCKED)
-#define TOSA_IRQ_GPIO_BAT1_LOW IRQ_GPIO(TOSA_GPIO_BAT1_LOW)
-#define TOSA_IRQ_GPIO_KEY_SENSE(a) IRQ_GPIO(69+(a))
-
-#define TOSA_IRQ_GPIO_MAIN_BAT_LOW IRQ_GPIO(TOSA_GPIO_MAIN_BAT_LOW)
+#define TOSA_IRQ_GPIO_WAKEUP PXA_GPIO_TO_IRQ(TOSA_GPIO_WAKEUP)
+#define TOSA_IRQ_GPIO_AC_IN PXA_GPIO_TO_IRQ(TOSA_GPIO_AC_IN)
+#define TOSA_IRQ_GPIO_RECORD_BTN PXA_GPIO_TO_IRQ(TOSA_GPIO_RECORD_BTN)
+#define TOSA_IRQ_GPIO_SYNC PXA_GPIO_TO_IRQ(TOSA_GPIO_SYNC)
+#define TOSA_IRQ_GPIO_USB_IN PXA_GPIO_TO_IRQ(TOSA_GPIO_USB_IN)
+#define TOSA_IRQ_GPIO_JACKET_DETECT PXA_GPIO_TO_IRQ(TOSA_GPIO_JACKET_DETECT)
+#define TOSA_IRQ_GPIO_nSD_INT PXA_GPIO_TO_IRQ(TOSA_GPIO_nSD_INT)
+#define TOSA_IRQ_GPIO_nSD_DETECT PXA_GPIO_TO_IRQ(TOSA_GPIO_nSD_DETECT)
+#define TOSA_IRQ_GPIO_BAT1_CRG PXA_GPIO_TO_IRQ(TOSA_GPIO_BAT1_CRG)
+#define TOSA_IRQ_GPIO_CF_CD PXA_GPIO_TO_IRQ(TOSA_GPIO_CF_CD)
+#define TOSA_IRQ_GPIO_BAT0_CRG PXA_GPIO_TO_IRQ(TOSA_GPIO_BAT0_CRG)
+#define TOSA_IRQ_GPIO_TC6393XB_INT PXA_GPIO_TO_IRQ(TOSA_GPIO_TC6393XB_INT)
+#define TOSA_IRQ_GPIO_BAT0_LOW PXA_GPIO_TO_IRQ(TOSA_GPIO_BAT0_LOW)
+#define TOSA_IRQ_GPIO_EAR_IN PXA_GPIO_TO_IRQ(TOSA_GPIO_EAR_IN)
+#define TOSA_IRQ_GPIO_CF_IRQ PXA_GPIO_TO_IRQ(TOSA_GPIO_CF_IRQ)
+#define TOSA_IRQ_GPIO_ON_KEY PXA_GPIO_TO_IRQ(TOSA_GPIO_ON_KEY)
+#define TOSA_IRQ_GPIO_VGA_LINE PXA_GPIO_TO_IRQ(TOSA_GPIO_VGA_LINE)
+#define TOSA_IRQ_GPIO_TP_INT PXA_GPIO_TO_IRQ(TOSA_GPIO_TP_INT)
+#define TOSA_IRQ_GPIO_JC_CF_IRQ PXA_GPIO_TO_IRQ(TOSA_GPIO_JC_CF_IRQ)
+#define TOSA_IRQ_GPIO_BAT_LOCKED PXA_GPIO_TO_IRQ(TOSA_GPIO_BAT_LOCKED)
+#define TOSA_IRQ_GPIO_BAT1_LOW PXA_GPIO_TO_IRQ(TOSA_GPIO_BAT1_LOW)
+#define TOSA_IRQ_GPIO_KEY_SENSE(a) PXA_GPIO_TO_IRQ(69+(a))
+
+#define TOSA_IRQ_GPIO_MAIN_BAT_LOW PXA_GPIO_TO_IRQ(TOSA_GPIO_MAIN_BAT_LOW)
#define TOSA_KEY_SYNC KEY_102ND /* ??? */
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h
index 903e1a2e664..d2ca01053f6 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/include/mach/trizeps4.h
@@ -43,30 +43,30 @@
/* Ethernet Controller Davicom DM9000 */
#define GPIO_DM9000 101
-#define TRIZEPS4_ETH_IRQ IRQ_GPIO(GPIO_DM9000)
+#define TRIZEPS4_ETH_IRQ PXA_GPIO_TO_IRQ(GPIO_DM9000)
/* UCB1400 audio / TS-controller */
#define GPIO_UCB1400 1
-#define TRIZEPS4_UCB1400_IRQ IRQ_GPIO(GPIO_UCB1400)
+#define TRIZEPS4_UCB1400_IRQ PXA_GPIO_TO_IRQ(GPIO_UCB1400)
/* PCMCIA socket Compact Flash */
#define GPIO_PCD 11 /* PCMCIA Card Detect */
-#define TRIZEPS4_CD_IRQ IRQ_GPIO(GPIO_PCD)
+#define TRIZEPS4_CD_IRQ PXA_GPIO_TO_IRQ(GPIO_PCD)
#define GPIO_PRDY 13 /* READY / nINT */
-#define TRIZEPS4_READY_NINT IRQ_GPIO(GPIO_PRDY)
+#define TRIZEPS4_READY_NINT PXA_GPIO_TO_IRQ(GPIO_PRDY)
/* MMC socket */
#define GPIO_MMC_DET 12
-#define TRIZEPS4_MMC_IRQ IRQ_GPIO(GPIO_MMC_DET)
+#define TRIZEPS4_MMC_IRQ PXA_GPIO_TO_IRQ(GPIO_MMC_DET)
/* DOC NAND chip */
#define GPIO_DOC_LOCK 94
#define GPIO_DOC_IRQ 93
-#define TRIZEPS4_DOC_IRQ IRQ_GPIO(GPIO_DOC_IRQ)
+#define TRIZEPS4_DOC_IRQ PXA_GPIO_TO_IRQ(GPIO_DOC_IRQ)
/* SPI interface */
#define GPIO_SPI 53
-#define TRIZEPS4_SPI_IRQ IRQ_GPIO(GPIO_SPI)
+#define TRIZEPS4_SPI_IRQ PXA_GPIO_TO_IRQ(GPIO_SPI)
/* LEDS using tx2 / rx2 */
#define GPIO_SYS_BUSY_LED 46
@@ -74,7 +74,7 @@
/* Off-module PIC on ConXS board */
#define GPIO_PIC 0
-#define TRIZEPS4_PIC_IRQ IRQ_GPIO(GPIO_PIC)
+#define TRIZEPS4_PIC_IRQ PXA_GPIO_TO_IRQ(GPIO_PIC)
#ifdef CONFIG_MACH_TRIZEPS_CONXS
/* for CONXS base board define these registers */
diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h
deleted file mode 100644
index bfecfbf5f46..00000000000
--- a/arch/arm/mach-pxa/include/mach/vmalloc.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * arch/arm/mach-pxa/include/mach/vmalloc.h
- *
- * Author: Nicolas Pitre
- * Copyright: (C) 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END (0xe8000000UL)
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 532c5d3a97d..5dae15ea671 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -22,7 +22,6 @@
#include <mach/hardware.h>
#include <mach/irqs.h>
-#include <mach/gpio-pxa.h>
#include "generic.h"
@@ -92,44 +91,6 @@ static struct irq_chip pxa_internal_irq_chip = {
.irq_unmask = pxa_unmask_irq,
};
-/*
- * GPIO IRQs for GPIO 0 and 1
- */
-static int pxa_set_low_gpio_type(struct irq_data *d, unsigned int type)
-{
- int gpio = d->irq - IRQ_GPIO0;
-
- if (__gpio_is_occupied(gpio)) {
- pr_err("%s failed: GPIO is configured\n", __func__);
- return -EINVAL;
- }
-
- if (type & IRQ_TYPE_EDGE_RISING)
- GRER0 |= GPIO_bit(gpio);
- else
- GRER0 &= ~GPIO_bit(gpio);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- GFER0 |= GPIO_bit(gpio);
- else
- GFER0 &= ~GPIO_bit(gpio);
-
- return 0;
-}
-
-static void pxa_ack_low_gpio(struct irq_data *d)
-{
- GEDR0 = (1 << (d->irq - IRQ_GPIO0));
-}
-
-static struct irq_chip pxa_low_gpio_chip = {
- .name = "GPIO-l",
- .irq_ack = pxa_ack_low_gpio,
- .irq_mask = pxa_mask_irq,
- .irq_unmask = pxa_unmask_irq,
- .irq_set_type = pxa_set_low_gpio_type,
-};
-
asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
{
uint32_t icip, icmr, mask;
@@ -160,26 +121,7 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
} while (1);
}
-static void __init pxa_init_low_gpio_irq(set_wake_t fn)
-{
- int irq;
-
- /* clear edge detection on GPIO 0 and 1 */
- GFER0 &= ~0x3;
- GRER0 &= ~0x3;
- GEDR0 = 0x3;
-
- for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) {
- irq_set_chip_and_handler(irq, &pxa_low_gpio_chip,
- handle_edge_irq);
- irq_set_chip_data(irq, irq_base(0));
- set_irq_flags(irq, IRQF_VALID);
- }
-
- pxa_low_gpio_chip.irq_set_wake = fn;
-}
-
-void __init pxa_init_irq(int irq_nr, set_wake_t fn)
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
int irq, i, n;
@@ -209,7 +151,6 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn)
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
- pxa_init_low_gpio_irq(fn);
}
#ifdef CONFIG_PM
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 7b324ec6449..1fb86edb857 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -124,8 +124,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)),
- .end = IRQ_GPIO(mfp_to_gpio(MFP_PIN_GPIO90)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO90)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO90)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
}
};
@@ -396,7 +396,7 @@ static struct i2c_board_info littleton_i2c_info[] = {
.type = "da9034",
.addr = 0x34,
.platform_data = &littleton_da9034_info,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO18)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO18)),
},
[1] = {
.type = "max7320",
@@ -445,4 +445,5 @@ MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleto
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = littleton_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 1dd530279e0..cee9ce2fc0b 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -152,8 +152,8 @@ static void __init lpd270_init_irq(void)
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- irq_set_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
- irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lpd270_irq_handler);
+ irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}
@@ -505,4 +505,5 @@ MACHINE_START(LOGICPD_PXA270, "LogicPD PXA270 Card Engine")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = lpd270_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index c48ce6da918..6ebd276aebe 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -170,8 +170,8 @@ static void __init lubbock_init_irq(void)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- irq_set_chained_handler(IRQ_GPIO(0), lubbock_irq_handler);
- irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
+ irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
@@ -556,4 +556,5 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = lubbock_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 4b796c37af3..3d6baf91396 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -184,8 +184,8 @@ static struct resource egpio_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(GPIO13_MAGICIAN_CPLD_IRQ),
- .end = gpio_to_irq(GPIO13_MAGICIAN_CPLD_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO13_MAGICIAN_CPLD_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -468,8 +468,8 @@ static struct resource pasic3_resources[] = {
},
/* No IRQ handler in the PASIC3, DS1WM needs an external IRQ */
[1] = {
- .start = gpio_to_irq(GPIO107_MAGICIAN_DS1WM_IRQ),
- .end = gpio_to_irq(GPIO107_MAGICIAN_DS1WM_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO107_MAGICIAN_DS1WM_IRQ),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -760,4 +760,5 @@ MACHINE_START(MAGICIAN, "HTC Magician")
.handle_irq = pxa27x_handle_irq,
.init_machine = magician_init,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 0567d3965fd..1aebaf71946 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -178,8 +178,8 @@ static void __init mainstone_init_irq(void)
MST_INTMSKENA = 0;
MST_INTSETCLR = 0;
- irq_set_chained_handler(IRQ_GPIO(0), mainstone_irq_handler);
- irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
+ irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
@@ -622,4 +622,5 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = mainstone_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 43a5f6861ca..f14775536b8 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -20,7 +21,6 @@
#include <mach/pxa2xx-regs.h>
#include <mach/mfp-pxa2xx.h>
-#include <mach/gpio-pxa.h>
#include "generic.h"
@@ -29,6 +29,10 @@
#define GAFR_L(x) __GAFR(0, x)
#define GAFR_U(x) __GAFR(1, x)
+#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
+#define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5))
+#define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c)
+
#define PWER_WE35 (1 << 24)
struct gpio_desc {
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index b938fc2c316..e80a3db735c 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -53,6 +53,7 @@
#include <mach/pxa27x-udc.h>
#include <mach/camera.h>
#include <mach/audio.h>
+#include <mach/smemc.h>
#include <media/soc_camera.h>
#include <mach/mioa701.h>
@@ -390,24 +391,19 @@ static struct pxamci_platform_data mioa701_mci_info = {
};
/* FlashRAM */
-static struct resource strataflash_resource = {
+static struct resource docg3_resource = {
.start = PXA_CS0_PHYS,
- .end = PXA_CS0_PHYS + SZ_64M - 1,
+ .end = PXA_CS0_PHYS + SZ_8K - 1,
.flags = IORESOURCE_MEM,
};
-static struct physmap_flash_data strataflash_data = {
- .width = 2,
- /* .set_vpp = mioa701_set_vpp, */
-};
-
-static struct platform_device strataflash = {
- .name = "physmap-flash",
+static struct platform_device docg3 = {
+ .name = "docg3",
.id = -1,
- .resource = &strataflash_resource,
+ .resource = &docg3_resource,
.num_resources = 1,
.dev = {
- .platform_data = &strataflash_data,
+ .platform_data = NULL,
},
};
@@ -541,15 +537,15 @@ static struct pda_power_pdata power_pdata = {
static struct resource power_resources[] = {
[0] = {
.name = "ac",
- .start = gpio_to_irq(GPIO96_AC_DETECT),
- .end = gpio_to_irq(GPIO96_AC_DETECT),
+ .start = PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT),
+ .end = PXA_GPIO_TO_IRQ(GPIO96_AC_DETECT),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
IORESOURCE_IRQ_LOWEDGE,
},
[1] = {
.name = "usb",
- .start = gpio_to_irq(GPIO13_nUSB_DETECT),
- .end = gpio_to_irq(GPIO13_nUSB_DETECT),
+ .start = PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT),
+ .end = PXA_GPIO_TO_IRQ(GPIO13_nUSB_DETECT),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
IORESOURCE_IRQ_LOWEDGE,
},
@@ -685,7 +681,7 @@ static struct platform_device *devices[] __initdata = {
&pxa2xx_pcm,
&mioa701_sound,
&power_dev,
- &strataflash,
+ &docg3,
&gpio_vbus,
&mioa701_camera,
&mioa701_board,
@@ -696,13 +692,13 @@ static void mioa701_machine_exit(void);
static void mioa701_poweroff(void)
{
mioa701_machine_exit();
- arm_machine_restart('s', NULL);
+ pxa_restart('s', NULL);
}
static void mioa701_restart(char c, const char *cmd)
{
mioa701_machine_exit();
- arm_machine_restart('s', cmd);
+ pxa_restart('s', cmd);
}
static struct gpio global_gpios[] = {
@@ -720,6 +716,15 @@ static void __init mioa701_machine_init(void)
RTTR = 32768 - 1; /* Reset crazy WinCE value */
UP2OCR = UP2OCR_HXOE;
+ /*
+ * Set up the flash memory : DiskOnChip G3 on first static memory bank
+ */
+ __raw_writel(0x7ff02dd8, MSC0);
+ __raw_writel(0x0001c391, MCMEM0);
+ __raw_writel(0x0001c391, MCATT0);
+ __raw_writel(0x0001c391, MCIO0);
+
+
pxa2xx_mfp_config(ARRAY_AND_SIZE(mioa701_pin_config));
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
@@ -734,7 +739,6 @@ static void __init mioa701_machine_init(void)
pxa_set_udc_info(&mioa701_udc_info);
pxa_set_ac97_info(&mioa701_ac97_info);
pm_power_off = mioa701_poweroff;
- arm_pm_restart = mioa701_restart;
platform_add_devices(devices, ARRAY_SIZE(devices));
gsm_init();
@@ -752,9 +756,11 @@ static void mioa701_machine_exit(void)
MACHINE_START(MIOA701, "MIO A701")
.atag_offset = 0x100,
+ .restart_mode = 's',
.map_io = &pxa27x_map_io,
.init_irq = &pxa27x_init_irq,
.handle_irq = &pxa27x_handle_irq,
.init_machine = mioa701_machine_init,
.timer = &pxa_timer,
+ .restart = mioa701_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/mp900.c b/arch/arm/mach-pxa/mp900.c
index 4af5d513c38..169bf8f97af 100644
--- a/arch/arm/mach-pxa/mp900.c
+++ b/arch/arm/mach-pxa/mp900.c
@@ -98,5 +98,6 @@ MACHINE_START(NEC_MP900, "MobilePro900/C")
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_machine = mp900c_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index 90928d6e1a5..83570a79e7d 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -417,8 +417,8 @@ static struct resource dm9k_resources[] = {
.flags = IORESOURCE_MEM
},
[2] = {
- .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO9)),
- .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO9)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO9)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO9)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE
}
};
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245c0a0..fbc10d7b95d 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info palm27x_udc_info = {
.gpio_vbus_inverted = 1,
};
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 3d4a2819cae..1fa80f4f80c 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -347,5 +347,6 @@ MACHINE_START(PALMLD, "Palm LifeDrive")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmld_init
+ .init_machine = palmld_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 99d6bcf1f97..5ba14316bd9 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -208,5 +208,6 @@ MACHINE_START(PALMT5, "Palm Tungsten|T5")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmt5_init
+ .init_machine = palmt5_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 6ec7caefb37..29b51b40f09 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
/******************************************************************************
* UDC
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
static struct gpio_vbus_mach_info palmtc_udc_info = {
.gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
.gpio_vbus_inverted = 1,
@@ -542,5 +542,6 @@ MACHINE_START(PALMTC, "Palm Tungsten|C")
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmtc_init
+ .init_machine = palmtc_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 9376da06404..5ebf49acb82 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -361,5 +361,6 @@ MACHINE_START(PALMTE2, "Palm Tungsten|E2")
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmte2_init
+ .init_machine = palmte2_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 94e9708b349..ec8249156c0 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -452,6 +452,7 @@ MACHINE_START(TREO680, "Palm Treo 680")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = treo680_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -464,5 +465,6 @@ MACHINE_START(CENTRO, "Palm Centro 685")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = centro_init,
+ .restart = pxa_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 4e3e45927e9..6170d76dfba 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -369,5 +369,6 @@ MACHINE_START(PALMTX, "Palm T|X")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmtx_init
+ .init_machine = palmtx_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 68e18baf8e0..b2dff9d415e 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -404,5 +404,6 @@ MACHINE_START(PALMZ72, "Palm Zire72")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
- .init_machine = palmz72_init
+ .init_machine = palmz72_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c
index 0b825a35353..fe9054435b6 100644
--- a/arch/arm/mach-pxa/pcm027.c
+++ b/arch/arm/mach-pxa/pcm027.c
@@ -265,4 +265,5 @@ MACHINE_START(PCM027, "Phytec Messtechnik GmbH phyCORE-PXA270")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = pcm027_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 6d38c6548b3..abab4e2b122 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -378,7 +378,7 @@ struct pxacamera_platform_data pcm990_pxacamera_platform_data = {
#include <linux/i2c/pca953x.h>
static struct pca953x_platform_data pca9536_data = {
- .gpio_base = NR_BUILTIN_GPIO,
+ .gpio_base = PXA_NR_BUILTIN_GPIO,
};
static int gpio_bus_switch = -EINVAL;
@@ -406,9 +406,9 @@ static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link)
int ret;
if (gpio_bus_switch < 0) {
- ret = gpio_request(NR_BUILTIN_GPIO, "camera");
+ ret = gpio_request(PXA_NR_BUILTIN_GPIO, "camera");
if (!ret) {
- gpio_bus_switch = NR_BUILTIN_GPIO;
+ gpio_bus_switch = PXA_NR_BUILTIN_GPIO;
gpio_direction_output(gpio_bus_switch, 0);
}
}
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 50c83317786..69036e42ca3 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -166,8 +166,8 @@ static struct resource locomo_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_GPIO(10),
- .end = IRQ_GPIO(10),
+ .start = PXA_GPIO_TO_IRQ(10),
+ .end = PXA_GPIO_TO_IRQ(10),
.flags = IORESOURCE_IRQ,
},
};
@@ -212,7 +212,7 @@ static struct spi_board_info poodle_spi_devices[] = {
.bus_num = 1,
.platform_data = &poodle_ads7846_info,
.controller_data= &poodle_ads7846_chip,
- .irq = gpio_to_irq(POODLE_GPIO_TP_INT),
+ .irq = PXA_GPIO_TO_IRQ(POODLE_GPIO_TP_INT),
},
};
@@ -417,12 +417,7 @@ static struct i2c_board_info __initdata poodle_i2c_devices[] = {
static void poodle_poweroff(void)
{
- arm_machine_restart('h', NULL);
-}
-
-static void poodle_restart(char mode, const char *cmd)
-{
- arm_machine_restart('h', cmd);
+ pxa_restart('h', NULL);
}
static void __init poodle_init(void)
@@ -430,7 +425,6 @@ static void __init poodle_init(void)
int ret = 0;
pm_power_off = poodle_poweroff;
- arm_pm_restart = poodle_restart;
PCFR |= PCFR_OPDE;
@@ -472,4 +466,5 @@ MACHINE_START(POODLE, "SHARP Poodle")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = poodle_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index f05f9486b0c..adf058fa97e 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -17,6 +17,7 @@
* need be.
*/
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -208,6 +209,8 @@ static struct clk_lookup pxa25x_clkregs[] = {
INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"),
INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"),
INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL),
+ INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
};
static struct clk_lookup pxa25x_hwuart_clkreg =
@@ -287,7 +290,7 @@ static inline void pxa25x_init_pm(void) {}
static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
uint32_t mask = 0;
if (gpio >= 0 && gpio < 85)
@@ -312,14 +315,12 @@ set_pwer:
void __init pxa25x_init_irq(void)
{
pxa_init_irq(32, pxa25x_set_wake);
- pxa_init_gpio(IRQ_GPIO_2_x, 2, 84, pxa25x_set_wake);
}
#ifdef CONFIG_CPU_PXA26x
void __init pxa26x_init_irq(void)
{
pxa_init_irq(32, pxa25x_set_wake);
- pxa_init_gpio(IRQ_GPIO_2_x, 2, 89, pxa25x_set_wake);
}
#endif
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index bc5a98ebaa7..180bd8675d4 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -229,6 +230,8 @@ static struct clk_lookup pxa27x_clkregs[] = {
INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
+ INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
};
#ifdef CONFIG_PM
@@ -355,7 +358,7 @@ static inline void pxa27x_init_pm(void) {}
*/
static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
uint32_t mask;
if (gpio >= 0 && gpio < 128)
@@ -386,7 +389,6 @@ static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
void __init pxa27x_init_irq(void)
{
pxa_init_irq(34, pxa27x_set_wake);
- pxa_init_gpio(IRQ_GPIO_2_x, 2, 120, pxa27x_set_wake);
}
static struct map_desc pxa27x_io_desc[] __initdata = {
@@ -422,6 +424,7 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
}
static struct platform_device *devices[] __initdata = {
+ &pxa_device_gpio,
&pxa27x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 40bb16501d8..0388eda7878 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -89,6 +89,7 @@ static DEFINE_PXA3_CKEN(gcu, PXA300_GCU, 0, 0);
static struct clk_lookup common_clkregs[] = {
INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", NULL),
INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
};
static DEFINE_PXA3_CKEN(pxa310_mmc3, MMC3, 19500000, 0);
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 8d614ecd8e9..d487e1ff4c9 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -83,6 +83,7 @@ static DEFINE_PXA3_CKEN(gcu, PXA320_GCU, 0, 0);
static struct clk_lookup pxa320_clkregs[] = {
INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", NULL),
INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
};
static int __init pxa320_init(void)
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 0737c59b88a..f107c71c758 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -25,7 +25,6 @@
#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
-#include <mach/gpio-pxa.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <mach/ohci.h>
@@ -56,6 +55,7 @@ static DEFINE_PXA3_CKEN(pxa3xx_pwm0, PWM0, 13000000, 0);
static DEFINE_PXA3_CKEN(pxa3xx_pwm1, PWM1, 13000000, 0);
static DEFINE_PXA3_CKEN(pxa3xx_mmc1, MMC1, 19500000, 0);
static DEFINE_PXA3_CKEN(pxa3xx_mmc2, MMC2, 19500000, 0);
+static DEFINE_PXA3_CKEN(pxa3xx_gpio, GPIO, 13000000, 0);
static DEFINE_CK(pxa3xx_lcd, LCD, &clk_pxa3xx_hsio_ops);
static DEFINE_CK(pxa3xx_smemc, SMC, &clk_pxa3xx_smemc_ops);
@@ -67,6 +67,7 @@ static struct clk_lookup pxa3xx_clkregs[] = {
INIT_CLKREG(&clk_pxa3xx_pout, NULL, "CLK_POUT"),
/* Power I2C clock is always on */
INIT_CLKREG(&clk_dummy, "pxa3xx-pwri2c.1", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
INIT_CLKREG(&clk_pxa3xx_lcd, "pxa2xx-fb", NULL),
INIT_CLKREG(&clk_pxa3xx_camera, NULL, "CAMCLK"),
INIT_CLKREG(&clk_pxa3xx_ac97, NULL, "AC97CLK"),
@@ -88,6 +89,7 @@ static struct clk_lookup pxa3xx_clkregs[] = {
INIT_CLKREG(&clk_pxa3xx_mmc1, "pxa2xx-mci.0", NULL),
INIT_CLKREG(&clk_pxa3xx_mmc2, "pxa2xx-mci.1", NULL),
INIT_CLKREG(&clk_pxa3xx_smemc, "pxa2xx-pcmcia", NULL),
+ INIT_CLKREG(&clk_pxa3xx_gpio, "pxa-gpio", NULL),
};
#ifdef CONFIG_PM
@@ -365,7 +367,8 @@ static struct irq_chip pxa_ext_wakeup_chip = {
.irq_set_type = pxa_set_ext_wakeup_type,
};
-static void __init pxa_init_ext_wakeup_irq(set_wake_t fn)
+static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
+ unsigned int))
{
int irq;
@@ -388,7 +391,6 @@ void __init pxa3xx_init_irq(void)
pxa_init_irq(56, pxa3xx_set_wake);
pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
- pxa_init_gpio(IRQ_GPIO_2_x, 2, 127, NULL);
}
static struct map_desc pxa3xx_io_desc[] __initdata = {
@@ -417,6 +419,7 @@ void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
}
static struct platform_device *devices[] __initdata = {
+ &pxa_device_gpio,
&pxa27x_device_udc,
&pxa_device_pmu,
&pxa_device_i2s,
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c
index 51371b39d2a..fccc644702e 100644
--- a/arch/arm/mach-pxa/pxa95x.c
+++ b/arch/arm/mach-pxa/pxa95x.c
@@ -20,7 +20,6 @@
#include <linux/syscore_ops.h>
#include <mach/hardware.h>
-#include <mach/gpio-pxa.h>
#include <mach/pxa3xx-regs.h>
#include <mach/pxa930.h>
#include <mach/reset.h>
@@ -212,11 +211,13 @@ static DEFINE_PXA3_CKEN(pxa95x_ssp3, SSP3, 13000000, 0);
static DEFINE_PXA3_CKEN(pxa95x_ssp4, SSP4, 13000000, 0);
static DEFINE_PXA3_CKEN(pxa95x_pwm0, PWM0, 13000000, 0);
static DEFINE_PXA3_CKEN(pxa95x_pwm1, PWM1, 13000000, 0);
+static DEFINE_PXA3_CKEN(pxa95x_gpio, GPIO, 13000000, 0);
static struct clk_lookup pxa95x_clkregs[] = {
INIT_CLKREG(&clk_pxa95x_pout, NULL, "CLK_POUT"),
/* Power I2C clock is always on */
INIT_CLKREG(&clk_dummy, "pxa3xx-pwri2c.1", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
INIT_CLKREG(&clk_pxa95x_lcd, "pxa2xx-fb", NULL),
INIT_CLKREG(&clk_pxa95x_ffuart, "pxa2xx-uart.0", NULL),
INIT_CLKREG(&clk_pxa95x_btuart, "pxa2xx-uart.1", NULL),
@@ -230,12 +231,12 @@ static struct clk_lookup pxa95x_clkregs[] = {
INIT_CLKREG(&clk_pxa95x_ssp4, "pxa27x-ssp.3", NULL),
INIT_CLKREG(&clk_pxa95x_pwm0, "pxa27x-pwm.0", NULL),
INIT_CLKREG(&clk_pxa95x_pwm1, "pxa27x-pwm.1", NULL),
+ INIT_CLKREG(&clk_pxa95x_gpio, "pxa-gpio", NULL),
};
void __init pxa95x_init_irq(void)
{
pxa_init_irq(96, NULL);
- pxa_init_gpio(IRQ_GPIO_2_x, 2, 127, NULL);
}
/*
@@ -248,6 +249,7 @@ void __init pxa95x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
}
static struct platform_device *devices[] __initdata = {
+ &pxa_device_gpio,
&sa1100_device_rtc,
&pxa_device_rtc,
&pxa27x_device_ssp1,
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index f0c05f4d12e..22818c7694a 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -292,8 +292,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = gpio_to_irq(GPIO_ETH_IRQ),
- .end = gpio_to_irq(GPIO_ETH_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
}
};
@@ -672,7 +672,7 @@ static struct lis3lv02d_platform_data lis3_pdata = {
.chip_select = 1, \
.controller_data = (void *) GPIO_ACCEL_CS, \
.platform_data = &lis3_pdata, \
- .irq = gpio_to_irq(GPIO_ACCEL_IRQ), \
+ .irq = PXA_GPIO_TO_IRQ(GPIO_ACCEL_IRQ), \
}
#define SPI_DAC7512 \
@@ -956,7 +956,7 @@ static struct eeti_ts_platform_data eeti_ts_pdata = {
static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
.type = "eeti_ts",
.addr = 0x0a,
- .irq = gpio_to_irq(GPIO_TOUCH_IRQ),
+ .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
.platform_data = &eeti_ts_pdata,
};
@@ -1093,6 +1093,7 @@ MACHINE_START(RAUMFELD_RC, "Raumfeld Controller")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1104,6 +1105,7 @@ MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1115,5 +1117,6 @@ MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker")
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 01e9d643394..c8497b00cdf 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -81,14 +81,17 @@ static void do_hw_reset(void)
OSMR3 = OSCR + 368640; /* ... in 100 ms */
}
-void arch_reset(char mode, const char *cmd)
+void pxa_restart(char mode, const char *cmd)
{
+ local_irq_disable();
+ local_fiq_disable();
+
clear_reset_status(RESET_STATUS_ALL);
switch (mode) {
case 's':
/* Jump into ROM at address 0 */
- cpu_reset(0);
+ soft_restart(0);
break;
case 'g':
do_gpio_reset();
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index fc2c1e05af9..0fe354efb93 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -96,8 +96,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)),
- .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO97)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO97)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO97)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -502,7 +502,7 @@ static struct i2c_board_info saar_i2c_info[] = {
.type = "da9034",
.addr = 0x34,
.platform_data = &saar_da9034_info,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO83)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO83)),
},
};
@@ -602,4 +602,5 @@ MACHINE_START(SAAR, "PXA930 Handheld Platform (aka SAAR)")
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = saar_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c
index 3e999e308a2..febc809ed5a 100644
--- a/arch/arm/mach-pxa/saarb.c
+++ b/arch/arm/mach-pxa/saarb.c
@@ -92,7 +92,7 @@ static struct i2c_board_info saarb_i2c_info[] = {
.type = "88PM860x",
.addr = 0x34,
.platform_data = &saarb_pm8607_info,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO83)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO83)),
},
};
@@ -111,5 +111,6 @@ MACHINE_START(SAARB, "PXA955 Handheld Platform (aka SAARB)")
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = saarb_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 785880f67b6..8d5168d253a 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -907,24 +907,24 @@ static int __devinit sharpsl_pm_probe(struct platform_device *pdev)
gpio_direction_input(sharpsl_pm.machinfo->gpio_batlock);
/* Register interrupt handlers */
- if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "AC Input Detect", sharpsl_ac_isr)) {
- dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin));
+ if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "AC Input Detect", sharpsl_ac_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin));
}
- if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Battery Cover", sharpsl_fatal_isr)) {
- dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock));
+ if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Battery Cover", sharpsl_fatal_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock));
}
if (sharpsl_pm.machinfo->gpio_fatal) {
- if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Fatal Battery", sharpsl_fatal_isr)) {
- dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal));
+ if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Fatal Battery", sharpsl_fatal_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal));
}
}
if (sharpsl_pm.machinfo->batfull_irq) {
/* Register interrupt handler. */
- if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING, "CO", sharpsl_chrg_full_isr)) {
- dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull));
+ if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING, "CO", sharpsl_chrg_full_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull));
}
}
@@ -953,14 +953,14 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
led_trigger_unregister_simple(sharpsl_charge_led_trigger);
- free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
- free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);
+ free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
+ free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);
if (sharpsl_pm.machinfo->gpio_fatal)
- free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);
+ free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);
if (sharpsl_pm.machinfo->batfull_irq)
- free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);
+ free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);
gpio_free(sharpsl_pm.machinfo->gpio_batlock);
gpio_free(sharpsl_pm.machinfo->gpio_batfull);
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 953a9195f9e..abf355d0c92 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -552,7 +552,7 @@ static struct spi_board_info spitz_spi_devices[] = {
.chip_select = 0,
.platform_data = &spitz_ads7846_info,
.controller_data = &spitz_ads7846_chip,
- .irq = gpio_to_irq(SPITZ_GPIO_TP_INT),
+ .irq = PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT),
}, {
.modalias = "corgi-lcd",
.max_speed_hz = 50000,
@@ -926,7 +926,7 @@ static inline void spitz_i2c_init(void) {}
******************************************************************************/
static void spitz_poweroff(void)
{
- arm_machine_restart('g', NULL);
+ pxa_restart('g', NULL);
}
static void spitz_restart(char mode, const char *cmd)
@@ -943,7 +943,6 @@ static void __init spitz_init(void)
{
init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0);
pm_power_off = spitz_poweroff;
- arm_pm_restart = spitz_restart;
PMCR = 0x00;
@@ -982,33 +981,39 @@ static void __init spitz_fixup(struct tag *tags, char **cmdline,
#ifdef CONFIG_MACH_SPITZ
MACHINE_START(SPITZ, "SHARP Spitz")
+ .restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
.timer = &pxa_timer,
+ .restart = spitz_restart,
MACHINE_END
#endif
#ifdef CONFIG_MACH_BORZOI
MACHINE_START(BORZOI, "SHARP Borzoi")
+ .restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
.timer = &pxa_timer,
+ .restart = spitz_restart,
MACHINE_END
#endif
#ifdef CONFIG_MACH_AKITA
MACHINE_START(AKITA, "SHARP Akita")
+ .restart_mode = 'g',
.fixup = spitz_fixup,
.map_io = pxa27x_map_io,
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_machine = spitz_init,
.timer = &pxa_timer,
+ .restart = spitz_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 094279aefe9..34cbdac5152 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
@@ -41,6 +42,7 @@ static int spitz_last_ac_status;
static struct gpio spitz_charger_gpios[] = {
{ SPITZ_GPIO_KEY_INT, GPIOF_IN, "Keyboard Interrupt" },
{ SPITZ_GPIO_SYNC, GPIOF_IN, "Sync" },
+ { SPITZ_GPIO_AC_IN, GPIOF_IN, "Charger Detection" },
{ SPITZ_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" },
{ SPITZ_GPIO_JK_B, GPIOF_OUT_INIT_LOW, "JK B" },
{ SPITZ_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" },
@@ -169,14 +171,19 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm)
static unsigned long spitz_charger_wakeup(void)
{
- return (~GPLR0 & GPIO_bit(SPITZ_GPIO_KEY_INT)) | (GPLR0 & GPIO_bit(SPITZ_GPIO_SYNC));
+ unsigned long ret;
+ ret = (!gpio_get_value(SPITZ_GPIO_KEY_INT)
+ << GPIO_bit(SPITZ_GPIO_KEY_INT))
+ | (!gpio_get_value(SPITZ_GPIO_SYNC)
+ << GPIO_bit(SPITZ_GPIO_SYNC));
+ return ret;
}
unsigned long spitzpm_read_devdata(int type)
{
switch (type) {
case SHARPSL_STATUS_ACIN:
- return (((~GPLR(SPITZ_GPIO_AC_IN)) & GPIO_bit(SPITZ_GPIO_AC_IN)) != 0);
+ return !gpio_get_value(SPITZ_GPIO_AC_IN);
case SHARPSL_STATUS_LOCK:
return gpio_get_value(sharpsl_pm.machinfo->gpio_batlock);
case SHARPSL_STATUS_CHRGFULL:
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 4c9a48bef56..d8a2467de92 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -376,7 +376,7 @@ static struct spi_board_info spi_board_info[] __initdata = {
.bus_num = 1,
.chip_select = 0,
.controller_data = &staccel_chip_info,
- .irq = IRQ_GPIO(96),
+ .irq = PXA_GPIO_TO_IRQ(96),
}, {
.modalias = "cc2420",
.max_speed_hz = 6500000,
@@ -546,7 +546,7 @@ static struct i2c_board_info __initdata imote2_pwr_i2c_board_info[] = {
.type = "da9030",
.addr = 0x49,
.platform_data = &imote2_da9030_pdata,
- .irq = gpio_to_irq(1),
+ .irq = PXA_GPIO_TO_IRQ(1),
},
};
@@ -560,18 +560,18 @@ static struct i2c_board_info __initdata imote2_i2c_board_info[] = {
/* Through a nand gate - Also beware, on V2 sensor board the
* pull up resistors are missing.
*/
- .irq = IRQ_GPIO(99),
+ .irq = PXA_GPIO_TO_IRQ(99),
}, { /* ITS400 Sensor board only */
.type = "tsl2561",
.addr = 0x49,
/* Through a nand gate - Also beware, on V2 sensor board the
* pull up resistors are missing.
*/
- .irq = IRQ_GPIO(99),
+ .irq = PXA_GPIO_TO_IRQ(99),
}, { /* ITS400 Sensor board only */
.type = "tmp175",
.addr = 0x4A,
- .irq = IRQ_GPIO(96),
+ .irq = PXA_GPIO_TO_IRQ(96),
}, { /* IMB400 Multimedia board */
.type = "wm8940",
.addr = 0x1A,
@@ -661,8 +661,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_GPIO(40),
- .end = IRQ_GPIO(40),
+ .start = PXA_GPIO_TO_IRQ(40),
+ .end = PXA_GPIO_TO_IRQ(40),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -707,7 +707,7 @@ static int stargate2_mci_init(struct device *dev,
}
gpio_direction_input(SG2_GPIO_nSD_DETECT);
- err = request_irq(IRQ_GPIO(SG2_GPIO_nSD_DETECT),
+ err = request_irq(PXA_GPIO_TO_IRQ(SG2_GPIO_nSD_DETECT),
stargate2_detect_int,
IRQ_TYPE_EDGE_BOTH,
"MMC card detect",
@@ -738,7 +738,7 @@ static void stargate2_mci_setpower(struct device *dev, unsigned int vdd)
static void stargate2_mci_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIO(SG2_GPIO_nSD_DETECT), data);
+ free_irq(PXA_GPIO_TO_IRQ(SG2_GPIO_nSD_DETECT), data);
gpio_free(SG2_SD_POWER_ENABLE);
gpio_free(SG2_GPIO_nSD_DETECT);
}
@@ -913,7 +913,7 @@ static struct i2c_board_info __initdata stargate2_pwr_i2c_board_info[] = {
.type = "da9030",
.addr = 0x49,
.platform_data = &stargate2_da9030_pdata,
- .irq = gpio_to_irq(1),
+ .irq = PXA_GPIO_TO_IRQ(1),
},
};
@@ -938,18 +938,18 @@ static struct i2c_board_info __initdata stargate2_i2c_board_info[] = {
/* Through a nand gate - Also beware, on V2 sensor board the
* pull up resistors are missing.
*/
- .irq = IRQ_GPIO(99),
+ .irq = PXA_GPIO_TO_IRQ(99),
}, { /* ITS400 Sensor board only */
.type = "tsl2561",
.addr = 0x49,
/* Through a nand gate - Also beware, on V2 sensor board the
* pull up resistors are missing.
*/
- .irq = IRQ_GPIO(99),
+ .irq = PXA_GPIO_TO_IRQ(99),
}, { /* ITS400 Sensor board only */
.type = "tmp175",
.addr = 0x4A,
- .irq = IRQ_GPIO(96),
+ .irq = PXA_GPIO_TO_IRQ(96),
},
};
@@ -1005,6 +1005,7 @@ MACHINE_START(INTELMOTE2, "IMOTE 2")
.timer = &pxa_timer,
.init_machine = imote2_init,
.atag_offset = 0x100,
+ .restart = pxa_restart,
MACHINE_END
#endif
@@ -1017,5 +1018,6 @@ MACHINE_START(STARGATE2, "Stargate 2")
.timer = &pxa_timer,
.init_machine = stargate2_init,
.atag_offset = 0x100,
+ .restart = pxa_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index ad47bb98f30..9fb38e80e07 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -85,8 +85,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)),
- .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)),
+ .start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO47)),
+ .end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO47)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
@@ -495,4 +495,5 @@ MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)")
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = tavorevb_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/tavorevb3.c b/arch/arm/mach-pxa/tavorevb3.c
index fd569167302..f7d9305cfd7 100644
--- a/arch/arm/mach-pxa/tavorevb3.c
+++ b/arch/arm/mach-pxa/tavorevb3.c
@@ -101,7 +101,7 @@ static struct i2c_board_info evb3_i2c_info[] = {
.type = "88PM860x",
.addr = 0x34,
.platform_data = &evb3_pm8607_info,
- .irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO83)),
+ .irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO83)),
},
};
@@ -132,4 +132,5 @@ MACHINE_START(TAVOREVB3, "PXA950 Evaluation Board (aka TavorEVB3)")
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = evb3_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index de684701449..b503049d6d2 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
-#include <linux/sched.h>
#include <asm/div64.h>
#include <asm/mach/irq.h>
@@ -32,18 +31,10 @@
* long as there is always less than 582 seconds between successive
* calls to sched_clock() which should always be the case in practice.
*/
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace pxa_read_sched_clock(void)
{
- u32 cyc = OSCR;
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace pxa_update_sched_clock(void)
-{
- u32 cyc = OSCR;
- update_sched_clock(&cd, cyc, (u32)~0);
+ return OSCR;
}
@@ -119,7 +110,7 @@ static void __init pxa_timer_init(void)
OIER = 0;
OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
- init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate);
+ setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate);
clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4);
ckevt_pxa_osmr0.max_delta_ns =
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 402b0c96613..7ce5c436cc4 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -404,8 +404,8 @@ static struct pda_power_pdata tosa_power_data = {
static struct resource tosa_power_resource[] = {
{
.name = "ac",
- .start = gpio_to_irq(TOSA_GPIO_AC_IN),
- .end = gpio_to_irq(TOSA_GPIO_AC_IN),
+ .start = PXA_GPIO_TO_IRQ(TOSA_GPIO_AC_IN),
+ .end = PXA_GPIO_TO_IRQ(TOSA_GPIO_AC_IN),
.flags = IORESOURCE_IRQ |
IORESOURCE_IRQ_HIGHEDGE |
IORESOURCE_IRQ_LOWEDGE,
@@ -905,7 +905,7 @@ static struct platform_device *devices[] __initdata = {
static void tosa_poweroff(void)
{
- arm_machine_restart('g', NULL);
+ pxa_restart('g', NULL);
}
static void tosa_restart(char mode, const char *cmd)
@@ -935,7 +935,6 @@ static void __init tosa_init(void)
init_gpio_reset(TOSA_GPIO_ON_RESET, 0, 0);
pm_power_off = tosa_poweroff;
- arm_pm_restart = tosa_restart;
PCFR |= PCFR_OPDE;
@@ -970,6 +969,7 @@ static void __init fixup_tosa(struct tag *tags, char **cmdline,
}
MACHINE_START(TOSA, "SHARP Tosa")
+ .restart_mode = 'g',
.fixup = fixup_tosa,
.map_io = pxa25x_map_io,
.nr_irqs = TOSA_NR_IRQS,
@@ -977,4 +977,5 @@ MACHINE_START(TOSA, "SHARP Tosa")
.handle_irq = pxa25x_handle_irq,
.init_machine = tosa_init,
.timer = &pxa_timer,
+ .restart = tosa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 1aaed2b17e1..0f30af617d8 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -561,6 +561,7 @@ MACHINE_START(TRIZEPS4, "Keith und Koep Trizeps IV module")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
@@ -571,4 +572,5 @@ MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 242ddae332d..023d6ca789d 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -422,8 +422,8 @@ static struct resource smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gpio_to_irq(VIPER_ETH_GPIO),
- .end = gpio_to_irq(VIPER_ETH_GPIO),
+ .start = PXA_GPIO_TO_IRQ(VIPER_ETH_GPIO),
+ .end = PXA_GPIO_TO_IRQ(VIPER_ETH_GPIO),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
[2] = {
@@ -546,7 +546,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
/* External UARTs */
{
.mapbase = VIPER_UARTA_PHYS,
- .irq = gpio_to_irq(VIPER_UARTA_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(VIPER_UARTA_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 1843200,
.regshift = 1,
@@ -556,7 +556,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
},
{
.mapbase = VIPER_UARTB_PHYS,
- .irq = gpio_to_irq(VIPER_UARTB_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(VIPER_UARTB_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 1843200,
.regshift = 1,
@@ -596,8 +596,8 @@ static struct resource isp116x_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = gpio_to_irq(VIPER_USB_GPIO),
- .end = gpio_to_irq(VIPER_USB_GPIO),
+ .start = PXA_GPIO_TO_IRQ(VIPER_USB_GPIO),
+ .end = PXA_GPIO_TO_IRQ(VIPER_USB_GPIO),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -998,4 +998,5 @@ MACHINE_START(VIPER, "Arcom/Eurotech VIPER SBC")
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
.init_machine = viper_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index a7539a6ed1f..1f5cfa96f6d 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
.gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
.gpio_pullup = -1,
@@ -395,8 +395,8 @@ static struct resource vpac270_dm9000_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ),
- .end = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO114_VPAC270_ETH_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO114_VPAC270_ETH_IRQ),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -433,7 +433,7 @@ static pxa2xx_audio_ops_t vpac270_ac97_pdata = {
};
static struct ucb1400_pdata vpac270_ucb1400_pdata = {
- .irq = IRQ_GPIO(GPIO113_VPAC270_TS_IRQ),
+ .irq = PXA_GPIO_TO_IRQ(GPIO113_VPAC270_TS_IRQ),
};
static struct platform_device vpac270_ucb1400_device = {
@@ -610,8 +610,8 @@ static struct resource vpac270_ide_resources[] = {
.flags = IORESOURCE_DMA
},
[3] = { /* IDE IRQ pin */
- .start = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ),
- .end = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ),
+ .start = PXA_GPIO_TO_IRQ(GPIO36_VPAC270_IDE_IRQ),
+ .end = PXA_GPIO_TO_IRQ(GPIO36_VPAC270_IDE_IRQ),
.flags = IORESOURCE_IRQ
}
};
@@ -721,5 +721,6 @@ MACHINE_START(VPAC270, "Voipac PXA270")
.init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
- .init_machine = vpac270_init
+ .init_machine = vpac270_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 70e1730ef28..4bbe9a36fe7 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -185,5 +185,6 @@ MACHINE_START(XCEP, "Iskratel XCEP")
.init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.timer = &pxa_timer,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index ead32c90fec..b6476848b56 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -573,7 +573,7 @@ static struct spi_board_info spi_board_info[] __initdata = {
.modalias = "libertas_spi",
.platform_data = &z2_lbs_pdata,
.controller_data = &z2_lbs_chip_info,
- .irq = gpio_to_irq(GPIO36_ZIPITZ2_WIFI_IRQ),
+ .irq = PXA_GPIO_TO_IRQ(GPIO36_ZIPITZ2_WIFI_IRQ),
.max_speed_hz = 13000000,
.bus_num = 1,
.chip_select = 0,
@@ -725,4 +725,5 @@ MACHINE_START(ZIPIT2, "Zipit Z2")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = z2_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 498b83b089f..a4dd1c34705 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -233,7 +233,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
/* FIXME: Shared IRQs on COM1-COM4 will not work properly on v1i1 hardware. */
{ /* COM1 */
.mapbase = 0x10000000,
- .irq = gpio_to_irq(ZEUS_UARTA_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_UARTA_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 14745600,
.regshift = 1,
@@ -242,7 +242,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
},
{ /* COM2 */
.mapbase = 0x10800000,
- .irq = gpio_to_irq(ZEUS_UARTB_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_UARTB_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 14745600,
.regshift = 1,
@@ -251,7 +251,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
},
{ /* COM3 */
.mapbase = 0x11000000,
- .irq = gpio_to_irq(ZEUS_UARTC_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_UARTC_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 14745600,
.regshift = 1,
@@ -260,7 +260,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
},
{ /* COM4 */
.mapbase = 0x11800000,
- .irq = gpio_to_irq(ZEUS_UARTD_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_UARTD_GPIO),
.irqflags = IRQF_TRIGGER_RISING,
.uartclk = 14745600,
.regshift = 1,
@@ -321,8 +321,8 @@ static struct resource zeus_dm9k0_resource[] = {
.flags = IORESOURCE_MEM
},
[2] = {
- .start = gpio_to_irq(ZEUS_ETH0_GPIO),
- .end = gpio_to_irq(ZEUS_ETH0_GPIO),
+ .start = PXA_GPIO_TO_IRQ(ZEUS_ETH0_GPIO),
+ .end = PXA_GPIO_TO_IRQ(ZEUS_ETH0_GPIO),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -339,8 +339,8 @@ static struct resource zeus_dm9k1_resource[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = gpio_to_irq(ZEUS_ETH1_GPIO),
- .end = gpio_to_irq(ZEUS_ETH1_GPIO),
+ .start = PXA_GPIO_TO_IRQ(ZEUS_ETH1_GPIO),
+ .end = PXA_GPIO_TO_IRQ(ZEUS_ETH1_GPIO),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -423,7 +423,7 @@ static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
.modalias = "mcp2515",
.platform_data = &zeus_mcp2515_pdata,
- .irq = gpio_to_irq(ZEUS_CAN_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
.bus_num = 3,
.mode = SPI_MODE_0,
@@ -753,7 +753,7 @@ static struct i2c_board_info __initdata zeus_i2c_devices[] = {
{
I2C_BOARD_INFO("pca9535", 0x20),
.platform_data = &zeus_pca953x_pdata[2],
- .irq = gpio_to_irq(ZEUS_EXTGPIO_GPIO),
+ .irq = PXA_GPIO_TO_IRQ(ZEUS_EXTGPIO_GPIO),
},
{ I2C_BOARD_INFO("lm75a", 0x48) },
{ I2C_BOARD_INFO("24c01", 0x50) },
@@ -911,5 +911,6 @@ MACHINE_START(ARCOM_ZEUS, "Arcom/Eurotech ZEUS")
.handle_irq = pxa27x_handle_irq,
.timer = &pxa_timer,
.init_machine = zeus_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 6c39c332841..98eec80623e 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -408,8 +408,8 @@ static void __init zylonite_init(void)
* Note: We depend that the bootloader set
* the correct value to MSC register for SMC91x.
*/
- smc91x_resources[1].start = gpio_to_irq(gpio_eth_irq);
- smc91x_resources[1].end = gpio_to_irq(gpio_eth_irq);
+ smc91x_resources[1].start = PXA_GPIO_TO_IRQ(gpio_eth_irq);
+ smc91x_resources[1].end = PXA_GPIO_TO_IRQ(gpio_eth_irq);
platform_device_register(&smc91x_device);
pxa_set_ac97_info(NULL);
@@ -430,4 +430,5 @@ MACHINE_START(ZYLONITE, "PXA3xx Platform Development Kit (aka Zylonite)")
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = zylonite_init,
+ .restart = pxa_restart,
MACHINE_END
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 93c64d8d7de..86e59c043de 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -231,12 +231,12 @@ static struct i2c_board_info zylonite_i2c_board_info[] = {
.type = "pca9539",
.addr = 0x74,
.platform_data = &gpio_exp[0],
- .irq = IRQ_GPIO(18),
+ .irq = PXA_GPIO_TO_IRQ(18),
}, {
.type = "pca9539",
.addr = 0x75,
.platform_data = &gpio_exp[1],
- .irq = IRQ_GPIO(19),
+ .irq = PXA_GPIO_TO_IRQ(19),
},
};
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index dba6d0c1fc1..c593be428b8 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -12,6 +12,8 @@ config REALVIEW_EB_A9MP
bool "Support Multicore Cortex-A9 Tile"
depends on MACH_REALVIEW_EB
select CPU_V7
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
help
Enable support for the Cortex-A9MPCore tile fitted to the
Realview(R) Emulation Baseboard platform.
@@ -21,6 +23,8 @@ config REALVIEW_EB_ARM11MP
depends on MACH_REALVIEW_EB
select CPU_V6K
select ARCH_HAS_BARRIERS if SMP
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
help
Enable support for the ARM11MPCore tile fitted to the Realview(R)
Emulation Baseboard platform.
@@ -39,6 +43,8 @@ config MACH_REALVIEW_PB11MP
select CPU_V6K
select ARM_GIC
select HAVE_PATA_PLATFORM
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
select ARCH_HAS_BARRIERS if SMP
help
Include support for the ARM(R) RealView(R) Platform Baseboard for
@@ -51,6 +57,7 @@ config MACH_REALVIEW_PB1176
select CPU_V6
select ARM_GIC
select HAVE_TCM
+ select MIGHT_HAVE_CACHE_L2X0
help
Include support for the ARM(R) RealView(R) Platform Baseboard for
ARM1176JZF-S.
@@ -78,6 +85,8 @@ config MACH_REALVIEW_PBX
bool "Support RealView(R) Platform Baseboard Explore"
select ARM_GIC
select HAVE_PATA_PLATFORM
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
select ZONE_DMA if SPARSEMEM
help
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index d5ed5d4f77d..acd329afc3a 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 47259c89a75..735b57aaf2d 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -65,6 +65,5 @@ extern int realview_usb_register(struct resource *res);
extern void realview_init_early(void);
extern void realview_fixup(struct tag *tags, char **from,
struct meminfo *meminfo);
-extern void (*realview_reset)(char);
#endif
diff --git a/arch/arm/mach-realview/include/mach/entry-macro.S b/arch/arm/mach-realview/include/mach/entry-macro.S
index 4071164aeba..e8a5179c265 100644
--- a/arch/arm/mach-realview/include/mach/entry-macro.S
+++ b/arch/arm/mach-realview/include/mach/entry-macro.S
@@ -7,8 +7,6 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-realview/include/mach/system.h b/arch/arm/mach-realview/include/mach/system.h
index 6657ff23116..471b671159c 100644
--- a/arch/arm/mach-realview/include/mach/system.h
+++ b/arch/arm/mach-realview/include/mach/system.h
@@ -21,12 +21,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
-void (*realview_reset)(char mode);
-
static inline void arch_idle(void)
{
/*
@@ -36,15 +30,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * To reset, we hit the on-board reset register
- * in the system FPGA
- */
- if (realview_reset)
- realview_reset(mode);
- dsb();
-}
-
#endif
diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h
deleted file mode 100644
index a2a4c686140..00000000000
--- a/arch/arm/mach-realview/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-realview/include/mach/vmalloc.h
- *
- * Copyright (C) 2003 ARM Limited
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 026c66ad7ec..0069561464f 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
@@ -91,8 +91,8 @@ static struct map_desc realview_eb_io_desc[] __initdata = {
static struct map_desc realview_eb11mp_io_desc[] __initdata = {
{
- .virtual = IO_ADDRESS(REALVIEW_EB11MP_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_EB11MP_GIC_CPU_BASE),
+ .virtual = IO_ADDRESS(REALVIEW_EB11MP_SCU_BASE),
+ .pfn = __phys_to_pfn(REALVIEW_EB11MP_SCU_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
@@ -415,7 +415,7 @@ static struct sys_timer realview_eb_timer = {
.init = realview_eb_timer_init,
};
-static void realview_eb_reset(char mode)
+static void realview_eb_restart(char mode, const char *cmd)
{
void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -427,6 +427,7 @@ static void realview_eb_reset(char mode)
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
if (core_tile_eb11mp())
__raw_writel(0x0008, reset_ctrl);
+ dsb();
}
static void __init realview_eb_init(void)
@@ -458,7 +459,6 @@ static void __init realview_eb_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
- realview_reset = realview_eb_reset;
}
MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
@@ -469,8 +469,10 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_eb_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = realview_eb_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
+ .restart = realview_eb_restart,
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index c057540ec77..8fe395568a4 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
@@ -336,12 +336,13 @@ static struct sys_timer realview_pb1176_timer = {
.init = realview_pb1176_timer_init,
};
-static void realview_pb1176_reset(char mode)
+static void realview_pb1176_restart(char mode, const char *cmd)
{
void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
__raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
+ dsb();
}
static void realview_pb1176_fixup(struct tag *tags, char **from,
@@ -381,7 +382,6 @@ static void __init realview_pb1176_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
- realview_reset = realview_pb1176_reset;
}
MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
@@ -392,8 +392,10 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pb1176_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = realview_pb1176_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
+ .restart = realview_pb1176_restart,
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 671ad6d6ff0..34a26011bb8 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
@@ -315,7 +315,7 @@ static struct sys_timer realview_pb11mp_timer = {
.init = realview_pb11mp_timer_init,
};
-static void realview_pb11mp_reset(char mode)
+static void realview_pb11mp_restart(char mode, const char *cmd)
{
void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -327,6 +327,7 @@ static void realview_pb11mp_reset(char mode)
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
__raw_writel(0x0000, reset_ctrl);
__raw_writel(0x0004, reset_ctrl);
+ dsb();
}
static void __init realview_pb11mp_init(void)
@@ -355,7 +356,6 @@ static void __init realview_pb11mp_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
- realview_reset = realview_pb11mp_reset;
}
MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
@@ -366,8 +366,10 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pb11mp_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = realview_pb11mp_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
+ .restart = realview_pb11mp_restart,
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index cbf22df4ad5..d26a6def1d6 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
@@ -271,7 +271,7 @@ static struct sys_timer realview_pba8_timer = {
.init = realview_pba8_timer_init,
};
-static void realview_pba8_reset(char mode)
+static void realview_pba8_restart(char mode, const char *cmd)
{
void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -283,6 +283,7 @@ static void realview_pba8_reset(char mode)
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
__raw_writel(0x0000, reset_ctrl);
__raw_writel(0x0004, reset_ctrl);
+ dsb();
}
static void __init realview_pba8_init(void)
@@ -305,7 +306,6 @@ static void __init realview_pba8_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
- realview_reset = realview_pba8_reset;
}
MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
@@ -316,8 +316,10 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pba8_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = realview_pba8_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
+ .restart = realview_pba8_restart,
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 63c4114afae..a250fb4124b 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
@@ -98,8 +98,8 @@ static struct map_desc realview_pbx_io_desc[] __initdata = {
static struct map_desc realview_local_io_desc[] __initdata = {
{
- .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_GIC_CPU_BASE),
- .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_GIC_CPU_BASE),
+ .virtual = IO_ADDRESS(REALVIEW_PBX_TILE_SCU_BASE),
+ .pfn = __phys_to_pfn(REALVIEW_PBX_TILE_SCU_BASE),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
@@ -339,7 +339,7 @@ static void realview_pbx_fixup(struct tag *tags, char **from,
#endif
}
-static void realview_pbx_reset(char mode)
+static void realview_pbx_restart(char mode, const char *cmd)
{
void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
@@ -351,6 +351,7 @@ static void realview_pbx_reset(char mode)
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
__raw_writel(0x00F0, reset_ctrl);
__raw_writel(0x00F4, reset_ctrl);
+ dsb();
}
static void __init realview_pbx_init(void)
@@ -388,7 +389,6 @@ static void __init realview_pbx_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
- realview_reset = realview_pbx_reset;
}
MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
@@ -399,8 +399,10 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
.init_early = realview_init_early,
.init_irq = gic_init_irq,
.timer = &realview_pbx_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = realview_pbx_init,
#ifdef CONFIG_ZONE_DMA
.dma_zone_size = SZ_256M,
#endif
+ .restart = realview_pbx_restart,
MACHINE_END
diff --git a/arch/arm/mach-rpc/include/mach/system.h b/arch/arm/mach-rpc/include/mach/system.h
index 45c7b935dc4..359bab94b6a 100644
--- a/arch/arm/mach-rpc/include/mach/system.h
+++ b/arch/arm/mach-rpc/include/mach/system.h
@@ -7,21 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iomd.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- iomd_writeb(0, IOMD_ROMCR0);
-
- /*
- * Jump into the ROM
- */
- cpu_reset(0);
-}
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
deleted file mode 100644
index fb700228637..00000000000
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-rpc/include/mach/vmalloc.h
- *
- * Copyright (C) 1997 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define VMALLOC_END 0xdc000000UL
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 8559598ab76..3d44a59fc0d 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -24,6 +24,7 @@
#include <asm/elf.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
+#include <asm/hardware/iomd.h>
#include <asm/page.h>
#include <asm/domain.h>
#include <asm/setup.h>
@@ -214,6 +215,16 @@ static int __init rpc_init(void)
arch_initcall(rpc_init);
+static void rpc_restart(char mode, const char *cmd)
+{
+ iomd_writeb(0, IOMD_ROMCR0);
+
+ /*
+ * Jump into the ROM
+ */
+ soft_restart(0);
+}
+
extern struct sys_timer ioc_timer;
MACHINE_START(RISCPC, "Acorn-RiscPC")
@@ -224,4 +235,5 @@ MACHINE_START(RISCPC, "Acorn-RiscPC")
.map_io = rpc_map_io,
.init_irq = rpc_init_irq,
.timer = &ioc_timer,
+ .restart = rpc_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c
index bc53d2d16d1..ac7b2ad5c40 100644
--- a/arch/arm/mach-s3c2410/bast-irq.c
+++ b/arch/arm/mach-s3c2410/bast-irq.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-s3c2410/common.h b/arch/arm/mach-s3c2410/common.h
new file mode 100644
index 00000000000..f65dc806296
--- /dev/null
+++ b/arch/arm/mach-s3c2410/common.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for S3C2410 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C2410_COMMON_H
+#define __ARCH_ARM_MACH_S3C2410_COMMON_H
+
+void s3c2410_restart(char mode, const char *cmd);
+
+#endif /* __ARCH_ARM_MACH_S3C2410_COMMON_H */
diff --git a/arch/arm/mach-s3c2410/cpu-freq.c b/arch/arm/mach-s3c2410/cpu-freq.c
index 75189df995a..7dc6c46b5e2 100644
--- a/arch/arm/mach-s3c2410/cpu-freq.c
+++ b/arch/arm/mach-s3c2410/cpu-freq.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -115,24 +115,25 @@ static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
.debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
};
-static int s3c2410_cpufreq_add(struct sys_device *sysdev)
+static int s3c2410_cpufreq_add(struct device *dev)
{
return s3c_cpufreq_register(&s3c2410_cpufreq_info);
}
-static struct sysdev_driver s3c2410_cpufreq_driver = {
- .add = s3c2410_cpufreq_add,
+static struct subsys_interface s3c2410_cpufreq_interface = {
+ .name = "s3c2410_cpufreq",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_cpufreq_add,
};
static int __init s3c2410_cpufreq_init(void)
{
- return sysdev_driver_register(&s3c2410_sysclass,
- &s3c2410_cpufreq_driver);
+ return subsys_interface_register(&s3c2410_cpufreq_interface);
}
arch_initcall(s3c2410_cpufreq_init);
-static int s3c2410a_cpufreq_add(struct sys_device *sysdev)
+static int s3c2410a_cpufreq_add(struct device *dev)
{
/* alter the maximum freq settings for S3C2410A. If a board knows
* it only has a maximum of 200, then it should register its own
@@ -143,17 +144,18 @@ static int s3c2410a_cpufreq_add(struct sys_device *sysdev)
s3c2410_cpufreq_info.max.pclk = 66500000;
s3c2410_cpufreq_info.name = "s3c2410a";
- return s3c2410_cpufreq_add(sysdev);
+ return s3c2410_cpufreq_add(dev);
}
-static struct sysdev_driver s3c2410a_cpufreq_driver = {
- .add = s3c2410a_cpufreq_add,
+static struct subsys_interface s3c2410a_cpufreq_interface = {
+ .name = "s3c2410a_cpufreq",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410a_cpufreq_add,
};
static int __init s3c2410a_cpufreq_init(void)
{
- return sysdev_driver_register(&s3c2410a_sysclass,
- &s3c2410a_cpufreq_driver);
+ return subsys_interface_register(&s3c2410a_cpufreq_interface);
}
arch_initcall(s3c2410a_cpufreq_init);
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index dbe43df8cfe..2afd00014a7 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <mach/map.h>
@@ -132,7 +132,7 @@ static struct s3c24xx_dma_order __initdata s3c2410_dma_order = {
},
};
-static int __init s3c2410_dma_add(struct sys_device *sysdev)
+static int __init s3c2410_dma_add(struct device *dev)
{
s3c2410_dma_init();
s3c24xx_dma_order_set(&s3c2410_dma_order);
@@ -140,24 +140,28 @@ static int __init s3c2410_dma_add(struct sys_device *sysdev)
}
#if defined(CONFIG_CPU_S3C2410)
-static struct sysdev_driver s3c2410_dma_driver = {
- .add = s3c2410_dma_add,
+static struct subsys_interface s3c2410_dma_interface = {
+ .name = "s3c2410_dma",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_dma_add,
};
static int __init s3c2410_dma_drvinit(void)
{
- return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_dma_driver);
+ return subsys_interface_register(&s3c2410_dma_interface);
}
arch_initcall(s3c2410_dma_drvinit);
-static struct sysdev_driver s3c2410a_dma_driver = {
- .add = s3c2410_dma_add,
+static struct subsys_interface s3c2410a_dma_interface = {
+ .name = "s3c2410a_dma",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410_dma_add,
};
static int __init s3c2410a_dma_drvinit(void)
{
- return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_dma_driver);
+ return subsys_interface_register(&s3c2410a_dma_interface);
}
arch_initcall(s3c2410a_dma_drvinit);
@@ -165,13 +169,15 @@ arch_initcall(s3c2410a_dma_drvinit);
#if defined(CONFIG_CPU_S3C2442)
/* S3C2442 DMA contains the same selection table as the S3C2410 */
-static struct sysdev_driver s3c2442_dma_driver = {
- .add = s3c2410_dma_add,
+static struct subsys_interface s3c2442_dma_interface = {
+ .name = "s3c2442_dma",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2410_dma_add,
};
static int __init s3c2442_dma_drvinit(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_dma_driver);
+ return subsys_interface_register(&s3c2442_dma_interface);
}
arch_initcall(s3c2442_dma_drvinit);
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index ae8e482b642..acbdfecd418 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -13,7 +13,7 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H __FILE__
-#include <linux/sysdev.h>
+#include <linux/device.h>
#define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */
@@ -202,7 +202,7 @@ struct s3c2410_dma_chan {
struct s3c2410_dma_buf *end; /* end of queue */
/* system device */
- struct sys_device dev;
+ struct device dev;
};
typedef unsigned long dma_device_t;
diff --git a/arch/arm/mach-s3c2410/include/mach/reset.h b/arch/arm/mach-s3c2410/include/mach/reset.h
deleted file mode 100644
index f8c9387b049..00000000000
--- a/arch/arm/mach-s3c2410/include/mach/reset.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/reset.h
- *
- * Copyright (c) 2007 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 CPU reset controls
-*/
-
-#ifndef __ASM_ARCH_RESET_H
-#define __ASM_ARCH_RESET_H __FILE__
-
-/* This allows the over-ride of the default reset code
-*/
-
-extern void (*s3c24xx_reset_hook)(void);
-
-#endif /* __ASM_ARCH_RESET_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/system-reset.h b/arch/arm/mach-s3c2410/include/mach/system-reset.h
deleted file mode 100644
index 6faadcee772..00000000000
--- a/arch/arm/mach-s3c2410/include/mach/system-reset.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/system-reset.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - System define for arch_reset() function
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/hardware.h>
-#include <plat/watchdog-reset.h>
-
-extern void (*s3c24xx_reset_hook)(void);
-
-static void
-arch_reset(char mode, const char *cmd)
-{
- if (mode == 's') {
- cpu_reset(0);
- }
-
- if (s3c24xx_reset_hook)
- s3c24xx_reset_hook();
-
- arch_wdt_reset();
-
- /* we'll take a jump through zero as a poor second */
- cpu_reset(0);
-}
diff --git a/arch/arm/mach-s3c2410/include/mach/system.h b/arch/arm/mach-s3c2410/include/mach/system.h
index a8cbca6701e..5e215c1a5c8 100644
--- a/arch/arm/mach-s3c2410/include/mach/system.h
+++ b/arch/arm/mach-s3c2410/include/mach/system.h
@@ -15,12 +15,10 @@
#include <mach/map.h>
#include <mach/idle.h>
-#include <mach/reset.h>
#include <mach/regs-clock.h>
void (*s3c24xx_idle)(void);
-void (*s3c24xx_reset_hook)(void);
void s3c24xx_default_idle(void)
{
@@ -54,5 +52,3 @@ static void arch_idle(void)
else
s3c24xx_default_idle();
}
-
-#include <mach/system-reset.h>
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
deleted file mode 100644
index 7a311e8dddb..00000000000
--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2410 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 79838942b0a..4220cc60de3 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -63,6 +63,8 @@
#include <linux/mtd/map.h>
#include <linux/mtd/physmap.h>
+#include "common.h"
+
static struct resource amlm5900_nor_resource = {
.start = 0x00000000,
.end = 0x01000000 - 1,
@@ -241,4 +243,5 @@ MACHINE_START(AML_M5900, "AML_M5900")
.init_irq = s3c24xx_init_irq,
.init_machine = amlm5900_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index a20ae1ad406..feeaf73933d 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -66,6 +66,7 @@
#include "usb-simtec.h"
#include "nor-simtec.h"
+#include "common.h"
#define COPYRIGHT ", Copyright 2004-2008 Simtec Electronics"
@@ -164,22 +165,6 @@ static struct map_desc bast_iodesc[] __initdata = {
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
-static struct s3c24xx_uart_clksrc bast_serial_clocks[] = {
- [0] = {
- .name = "uclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- }
-};
-
-
static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -187,8 +172,6 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = bast_serial_clocks,
- .clocks_size = ARRAY_SIZE(bast_serial_clocks),
},
[1] = {
.hwport = 1,
@@ -196,8 +179,6 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = bast_serial_clocks,
- .clocks_size = ARRAY_SIZE(bast_serial_clocks),
},
/* port 2 is not actually used */
[2] = {
@@ -206,8 +187,6 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = bast_serial_clocks,
- .clocks_size = ARRAY_SIZE(bast_serial_clocks),
}
};
@@ -662,4 +641,5 @@ MACHINE_START(BAST, "Simtec-BAST")
.init_irq = s3c24xx_init_irq,
.init_machine = bast_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 05a7d16e59f..41245a60398 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -18,7 +18,7 @@
#include <linux/memblock.h>
#include <linux/timer.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -70,6 +70,8 @@
#include <sound/uda1380.h>
+#include "common.h"
+
#define H1940_LATCH ((void __force __iomem *)0xF8000000)
#define H1940_PA_LATCH S3C2410_CS2
@@ -751,4 +753,5 @@ MACHINE_START(H1940, "IPAQ-H1940")
.init_irq = h1940_init_irq,
.init_machine = h1940_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c
index 1dc3e323441..383d00ca8f6 100644
--- a/arch/arm/mach-s3c2410/mach-n30.c
+++ b/arch/arm/mach-s3c2410/mach-n30.c
@@ -51,6 +51,8 @@
#include <plat/s3c2410.h>
#include <plat/udc.h>
+#include "common.h"
+
static struct map_desc n30_iodesc[] __initdata = {
/* nothing here yet */
};
@@ -591,6 +593,7 @@ MACHINE_START(N30, "Acer-N30")
.init_machine = n30_init,
.init_irq = s3c24xx_init_irq,
.map_io = n30_map_io,
+ .restart = s3c2410_restart,
MACHINE_END
MACHINE_START(N35, "Acer-N35")
@@ -601,4 +604,5 @@ MACHINE_START(N35, "Acer-N35")
.init_machine = n30_init,
.init_irq = s3c24xx_init_irq,
.map_io = n30_map_io,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-otom.c b/arch/arm/mach-s3c2410/mach-otom.c
index f03f3fd9cec..5f1e0eeb38a 100644
--- a/arch/arm/mach-s3c2410/mach-otom.c
+++ b/arch/arm/mach-s3c2410/mach-otom.c
@@ -38,6 +38,8 @@
#include <plat/iic.h>
#include <plat/cpu.h>
+#include "common.h"
+
static struct map_desc otom11_iodesc[] __initdata = {
/* Device area */
{ (u32)OTOM_VA_CS8900A_BASE, OTOM_PA_CS8900A_BASE, SZ_16M, MT_DEVICE },
@@ -121,4 +123,5 @@ MACHINE_START(OTOM, "Nex Vision - Otom 1.1")
.init_machine = otom11_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index 45185215625..91c16d9d245 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -28,7 +28,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/spi/spi.h>
@@ -62,6 +62,8 @@
#include <plat/cpu.h>
#include <plat/pm.h>
+#include "common.h"
+
static struct map_desc qt2410_iodesc[] __initdata = {
{ 0xe0000000, __phys_to_pfn(S3C2410_CS3+0x01000000), SZ_1M, MT_DEVICE }
};
@@ -350,6 +352,5 @@ MACHINE_START(QT2410, "QT2410")
.init_irq = s3c24xx_init_irq,
.init_machine = qt2410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
-
-
diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c
index 99c9dfdb71c..bdc27e77287 100644
--- a/arch/arm/mach-s3c2410/mach-smdk2410.c
+++ b/arch/arm/mach-s3c2410/mach-smdk2410.c
@@ -54,6 +54,8 @@
#include <plat/common-smdk.h>
+#include "common.h"
+
static struct map_desc smdk2410_iodesc[] __initdata = {
/* nothing here yet */
};
@@ -116,6 +118,5 @@ MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switc
.init_irq = s3c24xx_init_irq,
.init_machine = smdk2410_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
-
-
diff --git a/arch/arm/mach-s3c2410/mach-tct_hammer.c b/arch/arm/mach-s3c2410/mach-tct_hammer.c
index e0d0b6fb280..1114666f0ef 100644
--- a/arch/arm/mach-s3c2410/mach-tct_hammer.c
+++ b/arch/arm/mach-s3c2410/mach-tct_hammer.c
@@ -54,6 +54,8 @@
#include <linux/mtd/map.h>
#include <linux/mtd/physmap.h>
+#include "common.h"
+
static struct resource tct_hammer_nor_resource = {
.start = 0x00000000,
.end = 0x01000000 - 1,
@@ -151,4 +153,5 @@ MACHINE_START(TCT_HAMMER, "TCT_HAMMER")
.init_irq = s3c24xx_init_irq,
.init_machine = tct_hammer_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index df47e8e9006..dbe668a803e 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -53,6 +53,7 @@
#include "usb-simtec.h"
#include "nor-simtec.h"
+#include "common.h"
/* macros for virtual address mods for the io space entries */
#define VA_C5(item) ((unsigned long)(item) + BAST_VAM_CS5)
@@ -109,23 +110,6 @@ static struct map_desc vr1000_iodesc[] __initdata = {
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
-/* uart clock source(s) */
-
-static struct s3c24xx_uart_clksrc vr1000_serial_clocks[] = {
- [0] = {
- .name = "uclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0.
- }
-};
-
static struct s3c2410_uartcfg vr1000_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -133,8 +117,6 @@ static struct s3c2410_uartcfg vr1000_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = vr1000_serial_clocks,
- .clocks_size = ARRAY_SIZE(vr1000_serial_clocks),
},
[1] = {
.hwport = 1,
@@ -142,8 +124,6 @@ static struct s3c2410_uartcfg vr1000_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = vr1000_serial_clocks,
- .clocks_size = ARRAY_SIZE(vr1000_serial_clocks),
},
/* port 2 is not actually used */
[2] = {
@@ -152,9 +132,6 @@ static struct s3c2410_uartcfg vr1000_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = vr1000_serial_clocks,
- .clocks_size = ARRAY_SIZE(vr1000_serial_clocks),
-
}
};
@@ -405,4 +382,5 @@ MACHINE_START(VR1000, "Thorcom-VR1000")
.init_machine = vr1000_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2410_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/pll.c b/arch/arm/mach-s3c2410/pll.c
index 8338865e11c..c07438bfc99 100644
--- a/arch/arm/mach-s3c2410/pll.c
+++ b/arch/arm/mach-s3c2410/pll.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -66,30 +66,34 @@ static struct cpufreq_frequency_table pll_vals_12MHz[] = {
{ .frequency = 270000000, .index = PLLVAL(127, 1, 1), },
};
-static int s3c2410_plls_add(struct sys_device *dev)
+static int s3c2410_plls_add(struct device *dev)
{
return s3c_plltab_register(pll_vals_12MHz, ARRAY_SIZE(pll_vals_12MHz));
}
-static struct sysdev_driver s3c2410_plls_drv = {
- .add = s3c2410_plls_add,
+static struct subsys_interface s3c2410_plls_interface = {
+ .name = "s3c2410_plls",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_plls_add,
};
static int __init s3c2410_pll_init(void)
{
- return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_plls_drv);
+ return subsys_interface_register(&s3c2410_plls_interface);
}
arch_initcall(s3c2410_pll_init);
-static struct sysdev_driver s3c2410a_plls_drv = {
- .add = s3c2410_plls_add,
+static struct subsys_interface s3c2410a_plls_interface = {
+ .name = "s3c2410a_plls",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410_plls_add,
};
static int __init s3c2410a_pll_init(void)
{
- return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_plls_drv);
+ return subsys_interface_register(&s3c2410a_plls_interface);
}
arch_initcall(s3c2410a_pll_init);
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c
index 4728f9aa7df..fda5385deff 100644
--- a/arch/arm/mach-s3c2410/pm.c
+++ b/arch/arm/mach-s3c2410/pm.c
@@ -24,7 +24,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/time.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -111,7 +111,7 @@ struct syscore_ops s3c2410_pm_syscore_ops = {
.resume = s3c2410_pm_resume,
};
-static int s3c2410_pm_add(struct sys_device *dev)
+static int s3c2410_pm_add(struct device *dev)
{
pm_cpu_prep = s3c2410_pm_prepare;
pm_cpu_sleep = s3c2410_cpu_suspend;
@@ -120,52 +120,60 @@ static int s3c2410_pm_add(struct sys_device *dev)
}
#if defined(CONFIG_CPU_S3C2410)
-static struct sysdev_driver s3c2410_pm_driver = {
- .add = s3c2410_pm_add,
+static struct subsys_interface s3c2410_pm_interface = {
+ .name = "s3c2410_pm",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_pm_add,
};
/* register ourselves */
static int __init s3c2410_pm_drvinit(void)
{
- return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_pm_driver);
+ return subsys_interface_register(&s3c2410_pm_interface);
}
arch_initcall(s3c2410_pm_drvinit);
-static struct sysdev_driver s3c2410a_pm_driver = {
- .add = s3c2410_pm_add,
+static struct subsys_interface s3c2410a_pm_interface = {
+ .name = "s3c2410a_pm",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410_pm_add,
};
static int __init s3c2410a_pm_drvinit(void)
{
- return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_pm_driver);
+ return subsys_interface_register(&s3c2410a_pm_interface);
}
arch_initcall(s3c2410a_pm_drvinit);
#endif
#if defined(CONFIG_CPU_S3C2440)
-static struct sysdev_driver s3c2440_pm_driver = {
- .add = s3c2410_pm_add,
+static struct subsys_interface s3c2440_pm_interface = {
+ .name = "s3c2440_pm",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2410_pm_add,
};
static int __init s3c2440_pm_drvinit(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_pm_driver);
+ return subsys_interface_register(&s3c2440_pm_interface);
}
arch_initcall(s3c2440_pm_drvinit);
#endif
#if defined(CONFIG_CPU_S3C2442)
-static struct sysdev_driver s3c2442_pm_driver = {
- .add = s3c2410_pm_add,
+static struct subsys_interface s3c2442_pm_interface = {
+ .name = "s3c2442_pm",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2410_pm_add,
};
static int __init s3c2442_pm_drvinit(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_pm_driver);
+ return subsys_interface_register(&s3c2442_pm_interface);
}
arch_initcall(s3c2442_pm_drvinit);
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index 3d7ebc557a7..061b6bb1a55 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
@@ -42,6 +42,7 @@
#include <plat/clock.h>
#include <plat/pll.h>
#include <plat/pm.h>
+#include <plat/watchdog-reset.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
@@ -123,30 +124,38 @@ static struct clk s3c2410_armclk = {
.id = -1,
};
+static struct clk_lookup s3c2410_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud0", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
+};
+
void __init s3c2410_init_clocks(int xtal)
{
s3c24xx_register_baseclocks(xtal);
s3c2410_setup_clocks();
s3c2410_baseclk_add();
s3c24xx_register_clock(&s3c2410_armclk);
+ clkdev_add_table(s3c2410_clk_lookup, ARRAY_SIZE(s3c2410_clk_lookup));
}
-struct sysdev_class s3c2410_sysclass = {
+struct bus_type s3c2410_subsys = {
.name = "s3c2410-core",
+ .dev_name = "s3c2410-core",
};
/* Note, we would have liked to name this s3c2410-core, but we cannot
- * register two sysdev_class with the same name.
+ * register two subsystems with the same name.
*/
-struct sysdev_class s3c2410a_sysclass = {
+struct bus_type s3c2410a_subsys = {
.name = "s3c2410a-core",
+ .dev_name = "s3c2410a-core",
};
-static struct sys_device s3c2410_sysdev = {
- .cls = &s3c2410_sysclass,
+static struct device s3c2410_dev = {
+ .bus = &s3c2410_subsys,
};
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
* we also need to ensure that it has been initialised before any of the
* drivers even try to use it (even if not on an s3c2410 based system)
* as a driver which may support both 2410 and 2440 may try and use it.
@@ -154,14 +163,14 @@ static struct sys_device s3c2410_sysdev = {
static int __init s3c2410_core_init(void)
{
- return sysdev_class_register(&s3c2410_sysclass);
+ return subsys_system_register(&s3c2410_subsys, NULL);
}
core_initcall(s3c2410_core_init);
static int __init s3c2410a_core_init(void)
{
- return sysdev_class_register(&s3c2410a_sysclass);
+ return subsys_system_register(&s3c2410a_subsys, NULL);
}
core_initcall(s3c2410a_core_init);
@@ -175,11 +184,23 @@ int __init s3c2410_init(void)
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
- return sysdev_register(&s3c2410_sysdev);
+ return device_register(&s3c2410_dev);
}
int __init s3c2410a_init(void)
{
- s3c2410_sysdev.cls = &s3c2410a_sysclass;
+ s3c2410_dev.bus = &s3c2410a_subsys;
return s3c2410_init();
}
+
+void s3c2410_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ soft_restart(0);
+ }
+
+ arch_wdt_reset();
+
+ /* we'll take a jump through zero as a poor second */
+ soft_restart(0);
+}
diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c
index 140711db6c8..d10b695a906 100644
--- a/arch/arm/mach-s3c2412/clock.c
+++ b/arch/arm/mach-s3c2412/clock.c
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/delay.h>
@@ -659,6 +659,12 @@ static struct clk *clks[] __initdata = {
&clk_armclk,
};
+static struct clk_lookup s3c2412_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_usysclk),
+};
+
int __init s3c2412_baseclk_add(void)
{
unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
@@ -751,6 +757,7 @@ int __init s3c2412_baseclk_add(void)
s3c2412_clkcon_enable(clkp, 0);
}
+ clkdev_add_table(s3c2412_clk_lookup, ARRAY_SIZE(s3c2412_clk_lookup));
s3c_pwmclk_init();
return 0;
}
diff --git a/arch/arm/mach-s3c2412/cpu-freq.c b/arch/arm/mach-s3c2412/cpu-freq.c
index eb3ea172133..d8664b7652c 100644
--- a/arch/arm/mach-s3c2412/cpu-freq.c
+++ b/arch/arm/mach-s3c2412/cpu-freq.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -194,7 +194,7 @@ static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
.debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
};
-static int s3c2412_cpufreq_add(struct sys_device *sysdev)
+static int s3c2412_cpufreq_add(struct device *dev)
{
unsigned long fclk_rate;
@@ -244,14 +244,15 @@ err_fclk:
return -ENOENT;
}
-static struct sysdev_driver s3c2412_cpufreq_driver = {
- .add = s3c2412_cpufreq_add,
+static struct subsys_interface s3c2412_cpufreq_interface = {
+ .name = "s3c2412_cpufreq",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_cpufreq_add,
};
static int s3c2412_cpufreq_init(void)
{
- return sysdev_driver_register(&s3c2412_sysclass,
- &s3c2412_cpufreq_driver);
+ return subsys_interface_register(&s3c2412_cpufreq_interface);
}
arch_initcall(s3c2412_cpufreq_init);
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index d2a7d5ef3e6..142acd3b5e1 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/io.h>
@@ -159,19 +159,21 @@ static struct s3c24xx_dma_selection __initdata s3c2412_dma_sel = {
.map_size = ARRAY_SIZE(s3c2412_dma_mappings),
};
-static int __init s3c2412_dma_add(struct sys_device *sysdev)
+static int __init s3c2412_dma_add(struct device *dev)
{
s3c2410_dma_init();
return s3c24xx_dma_init_map(&s3c2412_dma_sel);
}
-static struct sysdev_driver s3c2412_dma_driver = {
- .add = s3c2412_dma_add,
+static struct subsys_interface s3c2412_dma_interface = {
+ .name = "s3c2412_dma",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_dma_add,
};
static int __init s3c2412_dma_init(void)
{
- return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_dma_driver);
+ return subsys_interface_register(&s3c2412_dma_interface);
}
arch_initcall(s3c2412_dma_init);
diff --git a/arch/arm/mach-s3c2412/irq.c b/arch/arm/mach-s3c2412/irq.c
index 1a1aa220972..a8a46c1644f 100644
--- a/arch/arm/mach-s3c2412/irq.c
+++ b/arch/arm/mach-s3c2412/irq.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -170,7 +170,7 @@ static int s3c2412_irq_rtc_wake(struct irq_data *data, unsigned int state)
static struct irq_chip s3c2412_irq_rtc_chip;
-static int s3c2412_irq_add(struct sys_device *sysdev)
+static int s3c2412_irq_add(struct device *dev)
{
unsigned int irqno;
@@ -200,13 +200,15 @@ static int s3c2412_irq_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2412_irq_driver = {
- .add = s3c2412_irq_add,
+static struct subsys_interface s3c2412_irq_interface = {
+ .name = "s3c2412_irq",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_irq_add,
};
static int s3c2412_irq_init(void)
{
- return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_irq_driver);
+ return subsys_interface_register(&s3c2412_irq_interface);
}
arch_initcall(s3c2412_irq_init);
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 286ef1738c6..ae73ba34ecc 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -48,6 +48,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/s3c2412.h>
#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
@@ -661,4 +662,5 @@ MACHINE_START(JIVE, "JIVE")
.map_io = jive_map_io,
.init_machine = jive_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2412_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index f1eec1b5493..b11451b853d 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -134,6 +134,7 @@ MACHINE_START(S3C2413, "S3C2413")
.map_io = smdk2413_map_io,
.init_machine = smdk2413_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2412_restart,
MACHINE_END
MACHINE_START(SMDK2412, "SMDK2412")
@@ -145,6 +146,7 @@ MACHINE_START(SMDK2412, "SMDK2412")
.map_io = smdk2413_map_io,
.init_machine = smdk2413_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2412_restart,
MACHINE_END
MACHINE_START(SMDK2413, "SMDK2413")
@@ -156,4 +158,5 @@ MACHINE_START(SMDK2413, "SMDK2413")
.map_io = smdk2413_map_io,
.init_machine = smdk2413_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2412_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2412/mach-vstms.c b/arch/arm/mach-s3c2412/mach-vstms.c
index 1bbb1ef5f4f..94bfaa1fb14 100644
--- a/arch/arm/mach-s3c2412/mach-vstms.c
+++ b/arch/arm/mach-s3c2412/mach-vstms.c
@@ -162,4 +162,5 @@ MACHINE_START(VSTMS, "VSTMS")
.init_machine = vstms_init,
.map_io = vstms_map_io,
.timer = &s3c24xx_timer,
+ .restart = s3c2412_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index f4077efa51f..d1adfa65f66 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -16,7 +16,7 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -56,7 +56,7 @@ static void s3c2412_pm_prepare(void)
{
}
-static int s3c2412_pm_add(struct sys_device *sysdev)
+static int s3c2412_pm_add(struct device *dev)
{
pm_cpu_prep = s3c2412_pm_prepare;
pm_cpu_sleep = s3c2412_cpu_suspend;
@@ -87,13 +87,15 @@ static struct sleep_save s3c2412_sleep[] = {
SAVE_ITEM(S3C2413_GPJSLPCON),
};
-static struct sysdev_driver s3c2412_pm_driver = {
- .add = s3c2412_pm_add,
+static struct subsys_interface s3c2412_pm_interface = {
+ .name = "s3c2412_pm",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_pm_add,
};
static __init int s3c2412_pm_init(void)
{
- return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver);
+ return subsys_interface_register(&s3c2412_pm_interface);
}
arch_initcall(s3c2412_pm_init);
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index 57a1e01e4e5..aff6e85a97c 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
@@ -32,7 +32,6 @@
#include <asm/proc-fns.h>
#include <asm/irq.h>
-#include <mach/reset.h>
#include <mach/idle.h>
#include <plat/cpu-freq.h>
@@ -131,8 +130,11 @@ static void s3c2412_idle(void)
cpu_do_idle();
}
-static void s3c2412_hard_reset(void)
+void s3c2412_restart(char mode, const char *cmd)
{
+ if (mode == 's')
+ soft_restart(0);
+
/* errata "Watch-dog/Software Reset Problem" specifies that
* this reset must be done with the SYSCLK sourced from
* EXTCLK instead of FOUT to avoid a glitch in the reset
@@ -164,10 +166,6 @@ void __init s3c2412_map_io(void)
s3c24xx_idle = s3c2412_idle;
- /* set custom reset hook */
-
- s3c24xx_reset_hook = s3c2412_hard_reset;
-
/* register our io-tables */
iotable_init(s3c2412_iodesc, ARRAY_SIZE(s3c2412_iodesc));
@@ -220,25 +218,26 @@ void __init s3c2412_init_clocks(int xtal)
s3c2412_baseclk_add();
}
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
* we also need to ensure that it has been initialised before any of the
* drivers even try to use it (even if not on an s3c2412 based system)
* as a driver which may support both 2410 and 2440 may try and use it.
*/
-struct sysdev_class s3c2412_sysclass = {
+struct bus_type s3c2412_subsys = {
.name = "s3c2412-core",
+ .dev_name = "s3c2412-core",
};
static int __init s3c2412_core_init(void)
{
- return sysdev_class_register(&s3c2412_sysclass);
+ return subsys_system_register(&s3c2412_subsys, NULL);
}
core_initcall(s3c2412_core_init);
-static struct sys_device s3c2412_sysdev = {
- .cls = &s3c2412_sysclass,
+static struct device s3c2412_dev = {
+ .bus = &s3c2412_subsys,
};
int __init s3c2412_init(void)
@@ -250,5 +249,5 @@ int __init s3c2412_init(void)
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
- return sysdev_register(&s3c2412_sysdev);
+ return device_register(&s3c2412_dev);
}
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
index 7b805b279ca..ca0cd227f87 100644
--- a/arch/arm/mach-s3c2416/Makefile
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_S3C2416_PM) += pm.o
#obj-$(CONFIG_S3C2416_DMA) += dma.o
# Device setup
-obj-$(CONFIG_S3C2416_SETUP_SDHCI) += setup-sdhci.o
obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
# Machine support
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
index afbbe8bc21d..59f54d1d7f8 100644
--- a/arch/arm/mach-s3c2416/clock.c
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -90,39 +90,38 @@ static struct clksrc_clk hsmmc_div[] = {
},
};
-static struct clksrc_clk hsmmc_mux[] = {
- [0] = {
- .clk = {
- .name = "hsmmc-if",
- .devname = "s3c-sdhci.0",
- .ctrlbit = (1 << 6),
- .enable = s3c2443_clkcon_enable_s,
- },
- .sources = &(struct clksrc_sources) {
- .nr_sources = 2,
- .sources = (struct clk *[]) {
- [0] = &hsmmc_div[0].clk,
- [1] = NULL, /* to fix */
- },
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 16 },
+static struct clksrc_clk hsmmc_mux0 = {
+ .clk = {
+ .name = "hsmmc-if",
+ .devname = "s3c-sdhci.0",
+ .ctrlbit = (1 << 6),
+ .enable = s3c2443_clkcon_enable_s,
},
- [1] = {
- .clk = {
- .name = "hsmmc-if",
- .devname = "s3c-sdhci.1",
- .ctrlbit = (1 << 12),
- .enable = s3c2443_clkcon_enable_s,
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk * []) {
+ [0] = &hsmmc_div[0].clk,
+ [1] = NULL, /* to fix */
},
- .sources = &(struct clksrc_sources) {
- .nr_sources = 2,
- .sources = (struct clk *[]) {
- [0] = &hsmmc_div[1].clk,
- [1] = NULL, /* to fix */
- },
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 16 },
+};
+
+static struct clksrc_clk hsmmc_mux1 = {
+ .clk = {
+ .name = "hsmmc-if",
+ .devname = "s3c-sdhci.1",
+ .ctrlbit = (1 << 12),
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk * []) {
+ [0] = &hsmmc_div[1].clk,
+ [1] = NULL, /* to fix */
},
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 17 },
},
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 17 },
};
static struct clk hsmmc0_clk = {
@@ -144,8 +143,14 @@ static struct clksrc_clk *clksrcs[] __initdata = {
&hsspi_mux,
&hsmmc_div[0],
&hsmmc_div[1],
- &hsmmc_mux[0],
- &hsmmc_mux[1],
+ &hsmmc_mux0,
+ &hsmmc_mux1,
+};
+
+static struct clk_lookup s3c2416_clk_lookup[] = {
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
};
void __init s3c2416_init_clocks(int xtal)
@@ -167,6 +172,7 @@ void __init s3c2416_init_clocks(int xtal)
s3c_register_clksrc(clksrcs[ptr], 1);
s3c24xx_register_clock(&hsmmc0_clk);
+ clkdev_add_table(s3c2416_clk_lookup, ARRAY_SIZE(s3c2416_clk_lookup));
s3c_pwmclk_init();
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 28ad20d4244..36df761061d 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -213,7 +213,7 @@ static int __init s3c2416_add_sub(unsigned int base,
return 0;
}
-static int __init s3c2416_irq_add(struct sys_device *sysdev)
+static int __init s3c2416_irq_add(struct device *dev)
{
printk(KERN_INFO "S3C2416: IRQ Support\n");
@@ -234,13 +234,15 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2416_irq_driver = {
- .add = s3c2416_irq_add,
+static struct subsys_interface s3c2416_irq_interface = {
+ .name = "s3c2416_irq",
+ .subsys = &s3c2416_subsys,
+ .add_dev = s3c2416_irq_add,
};
static int __init s3c2416_irq_init(void)
{
- return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_irq_driver);
+ return subsys_interface_register(&s3c2416_irq_interface);
}
arch_initcall(s3c2416_irq_init);
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
index a9eee531ca7..eebe1e72b93 100644
--- a/arch/arm/mach-s3c2416/mach-smdk2416.c
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -50,6 +50,7 @@
#include <plat/nand.h>
#include <plat/sdhci.h>
#include <plat/udc.h>
+#include <linux/platform_data/s3c-hsudc.h>
#include <plat/regs-fb-v4.h>
#include <plat/fb.h>
@@ -251,4 +252,5 @@ MACHINE_START(SMDK2416, "SMDK2416")
.map_io = smdk2416_map_io,
.init_machine = smdk2416_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2416_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 9ec54f1d8e7..3bdb15a0d41 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -10,7 +10,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
@@ -48,7 +48,7 @@ static void s3c2416_pm_prepare(void)
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
}
-static int s3c2416_pm_add(struct sys_device *sysdev)
+static int s3c2416_pm_add(struct device *dev)
{
pm_cpu_prep = s3c2416_pm_prepare;
pm_cpu_sleep = s3c2416_cpu_suspend;
@@ -56,13 +56,15 @@ static int s3c2416_pm_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2416_pm_driver = {
- .add = s3c2416_pm_add,
+static struct subsys_interface s3c2416_pm_interface = {
+ .name = "s3c2416_pm",
+ .subsys = &s3c2416_subsys,
+ .add_dev = s3c2416_pm_add,
};
static __init int s3c2416_pm_init(void)
{
- return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver);
+ return subsys_interface_register(&s3c2416_pm_interface);
}
arch_initcall(s3c2416_pm_init);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index ee214bc83c8..5287d2808d3 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -31,7 +31,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -44,7 +44,6 @@
#include <asm/proc-fns.h>
#include <asm/irq.h>
-#include <mach/reset.h>
#include <mach/idle.h>
#include <mach/regs-s3c2443-clock.h>
@@ -68,16 +67,20 @@ static struct map_desc s3c2416_iodesc[] __initdata = {
IODESC_ENT(TIMER),
};
-struct sysdev_class s3c2416_sysclass = {
+struct bus_type s3c2416_subsys = {
.name = "s3c2416-core",
+ .dev_name = "s3c2416-core",
};
-static struct sys_device s3c2416_sysdev = {
- .cls = &s3c2416_sysclass,
+static struct device s3c2416_dev = {
+ .bus = &s3c2416_subsys,
};
-static void s3c2416_hard_reset(void)
+void s3c2416_restart(char mode, const char *cmd)
{
+ if (mode == 's')
+ soft_restart(0);
+
__raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
}
@@ -85,7 +88,6 @@ int __init s3c2416_init(void)
{
printk(KERN_INFO "S3C2416: Initializing architecture\n");
- s3c24xx_reset_hook = s3c2416_hard_reset;
/* s3c24xx_idle = s3c2416_idle; */
/* change WDT IRQ number */
@@ -105,7 +107,7 @@ int __init s3c2416_init(void)
#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
- return sysdev_register(&s3c2416_sysdev);
+ return device_register(&s3c2416_dev);
}
void __init s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no)
@@ -133,7 +135,7 @@ void __init s3c2416_map_io(void)
iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
}
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
* we also need to ensure that it has been initialised before any of the
* drivers even try to use it (even if not on an s3c2416 based system)
* as a driver which may support both 2443 and 2440 may try and use it.
@@ -141,7 +143,7 @@ void __init s3c2416_map_io(void)
static int __init s3c2416_core_init(void)
{
- return sysdev_class_register(&s3c2416_sysclass);
+ return subsys_system_register(&s3c2416_subsys, NULL);
}
core_initcall(s3c2416_core_init);
diff --git a/arch/arm/mach-s3c2416/setup-sdhci.c b/arch/arm/mach-s3c2416/setup-sdhci.c
deleted file mode 100644
index cee53955eb0..00000000000
--- a/arch/arm/mach-s3c2416/setup-sdhci.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/mach-s3c2416/setup-sdhci.c
- *
- * Copyright 2010 Promwad Innovation Company
- * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
- *
- * S3C2416 - Helper functions for settign up SDHCI device(s) (HSMMC)
- *
- * Based on mach-s3c64xx/setup-sdhci.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-
-/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-
-char *s3c2416_hsmmc_clksrcs[4] = {
- [0] = "hsmmc",
- [1] = "hsmmc",
- [2] = "hsmmc-if",
- /* [3] = "48m", - note not successfully used yet */
-};
diff --git a/arch/arm/mach-s3c2440/clock.c b/arch/arm/mach-s3c2440/clock.c
index f9e6bdaf41d..bedbc87a342 100644
--- a/arch/arm/mach-s3c2440/clock.c
+++ b/arch/arm/mach-s3c2440/clock.c
@@ -28,12 +28,12 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/serial_core.h>
#include <mach/hardware.h>
#include <linux/atomic.h>
@@ -43,6 +43,7 @@
#include <plat/clock.h>
#include <plat/cpu.h>
+#include <plat/regs-serial.h>
/* S3C2440 extended clock support */
@@ -108,7 +109,47 @@ static struct clk s3c2440_clk_ac97 = {
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
-static int s3c2440_clk_add(struct sys_device *sysdev)
+static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
+{
+ unsigned long ucon0, ucon1, ucon2, divisor;
+
+ /* the fun of calculating the uart divisors on the s3c2440 */
+ ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
+ ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
+ ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
+
+ ucon0 &= S3C2440_UCON0_DIVMASK;
+ ucon1 &= S3C2440_UCON1_DIVMASK;
+ ucon2 &= S3C2440_UCON2_DIVMASK;
+
+ if (ucon0 != 0)
+ divisor = (ucon0 >> S3C2440_UCON_DIVSHIFT) + 6;
+ else if (ucon1 != 0)
+ divisor = (ucon1 >> S3C2440_UCON_DIVSHIFT) + 21;
+ else if (ucon2 != 0)
+ divisor = (ucon2 >> S3C2440_UCON_DIVSHIFT) + 36;
+ else
+ /* manual claims 44, seems to be 9 */
+ divisor = 9;
+
+ return clk_get_rate(clk->parent) / divisor;
+}
+
+static struct clk s3c2440_clk_fclk_n = {
+ .name = "fclk_n",
+ .parent = &clk_f,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2440_fclk_n_getrate,
+ },
+};
+
+static struct clk_lookup s3c2440_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+};
+
+static int s3c2440_clk_add(struct device *dev)
{
struct clk *clock_upll;
struct clk *clock_h;
@@ -126,10 +167,12 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
s3c2440_clk_cam.parent = clock_h;
s3c2440_clk_ac97.parent = clock_p;
s3c2440_clk_cam_upll.parent = clock_upll;
+ s3c24xx_register_clock(&s3c2440_clk_fclk_n);
s3c24xx_register_clock(&s3c2440_clk_ac97);
s3c24xx_register_clock(&s3c2440_clk_cam);
s3c24xx_register_clock(&s3c2440_clk_cam_upll);
+ clkdev_add_table(s3c2440_clk_lookup, ARRAY_SIZE(s3c2440_clk_lookup));
clk_disable(&s3c2440_clk_ac97);
clk_disable(&s3c2440_clk_cam);
@@ -137,13 +180,15 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2440_clk_driver = {
- .add = s3c2440_clk_add,
+static struct subsys_interface s3c2440_clk_interface = {
+ .name = "s3c2440_clk",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_clk_add,
};
-static __init int s3c24xx_clk_driver(void)
+static __init int s3c24xx_clk_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_clk_driver);
+ return subsys_interface_register(&s3c2440_clk_interface);
}
-arch_initcall(s3c24xx_clk_driver);
+arch_initcall(s3c24xx_clk_init);
diff --git a/arch/arm/mach-s3c2440/common.h b/arch/arm/mach-s3c2440/common.h
new file mode 100644
index 00000000000..db8a98ac68c
--- /dev/null
+++ b/arch/arm/mach-s3c2440/common.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for S3C2440 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H
+#define __ARCH_ARM_MACH_S3C2440_COMMON_H
+
+void s3c2440_restart(char mode, const char *cmd);
+
+#endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */
diff --git a/arch/arm/mach-s3c2440/dma.c b/arch/arm/mach-s3c2440/dma.c
index 0e73f8f9d13..15b1ddf8f62 100644
--- a/arch/arm/mach-s3c2440/dma.c
+++ b/arch/arm/mach-s3c2440/dma.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <mach/map.h>
@@ -174,20 +174,22 @@ static struct s3c24xx_dma_order __initdata s3c2440_dma_order = {
},
};
-static int __init s3c2440_dma_add(struct sys_device *sysdev)
+static int __init s3c2440_dma_add(struct device *dev)
{
s3c2410_dma_init();
s3c24xx_dma_order_set(&s3c2440_dma_order);
return s3c24xx_dma_init_map(&s3c2440_dma_sel);
}
-static struct sysdev_driver s3c2440_dma_driver = {
- .add = s3c2440_dma_add,
+static struct subsys_interface s3c2440_dma_interface = {
+ .name = "s3c2440_dma",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_dma_add,
};
static int __init s3c2440_dma_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_dma_driver);
+ return subsys_interface_register(&s3c2440_dma_interface);
}
arch_initcall(s3c2440_dma_init);
diff --git a/arch/arm/mach-s3c2440/irq.c b/arch/arm/mach-s3c2440/irq.c
index eb1cc0f0705..4fee9bc6bcb 100644
--- a/arch/arm/mach-s3c2440/irq.c
+++ b/arch/arm/mach-s3c2440/irq.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -92,7 +92,7 @@ static struct irq_chip s3c_irq_wdtac97 = {
.irq_ack = s3c_irq_wdtac97_ack,
};
-static int s3c2440_irq_add(struct sys_device *sysdev)
+static int s3c2440_irq_add(struct device *dev)
{
unsigned int irqno;
@@ -113,13 +113,15 @@ static int s3c2440_irq_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2440_irq_driver = {
- .add = s3c2440_irq_add,
+static struct subsys_interface s3c2440_irq_interface = {
+ .name = "s3c2440_irq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_irq_add,
};
static int s3c2440_irq_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_irq_driver);
+ return subsys_interface_register(&s3c2440_irq_interface);
}
arch_initcall(s3c2440_irq_init);
diff --git a/arch/arm/mach-s3c2440/mach-anubis.c b/arch/arm/mach-s3c2440/mach-anubis.c
index 74f92fc3fd0..24569550de1 100644
--- a/arch/arm/mach-s3c2440/mach-anubis.c
+++ b/arch/arm/mach-s3c2440/mach-anubis.c
@@ -55,6 +55,8 @@
#include <plat/cpu.h>
#include <plat/audio-simtec.h>
+#include "common.h"
+
#define COPYRIGHT ", Copyright 2005-2009 Simtec Electronics"
static struct map_desc anubis_iodesc[] __initdata = {
@@ -96,22 +98,6 @@ static struct map_desc anubis_iodesc[] __initdata = {
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
-static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
- [0] = {
- .name = "uclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- }
-};
-
-
static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -119,8 +105,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
[1] = {
.hwport = 2,
@@ -128,8 +113,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = anubis_serial_clocks,
- .clocks_size = ARRAY_SIZE(anubis_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
};
@@ -503,4 +487,5 @@ MACHINE_START(ANUBIS, "Simtec-Anubis")
.init_machine = anubis_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index 38887ee0c78..d6a9763110c 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -49,6 +49,8 @@
#include <plat/cpu.h>
#include <plat/mci.h>
+#include "common.h"
+
static struct map_desc at2440evb_iodesc[] __initdata = {
/* Nothing here */
};
@@ -57,22 +59,6 @@ static struct map_desc at2440evb_iodesc[] __initdata = {
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
-static struct s3c24xx_uart_clksrc at2440evb_serial_clocks[] = {
- [0] = {
- .name = "uclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- }
-};
-
-
static struct s3c2410_uartcfg at2440evb_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -80,8 +66,7 @@ static struct s3c2410_uartcfg at2440evb_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = at2440evb_serial_clocks,
- .clocks_size = ARRAY_SIZE(at2440evb_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
[1] = {
.hwport = 1,
@@ -89,8 +74,7 @@ static struct s3c2410_uartcfg at2440evb_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = at2440evb_serial_clocks,
- .clocks_size = ARRAY_SIZE(at2440evb_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
};
@@ -238,4 +222,5 @@ MACHINE_START(AT2440EVB, "AT2440EVB")
.init_machine = at2440evb_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index de1e0ff46ce..5859e609d28 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -90,6 +90,7 @@
#include <plat/iic.h>
#include <plat/ts.h>
+#include "common.h"
static struct pcf50633 *gta02_pcf;
@@ -600,4 +601,5 @@ MACHINE_START(NEO1973_GTA02, "GTA02")
.init_irq = s3c24xx_init_irq,
.init_machine = gta02_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 91fe0b4c95f..adbbb85bc4c 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -60,6 +60,8 @@
#include <sound/s3c24xx_uda134x.h>
+#include "common.h"
+
#define MACH_MINI2440_DM9K_BASE (S3C2410_CS4 + 0x300)
static struct map_desc mini2440_iodesc[] __initdata = {
@@ -167,6 +169,24 @@ static struct s3c2410fb_display mini2440_lcd_cfg[] __initdata = {
.lcdcon5 = (S3C2410_LCDCON5_FRM565 |
S3C2410_LCDCON5_HWSWP),
},
+ /* mini2440 + 3.5" TFT (LCD-W35i, LQ035Q1DG06 type) + touchscreen*/
+ [3] = {
+ _LCD_DECLARE(
+ /* clock */
+ 7,
+ /* xres, margin_right, margin_left, hsync */
+ 320, 68, 66, 4,
+ /* yres, margin_top, margin_bottom, vsync */
+ 240, 4, 4, 9,
+ /* refresh rate */
+ 60),
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVDEN |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVCLK |
+ S3C2410_LCDCON5_HWSWP),
+ },
};
/* todo - put into gpio header */
@@ -681,4 +701,5 @@ MACHINE_START(MINI2440, "MINI2440")
.init_machine = mini2440_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c
index 61c0bf14816..40eaf844bc1 100644
--- a/arch/arm/mach-s3c2440/mach-nexcoder.c
+++ b/arch/arm/mach-s3c2440/mach-nexcoder.c
@@ -47,6 +47,8 @@
#include <plat/devs.h>
#include <plat/cpu.h>
+#include "common.h"
+
static struct map_desc nexcoder_iodesc[] __initdata = {
/* nothing here yet */
};
@@ -156,4 +158,5 @@ MACHINE_START(NEXCODER_2440, "NexVision - Nexcoder 2440")
.init_machine = nexcoder_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index dc142ebf8cb..4c480ef734f 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -54,6 +54,8 @@
#include <plat/devs.h>
#include <plat/cpu.h>
+#include "common.h"
+
/* onboard peripheral map */
static struct map_desc osiris_iodesc[] __initdata = {
@@ -100,21 +102,6 @@ static struct map_desc osiris_iodesc[] __initdata = {
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
-static struct s3c24xx_uart_clksrc osiris_serial_clocks[] = {
- [0] = {
- .name = "uclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- }
-};
-
static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -122,8 +109,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
[1] = {
.hwport = 1,
@@ -131,8 +117,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
[2] = {
.hwport = 2,
@@ -140,8 +125,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
- .clocks = osiris_serial_clocks,
- .clocks_size = ARRAY_SIZE(osiris_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
}
};
@@ -452,4 +436,5 @@ MACHINE_START(OSIRIS, "Simtec-OSIRIS")
.init_irq = s3c24xx_init_irq,
.init_machine = osiris_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 0d3453bf567..80077f6472e 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -24,7 +24,7 @@
#include <linux/serial_core.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/pwm.h>
@@ -62,21 +62,14 @@
#include <sound/uda1380.h>
+#include "common.h"
+
#define LCD_PWM_PERIOD 192960
#define LCD_PWM_DUTY 127353
static struct map_desc rx1950_iodesc[] __initdata = {
};
-static struct s3c24xx_uart_clksrc rx1950_serial_clocks[] = {
- [0] = {
- .name = "fclk",
- .divisor = 0x0a,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
@@ -84,8 +77,7 @@ static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = {
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
- .clocks = rx1950_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
},
[1] = {
.hwport = 1,
@@ -93,8 +85,7 @@ static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = {
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
- .clocks = rx1950_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
},
/* IR port */
[2] = {
@@ -103,8 +94,7 @@ static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = {
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0xf1,
- .clocks = rx1950_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
},
};
@@ -832,4 +822,5 @@ MACHINE_START(RX1950, "HP iPAQ RX1950")
.init_irq = s3c24xx_init_irq,
.init_machine = rx1950_init_machine,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx3715.c b/arch/arm/mach-s3c2440/mach-rx3715.c
index e19499c2f90..20103bafbd4 100644
--- a/arch/arm/mach-s3c2440/mach-rx3715.c
+++ b/arch/arm/mach-s3c2440/mach-rx3715.c
@@ -20,7 +20,7 @@
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/console.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
@@ -51,6 +51,8 @@
#include <plat/cpu.h>
#include <plat/pm.h>
+#include "common.h"
+
static struct map_desc rx3715_iodesc[] __initdata = {
/* dump ISA space somewhere unused */
@@ -67,16 +69,6 @@ static struct map_desc rx3715_iodesc[] __initdata = {
},
};
-
-static struct s3c24xx_uart_clksrc rx3715_serial_clocks[] = {
- [0] = {
- .name = "fclk",
- .divisor = 0,
- .min_baud = 0,
- .max_baud = 0,
- }
-};
-
static struct s3c2410_uartcfg rx3715_uartcfgs[] = {
[0] = {
.hwport = 0,
@@ -84,8 +76,7 @@ static struct s3c2410_uartcfg rx3715_uartcfgs[] = {
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
- .clocks = rx3715_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
},
[1] = {
.hwport = 1,
@@ -93,8 +84,7 @@ static struct s3c2410_uartcfg rx3715_uartcfgs[] = {
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x00,
- .clocks = rx3715_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
},
/* IR port */
[2] = {
@@ -103,8 +93,7 @@ static struct s3c2410_uartcfg rx3715_uartcfgs[] = {
.ucon = 0x3c5,
.ulcon = 0x43,
.ufcon = 0x51,
- .clocks = rx3715_serial_clocks,
- .clocks_size = ARRAY_SIZE(rx3715_serial_clocks),
+ .clk_sel = S3C2410_UCON_CLKSEL3,
}
};
@@ -224,4 +213,5 @@ MACHINE_START(RX3715, "IPAQ-RX3715")
.init_irq = rx3715_init_irq,
.init_machine = rx3715_init_machine,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-smdk2440.c b/arch/arm/mach-s3c2440/mach-smdk2440.c
index 36eeb4197a8..1deb60d12a6 100644
--- a/arch/arm/mach-s3c2440/mach-smdk2440.c
+++ b/arch/arm/mach-s3c2440/mach-smdk2440.c
@@ -47,6 +47,8 @@
#include <plat/common-smdk.h>
+#include "common.h"
+
static struct map_desc smdk2440_iodesc[] __initdata = {
/* ISA IO Space map (memory space selected by A24) */
@@ -181,4 +183,5 @@ MACHINE_START(S3C2440, "SMDK2440")
.map_io = smdk2440_map_io,
.init_machine = smdk2440_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2440_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
index 976002fb1b8..cf7596694ef 100644
--- a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
+++ b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
@@ -17,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -270,7 +270,7 @@ struct s3c_cpufreq_info s3c2440_cpufreq_info = {
.debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
};
-static int s3c2440_cpufreq_add(struct sys_device *sysdev)
+static int s3c2440_cpufreq_add(struct device *dev)
{
xtal = s3c_cpufreq_clk_get(NULL, "xtal");
hclk = s3c_cpufreq_clk_get(NULL, "hclk");
@@ -285,27 +285,29 @@ static int s3c2440_cpufreq_add(struct sys_device *sysdev)
return s3c_cpufreq_register(&s3c2440_cpufreq_info);
}
-static struct sysdev_driver s3c2440_cpufreq_driver = {
- .add = s3c2440_cpufreq_add,
+static struct subsys_interface s3c2440_cpufreq_interface = {
+ .name = "s3c2440_cpufreq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_cpufreq_add,
};
static int s3c2440_cpufreq_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass,
- &s3c2440_cpufreq_driver);
+ return subsys_interface_register(&s3c2440_cpufreq_interface);
}
/* arch_initcall adds the clocks we need, so use subsys_initcall. */
subsys_initcall(s3c2440_cpufreq_init);
-static struct sysdev_driver s3c2442_cpufreq_driver = {
- .add = s3c2440_cpufreq_add,
+static struct subsys_interface s3c2442_cpufreq_interface = {
+ .name = "s3c2442_cpufreq",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_cpufreq_add,
};
static int s3c2442_cpufreq_init(void)
{
- return sysdev_driver_register(&s3c2442_sysclass,
- &s3c2442_cpufreq_driver);
+ return subsys_interface_register(&s3c2442_cpufreq_interface);
}
subsys_initcall(s3c2442_cpufreq_init);
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
index f105d5e8c47..b5368ae8d7f 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-12000000.c
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -51,7 +51,7 @@ static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
{ .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */
};
-static int s3c2440_plls12_add(struct sys_device *dev)
+static int s3c2440_plls12_add(struct device *dev)
{
struct clk *xtal_clk;
unsigned long xtal;
@@ -72,25 +72,29 @@ static int s3c2440_plls12_add(struct sys_device *dev)
return 0;
}
-static struct sysdev_driver s3c2440_plls12_drv = {
- .add = s3c2440_plls12_add,
+static struct subsys_interface s3c2440_plls12_interface = {
+ .name = "s3c2440_plls12",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_plls12_add,
};
static int __init s3c2440_pll_12mhz(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_plls12_drv);
+ return subsys_interface_register(&s3c2440_plls12_interface);
}
arch_initcall(s3c2440_pll_12mhz);
-static struct sysdev_driver s3c2442_plls12_drv = {
- .add = s3c2440_plls12_add,
+static struct subsys_interface s3c2442_plls12_interface = {
+ .name = "s3c2442_plls12",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_plls12_add,
};
static int __init s3c2442_pll_12mhz(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_plls12_drv);
+ return subsys_interface_register(&s3c2442_plls12_interface);
}
diff --git a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
index c8a8f90ef38..42f2b5cd239 100644
--- a/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
+++ b/arch/arm/mach-s3c2440/s3c2440-pll-16934400.c
@@ -14,7 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -79,7 +79,7 @@ static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
{ .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */
};
-static int s3c2440_plls169344_add(struct sys_device *dev)
+static int s3c2440_plls169344_add(struct device *dev)
{
struct clk *xtal_clk;
unsigned long xtal;
@@ -100,28 +100,28 @@ static int s3c2440_plls169344_add(struct sys_device *dev)
return 0;
}
-static struct sysdev_driver s3c2440_plls169344_drv = {
- .add = s3c2440_plls169344_add,
+static struct subsys_interface s3c2440_plls169344_interface = {
+ .name = "s3c2440_plls169344",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_plls169344_add,
};
static int __init s3c2440_pll_16934400(void)
{
- return sysdev_driver_register(&s3c2440_sysclass,
- &s3c2440_plls169344_drv);
-
+ return subsys_interface_register(&s3c2440_plls169344_interface);
}
arch_initcall(s3c2440_pll_16934400);
-static struct sysdev_driver s3c2442_plls169344_drv = {
- .add = s3c2440_plls169344_add,
+static struct subsys_interface s3c2442_plls169344_interface = {
+ .name = "s3c2442_plls169344",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_plls169344_add,
};
static int __init s3c2442_pll_16934400(void)
{
- return sysdev_driver_register(&s3c2442_sysclass,
- &s3c2442_plls169344_drv);
-
+ return subsys_interface_register(&s3c2442_plls169344_interface);
}
arch_initcall(s3c2442_pll_16934400);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index 37f8cc6aabd..517623a09fc 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/gpio.h>
#include <linux/clk.h>
@@ -35,13 +35,14 @@
#include <plat/cpu.h>
#include <plat/s3c244x.h>
#include <plat/pm.h>
+#include <plat/watchdog-reset.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
-static struct sys_device s3c2440_sysdev = {
- .cls = &s3c2440_sysclass,
+static struct device s3c2440_dev = {
+ .bus = &s3c2440_subsys,
};
int __init s3c2440_init(void)
@@ -63,7 +64,7 @@ int __init s3c2440_init(void)
/* register our system device for everything else */
- return sysdev_register(&s3c2440_sysdev);
+ return device_register(&s3c2440_dev);
}
void __init s3c2440_map_io(void)
@@ -73,3 +74,15 @@ void __init s3c2440_map_io(void)
s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up;
s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up;
}
+
+void s3c2440_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ soft_restart(0);
+ }
+
+ arch_wdt_reset();
+
+ /* we'll take a jump through zero as a poor second */
+ soft_restart(0);
+}
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 2c822e09392..8004e0497bf 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -28,7 +28,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
@@ -123,7 +122,7 @@ static struct clk s3c2442_clk_cam_upll = {
},
};
-static int s3c2442_clk_add(struct sys_device *sysdev)
+static int s3c2442_clk_add(struct device *dev)
{
struct clk *clock_upll;
struct clk *clock_h;
@@ -149,20 +148,22 @@ static int s3c2442_clk_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2442_clk_driver = {
- .add = s3c2442_clk_add,
+static struct subsys_interface s3c2442_clk_interface = {
+ .name = "s3c2442_clk",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2442_clk_add,
};
static __init int s3c2442_clk_init(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_clk_driver);
+ return subsys_interface_register(&s3c2442_clk_interface);
}
arch_initcall(s3c2442_clk_init);
-static struct sys_device s3c2442_sysdev = {
- .cls = &s3c2442_sysclass,
+static struct device s3c2442_dev = {
+ .bus = &s3c2442_subsys,
};
int __init s3c2442_init(void)
@@ -175,7 +176,7 @@ int __init s3c2442_init(void)
register_syscore_ops(&s3c244x_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
- return sysdev_register(&s3c2442_sysdev);
+ return device_register(&s3c2442_dev);
}
void __init s3c2442_map_io(void)
diff --git a/arch/arm/mach-s3c2440/s3c244x-clock.c b/arch/arm/mach-s3c2440/s3c244x-clock.c
index 7f5ea0a169a..b3fdbdda3d5 100644
--- a/arch/arm/mach-s3c2440/s3c244x-clock.c
+++ b/arch/arm/mach-s3c2440/s3c244x-clock.c
@@ -28,7 +28,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
@@ -73,7 +72,7 @@ static struct clk clk_arm = {
},
};
-static int s3c244x_clk_add(struct sys_device *sysdev)
+static int s3c244x_clk_add(struct device *dev)
{
unsigned long camdivn = __raw_readl(S3C2440_CAMDIVN);
unsigned long clkdivn;
@@ -115,24 +114,28 @@ static int s3c244x_clk_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2440_clk_driver = {
- .add = s3c244x_clk_add,
+static struct subsys_interface s3c2440_clk_interface = {
+ .name = "s3c2440_clk",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c244x_clk_add,
};
static int s3c2440_clk_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_clk_driver);
+ return subsys_interface_register(&s3c2440_clk_interface);
}
arch_initcall(s3c2440_clk_init);
-static struct sysdev_driver s3c2442_clk_driver = {
- .add = s3c244x_clk_add,
+static struct subsys_interface s3c2442_clk_interface = {
+ .name = "s3c2442_clk",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c244x_clk_add,
};
static int s3c2442_clk_init(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_clk_driver);
+ return subsys_interface_register(&s3c2442_clk_interface);
}
arch_initcall(s3c2442_clk_init);
diff --git a/arch/arm/mach-s3c2440/s3c244x-irq.c b/arch/arm/mach-s3c2440/s3c244x-irq.c
index c63e8f26d90..74d3dcf46a4 100644
--- a/arch/arm/mach-s3c2440/s3c244x-irq.c
+++ b/arch/arm/mach-s3c2440/s3c244x-irq.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -91,7 +91,7 @@ static struct irq_chip s3c_irq_cam = {
.irq_ack = s3c_irq_cam_ack,
};
-static int s3c244x_irq_add(struct sys_device *sysdev)
+static int s3c244x_irq_add(struct device *dev)
{
unsigned int irqno;
@@ -114,25 +114,29 @@ static int s3c244x_irq_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2440_irq_driver = {
- .add = s3c244x_irq_add,
+static struct subsys_interface s3c2440_irq_interface = {
+ .name = "s3c2440_irq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c244x_irq_add,
};
static int s3c2440_irq_init(void)
{
- return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_irq_driver);
+ return subsys_interface_register(&s3c2440_irq_interface);
}
arch_initcall(s3c2440_irq_init);
-static struct sysdev_driver s3c2442_irq_driver = {
- .add = s3c244x_irq_add,
+static struct subsys_interface s3c2442_irq_interface = {
+ .name = "s3c2442_irq",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c244x_irq_add,
};
static int s3c2442_irq_init(void)
{
- return sysdev_driver_register(&s3c2442_sysclass, &s3c2442_irq_driver);
+ return subsys_interface_register(&s3c2442_irq_interface);
}
arch_initcall(s3c2442_irq_init);
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 7e8a23d2098..36bc60f61d0 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -135,17 +135,19 @@ void __init s3c244x_init_clocks(int xtal)
s3c2410_baseclk_add();
}
-/* Since the S3C2442 and S3C2440 share items, put both sysclasses here */
+/* Since the S3C2442 and S3C2440 share items, put both subsystems here */
-struct sysdev_class s3c2440_sysclass = {
+struct bus_type s3c2440_subsys = {
.name = "s3c2440-core",
+ .dev_name = "s3c2440-core",
};
-struct sysdev_class s3c2442_sysclass = {
+struct bus_type s3c2442_subsys = {
.name = "s3c2442-core",
+ .dev_name = "s3c2442-core",
};
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
* we also need to ensure that it has been initialised before any of the
* drivers even try to use it (even if not on an s3c2440 based system)
* as a driver which may support both 2410 and 2440 may try and use it.
@@ -153,14 +155,14 @@ struct sysdev_class s3c2442_sysclass = {
static int __init s3c2440_core_init(void)
{
- return sysdev_class_register(&s3c2440_sysclass);
+ return subsys_system_register(&s3c2440_subsys, NULL);
}
core_initcall(s3c2440_core_init);
static int __init s3c2442_core_init(void)
{
- return sysdev_class_register(&s3c2442_sysclass);
+ return subsys_system_register(&s3c2442_subsys, NULL);
}
core_initcall(s3c2442_core_init);
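As the comment above explains, the subsystem must exist before any device or interface tries to attach to it, so registration is split across initcall levels: the bus_type goes in at core_initcall time, the core device only during SoC init. A sketch of that ordering with placeholder names (example_subsys, example_dev):

static struct bus_type example_subsys = {
        .name           = "example-core",
        .dev_name       = "example-core",
};

static struct device example_dev = {
        .bus            = &example_subsys,
};

static int __init example_core_init(void)
{
        /* core_initcall: make the subsystem available first */
        return subsys_system_register(&example_subsys, NULL);
}
core_initcall(example_core_init);

int __init example_init(void)
{
        /* called later from SoC init: attach the core device */
        return device_register(&example_dev);
}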
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 1c2c088aa2e..6dde2696f8f 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -27,7 +27,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/serial_core.h>
diff --git a/arch/arm/mach-s3c2443/dma.c b/arch/arm/mach-s3c2443/dma.c
index fe52151d2e8..de6b4a23c9e 100644
--- a/arch/arm/mach-s3c2443/dma.c
+++ b/arch/arm/mach-s3c2443/dma.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/io.h>
@@ -135,19 +135,21 @@ static struct s3c24xx_dma_selection __initdata s3c2443_dma_sel = {
.map_size = ARRAY_SIZE(s3c2443_dma_mappings),
};
-static int __init s3c2443_dma_add(struct sys_device *sysdev)
+static int __init s3c2443_dma_add(struct device *dev)
{
s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100);
return s3c24xx_dma_init_map(&s3c2443_dma_sel);
}
-static struct sysdev_driver s3c2443_dma_driver = {
- .add = s3c2443_dma_add,
+static struct subsys_interface s3c2443_dma_interface = {
+ .name = "s3c2443_dma",
+ .subsys = &s3c2443_subsys,
+ .add_dev = s3c2443_dma_add,
};
static int __init s3c2443_dma_init(void)
{
- return sysdev_driver_register(&s3c2443_sysclass, &s3c2443_dma_driver);
+ return subsys_interface_register(&s3c2443_dma_interface);
}
arch_initcall(s3c2443_dma_init);
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 83ecb1173fb..35e4ff24fb4 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -241,7 +241,7 @@ static int __init s3c2443_add_sub(unsigned int base,
return 0;
}
-static int __init s3c2443_irq_add(struct sys_device *sysdev)
+static int __init s3c2443_irq_add(struct device *dev)
{
printk("S3C2443: IRQ Support\n");
@@ -265,13 +265,15 @@ static int __init s3c2443_irq_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s3c2443_irq_driver = {
- .add = s3c2443_irq_add,
+static struct subsys_interface s3c2443_irq_interface = {
+ .name = "s3c2443_irq",
+ .subsys = &s3c2443_subsys,
+ .add_dev = s3c2443_irq_add,
};
static int __init s3c2443_irq_init(void)
{
- return sysdev_driver_register(&s3c2443_sysclass, &s3c2443_irq_driver);
+ return subsys_interface_register(&s3c2443_irq_interface);
}
arch_initcall(s3c2443_irq_init);
diff --git a/arch/arm/mach-s3c2443/mach-smdk2443.c b/arch/arm/mach-s3c2443/mach-smdk2443.c
index bec107e0044..20923695622 100644
--- a/arch/arm/mach-s3c2443/mach-smdk2443.c
+++ b/arch/arm/mach-s3c2443/mach-smdk2443.c
@@ -145,4 +145,5 @@ MACHINE_START(SMDK2443, "SMDK2443")
.map_io = smdk2443_map_io,
.init_machine = smdk2443_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c2443_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c2443/s3c2443.c b/arch/arm/mach-s3c2443/s3c2443.c
index a22b771b0f3..b9deaeb0dff 100644
--- a/arch/arm/mach-s3c2443/s3c2443.c
+++ b/arch/arm/mach-s3c2443/s3c2443.c
@@ -19,7 +19,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -31,7 +31,6 @@
#include <asm/irq.h>
#include <mach/regs-s3c2443-clock.h>
-#include <mach/reset.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
@@ -49,16 +48,20 @@ static struct map_desc s3c2443_iodesc[] __initdata = {
IODESC_ENT(TIMER),
};
-struct sysdev_class s3c2443_sysclass = {
+struct bus_type s3c2443_subsys = {
.name = "s3c2443-core",
+ .dev_name = "s3c2443-core",
};
-static struct sys_device s3c2443_sysdev = {
- .cls = &s3c2443_sysclass,
+static struct device s3c2443_dev = {
+ .bus = &s3c2443_subsys,
};
-static void s3c2443_hard_reset(void)
+void s3c2443_restart(char mode, const char *cmd)
{
+ if (mode == 's')
+ soft_restart(0);
+
__raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
}
@@ -66,8 +69,6 @@ int __init s3c2443_init(void)
{
printk("S3C2443: Initialising architecture\n");
- s3c24xx_reset_hook = s3c2443_hard_reset;
-
s3c_nand_setname("s3c2412-nand");
s3c_fb_setname("s3c2443-fb");
@@ -77,7 +78,7 @@ int __init s3c2443_init(void)
s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT;
s3c_device_wdt.resource[1].end = IRQ_S3C2443_WDT;
- return sysdev_register(&s3c2443_sysdev);
+ return device_register(&s3c2443_dev);
}
void __init s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no)
@@ -99,7 +100,7 @@ void __init s3c2443_map_io(void)
iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
}
-/* need to register class before we actually register the device, and
+/* need to register the subsystem before we actually register the device, and
* we also need to ensure that it has been initialised before any of the
* drivers even try to use it (even if not on an s3c2443 based system)
* as a driver which may support both 2443 and 2440 may try and use it.
@@ -107,7 +108,7 @@ void __init s3c2443_map_io(void)
static int __init s3c2443_core_init(void)
{
- return sysdev_class_register(&s3c2443_sysclass);
+ return subsys_system_register(&s3c2443_subsys, NULL);
}
core_initcall(s3c2443_core_init);
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 5552e048c2b..dd20c66cd70 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -8,6 +8,7 @@ config PLAT_S3C64XX
bool
depends on ARCH_S3C64XX
select SAMSUNG_WAKEMASK
+ select PM_GENERIC_DOMAINS
default y
help
Base platform code for any Samsung S3C64XX device
@@ -77,6 +78,11 @@ config S3C64XX_SETUP_SDHCI_GPIO
help
Common setup code for S3C64XX SDHCI GPIO configurations
+config S3C64XX_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations
+
# S3C6400 Machine support
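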
config MACH_SMDK6400
@@ -188,7 +194,7 @@ config SMDK6410_WM1190_EV1
depends on MACH_SMDK6410
select REGULATOR
select REGULATOR_WM8350
- select S3C24XX_GPIO_EXTRA64
+ select SAMSUNG_GPIO_EXTRA64
select MFD_WM8350_I2C
select MFD_WM8350_CONFIG_MODE_0
select MFD_WM8350_CONFIG_MODE_3
@@ -206,7 +212,7 @@ config SMDK6410_WM1192_EV1
depends on MACH_SMDK6410
select REGULATOR
select REGULATOR_WM831X
- select S3C24XX_GPIO_EXTRA64
+ select SAMSUNG_GPIO_EXTRA64
select MFD_WM831X
select MFD_WM831X_I2C
help
@@ -276,6 +282,7 @@ config MACH_WLF_CRAGG_6410
select S3C64XX_SETUP_IDE
select S3C64XX_SETUP_FB_24BPP
select S3C64XX_SETUP_KEYPAD
+ select S3C64XX_SETUP_SPI
select SAMSUNG_DEV_ADC
select SAMSUNG_DEV_KEYPAD
select S3C_DEV_USB_HOST
@@ -286,8 +293,8 @@ config MACH_WLF_CRAGG_6410
select S3C_DEV_I2C1
select S3C_DEV_WDT
select S3C_DEV_RTC
- select S3C64XX_DEV_SPI
- select S3C24XX_GPIO_EXTRA128
+ select S3C64XX_DEV_SPI0
+ select SAMSUNG_GPIO_EXTRA128
select I2C
help
Machine support for the Wolfson Cragganmore S3C6410 variant.
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index cfc0b994180..1822ac2eba3 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -10,54 +10,49 @@ obj-m :=
obj-n :=
obj- :=
-# Core files
-obj-y += cpu.o
-obj-y += clock.o
+# Core
-# Core support for S3C6400 system
+obj-y += common.o clock.o
+
+# Core support
obj-$(CONFIG_CPU_S3C6400) += s3c6400.o
obj-$(CONFIG_CPU_S3C6410) += s3c6410.o
-obj-y += irq.o
-obj-y += irq-eint.o
+# PM
+
+obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
# DMA support
obj-$(CONFIG_S3C64XX_DMA) += dma.o
-# Device setup
+# Device support
-obj-$(CONFIG_S3C64XX_SETUP_I2C0) += setup-i2c0.o
-obj-$(CONFIG_S3C64XX_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S3C64XX_SETUP_IDE) += setup-ide.o
-obj-$(CONFIG_S3C64XX_SETUP_KEYPAD) += setup-keypad.o
-obj-$(CONFIG_S3C64XX_SETUP_SDHCI) += setup-sdhci.o
-obj-$(CONFIG_S3C64XX_SETUP_FB_24BPP) += setup-fb-24bpp.o
-obj-$(CONFIG_S3C64XX_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-y += dev-uart.o
+obj-y += dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
-# PM
+# Device setup
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_PM) += sleep.o
-obj-$(CONFIG_PM) += irq-pm.o
+obj-$(CONFIG_S3C64XX_SETUP_FB_24BPP) += setup-fb-24bpp.o
+obj-$(CONFIG_S3C64XX_SETUP_I2C0) += setup-i2c0.o
+obj-$(CONFIG_S3C64XX_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_S3C64XX_SETUP_IDE) += setup-ide.o
+obj-$(CONFIG_S3C64XX_SETUP_KEYPAD) += setup-keypad.o
+obj-$(CONFIG_S3C64XX_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S3C64XX_SETUP_SPI) += setup-spi.o
# Machine support
-obj-$(CONFIG_MACH_ANW6410) += mach-anw6410.o
-obj-$(CONFIG_MACH_SMDK6400) += mach-smdk6400.o
-obj-$(CONFIG_MACH_SMDK6410) += mach-smdk6410.o
-obj-$(CONFIG_MACH_REAL6410) += mach-real6410.o
-obj-$(CONFIG_MACH_MINI6410) += mach-mini6410.o
-obj-$(CONFIG_MACH_NCP) += mach-ncp.o
-obj-$(CONFIG_MACH_HMT) += mach-hmt.o
-obj-$(CONFIG_MACH_SMARTQ) += mach-smartq.o
-obj-$(CONFIG_MACH_SMARTQ5) += mach-smartq5.o
-obj-$(CONFIG_MACH_SMARTQ7) += mach-smartq7.o
-obj-$(CONFIG_MACH_WLF_CRAGG_6410) += mach-crag6410.o mach-crag6410-module.o
-
-# device support
-
-obj-y += dev-uart.o
-obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-$(CONFIG_MACH_ANW6410) += mach-anw6410.o
+obj-$(CONFIG_MACH_HMT) += mach-hmt.o
+obj-$(CONFIG_MACH_MINI6410) += mach-mini6410.o
+obj-$(CONFIG_MACH_NCP) += mach-ncp.o
+obj-$(CONFIG_MACH_REAL6410) += mach-real6410.o
+obj-$(CONFIG_MACH_SMARTQ) += mach-smartq.o
+obj-$(CONFIG_MACH_SMARTQ5) += mach-smartq5.o
+obj-$(CONFIG_MACH_SMARTQ7) += mach-smartq7.o
+obj-$(CONFIG_MACH_SMDK6400) += mach-smdk6400.o
+obj-$(CONFIG_MACH_SMDK6410) += mach-smdk6410.o
+obj-$(CONFIG_MACH_WLF_CRAGG_6410) += mach-crag6410.o mach-crag6410-module.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 39c238d7a3d..31bb27dc4ae 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -184,18 +184,6 @@ static struct clk init_clocks_off[] = {
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_SPI1,
}, {
- .name = "spi_48m",
- .devname = "s3c64xx-spi.0",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SPI0_48,
- }, {
- .name = "spi_48m",
- .devname = "s3c64xx-spi.1",
- .parent = &clk_48m,
- .enable = s3c64xx_sclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
- }, {
.name = "48m",
.devname = "s3c-sdhci.0",
.parent = &clk_48m,
@@ -226,6 +214,22 @@ static struct clk init_clocks_off[] = {
},
};
+static struct clk clk_48m_spi0 = {
+ .name = "spi_48m",
+ .devname = "s3c64xx-spi.0",
+ .parent = &clk_48m,
+ .enable = s3c64xx_sclk_ctrl,
+ .ctrlbit = S3C_CLKCON_SCLK_SPI0_48,
+};
+
+static struct clk clk_48m_spi1 = {
+ .name = "spi_48m",
+ .devname = "s3c64xx-spi.1",
+ .parent = &clk_48m,
+ .enable = s3c64xx_sclk_ctrl,
+ .ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
+};
+
static struct clk init_clocks[] = {
{
.name = "lcd",
@@ -243,24 +247,6 @@ static struct clk init_clocks[] = {
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_UHOST,
}, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.0",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC0,
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC1,
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.2",
- .parent = &clk_h,
- .enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
- }, {
.name = "otg",
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
@@ -310,6 +296,29 @@ static struct clk init_clocks[] = {
}
};
+static struct clk clk_hsmmc0 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.0",
+ .parent = &clk_h,
+ .enable = s3c64xx_hclk_ctrl,
+ .ctrlbit = S3C_CLKCON_HCLK_HSMMC0,
+};
+
+static struct clk clk_hsmmc1 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.1",
+ .parent = &clk_h,
+ .enable = s3c64xx_hclk_ctrl,
+ .ctrlbit = S3C_CLKCON_HCLK_HSMMC1,
+};
+
+static struct clk clk_hsmmc2 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.2",
+ .parent = &clk_h,
+ .enable = s3c64xx_hclk_ctrl,
+ .ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
+};
static struct clk clk_fout_apll = {
.name = "fout_apll",
@@ -578,36 +587,6 @@ static struct clksrc_sources clkset_camif = {
static struct clksrc_clk clksrcs[] = {
{
.clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.0",
- .ctrlbit = S3C_CLKCON_SCLK_MMC0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 },
- .sources = &clkset_spi_mmc,
- }, {
- .clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.1",
- .ctrlbit = S3C_CLKCON_SCLK_MMC1,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 },
- .sources = &clkset_spi_mmc,
- }, {
- .clk = {
- .name = "mmc_bus",
- .devname = "s3c-sdhci.2",
- .ctrlbit = S3C_CLKCON_SCLK_MMC2,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 },
- .sources = &clkset_spi_mmc,
- }, {
- .clk = {
.name = "usb-bus-host",
.ctrlbit = S3C_CLKCON_SCLK_UHOST,
.enable = s3c64xx_sclk_ctrl,
@@ -617,35 +596,6 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_uhost,
}, {
.clk = {
- .name = "uclk1",
- .ctrlbit = S3C_CLKCON_SCLK_UART,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 },
- .sources = &clkset_uart,
- }, {
-/* Where does UCLK0 come from? */
- .clk = {
- .name = "spi-bus",
- .devname = "s3c64xx-spi.0",
- .ctrlbit = S3C_CLKCON_SCLK_SPI0,
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 },
- .sources = &clkset_spi_mmc,
- }, {
- .clk = {
- .name = "spi-bus",
- .devname = "s3c64xx-spi.1",
- .enable = s3c64xx_sclk_ctrl,
- },
- .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 },
- .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 },
- .sources = &clkset_spi_mmc,
- }, {
- .clk = {
.name = "audio-bus",
.devname = "samsung-i2s.0",
.ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
@@ -695,6 +645,78 @@ static struct clksrc_clk clksrcs[] = {
},
};
+/* Where does UCLK0 come from? */
+static struct clksrc_clk clk_sclk_uclk = {
+ .clk = {
+ .name = "uclk1",
+ .ctrlbit = S3C_CLKCON_SCLK_UART,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 },
+ .sources = &clkset_uart,
+};
+
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "mmc_bus",
+ .devname = "s3c-sdhci.0",
+ .ctrlbit = S3C_CLKCON_SCLK_MMC0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 },
+ .sources = &clkset_spi_mmc,
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "mmc_bus",
+ .devname = "s3c-sdhci.1",
+ .ctrlbit = S3C_CLKCON_SCLK_MMC1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 },
+ .sources = &clkset_spi_mmc,
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "mmc_bus",
+ .devname = "s3c-sdhci.2",
+ .ctrlbit = S3C_CLKCON_SCLK_MMC2,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 },
+ .sources = &clkset_spi_mmc,
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "spi-bus",
+ .devname = "s3c64xx-spi.0",
+ .ctrlbit = S3C_CLKCON_SCLK_SPI0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 },
+ .sources = &clkset_spi_mmc,
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "spi-bus",
+ .devname = "s3c64xx-spi.1",
+ .ctrlbit = S3C_CLKCON_SCLK_SPI1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 },
+ .sources = &clkset_spi_mmc,
+};
+
/* Clock initialisation code */
static struct clksrc_clk *init_parents[] = {
@@ -703,9 +725,42 @@ static struct clksrc_clk *init_parents[] = {
&clk_mout_mpll,
};
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uclk,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+};
+
+static struct clk *clk_cdev[] = {
+ &clk_hsmmc0,
+ &clk_hsmmc1,
+ &clk_hsmmc2,
+ &clk_48m_spi0,
+ &clk_48m_spi1,
+};
+
+static struct clk_lookup s3c64xx_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_48m_spi0),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk2", &clk_48m_spi1),
+};
+
#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
-void __init_or_cpufreq s3c6400_setup_clocks(void)
+void __init_or_cpufreq s3c64xx_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long xtal;
@@ -804,13 +859,15 @@ static struct clk *clks[] __initdata = {
* as ARMCLK as well as the necessary parent clocks.
*
* This call does not setup the clocks, which is left to the
- * s3c6400_setup_clocks() call which may be needed by the cpufreq
+ * s3c64xx_setup_clocks() call which may be needed by the cpufreq
* or resume code to re-set the clocks if the bootloader has changed
* them.
*/
void __init s3c64xx_register_clocks(unsigned long xtal,
unsigned armclk_divlimit)
{
+ unsigned int cnt;
+
armclk_mask = armclk_divlimit;
s3c24xx_register_baseclocks(xtal);
@@ -821,7 +878,15 @@ void __init s3c64xx_register_clocks(unsigned long xtal,
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (cnt = 0; cnt < ARRAY_SIZE(clk_cdev); cnt++)
+ s3c_disable_clocks(clk_cdev[cnt], 1);
+
s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
+ for (cnt = 0; cnt < ARRAY_SIZE(clksrc_cdev); cnt++)
+ s3c_register_clksrc(clksrc_cdev[cnt], 1);
+ clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup));
+
s3c_pwmclk_init();
}
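The hunks above pull the per-device SPI and MMC clocks out of the name/devname-matched tables and publish them through clkdev connection ids ("mmc_busclk.0", "mmc_busclk.2", "spi_busclk1", ...), registered in one go by clkdev_add_table(). A consumer-side sketch, assuming a driver bound to the "s3c-sdhci.0" platform device named in the lookup table (example_probe is illustrative):

static int example_probe(struct platform_device *pdev)
{
        struct clk *bus_clk;

        /* resolved through CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", ...) above */
        bus_clk = clk_get(&pdev->dev, "mmc_busclk.2");
        if (IS_ERR(bus_clk))
                return PTR_ERR(bus_clk);

        return clk_enable(bus_clk);
}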
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
new file mode 100644
index 00000000000..4a7394d4bd9
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Common Codes for S3C64XX machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/hardware/vic.h>
+
+#include <mach/map.h>
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/gpio-cfg.h>
+#include <plat/irq-uart.h>
+#include <plat/irq-vic-timer.h>
+#include <plat/regs-irqtype.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
+
+#include "common.h"
+
+/* uart registration process */
+
+void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
+}
+
+/* table of supported CPUs */
+
+static const char name_s3c6400[] = "S3C6400";
+static const char name_s3c6410[] = "S3C6410";
+
+static struct cpu_table cpu_ids[] __initdata = {
+ {
+ .idcode = S3C6400_CPU_ID,
+ .idmask = S3C64XX_CPU_MASK,
+ .map_io = s3c6400_map_io,
+ .init_clocks = s3c6400_init_clocks,
+ .init_uarts = s3c64xx_init_uarts,
+ .init = s3c6400_init,
+ .name = name_s3c6400,
+ }, {
+ .idcode = S3C6410_CPU_ID,
+ .idmask = S3C64XX_CPU_MASK,
+ .map_io = s3c6410_map_io,
+ .init_clocks = s3c6410_init_clocks,
+ .init_uarts = s3c64xx_init_uarts,
+ .init = s3c6410_init,
+ .name = name_s3c6410,
+ },
+};
+
+/* minimal IO mapping */
+
+/* see notes on uart map in arch/arm/mach-s3c64xx/include/mach/debug-macro.S */
+#define UART_OFFS (S3C_PA_UART & 0xfffff)
+
+static struct map_desc s3c_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_MEM,
+ .pfn = __phys_to_pfn(S3C64XX_PA_SROM),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)(S3C_VA_UART + UART_OFFS),
+ .pfn = __phys_to_pfn(S3C_PA_UART),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)VA_VIC0,
+ .pfn = __phys_to_pfn(S3C64XX_PA_VIC0),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)VA_VIC1,
+ .pfn = __phys_to_pfn(S3C64XX_PA_VIC1),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_TIMER,
+ .pfn = __phys_to_pfn(S3C_PA_TIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C64XX_VA_GPIO,
+ .pfn = __phys_to_pfn(S3C64XX_PA_GPIO),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C64XX_VA_MODEM,
+ .pfn = __phys_to_pfn(S3C64XX_PA_MODEM),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_WATCHDOG,
+ .pfn = __phys_to_pfn(S3C64XX_PA_WATCHDOG),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_USB_HSPHY,
+ .pfn = __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
+ .length = SZ_1K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct bus_type s3c64xx_subsys = {
+ .name = "s3c64xx-core",
+ .dev_name = "s3c64xx-core",
+};
+
+static struct device s3c64xx_dev = {
+ .bus = &s3c64xx_subsys,
+};
+
+/* read cpu identification code */
+
+void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
+{
+ /* initialise the io descriptors we need for initialisation */
+ iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
+ iotable_init(mach_desc, size);
+ init_consistent_dma_size(SZ_8M);
+
+ /* detect cpu id */
+ s3c64xx_init_cpu();
+
+ s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+static __init int s3c64xx_dev_init(void)
+{
+ subsys_system_register(&s3c64xx_subsys, NULL);
+ return device_register(&s3c64xx_dev);
+}
+core_initcall(s3c64xx_dev_init);
+
+/*
+ * setup the sources the vic should advertise resume
+ * for, even though it is not doing the wake
+ * (set_irq_wake needs to be valid)
+ */
+#define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE))
+#define IRQ_VIC1_RESUME (1 << (IRQ_RTC_ALARM - IRQ_VIC1_BASE) | \
+ 1 << (IRQ_PENDN - IRQ_VIC1_BASE) | \
+ 1 << (IRQ_HSMMC0 - IRQ_VIC1_BASE) | \
+ 1 << (IRQ_HSMMC1 - IRQ_VIC1_BASE) | \
+ 1 << (IRQ_HSMMC2 - IRQ_VIC1_BASE))
+
+void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
+{
+ printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
+
+ /* initialise the pair of VICs */
+ vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, IRQ_VIC0_RESUME);
+ vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, IRQ_VIC1_RESUME);
+
+ /* add the timer sub-irqs */
+ s3c_init_vic_timer_irq(5, IRQ_TIMER0);
+}
+
+#define eint_offset(irq) ((irq) - IRQ_EINT(0))
+#define eint_irq_to_bit(irq) ((u32)(1 << eint_offset(irq)))
+
+static inline void s3c_irq_eint_mask(struct irq_data *data)
+{
+ u32 mask;
+
+ mask = __raw_readl(S3C64XX_EINT0MASK);
+ mask |= (u32)data->chip_data;
+ __raw_writel(mask, S3C64XX_EINT0MASK);
+}
+
+static void s3c_irq_eint_unmask(struct irq_data *data)
+{
+ u32 mask;
+
+ mask = __raw_readl(S3C64XX_EINT0MASK);
+ mask &= ~((u32)data->chip_data);
+ __raw_writel(mask, S3C64XX_EINT0MASK);
+}
+
+static inline void s3c_irq_eint_ack(struct irq_data *data)
+{
+ __raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
+}
+
+static void s3c_irq_eint_maskack(struct irq_data *data)
+{
+ /* compiler should in-line these */
+ s3c_irq_eint_mask(data);
+ s3c_irq_eint_ack(data);
+}
+
+static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+ int offs = eint_offset(data->irq);
+ int pin, pin_val;
+ int shift;
+ u32 ctrl, mask;
+ u32 newvalue = 0;
+ void __iomem *reg;
+
+ if (offs > 27)
+ return -EINVAL;
+
+ if (offs <= 15)
+ reg = S3C64XX_EINT0CON0;
+ else
+ reg = S3C64XX_EINT0CON1;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ printk(KERN_WARNING "No edge setting!\n");
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S3C2410_EXTINT_RISEEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S3C2410_EXTINT_FALLEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S3C2410_EXTINT_BOTHEDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S3C2410_EXTINT_LOWLEV;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S3C2410_EXTINT_HILEV;
+ break;
+
+ default:
+ printk(KERN_ERR "No such irq type %d", type);
+ return -1;
+ }
+
+ if (offs <= 15)
+ shift = (offs / 2) * 4;
+ else
+ shift = ((offs - 16) / 2) * 4;
+ mask = 0x7 << shift;
+
+ ctrl = __raw_readl(reg);
+ ctrl &= ~mask;
+ ctrl |= newvalue << shift;
+ __raw_writel(ctrl, reg);
+
+ /* set the GPIO pin appropriately */
+
+ if (offs < 16) {
+ pin = S3C64XX_GPN(offs);
+ pin_val = S3C_GPIO_SFN(2);
+ } else if (offs < 23) {
+ pin = S3C64XX_GPL(offs + 8 - 16);
+ pin_val = S3C_GPIO_SFN(3);
+ } else {
+ pin = S3C64XX_GPM(offs - 23);
+ pin_val = S3C_GPIO_SFN(3);
+ }
+
+ s3c_gpio_cfgpin(pin, pin_val);
+
+ return 0;
+}
+
+static struct irq_chip s3c_irq_eint = {
+ .name = "s3c-eint",
+ .irq_mask = s3c_irq_eint_mask,
+ .irq_unmask = s3c_irq_eint_unmask,
+ .irq_mask_ack = s3c_irq_eint_maskack,
+ .irq_ack = s3c_irq_eint_ack,
+ .irq_set_type = s3c_irq_eint_set_type,
+ .irq_set_wake = s3c_irqext_wake,
+};
+
+/* s3c_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from IRQ_EINT(0) to IRQ_EINT(27). It is designed to be inlined into
+ * the specific handlers s3c_irq_demux_eintX_Y.
+ */
+static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
+{
+ u32 status = __raw_readl(S3C64XX_EINT0PEND);
+ u32 mask = __raw_readl(S3C64XX_EINT0MASK);
+ unsigned int irq;
+
+ status &= ~mask;
+ status >>= start;
+ status &= (1 << (end - start + 1)) - 1;
+
+ for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
+ if (status & 1)
+ generic_handle_irq(irq);
+
+ status >>= 1;
+ }
+}
+
+static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+{
+ s3c_irq_demux_eint(0, 3);
+}
+
+static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+{
+ s3c_irq_demux_eint(4, 11);
+}
+
+static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+{
+ s3c_irq_demux_eint(12, 19);
+}
+
+static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+{
+ s3c_irq_demux_eint(20, 27);
+}
+
+static int __init s3c64xx_init_irq_eint(void)
+{
+ int irq;
+
+ for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
+ irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
+ irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
+ irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
+ irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
+ irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
+
+ return 0;
+}
+arch_initcall(s3c64xx_init_irq_eint);
+
+void s3c64xx_restart(char mode, const char *cmd)
+{
+ if (mode != 's')
+ arch_wdt_reset();
+
+ /* if all else fails, or mode was for soft, jump to 0 */
+ soft_restart(0);
+}
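common.c above takes over what cpu.c, irq.c and irq-eint.c used to provide (their deletions follow later in this diff), so the group-0 external interrupt chip and its chained demux handlers now live here. A sketch of how board code would consume one of these lines once the chip is registered; the IRQ number, handler and name are illustrative, and <linux/interrupt.h> plus <mach/irqs.h> are assumed to be included:

static irqreturn_t example_eint_handler(int irq, void *dev_id)
{
        /* device-specific handling of the external interrupt */
        return IRQ_HANDLED;
}

static int __init example_eint_setup(void)
{
        int irq = IRQ_EINT(5);          /* any of IRQ_EINT(0)..IRQ_EINT(27) */

        irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
        return request_irq(irq, example_eint_handler, 0, "example-eint", NULL);
}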
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
new file mode 100644
index 00000000000..5eb9c9a7d73
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Common Header for S3C64XX machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H
+#define __ARCH_ARM_MACH_S3C64XX_COMMON_H
+
+void s3c64xx_init_irq(u32 vic0, u32 vic1);
+void s3c64xx_init_io(struct map_desc *mach_desc, int size);
+
+void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
+void s3c64xx_setup_clocks(void);
+
+void s3c64xx_restart(char mode, const char *cmd);
+
+extern struct syscore_ops s3c64xx_irq_syscore_ops;
+
+#ifdef CONFIG_CPU_S3C6400
+
+extern int s3c6400_init(void);
+extern void s3c6400_init_irq(void);
+extern void s3c6400_map_io(void);
+extern void s3c6400_init_clocks(int xtal);
+
+#else
+#define s3c6400_init_clocks NULL
+#define s3c6400_map_io NULL
+#define s3c6400_init NULL
+#endif
+
+#ifdef CONFIG_CPU_S3C6410
+
+extern int s3c6410_init(void);
+extern void s3c6410_init_irq(void);
+extern void s3c6410_map_io(void);
+extern void s3c6410_init_clocks(int xtal);
+
+#else
+#define s3c6410_init_clocks NULL
+#define s3c6410_map_io NULL
+#define s3c6410_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
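common.h above keeps the cpu_table entries in common.c buildable whichever SoCs are configured: when a CPU is not selected, its hooks collapse to NULL instead of becoming undefined symbols. The same guard pattern, sketched for a hypothetical CONFIG_CPU_EXAMPLE:

#ifdef CONFIG_CPU_EXAMPLE
extern int example_init(void);
extern void example_map_io(void);
extern void example_init_clocks(int xtal);
#else
#define example_init_clocks NULL
#define example_map_io NULL
#define example_init NULL
#endif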
diff --git a/arch/arm/mach-s3c64xx/cpu.c b/arch/arm/mach-s3c64xx/cpu.c
deleted file mode 100644
index de085b798aa..00000000000
--- a/arch/arm/mach-s3c64xx/cpu.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* linux/arch/arm/plat-s3c64xx/cpu.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C64XX CPU Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <plat/regs-serial.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-
-#include <plat/s3c6400.h>
-#include <plat/s3c6410.h>
-
-/* table of supported CPUs */
-
-static const char name_s3c6400[] = "S3C6400";
-static const char name_s3c6410[] = "S3C6410";
-
-static struct cpu_table cpu_ids[] __initdata = {
- {
- .idcode = S3C6400_CPU_ID,
- .idmask = S3C64XX_CPU_MASK,
- .map_io = s3c6400_map_io,
- .init_clocks = s3c6400_init_clocks,
- .init_uarts = s3c6400_init_uarts,
- .init = s3c6400_init,
- .name = name_s3c6400,
- }, {
- .idcode = S3C6410_CPU_ID,
- .idmask = S3C64XX_CPU_MASK,
- .map_io = s3c6410_map_io,
- .init_clocks = s3c6410_init_clocks,
- .init_uarts = s3c6410_init_uarts,
- .init = s3c6410_init,
- .name = name_s3c6410,
- },
-};
-
-/* minimal IO mapping */
-
-/* see notes on uart map in arch/arm/mach-s3c6400/include/mach/debug-macro.S */
-#define UART_OFFS (S3C_PA_UART & 0xfffff)
-
-static struct map_desc s3c_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_MEM,
- .pfn = __phys_to_pfn(S3C64XX_PA_SROM),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)(S3C_VA_UART + UART_OFFS),
- .pfn = __phys_to_pfn(S3C_PA_UART),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC0,
- .pfn = __phys_to_pfn(S3C64XX_PA_VIC0),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC1,
- .pfn = __phys_to_pfn(S3C64XX_PA_VIC1),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_TIMER,
- .pfn = __phys_to_pfn(S3C_PA_TIMER),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C64XX_VA_GPIO,
- .pfn = __phys_to_pfn(S3C64XX_PA_GPIO),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C64XX_VA_MODEM,
- .pfn = __phys_to_pfn(S3C64XX_PA_MODEM),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_WATCHDOG,
- .pfn = __phys_to_pfn(S3C64XX_PA_WATCHDOG),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_USB_HSPHY,
- .pfn = __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
- .length = SZ_1K,
- .type = MT_DEVICE,
- },
-};
-
-
-struct sysdev_class s3c64xx_sysclass = {
- .name = "s3c64xx-core",
-};
-
-static struct sys_device s3c64xx_sysdev = {
- .cls = &s3c64xx_sysclass,
-};
-
-/* uart registration process */
-
-void __init s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
-}
-
-/* read cpu identification code */
-
-void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
-{
- /* initialise the io descriptors we need for initialisation */
- iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
- iotable_init(mach_desc, size);
- init_consistent_dma_size(SZ_8M);
-
- /* detect cpu id */
- s3c64xx_init_cpu();
-
- s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
-
-static __init int s3c64xx_sysdev_init(void)
-{
- sysdev_class_register(&s3c64xx_sysclass);
- return sysdev_register(&s3c64xx_sysdev);
-}
-
-core_initcall(s3c64xx_sysdev_init);
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
deleted file mode 100644
index 5e6b42089eb..00000000000
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/* linux/arch/arm/plat-s3c64xx/dev-spi.c
- *
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/spi-clocks.h>
-#include <mach/irqs.h>
-
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-#include <plat/devs.h>
-
-static char *spi_src_clks[] = {
- [S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
- [S3C64XX_SPI_SRCCLK_SPIBUS] = "spi-bus",
- [S3C64XX_SPI_SRCCLK_48M] = "spi_48m",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the GPC-3,7.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s3c64xx_spi_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S3C64XX_GPC(0);
- break;
-
- case 1:
- base = S3C64XX_GPC(4);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgall_range(base, 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-
- return 0;
-}
-
-static struct resource s3c64xx_spi0_resource[] = {
- [0] = {
- .start = S3C64XX_PA_SPI0,
- .end = S3C64XX_PA_SPI0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
- .cfg_gpio = s3c64xx_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s3c64xx_device_spi0 = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s3c64xx_spi0_resource),
- .resource = s3c64xx_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s3c64xx_spi0_pdata,
- },
-};
-EXPORT_SYMBOL(s3c64xx_device_spi0);
-
-static struct resource s3c64xx_spi1_resource[] = {
- [0] = {
- .start = S3C64XX_PA_SPI1,
- .end = S3C64XX_PA_SPI1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI1_TX,
- .end = DMACH_SPI1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI1_RX,
- .end = DMACH_SPI1_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI1,
- .end = IRQ_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
- .cfg_gpio = s3c64xx_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
-};
-
-struct platform_device s3c64xx_device_spi1 = {
- .name = "s3c64xx-spi",
- .id = 1,
- .num_resources = ARRAY_SIZE(s3c64xx_spi1_resource),
- .resource = s3c64xx_spi1_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s3c64xx_spi1_pdata,
- },
-};
-EXPORT_SYMBOL(s3c64xx_device_spi1);
-
-void __init s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S3C64XX_SPI_SRCCLK_48M) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- pd = &s3c64xx_spi0_pdata;
- break;
- case 1:
- pd = &s3c64xx_spi1_pdata;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = spi_src_clks[src_clk_nr];
-}
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 17d62f4f820..f2a7a172559 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -35,7 +35,7 @@
/* dma channel state information */
struct s3c64xx_dmac {
- struct sys_device sysdev;
+ struct device dev;
struct clk *clk;
void __iomem *regs;
struct s3c2410_dma_chan *channels;
@@ -631,8 +631,9 @@ static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
return IRQ_HANDLED;
}
-static struct sysdev_class dma_sysclass = {
+static struct bus_type dma_subsys = {
.name = "s3c64xx-dma",
+ .dev_name = "s3c64xx-dma",
};
static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
@@ -651,12 +652,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
return -ENOMEM;
}
- dmac->sysdev.id = chno / 8;
- dmac->sysdev.cls = &dma_sysclass;
+ dmac->dev.id = chno / 8;
+ dmac->dev.bus = &dma_subsys;
- err = sysdev_register(&dmac->sysdev);
+ err = device_register(&dmac->dev);
if (err) {
- printk(KERN_ERR "%s: failed to register sysdevice\n", __func__);
+ printk(KERN_ERR "%s: failed to register device\n", __func__);
goto err_alloc;
}
@@ -667,7 +668,7 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
goto err_dev;
}
- snprintf(clkname, sizeof(clkname), "dma%d", dmac->sysdev.id);
+ snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
dmac->clk = clk_get(NULL, clkname);
if (IS_ERR(dmac->clk)) {
@@ -715,7 +716,7 @@ err_clk:
err_map:
iounmap(regs);
err_dev:
- sysdev_unregister(&dmac->sysdev);
+ device_unregister(&dmac->dev);
err_alloc:
kfree(dmac);
return err;
@@ -733,9 +734,9 @@ static int __init s3c64xx_dma_init(void)
return -ENOMEM;
}
- ret = sysdev_class_register(&dma_sysclass);
+ ret = subsys_system_register(&dma_subsys, NULL);
if (ret) {
- printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
+ printk(KERN_ERR "%s: failed to create subsys\n", __func__);
return -ENOMEM;
}
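The DMA conversion above embeds a struct device directly in the controller state and hangs it off a dedicated system bus, replacing the embedded sys_device. A sketch of that pattern with placeholder names (example_ctrl, example_subsys; the bus is assumed to have been set up with subsys_system_register()):

struct example_ctrl {
        struct device   dev;            /* was: struct sys_device sysdev */
        void __iomem    *regs;
};

static int example_register_ctrl(struct example_ctrl *ctrl, int id)
{
        ctrl->dev.id  = id;
        ctrl->dev.bus = &example_subsys;

        return device_register(&ctrl->dev);
}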
diff --git a/arch/arm/mach-s3c64xx/include/mach/crag6410.h b/arch/arm/mach-s3c64xx/include/mach/crag6410.h
index be9074e17df..5d55ab018b6 100644
--- a/arch/arm/mach-s3c64xx/include/mach/crag6410.h
+++ b/arch/arm/mach-s3c64xx/include/mach/crag6410.h
@@ -15,9 +15,11 @@
#define BANFF_PMIC_IRQ_BASE IRQ_BOARD_START
#define GLENFARCLAS_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
+#define CODEC_IRQ_BASE (IRQ_BOARD_START + 128)
#define PCA935X_GPIO_BASE GPIO_BOARD_START
-#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
-#define GLENFARCLAS_PMIC_GPIO_BASE (GPIO_BOARD_START + 16)
+#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
+#define GLENFARCLAS_PMIC_GPIO_BASE (GPIO_BOARD_START + 32)
+#define BANFF_PMIC_GPIO_BASE (GPIO_BOARD_START + 64)
#endif
diff --git a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
index dd362604dcc..dc2bc15142c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-s3c64xx/include/mach/entry-macro.S
@@ -12,7 +12,8 @@
* warranty of any kind, whether express or implied.
*/
-#include <mach/map.h>
-#include <mach/irqs.h>
+ .macro disable_fiq
+ .endm
-#include <asm/entry-macro-vic2.S>
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
diff --git a/arch/arm/mach-s3c64xx/include/mach/gpio.h b/arch/arm/mach-s3c64xx/include/mach/gpio.h
index 6e34c2f6e67..8b540c42d5d 100644
--- a/arch/arm/mach-s3c64xx/include/mach/gpio.h
+++ b/arch/arm/mach-s3c64xx/include/mach/gpio.h
@@ -88,6 +88,6 @@ enum s3c_gpio_number {
/* define the number of gpios we need to the one after the GPQ() range */
#define GPIO_BOARD_START (S3C64XX_GPQ(S3C64XX_GPIO_Q_NR) + 1)
-#define BOARD_NR_GPIOS 16
+#define BOARD_NR_GPIOS (16 + CONFIG_SAMSUNG_GPIO_EXTRA)
#define ARCH_NR_GPIOS (GPIO_BOARD_START + BOARD_NR_GPIOS)
diff --git a/arch/arm/mach-s3c64xx/include/mach/irqs.h b/arch/arm/mach-s3c64xx/include/mach/irqs.h
index 443f85b3c20..96d60e0d937 100644
--- a/arch/arm/mach-s3c64xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c64xx/include/mach/irqs.h
@@ -169,7 +169,7 @@
#define IRQ_BOARD_START (IRQ_EINT_GROUP9_BASE + IRQ_EINT_GROUP9_NR + 1)
#ifdef CONFIG_MACH_WLF_CRAGG_6410
-#define IRQ_BOARD_NR 128
+#define IRQ_BOARD_NR 160
#elif defined(CONFIG_SMDK6410_WM1190_EV1)
#define IRQ_BOARD_NR 64
#elif defined(CONFIG_SMDK6410_WM1192_EV1)
diff --git a/arch/arm/mach-s3c64xx/include/mach/map.h b/arch/arm/mach-s3c64xx/include/mach/map.h
index 23a1d71e4d5..8e2097bb208 100644
--- a/arch/arm/mach-s3c64xx/include/mach/map.h
+++ b/arch/arm/mach-s3c64xx/include/mach/map.h
@@ -115,6 +115,8 @@
#define S3C_PA_USB_HSOTG S3C64XX_PA_USB_HSOTG
#define S3C_PA_RTC S3C64XX_PA_RTC
#define S3C_PA_WDT S3C64XX_PA_WATCHDOG
+#define S3C_PA_SPI0 S3C64XX_PA_SPI0
+#define S3C_PA_SPI1 S3C64XX_PA_SPI1
#define SAMSUNG_PA_ADC S3C64XX_PA_ADC
#define SAMSUNG_PA_CFCON S3C64XX_PA_CFCON
diff --git a/arch/arm/mach-s3c64xx/include/mach/system.h b/arch/arm/mach-s3c64xx/include/mach/system.h
index 2e58cb7a714..353ed4389ae 100644
--- a/arch/arm/mach-s3c64xx/include/mach/system.h
+++ b/arch/arm/mach-s3c64xx/include/mach/system.h
@@ -11,20 +11,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H __FILE__
-#include <plat/watchdog-reset.h>
-
static void arch_idle(void)
{
/* nothing here yet */
}
-static void arch_reset(char mode, const char *cmd)
-{
- if (mode != 's')
- arch_wdt_reset();
-
- /* if all else fails, or mode was for soft, jump to 0 */
- cpu_reset(0);
-}
-
#endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
deleted file mode 100644
index 23f75e556a3..00000000000
--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-s3c64xx/include/mach/vmalloc.h
- *
- * from arch/arm/mach-iop3xx/include/mach/vmalloc.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/irq-eint.c b/arch/arm/mach-s3c64xx/irq-eint.c
deleted file mode 100644
index 4d203be1f4c..00000000000
--- a/arch/arm/mach-s3c64xx/irq-eint.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* arch/arm/plat-s3c64xx/irq-eint.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C64XX - Interrupt handling for IRQ_EINT(x)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/sysdev.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <plat/regs-irqtype.h>
-#include <mach/regs-gpio.h>
-#include <plat/gpio-cfg.h>
-
-#include <mach/map.h>
-#include <plat/cpu.h>
-#include <plat/pm.h>
-
-#define eint_offset(irq) ((irq) - IRQ_EINT(0))
-#define eint_irq_to_bit(irq) ((u32)(1 << eint_offset(irq)))
-
-static inline void s3c_irq_eint_mask(struct irq_data *data)
-{
- u32 mask;
-
- mask = __raw_readl(S3C64XX_EINT0MASK);
- mask |= (u32)data->chip_data;
- __raw_writel(mask, S3C64XX_EINT0MASK);
-}
-
-static void s3c_irq_eint_unmask(struct irq_data *data)
-{
- u32 mask;
-
- mask = __raw_readl(S3C64XX_EINT0MASK);
- mask &= ~((u32)data->chip_data);
- __raw_writel(mask, S3C64XX_EINT0MASK);
-}
-
-static inline void s3c_irq_eint_ack(struct irq_data *data)
-{
- __raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND);
-}
-
-static void s3c_irq_eint_maskack(struct irq_data *data)
-{
- /* compiler should in-line these */
- s3c_irq_eint_mask(data);
- s3c_irq_eint_ack(data);
-}
-
-static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
- int offs = eint_offset(data->irq);
- int pin, pin_val;
- int shift;
- u32 ctrl, mask;
- u32 newvalue = 0;
- void __iomem *reg;
-
- if (offs > 27)
- return -EINVAL;
-
- if (offs <= 15)
- reg = S3C64XX_EINT0CON0;
- else
- reg = S3C64XX_EINT0CON1;
-
- switch (type) {
- case IRQ_TYPE_NONE:
- printk(KERN_WARNING "No edge setting!\n");
- break;
-
- case IRQ_TYPE_EDGE_RISING:
- newvalue = S3C2410_EXTINT_RISEEDGE;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- newvalue = S3C2410_EXTINT_FALLEDGE;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- newvalue = S3C2410_EXTINT_BOTHEDGE;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- newvalue = S3C2410_EXTINT_LOWLEV;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- newvalue = S3C2410_EXTINT_HILEV;
- break;
-
- default:
- printk(KERN_ERR "No such irq type %d", type);
- return -1;
- }
-
- if (offs <= 15)
- shift = (offs / 2) * 4;
- else
- shift = ((offs - 16) / 2) * 4;
- mask = 0x7 << shift;
-
- ctrl = __raw_readl(reg);
- ctrl &= ~mask;
- ctrl |= newvalue << shift;
- __raw_writel(ctrl, reg);
-
- /* set the GPIO pin appropriately */
-
- if (offs < 16) {
- pin = S3C64XX_GPN(offs);
- pin_val = S3C_GPIO_SFN(2);
- } else if (offs < 23) {
- pin = S3C64XX_GPL(offs + 8 - 16);
- pin_val = S3C_GPIO_SFN(3);
- } else {
- pin = S3C64XX_GPM(offs - 23);
- pin_val = S3C_GPIO_SFN(3);
- }
-
- s3c_gpio_cfgpin(pin, pin_val);
-
- return 0;
-}
-
-static struct irq_chip s3c_irq_eint = {
- .name = "s3c-eint",
- .irq_mask = s3c_irq_eint_mask,
- .irq_unmask = s3c_irq_eint_unmask,
- .irq_mask_ack = s3c_irq_eint_maskack,
- .irq_ack = s3c_irq_eint_ack,
- .irq_set_type = s3c_irq_eint_set_type,
- .irq_set_wake = s3c_irqext_wake,
-};
-
-/* s3c_irq_demux_eint
- *
- * This function demuxes the IRQ from the group0 external interrupts,
- * from IRQ_EINT(0) to IRQ_EINT(27). It is designed to be inlined into
- * the specific handlers s3c_irq_demux_eintX_Y.
- */
-static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
-{
- u32 status = __raw_readl(S3C64XX_EINT0PEND);
- u32 mask = __raw_readl(S3C64XX_EINT0MASK);
- unsigned int irq;
-
- status &= ~mask;
- status >>= start;
- status &= (1 << (end - start + 1)) - 1;
-
- for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
- if (status & 1)
- generic_handle_irq(irq);
-
- status >>= 1;
- }
-}
-
-static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_eint(0, 3);
-}
-
-static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_eint(4, 11);
-}
-
-static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_eint(12, 19);
-}
-
-static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_eint(20, 27);
-}
-
-static int __init s3c64xx_init_irq_eint(void)
-{
- int irq;
-
- for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
- irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
- irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
- set_irq_flags(irq, IRQF_VALID);
- }
-
- irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
- irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
- irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
- irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
-
- return 0;
-}
-
-arch_initcall(s3c64xx_init_irq_eint);
diff --git a/arch/arm/mach-s3c64xx/irq.c b/arch/arm/mach-s3c64xx/irq.c
deleted file mode 100644
index b07357e9495..00000000000
--- a/arch/arm/mach-s3c64xx/irq.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/* arch/arm/plat-s3c64xx/irq.c
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C64XX - Interrupt handling
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/serial_core.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <mach/map.h>
-#include <plat/irq-vic-timer.h>
-#include <plat/irq-uart.h>
-#include <plat/cpu.h>
-
-/* setup the sources the vic should advertise resume for, even though it
- * is not doing the wake (set_irq_wake needs to be valid) */
-#define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE))
-#define IRQ_VIC1_RESUME (1 << (IRQ_RTC_ALARM - IRQ_VIC1_BASE) | \
- 1 << (IRQ_PENDN - IRQ_VIC1_BASE) | \
- 1 << (IRQ_HSMMC0 - IRQ_VIC1_BASE) | \
- 1 << (IRQ_HSMMC1 - IRQ_VIC1_BASE) | \
- 1 << (IRQ_HSMMC2 - IRQ_VIC1_BASE))
-
-void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
-{
- printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
-
- /* initialise the pair of VICs */
- vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, IRQ_VIC0_RESUME);
- vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, IRQ_VIC1_RESUME);
-
- /* add the timer sub-irqs */
- s3c_init_vic_timer_irq(5, IRQ_TIMER0);
-}
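
The IRQ_VIC0_RESUME/IRQ_VIC1_RESUME macros in the deleted file build the VIC resume masks by OR-ing one bit per wake-capable source, each bit positioned by the source's offset from its VIC base. A tiny standalone example of composing such a mask (the IRQ numbers below are illustrative, not the real mach/irqs.h values):

#include <stdint.h>
#include <stdio.h>

/* Illustrative IRQ numbers; the real values come from mach/irqs.h. */
#define IRQ_VIC1_BASE	64
#define IRQ_RTC_ALARM	(IRQ_VIC1_BASE + 6)
#define IRQ_HSMMC0	(IRQ_VIC1_BASE + 24)

int main(void)
{
	/* one bit per wake-capable source, offset from the VIC base */
	uint32_t resume = (1u << (IRQ_RTC_ALARM - IRQ_VIC1_BASE)) |
			  (1u << (IRQ_HSMMC0 - IRQ_VIC1_BASE));

	printf("VIC1 resume mask: 0x%08x\n", resume);	/* 0x01000040 */
	return 0;
}
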
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index 8eba88e7209..b86f2779e4e 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -30,6 +30,7 @@
#include <video/platform_lcd.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -45,13 +46,14 @@
#include <plat/fb.h>
#include <plat/regs-fb-v4.h>
-#include <plat/s3c6410.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <mach/regs-gpio.h>
#include <mach/regs-modem.h>
+#include "common.h"
+
/* DM9000 */
#define ANW6410_PA_DM9000 (0x18000000)
@@ -236,7 +238,9 @@ MACHINE_START(ANW6410, "A&W6410")
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = anw6410_map_io,
.init_machine = anw6410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
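
This board, like every other S3C64xx machine updated below, gains an explicit .handle_irq pointing at the VIC entry handler and a .restart callback so reset no longer goes through a global arch-level hook. A standalone model of that descriptor-with-callbacks pattern; the struct and function names here are illustrative, not the kernel's real machine_desc:

#include <stdio.h>

/* A tiny model of the machine-descriptor pattern: the platform fills in
 * callbacks, the core calls them at the right time. */
struct machine_desc {
	const char *name;
	void (*init_irq)(void);
	void (*handle_irq)(void);	/* e.g. vic_handle_irq in this series */
	void (*restart)(char mode, const char *cmd);
};

static void demo_init_irq(void)   { puts("init VICs"); }
static void demo_handle_irq(void) { puts("dispatch via VIC"); }
static void demo_restart(char mode, const char *cmd)
{
	printf("restart, mode=%c cmd=%s\n", mode, cmd ? cmd : "(none)");
}

static const struct machine_desc demo_board = {
	.name       = "DEMO6410",
	.init_irq   = demo_init_irq,
	.handle_irq = demo_handle_irq,
	.restart    = demo_restart,
};

int main(void)
{
	demo_board.init_irq();
	demo_board.handle_irq();
	demo_board.restart('h', NULL);
	return 0;
}
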
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 66668565ee7..cd3c97e2ee7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -8,19 +8,49 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/gpio.h>
+#include <linux/mfd/wm8994/pdata.h>
+#include <sound/wm5100.h>
#include <sound/wm8996.h>
#include <sound/wm8962.h>
#include <sound/wm9081.h>
#include <mach/crag6410.h>
+static struct wm5100_pdata wm5100_pdata = {
+ .ldo_ena = S3C64XX_GPN(7),
+ .irq_flags = IRQF_TRIGGER_HIGH,
+ .gpio_base = CODEC_GPIO_BASE,
+
+ .in_mode = {
+ WM5100_IN_DIFF,
+ WM5100_IN_DIFF,
+ WM5100_IN_DIFF,
+ WM5100_IN_SE,
+ },
+
+ .hp_pol = CODEC_GPIO_BASE + 3,
+ .jack_modes = {
+ { WM5100_MICDET_MICBIAS3, 0, 0 },
+ { WM5100_MICDET_MICBIAS2, 1, 1 },
+ },
+
+ .gpio_defaults = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0x2, /* IRQ: CMOS output */
+ 0x3, /* CLKOUT: CMOS output */
+ },
+};
+
static struct wm8996_retune_mobile_config wm8996_retune[] = {
{
.name = "Sub LPF",
@@ -72,7 +102,6 @@ static struct wm8962_pdata wm8962_pdata __initdata = {
0x8000 | WM8962_GPIO_FN_DMICDAT,
WM8962_GPIO_FN_IRQ, /* Open drain mode */
},
- .irq_active_low = true,
};
static struct wm9081_pdata wm9081_pdata __initdata = {
@@ -91,6 +120,7 @@ static const struct i2c_board_info wm1254_devs[] = {
static const struct i2c_board_info wm1255_devs[] = {
{ I2C_BOARD_INFO("wm5100", 0x1a),
+ .platform_data = &wm5100_pdata,
.irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
},
{ I2C_BOARD_INFO("wm9081", 0x6c),
@@ -104,6 +134,24 @@ static const struct i2c_board_info wm1259_devs[] = {
},
};
+static struct wm8994_pdata wm8994_pdata = {
+ .gpio_base = CODEC_GPIO_BASE,
+ .gpio_defaults = {
+ 0x3, /* IRQ out, active high, CMOS */
+ },
+ .irq_base = CODEC_IRQ_BASE,
+ .ldo = {
+ { .supply = "WALLVDD" },
+ { .supply = "WALLVDD" },
+ },
+};
+
+static const struct i2c_board_info wm1277_devs[] = {
+ { I2C_BOARD_INFO("wm8958", 0x1a), /* WM8958 is the superset */
+ .platform_data = &wm8994_pdata,
+ .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
+ },
+};
static __devinitdata const struct {
u8 id;
@@ -125,6 +173,8 @@ static __devinitdata const struct {
{ .id = 0x3b, .name = "1255-EV1 Kilchoman",
.i2c_devs = wm1255_devs, .num_i2c_devs = ARRAY_SIZE(wm1255_devs) },
{ .id = 0x3c, .name = "1273-EV1 Longmorn" },
+ { .id = 0x3d, .name = "1277-EV1 Littlemill",
+ .i2c_devs = wm1277_devs, .num_i2c_devs = ARRAY_SIZE(wm1277_devs) },
};
static __devinit int wlf_gf_module_probe(struct i2c_client *i2c,
@@ -154,8 +204,8 @@ static __devinit int wlf_gf_module_probe(struct i2c_client *i2c,
"Failed to register dev: %d\n", ret);
}
} else {
- dev_warn(&i2c->dev, "Unknown module ID %d revision %d\n",
- id, rev);
+ dev_warn(&i2c->dev, "Unknown module ID 0x%x revision %d\n",
+ id, rev + 1);
}
return 0;
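
The Glenfarclas module support pairs a small ID table with per-module i2c_board_info arrays carrying platform data; the probe reads the module's ID register and registers the matching devices, warning on an unknown ID (now printed in hex, with the revision reported one-based). A standalone sketch of that table lookup; the table shape mirrors the hunk above, but the lookup body itself is illustrative, not the kernel code:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the i2c_board_info arrays plus ID table. */
struct board_info { const char *type; };

static const struct board_info wm1277_devs[] = {
	{ .type = "wm8958" },
};

static const struct {
	unsigned char id;
	const char *name;
	const struct board_info *i2c_devs;
	size_t num_i2c_devs;
} gf_mods[] = {
	{ .id = 0x3d, .name = "1277-EV1 Littlemill",
	  .i2c_devs = wm1277_devs,
	  .num_i2c_devs = sizeof(wm1277_devs) / sizeof(wm1277_devs[0]) },
};

int main(void)
{
	unsigned char id = 0x3d;	/* as read back from the module */

	for (size_t i = 0; i < sizeof(gf_mods) / sizeof(gf_mods[0]); i++) {
		if (gf_mods[i].id == id) {
			printf("module %s: %zu I2C device(s)\n",
			       gf_mods[i].name, gf_mods[i].num_i2c_devs);
			return 0;
		}
	}
	printf("Unknown module ID 0x%x\n", id);
	return 0;
}
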
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index d04b6544851..680fd758ff2 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -37,6 +37,9 @@
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/gpio.h>
+#include <sound/wm1250-ev1.h>
+
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -50,7 +53,6 @@
#include <mach/regs-gpio-memport.h>
-#include <plat/s3c6410.h>
#include <plat/regs-serial.h>
#include <plat/regs-fb-v4.h>
#include <plat/fb.h>
@@ -66,6 +68,8 @@
#include <plat/iic.h>
#include <plat/pm.h>
+#include "common.h"
+
/* serial port setup */
#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
@@ -287,6 +291,11 @@ static struct platform_device speyside_wm8962_device = {
.id = -1,
};
+static struct platform_device littlemill_device = {
+ .name = "littlemill",
+ .id = -1,
+};
+
static struct regulator_consumer_supply wallvdd_consumers[] = {
REGULATOR_SUPPLY("SPKVDD1", "1-001a"),
REGULATOR_SUPPLY("SPKVDD2", "1-001a"),
@@ -339,6 +348,7 @@ static struct platform_device *crag6410_devices[] __initdata = {
&crag6410_backlight_device,
&speyside_device,
&speyside_wm8962_device,
+ &littlemill_device,
&lowland_device,
&wallvdd_device,
};
@@ -372,6 +382,10 @@ static struct regulator_init_data vddarm __initdata = {
.driver_data = &vddarm_pdata,
};
+static struct regulator_consumer_supply vddint_consumers[] __initdata = {
+ REGULATOR_SUPPLY("vddint", NULL),
+};
+
static struct regulator_init_data vddint __initdata = {
.constraints = {
.name = "VDDINT",
@@ -380,6 +394,9 @@ static struct regulator_init_data vddint __initdata = {
.always_on = 1,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
},
+ .num_consumer_supplies = ARRAY_SIZE(vddint_consumers),
+ .consumer_supplies = vddint_consumers,
+ .supply_regulator = "WALLVDD",
};
static struct regulator_init_data vddmem __initdata = {
@@ -500,7 +517,8 @@ static struct wm831x_touch_pdata touch_pdata __initdata = {
static struct wm831x_pdata crag_pmic_pdata __initdata = {
.wm831x_num = 1,
.irq_base = BANFF_PMIC_IRQ_BASE,
- .gpio_base = GPIO_BOARD_START + 8,
+ .gpio_base = BANFF_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
.backup = &banff_backup_pdata,
@@ -605,6 +623,7 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
.wm831x_num = 2,
.irq_base = GLENFARCLAS_PMIC_IRQ_BASE,
.gpio_base = GLENFARCLAS_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
.gpio_defaults = {
/* GPIO1-3: IRQ inputs, rising edge triggered, CMOS */
@@ -622,6 +641,16 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
.disable_touch = true,
};
+static struct wm1250_ev1_pdata wm1250_ev1_pdata = {
+ .gpios = {
+ [WM1250_EV1_GPIO_CLK_ENA] = S3C64XX_GPN(12),
+ [WM1250_EV1_GPIO_CLK_SEL0] = S3C64XX_GPL(12),
+ [WM1250_EV1_GPIO_CLK_SEL1] = S3C64XX_GPL(13),
+ [WM1250_EV1_GPIO_OSR] = S3C64XX_GPL(14),
+ [WM1250_EV1_GPIO_MASTER] = S3C64XX_GPL(8),
+ },
+};
+
static struct i2c_board_info i2c_devs1[] __initdata = {
{ I2C_BOARD_INFO("wm8311", 0x34),
.irq = S3C_EINT(0),
@@ -631,7 +660,13 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
{ I2C_BOARD_INFO("wlf-gf-module", 0x25) },
{ I2C_BOARD_INFO("wlf-gf-module", 0x26) },
- { I2C_BOARD_INFO("wm1250-ev1", 0x27) },
+ { I2C_BOARD_INFO("wm1250-ev1", 0x27),
+ .platform_data = &wm1250_ev1_pdata },
+};
+
+static struct s3c2410_platform_i2c i2c1_pdata = {
+ .frequency = 400000,
+ .bus_num = 1,
};
static void __init crag6410_map_io(void)
@@ -692,7 +727,7 @@ static void __init crag6410_machine_init(void)
s3c_sdhci2_set_platdata(&crag6410_hsmmc2_pdata);
s3c_i2c0_set_platdata(&i2c0_pdata);
- s3c_i2c1_set_platdata(NULL);
+ s3c_i2c1_set_platdata(&i2c1_pdata);
s3c_fb_set_platdata(&crag6410_lcd_pdata);
i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0));
@@ -704,14 +739,16 @@ static void __init crag6410_machine_init(void)
regulator_has_full_constraints();
- s3c_pm_init();
+ s3c64xx_pm_init();
}
MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
/* Maintainer: Mark Brown <broonie@opensource.wolfsonmicro.com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = crag6410_map_io,
.init_machine = crag6410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 952f75ff5de..521e07b8501 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -29,6 +29,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
+#include <asm/hardware/vic.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
@@ -37,12 +38,13 @@
#include <plat/fb.h>
#include <plat/nand.h>
-#include <plat/s3c6410.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -267,7 +269,9 @@ MACHINE_START(HMT, "Airgoo-HMT")
/* Maintainer: Peter Korsgaard <jacmet@sunsite.dk> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = hmt_map_io,
.init_machine = hmt_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 1bc85c35949..c34c2ab22ea 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -24,6 +24,7 @@
#include <linux/serial_core.h>
#include <linux/types.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -33,7 +34,6 @@
#include <mach/regs-modem.h>
#include <mach/regs-srom.h>
-#include <plat/s3c6410.h>
#include <plat/adc.h>
#include <plat/cpu.h>
#include <plat/devs.h>
@@ -45,6 +45,8 @@
#include <video/platform_lcd.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -345,7 +347,9 @@ MACHINE_START(MINI6410, "MINI6410")
/* Maintainer: Darius Augulis <augulis.darius@gmail.com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = mini6410_map_io,
.init_machine = mini6410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index cb13cba98b3..0efa2ba783b 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -25,6 +25,7 @@
#include <video/platform_lcd.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -39,12 +40,13 @@
#include <plat/iic.h>
#include <plat/fb.h>
-#include <plat/s3c6410.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -99,7 +101,9 @@ MACHINE_START(NCP, "NCP")
/* Maintainer: Samsung Electronics */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = ncp_map_io,
.init_machine = ncp_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 87281e4b847..be2a9a22ab7 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -25,6 +25,7 @@
#include <linux/serial_core.h>
#include <linux/types.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -34,7 +35,6 @@
#include <mach/regs-modem.h>
#include <mach/regs-srom.h>
-#include <plat/s3c6410.h>
#include <plat/adc.h>
#include <plat/cpu.h>
#include <plat/devs.h>
@@ -46,6 +46,8 @@
#include <video/platform_lcd.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
@@ -326,7 +328,9 @@ MACHINE_START(REAL6410, "REAL6410")
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = real6410_map_io,
.init_machine = real6410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index cb1ebeb0876..ce31db13623 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -40,6 +40,8 @@
#include <video/platform_lcd.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 94c831d8836..3f42431d4dd 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -17,19 +17,20 @@
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
-#include <plat/s3c6410.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
#include "mach-smartq.h"
static struct gpio_led smartq5_leds[] = {
@@ -148,7 +149,9 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq5_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index f112547ce80..e5c09b6db96 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -17,19 +17,20 @@
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/map.h>
#include <mach/regs-gpio.h>
-#include <plat/s3c6410.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
#include "mach-smartq.h"
static struct gpio_led smartq7_leds[] = {
@@ -164,7 +165,9 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
/* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq7_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 73450c2b530..5f096534f4c 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -22,6 +22,7 @@
#include <asm/mach-types.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -31,12 +32,13 @@
#include <plat/regs-serial.h>
-#include <plat/s3c6400.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/iic.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -88,7 +90,9 @@ MACHINE_START(SMDK6400, "SMDK6400")
.atag_offset = 0x100,
.init_irq = s3c6400_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdk6400_map_io,
.init_machine = smdk6400_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 8bc8edd85e5..ca6fc204f0e 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -43,6 +43,7 @@
#include <video/platform_lcd.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -63,7 +64,6 @@
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-#include <plat/s3c6410.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -73,6 +73,8 @@
#include <plat/backlight.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
@@ -700,7 +702,9 @@ MACHINE_START(SMDK6410, "SMDK6410")
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdk6410_map_io,
.init_machine = smdk6410_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index b375cd5c47c..055dac90e0e 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -17,10 +17,12 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/pm_domain.h>
#include <mach/map.h>
#include <mach/irqs.h>
+#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/wakeup-mask.h>
@@ -31,6 +33,148 @@
#include <mach/regs-gpio-memport.h>
#include <mach/regs-modem.h>
+struct s3c64xx_pm_domain {
+ char *const name;
+ u32 ena;
+ u32 pwr_stat;
+ struct generic_pm_domain pd;
+};
+
+static int s3c64xx_pd_off(struct generic_pm_domain *domain)
+{
+ struct s3c64xx_pm_domain *pd;
+ u32 val;
+
+ pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val &= ~(pd->ena);
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
+ return 0;
+}
+
+static int s3c64xx_pd_on(struct generic_pm_domain *domain)
+{
+ struct s3c64xx_pm_domain *pd;
+ u32 val;
+ long retry = 1000000L;
+
+ pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val |= pd->ena;
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
+ /* Not all domains provide power status readback */
+ if (pd->pwr_stat) {
+ do {
+ cpu_relax();
+ if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat)
+ break;
+ } while (retry--);
+
+		if (retry < 0) {
+ pr_err("Failed to start domain %s\n", pd->name);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static struct s3c64xx_pm_domain s3c64xx_pm_irom = {
+ .name = "IROM",
+ .ena = S3C64XX_NORMALCFG_IROM_ON,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_etm = {
+ .name = "ETM",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_ETM,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_s = {
+ .name = "S",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_S_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_S,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_f = {
+ .name = "F",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_F_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_F,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_p = {
+ .name = "P",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_P_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_P,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_i = {
+ .name = "I",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_I_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_I,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_g = {
+ .name = "G",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_G_ON,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_v = {
+ .name = "V",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_V_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_V,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = {
+ &s3c64xx_pm_irom,
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = {
+ &s3c64xx_pm_etm,
+ &s3c64xx_pm_g,
+ &s3c64xx_pm_v,
+ &s3c64xx_pm_i,
+ &s3c64xx_pm_p,
+ &s3c64xx_pm_s,
+ &s3c64xx_pm_f,
+};
+
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
void s3c_pm_debug_smdkled(u32 set, u32 clear)
{
@@ -89,6 +233,8 @@ static struct sleep_save misc_save[] = {
SAVE_ITEM(S3C64XX_SDMA_SEL),
SAVE_ITEM(S3C64XX_MODEM_MIFPCON),
+
+ SAVE_ITEM(S3C64XX_NORMAL_CFG),
};
void s3c_pm_configure_extint(void)
@@ -179,12 +325,44 @@ static void s3c64xx_pm_prepare(void)
__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
}
-static int s3c64xx_pm_init(void)
+int __init s3c64xx_pm_init(void)
+{
+ int i;
+
+ s3c_pm_init();
+
+ for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++)
+ pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd,
+ &pm_domain_always_on_gov, false);
+
+ for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
+ pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
+
+ if (dev_get_platdata(&s3c_device_fb.dev))
+ pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+
+ return 0;
+}
+
+static __init int s3c64xx_pm_initcall(void)
{
+ u32 val;
+
pm_cpu_prep = s3c64xx_pm_prepare;
pm_cpu_sleep = s3c64xx_cpu_suspend;
pm_uart_udivslot = 1;
+ /*
+ * Unconditionally disable power domains that contain only
+ * blocks which have no mainline driver support.
+ */
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val &= ~(S3C64XX_NORMALCFG_DOMAIN_G_ON |
+ S3C64XX_NORMALCFG_DOMAIN_V_ON |
+ S3C64XX_NORMALCFG_DOMAIN_I_ON |
+ S3C64XX_NORMALCFG_DOMAIN_P_ON);
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
@@ -198,5 +376,12 @@ static int s3c64xx_pm_init(void)
return 0;
}
+arch_initcall(s3c64xx_pm_initcall);
+
+static __init int s3c64xx_pm_late_initcall(void)
+{
+ pm_genpd_poweroff_unused();
-arch_initcall(s3c64xx_pm_init);
+ return 0;
+}
+late_initcall(s3c64xx_pm_late_initcall);
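
The new power-domain code wraps each S3C64XX_NORMAL_CFG enable bit in a generic_pm_domain: power_off clears the domain's bit, power_on sets it and, where the block provides status readback, polls S3C64XX_BLK_PWR_STAT until the domain reports powered. A standalone sketch of the container_of-based callbacks against a mocked register; the real code uses __raw_readl/__raw_writel and the polling loop shown in the hunk:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mocked NORMAL_CFG register image; the kernel code does MMIO accesses. */
static uint32_t normal_cfg = 0xffffffff;

struct generic_pm_domain {
	int (*power_on)(struct generic_pm_domain *d);
	int (*power_off)(struct generic_pm_domain *d);
};

struct demo_pm_domain {
	const char *name;
	uint32_t ena;			/* enable bit(s) in NORMAL_CFG */
	struct generic_pm_domain pd;
};

static int demo_pd_off(struct generic_pm_domain *domain)
{
	struct demo_pm_domain *pd =
		container_of(domain, struct demo_pm_domain, pd);

	normal_cfg &= ~pd->ena;
	return 0;
}

static int demo_pd_on(struct generic_pm_domain *domain)
{
	struct demo_pm_domain *pd =
		container_of(domain, struct demo_pm_domain, pd);

	normal_cfg |= pd->ena;
	/* the real code also polls BLK_PWR_STAT here when available */
	return 0;
}

static struct demo_pm_domain pd_f = {
	.name = "F",
	.ena  = 1u << 7,		/* illustrative bit position */
	.pd   = { .power_on = demo_pd_on, .power_off = demo_pd_off },
};

int main(void)
{
	pd_f.pd.power_off(&pd_f.pd);
	printf("NORMAL_CFG after off: 0x%08x\n", normal_cfg);
	pd_f.pd.power_on(&pd_f.pd);
	printf("NORMAL_CFG after on:  0x%08x\n", normal_cfg);
	return 0;
}
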
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 7a3bc32df42..4869714c6f1 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -17,7 +17,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
@@ -38,7 +38,8 @@
#include <plat/sdhci.h>
#include <plat/iic-core.h>
#include <plat/onenand-core.h>
-#include <plat/s3c6400.h>
+
+#include "common.h"
void __init s3c6400_map_io(void)
{
@@ -60,7 +61,7 @@ void __init s3c6400_map_io(void)
void __init s3c6400_init_clocks(int xtal)
{
s3c64xx_register_clocks(xtal, S3C6400_CLKDIV0_ARM_MASK);
- s3c6400_setup_clocks();
+ s3c64xx_setup_clocks();
}
void __init s3c6400_init_irq(void)
@@ -70,17 +71,18 @@ void __init s3c6400_init_irq(void)
s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
}
-struct sysdev_class s3c6400_sysclass = {
- .name = "s3c6400-core",
+static struct bus_type s3c6400_subsys = {
+ .name = "s3c6400-core",
+ .dev_name = "s3c6400-core",
};
-static struct sys_device s3c6400_sysdev = {
- .cls = &s3c6400_sysclass,
+static struct device s3c6400_dev = {
+ .bus = &s3c6400_subsys,
};
static int __init s3c6400_core_init(void)
{
- return sysdev_class_register(&s3c6400_sysclass);
+ return subsys_system_register(&s3c6400_subsys, NULL);
}
core_initcall(s3c6400_core_init);
@@ -89,5 +91,5 @@ int __init s3c6400_init(void)
{
printk("S3C6400: Initialising architecture\n");
- return sysdev_register(&s3c6400_sysdev);
+ return device_register(&s3c6400_dev);
}
diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c
index 4117003464a..31c29fdf180 100644
--- a/arch/arm/mach-s3c64xx/s3c6410.c
+++ b/arch/arm/mach-s3c64xx/s3c6410.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
@@ -41,8 +41,8 @@
#include <plat/adc-core.h>
#include <plat/iic-core.h>
#include <plat/onenand-core.h>
-#include <plat/s3c6400.h>
-#include <plat/s3c6410.h>
+
+#include "common.h"
void __init s3c6410_map_io(void)
{
@@ -66,7 +66,7 @@ void __init s3c6410_init_clocks(int xtal)
{
printk(KERN_DEBUG "%s: initialising clocks\n", __func__);
s3c64xx_register_clocks(xtal, S3C6410_CLKDIV0_ARM_MASK);
- s3c6400_setup_clocks();
+ s3c64xx_setup_clocks();
}
void __init s3c6410_init_irq(void)
@@ -75,17 +75,18 @@ void __init s3c6410_init_irq(void)
s3c64xx_init_irq(~0 & ~(1 << 7), ~0);
}
-struct sysdev_class s3c6410_sysclass = {
- .name = "s3c6410-core",
+struct bus_type s3c6410_subsys = {
+ .name = "s3c6410-core",
+ .dev_name = "s3c6410-core",
};
-static struct sys_device s3c6410_sysdev = {
- .cls = &s3c6410_sysclass,
+static struct device s3c6410_dev = {
+ .bus = &s3c6410_subsys,
};
static int __init s3c6410_core_init(void)
{
- return sysdev_class_register(&s3c6410_sysclass);
+ return subsys_system_register(&s3c6410_subsys, NULL);
}
core_initcall(s3c6410_core_init);
@@ -94,5 +95,5 @@ int __init s3c6410_init(void)
{
printk("S3C6410: Initialising architecture\n");
- return sysdev_register(&s3c6410_sysdev);
+ return device_register(&s3c6410_dev);
}
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 83d2afb79e9..2cf80026c58 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -20,7 +20,7 @@
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
{
s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c
deleted file mode 100644
index c75a71b2116..00000000000
--- a/arch/arm/mach-s3c64xx/setup-sdhci.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/setup-sdhci.c
- *
- * Copyright 2008 Simtec Electronics
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C6400/S3C6410 - Helper functions for settign up SDHCI device(s) (HSMMC)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-
-/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-
-char *s3c64xx_hsmmc_clksrcs[4] = {
- [0] = "hsmmc",
- [1] = "hsmmc",
- [2] = "mmc_bus",
- /* [3] = "48m", - note not successfully used yet */
-};
diff --git a/arch/arm/mach-s3c64xx/setup-spi.c b/arch/arm/mach-s3c64xx/setup-spi.c
new file mode 100644
index 00000000000..d9592ad7a82
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/setup-spi.c
@@ -0,0 +1,45 @@
+/* linux/arch/arm/mach-s3c64xx/setup-spi.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S3C64XX_GPC(0), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+};
+
+int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S3C64XX_GPC(4), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index 18690c5f99e..c87f6108eeb 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -36,6 +36,16 @@ config S5P64X0_SETUP_I2C1
help
Common setup code for i2c bus 1.
+config S5P64X0_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations
+
+config S5P64X0_SETUP_SDHCI_GPIO
+ bool
+ help
+ Common setup code for SDHCI gpio.
+
# machine support
config MACH_SMDK6440
@@ -45,13 +55,16 @@ config MACH_SMDK6440
select S3C_DEV_I2C1
select S3C_DEV_RTC
select S3C_DEV_WDT
- select S3C64XX_DEV_SPI
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
select SAMSUNG_DEV_ADC
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select SAMSUNG_DEV_TS
select S5P64X0_SETUP_FB_24BPP
select S5P64X0_SETUP_I2C1
+ select S5P64X0_SETUP_SDHCI_GPIO
help
Machine support for the Samsung SMDK6440
@@ -62,14 +75,28 @@ config MACH_SMDK6450
select S3C_DEV_I2C1
select S3C_DEV_RTC
select S3C_DEV_WDT
- select S3C64XX_DEV_SPI
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
select SAMSUNG_DEV_ADC
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select SAMSUNG_DEV_TS
select S5P64X0_SETUP_FB_24BPP
select S5P64X0_SETUP_I2C1
+ select S5P64X0_SETUP_SDHCI_GPIO
help
Machine support for the Samsung SMDK6450
+menu "Use 8-bit SDHCI bus width"
+
+config S5P64X0_SD_CH1_8BIT
+ bool "SDHCI Channel 1 (Slot 1)"
+ depends on MACH_SMDK6450 || MACH_SMDK6440
+ help
+ Support SDHCI Channel 1 8-bit bus.
+ If selected, Channel 2 is disabled.
+
+endmenu
+
endif
diff --git a/arch/arm/mach-s5p64x0/Makefile b/arch/arm/mach-s5p64x0/Makefile
index a1324d8dc4e..12bb951187a 100644
--- a/arch/arm/mach-s5p64x0/Makefile
+++ b/arch/arm/mach-s5p64x0/Makefile
@@ -10,14 +10,16 @@ obj-m :=
obj-n :=
obj- :=
-# Core support for S5P64X0 system
+# Core
-obj-$(CONFIG_ARCH_S5P64X0) += cpu.o init.o clock.o dma.o
-obj-$(CONFIG_ARCH_S5P64X0) += setup-i2c0.o irq-eint.o
+obj-y += common.o clock.o
obj-$(CONFIG_CPU_S5P6440) += clock-s5p6440.o
obj-$(CONFIG_CPU_S5P6450) += clock-s5p6450.o
+
obj-$(CONFIG_PM) += pm.o irq-pm.o
+obj-y += dma.o
+
# machine support
obj-$(CONFIG_MACH_SMDK6440) += mach-smdk6440.o
@@ -26,7 +28,9 @@ obj-$(CONFIG_MACH_SMDK6450) += mach-smdk6450.o
# device support
obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-y += setup-i2c0.o
obj-$(CONFIG_S5P64X0_SETUP_I2C1) += setup-i2c1.o
obj-$(CONFIG_S5P64X0_SETUP_FB_24BPP) += setup-fb-24bpp.o
+obj-$(CONFIG_S5P64X0_SETUP_SPI) += setup-spi.o
+obj-$(CONFIG_S5P64X0_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index c54c65d511f..ee1e8e7f563 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -31,7 +31,8 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/s5p6440.h>
+
+#include "common.h"
static u32 epll_div[][5] = {
{ 36000000, 0, 48, 1, 4 },
@@ -268,18 +269,6 @@ static struct clk init_clocks_off[] = {
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 31),
}, {
- .name = "sclk_spi_48",
- .devname = "s3c64xx-spi.0",
- .parent = &clk_48m,
- .enable = s5p64x0_sclk_ctrl,
- .ctrlbit = (1 << 22),
- }, {
- .name = "sclk_spi_48",
- .devname = "s3c64xx-spi.1",
- .parent = &clk_48m,
- .enable = s5p64x0_sclk_ctrl,
- .ctrlbit = (1 << 23),
- }, {
.name = "mmc_48m",
.devname = "s3c-sdhci.0",
.parent = &clk_48m,
@@ -391,65 +380,6 @@ static struct clksrc_sources clkset_audio = {
static struct clksrc_clk clksrcs[] = {
{
.clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.0",
- .ctrlbit = (1 << 24),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group1,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 18, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.1",
- .ctrlbit = (1 << 25),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group1,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 20, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.2",
- .ctrlbit = (1 << 26),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group1,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 22, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 8, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .ctrlbit = (1 << 5),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 13, .size = 1 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
- .ctrlbit = (1 << 20),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group1,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 14, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
- .ctrlbit = (1 << 21),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group1,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 16, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
- }, {
- .clk = {
.name = "sclk_post",
.ctrlbit = (1 << 10),
.enable = s5p64x0_sclk_ctrl,
@@ -487,6 +417,77 @@ static struct clksrc_clk clksrcs[] = {
},
};
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.0",
+ .ctrlbit = (1 << 24),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 18, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.1",
+ .ctrlbit = (1 << 25),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.2",
+ .ctrlbit = (1 << 26),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 22, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uclk = {
+ .clk = {
+ .name = "uclk1",
+ .ctrlbit = (1 << 5),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 13, .size = 1 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.0",
+ .ctrlbit = (1 << 20),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 14, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.1",
+ .ctrlbit = (1 << 21),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
+};
+
/* Clock initialization code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
@@ -505,6 +506,26 @@ static struct clk dummy_apb_pclk = {
.id = -1,
};
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uclk,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2
+};
+
+static struct clk_lookup s5p6440_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_pclk_low.clk),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
+ CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+};
+
void __init_or_cpufreq s5p6440_setup_clocks(void)
{
struct clk *xtal_clk;
@@ -583,9 +604,12 @@ void __init s5p6440_register_clocks(void)
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
+ s3c_register_clksrc(clksrc_cdev[ptr], 1);
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s5p6440_clk_lookup, ARRAY_SIZE(s5p6440_clk_lookup));
s3c24xx_register_clock(&dummy_apb_pclk);
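
The per-device clocks are now published through a clk_lookup table: clk_get() matches (roughly) on the consumer's dev_id (e.g. "s3c64xx-spi.0") and the connection id (e.g. "spi_busclk1"), and an entry with a NULL dev_id applies to any device. A standalone model of that two-key lookup; the kernel's clkdev core does the real matching and scoring, so this is illustrative only:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct clk { const char *name; };

/* Mirrors the shape of CLKDEV_INIT(dev_id, con_id, clk). */
struct clk_lookup {
	const char *dev_id;	/* NULL means "any device" */
	const char *con_id;
	struct clk *clk;
};

static struct clk sclk_spi0 = { "sclk_spi.0" };
static struct clk pclk      = { "pclk" };

static struct clk_lookup lookups[] = {
	{ NULL,            "spi_busclk0", &pclk },
	{ "s3c64xx-spi.0", "spi_busclk1", &sclk_spi0 },
};

static struct clk *demo_clk_get(const char *dev_id, const char *con_id)
{
	for (size_t i = 0; i < sizeof(lookups) / sizeof(lookups[0]); i++) {
		if (lookups[i].dev_id && strcmp(lookups[i].dev_id, dev_id))
			continue;	/* wrong device */
		if (strcmp(lookups[i].con_id, con_id))
			continue;	/* wrong connection id */
		return lookups[i].clk;
	}
	return NULL;
}

int main(void)
{
	struct clk *c = demo_clk_get("s3c64xx-spi.0", "spi_busclk1");

	printf("got %s\n", c ? c->name : "(none)");
	return 0;
}
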
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 2d04abfba12..dae6a13f43b 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -31,7 +31,8 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/s5p6450.h>
+
+#include "common.h"
static struct clksrc_clk clk_mout_dpll = {
.clk = {
@@ -413,65 +414,6 @@ static struct clksrc_clk clk_sclk_audio0 = {
static struct clksrc_clk clksrcs[] = {
{
.clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.0",
- .ctrlbit = (1 << 24),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 18, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.1",
- .ctrlbit = (1 << 25),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 20, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.2",
- .ctrlbit = (1 << 26),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 22, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 8, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .ctrlbit = (1 << 5),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 13, .size = 1 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
- .ctrlbit = (1 << 20),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 14, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
- .ctrlbit = (1 << 21),
- .enable = s5p64x0_sclk_ctrl,
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 16, .size = 2 },
- .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
- }, {
- .clk = {
.name = "sclk_fimc",
.ctrlbit = (1 << 10),
.enable = s5p64x0_sclk_ctrl,
@@ -536,6 +478,97 @@ static struct clksrc_clk clksrcs[] = {
},
};
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.0",
+ .ctrlbit = (1 << 24),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 18, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.1",
+ .ctrlbit = (1 << 25),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.2",
+ .ctrlbit = (1 << 26),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 22, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uclk = {
+ .clk = {
+ .name = "uclk1",
+ .ctrlbit = (1 << 5),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 13, .size = 1 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.0",
+ .ctrlbit = (1 << 20),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 14, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.1",
+ .ctrlbit = (1 << 21),
+ .enable = s5p64x0_sclk_ctrl,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S5P64X0_CLK_DIV2, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uclk,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2,
+};
+
+static struct clk_lookup s5p6450_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_pclk_low.clk),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
+ CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+};
+
/* Clock initialization code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
@@ -634,9 +667,12 @@ void __init s5p6450_register_clocks(void)
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
+ s3c_register_clksrc(clksrc_cdev[ptr], 1);
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s5p6450_clk_lookup, ARRAY_SIZE(s5p6450_clk_lookup));
s3c24xx_register_clock(&dummy_apb_pclk);
diff --git a/arch/arm/mach-s5p64x0/clock.c b/arch/arm/mach-s5p64x0/clock.c
index b52c6e2f37a..241d0e645c8 100644
--- a/arch/arm/mach-s5p64x0/clock.c
+++ b/arch/arm/mach-s5p64x0/clock.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -30,8 +30,8 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
+
+#include "common.h"
struct clksrc_clk clk_mout_apll = {
.clk = {
diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c
new file mode 100644
index 00000000000..52b89a37644
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/common.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Codes for S5P64X0 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+
+#include <asm/irq.h>
+#include <asm/proc-fns.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/map.h>
+#include <mach/hardware.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/pm.h>
+#include <plat/sdhci.h>
+#include <plat/adc-core.h>
+#include <plat/fb-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/regs-irqtype.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
+
+#include "common.h"
+
+static const char name_s5p6440[] = "S5P6440";
+static const char name_s5p6450[] = "S5P6450";
+
+static struct cpu_table cpu_ids[] __initdata = {
+ {
+ .idcode = S5P6440_CPU_ID,
+ .idmask = S5P64XX_CPU_MASK,
+ .map_io = s5p6440_map_io,
+ .init_clocks = s5p6440_init_clocks,
+ .init_uarts = s5p6440_init_uarts,
+ .init = s5p64x0_init,
+ .name = name_s5p6440,
+ }, {
+ .idcode = S5P6450_CPU_ID,
+ .idmask = S5P64XX_CPU_MASK,
+ .map_io = s5p6450_map_io,
+ .init_clocks = s5p6450_init_clocks,
+ .init_uarts = s5p6450_init_uarts,
+ .init = s5p64x0_init,
+ .name = name_s5p6450,
+ },
+};
+
+/* Initial IO mappings */
+
+static struct map_desc s5p64x0_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_CHIPID,
+ .pfn = __phys_to_pfn(S5P64X0_PA_CHIPID),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(S5P64X0_PA_SYSCON),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_TIMER,
+ .pfn = __phys_to_pfn(S5P64X0_PA_TIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_WATCHDOG,
+ .pfn = __phys_to_pfn(S5P64X0_PA_WDT),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SROMC,
+ .pfn = __phys_to_pfn(S5P64X0_PA_SROMC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_GPIO,
+ .pfn = __phys_to_pfn(S5P64X0_PA_GPIO),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)VA_VIC0,
+ .pfn = __phys_to_pfn(S5P64X0_PA_VIC0),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)VA_VIC1,
+ .pfn = __phys_to_pfn(S5P64X0_PA_VIC1),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct map_desc s5p6440_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S3C_VA_UART,
+ .pfn = __phys_to_pfn(S5P6440_PA_UART(0)),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static struct map_desc s5p6450_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S3C_VA_UART,
+ .pfn = __phys_to_pfn(S5P6450_PA_UART(0)),
+ .length = SZ_512K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_UART + SZ_512K,
+ .pfn = __phys_to_pfn(S5P6450_PA_UART(5)),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void s5p64x0_idle(void)
+{
+ unsigned long val;
+
+ if (!need_resched()) {
+ val = __raw_readl(S5P64X0_PWR_CFG);
+ val &= ~(0x3 << 5);
+ val |= (0x1 << 5);
+ __raw_writel(val, S5P64X0_PWR_CFG);
+
+ cpu_do_idle();
+ }
+ local_irq_enable();
+}
+
+/*
+ * s5p64x0_map_io
+ *
+ * register the standard CPU IO areas
+ */
+
+void __init s5p64x0_init_io(struct map_desc *mach_desc, int size)
+{
+ /* initialize the io descriptors we need for initialization */
+ iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
+ if (mach_desc)
+ iotable_init(mach_desc, size);
+
+ /* detect cpu id and rev. */
+ s5p_init_cpu(S5P64X0_SYS_ID);
+
+ s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init s5p6440_map_io(void)
+{
+ /* initialize any device information early */
+ s3c_adc_setname("s3c64xx-adc");
+ s3c_fb_setname("s5p64x0-fb");
+
+ s5p64x0_default_sdhci0();
+ s5p64x0_default_sdhci1();
+ s5p6440_default_sdhci2();
+
+ iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc));
+ init_consistent_dma_size(SZ_8M);
+}
+
+void __init s5p6450_map_io(void)
+{
+ /* initialize any device information early */
+ s3c_adc_setname("s3c64xx-adc");
+ s3c_fb_setname("s5p64x0-fb");
+
+ s5p64x0_default_sdhci0();
+ s5p64x0_default_sdhci1();
+ s5p6450_default_sdhci2();
+
+ iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
+ init_consistent_dma_size(SZ_8M);
+}
+
+/*
+ * s5p64x0_init_clocks
+ *
+ * register and setup the CPU clocks
+ */
+
+void __init s5p6440_init_clocks(int xtal)
+{
+ printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+ s3c24xx_register_baseclocks(xtal);
+ s5p_register_clocks(xtal);
+ s5p6440_register_clocks();
+ s5p6440_setup_clocks();
+}
+
+void __init s5p6450_init_clocks(int xtal)
+{
+ printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
+ s3c24xx_register_baseclocks(xtal);
+ s5p_register_clocks(xtal);
+ s5p6450_register_clocks();
+ s5p6450_setup_clocks();
+}
+
+/*
+ * s5p64x0_init_irq
+ *
+ * register the CPU interrupts
+ */
+
+void __init s5p6440_init_irq(void)
+{
+ /* S5P6440 supports 2 VIC */
+ u32 vic[2];
+
+ /*
+ * VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)]
+ * VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22]
+ */
+ vic[0] = 0xff800ae7;
+ vic[1] = 0xffbf23e5;
+
+ s5p_init_irq(vic, ARRAY_SIZE(vic));
+}
+
+void __init s5p6450_init_irq(void)
+{
+ /* S5P6450 supports only 2 VIC */
+ u32 vic[2];
+
+ /*
+ * VIC0 is missing IRQ_VIC0[(13-15), (21-22)]
+ * VIC1 is missing IRQ VIC1[12, 14, 23]
+ */
+ vic[0] = 0xff9f1fff;
+ vic[1] = 0xff7fafff;
+
+ s5p_init_irq(vic, ARRAY_SIZE(vic));
+}
+
+struct bus_type s5p64x0_subsys = {
+ .name = "s5p64x0-core",
+ .dev_name = "s5p64x0-core",
+};
+
+static struct device s5p64x0_dev = {
+ .bus = &s5p64x0_subsys,
+};
+
+static int __init s5p64x0_core_init(void)
+{
+ return subsys_system_register(&s5p64x0_subsys, NULL);
+}
+core_initcall(s5p64x0_core_init);
+
+int __init s5p64x0_init(void)
+{
+ printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n");
+
+ /* set idle function */
+ pm_idle = s5p64x0_idle;
+
+ return device_register(&s5p64x0_dev);
+}
+
+/* uart registration process */
+void __init s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ int uart;
+
+ for (uart = 0; uart < no; uart++) {
+ s5p_uart_resources[uart].resources->start = S5P6440_PA_UART(uart);
+ s5p_uart_resources[uart].resources->end = S5P6440_PA_UART(uart) + S5P_SZ_UART;
+ }
+
+ s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+void __init s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+#define eint_offset(irq) ((irq) - IRQ_EINT(0))
+
+static int s5p64x0_irq_eint_set_type(struct irq_data *data, unsigned int type)
+{
+ int offs = eint_offset(data->irq);
+ int shift;
+ u32 ctrl, mask;
+ u32 newvalue = 0;
+
+ if (offs > 15)
+ return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ printk(KERN_WARNING "No edge setting!\n");
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S3C2410_EXTINT_RISEEDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S3C2410_EXTINT_FALLEDGE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S3C2410_EXTINT_BOTHEDGE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S3C2410_EXTINT_LOWLEV;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S3C2410_EXTINT_HILEV;
+ break;
+ default:
+		printk(KERN_ERR "No such irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ shift = (offs / 2) * 4;
+ mask = 0x7 << shift;
+
+ ctrl = __raw_readl(S5P64X0_EINT0CON0) & ~mask;
+ ctrl |= newvalue << shift;
+ __raw_writel(ctrl, S5P64X0_EINT0CON0);
+
+ /* Configure the GPIO pin for 6450 or 6440 based on CPU ID */
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgpin(S5P6450_GPN(offs), S3C_GPIO_SFN(2));
+ else
+ s3c_gpio_cfgpin(S5P6440_GPN(offs), S3C_GPIO_SFN(2));
+
+ return 0;
+}
+
+/*
+ * s5p64x0_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from IRQ_EINT(0) to IRQ_EINT(15). It is designed to be inlined into
+ * the specific handlers s5p64x0_irq_demux_eintX_Y.
+ */
+static inline void s5p64x0_irq_demux_eint(unsigned int start, unsigned int end)
+{
+ u32 status = __raw_readl(S5P64X0_EINT0PEND);
+ u32 mask = __raw_readl(S5P64X0_EINT0MASK);
+ unsigned int irq;
+
+ status &= ~mask;
+ status >>= start;
+ status &= (1 << (end - start + 1)) - 1;
+
+ for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
+ if (status & 1)
+ generic_handle_irq(irq);
+ status >>= 1;
+ }
+}
+
+static void s5p64x0_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+{
+ s5p64x0_irq_demux_eint(0, 3);
+}
+
+static void s5p64x0_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+{
+ s5p64x0_irq_demux_eint(4, 11);
+}
+
+static void s5p64x0_irq_demux_eint12_15(unsigned int irq,
+ struct irq_desc *desc)
+{
+ s5p64x0_irq_demux_eint(12, 15);
+}
+
+static int s5p64x0_alloc_gc(void)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("s5p64x0-eint", 1, S5P_IRQ_EINT_BASE,
+ S5P_VA_GPIO, handle_level_irq);
+ if (!gc) {
+		printk(KERN_ERR "%s: irq_alloc_generic_chip for group 0 "
+			"external interrupts failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
+ ct->chip.irq_set_wake = s3c_irqext_wake;
+ ct->regs.ack = EINT0PEND_OFFSET;
+ ct->regs.mask = EINT0MASK_OFFSET;
+ irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ return 0;
+}
+
+static int __init s5p64x0_init_irq_eint(void)
+{
+ int ret = s5p64x0_alloc_gc();
+ irq_set_chained_handler(IRQ_EINT0_3, s5p64x0_irq_demux_eint0_3);
+ irq_set_chained_handler(IRQ_EINT4_11, s5p64x0_irq_demux_eint4_11);
+ irq_set_chained_handler(IRQ_EINT12_15, s5p64x0_irq_demux_eint12_15);
+
+ return ret;
+}
+arch_initcall(s5p64x0_init_irq_eint);
+
+void s5p64x0_restart(char mode, const char *cmd)
+{
+ if (mode != 's')
+ arch_wdt_reset();
+
+ soft_restart(0);
+}
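
In s5p64x0_irq_eint_set_type() above, each pair of EINT lines shares one 4-bit field in EINT0CON0, so the field offset is (offs / 2) * 4 and only the low three bits of the field carry the trigger type. A standalone sketch of that read-modify-write on a register image; the encoding values below are illustrative, the real ones come from regs-irqtype.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative trigger encodings. */
#define EXTINT_RISEEDGE	0x4
#define EXTINT_FALLEDGE	0x2

/* EINT lines are paired: lines 2*n and 2*n+1 share one 4-bit field. */
static uint32_t set_eint_type(uint32_t eintcon, unsigned int offs,
			      uint32_t value)
{
	unsigned int shift = (offs / 2) * 4;
	uint32_t mask = 0x7u << shift;

	return (eintcon & ~mask) | (value << shift);
}

int main(void)
{
	uint32_t con = 0;

	con = set_eint_type(con, 5, EXTINT_FALLEDGE);	/* lines 4/5 -> field 2 */
	con = set_eint_type(con, 0, EXTINT_RISEEDGE);	/* lines 0/1 -> field 0 */
	printf("EINT0CON0 image: 0x%08x\n", con);	/* 0x00000204 */
	return 0;
}
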
diff --git a/arch/arm/mach-s5p64x0/common.h b/arch/arm/mach-s5p64x0/common.h
new file mode 100644
index 00000000000..f8a60fdc588
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/common.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for S5P64X0 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H
+#define __ARCH_ARM_MACH_S5P64X0_COMMON_H
+
+void s5p6440_init_irq(void);
+void s5p6450_init_irq(void);
+void s5p64x0_init_io(struct map_desc *mach_desc, int size);
+
+void s5p6440_register_clocks(void);
+void s5p6440_setup_clocks(void);
+
+void s5p6450_register_clocks(void);
+void s5p6450_setup_clocks(void);
+
+void s5p64x0_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5P6440
+
+extern int s5p64x0_init(void);
+extern void s5p6440_map_io(void);
+extern void s5p6440_init_clocks(int xtal);
+
+extern void s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5p6440_init_clocks NULL
+#define s5p6440_init_uarts NULL
+#define s5p6440_map_io NULL
+#define s5p64x0_init NULL
+#endif
+
+#ifdef CONFIG_CPU_S5P6450
+
+extern int s5p64x0_init(void);
+extern void s5p6450_map_io(void);
+extern void s5p6450_init_clocks(int xtal);
+
+extern void s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5p6450_init_clocks NULL
+#define s5p6450_init_uarts NULL
+#define s5p6450_map_io NULL
+#define s5p64x0_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5P64X0_COMMON_H */
diff --git a/arch/arm/mach-s5p64x0/cpu.c b/arch/arm/mach-s5p64x0/cpu.c
deleted file mode 100644
index ecab40cf19a..00000000000
--- a/arch/arm/mach-s5p64x0/cpu.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/cpu.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <asm/proc-fns.h>
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/regs-serial.h>
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/adc-core.h>
-#include <plat/fb-core.h>
-
-/* Initial IO mappings */
-
-static struct map_desc s5p64x0_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_GPIO,
- .pfn = __phys_to_pfn(S5P64X0_PA_GPIO),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC0,
- .pfn = __phys_to_pfn(S5P64X0_PA_VIC0),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)VA_VIC1,
- .pfn = __phys_to_pfn(S5P64X0_PA_VIC1),
- .length = SZ_16K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc s5p6440_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(S5P6440_PA_UART(0)),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc s5p6450_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(S5P6450_PA_UART(0)),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART + SZ_512K,
- .pfn = __phys_to_pfn(S5P6450_PA_UART(5)),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-static void s5p64x0_idle(void)
-{
- unsigned long val;
-
- if (!need_resched()) {
- val = __raw_readl(S5P64X0_PWR_CFG);
- val &= ~(0x3 << 5);
- val |= (0x1 << 5);
- __raw_writel(val, S5P64X0_PWR_CFG);
-
- cpu_do_idle();
- }
- local_irq_enable();
-}
-
-/*
- * s5p64x0_map_io
- *
- * register the standard CPU IO areas
- */
-
-void __init s5p6440_map_io(void)
-{
- /* initialize any device information early */
- s3c_adc_setname("s3c64xx-adc");
- s3c_fb_setname("s5p64x0-fb");
-
- iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
- iotable_init(s5p6440_iodesc, ARRAY_SIZE(s5p6440_iodesc));
- init_consistent_dma_size(SZ_8M);
-}
-
-void __init s5p6450_map_io(void)
-{
- /* initialize any device information early */
- s3c_adc_setname("s3c64xx-adc");
- s3c_fb_setname("s5p64x0-fb");
-
- iotable_init(s5p64x0_iodesc, ARRAY_SIZE(s5p64x0_iodesc));
- iotable_init(s5p6450_iodesc, ARRAY_SIZE(s5p6450_iodesc));
- init_consistent_dma_size(SZ_8M);
-}
-
-/*
- * s5p64x0_init_clocks
- *
- * register and setup the CPU clocks
- */
-
-void __init s5p6440_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
- s3c24xx_register_baseclocks(xtal);
- s5p_register_clocks(xtal);
- s5p6440_register_clocks();
- s5p6440_setup_clocks();
-}
-
-void __init s5p6450_init_clocks(int xtal)
-{
- printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
-
- s3c24xx_register_baseclocks(xtal);
- s5p_register_clocks(xtal);
- s5p6450_register_clocks();
- s5p6450_setup_clocks();
-}
-
-/*
- * s5p64x0_init_irq
- *
- * register the CPU interrupts
- */
-
-void __init s5p6440_init_irq(void)
-{
- /* S5P6440 supports 2 VIC */
- u32 vic[2];
-
- /*
- * VIC0 is missing IRQ_VIC0[3, 4, 8, 10, (12-22)]
- * VIC1 is missing IRQ VIC1[1, 3, 4, 10, 11, 12, 14, 15, 22]
- */
- vic[0] = 0xff800ae7;
- vic[1] = 0xffbf23e5;
-
- s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-void __init s5p6450_init_irq(void)
-{
- /* S5P6450 supports only 2 VIC */
- u32 vic[2];
-
- /*
- * VIC0 is missing IRQ_VIC0[(13-15), (21-22)]
- * VIC1 is missing IRQ VIC1[12, 14, 23]
- */
- vic[0] = 0xff9f1fff;
- vic[1] = 0xff7fafff;
-
- s5p_init_irq(vic, ARRAY_SIZE(vic));
-}
-
-struct sysdev_class s5p64x0_sysclass = {
- .name = "s5p64x0-core",
-};
-
-static struct sys_device s5p64x0_sysdev = {
- .cls = &s5p64x0_sysclass,
-};
-
-static int __init s5p64x0_core_init(void)
-{
- return sysdev_class_register(&s5p64x0_sysclass);
-}
-core_initcall(s5p64x0_core_init);
-
-int __init s5p64x0_init(void)
-{
- printk(KERN_INFO "S5P64X0(S5P6440/S5P6450): Initializing architecture\n");
-
- /* set idle function */
- pm_idle = s5p64x0_idle;
-
- return sysdev_register(&s5p64x0_sysdev);
-}
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c
deleted file mode 100644
index 1fd9c79c7db..00000000000
--- a/arch/arm/mach-s5p64x0/dev-spi.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/dev-spi.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/irqs.h>
-#include <mach/regs-clock.h>
-#include <mach/spi-clocks.h>
-
-#include <plat/cpu.h>
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-
-static char *s5p64x0_spi_src_clks[] = {
- [S5P64X0_SPI_SRCCLK_PCLK] = "pclk",
- [S5P64X0_SPI_SRCCLK_SCLK] = "sclk_spi",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the CS.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s5p6440_spi_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S5P6440_GPC(0);
- break;
-
- case 1:
- base = S5P6440_GPC(4);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgall_range(base, 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-
- return 0;
-}
-
-static int s5p6450_spi_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S5P6450_GPC(0);
- break;
-
- case 1:
- base = S5P6450_GPC(4);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgall_range(base, 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-
- return 0;
-}
-
-static struct resource s5p64x0_spi0_resource[] = {
- [0] = {
- .start = S5P64X0_PA_SPI0,
- .end = S5P64X0_PA_SPI0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
- .cfg_gpio = s5p6440_spi_cfg_gpio,
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-static struct s3c64xx_spi_info s5p6450_spi0_pdata = {
- .cfg_gpio = s5p6450_spi_cfg_gpio,
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s5p64x0_device_spi0 = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5p64x0_spi0_resource),
- .resource = s5p64x0_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static struct resource s5p64x0_spi1_resource[] = {
- [0] = {
- .start = S5P64X0_PA_SPI1,
- .end = S5P64X0_PA_SPI1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI1_TX,
- .end = DMACH_SPI1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI1_RX,
- .end = DMACH_SPI1_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI1,
- .end = IRQ_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
- .cfg_gpio = s5p6440_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-static struct s3c64xx_spi_info s5p6450_spi1_pdata = {
- .cfg_gpio = s5p6450_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-struct platform_device s5p64x0_device_spi1 = {
- .name = "s3c64xx-spi",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5p64x0_spi1_resource),
- .resource = s5p64x0_spi1_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-void __init s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S5P64X0_SPI_SRCCLK_SCLK) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- if (soc_is_s5p6450())
- pd = &s5p6450_spi0_pdata;
- else
- pd = &s5p6440_spi0_pdata;
-
- s5p64x0_device_spi0.dev.platform_data = pd;
- break;
- case 1:
- if (soc_is_s5p6450())
- pd = &s5p6450_spi1_pdata;
- else
- pd = &s5p6440_spi1_pdata;
-
- s5p64x0_device_spi1.dev.platform_data = pd;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = s5p64x0_spi_src_clks[src_clk_nr];
-}
diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c
index 442dd4ad12d..f820c074440 100644
--- a/arch/arm/mach-s5p64x0/dma.c
+++ b/arch/arm/mach-s5p64x0/dma.c
@@ -38,176 +38,74 @@
static u64 dma_dmamask = DMA_BIT_MASK(32);
-struct dma_pl330_peri s5p6440_pdma_peri[22] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = DMACH_MAX,
- }, {
- .peri_id = DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_PCM0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- },
+u8 s5p6440_pdma_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_PCM0_TX,
+ DMACH_PCM0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI0_RX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_SPI1_TX,
+ DMACH_SPI1_RX,
};
struct dma_pl330_platdata s5p6440_pdma_pdata = {
.nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri),
- .peri = s5p6440_pdma_peri,
+ .peri_id = s5p6440_pdma_peri,
};
-struct dma_pl330_peri s5p6450_pdma_peri[32] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART4_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART4_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_USI_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_USI_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PWM,
- }, {
- .peri_id = (u8)DMACH_UART5_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART5_TX,
- .rqtype = MEMTODEV,
- },
+u8 s5p6450_pdma_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_UART4_RX,
+ DMACH_UART4_TX,
+ DMACH_PCM0_TX,
+ DMACH_PCM0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI0_RX,
+ DMACH_PCM1_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM2_TX,
+ DMACH_PCM2_RX,
+ DMACH_SPI1_TX,
+ DMACH_SPI1_RX,
+ DMACH_USI_TX,
+ DMACH_USI_RX,
+ DMACH_MAX,
+ DMACH_I2S1_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S2_TX,
+ DMACH_I2S2_RX,
+ DMACH_PWM,
+ DMACH_UART5_RX,
+ DMACH_UART5_TX,
};
struct dma_pl330_platdata s5p6450_pdma_pdata = {
.nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri),
- .peri = s5p6450_pdma_peri,
+ .peri_id = s5p6450_pdma_peri,
};
struct amba_device s5p64x0_device_pdma = {
@@ -227,10 +125,15 @@ struct amba_device s5p64x0_device_pdma = {
static int __init s5p64x0_dma_init(void)
{
- if (soc_is_s5p6450())
+ if (soc_is_s5p6450()) {
+ dma_cap_set(DMA_SLAVE, s5p6450_pdma_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5p6450_pdma_pdata.cap_mask);
s5p64x0_device_pdma.dev.platform_data = &s5p6450_pdma_pdata;
- else
+ } else {
+ dma_cap_set(DMA_SLAVE, s5p6440_pdma_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5p6440_pdma_pdata.cap_mask);
s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;
+ }
amba_device_register(&s5p64x0_device_pdma, &iomem_resource);
diff --git a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
index 10b62b4f821..fbb246d0a3d 100644
--- a/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5p64x0/include/mach/entry-macro.S
@@ -10,7 +10,8 @@
* published by the Free Software Foundation.
*/
-#include <mach/map.h>
-#include <plat/irqs.h>
+ .macro disable_fiq
+ .endm
-#include <asm/entry-macro-vic2.S>
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
diff --git a/arch/arm/mach-s5p64x0/include/mach/irqs.h b/arch/arm/mach-s5p64x0/include/mach/irqs.h
index 53982db9d25..5b845e849b3 100644
--- a/arch/arm/mach-s5p64x0/include/mach/irqs.h
+++ b/arch/arm/mach-s5p64x0/include/mach/irqs.h
@@ -141,6 +141,8 @@
#define IRQ_EINT_GROUP(grp, x) (IRQ_EINT_GROUP##grp##_BASE + (x))
+#define IRQ_TIMER_BASE (11)
+
/* Set the default NR_IRQS */
#define NR_IRQS (IRQ_EINT_GROUP8_BASE + IRQ_EINT_GROUP8_NR + 1)
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index 4d3ac8a3709..0c0175dbfa3 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -67,6 +67,8 @@
#define S3C_PA_RTC S5P64X0_PA_RTC
#define S3C_PA_WDT S5P64X0_PA_WDT
#define S3C_PA_FB S5P64X0_PA_FB
+#define S3C_PA_SPI0 S5P64X0_PA_SPI0
+#define S3C_PA_SPI1 S5P64X0_PA_SPI1
#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
#define S5P_PA_SROMC S5P64X0_PA_SROMC
diff --git a/arch/arm/mach-s5p64x0/include/mach/system.h b/arch/arm/mach-s5p64x0/include/mach/system.h
index 60f57532c97..cf26e0954a2 100644
--- a/arch/arm/mach-s5p64x0/include/mach/system.h
+++ b/arch/arm/mach-s5p64x0/include/mach/system.h
@@ -13,8 +13,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H __FILE__
-#include <plat/system-reset.h>
-
static void arch_idle(void)
{
/* nothing here yet */
diff --git a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h b/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
deleted file mode 100644
index 38dcc71a03c..00000000000
--- a/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/vmalloc.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p64x0/init.c b/arch/arm/mach-s5p64x0/init.c
deleted file mode 100644
index 79833caf816..00000000000
--- a/arch/arm/mach-s5p64x0/init.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/init.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * S5P64X0 - Init support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <mach/map.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5p64x0_serial_clocks[] = {
- [0] = {
- .name = "pclk_low",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
- [1] = {
- .name = "uclk1",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
-/* uart registration process */
-
-void __init s5p64x0_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- struct s3c2410_uartcfg *tcfg = cfg;
- u32 ucnt;
-
- for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
- if (!tcfg->clocks) {
- tcfg->clocks = s5p64x0_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(s5p64x0_serial_clocks);
- }
- }
-}
-
-void __init s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- int uart;
-
- for (uart = 0; uart < no; uart++) {
- s5p_uart_resources[uart].resources->start = S5P6440_PA_UART(uart);
- s5p_uart_resources[uart].resources->end = S5P6440_PA_UART(uart) + S5P_SZ_UART;
- }
-
- s5p64x0_common_init_uarts(cfg, no);
- s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
-
-void __init s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- s5p64x0_common_init_uarts(cfg, no);
- s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5p64x0/irq-eint.c b/arch/arm/mach-s5p64x0/irq-eint.c
deleted file mode 100644
index 275dc74f4a7..00000000000
--- a/arch/arm/mach-s5p64x0/irq-eint.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* arch/arm/mach-s5p64x0/irq-eint.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com/
- *
- * Based on linux/arch/arm/mach-s3c64xx/irq-eint.c
- *
- * S5P64X0 - Interrupt handling for External Interrupts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <plat/cpu.h>
-#include <plat/regs-irqtype.h>
-#include <plat/gpio-cfg.h>
-#include <plat/pm.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/regs-clock.h>
-
-#define eint_offset(irq) ((irq) - IRQ_EINT(0))
-
-static int s5p64x0_irq_eint_set_type(struct irq_data *data, unsigned int type)
-{
- int offs = eint_offset(data->irq);
- int shift;
- u32 ctrl, mask;
- u32 newvalue = 0;
-
- if (offs > 15)
- return -EINVAL;
-
- switch (type) {
- case IRQ_TYPE_NONE:
- printk(KERN_WARNING "No edge setting!\n");
- break;
- case IRQ_TYPE_EDGE_RISING:
- newvalue = S3C2410_EXTINT_RISEEDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- newvalue = S3C2410_EXTINT_FALLEDGE;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- newvalue = S3C2410_EXTINT_BOTHEDGE;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- newvalue = S3C2410_EXTINT_LOWLEV;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- newvalue = S3C2410_EXTINT_HILEV;
- break;
- default:
- printk(KERN_ERR "No such irq type %d", type);
- return -EINVAL;
- }
-
- shift = (offs / 2) * 4;
- mask = 0x7 << shift;
-
- ctrl = __raw_readl(S5P64X0_EINT0CON0) & ~mask;
- ctrl |= newvalue << shift;
- __raw_writel(ctrl, S5P64X0_EINT0CON0);
-
- /* Configure the GPIO pin for 6450 or 6440 based on CPU ID */
- if (soc_is_s5p6450())
- s3c_gpio_cfgpin(S5P6450_GPN(offs), S3C_GPIO_SFN(2));
- else
- s3c_gpio_cfgpin(S5P6440_GPN(offs), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-/*
- * s5p64x0_irq_demux_eint
- *
- * This function demuxes the IRQ from the group0 external interrupts,
- * from IRQ_EINT(0) to IRQ_EINT(15). It is designed to be inlined into
- * the specific handlers s5p64x0_irq_demux_eintX_Y.
- */
-static inline void s5p64x0_irq_demux_eint(unsigned int start, unsigned int end)
-{
- u32 status = __raw_readl(S5P64X0_EINT0PEND);
- u32 mask = __raw_readl(S5P64X0_EINT0MASK);
- unsigned int irq;
-
- status &= ~mask;
- status >>= start;
- status &= (1 << (end - start + 1)) - 1;
-
- for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
- if (status & 1)
- generic_handle_irq(irq);
- status >>= 1;
- }
-}
-
-static void s5p64x0_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
-{
- s5p64x0_irq_demux_eint(0, 3);
-}
-
-static void s5p64x0_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
-{
- s5p64x0_irq_demux_eint(4, 11);
-}
-
-static void s5p64x0_irq_demux_eint12_15(unsigned int irq,
- struct irq_desc *desc)
-{
- s5p64x0_irq_demux_eint(12, 15);
-}
-
-static int s5p64x0_alloc_gc(void)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- gc = irq_alloc_generic_chip("s5p64x0-eint", 1, S5P_IRQ_EINT_BASE,
- S5P_VA_GPIO, handle_level_irq);
- if (!gc) {
- printk(KERN_ERR "%s: irq_alloc_generic_chip for group 0"
- "external interrupts failed\n", __func__);
- return -EINVAL;
- }
-
- ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
- ct->chip.irq_set_wake = s3c_irqext_wake;
- ct->regs.ack = EINT0PEND_OFFSET;
- ct->regs.mask = EINT0MASK_OFFSET;
- irq_setup_generic_chip(gc, IRQ_MSK(16), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
- return 0;
-}
-
-static int __init s5p64x0_init_irq_eint(void)
-{
- int ret = s5p64x0_alloc_gc();
- irq_set_chained_handler(IRQ_EINT0_3, s5p64x0_irq_demux_eint0_3);
- irq_set_chained_handler(IRQ_EINT4_11, s5p64x0_irq_demux_eint4_11);
- irq_set_chained_handler(IRQ_EINT12_15, s5p64x0_irq_demux_eint12_15);
-
- return ret;
-}
-arch_initcall(s5p64x0_init_irq_eint);
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 4a1250cd135..a40e325d62c 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -24,9 +24,11 @@
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>
#include <linux/fb.h>
+#include <linux/mmc/host.h>
#include <video/platform_lcd.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/irq.h>
@@ -40,7 +42,6 @@
#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
-#include <plat/s5p6440.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -52,6 +53,9 @@
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/regs-fb.h>
+#include <plat/sdhci.h>
+
+#include "common.h"
#define SMDK6440_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -161,6 +165,25 @@ static struct platform_device *smdk6440_devices[] __initdata = {
&s5p6440_device_iis,
&s3c_device_fb,
&smdk6440_lcd_lte480wv,
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_hsmmc2,
+};
+
+static struct s3c_sdhci_platdata smdk6440_hsmmc0_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_NONE,
+};
+
+static struct s3c_sdhci_platdata smdk6440_hsmmc1_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_INTERNAL,
+#if defined(CONFIG_S5P64X0_SD_CH1_8BIT)
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdk6440_hsmmc2_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_NONE,
};
static struct s3c2410_platform_i2c s5p6440_i2c0_data __initdata = {
@@ -201,7 +224,7 @@ static struct platform_pwm_backlight_data smdk6440_bl_data = {
static void __init smdk6440_map_io(void)
{
- s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
+ s5p64x0_init_io(NULL, 0);
s3c24xx_init_clocks(12000000);
s3c24xx_init_uarts(smdk6440_uartcfgs, ARRAY_SIZE(smdk6440_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -234,6 +257,10 @@ static void __init smdk6440_machine_init(void)
s5p6440_set_lcd_interface();
s3c_fb_set_platdata(&smdk6440_lcd_pdata);
+ s3c_sdhci0_set_platdata(&smdk6440_hsmmc0_pdata);
+ s3c_sdhci1_set_platdata(&smdk6440_hsmmc1_pdata);
+ s3c_sdhci2_set_platdata(&smdk6440_hsmmc2_pdata);
+
platform_add_devices(smdk6440_devices, ARRAY_SIZE(smdk6440_devices));
}
@@ -242,7 +269,9 @@ MACHINE_START(SMDK6440, "SMDK6440")
.atag_offset = 0x100,
.init_irq = s5p6440_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdk6440_map_io,
.init_machine = smdk6440_machine_init,
.timer = &s5p_timer,
+ .restart = s5p64x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index 0ab129ecf00..efb69e2f2af 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -24,9 +24,11 @@
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>
#include <linux/fb.h>
+#include <linux/mmc/host.h>
#include <video/platform_lcd.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/irq.h>
@@ -40,7 +42,6 @@
#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
-#include <plat/s5p6450.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -52,6 +53,9 @@
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/regs-fb.h>
+#include <plat/sdhci.h>
+
+#include "common.h"
#define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -179,10 +183,28 @@ static struct platform_device *smdk6450_devices[] __initdata = {
&s5p6450_device_iis0,
&s3c_device_fb,
&smdk6450_lcd_lte480wv,
-
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+ &s3c_device_hsmmc2,
/* s5p6450_device_spi0 will be added */
};
+static struct s3c_sdhci_platdata smdk6450_hsmmc0_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_NONE,
+};
+
+static struct s3c_sdhci_platdata smdk6450_hsmmc1_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_NONE,
+#if defined(CONFIG_S5P64X0_SD_CH1_8BIT)
+ .max_width = 8,
+ .host_caps = MMC_CAP_8_BIT_DATA,
+#endif
+};
+
+static struct s3c_sdhci_platdata smdk6450_hsmmc2_pdata __initdata = {
+ .cd_type = S3C_SDHCI_CD_NONE,
+};
+
static struct s3c2410_platform_i2c s5p6450_i2c0_data __initdata = {
.flags = 0,
.slave_addr = 0x10,
@@ -221,7 +243,7 @@ static struct platform_pwm_backlight_data smdk6450_bl_data = {
static void __init smdk6450_map_io(void)
{
- s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
+ s5p64x0_init_io(NULL, 0);
s3c24xx_init_clocks(19200000);
s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -254,6 +276,10 @@ static void __init smdk6450_machine_init(void)
s5p6450_set_lcd_interface();
s3c_fb_set_platdata(&smdk6450_lcd_pdata);
+ s3c_sdhci0_set_platdata(&smdk6450_hsmmc0_pdata);
+ s3c_sdhci1_set_platdata(&smdk6450_hsmmc1_pdata);
+ s3c_sdhci2_set_platdata(&smdk6450_hsmmc2_pdata);
+
platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices));
}
@@ -262,7 +288,9 @@ MACHINE_START(SMDK6450, "SMDK6450")
.atag_offset = 0x100,
.init_irq = s5p6450_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdk6450_map_io,
.init_machine = smdk6450_machine_init,
.timer = &s5p_timer,
+ .restart = s5p64x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5p64x0/pm.c b/arch/arm/mach-s5p64x0/pm.c
index 69927243d25..23f9b22439c 100644
--- a/arch/arm/mach-s5p64x0/pm.c
+++ b/arch/arm/mach-s5p64x0/pm.c
@@ -160,7 +160,7 @@ static void s5p64x0_pm_prepare(void)
}
-static int s5p64x0_pm_add(struct sys_device *sysdev)
+static int s5p64x0_pm_add(struct device *dev)
{
pm_cpu_prep = s5p64x0_pm_prepare;
pm_cpu_sleep = s5p64x0_cpu_suspend;
@@ -169,15 +169,17 @@ static int s5p64x0_pm_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s5p64x0_pm_driver = {
- .add = s5p64x0_pm_add,
+static struct subsys_interface s5p64x0_pm_interface = {
+ .name = "s5p64x0_pm",
+ .subsys = &s5p64x0_subsys,
+ .add_dev = s5p64x0_pm_add,
};
static __init int s5p64x0_pm_drvinit(void)
{
s3c_pm_init();
- return sysdev_driver_register(&s5p64x0_sysclass, &s5p64x0_pm_driver);
+ return subsys_interface_register(&s5p64x0_pm_interface);
}
arch_initcall(s5p64x0_pm_drvinit);
diff --git a/arch/arm/mach-s5p64x0/setup-sdhci-gpio.c b/arch/arm/mach-s5p64x0/setup-sdhci-gpio.c
new file mode 100644
index 00000000000..8410af0d12b
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/setup-sdhci-gpio.c
@@ -0,0 +1,104 @@
+/* linux/arch/arm/mach-s5p64x0/setup-sdhci-gpio.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5P64X0 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/regs-clock.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/sdhci.h>
+#include <plat/cpu.h>
+
+void s5p64x0_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+
+ /* Set all the necessary GPG pins to special-function 2 */
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgrange_nopull(S5P6450_GPG(0), 2 + width,
+ S3C_GPIO_SFN(2));
+ else
+ s3c_gpio_cfgrange_nopull(S5P6440_GPG(0), 2 + width,
+ S3C_GPIO_SFN(2));
+
+ /* Set GPG[6] pin to special-function 2 - MMC0 CDn */
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ if (soc_is_s5p6450()) {
+ s3c_gpio_setpull(S5P6450_GPG(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5P6450_GPG(6), S3C_GPIO_SFN(2));
+ } else {
+ s3c_gpio_setpull(S5P6440_GPG(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5P6440_GPG(6), S3C_GPIO_SFN(2));
+ }
+ }
+}
+
+void s5p64x0_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+{
+ struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
+
+ /* Set GPH[0:1] pins to special-function 2 - CLK and CMD */
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgrange_nopull(S5P6450_GPH(0), 2, S3C_GPIO_SFN(2));
+ else
+ s3c_gpio_cfgrange_nopull(S5P6440_GPH(0), 2, S3C_GPIO_SFN(2));
+
+ switch (width) {
+ case 8:
+ /* Set data pins GPH[6:9] special-function 2 */
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgrange_nopull(S5P6450_GPH(6), 4,
+ S3C_GPIO_SFN(2));
+ else
+ s3c_gpio_cfgrange_nopull(S5P6440_GPH(6), 4,
+ S3C_GPIO_SFN(2));
+ case 4:
+ /* Set data pins GPH[2:5] special-function 2 */
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgrange_nopull(S5P6450_GPH(2), 4,
+ S3C_GPIO_SFN(2));
+ else
+ s3c_gpio_cfgrange_nopull(S5P6440_GPH(2), 4,
+ S3C_GPIO_SFN(2));
+ default:
+ break;
+ }
+
+ /* Set GPG[6] pin to special-function 3 - MMC1 CDn */
+ if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
+ if (soc_is_s5p6450()) {
+ s3c_gpio_setpull(S5P6450_GPG(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5P6450_GPG(6), S3C_GPIO_SFN(3));
+ } else {
+ s3c_gpio_setpull(S5P6440_GPG(6), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5P6440_GPG(6), S3C_GPIO_SFN(3));
+ }
+ }
+}
+
+void s5p6440_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
+{
+ /* Set GPC[4:5] pins to special-function 3 - CLK and CMD */
+ s3c_gpio_cfgrange_nopull(S5P6440_GPC(4), 2, S3C_GPIO_SFN(3));
+
+ /* Set data pins GPH[6:9] to special-function 3 */
+ s3c_gpio_cfgrange_nopull(S5P6440_GPH(6), 4, S3C_GPIO_SFN(3));
+}
+
+void s5p6450_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
+{
+ /* Set all the necessary GPG pins to special-function 3 */
+ s3c_gpio_cfgrange_nopull(S5P6450_GPG(7), 2 + width, S3C_GPIO_SFN(3));
+}
diff --git a/arch/arm/mach-s5p64x0/setup-spi.c b/arch/arm/mach-s5p64x0/setup-spi.c
new file mode 100644
index 00000000000..e9b84124035
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/setup-spi.c
@@ -0,0 +1,55 @@
+/* linux/arch/arm/mach-s5p64x0/setup-spi.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/cpu.h>
+#include <plat/s3c64xx-spi.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+{
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgall_range(S5P6450_GPC(0), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ else
+ s3c_gpio_cfgall_range(S5P6440_GPC(0), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+{
+ if (soc_is_s5p6450())
+ s3c_gpio_cfgall_range(S5P6450_GPC(4), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ else
+ s3c_gpio_cfgall_range(S5P6440_GPC(4), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index e538a4c67e9..75a26eaf263 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -45,6 +45,11 @@ config S5PC100_SETUP_SDHCI_GPIO
help
Common setup code for SDHCI gpio.
+config S5PC100_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations.
+
config MACH_SMDKC100
bool "SMDKC100"
select CPU_S5PC100
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index a5e6e608b49..118c711f74e 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -9,28 +9,24 @@ obj-m :=
obj-n :=
obj- :=
-# Core support for S5PC100 system
+# Core
-obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o
-obj-$(CONFIG_CPU_S5PC100) += setup-i2c0.o
-obj-$(CONFIG_CPU_S5PC100) += dma.o
+obj-y += common.o clock.o
-# Helper and device support
-
-obj-$(CONFIG_S5PC100_SETUP_FB_24BPP) += setup-fb-24bpp.o
-obj-$(CONFIG_S5PC100_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S5PC100_SETUP_IDE) += setup-ide.o
-obj-$(CONFIG_S5PC100_SETUP_KEYPAD) += setup-keypad.o
-obj-$(CONFIG_S5PC100_SETUP_SDHCI) += setup-sdhci.o
-obj-$(CONFIG_S5PC100_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
-
-# device support
-obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-y += dma.o
# machine support
obj-$(CONFIG_MACH_SMDKC100) += mach-smdkc100.o
# device support
+
obj-y += dev-audio.o
+
+obj-y += setup-i2c0.o
+obj-$(CONFIG_S5PC100_SETUP_FB_24BPP) += setup-fb-24bpp.o
+obj-$(CONFIG_S5PC100_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_S5PC100_SETUP_IDE) += setup-ide.o
+obj-$(CONFIG_S5PC100_SETUP_KEYPAD) += setup-keypad.o
+obj-$(CONFIG_S5PC100_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S5PC100_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 8d47709da71..247194dd366 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -27,7 +27,8 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/s5pc100.h>
+
+#include "common.h"
static struct clk s5p_clk_otgphy = {
.name = "otg_phy",
@@ -426,24 +427,6 @@ static struct clk init_clocks_off[] = {
.enable = s5pc100_d0_2_ctrl,
.ctrlbit = (1 << 1),
}, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.2",
- .parent = &clk_div_d1_bus.clk,
- .enable = s5pc100_d1_0_ctrl,
- .ctrlbit = (1 << 7),
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_div_d1_bus.clk,
- .enable = s5pc100_d1_0_ctrl,
- .ctrlbit = (1 << 6),
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.0",
- .parent = &clk_div_d1_bus.clk,
- .enable = s5pc100_d1_0_ctrl,
- .ctrlbit = (1 << 5),
- }, {
.name = "modemif",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
@@ -673,24 +656,6 @@ static struct clk init_clocks_off[] = {
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = "spi_48m",
- .devname = "s3c64xx-spi.0",
- .parent = &clk_mout_48m.clk,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = (1 << 7),
- }, {
- .name = "spi_48m",
- .devname = "s3c64xx-spi.1",
- .parent = &clk_mout_48m.clk,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = (1 << 8),
- }, {
- .name = "spi_48m",
- .devname = "s3c64xx-spi.2",
- .parent = &clk_mout_48m.clk,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = (1 << 9),
- }, {
.name = "mmc_48m",
.devname = "s3c-sdhci.0",
.parent = &clk_mout_48m.clk,
@@ -711,6 +676,54 @@ static struct clk init_clocks_off[] = {
},
};
+static struct clk clk_hsmmc2 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.2",
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 7),
+};
+
+static struct clk clk_hsmmc1 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.1",
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 6),
+};
+
+static struct clk clk_hsmmc0 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.0",
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 5),
+};
+
+static struct clk clk_48m_spi0 = {
+ .name = "spi_48m",
+ .devname = "s3c64xx-spi.0",
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 7),
+};
+
+static struct clk clk_48m_spi1 = {
+ .name = "spi_48m",
+ .devname = "s3c64xx-spi.1",
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 8),
+};
+
+static struct clk clk_48m_spi2 = {
+ .name = "spi_48m",
+ .devname = "s3c64xx-spi.2",
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 9),
+};
+
static struct clk clk_vclk54m = {
.name = "vclk_54m",
.rate = 54000000,
@@ -929,49 +942,6 @@ static struct clksrc_clk clk_sclk_spdif = {
static struct clksrc_clk clksrcs[] = {
{
.clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
- .ctrlbit = (1 << 4),
- .enable = s5pc100_sclk0_ctrl,
-
- },
- .sources = &clk_src_group1,
- .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
- .ctrlbit = (1 << 5),
- .enable = s5pc100_sclk0_ctrl,
-
- },
- .sources = &clk_src_group1,
- .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.2",
- .ctrlbit = (1 << 6),
- .enable = s5pc100_sclk0_ctrl,
-
- },
- .sources = &clk_src_group1,
- .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV2, .shift = 12, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .ctrlbit = (1 << 3),
- .enable = s5pc100_sclk0_ctrl,
-
- },
- .sources = &clk_src_group2,
- .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
- .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
- }, {
- .clk = {
.name = "sclk_mixer",
.ctrlbit = (1 << 6),
.enable = s5pc100_sclk0_ctrl,
@@ -1024,39 +994,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 24, .size = 4 },
}, {
.clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.0",
- .ctrlbit = (1 << 12),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_mmc0,
- .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV3, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.1",
- .ctrlbit = (1 << 13),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_mmc12,
- .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV3, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.2",
- .ctrlbit = (1 << 14),
- .enable = s5pc100_sclk1_ctrl,
-
- },
- .sources = &clk_src_mmc12,
- .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
- .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 },
- }, {
- .clk = {
.name = "sclk_irda",
.ctrlbit = (1 << 10),
.enable = s5pc100_sclk0_ctrl,
@@ -1098,6 +1035,89 @@ static struct clksrc_clk clksrcs[] = {
},
};
+static struct clksrc_clk clk_sclk_uart = {
+ .clk = {
+ .name = "uclk1",
+ .ctrlbit = (1 << 3),
+ .enable = s5pc100_sclk0_ctrl,
+ },
+ .sources = &clk_src_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.0",
+ .ctrlbit = (1 << 12),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_mmc0,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.1",
+ .ctrlbit = (1 << 13),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_mmc12,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.2",
+ .ctrlbit = (1 << 14),
+ .enable = s5pc100_sclk1_ctrl,
+ },
+ .sources = &clk_src_mmc12,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.0",
+ .ctrlbit = (1 << 4),
+ .enable = s5pc100_sclk0_ctrl,
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.1",
+ .ctrlbit = (1 << 5),
+ .enable = s5pc100_sclk0_ctrl,
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi2 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.2",
+ .ctrlbit = (1 << 6),
+ .enable = s5pc100_sclk0_ctrl,
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 12, .size = 4 },
+};
+
/* Clock initialisation code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
@@ -1127,6 +1147,25 @@ static struct clksrc_clk *sysclks[] = {
&clk_sclk_spdif,
};
+static struct clk *clk_cdev[] = {
+ &clk_hsmmc0,
+ &clk_hsmmc1,
+ &clk_hsmmc2,
+ &clk_48m_spi0,
+ &clk_48m_spi1,
+ &clk_48m_spi2,
+};
+
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uart,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+ &clk_sclk_spi2,
+};
+
void __init_or_cpufreq s5pc100_setup_clocks(void)
{
unsigned long xtal;
@@ -1266,6 +1305,24 @@ static struct clk *clks[] __initdata = {
&clk_pcmcdclk1,
};
+static struct clk_lookup s5pc100_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uart.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_48m_spi0),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_48m_spi1),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk2", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk1", &clk_48m_spi2),
+ CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk2", &clk_sclk_spi2.clk),
+};
+
void __init s5pc100_register_clocks(void)
{
int ptr;
@@ -1277,9 +1334,16 @@ void __init s5pc100_register_clocks(void)
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
+ s3c_register_clksrc(clksrc_cdev[ptr], 1);
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s5pc100_clk_lookup, ARRAY_SIZE(s5pc100_clk_lookup));
+
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
+ s3c_disable_clocks(clk_cdev[ptr], 1);
s3c24xx_register_clock(&dummy_apb_pclk);
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/common.c
index fd2708e7d8a..c9095730a7f 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/common.c
@@ -1,17 +1,16 @@
-/* linux/arch/arm/mach-s5pc100/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * Based on mach-s3c6410/cpu.c
+ * Common Code for S5PC100
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
+ */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -21,40 +20,78 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
+#include <asm/irq.h>
+#include <asm/proc-fns.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
-#include <asm/proc-fns.h>
-
-#include <mach/hardware.h>
#include <mach/map.h>
-#include <asm/irq.h>
-
-#include <plat/regs-serial.h>
+#include <mach/hardware.h>
#include <mach/regs-clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
-#include <plat/ata-core.h>
-#include <plat/iic-core.h>
#include <plat/sdhci.h>
#include <plat/adc-core.h>
-#include <plat/onenand-core.h>
+#include <plat/ata-core.h>
#include <plat/fb-core.h>
+#include <plat/iic-core.h>
+#include <plat/onenand-core.h>
+#include <plat/regs-serial.h>
+#include <plat/watchdog-reset.h>
+
+#include "common.h"
+
+static const char name_s5pc100[] = "S5PC100";
-#include <plat/s5pc100.h>
+static struct cpu_table cpu_ids[] __initdata = {
+ {
+ .idcode = S5PC100_CPU_ID,
+ .idmask = S5PC100_CPU_MASK,
+ .map_io = s5pc100_map_io,
+ .init_clocks = s5pc100_init_clocks,
+ .init_uarts = s5pc100_init_uarts,
+ .init = s5pc100_init,
+ .name = name_s5pc100,
+ },
+};
/* Initial IO mappings */
static struct map_desc s5pc100_iodesc[] __initdata = {
{
+ .virtual = (unsigned long)S5P_VA_CHIPID,
+ .pfn = __phys_to_pfn(S5PC100_PA_CHIPID),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(S5PC100_PA_SYSCON),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_TIMER,
+ .pfn = __phys_to_pfn(S5PC100_PA_TIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_WATCHDOG,
+ .pfn = __phys_to_pfn(S5PC100_PA_WATCHDOG),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SROMC,
+ .pfn = __phys_to_pfn(S5PC100_PA_SROMC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
.virtual = (unsigned long)S5P_VA_SYSTIMER,
.pfn = __phys_to_pfn(S5PC100_PA_SYSTIMER),
.length = SZ_16K,
@@ -100,15 +137,27 @@ static void s5pc100_idle(void)
local_irq_enable();
}
-/* s5pc100_map_io
+/*
+ * s5pc100_map_io
*
- * register the standard cpu IO areas
-*/
+ * register the standard CPU IO areas
+ */
-void __init s5pc100_map_io(void)
+void __init s5pc100_init_io(struct map_desc *mach_desc, int size)
{
+ /* initialize the io descriptors we need for initialization */
iotable_init(s5pc100_iodesc, ARRAY_SIZE(s5pc100_iodesc));
+ if (mach_desc)
+ iotable_init(mach_desc, size);
+
+ /* detect cpu id and rev. */
+ s5p_init_cpu(S5P_VA_CHIPID);
+ s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init s5pc100_map_io(void)
+{
/* initialise device information early */
s5pc100_default_sdhci0();
s5pc100_default_sdhci1();
@@ -143,19 +192,19 @@ void __init s5pc100_init_irq(void)
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-static struct sysdev_class s5pc100_sysclass = {
- .name = "s5pc100-core",
+static struct bus_type s5pc100_subsys = {
+ .name = "s5pc100-core",
+ .dev_name = "s5pc100-core",
};
-static struct sys_device s5pc100_sysdev = {
- .cls = &s5pc100_sysclass,
+static struct device s5pc100_dev = {
+ .bus = &s5pc100_subsys,
};
static int __init s5pc100_core_init(void)
{
- return sysdev_class_register(&s5pc100_sysclass);
+ return subsys_system_register(&s5pc100_subsys, NULL);
}
-
core_initcall(s5pc100_core_init);
int __init s5pc100_init(void)
@@ -165,5 +214,20 @@ int __init s5pc100_init(void)
/* set idle function */
pm_idle = s5pc100_idle;
- return sysdev_register(&s5pc100_sysdev);
+ return device_register(&s5pc100_dev);
+}
+
+/* uart registration process */
+
+void __init s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
+}
+
+void s5pc100_restart(char mode, const char *cmd)
+{
+ if (mode != 's')
+ arch_wdt_reset();
+
+ soft_restart(0);
}
diff --git a/arch/arm/mach-s5pc100/common.h b/arch/arm/mach-s5pc100/common.h
new file mode 100644
index 00000000000..9fbd3ae2b40
--- /dev/null
+++ b/arch/arm/mach-s5pc100/common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for S5PC100 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H
+#define __ARCH_ARM_MACH_S5PC100_COMMON_H
+
+void s5pc100_init_io(struct map_desc *mach_desc, int size);
+void s5pc100_init_irq(void);
+
+void s5pc100_register_clocks(void);
+void s5pc100_setup_clocks(void);
+
+void s5pc100_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5PC100
+
+extern int s5pc100_init(void);
+extern void s5pc100_map_io(void);
+extern void s5pc100_init_clocks(int xtal);
+extern void s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5pc100_init_clocks NULL
+#define s5pc100_init_uarts NULL
+#define s5pc100_map_io NULL
+#define s5pc100_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5PC100_COMMON_H */
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
deleted file mode 100644
index e5d6c4dceb5..00000000000
--- a/arch/arm/mach-s5pc100/dev-spi.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/* linux/arch/arm/mach-s5pc100/dev-spi.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/spi-clocks.h>
-#include <mach/irqs.h>
-
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-#include <plat/irqs.h>
-
-static char *spi_src_clks[] = {
- [S5PC100_SPI_SRCCLK_PCLK] = "pclk",
- [S5PC100_SPI_SRCCLK_48M] = "spi_48m",
- [S5PC100_SPI_SRCCLK_SPIBUS] = "spi_bus",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the CS.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s5pc100_spi_cfg_gpio(struct platform_device *pdev)
-{
- switch (pdev->id) {
- case 0:
- s3c_gpio_cfgall_range(S5PC100_GPB(0), 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
- break;
-
- case 1:
- s3c_gpio_cfgall_range(S5PC100_GPB(4), 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
- break;
-
- case 2:
- s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
- s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
- s3c_gpio_cfgall_range(S5PC100_GPB(2), 2,
- S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct resource s5pc100_spi0_resource[] = {
- [0] = {
- .start = S5PC100_PA_SPI0,
- .end = S5PC100_PA_SPI0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
- .cfg_gpio = s5pc100_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s5pc100_device_spi0 = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5pc100_spi0_resource),
- .resource = s5pc100_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pc100_spi0_pdata,
- },
-};
-
-static struct resource s5pc100_spi1_resource[] = {
- [0] = {
- .start = S5PC100_PA_SPI1,
- .end = S5PC100_PA_SPI1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI1_TX,
- .end = DMACH_SPI1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI1_RX,
- .end = DMACH_SPI1_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI1,
- .end = IRQ_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
- .cfg_gpio = s5pc100_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-struct platform_device s5pc100_device_spi1 = {
- .name = "s3c64xx-spi",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5pc100_spi1_resource),
- .resource = s5pc100_spi1_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pc100_spi1_pdata,
- },
-};
-
-static struct resource s5pc100_spi2_resource[] = {
- [0] = {
- .start = S5PC100_PA_SPI2,
- .end = S5PC100_PA_SPI2 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI2_TX,
- .end = DMACH_SPI2_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI2_RX,
- .end = DMACH_SPI2_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI2,
- .end = IRQ_SPI2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
- .cfg_gpio = s5pc100_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-struct platform_device s5pc100_device_spi2 = {
- .name = "s3c64xx-spi",
- .id = 2,
- .num_resources = ARRAY_SIZE(s5pc100_spi2_resource),
- .resource = s5pc100_spi2_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pc100_spi2_pdata,
- },
-};
-
-void __init s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S5PC100_SPI_SRCCLK_SPIBUS) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- pd = &s5pc100_spi0_pdata;
- break;
- case 1:
- pd = &s5pc100_spi1_pdata;
- break;
- case 2:
- pd = &s5pc100_spi2_pdata;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = spi_src_clks[src_clk_nr];
-}
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
index 065a087f5a8..c841f4d313f 100644
--- a/arch/arm/mach-s5pc100/dma.c
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -35,100 +35,42 @@
static u64 dma_dmamask = DMA_BIT_MASK(32);
-struct dma_pl330_peri pdma0_peri[30] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = DMACH_IRDA,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_AC97_MICIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMOUT,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_EXTERNAL,
- }, {
- .peri_id = (u8)DMACH_PWM,
- }, {
- .peri_id = (u8)DMACH_SPDIF,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_HSI_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_HSI_TX,
- .rqtype = MEMTODEV,
- },
+u8 pdma0_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_IRDA,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
+ DMACH_EXTERNAL,
+ DMACH_PWM,
+ DMACH_SPDIF,
+ DMACH_HSI_RX,
+ DMACH_HSI_TX,
};
struct dma_pl330_platdata s5pc100_pdma0_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
- .peri = pdma0_peri,
+ .peri_id = pdma0_peri,
};
struct amba_device s5pc100_device_pdma0 = {
@@ -147,98 +89,42 @@ struct amba_device s5pc100_device_pdma0 = {
.periphid = 0x00041330,
};
-struct dma_pl330_peri pdma1_peri[30] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = DMACH_IRDA,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ0,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ1,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ2,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ3,
- },
+u8 pdma1_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_IRDA,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM1_TX,
+ DMACH_MSM_REQ0,
+ DMACH_MSM_REQ1,
+ DMACH_MSM_REQ2,
+ DMACH_MSM_REQ3,
};
struct dma_pl330_platdata s5pc100_pdma1_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
- .peri = pdma1_peri,
+ .peri_id = pdma1_peri,
};
struct amba_device s5pc100_device_pdma1 = {
@@ -259,7 +145,12 @@ struct amba_device s5pc100_device_pdma1 = {
static int __init s5pc100_dma_init(void)
{
+ dma_cap_set(DMA_SLAVE, s5pc100_pdma0_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5pc100_pdma0_pdata.cap_mask);
amba_device_register(&s5pc100_device_pdma0, &iomem_resource);
+
+ dma_cap_set(DMA_SLAVE, s5pc100_pdma1_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5pc100_pdma1_pdata.cap_mask);
amba_device_register(&s5pc100_device_pdma1, &iomem_resource);
return 0;
diff --git a/arch/arm/mach-s5pc100/include/mach/entry-macro.S b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
index ba76af052c8..b8c242edfa2 100644
--- a/arch/arm/mach-s5pc100/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
@@ -12,39 +12,14 @@
* warranty of any kind, whether express or implied.
*/
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
- ldr \base, =VA_VIC0
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, # S5P_IRQ_OFFSET + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- @ otherwise try vic2
- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
.endm
diff --git a/arch/arm/mach-s5pc100/include/mach/irqs.h b/arch/arm/mach-s5pc100/include/mach/irqs.h
index d2eb4757381..2870f12c792 100644
--- a/arch/arm/mach-s5pc100/include/mach/irqs.h
+++ b/arch/arm/mach-s5pc100/include/mach/irqs.h
@@ -97,6 +97,8 @@
#define IRQ_SDMFIQ S5P_IRQ_VIC2(31)
#define IRQ_VIC_END S5P_IRQ_VIC2(31)
+#define IRQ_TIMER_BASE (11)
+
#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index ccbe6b767f7..54bc4f82e17 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -100,6 +100,9 @@
#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
#define S3C_PA_WDT S5PC100_PA_WATCHDOG
+#define S3C_PA_SPI0 S5PC100_PA_SPI0
+#define S3C_PA_SPI1 S5PC100_PA_SPI1
+#define S3C_PA_SPI2 S5PC100_PA_SPI2
#define S5P_PA_CHIPID S5PC100_PA_CHIPID
#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
diff --git a/arch/arm/mach-s5pc100/include/mach/system.h b/arch/arm/mach-s5pc100/include/mach/system.h
index a9ea57c0660..afc96c29851 100644
--- a/arch/arm/mach-s5pc100/include/mach/system.h
+++ b/arch/arm/mach-s5pc100/include/mach/system.h
@@ -11,8 +11,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H __FILE__
-#include <plat/system-reset.h>
-
static void arch_idle(void)
{
/* nothing here yet */
diff --git a/arch/arm/mach-s5pc100/include/mach/vmalloc.h b/arch/arm/mach-s5pc100/include/mach/vmalloc.h
deleted file mode 100644
index 44c8e5726d9..00000000000
--- a/arch/arm/mach-s5pc100/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C6400 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pc100/init.c b/arch/arm/mach-s5pc100/init.c
deleted file mode 100644
index 19d7b523c13..00000000000
--- a/arch/arm/mach-s5pc100/init.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/* linux/arch/arm/plat-s5pc100/s5pc100-init.c
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5pc100.h>
-
-/* uart registration process */
-void __init s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 26f5c91c942..674d22992f3 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -25,6 +25,7 @@
#include <linux/input.h>
#include <linux/pwm_backlight.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -42,7 +43,6 @@
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
-#include <plat/s5pc100.h>
#include <plat/fb.h>
#include <plat/iic.h>
#include <plat/ata.h>
@@ -53,6 +53,8 @@
#include <plat/backlight.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC100_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -215,7 +217,7 @@ static struct platform_pwm_backlight_data smdkc100_bl_data = {
static void __init smdkc100_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pc100_init_io(NULL, 0);
s3c24xx_init_clocks(12000000);
s3c24xx_init_uarts(smdkc100_uartcfgs, ARRAY_SIZE(smdkc100_uartcfgs));
}
@@ -250,7 +252,9 @@ MACHINE_START(SMDKC100, "SMDKC100")
/* Maintainer: Byungho Min <bhmin@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5pc100_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdkc100_map_io,
.init_machine = smdkc100_machine_init,
.timer = &s3c24xx_timer,
+ .restart = s5pc100_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pc100/setup-sdhci.c b/arch/arm/mach-s5pc100/setup-sdhci.c
deleted file mode 100644
index 6418c6e8a7b..00000000000
--- a/arch/arm/mach-s5pc100/setup-sdhci.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/* linux/arch/arm/mach-s5pc100/setup-sdhci.c
- *
- * Copyright 2008 Samsung Electronics
- *
- * S5PC100 - Helper functions for settign up SDHCI device(s) (HSMMC)
- *
- * Based on mach-s3c6410/setup-sdhci.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-
-/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-
-char *s5pc100_hsmmc_clksrcs[4] = {
- [0] = "hsmmc", /* HCLK */
- /* [1] = "hsmmc", - duplicate HCLK entry */
- [2] = "sclk_mmc", /* mmc_bus */
- /* [3] = "48m", - note not successfully used yet */
-};
diff --git a/arch/arm/mach-s5pc100/setup-spi.c b/arch/arm/mach-s5pc100/setup-spi.c
new file mode 100644
index 00000000000..431a6f747ca
--- /dev/null
+++ b/arch/arm/mach-s5pc100/setup-spi.c
@@ -0,0 +1,65 @@
+/* linux/arch/arm/mach-s5pc100/setup-spi.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+ .tx_st_done = 21,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PC100_GPB(0), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+ .tx_st_done = 21,
+};
+
+int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgall_range(S5PC100_GPB(4), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI2
+struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+ .tx_st_done = 21,
+};
+
+int s3c64xx_spi2_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PC100_GPB(2), 2,
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 646057ab2e4..2cdc42e838b 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -60,6 +60,11 @@ config S5PV210_SETUP_FIMC
help
Common setup code for the camera interfaces.
+config S5PV210_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations.
+
menu "S5PC110 Machines"
config MACH_AQUILA
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 009fbe53df9..76a121dd52b 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -10,30 +10,32 @@ obj-m :=
obj-n :=
obj- :=
-# Core support for S5PV210 system
+# Core
+
+obj-y += common.o clock.o
-obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o
-obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
obj-$(CONFIG_PM) += pm.o
+obj-y += dma.o
+
# machine support
obj-$(CONFIG_MACH_AQUILA) += mach-aquila.o
-obj-$(CONFIG_MACH_SMDKV210) += mach-smdkv210.o
-obj-$(CONFIG_MACH_SMDKC110) += mach-smdkc110.o
obj-$(CONFIG_MACH_GONI) += mach-goni.o
+obj-$(CONFIG_MACH_SMDKC110) += mach-smdkc110.o
+obj-$(CONFIG_MACH_SMDKV210) += mach-smdkv210.o
obj-$(CONFIG_MACH_TORBRECK) += mach-torbreck.o
# device support
obj-y += dev-audio.o
-obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-y += setup-i2c0.o
obj-$(CONFIG_S5PV210_SETUP_FB_24BPP) += setup-fb-24bpp.o
obj-$(CONFIG_S5PV210_SETUP_FIMC) += setup-fimc.o
-obj-$(CONFIG_S5PV210_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S5PV210_SETUP_I2C2) += setup-i2c2.o
+obj-$(CONFIG_S5PV210_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_S5PV210_SETUP_I2C2) += setup-i2c2.o
obj-$(CONFIG_S5PV210_SETUP_IDE) += setup-ide.o
obj-$(CONFIG_S5PV210_SETUP_KEYPAD) += setup-keypad.o
-obj-$(CONFIG_S5PV210_SETUP_SDHCI) += setup-sdhci.o
obj-$(CONFIG_S5PV210_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S5PV210_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 4c5ac7a69e9..c78dfddd77f 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <mach/map.h>
@@ -29,7 +29,8 @@
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
-#include <plat/s5pv210.h>
+
+#include "common.h"
static unsigned long xtal;
@@ -399,30 +400,6 @@ static struct clk init_clocks_off[] = {
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<25),
}, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.0",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1<<16),
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1<<17),
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.2",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1<<18),
- }, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.3",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1<<19),
- }, {
.name = "systimer",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
@@ -559,6 +536,38 @@ static struct clk init_clocks[] = {
},
};
+static struct clk clk_hsmmc0 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.0",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1<<16),
+};
+
+static struct clk clk_hsmmc1 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.1",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1<<17),
+};
+
+static struct clk clk_hsmmc2 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.2",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1<<18),
+};
+
+static struct clk clk_hsmmc3 = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.3",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1<<19),
+};
+
static struct clk *clkset_uart_list[] = {
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
@@ -809,46 +818,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 12, .size = 3 },
}, {
.clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.0",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 12),
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.1",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 13),
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.2",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 14),
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 24, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 },
- }, {
- .clk = {
- .name = "uclk1",
- .devname = "s5pv210-uart.3",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 15),
- },
- .sources = &clkset_uart,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 },
- }, {
- .clk = {
.name = "sclk_fimc",
.devname = "s5pv210-fimc.0",
.enable = s5pv210_clk_mask1_ctrl,
@@ -906,46 +875,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 4 },
}, {
.clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.0",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 8),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.1",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 9),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.2",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 10),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_mmc",
- .devname = "s3c-sdhci.3",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 11),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
- }, {
- .clk = {
.name = "sclk_mfc",
.devname = "s5p-mfc",
.enable = s5pv210_clk_ip0_ctrl,
@@ -983,26 +912,6 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 28, .size = 4 },
}, {
.clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 16),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 },
- }, {
- .clk = {
- .name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
- .enable = s5pv210_clk_mask0_ctrl,
- .ctrlbit = (1 << 17),
- },
- .sources = &clkset_group2,
- .reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 },
- .reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 },
- }, {
- .clk = {
.name = "sclk_pwi",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 29),
@@ -1022,6 +931,147 @@ static struct clksrc_clk clksrcs[] = {
},
};
+static struct clksrc_clk clk_sclk_uart0 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "s5pv210-uart.0",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart1 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "s5pv210-uart.1",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 13),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart2 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "s5pv210-uart.2",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 14),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_uart3 = {
+ .clk = {
+ .name = "uclk1",
+ .devname = "s5pv210-uart.3",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 15),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc0 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.0",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc1 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.1",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 9),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc2 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.2",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 10),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_mmc3 = {
+ .clk = {
+ .name = "sclk_mmc",
+ .devname = "s3c-sdhci.3",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 11),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.0",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "s3c64xx-spi.1",
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 17),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 },
+};
+
+
+static struct clksrc_clk *clksrc_cdev[] = {
+ &clk_sclk_uart0,
+ &clk_sclk_uart1,
+ &clk_sclk_uart2,
+ &clk_sclk_uart3,
+ &clk_sclk_mmc0,
+ &clk_sclk_mmc1,
+ &clk_sclk_mmc2,
+ &clk_sclk_mmc3,
+ &clk_sclk_spi0,
+ &clk_sclk_spi1,
+};
+
+static struct clk *clk_cdev[] = {
+ &clk_hsmmc0,
+ &clk_hsmmc1,
+ &clk_hsmmc2,
+ &clk_hsmmc3,
+};
+
/* Clock initialisation code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
@@ -1261,6 +1311,25 @@ static struct clk *clks[] __initdata = {
&clk_pcmcdclk2,
};
+static struct clk_lookup s5pv210_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud0", &clk_p),
+ CLKDEV_INIT("s5pv210-uart.0", "clk_uart_baud1", &clk_sclk_uart0.clk),
+ CLKDEV_INIT("s5pv210-uart.1", "clk_uart_baud1", &clk_sclk_uart1.clk),
+ CLKDEV_INIT("s5pv210-uart.2", "clk_uart_baud1", &clk_sclk_uart2.clk),
+ CLKDEV_INIT("s5pv210-uart.3", "clk_uart_baud1", &clk_sclk_uart3.clk),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2),
+ CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.0", &clk_hsmmc3),
+ CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
+ CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
+ CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk),
+ CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+};
+
void __init s5pv210_register_clocks(void)
{
int ptr;
@@ -1273,11 +1342,19 @@ void __init s5pv210_register_clocks(void)
for (ptr = 0; ptr < ARRAY_SIZE(sclk_tv); ptr++)
s3c_register_clksrc(sclk_tv[ptr], 1);
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
+ s3c_register_clksrc(clksrc_cdev[ptr], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s5pv210_clk_lookup, ARRAY_SIZE(s5pv210_clk_lookup));
+
+ s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
+ for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
+ s3c_disable_clocks(clk_cdev[ptr], 1);
s3c24xx_register_clock(&dummy_apb_pclk);
s3c_pwmclk_init();
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/common.c
index 84ec7463323..9c1bcdcc12c 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/common.c
@@ -1,12 +1,13 @@
-/* linux/arch/arm/mach-s5pv210/cpu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
+ * Common Codes for S5PV210
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
+ */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -17,37 +18,78 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
+#include <linux/serial_core.h>
+#include <asm/proc-fns.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
-#include <asm/proc-fns.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <plat/cpu.h>
-#include <plat/devs.h>
#include <plat/clock.h>
-#include <plat/fb-core.h>
-#include <plat/s5pv210.h>
+#include <plat/devs.h>
+#include <plat/sdhci.h>
#include <plat/adc-core.h>
#include <plat/ata-core.h>
+#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/keypad-core.h>
-#include <plat/sdhci.h>
-#include <plat/reset.h>
#include <plat/tv-core.h>
+#include <plat/regs-serial.h>
+
+#include "common.h"
+
+static const char name_s5pv210[] = "S5PV210/S5PC110";
+
+static struct cpu_table cpu_ids[] __initdata = {
+ {
+ .idcode = S5PV210_CPU_ID,
+ .idmask = S5PV210_CPU_MASK,
+ .map_io = s5pv210_map_io,
+ .init_clocks = s5pv210_init_clocks,
+ .init_uarts = s5pv210_init_uarts,
+ .init = s5pv210_init,
+ .name = name_s5pv210,
+ },
+};
/* Initial IO mappings */
static struct map_desc s5pv210_iodesc[] __initdata = {
{
+ .virtual = (unsigned long)S5P_VA_CHIPID,
+ .pfn = __phys_to_pfn(S5PV210_PA_CHIPID),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_SYS,
+ .pfn = __phys_to_pfn(S5PV210_PA_SYSCON),
+ .length = SZ_64K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_TIMER,
+ .pfn = __phys_to_pfn(S5PV210_PA_TIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S3C_VA_WATCHDOG,
+ .pfn = __phys_to_pfn(S5PV210_PA_WATCHDOG),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5P_VA_SROMC,
+ .pfn = __phys_to_pfn(S5PV210_PA_SROMC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
.virtual = (unsigned long)S5P_VA_SYSTIMER,
.pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
.length = SZ_4K,
@@ -108,19 +150,32 @@ static void s5pv210_idle(void)
local_irq_enable();
}
-static void s5pv210_sw_reset(void)
+void s5pv210_restart(char mode, const char *cmd)
{
__raw_writel(0x1, S5P_SWRESET);
}
-/* s5pv210_map_io
+/*
+ * s5pv210_map_io
*
* register the standard cpu IO areas
-*/
+ */
-void __init s5pv210_map_io(void)
+void __init s5pv210_init_io(struct map_desc *mach_desc, int size)
{
+ /* initialize the io descriptors we need for initialization */
iotable_init(s5pv210_iodesc, ARRAY_SIZE(s5pv210_iodesc));
+ if (mach_desc)
+ iotable_init(mach_desc, size);
+
+ /* detect cpu id and rev. */
+ s5p_init_cpu(S5P_VA_CHIPID);
+
+ s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
+}
+
+void __init s5pv210_map_io(void)
+{
init_consistent_dma_size(14 << 20);
/* initialise device information early */
@@ -174,19 +229,19 @@ void __init s5pv210_init_irq(void)
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-struct sysdev_class s5pv210_sysclass = {
- .name = "s5pv210-core",
+struct bus_type s5pv210_subsys = {
+ .name = "s5pv210-core",
+ .dev_name = "s5pv210-core",
};
-static struct sys_device s5pv210_sysdev = {
- .cls = &s5pv210_sysclass,
+static struct device s5pv210_dev = {
+ .bus = &s5pv210_subsys,
};
static int __init s5pv210_core_init(void)
{
- return sysdev_class_register(&s5pv210_sysclass);
+ return subsys_system_register(&s5pv210_subsys, NULL);
}
-
core_initcall(s5pv210_core_init);
int __init s5pv210_init(void)
@@ -196,8 +251,12 @@ int __init s5pv210_init(void)
/* set idle function */
pm_idle = s5pv210_idle;
- /* set sw_reset function */
- s5p_reset_hook = s5pv210_sw_reset;
+ return device_register(&s5pv210_dev);
+}
- return sysdev_register(&s5pv210_sysdev);
+/* uart registration process */
+
+void __init s5pv210_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
}
diff --git a/arch/arm/mach-s5pv210/common.h b/arch/arm/mach-s5pv210/common.h
new file mode 100644
index 00000000000..6ed2af5c751
--- /dev/null
+++ b/arch/arm/mach-s5pv210/common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common Header for S5PV210 machines
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H
+#define __ARCH_ARM_MACH_S5PV210_COMMON_H
+
+void s5pv210_init_io(struct map_desc *mach_desc, int size);
+void s5pv210_init_irq(void);
+
+void s5pv210_register_clocks(void);
+void s5pv210_setup_clocks(void);
+
+void s5pv210_restart(char mode, const char *cmd);
+
+#ifdef CONFIG_CPU_S5PV210
+
+extern int s5pv210_init(void);
+extern void s5pv210_map_io(void);
+extern void s5pv210_init_clocks(int xtal);
+extern void s5pv210_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+#else
+#define s5pv210_init_clocks NULL
+#define s5pv210_init_uarts NULL
+#define s5pv210_map_io NULL
+#define s5pv210_init NULL
+#endif
+
+#endif /* __ARCH_ARM_MACH_S5PV210_COMMON_H */
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
deleted file mode 100644
index eaf9a7bff7a..00000000000
--- a/arch/arm/mach-s5pv210/dev-spi.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/dev-spi.c
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-
-#include <mach/dma.h>
-#include <mach/map.h>
-#include <mach/irqs.h>
-#include <mach/spi-clocks.h>
-
-#include <plat/s3c64xx-spi.h>
-#include <plat/gpio-cfg.h>
-
-static char *spi_src_clks[] = {
- [S5PV210_SPI_SRCCLK_PCLK] = "pclk",
- [S5PV210_SPI_SRCCLK_SCLK] = "sclk_spi",
-};
-
-/* SPI Controller platform_devices */
-
-/* Since we emulate multi-cs capability, we do not touch the CS.
- * The emulated CS is toggled by board specific mechanism, as it can
- * be either some immediate GPIO or some signal out of some other
- * chip in between ... or some yet another way.
- * We simply do not assume anything about CS.
- */
-static int s5pv210_spi_cfg_gpio(struct platform_device *pdev)
-{
- unsigned int base;
-
- switch (pdev->id) {
- case 0:
- base = S5PV210_GPB(0);
- break;
-
- case 1:
- base = S5PV210_GPB(4);
- break;
-
- default:
- dev_err(&pdev->dev, "Invalid SPI Controller number!");
- return -EINVAL;
- }
-
- s3c_gpio_cfgall_range(base, 3,
- S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
-
- return 0;
-}
-
-static struct resource s5pv210_spi0_resource[] = {
- [0] = {
- .start = S5PV210_PA_SPI0,
- .end = S5PV210_PA_SPI0 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI0_TX,
- .end = DMACH_SPI0_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI0_RX,
- .end = DMACH_SPI0_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI0,
- .end = IRQ_SPI0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
- .cfg_gpio = s5pv210_spi_cfg_gpio,
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .tx_st_done = 25,
-};
-
-static u64 spi_dmamask = DMA_BIT_MASK(32);
-
-struct platform_device s5pv210_device_spi0 = {
- .name = "s3c64xx-spi",
- .id = 0,
- .num_resources = ARRAY_SIZE(s5pv210_spi0_resource),
- .resource = s5pv210_spi0_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pv210_spi0_pdata,
- },
-};
-
-static struct resource s5pv210_spi1_resource[] = {
- [0] = {
- .start = S5PV210_PA_SPI1,
- .end = S5PV210_PA_SPI1 + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = DMACH_SPI1_TX,
- .end = DMACH_SPI1_TX,
- .flags = IORESOURCE_DMA,
- },
- [2] = {
- .start = DMACH_SPI1_RX,
- .end = DMACH_SPI1_RX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = IRQ_SPI1,
- .end = IRQ_SPI1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
- .cfg_gpio = s5pv210_spi_cfg_gpio,
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .tx_st_done = 25,
-};
-
-struct platform_device s5pv210_device_spi1 = {
- .name = "s3c64xx-spi",
- .id = 1,
- .num_resources = ARRAY_SIZE(s5pv210_spi1_resource),
- .resource = s5pv210_spi1_resource,
- .dev = {
- .dma_mask = &spi_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &s5pv210_spi1_pdata,
- },
-};
-
-void __init s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
-{
- struct s3c64xx_spi_info *pd;
-
- /* Reject invalid configuration */
- if (!num_cs || src_clk_nr < 0
- || src_clk_nr > S5PV210_SPI_SRCCLK_SCLK) {
- printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
- return;
- }
-
- switch (cntrlr) {
- case 0:
- pd = &s5pv210_spi0_pdata;
- break;
- case 1:
- pd = &s5pv210_spi1_pdata;
- break;
- default:
- printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
- __func__, cntrlr);
- return;
- }
-
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- pd->src_clk_name = spi_src_clks[src_clk_nr];
-}
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 86b749c18b7..a6113e0267f 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -35,90 +35,40 @@
static u64 dma_dmamask = DMA_BIT_MASK(32);
-struct dma_pl330_peri pdma0_peri[28] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_AC97_MICIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMIN,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_AC97_PCMOUT,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_PWM,
- }, {
- .peri_id = (u8)DMACH_SPDIF,
- .rqtype = MEMTODEV,
- },
+u8 pdma0_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_MAX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
+ DMACH_MAX,
+ DMACH_PWM,
+ DMACH_SPDIF,
};
struct dma_pl330_platdata s5pv210_pdma0_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
- .peri = pdma0_peri,
+ .peri_id = pdma0_peri,
};
struct amba_device s5pv210_device_pdma0 = {
@@ -137,102 +87,44 @@ struct amba_device s5pv210_device_pdma0 = {
.periphid = 0x00041330,
};
-struct dma_pl330_peri pdma1_peri[32] = {
- {
- .peri_id = (u8)DMACH_UART0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_UART3_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_UART3_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_I2S0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S0S_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_I2S2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_I2S2_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_SPI1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_SPI1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_MAX,
- }, {
- .peri_id = (u8)DMACH_PCM0_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM0_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_PCM1_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM1_TX,
- .rqtype = MEMTODEV,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ0,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ1,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ2,
- }, {
- .peri_id = (u8)DMACH_MSM_REQ3,
- }, {
- .peri_id = (u8)DMACH_PCM2_RX,
- .rqtype = DEVTOMEM,
- }, {
- .peri_id = (u8)DMACH_PCM2_TX,
- .rqtype = MEMTODEV,
- },
+u8 pdma1_peri[] = {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_MAX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_MAX,
+ DMACH_MAX,
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM1_TX,
+ DMACH_MSM_REQ0,
+ DMACH_MSM_REQ1,
+ DMACH_MSM_REQ2,
+ DMACH_MSM_REQ3,
+ DMACH_PCM2_RX,
+ DMACH_PCM2_TX,
};
struct dma_pl330_platdata s5pv210_pdma1_pdata = {
.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
- .peri = pdma1_peri,
+ .peri_id = pdma1_peri,
};
struct amba_device s5pv210_device_pdma1 = {
@@ -253,7 +145,12 @@ struct amba_device s5pv210_device_pdma1 = {
static int __init s5pv210_dma_init(void)
{
+ dma_cap_set(DMA_SLAVE, s5pv210_pdma0_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5pv210_pdma0_pdata.cap_mask);
amba_device_register(&s5pv210_device_pdma0, &iomem_resource);
+
+ dma_cap_set(DMA_SLAVE, s5pv210_pdma1_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, s5pv210_pdma1_pdata.cap_mask);
amba_device_register(&s5pv210_device_pdma1, &iomem_resource);
return 0;
diff --git a/arch/arm/mach-s5pv210/include/mach/entry-macro.S b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
index 3aa41ac59f0..bebca1b5d0b 100644
--- a/arch/arm/mach-s5pv210/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5pv210/include/mach/entry-macro.S
@@ -10,45 +10,8 @@
* published by the Free Software Foundation.
*/
-#include <asm/hardware/vic.h>
-#include <mach/map.h>
-#include <plat/irqs.h>
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =VA_VIC0
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, # S5P_IRQ_OFFSET + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- @ otherwise try vic2
- addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- @ otherwise try vic3
- addeq \tmp, \base, #(VA_VIC3 - VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
- .endm
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 5e0de3a31f3..e777e010ed2 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -118,6 +118,8 @@
#define IRQ_MDNIE3 S5P_IRQ_VIC3(8)
#define IRQ_VIC_END S5P_IRQ_VIC3(31)
+#define IRQ_TIMER_BASE (11)
+
#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 7ff609f1568..89c34b8f73b 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -109,6 +109,8 @@
#define S3C_PA_RTC S5PV210_PA_RTC
#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
#define S3C_PA_WDT S5PV210_PA_WATCHDOG
+#define S3C_PA_SPI0 S5PV210_PA_SPI0
+#define S3C_PA_SPI1 S5PV210_PA_SPI1
#define S5P_PA_CHIPID S5PV210_PA_CHIPID
#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
diff --git a/arch/arm/mach-s5pv210/include/mach/system.h b/arch/arm/mach-s5pv210/include/mach/system.h
index af8a200b213..bf288ced860 100644
--- a/arch/arm/mach-s5pv210/include/mach/system.h
+++ b/arch/arm/mach-s5pv210/include/mach/system.h
@@ -13,8 +13,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H __FILE__
-#include <plat/system-reset.h>
-
static void arch_idle(void)
{
/* nothing here yet */
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
deleted file mode 100644
index a6c659d68a5..00000000000
--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/mach-s5p6442/include/mach/vmalloc.h
- *
- * S5PV210 vmalloc definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H __FILE__
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv210/init.c b/arch/arm/mach-s5pv210/init.c
deleted file mode 100644
index 4865ae2c475..00000000000
--- a/arch/arm/mach-s5pv210/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/init.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/s5pv210.h>
-#include <plat/regs-serial.h>
-
-static struct s3c24xx_uart_clksrc s5pv210_serial_clocks[] = {
- [0] = {
- .name = "pclk",
- .divisor = 1,
- .min_baud = 0,
- .max_baud = 0,
- },
-};
-
-/* uart registration process */
-void __init s5pv210_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
-{
- struct s3c2410_uartcfg *tcfg = cfg;
- u32 ucnt;
-
- for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
- if (!tcfg->clocks) {
- tcfg->clocks = s5pv210_serial_clocks;
- tcfg->clocks_size = ARRAY_SIZE(s5pv210_serial_clocks);
- }
- }
-
- s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
-}
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 5811a96125f..5e734d025a6 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -22,6 +22,7 @@
#include <linux/input.h>
#include <linux/gpio.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
@@ -32,7 +33,6 @@
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fb.h>
@@ -41,6 +41,8 @@
#include <plat/s5p-time.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define AQUILA_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -595,8 +597,7 @@ static struct s3c_sdhci_platdata aquila_hsmmc2_data __initdata = {
static void aquila_setup_sdhci(void)
{
- gpio_request(AQUILA_EXT_FLASH_EN, "FLASH_EN");
- gpio_direction_output(AQUILA_EXT_FLASH_EN, 1);
+ gpio_request_one(AQUILA_EXT_FLASH_EN, GPIOF_OUT_INIT_HIGH, "FLASH_EN");
s3c_sdhci0_set_platdata(&aquila_hsmmc0_data);
s3c_sdhci1_set_platdata(&aquila_hsmmc1_data);
@@ -644,7 +645,7 @@ static void __init aquila_sound_init(void)
static void __init aquila_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pv210_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(aquila_uartcfgs, ARRAY_SIZE(aquila_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -680,7 +681,9 @@ MACHINE_START(AQUILA, "Aquila")
Kyungmin Park <kyungmin.park@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5pv210_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = aquila_map_io,
.init_machine = aquila_machine_init,
.timer = &s5p_timer,
+ .restart = s5pv210_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 15edcae448b..ff915261043 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -27,6 +27,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
@@ -37,7 +38,6 @@
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/fb.h>
@@ -54,6 +54,8 @@
#include <media/s5p_fimc.h>
#include <media/noon010pc30.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define GONI_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -227,8 +229,7 @@ static void __init goni_radio_init(void)
i2c1_devs[0].irq = gpio_to_irq(gpio);
gpio = S5PV210_GPJ2(5); /* XMSMDATA_5 */
- gpio_request(gpio, "FM_RST");
- gpio_direction_output(gpio, 1);
+ gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "FM_RST");
}
/* TSP */
@@ -264,8 +265,7 @@ static void __init goni_tsp_init(void)
int gpio;
gpio = S5PV210_GPJ1(3); /* XMSMADDR_11 */
- gpio_request(gpio, "TSP_LDO_ON");
- gpio_direction_output(gpio, 1);
+ gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "TSP_LDO_ON");
gpio_export(gpio, 0);
gpio = S5PV210_GPJ0(5); /* XMSMADDR_5 */
@@ -890,7 +890,7 @@ static void __init goni_sound_init(void)
static void __init goni_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pv210_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(goni_uartcfgs, ARRAY_SIZE(goni_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -956,8 +956,10 @@ MACHINE_START(GONI, "GONI")
/* Maintainers: Kyungmin Park <kyungmin.park@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5pv210_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = goni_map_io,
.init_machine = goni_machine_init,
.timer = &s5p_timer,
.reserve = &goni_reserve,
+ .restart = s5pv210_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index f7266bb0cac..b323983b2c5 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -13,8 +13,9 @@
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/i2c.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
@@ -24,7 +25,6 @@
#include <mach/regs-clock.h>
#include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/ata.h>
@@ -32,6 +32,8 @@
#include <plat/pm.h>
#include <plat/s5p-time.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC110_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -109,7 +111,7 @@ static struct i2c_board_info smdkc110_i2c_devs2[] __initdata = {
static void __init smdkc110_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pv210_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -138,7 +140,9 @@ MACHINE_START(SMDKC110, "SMDKC110")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5pv210_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdkc110_map_io,
.init_machine = smdkc110_machine_init,
.timer = &s5p_timer,
+ .restart = s5pv210_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a9106c39239..dff9ea7b5bb 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -13,13 +13,14 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/serial_core.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/dm9000.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/pwm_backlight.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
@@ -33,7 +34,6 @@
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
#include <plat/gpio-cfg.h>
-#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/adc.h>
@@ -47,6 +47,8 @@
#include <plat/backlight.h>
#include <plat/regs-fb-v4.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -153,15 +155,12 @@ static void smdkv210_lte480wv_set_power(struct plat_lcd_data *pd,
{
if (power) {
#if !defined(CONFIG_BACKLIGHT_PWM)
- gpio_request(S5PV210_GPD0(3), "GPD0");
- gpio_direction_output(S5PV210_GPD0(3), 1);
+ gpio_request_one(S5PV210_GPD0(3), GPIOF_OUT_INIT_HIGH, "GPD0");
gpio_free(S5PV210_GPD0(3));
#endif
/* fire nRESET on power up */
- gpio_request(S5PV210_GPH0(6), "GPH0");
-
- gpio_direction_output(S5PV210_GPH0(6), 1);
+ gpio_request_one(S5PV210_GPH0(6), GPIOF_OUT_INIT_HIGH, "GPH0");
gpio_set_value(S5PV210_GPH0(6), 0);
mdelay(10);
@@ -172,8 +171,7 @@ static void smdkv210_lte480wv_set_power(struct plat_lcd_data *pd,
gpio_free(S5PV210_GPH0(6));
} else {
#if !defined(CONFIG_BACKLIGHT_PWM)
- gpio_request(S5PV210_GPD0(3), "GPD0");
- gpio_direction_output(S5PV210_GPD0(3), 0);
+ gpio_request_one(S5PV210_GPD0(3), GPIOF_OUT_INIT_LOW, "GPD0");
gpio_free(S5PV210_GPD0(3));
#endif
}
@@ -273,11 +271,12 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
+ .pwm_period_ns = 1000,
};
static void __init smdkv210_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pv210_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
@@ -315,7 +314,9 @@ MACHINE_START(SMDKV210, "SMDKV210")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5pv210_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = smdkv210_map_io,
.init_machine = smdkv210_machine_init,
.timer = &s5p_timer,
+ .restart = s5pv210_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-torbreck.c b/arch/arm/mach-s5pv210/mach-torbreck.c
index 97cc066c536..74e99bc0dc9 100644
--- a/arch/arm/mach-s5pv210/mach-torbreck.c
+++ b/arch/arm/mach-s5pv210/mach-torbreck.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/serial_core.h>
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
@@ -23,12 +24,13 @@
#include <mach/regs-clock.h>
#include <plat/regs-serial.h>
-#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/iic.h>
#include <plat/s5p-time.h>
+#include "common.h"
+
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define TORBRECK_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -102,7 +104,7 @@ static struct i2c_board_info torbreck_i2c_devs2[] __initdata = {
static void __init torbreck_map_io(void)
{
- s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s5pv210_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(torbreck_uartcfgs, ARRAY_SIZE(torbreck_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
@@ -127,7 +129,9 @@ MACHINE_START(TORBRECK, "TORBRECK")
/* Maintainer: Hyunchul Ko <ghcstop@gmail.com> */
.atag_offset = 0x100,
.init_irq = s5pv210_init_irq,
+ .handle_irq = vic_handle_irq,
.map_io = torbreck_map_io,
.init_machine = torbreck_machine_init,
.timer = &s5p_timer,
+ .restart = s5pv210_restart,
MACHINE_END
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index f149d278377..677c71c41e5 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -133,7 +133,7 @@ static void s5pv210_pm_prepare(void)
s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
}
-static int s5pv210_pm_add(struct sys_device *sysdev)
+static int s5pv210_pm_add(struct device *dev)
{
pm_cpu_prep = s5pv210_pm_prepare;
pm_cpu_sleep = s5pv210_cpu_suspend;
@@ -141,13 +141,15 @@ static int s5pv210_pm_add(struct sys_device *sysdev)
return 0;
}
-static struct sysdev_driver s5pv210_pm_driver = {
- .add = s5pv210_pm_add,
+static struct subsys_interface s5pv210_pm_interface = {
+ .name = "s5pv210_pm",
+ .subsys = &s5pv210_subsys,
+ .add_dev = s5pv210_pm_add,
};
static __init int s5pv210_pm_drvinit(void)
{
- return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver);
+ return subsys_interface_register(&s5pv210_pm_interface);
}
arch_initcall(s5pv210_pm_drvinit);
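Illustrative sketch, not part of the patch: the pm.c hunk above shows the generic sysdev-to-subsys_interface conversion pattern. With placeholder names (foo_subsys stands in for an exported bus_type such as s5pv210_subsys, and the callback signature follows the hunk above), the shape is:

#include <linux/device.h>
#include <linux/init.h>

extern struct bus_type foo_subsys;	/* placeholder, exported by platform code */

static int foo_pm_add(struct device *dev)
{
	/* per-device setup, as s5pv210_pm_add() does above */
	return 0;
}

static struct subsys_interface foo_pm_interface = {
	.name		= "foo_pm",
	.subsys		= &foo_subsys,
	.add_dev	= foo_pm_add,
};

static int __init foo_pm_init(void)
{
	return subsys_interface_register(&foo_pm_interface);
}
arch_initcall(foo_pm_init);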
diff --git a/arch/arm/mach-s5pv210/setup-sdhci.c b/arch/arm/mach-s5pv210/setup-sdhci.c
deleted file mode 100644
index 6b8ccc4d35f..00000000000
--- a/arch/arm/mach-s5pv210/setup-sdhci.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/setup-sdhci.c
- *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S5PV210 - Helper functions for settign up SDHCI device(s) (HSMMC)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-
-/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
-
-char *s5pv210_hsmmc_clksrcs[4] = {
- [0] = "hsmmc", /* HCLK */
- /* [1] = "hsmmc", - duplicate HCLK entry */
- [2] = "sclk_mmc", /* mmc_bus */
- /* [3] = NULL, - reserved */
-};
diff --git a/arch/arm/mach-s5pv210/setup-spi.c b/arch/arm/mach-s5pv210/setup-spi.c
new file mode 100644
index 00000000000..f43c5048a37
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-spi.c
@@ -0,0 +1,51 @@
+/* linux/arch/arm/mach-s5pv210/setup-spi.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPB(0), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPB(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV210_GPB(2), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+ .tx_st_done = 25,
+};
+
+int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPB(4), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPB(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgall_range(S5PV210_GPB(6), 2,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+ return 0;
+}
+#endif
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index 5a616f6e561..f7951aa0456 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+ zreladdr-y += 0xc0208000
else
zreladdr-y += 0xc0008000
endif
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 3dd133f1841..6b93e200bca 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -455,4 +455,5 @@ MACHINE_START(ASSABET, "Intel-Assabet")
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index bda83e1ab07..b07a2c024cb 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -309,4 +309,5 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 7f3da4b11ec..11bb6d0b9be 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -139,4 +139,5 @@ MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
.init_irq = cerf_init_irq,
.timer = &sa1100_timer,
.init_machine = cerf_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index dab3c6347a8..d6df9f6c9f7 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -11,17 +11,39 @@
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
#include <mach/hardware.h>
-/*
- * Very simple clock implementation - we only have one clock to deal with.
- */
+struct clkops {
+ void (*enable)(struct clk *);
+ void (*disable)(struct clk *);
+ unsigned long (*getrate)(struct clk *);
+};
+
struct clk {
+ const struct clkops *ops;
+ unsigned long rate;
unsigned int enabled;
};
-static void clk_gpio27_enable(void)
+#define INIT_CLKREG(_clk, _devname, _conname) \
+ { \
+ .clk = _clk, \
+ .dev_id = _devname, \
+ .con_id = _conname, \
+ }
+
+#define DEFINE_CLK(_name, _ops, _rate) \
+struct clk clk_##_name = { \
+ .ops = _ops, \
+ .rate = _rate, \
+ }
+
+static DEFINE_SPINLOCK(clocks_lock);
+
+static void clk_gpio27_enable(struct clk *clk)
{
/*
* First, set up the 3.6864MHz clock on GPIO 27 for the SA-1111:
@@ -32,38 +54,22 @@ static void clk_gpio27_enable(void)
TUCR = TUCR_3_6864MHz;
}
-static void clk_gpio27_disable(void)
+static void clk_gpio27_disable(struct clk *clk)
{
TUCR = 0;
GPDR &= ~GPIO_32_768kHz;
GAFR &= ~GPIO_32_768kHz;
}
-static struct clk clk_gpio27;
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- const char *devname = dev_name(dev);
-
- return strcmp(devname, "sa1111.0") ? ERR_PTR(-ENOENT) : &clk_gpio27;
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
if (clk->enabled++ == 0)
- clk_gpio27_enable();
+ clk->ops->enable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(clk_enable);
@@ -76,13 +82,48 @@ void clk_disable(struct clk *clk)
spin_lock_irqsave(&clocks_lock, flags);
if (--clk->enabled == 0)
- clk_gpio27_disable();
+ clk->ops->disable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
- return 3686400;
+ unsigned long rate;
+
+ rate = clk->rate;
+ if (clk->ops->getrate)
+ rate = clk->ops->getrate(clk);
+
+ return rate;
}
EXPORT_SYMBOL(clk_get_rate);
+
+const struct clkops clk_gpio27_ops = {
+ .enable = clk_gpio27_enable,
+ .disable = clk_gpio27_disable,
+};
+
+static void clk_dummy_enable(struct clk *clk) { }
+static void clk_dummy_disable(struct clk *clk) { }
+
+const struct clkops clk_dummy_ops = {
+ .enable = clk_dummy_enable,
+ .disable = clk_dummy_disable,
+};
+
+static DEFINE_CLK(gpio27, &clk_gpio27_ops, 3686400);
+static DEFINE_CLK(dummy, &clk_dummy_ops, 0);
+
+static struct clk_lookup sa11xx_clkregs[] = {
+ INIT_CLKREG(&clk_gpio27, "sa1111.0", NULL),
+ INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
+};
+
+static int __init sa11xx_clk_init(void)
+{
+ clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
+ return 0;
+}
+
+postcore_initcall(sa11xx_clk_init);
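Illustrative consumer-side sketch, not part of the patch: once clkdev_add_table() registers the lookups above, the generic clk API resolves the "sa1111.0" dev_id, so the private clk_get()/clk_put() removed from this file are no longer needed. A minimal user, with a hypothetical function name, could look like:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int example_enable_sa1111_clock(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);	/* matched via dev_id "sa1111.0" */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	pr_info("SA-1111 clock: %lu Hz\n", clk_get_rate(clk));	/* 3686400 */
	return 0;
}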
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 2965cc9d424..b9060e236de 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -387,4 +387,5 @@ MACHINE_START(COLLIE, "Sharp-Collie")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = collie_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 5fa5ae1f39e..480d2ea46b0 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -126,6 +126,17 @@ static void sa1100_power_off(void)
PMCR = PMCR_SF;
}
+void sa11x0_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* Jump into ROM at address 0 */
+ soft_restart(0);
+ } else {
+ /* Use on-chip reset capability */
+ RSRR = RSRR_SWR;
+ }
+}
+
static void sa11x0_register_device(struct platform_device *dev, void *data)
{
int err;
@@ -334,9 +345,29 @@ void sa11x0_register_irda(struct irda_platform_data *irda)
sa11x0_register_device(&sa11x0ir_device, irda);
}
+static struct resource sa11x0rtc_resources[] = {
+ [0] = {
+ .start = 0x90010000,
+ .end = 0x900100ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RTC1Hz,
+ .end = IRQ_RTC1Hz,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_RTCAlrm,
+ .end = IRQ_RTCAlrm,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct platform_device sa11x0rtc_device = {
.name = "sa1100-rtc",
.id = -1,
+ .resource = sa11x0rtc_resources,
+ .num_resources = ARRAY_SIZE(sa11x0rtc_resources),
};
static struct platform_device *sa11x0_devices[] __initdata = {
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index b7a9a601c2d..33268cf6be3 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -10,6 +10,7 @@ extern struct sys_timer sa1100_timer;
extern void __init sa1100_map_io(void);
extern void __init sa1100_init_irq(void);
extern void __init sa1100_init_gpio(void);
+extern void sa11x0_restart(char, const char *);
#define SET_BANK(__nr,__start,__size) \
mi->bank[__nr].start = (__start), \
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b30733a2b82..1e6b3c105ba 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -89,5 +89,6 @@ MACHINE_START(H3100, "Compaq iPAQ H3100")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3100_mach_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index 6fd324d9238..6b58e7460ec 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -130,5 +130,6 @@ MACHINE_START(H3600, "Compaq iPAQ H3600")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3600_mach_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 30f4a551b8e..c01bb36db94 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -200,4 +200,5 @@ MACHINE_START(HACKKIT, "HackKit Cpu Board")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = hackkit_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/include/mach/system.h b/arch/arm/mach-sa1100/include/mach/system.h
index ba9da9f7f18..e17b208f76d 100644
--- a/arch/arm/mach-sa1100/include/mach/system.h
+++ b/arch/arm/mach-sa1100/include/mach/system.h
@@ -3,20 +3,7 @@
*
* Copyright (c) 1999 Nicolas Pitre <nico@fluxnic.net>
*/
-#include <mach/hardware.h>
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (mode == 's') {
- /* Jump into ROM at address 0 */
- cpu_reset(0);
- } else {
- /* Use on-chip reset capability */
- RSRR = RSRR_SWR;
- }
-}
diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h
deleted file mode 100644
index b3d00239848..00000000000
--- a/arch/arm/mach-sa1100/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/vmalloc.h
- */
-#define VMALLOC_END (0xe8000000UL)
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index 77198fe02bc..ee121d6f048 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -373,4 +373,5 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index 5bc59d0947b..af4e2761f3d 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -66,4 +66,5 @@ MACHINE_START(LART, "LART")
.init_irq = sa1100_init_irq,
.init_machine = lart_init,
.timer = &sa1100_timer,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 032f3881d14..85f6ee67222 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -19,6 +19,7 @@
#include <asm/mach-types.h>
#include <asm/setup.h>
+#include <asm/page.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
@@ -116,4 +117,5 @@ MACHINE_START(NANOENGINE, "BSE nanoEngine")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = nanoengine_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 65161f2bea2..9307df05353 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -150,4 +150,5 @@ MACHINE_START(PLEB, "PLEB")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = pleb_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 1cccbf5b9e9..318b2b766a0 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -87,4 +87,5 @@ MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = shannon_init,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 4790f3f3d00..e17c04d6e32 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -396,4 +396,5 @@ MACHINE_START(SIMPAD, "Simpad")
.map_io = simpad_map_io,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
+ .restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index fa6602491d5..69e33535dee 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/sched.h> /* just for sched_clock() - funny that */
#include <linux/timex.h>
#include <linux/clockchips.h>
@@ -20,29 +19,9 @@
#include <asm/sched_clock.h>
#include <mach/hardware.h>
-/*
- * This is the SA11x0 sched_clock implementation.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 3.6864MHz,
- * NSEC_PER_SEC, 60).
- * This gives a resolution of about 271ns and a wrap period of about 19min.
- */
-#define SC_MULT 2275555556u
-#define SC_SHIFT 23
-
-unsigned long long notrace sched_clock(void)
-{
- u32 cyc = OSCR;
- return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace sa1100_update_sched_clock(void)
+static u32 notrace sa1100_read_sched_clock(void)
{
- u32 cyc = OSCR;
- update_sched_clock(&cd, cyc, (u32)~0);
+ return OSCR;
}
#define MIN_OSCR_DELTA 2
@@ -109,8 +88,7 @@ static void __init sa1100_timer_init(void)
OIER = 0;
OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
- init_fixed_sched_clock(&cd, sa1100_update_sched_clock, 32,
- 3686400, SC_MULT, SC_SHIFT);
+ setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);
clockevents_calc_mult_shift(&ckevt_sa1100_osmr0, 3686400, 4);
ckevt_sa1100_osmr0.max_delta_ns =
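Illustrative sketch, not part of the patch: the time.c hunk above moves SA11x0 to the common ARM sched_clock code. setup_sched_clock() only needs a 32-bit counter read callback plus the counter width and rate; the mult/shift pair is computed internally, which is why the hand-tuned SC_MULT/SC_SHIFT constants could be removed. The registration pattern, reading OSCR as in the hunk:

#include <linux/init.h>
#include <linux/types.h>
#include <asm/sched_clock.h>
#include <mach/hardware.h>	/* OSCR */

static u32 notrace example_read_sched_clock(void)
{
	return OSCR;		/* free-running 3.6864 MHz OS timer counter */
}

static void __init example_sched_clock_init(void)
{
	setup_sched_clock(example_read_sched_clock, 32, 3686400);
}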
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index feda3ca7fc9..a851c254ad6 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -26,10 +26,9 @@
#define ROMCARD_SIZE 0x08000000
#define ROMCARD_START 0x10000000
-void arch_reset(char mode, const char *cmd)
+static void shark_restart(char mode, const char *cmd)
{
short temp;
- local_irq_disable();
/* Reset the Machine via pc[3] of the sequoia chipset */
outw(0x09,0x24);
temp=inw(0x26);
@@ -157,4 +156,5 @@ MACHINE_START(SHARK, "Shark")
.init_irq = shark_init_irq,
.timer = &shark_timer,
.dma_zone_size = SZ_4M,
+ .restart = shark_restart,
MACHINE_END
diff --git a/arch/arm/mach-shark/include/mach/system.h b/arch/arm/mach-shark/include/mach/system.h
index 21c373b30bb..1b2f2c5050a 100644
--- a/arch/arm/mach-shark/include/mach/system.h
+++ b/arch/arm/mach-shark/include/mach/system.h
@@ -6,9 +6,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-/* Found in arch/mach-shark/core.c */
-extern void arch_reset(char mode, const char *cmd);
-
static inline void arch_idle(void)
{
}
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
deleted file mode 100644
index b10df988526..00000000000
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * arch/arm/mach-shark/include/mach/vmalloc.h
- */
-#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 737bdc631b0..5ca1f9d6699 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -28,7 +28,6 @@ pfc-$(CONFIG_ARCH_SH73A0) += pfc-sh73a0.o
obj-$(CONFIG_ARCH_SH7367) += entry-intc.o
obj-$(CONFIG_ARCH_SH7377) += entry-intc.o
obj-$(CONFIG_ARCH_SH7372) += entry-intc.o
-obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o
# PM objects
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index b862e9f81e3..a4e6ca04e31 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -466,8 +466,6 @@ static struct map_desc ag5evm_io_desc[] __initdata = {
static void __init ag5evm_map_io(void)
{
iotable_init(ag5evm_io_desc, ARRAY_SIZE(ag5evm_io_desc));
- /* DMA memory at 0xf6000000 - 0xffdfffff */
- init_consistent_dma_size(158 << 20);
/* setup early devices and console here as well */
sh73a0_add_early_devices();
@@ -607,8 +605,9 @@ struct sys_timer ag5evm_timer = {
MACHINE_START(AG5EVM, "ag5evm")
.map_io = ag5evm_map_io,
+ .nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
- .handle_irq = shmobile_handle_irq_gic,
+ .handle_irq = gic_handle_irq,
.init_machine = ag5evm_init,
.timer = &ag5evm_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 4c865ece9ac..6a6f9f7568c 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1172,8 +1172,6 @@ static struct map_desc ap4evb_io_desc[] __initdata = {
static void __init ap4evb_map_io(void)
{
iotable_init(ap4evb_io_desc, ARRAY_SIZE(ap4evb_io_desc));
- /* DMA memory at 0xf6000000 - 0xffdfffff */
- init_consistent_dma_size(158 << 20);
/* setup early devices and console here as well */
sh7372_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 8b620bf0622..72d557281b1 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -261,8 +261,6 @@ static struct map_desc g3evm_io_desc[] __initdata = {
static void __init g3evm_map_io(void)
{
iotable_init(g3evm_io_desc, ARRAY_SIZE(g3evm_io_desc));
- /* DMA memory at 0xf6000000 - 0xffdfffff */
- init_consistent_dma_size(158 << 20);
/* setup early devices and console here as well */
sh7367_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 7719ddc5f59..2220b885cff 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -275,8 +275,6 @@ static struct map_desc g4evm_io_desc[] __initdata = {
static void __init g4evm_map_io(void)
{
iotable_init(g4evm_io_desc, ARRAY_SIZE(g4evm_io_desc));
- /* DMA memory at 0xf6000000 - 0xffdfffff */
- init_consistent_dma_size(158 << 20);
/* setup early devices and console here as well */
sh7377_add_early_devices();
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index bd9a78424d6..857ceeec1bb 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -33,6 +33,7 @@
#include <linux/input/sh_keysc.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(33), /* PINTA2 @ PORT144 */
+ .start = SH73A0_PINT0_IRQ(2), /* PINTA2 */
.flags = IORESOURCE_IRQ,
},
};
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
#define GPIO_LED(n, g) { .name = n, .gpio = g }
static struct gpio_led gpio_leds[] = {
- GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
- GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
- GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
- GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
},
};
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+ .name = "V2513",
+ .pin_gpio_fn = GPIO_FN_TPU1TO2,
+ .pin_gpio = GPIO_PORT153,
+ .channel_offset = 0x90,
+ .timer_bit = 2,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+ [0] = {
+ .name = "TPU12",
+ .start = 0xe6610090,
+ .end = 0xe66100b5,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu12_device = {
+ .name = "leds-renesas-tpu",
+ .id = 12,
+ .dev = {
+ .platform_data = &led_renesas_tpu12_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu12_resources),
+ .resource = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+ .name = "V2514",
+ .pin_gpio_fn = GPIO_FN_TPU4TO1,
+ .pin_gpio = GPIO_PORT199,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+ [0] = {
+ .name = "TPU41",
+ .start = 0xe6640050,
+ .end = 0xe6640075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu41_device = {
+ .name = "leds-renesas-tpu",
+ .id = 41,
+ .dev = {
+ .platform_data = &led_renesas_tpu41_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu41_resources),
+ .resource = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+ .name = "V2515",
+ .pin_gpio_fn = GPIO_FN_TPU2TO1,
+ .pin_gpio = GPIO_PORT197,
+ .channel_offset = 0x50,
+ .timer_bit = 1,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+ [0] = {
+ .name = "TPU21",
+ .start = 0xe6620050,
+ .end = 0xe6620075,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu21_device = {
+ .name = "leds-renesas-tpu",
+ .id = 21,
+ .dev = {
+ .platform_data = &led_renesas_tpu21_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu21_resources),
+ .resource = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+ .name = "KEYLED",
+ .pin_gpio_fn = GPIO_FN_TPU3TO0,
+ .pin_gpio = GPIO_PORT163,
+ .channel_offset = 0x10,
+ .timer_bit = 0,
+ .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+ [0] = {
+ .name = "TPU30",
+ .start = 0xe6630010,
+ .end = 0xe6630035,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device leds_tpu30_device = {
+ .name = "leds-renesas-tpu",
+ .id = 30,
+ .dev = {
+ .platform_data = &led_renesas_tpu30_pdata,
+ },
+ .num_resources = ARRAY_SIZE(tpu30_resources),
+ .resource = tpu30_resources,
+};
+
/* MMCIF */
static struct resource mmcif_resources[] = {
[0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
&keysc_device,
&gpio_keys_device,
&gpio_leds_device,
+ &leds_tpu12_device,
+ &leds_tpu41_device,
+ &leds_tpu21_device,
+ &leds_tpu30_device,
&mmcif_device,
&sdhi0_device,
&sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
shmobile_setup_console();
}
-#define PINTER0A 0xe69000a0
-#define PINTCR0A 0xe69000b0
-
-void __init kota2_init_irq(void)
-{
- sh73a0_init_irq();
-
- /* setup PINT: enable PINTA2 as active low */
- __raw_writel(1 << 29, PINTER0A);
- __raw_writew(2 << 10, PINTCR0A);
-}
-
static void __init kota2_init(void)
{
sh73a0_pinmux_init();
@@ -447,8 +549,9 @@ struct sys_timer kota2_timer = {
MACHINE_START(KOTA2, "kota2")
.map_io = kota2_map_io,
- .init_irq = kota2_init_irq,
- .handle_irq = shmobile_handle_irq_gic,
+ .nr_irqs = NR_IRQS_LEGACY,
+ .init_irq = sh73a0_init_irq,
+ .handle_irq = gic_handle_irq,
.init_machine = kota2_init,
.timer = &kota2_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 9c5e598e0e3..ed525668739 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1390,8 +1390,6 @@ static struct map_desc mackerel_io_desc[] __initdata = {
static void __init mackerel_map_io(void)
{
iotable_init(mackerel_io_desc, ARRAY_SIZE(mackerel_io_desc));
- /* DMA memory at 0xf6000000 - 0xffdfffff */
- init_consistent_dma_size(158 << 20);
/* setup early devices and console here as well */
sh7372_add_early_devices();
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 61a846bb30f..1370a89ca35 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -113,6 +113,12 @@ static struct clk main_clk = {
.ops = &main_clk_ops,
};
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+ .ops = &div2_clk_ops,
+ .parent = &main_clk,
+};
+
/* PLL0, PLL1, PLL2, PLL3 */
static unsigned long pll_recalc(struct clk *clk)
{
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
&extal1_div2_clk,
&extal2_div2_clk,
&main_clk,
+ &main_div2_clk,
&pll0_clk,
&pll1_clk,
&pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
- [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+ [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
MSTP314, MSTP313, MSTP312, MSTP311,
+ MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
MSTP_NR };
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+ [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+ [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+ [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+ [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+ CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
diff --git a/arch/arm/mach-shmobile/entry-gic.S b/arch/arm/mach-shmobile/entry-gic.S
deleted file mode 100644
index e20239b08c8..00000000000
--- a/arch/arm/mach-shmobile/entry-gic.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * ARM Interrupt demux handler using GIC
- *
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2011 Paul Mundt
- * Copyright (C) 2010 - 2011 Renesas Solutions Corp.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/assembler.h>
-#include <asm/entry-macro-multi.S>
-#include <asm/hardware/gic.h>
-#include <asm/hardware/entry-macro-gic.S>
-
- arch_irq_handler shmobile_handle_irq_gic
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 834bd6cd508..be78a2c73db 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -7,7 +7,6 @@ extern void shmobile_secondary_vector(void);
struct clk;
extern int clk_init(void);
extern void shmobile_handle_irq_intc(struct pt_regs *);
-extern void shmobile_handle_irq_gic(struct pt_regs *);
extern struct platform_suspend_ops shmobile_suspend_ops;
struct cpuidle_driver;
extern void (*shmobile_cpuidle_modes[])(void);
@@ -35,8 +34,8 @@ extern void sh7372_add_standard_devices(void);
extern void sh7372_clock_init(void);
extern void sh7372_pinmux_init(void);
extern void sh7372_pm_init(void);
-extern void sh7372_resume_core_standby_a3sm(void);
-extern int sh7372_do_idle_a3sm(unsigned long unused);
+extern void sh7372_resume_core_standby_sysc(void);
+extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
extern struct clk sh7372_extal1_clk;
extern struct clk sh7372_extal2_clk;
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
index 8d4a416d428..2a57b2964ee 100644
--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
@@ -18,14 +18,5 @@
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- .endm
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 7bf0890e16b..de795b42232 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -12,8 +12,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-
-#define ARCH_NR_GPIOS 1024
#include <linux/sh_pfc.h>
#ifdef CONFIG_GPIOLIB
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 84532f9629b..8254ab86f6c 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -480,11 +480,10 @@ struct platform_device;
struct sh7372_pm_domain {
struct generic_pm_domain genpd;
struct dev_power_governor *gov;
- void (*suspend)(void);
+ int (*suspend)(void);
void (*resume)(void);
unsigned int bit_shift;
bool no_debug;
- bool stay_on;
};
static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
@@ -499,6 +498,7 @@ extern struct sh7372_pm_domain sh7372_d4;
extern struct sh7372_pm_domain sh7372_a4r;
extern struct sh7372_pm_domain sh7372_a3rv;
extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a4s;
extern struct sh7372_pm_domain sh7372_a3sp;
extern struct sh7372_pm_domain sh7372_a3sg;
@@ -515,5 +515,7 @@ extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
extern void sh7372_intcs_suspend(void);
extern void sh7372_intcs_resume(void);
+extern void sh7372_intca_suspend(void);
+extern void sh7372_intca_resume(void);
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/system.h b/arch/arm/mach-shmobile/include/mach/system.h
index 76a687eeaa2..956ac18ddbf 100644
--- a/arch/arm/mach-shmobile/include/mach/system.h
+++ b/arch/arm/mach-shmobile/include/mach/system.h
@@ -8,7 +8,7 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
- cpu_reset(0);
+ soft_restart(0);
}
#endif
diff --git a/arch/arm/mach-shmobile/include/mach/vmalloc.h b/arch/arm/mach-shmobile/include/mach/vmalloc.h
deleted file mode 100644
index 2b8fd8b942f..00000000000
--- a/arch/arm/mach-shmobile/include/mach/vmalloc.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_MACH_VMALLOC_H
-#define __ASM_MACH_VMALLOC_H
-
-/* Vmalloc at ... - 0xe5ffffff */
-#define VMALLOC_END 0xe6000000UL
-
-#endif /* __ASM_MACH_VMALLOC_H */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 2d8856df80e..89afcaba99a 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -535,6 +535,7 @@ static struct resource intcs_resources[] __initdata = {
static struct intc_desc intcs_desc __initdata = {
.name = "sh7372-intcs",
.force_enable = ENABLED_INTCS,
+ .skip_syscore_suspend = true,
.resource = intcs_resources,
.num_resources = ARRAY_SIZE(intcs_resources),
.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
@@ -611,3 +612,52 @@ void sh7372_intcs_resume(void)
for (k = 0x80; k <= 0x9c; k += 4)
__raw_writeb(ffd5[k], intcs_ffd5 + k);
}
+
+static unsigned short e694[0x200];
+static unsigned short e695[0x200];
+
+void sh7372_intca_suspend(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x38; k += 4)
+ e694[k] = __raw_readw(0xe6940000 + k);
+
+ for (k = 0x80; k <= 0xb4; k += 4)
+ e694[k] = __raw_readb(0xe6940000 + k);
+
+ for (k = 0x180; k <= 0x1b4; k += 4)
+ e694[k] = __raw_readb(0xe6940000 + k);
+
+ for (k = 0x00; k <= 0x50; k += 4)
+ e695[k] = __raw_readw(0xe6950000 + k);
+
+ for (k = 0x80; k <= 0xa8; k += 4)
+ e695[k] = __raw_readb(0xe6950000 + k);
+
+ for (k = 0x180; k <= 0x1a8; k += 4)
+ e695[k] = __raw_readb(0xe6950000 + k);
+}
+
+void sh7372_intca_resume(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x38; k += 4)
+ __raw_writew(e694[k], 0xe6940000 + k);
+
+ for (k = 0x80; k <= 0xb4; k += 4)
+ __raw_writeb(e694[k], 0xe6940000 + k);
+
+ for (k = 0x180; k <= 0x1b4; k += 4)
+ __raw_writeb(e694[k], 0xe6940000 + k);
+
+ for (k = 0x00; k <= 0x50; k += 4)
+ __raw_writew(e695[k], 0xe6950000 + k);
+
+ for (k = 0x80; k <= 0xa8; k += 4)
+ __raw_writeb(e695[k], 0xe6950000 + k);
+
+ for (k = 0x180; k <= 0x1a8; k += 4)
+ __raw_writeb(e695[k], 0xe6950000 + k);
+}
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 34bbcbfb170..77b8fc12fc2 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -82,11 +82,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
unsigned int mask = 1 << sh7372_pd->bit_shift;
- if (sh7372_pd->suspend)
- sh7372_pd->suspend();
+ if (sh7372_pd->suspend) {
+ int ret = sh7372_pd->suspend();
- if (sh7372_pd->stay_on)
- return 0;
+ if (ret)
+ return ret;
+ }
if (__raw_readl(PSTR) & mask) {
unsigned int retry_count;
@@ -101,8 +102,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
}
if (!sh7372_pd->no_debug)
- pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
+ genpd->name, mask, __raw_readl(PSTR));
return 0;
}
@@ -113,9 +114,6 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
unsigned int retry_count;
int ret = 0;
- if (sh7372_pd->stay_on)
- goto out;
-
if (__raw_readl(PSTR) & mask)
goto out;
@@ -133,8 +131,8 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
ret = -EIO;
if (!sh7372_pd->no_debug)
- pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
+ sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
out:
if (ret == 0 && sh7372_pd->resume && do_resume)
@@ -148,35 +146,60 @@ static int pd_power_up(struct generic_pm_domain *genpd)
return __pd_power_up(to_sh7372_pd(genpd), true);
}
-static void sh7372_a4r_suspend(void)
+static int sh7372_a4r_suspend(void)
{
sh7372_intcs_suspend();
__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
+ return 0;
}
static bool pd_active_wakeup(struct device *dev)
{
- return true;
+ bool (*active_wakeup)(struct device *dev);
+
+ active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
+ return active_wakeup ? active_wakeup(dev) : true;
}
-static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
+static int sh7372_stop_dev(struct device *dev)
{
- return false;
+ int (*stop)(struct device *dev);
+
+ stop = dev_gpd_data(dev)->ops.stop;
+ if (stop) {
+ int ret = stop(dev);
+ if (ret)
+ return ret;
+ }
+ return pm_clk_suspend(dev);
}
-struct dev_power_governor sh7372_always_on_gov = {
- .power_down_ok = sh7372_power_down_forbidden,
-};
+static int sh7372_start_dev(struct device *dev)
+{
+ int (*start)(struct device *dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ start = dev_gpd_data(dev)->ops.start;
+ if (start)
+ ret = start(dev);
+
+ return ret;
+}
void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
{
struct generic_pm_domain *genpd = &sh7372_pd->genpd;
+ struct dev_power_governor *gov = sh7372_pd->gov;
- pm_genpd_init(genpd, sh7372_pd->gov, false);
- genpd->stop_device = pm_clk_suspend;
- genpd->start_device = pm_clk_resume;
+ pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
+ genpd->dev_ops.stop = sh7372_stop_dev;
+ genpd->dev_ops.start = sh7372_start_dev;
+ genpd->dev_ops.active_wakeup = pd_active_wakeup;
genpd->dev_irq_safe = true;
- genpd->active_wakeup = pd_active_wakeup;
genpd->power_off = pd_power_down;
genpd->power_on = pd_power_up;
__pd_power_up(sh7372_pd, false);
@@ -199,48 +222,73 @@ void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
}
struct sh7372_pm_domain sh7372_a4lc = {
+ .genpd.name = "A4LC",
.bit_shift = 1,
};
struct sh7372_pm_domain sh7372_a4mp = {
+ .genpd.name = "A4MP",
.bit_shift = 2,
};
struct sh7372_pm_domain sh7372_d4 = {
+ .genpd.name = "D4",
.bit_shift = 3,
};
struct sh7372_pm_domain sh7372_a4r = {
+ .genpd.name = "A4R",
.bit_shift = 5,
- .gov = &sh7372_always_on_gov,
.suspend = sh7372_a4r_suspend,
.resume = sh7372_intcs_resume,
- .stay_on = true,
};
struct sh7372_pm_domain sh7372_a3rv = {
+ .genpd.name = "A3RV",
.bit_shift = 6,
};
struct sh7372_pm_domain sh7372_a3ri = {
+ .genpd.name = "A3RI",
.bit_shift = 8,
};
-struct sh7372_pm_domain sh7372_a3sp = {
- .bit_shift = 11,
- .gov = &sh7372_always_on_gov,
+static int sh7372_a4s_suspend(void)
+{
+ /*
+ * The A4S domain contains the CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ return -EBUSY;
+}
+
+struct sh7372_pm_domain sh7372_a4s = {
+ .genpd.name = "A4S",
+ .bit_shift = 10,
+ .gov = &pm_domain_always_on_gov,
.no_debug = true,
+ .suspend = sh7372_a4s_suspend,
};
-static void sh7372_a3sp_init(void)
+static int sh7372_a3sp_suspend(void)
{
- /* serial consoles make use of SCIF hardware located in A3SP,
+ /*
+ * Serial consoles make use of SCIF hardware located in A3SP,
* keep such power domain on if "no_console_suspend" is set.
*/
- sh7372_a3sp.stay_on = !console_suspend_enabled;
+ return console_suspend_enabled ? -EBUSY : 0;
}
+struct sh7372_pm_domain sh7372_a3sp = {
+ .genpd.name = "A3SP",
+ .bit_shift = 11,
+ .gov = &pm_domain_always_on_gov,
+ .no_debug = true,
+ .suspend = sh7372_a3sp_suspend,
+};
+
struct sh7372_pm_domain sh7372_a3sg = {
+ .genpd.name = "A3SG",
.bit_shift = 13,
};
@@ -257,11 +305,16 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
return 0;
}
-static void sh7372_enter_core_standby(void)
+static void sh7372_set_reset_vector(unsigned long address)
{
/* set reset vector, translate 4k */
- __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+ __raw_writel(address, SBAR);
__raw_writel(0, APARMBAREA);
+}
+
+static void sh7372_enter_core_standby(void)
+{
+ sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
/* enter sleep mode with SYSTBCR to 0x10 */
__raw_writel(0x10, SYSTBCR);
@@ -274,27 +327,22 @@ static void sh7372_enter_core_standby(void)
#endif
#ifdef CONFIG_SUSPEND
-static void sh7372_enter_a3sm_common(int pllc0_on)
+static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
{
- /* set reset vector, translate 4k */
- __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
- __raw_writel(0, APARMBAREA);
-
if (pllc0_on)
__raw_writel(0, PLLC01STPCR);
else
__raw_writel(1 << 28, PLLC01STPCR);
- __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
- cpu_suspend(0, sh7372_do_idle_a3sm);
+ cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
/* disable reset vector translation */
__raw_writel(0, SBAR);
}
-static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
{
unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
unsigned long msk, msk2;
@@ -382,7 +430,7 @@ static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
*irqcr2p = irqcr2;
}
-static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
{
u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
unsigned long tmp;
@@ -415,6 +463,22 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
}
+
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+ sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
+ sh7372_enter_sysc(pllc0_on, 1 << 12);
+}
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+ sh7372_intca_suspend();
+ memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+ sh7372_set_reset_vector(SMFRAM);
+ sh7372_enter_sysc(pllc0_on, 1 << 10);
+ sh7372_intca_resume();
+}
+
#endif
#ifdef CONFIG_CPU_IDLE
@@ -448,14 +512,20 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
unsigned long msk, msk2;
/* check active clocks to determine potential wakeup sources */
- if (sh7372_a3sm_valid(&msk, &msk2)) {
-
+ if (sh7372_sysc_valid(&msk, &msk2)) {
/* convert INTC mask and sense to SYSC mask and sense */
- sh7372_setup_a3sm(msk, msk2);
-
- /* enter A3SM sleep with PLLC0 off */
- pr_debug("entering A3SM\n");
- sh7372_enter_a3sm_common(0);
+ sh7372_setup_sysc(msk, msk2);
+
+ if (!console_suspend_enabled &&
+ sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
+ /* enter A4S sleep with PLLC0 off */
+ pr_debug("entering A4S\n");
+ sh7372_enter_a4s_common(0);
+ } else {
+ /* enter A3SM sleep with PLLC0 off */
+ pr_debug("entering A3SM\n");
+ sh7372_enter_a3sm_common(0);
+ }
} else {
/* default to Core Standby that supports all wakeup sources */
pr_debug("entering Core Standby\n");
@@ -464,9 +534,37 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
return 0;
}
+/**
+ * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
+ * @notifier: Unused.
+ * @pm_event: Event being handled.
+ * @unused: Unused.
+ */
+static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ /*
+ * This is necessary, because the A4R domain has to be "on"
+ * when suspend_device_irqs() and resume_device_irqs() are
+ * executed during system suspend and resume, respectively, so
+ * that those functions don't crash while accessing the INTCS.
+ */
+ pm_genpd_poweron(&sh7372_a4r.genpd);
+ break;
+ case PM_POST_SUSPEND:
+ pm_genpd_poweroff_unused();
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void sh7372_suspend_init(void)
{
shmobile_suspend_ops.enter = sh7372_enter_suspend;
+ pm_notifier(sh7372_pm_notifier_fn, 0);
}
#else
static void sh7372_suspend_init(void) {}
@@ -482,8 +580,6 @@ void __init sh7372_pm_init(void)
/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
__raw_writel(0, PDNSEL);
- sh7372_a3sp_init();
-
sh7372_suspend_init();
sh7372_cpuidle_init();
}
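Illustrative sketch, not part of the patch: pm_notifier(), used in sh7372_suspend_init() above, is a helper macro that wraps register_pm_notifier() around a static notifier_block. A minimal stand-alone user, with placeholder names, looks like:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notifier_fn(struct notifier_block *nb,
				  unsigned long pm_event, void *unused)
{
	if (pm_event == PM_SUSPEND_PREPARE)
		pr_debug("preparing for system suspend\n");

	return NOTIFY_DONE;
}

static int __init example_pm_notifier_init(void)
{
	pm_notifier(example_pm_notifier_fn, 0);
	return 0;
}
arch_initcall(example_pm_notifier_init);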
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2380389e6ac..c197f9d29d0 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -994,12 +994,16 @@ void __init sh7372_add_standard_devices(void)
sh7372_init_pm_domain(&sh7372_a4r);
sh7372_init_pm_domain(&sh7372_a3rv);
sh7372_init_pm_domain(&sh7372_a3ri);
- sh7372_init_pm_domain(&sh7372_a3sg);
+ sh7372_init_pm_domain(&sh7372_a4s);
sh7372_init_pm_domain(&sh7372_a3sp);
+ sh7372_init_pm_domain(&sh7372_a3sg);
sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
+ sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
+ sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
+
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index f3ab3c5810e..1d564674451 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -37,13 +37,18 @@
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
.align 12
.text
- .global sh7372_resume_core_standby_a3sm
-sh7372_resume_core_standby_a3sm:
+ .global sh7372_resume_core_standby_sysc
+sh7372_resume_core_standby_sysc:
ldr pc, 1f
1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
- .global sh7372_do_idle_a3sm
-sh7372_do_idle_a3sm:
+#define SPDCR 0xe6180008
+
+ /* A3SM & A4S power down */
+ .global sh7372_do_idle_sysc
+sh7372_do_idle_sysc:
+ mov r8, r0 /* sleep mode passed in r0 */
+
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
@@ -80,13 +85,9 @@ sh7372_do_idle_a3sm:
dsb
dmb
-#define SPDCR 0xe6180008
-#define A3SM (1 << 12)
-
- /* A3SM power down */
+ /* SYSC power down */
ldr r0, =SPDCR
- ldr r1, =A3SM
- str r1, [r0]
+ str r8, [r0]
1:
b 1b
diff --git a/arch/arm/mach-spear3xx/include/mach/entry-macro.S b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
index 53da4224ba3..de3bb41c8e9 100644
--- a/arch/arm/mach-spear3xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
@@ -11,35 +11,8 @@
* warranty of any kind, whether express or implied.
*/
-#include <asm/hardware/vic.h>
-#include <mach/hardware.h>
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \base, =VA_SPEAR3XX_ML1_VIC_BASE
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
- teq \irqstat, #0
- beq 1001f @ this will set/reset
- @ zero register
- /*
- * Following code will find bit position of least significang
- * bit set in irqstat, using following equation
- * least significant bit set in n = (n & ~(n-1))
- */
- sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
- mvn \tmp, \tmp @ tmp = ~tmp
- and \irqstat, \irqstat, \tmp @ irqstat &= tmp
- /* Now, irqstat is = bit no. of 1st bit set in vic irq status */
- clz \tmp, \irqstat @ tmp = leading zeros
- rsb \irqnr, \tmp, #0x1F @ irqnr = 32 - tmp - 1
-
-1001: /* EQ will be set if no irqs pending */
- .endm
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index b8f31c3935f..14276e5a98d 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -42,6 +42,8 @@ void __init spear3xx_map_io(void);
void __init spear3xx_init_irq(void);
void __init spear3xx_init(void);
+void spear_restart(char, const char *);
+
/* pad mux declarations */
#define PMX_FIRDA_MASK (1 << 14)
#define PMX_I2C_MASK (1 << 13)
diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
deleted file mode 100644
index df977b3c9a6..00000000000
--- a/arch/arm/mach-spear3xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr3xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
index a5ff98eed1d..3462ab9d612 100644
--- a/arch/arm/mach-spear3xx/spear300_evb.c
+++ b/arch/arm/mach-spear3xx/spear300_evb.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
@@ -67,6 +68,8 @@ MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
.atag_offset = 0x100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &spear3xx_timer,
.init_machine = spear300_evb_init,
+ .restart = spear_restart,
MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
index 45d180d5936..f92c4993f65 100644
--- a/arch/arm/mach-spear3xx/spear310_evb.c
+++ b/arch/arm/mach-spear3xx/spear310_evb.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
@@ -73,6 +74,8 @@ MACHINE_START(SPEAR310, "ST-SPEAR310-EVB")
.atag_offset = 0x100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &spear3xx_timer,
.init_machine = spear310_evb_init,
+ .restart = spear_restart,
MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
index 22879848d73..105334ab702 100644
--- a/arch/arm/mach-spear3xx/spear320_evb.c
+++ b/arch/arm/mach-spear3xx/spear320_evb.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
@@ -71,6 +72,8 @@ MACHINE_START(SPEAR320, "ST-SPEAR320-EVB")
.atag_offset = 0x100,
.map_io = spear3xx_map_io,
.init_irq = spear3xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &spear3xx_timer,
.init_machine = spear320_evb_init,
+ .restart = spear_restart,
MACHINE_END
diff --git a/arch/arm/mach-spear6xx/include/mach/entry-macro.S b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
index 8a0b0ed7b20..d490a910d92 100644
--- a/arch/arm/mach-spear6xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
@@ -11,44 +11,8 @@
* warranty of any kind, whether express or implied.
*/
-#include <asm/hardware/vic.h>
-#include <mach/hardware.h>
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \base, =VA_SPEAR6XX_CPU_VIC_PRI_BASE
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
- mov \irqnr, #0
- teq \irqstat, #0
- bne 1001f
- ldr \base, =VA_SPEAR6XX_CPU_VIC_SEC_BASE
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
- teq \irqstat, #0
- beq 1002f @ this will set/reset
- @ zero register
- mov \irqnr, #32
-1001:
- /*
- * Following code will find bit position of least significang
- * bit set in irqstat, using following equation
- * least significant bit set in n = (n & ~(n-1))
- */
- sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
- mvn \tmp, \tmp @ tmp = ~tmp
- and \irqstat, \irqstat, \tmp @ irqstat &= tmp
- /* Now, irqstat is = bit no. of 1st bit set in vic irq status */
- clz \tmp, \irqstat @ tmp = leading zeros
-
- rsb \tmp, \tmp, #0x1F @ tmp = 32 - tmp - 1
- add \irqnr, \irqnr, \tmp
-
-1002: /* EQ will be set if no irqs pending */
- .endm
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 183f0238c5e..116b99301cf 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -41,6 +41,8 @@ void __init spear6xx_init(void);
void __init spear600_init(void);
void __init spear6xx_clk_init(void);
+void spear_restart(char, const char *);
+
/* Add spear600 machine device structure declarations here */
#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
deleted file mode 100644
index 4a0b56cb2a9..00000000000
--- a/arch/arm/mach-spear6xx/include/mach/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/include/mach/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr6xx machine family
- *
- * Copyright (C) 2009 ST Microelectronics
- * Rajeev Kumar<rajeev-dlh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MACH_VMALLOC_H
-#define __MACH_VMALLOC_H
-
-#include <plat/vmalloc.h>
-
-#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
index 8238fe38e71..c6e4254741c 100644
--- a/arch/arm/mach-spear6xx/spear600_evb.c
+++ b/arch/arm/mach-spear6xx/spear600_evb.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
@@ -46,6 +47,8 @@ MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
.atag_offset = 0x100,
.map_io = spear6xx_map_io,
.init_irq = spear6xx_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &spear6xx_timer,
.init_machine = spear600_evb_init,
+ .restart = spear_restart,
MACHINE_END
diff --git a/arch/arm/mach-tcc8k/Kconfig b/arch/arm/mach-tcc8k/Kconfig
deleted file mode 100644
index ad86415d157..00000000000
--- a/arch/arm/mach-tcc8k/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-if ARCH_TCC8K
-
-comment "TCC8000 systems:"
-
-config MACH_TCC8000_SDK
- bool "Telechips TCC8000-SDK development kit"
- default y
- help
- Support for the Telechips TCC8000-SDK board.
-
-endif
diff --git a/arch/arm/mach-tcc8k/Makefile b/arch/arm/mach-tcc8k/Makefile
deleted file mode 100644
index 9bacf31e49b..00000000000
--- a/arch/arm/mach-tcc8k/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for TCC8K boards and common files.
-#
-
-# Common support
-obj-y += clock.o irq.o time.o io.o devices.o
-
-# Board specific support
-obj-$(CONFIG_MACH_TCC8000_SDK) += board-tcc8000-sdk.o
diff --git a/arch/arm/mach-tcc8k/Makefile.boot b/arch/arm/mach-tcc8k/Makefile.boot
deleted file mode 100644
index 5e02d4156b0..00000000000
--- a/arch/arm/mach-tcc8k/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
- zreladdr-y += 0x20008000
-params_phys-y := 0x20000100
-initrd_phys-y := 0x20800000
diff --git a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c b/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
deleted file mode 100644
index 777a5bb9eed..00000000000
--- a/arch/arm/mach-tcc8k/board-tcc8000-sdk.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach-types.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-
-#include <mach/clock.h>
-#include <mach/tcc-nand.h>
-#include <mach/tcc8k-regs.h>
-
-#include "common.h"
-
-#define XI_FREQUENCY 12000000
-#define XTI_FREQUENCY 32768
-
-#ifdef CONFIG_MTD_NAND_TCC
-/* NAND */
-static struct tcc_nand_platform_data tcc8k_sdk_nand_data = {
- .width = 1,
- .hw_ecc = 0,
-};
-#endif
-
-static void __init tcc8k_init(void)
-{
-#ifdef CONFIG_MTD_NAND_TCC
- tcc_nand_device.dev.platform_data = &tcc8k_sdk_nand_data;
- platform_device_register(&tcc_nand_device);
-#endif
-}
-
-static void __init tcc8k_init_timer(void)
-{
- tcc_clocks_init(XI_FREQUENCY, XTI_FREQUENCY);
-}
-
-static struct sys_timer tcc8k_timer = {
- .init = tcc8k_init_timer,
-};
-
-static void __init tcc8k_map_io(void)
-{
- tcc8k_map_common_io();
-
- /* set PLL0 clock to 96MHz, adapt UART0 divisor */
- __raw_writel(0x00026003, CKC_BASE + PLL0CFG_OFFS);
- __raw_writel(0x10000001, CKC_BASE + ACLKUART0_OFFS);
-
- /* set PLL1 clock to 192MHz */
- __raw_writel(0x00016003, CKC_BASE + PLL1CFG_OFFS);
-
- /* set PLL2 clock to 48MHz */
- __raw_writel(0x00036003, CKC_BASE + PLL2CFG_OFFS);
-
- /* with CPU freq higher than 150 MHz, need extra DTCM wait */
- __raw_writel(0x00000001, SCFG_BASE + DTCMWAIT_OFFS);
-
- /* PLL locking time as specified */
- udelay(300);
-}
-
-MACHINE_START(TCC8000_SDK, "Telechips TCC8000-SDK Demo Board")
- .atag_offset = 0x100,
- .map_io = tcc8k_map_io,
- .init_irq = tcc8k_init_irq,
- .init_machine = tcc8k_init,
- .timer = &tcc8k_timer,
-MACHINE_END
diff --git a/arch/arm/mach-tcc8k/clock.c b/arch/arm/mach-tcc8k/clock.c
deleted file mode 100644
index e7cdae5c77a..00000000000
--- a/arch/arm/mach-tcc8k/clock.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Lowlevel clock handling for Telechips TCC8xxx SoCs
- *
- * Copyright (C) 2010 by Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/irqs.h>
-#include <mach/tcc8k-regs.h>
-
-#include "common.h"
-
-#define BCLKCTR0 (CKC_BASE + BCLKCTR0_OFFS)
-#define BCLKCTR1 (CKC_BASE + BCLKCTR1_OFFS)
-
-#define ACLKREF (CKC_BASE + ACLKREF_OFFS)
-#define ACLKUART0 (CKC_BASE + ACLKUART0_OFFS)
-#define ACLKUART1 (CKC_BASE + ACLKUART1_OFFS)
-#define ACLKUART2 (CKC_BASE + ACLKUART2_OFFS)
-#define ACLKUART3 (CKC_BASE + ACLKUART3_OFFS)
-#define ACLKUART4 (CKC_BASE + ACLKUART4_OFFS)
-#define ACLKI2C (CKC_BASE + ACLKI2C_OFFS)
-#define ACLKADC (CKC_BASE + ACLKADC_OFFS)
-#define ACLKUSBH (CKC_BASE + ACLKUSBH_OFFS)
-#define ACLKLCD (CKC_BASE + ACLKLCD_OFFS)
-#define ACLKSDH0 (CKC_BASE + ACLKSDH0_OFFS)
-#define ACLKSDH1 (CKC_BASE + ACLKSDH1_OFFS)
-#define ACLKSPI0 (CKC_BASE + ACLKSPI0_OFFS)
-#define ACLKSPI1 (CKC_BASE + ACLKSPI1_OFFS)
-#define ACLKSPDIF (CKC_BASE + ACLKSPDIF_OFFS)
-#define ACLKC3DEC (CKC_BASE + ACLKC3DEC_OFFS)
-#define ACLKCAN0 (CKC_BASE + ACLKCAN0_OFFS)
-#define ACLKCAN1 (CKC_BASE + ACLKCAN1_OFFS)
-#define ACLKGSB0 (CKC_BASE + ACLKGSB0_OFFS)
-#define ACLKGSB1 (CKC_BASE + ACLKGSB1_OFFS)
-#define ACLKGSB2 (CKC_BASE + ACLKGSB2_OFFS)
-#define ACLKGSB3 (CKC_BASE + ACLKGSB3_OFFS)
-#define ACLKTCT (CKC_BASE + ACLKTCT_OFFS)
-#define ACLKTCX (CKC_BASE + ACLKTCX_OFFS)
-#define ACLKTCZ (CKC_BASE + ACLKTCZ_OFFS)
-
-#define ACLK_MAX_DIV (0xfff + 1)
-
-/* Crystal frequencies */
-static unsigned long xi_rate, xti_rate;
-
-static void __iomem *pll_cfg_addr(int pll)
-{
- switch (pll) {
- case 0: return (CKC_BASE + PLL0CFG_OFFS);
- case 1: return (CKC_BASE + PLL1CFG_OFFS);
- case 2: return (CKC_BASE + PLL2CFG_OFFS);
- default:
- BUG();
- }
-}
-
-static int pll_enable(int pll, int enable)
-{
- u32 reg;
- void __iomem *addr = pll_cfg_addr(pll);
-
- reg = __raw_readl(addr);
- if (enable)
- reg &= ~PLLxCFG_PD;
- else
- reg |= PLLxCFG_PD;
-
- __raw_writel(reg, addr);
- return 0;
-}
-
-static int xi_enable(int enable)
-{
- u32 reg;
-
- reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
- if (enable)
- reg |= CLKCTRL_XE;
- else
- reg &= ~CLKCTRL_XE;
-
- __raw_writel(reg, CKC_BASE + CLKCTRL_OFFS);
- return 0;
-}
-
-static int root_clk_enable(enum root_clks src)
-{
- switch (src) {
- case CLK_SRC_PLL0: return pll_enable(0, 1);
- case CLK_SRC_PLL1: return pll_enable(1, 1);
- case CLK_SRC_PLL2: return pll_enable(2, 1);
- case CLK_SRC_XI: return xi_enable(1);
- default:
- BUG();
- }
- return 0;
-}
-
-static int root_clk_disable(enum root_clks src)
-{
- switch (src) {
- case CLK_SRC_PLL0: return pll_enable(0, 0);
- case CLK_SRC_PLL1: return pll_enable(1, 0);
- case CLK_SRC_PLL2: return pll_enable(2, 0);
- case CLK_SRC_XI: return xi_enable(0);
- default:
- BUG();
- }
- return 0;
-}
-
-static int enable_clk(struct clk *clk)
-{
- u32 reg;
-
- if (clk->root_id != CLK_SRC_NOROOT)
- return root_clk_enable(clk->root_id);
-
- if (clk->aclkreg) {
- reg = __raw_readl(clk->aclkreg);
- reg |= ACLK_EN;
- __raw_writel(reg, clk->aclkreg);
- }
- if (clk->bclkctr) {
- reg = __raw_readl(clk->bclkctr);
- reg |= 1 << clk->bclk_shift;
- __raw_writel(reg, clk->bclkctr);
- }
- return 0;
-}
-
-static void disable_clk(struct clk *clk)
-{
- u32 reg;
-
- if (clk->root_id != CLK_SRC_NOROOT) {
- root_clk_disable(clk->root_id);
- return;
- }
-
- if (clk->bclkctr) {
- reg = __raw_readl(clk->bclkctr);
- reg &= ~(1 << clk->bclk_shift);
- __raw_writel(reg, clk->bclkctr);
- }
- if (clk->aclkreg) {
- reg = __raw_readl(clk->aclkreg);
- reg &= ~ACLK_EN;
- __raw_writel(reg, clk->aclkreg);
- }
-}
-
-static unsigned long get_rate_pll(int pll)
-{
- u32 reg;
- unsigned long s, m, p;
- void __iomem *addr = pll_cfg_addr(pll);
-
- reg = __raw_readl(addr);
- s = (reg >> 16) & 0x07;
- m = (reg >> 8) & 0xff;
- p = reg & 0x3f;
-
- return (m * xi_rate) / (p * (1 << s));
-}
-
-static unsigned long get_rate_pll_div(int pll)
-{
- u32 reg;
- unsigned long div = 0;
- void __iomem *addr;
-
- switch (pll) {
- case 0:
- addr = CKC_BASE + CLKDIVC0_OFFS;
- reg = __raw_readl(addr);
- if (reg & CLKDIVC0_P0E)
- div = (reg >> 24) & 0x3f;
- break;
- case 1:
- addr = CKC_BASE + CLKDIVC0_OFFS;
- reg = __raw_readl(addr);
- if (reg & CLKDIVC0_P1E)
- div = (reg >> 16) & 0x3f;
- break;
- case 2:
- addr = CKC_BASE + CLKDIVC1_OFFS;
- reg = __raw_readl(addr);
- if (reg & CLKDIVC1_P2E)
- div = reg & 0x3f;
- break;
- }
- return get_rate_pll(pll) / (div + 1);
-}
-
-static unsigned long get_rate_xi_div(void)
-{
- unsigned long div = 0;
- u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
-
- if (reg & CLKDIVC0_XE)
- div = (reg >> 8) & 0x3f;
-
- return xi_rate / (div + 1);
-}
-
-static unsigned long get_rate_xti_div(void)
-{
- unsigned long div = 0;
- u32 reg = __raw_readl(CKC_BASE + CLKDIVC0_OFFS);
-
- if (reg & CLKDIVC0_XTE)
- div = reg & 0x3f;
-
- return xti_rate / (div + 1);
-}
-
-static unsigned long root_clk_get_rate(enum root_clks src)
-{
- switch (src) {
- case CLK_SRC_PLL0: return get_rate_pll(0);
- case CLK_SRC_PLL1: return get_rate_pll(1);
- case CLK_SRC_PLL2: return get_rate_pll(2);
- case CLK_SRC_PLL0DIV: return get_rate_pll_div(0);
- case CLK_SRC_PLL1DIV: return get_rate_pll_div(1);
- case CLK_SRC_PLL2DIV: return get_rate_pll_div(2);
- case CLK_SRC_XI: return xi_rate;
- case CLK_SRC_XTI: return xti_rate;
- case CLK_SRC_XIDIV: return get_rate_xi_div();
- case CLK_SRC_XTIDIV: return get_rate_xti_div();
- default: return 0;
- }
-}
-
-static unsigned long aclk_get_rate(struct clk *clk)
-{
- u32 reg;
- unsigned long div;
- unsigned int src;
-
- reg = __raw_readl(clk->aclkreg);
- div = reg & 0x0fff;
- src = (reg >> ACLK_SEL_SHIFT) & CLK_SRC_MASK;
- return root_clk_get_rate(src) / (div + 1);
-}
-
-static unsigned long aclk_best_div(struct clk *clk, unsigned long rate)
-{
- unsigned long div, src, freq, r1, r2;
-
- if (!rate)
- return ACLK_MAX_DIV;
-
- src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
- src &= CLK_SRC_MASK;
- freq = root_clk_get_rate(src);
- div = freq / rate;
- if (!div)
- return 1;
- if (div >= ACLK_MAX_DIV)
- return ACLK_MAX_DIV;
- r1 = freq / div;
- r2 = freq / (div + 1);
- if ((rate - r2) < (r1 - rate))
- return div + 1;
-
- return div;
-}
-
-static unsigned long aclk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int src;
-
- src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
- src &= CLK_SRC_MASK;
-
- return root_clk_get_rate(src) / aclk_best_div(clk, rate);
-}
-
-static int aclk_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
-
- reg = __raw_readl(clk->aclkreg) & ~ACLK_DIV_MASK;
- reg |= aclk_best_div(clk, rate) - 1;
- __raw_writel(reg, clk->aclkreg);
- return 0;
-}
-
-static unsigned long get_rate_sys(struct clk *clk)
-{
- unsigned int src;
-
- src = __raw_readl(CKC_BASE + CLKCTRL_OFFS) & CLK_SRC_MASK;
- return root_clk_get_rate(src);
-}
-
-static unsigned long get_rate_bus(struct clk *clk)
-{
- unsigned int reg, sdiv, bdiv, rate;
-
- reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
- rate = get_rate_sys(clk);
- sdiv = (reg >> 20) & 3;
- if (sdiv)
- rate /= sdiv + 1;
- bdiv = (reg >> 4) & 0xff;
- if (bdiv)
- rate /= bdiv + 1;
- return rate;
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
- unsigned int reg, div, fsys, fbus;
-
- fbus = get_rate_bus(clk);
- reg = __raw_readl(CKC_BASE + CLKCTRL_OFFS);
- if (reg & (1 << 29))
- return fbus;
- fsys = get_rate_sys(clk);
- div = (reg >> 16) & 0x0f;
- return fbus + ((fsys - fbus) * (div + 1)) / 16;
-}
-
-static unsigned long get_rate_root(struct clk *clk)
-{
- return root_clk_get_rate(clk->root_id);
-}
-
-static int aclk_set_parent(struct clk *clock, struct clk *parent)
-{
- u32 reg;
-
- if (clock->parent == parent)
- return 0;
-
- clock->parent = parent;
-
- if (!parent)
- return 0;
-
- if (parent->root_id == CLK_SRC_NOROOT)
- return 0;
- reg = __raw_readl(clock->aclkreg);
- reg &= ~ACLK_SEL_MASK;
- reg |= (parent->root_id << ACLK_SEL_SHIFT) & ACLK_SEL_MASK;
- __raw_writel(reg, clock->aclkreg);
-
- return 0;
-}
-
-#define DEFINE_ROOT_CLOCK(name, ri, p) \
- static struct clk name = { \
- .root_id = ri, \
- .get_rate = get_rate_root, \
- .enable = enable_clk, \
- .disable = disable_clk, \
- .parent = p, \
- };
-
-#define DEFINE_SPECIAL_CLOCK(name, gr, p) \
- static struct clk name = { \
- .root_id = CLK_SRC_NOROOT, \
- .get_rate = gr, \
- .parent = p, \
- };
-
-#define DEFINE_ACLOCK(name, bc, bs, ar) \
- static struct clk name = { \
- .root_id = CLK_SRC_NOROOT, \
- .bclkctr = bc, \
- .bclk_shift = bs, \
- .aclkreg = ar, \
- .get_rate = aclk_get_rate, \
- .set_rate = aclk_set_rate, \
- .round_rate = aclk_round_rate, \
- .enable = enable_clk, \
- .disable = disable_clk, \
- .set_parent = aclk_set_parent, \
- };
-
-#define DEFINE_BCLOCK(name, bc, bs, gr, p) \
- static struct clk name = { \
- .root_id = CLK_SRC_NOROOT, \
- .bclkctr = bc, \
- .bclk_shift = bs, \
- .get_rate = gr, \
- .enable = enable_clk, \
- .disable = disable_clk, \
- .parent = p, \
- };
-
-DEFINE_ROOT_CLOCK(xi, CLK_SRC_XI, NULL)
-DEFINE_ROOT_CLOCK(xti, CLK_SRC_XTI, NULL)
-DEFINE_ROOT_CLOCK(xidiv, CLK_SRC_XIDIV, &xi)
-DEFINE_ROOT_CLOCK(xtidiv, CLK_SRC_XTIDIV, &xti)
-DEFINE_ROOT_CLOCK(pll0, CLK_SRC_PLL0, &xi)
-DEFINE_ROOT_CLOCK(pll1, CLK_SRC_PLL1, &xi)
-DEFINE_ROOT_CLOCK(pll2, CLK_SRC_PLL2, &xi)
-DEFINE_ROOT_CLOCK(pll0div, CLK_SRC_PLL0DIV, &pll0)
-DEFINE_ROOT_CLOCK(pll1div, CLK_SRC_PLL1DIV, &pll1)
-DEFINE_ROOT_CLOCK(pll2div, CLK_SRC_PLL2DIV, &pll2)
-
-/* The following 3 clocks are special and are initialized explicitly later */
-DEFINE_SPECIAL_CLOCK(sys, get_rate_sys, NULL)
-DEFINE_SPECIAL_CLOCK(bus, get_rate_bus, &sys)
-DEFINE_SPECIAL_CLOCK(cpu, get_rate_cpu, &sys)
-
-DEFINE_ACLOCK(tct, NULL, 0, ACLKTCT)
-DEFINE_ACLOCK(tcx, NULL, 0, ACLKTCX)
-DEFINE_ACLOCK(tcz, NULL, 0, ACLKTCZ)
-DEFINE_ACLOCK(ref, NULL, 0, ACLKREF)
-DEFINE_ACLOCK(uart0, BCLKCTR0, 5, ACLKUART0)
-DEFINE_ACLOCK(uart1, BCLKCTR0, 23, ACLKUART1)
-DEFINE_ACLOCK(uart2, BCLKCTR0, 6, ACLKUART2)
-DEFINE_ACLOCK(uart3, BCLKCTR0, 8, ACLKUART3)
-DEFINE_ACLOCK(uart4, BCLKCTR1, 6, ACLKUART4)
-DEFINE_ACLOCK(i2c, BCLKCTR0, 7, ACLKI2C)
-DEFINE_ACLOCK(adc, BCLKCTR0, 10, ACLKADC)
-DEFINE_ACLOCK(usbh0, BCLKCTR0, 11, ACLKUSBH)
-DEFINE_ACLOCK(lcd, BCLKCTR0, 13, ACLKLCD)
-DEFINE_ACLOCK(sd0, BCLKCTR0, 17, ACLKSDH0)
-DEFINE_ACLOCK(sd1, BCLKCTR1, 5, ACLKSDH1)
-DEFINE_ACLOCK(spi0, BCLKCTR0, 24, ACLKSPI0)
-DEFINE_ACLOCK(spi1, BCLKCTR0, 30, ACLKSPI1)
-DEFINE_ACLOCK(spdif, BCLKCTR1, 2, ACLKSPDIF)
-DEFINE_ACLOCK(c3dec, BCLKCTR1, 9, ACLKC3DEC)
-DEFINE_ACLOCK(can0, BCLKCTR1, 10, ACLKCAN0)
-DEFINE_ACLOCK(can1, BCLKCTR1, 11, ACLKCAN1)
-DEFINE_ACLOCK(gsb0, BCLKCTR1, 13, ACLKGSB0)
-DEFINE_ACLOCK(gsb1, BCLKCTR1, 14, ACLKGSB1)
-DEFINE_ACLOCK(gsb2, BCLKCTR1, 15, ACLKGSB2)
-DEFINE_ACLOCK(gsb3, BCLKCTR1, 16, ACLKGSB3)
-DEFINE_ACLOCK(usbh1, BCLKCTR1, 20, ACLKUSBH)
-
-DEFINE_BCLOCK(dai0, BCLKCTR0, 0, NULL, NULL)
-DEFINE_BCLOCK(pic, BCLKCTR0, 1, NULL, NULL)
-DEFINE_BCLOCK(tc, BCLKCTR0, 2, NULL, NULL)
-DEFINE_BCLOCK(gpio, BCLKCTR0, 3, NULL, NULL)
-DEFINE_BCLOCK(usbd, BCLKCTR0, 4, NULL, NULL)
-DEFINE_BCLOCK(ecc, BCLKCTR0, 9, NULL, NULL)
-DEFINE_BCLOCK(gdma0, BCLKCTR0, 12, NULL, NULL)
-DEFINE_BCLOCK(rtc, BCLKCTR0, 15, NULL, NULL)
-DEFINE_BCLOCK(nfc, BCLKCTR0, 16, NULL, NULL)
-DEFINE_BCLOCK(g2d, BCLKCTR0, 18, NULL, NULL)
-DEFINE_BCLOCK(gdma1, BCLKCTR0, 22, NULL, NULL)
-DEFINE_BCLOCK(mscl, BCLKCTR0, 25, NULL, NULL)
-DEFINE_BCLOCK(bdma, BCLKCTR1, 0, NULL, NULL)
-DEFINE_BCLOCK(adma0, BCLKCTR1, 1, NULL, NULL)
-DEFINE_BCLOCK(scfg, BCLKCTR1, 3, NULL, NULL)
-DEFINE_BCLOCK(cid, BCLKCTR1, 4, NULL, NULL)
-DEFINE_BCLOCK(dai1, BCLKCTR1, 7, NULL, NULL)
-DEFINE_BCLOCK(adma1, BCLKCTR1, 8, NULL, NULL)
-DEFINE_BCLOCK(gps, BCLKCTR1, 12, NULL, NULL)
-DEFINE_BCLOCK(gdma2, BCLKCTR1, 17, NULL, NULL)
-DEFINE_BCLOCK(gdma3, BCLKCTR1, 18, NULL, NULL)
-DEFINE_BCLOCK(ddrc, BCLKCTR1, 19, NULL, NULL)
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK(NULL, "bus", bus)
- _REGISTER_CLOCK(NULL, "cpu", cpu)
- _REGISTER_CLOCK(NULL, "tct", tct)
- _REGISTER_CLOCK(NULL, "tcx", tcx)
- _REGISTER_CLOCK(NULL, "tcz", tcz)
- _REGISTER_CLOCK(NULL, "ref", ref)
- _REGISTER_CLOCK(NULL, "dai0", dai0)
- _REGISTER_CLOCK(NULL, "pic", pic)
- _REGISTER_CLOCK(NULL, "tc", tc)
- _REGISTER_CLOCK(NULL, "gpio", gpio)
- _REGISTER_CLOCK(NULL, "usbd", usbd)
- _REGISTER_CLOCK("tcc-uart.0", NULL, uart0)
- _REGISTER_CLOCK("tcc-uart.2", NULL, uart2)
- _REGISTER_CLOCK("tcc-i2c", NULL, i2c)
- _REGISTER_CLOCK("tcc-uart.3", NULL, uart3)
- _REGISTER_CLOCK(NULL, "ecc", ecc)
- _REGISTER_CLOCK(NULL, "adc", adc)
- _REGISTER_CLOCK("tcc-usbh.0", "usb", usbh0)
- _REGISTER_CLOCK(NULL, "gdma0", gdma0)
- _REGISTER_CLOCK(NULL, "lcd", lcd)
- _REGISTER_CLOCK(NULL, "rtc", rtc)
- _REGISTER_CLOCK(NULL, "nfc", nfc)
- _REGISTER_CLOCK("tcc-mmc.0", NULL, sd0)
- _REGISTER_CLOCK(NULL, "g2d", g2d)
- _REGISTER_CLOCK(NULL, "gdma1", gdma1)
- _REGISTER_CLOCK("tcc-uart.1", NULL, uart1)
- _REGISTER_CLOCK("tcc-spi.0", NULL, spi0)
- _REGISTER_CLOCK(NULL, "mscl", mscl)
- _REGISTER_CLOCK("tcc-spi.1", NULL, spi1)
- _REGISTER_CLOCK(NULL, "bdma", bdma)
- _REGISTER_CLOCK(NULL, "adma0", adma0)
- _REGISTER_CLOCK(NULL, "spdif", spdif)
- _REGISTER_CLOCK(NULL, "scfg", scfg)
- _REGISTER_CLOCK(NULL, "cid", cid)
- _REGISTER_CLOCK("tcc-mmc.1", NULL, sd1)
- _REGISTER_CLOCK("tcc-uart.4", NULL, uart4)
- _REGISTER_CLOCK(NULL, "dai1", dai1)
- _REGISTER_CLOCK(NULL, "adma1", adma1)
- _REGISTER_CLOCK(NULL, "c3dec", c3dec)
- _REGISTER_CLOCK("tcc-can.0", NULL, can0)
- _REGISTER_CLOCK("tcc-can.1", NULL, can1)
- _REGISTER_CLOCK(NULL, "gps", gps)
- _REGISTER_CLOCK("tcc-gsb.0", NULL, gsb0)
- _REGISTER_CLOCK("tcc-gsb.1", NULL, gsb1)
- _REGISTER_CLOCK("tcc-gsb.2", NULL, gsb2)
- _REGISTER_CLOCK("tcc-gsb.3", NULL, gsb3)
- _REGISTER_CLOCK(NULL, "gdma2", gdma2)
- _REGISTER_CLOCK(NULL, "gdma3", gdma3)
- _REGISTER_CLOCK(NULL, "ddrc", ddrc)
- _REGISTER_CLOCK("tcc-usbh.1", "usb", usbh1)
-};
-
-static struct clk *root_clk_by_index(enum root_clks src)
-{
- switch (src) {
- case CLK_SRC_PLL0: return &pll0;
- case CLK_SRC_PLL1: return &pll1;
- case CLK_SRC_PLL2: return &pll2;
- case CLK_SRC_PLL0DIV: return &pll0div;
- case CLK_SRC_PLL1DIV: return &pll1div;
- case CLK_SRC_PLL2DIV: return &pll2div;
- case CLK_SRC_XI: return &xi;
- case CLK_SRC_XTI: return &xti;
- case CLK_SRC_XIDIV: return &xidiv;
- case CLK_SRC_XTIDIV: return &xtidiv;
- default: return NULL;
- }
-}
-
-static void find_aclk_parent(struct clk *clk)
-{
- unsigned int src;
- struct clk *clock;
-
- if (!clk->aclkreg)
- return;
-
- src = __raw_readl(clk->aclkreg) >> ACLK_SEL_SHIFT;
- src &= CLK_SRC_MASK;
-
- clock = root_clk_by_index(src);
- if (!clock)
- return;
-
- clk->parent = clock;
- clk->set_parent = aclk_set_parent;
-}
-
-void __init tcc_clocks_init(unsigned long xi_freq, unsigned long xti_freq)
-{
- int i;
-
- xi_rate = xi_freq;
- xti_rate = xti_freq;
-
- /* fixup parents and add the clock */
- for (i = 0; i < ARRAY_SIZE(lookups); i++) {
- find_aclk_parent(lookups[i].clk);
- clkdev_add(&lookups[i]);
- }
- tcc8k_timer_init(&tcz, (void __iomem *)TIMER_BASE, INT_TC32);
-}
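For reference, get_rate_pll() above decodes a PLLxCFG word as S (bits 18:16), M (bits 15:8) and P (bits 5:0), i.e. Fout = (M * Fxi) / (P * 2^S). With the 12 MHz crystal (XI_FREQUENCY) and the 0x00026003 value written to PLL0CFG in tcc8k_map_io(), that is 96 * 12 MHz / (3 * 2^2) = 96 MHz, matching the "set PLL0 clock to 96MHz" comment in the removed board file. A self-contained sketch of the same decode (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same field layout as get_rate_pll() in the removed clock.c */
    static unsigned long pll_rate(uint32_t cfg, unsigned long xi_rate)
    {
            unsigned long s = (cfg >> 16) & 0x07;
            unsigned long m = (cfg >> 8) & 0xff;
            unsigned long p = cfg & 0x3f;

            return (m * xi_rate) / (p * (1UL << s));
    }

    int main(void)
    {
            /* PLLxCFG values programmed by tcc8k_map_io(), 12 MHz crystal */
            printf("PLL0: %lu Hz\n", pll_rate(0x00026003, 12000000)); /* 96 MHz */
            printf("PLL1: %lu Hz\n", pll_rate(0x00016003, 12000000)); /* 192 MHz */
            printf("PLL2: %lu Hz\n", pll_rate(0x00036003, 12000000)); /* 48 MHz */
            return 0;
    }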
diff --git a/arch/arm/mach-tcc8k/common.h b/arch/arm/mach-tcc8k/common.h
deleted file mode 100644
index 705690add39..00000000000
--- a/arch/arm/mach-tcc8k/common.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef MACH_TCC8K_COMMON_H
-#define MACH_TCC8K_COMMON_H
-
-#include <linux/platform_device.h>
-
-extern struct platform_device tcc_nand_device;
-
-struct clk;
-
-extern void tcc_clocks_init(unsigned long xi_freq, unsigned long xti_freq);
-extern void tcc8k_timer_init(struct clk *clock, void __iomem *base, int irq);
-extern void tcc8k_init_irq(void);
-extern void tcc8k_map_common_io(void);
-
-#endif
diff --git a/arch/arm/mach-tcc8k/devices.c b/arch/arm/mach-tcc8k/devices.c
deleted file mode 100644
index 6722ad7c283..00000000000
--- a/arch/arm/mach-tcc8k/devices.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * linux/arch/arm/mach-tcc8k/devices.c
- *
- * Copyright (C) Telechips, Inc.
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of GPL v2.
- *
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-static u64 tcc8k_dmamask = DMA_BIT_MASK(32);
-
-#ifdef CONFIG_MTD_NAND_TCC
-/* NAND controller */
-static struct resource tcc_nand_resources[] = {
- {
- .start = (resource_size_t)NFC_BASE,
- .end = (resource_size_t)NFC_BASE + 0x7f,
- .flags = IORESOURCE_MEM,
- }, {
- .start = INT_NFC,
- .end = INT_NFC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device tcc_nand_device = {
- .name = "tcc_nand",
- .id = 0,
- .num_resources = ARRAY_SIZE(tcc_nand_resources),
- .resource = tcc_nand_resources,
-};
-#endif
-
-#ifdef CONFIG_MMC_TCC8K
-/* MMC controller */
-static struct resource tcc8k_mmc0_resource[] = {
- {
- .start = INT_SD0,
- .end = INT_SD0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource tcc8k_mmc1_resource[] = {
- {
- .start = INT_SD1,
- .end = INT_SD1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device tcc8k_mmc0_device = {
- .name = "tcc-mmc",
- .id = 0,
- .num_resources = ARRAY_SIZE(tcc8k_mmc0_resource),
- .resource = tcc8k_mmc0_resource,
- .dev = {
- .dma_mask = &tcc8k_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
-
-struct platform_device tcc8k_mmc1_device = {
- .name = "tcc-mmc",
- .id = 1,
- .num_resources = ARRAY_SIZE(tcc8k_mmc1_resource),
- .resource = tcc8k_mmc1_resource,
- .dev = {
- .dma_mask = &tcc8k_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- }
-};
-
-static inline void tcc8k_init_mmc(void)
-{
- u32 reg = __raw_readl(GPIOPS_BASE + GPIOPS_FS1_OFFS);
-
- reg |= GPIOPS_FS1_SDH0_BITS | GPIOPS_FS1_SDH1_BITS;
- __raw_writel(reg, GPIOPS_BASE + GPIOPS_FS1_OFFS);
-
- platform_device_register(&tcc8k_mmc0_device);
- platform_device_register(&tcc8k_mmc1_device);
-}
-#else
-static inline void tcc8k_init_mmc(void) { }
-#endif
-
-#ifdef CONFIG_USB_OHCI_HCD
-static int tcc8k_ohci_init(struct device *dev)
-{
- u32 reg;
-
- /* Use GPIO PK19 as VBUS control output */
- reg = __raw_readl(GPIOPK_BASE + GPIOPK_FS0_OFFS);
- reg &= ~(1 << 19);
- __raw_writel(reg, GPIOPK_BASE + GPIOPK_FS0_OFFS);
- reg = __raw_readl(GPIOPK_BASE + GPIOPK_FS1_OFFS);
- reg &= ~(1 << 19);
- __raw_writel(reg, GPIOPK_BASE + GPIOPK_FS1_OFFS);
-
- reg = __raw_readl(GPIOPK_BASE + GPIOPK_DOE_OFFS);
- reg |= (1 << 19);
- __raw_writel(reg, GPIOPK_BASE + GPIOPK_DOE_OFFS);
- /* Turn on VBUS */
- reg = __raw_readl(GPIOPK_BASE + GPIOPK_DAT_OFFS);
- reg |= (1 << 19);
- __raw_writel(reg, GPIOPK_BASE + GPIOPK_DAT_OFFS);
-
- return 0;
-}
-
-static struct resource tcc8k_ohci0_resources[] = {
- [0] = {
- .start = (resource_size_t)USBH0_BASE,
- .end = (resource_size_t)USBH0_BASE + 0x5c,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = INT_USBH0,
- .end = INT_USBH0,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource tcc8k_ohci1_resources[] = {
- [0] = {
- .start = (resource_size_t)USBH1_BASE,
- .end = (resource_size_t)USBH1_BASE + 0x5c,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = INT_USBH1,
- .end = INT_USBH1,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct tccohci_platform_data tcc8k_ohci0_platform_data = {
- .controller = 0,
- .port_mode = PMM_PERPORT_MODE,
- .init = tcc8k_ohci_init,
-};
-
-static struct tccohci_platform_data tcc8k_ohci1_platform_data = {
- .controller = 1,
- .port_mode = PMM_PERPORT_MODE,
- .init = tcc8k_ohci_init,
-};
-
-static struct platform_device ohci0_device = {
- .name = "tcc-ohci",
- .id = 0,
- .dev = {
- .dma_mask = &tcc8k_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &tcc8k_ohci0_platform_data,
- },
- .num_resources = ARRAY_SIZE(tcc8k_ohci0_resources),
- .resource = tcc8k_ohci0_resources,
-};
-
-static struct platform_device ohci1_device = {
- .name = "tcc-ohci",
- .id = 1,
- .dev = {
- .dma_mask = &tcc8k_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &tcc8k_ohci1_platform_data,
- },
- .num_resources = ARRAY_SIZE(tcc8k_ohci1_resources),
- .resource = tcc8k_ohci1_resources,
-};
-
-static void __init tcc8k_init_usbhost(void)
-{
- platform_device_register(&ohci0_device);
- platform_device_register(&ohci1_device);
-}
-#else
-static void __init tcc8k_init_usbhost(void) { }
-#endif
-
-/* USB device controller*/
-#ifdef CONFIG_USB_GADGET_TCC8K
-static struct resource udc_resources[] = {
- [0] = {
- .start = INT_USBD,
- .end = INT_USBD,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = INT_UDMA,
- .end = INT_UDMA,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device tcc8k_udc_device = {
- .name = "tcc-udc",
- .id = 0,
- .resource = udc_resources,
- .num_resources = ARRAY_SIZE(udc_resources),
- .dev = {
- .dma_mask = &tcc8k_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static void __init tcc8k_init_usb_gadget(void)
-{
- platform_device_register(&tcc8k_udc_device);
-}
-#else
-static void __init tcc8k_init_usb_gadget(void) { }
-#endif /* CONFIG_USB_GADGET_TCC83X */
-
-static int __init tcc8k_init_devices(void)
-{
- tcc8k_init_mmc();
- tcc8k_init_usbhost();
- tcc8k_init_usb_gadget();
- return 0;
-}
-
-arch_initcall(tcc8k_init_devices);
diff --git a/arch/arm/mach-tcc8k/io.c b/arch/arm/mach-tcc8k/io.c
deleted file mode 100644
index 9b39d7fa658..00000000000
--- a/arch/arm/mach-tcc8k/io.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * linux/arch/arm/mach-tcc8k/io.c
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * derived from TCC83xx io.c
- * Copyright (C) Telechips, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/tcc8k-regs.h>
-
-/*
- * The machine specific code may provide the extra mapping besides the
- * default mapping provided here.
- */
-static struct map_desc tcc8k_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)CS1_BASE_VIRT,
- .pfn = __phys_to_pfn(CS1_BASE),
- .length = CS1_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)AHB_PERI_BASE_VIRT,
- .pfn = __phys_to_pfn(AHB_PERI_BASE),
- .length = AHB_PERI_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)APB0_PERI_BASE_VIRT,
- .pfn = __phys_to_pfn(APB0_PERI_BASE),
- .length = APB0_PERI_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)APB1_PERI_BASE_VIRT,
- .pfn = __phys_to_pfn(APB1_PERI_BASE),
- .length = APB1_PERI_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)EXT_MEM_CTRL_BASE_VIRT,
- .pfn = __phys_to_pfn(EXT_MEM_CTRL_BASE),
- .length = EXT_MEM_CTRL_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-/*
- * Maps common IO regions for tcc8k.
- *
- */
-void __init tcc8k_map_common_io(void)
-{
- iotable_init(tcc8k_io_desc, ARRAY_SIZE(tcc8k_io_desc));
-}
diff --git a/arch/arm/mach-tcc8k/irq.c b/arch/arm/mach-tcc8k/irq.c
deleted file mode 100644
index 209fa5c65d4..00000000000
--- a/arch/arm/mach-tcc8k/irq.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) Telechips, Inc.
- * Copyright (C) 2009-2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU GPL version 2.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-/* Disable IRQ */
-static void tcc8000_mask_ack_irq0(struct irq_data *d)
-{
- PIC0_IEN &= ~(1 << d->irq);
- PIC0_CREQ |= (1 << d->irq);
-}
-
-static void tcc8000_mask_ack_irq1(struct irq_data *d)
-{
- PIC1_IEN &= ~(1 << (d->irq - 32));
- PIC1_CREQ |= (1 << (d->irq - 32));
-}
-
-static void tcc8000_mask_irq0(struct irq_data *d)
-{
- PIC0_IEN &= ~(1 << d->irq);
-}
-
-static void tcc8000_mask_irq1(struct irq_data *d)
-{
- PIC1_IEN &= ~(1 << (d->irq - 32));
-}
-
-static void tcc8000_ack_irq0(struct irq_data *d)
-{
- PIC0_CREQ |= (1 << d->irq);
-}
-
-static void tcc8000_ack_irq1(struct irq_data *d)
-{
- PIC1_CREQ |= (1 << (d->irq - 32));
-}
-
-/* Enable IRQ */
-static void tcc8000_unmask_irq0(struct irq_data *d)
-{
- PIC0_IEN |= (1 << d->irq);
- PIC0_INTOEN |= (1 << d->irq);
-}
-
-static void tcc8000_unmask_irq1(struct irq_data *d)
-{
- PIC1_IEN |= (1 << (d->irq - 32));
- PIC1_INTOEN |= (1 << (d->irq - 32));
-}
-
-static struct irq_chip tcc8000_irq_chip0 = {
- .name = "tcc_irq0",
- .irq_mask = tcc8000_mask_irq0,
- .irq_ack = tcc8000_ack_irq0,
- .irq_mask_ack = tcc8000_mask_ack_irq0,
- .irq_unmask = tcc8000_unmask_irq0,
-};
-
-static struct irq_chip tcc8000_irq_chip1 = {
- .name = "tcc_irq1",
- .irq_mask = tcc8000_mask_irq1,
- .irq_ack = tcc8000_ack_irq1,
- .irq_mask_ack = tcc8000_mask_ack_irq1,
- .irq_unmask = tcc8000_unmask_irq1,
-};
-
-void __init tcc8k_init_irq(void)
-{
- int irqno;
-
- /* Mask and clear all interrupts */
- PIC0_IEN = 0x00000000;
- PIC0_CREQ = 0xffffffff;
- PIC1_IEN = 0x00000000;
- PIC1_CREQ = 0xffffffff;
-
- PIC0_MEN0 = 0x00000003;
- PIC1_MEN1 = 0x00000003;
- PIC1_MEN = 0x00000003;
-
- /* let all IRQs be level triggered */
- PIC0_TMODE = 0xffffffff;
- PIC1_TMODE = 0xffffffff;
- /* all IRQs are IRQs (not FIQs) */
- PIC0_IRQSEL = 0xffffffff;
- PIC1_IRQSEL = 0xffffffff;
-
- for (irqno = 0; irqno < NR_IRQS; irqno++) {
- if (irqno < 32)
- irq_set_chip(irqno, &tcc8000_irq_chip0);
- else
- irq_set_chip(irqno, &tcc8000_irq_chip1);
- irq_set_handler(irqno, handle_level_irq);
- set_irq_flags(irqno, IRQF_VALID);
- }
-}
diff --git a/arch/arm/mach-tcc8k/time.c b/arch/arm/mach-tcc8k/time.c
deleted file mode 100644
index a96babe8377..00000000000
--- a/arch/arm/mach-tcc8k/time.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * TCC8000 system timer setup
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL version 2.
- *
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-
-#include <asm/mach/time.h>
-
-#include <mach/tcc8k-regs.h>
-#include <mach/irqs.h>
-
-#include "common.h"
-
-static void __iomem *timer_base;
-
-static int tcc_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long reg = __raw_readl(timer_base + TC32MCNT_OFFS);
-
- __raw_writel(reg + evt, timer_base + TC32CMP0_OFFS);
- return 0;
-}
-
-static void tcc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- unsigned long tc32irq;
-
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- tc32irq = __raw_readl(timer_base + TC32IRQ_OFFS);
- tc32irq |= TC32IRQ_IRQEN0;
- __raw_writel(tc32irq, timer_base + TC32IRQ_OFFS);
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- tc32irq = __raw_readl(timer_base + TC32IRQ_OFFS);
- tc32irq &= ~TC32IRQ_IRQEN0;
- __raw_writel(tc32irq, timer_base + TC32IRQ_OFFS);
- break;
- case CLOCK_EVT_MODE_PERIODIC:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
-}
-
-static irqreturn_t tcc8k_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- /* Acknowledge TC32 interrupt by reading TC32IRQ */
- __raw_readl(timer_base + TC32IRQ_OFFS);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_tcc = {
- .name = "tcc_timer1",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
- .set_mode = tcc_set_mode,
- .set_next_event = tcc_set_next_event,
- .rating = 200,
-};
-
-static struct irqaction tcc8k_timer_irq = {
- .name = "TC32_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER,
- .handler = tcc8k_timer_interrupt,
- .dev_id = &clockevent_tcc,
-};
-
-static int __init tcc_clockevent_init(struct clk *clock)
-{
- unsigned int c = clk_get_rate(clock);
-
- clocksource_mmio_init(timer_base + TC32MCNT_OFFS, "tcc_tc32", c,
- 200, 32, clocksource_mmio_readl_up);
-
- clockevent_tcc.mult = div_sc(c, NSEC_PER_SEC,
- clockevent_tcc.shift);
- clockevent_tcc.max_delta_ns =
- clockevent_delta2ns(0xfffffffe, &clockevent_tcc);
- clockevent_tcc.min_delta_ns =
- clockevent_delta2ns(0xff, &clockevent_tcc);
-
- clockevent_tcc.cpumask = cpumask_of(0);
-
- clockevents_register_device(&clockevent_tcc);
-
- return 0;
-}
-
-void __init tcc8k_timer_init(struct clk *clock, void __iomem *base, int irq)
-{
- u32 reg;
-
- timer_base = base;
- tcc8k_timer_irq.irq = irq;
-
- /* Enable clocks */
- clk_enable(clock);
-
- /* Initialize 32-bit timer */
- reg = __raw_readl(timer_base + TC32EN_OFFS);
- reg &= ~TC32EN_ENABLE; /* Disable timer */
- __raw_writel(reg, timer_base + TC32EN_OFFS);
- /* Free running timer, counting from 0 to 0xffffffff */
- __raw_writel(0, timer_base + TC32EN_OFFS);
- __raw_writel(0, timer_base + TC32LDV_OFFS);
- reg = __raw_readl(timer_base + TC32IRQ_OFFS);
- reg |= TC32IRQ_IRQEN0; /* irq at match with CMP0 */
- __raw_writel(reg, timer_base + TC32IRQ_OFFS);
-
- __raw_writel(TC32EN_ENABLE, timer_base + TC32EN_OFFS);
-
- tcc_clockevent_init(clock);
- setup_irq(irq, &tcc8k_timer_irq);
-}
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 91aff7cb828..373652d76b9 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -2,11 +2,8 @@ if ARCH_TEGRA
comment "NVIDIA Tegra options"
-choice
- prompt "Select Tegra processor family for target system"
-
config ARCH_TEGRA_2x_SOC
- bool "Tegra 2 family"
+ bool "Enable support for Tegra20 family"
select CPU_V7
select ARM_GIC
select ARCH_REQUIRE_GPIOLIB
@@ -17,22 +14,36 @@ config ARCH_TEGRA_2x_SOC
Support for NVIDIA Tegra AP20 and T20 processors, based on the
ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
-endchoice
+config ARCH_TEGRA_3x_SOC
+ bool "Enable support for Tegra30 family"
+ select CPU_V7
+ select ARM_GIC
+ select ARCH_REQUIRE_GPIOLIB
+ select USB_ARCH_HAS_EHCI if USB_SUPPORT
+ select USB_ULPI if USB_SUPPORT
+ select USB_ULPI_VIEWPORT if USB_SUPPORT
+ select USE_OF
+ help
+ Support for NVIDIA Tegra T30 processor family, based on the
+ ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
config TEGRA_PCI
bool "PCI Express support"
+ depends on ARCH_TEGRA_2x_SOC
select PCI
comment "Tegra board type"
config MACH_HARMONY
bool "Harmony board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
Support for nVidia Harmony development platform
config MACH_KAEN
bool "Kaen board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_SEABOARD
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
@@ -40,11 +51,13 @@ config MACH_KAEN
config MACH_PAZ00
bool "Paz00 board"
+ depends on ARCH_TEGRA_2x_SOC
help
Support for the Toshiba AC100/Dynabook AZ netbook
config MACH_SEABOARD
bool "Seaboard board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
Support for nVidia Seaboard development platform. It will
@@ -52,25 +65,29 @@ config MACH_SEABOARD
have large similarities with the seaboard design.
config MACH_TEGRA_DT
- bool "Generic Tegra board (FDT support)"
+ bool "Generic Tegra20 board (FDT support)"
+ depends on ARCH_TEGRA_2x_SOC
select USE_OF
help
- Support for generic nVidia Tegra boards using Flattened Device Tree
+ Support for generic NVIDIA Tegra20 boards using Flattened Device Tree
config MACH_TRIMSLICE
bool "TrimSlice board"
+ depends on ARCH_TEGRA_2x_SOC
select TEGRA_PCI
help
Support for CompuLab TrimSlice platform
config MACH_WARIO
bool "Wario board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_SEABOARD
help
Support for the Wario version of Seaboard
config MACH_VENTANA
bool "Ventana board"
+ depends on ARCH_TEGRA_2x_SOC
select MACH_TEGRA_DT
help
Support for the nVidia Ventana development platform
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 91a07e18720..e120ff54f66 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -1,3 +1,4 @@
+obj-y += board-pinmux.o
obj-y += common.o
obj-y += devices.o
obj-y += io.o
@@ -5,12 +6,13 @@ obj-y += irq.o
obj-y += clock.o
obj-y += timer.o
obj-y += pinmux.o
-obj-y += powergate.o
obj-y += fuse.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clock.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += powergate.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-t2-tables.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += pinmux-tegra20-tables.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pinmux-tegra30-tables.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += board-dt-tegra30.o
obj-$(CONFIG_SMP) += platsmp.o localtimer.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
@@ -18,20 +20,22 @@ obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
-obj-${CONFIG_MACH_HARMONY} += board-harmony.o
-obj-${CONFIG_MACH_HARMONY} += board-harmony-pinmux.o
-obj-${CONFIG_MACH_HARMONY} += board-harmony-pcie.o
-obj-${CONFIG_MACH_HARMONY} += board-harmony-power.o
+obj-$(CONFIG_MACH_HARMONY) += board-harmony.o
+obj-$(CONFIG_MACH_HARMONY) += board-harmony-pinmux.o
+obj-$(CONFIG_MACH_HARMONY) += board-harmony-pcie.o
+obj-$(CONFIG_MACH_HARMONY) += board-harmony-power.o
-obj-${CONFIG_MACH_PAZ00} += board-paz00.o
-obj-${CONFIG_MACH_PAZ00} += board-paz00-pinmux.o
+obj-$(CONFIG_MACH_PAZ00) += board-paz00.o
+obj-$(CONFIG_MACH_PAZ00) += board-paz00-pinmux.o
-obj-${CONFIG_MACH_SEABOARD} += board-seaboard.o
-obj-${CONFIG_MACH_SEABOARD} += board-seaboard-pinmux.o
+obj-$(CONFIG_MACH_SEABOARD) += board-seaboard.o
+obj-$(CONFIG_MACH_SEABOARD) += board-seaboard-pinmux.o
-obj-${CONFIG_MACH_TEGRA_DT} += board-dt.o
-obj-${CONFIG_MACH_TEGRA_DT} += board-harmony-pinmux.o
-obj-${CONFIG_MACH_TEGRA_DT} += board-seaboard-pinmux.o
+obj-$(CONFIG_MACH_TEGRA_DT) += board-dt-tegra20.o
+obj-$(CONFIG_MACH_TEGRA_DT) += board-harmony-pinmux.o
+obj-$(CONFIG_MACH_TEGRA_DT) += board-seaboard-pinmux.o
+obj-$(CONFIG_MACH_TEGRA_DT) += board-paz00-pinmux.o
+obj-$(CONFIG_MACH_TEGRA_DT) += board-trimslice-pinmux.o
-obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice.o
-obj-${CONFIG_MACH_TRIMSLICE} += board-trimslice-pinmux.o
+obj-$(CONFIG_MACH_TRIMSLICE) += board-trimslice.o
+obj-$(CONFIG_MACH_TRIMSLICE) += board-trimslice-pinmux.o
diff --git a/arch/arm/mach-tegra/Makefile.boot b/arch/arm/mach-tegra/Makefile.boot
index bd12c9fb81e..9a82094092d 100644
--- a/arch/arm/mach-tegra/Makefile.boot
+++ b/arch/arm/mach-tegra/Makefile.boot
@@ -3,5 +3,8 @@ params_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00000100
initrd_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00800000
dtb-$(CONFIG_MACH_HARMONY) += tegra-harmony.dtb
+dtb-$(CONFIG_MACH_PAZ00) += tegra-paz00.dtb
dtb-$(CONFIG_MACH_SEABOARD) += tegra-seaboard.dtb
+dtb-$(CONFIG_MACH_TRIMSLICE) += tegra-trimslice.dtb
dtb-$(CONFIG_MACH_VENTANA) += tegra-ventana.dtb
+dtb-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra-cardhu.dtb
diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index 74743ad3d2d..7a95e0bc4ab 100644
--- a/arch/arm/mach-tegra/board-dt.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -32,10 +32,12 @@
#include <linux/i2c.h>
#include <linux/i2c-tegra.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/setup.h>
+#include <asm/hardware/gic.h>
#include <mach/iomap.h>
#include <mach/irqs.h>
@@ -46,10 +48,14 @@
#include "devices.h"
void harmony_pinmux_init(void);
+void paz00_pinmux_init(void);
void seaboard_pinmux_init(void);
+void trimslice_pinmux_init(void);
void ventana_pinmux_init(void);
struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("nvidia,tegra20-pinmux", TEGRA_APB_MISC_BASE + 0x14, "tegra-pinmux", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-gpio", TEGRA_GPIO_BASE, "tegra-gpio", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC1_BASE, "sdhci-tegra.0", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC2_BASE, "sdhci-tegra.1", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-sdhci", TEGRA_SDMMC3_BASE, "sdhci-tegra.2", NULL),
@@ -57,16 +63,30 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C_BASE, "tegra-i2c.0", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C2_BASE, "tegra-i2c.1", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_I2C3_BASE, "tegra-i2c.2", NULL),
- OF_DEV_AUXDATA("nvidia,tegra20-i2c", TEGRA_DVC_BASE, "tegra-i2c.3", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-i2c-dvc", TEGRA_DVC_BASE, "tegra-i2c.3", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.0", NULL),
- OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S1_BASE, "tegra-i2s.1", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0",
+ &tegra_ehci1_device.dev.platform_data),
+ OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1",
+ &tegra_ehci2_device.dev.platform_data),
+ OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2",
+ &tegra_ehci3_device.dev.platform_data),
{}
};
static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
/* name parent rate enabled */
{ "uartd", "pll_p", 216000000, true },
+ { "usbd", "clk_m", 12000000, false },
+ { "usb2", "clk_m", 12000000, false },
+ { "usb3", "clk_m", 12000000, false },
+ { "pll_a", "pll_p_out1", 56448000, true },
+ { "pll_a_out0", "pll_a", 11289600, true },
+ { "cdev1", NULL, 0, true },
+ { "i2s1", "pll_a_out0", 11289600, false},
+ { "i2s2", "pll_a_out0", 11289600, false},
{ NULL, NULL, 0, 0},
};
@@ -75,39 +95,23 @@ static struct of_device_id tegra_dt_match_table[] __initdata = {
{}
};
-static struct of_device_id tegra_dt_gic_match[] __initdata = {
- { .compatible = "nvidia,tegra20-gic", },
- {}
-};
-
static struct {
char *machine;
void (*init)(void);
} pinmux_configs[] = {
+ { "compulab,trimslice", trimslice_pinmux_init },
{ "nvidia,harmony", harmony_pinmux_init },
+ { "compal,paz00", paz00_pinmux_init },
{ "nvidia,seaboard", seaboard_pinmux_init },
{ "nvidia,ventana", ventana_pinmux_init },
};
static void __init tegra_dt_init(void)
{
- struct device_node *node;
int i;
- node = of_find_matching_node_by_address(NULL, tegra_dt_gic_match,
- TEGRA_ARM_INT_DIST_BASE);
- if (node)
- irq_domain_add_simple(node, INT_GIC_BASE);
-
tegra_clk_init_from_table(tegra_dt_clk_init_table);
- /*
- * Finished with the static registrations now; fill in the missing
- * devices
- */
- of_platform_populate(NULL, tegra_dt_match_table,
- tegra20_auxdata_lookup, NULL);
-
for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) {
if (of_machine_is_compatible(pinmux_configs[i].machine)) {
pinmux_configs[i].init();
@@ -117,20 +121,31 @@ static void __init tegra_dt_init(void)
WARN(i == ARRAY_SIZE(pinmux_configs),
"Unknown platform! Pinmuxing not initialized\n");
+
+ /*
+ * Finished with the static registrations now; fill in the missing
+ * devices
+ */
+ of_platform_populate(NULL, tegra_dt_match_table,
+ tegra20_auxdata_lookup, NULL);
}
-static const char * tegra_dt_board_compat[] = {
+static const char *tegra20_dt_board_compat[] = {
+ "compulab,trimslice",
"nvidia,harmony",
+ "compal,paz00",
"nvidia,seaboard",
"nvidia,ventana",
NULL
};
-DT_MACHINE_START(TEGRA_DT, "nVidia Tegra (Flattened Device Tree)")
+DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
- .init_irq = tegra_init_irq,
+ .init_early = tegra20_init_early,
+ .init_irq = tegra_dt_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_dt_init,
- .dt_compat = tegra_dt_board_compat,
+ .restart = tegra_assert_system_reset,
+ .dt_compat = tegra20_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
new file mode 100644
index 00000000000..3c197e2440b
--- /dev/null
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -0,0 +1,63 @@
+/*
+ * arch/arm/mach-tegra/board-dt-tegra30.c
+ *
+ * NVIDIA Tegra30 device tree board support
+ *
+ * Copyright (C) 2011 NVIDIA Corporation
+ *
+ * Derived from:
+ *
+ * arch/arm/mach-tegra/board-dt-tegra20.c
+ *
+ * Copyright (C) 2010 Secret Lab Technologies, Ltd.
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
+
+#include "board.h"
+
+static struct of_device_id tegra_dt_match_table[] __initdata = {
+ { .compatible = "simple-bus", },
+ {}
+};
+
+static void __init tegra30_dt_init(void)
+{
+ of_platform_populate(NULL, tegra_dt_match_table,
+ NULL, NULL);
+}
+
+static const char *tegra30_dt_board_compat[] = {
+ "nvidia,cardhu",
+ NULL
+};
+
+DT_MACHINE_START(TEGRA30_DT, "NVIDIA Tegra30 (Flattened Device Tree)")
+ .map_io = tegra_map_common_io,
+ .init_early = tegra30_init_early,
+ .init_irq = tegra_dt_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &tegra_timer,
+ .init_machine = tegra30_dt_init,
+ .restart = tegra_assert_system_reset,
+ .dt_compat = tegra30_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c
index 6db7d699ef1..33c4fedab84 100644
--- a/arch/arm/mach-tegra/board-harmony-pcie.c
+++ b/arch/arm/mach-tegra/board-harmony-pcie.c
@@ -22,7 +22,6 @@
#include <asm/mach-types.h>
-#include <mach/pinmux.h>
#include "board.h"
#include "board-harmony.h"
@@ -48,10 +47,6 @@ static int __init harmony_pcie_init(void)
regulator_enable(regulator);
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_GPV, TEGRA_TRI_NORMAL);
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXA, TEGRA_TRI_NORMAL);
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXK, TEGRA_TRI_NORMAL);
-
err = tegra_pcie_init(true, true);
if (err)
goto err_pcie;
@@ -59,10 +54,6 @@ static int __init harmony_pcie_init(void)
return 0;
err_pcie:
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_GPV, TEGRA_TRI_TRISTATE);
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXA, TEGRA_TRI_TRISTATE);
- tegra_pinmux_set_tristate(TEGRA_PINGROUP_SLXK, TEGRA_TRI_TRISTATE);
-
regulator_disable(regulator);
regulator_put(regulator);
err_reg:
diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
index 7a4a26d5174..465808c8ac0 100644
--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
+++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
@@ -19,10 +19,11 @@
#include <linux/of.h>
#include <mach/pinmux.h>
+#include <mach/pinmux-tegra20.h>
#include "gpio-names.h"
#include "board-harmony.h"
-#include "devices.h"
+#include "board-pinmux.h"
static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -143,11 +144,6 @@ static struct tegra_pingroup_config harmony_pinmux[] = {
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
-static struct platform_device *pinmux_devices[] = {
- &tegra_gpio_device,
- &tegra_pinmux_device,
-};
-
static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TEGRA_GPIO_SD2_CD, .enable = true },
{ .gpio = TEGRA_GPIO_SD2_WP, .enable = true },
@@ -161,13 +157,14 @@ static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TEGRA_GPIO_EXT_MIC_EN, .enable = true },
};
+static struct tegra_board_pinmux_conf conf = {
+ .pgs = harmony_pinmux,
+ .pg_count = ARRAY_SIZE(harmony_pinmux),
+ .gpios = gpio_table,
+ .gpio_count = ARRAY_SIZE(gpio_table),
+};
+
void harmony_pinmux_init(void)
{
- if (!of_machine_is_compatible("nvidia,tegra20"))
- platform_add_devices(pinmux_devices,
- ARRAY_SIZE(pinmux_devices));
-
- tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
-
- tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ tegra_board_pinmux_init(&conf, NULL);
}
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index f0bdc5e3fe5..a0f9634f672 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -31,6 +31,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
#include <asm/setup.h>
#include <mach/tegra_wm8903_pdata.h>
@@ -185,8 +186,10 @@ MACHINE_START(HARMONY, "harmony")
.atag_offset = 0x100,
.fixup = tegra_harmony_fixup,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_harmony_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00-pinmux.c b/arch/arm/mach-tegra/board-paz00-pinmux.c
index be30e215f4b..c775572dcea 100644
--- a/arch/arm/mach-tegra/board-paz00-pinmux.c
+++ b/arch/arm/mach-tegra/board-paz00-pinmux.c
@@ -19,10 +19,11 @@
#include <linux/of.h>
#include <mach/pinmux.h>
+#include <mach/pinmux-tegra20.h>
#include "gpio-names.h"
#include "board-paz00.h"
-#include "devices.h"
+#include "board-pinmux.h"
static struct tegra_pingroup_config paz00_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -30,7 +31,7 @@ static struct tegra_pingroup_config paz00_pinmux[] = {
{TEGRA_PINGROUP_ATC, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATD, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATE, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_CDEV1, TEGRA_MUX_PLLA_OUT, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_CDEV2, TEGRA_MUX_PLLP_OUT4, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_CRTP, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_CSUS, TEGRA_MUX_PLLC_OUT1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
@@ -143,11 +144,6 @@ static struct tegra_pingroup_config paz00_pinmux[] = {
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
-static struct platform_device *pinmux_devices[] = {
- &tegra_gpio_device,
- &tegra_pinmux_device,
-};
-
static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TEGRA_GPIO_SD1_CD, .enable = true },
{ .gpio = TEGRA_GPIO_SD1_WP, .enable = true },
@@ -158,13 +154,14 @@ static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TEGRA_WIFI_LED, .enable = true },
};
+static struct tegra_board_pinmux_conf conf = {
+ .pgs = paz00_pinmux,
+ .pg_count = ARRAY_SIZE(paz00_pinmux),
+ .gpios = gpio_table,
+ .gpio_count = ARRAY_SIZE(gpio_table),
+};
+
void paz00_pinmux_init(void)
{
- if (!of_machine_is_compatible("nvidia,tegra20"))
- platform_add_devices(pinmux_devices,
- ARRAY_SIZE(pinmux_devices));
-
- tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux));
-
- tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ tegra_board_pinmux_init(&conf, NULL);
}
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 55c55ba89f1..fcf4f377b1d 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -23,12 +23,15 @@
#include <linux/serial_8250.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/gpio_keys.h>
#include <linux/pda_power.h>
#include <linux/io.h>
+#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/rfkill-gpio.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
@@ -114,12 +117,37 @@ static struct platform_device leds_gpio = {
},
};
+static struct gpio_keys_button paz00_gpio_keys_buttons[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = TEGRA_GPIO_POWERKEY,
+ .active_low = 1,
+ .desc = "Power",
+ .type = EV_KEY,
+ .wakeup = 1,
+ },
+};
+
+static struct gpio_keys_platform_data paz00_gpio_keys = {
+ .buttons = paz00_gpio_keys_buttons,
+ .nbuttons = ARRAY_SIZE(paz00_gpio_keys_buttons),
+};
+
+static struct platform_device gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &paz00_gpio_keys,
+ },
+};
+
static struct platform_device *paz00_devices[] __initdata = {
&debug_uart,
&tegra_sdhci_device4,
&tegra_sdhci_device1,
&wifi_rfkill_device,
&leds_gpio,
+ &gpio_keys_device,
};
static void paz00_i2c_init(void)
@@ -188,8 +216,10 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
.atag_offset = 0x100,
.fixup = tegra_paz00_fixup,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_paz00_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.h b/arch/arm/mach-tegra/board-paz00.h
index 8aff06eb58c..ffa83f580db 100644
--- a/arch/arm/mach-tegra/board-paz00.h
+++ b/arch/arm/mach-tegra/board-paz00.h
@@ -32,6 +32,9 @@
#define TEGRA_WIFI_RST TEGRA_GPIO_PD1
#define TEGRA_WIFI_LED TEGRA_GPIO_PD0
+/* WakeUp */
+#define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PJ7
+
void paz00_pinmux_init(void);
#endif
diff --git a/arch/arm/mach-tegra/board-pinmux.c b/arch/arm/mach-tegra/board-pinmux.c
new file mode 100644
index 00000000000..adc3efe979b
--- /dev/null
+++ b/arch/arm/mach-tegra/board-pinmux.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+#include <mach/gpio-tegra.h>
+#include <mach/pinmux.h>
+
+#include "board-pinmux.h"
+#include "devices.h"
+
+struct tegra_board_pinmux_conf *confs[2];
+
+static void tegra_board_pinmux_setup_gpios(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(confs); i++) {
+ if (!confs[i])
+ continue;
+
+ tegra_gpio_config(confs[i]->gpios, confs[i]->gpio_count);
+ }
+}
+
+static void tegra_board_pinmux_setup_pinmux(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(confs); i++) {
+ if (!confs[i])
+ continue;
+
+ tegra_pinmux_config_table(confs[i]->pgs, confs[i]->pg_count);
+
+ if (confs[i]->drives)
+ tegra_drive_pinmux_config_table(confs[i]->drives,
+ confs[i]->drive_count);
+ }
+}
+
+static int tegra_board_pinmux_bus_notify(struct notifier_block *nb,
+ unsigned long event, void *vdev)
+{
+ static bool had_gpio;
+ static bool had_pinmux;
+
+ struct device *dev = vdev;
+ const char *devname;
+
+ if (event != BUS_NOTIFY_BOUND_DRIVER)
+ return NOTIFY_DONE;
+
+ devname = dev_name(dev);
+
+ if (!had_gpio && !strcmp(devname, GPIO_DEV)) {
+ tegra_board_pinmux_setup_gpios();
+ had_gpio = true;
+ } else if (!had_pinmux && !strcmp(devname, PINMUX_DEV)) {
+ tegra_board_pinmux_setup_pinmux();
+ had_pinmux = true;
+ }
+
+ if (had_gpio && had_pinmux)
+ return NOTIFY_STOP_MASK;
+ else
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = tegra_board_pinmux_bus_notify,
+};
+
+static struct platform_device *devices[] = {
+ &tegra_gpio_device,
+ &tegra_pinmux_device,
+};
+
+void tegra_board_pinmux_init(struct tegra_board_pinmux_conf *conf_a,
+ struct tegra_board_pinmux_conf *conf_b)
+{
+ confs[0] = conf_a;
+ confs[1] = conf_b;
+
+ bus_register_notifier(&platform_bus_type, &nb);
+
+ if (!of_machine_is_compatible("nvidia,tegra20"))
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
diff --git a/arch/arm/mach-tegra/board-pinmux.h b/arch/arm/mach-tegra/board-pinmux.h
new file mode 100644
index 00000000000..4aac73546f5
--- /dev/null
+++ b/arch/arm/mach-tegra/board-pinmux.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_BOARD_PINMUX_H
+#define __MACH_TEGRA_BOARD_PINMUX_H
+
+#define GPIO_DEV "tegra-gpio"
+#define PINMUX_DEV "tegra-pinmux"
+
+struct tegra_pingroup_config;
+struct tegra_gpio_table;
+
+struct tegra_board_pinmux_conf {
+ struct tegra_pingroup_config *pgs;
+ int pg_count;
+
+ struct tegra_drive_pingroup_config *drives;
+ int drive_count;
+
+ struct tegra_gpio_table *gpios;
+ int gpio_count;
+};
+
+void tegra_board_pinmux_init(struct tegra_board_pinmux_conf *conf_a,
+ struct tegra_board_pinmux_conf *conf_b);
+
+#endif
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
index b1c2972f62f..55e7e43a14a 100644
--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
+++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
@@ -19,11 +19,11 @@
#include <linux/of.h>
#include <mach/pinmux.h>
-#include <mach/pinmux-t2.h>
+#include <mach/pinmux-tegra20.h>
#include "gpio-names.h"
+#include "board-pinmux.h"
#include "board-seaboard.h"
-#include "devices.h"
#define DEFAULT_DRIVE(_name) \
{ \
@@ -37,11 +37,11 @@
.slew_falling = TEGRA_SLEW_SLOWEST, \
}
-static __initdata struct tegra_drive_pingroup_config seaboard_drive_pinmux[] = {
+static struct tegra_drive_pingroup_config seaboard_drive_pinmux[] = {
DEFAULT_DRIVE(SDIO1),
};
-static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
+static struct tegra_pingroup_config common_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -55,7 +55,6 @@ static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
{TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
@@ -65,7 +64,6 @@ static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
{TEGRA_PINGROUP_GMA, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GMB, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_GMC, TEGRA_MUX_UARTD, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GME, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GPU, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GPU7, TEGRA_MUX_RTCK, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -108,13 +106,8 @@ static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
{TEGRA_PINGROUP_LM0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_LM1, TEGRA_MUX_CRT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_LPP, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_LPW0, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_LPW1, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_LPW2, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_LSC0, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_LSC1, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_LSCK, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_LSDA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_LSDI, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_LSPI, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_LVP0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
@@ -122,25 +115,19 @@ static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
{TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDB, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDC, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDD, TEGRA_MUX_SDIO3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDIO1, TEGRA_MUX_SDIO1, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SLXA, TEGRA_MUX_PCIE, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SLXD, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPDI, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPDO, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SPIB, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SPID, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SPIE, TEGRA_MUX_SPI1, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SPIF, TEGRA_MUX_SPI1, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_SPIH, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_UAA, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_UAB, TEGRA_MUX_ULPI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
@@ -160,13 +147,24 @@ static __initdata struct tegra_pingroup_config seaboard_pinmux[] = {
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
-static __initdata struct tegra_pingroup_config ventana_pinmux[] = {
- {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+static struct tegra_pingroup_config seaboard_pinmux[] = {
+ {TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW0, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LPW2, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_LSC1, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSCK, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_LSDA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_HDMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SLXC, TEGRA_MUX_SPDIF, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SLXK, TEGRA_MUX_PCIE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIA, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_SPIC, TEGRA_MUX_GMI, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
+};
+
+static struct tegra_pingroup_config ventana_pinmux[] = {
{TEGRA_PINGROUP_DDC, TEGRA_MUX_RSVD2, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTB, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTC, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
- {TEGRA_PINGROUP_DTD, TEGRA_MUX_VI, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_GMD, TEGRA_MUX_SFLASH, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_LPW0, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_LPW2, TEGRA_MUX_RSVD4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
@@ -181,65 +179,59 @@ static __initdata struct tegra_pingroup_config ventana_pinmux[] = {
{TEGRA_PINGROUP_SPIG, TEGRA_MUX_SPI2_ALT, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
};
-static struct platform_device *pinmux_devices[] = {
- &tegra_gpio_device,
- &tegra_pinmux_device,
-};
-
static struct tegra_gpio_table common_gpio_table[] = {
{ .gpio = TEGRA_GPIO_SD2_CD, .enable = true },
{ .gpio = TEGRA_GPIO_SD2_WP, .enable = true },
{ .gpio = TEGRA_GPIO_SD2_POWER, .enable = true },
+ { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true },
+};
+
+static struct tegra_gpio_table seaboard_gpio_table[] = {
{ .gpio = TEGRA_GPIO_LIDSWITCH, .enable = true },
{ .gpio = TEGRA_GPIO_POWERKEY, .enable = true },
{ .gpio = TEGRA_GPIO_HP_DET, .enable = true },
{ .gpio = TEGRA_GPIO_ISL29018_IRQ, .enable = true },
- { .gpio = TEGRA_GPIO_CDC_IRQ, .enable = true },
{ .gpio = TEGRA_GPIO_USB1, .enable = true },
};
-static void __init update_pinmux(struct tegra_pingroup_config *newtbl, int size)
-{
- int i, j;
- struct tegra_pingroup_config *new_pingroup, *base_pingroup;
-
- /* Update base seaboard pinmux table with secondary board
- * specific pinmux table table.
- */
- for (i = 0; i < size; i++) {
- new_pingroup = &newtbl[i];
- for (j = 0; j < ARRAY_SIZE(seaboard_pinmux); j++) {
- base_pingroup = &seaboard_pinmux[j];
- if (new_pingroup->pingroup == base_pingroup->pingroup) {
- *base_pingroup = *new_pingroup;
- break;
- }
- }
- }
-}
-
-void __init seaboard_common_pinmux_init(void)
-{
- if (!of_machine_is_compatible("nvidia,tegra20"))
- platform_add_devices(pinmux_devices,
- ARRAY_SIZE(pinmux_devices));
+static struct tegra_gpio_table ventana_gpio_table[] = {
+ /* hp_det */
+ { .gpio = TEGRA_GPIO_PW2, .enable = true },
+ /* int_mic_en */
+ { .gpio = TEGRA_GPIO_PX0, .enable = true },
+ /* ext_mic_en */
+ { .gpio = TEGRA_GPIO_PX1, .enable = true },
+};
- tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux));
+static struct tegra_board_pinmux_conf common_conf = {
+ .pgs = common_pinmux,
+ .pg_count = ARRAY_SIZE(common_pinmux),
+ .gpios = common_gpio_table,
+ .gpio_count = ARRAY_SIZE(common_gpio_table),
+};
- tegra_drive_pinmux_config_table(seaboard_drive_pinmux,
- ARRAY_SIZE(seaboard_drive_pinmux));
+static struct tegra_board_pinmux_conf seaboard_conf = {
+ .pgs = seaboard_pinmux,
+ .pg_count = ARRAY_SIZE(seaboard_pinmux),
+ .drives = seaboard_drive_pinmux,
+ .drive_count = ARRAY_SIZE(seaboard_drive_pinmux),
+ .gpios = seaboard_gpio_table,
+ .gpio_count = ARRAY_SIZE(seaboard_gpio_table),
+};
- tegra_gpio_config(common_gpio_table, ARRAY_SIZE(common_gpio_table));
-}
+static struct tegra_board_pinmux_conf ventana_conf = {
+ .pgs = ventana_pinmux,
+ .pg_count = ARRAY_SIZE(ventana_pinmux),
+ .gpios = ventana_gpio_table,
+ .gpio_count = ARRAY_SIZE(ventana_gpio_table),
+};
-void __init seaboard_pinmux_init(void)
+void seaboard_pinmux_init(void)
{
- seaboard_common_pinmux_init();
+ tegra_board_pinmux_init(&common_conf, &seaboard_conf);
}
-void __init ventana_pinmux_init(void)
+void ventana_pinmux_init(void)
{
- update_pinmux(ventana_pinmux, ARRAY_SIZE(ventana_pinmux));
- seaboard_common_pinmux_init();
+ tegra_board_pinmux_init(&common_conf, &ventana_conf);
}
-
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index bf13ea355ef..cfc74d46a09 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -34,6 +34,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include "board.h"
#include "board-seaboard.h"
@@ -282,26 +283,32 @@ static void __init tegra_wario_init(void)
MACHINE_START(SEABOARD, "seaboard")
.atag_offset = 0x100,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_seaboard_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
MACHINE_START(KAEN, "kaen")
.atag_offset = 0x100,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_kaen_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
MACHINE_START(WARIO, "wario")
.atag_offset = 0x100,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_wario_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
index 7ab719d46da..a21a2be57cb 100644
--- a/arch/arm/mach-tegra/board-trimslice-pinmux.c
+++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
@@ -19,12 +19,13 @@
#include <linux/of.h>
#include <mach/pinmux.h>
+#include <mach/pinmux-tegra20.h>
#include "gpio-names.h"
+#include "board-pinmux.h"
#include "board-trimslice.h"
-#include "devices.h"
-static __initdata struct tegra_pingroup_config trimslice_pinmux[] = {
+static struct tegra_pingroup_config trimslice_pinmux[] = {
{TEGRA_PINGROUP_ATA, TEGRA_MUX_IDE, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_ATB, TEGRA_MUX_SDIO4, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_ATC, TEGRA_MUX_NAND, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
@@ -105,7 +106,7 @@ static __initdata struct tegra_pingroup_config trimslice_pinmux[] = {
{TEGRA_PINGROUP_LVS, TEGRA_MUX_DISPLAYA, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_OWC, TEGRA_MUX_RSVD2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_PMC, TEGRA_MUX_PWR_ON, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_PTA, TEGRA_MUX_RSVD3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
+ {TEGRA_PINGROUP_PTA, TEGRA_MUX_GMI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_RM, TEGRA_MUX_I2C, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDB, TEGRA_MUX_PWM, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_SDC, TEGRA_MUX_PWM, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
@@ -143,11 +144,6 @@ static __initdata struct tegra_pingroup_config trimslice_pinmux[] = {
{TEGRA_PINGROUP_XM2D, TEGRA_MUX_NONE, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
};
-static struct platform_device *pinmux_devices[] = {
- &tegra_gpio_device,
- &tegra_pinmux_device,
-};
-
static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TRIMSLICE_GPIO_SD4_CD, .enable = true }, /* mmc4 cd */
{ .gpio = TRIMSLICE_GPIO_SD4_WP, .enable = true }, /* mmc4 wp */
@@ -156,11 +152,14 @@ static struct tegra_gpio_table gpio_table[] = {
{ .gpio = TRIMSLICE_GPIO_USB2_RST, .enable = true }, /* USB2 PHY rst */
};
-void __init trimslice_pinmux_init(void)
+static struct tegra_board_pinmux_conf conf = {
+ .pgs = trimslice_pinmux,
+ .pg_count = ARRAY_SIZE(trimslice_pinmux),
+ .gpios = gpio_table,
+ .gpio_count = ARRAY_SIZE(gpio_table),
+};
+
+void trimslice_pinmux_init(void)
{
- if (!of_machine_is_compatible("nvidia,tegra20"))
- platform_add_devices(pinmux_devices,
- ARRAY_SIZE(pinmux_devices));
- tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux));
- tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ tegra_board_pinmux_init(&conf, NULL);
}
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index 1a6617b7806..cd52820a3e3 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/setup.h>
@@ -174,8 +175,10 @@ MACHINE_START(TRIMSLICE, "trimslice")
.atag_offset = 0x100,
.fixup = tegra_trimslice_fixup,
.map_io = tegra_map_common_io,
- .init_early = tegra_init_early,
+ .init_early = tegra20_init_early,
.init_irq = tegra_init_irq,
+ .handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_trimslice_init,
+ .restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 1d14df7eb7d..75d1543d77c 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -25,10 +25,11 @@
void tegra_assert_system_reset(char mode, const char *cmd);
-void __init tegra_init_early(void);
+void __init tegra20_init_early(void);
+void __init tegra30_init_early(void);
void __init tegra_map_common_io(void);
void __init tegra_init_irq(void);
-void __init tegra_init_clock(void);
+void __init tegra_dt_init_irq(void);
int __init tegra_pcie_init(bool init_port0, bool init_port1);
extern struct sys_timer tegra_timer;
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index f8d41ffc0ca..8337068a4ab 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -387,35 +387,18 @@ EXPORT_SYMBOL(tegra_clk_init_from_table);
void tegra_periph_reset_deassert(struct clk *c)
{
- tegra2_periph_reset_deassert(c);
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, false);
}
EXPORT_SYMBOL(tegra_periph_reset_deassert);
void tegra_periph_reset_assert(struct clk *c)
{
- tegra2_periph_reset_assert(c);
+ BUG_ON(!c->ops->reset);
+ c->ops->reset(c, true);
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
-void __init tegra_init_clock(void)
-{
- tegra2_init_clocks();
-}
-
-/*
- * The SDMMC controllers have extra bits in the clock source register that
- * adjust the delay between the clock and data to compenstate for delays
- * on the PCB.
- */
-void tegra_sdmmc_tap_delay(struct clk *c, int delay)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&c->spinlock, flags);
- tegra2_sdmmc_tap_delay(c, delay);
- spin_unlock_irqrestore(&c->spinlock, flags);
-}
-
#ifdef CONFIG_DEBUG_FS
static int __clk_lock_all_spinlocks(void)
diff --git a/arch/arm/mach-tegra/clock.h b/arch/arm/mach-tegra/clock.h
index 688316abc64..5c44106616c 100644
--- a/arch/arm/mach-tegra/clock.h
+++ b/arch/arm/mach-tegra/clock.h
@@ -146,15 +146,11 @@ struct tegra_clk_init_table {
};
void tegra2_init_clocks(void);
-void tegra2_periph_reset_deassert(struct clk *c);
-void tegra2_periph_reset_assert(struct clk *c);
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
-unsigned long clk_measure_input_freq(void);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
unsigned long clk_get_rate_locked(struct clk *c);
int clk_set_rate_locked(struct clk *c, unsigned long rate);
-void tegra2_sdmmc_tap_delay(struct clk *c, int delay);
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 690b888be50..a2eb90169ae 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-tegra/board-harmony.c
+ * arch/arm/mach-tegra/common.c
*
* Copyright (C) 2010 Google, Inc.
*
@@ -21,8 +21,10 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/of_irq.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
#include <mach/iomap.h>
#include <mach/system.h>
@@ -31,20 +33,31 @@
#include "clock.h"
#include "fuse.h"
-void (*arch_reset)(char mode, const char *cmd) = tegra_assert_system_reset;
+#ifdef CONFIG_OF
+static const struct of_device_id tegra_dt_irq_match[] __initconst = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
+ { }
+};
+
+void __init tegra_dt_init_irq(void)
+{
+ tegra_init_irq();
+ of_irq_init(tegra_dt_irq_match);
+}
+#endif
void tegra_assert_system_reset(char mode, const char *cmd)
{
- void __iomem *reset = IO_ADDRESS(TEGRA_CLK_RESET_BASE + 0x04);
+ void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0);
u32 reg;
- /* use *_related to avoid spinlock since caches are off */
reg = readl_relaxed(reset);
- reg |= 0x04;
+ reg |= 0x10;
writel_relaxed(reg, reset);
}
-static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
{ "pll_p", "clk_m", 216000000, true },
@@ -60,24 +73,38 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
{ "cpu", NULL, 0, true },
{ NULL, NULL, 0, 0},
};
+#endif
-static void __init tegra_init_cache(void)
+static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
{
#ifdef CONFIG_CACHE_L2X0
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
+ u32 aux_ctrl, cache_type;
- writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
- writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);
+ writel_relaxed(tag_latency, p + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(data_latency, p + L2X0_DATA_LATENCY_CTRL);
- l2x0_init(p, 0x6C080001, 0x8200c3fe);
+ cache_type = readl(p + L2X0_CACHE_TYPE);
+ aux_ctrl = (cache_type & 0x700) << (17-8);
+ aux_ctrl |= 0x6C000001;
+
+ l2x0_init(p, aux_ctrl, 0x8200c3fe);
#endif
}
-void __init tegra_init_early(void)
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void __init tegra20_init_early(void)
{
tegra_init_fuse();
- tegra_init_clock();
- tegra_clk_init_from_table(common_clk_init_table);
- tegra_init_cache();
+ tegra2_init_clocks();
+ tegra_clk_init_from_table(tegra20_clk_init_table);
+ tegra_init_cache(0x331, 0x441);
}
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void __init tegra30_init_early(void)
+{
+ tegra_init_cache(0x441, 0x551);
+}
+#endif
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index c8baf8f80d2..fc3ecb66de0 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -26,6 +26,6 @@ void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
unsigned long clk_get_rate_all_locked(struct clk *c);
-void tegra_sdmmc_tap_delay(struct clk *c, int delay);
+void tegra2_sdmmc_tap_delay(struct clk *c, int delay);
#endif
diff --git a/arch/arm/mach-tegra/include/mach/entry-macro.S b/arch/arm/mach-tegra/include/mach/entry-macro.S
index dd165c53889..e577cfe27e7 100644
--- a/arch/arm/mach-tegra/include/mach/entry-macro.S
+++ b/arch/arm/mach-tegra/include/mach/entry-macro.S
@@ -12,45 +12,9 @@
* GNU General Public License for more details.
*
*/
-#include <mach/iomap.h>
-#include <mach/io.h>
-#if defined(CONFIG_ARM_GIC)
-#define HAVE_GET_IRQNR_PREAMBLE
-#include <asm/hardware/entry-macro-gic.S>
-
- /* Uses the GIC interrupt controller built into the cpu */
-#define ICTRL_BASE (IO_CPU_VIRT + 0x100)
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- movw \base, #(ICTRL_BASE & 0x0000ffff)
- movt \base, #((ICTRL_BASE & 0xffff0000) >> 16)
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-#else
- /* legacy interrupt controller for AP16 */
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- @ enable imprecise aborts
- cpsie a
- @ EVP base at 0xf010f000
- mov \base, #0xf0000000
- orr \base, #0x00100000
- orr \base, #0x0000f000
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base, #0x20] @ EVT_IRQ_STS
- cmp \irqnr, #0x80
- .endm
-#endif
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index 35a011fbc42..f15defffb5d 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -71,12 +71,6 @@
#ifndef __ASSEMBLER__
-#define __arch_ioremap tegra_ioremap
-#define __arch_iounmap tegra_iounmap
-
-void __iomem *tegra_ioremap(unsigned long phys, size_t size, unsigned int type);
-void tegra_iounmap(volatile void __iomem *addr);
-
#define IO_ADDRESS(n) (IO_TO_VIRT(n))
#ifdef CONFIG_TEGRA_PCI
diff --git a/arch/arm/mach-tegra/include/mach/irqs.h b/arch/arm/mach-tegra/include/mach/irqs.h
index 73265af4dda..a2146cd6867 100644
--- a/arch/arm/mach-tegra/include/mach/irqs.h
+++ b/arch/arm/mach-tegra/include/mach/irqs.h
@@ -25,7 +25,6 @@
#define IRQ_LOCALTIMER 29
-#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* Primary Interrupt Controller */
#define INT_PRI_BASE (INT_GIC_BASE + 32)
#define INT_TMR1 (INT_PRI_BASE + 0)
@@ -178,6 +177,5 @@
#define NR_BOARD_IRQS 32
#define NR_IRQS (INT_BOARD_BASE + NR_BOARD_IRQS)
-#endif
#endif
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 4f3572a1c68..20bb0545f99 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -53,6 +53,7 @@ struct tegra_kbc_platform_data {
struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
const struct matrix_keymap_data *keymap_data;
+ u32 wakeup_key;
bool wakeup;
bool use_fn_map;
bool use_ghost_filter;
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-t2.h b/arch/arm/mach-tegra/include/mach/pinmux-tegra20.h
index 4c262634726..6a40c1dbab1 100644
--- a/arch/arm/mach-tegra/include/mach/pinmux-t2.h
+++ b/arch/arm/mach-tegra/include/mach/pinmux-tegra20.h
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-tegra/include/mach/pinmux-t2.h
+ * linux/arch/arm/mach-tegra/include/mach/pinmux-tegra20.h
*
* Copyright (C) 2010 Google, Inc.
*
@@ -14,8 +14,8 @@
*
*/
-#ifndef __MACH_TEGRA_PINMUX_T2_H
-#define __MACH_TEGRA_PINMUX_T2_H
+#ifndef __MACH_TEGRA_PINMUX_TEGRA20_H
+#define __MACH_TEGRA_PINMUX_TEGRA20_H
enum tegra_pingroup {
TEGRA_PINGROUP_ATA = 0,
diff --git a/arch/arm/mach-tegra/include/mach/pinmux-tegra30.h b/arch/arm/mach-tegra/include/mach/pinmux-tegra30.h
new file mode 100644
index 00000000000..c1aee3eb2df
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/pinmux-tegra30.h
@@ -0,0 +1,320 @@
+/*
+ * linux/arch/arm/mach-tegra/include/mach/pinmux-tegra30.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010,2011 Nvidia, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MACH_TEGRA_PINMUX_TEGRA30_H
+#define __MACH_TEGRA_PINMUX_TEGRA30_H
+
+enum tegra_pingroup {
+ TEGRA_PINGROUP_ULPI_DATA0 = 0,
+ TEGRA_PINGROUP_ULPI_DATA1,
+ TEGRA_PINGROUP_ULPI_DATA2,
+ TEGRA_PINGROUP_ULPI_DATA3,
+ TEGRA_PINGROUP_ULPI_DATA4,
+ TEGRA_PINGROUP_ULPI_DATA5,
+ TEGRA_PINGROUP_ULPI_DATA6,
+ TEGRA_PINGROUP_ULPI_DATA7,
+ TEGRA_PINGROUP_ULPI_CLK,
+ TEGRA_PINGROUP_ULPI_DIR,
+ TEGRA_PINGROUP_ULPI_NXT,
+ TEGRA_PINGROUP_ULPI_STP,
+ TEGRA_PINGROUP_DAP3_FS,
+ TEGRA_PINGROUP_DAP3_DIN,
+ TEGRA_PINGROUP_DAP3_DOUT,
+ TEGRA_PINGROUP_DAP3_SCLK,
+ TEGRA_PINGROUP_GPIO_PV0,
+ TEGRA_PINGROUP_GPIO_PV1,
+ TEGRA_PINGROUP_SDMMC1_CLK,
+ TEGRA_PINGROUP_SDMMC1_CMD,
+ TEGRA_PINGROUP_SDMMC1_DAT3,
+ TEGRA_PINGROUP_SDMMC1_DAT2,
+ TEGRA_PINGROUP_SDMMC1_DAT1,
+ TEGRA_PINGROUP_SDMMC1_DAT0,
+ TEGRA_PINGROUP_GPIO_PV2,
+ TEGRA_PINGROUP_GPIO_PV3,
+ TEGRA_PINGROUP_CLK2_OUT,
+ TEGRA_PINGROUP_CLK2_REQ,
+ TEGRA_PINGROUP_LCD_PWR1,
+ TEGRA_PINGROUP_LCD_PWR2,
+ TEGRA_PINGROUP_LCD_SDIN,
+ TEGRA_PINGROUP_LCD_SDOUT,
+ TEGRA_PINGROUP_LCD_WR_N,
+ TEGRA_PINGROUP_LCD_CS0_N,
+ TEGRA_PINGROUP_LCD_DC0,
+ TEGRA_PINGROUP_LCD_SCK,
+ TEGRA_PINGROUP_LCD_PWR0,
+ TEGRA_PINGROUP_LCD_PCLK,
+ TEGRA_PINGROUP_LCD_DE,
+ TEGRA_PINGROUP_LCD_HSYNC,
+ TEGRA_PINGROUP_LCD_VSYNC,
+ TEGRA_PINGROUP_LCD_D0,
+ TEGRA_PINGROUP_LCD_D1,
+ TEGRA_PINGROUP_LCD_D2,
+ TEGRA_PINGROUP_LCD_D3,
+ TEGRA_PINGROUP_LCD_D4,
+ TEGRA_PINGROUP_LCD_D5,
+ TEGRA_PINGROUP_LCD_D6,
+ TEGRA_PINGROUP_LCD_D7,
+ TEGRA_PINGROUP_LCD_D8,
+ TEGRA_PINGROUP_LCD_D9,
+ TEGRA_PINGROUP_LCD_D10,
+ TEGRA_PINGROUP_LCD_D11,
+ TEGRA_PINGROUP_LCD_D12,
+ TEGRA_PINGROUP_LCD_D13,
+ TEGRA_PINGROUP_LCD_D14,
+ TEGRA_PINGROUP_LCD_D15,
+ TEGRA_PINGROUP_LCD_D16,
+ TEGRA_PINGROUP_LCD_D17,
+ TEGRA_PINGROUP_LCD_D18,
+ TEGRA_PINGROUP_LCD_D19,
+ TEGRA_PINGROUP_LCD_D20,
+ TEGRA_PINGROUP_LCD_D21,
+ TEGRA_PINGROUP_LCD_D22,
+ TEGRA_PINGROUP_LCD_D23,
+ TEGRA_PINGROUP_LCD_CS1_N,
+ TEGRA_PINGROUP_LCD_M1,
+ TEGRA_PINGROUP_LCD_DC1,
+ TEGRA_PINGROUP_HDMI_INT,
+ TEGRA_PINGROUP_DDC_SCL,
+ TEGRA_PINGROUP_DDC_SDA,
+ TEGRA_PINGROUP_CRT_HSYNC,
+ TEGRA_PINGROUP_CRT_VSYNC,
+ TEGRA_PINGROUP_VI_D0,
+ TEGRA_PINGROUP_VI_D1,
+ TEGRA_PINGROUP_VI_D2,
+ TEGRA_PINGROUP_VI_D3,
+ TEGRA_PINGROUP_VI_D4,
+ TEGRA_PINGROUP_VI_D5,
+ TEGRA_PINGROUP_VI_D6,
+ TEGRA_PINGROUP_VI_D7,
+ TEGRA_PINGROUP_VI_D8,
+ TEGRA_PINGROUP_VI_D9,
+ TEGRA_PINGROUP_VI_D10,
+ TEGRA_PINGROUP_VI_D11,
+ TEGRA_PINGROUP_VI_PCLK,
+ TEGRA_PINGROUP_VI_MCLK,
+ TEGRA_PINGROUP_VI_VSYNC,
+ TEGRA_PINGROUP_VI_HSYNC,
+ TEGRA_PINGROUP_UART2_RXD,
+ TEGRA_PINGROUP_UART2_TXD,
+ TEGRA_PINGROUP_UART2_RTS_N,
+ TEGRA_PINGROUP_UART2_CTS_N,
+ TEGRA_PINGROUP_UART3_TXD,
+ TEGRA_PINGROUP_UART3_RXD,
+ TEGRA_PINGROUP_UART3_CTS_N,
+ TEGRA_PINGROUP_UART3_RTS_N,
+ TEGRA_PINGROUP_GPIO_PU0,
+ TEGRA_PINGROUP_GPIO_PU1,
+ TEGRA_PINGROUP_GPIO_PU2,
+ TEGRA_PINGROUP_GPIO_PU3,
+ TEGRA_PINGROUP_GPIO_PU4,
+ TEGRA_PINGROUP_GPIO_PU5,
+ TEGRA_PINGROUP_GPIO_PU6,
+ TEGRA_PINGROUP_GEN1_I2C_SDA,
+ TEGRA_PINGROUP_GEN1_I2C_SCL,
+ TEGRA_PINGROUP_DAP4_FS,
+ TEGRA_PINGROUP_DAP4_DIN,
+ TEGRA_PINGROUP_DAP4_DOUT,
+ TEGRA_PINGROUP_DAP4_SCLK,
+ TEGRA_PINGROUP_CLK3_OUT,
+ TEGRA_PINGROUP_CLK3_REQ,
+ TEGRA_PINGROUP_GMI_WP_N,
+ TEGRA_PINGROUP_GMI_IORDY,
+ TEGRA_PINGROUP_GMI_WAIT,
+ TEGRA_PINGROUP_GMI_ADV_N,
+ TEGRA_PINGROUP_GMI_CLK,
+ TEGRA_PINGROUP_GMI_CS0_N,
+ TEGRA_PINGROUP_GMI_CS1_N,
+ TEGRA_PINGROUP_GMI_CS2_N,
+ TEGRA_PINGROUP_GMI_CS3_N,
+ TEGRA_PINGROUP_GMI_CS4_N,
+ TEGRA_PINGROUP_GMI_CS6_N,
+ TEGRA_PINGROUP_GMI_CS7_N,
+ TEGRA_PINGROUP_GMI_AD0,
+ TEGRA_PINGROUP_GMI_AD1,
+ TEGRA_PINGROUP_GMI_AD2,
+ TEGRA_PINGROUP_GMI_AD3,
+ TEGRA_PINGROUP_GMI_AD4,
+ TEGRA_PINGROUP_GMI_AD5,
+ TEGRA_PINGROUP_GMI_AD6,
+ TEGRA_PINGROUP_GMI_AD7,
+ TEGRA_PINGROUP_GMI_AD8,
+ TEGRA_PINGROUP_GMI_AD9,
+ TEGRA_PINGROUP_GMI_AD10,
+ TEGRA_PINGROUP_GMI_AD11,
+ TEGRA_PINGROUP_GMI_AD12,
+ TEGRA_PINGROUP_GMI_AD13,
+ TEGRA_PINGROUP_GMI_AD14,
+ TEGRA_PINGROUP_GMI_AD15,
+ TEGRA_PINGROUP_GMI_A16,
+ TEGRA_PINGROUP_GMI_A17,
+ TEGRA_PINGROUP_GMI_A18,
+ TEGRA_PINGROUP_GMI_A19,
+ TEGRA_PINGROUP_GMI_WR_N,
+ TEGRA_PINGROUP_GMI_OE_N,
+ TEGRA_PINGROUP_GMI_DQS,
+ TEGRA_PINGROUP_GMI_RST_N,
+ TEGRA_PINGROUP_GEN2_I2C_SCL,
+ TEGRA_PINGROUP_GEN2_I2C_SDA,
+ TEGRA_PINGROUP_SDMMC4_CLK,
+ TEGRA_PINGROUP_SDMMC4_CMD,
+ TEGRA_PINGROUP_SDMMC4_DAT0,
+ TEGRA_PINGROUP_SDMMC4_DAT1,
+ TEGRA_PINGROUP_SDMMC4_DAT2,
+ TEGRA_PINGROUP_SDMMC4_DAT3,
+ TEGRA_PINGROUP_SDMMC4_DAT4,
+ TEGRA_PINGROUP_SDMMC4_DAT5,
+ TEGRA_PINGROUP_SDMMC4_DAT6,
+ TEGRA_PINGROUP_SDMMC4_DAT7,
+ TEGRA_PINGROUP_SDMMC4_RST_N,
+ TEGRA_PINGROUP_CAM_MCLK,
+ TEGRA_PINGROUP_GPIO_PCC1,
+ TEGRA_PINGROUP_GPIO_PBB0,
+ TEGRA_PINGROUP_CAM_I2C_SCL,
+ TEGRA_PINGROUP_CAM_I2C_SDA,
+ TEGRA_PINGROUP_GPIO_PBB3,
+ TEGRA_PINGROUP_GPIO_PBB4,
+ TEGRA_PINGROUP_GPIO_PBB5,
+ TEGRA_PINGROUP_GPIO_PBB6,
+ TEGRA_PINGROUP_GPIO_PBB7,
+ TEGRA_PINGROUP_GPIO_PCC2,
+ TEGRA_PINGROUP_JTAG_RTCK,
+ TEGRA_PINGROUP_PWR_I2C_SCL,
+ TEGRA_PINGROUP_PWR_I2C_SDA,
+ TEGRA_PINGROUP_KB_ROW0,
+ TEGRA_PINGROUP_KB_ROW1,
+ TEGRA_PINGROUP_KB_ROW2,
+ TEGRA_PINGROUP_KB_ROW3,
+ TEGRA_PINGROUP_KB_ROW4,
+ TEGRA_PINGROUP_KB_ROW5,
+ TEGRA_PINGROUP_KB_ROW6,
+ TEGRA_PINGROUP_KB_ROW7,
+ TEGRA_PINGROUP_KB_ROW8,
+ TEGRA_PINGROUP_KB_ROW9,
+ TEGRA_PINGROUP_KB_ROW10,
+ TEGRA_PINGROUP_KB_ROW11,
+ TEGRA_PINGROUP_KB_ROW12,
+ TEGRA_PINGROUP_KB_ROW13,
+ TEGRA_PINGROUP_KB_ROW14,
+ TEGRA_PINGROUP_KB_ROW15,
+ TEGRA_PINGROUP_KB_COL0,
+ TEGRA_PINGROUP_KB_COL1,
+ TEGRA_PINGROUP_KB_COL2,
+ TEGRA_PINGROUP_KB_COL3,
+ TEGRA_PINGROUP_KB_COL4,
+ TEGRA_PINGROUP_KB_COL5,
+ TEGRA_PINGROUP_KB_COL6,
+ TEGRA_PINGROUP_KB_COL7,
+ TEGRA_PINGROUP_CLK_32K_OUT,
+ TEGRA_PINGROUP_SYS_CLK_REQ,
+ TEGRA_PINGROUP_CORE_PWR_REQ,
+ TEGRA_PINGROUP_CPU_PWR_REQ,
+ TEGRA_PINGROUP_PWR_INT_N,
+ TEGRA_PINGROUP_CLK_32K_IN,
+ TEGRA_PINGROUP_OWR,
+ TEGRA_PINGROUP_DAP1_FS,
+ TEGRA_PINGROUP_DAP1_DIN,
+ TEGRA_PINGROUP_DAP1_DOUT,
+ TEGRA_PINGROUP_DAP1_SCLK,
+ TEGRA_PINGROUP_CLK1_REQ,
+ TEGRA_PINGROUP_CLK1_OUT,
+ TEGRA_PINGROUP_SPDIF_IN,
+ TEGRA_PINGROUP_SPDIF_OUT,
+ TEGRA_PINGROUP_DAP2_FS,
+ TEGRA_PINGROUP_DAP2_DIN,
+ TEGRA_PINGROUP_DAP2_DOUT,
+ TEGRA_PINGROUP_DAP2_SCLK,
+ TEGRA_PINGROUP_SPI2_MOSI,
+ TEGRA_PINGROUP_SPI2_MISO,
+ TEGRA_PINGROUP_SPI2_CS0_N,
+ TEGRA_PINGROUP_SPI2_SCK,
+ TEGRA_PINGROUP_SPI1_MOSI,
+ TEGRA_PINGROUP_SPI1_SCK,
+ TEGRA_PINGROUP_SPI1_CS0_N,
+ TEGRA_PINGROUP_SPI1_MISO,
+ TEGRA_PINGROUP_SPI2_CS1_N,
+ TEGRA_PINGROUP_SPI2_CS2_N,
+ TEGRA_PINGROUP_SDMMC3_CLK,
+ TEGRA_PINGROUP_SDMMC3_CMD,
+ TEGRA_PINGROUP_SDMMC3_DAT0,
+ TEGRA_PINGROUP_SDMMC3_DAT1,
+ TEGRA_PINGROUP_SDMMC3_DAT2,
+ TEGRA_PINGROUP_SDMMC3_DAT3,
+ TEGRA_PINGROUP_SDMMC3_DAT4,
+ TEGRA_PINGROUP_SDMMC3_DAT5,
+ TEGRA_PINGROUP_SDMMC3_DAT6,
+ TEGRA_PINGROUP_SDMMC3_DAT7,
+ TEGRA_PINGROUP_PEX_L0_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L0_RST_N,
+ TEGRA_PINGROUP_PEX_L0_CLKREQ_N,
+ TEGRA_PINGROUP_PEX_WAKE_N,
+ TEGRA_PINGROUP_PEX_L1_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L1_RST_N,
+ TEGRA_PINGROUP_PEX_L1_CLKREQ_N,
+ TEGRA_PINGROUP_PEX_L2_PRSNT_N,
+ TEGRA_PINGROUP_PEX_L2_RST_N,
+ TEGRA_PINGROUP_PEX_L2_CLKREQ_N,
+ TEGRA_PINGROUP_HDMI_CEC,
+ TEGRA_MAX_PINGROUP,
+};
+
+enum tegra_drive_pingroup {
+ TEGRA_DRIVE_PINGROUP_AO1 = 0,
+ TEGRA_DRIVE_PINGROUP_AO2,
+ TEGRA_DRIVE_PINGROUP_AT1,
+ TEGRA_DRIVE_PINGROUP_AT2,
+ TEGRA_DRIVE_PINGROUP_AT3,
+ TEGRA_DRIVE_PINGROUP_AT4,
+ TEGRA_DRIVE_PINGROUP_AT5,
+ TEGRA_DRIVE_PINGROUP_CDEV1,
+ TEGRA_DRIVE_PINGROUP_CDEV2,
+ TEGRA_DRIVE_PINGROUP_CSUS,
+ TEGRA_DRIVE_PINGROUP_DAP1,
+ TEGRA_DRIVE_PINGROUP_DAP2,
+ TEGRA_DRIVE_PINGROUP_DAP3,
+ TEGRA_DRIVE_PINGROUP_DAP4,
+ TEGRA_DRIVE_PINGROUP_DBG,
+ TEGRA_DRIVE_PINGROUP_LCD1,
+ TEGRA_DRIVE_PINGROUP_LCD2,
+ TEGRA_DRIVE_PINGROUP_SDIO2,
+ TEGRA_DRIVE_PINGROUP_SDIO3,
+ TEGRA_DRIVE_PINGROUP_SPI,
+ TEGRA_DRIVE_PINGROUP_UAA,
+ TEGRA_DRIVE_PINGROUP_UAB,
+ TEGRA_DRIVE_PINGROUP_UART2,
+ TEGRA_DRIVE_PINGROUP_UART3,
+ TEGRA_DRIVE_PINGROUP_VI1,
+ TEGRA_DRIVE_PINGROUP_SDIO1,
+ TEGRA_DRIVE_PINGROUP_CRT,
+ TEGRA_DRIVE_PINGROUP_DDC,
+ TEGRA_DRIVE_PINGROUP_GMA,
+ TEGRA_DRIVE_PINGROUP_GMB,
+ TEGRA_DRIVE_PINGROUP_GMC,
+ TEGRA_DRIVE_PINGROUP_GMD,
+ TEGRA_DRIVE_PINGROUP_GME,
+ TEGRA_DRIVE_PINGROUP_GMF,
+ TEGRA_DRIVE_PINGROUP_GMG,
+ TEGRA_DRIVE_PINGROUP_GMH,
+ TEGRA_DRIVE_PINGROUP_OWR,
+ TEGRA_DRIVE_PINGROUP_UAD,
+ TEGRA_DRIVE_PINGROUP_GPV,
+ TEGRA_DRIVE_PINGROUP_DEV3,
+ TEGRA_DRIVE_PINGROUP_CEC,
+ TEGRA_MAX_DRIVE_PINGROUP,
+};
+
+#endif
+
diff --git a/arch/arm/mach-tegra/include/mach/pinmux.h b/arch/arm/mach-tegra/include/mach/pinmux.h
index bb7dfdb6120..055f1792c8f 100644
--- a/arch/arm/mach-tegra/include/mach/pinmux.h
+++ b/arch/arm/mach-tegra/include/mach/pinmux.h
@@ -2,6 +2,7 @@
* linux/arch/arm/mach-tegra/include/mach/pinmux.h
*
* Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010,2011 Nvidia, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -17,18 +18,13 @@
#ifndef __MACH_TEGRA_PINMUX_H
#define __MACH_TEGRA_PINMUX_H
-#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
-#include "pinmux-t2.h"
-#else
-#error "Undefined Tegra architecture"
-#endif
-
enum tegra_mux_func {
TEGRA_MUX_RSVD = 0x8000,
TEGRA_MUX_RSVD1 = 0x8000,
TEGRA_MUX_RSVD2 = 0x8001,
TEGRA_MUX_RSVD3 = 0x8002,
TEGRA_MUX_RSVD4 = 0x8003,
+ TEGRA_MUX_INVALID = 0x4000,
TEGRA_MUX_NONE = -1,
TEGRA_MUX_AHB_CLK,
TEGRA_MUX_APB_CLK,
@@ -90,6 +86,49 @@ enum tegra_mux_func {
TEGRA_MUX_VI,
TEGRA_MUX_VI_SENSOR_CLK,
TEGRA_MUX_XIO,
+ TEGRA_MUX_BLINK,
+ TEGRA_MUX_CEC,
+ TEGRA_MUX_CLK12,
+ TEGRA_MUX_DAP,
+ TEGRA_MUX_DAPSDMMC2,
+ TEGRA_MUX_DDR,
+ TEGRA_MUX_DEV3,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_VI_ALT1,
+ TEGRA_MUX_VI_ALT2,
+ TEGRA_MUX_VI_ALT3,
+ TEGRA_MUX_EMC_DLL,
+ TEGRA_MUX_EXTPERIPH1,
+ TEGRA_MUX_EXTPERIPH2,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_GMI_ALT,
+ TEGRA_MUX_HDA,
+ TEGRA_MUX_HSI,
+ TEGRA_MUX_I2C4,
+ TEGRA_MUX_I2C5,
+ TEGRA_MUX_I2CPWR,
+ TEGRA_MUX_I2S0,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_I2S4,
+ TEGRA_MUX_NAND_ALT,
+ TEGRA_MUX_POPSDIO4,
+ TEGRA_MUX_POPSDMMC4,
+ TEGRA_MUX_PWM0,
+ TEGRA_MUX_PWM1,
+ TEGRA_MUX_PWM2,
+ TEGRA_MUX_PWM3,
+ TEGRA_MUX_SATA,
+ TEGRA_MUX_SPI5,
+ TEGRA_MUX_SPI6,
+ TEGRA_MUX_SYSCLK,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
TEGRA_MUX_SAFE,
TEGRA_MAX_MUX,
};
@@ -105,6 +144,11 @@ enum tegra_tristate {
TEGRA_TRI_TRISTATE = 1,
};
+enum tegra_pin_io {
+ TEGRA_PIN_OUTPUT = 0,
+ TEGRA_PIN_INPUT = 1,
+};
+
enum tegra_vddio {
TEGRA_VDDIO_BB = 0,
TEGRA_VDDIO_LCD,
@@ -115,10 +159,16 @@ enum tegra_vddio {
TEGRA_VDDIO_SYS,
TEGRA_VDDIO_AUDIO,
TEGRA_VDDIO_SD,
+ TEGRA_VDDIO_CAM,
+ TEGRA_VDDIO_GMI,
+ TEGRA_VDDIO_PEXCTL,
+ TEGRA_VDDIO_SDMMC1,
+ TEGRA_VDDIO_SDMMC3,
+ TEGRA_VDDIO_SDMMC4,
};
struct tegra_pingroup_config {
- enum tegra_pingroup pingroup;
+ int pingroup;
enum tegra_mux_func func;
enum tegra_pullupdown pupd;
enum tegra_tristate tristate;
@@ -187,7 +237,7 @@ enum tegra_schmitt {
};
struct tegra_drive_pingroup_config {
- enum tegra_drive_pingroup pingroup;
+ int pingroup;
enum tegra_hsm hsm;
enum tegra_schmitt schmitt;
enum tegra_drive drive;
@@ -208,6 +258,7 @@ struct tegra_pingroup_desc {
int funcs[4];
int func_safe;
int vddio;
+ enum tegra_pin_io io_default;
s16 tri_bank; /* Register bank the tri_reg exists within */
s16 mux_bank; /* Register bank the mux_reg exists within */
s16 pupd_bank; /* Register bank the pupd_reg exists within */
@@ -217,15 +268,23 @@ struct tegra_pingroup_desc {
s8 tri_bit; /* offset into the TRISTATE_REG_* register bit */
s8 mux_bit; /* offset into the PIN_MUX_CTL_* register bit */
s8 pupd_bit; /* offset into the PULL_UPDOWN_REG_* register bit */
+ s8 lock_bit; /* offset of the LOCK bit into mux register bit */
+ s8 od_bit; /* offset of the OD bit into mux register bit */
+ s8 ioreset_bit; /* offset of the IO_RESET bit into mux register bit */
};
-extern const struct tegra_pingroup_desc tegra_soc_pingroups[];
-extern const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[];
+typedef void (*pinmux_init) (const struct tegra_pingroup_desc **pg,
+ int *pg_max, const struct tegra_drive_pingroup_desc **pgdrive,
+ int *pgdrive_max);
-int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
- enum tegra_tristate tristate);
-int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
- enum tegra_pullupdown pupd);
+void tegra20_pinmux_init(const struct tegra_pingroup_desc **pg, int *pg_max,
+ const struct tegra_drive_pingroup_desc **pgdrive, int *pgdrive_max);
+
+void tegra30_pinmux_init(const struct tegra_pingroup_desc **pg, int *pg_max,
+ const struct tegra_drive_pingroup_desc **pgdrive, int *pgdrive_max);
+
+int tegra_pinmux_set_tristate(int pg, enum tegra_tristate tristate);
+int tegra_pinmux_set_pullupdown(int pg, enum tegra_pullupdown pupd);
void tegra_pinmux_config_table(const struct tegra_pingroup_config *config,
int len);
@@ -241,4 +300,3 @@ void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *conf
void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
int len, enum tegra_pullupdown pupd);
#endif
-
diff --git a/arch/arm/mach-tegra/include/mach/system.h b/arch/arm/mach-tegra/include/mach/system.h
index 027c4215d31..a312988bf6f 100644
--- a/arch/arm/mach-tegra/include/mach/system.h
+++ b/arch/arm/mach-tegra/include/mach/system.h
@@ -21,10 +21,6 @@
#ifndef __MACH_TEGRA_SYSTEM_H
#define __MACH_TEGRA_SYSTEM_H
-#include <mach/iomap.h>
-
-extern void (*arch_reset)(char mode, const char *cmd);
-
static inline void arch_idle(void)
{
}
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index 5489f8b5d6a..d23ee2db282 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -60,24 +60,3 @@ void __init tegra_map_common_io(void)
{
iotable_init(tegra_io_desc, ARRAY_SIZE(tegra_io_desc));
}
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
-{
- void __iomem *v = IO_ADDRESS(p);
- if (v == NULL)
- v = __arm_ioremap(p, size, type);
- return v;
-}
-EXPORT_SYMBOL(tegra_ioremap);
-
-void tegra_iounmap(volatile void __iomem *addr)
-{
- unsigned long virt = (unsigned long)addr;
-
- if (virt >= VMALLOC_START && virt < VMALLOC_END)
- __iounmap(addr);
-}
-EXPORT_SYMBOL(tegra_iounmap);
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 4956c3cea73..4e1afcd54fa 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <asm/hardware/gic.h>
@@ -28,10 +29,6 @@
#include "board.h"
-#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
-#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
-#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
-
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
@@ -129,6 +126,11 @@ void __init tegra_init_irq(void)
gic_arch_extn.irq_unmask = tegra_unmask;
gic_arch_extn.irq_retrigger = tegra_retrigger;
- gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
- IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
+ /*
+ * Check if there is a devicetree present, since the GIC will be
+ * initialized elsewhere under DT.
+ */
+ if (!of_have_populated_dt())
+ gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
+ IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
}
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index 97ef3e55dfd..ec63c6b2b6b 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -37,7 +37,6 @@
#include <asm/sizes.h>
#include <asm/mach/pci.h>
-#include <mach/pinmux.h>
#include <mach/iomap.h>
#include <mach/clk.h>
#include <mach/powergate.h>
diff --git a/arch/arm/mach-tegra/pinmux-t2-tables.c b/arch/arm/mach-tegra/pinmux-tegra20-tables.c
index a0dc2bc28ed..734add1280b 100644
--- a/arch/arm/mach-tegra/pinmux-t2-tables.c
+++ b/arch/arm/mach-tegra/pinmux-tegra20-tables.c
@@ -1,7 +1,7 @@
/*
- * linux/arch/arm/mach-tegra/pinmux-t2-tables.c
+ * linux/arch/arm/mach-tegra/pinmux-tegra20-tables.c
*
- * Common pinmux configurations for Tegra 2 SoCs
+ * Common pinmux configurations for Tegra20 SoCs
*
* Copyright (C) 2010 NVIDIA Corporation
*
@@ -29,6 +29,7 @@
#include <mach/iomap.h>
#include <mach/pinmux.h>
+#include <mach/pinmux-tegra20.h>
#include <mach/suspend.h>
#define TRISTATE_REG_A 0x14
@@ -43,7 +44,7 @@
.reg = ((r) - PINGROUP_REG_A) \
}
-const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
+static const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
DRIVE_PINGROUP(AO1, 0x868),
DRIVE_PINGROUP(AO2, 0x86c),
DRIVE_PINGROUP(AT1, 0x870),
@@ -105,9 +106,13 @@ const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE
.pupd_bank = 2, \
.pupd_reg = ((pupd_r) - PULLUPDOWN_REG_A), \
.pupd_bit = pupd_b, \
+ .lock_bit = -1, \
+ .od_bit = -1, \
+ .ioreset_bit = -1, \
+ .io_default = -1, \
}
-const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
+static const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
PINGROUP(ATA, NAND, IDE, NAND, GMI, RSVD, IDE, 0x14, 0, 0x80, 24, 0xA0, 0),
PINGROUP(ATB, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 1, 0x80, 16, 0xA0, 2),
PINGROUP(ATC, NAND, IDE, NAND, GMI, SDIO4, IDE, 0x14, 2, 0x80, 22, 0xA0, 4),
@@ -226,3 +231,14 @@ const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
PINGROUP(XM2C, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 30),
PINGROUP(XM2D, DDR, RSVD, RSVD, RSVD, RSVD, RSVD, -1, -1, -1, -1, 0xA8, 28),
};
+
+void __devinit tegra20_pinmux_init(const struct tegra_pingroup_desc **pg,
+ int *pg_max, const struct tegra_drive_pingroup_desc **pgdrive,
+ int *pgdrive_max)
+{
+ *pg = tegra_soc_pingroups;
+ *pg_max = TEGRA_MAX_PINGROUP;
+ *pgdrive = tegra_soc_drive_pingroups;
+ *pgdrive_max = TEGRA_MAX_DRIVE_PINGROUP;
+}
+
diff --git a/arch/arm/mach-tegra/pinmux-tegra30-tables.c b/arch/arm/mach-tegra/pinmux-tegra30-tables.c
new file mode 100644
index 00000000000..14fc0e4c1c4
--- /dev/null
+++ b/arch/arm/mach-tegra/pinmux-tegra30-tables.c
@@ -0,0 +1,376 @@
+/*
+ * linux/arch/arm/mach-tegra/pinmux-tegra30-tables.c
+ *
+ * Common pinmux configurations for Tegra30 SoCs
+ *
+ * Copyright (C) 2010,2011 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <mach/iomap.h>
+#include <mach/pinmux.h>
+#include <mach/pinmux-tegra30.h>
+#include <mach/suspend.h>
+
+#define PINGROUP_REG_A 0x868
+#define MUXCTL_REG_A 0x3000
+
+#define DRIVE_PINGROUP(pg_name, r) \
+ [TEGRA_DRIVE_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .reg_bank = 0, \
+ .reg = ((r) - PINGROUP_REG_A) \
+ }
+
+static const struct tegra_drive_pingroup_desc tegra_soc_drive_pingroups[TEGRA_MAX_DRIVE_PINGROUP] = {
+ DRIVE_PINGROUP(AO1, 0x868),
+ DRIVE_PINGROUP(AO2, 0x86c),
+ DRIVE_PINGROUP(AT1, 0x870),
+ DRIVE_PINGROUP(AT2, 0x874),
+ DRIVE_PINGROUP(AT3, 0x878),
+ DRIVE_PINGROUP(AT4, 0x87c),
+ DRIVE_PINGROUP(AT5, 0x880),
+ DRIVE_PINGROUP(CDEV1, 0x884),
+ DRIVE_PINGROUP(CDEV2, 0x888),
+ DRIVE_PINGROUP(CSUS, 0x88c),
+ DRIVE_PINGROUP(DAP1, 0x890),
+ DRIVE_PINGROUP(DAP2, 0x894),
+ DRIVE_PINGROUP(DAP3, 0x898),
+ DRIVE_PINGROUP(DAP4, 0x89c),
+ DRIVE_PINGROUP(DBG, 0x8a0),
+ DRIVE_PINGROUP(LCD1, 0x8a4),
+ DRIVE_PINGROUP(LCD2, 0x8a8),
+ DRIVE_PINGROUP(SDIO2, 0x8ac),
+ DRIVE_PINGROUP(SDIO3, 0x8b0),
+ DRIVE_PINGROUP(SPI, 0x8b4),
+ DRIVE_PINGROUP(UAA, 0x8b8),
+ DRIVE_PINGROUP(UAB, 0x8bc),
+ DRIVE_PINGROUP(UART2, 0x8c0),
+ DRIVE_PINGROUP(UART3, 0x8c4),
+ DRIVE_PINGROUP(VI1, 0x8c8),
+ DRIVE_PINGROUP(SDIO1, 0x8ec),
+ DRIVE_PINGROUP(CRT, 0x8f8),
+ DRIVE_PINGROUP(DDC, 0x8fc),
+ DRIVE_PINGROUP(GMA, 0x900),
+ DRIVE_PINGROUP(GMB, 0x904),
+ DRIVE_PINGROUP(GMC, 0x908),
+ DRIVE_PINGROUP(GMD, 0x90c),
+ DRIVE_PINGROUP(GME, 0x910),
+ DRIVE_PINGROUP(GMF, 0x914),
+ DRIVE_PINGROUP(GMG, 0x918),
+ DRIVE_PINGROUP(GMH, 0x91c),
+ DRIVE_PINGROUP(OWR, 0x920),
+ DRIVE_PINGROUP(UAD, 0x924),
+ DRIVE_PINGROUP(GPV, 0x928),
+ DRIVE_PINGROUP(DEV3, 0x92c),
+ DRIVE_PINGROUP(CEC, 0x938),
+};
+
+#define PINGROUP(pg_name, vdd, f0, f1, f2, f3, fs, iod, reg) \
+ [TEGRA_PINGROUP_ ## pg_name] = { \
+ .name = #pg_name, \
+ .vddio = TEGRA_VDDIO_ ## vdd, \
+ .funcs = { \
+ TEGRA_MUX_ ## f0, \
+ TEGRA_MUX_ ## f1, \
+ TEGRA_MUX_ ## f2, \
+ TEGRA_MUX_ ## f3, \
+ }, \
+ .func_safe = TEGRA_MUX_ ## fs, \
+ .tri_bank = 1, \
+ .tri_reg = ((reg) - MUXCTL_REG_A), \
+ .tri_bit = 4, \
+ .mux_bank = 1, \
+ .mux_reg = ((reg) - MUXCTL_REG_A), \
+ .mux_bit = 0, \
+ .pupd_bank = 1, \
+ .pupd_reg = ((reg) - MUXCTL_REG_A), \
+ .pupd_bit = 2, \
+ .io_default = TEGRA_PIN_ ## iod, \
+ .od_bit = 6, \
+ .lock_bit = 7, \
+ .ioreset_bit = 8, \
+ }
+
+static const struct tegra_pingroup_desc tegra_soc_pingroups[TEGRA_MAX_PINGROUP] = {
+ /* NAME VDD f0 f1 f2 f3 fSafe io reg */
+ PINGROUP(ULPI_DATA0, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3000),
+ PINGROUP(ULPI_DATA1, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3004),
+ PINGROUP(ULPI_DATA2, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x3008),
+ PINGROUP(ULPI_DATA3, BB, SPI3, HSI, UARTA, ULPI, RSVD, INPUT, 0x300c),
+ PINGROUP(ULPI_DATA4, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3010),
+ PINGROUP(ULPI_DATA5, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3014),
+ PINGROUP(ULPI_DATA6, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x3018),
+ PINGROUP(ULPI_DATA7, BB, SPI2, HSI, UARTA, ULPI, RSVD, INPUT, 0x301c),
+ PINGROUP(ULPI_CLK, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3020),
+ PINGROUP(ULPI_DIR, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3024),
+ PINGROUP(ULPI_NXT, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x3028),
+ PINGROUP(ULPI_STP, BB, SPI1, RSVD, UARTD, ULPI, RSVD, INPUT, 0x302c),
+ PINGROUP(DAP3_FS, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3030),
+ PINGROUP(DAP3_DIN, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3034),
+ PINGROUP(DAP3_DOUT, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x3038),
+ PINGROUP(DAP3_SCLK, BB, I2S2, RSVD1, DISPLAYA, DISPLAYB, RSVD, INPUT, 0x303c),
+ PINGROUP(GPIO_PV0, BB, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3040),
+ PINGROUP(GPIO_PV1, BB, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3044),
+ PINGROUP(SDMMC1_CLK, SDMMC1, SDIO1, RSVD1, RSVD2, INVALID, RSVD, INPUT, 0x3048),
+ PINGROUP(SDMMC1_CMD, SDMMC1, SDIO1, RSVD1, RSVD2, INVALID, RSVD, INPUT, 0x304c),
+ PINGROUP(SDMMC1_DAT3, SDMMC1, SDIO1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3050),
+ PINGROUP(SDMMC1_DAT2, SDMMC1, SDIO1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3054),
+ PINGROUP(SDMMC1_DAT1, SDMMC1, SDIO1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x3058),
+ PINGROUP(SDMMC1_DAT0, SDMMC1, SDIO1, RSVD1, UARTE, INVALID, RSVD, INPUT, 0x305c),
+ PINGROUP(GPIO_PV2, SDMMC1, OWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3060),
+ PINGROUP(GPIO_PV3, SDMMC1, INVALID, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3064),
+ PINGROUP(CLK2_OUT, SDMMC1, EXTPERIPH2, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3068),
+ PINGROUP(CLK2_REQ, SDMMC1, DAP, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x306c),
+ PINGROUP(LCD_PWR1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3070),
+ PINGROUP(LCD_PWR2, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3074),
+ PINGROUP(LCD_SDIN, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD, RSVD, OUTPUT, 0x3078),
+ PINGROUP(LCD_SDOUT, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x307c),
+ PINGROUP(LCD_WR_N, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3080),
+ PINGROUP(LCD_CS0_N, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD, RSVD, OUTPUT, 0x3084),
+ PINGROUP(LCD_DC0, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3088),
+ PINGROUP(LCD_SCK, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x308c),
+ PINGROUP(LCD_PWR0, LCD, DISPLAYA, DISPLAYB, SPI5, INVALID, RSVD, OUTPUT, 0x3090),
+ PINGROUP(LCD_PCLK, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3094),
+ PINGROUP(LCD_DE, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3098),
+ PINGROUP(LCD_HSYNC, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x309c),
+ PINGROUP(LCD_VSYNC, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a0),
+ PINGROUP(LCD_D0, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a4),
+ PINGROUP(LCD_D1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30a8),
+ PINGROUP(LCD_D2, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30ac),
+ PINGROUP(LCD_D3, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b0),
+ PINGROUP(LCD_D4, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b4),
+ PINGROUP(LCD_D5, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30b8),
+ PINGROUP(LCD_D6, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30bc),
+ PINGROUP(LCD_D7, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c0),
+ PINGROUP(LCD_D8, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c4),
+ PINGROUP(LCD_D9, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30c8),
+ PINGROUP(LCD_D10, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30cc),
+ PINGROUP(LCD_D11, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d0),
+ PINGROUP(LCD_D12, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d4),
+ PINGROUP(LCD_D13, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30d8),
+ PINGROUP(LCD_D14, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30dc),
+ PINGROUP(LCD_D15, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e0),
+ PINGROUP(LCD_D16, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e4),
+ PINGROUP(LCD_D17, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30e8),
+ PINGROUP(LCD_D18, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30ec),
+ PINGROUP(LCD_D19, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f0),
+ PINGROUP(LCD_D20, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f4),
+ PINGROUP(LCD_D21, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30f8),
+ PINGROUP(LCD_D22, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x30fc),
+ PINGROUP(LCD_D23, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3100),
+ PINGROUP(LCD_CS1_N, LCD, DISPLAYA, DISPLAYB, SPI5, RSVD2, RSVD, OUTPUT, 0x3104),
+ PINGROUP(LCD_M1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x3108),
+ PINGROUP(LCD_DC1, LCD, DISPLAYA, DISPLAYB, RSVD1, RSVD2, RSVD, OUTPUT, 0x310c),
+ PINGROUP(HDMI_INT, LCD, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3110),
+ PINGROUP(DDC_SCL, LCD, I2C4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3114),
+ PINGROUP(DDC_SDA, LCD, I2C4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3118),
+ PINGROUP(CRT_HSYNC, LCD, CRT, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x311c),
+ PINGROUP(CRT_VSYNC, LCD, CRT, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3120),
+ PINGROUP(VI_D0, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3124),
+ PINGROUP(VI_D1, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3128),
+ PINGROUP(VI_D2, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x312c),
+ PINGROUP(VI_D3, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3130),
+ PINGROUP(VI_D4, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3134),
+ PINGROUP(VI_D5, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3138),
+ PINGROUP(VI_D6, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x313c),
+ PINGROUP(VI_D7, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3140),
+ PINGROUP(VI_D8, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3144),
+ PINGROUP(VI_D9, VI, INVALID, SDIO2, VI, RSVD1, RSVD, INPUT, 0x3148),
+ PINGROUP(VI_D10, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x314c),
+ PINGROUP(VI_D11, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3150),
+ PINGROUP(VI_PCLK, VI, RSVD1, SDIO2, VI, RSVD2, RSVD, INPUT, 0x3154),
+ PINGROUP(VI_MCLK, VI, VI, INVALID, INVALID, INVALID, RSVD, INPUT, 0x3158),
+ PINGROUP(VI_VSYNC, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x315c),
+ PINGROUP(VI_HSYNC, VI, INVALID, RSVD1, VI, RSVD2, RSVD, INPUT, 0x3160),
+ PINGROUP(UART2_RXD, UART, IRDA, SPDIF, UARTA, SPI4, RSVD, INPUT, 0x3164),
+ PINGROUP(UART2_TXD, UART, IRDA, SPDIF, UARTA, SPI4, RSVD, INPUT, 0x3168),
+ PINGROUP(UART2_RTS_N, UART, UARTA, UARTB, GMI, SPI4, RSVD, INPUT, 0x316c),
+ PINGROUP(UART2_CTS_N, UART, UARTA, UARTB, GMI, SPI4, RSVD, INPUT, 0x3170),
+ PINGROUP(UART3_TXD, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x3174),
+ PINGROUP(UART3_RXD, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x3178),
+ PINGROUP(UART3_CTS_N, UART, UARTC, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x317c),
+ PINGROUP(UART3_RTS_N, UART, UARTC, PWM0, GMI, RSVD2, RSVD, INPUT, 0x3180),
+ PINGROUP(GPIO_PU0, UART, OWR, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3184),
+ PINGROUP(GPIO_PU1, UART, RSVD1, UARTA, GMI, RSVD2, RSVD, INPUT, 0x3188),
+ PINGROUP(GPIO_PU2, UART, RSVD1, UARTA, GMI, RSVD2, RSVD, INPUT, 0x318c),
+ PINGROUP(GPIO_PU3, UART, PWM0, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3190),
+ PINGROUP(GPIO_PU4, UART, PWM1, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3194),
+ PINGROUP(GPIO_PU5, UART, PWM2, UARTA, GMI, RSVD1, RSVD, INPUT, 0x3198),
+ PINGROUP(GPIO_PU6, UART, PWM3, UARTA, GMI, RSVD1, RSVD, INPUT, 0x319c),
+ PINGROUP(GEN1_I2C_SDA, UART, I2C, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31a0),
+ PINGROUP(GEN1_I2C_SCL, UART, I2C, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31a4),
+ PINGROUP(DAP4_FS, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31a8),
+ PINGROUP(DAP4_DIN, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31ac),
+ PINGROUP(DAP4_DOUT, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31b0),
+ PINGROUP(DAP4_SCLK, UART, I2S3, RSVD1, GMI, RSVD2, RSVD, INPUT, 0x31b4),
+ PINGROUP(CLK3_OUT, UART, EXTPERIPH3, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31b8),
+ PINGROUP(CLK3_REQ, UART, DEV3, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x31bc),
+ PINGROUP(GMI_WP_N, GMI, RSVD1, NAND, GMI, GMI_ALT, RSVD, INPUT, 0x31c0),
+ PINGROUP(GMI_IORDY, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31c4),
+ PINGROUP(GMI_WAIT, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31c8),
+ PINGROUP(GMI_ADV_N, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31cc),
+ PINGROUP(GMI_CLK, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31d0),
+ PINGROUP(GMI_CS0_N, GMI, RSVD1, NAND, GMI, INVALID, RSVD, INPUT, 0x31d4),
+ PINGROUP(GMI_CS1_N, GMI, RSVD1, NAND, GMI, DTV, RSVD, INPUT, 0x31d8),
+ PINGROUP(GMI_CS2_N, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31dc),
+ PINGROUP(GMI_CS3_N, GMI, RSVD1, NAND, GMI, GMI_ALT, RSVD, INPUT, 0x31e0),
+ PINGROUP(GMI_CS4_N, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31e4),
+ PINGROUP(GMI_CS6_N, GMI, NAND, NAND_ALT, GMI, SATA, RSVD, INPUT, 0x31e8),
+ PINGROUP(GMI_CS7_N, GMI, NAND, NAND_ALT, GMI, GMI_ALT, RSVD, INPUT, 0x31ec),
+ PINGROUP(GMI_AD0, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f0),
+ PINGROUP(GMI_AD1, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f4),
+ PINGROUP(GMI_AD2, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31f8),
+ PINGROUP(GMI_AD3, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x31fc),
+ PINGROUP(GMI_AD4, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3200),
+ PINGROUP(GMI_AD5, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3204),
+ PINGROUP(GMI_AD6, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3208),
+ PINGROUP(GMI_AD7, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x320c),
+ PINGROUP(GMI_AD8, GMI, PWM0, NAND, GMI, RSVD2, RSVD, INPUT, 0x3210),
+ PINGROUP(GMI_AD9, GMI, PWM1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3214),
+ PINGROUP(GMI_AD10, GMI, PWM2, NAND, GMI, RSVD2, RSVD, INPUT, 0x3218),
+ PINGROUP(GMI_AD11, GMI, PWM3, NAND, GMI, RSVD2, RSVD, INPUT, 0x321c),
+ PINGROUP(GMI_AD12, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3220),
+ PINGROUP(GMI_AD13, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3224),
+ PINGROUP(GMI_AD14, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x3228),
+ PINGROUP(GMI_AD15, GMI, RSVD1, NAND, GMI, RSVD2, RSVD, INPUT, 0x322c),
+ PINGROUP(GMI_A16, GMI, UARTD, SPI4, GMI, GMI_ALT, RSVD, INPUT, 0x3230),
+ PINGROUP(GMI_A17, GMI, UARTD, SPI4, GMI, INVALID, RSVD, INPUT, 0x3234),
+ PINGROUP(GMI_A18, GMI, UARTD, SPI4, GMI, INVALID, RSVD, INPUT, 0x3238),
+ PINGROUP(GMI_A19, GMI, UARTD, SPI4, GMI, RSVD3, RSVD, INPUT, 0x323c),
+ PINGROUP(GMI_WR_N, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3240),
+ PINGROUP(GMI_OE_N, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3244),
+ PINGROUP(GMI_DQS, GMI, RSVD1, NAND, GMI, RSVD3, RSVD, INPUT, 0x3248),
+ PINGROUP(GMI_RST_N, GMI, NAND, NAND_ALT, GMI, RSVD3, RSVD, INPUT, 0x324c),
+ PINGROUP(GEN2_I2C_SCL, GMI, I2C2, INVALID, GMI, RSVD3, RSVD, INPUT, 0x3250),
+ PINGROUP(GEN2_I2C_SDA, GMI, I2C2, INVALID, GMI, RSVD3, RSVD, INPUT, 0x3254),
+ PINGROUP(SDMMC4_CLK, SDMMC4, INVALID, NAND, GMI, SDIO4, RSVD, INPUT, 0x3258),
+ PINGROUP(SDMMC4_CMD, SDMMC4, I2C3, NAND, GMI, SDIO4, RSVD, INPUT, 0x325c),
+ PINGROUP(SDMMC4_DAT0, SDMMC4, UARTE, SPI3, GMI, SDIO4, RSVD, INPUT, 0x3260),
+ PINGROUP(SDMMC4_DAT1, SDMMC4, UARTE, SPI3, GMI, SDIO4, RSVD, INPUT, 0x3264),
+ PINGROUP(SDMMC4_DAT2, SDMMC4, UARTE, SPI3, GMI, SDIO4, RSVD, INPUT, 0x3268),
+ PINGROUP(SDMMC4_DAT3, SDMMC4, UARTE, SPI3, GMI, SDIO4, RSVD, INPUT, 0x326c),
+ PINGROUP(SDMMC4_DAT4, SDMMC4, I2C3, I2S4, GMI, SDIO4, RSVD, INPUT, 0x3270),
+ PINGROUP(SDMMC4_DAT5, SDMMC4, VGP3, I2S4, GMI, SDIO4, RSVD, INPUT, 0x3274),
+ PINGROUP(SDMMC4_DAT6, SDMMC4, VGP4, I2S4, GMI, SDIO4, RSVD, INPUT, 0x3278),
+ PINGROUP(SDMMC4_DAT7, SDMMC4, VGP5, I2S4, GMI, SDIO4, RSVD, INPUT, 0x327c),
+ PINGROUP(SDMMC4_RST_N, SDMMC4, VGP6, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3280),
+ PINGROUP(CAM_MCLK, CAM, VI, INVALID, VI_ALT2, POPSDMMC4, RSVD, INPUT, 0x3284),
+ PINGROUP(GPIO_PCC1, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3288),
+ PINGROUP(GPIO_PBB0, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x328c),
+ PINGROUP(CAM_I2C_SCL, CAM, INVALID, I2C3, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3290),
+ PINGROUP(CAM_I2C_SDA, CAM, INVALID, I2C3, RSVD2, POPSDMMC4, RSVD, INPUT, 0x3294),
+ PINGROUP(GPIO_PBB3, CAM, VGP3, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x3298),
+ PINGROUP(GPIO_PBB4, CAM, VGP4, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x329c),
+ PINGROUP(GPIO_PBB5, CAM, VGP5, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x32a0),
+ PINGROUP(GPIO_PBB6, CAM, VGP6, DISPLAYA, DISPLAYB, POPSDMMC4, RSVD, INPUT, 0x32a4),
+ PINGROUP(GPIO_PBB7, CAM, I2S4, RSVD1, RSVD2, POPSDMMC4, RSVD, INPUT, 0x32a8),
+ PINGROUP(GPIO_PCC2, CAM, I2S4, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32ac),
+ PINGROUP(JTAG_RTCK, SYS, RTCK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b0),
+ PINGROUP(PWR_I2C_SCL, SYS, I2CPWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b4),
+ PINGROUP(PWR_I2C_SDA, SYS, I2CPWR, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x32b8),
+ PINGROUP(KB_ROW0, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32bc),
+ PINGROUP(KB_ROW1, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32c0),
+ PINGROUP(KB_ROW2, SYS, KBC, INVALID, RSVD2, RSVD3, RSVD, INPUT, 0x32c4),
+ PINGROUP(KB_ROW3, SYS, KBC, INVALID, RSVD2, INVALID, RSVD, INPUT, 0x32c8),
+ PINGROUP(KB_ROW4, SYS, KBC, INVALID, TRACE, RSVD3, RSVD, INPUT, 0x32cc),
+ PINGROUP(KB_ROW5, SYS, KBC, INVALID, TRACE, OWR, RSVD, INPUT, 0x32d0),
+ PINGROUP(KB_ROW6, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32d4),
+ PINGROUP(KB_ROW7, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32d8),
+ PINGROUP(KB_ROW8, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32dc),
+ PINGROUP(KB_ROW9, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32e0),
+ PINGROUP(KB_ROW10, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32e4),
+ PINGROUP(KB_ROW11, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32e8),
+ PINGROUP(KB_ROW12, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32ec),
+ PINGROUP(KB_ROW13, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32f0),
+ PINGROUP(KB_ROW14, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32f4),
+ PINGROUP(KB_ROW15, SYS, KBC, INVALID, SDIO2, INVALID, RSVD, INPUT, 0x32f8),
+ PINGROUP(KB_COL0, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x32fc),
+ PINGROUP(KB_COL1, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3300),
+ PINGROUP(KB_COL2, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3304),
+ PINGROUP(KB_COL3, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3308),
+ PINGROUP(KB_COL4, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x330c),
+ PINGROUP(KB_COL5, SYS, KBC, INVALID, TRACE, RSVD, RSVD, INPUT, 0x3310),
+ PINGROUP(KB_COL6, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3314),
+ PINGROUP(KB_COL7, SYS, KBC, INVALID, TRACE, INVALID, RSVD, INPUT, 0x3318),
+ PINGROUP(CLK_32K_OUT, SYS, BLINK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x331c),
+ PINGROUP(SYS_CLK_REQ, SYS, SYSCLK, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x3320),
+ PINGROUP(CORE_PWR_REQ, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3324),
+ PINGROUP(CPU_PWR_REQ, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3328),
+ PINGROUP(PWR_INT_N, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x332c),
+ PINGROUP(CLK_32K_IN, SYS, RSVD, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3330),
+ PINGROUP(OWR, SYS, OWR, RSVD, RSVD, RSVD, RSVD, INPUT, 0x3334),
+ PINGROUP(DAP1_FS, AUDIO, I2S0, HDA, GMI, SDIO2, RSVD, INPUT, 0x3338),
+ PINGROUP(DAP1_DIN, AUDIO, I2S0, HDA, GMI, SDIO2, RSVD, INPUT, 0x333c),
+ PINGROUP(DAP1_DOUT, AUDIO, I2S0, HDA, GMI, SDIO2, RSVD, INPUT, 0x3340),
+ PINGROUP(DAP1_SCLK, AUDIO, I2S0, HDA, GMI, SDIO2, RSVD, INPUT, 0x3344),
+ PINGROUP(CLK1_REQ, AUDIO, DAP, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x3348),
+ PINGROUP(CLK1_OUT, AUDIO, EXTPERIPH1, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x334c),
+ PINGROUP(SPDIF_IN, AUDIO, SPDIF, HDA, INVALID, DAPSDMMC2, RSVD, INPUT, 0x3350),
+ PINGROUP(SPDIF_OUT, AUDIO, SPDIF, RSVD1, INVALID, DAPSDMMC2, RSVD, INPUT, 0x3354),
+ PINGROUP(DAP2_FS, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3358),
+ PINGROUP(DAP2_DIN, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x335c),
+ PINGROUP(DAP2_DOUT, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3360),
+ PINGROUP(DAP2_SCLK, AUDIO, I2S1, HDA, RSVD2, GMI, RSVD, INPUT, 0x3364),
+ PINGROUP(SPI2_MOSI, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3368),
+ PINGROUP(SPI2_MISO, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x336c),
+ PINGROUP(SPI2_CS0_N, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3370),
+ PINGROUP(SPI2_SCK, AUDIO, SPI6, SPI2, INVALID, GMI, RSVD, INPUT, 0x3374),
+ PINGROUP(SPI1_MOSI, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x3378),
+ PINGROUP(SPI1_SCK, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x337c),
+ PINGROUP(SPI1_CS0_N, AUDIO, SPI2, SPI1, INVALID, GMI, RSVD, INPUT, 0x3380),
+ PINGROUP(SPI1_MISO, AUDIO, INVALID, SPI1, INVALID, RSVD3, RSVD, INPUT, 0x3384),
+ PINGROUP(SPI2_CS1_N, AUDIO, INVALID, SPI2, INVALID, INVALID, RSVD, INPUT, 0x3388),
+ PINGROUP(SPI2_CS2_N, AUDIO, INVALID, SPI2, INVALID, INVALID, RSVD, INPUT, 0x338c),
+ PINGROUP(SDMMC3_CLK, SDMMC3, UARTA, PWM2, SDIO3, INVALID, RSVD, INPUT, 0x3390),
+ PINGROUP(SDMMC3_CMD, SDMMC3, UARTA, PWM3, SDIO3, INVALID, RSVD, INPUT, 0x3394),
+ PINGROUP(SDMMC3_DAT0, SDMMC3, RSVD, RSVD1, SDIO3, INVALID, RSVD, INPUT, 0x3398),
+ PINGROUP(SDMMC3_DAT1, SDMMC3, RSVD, RSVD1, SDIO3, INVALID, RSVD, INPUT, 0x339c),
+ PINGROUP(SDMMC3_DAT2, SDMMC3, RSVD, PWM1, SDIO3, INVALID, RSVD, INPUT, 0x33a0),
+ PINGROUP(SDMMC3_DAT3, SDMMC3, RSVD, PWM0, SDIO3, INVALID, RSVD, INPUT, 0x33a4),
+ PINGROUP(SDMMC3_DAT4, SDMMC3, PWM1, INVALID, SDIO3, INVALID, RSVD, INPUT, 0x33a8),
+ PINGROUP(SDMMC3_DAT5, SDMMC3, PWM0, INVALID, SDIO3, INVALID, RSVD, INPUT, 0x33ac),
+ PINGROUP(SDMMC3_DAT6, SDMMC3, SPDIF, INVALID, SDIO3, INVALID, RSVD, INPUT, 0x33b0),
+ PINGROUP(SDMMC3_DAT7, SDMMC3, SPDIF, INVALID, SDIO3, INVALID, RSVD, INPUT, 0x33b4),
+ PINGROUP(PEX_L0_PRSNT_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33b8),
+ PINGROUP(PEX_L0_RST_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33bc),
+ PINGROUP(PEX_L0_CLKREQ_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c0),
+ PINGROUP(PEX_WAKE_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c4),
+ PINGROUP(PEX_L1_PRSNT_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33c8),
+ PINGROUP(PEX_L1_RST_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33cc),
+ PINGROUP(PEX_L1_CLKREQ_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d0),
+ PINGROUP(PEX_L2_PRSNT_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d4),
+ PINGROUP(PEX_L2_RST_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33d8),
+ PINGROUP(PEX_L2_CLKREQ_N, PEXCTL, PCIE, HDA, RSVD2, RSVD3, RSVD, INPUT, 0x33dc),
+ PINGROUP(HDMI_CEC, SYS, CEC, RSVD1, RSVD2, RSVD3, RSVD, INPUT, 0x33e0),
+};
+
+void __devinit tegra30_pinmux_init(const struct tegra_pingroup_desc **pg,
+ int *pg_max, const struct tegra_drive_pingroup_desc **pgdrive,
+ int *pgdrive_max)
+{
+ *pg = tegra_soc_pingroups;
+ *pg_max = TEGRA_MAX_PINGROUP;
+ *pgdrive = tegra_soc_drive_pingroups;
+ *pgdrive_max = TEGRA_MAX_DRIVE_PINGROUP;
+}
+
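Both tegra20_pinmux_init() and tegra30_pinmux_init() hand the SoC tables back through out-parameters, so a caller only needs one function-pointer type to pick either variant at run time. A minimal sketch of that calling convention, assuming the tegra_pingroup_desc/tegra_drive_pingroup_desc types from <mach/pinmux.h> (the typedef and demo_* names are illustrative only):

	typedef void (*pinmux_init)(const struct tegra_pingroup_desc **pg,
				    int *pg_max,
				    const struct tegra_drive_pingroup_desc **pgdrive,
				    int *pgdrive_max);

	static void demo_select_tables(pinmux_init init)
	{
		const struct tegra_pingroup_desc *pg;
		const struct tegra_drive_pingroup_desc *pgdrive;
		int pg_max, pgdrive_max;

		/* fills all four locals with the SoC-specific tables and counts */
		init(&pg, &pg_max, &pgdrive, &pgdrive_max);
	}
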
diff --git a/arch/arm/mach-tegra/pinmux.c b/arch/arm/mach-tegra/pinmux.c
index 1d201650d7a..ac35d2b7685 100644
--- a/arch/arm/mach-tegra/pinmux.c
+++ b/arch/arm/mach-tegra/pinmux.c
@@ -21,6 +21,7 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <mach/iomap.h>
#include <mach/pinmux.h>
@@ -33,8 +34,10 @@
#define SLWR(reg) (((reg) >> 28) & 0x3)
#define SLWF(reg) (((reg) >> 30) & 0x3)
-static const struct tegra_pingroup_desc *const pingroups = tegra_soc_pingroups;
-static const struct tegra_drive_pingroup_desc *const drive_pingroups = tegra_soc_drive_pingroups;
+static const struct tegra_pingroup_desc *pingroups;
+static const struct tegra_drive_pingroup_desc *drive_pingroups;
+static int pingroup_max;
+static int drive_max;
static char *tegra_mux_names[TEGRA_MAX_MUX] = {
[TEGRA_MUX_AHB_CLK] = "AHB_CLK",
@@ -97,6 +100,49 @@ static char *tegra_mux_names[TEGRA_MAX_MUX] = {
[TEGRA_MUX_VI] = "VI",
[TEGRA_MUX_VI_SENSOR_CLK] = "VI_SENSOR_CLK",
[TEGRA_MUX_XIO] = "XIO",
+ [TEGRA_MUX_BLINK] = "BLINK",
+ [TEGRA_MUX_CEC] = "CEC",
+ [TEGRA_MUX_CLK12] = "CLK12",
+ [TEGRA_MUX_DAP] = "DAP",
+ [TEGRA_MUX_DAPSDMMC2] = "DAPSDMMC2",
+ [TEGRA_MUX_DDR] = "DDR",
+ [TEGRA_MUX_DEV3] = "DEV3",
+ [TEGRA_MUX_DTV] = "DTV",
+ [TEGRA_MUX_VI_ALT1] = "VI_ALT1",
+ [TEGRA_MUX_VI_ALT2] = "VI_ALT2",
+ [TEGRA_MUX_VI_ALT3] = "VI_ALT3",
+ [TEGRA_MUX_EMC_DLL] = "EMC_DLL",
+ [TEGRA_MUX_EXTPERIPH1] = "EXTPERIPH1",
+ [TEGRA_MUX_EXTPERIPH2] = "EXTPERIPH2",
+ [TEGRA_MUX_EXTPERIPH3] = "EXTPERIPH3",
+ [TEGRA_MUX_GMI_ALT] = "GMI_ALT",
+ [TEGRA_MUX_HDA] = "HDA",
+ [TEGRA_MUX_HSI] = "HSI",
+ [TEGRA_MUX_I2C4] = "I2C4",
+ [TEGRA_MUX_I2C5] = "I2C5",
+ [TEGRA_MUX_I2CPWR] = "I2CPWR",
+ [TEGRA_MUX_I2S0] = "I2S0",
+ [TEGRA_MUX_I2S1] = "I2S1",
+ [TEGRA_MUX_I2S2] = "I2S2",
+ [TEGRA_MUX_I2S3] = "I2S3",
+ [TEGRA_MUX_I2S4] = "I2S4",
+ [TEGRA_MUX_NAND_ALT] = "NAND_ALT",
+ [TEGRA_MUX_POPSDIO4] = "POPSDIO4",
+ [TEGRA_MUX_POPSDMMC4] = "POPSDMMC4",
+ [TEGRA_MUX_PWM0] = "PWM0",
+ [TEGRA_MUX_PWM1] = "PWM1",
+ [TEGRA_MUX_PWM2] = "PWM2",
+ [TEGRA_MUX_PWM3] = "PWM3",
+ [TEGRA_MUX_SATA] = "SATA",
+ [TEGRA_MUX_SPI5] = "SPI5",
+ [TEGRA_MUX_SPI6] = "SPI6",
+ [TEGRA_MUX_SYSCLK] = "SYSCLK",
+ [TEGRA_MUX_VGP1] = "VGP1",
+ [TEGRA_MUX_VGP2] = "VGP2",
+ [TEGRA_MUX_VGP3] = "VGP3",
+ [TEGRA_MUX_VGP4] = "VGP4",
+ [TEGRA_MUX_VGP5] = "VGP5",
+ [TEGRA_MUX_VGP6] = "VGP6",
[TEGRA_MUX_SAFE] = "<safe>",
};
@@ -116,9 +162,9 @@ static const char *tegra_slew_names[TEGRA_MAX_SLEW] = {
static DEFINE_SPINLOCK(mux_lock);
-static const char *pingroup_name(enum tegra_pingroup pg)
+static const char *pingroup_name(int pg)
{
- if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ if (pg < 0 || pg >= pingroup_max)
return "<UNKNOWN>";
return pingroups[pg].name;
@@ -189,10 +235,10 @@ static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
int i;
unsigned long reg;
unsigned long flags;
- enum tegra_pingroup pg = config->pingroup;
+ int pg = config->pingroup;
enum tegra_mux_func func = config->func;
- if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ if (pg < 0 || pg >= pingroup_max)
return -ERANGE;
if (pingroups[pg].mux_reg < 0)
@@ -230,13 +276,12 @@ static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
return 0;
}
-int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
- enum tegra_tristate tristate)
+int tegra_pinmux_set_tristate(int pg, enum tegra_tristate tristate)
{
unsigned long reg;
unsigned long flags;
- if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ if (pg < 0 || pg >= pingroup_max)
return -ERANGE;
if (pingroups[pg].tri_reg < 0)
@@ -255,13 +300,12 @@ int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
return 0;
}
-int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
- enum tegra_pullupdown pupd)
+int tegra_pinmux_set_pullupdown(int pg, enum tegra_pullupdown pupd)
{
unsigned long reg;
unsigned long flags;
- if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
+ if (pg < 0 || pg >= pingroup_max)
return -ERANGE;
if (pingroups[pg].pupd_reg < 0)
@@ -287,7 +331,7 @@ int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
static void tegra_pinmux_config_pingroup(const struct tegra_pingroup_config *config)
{
- enum tegra_pingroup pingroup = config->pingroup;
+ int pingroup = config->pingroup;
enum tegra_mux_func func = config->func;
enum tegra_pullupdown pupd = config->pupd;
enum tegra_tristate tristate = config->tristate;
@@ -323,9 +367,9 @@ void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int l
tegra_pinmux_config_pingroup(&config[i]);
}
-static const char *drive_pinmux_name(enum tegra_drive_pingroup pg)
+static const char *drive_pinmux_name(int pg)
{
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return "<UNKNOWN>";
return drive_pingroups[pg].name;
@@ -352,12 +396,11 @@ static const char *slew_name(unsigned long val)
return tegra_slew_names[val];
}
-static int tegra_drive_pinmux_set_hsm(enum tegra_drive_pingroup pg,
- enum tegra_hsm hsm)
+static int tegra_drive_pinmux_set_hsm(int pg, enum tegra_hsm hsm)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (hsm != TEGRA_HSM_ENABLE && hsm != TEGRA_HSM_DISABLE)
@@ -377,12 +420,11 @@ static int tegra_drive_pinmux_set_hsm(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_schmitt(enum tegra_drive_pingroup pg,
- enum tegra_schmitt schmitt)
+static int tegra_drive_pinmux_set_schmitt(int pg, enum tegra_schmitt schmitt)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (schmitt != TEGRA_SCHMITT_ENABLE && schmitt != TEGRA_SCHMITT_DISABLE)
@@ -402,12 +444,11 @@ static int tegra_drive_pinmux_set_schmitt(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_drive(enum tegra_drive_pingroup pg,
- enum tegra_drive drive)
+static int tegra_drive_pinmux_set_drive(int pg, enum tegra_drive drive)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (drive < 0 || drive >= TEGRA_MAX_DRIVE)
@@ -425,12 +466,12 @@ static int tegra_drive_pinmux_set_drive(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_pull_down(enum tegra_drive_pingroup pg,
+static int tegra_drive_pinmux_set_pull_down(int pg,
enum tegra_pull_strength pull_down)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (pull_down < 0 || pull_down >= TEGRA_MAX_PULL)
@@ -448,12 +489,12 @@ static int tegra_drive_pinmux_set_pull_down(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_pull_up(enum tegra_drive_pingroup pg,
+static int tegra_drive_pinmux_set_pull_up(int pg,
enum tegra_pull_strength pull_up)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (pull_up < 0 || pull_up >= TEGRA_MAX_PULL)
@@ -471,12 +512,12 @@ static int tegra_drive_pinmux_set_pull_up(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_slew_rising(enum tegra_drive_pingroup pg,
+static int tegra_drive_pinmux_set_slew_rising(int pg,
enum tegra_slew slew_rising)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (slew_rising < 0 || slew_rising >= TEGRA_MAX_SLEW)
@@ -494,12 +535,12 @@ static int tegra_drive_pinmux_set_slew_rising(enum tegra_drive_pingroup pg,
return 0;
}
-static int tegra_drive_pinmux_set_slew_falling(enum tegra_drive_pingroup pg,
+static int tegra_drive_pinmux_set_slew_falling(int pg,
enum tegra_slew slew_falling)
{
unsigned long flags;
u32 reg;
- if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
+ if (pg < 0 || pg >= drive_max)
return -ERANGE;
if (slew_falling < 0 || slew_falling >= TEGRA_MAX_SLEW)
@@ -517,7 +558,7 @@ static int tegra_drive_pinmux_set_slew_falling(enum tegra_drive_pingroup pg,
return 0;
}
-static void tegra_drive_pinmux_config_pingroup(enum tegra_drive_pingroup pingroup,
+static void tegra_drive_pinmux_config_pingroup(int pingroup,
enum tegra_hsm hsm,
enum tegra_schmitt schmitt,
enum tegra_drive drive,
@@ -596,7 +637,7 @@ void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *conf
for (i = 0; i < len; i++) {
int err;
c = config[i];
- if (c.pingroup < 0 || c.pingroup >= TEGRA_MAX_PINGROUP) {
+ if (c.pingroup < 0 || c.pingroup >= pingroup_max) {
WARN_ON(1);
continue;
}
@@ -617,7 +658,7 @@ void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config
for (i = 0; i < len; i++) {
int err;
if (config[i].pingroup < 0 ||
- config[i].pingroup >= TEGRA_MAX_PINGROUP) {
+ config[i].pingroup >= pingroup_max) {
WARN_ON(1);
continue;
}
@@ -635,7 +676,7 @@ void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *conf
{
int i;
int err;
- enum tegra_pingroup pingroup;
+ int pingroup;
for (i = 0; i < len; i++) {
pingroup = config[i].pingroup;
@@ -654,7 +695,7 @@ void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *co
{
int i;
int err;
- enum tegra_pingroup pingroup;
+ int pingroup;
for (i = 0; i < len; i++) {
pingroup = config[i].pingroup;
@@ -668,11 +709,36 @@ void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *co
}
}
+static struct of_device_id tegra_pinmux_of_match[] __devinitdata = {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-pinmux", tegra20_pinmux_init },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-pinmux", tegra30_pinmux_init },
+#endif
+ { },
+};
+
static int __devinit tegra_pinmux_probe(struct platform_device *pdev)
{
struct resource *res;
int i;
int config_bad = 0;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_pinmux_of_match, &pdev->dev);
+
+ if (match)
+ ((pinmux_init)(match->data))(&pingroups, &pingroup_max,
+ &drive_pingroups, &drive_max);
+ else
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ /* no device tree available, so we must be on tegra20 */
+ tegra20_pinmux_init(&pingroups, &pingroup_max,
+ &drive_pingroups, &drive_max);
+#else
+ pr_warn("non-Tegra20 platform requires pinmux devicetree node\n");
+#endif
for (i = 0; ; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
@@ -681,7 +747,7 @@ static int __devinit tegra_pinmux_probe(struct platform_device *pdev)
}
nbanks = i;
- for (i = 0; i < TEGRA_MAX_PINGROUP; i++) {
+ for (i = 0; i < pingroup_max; i++) {
if (pingroups[i].tri_bank >= nbanks) {
dev_err(&pdev->dev, "pingroup %d: bad tri_bank\n", i);
config_bad = 1;
@@ -698,7 +764,7 @@ static int __devinit tegra_pinmux_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < TEGRA_MAX_DRIVE_PINGROUP; i++) {
+ for (i = 0; i < drive_max; i++) {
if (drive_pingroups[i].reg_bank >= nbanks) {
dev_err(&pdev->dev,
"drive pingroup %d: bad reg_bank\n", i);
@@ -741,11 +807,6 @@ static int __devinit tegra_pinmux_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tegra_pinmux_of_match[] __devinitdata = {
- { .compatible = "nvidia,tegra20-pinmux", },
- { },
-};
-
static struct platform_driver tegra_pinmux_driver = {
.driver = {
.name = "tegra-pinmux",
@@ -779,7 +840,7 @@ static int dbg_pinmux_show(struct seq_file *s, void *unused)
int i;
int len;
- for (i = 0; i < TEGRA_MAX_PINGROUP; i++) {
+ for (i = 0; i < pingroup_max; i++) {
unsigned long reg;
unsigned long tri;
unsigned long mux;
@@ -850,7 +911,7 @@ static int dbg_drive_pinmux_show(struct seq_file *s, void *unused)
int i;
int len;
- for (i = 0; i < TEGRA_MAX_DRIVE_PINGROUP; i++) {
+ for (i = 0; i < drive_max; i++) {
u32 reg;
seq_printf(s, "\t{TEGRA_DRIVE_PINGROUP_%s",
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 371869d8ea0..ff9e6b6c046 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -174,7 +174,7 @@ static int tegra_periph_clk_enable_refcount[3 * 32];
#define pmc_readl(reg) \
__raw_readl(reg_pmc_base + (reg))
-unsigned long clk_measure_input_freq(void)
+static unsigned long clk_measure_input_freq(void)
{
u32 clock_autodetect;
clk_writel(OSC_FREQ_DET_TRIG | 1, OSC_FREQ_DET);
@@ -278,18 +278,6 @@ static struct clk_ops tegra_clk_m_ops = {
.disable = tegra2_clk_m_disable,
};
-void tegra2_periph_reset_assert(struct clk *c)
-{
- BUG_ON(!c->ops->reset);
- c->ops->reset(c, true);
-}
-
-void tegra2_periph_reset_deassert(struct clk *c)
-{
- BUG_ON(!c->ops->reset);
- c->ops->reset(c, false);
-}
-
/* super clock functions */
/* "super clocks" on tegra have two-stage muxes and a clock skipping
* super divider. We will ignore the clock skipping divider, since we
@@ -1132,6 +1120,9 @@ static struct clk_ops tegra_periph_clk_ops = {
void tegra2_sdmmc_tap_delay(struct clk *c, int delay)
{
u32 reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->spinlock, flags);
delay = clamp(delay, 0, 15);
reg = clk_readl(c->reg);
@@ -1139,6 +1130,8 @@ void tegra2_sdmmc_tap_delay(struct clk *c, int delay)
reg |= SDMMC_CLK_INT_FB_SEL;
reg |= delay << SDMMC_CLK_INT_FB_DLY_SHIFT;
clk_writel(reg, c->reg);
+
+ spin_unlock_irqrestore(&c->spinlock, flags);
}
/* External memory controller clock ops */
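The tegra2_sdmmc_tap_delay() hunk wraps the read-modify-write of the clock register in the clock's spinlock with interrupts disabled, so a concurrent caller (or one running in interrupt context) cannot interleave with the update. The general shape of that pattern, with a placeholder lock and register:

	#include <linux/spinlock.h>
	#include <linux/io.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_update_bits(void __iomem *reg, u32 mask, u32 val)
	{
		unsigned long flags;
		u32 tmp;

		spin_lock_irqsave(&demo_lock, flags);	/* also masks local IRQs */
		tmp = readl(reg);
		tmp &= ~mask;
		tmp |= val & mask;
		writel(tmp, reg);
		spin_unlock_irqrestore(&demo_lock, flags);
	}
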
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index e2272d263a8..1d1acda4f3e 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -19,7 +19,6 @@
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -106,25 +105,9 @@ static struct clock_event_device tegra_clockevent = {
.set_mode = tegra_timer_set_mode,
};
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 1MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 1us and a wrap period of about 1h11min.
- */
-#define SC_MULT 4194304000u
-#define SC_SHIFT 22
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace tegra_read_sched_clock(void)
{
- u32 cyc = timer_readl(TIMERUS_CNTR_1US);
- return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-static void notrace tegra_update_sched_clock(void)
-{
- u32 cyc = timer_readl(TIMERUS_CNTR_1US);
- update_sched_clock(&cd, cyc, (u32)~0);
+ return timer_readl(TIMERUS_CNTR_1US);
}
/*
@@ -182,20 +165,28 @@ static struct irqaction tegra_timer_irq = {
static void __init tegra_init_timer(void)
{
struct clk *clk;
- unsigned long rate = clk_measure_input_freq();
+ unsigned long rate;
int ret;
clk = clk_get_sys("timer", NULL);
- BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_warn("Unable to get timer clock."
+ " Assuming 12Mhz input clock.\n");
+ rate = 12000000;
+ } else {
+ clk_enable(clk);
+ rate = clk_get_rate(clk);
+ }
/*
* rtc registers are used by read_persistent_clock, keep the rtc clock
* enabled
*/
clk = clk_get_sys("rtc-tegra", NULL);
- BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ if (IS_ERR(clk))
+ pr_warn("Unable to get rtc-tegra clock\n");
+ else
+ clk_enable(clk);
#ifdef CONFIG_HAVE_ARM_TWD
twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
@@ -218,8 +209,7 @@ static void __init tegra_init_timer(void)
WARN(1, "Unknown clock rate");
}
- init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32,
- 1000000, SC_MULT, SC_SHIFT);
+ setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 1cbcd4fc1e1..54d8f34fdee 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -7,8 +7,8 @@ comment "ST-Ericsson Mobile Platform Products"
config MACH_U300
bool "U300"
select PINCTRL
- select PINMUX_U300
- select GPIO_U300
+ select PINCTRL_U300
+ select PINCTRL_COH901
comment "ST-Ericsson U300/U330/U335/U365 Feature Selections"
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index ac0791e924b..b4c6926a700 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1605,15 +1605,15 @@ static struct platform_device pinmux_device = {
};
/* Pinmux settings */
-static struct pinmux_map u300_pinmux_map[] = {
+static struct pinmux_map __initdata u300_pinmux_map[] = {
/* anonymous maps for chip power and EMIFs */
- PINMUX_MAP_PRIMARY_SYS_HOG("POWER", "power"),
- PINMUX_MAP_PRIMARY_SYS_HOG("EMIF0", "emif0"),
- PINMUX_MAP_PRIMARY_SYS_HOG("EMIF1", "emif1"),
+ PINMUX_MAP_SYS_HOG("POWER", "pinmux-u300", "power"),
+ PINMUX_MAP_SYS_HOG("EMIF0", "pinmux-u300", "emif0"),
+ PINMUX_MAP_SYS_HOG("EMIF1", "pinmux-u300", "emif1"),
/* per-device maps for MMC/SD, SPI and UART */
- PINMUX_MAP_PRIMARY("MMCSD", "mmc0", "mmci"),
- PINMUX_MAP_PRIMARY("SPI", "spi0", "pl022"),
- PINMUX_MAP_PRIMARY("UART0", "uart0", "uart0"),
+ PINMUX_MAP("MMCSD", "pinmux-u300", "mmc0", "mmci"),
+ PINMUX_MAP("SPI", "pinmux-u300", "spi0", "pl022"),
+ PINMUX_MAP("UART0", "pinmux-u300", "uart0", "uart0"),
};
struct u300_mux_hog {
@@ -1888,3 +1888,23 @@ static int core_module_init(void)
return mmc_init(&mmcsd_device);
}
module_init(core_module_init);
+
+/* Forward declare this function from the watchdog */
+void coh901327_watchdog_reset(void);
+
+void u300_restart(char mode, const char *cmd)
+{
+ switch (mode) {
+ case 's':
+ case 'h':
+#ifdef CONFIG_COH901327_WATCHDOG
+ coh901327_watchdog_reset();
+#endif
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ /* Wait for the system to die/reset. */
+ while (1);
+}
diff --git a/arch/arm/mach-u300/include/mach/entry-macro.S b/arch/arm/mach-u300/include/mach/entry-macro.S
index 20731ae39d3..7181d6ac665 100644
--- a/arch/arm/mach-u300/include/mach/entry-macro.S
+++ b/arch/arm/mach-u300/include/mach/entry-macro.S
@@ -8,33 +8,9 @@
* Low-level IRQ helper macros for ST-Ericsson U300
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
-#include <mach/hardware.h>
-#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON0_BASE
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
- mov \irqnr, #0
- teq \irqstat, #0
- bne 1002f
-1001: ldr \base, = U300_AHB_PER_VIRT_BASE-U300_AHB_PER_PHYS_BASE+U300_INTCON1_BASE
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
- mov \irqnr, #32
- teq \irqstat, #0
- beq 1003f
-1002: tst \irqstat, #1
- bne 1003f
- add \irqnr, \irqnr, #1
- movs \irqstat, \irqstat, lsr #1
- bne 1002b
-1003: /* EQ will be set if no irqs pending */
- .endm
diff --git a/arch/arm/mach-u300/include/mach/gpio-u300.h b/arch/arm/mach-u300/include/mach/gpio-u300.h
index 0c2b2021951..bf4c7935aec 100644
--- a/arch/arm/mach-u300/include/mach/gpio-u300.h
+++ b/arch/arm/mach-u300/include/mach/gpio-u300.h
@@ -9,121 +9,6 @@
#ifndef __MACH_U300_GPIO_U300_H
#define __MACH_U300_GPIO_U300_H
-/*
- * Individual pin assignments for the B26/S26. Notice that the
- * actual usage of these pins depends on the PAD MUX settings, that
- * is why the same number can potentially appear several times.
- * In the reference design each pin is only used for one purpose.
- * These were determined by inspecting the B26/S26 schematic:
- * 2/1911-ROA 128 1603
- */
-#ifdef CONFIG_MACH_U300_BS2X
-#define U300_GPIO_PIN_UART_RX 0
-#define U300_GPIO_PIN_UART_TX 1
-#define U300_GPIO_PIN_GPIO02 2 /* Unrouted */
-#define U300_GPIO_PIN_GPIO03 3 /* Unrouted */
-#define U300_GPIO_PIN_CAM_SLEEP 4
-#define U300_GPIO_PIN_CAM_REG_EN 5
-#define U300_GPIO_PIN_GPIO06 6 /* Unrouted */
-#define U300_GPIO_PIN_GPIO07 7 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO08 8 /* Service point SP2321 */
-#define U300_GPIO_PIN_GPIO09 9 /* Service point SP2322 */
-#define U300_GPIO_PIN_PHFSENSE 10 /* Headphone jack sensing */
-#define U300_GPIO_PIN_MMC_CLKRET 11 /* Clock return from MMC/SD card */
-#define U300_GPIO_PIN_MMC_CD 12 /* MMC Card insertion detection */
-#define U300_GPIO_PIN_FLIPSENSE 13 /* Mechanical flip sensing */
-#define U300_GPIO_PIN_GPIO14 14 /* DSP JTAG Port RTCK */
-#define U300_GPIO_PIN_GPIO15 15 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO16 16 /* Unrouted */
-#define U300_GPIO_PIN_GPIO17 17 /* Unrouted */
-#define U300_GPIO_PIN_GPIO18 18 /* Unrouted */
-#define U300_GPIO_PIN_GPIO19 19 /* Unrouted */
-#define U300_GPIO_PIN_GPIO20 20 /* Unrouted */
-#define U300_GPIO_PIN_GPIO21 21 /* Unrouted */
-#define U300_GPIO_PIN_GPIO22 22 /* Unrouted */
-#define U300_GPIO_PIN_GPIO23 23 /* Unrouted */
-#endif
-
-/*
- * Individual pin assignments for the B330/S330 and B365/S365.
- * Notice that the actual usage of these pins depends on the
- * PAD MUX settings, that is why the same number can potentially
- * appear several times. In the reference design each pin is only
- * used for one purpose. These were determined by inspecting the
- * S365 schematic.
- */
-#if defined(CONFIG_MACH_U300_BS330) || defined(CONFIG_MACH_U300_BS365) || \
- defined(CONFIG_MACH_U300_BS335)
-#define U300_GPIO_PIN_UART_RX 0
-#define U300_GPIO_PIN_UART_TX 1
-#define U300_GPIO_PIN_UART_CTS 2
-#define U300_GPIO_PIN_UART_RTS 3
-#define U300_GPIO_PIN_CAM_MAIN_STANDBY 4 /* Camera MAIN standby */
-#define U300_GPIO_PIN_GPIO05 5 /* Unrouted */
-#define U300_GPIO_PIN_MS_CD 6 /* Memory Stick Card insertion */
-#define U300_GPIO_PIN_GPIO07 7 /* Test point TP2430 */
-
-#define U300_GPIO_PIN_GPIO08 8 /* Test point TP2437 */
-#define U300_GPIO_PIN_GPIO09 9 /* Test point TP2431 */
-#define U300_GPIO_PIN_GPIO10 10 /* Test point TP2432 */
-#define U300_GPIO_PIN_MMC_CLKRET 11 /* Clock return from MMC/SD card */
-#define U300_GPIO_PIN_MMC_CD 12 /* MMC Card insertion detection */
-#define U300_GPIO_PIN_CAM_SUB_STANDBY 13 /* Camera SUB standby */
-#define U300_GPIO_PIN_GPIO14 14 /* Test point TP2436 */
-#define U300_GPIO_PIN_GPIO15 15 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO16 16 /* Test point TP2438 */
-#define U300_GPIO_PIN_PHFSENSE 17 /* Headphone jack sensing */
-#define U300_GPIO_PIN_GPIO18 18 /* Test point TP2439 */
-#define U300_GPIO_PIN_GPIO19 19 /* Routed somewhere */
-#define U300_GPIO_PIN_GPIO20 20 /* Unrouted */
-#define U300_GPIO_PIN_GPIO21 21 /* Unrouted */
-#define U300_GPIO_PIN_GPIO22 22 /* Unrouted */
-#define U300_GPIO_PIN_GPIO23 23 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO24 24 /* Unrouted */
-#define U300_GPIO_PIN_GPIO25 25 /* Unrouted */
-#define U300_GPIO_PIN_GPIO26 26 /* Unrouted */
-#define U300_GPIO_PIN_GPIO27 27 /* Unrouted */
-#define U300_GPIO_PIN_GPIO28 28 /* Unrouted */
-#define U300_GPIO_PIN_GPIO29 29 /* Unrouted */
-#define U300_GPIO_PIN_GPIO30 30 /* Unrouted */
-#define U300_GPIO_PIN_GPIO31 31 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO32 32 /* Unrouted */
-#define U300_GPIO_PIN_GPIO33 33 /* Unrouted */
-#define U300_GPIO_PIN_GPIO34 34 /* Unrouted */
-#define U300_GPIO_PIN_GPIO35 35 /* Unrouted */
-#define U300_GPIO_PIN_GPIO36 36 /* Unrouted */
-#define U300_GPIO_PIN_GPIO37 37 /* Unrouted */
-#define U300_GPIO_PIN_GPIO38 38 /* Unrouted */
-#define U300_GPIO_PIN_GPIO39 39 /* Unrouted */
-
-#ifdef CONFIG_MACH_U300_BS335
-
-#define U300_GPIO_PIN_GPIO40 40 /* Unrouted */
-#define U300_GPIO_PIN_GPIO41 41 /* Unrouted */
-#define U300_GPIO_PIN_GPIO42 42 /* Unrouted */
-#define U300_GPIO_PIN_GPIO43 43 /* Unrouted */
-#define U300_GPIO_PIN_GPIO44 44 /* Unrouted */
-#define U300_GPIO_PIN_GPIO45 45 /* Unrouted */
-#define U300_GPIO_PIN_GPIO46 46 /* Unrouted */
-#define U300_GPIO_PIN_GPIO47 47 /* Unrouted */
-
-#define U300_GPIO_PIN_GPIO48 48 /* Unrouted */
-#define U300_GPIO_PIN_GPIO49 49 /* Unrouted */
-#define U300_GPIO_PIN_GPIO50 50 /* Unrouted */
-#define U300_GPIO_PIN_GPIO51 51 /* Unrouted */
-#define U300_GPIO_PIN_GPIO52 52 /* Unrouted */
-#define U300_GPIO_PIN_GPIO53 53 /* Unrouted */
-#define U300_GPIO_PIN_GPIO54 54 /* Unrouted */
-#define U300_GPIO_PIN_GPIO55 55 /* Unrouted */
-#endif
-
-#endif
-
/**
* enum u300_gpio_variant - the type of U300 GPIO employed
*/
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index db3fbfa1d6e..ee78a26707e 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -110,7 +110,7 @@
#endif
/* Maximum 8*7 GPIO lines */
-#ifdef CONFIG_GPIO_U300
+#ifdef CONFIG_PINCTRL_COH901
#define IRQ_U300_GPIO_BASE (U300_VIC_IRQS_END)
#define IRQ_U300_GPIO_END (IRQ_U300_GPIO_BASE + 56)
#else
diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h
deleted file mode 100644
index c808f347a08..00000000000
--- a/arch/arm/mach-u300/include/mach/memory.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/include/mach/memory.h
- *
- *
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Memory virtual/physical mapping constants.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-
-#ifndef __MACH_MEMORY_H
-#define __MACH_MEMORY_H
-
-#define PLAT_PHYS_OFFSET UL(0x48000000)
-#define BOOT_PARAMS_OFFSET 0x100
-
-#endif
diff --git a/arch/arm/mach-u300/include/mach/platform.h b/arch/arm/mach-u300/include/mach/platform.h
index 77d9210a82e..096333f32fc 100644
--- a/arch/arm/mach-u300/include/mach/platform.h
+++ b/arch/arm/mach-u300/include/mach/platform.h
@@ -14,6 +14,7 @@
void u300_map_io(void);
void u300_init_irq(void);
void u300_init_devices(void);
+void u300_restart(char, const char *);
extern struct sys_timer u300_timer;
#endif
diff --git a/arch/arm/mach-u300/include/mach/system.h b/arch/arm/mach-u300/include/mach/system.h
index 8daf13634ce..574d46e3829 100644
--- a/arch/arm/mach-u300/include/mach/system.h
+++ b/arch/arm/mach-u300/include/mach/system.h
@@ -8,35 +8,7 @@
* System shutdown and reset functions.
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
-#include <mach/hardware.h>
-#include <asm/io.h>
-#include <asm/hardware/vic.h>
-#include <asm/irq.h>
-
-/* Forward declare this function from the watchdog */
-void coh901327_watchdog_reset(void);
-
static inline void arch_idle(void)
{
cpu_do_idle();
}
-
-static void arch_reset(char mode, const char *cmd)
-{
- switch (mode) {
- case 's':
- case 'h':
- printk(KERN_CRIT "RESET: shutting down/rebooting system\n");
- /* Disable interrupts */
- local_irq_disable();
-#ifdef CONFIG_COH901327_WATCHDOG
- coh901327_watchdog_reset();
-#endif
- break;
- default:
- /* Do nothing */
- break;
- }
- /* Wait for system do die/reset. */
- while (1);
-}
diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h
deleted file mode 100644
index ec423b92b81..00000000000
--- a/arch/arm/mach-u300/include/mach/vmalloc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/include/mach/vmalloc.h
- *
- *
- * Copyright (C) 2006-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Virtual memory allocations
- * End must be above the I/O registers and on an even 2MiB boundary.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#define VMALLOC_END 0xfe800000UL
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index 4d482aacc27..05abd6ad9fa 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -18,8 +18,8 @@
#include <linux/slab.h>
#include <mach/coh901318.h>
#include <mach/dma_channels.h>
-#include <mach/gpio-u300.h>
+#include "u300-gpio.h"
#include "mmc.h"
static struct mmci_platform_data mmc0_plat_data = {
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 5f51bdeef0e..bc1c7897e82 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -9,7 +9,6 @@
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clockchips.h>
@@ -337,18 +336,10 @@ static struct irqaction u300_timer_irq = {
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by OMAP implementation.)
*/
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace u300_read_sched_clock(void)
{
- u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace u300_update_sched_clock(void)
-{
- u32 cyc = readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
- update_sched_clock(&cd, cyc, (u32)~0);
+ return readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC);
}
@@ -366,7 +357,7 @@ static void __init u300_timer_init(void)
clk_enable(clk);
rate = clk_get_rate(clk);
- init_sched_clock(&cd, u300_update_sched_clock, 32, rate);
+ setup_sched_clock(u300_read_sched_clock, 32, rate);
/*
* Disable the "OS" and "DD" timers - these are designed for Symbian!
diff --git a/arch/arm/mach-u300/u300-gpio.h b/arch/arm/mach-u300/u300-gpio.h
new file mode 100644
index 00000000000..847dc25300c
--- /dev/null
+++ b/arch/arm/mach-u300/u300-gpio.h
@@ -0,0 +1,114 @@
+/*
+ * Individual pin assignments for the B26/S26. Notice that the
+ * actual usage of these pins depends on the PAD MUX settings, that
+ * is why the same number can potentially appear several times.
+ * In the reference design each pin is only used for one purpose.
+ * These were determined by inspecting the B26/S26 schematic:
+ * 2/1911-ROA 128 1603
+ */
+#ifdef CONFIG_MACH_U300_BS2X
+#define U300_GPIO_PIN_UART_RX 0
+#define U300_GPIO_PIN_UART_TX 1
+#define U300_GPIO_PIN_GPIO02 2 /* Unrouted */
+#define U300_GPIO_PIN_GPIO03 3 /* Unrouted */
+#define U300_GPIO_PIN_CAM_SLEEP 4
+#define U300_GPIO_PIN_CAM_REG_EN 5
+#define U300_GPIO_PIN_GPIO06 6 /* Unrouted */
+#define U300_GPIO_PIN_GPIO07 7 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO08 8 /* Service point SP2321 */
+#define U300_GPIO_PIN_GPIO09 9 /* Service point SP2322 */
+#define U300_GPIO_PIN_PHFSENSE 10 /* Headphone jack sensing */
+#define U300_GPIO_PIN_MMC_CLKRET 11 /* Clock return from MMC/SD card */
+#define U300_GPIO_PIN_MMC_CD 12 /* MMC Card insertion detection */
+#define U300_GPIO_PIN_FLIPSENSE 13 /* Mechanical flip sensing */
+#define U300_GPIO_PIN_GPIO14 14 /* DSP JTAG Port RTCK */
+#define U300_GPIO_PIN_GPIO15 15 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO16 16 /* Unrouted */
+#define U300_GPIO_PIN_GPIO17 17 /* Unrouted */
+#define U300_GPIO_PIN_GPIO18 18 /* Unrouted */
+#define U300_GPIO_PIN_GPIO19 19 /* Unrouted */
+#define U300_GPIO_PIN_GPIO20 20 /* Unrouted */
+#define U300_GPIO_PIN_GPIO21 21 /* Unrouted */
+#define U300_GPIO_PIN_GPIO22 22 /* Unrouted */
+#define U300_GPIO_PIN_GPIO23 23 /* Unrouted */
+#endif
+
+/*
+ * Individual pin assignments for the B330/S330 and B365/S365.
+ * Notice that the actual usage of these pins depends on the
+ * PAD MUX settings, that is why the same number can potentially
+ * appear several times. In the reference design each pin is only
+ * used for one purpose. These were determined by inspecting the
+ * S365 schematic.
+ */
+#if defined(CONFIG_MACH_U300_BS330) || defined(CONFIG_MACH_U300_BS365) || \
+ defined(CONFIG_MACH_U300_BS335)
+#define U300_GPIO_PIN_UART_RX 0
+#define U300_GPIO_PIN_UART_TX 1
+#define U300_GPIO_PIN_UART_CTS 2
+#define U300_GPIO_PIN_UART_RTS 3
+#define U300_GPIO_PIN_CAM_MAIN_STANDBY 4 /* Camera MAIN standby */
+#define U300_GPIO_PIN_GPIO05 5 /* Unrouted */
+#define U300_GPIO_PIN_MS_CD 6 /* Memory Stick Card insertion */
+#define U300_GPIO_PIN_GPIO07 7 /* Test point TP2430 */
+
+#define U300_GPIO_PIN_GPIO08 8 /* Test point TP2437 */
+#define U300_GPIO_PIN_GPIO09 9 /* Test point TP2431 */
+#define U300_GPIO_PIN_GPIO10 10 /* Test point TP2432 */
+#define U300_GPIO_PIN_MMC_CLKRET 11 /* Clock return from MMC/SD card */
+#define U300_GPIO_PIN_MMC_CD 12 /* MMC Card insertion detection */
+#define U300_GPIO_PIN_CAM_SUB_STANDBY 13 /* Camera SUB standby */
+#define U300_GPIO_PIN_GPIO14 14 /* Test point TP2436 */
+#define U300_GPIO_PIN_GPIO15 15 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO16 16 /* Test point TP2438 */
+#define U300_GPIO_PIN_PHFSENSE 17 /* Headphone jack sensing */
+#define U300_GPIO_PIN_GPIO18 18 /* Test point TP2439 */
+#define U300_GPIO_PIN_GPIO19 19 /* Routed somewhere */
+#define U300_GPIO_PIN_GPIO20 20 /* Unrouted */
+#define U300_GPIO_PIN_GPIO21 21 /* Unrouted */
+#define U300_GPIO_PIN_GPIO22 22 /* Unrouted */
+#define U300_GPIO_PIN_GPIO23 23 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO24 24 /* Unrouted */
+#define U300_GPIO_PIN_GPIO25 25 /* Unrouted */
+#define U300_GPIO_PIN_GPIO26 26 /* Unrouted */
+#define U300_GPIO_PIN_GPIO27 27 /* Unrouted */
+#define U300_GPIO_PIN_GPIO28 28 /* Unrouted */
+#define U300_GPIO_PIN_GPIO29 29 /* Unrouted */
+#define U300_GPIO_PIN_GPIO30 30 /* Unrouted */
+#define U300_GPIO_PIN_GPIO31 31 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO32 32 /* Unrouted */
+#define U300_GPIO_PIN_GPIO33 33 /* Unrouted */
+#define U300_GPIO_PIN_GPIO34 34 /* Unrouted */
+#define U300_GPIO_PIN_GPIO35 35 /* Unrouted */
+#define U300_GPIO_PIN_GPIO36 36 /* Unrouted */
+#define U300_GPIO_PIN_GPIO37 37 /* Unrouted */
+#define U300_GPIO_PIN_GPIO38 38 /* Unrouted */
+#define U300_GPIO_PIN_GPIO39 39 /* Unrouted */
+
+#ifdef CONFIG_MACH_U300_BS335
+
+#define U300_GPIO_PIN_GPIO40 40 /* Unrouted */
+#define U300_GPIO_PIN_GPIO41 41 /* Unrouted */
+#define U300_GPIO_PIN_GPIO42 42 /* Unrouted */
+#define U300_GPIO_PIN_GPIO43 43 /* Unrouted */
+#define U300_GPIO_PIN_GPIO44 44 /* Unrouted */
+#define U300_GPIO_PIN_GPIO45 45 /* Unrouted */
+#define U300_GPIO_PIN_GPIO46 46 /* Unrouted */
+#define U300_GPIO_PIN_GPIO47 47 /* Unrouted */
+
+#define U300_GPIO_PIN_GPIO48 48 /* Unrouted */
+#define U300_GPIO_PIN_GPIO49 49 /* Unrouted */
+#define U300_GPIO_PIN_GPIO50 50 /* Unrouted */
+#define U300_GPIO_PIN_GPIO51 51 /* Unrouted */
+#define U300_GPIO_PIN_GPIO52 52 /* Unrouted */
+#define U300_GPIO_PIN_GPIO53 53 /* Unrouted */
+#define U300_GPIO_PIN_GPIO54 54 /* Unrouted */
+#define U300_GPIO_PIN_GPIO55 55 /* Unrouted */
+#endif
+
+#endif
diff --git a/arch/arm/mach-u300/u300.c b/arch/arm/mach-u300/u300.c
index 89422ee7f3a..f30c69d91d9 100644
--- a/arch/arm/mach-u300/u300.c
+++ b/arch/arm/mach-u300/u300.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/platform.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/memory.h>
@@ -46,9 +47,11 @@ static void __init u300_init_machine(void)
MACHINE_START(U300, MACH_U300_STRING)
/* Maintainer: Linus Walleij <linus.walleij@stericsson.com> */
- .atag_offset = BOOT_PARAMS_OFFSET,
+ .atag_offset = 0x100,
.map_io = u300_map_io,
.init_irq = u300_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &u300_timer,
.init_machine = u300_init_machine,
+ .restart = u300_restart,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 6826faeecc6..23be34b3bb6 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -22,6 +22,12 @@
#include "ste-dma40-db8500.h"
/*
+ * v2 has a new version of this block that needs to be forced; the number
+ * found in hardware is incorrect.
+ */
+#define U8500_SDI_V2_PERIPHID 0x10480180
+
+/*
* SDI 0 (MicroSD slot)
*/
@@ -117,10 +123,7 @@ static void sdi0_configure(void)
gpio_direction_output(sdi0_en, 1);
/* Add the device, force v2 to subrevision 1 */
- if (cpu_is_u8500v2())
- db8500_add_sdi0(&mop500_sdi0_data, 0x10480180);
- else
- db8500_add_sdi0(&mop500_sdi0_data, 0);
+ db8500_add_sdi0(&mop500_sdi0_data, U8500_SDI_V2_PERIPHID);
}
void mop500_sdi_tc35892_init(void)
@@ -132,6 +135,42 @@ void mop500_sdi_tc35892_init(void)
}
/*
+ * SDI1 (SDIO WLAN)
+ */
+#ifdef CONFIG_STE_DMA40
+static struct stedma40_chan_cfg sdi1_dma_cfg_rx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_PERIPH_TO_MEM,
+ .src_dev_type = DB8500_DMA_DEV32_SD_MM1_RX,
+ .dst_dev_type = STEDMA40_DEV_DST_MEMORY,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+
+static struct stedma40_chan_cfg sdi1_dma_cfg_tx = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = STEDMA40_MEM_TO_PERIPH,
+ .src_dev_type = STEDMA40_DEV_SRC_MEMORY,
+ .dst_dev_type = DB8500_DMA_DEV32_SD_MM1_TX,
+ .src_info.data_width = STEDMA40_WORD_WIDTH,
+ .dst_info.data_width = STEDMA40_WORD_WIDTH,
+};
+#endif
+
+static struct mmci_platform_data mop500_sdi1_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 50000000,
+ .capabilities = MMC_CAP_4_BIT_DATA,
+ .gpio_cd = -1,
+ .gpio_wp = -1,
+#ifdef CONFIG_STE_DMA40
+ .dma_filter = stedma40_filter,
+ .dma_rx_param = &sdi1_dma_cfg_rx,
+ .dma_tx_param = &sdi1_dma_cfg_tx,
+#endif
+};
+
+/*
* SDI 2 (POP eMMC, not on DB8500ed)
*/
@@ -158,7 +197,8 @@ static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = {
static struct mmci_platform_data mop500_sdi2_data = {
.ocr_mask = MMC_VDD_165_195,
.f_max = 50000000,
- .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
+ MMC_CAP_MMC_HIGHSPEED,
.gpio_cd = -1,
.gpio_wp = -1,
#ifdef CONFIG_STE_DMA40
@@ -208,20 +248,10 @@ static struct mmci_platform_data mop500_sdi4_data = {
void __init mop500_sdi_init(void)
{
- u32 periphid = 0;
-
- /* v2 has a new version of this block that need to be forced */
- if (cpu_is_u8500v2())
- periphid = 0x10480180;
- /* PoP:ed eMMC on top of DB8500 v1.0 has problems with high speed */
- if (!cpu_is_u8500v10())
- mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
-
- db8500_add_sdi2(&mop500_sdi2_data, periphid);
-
+ /* PoP:ed eMMC */
+ db8500_add_sdi2(&mop500_sdi2_data, U8500_SDI_V2_PERIPHID);
/* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data, periphid);
-
+ db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
/*
* On boards with the TC35892 GPIO expander, sdi0 will finally
* be added when the TC35892 initializes and calls
@@ -231,13 +261,9 @@ void __init mop500_sdi_init(void)
void __init snowball_sdi_init(void)
{
- u32 periphid = 0x10480180;
-
- mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
-
/* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data, periphid);
-
+ db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
+ /* External Micro SD slot */
mop500_sdi0_data.gpio_cd = SNOWBALL_SDMMC_CD_GPIO;
mop500_sdi0_data.cd_invert = true;
sdi0_en = SNOWBALL_SDMMC_EN_GPIO;
@@ -247,17 +273,15 @@ void __init snowball_sdi_init(void)
void __init hrefv60_sdi_init(void)
{
- u32 periphid = 0x10480180;
-
- mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
-
- db8500_add_sdi2(&mop500_sdi2_data, periphid);
-
+ /* PoP:ed eMMC */
+ db8500_add_sdi2(&mop500_sdi2_data, U8500_SDI_V2_PERIPHID);
/* On-board eMMC */
- db8500_add_sdi4(&mop500_sdi4_data, periphid);
-
+ db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
+ /* External Micro SD slot */
mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO;
sdi0_en = HREFV60_SDMMC_EN_GPIO;
sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO;
sdi0_configure();
+ /* WLAN SDIO channel */
+ db8500_add_sdi1(&mop500_sdi1_data, U8500_SDI_V2_PERIPHID);
}
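U8500_SDI_V2_PERIPHID (0x10480180) overrides the ID the v2 silicon reports; it is an ordinary PrimeCell peripheral ID word, so the stock AMBA helpers can pick it apart. A small sketch of that decoding; reading the fields as PL180 MMCI, ST-Ericsson, revision 4 is an assumption based on the generic AMBA ID layout, not something stated in this patch:

	#include <linux/kernel.h>
	#include <linux/amba/bus.h>

	static void demo_decode_periphid(unsigned int id)	/* e.g. 0x10480180 */
	{
		unsigned int part = AMBA_PART_BITS(id);		/* 0x180: PL180 MMCI, assumed */
		unsigned int manf = AMBA_MANF_BITS(id);		/* 0x80: ST-Ericsson, assumed */
		unsigned int rev  = AMBA_REV_BITS(id);		/* 0x4: revision 4 */
		unsigned int cfg  = AMBA_CONFIG_BITS(id);	/* 0x10: config field */

		pr_info("part %03x manf %02x rev %x cfg %02x\n", part, manf, rev, cfg);
	}
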
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index bdd7b80dd7a..9361a529017 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -33,6 +33,7 @@
#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/hardware/gic.h>
#include <plat/i2c.h>
#include <plat/ste_dma40.h>
@@ -672,7 +673,7 @@ static void __init hrefv60_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
mop500_i2c_init();
- mop500_sdi_init();
+ hrefv60_sdi_init();
mop500_spi_init();
mop500_uart_init();
@@ -695,6 +696,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.init_irq = ux500_init_irq,
/* we re-use nomadik timer here */
.timer = &ux500_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = mop500_init_machine,
MACHINE_END
@@ -703,6 +705,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
.map_io = u8500_map_io,
.init_irq = ux500_init_irq,
.timer = &ux500_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = hrefv60_init_machine,
MACHINE_END
@@ -712,5 +715,6 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
.init_irq = ux500_init_irq,
/* we re-use nomadik timer here */
.timer = &ux500_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = snowball_init_machine,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index de18a2a23e6..f926d3db620 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -7,40 +7,77 @@
#ifndef __BOARD_MOP500_H
#define __BOARD_MOP500_H
-/* snowball GPIO for MMC card */
-#define SNOWBALL_SDMMC_EN_GPIO 217
-#define SNOWBALL_SDMMC_1V8_3V_GPIO 228
-#define SNOWBALL_SDMMC_CD_GPIO 218
+/* Snowball specific GPIO assignments, this board has no GPIO expander */
+#define SNOWBALL_ACCEL_INT1_GPIO 163
+#define SNOWBALL_ACCEL_INT2_GPIO 164
+#define SNOWBALL_MAGNET_DRDY_GPIO 165
+#define SNOWBALL_SDMMC_EN_GPIO 217
+#define SNOWBALL_SDMMC_1V8_3V_GPIO 228
+#define SNOWBALL_SDMMC_CD_GPIO 218
/* HREFv60-specific GPIO assignments, this board has no GPIO expander */
-#define HREFV60_TOUCH_RST_GPIO 143
-#define HREFV60_PROX_SENSE_GPIO 217
-#define HREFV60_HAL_SW_GPIO 145
-#define HREFV60_SDMMC_EN_GPIO 169
#define HREFV60_SDMMC_1V8_3V_GPIO 5
-#define HREFV60_SDMMC_CD_GPIO 95
-#define HREFV60_ACCEL_INT1_GPIO 82
-#define HREFV60_ACCEL_INT2_GPIO 83
+#define HREFV60_CAMERA_FLASH_ENABLE 21
#define HREFV60_MAGNET_DRDY_GPIO 32
#define HREFV60_DISP1_RST_GPIO 65
#define HREFV60_DISP2_RST_GPIO 66
+#define HREFV60_ACCEL_INT1_GPIO 82
+#define HREFV60_ACCEL_INT2_GPIO 83
+#define HREFV60_SDMMC_CD_GPIO 95
+#define HREFV60_XSHUTDOWN_SECONDARY_SENSOR 140
+#define HREFV60_TOUCH_RST_GPIO 143
+#define HREFV60_HAL_SW_GPIO 145
+#define HREFV60_SDMMC_EN_GPIO 169
+#define HREFV60_MMIO_XENON_CHARGE 170
+#define HREFV60_PROX_SENSE_GPIO 217
+
+/* MOP500 generic GPIOs */
+#define CAMERA_FLASH_INT_PIN 7
+#define CYPRESS_TOUCH_INT_PIN 84
+#define XSHUTDOWN_PRIMARY_SENSOR 141
+#define XSHUTDOWN_SECONDARY_SENSOR 142
+#define CYPRESS_TOUCH_RST_GPIO 143
+#define MOP500_HDMI_RST_GPIO 196
+#define CYPRESS_SLAVE_SELECT_GPIO 216
/* GPIOs on the TC35892 expander */
#define MOP500_EGPIO(x) (NOMADIK_NR_GPIO + (x))
+#define GPIO_MAGNET_DRDY MOP500_EGPIO(1)
#define GPIO_SDMMC_CD MOP500_EGPIO(3)
+#define GPIO_CAMERA_FLASH_ENABLE MOP500_EGPIO(4)
+#define GPIO_MMIO_XENON_CHARGE MOP500_EGPIO(5)
#define GPIO_PROX_SENSOR MOP500_EGPIO(7)
+#define GPIO_HAL_SENSOR MOP500_EGPIO(8)
+#define GPIO_ACCEL_INT1 MOP500_EGPIO(10)
+#define GPIO_ACCEL_INT2 MOP500_EGPIO(11)
#define GPIO_BU21013_CS MOP500_EGPIO(13)
+#define MOP500_DISP2_RST_GPIO MOP500_EGPIO(14)
+#define MOP500_DISP1_RST_GPIO MOP500_EGPIO(15)
#define GPIO_SDMMC_EN MOP500_EGPIO(17)
#define GPIO_SDMMC_1V8_3V_SEL MOP500_EGPIO(18)
#define MOP500_EGPIO_END MOP500_EGPIO(24)
-/* GPIOs on the AB8500 mixed-signals circuit */
-#define MOP500_AB8500_GPIO(x) (MOP500_EGPIO_END + (x))
+/*
+ * GPIOs on the AB8500 mixed-signals circuit
+ * Notice that we subtract 1 from the number passed into the macro; this is
+ * because the AB8500 GPIO pins are numbered starting from 1, so the value
+ * in parentheses matches the GPIO pin number in the data sheet.
+ */
+#define MOP500_AB8500_PIN_GPIO(x) (MOP500_EGPIO_END + (x) - 1)
+/* Snowball AB8500 GPIOs */
+#define SNOWBALL_VSMPS2_1V8_GPIO MOP500_AB8500_PIN_GPIO(1) /* SYSCLKREQ2/GPIO1 */
+#define SNOWBALL_PM_GPIO1_GPIO MOP500_AB8500_PIN_GPIO(2) /* SYSCLKREQ3/GPIO2 */
+#define SNOWBALL_WLAN_CLK_REQ_GPIO MOP500_AB8500_PIN_GPIO(3) /* SYSCLKREQ4/GPIO3 */
+#define SNOWBALL_PM_GPIO4_GPIO MOP500_AB8500_PIN_GPIO(4) /* SYSCLKREQ6/GPIO4 */
+#define SNOWBALL_EN_3V6_GPIO MOP500_AB8500_PIN_GPIO(16) /* PWMOUT3/GPIO16 */
+#define SNOWBALL_PME_ETH_GPIO MOP500_AB8500_PIN_GPIO(24) /* SYSCLKREQ7/GPIO24 */
+#define SNOWBALL_EN_3V3_ETH_GPIO MOP500_AB8500_PIN_GPIO(26) /* GPIO26 */
struct i2c_board_info;
extern void mop500_sdi_init(void);
extern void snowball_sdi_init(void);
+extern void hrefv60_sdi_init(void);
extern void mop500_sdi_tc35892_init(void);
void __init mop500_u8500uib_init(void);
void __init mop500_stuib_init(void);
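The MOP500_AB8500_PIN_GPIO() macro subtracts 1 so its argument can match the 1-based GPIO numbering used in the AB8500 data sheet while still yielding a contiguous, zero-based range right after MOP500_EGPIO_END. A worked example with a stand-in base value (the real MOP500_EGPIO_END depends on NOMADIK_NR_GPIO):

	#define DEMO_EGPIO_END		268	/* stand-in for MOP500_EGPIO_END */
	#define DEMO_AB8500_GPIO(x)	(DEMO_EGPIO_END + (x) - 1)

	/*
	 * DEMO_AB8500_GPIO(1)  == 268	data sheet GPIO1 lands on the base number
	 * DEMO_AB8500_GPIO(26) == 293	data sheet GPIO26, e.g. EN_3V3_ETH on Snowball
	 */
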
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
index 82025ba70c0..fe1569b67c9 100644
--- a/arch/arm/mach-ux500/board-u5500.c
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -12,6 +12,7 @@
#include <linux/i2c.h>
#include <linux/mfd/ab5500/ab5500.h>
+#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -149,5 +150,6 @@ MACHINE_START(U5500, "ST-Ericsson U5500 Platform")
.map_io = u5500_map_io,
.init_irq = ux500_init_irq,
.timer = &ux500_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = u5500_init_machine,
MACHINE_END
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index e832664d1bd..73790753700 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -239,23 +239,6 @@ static void clk_prcmu_disable(struct clk *clk)
writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
}
-/* ED doesn't have the combined set/clr registers */
-static void clk_prcmu_ed_enable(struct clk *clk)
-{
- void __iomem *addr = __io_address(U8500_PRCMU_BASE)
- + clk->prcmu_cg_mgt;
-
- writel(readl(addr) | PRCM_MGT_ENABLE, addr);
-}
-
-static void clk_prcmu_ed_disable(struct clk *clk)
-{
- void __iomem *addr = __io_address(U8500_PRCMU_BASE)
- + clk->prcmu_cg_mgt;
-
- writel(readl(addr) & ~PRCM_MGT_ENABLE, addr);
-}
-
static struct clkops clk_prcmu_ops = {
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
@@ -267,7 +250,6 @@ static unsigned int clkrst_base[] = {
[3] = U8500_CLKRST3_BASE,
[5] = U8500_CLKRST5_BASE,
[6] = U8500_CLKRST6_BASE,
- [7] = U8500_CLKRST7_BASE_ED,
};
static void clk_prcc_enable(struct clk *clk)
@@ -321,7 +303,6 @@ static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
-static DEFINE_PRCMU_CLK_RATE(per7clk, 0x0, 16, PER7CLK, 100000000);
static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
@@ -351,44 +332,28 @@ static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */
static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
-static DEFINE_PRCC_CLK(1, spi3_ed, 7, 7, NULL);
-static DEFINE_PRCC_CLK(1, spi3_v1, 7, -1, NULL);
+static DEFINE_PRCC_CLK(1, spi3, 7, -1, NULL);
static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(1, msp1_ed, 4, 4, &clk_msp02clk);
-static DEFINE_PRCC_CLK(1, msp1_v1, 4, 4, &clk_msp1clk);
+static DEFINE_PRCC_CLK(1, msp1, 4, 4, &clk_msp1clk);
static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);
/* Peripheral Cluster #2 */
-
-static DEFINE_PRCC_CLK(2, gpio1_ed, 12, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssitx_ed, 11, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssirx_ed, 10, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi0_ed, 9, -1, NULL);
-static DEFINE_PRCC_CLK(2, sdi3_ed, 8, 6, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, sdi1_ed, 7, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, msp2_ed, 6, 4, &clk_msp02clk);
-static DEFINE_PRCC_CLK(2, sdi4_ed, 4, 2, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, pwl_ed, 3, 1, NULL);
-static DEFINE_PRCC_CLK(2, spi1_ed, 2, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi2_ed, 1, -1, NULL);
-static DEFINE_PRCC_CLK(2, i2c3_ed, 0, 0, &clk_i2cclk);
-
-static DEFINE_PRCC_CLK(2, gpio1_v1, 11, -1, NULL);
-static DEFINE_PRCC_CLK(2, ssitx_v1, 10, 7, NULL);
-static DEFINE_PRCC_CLK(2, ssirx_v1, 9, 6, NULL);
-static DEFINE_PRCC_CLK(2, spi0_v1, 8, -1, NULL);
-static DEFINE_PRCC_CLK(2, sdi3_v1, 7, 5, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, sdi1_v1, 6, 4, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, msp2_v1, 5, 3, &clk_msp02clk);
-static DEFINE_PRCC_CLK(2, sdi4_v1, 4, 2, &clk_sdmmcclk);
-static DEFINE_PRCC_CLK(2, pwl_v1, 3, 1, NULL);
-static DEFINE_PRCC_CLK(2, spi1_v1, 2, -1, NULL);
-static DEFINE_PRCC_CLK(2, spi2_v1, 1, -1, NULL);
-static DEFINE_PRCC_CLK(2, i2c3_v1, 0, 0, &clk_i2cclk);
+static DEFINE_PRCC_CLK(2, gpio1, 11, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssitx, 10, 7, NULL);
+static DEFINE_PRCC_CLK(2, ssirx, 9, 6, NULL);
+static DEFINE_PRCC_CLK(2, spi0, 8, -1, NULL);
+static DEFINE_PRCC_CLK(2, sdi3, 7, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, sdi1, 6, 4, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, msp2, 5, 3, &clk_msp02clk);
+static DEFINE_PRCC_CLK(2, sdi4, 4, 2, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, pwl, 3, 1, NULL);
+static DEFINE_PRCC_CLK(2, spi1, 2, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi2, 1, -1, NULL);
+static DEFINE_PRCC_CLK(2, i2c3, 0, 0, &clk_i2cclk);
/* Peripheral Cluster #3 */
static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
@@ -397,49 +362,34 @@ static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp1_ed, 2, 2, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp0_ed, 1, 1, &clk_i2cclk);
-static DEFINE_PRCC_CLK(3, ssp1_v1, 2, 2, &clk_sspclk);
-static DEFINE_PRCC_CLK(3, ssp0_v1, 1, 1, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, ssp1, 2, 2, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, ssp0, 1, 1, &clk_sspclk);
static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);
/* Peripheral Cluster #4 is in the always on domain */
/* Peripheral Cluster #5 */
static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
-static DEFINE_PRCC_CLK(5, usb_ed, 0, 0, &clk_i2cclk);
-static DEFINE_PRCC_CLK(5, usb_v1, 0, 0, NULL);
+static DEFINE_PRCC_CLK(5, usb, 0, 0, NULL);
/* Peripheral Cluster #6 */
/* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu1_v1, 8, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(6, mtu0_v1, 7, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(6, cfgreg_v1, 6, 6, NULL);
-static DEFINE_PRCC_CLK(6, dmc_ed, 6, 6, NULL);
+static DEFINE_PRCC_CLK_CUSTOM(6, mtu1, 8, -1, NULL, clk_mtu_get_rate, 1);
+static DEFINE_PRCC_CLK_CUSTOM(6, mtu0, 7, -1, NULL, clk_mtu_get_rate, 0);
+static DEFINE_PRCC_CLK(6, cfgreg, 6, 6, NULL);
static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
-static DEFINE_PRCC_CLK(6, unipro_v1, 4, 1, &clk_uniproclk);
-static DEFINE_PRCC_CLK(6, cryp1_ed, 4, -1, NULL);
+static DEFINE_PRCC_CLK(6, unipro, 4, 1, &clk_uniproclk);
static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
-static DEFINE_PRCC_CLK(6, rng_ed, 0, 0, &clk_i2cclk);
-static DEFINE_PRCC_CLK(6, rng_v1, 0, 0, &clk_rngclk);
-
-/* Peripheral Cluster #7 */
-
-static DEFINE_PRCC_CLK(7, tzpc0_ed, 4, -1, NULL);
-/* MTU ID in data */
-static DEFINE_PRCC_CLK_CUSTOM(7, mtu1_ed, 3, -1, NULL, clk_mtu_get_rate, 1);
-static DEFINE_PRCC_CLK_CUSTOM(7, mtu0_ed, 2, -1, NULL, clk_mtu_get_rate, 0);
-static DEFINE_PRCC_CLK(7, wdg_ed, 1, -1, NULL);
-static DEFINE_PRCC_CLK(7, cfgreg_ed, 0, -1, NULL);
+static DEFINE_PRCC_CLK(6, rng, 0, 0, &clk_rngclk);
static struct clk clk_dummy_apb_pclk = {
.name = "apb_pclk",
};
-static struct clk_lookup u8500_common_clks[] = {
+static struct clk_lookup u8500_clks[] = {
CLK(dummy_apb_pclk, NULL, "apb_pclk"),
/* Peripheral Cluster #1 */
@@ -494,83 +444,41 @@ static struct clk_lookup u8500_common_clks[] = {
CLK(dmaclk, "dma40.0", NULL),
CLK(b2r2clk, "b2r2", NULL),
CLK(tvclk, "tv", NULL),
-};
-static struct clk_lookup u8500_ed_clks[] = {
- /* Peripheral Cluster #1 */
- CLK(spi3_ed, "spi3", NULL),
- CLK(msp1_ed, "msp1", NULL),
-
- /* Peripheral Cluster #2 */
- CLK(gpio1_ed, "gpio.6", NULL),
- CLK(gpio1_ed, "gpio.7", NULL),
- CLK(ssitx_ed, "ssitx", NULL),
- CLK(ssirx_ed, "ssirx", NULL),
- CLK(spi0_ed, "spi0", NULL),
- CLK(sdi3_ed, "sdi3", NULL),
- CLK(sdi1_ed, "sdi1", NULL),
- CLK(msp2_ed, "msp2", NULL),
- CLK(sdi4_ed, "sdi4", NULL),
- CLK(pwl_ed, "pwl", NULL),
- CLK(spi1_ed, "spi1", NULL),
- CLK(spi2_ed, "spi2", NULL),
- CLK(i2c3_ed, "nmk-i2c.3", NULL),
-
- /* Peripheral Cluster #3 */
- CLK(ssp1_ed, "ssp1", NULL),
- CLK(ssp0_ed, "ssp0", NULL),
-
- /* Peripheral Cluster #5 */
- CLK(usb_ed, "musb-ux500.0", "usb"),
-
- /* Peripheral Cluster #6 */
- CLK(dmc_ed, "dmc", NULL),
- CLK(cryp1_ed, "cryp1", NULL),
- CLK(rng_ed, "rng", NULL),
-
- /* Peripheral Cluster #7 */
- CLK(tzpc0_ed, "tzpc0", NULL),
- CLK(mtu1_ed, "mtu1", NULL),
- CLK(mtu0_ed, "mtu0", NULL),
- CLK(wdg_ed, "wdg", NULL),
- CLK(cfgreg_ed, "cfgreg", NULL),
-};
-
-static struct clk_lookup u8500_v1_clks[] = {
/* Peripheral Cluster #1 */
CLK(i2c4, "nmk-i2c.4", NULL),
- CLK(spi3_v1, "spi3", NULL),
- CLK(msp1_v1, "msp1", NULL),
+ CLK(spi3, "spi3", NULL),
+ CLK(msp1, "msp1", NULL),
/* Peripheral Cluster #2 */
- CLK(gpio1_v1, "gpio.6", NULL),
- CLK(gpio1_v1, "gpio.7", NULL),
- CLK(ssitx_v1, "ssitx", NULL),
- CLK(ssirx_v1, "ssirx", NULL),
- CLK(spi0_v1, "spi0", NULL),
- CLK(sdi3_v1, "sdi3", NULL),
- CLK(sdi1_v1, "sdi1", NULL),
- CLK(msp2_v1, "msp2", NULL),
- CLK(sdi4_v1, "sdi4", NULL),
- CLK(pwl_v1, "pwl", NULL),
- CLK(spi1_v1, "spi1", NULL),
- CLK(spi2_v1, "spi2", NULL),
- CLK(i2c3_v1, "nmk-i2c.3", NULL),
+ CLK(gpio1, "gpio.6", NULL),
+ CLK(gpio1, "gpio.7", NULL),
+ CLK(ssitx, "ssitx", NULL),
+ CLK(ssirx, "ssirx", NULL),
+ CLK(spi0, "spi0", NULL),
+ CLK(sdi3, "sdi3", NULL),
+ CLK(sdi1, "sdi1", NULL),
+ CLK(msp2, "msp2", NULL),
+ CLK(sdi4, "sdi4", NULL),
+ CLK(pwl, "pwl", NULL),
+ CLK(spi1, "spi1", NULL),
+ CLK(spi2, "spi2", NULL),
+ CLK(i2c3, "nmk-i2c.3", NULL),
/* Peripheral Cluster #3 */
- CLK(ssp1_v1, "ssp1", NULL),
- CLK(ssp0_v1, "ssp0", NULL),
+ CLK(ssp1, "ssp1", NULL),
+ CLK(ssp0, "ssp0", NULL),
/* Peripheral Cluster #5 */
- CLK(usb_v1, "musb-ux500.0", "usb"),
+ CLK(usb, "musb-ux500.0", "usb"),
/* Peripheral Cluster #6 */
- CLK(mtu1_v1, "mtu1", NULL),
- CLK(mtu0_v1, "mtu0", NULL),
- CLK(cfgreg_v1, "cfgreg", NULL),
+ CLK(mtu1, "mtu1", NULL),
+ CLK(mtu0, "mtu0", NULL),
+ CLK(cfgreg, "cfgreg", NULL),
CLK(hash1, "hash1", NULL),
- CLK(unipro_v1, "unipro", NULL),
- CLK(rng_v1, "rng", NULL),
+ CLK(unipro, "unipro", NULL),
+ CLK(rng, "rng", NULL),
/* PRCMU level clock gating */
@@ -743,7 +651,7 @@ err_out:
late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_DEBUG_FS) */
-unsigned long clk_smp_twd_rate = 400000000;
+unsigned long clk_smp_twd_rate = 500000000;
unsigned long clk_smp_twd_get_rate(struct clk *clk)
{
@@ -769,7 +677,7 @@ static int clk_twd_cpufreq_transition(struct notifier_block *nb,
if (state == CPUFREQ_PRECHANGE) {
/* Save frequency in simple Hz */
- clk_smp_twd_rate = f->new * 1000;
+ clk_smp_twd_rate = (f->new * 1000) / 2;
}
return NOTIFY_OK;
@@ -790,11 +698,7 @@ late_initcall(clk_init_smp_twd_cpufreq);
int __init clk_init(void)
{
- if (cpu_is_u8500ed()) {
- clk_prcmu_ops.enable = clk_prcmu_ed_enable;
- clk_prcmu_ops.disable = clk_prcmu_ed_disable;
- clk_per6clk.rate = 100000000;
- } else if (cpu_is_u5500()) {
+ if (cpu_is_u5500()) {
/* Clock tree for U5500 not implemented yet */
clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
@@ -802,20 +706,11 @@ int __init clk_init(void)
clk_sdmmcclk.rate = 99900000;
}
- clkdev_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
- if (cpu_is_u8500ed())
- clkdev_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
- else
- clkdev_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
-
+ clkdev_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
clkdev_add(&clk_smp_twd_lookup);
#ifdef CONFIG_DEBUG_FS
- clk_debugfs_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
- if (cpu_is_u8500ed())
- clk_debugfs_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
- else
- clk_debugfs_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
+ clk_debugfs_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
#endif
return 0;
}
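
The clk_smp_twd_rate changes above (a 500 MHz default and a cpufreq notifier that divides the reported frequency by two) suggest the TWD local timer is fed at half the CPU clock on this SoC. A minimal helper, shown only as a sketch of that conversion, assuming cpufreq frequencies in kHz:

/* Convert a cpufreq frequency in kHz to the TWD rate in Hz, assuming the
 * TWD runs at CPU/2 as implied by the notifier change above. */
static inline unsigned long twd_rate_from_cpu_khz(unsigned int cpu_khz)
{
	return ((unsigned long)cpu_khz * 1000UL) / 2;
}

With a 1000000 kHz (1 GHz) CPU clock this yields 500000000, matching the new default rate.
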
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index 9de1af00809..18aa5c05c69 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -30,12 +30,11 @@ static struct map_desc u5500_uart_io_desc[] __initdata = {
};
static struct map_desc u5500_io_desc[] __initdata = {
- __IO_DEV_DESC(U5500_GIC_CPU_BASE, SZ_4K),
+ /* SCU base also covers GIC CPU BASE and TWD with its 4K page */
+ __IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
__IO_DEV_DESC(U5500_GIC_DIST_BASE, SZ_4K),
__IO_DEV_DESC(U5500_L2CC_BASE, SZ_4K),
- __IO_DEV_DESC(U5500_TWD_BASE, SZ_4K),
__IO_DEV_DESC(U5500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
__IO_DEV_DESC(U5500_BACKUPRAM0_BASE, SZ_8K),
__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
@@ -47,26 +46,6 @@ static struct map_desc u5500_io_desc[] __initdata = {
__IO_DEV_DESC(U5500_PRCMU_TCDM_BASE, SZ_4K),
};
-static struct resource db5500_pmu_resources[] = {
- [0] = {
- .start = IRQ_DB5500_PMU0,
- .end = IRQ_DB5500_PMU0,
- .flags = IORESOURCE_IRQ,
- },
- [1] = {
- .start = IRQ_DB5500_PMU1,
- .end = IRQ_DB5500_PMU1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device db5500_pmu_device = {
- .name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
- .num_resources = ARRAY_SIZE(db5500_pmu_resources),
- .resource = db5500_pmu_resources,
-};
-
static struct resource mbox0_resources[] = {
{
.name = "mbox_peer",
@@ -152,7 +131,6 @@ static struct platform_device mbox2_device = {
};
static struct platform_device *db5500_platform_devs[] __initdata = {
- &db5500_pmu_device,
&mbox0_device,
&mbox1_device,
&mbox2_device,
@@ -193,6 +171,25 @@ void __init u5500_map_io(void)
_PRCMU_BASE = __io_address(U5500_PRCMU_BASE);
}
+static void __init db5500_pmu_init(void)
+{
+ struct resource res[] = {
+ [0] = {
+ .start = IRQ_DB5500_PMU0,
+ .end = IRQ_DB5500_PMU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_DB5500_PMU1,
+ .end = IRQ_DB5500_PMU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ platform_device_register_simple("arm-pmu", ARM_PMU_DEVICE_CPU,
+ res, ARRAY_SIZE(res));
+}
+
static int usb_db5500_rx_dma_cfg[] = {
DB5500_DMA_DEV4_USB_OTG_IEP_1_9,
DB5500_DMA_DEV5_USB_OTG_IEP_2_10,
@@ -218,6 +215,7 @@ static int usb_db5500_tx_dma_cfg[] = {
void __init u5500_init_devices(void)
{
db5500_add_gpios();
+ db5500_pmu_init();
db5500_dma_init();
db5500_add_rtc();
db5500_add_usb(usb_db5500_rx_dma_cfg, usb_db5500_tx_dma_cfg);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 13e8890a8b8..7176ee7491a 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2009 ST-Ericsson
+ * Copyright (C) 2008-2009 ST-Ericsson SA
*
* Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
*
@@ -35,12 +35,11 @@ static struct map_desc u8500_uart_io_desc[] __initdata = {
};
static struct map_desc u8500_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
+ /* SCU base also covers GIC CPU BASE and TWD with its 4K page */
+ __IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
@@ -54,19 +53,6 @@ static struct map_desc u8500_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
__IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
-};
-
-static struct map_desc u8500_ed_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_MTU0_BASE_ED, SZ_4K),
- __IO_DEV_DESC(U8500_CLKRST7_BASE_ED, SZ_8K),
-};
-
-static struct map_desc u8500_v1_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_PRCMU_TCDM_BASE_V1, SZ_4K),
-};
-
-static struct map_desc u8500_v2_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
};
@@ -81,13 +67,6 @@ void __init u8500_map_io(void)
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
- if (cpu_is_u8500ed())
- iotable_init(u8500_ed_io_desc, ARRAY_SIZE(u8500_ed_io_desc));
- else if (cpu_is_u8500v1())
- iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
- else if (cpu_is_u8500v2())
- iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
-
_PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
}
@@ -156,12 +135,9 @@ static resource_size_t __initdata db8500_gpio_base[] = {
static void __init db8500_add_gpios(void)
{
struct nmk_gpio_platform_data pdata = {
- /* No custom data yet */
+ .supports_sleepmode = true,
};
- if (cpu_is_u8500v2())
- pdata.supports_sleepmode = true;
-
dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
IRQ_DB8500_GPIO0, &pdata);
}
@@ -193,9 +169,6 @@ static int usb_db8500_tx_dma_cfg[] = {
*/
void __init u8500_init_devices(void)
{
- if (cpu_is_u8500ed())
- dma40_u8500ed_fixup();
-
db8500_add_rtc();
db8500_add_gpios();
db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
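
The io_desc comments above ("SCU base also covers GIC CPU BASE and TWD with its 4K page") rely on the Cortex-A9 MPCore private memory region layout. The offsets below are quoted from the A9 MPCore TRM for illustration only; they are not part of the patch.

/* Cortex-A9 MPCore private region, relative to PERIPHBASE (== SCU base) */
#define A9_MPCORE_SCU		0x0000	/* Snoop Control Unit          */
#define A9_MPCORE_GIC_CPU	0x0100	/* GIC CPU interface           */
#define A9_MPCORE_GLOBAL_TIMER	0x0200	/* global timer                */
#define A9_MPCORE_TWD		0x0600	/* private timers and watchdog */
#define A9_MPCORE_GIC_DIST	0x1000	/* GIC distributor (next 4K)   */

A single 4 KiB mapping at the SCU base therefore also reaches the GIC CPU interface and the TWD, while the distributor keeps its own __IO_DEV_DESC entry.
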
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 73b17404b19..a7c6cdc9b11 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -166,16 +166,6 @@ struct platform_device u8500_dma40_device = {
.resource = dma40_resources
};
-void dma40_u8500ed_fixup(void)
-{
- dma40_plat_data.memcpy = NULL;
- dma40_plat_data.memcpy_len = 0;
- dma40_resources[0].start = U8500_DMA_BASE_ED;
- dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
- dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
- dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
-}
-
struct resource keypad_resources[] = {
[0] = {
.start = U8500_SKE_BASE,
diff --git a/arch/arm/mach-ux500/id.c b/arch/arm/mach-ux500/id.c
index d35122ebc67..15a0f63b2e2 100644
--- a/arch/arm/mach-ux500/id.c
+++ b/arch/arm/mach-ux500/id.c
@@ -65,6 +65,7 @@ static unsigned int partnumber(unsigned int asicid)
* DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
* DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
* DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
+ * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
* DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
*/
@@ -80,9 +81,10 @@ void __init ux500_map_io(void)
addr = 0x9001FFF4;
break;
- case 0x412fc091: /* DB8500v2 / DB5500v1 */
+ case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
asicid = ux500_read_asicid(0x9001DBF4);
- if (partnumber(asicid) == 0x8500)
+ if (partnumber(asicid) == 0x8500 ||
+ partnumber(asicid) == 0x8520)
/* DB8500v2 */
break;
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index 994b5fe6f85..8e714bcb099 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -65,8 +65,11 @@
#define U5500_PRCMU_TIMER_4_BASE (U5500_PER4_BASE + 0x07450)
#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000)
#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
+#define U5500_MTIMER_BASE (U5500_PER4_BASE + 0xC000)
#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
#define U5500_PRCMU_TCDM_BASE (U5500_PER4_BASE + 0x18000)
+#define U5500_PRCMU_TCPM_BASE (U5500_PER4_BASE + 0x10000)
+#define U5500_TPIU_BASE (U5500_PER4_BASE + 0x50000)
#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
@@ -125,6 +128,7 @@
#define U5500_ACCCON_BASE (0xBFFF1000)
#define U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET (0x00000020)
#define U5500_ACCCON_ACC_CPU_CTRL_OFFSET (0x000000BC)
+#define U5500_INTCON_MBOX1_INT_RESET_ADDR (0xBFFD31A4)
#define U5500_ESRAM_BASE 0x40000000
#define U5500_ESRAM_DMA_LCPA_OFFSET 0x10000
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index 751b0e6938d..80e10f50282 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -22,7 +22,9 @@
#define U8500_ESRAM_DMA_LCPA_OFFSET 0x10000
#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK0 + U8500_ESRAM_DMA_LCPA_OFFSET)
-#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000)
+
+/* This address fulfills the 256k alignment requirement of the lcla base */
+#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4
#define U8500_PER3_BASE 0x80000000
#define U8500_STM_BASE 0x80100000
@@ -40,15 +42,14 @@
#define U8500_ASIC_ID_BASE 0x9001D000
#define U8500_PER6_BASE 0xa03c0000
+#define U8500_PER7_BASE 0xa03d0000
#define U8500_PER5_BASE 0xa03e0000
-#define U8500_PER7_BASE_ED 0xa03d0000
#define U8500_SVA_BASE 0xa0100000
#define U8500_SIA_BASE 0xa0200000
#define U8500_SGA_BASE 0xa0300000
#define U8500_MCDE_BASE 0xa0350000
-#define U8500_DMA_BASE_ED 0xa0362000
#define U8500_DMA_BASE 0x801C0000 /* v1 */
#define U8500_SBAG_BASE 0xa0390000
@@ -66,13 +67,6 @@
#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xE000)
#define U8500_GPIO3_BASE (U8500_PER5_BASE + 0x1E000)
-/* per7 base addresses */
-#define U8500_CR_BASE_ED (U8500_PER7_BASE_ED + 0x8000)
-#define U8500_MTU0_BASE_ED (U8500_PER7_BASE_ED + 0xa000)
-#define U8500_MTU1_BASE_ED (U8500_PER7_BASE_ED + 0xb000)
-#define U8500_TZPC0_BASE_ED (U8500_PER7_BASE_ED + 0xc000)
-#define U8500_CLKRST7_BASE_ED (U8500_PER7_BASE_ED + 0xf000)
-
#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
@@ -102,12 +96,10 @@
#define U8500_SCR_BASE (U8500_PER4_BASE + 0x05000)
#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000)
#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
-#define U8500_PRCMU_TIMER_3_BASE (U8500_PER4_BASE + 0x07338)
-#define U8500_PRCMU_TIMER_4_BASE (U8500_PER4_BASE + 0x07450)
-#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000)
#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000)
#define U8500_PRCMU_TCPM_BASE (U8500_PER4_BASE + 0x60000)
-
+#define U8500_PRCMU_TIMER_3_BASE (U8500_PER4_BASE + 0x07338)
+#define U8500_PRCMU_TIMER_4_BASE (U8500_PER4_BASE + 0x07450)
/* per3 base addresses */
#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
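
On the lcla comment above: the DMA40 logical channel link area must start on a 256 KiB boundary, which is why the base is moved onto U8500_ESRAM_BANK4. A trivial sketch of that constraint, with SZ_256K repeated here so the snippet stands alone:

#define SZ_256K	0x00040000

/* True iff "base" satisfies the 256 KiB alignment rule for the lcla base. */
static inline int lcla_base_is_aligned(unsigned long base)
{
	return (base & (SZ_256K - 1)) == 0;
}
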
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 020b6369a30..5f6cb71fc62 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -18,6 +18,4 @@ extern struct amba_device ux500_pl031_device;
extern struct platform_device u8500_dma40_device;
extern struct platform_device ux500_ske_keypad_device;
-void dma40_u8500ed_fixup(void);
-
#endif
diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
index 071bba94f72..e16299e1020 100644
--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
+++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
@@ -10,8 +10,6 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
index 7389df911b1..c01ef66537f 100644
--- a/arch/arm/mach-ux500/include/mach/gpio.h
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -1,10 +1,5 @@
#ifndef __ASM_ARCH_GPIO_H
#define __ASM_ARCH_GPIO_H
-/*
- * 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
- * room for a couple of GPIO expanders.
- */
-#define ARCH_NR_GPIOS 350
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 470ac52663d..b6ba26a1367 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -10,20 +10,21 @@
#ifndef __MACH_HARDWARE_H
#define __MACH_HARDWARE_H
-/* macros to get at IO space when running virtually
+/*
+ * Macros to get at IO space when running virtually
* We don't map all the peripherals; let ioremap do
* this for us. We map only very basic peripherals here.
*/
#define U8500_IO_VIRTUAL 0xf0000000
#define U8500_IO_PHYSICAL 0xa0000000
-/* this macro is used in assembly, so no cast */
+/* This macro is used in assembly, so no cast */
#define IO_ADDRESS(x) \
(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + U8500_IO_VIRTUAL)
/* typesafe io address */
#define __io_address(n) __io(IO_ADDRESS(n))
-/* used by some plat-nomadik code */
+/* Used by some plat-nomadik code */
#define io_p2v(n) __io_address(n)
#include <mach/db8500-regs.h>
@@ -36,6 +37,5 @@ extern void __iomem *_PRCMU_BASE;
#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
-#endif
-
+#endif /* __ASSEMBLY__ */
#endif /* __MACH_HARDWARE_H */
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
index 02b541a37ee..833d6a6edc9 100644
--- a/arch/arm/mach-ux500/include/mach/id.h
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -47,6 +47,30 @@ static inline bool __attribute_const__ cpu_is_u5500(void)
}
/*
+ * 5500 revisions
+ */
+
+static inline bool __attribute_const__ cpu_is_u5500v1(void)
+{
+ return cpu_is_u5500() && (dbx500_revision() & 0xf0) == 0xA0;
+}
+
+static inline bool __attribute_const__ cpu_is_u5500v2(void)
+{
+ return (dbx500_id.revision & 0xf0) == 0xB0;
+}
+
+static inline bool __attribute_const__ cpu_is_u5500v20(void)
+{
+ return cpu_is_u5500() && ((dbx500_revision() & 0xf0) == 0xB0);
+}
+
+static inline bool __attribute_const__ cpu_is_u5500v21(void)
+{
+ return cpu_is_u5500() && (dbx500_revision() == 0xB1);
+}
+
+/*
* 8500 revisions
*/
diff --git a/arch/arm/mach-ux500/include/mach/system.h b/arch/arm/mach-ux500/include/mach/system.h
index c0cd8006f1a..258e5c919c2 100644
--- a/arch/arm/mach-ux500/include/mach/system.h
+++ b/arch/arm/mach-ux500/include/mach/system.h
@@ -17,9 +17,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /* yet to be implemented - TODO */
-}
-
#endif
diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h
deleted file mode 100644
index a4945cb4117..00000000000
--- a/arch/arm/mach-ux500/include/mach/vmalloc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2009 ST-Ericsson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index e340a54251d..02b7b9303f3 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
@@ -141,11 +140,6 @@ static struct map_desc versatile_io_desc[] __initdata = {
},
#ifdef CONFIG_MACH_VERSATILE_AB
{
- .virtual = IO_ADDRESS(VERSATILE_GPIO0_BASE),
- .pfn = __phys_to_pfn(VERSATILE_GPIO0_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE
- }, {
.virtual = IO_ADDRESS(VERSATILE_IB2_BASE),
.pfn = __phys_to_pfn(VERSATILE_IB2_BASE),
.length = SZ_64M,
@@ -745,6 +739,19 @@ static void versatile_leds_event(led_event_t ledevt)
}
#endif /* CONFIG_LEDS */
+void versatile_restart(char mode, const char *cmd)
+{
+ void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
+ u32 val;
+
+ val = __raw_readl(sys + VERSATILE_SYS_RESETCTL_OFFSET);
+ val |= 0x105;
+
+ __raw_writel(0xa05f, sys + VERSATILE_SYS_LOCK_OFFSET);
+ __raw_writel(val, sys + VERSATILE_SYS_RESETCTL_OFFSET);
+ __raw_writel(0, sys + VERSATILE_SYS_LOCK_OFFSET);
+}
+
/* Early initializations */
void __init versatile_init_early(void)
{
diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h
index e01422700eb..2ef2f555f31 100644
--- a/arch/arm/mach-versatile/core.h
+++ b/arch/arm/mach-versatile/core.h
@@ -30,6 +30,7 @@ extern void __init versatile_init_early(void);
extern void __init versatile_init_irq(void);
extern void __init versatile_map_io(void);
extern struct sys_timer versatile_timer;
+extern void versatile_restart(char, const char *);
extern unsigned int mmc_status(struct device *dev);
#ifdef CONFIG_OF
extern struct of_dev_auxdata versatile_auxdata_lookup[];
diff --git a/arch/arm/mach-versatile/include/mach/entry-macro.S b/arch/arm/mach-versatile/include/mach/entry-macro.S
index e6f7c166316..b6f0dbf122e 100644
--- a/arch/arm/mach-versatile/include/mach/entry-macro.S
+++ b/arch/arm/mach-versatile/include/mach/entry-macro.S
@@ -7,39 +7,9 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <mach/hardware.h>
-#include <mach/platform.h>
-#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =IO_ADDRESS(VERSATILE_VIC_BASE)
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get masked status
- mov \irqnr, #0
- teq \irqstat, #0
- beq 1003f
-
-1001: tst \irqstat, #15
- bne 1002f
- add \irqnr, \irqnr, #4
- movs \irqstat, \irqstat, lsr #4
- bne 1001b
-1002: tst \irqstat, #1
- bne 1003f
- add \irqnr, \irqnr, #1
- movs \irqstat, \irqstat, lsr #1
- bne 1002b
-1003: /* EQ will be set if no irqs pending */
-
-@ clz \irqnr, \irqstat
-@1003: /* EQ will be set if we reach MAXIRQNUM */
- .endm
-
diff --git a/arch/arm/mach-versatile/include/mach/system.h b/arch/arm/mach-versatile/include/mach/system.h
index 8ffc12a7cb2..f3fa347895f 100644
--- a/arch/arm/mach-versatile/include/mach/system.h
+++ b/arch/arm/mach-versatile/include/mach/system.h
@@ -21,10 +21,6 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/platform.h>
-
static inline void arch_idle(void)
{
/*
@@ -34,16 +30,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- u32 val;
-
- val = __raw_readl(IO_ADDRESS(VERSATILE_SYS_RESETCTL)) & ~0x7;
- val |= 0x105;
-
- __raw_writel(0xa05f, IO_ADDRESS(VERSATILE_SYS_LOCK));
- __raw_writel(val, IO_ADDRESS(VERSATILE_SYS_RESETCTL));
- __raw_writel(0, IO_ADDRESS(VERSATILE_SYS_LOCK));
-}
-
#endif
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
deleted file mode 100644
index 7d8e069ad51..00000000000
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-versatile/include/mach/vmalloc.h
- *
- * Copyright (C) 2003 ARM Limited
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-versatile/versatile_ab.c b/arch/arm/mach-versatile/versatile_ab.c
index fda4866703c..98f65493177 100644
--- a/arch/arm/mach-versatile/versatile_ab.c
+++ b/arch/arm/mach-versatile/versatile_ab.c
@@ -21,12 +21,12 @@
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -39,6 +39,8 @@ MACHINE_START(VERSATILE_AB, "ARM-Versatile AB")
.map_io = versatile_map_io,
.init_early = versatile_init_early,
.init_irq = versatile_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &versatile_timer,
.init_machine = versatile_init,
+ .restart = versatile_restart,
MACHINE_END
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index 54e037c090f..ae5ad3c8f3d 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -45,7 +46,9 @@ DT_MACHINE_START(VERSATILE_PB, "ARM-Versatile (Device Tree Support)")
.map_io = versatile_map_io,
.init_early = versatile_init_early,
.init_irq = versatile_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &versatile_timer,
.init_machine = versatile_dt_init,
.dt_compat = versatile_dt_match,
+ .restart = versatile_restart,
MACHINE_END
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index feaf9cbe60f..9581c197500 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -21,13 +21,13 @@
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
+#include <asm/hardware/vic.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
@@ -107,6 +107,8 @@ MACHINE_START(VERSATILE_PB, "ARM-Versatile PB")
.map_io = versatile_map_io,
.init_early = versatile_init_early,
.init_irq = versatile_init_irq,
+ .handle_irq = vic_handle_irq,
.timer = &versatile_timer,
.init_machine = versatile_pb_init,
+ .restart = versatile_restart,
MACHINE_END
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 931148487f0..9b3d0fbaee7 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -8,5 +8,7 @@ config ARCH_VEXPRESS_CA9X4
select ARM_ERRATA_720789
select ARM_ERRATA_751472
select ARM_ERRATA_753970
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
endmenu
diff --git a/arch/arm/mach-vexpress/include/mach/entry-macro.S b/arch/arm/mach-vexpress/include/mach/entry-macro.S
index 73c11297509..a14f9e62ca9 100644
--- a/arch/arm/mach-vexpress/include/mach/entry-macro.S
+++ b/arch/arm/mach-vexpress/include/mach/entry-macro.S
@@ -1,5 +1,3 @@
-#include <asm/hardware/entry-macro-gic.S>
-
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-vexpress/include/mach/system.h b/arch/arm/mach-vexpress/include/mach/system.h
index 899a4e628a4..f653a8e265b 100644
--- a/arch/arm/mach-vexpress/include/mach/system.h
+++ b/arch/arm/mach-vexpress/include/mach/system.h
@@ -30,8 +30,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
-}
-
#endif
diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
deleted file mode 100644
index f43a36ef678..00000000000
--- a/arch/arm/mach-vexpress/include/mach/vmalloc.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-vexpress/include/mach/vmalloc.h
- *
- * Copyright (C) 2003 ARM Limited
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 1fafc324460..b4a28ca0e50 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -10,7 +10,7 @@
#include <linux/ata_platform.h>
#include <linux/smsc911x.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
@@ -23,6 +23,7 @@
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/timer-sp.h>
#include <asm/hardware/sp810.h>
+#include <asm/hardware/gic.h>
#include <mach/ct-ca9x4.h>
#include <mach/motherboard.h>
@@ -437,7 +438,6 @@ static void __init v2m_init(void)
amba_device_register(v2m_amba_devs[i], &iomem_resource);
pm_power_off = v2m_power_off;
- arm_pm_restart = v2m_restart;
ct_desc->init_tile();
}
@@ -448,5 +448,7 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
.init_early = v2m_init_early,
.init_irq = v2m_init_irq,
.timer = &v2m_timer,
+ .handle_irq = gic_handle_irq,
.init_machine = v2m_init,
+ .restart = v2m_restart,
MACHINE_END
diff --git a/arch/arm/mach-vt8500/include/mach/vmalloc.h b/arch/arm/mach-vt8500/include/mach/vmalloc.h
deleted file mode 100644
index 4642290ce41..00000000000
--- a/arch/arm/mach-vt8500/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/mach-vt8500/include/mach/vmalloc.h
- *
- * Copyright (C) 2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#define VMALLOC_END 0xd0000000UL
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index 0a235e50233..604e1db266e 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -33,9 +33,11 @@
#include <mach/regs-serial.h>
#include <mach/regs-clock.h>
#include <mach/regs-ebi.h>
+#include <mach/regs-timer.h>
#include "cpu.h"
#include "clock.h"
+#include "nuc9xx.h"
/* Initial IO mappings */
@@ -222,3 +224,17 @@ void __init nuc900_init_clocks(void)
clkdev_add_table(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs));
}
+#define WTCR (TMR_BA + 0x1C)
+#define WTCLK (1 << 10)
+#define WTE (1 << 7)
+#define WTRE (1 << 1)
+
+void nuc9xx_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* Jump into ROM at address 0 */
+ soft_restart(0);
+ } else {
+ __raw_writel(WTE | WTRE | WTCLK, WTCR);
+ }
+}
diff --git a/arch/arm/mach-w90x900/include/mach/system.h b/arch/arm/mach-w90x900/include/mach/system.h
index ce228bdc66d..2aaeb931161 100644
--- a/arch/arm/mach-w90x900/include/mach/system.h
+++ b/arch/arm/mach-w90x900/include/mach/system.h
@@ -14,28 +14,6 @@
* (at your option) any later version.
*
*/
-
-#include <linux/io.h>
-#include <asm/proc-fns.h>
-#include <mach/map.h>
-#include <mach/regs-timer.h>
-
-#define WTCR (TMR_BA + 0x1C)
-#define WTCLK (1 << 10)
-#define WTE (1 << 7)
-#define WTRE (1 << 1)
-
static void arch_idle(void)
{
}
-
-static void arch_reset(char mode, const char *cmd)
-{
- if (mode == 's') {
- /* Jump into ROM at address 0 */
- cpu_reset(0);
- } else {
- __raw_writel(WTE | WTRE | WTCLK, WTCR);
- }
-}
-
diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h
deleted file mode 100644
index b067e44500a..00000000000
--- a/arch/arm/mach-w90x900/include/mach/vmalloc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/arm/mach-w90x900/include/mach/vmalloc.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * Based on arch/arm/mach-s3c2410/include/mach/vmalloc.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END (0xe0000000UL)
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
index 7bf143c443f..d66d43ae8df 100644
--- a/arch/arm/mach-w90x900/irq.c
+++ b/arch/arm/mach-w90x900/irq.c
@@ -19,7 +19,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/ptrace.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -28,6 +28,8 @@
#include <mach/hardware.h>
#include <mach/regs-irq.h>
+#include "nuc9xx.h"
+
struct group_irq {
unsigned long gpen;
unsigned int enabled;
diff --git a/arch/arm/mach-w90x900/mach-nuc910evb.c b/arch/arm/mach-w90x900/mach-nuc910evb.c
index 31c10901822..b4243e4f156 100644
--- a/arch/arm/mach-w90x900/mach-nuc910evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc910evb.c
@@ -38,4 +38,5 @@ MACHINE_START(W90P910EVB, "W90P910EVB")
.init_irq = nuc900_init_irq,
.init_machine = nuc910evb_init,
.timer = &nuc900_timer,
+ .restart = nuc9xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-w90x900/mach-nuc950evb.c b/arch/arm/mach-w90x900/mach-nuc950evb.c
index 4062e55a57d..067d8f9166d 100644
--- a/arch/arm/mach-w90x900/mach-nuc950evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc950evb.c
@@ -41,4 +41,5 @@ MACHINE_START(W90P950EVB, "W90P950EVB")
.init_irq = nuc900_init_irq,
.init_machine = nuc950evb_init,
.timer = &nuc900_timer,
+ .restart = nuc9xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-w90x900/mach-nuc960evb.c b/arch/arm/mach-w90x900/mach-nuc960evb.c
index 0ab9995d5b5..cbb3adc3db1 100644
--- a/arch/arm/mach-w90x900/mach-nuc960evb.c
+++ b/arch/arm/mach-w90x900/mach-nuc960evb.c
@@ -38,4 +38,5 @@ MACHINE_START(W90N960EVB, "W90N960EVB")
.init_irq = nuc900_init_irq,
.init_machine = nuc960evb_init,
.timer = &nuc900_timer,
+ .restart = nuc9xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-w90x900/nuc910.h b/arch/arm/mach-w90x900/nuc910.h
index 83e9ba5fc26..b14c71a9e68 100644
--- a/arch/arm/mach-w90x900/nuc910.h
+++ b/arch/arm/mach-w90x900/nuc910.h
@@ -12,14 +12,7 @@
* published by the Free Software Foundation.
*
*/
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
/* extern file from nuc910.c */
diff --git a/arch/arm/mach-w90x900/nuc950.h b/arch/arm/mach-w90x900/nuc950.h
index 98a1148bc5a..6e9de3051cd 100644
--- a/arch/arm/mach-w90x900/nuc950.h
+++ b/arch/arm/mach-w90x900/nuc950.h
@@ -12,14 +12,7 @@
* published by the Free Software Foundation.
*
*/
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
/* extern file from nuc950.c */
diff --git a/arch/arm/mach-w90x900/nuc960.h b/arch/arm/mach-w90x900/nuc960.h
index f0c07cbe3a8..9f6df9a0028 100644
--- a/arch/arm/mach-w90x900/nuc960.h
+++ b/arch/arm/mach-w90x900/nuc960.h
@@ -12,14 +12,7 @@
* published by the Free Software Foundation.
*
*/
-
-struct map_desc;
-struct sys_timer;
-
-/* core initialisation functions */
-
-extern void nuc900_init_irq(void);
-extern struct sys_timer nuc900_timer;
+#include "nuc9xx.h"
/* extern file from nuc960.c */
diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h
new file mode 100644
index 00000000000..91acb404779
--- /dev/null
+++ b/arch/arm/mach-w90x900/nuc9xx.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-w90x900/nuc9xx.h
+ *
+ * Copied from nuc910.h, which had:
+ *
+ * Copyright (c) 2008 Nuvoton corporation
+ *
+ * Header file for NUC900 CPU support
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+struct map_desc;
+struct sys_timer;
+
+/* core initialisation functions */
+
+extern void nuc900_init_irq(void);
+extern struct sys_timer nuc900_timer;
+extern void nuc9xx_restart(char, const char *);
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index a2c4e2d0a0d..fa27c498ac0 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -33,6 +33,8 @@
#include <mach/map.h>
#include <mach/regs-timer.h>
+#include "nuc9xx.h"
+
#define RESETINT 0x1f
#define PERIOD (0x01 << 27)
#define ONESHOT (0x00 << 27)
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 73e93687b81..ab5cfddc0d7 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -112,6 +112,7 @@ static const char *xilinx_dt_match[] = {
MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
.map_io = xilinx_map_io,
.init_irq = xilinx_irq_init,
+ .handle_irq = gic_handle_irq,
.init_machine = xilinx_init_machine,
.timer = &xttcpss_sys_timer,
.dt_compat = xilinx_dt_match,
diff --git a/arch/arm/mach-zynq/include/mach/entry-macro.S b/arch/arm/mach-zynq/include/mach/entry-macro.S
index 3cfc01b3746..d621fb73256 100644
--- a/arch/arm/mach-zynq/include/mach/entry-macro.S
+++ b/arch/arm/mach-zynq/include/mach/entry-macro.S
@@ -20,9 +20,6 @@
* GNU General Public License for more details.
*/
-#include <mach/hardware.h>
-#include <asm/hardware/entry-macro-gic.S>
-
.macro disable_fiq
.endm
diff --git a/arch/arm/mach-zynq/include/mach/system.h b/arch/arm/mach-zynq/include/mach/system.h
index 1b84d705c67..8e88e0b8d2b 100644
--- a/arch/arm/mach-zynq/include/mach/system.h
+++ b/arch/arm/mach-zynq/include/mach/system.h
@@ -20,9 +20,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- /* Add architecture specific reset processing here */
-}
-
#endif
diff --git a/arch/arm/mach-zynq/include/mach/vmalloc.h b/arch/arm/mach-zynq/include/mach/vmalloc.h
deleted file mode 100644
index 2398eff1e8b..00000000000
--- a/arch/arm/mach-zynq/include/mach/vmalloc.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-zynq/include/mach/vmalloc.h
- *
- * Copyright (C) 2011 Xilinx
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_VMALLOC_H__
-#define __MACH_VMALLOC_H__
-
-#define VMALLOC_END 0xE0000000UL
-
-#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 67f75a0b66d..4cefb57d9ed 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -629,6 +629,23 @@ config IO_36
comment "Processor Features"
+config ARM_LPAE
+ bool "Support for the Large Physical Address Extension"
+ depends on MMU && CPU_V7
+ help
+ Say Y if you have an ARMv7 processor supporting the LPAE page
+ table format and you would like to access memory beyond the
+ 4GB limit. The resulting kernel image will not run on
+ processors without the LPA extension.
+
+ If unsure, say N.
+
+config ARCH_PHYS_ADDR_T_64BIT
+ def_bool ARM_LPAE
+
+config ARCH_DMA_ADDR_T_64BIT
+ bool
+
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
@@ -816,14 +833,23 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
Say Y here to use the Feroceon L2 cache in writethrough mode.
Unless you specifically require this, say N for writeback mode.
+config MIGHT_HAVE_CACHE_L2X0
+ bool
+ help
+ This option should be selected by machines which have an L2x0
+ or PL310 cache controller, but where its use is optional.
+
+ The only effect of this option is to make CACHE_L2X0 and
+ related options available to the user for configuration.
+
+ Boards or SoCs which always require the cache controller
+ support to be present should select CACHE_L2X0 directly
+ instead of this option, thus preventing the user from
+ inadvertently configuring a broken kernel.
+
config CACHE_L2X0
- bool "Enable the L2x0 outer cache controller"
- depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
- ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
- ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
- ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
- default y
+ bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
+ default MIGHT_HAVE_CACHE_L2X0
select OUTER_CACHE
select OUTER_CACHE_SYNC
help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index c335c76e0d8..caf14dc059e 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -968,7 +968,7 @@ static int __init alignment_init(void)
ai_usermode = safe_usermode(ai_usermode, false);
}
- hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+ hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
/*
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8ac9e9f8479..b1e192ba8c2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
/* write to an unmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 93aac068da9..ee9bb363d60 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
+#ifdef CONFIG_ARM_LPAE
+#define cpu_set_asid(asid) { \
+ unsigned long ttbl, ttbh; \
+ asm volatile( \
+ " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \
+ " mov %1, %2, lsl #(48 - 32) @ set ASID\n" \
+ " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \
+ : "=&r" (ttbl), "=&r" (ttbh) \
+ : "r" (asid & ~ASID_MASK)); \
+}
+#else
+#define cpu_set_asid(asid) \
+ asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+#endif
+
/*
* We fork()ed a process, and we need a new context for the child
* to run in. We reserve version 0 for initial tasks so we will
@@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
/* set the reserved ASID before flushing the TLB */
- asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+ cpu_set_asid(0);
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@@ -99,7 +114,7 @@ static void reset_context(void *info)
set_mm_context(mm, asid);
/* set the new ASID */
- asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+ cpu_set_asid(mm->context.id);
isb();
}
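
For reference on the LPAE branch of cpu_set_asid() above: with the long-descriptor format the ASID is carried in TTBR0[55:48], i.e. at bit 16 of the upper word written by mcrr. A hedged C equivalent of that bit placement is sketched below; the 0xff mask stands in for the kernel's ~ASID_MASK, and the low word holding the table base is untouched, as in the asm.

/* Sketch only: upper 32 bits of a 64-bit TTBR0 carrying an 8-bit ASID. */
static inline unsigned int ttbr0_high_with_asid(unsigned int asid)
{
	return (asid & 0xff) << (48 - 32);
}
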
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4e7f6cba1a..1aa664a1999 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
pte_t *pte;
int i = 0;
unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
void *addr;
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
*handle = ~0;
size = PAGE_ALIGN(size);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index aa33949fef6..bb7eac381a8 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,19 +27,6 @@
#include "fault.h"
-/*
- * Fault status register encodings. We steal bit 31 for our own purposes.
- */
-#define FSR_LNX_PF (1 << 31)
-#define FSR_WRITE (1 << 11)
-#define FSR_FS4 (1 << 10)
-#define FSR_FS3_0 (15)
-
-static inline int fsr_fs(unsigned int fsr)
-{
- return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
-}
-
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
@@ -123,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08llx", (long long)pte_val(*pte));
+#ifndef CONFIG_ARM_LPAE
printk(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+#endif
pte_unmap(pte);
} while(0);
@@ -231,7 +220,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- struct task_struct *tsk)
+ unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
@@ -253,18 +242,7 @@ good_area:
goto out;
}
- /*
- * If for any reason at all we couldn't handle the fault, make
- * sure we exit gracefully rather than endlessly redo the fault.
- */
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR))
- return fault;
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- return fault;
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -279,6 +257,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
+ int write = fsr & FSR_WRITE;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
if (notify_page_fault(regs, fsr))
return 0;
@@ -305,6 +286,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
+retry:
down_read(&mm->mmap_sem);
} else {
/*
@@ -320,14 +302,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#endif
}
- fault = __do_page_fault(mm, addr, fsr, tsk);
- up_read(&mm->mmap_sem);
+ fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+ /* If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because
+ * it would already be released in __lock_page_or_retry in
+ * mm/filemap.c. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (fault & VM_FAULT_MAJOR)
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
- else if (fault & VM_FAULT_MINOR)
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, addr);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, addr);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
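
The hunk above switches do_page_fault() to the single-retry protocol: the first attempt runs with FAULT_FLAG_ALLOW_RETRY, major/minor accounting happens only on that attempt, and at most one retry is made with the flag cleared so the fault cannot loop forever. The toy program below models just that control flow; it is not kernel code and the flag names are local stand-ins.

#include <stdio.h>

#define FLAG_ALLOW_RETRY	0x1	/* stand-in for FAULT_FLAG_ALLOW_RETRY */
#define FAULT_RETRY		0x2	/* stand-in for VM_FAULT_RETRY         */

/* Fake fault handler: asks for one retry if (and only if) retries are allowed. */
static unsigned int fake_fault(unsigned int flags)
{
	static int calls;
	return (++calls == 1 && (flags & FLAG_ALLOW_RETRY)) ? FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	unsigned int fault;

retry:
	fault = fake_fault(flags);
	if ((flags & FLAG_ALLOW_RETRY) && (fault & FAULT_RETRY)) {
		flags &= ~FLAG_ALLOW_RETRY;	/* a second retry is never allowed */
		goto retry;
	}
	printf("fault handled after %s\n",
	       (flags & FLAG_ALLOW_RETRY) ? "one attempt" : "a single retry");
	return 0;
}
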
@@ -441,6 +450,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Only one hardware entry per PMD with LPAE.
+ */
+ index = 0;
+#else
/*
* On ARM one Linux PGD entry contains two hardware entries (see page
* tables layout in pgtable.h). We normally guarantee that we always
@@ -450,6 +465,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
* for the first of pair.
*/
index = (addr >> SECTION_SHIFT) & 1;
+#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
@@ -489,55 +505,20 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
return 1;
}
-static struct fsr_info {
+struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
-} fsr_info[] = {
- /*
- * The following are the standard ARMv3 and ARMv4 aborts. ARMv5
- * defines these to be "precise" aborts.
- */
- { do_bad, SIGSEGV, 0, "vector exception" },
- { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
- { do_bad, SIGKILL, 0, "terminal exception" },
- { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
- { do_bad, SIGBUS, 0, "external abort on linefetch" },
- { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
- { do_bad, SIGBUS, 0, "external abort on linefetch" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
- { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
- { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
- { do_bad, SIGBUS, 0, "external abort on translation" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
- { do_bad, SIGBUS, 0, "external abort on translation" },
- { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
- /*
- * The following are "imprecise" aborts, which are signalled by bit
- * 10 of the FSR, and may not be recoverable. These are only
- * supported if the CPU abort handler supports bit 10.
- */
- { do_bad, SIGBUS, 0, "unknown 16" },
- { do_bad, SIGBUS, 0, "unknown 17" },
- { do_bad, SIGBUS, 0, "unknown 18" },
- { do_bad, SIGBUS, 0, "unknown 19" },
- { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
- { do_bad, SIGBUS, 0, "unknown 21" },
- { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
- { do_bad, SIGBUS, 0, "unknown 23" },
- { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
- { do_bad, SIGBUS, 0, "unknown 25" },
- { do_bad, SIGBUS, 0, "unknown 26" },
- { do_bad, SIGBUS, 0, "unknown 27" },
- { do_bad, SIGBUS, 0, "unknown 28" },
- { do_bad, SIGBUS, 0, "unknown 29" },
- { do_bad, SIGBUS, 0, "unknown 30" },
- { do_bad, SIGBUS, 0, "unknown 31" }
};
+/* FSR definition */
+#ifdef CONFIG_ARM_LPAE
+#include "fsr-3level.c"
+#else
+#include "fsr-2level.c"
+#endif
+
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
@@ -573,42 +554,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, fsr, 0);
}
-
-static struct fsr_info ifsr_info[] = {
- { do_bad, SIGBUS, 0, "unknown 0" },
- { do_bad, SIGBUS, 0, "unknown 1" },
- { do_bad, SIGBUS, 0, "debug event" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
- { do_bad, SIGBUS, 0, "unknown 4" },
- { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
- { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
- { do_bad, SIGBUS, 0, "unknown 10" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
- { do_bad, SIGBUS, 0, "external abort on translation" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
- { do_bad, SIGBUS, 0, "external abort on translation" },
- { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
- { do_bad, SIGBUS, 0, "unknown 16" },
- { do_bad, SIGBUS, 0, "unknown 17" },
- { do_bad, SIGBUS, 0, "unknown 18" },
- { do_bad, SIGBUS, 0, "unknown 19" },
- { do_bad, SIGBUS, 0, "unknown 20" },
- { do_bad, SIGBUS, 0, "unknown 21" },
- { do_bad, SIGBUS, 0, "unknown 22" },
- { do_bad, SIGBUS, 0, "unknown 23" },
- { do_bad, SIGBUS, 0, "unknown 24" },
- { do_bad, SIGBUS, 0, "unknown 25" },
- { do_bad, SIGBUS, 0, "unknown 26" },
- { do_bad, SIGBUS, 0, "unknown 27" },
- { do_bad, SIGBUS, 0, "unknown 28" },
- { do_bad, SIGBUS, 0, "unknown 29" },
- { do_bad, SIGBUS, 0, "unknown 30" },
- { do_bad, SIGBUS, 0, "unknown 31" },
-};
-
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
@@ -641,6 +586,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, ifsr, 0);
}
+#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
@@ -663,3 +609,4 @@ static int __init exceptions_init(void)
}
arch_initcall(exceptions_init);
+#endif
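
The retry protocol added to do_page_fault() above is easier to see in isolation. A minimal user-space sketch of the same control flow, assuming toy flag values and a stub standing in for handle_mm_fault() (the real constants live in include/linux/mm.h and differ):

#include <stdio.h>

/* Toy flag values for illustration only. */
#define FAULT_FLAG_ALLOW_RETRY  0x02
#define FAULT_FLAG_KILLABLE     0x04
#define VM_FAULT_MAJOR          0x10
#define VM_FAULT_RETRY          0x20

/* Stand-in for handle_mm_fault(): pretend the first attempt has to
 * drop mmap_sem and retry, and the second attempt succeeds. */
static int fake_handle_mm_fault(int *attempts, unsigned int flags)
{
	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (*attempts)++ == 0)
		return VM_FAULT_RETRY;
	return VM_FAULT_MAJOR;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int attempts = 0;
	int fault;

retry:
	fault = fake_handle_mm_fault(&attempts, flags);
	if (fault & VM_FAULT_RETRY) {
		/* Accounting is skipped on the retry path, and the retry
		 * bit is cleared so the loop cannot starve. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	printf("completed after %d attempt(s), %s fault\n", attempts + 1,
	       (fault & VM_FAULT_MAJOR) ? "major" : "minor");
	return 0;
}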
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 49e9e3804de..cf08bdfbe0d 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,3 +1,28 @@
-void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+#ifndef __ARCH_ARM_FAULT_H
+#define __ARCH_ARM_FAULT_H
+
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+#define FSR_WRITE (1 << 11)
+#define FSR_FS4 (1 << 10)
+#define FSR_FS3_0 (15)
+#define FSR_FS5_0 (0x3f)
+
+#ifdef CONFIG_ARM_LPAE
+static inline int fsr_fs(unsigned int fsr)
+{
+ return fsr & FSR_FS5_0;
+}
+#else
+static inline int fsr_fs(unsigned int fsr)
+{
+ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+#endif
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
+
+#endif /* __ARCH_ARM_FAULT_H */
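
A small worked example of the two fsr_fs() decoders defined above, using hypothetical FSR values; the LPAE value 0x07 indexes the "level 3 translation fault" entry in fsr-3level.c further below:

#include <stdio.h>

#define FSR_FS4		(1 << 10)
#define FSR_FS3_0	(15)
#define FSR_FS5_0	(0x3f)

/* Short-descriptor format: status is bits [3:0] plus bit 10 as bit 4. */
static int fsr_fs_2level(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}

/* LPAE long-descriptor format: status is simply bits [5:0]. */
static int fsr_fs_3level(unsigned int fsr)
{
	return fsr & FSR_FS5_0;
}

int main(void)
{
	/* Hypothetical FSR values chosen only to exercise the decoders. */
	unsigned int classic = (1 << 10) | 0x6;	/* FS = 0b10110 = 22 */
	unsigned int lpae = 0x07;		/* level 3 translation fault */

	printf("2-level FS index: %d\n", fsr_fs_2level(classic));
	printf("3-level FS index: %d\n", fsr_fs_3level(lpae));
	return 0;
}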
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
new file mode 100644
index 00000000000..18ca74c0f34
--- /dev/null
+++ b/arch/arm/mm/fsr-2level.c
@@ -0,0 +1,78 @@
+static struct fsr_info fsr_info[] = {
+ /*
+ * The following are the standard ARMv3 and ARMv4 aborts. ARMv5
+ * defines these to be "precise" aborts.
+ */
+ { do_bad, SIGSEGV, 0, "vector exception" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
+ { do_bad, SIGKILL, 0, "terminal exception" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
+ { do_bad, SIGBUS, 0, "external abort on linefetch" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
+ { do_bad, SIGBUS, 0, "external abort on linefetch" },
+ { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
+ { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
+ { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
+ /*
+ * The following are "imprecise" aborts, which are signalled by bit
+ * 10 of the FSR, and may not be recoverable. These are only
+ * supported if the CPU abort handler supports bit 10.
+ */
+ { do_bad, SIGBUS, 0, "unknown 16" },
+ { do_bad, SIGBUS, 0, "unknown 17" },
+ { do_bad, SIGBUS, 0, "unknown 18" },
+ { do_bad, SIGBUS, 0, "unknown 19" },
+ { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
+ { do_bad, SIGBUS, 0, "unknown 21" },
+ { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
+ { do_bad, SIGBUS, 0, "unknown 23" },
+ { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
+ { do_bad, SIGBUS, 0, "unknown 25" },
+ { do_bad, SIGBUS, 0, "unknown 26" },
+ { do_bad, SIGBUS, 0, "unknown 27" },
+ { do_bad, SIGBUS, 0, "unknown 28" },
+ { do_bad, SIGBUS, 0, "unknown 29" },
+ { do_bad, SIGBUS, 0, "unknown 30" },
+ { do_bad, SIGBUS, 0, "unknown 31" },
+};
+
+static struct fsr_info ifsr_info[] = {
+ { do_bad, SIGBUS, 0, "unknown 0" },
+ { do_bad, SIGBUS, 0, "unknown 1" },
+ { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
+ { do_bad, SIGBUS, 0, "unknown 4" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
+ { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
+ { do_bad, SIGBUS, 0, "unknown 10" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
+ { do_bad, SIGBUS, 0, "external abort on translation" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
+ { do_bad, SIGBUS, 0, "unknown 16" },
+ { do_bad, SIGBUS, 0, "unknown 17" },
+ { do_bad, SIGBUS, 0, "unknown 18" },
+ { do_bad, SIGBUS, 0, "unknown 19" },
+ { do_bad, SIGBUS, 0, "unknown 20" },
+ { do_bad, SIGBUS, 0, "unknown 21" },
+ { do_bad, SIGBUS, 0, "unknown 22" },
+ { do_bad, SIGBUS, 0, "unknown 23" },
+ { do_bad, SIGBUS, 0, "unknown 24" },
+ { do_bad, SIGBUS, 0, "unknown 25" },
+ { do_bad, SIGBUS, 0, "unknown 26" },
+ { do_bad, SIGBUS, 0, "unknown 27" },
+ { do_bad, SIGBUS, 0, "unknown 28" },
+ { do_bad, SIGBUS, 0, "unknown 29" },
+ { do_bad, SIGBUS, 0, "unknown 30" },
+ { do_bad, SIGBUS, 0, "unknown 31" },
+};
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
new file mode 100644
index 00000000000..05a4e943183
--- /dev/null
+++ b/arch/arm/mm/fsr-3level.c
@@ -0,0 +1,68 @@
+static struct fsr_info fsr_info[] = {
+ { do_bad, SIGBUS, 0, "unknown 0" },
+ { do_bad, SIGBUS, 0, "unknown 1" },
+ { do_bad, SIGBUS, 0, "unknown 2" },
+ { do_bad, SIGBUS, 0, "unknown 3" },
+ { do_bad, SIGBUS, 0, "reserved translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
+ { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_bad, SIGBUS, 0, "reserved access flag fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
+ { do_bad, SIGBUS, 0, "reserved permission fault" },
+ { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
+ { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
+ { do_bad, SIGBUS, 0, "synchronous external abort" },
+ { do_bad, SIGBUS, 0, "asynchronous external abort" },
+ { do_bad, SIGBUS, 0, "unknown 18" },
+ { do_bad, SIGBUS, 0, "unknown 19" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error" },
+ { do_bad, SIGBUS, 0, "asynchronous parity error" },
+ { do_bad, SIGBUS, 0, "unknown 26" },
+ { do_bad, SIGBUS, 0, "unknown 27" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "unknown 32" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
+ { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGBUS, 0, "unknown 35" },
+ { do_bad, SIGBUS, 0, "unknown 36" },
+ { do_bad, SIGBUS, 0, "unknown 37" },
+ { do_bad, SIGBUS, 0, "unknown 38" },
+ { do_bad, SIGBUS, 0, "unknown 39" },
+ { do_bad, SIGBUS, 0, "unknown 40" },
+ { do_bad, SIGBUS, 0, "unknown 41" },
+ { do_bad, SIGBUS, 0, "unknown 42" },
+ { do_bad, SIGBUS, 0, "unknown 43" },
+ { do_bad, SIGBUS, 0, "unknown 44" },
+ { do_bad, SIGBUS, 0, "unknown 45" },
+ { do_bad, SIGBUS, 0, "unknown 46" },
+ { do_bad, SIGBUS, 0, "unknown 47" },
+ { do_bad, SIGBUS, 0, "unknown 48" },
+ { do_bad, SIGBUS, 0, "unknown 49" },
+ { do_bad, SIGBUS, 0, "unknown 50" },
+ { do_bad, SIGBUS, 0, "unknown 51" },
+ { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
+ { do_bad, SIGBUS, 0, "unknown 53" },
+ { do_bad, SIGBUS, 0, "unknown 54" },
+ { do_bad, SIGBUS, 0, "unknown 55" },
+ { do_bad, SIGBUS, 0, "unknown 56" },
+ { do_bad, SIGBUS, 0, "unknown 57" },
+ { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
+ { do_bad, SIGBUS, 0, "unknown 59" },
+ { do_bad, SIGBUS, 0, "unknown 60" },
+ { do_bad, SIGBUS, 0, "unknown 61" },
+ { do_bad, SIGBUS, 0, "unknown 62" },
+ { do_bad, SIGBUS, 0, "unknown 63" },
+};
+
+#define ifsr_info fsr_info
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2be9139a4ef..feacf4c7671 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,9 +1,38 @@
#include <linux/kernel.h>
#include <asm/cputype.h>
+#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
+#include <asm/sections.h>
+pgd_t *idmap_pgd;
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+ unsigned long prot)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+ pmd = pmd_alloc_one(&init_mm, addr);
+ if (!pmd) {
+ pr_warning("Failed to allocate identity pmd.\n");
+ return;
+ }
+ pud_populate(&init_mm, pud, pmd);
+ pmd += pmd_index(addr);
+ } else
+ pmd = pmd_offset(pud, addr);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ *pmd = __pmd((addr & PMD_MASK) | prot);
+ flush_pmd_entry(pmd);
+ } while (pmd++, addr = next, addr != end);
+}
+#else /* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
@@ -15,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd[1] = __pmd(addr);
flush_pmd_entry(pmd);
}
+#endif /* CONFIG_ARM_LPAE */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
unsigned long prot)
@@ -28,11 +58,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long prot, next;
- prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+ prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -43,48 +73,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (pgd++, addr = next, addr != end);
}
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
- pmd_t *pmd = pmd_offset(pud, addr);
- pmd_clear(pmd);
-}
+extern char __idmap_text_start[], __idmap_text_end[];
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+static int __init init_static_idmap(void)
{
- pud_t *pud = pud_offset(pgd, addr);
- unsigned long next;
+ phys_addr_t idmap_start, idmap_end;
- do {
- next = pud_addr_end(addr, end);
- idmap_del_pmd(pud, addr, next);
- } while (pud++, addr = next, addr != end);
-}
+ idmap_pgd = pgd_alloc(&init_mm);
+ if (!idmap_pgd)
+ return -ENOMEM;
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
- unsigned long next;
+ /* Add an identity mapping for the physical address of the section. */
+ idmap_start = virt_to_phys((void *)__idmap_text_start);
+ idmap_end = virt_to_phys((void *)__idmap_text_end);
- pgd += pgd_index(addr);
- do {
- next = pgd_addr_end(addr, end);
- idmap_del_pud(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
+ pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+ (long long)idmap_start, (long long)idmap_end);
+ identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+ return 0;
}
-#endif
+early_initcall(init_static_idmap);
/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages. This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
*/
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
{
- /*
- * We need to access to user-mode page tables here. For kernel threads
- * we don't have any user-mode mappings so we use the context that we
- * "borrowed".
- */
- identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+ /* Clean and invalidate L1. */
+ flush_cache_all();
+
+ /* Switch to the identity mapping. */
+ cpu_switch_mm(idmap_pgd, &init_mm);
+
+ /* Flush the TLB. */
local_flush_tlb_all();
}
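
The new init_static_idmap()/idmap_add_pmd() path above walks the __idmap_text_start..__idmap_text_end range in PMD-sized steps. A stand-alone model of that walk, assuming 2MB sections and a simplified pmd_addr_end(), with hypothetical section addresses:

#include <stdio.h>

/* Illustrative 2MB PMD granularity; the real PMD_SIZE/PMD_MASK come
 * from the asm/pgtable headers. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Model of pmd_addr_end(): step to the next PMD boundary, but never
 * past 'end'. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
	return boundary < end ? boundary : end;
}

int main(void)
{
	/* Hypothetical __idmap_text_start/__idmap_text_end physical range. */
	unsigned long addr = 0x80304ab0, end = 0x80305200, next;

	do {
		next = pmd_addr_end(addr, end);
		printf("map section at 0x%08lx (covers 0x%08lx-0x%08lx)\n",
		       addr & PMD_MASK, addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}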
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fbdd12ea3a5..e34ea8adc1f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,7 +20,6 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
-#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/prom.h>
@@ -32,6 +31,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <asm/memblock.h>
#include "mm.h"
@@ -134,30 +134,18 @@ void show_mem(unsigned int filter)
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
- unsigned long *max_high)
+ unsigned long *max_high)
{
struct meminfo *mi = &meminfo;
int i;
- *min = -1UL;
- *max_low = *max_high = 0;
-
- for_each_bank (i, mi) {
- struct membank *bank = &mi->bank[i];
- unsigned long start, end;
-
- start = bank_pfn_start(bank);
- end = bank_pfn_end(bank);
-
- if (*min > start)
- *min = start;
- if (*max_high < end)
- *max_high = end;
- if (bank->highmem)
- continue;
- if (*max_low < end)
- *max_low = end;
- }
+ /* This assumes the meminfo array is properly sorted */
+ *min = bank_pfn_start(&mi->bank[0]);
+ for_each_bank (i, mi)
+ if (mi->bank[i].highmem)
+ break;
+ *max_low = bank_pfn_end(&mi->bank[i - 1]);
+ *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
static void __init arm_bootmem_init(unsigned long start_pfn,
@@ -319,20 +307,10 @@ static void arm_memory_present(void)
}
#endif
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
- memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -371,7 +349,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
}
@@ -403,8 +381,6 @@ void __init bootmem_init(void)
*/
arm_bootmem_free(min, max_low, max_high);
- high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
-
/*
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
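
The simplified find_limits() above leans on the meminfo array already being sorted, as the new comment notes. A quick stand-alone mirror of the same logic on a hypothetical three-bank layout:

#include <stdio.h>

struct bank { unsigned long start_pfn, end_pfn; int highmem; };

int main(void)
{
	/* Hypothetical, already-sorted bank list: two lowmem banks and
	 * one highmem bank. */
	struct bank banks[] = {
		{ 0x60000, 0x70000, 0 },
		{ 0x70000, 0x80000, 0 },
		{ 0x80000, 0x90000, 1 },
	};
	int nr = 3, i;
	unsigned long min, max_low, max_high;

	/* Mirror of find_limits(): the first bank gives the minimum pfn,
	 * the last non-highmem bank gives max_low, and the last bank
	 * overall gives max_high. */
	min = banks[0].start_pfn;
	for (i = 0; i < nr; i++)
		if (banks[i].highmem)
			break;
	max_low = banks[i - 1].end_pfn;
	max_high = banks[nr - 1].end_pfn;

	printf("min=0x%lx max_low=0x%lx max_high=0x%lx\n",
	       min, max_low, max_high);
	return 0;
}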
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55..80632e8d753 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
#include <asm/mach/map.h>
#include "mm.h"
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING 0x80000000
-
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
@@ -64,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
} while (seq != init_mm.context.kvm_seq);
}
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Section support is unsafe on SMP - If you iounmap and ioremap a region,
* the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmdp;
flush_cache_vunmap(addr, end);
pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmdp = pmd_offset(pud, addr);
do {
- pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+ pmd_t pmd = *pmdp;
- pmd = *pmdp;
if (!pmd_none(pmd)) {
/*
* Clear the PMD from the page table, and
@@ -104,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
- addr += PGDIR_SIZE;
- pgd++;
+ addr += PMD_SIZE;
+ pmdp += 2;
} while (addr < end);
/*
@@ -124,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
{
unsigned long addr = virt, end = virt + size;
pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
/*
* Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
unmap_area_sections(virt, size);
pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
do {
- pmd_t *pmd = pmd_offset(pgd, addr);
-
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
flush_pmd_entry(pmd);
- addr += PGDIR_SIZE;
- pgd++;
+ addr += PMD_SIZE;
+ pmd += 2;
} while (addr < end);
return 0;
@@ -154,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
{
unsigned long addr = virt, end = virt + size;
pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
/*
* Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
unmap_area_sections(virt, size);
pgd = pgd_offset_k(virt);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
do {
unsigned long super_pmd_val, i;
@@ -170,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
for (i = 0; i < 8; i++) {
- pmd_t *pmd = pmd_offset(pgd, addr);
-
pmd[0] = __pmd(super_pmd_val);
pmd[1] = __pmd(super_pmd_val);
flush_pmd_entry(pmd);
- addr += PGDIR_SIZE;
- pgd++;
+ addr += PMD_SIZE;
+ pmd += 2;
}
pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
@@ -195,17 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long addr;
struct vm_struct * area;
+#ifndef CONFIG_ARM_LPAE
/*
* High mappings must be supersection aligned
*/
if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
return NULL;
-
- /*
- * Don't allow RAM to be mapped - this causes problems with ARMv6+
- */
- if (WARN_ON(pfn_valid(pfn)))
- return NULL;
+#endif
type = get_mem_type(mtype);
if (!type)
@@ -216,12 +213,40 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
*/
size = PAGE_ALIGN(offset + size);
+ /*
+ * Try to reuse one of the static mappings whenever possible.
+ */
+ read_lock(&vmlist_lock);
+ for (area = vmlist; area; area = area->next) {
+ if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+ break;
+ if (!(area->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+ continue;
+ if (__phys_to_pfn(area->phys_addr) > pfn ||
+ __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+ continue;
+ /* we can drop the lock here as we know *area is static */
+ read_unlock(&vmlist_lock);
+ addr = (unsigned long)area->addr;
+ addr += __pfn_to_phys(pfn) - area->phys_addr;
+ return (void __iomem *) (offset + addr);
+ }
+ read_unlock(&vmlist_lock);
+
+ /*
+ * Don't allow RAM to be mapped - this causes problems with ARMv6+
+ */
+ if (WARN_ON(pfn_valid(pfn)))
+ return NULL;
+
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
if (DOMAIN_IO == 0 &&
(((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -313,28 +338,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
- struct vm_struct **p, *tmp;
+ struct vm_struct *vm;
- /*
- * If this is a section based mapping we need to handle it
- * specially as the VM subsystem does not know how to handle
- * such a beast. We need the lock here b/c we need to clear
- * all the mappings before the area can be reclaimed
- * by someone else.
- */
- write_lock(&vmlist_lock);
- for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
- if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
- if (tmp->flags & VM_ARM_SECTION_MAPPING) {
- unmap_area_sections((unsigned long)tmp->addr,
- tmp->size);
- }
+ read_lock(&vmlist_lock);
+ for (vm = vmlist; vm; vm = vm->next) {
+ if (vm->addr > addr)
+ break;
+ if (!(vm->flags & VM_IOREMAP))
+ continue;
+ /* If this is a static mapping we must leave it alone */
+ if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+ (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+ read_unlock(&vmlist_lock);
+ return;
+ }
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+ /*
+ * If this is a section based mapping we need to handle it
+ * specially as the VM subsystem does not know how to handle
+ * such a beast.
+ */
+ if ((vm->addr == addr) &&
+ (vm->flags & VM_ARM_SECTION_MAPPING)) {
+ unmap_area_sections((unsigned long)vm->addr, vm->size);
break;
}
- }
- write_unlock(&vmlist_lock);
#endif
+ }
+ read_unlock(&vmlist_lock);
vunmap(addr);
}
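
The static-mapping reuse added to __arm_ioremap_pfn_caller() boils down to a containment check plus an offset into the existing virtual window. A stand-alone sketch with hypothetical addresses:

#include <stdio.h>

int main(void)
{
	/* Hypothetical static mapping set up by iotable_init(): 1MB of
	 * device registers at physical 0x48000000 mapped at 0xfc000000. */
	unsigned long map_phys = 0x48000000, map_virt = 0xfc000000;
	unsigned long map_size = 0x00100000;

	/* A later ioremap() request that falls inside that range... */
	unsigned long req_phys = 0x48020000, req_size = 0x1000;

	if (req_phys >= map_phys &&
	    req_phys + req_size - 1 <= map_phys + map_size - 1) {
		/* ...is satisfied by offsetting into the existing mapping,
		 * with no new vmalloc space consumed. */
		unsigned long addr = map_virt + (req_phys - map_phys);
		printf("reusing static mapping: returning 0x%08lx\n", addr);
	}
	return 0;
}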
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index ad7cce3bc43..70f6d3ea483 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING 0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING 0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt) ((mt) << 20)
+#define VM_ARM_MTYPE_MASK (0x1f << 20)
+
#endif
#ifdef CONFIG_ZONE_DMA
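
The VM_ARM_MTYPE()/VM_ARM_MTYPE_MASK pair above packs the memory type into bits 24:20 of vm_struct->flags. A minimal sketch of the encode/decode round trip, with an illustrative VM_IOREMAP value and a hypothetical mem_types[] index:

#include <stdio.h>

/* Same encodings as added to arch/arm/mm/mm.h above. */
#define VM_ARM_STATIC_MAPPING	0x40000000
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)
#define VM_IOREMAP		0x00000001	/* illustrative value only */

int main(void)
{
	unsigned int mtype = 3;	/* hypothetical mem_types[] index */
	unsigned long flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING |
			      VM_ARM_MTYPE(mtype);

	/* ioremap() can later recover the memory type from the flags and
	 * check it against the type being requested. */
	printf("stored mtype = %lu, matches request: %s\n",
	       (flags & VM_ARM_MTYPE_MASK) >> 20,
	       ((flags & VM_ARM_MTYPE_MASK) == VM_ARM_MTYPE(mtype)) ?
	       "yes" : "no");
	return 0;
}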
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 74be05f3e03..ce8cb1970d7 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -9,13 +9,51 @@
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+ unsigned long pgoff)
+{
+ unsigned long base = addr & ~(SHMLBA-1);
+ unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
+
+ if (base + off <= addr)
+ return base + off;
+
+ return base - off;
+}
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+
+ if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ return 1;
+
+ return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+ unsigned long gap = rlimit(RLIMIT_STACK);
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
@@ -32,25 +70,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
- unsigned int cache_type;
- int do_align = 0, aliasing = 0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
/*
* We only need to do colour alignment if either the I or D
- * caches alias. This is indicated by bits 9 and 21 of the
- * cache type register.
+ * caches alias.
*/
- cache_type = read_cpuid_cachetype();
- if (cache_type != read_cpuid_id()) {
- aliasing = (cache_type | cache_type >> 12) & (1 << 11);
- if (aliasing)
- do_align = filp || flags & MAP_SHARED;
- }
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
@@ -79,13 +107,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > mm->cached_hole_size) {
start_addr = addr = mm->free_area_cache;
} else {
- start_addr = addr = TASK_UNMAPPED_BASE;
+ start_addr = addr = mm->mmap_base;
mm->cached_hole_size = 0;
}
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
- addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
full_search:
if (do_align)
@@ -122,6 +146,134 @@ full_search:
}
}
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
+
+ /*
+ * We only need to do colour alignment if either the I or D
+ * caches alias.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ /* check if free_area_cache is useful for us */
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = mm->free_area_cache;
+ if (do_align) {
+ unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
+ addr = base + len;
+ }
+
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+ if (!vma || addr <= vma->vm_start)
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+
+ if (mm->mmap_base < len)
+ goto bottomup;
+
+ addr = mm->mmap_base - len;
+ if (do_align)
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+ if (!vma || addr+len <= vma->vm_start)
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+ /* remember the largest hole we saw so far */
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start - len;
+ if (do_align)
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ } while (len < vma->vm_start);
+
+bottomup:
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->cached_hole_size = ~0UL;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = 0UL;
+
+ /* 8 bits of randomness in 20 address space bits */
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE))
+ random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+}
/*
* You really shouldn't be using read() or write() on /dev/mem. This
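
COLOUR_ALIGN() and the new COLOUR_ALIGN_DOWN() keep shared mappings on the same cache colour as their page offset. A stand-alone sketch, assuming ARM's usual SHMLBA of four pages:

#include <stdio.h>

/* SHMLBA assumed to be 4 * PAGE_SIZE (16KB) here, so shared mappings
 * line up with a 4-way VIPT aliasing cache. */
#define PAGE_SHIFT	12
#define SHMLBA		(4 << PAGE_SHIFT)

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
		((pgoff << PAGE_SHIFT) & (SHMLBA - 1)));
}

static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA - 1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return (base + off <= addr) ? base + off : base - off;
}

int main(void)
{
	/* Hypothetical hint address and file page offset. */
	unsigned long addr = 0x40001234, pgoff = 3;

	/* Both results share the colour of pgoff: offset 0x3000 within
	 * the 16KB colour period. */
	printf("up:   0x%08lx\n", colour_align(addr, pgoff));
	printf("down: 0x%08lx\n", colour_align_down(addr, pgoff));
	return 0;
}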
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cb..94c5a0c94f5 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
+#include <linux/vmalloc.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -150,6 +151,7 @@ static int __init early_nowrite(char *__unused)
}
early_param("nowb", early_nowrite);
+#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@ static int __init early_ecc(char *p)
return 0;
}
early_param("ecc", early_ecc);
+#endif
static int __init noalign_setup(char *__unused)
{
@@ -228,10 +231,12 @@ static struct mem_type mem_types[] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+#ifndef CONFIG_ARM_LPAE
[MT_MINICLEAN] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
+#endif
[MT_LOW_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
* ARMv6 and above have extended page tables.
*/
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+#ifndef CONFIG_ARM_LPAE
/*
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
@@ -436,6 +442,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
if (is_smp()) {
/*
@@ -474,6 +481,18 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
}
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Do not generate access flag faults for the kernel mappings.
+ */
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+ mem_types[i].prot_pte |= PTE_EXT_AF;
+ mem_types[i].prot_sect |= PMD_SECT_AF;
+ }
+ kern_pgprot |= PTE_EXT_AF;
+ vecs_pgprot |= PTE_EXT_AF;
+#endif
+
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
@@ -529,13 +548,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
- void *ptr = __va(memblock_alloc(sz, sz));
+ void *ptr = __va(memblock_alloc(sz, align));
memset(ptr, 0, sz);
return ptr;
}
+static void __init *early_alloc(unsigned long sz)
+{
+ return early_alloc_aligned(sz, sz);
+}
+
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
@@ -572,8 +596,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
if (((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
+#ifndef CONFIG_ARM_LPAE
if (addr & SECTION_SIZE)
pmd++;
+#endif
do {
*pmd = __pmd(phys | type->prot_sect);
@@ -603,6 +629,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
+#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
const struct mem_type *type)
{
@@ -662,6 +689,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
} while (addr != end);
}
+#endif /* !CONFIG_ARM_LPAE */
/*
* Create the page directory entries and any necessary
@@ -685,14 +713,16 @@ static void __init create_mapping(struct map_desc *md)
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+ md->virtual >= PAGE_OFFSET &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
printk(KERN_WARNING "BUG: mapping for 0x%08llx"
- " at 0x%08lx overlaps vmalloc space\n",
+ " at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
+#ifndef CONFIG_ARM_LPAE
/*
* Catch 36-bit addresses
*/
@@ -700,6 +730,7 @@ static void __init create_mapping(struct map_desc *md)
create_36bit_mapping(md, type);
return;
}
+#endif
addr = md->virtual & PAGE_MASK;
phys = __pfn_to_phys(md->pfn);
@@ -729,18 +760,33 @@ static void __init create_mapping(struct map_desc *md)
*/
void __init iotable_init(struct map_desc *io_desc, int nr)
{
- int i;
+ struct map_desc *md;
+ struct vm_struct *vm;
+
+ if (!nr)
+ return;
- for (i = 0; i < nr; i++)
- create_mapping(io_desc + i);
+ vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+ for (md = io_desc; nr; md++, nr--) {
+ create_mapping(md);
+ vm->addr = (void *)(md->virtual & PAGE_MASK);
+ vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+ vm->phys_addr = __pfn_to_phys(md->pfn);
+ vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags |= VM_ARM_MTYPE(md->type);
+ vm->caller = iotable_init;
+ vm_area_add_early(vm++);
+ }
}
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+ (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
*/
static int __init early_vmalloc(char *arg)
{
@@ -775,6 +821,9 @@ void __init sanity_check_meminfo(void)
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
+ if (bank->start > ULONG_MAX)
+ highmem = 1;
+
#ifdef CONFIG_HIGHMEM
if (__va(bank->start) >= vmalloc_min ||
__va(bank->start) < (void *)PAGE_OFFSET)
@@ -786,7 +835,7 @@ void __init sanity_check_meminfo(void)
* Split those memory banks which are partially overlapping
* the vmalloc area greatly simplifying things later.
*/
- if (__va(bank->start) < vmalloc_min &&
+ if (!highmem && __va(bank->start) < vmalloc_min &&
bank->size > vmalloc_min - __va(bank->start)) {
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -807,6 +856,17 @@ void __init sanity_check_meminfo(void)
bank->highmem = highmem;
/*
+ * Highmem banks not allowed with !CONFIG_HIGHMEM.
+ */
+ if (highmem) {
+ printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
+ "(!CONFIG_HIGHMEM).\n",
+ (unsigned long long)bank->start,
+ (unsigned long long)bank->start + bank->size - 1);
+ continue;
+ }
+
+ /*
* Check whether this memory bank would entirely overlap
* the vmalloc area.
*/
@@ -860,6 +920,7 @@ void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
+ high_memory = __va(lowmem_limit - 1) + 1;
memblock_set_current_limit(lowmem_limit);
}
@@ -890,14 +951,20 @@ static inline void prepare_page_table(void)
/*
* Clear out all the kernel space mappings, except for the first
- * memory bank, up to the end of the vmalloc region.
+ * memory bank, up to the vmalloc region.
*/
for (addr = __phys_to_virt(end);
- addr < VMALLOC_END; addr += PMD_SIZE)
+ addr < VMALLOC_START; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
+#ifdef CONFIG_ARM_LPAE
+/* the first page is reserved for pgd */
+#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
+ PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
+#else
#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+#endif
/*
* Reserve the special regions of memory
@@ -920,8 +987,8 @@ void __init arm_mm_memblock_reserve(void)
}
/*
- * Set up device the mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings. Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
* This means you have to be careful how you debug this function, or any
* called function. This means you can't use any function or debugging
* method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +1003,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
vectors_page = early_alloc(PAGE_SIZE);
- for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+ for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
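
The new SWAPPER_PG_DIR_SIZE reserves extra space under LPAE because the first page holds the small pgd and a full PMD table follows it. Rough arithmetic, with the table geometry assumed from the usual 2-level and 3-level layouts:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: classic ARM has 2048 pgd entries of 8 bytes;
	 * LPAE reserves one page for the pgd plus 4 * 512 pmd entries of
	 * 8 bytes each. */
	unsigned long page_size = 4096;
	unsigned long classic = 2048 * 8;
	unsigned long lpae = page_size + 4 * 512 * 8;

	printf("swapper_pg_dir reservation: classic %lu KB, LPAE %lu KB\n",
	       classic / 1024, lpae / 1024);
	return 0;
}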
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 941a98c9e8a..4fc6794cca4 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
void __init sanity_check_meminfo(void)
{
+ phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
+ high_memory = __va(end - 1) + 1;
}
/*
@@ -43,7 +45,7 @@ void __init paging_init(struct machine_desc *mdesc)
/*
* We don't need to do anything here for nommu machines.
*/
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
{
}
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index b2027c154b2..a3e78ccabd6 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
+#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -17,6 +18,14 @@
#include "mm.h"
+#ifdef CONFIG_ARM_LPAE
+#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_free(pgd) kfree(pgd)
+#else
+#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
+#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
+#endif
+
/*
* need to get a 16k page for level 1
*/
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
- new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+ new_pgd = __pgd_alloc();
if (!new_pgd)
goto no_pgd;
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Allocate PMD table for modules and pkmap mappings.
+ */
+ new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
+ MODULES_VADDR);
+ if (!new_pud)
+ goto no_pud;
+
+ new_pmd = pmd_alloc(mm, new_pud, 0);
+ if (!new_pmd)
+ goto no_pmd;
+#endif
+
if (!vectors_high()) {
/*
* On ARM, first page must always be allocated since it
- * contains the machine vectors.
+ * contains the machine vectors. The vectors are always high
+ * with LPAE.
*/
new_pud = pud_alloc(mm, new_pgd, 0);
if (!new_pud)
@@ -74,7 +98,7 @@ no_pte:
no_pmd:
pud_free(mm, new_pud);
no_pud:
- free_pages((unsigned long)new_pgd, 2);
+ __pgd_free(new_pgd);
no_pgd:
return NULL;
}
@@ -111,5 +135,24 @@ no_pud:
pgd_clear(pgd);
pud_free(mm, pud);
no_pgd:
- free_pages((unsigned long) pgd_base, 2);
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Free modules/pkmap or identity pmd tables.
+ */
+ for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ if (pgd_val(*pgd) & L_PGD_SWAPPER)
+ continue;
+ pud = pud_offset(pgd, 0);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ pmd = pmd_offset(pud, 0);
+ pud_clear(pud);
+ pmd_free(mm, pmd);
+ pgd_clear(pgd);
+ pud_free(mm, pud);
+ }
+#endif
+ __pgd_free(pgd_base);
}
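
The __pgd_alloc()/__pgd_free() helpers above hide a large size difference between the two formats: classic ARM needs a 16KB, order-2 page allocation, while the LPAE top level is small enough for kmalloc(). Rough numbers, with the entry counts assumed from the respective page-table layouts:

#include <stdio.h>

int main(void)
{
	/* Assumed: classic ARM has 2048 pgd entries of 8 bytes (two
	 * hardware entries each); LPAE has only 4 entries of 8 bytes. */
	unsigned long classic = 2048 * 8;	/* __get_free_pages(GFP_KERNEL, 2) */
	unsigned long lpae = 4 * 8;		/* kmalloc(PTRS_PER_PGD * sizeof(pgd_t)) */

	printf("per-process pgd: classic %lu bytes, LPAE %lu bytes\n",
	       classic, lpae);
	return 0;
}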
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 67469665d47..234951345eb 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm1020_reset)
+ .popsection
/*
* cpu_arm1020_do_idle()
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4251421c0ed..c244b06caac 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm1020e_reset)
+ .popsection
/*
* cpu_arm1020e_do_idle()
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index d283cf3d06e..38fe22efd18 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm1022_reset)
+ .popsection
/*
* cpu_arm1022_do_idle()
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 678a1ceafed..3eb9c3c26c7 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm1026_reset)
+ .popsection
/*
* cpu_arm1026_do_idle()
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index e5b974cddac..4fbeb5b8e6c 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset)
mov r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
mov r1, #0x30
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
+ENDPROC(cpu_arm6_reset)
+ENDPROC(cpu_arm7_reset)
+ .popsection
__CPUINIT
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 55f4e290665..0ac908c7ade 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm720_reset)
+ .popsection
__CPUINIT
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 4506be3adda..dc5de5d53f2 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm740_reset)
+ .popsection
__CPUINIT
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7e0e1fe4ed4..6ddea3e464b 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm7tdmi_reset)
mov pc, r0
+ENDPROC(cpu_arm7tdmi_reset)
+ .popsection
__CPUINIT
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 88fb3d9e064..cb941ae95f6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm920_reset)
+ .popsection
/*
* cpu_arm920_do_idle()
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 490e1883385..4ec0e074dd5 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm922_reset)
+ .popsection
/*
* cpu_arm922_do_idle()
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 51d494be057..9dccd9a365b 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
+ENDPROC(cpu_arm925_reset)
+ .popsection
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 9f8fd91f918..820259b81a1 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm926_reset)
+ .popsection
/*
* cpu_arm926_do_idle()
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index ac750d50615..9fdc0a17097 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm940_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm940_reset)
+ .popsection
/*
* cpu_arm940_do_idle()
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 683af3a182b..f684cfedcca 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm946_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_arm946_reset)
+ .popsection
/*
* cpu_arm946_do_idle()
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 2120f9e2af7..8881391dfb9 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_arm9tdmi_reset)
mov pc, r0
+ENDPROC(cpu_arm9tdmi_reset)
+ .popsection
__CPUINIT
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 4c7a5710472..272558a133a 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
* loc: location to jump to for soft reset
*/
.align 4
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
nop
nop
mov pc, r0
+ENDPROC(cpu_fa526_reset)
+ .popsection
/*
* cpu_fa526_do_idle()
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 8a6c2f78c1c..ba3c500584a 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_feroceon_reset)
+ .popsection
/*
* cpu_feroceon_do_idle()
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 307a4def8d3..2d8ff3ad86d 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,8 +91,9 @@
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
-#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
- L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+#if !defined (CONFIG_ARM_LPAE) && \
+ (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
+ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index db52b0fb14a..cdfedc5b8ad 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
* (same as arm926)
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_mohawk_reset)
+ .popsection
/*
* cpu_mohawk_do_idle()
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index d50ada26edd..775d70fba93 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_sa110_reset)
+ .popsection
/*
* cpu_sa110_do_idle(type)
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7d91545d089..3aa0da11fd8 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
+ENDPROC(cpu_sa1100_reset)
+ .popsection
/*
* cpu_sa1100_do_idle(type)
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d061d2fa550..5900cd520e8 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
* - loc - location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB
mov pc, r0
+ENDPROC(cpu_v6_reset)
+ .popsection
/*
* cpu_v6_do_idle()
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
new file mode 100644
index 00000000000..3a4b3e7b888
--- /dev/null
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -0,0 +1,171 @@
+/*
+ * arch/arm/mm/proc-v7-2level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define TTB_S (1 << 1)
+#define TTB_RGN_NC (0 << 3)
+#define TTB_RGN_OC_WBWA (1 << 3)
+#define TTB_RGN_OC_WT (2 << 3)
+#define TTB_RGN_OC_WB (3 << 3)
+#define TTB_NOS (1 << 5)
+#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP PMD_SECT_WB
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
+
+/*
+ * cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys
+ *
+ * - pgd_phys - physical address of new TTB
+ *
+ * It is assumed that:
+ * - we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+ mov r2, #0
+ ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
+#ifdef CONFIG_ARM_ERRATA_430973
+ mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
+#ifdef CONFIG_ARM_ERRATA_754322
+ dsb
+#endif
+ mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
+ isb
+1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+ isb
+#ifdef CONFIG_ARM_ERRATA_754322
+ dsb
+#endif
+ mcr p15, 0, r1, c13, c0, 1 @ set context ID
+ isb
+#endif
+ mov pc, lr
+ENDPROC(cpu_v7_switch_mm)
+
+/*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+ * Set a level 2 translation table entry.
+ *
+ * - ptep - pointer to level 2 translation table entry
+ * (hardware version is stored at +2048 bytes)
+ * - pte - PTE value to store
+ * - ext - value for extended PTE bits
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+ str r1, [r0] @ linux version
+
+ bic r3, r1, #0x000003f0
+ bic r3, r3, #PTE_TYPE_MASK
+ orr r3, r3, r2
+ orr r3, r3, #PTE_EXT_AP0 | 2
+
+ tst r1, #1 << 4
+ orrne r3, r3, #PTE_EXT_TEX(1)
+
+ eor r1, r1, #L_PTE_DIRTY
+ tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
+ orrne r3, r3, #PTE_EXT_APX
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
+ tstne r3, #PTE_EXT_APX
+ bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
+
+ ARM( str r3, [r0, #2048]! )
+ THUMB( add r0, r0, #2048 )
+ THUMB( str r3, [r0] )
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
+ mov pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
+
+ /*
+ * Memory region attributes with SCTLR.TRE=1
+ *
+ * n = TEX[0],C,B
+ * TR = PRRR[2n+1:2n] - memory type
+ * IR = NMRR[2n+1:2n] - inner cacheable property
+ * OR = NMRR[2n+17:2n+16] - outer cacheable property
+ *
+ * n TR IR OR
+ * UNCACHED 000 00
+ * BUFFERABLE 001 10 00 00
+ * WRITETHROUGH 010 10 10 10
+ * WRITEBACK 011 10 11 11
+ * reserved 110
+ * WRITEALLOC 111 10 01 01
+ * DEV_SHARED 100 01
+ * DEV_NONSHARED 100 01
+ * DEV_WC 001 10
+ * DEV_CACHED 011 10
+ *
+ * Other attributes:
+ *
+ * DS0 = PRRR[16] = 0 - device shareable property
+ * DS1 = PRRR[17] = 1 - device shareable property
+ * NS0 = PRRR[18] = 0 - normal shareable property
+ * NS1 = PRRR[19] = 1 - normal shareable property
+ * NOS = PRRR[24+n] = 1 - not outer shareable
+ */
+.equ PRRR, 0xff0a81a8
+.equ NMRR, 0x40e040e0
+
+ /*
+ * Macro for setting up the TTBRx and TTBCR registers.
+ * - \ttb0 and \ttb1 updated with the corresponding flags.
+ */
+ .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+ mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
+ ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
+ ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP)
+ ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
+ ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
+ mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
+ .endm
+
+ __CPUINIT
+
+ /* AT
+ * TFR EV X F I D LR S
+ * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
+ * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+ * 1 0 110 0011 1100 .111 1101 < we want
+ */
+ .align 2
+ .type v7_crval, #object
+v7_crval:
+ crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
+
+ .previous
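
Before moving on to the LPAE variant, it may help to restate what cpu_v7_set_pte_ext does for classic page tables: it stores the Linux PTE at ptep, then derives the ARMv7 small-page descriptor written 2048 bytes above it. A minimal C model of that translation follows; the L_PTE_*/PTE_EXT_* bit values are assumptions mirroring the kernel's 2-level pgtable headers (they are not part of this diff), and the CONFIG_CPU_USE_DOMAINS special case is omitted.

#include <stdint.h>

/* Illustrative model only, not the kernel's implementation.  Bit values
 * below are assumed from the 2-level pgtable headers. */
#define PTE_TYPE_MASK	(3u << 0)
#define PTE_EXT_XN	(1u << 0)
#define PTE_EXT_AP0	(1u << 4)
#define PTE_EXT_AP1	(1u << 5)
#define PTE_EXT_TEX(x)	((x) << 6)
#define PTE_EXT_APX	(1u << 9)

#define L_PTE_PRESENT	(1u << 0)
#define L_PTE_YOUNG	(1u << 1)
#define L_PTE_DIRTY	(1u << 6)
#define L_PTE_RDONLY	(1u << 7)
#define L_PTE_USER	(1u << 8)
#define L_PTE_XN	(1u << 9)

static uint32_t v7_hw_pte(uint32_t lpte, uint32_t ext)
{
	/* drop the software bits, force a small-page descriptor with AP0 set */
	uint32_t hw = (lpte & ~0x3f0u & ~PTE_TYPE_MASK) | ext | PTE_EXT_AP0 | 2;

	if (lpte & (1u << 4))			/* memory-type bit -> TEX[0] */
		hw |= PTE_EXT_TEX(1);

	/* APX makes the page read-only even for the kernel; it is set when
	 * the page is L_PTE_RDONLY or still clean, which is what the
	 * eor/tst pair in the assembly computes */
	if ((lpte & L_PTE_RDONLY) || !(lpte & L_PTE_DIRTY))
		hw |= PTE_EXT_APX;

	if (lpte & L_PTE_USER)
		hw |= PTE_EXT_AP1;
	if (lpte & L_PTE_XN)
		hw |= PTE_EXT_XN;

	/* pages that are not young or not present get a faulting entry */
	if (!(lpte & L_PTE_YOUNG) || !(lpte & L_PTE_PRESENT))
		hw = 0;

	return hw;			/* value the assembly stores at ptep + 2048 */
}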
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
new file mode 100644
index 00000000000..8de0f1dd154
--- /dev/null
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -0,0 +1,150 @@
+/*
+ * arch/arm/mm/proc-v7-3level.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ * based on arch/arm/mm/proc-v7-2level.S
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define TTB_IRGN_NC (0 << 8)
+#define TTB_IRGN_WBWA (1 << 8)
+#define TTB_IRGN_WT (2 << 8)
+#define TTB_IRGN_WB (3 << 8)
+#define TTB_RGN_NC (0 << 10)
+#define TTB_RGN_OC_WBWA (1 << 10)
+#define TTB_RGN_OC_WT (2 << 10)
+#define TTB_RGN_OC_WB (3 << 10)
+#define TTB_S (3 << 12)
+#define TTB_EAE (1 << 31)
+
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB)
+#define PMD_FLAGS_UP (PMD_SECT_WB)
+
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
+#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
+
+/*
+ * cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ * Set the translation table base pointer to be pgd_phys (physical address of
+ * the new TTB).
+ */
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+ ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+ and r3, r1, #0xff
+ mov r3, r3, lsl #(48 - 32) @ ASID
+ mcrr p15, 0, r0, r3, c2 @ set TTB 0
+ isb
+#endif
+ mov pc, lr
+ENDPROC(cpu_v7_switch_mm)
+
+/*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+ * Set a level 3 translation table entry.
+ * - ptep - pointer to level 3 translation table entry
+ * - pte - PTE value to store (64-bit in r2 and r3)
+ */
+ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
+ tst r2, #L_PTE_PRESENT
+ beq 1f
+ tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
+ orreq r2, #L_PTE_RDONLY
+1: strd r2, r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
+ mov pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
+
+ /*
+ * Memory region attributes for LPAE (defined in pgtable-3level.h):
+ *
+ * n = AttrIndx[2:0]
+ *
+ * n MAIR
+ * UNCACHED 000 00000000
+ * BUFFERABLE 001 01000100
+ * DEV_WC 001 01000100
+ * WRITETHROUGH 010 10101010
+ * WRITEBACK 011 11101110
+ * DEV_CACHED 011 11101110
+ * DEV_SHARED 100 00000100
+ * DEV_NONSHARED 100 00000100
+ * unused 101
+ * unused 110
+ * WRITEALLOC 111 11111111
+ */
+.equ PRRR, 0xeeaa4400 @ MAIR0
+.equ NMRR, 0xff000004 @ MAIR1
+
+ /*
+ * Macro for setting up the TTBRx and TTBCR registers.
+ * - \ttbr1 updated.
+ */
+ .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+ ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
+ cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below)
+ mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
+ orr \tmp, \tmp, #TTB_EAE
+ ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
+ ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
+ ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
+ ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
+ /*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ * 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ * 0x80000000: T0SZ = 0, T1SZ = 1
+ * 0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+ bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET?
+ orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
+#if defined CONFIG_VMSPLIT_2G
+ /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
+ add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries
+#elif defined CONFIG_VMSPLIT_3G
+ /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
+ add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd
+#endif
+ /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
+9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register
+ mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
+ .endm
+
+ __CPUINIT
+
+ /*
+ * AT
+ * TFR EV X F IHD LR S
+ * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
+ * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+ * 11 0 110 1 0011 1100 .111 1101 < we want
+ */
+ .align 2
+ .type v7_crval, #object
+v7_crval:
+ crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
+
+ .previous
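
The v7_ttb_setup macro above compares the physical and virtual addresses of swapper_pg_dir to decide whether PHYS_OFFSET exceeds PAGE_OFFSET, and only programs TTBCR.T1SZ and the TTBR1 offset when it does not. A sketch of that arithmetic, assuming the usual VMSPLIT PAGE_OFFSET values and a hypothetical pgd address:

#include <stdio.h>

/* Covers only the PHYS_OFFSET <= PAGE_OFFSET path; the bhi branch in the
 * macro skips all of this otherwise.  Offsets are taken from the assembly. */
static void lpae_ttb_setup(unsigned int page_offset, unsigned int swapper_pgd_phys)
{
	unsigned int t1sz = (page_offset >> 30) - 1;	/* 0, 1 or 2 */
	unsigned int ttbcr = t1sz << 16;		/* TTBCR.T1SZ field */
	unsigned int ttbr1 = swapper_pgd_phys;

	if (page_offset == 0x80000000)		/* VMSPLIT_2G, T1SZ = 1 */
		ttbr1 += 1 << 4;		/* skip two 8-byte L1 entries */
	else if (page_offset == 0xc0000000)	/* VMSPLIT_3G, T1SZ = 2 */
		ttbr1 += 4096 * (1 + 3);	/* only L2 used: skip pgd + 3 pmds */
	/* VMSPLIT_1G (0x40000000) needs no TTBR1 adjustment */

	printf("TTBCR |= %#x, TTBR1 = %#x\n", ttbcr, ttbr1);
}

int main(void)
{
	lpae_ttb_setup(0xc0000000, 0x80004000);	/* 3G/1G split, hypothetical pgd */
	return 0;
}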
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c559ac3814..7e9b5bf910c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -19,24 +19,11 @@
#include "proc-macros.S"
-#define TTB_S (1 << 1)
-#define TTB_RGN_NC (0 << 3)
-#define TTB_RGN_OC_WBWA (1 << 3)
-#define TTB_RGN_OC_WT (2 << 3)
-#define TTB_RGN_OC_WB (3 << 3)
-#define TTB_NOS (1 << 5)
-#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
-#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
-#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
-#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
-
-/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS_UP PMD_SECT_WB
-
-/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
+#ifdef CONFIG_ARM_LPAE
+#include "proc-v7-3level.S"
+#else
+#include "proc-v7-2level.S"
+#endif
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -63,6 +50,7 @@ ENDPROC(cpu_v7_proc_fin)
* caches disabled.
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -71,6 +59,7 @@ ENTRY(cpu_v7_reset)
isb
mov pc, r0
ENDPROC(cpu_v7_reset)
+ .popsection
/*
* cpu_v7_do_idle()
@@ -97,127 +86,12 @@ ENTRY(cpu_v7_dcache_clean_area)
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
-/*
- * cpu_v7_switch_mm(pgd_phys, tsk)
- *
- * Set the translation table base pointer to be pgd_phys
- *
- * - pgd_phys - physical address of new TTB
- *
- * It is assumed that:
- * - we are not using split page tables
- */
-ENTRY(cpu_v7_switch_mm)
-#ifdef CONFIG_MMU
- mov r2, #0
- ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
- ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
-#ifdef CONFIG_ARM_ERRATA_430973
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
- mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
- isb
-1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
- isb
-#ifdef CONFIG_ARM_ERRATA_754322
- dsb
-#endif
- mcr p15, 0, r1, c13, c0, 1 @ set context ID
- isb
-#endif
- mov pc, lr
-ENDPROC(cpu_v7_switch_mm)
-
-/*
- * cpu_v7_set_pte_ext(ptep, pte)
- *
- * Set a level 2 translation table entry.
- *
- * - ptep - pointer to level 2 translation table entry
- * (hardware version is stored at +2048 bytes)
- * - pte - PTE value to store
- * - ext - value for extended PTE bits
- */
-ENTRY(cpu_v7_set_pte_ext)
-#ifdef CONFIG_MMU
- str r1, [r0] @ linux version
-
- bic r3, r1, #0x000003f0
- bic r3, r3, #PTE_TYPE_MASK
- orr r3, r3, r2
- orr r3, r3, #PTE_EXT_AP0 | 2
-
- tst r1, #1 << 4
- orrne r3, r3, #PTE_EXT_TEX(1)
-
- eor r1, r1, #L_PTE_DIRTY
- tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
- orrne r3, r3, #PTE_EXT_APX
-
- tst r1, #L_PTE_USER
- orrne r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
- @ allow kernel read/write access to read-only user pages
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
-
- tst r1, #L_PTE_XN
- orrne r3, r3, #PTE_EXT_XN
-
- tst r1, #L_PTE_YOUNG
- tstne r1, #L_PTE_PRESENT
- moveq r3, #0
-
- ARM( str r3, [r0, #2048]! )
- THUMB( add r0, r0, #2048 )
- THUMB( str r3, [r0] )
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
-#endif
- mov pc, lr
-ENDPROC(cpu_v7_set_pte_ext)
-
string cpu_v7_name, "ARMv7 Processor"
.align
- /*
- * Memory region attributes with SCTLR.TRE=1
- *
- * n = TEX[0],C,B
- * TR = PRRR[2n+1:2n] - memory type
- * IR = NMRR[2n+1:2n] - inner cacheable property
- * OR = NMRR[2n+17:2n+16] - outer cacheable property
- *
- * n TR IR OR
- * UNCACHED 000 00
- * BUFFERABLE 001 10 00 00
- * WRITETHROUGH 010 10 10 10
- * WRITEBACK 011 10 11 11
- * reserved 110
- * WRITEALLOC 111 10 01 01
- * DEV_SHARED 100 01
- * DEV_NONSHARED 100 01
- * DEV_WC 001 10
- * DEV_CACHED 011 10
- *
- * Other attributes:
- *
- * DS0 = PRRR[16] = 0 - device shareable property
- * DS1 = PRRR[17] = 1 - device shareable property
- * NS0 = PRRR[18] = 0 - normal shareable property
- * NS1 = PRRR[19] = 1 - normal shareable property
- * NOS = PRRR[24+n] = 1 - not outer shareable
- */
-.equ PRRR, 0xff0a81a8
-.equ NMRR, 0x40e040e0
-
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
-.equ cpu_v7_suspend_size, 4 * 7
+.equ cpu_v7_suspend_size, 4 * 8
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
@@ -226,10 +100,11 @@ ENTRY(cpu_v7_do_suspend)
stmia r0!, {r4 - r5}
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 1 @ TTB 1
+ mrc p15, 0, r11, c2, c0, 2 @ TTB control register
mrc p15, 0, r8, c1, c0, 0 @ Control register
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
- stmia r0, {r6 - r10}
+ stmia r0, {r6 - r11}
ldmfd sp!, {r4 - r10, pc}
ENDPROC(cpu_v7_do_suspend)
@@ -241,13 +116,15 @@ ENTRY(cpu_v7_do_resume)
ldmia r0!, {r4 - r5}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
- ldmia r0, {r6 - r10}
+ ldmia r0, {r6 - r11}
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
+#ifndef CONFIG_ARM_LPAE
ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
+#endif
mcr p15, 0, r1, c2, c0, 0 @ TTB 0
mcr p15, 0, r7, c2, c0, 1 @ TTB 1
- mcr p15, 0, ip, c2, c0, 2 @ TTB control register
+ mcr p15, 0, r11, c2, c0, 2 @ TTB control register
mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
teq r4, r9 @ Is it already set?
mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
@@ -284,6 +161,7 @@ __v7_ca5mp_setup:
__v7_ca9mp_setup:
mov r10, #(1 << 0) @ TLB ops broadcasting
b 1f
+__v7_ca7mp_setup:
__v7_ca15mp_setup:
mov r10, #0
1:
@@ -363,11 +241,13 @@ __v7_setup:
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
-#ifdef CONFIG_ARM_ERRATA_751472
- cmp r6, #0x30 @ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+ ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
+ ALT_UP_B(1f)
mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrlt r10, r10, #1 << 11 @ set bit #11
mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
+1:
#endif
3: mov r10, #0
@@ -377,12 +257,7 @@ __v7_setup:
dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
- mcr p15, 0, r10, c2, c0, 2 @ TTB control register
- ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
- ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
- ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
- ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
- mcr p15, 0, r8, c2, c0, 1 @ load TTB1
+ v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
ldr r5, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
@@ -404,16 +279,7 @@ __v7_setup:
mov pc, lr @ return to head.S:__ret
ENDPROC(__v7_setup)
- /* AT
- * TFR EV X F I D LR S
- * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
- * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
- * 1 0 110 0011 1100 .111 1101 < we want
- */
- .type v7_crval, #object
-v7_crval:
- crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
-
+ .align 2
__v7_setup_stack:
.space 4 * 11 @ 11 registers
@@ -435,11 +301,11 @@ __v7_setup_stack:
*/
.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
- PMD_FLAGS_SMP | \mm_mmuflags)
+ PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
- PMD_FLAGS_UP | \mm_mmuflags)
- .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \io_mmuflags
+ PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
+ .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
W(b) \initfunc
.long cpu_arch_name
.long cpu_elf_name
@@ -452,6 +318,7 @@ __v7_setup_stack:
.long v7_cache_fns
.endm
+#ifndef CONFIG_ARM_LPAE
/*
* ARM Ltd. Cortex A5 processor.
*/
@@ -463,6 +330,16 @@ __v7_ca5mp_proc_info:
.size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
/*
+ * ARM Ltd. Cortex A7 processor.
+ */
+ .type __v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+ .long 0x410fc070
+ .long 0xff0ffff0
+ __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
+ /*
* ARM Ltd. Cortex A9 processor.
*/
.type __v7_ca9mp_proc_info, #object
@@ -471,6 +348,7 @@ __v7_ca9mp_proc_info:
.long 0xff0ffff0
__v7_proc __v7_ca9mp_setup
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+#endif /* CONFIG_ARM_LPAE */
/*
* ARM Ltd. Cortex A15 processor.
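
The suspend hunks above grow the per-CPU save area from seven to eight words so that TTBCR, read into r11, is saved and restored explicitly instead of being written from ip on resume. A sketch of the resulting save-area layout, in stmia order ({r4-r5} first, then {r6-r11}); the struct is illustrative only:

#include <stdint.h>

struct v7_suspend_regs {			/* 4 * 8 bytes = cpu_v7_suspend_size */
	uint32_t fcse_pid;	/* c13, c0, 0 - FCSE/PID */
	uint32_t tpid_uro;	/* c13, c0, 3 - user r/o thread ID */
	uint32_t dacr;		/* c3,  c0, 0 - domain ID */
	uint32_t ttbr1;		/* c2,  c0, 1 - TTB 1 */
	uint32_t sctlr;		/* c1,  c0, 0 - control register */
	uint32_t actlr;		/* c1,  c0, 1 - auxiliary control */
	uint32_t cpacr;		/* c1,  c0, 2 - coprocessor access control */
	uint32_t ttbcr;		/* c2,  c0, 2 - TTB control, newly saved in r11 */
};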
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index abf0507a08a..b0d57869da2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
mov pc, r0
+ENDPROC(cpu_xsc3_reset)
+ .popsection
/*
* cpu_xsc3_do_idle()
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3277904beba..4ffebaa595e 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
* Beware PXA270 erratum E7.
*/
.align 5
+ .pushsection .idmap.text, "ax"
ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, r0
+ENDPROC(cpu_xscale_reset)
+ .popsection
/*
* cpu_xscale_do_idle()
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index cafa1835433..d18dde95b8a 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -20,6 +20,8 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <asm/opcodes.h>
+
/* This is the kernel's entry point into the floating point emulator.
It is called from the kernel with code similar to this:
@@ -81,11 +83,11 @@ nwfpe_enter:
mov r6, r0 @ save the opcode
emulate:
ldr r1, [sp, #S_PSR] @ fetch the PSR
- bl checkCondition @ check the condition
- cmp r0, #0 @ r0 = 0 ==> condition failed
+ bl arm_check_condition @ check the condition
+ cmp r0, #ARM_OPCODE_CONDTEST_PASS @ condition passed?
@ if condition code failed to match, next insn
- beq next @ get the next instruction;
+ bne next @ get the next instruction;
mov r0, r6 @ prepare for EmulateAll()
bl EmulateAll @ emulate the instruction
diff --git a/arch/arm/nwfpe/fpopcode.c b/arch/arm/nwfpe/fpopcode.c
index 922b8110758..ff983467308 100644
--- a/arch/arm/nwfpe/fpopcode.c
+++ b/arch/arm/nwfpe/fpopcode.c
@@ -61,29 +61,3 @@ const float32 float32Constant[] = {
0x41200000 /* single 10.0 */
};
-/* condition code lookup table
- index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- bit position in short is condition code: NZCV */
-static const unsigned short aCC[16] = {
- 0xF0F0, // EQ == Z set
- 0x0F0F, // NE
- 0xCCCC, // CS == C set
- 0x3333, // CC
- 0xFF00, // MI == N set
- 0x00FF, // PL
- 0xAAAA, // VS == V set
- 0x5555, // VC
- 0x0C0C, // HI == C set && Z clear
- 0xF3F3, // LS == C clear || Z set
- 0xAA55, // GE == (N==V)
- 0x55AA, // LT == (N!=V)
- 0x0A05, // GT == (!Z && (N==V))
- 0xF5FA, // LE == (Z || (N!=V))
- 0xFFFF, // AL always
- 0 // NV
-};
-
-unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes)
-{
- return (aCC[opcode >> 28] >> (ccodes >> 28)) & 1;
-}
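
For reference, the deleted table encodes, for each of the 16 condition codes, a 16-bit mask indexed by the NZCV flag nibble: bit n is set when the condition passes with the flags equal to n. The emulator now calls the shared arm_check_condition() helper and compares against ARM_OPCODE_CONDTEST_PASS, as the entry.S hunk above shows. A small self-contained check of the old table's semantics, using a hypothetical ADDEQ encoding:

#include <assert.h>
#include <stdio.h>

/* Table reproduced from the code deleted above, for illustration only. */
static const unsigned short aCC[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000,
};

static unsigned int cond_passes(unsigned int opcode, unsigned int cpsr)
{
	/* top nibble of the opcode selects the row, the NZCV nibble of the
	 * PSR selects the bit */
	return (aCC[opcode >> 28] >> (cpsr >> 28)) & 1;
}

int main(void)
{
	unsigned int addeq = 0x00810002;	/* ADDEQ r0, r1, r2: condition field 0 = EQ */

	assert(cond_passes(addeq, 0x40000000));		/* Z set   -> EQ passes */
	assert(!cond_passes(addeq, 0x00000000));	/* Z clear -> EQ fails  */
	puts("condition table behaves as expected");
	return 0;
}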
diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h
index 786e4c96156..78f02dbfaa8 100644
--- a/arch/arm/nwfpe/fpopcode.h
+++ b/arch/arm/nwfpe/fpopcode.h
@@ -475,9 +475,6 @@ static inline unsigned int getDestinationSize(const unsigned int opcode)
return (nRc);
}
-extern unsigned int checkCondition(const unsigned int opcode,
- const unsigned int ccodes);
-
extern const float64 float64Constant[];
extern const float32 float32Constant[];
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66ad22..4e0a371630b 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile
index 69b09c1cec8..a99dc15a70f 100644
--- a/arch/arm/plat-iop/Makefile
+++ b/arch/arm/plat-iop/Makefile
@@ -10,10 +10,10 @@ obj-$(CONFIG_ARCH_IOP32X) += i2c.o
obj-$(CONFIG_ARCH_IOP32X) += pci.o
obj-$(CONFIG_ARCH_IOP32X) += setup.o
obj-$(CONFIG_ARCH_IOP32X) += time.o
-obj-$(CONFIG_ARCH_IOP32X) += io.o
obj-$(CONFIG_ARCH_IOP32X) += cp6.o
obj-$(CONFIG_ARCH_IOP32X) += adma.o
obj-$(CONFIG_ARCH_IOP32X) += pmu.o
+obj-$(CONFIG_ARCH_IOP32X) += restart.o
# IOP33X
obj-$(CONFIG_ARCH_IOP33X) += gpio.o
@@ -21,10 +21,10 @@ obj-$(CONFIG_ARCH_IOP33X) += i2c.o
obj-$(CONFIG_ARCH_IOP33X) += pci.o
obj-$(CONFIG_ARCH_IOP33X) += setup.o
obj-$(CONFIG_ARCH_IOP33X) += time.o
-obj-$(CONFIG_ARCH_IOP33X) += io.o
obj-$(CONFIG_ARCH_IOP33X) += cp6.o
obj-$(CONFIG_ARCH_IOP33X) += adma.o
obj-$(CONFIG_ARCH_IOP33X) += pmu.o
+obj-$(CONFIG_ARCH_IOP33X) += restart.o
# IOP13XX
obj-$(CONFIG_ARCH_IOP13XX) += cp6.o
diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c
deleted file mode 100644
index e15bc17db90..00000000000
--- a/arch/arm/plat-iop/io.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * iop3xx custom ioremap implementation
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-
-void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
- unsigned int mtype)
-{
- void __iomem * retval;
-
- switch (cookie) {
- case IOP3XX_PCI_LOWER_IO_PA ... IOP3XX_PCI_UPPER_IO_PA:
- retval = (void *) IOP3XX_PCI_IO_PHYS_TO_VIRT(cookie);
- break;
- case IOP3XX_PERIPHERAL_PHYS_BASE ... IOP3XX_PERIPHERAL_UPPER_PA:
- retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
- break;
- default:
- retval = __arm_ioremap_caller(cookie, size, mtype,
- __builtin_return_address(0));
- }
-
- return retval;
-}
-EXPORT_SYMBOL(__iop3xx_ioremap);
-
-void __iop3xx_iounmap(void __iomem *addr)
-{
- extern void __iounmap(volatile void __iomem *addr);
-
- switch ((u32) addr) {
- case IOP3XX_PCI_LOWER_IO_VA ... IOP3XX_PCI_UPPER_IO_VA:
- case IOP3XX_PERIPHERAL_VIRT_BASE ... IOP3XX_PERIPHERAL_UPPER_VA:
- goto skip;
- }
- __iounmap(addr);
-
-skip:
- return;
-}
-EXPORT_SYMBOL(__iop3xx_iounmap);
diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c
new file mode 100644
index 00000000000..6a85a0c502e
--- /dev/null
+++ b/arch/arm/plat-iop/restart.c
@@ -0,0 +1,19 @@
+/*
+ * restart.c
+ *
+ * Copyright (C) 2001 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <asm/hardware/iop3xx.h>
+#include <mach/hardware.h>
+
+void iop3xx_restart(char mode, const char *cmd)
+{
+ *IOP3XX_PCSR = 0x30;
+
+ /* Jump into ROM at address 0 */
+ soft_restart(0);
+}
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 568dd0223d1..cbfbbe46178 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -18,7 +18,6 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/timex.h>
-#include <linux/sched.h>
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -52,21 +51,12 @@ static struct clocksource iop_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static DEFINE_CLOCK_DATA(cd);
-
/*
* IOP sched_clock() implementation via its clocksource.
*/
-unsigned long long notrace sched_clock(void)
+static u32 notrace iop_read_sched_clock(void)
{
- u32 cyc = 0xffffffffu - read_tcr1();
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace iop_update_sched_clock(void)
-{
- u32 cyc = 0xffffffffu - read_tcr1();
- update_sched_clock(&cd, cyc, (u32)~0);
+ return 0xffffffffu - read_tcr1();
}
/*
@@ -152,7 +142,7 @@ void __init iop_init_time(unsigned long tick_rate)
{
u32 timer_ctl;
- init_sched_clock(&cd, iop_update_sched_clock, 32, tick_rate);
+ setup_sched_clock(iop_read_sched_clock, 32, tick_rate);
ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
iop_tick_rate = tick_rate;
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index b3a1f2b3ada..b30708e28c1 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -20,6 +20,7 @@ config ARCH_IMX_V6_V7
bool "i.MX3, i.MX6"
select AUTO_ZRELADDR if !ZBOOT_ROM
select ARM_PATCH_PHYS_VIRT
+ select MIGHT_HAVE_CACHE_L2X0
help
This enables support for systems based on the Freescale i.MX3 and i.MX6
family.
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index b9f0f5f499a..076db84f3e3 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -5,7 +5,6 @@
# Common support
obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
-obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_MXC_TZIC) += tzic.o
obj-$(CONFIG_MXC_AVIC) += avic.o
diff --git a/arch/arm/plat-mxc/cpufreq.c b/arch/arm/plat-mxc/cpufreq.c
index 74aac96cda2..73db34bf588 100644
--- a/arch/arm/plat-mxc/cpufreq.c
+++ b/arch/arm/plat-mxc/cpufreq.c
@@ -17,6 +17,7 @@
* the CPU clock speed on the fly.
*/
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -97,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy,
return ret;
}
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
int i;
diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
deleted file mode 100644
index 12f8f810901..00000000000
--- a/arch/arm/plat-mxc/gic.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/io.h>
-#include <asm/exception.h>
-#include <asm/localtimer.h>
-#include <asm/hardware/gic.h>
-#ifdef CONFIG_SMP
-#include <asm/smp.h>
-#endif
-
-asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
-{
- u32 irqstat, irqnr;
-
- do {
- irqstat = readl_relaxed(gic_cpu_base_addr + GIC_CPU_INTACK);
- irqnr = irqstat & 0x3ff;
- if (irqnr == 1023)
- break;
-
- if (irqnr > 15 && irqnr < 1021)
- handle_IRQ(irqnr, regs);
-#ifdef CONFIG_SMP
- else {
- writel_relaxed(irqstat, gic_cpu_base_addr +
- GIC_CPU_EOI);
- handle_IPI(irqnr, regs);
- }
-#endif
- } while (1);
-}
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 83b745a5e1b..1bf0df81bdc 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -71,8 +71,8 @@ extern int mx6q_clocks_init(void);
extern struct platform_device *mxc_register_gpio(char *name, int id,
resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
extern void mxc_set_cpu_type(unsigned int type);
+extern void mxc_restart(char, const char *);
extern void mxc_arch_reset_init(void __iomem *);
-extern void mx51_efikamx_reset(void);
extern int mx53_revision(void);
extern int mx53_display_revision(void);
@@ -85,12 +85,10 @@ enum mxc_cpu_pwr_mode {
};
extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
-extern void (*imx_idle)(void);
extern void imx_print_silicon_rev(const char *cpu, int srev);
void avic_handle_irq(struct pt_regs *);
void tzic_handle_irq(struct pt_regs *);
-void gic_handle_irq(struct pt_regs *);
#define imx1_handle_irq avic_handle_irq
#define imx21_handle_irq avic_handle_irq
@@ -123,6 +121,7 @@ static inline void imx_smp_prepare(void) {}
extern void imx_enable_cpu(int cpu, bool enable);
extern void imx_set_cpu_jump(int cpu, void *jump_addr);
extern void imx_src_init(void);
+extern void imx_src_prepare_restart(void);
extern void imx_gpc_init(void);
extern void imx_gpc_pre_suspend(void);
extern void imx_gpc_post_resume(void);
@@ -132,5 +131,12 @@ extern void imx53_evk_common_init(void);
extern void imx53_qsb_common_init(void);
extern void imx53_smd_common_init(void);
extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
+extern void imx6q_clock_map_io(void);
+
+#ifdef CONFIG_PM
extern void imx6q_pm_init(void);
+#else
+static inline void imx6q_pm_init(void) {}
+#endif
+
#endif
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index ca5cf26a04b..def5d30cb67 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -9,19 +9,8 @@
* published by the Free Software Foundation.
*/
-/* Unused, we use CONFIG_MULTI_IRQ_HANDLER */
-
.macro disable_fiq
.endm
- .macro get_irqnr_preamble, base, tmp
- .endm
-
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- .endm
-
- .macro test_for_ipi, irqnr, irqstat, base, tmp
- .endm
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx25.h b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
index bf64e1e594e..f0726d48df2 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx25.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
@@ -265,16 +265,20 @@
#define MX25_PAD_CSI_D2__CSI_D2 IOMUX_PAD(0x318, 0x120, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D2__UART5_RXD_MUX IOMUX_PAD(0x318, 0x120, 0x11, 0x578, 1, NO_PAD_CTRL)
#define MX25_PAD_CSI_D2__GPIO_1_27 IOMUX_PAD(0x318, 0x120, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D2__CSPI3_MOSI IOMUX_PAD(0x318, 0x120, 0x17, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D3__CSI_D3 IOMUX_PAD(0x31c, 0x124, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D3__GPIO_1_28 IOMUX_PAD(0x31c, 0x124, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D3__CSPI3_MISO IOMUX_PAD(0x31c, 0x124, 0x17, 0x4b4, 1, NO_PAD_CTRL)
#define MX25_PAD_CSI_D4__CSI_D4 IOMUX_PAD(0x320, 0x128, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D4__UART5_RTS IOMUX_PAD(0x320, 0x128, 0x11, 0x574, 1, NO_PAD_CTRL)
#define MX25_PAD_CSI_D4__GPIO_1_29 IOMUX_PAD(0x320, 0x128, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D4__CSPI3_SCLK IOMUX_PAD(0x320, 0x128, 0x17, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D5__CSI_D5 IOMUX_PAD(0x324, 0x12c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D5__GPIO_1_30 IOMUX_PAD(0x324, 0x12c, 0x15, 0, 0, NO_PAD_CTRL)
+#define MX25_PAD_CSI_D5__CSPI3_RDY IOMUX_PAD(0x324, 0x12c, 0x17, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D6__CSI_D6 IOMUX_PAD(0x328, 0x130, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CSI_D6__GPIO_1_31 IOMUX_PAD(0x328, 0x130, 0x15, 0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
index 97b19e7800b..2b7c08d13e8 100644
--- a/arch/arm/plat-mxc/include/mach/mx1.h
+++ b/arch/arm/plat-mxc/include/mach/mx1.h
@@ -12,8 +12,6 @@
#ifndef __MACH_MX1_H__
#define __MACH_MX1_H__
-#include <mach/vmalloc.h>
-
/*
* Memory map
*/
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 00a78193c68..d78298366a9 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -50,20 +50,6 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
-#define IMX_CHIP_REVISION_1_0_STRING "1.0"
-#define IMX_CHIP_REVISION_1_1_STRING "1.1"
-#define IMX_CHIP_REVISION_1_2_STRING "1.2"
-#define IMX_CHIP_REVISION_1_3_STRING "1.3"
-#define IMX_CHIP_REVISION_2_0_STRING "2.0"
-#define IMX_CHIP_REVISION_2_1_STRING "2.1"
-#define IMX_CHIP_REVISION_2_2_STRING "2.2"
-#define IMX_CHIP_REVISION_2_3_STRING "2.3"
-#define IMX_CHIP_REVISION_3_0_STRING "3.0"
-#define IMX_CHIP_REVISION_3_1_STRING "3.1"
-#define IMX_CHIP_REVISION_3_2_STRING "3.2"
-#define IMX_CHIP_REVISION_3_3_STRING "3.3"
-#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
-
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
#endif
@@ -182,7 +168,7 @@ struct cpu_op {
u32 cpu_rate;
};
-int tzic_enable_wake(int is_idle);
+int tzic_enable_wake(void);
extern struct cpu_op *(*get_cpu_op)(int *op);
#endif
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index cf88b3593fb..13ad0df2e86 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -17,16 +17,9 @@
#ifndef __ASM_ARCH_MXC_SYSTEM_H__
#define __ASM_ARCH_MXC_SYSTEM_H__
-extern void (*imx_idle)(void);
-
static inline void arch_idle(void)
{
- if (imx_idle != NULL)
- (imx_idle)();
- else
- cpu_do_idle();
+ cpu_do_idle();
}
-void arch_reset(char mode, const char *cmd);
-
#endif /* __ASM_ARCH_MXC_SYSTEM_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 88fd4045256..477971b0093 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
case MACH_TYPE_PCM043:
case MACH_TYPE_LILLY1131:
case MACH_TYPE_VPR200:
+ case MACH_TYPE_EUKREA_CPUIMX35SD:
uart_base = MX3X_UART1_BASE_ADDR;
break;
case MACH_TYPE_MAGX_ZN5:
diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h
deleted file mode 100644
index ef6379c474b..00000000000
--- a/arch/arm/plat-mxc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000 Russell King.
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MXC_VMALLOC_H__
-#define __ASM_ARCH_MXC_VMALLOC_H__
-
-/* vmalloc ending address */
-#define VMALLOC_END 0xf4000000UL
-
-#endif /* __ASM_ARCH_MXC_VMALLOC_H__ */
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 42d74ea5908..e032717f7d0 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
do_div(c, period_ns);
duty_cycles = c;
+ /*
+ * according to imx pwm RM, the real period value should be
+ * PERIOD value in PWMPR plus 2.
+ */
+ if (period_cycles > 2)
+ period_cycles -= 2;
+ else
+ period_cycles = 0;
+
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER(prescale) |
+ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
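
The duty/period computation above feeds PWMSAR and PWMPR; the new block subtracts two from the period because the hardware counts PWMPR + 2 clocks per cycle. A sketch of the whole calculation, where the prescale/period derivation earlier in pwm_config() is paraphrased from the driver and should be treated as an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

static void mx3_pwm_cycles(uint32_t clk_rate_hz, uint32_t period_ns, uint32_t duty_ns)
{
	uint64_t c = (uint64_t)clk_rate_hz * period_ns;
	uint32_t period_cycles, duty_cycles, prescale;

	period_cycles = c / 1000000000ULL;
	prescale = period_cycles / 0x10000 + 1;		/* keep within the 16-bit counter */
	period_cycles /= prescale;

	duty_cycles = (uint64_t)period_cycles * duty_ns / period_ns;

	/* new in this patch: PWMPR holds PERIOD - 2, per the i.MX PWM RM */
	period_cycles = period_cycles > 2 ? period_cycles - 2 : 0;

	printf("prescale=%u PWMPR=%u PWMSAR=%u\n", prescale, period_cycles, duty_cycles);
}

int main(void)
{
	mx3_pwm_cycles(66500000, 1000000, 250000);	/* hypothetical 66.5 MHz clock, 1 kHz, 25% duty */
	return 0;
}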
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index 9dad8dcc2ea..3599bf2cfd4 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/common.h>
@@ -28,25 +29,18 @@
#include <asm/system.h>
#include <asm/mach-types.h>
-void (*imx_idle)(void) = NULL;
void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL;
+EXPORT_SYMBOL_GPL(imx_ioremap);
static void __iomem *wdog_base;
/*
* Reset the system. It is called by machine_restart().
*/
-void arch_reset(char mode, const char *cmd)
+void mxc_restart(char mode, const char *cmd)
{
unsigned int wcr_enable;
-#ifdef CONFIG_MACH_MX51_EFIKAMX
- if (machine_is_mx51_efikamx()) {
- mx51_efikamx_reset();
- return;
- }
-#endif
-
if (cpu_is_mx1()) {
wcr_enable = (1 << 0);
} else {
@@ -70,7 +64,7 @@ void arch_reset(char mode, const char *cmd)
mdelay(50);
/* we'll take a jump through zero as a poor second */
- cpu_reset(0);
+ soft_restart(0);
}
void mxc_arch_reset_init(void __iomem *base)
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 4b0fe285e83..1c96cdb4c35 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -108,18 +108,9 @@ static void gpt_irq_acknowledge(void)
static void __iomem *sched_clock_reg;
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace mxc_read_sched_clock(void)
{
- cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
-
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace mxc_update_sched_clock(void)
-{
- cycle_t cyc = sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
- update_sched_clock(&cd, cyc, (u32)~0);
+ return sched_clock_reg ? __raw_readl(sched_clock_reg) : 0;
}
static int __init mxc_clocksource_init(struct clk *timer_clk)
@@ -129,7 +120,7 @@ static int __init mxc_clocksource_init(struct clk *timer_clk)
sched_clock_reg = reg;
- init_sched_clock(&cd, mxc_update_sched_clock, 32, c);
+ setup_sched_clock(mxc_read_sched_clock, 32, c);
return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
clocksource_mmio_readl_up);
}
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index a3c164c7ba8..98308ec1f32 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -73,7 +73,28 @@ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
#define tzic_set_irq_fiq NULL
#endif
-static unsigned int *wakeup_intr[4];
+#ifdef CONFIG_PM
+static void tzic_irq_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ int idx = gc->irq_base >> 5;
+
+ __raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
+}
+
+static void tzic_irq_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ int idx = gc->irq_base >> 5;
+
+ __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)),
+ tzic_base + TZIC_WAKEUP0(idx));
+}
+
+#else
+#define tzic_irq_suspend NULL
+#define tzic_irq_resume NULL
+#endif
static struct mxc_extra_irq tzic_extra_irq = {
#ifdef CONFIG_FIQ
@@ -91,12 +112,13 @@ static __init void tzic_init_gc(unsigned int irq_start)
handle_level_irq);
gc->private = &tzic_extra_irq;
gc->wake_enabled = IRQ_MSK(32);
- wakeup_intr[idx] = &gc->wake_active;
ct = gc->chip_types;
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake;
+ ct->chip.irq_suspend = tzic_irq_suspend;
+ ct->chip.irq_resume = tzic_irq_resume;
ct->regs.disable = TZIC_ENCLEAR0(idx);
ct->regs.enable = TZIC_ENSET0(idx);
@@ -167,23 +189,19 @@ void __init tzic_init_irq(void __iomem *irqbase)
/**
* tzic_enable_wake() - enable wakeup interrupt
*
- * @param is_idle 1 if called in idle loop (ENSET0 register);
- * 0 to be used when called from low power entry
* @return 0 if successful; non-zero otherwise
*/
-int tzic_enable_wake(int is_idle)
+int tzic_enable_wake(void)
{
- unsigned int i, v;
+ unsigned int i;
__raw_writel(1, tzic_base + TZIC_DSMINT);
if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0))
return -EAGAIN;
- for (i = 0; i < 4; i++) {
- v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
- *wakeup_intr[i];
- __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
- }
+ for (i = 0; i < 4; i++)
+ __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)),
+ tzic_base + TZIC_WAKEUP0(i));
return 0;
}
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index 30b6433d910..ad1b45b605a 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -17,7 +17,6 @@
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
-#include <linux/sched.h>
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
@@ -79,23 +78,12 @@ void __iomem *mtu_base; /* Assigned by machine code */
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace nomadik_read_sched_clock(void)
{
- u32 cyc;
-
if (unlikely(!mtu_base))
return 0;
- cyc = -readl(mtu_base + MTU_VAL(0));
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void notrace nomadik_update_sched_clock(void)
-{
- u32 cyc = -readl(mtu_base + MTU_VAL(0));
- update_sched_clock(&cd, cyc, (u32)~0);
+ return -readl(mtu_base + MTU_VAL(0));
}
#endif
@@ -231,9 +219,11 @@ void __init nmdk_timer_init(void)
rate, 200, 32, clocksource_mmio_readl_down))
pr_err("timer: failed to initialize clock source %s\n",
"mtu_0");
+
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
- init_sched_clock(&cd, nomadik_update_sched_clock, 32, rate);
+ setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif
+
/* Timer 1 is used for events */
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 985262242f2..9a584614e7e 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -4,7 +4,7 @@
# Common support
obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
- usb.o fb.o io.o counter_32k.o
+ usb.o fb.o counter_32k.o
obj-m :=
obj-n :=
obj- :=
@@ -19,7 +19,6 @@ obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index d9f10a31e60..06383b51e65 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
#include <linux/omapfb.h>
#include <plat/common.h>
@@ -21,6 +22,8 @@
#include <plat/vram.h>
#include <plat/dsp.h>
+#include <plat/omap-secure.h>
+
#define NO_LENGTH_CHECK 0xffffffff
@@ -65,4 +68,12 @@ void __init omap_reserve(void)
omapfb_reserve_sdram_memblock();
omap_vram_reserve_sdram_memblock();
omap_dsp_reserve_sdram_memblock();
+ omap_secure_ram_reserve_memblock();
+}
+
+void __init omap_init_consistent_dma_size(void)
+{
+#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
+ init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
+#endif
}
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index a6cbb712da5..5f0f2292b7f 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -17,7 +17,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/sched.h>
#include <linux/clocksource.h>
#include <asm/sched_clock.h>
@@ -37,41 +36,9 @@ static void __iomem *timer_32k_base;
#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
-/*
- * Returns current time from boot in nsecs. It's OK for this to wrap
- * around for now, as it's just a relative time stamp.
- */
-static DEFINE_CLOCK_DATA(cd);
-
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 32768, NSEC_PER_SEC, 60).
- * This gives a resolution of about 30us and a wrap period of about 36hrs.
- */
-#define SC_MULT 4000000000u
-#define SC_SHIFT 17
-
-static inline unsigned long long notrace _omap_32k_sched_clock(void)
-{
- u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
- return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
-}
-
-#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
-unsigned long long notrace sched_clock(void)
-{
- return _omap_32k_sched_clock();
-}
-#else
-unsigned long long notrace omap_32k_sched_clock(void)
-{
- return _omap_32k_sched_clock();
-}
-#endif
-
-static void notrace omap_update_sched_clock(void)
+static u32 notrace omap_32k_read_sched_clock(void)
{
- u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
- update_sched_clock(&cd, cyc, (u32)~0);
+ return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
}
/**
@@ -147,8 +114,7 @@ int __init omap_init_clocksource_32k(void)
clocksource_mmio_readl_up))
printk(err, "32k_counter");
- init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
- 32768, SC_MULT, SC_SHIFT);
+ setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
}
return 0;
}
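
The deleted SC_MULT/SC_SHIFT pair was a precomputed cycles-to-nanoseconds scale for the 32.768 kHz counter; setup_sched_clock(omap_32k_read_sched_clock, 32, 32768) now lets the common code derive an equivalent factor. As a quick check of the figures quoted in the removed comment: one tick scales to (4000000000 >> 17) ns ≈ 30518 ns, matching the 1 s / 32768 ≈ 30.52 µs resolution, and a 32-bit counter at 32768 Hz wraps after 2^32 / 32768 = 131072 s ≈ 36.4 hours.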
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c22217c2ee5..002fb4d96bb 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1034,6 +1034,18 @@ dma_addr_t omap_get_dma_src_pos(int lch)
if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
offset = p->dma_read(CSAC, lch);
+ if (!cpu_is_omap15xx()) {
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel has
+ * not been started (no data has been transferred so far).
+ * Return the programmed source start address in this case.
+ */
+ if (likely(p->dma_read(CDAC, lch)))
+ offset = p->dma_read(CSAC, lch);
+ else
+ offset = p->dma_read(CSSA, lch);
+ }
+
if (cpu_class_is_omap1())
offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
@@ -1062,8 +1074,16 @@ dma_addr_t omap_get_dma_dst_pos(int lch)
* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
* read before the DMA controller finished disabling the channel.
*/
- if (!cpu_is_omap15xx() && offset == 0)
+ if (!cpu_is_omap15xx() && offset == 0) {
offset = p->dma_read(CDAC, lch);
+ /*
+ * CDAC == 0 indicates that the DMA transfer on the channel has
+ * not been started (no data has been transferred so far).
+ * Return the programmed destination start address in this case.
+ */
+ if (unlikely(!offset))
+ offset = p->dma_read(CDSA, lch);
+ }
if (cpu_class_is_omap1())
offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
diff --git a/arch/arm/plat-omap/include/plat/am33xx.h b/arch/arm/plat-omap/include/plat/am33xx.h
new file mode 100644
index 00000000000..06c19bb7bca
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/am33xx.h
@@ -0,0 +1,25 @@
+/*
+ * This file contains the address info for various AM33XX modules.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc. - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_AM33XX_H
+#define __ASM_ARCH_AM33XX_H
+
+#define L4_SLOW_AM33XX_BASE 0x48000000
+
+#define AM33XX_SCM_BASE 0x44E10000
+#define AM33XX_CTRL_BASE AM33XX_SCM_BASE
+#define AM33XX_PRCM_BASE 0x44E00000
+
+#endif /* __ASM_ARCH_AM33XX_H */
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index 387a9638991..b299b8d201c 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -40,6 +40,7 @@ struct omap_clk {
#define CK_443X (1 << 11)
#define CK_TI816X (1 << 12)
#define CK_446X (1 << 13)
+#define CK_1710 (1 << 15) /* 1710 extra for rate selection */
#define CK_34XX (CK_3430ES1 | CK_3430ES2PLUS)
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 197ca03c3f7..240a7b9fd94 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -59,6 +59,8 @@ struct clkops {
#define RATE_IN_4430 (1 << 5)
#define RATE_IN_TI816X (1 << 6)
#define RATE_IN_4460 (1 << 7)
+#define RATE_IN_AM33XX (1 << 8)
+#define RATE_IN_TI814X (1 << 9)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
#define RATE_IN_34XX (RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
@@ -84,7 +86,7 @@ struct clkops {
struct clksel_rate {
u32 val;
u8 div;
- u8 flags;
+ u16 flags;
};
/**
@@ -165,8 +167,8 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
- u8 flags;
# endif
+ u8 flags;
};
#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index c50df4814f6..b4d7ec3fbfb 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -27,94 +27,14 @@
#ifndef __ARCH_ARM_MACH_OMAP_COMMON_H
#define __ARCH_ARM_MACH_OMAP_COMMON_H
-#include <linux/delay.h>
-
#include <plat/i2c.h>
+#include <plat/omap_hwmod.h>
-struct sys_timer;
-
-extern void omap_map_common_io(void);
-extern struct sys_timer omap1_timer;
-extern struct sys_timer omap2_timer;
-extern struct sys_timer omap3_timer;
-extern struct sys_timer omap3_secure_timer;
-extern struct sys_timer omap4_timer;
-extern bool omap_32k_timer_init(void);
extern int __init omap_init_clocksource_32k(void);
-extern unsigned long long notrace omap_32k_sched_clock(void);
extern void omap_reserve(void);
-
-void omap2420_init_early(void);
-void omap2430_init_early(void);
-void omap3430_init_early(void);
-void omap35xx_init_early(void);
-void omap3630_init_early(void);
-void omap3_init_early(void); /* Do not use this one */
-void am35xx_init_early(void);
-void ti816x_init_early(void);
-void omap4430_init_early(void);
+extern int omap_dss_reset(struct omap_hwmod *);
void omap_sram_init(void);
-/*
- * IO bases for various OMAP processors
- * Except the tap base, rest all the io bases
- * listed are physical addresses.
- */
-struct omap_globals {
- u32 class; /* OMAP class to detect */
- void __iomem *tap; /* Control module ID code */
- void __iomem *sdrc; /* SDRAM Controller */
- void __iomem *sms; /* SDRAM Memory Scheduler */
- void __iomem *ctrl; /* System Control Module */
- void __iomem *ctrl_pad; /* PAD Control Module */
- void __iomem *prm; /* Power and Reset Management */
- void __iomem *cm; /* Clock Management */
- void __iomem *cm2;
-};
-
-void omap2_set_globals_242x(void);
-void omap2_set_globals_243x(void);
-void omap2_set_globals_3xxx(void);
-void omap2_set_globals_443x(void);
-void omap2_set_globals_ti816x(void);
-
-/* These get called from omap2_set_globals_xxxx(), do not call these */
-void omap2_set_globals_tap(struct omap_globals *);
-void omap2_set_globals_sdrc(struct omap_globals *);
-void omap2_set_globals_control(struct omap_globals *);
-void omap2_set_globals_prcm(struct omap_globals *);
-
-void omap242x_map_io(void);
-void omap243x_map_io(void);
-void omap3_map_io(void);
-void omap4_map_io(void);
-
-
-/**
- * omap_test_timeout - busy-loop, testing a condition
- * @cond: condition to test until it evaluates to true
- * @timeout: maximum number of microseconds in the timeout
- * @index: loop index (integer)
- *
- * Loop waiting for @cond to become true or until at least @timeout
- * microseconds have passed. To use, define some integer @index in the
- * calling code. After running, if @index == @timeout, then the loop has
- * timed out.
- */
-#define omap_test_timeout(cond, timeout, index) \
-({ \
- for (index = 0; index < timeout; index++) { \
- if (cond) \
- break; \
- udelay(1); \
- } \
-})
-
-extern struct device *omap2_get_mpuss_device(void);
-extern struct device *omap2_get_iva_device(void);
-extern struct device *omap2_get_l3_device(void);
-extern struct device *omap4_get_dsp_device(void);
-
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
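
The omap_test_timeout() helper dropped from this header is a bounded busy-wait: after the loop, @index equal to @timeout means the condition never became true. It is reproduced below with a hypothetical caller to show the intended pattern; the register and bit used are assumptions of this sketch.

#include <linux/io.h>
#include <linux/delay.h>

#define omap_test_timeout(cond, timeout, index)		\
({								\
	for (index = 0; index < timeout; index++) {		\
		if (cond)					\
			break;					\
		udelay(1);					\
	}							\
})

/* hypothetical caller: wait up to 100 us for bit 0 of an idle-status register */
static int wait_module_idle(void __iomem *idlest)
{
	int i;

	omap_test_timeout(!(__raw_readl(idlest) & 1), 100, i);
	return (i == 100) ? -1 : 0;
}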
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 408a12f7920..6b51086fce1 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -69,6 +69,7 @@ unsigned int omap_rev(void);
* cpu_is_omap343x(): True for OMAP3430
* cpu_is_omap443x(): True for OMAP4430
* cpu_is_omap446x(): True for OMAP4460
+ * cpu_is_omap447x(): True for OMAP4470
*/
#define GET_OMAP_CLASS (omap_rev() & 0xff)
@@ -78,6 +79,22 @@ static inline int is_omap ##class (void) \
return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
}
+#define GET_AM_CLASS ((omap_rev() >> 24) & 0xff)
+
+#define IS_AM_CLASS(class, id) \
+static inline int is_am ##class (void) \
+{ \
+ return (GET_AM_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_TI_CLASS ((omap_rev() >> 24) & 0xff)
+
+#define IS_TI_CLASS(class, id) \
+static inline int is_ti ##class (void) \
+{ \
+ return (GET_TI_CLASS == (id)) ? 1 : 0; \
+}
+
#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
#define IS_OMAP_SUBCLASS(subclass, id) \
@@ -92,12 +109,21 @@ static inline int is_ti ##subclass (void) \
return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
}
+#define IS_AM_SUBCLASS(subclass, id) \
+static inline int is_am ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
IS_OMAP_CLASS(7xx, 0x07)
IS_OMAP_CLASS(15xx, 0x15)
IS_OMAP_CLASS(16xx, 0x16)
IS_OMAP_CLASS(24xx, 0x24)
IS_OMAP_CLASS(34xx, 0x34)
IS_OMAP_CLASS(44xx, 0x44)
+IS_AM_CLASS(33xx, 0x33)
+
+IS_TI_CLASS(81xx, 0x81)
IS_OMAP_SUBCLASS(242x, 0x242)
IS_OMAP_SUBCLASS(243x, 0x243)
@@ -105,8 +131,11 @@ IS_OMAP_SUBCLASS(343x, 0x343)
IS_OMAP_SUBCLASS(363x, 0x363)
IS_OMAP_SUBCLASS(443x, 0x443)
IS_OMAP_SUBCLASS(446x, 0x446)
+IS_OMAP_SUBCLASS(447x, 0x447)
IS_TI_SUBCLASS(816x, 0x816)
+IS_TI_SUBCLASS(814x, 0x814)
+IS_AM_SUBCLASS(335x, 0x335)
#define cpu_is_omap7xx() 0
#define cpu_is_omap15xx() 0
@@ -116,10 +145,15 @@ IS_TI_SUBCLASS(816x, 0x816)
#define cpu_is_omap243x() 0
#define cpu_is_omap34xx() 0
#define cpu_is_omap343x() 0
+#define cpu_is_ti81xx() 0
#define cpu_is_ti816x() 0
+#define cpu_is_ti814x() 0
+#define cpu_is_am33xx() 0
+#define cpu_is_am335x() 0
#define cpu_is_omap44xx() 0
#define cpu_is_omap443x() 0
#define cpu_is_omap446x() 0
+#define cpu_is_omap447x() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
@@ -322,7 +356,11 @@ IS_OMAP_TYPE(3517, 0x3517)
# undef cpu_is_omap3530
# undef cpu_is_omap3505
# undef cpu_is_omap3517
+# undef cpu_is_ti81xx
# undef cpu_is_ti816x
+# undef cpu_is_ti814x
+# undef cpu_is_am33xx
+# undef cpu_is_am335x
# define cpu_is_omap3430() is_omap3430()
# define cpu_is_omap3503() (cpu_is_omap3430() && \
(!omap3_has_iva()) && \
@@ -339,16 +377,22 @@ IS_OMAP_TYPE(3517, 0x3517)
!omap3_has_sgx())
# undef cpu_is_omap3630
# define cpu_is_omap3630() is_omap363x()
+# define cpu_is_ti81xx() is_ti81xx()
# define cpu_is_ti816x() is_ti816x()
+# define cpu_is_ti814x() is_ti814x()
+# define cpu_is_am33xx() is_am33xx()
+# define cpu_is_am335x() is_am335x()
#endif
# if defined(CONFIG_ARCH_OMAP4)
# undef cpu_is_omap44xx
# undef cpu_is_omap443x
# undef cpu_is_omap446x
+# undef cpu_is_omap447x
# define cpu_is_omap44xx() is_omap44xx()
# define cpu_is_omap443x() is_omap443x()
# define cpu_is_omap446x() is_omap446x()
+# define cpu_is_omap447x() is_omap447x()
# endif
/* Macros to detect if we have OMAP1 or OMAP2 */
@@ -386,15 +430,27 @@ IS_OMAP_TYPE(3517, 0x3517)
#define TI8168_REV_ES1_0 TI816X_CLASS
#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8))
+#define TI814X_CLASS 0x81400034
+#define TI8148_REV_ES1_0 TI814X_CLASS
+#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
+#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
+
+#define AM335X_CLASS 0x33500034
+#define AM335X_REV_ES1_0 AM335X_CLASS
+
#define OMAP443X_CLASS 0x44300044
#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
#define OMAP4430_REV_ES2_0 (OMAP443X_CLASS | (0x20 << 8))
#define OMAP4430_REV_ES2_1 (OMAP443X_CLASS | (0x21 << 8))
#define OMAP4430_REV_ES2_2 (OMAP443X_CLASS | (0x22 << 8))
+#define OMAP4430_REV_ES2_3 (OMAP443X_CLASS | (0x23 << 8))
#define OMAP446X_CLASS 0x44600044
#define OMAP4460_REV_ES1_0 (OMAP446X_CLASS | (0x10 << 8))
+#define OMAP447X_CLASS 0x44700044
+#define OMAP4470_REV_ES1_0 (OMAP447X_CLASS | (0x10 << 8))
+
void omap2_check_revision(void);
/*
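The cpu_is_*() machinery added above keys off bit-fields packed into omap_rev(): the low byte is the OMAP class, bits 20-31 the subclass, bits 24-31 the TI/AM class, and the *_REV_ES* constants put the silicon revision in bits 8-15. A small stand-alone sketch of that decoding, with constants copied from this hunk (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the defines in the hunk above. */
#define OMAP443X_CLASS		0x44300044u
#define OMAP4430_REV_ES2_3	(OMAP443X_CLASS | (0x23 << 8))
#define AM335X_CLASS		0x33500034u
#define TI814X_CLASS		0x81400034u

/* Field extraction mirroring GET_OMAP_CLASS, GET_OMAP_SUBCLASS and
 * GET_TI_CLASS/GET_AM_CLASS. */
static unsigned omap_class(uint32_t rev)	{ return rev & 0xff; }
static unsigned omap_subclass(uint32_t rev)	{ return (rev >> 20) & 0x0fff; }
static unsigned ti_am_class(uint32_t rev)	{ return (rev >> 24) & 0xff; }
static unsigned es_rev(uint32_t rev)		{ return (rev >> 8) & 0xff; }

int main(void)
{
	uint32_t revs[] = { OMAP4430_REV_ES2_3, AM335X_CLASS, TI814X_CLASS };

	for (unsigned i = 0; i < sizeof(revs) / sizeof(revs[0]); i++)
		printf("rev 0x%08x: class 0x%02x subclass 0x%03x ti/am 0x%02x es 0x%02x\n",
		       (unsigned)revs[i], omap_class(revs[i]),
		       omap_subclass(revs[i]), ti_am_class(revs[i]),
		       es_rev(revs[i]));
	return 0;
}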
diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
index e87efe1499b..e897978371c 100644
--- a/arch/arm/plat-omap/include/plat/hardware.h
+++ b/arch/arm/plat-omap/include/plat/hardware.h
@@ -286,6 +286,7 @@
#include <plat/omap24xx.h>
#include <plat/omap34xx.h>
#include <plat/omap44xx.h>
-#include <plat/ti816x.h>
+#include <plat/ti81xx.h>
+#include <plat/am33xx.h>
#endif /* __ASM_ARCH_OMAP_HARDWARE_H */
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index 7f2969eadb8..0696bae1818 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -73,6 +73,9 @@
#define OMAP4_L3_IO_OFFSET 0xb4000000
#define OMAP4_L3_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_IO_OFFSET) /* L3 */
+#define AM33XX_L4_WK_IO_OFFSET 0xb5000000
+#define AM33XX_L4_WK_IO_ADDRESS(pa) IOMEM((pa) + AM33XX_L4_WK_IO_OFFSET)
+
#define OMAP4_L3_PER_IO_OFFSET 0xb1100000
#define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
@@ -154,6 +157,15 @@
#define L4_34XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
/*
+ * ----------------------------------------------------------------------------
+ * AM33XX specific IO mapping
+ * ----------------------------------------------------------------------------
+ */
+#define L4_WK_AM33XX_PHYS L4_WK_AM33XX_BASE
+#define L4_WK_AM33XX_VIRT (L4_WK_AM33XX_PHYS + AM33XX_L4_WK_IO_OFFSET)
+#define L4_WK_AM33XX_SIZE SZ_4M /* 1MB of 128MB used, want 1MB sect */
+
+/*
* Need to look at the Size 4M for L4.
* VPOM3430 was not working for Int controller
*/
@@ -247,8 +259,6 @@
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
-void omap_ioremap_init(void);
-
extern u8 omap_readb(u32 pa);
extern u16 omap_readw(u32 pa);
extern u32 omap_readl(u32 pa);
@@ -257,83 +267,9 @@ extern void omap_writew(u16 v, u32 pa);
extern void omap_writel(u32 v, u32 pa);
struct omap_sdrc_params;
-
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-void omap7xx_map_io(void);
-#else
-static inline void omap_map_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP15XX
-void omap15xx_map_io(void);
-#else
-static inline void omap15xx_map_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP16XX
-void omap16xx_map_io(void);
-#else
-static inline void omap16xx_map_io(void)
-{
-}
-#endif
-
-void omap1_init_early(void);
-
-#ifdef CONFIG_SOC_OMAP2420
-extern void omap242x_map_common_io(void);
-#else
-static inline void omap242x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_SOC_OMAP2430
-extern void omap243x_map_common_io(void);
-#else
-static inline void omap243x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-extern void omap34xx_map_common_io(void);
-#else
-static inline void omap34xx_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_SOC_OMAPTI816X
-extern void omapti816x_map_common_io(void);
-#else
-static inline void omapti816x_map_common_io(void)
-{
-}
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-extern void omap44xx_map_common_io(void);
-#else
-static inline void omap44xx_map_common_io(void)
-{
-}
-#endif
-
-extern void omap2_init_common_infrastructure(void);
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
-#define __arch_ioremap omap_ioremap
-#define __arch_iounmap omap_iounmap
-
-void __iomem *omap_ioremap(unsigned long phys, size_t size, unsigned int type);
-void omap_iounmap(volatile void __iomem *addr);
-
extern void __init omap_init_consistent_dma_size(void);
#endif
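For the new AM33XX mapping, AM33XX_L4_WK_IO_ADDRESS() is plain pointer arithmetic: the physical base plus the 0xb5000000 offset wraps around 32 bits into the high virtual window, the same pattern the existing OMAP *_IO_OFFSET mappings follow. A quick stand-alone check of that arithmetic (addresses copied from this hunk and from the omap34xx.h hunk further down; not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Values copied from the diff: the offset from this hunk, the base from
 * the L4_WK_AM33XX_BASE define added to omap34xx.h below. */
#define AM33XX_L4_WK_IO_OFFSET	0xb5000000u
#define L4_WK_AM33XX_BASE	0x44C00000u

int main(void)
{
	uint32_t phys = L4_WK_AM33XX_BASE;
	uint32_t virt = phys + AM33XX_L4_WK_IO_OFFSET;	/* wraps mod 2^32 */

	printf("L4_WK phys 0x%08x -> virt 0x%08x\n",
	       (unsigned)phys, (unsigned)virt);
	return 0;
}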
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index a1d79ee1925..88be3e628b3 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -111,6 +111,32 @@ struct iommu_platform_data {
u32 da_end;
};
+/**
+ * struct iommu_arch_data - omap iommu private data
+ * @name: name of the iommu device
+ * @iommu_dev: handle of the iommu device
+ *
+ * This is an omap iommu private data object, which binds an iommu user
+ * to its iommu device. This object should be placed at the iommu user's
+ * dev_archdata so generic IOMMU API can be used without having to
+ * utilize omap-specific plumbing anymore.
+ */
+struct omap_iommu_arch_data {
+ const char *name;
+ struct omap_iommu *iommu_dev;
+};
+
+/**
+ * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
+ * @dev: iommu client device
+ */
+static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+
+ return arch_data->iommu_dev;
+}
+
/* IOMMU errors */
#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
@@ -163,8 +189,8 @@ extern int omap_iommu_set_isr(const char *name,
void *priv),
void *isr_priv);
-extern void omap_iommu_save_ctx(struct omap_iommu *obj);
-extern void omap_iommu_restore_ctx(struct omap_iommu *obj);
+extern void omap_iommu_save_ctx(struct device *dev);
+extern void omap_iommu_restore_ctx(struct device *dev);
extern int omap_install_iommu_arch(const struct iommu_functions *ops);
extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
@@ -176,6 +202,5 @@ extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
-struct device *omap_find_iommu_device(const char *name);
#endif /* __MACH_IOMMU_H */
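dev_to_omap_iommu() is just two pointer hops: the client device's archdata holds an omap_iommu_arch_data, which in turn points at the iommu device. A stripped-down user-space sketch of the same indirection, using mock stand-ins for the kernel structs (illustration only):

#include <stdio.h>

/* Mock stand-ins for the kernel types; only the indirection matters here. */
struct omap_iommu { const char *name; };
struct omap_iommu_arch_data { const char *name; struct omap_iommu *iommu_dev; };
struct dev_archdata { void *iommu; };
struct device { struct dev_archdata archdata; };

static struct omap_iommu *dev_to_omap_iommu(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	return arch_data->iommu_dev;
}

int main(void)
{
	struct omap_iommu mmu = { .name = "ipu" };
	struct omap_iommu_arch_data ad = { .name = "ipu", .iommu_dev = &mmu };
	struct device client = { .archdata = { .iommu = &ad } };

	printf("client is bound to iommu '%s'\n", dev_to_omap_iommu(&client)->name);
	return 0;
}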
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index 6af1a91c0f3..498e57cda6c 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -72,18 +72,18 @@ struct iovm_struct {
#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
-extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da);
+extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);
extern u32
-omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
const struct sg_table *sgt, u32 flags);
extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
- struct omap_iommu *obj, u32 da);
+ struct device *dev, u32 da);
extern u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
u32 da, size_t bytes, u32 flags);
extern void
-omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
const u32 da);
-extern void *omap_da_to_va(struct omap_iommu *obj, u32 da);
+extern void *omap_da_to_va(struct device *dev, u32 da);
#endif /* __IOMMU_MMAP_H */
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 30e10719b77..2efd6454bce 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -357,7 +357,7 @@
#define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69
#define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70
#define INT_35XX_USBOTG_IRQ 71
-#define INT_35XX_UART4 84
+#define INT_35XX_UART4_IRQ 84
#define INT_35XX_CCDC_VD0_IRQ 88
#define INT_35XX_CCDC_VD1_IRQ 92
#define INT_35XX_CCDC_VD2_IRQ 93
@@ -436,20 +436,6 @@
#define INTCPS_NR_MIR_REGS 3
#define INTCPS_NR_IRQS 96
-#ifndef __ASSEMBLY__
-extern void __iomem *omap_irq_base;
-void omap1_init_irq(void);
-void omap2_init_irq(void);
-void omap3_init_irq(void);
-void ti816x_init_irq(void);
-extern int omap_irq_pending(void);
-void omap_intc_save_context(void);
-void omap_intc_restore_context(void);
-void omap3_intc_suspend(void);
-void omap3_intc_prepare_idle(void);
-void omap3_intc_resume_idle(void);
-#endif
-
#include <mach/hardware.h>
#ifdef CONFIG_FIQ
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 94cf70afb23..f75946c3293 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -96,6 +96,7 @@ struct omap_mmc_platform_data {
*/
u8 wires; /* Used for the MMC driver on omap1 and 2420 */
u32 caps; /* Used for the MMC driver on 2430 and later */
+ u32 pm_caps; /* PM capabilities of the mmc */
/*
* nomux means "standard" muxing is wrong on this board, and
diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h
new file mode 100644
index 00000000000..64f9d1c7f1b
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap-secure.h
@@ -0,0 +1,13 @@
+#ifndef __OMAP_SECURE_H__
+#define __OMAP_SECURE_H__
+
+#include <linux/types.h>
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+extern int omap_secure_ram_reserve_memblock(void);
+#else
+static inline void omap_secure_ram_reserve_memblock(void)
+{ }
+#endif
+
+#endif /* __OMAP_SECURE_H__ */
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
index 2682043f5a5..9ff444469f3 100644
--- a/arch/arm/plat-omap/include/plat/omap-serial.h
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -19,6 +19,7 @@
#include <linux/serial_core.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <plat/mux.h>
@@ -33,6 +34,8 @@
#define OMAP_MODE13X_SPEED 230400
+#define OMAP_UART_SCR_TX_EMPTY 0x08
+
/* WER = 0x7F
* Enable module level wakeup in WER reg
*/
@@ -51,18 +54,27 @@
#define OMAP_UART_DMA_CH_FREE -1
-#define RX_TIMEOUT (3 * HZ)
#define OMAP_MAX_HSUART_PORTS 4
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
+#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
+
struct omap_uart_port_info {
bool dma_enabled; /* To specify DMA Mode */
unsigned int uartclk; /* UART clock rate */
- void __iomem *membase; /* ioremap cookie or NULL */
- resource_size_t mapbase; /* resource base */
- unsigned long irqflags; /* request_irq flags */
upf_t flags; /* UPF_* flags */
+ u32 errata;
+ unsigned int dma_rx_buf_size;
+ unsigned int dma_rx_timeout;
+ unsigned int autosuspend_timeout;
+ unsigned int dma_rx_poll_rate;
+
+ int (*get_context_loss_count)(struct device *);
+ void (*set_forceidle)(struct platform_device *);
+ void (*set_noidle)(struct platform_device *);
+ void (*enable_wakeup)(struct platform_device *, bool);
};
struct uart_omap_dma {
@@ -86,8 +98,9 @@ struct uart_omap_dma {
spinlock_t rx_lock;
/* timer to poll activity on rx dma */
struct timer_list rx_timer;
- int rx_buf_size;
- int rx_timeout;
+ unsigned int rx_buf_size;
+ unsigned int rx_poll_rate;
+ unsigned int rx_timeout;
};
struct uart_omap_port {
@@ -100,6 +113,10 @@ struct uart_omap_port {
unsigned char mcr;
unsigned char fcr;
unsigned char efr;
+ unsigned char dll;
+ unsigned char dlh;
+ unsigned char mdr1;
+ unsigned char scr;
int use_dma;
/*
@@ -111,6 +128,14 @@ struct uart_omap_port {
unsigned char msr_saved_flags;
char name[20];
unsigned long port_activity;
+ u32 context_loss_cnt;
+ u32 errata;
+ u8 wakeups_enabled;
+
+ struct pm_qos_request pm_qos_request;
+ u32 latency;
+ u32 calc_latency;
+ struct work_struct qos_work;
};
#endif /* __OMAP_SERIAL_H__ */
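Boards now pass the serial driver its DMA tuning and PM hooks through struct omap_uart_port_info instead of the removed membase/mapbase/irqflags fields. A self-contained sketch of the shape of that platform data, with a mocked struct carrying only a subset of the fields above and purely hypothetical values:

#include <stdio.h>
#include <stdbool.h>

/* Mocked subset of the fields added to struct omap_uart_port_info above;
 * the callback and platform types are opaque so the sketch stands alone. */
struct device;
struct platform_device;

struct omap_uart_port_info {
	bool dma_enabled;
	unsigned int uartclk;
	unsigned int dma_rx_buf_size;
	unsigned int dma_rx_timeout;
	unsigned int autosuspend_timeout;
	unsigned int dma_rx_poll_rate;
	int (*get_context_loss_count)(struct device *);
	void (*enable_wakeup)(struct platform_device *, bool);
};

int main(void)
{
	/* Hypothetical board values -- illustrative only. */
	struct omap_uart_port_info info = {
		.dma_enabled		= true,
		.uartclk		= 48000000,
		.dma_rx_buf_size	= 4096,
		.dma_rx_timeout		= 3000,
		.autosuspend_timeout	= 3000,
		.dma_rx_poll_rate	= 1,
	};

	printf("uartclk %u, rx buffer %u, autosuspend timeout %u\n",
	       info.uartclk, info.dma_rx_buf_size, info.autosuspend_timeout);
	return 0;
}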
diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
index b9e85886b9d..0d818acf391 100644
--- a/arch/arm/plat-omap/include/plat/omap34xx.h
+++ b/arch/arm/plat-omap/include/plat/omap34xx.h
@@ -35,6 +35,8 @@
#define L4_EMU_34XX_BASE 0x54000000
#define L3_34XX_BASE 0x68000000
+#define L4_WK_AM33XX_BASE 0x44C00000
+
#define OMAP3430_32KSYNCT_BASE 0x48320000
#define OMAP3430_CM_BASE 0x48004800
#define OMAP3430_PRM_BASE 0x48306800
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index ea2b8a6306e..c0d478e55c8 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -45,6 +45,7 @@
#define OMAP44XX_WKUPGEN_BASE 0x48281000
#define OMAP44XX_MCPDM_BASE 0x40132000
#define OMAP44XX_MCPDM_L3_BASE 0x49032000
+#define OMAP44XX_SAR_RAM_BASE 0x4a326000
#define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
#define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 8b372ede17c..647010109af 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -97,6 +97,7 @@ struct omap_hwmod_mux_info {
struct omap_device_pad *pads;
int nr_pads_dynamic;
struct omap_device_pad **pads_dynamic;
+ int *irqs;
bool enabled;
};
@@ -416,10 +417,13 @@ struct omap_hwmod_omap4_prcm {
* _HWMOD_NO_MPU_PORT: no path exists for the MPU to write to this module
* _HWMOD_WAKEUP_ENABLED: set when the omap_hwmod code has enabled ENAWAKEUP
* _HWMOD_SYSCONFIG_LOADED: set when the OCP_SYSCONFIG value has been cached
+ * _HWMOD_SKIP_ENABLE: set if hwmod enabled during init (HWMOD_INIT_NO_IDLE) -
+ * causes the first call to _enable() to only update the pinmux
*/
#define _HWMOD_NO_MPU_PORT (1 << 0)
#define _HWMOD_WAKEUP_ENABLED (1 << 1)
#define _HWMOD_SYSCONFIG_LOADED (1 << 2)
+#define _HWMOD_SKIP_ENABLE (1 << 3)
/*
* omap_hwmod._state definitions
@@ -604,6 +608,8 @@ int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
+int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx);
+
/*
* Chip variant-specific hwmod init routines - XXX should be converted
* to use initcalls once the initial boot ordering is straightened out
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 1ab9fd6abe6..198d1e6a4a6 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -2,7 +2,7 @@
* arch/arm/plat-omap/include/mach/serial.h
*
* Copyright (C) 2009 Texas Instruments
- * Addded OMAP4 support- Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Added OMAP4 support- Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -44,6 +44,7 @@
#define OMAP3_UART2_BASE OMAP2_UART2_BASE
#define OMAP3_UART3_BASE 0x49020000
#define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */
+#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */
/* OMAP4 serial ports */
#define OMAP4_UART1_BASE OMAP2_UART1_BASE
@@ -51,10 +52,10 @@
#define OMAP4_UART3_BASE 0x48020000
#define OMAP4_UART4_BASE 0x4806e000
-/* TI816X serial ports */
-#define TI816X_UART1_BASE 0x48020000
-#define TI816X_UART2_BASE 0x48022000
-#define TI816X_UART3_BASE 0x48024000
+/* TI81XX serial ports */
+#define TI81XX_UART1_BASE 0x48020000
+#define TI81XX_UART2_BASE 0x48022000
+#define TI81XX_UART3_BASE 0x48024000
/* AM3505/3517 UART4 */
#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
@@ -89,9 +90,9 @@
#define OMAP4UART2 OMAP2UART2
#define OMAP4UART3 43
#define OMAP4UART4 44
-#define TI816XUART1 81
-#define TI816XUART2 82
-#define TI816XUART3 83
+#define TI81XXUART1 81
+#define TI81XXUART2 82
+#define TI81XXUART3 83
#define ZOOM_UART 95 /* Only on zoom2/3 */
/* This is only used by 8250.c for omap1510 */
@@ -106,15 +107,13 @@
#ifndef __ASSEMBLER__
struct omap_board_data;
+struct omap_uart_port_info;
extern void omap_serial_init(void);
-extern void omap_serial_init_port(struct omap_board_data *bdata);
extern int omap_uart_can_sleep(void);
-extern void omap_uart_check_wakeup(void);
-extern void omap_uart_prepare_suspend(void);
-extern void omap_uart_prepare_idle(int num);
-extern void omap_uart_resume_idle(int num);
-extern void omap_uart_enable_irqs(int enable);
+extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
+extern void omap_serial_init_port(struct omap_board_data *bdata,
+ struct omap_uart_port_info *platform_data);
#endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index f500fc34d06..75aa1b2bef5 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -95,6 +95,10 @@ static inline void omap_push_sram_idle(void) {}
*/
#define OMAP2_SRAM_PA 0x40200000
#define OMAP3_SRAM_PA 0x40200000
+#ifdef CONFIG_OMAP4_ERRATA_I688
+#define OMAP4_SRAM_PA 0x40304000
+#define OMAP4_SRAM_VA 0xfe404000
+#else
#define OMAP4_SRAM_PA 0x40300000
-
+#endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/system.h b/arch/arm/plat-omap/include/plat/system.h
index c5fa9e92900..8e5ebd74b12 100644
--- a/arch/arm/plat-omap/include/plat/system.h
+++ b/arch/arm/plat-omap/include/plat/system.h
@@ -12,6 +12,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-extern void (*arch_reset)(char, const char *);
-
#endif
diff --git a/arch/arm/plat-omap/include/plat/ti816x.h b/arch/arm/plat-omap/include/plat/ti81xx.h
index 50510f5dda1..8f9843f7842 100644
--- a/arch/arm/plat-omap/include/plat/ti816x.h
+++ b/arch/arm/plat-omap/include/plat/ti81xx.h
@@ -1,5 +1,5 @@
/*
- * This file contains the address data for various TI816X modules.
+ * This file contains the address data for various TI81XX modules.
*
* Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
*
@@ -13,15 +13,15 @@
* GNU General Public License for more details.
*/
-#ifndef __ASM_ARCH_TI816X_H
-#define __ASM_ARCH_TI816X_H
+#ifndef __ASM_ARCH_TI81XX_H
+#define __ASM_ARCH_TI81XX_H
-#define L4_SLOW_TI816X_BASE 0x48000000
+#define L4_SLOW_TI81XX_BASE 0x48000000
-#define TI816X_SCM_BASE 0x48140000
-#define TI816X_CTRL_BASE TI816X_SCM_BASE
-#define TI816X_PRCM_BASE 0x48180000
+#define TI81XX_SCM_BASE 0x48140000
+#define TI81XX_CTRL_BASE TI81XX_SCM_BASE
+#define TI81XX_PRCM_BASE 0x48180000
-#define TI816X_ARM_INTC_BASE 0x48200000
+#define TI81XX_ARM_INTC_BASE 0x48200000
-#endif /* __ASM_ARCH_TI816X_H */
+#endif /* __ASM_ARCH_TI81XX_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 2f472e989ec..6ee90495ca4 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -99,9 +99,9 @@ static inline void flush(void)
#define DEBUG_LL_ZOOM(mach) \
_DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
-#define DEBUG_LL_TI816X(p, mach) \
- _DEBUG_LL_ENTRY(mach, TI816X_UART##p##_BASE, OMAP_PORT_SHIFT, \
- TI816XUART##p)
+#define DEBUG_LL_TI81XX(p, mach) \
+ _DEBUG_LL_ENTRY(mach, TI81XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ TI81XXUART##p)
static inline void __arch_decomp_setup(unsigned long arch_id)
{
@@ -177,7 +177,10 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
DEBUG_LL_ZOOM(omap_zoom3);
/* TI8168 base boards using UART3 */
- DEBUG_LL_TI816X(3, ti8168evm);
+ DEBUG_LL_TI81XX(3, ti8168evm);
+
+ /* TI8148 base boards using UART1 */
+ DEBUG_LL_TI81XX(1, ti8148evm);
} while (0);
}
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 17d3c939775..dc864b580da 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -100,9 +100,6 @@ extern void usb_musb_init(struct omap_musb_board_data *board_data);
extern void usbhs_init(const struct usbhs_omap_board_data *pdata);
-extern int omap_usbhs_enable(struct device *dev);
-extern void omap_usbhs_disable(struct device *dev);
-
extern int omap4430_phy_power(struct device *dev, int ID, int on);
extern int omap4430_phy_set_clk(struct device *dev, int on);
extern int omap4430_phy_init(struct device *dev);
@@ -114,6 +111,7 @@ extern void am35x_musb_reset(void);
extern void am35x_musb_phy_power(u8 on);
extern void am35x_musb_clear_irq(void);
extern void am35x_set_mode(u8 musb_mode);
+extern void ti81xx_musb_phy_power(u8 on);
/*
* FIXME correct answer depends on hmc_mode,
@@ -273,6 +271,37 @@ static inline void omap2_usbfs_init(struct omap_usb_config *pdata)
#define CONF2_OTGPWRDN (1 << 2)
#define CONF2_DATPOL (1 << 1)
+/* TI81XX specific definitions */
+#define USBCTRL0 0x620
+#define USBSTAT0 0x624
+
+/* TI816X PHY controls bits */
+#define TI816X_USBPHY0_NORMAL_MODE (1 << 0)
+#define TI816X_USBPHY_REFCLK_OSC (1 << 8)
+
+/* TI814X PHY controls bits */
+#define USBPHY_CM_PWRDN (1 << 0)
+#define USBPHY_OTG_PWRDN (1 << 1)
+#define USBPHY_CHGDET_DIS (1 << 2)
+#define USBPHY_CHGDET_RSTRT (1 << 3)
+#define USBPHY_SRCONDM (1 << 4)
+#define USBPHY_SINKONDP (1 << 5)
+#define USBPHY_CHGISINK_EN (1 << 6)
+#define USBPHY_CHGVSRC_EN (1 << 7)
+#define USBPHY_DMPULLUP (1 << 8)
+#define USBPHY_DPPULLUP (1 << 9)
+#define USBPHY_CDET_EXTCTL (1 << 10)
+#define USBPHY_GPIO_MODE (1 << 12)
+#define USBPHY_DPOPBUFCTL (1 << 13)
+#define USBPHY_DMOPBUFCTL (1 << 14)
+#define USBPHY_DPINPUT (1 << 15)
+#define USBPHY_DMINPUT (1 << 16)
+#define USBPHY_DPGPIO_PD (1 << 17)
+#define USBPHY_DMGPIO_PD (1 << 18)
+#define USBPHY_OTGVDET_EN (1 << 19)
+#define USBPHY_OTGSESSEND_EN (1 << 20)
+#define USBPHY_DATA_POLARITY (1 << 23)
+
#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_USB)
u32 omap1_usb0_init(unsigned nwires, unsigned is_device);
u32 omap1_usb1_init(unsigned nwires);
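The TI814X PHY controls above are plain bit masks over the USBCTRL0 register. A stand-alone sketch of composing such a control word (hypothetical settings; not the sequence ti81xx_musb_phy_power() actually programs):

#include <stdio.h>
#include <stdint.h>

/* Bit definitions copied from the TI814X PHY controls in the hunk above. */
#define USBPHY_CM_PWRDN		(1 << 0)
#define USBPHY_OTG_PWRDN	(1 << 1)
#define USBPHY_CHGDET_DIS	(1 << 2)
#define USBPHY_OTGVDET_EN	(1 << 19)
#define USBPHY_OTGSESSEND_EN	(1 << 20)

int main(void)
{
	/* Start from a hypothetical powered-down value, then clear the
	 * power-down bits and enable the OTG comparators. */
	uint32_t ctrl = USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN | USBPHY_CHGDET_DIS;

	ctrl &= ~(uint32_t)(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN);
	ctrl |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN;

	printf("USBCTRL0 candidate value: 0x%08x\n", (unsigned)ctrl);
	return 0;
}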
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
deleted file mode 100644
index 333871f5999..00000000000
--- a/arch/arm/plat-omap/io.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Common io.c file
- * This file is created by Russell King <rmk+kernel@arm.linux.org.uk>
- *
- * Copyright (C) 2009 Texas Instruments
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <plat/omap7xx.h>
-#include <plat/omap1510.h>
-#include <plat/omap16xx.h>
-#include <plat/omap24xx.h>
-#include <plat/omap34xx.h>
-#include <plat/omap44xx.h>
-
-#define BETWEEN(p,st,sz) ((p) >= (st) && (p) < ((st) + (sz)))
-#define XLATE(p,pst,vst) ((void __iomem *)((p) - (pst) + (vst)))
-
-static int initialized;
-
-/*
- * Intercept ioremap() requests for addresses in our fixed mapping regions.
- */
-void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
-{
-
- WARN(!initialized, "Do not use ioremap before init_early\n");
-
-#ifdef CONFIG_ARCH_OMAP1
- if (cpu_class_is_omap1()) {
- if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
- return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
- }
- if (cpu_is_omap7xx()) {
- if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE))
- return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START);
-
- if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE))
- return XLATE(p, OMAP7XX_DSPREG_BASE,
- OMAP7XX_DSPREG_START);
- }
- if (cpu_is_omap15xx()) {
- if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
- return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);
-
- if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
- return XLATE(p, OMAP1510_DSPREG_BASE,
- OMAP1510_DSPREG_START);
- }
- if (cpu_is_omap16xx()) {
- if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
- return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);
-
- if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
- return XLATE(p, OMAP16XX_DSPREG_BASE,
- OMAP16XX_DSPREG_START);
- }
-#endif
-#ifdef CONFIG_ARCH_OMAP2
- if (cpu_is_omap24xx()) {
- if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
- return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
- if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
- return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
- }
- if (cpu_is_omap2420()) {
- if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
- return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
- if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
- return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE);
- if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
- return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
- }
- if (cpu_is_omap2430()) {
- if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
- return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
- if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
- return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
- if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
- return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
- if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
- return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
- }
-#endif
-#ifdef CONFIG_ARCH_OMAP3
- if (cpu_is_ti816x()) {
- if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
- return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
- } else if (cpu_is_omap34xx()) {
- if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
- return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
- if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
- return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
- if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
- return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
- if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
- return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
- if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
- return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
- if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
- return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
- if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
- return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
- }
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- if (cpu_is_omap44xx()) {
- if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
- return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
- if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
- return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
- if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
- return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
- if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE))
- return XLATE(p, OMAP44XX_EMIF1_PHYS, \
- OMAP44XX_EMIF1_VIRT);
- if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE))
- return XLATE(p, OMAP44XX_EMIF2_PHYS, \
- OMAP44XX_EMIF2_VIRT);
- if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE))
- return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT);
- if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
- return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
- if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
- return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
- }
-#endif
- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(omap_ioremap);
-
-void omap_iounmap(volatile void __iomem *addr)
-{
- unsigned long virt = (unsigned long)addr;
-
- if (virt >= VMALLOC_START && virt < VMALLOC_END)
- __iounmap(addr);
-}
-EXPORT_SYMBOL(omap_iounmap);
-
-void __init omap_init_consistent_dma_size(void)
-{
-#ifdef CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE
- init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
-#endif
-}
-
-void __init omap_ioremap_init(void)
-{
- initialized++;
-}
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 8b28664d1c6..4243bdcc87b 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -40,7 +40,11 @@
#define OMAP1_SRAM_PA 0x20000000
#define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800)
#define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000)
+#ifdef CONFIG_OMAP4_ERRATA_I688
+#define OMAP4_SRAM_PUB_PA OMAP4_SRAM_PA
+#else
#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
+#endif
#if defined(CONFIG_ARCH_OMAP2PLUS)
#define SRAM_BOOTLOADER_SZ 0x00
@@ -141,11 +145,9 @@ static void __init omap_detect_sram(void)
omap_sram_size = 0x32000; /* 200K */
else if (cpu_is_omap15xx())
omap_sram_size = 0x30000; /* 192K */
- else if (cpu_is_omap1610() || cpu_is_omap1621() ||
- cpu_is_omap1710())
+ else if (cpu_is_omap1610() || cpu_is_omap1611() ||
+ cpu_is_omap1621() || cpu_is_omap1710())
omap_sram_size = 0x4000; /* 16K */
- else if (cpu_is_omap1611())
- omap_sram_size = SZ_256K;
else {
pr_err("Could not detect SRAM size\n");
omap_sram_size = 0x4000;
@@ -163,6 +165,10 @@ static void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
+#ifdef CONFIG_OMAP4_ERRATA_I688
+ omap_sram_start += PAGE_SIZE;
+ omap_sram_size -= SZ_16K;
+#endif
if (cpu_is_omap34xx()) {
/*
* SRAM must be marked as non-cached on OMAP3 since the
@@ -224,6 +230,9 @@ static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
{
BUG_ON(!_omap_sram_reprogram_clock);
+ /* On 730, bit 13 must always be 1 */
+ if (cpu_is_omap7xx())
+ ckctl |= 0x2000;
_omap_sram_reprogram_clock(dpllctl, ckctl);
}
diff --git a/arch/arm/plat-orion/Makefile b/arch/arm/plat-orion/Makefile
index 95a5fc53b6d..c20ce0f5ce3 100644
--- a/arch/arm/plat-orion/Makefile
+++ b/arch/arm/plat-orion/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := irq.o pcie.o time.o common.o mpp.o
+obj-y := irq.o pcie.o time.o common.o mpp.o addr-map.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
new file mode 100644
index 00000000000..367ca89ac40
--- /dev/null
+++ b/arch/arm/plat-orion/addr-map.c
@@ -0,0 +1,174 @@
+/*
+ * arch/arm/plat-orion/addr-map.c
+ *
+ * Address map functions for Marvell Orion based SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <plat/addr-map.h>
+
+struct mbus_dram_target_info orion_mbus_dram_info;
+
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return &orion_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+/*
+ * DDR target is the same on all Orion platforms.
+ */
+#define TARGET_DDR 0
+
+/*
+ * Helpers to get DDR bank info
+ */
+#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
+#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF 0x0000
+#define WIN_BASE_OFF 0x0004
+#define WIN_REMAP_LO_OFF 0x0008
+#define WIN_REMAP_HI_OFF 0x000c
+
+/*
+ * Default implementation
+ */
+static void __init __iomem *
+orion_win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
+{
+ return (void __iomem *)(cfg->bridge_virt_base + (win << 4));
+}
+
+/*
+ * Default implementation
+ */
+static int __init orion_cpu_win_can_remap(const struct orion_addr_map_cfg *cfg,
+ const int win)
+{
+ if (win < cfg->remappable_wins)
+ return 1;
+
+ return 0;
+}
+
+void __init orion_setup_cpu_win(const struct orion_addr_map_cfg *cfg,
+ const int win, const u32 base,
+ const u32 size, const u8 target,
+ const u8 attr, const int remap)
+{
+ void __iomem *addr = cfg->win_cfg_base(cfg, win);
+ u32 ctrl, base_high, remap_addr;
+
+ if (win >= cfg->num_wins) {
+ printk(KERN_ERR "setup_cpu_win: trying to allocate window "
+ "%d when only %d allowed\n", win, cfg->num_wins);
+ }
+
+ base_high = base & 0xffff0000;
+ ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1;
+
+ writel(base_high, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+ if (cfg->cpu_win_can_remap(cfg, win)) {
+ if (remap < 0)
+ remap_addr = base;
+ else
+ remap_addr = remap;
+ writel(remap_addr & 0xffff0000, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+}
+
+/*
+ * Configure a number of windows.
+ */
+static void __init orion_setup_cpu_wins(const struct orion_addr_map_cfg * cfg,
+ const struct orion_addr_map_info *info)
+{
+ while (info->win != -1) {
+ orion_setup_cpu_win(cfg, info->win, info->base, info->size,
+ info->target, info->attr, info->remap);
+ info++;
+ }
+}
+
+static void __init orion_disable_wins(const struct orion_addr_map_cfg * cfg)
+{
+ void __iomem *addr;
+ int i;
+
+ for (i = 0; i < cfg->num_wins; i++) {
+ addr = cfg->win_cfg_base(cfg, i);
+
+ writel(0, addr + WIN_BASE_OFF);
+ writel(0, addr + WIN_CTRL_OFF);
+ if (cfg->cpu_win_can_remap(cfg, i)) {
+ writel(0, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+ }
+}
+
+/*
+ * Disable, clear and configure windows.
+ */
+void __init orion_config_wins(struct orion_addr_map_cfg * cfg,
+ const struct orion_addr_map_info *info)
+{
+ if (!cfg->cpu_win_can_remap)
+ cfg->cpu_win_can_remap = orion_cpu_win_can_remap;
+
+ if (!cfg->win_cfg_base)
+ cfg->win_cfg_base = orion_win_cfg_base;
+
+ orion_disable_wins(cfg);
+
+ if (info)
+ orion_setup_cpu_wins(cfg, info);
+}
+
+/*
+ * Setup MBUS dram target info.
+ */
+void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
+ const u32 ddr_window_cpu_base)
+{
+ void __iomem *addr;
+ int i;
+ int cs;
+
+ orion_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ addr = (void __iomem *)ddr_window_cpu_base;
+
+ for (i = 0, cs = 0; i < 4; i++) {
+ u32 base = readl(addr + DDR_BASE_CS_OFF(i));
+ u32 size = readl(addr + DDR_SIZE_CS_OFF(i));
+
+ /*
+ * Chip select enabled?
+ */
+ if (size & 1) {
+ struct mbus_dram_window *w;
+
+ w = &orion_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ w->base = base & 0xffff0000;
+ w->size = (size | 0x0000ffff) + 1;
+ }
+ }
+ orion_mbus_dram_info.num_cs = cs;
+}
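The heart of the new file is the control-word encoding in orion_setup_cpu_win(): window size in the top 16 bits, attribute, target, and an enable bit. A stand-alone sketch reproducing that encoding for a hypothetical 256 MB DDR window, with the attribute following the 0xf & ~(1 << cs) rule from the chip-select scan above:

#include <stdio.h>
#include <stdint.h>

/* Mirrors: ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1 */
static uint32_t win_ctrl(uint32_t size, uint8_t target, uint8_t attr)
{
	return ((size - 1) & 0xffff0000) | ((uint32_t)attr << 8) |
	       ((uint32_t)target << 4) | 1;
}

int main(void)
{
	/* Hypothetical 256 MB window on DDR chip-select 0:
	 * target = TARGET_DDR (0), attr = 0xf & ~(1 << 0) = 0xe. */
	uint32_t base = 0x00000000, size = 0x10000000;

	printf("WIN_BASE 0x%08x WIN_CTRL 0x%08x\n",
	       (unsigned)(base & 0xffff0000), (unsigned)win_ctrl(size, 0, 0xe));
	return 0;
}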
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9e5451b3c8e..e5a2fde29b1 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
-#include <linux/mbus.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
@@ -203,13 +202,12 @@ void __init orion_rtc_init(unsigned long mapbase,
****************************************************************************/
static __init void ge_complete(
struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
- struct mbus_dram_target_info *mbus_dram_info, int tclk,
+ int tclk,
struct resource *orion_ge_resource, unsigned long irq,
struct platform_device *orion_ge_shared,
struct mv643xx_eth_platform_data *eth_data,
struct platform_device *orion_ge)
{
- orion_ge_shared_data->dram = mbus_dram_info;
orion_ge_shared_data->t_clk = tclk;
orion_ge_resource->start = irq;
orion_ge_resource->end = irq;
@@ -259,7 +257,6 @@ static struct platform_device orion_ge00 = {
};
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
@@ -267,7 +264,7 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge00_shared_data, mbus_dram_info, tclk,
+ ge_complete(&orion_ge00_shared_data, tclk,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
}
@@ -313,7 +310,6 @@ static struct platform_device orion_ge01 = {
};
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
@@ -321,7 +317,7 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge01_shared_data, mbus_dram_info, tclk,
+ ge_complete(&orion_ge01_shared_data, tclk,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
}
@@ -367,7 +363,6 @@ static struct platform_device orion_ge10 = {
};
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
@@ -375,7 +370,7 @@ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
{
fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge10_shared_data, mbus_dram_info, tclk,
+ ge_complete(&orion_ge10_shared_data, tclk,
orion_ge10_resources, irq, &orion_ge10_shared,
eth_data, &orion_ge10);
}
@@ -421,7 +416,6 @@ static struct platform_device orion_ge11 = {
};
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
@@ -429,7 +423,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
{
fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge11_shared_data, mbus_dram_info, tclk,
+ ge_complete(&orion_ge11_shared_data, tclk,
orion_ge11_resources, irq, &orion_ge11_shared,
eth_data, &orion_ge11);
}
@@ -592,8 +586,6 @@ void __init orion_wdt_init(unsigned long tclk)
/*****************************************************************************
* XOR
****************************************************************************/
-static struct mv_xor_platform_shared_data orion_xor_shared_data;
-
static u64 orion_xor_dmamask = DMA_BIT_MASK(32);
void __init orion_xor_init_channels(
@@ -632,9 +624,6 @@ static struct resource orion_xor0_shared_resources[] = {
static struct platform_device orion_xor0_shared = {
.name = MV_XOR_SHARED_NAME,
.id = 0,
- .dev = {
- .platform_data = &orion_xor_shared_data,
- },
.num_resources = ARRAY_SIZE(orion_xor0_shared_resources),
.resource = orion_xor0_shared_resources,
};
@@ -687,14 +676,11 @@ static struct platform_device orion_xor01_channel = {
},
};
-void __init orion_xor0_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase_low,
+void __init orion_xor0_init(unsigned long mapbase_low,
unsigned long mapbase_high,
unsigned long irq_0,
unsigned long irq_1)
{
- orion_xor_shared_data.dram = mbus_dram_info;
-
orion_xor0_shared_resources[0].start = mapbase_low;
orion_xor0_shared_resources[0].end = mapbase_low + 0xff;
orion_xor0_shared_resources[1].start = mapbase_high;
@@ -727,9 +713,6 @@ static struct resource orion_xor1_shared_resources[] = {
static struct platform_device orion_xor1_shared = {
.name = MV_XOR_SHARED_NAME,
.id = 1,
- .dev = {
- .platform_data = &orion_xor_shared_data,
- },
.num_resources = ARRAY_SIZE(orion_xor1_shared_resources),
.resource = orion_xor1_shared_resources,
};
@@ -828,11 +811,9 @@ static struct platform_device orion_ehci = {
},
};
-void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_init(unsigned long mapbase,
unsigned long irq)
{
- orion_ehci_data.dram = mbus_dram_info;
fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
irq);
@@ -854,11 +835,9 @@ static struct platform_device orion_ehci_1 = {
},
};
-void __init orion_ehci_1_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_1_init(unsigned long mapbase,
unsigned long irq)
{
- orion_ehci_data.dram = mbus_dram_info;
fill_resources(&orion_ehci_1, orion_ehci_1_resources,
mapbase, SZ_4K - 1, irq);
@@ -880,11 +859,9 @@ static struct platform_device orion_ehci_2 = {
},
};
-void __init orion_ehci_2_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_2_init(unsigned long mapbase,
unsigned long irq)
{
- orion_ehci_data.dram = mbus_dram_info;
fill_resources(&orion_ehci_2, orion_ehci_2_resources,
mapbase, SZ_4K - 1, irq);
@@ -911,11 +888,9 @@ static struct platform_device orion_sata = {
};
void __init orion_sata_init(struct mv_sata_platform_data *sata_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq)
{
- sata_data->dram = mbus_dram_info;
orion_sata.dev.platform_data = sata_data;
fill_resources(&orion_sata, orion_sata_resources,
mapbase, 0x5000 - 1, irq);
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 41ab97ebe4c..10d16088813 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
struct orion_gpio_chip *ochip;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ char gc_label[16];
if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
return;
+ snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+ orion_gpio_chip_count);
+
ochip = orion_gpio_chips + orion_gpio_chip_count;
- ochip->chip.label = "orion_gpio";
+ ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
ochip->chip.request = orion_gpio_request;
ochip->chip.direction_input = orion_gpio_direction_input;
ochip->chip.get = orion_gpio_get;
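The label change above gives every chip its own "orion_gpio%d" string instead of the shared literal. A user-space sketch of the same snprintf()-then-duplicate pattern, with strdup() standing in for kstrdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *make_label(int index)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "orion_gpio%d", index);
	return strdup(buf);	/* kstrdup(gc_label, GFP_KERNEL) in the kernel */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		char *label = make_label(i);

		printf("%s\n", label);
		free(label);
	}
	return 0;
}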
diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
new file mode 100644
index 00000000000..fd556f77562
--- /dev/null
+++ b/arch/arm/plat-orion/include/plat/addr-map.h
@@ -0,0 +1,53 @@
+/*
+ * arch/arm/plat-orion/include/plat/addr-map.h
+ *
+ * Marvell Orion SoC address map handling.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_ADDR_MAP_H
+#define __PLAT_ADDR_MAP_H
+
+extern struct mbus_dram_target_info orion_mbus_dram_info;
+
+struct orion_addr_map_cfg {
+ const int num_wins; /* Total number of windows */
+ const int remappable_wins;
+ const u32 bridge_virt_base;
+
+ /* If NULL, the default cpu_win_can_remap will be used, using
+ the value in remappable_wins */
+ int (*cpu_win_can_remap) (const struct orion_addr_map_cfg *cfg,
+ const int win);
+ /* If NULL, the default win_cfg_base will be used, using the
+ value in bridge_virt_base */
+ void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
+ const int win);
+};
+
+/*
+ * Information needed to setup one address mapping.
+ */
+struct orion_addr_map_info {
+ const int win;
+ const u32 base;
+ const u32 size;
+ const u8 target;
+ const u8 attr;
+ const int remap;
+};
+
+void __init orion_config_wins(struct orion_addr_map_cfg *cfg,
+ const struct orion_addr_map_info *info);
+
+void __init orion_setup_cpu_win(const struct orion_addr_map_cfg *cfg,
+ const int win, const u32 base,
+ const u32 size, const u8 target,
+ const u8 attr, const int remap);
+
+void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
+ const u32 ddr_window_cpu_base);
+#endif
diff --git a/arch/arm/plat-orion/include/plat/audio.h b/arch/arm/plat-orion/include/plat/audio.h
index 9cf1f781329..885f8abd927 100644
--- a/arch/arm/plat-orion/include/plat/audio.h
+++ b/arch/arm/plat-orion/include/plat/audio.h
@@ -1,11 +1,8 @@
#ifndef __PLAT_AUDIO_H
#define __PLAT_AUDIO_H
-#include <linux/mbus.h>
-
struct kirkwood_asoc_platform_data {
u32 tclk;
- struct mbus_dram_target_info *dram;
int burst;
};
#endif
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a63c357e2ab..0fe08d77e83 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -37,28 +37,24 @@ void __init orion_rtc_init(unsigned long mapbase,
unsigned long irq);
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
int tclk);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
int tclk);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
int tclk);
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq,
unsigned long irq_err,
@@ -82,8 +78,7 @@ void __init orion_spi_1_init(unsigned long mapbase,
void __init orion_wdt_init(unsigned long tclk);
-void __init orion_xor0_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase_low,
+void __init orion_xor0_init(unsigned long mapbase_low,
unsigned long mapbase_high,
unsigned long irq_0,
unsigned long irq_1);
@@ -93,20 +88,16 @@ void __init orion_xor1_init(unsigned long mapbase_low,
unsigned long irq_0,
unsigned long irq_1);
-void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_init(unsigned long mapbase,
unsigned long irq);
-void __init orion_ehci_1_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_1_init(unsigned long mapbase,
unsigned long irq);
-void __init orion_ehci_2_init(struct mbus_dram_target_info *mbus_dram_info,
- unsigned long mapbase,
+void __init orion_ehci_2_init(unsigned long mapbase,
unsigned long irq);
void __init orion_sata_init(struct mv_sata_platform_data *sata_data,
- struct mbus_dram_target_info *mbus_dram_info,
unsigned long mapbase,
unsigned long irq);
diff --git a/arch/arm/plat-orion/include/plat/ehci-orion.h b/arch/arm/plat-orion/include/plat/ehci-orion.h
index 4ec668e7746..6fc78e43042 100644
--- a/arch/arm/plat-orion/include/plat/ehci-orion.h
+++ b/arch/arm/plat-orion/include/plat/ehci-orion.h
@@ -19,7 +19,6 @@ enum orion_ehci_phy_ver {
};
struct orion_ehci_data {
- struct mbus_dram_target_info *dram;
enum orion_ehci_phy_ver phy_version;
};
diff --git a/arch/arm/plat-orion/include/plat/mv_xor.h b/arch/arm/plat-orion/include/plat/mv_xor.h
index bd5f3bdb4ae..2ba1f7d76ee 100644
--- a/arch/arm/plat-orion/include/plat/mv_xor.h
+++ b/arch/arm/plat-orion/include/plat/mv_xor.h
@@ -13,12 +13,6 @@
#define MV_XOR_SHARED_NAME "mv_xor_shared"
#define MV_XOR_NAME "mv_xor"
-struct mbus_dram_target_info;
-
-struct mv_xor_platform_shared_data {
- struct mbus_dram_target_info *dram;
-};
-
struct mv_xor_platform_data {
struct platform_device *shared;
int hw_id;
diff --git a/arch/arm/plat-orion/include/plat/mvsdio.h b/arch/arm/plat-orion/include/plat/mvsdio.h
index 14ca8867600..1190efedcb9 100644
--- a/arch/arm/plat-orion/include/plat/mvsdio.h
+++ b/arch/arm/plat-orion/include/plat/mvsdio.h
@@ -12,7 +12,6 @@
#include <linux/mbus.h>
struct mvsdio_platform_data {
- struct mbus_dram_target_info *dram;
unsigned int clock;
int gpio_card_detect;
int gpio_write_protect;
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index cc99163e73f..fe5b9e86274 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -20,8 +20,7 @@ int orion_pcie_x4_mode(void __iomem *base);
int orion_pcie_get_local_bus_nr(void __iomem *base);
void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
void orion_pcie_reset(void __iomem *base);
-void orion_pcie_setup(void __iomem *base,
- struct mbus_dram_target_info *dram);
+void orion_pcie_setup(void __iomem *base);
int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val);
int orion_pcie_rd_conf_tlp(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index af2d733c50b..86dbb5bdb17 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -13,6 +13,7 @@
#include <linux/mbus.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
+#include <plat/addr-map.h>
#include <linux/delay.h>
/*
@@ -175,8 +176,7 @@ static void __init orion_pcie_setup_wins(void __iomem *base,
writel(((size - 1) & 0xffff0000) | 1, base + PCIE_BAR_CTRL_OFF(1));
}
-void __init orion_pcie_setup(void __iomem *base,
- struct mbus_dram_target_info *dram)
+void __init orion_pcie_setup(void __iomem *base)
{
u16 cmd;
u32 mask;
@@ -184,7 +184,7 @@ void __init orion_pcie_setup(void __iomem *base,
/*
* Point PCIe unit MBUS decode windows to DRAM space.
*/
- orion_pcie_setup_wins(base, dram);
+ orion_pcie_setup_wins(base, &orion_mbus_dram_info);
/*
* Master + slave enable.
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 69a61367e4b..1ed8d1397fc 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -60,24 +59,10 @@ static u32 ticks_per_jiffy;
* Orion's sched_clock implementation. It has a resolution of
* at least 7.5ns (133MHz TCLK).
*/
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace orion_read_sched_clock(void)
{
- u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
-}
-
-
-static void notrace orion_update_sched_clock(void)
-{
- u32 cyc = ~readl(timer_base + TIMER0_VAL_OFF);
- update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-static void __init setup_sched_clock(unsigned long tclk)
-{
- init_sched_clock(&cd, orion_update_sched_clock, 32, tclk);
+ return ~readl(timer_base + TIMER0_VAL_OFF);
}
/*
@@ -217,7 +202,7 @@ orion_time_init(u32 _bridge_base, u32 _bridge_timer1_clr_mask,
/*
* Set scale and timer for sched_clock.
*/
- setup_sched_clock(tclk);
+ setup_sched_clock(orion_read_sched_clock, 32, tclk);
/*
* Setup free-running clocksource timer (interrupts
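The sched_clock rework drops the open-coded clock data and hands setup_sched_clock() a 32-bit read callback; orion_read_sched_clock() simply inverts the down-counting TIMER0 value so the result increases monotonically. A mock of that inversion with a simulated register in place of the real MMIO read:

#include <stdio.h>
#include <stdint.h>

static uint32_t timer0_val = 0xfffffff0u;	/* hardware counts down */

static uint32_t orion_read_sched_clock_mock(void)
{
	/* stands in for ~readl(timer_base + TIMER0_VAL_OFF) */
	return ~timer0_val;
}

int main(void)
{
	for (int tick = 0; tick < 3; tick++) {
		printf("raw 0x%08x -> sched cycles 0x%08x\n",
		       (unsigned)timer0_val,
		       (unsigned)orion_read_sched_clock_mock());
		timer0_val -= 0x10;	/* simulate the counter ticking down */
	}
	return 0;
}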
diff --git a/arch/arm/plat-pxa/include/plat/gpio-pxa.h b/arch/arm/plat-pxa/include/plat/gpio-pxa.h
deleted file mode 100644
index b6390beff32..00000000000
--- a/arch/arm/plat-pxa/include/plat/gpio-pxa.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __PLAT_PXA_GPIO_H
-#define __PLAT_PXA_GPIO_H
-
-struct irq_data;
-
-/*
- * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
- * one set of registers. The register offsets are organized below:
- *
- * GPLR GPDR GPSR GPCR GRER GFER GEDR
- * BANK 0 - 0x0000 0x000C 0x0018 0x0024 0x0030 0x003C 0x0048
- * BANK 1 - 0x0004 0x0010 0x001C 0x0028 0x0034 0x0040 0x004C
- * BANK 2 - 0x0008 0x0014 0x0020 0x002C 0x0038 0x0044 0x0050
- *
- * BANK 3 - 0x0100 0x010C 0x0118 0x0124 0x0130 0x013C 0x0148
- * BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C
- * BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150
- *
- * NOTE:
- * BANK 3 is only available on PXA27x and later processors.
- * BANK 4 and 5 are only available on PXA935
- */
-
-#define GPIO_BANK(n) (GPIO_REGS_VIRT + BANK_OFF(n))
-
-#define GPLR_OFFSET 0x00
-#define GPDR_OFFSET 0x0C
-#define GPSR_OFFSET 0x18
-#define GPCR_OFFSET 0x24
-#define GRER_OFFSET 0x30
-#define GFER_OFFSET 0x3C
-#define GEDR_OFFSET 0x48
-
-/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85).
- * Those cases currently cause holes in the GPIO number space, the
- * actual number of the last GPIO is recorded by 'pxa_last_gpio'.
- */
-extern int pxa_last_gpio;
-
-typedef int (*set_wake_t)(struct irq_data *d, unsigned int on);
-
-extern void pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn);
-
-#endif /* __PLAT_PXA_GPIO_H */
diff --git a/arch/arm/plat-pxa/include/plat/gpio.h b/arch/arm/plat-pxa/include/plat/gpio.h
deleted file mode 100644
index 258f77210b0..00000000000
--- a/arch/arm/plat-pxa/include/plat/gpio.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __PLAT_GPIO_H
-#define __PLAT_GPIO_H
-
-#define __ARM_GPIOLIB_COMPLEX
-
-/* The individual machine provides register offsets and NR_BUILTIN_GPIO */
-#include <mach/gpio-pxa.h>
-
-static inline int gpio_get_value(unsigned gpio)
-{
- if (__builtin_constant_p(gpio) && (gpio < NR_BUILTIN_GPIO))
- return GPLR(gpio) & GPIO_bit(gpio);
- else
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- if (__builtin_constant_p(gpio) && (gpio < NR_BUILTIN_GPIO)) {
- if (value)
- GPSR(gpio) = GPIO_bit(gpio);
- else
- GPCR(gpio) = GPIO_bit(gpio);
- } else
- __gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep __gpio_cansleep
-
-#endif /* __PLAT_GPIO_H */
diff --git a/arch/arm/plat-s3c24xx/common-smdk.c b/arch/arm/plat-s3c24xx/common-smdk.c
index bcc43f34627..084604be6ad 100644
--- a/arch/arm/plat-s3c24xx/common-smdk.c
+++ b/arch/arm/plat-s3c24xx/common-smdk.c
@@ -19,7 +19,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
diff --git a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
index a9276667c2f..c7adad0e8de 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq-debugfs.c
@@ -12,7 +12,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index b3d3d027899..46807993888 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -20,7 +20,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 3c6335307fb..1121df13e15 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -192,27 +192,6 @@ static unsigned long s3c24xx_read_idcode_v4(void)
return __raw_readl(S3C2410_GSTATUS1);
}
-/* Hook for arm_pm_restart to ensure we execute the reset code
- * with the caches enabled. It seems at least the S3C2440 has a problem
- * resetting if there is bus activity interrupted by the reset.
- */
-static void s3c24xx_pm_restart(char mode, const char *cmd)
-{
- if (mode != 's') {
- unsigned long flags;
-
- local_irq_save(flags);
- __cpuc_flush_kern_all();
- __cpuc_flush_user_all();
-
- arch_reset(mode, cmd);
- local_irq_restore(flags);
- }
-
- /* fallback, or unhandled */
- arm_machine_restart(mode, cmd);
-}
-
void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
{
/* initialise the io descriptors we need for initialisation */
@@ -226,7 +205,5 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
}
s3c24xx_init_cpu();
- arm_pm_restart = s3c24xx_pm_restart;
-
s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 53754bcf15a..9fe35348e03 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1437,11 +1437,10 @@ int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel)
size_t map_sz = sizeof(*nmap) * sel->map_size;
int ptr;
- nmap = kmalloc(map_sz, GFP_KERNEL);
+ nmap = kmemdup(sel->map, map_sz, GFP_KERNEL);
if (nmap == NULL)
return -ENOMEM;
- memcpy(nmap, sel->map, map_sz);
memcpy(&dma_sel, sel, sizeof(*sel));
dma_sel.map = nmap;
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index fc8c5f89954..bc42c04091f 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <asm/irq.h>
diff --git a/arch/arm/plat-s3c24xx/pm-simtec.c b/arch/arm/plat-s3c24xx/pm-simtec.c
index 663b280d65d..68296b1fe7e 100644
--- a/arch/arm/plat-s3c24xx/pm-simtec.c
+++ b/arch/arm/plat-s3c24xx/pm-simtec.c
@@ -18,7 +18,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/io.h>
diff --git a/arch/arm/plat-s3c24xx/s3c2410-clock.c b/arch/arm/plat-s3c24xx/s3c2410-clock.c
index def76aa3825..25dc4d4397b 100644
--- a/arch/arm/plat-s3c24xx/s3c2410-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2410-clock.c
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/delay.h>
diff --git a/arch/arm/plat-s3c24xx/s3c2412-iotiming.c b/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
index 0b46d3895d6..48eee39ab36 100644
--- a/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
+++ b/arch/arm/plat-s3c24xx/s3c2412-iotiming.c
@@ -17,7 +17,7 @@
#include <linux/ioport.h>
#include <linux/cpufreq.h>
#include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
index 5a21b15b2a9..95e68190d59 100644
--- a/arch/arm/plat-s3c24xx/s3c2443-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -297,13 +297,6 @@ static struct clksrc_clk clk_usb_bus_host = {
static struct clksrc_clk clksrc_clks[] = {
{
- /* ART baud-rate clock sourced from esysclk via a divisor */
- .clk = {
- .name = "uartclk",
- .parent = &clk_esysclk.clk,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
- }, {
/* camera interface bus-clock, divided down from esysclk */
.clk = {
.name = "camif-upll", /* same as 2440 name */
@@ -323,6 +316,15 @@ static struct clksrc_clk clksrc_clks[] = {
},
};
+static struct clksrc_clk clk_esys_uart = {
+ /* ART baud-rate clock sourced from esysclk via a divisor */
+ .clk = {
+ .name = "uartclk",
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
+};
+
static struct clk clk_i2s_ext = {
.name = "i2s-ext",
};
@@ -425,12 +427,6 @@ static struct clk init_clocks[] = {
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA5,
}, {
- .name = "hsmmc",
- .devname = "s3c-sdhci.1",
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_HSMMC,
- }, {
.name = "gpio",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
@@ -512,6 +508,14 @@ static struct clk init_clocks[] = {
}
};
+static struct clk hsmmc1_clk = {
+ .name = "hsmmc",
+ .devname = "s3c-sdhci.1",
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_HSMMC,
+};
+
static inline unsigned long s3c2443_get_hdiv(unsigned long clkcon0)
{
clkcon0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
@@ -577,6 +581,7 @@ static struct clk *clks[] __initdata = {
&clk_epll,
&clk_usb_bus,
&clk_armdiv,
+ &hsmmc1_clk,
};
static struct clksrc_clk *clksrcs[] __initdata = {
@@ -589,6 +594,13 @@ static struct clksrc_clk *clksrcs[] __initdata = {
&clk_arm,
};
+static struct clk_lookup s3c2443_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
+ CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
+ CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
+};
+
void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
unsigned int *divs, int nr_divs,
int divmask)
@@ -618,6 +630,7 @@ void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
/* See s3c2443/etc notes on disabling clocks at init time */
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
s3c2443_common_setup_clocks(get_mpll);
}
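
The new s3c2443_clk_lookup table registers device/connection-id aliases through the common clkdev layer, so drivers resolve clocks by name instead of relying on a per-clock devname. A small consumer-side sketch with a hypothetical probe function:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* after clkdev_add_table() above, the "s3c-sdhci.1" device can look up its
 * bus clock purely by connection id */
static int example_probe(struct platform_device *pdev)
{
	struct clk *busclk;

	busclk = clk_get(&pdev->dev, "mmc_busclk.0");
	if (IS_ERR(busclk))
		return PTR_ERR(busclk);

	clk_enable(busclk);
	/* ... use the device ... */
	clk_disable(busclk);
	clk_put(busclk);
	return 0;
}
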
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 9b9968fa869..8167ce66188 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -11,6 +11,7 @@ config PLAT_S5P
default y
select ARM_VIC if !ARCH_EXYNOS4
select ARM_GIC if ARCH_EXYNOS4
+ select GIC_NON_BANKED if ARCH_EXYNOS4
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_GPIO_TRACK
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index 876344038b8..30d8c3016e6 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -13,7 +13,6 @@ obj- :=
# Core files
obj-y += dev-uart.o
-obj-y += cpu.o
obj-y += clock.o
obj-y += irq.o
obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 5f84a3f13ef..963edea7f7e 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <asm/div64.h>
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
deleted file mode 100644
index a56959e8351..00000000000
--- a/arch/arm/plat-s5p/cpu.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/* linux/arch/arm/plat-s5p/cpu.c
- *
- * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * S5P CPU Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/s5p6440.h>
-#include <plat/s5p6450.h>
-#include <plat/s5pc100.h>
-#include <plat/s5pv210.h>
-#include <plat/exynos4.h>
-
-/* table of supported CPUs */
-
-static const char name_s5p6440[] = "S5P6440";
-static const char name_s5p6450[] = "S5P6450";
-static const char name_s5pc100[] = "S5PC100";
-static const char name_s5pv210[] = "S5PV210/S5PC110";
-static const char name_exynos4210[] = "EXYNOS4210";
-static const char name_exynos4212[] = "EXYNOS4212";
-static const char name_exynos4412[] = "EXYNOS4412";
-
-static struct cpu_table cpu_ids[] __initdata = {
- {
- .idcode = S5P6440_CPU_ID,
- .idmask = S5P64XX_CPU_MASK,
- .map_io = s5p6440_map_io,
- .init_clocks = s5p6440_init_clocks,
- .init_uarts = s5p6440_init_uarts,
- .init = s5p64x0_init,
- .name = name_s5p6440,
- }, {
- .idcode = S5P6450_CPU_ID,
- .idmask = S5P64XX_CPU_MASK,
- .map_io = s5p6450_map_io,
- .init_clocks = s5p6450_init_clocks,
- .init_uarts = s5p6450_init_uarts,
- .init = s5p64x0_init,
- .name = name_s5p6450,
- }, {
- .idcode = S5PC100_CPU_ID,
- .idmask = S5PC100_CPU_MASK,
- .map_io = s5pc100_map_io,
- .init_clocks = s5pc100_init_clocks,
- .init_uarts = s5pc100_init_uarts,
- .init = s5pc100_init,
- .name = name_s5pc100,
- }, {
- .idcode = S5PV210_CPU_ID,
- .idmask = S5PV210_CPU_MASK,
- .map_io = s5pv210_map_io,
- .init_clocks = s5pv210_init_clocks,
- .init_uarts = s5pv210_init_uarts,
- .init = s5pv210_init,
- .name = name_s5pv210,
- }, {
- .idcode = EXYNOS4210_CPU_ID,
- .idmask = EXYNOS4_CPU_MASK,
- .map_io = exynos4_map_io,
- .init_clocks = exynos4_init_clocks,
- .init_uarts = exynos4_init_uarts,
- .init = exynos_init,
- .name = name_exynos4210,
- }, {
- .idcode = EXYNOS4212_CPU_ID,
- .idmask = EXYNOS4_CPU_MASK,
- .map_io = exynos4_map_io,
- .init_clocks = exynos4_init_clocks,
- .init_uarts = exynos4_init_uarts,
- .init = exynos_init,
- .name = name_exynos4212,
- }, {
- .idcode = EXYNOS4412_CPU_ID,
- .idmask = EXYNOS4_CPU_MASK,
- .map_io = exynos4_map_io,
- .init_clocks = exynos4_init_clocks,
- .init_uarts = exynos4_init_uarts,
- .init = exynos_init,
- .name = name_exynos4412,
- },
-};
-
-/* minimal IO mapping */
-
-static struct map_desc s5p_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5P_VA_CHIPID,
- .pfn = __phys_to_pfn(S5P_PA_CHIPID),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(S5P_PA_SYSCON),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_TIMER,
- .pfn = __phys_to_pfn(S5P_PA_TIMER),
- .length = SZ_16K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_WATCHDOG,
- .pfn = __phys_to_pfn(S3C_PA_WDT),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5P_VA_SROMC,
- .pfn = __phys_to_pfn(S5P_PA_SROMC),
- .length = SZ_4K,
- .type = MT_DEVICE,
- },
-};
-
-/* read cpu identification code */
-
-void __init s5p_init_io(struct map_desc *mach_desc,
- int size, void __iomem *cpuid_addr)
-{
- /* initialize the io descriptors we need for initialization */
- iotable_init(s5p_iodesc, ARRAY_SIZE(s5p_iodesc));
- if (mach_desc)
- iotable_init(mach_desc, size);
-
- /* detect cpu id and rev. */
- s5p_init_cpu(cpuid_addr);
-
- s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index b5bb774985b..c496b359c37 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/hardware/vic.h>
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index c833e7b5759..17c0a2c58df 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
@@ -321,26 +320,14 @@ static void __iomem *s5p_timer_reg(void)
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
-static DEFINE_CLOCK_DATA(cd);
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace s5p_read_sched_clock(void)
{
void __iomem *reg = s5p_timer_reg();
if (!reg)
return 0;
- return cyc_to_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
-}
-
-static void notrace s5p_update_sched_clock(void)
-{
- void __iomem *reg = s5p_timer_reg();
-
- if (!reg)
- return;
-
- update_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
+ return ~__raw_readl(reg);
}
static void __init s5p_clocksource_init(void)
@@ -358,7 +345,7 @@ static void __init s5p_clocksource_init(void)
s5p_time_setup(timer_source.source_id, TCNT_MAX);
s5p_time_start(timer_source.source_id, PERIODIC);
- init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
+ setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);
if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
clock_rate, 250, 32, clocksource_mmio_readl_down))
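
The s5p-time.c change drops the open-coded DEFINE_CLOCK_DATA/update_sched_clock machinery in favour of the generic setup_sched_clock() helper, which only needs a callback returning the raw up-counting counter value. A sketch assuming a hypothetical 32-bit down-counting timer mapping and the era's <asm/sched_clock.h> header:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *example_timer_reg;	/* hypothetical, mapped elsewhere */

/* the hardware counts down, so invert to present an up-counting value */
static u32 notrace example_read_sched_clock(void)
{
	return ~readl_relaxed(example_timer_reg);
}

static void __init example_sched_clock_init(unsigned long rate)
{
	/* 32-bit counter running at 'rate' Hz */
	setup_sched_clock(example_read_sched_clock, 32, rate);
}
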
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index e1cbc728c77..c8bec9c7655 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 313eb26cfa6..6a2abe67c8b 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -88,12 +88,20 @@ config S5P_GPIO_DRVSTR
config SAMSUNG_GPIO_EXTRA
int "Number of additional GPIO pins"
+ default 128 if SAMSUNG_GPIO_EXTRA128
+ default 64 if SAMSUNG_GPIO_EXTRA64
default 0
help
Use additional GPIO space in addition to the GPIO's the SOC
provides. This allows expanding the GPIO space for use with
GPIO expanders.
+config SAMSUNG_GPIO_EXTRA64
+ bool
+
+config SAMSUNG_GPIO_EXTRA128
+ bool
+
config S3C_GPIO_SPACE
int "Space between gpio banks"
default 0
@@ -226,11 +234,23 @@ config SAMSUNG_DEV_IDE
help
Compile in platform device definitions for IDE
-config S3C64XX_DEV_SPI
+config S3C64XX_DEV_SPI0
+ bool
+ help
+ Compile in platform device definitions for S3C64XX's type
+ SPI controller 0
+
+config S3C64XX_DEV_SPI1
+ bool
+ help
+ Compile in platform device definitions for S3C64XX's type
+ SPI controller 1
+
+config S3C64XX_DEV_SPI2
bool
help
Compile in platform device definitions for S3C64XX's type
- SPI controllers.
+ SPI controller 2
config SAMSUNG_DEV_TS
bool
diff --git a/arch/arm/plat-samsung/clock-clksrc.c b/arch/arm/plat-samsung/clock-clksrc.c
index ae8b8507663..786a4107a15 100644
--- a/arch/arm/plat-samsung/clock-clksrc.c
+++ b/arch/arm/plat-samsung/clock-clksrc.c
@@ -16,7 +16,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <plat/clock.h>
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 3b4451979d1..10f71179071 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -33,7 +33,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
index e657305644c..a976c023b28 100644
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pwm_backlight.h>
-#include <linux/slab.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 4ca8b571f97..32a6e394db2 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -29,6 +29,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mmc/host.h>
#include <linux/ioport.h>
+#include <linux/platform_data/s3c-hsudc.h>
#include <asm/irq.h>
#include <asm/pmu.h>
@@ -61,6 +62,7 @@
#include <plat/regs-iic.h>
#include <plat/regs-serial.h>
#include <plat/regs-spi.h>
+#include <plat/s3c64xx-spi.h>
static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
@@ -1461,3 +1463,129 @@ struct platform_device s3c_device_wdt = {
.resource = s3c_wdt_resource,
};
#endif /* CONFIG_S3C_DEV_WDT */
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+static struct resource s3c64xx_spi0_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C_PA_SPI0, SZ_256),
+ [1] = DEFINE_RES_DMA(DMACH_SPI0_TX),
+ [2] = DEFINE_RES_DMA(DMACH_SPI0_RX),
+ [3] = DEFINE_RES_IRQ(IRQ_SPI0),
+};
+
+struct platform_device s3c64xx_device_spi0 = {
+ .name = "s3c64xx-spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c64xx_spi0_resource),
+ .resource = s3c64xx_spi0_resource,
+ .dev = {
+ .dma_mask = &samsung_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init s3c64xx_spi0_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs)
+{
+ if (!pd) {
+ pr_err("%s:Need to pass platform data\n", __func__);
+ return;
+ }
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0) {
+ pr_err("%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ if (!pd->cfg_gpio)
+ pd->cfg_gpio = s3c64xx_spi0_cfg_gpio;
+
+ s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi0);
+}
+#endif /* CONFIG_S3C64XX_DEV_SPI0 */
+
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+static struct resource s3c64xx_spi1_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C_PA_SPI1, SZ_256),
+ [1] = DEFINE_RES_DMA(DMACH_SPI1_TX),
+ [2] = DEFINE_RES_DMA(DMACH_SPI1_RX),
+ [3] = DEFINE_RES_IRQ(IRQ_SPI1),
+};
+
+struct platform_device s3c64xx_device_spi1 = {
+ .name = "s3c64xx-spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s3c64xx_spi1_resource),
+ .resource = s3c64xx_spi1_resource,
+ .dev = {
+ .dma_mask = &samsung_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init s3c64xx_spi1_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs)
+{
+ if (!pd) {
+ pr_err("%s:Need to pass platform data\n", __func__);
+ return;
+ }
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0) {
+ pr_err("%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ if (!pd->cfg_gpio)
+ pd->cfg_gpio = s3c64xx_spi1_cfg_gpio;
+
+ s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi1);
+}
+#endif /* CONFIG_S3C64XX_DEV_SPI1 */
+
+#ifdef CONFIG_S3C64XX_DEV_SPI2
+static struct resource s3c64xx_spi2_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C_PA_SPI2, SZ_256),
+ [1] = DEFINE_RES_DMA(DMACH_SPI2_TX),
+ [2] = DEFINE_RES_DMA(DMACH_SPI2_RX),
+ [3] = DEFINE_RES_IRQ(IRQ_SPI2),
+};
+
+struct platform_device s3c64xx_device_spi2 = {
+ .name = "s3c64xx-spi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s3c64xx_spi2_resource),
+ .resource = s3c64xx_spi2_resource,
+ .dev = {
+ .dma_mask = &samsung_device_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init s3c64xx_spi2_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs)
+{
+ if (!pd) {
+ pr_err("%s:Need to pass platform data\n", __func__);
+ return;
+ }
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0) {
+ pr_err("%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ if (!pd->cfg_gpio)
+ pd->cfg_gpio = s3c64xx_spi2_cfg_gpio;
+
+ s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi2);
+}
+#endif /* CONFIG_S3C64XX_DEV_SPI2 */
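
The three new s3c64xx_spiN_set_platdata() helpers replace the old numbered set_info() calls: board code passes a platform-data template plus the clock-source index and chip-select count, and the helper fills in the default cfg_gpio. A hypothetical machine-init sketch:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <plat/s3c64xx-spi.h>
#include <plat/devs.h>

/* cfg_gpio left NULL so the helper falls back to s3c64xx_spi0_cfg_gpio() */
static struct s3c64xx_spi_info example_spi0_pdata;

static void __init example_machine_init(void)
{
	/* clock source 0, one chip select */
	s3c64xx_spi0_set_platdata(&example_spi0_pdata, 0, 1);
	platform_device_register(&s3c64xx_device_spi0);
}
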
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 93a994a5dd8..2cded872f22 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -18,23 +18,24 @@
#include <mach/dma.h>
-static inline bool pl330_filter(struct dma_chan *chan, void *param)
-{
- struct dma_pl330_peri *peri = chan->private;
- return peri->peri_id == (unsigned)param;
-}
-
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
struct samsung_dma_info *info)
{
struct dma_chan *chan;
dma_cap_mask_t mask;
struct dma_slave_config slave_config;
+ void *filter_param;
dma_cap_zero(mask);
dma_cap_set(info->cap, mask);
- chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
+ /*
+ * If a dma channel property of a device node from device tree is
+ * specified, use that as the filter parameter.
+ */
+ filter_param = (dma_ch == DMACH_DT_PROP) ? (void *)info->dt_dmach_prop :
+ (void *)dma_ch;
+ chan = dma_request_channel(mask, pl330_filter, filter_param);
if (info->direction == DMA_FROM_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
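
With the local pl330_filter() copy removed, the request path uses the shared PL330 filter and passes either a numeric channel id or, for DMACH_DT_PROP, the device-tree dma-channel property. A reduced sketch of the request call, wrapped in a hypothetical helper and assuming the filter is exported via <linux/amba/pl330.h>:

#include <linux/dmaengine.h>
#include <linux/amba/pl330.h>

/* 'filter_param' is a peripheral id or, when the channel is described in
 * the device tree, a pointer to the dma property */
static struct dma_chan *example_request_channel(void *filter_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, pl330_filter, filter_param);
}
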
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
index dac4760c0f0..95509d8eb14 100644
--- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
+++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
#define s3c_cpufreq_debugfs_call(x) x
#else
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
#ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
+
extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
#else
+#define s3c2410_iotiming_debugfs NULL
#define s3c2410_iotiming_calc NULL
#define s3c2410_iotiming_get NULL
#define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
/* S3C2412 compatible routines */
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+ struct s3c_cpufreq_config *cfg,
+ union s3c_iobank *iob);
extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
#define s3c_freq_dbg(x...) printk(KERN_INFO x)
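
The regrouped iotiming declarations follow a common header pattern: when the matching CONFIG option is off, each symbol is #defined to NULL, so callers that store it in an ops structure need no #ifdefs of their own. A generic sketch of the pattern with hypothetical names:

#include <linux/kernel.h>

struct example_cfg;

#ifdef CONFIG_EXAMPLE_IOTIMING
extern int example_iotiming_get(struct example_cfg *cfg);
#else
#define example_iotiming_get NULL
#endif

struct example_cpufreq_ops {
	int (*get_iotiming)(struct example_cfg *cfg);
};

/* the assignment compiles in both configurations; the core checks the
 * pointer for NULL before calling through it */
static struct example_cpufreq_ops example_ops = {
	.get_iotiming = example_iotiming_get,
};
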
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 40fd7b6b5e6..73cb3cfd068 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -152,13 +152,9 @@ extern void s3c_init_cpu(unsigned long idcode,
/* core initialisation functions */
extern void s3c24xx_init_irq(void);
-extern void s3c64xx_init_irq(u32 vic0, u32 vic1);
extern void s5p_init_irq(u32 *vic, u32 num_vic);
extern void s3c24xx_init_io(struct map_desc *mach_desc, int size);
-extern void s3c64xx_init_io(struct map_desc *mach_desc, int size);
-extern void s5p_init_io(struct map_desc *mach_desc,
- int size, void __iomem *cpuid_addr);
extern void s3c24xx_init_cpu(void);
extern void s3c64xx_init_cpu(void);
@@ -183,22 +179,20 @@ extern struct syscore_ops s3c2410_pm_syscore_ops;
extern struct syscore_ops s3c2412_pm_syscore_ops;
extern struct syscore_ops s3c2416_pm_syscore_ops;
extern struct syscore_ops s3c244x_pm_syscore_ops;
-extern struct syscore_ops s3c64xx_irq_syscore_ops;
-
-/* system device classes */
-
-extern struct sysdev_class s3c2410_sysclass;
-extern struct sysdev_class s3c2410a_sysclass;
-extern struct sysdev_class s3c2412_sysclass;
-extern struct sysdev_class s3c2416_sysclass;
-extern struct sysdev_class s3c2440_sysclass;
-extern struct sysdev_class s3c2442_sysclass;
-extern struct sysdev_class s3c2443_sysclass;
-extern struct sysdev_class s3c6410_sysclass;
-extern struct sysdev_class s3c64xx_sysclass;
-extern struct sysdev_class s5p64x0_sysclass;
-extern struct sysdev_class s5pv210_sysclass;
-extern struct sysdev_class exynos4_sysclass;
+
+/* system device subsystems */
+
+extern struct bus_type s3c2410_subsys;
+extern struct bus_type s3c2410a_subsys;
+extern struct bus_type s3c2412_subsys;
+extern struct bus_type s3c2416_subsys;
+extern struct bus_type s3c2440_subsys;
+extern struct bus_type s3c2442_subsys;
+extern struct bus_type s3c2443_subsys;
+extern struct bus_type s3c6410_subsys;
+extern struct bus_type s5p64x0_subsys;
+extern struct bus_type s5pv210_subsys;
+extern struct bus_type exynos4_subsys;
extern void (*s5pc1xx_idle)(void);
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index ab633c9c2ae..4214ea0ff8f 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -39,6 +39,7 @@ extern struct platform_device s3c64xx_device_pcm0;
extern struct platform_device s3c64xx_device_pcm1;
extern struct platform_device s3c64xx_device_spi0;
extern struct platform_device s3c64xx_device_spi1;
+extern struct platform_device s3c64xx_device_spi2;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_cfcon;
@@ -98,8 +99,6 @@ extern struct platform_device s5p6450_device_iis1;
extern struct platform_device s5p6450_device_iis2;
extern struct platform_device s5p6450_device_pcm0;
-extern struct platform_device s5p64x0_device_spi0;
-extern struct platform_device s5p64x0_device_spi1;
extern struct platform_device s5pc100_device_ac97;
extern struct platform_device s5pc100_device_iis0;
@@ -108,9 +107,6 @@ extern struct platform_device s5pc100_device_iis2;
extern struct platform_device s5pc100_device_pcm0;
extern struct platform_device s5pc100_device_pcm1;
extern struct platform_device s5pc100_device_spdif;
-extern struct platform_device s5pc100_device_spi0;
-extern struct platform_device s5pc100_device_spi1;
-extern struct platform_device s5pc100_device_spi2;
extern struct platform_device s5pv210_device_ac97;
extern struct platform_device s5pv210_device_iis0;
@@ -120,8 +116,6 @@ extern struct platform_device s5pv210_device_pcm0;
extern struct platform_device s5pv210_device_pcm1;
extern struct platform_device s5pv210_device_pcm2;
extern struct platform_device s5pv210_device_spdif;
-extern struct platform_device s5pv210_device_spi0;
-extern struct platform_device s5pv210_device_spi1;
extern struct platform_device exynos4_device_ac97;
extern struct platform_device exynos4_device_ahci;
@@ -129,6 +123,7 @@ extern struct platform_device exynos4_device_dwmci;
extern struct platform_device exynos4_device_i2s0;
extern struct platform_device exynos4_device_i2s1;
extern struct platform_device exynos4_device_i2s2;
+extern struct platform_device exynos4_device_ohci;
extern struct platform_device exynos4_device_pcm0;
extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index 4c1a363526c..22eafc310bd 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -31,6 +31,7 @@ struct samsung_dma_info {
enum dma_slave_buswidth width;
dma_addr_t fifo;
struct s3c2410_dma_client *client;
+ struct property *dt_dmach_prop;
};
struct samsung_dma_ops {
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 2e55e595867..c5eaad529de 100644
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -21,7 +21,8 @@
* use these just as IDs.
*/
enum dma_ch {
- DMACH_UART0_RX,
+ DMACH_DT_PROP = -1,
+ DMACH_UART0_RX = 0,
DMACH_UART0_TX,
DMACH_UART1_RX,
DMACH_UART1_TX,
diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
index 1c1ed548125..d01576318b2 100644
--- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
+++ b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
@@ -12,7 +12,7 @@
#include <plat/dma-core.h>
-extern struct sysdev_class dma_sysclass;
+extern struct bus_type dma_subsys;
extern struct s3c2410_dma_chan s3c2410_chans[S3C_DMA_CHANNELS];
#define DMA_CH_VALID (1<<31)
diff --git a/arch/arm/plat-samsung/include/plat/exynos4.h b/arch/arm/plat-samsung/include/plat/exynos4.h
deleted file mode 100644
index f546e88ebc9..00000000000
--- a/arch/arm/plat-samsung/include/plat/exynos4.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/exynos4.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Header file for exynos4 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for EXYNOS4 related SoCs */
-
-extern void exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void exynos4_register_clocks(void);
-extern void exynos4210_register_clocks(void);
-extern void exynos4212_register_clocks(void);
-extern void exynos4_setup_clocks(void);
-
-#ifdef CONFIG_ARCH_EXYNOS
-extern int exynos_init(void);
-extern void exynos4_init_irq(void);
-extern void exynos4_map_io(void);
-extern void exynos4_init_clocks(int xtal);
-extern struct sys_timer exynos4_timer;
-
-#define exynos4_init_uarts exynos4_common_init_uarts
-
-#else
-#define exynos4_init_clocks NULL
-#define exynos4_init_uarts NULL
-#define exynos4_map_io NULL
-#define exynos_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index d48245bb02b..df8155b9d4d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,6 +24,8 @@
#ifndef __PLAT_GPIO_CFG_H
#define __PLAT_GPIO_CFG_H __FILE__
+#include<linux/types.h>
+
typedef unsigned int __bitwise__ samsung_gpio_pull_t;
typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
diff --git a/arch/arm/plat-samsung/include/plat/irqs.h b/arch/arm/plat-samsung/include/plat/irqs.h
index 08d1a7ef97b..df46b776976 100644
--- a/arch/arm/plat-samsung/include/plat/irqs.h
+++ b/arch/arm/plat-samsung/include/plat/irqs.h
@@ -44,13 +44,14 @@
#define S5P_IRQ_VIC2(x) (S5P_VIC2_BASE + (x))
#define S5P_IRQ_VIC3(x) (S5P_VIC3_BASE + (x))
-#define S5P_TIMER_IRQ(x) (11 + (x))
+#define S5P_TIMER_IRQ(x) (IRQ_TIMER_BASE + (x))
#define IRQ_TIMER0 S5P_TIMER_IRQ(0)
#define IRQ_TIMER1 S5P_TIMER_IRQ(1)
#define IRQ_TIMER2 S5P_TIMER_IRQ(2)
#define IRQ_TIMER3 S5P_TIMER_IRQ(3)
#define IRQ_TIMER4 S5P_TIMER_IRQ(4)
+#define IRQ_TIMER_COUNT (5)
#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_EINT_BASE1) \
: ((x) - 16 + S5P_EINT_BASE2))
diff --git a/arch/arm/plat-samsung/include/plat/keypad.h b/arch/arm/plat-samsung/include/plat/keypad.h
index b59a6483cd8..c81ace332a1 100644
--- a/arch/arm/plat-samsung/include/plat/keypad.h
+++ b/arch/arm/plat-samsung/include/plat/keypad.h
@@ -13,32 +13,7 @@
#ifndef __PLAT_SAMSUNG_KEYPAD_H
#define __PLAT_SAMSUNG_KEYPAD_H
-#include <linux/input/matrix_keypad.h>
-
-#define SAMSUNG_MAX_ROWS 8
-#define SAMSUNG_MAX_COLS 8
-
-/**
- * struct samsung_keypad_platdata - Platform device data for Samsung Keypad.
- * @keymap_data: pointer to &matrix_keymap_data.
- * @rows: number of keypad row supported.
- * @cols: number of keypad col supported.
- * @no_autorepeat: disable key autorepeat.
- * @wakeup: controls whether the device should be set up as wakeup source.
- * @cfg_gpio: configure the GPIO.
- *
- * Initialisation data specific to either the machine or the platform
- * for the device driver to use or call-back when configuring gpio.
- */
-struct samsung_keypad_platdata {
- const struct matrix_keymap_data *keymap_data;
- unsigned int rows;
- unsigned int cols;
- bool no_autorepeat;
- bool wakeup;
-
- void (*cfg_gpio)(unsigned int rows, unsigned int cols);
-};
+#include <linux/input/samsung-keypad.h>
/**
* samsung_keypad_set_platdata - Set platform data for Samsung Keypad device.
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index dcf68709f9c..61fc53740fb 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -17,11 +17,12 @@
#include <linux/irq.h>
-struct sys_device;
+struct device;
#ifdef CONFIG_PM
extern __init int s3c_pm_init(void);
+extern __init int s3c64xx_pm_init(void);
#else
@@ -29,6 +30,11 @@ static inline int s3c_pm_init(void)
{
return 0;
}
+
+static inline int s3c64xx_pm_init(void)
+{
+ return 0;
+}
#endif
/* configuration for the IRQ mask over sleep */
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 72073484702..29c26a81884 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -71,6 +71,7 @@
#define S3C2410_LCON_IRM (1<<6)
#define S3C2440_UCON_CLKMASK (3<<10)
+#define S3C2440_UCON_CLKSHIFT (10)
#define S3C2440_UCON_PCLK (0<<10)
#define S3C2440_UCON_UCLK (1<<10)
#define S3C2440_UCON_PCLK2 (2<<10)
@@ -78,6 +79,7 @@
#define S3C2443_UCON_EPLL (3<<10)
#define S3C6400_UCON_CLKMASK (3<<10)
+#define S3C6400_UCON_CLKSHIFT (10)
#define S3C6400_UCON_PCLK (0<<10)
#define S3C6400_UCON_PCLK2 (2<<10)
#define S3C6400_UCON_UCLK0 (1<<10)
@@ -90,11 +92,14 @@
#define S3C2440_UCON_DIVSHIFT (12)
#define S3C2412_UCON_CLKMASK (3<<10)
+#define S3C2412_UCON_CLKSHIFT (10)
#define S3C2412_UCON_UCLK (1<<10)
#define S3C2412_UCON_USYSCLK (3<<10)
#define S3C2412_UCON_PCLK (0<<10)
#define S3C2412_UCON_PCLK2 (2<<10)
+#define S3C2410_UCON_CLKMASK (1 << 10)
+#define S3C2410_UCON_CLKSHIFT (10)
#define S3C2410_UCON_UCLK (1<<10)
#define S3C2410_UCON_SBREAK (1<<4)
@@ -193,6 +198,7 @@
/* Following are specific to S5PV210 */
#define S5PV210_UCON_CLKMASK (1<<10)
+#define S5PV210_UCON_CLKSHIFT (10)
#define S5PV210_UCON_PCLK (0<<10)
#define S5PV210_UCON_UCLK (1<<10)
@@ -221,29 +227,24 @@
#define S5PV210_UFSTAT_RXMASK (255<<0)
#define S5PV210_UFSTAT_RXSHIFT (0)
-#define NO_NEED_CHECK_CLKSRC 1
+#define S3C2410_UCON_CLKSEL0 (1 << 0)
+#define S3C2410_UCON_CLKSEL1 (1 << 1)
+#define S3C2410_UCON_CLKSEL2 (1 << 2)
+#define S3C2410_UCON_CLKSEL3 (1 << 3)
-#ifndef __ASSEMBLY__
+/* Default values for s5pv210 UCON and UFCON uart registers */
+#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
-/* struct s3c24xx_uart_clksrc
- *
- * this structure defines a named clock source that can be used for the
- * uart, so that the best clock can be selected for the requested baud
- * rate.
- *
- * min_baud and max_baud define the range of baud-rates this clock is
- * acceptable for, if they are both zero, it is assumed any baud rate that
- * can be generated from this clock will be used.
- *
- * divisor gives the divisor from the clock to the one seen by the uart
-*/
+#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
-struct s3c24xx_uart_clksrc {
- const char *name;
- unsigned int divisor;
- unsigned int min_baud;
- unsigned int max_baud;
-};
+#ifndef __ASSEMBLY__
/* configuration structure for per-machine configurations for the
* serial port
@@ -257,15 +258,13 @@ struct s3c2410_uartcfg {
unsigned char unused;
unsigned short flags;
upf_t uart_flags; /* default uart flags */
+ unsigned int clk_sel;
unsigned int has_fracval;
unsigned long ucon; /* value of ucon for port */
unsigned long ulcon; /* value of ulcon for port */
unsigned long ufcon; /* value of ufcon for port */
-
- struct s3c24xx_uart_clksrc *clocks;
- unsigned int clocks_size;
};
/* s3c24xx_uart_devs
diff --git a/arch/arm/plat-samsung/include/plat/reset.h b/arch/arm/plat-samsung/include/plat/reset.h
deleted file mode 100644
index 32ca5179c6e..00000000000
--- a/arch/arm/plat-samsung/include/plat/reset.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/reset.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_RESET_H
-#define __PLAT_SAMSUNG_RESET_H __FILE__
-
-extern void (*s5p_reset_hook)(void);
-
-#endif /* __PLAT_SAMSUNG_RESET_H */
diff --git a/arch/arm/plat-samsung/include/plat/s3c2412.h b/arch/arm/plat-samsung/include/plat/s3c2412.h
index 5bcfd143ba1..cbae50ddacc 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2412.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2412.h
@@ -21,9 +21,12 @@ extern void s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no);
extern void s3c2412_init_clocks(int xtal);
extern int s3c2412_baseclk_add(void);
+
+extern void s3c2412_restart(char mode, const char *cmd);
#else
#define s3c2412_init_clocks NULL
#define s3c2412_init_uarts NULL
#define s3c2412_map_io NULL
#define s3c2412_init NULL
+#define s3c2412_restart NULL
#endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index a764f8503f5..de2b5bdc5eb 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -23,9 +23,11 @@ extern void s3c2416_init_clocks(int xtal);
extern int s3c2416_baseclk_add(void);
+extern void s3c2416_restart(char mode, const char *cmd);
#else
#define s3c2416_init_clocks NULL
#define s3c2416_init_uarts NULL
#define s3c2416_map_io NULL
#define s3c2416_init NULL
+#define s3c2416_restart NULL
#endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c2443.h b/arch/arm/plat-samsung/include/plat/s3c2443.h
index 7fae1a05069..dce05b43d51 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2443.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2443.h
@@ -24,11 +24,13 @@ extern void s3c2443_init_clocks(int xtal);
extern int s3c2443_baseclk_add(void);
+extern void s3c2443_restart(char mode, const char *cmd);
#else
#define s3c2443_init_clocks NULL
#define s3c2443_init_uarts NULL
#define s3c2443_map_io NULL
#define s3c2443_init NULL
+#define s3c2443_restart NULL
#endif
/* common code used by s3c2443 and others.
diff --git a/arch/arm/plat-samsung/include/plat/s3c6400.h b/arch/arm/plat-samsung/include/plat/s3c6400.h
deleted file mode 100644
index 37d428aaaeb..00000000000
--- a/arch/arm/plat-samsung/include/plat/s3c6400.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c6400.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Header file for s3c6400 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S3C6400 related SoCs */
-
-extern void s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s3c6400_setup_clocks(void);
-
-extern void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
-
-#ifdef CONFIG_CPU_S3C6400
-
-extern int s3c6400_init(void);
-extern void s3c6400_init_irq(void);
-extern void s3c6400_map_io(void);
-extern void s3c6400_init_clocks(int xtal);
-
-#define s3c6400_init_uarts s3c6400_common_init_uarts
-
-#else
-#define s3c6400_init_clocks NULL
-#define s3c6400_init_uarts NULL
-#define s3c6400_map_io NULL
-#define s3c6400_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c6410.h b/arch/arm/plat-samsung/include/plat/s3c6410.h
deleted file mode 100644
index 20a6675b9d1..00000000000
--- a/arch/arm/plat-samsung/include/plat/s3c6410.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c6410.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * Header file for s3c6410 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifdef CONFIG_CPU_S3C6410
-
-extern int s3c6410_init(void);
-extern void s3c6410_init_irq(void);
-extern void s3c6410_map_io(void);
-extern void s3c6410_init_clocks(int xtal);
-
-#define s3c6410_init_uarts s3c6400_common_init_uarts
-
-#else
-#define s3c6410_init_clocks NULL
-#define s3c6410_init_uarts NULL
-#define s3c6410_map_io NULL
-#define s3c6410_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index 4c16fa3621b..aea68b60ef9 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -31,7 +31,6 @@ struct s3c64xx_spi_csinfo {
/**
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
- * @src_clk_name: Platform name of the corresponding clock.
* @clk_from_cmu: If the SPI clock/prescalar control block is present
* by the platform's clock-management-unit and not in SPI controller.
* @num_cs: Number of CS this controller emulates.
@@ -43,7 +42,6 @@ struct s3c64xx_spi_csinfo {
*/
struct s3c64xx_spi_info {
int src_clk_nr;
- char *src_clk_name;
bool clk_from_cmu;
int num_cs;
@@ -58,18 +56,28 @@ struct s3c64xx_spi_info {
};
/**
- * s3c64xx_spi_set_info - SPI Controller configure callback by the board
+ * s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @cntrlr: SPI controller number the configuration is for.
+ * @pd: SPI platform data to set.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
-extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
-extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
-extern void s5p64x0_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
+extern void s3c64xx_spi0_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs);
+extern void s3c64xx_spi1_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs);
+extern void s3c64xx_spi2_set_platdata(struct s3c64xx_spi_info *pd,
+ int src_clk_nr, int num_cs);
+/* defined by architecture to configure gpio */
+extern int s3c64xx_spi0_cfg_gpio(struct platform_device *dev);
+extern int s3c64xx_spi1_cfg_gpio(struct platform_device *dev);
+extern int s3c64xx_spi2_cfg_gpio(struct platform_device *dev);
+
+extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
+extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
+extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
#endif /* __S3C64XX_PLAT_SPI_H */
diff --git a/arch/arm/plat-samsung/include/plat/s5p6440.h b/arch/arm/plat-samsung/include/plat/s5p6440.h
deleted file mode 100644
index bf85ebbb4fb..00000000000
--- a/arch/arm/plat-samsung/include/plat/s5p6440.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5p6440.h
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5p6440 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
- /* Common init code for S5P6440 related SoCs */
-
-extern void s5p6440_register_clocks(void);
-extern void s5p6440_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6440
-
-extern int s5p64x0_init(void);
-extern void s5p6440_init_irq(void);
-extern void s5p6440_map_io(void);
-extern void s5p6440_init_clocks(int xtal);
-
-extern void s5p6440_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-
-#else
-#define s5p6440_init_clocks NULL
-#define s5p6440_init_uarts NULL
-#define s5p6440_map_io NULL
-#define s5p64x0_init NULL
-#endif
-
-/* S5P6440 timer */
-
-extern struct sys_timer s5p6440_timer;
diff --git a/arch/arm/plat-samsung/include/plat/s5p6450.h b/arch/arm/plat-samsung/include/plat/s5p6450.h
deleted file mode 100644
index da25f9a1c54..00000000000
--- a/arch/arm/plat-samsung/include/plat/s5p6450.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5p6450.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Header file for s5p6450 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5P6450 related SoCs */
-
-extern void s5p6450_register_clocks(void);
-extern void s5p6450_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5P6450
-
-extern int s5p64x0_init(void);
-extern void s5p6450_init_irq(void);
-extern void s5p6450_map_io(void);
-extern void s5p6450_init_clocks(int xtal);
-
-extern void s5p6450_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-
-#else
-#define s5p6450_init_clocks NULL
-#define s5p6450_init_uarts NULL
-#define s5p6450_map_io NULL
-#define s5p64x0_init NULL
-#endif
-
-/* S5P6450 timer */
-
-extern struct sys_timer s5p6450_timer;
diff --git a/arch/arm/plat-samsung/include/plat/s5pc100.h b/arch/arm/plat-samsung/include/plat/s5pc100.h
deleted file mode 100644
index 9a21aeaaf45..00000000000
--- a/arch/arm/plat-samsung/include/plat/s5pc100.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5pc100.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5pc100 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PC100 related SoCs */
-
-extern void s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pc100_register_clocks(void);
-extern void s5pc100_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5PC100
-
-extern int s5pc100_init(void);
-extern void s5pc100_init_irq(void);
-extern void s5pc100_map_io(void);
-extern void s5pc100_init_clocks(int xtal);
-
-#define s5pc100_init_uarts s5pc100_common_init_uarts
-
-#else
-#define s5pc100_init_clocks NULL
-#define s5pc100_init_uarts NULL
-#define s5pc100_map_io NULL
-#define s5pc100_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/s5pv210.h b/arch/arm/plat-samsung/include/plat/s5pv210.h
deleted file mode 100644
index b4bc6be7707..00000000000
--- a/arch/arm/plat-samsung/include/plat/s5pv210.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/s5pv210.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Header file for s5pv210 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PV210 related SoCs */
-
-extern void s5pv210_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pv210_register_clocks(void);
-extern void s5pv210_setup_clocks(void);
-
-#ifdef CONFIG_CPU_S5PV210
-
-extern int s5pv210_init(void);
-extern void s5pv210_init_irq(void);
-extern void s5pv210_map_io(void);
-extern void s5pv210_init_clocks(int xtal);
-
-#define s5pv210_init_uarts s5pv210_common_init_uarts
-
-#else
-#define s5pv210_init_clocks NULL
-#define s5pv210_init_uarts NULL
-#define s5pv210_map_io NULL
-#define s5pv210_init NULL
-#endif
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index e7b3c752e91..656dc00d30e 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -66,8 +66,6 @@ struct s3c_sdhci_platdata {
enum cd_types cd_type;
enum clk_types clk_type;
- char **clocks; /* set of clock sources */
-
int ext_cd_gpio;
bool ext_cd_gpio_invert;
int (*ext_cd_init)(void (*notify_func)(struct platform_device *,
@@ -125,16 +123,17 @@ extern void exynos4_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
extern void exynos4_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
extern void exynos4_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
extern void exynos4_setup_sdhci3_cfg_gpio(struct platform_device *, int w);
+extern void s5p64x0_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void s5p64x0_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
+extern void s5p6440_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
+extern void s5p6450_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
/* S3C2416 SDHCI setup */
#ifdef CONFIG_S3C2416_SETUP_SDHCI
-extern char *s3c2416_hsmmc_clksrcs[4];
-
static inline void s3c2416_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = s3c2416_setup_sdhci0_cfg_gpio;
#endif /* CONFIG_S3C_DEV_HSMMC */
}
@@ -142,7 +141,6 @@ static inline void s3c2416_default_sdhci0(void)
static inline void s3c2416_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s3c2416_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = s3c2416_setup_sdhci1_cfg_gpio;
#endif /* CONFIG_S3C_DEV_HSMMC1 */
}
@@ -152,15 +150,13 @@ static inline void s3c2416_default_sdhci0(void) { }
static inline void s3c2416_default_sdhci1(void) { }
#endif /* CONFIG_S3C2416_SETUP_SDHCI */
+
/* S3C64XX SDHCI setup */
#ifdef CONFIG_S3C64XX_SETUP_SDHCI
-extern char *s3c64xx_hsmmc_clksrcs[4];
-
static inline void s3c6400_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = s3c64xx_setup_sdhci0_cfg_gpio;
#endif
}
@@ -168,7 +164,6 @@ static inline void s3c6400_default_sdhci0(void)
static inline void s3c6400_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = s3c64xx_setup_sdhci1_cfg_gpio;
#endif
}
@@ -176,7 +171,6 @@ static inline void s3c6400_default_sdhci1(void)
static inline void s3c6400_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc2_def_platdata.cfg_gpio = s3c64xx_setup_sdhci2_cfg_gpio;
#endif
}
@@ -184,7 +178,6 @@ static inline void s3c6400_default_sdhci2(void)
static inline void s3c6410_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = s3c64xx_setup_sdhci0_cfg_gpio;
#endif
}
@@ -192,7 +185,6 @@ static inline void s3c6410_default_sdhci0(void)
static inline void s3c6410_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = s3c64xx_setup_sdhci1_cfg_gpio;
#endif
}
@@ -200,7 +192,6 @@ static inline void s3c6410_default_sdhci1(void)
static inline void s3c6410_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = s3c64xx_hsmmc_clksrcs;
s3c_hsmmc2_def_platdata.cfg_gpio = s3c64xx_setup_sdhci2_cfg_gpio;
#endif
}
@@ -215,15 +206,51 @@ static inline void s3c6400_default_sdhci2(void) { }
#endif /* CONFIG_S3C64XX_SETUP_SDHCI */
+/* S5P64X0 SDHCI setup */
+
+#ifdef CONFIG_S5P64X0_SETUP_SDHCI
+static inline void s5p64x0_default_sdhci0(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC
+ s3c_hsmmc0_def_platdata.cfg_gpio = s5p64x0_setup_sdhci0_cfg_gpio;
+#endif
+}
+
+static inline void s5p64x0_default_sdhci1(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC1
+ s3c_hsmmc1_def_platdata.cfg_gpio = s5p64x0_setup_sdhci1_cfg_gpio;
+#endif
+}
+
+static inline void s5p6440_default_sdhci2(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC2
+ s3c_hsmmc2_def_platdata.cfg_gpio = s5p6440_setup_sdhci2_cfg_gpio;
+#endif
+}
+
+static inline void s5p6450_default_sdhci2(void)
+{
+#ifdef CONFIG_S3C_DEV_HSMMC2
+ s3c_hsmmc2_def_platdata.cfg_gpio = s5p6450_setup_sdhci2_cfg_gpio;
+#endif
+}
+
+#else
+static inline void s5p64x0_default_sdhci0(void) { }
+static inline void s5p64x0_default_sdhci1(void) { }
+static inline void s5p6440_default_sdhci2(void) { }
+static inline void s5p6450_default_sdhci2(void) { }
+
+#endif /* CONFIG_S5P64X0_SETUP_SDHCI */
+
/* S5PC100 SDHCI setup */
#ifdef CONFIG_S5PC100_SETUP_SDHCI
-extern char *s5pc100_hsmmc_clksrcs[4];
-
static inline void s5pc100_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s5pc100_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = s5pc100_setup_sdhci0_cfg_gpio;
#endif
}
@@ -231,7 +258,6 @@ static inline void s5pc100_default_sdhci0(void)
static inline void s5pc100_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s5pc100_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = s5pc100_setup_sdhci1_cfg_gpio;
#endif
}
@@ -239,7 +265,6 @@ static inline void s5pc100_default_sdhci1(void)
static inline void s5pc100_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = s5pc100_hsmmc_clksrcs;
s3c_hsmmc2_def_platdata.cfg_gpio = s5pc100_setup_sdhci2_cfg_gpio;
#endif
}
@@ -254,12 +279,9 @@ static inline void s5pc100_default_sdhci2(void) { }
/* S5PV210 SDHCI setup */
#ifdef CONFIG_S5PV210_SETUP_SDHCI
-extern char *s5pv210_hsmmc_clksrcs[4];
-
static inline void s5pv210_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = s5pv210_setup_sdhci0_cfg_gpio;
#endif
}
@@ -267,7 +289,6 @@ static inline void s5pv210_default_sdhci0(void)
static inline void s5pv210_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = s5pv210_setup_sdhci1_cfg_gpio;
#endif
}
@@ -275,7 +296,6 @@ static inline void s5pv210_default_sdhci1(void)
static inline void s5pv210_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
s3c_hsmmc2_def_platdata.cfg_gpio = s5pv210_setup_sdhci2_cfg_gpio;
#endif
}
@@ -283,7 +303,6 @@ static inline void s5pv210_default_sdhci2(void)
static inline void s5pv210_default_sdhci3(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC3
- s3c_hsmmc3_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
s3c_hsmmc3_def_platdata.cfg_gpio = s5pv210_setup_sdhci3_cfg_gpio;
#endif
}
@@ -298,12 +317,9 @@ static inline void s5pv210_default_sdhci3(void) { }
/* EXYNOS4 SDHCI setup */
#ifdef CONFIG_EXYNOS4_SETUP_SDHCI
-extern char *exynos4_hsmmc_clksrcs[4];
-
static inline void exynos4_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
- s3c_hsmmc0_def_platdata.clocks = exynos4_hsmmc_clksrcs;
s3c_hsmmc0_def_platdata.cfg_gpio = exynos4_setup_sdhci0_cfg_gpio;
#endif
}
@@ -311,7 +327,6 @@ static inline void exynos4_default_sdhci0(void)
static inline void exynos4_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
- s3c_hsmmc1_def_platdata.clocks = exynos4_hsmmc_clksrcs;
s3c_hsmmc1_def_platdata.cfg_gpio = exynos4_setup_sdhci1_cfg_gpio;
#endif
}
@@ -319,7 +334,6 @@ static inline void exynos4_default_sdhci1(void)
static inline void exynos4_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
- s3c_hsmmc2_def_platdata.clocks = exynos4_hsmmc_clksrcs;
s3c_hsmmc2_def_platdata.cfg_gpio = exynos4_setup_sdhci2_cfg_gpio;
#endif
}
@@ -327,7 +341,6 @@ static inline void exynos4_default_sdhci2(void)
static inline void exynos4_default_sdhci3(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC3
- s3c_hsmmc3_def_platdata.clocks = exynos4_hsmmc_clksrcs;
s3c_hsmmc3_def_platdata.cfg_gpio = exynos4_setup_sdhci3_cfg_gpio;
#endif
}
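
With the per-SoC clocks arrays gone, the default_sdhci helpers above only wire up cfg_gpio; a board still supplies card-detect and bus-width details through the usual platdata call. A hypothetical board sketch, assuming the existing s3c_sdhci0_set_platdata() helper and platdata fields:

#include <linux/init.h>
#include <plat/sdhci.h>

static struct s3c_sdhci_platdata example_hsmmc0_pdata = {
	.cd_type	= S3C_SDHCI_CD_INTERNAL,
	.max_width	= 4,
};

static void __init example_hsmmc_init(void)
{
	s3c6410_default_sdhci0();
	s3c_sdhci0_set_platdata(&example_hsmmc0_pdata);
}
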
diff --git a/arch/arm/plat-samsung/include/plat/system-reset.h b/arch/arm/plat-samsung/include/plat/system-reset.h
deleted file mode 100644
index a448e990964..00000000000
--- a/arch/arm/plat-samsung/include/plat/system-reset.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/system-reset.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Based on arch/arm/mach-s3c2410/include/mach/system-reset.h
- *
- * S5P - System define for arch_reset()
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <plat/watchdog-reset.h>
-
-void (*s5p_reset_hook)(void);
-
-static void arch_reset(char mode, const char *cmd)
-{
- /* SWRESET support in s5p_reset_hook() */
-
- if (s5p_reset_hook)
- s5p_reset_hook();
-
- /* Perform reset using Watchdog reset
- * if there is no s5p_reset_hook()
- */
-
- arch_wdt_reset();
-}
diff --git a/arch/arm/plat-samsung/include/plat/udc.h b/arch/arm/plat-samsung/include/plat/udc.h
index 8c22d586bef..de8e2288a50 100644
--- a/arch/arm/plat-samsung/include/plat/udc.h
+++ b/arch/arm/plat-samsung/include/plat/udc.h
@@ -37,20 +37,7 @@ struct s3c2410_udc_mach_info {
extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
-/**
- * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
- * @epnum: Number of endpoints to be instantiated by the controller driver.
- * @gpio_init: Platform specific USB related GPIO initialization.
- * @gpio_uninit: Platform specific USB releted GPIO uninitialzation.
- *
- * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
- * controllers.
- */
-struct s3c24xx_hsudc_platdata {
- unsigned int epnum;
- void (*gpio_init)(void);
- void (*gpio_uninit)(void);
-};
+struct s3c24xx_hsudc_platdata;
extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index 40dbb2b0ae2..f19aff19205 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/delay.h>
static inline void arch_wdt_reset(void)
{
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
index efe1d564473..312b510d86b 100644
--- a/arch/arm/plat-samsung/pd.c
+++ b/arch/arm/plat-samsung/pd.c
@@ -11,7 +11,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
diff --git a/arch/arm/plat-samsung/pm-gpio.c b/arch/arm/plat-samsung/pm-gpio.c
index 4be016eaa6d..c2ff92c30bd 100644
--- a/arch/arm/plat-samsung/pm-gpio.c
+++ b/arch/arm/plat-samsung/pm-gpio.c
@@ -14,7 +14,7 @@
*/
#include <linux/kernel.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index dc1185dcf80..c559d8438c7 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -11,7 +11,7 @@
* the Free Software Foundation; either version 2 of the License.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/arch/arm/plat-samsung/wakeup-mask.c b/arch/arm/plat-samsung/wakeup-mask.c
index dc814037297..20c3d9117cc 100644
--- a/arch/arm/plat-samsung/wakeup-mask.c
+++ b/arch/arm/plat-samsung/wakeup-mask.c
@@ -11,7 +11,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index b4f340b8f1f..e0f2e5b9530 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,6 @@
#
# Common support
-obj-y := clock.o time.o
+obj-y := clock.o restart.o time.o
obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o padmux.o
diff --git a/arch/arm/plat-spear/include/plat/system.h b/arch/arm/plat-spear/include/plat/system.h
index a235fa0ca77..86c6f83b44c 100644
--- a/arch/arm/plat-spear/include/plat/system.h
+++ b/arch/arm/plat-spear/include/plat/system.h
@@ -14,10 +14,6 @@
#ifndef __PLAT_SYSTEM_H
#define __PLAT_SYSTEM_H
-#include <linux/io.h>
-#include <asm/hardware/sp810.h>
-#include <mach/hardware.h>
-
static inline void arch_idle(void)
{
/*
@@ -27,15 +23,4 @@ static inline void arch_idle(void)
cpu_do_idle();
}
-static inline void arch_reset(char mode, const char *cmd)
-{
- if (mode == 's') {
- /* software reset, Jump into ROM at address 0 */
- cpu_reset(0);
- } else {
- /* hardware reset, Use on-chip reset capability */
- sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
- }
-}
-
#endif /* __PLAT_SYSTEM_H */
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
deleted file mode 100644
index 8c8b24d0704..00000000000
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/vmalloc.h
- *
- * Defining Vmalloc area for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_VMALLOC_H
-#define __PLAT_VMALLOC_H
-
-#define VMALLOC_END 0xF0000000UL
-
-#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
new file mode 100644
index 00000000000..2b4e3d82957
--- /dev/null
+++ b/arch/arm/plat-spear/restart.c
@@ -0,0 +1,27 @@
+/*
+ * arch/arm/plat-spear/restart.c
+ *
+ * SPEAr platform specific restart functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/io.h>
+#include <asm/hardware/sp810.h>
+#include <mach/hardware.h>
+#include <mach/generic.h>
+
+void spear_restart(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* software reset, Jump into ROM at address 0 */
+ soft_restart(0);
+ } else {
+ /* hardware reset, Use on-chip reset capability */
+ sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+ }
+}
diff --git a/arch/arm/plat-tcc/Kconfig b/arch/arm/plat-tcc/Kconfig
deleted file mode 100644
index 1bf499570f4..00000000000
--- a/arch/arm/plat-tcc/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-if ARCH_TCC_926
-
-menu "Telechips ARM926-based CPUs"
-
-choice
- prompt "Telechips CPU type:"
- default ARCH_TCC8K
-
-config ARCH_TCC8K
- bool TCC8000
- select USB_ARCH_HAS_OHCI
- help
- Support for Telechips TCC8000 systems
-
-endchoice
-
-source "arch/arm/mach-tcc8k/Kconfig"
-
-endmenu
-endif
diff --git a/arch/arm/plat-tcc/Makefile b/arch/arm/plat-tcc/Makefile
deleted file mode 100644
index eceabc869b8..00000000000
--- a/arch/arm/plat-tcc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# "Telechips Platform Common Modules"
-
-obj-y := clock.o system.o
diff --git a/arch/arm/plat-tcc/clock.c b/arch/arm/plat-tcc/clock.c
deleted file mode 100644
index f3ced10d527..00000000000
--- a/arch/arm/plat-tcc/clock.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Clock framework for Telechips SoCs
- * Based on arch/arm/plat-mxc/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2010 Hans J. Koch, hjk@linutronix.de
- *
- * Licensed under the terms of the GPL v2.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
- BUG_ON(clk->refcount == 0);
-
- if (!(--clk->refcount) && clk->disable) {
- /* Unconditionally disable the clock in hardware */
- clk->disable(clk);
- /* recursively disable parents */
- if (clk->parent)
- __clk_disable(clk->parent);
- }
-}
-
-static int __clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (clk->refcount++ == 0 && clk->enable) {
- if (clk->parent)
- ret = __clk_enable(clk->parent);
- if (ret)
- return ret;
- else
- return clk->enable(clk);
- }
-
- return 0;
-}
-
-/* This function increments the reference count on the clock and enables the
- * clock if not already enabled. The parent clock tree is recursively enabled
- */
-int clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (!clk)
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- ret = __clk_enable(clk);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-/* This function decrements the reference count on the clock and disables
- * the clock when reference count is 0. The parent clock tree is
- * recursively disabled
- */
-void clk_disable(struct clk *clk)
-{
- if (!clk)
- return;
-
- mutex_lock(&clocks_mutex);
- __clk_disable(clk);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (!clk)
- return 0UL;
-
- if (clk->get_rate)
- return clk->get_rate(clk);
-
- return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (!clk)
- return 0;
- if (!clk->round_rate)
- return 0;
-
- return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL_GPL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly based on what clk_round_rate returns
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EINVAL;
-
- if (!clk)
- return ret;
- if (!clk->set_rate || !rate)
- return ret;
-
- mutex_lock(&clocks_mutex);
- ret = clk->set_rate(clk, rate);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- struct clk *old;
- int ret = -EINVAL;
-
- if (!clk)
- return ret;
- if (!clk->set_parent || !parent)
- return ret;
-
- mutex_lock(&clocks_mutex);
- old = clk->parent;
- if (clk->refcount)
- __clk_enable(parent);
- ret = clk->set_parent(clk, parent);
- if (ret)
- old = parent;
- if (clk->refcount)
- __clk_disable(old);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
- if (!clk)
- return NULL;
-
- return clk->parent;
-}
-EXPORT_SYMBOL_GPL(clk_get_parent);
diff --git a/arch/arm/plat-tcc/include/mach/clock.h b/arch/arm/plat-tcc/include/mach/clock.h
deleted file mode 100644
index a12f58ad71a..00000000000
--- a/arch/arm/plat-tcc/include/mach/clock.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Low level clock header file for Telechips TCC architecture
- * (C) 2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the GPL v2.
- */
-
-#ifndef __ASM_ARCH_TCC_CLOCK_H__
-#define __ASM_ARCH_TCC_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-
-struct clk {
- struct clk *parent;
- /* id number of a root clock, 0 for normal clocks */
- int root_id;
- /* Reference count of clock enable/disable */
- int refcount;
- /* Address of associated BCLKCTRx register. Must be set. */
- void __iomem *bclkctr;
- /* Bit position for BCLKCTRx. Must be set. */
- int bclk_shift;
- /* Address of ACLKxxx register, if any. */
- void __iomem *aclkreg;
- /* get the current clock rate (always a fresh value) */
- unsigned long (*get_rate) (struct clk *);
- /* Function ptr to set the clock to a new rate. The rate must match a
- supported rate returned from round_rate. Leave blank if clock is not
- programmable */
- int (*set_rate) (struct clk *, unsigned long);
- /* Function ptr to round the requested clock rate to the nearest
- supported rate that is less than or equal to the requested rate. */
- unsigned long (*round_rate) (struct clk *, unsigned long);
- /* Function ptr to enable the clock. Leave blank if clock can not
- be gated. */
- int (*enable) (struct clk *);
- /* Function ptr to disable the clock. Leave blank if clock can not
- be gated. */
- void (*disable) (struct clk *);
- /* Function ptr to set the parent clock of the clock. */
- int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_ARCH_MXC_CLOCK_H__ */
diff --git a/arch/arm/plat-tcc/include/mach/debug-macro.S b/arch/arm/plat-tcc/include/mach/debug-macro.S
deleted file mode 100644
index cf17d04ec30..00000000000
--- a/arch/arm/plat-tcc/include/mach/debug-macro.S
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 1994-1999 Russell King
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
- .macro addruart, rp, rv, tmp
- moveq \rp, #0x90000000 @ physical base address
- movne \rv, #0xF1000000 @ virtual base
- orr \rp, \rp, #0x00007000 @ UART0
- orr \rv, \rv, #0x00007000 @ UART0
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #0x44]
- .endm
-
- .macro waituart,rd,rx
- .endm
-
- .macro busyuart,rd,rx
-1001:
- ldr \rd, [\rx, #0x14]
- tst \rd, #0x20
-
- beq 1001b
- .endm
diff --git a/arch/arm/plat-tcc/include/mach/entry-macro.S b/arch/arm/plat-tcc/include/mach/entry-macro.S
deleted file mode 100644
index 748f401e4b6..00000000000
--- a/arch/arm/plat-tcc/include/mach/entry-macro.S
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * include/asm-arm/arch-tcc83x/entry-macro.S
- *
- * Author : <linux@telechips.com>
- * Created: June 10, 2008
- * Description: Low-level IRQ helper macros for Telechips-based platforms
- *
- * Copyright (C) 2008-2009 Telechips
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- ldr \base, =0xF2003000 @ base address of PIC registers
-
- @@ read MREQ register of PIC0
-
- mov \irqnr, #0
- ldr \irqstat, [\base, #0x00000014 ] @ lower 32 interrupts
- cmp \irqstat, #0
- bne 1001f
-
- @@ read MREQ register of PIC1
-
- ldr \irqstat, [\base, #0x00000094] @ upper 32 interrupts
- cmp \irqstat, #0
- beq 1002f
- mov \irqnr, #0x20
-
-1001:
- movs \tmp, \irqstat, lsl #16
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #16
-
- movs \tmp, \irqstat, lsl #8
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #8
-
- movs \tmp, \irqstat, lsl #4
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #4
-
- movs \tmp, \irqstat, lsl #2
- movne \irqstat, \tmp
- addeq \irqnr, \irqnr, #2
-
- movs \tmp, \irqstat, lsl #1
- addeq \irqnr, \irqnr, #1
- orrs \base, \base, #1
-1002:
- @@ exit here, Z flag unset if IRQ
-
- .endm
diff --git a/arch/arm/plat-tcc/include/mach/hardware.h b/arch/arm/plat-tcc/include/mach/hardware.h
deleted file mode 100644
index e70d126ccaf..00000000000
--- a/arch/arm/plat-tcc/include/mach/hardware.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Author: RidgeRun, Inc. Greg Lonnon <glonnon@ridgerun.com>
- * Reorganized for Linux-2.6 by Tony Lindgren <tony@atomide.com>
- * and Dirk Behme <dirk.behme@de.bosch.com>
- * Rewritten by: <linux@telechips.com>
- * Description: Hardware definitions for TCC8300 processors and boards
- *
- * Copyright (C) 2001 RidgeRun, Inc.
- * Copyright (C) 2008-2009 Telechips
- *
- * Modifications for mainline (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU Pulic License version 2.
- */
-
-#ifndef __ASM_ARCH_TCC_HARDWARE_H
-#define __ASM_ARCH_TCC_HARDWARE_H
-
-#include <asm/sizes.h>
-#ifndef __ASSEMBLER__
-#include <asm/types.h>
-#endif
-#include <mach/io.h>
-
-/*
- * ----------------------------------------------------------------------------
- * Clocks
- * ----------------------------------------------------------------------------
- */
-#define CLKGEN_REG_BASE 0xfffece00
-#define ARM_CKCTL (CLKGEN_REG_BASE + 0x0)
-#define ARM_IDLECT1 (CLKGEN_REG_BASE + 0x4)
-#define ARM_IDLECT2 (CLKGEN_REG_BASE + 0x8)
-#define ARM_EWUPCT (CLKGEN_REG_BASE + 0xC)
-#define ARM_RSTCT1 (CLKGEN_REG_BASE + 0x10)
-#define ARM_RSTCT2 (CLKGEN_REG_BASE + 0x14)
-#define ARM_SYSST (CLKGEN_REG_BASE + 0x18)
-#define ARM_IDLECT3 (CLKGEN_REG_BASE + 0x24)
-
-/* DPLL control registers */
-#define DPLL_CTL 0xfffecf00
-
-#endif /* __ASM_ARCH_TCC_HARDWARE_H */
diff --git a/arch/arm/plat-tcc/include/mach/io.h b/arch/arm/plat-tcc/include/mach/io.h
deleted file mode 100644
index 3e911d3ea0f..00000000000
--- a/arch/arm/plat-tcc/include/mach/io.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * IO definitions for TCC8000 processors and boards
- *
- * Copyright (C) 1997-1999 Russell King
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2010 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GNU Public License version 2.
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/arch/arm/plat-tcc/include/mach/irqs.h b/arch/arm/plat-tcc/include/mach/irqs.h
deleted file mode 100644
index da863894d49..00000000000
--- a/arch/arm/plat-tcc/include/mach/irqs.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * IRQ definitions for TCC8xxx
- *
- * Copyright (C) 2008-2009 Telechips
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#ifndef __ASM_ARCH_TCC_IRQS_H
-#define __ASM_ARCH_TCC_IRQS_H
-
-#define NR_IRQS 64
-
-/* PIC0 interrupts */
-#define INT_ADMA1 0
-#define INT_BDMA 1
-#define INT_ADMA0 2
-#define INT_GDMA1 3
-#define INT_I2S0RX 4
-#define INT_I2S0TX 5
-#define INT_TC 6
-#define INT_UART0 7
-#define INT_USBD 8
-#define INT_SPI0TX 9
-#define INT_UDMA 10
-#define INT_LIRQ 11
-#define INT_GDMA2 12
-#define INT_GDMA0 13
-#define INT_TC32 14
-#define INT_LCD 15
-#define INT_ADC 16
-#define INT_I2C 17
-#define INT_RTCP 18
-#define INT_RTCA 19
-#define INT_NFC 20
-#define INT_SD0 21
-#define INT_GSB0 22
-#define INT_PK 23
-#define INT_USBH0 24
-#define INT_USBH1 25
-#define INT_G2D 26
-#define INT_ECC 27
-#define INT_SPI0RX 28
-#define INT_UART1 29
-#define INT_MSCL 30
-#define INT_GSB1 31
-/* PIC1 interrupts */
-#define INT_E0 32
-#define INT_E1 33
-#define INT_E2 34
-#define INT_E3 35
-#define INT_E4 36
-#define INT_E5 37
-#define INT_E6 38
-#define INT_E7 39
-#define INT_UART2 40
-#define INT_UART3 41
-#define INT_SPI1TX 42
-#define INT_SPI1RX 43
-#define INT_GSB2 44
-#define INT_SPDIF 45
-#define INT_CDIF 46
-#define INT_VBON 47
-#define INT_VBOFF 48
-#define INT_SD1 49
-#define INT_UART4 50
-#define INT_GDMA3 51
-#define INT_I2S1RX 52
-#define INT_I2S1TX 53
-#define INT_CAN0 54
-#define INT_CAN1 55
-#define INT_GSB3 56
-#define INT_KRST 57
-#define INT_UNUSED 58
-#define INT_SD0D3 59
-#define INT_SD1D3 60
-#define INT_GPS0 61
-#define INT_GPS1 62
-#define INT_GPS2 63
-
-#endif /* ASM_ARCH_TCC_IRQS_H */
diff --git a/arch/arm/plat-tcc/include/mach/system.h b/arch/arm/plat-tcc/include/mach/system.h
deleted file mode 100644
index 909e6035d84..00000000000
--- a/arch/arm/plat-tcc/include/mach/system.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Author: <linux@telechips.com>
- * Created: June 10, 2008
- * Description: LINUX SYSTEM FUNCTIONS for TCC83x
- *
- * Copyright (C) 2008-2009 Telechips
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H
-#include <linux/clk.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-
-extern void plat_tcc_reboot(void);
-
-static inline void arch_idle(void)
-{
- cpu_do_idle();
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- plat_tcc_reboot();
-}
-
-#endif
diff --git a/arch/arm/plat-tcc/include/mach/tcc8k-regs.h b/arch/arm/plat-tcc/include/mach/tcc8k-regs.h
deleted file mode 100644
index 1d942829533..00000000000
--- a/arch/arm/plat-tcc/include/mach/tcc8k-regs.h
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Telechips TCC8000 register definitions
- *
- * (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPLv2.
- */
-
-#ifndef TCC8K_REGS_H
-#define TCC8K_REGS_H
-
-#include <linux/types.h>
-
-#define EXT_SDRAM_BASE 0x20000000
-#define INT_SRAM_BASE 0x30000000
-#define INT_SRAM_SIZE SZ_32K
-#define CS0_BASE 0x40000000
-#define CS1_BASE 0x50000000
-#define CS1_SIZE SZ_64K
-#define CS2_BASE 0x60000000
-#define CS3_BASE 0x70000000
-#define AHB_PERI_BASE 0x80000000
-#define AHB_PERI_SIZE SZ_64K
-#define APB0_PERI_BASE 0x90000000
-#define APB0_PERI_SIZE SZ_128K
-#define APB1_PERI_BASE 0x98000000
-#define APB1_PERI_SIZE SZ_128K
-#define DATA_TCM_BASE 0xa0000000
-#define DATA_TCM_SIZE SZ_8K
-#define EXT_MEM_CTRL_BASE 0xf0000000
-#define EXT_MEM_CTRL_SIZE SZ_4K
-
-#define CS1_BASE_VIRT (void __iomem *)0xf7000000
-#define AHB_PERI_BASE_VIRT (void __iomem *)0xf4000000
-#define APB0_PERI_BASE_VIRT (void __iomem *)0xf1000000
-#define APB1_PERI_BASE_VIRT (void __iomem *)0xf2000000
-#define EXT_MEM_CTRL_BASE_VIRT (void __iomem *)0xf3000000
-#define INT_SRAM_BASE_VIRT (void __iomem *)0xf5000000
-#define DATA_TCM_BASE_VIRT (void __iomem *)0xf6000000
-
-#define __REG(x) (*((volatile u32 *)(x)))
-
-/* USB Device Controller Registers */
-#define UDC_BASE (AHB_PERI_BASE_VIRT + 0x8000)
-#define UDC_BASE_PHYS (AHB_PERI_BASE + 0x8000)
-
-#define UDC_IR_OFFS 0x00
-#define UDC_EIR_OFFS 0x04
-#define UDC_EIER_OFFS 0x08
-#define UDC_FAR_OFFS 0x0c
-#define UDC_FNR_OFFS 0x10
-#define UDC_EDR_OFFS 0x14
-#define UDC_RT_OFFS 0x18
-#define UDC_SSR_OFFS 0x1c
-#define UDC_SCR_OFFS 0x20
-#define UDC_EP0SR_OFFS 0x24
-#define UDC_EP0CR_OFFS 0x28
-
-#define UDC_ESR_OFFS 0x2c
-#define UDC_ECR_OFFS 0x30
-#define UDC_BRCR_OFFS 0x34
-#define UDC_BWCR_OFFS 0x38
-#define UDC_MPR_OFFS 0x3c
-#define UDC_DCR_OFFS 0x40
-#define UDC_DTCR_OFFS 0x44
-#define UDC_DFCR_OFFS 0x48
-#define UDC_DTTCR1_OFFS 0x4c
-#define UDC_DTTCR2_OFFS 0x50
-#define UDC_ESR2_OFFS 0x54
-
-#define UDC_SCR2_OFFS 0x58
-#define UDC_EP0BUF_OFFS 0x60
-#define UDC_EP1BUF_OFFS 0x64
-#define UDC_EP2BUF_OFFS 0x68
-#define UDC_EP3BUF_OFFS 0x6c
-#define UDC_PLICR_OFFS 0xa0
-#define UDC_PCR_OFFS 0xa4
-
-#define UDC_UPCR0_OFFS 0xc8
-#define UDC_UPCR1_OFFS 0xcc
-#define UDC_UPCR2_OFFS 0xd0
-#define UDC_UPCR3_OFFS 0xd4
-
-/* Bits in UDC_EIR */
-#define UDC_EIR_EP0I (1 << 0)
-#define UDC_EIR_EP1I (1 << 1)
-#define UDC_EIR_EP2I (1 << 2)
-#define UDC_EIR_EP3I (1 << 3)
-#define UDC_EIR_EPI_MASK 0x0f
-
-/* Bits in UDC_EIER */
-#define UDC_EIER_EP0IE (1 << 0)
-#define UDC_EIER_EP1IE (1 << 1)
-#define UDC_EIER_EP2IE (1 << 2)
-#define UDC_EIER_EP3IE (1 << 3)
-
-/* Bits in UDC_FNR */
-#define UDC_FNR_FN_MASK 0x7ff
-#define UDC_FNR_SM (1 << 13)
-#define UDC_FNR_FTL (1 << 14)
-
-/* Bits in UDC_SSR */
-#define UDC_SSR_HFRES (1 << 0)
-#define UDC_SSR_HFSUSP (1 << 1)
-#define UDC_SSR_HFRM (1 << 2)
-#define UDC_SSR_SDE (1 << 3)
-#define UDC_SSR_HSP (1 << 4)
-#define UDC_SSR_DM (1 << 5)
-#define UDC_SSR_DP (1 << 6)
-#define UDC_SSR_TBM (1 << 7)
-#define UDC_SSR_VBON (1 << 8)
-#define UDC_SSR_VBOFF (1 << 9)
-#define UDC_SSR_EOERR (1 << 10)
-#define UDC_SSR_DCERR (1 << 11)
-#define UDC_SSR_TCERR (1 << 12)
-#define UDC_SSR_BSERR (1 << 13)
-#define UDC_SSR_TMERR (1 << 14)
-#define UDC_SSR_BAERR (1 << 15)
-
-/* Bits in UDC_SCR */
-#define UDC_SCR_HRESE (1 << 0)
-#define UDC_SCR_HSSPE (1 << 1)
-#define UDC_SCR_RRDE (1 << 5)
-#define UDC_SCR_SPDEN (1 << 6)
-#define UDC_SCR_DIEN (1 << 12)
-
-/* Bits in UDC_EP0SR */
-#define UDC_EP0SR_RSR (1 << 0)
-#define UDC_EP0SR_TST (1 << 1)
-#define UDC_EP0SR_SHT (1 << 4)
-#define UDC_EP0SR_LWO (1 << 6)
-
-/* Bits in UDC_EP0CR */
-#define UDC_EP0CR_ESS (1 << 1)
-
-/* Bits in UDC_ESR */
-#define UDC_ESR_RPS (1 << 0)
-#define UDC_ESR_TPS (1 << 1)
-#define UDC_ESR_LWO (1 << 4)
-#define UDC_ESR_FFS (1 << 6)
-
-/* Bits in UDC_ECR */
-#define UDC_ECR_ESS (1 << 1)
-#define UDC_ECR_CDP (1 << 2)
-
-#define UDC_ECR_FLUSH (1 << 6)
-#define UDC_ECR_DUEN (1 << 7)
-
-/* Bits in UDC_UPCR0 */
-#define UDC_UPCR0_VBD (1 << 1)
-#define UDC_UPCR0_VBDS (1 << 6)
-#define UDC_UPCR0_RCD_12 (0x0 << 9)
-#define UDC_UPCR0_RCD_24 (0x1 << 9)
-#define UDC_UPCR0_RCD_48 (0x2 << 9)
-#define UDC_UPCR0_RCS_EXT (0x1 << 11)
-#define UDC_UPCR0_RCS_XTAL (0x0 << 11)
-
-/* Bits in UDC_UPCR1 */
-#define UDC_UPCR1_CDT(x) ((x) << 0)
-#define UDC_UPCR1_OTGT(x) ((x) << 3)
-#define UDC_UPCR1_SQRXT(x) ((x) << 8)
-#define UDC_UPCR1_TXFSLST(x) ((x) << 12)
-
-/* Bits in UDC_UPCR2 */
-#define UDC_UPCR2_TP (1 << 0)
-#define UDC_UPCR2_TXRT(x) ((x) << 2)
-#define UDC_UPCR2_TXVRT(x) ((x) << 5)
-#define UDC_UPCR2_OPMODE(x) ((x) << 9)
-#define UDC_UPCR2_XCVRSEL(x) ((x) << 12)
-#define UDC_UPCR2_TM (1 << 14)
-
-/* USB Host Controller registers */
-#define USBH0_BASE (AHB_PERI_BASE_VIRT + 0xb000)
-#define USBH1_BASE (AHB_PERI_BASE_VIRT + 0xb800)
-
-#define OHCI_INT_ENABLE_OFFS 0x10
-
-#define RH_DESCRIPTOR_A_OFFS 0x48
-#define RH_DESCRIPTOR_B_OFFS 0x4c
-
-#define USBHTCFG0_OFFS 0x100
-#define USBHHCFG0_OFFS 0x104
-#define USBHHCFG1_OFFS 0x104
-
-/* DMA controller registers */
-#define DMAC0_BASE (AHB_PERI_BASE + 0x4000)
-#define DMAC1_BASE (AHB_PERI_BASE + 0xa000)
-#define DMAC2_BASE (AHB_PERI_BASE + 0x4800)
-#define DMAC3_BASE (AHB_PERI_BASE + 0xa800)
-
-#define DMAC_CH_OFFSET(ch) (ch * 0x30)
-
-#define ST_SADR_OFFS 0x00
-#define SPARAM_OFFS 0x04
-#define C_SADR_OFFS 0x0c
-#define ST_DADR_OFFS 0x10
-#define DPARAM_OFFS 0x14
-#define C_DADR_OFFS 0x1c
-#define HCOUNT_OFFS 0x20
-#define CHCTRL_OFFS 0x24
-#define RPTCTRL_OFFS 0x28
-#define EXTREQ_A_OFFS 0x2c
-
-/* Bits in CHCTRL register */
-#define CHCTRL_EN (1 << 0)
-
-#define CHCTRL_IEN (1 << 2)
-#define CHCTRL_FLAG (1 << 3)
-#define CHCTRL_WSIZE8 (0 << 4)
-#define CHCTRL_WSIZE16 (1 << 4)
-#define CHCTRL_WSIZE32 (2 << 4)
-
-#define CHCTRL_BSIZE1 (0 << 6)
-#define CHCTRL_BSIZE2 (1 << 6)
-#define CHCTRL_BSIZE4 (2 << 6)
-#define CHCTRL_BSIZE8 (3 << 6)
-
-#define CHCTRL_TYPE_SINGLE_E (0 << 8)
-#define CHCTRL_TYPE_HW (1 << 8)
-#define CHCTRL_TYPE_SW (2 << 8)
-#define CHCTRL_TYPE_SINGLE_L (3 << 8)
-
-#define CHCTRL_BST (1 << 10)
-
-/* Use DMA controller 0, channel 2 for USB */
-#define USB_DMA_BASE (DMAC0_BASE + DMAC_CH_OFFSET(2))
-
-/* NAND flash controller registers */
-#define NFC_BASE (AHB_PERI_BASE_VIRT + 0xd000)
-#define NFC_BASE_PHYS (AHB_PERI_BASE + 0xd000)
-
-#define NFC_CMD_OFFS 0x00
-#define NFC_LADDR_OFFS 0x04
-#define NFC_BADDR_OFFS 0x08
-#define NFC_SADDR_OFFS 0x0c
-#define NFC_WDATA_OFFS 0x10
-#define NFC_LDATA_OFFS 0x20
-#define NFC_SDATA_OFFS 0x40
-#define NFC_CTRL_OFFS 0x50
-#define NFC_PSTART_OFFS 0x54
-#define NFC_RSTART_OFFS 0x58
-#define NFC_DSIZE_OFFS 0x5c
-#define NFC_IREQ_OFFS 0x60
-#define NFC_RST_OFFS 0x64
-#define NFC_CTRL1_OFFS 0x68
-#define NFC_MDATA_OFFS 0x70
-
-#define NFC_WDATA_PHYS_ADDR (NFC_BASE_PHYS + NFC_WDATA_OFFS)
-
-/* Bits in NFC_CTRL */
-#define NFC_CTRL_BHLD_MASK (0xf << 0)
-#define NFC_CTRL_BPW_MASK (0xf << 4)
-#define NFC_CTRL_BSTP_MASK (0xf << 8)
-#define NFC_CTRL_CADDR_MASK (0x7 << 12)
-#define NFC_CTRL_CADDR_1 (0x0 << 12)
-#define NFC_CTRL_CADDR_2 (0x1 << 12)
-#define NFC_CTRL_CADDR_3 (0x2 << 12)
-#define NFC_CTRL_CADDR_4 (0x3 << 12)
-#define NFC_CTRL_CADDR_5 (0x4 << 12)
-#define NFC_CTRL_MSK (1 << 15)
-#define NFC_CTRL_PSIZE256 (0 << 16)
-#define NFC_CTRL_PSIZE512 (1 << 16)
-#define NFC_CTRL_PSIZE1024 (2 << 16)
-#define NFC_CTRL_PSIZE2048 (3 << 16)
-#define NFC_CTRL_PSIZE4096 (4 << 16)
-#define NFC_CTRL_PSIZE_MASK (7 << 16)
-#define NFC_CTRL_BSIZE1 (0 << 19)
-#define NFC_CTRL_BSIZE2 (1 << 19)
-#define NFC_CTRL_BSIZE4 (2 << 19)
-#define NFC_CTRL_BSIZE8 (3 << 19)
-#define NFC_CTRL_BSIZE_MASK (3 << 19)
-#define NFC_CTRL_RDY (1 << 21)
-#define NFC_CTRL_CS0SEL (1 << 22)
-#define NFC_CTRL_CS1SEL (1 << 23)
-#define NFC_CTRL_CS2SEL (1 << 24)
-#define NFC_CTRL_CS3SEL (1 << 25)
-#define NFC_CTRL_CSMASK (0xf << 22)
-#define NFC_CTRL_BW (1 << 26)
-#define NFC_CTRL_FS (1 << 27)
-#define NFC_CTRL_DEN (1 << 28)
-#define NFC_CTRL_READ_IEN (1 << 29)
-#define NFC_CTRL_PROG_IEN (1 << 30)
-#define NFC_CTRL_RDY_IEN (1 << 31)
-
-/* Bits in NFC_IREQ */
-#define NFC_IREQ_IRQ0 (1 << 0)
-#define NFC_IREQ_IRQ1 (1 << 1)
-#define NFC_IREQ_IRQ2 (1 << 2)
-
-#define NFC_IREQ_FLAG0 (1 << 4)
-#define NFC_IREQ_FLAG1 (1 << 5)
-#define NFC_IREQ_FLAG2 (1 << 6)
-
-/* MMC controller registers */
-#define MMC0_BASE (AHB_PERI_BASE_VIRT + 0xe000)
-#define MMC1_BASE (AHB_PERI_BASE_VIRT + 0xe800)
-
-/* UART base addresses */
-
-#define UART0_BASE (APB0_PERI_BASE_VIRT + 0x07000)
-#define UART0_BASE_PHYS (APB0_PERI_BASE + 0x07000)
-#define UART1_BASE (APB0_PERI_BASE_VIRT + 0x08000)
-#define UART1_BASE_PHYS (APB0_PERI_BASE + 0x08000)
-#define UART2_BASE (APB0_PERI_BASE_VIRT + 0x09000)
-#define UART2_BASE_PHYS (APB0_PERI_BASE + 0x09000)
-#define UART3_BASE (APB0_PERI_BASE_VIRT + 0x0a000)
-#define UART3_BASE_PHYS (APB0_PERI_BASE + 0x0a000)
-#define UART4_BASE (APB0_PERI_BASE_VIRT + 0x15000)
-#define UART4_BASE_PHYS (APB0_PERI_BASE + 0x15000)
-
-#define UART_BASE UART0_BASE
-#define UART_BASE_PHYS UART0_BASE_PHYS
-
-/* ECC controller */
-#define ECC_CTR_BASE (APB0_PERI_BASE_VIRT + 0xd000)
-
-#define ECC_CTRL_OFFS 0x00
-#define ECC_BASE_OFFS 0x04
-#define ECC_MASK_OFFS 0x08
-#define ECC_CLEAR_OFFS 0x0c
-#define ECC4_0_OFFS 0x10
-#define ECC4_1_OFFS 0x14
-
-#define ECC_EADDR0_OFFS 0x50
-
-#define ECC_ERRNUM_OFFS 0x90
-#define ECC_IREQ_OFFS 0x94
-
-/* Bits in ECC_CTRL */
-#define ECC_CTRL_ECC4_DIEN (1 << 28)
-#define ECC_CTRL_ECC8_DIEN (1 << 29)
-#define ECC_CTRL_ECC12_DIEN (1 << 30)
-#define ECC_CTRL_ECC_DISABLE 0x0
-#define ECC_CTRL_ECC_SLC_ENC 0x8
-#define ECC_CTRL_ECC_SLC_DEC 0x9
-#define ECC_CTRL_ECC4_ENC 0xa
-#define ECC_CTRL_ECC4_DEC 0xb
-#define ECC_CTRL_ECC8_ENC 0xc
-#define ECC_CTRL_ECC8_DEC 0xd
-#define ECC_CTRL_ECC12_ENC 0xe
-#define ECC_CTRL_ECC12_DEC 0xf
-
-/* Bits in ECC_IREQ */
-#define ECC_IREQ_E4DI (1 << 4)
-
-#define ECC_IREQ_E4DF (1 << 20)
-#define ECC_IREQ_E4EF (1 << 21)
-
-/* Interrupt controller */
-
-#define PIC0_BASE (APB1_PERI_BASE_VIRT + 0x3000)
-#define PIC0_BASE_PHYS (APB1_PERI_BASE + 0x3000)
-
-#define PIC0_IEN_OFFS 0x00
-#define PIC0_CREQ_OFFS 0x04
-#define PIC0_IREQ_OFFS 0x08
-#define PIC0_IRQSEL_OFFS 0x0c
-#define PIC0_SRC_OFFS 0x10
-#define PIC0_MREQ_OFFS 0x14
-#define PIC0_TSTREQ_OFFS 0x18
-#define PIC0_POL_OFFS 0x1c
-#define PIC0_IRQ_OFFS 0x20
-#define PIC0_FIQ_OFFS 0x24
-#define PIC0_MIRQ_OFFS 0x28
-#define PIC0_MFIQ_OFFS 0x2c
-#define PIC0_TMODE_OFFS 0x30
-#define PIC0_SYNC_OFFS 0x34
-#define PIC0_WKUP_OFFS 0x38
-#define PIC0_TMODEA_OFFS 0x3c
-#define PIC0_INTOEN_OFFS 0x40
-#define PIC0_MEN0_OFFS 0x44
-#define PIC0_MEN_OFFS 0x48
-
-#define PIC0_IEN __REG(PIC0_BASE + PIC0_IEN_OFFS)
-#define PIC0_IEN_PHYS __REG(PIC0_BASE_PHYS + PIC0_IEN_OFFS)
-#define PIC0_CREQ __REG(PIC0_BASE + PIC0_CREQ_OFFS)
-#define PIC0_CREQ_PHYS __REG(PIC0_BASE_PHYS + PIC0_CREQ_OFFS)
-#define PIC0_IREQ __REG(PIC0_BASE + PIC0_IREQ_OFFS)
-#define PIC0_IRQSEL __REG(PIC0_BASE + PIC0_IRQSEL_OFFS)
-#define PIC0_IRQSEL_PHYS __REG(PIC0_BASE_PHYS + PIC0_IRQSEL_OFFS)
-#define PIC0_SRC __REG(PIC0_BASE + PIC0_SRC_OFFS)
-#define PIC0_MREQ __REG(PIC0_BASE + PIC0_MREQ_OFFS)
-#define PIC0_TSTREQ __REG(PIC0_BASE + PIC0_TSTREQ_OFFS)
-#define PIC0_POL __REG(PIC0_BASE + PIC0_POL_OFFS)
-#define PIC0_IRQ __REG(PIC0_BASE + PIC0_IRQ_OFFS)
-#define PIC0_FIQ __REG(PIC0_BASE + PIC0_FIQ_OFFS)
-#define PIC0_MIRQ __REG(PIC0_BASE + PIC0_MIRQ_OFFS)
-#define PIC0_MFIQ __REG(PIC0_BASE + PIC0_MFIQ_OFFS)
-#define PIC0_TMODE __REG(PIC0_BASE + PIC0_TMODE_OFFS)
-#define PIC0_TMODE_PHYS __REG(PIC0_BASE_PHYS + PIC0_TMODE_OFFS)
-#define PIC0_SYNC __REG(PIC0_BASE + PIC0_SYNC_OFFS)
-#define PIC0_WKUP __REG(PIC0_BASE + PIC0_WKUP_OFFS)
-#define PIC0_TMODEA __REG(PIC0_BASE + PIC0_TMODEA_OFFS)
-#define PIC0_INTOEN __REG(PIC0_BASE + PIC0_INTOEN_OFFS)
-#define PIC0_MEN0 __REG(PIC0_BASE + PIC0_MEN0_OFFS)
-#define PIC0_MEN __REG(PIC0_BASE + PIC0_MEN_OFFS)
-
-#define PIC1_BASE (APB1_PERI_BASE_VIRT + 0x3080)
-
-#define PIC1_IEN_OFFS 0x00
-#define PIC1_CREQ_OFFS 0x04
-#define PIC1_IREQ_OFFS 0x08
-#define PIC1_IRQSEL_OFFS 0x0c
-#define PIC1_SRC_OFFS 0x10
-#define PIC1_MREQ_OFFS 0x14
-#define PIC1_TSTREQ_OFFS 0x18
-#define PIC1_POL_OFFS 0x1c
-#define PIC1_IRQ_OFFS 0x20
-#define PIC1_FIQ_OFFS 0x24
-#define PIC1_MIRQ_OFFS 0x28
-#define PIC1_MFIQ_OFFS 0x2c
-#define PIC1_TMODE_OFFS 0x30
-#define PIC1_SYNC_OFFS 0x34
-#define PIC1_WKUP_OFFS 0x38
-#define PIC1_TMODEA_OFFS 0x3c
-#define PIC1_INTOEN_OFFS 0x40
-#define PIC1_MEN1_OFFS 0x44
-#define PIC1_MEN_OFFS 0x48
-
-#define PIC1_IEN __REG(PIC1_BASE + PIC1_IEN_OFFS)
-#define PIC1_CREQ __REG(PIC1_BASE + PIC1_CREQ_OFFS)
-#define PIC1_IREQ __REG(PIC1_BASE + PIC1_IREQ_OFFS)
-#define PIC1_IRQSEL __REG(PIC1_BASE + PIC1_IRQSEL_OFFS)
-#define PIC1_SRC __REG(PIC1_BASE + PIC1_SRC_OFFS)
-#define PIC1_MREQ __REG(PIC1_BASE + PIC1_MREQ_OFFS)
-#define PIC1_TSTREQ __REG(PIC1_BASE + PIC1_TSTREQ_OFFS)
-#define PIC1_POL __REG(PIC1_BASE + PIC1_POL_OFFS)
-#define PIC1_IRQ __REG(PIC1_BASE + PIC1_IRQ_OFFS)
-#define PIC1_FIQ __REG(PIC1_BASE + PIC1_FIQ_OFFS)
-#define PIC1_MIRQ __REG(PIC1_BASE + PIC1_MIRQ_OFFS)
-#define PIC1_MFIQ __REG(PIC1_BASE + PIC1_MFIQ_OFFS)
-#define PIC1_TMODE __REG(PIC1_BASE + PIC1_TMODE_OFFS)
-#define PIC1_SYNC __REG(PIC1_BASE + PIC1_SYNC_OFFS)
-#define PIC1_WKUP __REG(PIC1_BASE + PIC1_WKUP_OFFS)
-#define PIC1_TMODEA __REG(PIC1_BASE + PIC1_TMODEA_OFFS)
-#define PIC1_INTOEN __REG(PIC1_BASE + PIC1_INTOEN_OFFS)
-#define PIC1_MEN1 __REG(PIC1_BASE + PIC1_MEN1_OFFS)
-#define PIC1_MEN __REG(PIC1_BASE + PIC1_MEN_OFFS)
-
-/* Timer registers */
-#define TIMER_BASE (APB1_PERI_BASE_VIRT + 0x4000)
-#define TIMER_BASE_PHYS (APB1_PERI_BASE + 0x4000)
-
-#define TWDCFG_OFFS 0x70
-
-#define TC32EN_OFFS 0x80
-#define TC32LDV_OFFS 0x84
-#define TC32CMP0_OFFS 0x88
-#define TC32CMP1_OFFS 0x8c
-#define TC32PCNT_OFFS 0x90
-#define TC32MCNT_OFFS 0x94
-#define TC32IRQ_OFFS 0x98
-
-/* Bits in TC32EN */
-#define TC32EN_PRESCALE_MASK 0x00ffffff
-#define TC32EN_ENABLE (1 << 24)
-#define TC32EN_LOADZERO (1 << 25)
-#define TC32EN_STOPMODE (1 << 26)
-#define TC32EN_LDM0 (1 << 28)
-#define TC32EN_LDM1 (1 << 29)
-
-/* Bits in TC32IRQ */
-#define TC32IRQ_MSTAT_MASK 0x0000001f
-#define TC32IRQ_RSTAT_MASK (0x1f << 8)
-#define TC32IRQ_IRQEN0 (1 << 16)
-#define TC32IRQ_IRQEN1 (1 << 17)
-#define TC32IRQ_IRQEN2 (1 << 18)
-#define TC32IRQ_IRQEN3 (1 << 19)
-#define TC32IRQ_IRQEN4 (1 << 20)
-#define TC32IRQ_RSYNC (1 << 30)
-#define TC32IRQ_IRQCLR (1 << 31)
-
-/* GPIO registers */
-#define GPIOPD_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPD_DAT_OFFS 0x00
-#define GPIOPD_DOE_OFFS 0x04
-#define GPIOPD_FS0_OFFS 0x08
-#define GPIOPD_FS1_OFFS 0x0c
-#define GPIOPD_FS2_OFFS 0x10
-#define GPIOPD_RPU_OFFS 0x30
-#define GPIOPD_RPD_OFFS 0x34
-#define GPIOPD_DV0_OFFS 0x38
-#define GPIOPD_DV1_OFFS 0x3c
-
-#define GPIOPS_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPS_DAT_OFFS 0x40
-#define GPIOPS_DOE_OFFS 0x44
-#define GPIOPS_FS0_OFFS 0x48
-#define GPIOPS_FS1_OFFS 0x4c
-#define GPIOPS_FS2_OFFS 0x50
-#define GPIOPS_FS3_OFFS 0x54
-#define GPIOPS_RPU_OFFS 0x70
-#define GPIOPS_RPD_OFFS 0x74
-#define GPIOPS_DV0_OFFS 0x78
-#define GPIOPS_DV1_OFFS 0x7c
-
-#define GPIOPS_FS1_SDH0_BITS 0x000000ff
-#define GPIOPS_FS1_SDH1_BITS 0x0000ff00
-
-#define GPIOPU_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOPU_DAT_OFFS 0x80
-#define GPIOPU_DOE_OFFS 0x84
-#define GPIOPU_FS0_OFFS 0x88
-#define GPIOPU_FS1_OFFS 0x8c
-#define GPIOPU_FS2_OFFS 0x90
-#define GPIOPU_RPU_OFFS 0xb0
-#define GPIOPU_RPD_OFFS 0xb4
-#define GPIOPU_DV0_OFFS 0xb8
-#define GPIOPU_DV1_OFFS 0xbc
-
-#define GPIOPU_FS0_TXD0 (1 << 0)
-#define GPIOPU_FS0_RXD0 (1 << 1)
-#define GPIOPU_FS0_CTS0 (1 << 2)
-#define GPIOPU_FS0_RTS0 (1 << 3)
-#define GPIOPU_FS0_TXD1 (1 << 4)
-#define GPIOPU_FS0_RXD1 (1 << 5)
-#define GPIOPU_FS0_CTS1 (1 << 6)
-#define GPIOPU_FS0_RTS1 (1 << 7)
-#define GPIOPU_FS0_TXD2 (1 << 8)
-#define GPIOPU_FS0_RXD2 (1 << 9)
-#define GPIOPU_FS0_CTS2 (1 << 10)
-#define GPIOPU_FS0_RTS2 (1 << 11)
-#define GPIOPU_FS0_TXD3 (1 << 12)
-#define GPIOPU_FS0_RXD3 (1 << 13)
-#define GPIOPU_FS0_CTS3 (1 << 14)
-#define GPIOPU_FS0_RTS3 (1 << 15)
-#define GPIOPU_FS0_TXD4 (1 << 16)
-#define GPIOPU_FS0_RXD4 (1 << 17)
-#define GPIOPU_FS0_CTS4 (1 << 18)
-#define GPIOPU_FS0_RTS4 (1 << 19)
-
-#define GPIOFC_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOFC_DAT_OFFS 0xc0
-#define GPIOFC_DOE_OFFS 0xc4
-#define GPIOFC_FS0_OFFS 0xc8
-#define GPIOFC_FS1_OFFS 0xcc
-#define GPIOFC_FS2_OFFS 0xd0
-#define GPIOFC_FS3_OFFS 0xd4
-#define GPIOFC_RPU_OFFS 0xf0
-#define GPIOFC_RPD_OFFS 0xf4
-#define GPIOFC_DV0_OFFS 0xf8
-#define GPIOFC_DV1_OFFS 0xfc
-
-#define GPIOFD_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOFD_DAT_OFFS 0x100
-#define GPIOFD_DOE_OFFS 0x104
-#define GPIOFD_FS0_OFFS 0x108
-#define GPIOFD_FS1_OFFS 0x10c
-#define GPIOFD_FS2_OFFS 0x110
-#define GPIOFD_RPU_OFFS 0x130
-#define GPIOFD_RPD_OFFS 0x134
-#define GPIOFD_DV0_OFFS 0x138
-#define GPIOFD_DV1_OFFS 0x13c
-
-#define GPIOLC_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOLC_DAT_OFFS 0x140
-#define GPIOLC_DOE_OFFS 0x144
-#define GPIOLC_FS0_OFFS 0x148
-#define GPIOLC_FS1_OFFS 0x14c
-#define GPIOLC_RPU_OFFS 0x170
-#define GPIOLC_RPD_OFFS 0x174
-#define GPIOLC_DV0_OFFS 0x178
-#define GPIOLC_DV1_OFFS 0x17c
-
-#define GPIOLD_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOLD_DAT_OFFS 0x180
-#define GPIOLD_DOE_OFFS 0x184
-#define GPIOLD_FS0_OFFS 0x188
-#define GPIOLD_FS1_OFFS 0x18c
-#define GPIOLD_FS2_OFFS 0x190
-#define GPIOLD_RPU_OFFS 0x1b0
-#define GPIOLD_RPD_OFFS 0x1b4
-#define GPIOLD_DV0_OFFS 0x1b8
-#define GPIOLD_DV1_OFFS 0x1bc
-
-#define GPIOAD_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOAD_DAT_OFFS 0x1c0
-#define GPIOAD_DOE_OFFS 0x1c4
-#define GPIOAD_FS0_OFFS 0x1c8
-#define GPIOAD_RPU_OFFS 0x1f0
-#define GPIOAD_RPD_OFFS 0x1f4
-#define GPIOAD_DV0_OFFS 0x1f8
-#define GPIOAD_DV1_OFFS 0x1fc
-
-#define GPIOXC_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOXC_DAT_OFFS 0x200
-#define GPIOXC_DOE_OFFS 0x204
-#define GPIOXC_FS0_OFFS 0x208
-#define GPIOXC_RPU_OFFS 0x230
-#define GPIOXC_RPD_OFFS 0x234
-#define GPIOXC_DV0_OFFS 0x238
-#define GPIOXC_DV1_OFFS 0x23c
-
-#define GPIOXC_FS0 __REG(GPIOXC_BASE + GPIOXC_FS0_OFFS)
-
-#define GPIOXC_FS0_CS0 (1 << 26)
-#define GPIOXC_FS0_CS1 (1 << 27)
-
-#define GPIOXD_BASE (APB1_PERI_BASE_VIRT + 0x5000)
-
-#define GPIOXD_DAT_OFFS 0x240
-#define GPIOXD_FS0_OFFS 0x248
-#define GPIOXD_RPU_OFFS 0x270
-#define GPIOXD_RPD_OFFS 0x274
-#define GPIOXD_DV0_OFFS 0x278
-#define GPIOXD_DV1_OFFS 0x27c
-
-#define GPIOPK_BASE (APB1_PERI_BASE_VIRT + 0x1c000)
-
-#define GPIOPK_RST_OFFS 0x008
-#define GPIOPK_DAT_OFFS 0x100
-#define GPIOPK_DOE_OFFS 0x104
-#define GPIOPK_FS0_OFFS 0x108
-#define GPIOPK_FS1_OFFS 0x10c
-#define GPIOPK_FS2_OFFS 0x110
-#define GPIOPK_IRQST_OFFS 0x210
-#define GPIOPK_IRQEN_OFFS 0x214
-#define GPIOPK_IRQPOL_OFFS 0x218
-#define GPIOPK_IRQTM0_OFFS 0x21c
-#define GPIOPK_IRQTM1_OFFS 0x220
-#define GPIOPK_CTL_OFFS 0x22c
-
-#define PMGPIO_BASE (APB1_PERI_BASE_VIRT + 0x10000)
-#define BACKUP_RAM_BASE PMGPIO_BASE
-
-#define PMGPIO_DAT_OFFS 0x800
-#define PMGPIO_DOE_OFFS 0x804
-#define PMGPIO_FS0_OFFS 0x808
-#define PMGPIO_RPU_OFFS 0x810
-#define PMGPIO_RPD_OFFS 0x814
-#define PMGPIO_DV0_OFFS 0x818
-#define PMGPIO_DV1_OFFS 0x81c
-#define PMGPIO_EE0_OFFS 0x820
-#define PMGPIO_EE1_OFFS 0x824
-#define PMGPIO_CTL_OFFS 0x828
-#define PMGPIO_DI_OFFS 0x82c
-#define PMGPIO_STR_OFFS 0x830
-#define PMGPIO_STF_OFFS 0x834
-#define PMGPIO_POL_OFFS 0x838
-#define PMGPIO_APB_OFFS 0x800
-
-/* Clock controller registers */
-#define CKC_BASE ((void __iomem *)(APB1_PERI_BASE_VIRT + 0x6000))
-
-#define CLKCTRL_OFFS 0x00
-#define PLL0CFG_OFFS 0x04
-#define PLL1CFG_OFFS 0x08
-#define CLKDIVC0_OFFS 0x0c
-
-#define BCLKCTR0_OFFS 0x14
-#define SWRESET0_OFFS 0x18
-
-#define BCLKCTR1_OFFS 0x60
-#define SWRESET1_OFFS 0x64
-#define PWDCTL_OFFS 0x68
-#define PLL2CFG_OFFS 0x6c
-#define CLKDIVC1_OFFS 0x70
-
-#define ACLKREF_OFFS 0x80
-#define ACLKI2C_OFFS 0x84
-#define ACLKSPI0_OFFS 0x88
-#define ACLKSPI1_OFFS 0x8c
-#define ACLKUART0_OFFS 0x90
-#define ACLKUART1_OFFS 0x94
-#define ACLKUART2_OFFS 0x98
-#define ACLKUART3_OFFS 0x9c
-#define ACLKUART4_OFFS 0xa0
-#define ACLKTCT_OFFS 0xa4
-#define ACLKTCX_OFFS 0xa8
-#define ACLKTCZ_OFFS 0xac
-#define ACLKADC_OFFS 0xb0
-#define ACLKDAI0_OFFS 0xb4
-#define ACLKDAI1_OFFS 0xb8
-#define ACLKLCD_OFFS 0xbc
-#define ACLKSPDIF_OFFS 0xc0
-#define ACLKUSBH_OFFS 0xc4
-#define ACLKSDH0_OFFS 0xc8
-#define ACLKSDH1_OFFS 0xcc
-#define ACLKC3DEC_OFFS 0xd0
-#define ACLKEXT_OFFS 0xd4
-#define ACLKCAN0_OFFS 0xd8
-#define ACLKCAN1_OFFS 0xdc
-#define ACLKGSB0_OFFS 0xe0
-#define ACLKGSB1_OFFS 0xe4
-#define ACLKGSB2_OFFS 0xe8
-#define ACLKGSB3_OFFS 0xec
-
-#define PLLxCFG_PD (1 << 31)
-
-/* CLKCTRL bits */
-#define CLKCTRL_XE (1 << 31)
-
-/* CLKDIVCx bits */
-#define CLKDIVC0_XTE (1 << 7)
-#define CLKDIVC0_XE (1 << 15)
-#define CLKDIVC0_P1E (1 << 23)
-#define CLKDIVC0_P0E (1 << 31)
-
-#define CLKDIVC1_P2E (1 << 7)
-
-/* BCLKCTR0 clock bits */
-#define BCLKCTR0_USBD (1 << 4)
-#define BCLKCTR0_ECC (1 << 9)
-#define BCLKCTR0_USBH0 (1 << 11)
-#define BCLKCTR0_NFC (1 << 16)
-
-/* BCLKCTR1 clock bits */
-#define BCLKCTR1_USBH1 (1 << 20)
-
-/* SWRESET0 bits */
-#define SWRESET0_USBD (1 << 4)
-#define SWRESET0_USBH0 (1 << 11)
-
-/* SWRESET1 bits */
-#define SWRESET1_USBH1 (1 << 20)
-
-/* System clock sources.
- * Note: These are the clock sources that serve as parents for
- * all other clocks. They have no parents themselves.
- *
- * These values are used for struct clk->root_id. All clocks
- * that are not system clock sources have this value set to
- * CLK_SRC_NOROOT.
- * The values for system clocks start with CLK_SRC_PLL0 == 0
- * because this gives us exactly the values needed for the lower
- * 4 bits of ACLK_* registers. Therefore, CLK_SRC_NOROOT is
- * defined as -1 to not disturb the order.
- */
-enum root_clks {
- CLK_SRC_NOROOT = -1,
- CLK_SRC_PLL0 = 0,
- CLK_SRC_PLL1,
- CLK_SRC_PLL0DIV,
- CLK_SRC_PLL1DIV,
- CLK_SRC_XI,
- CLK_SRC_XIDIV,
- CLK_SRC_XTI,
- CLK_SRC_XTIDIV,
- CLK_SRC_PLL2,
- CLK_SRC_PLL2DIV,
- CLK_SRC_PK0,
- CLK_SRC_PK1,
- CLK_SRC_PK2,
- CLK_SRC_PK3,
- CLK_SRC_PK4,
- CLK_SRC_48MHZ
-};
-
-#define CLK_SRC_MASK 0xf
-
-/* Bits in ACLK* registers */
-#define ACLK_EN (1 << 28)
-#define ACLK_SEL_SHIFT 24
-#define ACLK_SEL_MASK 0x0f000000
-#define ACLK_DIV_MASK 0x00000fff
-
-/* System configuration registers */
-
-#define SCFG_BASE (APB1_PERI_BASE_VIRT + 0x13000)
-
-#define BMI_OFFS 0x00
-#define AHBCON0_OFFS 0x04
-#define APBPWE_OFFS 0x08
-#define DTCMWAIT_OFFS 0x0c
-#define ECCSEL_OFFS 0x10
-#define AHBCON1_OFFS 0x14
-#define SDHCFG_OFFS 0x18
-#define REMAP_OFFS 0x20
-#define LCDSIAE_OFFS 0x24
-#define XMCCFG_OFFS 0xe0
-#define IMCCFG_OFFS 0xe4
-
-/* Values for ECCSEL */
-#define ECCSEL_EXTMEM 0x0
-#define ECCSEL_DTCM 0x1
-#define ECCSEL_INT_SRAM 0x2
-#define ECCSEL_AHB 0x3
-
-/* Bits in XMCCFG */
-#define XMCCFG_NFCE (1 << 1)
-#define XMCCFG_FDXD (1 << 2)
-
-/* External memory controller registers */
-
-#define EMC_BASE EXT_MEM_CTRL_BASE
-
-#define SDCFG_OFFS 0x00
-#define SDFSM_OFFS 0x04
-#define MCFG_OFFS 0x08
-
-#define CSCFG0_OFFS 0x10
-#define CSCFG1_OFFS 0x14
-#define CSCFG2_OFFS 0x18
-#define CSCFG3_OFFS 0x1c
-
-#define MCFG_SDEN (1 << 4)
-
-#endif /* TCC8K_REGS_H */
diff --git a/arch/arm/plat-tcc/include/mach/timex.h b/arch/arm/plat-tcc/include/mach/timex.h
deleted file mode 100644
index 057acbe651d..00000000000
--- a/arch/arm/plat-tcc/include/mach/timex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * A definition needed by arch core code.
- *
- */
-#define CLOCK_TICK_RATE (HZ * 100000UL)
diff --git a/arch/arm/plat-tcc/include/mach/uncompress.h b/arch/arm/plat-tcc/include/mach/uncompress.h
deleted file mode 100644
index 7a3e33a27a3..00000000000
--- a/arch/arm/plat-tcc/include/mach/uncompress.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2009 Hans J. Koch <hjk@linutronix.de>
- *
- * This file is licensed under the terms of the GPL version 2.
- */
-
-#include <linux/serial_reg.h>
-#include <linux/types.h>
-
-#include <mach/tcc8k-regs.h>
-
-unsigned int system_rev;
-
-#define ID_MASK 0x7fff
-
-static void putc(int c)
-{
- u32 *uart_lsr = (u32 *)(UART_BASE_PHYS + (UART_LSR << 2));
- u32 *uart_tx = (u32 *)(UART_BASE_PHYS + (UART_TX << 2));
-
- while (!(*uart_lsr & UART_LSR_THRE))
- barrier();
- *uart_tx = c;
-}
-
-static inline void flush(void)
-{
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
-#define arch_decomp_wdog()
diff --git a/arch/arm/plat-tcc/include/mach/vmalloc.h b/arch/arm/plat-tcc/include/mach/vmalloc.h
deleted file mode 100644
index 99414d9c2b9..00000000000
--- a/arch/arm/plat-tcc/include/mach/vmalloc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Author: <linux@telechips.com>
- * Created: June 10, 2008
- *
- * Copyright (C) 2000 Russell King.
- * Copyright (C) 2008-2009 Telechips
- *
- * Licensed under the terms of the GPL v2.
- */
-#define VMALLOC_END 0xf0000000UL
diff --git a/arch/arm/plat-tcc/system.c b/arch/arm/plat-tcc/system.c
deleted file mode 100644
index cc208fae3e7..00000000000
--- a/arch/arm/plat-tcc/system.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * System functions for Telechips TCCxxxx SoCs
- *
- * Copyright (C) Hans J. Koch <hjk@linutronix.de>
- *
- * Licensed under the terms of the GPL v2.
- *
- */
-
-#include <linux/io.h>
-
-#include <mach/tcc8k-regs.h>
-
-/* System reboot */
-void plat_tcc_reboot(void)
-{
- /* Make sure clocks are on */
- __raw_writel(0xffffffff, CKC_BASE + BCLKCTR0_OFFS);
-
- /* Enable watchdog reset */
- __raw_writel(0x49, TIMER_BASE + TWDCFG_OFFS);
- /* Wait for reset */
- while(1)
- ;
-}
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
index 3d6a4c292ca..b33b74c8723 100644
--- a/arch/arm/plat-versatile/sched-clock.c
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -18,41 +18,24 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
#include <linux/io.h>
-#include <linux/sched.h>
#include <asm/sched_clock.h>
#include <plat/sched_clock.h>
-static DEFINE_CLOCK_DATA(cd);
static void __iomem *ctr;
-/*
- * Constants generated by clocks_calc_mult_shift(m, s, 24MHz, NSEC_PER_SEC, 60).
- * This gives a resolution of about 41ns and a wrap period of about 178s.
- */
-#define SC_MULT 2796202667u
-#define SC_SHIFT 26
-
-unsigned long long notrace sched_clock(void)
+static u32 notrace versatile_read_sched_clock(void)
{
- if (ctr) {
- u32 cyc = readl(ctr);
- return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0,
- SC_MULT, SC_SHIFT);
- } else
- return 0;
-}
+ if (ctr)
+ return readl(ctr);
-static void notrace versatile_update_sched_clock(void)
-{
- u32 cyc = readl(ctr);
- update_sched_clock(&cd, cyc, (u32)~0);
+ return 0;
}
void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
{
ctr = reg;
- init_fixed_sched_clock(&cd, versatile_update_sched_clock,
- 32, rate, SC_MULT, SC_SHIFT);
+ setup_sched_clock(versatile_read_sched_clock, 32, rate);
}
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5bdeef96984..f9c9f33f8cb 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
# are merged into mainline or have been edited in the machine database
# within the last 12 months. References to machine_is_NAME() do not count!
#
-# Last update: Sat May 7 08:48:24 2011
+# Last update: Tue Dec 6 11:07:38 2011
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -269,7 +269,7 @@ dns323 MACH_DNS323 DNS323 1542
omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
pcm038 MACH_PCM038 PCM038 1551
-ts_x09 MACH_TS209 TS209 1565
+ts209 MACH_TS209 TS209 1565
at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
@@ -321,7 +321,6 @@ lb88rc8480 MACH_LB88RC8480 LB88RC8480 1769
mx25_3ds MACH_MX25_3DS MX25_3DS 1771
omap3530_lv_som MACH_OMAP3530_LV_SOM OMAP3530_LV_SOM 1773
davinci_da830_evm MACH_DAVINCI_DA830_EVM DAVINCI_DA830_EVM 1781
-at572d940hfek MACH_AT572D940HFEB AT572D940HFEB 1783
dove_db MACH_DOVE_DB DOVE_DB 1788
overo MACH_OVERO OVERO 1798
at2440evb MACH_AT2440EVB AT2440EVB 1799
@@ -459,7 +458,7 @@ guruplug MACH_GURUPLUG GURUPLUG 2659
spear310 MACH_SPEAR310 SPEAR310 2660
spear320 MACH_SPEAR320 SPEAR320 2661
aquila MACH_AQUILA AQUILA 2676
-sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+esata_sheevaplug MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679
ea2478devkit MACH_EA2478DEVKIT EA2478DEVKIT 2683
terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697
@@ -491,380 +490,53 @@ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
smdkc210 MACH_SMDKC210 SMDKC210 2838
-omap3_braillo MACH_OMAP3_BRAILLO OMAP3_BRAILLO 2839
-spyplug MACH_SPYPLUG SPYPLUG 2840
-ginger MACH_GINGER GINGER 2841
-tny_t3530 MACH_TNY_T3530 TNY_T3530 2842
pca102 MACH_PCA102 PCA102 2843
-spade MACH_SPADE SPADE 2844
-mxc25_topaz MACH_MXC25_TOPAZ MXC25_TOPAZ 2845
t5325 MACH_T5325 T5325 2846
-gw2361 MACH_GW2361 GW2361 2847
-elog MACH_ELOG ELOG 2848
income MACH_INCOME INCOME 2849
-bcm589x MACH_BCM589X BCM589X 2850
-etna MACH_ETNA ETNA 2851
-hawks MACH_HAWKS HAWKS 2852
-meson MACH_MESON MESON 2853
-xsbase255 MACH_XSBASE255 XSBASE255 2854
-pvm2030 MACH_PVM2030 PVM2030 2855
-mioa502 MACH_MIOA502 MIOA502 2856
vvbox_sdorig2 MACH_VVBOX_SDORIG2 VVBOX_SDORIG2 2857
vvbox_sdlite2 MACH_VVBOX_SDLITE2 VVBOX_SDLITE2 2858
vvbox_sdpro4 MACH_VVBOX_SDPRO4 VVBOX_SDPRO4 2859
-htc_spv_m700 MACH_HTC_SPV_M700 HTC_SPV_M700 2860
mx257sx MACH_MX257SX MX257SX 2861
goni MACH_GONI GONI 2862
-msm8x55_svlte_ffa MACH_MSM8X55_SVLTE_FFA MSM8X55_SVLTE_FFA 2863
-msm8x55_svlte_surf MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF 2864
-quickstep MACH_QUICKSTEP QUICKSTEP 2865
-dmw96 MACH_DMW96 DMW96 2866
-hammerhead MACH_HAMMERHEAD HAMMERHEAD 2867
-trident MACH_TRIDENT TRIDENT 2868
-lightning MACH_LIGHTNING LIGHTNING 2869
-iconnect MACH_ICONNECT ICONNECT 2870
-autobot MACH_AUTOBOT AUTOBOT 2871
-coconut MACH_COCONUT COCONUT 2872
-durian MACH_DURIAN DURIAN 2873
-cayenne MACH_CAYENNE CAYENNE 2874
-fuji MACH_FUJI FUJI 2875
-synology_6282 MACH_SYNOLOGY_6282 SYNOLOGY_6282 2876
-em1sy MACH_EM1SY EM1SY 2877
-m502 MACH_M502 M502 2878
-matrix518 MACH_MATRIX518 MATRIX518 2879
-tiny_gurnard MACH_TINY_GURNARD TINY_GURNARD 2880
-spear1310 MACH_SPEAR1310 SPEAR1310 2881
bv07 MACH_BV07 BV07 2882
-mxt_td61 MACH_MXT_TD61 MXT_TD61 2883
openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
devixp MACH_DEVIXP DEVIXP 2885
miccpt MACH_MICCPT MICCPT 2886
mic256 MACH_MIC256 MIC256 2887
-as1167 MACH_AS1167 AS1167 2888
-omap3_ibiza MACH_OMAP3_IBIZA OMAP3_IBIZA 2889
u5500 MACH_U5500 U5500 2890
-davinci_picto MACH_DAVINCI_PICTO DAVINCI_PICTO 2891
-mecha MACH_MECHA MECHA 2892
-bubba3 MACH_BUBBA3 BUBBA3 2893
-pupitre MACH_PUPITRE PUPITRE 2894
-tegra_vogue MACH_TEGRA_VOGUE TEGRA_VOGUE 2896
-tegra_e1165 MACH_TEGRA_E1165 TEGRA_E1165 2897
-simplenet MACH_SIMPLENET SIMPLENET 2898
-ec4350tbm MACH_EC4350TBM EC4350TBM 2899
-pec_tc MACH_PEC_TC PEC_TC 2900
-pec_hc2 MACH_PEC_HC2 PEC_HC2 2901
-esl_mobilis_a MACH_ESL_MOBILIS_A ESL_MOBILIS_A 2902
-esl_mobilis_b MACH_ESL_MOBILIS_B ESL_MOBILIS_B 2903
-esl_wave_a MACH_ESL_WAVE_A ESL_WAVE_A 2904
-esl_wave_b MACH_ESL_WAVE_B ESL_WAVE_B 2905
-unisense_mmm MACH_UNISENSE_MMM UNISENSE_MMM 2906
-blueshark MACH_BLUESHARK BLUESHARK 2907
-e10 MACH_E10 E10 2908
-app3k_robin MACH_APP3K_ROBIN APP3K_ROBIN 2909
-pov15hd MACH_POV15HD POV15HD 2910
-stella MACH_STELLA STELLA 2911
linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913
-netwalker MACH_NETWALKER NETWALKER 2914
-acsx106 MACH_ACSX106 ACSX106 2915
-atlas5_c1 MACH_ATLAS5_C1 ATLAS5_C1 2916
-nsb3ast MACH_NSB3AST NSB3AST 2917
-gnet_slc MACH_GNET_SLC GNET_SLC 2918
-af4000 MACH_AF4000 AF4000 2919
-ark9431 MACH_ARK9431 ARK9431 2920
-fs_s5pc100 MACH_FS_S5PC100 FS_S5PC100 2921
-omap3505nova8 MACH_OMAP3505NOVA8 OMAP3505NOVA8 2922
-omap3621_edp1 MACH_OMAP3621_EDP1 OMAP3621_EDP1 2923
-oratisaes MACH_ORATISAES ORATISAES 2924
smdkv310 MACH_SMDKV310 SMDKV310 2925
-siemens_l0 MACH_SIEMENS_L0 SIEMENS_L0 2926
-ventana MACH_VENTANA VENTANA 2927
wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928
-ec4350sdb MACH_EC4350SDB EC4350SDB 2929
-mimas MACH_MIMAS MIMAS 2930
-titan MACH_TITAN TITAN 2931
craneboard MACH_CRANEBOARD CRANEBOARD 2932
-es2440 MACH_ES2440 ES2440 2933
-najay_a9263 MACH_NAJAY_A9263 NAJAY_A9263 2934
-htctornado MACH_HTCTORNADO HTCTORNADO 2935
-dimm_mx257 MACH_DIMM_MX257 DIMM_MX257 2936
-jigen301 MACH_JIGEN JIGEN 2937
smdk6450 MACH_SMDK6450 SMDK6450 2938
-meno_qng MACH_MENO_QNG MENO_QNG 2939
-ns2416 MACH_NS2416 NS2416 2940
-rpc353 MACH_RPC353 RPC353 2941
-tq6410 MACH_TQ6410 TQ6410 2942
-sky6410 MACH_SKY6410 SKY6410 2943
-dynasty MACH_DYNASTY DYNASTY 2944
-vivo MACH_VIVO VIVO 2945
-bury_bl7582 MACH_BURY_BL7582 BURY_BL7582 2946
-bury_bps5270 MACH_BURY_BPS5270 BURY_BPS5270 2947
-basi MACH_BASI BASI 2948
-tn200 MACH_TN200 TN200 2949
-c2mmi MACH_C2MMI C2MMI 2950
-meson_6236m MACH_MESON_6236M MESON_6236M 2951
-meson_8626m MACH_MESON_8626M MESON_8626M 2952
-tube MACH_TUBE TUBE 2953
-messina MACH_MESSINA MESSINA 2954
-mx50_arm2 MACH_MX50_ARM2 MX50_ARM2 2955
-cetus9263 MACH_CETUS9263 CETUS9263 2956
brownstone MACH_BROWNSTONE BROWNSTONE 2957
-vmx25 MACH_VMX25 VMX25 2958
-vmx51 MACH_VMX51 VMX51 2959
-abacus MACH_ABACUS ABACUS 2960
-cm4745 MACH_CM4745 CM4745 2961
-oratislink MACH_ORATISLINK ORATISLINK 2962
-davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
-netviz MACH_NETVIZ NETVIZ 2964
flexibity MACH_FLEXIBITY FLEXIBITY 2965
-wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
-lpc24xx MACH_LPC24XX LPC24XX 2967
-spica MACH_SPICA SPICA 2968
-gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
-bipnet MACH_BIPNET BIPNET 2970
-overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
-davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
-pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
-ptx7545 MACH_PTX7545 PTX7545 2974
-tm_efdc MACH_TM_EFDC TM_EFDC 2975
-omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
-flyer MACH_FLYER FLYER 2978
-tornado3240 MACH_TORNADO3240 TORNADO3240 2979
-soli_01 MACH_SOLI_01 SOLI_01 2980
-omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
-helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
-netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
-ssc MACH_SSC SSC 2984
-premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
-wasabi MACH_WASABI WASABI 2986
mx50_rdp MACH_MX50_RDP MX50_RDP 2988
universal_c210 MACH_UNIVERSAL_C210 UNIVERSAL_C210 2989
real6410 MACH_REAL6410 REAL6410 2990
-spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
-ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
-omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
-thebe MACH_THEBE THEBE 2994
-rv082 MACH_RV082 RV082 2995
-armlguest MACH_ARMLGUEST ARMLGUEST 2996
-tjinc1000 MACH_TJINC1000 TJINC1000 2997
dockstar MACH_DOCKSTAR DOCKSTAR 2998
-ax8008 MACH_AX8008 AX8008 2999
-gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
-pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
-ea20 MACH_EA20 EA20 3002
-awm2 MACH_AWM2 AWM2 3003
ti8148evm MACH_TI8148EVM TI8148EVM 3004
seaboard MACH_SEABOARD SEABOARD 3005
-linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
-tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
-rubys MACH_RUBYS RUBYS 3008
-aquarius MACH_AQUARIUS AQUARIUS 3009
mx53_ard MACH_MX53_ARD MX53_ARD 3010
mx53_smd MACH_MX53_SMD MX53_SMD 3011
-lswxl MACH_LSWXL LSWXL 3012
-dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
-sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
-jocpu550 MACH_JOCPU550 JOCPU550 3015
msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
-yanomami MACH_YANOMAMI YANOMAMI 3018
-gta04 MACH_GTA04 GTA04 3019
cm_a510 MACH_CM_A510 CM_A510 3020
-omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
-kx33xx MACH_KX33XX KX33XX 3022
-ptx7510 MACH_PTX7510 PTX7510 3023
-top9000 MACH_TOP9000 TOP9000 3024
-teenote MACH_TEENOTE TEENOTE 3025
-ts3 MACH_TS3 TS3 3026
-a0 MACH_A0 A0 3027
-fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
-fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
-frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
-remus MACH_REMUS REMUS 3031
-at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
-at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
-kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
-armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
-spdm MACH_SPDM SPDM 3037
-gtib MACH_GTIB GTIB 3038
-dgm3240 MACH_DGM3240 DGM3240 3039
-htcmega MACH_HTCMEGA HTCMEGA 3041
-tricorder MACH_TRICORDER TRICORDER 3042
tx28 MACH_TX28 TX28 3043
-bstbrd MACH_BSTBRD BSTBRD 3044
-pwb3090 MACH_PWB3090 PWB3090 3045
-idea6410 MACH_IDEA6410 IDEA6410 3046
-qbc9263 MACH_QBC9263 QBC9263 3047
-borabora MACH_BORABORA BORABORA 3048
-valdez MACH_VALDEZ VALDEZ 3049
-ls9g20 MACH_LS9G20 LS9G20 3050
-mios_v1 MACH_MIOS_V1 MIOS_V1 3051
-s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
-controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
-tin307 MACH_TIN307 TIN307 3054
-tin510 MACH_TIN510 TIN510 3055
-bluecheese MACH_BLUECHEESE BLUECHEESE 3057
-tem3x30 MACH_TEM3X30 TEM3X30 3058
-harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
-msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
-spear900 MACH_SPEAR900 SPEAR900 3061
pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
-rdstor MACH_RDSTOR RDSTOR 3063
-usdloader MACH_USDLOADER USDLOADER 3064
-tsoploader MACH_TSOPLOADER TSOPLOADER 3065
-kronos MACH_KRONOS KRONOS 3066
-ffcore MACH_FFCORE FFCORE 3067
-mone MACH_MONE MONE 3068
-unit2s MACH_UNIT2S UNIT2S 3069
-acer_a5 MACH_ACER_A5 ACER_A5 3070
-etherpro_isp MACH_ETHERPRO_ISP ETHERPRO_ISP 3071
-stretchs7000 MACH_STRETCHS7000 STRETCHS7000 3072
-p87_smartsim MACH_P87_SMARTSIM P87_SMARTSIM 3073
-tulip MACH_TULIP TULIP 3074
-sunflower MACH_SUNFLOWER SUNFLOWER 3075
-rib MACH_RIB RIB 3076
-clod MACH_CLOD CLOD 3077
-rump MACH_RUMP RUMP 3078
-tenderloin MACH_TENDERLOIN TENDERLOIN 3079
-shortloin MACH_SHORTLOIN SHORTLOIN 3080
-antares MACH_ANTARES ANTARES 3082
-wb40n MACH_WB40N WB40N 3083
-herring MACH_HERRING HERRING 3084
-naxy400 MACH_NAXY400 NAXY400 3085
-naxy1200 MACH_NAXY1200 NAXY1200 3086
vpr200 MACH_VPR200 VPR200 3087
-bug20 MACH_BUG20 BUG20 3088
-goflexnet MACH_GOFLEXNET GOFLEXNET 3089
torbreck MACH_TORBRECK TORBRECK 3090
-saarb_mg1 MACH_SAARB_MG1 SAARB_MG1 3091
-callisto MACH_CALLISTO CALLISTO 3092
-multhsu MACH_MULTHSU MULTHSU 3093
-saluda MACH_SALUDA SALUDA 3094
-pemp_omap3_apollo MACH_PEMP_OMAP3_APOLLO PEMP_OMAP3_APOLLO 3095
-vc0718 MACH_VC0718 VC0718 3096
-mvblx MACH_MVBLX MVBLX 3097
-inhand_apeiron MACH_INHAND_APEIRON INHAND_APEIRON 3098
-inhand_fury MACH_INHAND_FURY INHAND_FURY 3099
-inhand_siren MACH_INHAND_SIREN INHAND_SIREN 3100
-hdnvp MACH_HDNVP HDNVP 3101
-softwinner MACH_SOFTWINNER SOFTWINNER 3102
prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
-nas6210 MACH_NAS6210 NAS6210 3104
-unisdev MACH_UNISDEV UNISDEV 3105
-sbca11 MACH_SBCA11 SBCA11 3106
-saga MACH_SAGA SAGA 3107
-ns_k330 MACH_NS_K330 NS_K330 3108
-tanna MACH_TANNA TANNA 3109
-imate8502 MACH_IMATE8502 IMATE8502 3110
-aspen MACH_ASPEN ASPEN 3111
-daintree_cwac MACH_DAINTREE_CWAC DAINTREE_CWAC 3112
-zmx25 MACH_ZMX25 ZMX25 3113
-maple1 MACH_MAPLE1 MAPLE1 3114
-qsd8x72_surf MACH_QSD8X72_SURF QSD8X72_SURF 3115
-qsd8x72_ffa MACH_QSD8X72_FFA QSD8X72_FFA 3116
-abilene MACH_ABILENE ABILENE 3117
-eigen_ttr MACH_EIGEN_TTR EIGEN_TTR 3118
-iomega_ix2_200 MACH_IOMEGA_IX2_200 IOMEGA_IX2_200 3119
-coretec_vcx7400 MACH_CORETEC_VCX7400 CORETEC_VCX7400 3120
-santiago MACH_SANTIAGO SANTIAGO 3121
-mx257sol MACH_MX257SOL MX257SOL 3122
-strasbourg MACH_STRASBOURG STRASBOURG 3123
-msm8x60_fluid MACH_MSM8X60_FLUID MSM8X60_FLUID 3124
-smartqv5 MACH_SMARTQV5 SMARTQV5 3125
-smartqv3 MACH_SMARTQV3 SMARTQV3 3126
-smartqv7 MACH_SMARTQV7 SMARTQV7 3127
paz00 MACH_PAZ00 PAZ00 3128
acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
-fwbd_0404 MACH_FWBD_0404 FWBD_0404 3131
-hdgu MACH_HDGU HDGU 3132
-pyramid MACH_PYRAMID PYRAMID 3133
-epiphan MACH_EPIPHAN EPIPHAN 3134
-omap_bender MACH_OMAP_BENDER OMAP_BENDER 3135
-gurnard MACH_GURNARD GURNARD 3136
-gtl_it5100 MACH_GTL_IT5100 GTL_IT5100 3137
-bcm2708 MACH_BCM2708 BCM2708 3138
-mx51_ggc MACH_MX51_GGC MX51_GGC 3139
-sharespace MACH_SHARESPACE SHARESPACE 3140
-haba_knx_explorer MACH_HABA_KNX_EXPLORER HABA_KNX_EXPLORER 3141
-simtec_kirkmod MACH_SIMTEC_KIRKMOD SIMTEC_KIRKMOD 3142
-crux MACH_CRUX CRUX 3143
-mx51_bravo MACH_MX51_BRAVO MX51_BRAVO 3144
-charon MACH_CHARON CHARON 3145
-picocom3 MACH_PICOCOM3 PICOCOM3 3146
-picocom4 MACH_PICOCOM4 PICOCOM4 3147
-serrano MACH_SERRANO SERRANO 3148
-doubleshot MACH_DOUBLESHOT DOUBLESHOT 3149
-evsy MACH_EVSY EVSY 3150
-huashan MACH_HUASHAN HUASHAN 3151
-lausanne MACH_LAUSANNE LAUSANNE 3152
-emerald MACH_EMERALD EMERALD 3153
-tqma35 MACH_TQMA35 TQMA35 3154
-marvel MACH_MARVEL MARVEL 3155
-manuae MACH_MANUAE MANUAE 3156
-chacha MACH_CHACHA CHACHA 3157
-lemon MACH_LEMON LEMON 3158
-csc MACH_CSC CSC 3159
-gira_knxip_router MACH_GIRA_KNXIP_ROUTER GIRA_KNXIP_ROUTER 3160
-t20 MACH_T20 T20 3161
-hdmini MACH_HDMINI HDMINI 3162
-sciphone_g2 MACH_SCIPHONE_G2 SCIPHONE_G2 3163
-express MACH_EXPRESS EXPRESS 3164
-express_kt MACH_EXPRESS_KT EXPRESS_KT 3165
-maximasp MACH_MAXIMASP MAXIMASP 3166
-nitrogen_imx51 MACH_NITROGEN_IMX51 NITROGEN_IMX51 3167
-nitrogen_imx53 MACH_NITROGEN_IMX53 NITROGEN_IMX53 3168
-sunfire MACH_SUNFIRE SUNFIRE 3169
-arowana MACH_AROWANA AROWANA 3170
-tegra_daytona MACH_TEGRA_DAYTONA TEGRA_DAYTONA 3171
-tegra_swordfish MACH_TEGRA_SWORDFISH TEGRA_SWORDFISH 3172
-edison MACH_EDISON EDISON 3173
-svp8500v1 MACH_SVP8500V1 SVP8500V1 3174
-svp8500v2 MACH_SVP8500V2 SVP8500V2 3175
-svp5500 MACH_SVP5500 SVP5500 3176
-b5500 MACH_B5500 B5500 3177
-s5500 MACH_S5500 S5500 3178
-icon MACH_ICON ICON 3179
-elephant MACH_ELEPHANT ELEPHANT 3180
-shooter MACH_SHOOTER SHOOTER 3182
-spade_lte MACH_SPADE_LTE SPADE_LTE 3183
-philhwani MACH_PHILHWANI PHILHWANI 3184
-gsncomm MACH_GSNCOMM GSNCOMM 3185
-strasbourg_a2 MACH_STRASBOURG_A2 STRASBOURG_A2 3186
-mmm MACH_MMM MMM 3187
-davinci_dm365_bv MACH_DAVINCI_DM365_BV DAVINCI_DM365_BV 3188
ag5evm MACH_AG5EVM AG5EVM 3189
-sc575plc MACH_SC575PLC SC575PLC 3190
-sc575hmi MACH_SC575IPC SC575IPC 3191
-omap3_tdm3730 MACH_OMAP3_TDM3730 OMAP3_TDM3730 3192
-top9000_eval MACH_TOP9000_EVAL TOP9000_EVAL 3194
-top9000_su MACH_TOP9000_SU TOP9000_SU 3195
-utm300 MACH_UTM300 UTM300 3196
tsunagi MACH_TSUNAGI TSUNAGI 3197
-ts75xx MACH_TS75XX TS75XX 3198
-ts47xx MACH_TS47XX TS47XX 3200
-da850_k5 MACH_DA850_K5 DA850_K5 3201
-ax502 MACH_AX502 AX502 3202
-igep0032 MACH_IGEP0032 IGEP0032 3203
-antero MACH_ANTERO ANTERO 3204
-synergy MACH_SYNERGY SYNERGY 3205
ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
-punica MACH_PUNICA PUNICA 3208
trimslice MACH_TRIMSLICE TRIMSLICE 3209
-mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210
mackerel MACH_MACKEREL MACKEREL 3211
-fa9x27 MACH_FA9X27 FA9X27 3213
-ns2816tb MACH_NS2816TB NS2816TB 3214
-ns2816_ntpad MACH_NS2816_NTPAD NS2816_NTPAD 3215
-ns2816_ntnb MACH_NS2816_NTNB NS2816_NTNB 3216
kaen MACH_KAEN KAEN 3217
-nv1000 MACH_NV1000 NV1000 3218
-nuc950ts MACH_NUC950TS NUC950TS 3219
nokia_rm680 MACH_NOKIA_RM680 NOKIA_RM680 3220
-ast2200 MACH_AST2200 AST2200 3221
-lead MACH_LEAD LEAD 3222
-unino1 MACH_UNINO1 UNINO1 3223
-greeco MACH_GREECO GREECO 3224
-verdi MACH_VERDI VERDI 3225
dm6446_adbox MACH_DM6446_ADBOX DM6446_ADBOX 3226
quad_salsa MACH_QUAD_SALSA QUAD_SALSA 3227
abb_gma_1_1 MACH_ABB_GMA_1_1 ABB_GMA_1_1 3228
@@ -949,13 +621,11 @@ koi MACH_KOI KOI 3312
ts4800 MACH_TS4800 TS4800 3313
tqma9263 MACH_TQMA9263 TQMA9263 3314
holiday MACH_HOLIDAY HOLIDAY 3315
-dma_6410 MACH_DMA6410 DMA6410 3316
pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317
hwgw6410 MACH_HWGW6410 HWGW6410 3318
shenzhou MACH_SHENZHOU SHENZHOU 3319
cwme9210 MACH_CWME9210 CWME9210 3320
cwme9210js MACH_CWME9210JS CWME9210JS 3321
-pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322
colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323
w21 MACH_W21 W21 3324
polysat1 MACH_POLYSAT1 POLYSAT1 3325
@@ -1021,13 +691,11 @@ viprinet MACH_VIPRINET VIPRINET 3385
bockw MACH_BOCKW BOCKW 3386
eva2000 MACH_EVA2000 EVA2000 3387
steelyard MACH_STEELYARD STEELYARD 3388
-sdh001 MACH_MACH_SDH001 MACH_SDH001 3390
nsslsboard MACH_NSSLSBOARD NSSLSBOARD 3392
geneva_b5 MACH_GENEVA_B5 GENEVA_B5 3393
spear1340 MACH_SPEAR1340 SPEAR1340 3394
rexmas MACH_REXMAS REXMAS 3395
msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396
-msm8960_mdp MACH_MSM8960_MDP MSM8960_MDP 3397
msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398
msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399
helios_v2 MACH_HELIOS_V2 HELIOS_V2 3400
@@ -1123,5 +791,381 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+dma210u MACH_DMA210U DMA210U 3495
+em_t3 MACH_EM_T3 EM_T3 3496
+htx3250 MACH_HTX3250 HTX3250 3497
+g50 MACH_G50 G50 3498
+eco5 MACH_ECO5 ECO5 3499
+wintergrasp MACH_WINTERGRASP WINTERGRASP 3500
+puro MACH_PURO PURO 3501
+shooter_k MACH_SHOOTER_K SHOOTER_K 3502
+nspire MACH_NSPIRE NSPIRE 3503
+mickxx MACH_MICKXX MICKXX 3504
+lxmb MACH_LXMB LXMB 3505
+adam MACH_ADAM ADAM 3507
+b1004 MACH_B1004 B1004 3508
+oboea MACH_OBOEA OBOEA 3509
+a1015 MACH_A1015 A1015 3510
+robin_vbdt30 MACH_ROBIN_VBDT30 ROBIN_VBDT30 3511
+tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512
+rfl108200_mk10 MACH_RFL108200_MK10 RFL108200_MK10 3513
+rfl108300_mk16 MACH_RFL108300_MK16 RFL108300_MK16 3514
+rover_v7 MACH_ROVER_V7 ROVER_V7 3515
+miphone MACH_MIPHONE MIPHONE 3516
+femtobts MACH_FEMTOBTS FEMTOBTS 3517
+monopoli MACH_MONOPOLI MONOPOLI 3518
+boss MACH_BOSS BOSS 3519
+davinci_dm368_vtam MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM 3520
+clcon MACH_CLCON CLCON 3521
+nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522
+tahiti MACH_TAHITI TAHITI 3523
+fighter MACH_FIGHTER FIGHTER 3524
+sgh_i710 MACH_SGH_I710 SGH_I710 3525
+integreproscb MACH_INTEGREPROSCB INTEGREPROSCB 3526
+monza MACH_MONZA MONZA 3527
+calimain MACH_CALIMAIN CALIMAIN 3528
+mx6q_sabreauto MACH_MX6Q_SABREAUTO MX6Q_SABREAUTO 3529
+gma01x MACH_GMA01X GMA01X 3530
+sbc51 MACH_SBC51 SBC51 3531
+fit MACH_FIT FIT 3532
+steelhead MACH_STEELHEAD STEELHEAD 3533
+panther MACH_PANTHER PANTHER 3534
+msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535
+lexikonct MACH_LEXIKONCT LEXIKONCT 3536
+ns2816_stb MACH_NS2816_STB NS2816_STB 3537
+sei_mm2_lpc3250 MACH_SEI_MM2_LPC3250 SEI_MM2_LPC3250 3538
+cmimx53 MACH_CMIMX53 CMIMX53 3539
+sandwich MACH_SANDWICH SANDWICH 3540
+chief MACH_CHIEF CHIEF 3541
+pogo_e02 MACH_POGO_E02 POGO_E02 3542
+mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543
+htcmozart MACH_HTCMOZART HTCMOZART 3544
+htcgold MACH_HTCGOLD HTCGOLD 3545
+mt72xx MACH_MT72XX MT72XX 3546
+mx51_ivy MACH_MX51_IVY MX51_IVY 3547
+mx51_lvd MACH_MX51_LVD MX51_LVD 3548
+omap3_wiser2 MACH_OMAP3_WISER2 OMAP3_WISER2 3549
+dreamplug MACH_DREAMPLUG DREAMPLUG 3550
+cobas_c_111 MACH_COBAS_C_111 COBAS_C_111 3551
+cobas_u_411 MACH_COBAS_U_411 COBAS_U_411 3552
+hssd MACH_HSSD HSSD 3553
+iom35x MACH_IOM35X IOM35X 3554
+psom_omap MACH_PSOM_OMAP PSOM_OMAP 3555
+iphone_2g MACH_IPHONE_2G IPHONE_2G 3556
+iphone_3g MACH_IPHONE_3G IPHONE_3G 3557
+ipod_touch_1g MACH_IPOD_TOUCH_1G IPOD_TOUCH_1G 3558
+pharos_tpc MACH_PHAROS_TPC PHAROS_TPC 3559
+mx53_hydra MACH_MX53_HYDRA MX53_HYDRA 3560
+ns2816_dev_board MACH_NS2816_DEV_BOARD NS2816_DEV_BOARD 3561
+iphone_3gs MACH_IPHONE_3GS IPHONE_3GS 3562
+iphone_4 MACH_IPHONE_4 IPHONE_4 3563
+ipod_touch_4g MACH_IPOD_TOUCH_4G IPOD_TOUCH_4G 3564
+dragon_e1100 MACH_DRAGON_E1100 DRAGON_E1100 3565
+topside MACH_TOPSIDE TOPSIDE 3566
+irisiii MACH_IRISIII IRISIII 3567
+deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568
+eti_d1 MACH_ETI_D1 ETI_D1 3569
+som3530sdk MACH_SOM3530SDK SOM3530SDK 3570
+oc_engine MACH_OC_ENGINE OC_ENGINE 3571
+apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572
+alps MACH_ALPS ALPS 3575
+tny_t3730 MACH_TNY_T3730 TNY_T3730 3576
+geryon_nfe MACH_GERYON_NFE GERYON_NFE 3577
+ns2816_ref_board MACH_NS2816_REF_BOARD NS2816_REF_BOARD 3578
+silverstone MACH_SILVERSTONE SILVERSTONE 3579
+mtt2440 MACH_MTT2440 MTT2440 3580
+ynicdb MACH_YNICDB YNICDB 3581
+bct MACH_BCT BCT 3582
+tuscan MACH_TUSCAN TUSCAN 3583
+xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584
+enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585
+ch104mx257 MACH_CH104MX257 CH104MX257 3587
+openpri MACH_OPENPRI OPENPRI 3588
+am335xevm MACH_AM335XEVM AM335XEVM 3589
+picodmb MACH_PICODMB PICODMB 3590
+waluigi MACH_WALUIGI WALUIGI 3591
+punicag7 MACH_PUNICAG7 PUNICAG7 3592
+ipad_1g MACH_IPAD_1G IPAD_1G 3593
+appletv_2g MACH_APPLETV_2G APPLETV_2G 3594
+mach_ecog45 MACH_MACH_ECOG45 MACH_ECOG45 3595
+ait_cam_enc_4xx MACH_AIT_CAM_ENC_4XX AIT_CAM_ENC_4XX 3596
+runnymede MACH_RUNNYMEDE RUNNYMEDE 3597
+play MACH_PLAY PLAY 3598
+hw90260 MACH_HW90260 HW90260 3599
+tagh MACH_TAGH TAGH 3600
+filbert MACH_FILBERT FILBERT 3601
+getinge_netcomv3 MACH_GETINGE_NETCOMV3 GETINGE_NETCOMV3 3602
+cw20 MACH_CW20 CW20 3603
+cinema MACH_CINEMA CINEMA 3604
+cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605
+cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606
+cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607
+mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609
+mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610
+stm_b2000 MACH_STM_B2000 STM_B2000 3612
+m28evk MACH_M28EVK M28EVK 3613
+pda MACH_PDA PDA 3614
+meraki_mr58 MACH_MERAKI_MR58 MERAKI_MR58 3615
+kota2 MACH_KOTA2 KOTA2 3616
+letcool MACH_LETCOOL LETCOOL 3617
+mx27iat MACH_MX27IAT MX27IAT 3618
+apollo_td MACH_APOLLO_TD APOLLO_TD 3619
+arena MACH_ARENA ARENA 3620
+gsngateway MACH_GSNGATEWAY GSNGATEWAY 3621
+lf2000 MACH_LF2000 LF2000 3622
+bonito MACH_BONITO BONITO 3623
+asymptote MACH_ASYMPTOTE ASYMPTOTE 3624
+bst2brd MACH_BST2BRD BST2BRD 3625
+tx335s MACH_TX335S TX335S 3626
+pelco_tesla MACH_PELCO_TESLA PELCO_TESLA 3627
+rrhtestplat MACH_RRHTESTPLAT RRHTESTPLAT 3628
+vidtonic_pro MACH_VIDTONIC_PRO VIDTONIC_PRO 3629
+pl_apollo MACH_PL_APOLLO PL_APOLLO 3630
+pl_phoenix MACH_PL_PHOENIX PL_PHOENIX 3631
+m28cu3 MACH_M28CU3 M28CU3 3632
+vvbox_hd MACH_VVBOX_HD VVBOX_HD 3633
+coreware_sam9260_ MACH_COREWARE_SAM9260_ COREWARE_SAM9260_ 3634
+marmaduke MACH_MARMADUKE MARMADUKE 3635
+amg_xlcore_camera MACH_AMG_XLCORE_CAMERA AMG_XLCORE_CAMERA 3636
+omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637
smdk4212 MACH_SMDK4212 SMDK4212 3638
+dnp9200 MACH_DNP9200 DNP9200 3639
+tf101 MACH_TF101 TF101 3640
+omap3silvio MACH_OMAP3SILVIO OMAP3SILVIO 3641
+picasso2 MACH_PICASSO2 PICASSO2 3642
+vangogh2 MACH_VANGOGH2 VANGOGH2 3643
+olpc_xo_1_75 MACH_OLPC_XO_1_75 OLPC_XO_1_75 3644
+gx400 MACH_GX400 GX400 3645
+gs300 MACH_GS300 GS300 3646
+acer_a9 MACH_ACER_A9 ACER_A9 3647
+vivow_evm MACH_VIVOW_EVM VIVOW_EVM 3648
+veloce_cxq MACH_VELOCE_CXQ VELOCE_CXQ 3649
+veloce_cxm MACH_VELOCE_CXM VELOCE_CXM 3650
+p1852 MACH_P1852 P1852 3651
+naxy100 MACH_NAXY100 NAXY100 3652
+taishan MACH_TAISHAN TAISHAN 3653
+touchlink MACH_TOUCHLINK TOUCHLINK 3654
+stm32f103ze MACH_STM32F103ZE STM32F103ZE 3655
+mcx MACH_MCX MCX 3656
+stm_nmhdk_fli7610 MACH_STM_NMHDK_FLI7610 STM_NMHDK_FLI7610 3657
+top28x MACH_TOP28X TOP28X 3658
+okl4vp_microvisor MACH_OKL4VP_MICROVISOR OKL4VP_MICROVISOR 3659
+pop MACH_POP POP 3660
+layer MACH_LAYER LAYER 3661
+trondheim MACH_TRONDHEIM TRONDHEIM 3662
+eva MACH_EVA EVA 3663
+trust_taurus MACH_TRUST_TAURUS TRUST_TAURUS 3664
+ns2816_huashan MACH_NS2816_HUASHAN NS2816_HUASHAN 3665
+ns2816_yangcheng MACH_NS2816_YANGCHENG NS2816_YANGCHENG 3666
+p852 MACH_P852 P852 3667
+flea3 MACH_FLEA3 FLEA3 3668
+bowfin MACH_BOWFIN BOWFIN 3669
+mv88de3100 MACH_MV88DE3100 MV88DE3100 3670
+pia_am35x MACH_PIA_AM35X PIA_AM35X 3671
+cedar MACH_CEDAR CEDAR 3672
+picasso_e MACH_PICASSO_E PICASSO_E 3673
+samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674
+sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676
+omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677
+modasmc1 MACH_MODASMC1 MODASMC1 3678
+apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679
+matrix506 MACH_MATRIX506 MATRIX506 3680
+msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681
+dm36x_spawndc MACH_DM36X_SPAWNDC DM36X_SPAWNDC 3682
+sff792 MACH_SFF792 SFF792 3683
+am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684
+g3c2440 MACH_G3C2440 G3C2440 3685
+tion270 MACH_TION270 TION270 3686
+w22q7arm02 MACH_W22Q7ARM02 W22Q7ARM02 3687
+omap_cat MACH_OMAP_CAT OMAP_CAT 3688
+at91sam9n12ek MACH_AT91SAM9N12EK AT91SAM9N12EK 3689
+morrison MACH_MORRISON MORRISON 3690
+svdu MACH_SVDU SVDU 3691
+lpp01 MACH_LPP01 LPP01 3692
+ubc283 MACH_UBC283 UBC283 3693
+zeppelin MACH_ZEPPELIN ZEPPELIN 3694
+motus MACH_MOTUS MOTUS 3695
+neomainboard MACH_NEOMAINBOARD NEOMAINBOARD 3696
+devkit3250 MACH_DEVKIT3250 DEVKIT3250 3697
+devkit7000 MACH_DEVKIT7000 DEVKIT7000 3698
+fmc_uic MACH_FMC_UIC FMC_UIC 3699
+fmc_dcm MACH_FMC_DCM FMC_DCM 3700
+batwm MACH_BATWM BATWM 3701
+atlas6cb MACH_ATLAS6CB ATLAS6CB 3702
+blue MACH_BLUE BLUE 3705
+colorado MACH_COLORADO COLORADO 3706
+popc MACH_POPC POPC 3707
+promwad_jade MACH_PROMWAD_JADE PROMWAD_JADE 3708
+amp MACH_AMP AMP 3709
+gnet_amp MACH_GNET_AMP GNET_AMP 3710
+toques MACH_TOQUES TOQUES 3711
+dct_storm MACH_DCT_STORM DCT_STORM 3713
+owl MACH_OWL OWL 3715
+cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716
+adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718
+ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719
+popct MACH_POPCT POPCT 3720
+omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721
+ach MACH_ACH ACH 3722
+module_dtb MACH_MODULE_DTB MODULE_DTB 3723
+oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725
+tt01 MACH_TT01 TT01 3726
+msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727
+msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728
+msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729
+ltu11 MACH_LTU11 LTU11 3730
+am1808_spawnco MACH_AM1808_SPAWNCO AM1808_SPAWNCO 3731
+flx6410 MACH_FLX6410 FLX6410 3732
+mx6q_qsb MACH_MX6Q_QSB MX6Q_QSB 3733
+mx53_plt424 MACH_MX53_PLT424 MX53_PLT424 3734
+jasmine MACH_JASMINE JASMINE 3735
+l138_owlboard_plus MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS 3736
+wr21 MACH_WR21 WR21 3737
+peaboy MACH_PEABOY PEABOY 3739
+mx28_plato MACH_MX28_PLATO MX28_PLATO 3740
+kacom2 MACH_KACOM2 KACOM2 3741
+slco MACH_SLCO SLCO 3742
+imx51pico MACH_IMX51PICO IMX51PICO 3743
+glink1 MACH_GLINK1 GLINK1 3744
+diamond MACH_DIAMOND DIAMOND 3745
+d9000 MACH_D9000 D9000 3746
+w5300e01 MACH_W5300E01 W5300E01 3747
+im6000 MACH_IM6000 IM6000 3748
+mx51_fred51 MACH_MX51_FRED51 MX51_FRED51 3749
+stm32f2 MACH_STM32F2 STM32F2 3750
+ville MACH_VILLE VILLE 3751
+ptip_murnau MACH_PTIP_MURNAU PTIP_MURNAU 3752
+ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753
+mx53grb MACH_MX53GRB MX53GRB 3754
+gagarin MACH_GAGARIN GAGARIN 3755
+nas2big MACH_NAS2BIG NAS2BIG 3757
+superfemto MACH_SUPERFEMTO SUPERFEMTO 3758
+teufel MACH_TEUFEL TEUFEL 3759
+dinara MACH_DINARA DINARA 3760
+vanquish MACH_VANQUISH VANQUISH 3761
+zipabox1 MACH_ZIPABOX1 ZIPABOX1 3762
+u9540 MACH_U9540 U9540 3763
+jet MACH_JET JET 3764
smdk4412 MACH_SMDK4412 SMDK4412 3765
+elite MACH_ELITE ELITE 3766
+spear320_hmi MACH_SPEAR320_HMI SPEAR320_HMI 3767
+ontario MACH_ONTARIO ONTARIO 3768
+mx6q_sabrelite MACH_MX6Q_SABRELITE MX6Q_SABRELITE 3769
+vc200 MACH_VC200 VC200 3770
+msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771
+msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772
+benthossbp MACH_BENTHOSSBP BENTHOSSBP 3773
+smdk5210 MACH_SMDK5210 SMDK5210 3774
+empq2300 MACH_EMPQ2300 EMPQ2300 3775
+minipos MACH_MINIPOS MINIPOS 3776
+omap5_sevm MACH_OMAP5_SEVM OMAP5_SEVM 3777
+shelter MACH_SHELTER SHELTER 3778
+omap3_devkit8500 MACH_OMAP3_DEVKIT8500 OMAP3_DEVKIT8500 3779
+edgetd MACH_EDGETD EDGETD 3780
+copperyard MACH_COPPERYARD COPPERYARD 3781
+edge MACH_EDGE EDGE 3782
+edge_u MACH_EDGE_U EDGE_U 3783
+edge_td MACH_EDGE_TD EDGE_TD 3784
+wdss MACH_WDSS WDSS 3785
+dl_pb25 MACH_DL_PB25 DL_PB25 3786
+dss11 MACH_DSS11 DSS11 3787
+cpa MACH_CPA CPA 3788
+aptp2000 MACH_APTP2000 APTP2000 3789
+marzen MACH_MARZEN MARZEN 3790
+st_turbine MACH_ST_TURBINE ST_TURBINE 3791
+gtl_it3300 MACH_GTL_IT3300 GTL_IT3300 3792
+mx6_mule MACH_MX6_MULE MX6_MULE 3793
+v7pxa_dt MACH_V7PXA_DT V7PXA_DT 3794
+v7mmp_dt MACH_V7MMP_DT V7MMP_DT 3795
+dragon7 MACH_DRAGON7 DRAGON7 3796
+krome MACH_KROME KROME 3797
+oratisdante MACH_ORATISDANTE ORATISDANTE 3798
+fathom MACH_FATHOM FATHOM 3799
+dns325 MACH_DNS325 DNS325 3800
+sarnen MACH_SARNEN SARNEN 3801
+ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 3802
+mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803
+asanti MACH_ASANTI ASANTI 3804
+volta MACH_VOLTA VOLTA 3805
+knight MACH_KNIGHT KNIGHT 3807
+beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808
+becker MACH_BECKER BECKER 3809
+fc360 MACH_FC360 FC360 3810
+pmi2_xls MACH_PMI2_XLS PMI2_XLS 3811
+taranto MACH_TARANTO TARANTO 3812
+plutux MACH_PLUTUX PLUTUX 3813
+ipmp_medcom MACH_IPMP_MEDCOM IPMP_MEDCOM 3814
+absolut MACH_ABSOLUT ABSOLUT 3815
+awpb3 MACH_AWPB3 AWPB3 3816
+nfp32xx_dt MACH_NFP32XX_DT NFP32XX_DT 3817
+dl_pb53 MACH_DL_PB53 DL_PB53 3818
+acu_ii MACH_ACU_II ACU_II 3819
+avalon MACH_AVALON AVALON 3820
+sphinx MACH_SPHINX SPHINX 3821
+titan_t MACH_TITAN_T TITAN_T 3822
+harvest_boris MACH_HARVEST_BORIS HARVEST_BORIS 3823
+mach_msm7x30_m3s MACH_MACH_MSM7X30_M3S MACH_MSM7X30_M3S 3824
+smdk5250 MACH_SMDK5250 SMDK5250 3825
+imxt_lite MACH_IMXT_LITE IMXT_LITE 3826
+imxt_std MACH_IMXT_STD IMXT_STD 3827
+imxt_log MACH_IMXT_LOG IMXT_LOG 3828
+imxt_nav MACH_IMXT_NAV IMXT_NAV 3829
+imxt_full MACH_IMXT_FULL IMXT_FULL 3830
+ag09015 MACH_AG09015 AG09015 3831
+am3517_mt_ventoux MACH_AM3517_MT_VENTOUX AM3517_MT_VENTOUX 3832
+dp1arm9 MACH_DP1ARM9 DP1ARM9 3833
+picasso_m MACH_PICASSO_M PICASSO_M 3834
+video_gadget MACH_VIDEO_GADGET VIDEO_GADGET 3835
+mtt_om3x MACH_MTT_OM3X MTT_OM3X 3836
+mx6q_arm2 MACH_MX6Q_ARM2 MX6Q_ARM2 3837
+picosam9g45 MACH_PICOSAM9G45 PICOSAM9G45 3838
+vpm_dm365 MACH_VPM_DM365 VPM_DM365 3839
+bonfire MACH_BONFIRE BONFIRE 3840
+mt2p2d MACH_MT2P2D MT2P2D 3841
+sigpda01 MACH_SIGPDA01 SIGPDA01 3842
+cn27 MACH_CN27 CN27 3843
+mx25_cwtap MACH_MX25_CWTAP MX25_CWTAP 3844
+apf28 MACH_APF28 APF28 3845
+pelco_maxwell MACH_PELCO_MAXWELL PELCO_MAXWELL 3846
+ge_phoenix MACH_GE_PHOENIX GE_PHOENIX 3847
+empc_a500 MACH_EMPC_A500 EMPC_A500 3848
+ims_arm9 MACH_IMS_ARM9 IMS_ARM9 3849
+mini2416 MACH_MINI2416 MINI2416 3850
+mini2450 MACH_MINI2450 MINI2450 3851
+mini310 MACH_MINI310 MINI310 3852
+spear_hurricane MACH_SPEAR_HURRICANE SPEAR_HURRICANE 3853
+mt7208 MACH_MT7208 MT7208 3854
+lpc178x MACH_LPC178X LPC178X 3855
+farleys MACH_FARLEYS FARLEYS 3856
+efm32gg_dk3750 MACH_EFM32GG_DK3750 EFM32GG_DK3750 3857
+zeus_board MACH_ZEUS_BOARD ZEUS_BOARD 3858
+cc51 MACH_CC51 CC51 3859
+fxi_c210 MACH_FXI_C210 FXI_C210 3860
+msm8627_cdp MACH_MSM8627_CDP MSM8627_CDP 3861
+msm8627_mtp MACH_MSM8627_MTP MSM8627_MTP 3862
+armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863
+primou MACH_PRIMOU PRIMOU 3864
+primoc MACH_PRIMOC PRIMOC 3865
+primoct MACH_PRIMOCT PRIMOCT 3866
+a9500 MACH_A9500 A9500 3867
+pluto MACH_PLUTO PLUTO 3869
+acfx100 MACH_ACFX100 ACFX100 3870
+msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871
+valente MACH_VALENTE VALENTE 3872
+crfs_rfeye MACH_CRFS_RFEYE CRFS_RFEYE 3873
+rfeye MACH_RFEYE RFEYE 3874
+phidget_sbc3 MACH_PHIDGET_SBC3 PHIDGET_SBC3 3875
+tcw_mika MACH_TCW_MIKA TCW_MIKA 3876
+imx28_egf MACH_IMX28_EGF IMX28_EGF 3877
+valente_wx MACH_VALENTE_WX VALENTE_WX 3878
+huangshans MACH_HUANGSHANS HUANGSHANS 3879
+bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880
+prima MACH_PRIMA PRIMA 3881
+evita_ulk MACH_EVITA_ULK EVITA_ULK 3884
+merisc600 MACH_MERISC600 MERISC600 3885
+dolak MACH_DOLAK DOLAK 3886
+sbc53 MACH_SBC53 SBC53 3887
+elite_ulk MACH_ELITE_ULK ELITE_ULK 3888
+pov2 MACH_POV2 POV2 3889
+ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890
+da850_pqab MACH_DA850_PQAB DA850_PQAB 3891
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 1f17bde52cd..7c756fb189f 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -109,7 +109,7 @@ struct eth_addr {
u8 addr[6];
};
static struct eth_addr __initdata hw_addr[2];
-static struct eth_platform_data __initdata eth_data[2];
+static struct macb_platform_data __initdata eth_data[2];
static struct spi_board_info spi0_board_info[] __initdata = {
{
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 4643ff5107c..c56ddac85d6 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -105,7 +105,7 @@ struct eth_addr {
};
static struct eth_addr __initdata hw_addr[2];
-static struct eth_platform_data __initdata eth_data[2] = {
+static struct macb_platform_data __initdata eth_data[2] = {
{
/*
* The MDIO pullups on STK1000 are a bit too weak for
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 86fab77a5a0..27bd6fbe21c 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -50,7 +50,7 @@ struct eth_addr {
u8 addr[6];
};
static struct eth_addr __initdata hw_addr[1];
-static struct eth_platform_data __initdata eth_data[1] = {
+static struct macb_platform_data __initdata eth_data[1] = {
{
.phy_mask = ~(1U << 1),
},
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index da14fbdd4e8..9d1efd1cd42 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -102,7 +102,7 @@ struct eth_addr {
};
static struct eth_addr __initdata hw_addr[1];
-static struct eth_platform_data __initdata eth_data[1];
+static struct macb_platform_data __initdata eth_data[1];
/*
* The next two functions should go away as the boot loader is
diff --git a/arch/avr32/boards/merisc/merisc_sysfs.c b/arch/avr32/boards/merisc/merisc_sysfs.c
index df431fdba9a..5a252318f4b 100644
--- a/arch/avr32/boards/merisc/merisc_sysfs.c
+++ b/arch/avr32/boards/merisc/merisc_sysfs.c
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
diff --git a/arch/avr32/boards/merisc/setup.c b/arch/avr32/boards/merisc/setup.c
index e61bc948f95..ed137e33579 100644
--- a/arch/avr32/boards/merisc/setup.c
+++ b/arch/avr32/boards/merisc/setup.c
@@ -52,7 +52,7 @@ struct eth_addr {
};
static struct eth_addr __initdata hw_addr[2];
-static struct eth_platform_data __initdata eth_data[2];
+static struct macb_platform_data __initdata eth_data[2];
static int ads7846_get_pendown_state_PB26(void)
{
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index c4da5cba2db..05358aa5ef7 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -86,7 +86,7 @@ struct eth_addr {
u8 addr[6];
};
static struct eth_addr __initdata hw_addr[2];
-static struct eth_platform_data __initdata eth_data[2];
+static struct macb_platform_data __initdata eth_data[2];
static struct spi_eeprom eeprom_25lc010 = {
.name = "25lc010",
diff --git a/arch/avr32/include/asm/ipcbuf.h b/arch/avr32/include/asm/ipcbuf.h
index 1552c9698f5..84c7e51cb6d 100644
--- a/arch/avr32/include/asm/ipcbuf.h
+++ b/arch/avr32/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __ASM_AVR32_IPCBUF_H
-#define __ASM_AVR32_IPCBUF_H
-
-/*
-* The user_ipc_perm structure for AVR32 architecture.
-* Note extra padding because this structure is passed back and forth
-* between kernel and user space.
-*
-* Pad space is left for:
-* - 32-bit mode_t and seq
-* - 2 miscellaneous 32-bit values
-*/
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __ASM_AVR32_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h
index c8d1fae4947..247b88c760b 100644
--- a/arch/avr32/include/asm/socket.h
+++ b/arch/avr32/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03dcb0b..e5deda4691d 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
-#define TIF_FREEZE 29
#define TIF_DEBUG 30 /* debugging enabled */
#define TIF_USERSPACE 31 /* true if FS sets userspace */
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
/* Note: The masks below must never span more than 16 bits! */
diff --git a/arch/avr32/include/asm/types.h b/arch/avr32/include/asm/types.h
index 72667a3b1af..9bb2d8b2e6c 100644
--- a/arch/avr32/include/asm/types.h
+++ b/arch/avr32/include/asm/types.h
@@ -10,12 +10,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c
index e84faffbbec..2233be71e2e 100644
--- a/arch/avr32/kernel/cpu.c
+++ b/arch/avr32/kernel/cpu.c
@@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/module.h>
@@ -26,16 +26,16 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
* XXX: If/when a SMP-capable implementation of AVR32 will ever be
* made, we must make sure that the code executes on the correct CPU.
*/
-static ssize_t show_pc0event(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc0event(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f);
}
-static ssize_t store_pc0event(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc0event(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
@@ -48,16 +48,16 @@ static ssize_t store_pc0event(struct sys_device *dev,
sysreg_write(PCCR, val);
return count;
}
-static ssize_t show_pc0count(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc0count(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pcnt0;
pcnt0 = sysreg_read(PCNT0);
return sprintf(buf, "%lu\n", pcnt0);
}
-static ssize_t store_pc0count(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_pc0count(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val;
@@ -71,16 +71,16 @@ static ssize_t store_pc0count(struct sys_device *dev,
return count;
}
-static ssize_t show_pc1event(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc1event(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f);
}
-static ssize_t store_pc1event(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc1event(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
@@ -93,16 +93,16 @@ static ssize_t store_pc1event(struct sys_device *dev,
sysreg_write(PCCR, val);
return count;
}
-static ssize_t show_pc1count(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pc1count(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pcnt1;
pcnt1 = sysreg_read(PCNT1);
return sprintf(buf, "%lu\n", pcnt1);
}
-static ssize_t store_pc1count(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pc1count(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
@@ -116,16 +116,16 @@ static ssize_t store_pc1count(struct sys_device *dev,
return count;
}
-static ssize_t show_pccycles(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pccycles(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pccnt;
pccnt = sysreg_read(PCCNT);
return sprintf(buf, "%lu\n", pccnt);
}
-static ssize_t store_pccycles(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pccycles(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
@@ -139,16 +139,16 @@ static ssize_t store_pccycles(struct sys_device *dev,
return count;
}
-static ssize_t show_pcenable(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_pcenable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "%c\n", (pccr & 1)?'1':'0');
}
-static ssize_t store_pcenable(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_pcenable(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long pccr, val;
@@ -167,12 +167,12 @@ static ssize_t store_pcenable(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
-static SYSDEV_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
-static SYSDEV_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
-static SYSDEV_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
-static SYSDEV_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
-static SYSDEV_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
+static DEVICE_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
+static DEVICE_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
+static DEVICE_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
+static DEVICE_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
+static DEVICE_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
+static DEVICE_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
#endif /* CONFIG_PERFORMANCE_COUNTERS */
@@ -186,12 +186,12 @@ static int __init topology_init(void)
register_cpu(c, cpu);
#ifdef CONFIG_PERFORMANCE_COUNTERS
- sysdev_create_file(&c->sysdev, &attr_pc0event);
- sysdev_create_file(&c->sysdev, &attr_pc0count);
- sysdev_create_file(&c->sysdev, &attr_pc1event);
- sysdev_create_file(&c->sysdev, &attr_pc1count);
- sysdev_create_file(&c->sysdev, &attr_pccycles);
- sysdev_create_file(&c->sysdev, &attr_pcenable);
+ device_create_file(&c->dev, &dev_attr_pc0event);
+ device_create_file(&c->dev, &dev_attr_pc0count);
+ device_create_file(&c->dev, &dev_attr_pc1event);
+ device_create_file(&c->dev, &dev_attr_pc1count);
+ device_create_file(&c->dev, &dev_attr_pccycles);
+ device_create_file(&c->dev, &dev_attr_pcenable);
#endif
}
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index bc3aa18293d..900e49b2258 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -14,7 +14,7 @@
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
/* May be overridden by platform code */
int __weak nmi_enable(void)
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index ef5a2a08fcc..ea339575032 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -34,10 +34,12 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
cpu_idle_sleep();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 7fbf0dcb9af..402a7bb7266 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1067,7 +1067,7 @@ void __init at32_setup_serial_console(unsigned int usart_id)
* -------------------------------------------------------------------- */
#ifdef CONFIG_CPU_AT32AP7000
-static struct eth_platform_data macb0_data;
+static struct macb_platform_data macb0_data;
static struct resource macb0_resource[] = {
PBMEM(0xfff01800),
IRQ(25),
@@ -1076,7 +1076,7 @@ DEFINE_DEV_DATA(macb, 0);
DEV_CLK(hclk, macb0, hsb, 8);
DEV_CLK(pclk, macb0, pbb, 6);
-static struct eth_platform_data macb1_data;
+static struct macb_platform_data macb1_data;
static struct resource macb1_resource[] = {
PBMEM(0xfff01c00),
IRQ(26),
@@ -1086,7 +1086,7 @@ DEV_CLK(hclk, macb1, hsb, 9);
DEV_CLK(pclk, macb1, pbb, 7);
struct platform_device *__init
-at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
+at32_add_device_eth(unsigned int id, struct macb_platform_data *data)
{
struct platform_device *pdev;
u32 pin_mask;
@@ -1163,7 +1163,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
return NULL;
}
- memcpy(pdev->dev.platform_data, data, sizeof(struct eth_platform_data));
+ memcpy(pdev->dev.platform_data, data, sizeof(struct macb_platform_data));
platform_device_register(pdev);
return pdev;
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 5d7ffca7d69..67b111ce332 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/serial.h>
+#include <linux/platform_data/macb.h>
#define GPIO_PIN_NONE (-1)
@@ -42,12 +43,8 @@ struct atmel_uart_data {
void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
struct platform_device *at32_add_device_usart(unsigned int id);
-struct eth_platform_data {
- u32 phy_mask;
- u8 is_rmii;
-};
struct platform_device *
-at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
+at32_add_device_eth(unsigned int id, struct macb_platform_data *data);
struct spi_board_info;
struct platform_device *
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index 5edcb58d6f7..0b7039cf07f 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -80,7 +80,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 2e549572d4f..5553205d7cb 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -97,7 +97,7 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_WATCHDOG=y
diff --git a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
index ad0881ba30a..d95658fc312 100644
--- a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
+++ b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
@@ -68,7 +68,7 @@ CONFIG_I2C_ALGOBIT=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=400
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 8465b3e6b86..498f64a8705 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -105,7 +105,7 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 5e7321b2604..72e0317565e 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -99,7 +99,7 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index a7eb54bf308..2f075e0b262 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -81,7 +81,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index b90d3792ed5..ab38a82597b 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -84,7 +84,7 @@ CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 005362537a7..5c802d6bbbc 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -94,7 +94,7 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_BLACKFIN_TWI=m
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index 580bf4296a1..972aa6263ad 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -101,7 +101,7 @@ CONFIG_I2C=m
CONFIG_I2C_BLACKFIN_TWI=m
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 0e6d841b5d0..7a1e3bf2b04 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -113,7 +113,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index 77a27e31d6d..0fdc4ecaa53 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -85,7 +85,7 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PCA_PLATFORM=y
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index f5ed34e12e0..78adbbf3982 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -84,7 +84,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index d7ff2aee3fb..d3cd0f561c8 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -86,7 +86,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 85014319672..7b982d0502a 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -80,7 +80,7 @@ CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_SPI_SPIDEV=m
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index dbf750cd2db..c280a50e794 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -88,7 +88,7 @@ CONFIG_I2C_CHARDEV=m
CONFIG_I2C_BLACKFIN_TWI=m
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_WATCHDOG=y
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 07ffbdae34e..c940a1e3ab3 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -57,7 +57,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index 707cbf8a259..2e47df77490 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -78,7 +78,7 @@ CONFIG_SERIAL_BFIN_UART1=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_USB_GADGET=m
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 4596935eada..6da629ffc2f 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -72,7 +72,7 @@ CONFIG_SERIAL_BFIN_UART1=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_USB_GADGET=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 9f1d08401fc..349922be01f 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -89,7 +89,7 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_BFIN_WDT=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 6c7b21585a4..0456deaa2d6 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -78,7 +78,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_USB_GADGET=m
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index b192acfae38..89162d0fff9 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -78,7 +78,7 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 06e9f497fae..a26436bf50f 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -68,7 +68,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_SPI_SPIDEV=y
CONFIG_WATCHDOG=y
CONFIG_SOUND=m
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 5e797cf7204..647991514ac 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -70,7 +70,7 @@ CONFIG_SERIAL_BFIN_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_USB=y
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index a566a2fe6b9..8fd9b446d65 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -84,7 +84,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index 12e66cd7cda..0520c160230 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -71,7 +71,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_HWMON=m
CONFIG_WATCHDOG=y
CONFIG_BFIN_WDT=y
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index d496ae9a39b..e4ed865b885 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -92,7 +92,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_BLACKFIN_TWI=y
CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 65f642167a5..c1f45f15295 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -70,7 +70,7 @@ CONFIG_SERIAL_BFIN_UART1=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
-CONFIG_SPI_BFIN=y
+CONFIG_SPI_BFIN5XX=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_BFIN_WDT=y
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index ecacdf34768..68bcc3d119b 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -51,9 +51,6 @@ struct bfin_serial_port {
#elif ANOMALY_05000363
unsigned int anomaly_threshold;
#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- int scts;
-#endif
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
int cts_pin;
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index 05043786da2..e349631c829 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -14,6 +14,9 @@ struct blackfin_cpudata {
struct cpu cpu;
unsigned int imemctl;
unsigned int dmemctl;
+#ifdef CONFIG_SMP
+ struct task_struct *idle;
+#endif
};
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index af6c0aa79ba..dc3d144b4bb 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -37,7 +37,7 @@ extern unsigned long dcache_invld_count[NR_CPUS];
#endif
void smp_icache_flush_range_others(unsigned long start,
- unsigned long end);
+ unsigned long end);
#ifdef CONFIG_HOTPLUG_CPU
void coreb_die(void);
void cpu_die(void);
@@ -46,4 +46,7 @@ int __cpu_disable(void);
int __cpu_die(unsigned int cpu);
#endif
+void smp_timer_broadcast(const struct cpumask *mask);
+
+
#endif /* !__ASM_BLACKFIN_SMP_H */
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a12..53ad10005ae 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 6 /* is freezing for suspend */
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a80a9e9fc4..8dd0416673c 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -88,10 +88,12 @@ void cpu_idle(void)
#endif
if (!idle)
idle = default_idle;
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index dfa2525a442..d6102c86d03 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -828,10 +828,18 @@ static inline int __init get_mem_size(void)
u32 ddrctl = bfin_read_EBIU_DDRCTL1();
int ret = 0;
switch (ddrctl & 0xc0000) {
- case DEVSZ_64: ret = 64 / 8;
- case DEVSZ_128: ret = 128 / 8;
- case DEVSZ_256: ret = 256 / 8;
- case DEVSZ_512: ret = 512 / 8;
+ case DEVSZ_64:
+ ret = 64 / 8;
+ break;
+ case DEVSZ_128:
+ ret = 128 / 8;
+ break;
+ case DEVSZ_256:
+ ret = 256 / 8;
+ break;
+ case DEVSZ_512:
+ ret = 512 / 8;
+ break;
}
switch (ddrctl & 0x30000) {
case DEVWD_4: ret *= 2;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 1bcf3a3c57d..d98f2d69b0c 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -219,7 +219,7 @@ static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
-static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
static int bfin_coretmr_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
@@ -281,6 +281,7 @@ void bfin_coretmr_init(void)
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
+
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
@@ -306,6 +307,11 @@ void bfin_coretmr_clockevent_init(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+#ifdef CONFIG_SMP
+ evt->broadcast = smp_timer_broadcast;
+#endif
+
+
evt->name = "bfin_core_timer";
evt->rating = 350;
evt->irq = -1;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index d1c0c0cff3e..a2d96d31bbf 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -61,7 +61,7 @@ static struct physmap_flash_data ezbrd_flash_data = {
static struct resource ezbrd_flash_resource = {
.start = 0x20000000,
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
.end = 0x202fffff,
#else
.end = 0x203fffff,
@@ -122,6 +122,8 @@ static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
.phy_mask = 0xfff7, /* Only probe the port phy connect to the on chip MAC */
#endif
+ .vlan1_mask = 1,
+ .vlan2_mask = 2,
};
static struct platform_device bfin_mii_bus = {
@@ -292,7 +294,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
};
/* SPI controller data */
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 6,
@@ -715,7 +717,7 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
&bfin_spi1_device,
#endif
@@ -777,7 +779,7 @@ static int __init ezbrd_init(void)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
/* setup BF518-EZBRD GPIO pin PG11 to AMS2, PG15 to AMS3. */
peripheral_request(P_AMS2, "ParaFlash");
-#if !defined(CONFIG_SPI_BFIN) && !defined(CONFIG_SPI_BFIN_MODULE)
+#if !defined(CONFIG_SPI_BFIN5XX) && !defined(CONFIG_SPI_BFIN5XX_MODULE)
peripheral_request(P_AMS3, "ParaFlash");
#endif
return 0;
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 5470bf89e52..f271310f739 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -228,7 +228,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
};
/* SPI controller data */
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 6,
@@ -635,7 +635,7 @@ static struct platform_device *tcm_devices[] __initdata = {
&bfin_mac_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
&bfin_spi1_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 5bc6938157a..c8d5d2b7c73 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -334,7 +334,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -744,7 +744,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_mac_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index cd289698b4d..7330607856e 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -444,7 +444,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
@@ -893,7 +893,7 @@ static struct platform_device *cmbf527_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9f792eafd1c..db3ecfce830 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -371,7 +371,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
@@ -776,7 +776,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_mac_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 3ecafff5d2e..dfdd8e6bac7 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -664,7 +664,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
@@ -1189,7 +1189,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 3a92c4318d2..360e97fc529 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -448,7 +448,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = EXP_GPIO_SPISEL_BASE + 8 + MAX_CTRL_CS,
@@ -831,7 +831,7 @@ static struct platform_device *tll6527m_devices[] __initdata = {
&bfin_mac_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 47cadd316e7..6cb7b3ed9b3 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -125,7 +125,7 @@ static struct platform_device net2272_bfin_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -398,7 +398,7 @@ static struct platform_device *h8606_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -428,7 +428,7 @@ static int __init H8606_init(void)
printk(KERN_INFO "HV Sistemas H8606 board support by http://www.hvsistemas.com\n");
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(h8606_devices, ARRAY_SIZE(h8606_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
return 0;
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 18817d57c7a..de44a3765e5 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -146,7 +146,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -422,7 +422,7 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 2c8f30ef6a7..fe47e048c4e 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -29,7 +29,7 @@
*/
const char bfin_board_name[] = "Bluetechnix CM BF533";
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
@@ -536,7 +536,7 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -549,7 +549,7 @@ static int __init cm_bf533_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf533_devices, ARRAY_SIZE(cm_bf533_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
return 0;
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 144556e1449..07811c209b9 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -245,7 +245,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -484,7 +484,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
&smc91x_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index b597d4e50d5..e303dae4e2d 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -104,7 +104,7 @@ static struct platform_device dm9000_device2 = {
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -270,7 +270,7 @@ static struct platform_device *ip0x_devices[] __initdata = {
#endif
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&spi_bfin_master_device,
#endif
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 2afd02e14bd..ce88a7165b6 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -219,9 +219,10 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \
+ defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
{
- .modalias = "ad183x",
+ .modalias = "ad1836",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
@@ -251,7 +252,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -471,7 +472,7 @@ static struct i2c_gpio_platform_data i2c_gpio_data = {
.scl_pin = GPIO_PF3,
.sda_is_open_drain = 0,
.scl_is_open_drain = 0,
- .udelay = 40,
+ .udelay = 10,
};
static struct platform_device i2c_gpio_device = {
@@ -540,27 +541,150 @@ static struct platform_device bfin_dpmc = {
},
};
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
+ defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) \
+ || defined(CONFIG_SND_BF5XX_AC97) || \
+ defined(CONFIG_SND_BF5XX_AC97_MODULE)
+
+#include <asm/bfin_sport.h>
+
+#define SPORT_REQ(x) \
+ [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
+ P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
+
+static const u16 bfin_snd_pin[][7] = {
+ SPORT_REQ(0),
+ SPORT_REQ(1),
+};
+
+static struct bfin_snd_platform_data bfin_snd_data[] = {
+ {
+ .pin_req = &bfin_snd_pin[0][0],
+ },
+ {
+ .pin_req = &bfin_snd_pin[1][0],
+ },
+};
+
+#define BFIN_SND_RES(x) \
+ [x] = { \
+ { \
+ .start = SPORT##x##_TCR1, \
+ .end = SPORT##x##_TCR1, \
+ .flags = IORESOURCE_MEM \
+ }, \
+ { \
+ .start = CH_SPORT##x##_RX, \
+ .end = CH_SPORT##x##_RX, \
+ .flags = IORESOURCE_DMA, \
+ }, \
+ { \
+ .start = CH_SPORT##x##_TX, \
+ .end = CH_SPORT##x##_TX, \
+ .flags = IORESOURCE_DMA, \
+ }, \
+ { \
+ .start = IRQ_SPORT##x##_ERROR, \
+ .end = IRQ_SPORT##x##_ERROR, \
+ .flags = IORESOURCE_IRQ, \
+ } \
+ }
+
+static struct resource bfin_snd_resources[][4] = {
+ BFIN_SND_RES(0),
+ BFIN_SND_RES(1),
+};
+#endif
+
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+ .name = "bfin-i2s-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+ .name = "bfin-tdm-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+ .name = "bfin-ac97-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
+ defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+static const unsigned ad73311_gpio[] = {
+ GPIO_PF4,
+};
+
+static struct platform_device bfin_ad73311_machine = {
+ .name = "bfin-snd-ad73311",
+ .id = 1,
+ .dev = {
+ .platform_data = (void *)ad73311_gpio,
+ },
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+static struct platform_device bfin_ad73311_codec_device = {
+ .name = "ad73311",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+static struct platform_device bfin_ad74111_codec_device = {
+ .name = "ad74111",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
+ defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
- /* TODO: add platform data here */
+ .num_resources =
+ ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+ .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+ .dev = {
+ .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+ },
};
#endif
-#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \
+ defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
static struct platform_device bfin_tdm = {
.name = "bfin-tdm",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
- /* TODO: add platform data here */
+ .num_resources =
+ ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+ .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+ .dev = {
+ .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+ },
};
#endif
-#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
+ defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
static struct platform_device bfin_ac97 = {
.name = "bfin-ac97",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
- /* TODO: add platform data here */
+ .num_resources =
+ ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
+ .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
+ .dev = {
+ .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
+ },
};
#endif
@@ -580,7 +704,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -596,7 +720,8 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
#endif
-#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || \
+ defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
@@ -618,14 +743,42 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
- &bfin_i2s,
+ &bfin_i2s_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
- &bfin_tdm,
+ &bfin_tdm_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+ &bfin_ac97_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
+ defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
+ &bfin_ad73311_machine,
+#endif
+
+#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
+ &bfin_ad73311_codec_device,
+#endif
+
+#if defined(CONFIG_SND_SOC_AD74111) || defined(CONFIG_SND_SOC_AD74111_MODULE)
+ &bfin_ad74111_codec_device,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_I2S) || \
+ defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
+ &bfin_i2s,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_TDM) || \
+ defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
+ &bfin_tdm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_SOC_AC97) || \
+ defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
&bfin_ac97,
#endif
};
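
[Editor's note: the bf533 stamp hunk above drops the old "TODO: add platform data here" placeholders and instead hands each CPU DAI its SPORT resources and a pin_req list through platform_data. The fragment below is a minimal sketch of how a driver probe could consume that data; the function name and error handling are illustrative assumptions, not the actual bfin-i2s driver code.]

#include <linux/platform_device.h>
#include <asm/bfin_sport.h>	/* struct bfin_snd_platform_data (assumed to live here) */
#include <asm/portmux.h>

/* Hypothetical probe: claim the SPORT pins passed via dev.platform_data
 * (see bfin_snd_data[] above) before touching the SPORT registers. */
static int bfin_i2s_sketch_probe(struct platform_device *pdev)
{
	struct bfin_snd_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata || peripheral_request_list(pdata->pin_req, "bfin-i2s"))
		return -EBUSY;

	/* SPORT MMIO, DMA channels and the error IRQ would come from the
	 * resources attached above, via platform_get_resource(). */
	return 0;
}
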
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 604a430038e..0d4a2f61a97 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -31,7 +31,7 @@
*/
const char bfin_board_name[] = "Bluetechnix CM BF537E";
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -735,7 +735,7 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -770,7 +770,7 @@ static int __init cm_bf537e_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf537e_devices, ARRAY_SIZE(cm_bf537e_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index d916b46a44f..f5536982706 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -32,7 +32,7 @@
*/
const char bfin_board_name[] = "Bluetechnix CM BF537U";
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -700,7 +700,7 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -747,7 +747,7 @@ static int __init cm_bf537u_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf537u_devices, ARRAY_SIZE(cm_bf537u_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 5f307228be6..11dadeb33d7 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -125,7 +125,7 @@ static struct platform_device asmb_flash_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -370,7 +370,7 @@ static struct platform_device *dnp5370_devices[] __initdata = {
&bfin_mac_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&spi_bfin_master_device,
#endif
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 3901dd093b9..d2d71282618 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -121,7 +121,7 @@ static struct platform_device net2272_bfin_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
@@ -496,7 +496,7 @@ static struct platform_device *minotaur_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -537,7 +537,7 @@ static int __init minotaur_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(minotaur_devices, ARRAY_SIZE(minotaur_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info,
ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index aebd31c845f..6fd84709fc6 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -154,7 +154,7 @@ static struct platform_device net2272_bfin_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
@@ -477,7 +477,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -508,7 +508,7 @@ static int __init pnav_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info,
ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 7fbb0bbf867..2221173e489 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -1420,7 +1420,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
@@ -1462,7 +1462,7 @@ static struct platform_device bfin_spi0_device = {
/* SPORT SPI controller data */
static struct bfin5xx_spi_master bfin_sport_spi0_info = {
- .num_chipselect = 1, /* master only supports one device */
+ .num_chipselect = MAX_BLACKFIN_GPIOS,
.enable_dma = 0, /* master doesn't support DMA */
.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
@@ -1492,7 +1492,7 @@ static struct platform_device bfin_sport_spi0_device = {
};
static struct bfin5xx_spi_master bfin_sport_spi1_info = {
- .num_chipselect = 1, /* master only supports one device */
+ .num_chipselect = MAX_BLACKFIN_GPIOS,
.enable_dma = 0, /* master doesn't support DMA */
.pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI,
P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0},
@@ -1558,6 +1558,71 @@ static struct platform_device bfin_lq035q1_device = {
};
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const struct ppi_info ppi_info = {
+ .type = PPI_TYPE_PPI,
+ .dma_ch = CH_PPI,
+ .irq_err = IRQ_PPI_ERROR,
+ .base = (void __iomem *)PPI_CONTROL,
+ .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_VS6624) \
+ || defined(CONFIG_VIDEO_VS6624_MODULE)
+static struct v4l2_input vs6624_inputs[] = {
+ {
+ .index = 0,
+ .name = "Camera",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_UNKNOWN,
+ },
+};
+
+static struct bcap_route vs6624_routes[] = {
+ {
+ .input = 0,
+ .output = 0,
+ },
+};
+
+static const unsigned vs6624_ce_pin = GPIO_PF10;
+
+static struct bfin_capture_config bfin_capture_data = {
+ .card_name = "BF537",
+ .inputs = vs6624_inputs,
+ .num_inputs = ARRAY_SIZE(vs6624_inputs),
+ .routes = vs6624_routes,
+ .i2c_adapter_id = 0,
+ .board_info = {
+ .type = "vs6624",
+ .addr = 0x10,
+ .platform_data = (void *)&vs6624_ce_pin,
+ },
+ .ppi_info = &ppi_info,
+ .ppi_control = (PACK_EN | DLEN_8 | XFR_TYPE | 0x0020),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+ .name = "bfin_capture",
+ .dev = {
+ .platform_data = &bfin_capture_data,
+ },
+};
+#endif
+
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
@@ -2716,7 +2781,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -2733,6 +2798,11 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_lq035q1_device,
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+ &bfin_capture_device,
+#endif
+
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
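
[Editor's note: the bfin_capture additions above (repeated for the BF548 and BF561 boards later in this patch) describe the sensor purely as data: a PPI description plus an i2c_board_info and adapter id. The helper below is a rough sketch of how a capture driver could attach the sensor from that configuration; it is hypothetical, not the actual bfin_capture implementation.]

#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/blackfin/bfin_capture.h>

/* Hypothetical helper: look up the adapter named in the board config and
 * register the sensor subdevice described by board_info. */
static struct v4l2_subdev *bcap_attach_sensor_sketch(struct v4l2_device *v4l2_dev,
						     struct bfin_capture_config *cfg)
{
	struct i2c_adapter *adap = i2c_get_adapter(cfg->i2c_adapter_id);

	if (!adap)
		return NULL;

	return v4l2_i2c_new_subdev_board(v4l2_dev, adap,
					 &cfg->board_info, NULL);
}
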
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 6917ce2fa55..988517671a5 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -32,7 +32,7 @@
*/
const char bfin_board_name[] = "Bluetechnix TCM BF537";
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -702,7 +702,7 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -737,7 +737,7 @@ static int __init tcm_bf537_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 8356eb599f1..1633a6f306c 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -490,7 +490,7 @@ static struct platform_device smc91x_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -874,7 +874,7 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
#endif
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bf538_spi_master0,
&bf538_spi_master1,
&bf538_spi_master2,
@@ -938,7 +938,7 @@ static int __init ezkit_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bf538_spi_board_info,
ARRAY_SIZE(bf538_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 0350eacec21..68af594db48 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -854,7 +854,7 @@ static struct platform_device para_flash_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -1175,7 +1175,7 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
&bf54x_sdh_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bf54x_spi_master0,
&bf54x_spi_master1,
#endif
@@ -1210,7 +1210,7 @@ static int __init cm_bf548_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf548_devices, ARRAY_SIZE(cm_bf548_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bf54x_spi_board_info,
ARRAY_SIZE(bf54x_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index bb868ac0fe2..3ea45f8bd61 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -1110,7 +1110,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
};
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -1183,6 +1183,71 @@ static struct platform_device bf54x_spi_master1 = {
};
#endif /* spi master and devices */
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+ P_PPI1_D0, P_PPI1_D1, P_PPI1_D2, P_PPI1_D3,
+ P_PPI1_D4, P_PPI1_D5, P_PPI1_D6, P_PPI1_D7,
+ P_PPI1_CLK, P_PPI1_FS1, P_PPI1_FS2,
+ 0,
+};
+
+static const struct ppi_info ppi_info = {
+ .type = PPI_TYPE_EPPI,
+ .dma_ch = CH_EPPI1,
+ .irq_err = IRQ_EPPI1_ERROR,
+ .base = (void __iomem *)EPPI1_STATUS,
+ .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_VS6624) \
+ || defined(CONFIG_VIDEO_VS6624_MODULE)
+static struct v4l2_input vs6624_inputs[] = {
+ {
+ .index = 0,
+ .name = "Camera",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_UNKNOWN,
+ },
+};
+
+static struct bcap_route vs6624_routes[] = {
+ {
+ .input = 0,
+ .output = 0,
+ },
+};
+
+static const unsigned vs6624_ce_pin = GPIO_PG6;
+
+static struct bfin_capture_config bfin_capture_data = {
+ .card_name = "BF548",
+ .inputs = vs6624_inputs,
+ .num_inputs = ARRAY_SIZE(vs6624_inputs),
+ .routes = vs6624_routes,
+ .i2c_adapter_id = 0,
+ .board_info = {
+ .type = "vs6624",
+ .addr = 0x10,
+ .platform_data = (void *)&vs6624_ce_pin,
+ },
+ .ppi_info = &ppi_info,
+ .ppi_control = (POLC | PACKEN | DLEN_8 | XFR_TYPE | 0x20),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+ .name = "bfin_capture",
+ .dev = {
+ .platform_data = &bfin_capture_data,
+ },
+};
+#endif
+
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
@@ -1502,10 +1567,14 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bf54x_sdh_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bf54x_spi_master0,
&bf54x_spi_master1,
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+ &bfin_capture_device,
+#endif
#if defined(CONFIG_KEYBOARD_BFIN) || defined(CONFIG_KEYBOARD_BFIN_MODULE)
&bf54x_kpad_device,
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index b1b7339b6ba..f6ffd6f054c 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -372,7 +372,7 @@ static struct bfin5xx_spi_chip data_flash_chip_info = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -475,7 +475,7 @@ static struct platform_device bfin_dpmc = {
static struct platform_device *acvilon_devices[] __initdata = {
&bfin_dpmc,
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index c017cf07ed4..d81450f635d 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -29,7 +29,7 @@
*/
const char bfin_board_name[] = "Bluetechnix CM BF561";
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
@@ -488,7 +488,7 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
&net2272_bfin_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -523,7 +523,7 @@ static int __init cm_bf561_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf561_devices, ARRAY_SIZE(cm_bf561_devices));
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 27f22ed381d..838978808a1 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -291,7 +291,7 @@ static struct platform_device ezkit_flash_device = {
};
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
@@ -383,7 +383,7 @@ static struct i2c_gpio_platform_data i2c_gpio_data = {
.scl_pin = GPIO_PF0,
.sda_is_open_drain = 0,
.scl_is_open_drain = 0,
- .udelay = 40,
+ .udelay = 10,
};
static struct platform_device i2c_gpio_device = {
@@ -422,6 +422,96 @@ static struct platform_device bfin_dpmc = {
},
};
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const struct ppi_info ppi_info = {
+ .type = PPI_TYPE_PPI,
+ .dma_ch = CH_PPI0,
+ .irq_err = IRQ_PPI1_ERROR,
+ .base = (void __iomem *)PPI0_CONTROL,
+ .pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_ADV7183) \
+ || defined(CONFIG_VIDEO_ADV7183_MODULE)
+#include <media/adv7183.h>
+static struct v4l2_input adv7183_inputs[] = {
+ {
+ .index = 0,
+ .name = "Composite",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL,
+ },
+ {
+ .index = 1,
+ .name = "S-Video",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL,
+ },
+ {
+ .index = 2,
+ .name = "Component",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL,
+ },
+};
+
+static struct bcap_route adv7183_routes[] = {
+ {
+ .input = ADV7183_COMPOSITE4,
+ .output = ADV7183_8BIT_OUT,
+ },
+ {
+ .input = ADV7183_SVIDEO0,
+ .output = ADV7183_8BIT_OUT,
+ },
+ {
+ .input = ADV7183_COMPONENT0,
+ .output = ADV7183_8BIT_OUT,
+ },
+};
+
+
+static const unsigned adv7183_gpio[] = {
+ GPIO_PF13, /* reset pin */
+ GPIO_PF2, /* output enable pin */
+};
+
+static struct bfin_capture_config bfin_capture_data = {
+ .card_name = "BF561",
+ .inputs = adv7183_inputs,
+ .num_inputs = ARRAY_SIZE(adv7183_inputs),
+ .routes = adv7183_routes,
+ .i2c_adapter_id = 0,
+ .board_info = {
+ .type = "adv7183",
+ .addr = 0x20,
+ .platform_data = (void *)adv7183_gpio,
+ },
+ .ppi_info = &ppi_info,
+ .ppi_control = (PACK_EN | DLEN_8 | DMA32 | FLD_SEL),
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+ .name = "bfin_capture",
+ .dev = {
+ .platform_data = &bfin_capture_data,
+ },
+};
+#endif
+
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
@@ -462,7 +552,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_isp1760_device,
#endif
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
@@ -494,6 +584,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
&ezkit_flash_device,
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+ &bfin_capture_device,
+#endif
+
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
&bfin_i2s,
#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index 7977db2f1c1..00bdacee9cc 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -16,6 +16,7 @@
#include <mach/irq.h>
#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
+#define SUPPLE_1_WAKEUP ((IRQ_SUPPLE_1 - (IRQ_CORETMR + 1)) % 32)
static inline void
bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
@@ -42,7 +43,8 @@ bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
static inline void
bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
{
- bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
+ bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP) |
+ IWR_ENABLE(SUPPLE_1_WAKEUP), 0, iwr0, iwr1, iwr2);
}
#endif
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index db22401e760..ab1c617b9cf 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -84,7 +84,7 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
/* CoreB already running, send an IPI to wake it up */
- platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+ smp_send_reschedule(cpu);
} else {
/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
@@ -114,7 +114,8 @@ void __init platform_request_ipi(int irq, void *handler)
int ret;
const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
- ret = request_irq(irq, handler, IRQF_PERCPU, name, handler);
+ ret = request_irq(irq, handler, IRQF_PERCPU | IRQF_NO_SUSPEND |
+ IRQF_FORCE_RESUME, name, handler);
if (ret)
panic("Cannot request %s for IPI service", name);
}
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 0784a52389c..ac8f8a43158 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
+#include <linux/clockchips.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -47,9 +48,10 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
-#define BFIN_IPI_RESCHEDULE 0
-#define BFIN_IPI_CALL_FUNC 1
-#define BFIN_IPI_CPU_STOP 2
+#define BFIN_IPI_TIMER 0
+#define BFIN_IPI_RESCHEDULE 1
+#define BFIN_IPI_CALL_FUNC 2
+#define BFIN_IPI_CPU_STOP 3
struct blackfin_flush_data {
unsigned long start;
@@ -160,6 +162,14 @@ static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
+void ipi_timer(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+ evt->event_handler(evt);
+}
+
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
struct ipi_message *msg;
@@ -176,18 +186,17 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
while (msg_queue->count) {
msg = &msg_queue->ipi_message[msg_queue->head];
switch (msg->type) {
+ case BFIN_IPI_TIMER:
+ ipi_timer();
+ break;
case BFIN_IPI_RESCHEDULE:
scheduler_ipi();
break;
case BFIN_IPI_CALL_FUNC:
- spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_call_function(cpu, msg);
- spin_lock_irqsave(&msg_queue->lock, flags);
break;
case BFIN_IPI_CPU_STOP:
- spin_unlock_irqrestore(&msg_queue->lock, flags);
ipi_cpu_stop(cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
break;
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
@@ -297,8 +306,6 @@ void smp_send_reschedule(int cpu)
{
cpumask_t callmap;
/* simply trigger an ipi */
- if (cpu_is_offline(cpu))
- return;
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
@@ -308,6 +315,16 @@ void smp_send_reschedule(int cpu)
return;
}
+void smp_send_msg(const struct cpumask *mask, unsigned long type)
+{
+ smp_send_message(*mask, type, NULL, NULL, 0);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+ smp_send_msg(mask, BFIN_IPI_TIMER);
+}
+
void smp_send_stop(void)
{
cpumask_t callmap;
@@ -326,17 +343,24 @@ void smp_send_stop(void)
int __cpuinit __cpu_up(unsigned int cpu)
{
int ret;
- static struct task_struct *idle;
+ struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
+ struct task_struct *idle = ci->idle;
- if (idle)
+ if (idle) {
free_task(idle);
-
- idle = fork_idle(cpu);
- if (IS_ERR(idle)) {
- printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
- return PTR_ERR(idle);
+ idle = NULL;
}
+ if (!idle) {
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle)) {
+ printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+ return PTR_ERR(idle);
+ }
+ ci->idle = idle;
+ } else {
+ init_idle(idle, cpu);
+ }
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
ret = platform_boot_secondary(cpu, idle);
@@ -411,6 +435,7 @@ void __cpuinit secondary_start_kernel(void)
bfin_setup_caches(cpu);
+ notify_cpu_starting(cpu);
/*
* Calibrate loops per jiffy value.
* IRQs need to be enabled here - D-cache can be invalidated
@@ -453,8 +478,10 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
smp_flush_data.start = start;
smp_flush_data.end = end;
- if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
+ preempt_disable();
+ if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
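
[Editor's note: with BFIN_IPI_TIMER and smp_timer_broadcast() in place, a per-CPU core-timer clockevent can broadcast ticks to the other core over the IPI path. The fragment below sketches that wiring; it assumes the rest of the clock_event_device (name, rating, set_mode/set_next_event) is filled in elsewhere and is not the actual Blackfin timer code.]

#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/cpumask.h>

void smp_timer_broadcast(const struct cpumask *mask);	/* added above */

/* Hypothetical wiring: broadcast requests for this CPU's core-timer
 * clockevent end up in ipi_timer() on the target core. */
static void __cpuinit bfin_coretmr_broadcast_sketch(struct clock_event_device *evt,
						    unsigned int cpu)
{
	evt->broadcast = smp_timer_broadcast;
	evt->cpumask = cpumask_of(cpu);
	clockevents_register_device(evt);
}
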
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
new file mode 100644
index 00000000000..26e67f0f005
--- /dev/null
+++ b/arch/c6x/Kconfig
@@ -0,0 +1,174 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+config TMS320C6X
+ def_bool y
+ select CLKDEV_LOOKUP
+ select GENERIC_IRQ_SHOW
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_DMA_API_DEBUG
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_MEMBLOCK
+ select HAVE_SPARSE_IRQ
+ select OF
+ select OF_EARLY_FLATTREE
+
+config MMU
+ def_bool n
+
+config ZONE_DMA
+ def_bool y
+
+config FPU
+ def_bool n
+
+config HIGHMEM
+ def_bool n
+
+config NUMA
+ def_bool n
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ def_bool n
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+
+config GENERIC_BUG
+ def_bool y
+
+config COMMON_CLKDEV
+ def_bool y
+
+config C6X_BIG_KERNEL
+ bool "Build a big kernel"
+ help
+ The C6X function call instruction has a limited range of +/- 2MiB.
+ This is sufficient for most kernels, but some kernel configurations
+ with lots of compiled-in functionality may require a larger range
+ for function calls. Use this option to have the compiler generate
+ function calls with 32-bit range. This will make the kernel both
+ larger and slower.
+
+ If unsure, say N.
+
+source "init/Kconfig"
+
+# Use the generic interrupt handling code in kernel/irq/
+
+source "kernel/Kconfig.freezer"
+
+config CMDLINE_BOOL
+ bool "Default bootloader kernel arguments"
+
+config CMDLINE
+ string "Kernel command line"
+ depends on CMDLINE_BOOL
+ default "console=ttyS0,57600"
+ help
+ On some architectures there is currently no way for the boot loader
+ to pass arguments to the kernel. For these architectures, you should
+ supply some command-line options at build time by entering them
+ here.
+
+config CMDLINE_FORCE
+ bool "Force default kernel command string"
+ depends on CMDLINE_BOOL
+ default n
+ help
+ Set this to have arguments from the default kernel command string
+ override those passed by the boot loader.
+
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ default n
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+ Note that your board must be properly built and your board
+ port must properly enable any big-endian related features
+ of your chipset/board/processor.
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "13"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
+menu "Processor type and features"
+
+source "arch/c6x/platforms/Kconfig"
+
+config TMS320C6X_CACHES_ON
+ bool "L2 cache support"
+ default y
+
+config KERNEL_RAM_BASE_ADDRESS
+ hex "Virtual address of memory base"
+ default 0xe0000000 if SOC_TMS320C6455
+ default 0xe0000000 if SOC_TMS320C6457
+ default 0xe0000000 if SOC_TMS320C6472
+ default 0x80000000
+
+source "mm/Kconfig"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config ACCESS_CHECK
+ bool "Check the user pointer address"
+ default y
+ help
+ Usually, pointers passed in from user space are checked to make sure
+ they do not point into kernel space.
+
+ Say N here to disable that check and improve performance.
+
+endmenu
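
[Editor's note: the CMDLINE_BOOL/CMDLINE/CMDLINE_FORCE options above follow the usual built-in command line pattern. The sketch below shows how an architecture setup path typically honours them, under the assumption that c6x does the same; the names here are illustrative, not the actual c6x code.]

#include <linux/init.h>
#include <linux/string.h>
#include <asm/setup.h>		/* COMMAND_LINE_SIZE */

static char c6x_command_line[COMMAND_LINE_SIZE];

/* Hypothetical helper: combine the boot loader string with the
 * Kconfig-supplied default according to CMDLINE_FORCE. */
static void __init setup_cmdline_sketch(const char *from_bootloader)
{
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_FORCE)
	/* ignore whatever the boot loader passed in */
	strlcpy(c6x_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
	strlcpy(c6x_command_line, from_bootloader, COMMAND_LINE_SIZE);
#ifdef CONFIG_CMDLINE_BOOL
	/* fall back to the built-in default if the loader gave us nothing */
	if (c6x_command_line[0] == '\0')
		strlcpy(c6x_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif
}
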
diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile
new file mode 100644
index 00000000000..1d08dd07027
--- /dev/null
+++ b/arch/c6x/Makefile
@@ -0,0 +1,60 @@
+#
+# linux/arch/c6x/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+cflags-y += -mno-dsbt -msdata=none
+
+cflags-$(CONFIG_C6X_BIG_KERNEL) += -mlong-calls
+
+CFLAGS_MODULE += -mlong-calls -mno-dsbt -msdata=none
+
+CHECKFLAGS +=
+
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(cflags-y)
+
+ifdef CONFIG_CPU_BIG_ENDIAN
+KBUILD_CFLAGS += -mbig-endian
+KBUILD_AFLAGS += -mbig-endian
+LINKFLAGS += -mbig-endian
+KBUILD_LDFLAGS += -mbig-endian
+LDFLAGS += -EB
+endif
+
+head-y := arch/c6x/kernel/head.o
+core-y += arch/c6x/kernel/ arch/c6x/mm/ arch/c6x/platforms/
+libs-y += arch/c6x/lib/
+
+# Default to vmlinux.bin, override when needed
+all: vmlinux.bin
+
+boot := arch/$(ARCH)/boot
+
+# Are we making a dtbImage.<boardname> target? If so, crack out the boardname
+DTB:=$(subst dtbImage.,,$(filter dtbImage.%, $(MAKECMDGOALS)))
+export DTB
+
+ifneq ($(DTB),)
+core-y += $(boot)/
+endif
+
+# With make 3.82 we cannot mix normal and wildcard targets
+
+vmlinux.bin: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+
+dtbImage.%: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+ @echo ' vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
+ @echo ' dtbImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
+ @echo ' - stripped elf with fdt blob'
+endef
diff --git a/arch/c6x/boot/Makefile b/arch/c6x/boot/Makefile
new file mode 100644
index 00000000000..ecca820e604
--- /dev/null
+++ b/arch/c6x/boot/Makefile
@@ -0,0 +1,30 @@
+#
+# Makefile for bootable kernel images
+#
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+DTC_FLAGS ?= -p 1024
+
+ifneq ($(DTB),)
+obj-y += linked_dtb.o
+endif
+
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+ $(call cmd,dtc)
+
+quiet_cmd_cp = CP $< $@$2
+ cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
+
+# Generate builtin.dtb from $(DTB).dtb
+$(obj)/builtin.dtb: $(obj)/$(DTB).dtb
+ $(call if_changed,cp)
+
+$(obj)/linked_dtb.o: $(obj)/builtin.dtb
+
+$(obj)/dtbImage.%: vmlinux
+ $(call if_changed,objcopy)
+
+clean-files := $(obj)/*.dtb
diff --git a/arch/c6x/boot/dts/dsk6455.dts b/arch/c6x/boot/dts/dsk6455.dts
new file mode 100644
index 00000000000..2b71f800618
--- /dev/null
+++ b/arch/c6x/boot/dts/dsk6455.dts
@@ -0,0 +1,62 @@
+/*
+ * arch/c6x/boot/dts/dsk6455.dts
+ *
+ * DSK6455 Evaluation Platform For TMS320C6455
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6455.dtsi"
+
+/ {
+ model = "Spectrum Digital DSK6455";
+ compatible = "spectrum-digital,dsk6455";
+
+ chosen {
+ bootargs = "root=/dev/nfs ip=dhcp rw";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0xE0000000 0x08000000>;
+ };
+
+ soc {
+ megamod_pic: interrupt-controller@1800000 {
+ interrupts = < 12 13 14 15 >;
+ };
+
+ emifa@70000000 {
+ flash@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x3 0x0 0x400000>;
+ bank-width = <1>;
+ device-width = <1>;
+ partition@0 {
+ reg = <0x0 0x400000>;
+ label = "NOR";
+ };
+ };
+ };
+
+ timer1: timer@2980000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 69 >;
+ };
+
+ clock-controller@029a0000 {
+ clock-frequency = <50000000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/evmc6457.dts b/arch/c6x/boot/dts/evmc6457.dts
new file mode 100644
index 00000000000..0301eb9a8ff
--- /dev/null
+++ b/arch/c6x/boot/dts/evmc6457.dts
@@ -0,0 +1,48 @@
+/*
+ * arch/c6x/boot/dts/evmc6457.dts
+ *
+ * EVMC6457 Evaluation Platform For TMS320C6457
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6457.dtsi"
+
+/ {
+ model = "eInfochips EVMC6457";
+ compatible = "einfochips,evmc6457";
+
+ chosen {
+ bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0xE0000000 0x10000000>;
+ };
+
+ soc {
+ megamod_pic: interrupt-controller@1800000 {
+ interrupts = < 12 13 14 15 >;
+ };
+
+ timer0: timer@2940000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 67 >;
+ };
+
+ clock-controller@29a0000 {
+ clock-frequency = <60000000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/evmc6472.dts b/arch/c6x/boot/dts/evmc6472.dts
new file mode 100644
index 00000000000..3e207b449a9
--- /dev/null
+++ b/arch/c6x/boot/dts/evmc6472.dts
@@ -0,0 +1,73 @@
+/*
+ * arch/c6x/boot/dts/evmc6472.dts
+ *
+ * EVMC6472 Evaluation Platform For TMS320C6472
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6472.dtsi"
+
+/ {
+ model = "eInfochips EVMC6472";
+ compatible = "einfochips,evmc6472";
+
+ chosen {
+ bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0xE0000000 0x10000000>;
+ };
+
+ soc {
+ megamod_pic: interrupt-controller@1800000 {
+ interrupts = < 12 13 14 15 >;
+ };
+
+ timer0: timer@25e0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ timer1: timer@25f0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ timer2: timer@2600000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ timer3: timer@2610000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ timer4: timer@2620000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ timer5: timer@2630000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 16 >;
+ };
+
+ clock-controller@29a0000 {
+ clock-frequency = <25000000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/evmc6474.dts b/arch/c6x/boot/dts/evmc6474.dts
new file mode 100644
index 00000000000..4dc291292bc
--- /dev/null
+++ b/arch/c6x/boot/dts/evmc6474.dts
@@ -0,0 +1,58 @@
+/*
+ * arch/c6x/boot/dts/evmc6474.dts
+ *
+ * EVMC6474 Evaluation Platform For TMS320C6474
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6474.dtsi"
+
+/ {
+ model = "Spectrum Digital EVMC6474";
+ compatible = "spectrum-digital,evmc6474";
+
+ chosen {
+ bootargs = "console=hvc root=/dev/nfs ip=dhcp rw";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x08000000>;
+ };
+
+ soc {
+ megamod_pic: interrupt-controller@1800000 {
+ interrupts = < 12 13 14 15 >;
+ };
+
+ timer3: timer@2940000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 39 >;
+ };
+
+ timer4: timer@2950000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 41 >;
+ };
+
+ timer5: timer@2960000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 43 >;
+ };
+
+ clock-controller@29a0000 {
+ clock-frequency = <50000000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/tms320c6455.dtsi b/arch/c6x/boot/dts/tms320c6455.dtsi
new file mode 100644
index 00000000000..a804ec1e018
--- /dev/null
+++ b/arch/c6x/boot/dts/tms320c6455.dtsi
@@ -0,0 +1,96 @@
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "ti,c64x+";
+ reg = <0>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6455";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ core_pic: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ compatible = "ti,c64x+core-pic";
+ };
+
+ /*
+ * Megamodule interrupt controller
+ */
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ };
+
+ cache-controller@1840000 {
+ compatible = "ti,c64x+cache";
+ reg = <0x01840000 0x8400>;
+ };
+
+ emifa@70000000 {
+ compatible = "ti,c64x+emifa", "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0x70000000 0x100>;
+ ranges = <0x2 0x0 0xa0000000 0x00000008
+ 0x3 0x0 0xb0000000 0x00400000
+ 0x4 0x0 0xc0000000 0x10000000
+ 0x5 0x0 0xD0000000 0x10000000>;
+
+ ti,dscr-dev-enable = <13>;
+ ti,emifa-burst-priority = <255>;
+ ti,emifa-ce-config = <0x00240120
+ 0x00240120
+ 0x00240122
+ 0x00240122>;
+ };
+
+ timer1: timer@2980000 {
+ compatible = "ti,c64x+timer64";
+ reg = <0x2980000 0x40>;
+ ti,dscr-dev-enable = <4>;
+ };
+
+ clock-controller@029a0000 {
+ compatible = "ti,c6455-pll", "ti,c64x+pll";
+ reg = <0x029a0000 0x200>;
+ ti,c64x+pll-bypass-delay = <1440>;
+ ti,c64x+pll-reset-delay = <15360>;
+ ti,c64x+pll-lock-delay = <24000>;
+ };
+
+ device-state-config-regs@2a80000 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02a80000 0x41000>;
+
+ ti,dscr-devstat = <0>;
+ ti,dscr-silicon-rev = <8 28 0xf>;
+ ti,dscr-rmii-resets = <0 0x40020 0x00040000>;
+
+ ti,dscr-locked-regs = <0x40008 0x40004 0x0f0a0b00>;
+ ti,dscr-devstate-ctl-regs =
+ <0 12 0x40008 1 0 0 2
+ 12 1 0x40008 3 0 30 2
+ 13 2 0x4002c 1 0xffffffff 0 1>;
+ ti,dscr-devstate-stat-regs =
+ <0 10 0x40014 1 0 0 3
+ 10 2 0x40018 1 0 0 3>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/tms320c6457.dtsi b/arch/c6x/boot/dts/tms320c6457.dtsi
new file mode 100644
index 00000000000..35f40709a71
--- /dev/null
+++ b/arch/c6x/boot/dts/tms320c6457.dtsi
@@ -0,0 +1,68 @@
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "ti,c64x+";
+ reg = <0>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6457";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ core_pic: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ compatible = "ti,c64x+core-pic";
+ };
+
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&core_pic>;
+ reg = <0x1800000 0x1000>;
+ };
+
+ cache-controller@1840000 {
+ compatible = "ti,c64x+cache";
+ reg = <0x01840000 0x8400>;
+ };
+
+ device-state-controller@2880800 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02880800 0x400>;
+
+ ti,dscr-devstat = <0x20>;
+ ti,dscr-silicon-rev = <0x18 28 0xf>;
+ ti,dscr-mac-fuse-regs = <0x114 3 4 5 6
+ 0x118 0 0 1 2>;
+ ti,dscr-kick-regs = <0x38 0x83E70B13
+ 0x3c 0x95A4F1E0>;
+ };
+
+ timer0: timer@2940000 {
+ compatible = "ti,c64x+timer64";
+ reg = <0x2940000 0x40>;
+ };
+
+ clock-controller@29a0000 {
+ compatible = "ti,c6457-pll", "ti,c64x+pll";
+ reg = <0x029a0000 0x200>;
+ ti,c64x+pll-bypass-delay = <300>;
+ ti,c64x+pll-reset-delay = <24000>;
+ ti,c64x+pll-lock-delay = <50000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/tms320c6472.dtsi b/arch/c6x/boot/dts/tms320c6472.dtsi
new file mode 100644
index 00000000000..b488aaec65c
--- /dev/null
+++ b/arch/c6x/boot/dts/tms320c6472.dtsi
@@ -0,0 +1,134 @@
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ reg = <0>;
+ model = "ti,c64x+";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ reg = <1>;
+ model = "ti,c64x+";
+ };
+ cpu@2 {
+ device_type = "cpu";
+ reg = <2>;
+ model = "ti,c64x+";
+ };
+ cpu@3 {
+ device_type = "cpu";
+ reg = <3>;
+ model = "ti,c64x+";
+ };
+ cpu@4 {
+ device_type = "cpu";
+ reg = <4>;
+ model = "ti,c64x+";
+ };
+ cpu@5 {
+ device_type = "cpu";
+ reg = <5>;
+ model = "ti,c64x+";
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6472";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ core_pic: interrupt-controller {
+ compatible = "ti,c64x+core-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ };
+
+ cache-controller@1840000 {
+ compatible = "ti,c64x+cache";
+ reg = <0x01840000 0x8400>;
+ };
+
+ timer0: timer@25e0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x01 >;
+ reg = <0x25e0000 0x40>;
+ };
+
+ timer1: timer@25f0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x02 >;
+ reg = <0x25f0000 0x40>;
+ };
+
+ timer2: timer@2600000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x04 >;
+ reg = <0x2600000 0x40>;
+ };
+
+ timer3: timer@2610000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x08 >;
+ reg = <0x2610000 0x40>;
+ };
+
+ timer4: timer@2620000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x10 >;
+ reg = <0x2620000 0x40>;
+ };
+
+ timer5: timer@2630000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x20 >;
+ reg = <0x2630000 0x40>;
+ };
+
+ clock-controller@29a0000 {
+ compatible = "ti,c6472-pll", "ti,c64x+pll";
+ reg = <0x029a0000 0x200>;
+ ti,c64x+pll-bypass-delay = <200>;
+ ti,c64x+pll-reset-delay = <12000>;
+ ti,c64x+pll-lock-delay = <80000>;
+ };
+
+ device-state-controller@2a80000 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02a80000 0x1000>;
+
+ ti,dscr-devstat = <0>;
+ ti,dscr-silicon-rev = <0x70c 16 0xff>;
+
+ ti,dscr-mac-fuse-regs = <0x700 1 2 3 4
+ 0x704 5 6 0 0>;
+
+ ti,dscr-rmii-resets = <0x208 1
+ 0x20c 1>;
+
+ ti,dscr-locked-regs = <0x200 0x204 0x0a1e183a
+ 0x40c 0x420 0xbea7
+ 0x41c 0x420 0xbea7>;
+
+ ti,dscr-privperm = <0x41c 0xaaaaaaaa>;
+
+ ti,dscr-devstate-ctl-regs = <0 13 0x200 1 0 0 1>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/tms320c6474.dtsi b/arch/c6x/boot/dts/tms320c6474.dtsi
new file mode 100644
index 00000000000..cc601bf348a
--- /dev/null
+++ b/arch/c6x/boot/dts/tms320c6474.dtsi
@@ -0,0 +1,89 @@
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ reg = <0>;
+ model = "ti,c64x+";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ reg = <1>;
+ model = "ti,c64x+";
+ };
+ cpu@2 {
+ device_type = "cpu";
+ reg = <2>;
+ model = "ti,c64x+";
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6474";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ core_pic: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ compatible = "ti,c64x+core-pic";
+ };
+
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ };
+
+ cache-controller@1840000 {
+ compatible = "ti,c64x+cache";
+ reg = <0x01840000 0x8400>;
+ };
+
+ timer3: timer@2940000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x04 >;
+ reg = <0x2940000 0x40>;
+ };
+
+ timer4: timer@2950000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x02 >;
+ reg = <0x2950000 0x40>;
+ };
+
+ timer5: timer@2960000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x01 >;
+ reg = <0x2960000 0x40>;
+ };
+
+ device-state-controller@2880800 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02880800 0x400>;
+
+ ti,dscr-devstat = <0x004>;
+ ti,dscr-silicon-rev = <0x014 28 0xf>;
+ ti,dscr-mac-fuse-regs = <0x34 3 4 5 6
+ 0x38 0 0 1 2>;
+ };
+
+ clock-controller@29a0000 {
+ compatible = "ti,c6474-pll", "ti,c64x+pll";
+ reg = <0x029a0000 0x200>;
+ ti,c64x+pll-bypass-delay = <120>;
+ ti,c64x+pll-reset-delay = <30000>;
+ ti,c64x+pll-lock-delay = <60000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/linked_dtb.S b/arch/c6x/boot/linked_dtb.S
new file mode 100644
index 00000000000..57a4454eaec
--- /dev/null
+++ b/arch/c6x/boot/linked_dtb.S
@@ -0,0 +1,2 @@
+.section __fdt_blob,"a"
+.incbin "arch/c6x/boot/builtin.dtb"
diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig
new file mode 100644
index 00000000000..4663487c67a
--- /dev/null
+++ b/arch/c6x/configs/dsk6455_defconfig
@@ -0,0 +1,44 @@
+CONFIG_SOC_TMS320C6455=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig
new file mode 100644
index 00000000000..bba40e195ec
--- /dev/null
+++ b/arch/c6x/configs/evmc6457_defconfig
@@ -0,0 +1,41 @@
+CONFIG_SOC_TMS320C6457=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+CONFIG_BOARD_EVM6457=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig
new file mode 100644
index 00000000000..8c46155f6d3
--- /dev/null
+++ b/arch/c6x/configs/evmc6472_defconfig
@@ -0,0 +1,42 @@
+CONFIG_SOC_TMS320C6472=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_BOARD_EVM6472=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig
new file mode 100644
index 00000000000..15533f63231
--- /dev/null
+++ b/arch/c6x/configs/evmc6474_defconfig
@@ -0,0 +1,42 @@
+CONFIG_SOC_TMS320C6474=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_BOARD_EVM6474=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
new file mode 100644
index 00000000000..13dcf78adf9
--- /dev/null
+++ b/arch/c6x/include/asm/Kbuild
@@ -0,0 +1,54 @@
+include include/asm-generic/Kbuild.asm
+
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += futex.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mmu_context.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += pgalloc.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += tlbflush.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
diff --git a/arch/c6x/include/asm/asm-offsets.h b/arch/c6x/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/c6x/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h
new file mode 100644
index 00000000000..39ab7e874d9
--- /dev/null
+++ b/arch/c6x/include/asm/bitops.h
@@ -0,0 +1,105 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_BITOPS_H
+#define _ASM_C6X_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/*
+ * We are lucky: the DSP is perfect for bitops and does them in 3 cycles
+ */
+
+/**
+ * __ffs - find first set bit in word
+ * @x: the word to search
+ *
+ * Undefined if no bit is set, so code should check against 0 first.
+ * Note __ffs(0) = undef, __ffs(1) = 0, __ffs(0x80000000) = 31.
+ *
+ */
+static inline unsigned long __ffs(unsigned long x)
+{
+ asm (" bitr .M1 %0,%0\n"
+ " nop\n"
+ " lmbd .L1 1,%0,%0\n"
+ : "+a"(x));
+
+ return x;
+}
+
+/*
+ * ffz - find first zero in word.
+ * @x: the word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static inline int fls(int x)
+{
+ if (!x)
+ return 0;
+
+ asm (" lmbd .L1 1,%0,%0\n" : "+a"(x));
+
+ return 32 - x;
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ * Note ffs(0) = 0, ffs(1) = 1, ffs(0x80000000) = 32.
+ */
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+
+ return __ffs(x) + 1;
+}
+
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_C6X_BITOPS_H */
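
A quick illustration of the return conventions documented above: __ffs() is 0-based and undefined for 0, while ffs()/fls() are 1-based and return 0 for 0. The wrapper function is hypothetical and only exercises the documented corner cases:

#include <linux/bitops.h>

/* Illustrative only: exercises the documented corner cases. */
static void bitops_conventions(void)
{
	unsigned long a = __ffs(0x80000000UL);	/* 31: lowest set bit, 0-based */
	unsigned long b = ffz(0xffffff0fUL);	/* 4: lowest clear bit */
	int c = ffs(0x80000000);		/* 32: 1-based, ffs(0) == 0 */
	int d = fls(0);				/* 0: no bit set */

	(void)a; (void)b; (void)c; (void)d;
}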
diff --git a/arch/c6x/include/asm/byteorder.h b/arch/c6x/include/asm/byteorder.h
new file mode 100644
index 00000000000..166038db342
--- /dev/null
+++ b/arch/c6x/include/asm/byteorder.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_C6X_BYTEORDER_H
+#define _ASM_C6X_BYTEORDER_H
+
+#include <asm/types.h>
+
+#ifdef _BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else /* _BIG_ENDIAN */
+#include <linux/byteorder/little_endian.h>
+#endif /* _BIG_ENDIAN */
+
+#endif /* _ASM_C6X_BYTEORDER_H */
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
new file mode 100644
index 00000000000..6d521d96d94
--- /dev/null
+++ b/arch/c6x/include/asm/cache.h
@@ -0,0 +1,90 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHE_H
+#define _ASM_C6X_CACHE_H
+
+#include <linux/irqflags.h>
+
+/*
+ * Cache line size
+ */
+#define L1D_CACHE_BYTES 64
+#define L1P_CACHE_BYTES 32
+#define L2_CACHE_BYTES 128
+
+/*
+ * L2 used as cache
+ */
+#define L2MODE_SIZE L2MODE_256K_CACHE
+
+/*
+ * For practical reasons the L1_CACHE_BYTES defines should not be smaller than
+ * the L2 line size
+ */
+#define L1_CACHE_BYTES L2_CACHE_BYTES
+
+#define L2_CACHE_ALIGN_LOW(x) \
+ (((x) & ~(L2_CACHE_BYTES - 1)))
+#define L2_CACHE_ALIGN_UP(x) \
+ (((x) + (L2_CACHE_BYTES - 1)) & ~(L2_CACHE_BYTES - 1))
+#define L2_CACHE_ALIGN_CNT(x) \
+ (((x) + (sizeof(int) - 1)) & ~(sizeof(int) - 1))
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
+
+/*
+ * This is the granularity of hardware cacheability control.
+ */
+#define CACHEABILITY_ALIGN 0x01000000
+
+/*
+ * Align a physical address to MAR regions
+ */
+#define CACHE_REGION_START(v) \
+ (((u32) (v)) & ~(CACHEABILITY_ALIGN - 1))
+#define CACHE_REGION_END(v) \
+ (((u32) (v) + (CACHEABILITY_ALIGN - 1)) & ~(CACHEABILITY_ALIGN - 1))
+
+extern void __init c6x_cache_init(void);
+
+extern void enable_caching(unsigned long start, unsigned long end);
+extern void disable_caching(unsigned long start, unsigned long end);
+
+extern void L1_cache_off(void);
+extern void L1_cache_on(void);
+
+extern void L1P_cache_global_invalidate(void);
+extern void L1D_cache_global_invalidate(void);
+extern void L1D_cache_global_writeback(void);
+extern void L1D_cache_global_writeback_invalidate(void);
+extern void L2_cache_set_mode(unsigned int mode);
+extern void L2_cache_global_writeback_invalidate(void);
+extern void L2_cache_global_writeback(void);
+
+extern void L1P_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L1D_cache_block_writeback_invalidate(unsigned int start,
+ unsigned int end);
+extern void L1D_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_invalidate(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback(unsigned int start, unsigned int end);
+extern void L2_cache_block_writeback_invalidate(unsigned int start,
+ unsigned int end);
+extern void L2_cache_block_invalidate_nowait(unsigned int start,
+ unsigned int end);
+extern void L2_cache_block_writeback_nowait(unsigned int start,
+ unsigned int end);
+
+extern void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+ unsigned int end);
+
+#endif /* _ASM_C6X_CACHE_H */
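
A minimal sketch of how the MAR-granularity helpers above might be used to make an external memory window cacheable; enable_buffer_caching(), ext_start and ext_size are hypothetical names, not part of this patch:

#include <asm/cache.h>

/* Cacheability is controlled per CACHEABILITY_ALIGN (16 MB) region, so
 * widen the requested window to region boundaries before enabling it. */
static void enable_buffer_caching(unsigned long ext_start, unsigned long ext_size)
{
	unsigned long start = CACHE_REGION_START(ext_start);
	unsigned long end   = CACHE_REGION_END(ext_start + ext_size);

	enable_caching(start, end);
}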
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
new file mode 100644
index 00000000000..df5db90dbe5
--- /dev/null
+++ b/arch/c6x/include/asm/cacheflush.h
@@ -0,0 +1,65 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CACHEFLUSH_H
+#define _ASM_C6X_CACHEFLUSH_H
+
+#include <linux/spinlock.h>
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/string.h>
+
+/*
+ * virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * physically-indexed cache management
+ */
+#define flush_icache_range(s, e) \
+do { \
+ L1D_cache_block_writeback((s), (e)); \
+ L1P_cache_block_invalidate((s), (e)); \
+} while (0)
+
+#define flush_icache_page(vma, page)					  \
+do {								  \
+	if ((vma)->vm_flags & PROT_EXEC)				  \
+		L1D_cache_block_writeback_invalidate(page_address(page), \
+			(unsigned long) page_address(page) + PAGE_SIZE);  \
+	L1P_cache_block_invalidate(page_address(page),		  \
+			(unsigned long) page_address(page) + PAGE_SIZE);  \
+} while (0)
+
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* _ASM_C6X_CACHEFLUSH_H */
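
A sketch of the physically-indexed maintenance sequence above, as a hypothetical helper that installs freshly written instructions (e.g. a trampoline) and makes them visible to instruction fetch:

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Write back the new code from L1D and invalidate stale lines in L1P. */
static void install_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}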
diff --git a/arch/c6x/include/asm/checksum.h b/arch/c6x/include/asm/checksum.h
new file mode 100644
index 00000000000..7246816d6e4
--- /dev/null
+++ b/arch/c6x/include/asm/checksum.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_CHECKSUM_H
+#define _ASM_C6X_CHECKSUM_H
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ unsigned long long tmp;
+
+ asm ("add .d1 %1,%5,%1\n"
+ "|| addu .l1 %3,%4,%0\n"
+ "addu .l1 %2,%0,%0\n"
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ "|| shl .s1 %1,8,%1\n"
+#endif
+ "addu .l1 %1,%0,%0\n"
+ "add .l1 %P0,%p0,%2\n"
+ : "=&a"(tmp), "+a"(len), "+a"(sum)
+ : "a" (saddr), "a" (daddr), "a" (proto));
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_C6X_CHECKSUM_H */
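
The inline assembly above only supplies the pseudo-header accumulation; the generic csum_tcpudp_magic()/csum_fold() pulled in from asm-generic/checksum.h build on it. A hedged usage sketch (the helper name is hypothetical):

#include <linux/in.h>
#include <linux/types.h>
#include <asm/checksum.h>

/* Final checksum for a TCP segment whose payload sum is already in 'sum'. */
static __sum16 tcp_pseudo_csum(__be32 saddr, __be32 daddr,
			       unsigned short len, __wsum sum)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, sum);
}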
diff --git a/arch/c6x/include/asm/clkdev.h b/arch/c6x/include/asm/clkdev.h
new file mode 100644
index 00000000000..76a070b1c2e
--- /dev/null
+++ b/arch/c6x/include/asm/clkdev.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_CLKDEV_H
+#define _ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+struct clk;
+
+static inline int __clk_get(struct clk *clk)
+{
+ return 1;
+}
+
+static inline void __clk_put(struct clk *clk)
+{
+}
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+#endif /* _ASM_CLKDEV_H */
diff --git a/arch/c6x/include/asm/clock.h b/arch/c6x/include/asm/clock.h
new file mode 100644
index 00000000000..bcf42b2b4b1
--- /dev/null
+++ b/arch/c6x/include/asm/clock.h
@@ -0,0 +1,148 @@
+/*
+ * TI C64X clock definitions
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments.
+ * Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ * Copied heavily from arm/mach-davinci/clock.h, so:
+ *
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_C6X_CLOCK_H
+#define _ASM_C6X_CLOCK_H
+
+#ifndef __ASSEMBLER__
+
+#include <linux/list.h>
+
+/* PLL/Reset register offsets */
+#define PLLCTL 0x100
+#define PLLM 0x110
+#define PLLPRE 0x114
+#define PLLDIV1 0x118
+#define PLLDIV2 0x11c
+#define PLLDIV3 0x120
+#define PLLPOST 0x128
+#define PLLCMD 0x138
+#define PLLSTAT 0x13c
+#define PLLALNCTL 0x140
+#define PLLDCHANGE 0x144
+#define PLLCKEN 0x148
+#define PLLCKSTAT 0x14c
+#define PLLSYSTAT 0x150
+#define PLLDIV4 0x160
+#define PLLDIV5 0x164
+#define PLLDIV6 0x168
+#define PLLDIV7 0x16c
+#define PLLDIV8 0x170
+#define PLLDIV9 0x174
+#define PLLDIV10 0x178
+#define PLLDIV11 0x17c
+#define PLLDIV12 0x180
+#define PLLDIV13 0x184
+#define PLLDIV14 0x188
+#define PLLDIV15 0x18c
+#define PLLDIV16 0x190
+
+/* PLLM register bits */
+#define PLLM_PLLM_MASK 0xff
+#define PLLM_VAL(x) ((x) - 1)
+
+/* PREDIV register bits */
+#define PLLPREDIV_EN BIT(15)
+#define PLLPREDIV_VAL(x) ((x) - 1)
+
+/* PLLCTL register bits */
+#define PLLCTL_PLLEN BIT(0)
+#define PLLCTL_PLLPWRDN BIT(1)
+#define PLLCTL_PLLRST BIT(3)
+#define PLLCTL_PLLDIS BIT(4)
+#define PLLCTL_PLLENSRC BIT(5)
+#define PLLCTL_CLKMODE BIT(8)
+
+/* PLLCMD register bits */
+#define PLLCMD_GOSTAT BIT(0)
+
+/* PLLSTAT register bits */
+#define PLLSTAT_GOSTAT BIT(0)
+
+/* PLLDIV register bits */
+#define PLLDIV_EN BIT(15)
+#define PLLDIV_RATIO_MASK 0x1f
+#define PLLDIV_RATIO(x) ((x) - 1)
+
+struct pll_data;
+
+struct clk {
+ struct list_head node;
+ struct module *owner;
+ const char *name;
+ unsigned long rate;
+ int usecount;
+ u32 flags;
+ struct clk *parent;
+ struct list_head children; /* list of children */
+ struct list_head childnode; /* parent's child list node */
+ struct pll_data *pll_data;
+ u32 div;
+ unsigned long (*recalc) (struct clk *);
+ int (*set_rate) (struct clk *clk, unsigned long rate);
+ int (*round_rate) (struct clk *clk, unsigned long rate);
+};
+
+/* Clock flags: SoC-specific flags start at BIT(16) */
+#define ALWAYS_ENABLED BIT(1)
+#define CLK_PLL BIT(2) /* PLL-derived clock */
+#define PRE_PLL BIT(3) /* source is before PLL mult/div */
+#define FIXED_DIV_PLL BIT(4) /* fixed divisor from PLL */
+#define FIXED_RATE_PLL	BIT(5)	/* fixed output rate PLL */
+
+#define MAX_PLL_SYSCLKS 16
+
+struct pll_data {
+ void __iomem *base;
+ u32 num;
+ u32 flags;
+ u32 input_rate;
+ u32 bypass_delay; /* in loops */
+ u32 reset_delay; /* in loops */
+ u32 lock_delay; /* in loops */
+ struct clk sysclks[MAX_PLL_SYSCLKS + 1];
+};
+
+/* pll_data flag bit */
+#define PLL_HAS_PRE BIT(0)
+#define PLL_HAS_MUL BIT(1)
+#define PLL_HAS_POST BIT(2)
+
+#define CLK(dev, con, ck) \
+ { \
+ .dev_id = dev, \
+ .con_id = con, \
+ .clk = ck, \
+ } \
+
+extern void c6x_clks_init(struct clk_lookup *clocks);
+extern int clk_register(struct clk *clk);
+extern void clk_unregister(struct clk *clk);
+extern void c64x_setup_clocks(void);
+
+extern struct pll_data c6x_soc_pll1;
+
+extern struct clk clkin1;
+extern struct clk c6x_core_clk;
+extern struct clk c6x_i2c_clk;
+extern struct clk c6x_watchdog_clk;
+extern struct clk c6x_mcbsp1_clk;
+extern struct clk c6x_mcbsp2_clk;
+extern struct clk c6x_mdio_clk;
+
+#endif
+
+#endif /* _ASM_C6X_CLOCK_H */
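
A hedged sketch of how an SoC file might describe a clock and export it through the CLK() lookup entries consumed by c6x_clks_init(). The clock, the device name and the NULL-terminated table convention are assumptions modeled on the DaVinci code this header says it copies:

#include <linux/init.h>
#include <linux/clkdev.h>
#include <asm/clock.h>

/* Hypothetical always-on module clock fed from the core clock. */
static struct clk my_wdt_clk = {
	.name	= "my_watchdog",
	.parent	= &c6x_core_clk,
	.flags	= ALWAYS_ENABLED,
};

static struct clk_lookup my_clk_lookups[] = {
	CLK(NULL, "core", &c6x_core_clk),
	CLK("my-wdt.0", NULL, &my_wdt_clk),
	CLK(NULL, NULL, NULL)	/* assumed DaVinci-style terminator */
};

static void __init my_soc_clk_init(void)
{
	c6x_clks_init(my_clk_lookups);
}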
diff --git a/arch/c6x/include/asm/delay.h b/arch/c6x/include/asm/delay.h
new file mode 100644
index 00000000000..f314c2e9eb5
--- /dev/null
+++ b/arch/c6x/include/asm/delay.h
@@ -0,0 +1,67 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_DELAY_H
+#define _ASM_C6X_DELAY_H
+
+#include <linux/kernel.h>
+
+extern unsigned int ticks_per_ns_scaled;
+
+static inline void __delay(unsigned long loops)
+{
+ uint32_t tmp;
+
+ /* 6 cycles per loop */
+ asm volatile (" mv .s1 %0,%1\n"
+ "0: [%1] b .s1 0b\n"
+ " add .l1 -6,%0,%0\n"
+ " cmplt .l1 1,%0,%1\n"
+ " nop 3\n"
+ : "+a"(loops), "=A"(tmp));
+}
+
+static inline void _c6x_tickdelay(unsigned int x)
+{
+ uint32_t cnt, endcnt;
+
+ asm volatile (" mvc .s2 TSCL,%0\n"
+ " add .s2x %0,%1,%2\n"
+ " || mvk .l2 1,B0\n"
+ "0: [B0] b .s2 0b\n"
+ " mvc .s2 TSCL,%0\n"
+ " sub .s2 %0,%2,%0\n"
+ " cmpgt .l2 0,%0,B0\n"
+ " nop 2\n"
+ : "=b"(cnt), "+a"(x), "=b"(endcnt) : : "B0");
+}
+
+/* use scaled math to avoid slow division */
+#define C6X_NDELAY_SCALE 10
+
+static inline void _ndelay(unsigned int n)
+{
+ _c6x_tickdelay((ticks_per_ns_scaled * n) >> C6X_NDELAY_SCALE);
+}
+
+static inline void _udelay(unsigned int n)
+{
+ while (n >= 10) {
+ _ndelay(10000);
+ n -= 10;
+ }
+ while (n-- > 0)
+ _ndelay(1000);
+}
+
+#define udelay(x) _udelay((unsigned int)(x))
+#define ndelay(x) _ndelay((unsigned int)(x))
+
+#endif /* _ASM_C6X_DELAY_H */
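
The scaled-math comment above implies that ticks_per_ns_scaled is simply "timestamp-counter ticks per nanosecond" pre-multiplied by 2^C6X_NDELAY_SCALE. A sketch of how it could be derived from the core clock rate; the init function and core_khz parameter are hypothetical, and the arch's own time code is expected to set the real value:

#include <linux/init.h>
#include <asm/delay.h>

/* ticks/ns = core_hz / 1e9 = core_khz / 1e6; pre-scale by 2^10 so
 * _ndelay() only needs a multiply and a shift (1 GHz -> 1024).
 * The shifted intermediate stays within 32 bits for realistic core clocks. */
static void __init set_ndelay_scale(unsigned int core_khz)
{
	ticks_per_ns_scaled = (core_khz << C6X_NDELAY_SCALE) / 1000000;
}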
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..03579fd99db
--- /dev/null
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _ASM_C6X_DMA_MAPPING_H
+#define _ASM_C6X_DMA_MAPPING_H
+
+#include <linux/dma-debug.h>
+#include <asm-generic/dma-coherent.h>
+
+#define dma_supported(d, m) 1
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == ~0;
+}
+
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction dir);
+
+extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction);
+
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction);
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t handle;
+
+ handle = dma_map_single(dev, page_address(page) + offset, size, dir);
+
+ debug_dma_map_page(dev, page, offset, size, dir, handle, false);
+
+ return handle;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(dev, handle, size, dir);
+
+ debug_dma_unmap_page(dev, handle, size, dir, false);
+}
+
+extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+
+extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+ size_t size,
+ enum dma_data_direction dir);
+
+extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+
+extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+
+extern void coherent_mem_init(u32 start, u32 size);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
+
+#endif /* _ASM_C6X_DMA_MAPPING_H */
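
A small driver-side sketch using the streaming API above, including the all-bits-set error convention checked by dma_mapping_error(); the function and its parameters are hypothetical:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

/* Map a receive buffer for device-to-memory DMA and validate the cookie. */
static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}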
diff --git a/arch/c6x/include/asm/dscr.h b/arch/c6x/include/asm/dscr.h
new file mode 100644
index 00000000000..561ba833204
--- /dev/null
+++ b/arch/c6x/include/asm/dscr.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _ASM_C6X_DSCR_H
+#define _ASM_C6X_DSCR_H
+
+enum dscr_devstate_t {
+ DSCR_DEVSTATE_ENABLED,
+ DSCR_DEVSTATE_DISABLED,
+};
+
+/*
+ * Set the device state of the device with the given ID.
+ *
+ * Individual drivers should use this to enable or disable the
+ * hardware device. The devid used to identify the device being
+ * controlled should be a property in the device's tree node.
+ */
+extern void dscr_set_devstate(int devid, enum dscr_devstate_t state);
+
+/*
+ * Assert or de-assert an RMII reset.
+ */
+extern void dscr_rmii_reset(int id, int assert);
+
+extern void dscr_probe(void);
+
+#endif /* _ASM_C6X_DSCR_H */
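
A sketch of the usage pattern the comment describes: a driver pulls its DSCR device id from its device-tree node (the "ti,dscr-dev-enable" property used by the .dtsi files earlier in this patch) and enables the block. The helper name and error handling are assumptions:

#include <linux/errno.h>
#include <linux/of.h>
#include <asm/dscr.h>

/* Enable the hardware block described by 'np' via its DSCR device id. */
static int my_enable_from_dt(struct device_node *np)
{
	u32 devid;

	if (of_property_read_u32(np, "ti,dscr-dev-enable", &devid))
		return -ENODEV;	/* node has no DSCR-gated state */

	dscr_set_devstate(devid, DSCR_DEVSTATE_ENABLED);

	return 0;
}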
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
new file mode 100644
index 00000000000..d57865ba2c4
--- /dev/null
+++ b/arch/c6x/include/asm/elf.h
@@ -0,0 +1,113 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_ELF_H
+#define _ASM_C6X_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpreg_t;
+
+#define ELF_NGREG 58
+#define ELF_NFPREG 1
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_TI_C6000)
+
+#define elf_check_const_displacement(x) (1)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define ELF_DATA ELFDATA2LSB
+#else
+#define ELF_DATA ELFDATA2MSB
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_TI_C6000
+
+/* Nothing for now. Need to set up DP... */
+#define ELF_PLAT_INIT(_r)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_CORE_COPY_REGS(_dest, _regs) \
+ memcpy((char *) &_dest, (char *) _regs, \
+ sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+/* C6X specific section types */
+#define SHT_C6000_UNWIND 0x70000001
+#define SHT_C6000_PREEMPTMAP 0x70000002
+#define SHT_C6000_ATTRIBUTES 0x70000003
+
+/* C6X specific DT_ tags */
+#define DT_C6000_DSBT_BASE 0x70000000
+#define DT_C6000_DSBT_SIZE 0x70000001
+#define DT_C6000_PREEMPTMAP 0x70000002
+#define DT_C6000_DSBT_INDEX 0x70000003
+
+/* C6X specific relocs */
+#define R_C6000_NONE 0
+#define R_C6000_ABS32 1
+#define R_C6000_ABS16 2
+#define R_C6000_ABS8 3
+#define R_C6000_PCR_S21 4
+#define R_C6000_PCR_S12 5
+#define R_C6000_PCR_S10 6
+#define R_C6000_PCR_S7 7
+#define R_C6000_ABS_S16 8
+#define R_C6000_ABS_L16 9
+#define R_C6000_ABS_H16 10
+#define R_C6000_SBR_U15_B 11
+#define R_C6000_SBR_U15_H 12
+#define R_C6000_SBR_U15_W 13
+#define R_C6000_SBR_S16 14
+#define R_C6000_SBR_L16_B 15
+#define R_C6000_SBR_L16_H 16
+#define R_C6000_SBR_L16_W 17
+#define R_C6000_SBR_H16_B 18
+#define R_C6000_SBR_H16_H 19
+#define R_C6000_SBR_H16_W 20
+#define R_C6000_SBR_GOT_U15_W 21
+#define R_C6000_SBR_GOT_L16_W 22
+#define R_C6000_SBR_GOT_H16_W 23
+#define R_C6000_DSBT_INDEX 24
+#define R_C6000_PREL31 25
+#define R_C6000_COPY 26
+#define R_C6000_ALIGN 253
+#define R_C6000_FPHEAD 254
+#define R_C6000_NOCMP 255
+
+#endif /* _ASM_C6X_ELF_H */
diff --git a/arch/c6x/include/asm/ftrace.h b/arch/c6x/include/asm/ftrace.h
new file mode 100644
index 00000000000..3701958d3d1
--- /dev/null
+++ b/arch/c6x/include/asm/ftrace.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_C6X_FTRACE_H
+#define _ASM_C6X_FTRACE_H
+
+/* empty */
+
+#endif /* _ASM_C6X_FTRACE_H */
diff --git a/arch/c6x/include/asm/hardirq.h b/arch/c6x/include/asm/hardirq.h
new file mode 100644
index 00000000000..9621954f98f
--- /dev/null
+++ b/arch/c6x/include/asm/hardirq.h
@@ -0,0 +1,20 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_C6X_HARDIRQ_H
+#define _ASM_C6X_HARDIRQ_H
+
+extern void ack_bad_irq(int irq);
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
+
+#endif /* _ASM_C6X_HARDIRQ_H */
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
new file mode 100644
index 00000000000..a6ae3c9d9c4
--- /dev/null
+++ b/arch/c6x/include/asm/irq.h
@@ -0,0 +1,302 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Large parts taken directly from powerpc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_IRQ_H
+#define _ASM_C6X_IRQ_H
+
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <asm/percpu.h>
+
+#define irq_canonicalize(irq) (irq)
+
+/*
+ * The C64X+ core has 16 IRQ vectors. One each is used by Reset and NMI. Two
+ * are reserved. The remaining 12 vectors are used to route SoC interrupts.
+ * These interrupt vectors are prioritized with IRQ 4 having the highest
+ * priority and IRQ 15 having the lowest.
+ *
+ * The C64x+ megamodule provides a PIC which combines SoC IRQ sources into a
+ * single core IRQ vector. There are four combined sources, each of which
+ * feed into one of the 12 general interrupt vectors. The remaining 8 vectors
+ * can each route a single SoC interrupt directly.
+ */
+#define NR_PRIORITY_IRQS 16
+
+#define NR_IRQS_LEGACY NR_PRIORITY_IRQS
+
+/* Total number of virq in the platform */
+#define NR_IRQS 256
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ 0
+
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+/* Interrupt controller "host" data structure. This could be defined as an
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The host
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a host can cover more than one PIC if they have a flat number
+ * model). It's the host callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are fairly agnostic to the fact that
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart host->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
+ */
+struct irq_host;
+struct radix_tree_root;
+struct device_node;
+
+/* Functions below are provided by the host and called whenever a new mapping
+ * is created or an old mapping is disposed. The host can then proceed to
+ * whatever internal data structure management is required. It also needs
+ * to set up the irq_desc when returning from map().
+ */
+struct irq_host_ops {
+ /* Match an interrupt controller device node to a host, returns
+ * 1 on a match
+ */
+ int (*match)(struct irq_host *h, struct device_node *node);
+
+ /* Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ */
+ int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
+
+ /* Dispose of such a mapping */
+ void (*unmap)(struct irq_host *h, unsigned int virq);
+
+ /* Translate device-tree interrupt specifier from raw format coming
+	 * from the firmware to an irq_hw_number_t (interrupt line number) and
+ * type (sense) that can be passed to set_irq_type(). In the absence
+ * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
+ * will return the hw number in the first cell and IRQ_TYPE_NONE for
+	 * the type (which amounts to keeping whatever default value the
+ * interrupt controller has for that line)
+ */
+ int (*xlate)(struct irq_host *h, struct device_node *ctrler,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+};
+
+struct irq_host {
+ struct list_head link;
+
+ /* type of reverse mapping technique */
+ unsigned int revmap_type;
+#define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */
+#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_HOST_MAP_TREE 3 /* radix tree */
+ union {
+ struct {
+ unsigned int size;
+ unsigned int *revmap;
+ } linear;
+ struct radix_tree_root tree;
+ } revmap_data;
+ struct irq_host_ops *ops;
+ void *host_data;
+ irq_hw_number_t inval_irq;
+
+ /* Optional device node pointer */
+ struct device_node *of_node;
+};
+
+struct irq_data;
+extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
+extern irq_hw_number_t virq_to_hw(unsigned int virq);
+extern bool virq_is_host(unsigned int virq, struct irq_host *host);
+
+/**
+ * irq_alloc_host - Allocate a new irq_host data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the map
+ * @ops: map/unmap host callbacks
+ * @inval_irq: provide a hw number in that host space that is always invalid
+ *
+ * Allocates and initializes an irq_host structure. Note that in the case of
+ * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by
+ * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated
+ * later during boot automatically (the reverse mapping will use the slow path
+ * until that happens).
+ */
+extern struct irq_host *irq_alloc_host(struct device_node *of_node,
+ unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq);
+
+
+/**
+ * irq_find_host - Locates a host for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+extern struct irq_host *irq_find_host(struct device_node *node);
+
+
+/**
+ * irq_set_default_host - Set a "default" host
+ * @host: default host pointer
+ *
+ * For convenience, it's possible to set a "default" host that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+extern void irq_set_default_host(struct irq_host *host);
+
+
+/**
+ * irq_set_virq_count - Set the maximum number of virt irqs
+ * @count: number of linux virtual irqs, capped with NR_IRQS
+ *
+ * This is mainly for use by platforms like iSeries that want to program
+ * the virtual irq number in the controller to avoid the reverse mapping
+ */
+extern void irq_set_virq_count(unsigned int count);
+
+
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+
+/**
+ * irq_dispose_mapping - Unmap an interrupt
+ * @virq: linux virq number of the interrupt to unmap
+ */
+extern void irq_dispose_mapping(unsigned int virq);
+
+/**
+ * irq_find_mapping - Find a linux virq from an hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+extern unsigned int irq_find_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+/**
+ * irq_create_direct_mapping - Allocate a virq for direct mapping
+ * @host: host to allocate the virq for or NULL for default host
+ *
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux virq as the hardware interrupt number.
+ */
+extern unsigned int irq_create_direct_mapping(struct irq_host *host);
+
+/**
+ * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
+ * @host: host owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq);
+
+/**
+ * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+/**
+ * irq_linear_revmap - Find a linux virq from a hw irq number.
+ * @host: host owning this hardware interrupt
+ * @hwirq: hardware irq number in that host space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking.
+ */
+
+extern unsigned int irq_linear_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq);
+
+
+
+/**
+ * irq_alloc_virt - Allocate virtual irq numbers
+ * @host: host owning these new virtual irqs
+ * @count: number of consecutive numbers to allocate
+ * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
+ *
+ * This is a low level function that is used internally by irq_create_mapping()
+ * and that can be used by some irq controllers implementations for things
+ * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
+ */
+extern unsigned int irq_alloc_virt(struct irq_host *host,
+ unsigned int count,
+ unsigned int hint);
+
+/**
+ * irq_free_virt - Free virtual irq numbers
+ * @virq: virtual irq number of the first interrupt to free
+ * @count: number of interrupts to free
+ *
+ * This function is the opposite of irq_alloc_virt. It will not clear reverse
+ * maps; that should be done beforehand by unmapping the interrupt. In fact,
+ * all interrupts covered by the range being freed should have been unmapped
+ * prior to calling this.
+ */
+extern void irq_free_virt(unsigned int virq, unsigned int count);
+
+extern void __init init_pic_c64xplus(void);
+
+extern void init_IRQ(void);
+
+struct pt_regs;
+
+extern asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs);
+
+extern unsigned long irq_err_count;
+
+#endif /* _ASM_C6X_IRQ_H */
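
To make the host/ops contract above concrete, here is a skeleton interrupt-controller driver using a linear reverse map. All names (my_pic_*) are hypothetical, the chip/handler choice is only illustrative, and hwirq 0 is used as the invalid number by assumption:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>

static struct irq_host *my_pic_host;

/* Called once per hwirq when a mapping is first created. */
static int my_pic_map(struct irq_host *h, unsigned int virq,
		      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

/* One-cell interrupt specifier: cell 0 is the hw irq number. */
static int my_pic_xlate(struct irq_host *h, struct device_node *ctrler,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops my_pic_ops = {
	.map	= my_pic_map,
	.xlate	= my_pic_xlate,
};

static void __init my_pic_init(struct device_node *np)
{
	unsigned int virq;

	/* 32 hw sources, linear reverse map, hwirq 0 treated as invalid */
	my_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 32,
				     &my_pic_ops, 0);

	/* Hand out a Linux virq for (arbitrary) hw source 3. */
	virq = irq_create_mapping(my_pic_host, 3);
	(void)virq;
}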
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
new file mode 100644
index 00000000000..cf78e09e18c
--- /dev/null
+++ b/arch/c6x/include/asm/irqflags.h
@@ -0,0 +1,72 @@
+/*
+ * C6X IRQ flag handling
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ asm volatile (" mvc .s2 CSR,%0\n" : "=b"(flags));
+ return flags;
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ flags |= 1;
+ arch_local_irq_restore(flags);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ flags &= ~1;
+ arch_local_irq_restore(flags);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(flags & ~1);
+ return flags;
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & 1) == 0;
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_IRQFLAGS_H */
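
These primitives sit underneath the generic local_irq_save()/local_irq_restore() macros; bit 0 of CSR is the global interrupt enable manipulated by the helpers above. A trivial critical-section sketch with a hypothetical counter:

#include <linux/irqflags.h>

static unsigned int my_event_count;	/* hypothetical shared state */

static void my_count_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* ends up in arch_local_irq_save() */
	my_event_count++;
	local_irq_restore(flags);	/* ends up in arch_local_irq_restore() */
}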
diff --git a/arch/c6x/include/asm/linkage.h b/arch/c6x/include/asm/linkage.h
new file mode 100644
index 00000000000..376925c47d5
--- /dev/null
+++ b/arch/c6x/include/asm/linkage.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_C6X_LINKAGE_H
+#define _ASM_C6X_LINKAGE_H
+
+#ifdef __ASSEMBLER__
+
+#define __ALIGN .align 2
+#define __ALIGN_STR ".align 2"
+
+#ifndef __DSBT__
+#define ENTRY(name) \
+ .global name @ \
+ __ALIGN @ \
+name:
+#else
+#define ENTRY(name) \
+ .global name @ \
+ .hidden name @ \
+ __ALIGN @ \
+name:
+#endif
+
+#define ENDPROC(name) \
+ .type name, @function @ \
+ .size name, . - name
+
+#endif
+
+#include <asm-generic/linkage.h>
+
+#endif /* _ASM_C6X_LINKAGE_H */
diff --git a/arch/c6x/include/asm/megamod-pic.h b/arch/c6x/include/asm/megamod-pic.h
new file mode 100644
index 00000000000..eca0a867803
--- /dev/null
+++ b/arch/c6x/include/asm/megamod-pic.h
@@ -0,0 +1,9 @@
+#ifndef _C6X_MEGAMOD_PIC_H
+#define _C6X_MEGAMOD_PIC_H
+
+#ifdef __KERNEL__
+
+extern void __init megamod_pic_init(void);
+
+#endif /* __KERNEL__ */
+#endif /* _C6X_MEGAMOD_PIC_H */
diff --git a/arch/c6x/include/asm/mmu.h b/arch/c6x/include/asm/mmu.h
new file mode 100644
index 00000000000..41592bf1606
--- /dev/null
+++ b/arch/c6x/include/asm/mmu.h
@@ -0,0 +1,18 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_MMU_H
+#define _ASM_C6X_MMU_H
+
+typedef struct {
+ unsigned long end_brk;
+} mm_context_t;
+
+#endif /* _ASM_C6X_MMU_H */
diff --git a/arch/c6x/include/asm/module.h b/arch/c6x/include/asm/module.h
new file mode 100644
index 00000000000..a453f9744f4
--- /dev/null
+++ b/arch/c6x/include/asm/module.h
@@ -0,0 +1,33 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.34 by: Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_MODULE_H
+#define _ASM_C6X_MODULE_H
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#define Elf_Word Elf32_Word
+
+/*
+ * This file contains the C6x architecture specific module code.
+ */
+struct mod_arch_specific {
+};
+
+struct loaded_sections {
+ unsigned int new_vaddr;
+ unsigned int loaded;
+};
+
+#endif /* _ASM_C6X_MODULE_H */
diff --git a/arch/c6x/include/asm/mutex.h b/arch/c6x/include/asm/mutex.h
new file mode 100644
index 00000000000..7a7248e0462
--- /dev/null
+++ b/arch/c6x/include/asm/mutex.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_C6X_MUTEX_H
+#define _ASM_C6X_MUTEX_H
+
+#include <asm-generic/mutex-null.h>
+
+#endif /* _ASM_C6X_MUTEX_H */
diff --git a/arch/c6x/include/asm/page.h b/arch/c6x/include/asm/page.h
new file mode 100644
index 00000000000..d18e2b0c7ae
--- /dev/null
+++ b/arch/c6x/include/asm/page.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_C6X_PAGE_H
+#define _ASM_C6X_PAGE_H
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/page.h>
+
+#endif /* _ASM_C6X_PAGE_H */
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
new file mode 100644
index 00000000000..68c8af4f1f9
--- /dev/null
+++ b/arch/c6x/include/asm/pgtable.h
@@ -0,0 +1,81 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PGTABLE_H
+#define _ASM_C6X_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START 0
+#define VMALLOC_END 0xffffffff
+
+#define pgd_present(pgd) (1)
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+
+#define pmd_offset(a, b) ((void *)0)
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK)
+
+#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
+#define pgprot_noncached(prot) (prot)
+
+extern void paging_init(void);
+
+#define __swp_type(x) (0)
+#define __swp_offset(x) (0)
+#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline int pte_file(pte_t pte)
+{
+ return 0;
+}
+
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+extern unsigned long empty_zero_page;
+
+#define swapper_pg_dir ((pgd_t *) 0)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+#define io_remap_pfn_range remap_pfn_range
+
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+ remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
new file mode 100644
index 00000000000..8154c4ee8c9
--- /dev/null
+++ b/arch/c6x/include/asm/processor.h
@@ -0,0 +1,132 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PROCESSOR_H
+#define _ASM_C6X_PROCESSOR_H
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/current.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() \
+({ \
+ void *__pc; \
+ asm("mvc .S2 pce1,%0\n" : "=b"(__pc)); \
+ __pc; \
+})
+
+/*
+ * User space process size. This is mostly meaningless for NOMMU
+ * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
+ * Since calls like mmap() can return an address or an error, we
+ * have to allow room for error returns when code does something
+ * like:
+ *
+ * addr = do_mmap(...)
+ * if ((unsigned long)addr >= TASK_SIZE)
+ * ... it's an error code, not an address ...
+ *
+ * Here, we allow for 4096 error codes which means we really can't
+ * use the last 4K page on systems with RAM extending all the way
+ * to the end of the 32-bit address space.
+ */
+#define TASK_SIZE 0xFFFFF000
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's. We won't be using it
+ */
+#define TASK_UNMAPPED_BASE 0
+
+struct thread_struct {
+ unsigned long long b15_14;
+ unsigned long long a15_14;
+ unsigned long long b13_12;
+ unsigned long long a13_12;
+ unsigned long long b11_10;
+ unsigned long long a11_10;
+ unsigned long long ricl_icl;
+ unsigned long usp; /* user stack pointer */
+ unsigned long pc; /* kernel pc */
+ unsigned long wchan;
+};
+
+#define INIT_THREAD \
+{ \
+ .usp = 0, \
+ .wchan = 0, \
+}
+
+#define INIT_MMAP { \
+ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
+ NULL, NULL }
+
+#define task_pt_regs(task) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
+
+#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
+#define free_kernel_stack(page) free_page((page))
+
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+extern void start_thread(struct pt_regs *regs, unsigned int pc,
+ unsigned long usp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+/*
+ * saved PC of a blocked thread.
+ */
+#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
+
+/*
+ * saved kernel SP and DP of a blocked thread.
+ */
+#ifdef _BIG_ENDIAN
+#define thread_saved_ksp(tsk) \
+ (*(unsigned long *)&(tsk)->thread.b15_14)
+#define thread_saved_dp(tsk) \
+ (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
+#else
+#define thread_saved_ksp(tsk) \
+ (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
+#define thread_saved_dp(tsk) \
+ (*(unsigned long *)&(tsk)->thread.b15_14)
+#endif
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+#define cpu_relax() do { } while (0)
+
+extern const struct seq_operations cpuinfo_op;
+
+#endif /* _ASM_C6X_PROCESSOR_H */
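
The TASK_SIZE comment in processor.h above explains why the last 4K page is kept out of the user address range: return values in the -4095..-1 range from calls such as mmap() must stay distinguishable from valid addresses. A standalone C sketch of that check follows; the constant and helper names are local to the sketch, not kernel symbols.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_TASK_SIZE 0xFFFFF000UL   /* top 4K of the 32-bit space reserved for error codes */

static int looks_like_error(uint32_t ret)
{
        /* -4095..-1 land in the last page below 2^32, at or above TASK_SIZE */
        return ret >= SKETCH_TASK_SIZE;
}

int main(void)
{
        printf("0x80000000 -> %s\n", looks_like_error(0x80000000u) ? "error" : "address");
        printf("-12 (-ENOMEM) -> %s\n", looks_like_error((uint32_t)-12) ? "error" : "address");
        return 0;
}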
diff --git a/arch/c6x/include/asm/procinfo.h b/arch/c6x/include/asm/procinfo.h
new file mode 100644
index 00000000000..c139d1e71f8
--- /dev/null
+++ b/arch/c6x/include/asm/procinfo.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated
+ * Author: Mark Salter (msalter@redhat.com)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PROCINFO_H
+#define _ASM_C6X_PROCINFO_H
+
+#ifdef __KERNEL__
+
+struct proc_info_list {
+ unsigned int cpu_val;
+ unsigned int cpu_mask;
+ const char *arch_name;
+ const char *elf_name;
+ unsigned int elf_hwcap;
+};
+
+#else /* __KERNEL__ */
+#include <asm/elf.h>
+#warning "Please include asm/elf.h instead"
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_C6X_PROCINFO_H */
diff --git a/arch/c6x/include/asm/prom.h b/arch/c6x/include/asm/prom.h
new file mode 100644
index 00000000000..b4ec95f0751
--- /dev/null
+++ b/arch/c6x/include/asm/prom.h
@@ -0,0 +1 @@
+/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/c6x/include/asm/ptrace.h b/arch/c6x/include/asm/ptrace.h
new file mode 100644
index 00000000000..21e8d7931fe
--- /dev/null
+++ b/arch/c6x/include/asm/ptrace.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2004, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_PTRACE_H
+#define _ASM_C6X_PTRACE_H
+
+#define BKPT_OPCODE 0x56454314 /* illegal opcode */
+
+#ifdef _BIG_ENDIAN
+#define PT_LO(odd, even) odd
+#define PT_HI(odd, even) even
+#else
+#define PT_LO(odd, even) even
+#define PT_HI(odd, even) odd
+#endif
+
+#define PT_A4_ORG PT_LO(1, 0)
+#define PT_TSR PT_HI(1, 0)
+#define PT_ILC PT_LO(3, 2)
+#define PT_RILC PT_HI(3, 2)
+#define PT_CSR PT_LO(5, 4)
+#define PT_PC PT_HI(5, 4)
+#define PT_B16 PT_LO(7, 6)
+#define PT_B17 PT_HI(7, 6)
+#define PT_B18 PT_LO(9, 8)
+#define PT_B19 PT_HI(9, 8)
+#define PT_B20 PT_LO(11, 10)
+#define PT_B21 PT_HI(11, 10)
+#define PT_B22 PT_LO(13, 12)
+#define PT_B23 PT_HI(13, 12)
+#define PT_B24 PT_LO(15, 14)
+#define PT_B25 PT_HI(15, 14)
+#define PT_B26 PT_LO(17, 16)
+#define PT_B27 PT_HI(17, 16)
+#define PT_B28 PT_LO(19, 18)
+#define PT_B29 PT_HI(19, 18)
+#define PT_B30 PT_LO(21, 20)
+#define PT_B31 PT_HI(21, 20)
+#define PT_B0 PT_LO(23, 22)
+#define PT_B1 PT_HI(23, 22)
+#define PT_B2 PT_LO(25, 24)
+#define PT_B3 PT_HI(25, 24)
+#define PT_B4 PT_LO(27, 26)
+#define PT_B5 PT_HI(27, 26)
+#define PT_B6 PT_LO(29, 28)
+#define PT_B7 PT_HI(29, 28)
+#define PT_B8 PT_LO(31, 30)
+#define PT_B9 PT_HI(31, 30)
+#define PT_B10 PT_LO(33, 32)
+#define PT_B11 PT_HI(33, 32)
+#define PT_B12 PT_LO(35, 34)
+#define PT_B13 PT_HI(35, 34)
+#define PT_A16 PT_LO(37, 36)
+#define PT_A17 PT_HI(37, 36)
+#define PT_A18 PT_LO(39, 38)
+#define PT_A19 PT_HI(39, 38)
+#define PT_A20 PT_LO(41, 40)
+#define PT_A21 PT_HI(41, 40)
+#define PT_A22 PT_LO(43, 42)
+#define PT_A23 PT_HI(43, 42)
+#define PT_A24 PT_LO(45, 44)
+#define PT_A25 PT_HI(45, 44)
+#define PT_A26 PT_LO(47, 46)
+#define PT_A27 PT_HI(47, 46)
+#define PT_A28 PT_LO(49, 48)
+#define PT_A29 PT_HI(49, 48)
+#define PT_A30 PT_LO(51, 50)
+#define PT_A31 PT_HI(51, 50)
+#define PT_A0 PT_LO(53, 52)
+#define PT_A1 PT_HI(53, 52)
+#define PT_A2 PT_LO(55, 54)
+#define PT_A3 PT_HI(55, 54)
+#define PT_A4 PT_LO(57, 56)
+#define PT_A5 PT_HI(57, 56)
+#define PT_A6 PT_LO(59, 58)
+#define PT_A7 PT_HI(59, 58)
+#define PT_A8 PT_LO(61, 60)
+#define PT_A9 PT_HI(61, 60)
+#define PT_A10 PT_LO(63, 62)
+#define PT_A11 PT_HI(63, 62)
+#define PT_A12 PT_LO(65, 64)
+#define PT_A13 PT_HI(65, 64)
+#define PT_A14 PT_LO(67, 66)
+#define PT_A15 PT_HI(67, 66)
+#define PT_B14 PT_LO(69, 68)
+#define PT_B15 PT_HI(69, 68)
+
+#define NR_PTREGS 70
+
+#define PT_DP PT_B14 /* Data Segment Pointer (B14) */
+#define PT_SP PT_B15 /* Stack Pointer (B15) */
+
+#ifndef __ASSEMBLY__
+
+#ifdef _BIG_ENDIAN
+#define REG_PAIR(odd, even) unsigned long odd; unsigned long even
+#else
+#define REG_PAIR(odd, even) unsigned long even; unsigned long odd
+#endif
+
+/*
+ * this struct defines the way the registers are stored on the
+ * stack during a system call. fields defined with REG_PAIR
+ * are saved and restored using double-word memory operations
+ * which means the word ordering of the pair depends on endianness.
+ */
+struct pt_regs {
+ REG_PAIR(tsr, orig_a4);
+ REG_PAIR(rilc, ilc);
+ REG_PAIR(pc, csr);
+
+ REG_PAIR(b17, b16);
+ REG_PAIR(b19, b18);
+ REG_PAIR(b21, b20);
+ REG_PAIR(b23, b22);
+ REG_PAIR(b25, b24);
+ REG_PAIR(b27, b26);
+ REG_PAIR(b29, b28);
+ REG_PAIR(b31, b30);
+
+ REG_PAIR(b1, b0);
+ REG_PAIR(b3, b2);
+ REG_PAIR(b5, b4);
+ REG_PAIR(b7, b6);
+ REG_PAIR(b9, b8);
+ REG_PAIR(b11, b10);
+ REG_PAIR(b13, b12);
+
+ REG_PAIR(a17, a16);
+ REG_PAIR(a19, a18);
+ REG_PAIR(a21, a20);
+ REG_PAIR(a23, a22);
+ REG_PAIR(a25, a24);
+ REG_PAIR(a27, a26);
+ REG_PAIR(a29, a28);
+ REG_PAIR(a31, a30);
+
+ REG_PAIR(a1, a0);
+ REG_PAIR(a3, a2);
+ REG_PAIR(a5, a4);
+ REG_PAIR(a7, a6);
+ REG_PAIR(a9, a8);
+ REG_PAIR(a11, a10);
+ REG_PAIR(a13, a12);
+
+ REG_PAIR(a15, a14);
+ REG_PAIR(sp, dp);
+};
+
+#ifdef __KERNEL__
+
+#include <linux/linkage.h>
+
+#define user_mode(regs) ((((regs)->tsr) & 0x40) != 0)
+
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+#define user_stack_pointer(regs) ((regs)->sp)
+
+extern void show_regs(struct pt_regs *);
+
+extern asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs);
+extern asmlinkage void syscall_trace_exit(struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_C6X_PTRACE_H */
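
The REG_PAIR() comment above notes that register pairs are saved and restored with double-word memory operations, so the order of the two struct members has to follow the memory layout of a single 64-bit store. The host-side sketch below (plain C, no kernel headers, names local to the sketch) shows that layout dependence.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct pair { uint32_t first; uint32_t second; };     /* two adjacent 32-bit slots */

int main(void)
{
        /* pretend B5:B4 is stored with one 64-bit store: high half B5, low half B4 */
        uint64_t b5_b4 = ((uint64_t)0xB5B5B5B5ULL << 32) | 0xB4B4B4B4ULL;
        struct pair p;

        memcpy(&p, &b5_b4, sizeof(p));                /* the "double-word store" */
        printf("first slot  = 0x%08x\n", (unsigned)p.first);
        printf("second slot = 0x%08x\n", (unsigned)p.second);
        /* Little endian: the first slot holds B4 (the even register);
         * big endian: the first slot holds B5 (the odd register).
         * REG_PAIR() orders the struct members to match this.
         */
        return 0;
}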
diff --git a/arch/c6x/include/asm/sections.h b/arch/c6x/include/asm/sections.h
new file mode 100644
index 00000000000..f703989d837
--- /dev/null
+++ b/arch/c6x/include/asm/sections.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_C6X_SECTIONS_H
+#define _ASM_C6X_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _vectors_start[];
+extern char _vectors_end[];
+
+extern char _data_lma[];
+extern char _fdt_start[], _fdt_end[];
+
+#endif /* _ASM_C6X_SECTIONS_H */
diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h
new file mode 100644
index 00000000000..1808f279f82
--- /dev/null
+++ b/arch/c6x/include/asm/setup.h
@@ -0,0 +1,32 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SETUP_H
+#define _ASM_C6X_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#ifndef __ASSEMBLY__
+extern char c6x_command_line[COMMAND_LINE_SIZE];
+
+extern int c6x_add_memory(phys_addr_t start, unsigned long size);
+
+extern unsigned long ram_start;
+extern unsigned long ram_end;
+
+extern int c6x_num_cores;
+extern unsigned int c6x_silicon_rev;
+extern unsigned int c6x_devstat;
+extern unsigned char c6x_fuse_mac[6];
+
+extern void machine_init(unsigned long dt_ptr);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_C6X_SETUP_H */
diff --git a/arch/c6x/include/asm/sigcontext.h b/arch/c6x/include/asm/sigcontext.h
new file mode 100644
index 00000000000..eb702f39cde
--- /dev/null
+++ b/arch/c6x/include/asm/sigcontext.h
@@ -0,0 +1,80 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SIGCONTEXT_H
+#define _ASM_C6X_SIGCONTEXT_H
+
+
+struct sigcontext {
+ unsigned long sc_mask; /* old sigmask */
+ unsigned long sc_sp; /* old user stack pointer */
+
+ unsigned long sc_a4;
+ unsigned long sc_b4;
+ unsigned long sc_a6;
+ unsigned long sc_b6;
+ unsigned long sc_a8;
+ unsigned long sc_b8;
+
+ unsigned long sc_a0;
+ unsigned long sc_a1;
+ unsigned long sc_a2;
+ unsigned long sc_a3;
+ unsigned long sc_a5;
+ unsigned long sc_a7;
+ unsigned long sc_a9;
+
+ unsigned long sc_b0;
+ unsigned long sc_b1;
+ unsigned long sc_b2;
+ unsigned long sc_b3;
+ unsigned long sc_b5;
+ unsigned long sc_b7;
+ unsigned long sc_b9;
+
+ unsigned long sc_a16;
+ unsigned long sc_a17;
+ unsigned long sc_a18;
+ unsigned long sc_a19;
+ unsigned long sc_a20;
+ unsigned long sc_a21;
+ unsigned long sc_a22;
+ unsigned long sc_a23;
+ unsigned long sc_a24;
+ unsigned long sc_a25;
+ unsigned long sc_a26;
+ unsigned long sc_a27;
+ unsigned long sc_a28;
+ unsigned long sc_a29;
+ unsigned long sc_a30;
+ unsigned long sc_a31;
+
+ unsigned long sc_b16;
+ unsigned long sc_b17;
+ unsigned long sc_b18;
+ unsigned long sc_b19;
+ unsigned long sc_b20;
+ unsigned long sc_b21;
+ unsigned long sc_b22;
+ unsigned long sc_b23;
+ unsigned long sc_b24;
+ unsigned long sc_b25;
+ unsigned long sc_b26;
+ unsigned long sc_b27;
+ unsigned long sc_b28;
+ unsigned long sc_b29;
+ unsigned long sc_b30;
+ unsigned long sc_b31;
+
+ unsigned long sc_csr;
+ unsigned long sc_pc;
+};
+
+#endif /* _ASM_C6X_SIGCONTEXT_H */
diff --git a/arch/c6x/include/asm/signal.h b/arch/c6x/include/asm/signal.h
new file mode 100644
index 00000000000..f1cd870596a
--- /dev/null
+++ b/arch/c6x/include/asm/signal.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_C6X_SIGNAL_H
+#define _ASM_C6X_SIGNAL_H
+
+#include <asm-generic/signal.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+
+struct pt_regs;
+
+extern asmlinkage int do_rt_sigreturn(struct pt_regs *regs);
+extern asmlinkage void do_notify_resume(struct pt_regs *regs,
+ u32 thread_info_flags,
+ int syscall);
+#endif
+
+#endif /* _ASM_C6X_SIGNAL_H */
diff --git a/arch/c6x/include/asm/soc.h b/arch/c6x/include/asm/soc.h
new file mode 100644
index 00000000000..43f50159e59
--- /dev/null
+++ b/arch/c6x/include/asm/soc.h
@@ -0,0 +1,35 @@
+/*
+ * Miscellaneous SoC-specific hooks.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _ASM_C6X_SOC_H
+#define _ASM_C6X_SOC_H
+
+struct soc_ops {
+ /* Return active exception event or -1 if none */
+ int (*get_exception)(void);
+
+ /* Assert an event */
+ void (*assert_event)(unsigned int evt);
+};
+
+extern struct soc_ops soc_ops;
+
+extern int soc_get_exception(void);
+extern void soc_assert_event(unsigned int event);
+extern int soc_mac_addr(unsigned int index, u8 *addr);
+
+/*
+ * For MMIO on SoC devices. Registers are always in the same byte order as the CPU.
+ */
+#define soc_readl(addr) __raw_readl(addr)
+#define soc_writel(b, addr) __raw_writel((b), (addr))
+
+#endif /* _ASM_C6X_SOC_H */
diff --git a/arch/c6x/include/asm/string.h b/arch/c6x/include/asm/string.h
new file mode 100644
index 00000000000..b21517c80a1
--- /dev/null
+++ b/arch/c6x/include/asm/string.h
@@ -0,0 +1,21 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_STRING_H
+#define _ASM_C6X_STRING_H
+
+#include <asm/page.h>
+#include <linux/linkage.h>
+
+asmlinkage extern void *memcpy(void *to, const void *from, size_t n);
+
+#define __HAVE_ARCH_MEMCPY
+
+#endif /* _ASM_C6X_STRING_H */
diff --git a/arch/c6x/include/asm/swab.h b/arch/c6x/include/asm/swab.h
new file mode 100644
index 00000000000..fd4bb0520e5
--- /dev/null
+++ b/arch/c6x/include/asm/swab.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SWAB_H
+#define _ASM_C6X_SWAB_H
+
+static inline __attribute_const__ __u16 __c6x_swab16(__u16 val)
+{
+ asm("swap4 .l1 %0,%0\n" : "+a"(val));
+ return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swab32(__u32 val)
+{
+ asm("swap4 .l1 %0,%0\n"
+ "swap2 .l1 %0,%0\n"
+ : "+a"(val));
+ return val;
+}
+
+static inline __attribute_const__ __u64 __c6x_swab64(__u64 val)
+{
+ asm(" swap2 .s1 %p0,%P0\n"
+ "|| swap2 .l1 %P0,%p0\n"
+ " swap4 .l1 %p0,%p0\n"
+ " swap4 .l1 %P0,%P0\n"
+ : "+a"(val));
+ return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swahw32(__u32 val)
+{
+ asm("swap2 .l1 %0,%0\n" : "+a"(val));
+ return val;
+}
+
+static inline __attribute_const__ __u32 __c6x_swahb32(__u32 val)
+{
+ asm("swap4 .l1 %0,%0\n" : "+a"(val));
+ return val;
+}
+
+#define __arch_swab16 __c6x_swab16
+#define __arch_swab32 __c6x_swab32
+#define __arch_swab64 __c6x_swab64
+#define __arch_swahw32 __c6x_swahw32
+#define __arch_swahb32 __c6x_swahb32
+
+#endif /* _ASM_C6X_SWAB_H */
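
Taking the header above at its word (swap4 reverses the bytes within each halfword, swap2 swaps the two halfwords, which is how __c6x_swahb32 and __c6x_swahw32 use them individually), the two-instruction __c6x_swab32 sequence composes into a full 32-bit byte reversal. A portable sketch of that composition, using local helper names:

#include <stdio.h>
#include <stdint.h>

static uint32_t swap_bytes_in_halfwords(uint32_t v)   /* plays the role of swap4 */
{
        return ((v & 0x00FF00FFu) << 8) | ((v >> 8) & 0x00FF00FFu);
}

static uint32_t swap_halfwords(uint32_t v)            /* plays the role of swap2 */
{
        return (v << 16) | (v >> 16);
}

int main(void)
{
        uint32_t x = 0x12345678;
        printf("0x%08x\n", swap_halfwords(swap_bytes_in_halfwords(x)));   /* 0x78563412 */
        return 0;
}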
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
new file mode 100644
index 00000000000..ae2be315ee9
--- /dev/null
+++ b/arch/c6x/include/asm/syscall.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_C6X_SYSCALL_H
+#define __ASM_C6X_SYSCALL_H
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->b0;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->a4) ? regs->a4 : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a4;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a4 = error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned int i,
+ unsigned int n, unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--)
+ break;
+ *args++ = regs->a4;
+ case 1:
+ if (!n--)
+ break;
+ *args++ = regs->b4;
+ case 2:
+ if (!n--)
+ break;
+ *args++ = regs->a6;
+ case 3:
+ if (!n--)
+ break;
+ *args++ = regs->b6;
+ case 4:
+ if (!n--)
+ break;
+ *args++ = regs->a8;
+ case 5:
+ if (!n--)
+ break;
+ *args++ = regs->b8;
+ case 6:
+ if (!n--)
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ switch (i) {
+ case 0:
+ if (!n--)
+ break;
+ regs->a4 = *args++;
+ case 1:
+ if (!n--)
+ break;
+ regs->b4 = *args++;
+ case 2:
+ if (!n--)
+ break;
+ regs->a6 = *args++;
+ case 3:
+ if (!n--)
+ break;
+ regs->b6 = *args++;
+ case 4:
+ if (!n--)
+ break;
+ regs->a8 = *args++;
+ case 5:
+ if (!n--)
+ break;
+ regs->b8 = *args++;
+ case 6:
+ if (!n)
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /* __ASM_C6X_SYSCALL_H */
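
The fall-through switches above jump to argument slot i and then copy one slot per case until n slots have been handled, so (i, n) select a contiguous window of the argument registers. A standalone analogue that compiles on any host; the struct and function names are local to this sketch, not kernel code:

#include <stdio.h>

struct fake_regs { unsigned long a4, b4, a6, b6, a8, b8; };

static void get_args(const struct fake_regs *r, unsigned int i,
                     unsigned int n, unsigned long *args)
{
        switch (i) {
        case 0: if (!n--) break; *args++ = r->a4;  /* fall through */
        case 1: if (!n--) break; *args++ = r->b4;  /* fall through */
        case 2: if (!n--) break; *args++ = r->a6;  /* fall through */
        case 3: if (!n--) break; *args++ = r->b6;  /* fall through */
        case 4: if (!n--) break; *args++ = r->a8;  /* fall through */
        case 5: if (!n--) break; *args++ = r->b8;
        }
}

int main(void)
{
        struct fake_regs r = { 10, 20, 30, 40, 50, 60 };
        unsigned long out[3];

        get_args(&r, 1, 3, out);                   /* copies the b4, a6, b6 slots */
        printf("%lu %lu %lu\n", out[0], out[1], out[2]);   /* 20 30 40 */
        return 0;
}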
diff --git a/arch/c6x/include/asm/syscalls.h b/arch/c6x/include/asm/syscalls.h
new file mode 100644
index 00000000000..aed53da703c
--- /dev/null
+++ b/arch/c6x/include/asm/syscalls.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ASM_C6X_SYSCALLS_H
+#define __ASM_C6X_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+
+/* The following are trampolines in entry.S to handle 64-bit arguments */
+extern long sys_pread_c6x(unsigned int fd, char __user *buf,
+ size_t count, off_t pos_low, off_t pos_high);
+extern long sys_pwrite_c6x(unsigned int fd, const char __user *buf,
+ size_t count, off_t pos_low, off_t pos_high);
+extern long sys_truncate64_c6x(const char __user *path,
+ off_t length_low, off_t length_high);
+extern long sys_ftruncate64_c6x(unsigned int fd,
+ off_t length_low, off_t length_high);
+extern long sys_fadvise64_c6x(int fd, u32 offset_lo, u32 offset_hi,
+ u32 len, int advice);
+extern long sys_fadvise64_64_c6x(int fd, u32 offset_lo, u32 offset_hi,
+ u32 len_lo, u32 len_hi, int advice);
+extern long sys_fallocate_c6x(int fd, int mode,
+ u32 offset_lo, u32 offset_hi,
+ u32 len_lo, u32 len_hi);
+extern int sys_cache_sync(unsigned long s, unsigned long e);
+
+struct pt_regs;
+
+extern asmlinkage long sys_c6x_clone(struct pt_regs *regs);
+extern asmlinkage long sys_c6x_execve(const char __user *name,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp,
+ struct pt_regs *regs);
+
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_C6X_SYSCALLS_H */
diff --git a/arch/c6x/include/asm/system.h b/arch/c6x/include/asm/system.h
new file mode 100644
index 00000000000..e076dc0eacc
--- /dev/null
+++ b/arch/c6x/include/asm/system.h
@@ -0,0 +1,168 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_SYSTEM_H
+#define _ASM_C6X_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+#define prepare_to_switch() do { } while (0)
+
+struct task_struct;
+struct thread_struct;
+asmlinkage void *__switch_to(struct thread_struct *prev,
+ struct thread_struct *next,
+ struct task_struct *tsk);
+
+#define switch_to(prev, next, last) \
+ do { \
+ current->thread.wchan = (u_long) __builtin_return_address(0); \
+ (last) = __switch_to(&(prev)->thread, \
+ &(next)->thread, (prev)); \
+ mb(); \
+ current->thread.wchan = 0; \
+ } while (0)
+
+/* Reset the board */
+#define HARD_RESET_NOW()
+
+#define get_creg(reg) \
+ ({ unsigned int __x; \
+ asm volatile ("mvc .s2 " #reg ",%0\n" : "=b"(__x)); __x; })
+
+#define set_creg(reg, v) \
+ do { unsigned int __x = (unsigned int)(v); \
+ asm volatile ("mvc .s2 %0," #reg "\n" : : "b"(__x)); \
+ } while (0)
+
+#define or_creg(reg, n) \
+ do { unsigned __x, __n = (unsigned)(n); \
+ asm volatile ("mvc .s2 " #reg ",%0\n" \
+ "or .l2 %1,%0,%0\n" \
+ "mvc .s2 %0," #reg "\n" \
+ "nop\n" \
+ : "=&b"(__x) : "b"(__n)); \
+ } while (0)
+
+#define and_creg(reg, n) \
+ do { unsigned __x, __n = (unsigned)(n); \
+ asm volatile ("mvc .s2 " #reg ",%0\n" \
+ "and .l2 %1,%0,%0\n" \
+ "mvc .s2 %0," #reg "\n" \
+ "nop\n" \
+ : "=&b"(__x) : "b"(__n)); \
+ } while (0)
+
+#define get_coreid() (get_creg(DNUM) & 0xff)
+
+/* Set/get IST */
+#define set_ist(x) set_creg(ISTP, x)
+#define get_ist() get_creg(ISTP)
+
+/*
+ * Exception management
+ */
+asmlinkage void enable_exception(void);
+#define disable_exception()
+#define get_except_type() get_creg(EFR)
+#define ack_exception(type) set_creg(ECR, 1 << (type))
+#define get_iexcept() get_creg(IERR)
+#define set_iexcept(mask) set_creg(IERR, (mask))
+
+/*
+ * Misc. functions
+ */
+#define nop() asm("NOP\n");
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned int)(x), (void *) (ptr), \
+ sizeof(*(ptr))))
+#define tas(ptr) xchg((ptr), 1)
+
+unsigned int _lmbd(unsigned int, unsigned int);
+unsigned int _bitr(unsigned int);
+
+struct __xchg_dummy { unsigned int a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+static inline unsigned int __xchg(unsigned int x, volatile void *ptr, int size)
+{
+ unsigned int tmp;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ tmp = 0;
+ tmp = *((unsigned char *) ptr);
+ *((unsigned char *) ptr) = (unsigned char) x;
+ break;
+ case 2:
+ tmp = 0;
+ tmp = *((unsigned short *) ptr);
+ *((unsigned short *) ptr) = x;
+ break;
+ case 4:
+ tmp = 0;
+ tmp = *((unsigned int *) ptr);
+ *((unsigned int *) ptr) = x;
+ break;
+ }
+ local_irq_restore(flags);
+ return tmp;
+}
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n), \
+ sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#include <asm-generic/cmpxchg.h>
+
+#define _extu(x, s, e) \
+ ({ unsigned int __x; \
+ asm volatile ("extu .S2 %3,%1,%2,%0\n" : \
+ "=b"(__x) : "n"(s), "n"(e), "b"(x)); \
+ __x; })
+
+
+extern unsigned int c6x_core_freq;
+
+struct pt_regs;
+
+extern void die(char *str, struct pt_regs *fp, int nr);
+extern asmlinkage int process_exception(struct pt_regs *regs);
+extern void time_init(void);
+extern void free_initmem(void);
+
+extern void (*c6x_restart)(void);
+extern void (*c6x_halt)(void);
+
+#endif /* _ASM_C6X_SYSTEM_H */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
new file mode 100644
index 00000000000..fd99148cda9
--- /dev/null
+++ b/arch/c6x/include/asm/thread_info.h
@@ -0,0 +1,121 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.3x: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_THREAD_INFO_H
+#define _ASM_C6X_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_SIZE 4096
+#define THREAD_SHIFT 12
+#define THREAD_ORDER 0
+#else
+#define THREAD_SIZE 8192
+#define THREAD_SHIFT 13
+#define THREAD_ORDER 1
+#endif
+
+#define THREAD_START_SP (THREAD_SIZE - 8)
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 = preemptable, <0 = BUG */
+ mm_segment_t addr_limit; /* thread address space */
+ struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* get the thread information struct of current task */
+static inline __attribute__((const))
+struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ asm volatile (" clr .s2 B15,0,%1,%0\n"
+ : "=b" (ti)
+ : "Iu5" (THREAD_SHIFT - 1));
+ return ti;
+}
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#else
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
+#endif
+
+#define alloc_thread_info_node(tsk, node) \
+ ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+
+#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flag bit numbers
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+
+#define TIF_POLLING_NRFLAG 16 /* true if polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 17 /* OOM killer killed process */
+
+#define TIF_WORK_MASK 0x00007FFE /* work on irq/exception return */
+#define TIF_ALLWORK_MASK 0x00007FFF /* work on any return to u-space */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_C6X_THREAD_INFO_H */
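
current_thread_info() above recovers the thread_info pointer by clearing the low THREAD_SHIFT bits of the stack pointer (the clr instruction clears bits 0..THREAD_SHIFT-1), relying on the kernel stack being THREAD_SIZE-aligned with thread_info at its base. A host-side sketch of the same masking, using the 8K (non-4KSTACKS) constants and names local to the sketch:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_THREAD_SHIFT 13
#define SKETCH_THREAD_SIZE  (1UL << SKETCH_THREAD_SHIFT)

static uintptr_t stack_base(uintptr_t sp)
{
        return sp & ~(SKETCH_THREAD_SIZE - 1);     /* clear bits 0..12, like the clr insn */
}

int main(void)
{
        uintptr_t sp = 0x8001F7A0;                 /* some address inside the kernel stack */
        printf("thread_info at 0x%lx\n", (unsigned long)stack_base(sp));   /* 0x8001E000 */
        return 0;
}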
diff --git a/arch/c6x/include/asm/timer64.h b/arch/c6x/include/asm/timer64.h
new file mode 100644
index 00000000000..bbe27bb9887
--- /dev/null
+++ b/arch/c6x/include/asm/timer64.h
@@ -0,0 +1,6 @@
+#ifndef _C6X_TIMER64_H
+#define _C6X_TIMER64_H
+
+extern void __init timer64_init(void);
+
+#endif /* _C6X_TIMER64_H */
diff --git a/arch/c6x/include/asm/timex.h b/arch/c6x/include/asm/timex.h
new file mode 100644
index 00000000000..508c3ec971f
--- /dev/null
+++ b/arch/c6x/include/asm/timex.h
@@ -0,0 +1,33 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Modified for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_TIMEX_H
+#define _ASM_C6X_TIMEX_H
+
+#define CLOCK_TICK_RATE ((1000 * 1000000UL) / 6)
+
+/* 64-bit timestamp */
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned l, h;
+
+ asm volatile (" dint\n"
+ " mvc .s2 TSCL,%0\n"
+ " mvc .s2 TSCH,%1\n"
+ " rint\n"
+ : "=b"(l), "=b"(h));
+ return ((cycles_t)h << 32) | l;
+}
+
+#endif /* _ASM_C6X_TIMEX_H */
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
new file mode 100644
index 00000000000..8709e5e29d2
--- /dev/null
+++ b/arch/c6x/include/asm/tlb.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_C6X_TLB_H
+#define _ASM_C6X_TLB_H
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_C6X_TLB_H */
diff --git a/arch/c6x/include/asm/traps.h b/arch/c6x/include/asm/traps.h
new file mode 100644
index 00000000000..62124d7b1b5
--- /dev/null
+++ b/arch/c6x/include/asm/traps.h
@@ -0,0 +1,36 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_TRAPS_H
+#define _ASM_C6X_TRAPS_H
+
+#define EXCEPT_TYPE_NXF 31 /* NMI */
+#define EXCEPT_TYPE_EXC 30 /* external exception */
+#define EXCEPT_TYPE_IXF 1 /* internal exception */
+#define EXCEPT_TYPE_SXF 0 /* software exception */
+
+#define EXCEPT_CAUSE_LBX (1 << 7) /* loop buffer exception */
+#define EXCEPT_CAUSE_PRX (1 << 6) /* privilege exception */
+#define EXCEPT_CAUSE_RAX (1 << 5) /* resource access exception */
+#define EXCEPT_CAUSE_RCX (1 << 4) /* resource conflict exception */
+#define EXCEPT_CAUSE_OPX (1 << 3) /* opcode exception */
+#define EXCEPT_CAUSE_EPX (1 << 2) /* execute packet exception */
+#define EXCEPT_CAUSE_FPX (1 << 1) /* fetch packet exception */
+#define EXCEPT_CAUSE_IFX (1 << 0) /* instruction fetch exception */
+
+struct exception_info {
+ char *kernel_str;
+ int signo;
+ int code;
+};
+
+extern int (*c6x_nmi_handler)(struct pt_regs *regs);
+
+#endif /* _ASM_C6X_TRAPS_H */
diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h
new file mode 100644
index 00000000000..453dd263bee
--- /dev/null
+++ b/arch/c6x/include/asm/uaccess.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_UACCESS_H
+#define _ASM_C6X_UACCESS_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+
+#ifdef CONFIG_ACCESS_CHECK
+#define __access_ok _access_ok
+#endif
+
+/*
+ * __copy_from_user/__copy_to_user are based on the ones in asm-generic/uaccess.h.
+ *
+ * C6X supports unaligned 32-bit and 64-bit loads and stores.
+ */
+static inline __must_check long __copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ u32 tmp32;
+ u64 tmp64;
+
+ if (__builtin_constant_p(n)) {
+ switch (n) {
+ case 1:
+ *(u8 *)to = *(u8 __force *)from;
+ return 0;
+ case 4:
+ asm volatile ("ldnw .d1t1 *%2,%0\n"
+ "nop 4\n"
+ "stnw .d1t1 %0,*%1\n"
+ : "=&a"(tmp32)
+ : "A"(to), "a"(from)
+ : "memory");
+ return 0;
+ case 8:
+ asm volatile ("ldndw .d1t1 *%2,%0\n"
+ "nop 4\n"
+ "stndw .d1t1 %0,*%1\n"
+ : "=&a"(tmp64)
+ : "a"(to), "a"(from)
+ : "memory");
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline __must_check long __copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ u32 tmp32;
+ u64 tmp64;
+
+ if (__builtin_constant_p(n)) {
+ switch (n) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 4:
+ asm volatile ("ldnw .d1t1 *%2,%0\n"
+ "nop 4\n"
+ "stnw .d1t1 %0,*%1\n"
+ : "=&a"(tmp32)
+ : "a"(to), "a"(from)
+ : "memory");
+ return 0;
+ case 8:
+ asm volatile ("ldndw .d1t1 *%2,%0\n"
+ "nop 4\n"
+ "stndw .d1t1 %0,*%1\n"
+ : "=&a"(tmp64)
+ : "a"(to), "a"(from)
+ : "memory");
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+
+#define __copy_to_user __copy_to_user
+#define __copy_from_user __copy_from_user
+
+extern int _access_ok(unsigned long addr, unsigned long size);
+#ifdef CONFIG_ACCESS_CHECK
+#define __access_ok _access_ok
+#endif
+
+#include <asm-generic/uaccess.h>
+
+#endif /* _ASM_C6X_UACCESS_H */
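
uaccess.h fast-paths __copy_from_user()/__copy_to_user() when the length is a compile-time constant of 1, 4, or 8 bytes, emitting a single (possibly unaligned) access and falling back to memcpy() otherwise. A compiler-portable sketch of that dispatch, with memcpy() standing in for the ldnw/stnw and ldndw/stndw instructions; helper names are local to the sketch:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static inline void copy_small(void *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {             /* GCC/clang builtin */
                switch (n) {
                case 1: *(uint8_t *)to = *(const uint8_t *)from; return;
                case 4: memcpy(to, from, 4); return;   /* stands in for ldnw/stnw   */
                case 8: memcpy(to, from, 8); return;   /* stands in for ldndw/stndw */
                }
        }
        memcpy(to, from, n);                       /* generic slow path */
}

int main(void)
{
        uint32_t src = 0x12345678, dst = 0;
        copy_small(&dst, &src, sizeof(src));       /* constant 4: takes the fast path */
        printf("0x%08x\n", dst);
        return 0;
}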
diff --git a/arch/c6x/include/asm/unaligned.h b/arch/c6x/include/asm/unaligned.h
new file mode 100644
index 00000000000..b976cb740ea
--- /dev/null
+++ b/arch/c6x/include/asm/unaligned.h
@@ -0,0 +1,170 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ * Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_C6X_UNALIGNED_H
+#define _ASM_C6X_UNALIGNED_H
+
+#include <linux/swab.h>
+
+/*
+ * The C64x+ can do unaligned word and dword accesses in hardware
+ * using special load/store instructions.
+ */
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ const u8 *_p = p;
+ return _p[0] | _p[1] << 8;
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ const u8 *_p = p;
+ return _p[0] << 8 | _p[1];
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ u8 *_p = p;
+ _p[0] = val;
+ _p[1] = val >> 8;
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ u8 *_p = p;
+ _p[0] = val >> 8;
+ _p[1] = val;
+}
+
+static inline u32 get_unaligned32(const void *p)
+{
+ u32 val = (u32) p;
+ asm (" ldnw .d1t1 *%0,%0\n"
+ " nop 4\n"
+ : "+a"(val));
+ return val;
+}
+
+static inline void put_unaligned32(u32 val, void *p)
+{
+ asm volatile (" stnw .d2t1 %0,*%1\n"
+ : : "a"(val), "b"(p) : "memory");
+}
+
+static inline u64 get_unaligned64(const void *p)
+{
+ u64 val;
+ asm volatile (" ldndw .d1t1 *%1,%0\n"
+ " nop 4\n"
+ : "=a"(val) : "a"(p));
+ return val;
+}
+
+static inline void put_unaligned64(u64 val, const void *p)
+{
+ asm volatile (" stndw .d2t1 %0,*%1\n"
+ : : "a"(val), "b"(p) : "memory");
+}
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+#define get_unaligned_le32(p) __swab32(get_unaligned32(p))
+#define get_unaligned_le64(p) __swab64(get_unaligned64(p))
+#define get_unaligned_be32(p) get_unaligned32(p)
+#define get_unaligned_be64(p) get_unaligned64(p)
+#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
+#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
+#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
+#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#else
+
+#define get_unaligned_le32(p) get_unaligned32(p)
+#define get_unaligned_le64(p) get_unaligned64(p)
+#define get_unaligned_be32(p) __swab32(get_unaligned32(p))
+#define get_unaligned_be64(p) __swab64(get_unaligned64(p))
+#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
+#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
+#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
+#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif
+
+/*
+ * Cause a link-time error if we try an unaligned access other than
+ * 1,2,4 or 8 bytes long
+ */
+extern int __bad_unaligned_access_size(void);
+
+#define __get_unaligned_le(ptr) (typeof(*(ptr)))({ \
+ sizeof(*(ptr)) == 1 ? *(ptr) : \
+ (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) : \
+ (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) : \
+ (sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) : \
+ __bad_unaligned_access_size()))); \
+ })
+
+#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({ \
+ sizeof(*(ptr)) == 1 ? *(ptr) : \
+ (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) : \
+ (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) : \
+ (sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) : \
+ __bad_unaligned_access_size()))); \
+ })
+
+#define __put_unaligned_le(val, ptr) ({ \
+ void *__gu_p = (ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ *(u8 *)__gu_p = (__force u8)(val); \
+ break; \
+ case 2: \
+ put_unaligned_le16((__force u16)(val), __gu_p); \
+ break; \
+ case 4: \
+ put_unaligned_le32((__force u32)(val), __gu_p); \
+ break; \
+ case 8: \
+ put_unaligned_le64((__force u64)(val), __gu_p); \
+ break; \
+ default: \
+ __bad_unaligned_access_size(); \
+ break; \
+ } \
+ (void)0; })
+
+#define __put_unaligned_be(val, ptr) ({ \
+ void *__gu_p = (ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ *(u8 *)__gu_p = (__force u8)(val); \
+ break; \
+ case 2: \
+ put_unaligned_be16((__force u16)(val), __gu_p); \
+ break; \
+ case 4: \
+ put_unaligned_be32((__force u32)(val), __gu_p); \
+ break; \
+ case 8: \
+ put_unaligned_be64((__force u64)(val), __gu_p); \
+ break; \
+ default: \
+ __bad_unaligned_access_size(); \
+ break; \
+ } \
+ (void)0; })
+
+#endif /* _ASM_C6X_UNALIGNED_H */
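
The helpers in unaligned.h exist so callers can pull fixed-endian fields out of byte buffers at arbitrary offsets; on C64x+ a single ldnw/ldndw does the work. A portable usage sketch (assembled byte by byte rather than with the C6x instructions, helper name local to the sketch):

#include <stdio.h>
#include <stdint.h>

static uint32_t sketch_get_unaligned_le32(const void *p)
{
        const uint8_t *b = p;
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        uint8_t pkt[] = { 0xAA, 0x78, 0x56, 0x34, 0x12 };  /* header byte + LE word at odd offset */
        printf("0x%08x\n", sketch_get_unaligned_le32(pkt + 1));   /* 0x12345678 */
        return 0;
}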
diff --git a/arch/c6x/include/asm/unistd.h b/arch/c6x/include/asm/unistd.h
new file mode 100644
index 00000000000..6d54ea4262e
--- /dev/null
+++ b/arch/c6x/include/asm/unistd.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * Based on arch/tile version.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+#if !defined(_ASM_C6X_UNISTD_H) || defined(__SYSCALL)
+#define _ASM_C6X_UNISTD_H
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+/* C6X-specific syscalls. */
+#define __NR_cache_sync (__NR_arch_specific_syscall + 0)
+__SYSCALL(__NR_cache_sync, sys_cache_sync)
+
+#endif /* _ASM_C6X_UNISTD_H */
diff --git a/arch/c6x/kernel/Makefile b/arch/c6x/kernel/Makefile
new file mode 100644
index 00000000000..580a515a944
--- /dev/null
+++ b/arch/c6x/kernel/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for arch/c6x/kernel/
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y := process.o traps.o irq.o signal.o ptrace.o
+obj-y += setup.o sys_c6x.o time.o devicetree.o
+obj-y += switch_to.o entry.o vectors.o c6x_ksyms.o
+obj-y += soc.o dma.o
+
+obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/c6x/kernel/asm-offsets.c b/arch/c6x/kernel/asm-offsets.c
new file mode 100644
index 00000000000..759ad6d207b
--- /dev/null
+++ b/arch/c6x/kernel/asm-offsets.c
@@ -0,0 +1,123 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <asm/procinfo.h>
+#include <linux/kbuild.h>
+#include <linux/unistd.h>
+
+void foo(void)
+{
+ OFFSET(REGS_A16, pt_regs, a16);
+ OFFSET(REGS_A17, pt_regs, a17);
+ OFFSET(REGS_A18, pt_regs, a18);
+ OFFSET(REGS_A19, pt_regs, a19);
+ OFFSET(REGS_A20, pt_regs, a20);
+ OFFSET(REGS_A21, pt_regs, a21);
+ OFFSET(REGS_A22, pt_regs, a22);
+ OFFSET(REGS_A23, pt_regs, a23);
+ OFFSET(REGS_A24, pt_regs, a24);
+ OFFSET(REGS_A25, pt_regs, a25);
+ OFFSET(REGS_A26, pt_regs, a26);
+ OFFSET(REGS_A27, pt_regs, a27);
+ OFFSET(REGS_A28, pt_regs, a28);
+ OFFSET(REGS_A29, pt_regs, a29);
+ OFFSET(REGS_A30, pt_regs, a30);
+ OFFSET(REGS_A31, pt_regs, a31);
+
+ OFFSET(REGS_B16, pt_regs, b16);
+ OFFSET(REGS_B17, pt_regs, b17);
+ OFFSET(REGS_B18, pt_regs, b18);
+ OFFSET(REGS_B19, pt_regs, b19);
+ OFFSET(REGS_B20, pt_regs, b20);
+ OFFSET(REGS_B21, pt_regs, b21);
+ OFFSET(REGS_B22, pt_regs, b22);
+ OFFSET(REGS_B23, pt_regs, b23);
+ OFFSET(REGS_B24, pt_regs, b24);
+ OFFSET(REGS_B25, pt_regs, b25);
+ OFFSET(REGS_B26, pt_regs, b26);
+ OFFSET(REGS_B27, pt_regs, b27);
+ OFFSET(REGS_B28, pt_regs, b28);
+ OFFSET(REGS_B29, pt_regs, b29);
+ OFFSET(REGS_B30, pt_regs, b30);
+ OFFSET(REGS_B31, pt_regs, b31);
+
+ OFFSET(REGS_A0, pt_regs, a0);
+ OFFSET(REGS_A1, pt_regs, a1);
+ OFFSET(REGS_A2, pt_regs, a2);
+ OFFSET(REGS_A3, pt_regs, a3);
+ OFFSET(REGS_A4, pt_regs, a4);
+ OFFSET(REGS_A5, pt_regs, a5);
+ OFFSET(REGS_A6, pt_regs, a6);
+ OFFSET(REGS_A7, pt_regs, a7);
+ OFFSET(REGS_A8, pt_regs, a8);
+ OFFSET(REGS_A9, pt_regs, a9);
+ OFFSET(REGS_A10, pt_regs, a10);
+ OFFSET(REGS_A11, pt_regs, a11);
+ OFFSET(REGS_A12, pt_regs, a12);
+ OFFSET(REGS_A13, pt_regs, a13);
+ OFFSET(REGS_A14, pt_regs, a14);
+ OFFSET(REGS_A15, pt_regs, a15);
+
+ OFFSET(REGS_B0, pt_regs, b0);
+ OFFSET(REGS_B1, pt_regs, b1);
+ OFFSET(REGS_B2, pt_regs, b2);
+ OFFSET(REGS_B3, pt_regs, b3);
+ OFFSET(REGS_B4, pt_regs, b4);
+ OFFSET(REGS_B5, pt_regs, b5);
+ OFFSET(REGS_B6, pt_regs, b6);
+ OFFSET(REGS_B7, pt_regs, b7);
+ OFFSET(REGS_B8, pt_regs, b8);
+ OFFSET(REGS_B9, pt_regs, b9);
+ OFFSET(REGS_B10, pt_regs, b10);
+ OFFSET(REGS_B11, pt_regs, b11);
+ OFFSET(REGS_B12, pt_regs, b12);
+ OFFSET(REGS_B13, pt_regs, b13);
+ OFFSET(REGS_DP, pt_regs, dp);
+ OFFSET(REGS_SP, pt_regs, sp);
+
+ OFFSET(REGS_TSR, pt_regs, tsr);
+ OFFSET(REGS_ORIG_A4, pt_regs, orig_a4);
+
+ DEFINE(REGS__END, sizeof(struct pt_regs));
+ BLANK();
+
+ OFFSET(THREAD_PC, thread_struct, pc);
+ OFFSET(THREAD_B15_14, thread_struct, b15_14);
+ OFFSET(THREAD_A15_14, thread_struct, a15_14);
+ OFFSET(THREAD_B13_12, thread_struct, b13_12);
+ OFFSET(THREAD_A13_12, thread_struct, a13_12);
+ OFFSET(THREAD_B11_10, thread_struct, b11_10);
+ OFFSET(THREAD_A11_10, thread_struct, a11_10);
+ OFFSET(THREAD_RICL_ICL, thread_struct, ricl_icl);
+ BLANK();
+
+ OFFSET(TASK_STATE, task_struct, state);
+ BLANK();
+
+ OFFSET(THREAD_INFO_FLAGS, thread_info, flags);
+ OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
+ BLANK();
+
+ /* These would be unnecessary if we ran asm files
+ * through the preprocessor.
+ */
+ DEFINE(KTHREAD_SIZE, THREAD_SIZE);
+ DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
+ DEFINE(KTHREAD_START_SP, THREAD_START_SP);
+ DEFINE(ENOSYS_, ENOSYS);
+ DEFINE(NR_SYSCALLS_, __NR_syscalls);
+
+ DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
+ DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
+ DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
+ DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
+ DEFINE(_TIF_POLLING_NRFLAG, (1<<TIF_POLLING_NRFLAG));
+
+ DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
+ DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
+}
diff --git a/arch/c6x/kernel/c6x_ksyms.c b/arch/c6x/kernel/c6x_ksyms.c
new file mode 100644
index 00000000000..0ba3e0bba3e
--- /dev/null
+++ b/arch/c6x/kernel/c6x_ksyms.c
@@ -0,0 +1,66 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <asm/checksum.h>
+#include <linux/io.h>
+
+/*
+ * libgcc functions - used internally by the compiler...
+ */
+extern int __c6xabi_divi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divi);
+
+extern unsigned __c6xabi_divu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divu);
+
+extern int __c6xabi_remi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_remi);
+
+extern unsigned __c6xabi_remu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_remu);
+
+extern int __c6xabi_divremi(int dividend, int divisor);
+EXPORT_SYMBOL(__c6xabi_divremi);
+
+extern unsigned __c6xabi_divremu(unsigned dividend, unsigned divisor);
+EXPORT_SYMBOL(__c6xabi_divremu);
+
+extern unsigned long long __c6xabi_mpyll(unsigned long long src1,
+ unsigned long long src2);
+EXPORT_SYMBOL(__c6xabi_mpyll);
+
+extern long long __c6xabi_negll(long long src);
+EXPORT_SYMBOL(__c6xabi_negll);
+
+extern unsigned long long __c6xabi_llshl(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshl);
+
+extern long long __c6xabi_llshr(long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshr);
+
+extern unsigned long long __c6xabi_llshru(unsigned long long src1, uint src2);
+EXPORT_SYMBOL(__c6xabi_llshru);
+
+extern void __c6xabi_strasgi(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi);
+
+extern void __c6xabi_push_rts(void);
+EXPORT_SYMBOL(__c6xabi_push_rts);
+
+extern void __c6xabi_pop_rts(void);
+EXPORT_SYMBOL(__c6xabi_pop_rts);
+
+extern void __c6xabi_strasgi_64plus(int *dst, const int *src, unsigned cnt);
+EXPORT_SYMBOL(__c6xabi_strasgi_64plus);
+
+/* lib functions */
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
new file mode 100644
index 00000000000..bdb56f09d0a
--- /dev/null
+++ b/arch/c6x/kernel/devicetree.c
@@ -0,0 +1,53 @@
+/*
+ * Architecture specific OF callbacks.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+void __init early_init_devtree(void *params)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /* Retrieve various information from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line);
+
+ /* Scan memory nodes and rebuild MEMBLOCKs */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ c6x_add_memory(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
new file mode 100644
index 00000000000..ab7b12de144
--- /dev/null
+++ b/arch/c6x/kernel/dma.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+static void c6x_dma_sync(dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = handle;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ L2_cache_block_invalidate(paddr, paddr + size);
+ break;
+ case DMA_TO_DEVICE:
+ L2_cache_block_writeback(paddr, paddr + size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ L2_cache_block_writeback_invalidate(paddr, paddr + size);
+ break;
+ default:
+ break;
+ }
+}
+
+dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr = virt_to_phys(ptr);
+
+ c6x_dma_sync(addr, size, dir);
+
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+ return addr;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+
+void dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ c6x_dma_sync(handle, size, dir);
+
+ debug_dma_unmap_page(dev, handle, size, dir, true);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
+
+int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
+ dir);
+
+ debug_dma_map_sg(dev, sglist, nents, nents, dir);
+
+ return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+
+void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+
+ debug_dma_unmap_sg(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ c6x_dma_sync(handle, size, dir);
+
+ debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
+{
+ c6x_dma_sync(handle, size, dir);
+
+ debug_dma_sync_single_for_device(dev, handle, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg->length, dir);
+
+ debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ dma_sync_single_for_device(dev, sg_dma_address(sg),
+ sg->length, dir);
+
+ debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(dma_init);
diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S
new file mode 100644
index 00000000000..3e977ccda82
--- /dev/null
+++ b/arch/c6x/kernel/entry.S
@@ -0,0 +1,803 @@
+;
+; Port on Texas Instruments TMS320C6x architecture
+;
+; Copyright (C) 2004-2011 Texas Instruments Incorporated
+; Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
+; Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+;
+; This program is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License version 2 as
+; published by the Free Software Foundation.
+;
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+
+; Registers naming
+#define DP B14
+#define SP B15
+
+#ifndef CONFIG_PREEMPT
+#define resume_kernel restore_all
+#endif
+
+ .altmacro
+
+ .macro MASK_INT reg
+ MVC .S2 CSR,reg
+ CLR .S2 reg,0,0,reg
+ MVC .S2 reg,CSR
+ .endm
+
+ .macro UNMASK_INT reg
+ MVC .S2 CSR,reg
+ SET .S2 reg,0,0,reg
+ MVC .S2 reg,CSR
+ .endm
+
+ .macro GET_THREAD_INFO reg
+ SHR .S1X SP,THREAD_SHIFT,reg
+ SHL .S1 reg,THREAD_SHIFT,reg
+ .endm
+
+ ;;
+ ;; This defines the normal kernel pt_regs layout.
+ ;;
+ .macro SAVE_ALL __rp __tsr
+ STW .D2T2 B0,*SP--[2] ; save original B0
+ MVKL .S2 current_ksp,B0
+ MVKH .S2 current_ksp,B0
+ LDW .D2T2 *B0,B1 ; KSP
+
+ NOP 3
+ STW .D2T2 B1,*+SP[1] ; save original B1
+ XOR .D2 SP,B1,B0 ; (SP ^ KSP)
+ LDW .D2T2 *+SP[1],B1 ; restore B0/B1
+ LDW .D2T2 *++SP[2],B0
+ SHR .S2 B0,THREAD_SHIFT,B0 ; 0 if already using kstack
+ [B0] STDW .D2T2 SP:DP,*--B1[1] ; user: save user sp/dp kstack
+ [B0] MV .S2 B1,SP ; and switch to kstack
+||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack
+
+ SUBAW .D2 SP,2,SP
+
+ ADD .D1X SP,-8,A15
+ || STDW .D2T1 A15:A14,*SP--[16] ; save A15:A14
+
+ STDW .D2T2 B13:B12,*SP--[1]
+ || STDW .D1T1 A13:A12,*A15--[1]
+ || MVC .S2 __rp,B13
+
+ STDW .D2T2 B11:B10,*SP--[1]
+ || STDW .D1T1 A11:A10,*A15--[1]
+ || MVC .S2 CSR,B12
+
+ STDW .D2T2 B9:B8,*SP--[1]
+ || STDW .D1T1 A9:A8,*A15--[1]
+ || MVC .S2 RILC,B11
+ STDW .D2T2 B7:B6,*SP--[1]
+ || STDW .D1T1 A7:A6,*A15--[1]
+ || MVC .S2 ILC,B10
+
+ STDW .D2T2 B5:B4,*SP--[1]
+ || STDW .D1T1 A5:A4,*A15--[1]
+
+ STDW .D2T2 B3:B2,*SP--[1]
+ || STDW .D1T1 A3:A2,*A15--[1]
+ || MVC .S2 __tsr,B5
+
+ STDW .D2T2 B1:B0,*SP--[1]
+ || STDW .D1T1 A1:A0,*A15--[1]
+ || MV .S1X B5,A5
+
+ STDW .D2T2 B31:B30,*SP--[1]
+ || STDW .D1T1 A31:A30,*A15--[1]
+ STDW .D2T2 B29:B28,*SP--[1]
+ || STDW .D1T1 A29:A28,*A15--[1]
+ STDW .D2T2 B27:B26,*SP--[1]
+ || STDW .D1T1 A27:A26,*A15--[1]
+ STDW .D2T2 B25:B24,*SP--[1]
+ || STDW .D1T1 A25:A24,*A15--[1]
+ STDW .D2T2 B23:B22,*SP--[1]
+ || STDW .D1T1 A23:A22,*A15--[1]
+ STDW .D2T2 B21:B20,*SP--[1]
+ || STDW .D1T1 A21:A20,*A15--[1]
+ STDW .D2T2 B19:B18,*SP--[1]
+ || STDW .D1T1 A19:A18,*A15--[1]
+ STDW .D2T2 B17:B16,*SP--[1]
+ || STDW .D1T1 A17:A16,*A15--[1]
+
+ STDW .D2T2 B13:B12,*SP--[1] ; save PC and CSR
+
+ STDW .D2T2 B11:B10,*SP--[1] ; save RILC and ILC
+ STDW .D2T1 A5:A4,*SP--[1] ; save TSR and orig A4
+
+ ;; We left an unused word on the stack just above pt_regs.
+ ;; It is used to save whether or not this frame is due to
+ ;; a syscall. It is cleared here, but the syscall handler
+ ;; sets it to a non-zero value.
+ MVK .L2 0,B1
+ STW .D2T2 B1,*+SP(REGS__END+8) ; clear syscall flag
+ .endm
+
+ .macro RESTORE_ALL __rp __tsr
+ LDDW .D2T2 *++SP[1],B9:B8 ; get TSR (B9)
+ LDDW .D2T2 *++SP[1],B11:B10 ; get RILC (B11) and ILC (B10)
+ LDDW .D2T2 *++SP[1],B13:B12 ; get PC (B13) and CSR (B12)
+
+ ADDAW .D1X SP,30,A15
+
+ LDDW .D1T1 *++A15[1],A17:A16
+ || LDDW .D2T2 *++SP[1],B17:B16
+ LDDW .D1T1 *++A15[1],A19:A18
+ || LDDW .D2T2 *++SP[1],B19:B18
+ LDDW .D1T1 *++A15[1],A21:A20
+ || LDDW .D2T2 *++SP[1],B21:B20
+ LDDW .D1T1 *++A15[1],A23:A22
+ || LDDW .D2T2 *++SP[1],B23:B22
+ LDDW .D1T1 *++A15[1],A25:A24
+ || LDDW .D2T2 *++SP[1],B25:B24
+ LDDW .D1T1 *++A15[1],A27:A26
+ || LDDW .D2T2 *++SP[1],B27:B26
+ LDDW .D1T1 *++A15[1],A29:A28
+ || LDDW .D2T2 *++SP[1],B29:B28
+ LDDW .D1T1 *++A15[1],A31:A30
+ || LDDW .D2T2 *++SP[1],B31:B30
+
+ LDDW .D1T1 *++A15[1],A1:A0
+ || LDDW .D2T2 *++SP[1],B1:B0
+
+ LDDW .D1T1 *++A15[1],A3:A2
+ || LDDW .D2T2 *++SP[1],B3:B2
+ || MVC .S2 B9,__tsr
+ LDDW .D1T1 *++A15[1],A5:A4
+ || LDDW .D2T2 *++SP[1],B5:B4
+ || MVC .S2 B11,RILC
+ LDDW .D1T1 *++A15[1],A7:A6
+ || LDDW .D2T2 *++SP[1],B7:B6
+ || MVC .S2 B10,ILC
+
+ LDDW .D1T1 *++A15[1],A9:A8
+ || LDDW .D2T2 *++SP[1],B9:B8
+ || MVC .S2 B13,__rp
+
+ LDDW .D1T1 *++A15[1],A11:A10
+ || LDDW .D2T2 *++SP[1],B11:B10
+ || MVC .S2 B12,CSR
+
+ LDDW .D1T1 *++A15[1],A13:A12
+ || LDDW .D2T2 *++SP[1],B13:B12
+
+ MV .D2X A15,SP
+ || MVKL .S1 current_ksp,A15
+ MVKH .S1 current_ksp,A15
+ || ADDAW .D1X SP,6,A14
+ STW .D1T1 A14,*A15 ; save kernel stack pointer
+
+ LDDW .D2T1 *++SP[1],A15:A14
+
+ B .S2 __rp ; return from interrupt
+ LDDW .D2T2 *+SP[1],SP:DP
+ NOP 4
+ .endm
+
+ .section .text
+
+ ;;
+ ;; Jump to schedule() then return to ret_from_exception
+ ;;
+_reschedule:
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 schedule,A0
+ MVKH .S1 schedule,A0
+ B .S2X A0
+#else
+ B .S1 schedule
+#endif
+ ADDKPC .S2 ret_from_exception,B3,4
+
+ ;;
+ ;; Called before syscall handler when process is being debugged
+ ;;
+tracesys_on:
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 syscall_trace_entry,A0
+ MVKH .S1 syscall_trace_entry,A0
+ B .S2X A0
+#else
+ B .S1 syscall_trace_entry
+#endif
+ ADDKPC .S2 ret_from_syscall_trace,B3,3
+ ADD .S1X 8,SP,A4
+
+ret_from_syscall_trace:
+ ;; tracing returns (possibly new) syscall number
+ MV .D2X A4,B0
+ || MVK .S2 __NR_syscalls,B1
+ CMPLTU .L2 B0,B1,B1
+
+ [!B1] BNOP .S2 ret_from_syscall_function,5
+ || MVK .S1 -ENOSYS,A4
+
+ ;; reload syscall args from (possibly modified) stack frame
+ ;; and get syscall handler addr from sys_call_table:
+ LDW .D2T2 *+SP(REGS_B4+8),B4
+ || MVKL .S2 sys_call_table,B1
+ LDW .D2T1 *+SP(REGS_A6+8),A6
+ || MVKH .S2 sys_call_table,B1
+ LDW .D2T2 *+B1[B0],B0
+ || MVKL .S2 ret_from_syscall_function,B3
+ LDW .D2T2 *+SP(REGS_B6+8),B6
+ || MVKH .S2 ret_from_syscall_function,B3
+ LDW .D2T1 *+SP(REGS_A8+8),A8
+ LDW .D2T2 *+SP(REGS_B8+8),B8
+ NOP
+ ; B0 = sys_call_table[__NR_*]
+ BNOP .S2 B0,5 ; branch to syscall handler
+ || LDW .D2T1 *+SP(REGS_ORIG_A4+8),A4
+
+syscall_exit_work:
+ AND .D1 _TIF_SYSCALL_TRACE,A2,A0
+ [!A0] BNOP .S1 work_pending,5
+ [A0] B .S2 syscall_trace_exit
+ ADDKPC .S2 resume_userspace,B3,1
+ MVC .S2 CSR,B1
+ SET .S2 B1,0,0,B1
+ MVC .S2 B1,CSR ; enable ints
+
+work_pending:
+ AND .D1 _TIF_NEED_RESCHED,A2,A0
+ [!A0] BNOP .S1 work_notifysig,5
+
+work_resched:
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 schedule,A1
+ MVKH .S1 schedule,A1
+ B .S2X A1
+#else
+ B .S2 schedule
+#endif
+ ADDKPC .S2 work_rescheduled,B3,4
+work_rescheduled:
+ ;; make sure we don't miss an interrupt setting need_resched or
+ ;; sigpending between sampling and the rti
+ MASK_INT B2
+ GET_THREAD_INFO A12
+ LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
+ MVK .S1 _TIF_WORK_MASK,A1
+ MVK .S1 _TIF_NEED_RESCHED,A3
+ NOP 2
+ AND .D1 A1,A2,A0
+ || AND .S1 A3,A2,A1
+ [!A0] BNOP .S1 restore_all,5
+ [A1] BNOP .S1 work_resched,5
+
+work_notifysig:
+ B .S2 do_notify_resume
+ LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
+ ADDKPC .S2 resume_userspace,B3,1
+ ADD .S1X 8,SP,A4 ; pt_regs pointer is first arg
+ MV .D2X A2,B4 ; thread_info flags is second arg
+
+ ;;
+ ;; On C64x+, the return path from exceptions and interrupts
+ ;; is slightly different
+ ;;
+ENTRY(ret_from_exception)
+#ifdef CONFIG_PREEMPT
+ MASK_INT B2
+#endif
+
+ENTRY(ret_from_interrupt)
+ ;;
+ ;; Check if we are coming from user mode.
+ ;;
+ LDW .D2T2 *+SP(REGS_TSR+8),B0
+ MVK .S2 0x40,B1
+ NOP 3
+ AND .D2 B0,B1,B0
+ [!B0] BNOP .S2 resume_kernel,5
+
+resume_userspace:
+ ;; make sure we don't miss an interrupt setting need_resched or
+ ;; sigpending between sampling and the rti
+ MASK_INT B2
+ GET_THREAD_INFO A12
+ LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
+ MVK .S1 _TIF_WORK_MASK,A1
+ MVK .S1 _TIF_NEED_RESCHED,A3
+ NOP 2
+ AND .D1 A1,A2,A0
+ [A0] BNOP .S1 work_pending,5
+ BNOP .S1 restore_all,5
+
+ ;;
+ ;; System call handling
+ ;; B0 = syscall number (in sys_call_table)
+ ;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
+ ;; A4 is the return value register
+ ;;
+system_call_saved:
+ MVK .L2 1,B2
+ STW .D2T2 B2,*+SP(REGS__END+8) ; set syscall flag
+ MVC .S2 B2,ECR ; ack the software exception
+
+ UNMASK_INT B2 ; re-enable global IT
+
+system_call_saved_noack:
+ ;; Check system call number
+ MVK .S2 __NR_syscalls,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_ni_syscall,A0
+#endif
+ CMPLTU .L2 B0,B1,B1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKH .S1 sys_ni_syscall,A0
+#endif
+
+ ;; Check for ptrace
+ GET_THREAD_INFO A12
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B1] B .S2X A0
+#else
+ [!B1] B .S2 sys_ni_syscall
+#endif
+ [!B1] ADDKPC .S2 ret_from_syscall_function,B3,4
+
+ ;; Get syscall handler addr from sys_call_table
+ ;; call tracesys_on or call syscall handler
+ LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
+ || MVKL .S2 sys_call_table,B1
+ MVKH .S2 sys_call_table,B1
+ LDW .D2T2 *+B1[B0],B0
+ NOP 2
+ ; A2 = thread_info flags
+ AND .D1 _TIF_SYSCALL_TRACE,A2,A2
+ [A2] BNOP .S1 tracesys_on,5
+ ;; B0 = _sys_call_table[__NR_*]
+ B .S2 B0
+ ADDKPC .S2 ret_from_syscall_function,B3,4
+
+ret_from_syscall_function:
+ STW .D2T1 A4,*+SP(REGS_A4+8) ; save return value in A4
+ ; original A4 is in orig_A4
+syscall_exit:
+ ;; make sure we don't miss an interrupt setting need_resched or
+ ;; sigpending between sampling and the rti
+ MASK_INT B2
+ LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
+ MVK .S1 _TIF_ALLWORK_MASK,A1
+ NOP 3
+ AND .D1 A1,A2,A2 ; check for work to do
+ [A2] BNOP .S1 syscall_exit_work,5
+
+restore_all:
+ RESTORE_ALL NRP,NTSR
+
+ ;;
+ ;; After a fork we jump here directly from resume,
+ ;; so that A4 contains the previous task structure.
+ ;;
+ENTRY(ret_from_fork)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 schedule_tail,A0
+ MVKH .S1 schedule_tail,A0
+ B .S2X A0
+#else
+ B .S2 schedule_tail
+#endif
+ ADDKPC .S2 ret_from_fork_2,B3,4
+ret_from_fork_2:
+ ;; return 0 in A4 for child process
+ GET_THREAD_INFO A12
+ BNOP .S2 syscall_exit,3
+ MVK .L2 0,B0
+ STW .D2T2 B0,*+SP(REGS_A4+8)
+ENDPROC(ret_from_fork)
+
+ ;;
+ ;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
+ ;; int6 is used for syscalls (see _system_call entry)
+ ;;
+ .macro SAVE_ALL_INT
+ SAVE_ALL IRP,ITSR
+ .endm
+
+ .macro CALL_INT int
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 c6x_do_IRQ,A0
+ MVKH .S1 c6x_do_IRQ,A0
+ BNOP .S2X A0,1
+ MVK .S1 int,A4
+ ADDAW .D2 SP,2,B4
+ MVKL .S2 ret_from_interrupt,B3
+ MVKH .S2 ret_from_interrupt,B3
+#else
+ CALLP .S2 c6x_do_IRQ,B3
+ || MVK .S1 int,A4
+ || ADDAW .D2 SP,2,B4
+ B .S1 ret_from_interrupt
+ NOP 5
+#endif
+ .endm
+
+ENTRY(_int4_handler)
+ SAVE_ALL_INT
+ CALL_INT 4
+ENDPROC(_int4_handler)
+
+ENTRY(_int5_handler)
+ SAVE_ALL_INT
+ CALL_INT 5
+ENDPROC(_int5_handler)
+
+ENTRY(_int6_handler)
+ SAVE_ALL_INT
+ CALL_INT 6
+ENDPROC(_int6_handler)
+
+ENTRY(_int7_handler)
+ SAVE_ALL_INT
+ CALL_INT 7
+ENDPROC(_int7_handler)
+
+ENTRY(_int8_handler)
+ SAVE_ALL_INT
+ CALL_INT 8
+ENDPROC(_int8_handler)
+
+ENTRY(_int9_handler)
+ SAVE_ALL_INT
+ CALL_INT 9
+ENDPROC(_int9_handler)
+
+ENTRY(_int10_handler)
+ SAVE_ALL_INT
+ CALL_INT 10
+ENDPROC(_int10_handler)
+
+ENTRY(_int11_handler)
+ SAVE_ALL_INT
+ CALL_INT 11
+ENDPROC(_int11_handler)
+
+ENTRY(_int12_handler)
+ SAVE_ALL_INT
+ CALL_INT 12
+ENDPROC(_int12_handler)
+
+ENTRY(_int13_handler)
+ SAVE_ALL_INT
+ CALL_INT 13
+ENDPROC(_int13_handler)
+
+ENTRY(_int14_handler)
+ SAVE_ALL_INT
+ CALL_INT 14
+ENDPROC(_int14_handler)
+
+ENTRY(_int15_handler)
+ SAVE_ALL_INT
+ CALL_INT 15
+ENDPROC(_int15_handler)
+
+ ;;
+ ;; Handler for uninitialized and spurious interrupts
+ ;;
+ENTRY(_bad_interrupt)
+ B .S2 IRP
+ NOP 5
+ENDPROC(_bad_interrupt)
+
+ ;;
+ ;; Entry for NMI/exceptions/syscall
+ ;;
+ENTRY(_nmi_handler)
+ SAVE_ALL NRP,NTSR
+
+ MVC .S2 EFR,B2
+ CMPEQ .L2 1,B2,B2
+ || MVC .S2 TSR,B1
+ CLR .S2 B1,10,10,B1
+ MVC .S2 B1,TSR
+#ifdef CONFIG_C6X_BIG_KERNEL
+ [!B2] MVKL .S1 process_exception,A0
+ [!B2] MVKH .S1 process_exception,A0
+ [!B2] B .S2X A0
+#else
+ [!B2] B .S2 process_exception
+#endif
+ [B2] B .S2 system_call_saved
+ [!B2] ADDAW .D2 SP,2,B1
+ [!B2] MV .D1X B1,A4
+ ADDKPC .S2 ret_from_trap,B3,2
+
+ret_from_trap:
+ MV .D2X A4,B0
+ [!B0] BNOP .S2 ret_from_exception,5
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S2 system_call_saved_noack,B3
+ MVKH .S2 system_call_saved_noack,B3
+#endif
+ LDW .D2T2 *+SP(REGS_B0+8),B0
+ LDW .D2T1 *+SP(REGS_A4+8),A4
+ LDW .D2T2 *+SP(REGS_B4+8),B4
+ LDW .D2T1 *+SP(REGS_A6+8),A6
+ LDW .D2T2 *+SP(REGS_B6+8),B6
+ LDW .D2T1 *+SP(REGS_A8+8),A8
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || B .S2 B3
+#else
+ || B .S2 system_call_saved_noack
+#endif
+ LDW .D2T2 *+SP(REGS_B8+8),B8
+ NOP 4
+ENDPROC(_nmi_handler)
+
+ ;;
+ ;; Jump to schedule() then return to ret_from_isr
+ ;;
+#ifdef CONFIG_PREEMPT
+resume_kernel:
+ GET_THREAD_INFO A12
+ LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
+ NOP 4
+ [A1] BNOP .S2 restore_all,5
+
+preempt_schedule:
+ GET_THREAD_INFO A2
+ LDW .D1T1 *+A2(THREAD_INFO_FLAGS),A1
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S2 preempt_schedule_irq,B0
+ MVKH .S2 preempt_schedule_irq,B0
+ NOP 2
+#else
+ NOP 4
+#endif
+ AND .D1 _TIF_NEED_RESCHED,A1,A1
+ [!A1] BNOP .S2 restore_all,5
+#ifdef CONFIG_C6X_BIG_KERNEL
+ B .S2 B0
+#else
+ B .S2 preempt_schedule_irq
+#endif
+ ADDKPC .S2 preempt_schedule,B3,4
+#endif /* CONFIG_PREEMPT */
+
+ENTRY(enable_exception)
+ DINT
+ MVC .S2 TSR,B0
+ MVC .S2 B3,NRP
+ MVK .L2 0xc,B1
+ OR .D2 B0,B1,B0
+ MVC .S2 B0,TSR ; Set GEE and XEN in TSR
+ B .S2 NRP
+ NOP 5
+ENDPROC(enable_exception)
+
+ENTRY(sys_sigaltstack)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 do_sigaltstack,A0 ; branch to do_sigaltstack
+ MVKH .S1 do_sigaltstack,A0
+ B .S2X A0
+#else
+ B .S2 do_sigaltstack
+#endif
+ LDW .D2T1 *+SP(REGS_SP+8),A6
+ NOP 4
+ENDPROC(sys_sigaltstack)
+
+ ;; kernel_execve
+ENTRY(kernel_execve)
+ MVK .S2 __NR_execve,B0
+ SWE
+ BNOP .S2 B3,5
+ENDPROC(kernel_execve)
+
+ ;;
+ ;; Special system calls
+ ;; return address is in B3
+ ;;
+ENTRY(sys_clone)
+ ADD .D1X SP,8,A4
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_c6x_clone,A0
+ MVKH .S1 sys_c6x_clone,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 sys_c6x_clone
+ NOP 5
+#endif
+ENDPROC(sys_clone)
+
+ENTRY(sys_rt_sigreturn)
+ ADD .D1X SP,8,A4
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 do_rt_sigreturn,A0
+ MVKH .S1 do_rt_sigreturn,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 do_rt_sigreturn
+ NOP 5
+#endif
+ENDPROC(sys_rt_sigreturn)
+
+ENTRY(sys_execve)
+ ADDAW .D2 SP,2,B6 ; put regs addr in 4th parameter
+ ; & adjust regs stack addr
+ LDW .D2T2 *+SP(REGS_B4+8),B4
+
+ ;; c6x_execve(char *name, char **argv,
+ ;; char **envp, struct pt_regs *regs)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_c6x_execve,A0
+ MVKH .S1 sys_c6x_execve,A0
+ B .S2X A0
+#else
+ || B .S2 sys_c6x_execve
+#endif
+ STW .D2T2 B3,*SP--[2]
+ ADDKPC .S2 ret_from_c6x_execve,B3,3
+
+ret_from_c6x_execve:
+ LDW .D2T2 *++SP[2],B3
+ NOP 4
+ BNOP .S2 B3,5
+ENDPROC(sys_execve)
+
+ENTRY(sys_pread_c6x)
+ MV .D2X A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_pread64,A0
+ MVKH .S1 sys_pread64,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 sys_pread64
+ NOP 5
+#endif
+ENDPROC(sys_pread_c6x)
+
+ENTRY(sys_pwrite_c6x)
+ MV .D2X A8,B7
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_pwrite64,A0
+ MVKH .S1 sys_pwrite64,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 sys_pwrite64
+ NOP 5
+#endif
+ENDPROC(sys_pwrite_c6x)
+
+;; On Entry
+;; A4 - path
+;; B4 - offset_lo (LE), offset_hi (BE)
+;; A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_truncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ MV .S2 B4,B5
+ MV .D2X A6,B4
+#else
+ MV .D2X A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_truncate64,A0
+ MVKH .S1 sys_truncate64,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 sys_truncate64
+ NOP 5
+#endif
+ENDPROC(sys_truncate64_c6x)
+
+;; On Entry
+;; A4 - fd
+;; B4 - offset_lo (LE), offset_hi (BE)
+;; A6 - offset_lo (BE), offset_hi (LE)
+ENTRY(sys_ftruncate64_c6x)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ MV .S2 B4,B5
+ MV .D2X A6,B4
+#else
+ MV .D2X A6,B5
+#endif
+#ifdef CONFIG_C6X_BIG_KERNEL
+ || MVKL .S1 sys_ftruncate64,A0
+ MVKH .S1 sys_ftruncate64,A0
+ BNOP .S2X A0,5
+#else
+ || B .S2 sys_ftruncate64
+ NOP 5
+#endif
+ENDPROC(sys_ftruncate64_c6x)
+
+#ifdef __ARCH_WANT_SYSCALL_OFF_T
+;; On Entry
+;; A4 - fd
+;; B4 - offset_lo (LE), offset_hi (BE)
+;; A6 - offset_lo (BE), offset_hi (LE)
+;; B6 - len
+;; A8 - advice
+ENTRY(sys_fadvise64_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 sys_fadvise64,A0
+ MVKH .S1 sys_fadvise64,A0
+ BNOP .S2X A0,2
+#else
+ B .S2 sys_fadvise64
+ NOP 2
+#endif
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ MV .L2 B4,B5
+ || MV .D2X A6,B4
+#else
+ MV .D2X A6,B5
+#endif
+ MV .D1X B6,A6
+ MV .D2X A8,B6
+#endif
+ENDPROC(sys_fadvise64_c6x)
+
+;; On Entry
+;; A4 - fd
+;; B4 - offset_lo (LE), offset_hi (BE)
+;; A6 - offset_lo (BE), offset_hi (LE)
+;; B6 - len_lo (LE), len_hi (BE)
+;; A8 - len_lo (BE), len_hi (LE)
+;; B8 - advice
+ENTRY(sys_fadvise64_64_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 sys_fadvise64_64,A0
+ MVKH .S1 sys_fadvise64_64,A0
+ BNOP .S2X A0,2
+#else
+ B .S2 sys_fadvise64_64
+ NOP 2
+#endif
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ MV .L2 B4,B5
+ || MV .D2X A6,B4
+ MV .L1 A8,A6
+ || MV .D1X B6,A7
+#else
+ MV .D2X A6,B5
+ MV .L1 A8,A7
+ || MV .D1X B6,A6
+#endif
+ MV .L2 B8,B6
+ENDPROC(sys_fadvise64_64_c6x)
+
+;; On Entry
+;; A4 - fd
+;; B4 - mode
+;; A6 - offset_hi
+;; B6 - offset_lo
+;; A8 - len_hi
+;; B8 - len_lo
+ENTRY(sys_fallocate_c6x)
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 sys_fallocate,A0
+ MVKH .S1 sys_fallocate,A0
+ BNOP .S2X A0,1
+#else
+ B .S2 sys_fallocate
+ NOP
+#endif
+ MV .D1 A6,A7
+ MV .D1X B6,A6
+ MV .D2X A8,B7
+ MV .D2 B8,B6
+ENDPROC(sys_fallocate_c6x)
+
+ ;; put this in .neardata for faster access when using DSBT mode
+ .section .neardata,"aw",@progbits
+ .global current_ksp
+ .hidden current_ksp
+current_ksp:
+ .word init_thread_union + THREAD_START_SP
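(Editorial aside, not part of the patch.) The MASK_INT/UNMASK_INT macros near the top of this file simply clear or set the GIE bit (bit 0) of CSR. A rough C-level analogue, assuming the and_creg()/or_creg() accessors used by irq.c and setup.c in this patch also accept CSR, would be:

/* Sketch only: C equivalent of the MASK_INT/UNMASK_INT assembly macros. */
static inline void c6x_mask_global_ints(void)
{
	and_creg(CSR, ~1);	/* clear GIE (CSR bit 0) */
}

static inline void c6x_unmask_global_ints(void)
{
	or_creg(CSR, 1);	/* set GIE (CSR bit 0) */
}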
diff --git a/arch/c6x/kernel/head.S b/arch/c6x/kernel/head.S
new file mode 100644
index 00000000000..133eab6edf6
--- /dev/null
+++ b/arch/c6x/kernel/head.S
@@ -0,0 +1,84 @@
+;
+; Port on Texas Instruments TMS320C6x architecture
+;
+; Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+; This program is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License version 2 as
+; published by the Free Software Foundation.
+;
+#include <linux/linkage.h>
+#include <linux/of_fdt.h>
+#include <asm/asm-offsets.h>
+
+ __HEAD
+ENTRY(_c_int00)
+ ;; Save magic and pointer
+ MV .S1 A4,A10
+ MV .S2 B4,B10
+ MVKL .S2 __bss_start,B5
+ MVKH .S2 __bss_start,B5
+ MVKL .S2 __bss_stop,B6
+ MVKH .S2 __bss_stop,B6
+ SUB .L2 B6,B5,B6 ; bss size
+
+ ;; Set the stack pointer
+ MVKL .S2 current_ksp,B0
+ MVKH .S2 current_ksp,B0
+ LDW .D2T2 *B0,B15
+
+ ;; clear bss
+ SHR .S2 B6,3,B0 ; number of dwords to clear
+ ZERO .L2 B13
+ ZERO .L2 B12
+bss_loop:
+ BDEC .S2 bss_loop,B0
+ NOP 3
+ CMPLT .L2 B0,0,B1
+ [!B1] STDW .D2T2 B13:B12,*B5++[1]
+
+ NOP 4
+ AND .D2 ~7,B15,B15
+
+ ;; Clear GIE and PGIE
+ MVC .S2 CSR,B2
+ CLR .S2 B2,0,1,B2
+ MVC .S2 B2,CSR
+ MVC .S2 TSR,B2
+ CLR .S2 B2,0,1,B2
+ MVC .S2 B2,TSR
+ MVC .S2 ITSR,B2
+ CLR .S2 B2,0,1,B2
+ MVC .S2 B2,ITSR
+ MVC .S2 NTSR,B2
+ CLR .S2 B2,0,1,B2
+ MVC .S2 B2,NTSR
+
+ ;; pass DTB pointer to machine_init (or zero if none)
+ MVKL .S1 OF_DT_HEADER,A0
+ MVKH .S1 OF_DT_HEADER,A0
+ CMPEQ .L1 A10,A0,A0
+ [A0] MV .S1X B10,A4
+ [!A0] MVK .S1 0,A4
+
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 machine_init,A0
+ MVKH .S1 machine_init,A0
+ B .S2X A0
+ ADDKPC .S2 0f,B3,4
+0:
+#else
+ CALLP .S2 machine_init,B3
+#endif
+
+ ;; Jump to Linux init
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 start_kernel,A0
+ MVKH .S1 start_kernel,A0
+ B .S2X A0
+#else
+ B .S2 start_kernel
+#endif
+ NOP 5
+L1: BNOP .S2 L1,5
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
new file mode 100644
index 00000000000..0929e4b2b24
--- /dev/null
+++ b/arch/c6x/kernel/irq.c
@@ -0,0 +1,728 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ *
+ * This borrows heavily from powerpc version, which is:
+ *
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/megamod-pic.h>
+
+unsigned long irq_err_count;
+
+static DEFINE_RAW_SPINLOCK(core_irq_lock);
+
+static void mask_core_irq(struct irq_data *data)
+{
+ unsigned int prio = data->irq;
+
+ BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
+
+ raw_spin_lock(&core_irq_lock);
+ and_creg(IER, ~(1 << prio));
+ raw_spin_unlock(&core_irq_lock);
+}
+
+static void unmask_core_irq(struct irq_data *data)
+{
+ unsigned int prio = data->irq;
+
+ raw_spin_lock(&core_irq_lock);
+ or_creg(IER, 1 << prio);
+ raw_spin_unlock(&core_irq_lock);
+}
+
+static struct irq_chip core_chip = {
+ .name = "core",
+ .irq_mask = mask_core_irq,
+ .irq_unmask = unmask_core_irq,
+};
+
+asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
+
+ generic_handle_irq(prio);
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+}
+
+static struct irq_host *core_host;
+
+static int core_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ if (hw < 4 || hw >= NR_PRIORITY_IRQS)
+ return -EINVAL;
+
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
+ return 0;
+}
+
+static struct irq_host_ops core_host_ops = {
+ .map = core_host_map,
+};
+
+void __init init_IRQ(void)
+{
+ struct device_node *np;
+
+ /* Mask all priority IRQs */
+ and_creg(IER, ~0xfff0);
+
+ np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
+ if (np != NULL) {
+ /* create the core host */
+ core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
+ &core_host_ops, 0);
+ if (core_host)
+ irq_set_default_host(core_host);
+ of_node_put(np);
+ }
+
+ printk(KERN_INFO "Core interrupt controller initialized\n");
+
+ /* now we're ready for other SoC controllers */
+ megamod_pic_init();
+
+ /* Clear all general IRQ flags */
+ set_creg(ICR, 0xfff0);
+}
+
+void ack_bad_irq(int irq)
+{
+ printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+ irq_err_count++;
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+ return 0;
+}
+
+/*
+ * IRQ controller and virtual interrupts
+ */
+
+/* The main irq map itself is an array of NR_IRQS entries containing the
+ * associated host and irq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free; the allocator first sets
+ * hwirq to the host's invalid irq number and then calls the host's ops
+ * to complete the mapping.
+ */
+struct irq_map_entry {
+ irq_hw_number_t hwirq;
+ struct irq_host *host;
+};
+
+static LIST_HEAD(irq_hosts);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_map_entry irq_map[NR_IRQS];
+static unsigned int irq_virq_count = NR_IRQS;
+static struct irq_host *irq_default_host;
+
+irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+ return irq_map[d->irq].hwirq;
+}
+EXPORT_SYMBOL_GPL(irqd_to_hwirq);
+
+irq_hw_number_t virq_to_hw(unsigned int virq)
+{
+ return irq_map[virq].hwirq;
+}
+EXPORT_SYMBOL_GPL(virq_to_hw);
+
+bool virq_is_host(unsigned int virq, struct irq_host *host)
+{
+ return irq_map[virq].host == host;
+}
+EXPORT_SYMBOL_GPL(virq_is_host);
+
+static int default_irq_host_match(struct irq_host *h, struct device_node *np)
+{
+ return h->of_node != NULL && h->of_node == np;
+}
+
+struct irq_host *irq_alloc_host(struct device_node *of_node,
+ unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq)
+{
+ struct irq_host *host;
+ unsigned int size = sizeof(struct irq_host);
+ unsigned int i;
+ unsigned int *rmap;
+ unsigned long flags;
+
+ /* Allocate structure and revmap table if using linear mapping */
+ if (revmap_type == IRQ_HOST_MAP_LINEAR)
+ size += revmap_arg * sizeof(unsigned int);
+ host = kzalloc(size, GFP_KERNEL);
+ if (host == NULL)
+ return NULL;
+
+ /* Fill structure */
+ host->revmap_type = revmap_type;
+ host->inval_irq = inval_irq;
+ host->ops = ops;
+ host->of_node = of_node_get(of_node);
+
+ if (host->ops->match == NULL)
+ host->ops->match = default_irq_host_match;
+
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* Check for the priority controller. */
+ if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
+ if (irq_map[0].host != NULL) {
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+ of_node_put(host->of_node);
+ kfree(host);
+ return NULL;
+ }
+ irq_map[0].host = host;
+ }
+
+ list_add(&host->link, &irq_hosts);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+
+ /* Additional setup per revmap type */
+ switch (revmap_type) {
+ case IRQ_HOST_MAP_PRIORITY:
+ /* 0 is always the invalid number for priority */
+ host->inval_irq = 0;
+ /* set ourselves up as the host for all priority interrupts */
+ for (i = 1; i < NR_PRIORITY_IRQS; i++) {
+ irq_map[i].hwirq = i;
+ smp_wmb();
+ irq_map[i].host = host;
+ smp_wmb();
+
+ ops->map(host, i, i);
+ }
+ break;
+ case IRQ_HOST_MAP_LINEAR:
+ rmap = (unsigned int *)(host + 1);
+ for (i = 0; i < revmap_arg; i++)
+ rmap[i] = NO_IRQ;
+ host->revmap_data.linear.size = revmap_arg;
+ smp_wmb();
+ host->revmap_data.linear.revmap = rmap;
+ break;
+ case IRQ_HOST_MAP_TREE:
+ INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
+
+ return host;
+}
+
+struct irq_host *irq_find_host(struct device_node *node)
+{
+ struct irq_host *h, *found = NULL;
+ unsigned long flags;
+
+ /* We might want to match the legacy controller last since
+ * it might potentially be set to match all interrupts in
+ * the absence of a device node. This hasn't been a problem
+ * so far, though.
+ */
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link)
+ if (h->ops->match(h, node)) {
+ found = h;
+ break;
+ }
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+ return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+void irq_set_default_host(struct irq_host *host)
+{
+ pr_debug("irq: Default host set to @0x%p\n", host);
+
+ irq_default_host = host;
+}
+
+void irq_set_virq_count(unsigned int count)
+{
+ pr_debug("irq: Trying to set virq count to %d\n", count);
+
+ BUG_ON(count < NR_PRIORITY_IRQS);
+ if (count < NR_IRQS)
+ irq_virq_count = count;
+}
+
+static int irq_setup_virq(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ int res;
+
+ res = irq_alloc_desc_at(virq, 0);
+ if (res != virq) {
+ pr_debug("irq: -> allocating desc failed\n");
+ goto error;
+ }
+
+ /* map it */
+ smp_wmb();
+ irq_map[virq].hwirq = hwirq;
+ smp_mb();
+
+ if (host->ops->map(host, virq, hwirq)) {
+ pr_debug("irq: -> mapping failed, freeing\n");
+ goto errdesc;
+ }
+
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
+ return 0;
+
+errdesc:
+ irq_free_descs(virq, 1);
+error:
+ irq_free_virt(virq, 1);
+ return -1;
+}
+
+unsigned int irq_create_direct_mapping(struct irq_host *host)
+{
+ unsigned int virq;
+
+ if (host == NULL)
+ host = irq_default_host;
+
+ BUG_ON(host == NULL);
+ WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
+
+ virq = irq_alloc_virt(host, 1, 0);
+ if (virq == NO_IRQ) {
+ pr_debug("irq: create_direct virq allocation failed\n");
+ return NO_IRQ;
+ }
+
+ pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+ if (irq_setup_virq(host, virq, virq))
+ return NO_IRQ;
+
+ return virq;
+}
+
+unsigned int irq_create_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ unsigned int virq, hint;
+
+ pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
+
+ /* Look for default host if necessary */
+ if (host == NULL)
+ host = irq_default_host;
+ if (host == NULL) {
+ printk(KERN_WARNING "irq_create_mapping called for"
+ " NULL host, hwirq=%lx\n", hwirq);
+ WARN_ON(1);
+ return NO_IRQ;
+ }
+ pr_debug("irq: -> using host @%p\n", host);
+
+ /* Check if mapping already exists */
+ virq = irq_find_mapping(host, hwirq);
+ if (virq != NO_IRQ) {
+ pr_debug("irq: -> existing mapping on virq %d\n", virq);
+ return virq;
+ }
+
+ /* Allocate a virtual interrupt number */
+ hint = hwirq % irq_virq_count;
+ virq = irq_alloc_virt(host, 1, hint);
+ if (virq == NO_IRQ) {
+ pr_debug("irq: -> virq allocation failed\n");
+ return NO_IRQ;
+ }
+
+ if (irq_setup_virq(host, virq, hwirq))
+ return NO_IRQ;
+
+ pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
+ hwirq, host->of_node ? host->of_node->full_name : "null", virq);
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ struct irq_host *host;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ unsigned int virq;
+
+ if (controller == NULL)
+ host = irq_default_host;
+ else
+ host = irq_find_host(controller);
+ if (host == NULL) {
+ printk(KERN_WARNING "irq: no irq host found for %s !\n",
+ controller->full_name);
+ return NO_IRQ;
+ }
+
+ /* If host has no translation, then we assume interrupt line */
+ if (host->ops->xlate == NULL)
+ hwirq = intspec[0];
+ else {
+ if (host->ops->xlate(host, controller, intspec, intsize,
+ &hwirq, &type))
+ return NO_IRQ;
+ }
+
+ /* Create mapping */
+ virq = irq_create_mapping(host, hwirq);
+ if (virq == NO_IRQ)
+ return virq;
+
+ /* Set type if specified and different than the current one */
+ if (type != IRQ_TYPE_NONE &&
+ type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+ irq_set_irq_type(virq, type);
+ return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+
+void irq_dispose_mapping(unsigned int virq)
+{
+ struct irq_host *host;
+ irq_hw_number_t hwirq;
+
+ if (virq == NO_IRQ)
+ return;
+
+ /* Never unmap priority interrupts */
+ if (virq < NR_PRIORITY_IRQS)
+ return;
+
+ host = irq_map[virq].host;
+ if (WARN_ON(host == NULL))
+ return;
+
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+ /* remove chip and handler */
+ irq_set_chip_and_handler(virq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(virq);
+
+ /* Tell the PIC about it */
+ if (host->ops->unmap)
+ host->ops->unmap(host, virq);
+ smp_mb();
+
+ /* Clear reverse map */
+ hwirq = irq_map[virq].hwirq;
+ switch (host->revmap_type) {
+ case IRQ_HOST_MAP_LINEAR:
+ if (hwirq < host->revmap_data.linear.size)
+ host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
+ break;
+ case IRQ_HOST_MAP_TREE:
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_delete(&host->revmap_data.tree, hwirq);
+ mutex_unlock(&revmap_trees_mutex);
+ break;
+ }
+
+ /* Destroy map */
+ smp_mb();
+ irq_map[virq].hwirq = host->inval_irq;
+
+ irq_free_descs(virq, 1);
+ /* Free it */
+ irq_free_virt(virq, 1);
+}
+EXPORT_SYMBOL_GPL(irq_dispose_mapping);
+
+unsigned int irq_find_mapping(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ unsigned int i;
+ unsigned int hint = hwirq % irq_virq_count;
+
+ /* Look for default host if necessary */
+ if (host == NULL)
+ host = irq_default_host;
+ if (host == NULL)
+ return NO_IRQ;
+
+ /* Slow path does a linear search of the map */
+ i = hint;
+ do {
+ if (irq_map[i].host == host &&
+ irq_map[i].hwirq == hwirq)
+ return i;
+ i++;
+ if (i >= irq_virq_count)
+ i = 4;
+ } while (i != hint);
+ return NO_IRQ;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
+
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ struct irq_map_entry *ptr;
+ unsigned int virq;
+
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
+ return irq_find_mapping(host, hwirq);
+
+ /*
+ * The ptr returned references the static global irq_map,
+ * but freeing an irq can delete radix tree nodes along the
+ * lookup path (the nodes are freed via call_rcu), hence the
+ * rcu_read_lock() around the lookup.
+ */
+ rcu_read_lock();
+ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
+ rcu_read_unlock();
+
+ /*
+ * If found in radix tree, then fine.
+ * Else fallback to linear lookup - this should not happen in practice
+ * as it means that we failed to insert the node in the radix tree.
+ */
+ if (ptr)
+ virq = ptr - irq_map;
+ else
+ virq = irq_find_mapping(host, hwirq);
+
+ return virq;
+}
+
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
+ return;
+
+ if (virq != NO_IRQ) {
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_insert(&host->revmap_data.tree, hwirq,
+ &irq_map[virq]);
+ mutex_unlock(&revmap_trees_mutex);
+ }
+}
+
+unsigned int irq_linear_revmap(struct irq_host *host,
+ irq_hw_number_t hwirq)
+{
+ unsigned int *revmap;
+
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+ return irq_find_mapping(host, hwirq);
+
+ /* Check revmap bounds */
+ if (unlikely(hwirq >= host->revmap_data.linear.size))
+ return irq_find_mapping(host, hwirq);
+
+ /* Check if revmap was allocated */
+ revmap = host->revmap_data.linear.revmap;
+ if (unlikely(revmap == NULL))
+ return irq_find_mapping(host, hwirq);
+
+ /* Fill up revmap with slow path if no mapping found */
+ if (unlikely(revmap[hwirq] == NO_IRQ))
+ revmap[hwirq] = irq_find_mapping(host, hwirq);
+
+ return revmap[hwirq];
+}
+
+unsigned int irq_alloc_virt(struct irq_host *host,
+ unsigned int count,
+ unsigned int hint)
+{
+ unsigned long flags;
+ unsigned int i, j, found = NO_IRQ;
+
+ if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
+ return NO_IRQ;
+
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* Use hint for 1 interrupt if any */
+ if (count == 1 && hint >= NR_PRIORITY_IRQS &&
+ hint < irq_virq_count && irq_map[hint].host == NULL) {
+ found = hint;
+ goto hint_found;
+ }
+
+ /* Look for count consecutive numbers in the allocatable
+ * (non-legacy) space
+ */
+ for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
+ if (irq_map[i].host != NULL)
+ j = 0;
+ else
+ j++;
+
+ if (j == count) {
+ found = i - count + 1;
+ break;
+ }
+ }
+ if (found == NO_IRQ) {
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+ return NO_IRQ;
+ }
+ hint_found:
+ for (i = found; i < (found + count); i++) {
+ irq_map[i].hwirq = host->inval_irq;
+ smp_wmb();
+ irq_map[i].host = host;
+ }
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+ return found;
+}
+
+void irq_free_virt(unsigned int virq, unsigned int count)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ WARN_ON(virq < NR_PRIORITY_IRQS);
+ WARN_ON(count == 0 || (virq + count) > irq_virq_count);
+
+ if (virq < NR_PRIORITY_IRQS) {
+ if (virq + count < NR_PRIORITY_IRQS)
+ return;
+ count -= NR_PRIORITY_IRQS - virq;
+ virq = NR_PRIORITY_IRQS;
+ }
+
+ if (count > irq_virq_count || virq > irq_virq_count - count) {
+ if (virq > irq_virq_count)
+ return;
+ count = irq_virq_count - virq;
+ }
+
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
+ for (i = virq; i < (virq + count); i++) {
+ struct irq_host *host;
+
+ host = irq_map[i].host;
+ irq_map[i].hwirq = host->inval_irq;
+ smp_wmb();
+ irq_map[i].host = NULL;
+ }
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+#ifdef CONFIG_VIRQ_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+ unsigned long flags;
+ struct irq_desc *desc;
+ const char *p;
+ static const char none[] = "none";
+ void *data;
+ int i;
+
+ seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
+ "chip name", "chip data", "host name");
+
+ for (i = 1; i < nr_irqs; i++) {
+ desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ if (desc->action && desc->action->handler) {
+ struct irq_chip *chip;
+
+ seq_printf(m, "%5d ", i);
+ seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
+
+ chip = irq_desc_get_chip(desc);
+ if (chip && chip->name)
+ p = chip->name;
+ else
+ p = none;
+ seq_printf(m, "%-15s ", p);
+
+ data = irq_desc_get_chip_data(desc);
+ seq_printf(m, "0x%16p ", data);
+
+ if (irq_map[i].host && irq_map[i].host->of_node)
+ p = irq_map[i].host->of_node->full_name;
+ else
+ p = none;
+ seq_printf(m, "%s\n", p);
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+ .open = virq_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+ if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
+ NULL, &virq_debug_fops) == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+device_initcall(irq_debugfs_init);
+#endif /* CONFIG_VIRQ_DEBUG */
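(Editorial aside, not part of the patch.) A minimal sketch of how an SoC interrupt controller driver would consume the irq_host API defined above; the ops, line count and hwirq value below are hypothetical, and dummy_irq_chip/handle_level_irq come from the generic irq core.

static int my_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops my_pic_ops = {
	.map = my_pic_map,
};

static void __init my_pic_probe(struct device_node *np)
{
	struct irq_host *host;
	unsigned int virq;

	/* 32 hw lines, linear reverse map, hwirq 0 used as the invalid value */
	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 32, &my_pic_ops, 0);
	if (!host)
		return;

	virq = irq_create_mapping(host, 5);	/* hw line 5 -> Linux virq */
	if (virq != NO_IRQ)
		pr_info("hw irq 5 mapped to virq %u\n", virq);
}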
diff --git a/arch/c6x/kernel/module.c b/arch/c6x/kernel/module.c
new file mode 100644
index 00000000000..5fc03f18f56
--- /dev/null
+++ b/arch/c6x/kernel/module.c
@@ -0,0 +1,123 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2005, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Thomas Charleux (thomas.charleux@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+
+static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift)
+{
+ u32 opcode;
+ long ep = (long)ip & ~31;
+ long delta = ((long)dest - ep) >> 2;
+ long mask = (1 << maskbits) - 1;
+
+ if ((delta >> (maskbits - 1)) == 0 ||
+ (delta >> (maskbits - 1)) == -1) {
+ opcode = *ip;
+ opcode &= ~(mask << shift);
+ opcode |= ((delta & mask) << shift);
+ *ip = opcode;
+
+ pr_debug("REL PCR_S%d[%p] dest[%p] opcode[%08x]\n",
+ maskbits, ip, (void *)dest, opcode);
+
+ return 0;
+ }
+ pr_err("PCR_S%d reloc %p -> %p out of range!\n",
+ maskbits, ip, (void *)dest);
+
+ return -1;
+}
+
+/*
+ * apply a RELA relocation
+ */
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf_Sym *sym;
+ u32 *location, opcode;
+ unsigned int i;
+ Elf32_Addr v;
+ Elf_Addr offset = 0;
+
+ pr_debug("Applying relocate section %u to %u with offset 0x%x\n",
+ relsec, sechdrs[relsec].sh_info, offset);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset - offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+
+ /* this is the adjustment to be made */
+ v = sym->st_value + rel[i].r_addend;
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_C6000_ABS32:
+ pr_debug("RELA ABS32: [%p] = 0x%x\n", location, v);
+ *location = v;
+ break;
+ case R_C6000_ABS16:
+ pr_debug("RELA ABS16: [%p] = 0x%x\n", location, v);
+ *(u16 *)location = v;
+ break;
+ case R_C6000_ABS8:
+ pr_debug("RELA ABS8: [%p] = 0x%x\n", location, v);
+ *(u8 *)location = v;
+ break;
+ case R_C6000_ABS_L16:
+ opcode = *location;
+ opcode &= ~0x7fff80;
+ opcode |= ((v & 0xffff) << 7);
+ pr_debug("RELA ABS_L16[%p] v[0x%x] opcode[0x%x]\n",
+ location, v, opcode);
+ *location = opcode;
+ break;
+ case R_C6000_ABS_H16:
+ opcode = *location;
+ opcode &= ~0x7fff80;
+ opcode |= ((v >> 9) & 0x7fff80);
+ pr_debug("RELA ABS_H16[%p] v[0x%x] opcode[0x%x]\n",
+ location, v, opcode);
+ *location = opcode;
+ break;
+ case R_C6000_PCR_S21:
+ if (fixup_pcr(location, v, 21, 7))
+ return -ENOEXEC;
+ break;
+ case R_C6000_PCR_S12:
+ if (fixup_pcr(location, v, 12, 16))
+ return -ENOEXEC;
+ break;
+ case R_C6000_PCR_S10:
+ if (fixup_pcr(location, v, 10, 13))
+ return -ENOEXEC;
+ break;
+ default:
+ pr_err("module %s: Unknown RELA relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
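(Editorial aside, not part of the patch.) The R_C6000_ABS_L16/ABS_H16 cases above patch the 16-bit constant field, bits 7..22, of an MVKL/MVKH opcode pair. A small stand-alone sketch of the same bit manipulation, using the masks from the code:

#include <stdint.h>

/* Sketch only: split a 32-bit value v across the constant fields of an
 * MVKL/MVKH pair, mirroring the ABS_L16/ABS_H16 relocation handling.
 */
static void encode_mvk_pair(uint32_t *mvkl, uint32_t *mvkh, uint32_t v)
{
	*mvkl = (*mvkl & ~0x7fff80u) | ((v & 0xffffu) << 7);	/* low 16 bits */
	*mvkh = (*mvkh & ~0x7fff80u) | ((v >> 9) & 0x7fff80u);	/* high 16 bits */
}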
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
new file mode 100644
index 00000000000..7ca8c41b03c
--- /dev/null
+++ b/arch/c6x/kernel/process.c
@@ -0,0 +1,265 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/init_task.h>
+#include <linux/tick.h>
+#include <linux/mqueue.h>
+#include <linux/syscalls.h>
+#include <linux/reboot.h>
+
+#include <asm/syscalls.h>
+
+/* hooks for board specific support */
+void (*c6x_restart)(void);
+void (*c6x_halt)(void);
+
+extern asmlinkage void ret_from_fork(void);
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+
+/*
+ * Initial thread structure.
+ */
+union thread_union init_thread_union __init_task_data = {
+ INIT_THREAD_INFO(init_task)
+};
+
+/*
+ * Initial task structure.
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
+
+/*
+ * power off function, if any
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+static void c6x_idle(void)
+{
+ unsigned long tmp;
+
+ /*
+ * Put local_irq_enable and idle in the same execute packet
+ * so they happen atomically, avoiding a race where an
+ * interrupt arrives between enabling interrupts and idling.
+ */
+ asm volatile (" mvc .s2 CSR,%0\n"
+ " or .d2 1,%0,%0\n"
+ " mvc .s2 %0,CSR\n"
+ "|| idle\n"
+ : "=b"(tmp));
+}
+
+/*
+ * The idle loop for C64x
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+ while (1) {
+ local_irq_disable();
+ if (need_resched()) {
+ local_irq_enable();
+ break;
+ }
+ c6x_idle(); /* enables local irqs */
+ }
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+static void halt_loop(void)
+{
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1)
+ asm volatile("idle\n");
+}
+
+void machine_restart(char *__unused)
+{
+ if (c6x_restart)
+ c6x_restart();
+ halt_loop();
+}
+
+void machine_halt(void)
+{
+ if (c6x_halt)
+ c6x_halt();
+ halt_loop();
+}
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ halt_loop();
+}
+
+static void kernel_thread_helper(int dummy, void *arg, int (*fn)(void *))
+{
+ do_exit(fn(arg));
+}
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ /*
+ * copy_thread sets a4 to zero (child return from fork)
+ * so we can't just set things up to directly return to
+ * fn.
+ */
+ memset(&regs, 0, sizeof(regs));
+ regs.b4 = (unsigned long) arg;
+ regs.a6 = (unsigned long) fn;
+ regs.pc = (unsigned long) kernel_thread_helper;
+ local_save_flags(regs.csr);
+ regs.csr |= 1;
+ regs.tsr = 5; /* Set GEE and GIE in TSR */
+
+ /* Ok, create the new process.. */
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, -1, &regs,
+ 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+void flush_thread(void)
+{
+}
+
+void exit_thread(void)
+{
+}
+
+SYSCALL_DEFINE1(c6x_clone, struct pt_regs *, regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+
+ /* syscall puts clone_flags in A4 and usp in B4 */
+ clone_flags = regs->orig_a4;
+ if (regs->b4)
+ newsp = regs->b4;
+ else
+ newsp = regs->sp;
+
+ return do_fork(clone_flags, newsp, regs, 0, (int __user *)regs->a6,
+ (int __user *)regs->b6);
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ */
+void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
+{
+ /*
+ * The binfmt loader will set up a "full" stack, but the C6X
+ * uses an "empty" stack. So we adjust the usp so that
+ * argc doesn't get destroyed if an interrupt is taken before
+ * it is read from the stack.
+ *
+ * NB: Library startup code needs to match this.
+ */
+ usp -= 8;
+
+ set_fs(USER_DS);
+ regs->pc = pc;
+ regs->sp = usp;
+ regs->tsr |= 0x40; /* set user mode */
+ current->thread.usp = usp;
+}
+
+/*
+ * Copy a new thread context onto its stack.
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long ustk_size,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+
+ childregs = task_pt_regs(p);
+
+ *childregs = *regs;
+ childregs->a4 = 0;
+
+ if (usp == -1)
+ /* case of kernel_thread(): we return to supervisor space */
+ childregs->sp = (unsigned long)(childregs + 1);
+ else
+ /* Otherwise use the given stack */
+ childregs->sp = usp;
+
+ /* Set usp/ksp */
+ p->thread.usp = childregs->sp;
+ /* switch_to uses stack to save/restore 14 callee-saved regs */
+ thread_saved_ksp(p) = (unsigned long)childregs - 8;
+ p->thread.pc = (unsigned int) ret_from_fork;
+ p->thread.wchan = (unsigned long) ret_from_fork;
+#ifdef __DSBT__
+ {
+ unsigned long dp;
+
+ asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));
+
+ thread_saved_dp(p) = dp;
+ if (usp == -1)
+ childregs->dp = dp;
+ }
+#endif
+ return 0;
+}
+
+/*
+ * c6x_execve() executes a new program.
+ */
+SYSCALL_DEFINE4(c6x_execve, const char __user *, name,
+ const char __user *const __user *, argv,
+ const char __user *const __user *, envp,
+ struct pt_regs *, regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ return p->thread.wchan;
+}
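(Editorial aside, not part of the patch.) For context, this is how kernel code of this era would typically use the kernel_thread() implementation above; the function names are hypothetical.

static int example_thread_fn(void *arg)
{
	pr_info("example kernel thread started, arg=%p\n", arg);
	return 0;
}

static int __init example_start_thread(void)
{
	int pid = kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);

	return (pid < 0) ? pid : 0;
}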
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
new file mode 100644
index 00000000000..3c494e84444
--- /dev/null
+++ b/arch/c6x/kernel/ptrace.c
@@ -0,0 +1,187 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
+
+#include <asm/cacheflush.h>
+
+#define PT_REG_SIZE (sizeof(struct pt_regs))
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* nothing to do */
+}
+
+/*
+ * Get the contents of register REGNO from the live pt_regs of the specified task.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+ long *addr = (long *)task_pt_regs(task);
+
+ if (regno == PT_TSR || regno == PT_CSR)
+ return 0;
+
+ return addr[regno];
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task,
+ int regno,
+ unsigned long data)
+{
+ unsigned long *addr = (unsigned long *)task_pt_regs(task);
+
+ if (regno != PT_TSR && regno != PT_CSR)
+ addr[regno] = data;
+
+ return 0;
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs,
+ 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs *regs = task_pt_regs(target);
+
+ /* Don't copyin TSR or CSR */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs,
+ 0, PT_TSR * sizeof(long));
+ if (ret)
+ return ret;
+
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PT_TSR * sizeof(long),
+ (PT_TSR + 1) * sizeof(long));
+ if (ret)
+ return ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs,
+ (PT_TSR + 1) * sizeof(long),
+ PT_CSR * sizeof(long));
+ if (ret)
+ return ret;
+
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PT_CSR * sizeof(long),
+ (PT_CSR + 1) * sizeof(long));
+ if (ret)
+ return ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs,
+ (PT_CSR + 1) * sizeof(long), -1);
+ return ret;
+}
+
+enum c6x_regset {
+ REGSET_GPR,
+};
+
+static const struct user_regset c6x_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = gpr_get,
+ .set = gpr_set
+ },
+};
+
+static const struct user_regset_view user_c6x_native_view = {
+ .name = "tic6x",
+ .e_machine = EM_TI_C6000,
+ .regsets = c6x_regsets,
+ .n = ARRAY_SIZE(c6x_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_c6x_native_view;
+}
+
+/*
+ * Perform ptrace request
+ */
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = 0;
+
+ switch (request) {
+ /*
+ * write the word at location addr.
+ */
+ case PTRACE_POKETEXT:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ if (ret == 0 && request == PTRACE_POKETEXT)
+ flush_icache_range(addr, addr + 4);
+ break;
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * handle tracing of system call entry
+ * - return the revised system call number or ULONG_MAX to cause ENOSYS
+ */
+asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
+{
+ if (tracehook_report_syscall_entry(regs))
+ /* tracing decided this syscall should not happen, so
+ * we'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in
+ * regs->orig_a4
+ */
+ return ULONG_MAX;
+
+ return regs->b0;
+}
+
+/*
+ * handle tracing of system call exit
+ */
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ tracehook_report_syscall_exit(regs, 0);
+}
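(Editorial aside, not part of the patch.) The NT_PRSTATUS regset registered above is what a debugger reaches via PTRACE_GETREGSET. A hedged user-space sketch; the size and layout of the returned data mirror struct pt_regs and are assumed here rather than defined by this patch.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>

/* Sketch only: fetch a traced child's general-purpose regset. */
static int dump_regs(pid_t pid)
{
	unsigned long regs[64];		/* assumed large enough for pt_regs */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) < 0) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	printf("regset returned %zu bytes\n", iov.iov_len);
	return 0;
}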
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
new file mode 100644
index 00000000000..0c07921747f
--- /dev/null
+++ b/arch/c6x/kernel/setup.c
@@ -0,0 +1,510 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/clkdev.h>
+#include <linux/initrd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+
+
+#include <asm/sections.h>
+#include <asm/div64.h>
+#include <asm/setup.h>
+#include <asm/dscr.h>
+#include <asm/clock.h>
+#include <asm/soc.h>
+
+static const char *c6x_soc_name;
+
+int c6x_num_cores;
+EXPORT_SYMBOL_GPL(c6x_num_cores);
+
+unsigned int c6x_silicon_rev;
+EXPORT_SYMBOL_GPL(c6x_silicon_rev);
+
+/*
+ * Device status register. This holds information
+ * about device configuration needed by some drivers.
+ */
+unsigned int c6x_devstat;
+EXPORT_SYMBOL_GPL(c6x_devstat);
+
+/*
+ * Some SoCs have fuse registers holding a unique MAC
+ * address. This is parsed out of the device tree with
+ * the resulting MAC being held here.
+ */
+unsigned char c6x_fuse_mac[6];
+
+unsigned long memory_start;
+unsigned long memory_end;
+
+unsigned long ram_start;
+unsigned long ram_end;
+
+/* Uncached memory for DMA consistent use (memdma=) */
+static unsigned long dma_start __initdata;
+static unsigned long dma_size __initdata;
+
+char c6x_command_line[COMMAND_LINE_SIZE];
+
+#if defined(CONFIG_CMDLINE_BOOL)
+static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) =
+ CONFIG_CMDLINE;
+#endif
+
+struct cpuinfo_c6x {
+ const char *cpu_name;
+ const char *cpu_voltage;
+ const char *mmu;
+ const char *fpu;
+ char *cpu_rev;
+ unsigned int core_id;
+ char __cpu_rev[5];
+};
+
+static DEFINE_PER_CPU(struct cpuinfo_c6x, cpu_data);
+
+unsigned int ticks_per_ns_scaled;
+EXPORT_SYMBOL(ticks_per_ns_scaled);
+
+unsigned int c6x_core_freq;
+
+static void __init get_cpuinfo(void)
+{
+ unsigned cpu_id, rev_id, csr;
+ struct clk *coreclk = clk_get_sys(NULL, "core");
+ unsigned long core_khz;
+ u64 tmp;
+ struct cpuinfo_c6x *p;
+ struct device_node *node, *np;
+
+ p = &per_cpu(cpu_data, smp_processor_id());
+
+ if (!IS_ERR(coreclk))
+ c6x_core_freq = clk_get_rate(coreclk);
+ else {
+ printk(KERN_WARNING
+ "Cannot find core clock frequency. Using 700MHz\n");
+ c6x_core_freq = 700000000;
+ }
+
+ core_khz = c6x_core_freq / 1000;
+
+ tmp = (uint64_t)core_khz << C6X_NDELAY_SCALE;
+ do_div(tmp, 1000000);
+ ticks_per_ns_scaled = tmp;
+
+ csr = get_creg(CSR);
+ cpu_id = csr >> 24;
+ rev_id = (csr >> 16) & 0xff;
+
+ p->mmu = "none";
+ p->fpu = "none";
+ p->cpu_voltage = "unknown";
+
+ switch (cpu_id) {
+ case 0:
+ p->cpu_name = "C67x";
+ p->fpu = "yes";
+ break;
+ case 2:
+ p->cpu_name = "C62x";
+ break;
+ case 8:
+ p->cpu_name = "C64x";
+ break;
+ case 12:
+ p->cpu_name = "C64x";
+ break;
+ case 16:
+ p->cpu_name = "C64x+";
+ p->cpu_voltage = "1.2";
+ break;
+ default:
+ p->cpu_name = "unknown";
+ break;
+ }
+
+ if (cpu_id < 16) {
+ switch (rev_id) {
+ case 0x1:
+ if (cpu_id > 8) {
+ p->cpu_rev = "DM640/DM641/DM642/DM643";
+ p->cpu_voltage = "1.2 - 1.4";
+ } else {
+ p->cpu_rev = "C6201";
+ p->cpu_voltage = "2.5";
+ }
+ break;
+ case 0x2:
+ p->cpu_rev = "C6201B/C6202/C6211";
+ p->cpu_voltage = "1.8";
+ break;
+ case 0x3:
+ p->cpu_rev = "C6202B/C6203/C6204/C6205";
+ p->cpu_voltage = "1.5";
+ break;
+ case 0x201:
+ p->cpu_rev = "C6701 revision 0 (early CPU)";
+ p->cpu_voltage = "1.8";
+ break;
+ case 0x202:
+ p->cpu_rev = "C6701/C6711/C6712";
+ p->cpu_voltage = "1.8";
+ break;
+ case 0x801:
+ p->cpu_rev = "C64x";
+ p->cpu_voltage = "1.5";
+ break;
+ default:
+ p->cpu_rev = "unknown";
+ }
+ } else {
+ p->cpu_rev = p->__cpu_rev;
+ snprintf(p->__cpu_rev, sizeof(p->__cpu_rev), "0x%x", cpu_id);
+ }
+
+ p->core_id = get_coreid();
+
+ node = of_find_node_by_name(NULL, "cpus");
+ if (node) {
+ for_each_child_of_node(node, np)
+ if (!strcmp("cpu", np->name))
+ ++c6x_num_cores;
+ of_node_put(node);
+ }
+
+ node = of_find_node_by_name(NULL, "soc");
+ if (node) {
+ if (of_property_read_string(node, "model", &c6x_soc_name))
+ c6x_soc_name = "unknown";
+ of_node_put(node);
+ } else
+ c6x_soc_name = "unknown";
+
+ printk(KERN_INFO "CPU%d: %s rev %s, %s volts, %uMHz\n",
+ p->core_id, p->cpu_name, p->cpu_rev,
+ p->cpu_voltage, c6x_core_freq / 1000000);
+}
+
+/*
+ * Early parsing of the command line
+ */
+static u32 mem_size __initdata;
+
+/* "mem=" parsing. */
+static int __init early_mem(char *p)
+{
+ if (!p)
+ return -EINVAL;
+
+ mem_size = memparse(p, &p);
+ /* don't remove all of memory when handling "mem={invalid}" */
+ if (mem_size == 0)
+ return -EINVAL;
+
+ return 0;
+}
+early_param("mem", early_mem);
+
+/* "memdma=<size>[@<address>]" parsing. */
+static int __init early_memdma(char *p)
+{
+ if (!p)
+ return -EINVAL;
+
+ dma_size = memparse(p, &p);
+ if (*p == '@')
+ dma_start = memparse(p, &p);
+
+ return 0;
+}
+early_param("memdma", early_memdma);
+
+int __init c6x_add_memory(phys_addr_t start, unsigned long size)
+{
+ static int ram_found __initdata;
+
+ /* We only handle one bank (the one with PAGE_OFFSET) for now */
+ if (ram_found)
+ return -EINVAL;
+
+ if (start > PAGE_OFFSET || PAGE_OFFSET >= (start + size))
+ return 0;
+
+ ram_start = start;
+ ram_end = start + size;
+
+ ram_found = 1;
+ return 0;
+}
+
+/*
+ * Do early machine setup and device tree parsing. This is called very
+ * early in the boot process.
+ */
+notrace void __init machine_init(unsigned long dt_ptr)
+{
+ struct boot_param_header *dtb = __va(dt_ptr);
+ struct boot_param_header *fdt = (struct boot_param_header *)_fdt_start;
+
+ /* interrupts must be masked */
+ set_creg(IER, 2);
+
+ /*
+ * Set the Interrupt Service Table (IST) to the beginning of the
+ * vector table.
+ */
+ set_ist(_vectors_start);
+
+ lockdep_init();
+
+ /*
+ * dtb is passed in from bootloader.
+ * fdt is the blob linked into the kernel image.
+ */
+ if (dtb && dtb != fdt)
+ fdt = dtb;
+
+ /* Do some early initialization based on the flat device tree */
+ early_init_devtree(fdt);
+
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE);
+ parse_early_param();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ int bootmap_size;
+ struct memblock_region *reg;
+
+ printk(KERN_INFO "Initializing kernel\n");
+
+ /* Initialize command line */
+ *cmdline_p = c6x_command_line;
+
+ memory_end = ram_end;
+ memory_end &= ~(PAGE_SIZE - 1);
+
+ if (mem_size && (PAGE_OFFSET + PAGE_ALIGN(mem_size)) < memory_end)
+ memory_end = PAGE_OFFSET + PAGE_ALIGN(mem_size);
+
+ /* add block that this kernel can use */
+ memblock_add(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+ /* reserve kernel text/data/bss */
+ memblock_reserve(PAGE_OFFSET,
+ PAGE_ALIGN((unsigned long)&_end - PAGE_OFFSET));
+
+ if (dma_size) {
+ /* align to cacheability granularity */
+ dma_size = CACHE_REGION_END(dma_size);
+
+ if (!dma_start)
+ dma_start = memory_end - dma_size;
+
+ /* align to cacheability granularity */
+ dma_start = CACHE_REGION_START(dma_start);
+
+ /* reserve DMA memory taken from kernel memory */
+ if (memblock_is_region_memory(dma_start, dma_size))
+ memblock_reserve(dma_start, dma_size);
+ }
+
+ memory_start = PAGE_ALIGN((unsigned int) &_end);
+
+ printk(KERN_INFO "Memory Start=%08lx, Memory End=%08lx\n",
+ memory_start, memory_end);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * Reserve initrd memory if in kernel memory.
+ */
+ if (initrd_start < initrd_end)
+ if (memblock_is_region_memory(initrd_start,
+ initrd_end - initrd_start))
+ memblock_reserve(initrd_start,
+ initrd_end - initrd_start);
+#endif
+
+ init_mm.start_code = (unsigned long) &_stext;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = memory_start;
+ init_mm.brk = memory_start;
+
+ /*
+ * Give all the memory to the bootmap allocator, tell it to put the
+ * boot mem_map at the start of memory
+ */
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ memory_start >> PAGE_SHIFT,
+ PAGE_OFFSET >> PAGE_SHIFT,
+ memory_end >> PAGE_SHIFT);
+ memblock_reserve(memory_start, bootmap_size);
+
+ unflatten_device_tree();
+
+ c6x_cache_init();
+
+ /* Set the whole external memory as non-cacheable */
+ disable_caching(ram_start, ram_end - 1);
+
+ /* Set caching of external RAM used by Linux */
+ for_each_memblock(memory, reg)
+ enable_caching(CACHE_REGION_START(reg->base),
+ CACHE_REGION_START(reg->base + reg->size - 1));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * Enable caching for initrd which falls outside kernel memory.
+ */
+ if (initrd_start < initrd_end) {
+ if (!memblock_is_region_memory(initrd_start,
+ initrd_end - initrd_start))
+ enable_caching(CACHE_REGION_START(initrd_start),
+ CACHE_REGION_START(initrd_end - 1));
+ }
+#endif
+
+ /*
+ * Disable caching for dma coherent memory taken from kernel memory.
+ */
+ if (dma_size && memblock_is_region_memory(dma_start, dma_size))
+ disable_caching(dma_start,
+ CACHE_REGION_START(dma_start + dma_size - 1));
+
+ /* Initialize the coherent memory allocator */
+ coherent_mem_init(dma_start, dma_size);
+
+ /*
+ * Free all memory as a starting point.
+ */
+ free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET);
+
+ /*
+ * Then reserve memory which is already being used.
+ */
+ for_each_memblock(reserved, reg) {
+ pr_debug("reserved - 0x%08x-0x%08x\n",
+ (u32) reg->base, (u32) reg->size);
+ reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ }
+
+ max_low_pfn = PFN_DOWN(memory_end);
+ min_low_pfn = PFN_UP(memory_start);
+ max_mapnr = max_low_pfn - min_low_pfn;
+
+ /* Get kmalloc into gear */
+ paging_init();
+
+ /*
+ * Probe for Device State Configuration Registers.
+	 * We have to do this early in case the timer needs to be enabled
+	 * through the DSCR.
+ */
+ dscr_probe();
+
+ /* We do this early for timer and core clock frequency */
+ c64x_setup_clocks();
+
+ /* Get CPU info */
+ get_cpuinfo();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+}
+
+#define cpu_to_ptr(n) ((void *)((long)(n)+1))
+#define ptr_to_cpu(p) ((long)(p) - 1)
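+
+/*
+ * The +1/-1 offset keeps CPU 0 from being encoded as a NULL pointer:
+ * c_start() below returns NULL to mark the end of the seq_file
+ * iteration, so a valid CPU index must never map to 0.
+ */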
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ int n = ptr_to_cpu(v);
+ struct cpuinfo_c6x *p = &per_cpu(cpu_data, n);
+
+ if (n == 0) {
+ seq_printf(m,
+ "soc\t\t: %s\n"
+ "soc revision\t: 0x%x\n"
+ "soc cores\t: %d\n",
+ c6x_soc_name, c6x_silicon_rev, c6x_num_cores);
+ }
+
+ seq_printf(m,
+ "\n"
+ "processor\t: %d\n"
+ "cpu\t\t: %s\n"
+ "core revision\t: %s\n"
+ "core voltage\t: %s\n"
+ "core id\t\t: %d\n"
+ "mmu\t\t: %s\n"
+ "fpu\t\t: %s\n"
+ "cpu MHz\t\t: %u\n"
+ "bogomips\t: %lu.%02lu\n\n",
+ n,
+ p->cpu_name, p->cpu_rev, p->cpu_voltage,
+ p->core_id, p->mmu, p->fpu,
+ (c6x_core_freq + 500000) / 1000000,
+ (loops_per_jiffy/(500000/HZ)),
+ (loops_per_jiffy/(5000/HZ))%100);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ c_start,
+ c_stop,
+ c_next,
+ show_cpuinfo
+};
+
+static struct cpu cpu_devices[NR_CPUS];
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_present_cpu(i)
+ register_cpu(&cpu_devices[i], i);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
new file mode 100644
index 00000000000..304f675826e
--- /dev/null
+++ b/arch/c6x/kernel/signal.c
@@ -0,0 +1,377 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Do a signal return, undo the signal stack.
+ */
+
+#define RETCODE_SIZE (9 << 2) /* 9 instructions = 36 bytes */
+
+struct rt_sigframe {
+ struct siginfo __user *pinfo;
+ void __user *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long retcode[RETCODE_SIZE >> 2];
+};
+
+static int restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
+{
+ int err = 0;
+
+ /* The access_ok check was done by caller, so use __get_user here */
+#define COPY(x) (err |= __get_user(regs->x, &sc->sc_##x))
+
+ COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+ COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+ COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+ COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+ COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+ COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+ COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+ COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+ COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+ COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+ COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+ COPY(csr); COPY(pc);
+
+#undef COPY
+
+ return err;
+}
+
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+
+ /*
+ * Since we stacked the signal on a dword boundary,
+ * 'sp' should be dword aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 7)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *) ((unsigned long) regs->sp + 8);
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ return regs->a4;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+ err |= __put_user(mask, &sc->sc_mask);
+
+ /* The access_ok check was done by caller, so use __put_user here */
+#define COPY(x) (err |= __put_user(regs->x, &sc->sc_##x))
+
+ COPY(sp); COPY(a4); COPY(b4); COPY(a6); COPY(b6); COPY(a8); COPY(b8);
+ COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(a5); COPY(a7); COPY(a9);
+ COPY(b0); COPY(b1); COPY(b2); COPY(b3); COPY(b5); COPY(b7); COPY(b9);
+
+ COPY(a16); COPY(a17); COPY(a18); COPY(a19);
+ COPY(a20); COPY(a21); COPY(a22); COPY(a23);
+ COPY(a24); COPY(a25); COPY(a26); COPY(a27);
+ COPY(a28); COPY(a29); COPY(a30); COPY(a31);
+ COPY(b16); COPY(b17); COPY(b18); COPY(b19);
+ COPY(b20); COPY(b21); COPY(b22); COPY(b23);
+ COPY(b24); COPY(b25); COPY(b26); COPY(b27);
+ COPY(b28); COPY(b29); COPY(b30); COPY(b31);
+
+ COPY(csr); COPY(pc);
+
+#undef COPY
+
+ return err;
+}
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = regs->sp;
+
+ /*
+ * This is the X/Open sanctioned signal stack switching.
+ */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /*
+ * No matter what happens, 'sp' must be dword
+	 * aligned. Otherwise, nasty things will happen.
+ */
+ return (void __user *)((sp - framesize) & ~7);
+}
+
+static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ unsigned long __user *retcode;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto segv_and_exit;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Clear all the bits of the ucontext we don't use. */
+ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace */
+ retcode = (unsigned long __user *) &frame->retcode;
+
+ /* The access_ok check was done above, so use __put_user here */
+#define COPY(x) (err |= __put_user(x, retcode++))
+
+ COPY(0x0000002AUL | (__NR_rt_sigreturn << 7));
+ /* MVK __NR_rt_sigreturn,B0 */
+ COPY(0x10000000UL); /* SWE */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+ COPY(0x00006000UL); /* NOP 4 */
+
+#undef COPY
+
+ if (err)
+ goto segv_and_exit;
+
+ flush_icache_range((unsigned long) &frame->retcode,
+ (unsigned long) &frame->retcode + RETCODE_SIZE);
+
+ retcode = (unsigned long __user *) &frame->retcode;
+
+ /* Change user context to branch to signal handler */
+ regs->sp = (unsigned long) frame - 8;
+ regs->b3 = (unsigned long) retcode;
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ /* Give the signal number to the handler */
+ regs->a4 = signr;
+
+ /*
+ * For realtime signals we must also set the second and third
+ * arguments for the signal handler.
+ * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
+ */
+ regs->b4 = (unsigned long)&frame->info;
+ regs->a6 = (unsigned long)&frame->uc;
+
+ return 0;
+
+segv_and_exit:
+ force_sigsegv(signr, current);
+ return -EFAULT;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->a4) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->a4 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->a4 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+do_restart:
+ regs->a4 = regs->orig_a4;
+ regs->pc -= 4;
+ break;
+ }
+}
+
+/*
+ * handle the actual delivery of a signal to userspace
+ */
+static int handle_signal(int sig,
+ siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs,
+ int syscall)
+{
+ int ret;
+
+ /* Are we from a system call? */
+ if (syscall) {
+ /* If so, check system call restarting.. */
+ switch (regs->a4) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->a4 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->a4 = -EINTR;
+ break;
+ }
+
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->a4 = regs->orig_a4;
+ regs->pc -= 4;
+ }
+ }
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+ if (ret == 0) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ return ret;
+}
+
+/*
+ * handle a potential signal
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ sigset_t *oldset;
+ int signr;
+
+ /* we want the common case to go fast, which is why we may in certain
+ * cases get here from kernel mode */
+ if (!user_mode(regs))
+ return;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ if (handle_signal(signr, &info, &ka, oldset,
+ regs, syscall) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs, 0);
+ }
+
+ return;
+ }
+
+ /* did we come from a system call? */
+ if (syscall) {
+ /* restart the system call - no handlers present */
+ switch (regs->a4) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->a4 = regs->orig_a4;
+ regs->pc -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->a4 = regs->orig_a4;
+ regs->b0 = __NR_restart_syscall;
+ regs->pc -= 4;
+ break;
+ }
+ }
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
+ int syscall)
+{
+ /* deal with pending signal delivery */
+ if (thread_info_flags & ((1 << TIF_SIGPENDING) |
+ (1 << TIF_RESTORE_SIGMASK)))
+ do_signal(regs, syscall);
+
+ if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
new file mode 100644
index 00000000000..dd45bc39af0
--- /dev/null
+++ b/arch/c6x/kernel/soc.c
@@ -0,0 +1,91 @@
+/*
+ * Miscellaneous SoC-specific hooks.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <asm/system.h>
+#include <asm/setup.h>
+#include <asm/soc.h>
+
+struct soc_ops soc_ops;
+
+int soc_get_exception(void)
+{
+ if (!soc_ops.get_exception)
+ return -1;
+ return soc_ops.get_exception();
+}
+
+void soc_assert_event(unsigned int evt)
+{
+ if (soc_ops.assert_event)
+ soc_ops.assert_event(evt);
+}
+
+static u8 cmdline_mac[6];
+
+static int __init get_mac_addr_from_cmdline(char *str)
+{
+ int count, i, val;
+
+ for (count = 0; count < 6 && *str; count++, str += 3) {
+ if (!isxdigit(str[0]) || !isxdigit(str[1]))
+ return 0;
+ if (str[2] != ((count < 5) ? ':' : '\0'))
+ return 0;
+
+ for (i = 0, val = 0; i < 2; i++) {
+ val = val << 4;
+ val |= isdigit(str[i]) ?
+ str[i] - '0' : toupper(str[i]) - 'A' + 10;
+ }
+ cmdline_mac[count] = val;
+ }
+ return 1;
+}
+__setup("emac_addr=", get_mac_addr_from_cmdline);
+
+/*
+ * Setup the MAC address for SoC ethernet devices.
+ *
+ * Before calling this function, the ethernet driver will have
+ * initialized the addr with local-mac-address from the device
+ * tree (if found). Allow command line to override, but not
+ * the fused address.
+ */
+int soc_mac_addr(unsigned int index, u8 *addr)
+{
+ int i, have_dt_mac = 0, have_cmdline_mac = 0, have_fuse_mac = 0;
+
+ for (i = 0; i < 6; i++) {
+ if (cmdline_mac[i])
+ have_cmdline_mac = 1;
+ if (c6x_fuse_mac[i])
+ have_fuse_mac = 1;
+ if (addr[i])
+ have_dt_mac = 1;
+ }
+
+ /* cmdline overrides all */
+ if (have_cmdline_mac)
+ memcpy(addr, cmdline_mac, 6);
+ else if (!have_dt_mac) {
+ if (have_fuse_mac)
+ memcpy(addr, c6x_fuse_mac, 6);
+ else
+ random_ether_addr(addr);
+ }
+
+ /* adjust for specific EMAC device */
+ addr[5] += index * c6x_num_cores;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(soc_mac_addr);
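+
+/*
+ * Note on the index adjustment above: with, say, two cores and a base
+ * MAC ending in ...:40 (hypothetical), the EMAC at index 1 ends up
+ * with ...:42, keeping addresses distinct across per-core EMAC
+ * instances.
+ */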
diff --git a/arch/c6x/kernel/switch_to.S b/arch/c6x/kernel/switch_to.S
new file mode 100644
index 00000000000..09177ed0fa5
--- /dev/null
+++ b/arch/c6x/kernel/switch_to.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+#define SP B15
+
+ /*
+ * void __switch_to(struct thread_info *prev,
+ * struct thread_info *next,
+ * struct task_struct *tsk) ;
+ */
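+	/*
+	 * The context handled here is the callee-saved register pairs
+	 * (A10-A15, B10-B15), the loop registers ILC/RILC and the return
+	 * address; the stack pointer (B15) is switched to the next
+	 * task's saved kernel stack.
+	 */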
+ENTRY(__switch_to)
+ LDDW .D2T2 *+B4(THREAD_B15_14),B7:B6
+ || MV .L2X A4,B5 ; prev
+ || MV .L1X B4,A5 ; next
+ || MVC .S2 RILC,B1
+
+ STW .D2T2 B3,*+B5(THREAD_PC)
+ || STDW .D1T1 A13:A12,*+A4(THREAD_A13_12)
+ || MVC .S2 ILC,B0
+
+ LDW .D2T2 *+B4(THREAD_PC),B3
+ || LDDW .D1T1 *+A5(THREAD_A13_12),A13:A12
+
+ STDW .D1T1 A11:A10,*+A4(THREAD_A11_10)
+ || STDW .D2T2 B1:B0,*+B5(THREAD_RICL_ICL)
+#ifndef __DSBT__
+ || MVKL .S2 current_ksp,B1
+#endif
+
+ STDW .D2T2 B15:B14,*+B5(THREAD_B15_14)
+ || STDW .D1T1 A15:A14,*+A4(THREAD_A15_14)
+#ifndef __DSBT__
+ || MVKH .S2 current_ksp,B1
+#endif
+
+ ;; Switch to next SP
+ MV .S2 B7,SP
+#ifdef __DSBT__
+ || STW .D2T2 B7,*+B14(current_ksp)
+#else
+ || STW .D2T2 B7,*B1
+ || MV .L2 B6,B14
+#endif
+ || LDDW .D1T1 *+A5(THREAD_RICL_ICL),A1:A0
+
+ STDW .D2T2 B11:B10,*+B5(THREAD_B11_10)
+ || LDDW .D1T1 *+A5(THREAD_A15_14),A15:A14
+
+ STDW .D2T2 B13:B12,*+B5(THREAD_B13_12)
+ || LDDW .D1T1 *+A5(THREAD_A11_10),A11:A10
+
+ B .S2 B3 ; return in next E1
+ || LDDW .D2T2 *+B4(THREAD_B13_12),B13:B12
+
+ LDDW .D2T2 *+B4(THREAD_B11_10),B11:B10
+ NOP
+
+ MV .L2X A0,B0
+ || MV .S1 A6,A4
+
+ MVC .S2 B0,ILC
+ || MV .L2X A1,B1
+
+ MVC .S2 B1,RILC
+ENDPROC(__switch_to)
diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c
new file mode 100644
index 00000000000..3e9bdfbee8a
--- /dev/null
+++ b/arch/c6x/kernel/sys_c6x.c
@@ -0,0 +1,74 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/syscalls.h>
+
+#ifdef CONFIG_ACCESS_CHECK
+int _access_ok(unsigned long addr, unsigned long size)
+{
+ if (!size)
+ return 1;
+
+ if (!addr || addr > (0xffffffffUL - (size - 1)))
+ goto _bad_access;
+
+ if (segment_eq(get_fs(), KERNEL_DS))
+ return 1;
+
+ if (memory_start <= addr && (addr + size - 1) < memory_end)
+ return 1;
+
+_bad_access:
+ pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n",
+ current->pid, addr, size);
+ return 0;
+}
+EXPORT_SYMBOL(_access_ok);
+#endif
+
+/* sys_cache_sync -- sync caches over given range */
+asmlinkage int sys_cache_sync(unsigned long s, unsigned long e)
+{
+ L1D_cache_block_writeback_invalidate(s, e);
+ L1P_cache_block_invalidate(s, e);
+
+ return 0;
+}
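+
+/*
+ * A typical (hypothetical) caller is a program that has just written
+ * instructions into memory, e.g. a JIT: the range is written back from
+ * L1D and invalidated in L1P so the new code is fetched coherently.
+ */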
+
+/* Provide the actual syscall-number-to-call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
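+/*
+ * Each __SYSCALL() entry pulled in from <asm/unistd.h> then becomes a
+ * designated initializer in sys_call_table below, e.g. (illustrative)
+ * __SYSCALL(__NR_read, sys_read) becomes [__NR_read] = sys_read,
+ */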
+
+/*
+ * Use trampolines
+ */
+#define sys_pread64 sys_pread_c6x
+#define sys_pwrite64 sys_pwrite_c6x
+#define sys_truncate64 sys_truncate64_c6x
+#define sys_ftruncate64 sys_ftruncate64_c6x
+#define sys_fadvise64 sys_fadvise64_c6x
+#define sys_fadvise64_64 sys_fadvise64_64_c6x
+#define sys_fallocate sys_fallocate_c6x
+
+/* Use sys_mmap_pgoff directly */
+#define sys_mmap2 sys_mmap_pgoff
+
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
new file mode 100644
index 00000000000..4c9f136165f
--- /dev/null
+++ b/arch/c6x/kernel/time.c
@@ -0,0 +1,65 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+#include <asm/timer64.h>
+
+static u32 sched_clock_multiplier;
+#define SCHED_CLOCK_SHIFT 16
+
+static cycle_t tsc_read(struct clocksource *cs)
+{
+ return get_cycles();
+}
+
+static struct clocksource clocksource_tsc = {
+ .name = "timestamp",
+ .rating = 300,
+ .read = tsc_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * scheduler clock - returns current time in nanoseconds.
+ */
+u64 sched_clock(void)
+{
+ u64 tsc = get_cycles();
+
+ return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+}
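+
+/*
+ * sched_clock() works in 16.16 fixed point: the multiplier is the
+ * number of nanoseconds per cycle scaled by 2^16 (set up in time_init()
+ * below). Worked example with an illustrative core clock: at 1000 MHz
+ * the multiplier is (10^9 << 16) / 10^9 = 65536, i.e. exactly 1 ns per
+ * cycle; at 500 MHz it is 131072, i.e. 2 ns per cycle.
+ */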
+
+void time_init(void)
+{
+ u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+ do_div(tmp, c6x_core_freq);
+ sched_clock_multiplier = tmp;
+
+ clocksource_register_hz(&clocksource_tsc, c6x_core_freq);
+
+ /* write anything into TSCL to enable counting */
+ set_creg(TSCL, 0);
+
+ /* probe for timer64 event timer */
+ timer64_init();
+}
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
new file mode 100644
index 00000000000..f50e3edd6da
--- /dev/null
+++ b/arch/c6x/kernel/traps.c
@@ -0,0 +1,423 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <linux/bug.h>
+
+#include <asm/soc.h>
+#include <asm/traps.h>
+
+int (*c6x_nmi_handler)(struct pt_regs *regs);
+
+void __init trap_init(void)
+{
+ ack_exception(EXCEPT_TYPE_NXF);
+ ack_exception(EXCEPT_TYPE_EXC);
+ ack_exception(EXCEPT_TYPE_IXF);
+ ack_exception(EXCEPT_TYPE_SXF);
+ enable_exception();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ pr_err("\n");
+ pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
+ pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
+ pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
+ pr_err("A1: %08lx B1: %08lx\n", regs->a1, regs->b1);
+ pr_err("A2: %08lx B2: %08lx\n", regs->a2, regs->b2);
+ pr_err("A3: %08lx B3: %08lx\n", regs->a3, regs->b3);
+ pr_err("A4: %08lx B4: %08lx\n", regs->a4, regs->b4);
+ pr_err("A5: %08lx B5: %08lx\n", regs->a5, regs->b5);
+ pr_err("A6: %08lx B6: %08lx\n", regs->a6, regs->b6);
+ pr_err("A7: %08lx B7: %08lx\n", regs->a7, regs->b7);
+ pr_err("A8: %08lx B8: %08lx\n", regs->a8, regs->b8);
+ pr_err("A9: %08lx B9: %08lx\n", regs->a9, regs->b9);
+ pr_err("A10: %08lx B10: %08lx\n", regs->a10, regs->b10);
+ pr_err("A11: %08lx B11: %08lx\n", regs->a11, regs->b11);
+ pr_err("A12: %08lx B12: %08lx\n", regs->a12, regs->b12);
+ pr_err("A13: %08lx B13: %08lx\n", regs->a13, regs->b13);
+ pr_err("A14: %08lx B14: %08lx\n", regs->a14, regs->dp);
+ pr_err("A15: %08lx B15: %08lx\n", regs->a15, regs->sp);
+ pr_err("A16: %08lx B16: %08lx\n", regs->a16, regs->b16);
+ pr_err("A17: %08lx B17: %08lx\n", regs->a17, regs->b17);
+ pr_err("A18: %08lx B18: %08lx\n", regs->a18, regs->b18);
+ pr_err("A19: %08lx B19: %08lx\n", regs->a19, regs->b19);
+ pr_err("A20: %08lx B20: %08lx\n", regs->a20, regs->b20);
+ pr_err("A21: %08lx B21: %08lx\n", regs->a21, regs->b21);
+ pr_err("A22: %08lx B22: %08lx\n", regs->a22, regs->b22);
+ pr_err("A23: %08lx B23: %08lx\n", regs->a23, regs->b23);
+ pr_err("A24: %08lx B24: %08lx\n", regs->a24, regs->b24);
+ pr_err("A25: %08lx B25: %08lx\n", regs->a25, regs->b25);
+ pr_err("A26: %08lx B26: %08lx\n", regs->a26, regs->b26);
+ pr_err("A27: %08lx B27: %08lx\n", regs->a27, regs->b27);
+ pr_err("A28: %08lx B28: %08lx\n", regs->a28, regs->b28);
+ pr_err("A29: %08lx B29: %08lx\n", regs->a29, regs->b29);
+ pr_err("A30: %08lx B30: %08lx\n", regs->a30, regs->b30);
+ pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
+}
+
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_stack(current, &stack);
+}
+EXPORT_SYMBOL(dump_stack);
+
+
+void die(char *str, struct pt_regs *fp, int nr)
+{
+ console_verbose();
+ pr_err("%s: %08x\n", str, nr);
+ show_regs(fp);
+
+ pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, (PAGE_SIZE +
+ (unsigned long) current));
+
+ dump_stack();
+ while (1)
+ ;
+}
+
+static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
+{
+ if (user_mode(fp))
+ return;
+
+ die(str, fp, nr);
+}
+
+
+/* Internal exceptions */
+static struct exception_info iexcept_table[10] = {
+ { "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
+ { "Oops - fetch packet", SIGBUS, BUS_ADRERR },
+ { "Oops - execute packet", SIGILL, ILL_ILLOPC },
+ { "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
+ { "Oops - resource conflict", SIGILL, ILL_ILLOPC },
+ { "Oops - resource access", SIGILL, ILL_PRVREG },
+ { "Oops - privilege", SIGILL, ILL_PRVOPC },
+ { "Oops - loops buffer", SIGILL, ILL_ILLOPC },
+ { "Oops - software exception", SIGILL, ILL_ILLTRP },
+ { "Oops - unknown exception", SIGILL, ILL_ILLOPC }
+};
+
+/* External exceptions */
+static struct exception_info eexcept_table[128] = {
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - external exception", SIGBUS, BUS_ADRERR },
+ { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+ { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+ { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
+ { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+ { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
+ { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+ { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
+ { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
+ { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
+};
+
+static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ siginfo_t info;
+
+ if (except_info->code != TRAP_BRKPT)
+ pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
+ except_info->kernel_str, regs->pc,
+ except_info->signo, except_info->code);
+
+ die_if_kernel(except_info->kernel_str, regs, addr);
+
+ info.si_signo = except_info->signo;
+ info.si_errno = 0;
+ info.si_code = except_info->code;
+ info.si_addr = (void __user *)addr;
+
+ force_sig_info(except_info->signo, &info, current);
+}
+
+/*
+ * Process an internal exception (non-maskable)
+ */
+static int process_iexcept(struct pt_regs *regs)
+{
+ unsigned int iexcept_report = get_iexcept();
+ unsigned int iexcept_num;
+
+ ack_exception(EXCEPT_TYPE_IXF);
+
+ pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);
+
+ while (iexcept_report) {
+ iexcept_num = __ffs(iexcept_report);
+ iexcept_report &= ~(1 << iexcept_num);
+ set_iexcept(iexcept_report);
+ if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
+ /* This is a breakpoint */
+ struct exception_info bkpt_exception = {
+ "Oops - undefined instruction",
+ SIGTRAP, TRAP_BRKPT
+ };
+ do_trap(&bkpt_exception, regs);
+ iexcept_report &= ~(0xFF);
+ set_iexcept(iexcept_report);
+ continue;
+ }
+
+ do_trap(&iexcept_table[iexcept_num], regs);
+ }
+ return 0;
+}
+
+/*
+ * Process an external exception (maskable)
+ */
+static void process_eexcept(struct pt_regs *regs)
+{
+ int evt;
+
+ pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);
+
+ while ((evt = soc_get_exception()) >= 0)
+ do_trap(&eexcept_table[evt], regs);
+
+ ack_exception(EXCEPT_TYPE_EXC);
+}
+
+/*
+ * Main exception processing
+ */
+asmlinkage int process_exception(struct pt_regs *regs)
+{
+ unsigned int type;
+ unsigned int type_num;
+ unsigned int ie_num = 9; /* default is unknown exception */
+
+ while ((type = get_except_type()) != 0) {
+ type_num = fls(type) - 1;
+
+ switch (type_num) {
+ case EXCEPT_TYPE_NXF:
+ ack_exception(EXCEPT_TYPE_NXF);
+ if (c6x_nmi_handler)
+ (c6x_nmi_handler)(regs);
+ else
+ pr_alert("NMI interrupt!\n");
+ break;
+
+ case EXCEPT_TYPE_IXF:
+ if (process_iexcept(regs))
+ return 1;
+ break;
+
+ case EXCEPT_TYPE_EXC:
+ process_eexcept(regs);
+ break;
+
+ case EXCEPT_TYPE_SXF:
+			ie_num = 8;
+			/* fall through */
+		default:
+ ack_exception(type_num);
+ do_trap(&iexcept_table[ie_num], regs);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int kstack_depth_to_print = 48;
+
+static void show_trace(unsigned long *stack, unsigned long *endstack)
+{
+ unsigned long addr;
+ int i;
+
+ pr_debug("Call trace:");
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+ if (i % 5 == 0)
+ pr_debug("\n ");
+#endif
+ pr_debug(" [<%08lx>]", addr);
+ print_symbol(" %s\n", addr);
+ i++;
+ }
+ }
+ pr_debug("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *p, *endstack;
+ int i;
+
+ if (!stack) {
+ if (task && task != current)
+ /* We know this is a kernel stack,
+ so this is the start/end */
+ stack = (unsigned long *)thread_saved_ksp(task);
+ else
+ stack = (unsigned long *)&stack;
+ }
+ endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
+ & -THREAD_SIZE);
+
+ pr_debug("Stack from %08lx:", (unsigned long)stack);
+ for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
+ if (p + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ pr_cont("\n ");
+ pr_cont(" %08lx", *p++);
+ }
+ pr_cont("\n");
+ show_trace(stack, endstack);
+}
+
+int is_valid_bugaddr(unsigned long addr)
+{
+ return __kernel_text_address(addr);
+}
diff --git a/arch/c6x/kernel/vectors.S b/arch/c6x/kernel/vectors.S
new file mode 100644
index 00000000000..c95c66fc71e
--- /dev/null
+++ b/arch/c6x/kernel/vectors.S
@@ -0,0 +1,81 @@
+;
+; Port on Texas Instruments TMS320C6x architecture
+;
+; Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+; This program is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License version 2 as
+; published by the Free Software Foundation.
+;
+; This section handles all the interrupt vector routines.
+; At RESET the processor sets up the DRAM timing parameters and
+; branches to the label _c_int00 which handles initialization for the C code.
+;
+
+#define ALIGNMENT 5
+
+ .macro IRQVEC name, handler
+ .align ALIGNMENT
+ .hidden \name
+ .global \name
+\name:
+#ifdef CONFIG_C6X_BIG_KERNEL
+ STW .D2T1 A0,*B15--[2]
+ || MVKL .S1 \handler,A0
+ MVKH .S1 \handler,A0
+ B .S2X A0
+ LDW .D2T1 *++B15[2],A0
+ NOP 4
+ NOP
+ NOP
+ .endm
+#else /* CONFIG_C6X_BIG_KERNEL */
+ B .S2 \handler
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ .endm
+#endif /* CONFIG_C6X_BIG_KERNEL */
+
+ .sect ".vectors","ax"
+ .align ALIGNMENT
+ .global RESET
+ .hidden RESET
+RESET:
+#ifdef CONFIG_C6X_BIG_KERNEL
+ MVKL .S1 _c_int00,A0 ; branch to _c_int00
+ MVKH .S1 _c_int00,A0
+ B .S2X A0
+#else
+ B .S2 _c_int00
+ NOP
+ NOP
+#endif
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+
+
+ IRQVEC NMI,_nmi_handler ; NMI interrupt
+ IRQVEC AINT,_bad_interrupt ; reserved
+ IRQVEC MSGINT,_bad_interrupt ; reserved
+
+ IRQVEC INT4,_int4_handler
+ IRQVEC INT5,_int5_handler
+ IRQVEC INT6,_int6_handler
+ IRQVEC INT7,_int7_handler
+ IRQVEC INT8,_int8_handler
+ IRQVEC INT9,_int9_handler
+ IRQVEC INT10,_int10_handler
+ IRQVEC INT11,_int11_handler
+ IRQVEC INT12,_int12_handler
+ IRQVEC INT13,_int13_handler
+ IRQVEC INT14,_int14_handler
+ IRQVEC INT15,_int15_handler
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..1d81c4c129e
--- /dev/null
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -0,0 +1,162 @@
+/*
+ * ld script for the c6x kernel
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ * Mark Salter <msalter@redhat.com>
+ */
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+ENTRY(_c_int00)
+
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
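+
+/*
+ * jiffies aliases the low 32 bits of jiffies_64: on big-endian those
+ * live at byte offset 4, on little-endian at offset 0.
+ */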
+
+#define READONLY_SEGMENT_START \
+ . = PAGE_OFFSET;
+#define READWRITE_SEGMENT_START \
+ . = ALIGN(128); \
+ _data_lma = .;
+
+SECTIONS
+{
+ /*
+ * Start kernel read only segment
+ */
+ READONLY_SEGMENT_START
+
+ .vectors :
+ {
+ _vectors_start = .;
+ *(.vectors)
+ . = ALIGN(0x400);
+ _vectors_end = .;
+ }
+
+ . = ALIGN(0x1000);
+ .cmdline :
+ {
+ *(.cmdline)
+ }
+
+ /*
+ * This section contains data which may be shared with other
+ * cores. It needs to be a fixed offset from PAGE_OFFSET
+ * regardless of kernel configuration.
+ */
+ .virtio_ipc_dev :
+ {
+ *(.virtio_ipc_dev)
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ .init :
+ {
+ _stext = .;
+ _sinittext = .;
+ HEAD_TEXT
+ INIT_TEXT
+ _einittext = .;
+ }
+
+ __init_begin = _stext;
+ INIT_DATA_SECTION(16)
+
+ PERCPU_SECTION(128)
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text :
+ {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ IRQENTRY_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+
+ EXCEPTION_TABLE(16)
+ NOTES
+
+ RO_DATA_SECTION(PAGE_SIZE)
+ .const :
+ {
+ *(.const .const.* .gnu.linkonce.r.*)
+ *(.switch)
+ }
+
+ . = ALIGN (8) ;
+ __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
+ {
+ _fdt_start = . ; /* place for fdt blob */
+ *(__fdt_blob) ; /* Any link-placed DTB */
+ BYTE(0); /* section always has contents */
+ . = _fdt_start + 0x4000; /* Pad up to 16kbyte */
+ _fdt_end = . ;
+ }
+
+ _etext = .;
+
+ /*
+ * Start kernel read-write segment.
+ */
+ READWRITE_SEGMENT_START
+ _sdata = .;
+
+ .fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
+ {
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(128)
+ READ_MOSTLY_DATA(128)
+ DATA_DATA
+ CONSTRUCTORS
+ *(.data1)
+ *(.fardata .fardata.*)
+ *(.data.debug_bpt)
+ }
+
+ .neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
+ {
+ *(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
+ *(.neardata .neardata.* .gnu.linkonce.s.*)
+ . = ALIGN(8);
+ }
+
+ _edata = .;
+
+ __bss_start = .;
+ SBSS(8)
+ BSS(8)
+ .far :
+ {
+ . = ALIGN(8);
+ *(.dynfar)
+ *(.far .far.* .gnu.linkonce.b.*)
+ . = ALIGN(8);
+ }
+ __bss_stop = .;
+
+ _end = .;
+
+ DWARF_DEBUG
+
+ /DISCARD/ :
+ {
+ EXIT_TEXT
+ EXIT_DATA
+ EXIT_CALL
+ *(.discard)
+ *(.discard.*)
+ *(.interp)
+ }
+}
diff --git a/arch/c6x/lib/Makefile b/arch/c6x/lib/Makefile
new file mode 100644
index 00000000000..ffd3c659091
--- /dev/null
+++ b/arch/c6x/lib/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for arch/c6x/lib/
+#
+
+lib-y := divu.o divi.o pop_rts.o push_rts.o remi.o remu.o strasgi.o llshru.o
+lib-y += llshr.o llshl.o negll.o mpyll.o divremi.o divremu.o
+lib-y += checksum.o csum_64plus.o memcpy_64plus.o strasgi_64plus.o
diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c
new file mode 100644
index 00000000000..67cc93b0b93
--- /dev/null
+++ b/arch/c6x/lib/checksum.c
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+/*
+ * copy from user space while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *csum_err)
+{
+ int missing;
+
+ missing = __copy_from_user(dst, src, len);
+ if (missing) {
+ memset(dst + len - missing, 0, missing);
+ *csum_err = -EFAULT;
+ } else
+ *csum_err = 0;
+
+ return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/* These are from csum_64plus.S */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(ip_compute_csum);
+EXPORT_SYMBOL(ip_fast_csum);
diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S
new file mode 100644
index 00000000000..6d258964722
--- /dev/null
+++ b/arch/c6x/lib/csum_64plus.S
@@ -0,0 +1,419 @@
+;
+; linux/arch/c6x/lib/csum_64plus.s
+;
+; Port on Texas Instruments TMS320C6x architecture
+;
+; Copyright (C) 2006, 2009, 2010, 2011 Texas Instruments Incorporated
+; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+; This program is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License version 2 as
+; published by the Free Software Foundation.
+;
+#include <linux/linkage.h>
+
+;
+;unsigned int csum_partial_copy(const char *src, char * dst,
+; int len, int sum)
+;
+; A4: src
+; B4: dst
+; A6: len
+; B6: sum
+; return csum in A4
+;
+
+ .text
+ENTRY(csum_partial_copy)
+ MVC .S2 ILC,B30
+
+ MV .D1X B6,A31 ; given csum
+ ZERO .D1 A9 ; csum (a side)
+|| ZERO .D2 B9 ; csum (b side)
+|| SHRU .S2X A6,2,B5 ; len / 4
+
+ ;; Check alignment and size
+ AND .S1 3,A4,A1
+|| AND .S2 3,B4,B0
+ OR .L2X B0,A1,B0 ; non aligned condition
+|| MVC .S2 B5,ILC
+|| MVK .D2 1,B2
+|| MV .D1X B5,A1 ; words condition
+ [!A1] B .S1 L8
+ [B0] BNOP .S1 L6,5
+
+ SPLOOP 1
+
+ ;; Main loop for aligned words
+ LDW .D1T1 *A4++,A7
+ NOP 4
+ MV .S2X A7,B7
+|| EXTU .S1 A7,0,16,A16
+ STW .D2T2 B7,*B4++
+|| MPYU .M2 B7,B2,B8
+|| ADD .L1 A16,A9,A9
+ NOP
+ SPKERNEL 8,0
+|| ADD .L2 B8,B9,B9
+
+ ZERO .D1 A1
+|| ADD .L1X A9,B9,A9 ; add csum from a and b sides
+
+L6:
+ [!A1] BNOP .S1 L8,5
+
+ ;; Main loop for non-aligned words
+ SPLOOP 2
+ || MVK .L1 1,A2
+
+ LDNW .D1T1 *A4++,A7
+ NOP 3
+
+ NOP
+ MV .S2X A7,B7
+ || EXTU .S1 A7,0,16,A16
+ || MPYU .M1 A7,A2,A8
+
+ ADD .L1 A16,A9,A9
+ SPKERNEL 6,0
+ || STNW .D2T2 B7,*B4++
+ || ADD .L1 A8,A9,A9
+
+L8: AND .S2X 2,A6,B5
+ CMPGT .L2 B5,0,B0
+ [!B0] BNOP .S1 L82,4
+
+	;; Handle the trailing half-word
+ ZERO .L1 A7
+|| ZERO .D1 A8
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+ LDBU .D1T1 *A4++,A7
+ LDBU .D1T1 *A4++,A8
+ NOP 3
+ SHL .S1 A7,8,A0
+ ADD .S1 A8,A9,A9
+ STB .D2T1 A7,*B4++
+|| ADD .S1 A0,A9,A9
+ STB .D2T1 A8,*B4++
+
+#else
+
+ LDBU .D1T1 *A4++,A7
+ LDBU .D1T1 *A4++,A8
+ NOP 3
+ ADD .S1 A7,A9,A9
+ SHL .S1 A8,8,A0
+
+ STB .D2T1 A7,*B4++
+|| ADD .S1 A0,A9,A9
+ STB .D2T1 A8,*B4++
+
+#endif
+
+	;; Handle the last byte, if any
+L82: AND .S2X 1,A6,B0
+ [!B0] BNOP .S1 L9,5
+
+|| ZERO .L1 A7
+
+L83: LDBU .D1T1 *A4++,A7
+ NOP 4
+
+ MV .L2X A7,B7
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+
+ STB .D2T2 B7,*B4++
+|| SHL .S1 A7,8,A7
+ ADD .S1 A7,A9,A9
+
+#else
+
+ STB .D2T2 B7,*B4++
+|| ADD .S1 A7,A9,A9
+
+#endif
+
+ ;; Fold the csum
+L9: SHRU .S2X A9,16,B0
+ [!B0] BNOP .S1 L10,5
+
+L91: SHRU .S2X A9,16,B4
+|| EXTU .S1 A9,16,16,A3
+ ADD .D1X A3,B4,A9
+
+ SHRU .S1 A9,16,A0
+ [A0] BNOP .S1 L91,5
+
+L10: ADD .D1 A31,A9,A9
+ MV .D1 A9,A4
+
+ BNOP .S2 B3,4
+ MVC .S2 B30,ILC
+ENDPROC(csum_partial_copy)
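+
+;
+; The fold loop at L9/L91 above reduces the 32-bit accumulator to 16 bits
+; by repeatedly adding the high half-word into the low half-word, e.g.
+; (illustrative) 0x1a2b3c4d -> 0x1a2b + 0x3c4d = 0x5678.
+;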
+
+;
+;unsigned short
+;ip_fast_csum(unsigned char *iph, unsigned int ihl)
+;{
+; unsigned int checksum = 0;
+; unsigned short *tosum = (unsigned short *) iph;
+; int len;
+;
+; len = ihl*4;
+;
+; if (len <= 0)
+; return 0;
+;
+; while(len) {
+; len -= 2;
+; checksum += *tosum++;
+; }
+; if (len & 1)
+; checksum += *(unsigned char*) tosum;
+;
+; while(checksum >> 16)
+; checksum = (checksum & 0xffff) + (checksum >> 16);
+;
+; return ~checksum;
+;}
+;
+; A4: iph
+; B4: ihl
+; return checksum in A4
+;
+ .text
+
+ENTRY(ip_fast_csum)
+ ZERO .D1 A5
+ || MVC .S2 ILC,B30
+ SHL .S2 B4,2,B0
+ CMPGT .L2 B0,0,B1
+ [!B1] BNOP .S1 L15,4
+ [!B1] ZERO .D1 A3
+
+ [!B0] B .S1 L12
+ SHRU .S2 B0,1,B0
+ MVC .S2 B0,ILC
+ NOP 3
+
+ SPLOOP 1
+ LDHU .D1T1 *A4++,A3
+ NOP 3
+ NOP
+ SPKERNEL 5,0
+ || ADD .L1 A3,A5,A5
+
+L12: SHRU .S1 A5,16,A0
+ [!A0] BNOP .S1 L14,5
+
+L13: SHRU .S2X A5,16,B4
+ EXTU .S1 A5,16,16,A3
+ ADD .D1X A3,B4,A5
+ SHRU .S1 A5,16,A0
+ [A0] BNOP .S1 L13,5
+
+L14: NOT .D1 A5,A3
+ EXTU .S1 A3,16,16,A3
+
+L15: BNOP .S2 B3,3
+ MVC .S2 B30,ILC
+ MV .D1 A3,A4
+ENDPROC(ip_fast_csum)
+
+;
+;unsigned short
+;do_csum(unsigned char *buff, unsigned int len)
+;{
+; int odd, count;
+; unsigned int result = 0;
+;
+; if (len <= 0)
+; goto out;
+; odd = 1 & (unsigned long) buff;
+; if (odd) {
+;#ifdef __LITTLE_ENDIAN
+; result += (*buff << 8);
+;#else
+; result = *buff;
+;#endif
+; len--;
+; buff++;
+; }
+; count = len >> 1; /* nr of 16-bit words.. */
+; if (count) {
+; if (2 & (unsigned long) buff) {
+; result += *(unsigned short *) buff;
+; count--;
+; len -= 2;
+; buff += 2;
+; }
+; count >>= 1; /* nr of 32-bit words.. */
+; if (count) {
+; unsigned int carry = 0;
+; do {
+; unsigned int w = *(unsigned int *) buff;
+; count--;
+; buff += 4;
+; result += carry;
+; result += w;
+; carry = (w > result);
+; } while (count);
+; result += carry;
+; result = (result & 0xffff) + (result >> 16);
+; }
+; if (len & 2) {
+; result += *(unsigned short *) buff;
+; buff += 2;
+; }
+; }
+; if (len & 1)
+;#ifdef __LITTLE_ENDIAN
+; result += *buff;
+;#else
+; result += (*buff << 8);
+;#endif
+; result = (result & 0xffff) + (result >> 16);
+; /* add up carry.. */
+; result = (result & 0xffff) + (result >> 16);
+; if (odd)
+; result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+;out:
+; return result;
+;}
+;
+; A4: buff
+; B4: len
+; return checksum in A4
+;
+
+ENTRY(do_csum)
+ CMPGT .L2 B4,0,B0
+ [!B0] BNOP .S1 L26,3
+ EXTU .S1 A4,31,31,A0
+
+ MV .L1 A0,A3
+|| MV .S1X B3,A5
+|| MV .L2 B4,B3
+|| ZERO .D1 A1
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ [A0] SUB .L2 B3,1,B3
+|| [A0] LDBU .D1T1 *A4++,A1
+#else
+ [!A0] BNOP .S1 L21,5
+|| [A0] LDBU .D1T1 *A4++,A0
+ SUB .L2 B3,1,B3
+|| SHL .S1 A0,8,A1
+L21:
+#endif
+ SHR .S2 B3,1,B0
+ [!B0] BNOP .S1 L24,3
+ MVK .L1 2,A0
+ AND .L1 A4,A0,A0
+
+ [!A0] BNOP .S1 L22,5
+|| [A0] LDHU .D1T1 *A4++,A0
+ SUB .L2 B0,1,B0
+|| SUB .S2 B3,2,B3
+|| ADD .L1 A0,A1,A1
+L22:
+ SHR .S2 B0,1,B0
+|| ZERO .L1 A0
+
+ [!B0] BNOP .S1 L23,5
+|| [B0] MVC .S2 B0,ILC
+
+ SPLOOP 3
+ SPMASK L1
+|| MV .L1 A1,A2
+|| LDW .D1T1 *A4++,A1
+
+ NOP 4
+ ADD .L1 A0,A1,A0
+ ADD .L1 A2,A0,A2
+
+ SPKERNEL 1,2
+|| CMPGTU .L1 A1,A2,A0
+
+ ADD .L1 A0,A2,A6
+ EXTU .S1 A6,16,16,A7
+ SHRU .S2X A6,16,B0
+ NOP 1
+ ADD .L1X A7,B0,A1
+L23:
+ MVK .L2 2,B0
+ AND .L2 B3,B0,B0
+ [B0] LDHU .D1T1 *A4++,A0
+ NOP 4
+ [B0] ADD .L1 A0,A1,A1
+L24:
+ EXTU .S2 B3,31,31,B0
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ [!B0] BNOP .S1 L25,4
+|| [B0] LDBU .D1T1 *A4,A0
+ SHL .S1 A0,8,A0
+ ADD .L1 A0,A1,A1
+L25:
+#else
+ [B0] LDBU .D1T1 *A4,A0
+ NOP 4
+ [B0] ADD .L1 A0,A1,A1
+#endif
+ EXTU .S1 A1,16,16,A0
+ SHRU .S2X A1,16,B0
+ NOP 1
+ ADD .L1X A0,B0,A0
+ SHRU .S1 A0,16,A1
+ ADD .L1 A0,A1,A0
+ EXTU .S1 A0,16,16,A1
+ EXTU .S1 A1,16,24,A2
+
+ EXTU .S1 A1,24,16,A0
+|| MV .L2X A3,B0
+
+ [B0] OR .L1 A0,A2,A1
+L26:
+ NOP 1
+ BNOP .S2X A5,4
+ MV .L1 A1,A4
+ENDPROC(do_csum)
+
+;__wsum csum_partial(const void *buff, int len, __wsum wsum)
+;{
+; unsigned int sum = (__force unsigned int)wsum;
+; unsigned int result = do_csum(buff, len);
+;
+; /* add in old sum, and carry.. */
+; result += sum;
+; if (sum > result)
+; result += 1;
+; return (__force __wsum)result;
+;}
+;
+ENTRY(csum_partial)
+ MV .L1X B3,A9
+|| CALLP .S2 do_csum,B3
+|| MV .S1 A6,A8
+ BNOP .S2X A9,2
+ ADD .L1 A8,A4,A1
+ CMPGTU .L1 A8,A1,A0
+ ADD .L1 A1,A0,A4
+ENDPROC(csum_partial)
+
+;unsigned short
+;ip_compute_csum(unsigned char *buff, unsigned int len)
+;
+; A4: buff
+; B4: len
+; return checksum in A4
+
+ENTRY(ip_compute_csum)
+ MV .L1X B3,A9
+|| CALLP .S2 do_csum,B3
+ BNOP .S2X A9,3
+ NOT .S1 A4,A4
+ CLR .S1 A4,16,31,A4
+ENDPROC(ip_compute_csum)
diff --git a/arch/c6x/lib/divi.S b/arch/c6x/lib/divi.S
new file mode 100644
index 00000000000..4bde924f2a9
--- /dev/null
+++ b/arch/c6x/lib/divi.S
@@ -0,0 +1,53 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+	;; call to divu. It stores B3 on the stack.
+
+ .text
+ENTRY(__c6xabi_divi)
+ call .s2 __c6xabi_divu
+|| mv .d2 B3, B5
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B1
+
+ [A1] neg .l1 A4, A4
+|| [B1] neg .l2 B4, B4
+|| xor .s1x A1, B1, A1
+ [A1] addkpc .s2 _divu_ret, B3, 4
+_divu_ret:
+ neg .l1 A4, A4
+|| mv .l2 B3,B5
+|| ret .s2 B5
+ nop 5
+ENDPROC(__c6xabi_divi)
diff --git a/arch/c6x/lib/divremi.S b/arch/c6x/lib/divremi.S
new file mode 100644
index 00000000000..64bc5aa95ad
--- /dev/null
+++ b/arch/c6x/lib/divremi.S
@@ -0,0 +1,46 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_divremi)
+ stw .d2t2 B3, *B15--[2]
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B2
+|| mv .s1 A4, A5
+|| call .s2 __c6xabi_divu
+
+ [A1] neg .l1 A4, A4
+|| [B2] neg .l2 B4, B4
+|| xor .s2x B2, A1, B0
+|| mv .d2 B4, B2
+
+ [B0] addkpc .s2 _divu_ret_1, B3, 1
+ [!B0] addkpc .s2 _divu_ret_2, B3, 1
+ nop 2
+_divu_ret_1:
+ neg .l1 A4, A4
+_divu_ret_2:
+ ldw .d2t2 *++B15[2], B3
+
+ mpy32 .m1x A4, B2, A6
+ nop 3
+ ret .s2 B3
+ sub .l1 A5, A6, A5
+ nop 4
+ENDPROC(__c6xabi_divremi)
diff --git a/arch/c6x/lib/divremu.S b/arch/c6x/lib/divremu.S
new file mode 100644
index 00000000000..caa9f23ee16
--- /dev/null
+++ b/arch/c6x/lib/divremu.S
@@ -0,0 +1,87 @@
+;; Copyright 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_divremu)
+ ;; We use a series of up to 31 subc instructions. First, we find
+ ;; out how many leading zero bits there are in the divisor. This
+	;; gives us both a shift count for aligning (shifting) the divisor,
+	;; and the number of times we have to execute subc.
+
+ ;; At the end, we have both the remainder and most of the quotient
+ ;; in A4. The top bit of the quotient is computed first and is
+ ;; placed in A2.
+
+ ;; Return immediately if the dividend is zero. Setting B4 to 1
+ ;; is a trick to allow us to leave the following insns in the jump
+ ;; delay slot without affecting the result.
+ mv .s2x A4, B1
+
+ [b1] lmbd .l2 1, B4, B1
+||[!b1] b .s2 B3 ; RETURN A
+||[!b1] mvk .d2 1, B4
+
+||[!b1] zero .s1 A5
+ mv .l1x B1, A6
+|| shl .s2 B4, B1, B4
+
+ ;; The loop performs a maximum of 28 steps, so we do the
+ ;; first 3 here.
+ cmpltu .l1x A4, B4, A2
+ [!A2] sub .l1x A4, B4, A4
+|| shru .s2 B4, 1, B4
+|| xor .s1 1, A2, A2
+
+ shl .s1 A2, 31, A2
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+
+ ;; RETURN A may happen here (note: must happen before the next branch)
+__divremu0:
+ cmpgt .l2 B1, 7, B0
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+|| [b0] b .s1 __divremu0
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+|| mvk .s1 32, A1
+ sub .l1 A1, A6, A6
+|| extu .s1 A4, A6, A5
+ shl .s1 A4, A6, A4
+ shru .s1 A4, 1, A4
+|| sub .l1 A6, 1, A6
+ or .l1 A2, A4, A4
+ shru .s1 A4, A6, A4
+ nop
+ENDPROC(__c6xabi_divremu)
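
For readers unfamiliar with the subc idiom, a minimal C sketch (not part of this
patch) of the shift-and-subtract division that the lmbd/subc sequence implements;
it returns the quotient and leaves the remainder behind, much as __c6xabi_divremu
leaves them in A4/A5. Names are illustrative and divide-by-zero is left undefined,
as in the assembly:

	static unsigned int udivrem_sketch(unsigned int n, unsigned int d,
					   unsigned int *rem)
	{
		unsigned long long dd;
		unsigned int q = 0;
		int i;

		for (i = 31; i >= 0; i--) {
			dd = (unsigned long long)d << i;
			/* one conditional subtract, conceptually a 'subc' step */
			if (dd <= n) {
				n -= (unsigned int)dd;
				q |= 1u << i;
			}
		}
		if (rem)
			*rem = n;	/* remainder is what is left of the dividend */
		return q;
	}

The assembly avoids the fixed 32 iterations by using lmbd to count the divisor's
leading zeros up front, so only as many subc steps run as there are significant
quotient bits.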
diff --git a/arch/c6x/lib/divu.S b/arch/c6x/lib/divu.S
new file mode 100644
index 00000000000..64af3c006dd
--- /dev/null
+++ b/arch/c6x/lib/divu.S
@@ -0,0 +1,98 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+ ;; call to divu. It stores B3 on the stack.
+
+ .text
+ENTRY(__c6xabi_divu)
+ ;; We use a series of up to 31 subc instructions. First, we find
+ ;; out how many leading zero bits there are in the divisor. This
+ ;; gives us both a shift count for aligning (shifting) the divisor
+ ;; to the top of the register, and the number of times we have
+ ;; to execute subc.
+
+ ;; At the end, we have both the remainder and most of the quotient
+ ;; in A4. The top bit of the quotient is computed first and is
+ ;; placed in A2.
+
+ ;; Return immediately if the dividend is zero.
+ mv .s2x A4, B1
+ [B1] lmbd .l2 1, B4, B1
+|| [!B1] b .s2 B3 ; RETURN A
+|| [!B1] mvk .d2 1, B4
+ mv .l1x B1, A6
+|| shl .s2 B4, B1, B4
+
+ ;; The loop performs a maximum of 28 steps, so we do the
+ ;; first 3 here.
+ cmpltu .l1x A4, B4, A2
+ [!A2] sub .l1x A4, B4, A4
+|| shru .s2 B4, 1, B4
+|| xor .s1 1, A2, A2
+
+ shl .s1 A2, 31, A2
+|| [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+
+ ;; RETURN A may happen here (note: must happen before the next branch)
+_divu_loop:
+ cmpgt .l2 B1, 7, B0
+|| [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+|| [B0] b .s1 _divu_loop
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+|| mvk .s1 32, A1
+ sub .l1 A1, A6, A6
+ shl .s1 A4, A6, A4
+ shru .s1 A4, 1, A4
+|| sub .l1 A6, 1, A6
+ or .l1 A2, A4, A4
+ shru .s1 A4, A6, A4
+ nop
+ENDPROC(__c6xabi_divu)
diff --git a/arch/c6x/lib/llshl.S b/arch/c6x/lib/llshl.S
new file mode 100644
index 00000000000..7b105e2d1b7
--- /dev/null
+++ b/arch/c6x/lib/llshl.S
@@ -0,0 +1,37 @@
+;; Copyright (C) 2010 Texas Instruments Incorporated
+;; Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; uint64_t __c6xabi_llshl(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_llshl)
+ mv .l1x B4,A1
+ [!A1] b .s2 B3 ; just return if zero shift
+ mvk .s1 32,A0
+ sub .d1 A0,A1,A0
+ cmplt .l1 0,A0,A2
+ [A2] shru .s1 A4,A0,A0
+ [!A2] neg .l1 A0,A5
+|| [A2] shl .s1 A5,A1,A5
+ [!A2] shl .s1 A4,A5,A5
+|| [A2] or .d1 A5,A0,A5
+|| [!A2] mvk .l1 0,A4
+ [A2] shl .s1 A4,A1,A4
+ bnop .s2 B3,5
+ENDPROC(__c6xabi_llshl)
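
For reference, a plain C model of this helper (and, with the shift direction
mirrored, of llshr/llshru below); not part of the patch, and shift counts of 64 or
more are left undefined as in the assembly:

	static unsigned long long llshl_sketch(unsigned long long val, unsigned int shift)
	{
		unsigned int lo = (unsigned int)val;
		unsigned int hi = (unsigned int)(val >> 32);

		if (shift == 0)
			return val;		/* early return, as in the asm */

		if (shift < 32) {
			/* bits move from the low word into the high word */
			hi = (hi << shift) | (lo >> (32 - shift));
			lo <<= shift;
		} else {
			/* the whole result comes from the low input word */
			hi = lo << (shift - 32);
			lo = 0;
		}
		return ((unsigned long long)hi << 32) | lo;
	}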
diff --git a/arch/c6x/lib/llshr.S b/arch/c6x/lib/llshr.S
new file mode 100644
index 00000000000..fde1bec7cf5
--- /dev/null
+++ b/arch/c6x/lib/llshr.S
@@ -0,0 +1,38 @@
+;; Copyright (C) 2010 Texas Instruments Incorporated
+;; Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; uint64_t __c6xabi_llshr(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_llshr)
+ mv .l1x B4,A1
+ [!A1] b .s2 B3 ; return if zero shift count
+ mvk .s1 32,A0
+ sub .d1 A0,A1,A0
+ cmplt .l1 0,A0,A2
+ [A2] shl .s1 A5,A0,A0
+ nop
+ [!A2] neg .l1 A0,A4
+|| [A2] shru .s1 A4,A1,A4
+ [!A2] shr .s1 A5,A4,A4
+|| [A2] or .d1 A4,A0,A4
+ [!A2] shr .s1 A5,0x1f,A5
+ [A2] shr .s1 A5,A1,A5
+ bnop .s2 B3,5
+ENDPROC(__c6xabi_llshr)
diff --git a/arch/c6x/lib/llshru.S b/arch/c6x/lib/llshru.S
new file mode 100644
index 00000000000..596ae3ff5c0
--- /dev/null
+++ b/arch/c6x/lib/llshru.S
@@ -0,0 +1,38 @@
+;; Copyright (C) 2010 Texas Instruments Incorporated
+;; Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; uint64_t __c6xabi_llshru(uint64_t val, uint shift)
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_llshru)
+ mv .l1x B4,A1
+ [!A1] b .s2 B3 ; return if zero shift count
+ mvk .s1 32,A0
+ sub .d1 A0,A1,A0
+ cmplt .l1 0,A0,A2
+ [A2] shl .s1 A5,A0,A0
+ nop
+ [!A2] neg .l1 A0,A4
+|| [A2] shru .s1 A4,A1,A4
+ [!A2] shru .s1 A5,A4,A4
+|| [A2] or .d1 A4,A0,A4
+|| [!A2] mvk .l1 0,A5
+ [A2] shru .s1 A5,A1,A5
+ bnop .s2 B3,5
+ENDPROC(__c6xabi_llshru)
diff --git a/arch/c6x/lib/memcpy_64plus.S b/arch/c6x/lib/memcpy_64plus.S
new file mode 100644
index 00000000000..0bbc2cbf931
--- /dev/null
+++ b/arch/c6x/lib/memcpy_64plus.S
@@ -0,0 +1,46 @@
+; Port on Texas Instruments TMS320C6x architecture
+;
+; Copyright (C) 2006, 2009, 2010 Texas Instruments Incorporated
+; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+;
+; This program is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License version 2 as
+; published by the Free Software Foundation.
+;
+
+#include <linux/linkage.h>
+
+ .text
+
+ENTRY(memcpy)
+ AND .L1 0x1,A6,A0
+ || AND .S1 0x2,A6,A1
+ || AND .L2X 0x4,A6,B0
+ || MV .D1 A4,A3
+ || MVC .S2 ILC,B2
+
+ [A0] LDB .D2T1 *B4++,A5
+ [A1] LDB .D2T1 *B4++,A7
+ [A1] LDB .D2T1 *B4++,A8
+ [B0] LDNW .D2T1 *B4++,A9
+ || SHRU .S2X A6,0x3,B1
+ [!B1] BNOP .S2 B3,1
+
+ [A0] STB .D1T1 A5,*A3++
+ ||[B1] MVC .S2 B1,ILC
+ [A1] STB .D1T1 A7,*A3++
+ [A1] STB .D1T1 A8,*A3++
+ [B0] STNW .D1T1 A9,*A3++ ; return when len < 8
+
+ SPLOOP 2
+
+ LDNDW .D2T1 *B4++,A9:A8
+ NOP 3
+
+ NOP
+ SPKERNEL 0,0
+ || STNDW .D1T1 A9:A8,*A3++
+
+ BNOP .S2 B3,4
+ MVC .S2 B2,ILC
+ENDPROC(memcpy)
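
A rough C equivalent of the copy strategy above (not part of the patch): the low
bits of the length select 1-, 2- and 4-byte head copies, and the remaining
length/8 doublewords are moved by the SPLOOP hardware loop:

	static void *memcpy_sketch(void *dst, const void *src, unsigned int n)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;
		unsigned int i, chunks = n >> 3;

		if (n & 1)			/* odd byte */
			*d++ = *s++;
		if (n & 2) {			/* odd halfword */
			*d++ = *s++;
			*d++ = *s++;
		}
		if (n & 4)			/* odd word */
			for (i = 0; i < 4; i++)
				*d++ = *s++;

		while (chunks--)		/* 8 bytes per SPLOOP iteration */
			for (i = 0; i < 8; i++)
				*d++ = *s++;

		return dst;
	}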
diff --git a/arch/c6x/lib/mpyll.S b/arch/c6x/lib/mpyll.S
new file mode 100644
index 00000000000..f1034418b4d
--- /dev/null
+++ b/arch/c6x/lib/mpyll.S
@@ -0,0 +1,49 @@
+;; Copyright (C) 2010 Texas Instruments Incorporated
+;; Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ ;; uint64_t __c6xabi_mpyll(uint64_t x, uint64_t y)
+ ;;
+ ;; 64x64 multiply
+ ;; First compute partial results using 32-bit parts of x and y:
+ ;;
+ ;; b63 b32 b31 b0
+ ;; -----------------------------
+ ;; | 1 | 0 |
+ ;; -----------------------------
+ ;;
+ ;; P0 = X0*Y0
+ ;; P1 = X0*Y1 + X1*Y0
+ ;; P2 = X1*Y1
+ ;;
+ ;; result = (P2 << 64) + (P1 << 32) + P0
+ ;;
+ ;; Since the result is also 64-bit, we can skip the P2 term.
+
+ .text
+ENTRY(__c6xabi_mpyll)
+ mpy32u .m1x A4,B4,A1:A0 ; X0*Y0
+ b .s2 B3
+ || mpy32u .m2x B5,A4,B1:B0 ; X0*Y1 (don't need upper 32-bits)
+ || mpy32u .m1x A5,B4,A3:A2 ; X1*Y0 (don't need upper 32-bits)
+ nop
+ nop
+ mv .s1 A0,A4
+ add .l1x A2,B0,A5
+ add .s1 A1,A5,A5
+ENDPROC(__c6xabi_mpyll)
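
The same computation in plain C, for reference only (the compiler normally emits
calls to this helper itself):

	static unsigned long long mpyll_sketch(unsigned long long x, unsigned long long y)
	{
		unsigned int x0 = (unsigned int)x, x1 = (unsigned int)(x >> 32);
		unsigned int y0 = (unsigned int)y, y1 = (unsigned int)(y >> 32);
		unsigned long long p0 = (unsigned long long)x0 * y0;	/* P0 = X0*Y0 */
		unsigned int p1 = x0 * y1 + x1 * y0;	/* low half of P1 */

		/* P2 = X1*Y1 only affects bits 64 and up, so it is dropped */
		return p0 + ((unsigned long long)p1 << 32);
	}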
diff --git a/arch/c6x/lib/negll.S b/arch/c6x/lib/negll.S
new file mode 100644
index 00000000000..82f4bcec9af
--- /dev/null
+++ b/arch/c6x/lib/negll.S
@@ -0,0 +1,31 @@
+;; Copyright (C) 2010 Texas Instruments Incorporated
+;; Contributed by Mark Salter <msalter@redhat.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+;; int64_t __c6xabi_negll(int64_t val)
+
+#include <linux/linkage.h>
+
+ .text
+ENTRY(__c6xabi_negll)
+ b .s2 B3
+ mvk .l1 0,A0
+ subu .l1 A0,A4,A3:A2
+ sub .l1 A0,A5,A0
+|| ext .s1 A3,24,24,A5
+ add .l1 A5,A0,A5
+ mv .s1 A2,A4
+ENDPROC(__c6xabi_negll)
diff --git a/arch/c6x/lib/pop_rts.S b/arch/c6x/lib/pop_rts.S
new file mode 100644
index 00000000000..d7d96c70e9e
--- /dev/null
+++ b/arch/c6x/lib/pop_rts.S
@@ -0,0 +1,32 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+
+ENTRY(__c6xabi_pop_rts)
+ lddw .d2t2 *++B15, B3:B2
+ lddw .d2t1 *++B15, A11:A10
+ lddw .d2t2 *++B15, B11:B10
+ lddw .d2t1 *++B15, A13:A12
+ lddw .d2t2 *++B15, B13:B12
+ lddw .d2t1 *++B15, A15:A14
+|| b .s2 B3
+ ldw .d2t2 *++B15[2], B14
+ nop 4
+ENDPROC(__c6xabi_pop_rts)
diff --git a/arch/c6x/lib/push_rts.S b/arch/c6x/lib/push_rts.S
new file mode 100644
index 00000000000..f6e3db3b606
--- /dev/null
+++ b/arch/c6x/lib/push_rts.S
@@ -0,0 +1,31 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+
+ENTRY(__c6xabi_push_rts)
+ stw .d2t2 B14, *B15--[2]
+ stdw .d2t1 A15:A14, *B15--
+|| b .s2x A3
+ stdw .d2t2 B13:B12, *B15--
+ stdw .d2t1 A13:A12, *B15--
+ stdw .d2t2 B11:B10, *B15--
+ stdw .d2t1 A11:A10, *B15--
+ stdw .d2t2 B3:B2, *B15--
+ENDPROC(__c6xabi_push_rts)
diff --git a/arch/c6x/lib/remi.S b/arch/c6x/lib/remi.S
new file mode 100644
index 00000000000..6f2ca18c3f9
--- /dev/null
+++ b/arch/c6x/lib/remi.S
@@ -0,0 +1,64 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+ ;; call to divu. It stores B3 on the stack.
+
+ .text
+
+ENTRY(__c6xabi_remi)
+ stw .d2t2 B3, *B15--[2]
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B2
+|| mv .s1 A4, A5
+|| call .s2 __c6xabi_divu
+
+ [A1] neg .l1 A4, A4
+|| [B2] neg .l2 B4, B4
+|| xor .s2x B2, A1, B0
+|| mv .d2 B4, B2
+
+ [B0] addkpc .s2 _divu_ret_1, B3, 1
+ [!B0] addkpc .s2 _divu_ret_2, B3, 1
+ nop 2
+_divu_ret_1:
+ neg .l1 A4, A4
+_divu_ret_2:
+ ldw .d2t2 *++B15[2], B3
+
+ mpy32 .m1x A4, B2, A6
+ nop 3
+ ret .s2 B3
+ sub .l1 A5, A6, A4
+ nop 4
+ENDPROC(__c6xabi_remi)
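
At the C level this helper computes the usual truncating-division identity; a
sketch under the same sign conventions (not part of the patch, and the plain '/'
below stands where the assembly calls __c6xabi_divu):

	static int remi_sketch(int a, int b)
	{
		unsigned int ua = a < 0 ? -(unsigned int)a : (unsigned int)a;
		unsigned int ub = b < 0 ? -(unsigned int)b : (unsigned int)b;
		unsigned int uq = ua / ub;	/* the asm calls __c6xabi_divu here */
		int q = ((a < 0) != (b < 0)) ? -(int)uq : (int)uq;

		return a - q * b;	/* the mpy32/sub pair at the end of the asm */
	}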
diff --git a/arch/c6x/lib/remu.S b/arch/c6x/lib/remu.S
new file mode 100644
index 00000000000..3fae719185a
--- /dev/null
+++ b/arch/c6x/lib/remu.S
@@ -0,0 +1,82 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+ ;; call to divu. It stores B3 on the stack.
+
+
+ .text
+
+ENTRY(__c6xabi_remu)
+ ;; The ABI seems designed to prevent these functions calling each other,
+ ;; so we duplicate most of the divsi3 code here.
+ mv .s2x A4, B1
+ lmbd .l2 1, B4, B1
+|| [!B1] b .s2 B3 ; RETURN A
+|| [!B1] mvk .d2 1, B4
+
+ mv .l1x B1, A7
+|| shl .s2 B4, B1, B4
+
+ cmpltu .l1x A4, B4, A1
+ [!A1] sub .l1x A4, B4, A4
+ shru .s2 B4, 1, B4
+
+_remu_loop:
+ cmpgt .l2 B1, 7, B0
+|| [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ ;; RETURN A may happen here (note: must happen before the next branch)
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+|| [B0] b .s1 _remu_loop
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+ [B1] subc .l1x A4,B4,A4
+|| [B1] add .s2 -1, B1, B1
+ [B1] subc .l1x A4,B4,A4
+
+ extu .s1 A4, A7, A4
+ nop 2
+ENDPROC(__c6xabi_remu)
diff --git a/arch/c6x/lib/strasgi.S b/arch/c6x/lib/strasgi.S
new file mode 100644
index 00000000000..de274076553
--- /dev/null
+++ b/arch/c6x/lib/strasgi.S
@@ -0,0 +1,89 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+
+ENTRY(__c6xabi_strasgi)
+ ;; This is essentially memcpy, with alignment known to be at least
+ ;; 4, and the size a multiple of 4 greater than or equal to 28.
+ ldw .d2t1 *B4++, A0
+|| mvk .s2 16, B1
+ ldw .d2t1 *B4++, A1
+|| mvk .s2 20, B2
+|| sub .d1 A6, 24, A6
+ ldw .d2t1 *B4++, A5
+ ldw .d2t1 *B4++, A7
+|| mv .l2x A6, B7
+ ldw .d2t1 *B4++, A8
+ ldw .d2t1 *B4++, A9
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+_strasgi_loop:
+ stw .d1t2 B5, *A4++
+|| [B0] ldw .d2t1 *B4++, A0
+|| mv .s2x A1, B5
+|| mv .l2 B7, B6
+
+ [B0] sub .d2 B6, 24, B7
+|| [B0] b .s2 _strasgi_loop
+|| cmpltu .l2 B1, B6, B0
+
+ [B0] ldw .d2t1 *B4++, A1
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A5, B5
+|| cmpltu .l2 12, B6, B0
+
+ [B0] ldw .d2t1 *B4++, A5
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A7, B5
+|| cmpltu .l2 8, B6, B0
+
+ [B0] ldw .d2t1 *B4++, A7
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A8, B5
+|| cmpltu .l2 4, B6, B0
+
+ [B0] ldw .d2t1 *B4++, A8
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A9, B5
+|| cmpltu .l2 0, B6, B0
+
+ [B0] ldw .d2t1 *B4++, A9
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+ ;; loop back branch happens here
+
+ cmpltu .l2 B1, B6, B0
+|| ret .s2 b3
+
+ [B0] stw .d1t1 A1, *A4++
+|| cmpltu .l2 12, B6, B0
+ [B0] stw .d1t1 A5, *A4++
+|| cmpltu .l2 8, B6, B0
+ [B0] stw .d1t1 A7, *A4++
+|| cmpltu .l2 4, B6, B0
+ [B0] stw .d1t1 A8, *A4++
+|| cmpltu .l2 0, B6, B0
+ [B0] stw .d1t1 A9, *A4++
+
+ ;; return happens here
+ENDPROC(__c6xabi_strasgi)
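
Stripped of the software pipelining, the operation is just a word copy; a C sketch
under the stated preconditions (not part of the patch):

	static void strasgi_sketch(unsigned int *dst, const unsigned int *src,
				   unsigned int len)
	{
		/* dst/src are at least 4-byte aligned, len is a multiple of 4 >= 28 */
		while (len) {
			*dst++ = *src++;
			len -= 4;
		}
	}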
diff --git a/arch/c6x/lib/strasgi_64plus.S b/arch/c6x/lib/strasgi_64plus.S
new file mode 100644
index 00000000000..c9fd159b5fa
--- /dev/null
+++ b/arch/c6x/lib/strasgi_64plus.S
@@ -0,0 +1,39 @@
+;; Copyright 2010 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+;;
+;; This program is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2 of the License, or
+;; (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; if not, write to the Free Software
+;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+#include <linux/linkage.h>
+
+ .text
+
+ENTRY(__c6xabi_strasgi_64plus)
+ shru .s2x a6, 2, b31
+|| mv .s1 a4, a30
+|| mv .d2 b4, b30
+
+ add .s2 -4, b31, b31
+
+ sploopd 1
+|| mvc .s2 b31, ilc
+ ldw .d2t2 *b30++, b31
+ nop 4
+ mv .s1x b31,a31
+ spkernel 6, 0
+|| stw .d1t1 a31, *a30++
+
+ ret .s2 b3
+ nop 5
+ENDPROC(__c6xabi_strasgi_64plus)
diff --git a/arch/c6x/mm/Makefile b/arch/c6x/mm/Makefile
new file mode 100644
index 00000000000..136a97576c6
--- /dev/null
+++ b/arch/c6x/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux c6x-specific parts of the memory manager.
+#
+
+obj-y := init.o dma-coherent.o
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
new file mode 100644
index 00000000000..4187e518037
--- /dev/null
+++ b/arch/c6x/mm/dma-coherent.c
@@ -0,0 +1,143 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA uncached mapping support.
+ *
+ * Using code pulled from ARM
+ * Copyright (C) 2000-2004 Russell King
+ *
+ */
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+
+#include <asm/page.h>
+
+/*
+ * DMA coherent memory management; the region can be redefined using
+ * the memdma= kernel command line parameter
+ */
+
+/* none by default */
+static phys_addr_t dma_base;
+static u32 dma_size;
+static u32 dma_pages;
+
+static unsigned long *dma_bitmap;
+
+/* bitmap lock */
+static DEFINE_SPINLOCK(dma_lock);
+
+/*
+ * Return a DMA coherent and contiguous memory chunk from the DMA memory
+ */
+static inline u32 __alloc_dma_pages(int order)
+{
+ unsigned long flags;
+ u32 pos;
+
+ spin_lock_irqsave(&dma_lock, flags);
+ pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
+ spin_unlock_irqrestore(&dma_lock, flags);
+
+ return dma_base + (pos << PAGE_SHIFT);
+}
+
+static void __free_dma_pages(u32 addr, int order)
+{
+ unsigned long flags;
+ u32 pos = (addr - dma_base) >> PAGE_SHIFT;
+
+ if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
+ printk(KERN_ERR "%s: freeing outside range.\n", __func__);
+ BUG();
+ }
+
+ spin_lock_irqsave(&dma_lock, flags);
+ bitmap_release_region(dma_bitmap, pos, order);
+ spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Allocate DMA coherent memory space and return both the kernel
+ * virtual and DMA address for that space.
+ */
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
+{
+ u32 paddr;
+ int order;
+
+ if (!dma_size || !size)
+ return NULL;
+
+ order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+ paddr = __alloc_dma_pages(order);
+
+ if (handle)
+ *handle = paddr;
+
+ if (!paddr)
+ return NULL;
+
+ return phys_to_virt(paddr);
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * Free DMA coherent memory as defined by the above mapping.
+ */
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ int order;
+
+ if (!dma_size || !size)
+ return;
+
+ order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+ __free_dma_pages(virt_to_phys(vaddr), order);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+/*
+ * Initialise the coherent DMA memory allocator using the given uncached region.
+ */
+void __init coherent_mem_init(phys_addr_t start, u32 size)
+{
+ phys_addr_t bitmap_phys;
+
+ if (!size)
+ return;
+
+ printk(KERN_INFO
+ "Coherent memory (DMA) region start=0x%x size=0x%x\n",
+ start, size);
+
+ dma_base = start;
+ dma_size = size;
+
+ /* allocate bitmap */
+ dma_pages = dma_size >> PAGE_SHIFT;
+ if (dma_size & (PAGE_SIZE - 1))
+ ++dma_pages;
+
+ bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+ sizeof(long));
+
+ dma_bitmap = phys_to_virt(bitmap_phys);
+ memset(dma_bitmap, 0, dma_pages * PAGE_SIZE);
+}
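
For completeness, a hypothetical driver-side use of this allocator; the device
pointer, buffer size and error handling are illustrative only and not part of the
patch:

	#include <linux/dma-mapping.h>

	static int example_setup_dma_buffer(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *buf;

		buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... hand dma_handle to the hardware, touch buf from the CPU ... */

		dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
		return 0;
	}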
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
new file mode 100644
index 00000000000..89395f09648
--- /dev/null
+++ b/arch/c6x/mm/init.c
@@ -0,0 +1,113 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blkdev.h>
+#endif
+#include <linux/initrd.h>
+
+#include <asm/sections.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * It allocates the zero page and sets up a single ZONE_NORMAL zone
+ * covering all of kernel memory.
+ */
+void __init paging_init(void)
+{
+ struct pglist_data *pgdat = NODE_DATA(0);
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ /*
+ * Set up user data space
+ */
+ set_fs(KERNEL_DS);
+
+ /*
+ * Define zones
+ */
+ zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
+ pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT;
+
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ int codek, datak;
+ unsigned long tmp;
+ unsigned long len = memory_end - memory_start;
+
+ high_memory = (void *)(memory_end & PAGE_MASK);
+
+ /* this will put all memory onto the freelists */
+ totalram_pages = free_all_bootmem();
+
+ codek = (_etext - _stext) >> 10;
+ datak = (_end - _sdata) >> 10;
+
+ tmp = nr_free_pages() << PAGE_SHIFT;
+ printk(KERN_INFO "Memory: %luk/%luk RAM (%dk kernel code, %dk data)\n",
+ tmp >> 10, len >> 10, codek, datak);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ int pages = 0;
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ totalram_pages++;
+ pages++;
+ }
+ printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
+ (pages * PAGE_SIZE) >> 10);
+}
+#endif
+
+void __init free_initmem(void)
+{
+ unsigned long addr;
+
+ /*
+ * The following code should work even if these sections
+ * are not page aligned.
+ */
+ addr = PAGE_ALIGN((unsigned long)(__init_begin));
+
+ /* next to check that the page we free is not a partial page */
+ for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
+ addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
+ (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
+}
diff --git a/arch/c6x/platforms/Kconfig b/arch/c6x/platforms/Kconfig
new file mode 100644
index 00000000000..401ee678fd0
--- /dev/null
+++ b/arch/c6x/platforms/Kconfig
@@ -0,0 +1,16 @@
+
+config SOC_TMS320C6455
+ bool "TMS320C6455"
+ default n
+
+config SOC_TMS320C6457
+ bool "TMS320C6457"
+ default n
+
+config SOC_TMS320C6472
+ bool "TMS320C6472"
+ default n
+
+config SOC_TMS320C6474
+ bool "TMS320C6474"
+ default n
diff --git a/arch/c6x/platforms/Makefile b/arch/c6x/platforms/Makefile
new file mode 100644
index 00000000000..9a95b9bca8d
--- /dev/null
+++ b/arch/c6x/platforms/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for arch/c6x/platforms
+#
+# Copyright 2010, 2011 Texas Instruments Incorporated
+#
+
+obj-y = platform.o cache.o megamod-pic.o pll.o plldata.o timer64.o
+obj-y += dscr.o
+
+# SoC objects
+obj-$(CONFIG_SOC_TMS320C6455) += emif.o
+obj-$(CONFIG_SOC_TMS320C6457) += emif.o
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
new file mode 100644
index 00000000000..86318a16a25
--- /dev/null
+++ b/arch/c6x/platforms/cache.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <asm/cache.h>
+#include <asm/soc.h>
+
+/*
+ * Internal Memory Control Registers for caches
+ */
+#define IMCR_CCFG 0x0000
+#define IMCR_L1PCFG 0x0020
+#define IMCR_L1PCC 0x0024
+#define IMCR_L1DCFG 0x0040
+#define IMCR_L1DCC 0x0044
+#define IMCR_L2ALLOC0 0x2000
+#define IMCR_L2ALLOC1 0x2004
+#define IMCR_L2ALLOC2 0x2008
+#define IMCR_L2ALLOC3 0x200c
+#define IMCR_L2WBAR 0x4000
+#define IMCR_L2WWC 0x4004
+#define IMCR_L2WIBAR 0x4010
+#define IMCR_L2WIWC 0x4014
+#define IMCR_L2IBAR 0x4018
+#define IMCR_L2IWC 0x401c
+#define IMCR_L1PIBAR 0x4020
+#define IMCR_L1PIWC 0x4024
+#define IMCR_L1DWIBAR 0x4030
+#define IMCR_L1DWIWC 0x4034
+#define IMCR_L1DWBAR 0x4040
+#define IMCR_L1DWWC 0x4044
+#define IMCR_L1DIBAR 0x4048
+#define IMCR_L1DIWC 0x404c
+#define IMCR_L2WB 0x5000
+#define IMCR_L2WBINV 0x5004
+#define IMCR_L2INV 0x5008
+#define IMCR_L1PINV 0x5028
+#define IMCR_L1DWB 0x5040
+#define IMCR_L1DWBINV 0x5044
+#define IMCR_L1DINV 0x5048
+#define IMCR_MAR_BASE 0x8000
+#define IMCR_MAR96_111 0x8180
+#define IMCR_MAR128_191 0x8200
+#define IMCR_MAR224_239 0x8380
+#define IMCR_L2MPFAR 0xa000
+#define IMCR_L2MPFSR 0xa004
+#define IMCR_L2MPFCR 0xa008
+#define IMCR_L2MPLK0 0xa100
+#define IMCR_L2MPLK1 0xa104
+#define IMCR_L2MPLK2 0xa108
+#define IMCR_L2MPLK3 0xa10c
+#define IMCR_L2MPLKCMD 0xa110
+#define IMCR_L2MPLKSTAT 0xa114
+#define IMCR_L2MPPA_BASE 0xa200
+#define IMCR_L1PMPFAR 0xa400
+#define IMCR_L1PMPFSR 0xa404
+#define IMCR_L1PMPFCR 0xa408
+#define IMCR_L1PMPLK0 0xa500
+#define IMCR_L1PMPLK1 0xa504
+#define IMCR_L1PMPLK2 0xa508
+#define IMCR_L1PMPLK3 0xa50c
+#define IMCR_L1PMPLKCMD 0xa510
+#define IMCR_L1PMPLKSTAT 0xa514
+#define IMCR_L1PMPPA_BASE 0xa600
+#define IMCR_L1DMPFAR 0xac00
+#define IMCR_L1DMPFSR 0xac04
+#define IMCR_L1DMPFCR 0xac08
+#define IMCR_L1DMPLK0 0xad00
+#define IMCR_L1DMPLK1 0xad04
+#define IMCR_L1DMPLK2 0xad08
+#define IMCR_L1DMPLK3 0xad0c
+#define IMCR_L1DMPLKCMD 0xad10
+#define IMCR_L1DMPLKSTAT 0xad14
+#define IMCR_L1DMPPA_BASE 0xae00
+#define IMCR_L2PDWAKE0 0xc040
+#define IMCR_L2PDWAKE1 0xc044
+#define IMCR_L2PDSLEEP0 0xc050
+#define IMCR_L2PDSLEEP1 0xc054
+#define IMCR_L2PDSTAT0 0xc060
+#define IMCR_L2PDSTAT1 0xc064
+
+/*
+ * CCFG register values and bits
+ */
+#define L2MODE_0K_CACHE 0x0
+#define L2MODE_32K_CACHE 0x1
+#define L2MODE_64K_CACHE 0x2
+#define L2MODE_128K_CACHE 0x3
+#define L2MODE_256K_CACHE 0x7
+
+#define L2PRIO_URGENT 0x0
+#define L2PRIO_HIGH 0x1
+#define L2PRIO_MEDIUM 0x2
+#define L2PRIO_LOW 0x3
+
+#define CCFG_ID 0x100 /* Invalidate L1D bit */
+#define CCFG_IP 0x200 /* Invalidate L1P bit */
+
+static void __iomem *cache_base;
+
+/*
+ * L1 & L2 caches generic functions
+ */
+#define imcr_get(reg) soc_readl(cache_base + (reg))
+#define imcr_set(reg, value) \
+do { \
+ soc_writel((value), cache_base + (reg)); \
+ soc_readl(cache_base + (reg)); \
+} while (0)
+
+static void cache_block_operation_wait(unsigned int wc_reg)
+{
+ /* Wait for completion */
+ while (imcr_get(wc_reg))
+ cpu_relax();
+}
+
+static DEFINE_SPINLOCK(cache_lock);
+
+/*
+ * Generic function to perform a block cache operation as
+ * invalidate or writeback/invalidate
+ */
+static void cache_block_operation(unsigned int *start,
+ unsigned int *end,
+ unsigned int bar_reg,
+ unsigned int wc_reg)
+{
+ unsigned long flags;
+ unsigned int wcnt =
+ (L2_CACHE_ALIGN_CNT((unsigned int) end)
+ - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+ unsigned int wc = 0;
+
+ for (; wcnt; wcnt -= wc, start += wc) {
+loop:
+ spin_lock_irqsave(&cache_lock, flags);
+
+ /*
+ * If another cache operation is already in progress, wait for it
+ * to complete, then retry.
+ */
+ if (unlikely(imcr_get(wc_reg))) {
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ /* Wait for previous operation completion */
+ cache_block_operation_wait(wc_reg);
+
+ /* Try again */
+ goto loop;
+ }
+
+ imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+ if (wcnt > 0xffff)
+ wc = 0xffff;
+ else
+ wc = wcnt;
+
+ /* Set word count value in the WC register */
+ imcr_set(wc_reg, wc & 0xffff);
+
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ /* Wait for completion */
+ cache_block_operation_wait(wc_reg);
+ }
+}
+
+static void cache_block_operation_nowait(unsigned int *start,
+ unsigned int *end,
+ unsigned int bar_reg,
+ unsigned int wc_reg)
+{
+ unsigned long flags;
+ unsigned int wcnt =
+ (L2_CACHE_ALIGN_CNT((unsigned int) end)
+ - L2_CACHE_ALIGN_LOW((unsigned int) start)) >> 2;
+ unsigned int wc = 0;
+
+ for (; wcnt; wcnt -= wc, start += wc) {
+
+ spin_lock_irqsave(&cache_lock, flags);
+
+ imcr_set(bar_reg, L2_CACHE_ALIGN_LOW((unsigned int) start));
+
+ if (wcnt > 0xffff)
+ wc = 0xffff;
+ else
+ wc = wcnt;
+
+ /* Set word count value in the WC register */
+ imcr_set(wc_reg, wc & 0xffff);
+
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+ /* Don't wait for completion on last cache operation */
+ if (wcnt > 0xffff)
+ cache_block_operation_wait(wc_reg);
+ }
+}
+
+/*
+ * L1 caches management
+ */
+
+/*
+ * Disable L1 caches
+ */
+void L1_cache_off(void)
+{
+ unsigned int dummy;
+
+ imcr_set(IMCR_L1PCFG, 0);
+ dummy = imcr_get(IMCR_L1PCFG);
+
+ imcr_set(IMCR_L1DCFG, 0);
+ dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ * Enable L1 caches
+ */
+void L1_cache_on(void)
+{
+ unsigned int dummy;
+
+ imcr_set(IMCR_L1PCFG, 7);
+ dummy = imcr_get(IMCR_L1PCFG);
+
+ imcr_set(IMCR_L1DCFG, 7);
+ dummy = imcr_get(IMCR_L1DCFG);
+}
+
+/*
+ * L1P global-invalidate all
+ */
+void L1P_cache_global_invalidate(void)
+{
+ unsigned int set = 1;
+ imcr_set(IMCR_L1PINV, set);
+ while (imcr_get(IMCR_L1PINV) & 1)
+ cpu_relax();
+}
+
+/*
+ * L1D global-invalidate all
+ *
+ * Warning: this operation causes all updated data in L1D to
+ * be discarded rather than written back to the lower levels of
+ * memory
+ */
+void L1D_cache_global_invalidate(void)
+{
+ unsigned int set = 1;
+ imcr_set(IMCR_L1DINV, set);
+ while (imcr_get(IMCR_L1DINV) & 1)
+ cpu_relax();
+}
+
+void L1D_cache_global_writeback(void)
+{
+ unsigned int set = 1;
+ imcr_set(IMCR_L1DWB, set);
+ while (imcr_get(IMCR_L1DWB) & 1)
+ cpu_relax();
+}
+
+void L1D_cache_global_writeback_invalidate(void)
+{
+ unsigned int set = 1;
+ imcr_set(IMCR_L1DWBINV, set);
+ while (imcr_get(IMCR_L1DWBINV) & 1)
+ cpu_relax();
+}
+
+/*
+ * L2 caches management
+ */
+
+/*
+ * Set L2 operation mode
+ */
+void L2_cache_set_mode(unsigned int mode)
+{
+ unsigned int ccfg = imcr_get(IMCR_CCFG);
+
+ /* Clear and set the L2MODE bits in CCFG */
+ ccfg &= ~7;
+ ccfg |= (mode & 7);
+ imcr_set(IMCR_CCFG, ccfg);
+ ccfg = imcr_get(IMCR_CCFG);
+}
+
+/*
+ * L2 global-writeback and global-invalidate all
+ */
+void L2_cache_global_writeback_invalidate(void)
+{
+ imcr_set(IMCR_L2WBINV, 1);
+ while (imcr_get(IMCR_L2WBINV))
+ cpu_relax();
+}
+
+/*
+ * L2 global-writeback all
+ */
+void L2_cache_global_writeback(void)
+{
+ imcr_set(IMCR_L2WB, 1);
+ while (imcr_get(IMCR_L2WB))
+ cpu_relax();
+}
+
+/*
+ * Cacheability controls
+ */
+void enable_caching(unsigned long start, unsigned long end)
+{
+ unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+ unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+ for (; mar <= mar_e; mar += 4)
+ imcr_set(mar, imcr_get(mar) | 1);
+}
+
+void disable_caching(unsigned long start, unsigned long end)
+{
+ unsigned int mar = IMCR_MAR_BASE + ((start >> 24) << 2);
+ unsigned int mar_e = IMCR_MAR_BASE + ((end >> 24) << 2);
+
+ for (; mar <= mar_e; mar += 4)
+ imcr_set(mar, imcr_get(mar) & ~1);
+}
+
+
+/*
+ * L1 block operations
+ */
+void L1P_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L1PIBAR, IMCR_L1PIWC);
+}
+
+void L1D_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L1DIBAR, IMCR_L1DIWC);
+}
+
+void L1D_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L1DWIBAR, IMCR_L1DWIWC);
+}
+
+void L1D_cache_block_writeback(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L1DWBAR, IMCR_L1DWWC);
+}
+
+/*
+ * L2 block operations
+ */
+void L2_cache_block_invalidate(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate(unsigned int start, unsigned int end)
+{
+ cache_block_operation((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+void L2_cache_block_invalidate_nowait(unsigned int start, unsigned int end)
+{
+ cache_block_operation_nowait((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2IBAR, IMCR_L2IWC);
+}
+
+void L2_cache_block_writeback_nowait(unsigned int start, unsigned int end)
+{
+ cache_block_operation_nowait((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2WBAR, IMCR_L2WWC);
+}
+
+void L2_cache_block_writeback_invalidate_nowait(unsigned int start,
+ unsigned int end)
+{
+ cache_block_operation_nowait((unsigned int *) start,
+ (unsigned int *) end,
+ IMCR_L2WIBAR, IMCR_L2WIWC);
+}
+
+
+/*
+ * L1 and L2 caches configuration
+ */
+void __init c6x_cache_init(void)
+{
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, "ti,c64x+cache");
+ if (!node)
+ return;
+
+ cache_base = of_iomap(node, 0);
+
+ of_node_put(node);
+
+ if (!cache_base)
+ return;
+
+ /* Set L2 cache on the whole L2 SRAM memory */
+ L2_cache_set_mode(L2MODE_SIZE);
+
+ /* Enable L1 */
+ L1_cache_on();
+}
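
As a hypothetical example of how the block operations above are meant to be used
(the buffer address and length are illustrative, not from this patch), code
handing a buffer to a bus-mastering device would write dirty lines back ahead of
the transfer, and invalidate the range before reading data the device wrote:

	/* Illustrative only: flush a buffer out of L1D and L2 before an
	 * outgoing DMA transfer, invalidate it after an incoming one. */
	static void example_dma_flush(unsigned int buf, unsigned int len)
	{
		L1D_cache_block_writeback(buf, buf + len);
		L2_cache_block_writeback(buf, buf + len);
	}

	static void example_dma_invalidate(unsigned int buf, unsigned int len)
	{
		L1D_cache_block_invalidate(buf, buf + len);
		L2_cache_block_invalidate(buf, buf + len);
	}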
diff --git a/arch/c6x/platforms/dscr.c b/arch/c6x/platforms/dscr.c
new file mode 100644
index 00000000000..f848a65ee64
--- /dev/null
+++ b/arch/c6x/platforms/dscr.c
@@ -0,0 +1,598 @@
+/*
+ * Device State Control Registers driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The Device State Control Registers (DSCR) provide SoC level control over
+ * a number of peripherals. Details vary considerably among the various SoC
+ * parts. In general, the DSCR block will provide one or more configuration
+ * registers often protected by a lock register. One or more key values must
+ * be written to a lock register in order to unlock the configuration register.
+ * The configuration register may be used to enable (and disable in some
+ * cases) SoC pin drivers, peripheral clock sources (internal or pin), etc.
+ * In some cases, a configuration register is write once or the individual
+ * bits are write once. That is, you may be able to enable a device, but
+ * will not be able to disable it.
+ *
+ * In addition to device configuration, the DSCR block may provide registers
+ * which are used to reset SoC peripherals, provide device ID information,
+ * provide MAC addresses, and other miscellaneous functions.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+
+#define MAX_DEVSTATE_IDS 32
+#define MAX_DEVCTL_REGS 8
+#define MAX_DEVSTAT_REGS 8
+#define MAX_LOCKED_REGS 4
+#define MAX_SOC_EMACS 2
+
+struct rmii_reset_reg {
+ u32 reg;
+ u32 mask;
+};
+
+/*
+ * Some registerd may be locked. In order to write to these
+ * registers, the key value must first be written to the lockreg.
+ */
+struct locked_reg {
+ u32 reg; /* offset from base */
+ u32 lockreg; /* offset from base */
+ u32 key; /* unlock key */
+};
+
+/*
+ * This describes a contiguous area of like control bits used to enable/disable
+ * SoC devices. Each controllable device is given an ID which is used by the
+ * individual device drivers to control the device state. These IDs start at
+ * zero and are assigned sequentially to the control bitfield ranges described
+ * by this structure.
+ */
+struct devstate_ctl_reg {
+ u32 reg; /* register holding the control bits */
+ u8 start_id; /* start id of this range */
+ u8 num_ids; /* number of devices in this range */
+ u8 enable_only; /* bits are write-once to enable only */
+ u8 enable; /* value used to enable device */
+ u8 disable; /* value used to disable device */
+ u8 shift; /* starting (rightmost) bit in range */
+ u8 nbits; /* number of bits per device */
+};
+
+
+/*
+ * This describes a region of status bits indicating the state of
+ * various devices. This is used internally to wait for status
+ * change completion when enabling/disabling a device. Status is
+ * optional and not all device controls will have a corresponding
+ * status.
+ */
+struct devstate_stat_reg {
+ u32 reg; /* register holding the status bits */
+ u8 start_id; /* start id of this range */
+ u8 num_ids; /* number of devices in this range */
+ u8 enable; /* value indicating enabled state */
+ u8 disable; /* value indicating disabled state */
+ u8 shift; /* starting (rightmost) bit in range */
+ u8 nbits; /* number of bits per device */
+};
+
+struct devstate_info {
+ struct devstate_ctl_reg *ctl;
+ struct devstate_stat_reg *stat;
+};
+
+/* These are callbacks to SOC-specific code. */
+struct dscr_ops {
+ void (*init)(struct device_node *node);
+};
+
+struct dscr_regs {
+ spinlock_t lock;
+ void __iomem *base;
+ u32 kick_reg[2];
+ u32 kick_key[2];
+ struct locked_reg locked[MAX_LOCKED_REGS];
+ struct devstate_info devstate_info[MAX_DEVSTATE_IDS];
+ struct rmii_reset_reg rmii_resets[MAX_SOC_EMACS];
+ struct devstate_ctl_reg devctl[MAX_DEVCTL_REGS];
+ struct devstate_stat_reg devstat[MAX_DEVSTAT_REGS];
+};
+
+static struct dscr_regs dscr;
+
+static struct locked_reg *find_locked_reg(u32 reg)
+{
+ int i;
+
+ for (i = 0; i < MAX_LOCKED_REGS; i++)
+ if (dscr.locked[i].key && reg == dscr.locked[i].reg)
+ return &dscr.locked[i];
+ return NULL;
+}
+
+/*
+ * Write to a register with one lock
+ */
+static void dscr_write_locked1(u32 reg, u32 val,
+ u32 lock, u32 key)
+{
+ void __iomem *reg_addr = dscr.base + reg;
+ void __iomem *lock_addr = dscr.base + lock;
+
+ /*
+ * For some registers, the lock is relocked after a short number
+ * of cycles. We have to put the lock write and register write in
+ * the same fetch packet to meet this timing. The .align ensures
+ * the two stw instructions are in the same fetch packet.
+ */
+ asm volatile ("b .s2 0f\n"
+ "nop 5\n"
+ " .align 5\n"
+ "0:\n"
+ "stw .D1T2 %3,*%2\n"
+ "stw .D1T2 %1,*%0\n"
+ :
+ : "a"(reg_addr), "b"(val), "a"(lock_addr), "b"(key)
+ );
+
+ /* in case the hw doesn't reset the lock */
+ soc_writel(0, lock_addr);
+}
+
+/*
+ * Write to a register protected by two lock registers
+ */
+static void dscr_write_locked2(u32 reg, u32 val,
+ u32 lock0, u32 key0,
+ u32 lock1, u32 key1)
+{
+ soc_writel(key0, dscr.base + lock0);
+ soc_writel(key1, dscr.base + lock1);
+ soc_writel(val, dscr.base + reg);
+ soc_writel(0, dscr.base + lock0);
+ soc_writel(0, dscr.base + lock1);
+}
+
+static void dscr_write(u32 reg, u32 val)
+{
+ struct locked_reg *lock;
+
+ lock = find_locked_reg(reg);
+ if (lock)
+ dscr_write_locked1(reg, val, lock->lockreg, lock->key);
+ else if (dscr.kick_key[0])
+ dscr_write_locked2(reg, val, dscr.kick_reg[0], dscr.kick_key[0],
+ dscr.kick_reg[1], dscr.kick_key[1]);
+ else
+ soc_writel(val, dscr.base + reg);
+}
+
+
+/*
+ * Drivers can use this interface to enable/disable SoC IP blocks.
+ */
+void dscr_set_devstate(int id, enum dscr_devstate_t state)
+{
+ struct devstate_ctl_reg *ctl;
+ struct devstate_stat_reg *stat;
+ struct devstate_info *info;
+ u32 ctl_val, val;
+ int ctl_shift, ctl_mask;
+ unsigned long flags;
+
+ if (!dscr.base)
+ return;
+
+ if (id < 0 || id >= MAX_DEVSTATE_IDS)
+ return;
+
+ info = &dscr.devstate_info[id];
+ ctl = info->ctl;
+ stat = info->stat;
+
+ if (ctl == NULL)
+ return;
+
+ ctl_shift = ctl->shift + ctl->nbits * (id - ctl->start_id);
+ ctl_mask = ((1 << ctl->nbits) - 1) << ctl_shift;
+
+ switch (state) {
+ case DSCR_DEVSTATE_ENABLED:
+ ctl_val = ctl->enable << ctl_shift;
+ break;
+ case DSCR_DEVSTATE_DISABLED:
+ if (ctl->enable_only)
+ return;
+ ctl_val = ctl->disable << ctl_shift;
+ break;
+ default:
+ return;
+ }
+
+ spin_lock_irqsave(&dscr.lock, flags);
+
+ val = soc_readl(dscr.base + ctl->reg);
+ val &= ~ctl_mask;
+ val |= ctl_val;
+
+ dscr_write(ctl->reg, val);
+
+ spin_unlock_irqrestore(&dscr.lock, flags);
+
+ if (!stat)
+ return;
+
+ ctl_shift = stat->shift + stat->nbits * (id - stat->start_id);
+
+ if (state == DSCR_DEVSTATE_ENABLED)
+ ctl_val = stat->enable;
+ else
+ ctl_val = stat->disable;
+
+ do {
+ val = soc_readl(dscr.base + stat->reg);
+ val >>= ctl_shift;
+ val &= ((1 << stat->nbits) - 1);
+ } while (val != ctl_val);
+}
+EXPORT_SYMBOL(dscr_set_devstate);
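
A hypothetical caller, to show the intended use of this interface; the device id
macro is SoC-specific and purely illustrative:

	#include <asm/dscr.h>

	#define EXAMPLE_EMAC_DSCR_ID	14	/* illustrative id, defined per SoC */

	static void example_emac_power(int on)
	{
		dscr_set_devstate(EXAMPLE_EMAC_DSCR_ID,
				  on ? DSCR_DEVSTATE_ENABLED
				     : DSCR_DEVSTATE_DISABLED);
	}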
+
+/*
+ * Drivers can use this to reset RMII module.
+ */
+void dscr_rmii_reset(int id, int assert)
+{
+ struct rmii_reset_reg *r;
+ unsigned long flags;
+ u32 val;
+
+ if (id < 0 || id >= MAX_SOC_EMACS)
+ return;
+
+ r = &dscr.rmii_resets[id];
+ if (r->mask == 0)
+ return;
+
+ spin_lock_irqsave(&dscr.lock, flags);
+
+ val = soc_readl(dscr.base + r->reg);
+ if (assert)
+ dscr_write(r->reg, val | r->mask);
+ else
+ dscr_write(r->reg, val & ~(r->mask));
+
+ spin_unlock_irqrestore(&dscr.lock, flags);
+}
+EXPORT_SYMBOL(dscr_rmii_reset);
+
+static void __init dscr_parse_devstat(struct device_node *node,
+ void __iomem *base)
+{
+ u32 val;
+ int err;
+
+ err = of_property_read_u32_array(node, "ti,dscr-devstat", &val, 1);
+ if (!err)
+ c6x_devstat = soc_readl(base + val);
+ printk(KERN_INFO "DEVSTAT: %08x\n", c6x_devstat);
+}
+
+static void __init dscr_parse_silicon_rev(struct device_node *node,
+ void __iomem *base)
+{
+ u32 vals[3];
+ int err;
+
+ err = of_property_read_u32_array(node, "ti,dscr-silicon-rev", vals, 3);
+ if (!err) {
+ c6x_silicon_rev = soc_readl(base + vals[0]);
+ c6x_silicon_rev >>= vals[1];
+ c6x_silicon_rev &= vals[2];
+ }
+}
+
+/*
+ * Some SoCs will have a pair of fuse registers which hold
+ * an ethernet MAC address. The "ti,dscr-mac-fuse-regs"
+ * property is a mapping from fuse register bytes to MAC
+ * address bytes. The expected format is:
+ *
+ * ti,dscr-mac-fuse-regs = <reg0 b3 b2 b1 b0
+ * reg1 b3 b2 b1 b0>
+ *
+ * reg0 and reg1 are the offsets of the two fuse registers.
+ * b3-b0 positionally represent bytes within the fuse register.
+ * b3 is the most significant byte and b0 is the least.
+ * Allowable values for b3-b0 are:
+ *
+ * 0 = fuse register byte not used in MAC address
+ * 1-6 = index+1 into c6x_fuse_mac[]
+ */
+static void __init dscr_parse_mac_fuse(struct device_node *node,
+ void __iomem *base)
+{
+ u32 vals[10], fuse;
+ int f, i, j, err;
+
+ err = of_property_read_u32_array(node, "ti,dscr-mac-fuse-regs",
+ vals, 10);
+ if (err)
+ return;
+
+ for (f = 0; f < 2; f++) {
+ fuse = soc_readl(base + vals[f * 5]);
+ for (j = (f * 5) + 1, i = 24; i >= 0; i -= 8, j++)
+ if (vals[j] && vals[j] <= 6)
+ c6x_fuse_mac[vals[j] - 1] = fuse >> i;
+ }
+}
+
+static void __init dscr_parse_rmii_resets(struct device_node *node,
+ void __iomem *base)
+{
+ const __be32 *p;
+ int i, size;
+
+ /* look for RMII reset registers */
+ p = of_get_property(node, "ti,dscr-rmii-resets", &size);
+ if (p) {
+ /* parse all the reg/mask pairs we can handle */
+ size /= (sizeof(*p) * 2);
+ if (size > MAX_SOC_EMACS)
+ size = MAX_SOC_EMACS;
+
+ for (i = 0; i < size; i++) {
+ dscr.rmii_resets[i].reg = be32_to_cpup(p++);
+ dscr.rmii_resets[i].mask = be32_to_cpup(p++);
+ }
+ }
+}
+
+
+static void __init dscr_parse_privperm(struct device_node *node,
+ void __iomem *base)
+{
+ u32 vals[2];
+ int err;
+
+ err = of_property_read_u32_array(node, "ti,dscr-privperm", vals, 2);
+ if (err)
+ return;
+ dscr_write(vals[0], vals[1]);
+}
+
+/*
+ * SoCs may have "locked" DSCR registers which can only be written
+ * to after writing a key value to a lock register. These
+ * registers can be described with the "ti,dscr-locked-regs" property.
+ * This property provides a list of register descriptions with each
+ * description consisting of three values.
+ *
+ * ti,dscr-locked-regs = <reg0 lockreg0 key0
+ * ...
+ * regN lockregN keyN>;
+ *
+ * reg is the offset of the locked register
+ * lockreg is the offset of the lock register
+ * key is the unlock key written to lockreg
+ *
+ */
+static void __init dscr_parse_locked_regs(struct device_node *node,
+ void __iomem *base)
+{
+ struct locked_reg *r;
+ const __be32 *p;
+ int i, size;
+
+ p = of_get_property(node, "ti,dscr-locked-regs", &size);
+ if (p) {
+ /* parse all the register descriptions we can handle */
+ size /= (sizeof(*p) * 3);
+ if (size > MAX_LOCKED_REGS)
+ size = MAX_LOCKED_REGS;
+
+ for (i = 0; i < size; i++) {
+ r = &dscr.locked[i];
+
+ r->reg = be32_to_cpup(p++);
+ r->lockreg = be32_to_cpup(p++);
+ r->key = be32_to_cpup(p++);
+ }
+ }
+}
+
+/*
+ * SoCs may have DSCR registers which are only write enabled after
+ * writing specific key values to two registers. The two key registers
+ * and the key values can be parsed from a "ti,dscr-kick-regs"
+ * property with the following layout:
+ *
+ * ti,dscr-kick-regs = <kickreg0 key0 kickreg1 key1>
+ *
+ * kickreg is the offset of the "kick" register
+ * key is the value which unlocks writing for protected regs
+ */
+static void __init dscr_parse_kick_regs(struct device_node *node,
+ void __iomem *base)
+{
+ u32 vals[4];
+ int err;
+
+ err = of_property_read_u32_array(node, "ti,dscr-kick-regs", vals, 4);
+ if (!err) {
+ dscr.kick_reg[0] = vals[0];
+ dscr.kick_key[0] = vals[1];
+ dscr.kick_reg[1] = vals[2];
+ dscr.kick_key[1] = vals[3];
+ }
+}
+
+
+/*
+ * SoCs may provide controls to enable/disable individual IP blocks. These
+ * controls in the DSCR usually control pin drivers but also may control
+ * clocking and or resets. The device tree is used to describe the bitfields
+ * in registers used to control device state. The number of bits and their
+ * values may vary even within the same register.
+ *
+ * The layout of these bitfields is described by the ti,dscr-devstate-ctl-regs
+ * property. This property is a list where each element describes a contiguous
+ * range of control fields with like properties. Each element of the list
+ * consists of 7 cells with the following values:
+ *
+ * start_id num_ids reg enable disable start_bit nbits
+ *
+ * start_id is device id for the first device control in the range
+ * num_ids is the number of device controls in the range
+ * reg is the offset of the register holding the control bits
+ * enable is the value to enable a device
+ * disable is the value to disable a device (0xffffffff if cannot disable)
+ * start_bit is the bit number of the first bit in the range
+ * nbits is the number of bits per device control
+ */
+static void __init dscr_parse_devstate_ctl_regs(struct device_node *node,
+ void __iomem *base)
+{
+ struct devstate_ctl_reg *r;
+ const __be32 *p;
+ int i, j, size;
+
+ p = of_get_property(node, "ti,dscr-devstate-ctl-regs", &size);
+ if (p) {
+ /* parse all the ranges we can handle */
+ size /= (sizeof(*p) * 7);
+ if (size > MAX_DEVCTL_REGS)
+ size = MAX_DEVCTL_REGS;
+
+ for (i = 0; i < size; i++) {
+ r = &dscr.devctl[i];
+
+ r->start_id = be32_to_cpup(p++);
+ r->num_ids = be32_to_cpup(p++);
+ r->reg = be32_to_cpup(p++);
+ r->enable = be32_to_cpup(p++);
+ r->disable = be32_to_cpup(p++);
+ if (r->disable == 0xffffffff)
+ r->enable_only = 1;
+ r->shift = be32_to_cpup(p++);
+ r->nbits = be32_to_cpup(p++);
+
+ for (j = r->start_id;
+ j < (r->start_id + r->num_ids);
+ j++)
+ dscr.devstate_info[j].ctl = r;
+ }
+ }
+}
+
+/*
+ * SoCs may provide status registers indicating the state (enabled/disabled) of
+ * devices on the SoC. The device tree is used to describe the bitfields in
+ * registers used to provide device status. The number of bits and their
+ * values used to provide status may vary even within the same register.
+ *
+ * The layout of these bitfields is described by the ti,dscr-devstate-stat-regs
+ * property. This property is a list where each element describes a contiguous
+ * range of status fields with like properties. Each element of the list
+ * consists of 7 cells with the following values:
+ *
+ * start_id num_ids reg enable disable start_bit nbits
+ *
+ * start_id is device id for the first device status in the range
+ * num_ids is the number of devices covered by the range
+ * reg is the offset of the register holding the status bits
+ * enable is the value indicating device is enabled
+ * disable is the value indicating device is disabled
+ * start_bit is the bit number of the first bit in the range
+ * nbits is the number of bits per device status
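+ *
+ * For example (illustrative values only), status for devices 0-2 in
+ * 1-bit fields starting at bit 0 of the register at offset 0x44,
+ * where 1 means enabled and 0 means disabled:
+ *
+ * ti,dscr-devstate-stat-regs = <0 3 0x44 1 0 0 1>;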
+ */
+static void __init dscr_parse_devstate_stat_regs(struct device_node *node,
+ void __iomem *base)
+{
+ struct devstate_stat_reg *r;
+ const __be32 *p;
+ int i, j, size;
+
+ p = of_get_property(node, "ti,dscr-devstate-stat-regs", &size);
+ if (p) {
+ /* parse all the ranges we can handle */
+ size /= (sizeof(*p) * 7);
+ if (size > MAX_DEVSTAT_REGS)
+ size = MAX_DEVSTAT_REGS;
+
+ for (i = 0; i < size; i++) {
+ r = &dscr.devstat[i];
+
+ r->start_id = be32_to_cpup(p++);
+ r->num_ids = be32_to_cpup(p++);
+ r->reg = be32_to_cpup(p++);
+ r->enable = be32_to_cpup(p++);
+ r->disable = be32_to_cpup(p++);
+ r->shift = be32_to_cpup(p++);
+ r->nbits = be32_to_cpup(p++);
+
+ for (j = r->start_id;
+ j < (r->start_id + r->num_ids);
+ j++)
+ dscr.devstate_info[j].stat = r;
+ }
+ }
+}
+
+static struct of_device_id dscr_ids[] __initdata = {
+ { .compatible = "ti,c64x+dscr" },
+ {}
+};
+
+/*
+ * Probe for DSCR area.
+ *
+ * This has to be done early on in case the timer or interrupt controller
+ * needs something. For example, on the C6455 SoC, the timer must be
+ * enabled through the DSCR before it is functional.
+ */
+void __init dscr_probe(void)
+{
+ struct device_node *node;
+ void __iomem *base;
+
+ spin_lock_init(&dscr.lock);
+
+ node = of_find_matching_node(NULL, dscr_ids);
+ if (!node)
+ return;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ of_node_put(node);
+ return;
+ }
+
+ dscr.base = base;
+
+ dscr_parse_devstat(node, base);
+ dscr_parse_silicon_rev(node, base);
+ dscr_parse_mac_fuse(node, base);
+ dscr_parse_rmii_resets(node, base);
+ dscr_parse_locked_regs(node, base);
+ dscr_parse_kick_regs(node, base);
+ dscr_parse_devstate_ctl_regs(node, base);
+ dscr_parse_devstate_stat_regs(node, base);
+ dscr_parse_privperm(node, base);
+}
diff --git a/arch/c6x/platforms/emif.c b/arch/c6x/platforms/emif.c
new file mode 100644
index 00000000000..8b564dec241
--- /dev/null
+++ b/arch/c6x/platforms/emif.c
@@ -0,0 +1,87 @@
+/*
+ * External Memory Interface
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+
+#define NUM_EMIFA_CHIP_ENABLES 4
+
+struct emifa_regs {
+ u32 midr;
+ u32 stat;
+ u32 reserved1[6];
+ u32 bprio;
+ u32 reserved2[23];
+ u32 cecfg[NUM_EMIFA_CHIP_ENABLES];
+ u32 reserved3[4];
+ u32 awcc;
+ u32 reserved4[7];
+ u32 intraw;
+ u32 intmsk;
+ u32 intmskset;
+ u32 intmskclr;
+};
+
+static struct of_device_id emifa_match[] __initdata = {
+ { .compatible = "ti,c64x+emifa" },
+ {}
+};
+
+/*
+ * Parse device tree for existence of an EMIF (External Memory Interface)
+ * and initialize it if found.
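+ *
+ * A sketch of the kind of node this parser expects (all addresses and
+ * values are illustrative only):
+ *
+ *   emifa@70000000 {
+ *           compatible = "ti,c64x+emifa";
+ *           reg = <0x70000000 0x100>;
+ *           ti,dscr-dev-enable = <13>;
+ *           ti,emifa-burst-priority = <255>;
+ *           ti,emifa-ce-config = <0x00240120 0x00240120
+ *                                 0x00240122 0x00240122>;
+ *   };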
+ */
+static int __init c6x_emifa_init(void)
+{
+ struct emifa_regs __iomem *regs;
+ struct device_node *node;
+ const __be32 *p;
+ u32 val;
+ int i, len, err;
+
+ node = of_find_matching_node(NULL, emifa_match);
+ if (!node)
+ return 0;
+
+ regs = of_iomap(node, 0);
+ if (!regs)
+ return 0;
+
+ /* look for a dscr-based enable for emifa pin buffers */
+ err = of_property_read_u32_array(node, "ti,dscr-dev-enable", &val, 1);
+ if (!err)
+ dscr_set_devstate(val, DSCR_DEVSTATE_ENABLED);
+
+ /* set up the chip enables */
+ p = of_get_property(node, "ti,emifa-ce-config", &len);
+ if (p) {
+ len /= sizeof(u32);
+ if (len > NUM_EMIFA_CHIP_ENABLES)
+ len = NUM_EMIFA_CHIP_ENABLES;
+ for (i = 0; i < len; i++)
+ soc_writel(be32_to_cpup(&p[i]), &regs->cecfg[i]);
+ }
+
+ err = of_property_read_u32_array(node, "ti,emifa-burst-priority", &val, 1);
+ if (!err)
+ soc_writel(val, &regs->bprio);
+
+ err = of_property_read_u32_array(node, "ti,emifa-async-wait-control", &val, 1);
+ if (!err)
+ soc_writel(val, &regs->awcc);
+
+ iounmap(regs);
+ of_node_put(node);
+ return 0;
+}
+pure_initcall(c6x_emifa_init);
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
new file mode 100644
index 00000000000..7c37a947fb1
--- /dev/null
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -0,0 +1,349 @@
+/*
+ * Support for C64x+ Megamodule Interrupt Controller
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ * Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/soc.h>
+#include <asm/megamod-pic.h>
+
+#define NR_COMBINERS 4
+#define NR_MUX_OUTPUTS 12
+
+#define IRQ_UNMAPPED 0xffff
+
+/*
+ * Megamodule Interrupt Controller register layout
+ */
+struct megamod_regs {
+ u32 evtflag[8];
+ u32 evtset[8];
+ u32 evtclr[8];
+ u32 reserved0[8];
+ u32 evtmask[8];
+ u32 mevtflag[8];
+ u32 expmask[8];
+ u32 mexpflag[8];
+ u32 intmux_unused;
+ u32 intmux[7];
+ u32 reserved1[8];
+ u32 aegmux[2];
+ u32 reserved2[14];
+ u32 intxstat;
+ u32 intxclr;
+ u32 intdmask;
+ u32 reserved3[13];
+ u32 evtasrt;
+};
+
+struct megamod_pic {
+ struct irq_host *irqhost;
+ struct megamod_regs __iomem *regs;
+ raw_spinlock_t lock;
+
+ /* hw mux mapping */
+ unsigned int output_to_irq[NR_MUX_OUTPUTS];
+};
+
+static struct megamod_pic *mm_pic;
+
+struct megamod_cascade_data {
+ struct megamod_pic *pic;
+ int index;
+};
+
+static struct megamod_cascade_data cascade_data[NR_COMBINERS];
+
+static void mask_megamod(struct irq_data *data)
+{
+ struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t src = irqd_to_hwirq(data);
+ u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
+
+ raw_spin_lock(&pic->lock);
+ soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
+ raw_spin_unlock(&pic->lock);
+}
+
+static void unmask_megamod(struct irq_data *data)
+{
+ struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t src = irqd_to_hwirq(data);
+ u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
+
+ raw_spin_lock(&pic->lock);
+ soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
+ raw_spin_unlock(&pic->lock);
+}
+
+static struct irq_chip megamod_chip = {
+ .name = "megamod",
+ .irq_mask = mask_megamod,
+ .irq_unmask = unmask_megamod,
+};
+
+static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct megamod_cascade_data *cascade;
+ struct megamod_pic *pic;
+ u32 events;
+ int n, idx;
+
+ cascade = irq_desc_get_handler_data(desc);
+
+ pic = cascade->pic;
+ idx = cascade->index;
+
+ while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
+ n = __ffs(events);
+
+ irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);
+
+ soc_writel(1 << n, &pic->regs->evtclr[idx]);
+
+ generic_handle_irq(irq);
+ }
+}
+
+static int megamod_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct megamod_pic *pic = h->host_data;
+ int i;
+
+ /* We shouldn't see a hwirq which is muxed to the core controller */
+ for (i = 0; i < NR_MUX_OUTPUTS; i++)
+ if (pic->output_to_irq[i] == hw)
+ return -1;
+
+ irq_set_chip_data(virq, pic);
+ irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);
+
+ /* Set default irq type */
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int megamod_xlate(struct irq_host *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ /* megamod intspecs must have 1 cell */
+ BUG_ON(intsize != 1);
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static struct irq_host_ops megamod_host_ops = {
+ .map = megamod_map,
+ .xlate = megamod_xlate,
+};
+
+static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
+{
+ int index, offset;
+ u32 val;
+
+ if (src < 0 || src >= (NR_COMBINERS * 32)) {
+ pic->output_to_irq[output] = IRQ_UNMAPPED;
+ return;
+ }
+
+ /* four mappings per mux register */
+ index = output / 4;
+ offset = (output & 3) * 8;
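+ /* e.g. output 9: index = 2, offset = 8, i.e. bits 15..8 of intmux[2] */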
+
+ val = soc_readl(&pic->regs->intmux[index]);
+ val &= ~(0xff << offset);
+ val |= src << offset;
+ soc_writel(val, &pic->regs->intmux[index]);
+}
+
+/*
+ * Parse the MUX mapping, if one exists.
+ *
+ * The MUX map is an array of up to 12 cells; one for each usable core priority
+ * interrupt. The value of a given cell is the megamodule interrupt source
+ * which is to be MUXed to the output corresponding to the cell position
+ * within the array. The first cell in the array corresponds to priority
+ * 4 and the last (12th) cell corresponds to priority 15. The allowed
+ * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
+ * sources (0 - 3) are not allowed to be mapped through this property. They
+ * are handled through the "interrupts" property. This allows us to use a
+ * value of zero as a "do not map" placeholder.
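+ *
+ * For example (illustrative), routing megamodule event 71 to core
+ * priority 7 and leaving all other priorities unmapped:
+ *
+ * ti,c64x+megamod-pic-mux = <0 0 0 71 0 0 0 0 0 0 0 0>;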
+ */
+static void __init parse_priority_map(struct megamod_pic *pic,
+ int *mapping, int size)
+{
+ struct device_node *np = pic->irqhost->of_node;
+ const __be32 *map;
+ int i, maplen;
+ u32 val;
+
+ map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
+ if (map) {
+ maplen /= 4;
+ if (maplen > size)
+ maplen = size;
+
+ for (i = 0; i < maplen; i++) {
+ val = be32_to_cpup(map);
+ if (val && val >= 4)
+ mapping[i] = val;
+ ++map;
+ }
+ }
+}
+
+static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
+{
+ struct megamod_pic *pic;
+ int i, irq;
+ int mapping[NR_MUX_OUTPUTS];
+
+ pr_info("Initializing C64x+ Megamodule PIC\n");
+
+ pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
+ if (!pic) {
+ pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+ return NULL;
+ }
+
+ pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
+ NR_COMBINERS * 32, &megamod_host_ops,
+ IRQ_UNMAPPED);
+ if (!pic->irqhost) {
+ pr_err("%s: Could not alloc host.\n", np->full_name);
+ goto error_free;
+ }
+
+ pic->irqhost->host_data = pic;
+
+ raw_spin_lock_init(&pic->lock);
+
+ pic->regs = of_iomap(np, 0);
+ if (!pic->regs) {
+ pr_err("%s: Could not map registers.\n", np->full_name);
+ goto error_free;
+ }
+
+ /* Initialize MUX map */
+ for (i = 0; i < ARRAY_SIZE(mapping); i++)
+ mapping[i] = IRQ_UNMAPPED;
+
+ parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));
+
+ /*
+ * We can have up to 12 interrupts cascading to the core controller.
+ * These cascades can be from the combined interrupt sources or from
+ * individual interrupt sources. The "interrupts" property only
+ * deals with the cascaded combined interrupts. The individual
+ * interrupts muxed to the core controller use the core controller
+ * as their interrupt parent.
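+ *
+ * An illustrative "interrupts" property cascading the four combined
+ * outputs to core priorities 12-15 would look like:
+ *
+ *   interrupts = <12 13 14 15>;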
+ */
+ for (i = 0; i < NR_COMBINERS; i++) {
+
+ irq = irq_of_parse_and_map(np, i);
+ if (irq == NO_IRQ)
+ continue;
+
+ /*
+ * We count on the core priority interrupts (4 - 15) being
+ * direct mapped. Check that device tree provided something
+ * in that range.
+ */
+ if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
+ pr_err("%s: combiner-%d virq %d out of range!\n",
+ np->full_name, i, irq);
+ continue;
+ }
+
+ /* record the mapping */
+ mapping[irq - 4] = i;
+
+ pr_debug("%s: combiner-%d cascading to virq %d\n",
+ np->full_name, i, irq);
+
+ cascade_data[i].pic = pic;
+ cascade_data[i].index = i;
+
+ /* mask and clear all events in combiner */
+ soc_writel(~0, &pic->regs->evtmask[i]);
+ soc_writel(~0, &pic->regs->evtclr[i]);
+
+ irq_set_handler_data(irq, &cascade_data[i]);
+ irq_set_chained_handler(irq, megamod_irq_cascade);
+ }
+
+ /* Finally, set up the MUX registers */
+ for (i = 0; i < NR_MUX_OUTPUTS; i++) {
+ if (mapping[i] != IRQ_UNMAPPED) {
+ pr_debug("%s: setting mux %d to priority %d\n",
+ np->full_name, mapping[i], i + 4);
+ set_megamod_mux(pic, mapping[i], i);
+ }
+ }
+
+ return pic;
+
+error_free:
+ kfree(pic);
+
+ return NULL;
+}
+
+/*
+ * Return next active event after ACK'ing it.
+ * Return -1 if no events active.
+ */
+static int get_exception(void)
+{
+ int i, bit;
+ u32 mask;
+
+ for (i = 0; i < NR_COMBINERS; i++) {
+ mask = soc_readl(&mm_pic->regs->mexpflag[i]);
+ if (mask) {
+ bit = __ffs(mask);
+ soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
+ return (i * 32) + bit;
+ }
+ }
+ return -1;
+}
+
+static void assert_event(unsigned int val)
+{
+ soc_writel(val, &mm_pic->regs->evtasrt);
+}
+
+void __init megamod_pic_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
+ if (!np)
+ return;
+
+ mm_pic = init_megamod_pic(np);
+ of_node_put(np);
+
+ soc_ops.get_exception = get_exception;
+ soc_ops.assert_event = assert_event;
+
+ return;
+}
diff --git a/arch/c6x/platforms/platform.c b/arch/c6x/platforms/platform.c
new file mode 100644
index 00000000000..26c1a355d60
--- /dev/null
+++ b/arch/c6x/platforms/platform.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2011 Texas Instruments Incorporated
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+
+static int __init c6x_device_probe(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ return 0;
+}
+core_initcall(c6x_device_probe);
diff --git a/arch/c6x/platforms/pll.c b/arch/c6x/platforms/pll.c
new file mode 100644
index 00000000000..3aa898f7ce4
--- /dev/null
+++ b/arch/c6x/platforms/pll.c
@@ -0,0 +1,444 @@
+/*
+ * Clock and PLL control for C64x+ devices
+ *
+ * Copyright (C) 2010, 2011 Texas Instruments.
+ * Contributed by: Mark Salter <msalter@redhat.com>
+ *
+ * Copied heavily from arm/mach-davinci/clock.c, so:
+ *
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <asm/clock.h>
+#include <asm/soc.h>
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+static DEFINE_SPINLOCK(clockfw_lock);
+
+static void __clk_enable(struct clk *clk)
+{
+ if (clk->parent)
+ __clk_enable(clk->parent);
+ clk->usecount++;
+}
+
+static void __clk_disable(struct clk *clk)
+{
+ if (WARN_ON(clk->usecount == 0))
+ return;
+ --clk->usecount;
+
+ if (clk->parent)
+ __clk_disable(clk->parent);
+}
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ __clk_enable(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ if (clk->round_rate)
+ return clk->round_rate(clk, rate);
+
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+/* Propagate rate to children */
+static void propagate_rate(struct clk *root)
+{
+ struct clk *clk;
+
+ list_for_each_entry(clk, &root->children, childnode) {
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ }
+}
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (clk == NULL || IS_ERR(clk))
+ return ret;
+
+ if (clk->set_rate)
+ ret = clk->set_rate(clk, rate);
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (ret == 0) {
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ }
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ unsigned long flags;
+
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ /* Cannot change parent on enabled clock */
+ if (WARN_ON(clk->usecount))
+ return -EINVAL;
+
+ mutex_lock(&clocks_mutex);
+ clk->parent = parent;
+ list_del_init(&clk->childnode);
+ list_add(&clk->childnode, &clk->parent->children);
+ mutex_unlock(&clocks_mutex);
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+ propagate_rate(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+int clk_register(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ if (WARN(clk->parent && !clk->parent->rate,
+ "CLK: %s parent %s has no rate!\n",
+ clk->name, clk->parent->name))
+ return -EINVAL;
+
+ mutex_lock(&clocks_mutex);
+ list_add_tail(&clk->node, &clocks);
+ if (clk->parent)
+ list_add_tail(&clk->childnode, &clk->parent->children);
+ mutex_unlock(&clocks_mutex);
+
+ /* If rate is already set, use it */
+ if (clk->rate)
+ return 0;
+
+ /* Else, see if there is a way to calculate it */
+ if (clk->recalc)
+ clk->rate = clk->recalc(clk);
+
+ /* Otherwise, default to parent rate */
+ else if (clk->parent)
+ clk->rate = clk->parent->rate;
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return;
+
+ mutex_lock(&clocks_mutex);
+ list_del(&clk->node);
+ list_del(&clk->childnode);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+static u32 pll_read(struct pll_data *pll, int reg)
+{
+ return soc_readl(pll->base + reg);
+}
+
+static unsigned long clk_sysclk_recalc(struct clk *clk)
+{
+ u32 v, plldiv = 0;
+ struct pll_data *pll;
+ unsigned long rate = clk->rate;
+
+ if (WARN_ON(!clk->parent))
+ return rate;
+
+ rate = clk->parent->rate;
+
+ /* the parent must be a PLL */
+ if (WARN_ON(!clk->parent->pll_data))
+ return rate;
+
+ pll = clk->parent->pll_data;
+
+ /* If pre-PLL, source clock is before the multiplier and divider(s) */
+ if (clk->flags & PRE_PLL)
+ rate = pll->input_rate;
+
+ if (!clk->div) {
+ pr_debug("%s: (no divider) rate = %lu KHz\n",
+ clk->name, rate / 1000);
+ return rate;
+ }
+
+ if (clk->flags & FIXED_DIV_PLL) {
+ rate /= clk->div;
+ pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n",
+ clk->name, clk->div, rate / 1000);
+ return rate;
+ }
+
+ v = pll_read(pll, clk->div);
+ if (v & PLLDIV_EN)
+ plldiv = (v & PLLDIV_RATIO_MASK) + 1;
+
+ if (plldiv == 0)
+ plldiv = 1;
+
+ rate /= plldiv;
+
+ pr_debug("%s: (divide by %d) rate = %lu KHz\n",
+ clk->name, plldiv, rate / 1000);
+
+ return rate;
+}
+
+static unsigned long clk_leafclk_recalc(struct clk *clk)
+{
+ if (WARN_ON(!clk->parent))
+ return clk->rate;
+
+ pr_debug("%s: (parent %s) rate = %lu KHz\n",
+ clk->name, clk->parent->name, clk->parent->rate / 1000);
+
+ return clk->parent->rate;
+}
+
+static unsigned long clk_pllclk_recalc(struct clk *clk)
+{
+ u32 ctrl, mult = 0, prediv = 0, postdiv = 0;
+ u8 bypass;
+ struct pll_data *pll = clk->pll_data;
+ unsigned long rate = clk->rate;
+
+ if (clk->flags & FIXED_RATE_PLL)
+ return rate;
+
+ ctrl = pll_read(pll, PLLCTL);
+ rate = pll->input_rate = clk->parent->rate;
+
+ if (ctrl & PLLCTL_PLLEN)
+ bypass = 0;
+ else
+ bypass = 1;
+
+ if (pll->flags & PLL_HAS_MUL) {
+ mult = pll_read(pll, PLLM);
+ mult = (mult & PLLM_PLLM_MASK) + 1;
+ }
+ if (pll->flags & PLL_HAS_PRE) {
+ prediv = pll_read(pll, PLLPRE);
+ if (prediv & PLLDIV_EN)
+ prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
+ else
+ prediv = 0;
+ }
+ if (pll->flags & PLL_HAS_POST) {
+ postdiv = pll_read(pll, PLLPOST);
+ if (postdiv & PLLDIV_EN)
+ postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
+ else
+ postdiv = 1;
+ }
+
+ if (!bypass) {
+ if (prediv)
+ rate /= prediv;
+ if (mult)
+ rate *= mult;
+ if (postdiv)
+ rate /= postdiv;
+
+ pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] "
+ "--> %luMHz output.\n",
+ pll->num, clk->parent->rate / 1000000,
+ prediv, mult, postdiv, rate / 1000000);
+ } else
+ pr_debug("PLL%d: input = %luMHz, bypass mode.\n",
+ pll->num, clk->parent->rate / 1000000);
+
+ return rate;
+}
+
+static void __init __init_clk(struct clk *clk)
+{
+ INIT_LIST_HEAD(&clk->node);
+ INIT_LIST_HEAD(&clk->children);
+ INIT_LIST_HEAD(&clk->childnode);
+
+ if (!clk->recalc) {
+
+ /* Check if clock is a PLL */
+ if (clk->pll_data)
+ clk->recalc = clk_pllclk_recalc;
+
+ /* Else, if it is a PLL-derived clock */
+ else if (clk->flags & CLK_PLL)
+ clk->recalc = clk_sysclk_recalc;
+
+ /* Otherwise, it is a leaf clock (PSC clock) */
+ else if (clk->parent)
+ clk->recalc = clk_leafclk_recalc;
+ }
+}
+
+void __init c6x_clks_init(struct clk_lookup *clocks)
+{
+ struct clk_lookup *c;
+ struct clk *clk;
+ size_t num_clocks = 0;
+
+ for (c = clocks; c->clk; c++) {
+ clk = c->clk;
+
+ __init_clk(clk);
+ clk_register(clk);
+ num_clocks++;
+
+ /* Turn on clocks that Linux doesn't otherwise manage */
+ if (clk->flags & ALWAYS_ENABLED)
+ clk_enable(clk);
+ }
+
+ clkdev_add_table(clocks, num_clocks);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CLKNAME_MAX 10 /* longest clock name */
+#define NEST_DELTA 2
+#define NEST_MAX 4
+
+static void
+dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
+{
+ char *state;
+ char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
+ struct clk *clk;
+ unsigned i;
+
+ if (parent->flags & CLK_PLL)
+ state = "pll";
+ else
+ state = "";
+
+ /* <nest spaces> name <pad to end> */
+ memset(buf, ' ', sizeof(buf) - 1);
+ buf[sizeof(buf) - 1] = 0;
+ i = strlen(parent->name);
+ memcpy(buf + nest, parent->name,
+ min(i, (unsigned)(sizeof(buf) - 1 - nest)));
+
+ seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
+ buf, parent->usecount, state, clk_get_rate(parent));
+ /* REVISIT show device associations too */
+
+ /* cost is now small, but not linear... */
+ list_for_each_entry(clk, &parent->children, childnode) {
+ dump_clock(s, nest + NEST_DELTA, clk);
+ }
+}
+
+static int c6x_ck_show(struct seq_file *m, void *v)
+{
+ struct clk *clk;
+
+ /*
+ * Show clock tree; We trust nonzero usecounts equate to PSC enables...
+ */
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(clk, &clocks, node)
+ if (!clk->parent)
+ dump_clock(m, 0, clk);
+ mutex_unlock(&clocks_mutex);
+
+ return 0;
+}
+
+static int c6x_ck_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, c6x_ck_show, NULL);
+}
+
+static const struct file_operations c6x_ck_operations = {
+ .open = c6x_ck_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init c6x_clk_debugfs_init(void)
+{
+ debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL,
+ &c6x_ck_operations);
+
+ return 0;
+}
+device_initcall(c6x_clk_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
new file mode 100644
index 00000000000..2cfd6f42968
--- /dev/null
+++ b/arch/c6x/platforms/plldata.c
@@ -0,0 +1,404 @@
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/clock.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+/*
+ * Common SoC clock support.
+ */
+
+/* Default input for PLL1 */
+struct clk clkin1 = {
+ .name = "clkin1",
+ .node = LIST_HEAD_INIT(clkin1.node),
+ .children = LIST_HEAD_INIT(clkin1.children),
+ .childnode = LIST_HEAD_INIT(clkin1.childnode),
+};
+
+struct pll_data c6x_soc_pll1 = {
+ .num = 1,
+ .sysclks = {
+ {
+ .name = "pll1",
+ .parent = &clkin1,
+ .pll_data = &c6x_soc_pll1,
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk1",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk2",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk3",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk4",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk5",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk6",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk7",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk8",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk9",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk10",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk11",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk12",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk13",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk14",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk15",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ {
+ .name = "pll1_sysclk16",
+ .parent = &c6x_soc_pll1.sysclks[0],
+ .flags = CLK_PLL,
+ },
+ },
+};
+
+/* CPU core clock */
+struct clk c6x_core_clk = {
+ .name = "core",
+};
+
+/* miscellaneous IO clocks */
+struct clk c6x_i2c_clk = {
+ .name = "i2c",
+};
+
+struct clk c6x_watchdog_clk = {
+ .name = "watchdog",
+};
+
+struct clk c6x_mcbsp1_clk = {
+ .name = "mcbsp1",
+};
+
+struct clk c6x_mcbsp2_clk = {
+ .name = "mcbsp2",
+};
+
+struct clk c6x_mdio_clk = {
+ .name = "mdio",
+};
+
+#ifdef CONFIG_SOC_TMS320C6455
+static struct clk_lookup c6455_clks[] = {
+ CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+ CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+ CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+ CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+ CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+ CLK(NULL, "core", &c6x_core_clk),
+ CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+ CLK("watchdog", NULL, &c6x_watchdog_clk),
+ CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+ CLK("", NULL, NULL)
+};
+
+static void __init c6455_setup_clocks(struct device_node *node)
+{
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct clk *sysclks = pll->sysclks;
+
+ pll->flags = PLL_HAS_PRE | PLL_HAS_MUL;
+
+ sysclks[2].flags |= FIXED_DIV_PLL;
+ sysclks[2].div = 3;
+ sysclks[3].flags |= FIXED_DIV_PLL;
+ sysclks[3].div = 6;
+ sysclks[4].div = PLLDIV4;
+ sysclks[5].div = PLLDIV5;
+
+ c6x_core_clk.parent = &sysclks[0];
+ c6x_i2c_clk.parent = &sysclks[3];
+ c6x_watchdog_clk.parent = &sysclks[3];
+ c6x_mdio_clk.parent = &sysclks[3];
+
+ c6x_clks_init(c6455_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6455 */
+
+#ifdef CONFIG_SOC_TMS320C6457
+static struct clk_lookup c6457_clks[] = {
+ CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+ CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
+ CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+ CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+ CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+ CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+ CLK(NULL, "core", &c6x_core_clk),
+ CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+ CLK("watchdog", NULL, &c6x_watchdog_clk),
+ CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+ CLK("", NULL, NULL)
+};
+
+static void __init c6457_setup_clocks(struct device_node *node)
+{
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct clk *sysclks = pll->sysclks;
+
+ pll->flags = PLL_HAS_MUL | PLL_HAS_POST;
+
+ sysclks[1].flags |= FIXED_DIV_PLL;
+ sysclks[1].div = 1;
+ sysclks[2].flags |= FIXED_DIV_PLL;
+ sysclks[2].div = 3;
+ sysclks[3].flags |= FIXED_DIV_PLL;
+ sysclks[3].div = 6;
+ sysclks[4].div = PLLDIV4;
+ sysclks[5].div = PLLDIV5;
+
+ c6x_core_clk.parent = &sysclks[1];
+ c6x_i2c_clk.parent = &sysclks[3];
+ c6x_watchdog_clk.parent = &sysclks[5];
+ c6x_mdio_clk.parent = &sysclks[5];
+
+ c6x_clks_init(c6457_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6457 */
+
+#ifdef CONFIG_SOC_TMS320C6472
+static struct clk_lookup c6472_clks[] = {
+ CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+ CLK(NULL, "pll1_sysclk1", &c6x_soc_pll1.sysclks[1]),
+ CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+ CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+ CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+ CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+ CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
+ CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
+ CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
+ CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
+ CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
+ CLK(NULL, "core", &c6x_core_clk),
+ CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+ CLK("watchdog", NULL, &c6x_watchdog_clk),
+ CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+ CLK("", NULL, NULL)
+};
+
+/* assumptions used for delay loop calculations */
+#define MIN_CLKIN1_KHz 15625
+#define MAX_CORE_KHz 700000
+#define MIN_PLLOUT_KHz MIN_CLKIN1_KHz
+
+static void __init c6472_setup_clocks(struct device_node *node)
+{
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct clk *sysclks = pll->sysclks;
+ int i;
+
+ pll->flags = PLL_HAS_MUL;
+
+ for (i = 1; i <= 6; i++) {
+ sysclks[i].flags |= FIXED_DIV_PLL;
+ sysclks[i].div = 1;
+ }
+
+ sysclks[7].flags |= FIXED_DIV_PLL;
+ sysclks[7].div = 3;
+ sysclks[8].flags |= FIXED_DIV_PLL;
+ sysclks[8].div = 6;
+ sysclks[9].flags |= FIXED_DIV_PLL;
+ sysclks[9].div = 2;
+ sysclks[10].div = PLLDIV10;
+
+ c6x_core_clk.parent = &sysclks[get_coreid() + 1];
+ c6x_i2c_clk.parent = &sysclks[8];
+ c6x_watchdog_clk.parent = &sysclks[8];
+ c6x_mdio_clk.parent = &sysclks[5];
+
+ c6x_clks_init(c6472_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6472 */
+
+#ifdef CONFIG_SOC_TMS320C6474
+static struct clk_lookup c6474_clks[] = {
+ CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+ CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
+ CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
+ CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
+ CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
+ CLK(NULL, "pll1_sysclk12", &c6x_soc_pll1.sysclks[12]),
+ CLK(NULL, "pll1_sysclk13", &c6x_soc_pll1.sysclks[13]),
+ CLK(NULL, "core", &c6x_core_clk),
+ CLK("i2c_davinci.1", NULL, &c6x_i2c_clk),
+ CLK("mcbsp.1", NULL, &c6x_mcbsp1_clk),
+ CLK("mcbsp.2", NULL, &c6x_mcbsp2_clk),
+ CLK("watchdog", NULL, &c6x_watchdog_clk),
+ CLK("2c81800.mdio", NULL, &c6x_mdio_clk),
+ CLK("", NULL, NULL)
+};
+
+static void __init c6474_setup_clocks(struct device_node *node)
+{
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct clk *sysclks = pll->sysclks;
+
+ pll->flags = PLL_HAS_MUL;
+
+ sysclks[7].flags |= FIXED_DIV_PLL;
+ sysclks[7].div = 1;
+ sysclks[9].flags |= FIXED_DIV_PLL;
+ sysclks[9].div = 3;
+ sysclks[10].flags |= FIXED_DIV_PLL;
+ sysclks[10].div = 6;
+
+ sysclks[11].div = PLLDIV11;
+
+ sysclks[12].flags |= FIXED_DIV_PLL;
+ sysclks[12].div = 2;
+
+ sysclks[13].div = PLLDIV13;
+
+ c6x_core_clk.parent = &sysclks[7];
+ c6x_i2c_clk.parent = &sysclks[10];
+ c6x_watchdog_clk.parent = &sysclks[10];
+ c6x_mcbsp1_clk.parent = &sysclks[10];
+ c6x_mcbsp2_clk.parent = &sysclks[10];
+
+ c6x_clks_init(c6474_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6474 */
+
+static struct of_device_id c6x_clkc_match[] __initdata = {
+#ifdef CONFIG_SOC_TMS320C6455
+ { .compatible = "ti,c6455-pll", .data = c6455_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6457
+ { .compatible = "ti,c6457-pll", .data = c6457_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6472
+ { .compatible = "ti,c6472-pll", .data = c6472_setup_clocks },
+#endif
+#ifdef CONFIG_SOC_TMS320C6474
+ { .compatible = "ti,c6474-pll", .data = c6474_setup_clocks },
+#endif
+ { .compatible = "ti,c64x+pll" },
+ {}
+};
+
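+/*
+ * Sketch of a PLL controller node handled below (addresses and values
+ * are illustrative only):
+ *
+ *   pll-controller@29a0000 {
+ *           compatible = "ti,c6457-pll", "ti,c64x+pll";
+ *           reg = <0x029a0000 0x200>;
+ *           clock-frequency = <60000000>;
+ *   };
+ *
+ * The optional ti,c64x+pll-*-delay properties are parsed below and
+ * fall back to defaults when absent.
+ */
+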
+void __init c64x_setup_clocks(void)
+{
+ void (*__setup_clocks)(struct device_node *np);
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct device_node *node;
+ const struct of_device_id *id;
+ int err;
+ u32 val;
+
+ node = of_find_matching_node(NULL, c6x_clkc_match);
+ if (!node)
+ return;
+
+ pll->base = of_iomap(node, 0);
+ if (!pll->base)
+ goto out;
+
+ err = of_property_read_u32(node, "clock-frequency", &val);
+ if (err || val == 0) {
+ val = 25000000;
+ pr_err("%s: no clock-frequency found! Using %dMHz\n",
+ node->full_name, (int)val / 1000000);
+ }
+ clkin1.rate = val;
+
+ err = of_property_read_u32(node, "ti,c64x+pll-bypass-delay", &val);
+ if (err)
+ val = 5000;
+ pll->bypass_delay = val;
+
+ err = of_property_read_u32(node, "ti,c64x+pll-reset-delay", &val);
+ if (err)
+ val = 30000;
+ pll->reset_delay = val;
+
+ err = of_property_read_u32(node, "ti,c64x+pll-lock-delay", &val);
+ if (err)
+ val = 30000;
+ pll->lock_delay = val;
+
+ /* id->data is a pointer to SoC-specific setup */
+ id = of_match_node(c6x_clkc_match, node);
+ if (id && id->data) {
+ __setup_clocks = id->data;
+ __setup_clocks(node);
+ }
+
+out:
+ of_node_put(node);
+}
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
new file mode 100644
index 00000000000..03c03c24919
--- /dev/null
+++ b/arch/c6x/platforms/timer64.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2010, 2011 Texas Instruments Incorporated
+ * Contributed by: Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/soc.h>
+#include <asm/dscr.h>
+#include <asm/timer64.h>
+
+struct timer_regs {
+ u32 reserved0;
+ u32 emumgt;
+ u32 reserved1;
+ u32 reserved2;
+ u32 cntlo;
+ u32 cnthi;
+ u32 prdlo;
+ u32 prdhi;
+ u32 tcr;
+ u32 tgcr;
+ u32 wdtcr;
+};
+
+static struct timer_regs __iomem *timer;
+
+#define TCR_TSTATLO 0x001
+#define TCR_INVOUTPLO 0x002
+#define TCR_INVINPLO 0x004
+#define TCR_CPLO 0x008
+#define TCR_ENAMODELO_ONCE 0x040
+#define TCR_ENAMODELO_CONT 0x080
+#define TCR_ENAMODELO_MASK 0x0c0
+#define TCR_PWIDLO_MASK 0x030
+#define TCR_CLKSRCLO 0x100
+#define TCR_TIENLO 0x200
+#define TCR_TSTATHI (0x001 << 16)
+#define TCR_INVOUTPHI (0x002 << 16)
+#define TCR_CPHI (0x008 << 16)
+#define TCR_PWIDHI_MASK (0x030 << 16)
+#define TCR_ENAMODEHI_ONCE (0x040 << 16)
+#define TCR_ENAMODEHI_CONT (0x080 << 16)
+#define TCR_ENAMODEHI_MASK (0x0c0 << 16)
+
+#define TGCR_TIMLORS 0x001
+#define TGCR_TIMHIRS 0x002
+#define TGCR_TIMMODE_UD32 0x004
+#define TGCR_TIMMODE_WDT64 0x008
+#define TGCR_TIMMODE_CD32 0x00c
+#define TGCR_TIMMODE_MASK 0x00c
+#define TGCR_PSCHI_MASK (0x00f << 8)
+#define TGCR_TDDRHI_MASK (0x00f << 12)
+
+/*
+ * Timer clocks are divided down from the CPU clock
+ * The divisor is in the EMUMGTCLKSPD register
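+ *
+ * e.g. with a 1 GHz core clock and a divisor of 6 (both illustrative),
+ * TIMER64_RATE evaluates to roughly 166 MHz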
+ */
+#define TIMER_DIVISOR \
+ ((soc_readl(&timer->emumgt) & (0xf << 16)) >> 16)
+
+#define TIMER64_RATE (c6x_core_freq / TIMER_DIVISOR)
+
+#define TIMER64_MODE_DISABLED 0
+#define TIMER64_MODE_ONE_SHOT TCR_ENAMODELO_ONCE
+#define TIMER64_MODE_PERIODIC TCR_ENAMODELO_CONT
+
+static int timer64_mode;
+static int timer64_devstate_id = -1;
+
+static void timer64_config(unsigned long period)
+{
+ u32 tcr = soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK;
+
+ soc_writel(tcr, &timer->tcr);
+ soc_writel(period - 1, &timer->prdlo);
+ soc_writel(0, &timer->cntlo);
+ tcr |= timer64_mode;
+ soc_writel(tcr, &timer->tcr);
+}
+
+static void timer64_enable(void)
+{
+ u32 val;
+
+ if (timer64_devstate_id >= 0)
+ dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
+
+ /* disable timer, reset count */
+ soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
+ soc_writel(0, &timer->prdlo);
+
+ /* use internal clock and 1 cycle pulse width */
+ val = soc_readl(&timer->tcr);
+ soc_writel(val & ~(TCR_CLKSRCLO | TCR_PWIDLO_MASK), &timer->tcr);
+
+ /* dual 32-bit unchained mode */
+ val = soc_readl(&timer->tgcr) & ~TGCR_TIMMODE_MASK;
+ soc_writel(val, &timer->tgcr);
+ soc_writel(val | (TGCR_TIMLORS | TGCR_TIMMODE_UD32), &timer->tgcr);
+}
+
+static void timer64_disable(void)
+{
+ /* disable timer, reset count */
+ soc_writel(soc_readl(&timer->tcr) & ~TCR_ENAMODELO_MASK, &timer->tcr);
+ soc_writel(0, &timer->prdlo);
+
+ if (timer64_devstate_id >= 0)
+ dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_DISABLED);
+}
+
+static int next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ timer64_config(delta);
+ return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ timer64_enable();
+ timer64_mode = TIMER64_MODE_PERIODIC;
+ timer64_config(TIMER64_RATE / HZ);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ timer64_enable();
+ timer64_mode = TIMER64_MODE_ONE_SHOT;
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ timer64_mode = TIMER64_MODE_DISABLED;
+ timer64_disable();
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device t64_clockevent_device = {
+ .name = "TIMER64_EVT32_TIMER",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 200,
+ .set_mode = set_clock_mode,
+ .set_next_event = next_event,
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &t64_clockevent_device;
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction timer_iact = {
+ .name = "timer",
+ .flags = IRQF_TIMER,
+ .handler = timer_interrupt,
+ .dev_id = &t64_clockevent_device,
+};
+
+void __init timer64_init(void)
+{
+ struct clock_event_device *cd = &t64_clockevent_device;
+ struct device_node *np, *first = NULL;
+ u32 val;
+ int err, found = 0;
+
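+ /*
+ * Pick the timer node whose "ti,core-mask" includes this core (e.g. a
+ * node with ti,core-mask = <0x1>, illustrative, matches core 0), or
+ * fall back to the first node without a core-mask.
+ */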
+ for_each_compatible_node(np, NULL, "ti,c64x+timer64") {
+ err = of_property_read_u32(np, "ti,core-mask", &val);
+ if (!err) {
+ if (val & (1 << get_coreid())) {
+ found = 1;
+ break;
+ }
+ } else if (!first)
+ first = np;
+ }
+ if (!found) {
+ /* try first one with no core-mask */
+ if (first)
+ np = of_node_get(first);
+ else {
+ pr_debug("Cannot find ti,c64x+timer64 timer.\n");
+ return;
+ }
+ }
+
+ timer = of_iomap(np, 0);
+ if (!timer) {
+ pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+ goto out;
+ }
+ pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+
+ cd->irq = irq_of_parse_and_map(np, 0);
+ if (cd->irq == NO_IRQ) {
+ pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+ iounmap(timer);
+ goto out;
+ }
+
+ /* If there is a device state control, save the ID. */
+ err = of_property_read_u32(np, "ti,dscr-dev-enable", &val);
+ if (!err) {
+ timer64_devstate_id = val;
+
+ /*
+ * It is necessary to enable the timer block here because
+ * the TIMER_DIVISOR macro needs to read a timer register
+ * to get the divisor.
+ */
+ dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
+ }
+
+ pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+
+ clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);
+
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(250, cd);
+
+ cd->cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_register_device(cd);
+ setup_irq(cd->irq, &timer_iact);
+
+out:
+ of_node_put(np);
+ return;
+}
diff --git a/arch/cris/Kconfig.debug b/arch/cris/Kconfig.debug
index 0b9a630dc81..14881e81e8a 100644
--- a/arch/cris/Kconfig.debug
+++ b/arch/cris/Kconfig.debug
@@ -1,6 +1,5 @@
menu "Kernel hacking"
-#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
config PROFILING
bool "Kernel profiling support"
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index a2bde374462..b34438e026b 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -404,8 +404,7 @@ static int __init init_axis_flash(void)
*/
int blockstat;
do {
- blockstat = main_mtd->block_isbad(main_mtd,
- ptable_sector);
+ blockstat = mtd_block_isbad(main_mtd, ptable_sector);
if (blockstat < 0)
ptable_sector = 0; /* read error */
else if (blockstat)
@@ -413,8 +412,8 @@ static int __init init_axis_flash(void)
} while (blockstat && ptable_sector);
#endif
if (ptable_sector) {
- main_mtd->read(main_mtd, ptable_sector, PAGESIZE,
- &len, page);
+ mtd_read(main_mtd, ptable_sector, PAGESIZE, &len,
+ page);
ptable_head = &((struct partitiontable *) page)->head;
}
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index bb978ede898..6773fc83a67 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -47,14 +47,12 @@ static struct clocksource cont_rotime = {
.rating = 300,
.read = read_cont_rotime,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init etrax_init_cont_rotime(void)
{
- cont_rotime.mult = clocksource_khz2mult(100000, cont_rotime.shift);
- clocksource_register(&cont_rotime);
+ clocksource_register_khz(&cont_rotime, 100000);
return 0;
}
arch_initcall(etrax_init_cont_rotime);
diff --git a/arch/cris/include/asm/ipcbuf.h b/arch/cris/include/asm/ipcbuf.h
index 8b0c18b0284..84c7e51cb6d 100644
--- a/arch/cris/include/asm/ipcbuf.h
+++ b/arch/cris/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __CRIS_IPCBUF_H__
-#define __CRIS_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for CRIS architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __CRIS_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index 1a4a61909ca..e269264df7c 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/asm/socket.h
@@ -64,6 +64,9 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c5455..29b92884d79 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@ struct thread_info {
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
index 551a12c0aa0..adaf82780bb 100644
--- a/arch/cris/include/asm/types.h
+++ b/arch/cris/include/asm/types.h
@@ -3,12 +3,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index dfde78b0932..bbbf7927f23 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -342,16 +342,6 @@ source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
-#config MATH_EMULATION
-# bool "Math emulation support (EXPERIMENTAL)"
-# depends on EXPERIMENTAL
-# help
-# At some point in the future, this will cause floating-point math
-# instructions to be emulated by the kernel on machines that lack a
-# floating-point math coprocessor. Thrill-seekers and chronically
-# sleep-deprived psychotic hacker types can say Y now, everyone else
-# should probably wait a while.
-
menu "Power management options"
config ARCH_SUSPEND_POSSIBLE
diff --git a/arch/frv/include/asm/ipcbuf.h b/arch/frv/include/asm/ipcbuf.h
index b546f67e455..84c7e51cb6d 100644
--- a/arch/frv/include/asm/ipcbuf.h
+++ b/arch/frv/include/asm/ipcbuf.h
@@ -1,30 +1 @@
-#ifndef __ASM_IPCBUF_H__
-#define __ASM_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for FR-V architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __ASM_IPCBUF_H__ */
-
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h
index a6b26880c1e..ce80fdadcce 100644
--- a/arch/frv/include/asm/socket.h
+++ b/arch/frv/include/asm/socket.h
@@ -62,5 +62,8 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73dc11..92d83ea99ae 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/types.h b/arch/frv/include/asm/types.h
index aa3e7fdc7f2..390a612f3a5 100644
--- a/arch/frv/include/asm/types.h
+++ b/arch/frv/include/asm/types.h
@@ -14,12 +14,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/h8300/include/asm/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h
index 2cd1ebcc109..84c7e51cb6d 100644
--- a/arch/h8300/include/asm/ipcbuf.h
+++ b/arch/h8300/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __H8300_IPCBUF_H__
-#define __H8300_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for H8/300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __H8300_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h
index 04c0f4596eb..cf1daab6f27 100644
--- a/arch/h8300/include/asm/socket.h
+++ b/arch/h8300/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784bfde..9c126e0c09a 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-#define TIF_FREEZE 16 /* is freezing for suspend */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
index bb2c91a3522..07257d9487d 100644
--- a/arch/h8300/include/asm/types.h
+++ b/arch/h8300/include/asm/types.h
@@ -3,27 +3,10 @@
#include <asm-generic/int-ll64.h>
-#if !defined(__ASSEMBLY__)
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-typedef unsigned short umode_t;
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
#ifdef __KERNEL__
#define BITS_PER_LONG 32
#endif /* __KERNEL__ */
-#endif /* __ASSEMBLY__ */
-
#endif /* _H8300_TYPES_H */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2732e1b0aa3..bd7266903bf 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -23,6 +23,9 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
@@ -471,9 +474,6 @@ config NODES_SHIFT
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 6073b187528..3deac956d32 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -26,59 +26,53 @@
#include <linux/jiffies.h>
#include <asm/processor.h>
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
-#define cputime_zero ((cputime_t)0)
#define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-
-#define cputime64_zero ((cputime64_t)0)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime_to_cputime64(__ct) (__ct)
/*
* Convert cputime <-> jiffies (HZ)
*/
-#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime_to_jiffies(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif) \
+ (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif) \
+ (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
/*
* Convert cputime <-> microseconds
*/
-#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
+#define cputime_to_usecs(__ct) \
+ ((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs) \
+ (__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs) \
+ (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
*/
-#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+#define cputime_to_secs(__ct) \
+ ((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs) \
+ (__force cputime_t)((__secs) * NSEC_PER_SEC)
/*
* Convert cputime <-> timespec (nsec)
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- cputime_t ret = val->tv_sec * NSEC_PER_SEC;
- return (ret + val->tv_nsec);
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
{
- val->tv_sec = ct / NSEC_PER_SEC;
- val->tv_nsec = ct % NSEC_PER_SEC;
+ val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+ val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
}
/*
@@ -86,25 +80,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
*/
static inline cputime_t timeval_to_cputime(struct timeval *val)
{
- cputime_t ret = val->tv_sec * NSEC_PER_SEC;
- return (ret + val->tv_usec * NSEC_PER_USEC);
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
{
- val->tv_sec = ct / NSEC_PER_SEC;
- val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+ val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+ val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
}
/*
* Convert cputime <-> clock (USER_HZ)
*/
-#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+#define cputime_to_clock_t(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) \
+ (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
/*
* Convert cputime64 to clock.
*/
-#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+#define cputime64_to_clock_t(__ct) \
+ cputime_to_clock_t((__force cputime_t)__ct)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
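
The cputime.h hunk above turns cputime_t and cputime64_t into __nocast types, so sparse flags any place a raw u64 (for example a jiffies count) is mixed with a nanosecond-backed cputime without an explicit __force cast. A minimal user-space sketch of the same idea, using a struct wrapper in place of the sparse annotation; the HZ value and the wrapper type are assumptions for illustration only:

/* cputime_sketch.c - user-space illustration of typed time conversions.
 * The struct wrapper plays the role of the kernel's __nocast annotation:
 * the compiler refuses to mix a cputime with a raw jiffies count.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           250ULL                 /* assumed tick rate */

typedef struct { uint64_t ns; } cputime_t;  /* nanosecond-backed, as on ia64 */

static cputime_t jiffies_to_cputime(uint64_t jif)
{
        return (cputime_t){ jif * (NSEC_PER_SEC / HZ) };
}

static uint64_t cputime_to_jiffies(cputime_t ct)
{
        return ct.ns / (NSEC_PER_SEC / HZ);
}

int main(void)
{
        cputime_t ct = jiffies_to_cputime(1000);   /* 4 seconds at HZ=250 */

        printf("1000 jiffies = %llu ns = %llu jiffies round-trip\n",
               (unsigned long long)ct.ns,
               (unsigned long long)cputime_to_jiffies(ct));
        /* "ct + 5" would not compile: the wrapper forces an explicit
         * conversion, which is the property __nocast/__force gives sparse. */
        return 0;
}
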
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 105c93b00b1..b6a809fa299 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -11,10 +11,12 @@ extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_pass_through;
extern int iommu_detected;
+extern int iommu_group_mf;
#else
#define iommu_pass_through (0)
#define no_iommu (1)
#define iommu_detected (0)
+#define iommu_group_mf (0)
#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
diff --git a/arch/ia64/include/asm/ipcbuf.h b/arch/ia64/include/asm/ipcbuf.h
index 079899ae7d3..84c7e51cb6d 100644
--- a/arch/ia64/include/asm/ipcbuf.h
+++ b/arch/ia64/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_IA64_IPCBUF_H
-#define _ASM_IA64_IPCBUF_H
-
-/*
- * The ipc64_perm structure for IA-64 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned short seq;
- unsigned short __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_IA64_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 51427eaa51b..4b03664e3fb 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -71,4 +71,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84e7bc..e054bcc4273 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@ struct thread_info {
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
-#define TIF_FREEZE 20 /* is freezing for suspend */
#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
/* "work to do on user-return" bits */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index 82b3939d271..3f5b122d997 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -28,8 +28,6 @@
# define __IA64_UL(x) ((unsigned long)(x))
# define __IA64_UL_CONST(x) x##UL
-typedef unsigned int umode_t;
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index 1d2427d116e..fbb519828aa 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -71,7 +71,7 @@
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-__DEFINE_GUEST_HANDLE(u64, unsigned long);
+
DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index c539c689493..2d67317a1ec 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -24,7 +24,7 @@
* Copyright (C) 2006, Intel Corp. All rights reserved.
*
*/
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
@@ -35,10 +35,10 @@
#define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte;
#define define_one_ro(name) \
-static SYSDEV_ATTR(name, 0444, show_##name, NULL)
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
#define define_one_rw(name) \
-static SYSDEV_ATTR(name, 0644, show_##name, store_##name)
+static DEVICE_ATTR(name, 0644, show_##name, store_##name)
static u64 call_start[NR_CPUS];
static u64 phys_addr[NR_CPUS];
@@ -55,7 +55,7 @@ static u64 resources[NR_CPUS];
#define show(name) \
static ssize_t \
-show_##name(struct sys_device *dev, struct sysdev_attribute *attr, \
+show_##name(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
u32 cpu=dev->id; \
@@ -64,7 +64,7 @@ show_##name(struct sys_device *dev, struct sysdev_attribute *attr, \
#define store(name) \
static ssize_t \
-store_##name(struct sys_device *dev, struct sysdev_attribute *attr, \
+store_##name(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t size) \
{ \
unsigned int cpu=dev->id; \
@@ -78,7 +78,7 @@ show(call_start)
* processor. The cpu number in driver is only used for storing data.
*/
static ssize_t
-store_call_start(struct sys_device *dev, struct sysdev_attribute *attr,
+store_call_start(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
@@ -127,7 +127,7 @@ show(err_type_info)
store(err_type_info)
static ssize_t
-show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr,
+show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
char *buf)
{
unsigned int cpu=dev->id;
@@ -135,7 +135,7 @@ show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr,
}
static ssize_t
-store_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr,
+store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
@@ -159,8 +159,8 @@ show(err_struct_info)
store(err_struct_info)
static ssize_t
-show_err_data_buffer(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+show_err_data_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned int cpu=dev->id;
@@ -171,8 +171,8 @@ show_err_data_buffer(struct sys_device *dev,
}
static ssize_t
-store_err_data_buffer(struct sys_device *dev,
- struct sysdev_attribute *attr,
+store_err_data_buffer(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned int cpu=dev->id;
@@ -209,14 +209,14 @@ define_one_ro(capabilities);
define_one_ro(resources);
static struct attribute *default_attrs[] = {
- &attr_call_start.attr,
- &attr_virtual_to_phys.attr,
- &attr_err_type_info.attr,
- &attr_err_struct_info.attr,
- &attr_err_data_buffer.attr,
- &attr_status.attr,
- &attr_capabilities.attr,
- &attr_resources.attr,
+ &dev_attr_call_start.attr,
+ &dev_attr_virtual_to_phys.attr,
+ &dev_attr_err_type_info.attr,
+ &dev_attr_err_struct_info.attr,
+ &dev_attr_err_data_buffer.attr,
+ &dev_attr_status.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_resources.attr,
NULL
};
@@ -225,12 +225,12 @@ static struct attribute_group err_inject_attr_group = {
.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev)
+static int __cpuinit err_inject_add_dev(struct device * sys_dev)
{
return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}
-static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev)
+static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
{
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
return 0;
@@ -239,9 +239,9 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *sys_dev;
- sys_dev = get_cpu_sysdev(cpu);
+ sys_dev = get_cpu_device(cpu);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -283,13 +283,13 @@ static void __exit
err_inject_exit(void)
{
int i;
- struct sys_device *sys_dev;
+ struct device *sys_dev;
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Exit error injection driver.\n");
#endif
for_each_online_cpu(i) {
- sys_dev = get_cpu_sysdev(i);
+ sys_dev = get_cpu_device(i);
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
}
unregister_hotcpu_notifier(&err_inject_cpu_notifier);
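
The err_inject.c conversion above is mechanical: struct sys_device/SYSDEV_ATTR become struct device/DEVICE_ATTR, and get_cpu_sysdev() becomes get_cpu_device(). A hedged sketch of the resulting shape, not a complete module; the "foo" attribute, its backing variable, and the group name are hypothetical, while DEVICE_ATTR, sysfs_create_group() and get_cpu_device() are the stock interfaces used in the hunk:

/* Fragment: per-CPU sysfs attribute on the generic CPU struct device. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

static u64 foo_value;

static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)foo_value);
}

static ssize_t store_foo(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t size)
{
        foo_value = simple_strtoull(buf, NULL, 0);
        return size;
}

static DEVICE_ATTR(foo, 0644, show_foo, store_foo);   /* -> dev_attr_foo */

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};

static struct attribute_group foo_attr_group = {
        .attrs = foo_attrs,
        .name  = "err_inject_demo",
};

/* Registration against the CPU device, mirroring err_inject_add_dev(). */
static int foo_add_cpu(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        return dev ? sysfs_create_group(&dev->kobj, &foo_attr_group) : -ENODEV;
}
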
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index c16162c7086..eb117572005 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -33,6 +33,7 @@ int force_iommu __read_mostly;
#endif
int iommu_pass_through;
+int iommu_group_mf;
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 89accc626b8..b2c65e034f5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2228,7 +2228,7 @@ pfm_alloc_file(pfm_context_t *ctx)
/*
* allocate a new dcache entry
*/
- path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+ path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
if (!path.dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5e2c72498c5..cd57d7312de 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -220,6 +220,23 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
}
}
+/* merge overlaps */
+static int __init
+merge_regions (struct rsvd_region *rsvd_region, int max)
+{
+ int i;
+ for (i = 1; i < max; ++i) {
+ if (rsvd_region[i].start >= rsvd_region[i-1].end)
+ continue;
+ if (rsvd_region[i].end > rsvd_region[i-1].end)
+ rsvd_region[i-1].end = rsvd_region[i].end;
+ --max;
+ memmove(&rsvd_region[i], &rsvd_region[i+1],
+ (max - i) * sizeof(struct rsvd_region));
+ }
+ return max;
+}
+
/*
* Request address space for all standard resources
*/
@@ -270,6 +287,7 @@ static void __init setup_crashkernel(unsigned long total, int *n)
if (ret == 0 && size > 0) {
if (!base) {
sort_regions(rsvd_region, *n);
+ *n = merge_regions(rsvd_region, *n);
base = kdump_find_rsvd_region(size,
rsvd_region, *n);
}
@@ -373,6 +391,7 @@ reserve_memory (void)
BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
sort_regions(rsvd_region, num_rsvd_regions);
+ num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
}
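
merge_regions() relies on sort_regions() having ordered the array by start address; it then folds any region that begins inside its predecessor into that predecessor, shifts the tail down, and returns the new count. A standalone sketch of the same sort-then-merge pass with made-up region values; this version also re-checks the entry shifted into the current slot so that chained overlaps collapse in one call:

/* merge_regions_demo.c - sort reserved regions, then merge overlaps. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rsvd_region { unsigned long start, end; };

static int cmp_start(const void *a, const void *b)
{
        const struct rsvd_region *ra = a, *rb = b;
        return (ra->start > rb->start) - (ra->start < rb->start);
}

static int merge_regions(struct rsvd_region *r, int max)
{
        int i;

        for (i = 1; i < max; ++i) {
                if (r[i].start >= r[i - 1].end)
                        continue;                 /* no overlap with predecessor */
                if (r[i].end > r[i - 1].end)
                        r[i - 1].end = r[i].end;  /* extend the predecessor */
                --max;
                memmove(&r[i], &r[i + 1], (max - i) * sizeof(*r));
                --i;                              /* re-check the shifted-in entry */
        }
        return max;
}

int main(void)
{
        struct rsvd_region r[] = {
                { 0x1000, 0x2000 }, { 0x1800, 0x3000 },
                { 0x5000, 0x6000 }, { 0x2800, 0x3800 },
        };
        int i, n = sizeof(r) / sizeof(r[0]);

        qsort(r, n, sizeof(r[0]), cmp_start);
        n = merge_regions(r, n);
        for (i = 0; i < n; i++)
                printf("region %d: 0x%lx-0x%lx\n", i, r[i].start, r[i].end);
        return 0;                /* prints 0x1000-0x3800 and 0x5000-0x6000 */
}
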
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 9be1f11a01d..9deb21dbf62 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -350,7 +350,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
}
/* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
@@ -400,7 +400,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
/* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_remove_dev(struct device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i;
@@ -428,9 +428,9 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *sys_dev;
- sys_dev = get_cpu_sysdev(cpu);
+ sys_dev = get_cpu_device(cpu);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -454,7 +454,7 @@ static int __init cache_sysfs_init(void)
int i;
for_each_online_cpu(i) {
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+ struct device *sys_dev = get_cpu_device((unsigned int)i);
cache_add_dev(sys_dev);
}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 43f4c92816e..40505200249 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
return kvm;
}
-struct kvm_io_range {
+struct kvm_ia64_io_range {
unsigned long start;
unsigned long size;
unsigned long type;
};
-static const struct kvm_io_range io_ranges[] = {
+static const struct kvm_ia64_io_range io_ranges[] = {
{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int i, j;
+ int j;
unsigned long base_gfn;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- memslot = &slots->memslots[i];
+ kvm_for_each_memslot(memslot, slots) {
base_gfn = memslot->base_gfn;
-
for (j = 0; j < memslot->npages; j++) {
if (memslot->rmap[j])
put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots->memslots[log->slot];
+ memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
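
The kvm-ia64.c hunks replace open-coded indexing over the memslot array with the kvm_for_each_memslot() iterator and an id_to_memslot() lookup. A user-space sketch of the same for_each-style macro pattern; the slot structure and field names are simplified stand-ins, not the KVM ones:

/* slot_iter.c - a for_each-style iterator macro over an array of slots. */
#include <stdio.h>

struct memslot {
        unsigned long base_gfn;
        unsigned long npages;
};

struct slots {
        int nslots;
        struct memslot slot[8];
};

/* Iterate 'm' over every used entry of 's'. */
#define for_each_slot(m, s) \
        for ((m) = (s)->slot; (m) < (s)->slot + (s)->nslots; (m)++)

int main(void)
{
        struct slots s = { 3, { { 0x000, 16 }, { 0x100, 32 }, { 0x400, 8 } } };
        struct memslot *m;

        for_each_slot(m, &s)
                printf("slot: base_gfn=0x%lx npages=%lu\n",
                       m->base_gfn, m->npages);
        return 0;
}
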
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index f114a3b14c6..1516d1dc11f 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -16,6 +16,7 @@
*/
#include <linux/bootmem.h>
#include <linux/efi.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
@@ -348,7 +349,7 @@ paging_init (void)
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
- add_active_range(0, 0, max_low_pfn);
+ memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 00cb0e26c64..13df239dbed 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -557,8 +558,7 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
#endif
if (start < end)
- add_active_range(nid, __pa(start) >> PAGE_SHIFT,
- __pa(end) >> PAGE_SHIFT);
+ memblock_add_node(__pa(start), end - start, nid);
return 0;
}
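
Both mm hunks above swap add_active_range(), which took a node id plus start/end page-frame numbers, for memblock_add_node(), which takes a physical base and a size in bytes. A tiny sketch of that unit change, assuming 16K pages and an example address range:

/* pfn_vs_bytes.c - the unit change behind add_active_range() ->
 * memblock_add_node(): PFNs on the old side, base + byte size on the new.
 */
#include <stdio.h>

#define PAGE_SHIFT 14                      /* assumed 16K pages */

int main(void)
{
        unsigned long start = 0x04000000, end = 0x08000000;   /* example range */

        printf("old: add_active_range(nid, %lu, %lu)      /* PFNs */\n",
               start >> PAGE_SHIFT, end >> PAGE_SHIFT);
        printf("new: memblock_add_node(0x%lx, 0x%lx, nid) /* base, bytes */\n",
               start, end - start);
        return 0;
}
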
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 485c42d97e8..dfac09ab027 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -150,12 +150,11 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
* PROM does not support SAL_INTR_REDIRECT, or it failed.
* Revert to old method.
*/
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
+ GFP_ATOMIC);
if (new_irq_info == NULL)
return NULL;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
/* Free the old PROM new_irq_info structure */
sn_intr_free(local_nasid, local_widget, new_irq_info);
unregister_intr_pda(new_irq_info);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 5698f29d5ad..8886a0bc4a1 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -127,12 +127,11 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+ soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
- memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
soft->pbi_buscommon.bs_base = (unsigned long)
ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
sizeof(struct pic));
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 642451e770e..e77c477245f 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -600,11 +600,11 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
+ tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
+ GFP_KERNEL);
if (!tioca_common)
return NULL;
- memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
tioca_common->ca_common.bs_base = (unsigned long)
ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
sizeof(struct tioca_common));
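
The three SN hunks above all collapse a kmalloc()/kzalloc() followed by memcpy() into a single kmemdup() call, which allocates and copies in one step and simply returns NULL on allocation failure. The same pattern in portable C, with malloc() standing in for the kernel allocator:

/* memdup.c - user-space analogue of kmemdup(): allocate-and-copy in one call. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;          /* NULL on allocation failure, like kmemdup() */
}

struct bus_info { int nasid, widget; char name[16]; };

int main(void)
{
        struct bus_info prom = { 4, 2, "prom-bus" };
        struct bus_info *soft = memdup(&prom, sizeof(prom));

        if (!soft)
                return 1;
        printf("copied: nasid=%d widget=%d name=%s\n",
               soft->nasid, soft->widget, soft->name);
        free(soft);
        return 0;
}
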
diff --git a/arch/m32r/include/asm/ipcbuf.h b/arch/m32r/include/asm/ipcbuf.h
index 8d2d7c8ffdb..84c7e51cb6d 100644
--- a/arch/m32r/include/asm/ipcbuf.h
+++ b/arch/m32r/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef _ASM_M32R_IPCBUF_H
-#define _ASM_M32R_IPCBUF_H
-
-/*
- * The ipc64_perm structure for m32r architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_M32R_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h
index 469787c3009..e8b8c5bb053 100644
--- a/arch/m32r/include/asm/socket.h
+++ b/arch/m32r/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba4406..bf8fa3c06f4 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/m32r/include/asm/types.h b/arch/m32r/include/asm/types.h
index bd0035597b3..bb2eeadecf9 100644
--- a/arch/m32r/include/asm/types.h
+++ b/arch/m32r/include/asm/types.h
@@ -3,12 +3,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 973e68614f2..99c363617f2 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,7 +3,6 @@ config M68K
default y
select HAVE_IDE
select HAVE_AOUT if MMU
- select GENERIC_ATOMIC64 if MMU
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
@@ -38,12 +37,15 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
+config GENERIC_CSUM
+ bool
+
config TIME_LOW_RES
bool
default y
config ARCH_USES_GETTIMEOFFSET
- def_bool MMU
+ def_bool MMU && !COLDFIRE
config NO_IOPORT
def_bool y
@@ -58,6 +60,12 @@ config ZONE_DMA
config CPU_HAS_NO_BITFIELDS
bool
+config CPU_HAS_NO_MULDIV64
+ bool
+
+config CPU_HAS_ADDRESS_SPACES
+ bool
+
config HZ
int
default 1000 if CLEOPATRA
@@ -78,9 +86,12 @@ config MMU
config MMU_MOTOROLA
bool
+config MMU_COLDFIRE
+ bool
+
config MMU_SUN3
bool
- depends on MMU && !MMU_MOTOROLA
+ depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE
menu "Platform setup"
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index e632b2d1210..8a9c767125a 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -1,8 +1,42 @@
comment "Processor Type"
+choice
+ prompt "CPU family support"
+ default M68KCLASSIC if MMU
+ default COLDFIRE if !MMU
+ help
+ The Freescale (was Motorola) M68K family of processors implements
+ the full 68000 processor instruction set.
+ The Freescale ColdFire family of processors is a modern derivative
+ of the 68000 processor family. They are mainly targeted at embedded
+ applications, and are all System-On-Chip (SOC) devices, as opposed
+ to stand alone CPUs. They implement a subset of the original 68000
+ processor instruction set.
+ If you anticipate running this kernel on a computer with a classic
+ MC68xxx processor, select M68KCLASSIC.
+ If you anticipate running this kernel on a computer with a ColdFire
+ processor, select COLDFIRE.
+
+config M68KCLASSIC
+ bool "Classic M68K CPU family support"
+
+config COLDFIRE
+ bool "Coldfire CPU family support"
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select CPU_HAS_NO_BITFIELDS
+ select CPU_HAS_NO_MULDIV64
+ select GENERIC_CSUM
+
+endchoice
+
+if M68KCLASSIC
+
config M68000
bool
select CPU_HAS_NO_BITFIELDS
+ select CPU_HAS_NO_MULDIV64
+ select GENERIC_CSUM
help
The Freescale (was Motorola) 68000 CPU is the first generation of
the well known M68K family of processors. The CPU core as well as
@@ -18,21 +52,11 @@ config MCPU32
based on the 68020 processor. For the most part it is used in
System-On-Chip parts, and does not contain a paging MMU.
-config COLDFIRE
- bool
- select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
- select CPU_HAS_NO_BITFIELDS
- help
- The Freescale ColdFire family of processors is a modern derivitive
- of the 68000 processor family. They are mainly targeted at embedded
- applications, and are all System-On-Chip (SOC) devices, as opposed
- to stand alone CPUs. They implement a subset of the original 68000
- processor instruction set.
-
config M68020
bool "68020 support"
depends on MMU
+ select GENERIC_ATOMIC64
+ select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68020
processor, say Y. Otherwise, say N. Note that the 68020 requires a
@@ -42,6 +66,8 @@ config M68020
config M68030
bool "68030 support"
depends on MMU && !MMU_SUN3
+ select GENERIC_ATOMIC64
+ select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68030
processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
@@ -50,6 +76,8 @@ config M68030
config M68040
bool "68040 support"
depends on MMU && !MMU_SUN3
+ select GENERIC_ATOMIC64
+ select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68LC040
or MC68040 processor, say Y. Otherwise, say N. Note that an
@@ -59,6 +87,8 @@ config M68040
config M68060
bool "68060 support"
depends on MMU && !MMU_SUN3
+ select GENERIC_ATOMIC64
+ select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68060
processor, say Y. Otherwise, say N.
@@ -91,10 +121,13 @@ config M68360
help
Motorola 68360 processor support.
+endif # M68KCLASSIC
+
+if COLDFIRE
+
config M5206
bool "MCF5206"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -103,7 +136,6 @@ config M5206
config M5206e
bool "MCF5206e"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -112,7 +144,6 @@ config M5206e
config M520x
bool "MCF520x"
depends on !MMU
- select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
help
@@ -121,7 +152,6 @@ config M520x
config M523x
bool "MCF523x"
depends on !MMU
- select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -131,7 +161,6 @@ config M523x
config M5249
bool "MCF5249"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -143,7 +172,6 @@ config M527x
config M5271
bool "MCF5271"
depends on !MMU
- select COLDFIRE
select M527x
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -154,7 +182,6 @@ config M5271
config M5272
bool "MCF5272"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_MBAR
help
@@ -163,7 +190,6 @@ config M5272
config M5275
bool "MCF5275"
depends on !MMU
- select COLDFIRE
select M527x
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -174,7 +200,6 @@ config M5275
config M528x
bool "MCF528x"
depends on !MMU
- select COLDFIRE
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_SPLIT
select HAVE_IPSBAR
@@ -184,7 +209,6 @@ config M528x
config M5307
bool "MCF5307"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_CACHE_CB
select HAVE_MBAR
@@ -194,7 +218,6 @@ config M5307
config M532x
bool "MCF532x"
depends on !MMU
- select COLDFIRE
select HAVE_CACHE_CB
help
Freescale (Motorola) ColdFire 532x processor support.
@@ -202,7 +225,6 @@ config M532x
config M5407
bool "MCF5407"
depends on !MMU
- select COLDFIRE
select COLDFIRE_SW_A7
select HAVE_CACHE_CB
select HAVE_MBAR
@@ -214,9 +236,8 @@ config M54xx
config M547x
bool "MCF547x"
- depends on !MMU
- select COLDFIRE
select M54xx
+ select MMU_COLDFIRE if MMU
select HAVE_CACHE_CB
select HAVE_MBAR
help
@@ -224,14 +245,15 @@ config M547x
config M548x
bool "MCF548x"
- depends on !MMU
- select COLDFIRE
+ select MMU_COLDFIRE if MMU
select M54xx
select HAVE_CACHE_CB
select HAVE_MBAR
help
Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
+endif # COLDFIRE
+
comment "Processor Specific Options"
diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug
index 2bdb1b01115..87233acef18 100644
--- a/arch/m68k/Kconfig.debug
+++ b/arch/m68k/Kconfig.debug
@@ -2,6 +2,25 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config BOOTPARAM
+ bool 'Compiled-in Kernel Boot Parameter'
+
+config BOOTPARAM_STRING
+ string 'Kernel Boot Parameter'
+ default 'console=ttyS0,19200'
+ depends on BOOTPARAM
+
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ depends on MVME16x || MAC
+ default y
+ help
+ Write kernel log output directly to a serial port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized.
+ You should normally say N here, unless you want to debug such a crash.
+
if !MMU
config FULLDEBUG
@@ -15,14 +34,6 @@ config HIGHPROFILE
help
Use a fast secondary clock to produce profiling information.
-config BOOTPARAM
- bool 'Compiled-in Kernel Boot Parameter'
-
-config BOOTPARAM_STRING
- string 'Kernel Boot Parameter'
- default 'console=ttyS0,19200'
- depends on BOOTPARAM
-
config NO_KERNEL_MSG
bool "Suppress Kernel BUG Messages"
help
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 6033f5d4e67..04a3d9be90e 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -8,8 +8,8 @@ config ARCH_MAY_HAVE_PC_FDC
menu "Platform devices"
config HEARTBEAT
- bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
- default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
+ bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || Q40
+ default y if !AMIGA && !APOLLO && !ATARI && !Q40 && HP300
help
Use the power-on LED on your machine as a load meter. The exact
behavior is platform-dependent, but normally the flash frequency is
@@ -59,27 +59,6 @@ endmenu
menu "Character devices"
-config ATARI_MFPSER
- tristate "Atari MFP serial support"
- depends on ATARI
- ---help---
- If you like to use the MFP serial ports ("Modem1", "Serial1") under
- Linux, say Y. The driver equally supports all kinds of MFP serial
- ports and automatically detects whether Serial1 is available.
-
- To compile this driver as a module, choose M here.
-
- Note for Falcon users: You also have an MFP port, it's just not
- wired to the outside... But you could use the port under Linux.
-
-config ATARI_MIDI
- tristate "Atari MIDI serial support"
- depends on ATARI
- help
- If you want to use your Atari's MIDI port in Linux, say Y.
-
- To compile this driver as a module, choose M here.
-
config ATARI_DSP56K
tristate "Atari DSP56k support (EXPERIMENTAL)"
depends on ATARI && EXPERIMENTAL
@@ -99,15 +78,6 @@ config AMIGA_BUILTIN_SERIAL
To compile this driver as a module, choose M here.
-config MULTIFACE_III_TTY
- tristate "Multiface Card III serial support"
- depends on AMIGA
- help
- If you want to use a Multiface III card's serial port in Linux,
- answer Y.
-
- To compile this driver as a module, choose M here.
-
config HPDCA
tristate "HP DCA serial support"
depends on DIO && SERIAL_8250
@@ -122,13 +92,9 @@ config HPAPCI
If you want to use the internal "APCI" serial ports on an HP400
machine, say Y here.
-config DN_SERIAL
- bool "Support for DN serial port (dummy)"
- depends on APOLLO
-
config SERIAL_CONSOLE
bool "Support for serial port console"
- depends on (AMIGA || ATARI || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || AMIGA_BUILTIN_SERIAL=y || MULTIFACE_III_TTY=y || SERIAL=y || SERIAL167 || DN_SERIAL)
+ depends on AMIGA_BUILTIN_SERIAL=y
---help---
If you say Y here, it will be possible to use a serial port as the
system console (the system console is the device which receives all
@@ -140,10 +106,10 @@ config SERIAL_CONSOLE
(/dev/tty0) will still be used as the system console by default, but
you can alter that using a kernel command line option such as
"console=ttyS1". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
+ your boot loader about how to pass options to the kernel at boot
+ time.)
- If you don't have a VGA card installed and you say Y here, the
+ If you don't have a graphical console and you say Y here, the
kernel will automatically use the first serial line, /dev/ttyS0, as
system console.
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index ef4a26aff78..7cdf6b01038 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -1,5 +1,7 @@
comment "Machine Types"
+if M68KCLASSIC
+
config AMIGA
bool "Amiga support"
depends on MMU
@@ -130,6 +132,8 @@ config SUN3
If you don't want to compile a kernel exclusively for a Sun 3, say N.
+endif # M68KCLASSIC
+
config PILOT
bool
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 6d196dadfdb..8048e1b7e55 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -82,8 +82,6 @@ __ALIGN_STR "\n\t"
extern void atari_microwire_cmd(int cmd);
-extern int atari_SCC_reset_done;
-
static unsigned int atari_irq_startup(struct irq_data *data)
{
unsigned int irq = data->irq;
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index 5a484247e49..a547ba9683d 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -202,7 +202,6 @@ static void __init atari_init_mfp_port(int cflag)
static void __init atari_init_scc_port(int cflag)
{
- extern int atari_SCC_reset_done;
static int clksrc_table[9] =
/* reg 11: 0x50 = BRG, 0x00 = RTxC, 0x28 = TRxC */
{ 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00 };
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index dbb49fc6463..e93fdae10b2 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -255,7 +255,6 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
# CONFIG_USB_SUPPORT is not set
CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_MULTIFACE_III_TTY=m
CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 562b221f695..66b26c1e848 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -223,8 +223,6 @@ CONFIG_LOGO=y
CONFIG_HID=m
CONFIG_HIDRAW=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_DN_SERIAL=y
-CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 82978df637f..15133251598 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -235,10 +235,7 @@ CONFIG_DMASOUND_ATARI=m
CONFIG_HID=m
CONFIG_HIDRAW=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_ATARI_MFPSER=y
-CONFIG_ATARI_MIDI=y
CONFIG_ATARI_DSP56K=m
-CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index ad9e85760e3..55d394edf63 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -318,13 +318,8 @@ CONFIG_DMASOUND_Q40=m
CONFIG_HID=m
CONFIG_HIDRAW=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_ATARI_MFPSER=y
-CONFIG_ATARI_MIDI=y
CONFIG_ATARI_DSP56K=m
CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_MULTIFACE_III_TTY=m
-CONFIG_SERIAL167=y
-CONFIG_DN_SERIAL=y
CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index c45aaf3b816..cdb70d66e53 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -218,8 +218,6 @@ CONFIG_GEN_RTC_X=y
CONFIG_HID=m
CONFIG_HIDRAW=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_SERIAL167=y
-CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index c5748bb4ea7..a985a7e87d4 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -39,7 +39,7 @@ enum {
#define MAX_UNIT 8
/* These identify the driver base version and may not be removed. */
-static const char version[] __devinitdata =
+static const char version[] __devinitconst =
KERN_INFO KBUILD_MODNAME ".c:v" DRV_VERSION " " DRV_RELDATE
" S.Opichal, M.Jurik, P.Stehlik\n"
KERN_INFO " http://aranym.org/\n";
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index 1c05a626054..bf16af1edac 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -24,7 +24,8 @@
unsigned long hp300_model;
unsigned long hp300_uart_scode = -1;
-unsigned char ledstate;
+unsigned char hp300_ledstate;
+EXPORT_SYMBOL(hp300_ledstate);
static char s_hp330[] __initdata = "330";
static char s_hp340[] __initdata = "340";
diff --git a/arch/m68k/include/asm/anchor.h b/arch/m68k/include/asm/anchor.h
deleted file mode 100644
index 871c0d5cfc3..00000000000
--- a/arch/m68k/include/asm/anchor.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/****************************************************************************/
-
-/*
- * anchor.h -- Anchor CO-MEM Lite PCI host bridge part.
- *
- * (C) Copyright 2000, Moreton Bay (www.moreton.com.au)
- */
-
-/****************************************************************************/
-#ifndef anchor_h
-#define anchor_h
-/****************************************************************************/
-
-/*
- * Define basic addressing info.
- */
-#if defined(CONFIG_M5407C3)
-#define COMEM_BASE 0xFFFF0000 /* Base of CO-MEM address space */
-#define COMEM_IRQ 25 /* IRQ of anchor part */
-#else
-#define COMEM_BASE 0x80000000 /* Base of CO-MEM address space */
-#define COMEM_IRQ 25 /* IRQ of anchor part */
-#endif
-
-/****************************************************************************/
-
-/*
- * 4-byte registers of CO-MEM, so adjust register addresses for
- * easy access. Handy macro for word access too.
- */
-#define LREG(a) ((a) >> 2)
-#define WREG(a) ((a) >> 1)
-
-
-/*
- * Define base addresses within CO-MEM Lite register address space.
- */
-#define COMEM_I2O 0x0000 /* I2O registers */
-#define COMEM_OPREGS 0x0400 /* Operation registers */
-#define COMEM_PCIBUS 0x2000 /* Direct access to PCI bus */
-#define COMEM_SHMEM 0x4000 /* Shared memory region */
-
-#define COMEM_SHMEMSIZE 0x4000 /* Size of shared memory */
-
-
-/*
- * Define CO-MEM Registers.
- */
-#define COMEM_I2OHISR 0x0030 /* I2O host interrupt status */
-#define COMEM_I2OHIMR 0x0034 /* I2O host interrupt mask */
-#define COMEM_I2OLISR 0x0038 /* I2O local interrupt status */
-#define COMEM_I2OLIMR 0x003c /* I2O local interrupt mask */
-#define COMEM_IBFPFIFO 0x0040 /* I2O inbound free/post FIFO */
-#define COMEM_OBPFFIFO 0x0044 /* I2O outbound post/free FIFO */
-#define COMEM_IBPFFIFO 0x0048 /* I2O inbound post/free FIFO */
-#define COMEM_OBFPFIFO 0x004c /* I2O outbound free/post FIFO */
-
-#define COMEM_DAHBASE 0x0460 /* Direct access base address */
-
-#define COMEM_NVCMD 0x04a0 /* I2C serial command */
-#define COMEM_NVREAD 0x04a4 /* I2C serial read */
-#define COMEM_NVSTAT 0x04a8 /* I2C status */
-
-#define COMEM_DMALBASE 0x04b0 /* DMA local base address */
-#define COMEM_DMAHBASE 0x04b4 /* DMA host base address */
-#define COMEM_DMASIZE 0x04b8 /* DMA size */
-#define COMEM_DMACTL 0x04bc /* DMA control */
-
-#define COMEM_HCTL 0x04e0 /* Host control */
-#define COMEM_HINT 0x04e4 /* Host interrupt control/status */
-#define COMEM_HLDATA 0x04e8 /* Host to local data mailbox */
-#define COMEM_LINT 0x04f4 /* Local interrupt contole status */
-#define COMEM_LHDATA 0x04f8 /* Local to host data mailbox */
-
-#define COMEM_LBUSCFG 0x04fc /* Local bus configuration */
-
-
-/*
- * Commands and flags for use with Direct Access Register.
- */
-#define COMEM_DA_IACK 0x00000000 /* Interrupt acknowledge (read) */
-#define COMEM_DA_SPCL 0x00000010 /* Special cycle (write) */
-#define COMEM_DA_MEMRD 0x00000004 /* Memory read cycle */
-#define COMEM_DA_MEMWR 0x00000004 /* Memory write cycle */
-#define COMEM_DA_IORD 0x00000002 /* I/O read cycle */
-#define COMEM_DA_IOWR 0x00000002 /* I/O write cycle */
-#define COMEM_DA_CFGRD 0x00000006 /* Configuration read cycle */
-#define COMEM_DA_CFGWR 0x00000006 /* Configuration write cycle */
-
-#define COMEM_DA_ADDR(a) ((a) & 0xffffe000)
-
-#define COMEM_DA_OFFSET(a) ((a) & 0x00001fff)
-
-
-/*
- * The PCI bus will be limited in what slots will actually be used.
- * Define valid device numbers for different boards.
- */
-#if defined(CONFIG_M5407C3)
-#define COMEM_MINDEV 14 /* Minimum valid DEVICE */
-#define COMEM_MAXDEV 14 /* Maximum valid DEVICE */
-#define COMEM_BRIDGEDEV 15 /* Slot bridge is in */
-#else
-#define COMEM_MINDEV 0 /* Minimum valid DEVICE */
-#define COMEM_MAXDEV 3 /* Maximum valid DEVICE */
-#endif
-
-#define COMEM_MAXPCI (COMEM_MAXDEV+1) /* Maximum PCI devices */
-
-
-/****************************************************************************/
-#endif /* anchor_h */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 0392b28656a..c0cb3635077 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -30,6 +30,8 @@ extern u_long atari_switches;
extern int atari_rtc_year_offset;
extern int atari_dont_touch_floppy_select;
+extern int atari_SCC_reset_done;
+
/* convenience macros for testing machine type */
#define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST)
#define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 65c6be6c818..4eba796c00d 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -55,6 +55,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
return c != 0;
}
+static inline int atomic_dec_and_test_lt(atomic_t *v)
+{
+ char c;
+ __asm__ __volatile__(
+ "subql #1,%1; slt %0"
+ : "=d" (c), "=m" (*v)
+ : "m" (*v));
+ return c != 0;
+}
+
static inline int atomic_inc_and_test(atomic_t *v)
{
char c;
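
The new atomic_dec_and_test_lt() uses the 68k slt condition to report whether the counter went negative after the decrement, in contrast to atomic_dec_and_test(), which tests for zero. A portable user-space model of those semantics, using the GCC __atomic builtins instead of m68k assembly:

/* dec_and_test_lt.c - portable model of the new m68k helper. */
#include <stdio.h>

/* Return nonzero iff the value is negative after the decrement. */
static int atomic_dec_and_test_lt(int *v)
{
        return __atomic_sub_fetch(v, 1, __ATOMIC_SEQ_CST) < 0;
}

int main(void)
{
        int v = 1;

        printf("1 -> 0:  lt=%d\n", atomic_dec_and_test_lt(&v)); /* 0: hit zero */
        printf("0 -> -1: lt=%d\n", atomic_dec_and_test_lt(&v)); /* 1: went negative */
        return 0;
}
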
diff --git a/arch/m68k/include/asm/blinken.h b/arch/m68k/include/asm/blinken.h
index 1a749cf7b06..0626582a7db 100644
--- a/arch/m68k/include/asm/blinken.h
+++ b/arch/m68k/include/asm/blinken.h
@@ -17,15 +17,15 @@
#define HP300_LEDS 0xf001ffff
-extern unsigned char ledstate;
+extern unsigned char hp300_ledstate;
static __inline__ void blinken_leds(int on, int off)
{
if (MACH_IS_HP300)
{
- ledstate |= on;
- ledstate &= ~off;
- out_8(HP300_LEDS, ~ledstate);
+ hp300_ledstate |= on;
+ hp300_ledstate &= ~off;
+ out_8(HP300_LEDS, ~hp300_ledstate);
}
}
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 73de7c89d8e..8104bd87464 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -2,23 +2,89 @@
#define _M68K_CACHEFLUSH_H
#include <linux/mm.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/mcfsim.h>
+#endif
/* cache code */
#define FLUSH_I_AND_D (0x00000808)
#define FLUSH_I (0x00000008)
+#ifndef ICACHE_MAX_ADDR
+#define ICACHE_MAX_ADDR 0
+#define ICACHE_SET_MASK 0
+#define DCACHE_MAX_ADDR 0
+#define DCACHE_SETMASK 0
+#endif
+
+static inline void flush_cf_icache(unsigned long start, unsigned long end)
+{
+ unsigned long set;
+
+ for (set = start; set <= end; set += (0x10 - 3)) {
+ __asm__ __volatile__ (
+ "cpushl %%ic,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%ic,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%ic,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%ic,(%0)"
+ : "=a" (set)
+ : "a" (set));
+ }
+}
+
+static inline void flush_cf_dcache(unsigned long start, unsigned long end)
+{
+ unsigned long set;
+
+ for (set = start; set <= end; set += (0x10 - 3)) {
+ __asm__ __volatile__ (
+ "cpushl %%dc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%dc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%dc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%dc,(%0)"
+ : "=a" (set)
+ : "a" (set));
+ }
+}
+
+static inline void flush_cf_bcache(unsigned long start, unsigned long end)
+{
+ unsigned long set;
+
+ for (set = start; set <= end; set += (0x10 - 3)) {
+ __asm__ __volatile__ (
+ "cpushl %%bc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ "addq%.l #1,%0\n\t"
+ "cpushl %%bc,(%0)"
+ : "=a" (set)
+ : "a" (set));
+ }
+}
+
/*
* Cache handling functions
*/
static inline void flush_icache(void)
{
- if (CPU_IS_040_OR_060)
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_icache(0, ICACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
asm volatile ( "nop\n"
" .chip 68040\n"
" cpusha %bc\n"
" .chip 68k");
- else {
+ } else {
unsigned long tmp;
asm volatile ( "movec %%cacr,%0\n"
" or.w %1,%0\n"
@@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len);
process changes. */
#define __flush_cache_all() \
({ \
- if (CPU_IS_040_OR_060) \
+ if (CPU_IS_COLDFIRE) { \
+ flush_cf_dcache(0, DCACHE_MAX_ADDR); \
+ } else if (CPU_IS_040_OR_060) { \
__asm__ __volatile__("nop\n\t" \
".chip 68040\n\t" \
"cpusha %dc\n\t" \
".chip 68k"); \
- else { \
+ } else { \
unsigned long _tmp; \
__asm__ __volatile__("movec %%cacr,%0\n\t" \
"orw %1,%0\n\t" \
@@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ unsigned long addr, start, end;
+ addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
+ start = addr & ICACHE_SET_MASK;
+ end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_bcache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_bcache(start, end);
+ } else if (CPU_IS_040_OR_060) {
__asm__ __volatile__("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index ec514485c8b..2f88d867c71 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -3,6 +3,10 @@
#include <linux/in6.h>
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -34,30 +38,6 @@ extern __wsum csum_partial_copy_nocheck(const void *src,
void *dst, int len,
__wsum sum);
-
-#ifdef CONFIG_COLDFIRE
-
-/*
- * The ColdFire cores don't support all the 68k instructions used
- * in the optimized checksum code below. So it reverts back to using
- * more standard C coded checksums. The fast checksum code is
- * significantly larger than the optimized version, so it is not
- * inlined here.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- unsigned int tmp = (__force u32)sum;
-
- tmp = (tmp & 0xffff) + (tmp >> 16);
- tmp = (tmp & 0xffff) + (tmp >> 16);
-
- return (__force __sum16)~tmp;
-}
-
-#else
-
/*
* This is a version of ip_fast_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
@@ -97,8 +77,6 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)~sum;
}
-#endif /* CONFIG_COLDFIRE */
-
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
@@ -167,4 +145,5 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
return csum_fold(sum);
}
+#endif /* CONFIG_GENERIC_CSUM */
#endif /* _M68K_CHECKSUM_H */
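
With CONFIG_GENERIC_CSUM selected, ColdFire kernels now take the portable asm-generic checksum helpers instead of the 68020+-only assembly, and the ColdFire-specific C csum_fold() above is dropped. For reference, that folding step reduces a 32-bit one's-complement accumulator to the final 16-bit Internet checksum; a standalone sketch with a made-up partial sum:

/* csum_fold_demo.c - fold a 32-bit ones-complement sum to 16 bits. */
#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);   /* add the carries back in */
        sum = (sum & 0xffff) + (sum >> 16);   /* at most one more carry */
        return (uint16_t)~sum;
}

int main(void)
{
        /* Ones-complement sum of the 16-bit words 0x4500 + 0x0073 + 0x4000,
         * a made-up partial header, just to show the fold. */
        uint32_t sum = 0x4500 + 0x0073 + 0x4000;

        printf("folded checksum = 0x%04x\n", csum_fold(sum));
        return 0;
}
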
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index edb66148a71..444ea8a09e9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -1,7 +1,9 @@
#ifndef _M68K_DIV64_H
#define _M68K_DIV64_H
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#include <asm-generic/div64.h>
+#else
#include <linux/types.h>
@@ -27,8 +29,6 @@
__rem; \
})
-#else
-#include <asm-generic/div64.h>
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */
#endif /* _M68K_DIV64_H */
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 01c193d9141..e9b7cda5974 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,10 +59,10 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
-#ifndef CONFIG_SUN3
-#define ELF_EXEC_PAGESIZE 4096
-#else
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
#define ELF_EXEC_PAGESIZE 8192
+#else
+#define ELF_EXEC_PAGESIZE 4096
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index c3c5a8643e1..622138dc728 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -222,16 +222,24 @@
* Non-MMU systems do not reserve %a2 in this way, and this definition is
* not used for them.
*/
+#ifdef CONFIG_MMU
+
#define curptr a2
#define GET_CURRENT(tmp) get_current tmp
.macro get_current reg=%d0
movel %sp,\reg
- andw #-THREAD_SIZE,\reg
+ andl #-THREAD_SIZE,\reg
movel \reg,%curptr
movel %curptr@,%curptr
.endm
+#else
+
+#define GET_CURRENT(tmp)
+
+#endif /* CONFIG_MMU */
+
#else /* C source */
#define STR(X) STR1(X)
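
GET_CURRENT recovers the current thread_info by rounding the stack pointer down to a THREAD_SIZE boundary; the hunk widens the mask to the long-word form and compiles the macro out entirely on non-MMU builds. The masking itself is just the following, shown as a user-space sketch with an assumed 8K stack size and a made-up stack pointer:

/* current_mask.c - C model of the GET_CURRENT stack-pointer masking. */
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* assumed 8K kernel stacks */

static unsigned long thread_info_base(unsigned long sp)
{
        return sp & -THREAD_SIZE;   /* round down to the stack's base */
}

int main(void)
{
        unsigned long sp = 0x0ff0b7a4;   /* made-up kernel stack pointer */

        printf("sp=0x%08lx -> thread_info at 0x%08lx\n",
               sp, thread_info_base(sp));
        return 0;
}
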
diff --git a/arch/m68k/include/asm/fpu.h b/arch/m68k/include/asm/fpu.h
index ffb6b8cfc6d..526db9da9e4 100644
--- a/arch/m68k/include/asm/fpu.h
+++ b/arch/m68k/include/asm/fpu.h
@@ -12,6 +12,8 @@
#define FPSTATESIZE (96)
#elif defined(CONFIG_M68KFPU_EMU)
#define FPSTATESIZE (28)
+#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#define FPSTATESIZE (16)
#elif defined(CONFIG_M68060)
#define FPSTATESIZE (12)
#else
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index b2046839f4b..00d0071de4c 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -225,7 +225,8 @@ static inline void gpio_set_value(unsigned gpio, int value)
static inline int gpio_to_irq(unsigned gpio)
{
- return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+ return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
+ : __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned irq)
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
index a623ea3f095..84c7e51cb6d 100644
--- a/arch/m68k/include/asm/ipcbuf.h
+++ b/arch/m68k/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __m68k_IPCBUF_H__
-#define __m68k_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* __m68k_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 6198df5ff24..0e89fa05de0 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -25,7 +25,8 @@
#define NR_IRQS 0
#endif
-#ifdef CONFIG_MMU
+#if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \
+ defined(CONFIG_M68040) || defined(CONFIG_M68060)
/*
* Interrupt source definitions
@@ -80,7 +81,7 @@ extern unsigned int irq_canonicalize(unsigned int irq);
#else
#define irq_canonicalize(irq) (irq)
-#endif /* CONFIG_MMU */
+#endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
extern atomic_t irq_err_count;
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 16a1835f9b2..47906aafbf6 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -39,8 +39,12 @@
#define ACR_CM_OFF_PRE 0x00000040 /* No cache, precise */
#define ACR_CM_OFF_IMP 0x00000060 /* No cache, imprecise */
#define ACR_CM 0x00000060 /* Cache mode mask */
+#define ACR_SP 0x00000008 /* Supervisor protect */
#define ACR_WPROTECT 0x00000004 /* Write protect */
+#define ACR_BA(x) ((x) & 0xff000000)
+#define ACR_ADMSK(x) ((((x) - 1) & 0xff000000) >> 8)
+
#if defined(CONFIG_M5407)
#define ICACHE_SIZE 0x4000 /* instruction - 16k */
@@ -56,6 +60,11 @@
#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
#define CACHE_WAYS 4 /* 4 ways */
+#define ICACHE_SET_MASK ((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define DCACHE_SET_MASK ((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define ICACHE_MAX_ADDR ICACHE_SET_MASK
+#define DCACHE_MAX_ADDR DCACHE_SET_MASK
+
/*
* Version 4 cores have a true harvard style separate instruction
* and data cache. Enable data and instruction caches, also enable write
@@ -73,6 +82,27 @@
#else
#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
#endif
+#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+
+#if defined(CONFIG_MMU)
+/*
+ * If running with the MMU enabled then we need to map the internal
+ * register region as non-cacheable. And then we map all our RAM as
+ * cacheable and supervisor access only.
+ */
+#define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+ ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+ ACR_ENABLE+ACR_SUPER+ACR_SP)
+#define ACR2_MODE 0
+#define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+ ACR_ENABLE+ACR_SUPER+ACR_SP)
+
+#else
+
+/*
+ * For the non-MMU enabled case we map all of RAM as cacheable.
+ */
#if defined(CONFIG_CACHE_COPYBACK)
#define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
#else
@@ -80,7 +110,6 @@
#endif
#define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
-#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
@@ -94,4 +123,5 @@
#define CACHE_PUSH
#endif
+#endif /* CONFIG_MMU */
#endif /* m54xxacr_h */
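
ACR_BA() keeps the top byte of a region's base address and ACR_ADMSK() converts a power-of-two region size into the address-mask field (size minus one, top byte, shifted down into bits 23..16), which the new ACR0..ACR3 mode definitions combine with the enable/supervisor/cache-mode flags. A small sketch that prints the two fields for an assumed base and size; the CONFIG_RAMBASE/CONFIG_RAMSIZE values here are placeholders, not board values:

/* acr_fields.c - compute the ColdFire ACR base/mask fields like
 * ACR_BA()/ACR_ADMSK() in m54xxacr.h.  The base and size are examples.
 */
#include <stdio.h>

#define ACR_BA(x)     ((x) & 0xff000000)
#define ACR_ADMSK(x)  ((((x) - 1) & 0xff000000) >> 8)

int main(void)
{
        unsigned long rambase = 0x40000000;   /* hypothetical CONFIG_RAMBASE */
        unsigned long ramsize = 0x04000000;   /* hypothetical 64MB CONFIG_RAMSIZE */

        printf("ACR base field: 0x%08lx\n", ACR_BA(rambase));
        printf("ACR mask field: 0x%08lx (masked address bits are don't-care)\n",
               ACR_ADMSK(ramsize));
        return 0;
}
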
diff --git a/arch/m68k/include/asm/mac_baboon.h b/arch/m68k/include/asm/mac_baboon.h
index c2a042b8c34..a2d32f6589f 100644
--- a/arch/m68k/include/asm/mac_baboon.h
+++ b/arch/m68k/include/asm/mac_baboon.h
@@ -29,4 +29,10 @@ struct baboon {
*/
};
+extern int baboon_present;
+
+extern void baboon_register_interrupts(void);
+extern void baboon_irq_enable(int);
+extern void baboon_irq_disable(int);
+
#endif /* __ASSEMBLY **/
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index a2c7e6fcca3..fde874a01e2 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -159,4 +159,6 @@ extern void iop_upload_code(uint, __u8 *, uint, __u16);
extern void iop_download_code(uint, __u8 *, uint, __u16);
extern __u8 *iop_compare_code(uint, __u8 *, uint, __u16);
+extern void iop_register_interrupts(void);
+
#endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_oss.h b/arch/m68k/include/asm/mac_oss.h
index 3cf2b6ed685..425fbff4f4d 100644
--- a/arch/m68k/include/asm/mac_oss.h
+++ b/arch/m68k/include/asm/mac_oss.h
@@ -58,25 +58,6 @@
#define OSS_POWEROFF 0x80
-/*
- * OSS Interrupt levels for various sub-systems
- *
- * This mapping is laid out with two things in mind: first, we try to keep
- * things on their own levels to avoid having to do double-dispatches. Second,
- * the levels match as closely as possible the alternate IRQ mapping mode (aka
- * "A/UX mode") available on some VIA machines.
- */
-
-#define OSS_IRQLEV_DISABLED 0
-#define OSS_IRQLEV_IOPISM 1 /* ADB? */
-#define OSS_IRQLEV_SCSI IRQ_AUTO_2
-#define OSS_IRQLEV_NUBUS IRQ_AUTO_3 /* keep this on its own level */
-#define OSS_IRQLEV_IOPSCC IRQ_AUTO_4 /* matches VIA alternate mapping */
-#define OSS_IRQLEV_SOUND IRQ_AUTO_5 /* matches VIA alternate mapping */
-#define OSS_IRQLEV_60HZ 6 /* matches VIA alternate mapping */
-#define OSS_IRQLEV_VIA1 IRQ_AUTO_6 /* matches VIA alternate mapping */
-#define OSS_IRQLEV_PARITY 7 /* matches VIA alternate mapping */
-
#ifndef __ASSEMBLY__
struct mac_oss {
@@ -91,4 +72,8 @@ struct mac_oss {
extern volatile struct mac_oss *oss;
extern int oss_present;
+extern void oss_register_interrupts(void);
+extern void oss_irq_enable(int);
+extern void oss_irq_disable(int);
+
#endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_psc.h b/arch/m68k/include/asm/mac_psc.h
index 7808bb0b232..e5c0d71d154 100644
--- a/arch/m68k/include/asm/mac_psc.h
+++ b/arch/m68k/include/asm/mac_psc.h
@@ -211,6 +211,10 @@
extern volatile __u8 *psc;
extern int psc_present;
+extern void psc_register_interrupts(void);
+extern void psc_irq_enable(int);
+extern void psc_irq_disable(int);
+
/*
* Access functions
*/
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index a59665e1d41..aeeedf8b2d2 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -254,6 +254,15 @@
extern volatile __u8 *via1,*via2;
extern int rbv_present,via_alt_mapping;
+extern void via_register_interrupts(void);
+extern void via_irq_enable(int);
+extern void via_irq_disable(int);
+extern void via_nubus_irq_startup(int irq);
+extern void via_nubus_irq_shutdown(int irq);
+extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+extern void via1_set_head(int);
+extern int via2_scsi_drq_pending(void);
+
static inline int rbv_set_video_bpp(int bpp)
{
char val = (bpp==1)?0:(bpp==2)?1:(bpp==4)?2:(bpp==8)?3:-1;
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 12ebe43b008..682a1a2ff55 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -11,17 +11,11 @@
extern void mac_reset(void);
extern void mac_poweroff(void);
extern void mac_init_IRQ(void);
-extern int mac_irq_pending(unsigned int);
+
extern void mac_irq_enable(struct irq_data *data);
extern void mac_irq_disable(struct irq_data *data);
/*
- * Floppy driver magic hook - probably shouldn't be here
- */
-
-extern void via1_set_head(int);
-
-/*
* Macintosh Table
*/
@@ -48,7 +42,7 @@ struct mac_model
#define MAC_ADB_IOP 6
#define MAC_VIA_II 1
-#define MAC_VIA_IIci 2
+#define MAC_VIA_IICI 2
#define MAC_VIA_QUADRA 3
#define MAC_SCSI_NONE 0
diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h
index ebe1b70fe90..92aa8a4c2d0 100644
--- a/arch/m68k/include/asm/macints.h
+++ b/arch/m68k/include/asm/macints.h
@@ -104,6 +104,9 @@
#define IRQ_PSC4_3 (35)
#define IRQ_MAC_MACE_DMA IRQ_PSC4_3
+/* OSS Level 4 interrupts */
+#define IRQ_MAC_SCC (33)
+
/* Level 5 (PSC, AV Macs only) interrupts */
#define IRQ_PSC5_0 (40)
#define IRQ_PSC5_1 (41)
@@ -131,9 +134,6 @@
#define IRQ_BABOON_2 (66)
#define IRQ_BABOON_3 (67)
-/* On non-PSC machines, the serial ports share an IRQ */
-#define IRQ_MAC_SCC IRQ_AUTO_4
-
#define SLOT2IRQ(x) (x + 47)
#define IRQ2SLOT(x) (x - 47)
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
new file mode 100644
index 00000000000..313f3dd23cd
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -0,0 +1,102 @@
+#ifndef M68K_MCF_PGALLOC_H
+#define M68K_MCF_PGALLOC_H
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+
+ if (!page)
+ return NULL;
+
+ memset((void *)page, 0, PAGE_SIZE);
+ return (pte_t *) (page);
+}
+
+extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
+
+#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+
+#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+ (unsigned long)(page_address(page)))
+
+#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+ unsigned long address)
+{
+ __free_page(page);
+}
+
+#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+ pte_t *pte;
+
+ if (!page)
+ return NULL;
+
+ pte = kmap(page);
+ if (pte) {
+ clear_page(pte);
+ __flush_page_to_ram(pte);
+ flush_tlb_kernel_page(pte);
+ nocache_page(pte);
+ }
+ kunmap(page);
+
+ return page;
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *page)
+{
+ __free_page(page);
+}
+
+/*
+ * In our implementation, each pgd entry contains 1 pmd that is never allocated
+ * or freed. pgd_present is always 1, so this should never be called. -NL
+ */
+#define pmd_free(mm, pmd) BUG()
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long) pgd);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *new_pgd;
+
+ new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
+ if (!new_pgd)
+ return NULL;
+ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+ memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
+ return new_pgd;
+}
+
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif /* M68K_MCF_PGALLOC_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
new file mode 100644
index 00000000000..756bde4fb4f
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -0,0 +1,425 @@
+#ifndef _MCF_PGTABLE_H
+#define _MCF_PGTABLE_H
+
+#include <asm/mcfmmu.h>
+#include <asm/page.h>
+
+/*
+ * MMUDR bits, already in their proper place. We write these directly into
+ * the MMUDR after masking them out of the pte.
+ */
+#define CF_PAGE_LOCKED MMUDR_LK /* 0x00000002 */
+#define CF_PAGE_EXEC MMUDR_X /* 0x00000004 */
+#define CF_PAGE_WRITABLE MMUDR_W /* 0x00000008 */
+#define CF_PAGE_READABLE MMUDR_R /* 0x00000010 */
+#define CF_PAGE_SYSTEM MMUDR_SP /* 0x00000020 */
+#define CF_PAGE_COPYBACK MMUDR_CM_CCB /* 0x00000040 */
+#define CF_PAGE_NOCACHE MMUDR_CM_NCP /* 0x00000080 */
+
+#define CF_CACHEMASK (~MMUDR_CM_CCB)
+#define CF_PAGE_MMUDR_MASK 0x000000fe
+
+#define _PAGE_NOCACHE030 CF_PAGE_NOCACHE
+
+/*
+ * MMUTR bits, need shifting down.
+ */
+#define CF_PAGE_MMUTR_MASK 0x00000c00
+#define CF_PAGE_MMUTR_SHIFT 10
+
+#define CF_PAGE_VALID (MMUTR_V << CF_PAGE_MMUTR_SHIFT)
+#define CF_PAGE_SHARED (MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
+
+/*
+ * Fake bits, not implemented in CF, will get masked out before
+ * hitting hardware.
+ */
+#define CF_PAGE_DIRTY 0x00000001
+#define CF_PAGE_FILE 0x00000200
+#define CF_PAGE_ACCESSED 0x00001000
+
+#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
+#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
+#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
+#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
+#define _DESCTYPE_MASK 0x003
+#define _CACHEMASK040 (~0x060)
+#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
+
+/*
+ * Externally used page protection values.
+ */
+#define _PAGE_PRESENT (CF_PAGE_VALID)
+#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
+#define _PAGE_DIRTY (CF_PAGE_DIRTY)
+#define _PAGE_READWRITE (CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_SYSTEM \
+ | CF_PAGE_SHARED)
+
+/*
+ * Compound page protection values.
+ */
+#define PAGE_NONE __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_SHARED)
+
+#define PAGE_INIT __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC \
+ | CF_PAGE_SYSTEM)
+
+#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC \
+ | CF_PAGE_SYSTEM)
+
+#define PAGE_COPY __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_DIRTY)
+
+/*
+ * Page protections for initialising protection_map. See mm/mmap.c
+ * for use. In general, the bit positions are xwr, the P-items are
+ * private, and the S-items are shared.
+ */
+#define __P000 PAGE_NONE
+#define __P001 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE)
+#define __P010 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_WRITABLE)
+#define __P011 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE)
+#define __P100 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_EXEC)
+#define __P101 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+#define __P110 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC)
+#define __P111 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC)
+
+#define __S000 PAGE_NONE
+#define __S001 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE)
+#define __S010 PAGE_SHARED
+#define __S011 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_SHARED \
+ | CF_PAGE_READABLE)
+#define __S100 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_EXEC)
+#define __S101 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+#define __S110 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_SHARED \
+ | CF_PAGE_EXEC)
+#define __S111 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_SHARED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+
+#define PTE_MASK PAGE_MASK
+#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+#define pmd_set(pmdp, ptep) do {} while (0)
+
+static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+ pgd_val(*pgdp) = virt_to_phys(pmdp);
+}
+
+#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
+
+static inline int pte_none(pte_t pte)
+{
+ return !pte_val(pte);
+}
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_VALID;
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_val(*ptep) = 0;
+}
+
+#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define pte_page(pte) virt_to_page(__pte_page(pte))
+
+static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+#define pmd_none(pmd) pmd_none2(&(pmd))
+static inline int pmd_bad2(pmd_t *pmd) { return 0; }
+#define pmd_bad(pmd) pmd_bad2(&(pmd))
+#define pmd_present(pmd) (!pmd_none2(&(pmd)))
+static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgdp) {}
+
+#define pte_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
+ __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
+ __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
+ __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not...
+ * [we have the full set here even if they don't change from m68k]
+ */
+static inline int pte_read(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_READABLE;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_WRITABLE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_EXEC;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_ACCESSED;
+}
+
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_FILE;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_WRITABLE;
+ return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_READABLE;
+ return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_EXEC;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= CF_PAGE_WRITABLE;
+ return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+ pte_val(pte) |= CF_PAGE_READABLE;
+ return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ pte_val(pte) |= CF_PAGE_EXEC;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= CF_PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= CF_PAGE_ACCESSED;
+ return pte;
+}
+
+static inline pte_t pte_mknocache(pte_t pte)
+{
+ pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
+ return pte;
+}
+
+static inline pte_t pte_mkcache(pte_t pte)
+{
+ pte_val(pte) &= ~CF_PAGE_NOCACHE;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
+
+#define swapper_pg_dir kernel_pg_dir
+extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
+
+/*
+ * Find an entry in a pagetable directory.
+ */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/*
+ * Find an entry in a kernel pagetable directory.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * Find an entry in the second-level pagetable.
+ */
+static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+/*
+ * Find an entry in the third-level pagetable.
+ */
+#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
+
+/*
+ * Disable caching for page at given kernel virtual address.
+ */
+static inline void nocache_page(void *vaddr)
+{
+ pgd_t *dir;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long addr = (unsigned long) vaddr;
+
+ dir = pgd_offset_k(addr);
+ pmdp = pmd_offset(dir, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+ *ptep = pte_mknocache(*ptep);
+}
+
+/*
+ * Enable caching for page at given kernel virtual address.
+ */
+static inline void cache_page(void *vaddr)
+{
+ pgd_t *dir;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long addr = (unsigned long) vaddr;
+
+ dir = pgd_offset_k(addr);
+ pmdp = pmd_offset(dir, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+ *ptep = pte_mkcache(*ptep);
+}
+
+#define PTE_FILE_MAX_BITS 21
+#define PTE_FILE_SHIFT 11
+
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+ return pte_val(pte) >> PTE_FILE_SHIFT;
+}
+
+static inline pte_t pgoff_to_pte(unsigned pgoff)
+{
+ return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+}
+
+/*
+ * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
+ */
+#define __swp_type(x) ((x).val & 0xFF)
+#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
+#define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \
+ (off << PTE_FILE_SHIFT) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) (__pte((x).val))
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
+ __pte_offset(addr))
+#define pte_unmap(pte) ((void) 0)
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _MCF_PGTABLE_H */
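The new mcf_pgtable.h keeps the hardware MMUDR bits in their pte positions and builds the generic pte_mk*/pte_* helpers out of plain mask operations on those bits. The stand-alone sketch below is not part of the patch; it mirrors that pattern in user space. The CF_PAGE_* values are copied from the header above, while the pteval_t typedef and the main() harness are purely illustrative.

#include <assert.h>
#include <stdio.h>

/* Bit values mirror the MMUDR-derived CF_PAGE_* masks defined above. */
#define CF_PAGE_DIRTY     0x00000001UL  /* software-only ("fake") bit */
#define CF_PAGE_WRITABLE  0x00000008UL
#define CF_PAGE_READABLE  0x00000010UL
#define CF_PAGE_ACCESSED  0x00001000UL  /* software-only ("fake") bit */

typedef unsigned long pteval_t;          /* stand-in for the kernel's pte_t */

static pteval_t pte_mkwrite(pteval_t pte)   { return pte | CF_PAGE_WRITABLE; }
static pteval_t pte_wrprotect(pteval_t pte) { return pte & ~CF_PAGE_WRITABLE; }
static int pte_write(pteval_t pte)          { return (pte & CF_PAGE_WRITABLE) != 0; }

int main(void)
{
    pteval_t pte = CF_PAGE_READABLE | CF_PAGE_ACCESSED;

    pte = pte_mkwrite(pte);          /* grant write permission */
    assert(pte_write(pte));

    pte = pte_wrprotect(pte);        /* and take it away again */
    assert(!pte_write(pte));

    printf("final pte value: 0x%08lx\n", pte);
    return 0;
}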
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
new file mode 100644
index 00000000000..26cc3d5a63f
--- /dev/null
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -0,0 +1,112 @@
+/*
+ * mcfmmu.h -- definitions for the ColdFire v4e MMU
+ *
+ * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef MCFMMU_H
+#define MCFMMU_H
+
+/*
+ * The MMU support registers are mapped into the address space using
+ * the processor MMUBASE register. We use a fixed address for the mapping;
+ * there doesn't seem to be any need to make this configurable yet.
+ */
+#define MMUBASE 0xfe000000
+
+/*
+ * The support registers of the MMU. Names are the same as those
+ * used in the Freescale v4e documentation.
+ */
+#define MMUCR (MMUBASE + 0x00) /* Control register */
+#define MMUOR (MMUBASE + 0x04) /* Operation register */
+#define MMUSR (MMUBASE + 0x08) /* Status register */
+#define MMUAR (MMUBASE + 0x10) /* TLB Address register */
+#define MMUTR (MMUBASE + 0x14) /* TLB Tag register */
+#define MMUDR (MMUBASE + 0x18) /* TLB Data register */
+
+/*
+ * MMU Control register bit flags
+ */
+#define MMUCR_EN 0x00000001 /* Virtual mode enable */
+#define MMUCR_ASM 0x00000002 /* Address space mode */
+
+/*
+ * MMU Operation register.
+ */
+#define MMUOR_UAA 0x00000001 /* Update allocation address */
+#define MMUOR_ACC 0x00000002 /* TLB access */
+#define MMUOR_RD 0x00000004 /* TLB access read */
+#define MMUOR_WR 0x00000000 /* TLB access write */
+#define MMUOR_ADR 0x00000008 /* TLB address select */
+#define MMUOR_ITLB 0x00000010 /* ITLB operation */
+#define MMUOR_CAS 0x00000020 /* Clear non-locked ASID TLBs */
+#define MMUOR_CNL 0x00000040 /* Clear non-locked TLBs */
+#define MMUOR_CA 0x00000080 /* Clear all TLBs */
+#define MMUOR_STLB 0x00000100 /* Search TLBs */
+#define MMUOR_AAN 16 /* TLB allocation address */
+#define MMUOR_AAMASK 0xffff0000 /* AA mask */
+
+/*
+ * MMU Status register.
+ */
+#define MMUSR_HIT 0x00000002 /* Search TLB hit */
+#define MMUSR_WF 0x00000008 /* Write access fault */
+#define MMUSR_RF 0x00000010 /* Read access fault */
+#define MMUSR_SPF 0x00000020 /* Supervisor protect fault */
+
+/*
+ * MMU Read/Write Tag register.
+ */
+#define MMUTR_V 0x00000001 /* Valid */
+#define MMUTR_SG 0x00000002 /* Shared global */
+#define MMUTR_IDN 2 /* Address Space ID */
+#define MMUTR_IDMASK 0x000003fc /* ASID mask */
+#define MMUTR_VAN 10 /* Virtual Address */
+#define MMUTR_VAMASK 0xfffffc00 /* VA mask */
+
+/*
+ * MMU Read/Write Data register.
+ */
+#define MMUDR_LK 0x00000002 /* Lock entry */
+#define MMUDR_X 0x00000004 /* Execute access enable */
+#define MMUDR_W 0x00000008 /* Write access enable */
+#define MMUDR_R 0x00000010 /* Read access enable */
+#define MMUDR_SP 0x00000020 /* Supervisor access enable */
+#define MMUDR_CM_CWT 0x00000000 /* Cachable write thru */
+#define MMUDR_CM_CCB 0x00000040 /* Cachable copy back */
+#define MMUDR_CM_NCP 0x00000080 /* Non-cachable precise */
+#define MMUDR_CM_NCI 0x000000c0 /* Non-cachable imprecise */
+#define MMUDR_SZ_1MB 0x00000000 /* 1MB page size */
+#define MMUDR_SZ_4KB 0x00000100 /* 4kB page size */
+#define MMUDR_SZ_8KB 0x00000200 /* 8kB page size */
+#define MMUDR_SZ_1KB 0x00000300 /* 1kB page size */
+#define MMUDR_PAN 10 /* Physical address */
+#define MMUDR_PAMASK 0xfffffc00 /* PA mask */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Simple access functions for the MMU registers. Nothing fancy is
+ * currently required, just plain 32-bit accesses.
+ */
+static inline u32 mmu_read(u32 a)
+{
+ return *((volatile u32 *) a);
+}
+
+static inline void mmu_write(u32 a, u32 v)
+{
+ *((volatile u32 *) a) = v;
+ __asm__ __volatile__ ("nop");
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
+
+#endif
+
+#endif /* MCFMMU_H */
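The accessor functions at the bottom of this header are all the rest of the patch needs to drive the ColdFire MMU; for example, the tlbflush.h hunks further down invalidate the TLB simply by writing MMUOR_CNL into the operation register. The rough user-space model below is not part of the patch: it shows that access pattern with the memory-mapped window at MMUBASE replaced by an ordinary array. fake_mmu_regs and main() are invented for illustration, while the offset and bit value come from the definitions above.

#include <stdint.h>
#include <stdio.h>

#define MMUOR_OFF  0x04          /* operation register offset, as in MMUOR above */
#define MMUOR_CNL  0x00000040u   /* "Clear non-locked TLBs" bit */

/* Stand-in for the memory-mapped register window at MMUBASE. */
static uint32_t fake_mmu_regs[8];

static uint32_t mmu_read(uint32_t off)
{
    return fake_mmu_regs[off / 4];
}

static void mmu_write(uint32_t off, uint32_t v)
{
    fake_mmu_regs[off / 4] = v;
    /* the kernel version follows the store with a "nop" to let it settle */
}

int main(void)
{
    /* Flushing all non-locked TLB entries is a single operation-register write. */
    mmu_write(MMUOR_OFF, MMUOR_CNL);
    printf("MMUOR now holds 0x%08x\n", (unsigned int)mmu_read(MMUOR_OFF));
    return 0;
}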
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 7d4341e55a9..dc3be991d63 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -8,7 +8,206 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
}
#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
+
+#if defined(CONFIG_COLDFIRE)
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mcfmmu.h>
+#include <asm/mmu.h>
+
+#define NO_CONTEXT 256
+#define LAST_CONTEXT 255
+#define FIRST_CONTEXT 1
+
+extern unsigned long context_map[];
+extern mm_context_t next_mmu_context;
+
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+ mm_context_t ctx;
+
+ if (mm->context != NO_CONTEXT)
+ return;
+ while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+ atomic_inc(&nr_free_contexts);
+ steal_context();
+ }
+ ctx = next_mmu_context;
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context = ctx;
+ context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context != NO_CONTEXT) {
+ clear_bit(mm->context, context_map);
+ mm->context = NO_CONTEXT;
+ atomic_inc(&nr_free_contexts);
+ }
+}
+
+static inline void set_context(mm_context_t context, pgd_t *pgd)
+{
+ __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ get_mmu_context(tsk->mm);
+ set_context(tsk->mm->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+ struct mm_struct *mm)
+{
+ get_mmu_context(mm);
+ set_context(mm->context, mm->pgd);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+extern void mmu_context_init(void);
+#define prepare_arch_switch(next) load_ksp_mmu(next)
+
+static inline void load_ksp_mmu(struct task_struct *task)
+{
+ unsigned long flags;
+ struct mm_struct *mm;
+ int asid;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long mmuar;
+
+ local_irq_save(flags);
+ mmuar = task->thread.ksp;
+
+ /* Search for a valid TLB entry; if one is found, don't remap */
+ mmu_write(MMUAR, mmuar);
+ mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
+ if (mmu_read(MMUSR) & MMUSR_HIT)
+ goto end;
+
+ if (mmuar >= PAGE_OFFSET) {
+ mm = &init_mm;
+ } else {
+ pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
+ mm = task->mm;
+ }
+
+ if (!mm)
+ goto bug;
+
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd))
+ goto bug;
+
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd))
+ goto bug;
+
+ pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte))
+ goto bug;
+
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = mm->context & 0xff;
+ if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
+ set_pte(pte, pte_wrprotect(*pte));
+
+ mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+ (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+ >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+ mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+ ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+ mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+
+ goto end;
+
+bug:
+ pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
+end:
+ local_irq_restore(flags);
+}
+
+#elif defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+#include <linux/sched.h>
+
+extern unsigned long get_free_context(struct mm_struct *mm);
+extern void clear_context(unsigned long context);
+
+/* set the context for a new task to unmapped */
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ mm->context = SUN3_INVALID_CONTEXT;
+ return 0;
+}
+
+/* find the context given to this process, and if it hasn't already
+ got one, go get one for it. */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+ if (mm->context == SUN3_INVALID_CONTEXT)
+ mm->context = get_free_context(mm);
+}
+
+/* flush context if allocated... */
+static inline void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context != SUN3_INVALID_CONTEXT)
+ clear_context(mm->context);
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+ get_mmu_context(mm);
+ sun3_put_context(mm->context);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ activate_context(tsk->mm);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+ activate_context(next_mm);
+}
+
+#else
#include <asm/setup.h>
#include <asm/page.h>
@@ -103,55 +302,8 @@ static inline void activate_mm(struct mm_struct *prev_mm,
switch_mm_0460(next_mm);
}
-#else /* CONFIG_SUN3 */
-#include <asm/sun3mmu.h>
-#include <linux/sched.h>
-
-extern unsigned long get_free_context(struct mm_struct *mm);
-extern void clear_context(unsigned long context);
-
-/* set the context for a new task to unmapped */
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- mm->context = SUN3_INVALID_CONTEXT;
- return 0;
-}
-
-/* find the context given to this process, and if it hasn't already
- got one, go get one for it. */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
- if(mm->context == SUN3_INVALID_CONTEXT)
- mm->context = get_free_context(mm);
-}
-
-/* flush context if allocated... */
-static inline void destroy_context(struct mm_struct *mm)
-{
- if(mm->context != SUN3_INVALID_CONTEXT)
- clear_context(mm->context);
-}
-
-static inline void activate_context(struct mm_struct *mm)
-{
- get_mmu_context(mm);
- sun3_put_context(mm->context);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
- activate_context(tsk->mm);
-}
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
-static inline void activate_mm(struct mm_struct *prev_mm,
- struct mm_struct *next_mm)
-{
- activate_context(next_mm);
-}
-
#endif
+
#else /* !CONFIG_MMU */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
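For ColdFire, the context is just an 8-bit ASID, and get_mmu_context() above hands them out round-robin from a 256-entry bitmap, stealing one when the pool runs dry. The single-threaded sketch below is not part of the patch and models only the allocation walk; it drops the atomic nr_free_contexts accounting, the steal_context() fallback and the context_mm[] back-pointers, and uses a byte array in place of the bitmap.

#include <stdio.h>

#define LAST_CONTEXT   255   /* values copied from the ColdFire block above */
#define FIRST_CONTEXT  1

static unsigned char context_used[LAST_CONTEXT + 1];   /* one byte per ASID, not a bitmap */
static unsigned int next_mmu_context = FIRST_CONTEXT;

/* Round-robin search for a free ASID, loosely following get_mmu_context(). */
static unsigned int alloc_context(void)
{
    unsigned int ctx = next_mmu_context;

    while (context_used[ctx]) {
        ctx = (ctx + 1) & LAST_CONTEXT;
        if (ctx == 0)
            ctx = FIRST_CONTEXT;   /* this sketch never hands out ASID 0 */
    }
    context_used[ctx] = 1;
    next_mmu_context = (ctx + 1) & LAST_CONTEXT;
    return ctx;
}

int main(void)
{
    unsigned int a = alloc_context();
    unsigned int b = alloc_context();
    unsigned int c = alloc_context();

    printf("first three ASIDs: %u %u %u\n", a, b, c);
    return 0;
}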
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 45bd3f589bf..e0fdd4d0807 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -8,6 +8,7 @@
#define _PAGE_PRESENT 0x001
#define _PAGE_SHORT 0x002
#define _PAGE_RONLY 0x004
+#define _PAGE_READWRITE 0x000
#define _PAGE_ACCESSED 0x008
#define _PAGE_DIRTY 0x010
#define _PAGE_SUPER 0x080 /* 68040 supervisor only */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index dfebb7c1e37..98baa82a861 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -6,10 +6,10 @@
#include <asm/page_offset.h>
/* PAGE_SHIFT determines the page size */
-#ifndef CONFIG_SUN3
-#define PAGE_SHIFT (12)
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#define PAGE_SHIFT 13
#else
-#define PAGE_SHIFT (13)
+#define PAGE_SHIFT 12
#endif
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
@@ -36,6 +36,10 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_MMU
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index a8d1c60eb9c..90595721185 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -5,9 +5,6 @@
extern unsigned long memory_start;
extern unsigned long memory_end;
-extern unsigned long _rambase;
-extern unsigned long _ramstart;
-extern unsigned long _ramend;
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
diff --git a/arch/m68k/include/asm/page_offset.h b/arch/m68k/include/asm/page_offset.h
index 1780152d81d..82626a8f1d0 100644
--- a/arch/m68k/include/asm/page_offset.h
+++ b/arch/m68k/include/asm/page_offset.h
@@ -1,11 +1,9 @@
/* This handles the memory map.. */
-#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define PAGE_OFFSET_RAW 0x00000000
-#else
+#if defined(CONFIG_RAMBASE)
+#define PAGE_OFFSET_RAW CONFIG_RAMBASE
+#elif defined(CONFIG_SUN3)
#define PAGE_OFFSET_RAW 0x0E000000
-#endif
#else
-#define PAGE_OFFSET_RAW CONFIG_RAMBASE
+#define PAGE_OFFSET_RAW 0x00000000
#endif
diff --git a/arch/m68k/include/asm/pgalloc.h b/arch/m68k/include/asm/pgalloc.h
index c294aad8a90..37bee7e3223 100644
--- a/arch/m68k/include/asm/pgalloc.h
+++ b/arch/m68k/include/asm/pgalloc.h
@@ -7,7 +7,9 @@
#ifdef CONFIG_MMU
#include <asm/virtconvert.h>
-#ifdef CONFIG_SUN3
+#if defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgalloc.h>
+#elif defined(CONFIG_SUN3)
#include <asm/sun3_pgalloc.h>
#else
#include <asm/motorola_pgalloc.h>
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 87174c904d2..dc35e0e106e 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -40,6 +40,8 @@
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_SUN3
#define PGDIR_SHIFT 17
+#elif defined(CONFIG_COLDFIRE)
+#define PGDIR_SHIFT 22
#else
#define PGDIR_SHIFT 25
#endif
@@ -54,6 +56,10 @@
#define PTRS_PER_PTE 16
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
+#elif defined(CONFIG_COLDFIRE)
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 1024
#else
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 8
@@ -66,12 +72,22 @@
#ifdef CONFIG_SUN3
#define KMAP_START 0x0DC00000
#define KMAP_END 0x0E000000
+#elif defined(CONFIG_COLDFIRE)
+#define KMAP_START 0xe0000000
+#define KMAP_END 0xf0000000
#else
#define KMAP_START 0xd0000000
#define KMAP_END 0xf0000000
#endif
-#ifndef CONFIG_SUN3
+#ifdef CONFIG_SUN3
+extern unsigned long m68k_vmalloc_end;
+#define VMALLOC_START 0x0f800000
+#define VMALLOC_END m68k_vmalloc_end
+#elif defined(CONFIG_COLDFIRE)
+#define VMALLOC_START 0xd0000000
+#define VMALLOC_END 0xe0000000
+#else
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
@@ -82,11 +98,7 @@
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
-#else
-extern unsigned long m68k_vmalloc_end;
-#define VMALLOC_START 0x0f800000
-#define VMALLOC_END m68k_vmalloc_end
-#endif /* CONFIG_SUN3 */
+#endif
/* zero page used for uninitialized stuff */
extern void *empty_zero_page;
@@ -130,6 +142,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#ifdef CONFIG_SUN3
#include <asm/sun3_pgtable.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgtable.h>
#else
#include <asm/motorola_pgtable.h>
#endif
@@ -138,6 +152,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
/*
* Macro to mark a page protection value as "uncacheable".
*/
+#ifdef CONFIG_COLDFIRE
+# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
+#else
#ifdef SUN3_PAGE_NOCACHE
# define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE
#else
@@ -152,6 +169,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
: (prot)))
+#endif /* CONFIG_COLDFIRE */
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 568facf3027..46460fa15d5 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -48,10 +48,12 @@ static inline void wrusp(unsigned long usp)
* so don't change it unless you know what you are doing.
*/
#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_SIZE (0xF0000000UL)
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_SIZE (0xC0000000UL)
+#elif defined(CONFIG_SUN3)
#define TASK_SIZE (0x0E000000UL)
+#else
+#define TASK_SIZE (0xF0000000UL)
#endif
#else
#define TASK_SIZE (0xFFFFFFFFUL)
@@ -66,10 +68,12 @@ static inline void wrusp(unsigned long usp)
* space during mmap's.
*/
#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE 0xC0000000UL
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE 0x60000000UL
+#elif defined(CONFIG_SUN3)
#define TASK_UNMAPPED_BASE 0x0A000000UL
+#else
+#define TASK_UNMAPPED_BASE 0xC0000000UL
#endif
#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
#else
@@ -88,14 +92,12 @@ struct thread_struct {
unsigned long fp[8*3];
unsigned long fpcntl[3]; /* fp control regs */
unsigned char fpstate[FPSTATESIZE]; /* floating point state */
- struct thread_info info;
};
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
.sr = PS_S, \
.fs = __KERNEL_DS, \
- .info = INIT_THREAD_INFO(init_task), \
}
#ifdef CONFIG_MMU
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index ee959219fdf..0fa80e97ed2 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -22,23 +22,26 @@ typedef struct {
} mm_segment_t;
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define USER_DS MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
/*
* Get/set the SFC/DFC registers for MOVES instructions
*/
+#define USER_DS MAKE_MM_SEG(__USER_DS)
+#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
static inline mm_segment_t get_fs(void)
{
-#ifdef CONFIG_MMU
mm_segment_t _v;
__asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-
return _v;
-#else
- return USER_DS;
-#endif
+}
+
+static inline void set_fs(mm_segment_t val)
+{
+ __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+ "movec %0,%/dfc\n\t"
+ : /* no outputs */ : "r" (val.seg) : "memory");
}
static inline mm_segment_t get_ds(void)
@@ -47,14 +50,13 @@ static inline mm_segment_t get_ds(void)
return KERNEL_DS;
}
-static inline void set_fs(mm_segment_t val)
-{
-#ifdef CONFIG_MMU
- __asm__ __volatile__ ("movec %0,%/sfc\n\t"
- "movec %0,%/dfc\n\t"
- : /* no outputs */ : "r" (val.seg) : "memory");
+#else
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
#endif
-}
#define segment_eq(a,b) ((a).seg == (b).seg)
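With CONFIG_CPU_HAS_ADDRESS_SPACES unset (the ColdFire case), get_fs()/set_fs() stop touching the sfc/dfc registers and simply read and write the addr_limit field that this patch adds to thread_info, so the user/kernel boundary becomes a purely software limit. The stand-alone model below is not part of the patch; the struct, the within_limit() helper and the harness are illustrative (the real range check is access_ok() in uaccess_mm.h), while the TASK_SIZE and KERNEL_DS values are taken from the processor.h and segment.h hunks.

#include <stdio.h>

#define TASK_SIZE 0xC0000000UL                 /* ColdFire TASK_SIZE from processor.h */

typedef struct { unsigned long seg; } mm_segment_t;

struct thread_info_model {                     /* only the field this example needs */
    mm_segment_t addr_limit;
};

static struct thread_info_model current_ti = { { TASK_SIZE } };   /* i.e. USER_DS */

static mm_segment_t get_fs(void)        { return current_ti.addr_limit; }
static void set_fs(mm_segment_t fs)     { current_ti.addr_limit = fs; }

/* Software range check a uaccess helper might perform against addr_limit. */
static int within_limit(unsigned long addr, unsigned long size)
{
    unsigned long limit = get_fs().seg;

    return size <= limit && addr <= limit - size;
}

int main(void)
{
    printf("user pointer ok:    %d\n", within_limit(0x10000000UL, 256));
    printf("kernel pointer ok:  %d\n", within_limit(0xE0000000UL, 256));

    set_fs((mm_segment_t){ 0xFFFFFFFFUL });    /* KERNEL_DS in this model */
    printf("after set_fs(KERNEL_DS): %d\n", within_limit(0xE0000000UL, 256));
    return 0;
}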
diff --git a/arch/m68k/include/asm/serial.h b/arch/m68k/include/asm/serial.h
index 2b90d6e6907..7267536adbc 100644
--- a/arch/m68k/include/asm/serial.h
+++ b/arch/m68k/include/asm/serial.h
@@ -25,9 +25,11 @@
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#endif
+#ifdef CONFIG_ISA
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+#endif
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 4dfb3952b37..00c2c5397d3 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -40,6 +40,7 @@
#define MACH_HP300 9
#define MACH_Q40 10
#define MACH_SUN3X 11
+#define MACH_M54XX 12
#define COMMAND_LINE_SIZE 256
@@ -211,23 +212,27 @@ extern unsigned long m68k_machtype;
#define CPUB_68030 1
#define CPUB_68040 2
#define CPUB_68060 3
+#define CPUB_COLDFIRE 4
#define CPU_68020 (1<<CPUB_68020)
#define CPU_68030 (1<<CPUB_68030)
#define CPU_68040 (1<<CPUB_68040)
#define CPU_68060 (1<<CPUB_68060)
+#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
#define FPUB_68881 0
#define FPUB_68882 1
#define FPUB_68040 2 /* Internal FPU */
#define FPUB_68060 3 /* Internal FPU */
#define FPUB_SUNFPA 4 /* Sun-3 FPA */
+#define FPUB_COLDFIRE 5 /* ColdFire FPU */
#define FPU_68881 (1<<FPUB_68881)
#define FPU_68882 (1<<FPUB_68882)
#define FPU_68040 (1<<FPUB_68040)
#define FPU_68060 (1<<FPUB_68060)
#define FPU_SUNFPA (1<<FPUB_SUNFPA)
+#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
#define MMUB_68851 0
#define MMUB_68030 1 /* Internal MMU */
@@ -235,6 +240,7 @@ extern unsigned long m68k_machtype;
#define MMUB_68060 3 /* Internal MMU */
#define MMUB_APOLLO 4 /* Custom Apollo */
#define MMUB_SUN3 5 /* Custom Sun-3 */
+#define MMUB_COLDFIRE 6 /* Internal MMU */
#define MMU_68851 (1<<MMUB_68851)
#define MMU_68030 (1<<MMUB_68030)
@@ -242,6 +248,7 @@ extern unsigned long m68k_machtype;
#define MMU_68060 (1<<MMUB_68060)
#define MMU_SUN3 (1<<MMUB_SUN3)
#define MMU_APOLLO (1<<MMUB_APOLLO)
+#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
#ifdef __KERNEL__
@@ -341,6 +348,13 @@ extern int m68k_is040or060;
# endif
#endif
+#if !defined(CONFIG_COLDFIRE)
+# define CPU_IS_COLDFIRE (0)
+#else
+# define CPU_IS_COLDFIRE (1)
+# define MMU_IS_COLDFIRE (1)
+#endif
+
#define CPU_TYPE (m68k_cputype)
#ifdef CONFIG_M68KFPU_EMU
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index a29dd74a17c..523db2a51cf 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -15,11 +15,7 @@ struct sigcontext {
unsigned long sc_pc;
unsigned short sc_formatvec;
#ifndef __uClinux__
-# ifdef __mcoldfire__
- unsigned long sc_fpregs[2][2]; /* room for two fp registers */
-# else
unsigned long sc_fpregs[2*3]; /* room for two fp registers */
-# endif
unsigned long sc_fpcntl[3];
unsigned char sc_fpstate[216];
#endif
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h
index 9bf49c87d95..d4708ce466e 100644
--- a/arch/m68k/include/asm/socket.h
+++ b/arch/m68k/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 790988967ba..e8665e6f946 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -3,6 +3,7 @@
#include <asm/types.h>
#include <asm/page.h>
+#include <asm/segment.h>
/*
* On machines with 4k pages we default to an 8k thread size, though we
@@ -26,6 +27,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags;
struct exec_domain *exec_domain; /* execution domain */
+ mm_segment_t addr_limit; /* thread address space */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */
@@ -39,6 +41,7 @@ struct thread_info {
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
+ .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
.fn = do_no_restart_syscall, \
@@ -47,34 +50,6 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
-#ifdef CONFIG_MMU
-
-#ifndef __ASSEMBLY__
-#include <asm/current.h>
-#endif
-
-#ifdef ASM_OFFSETS_C
-#define task_thread_info(tsk) ((struct thread_info *) NULL)
-#else
-#include <asm/asm-offsets.h>
-#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
-#endif
-
-#define init_thread_info (init_task.thread.info)
-#define task_stack_page(tsk) ((tsk)->stack)
-#define current_thread_info() task_thread_info(current)
-
-#define __HAVE_THREAD_FUNCTIONS
-
-#define setup_thread_stack(p, org) ({ \
- *(struct task_struct **)(p)->stack = (p); \
- task_thread_info(p)->task = (p); \
-})
-
-#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
-
-#else /* !CONFIG_MMU */
-
#ifndef __ASSEMBLY__
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
@@ -92,8 +67,6 @@ static inline struct thread_info *current_thread_info(void)
#define init_thread_info (init_thread_union.thread_info)
-#endif /* CONFIG_MMU */
-
/* entry.S relies on these definitions!
* bits 0-7 are tested at every exception exit
* bits 8-15 are also tested at syscall exit
@@ -103,7 +76,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_DELAYED_TRACE 14 /* single step a syscall */
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
-#define TIF_FREEZE 17 /* thread is freezing for suspend */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index a6b4ed4fc90..965ea35c9a4 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -5,10 +5,13 @@
#ifndef CONFIG_SUN3
#include <asm/current.h>
+#include <asm/mcfmmu.h>
static inline void flush_tlb_kernel_page(void *addr)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ mmu_write(MMUOR, MMUOR_CNL);
+ } else if (CPU_IS_040_OR_060) {
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
__asm__ __volatile__(".chip 68040\n\t"
@@ -25,12 +28,15 @@ static inline void flush_tlb_kernel_page(void *addr)
*/
static inline void __flush_tlb(void)
{
- if (CPU_IS_040_OR_060)
+ if (CPU_IS_COLDFIRE) {
+ mmu_write(MMUOR, MMUOR_CNL);
+ } else if (CPU_IS_040_OR_060) {
__asm__ __volatile__(".chip 68040\n\t"
"pflushan\n\t"
".chip 68k");
- else if (CPU_IS_020_OR_030)
+ } else if (CPU_IS_020_OR_030) {
__asm__ __volatile__("pflush #0,#4");
+ }
}
static inline void __flush_tlb040_one(unsigned long addr)
@@ -43,7 +49,9 @@ static inline void __flush_tlb040_one(unsigned long addr)
static inline void __flush_tlb_one(unsigned long addr)
{
- if (CPU_IS_040_OR_060)
+ if (CPU_IS_COLDFIRE)
+ mmu_write(MMUOR, MMUOR_CNL);
+ else if (CPU_IS_040_OR_060)
__flush_tlb040_one(addr);
else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
@@ -56,12 +64,15 @@ static inline void __flush_tlb_one(unsigned long addr)
*/
static inline void flush_tlb_all(void)
{
- if (CPU_IS_040_OR_060)
+ if (CPU_IS_COLDFIRE) {
+ mmu_write(MMUOR, MMUOR_CNL);
+ } else if (CPU_IS_040_OR_060) {
__asm__ __volatile__(".chip 68040\n\t"
"pflusha\n\t"
".chip 68k");
- else if (CPU_IS_020_OR_030)
+ } else if (CPU_IS_020_OR_030) {
__asm__ __volatile__("pflusha");
+ }
}
static inline void flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 151068f64f4..4aff3358fba 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -18,6 +18,7 @@
typedef void (*e_vector)(void);
extern e_vector vectors[];
+extern e_vector *_ramvec;
asmlinkage void auto_inthandler(void);
asmlinkage void user_inthandler(void);
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
index b17fd115a4e..89705adcbd5 100644
--- a/arch/m68k/include/asm/types.h
+++ b/arch/m68k/include/asm/types.h
@@ -10,12 +10,6 @@
*/
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 7107f3fbdbb..9c80cd515b2 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr,
}
/*
+ * Not all variants of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this, and only have a single
+ * address space, and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference, the user space access functions are the same.
+ * So let's keep the code simple and just define what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define MOVES "moves"
+#else
+#define MOVES "move"
+#endif
+
+/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
@@ -43,7 +59,7 @@ extern int __get_user_bad(void);
#define __put_user_asm(res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
- "1: moves."#bwl" %2,%1\n" \
+ "1: "MOVES"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -83,8 +99,8 @@ asm volatile ("\n" \
{ \
const void __user *__pu_ptr = (ptr); \
asm volatile ("\n" \
- "1: moves.l %2,(%1)+\n" \
- "2: moves.l %R2,(%1)\n" \
+ "1: "MOVES".l %2,(%1)+\n" \
+ "2: "MOVES".l %R2,(%1)\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -115,12 +131,12 @@ asm volatile ("\n" \
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
type __gu_val; \
asm volatile ("\n" \
- "1: moves."#bwl" %2,%1\n" \
+ "1: "MOVES"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
"10: move.l %3,%0\n" \
- " sub."#bwl" %1,%1\n" \
+ " sub.l %1,%1\n" \
" jra 2b\n" \
" .previous\n" \
"\n" \
@@ -152,8 +168,8 @@ asm volatile ("\n" \
const void *__gu_ptr = (ptr); \
u64 __gu_val; \
asm volatile ("\n" \
- "1: moves.l (%2)+,%1\n" \
- "2: moves.l (%2),%R1\n" \
+ "1: "MOVES".l (%2)+,%1\n" \
+ "2: "MOVES".l (%2),%R1\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
asm volatile ("\n" \
- "1: moves."#s1" (%2)+,%3\n" \
+ "1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \
- "2: moves."#s2" (%2)+,%3\n" \
+ "2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \
- "3: moves."#s3" (%2)+,%3\n" \
+ "3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \
" .endif\n" \
"4:\n" \
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
asm volatile ("\n" \
" move."#s1" (%2)+,%3\n" \
- "11: moves."#s1" %3,(%1)+\n" \
+ "11: "MOVES"."#s1" %3,(%1)+\n" \
"12: move."#s2" (%2)+,%3\n" \
- "21: moves."#s2" %3,(%1)+\n" \
+ "21: "MOVES"."#s2" %3,(%1)+\n" \
"22:\n" \
" .ifnc \""#s3"\",\"\"\n" \
" move."#s3" (%2)+,%3\n" \
- "31: moves."#s3" %3,(%1)+\n" \
+ "31: "MOVES"."#s3" %3,(%1)+\n" \
"32:\n" \
" .endif\n" \
"4:\n" \
diff --git a/arch/m68k/include/asm/ucontext.h b/arch/m68k/include/asm/ucontext.h
index 00dcc5176c5..e4e22669edc 100644
--- a/arch/m68k/include/asm/ucontext.h
+++ b/arch/m68k/include/asm/ucontext.h
@@ -7,11 +7,7 @@ typedef greg_t gregset_t[NGREG];
typedef struct fpregset {
int f_fpcntl[3];
-#ifdef __mcoldfire__
- int f_fpregs[8][2];
-#else
int f_fpregs[8*3];
-#endif
} fpregset_t;
struct mcontext {
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 43f984e9397..ea0b502f845 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -132,10 +132,10 @@
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
-/*#define __NR_create_module 127*/
+#define __NR_create_module 127
#define __NR_init_module 128
#define __NR_delete_module 129
-/*#define __NR_get_kernel_syms 130*/
+#define __NR_get_kernel_syms 130
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
@@ -172,7 +172,7 @@
#define __NR_setresuid 164
#define __NR_getresuid 165
#define __NR_getpagesize 166
-/*#define __NR_query_module 167*/
+#define __NR_query_module 167
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
@@ -193,8 +193,8 @@
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
-/*#define __NR_getpmsg 188*/ /* some people actually want streams */
-/*#define __NR_putpmsg 189*/ /* some people actually want streams */
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
#define __NR_vfork 190
#define __NR_ugetrlimit 191
#define __NR_mmap2 192
@@ -350,10 +350,12 @@
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
#define __NR_setns 344
+#define __NR_process_vm_readv 345
+#define __NR_process_vm_writev 346
#ifdef __KERNEL__
-#define NR_syscalls 345
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index c5696193281..40d29a788b0 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -2,19 +2,24 @@
# Makefile for the linux kernel.
#
-extra-$(CONFIG_MMU) := head.o
+extra-$(CONFIG_AMIGA) := head.o
+extra-$(CONFIG_ATARI) := head.o
+extra-$(CONFIG_MAC) := head.o
+extra-$(CONFIG_APOLLO) := head.o
+extra-$(CONFIG_VME) := head.o
+extra-$(CONFIG_HP300) := head.o
+extra-$(CONFIG_Q40) := head.o
+extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds
-obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
- signal.o sys_m68k.o syscalltable.o time.o traps.o
+obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
+obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
-obj-$(CONFIG_MMU) += ints.o vectors.o
+obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
+obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
ifndef CONFIG_MMU_SUN3
-obj-y += dma.o
-endif
-ifndef CONFIG_MMU
-obj-y += init_task.o
+obj-y += dma.o
endif
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 983fed9d469..a972b00cd77 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -24,8 +24,7 @@ int main(void)
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
- DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
- DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+ DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 081cf96f243..b8daf64e347 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#include "entry_mm.S"
#else
#include "entry_no.S"
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index c713f514843..675a854966a 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -99,7 +99,8 @@ do_trace_exit:
jra .Lret_from_exception
ENTRY(ret_from_signal)
- tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
+ movel %curptr@(TASK_STACK),%a1
+ tstb %a1@(TINFO_FLAGS+2)
jge 1f
jbsr syscall_trace
1: RESTORE_SWITCH_STACK
@@ -120,11 +121,13 @@ ENTRY(system_call)
SAVE_ALL_SYS
GET_CURRENT(%d1)
+ movel %d1,%a1
+
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace?
- tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
+ tstb %a1@(TINFO_FLAGS+2)
jmi do_trace_entry
cmpl #NR_syscalls,%d0
jcc badsys
@@ -133,7 +136,8 @@ syscall:
movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
- movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
+ movel %curptr@(TASK_STACK),%a1
+ movew %a1@(TINFO_FLAGS+2),%d0
jne syscall_exit_work
1: RESTORE_ALL
@@ -159,7 +163,8 @@ ENTRY(ret_from_exception)
andw #ALLOWINT,%sr
resume_userspace:
- moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
+ movel %curptr@(TASK_STACK),%a1
+ moveb %a1@(TINFO_FLAGS+3),%d0
jne exit_work
1: RESTORE_ALL
@@ -199,7 +204,8 @@ do_delayed_trace:
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
- addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
@@ -211,7 +217,8 @@ auto_irqhandler_fixup = . + 2
addql #8,%sp | pop parameters off stack
ret_from_interrupt:
- subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
2: RESTORE_ALL
@@ -232,7 +239,8 @@ ret_from_last_interrupt:
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
- addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
@@ -243,7 +251,8 @@ user_irqvec_fixup = . + 2
jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack
- subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
@@ -252,13 +261,15 @@ user_irqvec_fixup = . + 2
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
- addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
movel %sp,%sp@-
jsr handle_badint
addql #4,%sp
- subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
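Every hunk in this entry_mm.S change follows the same pattern: the old code indexed a thread_info embedded in the thread struct via the removed TASK_INFO offset, while the new code loads task->stack (TASK_STACK from asm-offsets.c above) into %a1 and indexes TINFO_* from there, because thread_info now sits at the base of the kernel stack. A loose C rendering of what that two-instruction sequence computes is sketched below; it is not part of the patch, and the *_model types and field names are illustrative only.

#include <stdio.h>

struct thread_info_model {          /* only the fields entry_mm.S touches */
    unsigned long flags;            /* TINFO_FLAGS in asm-offsets */
    int           preempt_count;    /* TINFO_PREEMPT in asm-offsets */
};

struct task_struct_model {
    void *stack;                    /* TASK_STACK in asm-offsets */
};

/* C equivalent of:  movel %curptr@(TASK_STACK),%a1
 * thread_info lives at the bottom of the kernel stack, so one load of
 * task->stack is enough to reach it. */
static struct thread_info_model *task_thread_info(struct task_struct_model *t)
{
    return (struct thread_info_model *)t->stack;
}

int main(void)
{
    static struct thread_info_model ti = { 0, 0 };
    struct task_struct_model task = { &ti };

    /* roughly what addqb #1,%a1@(TINFO_PREEMPT+1) does to this field */
    task_thread_info(&task)->preempt_count++;

    printf("preempt_count = %d\n", task_thread_info(&task)->preempt_count);
    return 0;
}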
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
index 1b4289061a6..d80cba45589 100644
--- a/arch/m68k/kernel/entry_no.S
+++ b/arch/m68k/kernel/entry_no.S
@@ -44,8 +44,7 @@
ENTRY(buserr)
SAVE_ALL_INT
- moveq #-1,%d0
- movel %d0,%sp@(PT_OFF_ORIG_D0)
+ GET_CURRENT(%d0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr buserr_c
addql #4,%sp
@@ -53,8 +52,7 @@ ENTRY(buserr)
ENTRY(trap)
SAVE_ALL_INT
- moveq #-1,%d0
- movel %d0,%sp@(PT_OFF_ORIG_D0)
+ GET_CURRENT(%d0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr trap_c
addql #4,%sp
@@ -65,8 +63,7 @@ ENTRY(trap)
.globl dbginterrupt
ENTRY(dbginterrupt)
SAVE_ALL_INT
- moveq #-1,%d0
- movel %d0,%sp@(PT_OFF_ORIG_D0)
+ GET_CURRENT(%d0)
movel %sp,%sp@- /* stack frame pointer argument */
jsr dbginterrupt_c
addql #4,%sp
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 27622b3273c..d197e7ff62c 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -250,9 +250,8 @@
* USE_MFP: Use the ST-MFP port (Modem1) for serial debug.
*
* Macintosh constants:
- * MAC_SERIAL_DEBUG: Turns on serial debug output for the Macintosh.
- * MAC_USE_SCC_A: Use the SCC port A (modem) for serial debug.
- * MAC_USE_SCC_B: Use the SCC port B (printer) for serial debug (default).
+ * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug and early console.
+ * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug and early console.
*/
#include <linux/linkage.h>
@@ -268,33 +267,25 @@
#include <asm/machw.h>
-/*
- * Macintosh console support
- */
-
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
#define CONSOLE
#define CONSOLE_PENGUIN
#endif
-/*
- * Macintosh serial debug support; outputs boot info to the printer
- * and/or modem serial ports
- */
-#undef MAC_SERIAL_DEBUG
+#ifdef CONFIG_EARLY_PRINTK
+#define SERIAL_DEBUG
+#else
+#undef SERIAL_DEBUG
+#endif
-/*
- * Macintosh serial debug port selection; define one or both;
- * requires MAC_SERIAL_DEBUG to be defined
- */
-#define MAC_USE_SCC_A /* Macintosh modem serial port */
-#define MAC_USE_SCC_B /* Macintosh printer serial port */
+#else /* !CONFIG_MAC */
-#endif /* CONFIG_MAC */
+#define SERIAL_DEBUG
+
+#endif /* !CONFIG_MAC */
#undef MMU_PRINT
#undef MMU_NOCACHE_KERNEL
-#define SERIAL_DEBUG
#undef DEBUG
/*
@@ -655,11 +646,11 @@ ENTRY(__start)
lea %pc@(L(mac_rowbytes)),%a1
movel %a0@,%a1@
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
get_bi_record BI_MAC_SCCBASE
lea %pc@(L(mac_sccbase)),%a1
movel %a0@,%a1@
-#endif /* MAC_SERIAL_DEBUG */
+#endif
#if 0
/*
@@ -1427,7 +1418,7 @@ L(mmu_fixup_done):
subl %d0,L(console_font)
subl %d0,L(console_font_data)
#endif
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
orl #0x50000000,L(mac_sccbase)
#endif
1:
@@ -1917,7 +1908,7 @@ mmu_030_print:
jbne 30b
mmu_print_done:
- puts "\n\n"
+ puts "\n"
func_return mmu_print
@@ -2768,7 +2759,7 @@ L(scc_initable_mac):
.byte 9,0 /* no interrupts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
- .byte 12,10,13,0 /* 9600 baud */
+ .byte 12,1,13,0 /* 38400 baud */
.byte 14,1 /* Baud rate generator enable */
.byte 3,0xc1 /* enable receiver */
.byte 5,0xea /* enable transmitter */
@@ -2906,10 +2897,12 @@ func_start serial_init,%d0/%d1/%a0/%a1
#endif
#ifdef CONFIG_MAC
is_not_mac(L(serial_init_not_mac))
-#ifdef MAC_SERIAL_DEBUG
-#if !defined(MAC_USE_SCC_A) && !defined(MAC_USE_SCC_B)
-#define MAC_USE_SCC_B
-#endif
+
+#ifdef SERIAL_DEBUG
+/* You may define either or both of these. */
+#define MAC_USE_SCC_A /* Modem port */
+#define MAC_USE_SCC_B /* Printer port */
+
#define mac_scc_cha_b_ctrl_offset 0x0
#define mac_scc_cha_a_ctrl_offset 0x2
#define mac_scc_cha_b_data_offset 0x4
@@ -2940,7 +2933,7 @@ func_start serial_init,%d0/%d1/%a0/%a1
jra 7b
8:
#endif /* MAC_USE_SCC_B */
-#endif /* MAC_SERIAL_DEBUG */
+#endif /* SERIAL_DEBUG */
jra L(serial_init_done)
L(serial_init_not_mac):
@@ -3011,7 +3004,7 @@ func_start serial_putc,%d0/%d1/%a0/%a1
#ifdef CONFIG_MAC
is_not_mac(5f)
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
#ifdef MAC_USE_SCC_A
movel %pc@(L(mac_sccbase)),%a1
@@ -3029,7 +3022,7 @@ func_start serial_putc,%d0/%d1/%a0/%a1
moveb %d0,%a1@(mac_scc_cha_b_data_offset)
#endif /* MAC_USE_SCC_B */
-#endif /* MAC_SERIAL_DEBUG */
+#endif /* SERIAL_DEBUG */
jra L(serial_putc_done)
5:
@@ -3248,33 +3241,39 @@ func_return putn
#ifdef CONFIG_MAC
/*
- * mac_serial_print
+ * mac_early_print
*
* This routine takes its parameters on the stack. It then
- * turns around and calls the internal routine. This routine
- * is used until the Linux console driver initializes itself.
+ * turns around and calls the internal routines. This routine
+ * is used by the boot console.
*
* The calling parameters are:
- * void mac_serial_print(const char *str);
+ * void mac_early_print(const char *str, unsigned length);
*
* This routine does NOT understand variable arguments only
* simple strings!
*/
-ENTRY(mac_serial_print)
- moveml %d0/%a0,%sp@-
-#if 1
- move %sr,%sp@-
+ENTRY(mac_early_print)
+ moveml %d0/%d1/%a0,%sp@-
+ movew %sr,%sp@-
ori #0x0700,%sr
-#endif
- movel %sp@(10),%a0 /* fetch parameter */
+ movel %sp@(18),%a0 /* fetch parameter */
+ movel %sp@(22),%d1 /* fetch parameter */
jra 2f
-1: serial_putc %d0
-2: moveb %a0@+,%d0
- jne 1b
-#if 1
- move %sp@+,%sr
+1:
+#ifdef CONSOLE
+ console_putc %d0
#endif
- moveml %sp@+,%d0/%a0
+#ifdef SERIAL_DEBUG
+ serial_putc %d0
+#endif
+ subq #1,%d1
+2: jeq 3f
+ moveb %a0@+,%d0
+ jne 1b
+3:
+ movew %sp@+,%sr
+ moveml %sp@+,%d0/%d1/%a0
rts
#endif /* CONFIG_MAC */
@@ -3409,10 +3408,10 @@ func_start console_put_stats,%a0/%d7
* a0 = pointer to boot_info
* d7 = value of boot_info fields
*/
- puts "\nMacLinux\n\n"
+ puts "\nMacLinux\n"
#ifdef SERIAL_DEBUG
- puts " vidaddr:"
+ puts "\n vidaddr:"
putn %pc@(L(mac_videobase)) /* video addr. */
puts "\n _stext:"
@@ -3423,19 +3422,21 @@ func_start console_put_stats,%a0/%d7
lea %pc@(_end),%a0
putn %a0
- puts "\ncpuid:"
+ puts "\n cpuid:"
putn %pc@(L(cputype))
- putc '\n'
-#ifdef MAC_SERIAL_DEBUG
+# ifdef CONFIG_MAC
+ puts "\n sccbase:"
putn %pc@(L(mac_sccbase))
+# endif
+# ifdef MMU_PRINT
putc '\n'
-#endif
-# if defined(MMU_PRINT)
jbsr mmu_print_machine_cpu_types
-# endif /* MMU_PRINT */
+# endif
#endif /* SERIAL_DEBUG */
+ putc '\n'
+
func_return console_put_stats
#ifdef CONSOLE_PENGUIN
@@ -3896,11 +3897,11 @@ L(mac_dimensions):
.long 0
L(mac_rowbytes):
.long 0
-#ifdef MAC_SERIAL_DEBUG
+#ifdef SERIAL_DEBUG
L(mac_sccbase):
.long 0
-#endif /* MAC_SERIAL_DEBUG */
#endif
+#endif /* CONFIG_MAC */
#if defined (CONFIG_APOLLO)
LSRB0 = 0x10412
diff --git a/arch/m68k/kernel/init_task.c b/arch/m68k/kernel/init_task.c
index cbf9dc3cc51..c744cfc6bfa 100644
--- a/arch/m68k/kernel/init_task.c
+++ b/arch/m68k/kernel/init_task.c
@@ -19,7 +19,6 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
*
* All other task structs will be allocated on slabs in fork.c
*/
-__asm__(".align 4");
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
@@ -27,7 +26,7 @@ EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
- * We need to make sure that this is 8192-byte aligned due to the
+ * We need to make sure that this is THREAD size aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 1b7a14d1a00..774c1bd59c3 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
/*
* Simpler 68k and ColdFire parts also need a few other gcc functions.
*/
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
index 1bc223aa07e..125f34e00bf 100644
--- a/arch/m68k/kernel/process_mm.c
+++ b/arch/m68k/kernel/process_mm.c
@@ -33,22 +33,6 @@
#include <asm/setup.h>
#include <asm/pgtable.h>
-/*
- * Initial task/thread structure. Make this a per-architecture thing,
- * because different architectures tend to have different
- * alignment requirements and potentially different initial
- * setup.
- */
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union __init_task_data
- __attribute__((aligned(THREAD_SIZE))) =
- { INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
asmlinkage void ret_from_fork(void);
@@ -188,9 +172,7 @@ void flush_thread(void)
current->thread.fs = __USER_DS;
if (!FPU_IS_EMU)
- asm volatile (".chip 68k/68881\n\t"
- "frestore %0@\n\t"
- ".chip 68k" : : "a" (&zero));
+ asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
}
/*
@@ -264,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
- if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
- asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
- "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
- : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
- : "memory");
+ if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
+ if (CPU_IS_COLDFIRE) {
+ asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
+ "fmovel %/fpiar,%1\n\t"
+ "fmovel %/fpcr,%2\n\t"
+ "fmovel %/fpsr,%3"
+ :
+ : "m" (p->thread.fp[0]),
+ "m" (p->thread.fpcntl[0]),
+ "m" (p->thread.fpcntl[1]),
+ "m" (p->thread.fpcntl[2])
+ : "memory");
+ } else {
+ asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+ "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+ :
+ : "m" (p->thread.fp[0]),
+ "m" (p->thread.fpcntl[0])
+ : "memory");
+ }
+ }
+
/* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
}
@@ -301,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0;
- asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
- :: "m" (fpu->fpcntl[0])
- : "memory");
- asm volatile ("fmovemx %/fp0-%/fp7,%0"
- :: "m" (fpu->fpregs[0])
- : "memory");
+ if (CPU_IS_COLDFIRE) {
+ asm volatile ("fmovel %/fpiar,%0\n\t"
+ "fmovel %/fpcr,%1\n\t"
+ "fmovel %/fpsr,%2\n\t"
+ "fmovemd %/fp0-%/fp7,%3"
+ :
+ : "m" (fpu->fpcntl[0]),
+ "m" (fpu->fpcntl[1]),
+ "m" (fpu->fpcntl[2]),
+ "m" (fpu->fpregs[0])
+ : "memory");
+ } else {
+ asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+ :
+ : "m" (fpu->fpcntl[0])
+ : "memory");
+ asm volatile ("fmovemx %/fp0-%/fp7,%0"
+ :
+ : "m" (fpu->fpregs[0])
+ : "memory");
+ }
+
return 1;
}
EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/m68k/kernel/ptrace_mm.c b/arch/m68k/kernel/ptrace_mm.c
index 0b252683cef..7bc999b7352 100644
--- a/arch/m68k/kernel/ptrace_mm.c
+++ b/arch/m68k/kernel/ptrace_mm.c
@@ -18,6 +18,7 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
+#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -275,3 +276,20 @@ asmlinkage void syscall_trace(void)
current->exit_code = 0;
}
}
+
+#ifdef CONFIG_COLDFIRE
+asmlinkage int syscall_trace_enter(void)
+{
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(task_pt_regs(current));
+ return ret;
+}
+
+asmlinkage void syscall_trace_leave(void)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
+#endif /* CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index c3b45061dd0..d872ce4807c 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -221,7 +221,8 @@ void __init setup_arch(char **cmdline_p)
#endif
/* The bootinfo is located right after the kernel bss */
- m68k_parse_bootinfo((const struct bi_record *)_end);
+ if (!CPU_IS_COLDFIRE)
+ m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
@@ -235,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
* with them, we should add a test to check_bugs() below] */
#ifndef CONFIG_M68KFPU_EMU_ONLY
/* clear the fpu if we have one */
- if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+ if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero));
}
@@ -258,6 +259,10 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
+#if defined(CONFIG_BOOTPARAM)
+ strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
+ m68k_command_line[CL_SIZE - 1] = 0;
+#endif /* CONFIG_BOOTPARAM */
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
@@ -323,6 +328,11 @@ void __init setup_arch(char **cmdline_p)
config_sun3x();
break;
#endif
+#ifdef CONFIG_COLDFIRE
+ case MACH_M54XX:
+ config_BSP(NULL, 0);
+ break;
+#endif
default:
panic("No configuration setup");
}
@@ -384,6 +394,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1)
+#define LOOP_CYCLES_COLDFIRE (2)
if (CPU_IS_020) {
cpu = "68020";
@@ -397,6 +408,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
} else if (CPU_IS_060) {
cpu = "68060";
clockfactor = LOOP_CYCLES_68060;
+ } else if (CPU_IS_COLDFIRE) {
+ cpu = "ColdFire";
+ clockfactor = LOOP_CYCLES_COLDFIRE;
} else {
cpu = "680x0";
clockfactor = 0;
@@ -415,6 +429,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA";
+ else if (m68k_fputype & FPU_COLDFIRE)
+ fpu = "ColdFire";
else
fpu = "none";
#endif
@@ -431,6 +447,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo";
+ else if (m68k_mmutype & MMU_COLDFIRE)
+ mmu = "ColdFire";
else
mmu = "unknown";
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 2ed8c0fb151..ca3df0dc7e8 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -47,7 +47,6 @@ EXPORT_SYMBOL(memory_end);
char __initdata command_line[COMMAND_LINE_SIZE];
/* machine dependent timer functions */
-void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
int (*mach_set_clock_mmss)(unsigned long);
/* machine dependent reboot functions */
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
index a0afc239304..cb856f9da65 100644
--- a/arch/m68k/kernel/signal_mm.c
+++ b/arch/m68k/kernel/signal_mm.c
@@ -56,7 +56,11 @@ static const int frame_extra_sizes[16] = {
[1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
[2] = sizeof(((struct frame *)0)->un.fmt2),
[3] = sizeof(((struct frame *)0)->un.fmt3),
+#ifdef CONFIG_COLDFIRE
+ [4] = 0,
+#else
[4] = sizeof(((struct frame *)0)->un.fmt4),
+#endif
[5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
[6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
[7] = sizeof(((struct frame *)0)->un.fmt7),
@@ -84,7 +88,11 @@ int handle_kernel_fault(struct pt_regs *regs)
regs->stkadj = frame_extra_sizes[regs->format];
tregs = (struct pt_regs *)((long)regs + regs->stkadj);
tregs->vector = regs->vector;
+#ifdef CONFIG_COLDFIRE
+ tregs->format = 4;
+#else
tregs->format = 0;
+#endif
tregs->pc = fixup->fixup;
tregs->sr = regs->sr;
@@ -195,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */
- if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+ (sc->sc_fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
@@ -214,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc)
sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0))
goto out;
+ } else if (CPU_IS_COLDFIRE) {
+ if (!(sc->sc_fpstate[0] == 0x00 ||
+ sc->sc_fpstate[0] == 0x05 ||
+ sc->sc_fpstate[0] == 0xe5))
+ goto out;
} else
goto out;
- __asm__ volatile (".chip 68k/68881\n\t"
- "fmovemx %0,%%fp0-%%fp1\n\t"
- "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
- ".chip 68k"
- : /* no outputs */
- : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+ "fmovel %1,%%fpcr\n\t"
+ "fmovel %2,%%fpsr\n\t"
+ "fmovel %3,%%fpiar"
+ : /* no outputs */
+ : "m" (sc->sc_fpregs[0]),
+ "m" (sc->sc_fpcntl[0]),
+ "m" (sc->sc_fpcntl[1]),
+ "m" (sc->sc_fpcntl[2]));
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %0,%%fp0-%%fp1\n\t"
+ "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+ ".chip 68k"
+ : /* no outputs */
+ : "m" (*sc->sc_fpregs),
+ "m" (*sc->sc_fpcntl));
+ }
+ }
+
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "frestore %0\n\t"
+ ".chip 68k"
+ : : "m" (*sc->sc_fpstate));
}
- __asm__ volatile (".chip 68k/68881\n\t"
- "frestore %0\n\t"
- ".chip 68k" : : "m" (*sc->sc_fpstate));
err = 0;
out:
@@ -241,7 +274,7 @@ out:
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
unsigned char fpstate[FPCONTEXT_SIZE];
- int context_size = CPU_IS_060 ? 8 : 0;
+ int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
fpregset_t fpregs;
int err = 1;
@@ -260,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
- if (!CPU_IS_060)
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
/* Verify the frame format. */
- if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+ (fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
@@ -282,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
fpstate[3] == 0x60 ||
fpstate[3] == 0xe0))
goto out;
+ } else if (CPU_IS_COLDFIRE) {
+ if (!(fpstate[3] == 0x00 ||
+ fpstate[3] == 0x05 ||
+ fpstate[3] == 0xe5))
+ goto out;
} else
goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs)))
goto out;
- __asm__ volatile (".chip 68k/68881\n\t"
- "fmovemx %0,%%fp0-%%fp7\n\t"
- "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
- ".chip 68k"
- : /* no outputs */
- : "m" (*fpregs.f_fpregs),
- "m" (*fpregs.f_fpcntl));
+
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+ "fmovel %1,%%fpcr\n\t"
+ "fmovel %2,%%fpsr\n\t"
+ "fmovel %3,%%fpiar"
+ : /* no outputs */
+ : "m" (fpregs.f_fpregs[0]),
+ "m" (fpregs.f_fpcntl[0]),
+ "m" (fpregs.f_fpcntl[1]),
+ "m" (fpregs.f_fpcntl[2]));
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %0,%%fp0-%%fp7\n\t"
+ "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+ ".chip 68k"
+ : /* no outputs */
+ : "m" (*fpregs.f_fpregs),
+ "m" (*fpregs.f_fpcntl));
+ }
}
if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size))
goto out;
- __asm__ volatile (".chip 68k/68881\n\t"
- "frestore %0\n\t"
- ".chip 68k" : : "m" (*fpstate));
+
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("frestore %0" : : "m" (*fpstate));
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "frestore %0\n\t"
+ ".chip 68k"
+ : : "m" (*fpstate));
+ }
err = 0;
out:
@@ -336,8 +394,12 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
- __asm__ __volatile__
- (" movel %0,%/a0\n\t"
+ __asm__ __volatile__ (
+#ifdef CONFIG_COLDFIRE
+ " movel %0,%/sp\n\t"
+ " bra ret_from_signal\n"
+#else
+ " movel %0,%/a0\n\t"
" subl %1,%/a0\n\t" /* make room on stack */
" movel %/a0,%/sp\n\t" /* set stack pointer */
/* move switch_stack and pt_regs */
@@ -350,6 +412,7 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
"2: movel %4@+,%/a0@+\n\t"
" dbra %1,2b\n\t"
" bral ret_from_signal\n"
+#endif
: /* no outputs, it doesn't ever return */
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
"n" (frame_offset), "a" (buf + fsize/4)
@@ -516,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
return;
}
- __asm__ volatile (".chip 68k/68881\n\t"
- "fsave %0\n\t"
- ".chip 68k"
- : : "m" (*sc->sc_fpstate) : "memory");
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fsave %0"
+ : : "m" (*sc->sc_fpstate) : "memory");
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fsave %0\n\t"
+ ".chip 68k"
+ : : "m" (*sc->sc_fpstate) : "memory");
+ }
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0];
@@ -530,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
sc->sc_fpstate[0x38] |= 1 << 3;
}
- __asm__ volatile (".chip 68k/68881\n\t"
- "fmovemx %%fp0-%%fp1,%0\n\t"
- "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
- ".chip 68k"
- : "=m" (*sc->sc_fpregs),
- "=m" (*sc->sc_fpcntl)
- : /* no inputs */
- : "memory");
+
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+ "fmovel %%fpcr,%1\n\t"
+ "fmovel %%fpsr,%2\n\t"
+ "fmovel %%fpiar,%3"
+ : "=m" (sc->sc_fpregs[0]),
+ "=m" (sc->sc_fpcntl[0]),
+ "=m" (sc->sc_fpcntl[1]),
+ "=m" (sc->sc_fpcntl[2])
+ : /* no inputs */
+ : "memory");
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %%fp0-%%fp1,%0\n\t"
+ "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+ ".chip 68k"
+ : "=m" (*sc->sc_fpregs),
+ "=m" (*sc->sc_fpcntl)
+ : /* no inputs */
+ : "memory");
+ }
}
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
unsigned char fpstate[FPCONTEXT_SIZE];
- int context_size = CPU_IS_060 ? 8 : 0;
+ int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
int err = 0;
if (FPU_IS_EMU) {
@@ -557,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
return err;
}
- __asm__ volatile (".chip 68k/68881\n\t"
- "fsave %0\n\t"
- ".chip 68k"
- : : "m" (*fpstate) : "memory");
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fsave %0\n\t"
+ ".chip 68k"
+ : : "m" (*fpstate) : "memory");
+ }
err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs;
- if (!CPU_IS_060)
+ if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 &&
@@ -575,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (*(unsigned short *) fpstate == 0x1f38)
fpstate[0x38] |= 1 << 3;
}
- __asm__ volatile (".chip 68k/68881\n\t"
- "fmovemx %%fp0-%%fp7,%0\n\t"
- "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
- ".chip 68k"
- : "=m" (*fpregs.f_fpregs),
- "=m" (*fpregs.f_fpcntl)
- : /* no inputs */
- : "memory");
+ if (CPU_IS_COLDFIRE) {
+ __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+ "fmovel %%fpcr,%1\n\t"
+ "fmovel %%fpsr,%2\n\t"
+ "fmovel %%fpiar,%3"
+ : "=m" (fpregs.f_fpregs[0]),
+ "=m" (fpregs.f_fpcntl[0]),
+ "=m" (fpregs.f_fpcntl[1]),
+ "=m" (fpregs.f_fpcntl[2])
+ : /* no inputs */
+ : "memory");
+ } else {
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %%fp0-%%fp7,%0\n\t"
+ "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+ ".chip 68k"
+ : "=m" (*fpregs.f_fpregs),
+ "=m" (*fpregs.f_fpcntl)
+ : /* no inputs */
+ : "memory");
+ }
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs));
}
@@ -679,8 +778,7 @@ static inline void push_cache (unsigned long vaddr)
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (temp));
- }
- else {
+ } else if (!CPU_IS_COLDFIRE) {
/*
* 68030/68020 have no writeback cache;
* still need to clear icache.
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c468f2edaa8..ce827b37611 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns
+ .long sys_process_vm_readv /* 345 */
+ .long sys_process_vm_writev
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index a5cf40c26de..75ab79b3bde 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
#include "time_mm.c"
#else
#include "time_no.c"
diff --git a/arch/m68k/kernel/time_no.c b/arch/m68k/kernel/time_no.c
index 6623909f70e..3ef0f7768dc 100644
--- a/arch/m68k/kernel/time_no.c
+++ b/arch/m68k/kernel/time_no.c
@@ -26,6 +26,9 @@
#define TICK_SIZE (tick_nsec / 1000)
+/* machine dependent timer functions */
+void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
+
static inline int set_rtc_mmss(unsigned long nowtime)
{
if (mach_set_clock_mmss)
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 89362f2bb56..a76452ca964 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -706,6 +706,88 @@ create_atc_entry:
#endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#include <asm/mcfmmu.h>
+
+/*
+ * The following table converts the FS encoding of a ColdFire
+ * exception stack frame into the error_code value needed by
+ * do_fault.
+ */
+static const unsigned char fs_err_code[] = {
+ 0, /* 0000 */
+ 0, /* 0001 */
+ 0, /* 0010 */
+ 0, /* 0011 */
+ 1, /* 0100 */
+ 0, /* 0101 */
+ 0, /* 0110 */
+ 0, /* 0111 */
+ 2, /* 1000 */
+ 3, /* 1001 */
+ 2, /* 1010 */
+ 0, /* 1011 */
+ 1, /* 1100 */
+ 1, /* 1101 */
+ 0, /* 1110 */
+ 0 /* 1111 */
+};
+
+static inline void access_errorcf(unsigned int fs, struct frame *fp)
+{
+ unsigned long mmusr, addr;
+ unsigned int err_code;
+ int need_page_fault;
+
+ mmusr = mmu_read(MMUSR);
+ addr = mmu_read(MMUAR);
+
+ /*
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ */
+ switch (fs) {
+ case 5: /* 0101 TLB opword X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+ addr = fp->ptregs.pc;
+ break;
+ case 6: /* 0110 TLB extension word X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+ addr = fp->ptregs.pc + sizeof(long);
+ break;
+ case 10: /* 1010 TLB W miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+ break;
+ case 14: /* 1110 TLB R miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+ break;
+ default:
+ /* 0000 Normal */
+ /* 0001 Reserved */
+ /* 0010 Interrupt during debug service routine */
+ /* 0011 Reserved */
+ /* 0100 X Protection */
+ /* 0111 IFP in emulator mode */
+ /* 1000 W Protection */
+ /* 1001 Write error */
+ /* 1011 Reserved */
+ /* 1100 R Protection */
+ /* 1101 R Protection */
+ /* 1111 OEP in emulator mode */
+ need_page_fault = 1;
+ break;
+ }
+
+ if (need_page_fault) {
+ err_code = fs_err_code[fs];
+ if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+ err_code |= 2; /* bit1 - write, bit0 - protection */
+ do_page_fault(&fp->ptregs, addr, err_code);
+ }
+}
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
asmlinkage void buserr_c(struct frame *fp)
{
/* Only set esp0 if coming from user mode */
@@ -716,6 +798,28 @@ asmlinkage void buserr_c(struct frame *fp)
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+ if (CPU_IS_COLDFIRE) {
+ unsigned int fs;
+ fs = (fp->ptregs.vector & 0x3) |
+ ((fp->ptregs.vector & 0xc00) >> 8);
+ switch (fs) {
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xa:
+ case 0xd:
+ case 0xe:
+ case 0xf:
+ access_errorcf(fs, fp);
+ return;
+ default:
+ break;
+ }
+ }
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
switch (fp->ptregs.format) {
#if defined (CONFIG_M68060)
case 4: /* 68060 access error */
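The access_errorcf() hunk above reassembles the 4-bit ColdFire fault
status (FS) from the saved format/vector word and maps it through
fs_err_code[] to the error_code bits do_page_fault() expects (bit 0 set
for a protection fault, bit 1 set for a write). A minimal user-space
sketch of that decode, using a hypothetical frame word for illustration:

#include <stdio.h>

/* Same mapping as the fs_err_code[] table added to traps.c above. */
static const unsigned char fs_err_code[16] = {
	0, 0, 0, 0, 1, 0, 0, 0, 2, 3, 2, 0, 1, 1, 0, 0
};

/* FS[1:0] sit in bits 1..0 of the vector word, FS[3:2] in bits 11..10. */
static unsigned int cf_fault_status(unsigned int format_vector)
{
	return (format_vector & 0x3) | ((format_vector & 0xc00) >> 8);
}

int main(void)
{
	unsigned int fv = 0x0802;	/* hypothetical word: FS = 1010, a TLB write miss */
	unsigned int fs = cf_fault_status(fv);

	printf("FS=%u -> error_code=%u\n", fs, fs_err_code[fs]);
	return 0;
}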
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux-nommu.lds
index 4e238934083..8e66ccb0935 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -69,6 +69,7 @@ SECTIONS {
SCHED_TEXT
LOCK_TEXT
*(.text..lock)
+ *(.fixup)
. = ALIGN(16); /* Exception table */
__start___ex_table = .;
@@ -161,6 +162,13 @@ SECTIONS {
_edata = . ;
} > DATA
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ } > DATA
+ NOTES > DATA
+
.init.text : {
. = ALIGN(PAGE_SIZE);
__init_begin = .;
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index d0993594f55..63407c83682 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,7 +31,9 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+ _sbss = .;
BSS_SECTION(0, 0, 0)
+ _ebss = .;
_edata = .; /* End of data section */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8080469ee6c..ad0f46d64c0 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,7 +44,9 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ _sbss = .;
BSS_SECTION(0, 0, 0)
+ _ebss = .;
_end = . ;
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
index 030dabf0bc5..69ec7963887 100644
--- a/arch/m68k/kernel/vmlinux.lds.S
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -1,5 +1,14 @@
-#ifdef CONFIG_MMU
-#include "vmlinux.lds_mm.S"
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS (7);
+ data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
#else
-#include "vmlinux.lds_no.S"
+#include "vmlinux-std.lds"
+#endif
+#else
+#include "vmlinux-nommu.lds"
#endif
diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
deleted file mode 100644
index 99ba315bd0a..00000000000
--- a/arch/m68k/kernel/vmlinux.lds_mm.S
+++ /dev/null
@@ -1,10 +0,0 @@
-PHDRS
-{
- text PT_LOAD FILEHDR PHDRS FLAGS (7);
- data PT_LOAD FLAGS (7);
-}
-#ifdef CONFIG_SUN3
-#include "vmlinux-sun3.lds"
-#else
-#include "vmlinux-std.lds"
-#endif
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 1a1bd9067e9..a9d782d3427 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -6,9 +6,11 @@
lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
memcpy.o memset.o memmove.o
-ifdef CONFIG_MMU
-lib-y += string.o uaccess.o checksum_mm.o
-else
-lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
+lib-$(CONFIG_MMU) += string.o uaccess.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o
+
+ifndef CONFIG_GENERIC_CSUM
+lib-y += checksum.o
endif
diff --git a/arch/m68k/lib/checksum_mm.c b/arch/m68k/lib/checksum.c
index 6216f12a756..6216f12a756 100644
--- a/arch/m68k/lib/checksum_mm.c
+++ b/arch/m68k/lib/checksum.c
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
deleted file mode 100644
index e4c6354da76..00000000000
--- a/arch/m68k/lib/checksum_no.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
- * Fixed some nasty bugs, causing some horrible crashes.
- * A: At some points, the sum (%0) was used as
- * length-counter instead of the length counter
- * (%1). Thanks to Roman Hodek for pointing this out.
- * B: GCC seems to mess up if one uses too many
- * data-registers to hold input values and one tries to
- * specify d0 and d1 as scratch registers. Letting gcc choose these
- * registers itself solves the problem.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
- of the assembly has to go. */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
- /* add up 16-bit and 16-bit for 16+c bit */
- x = (x & 0xffff) + (x >> 16);
- /* add up carry.. */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = *buff;
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *) buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long carry = 0;
- do {
- unsigned long w = *(unsigned long *) buff;
- count--;
- buff += 4;
- result += carry;
- result += w;
- carry = (w > result);
- } while (count);
- result += carry;
- result = (result & 0xffff) + (result >> 16);
- }
- if (len & 2) {
- result += *(unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += (*buff << 8);
- result = from32to16(result);
- if (odd)
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
- return result;
-}
-
-#ifdef CONFIG_COLDFIRE
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
- return (__force __sum16)~do_csum(iph,ihl*4);
-}
-EXPORT_SYMBOL(ip_fast_csum);
-#endif
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
- unsigned int result = do_csum(buff, len);
-
- /* add in old sum, and carry.. */
- result += (__force u32)sum;
- if ((__force u32)sum > result)
- result += 1;
- return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err)
-{
- if (csum_err) *csum_err = 0;
- memcpy(dst, (__force const void *)src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 13854ed8cd9..5664386338d 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -15,17 +15,17 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
asm volatile ("\n"
" tst.l %0\n"
" jeq 2f\n"
- "1: moves.l (%1)+,%3\n"
+ "1: "MOVES".l (%1)+,%3\n"
" move.l %3,(%2)+\n"
" subq.l #1,%0\n"
" jne 1b\n"
"2: btst #1,%5\n"
" jeq 4f\n"
- "3: moves.w (%1)+,%3\n"
+ "3: "MOVES".w (%1)+,%3\n"
" move.w %3,(%2)+\n"
"4: btst #0,%5\n"
" jeq 6f\n"
- "5: moves.b (%1)+,%3\n"
+ "5: "MOVES".b (%1)+,%3\n"
" move.b %3,(%2)+\n"
"6:\n"
" .section .fixup,\"ax\"\n"
@@ -68,17 +68,17 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
" tst.l %0\n"
" jeq 4f\n"
"1: move.l (%1)+,%3\n"
- "2: moves.l %3,(%2)+\n"
+ "2: "MOVES".l %3,(%2)+\n"
"3: subq.l #1,%0\n"
" jne 1b\n"
"4: btst #1,%5\n"
" jeq 6f\n"
" move.w (%1)+,%3\n"
- "5: moves.w %3,(%2)+\n"
+ "5: "MOVES".w %3,(%2)+\n"
"6: btst #0,%5\n"
" jeq 8f\n"
" move.b (%1)+,%3\n"
- "7: moves.b %3,(%2)+\n"
+ "7: "MOVES".b %3,(%2)+\n"
"8:\n"
" .section .fixup,\"ax\"\n"
" .even\n"
@@ -115,7 +115,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
return count;
asm volatile ("\n"
- "1: moves.b (%2)+,%4\n"
+ "1: "MOVES".b (%2)+,%4\n"
" move.b %4,(%1)+\n"
" jeq 2f\n"
" subq.l #1,%3\n"
@@ -152,7 +152,7 @@ long strnlen_user(const char __user *src, long n)
asm volatile ("\n"
"1: subq.l #1,%1\n"
" jmi 3f\n"
- "2: moves.b (%0)+,%2\n"
+ "2: "MOVES".b (%0)+,%2\n"
" tst.b %2\n"
" jne 1b\n"
" jra 4f\n"
@@ -188,15 +188,15 @@ unsigned long __clear_user(void __user *to, unsigned long n)
asm volatile ("\n"
" tst.l %0\n"
" jeq 3f\n"
- "1: moves.l %2,(%1)+\n"
+ "1: "MOVES".l %2,(%1)+\n"
"2: subq.l #1,%0\n"
" jne 1b\n"
"3: btst #1,%4\n"
" jeq 5f\n"
- "4: moves.w %2,(%1)+\n"
+ "4: "MOVES".w %2,(%1)+\n"
"5: btst #0,%4\n"
" jeq 7f\n"
- "6: moves.b %2,(%1)\n"
+ "6: "MOVES".b %2,(%1)\n"
"7:\n"
" .section .fixup,\"ax\"\n"
" .even\n"
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index b403924a1ca..3fe0e43d44f 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -8,13 +8,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/irq.h>
-#include <asm/traps.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_baboon.h>
@@ -23,7 +18,6 @@
int baboon_present;
static volatile struct baboon *baboon;
-static unsigned char baboon_disabled;
#if 0
extern int macide_ack_intr(struct ata_channel *);
@@ -89,51 +83,32 @@ static void baboon_irq(unsigned int irq, struct irq_desc *desc)
void __init baboon_register_interrupts(void)
{
- baboon_disabled = 0;
irq_set_chained_handler(IRQ_NUBUS_C, baboon_irq);
}
/*
- * The means for masking individual baboon interrupts remains a mystery, so
- * enable the umbrella interrupt only when no baboon interrupt is disabled.
+ * The means for masking individual Baboon interrupts remains a mystery.
+ * However, since we only use the IDE IRQ, we can just enable/disable all
+ * Baboon interrupts. If/when we handle more than one Baboon IRQ, we must
+ * either figure out how to mask them individually or else implement the
+ * same workaround that's used for NuBus slots (see nubus_disabled and
+ * via_nubus_irq_shutdown).
*/
void baboon_irq_enable(int irq)
{
- int irq_idx = IRQ_IDX(irq);
-
#ifdef DEBUG_IRQUSE
printk("baboon_irq_enable(%d)\n", irq);
#endif
- baboon_disabled &= ~(1 << irq_idx);
- if (!baboon_disabled)
- mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
+ mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
}
void baboon_irq_disable(int irq)
{
- int irq_idx = IRQ_IDX(irq);
-
#ifdef DEBUG_IRQUSE
printk("baboon_irq_disable(%d)\n", irq);
#endif
- baboon_disabled |= 1 << irq_idx;
- if (baboon_disabled)
- mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
-}
-
-void baboon_irq_clear(int irq)
-{
- int irq_idx = IRQ_IDX(irq);
-
- baboon->mb_ifr &= ~(1 << irq_idx);
-}
-
-int baboon_irq_pending(int irq)
-{
- int irq_idx = IRQ_IDX(irq);
-
- return baboon->mb_ifr & (1 << irq_idx);
+ mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
}
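The comment in this hunk points at the nubus_disabled workaround as the
fallback if more than one Baboon IRQ ever needs handling. The pattern is
the same bookkeeping the removed baboon_disabled code used: track which
children are masked and only keep the shared umbrella IRQ enabled while
none of them is. A small self-contained sketch (the enable/disable helpers
are stand-ins for the real mac_irq_enable/mac_irq_disable calls):

#include <stdio.h>

static unsigned char child_disabled;	/* one bit per child IRQ under the umbrella */

static void umbrella_enable(void)  { printf("umbrella IRQ unmasked\n"); }
static void umbrella_disable(void) { printf("umbrella IRQ masked\n"); }

static void child_irq_enable(int irq_idx)
{
	child_disabled &= ~(1 << irq_idx);
	if (!child_disabled)		/* unmask only when every child is enabled again */
		umbrella_enable();
}

static void child_irq_disable(int irq_idx)
{
	child_disabled |= 1 << irq_idx;
	umbrella_disable();
}

int main(void)
{
	child_irq_disable(0);
	child_irq_disable(3);
	child_irq_enable(3);		/* umbrella stays masked: bit 0 is still set */
	child_irq_enable(0);		/* last disabled child re-enabled -> unmask */
	return 0;
}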
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index c247de02bc7..f60ff5f5920 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -71,6 +71,31 @@ static void mac_get_model(char *str);
static void mac_identify(void);
static void mac_report_hardware(void);
+#ifdef CONFIG_EARLY_PRINTK
+asmlinkage void __init mac_early_print(const char *s, unsigned n);
+
+static void __init mac_early_cons_write(struct console *con,
+ const char *s, unsigned n)
+{
+ mac_early_print(s, n);
+}
+
+static struct console __initdata mac_early_cons = {
+ .name = "early",
+ .write = mac_early_cons_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+int __init mac_unregister_early_cons(void)
+{
+ /* mac_early_print can't be used after init sections are discarded */
+ return unregister_console(&mac_early_cons);
+}
+
+late_initcall(mac_unregister_early_cons);
+#endif
+
static void __init mac_sched_init(irq_handler_t vector)
{
via_init_clock(vector);
@@ -164,6 +189,10 @@ void __init config_mac(void)
mach_beep = mac_mksound;
#endif
+#ifdef CONFIG_EARLY_PRINTK
+ register_console(&mac_early_cons);
+#endif
+
/*
* Determine hardware present
*/
@@ -192,7 +221,7 @@ void __init config_mac(void)
* inaccurate, so look here if a new Mac model won't run. Example: if
* a Mac crashes immediately after the VIA1 registers have been dumped
* to the screen, it probably died attempting to read DirB on a RBV.
- * Meaning it should have MAC_VIA_IIci here :-)
+ * Meaning it should have MAC_VIA_IICI here :-)
*/
struct mac_model *macintosh_config;
@@ -267,7 +296,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_IICI,
.name = "IIci",
.adb_type = MAC_ADB_II,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -276,7 +305,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_IIFX,
.name = "IIfx",
.adb_type = MAC_ADB_IOP,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_IOP,
.nubus_type = MAC_NUBUS,
@@ -285,7 +314,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_IISI,
.name = "IIsi",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -294,7 +323,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_IIVI,
.name = "IIvi",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -303,7 +332,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_IIVX,
.name = "IIvx",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -318,7 +347,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_CLII,
.name = "Classic II",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -327,7 +356,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_CCL,
.name = "Color Classic",
.adb_type = MAC_ADB_CUDA,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -336,7 +365,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_CCLII,
.name = "Color Classic II",
.adb_type = MAC_ADB_CUDA,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -351,7 +380,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_LC,
.name = "LC",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -360,7 +389,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_LCII,
.name = "LC II",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -369,7 +398,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_LCIII,
.name = "LC III",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -497,7 +526,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_P460,
.name = "Performa 460",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -524,7 +553,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_P520,
.name = "Performa 520",
.adb_type = MAC_ADB_CUDA,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -533,7 +562,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_P550,
.name = "Performa 550",
.adb_type = MAC_ADB_CUDA,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -565,7 +594,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_TV,
.name = "TV",
.adb_type = MAC_ADB_CUDA,
- .via_type = MAC_VIA_QUADRA,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -574,7 +603,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_P600,
.name = "Performa 600",
.adb_type = MAC_ADB_IISI,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
.nubus_type = MAC_NUBUS,
@@ -645,8 +674,8 @@ static struct mac_model mac_data_table[] = {
}, {
.ident = MAC_MODEL_PB150,
.name = "PowerBook 150",
- .adb_type = MAC_ADB_PB1,
- .via_type = MAC_VIA_IIci,
+ .adb_type = MAC_ADB_PB2,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.ide_type = MAC_IDE_PB,
.scc_type = MAC_SCC_QUADRA,
@@ -732,17 +761,13 @@ static struct mac_model mac_data_table[] = {
* PowerBook Duos are pretty much like normal PowerBooks
* All of these probably have onboard SONIC in the Dock which
* means we'll have to probe for it eventually.
- *
- * Are these really MAC_VIA_IIci? The developer notes for the
- * Duos show pretty much the same custom parts as in most of
- * the other PowerBooks which would imply MAC_VIA_QUADRA.
*/
{
.ident = MAC_MODEL_PB210,
.name = "PowerBook Duo 210",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -751,7 +776,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_PB230,
.name = "PowerBook Duo 230",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -760,7 +785,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_PB250,
.name = "PowerBook Duo 250",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -769,7 +794,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_PB270C,
.name = "PowerBook Duo 270c",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -778,7 +803,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_PB280,
.name = "PowerBook Duo 280",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -787,7 +812,7 @@ static struct mac_model mac_data_table[] = {
.ident = MAC_MODEL_PB280C,
.name = "PowerBook Duo 280c",
.adb_type = MAC_ADB_PB2,
- .via_type = MAC_VIA_IIci,
+ .via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
.nubus_type = MAC_NUBUS,
@@ -864,8 +889,14 @@ static void __init mac_identify(void)
scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC_B;
break;
default:
- scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC;
- scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC;
+ /* On non-PSC machines, the serial ports share an IRQ. */
+ if (macintosh_config->ident == MAC_MODEL_IIFX) {
+ scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC;
+ scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC;
+ } else {
+ scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_AUTO_4;
+ scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_AUTO_4;
+ }
break;
}
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index a5462cc0bfd..7d8d46127ad 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -115,7 +115,6 @@
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_iop.h>
-#include <asm/mac_oss.h>
/*#define DEBUG_IOP*/
@@ -149,8 +148,6 @@ static struct listener iop_listeners[NUM_IOPS][NUM_IOP_CHAN];
irqreturn_t iop_ism_irq(int, void *);
-extern void oss_irq_enable(int);
-
/*
* Private access functions
*/
@@ -304,11 +301,10 @@ void __init iop_init(void)
void __init iop_register_interrupts(void)
{
if (iop_ism_present) {
- if (oss_present) {
- if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 0,
+ if (macintosh_config->ident == MAC_MODEL_IIFX) {
+ if (request_irq(IRQ_MAC_ADB, iop_ism_irq, 0,
"ISM IOP", (void *)IOP_NUM_ISM))
pr_err("Couldn't register ISM IOP interrupt\n");
- oss_irq_enable(IRQ_MAC_ADB);
} else {
if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
(void *)IOP_NUM_ISM))
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index ba220b70ab8..5c1a6b2ff0a 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -26,10 +26,6 @@
* - slot 6: timer 1 (not on IIci)
* - slot 7: status of IRQ; signals 'any enabled int.'
*
- * 2 - OSS (IIfx only?)
- * - slot 0: SCSI interrupt
- * - slot 1: Sound interrupt
- *
* Levels 3-6 vary by machine type. For VIA or RBV Macintoshes:
*
* 3 - unused (?)
@@ -42,21 +38,30 @@
*
* 6 - off switch (?)
*
- * For OSS Macintoshes (IIfx only at this point):
+ * Machines with Quadra-like VIA hardware, except PSC and PMU machines, support
+ * an alternate interrupt mapping, as used by A/UX. It spreads ethernet and
+ * sound out to their own autovector IRQs and gives VIA1 a higher priority:
*
- * 3 - Nubus interrupt
- * - slot 0: Slot $9
- * - slot 1: Slot $A
- * - slot 2: Slot $B
- * - slot 3: Slot $C
- * - slot 4: Slot $D
- * - slot 5: Slot $E
+ * 1 - unused (?)
*
- * 4 - SCC IOP
+ * 3 - on-board SONIC
+ *
+ * 5 - Apple Sound Chip (ASC)
+ *
+ * 6 - VIA1
+ *
+ * For OSS Macintoshes (IIfx only), we apply an interrupt mapping similar to
+ * the Quadra (A/UX) mapping:
+ *
+ * 1 - ISM IOP (ADB)
*
- * 5 - ISM IOP (ADB?)
+ * 2 - SCSI
*
- * 6 - unused
+ * 3 - NuBus
+ *
+ * 4 - SCC IOP
+ *
+ * 6 - VIA1
*
* For PSC Macintoshes (660AV, 840AV):
*
@@ -100,88 +105,29 @@
* case. They're hidden behind the Nubus slot $C interrupt thus adding a
* third layer of indirection. Why oh why did the Apple engineers do that?
*
- * - We support "fast" and "slow" handlers, just like the Amiga port. The
- * fast handlers are called first and with all interrupts disabled. They
- * are expected to execute quickly (hence the name). The slow handlers are
- * called last with interrupts enabled and the interrupt level restored.
- * They must therefore be reentrant.
- *
- * TODO:
- *
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h> /* for intr_count */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <asm/system.h>
#include <asm/irq.h>
-#include <asm/traps.h>
-#include <asm/bootinfo.h>
#include <asm/macintosh.h>
+#include <asm/macints.h>
#include <asm/mac_via.h>
#include <asm/mac_psc.h>
+#include <asm/mac_oss.h>
+#include <asm/mac_iop.h>
+#include <asm/mac_baboon.h>
#include <asm/hwtest.h>
-#include <asm/errno.h>
-#include <asm/macints.h>
#include <asm/irq_regs.h>
-#include <asm/mac_oss.h>
#define SHUTUP_SONIC
/*
- * VIA/RBV hooks
- */
-
-extern void via_register_interrupts(void);
-extern void via_irq_enable(int);
-extern void via_irq_disable(int);
-extern void via_irq_clear(int);
-extern int via_irq_pending(int);
-
-/*
- * OSS hooks
- */
-
-extern void oss_register_interrupts(void);
-extern void oss_irq_enable(int);
-extern void oss_irq_disable(int);
-extern void oss_irq_clear(int);
-extern int oss_irq_pending(int);
-
-/*
- * PSC hooks
- */
-
-extern void psc_register_interrupts(void);
-extern void psc_irq_enable(int);
-extern void psc_irq_disable(int);
-extern void psc_irq_clear(int);
-extern int psc_irq_pending(int);
-
-/*
- * IOP hooks
- */
-
-extern void iop_register_interrupts(void);
-
-/*
- * Baboon hooks
- */
-
-extern int baboon_present;
-
-extern void baboon_register_interrupts(void);
-extern void baboon_irq_enable(int);
-extern void baboon_irq_disable(int);
-extern void baboon_irq_clear(int);
-
-/*
* console_loglevel determines NMI handler function
*/
@@ -190,10 +136,15 @@ irqreturn_t mac_debug_handler(int, void *);
/* #define DEBUG_MACINTS */
+static unsigned int mac_irq_startup(struct irq_data *);
+static void mac_irq_shutdown(struct irq_data *);
+
static struct irq_chip mac_irq_chip = {
.name = "mac",
.irq_enable = mac_irq_enable,
.irq_disable = mac_irq_disable,
+ .irq_startup = mac_irq_startup,
+ .irq_shutdown = mac_irq_shutdown,
};
void __init mac_init_IRQ(void)
@@ -239,8 +190,6 @@ void __init mac_init_IRQ(void)
/*
* mac_irq_enable - enable an interrupt source
* mac_irq_disable - disable an interrupt source
- * mac_clear_irq - clears a pending interrupt
- * mac_irq_pending - returns the pending status of an IRQ (nonzero = pending)
*
* These routines are just dispatchers to the VIA/OSS/PSC routines.
*/
@@ -252,8 +201,6 @@ void mac_irq_enable(struct irq_data *data)
switch(irq_src) {
case 1:
- via_irq_enable(irq);
- break;
case 2:
case 7:
if (oss_present)
@@ -262,6 +209,7 @@ void mac_irq_enable(struct irq_data *data)
via_irq_enable(irq);
break;
case 3:
+ case 4:
case 5:
case 6:
if (psc_present)
@@ -269,10 +217,6 @@ void mac_irq_enable(struct irq_data *data)
else if (oss_present)
oss_irq_enable(irq);
break;
- case 4:
- if (psc_present)
- psc_irq_enable(irq);
- break;
case 8:
if (baboon_present)
baboon_irq_enable(irq);
@@ -287,8 +231,6 @@ void mac_irq_disable(struct irq_data *data)
switch(irq_src) {
case 1:
- via_irq_disable(irq);
- break;
case 2:
case 7:
if (oss_present)
@@ -297,6 +239,7 @@ void mac_irq_disable(struct irq_data *data)
via_irq_disable(irq);
break;
case 3:
+ case 4:
case 5:
case 6:
if (psc_present)
@@ -304,10 +247,6 @@ void mac_irq_disable(struct irq_data *data)
else if (oss_present)
oss_irq_disable(irq);
break;
- case 4:
- if (psc_present)
- psc_irq_disable(irq);
- break;
case 8:
if (baboon_present)
baboon_irq_disable(irq);
@@ -315,65 +254,27 @@ void mac_irq_disable(struct irq_data *data)
}
}
-void mac_clear_irq(unsigned int irq)
+static unsigned int mac_irq_startup(struct irq_data *data)
{
- switch(IRQ_SRC(irq)) {
- case 1:
- via_irq_clear(irq);
- break;
- case 2:
- case 7:
- if (oss_present)
- oss_irq_clear(irq);
- else
- via_irq_clear(irq);
- break;
- case 3:
- case 5:
- case 6:
- if (psc_present)
- psc_irq_clear(irq);
- else if (oss_present)
- oss_irq_clear(irq);
- break;
- case 4:
- if (psc_present)
- psc_irq_clear(irq);
- break;
- case 8:
- if (baboon_present)
- baboon_irq_clear(irq);
- break;
- }
+ int irq = data->irq;
+
+ if (IRQ_SRC(irq) == 7 && !oss_present)
+ via_nubus_irq_startup(irq);
+ else
+ mac_irq_enable(data);
+
+ return 0;
}
-int mac_irq_pending(unsigned int irq)
+static void mac_irq_shutdown(struct irq_data *data)
{
- switch(IRQ_SRC(irq)) {
- case 1:
- return via_irq_pending(irq);
- case 2:
- case 7:
- if (oss_present)
- return oss_irq_pending(irq);
- else
- return via_irq_pending(irq);
- case 3:
- case 5:
- case 6:
- if (psc_present)
- return psc_irq_pending(irq);
- else if (oss_present)
- return oss_irq_pending(irq);
- break;
- case 4:
- if (psc_present)
- return psc_irq_pending(irq);
- break;
- }
- return 0;
+ int irq = data->irq;
+
+ if (IRQ_SRC(irq) == 7 && !oss_present)
+ via_nubus_irq_shutdown(irq);
+ else
+ mac_irq_disable(data);
}
-EXPORT_SYMBOL(mac_irq_pending);
static int num_debug[8];
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index a4c82dab9ff..6c4c882c126 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -1,5 +1,5 @@
/*
- * OSS handling
+ * Operating System Services (OSS) chip handling
* Written by Joshua M. Thompson (funaho@jurai.org)
*
*
@@ -30,8 +30,6 @@
int oss_present;
volatile struct mac_oss *oss;
-extern void via1_irq(unsigned int irq, struct irq_desc *desc);
-
/*
* Initialize the OSS
*
@@ -51,10 +49,8 @@ void __init oss_init(void)
/* do this by setting the source's interrupt level to zero. */
for (i = 0; i <= OSS_NUM_SOURCES; i++) {
- oss->irq_level[i] = OSS_IRQLEV_DISABLED;
+ oss->irq_level[i] = 0;
}
- /* If we disable VIA1 here, we never really handle it... */
- oss->irq_level[OSS_VIA1] = OSS_IRQLEV_VIA1;
}
/*
@@ -66,17 +62,13 @@ void __init oss_nubus_init(void)
}
/*
- * Handle miscellaneous OSS interrupts. Right now that's just sound
- * and SCSI; everything else is routed to its own autovector IRQ.
+ * Handle miscellaneous OSS interrupts.
*/
static void oss_irq(unsigned int irq, struct irq_desc *desc)
{
- int events;
-
- events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
- if (!events)
- return;
+ int events = oss->irq_pending &
+ (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
#ifdef DEBUG_IRQS
if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
@@ -84,16 +76,20 @@ static void oss_irq(unsigned int irq, struct irq_desc *desc)
(int) oss->irq_pending);
}
#endif
- /* FIXME: how do you clear a pending IRQ? */
- if (events & OSS_IP_SOUND) {
- oss->irq_pending &= ~OSS_IP_SOUND;
- /* FIXME: call sound handler */
- } else if (events & OSS_IP_SCSI) {
+ if (events & OSS_IP_IOPSCC) {
+ oss->irq_pending &= ~OSS_IP_IOPSCC;
+ generic_handle_irq(IRQ_MAC_SCC);
+ }
+
+ if (events & OSS_IP_SCSI) {
oss->irq_pending &= ~OSS_IP_SCSI;
generic_handle_irq(IRQ_MAC_SCSI);
- } else {
- /* FIXME: error check here? */
+ }
+
+ if (events & OSS_IP_IOPISM) {
+ oss->irq_pending &= ~OSS_IP_IOPISM;
+ generic_handle_irq(IRQ_MAC_ADB);
}
}
@@ -132,14 +128,29 @@ static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
/*
* Register the OSS and NuBus interrupt dispatchers.
+ *
+ * This IRQ mapping is laid out with two things in mind: first, we try to keep
+ * things on their own levels to avoid having to do double-dispatches. Second,
+ * the levels match as closely as possible the alternate IRQ mapping mode (aka
+ * "A/UX mode") available on some VIA machines.
*/
+#define OSS_IRQLEV_IOPISM IRQ_AUTO_1
+#define OSS_IRQLEV_SCSI IRQ_AUTO_2
+#define OSS_IRQLEV_NUBUS IRQ_AUTO_3
+#define OSS_IRQLEV_IOPSCC IRQ_AUTO_4
+#define OSS_IRQLEV_VIA1 IRQ_AUTO_6
+
void __init oss_register_interrupts(void)
{
- irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
- irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
- irq_set_chained_handler(OSS_IRQLEV_SOUND, oss_irq);
- irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
+
+ /* OSS_VIA1 gets enabled here because it has no machspec interrupt. */
+ oss->irq_level[OSS_VIA1] = IRQ_AUTO_6;
}
/*
@@ -158,13 +169,13 @@ void oss_irq_enable(int irq) {
switch(irq) {
case IRQ_MAC_SCC:
oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_IOPSCC;
- break;
+ return;
case IRQ_MAC_ADB:
oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_IOPISM;
- break;
+ return;
case IRQ_MAC_SCSI:
oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI;
- break;
+ return;
case IRQ_NUBUS_9:
case IRQ_NUBUS_A:
case IRQ_NUBUS_B:
@@ -173,13 +184,11 @@ void oss_irq_enable(int irq) {
case IRQ_NUBUS_E:
irq -= NUBUS_SOURCE_BASE;
oss->irq_level[irq] = OSS_IRQLEV_NUBUS;
- break;
-#ifdef DEBUG_IRQUSE
- default:
- printk("%s unknown irq %d\n", __func__, irq);
- break;
-#endif
+ return;
}
+
+ if (IRQ_SRC(irq) == 1)
+ via_irq_enable(irq);
}
/*
@@ -195,50 +204,14 @@ void oss_irq_disable(int irq) {
#endif
switch(irq) {
case IRQ_MAC_SCC:
- oss->irq_level[OSS_IOPSCC] = OSS_IRQLEV_DISABLED;
- break;
- case IRQ_MAC_ADB:
- oss->irq_level[OSS_IOPISM] = OSS_IRQLEV_DISABLED;
- break;
- case IRQ_MAC_SCSI:
- oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED;
- break;
- case IRQ_NUBUS_9:
- case IRQ_NUBUS_A:
- case IRQ_NUBUS_B:
- case IRQ_NUBUS_C:
- case IRQ_NUBUS_D:
- case IRQ_NUBUS_E:
- irq -= NUBUS_SOURCE_BASE;
- oss->irq_level[irq] = OSS_IRQLEV_DISABLED;
- break;
-#ifdef DEBUG_IRQUSE
- default:
- printk("%s unknown irq %d\n", __func__, irq);
- break;
-#endif
- }
-}
-
-/*
- * Clear an OSS interrupt
- *
- * Not sure if this works or not but it's the only method I could
- * think of based on the contents of the mac_oss structure.
- */
-
-void oss_irq_clear(int irq) {
- /* FIXME: how to do this on OSS? */
- switch(irq) {
- case IRQ_MAC_SCC:
- oss->irq_pending &= ~OSS_IP_IOPSCC;
- break;
+ oss->irq_level[OSS_IOPSCC] = 0;
+ return;
case IRQ_MAC_ADB:
- oss->irq_pending &= ~OSS_IP_IOPISM;
- break;
+ oss->irq_level[OSS_IOPISM] = 0;
+ return;
case IRQ_MAC_SCSI:
- oss->irq_pending &= ~OSS_IP_SCSI;
- break;
+ oss->irq_level[OSS_SCSI] = 0;
+ return;
case IRQ_NUBUS_9:
case IRQ_NUBUS_A:
case IRQ_NUBUS_B:
@@ -246,36 +219,10 @@ void oss_irq_clear(int irq) {
case IRQ_NUBUS_D:
case IRQ_NUBUS_E:
irq -= NUBUS_SOURCE_BASE;
- oss->irq_pending &= ~(1 << irq);
- break;
+ oss->irq_level[irq] = 0;
+ return;
}
-}
-
-/*
- * Check to see if a specific OSS interrupt is pending
- */
-int oss_irq_pending(int irq)
-{
- switch(irq) {
- case IRQ_MAC_SCC:
- return oss->irq_pending & OSS_IP_IOPSCC;
- break;
- case IRQ_MAC_ADB:
- return oss->irq_pending & OSS_IP_IOPISM;
- break;
- case IRQ_MAC_SCSI:
- return oss->irq_pending & OSS_IP_SCSI;
- break;
- case IRQ_NUBUS_9:
- case IRQ_NUBUS_A:
- case IRQ_NUBUS_B:
- case IRQ_NUBUS_C:
- case IRQ_NUBUS_D:
- case IRQ_NUBUS_E:
- irq -= NUBUS_SOURCE_BASE;
- return oss->irq_pending & (1 << irq);
- break;
- }
- return 0;
+ if (IRQ_SRC(irq) == 1)
+ via_irq_disable(irq);
}
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index e6c2d20f328..6f026fc302f 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -180,20 +180,3 @@ void psc_irq_disable(int irq) {
#endif
psc_write_byte(pIER, 1 << irq_idx);
}
-
-void psc_irq_clear(int irq) {
- int irq_src = IRQ_SRC(irq);
- int irq_idx = IRQ_IDX(irq);
- int pIFR = pIERbase + (irq_src << 4);
-
- psc_write_byte(pIFR, 1 << irq_idx);
-}
-
-int psc_irq_pending(int irq)
-{
- int irq_src = IRQ_SRC(irq);
- int irq_idx = IRQ_IDX(irq);
- int pIFR = pIERbase + (irq_src << 4);
-
- return psc_read_byte(pIFR) & (1 << irq_idx);
-}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index f1600ad2662..2d85662715f 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -63,24 +63,50 @@ static int gIER,gIFR,gBufA,gBufB;
#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
-/* To disable a NuBus slot on Quadras we make that slot IRQ line an output set
- * high. On RBV we just use the slot interrupt enable register. On Macs with
- * genuine VIA chips we must use nubus_disabled to keep track of disabled slot
- * interrupts. When any slot IRQ is disabled we mask the (edge triggered) CA1
- * or "SLOTS" interrupt. When no slot is disabled, we unmask the CA1 interrupt.
- * So, on genuine VIAs, having more than one NuBus IRQ can mean trouble,
- * because closing one of those drivers can mask all of the NuBus interrupts.
- * Also, since we can't mask the unregistered slot IRQs on genuine VIAs, it's
- * possible to get interrupts from cards that MacOS or the ROM has configured
- * but we have not. FWIW, "Designing Cards and Drivers for Macintosh II and
- * Macintosh SE", page 9-8, says, a slot IRQ with no driver would crash MacOS.
+
+/*
+ * On Macs with a genuine VIA chip there is no way to mask an individual slot
+ * interrupt. This limitation also seems to apply to VIA clone logic cores in
+ * Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
+ *
+ * We used to fake it by configuring the relevent VIA pin as an output
+ * (to mask the interrupt) or input (to unmask). That scheme did not work on
+ * (at least) the Quadra 700. A NuBus card's /NMRQ signal is an open-collector
+ * circuit (see Designing Cards and Drivers for Macintosh II and Macintosh SE,
+ * p. 10-11 etc) but VIA outputs are not (see datasheet).
+ *
+ * Driving these outputs high must cause the VIA to source current and the
+ * card to sink current when it asserts /NMRQ. Current will flow but the pin
+ * voltage is uncertain and so the /NMRQ condition may still cause a transition
+ * at the VIA2 CA1 input (which explains the lost interrupts). A side effect
+ * is that a disabled slot IRQ can never be tested as pending or not.
+ *
+ * Driving these outputs low doesn't work either. All the slot /NMRQ lines are
+ * (active low) OR'd together to generate the CA1 (aka "SLOTS") interrupt (see
+ * The Guide To Macintosh Family Hardware, 2nd edition p. 167). If we drive a
+ * disabled /NMRQ line low, the falling edge immediately triggers a CA1
+ * interrupt and all slot interrupts after that will generate no transition
+ * and therefore no interrupt, even after being re-enabled.
+ *
+ * So we make the VIA port A I/O lines inputs and use nubus_disabled to keep
+ * track of their states. When any slot IRQ becomes disabled we mask the CA1
+ * umbrella interrupt. Only when all slot IRQs become enabled do we unmask
+ * the CA1 interrupt. It must remain enabled even when cards have no interrupt
+ * handler registered. Drivers must therefore disable a slot interrupt at the
+ * device before they call free_irq (like shared and autovector interrupts).
+ *
+ * There is also a related problem when MacOS is used to boot Linux. A network
+ * card brought up by a MacOS driver may raise an interrupt while Linux boots.
+ * This can be fatal since it can't be handled until the right driver loads
+ * (if such a driver exists at all). Apparently related to this hardware
+ * limitation, "Designing Cards and Drivers", p. 9-8, says that a slot
+ * interrupt with no driver would crash MacOS (the book was written before
+ * the appearance of Macs with RBV or OSS).
*/
+
static u8 nubus_disabled;
void via_debug_dump(void);
-void via_irq_enable(int irq);
-void via_irq_disable(int irq);
-void via_irq_clear(int irq);
/*
* Initialize the VIAs
@@ -100,7 +126,7 @@ void __init via_init(void)
/* IIci, IIsi, IIvx, IIvi (P6xx), LC series */
- case MAC_VIA_IIci:
+ case MAC_VIA_IICI:
via1 = (void *) VIA1_BASE;
if (macintosh_config->ident == MAC_MODEL_IIFX) {
via2 = NULL;
@@ -197,38 +223,17 @@ void __init via_init(void)
if (oss_present)
return;
- /* Some machines support an alternate IRQ mapping that spreads */
- /* Ethernet and Sound out to their own autolevel IRQs and moves */
- /* VIA1 to level 6. A/UX uses this mapping and we do too. Note */
- /* that the IIfx emulates this alternate mapping using the OSS. */
-
- via_alt_mapping = 0;
- if (macintosh_config->via_type == MAC_VIA_QUADRA)
- switch (macintosh_config->ident) {
- case MAC_MODEL_C660:
- case MAC_MODEL_Q840:
- /* not applicable */
- break;
- case MAC_MODEL_P588:
- case MAC_MODEL_TV:
- case MAC_MODEL_PB140:
- case MAC_MODEL_PB145:
- case MAC_MODEL_PB160:
- case MAC_MODEL_PB165:
- case MAC_MODEL_PB165C:
- case MAC_MODEL_PB170:
- case MAC_MODEL_PB180:
- case MAC_MODEL_PB180C:
- case MAC_MODEL_PB190:
- case MAC_MODEL_PB520:
- /* not yet tested */
- break;
- default:
- via_alt_mapping = 1;
- via1[vDirB] |= 0x40;
- via1[vBufB] &= ~0x40;
- break;
- }
+ if ((macintosh_config->via_type == MAC_VIA_QUADRA) &&
+ (macintosh_config->adb_type != MAC_ADB_PB1) &&
+ (macintosh_config->adb_type != MAC_ADB_PB2) &&
+ (macintosh_config->ident != MAC_MODEL_C660) &&
+ (macintosh_config->ident != MAC_MODEL_Q840)) {
+ via_alt_mapping = 1;
+ via1[vDirB] |= 0x40;
+ via1[vBufB] &= ~0x40;
+ } else {
+ via_alt_mapping = 0;
+ }
/*
* Now initialize VIA2. For RBV we just kill all interrupts;
@@ -248,22 +253,28 @@ void __init via_init(void)
via2[vACR] &= ~0x03; /* disable port A & B latches */
}
+ /* Everything below this point is VIA2 only... */
+
+ if (rbv_present)
+ return;
+
/*
- * Set vPCR for control line interrupts (but not on RBV)
+ * Set vPCR for control line interrupts.
+ *
+ * CA1 (SLOTS IRQ), CB1 (ASC IRQ): negative edge trigger.
+ *
+ * Macs with ESP SCSI have a negative edge triggered SCSI interrupt.
+ * Testing reveals that PowerBooks do too. However, the SE/30
+ * schematic diagram shows an active high NCR5380 IRQ line.
*/
- if (!rbv_present) {
- /* For all VIA types, CA1 (SLOTS IRQ) and CB1 (ASC IRQ)
- * are made negative edge triggered here.
- */
- if (macintosh_config->scsi_type == MAC_SCSI_OLD) {
- /* CB2 (IRQ) indep. input, positive edge */
- /* CA2 (DRQ) indep. input, positive edge */
- via2[vPCR] = 0x66;
- } else {
- /* CB2 (IRQ) indep. input, negative edge */
- /* CA2 (DRQ) indep. input, negative edge */
- via2[vPCR] = 0x22;
- }
+
+ pr_debug("VIA2 vPCR is 0x%02X\n", via2[vPCR]);
+ if (macintosh_config->via_type == MAC_VIA_II) {
+ /* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, pos. edge */
+ via2[vPCR] = 0x66;
+ } else {
+ /* CA2 (SCSI DRQ), CB2 (SCSI IRQ): indep. input, neg. edge */
+ via2[vPCR] = 0x22;
}
}
@@ -378,34 +389,55 @@ void __init via_nubus_init(void)
via2[gBufB] |= 0x02;
}
- /* Disable all the slot interrupts (where possible). */
+ /*
+ * Disable the slot interrupts. On some hardware that's not possible.
+ * On some hardware it's unclear what all of these I/O lines do.
+ */
switch (macintosh_config->via_type) {
case MAC_VIA_II:
- /* Just make the port A lines inputs. */
- switch(macintosh_config->ident) {
- case MAC_MODEL_II:
- case MAC_MODEL_IIX:
- case MAC_MODEL_IICX:
- case MAC_MODEL_SE30:
- /* The top two bits are RAM size outputs. */
- via2[vDirA] &= 0xC0;
- break;
- default:
- via2[vDirA] &= 0x80;
- }
+ case MAC_VIA_QUADRA:
+ pr_debug("VIA2 vDirA is 0x%02X\n", via2[vDirA]);
break;
- case MAC_VIA_IIci:
+ case MAC_VIA_IICI:
/* RBV. Disable all the slot interrupts. SIER works like IER. */
via2[rSIER] = 0x7F;
break;
+ }
+}
+
+void via_nubus_irq_startup(int irq)
+{
+ int irq_idx = IRQ_IDX(irq);
+
+ switch (macintosh_config->via_type) {
+ case MAC_VIA_II:
case MAC_VIA_QUADRA:
- /* Disable the inactive slot interrupts by making those lines outputs. */
- if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
- (macintosh_config->adb_type != MAC_ADB_PB2)) {
- via2[vBufA] |= 0x7F;
- via2[vDirA] |= 0x7F;
+ /* Make the port A line an input. Probably redundant. */
+ if (macintosh_config->via_type == MAC_VIA_II) {
+ /* The top two bits are RAM size outputs. */
+ via2[vDirA] &= 0xC0 | ~(1 << irq_idx);
+ } else {
+ /* Allow NuBus slots 9 through F. */
+ via2[vDirA] &= 0x80 | ~(1 << irq_idx);
}
+ /* fall through */
+ case MAC_VIA_IICI:
+ via_irq_enable(irq);
+ break;
+ }
+}
+
+void via_nubus_irq_shutdown(int irq)
+{
+ switch (macintosh_config->via_type) {
+ case MAC_VIA_II:
+ case MAC_VIA_QUADRA:
+ /* Ensure that the umbrella CA1 interrupt remains enabled. */
+ via_irq_enable(irq);
+ break;
+ case MAC_VIA_IICI:
+ via_irq_disable(irq);
break;
}
}
@@ -531,25 +563,18 @@ void via_irq_enable(int irq) {
} else if (irq_src == 7) {
switch (macintosh_config->via_type) {
case MAC_VIA_II:
+ case MAC_VIA_QUADRA:
nubus_disabled &= ~(1 << irq_idx);
/* Enable the CA1 interrupt when no slot is disabled. */
if (!nubus_disabled)
via2[gIER] = IER_SET_BIT(1);
break;
- case MAC_VIA_IIci:
+ case MAC_VIA_IICI:
/* On RBV, enable the slot interrupt.
* SIER works like IER.
*/
via2[rSIER] = IER_SET_BIT(irq_idx);
break;
- case MAC_VIA_QUADRA:
- /* Make the port A line an input to enable the slot irq.
- * But not on PowerBooks, that's ADB.
- */
- if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
- (macintosh_config->adb_type != MAC_ADB_PB2))
- via2[vDirA] &= ~(1 << irq_idx);
- break;
}
}
}
@@ -569,60 +594,18 @@ void via_irq_disable(int irq) {
} else if (irq_src == 7) {
switch (macintosh_config->via_type) {
case MAC_VIA_II:
+ case MAC_VIA_QUADRA:
nubus_disabled |= 1 << irq_idx;
if (nubus_disabled)
via2[gIER] = IER_CLR_BIT(1);
break;
- case MAC_VIA_IIci:
+ case MAC_VIA_IICI:
via2[rSIER] = IER_CLR_BIT(irq_idx);
break;
- case MAC_VIA_QUADRA:
- if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
- (macintosh_config->adb_type != MAC_ADB_PB2))
- via2[vDirA] |= 1 << irq_idx;
- break;
}
}
}
-void via_irq_clear(int irq) {
- int irq_src = IRQ_SRC(irq);
- int irq_idx = IRQ_IDX(irq);
- int irq_bit = 1 << irq_idx;
-
- if (irq_src == 1) {
- via1[vIFR] = irq_bit;
- } else if (irq_src == 2) {
- via2[gIFR] = irq_bit | rbv_clear;
- } else if (irq_src == 7) {
- /* FIXME: There is no way to clear an individual nubus slot
- * IRQ flag, other than getting the device to do it.
- */
- }
-}
-
-/*
- * Returns nonzero if an interrupt is pending on the given
- * VIA/IRQ combination.
- */
-
-int via_irq_pending(int irq)
-{
- int irq_src = IRQ_SRC(irq);
- int irq_idx = IRQ_IDX(irq);
- int irq_bit = 1 << irq_idx;
-
- if (irq_src == 1) {
- return via1[vIFR] & irq_bit;
- } else if (irq_src == 2) {
- return via2[gIFR] & irq_bit;
- } else if (irq_src == 7) {
- /* Always 0 for MAC_VIA_QUADRA if the slot irq is disabled. */
- return ~via2[gBufA] & irq_bit;
- }
- return 0;
-}
-
void via1_set_head(int head)
{
if (head == 0)
@@ -631,3 +614,9 @@ void via1_set_head(int head)
via1[vBufA] |= VIA1A_vHeadSel;
}
EXPORT_SYMBOL(via1_set_head);
+
+int via2_scsi_drq_pending(void)
+{
+ return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
+}
+EXPORT_SYMBOL(via2_scsi_drq_pending);
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 09cadf1058d..cfbf3205724 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -4,6 +4,8 @@
obj-y := init.o
-obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o
-obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
+obj-$(CONFIG_MMU) += cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
+
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5437fff5fe0..95d0bf66e2e 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
-
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
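+		/* Flush by cache set index; if the range wraps around the mask, flush in two passes. */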
+ unsigned long start, end;
+ start = address & ICACHE_SET_MASK;
+ end = endaddr & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+ } else if (CPU_IS_040_OR_060) {
address &= PAGE_MASK;
do {
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ unsigned long start, end;
+ start = addr & ICACHE_SET_MASK;
+ end = (addr + len) & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+
+ } else if (CPU_IS_040_OR_060) {
asm volatile ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index bbe525434cc..89f3b203814 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -24,6 +24,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
+#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
@@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable);
extern pmd_t *zero_pgtable;
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS &vectors[0]
+#else
+#define VECTORS _ramvec
+#endif
+
+void __init print_memmap(void)
+{
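+	/* Helper macros for printing each region's base, top and size (KiB/MiB). */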
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+ pr_notice("Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
+ " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
+ MLK(VECTORS, VECTORS + 256),
+ MLM(KMAP_START, KMAP_END),
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_stext, _etext),
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(_sbss, _ebss));
+}
+
void __init mem_init(void)
{
pg_data_t *pgdat;
@@ -106,7 +139,7 @@ void __init mem_init(void)
}
}
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,6 +158,7 @@ void __init mem_init(void)
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
+ print_memmap();
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 69345849454..1cc2bed4c3d 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
break;
}
} else {
- physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+ _PAGE_DIRTY | _PAGE_READWRITE);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 00000000000..babd5a97cdc
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,198 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+void free_initmem(void)
+{
+}
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+ pgd_t *pg_dir;
+ pte_t *pg_table;
+ unsigned long address, size;
+ unsigned long next_pgtable, bootmem_end;
+ unsigned long zones_size[MAX_NR_ZONES];
+ enum zone_type zone;
+ int i;
+
+ empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+ pg_dir = swapper_pg_dir;
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+ size = num_pages * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+ bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+ address = PAGE_OFFSET;
+ while (address < (unsigned long)high_memory) {
+ pg_table = (pte_t *) next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+ pgd_val(*pg_dir) = (unsigned long) pg_table;
+ pg_dir++;
+
+ /* now change pg_table to kernel virtual addresses */
+ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (unsigned long) high_memory)
+ pte_val(pte) = 0;
+
+ set_pte(pg_table, pte);
+ address += PAGE_SIZE;
+ }
+ }
+
+ current->mm = NULL;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ zones_size[zone] = 0x0;
+ zones_size[ZONE_DMA] = num_pages;
+ free_area_init(zones_size);
+}
+
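+/*
+ * Handle a ColdFire TLB miss exception: walk the page tables for the
+ * faulting address and, if a valid PTE is found, load it into the
+ * hardware TLB. Returns 0 on success, or -1 if the fault cannot be
+ * resolved here.
+ */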
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+ unsigned long flags, mmuar;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int asid;
+
+ local_irq_save(flags);
+
+ mmuar = (dtlb) ? mmu_read(MMUAR) :
+ regs->pc + (extension_word * sizeof(long));
+
+ mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+ if (!mm) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ if (write) {
+ if (!pte_write(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+ set_pte(pte, pte_mkdirty(*pte));
+ }
+
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = mm->context & 0xff;
+ if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+ set_pte(pte, pte_wrprotect(*pte));
+
+ mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+ (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+ >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+ mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+ ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+ if (dtlb)
+ mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+ else
+ mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP. If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ */
+void steal_context(void)
+{
+ struct mm_struct *mm;
+ /*
+ * free up context `next_mmu_context'
+ * if we shouldn't free context 0, don't...
+ */
+ if (next_mmu_context < FIRST_CONTEXT)
+ next_mmu_context = FIRST_CONTEXT;
+ mm = context_mm[next_mmu_context];
+ flush_tlb_mm(mm);
+ destroy_context(mm);
+}
+
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 34c77ce24fb..a5dbb74fe1d 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr)
void cache_clear (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp;
/*
@@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear);
void cache_push (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp = PAGE_SIZE;
/*
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 31a66d99cbc..c3fb3bdd7ed 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -124,6 +124,163 @@ static void __init mvme16x_init_IRQ (void)
#define PccSCCMICR 0x1d
#define PccSCCTICR 0x1e
#define PccSCCRICR 0x1f
+#define PccTPIACKR 0x25
+
+#ifdef CONFIG_EARLY_PRINTK
+
+/**** cd2401 registers ****/
+#define CD2401_ADDR (0xfff45000)
+
+#define CyGFRCR (0x81)
+#define CyCCR (0x13)
+#define CyCLR_CHAN (0x40)
+#define CyINIT_CHAN (0x20)
+#define CyCHIP_RESET (0x10)
+#define CyENB_XMTR (0x08)
+#define CyDIS_XMTR (0x04)
+#define CyENB_RCVR (0x02)
+#define CyDIS_RCVR (0x01)
+#define CyCAR (0xee)
+#define CyIER (0x11)
+#define CyMdmCh (0x80)
+#define CyRxExc (0x20)
+#define CyRxData (0x08)
+#define CyTxMpty (0x02)
+#define CyTxRdy (0x01)
+#define CyLICR (0x26)
+#define CyRISR (0x89)
+#define CyTIMEOUT (0x80)
+#define CySPECHAR (0x70)
+#define CyOVERRUN (0x08)
+#define CyPARITY (0x04)
+#define CyFRAME (0x02)
+#define CyBREAK (0x01)
+#define CyREOIR (0x84)
+#define CyTEOIR (0x85)
+#define CyMEOIR (0x86)
+#define CyNOTRANS (0x08)
+#define CyRFOC (0x30)
+#define CyRDR (0xf8)
+#define CyTDR (0xf8)
+#define CyMISR (0x8b)
+#define CyRISR (0x89)
+#define CyTISR (0x8a)
+#define CyMSVR1 (0xde)
+#define CyMSVR2 (0xdf)
+#define CyDSR (0x80)
+#define CyDCD (0x40)
+#define CyCTS (0x20)
+#define CyDTR (0x02)
+#define CyRTS (0x01)
+#define CyRTPRL (0x25)
+#define CyRTPRH (0x24)
+#define CyCOR1 (0x10)
+#define CyPARITY_NONE (0x00)
+#define CyPARITY_E (0x40)
+#define CyPARITY_O (0xC0)
+#define Cy_5_BITS (0x04)
+#define Cy_6_BITS (0x05)
+#define Cy_7_BITS (0x06)
+#define Cy_8_BITS (0x07)
+#define CyCOR2 (0x17)
+#define CyETC (0x20)
+#define CyCtsAE (0x02)
+#define CyCOR3 (0x16)
+#define Cy_1_STOP (0x02)
+#define Cy_2_STOP (0x04)
+#define CyCOR4 (0x15)
+#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
+#define CyCOR5 (0x14)
+#define CyCOR6 (0x18)
+#define CyCOR7 (0x07)
+#define CyRBPR (0xcb)
+#define CyRCOR (0xc8)
+#define CyTBPR (0xc3)
+#define CyTCOR (0xc0)
+#define CySCHR1 (0x1f)
+#define CySCHR2 (0x1e)
+#define CyTPR (0xda)
+#define CyPILR1 (0xe3)
+#define CyPILR2 (0xe0)
+#define CyPILR3 (0xe1)
+#define CyCMR (0x1b)
+#define CyASYNC (0x02)
+#define CyLICR (0x26)
+#define CyLIVR (0x09)
+#define CySCRL (0x23)
+#define CySCRH (0x22)
+#define CyTFTC (0x80)
+
+static void cons_write(struct console *co, const char *str, unsigned count)
+{
+ volatile unsigned char *base_addr = (u_char *)CD2401_ADDR;
+ volatile u_char sink;
+ u_char ier;
+ int port;
+ u_char do_lf = 0;
+ int i = 0;
+
+ /* Ensure transmitter is enabled! */
+
+ port = 0;
+ base_addr[CyCAR] = (u_char)port;
+ while (base_addr[CyCCR])
+ ;
+ base_addr[CyCCR] = CyENB_XMTR;
+
+ ier = base_addr[CyIER];
+ base_addr[CyIER] = CyTxMpty;
+
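+	/* Poll PCC2 for Tx interrupts, feeding the CD2401 one character at a time ('\n' becomes '\r\n'). */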
+ while (1) {
+ if (pcc2chip[PccSCCTICR] & 0x20)
+ {
+ /* We have a Tx int. Acknowledge it */
+ sink = pcc2chip[PccTPIACKR];
+ if ((base_addr[CyLICR] >> 2) == port) {
+ if (i == count) {
+ /* Last char of string is now output */
+ base_addr[CyTEOIR] = CyNOTRANS;
+ break;
+ }
+ if (do_lf) {
+ base_addr[CyTDR] = '\n';
+ str++;
+ i++;
+ do_lf = 0;
+ }
+ else if (*str == '\n') {
+ base_addr[CyTDR] = '\r';
+ do_lf = 1;
+ }
+ else {
+ base_addr[CyTDR] = *str++;
+ i++;
+ }
+ base_addr[CyTEOIR] = 0;
+ }
+ else
+ base_addr[CyTEOIR] = CyNOTRANS;
+ }
+ }
+
+ base_addr[CyIER] = ier;
+}
+
+static struct console cons_info =
+{
+ .name = "sercon",
+ .write = cons_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+static void __init mvme16x_early_console(void)
+{
+ register_console(&cons_info);
+
+ printk(KERN_INFO "MVME16x: early console registered\n");
+}
+#endif
void __init config_mvme16x(void)
{
@@ -183,6 +340,9 @@ void __init config_mvme16x(void)
pcc2chip[PccSCCMICR] = 0x10;
pcc2chip[PccSCCTICR] = 0x10;
pcc2chip[PccSCCRICR] = 0x10;
+#ifdef CONFIG_EARLY_PRINTK
+ mvme16x_early_console();
+#endif
}
}
diff --git a/arch/m68k/platform/54xx/config.c b/arch/m68k/platform/54xx/config.c
index 78130984db9..ee043540bfa 100644
--- a/arch/m68k/platform/54xx/config.c
+++ b/arch/m68k/platform/54xx/config.c
@@ -13,11 +13,17 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/m54xxsim.h>
#include <asm/mcfuart.h>
#include <asm/m54xxgpt.h>
+#ifdef CONFIG_MMU
+#include <asm/mmu_context.h>
+#endif
/***************************************************************************/
@@ -95,8 +101,49 @@ static void mcf54xx_reset(void)
/***************************************************************************/
+#ifdef CONFIG_MMU
+
+unsigned long num_pages;
+
+static void __init mcf54xx_bootmem_alloc(void)
+{
+ unsigned long start_pfn;
+ unsigned long memstart;
+
+ /* _rambase and _ramend will be naturally page aligned */
+ m68k_memory[0].addr = _rambase;
+ m68k_memory[0].size = _ramend - _rambase;
+
+ /* compute total pages in system */
+ num_pages = (_ramend - _rambase) >> PAGE_SHIFT;
+
+ /* page numbers */
+ memstart = PAGE_ALIGN(_ramstart);
+ min_low_pfn = _rambase >> PAGE_SHIFT;
+ start_pfn = memstart >> PAGE_SHIFT;
+ max_low_pfn = _ramend >> PAGE_SHIFT;
+ high_memory = (void *)_ramend;
+
+ m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+
+ /* setup bootmem data */
+ m68k_setup_node(0);
+ memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
+ min_low_pfn, max_low_pfn);
+ free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
+}
+
+#endif /* CONFIG_MMU */
+
+/***************************************************************************/
+
void __init config_BSP(char *commandp, int size)
{
+#ifdef CONFIG_MMU
+ mcf54xx_bootmem_alloc();
+ mmu_context_init();
+#endif
mach_reset = mcf54xx_reset;
m54xx_uarts_init();
}
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile
index e4dfd8fde06..ee61bf84d4a 100644
--- a/arch/m68k/platform/68328/Makefile
+++ b/arch/m68k/platform/68328/Makefile
@@ -14,12 +14,8 @@ obj-$(CONFIG_M68328) += config.o
obj-$(CONFIG_ROM) += romvec.o
extra-y := head.o
-extra-$(CONFIG_M68328) += bootlogo.rh head.o
-
-$(obj)/bootlogo.rh: $(src)/bootlogo.h
- perl $(src)/bootlogo.pl < $(src)/bootlogo.h > $(obj)/bootlogo.rh
$(obj)/head.o: $(obj)/$(head-y)
ln -sf $(head-y) $(obj)/head.o
-clean-files := $(obj)/bootlogo.rh $(obj)/head.o $(head-y)
+clean-files := $(obj)/head.o $(head-y)
diff --git a/arch/m68k/platform/68328/bootlogo.h b/arch/m68k/platform/68328/bootlogo.h
index 67bc2c17386..b896c933faf 100644
--- a/arch/m68k/platform/68328/bootlogo.h
+++ b/arch/m68k/platform/68328/bootlogo.h
@@ -1,6 +1,6 @@
#define bootlogo_width 160
#define bootlogo_height 160
-static unsigned char bootlogo_bits[] = {
+unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/platform/68328/bootlogo.pl b/arch/m68k/platform/68328/bootlogo.pl
deleted file mode 100644
index b04ae3f50da..00000000000
--- a/arch/m68k/platform/68328/bootlogo.pl
+++ /dev/null
@@ -1,10 +0,0 @@
-
-$_ = join("", <>);
-
-s/(0x[0-9a-f]{2})/sprintf("0x%.2x",ord(pack("b8",unpack("B8",chr(hex($1))))))/gei;
-
-s/^ / .byte /gm;
-s/[,};]+$//gm;
-s/^static.*//gm;
-
-print $_;
diff --git a/arch/m68k/platform/68328/config.c b/arch/m68k/platform/68328/config.c
index a7bd21deb00..d70bf2623db 100644
--- a/arch/m68k/platform/68328/config.c
+++ b/arch/m68k/platform/68328/config.c
@@ -20,6 +20,9 @@
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/MC68328.h>
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+#include "bootlogo.h"
+#endif
/***************************************************************************/
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index aecff532b34..2ebfd642081 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -24,19 +24,7 @@
.global _ramstart
.global _ramend
-.global penguin_bits
-
-#ifdef CONFIG_PILOT
-
-#define IMR 0xFFFFF304
-
- .data
- .align 16
-
-penguin_bits:
-#include "bootlogo.rh"
-
-#endif
+.global bootlogo_bits
/*****************************************************************************/
@@ -185,9 +173,6 @@ L3:
moveq #79, %d7
movel %d0, _ramend
- movel %a3, %d0
- movel %d0, rom_length
-
pea 0
pea env
pea %sp@(4)
@@ -196,7 +181,7 @@ L3:
DBG_PUTC('H')
#ifdef CONFIG_PILOT
- movel #penguin_bits, 0xFFFFFA00
+ movel #bootlogo_bits, 0xFFFFFA00
moveb #10, 0xFFFFFA05
movew #160, 0xFFFFFA08
movew #160, 0xFFFFFA0A
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index 6ec77d3ea0b..a5ff96d0295 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -8,7 +8,7 @@
.global _ramend
#ifdef CONFIG_INIT_LCD
- .global splash_bits
+ .global bootlogo_bits
#endif
.data
@@ -29,16 +29,11 @@ _ramend:
#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
-#ifdef CONFIG_INIT_LCD
-splash_bits:
-#include "bootlogo.rh"
-#endif
-
.text
_start:
_stext: movew #0x2700,%sr
#ifdef CONFIG_INIT_LCD
- movel #splash_bits, 0xfffffA00 /* LSSA */
+ movel #bootlogo_bits, 0xfffffA00 /* LSSA */
moveb #0x28, 0xfffffA05 /* LVPW */
movew #0x280, 0xFFFFFa08 /* LXMAX */
movew #0x1df, 0xFFFFFa0a /* LYMAX */
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index 309f725995b..f2678866067 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -93,7 +93,6 @@ static struct clocksource m68328_clk = {
.name = "timer",
.rating = 250,
.read = m68328_read_clk,
- .shift = 20,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -115,8 +114,7 @@ void hw_timer_init(void)
/* Enable timer 1 */
TCTL |= TCTL_TEN;
- m68328_clk.mult = clocksource_hz2mult(TICKS_PER_JIFFY*HZ, m68328_clk.shift);
- clocksource_register(&m68328_clk);
+ clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
}
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/dma_timer.c b/arch/m68k/platform/coldfire/dma_timer.c
index a5f562823d7..235ad57c470 100644
--- a/arch/m68k/platform/coldfire/dma_timer.c
+++ b/arch/m68k/platform/coldfire/dma_timer.c
@@ -44,7 +44,6 @@ static struct clocksource clocksource_cf_dt = {
.rating = 200,
.read = cf_dt_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -60,9 +59,7 @@ static int __init init_cf_dt_clocksource(void)
__raw_writeb(0x00, DTER0);
__raw_writel(0x00000000, DTRR0);
__raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0);
- clocksource_cf_dt.mult = clocksource_hz2mult(DMA_FREQ,
- clocksource_cf_dt.shift);
- return clocksource_register(&clocksource_cf_dt);
+ return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ);
}
arch_initcall(init_cf_dt_clocksource);
diff --git a/arch/m68k/platform/coldfire/entry.S b/arch/m68k/platform/coldfire/entry.S
index 3157461a8d1..863889fc31c 100644
--- a/arch/m68k/platform/coldfire/entry.S
+++ b/arch/m68k/platform/coldfire/entry.S
@@ -54,7 +54,6 @@ sw_usp:
.globl ret_from_signal
.globl sys_call_table
.globl inthandler
-.globl fasthandler
enosys:
mov.l #sys_ni_syscall,%d3
@@ -63,6 +62,7 @@ enosys:
ENTRY(system_call)
SAVE_ALL_SYS
move #0x2000,%sr /* enable intrs again */
+ GET_CURRENT(%d2)
cmpl #NR_syscalls,%d0
jcc enosys
@@ -166,6 +166,7 @@ Lsignal_return:
*/
ENTRY(inthandler)
SAVE_ALL_INT
+ GET_CURRENT(%d2)
movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
@@ -191,7 +192,9 @@ ENTRY(resume)
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
RDUSP /* movel %usp,%a3 */
movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
-
+#ifdef CONFIG_MMU
+ movel %a1,%a2 /* set new current */
+#endif
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
WRUSP /* movel %a3,%usp */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
diff --git a/arch/m68k/platform/coldfire/gpio.c b/arch/m68k/platform/coldfire/gpio.c
index ff004579345..292a1a5a2d7 100644
--- a/arch/m68k/platform/coldfire/gpio.c
+++ b/arch/m68k/platform/coldfire/gpio.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <asm/gpio.h>
#include <asm/pinmux.h>
@@ -115,13 +115,14 @@ void mcf_gpio_free(struct gpio_chip *chip, unsigned offset)
mcf_pinmux_release(mcf_chip->gpio_to_pinmux[offset], 0);
}
-struct sysdev_class mcf_gpio_sysclass = {
- .name = "gpio",
+struct bus_type mcf_gpio_subsys = {
+ .name = "gpio",
+ .dev_name = "gpio",
};
static int __init mcf_gpio_sysinit(void)
{
- return sysdev_class_register(&mcf_gpio_sysclass);
+ return subsys_system_register(&mcf_gpio_subsys, NULL);
}
core_initcall(mcf_gpio_sysinit);
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index c33483824a2..38f04a3f620 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -3,7 +3,7 @@
/*
* head.S -- common startup code for ColdFire CPUs.
*
- * (C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>.
+ * (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
*/
/*****************************************************************************/
@@ -13,6 +13,7 @@
#include <asm/asm-offsets.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
+#include <asm/mcfmmu.h>
#include <asm/thread_info.h>
/*****************************************************************************/
@@ -135,6 +136,14 @@ _init_sp:
__HEAD
+#ifdef CONFIG_MMU
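+/* The first 4KB of the image double as kernel_pg_dir; _start0 just jumps past it. */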
+_start0:
+ jmp _start
+.global kernel_pg_dir
+.equ kernel_pg_dir,_start0
+.equ .,_start0+0x1000
+#endif
+
/*
 * This is the code's first entry point. This is where it all
* begins...
@@ -143,6 +152,9 @@ __HEAD
_start:
nop /* filler */
movew #0x2700, %sr /* no interrupts */
+ movel #CACHE_INIT,%d0 /* disable cache */
+ movec %d0,%CACR
+ nop
#if defined(CONFIG_UBOOT)
movel %sp,_init_sp /* save initial stack pointer */
#endif
@@ -176,9 +188,6 @@ _start:
* it is very similar. Define the exact settings in the headers
* then the code here is the same for all.
*/
- movel #CACHE_INIT,%d0 /* invalidate whole cache */
- movec %d0,%CACR
- nop
movel #ACR0_MODE,%d0 /* set RAM region for caching */
movec %d0,%ACR0
movel #ACR1_MODE,%d0 /* anything else to cache? */
@@ -193,6 +202,26 @@ _start:
movec %d0,%CACR
nop
+#ifdef CONFIG_MMU
+ /*
+ * Identity mapping for the kernel region.
+ */
+ movel #(MMUBASE+1),%d0 /* enable MMUBAR registers */
+ movec %d0,%MMUBAR
+ movel #MMUOR_CA,%d0 /* clear TLB entries */
+ movel %d0,MMUOR
+ movel #0,%d0 /* set ASID to 0 */
+ movec %d0,%asid
+
+ movel #MMUCR_EN,%d0 /* Enable the identity map */
+ movel %d0,MMUCR
+ nop /* sync i-pipeline */
+
+ movel #_vstart,%a0 /* jump to "virtual" space */
+ jmp %a0@
+_vstart:
+#endif /* CONFIG_MMU */
+
#ifdef CONFIG_ROMFS_FS
/*
* Move ROM filesystem above bss :-)
@@ -238,6 +267,22 @@ _clear_bss:
lea init_thread_union,%a0
lea THREAD_SIZE(%a0),%sp
+#ifdef CONFIG_MMU
+.global m68k_cputype
+.global m68k_mmutype
+.global m68k_fputype
+.global m68k_machtype
+ movel #CPU_COLDFIRE,%d0
+ movel %d0,m68k_cputype /* Mark us as a ColdFire */
+ movel #MMU_COLDFIRE,%d0
+ movel %d0,m68k_mmutype
+ movel #FPU_COLDFIRE,%d0
+ movel %d0,m68k_fputype
+ movel #MACH_M54XX,%d0
+ movel %d0,m68k_machtype /* Mark us as a 54xx machine */
+ lea init_task,%a2 /* Set "current" init task */
+#endif
+
/*
* Assember start up done, start code proper.
*/
diff --git a/arch/m68k/platform/coldfire/pit.c b/arch/m68k/platform/coldfire/pit.c
index c2b980926be..02663d25822 100644
--- a/arch/m68k/platform/coldfire/pit.c
+++ b/arch/m68k/platform/coldfire/pit.c
@@ -144,7 +144,6 @@ static struct clocksource pit_clk = {
.name = "pit",
.rating = 100,
.read = pit_read_clk,
- .shift = 20,
.mask = CLOCKSOURCE_MASK(32),
};
@@ -162,8 +161,7 @@ void hw_timer_init(void)
setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq);
- pit_clk.mult = clocksource_hz2mult(FREQ, pit_clk.shift);
- clocksource_register(&pit_clk);
+ clocksource_register_hz(&pit_clk, FREQ);
}
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/sltimers.c b/arch/m68k/platform/coldfire/sltimers.c
index 6a85daf9a7f..54e1452f853 100644
--- a/arch/m68k/platform/coldfire/sltimers.c
+++ b/arch/m68k/platform/coldfire/sltimers.c
@@ -98,23 +98,25 @@ static struct irqaction mcfslt_timer_irq = {
static cycle_t mcfslt_read_clk(struct clocksource *cs)
{
unsigned long flags;
- u32 cycles;
- u16 scnt;
+ u32 cycles, scnt;
local_irq_save(flags);
scnt = __raw_readl(TA(MCFSLT_SCNT));
cycles = mcfslt_cnt;
+ if (__raw_readl(TA(MCFSLT_SSR)) & MCFSLT_SSR_TE) {
+ cycles += mcfslt_cycles_per_jiffy;
+ scnt = __raw_readl(TA(MCFSLT_SCNT));
+ }
local_irq_restore(flags);
/* subtract because slice timers count down */
- return cycles - scnt;
+ return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt);
}
static struct clocksource mcfslt_clk = {
.name = "slt",
.rating = 250,
.read = mcfslt_read_clk,
- .shift = 20,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -136,8 +138,7 @@ void hw_timer_init(void)
setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq);
- mcfslt_clk.mult = clocksource_hz2mult(MCF_BUSCLK, mcfslt_clk.shift);
- clocksource_register(&mcfslt_clk);
+ clocksource_register_hz(&mcfslt_clk, MCF_BUSCLK);
#ifdef CONFIG_HIGHPROFILE
mcfslt_profile_init();
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index 60242f65fea..0d90da32fcd 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -88,7 +88,6 @@ static struct clocksource mcftmr_clk = {
.name = "tmr",
.rating = 250,
.read = mcftmr_read_clk,
- .shift = 20,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -109,8 +108,7 @@ void hw_timer_init(void)
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
- mcftmr_clk.mult = clocksource_hz2mult(FREQ, mcftmr_clk.shift);
- clocksource_register(&mcftmr_clk);
+ clocksource_register_hz(&mcftmr_clk, FREQ);
setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index cc54187f3d3..a175132e449 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -9,7 +9,14 @@
#ifndef _ASM_MICROBLAZE_IRQ_H
#define _ASM_MICROBLAZE_IRQ_H
-#define NR_IRQS 32
+
+/*
+ * Linux IRQ# is currently offset by one to map to the hardware
+ * irq number. So hardware IRQ0 maps to Linux irq 1.
+ */
+#define NO_IRQ_OFFSET 1
+#define IRQ_OFFSET NO_IRQ_OFFSET
+#define NR_IRQS (32 + IRQ_OFFSET)
#include <asm-generic/irq.h>
/* This type is the placeholder for a hardware interrupt number. It has to
@@ -20,8 +27,6 @@ typedef unsigned long irq_hw_number_t;
extern unsigned int nr_irq;
-#define NO_IRQ (-1)
-
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h
deleted file mode 100644
index 20a8e257c77..00000000000
--- a/arch/microblaze/include/asm/memblock.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
-#define _ASM_MICROBLAZE_MEMBLOCK_H
-
-#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
-
-
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index ed9d0f6e2cd..a25e6b5e2ad 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -174,15 +174,8 @@ extern int page_is_ram(unsigned long pfn);
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
-
-# ifndef CONFIG_MMU
-# define __pa(vaddr) ((unsigned long) (vaddr))
-# define __va(paddr) ((void *) (paddr))
-# else /* CONFIG_MMU */
-# define __pa(x) __virt_to_phys((unsigned long)(x))
-# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
-# endif /* CONFIG_MMU */
-
+# define __pa(x) __virt_to_phys((unsigned long)(x))
+# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 904e5ef6a11..6c72ed7eba9 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -26,12 +26,6 @@ int setup_early_printk(char *opt);
void remap_early_printk(void);
void disable_early_printk(void);
-#if defined(CONFIG_EARLY_PRINTK)
-#define eprintk early_printk
-#else
-#define eprintk printk
-#endif
-
void heartbeat(void);
void setup_heartbeat(void);
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2ac21b..1a8ab6a5c03 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
-#define TIF_FREEZE 14 /* Freezing for suspend */
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_IRET (1 << TIF_IRET)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 7d7092b917a..d20ffbc86be 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -391,8 +391,11 @@
#define __NR_clock_adjtime 373
#define __NR_syncfs 374
#define __NR_setns 375
+#define __NR_sendmmsg 376
+#define __NR_process_vm_readv 377
+#define __NR_process_vm_writev 378
-#define __NR_syscalls 376
+#define __NR_syscalls 379
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index d26d92d4775..8356e47631c 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -50,9 +50,9 @@ static void early_printk_uartlite_write(struct console *unused,
const char *s, unsigned n)
{
while (*s && n-- > 0) {
- early_printk_uartlite_putc(*s);
if (*s == '\n')
early_printk_uartlite_putc('\r');
+ early_printk_uartlite_putc(*s);
s++;
}
}
@@ -94,9 +94,9 @@ static void early_printk_uart16550_write(struct console *unused,
const char *s, unsigned n)
{
while (*s && n-- > 0) {
- early_printk_uart16550_putc(*s);
if (*s == '\n')
early_printk_uart16550_putc('\r');
+ early_printk_uart16550_putc(*s);
s++;
}
}
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index ca15bc5c744..66e34a3bfe1 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -468,7 +468,7 @@ C_ENTRY(sys_fork_wrapper):
addi r5, r0, SIGCHLD /* Arg 0: flags */
lwi r6, r1, PT_R1 /* Arg 1: child SP (use parent's) */
addik r7, r1, 0 /* Arg 2: parent context */
- add r8. r0, r0 /* Arg 3: (unused) */
+ add r8, r0, r0 /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
brid do_fork /* Do real work (tail-call) */
add r10, r0, r0; /* Arg 5: (unused) */
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index eb41441c7fd..44b177e2ab1 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -42,8 +42,9 @@ unsigned int nr_irq;
static void intc_enable_or_unmask(struct irq_data *d)
{
- unsigned long mask = 1 << d->irq;
- pr_debug("enable_or_unmask: %d\n", d->irq);
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("enable_or_unmask: %ld\n", d->hwirq);
out_be32(INTC_BASE + SIE, mask);
/* ack level irqs because they can't be acked during
@@ -56,20 +57,21 @@ static void intc_enable_or_unmask(struct irq_data *d)
static void intc_disable_or_mask(struct irq_data *d)
{
- pr_debug("disable: %d\n", d->irq);
- out_be32(INTC_BASE + CIE, 1 << d->irq);
+ pr_debug("disable: %ld\n", d->hwirq);
+ out_be32(INTC_BASE + CIE, 1 << d->hwirq);
}
static void intc_ack(struct irq_data *d)
{
- pr_debug("ack: %d\n", d->irq);
- out_be32(INTC_BASE + IAR, 1 << d->irq);
+ pr_debug("ack: %ld\n", d->hwirq);
+ out_be32(INTC_BASE + IAR, 1 << d->hwirq);
}
static void intc_mask_ack(struct irq_data *d)
{
- unsigned long mask = 1 << d->irq;
- pr_debug("disable_and_ack: %d\n", d->irq);
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("disable_and_ack: %ld\n", d->hwirq);
out_be32(INTC_BASE + CIE, mask);
out_be32(INTC_BASE + IAR, mask);
}
@@ -91,7 +93,7 @@ unsigned int get_irq(struct pt_regs *regs)
* order to handle multiple interrupt controllers. It currently
* is hardcoded to check for interrupts only on the first INTC.
*/
- irq = in_be32(INTC_BASE + IVR);
+ irq = in_be32(INTC_BASE + IVR) + NO_IRQ_OFFSET;
pr_debug("get_irq: %d\n", irq);
return irq;
@@ -99,7 +101,7 @@ unsigned int get_irq(struct pt_regs *regs)
void __init init_IRQ(void)
{
- u32 i, j, intr_type;
+ u32 i, intr_mask;
struct device_node *intc = NULL;
#ifdef CONFIG_SELFMOD_INTC
unsigned int intc_baseaddr = 0;
@@ -113,35 +115,24 @@ void __init init_IRQ(void)
0
};
#endif
- const char * const intc_list[] = {
- "xlnx,xps-intc-1.00.a",
- NULL
- };
-
- for (j = 0; intc_list[j] != NULL; j++) {
- intc = of_find_compatible_node(NULL, NULL, intc_list[j]);
- if (intc)
- break;
- }
+ intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a");
BUG_ON(!intc);
- intc_baseaddr = be32_to_cpup(of_get_property(intc,
- "reg", NULL));
+ intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL));
intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
nr_irq = be32_to_cpup(of_get_property(intc,
"xlnx,num-intr-inputs", NULL));
- intr_type =
- be32_to_cpup(of_get_property(intc,
- "xlnx,kind-of-intr", NULL));
- if (intr_type > (u32)((1ULL << nr_irq) - 1))
+ intr_mask =
+ be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
+ if (intr_mask > (u32)((1ULL << nr_irq) - 1))
printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
#ifdef CONFIG_SELFMOD_INTC
selfmod_function((int *) arr_func, intc_baseaddr);
#endif
- printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
- intc_list[j], intc_baseaddr, nr_irq, intr_type);
+ printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
+ intc_baseaddr, nr_irq, intr_mask);
/*
* Disable all external interrupts until they are
@@ -155,8 +146,8 @@ void __init init_IRQ(void)
/* Turn on the Master Enable. */
out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
- for (i = 0; i < nr_irq; ++i) {
- if (intr_type & (0x00000001 << i)) {
+ for (i = IRQ_OFFSET; i < (nr_irq + IRQ_OFFSET); ++i) {
+ if (intr_mask & (0x00000001 << (i - IRQ_OFFSET))) {
irq_set_chip_and_handler_name(i, &intc_dev,
handle_edge_irq, "edge");
irq_clear_status_flags(i, IRQ_LEVEL);
@@ -165,5 +156,6 @@ void __init init_IRQ(void)
handle_level_irq, "level");
irq_set_status_flags(i, IRQ_LEVEL);
}
+ irq_get_irq_data(i)->hwirq = i - IRQ_OFFSET;
}
}
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index e5d63a89b9b..bbebcae72c0 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -33,11 +33,12 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
irq_enter();
irq = get_irq(regs);
next_irq:
- BUG_ON(irq == -1U);
- generic_handle_irq(irq);
+ BUG_ON(!irq);
+ /* Subtract 1 because of get_irq */
+ generic_handle_irq(irq + IRQ_OFFSET - NO_IRQ_OFFSET);
irq = get_irq(regs);
- if (irq != -1U) {
+ if (irq) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
goto next_irq;
@@ -52,13 +53,13 @@ next_irq:
intc without any cascades or any connection that's why mapping is 1:1 */
unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
{
- return hwirq;
+ return hwirq + IRQ_OFFSET;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
- return intspec[0];
+ return intspec[0] + IRQ_OFFSET;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 142426f631b..f39257a5abc 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -100,7 +100,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
break;
case R_MICROBLAZE_64_NONE:
- pr_debug("R_MICROBLAZE_NONE\n");
+ pr_debug("R_MICROBLAZE_64_NONE\n");
break;
case R_MICROBLAZE_NONE:
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 95cc295976a..7dcb5bfffb7 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -103,10 +103,12 @@ void cpu_idle(void)
if (!idle)
idle = default_idle;
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 977484add21..80d314e8190 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -122,7 +122,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
@@ -130,7 +129,7 @@ void __init early_init_devtree(void *params)
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
- memblock_analyze();
+ memblock_allow_resize();
pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index bd8ccab5cef..88a01636f78 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -19,50 +19,11 @@
static int handle; /* reset pin handle */
static unsigned int reset_val;
-static int of_reset_gpio_handle(void)
-{
- int ret; /* variable which stored handle reset gpio pin */
- struct device_node *root; /* root node */
- struct device_node *gpio; /* gpio node */
- struct gpio_chip *gc;
- u32 flags;
- const void *gpio_spec;
-
- /* find out root node */
- root = of_find_node_by_path("/");
-
- /* give me handle for gpio node to be possible allocate pin */
- ret = of_parse_phandles_with_args(root, "hard-reset-gpios",
- "#gpio-cells", 0, &gpio, &gpio_spec);
- if (ret) {
- pr_debug("%s: can't parse gpios property\n", __func__);
- goto err0;
- }
-
- gc = of_node_to_gpiochip(gpio);
- if (!gc) {
- pr_debug("%s: gpio controller %s isn't registered\n",
- root->full_name, gpio->full_name);
- ret = -ENODEV;
- goto err1;
- }
-
- ret = gc->of_xlate(gc, root, gpio_spec, &flags);
- if (ret < 0)
- goto err1;
-
- ret += gc->base;
-err1:
- of_node_put(gpio);
-err0:
- pr_debug("%s exited with status %d\n", __func__, ret);
- return ret;
-}
-
void of_platform_reset_gpio_probe(void)
{
int ret;
- handle = of_reset_gpio_handle();
+ handle = of_get_named_gpio(of_find_node_by_path("/"),
+ "hard-reset-gpios", 0);
if (!gpio_is_valid(handle)) {
printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n",
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 0e654a12d37..604cd9dd133 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -145,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
- eprintk("Ramdisk addr 0x%08x, ", ram);
+ printk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
- eprintk("FDT at 0x%08x\n", fdt);
+ printk("FDT at 0x%08x\n", fdt);
else
- eprintk("Compiled-in FDT at 0x%08x\n",
+ printk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
- eprintk("Found romfs @ 0x%08x (0x%08x)\n",
+ printk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
- eprintk("#### klimit %p ####\n", old_klimit);
+ printk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
- eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
- eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
+ printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
- eprintk("!!!Your kernel has setup MSR instruction but "
+ printk("!!!Your kernel has setup MSR instruction but "
"CPU don't have it %x\n", msr);
#else
if (!msr)
- eprintk("!!!Your kernel not setup MSR instruction but "
+ printk("!!!Your kernel not setup MSR instruction but "
"CPU have it %x\n", msr);
#endif
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 8789daa2a34..6a2b294ef6d 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -380,3 +380,6 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns /* 375 */
+ .long sys_sendmmsg
+ .long sys_process_vm_readv
+ .long sys_process_vm_writev
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index af74b1113aa..3cb0bf64013 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -243,7 +243,7 @@ static int timer_initialized;
void __init time_init(void)
{
- u32 irq, i = 0;
+ u32 irq;
u32 timer_num = 1;
struct device_node *timer = NULL;
const void *prop;
@@ -258,33 +258,24 @@ void __init time_init(void)
0
};
#endif
- const char * const timer_list[] = {
- "xlnx,xps-timer-1.00.a",
- NULL
- };
-
- for (i = 0; timer_list[i] != NULL; i++) {
- timer = of_find_compatible_node(NULL, NULL, timer_list[i]);
- if (timer)
- break;
- }
+ timer = of_find_compatible_node(NULL, NULL, "xlnx,xps-timer-1.00.a");
BUG_ON(!timer);
timer_baseaddr = be32_to_cpup(of_get_property(timer, "reg", NULL));
timer_baseaddr = (unsigned long) ioremap(timer_baseaddr, PAGE_SIZE);
- irq = be32_to_cpup(of_get_property(timer, "interrupts", NULL));
+ irq = irq_of_parse_and_map(timer, 0);
timer_num = be32_to_cpup(of_get_property(timer,
"xlnx,one-timer-only", NULL));
if (timer_num) {
- eprintk(KERN_EMERG "Please enable two timers in HW\n");
+ printk(KERN_EMERG "Please enable two timers in HW\n");
BUG();
}
#ifdef CONFIG_SELFMOD_TIMER
selfmod_function((int *) arr_func, timer_baseaddr);
#endif
- printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n",
- timer_list[i], timer_baseaddr, irq);
+ printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n",
+ timer_baseaddr, irq);
/* If there is clock-frequency property than use it */
prop = of_get_property(timer, "clock-frequency", NULL);
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index c13067b243c..844960e8ae1 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -20,6 +20,7 @@ lib-y += uaccess_old.o
lib-y += ashldi3.o
lib-y += ashrdi3.o
+lib-y += cmpdi2.o
lib-y += divsi3.o
lib-y += lshrdi3.o
lib-y += modsi3.o
diff --git a/arch/microblaze/lib/cmpdi2.c b/arch/microblaze/lib/cmpdi2.c
new file mode 100644
index 00000000000..a708400ea7b
--- /dev/null
+++ b/arch/microblaze/lib/cmpdi2.c
@@ -0,0 +1,26 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+word_type __cmpdi2(long long a, long long b)
+{
+ const DWunion au = {
+ .ll = a
+ };
+ const DWunion bu = {
+ .ll = b
+ };
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+
+ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+ return 0;
+ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+ return 2;
+
+ return 1;
+}
+EXPORT_SYMBOL(__cmpdi2);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index db841c7b9d5..0d71b2ed810 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -242,7 +242,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
line, pin);
virq = irq_create_mapping(NULL, line);
- if (virq != NO_IRQ)
+ if (virq)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
@@ -253,7 +253,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
}
- if (virq == NO_IRQ) {
+ if (!virq) {
pr_debug(" Failed to map !\n");
return -1;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b70c96f4417..29d92187ff3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select HAVE_DMA_ATTRS
@@ -25,6 +26,9 @@ config MIPS
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
select IRQ_FORCED_THREADING
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
menu "Machine selection"
@@ -65,7 +69,6 @@ config AR7
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_ZBOOT_UART16550
select ARCH_REQUIRE_GPIOLIB
- select GCD
select VLYNQ
help
Support for the Texas Instruments AR7 System-on-a-Chip
@@ -2064,9 +2067,6 @@ config ARCH_DISCONTIGMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config ARCH_SPARSEMEM_ENABLE
bool
select SPARSEMEM_STATIC
@@ -2370,10 +2370,6 @@ config TC
Linux driver support status is documented at:
<http://www.linux-mips.org/wiki/DECstation>
-#config ACCESSBUS
-# bool "Access.Bus support"
-# depends on TC
-
config MMU
bool
default y
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 40b223b603b..c22385400fc 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -834,10 +834,13 @@ static struct mtd_partition mtd_partitions[] = {
}
};
+static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
+
static struct physmap_flash_data flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(mtd_partitions),
.parts = mtd_partitions,
+ .part_probe_types = bcm63xx_part_types,
};
static struct resource mtd_resources[] = {
diff --git a/arch/mips/include/asm/ip32/mace.h b/arch/mips/include/asm/ip32/mace.h
index d08d7c67213..c523123df38 100644
--- a/arch/mips/include/asm/ip32/mace.h
+++ b/arch/mips/include/asm/ip32/mace.h
@@ -95,7 +95,7 @@ struct mace_video {
* Ethernet interface
*/
struct mace_ethernet {
- volatile unsigned long mac_ctrl;
+ volatile u64 mac_ctrl;
volatile unsigned long int_stat;
volatile unsigned long dma_ctrl;
volatile unsigned long timer;
diff --git a/arch/mips/include/asm/ipcbuf.h b/arch/mips/include/asm/ipcbuf.h
index d47d08f264e..84c7e51cb6d 100644
--- a/arch/mips/include/asm/ipcbuf.h
+++ b/arch/mips/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_IPCBUF_H
-#define _ASM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for alpha architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit seq
- * - 2 miscellaneous 64-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned short seq;
- unsigned short __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
index ed72e6a26b7..1e6b587f62c 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -16,7 +16,6 @@
#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
-#define CRC_LEN 4 /* Length of CRC in bytes */
#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
#define NUM_PIRELLI 2
@@ -77,19 +76,19 @@ struct bcm_tag {
/* 192-195: Version flash layout */
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
- char fskernel_crc[CRC_LEN];
+ __u32 fskernel_crc;
/* 200-215: Unused except on Alice Gate where is is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
- char image_crc[CRC_LEN];
+ __u32 image_crc;
/* 220-223: CRC32 of rootfs partition */
- char rootfs_crc[CRC_LEN];
+ __u32 rootfs_crc;
/* 224-227: CRC32 of kernel partition */
- char kernel_crc[CRC_LEN];
+ __u32 kernel_crc;
/* 228-235: Unused at present */
char reserved1[8];
/* 236-239: CRC32 of header excluding last 20 bytes */
- char header_crc[CRC_LEN];
+ __u32 header_crc;
/* 240-255: Unused at present */
char reserved2[16];
};
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h
index 9de5190f248..ad5c0a7a02a 100644
--- a/arch/mips/include/asm/socket.h
+++ b/arch/mips/include/asm/socket.h
@@ -82,6 +82,9 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#ifdef __KERNEL__
/** sock_type - Socket types
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6639e..0d85d8e440c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
#define TIF_FIXADE 20 /* Fix address errors in software */
#define TIF_LOGADE 21 /* Log address errors to syslog */
#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_FIXADE (1<<TIF_FIXADE)
#define _TIF_LOGADE (1<<TIF_LOGADE)
#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index 533812b6188..43bf70ebd3a 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -21,12 +21,6 @@
# include <asm-generic/int-ll64.h>
#endif
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2971bcf8e..315fc0b250f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
- return -ENOSPC;
+ return -EINVAL;
}
mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_cpuc, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c47f96e453c..7955409051c 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -56,7 +56,8 @@ void __noreturn cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
@@ -77,7 +78,8 @@ void __noreturn cpu_idle(void)
system_state == SYSTEM_BOOTING))
play_dead();
#endif
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 84af26ab221..b1cb8f87d7b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -14,6 +14,7 @@
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
+#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
@@ -352,7 +353,7 @@ static void __init bootmem_init(void)
continue;
#endif
- add_active_range(0, start, end);
+ memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
/*
diff --git a/arch/mips/sgi-ip27/Kconfig b/arch/mips/sgi-ip27/Kconfig
index bc5e9769bb7..4b2ea282b9c 100644
--- a/arch/mips/sgi-ip27/Kconfig
+++ b/arch/mips/sgi-ip27/Kconfig
@@ -1,9 +1,3 @@
-#config SGI_SN0_XXL
-# bool "IP27 XXL"
-# depends on SGI_IP27
-# This options adds support for userspace processes up to 16TB size.
-# Normally the limit is just .5TB.
-
choice
prompt "Node addressing mode"
depends on SGI_IP27
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index bc1297109cc..b105eca3c02 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -12,6 +12,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -381,8 +382,8 @@ static void __init szmem(void)
continue;
}
num_physpages += slot_psize;
- add_active_range(node, slot_getbasepfn(node, slot),
- slot_getbasepfn(node, slot) + slot_psize);
+ memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+ PFN_PHYS(slot_psize), node);
}
}
}
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index 3e639bda43f..3cd937e0e9a 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -71,7 +71,6 @@ config SIBYTE_SB1xxx_SOC
bool
select DMA_COHERENT
select IRQ_CPU
- select SIBYTE_CFE
select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c
index 7f8416f8622..8e93b212252 100644
--- a/arch/mips/txx9/generic/7segled.c
+++ b/arch/mips/txx9/generic/7segled.c
@@ -9,7 +9,7 @@
* (C) Copyright TOSHIBA CORPORATION 2005-2007
* All Rights Reserved.
*/
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/slab.h>
#include <linux/map_to_7segment.h>
#include <asm/txx9/generic.h>
@@ -37,8 +37,8 @@ int txx9_7segled_putc(unsigned int pos, char c)
return 0;
}
-static ssize_t ascii_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t ascii_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned int ch = dev->id;
@@ -46,8 +46,8 @@ static ssize_t ascii_store(struct sys_device *dev,
return size;
}
-static ssize_t raw_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t raw_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned int ch = dev->id;
@@ -55,19 +55,19 @@ static ssize_t raw_store(struct sys_device *dev,
return size;
}
-static SYSDEV_ATTR(ascii, 0200, NULL, ascii_store);
-static SYSDEV_ATTR(raw, 0200, NULL, raw_store);
+static DEVICE_ATTR(ascii, 0200, NULL, ascii_store);
+static DEVICE_ATTR(raw, 0200, NULL, raw_store);
-static ssize_t map_seg7_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t map_seg7_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
memcpy(buf, &txx9_seg7map, sizeof(txx9_seg7map));
return sizeof(txx9_seg7map);
}
-static ssize_t map_seg7_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t map_seg7_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
if (size != sizeof(txx9_seg7map))
@@ -76,10 +76,11 @@ static ssize_t map_seg7_store(struct sysdev_class *class,
return size;
}
-static SYSDEV_CLASS_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
+static DEVICE_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
-static struct sysdev_class tx_7segled_sysdev_class = {
- .name = "7segled",
+static struct bus_type tx_7segled_subsys = {
+ .name = "7segled",
+ .dev_name = "7segled",
};
static int __init tx_7segled_init_sysfs(void)
@@ -87,26 +88,25 @@ static int __init tx_7segled_init_sysfs(void)
int error, i;
if (!tx_7segled_num)
return -ENODEV;
- error = sysdev_class_register(&tx_7segled_sysdev_class);
+ error = subsys_system_register(&tx_7segled_subsys, NULL);
if (error)
return error;
- error = sysdev_class_create_file(&tx_7segled_sysdev_class,
- &attr_map_seg7);
+ error = device_create_file(tx_7segled_subsys.dev_root, &dev_attr_map_seg7);
if (error)
return error;
for (i = 0; i < tx_7segled_num; i++) {
- struct sys_device *dev;
+ struct device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
error = -ENODEV;
break;
}
dev->id = i;
- dev->cls = &tx_7segled_sysdev_class;
- error = sysdev_register(dev);
+ dev->dev = &tx_7segled_subsys;
+ error = device_register(dev);
if (!error) {
- sysdev_create_file(dev, &attr_ascii);
- sysdev_create_file(dev, &attr_raw);
+ device_create_file(dev, &dev_attr_ascii);
+ device_create_file(dev, &dev_attr_raw);
}
}
return error;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index ec38e00b255..ae77a7916c0 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -22,7 +22,7 @@
#include <linux/serial_core.h>
#include <linux/mtd/physmap.h>
#include <linux/leds.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
@@ -897,10 +897,13 @@ void __init txx9_aclc_init(unsigned long baseaddr, int irq,
#endif
}
-static struct sysdev_class txx9_sramc_sysdev_class;
+static struct bus_type txx9_sramc_subsys = {
+ .name = "txx9_sram",
+ .dev_name = "txx9_sram",
+};
-struct txx9_sramc_sysdev {
- struct sys_device dev;
+struct txx9_sramc_dev {
+ struct device dev;
struct bin_attribute bindata_attr;
void __iomem *base;
};
@@ -909,7 +912,7 @@ static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct txx9_sramc_sysdev *dev = bin_attr->private;
+ struct txx9_sramc_dev *dev = bin_attr->private;
size_t ramsize = bin_attr->size;
if (pos >= ramsize)
@@ -924,7 +927,7 @@ static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct txx9_sramc_sysdev *dev = bin_attr->private;
+ struct txx9_sramc_dev *dev = bin_attr->private;
size_t ramsize = bin_attr->size;
if (pos >= ramsize)
@@ -937,18 +940,13 @@ static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
void __init txx9_sramc_init(struct resource *r)
{
- struct txx9_sramc_sysdev *dev;
+ struct txx9_sramc_dev *dev;
size_t size;
int err;
- if (!txx9_sramc_sysdev_class.name) {
- txx9_sramc_sysdev_class.name = "txx9_sram";
- err = sysdev_class_register(&txx9_sramc_sysdev_class);
- if (err) {
- txx9_sramc_sysdev_class.name = NULL;
- return;
- }
- }
+ err = subsys_system_register(&txx9_sramc_subsys, NULL);
+ if (err)
+ return;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
@@ -956,7 +954,7 @@ void __init txx9_sramc_init(struct resource *r)
dev->base = ioremap(r->start, size);
if (!dev->base)
goto exit;
- dev->dev.cls = &txx9_sramc_sysdev_class;
+ dev->dev.bus = &txx9_sramc_subsys;
sysfs_bin_attr_init(&dev->bindata_attr);
dev->bindata_attr.attr.name = "bindata";
dev->bindata_attr.attr.mode = S_IRUSR | S_IWUSR;
@@ -964,12 +962,12 @@ void __init txx9_sramc_init(struct resource *r)
dev->bindata_attr.write = txx9_sram_write;
dev->bindata_attr.size = size;
dev->bindata_attr.private = dev;
- err = sysdev_register(&dev->dev);
+ err = device_register(&dev->dev);
if (err)
goto exit;
err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
if (err) {
- sysdev_unregister(&dev->dev);
+ device_unregister(&dev->dev);
goto exit;
}
return;
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index ba3cec3155d..6567895d1f5 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/param.h>
#include <linux/ptrace.h>
diff --git a/arch/mn10300/include/asm/ipcbuf.h b/arch/mn10300/include/asm/ipcbuf.h
index f6f63d44827..84c7e51cb6d 100644
--- a/arch/mn10300/include/asm/ipcbuf.h
+++ b/arch/mn10300/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef _ASM_IPCBUF_H
-#define _ASM_IPCBUF_H
-
-/*
- * The ipc64_perm structure for MN10300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h
index 4e60c428128..876356d7852 100644
--- a/arch/mn10300/include/asm/socket.h
+++ b/arch/mn10300/include/asm/socket.h
@@ -62,4 +62,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c213002d4..28cf52100ba 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE +(1 << TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/mn10300/include/asm/types.h b/arch/mn10300/include/asm/types.h
index c1833eb192e..713d4ba108a 100644
--- a/arch/mn10300/include/asm/types.h
+++ b/arch/mn10300/include/asm/types.h
@@ -13,12 +13,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
deleted file mode 100644
index bbe5a1c788c..00000000000
--- a/arch/openrisc/include/asm/memblock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_OPENRISC_MEMBLOCK_H
-#define __ASM_OPENRISC_MEMBLOCK_H
-
-/* empty */
-
-#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
index d5bc5f813e8..e5fc7887783 100644
--- a/arch/openrisc/kernel/idle.c
+++ b/arch/openrisc/kernel/idle.c
@@ -51,7 +51,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -69,7 +70,8 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 1bb58ba89af..3d4478f6c94 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -76,14 +76,13 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
- memblock_analyze();
+ memblock_allow_resize();
/* We must copy the flattend device tree from init memory to regular
* memory because the device tree references the strings in it
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 6ab9580b0b0..d9dc6cd3b7d 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -136,16 +136,9 @@ struct hpux_ustat {
*/
static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
{
- struct super_block *s;
struct hpux_ustat tmp; /* Changed to hpux_ustat */
struct kstatfs sbuf;
- int err = -EINVAL;
-
- s = user_get_super(dev);
- if (s == NULL)
- goto out;
- err = statfs_by_dentry(s->s_root, &sbuf);
- drop_super(s);
+ int err = vfs_ustat(dev, &sbuf);
if (err)
goto out;
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
index 225b7d6a1a0..d28c51b6106 100644
--- a/arch/parisc/include/asm/socket.h
+++ b/arch/parisc/include/asm/socket.h
@@ -61,6 +61,9 @@
#define SO_RXQ_OVFL 0x4021
+#define SO_WIFI_STATUS 0x4022
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de727e90..6d9c7c7973d 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@ struct thread_info {
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
-#define TIF_FREEZE 7 /* is freezing for suspend */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9 /* single stepping? */
#define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h
index 80e415c9936..8866f9bbdea 100644
--- a/arch/parisc/include/asm/types.h
+++ b/arch/parisc/include/asm/types.h
@@ -3,10 +3,4 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
#endif
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 45b7389d77a..7c0774397b8 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -198,8 +198,6 @@ static struct clocksource clocksource_cr16 = {
.rating = 300,
.read = read_cr16,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
- .mult = 0, /* to be set */
- .shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -270,7 +268,5 @@ void __init time_init(void)
/* register at clocksource framework */
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
- clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
- clocksource_cr16.shift);
- clocksource_register(&clocksource_cr16);
+ clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6ffe3df3e57..1919634a9b3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -87,6 +87,10 @@ config ARCH_HAS_ILOG2_U64
bool
default y if 64BIT
+config ARCH_HAS_CPU_IDLE_WAIT
+ bool
+ default y
+
config GENERIC_HWEIGHT
bool
default y
@@ -117,6 +121,7 @@ config PPC
select HAVE_KRETPROBES
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select USE_GENERIC_SMP_HELPERS if SMP
@@ -132,6 +137,7 @@ config PPC
select IRQ_PER_CPU
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
+ select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_BPF_JIT if (PPC64 && NET)
@@ -362,8 +368,9 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
- depends on PPC64 || 6xx || FSL_BOOKE
- select RELOCATABLE if PPC64 || FSL_BOOKE
+ depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x)
+ select RELOCATABLE if PPC64 || 44x
+ select DYNAMIC_MEMSTART if FSL_BOOKE
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -421,9 +428,6 @@ config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on (SMP && PPC_PSERIES) || PPC_PS3
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config SYS_SUPPORTS_HUGETLBFS
bool
@@ -687,6 +691,10 @@ config FSL_LBC
controller. Also contains some common code used by
drivers for specific local bus peripherals.
+config FSL_IFC
+ bool
+ depends on FSL_SOC
+
config FSL_GTM
bool
depends on PPC_83xx || QUICC_ENGINE || CPM2
@@ -773,6 +781,10 @@ source "drivers/rapidio/Kconfig"
endmenu
+config NONSTATIC_KERNEL
+ bool
+ default n
+
menu "Advanced setup"
depends on PPC32
@@ -822,13 +834,32 @@ config LOWMEM_CAM_NUM
int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL
default 3
+config DYNAMIC_MEMSTART
+ bool "Enable page aligned dynamic load address for kernel (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x)
+ select NONSTATIC_KERNEL
+ help
+ This option enables the kernel to be loaded at any page aligned
+ physical address. The kernel creates a mapping from KERNELBASE to
+ the address where the kernel is loaded. The page size here implies
+ the TLB page size of the mapping for kernel on the particular platform.
+ Please refer to the init code for finding the TLB page size.
+
+ DYNAMIC_MEMSTART is an easy way of implementing pseudo-RELOCATABLE
+ kernel image, where the only restriction is the page aligned kernel
+ load address. When this option is enabled, the compile time physical
+ address CONFIG_PHYSICAL_START is ignored.
+
+ This option is overridden by CONFIG_RELOCATABLE
+
config RELOCATABLE
bool "Build a relocatable kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || PPC_47x)
+ depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && 44x
+ select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running at the
- location the kernel is loaded at (some alignment restrictions may
- exist).
+ location the kernel is loaded at, without any alignment restrictions.
+ This feature is a superset of DYNAMIC_MEMSTART and hence overrides it.
One use is for the kexec on panic case where the recovery kernel
must live at a different physical address than the primary
@@ -838,7 +869,11 @@ config RELOCATABLE
it has been loaded at and the compile time physical addresses
CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
setting can still be useful to bootwrappers that need to know the
- load location of the kernel (eg. u-boot/mkimage).
+ load address of the kernel (eg. u-boot/mkimage).
+
+config RELOCATABLE_PPC32
+ def_bool y
+ depends on PPC32 && RELOCATABLE
config PAGE_OFFSET_BOOL
bool "Set custom page offset address"
@@ -868,7 +903,7 @@ config KERNEL_START_BOOL
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
default PAGE_OFFSET if PAGE_OFFSET_BOOL
- default "0xc2000000" if CRASH_DUMP && !RELOCATABLE
+ default "0xc2000000" if CRASH_DUMP && !NONSTATIC_KERNEL
default "0xc0000000"
config PHYSICAL_START_BOOL
@@ -881,7 +916,7 @@ config PHYSICAL_START_BOOL
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
- default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !RELOCATABLE
+ default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !NONSTATIC_KERNEL
default "0x00000000"
config PHYSICAL_ALIGN
@@ -927,6 +962,7 @@ endmenu
if PPC64
config RELOCATABLE
bool "Build a relocatable kernel"
+ select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running anywhere
in the RMA (real memory area) at any 16k-aligned base address.
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 1b8a9c905cf..4ccb2a009f7 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -336,4 +336,16 @@ config PPC_EARLY_DEBUG_CPM_ADDR
platform probing is done, all platforms selected must
share the same address.
+config STRICT_DEVMEM
+ def_bool y
+ prompt "Filter access to /dev/mem"
+ help
+ This option restricts access to /dev/mem. If this option is
+ disabled, you allow userspace access to all memory, including
+ kernel and userspace memory. Accidental memory access is likely
+ to be disastrous.
+ Memory access is required for experts who want to debug the kernel.
+
+ If you are unsure, say Y.
+
endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 70ba0c0a122..b8b105c01c6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -63,9 +63,9 @@ override CC += -m$(CONFIG_WORD_SIZE)
override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
endif
-LDFLAGS_vmlinux-yy := -Bstatic
-LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie
-LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy)
+LDFLAGS_vmlinux-y := -Bstatic
+LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
@@ -131,8 +131,7 @@ KBUILD_CFLAGS += -mno-sched-epilog
endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
-cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
-cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
+cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_E200) += -Wa,-me200
@@ -166,7 +165,7 @@ all: zImage
# With make 3.82 we cannot mix normal and wildcard targets
BOOT_TARGETS1 := zImage zImage.initrd uImage
-BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
+BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% uImage.%
PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 72ee8c1fba4..15986e70799 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -45,6 +45,7 @@ $(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
@@ -79,7 +80,8 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
- gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
+ gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c \
+ treeboot-currituck.c
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -199,6 +201,7 @@ image-$(CONFIG_EP405) += dtbImage.ep405
image-$(CONFIG_HOTFOOT) += cuImage.hotfoot
image-$(CONFIG_WALNUT) += treeImage.walnut
image-$(CONFIG_ACADIA) += cuImage.acadia
+image-$(CONFIG_OBS600) += uImage.obs600
# Board ports in arch/powerpc/platform/44x/Kconfig
image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony
@@ -212,6 +215,7 @@ image-$(CONFIG_WARP) += cuImage.warp
image-$(CONFIG_YOSEMITE) += cuImage.yosemite
image-$(CONFIG_ISS4xx) += treeImage.iss4xx \
treeImage.iss4xx-mpic
+image-$(CONFIG_CURRITUCK) += treeImage.currituck
# Board ports in arch/powerpc/platform/8xx/Kconfig
image-$(CONFIG_MPC86XADS) += cuImage.mpc866ads
@@ -316,6 +320,12 @@ $(obj)/zImage.iseries: vmlinux
$(obj)/uImage: vmlinux $(wrapperbits)
$(call if_changed,wrap,uboot)
+$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+ $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+
+$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+ $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb)
+
$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
diff --git a/arch/powerpc/boot/dcr.h b/arch/powerpc/boot/dcr.h
index 645a7c964e5..cc73f7a95e2 100644
--- a/arch/powerpc/boot/dcr.h
+++ b/arch/powerpc/boot/dcr.h
@@ -9,6 +9,12 @@
})
#define mtdcr(rn, val) \
asm volatile("mtdcr %0,%1" : : "i"(rn), "r"(val))
+#define mfdcrx(rn) \
+ ({ \
+ unsigned long rval; \
+ asm volatile("mfdcrx %0,%1" : "=r"(rval) : "r"(rn)); \
+ rval; \
+ })
/* 440GP/440GX SDRAM controller DCRs */
#define DCRN_SDRAM0_CFGADDR 0x010
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index d271ab54267..bbcb8a4cc12 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -57,3 +57,55 @@ __div64_32:
stw r8,4(r3)
mr r3,r6 # return the remainder in r3
blr
+
+/*
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
+ *
+ * R3/R4 has 64 bit value
+ * R5 has shift count
+ * result in R3/R4
+ *
+ * ashrdi3: arithmetic right shift (sign propagation)
+ * lshrdi3: logical right shift
+ * ashldi3: left shift
+ */
+ .globl __ashrdi3
+__ashrdi3:
+ subfic r6,r5,32
+ srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
+ addi r7,r5,32 # could be xori, or addi with -32
+ slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
+ rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
+ sraw r7,r3,r7 # t2 = MSW >> (count-32)
+ or r4,r4,r6 # LSW |= t1
+ slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
+ sraw r3,r3,r5 # MSW = MSW >> count
+ or r4,r4,r7 # LSW |= t2
+ blr
+
+ .globl __ashldi3
+__ashldi3:
+ subfic r6,r5,32
+ slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
+ addi r7,r5,32 # could be xori, or addi with -32
+ srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
+ slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
+ or r3,r3,r6 # MSW |= t1
+ slw r4,r4,r5 # LSW = LSW << count
+ or r3,r3,r7 # MSW |= t2
+ blr
+
+ .globl __lshrdi3
+__lshrdi3:
+ subfic r6,r5,32
+ srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
+ addi r7,r5,32 # could be xori, or addi with -32
+ slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
+ srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
+ or r4,r4,r6 # LSW |= t1
+ srw r3,r3,r5 # MSW = MSW >> count
+ or r4,r4,r7 # LSW |= t2
+ blr
diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
index 261d10c4534..227290db866 100644
--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
+++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
@@ -256,7 +256,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <400000000>;
interrupts = <9 0x8>;
@@ -266,7 +266,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <400000000>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts
new file mode 100644
index 00000000000..b801dd06e57
--- /dev/null
+++ b/arch/powerpc/boot/dts/currituck.dts
@@ -0,0 +1,237 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000; // spin table
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "ibm,currituck";
+ compatible = "ibm,currituck";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,476";
+ reg = <0>;
+ clock-frequency = <1600000000>; // 1.6 GHz
+ timebase-frequency = <100000000>; // 100Mhz
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "ok";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ model = "PowerPC,476";
+ reg = <1>;
+ clock-frequency = <1600000000>; // 1.6 GHz
+ timebase-frequency = <100000000>; // 100Mhz
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0x0 0x01f00000>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x0>; // filled in by zImage
+ };
+
+ MPIC: interrupt-controller {
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ dcr-reg = <0xffc00000 0x00040000>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ };
+
+ plb {
+ compatible = "ibm,plb6";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clock-frequency = <200000000>; // 200Mhz
+
+ POB0: opb {
+ compatible = "ibm,opb-4xx", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* Wish there was a nicer way of specifying a full
+ * 32-bit range
+ */
+ ranges = <0x00000000 0x00000200 0x00000000 0x80000000
+ 0x80000000 0x00000200 0x80000000 0x80000000>;
+ clock-frequency = <100000000>;
+
+ UART0: serial@10000000 {
+ device_type = "serial";
+ compatible = "ns16750", "ns16550";
+ reg = <0x10000000 0x00000008>;
+ virtual-reg = <0xe1000000>;
+ clock-frequency = <1851851>; // PCIe refclk/MCGC0_CTL[UART]
+ current-speed = <115200>;
+ interrupt-parent = <&MPIC>;
+ interrupts = <34 2>;
+ };
+
+ IIC0: i2c@00000000 {
+ compatible = "ibm,iic-currituck", "ibm,iic";
+ reg = <0x0 0x00000014>;
+ interrupt-parent = <&MPIC>;
+ interrupts = <79 2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ rtc@68 {
+ compatible = "stm,m41t80", "m41st85";
+ reg = <0x68>;
+ };
+ };
+ };
+
+ PCIE0: pciex@10100000000 { // 4xGBIF1
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+ primary;
+ port = <0x0>; /* port number */
+ reg = <0x00000101 0x00000000 0x0 0x10000000 /* Config space access */
+ 0x00000100 0x00000000 0x0 0x00001000>; /* UTL Registers space access */
+ dcr-reg = <0x80 0x20>;
+
+// pci_space < pci_addr > < cpu_addr > < size >
+ ranges = <0x02000000 0x00000000 0x80000000 0x00000110 0x80000000 0x0 0x80000000
+ 0x01000000 0x0 0x0 0x00000140 0x0 0x0 0x00010000>;
+
+ /* Inbound starting at 0 to memsize filled in by zImage */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+ /* This drives busses 0 to 0xf */
+ bus-range = <0x0 0xf>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &MPIC 46 0x2 /* int A */
+ 0x0 0x0 0x0 0x2 &MPIC 47 0x2 /* int B */
+ 0x0 0x0 0x0 0x3 &MPIC 48 0x2 /* int C */
+ 0x0 0x0 0x0 0x4 &MPIC 49 0x2 /* int D */>;
+ };
+
+ PCIE1: pciex@30100000000 { // 4xGBIF0
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+ primary;
+ port = <0x1>; /* port number */
+ reg = <0x00000301 0x00000000 0x0 0x10000000 /* Config space access */
+ 0x00000300 0x00000000 0x0 0x00001000>; /* UTL Registers space access */
+ dcr-reg = <0x60 0x20>;
+
+ ranges = <0x02000000 0x00000000 0x80000000 0x00000310 0x80000000 0x0 0x80000000
+ 0x01000000 0x0 0x0 0x00000340 0x0 0x0 0x00010000>;
+
+ /* Inbound starting at 0 to memsize filled in by zImage */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+ /* This drives busses 0 to 0xf */
+ bus-range = <0x0 0xf>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &MPIC 38 0x2 /* int A */
+ 0x0 0x0 0x0 0x2 &MPIC 39 0x2 /* int B */
+ 0x0 0x0 0x0 0x3 &MPIC 40 0x2 /* int C */
+ 0x0 0x0 0x0 0x4 &MPIC 41 0x2 /* int D */>;
+ };
+
+ PCIE2: pciex@38100000000 { // 2xGBIF0
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-476fpe", "ibm,plb-pciex";
+ primary;
+ port = <0x2>; /* port number */
+ reg = <0x00000381 0x00000000 0x0 0x10000000 /* Config space access */
+ 0x00000380 0x00000000 0x0 0x00001000>; /* UTL Registers space access */
+ dcr-reg = <0xA0 0x20>;
+
+ ranges = <0x02000000 0x00000000 0x80000000 0x00000390 0x80000000 0x0 0x80000000
+ 0x01000000 0x0 0x0 0x000003C0 0x0 0x0 0x00010000>;
+
+ /* Inbound starting at 0 to memsize filled in by zImage */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x0>;
+
+ /* This drives busses 0 to 0xf */
+ bus-range = <0x0 0xf>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &MPIC 54 0x2 /* int A */
+ 0x0 0x0 0x0 0x2 &MPIC 55 0x2 /* int B */
+ 0x0 0x0 0x0 0x3 &MPIC 56 0x2 /* int C */
+ 0x0 0x0 0x0 0x4 &MPIC 57 0x2 /* int D */>;
+ };
+
+ };
+
+ chosen {
+ linux,stdout-path = &UART0;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
new file mode 100644
index 00000000000..89af6263770
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
@@ -0,0 +1,248 @@
+/*
+ * MPC8536 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8536-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupts = <24 0x2 0 0>;
+ bus-range = <0 0xff>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <25 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <25 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci2 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xb000 */
+&pci3 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <27 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <27 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+ >;
+ };
+};
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8536-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8536-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8536-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+ spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+
+ /* mark compat w/8572 to get some erratum treatment */
+ gpio-controller@f000 {
+ compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio";
+ };
+
+ sata@18000 {
+ compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+ reg = <0x18000 0x1000>;
+ cell-index = <1>;
+ interrupts = <74 0x2 0 0>;
+ };
+
+ sata@19000 {
+ compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+ reg = <0x19000 0x1000>;
+ cell-index = <2>;
+ interrupts = <41 0x2 0 0>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8536-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+ usb@22000 {
+ compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+ reg = <0x22000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <28 0x2 0 0>;
+ };
+
+ usb@23000 {
+ compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+ reg = <0x23000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <46 0x2 0 0>;
+ };
+
+ ptp_clock@24e00 {
+ interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>;
+ };
+
+/include/ "pq3-etsec1-2.dtsi"
+
+ ethernet@26000 {
+ cell-index = <1>;
+ };
+
+ usb@2b000 {
+ compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
+ reg = <0x2b000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <60 0x2 0 0>;
+ };
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.0-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,mpc8536-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
new file mode 100644
index 00000000000..7de45a784df
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
@@ -0,0 +1,63 @@
+/*
+ * MPC8536 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8536";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8536@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
new file mode 100644
index 00000000000..b68eb119fae
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-post.dtsi
@@ -0,0 +1,191 @@
+/*
+ * MPC8544 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8544-lbc", "fsl,pq3-localbus", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupts = <24 0x2 0 0>;
+ bus-range = <0 0xff>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <25 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <25 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci2 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xb000 */
+&pci3 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <27 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <27 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8544-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <10>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8544-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8544-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8544-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2, 256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+
+ ethernet@26000 {
+ cell-index = <1>;
+ };
+
+/include/ "pq3-sec2.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,mpc8544-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
new file mode 100644
index 00000000000..8777f9239d9
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
@@ -0,0 +1,63 @@
+/*
+ * MPC8544 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8544";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8544@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
new file mode 100644
index 00000000000..9d8023a69d7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
@@ -0,0 +1,143 @@
+/*
+ * MPC8548 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8548-lbc", "fsl,pq3-localbus", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupts = <24 0x2 0 0>;
+ bus-range = <0 0xff>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupts = <25 0x2 0 0>;
+ bus-range = <0 0xff>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+};
+
+/* controller at 0xa000 */
+&pci2 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8548-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <10>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8548-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8548-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8548-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-etsec1-3.dtsi"
+
+/include/ "pq3-sec2.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,mpc8548-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
new file mode 100644
index 00000000000..289f1218d75
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
@@ -0,0 +1,62 @@
+/*
+ * MPC8548 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8548";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8548@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi
new file mode 100644
index 00000000000..64e7075a9cd
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8568si-post.dtsi
@@ -0,0 +1,270 @@
+/*
+ * MPC8568 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus", "simple-bus";
+ interrupts = <19 2 0 0>;
+ sleep = <&pmc 0x08000000>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupts = <24 0x2 0 0>;
+ bus-range = <0 0xff>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ sleep = <&pmc 0x80000000>;
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+ sleep = <&pmc 0x20000000>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <48 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ sleep = <&pmc 0x00080000>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8568-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <10>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8568-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8568-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+ i2c-sleep-nexus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000004>;
+ ranges;
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+
+ };
+
+ duart-sleep-nexus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000002>;
+ ranges;
+
+/include/ "pq3-duart-0.dtsi"
+
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8568-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+ dma@21300 {
+ sleep = <&pmc 0x00000400>;
+ };
+
+/include/ "pq3-etsec1-0.dtsi"
+ ethernet@24000 {
+ sleep = <&pmc 0x00000080>;
+ };
+
+/include/ "pq3-etsec1-1.dtsi"
+ ethernet@25000 {
+ sleep = <&pmc 0x00000040>;
+ };
+
+ par_io@e0100 {
+ reg = <0xe0100 0x100>;
+ device_type = "par_io";
+ };
+
+/include/ "pq3-sec2.1-0.dtsi"
+ crypto@30000 {
+ sleep = <&pmc 0x01000000>;
+ };
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-rmu-0.dtsi"
+ rmu@d3000 {
+ sleep = <&pmc 0x00040000>;
+ };
+
+ global-utilities@e0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
+ reg = <0xe0000 0x1000>;
+ ranges = <0 0xe0000 0x1000>;
+ fsl,has-rstcr;
+
+ pmc: power@70 {
+ compatible = "fsl,mpc8568-pmc",
+ "fsl,mpc8548-pmc";
+ reg = <0x70 0x20>;
+ };
+ };
+};
+
+&qe {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "qe";
+ compatible = "fsl,qe";
+ sleep = <&pmc 0x00000800>;
+ brg-frequency = <0>;
+ bus-frequency = <396000000>;
+ fsl,qe-num-riscs = <2>;
+ fsl,qe-num-snums = <28>;
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@4c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,spi";
+ reg = <0x4c0 0x40>;
+ cell-index = <0>;
+ interrupts = <2>;
+ interrupt-parent = <&qeic>;
+ };
+
+ spi@500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl,spi";
+ reg = <0x500 0x40>;
+ interrupts = <1>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@3000 {
+ cell-index = <2>;
+ reg = <0x3000 0x200>;
+ interrupts = <33>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x10000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x10000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
new file mode 100644
index 00000000000..eacd62c5fe6
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
@@ -0,0 +1,65 @@
+/*
+ * MPC8568 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8568";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8568@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ sleep = <&pmc 0x00008000 // core
+ &pmc 0x00004000>; // timebase
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi
new file mode 100644
index 00000000000..3e6346a4a18
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8569si-post.dtsi
@@ -0,0 +1,304 @@
+/*
+ * MPC8569 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+ sleep = <&pmc 0x08000000>;
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+ sleep = <&pmc 0x20000000>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <48 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ sleep = <&pmc 0x00080000>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8569-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <10>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8569-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8569-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+ i2c-sleep-nexus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000004>;
+ ranges;
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+
+ };
+
+ duart-sleep-nexus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000002>;
+ ranges;
+
+/include/ "pq3-duart-0.dtsi"
+
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8569-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ sleep = <&pmc 0x00200000>;
+ };
+
+ par_io@e0100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe0100 0x100>;
+ ranges = <0x0 0xe0100 0x100>;
+ device_type = "par_io";
+ };
+
+/include/ "pq3-sec3.1-0.dtsi"
+ crypto@30000 {
+ sleep = <&pmc 0x01000000>;
+ };
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-rmu-0.dtsi"
+ rmu@d3000 {
+ sleep = <&pmc 0x00040000>;
+ };
+
+ global-utilities@e0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
+ reg = <0xe0000 0x1000>;
+ ranges = <0 0xe0000 0x1000>;
+ fsl,has-rstcr;
+
+ pmc: power@70 {
+ compatible = "fsl,mpc8569-pmc",
+ "fsl,mpc8548-pmc";
+ reg = <0x70 0x20>;
+ };
+ };
+};
+
+&qe {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "qe";
+ compatible = "fsl,qe";
+ sleep = <&pmc 0x00000800>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ fsl,qe-num-riscs = <4>;
+ fsl,qe-num-snums = <46>;
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <46 2 0 0 46 2 0 0>; //high:30 low:30
+ interrupt-parent = <&mpic>;
+ };
+
+ timer@440 {
+ compatible = "fsl,mpc8569-qe-gtm",
+ "fsl,qe-gtm", "fsl,gtm";
+ reg = <0x440 0x40>;
+ interrupts = <12 13 14 15>;
+ interrupt-parent = <&qeic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ };
+
+ spi@4c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc8569-qe-spi", "fsl,spi";
+ reg = <0x4c0 0x40>;
+ cell-index = <0>;
+ interrupts = <2>;
+ interrupt-parent = <&qeic>;
+ };
+
+ spi@500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl,spi";
+ reg = <0x500 0x40>;
+ interrupts = <1>;
+ interrupt-parent = <&qeic>;
+ };
+
+ usb@6c0 {
+ compatible = "fsl,mpc8569-qe-usb",
+ "fsl,mpc8323-qe-usb";
+ reg = <0x6c0 0x40 0x8b00 0x100>;
+ interrupts = <11>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2200 {
+ cell-index = <3>;
+ reg = <0x2200 0x200>;
+ interrupts = <34>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@3000 {
+ cell-index = <2>;
+ reg = <0x3000 0x200>;
+ interrupts = <33>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@3200 {
+ cell-index = <4>;
+ reg = <0x3200 0x200>;
+ interrupts = <35>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@3400 {
+ cell-index = <6>;
+ reg = <0x3400 0x200>;
+ interrupts = <41>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@3600 {
+ cell-index = <8>;
+ reg = <0x3600 0x200>;
+ interrupts = <43>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x20000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x20000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
new file mode 100644
index 00000000000..b07064d1193
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
@@ -0,0 +1,64 @@
+/*
+ * MPC8569 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8569";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8569@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ sleep = <&pmc 0x00008000 // core
+ &pmc 0x00004000>; // timebase
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
new file mode 100644
index 00000000000..d44e25a4873
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-post.dtsi
@@ -0,0 +1,196 @@
+/*
+ * MPC8572 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x8000 */
+&pci0 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <24 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <24 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <25 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <25 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci2 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,mpc8572-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,mpc8572-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+ memory-controller@6000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x6000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-dma-1.dtsi"
+/include/ "pq3-gpio-0.dtsi"
+ gpio-controller@f000 {
+ compatible = "fsl,mpc8572-gpio", "fsl,pq3-gpio";
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8572-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x100000>; // L2,1M
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+ ptp_clock@24e00 {
+ interrupts = <68 2 0 0 69 2 0 0 70 2 0 0 71 2 0 0>;
+ };
+
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-etsec1-3.dtsi"
+/include/ "pq3-sec3.0-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,mpc8572-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
new file mode 100644
index 00000000000..ca188326c2c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
@@ -0,0 +1,70 @@
+/*
+ * MPC8572 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,MPC8572";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8572@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,8572@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
new file mode 100644
index 00000000000..bd9e163c764
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -0,0 +1,198 @@
+/*
+ * P1010/P1014 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <16 2 0 0 19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+ compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1010-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1010-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1010-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+ spi0: spi@7000 {
+ fsl,espi-num-chipselects = <1>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+/include/ "pq3-sata2-0.dtsi"
+/include/ "pq3-sata2-1.dtsi"
+
+ can0: can@1c000 {
+ compatible = "fsl,p1010-flexcan";
+ reg = <0x1c000 0x1000>;
+ interrupts = <48 0x2 0 0>;
+ };
+
+ can1: can@1d000 {
+ compatible = "fsl,p1010-flexcan";
+ reg = <0x1d000 0x1000>;
+ interrupts = <61 0x2 0 0>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1010-l2-cache-controller",
+ "fsl,p1014-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ fsl,sdhci-auto-cmd12;
+ };
+
+/include/ "pq3-sec4.4-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+ enet0: ethernet@b0000 {
+ queue-group@b0000 {
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ };
+ };
+
+/include/ "pq3-etsec2-1.dtsi"
+ enet1: ethernet@b1000 {
+ queue-group@b1000 {
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ };
+ };
+
+/include/ "pq3-etsec2-2.dtsi"
+ enet2: ethernet@b2000 {
+ queue-group@b2000 {
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ };
+
+ };
+
+ global-utilities@e0000 {
+ compatible = "fsl,p1010-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
new file mode 100644
index 00000000000..7354a8f90ea
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
@@ -0,0 +1,64 @@
+/*
+ * P1010/P1014 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1010";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ can0 = &can0;
+ can1 = &can1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1010@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
new file mode 100644
index 00000000000..fc924c5ffeb
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -0,0 +1,174 @@
+/*
+ * P1020/P1011 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1020-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+ spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-usb2-dr-1.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.3-0.dtsi"
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+ enet0: enet0_grp2: ethernet@b0000 {
+ };
+
+/include/ "pq3-etsec2-1.dtsi"
+ enet1: enet1_grp2: ethernet@b1000 {
+ };
+
+/include/ "pq3-etsec2-2.dtsi"
+ enet2: enet2_grp2: ethernet@b2000 {
+ };
+
+ global-utilities@e0000 {
+ compatible = "fsl,p1020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
+/include/ "pq3-etsec2-grp2-2.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
new file mode 100644
index 00000000000..6f0376e554e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1020/P1011 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
new file mode 100644
index 00000000000..38ba54d1e32
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -0,0 +1,225 @@
+/*
+ * P1021/P1012 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1021-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1021-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1021-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+ spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1021-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.3-0.dtsi"
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+ enet0: enet0_grp2: ethernet@b0000 {
+ };
+
+/include/ "pq3-etsec2-1.dtsi"
+ enet1: enet1_grp2: ethernet@b1000 {
+ };
+
+/include/ "pq3-etsec2-2.dtsi"
+ enet2: enet2_grp2: ethernet@b2000 {
+ };
+
+ global-utilities@e0000 {
+ compatible = "fsl,p1021-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
+
+&qe {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "qe";
+ compatible = "fsl,qe";
+ fsl,qe-num-riscs = <1>;
+ fsl,qe-num-snums = <28>;
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <63 2 0 0 60 2 0 0>; //high:47 low:44
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ mdio@2120 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2120 0x18>;
+ compatible = "fsl,ucc-mdio";
+ };
+
+ ucc@2400 {
+ cell-index = <5>;
+ reg = <0x2400 0x200>;
+ interrupts = <40>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x6000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x6000>;
+ };
+ };
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
+/include/ "pq3-etsec2-grp2-2.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
new file mode 100644
index 00000000000..4abd54bc330
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1021/P1012 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1021";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1021@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1021@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
new file mode 100644
index 00000000000..16239b199d0
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -0,0 +1,235 @@
+/*
+ * P1022/P1013 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0x9000 */
+&pci0 {
+ compatible = "fsl,p1022-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xa000 */
+&pci1 {
+ compatible = "fsl,p1022-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0xb000 */
+&pci2 {
+ compatible = "fsl,p1022-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1022-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1022-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1022-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+ spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-dma-1.dtsi"
+ dma@c300 {
+ dma00: dma-channel@0 {
+ compatible = "fsl,ssi-dma-channel";
+ };
+ dma01: dma-channel@80 {
+ compatible = "fsl,ssi-dma-channel";
+ };
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+
+ display@10000 {
+ compatible = "fsl,diu", "fsl,p1022-diu";
+ reg = <0x10000 1000>;
+ interrupts = <64 2 0 0>;
+ };
+
+ ssi@15000 {
+ compatible = "fsl,mpc8610-ssi";
+ cell-index = <0>;
+ reg = <0x15000 0x100>;
+ interrupts = <75 2 0 0>;
+ fsl,playback-dma = <&dma00>;
+ fsl,capture-dma = <&dma01>;
+ fsl,fifo-depth = <15>;
+ };
+
+/include/ "pq3-sata2-0.dtsi"
+/include/ "pq3-sata2-1.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1022-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-usb2-dr-1.dtsi"
+
+/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ fsl,sdhci-auto-cmd12;
+ };
+
+/include/ "pq3-sec3.3-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+/include/ "pq3-etsec2-0.dtsi"
+ enet0: enet0_grp2: ethernet@b0000 {
+ };
+
+/include/ "pq3-etsec2-1.dtsi"
+ enet1: enet1_grp2: ethernet@b1000 {
+ };
+
+ global-utilities@e0000 {
+ compatible = "fsl,p1022-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+
+ power@e0070{
+ compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
+ reg = <0xe0070 0x20>;
+ };
+
+};
+
+/include/ "pq3-etsec2-grp2-0.dtsi"
+/include/ "pq3-etsec2-grp2-1.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
new file mode 100644
index 00000000000..e930f4f7ca8
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -0,0 +1,68 @@
+/*
+ * P1022/P1013 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1022";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1022@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1022@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
new file mode 100644
index 00000000000..b06bb4cc1fe
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -0,0 +1,224 @@
+/*
+ * P1023/P1017 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0xa000 */
+&pci0 {
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ };
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ };
+};
+
+/* controller at 0xb000 */
+&pci2 {
+ compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 0 0>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 0 0>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1023-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1023-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1023-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+
+/include/ "pq3-espi-0.dtsi"
+ spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1023-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+
+ crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30000 0x10000>;
+ ranges = <0 0x30000 0x10000>;
+ interrupts = <58 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <57 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <57 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+ };
+
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,p1023-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
new file mode 100644
index 00000000000..ac45f6d9338
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
@@ -0,0 +1,76 @@
+/*
+ * P1023/P1017 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1023";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1023@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1023@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
new file mode 100644
index 00000000000..c041050561a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -0,0 +1,194 @@
+/*
+ * P2020/P2010 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <19 2 0 0>;
+};
+
+/* controller at 0xa000 */
+&pci0 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <26 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <26 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0x9000 */
+&pci1 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <25 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <25 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+ >;
+ };
+};
+
+/* controller at 0x8000 */
+&pci2 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupts = <24 2 0 0>;
+
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <24 2 0 0>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x8 0x1 0x0 0x0
+ 0000 0x0 0x0 0x2 &mpic 0x9 0x1 0x0 0x0
+ 0000 0x0 0x0 0x3 &mpic 0xa 0x1 0x0 0x0
+ 0000 0x0 0x0 0x4 &mpic 0xb 0x1 0x0 0x0
+ >;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p2020-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p2020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p2020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <18 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+/include/ "pq3-i2c-1.dtsi"
+/include/ "pq3-duart-0.dtsi"
+/include/ "pq3-espi-0.dtsi"
+ spi0: spi@7000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "pq3-dma-1.dtsi"
+/include/ "pq3-gpio-0.dtsi"
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p2020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2,512K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+/include/ "pq3-usb2-dr-0.dtsi"
+/include/ "pq3-etsec1-0.dtsi"
+/include/ "pq3-etsec1-timer-0.dtsi"
+
+ ptp_clock@24e00 {
+ interrupts = <68 2 0 0 69 2 0 0 70 2 0 0>;
+ };
+
+
+/include/ "pq3-etsec1-1.dtsi"
+/include/ "pq3-etsec1-2.dtsi"
+/include/ "pq3-esdhc-0.dtsi"
+/include/ "pq3-sec3.1-0.dtsi"
+/include/ "pq3-mpic.dtsi"
+/include/ "pq3-mpic-timer-B.dtsi"
+
+ global-utilities@e0000 {
+ compatible = "fsl,p2020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
new file mode 100644
index 00000000000..3213288641d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
@@ -0,0 +1,69 @@
+/*
+ * P2020/P2010 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P2020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P2020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P2020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
new file mode 100644
index 00000000000..234a399ddeb
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -0,0 +1,325 @@
+/*
+ * P2041/P2040 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x201000 */
+&pci1 {
+ compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x202000 */
+&pci2 {
+ compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,dcsr-npc";
+ reg = <0x1000 0x1000 0x1000000 0x8000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0xB0000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@40000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x40000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@41000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x41000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@42000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu2>;
+ reg = <0x42000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@43000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu3>;
+ reg = <0x43000 0x1000>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 2 1 27>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p2041-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ sdhci,auto-cmd12;
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+ usb0: usb@210000 {
+ phy_type = "utmi";
+ port0;
+ };
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+ usb1: usb@211000 {
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
new file mode 100644
index 00000000000..2d0a40d6b10
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
@@ -0,0 +1,111 @@
+/*
+ * P2041 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P2041";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,e500mc@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,e500mc@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
new file mode 100644
index 00000000000..d41d08de7f7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -0,0 +1,352 @@
+/*
+ * P3041 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x201000 */
+&pci1 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x202000 */
+&pci2 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x203000 */
+&pci3 {
+ compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 12>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 12>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 43 1 0 0
+ 0000 0 0 2 &mpic 0 1 0 0
+ 0000 0 0 3 &mpic 4 1 0 0
+ 0000 0 0 4 &mpic 8 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,dcsr-npc";
+ reg = <0x1000 0x1000 0x1000000 0x8000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0xB0000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,p3041-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,p3041-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,p3041-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,p3041-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@40000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x40000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@41000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x41000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@42000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu2>;
+ reg = <0x42000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@43000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu3>;
+ reg = <0x43000 0x1000>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 2 1 27>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p3041-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ sdhci,auto-cmd12;
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+ usb0: usb@210000 {
+ phy_type = "utmi";
+ port0;
+ };
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+ usb1: usb@211000 {
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
new file mode 100644
index 00000000000..136def3536b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
@@ -0,0 +1,112 @@
+/*
+ * P3041 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P3041";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,e500mc@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,e500mc@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
new file mode 100644
index 00000000000..a63edd195ae
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
@@ -0,0 +1,296 @@
+/*
+ * P3060 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x201000 */
+&pci1 {
+ compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,dcsr-npc";
+ reg = <0x1000 0x1000 0x1000000 0x8000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0xB0000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@40000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x40000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@41000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x41000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@44000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu4>;
+ reg = <0x44000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@45000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu5>;
+ reg = <0x45000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@46000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu6>;
+ reg = <0x46000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@47000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu7>;
+ reg = <0x47000 0x1000>;
+ };
+
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p3060-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x5000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-rmu-0.dtsi"
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p3060-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+/include/ "qoriq-usb2-dr-0.dtsi"
+/include/ "qoriq-sec4.1-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi
new file mode 100644
index 00000000000..00c8e70e7b9
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi
@@ -0,0 +1,125 @@
+/*
+ * P3060 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P3060";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu4: PowerPC,e500mc@4 {
+ device_type = "cpu";
+ reg = <4>;
+ next-level-cache = <&L2_4>;
+ L2_4: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu5: PowerPC,e500mc@5 {
+ device_type = "cpu";
+ reg = <5>;
+ next-level-cache = <&L2_5>;
+ L2_5: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu6: PowerPC,e500mc@6 {
+ device_type = "cpu";
+ reg = <6>;
+ next-level-cache = <&L2_6>;
+ L2_6: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu7: PowerPC,e500mc@7 {
+ device_type = "cpu";
+ reg = <7>;
+ next-level-cache = <&L2_7>;
+ L2_7: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
new file mode 100644
index 00000000000..8d35d2c1f69
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -0,0 +1,350 @@
+/*
+ * P4080/P4040 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x201000 */
+&pci1 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x202000 */
+&pci2 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,dcsr-npc";
+ reg = <0x1000 0x1000 0x1000000 0x8000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0xB0000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-ddr@13000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr2>;
+ reg = <0x13000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@40000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x40000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@41000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x41000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@42000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu2>;
+ reg = <0x42000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@43000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu3>;
+ reg = <0x43000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@44000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu4>;
+ reg = <0x44000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@45000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu5>;
+ reg = <0x45000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@46000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu6>;
+ reg = <0x46000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@47000 {
+ compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu7>;
+ reg = <0x47000 0x1000>;
+ };
+
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x5000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-rmu-0.dtsi"
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p4080-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ voltage-ranges = <3300 3300>;
+ sdhci,auto-cmd12;
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+/include/ "qoriq-usb2-dr-0.dtsi"
+/include/ "qoriq-sec4.0-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
new file mode 100644
index 00000000000..b9556ee3a63
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
@@ -0,0 +1,143 @@
+/*
+ * P4080/P4040 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P4080";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e500mc@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e500mc@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu2: PowerPC,e500mc@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu3: PowerPC,e500mc@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu4: PowerPC,e500mc@4 {
+ device_type = "cpu";
+ reg = <4>;
+ next-level-cache = <&L2_4>;
+ L2_4: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu5: PowerPC,e500mc@5 {
+ device_type = "cpu";
+ reg = <5>;
+ next-level-cache = <&L2_5>;
+ L2_5: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu6: PowerPC,e500mc@6 {
+ device_type = "cpu";
+ reg = <6>;
+ next-level-cache = <&L2_6>;
+ L2_6: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu7: PowerPC,e500mc@7 {
+ device_type = "cpu";
+ reg = <7>;
+ next-level-cache = <&L2_7>;
+ L2_7: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
new file mode 100644
index 00000000000..914074b91a8
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -0,0 +1,355 @@
+/*
+ * P5020/5010 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
+ interrupts = <25 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+};
+
+/* controller at 0x200000 */
+&pci0 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 15>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 15>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x201000 */
+&pci1 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 14>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 14>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1 0 0
+ 0000 0 0 2 &mpic 5 1 0 0
+ 0000 0 0 3 &mpic 6 1 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x202000 */
+&pci2 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 13>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 13>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 1 0 0
+ >;
+ };
+};
+
+/* controller at 0x203000 */
+&pci3 {
+ compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ bus-range = <0x0 0xff>;
+ clock-frequency = <33333333>;
+ interrupts = <16 2 1 12>;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ interrupts = <16 2 1 12>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 43 1 0 0
+ 0000 0 0 2 &mpic 0 1 0 0
+ 0000 0 0 3 &mpic 4 1 0 0
+ 0000 0 0 4 &mpic 8 1 0 0
+ >;
+ };
+};
+
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <16 2 1 11>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+
+ port2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <2>;
+ };
+};
+
+&dcsr {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,dcsr", "simple-bus";
+
+ dcsr-epu@0 {
+ compatible = "fsl,dcsr-epu";
+ interrupts = <52 2 0 0
+ 84 2 0 0
+ 85 2 0 0>;
+ reg = <0x0 0x1000>;
+ };
+ dcsr-npc {
+ compatible = "fsl,dcsr-npc";
+ reg = <0x1000 0x1000 0x1000000 0x8000>;
+ };
+ dcsr-nxc@2000 {
+ compatible = "fsl,dcsr-nxc";
+ reg = <0x2000 0x1000>;
+ };
+ dcsr-corenet {
+ compatible = "fsl,dcsr-corenet";
+ reg = <0x8000 0x1000 0xB0000 0x1000>;
+ };
+ dcsr-dpaa@9000 {
+ compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa";
+ reg = <0x9000 0x1000>;
+ };
+ dcsr-ocn@11000 {
+ compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn";
+ reg = <0x11000 0x1000>;
+ };
+ dcsr-ddr@12000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr1>;
+ reg = <0x12000 0x1000>;
+ };
+ dcsr-ddr@13000 {
+ compatible = "fsl,dcsr-ddr";
+ dev-handle = <&ddr2>;
+ reg = <0x13000 0x1000>;
+ };
+ dcsr-nal@18000 {
+ compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal";
+ reg = <0x18000 0x1000>;
+ };
+ dcsr-rcpm@22000 {
+ compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm";
+ reg = <0x22000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@40000 {
+ compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu0>;
+ reg = <0x40000 0x1000>;
+ };
+ dcsr-cpu-sb-proxy@41000 {
+ compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
+ cpu-handle = <&cpu1>;
+ reg = <0x41000 0x1000>;
+ };
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+
+ soc-sram-error {
+ compatible = "fsl,soc-sram-error";
+ interrupts = <16 2 1 29>;
+ };
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ ddr1: memory-controller@8000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupts = <16 2 1 23>;
+ };
+
+ ddr2: memory-controller@9000 {
+ compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupts = <16 2 1 22>;
+ };
+
+ cpc: l3-cache-controller@10000 {
+ compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
+ reg = <0x10000 0x1000
+ 0x11000 0x1000>;
+ interrupts = <16 2 1 27
+ 16 2 1 26>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ interrupts = <16 2 1 31>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,pamu-v1.0", "fsl,pamu";
+ reg = <0x20000 0x4000>;
+ interrupts = <
+ 24 2 0 0
+ 16 2 1 30>;
+ };
+
+/include/ "qoriq-mpic.dtsi"
+
+ guts: global-utilities@e0000 {
+ compatible = "fsl,qoriq-device-config-1.0";
+ reg = <0xe0000 0xe00>;
+ fsl,has-rstcr;
+ #sleep-cells = <1>;
+ fsl,liodn-bits = <12>;
+ };
+
+ pins: global-utilities@e0e00 {
+ compatible = "fsl,qoriq-pin-control-1.0";
+ reg = <0xe0e00 0x200>;
+ #sleep-cells = <2>;
+ };
+
+ clockgen: global-utilities@e1000 {
+ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
+ reg = <0xe1000 0x1000>;
+ clock-frequency = <0>;
+ };
+
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
+
+ sfp: sfp@e8000 {
+ compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
+ reg = <0xe8000 0x1000>;
+ };
+
+ serdes: serdes@ea000 {
+ compatible = "fsl,p5020-serdes";
+ reg = <0xea000 0x1000>;
+ };
+
+/include/ "qoriq-dma-0.dtsi"
+/include/ "qoriq-dma-1.dtsi"
+/include/ "qoriq-espi-0.dtsi"
+ spi@110000 {
+ fsl,espi-num-chipselects = <4>;
+ };
+
+/include/ "qoriq-esdhc-0.dtsi"
+ sdhc@114000 {
+ sdhci,auto-cmd12;
+ };
+
+/include/ "qoriq-i2c-0.dtsi"
+/include/ "qoriq-i2c-1.dtsi"
+/include/ "qoriq-duart-0.dtsi"
+/include/ "qoriq-duart-1.dtsi"
+/include/ "qoriq-gpio-0.dtsi"
+/include/ "qoriq-usb2-mph-0.dtsi"
+ usb0: usb@210000 {
+ phy_type = "utmi";
+ port0;
+ };
+
+/include/ "qoriq-usb2-dr-0.dtsi"
+ usb1: usb@211000 {
+ dr_mode = "host";
+ phy_type = "utmi";
+ };
+
+/include/ "qoriq-sata2-0.dtsi"
+/include/ "qoriq-sata2-1.dtsi"
+/include/ "qoriq-sec4.2-0.dtsi"
+};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
new file mode 100644
index 00000000000..ae823a47584
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
@@ -0,0 +1,96 @@
+/*
+ * P5020/P5010 Silicon/SoC Device Tree Source (pre include)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P5020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
+
+ aliases {
+ ccsr = &soc;
+ dcsr = &dcsr;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+ msi0 = &msi0;
+ msi1 = &msi1;
+ msi2 = &msi2;
+
+ crypto = &crypto;
+ sec_jr0 = &sec_jr0;
+ sec_jr1 = &sec_jr1;
+ sec_jr2 = &sec_jr2;
+ sec_jr3 = &sec_jr3;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,e5500@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ cpu1: PowerPC,e5500@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ next-level-cache = <&cpc>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi
new file mode 100644
index 00000000000..b5b37ad30e7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-dma-0.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 DMA device tree stub [ controller @ offset 0x21000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <20 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <21 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <22 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <23 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi
new file mode 100644
index 00000000000..28cb8a55d80
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-dma-1.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 DMA device tree stub [ controller @ offset 0xc300 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma@c300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0xc300 0x4>;
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <76 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <77 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <78 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <79 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi
new file mode 100644
index 00000000000..5e268fdb9d1
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-duart-0.dtsi
@@ -0,0 +1,51 @@
+/*
+ * PQ3 DUART device tree stub [ controller @ offset 0x4000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2 0 0>;
+};
+
+serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi
new file mode 100644
index 00000000000..5743433e278
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-esdhc-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 eSDHC device tree stub [ controller @ offset 0x2e000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sdhc@2e000 {
+ compatible = "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x2 0 0>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi
new file mode 100644
index 00000000000..75854b2e039
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-espi-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 eSPI device tree stub [ controller @ offset 0x7000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+spi@7000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc8536-espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
new file mode 100644
index 00000000000..a1979ae334a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x24000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@24000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ ranges = <0x0 0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
+};
+
+mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
new file mode 100644
index 00000000000..4c4fdde1ec2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x25000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@25000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ ranges = <0x0 0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
+};
+
+mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
new file mode 100644
index 00000000000..4b8ab438668
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x26000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@26000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <2>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x26000 0x1000>;
+ ranges = <0x0 0x26000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
+};
+
+mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
new file mode 100644
index 00000000000..40c9137729a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
@@ -0,0 +1,53 @@
+/*
+ * PQ3 eTSEC device tree stub [ @ offsets 0x27000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ethernet@27000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <3>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x27000 0x1000>;
+ ranges = <0x0 0x27000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>;
+};
+
+mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi
new file mode 100644
index 00000000000..efe2ca04bce
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-timer-0.dtsi
@@ -0,0 +1,39 @@
+/*
+ * PQ3 eTSEC Timer (IEEE 1588) device tree stub [ @ offsets 0x24e00 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ptp_clock@24e00 {
+ compatible = "fsl,etsec-ptp";
+ reg = <0x24e00 0xb0>;
+ interrupts = <68 2 0 0 69 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
new file mode 100644
index 00000000000..1382fec9e8c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
@@ -0,0 +1,60 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x24000/0xb0000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+mdio@24000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-mdio";
+ reg = <0x24000 0x1000 0xb0030 0x4>;
+};
+
+ethernet@b0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ fsl,magic-packet;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+
+ queue-group@b0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000 0x1000>;
+ interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
new file mode 100644
index 00000000000..221cd2ea5b3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
@@ -0,0 +1,60 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x25000/0xb1000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+mdio@25000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x25000 0x1000 0xb1030 0x4>;
+};
+
+ethernet@b1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ fsl,magic-packet;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+
+ queue-group@b1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb1000 0x1000>;
+ interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
new file mode 100644
index 00000000000..61456c31760
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
@@ -0,0 +1,59 @@
+/*
+ * PQ3 eTSEC2 device tree stub [ @ offsets 0x26000/0xb2000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mdio@26000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x26000 0x1000 0xb1030 0x4>;
+};
+
+ethernet@b2000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ fsl,magic-packet;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+
+ queue-group@b2000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb2000 0x1000>;
+ interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi
new file mode 100644
index 00000000000..034ab8fac22
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-0.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb4000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet0_grp2 {
+ queue-group@b4000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb4000 0x1000>;
+ interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
+ };
+};
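
The &enet0_grp2 reference in this stub only resolves if the including .dtsi has already attached that label to the matching eTSEC2 ethernet node before the stub is included. A minimal sketch of that contract — the double label, the soc wrapper, and the include order are assumptions about how such a .dtsi would be laid out, not taken from this patch:

/* sketch: the including .dtsi defines the label, then the group-2 stub extends it */
&soc {
/include/ "pq3-etsec2-0.dtsi"
	enet0: enet0_grp2: ethernet@b0000 {
		/* queue-group@b0000 comes from pq3-etsec2-0.dtsi */
	};
};
/include/ "pq3-etsec2-grp2-0.dtsi"	/* appends queue-group@b4000 to the node above */
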
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi
new file mode 100644
index 00000000000..3be9ba3b374
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-1.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb5000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet1_grp2 {
+ queue-group@b5000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb5000 0x1000>;
+ interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi
new file mode 100644
index 00000000000..02a33457048
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-grp2-2.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 eTSEC2 Group 2 device tree stub [ @ offsets 0xb6000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&enet2_grp2 {
+ queue-group@b6000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb6000 0x1000>;
+ interrupts = <25 2 0 0 26 2 0 0 27 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
new file mode 100644
index 00000000000..72a3ef5945c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-gpio-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 GPIO device tree stub [ controller @ offset 0xf000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,pq3-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2 0 0>;
+ gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi
new file mode 100644
index 00000000000..d1dd6fb82a7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-i2c-0.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 I2C device tree stub [ controller @ offset 0x3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2 0 0>;
+ dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi
new file mode 100644
index 00000000000..a9bd803e209
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-i2c-1.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 I2C device tree stub [ controller @ offset 0x3100 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2 0 0>;
+ dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi
new file mode 100644
index 00000000000..8734cffae1a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-timer-B.dtsi
@@ -0,0 +1,42 @@
+/*
+ * PQ3 MPIC Timer (Group B) device tree stub [ controller @ offset 0x42100 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+timer@42100 {
+ compatible = "fsl,mpic-global-timer";
+ reg = <0x42100 0x100 0x42300 4>;
+ interrupts = <4 0 3 0
+ 5 0 3 0
+ 6 0 3 0
+ 7 0 3 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
new file mode 100644
index 00000000000..5c804606584
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -0,0 +1,66 @@
+/*
+ * PQ3 MPIC device tree stub [ controller @ offset 0x40000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic";
+ device_type = "open-pic";
+};
+
+timer@41100 {
+ compatible = "fsl,mpic-global-timer";
+ reg = <0x41100 0x100 0x41300 4>;
+ interrupts = <0 0 3 0
+ 1 0 3 0
+ 2 0 3 0
+ 3 0 3 0>;
+};
+
+msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi
new file mode 100644
index 00000000000..587ca9ffad7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-rmu-0.dtsi
@@ -0,0 +1,68 @@
+/*
+ * PQ3 RIO Message Unit device tree stub [ controller @ offset 0xd3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+rmu: rmu@d3000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,srio-rmu";
+ reg = <0xd3000 0x500>;
+ ranges = <0x0 0xd3000 0x500>;
+
+ message-unit@0 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x0 0x100>;
+ interrupts = <
+ 53 2 0 0 /* msg1_tx_irq */
+ 54 2 0 0>;/* msg1_rx_irq */
+ };
+ message-unit@100 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x100 0x100>;
+ interrupts = <
+ 55 2 0 0 /* msg2_tx_irq */
+ 56 2 0 0>;/* msg2_rx_irq */
+ };
+ doorbell-unit@400 {
+ compatible = "fsl,srio-dbell-unit";
+ reg = <0x400 0x80>;
+ interrupts = <
+ 49 2 0 0 /* bell_outb_irq */
+ 50 2 0 0>;/* bell_inb_irq */
+ };
+ port-write-unit@4e0 {
+ compatible = "fsl,srio-port-write-unit";
+ reg = <0x4e0 0x20>;
+ interrupts = <48 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi
new file mode 100644
index 00000000000..3c28dd08d38
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sata2-0.dtsi
@@ -0,0 +1,40 @@
+/*
+ * PQ3 SATAv2 device tree stub [ controller @ offset 0x18000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@18000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x18000 0x1000>;
+ cell-index = <1>;
+ interrupts = <74 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi
new file mode 100644
index 00000000000..eefaf2855e3
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sata2-1.dtsi
@@ -0,0 +1,40 @@
+/*
+ * PQ3 SATAv2 device tree stub [ controller @ offset 0x19000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@19000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x19000 0x1000>;
+ cell-index = <2>;
+ interrupts = <41 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi
new file mode 100644
index 00000000000..02a5c7ae72d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec2.1-0.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 Sec/Crypto 2.1 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+ compatible = "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 0 0>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xfe>;
+ fsl,descriptor-types-mask = <0x12b0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi
new file mode 100644
index 00000000000..bba1ba44ccf
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.0-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.0 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+ compatible = "fsl,sec3.0",
+ "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+ "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 0 0 58 2 0 0>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x9fe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi
new file mode 100644
index 00000000000..8f0a5669bee
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.1-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.1 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+ compatible = "fsl,sec3.1", "fsl,sec3.0",
+ "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+ "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 0 0 58 2 0 0>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xbfe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi
new file mode 100644
index 00000000000..c227f2748a2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec3.3-0.dtsi
@@ -0,0 +1,45 @@
+/*
+ * PQ3 Sec/Crypto 3.3 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+ compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0",
+ "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
+ "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 0 0 58 2 0 0>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x97c>;
+ fsl,descriptor-types-mask = <0x3a30abf>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
new file mode 100644
index 00000000000..bf957a7fca2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
@@ -0,0 +1,65 @@
+/*
+ * PQ3 Sec/Crypto 4.4 device tree stub [ controller @ offset 0x30000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto@30000 {
+ compatible = "fsl,sec4.4", "fsl,sec4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30000 0x10000>;
+ interrupts = <58 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <45 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi
new file mode 100644
index 00000000000..185ab9dc3ec
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 USB DR device tree stub [ controller @ offset 0x22000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@22000 {
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <28 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi
new file mode 100644
index 00000000000..fe24cd612ff
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-usb2-dr-1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * PQ3 USB DR device tree stub [ controller @ offset 0x23000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@23000 {
+ compatible = "fsl-usb2-dr";
+ reg = <0x23000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <46 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi
new file mode 100644
index 00000000000..1aebf3ea4ca
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-dma-0.dtsi
@@ -0,0 +1,66 @@
+/*
+ * QorIQ DMA device tree stub [ controller @ offset 0x100000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <28 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <29 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <30 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <31 2 0 0>;
+ };
+};
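The stub is meant to be pulled verbatim into the SoC's post-include file; a board that inherits it can then retarget the controller through its dma0 label. A minimal sketch, assuming a board that wants the first DMA block switched off:

	/* hypothetical board-level override, illustration only */
	&dma0 {
		status = "disabled";	/* assumed board policy */
	};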
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi
new file mode 100644
index 00000000000..ecf5e180fe7
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-dma-1.dtsi
@@ -0,0 +1,66 @@
+/*
+ * QorIQ DMA device tree stub [ controller @ offset 0x101000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupts = <32 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <33 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <34 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <35 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi
new file mode 100644
index 00000000000..225c07b4e8a
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-duart-0.dtsi
@@ -0,0 +1,51 @@
+/*
+ * QorIQ DUART device tree stub [ controller @ offset 0x11c000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+};
+
+serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2 0 0>;
+};
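Both clock-frequency properties are deliberately left at zero so that the boot loader, or a board file, can supply the real platform clock; a minimal board-level sketch, assuming a 400 MHz system clock:

	/* hypothetical board-level override, clock value assumed */
	&serial0 {
		clock-frequency = <400000000>;
	};

	&serial1 {
		clock-frequency = <400000000>;
	};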
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi
new file mode 100644
index 00000000000..d23233a56b9
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-duart-1.dtsi
@@ -0,0 +1,51 @@
+/*
+ * QorIQ DUART device tree stub [ controller @ offset 0x11d000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+};
+
+serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi
new file mode 100644
index 00000000000..20835ae216c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-esdhc-0.dtsi
@@ -0,0 +1,40 @@
+/*
+ * QorIQ eSDHC device tree stub [ controller @ offset 0x114000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sdhc: sdhc@114000 {
+ compatible = "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2 0 0>;
+ clock-frequency = <0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi
new file mode 100644
index 00000000000..6db06975e09
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-espi-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ eSPI device tree stub [ controller @ offset 0x110000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+spi@110000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2 0 0>;
+};
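Board trees extend this controller with the number of wired chip selects and the attached SPI slaves; the fragment below is a sketch with an assumed SPI NOR flash part and clock limit:

	/* hypothetical board-level fragment, placed inside the soc node */
	spi@110000 {
		fsl,espi-num-chipselects = <4>;

		flash@0 {
			#address-cells = <1>;
			#size-cells = <1>;
			compatible = "spansion,s25sl12801";	/* assumed flash part */
			reg = <0>;
			spi-max-frequency = <40000000>;		/* assumed limit */
		};
	};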
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi
new file mode 100644
index 00000000000..cf714f5f68b
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-gpio-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ GPIO device tree stub [ controller @ offset 0x130000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+gpio0: gpio@130000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2 0 0>;
+ #gpio-cells = <2>;
+ gpio-controller;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi
new file mode 100644
index 00000000000..5f9bf7debe4
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-i2c-0.dtsi
@@ -0,0 +1,53 @@
+/*
+ * QorIQ I2C device tree stub [ controller @ offset 0x118000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+};
+
+i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2 0 0>;
+ dfsrr;
+};
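The controller nodes are left childless on purpose; each board lists its own I2C slaves underneath them. A minimal sketch, assuming a DS1374 RTC at address 0x68 on the first bus:

	/* hypothetical board-level fragment, placed inside the soc node */
	i2c@118000 {
		rtc@68 {
			compatible = "dallas,ds1374";	/* assumed RTC part */
			reg = <0x68>;
		};
	};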
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi
new file mode 100644
index 00000000000..7989bf5eeb5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-i2c-1.dtsi
@@ -0,0 +1,53 @@
+/*
+ * QorIQ I2C device tree stub [ controller @ offset 0x119000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+};
+
+i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2 0 0>;
+ dfsrr;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
new file mode 100644
index 00000000000..b9bada6a87d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
@@ -0,0 +1,106 @@
+/*
+ * QorIQ MPIC device tree stub [ controller @ offset 0x40000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <4>;
+ reg = <0x40000 0x40000>;
+ compatible = "fsl,mpic", "chrp,open-pic";
+ device_type = "open-pic";
+ clock-frequency = <0x0>;
+};
+
+timer@41100 {
+ compatible = "fsl,mpic-global-timer";
+ reg = <0x41100 0x100 0x41300 4>;
+ interrupts = <0 0 3 0
+ 1 0 3 0
+ 2 0 3 0
+ 3 0 3 0>;
+};
+
+msi0: msi@41600 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41600 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
+};
+
+msi1: msi@41800 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41800 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe8 0 0 0
+ 0xe9 0 0 0
+ 0xea 0 0 0
+ 0xeb 0 0 0
+ 0xec 0 0 0
+ 0xed 0 0 0
+ 0xee 0 0 0
+ 0xef 0 0 0>;
+};
+
+msi2: msi@41a00 {
+ compatible = "fsl,mpic-msi";
+ reg = <0x41a00 0x200>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xf0 0 0 0
+ 0xf1 0 0 0
+ 0xf2 0 0 0
+ 0xf3 0 0 0
+ 0xf4 0 0 0
+ 0xf5 0 0 0
+ 0xf6 0 0 0
+ 0xf7 0 0 0>;
+};
+
+timer@42100 {
+ compatible = "fsl,mpic-global-timer";
+ reg = <0x42100 0x100 0x42300 4>;
+ interrupts = <4 0 3 0
+ 5 0 3 0
+ 6 0 3 0
+ 7 0 3 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi
new file mode 100644
index 00000000000..ca7fec792e5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-rmu-0.dtsi
@@ -0,0 +1,68 @@
+/*
+ * QorIQ RIO Message Unit device tree stub [ controller @ offset 0xd3000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+rmu: rmu@d3000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,srio-rmu";
+ reg = <0xd3000 0x500>;
+ ranges = <0x0 0xd3000 0x500>;
+
+ message-unit@0 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x0 0x100>;
+ interrupts = <
+ 60 2 0 0 /* msg1_tx_irq */
+ 61 2 0 0>;/* msg1_rx_irq */
+ };
+ message-unit@100 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x100 0x100>;
+ interrupts = <
+ 62 2 0 0 /* msg2_tx_irq */
+ 63 2 0 0>;/* msg2_rx_irq */
+ };
+ doorbell-unit@400 {
+ compatible = "fsl,srio-dbell-unit";
+ reg = <0x400 0x80>;
+ interrupts = <
+ 56 2 0 0 /* bell_outb_irq */
+ 57 2 0 0>;/* bell_inb_irq */
+ };
+ port-write-unit@4e0 {
+ compatible = "fsl,srio-port-write-unit";
+ reg = <0x4e0 0x20>;
+ interrupts = <16 2 1 11>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi
new file mode 100644
index 00000000000..b642047fdec
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sata2-0.dtsi
@@ -0,0 +1,39 @@
+/*
+ * QorIQ SATAv2 device tree stub [ controller @ offset 0x220000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@220000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x220000 0x1000>;
+ interrupts = <68 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi
new file mode 100644
index 00000000000..c5737025975
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sata2-1.dtsi
@@ -0,0 +1,39 @@
+/*
+ * QorIQ SATAv2 device tree stub [ controller @ offset 0x221000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+sata@221000 {
+ compatible = "fsl,pq-sata-v2";
+ reg = <0x221000 0x1000>;
+ interrupts = <69 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
new file mode 100644
index 00000000000..0cbbac32953
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.0-0.dtsi
@@ -0,0 +1,100 @@
+/*
+ * QorIQ Sec/Crypto 4.0 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+};
+
+sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi
new file mode 100644
index 00000000000..3308986bba0
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.1-0.dtsi
@@ -0,0 +1,109 @@
+/*
+ * QorIQ Sec/Crypto 4.1 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.1", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.1-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.1-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.1-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.1-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.1-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.1-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.1-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.1-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.1-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+};
+
+sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
new file mode 100644
index 00000000000..7990e0d3d6f
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec4.2-0.dtsi
@@ -0,0 +1,109 @@
+/*
+ * QorIQ Sec/Crypto 4.2 device tree stub [ controller @ offset 0x300000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+crypto: crypto@300000 {
+ compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x300000 0x10000>;
+ ranges = <0 0x300000 0x10000>;
+ interrupts = <92 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x1000 0x1000>;
+ interrupts = <88 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x2000 0x1000>;
+ interrupts = <89 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x3000 0x1000>;
+ interrupts = <90 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ compatible = "fsl,sec-v4.2-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x4000 0x1000>;
+ interrupts = <91 2 0 0>;
+ };
+
+ rtic@6000 {
+ compatible = "fsl,sec-v4.2-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x6000 0x100>;
+ ranges = <0x0 0x6100 0xe00>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x80>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x80>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x80>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v4.2-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x500 0x80>;
+ };
+ };
+};
+
+sec_mon: sec_mon@314000 {
+ compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
+ reg = <0x314000 0x1000>;
+ interrupts = <93 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi
new file mode 100644
index 00000000000..4dd6f84c239
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-usb2-dr-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ USB DR device tree stub [ controller @ offset 0x211000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@211000 {
+ compatible = "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <45 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi
new file mode 100644
index 00000000000..f053835aa1c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-usb2-mph-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ USB Host device tree stub [ controller @ offset 0x210000 ]
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+usb@210000 {
+ compatible = "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <44 0x2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 2266bbb303d..38dcb96c8e2 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
@@ -339,7 +339,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <0x2a 0x2>;
@@ -349,7 +349,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 429e87d9ace..5ab8932d09b 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
@@ -337,7 +337,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <0x2a 0x2>;
@@ -347,7 +347,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index d81201ac2ca..d5341f5741a 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -337,7 +337,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <0x2a 0x2>;
@@ -347,7 +347,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <0x1c 0x2>;
diff --git a/arch/powerpc/boot/dts/klondike.dts b/arch/powerpc/boot/dts/klondike.dts
new file mode 100644
index 00000000000..8c942903361
--- /dev/null
+++ b/arch/powerpc/boot/dts/klondike.dts
@@ -0,0 +1,227 @@
+/*
+ * Device Tree for Klondike (APM8018X) board.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ model = "apm,klondike";
+ compatible = "apm,klondike";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ ethernet0 = &EMAC0;
+ ethernet1 = &EMAC1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,apm8018x";
+ reg = <0x00000000>;
+ clock-frequency = <300000000>; /* Filled in by U-Boot */
+ timebase-frequency = <300000000>; /* Filled in by U-Boot */
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <16384>; /* 16 kB */
+ d-cache-size = <16384>; /* 16 kB */
+ dcr-controller;
+ dcr-access-method = "native";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>; /* Filled in by U-Boot */
+ };
+
+ UIC0: interrupt-controller {
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <0>;
+ dcr-reg = <0x0c0 0x010>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UIC1: interrupt-controller1 {
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <1>;
+ dcr-reg = <0x0d0 0x010>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ UIC2: interrupt-controller2 {
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <2>;
+ dcr-reg = <0x0e0 0x010>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x0a 0x4 0x0b 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ UIC3: interrupt-controller3 {
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <3>;
+ dcr-reg = <0x0f0 0x010>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x10 0x4 0x11 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ plb {
+ compatible = "ibm,plb4";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+
+ SDRAM0: memory-controller {
+ compatible = "ibm,sdram-apm8018x";
+ dcr-reg = <0x010 0x002>;
+ };
+
+ MAL0: mcmal {
+ compatible = "ibm,mcmal2";
+ dcr-reg = <0x180 0x062>;
+ num-tx-chans = <2>;
+ num-rx-chans = <16>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-parent = <&UIC1>;
+ interrupts = </*TXEOB*/ 0x6 0x4
+ /*RXEOB*/ 0x7 0x4
+ /*SERR*/ 0x1 0x4
+ /*TXDE*/ 0x2 0x4
+ /*RXDE*/ 0x3 0x4>;
+ };
+
+ POB0: opb {
+ compatible = "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x20000000 0x20000000 0x30000000
+ 0x50000000 0x50000000 0x10000000
+ 0x60000000 0x60000000 0x10000000
+ 0xFE000000 0xFE000000 0x00010000>;
+ dcr-reg = <0x100 0x020>;
+ clock-frequency = <300000000>; /* Filled in by U-Boot */
+
+ RGMII0: emac-rgmii@400a2000 {
+ compatible = "ibm,rgmii";
+ reg = <0x400a2000 0x00000010>;
+ has-mdio;
+ };
+
+ TAH0: emac-tah@400a3000 {
+ compatible = "ibm,tah";
+ reg = <0x400a3000 0x100>;
+ };
+
+ TAH1: emac-tah@400a4000 {
+ compatible = "ibm,tah";
+ reg = <0x400a4000 0x100>;
+ };
+
+ EMAC0: ethernet@400a0000 {
+ compatible = "ibm,emac4", "ibm-emac4sync";
+ interrupt-parent = <&EMAC0>;
+ interrupts = <0x0>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </*Status*/ 0x0 &UIC0 0x13 0x4>;
+ reg = <0x400a0000 0x00000100>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0x0>;
+ mal-rx-channel = <0x0>;
+ cell-index = <0>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <2048>;
+ phy-mode = "rgmii";
+ phy-address = <0x2>;
+ turbo = "no";
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <0>;
+ tah-device = <&TAH0>;
+ tah-channel = <0>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+ };
+
+ EMAC1: ethernet@400a1000 {
+ compatible = "ibm,emac4", "ibm-emac4sync";
+ status = "disabled";
+ interrupt-parent = <&EMAC1>;
+ interrupts = <0x0>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </*Status*/ 0x0 &UIC0 0x14 0x4>;
+ reg = <0x400a1000 0x00000100>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <1>;
+ mal-rx-channel = <8>;
+ cell-index = <1>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <2048>;
+ phy-mode = "rgmii";
+ phy-address = <0x3>;
+ turbo = "no";
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <1>;
+ tah-device = <&TAH1>;
+ tah-channel = <0>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+ mdio-device = <&EMAC0>;
+ };
+ };
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@50001000";
+ };
+};
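EMAC1 is wired up but carries status = "disabled", with its PHY management routed through EMAC0's MDIO bus via the mdio-device handle; a tree that reused this file and wanted the second port would only need a label override along these lines (illustration only):

	/* hypothetical override in an including tree */
	&EMAC1 {
		status = "okay";
	};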
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index d16bae1230f..983aee18579 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
@@ -80,7 +80,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <264000000>;
interrupts = <9 0x8>;
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index 8d725d10882..0a4545159e8 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -84,7 +84,7 @@ XXXX add flash parts, rtc, ??
serial0: serial@80004500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x80004500 0x8>;
clock-frequency = <97553800>;
current-speed = <9600>;
@@ -95,7 +95,7 @@ XXXX add flash parts, rtc, ??
serial1: serial@80004600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x80004600 0x8>;
clock-frequency = <97553800>;
current-speed = <57600>;
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index b13a11eb81b..0e758b347cd 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -84,7 +84,7 @@ XXXX add flash parts, rtc, ??
serial0: serial@80004500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x80004500 0x8>;
clock-frequency = <130041000>;
current-speed = <9600>;
@@ -95,7 +95,7 @@ XXXX add flash parts, rtc, ??
serial1: serial@80004600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x80004600 0x8>;
clock-frequency = <130041000>;
current-speed = <57600>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 697b3f6b78b..22b0832b6c3 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -233,7 +233,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <133333333>;
interrupts = <9 0x8>;
@@ -243,7 +243,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <133333333>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a0bd1881081..f66d10d95a8 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -208,7 +208,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <133333333>;
interrupts = <9 0x8>;
@@ -218,7 +218,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <133333333>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index ac1eb320c7b..1c836c6c5be 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -261,7 +261,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -271,7 +271,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 4dd08c32297..811848e93ae 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -265,7 +265,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <133333333>;
interrupts = <9 0x8>;
@@ -275,7 +275,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <133333333>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 05ad8c98e52..da9c72ddc34 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -105,7 +105,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -115,7 +115,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index f4fadb23ad6..ff7b15b340a 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -83,7 +83,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -93,7 +93,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 505dc842d80..2608679d0d4 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -283,7 +283,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>; // from bootloader
interrupts = <9 0x8>;
@@ -293,7 +293,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>; // from bootloader
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index eb732115f01..6cd044d8fb8 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -189,7 +189,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>; // from bootloader
interrupts = <9 0x8>;
@@ -199,7 +199,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>; // from bootloader
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 230febb9b72..4552864082c 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -242,7 +242,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -252,7 +252,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 45cfa1c50a2..c0e450a551b 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -136,7 +136,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <264000000>;
interrupts = <9 0x8>;
@@ -146,7 +146,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <264000000>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index bdf4459677b..b6e9aec1d86 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -102,7 +102,7 @@
serial0: serial@4500 {
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
interrupts = <9 8>;
interrupt-parent = <&ipic>;
@@ -112,7 +112,7 @@
serial1: serial@4600 {
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
interrupts = <10 8>;
interrupt-parent = <&ipic>;
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index 855782c5e5e..cfccef57cd1 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -276,7 +276,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -286,7 +286,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index dbc1b988b29..353deff1b7f 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -321,7 +321,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -331,7 +331,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts
index 9ea78305696..ef4a305a0d0 100644
--- a/arch/powerpc/boot/dts/mpc8377_wlan.dts
+++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts
@@ -304,7 +304,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -314,7 +314,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index f70cf600083..538fcb92733 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -315,7 +315,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -325,7 +325,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 3447eb9f6e8..32333a908f3 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -321,7 +321,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -331,7 +331,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 645ec51cc6e..5387092fdfb 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -313,7 +313,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -323,7 +323,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 15560c619b0..46224c2430f 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -319,7 +319,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -329,7 +329,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index a75c10eed26..c15881574fd 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -9,24 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8536si-pre.dtsi"
/ {
model = "fsl,mpc8536ds";
compatible = "fsl,mpc8536ds";
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- pci3 = &pci3;
- };
cpus {
#cpus = <1>;
@@ -45,403 +32,34 @@
reg = <0 0 0 0>; // Filled by U-Boot
};
- soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8536-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8536-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 0x2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8536-l2-cache-controller";
- reg = <0x20000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 0x2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- interrupts = <0 0x1>;
- interrupt-parent = <&mpic>;
- };
- };
-
- spi@7000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc8536-espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- fsl,espi-num-chipselects = <4>;
-
- flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,s25sl12801";
- reg = <0>;
- spi-max-frequency = <40000000>;
- partition@u-boot {
- label = "u-boot";
- reg = <0x00000000 0x00100000>;
- read-only;
- };
- partition@kernel {
- label = "kernel";
- reg = <0x00100000 0x00500000>;
- read-only;
- };
- partition@dtb {
- label = "dtb";
- reg = <0x00600000 0x00100000>;
- read-only;
- };
- partition@fs {
- label = "file system";
- reg = <0x00700000 0x00900000>;
- };
- };
- flash@1 {
- compatible = "spansion,s25sl12801";
- reg = <1>;
- spi-max-frequency = <40000000>;
- };
- flash@2 {
- compatible = "spansion,s25sl12801";
- reg = <2>;
- spi-max-frequency = <40000000>;
- };
- flash@3 {
- compatible = "spansion,s25sl12801";
- reg = <3>;
- spi-max-frequency = <40000000>;
- };
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma";
- reg = <0x21300 4>;
- ranges = <0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- usb@22000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
- reg = <0x22000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- phy_type = "ulpi";
- };
-
- usb@23000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
- reg = <0x23000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <46 0x2>;
- phy_type = "ulpi";
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 0x1>;
- reg = <0>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 0x1>;
- reg = <1>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- usb@2b000 {
- compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
- reg = <0x2b000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <60 0x2>;
- dr_mode = "peripheral";
- phy_type = "ulpi";
- };
-
- sdhci@2e000 {
- compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- clock-frequency = <250000000>;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
- "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x9fe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- sata@18000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
- reg = <0x18000 0x1000>;
- cell-index = <1>;
- interrupts = <74 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- sata@19000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
- reg = <0x19000 0x1000>;
- cell-index = <2>;
- interrupts = <41 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- big-endian;
- };
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+ };
- msi@41600 {
- compatible = "fsl,mpc8536-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
+ board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0 0xffe00000 0x100000>;
};
pci0: pci@ffe08000 {
- compatible = "fsl,mpc8540-pci";
- device_type = "pci";
+ reg = <0 0xffe08000 0 0x1000>;
+ ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000
+ 0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x11 J17 Slot 1 */
- 0x8800 0 0 1 &mpic 1 1
- 0x8800 0 0 2 &mpic 2 1
- 0x8800 0 0 3 &mpic 3 1
- 0x8800 0 0 4 &mpic 4 1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <24 0x2>;
- bus-range = <0 0xff>;
- ranges = <0x02000000 0 0x80000000 0 0x80000000 0 0x10000000
- 0x01000000 0 0x00000000 0 0xffc00000 0 0x00010000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe08000 0 0x1000>;
+ 0x8800 0 0 1 &mpic 1 1 0 0
+ 0x8800 0 0 2 &mpic 2 1 0 0
+ 0x8800 0 0 3 &mpic 3 1 0 0
+ 0x8800 0 0 4 &mpic 4 1 0 0>;
};
pci1: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0x98000000 0 0x98000000 0 0x08000000
0x01000000 0 0x00000000 0 0xffc20000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0x98000000
0x02000000 0 0x98000000
0 0x08000000
@@ -453,31 +71,10 @@
};
pci2: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0x90000000 0 0x90000000 0 0x08000000
0x01000000 0 0x00000000 0 0xffc10000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 0 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0x90000000
0x02000000 0 0x90000000
0 0x08000000
@@ -489,32 +86,10 @@
};
pci3: pcie@ffe0b000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe0b000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0xa0000000 0 0xa0000000 0 0x20000000
0x01000000 0 0x00000000 0 0xffc30000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <27 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 8 1
- 0000 0 0 2 &mpic 9 1
- 0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
- >;
-
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xa0000000
0x02000000 0 0xa0000000
0 0x20000000
@@ -525,3 +100,6 @@
};
};
};
+
+/include/ "fsl/mpc8536si-post.dtsi"
+/include/ "mpc8536ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
new file mode 100644
index 00000000000..1462e4cf49d
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -0,0 +1,141 @@
+/*
+ * MPC8536DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_soc {
+ i2c@3100 {
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0 0x1 0 0>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ partition@u-boot {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@kernel {
+ label = "kernel";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@dtb {
+ label = "dtb";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@fs {
+ label = "file system";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ flash@1 {
+ compatible = "spansion,s25sl12801";
+ reg = <1>;
+ spi-max-frequency = <40000000>;
+ };
+ flash@2 {
+ compatible = "spansion,s25sl12801";
+ reg = <2>;
+ spi-max-frequency = <40000000>;
+ };
+ flash@3 {
+ compatible = "spansion,s25sl12801";
+ reg = <3>;
+ spi-max-frequency = <40000000>;
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <10 0x1 0 0>;
+ reg = <0>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <10 0x1 0 0>;
+ reg = <1>;
+ device_type = "ethernet-phy";
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet2: ethernet@26000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ usb@2b000 {
+ dr_mode = "peripheral";
+ phy_type = "ulpi";
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index d95b26021e6..8f4b929b1d1 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -1,5 +1,5 @@
/*
- * MPC8536 DS Device Tree Source
+ * MPC8536DS Device Tree Source (36-bit address map)
*
* Copyright 2008-2009 Freescale Semiconductor, Inc.
*
@@ -9,24 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8536si-pre.dtsi"
/ {
model = "fsl,mpc8536ds";
compatible = "fsl,mpc8536ds";
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- pci3 = &pci3;
- };
cpus {
#cpus = <1>;
@@ -45,351 +32,34 @@
reg = <0 0 0 0>; // Filled by U-Boot
};
- soc@fffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0xf 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8536-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8536-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 0x2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8536-l2-cache-controller";
- reg = <0x20000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 0x2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- interrupts = <0 0x1>;
- interrupt-parent = <&mpic>;
- };
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8536-dma", "fsl,eloplus-dma";
- reg = <0x21300 4>;
- ranges = <0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8536-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- usb@22000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
- reg = <0x22000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- phy_type = "ulpi";
- };
-
- usb@23000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
- reg = <0x23000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <46 0x2>;
- phy_type = "ulpi";
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 0x1>;
- reg = <0>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 0x1>;
- reg = <1>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- usb@2b000 {
- compatible = "fsl,mpc8536-usb2-dr", "fsl-usb2-dr";
- reg = <0x2b000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <60 0x2>;
- dr_mode = "peripheral";
- phy_type = "ulpi";
- };
-
- sdhci@2e000 {
- compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- clock-frequency = <250000000>;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
- "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x9fe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- sata@18000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
- reg = <0x18000 0x1000>;
- cell-index = <1>;
- interrupts = <74 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- sata@19000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
- reg = <0x19000 0x1000>;
- cell-index = <2>;
- interrupts = <41 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- big-endian;
- };
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+ };
- msi@41600 {
- compatible = "fsl,mpc8536-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
+ board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
};
- pci0: pci@fffe08000 {
- compatible = "fsl,mpc8540-pci";
- device_type = "pci";
+ pci0: pci@ffe08000 {
+ reg = <0xf 0xffe08000 0 0x1000>;
+ ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x11 J17 Slot 1 */
- 0x8800 0 0 1 &mpic 1 1
- 0x8800 0 0 2 &mpic 2 1
- 0x8800 0 0 3 &mpic 3 1
- 0x8800 0 0 4 &mpic 4 1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <24 0x2>;
- bus-range = <0 0xff>;
- ranges = <0x02000000 0 0xf0000000 0xc 0x00000000 0 0x10000000
- 0x01000000 0 0x00000000 0xf 0xffc00000 0 0x00010000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xf 0xffe08000 0 0x1000>;
+ 0x8800 0 0 1 &mpic 1 1 0 0
+ 0x8800 0 0 2 &mpic 2 1 0 0
+ 0x8800 0 0 3 &mpic 3 1 0 0
+ 0x8800 0 0 4 &mpic 4 1 0 0>;
};
- pci1: pcie@fffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
+ pci1: pcie@ffe09000 {
reg = <0xf 0xffe09000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0xf8000000 0xc 0x18000000 0 0x08000000
0x01000000 0 0x00000000 0xf 0xffc20000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xf8000000
0x02000000 0 0xf8000000
0 0x08000000
@@ -401,31 +71,10 @@
};
pci2: pcie@fffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe0a000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0xf8000000 0xc 0x10000000 0 0x08000000
0x01000000 0 0x00000000 0xf 0xffc10000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 0 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
- >;
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xf8000000
0x02000000 0 0xf8000000
0 0x08000000
@@ -437,32 +86,10 @@
};
pci3: pcie@fffe0b000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe0b000 0 0x1000>;
- bus-range = <0 0xff>;
ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xffc30000 0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <27 0x2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 8 1
- 0000 0 0 2 &mpic 9 1
- 0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
- >;
-
pcie@0 {
- reg = <0 0 0 0 0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
0 0x20000000
@@ -473,3 +100,6 @@
};
};
};
+
+/include/ "fsl/mpc8536si-post.dtsi"
+/include/ "mpc8536ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 8d1bf0fd926..f99fb110c97 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -243,7 +243,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -253,7 +253,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 87ff96549fa..0f5e9391279 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -209,7 +209,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -219,7 +219,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index d793968743c..e934987e882 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -9,339 +9,52 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8544si-pre.dtsi"
+
/ {
model = "MPC8544DS";
compatible = "MPC8544DS", "MPC85xxDS";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- pci3 = &pci3;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8544@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
memory {
device_type = "memory";
- reg = <0x0 0x0>; // Filled by U-Boot
+ reg = <0 0 0 0>; // Filled by U-Boot
};
- soc8544@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
-
- ranges = <0x0 0xe0000000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8544-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8544-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8544-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2, 256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8544-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8544-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8544-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8544-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8544-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "TSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy0>;
- tbi-handle = <&tbi0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x0>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
-
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "TSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy1>;
- tbi-handle = <&tbi1>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- crypto@30000 {
- compatible = "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xfe>;
- fsl,descriptor-types-mask = <0x12b0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
+ lbc: localbus@e0005000 {
+ reg = <0 0xe0005000 0 0x1000>;
+ };
- msi@41600 {
- compatible = "fsl,mpc8544-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
+ board_soc: soc: soc8544@e0000000 {
+ ranges = <0x0 0x0 0xe0000000 0x100000>;
};
pci0: pci@e0008000 {
- compatible = "fsl,mpc8540-pci";
- device_type = "pci";
+ reg = <0 0xe0008000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xe1000000 0x0 0x10000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x11 J17 Slot 1 */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
+ 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
/* IDSEL 0x12 J16 Slot 2 */
- 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x9000 0x0 0x0 0x4 &mpic 0x1 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xe1000000 0x0 0x10000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008000 0x1000>;
+ 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x9000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0>;
};
pci1: pcie@e0009000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0009000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xe1010000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
+ reg = <0x0 0xe0009000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xe1010000 0x0 0x10000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -353,31 +66,10 @@
};
pci2: pcie@e000a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x1000000 0x0 0x0 0xe1020000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
+ reg = <0x0 0xe000a000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe1020000 0x0 0x10000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x10000000
@@ -388,44 +80,11 @@
};
};
- pci3: pcie@e000b000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000b000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xb0000000 0xb0000000 0x0 0x100000
- 0x1000000 0x0 0x0 0xb0100000 0x0 0x100000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <27 2>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x1>;
- interrupt-map = <
- // IDSEL 0x1c USB
- 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
- 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
- 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
- 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
- // IDSEL 0x1d Audio
- 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
- // IDSEL 0x1e Legacy
- 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
- 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
- // IDSEL 0x1f IDE/SATA
- 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
- 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
- >;
-
+ board_pci3: pci3: pcie@e000b000 {
+ reg = <0x0 0xe000b000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xb0000000 0 0xb0000000 0x0 0x100000
+ 0x1000000 0x0 0x00000000 0 0xb0100000 0x0 0x100000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xb0000000
0x2000000 0x0 0xb0000000
0x0 0x100000
@@ -433,70 +92,14 @@
0x1000000 0x0 0x0
0x1000000 0x0 0x0
0x0 0x100000>;
-
- uli1575@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0xb0000000
- 0x2000000 0x0 0xb0000000
- 0x0 0x100000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- isa@1e {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0xf000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0
- 0x1000000 0x0 0x0
- 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <9 2>;
- interrupt-parent = <&mpic>;
- };
-
- i8042@60 {
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
- interrupts = <1 3 12 3>;
- interrupt-parent = <&i8259>;
-
- keyboard@0 {
- reg = <0x0>;
- compatible = "pnpPNP,303";
- };
-
- mouse@1 {
- reg = <0x1>;
- compatible = "pnpPNP,f03";
- };
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
-
- gpio@400 {
- reg = <0x1 0x400 0x80>;
- };
- };
- };
};
};
};
+
+/*
+ * mpc8544ds.dtsi must be last to ensure board_pci3 overrides pci3 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8544si-post.dtsi"
+/include/ "mpc8544ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dtsi b/arch/powerpc/boot/dts/mpc8544ds.dtsi
new file mode 100644
index 00000000000..270f64b90f4
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8544ds.dtsi
@@ -0,0 +1,161 @@
+/*
+ * MPC8544DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_soc {
+ enet0: ethernet@24000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <10 1 0 0>;
+ reg = <0x0>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <10 1 0 0>;
+ reg = <0x1>;
+ device_type = "ethernet-phy";
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet2: ethernet@26000 {
+ phy-handle = <&phy1>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+};
+
+&board_pci3 {
+ pcie@0 {
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+ // IDSEL 0x1c USB
+ 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+ 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+ 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+ 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+ // IDSEL 0x1d Audio
+ 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+ // IDSEL 0x1e Legacy
+ 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+ 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+ // IDSEL 0x1f IDE/SATA
+ 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+ 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+ >;
+
+
+ uli1575@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0xb0000000
+ 0x2000000 0x0 0xb0000000
+ 0x0 0x100000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0xf000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0
+ 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <9 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+ interrupts = <1 3 12 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0x0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <0x1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+
+ gpio@400 {
+ reg = <0x1 0x400 0x80>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index a17a5572fb7..07b8dae0f46 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -9,13 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8548si-pre.dtsi"
/ {
model = "MPC8548CDS";
compatible = "MPC8548CDS", "MPC85xxCDS";
- #address-cells = <1>;
- #size-cells = <1>;
aliases {
ethernet0 = &enet0;
@@ -29,76 +27,19 @@
pci2 = &pci2;
};
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8548@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>; // 33 MHz, from uboot
- bus-frequency = <0>; // 166 MHz
- clock-frequency = <0>; // 825 MHz, from uboot
- next-level-cache = <&L2>;
- };
- };
-
memory {
device_type = "memory";
- reg = <0x0 0x8000000>; // 128M at 0x0
+ reg = <0 0 0x0 0x8000000>; // 128M at 0x0
};
- soc8548@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0xe0000000 0x100000>;
- bus-frequency = <0>;
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8548-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8548-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
+ lbc: localbus@e0005000 {
+ reg = <0 0xe0005000 0 0x1000>;
+ };
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8548-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
+ soc: soc8548@e0000000 {
+ ranges = <0 0x0 0xe0000000 0x100000>;
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
-
eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
@@ -116,351 +57,178 @@
};
i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
-
eeprom@50 {
compatible = "atmel,24c64";
reg = <0x50>;
};
};
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
+ };
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <5 1>;
- reg = <0x0>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <5 1>;
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <5 1>;
- reg = <0x2>;
- device_type = "ethernet-phy";
- };
- phy3: ethernet-phy@3 {
- interrupt-parent = <&mpic>;
- interrupts = <5 1>;
- reg = <0x3>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <5 1 0 0>;
+ reg = <0x0>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <5 1 0 0>;
+ reg = <0x1>;
+ device_type = "ethernet-phy";
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <5 1 0 0>;
+ reg = <0x2>;
+ device_type = "ethernet-phy";
+ };
+ phy3: ethernet-phy@3 {
+ interrupts = <5 1 0 0>;
+ reg = <0x3>;
+ device_type = "ethernet-phy";
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
+ };
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
+ };
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
enet3: ethernet@27000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <3>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x27000 0x1000>;
- ranges = <0x0 0x27000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <37 2 38 2 39 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi3>;
phy-handle = <&phy3>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi3: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
};
- global-utilities@e0000 { //global utilities reg
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- crypto@30000 {
- compatible = "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xfe>;
- fsl,descriptor-types-mask = <0x12b0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
+ mdio@27520 {
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
};
pci0: pci@e0008000 {
+ reg = <0 0xe0008000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x4 (PCIX Slot 2) */
- 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x5 (PCIX Slot 3) */
- 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1
- 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1
- 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1
- 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1
+ 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+ 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+ 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+ 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
/* IDSEL 0x6 (PCIX Slot 4) */
- 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1
- 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1
+ 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
/* IDSEL 0x8 (PCIX Slot 5) */
- 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0xC (Tsi310 bridge) */
- 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x14 (Slot 2) */
- 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x15 (Slot 3) */
- 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1
- 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1
- 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1
- 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1
+ 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+ 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+ 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+ 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
/* IDSEL 0x16 (Slot 4) */
- 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1
- 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1
- 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1
- 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1
+ 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
/* IDSEL 0x18 (Slot 5) */
- 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x1C (Tsi310 bridge PCI primary) */
- 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- bus-range = <0 0>;
- ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x10000000
- 0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008000 0x1000>;
- compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
- device_type = "pci";
+ 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
pci_bridge@1c {
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x00 (PrPMC Site) */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x04 (VIA chip) */
- 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1
+ 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
/* IDSEL 0x05 (8139) */
- 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1
+ 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
/* IDSEL 0x06 (Slot 6) */
- 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1
- 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1
+ 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
			/* IDSEL 0x07 (Slot 7) */
- 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1
- 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1>;
+ 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
+ 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
reg = <0xe000 0x0 0x0 0x0 0x0>;
#interrupt-cells = <1>;
@@ -492,7 +260,7 @@
#address-cells = <0>;
#interrupt-cells = <2>;
compatible = "chrp,iic";
- interrupts = <0 1>;
+ interrupts = <0 1 0 0>;
interrupt-parent = <&mpic>;
};
@@ -505,56 +273,25 @@
};
pci1: pci@e0009000 {
+ reg = <0 0xe0009000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x15 */
- 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1
- 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1
- 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1
- 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- bus-range = <0 0>;
- ranges = <0x2000000 0x0 0x90000000 0x90000000 0x0 0x10000000
- 0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0009000 0x1000>;
- compatible = "fsl,mpc8540-pci";
- device_type = "pci";
+ 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
+ 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
};
pci2: pcie@e000a000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x0 (PEX) */
- 00000 0x0 0x0 0x1 &mpic 0x0 0x1
- 00000 0x0 0x0 0x2 &mpic 0x1 0x1
- 00000 0x0 0x0 0x3 &mpic 0x2 0x1
- 00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xe3000000 0x0 0x100000>;
- clock-frequency = <33333333>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
+ reg = <0 0xe000a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -565,3 +302,5 @@
};
};
};
+
+/include/ "fsl/mpc8548si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index 5c5614f9eb1..fe10438613d 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -209,7 +209,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -219,7 +219,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 647daf8e729..09598bb5d44 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -9,60 +9,25 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8568si-pre.dtsi"
/ {
model = "MPC8568EMDS";
compatible = "MPC8568EMDS", "MPC85xxMDS";
- #address-cells = <1>;
- #size-cells = <1>;
aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- serial1 = &serial1;
pci0 = &pci0;
pci1 = &pci1;
- rapidio0 = &rio0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8568@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- sleep = <&pmc 0x00008000 // core
- &pmc 0x00004000>; // timebase
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
+ rapidio0 = &rio;
};
memory {
device_type = "memory";
- reg = <0x0 0x10000000>;
+ reg = <0x0 0x0 0x0 0x0>;
};
- localbus@e0005000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,mpc8568-localbus", "fsl,pq3-localbus",
- "simple-bus";
- reg = <0xe0005000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <19 2>;
-
+ lbc: localbus@e0005000 {
+ reg = <0x0 0xe0005000 0x0 0x1000>;
ranges = <0x0 0x0 0xfe000000 0x02000000
0x1 0x0 0xf8000000 0x00008000
0x2 0x0 0xf0000000 0x04000000
@@ -104,288 +69,65 @@
};
};
- soc8568@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0xe0000000 0x100000>;
- bus-frequency = <0>;
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8568-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8568-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8568-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
+ soc: soc8568@e0000000 {
+ ranges = <0x0 0x0 0xe0000000 0x100000>;
i2c-sleep-nexus {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- sleep = <&pmc 0x00000004>;
- ranges;
-
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
-
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
- interrupts = <3 1>;
- interrupt-parent = <&mpic>;
+ interrupts = <3 1 0 0>;
};
};
+ };
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy2>;
};
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8568-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- sleep = <&pmc 0x00000400>;
-
- dma-channel@0 {
- compatible = "fsl,mpc8568-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
+ mdio@24520 {
+ phy0: ethernet-phy@7 {
+ interrupts = <1 1 0 0>;
+ reg = <0x7>;
+ device_type = "ethernet-phy";
};
- dma-channel@80 {
- compatible = "fsl,mpc8568-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ device_type = "ethernet-phy";
};
- dma-channel@100 {
- compatible = "fsl,mpc8568-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
+ phy2: ethernet-phy@2 {
+ interrupts = <1 1 0 0>;
+ reg = <0x2>;
+ device_type = "ethernet-phy";
};
- dma-channel@180 {
- compatible = "fsl,mpc8568-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
+ phy3: ethernet-phy@3 {
+ interrupts = <2 1 0 0>;
+ reg = <0x3>;
+ device_type = "ethernet-phy";
};
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy2>;
- sleep = <&pmc 0x00000080>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@7 {
- interrupt-parent = <&mpic>;
- interrupts = <1 1>;
- reg = <0x7>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <1 1>;
- reg = <0x2>;
- device_type = "ethernet-phy";
- };
- phy3: ethernet-phy@3 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x3>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
sleep = <&pmc 0x00000040>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
};
- duart-sleep-nexus {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- sleep = <&pmc 0x00000002>;
- ranges;
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
- global-utilities@e0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- ranges = <0 0xe0000 0x1000>;
- fsl,has-rstcr;
-
- pmc: power@70 {
- compatible = "fsl,mpc8568-pmc",
- "fsl,mpc8548-pmc";
- reg = <0x70 0x20>;
- };
- };
-
- crypto@30000 {
- compatible = "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xfe>;
- fsl,descriptor-types-mask = <0x12b0ebf>;
- sleep = <&pmc 0x01000000>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,mpc8568-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
par_io@e0100 {
- reg = <0xe0100 0x100>;
- device_type = "par_io";
num-ports = <7>;
pio1: ucc_pin@01 {
@@ -448,57 +190,21 @@
};
};
- qe@e0080000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "qe";
- compatible = "fsl,qe";
- ranges = <0x0 0xe0080000 0x40000>;
- reg = <0xe0080000 0x480>;
- sleep = <&pmc 0x00000800>;
- brg-frequency = <0>;
- bus-frequency = <396000000>;
- fsl,qe-num-riscs = <2>;
- fsl,qe-num-snums = <28>;
-
- muram@10000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,qe-muram", "fsl,cpm-muram";
- ranges = <0x0 0x10000 0x10000>;
-
- data-only@0 {
- compatible = "fsl,qe-muram-data",
- "fsl,cpm-muram-data";
- reg = <0x0 0x10000>;
- };
- };
+ qe: qe@e0080000 {
+ ranges = <0x0 0x0 0xe0080000 0x40000>;
+ reg = <0x0 0xe0080000 0x0 0x480>;
spi@4c0 {
- cell-index = <0>;
- compatible = "fsl,spi";
- reg = <0x4c0 0x40>;
- interrupts = <2>;
- interrupt-parent = <&qeic>;
mode = "cpu";
};
spi@500 {
- cell-index = <1>;
- compatible = "fsl,spi";
- reg = <0x500 0x40>;
- interrupts = <1>;
- interrupt-parent = <&qeic>;
mode = "cpu";
};
enet2: ucc@2000 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <1>;
- reg = <0x2000 0x200>;
- interrupts = <32>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk16";
@@ -510,10 +216,6 @@
enet3: ucc@3000 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <2>;
- reg = <0x3000 0x200>;
- interrupts = <33>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk16";
@@ -532,102 +234,57 @@
* gianfar's MDIO bus */
qe_phy0: ethernet-phy@07 {
interrupt-parent = <&mpic>;
- interrupts = <1 1>;
+ interrupts = <1 1 0 0>;
reg = <0x7>;
device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@01 {
interrupt-parent = <&mpic>;
- interrupts = <2 1>;
+ interrupts = <2 1 0 0>;
reg = <0x1>;
device_type = "ethernet-phy";
};
qe_phy2: ethernet-phy@02 {
interrupt-parent = <&mpic>;
- interrupts = <1 1>;
+ interrupts = <1 1 0 0>;
reg = <0x2>;
device_type = "ethernet-phy";
};
qe_phy3: ethernet-phy@03 {
interrupt-parent = <&mpic>;
- interrupts = <2 1>;
+ interrupts = <2 1 0 0>;
reg = <0x3>;
device_type = "ethernet-phy";
};
};
-
- qeic: interrupt-controller@80 {
- interrupt-controller;
- compatible = "fsl,qe-ic";
- #address-cells = <0>;
- #interrupt-cells = <1>;
- reg = <0x80 0x80>;
- big-endian;
- interrupts = <46 2 46 2>; //high:30 low:30
- interrupt-parent = <&mpic>;
- };
-
};
pci0: pci@e0008000 {
+ reg = <0x0 0xe0008000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0x0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xe2000000 0x0 0x800000>;
+ clock-frequency = <66666666>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x12 AD18 */
- 0x9000 0x0 0x0 0x1 &mpic 0x5 0x1
- 0x9000 0x0 0x0 0x2 &mpic 0x6 0x1
- 0x9000 0x0 0x0 0x3 &mpic 0x7 0x1
- 0x9000 0x0 0x0 0x4 &mpic 0x4 0x1
+ 0x9000 0x0 0x0 0x1 &mpic 0x5 0x1 0 0
+ 0x9000 0x0 0x0 0x2 &mpic 0x6 0x1 0 0
+ 0x9000 0x0 0x0 0x3 &mpic 0x7 0x1 0 0
+ 0x9000 0x0 0x0 0x4 &mpic 0x4 0x1 0 0
/* IDSEL 0x13 AD19 */
- 0x9800 0x0 0x0 0x1 &mpic 0x6 0x1
- 0x9800 0x0 0x0 0x2 &mpic 0x7 0x1
- 0x9800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x9800 0x0 0x0 0x4 &mpic 0x5 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
- sleep = <&pmc 0x80000000>;
- clock-frequency = <66666666>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008000 0x1000>;
- compatible = "fsl,mpc8540-pci";
- device_type = "pci";
+ 0x9800 0x0 0x0 0x1 &mpic 0x6 0x1 0 0
+ 0x9800 0x0 0x0 0x2 &mpic 0x7 0x1 0 0
+ 0x9800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x9800 0x0 0x0 0x4 &mpic 0x5 0x1 0 0>;
};
/* PCI Express */
pci1: pcie@e000a000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x0 (PEX) */
- 00000 0x0 0x0 0x1 &mpic 0x0 0x1
- 00000 0x0 0x0 0x2 &mpic 0x1 0x1
- 00000 0x0 0x0 0x3 &mpic 0x2 0x1
- 00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
- sleep = <&pmc 0x20000000>;
- clock-frequency = <33333333>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000 0x0 0xa0000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0x0 0xe2800000 0x0 0x800000>;
+ reg = <0x0 0xe000a000 0x0 0x1000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x10000000
@@ -638,22 +295,11 @@
};
};
- rio0: rapidio@e00c00000 {
- #address-cells = <2>;
- #size-cells = <2>;
- compatible = "fsl,mpc8568-rapidio", "fsl,rapidio-delta";
- reg = <0xe00c0000 0x20000>;
- ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>;
- interrupts = <48 2 /* error */
- 49 2 /* bell_outb */
- 50 2 /* bell_inb */
- 53 2 /* msg1_tx */
- 54 2 /* msg1_rx */
- 55 2 /* msg2_tx */
- 56 2 /* msg2_rx */>;
- interrupt-parent = <&mpic>;
- sleep = <&pmc 0x00080000 /* controller */
- &pmc 0x00040000>; /* message unit */
+ rio: rapidio@e00c00000 {
+ reg = <0x0 0xe00c0000 0x0 0x20000>;
+ port1 {
+ ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
+ };
};
leds {
@@ -672,3 +318,5 @@
};
};
};
+
+/include/ "fsl/mpc8568si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 8b72eaff5b0..7e283c891b7 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -9,66 +9,36 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8569si-pre.dtsi"
/ {
model = "MPC8569EMDS";
compatible = "fsl,MPC8569EMDS";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mpic>;
aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- ethernet0 = &enet0;
- ethernet1 = &enet1;
ethernet2 = &enet2;
ethernet3 = &enet3;
ethernet5 = &enet5;
ethernet7 = &enet7;
- pci1 = &pci1;
- rapidio0 = &rio0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8569@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- sleep = <&pmc 0x00008000 // core
- &pmc 0x00004000>; // timebase
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
+ rapidio0 = &rio;
};
memory {
device_type = "memory";
};
- localbus@e0005000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,mpc8569-elbc", "fsl,elbc", "simple-bus";
- reg = <0xe0005000 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
- sleep = <&pmc 0x08000000>;
-
- ranges = <0x0 0x0 0xfe000000 0x02000000
- 0x1 0x0 0xf8000000 0x00008000
- 0x2 0x0 0xf0000000 0x04000000
- 0x3 0x0 0xfc000000 0x00008000
- 0x4 0x0 0xf8008000 0x00008000
- 0x5 0x0 0xf8010000 0x00008000>;
+ lbc: localbus@e0005000 {
+ reg = <0x0 0xe0005000 0x0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xfe000000 0x02000000
+ 0x1 0x0 0x0 0xf8000000 0x00008000
+ 0x2 0x0 0x0 0xf0000000 0x04000000
+ 0x3 0x0 0x0 0xfc000000 0x00008000
+ 0x4 0x0 0x0 0xf8008000 0x00008000
+ 0x5 0x0 0x0 0xf8010000 0x00008000>;
nor@0,0 {
#address-cells = <1>;
@@ -133,220 +103,25 @@
};
};
- soc@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,mpc8569-immr", "simple-bus";
- ranges = <0x0 0xe0000000 0x100000>;
- bus-frequency = <0>;
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8569-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8569-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
+ soc: soc@e0000000 {
+ ranges = <0x0 0x0 0xe0000000 0x100000>;
i2c-sleep-nexus {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- sleep = <&pmc 0x00000004>;
- ranges;
-
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
-
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
- interrupts = <3 1>;
- interrupt-parent = <&mpic>;
+ interrupts = <3 1 0 0>;
};
};
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
- };
-
- duart-sleep-nexus {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- sleep = <&pmc 0x00000002>;
- ranges;
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8569-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
};
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8569-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8569-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8569-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8569-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8569-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- sdhci@2e000 {
- compatible = "fsl,mpc8569-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x8>;
- interrupt-parent = <&mpic>;
- sleep = <&pmc 0x00200000>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
+ sdhc@2e000 {
status = "disabled";
sdhci,1-bit-only;
};
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- sleep = <&pmc 0x01000000>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,mpc8568-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- ranges = <0 0xe0000 0x1000>;
- fsl,has-rstcr;
-
- pmc: power@70 {
- compatible = "fsl,mpc8569-pmc",
- "fsl,mpc8548-pmc";
- reg = <0x70 0x20>;
- };
- };
-
par_io@e0100 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xe0100 0x100>;
- ranges = <0x0 0xe0100 0x100>;
- device_type = "par_io";
num-ports = <7>;
qe_pio_e: gpio-controller@80 {
@@ -447,47 +222,11 @@
};
};
- qe@e0080000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "qe";
- compatible = "fsl,qe";
- ranges = <0x0 0xe0080000 0x40000>;
- reg = <0xe0080000 0x480>;
- sleep = <&pmc 0x00000800>;
- brg-frequency = <0>;
- bus-frequency = <0>;
- fsl,qe-num-riscs = <4>;
- fsl,qe-num-snums = <46>;
-
- qeic: interrupt-controller@80 {
- interrupt-controller;
- compatible = "fsl,qe-ic";
- #address-cells = <0>;
- #interrupt-cells = <1>;
- reg = <0x80 0x80>;
- interrupts = <46 2 46 2>; //high:30 low:30
- interrupt-parent = <&mpic>;
- };
-
- timer@440 {
- compatible = "fsl,mpc8569-qe-gtm",
- "fsl,qe-gtm", "fsl,gtm";
- reg = <0x440 0x40>;
- interrupts = <12 13 14 15>;
- interrupt-parent = <&qeic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
+ qe: qe@e0080000 {
+ ranges = <0x0 0x0 0xe0080000 0x40000>;
+ reg = <0x0 0xe0080000 0x0 0x480>;
spi@4c0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc8569-qe-spi", "fsl,spi";
- reg = <0x4c0 0x40>;
- cell-index = <0>;
- interrupts = <2>;
- interrupt-parent = <&qeic>;
gpios = <&qe_pio_e 30 0>;
mode = "cpu-qe";
@@ -499,20 +238,10 @@
};
spi@500 {
- cell-index = <1>;
- compatible = "fsl,spi";
- reg = <0x500 0x40>;
- interrupts = <1>;
- interrupt-parent = <&qeic>;
mode = "cpu";
};
usb@6c0 {
- compatible = "fsl,mpc8569-qe-usb",
- "fsl,mpc8323-qe-usb";
- reg = <0x6c0 0x40 0x8b00 0x100>;
- interrupts = <11>;
- interrupt-parent = <&qeic>;
fsl,fullspeed-clock = "clk5";
fsl,lowspeed-clock = "brg10";
gpios = <&qe_pio_f 3 0 /* USBOE */
@@ -527,10 +256,6 @@
enet0: ucc@2000 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <1>;
- reg = <0x2000 0x200>;
- interrupts = <32>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk12";
@@ -548,35 +273,33 @@
qe_phy0: ethernet-phy@07 {
interrupt-parent = <&mpic>;
- interrupts = <1 1>;
+ interrupts = <1 1 0 0>;
reg = <0x7>;
device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@01 {
interrupt-parent = <&mpic>;
- interrupts = <2 1>;
+ interrupts = <2 1 0 0>;
reg = <0x1>;
device_type = "ethernet-phy";
};
qe_phy2: ethernet-phy@02 {
interrupt-parent = <&mpic>;
- interrupts = <3 1>;
+ interrupts = <3 1 0 0>;
reg = <0x2>;
device_type = "ethernet-phy";
};
qe_phy3: ethernet-phy@03 {
interrupt-parent = <&mpic>;
- interrupts = <4 1>;
+ interrupts = <4 1 0 0>;
reg = <0x3>;
device_type = "ethernet-phy";
};
qe_phy5: ethernet-phy@04 {
- interrupt-parent = <&mpic>;
reg = <0x04>;
device_type = "ethernet-phy";
};
qe_phy7: ethernet-phy@06 {
- interrupt-parent = <&mpic>;
reg = <0x6>;
device_type = "ethernet-phy";
};
@@ -610,10 +333,6 @@
enet2: ucc@2200 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <3>;
- reg = <0x2200 0x200>;
- interrupts = <34>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk12";
@@ -637,10 +356,6 @@
enet1: ucc@3000 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <2>;
- reg = <0x3000 0x200>;
- interrupts = <33>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk17";
@@ -664,10 +379,6 @@
enet3: ucc@3200 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <4>;
- reg = <0x3200 0x200>;
- interrupts = <35>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk17";
@@ -691,10 +402,6 @@
enet5: ucc@3400 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <6>;
- reg = <0x3400 0x200>;
- interrupts = <41>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "none";
@@ -706,10 +413,6 @@
enet7: ucc@3600 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <8>;
- reg = <0x3600 0x200>;
- interrupts = <43>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "none";
@@ -717,50 +420,14 @@
phy-handle = <&qe_phy7>;
phy-connection-type = "sgmii";
};
-
- muram@10000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,qe-muram", "fsl,cpm-muram";
- ranges = <0x0 0x10000 0x20000>;
-
- data-only@0 {
- compatible = "fsl,qe-muram-data",
- "fsl,cpm-muram-data";
- reg = <0x0 0x20000>;
- };
- };
-
};
/* PCI Express */
pci1: pcie@e000a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 (PEX) */
- 00000 0x0 0x0 0x1 &mpic 0x0 0x1
- 00000 0x0 0x0 0x2 &mpic 0x1 0x1
- 00000 0x0 0x0 0x3 &mpic 0x2 0x1
- 00000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x1000000 0x0 0x00000000 0xe2800000 0x0 0x00800000>;
- sleep = <&pmc 0x20000000>;
- clock-frequency = <33333333>;
+ reg = <0x0 0xe000a000 0x0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x00800000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x10000000
@@ -771,20 +438,15 @@
};
};
- rio0: rapidio@e00c00000 {
- #address-cells = <2>;
- #size-cells = <2>;
- compatible = "fsl,mpc8569-rapidio", "fsl,rapidio-delta";
- reg = <0xe00c0000 0x20000>;
- ranges = <0x0 0x0 0xc0000000 0x0 0x20000000>;
- interrupts = <48 2 /* error */
- 49 2 /* bell_outb */
- 50 2 /* bell_inb */
- 53 2 /* msg1_tx */
- 54 2 /* msg1_rx */
- 55 2 /* msg2_tx */
- 56 2 /* msg2_rx */>;
- interrupt-parent = <&mpic>;
- sleep = <&pmc 0x00080000>;
+ rio: rapidio@e00c00000 {
+ reg = <0x0 0xe00c0000 0x0 0x20000>;
+ port1 {
+ ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
+ };
+ port2 {
+ status = "disabled";
+ };
};
};
+
+/include/ "fsl/mpc8569si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index f6c04d25e91..0c9f2955deb 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -9,67 +9,18 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8572si-pre.dtsi"
+
/ {
model = "fsl,MPC8572DS";
compatible = "fsl,MPC8572DS";
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8572@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,8572@1 {
- device_type = "cpu";
- reg = <0x1>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
memory {
device_type = "memory";
};
- localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+ board_lbc: lbc: localbus@ffe05000 {
reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
0x1 0x0 0x0 0xe0000000 0x08000000
@@ -78,601 +29,17 @@
0x4 0x0 0x0 0xffa40000 0x00040000
0x5 0x0 0x0 0xffa80000 0x00040000
0x6 0x0 0x0 0xffac0000 0x00040000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
-
- ramdisk@0 {
- reg = <0x0 0x03000000>;
- read-only;
- };
-
- diagnostic@3000000 {
- reg = <0x03000000 0x00e00000>;
- read-only;
- };
-
- dink@3e00000 {
- reg = <0x03e00000 0x00200000>;
- read-only;
- };
-
- kernel@4000000 {
- reg = <0x04000000 0x00400000>;
- read-only;
- };
-
- jffs2@4400000 {
- reg = <0x04400000 0x03b00000>;
- };
-
- dtb@7f00000 {
- reg = <0x07f00000 0x00080000>;
- read-only;
- };
-
- u-boot@7f80000 {
- reg = <0x07f80000 0x00080000>;
- read-only;
- };
- };
-
- nand@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x2 0x0 0x40000>;
-
- u-boot@0 {
- reg = <0x0 0x02000000>;
- read-only;
- };
-
- jffs2@2000000 {
- reg = <0x02000000 0x10000000>;
- };
-
- ramdisk@12000000 {
- reg = <0x12000000 0x08000000>;
- read-only;
- };
-
- kernel@1a000000 {
- reg = <0x1a000000 0x04000000>;
- };
-
- dtb@1e000000 {
- reg = <0x1e000000 0x01000000>;
- read-only;
- };
-
- empty@1f000000 {
- reg = <0x1f000000 0x21000000>;
- };
- };
-
- nand@4,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x4 0x0 0x40000>;
- };
-
- nand@5,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x5 0x0 0x40000>;
- };
-
- nand@6,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x6 0x0 0x40000>;
- };
};
- soc8572@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
+ board_soc: soc: soc8572@ffe00000 {
ranges = <0x0 0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8572-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- memory-controller@6000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x6000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8572-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x100000>; // L2, 1M
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- ptp_clock@24E00 {
- compatible = "fsl,etsec-ptp";
- reg = <0x24E00 0xB0>;
- interrupts = <68 2 69 2 70 2 71 2>;
- interrupt-parent = < &mpic >;
- fsl,tclk-period = <5>;
- fsl,tmr-prsc = <200>;
- fsl,tmr-add = <0xAAAAAAAB>;
- fsl,tmr-fiper1 = <0x3B9AC9FB>;
- fsl,tmr-fiper2 = <0x3B9AC9FB>;
- fsl,max-adj = <499999999>;
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x1>;
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x2>;
- };
- phy3: ethernet-phy@3 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x3>;
- };
-
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi2>;
- phy-handle = <&phy2>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet3: ethernet@27000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <3>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x27000 0x1000>;
- ranges = <0x0 0x27000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <37 2 38 2 39 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi3>;
- phy-handle = <&phy3>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi3: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8572-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- msi@41600 {
- compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
- "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x9fe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
};
- pci0: pcie@ffe08000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
+ board_pci0: pci0: pcie@ffe08000 {
reg = <0 0xffe08000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x11 func 0 - PCI slot 1 */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 1 - PCI slot 1 */
- 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 2 - PCI slot 1 */
- 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 3 - PCI slot 1 */
- 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 4 - PCI slot 1 */
- 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 5 - PCI slot 1 */
- 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 6 - PCI slot 1 */
- 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 7 - PCI slot 1 */
- 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x12 func 0 - PCI slot 2 */
- 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 1 - PCI slot 2 */
- 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 2 - PCI slot 2 */
- 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 3 - PCI slot 2 */
- 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 4 - PCI slot 2 */
- 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 5 - PCI slot 2 */
- 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 6 - PCI slot 2 */
- 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 7 - PCI slot 2 */
- 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
- // IDSEL 0x1c USB
- 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
- 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
- 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
- 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
- // IDSEL 0x1d Audio
- 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
- // IDSEL 0x1e Legacy
- 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
- 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
- // IDSEL 0x1f IDE/SATA
- 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
- 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
- >;
-
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -680,99 +47,14 @@
0x1000000 0x0 0x0
0x1000000 0x0 0x0
0x0 0x10000>;
- uli1575@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- isa@1e {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0xf000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0 0x1000000 0x0 0x0
- 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <9 2>;
- interrupt-parent = <&mpic>;
- };
-
- i8042@60 {
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
- interrupts = <1 3 12 3>;
- interrupt-parent =
- <&i8259>;
-
- keyboard@0 {
- reg = <0x0>;
- compatible = "pnpPNP,303";
- };
-
- mouse@1 {
- reg = <0x1>;
- compatible = "pnpPNP,f03";
- };
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
-
- gpio@400 {
- reg = <0x1 0x400 0x80>;
- };
- };
- };
};
-
};
pci1: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -784,31 +66,10 @@
};
pci2: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xc0000000
0x2000000 0x0 0xc0000000
0x0 0x20000000
@@ -819,3 +80,11 @@
};
};
};
+
+/*
+ * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8572si-post.dtsi"
+/include/ "mpc8572ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dtsi b/arch/powerpc/boot/dts/mpc8572ds.dtsi
new file mode 100644
index 00000000000..c3d4fac0532
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds.dtsi
@@ -0,0 +1,397 @@
+/*
+ * MPC8572DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ ramdisk@0 {
+ reg = <0x0 0x03000000>;
+ read-only;
+ };
+
+ diagnostic@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ read-only;
+ };
+
+ dink@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ read-only;
+ };
+
+ kernel@4000000 {
+ reg = <0x04000000 0x00400000>;
+ read-only;
+ };
+
+ jffs2@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ };
+
+ dtb@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ read-only;
+ };
+
+ u-boot@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ u-boot@0 {
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ jffs2@2000000 {
+ reg = <0x02000000 0x10000000>;
+ };
+
+ ramdisk@12000000 {
+ reg = <0x12000000 0x08000000>;
+ read-only;
+ };
+
+ kernel@1a000000 {
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ dtb@1e000000 {
+ reg = <0x1e000000 0x01000000>;
+ read-only;
+ };
+
+ empty@1f000000 {
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ nand@4,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x4 0x0 0x40000>;
+ };
+
+ nand@5,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x5 0x0 0x40000>;
+ };
+
+ nand@6,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x6 0x0 0x40000>;
+ };
+};
+
+&board_soc {
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <10 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <10 1 0 0>;
+ reg = <0x1>;
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <10 1 0 0>;
+ reg = <0x2>;
+ };
+ phy3: ethernet-phy@3 {
+ interrupts = <10 1 0 0>;
+ reg = <0x3>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ ptp_clock@24e00 {
+ fsl,tclk-period = <5>;
+ fsl,tmr-prsc = <200>;
+ fsl,tmr-add = <0xAAAAAAAB>;
+ fsl,tmr-fiper1 = <0x3B9AC9FB>;
+ fsl,tmr-fiper2 = <0x3B9AC9FB>;
+ fsl,max-adj = <499999999>;
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet2: ethernet@26000 {
+ tbi-handle = <&tbi2>;
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+
+ };
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet3: ethernet@27000 {
+ tbi-handle = <&tbi3>;
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@27520 {
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+};
+
+&board_pci0 {
+ pcie@0 {
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x11 func 0 - PCI slot 1 */
+ 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 1 - PCI slot 1 */
+ 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 2 - PCI slot 1 */
+ 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 3 - PCI slot 1 */
+ 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 4 - PCI slot 1 */
+ 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 5 - PCI slot 1 */
+ 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 6 - PCI slot 1 */
+ 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x11 func 7 - PCI slot 1 */
+ 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1 0 0
+ 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x12 func 0 - PCI slot 2 */
+ 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 1 - PCI slot 2 */
+ 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 2 - PCI slot 2 */
+ 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 3 - PCI slot 2 */
+ 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 4 - PCI slot 2 */
+ 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 5 - PCI slot 2 */
+ 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 6 - PCI slot 2 */
+ 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ /* IDSEL 0x12 func 7 - PCI slot 2 */
+ 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1 0 0
+ 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1 0 0
+
+ // IDSEL 0x1c USB
+ 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+ 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+ 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+ 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+ // IDSEL 0x1d Audio
+ 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+ // IDSEL 0x1e Legacy
+ 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+ 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+ // IDSEL 0x1f IDE/SATA
+ 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+ 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+ >;
+
+
+ uli1575@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0xf000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0
+ 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <9 2 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+ interrupts = <1 3 12 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0x0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <0x1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+
+ gpio@400 {
+ reg = <0x1 0x400 0x80>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_36b.dts b/arch/powerpc/boot/dts/mpc8572ds_36b.dts
index f6365db3b97..6c3d0b305e1 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_36b.dts
@@ -1,5 +1,5 @@
/*
- * MPC8572 DS Device Tree Source
+ * MPC8572DS Device Tree Source (36-bit address map)
*
* Copyright 2007-2009 Freescale Semiconductor Inc.
*
@@ -9,67 +9,18 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/mpc8572si-pre.dtsi"
+
/ {
model = "fsl,MPC8572DS";
compatible = "fsl,MPC8572DS";
- #address-cells = <2>;
- #size-cells = <2>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8572@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,8572@1 {
- device_type = "cpu";
- reg = <0x1>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
memory {
device_type = "memory";
};
- localbus@fffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+ board_lbc: lbc: localbus@fffe05000 {
reg = <0xf 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
0x1 0x0 0xf 0xe0000000 0x08000000
@@ -78,588 +29,17 @@
0x4 0x0 0xf 0xffa40000 0x00040000
0x5 0x0 0xf 0xffa80000 0x00040000
0x6 0x0 0xf 0xffac0000 0x00040000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
-
- ramdisk@0 {
- reg = <0x0 0x03000000>;
- read-only;
- };
-
- diagnostic@3000000 {
- reg = <0x03000000 0x00e00000>;
- read-only;
- };
-
- dink@3e00000 {
- reg = <0x03e00000 0x00200000>;
- read-only;
- };
-
- kernel@4000000 {
- reg = <0x04000000 0x00400000>;
- read-only;
- };
-
- jffs2@4400000 {
- reg = <0x04400000 0x03b00000>;
- };
-
- dtb@7f00000 {
- reg = <0x07f00000 0x00080000>;
- read-only;
- };
-
- u-boot@7f80000 {
- reg = <0x07f80000 0x00080000>;
- read-only;
- };
- };
-
- nand@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x2 0x0 0x40000>;
-
- u-boot@0 {
- reg = <0x0 0x02000000>;
- read-only;
- };
-
- jffs2@2000000 {
- reg = <0x02000000 0x10000000>;
- };
-
- ramdisk@12000000 {
- reg = <0x12000000 0x08000000>;
- read-only;
- };
-
- kernel@1a000000 {
- reg = <0x1a000000 0x04000000>;
- };
-
- dtb@1e000000 {
- reg = <0x1e000000 0x01000000>;
- read-only;
- };
-
- empty@1f000000 {
- reg = <0x1f000000 0x21000000>;
- };
- };
-
- nand@4,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x4 0x0 0x40000>;
- };
-
- nand@5,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x5 0x0 0x40000>;
- };
-
- nand@6,0 {
- compatible = "fsl,mpc8572-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x6 0x0 0x40000>;
- };
};
- soc8572@fffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
+ board_soc: soc: soc8572@fffe00000 {
ranges = <0x0 0xf 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8572-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- memory-controller@6000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x6000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8572-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x100000>; // L2, 1M
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x1>;
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x2>;
- };
- phy3: ethernet-phy@3 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x3>;
- };
-
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi2>;
- phy-handle = <&phy2>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet3: ethernet@27000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <3>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x27000 0x1000>;
- ranges = <0x0 0x27000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <37 2 38 2 39 2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi3>;
- phy-handle = <&phy3>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi3: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8572-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- msi@41600 {
- compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
- "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x9fe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
};
- pci0: pcie@fffe08000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
+ board_pci0: pci0: pcie@fffe08000 {
reg = <0xf 0xffe08000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x11 func 0 - PCI slot 1 */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 1 - PCI slot 1 */
- 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 2 - PCI slot 1 */
- 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 3 - PCI slot 1 */
- 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 4 - PCI slot 1 */
- 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 5 - PCI slot 1 */
- 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 6 - PCI slot 1 */
- 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 7 - PCI slot 1 */
- 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x12 func 0 - PCI slot 2 */
- 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 1 - PCI slot 2 */
- 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 2 - PCI slot 2 */
- 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 3 - PCI slot 2 */
- 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 4 - PCI slot 2 */
- 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 5 - PCI slot 2 */
- 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 6 - PCI slot 2 */
- 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 7 - PCI slot 2 */
- 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
- // IDSEL 0x1c USB
- 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
- 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
- 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
- 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
- // IDSEL 0x1d Audio
- 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
- // IDSEL 0x1e Legacy
- 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
- 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
- // IDSEL 0x1f IDE/SATA
- 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
- 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
- >;
-
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -667,99 +47,14 @@
0x1000000 0x0 0x0
0x1000000 0x0 0x0
0x0 0x10000>;
- uli1575@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0xe0000000
- 0x2000000 0x0 0xe0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- isa@1e {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0xf000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0 0x1000000 0x0 0x0
- 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <9 2>;
- interrupt-parent = <&mpic>;
- };
-
- i8042@60 {
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
- interrupts = <1 3 12 3>;
- interrupt-parent =
- <&i8259>;
-
- keyboard@0 {
- reg = <0x0>;
- compatible = "pnpPNP,303";
- };
-
- mouse@1 {
- reg = <0x1>;
- compatible = "pnpPNP,f03";
- };
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
-
- gpio@400 {
- reg = <0x1 0x400 0x80>;
- };
- };
- };
};
-
};
pci1: pcie@fffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -771,31 +66,10 @@
};
pci2: pcie@fffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x00010000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -806,3 +80,11 @@
};
};
};
+
+/*
+ * mpc8572ds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/mpc8572si-post.dtsi"
+/include/ "mpc8572ds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
index 3375c2ab0c3..d34d1271212 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -14,494 +14,69 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "mpc8572ds.dts"
+
/ {
model = "fsl,MPC8572DS";
compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- pci0 = &pci0;
- pci1 = &pci1;
- };
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
PowerPC,8572@0 {
- device_type = "cpu";
- reg = <0x0>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
};
-
+ PowerPC,8572@1 {
+ status = "disabled";
+ };
};
- memory {
- device_type = "memory";
- reg = <0x0 0x0>; // Filled by U-Boot
+ localbus@ffe05000 {
+ status = "disabled";
};
soc8572@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
+ serial@4600 {
+ status = "disabled";
};
-
- ecm@1000 {
- compatible = "fsl,mpc8572-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
+ dma@c300 {
+ status = "disabled";
};
-
- memory-controller@2000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
+ gpio-controller@f000 {
};
-
- memory-controller@6000 {
- compatible = "fsl,mpc8572-memory-controller";
- reg = <0x6000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8572-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
+ l2-cache-controller@20000 {
cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
};
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
+ ethernet@26000 {
+ status = "disabled";
};
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
+ mdio@26520 {
+ status = "disabled";
};
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
+ ethernet@27000 {
+ status = "disabled";
};
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <10 1>;
- reg = <0x1>;
- };
- };
+ mdio@27520 {
+ status = "disabled";
};
-
- enet1: ethernet@25000 {
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
+ pic@40000 {
+ protected-sources = <
+ 31 32 33 37 38 39 /* enet2 enet3 */
+ 76 77 78 79 26 42 /* dma2 pci2 serial*/
+ 0xe4 0xe5 0xe6 0xe7 /* msi */
+ >;
};
msi@41600 {
- compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
msi-available-ranges = <0 0x80>;
interrupts = <
0xe0 0
0xe1 0
0xe2 0
0xe3 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8572-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
- "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x9fe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- protected-sources = <
- 31 32 33 37 38 39 /* enet2 enet3 */
- 76 77 78 79 26 42 /* dma2 pci2 serial*/
- 0xe4 0xe5 0xe6 0xe7 /* msi */
- >;
};
- };
-
- pci0: pcie@ffe08000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xffe08000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x11 func 0 - PCI slot 1 */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 1 - PCI slot 1 */
- 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 2 - PCI slot 1 */
- 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 3 - PCI slot 1 */
- 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 4 - PCI slot 1 */
- 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 5 - PCI slot 1 */
- 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 6 - PCI slot 1 */
- 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 func 7 - PCI slot 1 */
- 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x12 func 0 - PCI slot 2 */
- 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 1 - PCI slot 2 */
- 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 2 - PCI slot 2 */
- 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 3 - PCI slot 2 */
- 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 4 - PCI slot 2 */
- 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 5 - PCI slot 2 */
- 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 6 - PCI slot 2 */
- 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
-
- /* IDSEL 0x12 func 7 - PCI slot 2 */
- 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
- 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
- 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
- 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
-
- // IDSEL 0x1c USB
- 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
- 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
- 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
- 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
-
- // IDSEL 0x1d Audio
- 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
- // IDSEL 0x1e Legacy
- 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
- 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
- // IDSEL 0x1f IDE/SATA
- 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
- 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
-
- >;
-
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- uli1575@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- isa@1e {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0xf000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0 0x1000000 0x0 0x0
- 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <9 2>;
- interrupt-parent = <&mpic>;
- };
-
- i8042@60 {
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
- interrupts = <1 3 12 3>;
- interrupt-parent =
- <&i8259>;
-
- keyboard@0 {
- reg = <0x0>;
- compatible = "pnpPNP,303";
- };
-
- mouse@1 {
- reg = <0x1>;
- compatible = "pnpPNP,f03";
- };
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
-
- gpio@400 {
- reg = <0x1 0x400 0x80>;
- };
- };
- };
+ timer@42100 {
+ status = "disabled";
};
-
};
-
- pci1: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xffe09000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- };
+ pcie@ffe0a000 {
+ status = "disabled";
};
};
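With the complete board description now kept in mpc8572ds.dts, each per-core CAMP tree can include it whole and mark the nodes owned by the other core as status = "disabled", instead of duplicating every device node. A minimal sketch of that pattern, assuming illustrative names (in a real tree the re-opened node names, including unit addresses, must match the included tree exactly, and the included .dts carries its own /dts-v1/ tag):

	/include/ "board-full.dts"		/* complete board description */

	/ {
		cpus {
			cpu@1 {
				status = "disabled";	/* this image runs only on core 0 */
			};
		};
		soc@ffe00000 {
			ethernet@26000 {
				status = "disabled";	/* device handed to the core 1 image */
			};
		};
	};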
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
index e7b477f6a3f..d6a8fafc0d0 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -15,169 +15,74 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "mpc8572ds.dts"
+
/ {
model = "fsl,MPC8572DS";
compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- pci2 = &pci2;
- };
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
+ PowerPC,8572@0 {
+ status = "disabled";
+ };
PowerPC,8572@1 {
- device_type = "cpu";
- reg = <0x1>;
- d-cache-line-size = <32>; // 32 bytes
- i-cache-line-size = <32>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>;
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
};
};
- memory {
- device_type = "memory";
- reg = <0x0 0x0>; // Filled by U-Boot
+ localbus@ffe05000 {
+ status = "disabled";
};
soc8572@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8572-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
+ ecm-law@0 {
+ status = "disabled";
};
-
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8572-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
+ ecm@1000 {
+ status = "disabled";
+ };
+ memory-controller@2000 {
+ status = "disabled";
+ };
+ memory-controller@6000 {
+ status = "disabled";
+ };
+ i2c@3000 {
+ status = "disabled";
+ };
+ i2c@3100 {
+ status = "disabled";
+ };
+ serial@4500 {
+ status = "disabled";
+ };
+ gpio-controller@f000 {
+ status = "disabled";
+ };
+ l2-cache-controller@20000 {
+ cache-size = <0x80000>; // L2, 512K
+ };
+ dma@21300 {
+ status = "disabled";
+ };
+ ethernet@24000 {
+ status = "disabled";
};
-
mdio@24520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x24520 0x20>;
-
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- reg = <0x2>;
- };
- phy3: ethernet-phy@3 {
- interrupt-parent = <&mpic>;
- reg = <0x3>;
- };
+ status = "disabled";
};
-
- enet2: ethernet@26000 {
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy2>;
- phy-connection-type = "rgmii-id";
+ ptp_clock@24e00 {
+ status = "disabled";
};
-
- enet3: ethernet@27000 {
- cell-index = <3>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x27000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <37 2 38 2 39 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy3>;
- phy-connection-type = "rgmii-id";
+ ethernet@25000 {
+ status = "disabled";
};
-
- msi@41600 {
- compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0x80 0x80>;
- interrupts = <
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
+ mdio@25520 {
+ status = "disabled";
};
-
- serial0: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
+ crypto@30000 {
+ status = "disabled";
};
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
+ pic@40000 {
protected-sources = <
18 16 10 42 45 58 /* MEM L2 mdio serial crypto */
29 30 34 35 36 40 /* enet0 enet1 */
@@ -189,41 +94,25 @@
0xe0 0xe1 0xe2 0xe3 /* msi */
>;
};
- };
-
- pci2: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xffe0a000 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
- 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
+ timer@41100 {
+ status = "disabled";
};
+ msi@41600 {
+ msi-available-ranges = <0x80 0x80>;
+ interrupts = <
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ };
+ global-utilities@e0000 {
+ status = "disabled";
+ };
+ };
+ pcie@ffe08000 {
+ status = "disabled";
+ };
+ pcie@ffe09000 {
+ status = "disabled";
};
};
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 83c3218cb4d..6a109a0ceac 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -175,7 +175,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -186,7 +186,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 848320e4d3c..1e8666ccbed 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -26,13 +26,6 @@
serial1 = &serial1;
pci0 = &pci0;
pci1 = &pci1;
-/*
- * Only one of Rapid IO or PCI can be present due to HW limitations and
- * due to the fact that the 2 now share address space in the new memory
- * map. The most likely case is that we have PCI, so comment out the
- * rapidio node. Leave it here for reference.
- */
- /* rapidio0 = &rapidio0; */
};
cpus {
@@ -335,7 +328,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -345,7 +338,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <28 2>;
@@ -361,6 +354,41 @@
device_type = "open-pic";
};
+ rmu: rmu@d3000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,srio-rmu";
+ reg = <0xd3000 0x500>;
+ ranges = <0x0 0xd3000 0x500>;
+
+ message-unit@0 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x0 0x100>;
+ interrupts = <
+ 53 2 /* msg1_tx_irq */
+ 54 2>;/* msg1_rx_irq */
+ };
+ message-unit@100 {
+ compatible = "fsl,srio-msg-unit";
+ reg = <0x100 0x100>;
+ interrupts = <
+ 55 2 /* msg2_tx_irq */
+ 56 2>;/* msg2_rx_irq */
+ };
+ doorbell-unit@400 {
+ compatible = "fsl,srio-dbell-unit";
+ reg = <0x400 0x80>;
+ interrupts = <
+ 49 2 /* bell_outb_irq */
+ 50 2>;/* bell_inb_irq */
+ };
+ port-write-unit@4e0 {
+ compatible = "fsl,srio-port-write-unit";
+ reg = <0x4e0 0x20>;
+ interrupts = <48 2>;
+ };
+ };
+
global-utilities@e0000 {
compatible = "fsl,mpc8641-guts";
reg = <0xe0000 0x1000>;
@@ -612,16 +640,27 @@
};
};
/*
- rapidio0: rapidio@ffec0000 {
+ * Only one of Rapid IO or PCI can be present due to HW limitations and
+ * due to the fact that the 2 now share address space in the new memory
+ * map. The most likely case is that we have PCI, so comment out the
+ * rapidio node. Leave it here for reference.
+
+ rapidio@ffec0000 {
+ reg = <0xffec0000 0x11000>;
+ compatible = "fsl,srio";
+ interrupt-parent = <&mpic>;
+ interrupts = <48 2>;
#address-cells = <2>;
#size-cells = <2>;
- compatible = "fsl,rapidio-delta";
- reg = <0xffec0000 0x20000>;
- ranges = <0 0 0x80000000 0 0x20000000>;
- interrupt-parent = <&mpic>;
- // err_irq bell_outb_irq bell_inb_irq
- // msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq
- interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ ranges = <0 0 0x80000000 0 0x20000000>;
+ };
};
*/
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
index 8be8e701e1d..fd4cd4da60b 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
@@ -328,7 +328,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -338,7 +338,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <28 2>;
diff --git a/arch/powerpc/boot/dts/obs600.dts b/arch/powerpc/boot/dts/obs600.dts
new file mode 100644
index 00000000000..18e7d79ee4c
--- /dev/null
+++ b/arch/powerpc/boot/dts/obs600.dts
@@ -0,0 +1,314 @@
+/*
+ * Device Tree Source for PlatHome OpenBlockS 600 (405EX)
+ *
+ * Copyright 2011 Ben Herrenschmidt, IBM Corp.
+ *
+ * Based on Kilauea by:
+ *
+ * Copyright 2007-2009 DENX Software Engineering, Stefan Roese <sr@denx.de>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ model = "PlatHome,OpenBlockS 600";
+ compatible = "plathome,obs600";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ ethernet0 = &EMAC0;
+ ethernet1 = &EMAC1;
+ serial0 = &UART0;
+ serial1 = &UART1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,405EX";
+ reg = <0x00000000>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+ timebase-frequency = <0>; /* Filled in by U-Boot */
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <16384>; /* 16 kB */
+ d-cache-size = <16384>; /* 16 kB */
+ dcr-controller;
+ dcr-access-method = "native";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000>; /* Filled in by U-Boot */
+ };
+
+ UIC0: interrupt-controller {
+ compatible = "ibm,uic-405ex", "ibm,uic";
+ interrupt-controller;
+ cell-index = <0>;
+ dcr-reg = <0x0c0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UIC1: interrupt-controller1 {
+ compatible = "ibm,uic-405ex","ibm,uic";
+ interrupt-controller;
+ cell-index = <1>;
+ dcr-reg = <0x0d0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ UIC2: interrupt-controller2 {
+ compatible = "ibm,uic-405ex","ibm,uic";
+ interrupt-controller;
+ cell-index = <2>;
+ dcr-reg = <0x0e0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x1c 0x4 0x1d 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ CPM0: cpm {
+ compatible = "ibm,cpm";
+ dcr-access-method = "native";
+ dcr-reg = <0x0b0 0x003>;
+ unused-units = <0x00000000>;
+ idle-doze = <0x02000000>;
+ standby = <0xe3e74800>;
+ };
+
+ plb {
+ compatible = "ibm,plb-405ex", "ibm,plb4";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+
+ SDRAM0: memory-controller {
+ compatible = "ibm,sdram-405ex", "ibm,sdram-4xx-ddr2";
+ dcr-reg = <0x010 0x002>;
+ interrupt-parent = <&UIC2>;
+ interrupts = <0x5 0x4 /* ECC DED Error */
+ 0x6 0x4>; /* ECC SEC Error */
+ };
+
+ CRYPTO: crypto@ef700000 {
+ compatible = "amcc,ppc405ex-crypto", "amcc,ppc4xx-crypto";
+ reg = <0xef700000 0x80400>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x17 0x2>;
+ };
+
+ MAL0: mcmal {
+ compatible = "ibm,mcmal-405ex", "ibm,mcmal2";
+ dcr-reg = <0x180 0x062>;
+ num-tx-chans = <2>;
+ num-rx-chans = <2>;
+ interrupt-parent = <&MAL0>;
+ interrupts = <0x0 0x1 0x2 0x3 0x4>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </*TXEOB*/ 0x0 &UIC0 0xa 0x4
+ /*RXEOB*/ 0x1 &UIC0 0xb 0x4
+ /*SERR*/ 0x2 &UIC1 0x0 0x4
+ /*TXDE*/ 0x3 &UIC1 0x1 0x4
+ /*RXDE*/ 0x4 &UIC1 0x2 0x4>;
+ interrupt-map-mask = <0xffffffff>;
+ };
+
+ POB0: opb {
+ compatible = "ibm,opb-405ex", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x80000000 0x80000000 0x10000000
+ 0xef600000 0xef600000 0x00a00000
+ 0xf0000000 0xf0000000 0x10000000>;
+ dcr-reg = <0x0a0 0x005>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+
+ EBC0: ebc {
+ compatible = "ibm,ebc-405ex", "ibm,ebc";
+ dcr-reg = <0x012 0x002>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+ /* ranges property is supplied by U-Boot */
+ interrupts = <0x5 0x1>;
+ interrupt-parent = <&UIC1>;
+
+ nor_flash@0,0 {
+ compatible = "amd,s29gl512n", "cfi-flash";
+ bank-width = <2>;
+ reg = <0x00000000 0x00000000 0x08000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "kernel + initrd";
+ reg = <0x00000000 0x03de0000>;
+ };
+ partition@3de0000 {
+ label = "user config area";
+ reg = <0x03de0000 0x00080000>;
+ };
+ partition@3e60000 {
+ label = "user program area";
+ reg = <0x03e60000 0x04000000>;
+ };
+ partition@7e60000 {
+ label = "flat device tree";
+ reg = <0x07e60000 0x00080000>;
+ };
+ partition@7ee0000 {
+ label = "test program";
+ reg = <0x07ee0000 0x00080000>;
+ };
+ partition@7f60000 {
+ label = "u-boot env";
+ reg = <0x07f60000 0x00040000>;
+ };
+ partition@7fa0000 {
+ label = "u-boot";
+ reg = <0x07fa0000 0x00060000>;
+ };
+ };
+ };
+
+ UART0: serial@ef600200 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0xef600200 0x00000008>;
+ virtual-reg = <0xef600200>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+ current-speed = <0>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x1a 0x4>;
+ };
+
+ UART1: serial@ef600300 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0xef600300 0x00000008>;
+ virtual-reg = <0xef600300>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+ current-speed = <0>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x1 0x4>;
+ };
+
+ IIC0: i2c@ef600400 {
+ compatible = "ibm,iic-405ex", "ibm,iic";
+ reg = <0xef600400 0x00000014>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x2 0x4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@68 {
+ compatible = "dallas,ds1340";
+ reg = <0x68>;
+ };
+ };
+
+ IIC1: i2c@ef600500 {
+ compatible = "ibm,iic-405ex", "ibm,iic";
+ reg = <0xef600500 0x00000014>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x7 0x4>;
+ };
+
+ RGMII0: emac-rgmii@ef600b00 {
+ compatible = "ibm,rgmii-405ex", "ibm,rgmii";
+ reg = <0xef600b00 0x00000104>;
+ has-mdio;
+ };
+
+ EMAC0: ethernet@ef600900 {
+ linux,network-index = <0x0>;
+ device_type = "network";
+ compatible = "ibm,emac-405ex", "ibm,emac4sync";
+ interrupt-parent = <&EMAC0>;
+ interrupts = <0x0 0x1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </*Status*/ 0x0 &UIC0 0x18 0x4
+ /*Wake*/ 0x1 &UIC1 0x1d 0x4>;
+ reg = <0xef600900 0x000000c4>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
+ phy-mode = "rgmii";
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <0>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+ };
+
+ EMAC1: ethernet@ef600a00 {
+ linux,network-index = <0x1>;
+ device_type = "network";
+ compatible = "ibm,emac-405ex", "ibm,emac4sync";
+ interrupt-parent = <&EMAC1>;
+ interrupts = <0x0 0x1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = </*Status*/ 0x0 &UIC0 0x19 0x4
+ /*Wake*/ 0x1 &UIC1 0x1f 0x4>;
+ reg = <0xef600a00 0x000000c4>;
+ local-mac-address = [000000000000]; /* Filled in by U-Boot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <1>;
+ mal-rx-channel = <1>;
+ cell-index = <1>;
+ max-frame-size = <9000>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
+ phy-mode = "rgmii";
+ phy-map = <0x00000000>;
+ rgmii-device = <&RGMII0>;
+ rgmii-channel = <1>;
+ has-inverted-stacr-oc;
+ has-new-stacr-staopc;
+ };
+
+ GPIO: gpio@ef600800 {
+ device_type = "gpio";
+ compatible = "ibm,gpio-405ex", "ibm,ppc4xx-gpio";
+ reg = <0xef600800 0x50>;
+ };
+ };
+ };
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@ef600200";
+ };
+};
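The MAL and EMAC nodes in this tree point interrupt-parent at themselves and use interrupt-map/interrupt-map-mask to fan their local interrupt indices out to the UICs. A minimal sketch of that idiom, with made-up node names and numbers, purely illustrative:

    DEMO: demo-device {
        interrupt-parent = <&DEMO>;      /* resolve IRQs through our own map */
        interrupts = <0x0 0x1>;          /* local indices, not UIC numbers */
        #interrupt-cells = <1>;
        #address-cells = <0>;
        #size-cells = <0>;
        interrupt-map = </* idx 0 */ 0x0 &UIC0 0x18 0x4
                         /* idx 1 */ 0x1 &UIC1 0x1d 0x4>;
        interrupt-map-mask = <0xffffffff>;
    };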
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
index d6c669c888e..b868d22984e 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ b/arch/powerpc/boot/dts/p1010rdb.dts
@@ -9,230 +9,33 @@
* option) any later version.
*/
-/include/ "p1010si.dtsi"
+/include/ "fsl/p1010si-pre.dtsi"
/ {
model = "fsl,P1010RDB";
compatible = "fsl,P1010RDB";
- aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- pci0 = &pci0;
- pci1 = &pci1;
- can0 = &can0;
- can1 = &can1;
- };
-
memory {
device_type = "memory";
};
- ifc@ffe1e000 {
+ board_ifc: ifc: ifc@ffe1e000 {
/* NOR, NAND Flashes and CPLD on board */
ranges = <0x0 0x0 0x0 0xee000000 0x02000000
0x1 0x0 0x0 0xff800000 0x00010000
0x3 0x0 0x0 0xffb00000 0x00000020>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x2000000>;
- bank-width = <2>;
- device-width = <1>;
-
- partition@40000 {
- /* 256KB for DTB Image */
- reg = <0x00040000 0x00040000>;
- label = "NOR DTB Image";
- };
-
- partition@80000 {
- /* 7 MB for Linux Kernel Image */
- reg = <0x00080000 0x00700000>;
- label = "NOR Linux Kernel Image";
- };
-
- partition@800000 {
- /* 20MB for JFFS2 based Root file System */
- reg = <0x00800000 0x01400000>;
- label = "NOR JFFS2 Root File System";
- };
-
- partition@1f00000 {
- /* This location must not be altered */
- /* 512KB for u-boot Bootloader Image */
- /* 512KB for u-boot Environment Variables */
- reg = <0x01f00000 0x00100000>;
- label = "NOR U-Boot Image";
- read-only;
- };
- };
-
- nand@1,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,ifc-nand";
- reg = <0x1 0x0 0x10000>;
-
- partition@0 {
- /* This location must not be altered */
- /* 1MB for u-boot Bootloader Image */
- reg = <0x0 0x00100000>;
- label = "NAND U-Boot Image";
- read-only;
- };
-
- partition@100000 {
- /* 1MB for DTB Image */
- reg = <0x00100000 0x00100000>;
- label = "NAND DTB Image";
- };
-
- partition@200000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00200000 0x00400000>;
- label = "NAND Linux Kernel Image";
- };
-
- partition@600000 {
- /* 4MB for Compressed Root file System Image */
- reg = <0x00600000 0x00400000>;
- label = "NAND Compressed RFS Image";
- };
-
- partition@a00000 {
- /* 15MB for JFFS2 based Root file System */
- reg = <0x00a00000 0x00f00000>;
- label = "NAND JFFS2 Root File System";
- };
-
- partition@1900000 {
- /* 7MB for User Area */
- reg = <0x01900000 0x00700000>;
- label = "NAND User area";
- };
- };
-
- cpld@3,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p1010rdb-cpld";
- reg = <0x3 0x0 0x0000020>;
- bank-width = <1>;
- device-width = <1>;
- };
+ reg = <0x0 0xffe1e000 0 0x2000>;
};
- soc@ffe00000 {
- spi@7000 {
- flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,s25sl12801";
- reg = <0>;
- spi-max-frequency = <50000000>;
-
- partition@0 {
- /* 1MB for u-boot Bootloader Image */
- /* 1MB for Environment */
- reg = <0x0 0x00100000>;
- label = "SPI Flash U-Boot Image";
- read-only;
- };
-
- partition@100000 {
- /* 512KB for DTB Image */
- reg = <0x00100000 0x00080000>;
- label = "SPI Flash DTB Image";
- };
-
- partition@180000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00180000 0x00400000>;
- label = "SPI Flash Linux Kernel Image";
- };
-
- partition@580000 {
- /* 4MB for Compressed RFS Image */
- reg = <0x00580000 0x00400000>;
- label = "SPI Flash Compressed RFSImage";
- };
-
- partition@980000 {
- /* 6.5MB for JFFS2 based RFS */
- reg = <0x00980000 0x00680000>;
- label = "SPI Flash JFFS2 RFS";
- };
- };
- };
-
- usb@22000 {
- phy_type = "utmi";
- };
-
- mdio@24000 {
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x1>;
- };
-
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x0>;
- };
-
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x2>;
- };
- };
-
- enet0: ethernet@b0000 {
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
- };
-
- enet1: ethernet@b1000 {
- phy-handle = <&phy1>;
- tbi-handle = <&tbi0>;
- phy-connection-type = "sgmii";
- };
-
- enet2: ethernet@b2000 {
- phy-handle = <&phy2>;
- tbi-handle = <&tbi1>;
- phy-connection-type = "sgmii";
- };
+ board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
};
pci0: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
-
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -244,24 +47,10 @@
};
pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -272,3 +61,6 @@
};
};
};
+
+/include/ "p1010rdb.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
new file mode 100644
index 00000000000..d4c4a773028
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -0,0 +1,234 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_ifc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x2000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 7 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00700000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@800000 {
+ /* 20MB for JFFS2 based Root file System */
+ reg = <0x00800000 0x01400000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@1f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x01f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x1 0x0 0x10000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00f00000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1900000 {
+ /* 7MB for User Area */
+ reg = <0x01900000 0x00700000>;
+ label = "NAND User area";
+ };
+ };
+
+ cpld@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1010rdb-cpld";
+ reg = <0x3 0x0 0x0000020>;
+ bank-width = <1>;
+ device-width = <1>;
+ };
+};
+
+&board_soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+
+ partition@0 {
+ /* 1MB for u-boot Bootloader Image */
+ /* 1MB for Environment */
+ reg = <0x0 0x00100000>;
+ label = "SPI Flash U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 512KB for DTB Image */
+ reg = <0x00100000 0x00080000>;
+ label = "SPI Flash DTB Image";
+ };
+
+ partition@180000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00180000 0x00400000>;
+ label = "SPI Flash Linux Kernel Image";
+ };
+
+ partition@580000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00580000 0x00400000>;
+ label = "SPI Flash Compressed RFSImage";
+ };
+
+ partition@980000 {
+ /* 6.5MB for JFFS2 based RFS */
+ reg = <0x00980000 0x00680000>;
+ label = "SPI Flash JFFS2 RFS";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "utmi";
+ dr_mode = "host";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x1>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x0>;
+ };
+
+ phy2: ethernet-phy@2 {
+ interrupts = <2 1 0 0>;
+ reg = <0x2>;
+ };
+
+ tbi-phy@3 {
+ device_type = "tbi-phy";
+ reg = <0x3>;
+ };
+ };
+
+ mdio@25000 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy1>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy2>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dts b/arch/powerpc/boot/dts/p1010rdb_36b.dts
new file mode 100644
index 00000000000..64776f4a465
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_36b.dts
@@ -0,0 +1,89 @@
+/*
+ * P1010 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB";
+ compatible = "fsl,P1010RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ board_ifc: ifc: ifc@fffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0xf 0xee000000 0x02000000
+ 0x1 0x0 0xf 0xff800000 0x00010000
+ 0x3 0x0 0xf 0xffb00000 0x00000020>;
+ reg = <0xf 0xffe1e000 0 0x2000>;
+ };
+
+ board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1010rdb.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010si.dtsi b/arch/powerpc/boot/dts/p1010si.dtsi
deleted file mode 100644
index cabe0a453ae..00000000000
--- a/arch/powerpc/boot/dts/p1010si.dtsi
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * P1010si Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
- compatible = "fsl,P1010";
- #address-cells = <2>;
- #size-cells = <2>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P1010@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
- };
-
- ifc@ffe1e000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
- reg = <0x0 0xffe1e000 0 0x2000>;
- interrupts = <16 2 19 2>;
- interrupt-parent = <&mpic>;
- };
-
- soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p1010-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1010-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1010-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- spi@7000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc8536-espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- fsl,espi-num-chipselects = <1>;
- };
-
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- sata@18000 {
- compatible = "fsl,pq-sata-v2";
- reg = <0x18000 0x1000>;
- cell-index = <1>;
- interrupts = <74 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- sata@19000 {
- compatible = "fsl,pq-sata-v2";
- reg = <0x19000 0x1000>;
- cell-index = <2>;
- interrupts = <41 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- can0: can@1c000 {
- compatible = "fsl,p1010-flexcan";
- reg = <0x1c000 0x1000>;
- interrupts = <48 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- can1: can@1d000 {
- compatible = "fsl,p1010-flexcan";
- reg = <0x1d000 0x1000>;
- interrupts = <61 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1010-l2-cache-controller",
- "fsl,p1014-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2,256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p1010-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,p1010-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- usb@22000 {
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- dr_mode = "host";
- };
-
- mdio@24000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x24000 0x1000 0xb0030 0x4>;
- };
-
- mdio@25000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-tbi";
- reg = <0x25000 0x1000 0xb1030 0x4>;
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- mdio@26000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-tbi";
- reg = <0x26000 0x1000 0xb1030 0x4>;
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- sdhci@2e000 {
- compatible = "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x8>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- fsl,sdhci-auto-cmd12;
- };
-
- enet0: ethernet@b0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb0000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- interrupts = <29 2 30 2 34 2>;
- };
-
- };
-
- enet1: ethernet@b1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb1000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- interrupts = <35 2 36 2 40 2>;
- };
-
- };
-
- enet2: ethernet@b2000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb2000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- interrupts = <31 2 32 2 33 2>;
- };
-
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1010-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1010-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
- };
-
- pci0: pcie@ffe09000 {
- compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- pci1: pcie@ffe0a000 {
- compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index d6a8ae45813..518bf99b1f5 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -9,267 +9,33 @@
* option) any later version.
*/
-/include/ "p1020si.dtsi"
-
+/include/ "fsl/p1020si-pre.dtsi"
/ {
model = "fsl,P1020RDB";
compatible = "fsl,P1020RDB";
- aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- pci0 = &pci0;
- pci1 = &pci1;
- };
-
memory {
device_type = "memory";
};
- localbus@ffe05000 {
+ board_lbc: lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
/* NOR, NAND Flashes and Vitesse 5 port L2 switch */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
0x1 0x0 0x0 0xffa00000 0x00040000
0x2 0x0 0x0 0xffb00000 0x00020000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x1000000>;
- bank-width = <2>;
- device-width = <1>;
-
- partition@0 {
- /* This location must not be altered */
- /* 256KB for Vitesse 7385 Switch firmware */
- reg = <0x0 0x00040000>;
- label = "NOR (RO) Vitesse-7385 Firmware";
- read-only;
- };
-
- partition@40000 {
- /* 256KB for DTB Image */
- reg = <0x00040000 0x00040000>;
- label = "NOR (RO) DTB Image";
- read-only;
- };
-
- partition@80000 {
- /* 3.5 MB for Linux Kernel Image */
- reg = <0x00080000 0x00380000>;
- label = "NOR (RO) Linux Kernel Image";
- read-only;
- };
-
- partition@400000 {
- /* 11MB for JFFS2 based Root file System */
- reg = <0x00400000 0x00b00000>;
- label = "NOR (RW) JFFS2 Root File System";
- };
-
- partition@f00000 {
- /* This location must not be altered */
- /* 512KB for u-boot Bootloader Image */
- /* 512KB for u-boot Environment Variables */
- reg = <0x00f00000 0x00100000>;
- label = "NOR (RO) U-Boot Image";
- read-only;
- };
- };
-
- nand@1,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p1020-fcm-nand",
- "fsl,elbc-fcm-nand";
- reg = <0x1 0x0 0x40000>;
-
- partition@0 {
- /* This location must not be altered */
- /* 1MB for u-boot Bootloader Image */
- reg = <0x0 0x00100000>;
- label = "NAND (RO) U-Boot Image";
- read-only;
- };
-
- partition@100000 {
- /* 1MB for DTB Image */
- reg = <0x00100000 0x00100000>;
- label = "NAND (RO) DTB Image";
- read-only;
- };
-
- partition@200000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00200000 0x00400000>;
- label = "NAND (RO) Linux Kernel Image";
- read-only;
- };
-
- partition@600000 {
- /* 4MB for Compressed Root file System Image */
- reg = <0x00600000 0x00400000>;
- label = "NAND (RO) Compressed RFS Image";
- read-only;
- };
-
- partition@a00000 {
- /* 7MB for JFFS2 based Root file System */
- reg = <0x00a00000 0x00700000>;
- label = "NAND (RW) JFFS2 Root File System";
- };
-
- partition@1100000 {
- /* 15MB for JFFS2 based Root file System */
- reg = <0x01100000 0x00f00000>;
- label = "NAND (RW) Writable User area";
- };
- };
-
- L2switch@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "vitesse-7385";
- reg = <0x2 0x0 0x20000>;
- };
-
};
- soc@ffe00000 {
- i2c@3000 {
- rtc@68 {
- compatible = "dallas,ds1339";
- reg = <0x68>;
- };
- };
-
- spi@7000 {
-
- fsl_m25p80@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,espi-flash";
- reg = <0>;
- linux,modalias = "fsl_m25p80";
- modal = "s25sl128b";
- spi-max-frequency = <50000000>;
- mode = <0>;
-
- partition@0 {
- /* 512KB for u-boot Bootloader Image */
- reg = <0x0 0x00080000>;
- label = "SPI (RO) U-Boot Image";
- read-only;
- };
-
- partition@80000 {
- /* 512KB for DTB Image */
- reg = <0x00080000 0x00080000>;
- label = "SPI (RO) DTB Image";
- read-only;
- };
-
- partition@100000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00100000 0x00400000>;
- label = "SPI (RO) Linux Kernel Image";
- read-only;
- };
-
- partition@500000 {
- /* 4MB for Compressed RFS Image */
- reg = <0x00500000 0x00400000>;
- label = "SPI (RO) Compressed RFS Image";
- read-only;
- };
-
- partition@900000 {
- /* 7MB for JFFS2 based RFS */
- reg = <0x00900000 0x00700000>;
- label = "SPI (RW) JFFS2 RFS";
- };
- };
- };
-
- mdio@24000 {
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
-
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x1>;
- };
- };
-
- mdio@25000 {
-
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet0: ethernet@b0000 {
- fixed-link = <1 1 1000 0 0>;
- phy-connection-type = "rgmii-id";
-
- };
-
- enet1: ethernet@b1000 {
- phy-handle = <&phy0>;
- tbi-handle = <&tbi0>;
- phy-connection-type = "sgmii";
-
- };
-
- enet2: ethernet@b2000 {
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- };
-
- usb@22000 {
- phy_type = "ulpi";
- };
-
- /* USB2 is shared with localbus, so it must be disabled
- by default. We can't put 'status = "disabled";' here
- since U-Boot doesn't clear the status property when
- it enables USB2. OTOH, U-Boot does create a new node
- when there isn't any. So, just comment it out.
- usb@23000 {
- phy_type = "ulpi";
- };
- */
-
+ board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
};
pci0: pcie@ffe09000 {
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
+ reg = <0 0xffe09000 0 0x1000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -281,21 +47,10 @@
};
pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -306,3 +61,6 @@
};
};
};
+
+/include/ "p1020rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb.dtsi b/arch/powerpc/boot/dts/p1020rdb.dtsi
new file mode 100644
index 00000000000..b5bd86f4baf
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb.dtsi
@@ -0,0 +1,247 @@
+/*
+ * P1020 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR (RO) Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR (RO) DTB Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR (RW) JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR (RO) U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND (RO) U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND (RO) DTB Image";
+ read-only;
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND (RO) Compressed RFS Image";
+ read-only;
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND (RW) JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND (RW) Writable User area";
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+};
+
+&board_soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+
+ partition@u-boot {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "u-boot";
+ read-only;
+ };
+
+ partition@dtb {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "dtb";
+ read-only;
+ };
+
+ partition@kernel {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "kernel";
+ read-only;
+ };
+
+ partition@fs {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "file system";
+ read-only;
+ };
+
+ partition@jffs-fs {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "file system jffs2";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x1>;
+ };
+
+ tbi-phy@2 {
+ device_type = "tbi-phy";
+ reg = <0x2>;
+ };
+ };
+
+ mdio@25000 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
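The usb@23000 note above keeps the second controller commented out because U-Boot does not clear a pre-existing status property. A hedged sketch of what the node could look like if that limitation did not exist; this is illustrative only and not part of the patch:

    &board_soc {
        usb@23000 {
            phy_type = "ulpi";
            status = "disabled";    /* U-Boot would flip this when enabling USB2 */
        };
    };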
diff --git a/arch/powerpc/boot/dts/p1020rdb_36b.dts b/arch/powerpc/boot/dts/p1020rdb_36b.dts
new file mode 100644
index 00000000000..bdbdb6097e5
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_36b.dts
@@ -0,0 +1,66 @@
+/*
+ * P1020 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020RDB";
+ compatible = "fsl,P1020RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ board_lbc: lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xffa00000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00020000>;
+ };
+
+ board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
index f0bf7f42f09..41b4585c5da 100644
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
@@ -16,7 +16,7 @@
* option) any later version.
*/
-/include/ "p1020si.dtsi"
+/include/ "p1020rdb.dts"
/ {
model = "fsl,P1020RDB";
@@ -32,7 +32,7 @@
cpus {
PowerPC,P1020@1 {
- status = "disabled";
+ status = "disabled";
};
};
@@ -45,169 +45,19 @@
};
soc@ffe00000 {
- i2c@3000 {
- rtc@68 {
- compatible = "dallas,ds1339";
- reg = <0x68>;
- };
- };
-
serial1: serial@4600 {
status = "disabled";
};
- spi@7000 {
- fsl_m25p80@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,espi-flash";
- reg = <0>;
- linux,modalias = "fsl_m25p80";
- spi-max-frequency = <40000000>;
-
- partition@0 {
- /* 512KB for u-boot Bootloader Image */
- reg = <0x0 0x00080000>;
- label = "SPI (RO) U-Boot Image";
- read-only;
- };
-
- partition@80000 {
- /* 512KB for DTB Image */
- reg = <0x00080000 0x00080000>;
- label = "SPI (RO) DTB Image";
- read-only;
- };
-
- partition@100000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00100000 0x00400000>;
- label = "SPI (RO) Linux Kernel Image";
- read-only;
- };
-
- partition@500000 {
- /* 4MB for Compressed RFS Image */
- reg = <0x00500000 0x00400000>;
- label = "SPI (RO) Compressed RFS Image";
- read-only;
- };
-
- partition@900000 {
- /* 7MB for JFFS2 based RFS */
- reg = <0x00900000 0x00700000>;
- label = "SPI (RW) JFFS2 RFS";
- };
- };
- };
-
- mdio@24000 {
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
- reg = <0x1>;
- };
- };
-
- mdio@25000 {
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
enet0: ethernet@b0000 {
status = "disabled";
};
- enet1: ethernet@b1000 {
- phy-handle = <&phy0>;
- tbi-handle = <&tbi0>;
- phy-connection-type = "sgmii";
- };
-
- enet2: ethernet@b2000 {
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
- };
-
- usb@22000 {
- phy_type = "ulpi";
- };
-
- /* USB2 is shared with localbus, so it must be disabled
- by default. We can't put 'status = "disabled";' here
- since U-Boot doesn't clear the status property when
- it enables USB2. OTOH, U-Boot does create a new node
- when there isn't any. So, just comment it out.
- usb@23000 {
- phy_type = "ulpi";
- };
- */
-
mpic: pic@40000 {
protected-sources = <
42 29 30 34 /* serial1, enet0-queue-group0 */
17 18 24 45 /* enet0-queue-group1, crypto */
>;
};
-
- };
-
- pci0: pcie@ffe09000 {
- ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@ffe0a000 {
- ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
};
};
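The CAMP trees now start from the full board tree and carve out one core's share by overriding status and shielding the peer core's MPIC sources, instead of duplicating the whole SoC description. A condensed sketch of the pattern (the node set is abridged; names follow the files above):

    /include/ "p1020rdb.dts"

    / {
        cpus {
            PowerPC,P1020@1 {
                status = "disabled";    /* core 1 belongs to the other OS image */
            };
        };

        soc@ffe00000 {
            mpic: pic@40000 {
                /* interrupt sources owned by the core-1 partition */
                protected-sources = <42 29 30 34>;
            };
        };
    };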
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
index 6ec02204a44..51745382188 100644
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
@@ -15,7 +15,7 @@
* option) any later version.
*/
-/include/ "p1020si.dtsi"
+/include/ "p1020rdb.dts"
/ {
model = "fsl,P1020RDB";
@@ -28,7 +28,7 @@
cpus {
PowerPC,P1020@0 {
- status = "disabled";
+ status = "disabled";
};
};
@@ -85,12 +85,6 @@
status = "disabled";
};
- enet0: ethernet@b0000 {
- fixed-link = <1 1 1000 0 0>;
- phy-connection-type = "rgmii-id";
-
- };
-
enet1: ethernet@b1000 {
status = "disabled";
};
@@ -135,7 +129,6 @@
global-utilities@e0000 { //global utilities block
status = "disabled";
};
-
};
pci0: pcie@ffe09000 {
diff --git a/arch/powerpc/boot/dts/p1020si.dtsi b/arch/powerpc/boot/dts/p1020si.dtsi
deleted file mode 100644
index 5c5acb66c3f..00000000000
--- a/arch/powerpc/boot/dts/p1020si.dtsi
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * P1020si Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
- compatible = "fsl,P1020";
- #address-cells = <2>;
- #size-cells = <2>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P1020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P1020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
-
- localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
- };
-
- soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p1020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- mode = "cpu";
- };
-
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2,256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- mdio@24000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x24000 0x1000 0xb0030 0x4>;
-
- };
-
- mdio@25000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-tbi";
- reg = <0x25000 0x1000 0xb1030 0x4>;
-
- };
-
- enet0: ethernet@b0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb0000 0x1000>;
- interrupts = <29 2 30 2 34 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb4000 0x1000>;
- interrupts = <17 2 18 2 24 2>;
- };
- };
-
- enet1: ethernet@b1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb1000 0x1000>;
- interrupts = <35 2 36 2 40 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb5000 0x1000>;
- interrupts = <51 2 52 2 67 2>;
- };
- };
-
- enet2: ethernet@b2000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
-
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb2000 0x1000>;
- interrupts = <31 2 32 2 33 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb6000 0x1000>;
- interrupts = <25 2 26 2 27 2>;
- };
- };
-
- usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- };
-
- /* USB2 is shared with localbus, so it must be disabled
- by default. We can't put 'status = "disabled";' here
- since U-Boot doesn't clear the status property when
- it enables USB2. OTOH, U-Boot does create a new node
- when there isn't any. So, just comment it out.
- usb@23000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x23000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <46 0x2>;
- phy_type = "ulpi";
- };
- */
-
- sdhci@2e000 {
- compatible = "fsl,p1020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1020-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1020-guts","fsl,p2020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
- };
-
- pci0: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- pci1: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-};
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
index ad5b8526900..d9540791e43 100644
--- a/arch/powerpc/boot/dts/p1021mds.dts
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -9,53 +9,22 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "fsl/p1021si-pre.dtsi"
/ {
model = "fsl,P1021";
compatible = "fsl,P1021MDS";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
ethernet3 = &enet3;
ethernet4 = &enet4;
- pci0 = &pci0;
- pci1 = &pci1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P1021@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P1021@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
};
memory {
device_type = "memory";
};
- localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
+ lbc: localbus@ffe05000 {
+ reg = <0x0 0xffe05000 0x0 0x1000>;
/* NAND Flash, BCSR, PMC0/1*/
ranges = <0x0 0x0 0x0 0xfc000000 0x02000000
@@ -138,99 +107,26 @@
};
};
- soc@ffe00000 {
-
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
+ soc: soc@ffe00000 {
compatible = "fsl,p1021-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1021-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1021-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
};
};
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- espi,num-ss-bits = <4>;
- mode = "cpu";
-
- fsl_m25p80@0 {
+
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "fsl,espi-flash";
+ compatible = "spansion,s25sl12801";
reg = <0>;
- linux,modalias = "fsl_m25p80";
spi-max-frequency = <40000000>; /* input clock */
+
partition@u-boot {
label = "u-boot-spi";
reg = <0x00000000 0x00100000>;
@@ -253,237 +149,49 @@
};
};
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1021-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2,256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
phy_type = "ulpi";
};
- mdio@24000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x24000 0x1000 0xb0030 0x4>;
-
+ mdio@24000 {
phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <1 1>;
+ interrupts = <1 1 0 0>;
reg = <0x0>;
};
phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <2 1>;
+ interrupts = <2 1 0 0>;
reg = <0x1>;
};
phy4: ethernet-phy@4 {
- interrupt-parent = <&mpic>;
reg = <0x4>;
};
+ tbi-phy@5 {
+ device_type = "tbi-phy";
+ reg = <0x5>;
+ };
};
mdio@25000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-tbi";
- reg = <0x25000 0x1000 0xb1030 0x4>;
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
- enet0: ethernet@B0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
+ ethernet@b0000 {
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
- queue-group@0{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB0000 0x1000>;
- interrupts = <29 2 30 2 34 2>;
- };
- queue-group@1{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB4000 0x1000>;
- interrupts = <17 2 18 2 24 2>;
- };
};
- enet1: ethernet@B1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
+ ethernet@b1000 {
phy-handle = <&phy4>;
tbi-handle = <&tbi0>;
phy-connection-type = "sgmii";
- queue-group@0{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB1000 0x1000>;
- interrupts = <35 2 36 2 40 2>;
- };
- queue-group@1{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB5000 0x1000>;
- interrupts = <51 2 52 2 67 2>;
- };
};
- enet2: ethernet@B2000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
+ ethernet@b2000 {
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
- queue-group@0{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB2000 0x1000>;
- interrupts = <31 2 32 2 33 2>;
- };
- queue-group@1{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB6000 0x1000>;
- interrupts = <25 2 26 2 27 2>;
- };
- };
-
- sdhci@2e000 {
- compatible = "fsl,p1021-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.3", "fsl,sec3.1",
- "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x97c>;
- fsl,descriptor-types-mask = <0x3a30abf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1021-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1021-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
};
par_io@e0100 {
@@ -499,8 +207,7 @@
0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
- 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9
-*/
+ 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
@@ -535,31 +242,10 @@
};
pci0: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -571,31 +257,10 @@
};
pci1: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 0 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xc0000000
0x2000000 0x0 0xc0000000
0x0 0x20000000
@@ -606,36 +271,16 @@
};
};
- qe@ffe80000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "qe";
- compatible = "fsl,qe";
+ qe: qe@ffe80000 {
ranges = <0x0 0x0 0xffe80000 0x40000>;
reg = <0 0xffe80000 0 0x480>;
brg-frequency = <0>;
bus-frequency = <0>;
- fsl,qe-num-riscs = <1>;
- fsl,qe-num-snums = <28>;
status = "disabled"; /* no firmware loaded */
- qeic: interrupt-controller@80 {
- interrupt-controller;
- compatible = "fsl,qe-ic";
- #address-cells = <0>;
- #interrupt-cells = <1>;
- reg = <0x80 0x80>;
- interrupts = <63 2 60 2>; //high:47 low:44
- interrupt-parent = <&mpic>;
- };
-
enet3: ucc@2000 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <1>;
- reg = <0x2000 0x200>;
- interrupts = <32>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "clk12";
tx-clock-name = "clk9";
@@ -645,20 +290,15 @@
};
mdio@2120 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x2120 0x18>;
- compatible = "fsl,ucc-mdio";
-
qe_phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
- interrupts = <4 1>;
+ interrupts = <4 1 0 0>;
reg = <0x0>;
device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@03 {
interrupt-parent = <&mpic>;
- interrupts = <5 1>;
+ interrupts = <5 1 0 0>;
reg = <0x3>;
device_type = "ethernet-phy";
};
@@ -671,10 +311,6 @@
enet4: ucc@2400 {
device_type = "network";
compatible = "ucc_geth";
- cell-index = <5>;
- reg = <0x2400 0x200>;
- interrupts = <40>;
- interrupt-parent = <&qeic>;
local-mac-address = [ 00 00 00 00 00 00 ];
rx-clock-name = "none";
tx-clock-name = "clk13";
@@ -682,18 +318,7 @@
phy-handle = <&qe_phy1>;
phy-connection-type = "rmii";
};
-
- muram@10000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,qe-muram", "fsl,cpm-muram";
- ranges = <0x0 0x10000 0x6000>;
-
- data-only@0 {
- compatible = "fsl,qe-muram-data",
- "fsl,cpm-muram-data";
- reg = <0x0 0x6000>;
- };
- };
};
};
+
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index b9b8719a620..ef95717db4b 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -8,57 +8,36 @@
* kind, whether express or implied.
*/
-/dts-v1/;
+/include/ "fsl/p1022si-pre.dtsi"
/ {
- model = "fsl,P1022";
+ model = "fsl,P1022DS";
compatible = "fsl,P1022DS";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P1022@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P1022@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
memory {
device_type = "memory";
};
- localbus@fffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2 0 0>;
-
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
0x1 0x0 0xf 0xe0000000 0x08000000
- 0x2 0x0 0x0 0xffa00000 0x00040000
+ 0x2 0x0 0xf 0xff800000 0x00040000
0x3 0x0 0xf 0xffdf0000 0x00008000>;
+ /*
+ * This node is used to access the pixis via "indirect" mode,
+ * which is done by writing the pixis register index to chip
+ * select 0 and reading or writing the value via chip select 1. Indirect
+ * mode is the only way to access the pixis when DIU video
+ * is enabled. Note that this assumes that the first column
+ * of the 'ranges' property above is the chip select number.
+ */
+ board-control@0,0 {
+ compatible = "fsl,p1022ds-indirect-pixis";
+ reg = <0x0 0x0 1 /* CS0 */
+ 0x1 0x0 1>; /* CS1 */
+ };
+
nor@0,0 {
#address-cells = <1>;
#size-cells = <1>;
@@ -161,51 +140,10 @@
};
};
- soc@fffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p1022-immr", "simple-bus";
+ soc: soc@fffe00000 {
ranges = <0x0 0xf 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1022-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2 0 0>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1022-memory-controller";
- reg = <0x2000 0x1000>;
- interrupts = <16 2 0 0>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2 0 0>;
- dfsrr;
- };
i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2 0 0>;
- dfsrr;
-
wm8776:codec@1a {
compatible = "wlf,wm8776";
reg = <0x1a>;
@@ -216,41 +154,14 @@
};
};
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2 0 0>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2 0 0>;
- };
-
spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2 0 0>;
- espi,num-ss-bits = <4>;
- mode = "cpu";
-
- fsl_m25p80@0 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "fsl,espi-flash";
+ compatible = "spansion,s25sl12801";
reg = <0>;
- linux,modalias = "fsl_m25p80";
spi-max-frequency = <40000000>; /* input clock */
+
partition@0 {
label = "u-boot-spi";
reg = <0x00000000 0x00100000>;
@@ -274,115 +185,20 @@
};
ssi@15000 {
- compatible = "fsl,mpc8610-ssi";
- cell-index = <0>;
- reg = <0x15000 0x100>;
- interrupts = <75 2 0 0>;
fsl,mode = "i2s-slave";
codec-handle = <&wm8776>;
- fsl,playback-dma = <&dma00>;
- fsl,capture-dma = <&dma01>;
- fsl,fifo-depth = <15>;
fsl,ssi-asynchronous;
};
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma00: dma-channel@0 {
- compatible = "fsl,ssi-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <76 2 0 0>;
- };
- dma01: dma-channel@80 {
- compatible = "fsl,ssi-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <77 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <78 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <79 2 0 0>;
- };
- };
-
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2 0 0>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1022-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2, 256K
- interrupts = <16 2 0 0>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <20 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <21 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <22 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <23 2 0 0>;
- };
- };
-
usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupts = <28 0x2 0 0>;
phy_type = "ulpi";
};
- mdio@24000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x24000 0x1000 0xb0030 0x4>;
+ usb@23000 {
+ status = "disabled";
+ };
+ mdio@24000 {
phy0: ethernet-phy@0 {
interrupts = <3 1 0 0>;
reg = <0x1>;
@@ -391,189 +207,28 @@
interrupts = <9 1 0 0>;
reg = <0x2>;
};
+ tbi-phy@2 {
+ device_type = "tbi-phy";
+ reg = <0x2>;
+ };
};
- mdio@25000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x25000 0x1000 0xb1030 0x4>;
- };
-
- enet0: ethernet@B0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- fsl,magic-packet;
- fsl,wake-on-filer;
- local-mac-address = [ 00 00 00 00 00 00 ];
+ ethernet@b0000 {
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
- queue-group@0{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB0000 0x1000>;
- interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
- };
- queue-group@1{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB4000 0x1000>;
- interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
- };
};
- enet1: ethernet@B1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
+ ethernet@b1000 {
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
- queue-group@0{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB1000 0x1000>;
- interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
- };
- queue-group@1{
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xB5000 0x1000>;
- interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
- };
- };
-
- sdhci@2e000 {
- compatible = "fsl,p1022-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2 0 0>;
- fsl,sdhci-auto-cmd12;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.3", "fsl,sec3.1", "fsl,sec3.0",
- "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
- "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 0 0 58 2 0 0>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0x97c>;
- fsl,descriptor-types-mask = <0x3a30abf>;
- };
-
- sata@18000 {
- compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
- reg = <0x18000 0x1000>;
- cell-index = <1>;
- interrupts = <74 0x2 0 0>;
- };
-
- sata@19000 {
- compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
- reg = <0x19000 0x1000>;
- cell-index = <2>;
- interrupts = <41 0x2 0 0>;
- };
-
- power@e0070{
- compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
- reg = <0xe0070 0x20>;
- };
-
- display@10000 {
- compatible = "fsl,diu", "fsl,p1022-diu";
- reg = <0x10000 1000>;
- interrupts = <64 2 0 0>;
- };
-
- timer@41100 {
- compatible = "fsl,mpic-global-timer";
- reg = <0x41100 0x100 0x41300 4>;
- interrupts = <0 0 3 0
- 1 0 3 0
- 2 0 3 0
- 3 0 3 0>;
- };
-
- timer@42100 {
- compatible = "fsl,mpic-global-timer";
- reg = <0x42100 0x100 0x42300 4>;
- interrupts = <4 0 3 0
- 5 0 3 0
- 6 0 3 0
- 7 0 3 0>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1022-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1022-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
};
};
pci0: pcie@fffe09000 {
- compatible = "fsl,p1022-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupts = <16 2 0 0>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -585,30 +240,11 @@
};
pci1: pcie@fffe0a000 {
- compatible = "fsl,p1022-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupts = <16 2 0 0>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 0 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
- >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -619,32 +255,11 @@
};
};
-
pci2: pcie@fffe0b000 {
- compatible = "fsl,p1022-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0xf 0xffe0b000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupts = <16 2 0 0>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 8 1
- 0000 0 0 2 &mpic 9 1
- 0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xe0000000
0x2000000 0x0 0xe0000000
0x0 0x20000000
@@ -655,3 +270,5 @@
};
};
};
+
+/include/ "fsl/p1022si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1023rds.dts b/arch/powerpc/boot/dts/p1023rds.dts
index d9b776740a6..beb6cb12e59 100644
--- a/arch/powerpc/boot/dts/p1023rds.dts
+++ b/arch/powerpc/boot/dts/p1023rds.dts
@@ -34,137 +34,30 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/dts-v1/;
+/include/ "fsl/p1023si-pre.dtsi"
/ {
model = "fsl,P1023";
compatible = "fsl,P1023RDS";
#address-cells = <2>;
#size-cells = <2>;
-
- aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,P1023@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- cpu1: PowerPC,P1023@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
+ interrupt-parent = <&mpic>;
memory {
device_type = "memory";
};
- soc@ff600000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p1023-immr", "simple-bus";
+ soc: soc@ff600000 {
ranges = <0x0 0x0 0xff600000 0x200000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1023-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1023-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
};
};
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p1023-espi", "fsl,mpc8536-espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- fsl,espi-num-chipselects = <4>;
-
fsl_dataflash@0 {
#address-cells = <1>;
#size-cells = <1>;
@@ -186,197 +79,14 @@
};
};
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,qoriq-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1023-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2,256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
dr_mode = "host";
phy_type = "ulpi";
};
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x30000 0x10000>;
- ranges = <0 0x30000 0x10000>;
- interrupt-parent = <&mpic>;
- interrupts = <58 2>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupts = <45 2>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupts = <45 2>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupts = <57 2>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupts = <57 2>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.2-rtic",
- "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- power@e0070{
- compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc",
- "fsl,p1022-pmc";
- reg = <0xe0070 0x20>;
- etsec1_clk: soc-clk@B0{
- fsl,pmcdr-mask = <0x00000080>;
- };
- etsec2_clk: soc-clk@B1{
- fsl,pmcdr-mask = <0x00000040>;
- };
- etsec3_clk: soc-clk@B2{
- fsl,pmcdr-mask = <0x00000020>;
- };
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1023-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1023-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
};
- localbus@ff605000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
+ lbc: localbus@ff605000 {
reg = <0 0xff605000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
/* NOR Flash, BCSR */
ranges = <0x0 0x0 0x0 0xee000000 0x02000000
@@ -428,33 +138,18 @@
};
pci0: pcie@ff60a000 {
- compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
- cell-index = <1>;
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xff60a000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ /* IRQ[0:3] are pulled up on board, set to active-low */
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
- 0000 0 0 1 &mpic 0 1
- 0000 0 0 2 &mpic 1 1
- 0000 0 0 3 &mpic 2 1
- 0000 0 0 4 &mpic 3 1
+ 0000 0 0 1 &mpic 0 1 0 0
+ 0000 0 0 2 &mpic 1 1 0 0
+ 0000 0 0 3 &mpic 2 1 0 0
+ 0000 0 0 4 &mpic 3 1 0 0
>;
ranges = <0x2000000 0x0 0xc0000000
0x2000000 0x0 0xc0000000
@@ -466,34 +161,22 @@
};
};
- pci1: pcie@ff609000 {
- compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
- cell-index = <2>;
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
+ board_pci1: pci1: pcie@ff609000 {
reg = <0 0xff609000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ /*
+ * IRQ[4:6] only for PCIe, set to active-high,
+ * IRQ[7] is pulled up on board, set to active-low
+ */
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
- 0000 0 0 1 &mpic 4 1
- 0000 0 0 2 &mpic 5 1
- 0000 0 0 3 &mpic 6 1
- 0000 0 0 4 &mpic 7 1
+ 0000 0 0 1 &mpic 4 2 0 0
+ 0000 0 0 2 &mpic 5 2 0 0
+ 0000 0 0 3 &mpic 6 2 0 0
+ 0000 0 0 4 &mpic 7 1 0 0
>;
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
@@ -506,33 +189,21 @@
};
pci2: pcie@ff60b000 {
- cell-index = <3>;
- compatible = "fsl,p1023-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
reg = <0 0xff60b000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ /*
+ * IRQ[8:10] are pulled up on board, set to active-low,
+ * IRQ[11] only for PCIe, set to active-high
+ */
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
- 0000 0 0 1 &mpic 8 1
- 0000 0 0 2 &mpic 9 1
- 0000 0 0 3 &mpic 10 1
- 0000 0 0 4 &mpic 11 1
+ 0000 0 0 1 &mpic 8 1 0 0
+ 0000 0 0 2 &mpic 9 1 0 0
+ 0000 0 0 3 &mpic 10 1 0 0
+ 0000 0 0 4 &mpic 11 2 0 0
>;
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
@@ -544,3 +215,5 @@
};
};
};
+
+/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 66f03d6477b..237310cc7e6 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -9,30 +9,17 @@
* option) any later version.
*/
-/include/ "p2020si.dtsi"
+/include/ "fsl/p2020si-pre.dtsi"
/ {
model = "fsl,P2020DS";
compatible = "fsl,P2020DS";
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- };
-
-
memory {
device_type = "memory";
};
- localbus@ffe05000 {
- compatible = "fsl,elbc", "simple-bus";
+ board_lbc: lbc: localbus@ffe05000 {
ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
0x1 0x0 0x0 0xe0000000 0x08000000
0x2 0x0 0x0 0xffa00000 0x00040000
@@ -40,203 +27,18 @@
0x4 0x0 0x0 0xffa40000 0x00040000
0x5 0x0 0x0 0xffa80000 0x00040000
0x6 0x0 0x0 0xffac0000 0x00040000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
-
- ramdisk@0 {
- reg = <0x0 0x03000000>;
- read-only;
- };
-
- diagnostic@3000000 {
- reg = <0x03000000 0x00e00000>;
- read-only;
- };
-
- dink@3e00000 {
- reg = <0x03e00000 0x00200000>;
- read-only;
- };
-
- kernel@4000000 {
- reg = <0x04000000 0x00400000>;
- read-only;
- };
-
- jffs2@4400000 {
- reg = <0x04400000 0x03b00000>;
- };
-
- dtb@7f00000 {
- reg = <0x07f00000 0x00080000>;
- read-only;
- };
-
- u-boot@7f80000 {
- reg = <0x07f80000 0x00080000>;
- read-only;
- };
- };
-
- nand@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x2 0x0 0x40000>;
-
- u-boot@0 {
- reg = <0x0 0x02000000>;
- read-only;
- };
-
- jffs2@2000000 {
- reg = <0x02000000 0x10000000>;
- };
-
- ramdisk@12000000 {
- reg = <0x12000000 0x08000000>;
- read-only;
- };
-
- kernel@1a000000 {
- reg = <0x1a000000 0x04000000>;
- };
-
- dtb@1e000000 {
- reg = <0x1e000000 0x01000000>;
- read-only;
- };
-
- empty@1f000000 {
- reg = <0x1f000000 0x21000000>;
- };
- };
-
- board-control@3,0 {
- compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis";
- reg = <0x3 0x0 0x30>;
- };
-
- nand@4,0 {
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x4 0x0 0x40000>;
- };
-
- nand@5,0 {
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x5 0x0 0x40000>;
- };
-
- nand@6,0 {
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x6 0x0 0x40000>;
- };
+ reg = <0 0xffe05000 0 0x1000>;
};
- soc@ffe00000 {
-
- usb@22000 {
- phy_type = "ulpi";
- };
-
- mdio@24520 {
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x1>;
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x2>;
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
-
- };
-
- mdio@25520 {
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- mdio@26520 {
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
-
- };
-
- ptp_clock@24E00 {
- compatible = "fsl,etsec-ptp";
- reg = <0x24E00 0xB0>;
- interrupts = <68 2 69 2 70 2>;
- interrupt-parent = < &mpic >;
- fsl,tclk-period = <5>;
- fsl,tmr-prsc = <200>;
- fsl,tmr-add = <0xCCCCCCCD>;
- fsl,tmr-fiper1 = <0x3B9AC9FB>;
- fsl,tmr-fiper2 = <0x0001869B>;
- fsl,max-adj = <249999999>;
- };
-
- enet0: ethernet@24000 {
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
- };
-
- enet1: ethernet@25000 {
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
-
- };
-
- enet2: ethernet@26000 {
- tbi-handle = <&tbi2>;
- phy-handle = <&phy2>;
- phy-connection-type = "rgmii-id";
- };
-
-
- msi@41600 {
- compatible = "fsl,mpic-msi";
- };
+ board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
};
- pci0: pcie@ffe08000 {
+ pci2: pcie@ffe08000 {
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x8 0x1
- 0000 0x0 0x0 0x2 &mpic 0x9 0x1
- 0000 0x0 0x0 0x3 &mpic 0xa 0x1
- 0000 0x0 0x0 0x4 &mpic 0xb 0x1
- >;
+ reg = <0 0xffe08000 0 0x1000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -247,61 +49,11 @@
};
};
- pci1: pcie@ffe09000 {
+ board_pci1: pci1: pcie@ffe09000 {
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
-
- // IDSEL 0x11 func 0 - PCI slot 1
- 0x8800 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8800 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 1 - PCI slot 1
- 0x8900 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8900 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 2 - PCI slot 1
- 0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 3 - PCI slot 1
- 0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 4 - PCI slot 1
- 0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 5 - PCI slot 1
- 0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 6 - PCI slot 1
- 0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x11 func 7 - PCI slot 1
- 0x8f00 0x0 0x0 0x1 &i8259 0x9 0x2
- 0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2
-
- // IDSEL 0x1d Audio
- 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
-
- // IDSEL 0x1e Legacy
- 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
- 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
-
- // IDSEL 0x1f IDE/SATA
- 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
- 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
- >;
-
+ reg = <0 0xffe09000 0 0x1000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -309,89 +61,14 @@
0x1000000 0x0 0x0
0x1000000 0x0 0x0
0x0 0x10000>;
- uli1575@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x10000>;
- isa@1e {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0xf000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0 0x1000000 0x0 0x0
- 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <4 1>;
- interrupt-parent = <&mpic>;
- };
-
- i8042@60 {
- #size-cells = <0>;
- #address-cells = <1>;
- reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
- interrupts = <1 3 12 3>;
- interrupt-parent =
- <&i8259>;
-
- keyboard@0 {
- reg = <0x0>;
- compatible = "pnpPNP,303";
- };
-
- mouse@1 {
- reg = <0x1>;
- compatible = "pnpPNP,f03";
- };
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
-
- gpio@400 {
- reg = <0x1 0x400 0x80>;
- };
- };
- };
};
-
};
- pci2: pcie@ffe0a000 {
+ pci0: pcie@ffe0a000 {
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
+ reg = <0 0xffe0a000 0 0x1000>;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0xc0000000
0x2000000 0x0 0xc0000000
0x0 0x20000000
@@ -402,3 +79,11 @@
};
};
};
+
+/*
+ * p2020ds.dtsi must be last to ensure board_pci1 overrides pci1 settings
+ * for interrupt-map & interrupt-map-mask
+ */
+
+/include/ "fsl/p2020si-post.dtsi"
+/include/ "p2020ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dtsi b/arch/powerpc/boot/dts/p2020ds.dtsi
new file mode 100644
index 00000000000..c1cf6cef4dd
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020ds.dtsi
@@ -0,0 +1,316 @@
+/*
+ * P2020DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ ramdisk@0 {
+ reg = <0x0 0x03000000>;
+ read-only;
+ };
+
+ diagnostic@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ read-only;
+ };
+
+ dink@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ read-only;
+ };
+
+ kernel@4000000 {
+ reg = <0x04000000 0x00400000>;
+ read-only;
+ };
+
+ jffs2@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ };
+
+ dtb@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ read-only;
+ };
+
+ u-boot@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ u-boot@0 {
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ jffs2@2000000 {
+ reg = <0x02000000 0x10000000>;
+ };
+
+ ramdisk@12000000 {
+ reg = <0x12000000 0x08000000>;
+ read-only;
+ };
+
+ kernel@1a000000 {
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ dtb@1e000000 {
+ reg = <0x1e000000 0x01000000>;
+ read-only;
+ };
+
+ empty@1f000000 {
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,p2020ds-fpga", "fsl,fpga-ngpixis";
+ reg = <0x3 0x0 0x30>;
+ };
+
+ nand@4,0 {
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x4 0x0 0x40000>;
+ };
+
+ nand@5,0 {
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x5 0x0 0x40000>;
+ };
+
+ nand@6,0 {
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x6 0x0 0x40000>;
+ };
+};
+
+&board_soc {
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <3 1 0 0>;
+ reg = <0x1>;
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <3 1 0 0>;
+ reg = <0x2>;
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+
+ };
+
+ ptp_clock@24e00 {
+ fsl,tclk-period = <5>;
+ fsl,tmr-prsc = <200>;
+ fsl,tmr-add = <0xCCCCCCCD>;
+ fsl,tmr-fiper1 = <0x3B9AC9FB>;
+ fsl,tmr-fiper2 = <0x0001869B>;
+ fsl,max-adj = <249999999>;
+ };
+
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet2: ethernet@26000 {
+ tbi-handle = <&tbi2>;
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+};
+
+&board_pci1 {
+ pcie@0 {
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ // IDSEL 0x11 func 0 - PCI slot 1
+ 0x8800 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8800 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 1 - PCI slot 1
+ 0x8900 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8900 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 2 - PCI slot 1
+ 0x8a00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8a00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 3 - PCI slot 1
+ 0x8b00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8b00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 4 - PCI slot 1
+ 0x8c00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8c00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 5 - PCI slot 1
+ 0x8d00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8d00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 6 - PCI slot 1
+ 0x8e00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8e00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x11 func 7 - PCI slot 1
+ 0x8f00 0x0 0x0 0x1 &i8259 0x9 0x2
+ 0x8f00 0x0 0x0 0x2 &i8259 0xa 0x2
+
+ // IDSEL 0x1d Audio
+ 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+ // IDSEL 0x1e Legacy
+ 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+ 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+ // IDSEL 0x1f IDE/SATA
+ 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+ 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+ >;
+
+ uli1575@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0xf000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0
+ 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <4 1 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+ interrupts = <1 3 12 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0x0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <0x1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+
+ gpio@400 {
+ reg = <0x1 0x400 0x80>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 1d7a05f3021..26759a59171 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -9,7 +9,7 @@
* option) any later version.
*/
-/include/ "p2020si.dtsi"
+/include/ "fsl/p2020si-pre.dtsi"
/ {
model = "fsl,P2020RDB";
@@ -29,7 +29,8 @@
device_type = "memory";
};
- localbus@ffe05000 {
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
/* NOR and NAND Flashes */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -140,7 +141,9 @@
};
- soc@ffe00000 {
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+
i2c@3000 {
rtc@68 {
compatible = "dallas,ds1339";
@@ -148,17 +151,13 @@
};
};
- spi@7000 {
-
- fsl_m25p80@0 {
+ spi@7000 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "fsl,espi-flash";
+ compatible = "spansion,s25sl12801";
reg = <0>;
- linux,modalias = "fsl_m25p80";
- modal = "s25sl128b";
spi-max-frequency = <50000000>;
- mode = <0>;
partition@0 {
/* 512KB for u-boot Bootloader Image */
@@ -202,15 +201,17 @@
mdio@24520 {
phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
+ interrupts = <3 1 0 0>;
reg = <0x0>;
- };
+ };
phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
+ interrupts = <3 1 0 0>;
reg = <0x1>;
- };
+ };
+ tbi-phy@2 {
+ device_type = "tbi-phy";
+ reg = <0x2>;
+ };
};
mdio@25520 {
@@ -224,11 +225,7 @@
status = "disabled";
};
- ptp_clock@24E00 {
- compatible = "fsl,etsec-ptp";
- reg = <0x24E00 0xB0>;
- interrupts = <68 2 69 2 70 2>;
- interrupt-parent = < &mpic >;
+ ptp_clock@24e00 {
fsl,tclk-period = <5>;
fsl,tmr-prsc = <200>;
fsl,tmr-add = <0xCCCCCCCD>;
@@ -252,29 +249,18 @@
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
-
};
pci0: pcie@ffe08000 {
+ reg = <0 0xffe08000 0 0x1000>;
status = "disabled";
};
pci1: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
+ pcie@0 {
ranges = <0x2000000 0x0 0xa0000000
0x2000000 0x0 0xa0000000
0x0 0x20000000
@@ -286,21 +272,10 @@
};
pci2: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
ranges = <0x2000000 0x0 0x80000000
0x2000000 0x0 0x80000000
0x0 0x20000000
@@ -311,3 +286,5 @@
};
};
};
+
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
index fc8ddddfccb..66aac864c4c 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -14,28 +14,16 @@
* option) any later version.
*/
-/include/ "p2020si.dtsi"
+/include/ "p2020rdb.dts"
/ {
model = "fsl,P2020RDB";
compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
- aliases {
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- serial0 = &serial0;
- pci0 = &pci0;
- };
-
cpus {
PowerPC,P2020@1 {
- status = "disabled";
+ status = "disabled";
};
-
- };
-
- memory {
- device_type = "memory";
};
localbus@ffe05000 {
@@ -43,115 +31,18 @@
};
soc@ffe00000 {
- i2c@3000 {
- rtc@68 {
- compatible = "dallas,ds1339";
- reg = <0x68>;
- };
- };
-
serial1: serial@4600 {
status = "disabled";
};
- spi@7000 {
-
- fsl_m25p80@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,espi-flash";
- reg = <0>;
- linux,modalias = "fsl_m25p80";
- modal = "s25sl128b";
- spi-max-frequency = <50000000>;
- mode = <0>;
-
- partition@0 {
- /* 512KB for u-boot Bootloader Image */
- reg = <0x0 0x00080000>;
- label = "SPI (RO) U-Boot Image";
- read-only;
- };
-
- partition@80000 {
- /* 512KB for DTB Image */
- reg = <0x00080000 0x00080000>;
- label = "SPI (RO) DTB Image";
- read-only;
- };
-
- partition@100000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00100000 0x00400000>;
- label = "SPI (RO) Linux Kernel Image";
- read-only;
- };
-
- partition@500000 {
- /* 4MB for Compressed RFS Image */
- reg = <0x00500000 0x00400000>;
- label = "SPI (RO) Compressed RFS Image";
- read-only;
- };
-
- partition@900000 {
- /* 7MB for JFFS2 based RFS */
- reg = <0x00900000 0x00700000>;
- label = "SPI (RW) JFFS2 RFS";
- };
- };
- };
-
dma@c300 {
status = "disabled";
};
- usb@22000 {
- phy_type = "ulpi";
- };
-
- mdio@24520 {
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x1>;
- };
- };
-
- mdio@25520 {
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- mdio@26520 {
- status = "disabled";
- };
-
enet0: ethernet@24000 {
status = "disabled";
};
- enet1: ethernet@25000 {
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- phy-connection-type = "sgmii";
-
- };
-
- enet2: ethernet@26000 {
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
- };
-
-
mpic: pic@40000 {
protected-sources = <
42 76 77 78 79 /* serial1 , dma2 */
@@ -164,40 +55,12 @@
msi@41600 {
status = "disabled";
};
-
-
};
pci0: pcie@ffe08000 {
status = "disabled";
};
- pci1: pcie@ffe09000 {
- ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x4 0x1
- 0000 0x0 0x0 0x2 &mpic 0x5 0x1
- 0000 0x0 0x0 0x3 &mpic 0x6 0x1
- 0000 0x0 0x0 0x4 &mpic 0x7 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
pci2: pcie@ffe0a000 {
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
index 261c34ba45e..9bd8ef493dd 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -15,28 +15,18 @@
* option) any later version.
*/
-/include/ "p2020si.dtsi"
+/include/ "p2020rdb.dts"
/ {
model = "fsl,P2020RDB";
compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
- aliases {
- ethernet0 = &enet0;
- serial0 = &serial1;
- pci1 = &pci1;
- };
-
cpus {
PowerPC,P2020@0 {
- status = "disabled";
+ status = "disabled";
};
};
- memory {
- device_type = "memory";
- };
-
localbus@ffe05000 {
status = "disabled";
};
@@ -70,55 +60,10 @@
status = "disabled";
};
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
- };
-
gpio: gpio-controller@f000 {
status = "disabled";
};
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p2020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2,512K
- interrupt-parent = <&mpic>;
- };
-
dma@21300 {
status = "disabled";
};
@@ -139,12 +84,6 @@
status = "disabled";
};
- enet0: ethernet@24000 {
- fixed-link = <1 1 1000 0 0>;
- phy-connection-type = "rgmii-id";
-
- };
-
enet1: ethernet@25000 {
status = "disabled";
};
@@ -170,22 +109,6 @@
>;
};
- msi@41600 {
- compatible = "fsl,p2020-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
global-utilities@e0000 { //global utilities block
status = "disabled";
};
@@ -199,30 +122,4 @@
pci1: pcie@ffe09000 {
status = "disabled";
};
-
- pci2: pcie@ffe0a000 {
- ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1
- >;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
};
diff --git a/arch/powerpc/boot/dts/p2020si.dtsi b/arch/powerpc/boot/dts/p2020si.dtsi
deleted file mode 100644
index 6def17f265d..00000000000
--- a/arch/powerpc/boot/dts/p2020si.dtsi
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * P2020 Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
- compatible = "fsl,P2020";
- #address-cells = <2>;
- #size-cells = <2>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P2020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P2020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
-
- localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
- };
-
- soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p2020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p2020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p2020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- mode = "cpu";
- };
-
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
- };
-
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p2020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2,512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- };
-
- mdio@24520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x24520 0x20>;
- };
-
- mdio@25520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x26520 0x20>;
- };
-
- mdio@26520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
-
- };
-
- enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
-
- };
-
- sdhci@2e000 {
- compatible = "fsl,p2020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p2020-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p2020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
- };
-
- pci0: pcie@ffe08000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe08000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
- };
-
- pci1: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- };
-
- pci2: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
- };
-};
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index 79b6895027c..4f957db0123 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/include/ "p2041si.dtsi"
+/include/ "fsl/p2041si-pre.dtsi"
/ {
model = "fsl,P2041RDB";
@@ -50,6 +50,8 @@
};
soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
spi@110000 {
flash@0 {
#address-cells = <1>;
@@ -106,7 +108,18 @@
};
};
- localbus@ffe124000 {
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+
+ lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
ranges = <0 0 0xf 0xe8000000 0x08000000>;
@@ -122,6 +135,7 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -137,6 +151,7 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -152,6 +167,7 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -163,3 +179,5 @@
};
};
};
+
+/include/ "fsl/p2041si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041si.dtsi b/arch/powerpc/boot/dts/p2041si.dtsi
deleted file mode 100644
index f7492edd0df..00000000000
--- a/arch/powerpc/boot/dts/p2041si.dtsi
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * P2041 Silicon Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
- compatible = "fsl,P2041";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- sdhc = &sdhc;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e500mc@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e500mc@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu2: PowerPC,e500mc@2 {
- device_type = "cpu";
- reg = <2>;
- next-level-cache = <&L2_2>;
- L2_2: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu3: PowerPC,e500mc@3 {
- device_type = "cpu";
- reg = <3>;
- next-level-cache = <&L2_3>;
- L2_3: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- };
-
- dcsr: dcsr@f00000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- interrupt-parent = <&mpic>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p2041-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p2041-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr>;
- reg = <0x12000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p2041-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p2041-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- dcsr-cpu-sb-proxy@42000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu2>;
- reg = <0x42000 0x1000>;
- };
- dcsr-cpu-sb-proxy@43000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu3>;
- reg = <0x43000 0x1000>;
- };
- };
-
- soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
- reg = <0x10000 0x1000>;
- interrupts = <16 2 1 27>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x4000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic", "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi0: msi@41600 {
- compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- msi1: msi@41800 {
- compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe8 0 0 0
- 0xe9 0 0 0
- 0xea 0 0 0
- 0xeb 0 0 0
- 0xec 0 0 0
- 0xed 0 0 0
- 0xee 0 0 0
- 0xef 0 0 0>;
- };
-
- msi2: msi@41a00 {
- compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xf0 0 0 0
- 0xf1 0 0 0
- 0xf2 0 0 0
- 0xf3 0 0 0
- 0xf4 0 0 0
- 0xf5 0 0 0
- 0xf6 0 0 0
- 0xf7 0 0 0>;
- };
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p2041-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p2041-serdes";
- reg = <0xea000 0x1000>;
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p2041-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <31 2 0 0>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p2041-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <32 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <33 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <34 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p2041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <35 2 0 0>;
- };
- };
-
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p2041-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2 0 0>;
- fsl,espi-num-chipselects = <4>;
- };
-
- sdhc: sdhc@114000 {
- compatible = "fsl,p2041-esdhc", "fsl,esdhc";
- reg = <0x114000 0x1000>;
- interrupts = <48 2 0 0>;
- sdhci,auto-cmd12;
- clock-frequency = <0>;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p2041-gpio", "fsl,qoriq-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2 0 0>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- usb0: usb@210000 {
- compatible = "fsl,p2041-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x2 0 0>;
- phy_type = "utmi";
- port0;
- };
-
- usb1: usb@211000 {
- compatible = "fsl,p2041-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <45 0x2 0 0>;
- phy_type = "utmi";
- };
-
- sata@220000 {
- compatible = "fsl,p2041-sata", "fsl,pq-sata-v2";
- reg = <0x220000 0x1000>;
- interrupts = <68 0x2 0 0>;
- };
-
- sata@221000 {
- compatible = "fsl,p2041-sata", "fsl,pq-sata-v2";
- reg = <0x221000 0x1000>;
- interrupts = <69 0x2 0 0>;
- };
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupts = <92 2 0 0>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupts = <88 2 0 0>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupts = <89 2 0 0>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupts = <90 2 0 0>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupts = <91 2 0 0>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.2-rtic",
- "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupts = <93 2 0 0>;
- };
-
- };
-
- localbus@ffe124000 {
- compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
- };
-
- pci0: pcie@ffe200000 {
- compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <33333333>;
- fsl,msi = <&msi0>;
- interrupts = <16 2 1 15>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
- };
-
- pci1: pcie@ffe201000 {
- compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <33333333>;
- fsl,msi = <&msi1>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
- };
-
- pci2: pcie@ffe202000 {
- compatible = "fsl,p2041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <33333333>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 13>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 13>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 42 1 0 0
- 0000 0 0 2 &mpic 9 1 0 0
- 0000 0 0 3 &mpic 10 1 0 0
- 0000 0 0 4 &mpic 11 1 0 0
- >;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index bbd113b49a8..f469145abae 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/include/ "p3041si.dtsi"
+/include/ "fsl/p3041si-pre.dtsi"
/ {
model = "fsl,P3041DS";
@@ -50,6 +50,8 @@
};
soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
spi@110000 {
flash@0 {
#address-cells = <1>;
@@ -99,7 +101,18 @@
};
};
- localbus@ffe124000 {
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+
+ lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
2 0 0xf 0xffa00000 0x00040000
@@ -160,6 +173,7 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -175,6 +189,7 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -190,6 +205,7 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -205,6 +221,7 @@
reg = <0xf 0xfe203000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -216,3 +233,5 @@
};
};
};
+
+/include/ "fsl/p3041si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p3041si.dtsi b/arch/powerpc/boot/dts/p3041si.dtsi
deleted file mode 100644
index 87130b732bc..00000000000
--- a/arch/powerpc/boot/dts/p3041si.dtsi
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * P3041 Silicon Device Tree Source
- *
- * Copyright 2010-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
- compatible = "fsl,P3041";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- pci3 = &pci3;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- sdhc = &sdhc;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
-
-/*
- rio0 = &rapidio0;
- */
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e500mc@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e500mc@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu2: PowerPC,e500mc@2 {
- device_type = "cpu";
- reg = <2>;
- next-level-cache = <&L2_2>;
- L2_2: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu3: PowerPC,e500mc@3 {
- device_type = "cpu";
- reg = <3>;
- next-level-cache = <&L2_3>;
- L2_3: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- };
-
- dcsr: dcsr@f00000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- interrupt-parent = <&mpic>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p43041-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p43041-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr>;
- reg = <0x12000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p43041-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p43041-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- dcsr-cpu-sb-proxy@42000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu2>;
- reg = <0x42000 0x1000>;
- };
- dcsr-cpu-sb-proxy@43000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu3>;
- reg = <0x43000 0x1000>;
- };
- };
-
- soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
- reg = <0x10000 0x1000>;
- interrupts = <16 2 1 27>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x4000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic", "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi0: msi@41600 {
- compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- msi1: msi@41800 {
- compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe8 0 0 0
- 0xe9 0 0 0
- 0xea 0 0 0
- 0xeb 0 0 0
- 0xec 0 0 0
- 0xed 0 0 0
- 0xee 0 0 0
- 0xef 0 0 0>;
- };
-
- msi2: msi@41a00 {
- compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xf0 0 0 0
- 0xf1 0 0 0
- 0xf2 0 0 0
- 0xf3 0 0 0
- 0xf4 0 0 0
- 0xf5 0 0 0
- 0xf6 0 0 0
- 0xf7 0 0 0>;
- };
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p3041-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p3041-serdes";
- reg = <0xea000 0x1000>;
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <31 2 0 0>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p3041-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <32 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <33 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <34 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p3041-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <35 2 0 0>;
- };
- };
-
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p3041-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2 0 0>;
- fsl,espi-num-chipselects = <4>;
- };
-
- sdhc: sdhc@114000 {
- compatible = "fsl,p3041-esdhc", "fsl,esdhc";
- reg = <0x114000 0x1000>;
- interrupts = <48 2 0 0>;
- sdhci,auto-cmd12;
- clock-frequency = <0>;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p3041-gpio", "fsl,qoriq-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2 0 0>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- usb0: usb@210000 {
- compatible = "fsl,p3041-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x2 0 0>;
- phy_type = "utmi";
- port0;
- };
-
- usb1: usb@211000 {
- compatible = "fsl,p3041-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <45 0x2 0 0>;
- dr_mode = "host";
- phy_type = "utmi";
- };
-
- sata@220000 {
- compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
- reg = <0x220000 0x1000>;
- interrupts = <68 0x2 0 0>;
- };
-
- sata@221000 {
- compatible = "fsl,p3041-sata", "fsl,pq-sata-v2";
- reg = <0x221000 0x1000>;
- interrupts = <69 0x2 0 0>;
- };
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupts = <92 2 0 0>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupts = <88 2 0 0>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupts = <89 2 0 0>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupts = <90 2 0 0>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupts = <91 2 0 0>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.2-rtic",
- "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupts = <93 2 0 0>;
- };
- };
-
-/*
- rapidio0: rapidio@ffe0c0000
-*/
-
- localbus@ffe124000 {
- compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
- };
-
- pci0: pcie@ffe200000 {
- compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi0>;
- interrupts = <16 2 1 15>;
-
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
- };
-
- pci1: pcie@ffe201000 {
- compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi1>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
- };
-
- pci2: pcie@ffe202000 {
- compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 13>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 13>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 42 1 0 0
- 0000 0 0 2 &mpic 9 1 0 0
- 0000 0 0 3 &mpic 10 1 0 0
- 0000 0 0 4 &mpic 11 1 0 0
- >;
- };
- };
-
- pci3: pcie@ffe203000 {
- compatible = "fsl,p3041-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 12>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 12>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 43 1 0 0
- 0000 0 0 2 &mpic 0 1 0 0
- 0000 0 0 3 &mpic 4 1 0 0
- 0000 0 0 4 &mpic 8 1 0 0
- >;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/p3060qds.dts b/arch/powerpc/boot/dts/p3060qds.dts
index 08b9193213e..529042e4b9a 100644
--- a/arch/powerpc/boot/dts/p3060qds.dts
+++ b/arch/powerpc/boot/dts/p3060qds.dts
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/include/ "p3060si.dtsi"
+/include/ "fsl/p3060si-pre.dtsi"
/ {
model = "fsl,P3060QDS";
@@ -50,6 +50,8 @@
};
soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
spi@110000 {
flash@0 {
#address-cells = <1>;
@@ -138,7 +140,7 @@
};
};
- rapidio@ffe0c0000 {
+ rio: rapidio@ffe0c0000 {
reg = <0xf 0xfe0c0000 0 0x11000>;
port1 {
@@ -149,7 +151,7 @@
};
};
- localbus@ffe124000 {
+ lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
2 0 0xf 0xffa00000 0x00040000
@@ -210,6 +212,7 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -225,6 +228,7 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -236,3 +240,5 @@
};
};
};
+
+/include/ "fsl/p3060si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p3060si.dtsi b/arch/powerpc/boot/dts/p3060si.dtsi
deleted file mode 100644
index 68947e157bb..00000000000
--- a/arch/powerpc/boot/dts/p3060si.dtsi
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * P3060 Silicon Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
- compatible = "fsl,P3060";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e500mc@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e500mc@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu4: PowerPC,e500mc@4 {
- device_type = "cpu";
- reg = <4>;
- next-level-cache = <&L2_4>;
- L2_4: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu5: PowerPC,e500mc@5 {
- device_type = "cpu";
- reg = <5>;
- next-level-cache = <&L2_5>;
- L2_5: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu6: PowerPC,e500mc@6 {
- device_type = "cpu";
- reg = <6>;
- next-level-cache = <&L2_6>;
- L2_6: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu7: PowerPC,e500mc@7 {
- device_type = "cpu";
- reg = <7>;
- next-level-cache = <&L2_7>;
- L2_7: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- };
-
- dcsr: dcsr@f00000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- interrupt-parent = <&mpic>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr>;
- reg = <0x12000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- dcsr-cpu-sb-proxy@44000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu4>;
- reg = <0x44000 0x1000>;
- };
- dcsr-cpu-sb-proxy@45000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu5>;
- reg = <0x45000 0x1000>;
- };
- dcsr-cpu-sb-proxy@46000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu6>;
- reg = <0x46000 0x1000>;
- };
- dcsr-cpu-sb-proxy@47000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu7>;
- reg = <0x47000 0x1000>;
- };
- };
-
- soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p3060-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 27>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x5000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic", "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi0: msi@41600 {
- compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- msi1: msi@41800 {
- compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe8 0 0 0
- 0xe9 0 0 0
- 0xea 0 0 0
- 0xeb 0 0 0
- 0xec 0 0 0
- 0xed 0 0 0
- 0xee 0 0 0
- 0xef 0 0 0>;
- };
-
- msi2: msi@41a00 {
- compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xf0 0 0 0
- 0xf1 0 0 0
- 0xf2 0 0 0
- 0xf3 0 0 0
- 0xf4 0 0 0
- 0xf5 0 0 0
- 0xf6 0 0 0
- 0xf7 0 0 0>;
- };
-
- rmu: rmu@d3000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,srio-rmu";
- reg = <0xd3000 0x500>;
- ranges = <0x0 0xd3000 0x500>;
-
- message-unit@0 {
- compatible = "fsl,srio-msg-unit";
- reg = <0x0 0x100>;
- interrupts = <
- 60 2 0 0 /* msg1_tx_irq */
- 61 2 0 0>;/* msg1_rx_irq */
- };
- message-unit@100 {
- compatible = "fsl,srio-msg-unit";
- reg = <0x100 0x100>;
- interrupts = <
- 62 2 0 0 /* msg2_tx_irq */
- 63 2 0 0>;/* msg2_rx_irq */
- };
- doorbell-unit@400 {
- compatible = "fsl,srio-dbell-unit";
- reg = <0x400 0x80>;
- interrupts = <
- 56 2 0 0 /* bell_outb_irq */
- 57 2 0 0>;/* bell_inb_irq */
- };
- port-write-unit@4e0 {
- compatible = "fsl,srio-port-write-unit";
- reg = <0x4e0 0x20>;
- interrupts = <16 2 1 11>;
- };
- };
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p3060-serdes";
- reg = <0xea000 0x1000>;
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p3060-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <31 2 0 0>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p3060-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <32 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <33 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <34 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p3060-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <35 2 0 0>;
- };
- };
-
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p3060-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2 0 0>;
- fsl,espi-num-chipselects = <4>;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p3060-gpio", "fsl,qoriq-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2 0 0>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- usb0: usb@210000 {
- compatible = "fsl,p3060-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x2 0 0>;
- };
-
- usb1: usb@211000 {
- compatible = "fsl,p3060-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <45 0x2 0 0>;
- };
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.1", "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupt-parent = <&mpic>;
- interrupts = <92 2 0 0>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <88 2 0 0>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <89 2 0 0>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <90 2 0 0>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.1-job-ring", "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <91 2 0 0>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.1-rtic", "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.1-rtic-memory", "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.1-mon", "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <93 2 0 0>;
- };
- };
-
- rapidio@ffe0c0000 {
- compatible = "fsl,srio";
- interrupts = <16 2 1 11>;
- #address-cells = <2>;
- #size-cells = <2>;
- fsl,srio-rmu-handle = <&rmu>;
- ranges;
-
- port1 {
- #address-cells = <2>;
- #size-cells = <2>;
- cell-index = <1>;
- };
-
- port2 {
- #address-cells = <2>;
- #size-cells = <2>;
- cell-index = <2>;
- };
- };
-
- localbus@ffe124000 {
- compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
- };
-
- pci0: pcie@ffe200000 {
- compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <33333333>;
- fsl,msi = <&msi0>;
- interrupts = <16 2 1 15>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
- };
-
- pci1: pcie@ffe201000 {
- compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <33333333>;
- fsl,msi = <&msi1>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index c7916dc2801..6d60e54e50a 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/include/ "p4080si.dtsi"
+/include/ "fsl/p4080si-pre.dtsi"
/ {
model = "fsl,P4080DS";
@@ -50,6 +50,9 @@
};
soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
spi@110000 {
flash@0 {
#address-cells = <1>;
@@ -105,12 +108,18 @@
};
};
- rapidio0: rapidio@ffe0c0000 {
- reg = <0xf 0xfe0c0000 0 0x20000>;
- ranges = <0 0 0xc 0x20000000 0 0x01000000>;
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
};
- localbus@ffe124000 {
+ lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
3 0 0xf 0xffdf0000 0x00008000>;
@@ -132,6 +141,7 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -147,6 +157,7 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -162,6 +173,7 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -174,3 +186,5 @@
};
};
+
+/include/ "fsl/p4080si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p4080si.dtsi b/arch/powerpc/boot/dts/p4080si.dtsi
deleted file mode 100644
index f20c01ab247..00000000000
--- a/arch/powerpc/boot/dts/p4080si.dtsi
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * P4080 Silicon Device Tree Source
- *
- * Copyright 2009-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
- compatible = "fsl,P4080";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- sdhc = &sdhc;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
-
- rio0 = &rapidio0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e500mc@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e500mc@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu2: PowerPC,e500mc@2 {
- device_type = "cpu";
- reg = <2>;
- next-level-cache = <&L2_2>;
- L2_2: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu3: PowerPC,e500mc@3 {
- device_type = "cpu";
- reg = <3>;
- next-level-cache = <&L2_3>;
- L2_3: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu4: PowerPC,e500mc@4 {
- device_type = "cpu";
- reg = <4>;
- next-level-cache = <&L2_4>;
- L2_4: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu5: PowerPC,e500mc@5 {
- device_type = "cpu";
- reg = <5>;
- next-level-cache = <&L2_5>;
- L2_5: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu6: PowerPC,e500mc@6 {
- device_type = "cpu";
- reg = <6>;
- next-level-cache = <&L2_6>;
- L2_6: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu7: PowerPC,e500mc@7 {
- device_type = "cpu";
- reg = <7>;
- next-level-cache = <&L2_7>;
- L2_7: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- };
-
- dcsr: dcsr@f00000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- interrupt-parent = <&mpic>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p4080-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p4080-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr1>;
- reg = <0x12000 0x1000>;
- };
- dcsr-ddr@13000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr2>;
- reg = <0x13000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p4080-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p4080-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- dcsr-cpu-sb-proxy@42000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu2>;
- reg = <0x42000 0x1000>;
- };
- dcsr-cpu-sb-proxy@43000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu3>;
- reg = <0x43000 0x1000>;
- };
- dcsr-cpu-sb-proxy@44000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu4>;
- reg = <0x44000 0x1000>;
- };
- dcsr-cpu-sb-proxy@45000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu5>;
- reg = <0x45000 0x1000>;
- };
- dcsr-cpu-sb-proxy@46000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu6>;
- reg = <0x46000 0x1000>;
- };
- dcsr-cpu-sb-proxy@47000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu7>;
- reg = <0x47000 0x1000>;
- };
- };
-
- soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- ddr2: memory-controller@9000 {
- compatible = "fsl,qoriq-memory-controller-v4.4","fsl,qoriq-memory-controller";
- reg = <0x9000 0x1000>;
- interrupts = <16 2 1 22>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p4080-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 27
- 16 2 1 26>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x5000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic", "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi0: msi@41600 {
- compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- msi1: msi@41800 {
- compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe8 0 0 0
- 0xe9 0 0 0
- 0xea 0 0 0
- 0xeb 0 0 0
- 0xec 0 0 0
- 0xed 0 0 0
- 0xee 0 0 0
- 0xef 0 0 0>;
- };
-
- msi2: msi@41a00 {
- compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xf0 0 0 0
- 0xf1 0 0 0
- 0xf2 0 0 0
- 0xf3 0 0 0
- 0xf4 0 0 0
- 0xf5 0 0 0
- 0xf6 0 0 0
- 0xf7 0 0 0>;
- };
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p4080-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p4080-serdes";
- reg = <0xea000 0x1000>;
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <31 2 0 0>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <32 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <33 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <34 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p4080-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <35 2 0 0>;
- };
- };
-
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2 0 0>;
- fsl,espi-num-chipselects = <4>;
- };
-
- sdhc: sdhc@114000 {
- compatible = "fsl,p4080-esdhc", "fsl,esdhc";
- reg = <0x114000 0x1000>;
- interrupts = <48 2 0 0>;
- voltage-ranges = <3300 3300>;
- sdhci,auto-cmd12;
- clock-frequency = <0>;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p4080-gpio", "fsl,qoriq-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2 0 0>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- usb0: usb@210000 {
- compatible = "fsl,p4080-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x2 0 0>;
- };
-
- usb1: usb@211000 {
- compatible = "fsl,p4080-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <45 0x2 0 0>;
- };
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupt-parent = <&mpic>;
- interrupts = <92 2 0 0>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <88 2 0 0>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <89 2 0 0>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <90 2 0 0>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <91 2 0 0>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <93 2 0 0>;
- };
- };
-
- rapidio0: rapidio@ffe0c0000 {
- #address-cells = <2>;
- #size-cells = <2>;
- compatible = "fsl,rapidio-delta";
- interrupts = <
- 16 2 1 11 /* err_irq */
- 56 2 0 0 /* bell_outb_irq */
- 57 2 0 0 /* bell_inb_irq */
- 60 2 0 0 /* msg1_tx_irq */
- 61 2 0 0 /* msg1_rx_irq */
- 62 2 0 0 /* msg2_tx_irq */
- 63 2 0 0>; /* msg2_rx_irq */
- };
-
- localbus@ffe124000 {
- compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
- };
-
- pci0: pcie@ffe200000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi0>;
- interrupts = <16 2 1 15>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
- };
-
- pci1: pcie@ffe201000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi1>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
- };
-
- pci2: pcie@ffe202000 {
- compatible = "fsl,p4080-pcie";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 13>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 13>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 42 1 0 0
- 0000 0 0 2 &mpic 9 1 0 0
- 0000 0 0 3 &mpic 10 1 0 0
- 0000 0 0 4 &mpic 11 1 0 0
- >;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index e6d40999ccd..1c250684c90 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/include/ "p5020si.dtsi"
+/include/ "fsl/p5020si-pre.dtsi"
/ {
model = "fsl,P5020DS";
@@ -50,6 +50,8 @@
};
soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
spi@110000 {
flash@0 {
#address-cells = <1>;
@@ -99,7 +101,18 @@
};
};
- localbus@ffe124000 {
+ rio: rapidio@ffe0c0000 {
+ reg = <0xf 0xfe0c0000 0 0x11000>;
+
+ port1 {
+ ranges = <0 0 0xc 0x20000000 0 0x10000000>;
+ };
+ port2 {
+ ranges = <0 0 0xc 0x30000000 0 0x10000000>;
+ };
+ };
+
+ lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
2 0 0xf 0xffa00000 0x00040000
@@ -160,7 +173,7 @@
reg = <0xf 0xfe200000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
-
+ fsl,msi = <&msi0>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -176,6 +189,7 @@
reg = <0xf 0xfe201000 0 0x1000>;
ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ fsl,msi = <&msi1>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -191,6 +205,7 @@
reg = <0xf 0xfe202000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -206,6 +221,7 @@
reg = <0xf 0xfe203000 0 0x1000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ fsl,msi = <&msi2>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
@@ -217,3 +233,5 @@
};
};
};
+
+/include/ "fsl/p5020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p5020si.dtsi b/arch/powerpc/boot/dts/p5020si.dtsi
deleted file mode 100644
index e7948ad71fa..00000000000
--- a/arch/powerpc/boot/dts/p5020si.dtsi
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * P5020 Silicon Device Tree Source
- *
- * Copyright 2010-2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/dts-v1/;
-
-/ {
- compatible = "fsl,P5020";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- pci3 = &pci3;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- sdhc = &sdhc;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
-
-/*
- rio0 = &rapidio0;
- */
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e5500@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e5500@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- };
-
- dcsr: dcsr@f00000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- interrupt-parent = <&mpic>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p5020-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p5020-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr1>;
- reg = <0x12000 0x1000>;
- };
- dcsr-ddr@13000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr2>;
- reg = <0x13000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p5020-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p5020-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- };
-
- soc: soc@ffe000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- ddr2: memory-controller@9000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x9000 0x1000>;
- interrupts = <16 2 1 22>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 27
- 16 2 1 26>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x4000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
- mpic: pic@40000 {
- clock-frequency = <0>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <4>;
- reg = <0x40000 0x40000>;
- compatible = "fsl,mpic", "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi0: msi@41600 {
- compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0 0 0
- 0xe1 0 0 0
- 0xe2 0 0 0
- 0xe3 0 0 0
- 0xe4 0 0 0
- 0xe5 0 0 0
- 0xe6 0 0 0
- 0xe7 0 0 0>;
- };
-
- msi1: msi@41800 {
- compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe8 0 0 0
- 0xe9 0 0 0
- 0xea 0 0 0
- 0xeb 0 0 0
- 0xec 0 0 0
- 0xed 0 0 0
- 0xee 0 0 0
- 0xef 0 0 0>;
- };
-
- msi2: msi@41a00 {
- compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xf0 0 0 0
- 0xf1 0 0 0
- 0xf2 0 0 0
- 0xf3 0 0 0
- 0xf4 0 0 0
- 0xf5 0 0 0
- 0xf6 0 0 0
- 0xf7 0 0 0>;
- };
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p5020-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p5020-serdes";
- reg = <0xea000 0x1000>;
- };
-
- dma0: dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
- reg = <0x100300 0x4>;
- ranges = <0x0 0x100100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <31 2 0 0>;
- };
- };
-
- dma1: dma@101300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,p5020-dma", "fsl,eloplus-dma";
- reg = <0x101300 0x4>;
- ranges = <0x0 0x101100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupts = <32 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupts = <33 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupts = <34 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,p5020-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupts = <35 2 0 0>;
- };
- };
-
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,p5020-espi", "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2 0 0>;
- fsl,espi-num-chipselects = <4>;
- };
-
- sdhc: sdhc@114000 {
- compatible = "fsl,p5020-esdhc", "fsl,esdhc";
- reg = <0x114000 0x1000>;
- interrupts = <48 2 0 0>;
- sdhci,auto-cmd12;
- clock-frequency = <0>;
- };
-
- i2c@118000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x118000 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@118100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x118100 0x100>;
- interrupts = <38 2 0 0>;
- dfsrr;
- };
-
- i2c@119000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <2>;
- compatible = "fsl-i2c";
- reg = <0x119000 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- i2c@119100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <3>;
- compatible = "fsl-i2c";
- reg = <0x119100 0x100>;
- interrupts = <39 2 0 0>;
- dfsrr;
- };
-
- serial0: serial@11c500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c500 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial1: serial@11c600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11c600 0x100>;
- clock-frequency = <0>;
- interrupts = <36 2 0 0>;
- };
-
- serial2: serial@11d500 {
- cell-index = <2>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d500 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- serial3: serial@11d600 {
- cell-index = <3>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x11d600 0x100>;
- clock-frequency = <0>;
- interrupts = <37 2 0 0>;
- };
-
- gpio0: gpio@130000 {
- compatible = "fsl,p5020-gpio", "fsl,qoriq-gpio";
- reg = <0x130000 0x1000>;
- interrupts = <55 2 0 0>;
- #gpio-cells = <2>;
- gpio-controller;
- };
-
- usb0: usb@210000 {
- compatible = "fsl,p5020-usb2-mph",
- "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- reg = <0x210000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <44 0x2 0 0>;
- phy_type = "utmi";
- port0;
- };
-
- usb1: usb@211000 {
- compatible = "fsl,p5020-usb2-dr",
- "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- reg = <0x211000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <45 0x2 0 0>;
- dr_mode = "host";
- phy_type = "utmi";
- };
-
- sata@220000 {
- compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
- reg = <0x220000 0x1000>;
- interrupts = <68 0x2 0 0>;
- };
-
- sata@221000 {
- compatible = "fsl,p5020-sata", "fsl,pq-sata-v2";
- reg = <0x221000 0x1000>;
- interrupts = <69 0x2 0 0>;
- };
-
- crypto: crypto@300000 {
- compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x300000 0x10000>;
- ranges = <0 0x300000 0x10000>;
- interrupts = <92 2 0 0>;
-
- sec_jr0: jr@1000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x1000 0x1000>;
- interrupts = <88 2 0 0>;
- };
-
- sec_jr1: jr@2000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x2000 0x1000>;
- interrupts = <89 2 0 0>;
- };
-
- sec_jr2: jr@3000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x3000 0x1000>;
- interrupts = <90 2 0 0>;
- };
-
- sec_jr3: jr@4000 {
- compatible = "fsl,sec-v4.2-job-ring",
- "fsl,sec-v4.0-job-ring";
- reg = <0x4000 0x1000>;
- interrupts = <91 2 0 0>;
- };
-
- rtic@6000 {
- compatible = "fsl,sec-v4.2-rtic",
- "fsl,sec-v4.0-rtic";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6000 0x100>;
- ranges = <0x0 0x6100 0xe00>;
-
- rtic_a: rtic-a@0 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x00 0x20 0x100 0x80>;
- };
-
- rtic_b: rtic-b@20 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x20 0x20 0x200 0x80>;
- };
-
- rtic_c: rtic-c@40 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x40 0x20 0x300 0x80>;
- };
-
- rtic_d: rtic-d@60 {
- compatible = "fsl,sec-v4.2-rtic-memory",
- "fsl,sec-v4.0-rtic-memory";
- reg = <0x60 0x20 0x500 0x80>;
- };
- };
- };
-
- sec_mon: sec_mon@314000 {
- compatible = "fsl,sec-v4.2-mon", "fsl,sec-v4.0-mon";
- reg = <0x314000 0x1000>;
- interrupts = <93 2 0 0>;
- };
- };
-
-/*
- rapidio0: rapidio@ffe0c0000
-*/
-
- localbus@ffe124000 {
- compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
- };
-
- pci0: pcie@ffe200000 {
- compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi0>;
- interrupts = <16 2 1 15>;
-
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
- };
-
- pci1: pcie@ffe201000 {
- compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi1>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
- };
-
- pci2: pcie@ffe202000 {
- compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 13>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 13>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 42 1 0 0
- 0000 0 0 2 &mpic 9 1 0 0
- 0000 0 0 3 &mpic 10 1 0 0
- 0000 0 0 4 &mpic 11 1 0 0
- >;
- };
- };
-
- pci3: pcie@ffe203000 {
- compatible = "fsl,p5020-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <0x1fca055>;
- fsl,msi = <&msi2>;
- interrupts = <16 2 1 12>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 12>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 43 1 0 0
- 0000 0 0 2 &mpic 0 1 0 0
- 0000 0 0 3 &mpic 4 1 0 0
- 0000 0 0 4 &mpic 8 1 0 0
- >;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
index 0dc90f9bd81..b1e45a8537a 100644
--- a/arch/powerpc/boot/dts/sbc8349.dts
+++ b/arch/powerpc/boot/dts/sbc8349.dts
@@ -222,7 +222,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <9 0x8>;
@@ -232,7 +232,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <10 0x8>;
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
index 94a33225171..77be77116c2 100644
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ b/arch/powerpc/boot/dts/sbc8548.dts
@@ -316,7 +316,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <0x2a 0x2>;
@@ -326,7 +326,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <0x2a 0x2>;
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index ee5538feb45..56bebce8784 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -347,7 +347,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -357,7 +357,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <28 2>;
diff --git a/arch/powerpc/boot/dts/socrates.dts b/arch/powerpc/boot/dts/socrates.dts
index 38c35404bdc..134a5ff917e 100644
--- a/arch/powerpc/boot/dts/socrates.dts
+++ b/arch/powerpc/boot/dts/socrates.dts
@@ -199,7 +199,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -209,7 +209,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/storcenter.dts b/arch/powerpc/boot/dts/storcenter.dts
index eab680ce10d..2a555738517 100644
--- a/arch/powerpc/boot/dts/storcenter.dts
+++ b/arch/powerpc/boot/dts/storcenter.dts
@@ -74,7 +74,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x20>;
clock-frequency = <97553800>; /* Hz */
current-speed = <115200>;
@@ -85,7 +85,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x20>;
clock-frequency = <97553800>; /* Hz */
current-speed = <9600>;
diff --git a/arch/powerpc/boot/dts/stxssa8555.dts b/arch/powerpc/boot/dts/stxssa8555.dts
index 49efd44057d..4f166b01c1b 100644
--- a/arch/powerpc/boot/dts/stxssa8555.dts
+++ b/arch/powerpc/boot/dts/stxssa8555.dts
@@ -210,7 +210,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -220,7 +220,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 0a4cedbdcb5..ed264d9ae35 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -250,7 +250,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -260,7 +260,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index f49d0918131..92524211581 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -224,7 +224,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -234,7 +234,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 9452c3c0511..6e1ac50852a 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -305,7 +305,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
current-speed = <115200>;
@@ -316,7 +316,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
current-speed = <115200>;
@@ -352,7 +352,7 @@
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
1 0x0 0xf8000000 0x08000000 // NOR FLASH bank 0
- 2 0x0 0xa3000000 0x00008000 // CAN (2 x i82527)
+ 2 0x0 0xa3000000 0x00008000 // CAN (2 x CC770)
3 0x0 0xa3010000 0x00008000 // NAND FLASH
>;
@@ -393,18 +393,27 @@
};
/* Note: CAN support needs to be enabled in U-Boot */
- can0@2,0 {
- compatible = "intel,82527"; // Bosch CC770
+ can@2,0 {
+ compatible = "bosch,cc770"; // Bosch CC770
reg = <2 0x0 0x100>;
interrupts = <4 1>;
interrupt-parent = <&mpic>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
+ bosch,clock-out-frequency = <16000000>;
};
- can1@2,100 {
- compatible = "intel,82527"; // Bosch CC770
+ can@2,100 {
+ compatible = "bosch,cc770"; // Bosch CC770
reg = <2 0x100 0x100>;
interrupts = <4 1>;
interrupt-parent = <&mpic>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
};
/* Note: NAND support needs to be enabled in U-Boot */
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 619776f72c9..161e75eac7f 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -305,7 +305,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
current-speed = <115200>;
@@ -316,7 +316,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
current-speed = <115200>;
@@ -352,7 +352,7 @@
ranges = <
0 0x0 0xfc000000 0x04000000 // NOR FLASH bank 1
1 0x0 0xf8000000 0x08000000 // NOR FLASH bank 0
- 2 0x0 0xe3000000 0x00008000 // CAN (2 x i82527)
+ 2 0x0 0xe3000000 0x00008000 // CAN (2 x CC770)
3 0x0 0xe3010000 0x00008000 // NAND FLASH
>;
@@ -393,18 +393,27 @@
};
/* Note: CAN support needs to be enabled in U-Boot */
- can0@2,0 {
- compatible = "intel,82527"; // Bosch CC770
+ can@2,0 {
+ compatible = "bosch,cc770"; // Bosch CC770
reg = <2 0x0 0x100>;
interrupts = <4 1>;
interrupt-parent = <&mpic>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
+ bosch,clock-out-frequency = <16000000>;
};
- can1@2,100 {
- compatible = "intel,82527"; // Bosch CC770
+ can@2,100 {
+ compatible = "bosch,cc770"; // Bosch CC770
reg = <2 0x100 0x100>;
interrupts = <4 1>;
interrupt-parent = <&mpic>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
};
/* Note: NAND support needs to be enabled in U-Boot */
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 81bad8cd375..aa6ff0d3dd9 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -224,7 +224,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
@@ -234,7 +234,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/tqm8xx.dts b/arch/powerpc/boot/dts/tqm8xx.dts
index f6da7ec49a8..c3dba2518d8 100644
--- a/arch/powerpc/boot/dts/tqm8xx.dts
+++ b/arch/powerpc/boot/dts/tqm8xx.dts
@@ -57,6 +57,7 @@
ranges = <
0x0 0x0 0x40000000 0x800000
+ 0x3 0x0 0xc0000000 0x200
>;
flash@0,0 {
@@ -67,6 +68,30 @@
bank-width = <4>;
device-width = <2>;
};
+
+ /* Note: CAN support needs to be enabled in U-Boot */
+ can@3,0 {
+ compatible = "intc,82527";
+ reg = <3 0x0 0x80>;
+ interrupts = <8 1>;
+ interrupt-parent = <&PIC>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
+ bosch,clock-out-frequency = <16000000>;
+ };
+
+ can@3,100 {
+ compatible = "intc,82527";
+ reg = <3 0x100 0x80>;
+ interrupts = <8 1>;
+ interrupt-parent = <&PIC>;
+ bosch,external-clock-frequency = <16000000>;
+ bosch,disconnect-rx1-input;
+ bosch,disconnect-tx1-output;
+ bosch,iso-low-speed-mux;
+ };
};
soc@fff00000 {
diff --git a/arch/powerpc/boot/dts/xcalibur1501.dts b/arch/powerpc/boot/dts/xcalibur1501.dts
index ac0a617b429..cc00f4ddd9a 100644
--- a/arch/powerpc/boot/dts/xcalibur1501.dts
+++ b/arch/powerpc/boot/dts/xcalibur1501.dts
@@ -531,7 +531,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -542,7 +542,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index c41a80c55e4..8fd7b703135 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -333,7 +333,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
current-speed = <115200>;
@@ -344,7 +344,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
current-speed = <115200>;
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index c0efcbb4513..0baa8283d08 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -337,7 +337,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
current-speed = <9600>;
@@ -348,7 +348,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
current-speed = <9600>;
diff --git a/arch/powerpc/boot/dts/xpedite5301.dts b/arch/powerpc/boot/dts/xpedite5301.dts
index db7faf5ebb3..53c1c6a9752 100644
--- a/arch/powerpc/boot/dts/xpedite5301.dts
+++ b/arch/powerpc/boot/dts/xpedite5301.dts
@@ -441,7 +441,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -452,7 +452,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5330.dts b/arch/powerpc/boot/dts/xpedite5330.dts
index c364ca6ff7d..21522598315 100644
--- a/arch/powerpc/boot/dts/xpedite5330.dts
+++ b/arch/powerpc/boot/dts/xpedite5330.dts
@@ -477,7 +477,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -488,7 +488,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/dts/xpedite5370.dts b/arch/powerpc/boot/dts/xpedite5370.dts
index 7a8a4afd56c..11dbda10d75 100644
--- a/arch/powerpc/boot/dts/xpedite5370.dts
+++ b/arch/powerpc/boot/dts/xpedite5370.dts
@@ -439,7 +439,7 @@
serial0: serial@4500 {
cell-index = <0>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
@@ -450,7 +450,7 @@
serial1: serial@4600 {
cell-index = <1>;
device_type = "serial";
- compatible = "ns16550";
+ compatible = "fsl,ns16550", "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
interrupts = <42 2>;
diff --git a/arch/powerpc/boot/treeboot-currituck.c b/arch/powerpc/boot/treeboot-currituck.c
new file mode 100644
index 00000000000..925ae43b746
--- /dev/null
+++ b/arch/powerpc/boot/treeboot-currituck.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * Based on earlier code:
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ * Copyright 2007 David Gibson, IBM Corporation.
+ * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ * Copyright © 2011 David Kleikamp IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+#define MAX_RANKS 0x4
+#define DDR3_MR0CF 0x80010011U
+
+static unsigned long long ibm_currituck_memsize;
+static unsigned long long ibm_currituck_detect_memsize(void)
+{
+ u32 reg;
+ unsigned i;
+ unsigned long long memsize = 0;
+
+ for(i = 0; i < MAX_RANKS; i++){
+ reg = mfdcrx(DDR3_MR0CF + i);
+
+ if (!(reg & 1))
+ continue;
+
+ reg &= 0x0000f000;
+ reg >>= 12;
+ memsize += (0x800000ULL << reg);
+ }
+
+ return memsize;
+}
+
+static void ibm_currituck_fixups(void)
+{
+ void *devp = finddevice("/");
+ u32 dma_ranges[7];
+
+ dt_fixup_memory(0x0ULL, ibm_currituck_memsize);
+
+ while ((devp = find_node_by_devtype(devp, "pci"))) {
+ if (getprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges)) < 0) {
+ printf("%s: Failed to get dma-ranges\r\n", __func__);
+ continue;
+ }
+
+ dma_ranges[5] = ibm_currituck_memsize >> 32;
+ dma_ranges[6] = ibm_currituck_memsize & 0xffffffffUL;
+
+ setprop(devp, "dma-ranges", dma_ranges, sizeof(dma_ranges));
+ }
+}
+
+#define SPRN_PIR 0x11E /* Processor Identification Register */
+void platform_init(void)
+{
+ unsigned long end_of_ram, avail_ram;
+ u32 pir_reg;
+ int node, size;
+ const u32 *timebase;
+
+ ibm_currituck_memsize = ibm_currituck_detect_memsize();
+ if (ibm_currituck_memsize >> 32)
+ end_of_ram = ~0UL;
+ else
+ end_of_ram = ibm_currituck_memsize;
+ avail_ram = end_of_ram - (unsigned long)_end;
+
+ simple_alloc_init(_end, avail_ram, 128, 64);
+ platform_ops.fixups = ibm_currituck_fixups;
+ platform_ops.exit = ibm44x_dbcr_reset;
+ pir_reg = mfspr(SPRN_PIR);
+
+ /* Make sure FDT blob is sane */
+ if (fdt_check_header(_dtb_start) != 0)
+ fatal("Invalid device tree blob\n");
+
+ node = fdt_node_offset_by_prop_value(_dtb_start, -1, "device_type",
+ "cpu", sizeof("cpu"));
+ if (!node)
+ fatal("Cannot find cpu node\n");
+ timebase = fdt_getprop(_dtb_start, node, "timebase-frequency", &size);
+ if (timebase && (size == 4))
+ timebase_period_ns = 1000000000 / *timebase;
+
+ fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+ fdt_init(_dtb_start);
+
+ serial_console_init();
+}
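
For reference, ibm_currituck_detect_memsize() above decodes each DDR3_MR0CF rank register as follows: bit 0 marks the rank as populated, and bits 15:12 select a size of 8 MiB shifted left by that field. A minimal standalone sketch of that decoding, using a hypothetical register value instead of a real DCR read:

#include <stdio.h>

/* Sketch of the MR0CF decoding used above; 0x00003001 is a made-up
 * example value: valid bit set, size field = 3 -> 8 MiB << 3 = 64 MiB. */
int main(void)
{
	unsigned int reg = 0x00003001;
	unsigned long long memsize = 0;

	if (reg & 1) {                           /* rank populated? */
		unsigned int field = (reg & 0x0000f000) >> 12;
		memsize += 0x800000ULL << field; /* 8 MiB << field */
	}
	printf("rank size: %llu MiB\n", memsize >> 20);
	return 0;
}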
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index c74531af72c..f090e6d2907 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -163,7 +163,7 @@ coff)
link_address='0x500000'
pie=
;;
-miboot|uboot)
+miboot|uboot*)
# miboot and U-boot want just the bare bits, not an ELF binary
ext=bin
objflags="-O binary"
@@ -244,6 +244,9 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+treeboot-currituck)
+ link_address='0x1000000'
+ ;;
treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
@@ -257,6 +260,8 @@ vmz="$tmpdir/`basename \"$kernel\"`.$ext"
if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
+ strip_size=$(stat -c %s $vmz.$$)
+
if [ -n "$gzip" ]; then
gzip -n -f -9 "$vmz.$$"
fi
@@ -266,6 +271,24 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
else
vmz="$vmz.$$"
fi
+else
+ # Calculate the vmlinux.strip size
+ ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
+ strip_size=$(stat -c %s $vmz.$$)
+ rm -f $vmz.$$
+fi
+
+# Round the size to next higher MB limit
+round_size=$(((strip_size + 0xfffff) & 0xfff00000))
+
+round_size=0x$(printf "%x" $round_size)
+link_addr=$(printf "%d" $link_address)
+
+if [ $link_addr -lt $strip_size ]; then
+ echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \
+ "overlaps the address of the wrapper($link_address)"
+ echo "INFO: Fixing the link_address of wrapper to ($round_size)"
+ link_address=$round_size
fi
vmz="$vmz$gzip"
@@ -291,6 +314,26 @@ uboot)
fi
exit 0
;;
+uboot-obs600)
+ rm -f "$ofile"
+ # obs600 wants a multi image with an initrd, so we need to put a fake
+ # one in even when building a "normal" image.
+ if [ -n "$initrd" ]; then
+ real_rd="$initrd"
+ else
+ real_rd=`mktemp`
+ echo "\0" >>"$real_rd"
+ fi
+ ${MKIMAGE} -A ppc -O linux -T multi -C gzip -a $membase -e $membase \
+ $uboot_version -d "$vmz":"$real_rd":"$dtb" "$ofile"
+ if [ -z "$initrd" ]; then
+ rm -f "$real_rd"
+ fi
+ if [ -z "$cacheit" ]; then
+ rm -f "$vmz"
+ fi
+ exit 0
+ ;;
esac
addsec() {
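
The link-address fix-up added to the wrapper above rounds the uncompressed image size up to the next 1 MiB boundary with (strip_size + 0xfffff) & 0xfff00000 and moves the wrapper there if the platform link address would otherwise overlap the kernel. A small C sketch of the same arithmetic (the sizes and the 0x400000 link address are illustrative, not taken from a real build):

#include <stdio.h>

int main(void)
{
	/* Illustrative values: a ~5.2 MB stripped kernel image and a
	 * hypothetical 0x400000 platform link address. */
	unsigned long strip_size   = 0x0053c000;
	unsigned long link_address = 0x400000;

	/* Round up to the next 1 MiB boundary, as the wrapper does. */
	unsigned long round_size = (strip_size + 0xfffff) & 0xfff00000;

	if (link_address < strip_size)
		link_address = round_size;   /* relocate the wrapper above the kernel */

	printf("rounded size 0x%lx, link address 0x%lx\n",
	       round_size, link_address);
	return 0;
}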
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
new file mode 100644
index 00000000000..c0d228dc73d
--- /dev/null
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -0,0 +1,55 @@
+CONFIG_40x=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_WALNUT is not set
+CONFIG_APM8018X=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_MIGRATION is not set
+# CONFIG_SUSPEND is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_SAS_ATTRS=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_AVERAGE=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_FTRACE is not set
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
new file mode 100644
index 00000000000..91c110dad2d
--- /dev/null
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -0,0 +1,83 @@
+CONFIG_40x=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_WALNUT is not set
+CONFIG_OBS600=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MATH_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_NDFC=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+CONFIG_NETDEVICES=y
+CONFIG_IBM_EMAC=y
+CONFIG_IBM_EMAC_RXB=256
+CONFIG_IBM_EMAC_TXB=256
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IBM_IIC=y
+CONFIG_SENSORS_LM75=y
+CONFIG_THERMAL=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_EXT2_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
new file mode 100644
index 00000000000..4192322f8a7
--- /dev/null
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -0,0 +1,110 @@
+CONFIG_44x=y
+CONFIG_SMP=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PPC_47x=y
+# CONFIG_EBONY is not set
+CONFIG_CURRITUCK=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+CONFIG_MATH_EMULATION=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_SIL24=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_E1000E=y
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_IBM_IIC=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NLS_DEFAULT="n"
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x10000000
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x200
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index a6eb6ad05b2..ca00cf750d3 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -25,7 +25,8 @@ CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="root=/dev/issblk0"
# CONFIG_PCI is not set
CONFIG_ADVANCED_OPTIONS=y
-CONFIG_RELOCATABLE=y
+CONFIG_NONSTATIC_KERNEL=y
+CONFIG_DYNAMIC_MEMSTART=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
new file mode 100644
index 00000000000..acf7fb28046
--- /dev/null
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -0,0 +1,307 @@
+CONFIG_PPC64=y
+CONFIG_PPC_BOOK3E_64=y
+# CONFIG_VIRT_CPU_ACCOUNTING is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=256
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=19
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_COUNTERS=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_SCOM_DEBUGFS=y
+CONFIG_PPC_A2_DD2=y
+CONFIG_KVM_GUEST=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NUMA=y
+# CONFIG_MIGRATION is not set
+CONFIG_PPC_64K_PAGES=y
+CONFIG_SCHED_SMT=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_SECCOMP is not set
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NET_TCPPROBE=y
+# CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_DEBUG=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_LE_BYTE_SWAP=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_MISC_DEVICES=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_MV=y
+CONFIG_SATA_SIL=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_MARVELL=y
+CONFIG_PATA_SIL680=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_E1000E=y
+CONFIG_TIGON3=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=1024
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1511=y
+CONFIG_RTC_DRV_DS1553=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=m
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_PPC_EMULATED_STATS=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+CONFIG_VIRQ_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index f087de6ec03..f8aef205d22 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -37,6 +37,8 @@ CONFIG_FSL_LBC=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
+CONFIG_RAPIDIO=y
+CONFIG_FSL_RIO=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -94,17 +96,17 @@ CONFIG_SATA_SIL24=y
CONFIG_SATA_SIL=y
CONFIG_PATA_SIL680=y
CONFIG_NETDEVICES=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_FSL_PQ_MDIO=y
CONFIG_E1000=y
CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
+CONFIG_VITESSE_PHY=y
+CONFIG_FIXED_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
@@ -155,6 +157,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 782822c32d1..7ed8d4cf271 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -23,6 +23,8 @@ CONFIG_P5020_DS=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
+CONFIG_RAPIDIO=y
+CONFIG_FSL_RIO=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -57,7 +59,6 @@ CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -81,6 +82,7 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index a1e5a178a4a..f37a2ab4888 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,5 +1,4 @@
CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -93,15 +92,14 @@ CONFIG_SATA_FSL=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -120,6 +118,9 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
@@ -163,6 +164,10 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
@@ -182,6 +187,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -213,4 +219,5 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index dd1e41386c4..abdcd317cda 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -1,5 +1,4 @@
CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_EXPERIMENTAL=y
@@ -26,6 +25,7 @@ CONFIG_MPC85xx_MDS=y
CONFIG_MPC8536_DS=y
CONFIG_MPC85xx_DS=y
CONFIG_MPC85xx_RDB=y
+CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
@@ -94,15 +94,14 @@ CONFIG_SATA_FSL=y
CONFIG_PATA_ALI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -121,6 +120,9 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
@@ -164,6 +166,10 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
@@ -183,6 +189,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
CONFIG_ADFS_FS=m
CONFIG_AFFS_FS=m
CONFIG_HFS_FS=m
@@ -214,4 +221,5 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 6cdf1c0d2c8..3b98d735434 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -52,6 +52,8 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_NDFC=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 535711fcb13..2156e077859 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -390,6 +390,11 @@ CONFIG_HUGETLBFS=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 185c292b0f1..ded867871e9 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -6,10 +6,10 @@ CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_NAMESPACES=y
+CONFIG_SPARSE_IRQ=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
# CONFIG_PERF_EVENTS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
@@ -17,6 +17,7 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_PS3=y
@@ -27,14 +28,14 @@ CONFIG_PS3_VRAM=m
CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_KEXEC=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
-CONFIG_PM=y
+CONFIG_PM_RUNTIME=y
CONFIG_PM_DEBUG=y
# CONFIG_SECCOMP is not set
# CONFIG_PCI is not set
@@ -81,20 +82,23 @@ CONFIG_SCSI_MULTI_LUN=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_GELIC_NET=y
CONFIG_GELIC_WIRELESS=y
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_USB_USBNET=m
# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
@@ -135,22 +139,21 @@ CONFIG_USB=m
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PS3=m
+CONFIG_RTC_DRV_PS3=y
+# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
-CONFIG_INOTIFY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
@@ -167,19 +170,17 @@ CONFIG_CIFS=m
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PROVE_LOCKING=y
CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index a72f2415a64..30e7d0d20e4 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -304,6 +304,11 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d51df17c7e6..7e313f1ed18 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -34,3 +34,5 @@ header-y += termios.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
+
+generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index e30442c539c..ad55a1ccb9f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -201,6 +201,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
+#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000)
#ifndef __ASSEMBLY__
@@ -425,7 +426,7 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -437,7 +438,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
- CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX)
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
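
The cputable.h hunk above defines CPU_FTR_VMX_COPY as bit 61 of the 64-bit CPU feature mask and ORs it into an existing CPU_FTRS_* set. A minimal sketch of the bitmask test such a constant implies (has_feature() is a made-up stand-in; the kernel's own cpu_has_feature() infrastructure is not shown here):

#include <stdio.h>

#define CPU_FTR_VMX_COPY (1ULL << 61)	/* 0x2000000000000000, as above */

/* Hypothetical stand-in for the kernel's feature test. */
static int has_feature(unsigned long long cpu_features, unsigned long long bit)
{
	return (cpu_features & bit) != 0;
}

int main(void)
{
	unsigned long long features = CPU_FTR_VMX_COPY;	/* illustrative mask */
	printf("VMX copy supported: %d\n",
	       has_feature(features, CPU_FTR_VMX_COPY));
	return 0;
}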
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bdfbec..487d46ff68a 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { }
#include <asm/time.h>
#include <asm/param.h>
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
-
-#define cputime_zero ((cputime_t)0)
-#define cputime_max ((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-
-#define cputime64_zero ((cputime64_t)0)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime_to_cputime64(__ct) (__ct)
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
#ifdef __KERNEL__
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct)
{
if (cpu_has_feature(CPU_FTR_SPURR) &&
__get_cpu_var(cputime_last_delta))
- return ct * __get_cpu_var(cputime_scaled_last_delta) /
- __get_cpu_var(cputime_last_delta);
+ return (__force u64) ct *
+ __get_cpu_var(cputime_scaled_last_delta) /
+ __get_cpu_var(cputime_last_delta);
return ct;
}
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
- cputime_t ct;
+ u64 ct;
u64 sec;
/* have to be a little careful about overflow */
@@ -114,28 +98,28 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
do_div(ct, HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime64_t) ct;
}
static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/*
* Convert cputime <-> microseconds
*/
-extern u64 __cputime_msec_factor;
+extern u64 __cputime_usec_factor;
static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+ return mulhdu((__force u64) ct, __cputime_usec_factor);
}
static inline cputime_t usecs_to_cputime(const unsigned long us)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -143,13 +127,15 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
- do_div(ct, 1000);
+ do_div(ct, 1000000);
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
@@ -157,12 +143,12 @@ extern u64 __cputime_sec_factor;
static inline unsigned long cputime_to_secs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_sec_factor);
+ return mulhdu((__force u64) ct, __cputime_sec_factor);
}
static inline cputime_t secs_to_cputime(const unsigned long sec)
{
- return (cputime_t) sec * tb_ticks_per_sec;
+ return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
}
/*
@@ -170,7 +156,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec)
*/
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
@@ -182,11 +168,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
static inline cputime_t timespec_to_cputime(const struct timespec *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_nsec * tb_ticks_per_sec;
do_div(ct, 1000000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
@@ -194,7 +180,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p)
*/
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
@@ -206,11 +192,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
static inline cputime_t timeval_to_cputime(const struct timeval *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_usec * tb_ticks_per_sec;
do_div(ct, 1000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
@@ -220,12 +206,12 @@ extern u64 __cputime_clockt_factor;
static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{
- return mulhdu(ct, __cputime_clockt_factor);
+ return mulhdu((__force u64) ct, __cputime_clockt_factor);
}
static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
@@ -236,8 +222,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
do_div(ct, USER_HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime_t) ct;
}
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
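
The cputime.h change above marks cputime_t as __nocast so that sparse can flag implicit conversions between it and plain integers, with __force casts marking the places where a conversion is intended. A minimal sketch of that annotation pattern outside the kernel tree (the __CHECKER__ fallbacks mirror what the kernel's compiler headers do; cputime_raw() is a made-up helper name):

/* Sparse-only annotations; ordinary compilers see empty macros. */
#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
# define __force  __attribute__((force))
#else
# define __nocast
# define __force
#endif

typedef unsigned long long u64;
typedef u64 __nocast cputime_t;	/* opaque to implicit conversions under sparse */

/* Hypothetical helper: the explicit, sparse-visible way back to a plain u64. */
static inline u64 cputime_raw(cputime_t ct)
{
	return (__force u64) ct;
}

int main(void)
{
	cputime_t ct = (__force cputime_t) 1234ULL;
	return (int) (cputime_raw(ct) - 1234ULL);	/* 0 on success */
}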
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
new file mode 100644
index 00000000000..b955012939a
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -0,0 +1,834 @@
+/* Freescale Integrated Flash Controller
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <dipen.dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_FSL_IFC_H
+#define __ASM_FSL_IFC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+
+#define FSL_IFC_BANK_COUNT 4
+
+/*
+ * CSPR - Chip Select Property Register
+ */
+#define CSPR_BA 0xFFFF0000
+#define CSPR_BA_SHIFT 16
+#define CSPR_PORT_SIZE 0x00000180
+#define CSPR_PORT_SIZE_SHIFT 7
+/* Port Size 8 bit */
+#define CSPR_PORT_SIZE_8 0x00000080
+/* Port Size 16 bit */
+#define CSPR_PORT_SIZE_16 0x00000100
+/* Port Size 32 bit */
+#define CSPR_PORT_SIZE_32 0x00000180
+/* Write Protect */
+#define CSPR_WP 0x00000040
+#define CSPR_WP_SHIFT 6
+/* Machine Select */
+#define CSPR_MSEL 0x00000006
+#define CSPR_MSEL_SHIFT 1
+/* NOR */
+#define CSPR_MSEL_NOR 0x00000000
+/* NAND */
+#define CSPR_MSEL_NAND 0x00000002
+/* GPCM */
+#define CSPR_MSEL_GPCM 0x00000004
+/* Bank Valid */
+#define CSPR_V 0x00000001
+#define CSPR_V_SHIFT 0
+
+/*
+ * Address Mask Register
+ */
+#define IFC_AMASK_MASK 0xFFFF0000
+#define IFC_AMASK_SHIFT 16
+#define IFC_AMASK(n) (IFC_AMASK_MASK << \
+ (__ilog2(n) - IFC_AMASK_SHIFT))
+
+/*
+ * Chip Select Option Register IFC_NAND Machine
+ */
+/* Enable ECC Encoder */
+#define CSOR_NAND_ECC_ENC_EN 0x80000000
+#define CSOR_NAND_ECC_MODE_MASK 0x30000000
+/* 4 bit correction per 520 Byte sector */
+#define CSOR_NAND_ECC_MODE_4 0x00000000
+/* 8 bit correction per 528 Byte sector */
+#define CSOR_NAND_ECC_MODE_8 0x10000000
+/* Enable ECC Decoder */
+#define CSOR_NAND_ECC_DEC_EN 0x04000000
+/* Row Address Length */
+#define CSOR_NAND_RAL_MASK 0x01800000
+#define CSOR_NAND_RAL_SHIFT 20
+#define CSOR_NAND_RAL_1 0x00000000
+#define CSOR_NAND_RAL_2 0x00800000
+#define CSOR_NAND_RAL_3 0x01000000
+#define CSOR_NAND_RAL_4 0x01800000
+/* Page Size 512b, 2k, 4k */
+#define CSOR_NAND_PGS_MASK 0x00180000
+#define CSOR_NAND_PGS_SHIFT 16
+#define CSOR_NAND_PGS_512 0x00000000
+#define CSOR_NAND_PGS_2K 0x00080000
+#define CSOR_NAND_PGS_4K 0x00100000
+/* Spare region Size */
+#define CSOR_NAND_SPRZ_MASK 0x0000E000
+#define CSOR_NAND_SPRZ_SHIFT 13
+#define CSOR_NAND_SPRZ_16 0x00000000
+#define CSOR_NAND_SPRZ_64 0x00002000
+#define CSOR_NAND_SPRZ_128 0x00004000
+#define CSOR_NAND_SPRZ_210 0x00006000
+#define CSOR_NAND_SPRZ_218 0x00008000
+#define CSOR_NAND_SPRZ_224 0x0000A000
+/* Pages Per Block */
+#define CSOR_NAND_PB_MASK 0x00000700
+#define CSOR_NAND_PB_SHIFT 8
+#define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NAND_TRHZ_MASK 0x0000001C
+#define CSOR_NAND_TRHZ_SHIFT 2
+#define CSOR_NAND_TRHZ_20 0x00000000
+#define CSOR_NAND_TRHZ_40 0x00000004
+#define CSOR_NAND_TRHZ_60 0x00000008
+#define CSOR_NAND_TRHZ_80 0x0000000C
+#define CSOR_NAND_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_NAND_BCTLD 0x00000001
+
+/*
+ * Chip Select Option Register - NOR Flash Mode
+ */
+/* Enable Address shift Mode */
+#define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000
+/* Page Read Enable from NOR device */
+#define CSOR_NOR_PGRD_EN 0x10000000
+/* AVD Toggle Enable during Burst Program */
+#define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000
+/* Address Data Multiplexing Shift */
+#define CSOR_NOR_ADM_MASK 0x0003E000
+#define CSOR_NOR_ADM_SHIFT_SHIFT 13
+#define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT)
+/* Type of the NOR device hooked */
+#define CSOR_NOR_NOR_MODE_AYSNC_NOR 0x00000000
+#define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NOR_TRHZ_MASK 0x0000001C
+#define CSOR_NOR_TRHZ_SHIFT 2
+#define CSOR_NOR_TRHZ_20 0x00000000
+#define CSOR_NOR_TRHZ_40 0x00000004
+#define CSOR_NOR_TRHZ_60 0x00000008
+#define CSOR_NOR_TRHZ_80 0x0000000C
+#define CSOR_NOR_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_NOR_BCTLD 0x00000001
+
+/*
+ * Chip Select Option Register - GPCM Mode
+ */
+/* GPCM Mode - Normal */
+#define CSOR_GPCM_GPMODE_NORMAL 0x00000000
+/* GPCM Mode - GenericASIC */
+#define CSOR_GPCM_GPMODE_ASIC 0x80000000
+/* Parity Mode odd/even */
+#define CSOR_GPCM_PARITY_EVEN 0x40000000
+/* Parity Checking enable/disable */
+#define CSOR_GPCM_PAR_EN 0x20000000
+/* GPCM Timeout Count */
+#define CSOR_GPCM_GPTO_MASK 0x0F000000
+#define CSOR_GPCM_GPTO_SHIFT 24
+#define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT)
+/* GPCM External Access Termination mode for read access */
+#define CSOR_GPCM_RGETA_EXT 0x00080000
+/* GPCM External Access Termination mode for write access */
+#define CSOR_GPCM_WGETA_EXT 0x00040000
+/* Address Data Multiplexing Shift */
+#define CSOR_GPCM_ADM_MASK 0x0003E000
+#define CSOR_GPCM_ADM_SHIFT_SHIFT 13
+#define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT)
+/* Generic ASIC Parity error indication delay */
+#define CSOR_GPCM_GAPERRD_MASK 0x00000180
+#define CSOR_GPCM_GAPERRD_SHIFT 7
+#define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_GPCM_TRHZ_MASK 0x0000001C
+#define CSOR_GPCM_TRHZ_20 0x00000000
+#define CSOR_GPCM_TRHZ_40 0x00000004
+#define CSOR_GPCM_TRHZ_60 0x00000008
+#define CSOR_GPCM_TRHZ_80 0x0000000C
+#define CSOR_GPCM_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_GPCM_BCTLD 0x00000001
+
+/*
+ * Ready Busy Status Register (RB_STAT)
+ */
+/* CSn is READY */
+#define IFC_RB_STAT_READY_CS0 0x80000000
+#define IFC_RB_STAT_READY_CS1 0x40000000
+#define IFC_RB_STAT_READY_CS2 0x20000000
+#define IFC_RB_STAT_READY_CS3 0x10000000
+
+/*
+ * General Control Register (GCR)
+ */
+#define IFC_GCR_MASK 0x8000F800
+/* reset all IFC hardware */
+#define IFC_GCR_SOFT_RST_ALL 0x80000000
+/* Turnaround Time of external buffer */
+#define IFC_GCR_TBCTL_TRN_TIME 0x0000F800
+#define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11
+
+/*
+ * Common Event and Error Status Register (CM_EVTER_STAT)
+ */
+/* Chip select error */
+#define IFC_CM_EVTER_STAT_CSER 0x80000000
+
+/*
+ * Common Event and Error Enable Register (CM_EVTER_EN)
+ */
+/* Chip select error checking enable */
+#define IFC_CM_EVTER_EN_CSEREN 0x80000000
+
+/*
+ * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN)
+ */
+/* Chip select error interrupt enable */
+#define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000
+
+/*
+ * Common Transfer Error Attribute Register-0 (CM_ERATTR0)
+ */
+/* transaction type of error Read/Write */
+#define IFC_CM_ERATTR0_ERTYP_READ 0x80000000
+#define IFC_CM_ERATTR0_ERAID 0x0FF00000
+#define IFC_CM_ERATTR0_ERAID_SHIFT 20
+#define IFC_CM_ERATTR0_ESRCID 0x0000FF00
+#define IFC_CM_ERATTR0_ESRCID_SHIFT 8
+
+/*
+ * Clock Control Register (CCR)
+ */
+#define IFC_CCR_MASK 0x0F0F8800
+/* Clock division ratio */
+#define IFC_CCR_CLK_DIV_MASK 0x0F000000
+#define IFC_CCR_CLK_DIV_SHIFT 24
+#define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT)
+/* IFC Clock Delay */
+#define IFC_CCR_CLK_DLY_MASK 0x000F0000
+#define IFC_CCR_CLK_DLY_SHIFT 16
+#define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT)
+/* Invert IFC clock before sending out */
+#define IFC_CCR_INV_CLK_EN 0x00008000
+/* Feedback IFC Clock */
+#define IFC_CCR_FB_IFC_CLK_SEL 0x00000800
+
+/*
+ * Clock Status Register (CSR)
+ */
+/* Clk is stable */
+#define IFC_CSR_CLK_STAT_STABLE 0x80000000
+
+/*
+ * IFC_NAND Machine Specific Registers
+ */
+/*
+ * NAND Configuration Register (NCFGR)
+ */
+/* Auto Boot Mode */
+#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* Addressing Mode-ROW0+n/COL0 */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
+/* Addressing Mode-ROW0+n/COL0+n */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000
+/* Number of loop iterations of FIR sequences for multi page operations */
+#define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000
+#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12
+#define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT)
+/* Number of wait cycles */
+#define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF
+#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0
+
+/*
+ * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1)
+ */
+/* General purpose FCM flash command bytes CMD0-CMD7 */
+#define IFC_NAND_FCR0_CMD0 0xFF000000
+#define IFC_NAND_FCR0_CMD0_SHIFT 24
+#define IFC_NAND_FCR0_CMD1 0x00FF0000
+#define IFC_NAND_FCR0_CMD1_SHIFT 16
+#define IFC_NAND_FCR0_CMD2 0x0000FF00
+#define IFC_NAND_FCR0_CMD2_SHIFT 8
+#define IFC_NAND_FCR0_CMD3 0x000000FF
+#define IFC_NAND_FCR0_CMD3_SHIFT 0
+#define IFC_NAND_FCR1_CMD4 0xFF000000
+#define IFC_NAND_FCR1_CMD4_SHIFT 24
+#define IFC_NAND_FCR1_CMD5 0x00FF0000
+#define IFC_NAND_FCR1_CMD5_SHIFT 16
+#define IFC_NAND_FCR1_CMD6 0x0000FF00
+#define IFC_NAND_FCR1_CMD6_SHIFT 8
+#define IFC_NAND_FCR1_CMD7 0x000000FF
+#define IFC_NAND_FCR1_CMD7_SHIFT 0
+
+/*
+ * Flash ROW and COL Address Register (ROWn, COLn)
+ */
+/* Main/spare region locator */
+#define IFC_NAND_COL_MS 0x80000000
+/* Column Address */
+#define IFC_NAND_COL_CA_MASK 0x00000FFF
+
+/*
+ * NAND Flash Byte Count Register (NAND_BC)
+ */
+/* Byte Count for read/Write */
+#define IFC_NAND_BC 0x000001FF
+
+/*
+ * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2)
+ */
+/* NAND Machine specific opcodes OP0-OP14*/
+#define IFC_NAND_FIR0_OP0 0xFC000000
+#define IFC_NAND_FIR0_OP0_SHIFT 26
+#define IFC_NAND_FIR0_OP1 0x03F00000
+#define IFC_NAND_FIR0_OP1_SHIFT 20
+#define IFC_NAND_FIR0_OP2 0x000FC000
+#define IFC_NAND_FIR0_OP2_SHIFT 14
+#define IFC_NAND_FIR0_OP3 0x00003F00
+#define IFC_NAND_FIR0_OP3_SHIFT 8
+#define IFC_NAND_FIR0_OP4 0x000000FC
+#define IFC_NAND_FIR0_OP4_SHIFT 2
+#define IFC_NAND_FIR1_OP5 0xFC000000
+#define IFC_NAND_FIR1_OP5_SHIFT 26
+#define IFC_NAND_FIR1_OP6 0x03F00000
+#define IFC_NAND_FIR1_OP6_SHIFT 20
+#define IFC_NAND_FIR1_OP7 0x000FC000
+#define IFC_NAND_FIR1_OP7_SHIFT 14
+#define IFC_NAND_FIR1_OP8 0x00003F00
+#define IFC_NAND_FIR1_OP8_SHIFT 8
+#define IFC_NAND_FIR1_OP9 0x000000FC
+#define IFC_NAND_FIR1_OP9_SHIFT 2
+#define IFC_NAND_FIR2_OP10 0xFC000000
+#define IFC_NAND_FIR2_OP10_SHIFT 26
+#define IFC_NAND_FIR2_OP11 0x03F00000
+#define IFC_NAND_FIR2_OP11_SHIFT 20
+#define IFC_NAND_FIR2_OP12 0x000FC000
+#define IFC_NAND_FIR2_OP12_SHIFT 14
+#define IFC_NAND_FIR2_OP13 0x00003F00
+#define IFC_NAND_FIR2_OP13_SHIFT 8
+#define IFC_NAND_FIR2_OP14 0x000000FC
+#define IFC_NAND_FIR2_OP14_SHIFT 2
+
+/*
+ * Instruction opcodes to be programmed
+ * in FIR registers- 6bits
+ */
+enum ifc_nand_fir_opcodes {
+ IFC_FIR_OP_NOP,
+ IFC_FIR_OP_CA0,
+ IFC_FIR_OP_CA1,
+ IFC_FIR_OP_CA2,
+ IFC_FIR_OP_CA3,
+ IFC_FIR_OP_RA0,
+ IFC_FIR_OP_RA1,
+ IFC_FIR_OP_RA2,
+ IFC_FIR_OP_RA3,
+ IFC_FIR_OP_CMD0,
+ IFC_FIR_OP_CMD1,
+ IFC_FIR_OP_CMD2,
+ IFC_FIR_OP_CMD3,
+ IFC_FIR_OP_CMD4,
+ IFC_FIR_OP_CMD5,
+ IFC_FIR_OP_CMD6,
+ IFC_FIR_OP_CMD7,
+ IFC_FIR_OP_CW0,
+ IFC_FIR_OP_CW1,
+ IFC_FIR_OP_CW2,
+ IFC_FIR_OP_CW3,
+ IFC_FIR_OP_CW4,
+ IFC_FIR_OP_CW5,
+ IFC_FIR_OP_CW6,
+ IFC_FIR_OP_CW7,
+ IFC_FIR_OP_WBCD,
+ IFC_FIR_OP_RBCD,
+ IFC_FIR_OP_BTRD,
+ IFC_FIR_OP_RDSTAT,
+ IFC_FIR_OP_NWAIT,
+ IFC_FIR_OP_WFR,
+ IFC_FIR_OP_SBRD,
+ IFC_FIR_OP_UA,
+ IFC_FIR_OP_RB,
+};
+
+/*
+ * NAND Chip Select Register (NAND_CSEL)
+ */
+#define IFC_NAND_CSEL 0x0C000000
+#define IFC_NAND_CSEL_SHIFT 26
+#define IFC_NAND_CSEL_CS0 0x00000000
+#define IFC_NAND_CSEL_CS1 0x04000000
+#define IFC_NAND_CSEL_CS2 0x08000000
+#define IFC_NAND_CSEL_CS3 0x0C000000
+
+/*
+ * NAND Operation Sequence Start (NANDSEQ_STRT)
+ */
+/* NAND Flash Operation Start */
+#define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000
+/* Automatic Erase */
+#define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000
+/* Automatic Program */
+#define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000
+/* Automatic Copyback */
+#define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000
+/* Automatic Read Operation */
+#define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000
+/* Automatic Status Read */
+#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800
+
+/*
+ * NAND Event and Error Status Register (NAND_EVTER_STAT)
+ */
+/* Operation Complete */
+#define IFC_NAND_EVTER_STAT_OPC 0x80000000
+/* Flash Timeout Error */
+#define IFC_NAND_EVTER_STAT_FTOER 0x08000000
+/* Write Protect Error */
+#define IFC_NAND_EVTER_STAT_WPER 0x04000000
+/* ECC Error */
+#define IFC_NAND_EVTER_STAT_ECCER 0x02000000
+/* RCW Load Done */
+#define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000
+/* Boot Loader Done */
+#define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000
+/* Bad Block Indicator search select */
+#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800
+
+/*
+ * NAND Flash Page Read Completion Event Status Register
+ * (PGRDCMPL_EVT_STAT)
+ */
+#define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000
+/* Small Page 0-15 Done */
+#define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n)))
+/* Large Page(2K) 0-3 Done */
+#define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4))
+/* Large Page(4K) 0-1 Done */
+#define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8))
+
+/*
+ * NAND Event and Error Enable Register (NAND_EVTER_EN)
+ */
+/* Operation complete event enable */
+#define IFC_NAND_EVTER_EN_OPC_EN 0x80000000
+/* Page read complete event enable */
+#define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000
+/* Flash Timeout error enable */
+#define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000
+/* Write Protect error enable */
+#define IFC_NAND_EVTER_EN_WPER_EN 0x04000000
+/* ECC error logging enable */
+#define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000
+
+/*
+ * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN)
+ */
+/* Enable interrupt for operation complete */
+#define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000
+/* Enable interrupt for Page read complete */
+#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000
+/* Enable interrupt for Flash timeout error */
+#define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000
+/* Enable interrupt for Write protect error */
+#define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000
+/* Enable interrupt for ECC error */
+#define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000
+
+/*
+ * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0)
+ */
+#define IFC_NAND_ERATTR0_MASK 0x0C080000
+/* Error on CS0-3 for NAND */
+#define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000
+#define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000
+#define IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000
+/* Transaction type of error Read/Write */
+#define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000
+
+/*
+ * NAND Flash Status Register (NAND_FSR)
+ */
+/* First byte of data read from read status op */
+#define IFC_NAND_NFSR_RS0 0xFF000000
+/* Second byte of data read from read status op */
+#define IFC_NAND_NFSR_RS1 0x00FF0000
+
+/*
+ * ECC Error Status Registers (ECCSTAT0-ECCSTAT3)
+ */
+/* Number of ECC errors on sector n (n = 0-15) */
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0
+
+/*
+ * NAND Control Register (NANDCR)
+ */
+#define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000
+#define IFC_NAND_NCR_FTOCNT_SHIFT 25
+#define IFC_NAND_NCR_FTOCNT(n) ((__ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT)
+
+/*
+ * NAND_AUTOBOOT_TRGR
+ */
+/* Trigger RCW load */
+#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000
+/* Trigger Auto Boot */
+#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000
+
+/*
+ * NAND_MDR
+ */
+/* 1st read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA0 0xFF000000
+/* 2nd read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA1 0x00FF0000
+
+/*
+ * NOR Machine Specific Registers
+ */
+/*
+ * NOR Event and Error Status Register (NOR_EVTER_STAT)
+ */
+/* NOR Command Sequence Operation Complete */
+#define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000
+/* Write Protect Error */
+#define IFC_NOR_EVTER_STAT_WPER 0x04000000
+/* Command Sequence Timeout Error */
+#define IFC_NOR_EVTER_STAT_STOER 0x01000000
+
+/*
+ * NOR Event and Error Enable Register (NOR_EVTER_EN)
+ */
+/* NOR Command Seq complete event enable */
+#define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000
+/* Write Protect Error Checking Enable */
+#define IFC_NOR_EVTER_EN_WPEREN 0x04000000
+/* Timeout Error Enable */
+#define IFC_NOR_EVTER_EN_STOEREN 0x01000000
+
+/*
+ * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN)
+ */
+/* Enable interrupt for OPC complete */
+#define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000
+/* Enable interrupt for write protect error */
+#define IFC_NOR_EVTER_INTR_WPEREN 0x04000000
+/* Enable interrupt for timeout error */
+#define IFC_NOR_EVTER_INTR_STOEREN 0x01000000
+
+/*
+ * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_NOR_ERATTR0_ERSRCID 0xFF000000
+/* AXI ID for error transaction */
+#define IFC_NOR_ERATTR0_ERAID 0x000FF000
+/* Chip select corresponds to NOR error */
+#define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_NOR_ERATTR0_ERCS_CS1 0x00000010
+#define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020
+#define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030
+/* Type of transaction read/write */
+#define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001
+
+/*
+ * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2)
+ */
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00
+
+/*
+ * NOR Control Register (NORCR)
+ */
+#define IFC_NORCR_MASK 0x0F0F0000
+/* No. of Address/Data Phase */
+#define IFC_NORCR_NUM_PHASE_MASK 0x0F000000
+#define IFC_NORCR_NUM_PHASE_SHIFT 24
+#define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT)
+/* Sequence Timeout Count */
+#define IFC_NORCR_STOCNT_MASK 0x000F0000
+#define IFC_NORCR_STOCNT_SHIFT 16
+#define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT)
+
+/*
+ * GPCM Machine specific registers
+ */
+/*
+ * GPCM Event and Error Status Register (GPCM_EVTER_STAT)
+ */
+/* Timeout error */
+#define IFC_GPCM_EVTER_STAT_TOER 0x04000000
+/* Parity error */
+#define IFC_GPCM_EVTER_STAT_PER 0x01000000
+
+/*
+ * GPCM Event and Error Enable Register (GPCM_EVTER_EN)
+ */
+/* Timeout error enable */
+#define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000
+/* Parity error enable */
+#define IFC_GPCM_EVTER_EN_PER_EN 0x01000000
+
+/*
+ * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN)
+ */
+/* Enable Interrupt for timeout error */
+#define IFC_GPCM_EEIER_TOERIR_EN 0x04000000
+/* Enable Interrupt for Parity error */
+#define IFC_GPCM_EEIER_PERIR_EN 0x01000000
+
+/*
+ * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000
+/* AXI ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERAID 0x000FF000
+/* Chip select corresponds to GPCM error */
+#define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040
+#define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080
+#define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0
+/* Type of transaction read/write */
+#define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001
+
+/*
+ * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2)
+ */
+/* On which beat of address/data parity error is observed */
+#define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00
+/* Parity Error on byte */
+#define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0
+/* Parity Error reported in addr or data phase */
+#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001
+
+/*
+ * GPCM Status Register (GPCM_STAT)
+ */
+#define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */
+
+/*
+ * IFC Controller NAND Machine registers
+ */
+struct fsl_ifc_nand {
+ __be32 ncfgr;
+ u32 res1[0x4];
+ __be32 nand_fcr0;
+ __be32 nand_fcr1;
+ u32 res2[0x8];
+ __be32 row0;
+ u32 res3;
+ __be32 col0;
+ u32 res4;
+ __be32 row1;
+ u32 res5;
+ __be32 col1;
+ u32 res6;
+ __be32 row2;
+ u32 res7;
+ __be32 col2;
+ u32 res8;
+ __be32 row3;
+ u32 res9;
+ __be32 col3;
+ u32 res10[0x24];
+ __be32 nand_fbcr;
+ u32 res11;
+ __be32 nand_fir0;
+ __be32 nand_fir1;
+ __be32 nand_fir2;
+ u32 res12[0x10];
+ __be32 nand_csel;
+ u32 res13;
+ __be32 nandseq_strt;
+ u32 res14;
+ __be32 nand_evter_stat;
+ u32 res15;
+ __be32 pgrdcmpl_evt_stat;
+ u32 res16[0x2];
+ __be32 nand_evter_en;
+ u32 res17[0x2];
+ __be32 nand_evter_intr_en;
+ u32 res18[0x2];
+ __be32 nand_erattr0;
+ __be32 nand_erattr1;
+ u32 res19[0x10];
+ __be32 nand_fsr;
+ u32 res20;
+ __be32 nand_eccstat[4];
+ u32 res21[0x20];
+ __be32 nanndcr;
+ u32 res22[0x2];
+ __be32 nand_autoboot_trgr;
+ u32 res23;
+ __be32 nand_mdr;
+ u32 res24[0x5C];
+};
+
+/*
+ * IFC controller NOR Machine registers
+ */
+struct fsl_ifc_nor {
+ __be32 nor_evter_stat;
+ u32 res1[0x2];
+ __be32 nor_evter_en;
+ u32 res2[0x2];
+ __be32 nor_evter_intr_en;
+ u32 res3[0x2];
+ __be32 nor_erattr0;
+ __be32 nor_erattr1;
+ __be32 nor_erattr2;
+ u32 res4[0x4];
+ __be32 norcr;
+ u32 res5[0xEF];
+};
+
+/*
+ * IFC controller GPCM Machine registers
+ */
+struct fsl_ifc_gpcm {
+ __be32 gpcm_evter_stat;
+ u32 res1[0x2];
+ __be32 gpcm_evter_en;
+ u32 res2[0x2];
+ __be32 gpcm_evter_intr_en;
+ u32 res3[0x2];
+ __be32 gpcm_erattr0;
+ __be32 gpcm_erattr1;
+ __be32 gpcm_erattr2;
+ __be32 gpcm_stat;
+ u32 res4[0x1F3];
+};
+
+/*
+ * IFC Controller Registers
+ */
+struct fsl_ifc_regs {
+ __be32 ifc_rev;
+ u32 res1[0x3];
+ struct {
+ __be32 cspr;
+ u32 res2[0x2];
+ } cspr_cs[FSL_IFC_BANK_COUNT];
+ u32 res3[0x18];
+ struct {
+ __be32 amask;
+ u32 res4[0x2];
+ } amask_cs[FSL_IFC_BANK_COUNT];
+ u32 res5[0x18];
+ struct {
+ __be32 csor;
+ u32 res6[0x2];
+ } csor_cs[FSL_IFC_BANK_COUNT];
+ u32 res7[0x18];
+ struct {
+ __be32 ftim[4];
+ u32 res8[0x8];
+ } ftim_cs[FSL_IFC_BANK_COUNT];
+ u32 res9[0x60];
+ __be32 rb_stat;
+ u32 res10[0x2];
+ __be32 ifc_gcr;
+ u32 res11[0x2];
+ __be32 cm_evter_stat;
+ u32 res12[0x2];
+ __be32 cm_evter_en;
+ u32 res13[0x2];
+ __be32 cm_evter_intr_en;
+ u32 res14[0x2];
+ __be32 cm_erattr0;
+ __be32 cm_erattr1;
+ u32 res15[0x2];
+ __be32 ifc_ccr;
+ __be32 ifc_csr;
+ u32 res16[0x2EB];
+ struct fsl_ifc_nand ifc_nand;
+ struct fsl_ifc_nor ifc_nor;
+ struct fsl_ifc_gpcm ifc_gpcm;
+};
+
+extern unsigned int convert_ifc_address(phys_addr_t addr_base);
+extern int fsl_ifc_find(phys_addr_t addr_base);
+
+/* overview of the fsl ifc controller */
+
+struct fsl_ifc_ctrl {
+ /* device info */
+ struct device *dev;
+ struct fsl_ifc_regs __iomem *regs;
+ int irq;
+ int nand_irq;
+ spinlock_t lock;
+ void *nand;
+
+ u32 nand_stat;
+ wait_queue_head_t nand_wait;
+};
+
+extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+
+
+#endif /* __ASM_FSL_IFC_H */
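For reference, a minimal usage sketch of the ECCSTATn layout defined above: each 32-bit register packs four 4-bit per-sector error counts, most-significant sector first. The helper name below is hypothetical and the ioremapped struct fsl_ifc_nand pointer is assumed to come from the controller driver; in_be32() is the usual powerpc big-endian MMIO accessor. This is an illustration only, not part of the change.

	static inline u32 ifc_sector_errcnt(struct fsl_ifc_nand __iomem *nand,
					    int sector)
	{
		/* four 4-bit counts per ECCSTAT word, sector 0 in bits 27:24 */
		u32 stat = in_be32(&nand->nand_eccstat[sector / 4]);

		return (stat >> ((3 - (sector % 4)) * 8)) & 0xF;
	}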
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 8a0b5ece8f7..420b45368fc 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -238,8 +238,6 @@ struct fsl_lbc_regs {
#define FPAR_LP_CI_SHIFT 0
__be32 fbcr; /**< Flash Byte Count Register */
#define FBCR_BC 0x00000FFF
- u8 res11[0x8];
- u8 res8[0xF00];
};
/*
@@ -294,6 +292,11 @@ struct fsl_lbc_ctrl {
/* status read from LTESR by irq handler */
unsigned int irq_status;
+
+#ifdef CONFIG_SUSPEND
+ /* save regs when the system goes into deep sleep */
+ struct fsl_lbc_regs *saved_regs;
+#endif
};
extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 86004930a78..dfdb95bc59a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -5,7 +5,6 @@
#include <asm/page.h>
extern struct kmem_cache *hugepte_cache;
-extern void __init reserve_hugetlb_gpages(void);
static inline pte_t *hugepd_page(hugepd_t hpd)
{
@@ -22,14 +21,14 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
unsigned pdshift)
{
/*
- * On 32-bit, we have multiple higher-level table entries that point to
- * the same hugepte. Just use the first one since they're all
+ * On FSL BookE, we have multiple higher-level table entries that
+ * point to the same hugepte. Just use the first one since they're all
* identical. So for that case, idx=0.
*/
unsigned long idx = 0;
pte_t *dir = hugepd_page(*hpdp);
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC_FSL_BOOK3E
idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif
@@ -53,7 +52,8 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
}
#endif
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+ pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
@@ -124,7 +124,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
+#ifdef HUGETLB_NEED_PRELOAD
+ /*
+ * The "return 1" forces a call of update_mmu_cache, which will write a
+ * TLB entry. Without this, platforms that don't do a write of the TLB
+ * entry in the TLB miss handler asm will fault ad infinitum.
+ */
+ ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ return 1;
+#else
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+#endif
}
static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -142,14 +152,24 @@ static inline void arch_release_hugepage(struct page *page)
}
#else /* ! CONFIG_HUGETLB_PAGE */
-static inline void reserve_hugetlb_gpages(void)
-{
- pr_err("Cannot reserve gpages without hugetlb enabled\n");
-}
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+
+/*
+ * FSL Book3E platforms require special gpage handling - the gpages
+ * are reserved early in the boot process by memblock instead of via
+ * the .dts as on IBM platforms.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+extern void __init reserve_hugetlb_gpages(void);
+#else
+static inline void reserve_hugetlb_gpages(void)
+{
+}
#endif
#endif /* _ASM_POWERPC_HUGETLB_H */
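For context on the "return 1" convention above: the generic hugetlb fault path only writes a TLB entry via update_mmu_cache() when huge_ptep_set_access_flags() reports a change, roughly as in the simplified sketch below (illustrative only; "write_fault" stands in for the dirty flag the real caller passes). That is why HUGETLB_NEED_PRELOAD platforms must force the non-zero return.

	/* simplified sketch of the generic caller, for illustration only */
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_fault))
		update_mmu_cache(vma, address, ptep);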
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 45698d55cd6..a3855b81ead 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -394,7 +394,7 @@ __do_out_asm(_rec_outl, "stwbrx")
#endif /* CONFIG_PPC32 */
/* The "__do_*" operations below provide the actual "base" implementation
- * for each of the defined acccessor. Some of them use the out_* functions
+ * for each of the defined accessors. Some of them use the out_* functions
* directly, some of them still use EEH, though we might change that in the
* future. Those macros below provide the necessary argument swapping and
* handling of the IO base for PIO.
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index bffd062adf7..c9776202d7e 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -32,11 +32,11 @@
#ifndef __ASSEMBLY__
-#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
extern void reserve_kdump_trampoline(void);
extern void setup_kdump_trampoline(void);
#else
-/* !CRASH_DUMP || RELOCATABLE */
+/* !CRASH_DUMP || !NONSTATIC_KERNEL */
static inline void reserve_kdump_trampoline(void) { ; }
static inline void setup_kdump_trampoline(void) { ; }
#endif
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f921eb121d3..16d7e33d35e 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -49,7 +49,6 @@
#define KEXEC_STATE_REAL_MODE 2
#ifndef __ASSEMBLY__
-#include <linux/cpumask.h>
#include <asm/reg.h>
typedef void (*crash_shutdown_t)(void);
@@ -73,11 +72,6 @@ extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
-extern cpumask_t cpus_in_sr;
-static inline int kexec_sr_activated(int cpu)
-{
- return cpumask_test_cpu(cpu, &cpus_in_sr);
-}
struct kimage;
struct pt_regs;
@@ -94,7 +88,6 @@ extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
#else /* !CONFIG_KEXEC */
-static inline int kexec_sr_activated(int cpu) { return 0; }
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
index d8520ef121f..fc195d0b3c3 100644
--- a/arch/powerpc/include/asm/keylargo.h
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -51,7 +51,7 @@
#define KL_GPIO_SOUND_POWER (KEYLARGO_GPIO_0+0x05)
-/* Hrm... this one is only to be used on Pismo. It seeem to also
+/* Hrm... this one is only to be used on Pismo. It seems to also
* control the timebase enable on other machines. Still to be
* experimented... --BenH.
*/
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 0ad432bc81d..f7727d91ac6 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -170,8 +170,8 @@ struct kvm_sregs {
} ppc64;
struct {
__u32 sr[16];
- __u64 ibat[8];
- __u64 dbat[8];
+ __u64 ibat[8];
+ __u64 dbat[8];
} ppc32;
} s;
struct {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d4df013ad77..69c7377d207 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
}
#endif
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
- unsigned long pte_index)
-{
- unsigned long rb, va_low;
-
- rb = (v & ~0x7fUL) << 16; /* AVA field */
- va_low = pte_index >> 3;
- if (v & HPTE_V_SECONDARY)
- va_low = ~va_low;
- /* xor vsid from AVA */
- if (!(v & HPTE_V_1TB_SEG))
- va_low ^= v >> 12;
- else
- va_low ^= v >> 24;
- va_low &= 0x7ff;
- if (v & HPTE_V_LARGE) {
- rb |= 1; /* L field */
- if (cpu_has_feature(CPU_FTR_ARCH_206) &&
- (r & 0xff000)) {
- /* non-16MB large page, must be 64k */
- /* (masks depend on page size) */
- rb |= 0x1000; /* page encoding in LP field */
- rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
- }
- } else {
- /* 4kB page */
- rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
- }
- rb |= (v >> 54) & 0x300; /* B field */
- return rb;
-}
-
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index e43fe42b987..d0ac94f98f9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
#define SPAPR_TCE_SHIFT 12
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index f77c708c67a..233f9ecae76 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -231,7 +231,7 @@ LV1_CALL(allocate_memory, 4, 2, 0 )
LV1_CALL(write_htab_entry, 4, 0, 1 )
LV1_CALL(construct_virtual_address_space, 3, 2, 2 )
LV1_CALL(invalidate_htab_entries, 5, 0, 3 )
-LV1_CALL(get_virtual_address_space_id_of_ppe, 1, 1, 4 )
+LV1_CALL(get_virtual_address_space_id_of_ppe, 0, 1, 4 )
LV1_CALL(query_logical_partition_address_region_info, 1, 5, 6 )
LV1_CALL(select_virtual_address_space, 1, 0, 7 )
LV1_CALL(pause, 1, 0, 9 )
@@ -264,7 +264,7 @@ LV1_CALL(configure_execution_time_variable, 1, 0, 77 )
LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
LV1_CALL(create_repository_node, 6, 0, 90 )
-LV1_CALL(get_repository_node_value, 5, 2, 91 )
+LV1_CALL(read_repository_node, 5, 2, 91 )
LV1_CALL(modify_repository_node_value, 6, 0, 92 )
LV1_CALL(remove_repository_node, 4, 0, 93 )
LV1_CALL(read_htab_entries, 2, 5, 95 )
@@ -276,7 +276,7 @@ LV1_CALL(construct_io_irq_outlet, 1, 1, 120 )
LV1_CALL(destruct_io_irq_outlet, 1, 0, 121 )
LV1_CALL(map_htab, 1, 1, 122 )
LV1_CALL(unmap_htab, 1, 0, 123 )
-LV1_CALL(get_version_info, 0, 1, 127 )
+LV1_CALL(get_version_info, 0, 2, 127 )
LV1_CALL(insert_htab_entry, 6, 3, 158 )
LV1_CALL(read_virtual_uart, 3, 1, 162 )
LV1_CALL(write_virtual_uart, 3, 1, 163 )
@@ -294,9 +294,9 @@ LV1_CALL(unmap_device_dma_region, 4, 0, 177 )
LV1_CALL(net_add_multicast_address, 4, 0, 185 )
LV1_CALL(net_remove_multicast_address, 4, 0, 186 )
LV1_CALL(net_start_tx_dma, 4, 0, 187 )
-LV1_CALL(net_stop_tx_dma, 3, 0, 188 )
+LV1_CALL(net_stop_tx_dma, 2, 0, 188 )
LV1_CALL(net_start_rx_dma, 4, 0, 189 )
-LV1_CALL(net_stop_rx_dma, 3, 0, 190 )
+LV1_CALL(net_stop_rx_dma, 2, 0, 190 )
LV1_CALL(net_set_interrupt_status_indicator, 4, 0, 191 )
LV1_CALL(net_set_interrupt_mask, 4, 0, 193 )
LV1_CALL(net_control, 6, 2, 194 )
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index b540d6fcedd..bf37931d1ad 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -213,6 +213,9 @@ struct machdep_calls {
* allow assignment/enabling of the device. */
int (*pcibios_enable_device_hook)(struct pci_dev *);
+ /* Called after scan and before resource survey */
+ void (*pcibios_fixup_phb)(struct pci_controller *hose);
+
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
*/
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
deleted file mode 100644
index 43efc345065..00000000000
--- a/arch/powerpc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_POWERPC_MEMBLOCK_H
-#define _ASM_POWERPC_MEMBLOCK_H
-
-#include <asm/udbg.h>
-
-#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-
-#endif /* _ASM_POWERPC_MEMBLOCK_H */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 0260ea5ec3c..f5f89cafebd 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -214,6 +214,10 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
+#ifdef CONFIG_PPC_ICSWX
+ struct spinlock *cop_lockp; /* guard cop related stuff */
+ unsigned long acop; /* mask of enabled coprocessor types */
+#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
u64 high_slices_psize; /* 4 bits per slice for now */
@@ -254,6 +258,13 @@ extern int mmu_vmemmap_psize;
#ifdef CONFIG_PPC64
extern unsigned long linear_map_top;
+
+/*
+ * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+ * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to
+ * return 1, indicating that the tlb requires preloading.
+ */
+#define HUGETLB_NEED_PRELOAD
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index db645ec842b..412ba493cb9 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -312,10 +312,9 @@ extern void slb_set_size(u16 size);
* (i.e. everything above 0xC000000000000000), except the very top
* segment, which simplifies several things.
*
- * - We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
+ * - We allow for 16 significant bits of ESID and 19 bits of
+ * context for user addresses. i.e. 16T (44 bits) of address space for
+ * up to half a million contexts.
*
* - The scramble function gives robust scattering in the hash
* table (at least based on some initial results). The previous
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index e6fae49e0b7..67b4d983723 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -251,6 +251,9 @@ struct mpic_irq_save {
/* The instance data of a given MPIC */
struct mpic
{
+ /* The OpenFirmware dt node for this MPIC */
+ struct device_node *node;
+
/* The remapper for this MPIC */
struct irq_host *irqhost;
@@ -293,6 +296,9 @@ struct mpic
/* Register access method */
enum mpic_reg_type reg_type;
+ /* The physical base address of the MPIC */
+ phys_addr_t paddr;
+
/* The various ioremap'ed bases */
struct mpic_reg_bank gregs;
struct mpic_reg_bank tmregs;
@@ -331,11 +337,11 @@ struct mpic
* Note setting any ID (leaving those bits to 0) means standard MPIC
*/
-/* This is the primary controller, only that one has IPIs and
- * has afinity control. A non-primary MPIC always uses CPU0
- * registers only
+/*
+ * This is a secondary ("chained") controller; it only uses the CPU0
+ * registers. Primary controllers have IPIs and affinity control.
*/
-#define MPIC_PRIMARY 0x00000001
+#define MPIC_SECONDARY 0x00000001
/* Set this for a big-endian MPIC */
#define MPIC_BIG_ENDIAN 0x00000002
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 2893e8f5406..a4b28f165b6 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -109,6 +109,14 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
#ifndef __ASSEMBLY__
@@ -169,7 +177,11 @@ enum OpalPendingState {
OPAL_EVENT_NVRAM = 0x2,
OPAL_EVENT_RTC = 0x4,
OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
- OPAL_EVENT_CONSOLE_INPUT = 0x10
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100
};
/* Machine check related definitions */
@@ -258,13 +270,49 @@ enum OpalPeAction {
OPAL_MAP_PE = 1
};
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
enum OpalPciResetAndReinitScope {
OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
- OPAL_PCI_IODA_RESET = 6,
+ OPAL_PCI_IODA_TABLE_RESET = 6,
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
};
-enum OpalPciResetState { OPAL_DEASSERT_RESET = 0, OPAL_ASSERT_RESET = 1 };
+enum OpalPciMaskAction {
+ OPAL_UNMASK_ERROR_TYPE = 0,
+ OPAL_MASK_ERROR_TYPE = 1
+};
+
+enum OpalSlotLedType {
+ OPAL_SLOT_LED_ID_TYPE = 0,
+ OPAL_SLOT_LED_FAULT_TYPE = 1
+};
+
+enum OpalLedAction {
+ OPAL_TURN_OFF_LED = 0,
+ OPAL_TURN_ON_LED = 1,
+ OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
+};
+
+enum OpalEpowStatus {
+ OPAL_EPOW_NONE = 0,
+ OPAL_EPOW_UPS = 1,
+ OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
+ OPAL_EPOW_OVER_INTERNAL_TEMP = 3
+};
struct opal_machine_check_event {
enum OpalMCE_Version version:8; /* 0x00 */
@@ -314,8 +362,74 @@ struct opal_machine_check_event {
} u;
};
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ uint32_t brdgCtl;
+
+ // P7IOC utl regs
+ uint32_t portStatusReg;
+ uint32_t rootCmplxStatus;
+ uint32_t busAgentStatus;
+
+ // P7IOC cfg regs
+ uint32_t deviceStatus;
+ uint32_t slotStatus;
+ uint32_t linkStatus;
+ uint32_t devCmdStatus;
+ uint32_t devSecStatus;
+
+ // cfg AER regs
+ uint32_t rootErrorStatus;
+ uint32_t uncorrErrorStatus;
+ uint32_t corrErrorStatus;
+ uint32_t tlpHdr1;
+ uint32_t tlpHdr2;
+ uint32_t tlpHdr3;
+ uint32_t tlpHdr4;
+ uint32_t sourceId;
+
+ uint32_t rsv3;
+
+ // Record data about the call to allocate a buffer.
+ uint64_t errorClass;
+ uint64_t correlator;
+
+ //P7IOC MMIO Error Regs
+ uint64_t p7iocPlssr; // n120
+ uint64_t p7iocCsr; // n110
+ uint64_t lemFir; // nC00
+ uint64_t lemErrorMask; // nC18
+ uint64_t lemWOF; // nC40
+ uint64_t phbErrorStatus; // nC80
+ uint64_t phbFirstErrorStatus; // nC88
+ uint64_t phbErrorLog0; // nCC0
+ uint64_t phbErrorLog1; // nCC8
+ uint64_t mmioErrorStatus; // nD00
+ uint64_t mmioFirstErrorStatus; // nD08
+ uint64_t mmioErrorLog0; // nD40
+ uint64_t mmioErrorLog1; // nD48
+ uint64_t dma0ErrorStatus; // nD80
+ uint64_t dma0FirstErrorStatus; // nD88
+ uint64_t dma0ErrorLog0; // nDC0
+ uint64_t dma0ErrorLog1; // nDC8
+ uint64_t dma1ErrorStatus; // nE00
+ uint64_t dma1FirstErrorStatus; // nE08
+ uint64_t dma1ErrorLog0; // nE40
+ uint64_t dma1ErrorLog1; // nE48
+ uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
typedef struct oppanel_line {
- /* XXX */
+ const char * line;
+ uint64_t line_len;
} oppanel_line_t;
/* API functions */
@@ -413,6 +527,15 @@ int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
uint64_t pci_mem_size);
int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
+int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer, uint64_t diag_buffer_len);
+int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer, uint64_t diag_buffer_len);
+int64_t opal_pci_fence_phb(uint64_t phb_id);
+int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
+int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
+int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_set_system_attention_led(uint8_t led_action);
+
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
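As an illustration of how the new diagnostic call and the OpalIoP7IOCPhbErrorData overlay above fit together (the phb_id variable and the log message are placeholders, not taken from this patch):

	struct OpalIoP7IOCPhbErrorData diag;
	int64_t rc;

	rc = opal_pci_get_phb_diag_data(phb_id, &diag, sizeof(diag));
	if (rc == OPAL_SUCCESS)
		pr_info("PHB bridge control: 0x%08x\n", diag.brdgCtl);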
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 17722c73ba2..269c05a36d9 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -135,6 +135,7 @@ struct paca_struct {
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
+ u8 nap_state_lost; /* NV GPR values lost in power7_idle */
#ifdef CONFIG_PPC_POWERNV
/* Pointer to OPAL machine check event structure set by the
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index dd9c4fd038e..f072e974f8a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -92,20 +92,34 @@ extern unsigned int HPAGE_SHIFT;
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
-#if defined(CONFIG_RELOCATABLE)
+#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
+
+#ifdef CONFIG_RELOCATABLE_PPC32
+extern long long virt_phys_offset;
#endif
+
+#endif /* __ASSEMBLY__ */
#define PHYSICAL_START kernstart_addr
-#else
+
+#else /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#endif
+/* See Description below for VIRT_PHYS_OFFSET */
+#ifdef CONFIG_RELOCATABLE_PPC32
+#define VIRT_PHYS_OFFSET virt_phys_offset
+#else
+#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
+#endif
+
+
#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
-#elif defined(CONFIG_RELOCATABLE)
+#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START memstart_addr
#else
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
@@ -125,12 +139,77 @@ extern phys_addr_t kernstart_addr;
* determine MEMORY_START until then. However we can determine PHYSICAL_START
* from information at hand (program counter, TLB lookup).
*
+ * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
+ *
+ * With RELOCATABLE_PPC32, we support loading the kernel at any physical
+ * address without any restriction on the page alignment.
+ *
+ * We find the runtime address of _stext and relocate ourselves based on
+ * the following calculation:
+ *
+ * virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
+ * MODULO(_stext.run,256M)
+ * and create the following mapping:
+ *
+ * ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
+ *
+ * When we process relocations, we cannot depend on the
+ * existing equation for the __va()/__pa() translations:
+ *
+ * __va(x) = (x) - PHYSICAL_START + KERNELBASE
+ *
+ * Where:
+ * PHYSICAL_START = kernstart_addr = Physical address of _stext
+ * KERNELBASE = Compiled virtual address of _stext.
+ *
+ * This formula holds true iff the kernel load address is TLB page aligned.
+ *
+ * In our case, we need to also account for the shift in the kernel Virtual
+ * address.
+ *
+ * E.g.,
+ *
+ * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
+ * In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M
+ *
+ * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
+ * = 0xbc100000 , which is wrong.
+ *
+ * Rather, it should be : 0xc0000000 + 0x100000 = 0xc0100000
+ * according to our mapping.
+ *
+ * Hence we use the following formula to get the translations right:
+ *
+ * __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
+ *
+ * Where :
+ * PHYSICAL_START = dynamic load address.(kernstart_addr variable)
+ * Effective KERNELBASE = virtual_base =
+ * = ALIGN_DOWN(KERNELBASE,256M) +
+ * MODULO(PHYSICAL_START,256M)
+ *
+ * To make the cost of __va() / __pa() more lightweight, we introduce
+ * a new variable virt_phys_offset, which will hold :
+ *
+ * virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
+ * = ALIGN_DOWN(KERNELBASE,256M) -
+ * ALIGN_DOWN(PHYSICAL_START,256M)
+ *
+ * Hence :
+ *
+ * __va(x) = x - PHYSICAL_START + Effective KERNELBASE
+ * = x + virt_phys_offset
+ *
+ * and
+ * __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
+ * = x - virt_phys_offset
+ *
* On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
* the other definitions for __va & __pa.
*/
#ifdef CONFIG_BOOKE
-#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE))
-#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
+#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
@@ -290,6 +369,7 @@ extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *p);
extern int page_is_ram(unsigned long pfn);
+extern int devmem_is_allowed(unsigned long pfn);
#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
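To make the __va()/__pa() arithmetic above concrete, the worked example from the comment (kernel loaded at 64MB, KERNELBASE = PAGE_OFFSET = 0xc0000000, 256MB TLB alignment assumed) evaluates as follows:

	/*
	 * virt_phys_offset = ALIGN_DOWN(0xc0000000, 256M)
	 *                  - ALIGN_DOWN(0x04000000, 256M)
	 *                  = 0xc0000000 - 0x00000000 = 0xc0000000
	 *
	 * __va(0x00100000) = 0x00100000 + 0xc0000000 = 0xc0100000
	 * __pa(0xc0100000) = 0xc0100000 - 0xc0000000 = 0x00100000
	 */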
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fb40ede6bc0..fed85e6290e 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -130,7 +130,9 @@ do { \
#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
#endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 56b879ab3a4..882b6aa6c85 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -153,8 +153,8 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
-#ifdef CONFIG_EEH
struct pci_dev *pcidev; /* back-pointer to the pci device */
+#ifdef CONFIG_EEH
int class_code; /* pci device class */
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
@@ -164,6 +164,10 @@ struct pci_dn {
int eeh_false_positives; /* # times this device reported #ff's */
u32 config_space[16]; /* saved PCI config space */
#endif
+#define IODA_INVALID_PE (-1)
+#ifdef CONFIG_PPC_POWERNV
+ int pe_number;
+#endif
};
/* Get the pointer to a device_node's pci_dn */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 49c3de582be..1c92013466e 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -184,8 +184,6 @@ extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
-extern int pci_read_irq_line(struct pci_dev *dev);
-
struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 88b0bd925a8..2e0e4110f7a 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -170,6 +170,9 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#define pgprot_cached_noncoherent(prot) \
+ (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
#define pgprot_writecombine pgprot_noncached_wc
struct file;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index eb11a446720..b585bff1a02 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -382,6 +382,9 @@ static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
}
#endif
+extern unsigned long cpuidle_disable;
+enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 559da199edb..7fdc2c0b7fa 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -951,6 +951,7 @@
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
#define PVR_476 0x11a52000
+#define PVR_476FPE 0x7ff50000
#define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 03c48e819c8..500fe1dc43e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -187,6 +187,10 @@
#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
#endif
+#ifdef CONFIG_PPC_ICSWX
+#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */
+#endif
+
/* Bit definitions for CCR1. */
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 41f69ae79d4..01c143bb77a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -245,6 +245,12 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+#ifdef CONFIG_PPC_RTAS_DAEMON
+extern void rtas_cancel_event_scan(void);
+#else
+static inline void rtas_cancel_event_scan(void) { }
+#endif
+
/* Error types logged. */
#define ERR_FLAG_ALREADY_LOGGED 0x0
#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
@@ -307,5 +313,17 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
extern void __cpuinit rtas_give_timebase(void);
extern void __cpuinit rtas_take_timebase(void);
+#ifdef CONFIG_PPC_RTAS
+static inline int page_is_rtas_user_buf(unsigned long pfn)
+{
+ unsigned long paddr = (pfn << PAGE_SHIFT);
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+ return 1;
+ return 0;
+}
+#else
+static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
deleted file mode 100644
index bb1e2cdeb9b..00000000000
--- a/arch/powerpc/include/asm/rwsem.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_PPC64
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- while ((tmp = sem->count) >= 0) {
- if (tmp == cmpxchg(&sem->count, tmp,
- tmp + RWSEM_ACTIVE_READ_BIAS)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
- long tmp;
-
- tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- __down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
- if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
- (atomic_long_t *)&sem->count) < 0))
- rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
- atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
- (atomic_long_t *)&sem->count);
- if (tmp < 0)
- rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
- return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 866f7606da6..2fc2af8fbf5 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -69,4 +69,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 4e360bd4a35..93f280e2327 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -25,7 +25,7 @@
#ifdef __KERNEL__
#include <linux/workqueue.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/mutex.h>
#define LS_SIZE (256 * 1024)
@@ -166,7 +166,7 @@ struct spu {
/* beat only */
u64 shadow_int_mask_RW[3];
- struct sys_device sysdev;
+ struct device dev;
int has_mem_affinity;
struct list_head aff_list;
@@ -237,7 +237,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
struct file;
struct spufs_calls {
long (*create_thread)(const char __user *name,
- unsigned int flags, mode_t mode,
+ unsigned int flags, umode_t mode,
struct file *neighbor);
long (*spu_run)(struct file *filp, __u32 __user *unpc,
__u32 __user *ustatus);
@@ -270,11 +270,11 @@ struct spufs_calls {
int register_spu_syscalls(struct spufs_calls *calls);
void unregister_spu_syscalls(struct spufs_calls *calls);
-int spu_add_sysdev_attr(struct sysdev_attribute *attr);
-void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+int spu_add_dev_attr(struct device_attribute *attr);
+void spu_remove_dev_attr(struct device_attribute *attr);
-int spu_add_sysdev_attr_group(struct attribute_group *attrs);
-void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+int spu_add_dev_attr_group(struct attribute_group *attrs);
+void spu_remove_dev_attr_group(struct attribute_group *attrs);
int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
unsigned long dsisr, unsigned *flt);
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index e30a13d1ee7..c377457d1b8 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -193,8 +193,8 @@ extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
-extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void die(const char *, struct pt_regs *, long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
#ifdef CONFIG_BOOKE_WDT
@@ -221,6 +221,15 @@ extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
extern int powersave_nap; /* set if nap mode can be used in idle loop */
+void cpu_idle_wait(void);
+
+#ifdef CONFIG_PSERIES_IDLE
+extern void update_smt_snooze_delay(int snooze);
+extern int pseries_notify_cpuidle_add_cpu(int cpu);
+#else
+static inline void update_smt_snooze_delay(int snooze) {}
+static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
+#endif
/*
* Atomic exchange
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index f663634cccc..743f36b38e5 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -26,10 +26,14 @@
/*
* Tces come in two formats, one for the virtual bus and a different
- * format for PCI
+ * format for PCI. PCI TCEs can have hardware or software maintained
+ * coherency.
*/
-#define TCE_VB 0
-#define TCE_PCI 1
+#define TCE_VB 0
+#define TCE_PCI 1
+#define TCE_PCI_SWINV_CREATE 2
+#define TCE_PCI_SWINV_FREE 4
+#define TCE_PCI_SWINV_PAIR 8
/* TCE page size is 4096 bytes (1 << 12) */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231ec1f..96471494096 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index fe6f7c2c9c6..7eb10fb96cd 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
extern void secondary_cpu_time_init(void);
extern void iSeries_time_init_early(void);
+DECLARE_PER_CPU(u64, decrementers_next_tb);
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 1e104af0848..c97185885c6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -3,7 +3,7 @@
#ifdef __KERNEL__
-struct sys_device;
+struct device;
struct device_node;
#ifdef CONFIG_NUMA
@@ -86,19 +86,19 @@ extern int __node_distance(int, int);
extern void __init dump_numa_cpu_topology(void);
-extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
-extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
+extern int sysfs_add_device_to_node(struct device *dev, int nid);
+extern void sysfs_remove_device_from_node(struct device *dev, int nid);
#else
static inline void dump_numa_cpu_topology(void) {}
-static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+static inline int sysfs_add_device_to_node(struct device *dev, int nid)
{
return 0;
}
-static inline void sysfs_remove_device_from_node(struct sys_device *dev,
+static inline void sysfs_remove_device_from_node(struct device *dev,
int nid)
{
}
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index 8947b9827bc..0abf7f2c6df 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -5,8 +5,11 @@
* This is here because we used to use l64 for 64bit powerpc
* and we don't want to impact user mode with our change to ll64
* in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
*/
-#if defined(__powerpc64__) && !defined(__KERNEL__)
+#if !defined(__SANE_USERSPACE_TYPES__) && defined(__powerpc64__) && !defined(__KERNEL__)
# include <asm-generic/int-l64.h>
#else
# include <asm-generic/int-ll64.h>
@@ -27,12 +30,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifdef __powerpc64__
-typedef unsigned int umode_t;
-#else
-typedef unsigned short umode_t;
-#endif
-
typedef struct {
__u32 u[4];
} __attribute__((aligned(16))) __vector128;
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ce4f7f17911..ee728e433aa 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -85,6 +85,8 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
+obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o
+
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7c5324f1ec9..04caee7d9bc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -208,6 +208,7 @@ int main(void)
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
+ DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
#endif /* CONFIG_PPC64 */
/* RTAS */
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index a3c684b4c86..92c6b008dd2 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -451,15 +451,15 @@ out:
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
struct cache_dir *cache_dir;
- struct sys_device *sysdev;
+ struct device *dev;
struct kobject *kobj = NULL;
- sysdev = get_cpu_sysdev(cpu_id);
- WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
- if (!sysdev)
+ dev = get_cpu_device(cpu_id);
+ WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
+ if (!dev)
goto err;
- kobj = kobject_create_and_add("cache", &sysdev->kobj);
+ kobj = kobject_create_and_add("cache", &dev->kobj);
if (!kobj)
goto err;
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
index 7f818feaa7a..ebc62f42a23 100644
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -41,11 +41,16 @@ _GLOBAL(__setup_cpu_a2)
* core local but doing it always won't hurt
*/
-#ifdef CONFIG_PPC_WSP_COPRO
+#ifdef CONFIG_PPC_ICSWX
/* Make sure ACOP starts out as zero */
li r3,0
mtspr SPRN_ACOP,r3
+ /* Skip the following if we are in Guest mode */
+ mfmsr r3
+ andis. r0,r3,MSR_GS@h
+ bne _icswx_skip_guest
+
/* Enable icswx instruction */
mfspr r3,SPRN_A2_CCR2
ori r3,r3,A2_CCR2_ENABLE_ICSWX
@@ -54,7 +59,8 @@ _GLOBAL(__setup_cpu_a2)
/* Unmask all CTs in HACOP */
li r3,-1
mtspr SPRN_HACOP,r3
-#endif /* CONFIG_PPC_WSP_COPRO */
+_icswx_skip_guest:
+#endif /* CONFIG_PPC_ICSWX */
/* Enable doorbell */
mfspr r3,SPRN_A2_CCR2
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index edae5bb06f1..81db9e2a8a2 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1505,6 +1505,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
+ { /* APM8018X */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x7ff11432,
+ .cpu_name = "APM8018X",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 |
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@@ -1830,6 +1843,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_47x,
.platform = "ppc470",
},
+ { /* 476fpe */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x7ff50000,
+ .cpu_name = "476fpe",
+ .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
{ /* 476 iss */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00050000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index d879809d5c4..28be3452e67 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -10,85 +10,85 @@
*
*/
-#undef DEBUG
-
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
-#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/elf.h>
-#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
-#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/prom.h>
-#include <asm/firmware.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/setjmp.h>
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
+/*
+ * The primary CPU waits a while for all secondary CPUs to enter. This is to
+ * avoid sending an IPI if the secondary CPUs are entering
+ * crash_kexec_secondary on their own (eg via a system reset).
+ *
+ * The secondary timeout has to be longer than the primary. Both timeouts are
+ * in milliseconds.
+ */
+#define PRIMARY_TIMEOUT 500
+#define SECONDARY_TIMEOUT 1000
-/* This keeps a track of which one is crashing cpu. */
+#define IPI_TIMEOUT 10000
+#define REAL_MODE_TIMEOUT 10000
+
+/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
-static cpumask_t cpus_in_crash = CPU_MASK_NONE;
-cpumask_t cpus_in_sr = CPU_MASK_NONE;
+static atomic_t cpus_in_crash;
+static int time_to_dump;
#define CRASH_HANDLER_MAX 3
/* NULL terminated list of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
static DEFINE_SPINLOCK(crash_handlers_lock);
+static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
+
+static int handle_fault(struct pt_regs *regs)
+{
+ if (crash_shutdown_cpu == smp_processor_id())
+ longjmp(crash_shutdown_buf, 1);
+ return 0;
+}
+
#ifdef CONFIG_SMP
-static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
void crash_ipi_callback(struct pt_regs *regs)
{
+ static cpumask_t cpus_state_saved = CPU_MASK_NONE;
+
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
hard_irq_disable();
- if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
crash_save_cpu(regs, cpu);
- cpumask_set_cpu(cpu, &cpus_in_crash);
-
- /*
- * Entered via soft-reset - could be the kdump
- * process is invoked using soft-reset or user activated
- * it if some CPU did not respond to an IPI.
- * For soft-reset, the secondary CPU can enter this func
- * twice. 1 - using IPI, and 2. soft-reset.
- * Tell the kexec CPU that entered via soft-reset and ready
- * to go down.
- */
- if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
- cpumask_clear_cpu(cpu, &cpus_in_sr);
- atomic_inc(&enter_on_soft_reset);
+ cpumask_set_cpu(cpu, &cpus_state_saved);
}
+ atomic_inc(&cpus_in_crash);
+ smp_mb__after_atomic_inc();
+
/*
* Starting the kdump boot.
* This barrier is needed to make sure that all CPUs are stopped.
- * If not, soft-reset will be invoked to bring other CPUs.
*/
- while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
+ while (!time_to_dump)
cpu_relax();
if (ppc_md.kexec_cpu_down)
@@ -103,106 +103,99 @@ void crash_ipi_callback(struct pt_regs *regs)
/* NOTREACHED */
}
-/*
- * Wait until all CPUs are entered via soft-reset.
- */
-static void crash_soft_reset_check(int cpu)
-{
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
-
- cpumask_clear_cpu(cpu, &cpus_in_sr);
- while (atomic_read(&enter_on_soft_reset) != ncpus)
- cpu_relax();
-}
-
-
static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
-
unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ int tries = 0;
+ int (*old_handler)(struct pt_regs *regs);
+
+ printk(KERN_EMERG "Sending IPI to other CPUs\n");
crash_send_ipi(crash_ipi_callback);
smp_wmb();
+again:
/*
* FIXME: Until we will have the way to stop other CPUs reliably,
* the crash CPU will send an IPI and wait for other CPUs to
* respond.
- * Delay of at least 10 seconds.
*/
- printk(KERN_EMERG "Sending IPI to other cpus...\n");
- msecs = 10000;
- while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
- cpu_relax();
+ msecs = IPI_TIMEOUT;
+ while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
mdelay(1);
- }
/* Would it be better to replace the trap vector here? */
+ if (atomic_read(&cpus_in_crash) >= ncpus) {
+ printk(KERN_EMERG "IPI complete\n");
+ return;
+ }
+
+ printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
+ ncpus - atomic_read(&cpus_in_crash));
+
/*
- * FIXME: In case if we do not get all CPUs, one possibility: ask the
- * user to do soft reset such that we get all.
- * Soft-reset will be used until better mechanism is implemented.
+ * If we have a panic timeout set then we can't wait indefinitely
+ * for someone to activate system reset. We also give up on the
+ * second time through if the system reset fails to work.
*/
- if (cpumask_weight(&cpus_in_crash) < ncpus) {
- printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
- ncpus - cpumask_weight(&cpus_in_crash));
- printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
- cpumask_clear(&cpus_in_sr);
- atomic_set(&enter_on_soft_reset, 0);
- while (cpumask_weight(&cpus_in_crash) < ncpus)
- cpu_relax();
- }
+ if ((panic_timeout > 0) || (tries > 0))
+ return;
+
/*
- * Make sure all CPUs are entered via soft-reset if the kdump is
- * invoked using soft-reset.
+ * A system reset will cause all CPUs to take an 0x100 exception.
+ * The primary CPU returns here via setjmp, and the secondary
+ * CPUs reexecute the crash_kexec_secondary path.
*/
- if (cpumask_test_cpu(cpu, &cpus_in_sr))
- crash_soft_reset_check(cpu);
- /* Leave the IPI callback set */
+ old_handler = __debugger;
+ __debugger = handle_fault;
+ crash_shutdown_cpu = smp_processor_id();
+
+ if (setjmp(crash_shutdown_buf) == 0) {
+ printk(KERN_EMERG "Activate system reset (dumprestart) "
+ "to stop other cpu(s)\n");
+
+ /*
+ * A system reset will force all CPUs to execute the
+ * crash code again. We need to reset cpus_in_crash so we
+ * wait for everyone to do this.
+ */
+ atomic_set(&cpus_in_crash, 0);
+ smp_mb();
+
+ while (atomic_read(&cpus_in_crash) < ncpus)
+ cpu_relax();
+ }
+
+ crash_shutdown_cpu = -1;
+ __debugger = old_handler;
+
+ tries++;
+ goto again;
}
/*
- * This function will be called by secondary cpus or by kexec cpu
- * if soft-reset is activated to stop some CPUs.
+ * This function will be called by secondary cpus.
*/
void crash_kexec_secondary(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
unsigned long flags;
- int msecs = 5;
+ int msecs = SECONDARY_TIMEOUT;
local_irq_save(flags);
- /* Wait 5ms if the kexec CPU is not entered yet. */
+
+ /* Wait for the primary crash CPU to signal its progress */
while (crashing_cpu < 0) {
if (--msecs < 0) {
- /*
- * Either kdump image is not loaded or
- * kdump process is not started - Probably xmon
- * exited using 'x'(exit and recover) or
- * kexec_should_crash() failed for all running tasks.
- */
- cpumask_clear_cpu(cpu, &cpus_in_sr);
+ /* No response, kdump image may not have been loaded */
local_irq_restore(flags);
return;
}
+
mdelay(1);
- cpu_relax();
- }
- if (cpu == crashing_cpu) {
- /*
- * Panic CPU will enter this func only via soft-reset.
- * Wait until all secondary CPUs entered and
- * then start kexec boot.
- */
- crash_soft_reset_check(cpu);
- cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
- if (ppc_md.kexec_cpu_down)
- ppc_md.kexec_cpu_down(1, 0);
- machine_kexec(kexec_crash_image);
- /* NOTREACHED */
}
+
crash_ipi_callback(regs);
}
@@ -211,7 +204,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
static void crash_kexec_prepare_cpus(int cpu)
{
/*
- * move the secondarys to us so that we can copy
+ * move the secondaries to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
@@ -225,7 +218,6 @@ static void crash_kexec_prepare_cpus(int cpu)
void crash_kexec_secondary(struct pt_regs *regs)
{
- cpumask_clear(&cpus_in_sr);
}
#endif /* CONFIG_SMP */
@@ -236,7 +228,7 @@ static void crash_kexec_wait_realmode(int cpu)
unsigned int msecs;
int i;
- msecs = 10000;
+ msecs = REAL_MODE_TIMEOUT;
for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
if (i == cpu)
continue;
@@ -308,22 +300,11 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
}
EXPORT_SYMBOL(crash_shutdown_unregister);
-static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
-static int crash_shutdown_cpu = -1;
-
-static int handle_fault(struct pt_regs *regs)
-{
- if (crash_shutdown_cpu == smp_processor_id())
- longjmp(crash_shutdown_buf, 1);
- return 0;
-}
-
void default_machine_crash_shutdown(struct pt_regs *regs)
{
unsigned int i;
int (*old_handler)(struct pt_regs *regs);
-
/*
* This function is only called after the system
* has panicked or is otherwise in a critical state.
@@ -341,15 +322,26 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* such that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
- crash_save_cpu(regs, crashing_cpu);
+
+ /*
+ * If we came in via system reset, wait a while for the secondary
+ * CPUs to enter.
+ */
+ if (TRAP(regs) == 0x100)
+ mdelay(PRIMARY_TIMEOUT);
+
crash_kexec_prepare_cpus(crashing_cpu);
- cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+
+ crash_save_cpu(regs, crashing_cpu);
+
+ time_to_dump = 1;
+
crash_kexec_wait_realmode(crashing_cpu);
machine_kexec_mask_interrupts();
/*
- * Call registered shutdown routines savely. Swap out
+ * Call registered shutdown routines safely. Swap out
* __debugger_fault_handler, and replace on exit.
*/
old_handler = __debugger_fault_handler;
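
The reworked crash path above combines two mechanisms: secondary CPUs are counted on the atomic cpus_in_crash under a bounded IPI_TIMEOUT, and the primary protects itself with setjmp so that a fault taken while waiting (routed through the temporarily installed __debugger / __debugger_fault_handler hook) lands back in the crash code instead of taking the machine down. A minimal userspace sketch of that setjmp/longjmp escape hatch, using POSIX setjmp rather than the kernel's own setjmp/longjmp (an illustration only, not the kernel implementation):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf shutdown_buf;

/* Stand-in for a shutdown hook (or wait loop) that takes a fault. */
static void flaky_hook(void)
{
	/* In the kernel, handle_fault() performs this longjmp for the
	 * CPU that registered itself in crash_shutdown_cpu. */
	longjmp(shutdown_buf, 1);
}

int main(void)
{
	if (setjmp(shutdown_buf) == 0) {
		flaky_hook();		/* may never return normally */
		puts("hook completed");
	} else {
		puts("hook faulted; skip it and continue the crash shutdown");
	}
	return 0;
}

The same pattern appears twice in the file: once around the system-reset wait in crash_kexec_prepare_cpus(), and once around each registered handler in default_machine_crash_shutdown().
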
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 424afb6b8fb..b3ba5163eae 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -28,7 +28,7 @@
#define DBG(fmt...)
#endif
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
memblock_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -67,7 +67,7 @@ void __init setup_kdump_trampoline(void)
DBG(" <- setup_kdump_trampoline()\n");
}
-#endif /* CONFIG_RELOCATABLE */
+#endif /* CONFIG_NONSTATIC_KERNEL */
static int __init parse_savemaxmem(char *p)
{
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cf9c69b9189..d4be7bb3dbd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -65,7 +65,7 @@ BEGIN_FTR_SECTION
lbz r0,PACAPROCSTART(r13)
cmpwi r0,0x80
bne 1f
- li r0,0
+ li r0,1
stb r0,PACAPROCSTART(r13)
b kvm_start_guest
1:
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index b725dab0f88..7dd2981bcc5 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -64,6 +64,35 @@ _ENTRY(_start);
mr r31,r3 /* save device tree ptr */
li r24,0 /* CPU number */
+#ifdef CONFIG_RELOCATABLE
+/*
+ * Relocate ourselves to the current runtime address.
+ * This is called only by the Boot CPU.
+ * "relocate" is called with our current runtime virutal
+ * address.
+ * r21 will be loaded with the physical runtime address of _stext
+ */
+ bl 0f /* Get our runtime address */
+0: mflr r21 /* Make it accessible */
+ addis r21,r21,(_stext - 0b)@ha
+ addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */
+
+ /*
+ * We have the runtime (virtual) address of our base.
+ * We calculate our shift of offset from a 256M page.
+ * We could map the 256M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r21,0,4,31 /* r6 = PHYS_START % 256M */
+ rlwinm r5,r4,0,4,31 /* r5 = KERNELBASE % 256M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+ add r3,r4,r3 /* Required Virtual Address */
+
+ bl relocate
+#endif
+
bl init_cpu_state
/*
@@ -88,6 +117,65 @@ _ENTRY(_start);
#ifdef CONFIG_RELOCATABLE
/*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries.
+ *
+ * r25 will contain RPN/ERPN for the start address of memory
+ * r21 will contain the current offset of _stext
+ */
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+
+ /*
+ * Compute the kernstart_addr.
+ * kernstart_addr => (r6,r8)
+ * kernstart_addr & ~0xfffffff => (r6,r7)
+ */
+ rlwinm r6,r25,0,28,31 /* ERPN. Bits 32-35 of Address */
+ rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */
+ rlwinm r8,r21,0,4,31 /* r8 = (_stext & 0xfffffff) */
+ or r8,r7,r8 /* Compute the lower 32bit of kernstart_addr */
+
+ /* Store kernstart_addr */
+ stw r6,0(r3) /* higher 32bit */
+ stw r8,4(r3) /* lower 32bit */
+
+ /*
+ * Compute the virt_phys_offset :
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+ * When we relocate, we have :
+ *
+ * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
+ *
+ */
+
+ /* KERNELBASE&~0xfffffff => (r4,r5) */
+ li r4, 0 /* higher 32bit */
+ lis r5,KERNELBASE@h
+ rlwinm r5,r5,0,0,3 /* Align to 256M, lower 32bit */
+
+ /*
+ * 64bit subtraction.
+ */
+ subfc r5,r7,r5
+ subfe r4,r6,r4
+
+ /* Store virt_phys_offset */
+ lis r3,virt_phys_offset@ha
+ la r3,virt_phys_offset@l(r3)
+
+ stw r4,0(r3)
+ stw r5,4(r3)
+
+#elif defined(CONFIG_DYNAMIC_MEMSTART)
+ /*
+ * Mapping based, page aligned dynamic kernel loading.
+ *
* r25 will contain RPN/ERPN for the start address of memory
*
* Add the difference between KERNELBASE and PAGE_OFFSET to the
@@ -732,6 +820,8 @@ _GLOBAL(init_cpu_state)
 /* We use the PVR to differentiate 44x cores from 476 */
mfspr r3,SPRN_PVR
srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476FPE@h
+ beq head_start_47x
cmplwi cr0,r3,PVR_476@h
beq head_start_47x
cmplwi cr0,r3,PVR_476_ISS@h
@@ -800,12 +890,29 @@ skpinv: addi r4,r4,1 /* Increment */
/*
* Configure and load pinned entry into TLB slot 63.
*/
+#ifdef CONFIG_NONSTATIC_KERNEL
+ /*
+ * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
+ * entries of the initial mapping set by the boot loader.
+ * The XLAT entry is stored in r25
+ */
+
+ /* Read the XLAT entry for our current mapping */
+ tlbre r25,r23,PPC44x_TLB_XLAT
+
+ lis r3,KERNELBASE@h
+ ori r3,r3,KERNELBASE@l
+
+ /* Use our current RPN entry */
+ mr r4,r25
+#else
lis r3,PAGE_OFFSET@h
ori r3,r3,PAGE_OFFSET@l
/* Kernel is at the base of RAM */
li r4, 0 /* Load the kernel physical address */
+#endif
/* Load the kernel PID = 0 */
li r0,0
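
The relocation arithmetic in the block above is all modulo a 256 MB page: the kernel keeps its offset inside the 256 MB region it was loaded into, so only the region base differs between the link-time and run-time views. The same computation in standalone C, with made-up example addresses (the KERNELBASE value and load address are illustrative assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

#define SZ_256M		0x10000000ULL
#define KERNELBASE	0xc0000000ULL	/* typical 44x virtual base; an assumption here */

int main(void)
{
	/* Example: kernel loaded at physical 4 MB, i.e. not 256 MB aligned. */
	uint64_t kernstart_addr = 0x00400000ULL;	/* ERPN:RPN of _stext, simplified */

	/* The kernel's offset inside its 256 MB page does not change... */
	uint64_t off_in_page = kernstart_addr & (SZ_256M - 1);

	/* ...so the runtime virtual base is KERNELBASE's 256 MB page plus that offset, */
	uint64_t stext_run = (KERNELBASE & ~(SZ_256M - 1)) + off_in_page;

	/* and virt_phys_offset = stext.run - kernstart_addr reduces to the page bases. */
	uint64_t virt_phys_offset = (KERNELBASE & ~(SZ_256M - 1))
				  - (kernstart_addr & ~(SZ_256M - 1));

	printf("stext.run        = 0x%llx\n", (unsigned long long)stext_run);
	printf("virt_phys_offset = 0x%llx\n", (unsigned long long)virt_phys_offset);
	return 0;
}

With these numbers the kernel loaded at physical 4 MB ends up running at 0xc0400000, and virt_phys_offset is simply the difference of the two 256 MB page bases.
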
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 9f5d210ddf3..d5d78c4ceef 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -197,7 +197,7 @@ _ENTRY(__early_start)
bl early_init
-#ifdef CONFIG_RELOCATABLE
+#ifdef CONFIG_DYNAMIC_MEMSTART
lis r3,kernstart_addr@ha
la r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 39a2baa6ad5..7c66ce13da8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,13 +39,23 @@
#define cpu_should_die() 0
#endif
+unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(cpuidle_disable);
+
static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
+ cpuidle_disable = IDLE_POWERSAVE_OFF;
return 0;
}
__setup("powersave=off", powersave_off);
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_TRACEPOINTS)
+static const bool idle_uses_rcu = 1;
+#else
+static const bool idle_uses_rcu;
+#endif
+
/*
* The body of the idle task.
*/
@@ -56,7 +66,10 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ if (!idle_uses_rcu)
+ rcu_idle_enter();
+
while (!need_resched() && !cpu_should_die()) {
ppc64_runlatch_off();
@@ -93,7 +106,9 @@ void cpu_idle(void)
HMT_medium();
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ if (!idle_uses_rcu)
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
if (cpu_should_die())
cpu_die();
@@ -102,6 +117,29 @@ void cpu_idle(void)
}
}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
+ * idle loop and start using the new idle loop.
+ * Required while changing idle handler on SMP systems.
+ * Caller must have changed idle handler to the new value before the call.
+ * This window may be larger on shared systems.
+ */
+void cpu_idle_wait(void)
+{
+ int cpu;
+ smp_mb();
+
+ /* kick all the CPUs so that they exit out of old idle routine */
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ smp_send_reschedule(cpu);
+ }
+ put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
int powersave_nap;
#ifdef CONFIG_SYSCTL
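
cpu_idle_wait() above only kicks the other CPUs; correctness comes from the fact that every pass through the idle loop re-reads the idle handler, so once each CPU has been rescheduled it can no longer be running the old one. A userspace analogue of that "swap the pointer, then poke everyone" pattern (pthread-based, illustrative only; all names are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static void old_idle(void) { usleep(1000); }
static void new_idle(void) { usleep(1000); }

static _Atomic(void (*)(void)) idle_fn;
static atomic_int stop_flag;

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_flag))
		atomic_load(&idle_fn)();	/* re-read the handler every pass */
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	atomic_store(&idle_fn, old_idle);
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	/* Change the handler, then make sure everyone takes another pass
	 * through the loop, the analogue of smp_send_reschedule(). */
	atomic_store(&idle_fn, new_idle);
	usleep(10000);

	atomic_store(&stop_flag, 1);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	puts("all workers are on the new idle handler");
	return 0;
}
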
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 3a70845a51c..fcdff198da4 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -54,6 +54,7 @@ _GLOBAL(power7_idle)
li r0,0
stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
stb r0,PACAHARDIRQEN(r13)
+ stb r0,PACA_NAPSTATELOST(r13)
/* Continue saving state */
SAVE_GPR(2, r1)
@@ -86,6 +87,9 @@ _GLOBAL(power7_wakeup_loss)
rfid
_GLOBAL(power7_wakeup_noloss)
+ lbz r0,PACA_NAPSTATELOST(r13)
+ cmpwi r0,0
+ bne .power7_wakeup_loss
ld r1,PACAR1(r13)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c3c46948d9..701d4aceb4f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -115,6 +115,15 @@ static inline notrace void set_soft_enabled(unsigned long enable)
: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
+static inline notrace void decrementer_check_overflow(void)
+{
+ u64 now = get_tb_or_rtc();
+ u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+
+ if (now >= *next_tb)
+ set_dec(1);
+}
+
notrace void arch_local_irq_restore(unsigned long en)
{
/*
@@ -164,24 +173,21 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
local_paca->hard_enabled = en;
-#ifndef CONFIG_BOOKE
- /* On server, re-trigger the decrementer if it went negative since
- * some processors only trigger on edge transitions of the sign bit.
- *
- * BookE has a level sensitive decrementer (latches in TSR) so we
- * don't need that
+ /*
+ * Trigger the decrementer if we have a pending event. Some processors
+ * only trigger on edge transitions of the sign bit. We might also
+ * have disabled interrupts long enough that the decrementer wrapped
+ * to positive.
*/
- if ((int)mfspr(SPRN_DEC) < 0)
- mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+ decrementer_check_overflow();
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
* Any HV call will have this side effect.
*/
if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- u64 tmp;
- lv1_get_version_info(&tmp);
+ u64 tmp, tmp2;
+ lv1_get_version_info(&tmp, &tmp2);
}
__hard_irq_enable();
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c7b5afeecaf..3fea3689527 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -441,6 +441,9 @@ static void __init fixup_port_irq(int index,
return;
port->irq = virq;
+
+ if (of_device_is_compatible(np, "fsl,ns16550"))
+ port->handle_irq = fsl8250_handle_irq;
}
static void __init fixup_port_pio(int index,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 84daabe2fcb..578f35f1872 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -783,7 +783,7 @@ static const struct file_operations lparcfg_fops = {
static int __init lparcfg_init(void)
{
struct proc_dir_entry *ent;
- mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
/* Allow writing if we have FW_FEATURE_SPLPAR */
if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 9ce1672afb5..c957b1202bd 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -107,9 +107,6 @@ void __init reserve_crashkernel(void)
unsigned long long crash_size, crash_base;
int ret;
- /* this is necessary because of memblock_phys_mem_size() */
- memblock_analyze();
-
/* use common parsing */
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
@@ -128,7 +125,7 @@ void __init reserve_crashkernel(void)
crash_size = resource_size(&crashk_res);
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
if (crashk_res.start != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 458ed3bee66..fa4a573d671 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -214,7 +214,7 @@ char __devinit *pcibios_setup(char *str)
* If the interrupt is used, then gets the interrupt line from the
* openfirmware and sets it in the pci_dev and pci_config line.
*/
-int pci_read_irq_line(struct pci_dev *pci_dev)
+static int pci_read_irq_line(struct pci_dev *pci_dev)
{
struct of_irq oirq;
unsigned int virq;
@@ -283,7 +283,6 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
return 0;
}
-EXPORT_SYMBOL(pci_read_irq_line);
/*
* Platform support for /proc/bus/pci/X/Y mmap()s,
@@ -921,18 +920,22 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
struct resource *res = dev->resource + i;
if (!res->flags)
continue;
- /* On platforms that have PCI_PROBE_ONLY set, we don't
- * consider 0 as an unassigned BAR value. It's technically
- * a valid value, but linux doesn't like it... so when we can
- * re-assign things, we do so, but if we can't, we keep it
- * around and hope for the best...
+
+ /* If we're going to re-assign everything, we mark all resources
+ * as unset (and 0-base them). In addition, we mark BARs starting
+ * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
+ * since in that case, we don't want to re-assign anything
*/
- if (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY)) {
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
+ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
+ (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
+ /* Only print message if not re-assigning */
+ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
+ pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
+ "is unassigned\n",
+ pci_name(dev), i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags);
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
@@ -1042,6 +1045,16 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
if (i >= 3 && bus->self->transparent)
continue;
+ /* If we are going to re-assign everything, mark the resource
+ * as unset and move it down to 0
+ */
+ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
+ res->flags |= IORESOURCE_UNSET;
+ res->end -= res->start;
+ res->start = 0;
+ continue;
+ }
+
pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
pci_name(dev), i,
 (unsigned long long)res->start,
@@ -1262,18 +1275,15 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
pci_bus_for_each_resource(bus, res, i) {
if (!res || !res->flags || res->start > res->end || res->parent)
continue;
+
+ /* If the resource was left unset at this point, we clear it */
+ if (res->flags & IORESOURCE_UNSET)
+ goto clear_resource;
+
if (bus->parent == NULL)
pr = (res->flags & IORESOURCE_IO) ?
&ioport_resource : &iomem_resource;
else {
- /* Don't bother with non-root busses when
- * re-assigning all resources. We clear the
- * resource flags as if they were colliding
- * and as such ensure proper re-allocation
- * later.
- */
- if (pci_has_flag(PCI_REASSIGN_ALL_RSRC))
- goto clear_resource;
pr = pci_find_parent_resource(bus->self, res);
if (pr == res) {
/* this happens when the generic PCI
@@ -1304,9 +1314,9 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
if (reparent_resources(pr, res) == 0)
continue;
}
- printk(KERN_WARNING "PCI: Cannot allocate resource region "
- "%d of PCI bridge %d, will remap\n", i, bus->number);
-clear_resource:
+ pr_warning("PCI: Cannot allocate resource region "
+ "%d of PCI bridge %d, will remap\n", i, bus->number);
+ clear_resource:
res->start = res->end = 0;
res->flags = 0;
}
@@ -1451,16 +1461,11 @@ void __init pcibios_resource_survey(void)
{
struct pci_bus *b;
- /* Allocate and assign resources. If we re-assign everything, then
- * we skip the allocate phase
- */
+ /* Allocate and assign resources */
list_for_each_entry(b, &pci_root_buses, node)
pcibios_allocate_bus_resources(b);
-
- if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
- pcibios_allocate_resources(0);
- pcibios_allocate_resources(1);
- }
+ pcibios_allocate_resources(0);
+ pcibios_allocate_resources(1);
/* Before we start assigning unassigned resource, we try to reserve
* the low IO area and the VGA memory area if they intersect the
@@ -1732,6 +1737,12 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
if (mode == PCI_PROBE_NORMAL)
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+ /* Platform gets a chance to do some global fixups before
+ * we proceed to resource allocation
+ */
+ if (ppc_md.pcibios_fixup_phb)
+ ppc_md.pcibios_fixup_phb(hose);
+
/* Configure PCI Express settings */
if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
struct pci_bus *child;
@@ -1747,10 +1758,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
int i, class = dev->class >> 8;
+ /* When configured as agent, programming interface = 1 */
+ int prog_if = dev->class & 0xf;
if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
class == PCI_CLASS_BRIDGE_OTHER) &&
(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+ (prog_if == 0) &&
(dev->bus->parent == NULL)) {
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
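
The resource fixups above keep repeating one small transformation: when a BAR or bridge window is going to be reassigned, its address is forgotten but its size is kept, by zero-basing the range and flagging it IORESOURCE_UNSET. The same step in isolation (flag value chosen for illustration; the real definition lives in linux/ioport.h):

#include <stdio.h>
#include <stdint.h>

#define IORESOURCE_UNSET	0x20000000u	/* illustrative; see linux/ioport.h */

struct resource {
	uint64_t start, end;
	unsigned int flags;
};

/* Keep the size, forget the address: end becomes (size - 1), start becomes 0. */
static void zero_base_resource(struct resource *res)
{
	res->end -= res->start;
	res->start = 0;
	res->flags |= IORESOURCE_UNSET;
}

int main(void)
{
	struct resource bar = { .start = 0xfe000000, .end = 0xfe0fffff, .flags = 0 };

	zero_base_resource(&bar);
	printf("start=0x%llx end=0x%llx flags=0x%x\n",
	       (unsigned long long)bar.start,
	       (unsigned long long)bar.end, bar.flags);
	return 0;
}
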
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 4e69deb89b3..dd9e4a04bf7 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -50,6 +50,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
dn->data = pdn;
pdn->node = dn;
pdn->phb = phb;
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
regs = of_get_property(dn, "reg", NULL);
if (regs) {
/* First register entry is addr (00BBSS00) */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6457574c0b2..ebe5766781a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -584,16 +584,32 @@ static struct regbit {
unsigned long bit;
const char *name;
} msr_bits[] = {
+#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
+ {MSR_SF, "SF"},
+ {MSR_HV, "HV"},
+#endif
+ {MSR_VEC, "VEC"},
+ {MSR_VSX, "VSX"},
+#ifdef CONFIG_BOOKE
+ {MSR_CE, "CE"},
+#endif
{MSR_EE, "EE"},
{MSR_PR, "PR"},
{MSR_FP, "FP"},
- {MSR_VEC, "VEC"},
- {MSR_VSX, "VSX"},
{MSR_ME, "ME"},
- {MSR_CE, "CE"},
+#ifdef CONFIG_BOOKE
{MSR_DE, "DE"},
+#else
+ {MSR_SE, "SE"},
+ {MSR_BE, "BE"},
+#endif
{MSR_IR, "IR"},
{MSR_DR, "DR"},
+ {MSR_PMM, "PMM"},
+#ifndef CONFIG_BOOKE
+ {MSR_RI, "RI"},
+ {MSR_LE, "LE"},
+#endif
{0, NULL}
};
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa1235b0503..abe405dab34 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -733,8 +733,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- memblock_init();
-
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
@@ -756,20 +754,14 @@ void __init early_init_devtree(void *params)
early_reserve_mem();
phyp_dump_reserve_mem();
- limit = memory_limit;
- if (! limit) {
- phys_addr_t memsize;
-
- /* Ensure that total memory size is page-aligned, because
- * otherwise mark_bootmem() gets upset. */
- memblock_analyze();
- memsize = memblock_phys_mem_size();
- if ((memsize & PAGE_MASK) != memsize)
- limit = memsize & PAGE_MASK;
- }
+ /*
+ * Ensure that total memory size is page-aligned, because otherwise
+ * mark_bootmem() gets upset.
+ */
+ limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index cc584865b3d..eca626ea3f2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -742,7 +742,7 @@ static unsigned char ibm_architecture_vec[] = {
W(0xffffffff), /* virt_base */
W(0xffffffff), /* virt_size */
W(0xffffffff), /* load_base */
- W(64), /* 64MB min RMA */
+ W(256), /* 256MB min RMA */
W(0xffffffff), /* full client load */
0, /* min RMA percentage of total RAM */
48, /* max log_2(hash table size) */
@@ -1224,14 +1224,6 @@ static void __init prom_init_mem(void)
RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
- /* Check if we have an initrd after the kernel, if we do move our bottom
- * point to after it
- */
- if (RELOC(prom_initrd_start)) {
- if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
- RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
- }
-
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
* alloc_top_high. This must be the real top of RAM so we can put
@@ -1269,6 +1261,15 @@ static void __init prom_init_mem(void)
RELOC(alloc_top) = RELOC(rmo_top);
RELOC(alloc_top_high) = RELOC(ram_top);
+ /*
+ * Check if we have an initrd after the kernel but still inside
+ * the RMO. If we do move our bottom point to after it.
+ */
+ if (RELOC(prom_initrd_start) &&
+ RELOC(prom_initrd_start) < RELOC(rmo_top) &&
+ RELOC(prom_initrd_end) > RELOC(alloc_bottom))
+ RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
+
prom_printf("memory layout at init:\n");
prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
@@ -2079,7 +2080,7 @@ static void __init prom_check_displays(void)
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
clut = RELOC(default_colors);
- for (i = 0; i < 32; i++, clut += 3)
+ for (i = 0; i < 16; i++, clut += 3)
if (prom_set_color(ih, i, clut[0], clut[1],
clut[2]) != 0)
break;
@@ -2844,7 +2845,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
RELOC(of_platform) = prom_find_machine_type();
prom_printf("Detected machine type: %x\n", RELOC(of_platform));
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
/* Bail if this is a kdump kernel. */
if (PHYSICAL_START > 0)
prom_panic("Error: You can't boot a kdump kernel from OF!\n");
@@ -2969,9 +2970,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* in case stdin is USB and still active on IBM machines...
* Unfortunately quiesce crashes on some powermacs if we have
- * closed stdin already (in particular the powerbook 101).
+ * closed stdin already (in particular the powerbook 101). It
+ * appears that the OPAL version of OFW doesn't like it either.
*/
- if (RELOC(of_platform) != PLATFORM_POWERMAC)
+ if (RELOC(of_platform) != PLATFORM_POWERMAC &&
+ RELOC(of_platform) != PLATFORM_OPAL)
prom_close_stdin();
/*
@@ -2987,8 +2990,12 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* is common to us and kexec
*/
hdr = RELOC(dt_header_start);
- prom_printf("returning from prom_init\n");
- prom_debug("->dt_header_start=0x%x\n", hdr);
+
+ /* Don't print anything after quiesce under OPAL, it crashes OFW */
+ if (RELOC(of_platform) != PLATFORM_OPAL) {
+ prom_printf("returning from prom_init\n");
+ prom_debug("->dt_header_start=0x%x\n", hdr);
+ }
#ifdef CONFIG_PPC32
reloc_got2(-offset);
diff --git a/arch/powerpc/kernel/reloc_32.S b/arch/powerpc/kernel/reloc_32.S
new file mode 100644
index 00000000000..ef46ba6e094
--- /dev/null
+++ b/arch/powerpc/kernel/reloc_32.S
@@ -0,0 +1,208 @@
+/*
+ * Code to process dynamic relocations for PPC32.
+ *
+ * Copyright (C) IBM Corporation, 2011.
+ * Author: Suzuki Poulose <suzuki@in.ibm.com>
+ *
+ * - Based on ppc64 code - reloc_64.S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+
+/* Dynamic section table entry tags */
+DT_RELA = 7 /* Tag for Elf32_Rela section */
+DT_RELASZ = 8 /* Size of the Rela relocs */
+DT_RELAENT = 9 /* Size of one Rela reloc entry */
+
+STN_UNDEF = 0 /* Undefined symbol index */
+STB_LOCAL = 0 /* Local binding for the symbol */
+
+R_PPC_ADDR16_LO = 4 /* Lower half of (S+A) */
+R_PPC_ADDR16_HI = 5 /* Upper half of (S+A) */
+R_PPC_ADDR16_HA = 6 /* High Adjusted (S+A) */
+R_PPC_RELATIVE = 22
+
+/*
+ * r3 = desired final address
+ */
+
+_GLOBAL(relocate)
+
+ mflr r0 /* Save our LR */
+ bl 0f /* Find our current runtime address */
+0: mflr r12 /* Make it accessible */
+ mtlr r0
+
+ lwz r11, (p_dyn - 0b)(r12)
+ add r11, r11, r12 /* runtime address of .dynamic section */
+ lwz r9, (p_rela - 0b)(r12)
+ add r9, r9, r12 /* runtime address of .rela.dyn section */
+ lwz r10, (p_st - 0b)(r12)
+ add r10, r10, r12 /* runtime address of _stext section */
+ lwz r13, (p_sym - 0b)(r12)
+ add r13, r13, r12 /* runtime address of .dynsym section */
+
+ /*
+ * Scan the dynamic section for RELA, RELASZ entries
+ */
+ li r6, 0
+ li r7, 0
+ li r8, 0
+1: lwz r5, 0(r11) /* ELF_Dyn.d_tag */
+ cmpwi r5, 0 /* End of ELF_Dyn[] */
+ beq eodyn
+ cmpwi r5, DT_RELA
+ bne relasz
+ lwz r7, 4(r11) /* r7 = rela.link */
+ b skip
+relasz:
+ cmpwi r5, DT_RELASZ
+ bne relaent
+ lwz r8, 4(r11) /* r8 = Total Rela relocs size */
+ b skip
+relaent:
+ cmpwi r5, DT_RELAENT
+ bne skip
+ lwz r6, 4(r11) /* r6 = Size of one Rela reloc */
+skip:
+ addi r11, r11, 8
+ b 1b
+eodyn: /* End of Dyn Table scan */
+
+ /* Check if we have found all the entries */
+ cmpwi r7, 0
+ beq done
+ cmpwi r8, 0
+ beq done
+ cmpwi r6, 0
+ beq done
+
+
+ /*
+ * Work out the current offset from the link time address of .rela
+ * section.
+ * cur_offset[r7] = rela.run[r9] - rela.link [r7]
+ * _stext.link[r12] = _stext.run[r10] - cur_offset[r7]
+ * final_offset[r3] = _stext.final[r3] - _stext.link[r12]
+ */
+ subf r7, r7, r9 /* cur_offset */
+ subf r12, r7, r10
+ subf r3, r12, r3 /* final_offset */
+
+ subf r8, r6, r8 /* relasz -= relaent */
+ /*
+ * Scan through the .rela table and process each entry
+ * r9 - points to the current .rela table entry
+ * r13 - points to the symbol table
+ */
+
+ /*
+ * Check if we have a relocation based on symbol
+ * r5 will hold the value of the symbol.
+ */
+applyrela:
+ lwz r4, 4(r9) /* r4 = rela.r_info */
+ srwi r5, r4, 8 /* ELF32_R_SYM(r_info) */
+ cmpwi r5, STN_UNDEF /* sym == STN_UNDEF ? */
+ beq get_type /* value = 0 */
+ /* Find the value of the symbol at index(r5) */
+ slwi r5, r5, 4 /* r5 = r5 * sizeof(Elf32_Sym) */
+ add r12, r13, r5 /* r12 = &__dyn_sym[Index] */
+
+ /*
+ * GNU ld has a bug: for dynamic relocs based on
+ * STB_LOCAL symbols, the value should be assumed
+ * to be zero. - Alan Modra
+ */
+ /* XXX: Do we need to check if we are using GNU ld ? */
+ lbz r5, 12(r12) /* r5 = dyn_sym[Index].st_info */
+ extrwi r5, r5, 4, 24 /* r5 = ELF32_ST_BIND(r5) */
+ cmpwi r5, STB_LOCAL /* st_value = 0, ld bug */
+ beq get_type /* We have r5 = 0 */
+ lwz r5, 4(r12) /* r5 = __dyn_sym[Index].st_value */
+
+get_type:
+ /* Load the relocation type to r4 */
+ extrwi r4, r4, 8, 24 /* r4 = ELF32_R_TYPE(r_info) = ((char*)r4)[3] */
+
+ /* R_PPC_RELATIVE */
+ cmpwi r4, R_PPC_RELATIVE
+ bne hi16
+ lwz r4, 0(r9) /* r_offset */
+ lwz r0, 8(r9) /* r_addend */
+ add r0, r0, r3 /* final addend */
+ stwx r0, r4, r7 /* memory[r4+r7]) = (u32)r0 */
+ b nxtrela /* continue */
+
+ /* R_PPC_ADDR16_HI */
+hi16:
+ cmpwi r4, R_PPC_ADDR16_HI
+ bne ha16
+ lwz r4, 0(r9) /* r_offset */
+ lwz r0, 8(r9) /* r_addend */
+ add r0, r0, r3
+ add r0, r0, r5 /* r0 = (S+A+Offset) */
+ extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */
+ b store_half
+
+ /* R_PPC_ADDR16_HA */
+ha16:
+ cmpwi r4, R_PPC_ADDR16_HA
+ bne lo16
+ lwz r4, 0(r9) /* r_offset */
+ lwz r0, 8(r9) /* r_addend */
+ add r0, r0, r3
+ add r0, r0, r5 /* r0 = (S+A+Offset) */
+ extrwi r5, r0, 1, 16 /* Extract bit 16 */
+ extrwi r0, r0, 16, 0 /* r0 = (r0 >> 16) */
+ add r0, r0, r5 /* Add it to r0 */
+ b store_half
+
+ /* R_PPC_ADDR16_LO */
+lo16:
+ cmpwi r4, R_PPC_ADDR16_LO
+ bne nxtrela
+ lwz r4, 0(r9) /* r_offset */
+ lwz r0, 8(r9) /* r_addend */
+ add r0, r0, r3
+ add r0, r0, r5 /* r0 = (S+A+Offset) */
+ extrwi r0, r0, 16, 16 /* r0 &= 0xffff */
+ /* Fall through to */
+
+ /* Store half word */
+store_half:
+ sthx r0, r4, r7 /* memory[r4+r7] = (u16)r0 */
+
+nxtrela:
+ /*
+ * We have to flush the modified instructions from the
+ * d-cache to main storage, and also invalidate the
+ * cached instructions in the i-cache that were modified.
+ *
+ * We delay the sync / isync operation till the end, since
+ * we won't be executing the modified instructions until
+ * we return from here.
+ */
+ dcbst r4,r7
+ sync /* Ensure the data is flushed before icbi */
+ icbi r4,r7
+ cmpwi r8, 0 /* relasz = 0 ? */
+ ble done
+ add r9, r9, r6 /* move to next entry in the .rela table */
+ subf r8, r6, r8 /* relasz -= relaent */
+ b applyrela
+
+done:
+ sync /* Wait for the flush to finish */
+ isync /* Discard prefetched instructions */
+ blr
+
+p_dyn: .long __dynamic_start - 0b
+p_rela: .long __rela_dyn_start - 0b
+p_sym: .long __dynamic_symtab - 0b
+p_st: .long _stext - 0b
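
For readers less fluent in PowerPC assembly: R_PPC_RELATIVE stores a full 32-bit word, while the three ADDR16 variants above store only the low half, the high half, or the "high adjusted" half of S + A (plus the runtime offset). The HA form adds the sign bit of the low half so that a later addi with the signed low half reproduces the full value. A standalone C sketch of that arithmetic, with an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

static uint16_t lo(uint32_t v) { return v & 0xffff; }
static uint16_t hi(uint32_t v) { return v >> 16; }

/* "High adjusted": bump the high half by the sign bit of the low half, so
 * that (ha << 16) + (int16_t)lo reconstructs the full 32-bit value. */
static uint16_t ha(uint32_t v) { return (v >> 16) + ((v >> 15) & 1); }

int main(void)
{
	uint32_t value = 0xc0409abc;	/* example S + A + offset */
	uint32_t rebuilt = ((uint32_t)ha(value) << 16) + (uint32_t)(int16_t)lo(value);

	printf("lo=0x%04x hi=0x%04x ha=0x%04x rebuilt=0x%08x\n",
	       lo(value), hi(value), ha(value), rebuilt);
	return 0;
}
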
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index e037c7494fd..4174b4b2324 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -568,6 +568,12 @@ static void rtas_flash_firmware(int reboot_type)
}
/*
+ * Just before starting the firmware flash, cancel the event scan work
+ * to avoid any soft lockup issues.
+ */
+ rtas_cancel_event_scan();
+
+ /*
* NOTE: the "first" block must be under 4GB, so we create
* an entry with no data blocks in the reserved buffer in
* the kernel data segment.
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 481ef064c8f..1045ff49cc6 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -472,6 +472,13 @@ static void start_event_scan(void)
&event_scan_work, event_scan_delay);
}
+/* Cancel the rtas event scan work */
+void rtas_cancel_event_scan(void)
+{
+ cancel_delayed_work_sync(&event_scan_work);
+}
+EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
+
static int __init rtas_init(void)
{
struct proc_dir_entry *entry;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fb9bb46e7e8..4cb8f1e9d04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,6 +35,8 @@
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -64,6 +66,7 @@
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
+#include <asm/hugetlb.h>
#include "setup.h"
@@ -217,6 +220,13 @@ void __init early_setup(unsigned long dt_ptr)
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ /*
+ * Reserve any gigantic pages requested on the command line.
+ * memblock needs to have been initialized by the time this is
+ * called since this will reserve memory.
+ */
+ reserve_hugetlb_gpages();
+
DBG(" <- early_setup()\n");
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6df70907d60..46695febc09 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -27,7 +27,7 @@
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
@@ -187,7 +187,8 @@ int smp_request_message_ipi(int virq, int msg)
return 1;
}
#endif
- err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
+ err = request_irq(virq, smp_ipi_action[msg],
+ IRQF_PERCPU | IRQF_NO_THREAD,
smp_ipi_name[msg], 0);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index ce035c1905f..883e74c0d1b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -1,4 +1,4 @@
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
@@ -18,6 +18,7 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
+#include <asm/system.h>
#include "cacheinfo.h"
@@ -37,12 +38,12 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
-static ssize_t store_smt_snooze_delay(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_smt_snooze_delay(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t ret;
long snooze;
@@ -50,21 +51,22 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev,
if (ret != 1)
return -EINVAL;
- per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;
+ per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
+ update_smt_snooze_delay(snooze);
return count;
}
-static ssize_t show_smt_snooze_delay(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t show_smt_snooze_delay(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
+ return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}
-static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
+static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
store_smt_snooze_delay);
static int __init setup_smt_snooze_delay(char *str)
@@ -117,25 +119,25 @@ static void write_##NAME(void *val) \
ppc_enable_pmcs(); \
mtspr(ADDRESS, *(unsigned long *)val); \
} \
-static ssize_t show_##NAME(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
- struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
unsigned long val; \
- smp_call_function_single(cpu->sysdev.id, read_##NAME, &val, 1); \
+ smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
- store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
+ store_##NAME(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
unsigned long val; \
int ret = sscanf(buf, "%lx", &val); \
if (ret != 1) \
return -EINVAL; \
- smp_call_function_single(cpu->sysdev.id, write_##NAME, &val, 1); \
+ smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
return count; \
}
@@ -177,23 +179,25 @@ SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
+SYSFS_PMCSETUP(pir, SPRN_PIR);
-static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
-static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
-static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
-static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+static DEVICE_ATTR(spurr, 0600, show_spurr, NULL);
+static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
+static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(pir, 0400, show_pir, NULL);
unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);
-static ssize_t show_dscr_default(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_dscr_default(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", dscr_default);
}
-static ssize_t __used store_dscr_default(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, const char *buf,
+static ssize_t __used store_dscr_default(struct device *dev,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
@@ -207,15 +211,14 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class,
return count;
}
-static SYSDEV_CLASS_ATTR(dscr_default, 0600,
+static DEVICE_ATTR(dscr_default, 0600,
show_dscr_default, store_dscr_default);
static void sysfs_create_dscr_default(void)
{
int err = 0;
if (cpu_has_feature(CPU_FTR_DSCR))
- err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &attr_dscr_default.attr);
+ err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
}
#endif /* CONFIG_PPC64 */
@@ -259,72 +262,72 @@ SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
-static struct sysdev_attribute ibm_common_attrs[] = {
- _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
- _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+static struct device_attribute ibm_common_attrs[] = {
+ __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+ __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_G4
-static struct sysdev_attribute g4_common_attrs[] = {
- _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
- _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
- _SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
+static struct device_attribute g4_common_attrs[] = {
+ __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+ __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+ __ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */
-static struct sysdev_attribute classic_pmc_attrs[] = {
- _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1),
- _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2),
- _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3),
- _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4),
- _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5),
- _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6),
+static struct device_attribute classic_pmc_attrs[] = {
+ __ATTR(pmc1, 0600, show_pmc1, store_pmc1),
+ __ATTR(pmc2, 0600, show_pmc2, store_pmc2),
+ __ATTR(pmc3, 0600, show_pmc3, store_pmc3),
+ __ATTR(pmc4, 0600, show_pmc4, store_pmc4),
+ __ATTR(pmc5, 0600, show_pmc5, store_pmc5),
+ __ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
- _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7),
- _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8),
+ __ATTR(pmc7, 0600, show_pmc7, store_pmc7),
+ __ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#ifdef HAS_PPC_PMC_PA6T
-static struct sysdev_attribute pa6t_attrs[] = {
- _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
- _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
- _SYSDEV_ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
- _SYSDEV_ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
- _SYSDEV_ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
- _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
- _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
- _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
+static struct device_attribute pa6t_attrs[] = {
+ __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
+ __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
+ __ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
+ __ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
+ __ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
+ __ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
+ __ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
+ __ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
- _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0),
- _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1),
- _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4),
- _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5),
- _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0),
- _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1),
- _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2),
- _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3),
- _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4),
- _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5),
- _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6),
- _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7),
- _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8),
- _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9),
- _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat),
- _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr),
- _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr),
- _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr),
- _SYSDEV_ATTR(der, 0600, show_der, store_der),
- _SYSDEV_ATTR(mer, 0600, show_mer, store_mer),
- _SYSDEV_ATTR(ber, 0600, show_ber, store_ber),
- _SYSDEV_ATTR(ier, 0600, show_ier, store_ier),
- _SYSDEV_ATTR(sier, 0600, show_sier, store_sier),
- _SYSDEV_ATTR(siar, 0600, show_siar, store_siar),
- _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0),
- _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1),
- _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2),
- _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3),
+ __ATTR(hid0, 0600, show_hid0, store_hid0),
+ __ATTR(hid1, 0600, show_hid1, store_hid1),
+ __ATTR(hid4, 0600, show_hid4, store_hid4),
+ __ATTR(hid5, 0600, show_hid5, store_hid5),
+ __ATTR(ima0, 0600, show_ima0, store_ima0),
+ __ATTR(ima1, 0600, show_ima1, store_ima1),
+ __ATTR(ima2, 0600, show_ima2, store_ima2),
+ __ATTR(ima3, 0600, show_ima3, store_ima3),
+ __ATTR(ima4, 0600, show_ima4, store_ima4),
+ __ATTR(ima5, 0600, show_ima5, store_ima5),
+ __ATTR(ima6, 0600, show_ima6, store_ima6),
+ __ATTR(ima7, 0600, show_ima7, store_ima7),
+ __ATTR(ima8, 0600, show_ima8, store_ima8),
+ __ATTR(ima9, 0600, show_ima9, store_ima9),
+ __ATTR(imaat, 0600, show_imaat, store_imaat),
+ __ATTR(btcr, 0600, show_btcr, store_btcr),
+ __ATTR(pccr, 0600, show_pccr, store_pccr),
+ __ATTR(rpccr, 0600, show_rpccr, store_rpccr),
+ __ATTR(der, 0600, show_der, store_der),
+ __ATTR(mer, 0600, show_mer, store_mer),
+ __ATTR(ber, 0600, show_ber, store_ber),
+ __ATTR(ier, 0600, show_ier, store_ier),
+ __ATTR(sier, 0600, show_sier, store_sier),
+ __ATTR(siar, 0600, show_siar, store_siar),
+ __ATTR(tsr0, 0600, show_tsr0, store_tsr0),
+ __ATTR(tsr1, 0600, show_tsr1, store_tsr1),
+ __ATTR(tsr2, 0600, show_tsr2, store_tsr2),
+ __ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
@@ -333,14 +336,14 @@ static struct sysdev_attribute pa6t_attrs[] = {
static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
- struct sysdev_attribute *attrs, *pmc_attrs;
+ struct device *s = &c->dev;
+ struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
#ifdef CONFIG_PPC64
if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
cpu_has_feature(CPU_FTR_SMT))
- sysdev_create_file(s, &attr_smt_snooze_delay);
+ device_create_file(s, &dev_attr_smt_snooze_delay);
#endif
/* PMC stuff */
@@ -348,14 +351,14 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -363,7 +366,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
pmc_attrs = NULL;
break;
#endif /* HAS_PPC_PMC_PA6T */
@@ -374,24 +377,27 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
}
for (i = 0; i < nattrs; i++)
- sysdev_create_file(s, &attrs[i]);
+ device_create_file(s, &attrs[i]);
if (pmc_attrs)
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
- sysdev_create_file(s, &pmc_attrs[i]);
+ device_create_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_MMCRA))
- sysdev_create_file(s, &attr_mmcra);
+ device_create_file(s, &dev_attr_mmcra);
if (cpu_has_feature(CPU_FTR_PURR))
- sysdev_create_file(s, &attr_purr);
+ device_create_file(s, &dev_attr_purr);
if (cpu_has_feature(CPU_FTR_SPURR))
- sysdev_create_file(s, &attr_spurr);
+ device_create_file(s, &dev_attr_spurr);
if (cpu_has_feature(CPU_FTR_DSCR))
- sysdev_create_file(s, &attr_dscr);
+ device_create_file(s, &dev_attr_dscr);
+
+ if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
+ device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
cacheinfo_cpu_online(cpu);
@@ -401,8 +407,8 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
- struct sysdev_attribute *attrs, *pmc_attrs;
+ struct device *s = &c->dev;
+ struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
BUG_ON(!c->hotpluggable);
@@ -410,7 +416,7 @@ static void unregister_cpu_online(unsigned int cpu)
#ifdef CONFIG_PPC64
if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
cpu_has_feature(CPU_FTR_SMT))
- sysdev_remove_file(s, &attr_smt_snooze_delay);
+ device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif
/* PMC stuff */
@@ -418,14 +424,14 @@ static void unregister_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -433,7 +439,7 @@ static void unregister_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute);
+ nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
pmc_attrs = NULL;
break;
#endif /* HAS_PPC_PMC_PA6T */
@@ -444,24 +450,27 @@ static void unregister_cpu_online(unsigned int cpu)
}
for (i = 0; i < nattrs; i++)
- sysdev_remove_file(s, &attrs[i]);
+ device_remove_file(s, &attrs[i]);
if (pmc_attrs)
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
- sysdev_remove_file(s, &pmc_attrs[i]);
+ device_remove_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_MMCRA))
- sysdev_remove_file(s, &attr_mmcra);
+ device_remove_file(s, &dev_attr_mmcra);
if (cpu_has_feature(CPU_FTR_PURR))
- sysdev_remove_file(s, &attr_purr);
+ device_remove_file(s, &dev_attr_purr);
if (cpu_has_feature(CPU_FTR_SPURR))
- sysdev_remove_file(s, &attr_spurr);
+ device_remove_file(s, &dev_attr_spurr);
if (cpu_has_feature(CPU_FTR_DSCR))
- sysdev_remove_file(s, &attr_dscr);
+ device_remove_file(s, &dev_attr_dscr);
+
+ if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
+ device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
cacheinfo_cpu_offline(cpu);
@@ -513,70 +522,70 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
static DEFINE_MUTEX(cpu_mutex);
-int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+int cpu_add_dev_attr(struct device_attribute *attr)
{
int cpu;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
- sysdev_create_file(get_cpu_sysdev(cpu), attr);
+ device_create_file(get_cpu_device(cpu), attr);
}
mutex_unlock(&cpu_mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
-int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
int cpu;
- struct sys_device *sysdev;
+ struct device *dev;
int ret;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- ret = sysfs_create_group(&sysdev->kobj, attrs);
+ dev = get_cpu_device(cpu);
+ ret = sysfs_create_group(&dev->kobj, attrs);
WARN_ON(ret != 0);
}
mutex_unlock(&cpu_mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
-void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+void cpu_remove_dev_attr(struct device_attribute *attr)
{
int cpu;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
- sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+ device_remove_file(get_cpu_device(cpu), attr);
}
mutex_unlock(&cpu_mutex);
}
-EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
-void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
int cpu;
- struct sys_device *sysdev;
+ struct device *dev;
mutex_lock(&cpu_mutex);
for_each_possible_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- sysfs_remove_group(&sysdev->kobj, attrs);
+ dev = get_cpu_device(cpu);
+ sysfs_remove_group(&dev->kobj, attrs);
}
mutex_unlock(&cpu_mutex);
}
-EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */
@@ -590,18 +599,18 @@ static void register_nodes(void)
register_one_node(i);
}
-int sysfs_add_device_to_node(struct sys_device *dev, int nid)
+int sysfs_add_device_to_node(struct device *dev, int nid)
{
struct node *node = &node_devices[nid];
- return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
+ return sysfs_create_link(&node->dev.kobj, &dev->kobj,
kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
-void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
+void sysfs_remove_device_from_node(struct device *dev, int nid)
{
struct node *node = &node_devices[nid];
- sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
+ sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
@@ -614,14 +623,14 @@ static void register_nodes(void)
#endif
/* Only valid if CPU is present. */
-static ssize_t show_physical_id(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_physical_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
+ return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
-static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
+static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
@@ -646,7 +655,7 @@ static int __init topology_init(void)
if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
- sysdev_create_file(&c->sysdev, &attr_physical_id);
+ device_create_file(&c->dev, &dev_attr_physical_id);
}
if (cpu_online(cpu))
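
The sysfs.c hunks above are one mechanical conversion: struct sys_device/struct sysdev_attribute become struct device/struct device_attribute, SYSDEV_ATTR() becomes DEVICE_ATTR() (which names the variable dev_attr_<name> rather than attr_<name>), and sysdev_create_file()/sysdev_remove_file() become device_create_file()/device_remove_file(). A minimal sketch of a per-CPU attribute after the conversion, using the cpu_add_dev_attr() helper introduced above; the attribute name "example" and its routines are illustrative, not part of the patch.

/* Hedged sketch: a read-only per-CPU sysfs attribute in the new device model.
 * "example", show_example() and example_attr_init() are made-up names.
 */
static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* dev->id is the logical CPU number, as in show_physical_id() above */
	return sprintf(buf, "%d\n", dev->id);
}
static DEVICE_ATTR(example, 0444, show_example, NULL);

static int __init example_attr_init(void)
{
	/* creates /sys/devices/system/cpu/cpuN/example on each possible CPU */
	return cpu_add_dev_attr(&dev_attr_example);
}
device_initcall(example_attr_init);
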
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 522bb1dfc35..567dd7c3ac2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -86,8 +86,6 @@ static struct clocksource clocksource_rtc = {
.rating = 400,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 22,
- .mult = 0, /* To be filled in */
.read = rtc_read,
};
@@ -97,8 +95,6 @@ static struct clocksource clocksource_timebase = {
.rating = 400,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 22,
- .mult = 0, /* To be filled in */
.read = timebase_read,
};
@@ -110,22 +106,16 @@ static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev);
static struct clock_event_device decrementer_clockevent = {
- .name = "decrementer",
- .rating = 200,
- .shift = 0, /* To be filled in */
- .mult = 0, /* To be filled in */
- .irq = 0,
- .set_next_event = decrementer_set_next_event,
- .set_mode = decrementer_set_mode,
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .name = "decrementer",
+ .rating = 200,
+ .irq = 0,
+ .set_next_event = decrementer_set_next_event,
+ .set_mode = decrementer_set_mode,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
};
-struct decrementer_clock {
- struct clock_event_device event;
- u64 next_tb;
-};
-
-static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
+DEFINE_PER_CPU(u64, decrementers_next_tb);
+static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
@@ -168,13 +158,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Factors for converting from cputime_t (timebase ticks) to
- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
* These are all stored as 0.64 fixed-point binary fractions.
*/
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
-u64 __cputime_msec_factor;
-EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_usec_factor;
+EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
@@ -192,8 +182,8 @@ static void calc_cputime_factors(void)
div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
__cputime_jiffies_factor = res.result_low;
- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
- __cputime_msec_factor = res.result_low;
+ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
+ __cputime_usec_factor = res.result_low;
div128_by_32(1, 0, tb_ticks_per_sec, &res);
__cputime_sec_factor = res.result_low;
div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
@@ -441,7 +431,7 @@ EXPORT_SYMBOL(profile_pc);
/*
* This function recalibrates the timebase based on the 49-bit time-of-day
* value in the Titan chip. The Titan is much more accurate than the value
- * returned by the service processor for the timebase frequency.
+ * returned by the service processor for the timebase frequency.
*/
static int __init iSeries_tb_recal(void)
@@ -576,9 +566,8 @@ void arch_irq_work_raise(void)
void timer_interrupt(struct pt_regs * regs)
{
struct pt_regs *old_regs;
- struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
- struct clock_event_device *evt = &decrementer->event;
- u64 now;
+ u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ struct clock_event_device *evt = &__get_cpu_var(decrementers);
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
@@ -613,16 +602,9 @@ void timer_interrupt(struct pt_regs * regs)
get_lppaca()->int_dword.fields.decr_int = 0;
#endif
- now = get_tb_or_rtc();
- if (now >= decrementer->next_tb) {
- decrementer->next_tb = ~(u64)0;
- if (evt->event_handler)
- evt->event_handler(evt);
- } else {
- now = decrementer->next_tb - now;
- if (now <= DECREMENTER_MAX)
- set_dec((int)now);
- }
+ *next_tb = ~(u64)0;
+ if (evt->event_handler)
+ evt->event_handler(evt);
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
@@ -650,9 +632,9 @@ static void generic_suspend_disable_irqs(void)
* with suspending.
*/
- set_dec(0x7fffffff);
+ set_dec(DECREMENTER_MAX);
local_irq_disable();
- set_dec(0x7fffffff);
+ set_dec(DECREMENTER_MAX);
}
static void generic_suspend_enable_irqs(void)
@@ -824,9 +806,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
++vdso_data->tb_update_count;
smp_mb();
- /* XXX this assumes clock->shift == 22 */
- /* 4611686018 ~= 2^(20+64-22) / 1e9 */
- new_tb_to_xs = (u64) mult * 4611686018ULL;
+ /* 19342813113834067 ~= 2^(20+64) / 1e9 */
+ new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
do_div(new_stamp_xsec, 1000000000);
new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
@@ -877,9 +858,7 @@ static void __init clocksource_init(void)
else
clock = &clocksource_timebase;
- clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
-
- if (clocksource_register(clock)) {
+ if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
printk(KERN_ERR "clocksource: %s is already registered\n",
clock->name);
return;
@@ -892,7 +871,7 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
+ __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
set_dec(evt);
return 0;
}
@@ -904,34 +883,9 @@ static void decrementer_set_mode(enum clock_event_mode mode,
decrementer_set_next_event(DECREMENTER_MAX, dev);
}
-static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
- int shift)
-{
- uint64_t tmp = ((uint64_t)ticks) << shift;
-
- do_div(tmp, nsec);
- return tmp;
-}
-
-static void __init setup_clockevent_multiplier(unsigned long hz)
-{
- u64 mult, shift = 32;
-
- while (1) {
- mult = div_sc64(hz, NSEC_PER_SEC, shift);
- if (mult && (mult >> 32UL) == 0UL)
- break;
-
- shift--;
- }
-
- decrementer_clockevent.shift = shift;
- decrementer_clockevent.mult = mult;
-}
-
static void register_decrementer_clockevent(int cpu)
{
- struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
+ struct clock_event_device *dec = &per_cpu(decrementers, cpu);
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
@@ -946,7 +900,8 @@ static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
- setup_clockevent_multiplier(ppc_tb_freq);
+ clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
+
decrementer_clockevent.max_delta_ns =
clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
decrementer_clockevent.min_delta_ns =
@@ -1014,10 +969,10 @@ void __init time_init(void)
boot_tb = get_tb_or_rtc();
/* If platform provided a timezone (pmac), we correct the time */
- if (timezone_offset) {
+ if (timezone_offset) {
sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
- }
+ }
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
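
The time.c changes drop the hard-coded shift of 22 and the hand-rolled setup_clockevent_multiplier(): clocksource_register_hz() and clockevents_calc_mult_shift() now derive mult/shift from tb_ticks_per_sec and ppc_tb_freq. Both are plain fixed-point scalings; the sketch below shows the two directions and is illustrative only, since the real helpers also choose shift to avoid 64-bit overflow and apply rounding. For example, with a 512 MHz timebase (a common value, assumed here) the clocksource pair satisfies mult / 2^shift ~= 10^9 / (512 * 10^6) ~= 1.95 ns per timebase tick.

/* clocksource direction: timebase cycles -> nanoseconds */
static inline u64 tb_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}

/* clock_event direction: nanoseconds -> decrementer ticks; this is the
 * conversion that clockevent_delta2ns(DECREMENTER_MAX, ...) above inverts
 * to compute max_delta_ns/min_delta_ns.
 */
static inline u64 ns_to_dec_ticks(u64 ns, u32 mult, u32 shift)
{
	return (ns * mult) >> shift;
}
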
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5459d148a0f..c091527efd8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -98,18 +98,14 @@ static void pmac_backlight_unblank(void)
static inline void pmac_backlight_unblank(void) { }
#endif
-int die(const char *str, struct pt_regs *regs, long err)
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
+static int die_counter;
+
+static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
- static struct {
- raw_spinlock_t lock;
- u32 lock_owner;
- int lock_owner_depth;
- } die = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock),
- .lock_owner = -1,
- .lock_owner_depth = 0
- };
- static int die_counter;
+ int cpu;
unsigned long flags;
if (debugger(regs))
@@ -117,66 +113,109 @@ int die(const char *str, struct pt_regs *regs, long err)
oops_enter();
- if (die.lock_owner != raw_smp_processor_id()) {
- console_verbose();
- raw_spin_lock_irqsave(&die.lock, flags);
- die.lock_owner = smp_processor_id();
- die.lock_owner_depth = 0;
- bust_spinlocks(1);
- if (machine_is(powermac))
- pmac_backlight_unblank();
- } else {
- local_save_flags(flags);
+ /* racy, but better than risking deadlock. */
+ raw_local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (!arch_spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+ else
+ arch_spin_lock(&die_lock);
}
+ die_nest_count++;
+ die_owner = cpu;
+ console_verbose();
+ bust_spinlocks(1);
+ if (machine_is(powermac))
+ pmac_backlight_unblank();
+ return flags;
+}
- if (++die.lock_owner_depth < 3) {
- printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP NR_CPUS=%d ", NR_CPUS);
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC ");
-#endif
-#ifdef CONFIG_NUMA
- printk("NUMA ");
-#endif
- printk("%s\n", ppc_md.name ? ppc_md.name : "");
+static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+ int signr)
+{
+ bust_spinlocks(0);
+ die_owner = -1;
+ add_taint(TAINT_DIE);
+ die_nest_count--;
+ oops_exit();
+ printk("\n");
+ if (!die_nest_count)
+ /* Nest count reaches zero, release the lock. */
+ arch_spin_unlock(&die_lock);
+ raw_local_irq_restore(flags);
- if (notify_die(DIE_OOPS, str, regs, err, 255,
- SIGSEGV) == NOTIFY_STOP)
- return 1;
+ /*
+ * A system reset (0x100) is a request to dump, so we always send
+ * it through the crashdump code.
+ */
+ if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
+ crash_kexec(regs);
- print_modules();
- show_regs(regs);
- } else {
- printk("Recursive die() failure, output suppressed\n");
+ /*
+ * We aren't the primary crash CPU. We need to send it
+ * to a holding pattern to avoid it ending up in the panic
+ * code.
+ */
+ crash_kexec_secondary(regs);
}
- bust_spinlocks(0);
- die.lock_owner = -1;
- add_taint(TAINT_DIE);
- raw_spin_unlock_irqrestore(&die.lock, flags);
+ if (!signr)
+ return;
- if (kexec_should_crash(current) ||
- kexec_sr_activated(smp_processor_id()))
- crash_kexec(regs);
- crash_kexec_secondary(regs);
+ /*
+ * While our oops output is serialised by a spinlock, output
+ * from panic() called below can race and corrupt it. If we
+ * know we are going to panic, delay for 1 second so we have a
+ * chance to get clean backtraces from all CPUs that are oopsing.
+ */
+ if (in_interrupt() || panic_on_oops || !current->pid ||
+ is_global_init(current)) {
+ mdelay(MSEC_PER_SEC);
+ }
if (in_interrupt())
panic("Fatal exception in interrupt");
-
if (panic_on_oops)
panic("Fatal exception");
+ do_exit(signr);
+}
- oops_exit();
- do_exit(err);
+static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+{
+ printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP NR_CPUS=%d ", NR_CPUS);
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC ");
+#endif
+#ifdef CONFIG_NUMA
+ printk("NUMA ");
+#endif
+ printk("%s\n", ppc_md.name ? ppc_md.name : "");
+
+ if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
+ return 1;
+
+ print_modules();
+ show_regs(regs);
return 0;
}
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ unsigned long flags = oops_begin(regs);
+
+ if (__die(str, regs, err))
+ err = 0;
+ oops_end(flags, regs, err);
+}
+
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs, siginfo_t *info)
{
@@ -195,10 +234,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
"at %016lx nip %016lx lr %016lx code %x\n";
if (!user_mode(regs)) {
- if (die("Exception in kernel mode", regs, signr))
- return;
- } else if (show_unhandled_signals &&
- unhandled_signal(current, signr)) {
+ die("Exception in kernel mode", regs, signr);
+ return;
+ }
+
+ if (show_unhandled_signals && unhandled_signal(current, signr)) {
printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, signr,
addr, regs->nip, regs->link, code);
@@ -220,25 +260,8 @@ void system_reset_exception(struct pt_regs *regs)
return;
}
-#ifdef CONFIG_KEXEC
- cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
-#endif
-
die("System Reset", regs, SIGABRT);
- /*
- * Some CPUs when released from the debugger will execute this path.
- * These CPUs entered the debugger via a soft-reset. If the CPU was
- * hung before entering the debugger it will return to the hung
- * state when exiting this function. This causes a problem in
- * kdump since the hung CPU(s) will not respond to the IPI sent
- * from kdump. To prevent the problem we call crash_kexec_secondary()
- * here. If a kdump had not been initiated or we exit the debugger
- * with the "exit and recover" command (x) crash_kexec_secondary()
- * will return after 5ms and the CPU returns to its previous state.
- */
- crash_kexec_secondary(regs);
-
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f65af61996b..8b086299ba2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
- .pm = GENERIC_SUBSYS_PM_OPS,
};
/**
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 920276c0f6a..710a54005df 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -170,7 +170,13 @@ SECTIONS
}
#ifdef CONFIG_RELOCATABLE
. = ALIGN(8);
- .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
+ .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
+ {
+#ifdef CONFIG_RELOCATABLE_PPC32
+ __dynamic_symtab = .;
+#endif
+ *(.dynsym)
+ }
.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
{
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a459479995c..e41ac6f7dcf 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- memslot = &kvm->memslots->memslots[log->slot];
+ memslot = id_to_memslot(kvm->memslots, log->slot);
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cb137a9b03..336983da9e7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
tpaca->kvm_hstate.napping = 0;
vcpu->cpu = vc->pcpu;
smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
tpaca->cpu_start = 0x80;
wmb();
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 286f13d601c..a795a13f4a7 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
* to allocate contiguous physical memory for the real memory
* areas for guests.
*/
-void kvm_rma_init(void)
+void __init kvm_rma_init(void)
{
unsigned long i;
unsigned long j, npages;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 44d8829334a..5c8b26183f5 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -112,6 +112,9 @@ kvm_start_guest:
stbcix r0, r5, r6 /* clear it */
stwcix r8, r5, r7 /* EOI it */
+ /* NV GPR values from power7_idle() will no longer be valid */
+ stb r0, PACA_NAPSTATELOST(r13)
+
.global kvmppc_hv_entry
kvmppc_hv_entry:
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 3c791e1eb67..e2cfb9e1e20 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -658,10 +658,12 @@ program_interrupt:
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
+#ifdef CONFIG_KVM_BOOK3S_64_PR
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
r = RESUME_GUEST;
break;
}
+#endif
run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 26d20903f2b..8c0d45a6faf 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -15,6 +15,7 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 166a6a0ad54..7735a2c2e6d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,13 +16,15 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
- checksum_wrappers_64.o hweight_64.o
+ checksum_wrappers_64.o hweight_64.o \
+ copyuser_power7.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP) += locks.o
+obj-$(CONFIG_ALTIVEC) += copyuser_power7_vmx.o
endif
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 578b625d6a3..773d38f90aa 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -11,6 +11,12 @@
.align 7
_GLOBAL(__copy_tofrom_user)
+BEGIN_FTR_SECTION
+ nop
+FTR_SECTION_ELSE
+ b __copy_tofrom_user_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
+_GLOBAL(__copy_tofrom_user_base)
/* first check for a whole page copy on a page boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
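
The BEGIN_FTR_SECTION/ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) block above is resolved by boot-time code patching: without the feature the nop is kept and execution falls through into __copy_tofrom_user_base; with it, the instruction becomes a branch to __copy_tofrom_user_power7. Functionally the dispatch amounts to the sketch below — illustrative only, since the kernel patches the instruction once at boot rather than testing the feature on every call, and the C prototypes for the assembly entry points are assumed here.

/* Illustrative equivalent of the feature-fixup dispatch above. */
extern unsigned long __copy_tofrom_user_base(void __user *to,
			const void __user *from, unsigned long size);
extern unsigned long __copy_tofrom_user_power7(void __user *to,
			const void __user *from, unsigned long size);

static unsigned long copy_tofrom_user_dispatch(void __user *to,
			const void __user *from, unsigned long size)
{
	if (cpu_has_feature(CPU_FTR_VMX_COPY))
		return __copy_tofrom_user_power7(to, from, size);
	/* both paths return the number of bytes left uncopied */
	return __copy_tofrom_user_base(to, from, size);
}
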
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
new file mode 100644
index 00000000000..497db7b23bb
--- /dev/null
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -0,0 +1,683 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE 256
+#define STK_REG(i) (112 + ((i)-14)*8)
+
+ .macro err1
+100:
+ .section __ex_table,"a"
+ .align 3
+ .llong 100b,.Ldo_err1
+ .previous
+ .endm
+
+ .macro err2
+200:
+ .section __ex_table,"a"
+ .align 3
+ .llong 200b,.Ldo_err2
+ .previous
+ .endm
+
+#ifdef CONFIG_ALTIVEC
+ .macro err3
+300:
+ .section __ex_table,"a"
+ .align 3
+ .llong 300b,.Ldo_err3
+ .previous
+ .endm
+
+ .macro err4
+400:
+ .section __ex_table,"a"
+ .align 3
+ .llong 400b,.Ldo_err4
+ .previous
+ .endm
+
+
+.Ldo_err4:
+ ld r16,STK_REG(r16)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r14,STK_REG(r14)(r1)
+.Ldo_err3:
+ bl .exit_vmx_copy
+ ld r0,STACKFRAMESIZE+16(r1)
+ mtlr r0
+ b .Lexit
+#endif /* CONFIG_ALTIVEC */
+
+.Ldo_err2:
+ ld r22,STK_REG(r22)(r1)
+ ld r21,STK_REG(r21)(r1)
+ ld r20,STK_REG(r20)(r1)
+ ld r19,STK_REG(r19)(r1)
+ ld r18,STK_REG(r18)(r1)
+ ld r17,STK_REG(r17)(r1)
+ ld r16,STK_REG(r16)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r14,STK_REG(r14)(r1)
+.Lexit:
+ addi r1,r1,STACKFRAMESIZE
+.Ldo_err1:
+ ld r3,48(r1)
+ ld r4,56(r1)
+ ld r5,64(r1)
+ b __copy_tofrom_user_base
+
+
+_GLOBAL(__copy_tofrom_user_power7)
+#ifdef CONFIG_ALTIVEC
+ cmpldi r5,16
+ cmpldi cr1,r5,4096
+
+ std r3,48(r1)
+ std r4,56(r1)
+ std r5,64(r1)
+
+ blt .Lshort_copy
+ bgt cr1,.Lvmx_copy
+#else
+ cmpldi r5,16
+
+ std r3,48(r1)
+ std r4,56(r1)
+ std r5,64(r1)
+
+ blt .Lshort_copy
+#endif
+
+.Lnonvmx_copy:
+ /* Get the source 8B aligned */
+ neg r6,r4
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-3)
+
+ bf cr7*4+3,1f
+err1; lbz r0,0(r4)
+ addi r4,r4,1
+err1; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err1; lhz r0,0(r4)
+ addi r4,r4,2
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err1; lwz r0,0(r4)
+ addi r4,r4,4
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+3: sub r5,r5,r6
+ cmpldi r5,128
+ blt 5f
+
+ mflr r0
+ stdu r1,-STACKFRAMESIZE(r1)
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+ std r17,STK_REG(r17)(r1)
+ std r18,STK_REG(r18)(r1)
+ std r19,STK_REG(r19)(r1)
+ std r20,STK_REG(r20)(r1)
+ std r21,STK_REG(r21)(r1)
+ std r22,STK_REG(r22)(r1)
+ std r0,STACKFRAMESIZE+16(r1)
+
+ srdi r6,r5,7
+ mtctr r6
+
+ /* Now do cacheline (128B) sized loads and stores. */
+ .align 5
+4:
+err2; ld r0,0(r4)
+err2; ld r6,8(r4)
+err2; ld r7,16(r4)
+err2; ld r8,24(r4)
+err2; ld r9,32(r4)
+err2; ld r10,40(r4)
+err2; ld r11,48(r4)
+err2; ld r12,56(r4)
+err2; ld r14,64(r4)
+err2; ld r15,72(r4)
+err2; ld r16,80(r4)
+err2; ld r17,88(r4)
+err2; ld r18,96(r4)
+err2; ld r19,104(r4)
+err2; ld r20,112(r4)
+err2; ld r21,120(r4)
+ addi r4,r4,128
+err2; std r0,0(r3)
+err2; std r6,8(r3)
+err2; std r7,16(r3)
+err2; std r8,24(r3)
+err2; std r9,32(r3)
+err2; std r10,40(r3)
+err2; std r11,48(r3)
+err2; std r12,56(r3)
+err2; std r14,64(r3)
+err2; std r15,72(r3)
+err2; std r16,80(r3)
+err2; std r17,88(r3)
+err2; std r18,96(r3)
+err2; std r19,104(r3)
+err2; std r20,112(r3)
+err2; std r21,120(r3)
+ addi r3,r3,128
+ bdnz 4b
+
+ clrldi r5,r5,(64-7)
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+ ld r17,STK_REG(r17)(r1)
+ ld r18,STK_REG(r18)(r1)
+ ld r19,STK_REG(r19)(r1)
+ ld r20,STK_REG(r20)(r1)
+ ld r21,STK_REG(r21)(r1)
+ ld r22,STK_REG(r22)(r1)
+ addi r1,r1,STACKFRAMESIZE
+
+ /* Up to 127B to go */
+5: srdi r6,r5,4
+ mtocrf 0x01,r6
+
+6: bf cr7*4+1,7f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+err1; ld r7,16(r4)
+err1; ld r8,24(r4)
+err1; ld r9,32(r4)
+err1; ld r10,40(r4)
+err1; ld r11,48(r4)
+err1; ld r12,56(r4)
+ addi r4,r4,64
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+err1; std r7,16(r3)
+err1; std r8,24(r3)
+err1; std r9,32(r3)
+err1; std r10,40(r3)
+err1; std r11,48(r3)
+err1; std r12,56(r3)
+ addi r3,r3,64
+
+ /* Up to 63B to go */
+7: bf cr7*4+2,8f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+err1; ld r7,16(r4)
+err1; ld r8,24(r4)
+ addi r4,r4,32
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+err1; std r7,16(r3)
+err1; std r8,24(r3)
+ addi r3,r3,32
+
+ /* Up to 31B to go */
+8: bf cr7*4+3,9f
+err1; ld r0,0(r4)
+err1; ld r6,8(r4)
+ addi r4,r4,16
+err1; std r0,0(r3)
+err1; std r6,8(r3)
+ addi r3,r3,16
+
+9: clrldi r5,r5,(64-4)
+
+ /* Up to 15B to go */
+.Lshort_copy:
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err1; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err1; lwz r6,4(r4)
+ addi r4,r4,8
+err1; stw r0,0(r3)
+err1; stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err1; lwz r0,0(r4)
+ addi r4,r4,4
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err1; lhz r0,0(r4)
+ addi r4,r4,2
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err1; lbz r0,0(r4)
+err1; stb r0,0(r3)
+
+15: li r3,0
+ blr
+
+.Lunwind_stack_nonvmx_copy:
+ addi r1,r1,STACKFRAMESIZE
+ b .Lnonvmx_copy
+
+#ifdef CONFIG_ALTIVEC
+.Lvmx_copy:
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ bl .enter_vmx_copy
+ cmpwi r3,0
+ ld r0,STACKFRAMESIZE+16(r1)
+ ld r3,STACKFRAMESIZE+48(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ ld r5,STACKFRAMESIZE+64(r1)
+ mtlr r0
+
+ beq .Lunwind_stack_nonvmx_copy
+
+ /*
+ * If source and destination are not relatively aligned we use a
+ * slower permute loop.
+ */
+ xor r6,r4,r3
+ rldicl. r6,r6,0,(64-4)
+ bne .Lvmx_unaligned_copy
+
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+err3; lbz r0,0(r4)
+ addi r4,r4,1
+err3; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+err3; ld r0,0(r4)
+ addi r4,r4,8
+err3; std r0,0(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+	/* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ bf cr7*4+3,5f
+err3; lvx vr1,r0,r4
+ addi r4,r4,16
+err3; stvx vr1,r0,r3
+ addi r3,r3,16
+
+5: bf cr7*4+2,6f
+err3; lvx vr1,r0,r4
+err3; lvx vr0,r4,r9
+ addi r4,r4,32
+err3; stvx vr1,r0,r3
+err3; stvx vr0,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+err3; lvx vr3,r0,r4
+err3; lvx vr2,r4,r9
+err3; lvx vr1,r4,r10
+err3; lvx vr0,r4,r11
+ addi r4,r4,64
+err3; stvx vr3,r0,r3
+err3; stvx vr2,r3,r9
+err3; stvx vr1,r3,r10
+err3; stvx vr0,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+err4; lvx vr7,r0,r4
+err4; lvx vr6,r4,r9
+err4; lvx vr5,r4,r10
+err4; lvx vr4,r4,r11
+err4; lvx vr3,r4,r12
+err4; lvx vr2,r4,r14
+err4; lvx vr1,r4,r15
+err4; lvx vr0,r4,r16
+ addi r4,r4,128
+err4; stvx vr7,r0,r3
+err4; stvx vr6,r3,r9
+err4; stvx vr5,r3,r10
+err4; stvx vr4,r3,r11
+err4; stvx vr3,r3,r12
+err4; stvx vr2,r3,r14
+err4; stvx vr1,r3,r15
+err4; stvx vr0,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+err3; lvx vr3,r0,r4
+err3; lvx vr2,r4,r9
+err3; lvx vr1,r4,r10
+err3; lvx vr0,r4,r11
+ addi r4,r4,64
+err3; stvx vr3,r0,r3
+err3; stvx vr2,r3,r9
+err3; stvx vr1,r3,r10
+err3; stvx vr0,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+err3; lvx vr1,r0,r4
+err3; lvx vr0,r4,r9
+ addi r4,r4,32
+err3; stvx vr1,r0,r3
+err3; stvx vr0,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+err3; lvx vr1,r0,r4
+ addi r4,r4,16
+err3; stvx vr1,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err3; ld r0,0(r4)
+ addi r4,r4,8
+err3; std r0,0(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err3; lbz r0,0(r4)
+err3; stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ b .exit_vmx_copy /* tail call optimise */
+
+.Lvmx_unaligned_copy:
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+err3; lbz r0,0(r4)
+ addi r4,r4,1
+err3; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err3; lwz r7,4(r4)
+ addi r4,r4,8
+err3; stw r0,0(r3)
+err3; stw r7,4(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+	/* Get the destination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ lvsl vr16,0,r4 /* Setup permute control vector */
+err3; lvx vr0,0,r4
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+err3; stvx vr8,r0,r3
+ addi r3,r3,16
+ vor vr0,vr1,vr1
+
+5: bf cr7*4+2,6f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+err3; lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+err3; lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+err3; lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+err3; lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+err3; lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+err3; stvx vr10,r3,r10
+err3; stvx vr11,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(r14)(r1)
+ std r15,STK_REG(r15)(r1)
+ std r16,STK_REG(r16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+err4; lvx vr7,r0,r4
+ vperm vr8,vr0,vr7,vr16
+err4; lvx vr6,r4,r9
+ vperm vr9,vr7,vr6,vr16
+err4; lvx vr5,r4,r10
+ vperm vr10,vr6,vr5,vr16
+err4; lvx vr4,r4,r11
+ vperm vr11,vr5,vr4,vr16
+err4; lvx vr3,r4,r12
+ vperm vr12,vr4,vr3,vr16
+err4; lvx vr2,r4,r14
+ vperm vr13,vr3,vr2,vr16
+err4; lvx vr1,r4,r15
+ vperm vr14,vr2,vr1,vr16
+err4; lvx vr0,r4,r16
+ vperm vr15,vr1,vr0,vr16
+ addi r4,r4,128
+err4; stvx vr8,r0,r3
+err4; stvx vr9,r3,r9
+err4; stvx vr10,r3,r10
+err4; stvx vr11,r3,r11
+err4; stvx vr12,r3,r12
+err4; stvx vr13,r3,r14
+err4; stvx vr14,r3,r15
+err4; stvx vr15,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(r14)(r1)
+ ld r15,STK_REG(r15)(r1)
+ ld r16,STK_REG(r16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+err3; lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+err3; lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+err3; lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+err3; lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+err3; stvx vr10,r3,r10
+err3; stvx vr11,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+err3; lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+err3; stvx vr8,r0,r3
+err3; stvx vr9,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+err3; lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+err3; stvx vr8,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ addi r4,r4,-16 /* Unwind the +16 load offset */
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
+err3; lwz r6,4(r4)
+ addi r4,r4,8
+err3; stw r0,0(r3)
+err3; stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+err3; lwz r0,0(r4)
+ addi r4,r4,4
+err3; stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+err3; lhz r0,0(r4)
+ addi r4,r4,2
+err3; sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+err3; lbz r0,0(r4)
+err3; stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ b .exit_vmx_copy /* tail call optimise */
+#endif /* CONFIG_ALTIVEC */
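
The .Lvmx_unaligned_copy path above uses the classic lvsl/vperm software realignment: lvsl turns the low four bits of the source address into a permute control vector, and each aligned 16-byte store is assembled from two neighbouring aligned loads. A plain-C, byte-level model of that idea (illustrative only — the real loop keeps the previous vector in a register instead of re-reading it, and recovers from faults through the err3/err4 exception-table entries).

#include <stdint.h>
#include <string.h>

/* Model of the lvsl/vperm realignment: dst is 16-byte aligned, src need not
 * be. Like the VMX code, it always reads whole aligned 16B blocks and so may
 * read up to 16 bytes beyond the last source block. */
static void realign_copy16(uint8_t *dst, const uint8_t *src, size_t blocks)
{
	unsigned int off = (uintptr_t)src & 0xf;	/* what lvsl encodes */
	const uint8_t *aligned = src - off;
	size_t i;

	for (i = 0; i < blocks; i++) {
		uint8_t pair[32];
		/* two adjacent aligned 16B loads: the "prev" and "cur" vectors */
		memcpy(pair, aligned + 16 * i, 32);
		/* vperm picks bytes off..off+15 of the 32-byte concatenation */
		memcpy(dst + 16 * i, pair + off, 16);
	}
}
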
diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/copyuser_power7_vmx.c
new file mode 100644
index 00000000000..6e1efadac48
--- /dev/null
+++ b/arch/powerpc/lib/copyuser_power7_vmx.c
@@ -0,0 +1,50 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2011
+ *
+ * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+
+int enter_vmx_copy(void)
+{
+ if (in_interrupt())
+ return 0;
+
+	/* This acts as preempt_disable() as well and lets us then call
+	 * enable_kernel_altivec(). We need to disable page faults
+ * as they can call schedule and thus make us lose the VMX
+ * context. So on page faults, we just fail which will cause
+ * a fallback to the normal non-vmx copy.
+ */
+ pagefault_disable();
+
+ enable_kernel_altivec();
+
+ return 1;
+}
+
+/*
+ * This function must return 0 because we tail call optimise when calling
+ * from __copy_tofrom_user_power7 which returns 0 on success.
+ */
+int exit_vmx_copy(void)
+{
+ pagefault_enable();
+ return 0;
+}
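
enter_vmx_copy()/exit_vmx_copy() bracket the Altivec section of the copy: the assembly calls enter_vmx_copy() and, if it returns 0 (interrupt context), unwinds its stack frame and falls back to the integer path. A rough C rendering of that control flow, for orientation only; short_copy(), nonvmx_copy() and vmx_copy() are hypothetical stand-ins for the .Lshort_copy, .Lnonvmx_copy and VMX loops in copyuser_power7.S.

/* Stand-ins for the assembly paths above (hypothetical prototypes). */
unsigned long short_copy(void *dst, const void *src, unsigned long n);
unsigned long nonvmx_copy(void *dst, const void *src, unsigned long n);
void vmx_copy(void *dst, const void *src, unsigned long n);

/* Sketch of the branches around .Lvmx_copy; returns bytes left uncopied,
 * 0 on success, matching __copy_tofrom_user. */
static unsigned long power7_copy_sketch(void *dst, const void *src,
					unsigned long n)
{
	if (n < 16)
		return short_copy(dst, src, n);

	if (n <= 4096 || !enter_vmx_copy())
		return nonvmx_copy(dst, src, n);	/* integer path */

	vmx_copy(dst, src, n);		/* lvx/stvx loops; faults fall back */
	return exit_vmx_copy();		/* must return 0: tail-call above */
}
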
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index f60e006d90c..388b95e1a00 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -78,11 +78,7 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
"tlbwe %1,%3,%5\n"
"tlbwe %0,%3,%6\n"
:
-#ifdef CONFIG_PPC47x
- : "r" (PPC47x_TLB2_S_RWX),
-#else
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
-#endif
"r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (entry),
@@ -221,7 +217,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
{
u64 size;
-#ifndef CONFIG_RELOCATABLE
+#ifndef CONFIG_NONSTATIC_KERNEL
/* We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
*/
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 991ee813d2a..3787b61f7d2 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \
mmu_context_hash$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_ICSWX) += icswx.o
+obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5efe8c96d37..2f0d1b032a8 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -44,6 +44,8 @@
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
+#include "icswx.h"
+
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -143,6 +145,21 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
+#ifdef CONFIG_PPC_ICSWX
+ /*
+ * we need to do this early because this "data storage
+ * interrupt" does not update the DAR/DEAR so we don't want to
+ * look at it
+ */
+ if (error_code & ICSWX_DSI_UCT) {
+ int ret;
+
+ ret = acop_handle_fault(regs, address, error_code);
+ if (ret)
+ return ret;
+ }
+#endif
+
if (notify_page_fault(regs))
return 0;
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 343ad0b8726..3bc700655fc 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -37,31 +37,32 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
return found;
}
-void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
+void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
+ pte_t pte)
{
unsigned long mas1, mas2;
u64 mas7_3;
unsigned long psize, tsize, shift;
unsigned long flags;
+ struct mm_struct *mm;
#ifdef CONFIG_PPC_FSL_BOOK3E
- int index, lz, ncams;
- struct vm_area_struct *vma;
+ int index, ncams;
#endif
if (unlikely(is_kernel_addr(ea)))
return;
+ mm = vma->vm_mm;
+
#ifdef CONFIG_PPC_MM_SLICES
- psize = mmu_get_tsize(get_slice_psize(mm, ea));
- tsize = mmu_get_psize(psize);
+ psize = get_slice_psize(mm, ea);
+ tsize = mmu_get_tsize(psize);
shift = mmu_psize_defs[psize].shift;
#else
- vma = find_vma(mm, ea);
- psize = vma_mmu_pagesize(vma); /* returns actual size in bytes */
- asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize));
- shift = 31 - lz;
- tsize = 21 - lz;
+ psize = vma_mmu_pagesize(vma);
+ shift = __ilog2(psize);
+ tsize = shift - 10;
#endif
/*
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5964371303a..a8b3cc7d90f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -28,22 +29,22 @@ unsigned int HPAGE_SHIFT;
/*
* Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready. On 64-bit implementations, this is
- * just used to track 16G pages and so is a single array. 32-bit
- * implementations may have more than one gpage size due to limitations
- * of the memory allocators, so we need multiple arrays
+ * huge_boot_pages list is ready. On non-Freescale implementations, this is
+ * just used to track 16G pages and so is a single array. FSL-based
+ * implementations may have more than one gpage size, so we need multiple
+ * arrays
*/
-#ifdef CONFIG_PPC64
-#define MAX_NUMBER_GPAGES 1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES 128
struct psize_gpages {
u64 gpage_list[MAX_NUMBER_GPAGES];
unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
+#else
+#define MAX_NUMBER_GPAGES 1024
+static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+static unsigned nr_gpages;
#endif
static inline int shift_to_mmu_psize(unsigned int shift)
@@ -114,12 +115,12 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
struct kmem_cache *cachep;
pte_t *new;
-#ifdef CONFIG_PPC64
- cachep = PGT_CACHE(pdshift - pshift);
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
int i;
int num_hugepd = 1 << (pshift - pdshift);
cachep = hugepte_cache;
+#else
+ cachep = PGT_CACHE(pdshift - pshift);
#endif
new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
@@ -131,12 +132,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
return -ENOMEM;
spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC64
- if (!hugepd_none(*hpdp))
- kmem_cache_free(cachep, new);
- else
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#else
+#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* We have multiple higher-level entries that point to the same
* actual pte location. Fill in each as we go and backtrack on error.
@@ -155,11 +151,28 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
hpdp->pd = 0;
kmem_cache_free(cachep, new);
}
+#else
+ if (!hugepd_none(*hpdp))
+ kmem_cache_free(cachep, new);
+ else
+ hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
spin_unlock(&mm->page_table_lock);
return 0;
}
+/*
+ * These macros define how to determine which level of the page table holds
+ * the hpdp.
+ */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
+#define HUGEPD_PUD_SHIFT PUD_SHIFT
+#else
+#define HUGEPD_PGD_SHIFT PUD_SHIFT
+#define HUGEPD_PUD_SHIFT PMD_SHIFT
+#endif
+
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
pgd_t *pg;
@@ -172,12 +185,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
- if (pshift >= PUD_SHIFT) {
+
+ if (pshift >= HUGEPD_PGD_SHIFT) {
hpdp = (hugepd_t *)pg;
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
- if (pshift >= PMD_SHIFT) {
+ if (pshift >= HUGEPD_PUD_SHIFT) {
hpdp = (hugepd_t *)pu;
} else {
pdshift = PMD_SHIFT;
@@ -197,7 +211,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(hpdp, addr, pdshift);
}
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy or bootmem allocator is setup.
*/
@@ -317,7 +331,7 @@ void __init reserve_hugetlb_gpages(void)
}
}
-#else /* PPC64 */
+#else /* !PPC_FSL_BOOK3E */
/* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy or bootmem allocator is setup.
@@ -355,7 +369,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -415,11 +429,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
unsigned long pdmask = ~((1UL << pdshift) - 1);
unsigned int num_hugepd = 1;
-#ifdef CONFIG_PPC64
- unsigned int shift = hugepd_shift(*hpdp);
-#else
- /* Note: On 32-bit the hpdp may be the first of several */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ /* Note: On fsl the hpdp may be the first of several */
num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
+#else
+ unsigned int shift = hugepd_shift(*hpdp);
#endif
start &= pdmask;
@@ -437,10 +451,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
hpdp->pd = 0;
tlb->need_flush = 1;
-#ifdef CONFIG_PPC64
- pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#else
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
hugepd_free(tlb, hugepte);
+#else
+ pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
@@ -453,14 +468,23 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long start;
start = addr;
- pmd = pmd_offset(pud, addr);
do {
+ pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd))
continue;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ /*
+ * Increment next by the size of the huge mapping since
+ * there may be more than one entry at this level for a
+ * single hugepage, but all of them point to
+ * the same kmem cache that holds the hugepte.
+ */
+ next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+#endif
free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
addr, next, floor, ceiling);
- } while (pmd++, addr = next, addr != end);
+ } while (addr = next, addr != end);
start &= PUD_MASK;
if (start < floor)
@@ -487,8 +511,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long start;
start = addr;
- pud = pud_offset(pgd, addr);
do {
+ pud = pud_offset(pgd, addr);
next = pud_addr_end(addr, end);
if (!is_hugepd(pud)) {
if (pud_none_or_clear_bad(pud))
@@ -496,10 +520,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
} else {
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ /*
+ * Increment next by the size of the huge mapping since
+ * there may be more than one entry at this level for a
+ * single hugepage, but all of them point to
+ * the same kmem cache that holds the hugepte.
+ */
+ next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+#endif
free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
addr, next, floor, ceiling);
}
- } while (pud++, addr = next, addr != end);
+ } while (addr = next, addr != end);
start &= PGDIR_MASK;
if (start < floor)
@@ -554,12 +587,12 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} else {
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
- * on 32-bit there may be more than one entry at the pgd
- * level for a single hugepage, but all of them point to
- * the same kmem cache that holds the hugepte.
+ * there may be more than one entry at the pgd level
+ * for a single hugepage, but all of them point to the
+ * same kmem cache that holds the hugepte.
*/
next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
@@ -697,19 +730,17 @@ int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
return 1;
}
+#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
-#ifdef CONFIG_PPC_MM_SLICES
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
-#else
- return get_unmapped_area(file, addr, len, pgoff, flags);
-#endif
}
+#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
@@ -783,7 +814,7 @@ static int __init hugepage_setup_sz(char *str)
}
__setup("hugepagesz=", hugepage_setup_sz);
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
new file mode 100644
index 00000000000..5d9a59eaad9
--- /dev/null
+++ b/arch/powerpc/mm/icswx.c
@@ -0,0 +1,273 @@
+/*
+ * ICSWX and ACOP Management
+ *
+ * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "icswx.h"
+
+/*
+ * The processor and its L2 cache cause the icswx instruction to
+ * generate a COP_REQ transaction on PowerBus. The transaction has no
+ * address, and the processor does not perform an MMU access to
+ * authenticate the transaction. The command portion of the PowerBus
+ * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
+ * Process ID (PID), which the coprocessor compares to the authorized
+ * LPID and PID held in the coprocessor, to determine if the process
+ * is authorized to generate the transaction. The data of the COP_REQ
+ * transaction is 128 bytes or less in size and is placed in cacheable
+ * memory on a 128-byte cache line boundary.
+ *
+ * The task to use a coprocessor should use use_cop() to mark the use
+ * of the Coprocessor Type (CT) and context switching. On a server
+ * class processor, the PID register is used only for coprocessor
+ * management and so a coprocessor PID is allocated before
+ * executing the icswx instruction. Drop_cop() is used to free the
+ * coprocessor PID.
+ *
+ * Example:
+ * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
+ * Each HFI have multiple windows. Each HFI window serves as a
+ * network device sending to and receiving from HFI network.
+ * HFI immediate send function uses the icswx instruction. The immediate
+ * send function allows small (single cache-line) packets to be sent
+ * without using the regular HFI send FIFO and doorbell, which are
+ * much slower than immediate send.
+ *
+ * For each task intending to use HFI immediate send, the HFI driver
+ * calls use_cop() to obtain a coprocessor PID for the task.
+ * The HFI driver then allocates a free HFI window and saves the
+ * coprocessor PID to the HFI window to allow the task to use the
+ * HFI window.
+ *
+ * The HFI driver repeatedly creates immediate send packets and
+ * issues icswx instruction to send data through the HFI window.
+ * The HFI compares the coprocessor PID in the CPU PID register
+ * to the PID held in the HFI window to determine if the transaction
+ * is allowed.
+ *
+ * When the task is done with the HFI window, the HFI driver calls
+ * drop_cop() to release the coprocessor PID.
+ */
+
+void switch_cop(struct mm_struct *next)
+{
+#ifdef CONFIG_ICSWX_PID
+ mtspr(SPRN_PID, next->context.cop_pid);
+#endif
+ mtspr(SPRN_ACOP, next->context.acop);
+}
+
+/**
+ * Start using a coprocessor.
+ * @acop: mask of coprocessor to be used.
+ * @mm: The mm to associate the coprocessor with. Most likely the current mm.
+ *
+ * Return a positive PID if successful. Negative errno otherwise.
+ * The returned PID will be fed to the coprocessor to determine if an
+ * icswx transaction is authenticated.
+ */
+int use_cop(unsigned long acop, struct mm_struct *mm)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_ICSWX))
+ return -ENODEV;
+
+ if (!mm || !acop)
+ return -EINVAL;
+
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
+ spin_lock(mm->context.cop_lockp);
+
+ ret = get_cop_pid(mm);
+ if (ret < 0)
+ goto out;
+
+ /* update acop */
+ mm->context.acop |= acop;
+
+ sync_cop(mm);
+
+ /*
+ * If this is a threaded process then there might be other threads
+ * running. We need to send an IPI to force them to pick up any
+ * change in PID and ACOP.
+ */
+ if (atomic_read(&mm->mm_users) > 1)
+ smp_call_function(sync_cop, mm, 1);
+
+out:
+ spin_unlock(mm->context.cop_lockp);
+ spin_unlock(&mm->page_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(use_cop);
+
+/**
+ * Stop using a coprocessor.
+ * @acop: mask of coprocessor to be stopped.
+ * @mm: The mm the coprocessor is associated with.
+ */
+void drop_cop(unsigned long acop, struct mm_struct *mm)
+{
+ int free_pid;
+
+ if (!cpu_has_feature(CPU_FTR_ICSWX))
+ return;
+
+ if (WARN_ON_ONCE(!mm))
+ return;
+
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
+ spin_lock(mm->context.cop_lockp);
+
+ mm->context.acop &= ~acop;
+
+ free_pid = disable_cop_pid(mm);
+ sync_cop(mm);
+
+ /*
+ * If this is a threaded process then there might be other threads
+ * running. We need to send an IPI to force them to pick up any
+ * change in PID and ACOP.
+ */
+ if (atomic_read(&mm->mm_users) > 1)
+ smp_call_function(sync_cop, mm, 1);
+
+ if (free_pid != COP_PID_NONE)
+ free_cop_pid(free_pid);
+
+ spin_unlock(mm->context.cop_lockp);
+ spin_unlock(&mm->page_table_lock);
+}
+EXPORT_SYMBOL_GPL(drop_cop);
+
+static int acop_use_cop(int ct)
+{
+ /* todo */
+ return -1;
+}
+
+/*
+ * Get the instruction word at the NIP
+ */
+static u32 acop_get_inst(struct pt_regs *regs)
+{
+ u32 inst;
+ u32 __user *p;
+
+ p = (u32 __user *)regs->nip;
+ if (!access_ok(VERIFY_READ, p, sizeof(*p)))
+ return 0;
+
+ if (__get_user(inst, p))
+ return 0;
+
+ return inst;
+}
+
+/**
+ * @regs: registers at time of interrupt
+ * @address: storage address
+ * @error_code: Fault code, usually the DSISR or ESR depending on
+ * processor type
+ *
+ * Return 0 if we are able to resolve the data storage fault that
+ * results from a CT miss in the ACOP register.
+ */
+int acop_handle_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
+{
+ int ct;
+ u32 inst = 0;
+
+ if (!cpu_has_feature(CPU_FTR_ICSWX)) {
+ pr_info("No coprocessors available");
+ _exception(SIGILL, regs, ILL_ILLOPN, address);
+ }
+
+ if (!user_mode(regs)) {
+ /* this could happen if the HV denies the
+ * kernel access, for now we just die */
+ die("ICSWX from kernel failed", regs, SIGSEGV);
+ }
+
+ /* Some implementations leave us a hint for the CT */
+ ct = ICSWX_GET_CT_HINT(error_code);
+ if (ct < 0) {
+ /* we have to peek at the instruction word to figure out CT */
+ u32 ccw;
+ u32 rs;
+
+ inst = acop_get_inst(regs);
+ if (inst == 0)
+ return -1;
+
+ rs = (inst >> (31 - 10)) & 0x1f;
+ ccw = regs->gpr[rs];
+ ct = (ccw >> 16) & 0x3f;
+ }
+
+ if (!acop_use_cop(ct))
+ return 0;
+
+ /* at this point the CT is unknown to the system */
+ pr_warn("%s[%d]: Coprocessor %d is unavailable",
+ current->comm, current->pid, ct);
+
+ /* get inst if we don't already have it */
+ if (inst == 0) {
+ inst = acop_get_inst(regs);
+ if (inst == 0)
+ return -1;
+ }
+
+ /* Check if the instruction is the "record form" */
+ if (inst & 1) {
+ /*
+ * the instruction is "record" form so we can reject
+ * using CR0
+ */
+ regs->ccr &= ~(0xful << 28);
+ regs->ccr |= ICSWX_RC_NOT_FOUND << 28;
+
+ /* Move on to the next instruction */
+ regs->nip += 4;
+ } else {
+ /*
+ * There is no architected mechanism to report a bad
+ * CT so we could either SIGILL or report nothing.
+	 * Since the non-record version should only be used
+ * for "hints" or "don't care" we should probably do
+ * nothing. However, I could see how some people
+	 * might want a SIGILL, so it is here if you want it.
+ */
+#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
+ _exception(SIGILL, regs, ILL_ILLOPN, address);
+#else
+ regs->nip += 4;
+#endif
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acop_handle_fault);
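
The comment block at the top of icswx.c describes how a coprocessor driver is expected to use these interfaces. A condensed sketch of that lifecycle, using only the functions exported above; program_window_pid() is a hypothetical placeholder for the driver-specific step (for instance the HFI window setup mentioned in the comment).

/* Hedged sketch of the use_cop()/drop_cop() lifecycle in a driver. */
void program_window_pid(int pid);	/* hypothetical driver hook */

static int example_open_window(unsigned long acop_mask)
{
	int pid = use_cop(acop_mask, current->mm);

	if (pid < 0)
		return pid;	/* e.g. -ENODEV without CPU_FTR_ICSWX */

	/* hand the authorized PID to the coprocessor window */
	program_window_pid(pid);
	return 0;
}

static void example_close_window(unsigned long acop_mask)
{
	drop_cop(acop_mask, current->mm);
}
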
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
new file mode 100644
index 00000000000..42176bd0884
--- /dev/null
+++ b/arch/powerpc/mm/icswx.h
@@ -0,0 +1,62 @@
+#ifndef _ARCH_POWERPC_MM_ICSWX_H_
+#define _ARCH_POWERPC_MM_ICSWX_H_
+
+/*
+ * ICSWX and ACOP Management
+ *
+ * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/mmu_context.h>
+
+/* also used to denote that PIDs are not used */
+#define COP_PID_NONE 0
+
+static inline void sync_cop(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (mm == current->active_mm)
+ switch_cop(current->active_mm);
+}
+
+#ifdef CONFIG_PPC_ICSWX_PID
+extern int get_cop_pid(struct mm_struct *mm);
+extern int disable_cop_pid(struct mm_struct *mm);
+extern void free_cop_pid(int free_pid);
+#else
+#define get_cop_pid(m) (COP_PID_NONE)
+#define disable_cop_pid(m) (COP_PID_NONE)
+#define free_cop_pid(p)
+#endif
+
+/*
+ * These are implementation bits for architected registers. If this
+ * ever becomes part of the architecture, these should be moved to reg.h et al.
+ */
+/* UCT is the same bit for Server and Embedded */
+#define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */
+
+#ifdef CONFIG_PPC_BOOK3E
+/* Embedded implementation gives us no hints as to what the CT is */
+#define ICSWX_GET_CT_HINT(x) (-1)
+#else
+/* Server implementation contains the CT value in the DSISR */
+#define ICSWX_DSISR_CTMASK 0x00003f00
+#define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8)
+#endif
+
+#define ICSWX_RC_STARTED 0x8 /* The request has been started */
+#define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */
+#define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */
+#define ICSWX_RC_UNDEFINED 0x1 /* Reserved */
+
+extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/icswx_pid.c b/arch/powerpc/mm/icswx_pid.c
new file mode 100644
index 00000000000..91e30eb7d05
--- /dev/null
+++ b/arch/powerpc/mm/icswx_pid.c
@@ -0,0 +1,87 @@
+/*
+ * ICSWX and ACOP/PID Management
+ *
+ * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include "icswx.h"
+
+#define COP_PID_MIN (COP_PID_NONE + 1)
+#define COP_PID_MAX (0xFFFF)
+
+static DEFINE_SPINLOCK(mmu_context_acop_lock);
+static DEFINE_IDA(cop_ida);
+
+static int new_cop_pid(struct ida *ida, int min_id, int max_id,
+ spinlock_t *lock)
+{
+ int index;
+ int err;
+
+again:
+ if (!ida_pre_get(ida, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(lock);
+ err = ida_get_new_above(ida, min_id, &index);
+ spin_unlock(lock);
+
+ if (err == -EAGAIN)
+ goto again;
+ else if (err)
+ return err;
+
+ if (index > max_id) {
+ spin_lock(lock);
+ ida_remove(ida, index);
+ spin_unlock(lock);
+ return -ENOMEM;
+ }
+
+ return index;
+}
+
+int get_cop_pid(struct mm_struct *mm)
+{
+ int pid;
+
+ if (mm->context.cop_pid == COP_PID_NONE) {
+ pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
+ &mmu_context_acop_lock);
+ if (pid >= 0)
+ mm->context.cop_pid = pid;
+ }
+ return mm->context.cop_pid;
+}
+
+int disable_cop_pid(struct mm_struct *mm)
+{
+ int free_pid = COP_PID_NONE;
+
+ if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
+ free_pid = mm->context.cop_pid;
+ mm->context.cop_pid = COP_PID_NONE;
+ }
+ return free_pid;
+}
+
+void free_cop_pid(int free_pid)
+{
+ spin_lock(&mmu_context_acop_lock);
+ ida_remove(&cop_ida, free_pid);
+ spin_unlock(&mmu_context_acop_lock);
+}
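
Together with sync_cop() from icswx.h, these three functions carry the PID half of the use_cop()/drop_cop() flow (the original callers appear below, in the code removed from mmu_context_hash64.c). A condensed sketch of the lifecycle, with the mm locking deliberately elided:

/* Illustrative only: lifetime of a coprocessor PID for one mm.
 * The real callers hold mm->page_table_lock and context.cop_lockp
 * around all of this; see use_cop()/drop_cop(). */
static int example_attach(struct mm_struct *mm, unsigned long acop_mask)
{
        int pid = get_cop_pid(mm);              /* allocate or reuse context.cop_pid */
        if (pid < 0)
                return pid;

        mm->context.acop |= acop_mask;          /* enable the coprocessor type(s) */
        sync_cop(mm);                           /* load the PID/ACOP SPRs locally */
        if (atomic_read(&mm->mm_users) > 1)     /* other threads may be running */
                smp_call_function(sync_cop, mm, 1);
        return pid;
}

static void example_detach(struct mm_struct *mm, unsigned long acop_mask)
{
        int free_pid;

        mm->context.acop &= ~acop_mask;
        free_pid = disable_cop_pid(mm);         /* COP_PID_NONE while still in use */
        sync_cop(mm);
        if (free_pid != COP_PID_NONE)
                free_cop_pid(free_pid);
}
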
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 161cefde5c1..6157be2a704 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -65,6 +65,13 @@ phys_addr_t memstart_addr = (phys_addr_t)~0ull;
EXPORT_SYMBOL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
+
+#ifdef CONFIG_RELOCATABLE_PPC32
+/* Used in __va()/__pa() */
+long long virt_phys_offset;
+EXPORT_SYMBOL(virt_phys_offset);
+#endif
+
phys_addr_t lowmem_end_addr;
int boot_mapsize;
@@ -134,8 +141,7 @@ void __init MMU_init(void)
if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
- memblock.memory.cnt = 1;
- memblock_analyze();
+ memblock_enforce_memory_limit(memblock.memory.regions[0].size);
printk(KERN_WARNING "Only using first contiguous memory region");
#else
wii_memory_fixups();
@@ -158,7 +164,6 @@ void __init MMU_init(void)
#ifndef CONFIG_HIGHMEM
total_memory = total_lowmem;
memblock_enforce_memory_limit(total_lowmem);
- memblock_analyze();
#endif /* CONFIG_HIGHMEM */
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2dd6bdd31fe..d974b79a306 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -51,6 +51,7 @@
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
+#include <asm/rtas.h>
#include "mmu_decl.h"
@@ -199,7 +200,7 @@ void __init do_init_bootmem(void)
unsigned long start_pfn, end_pfn;
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
- add_active_range(0, start_pfn, end_pfn);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
/* Add all physical memory to the bootmem map, mark each area
@@ -553,7 +554,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
&& defined(CONFIG_HUGETLB_PAGE)
if (is_vm_hugetlb_page(vma))
- book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+ book3e_hugetlb_preload(vma, address, *ptep);
#endif
}
@@ -585,3 +586,23 @@ static int add_system_ram_resources(void)
return 0;
}
subsys_initcall(add_system_ram_resources);
+
+#ifdef CONFIG_STRICT_DEVMEM
+/*
+ * devmem_is_allowed(): check to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ if (page_is_rtas_user_buf(pfn))
+ return 1;
+ return 0;
+}
+#endif /* CONFIG_STRICT_DEVMEM */
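
devmem_is_allowed() answers for a single page, so a /dev/mem style caller has to walk the requested range page by page. A minimal sketch of such a walk; the helper name and the -EPERM policy are assumptions, only devmem_is_allowed() itself comes from this hunk:

/* Illustrative only: refuse a /dev/mem mapping if any page of the
 * physical range is disallowed under CONFIG_STRICT_DEVMEM. */
static int example_range_is_allowed(unsigned long pfn, unsigned long size)
{
        unsigned long end_pfn = pfn + ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        for (; pfn < end_pfn; pfn++)
                if (!devmem_is_allowed(pfn))
                        return -EPERM;  /* e.g. ordinary kernel RAM */
        return 0;                       /* MMIO and RTAS user buffers pass */
}
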
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
index 5a783d8e8e8..67a42ed0d2f 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
@@ -53,14 +53,6 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-/*
- * Since get_random_int() returns the same value within a 1 jiffy window,
- * we will almost always get the same randomisation for the stack and mmap
- * region. This will mean the relative distance between stack and mmap will
- * be the same.
- *
- * To avoid this we can shift the randomness by 1 bit.
- */
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;
@@ -68,11 +60,11 @@ static unsigned long mmap_rnd(void)
if (current->flags & PF_RANDOMIZE) {
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(22-PAGE_SHIFT)));
+ rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
else
- rnd = (long)(get_random_int() % (1<<(29-PAGE_SHIFT)));
+ rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
}
- return (rnd << PAGE_SHIFT) * 2;
+ return rnd << PAGE_SHIFT;
}
static inline unsigned long mmap_base(void)
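
The new bounds keep the documented 8 MB / 1 GB spans but randomise at single-page granularity; the old code drew from half the span and then doubled the result, so the lowest page bit of the offset was never random. A small worked example with 4 KB pages, restating the arithmetic in the hunk:

/* Illustrative only: maximum mmap randomisation with PAGE_SHIFT == 12. */
#include <stdio.h>

int main(void)
{
        const int page_shift = 12;                              /* 4 KB pages */
        unsigned long pages32 = 1UL << (23 - page_shift);       /* 2048 pages */
        unsigned long pages64 = 1UL << (30 - page_shift);       /* 262144 pages */

        printf("32-bit: up to %lu MB\n", (pages32 << page_shift) >> 20);  /* 8 */
        printf("64-bit: up to %lu MB\n", (pages64 << page_shift) >> 20);  /* 1024 */
        return 0;
}
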
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index ca988a3d5fb..40677aa0190 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,200 +24,7 @@
#include <asm/mmu_context.h>
-#ifdef CONFIG_PPC_ICSWX
-/*
- * The processor and its L2 cache cause the icswx instruction to
- * generate a COP_REQ transaction on PowerBus. The transaction has
- * no address, and the processor does not perform an MMU access
- * to authenticate the transaction. The command portion of the
- * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
- * the coprocessor Process ID (PID), which the coprocessor compares
- * to the authorized LPID and PID held in the coprocessor, to determine
- * if the process is authorized to generate the transaction.
- * The data of the COP_REQ transaction is 128-byte or less and is
- * placed in cacheable memory on a 128-byte cache line boundary.
- *
- * The task to use a coprocessor should use use_cop() to allocate
- * a coprocessor PID before executing icswx instruction. use_cop()
- * also enables the coprocessor context switching. Drop_cop() is
- * used to free the coprocessor PID.
- *
- * Example:
- * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
- * Each HFI have multiple windows. Each HFI window serves as a
- * network device sending to and receiving from HFI network.
- * HFI immediate send function uses icswx instruction. The immediate
- * send function allows small (single cache-line) packets be sent
- * without using the regular HFI send FIFO and doorbell, which are
- * much slower than immediate send.
- *
- * For each task intending to use HFI immediate send, the HFI driver
- * calls use_cop() to obtain a coprocessor PID for the task.
- * The HFI driver then allocate a free HFI window and save the
- * coprocessor PID to the HFI window to allow the task to use the
- * HFI window.
- *
- * The HFI driver repeatedly creates immediate send packets and
- * issues icswx instruction to send data through the HFI window.
- * The HFI compares the coprocessor PID in the CPU PID register
- * to the PID held in the HFI window to determine if the transaction
- * is allowed.
- *
- * When the task to release the HFI window, the HFI driver calls
- * drop_cop() to release the coprocessor PID.
- */
-
-#define COP_PID_NONE 0
-#define COP_PID_MIN (COP_PID_NONE + 1)
-#define COP_PID_MAX (0xFFFF)
-
-static DEFINE_SPINLOCK(mmu_context_acop_lock);
-static DEFINE_IDA(cop_ida);
-
-void switch_cop(struct mm_struct *next)
-{
- mtspr(SPRN_PID, next->context.cop_pid);
- mtspr(SPRN_ACOP, next->context.acop);
-}
-
-static int new_cop_pid(struct ida *ida, int min_id, int max_id,
- spinlock_t *lock)
-{
- int index;
- int err;
-
-again:
- if (!ida_pre_get(ida, GFP_KERNEL))
- return -ENOMEM;
-
- spin_lock(lock);
- err = ida_get_new_above(ida, min_id, &index);
- spin_unlock(lock);
-
- if (err == -EAGAIN)
- goto again;
- else if (err)
- return err;
-
- if (index > max_id) {
- spin_lock(lock);
- ida_remove(ida, index);
- spin_unlock(lock);
- return -ENOMEM;
- }
-
- return index;
-}
-
-static void sync_cop(void *arg)
-{
- struct mm_struct *mm = arg;
-
- if (mm == current->active_mm)
- switch_cop(current->active_mm);
-}
-
-/**
- * Start using a coprocessor.
- * @acop: mask of coprocessor to be used.
- * @mm: The mm the coprocessor to associate with. Most likely current mm.
- *
- * Return a positive PID if successful. Negative errno otherwise.
- * The returned PID will be fed to the coprocessor to determine if an
- * icswx transaction is authenticated.
- */
-int use_cop(unsigned long acop, struct mm_struct *mm)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_ICSWX))
- return -ENODEV;
-
- if (!mm || !acop)
- return -EINVAL;
-
- /* The page_table_lock ensures mm_users won't change under us */
- spin_lock(&mm->page_table_lock);
- spin_lock(mm->context.cop_lockp);
-
- if (mm->context.cop_pid == COP_PID_NONE) {
- ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
- &mmu_context_acop_lock);
- if (ret < 0)
- goto out;
-
- mm->context.cop_pid = ret;
- }
- mm->context.acop |= acop;
-
- sync_cop(mm);
-
- /*
- * If this is a threaded process then there might be other threads
- * running. We need to send an IPI to force them to pick up any
- * change in PID and ACOP.
- */
- if (atomic_read(&mm->mm_users) > 1)
- smp_call_function(sync_cop, mm, 1);
-
- ret = mm->context.cop_pid;
-
-out:
- spin_unlock(mm->context.cop_lockp);
- spin_unlock(&mm->page_table_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(use_cop);
-
-/**
- * Stop using a coprocessor.
- * @acop: mask of coprocessor to be stopped.
- * @mm: The mm the coprocessor associated with.
- */
-void drop_cop(unsigned long acop, struct mm_struct *mm)
-{
- int free_pid = COP_PID_NONE;
-
- if (!cpu_has_feature(CPU_FTR_ICSWX))
- return;
-
- if (WARN_ON_ONCE(!mm))
- return;
-
- /* The page_table_lock ensures mm_users won't change under us */
- spin_lock(&mm->page_table_lock);
- spin_lock(mm->context.cop_lockp);
-
- mm->context.acop &= ~acop;
-
- if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
- free_pid = mm->context.cop_pid;
- mm->context.cop_pid = COP_PID_NONE;
- }
-
- sync_cop(mm);
-
- /*
- * If this is a threaded process then there might be other threads
- * running. We need to send an IPI to force them to pick up any
- * change in PID and ACOP.
- */
- if (atomic_read(&mm->mm_users) > 1)
- smp_call_function(sync_cop, mm, 1);
-
- if (free_pid != COP_PID_NONE) {
- spin_lock(&mmu_context_acop_lock);
- ida_remove(&cop_ida, free_pid);
- spin_unlock(&mmu_context_acop_lock);
- }
-
- spin_unlock(mm->context.cop_lockp);
- spin_unlock(&mm->page_table_lock);
-}
-EXPORT_SYMBOL_GPL(drop_cop);
-
-#endif /* CONFIG_PPC_ICSWX */
+#include "icswx.h"
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b22a83a91cb..4ff3d8e411a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
}
/*
- * get_active_region_work_fn - A helper function for get_node_active_region
- * Returns datax set to the start_pfn and end_pfn if they contain
- * the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- * goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct node_active_region *data;
- data = (struct node_active_region *)datax;
-
- if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
- data->start_pfn = start_pfn;
- data->end_pfn = end_pfn;
- return 1;
- }
- return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
* Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
*/
-static void __init get_node_active_region(unsigned long start_pfn,
- struct node_active_region *node_ar)
+static void __init get_node_active_region(unsigned long pfn,
+ struct node_active_region *node_ar)
{
- int nid = early_pfn_to_nid(start_pfn);
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
- node_ar->nid = nid;
- node_ar->start_pfn = start_pfn;
- node_ar->end_pfn = start_pfn;
- work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ if (pfn >= start_pfn && pfn < end_pfn) {
+ node_ar->nid = nid;
+ node_ar->start_pfn = start_pfn;
+ node_ar->end_pfn = end_pfn;
+ break;
+ }
+ }
}
static void map_cpu_to_node(int cpu, int node)
@@ -406,7 +386,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
of_node_put(memory);
}
-static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const unsigned int **buf)
{
unsigned long result = 0;
@@ -521,7 +501,7 @@ static int of_get_assoc_arrays(struct device_node *memory,
aa->n_arrays = *prop++;
aa->array_sz = *prop++;
- /* Now that we know the number of arrrays and size of each array,
+ /* Now that we know the number of arrays and size of each array,
* revalidate the size of the property read in.
*/
if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
@@ -710,9 +690,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
- add_active_range(nid, base >> PAGE_SHIFT,
- (base >> PAGE_SHIFT)
- + (sz >> PAGE_SHIFT));
+ memblock_set_node(base, sz, nid);
} while (--ranges);
}
}
@@ -802,8 +780,7 @@ new_range:
continue;
}
- add_active_range(nid, start >> PAGE_SHIFT,
- (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ memblock_set_node(start, size, nid);
if (--ranges)
goto new_range;
@@ -839,7 +816,8 @@ static void __init setup_nonnuma(void)
end_pfn = memblock_region_memory_end_pfn(reg);
fake_numa_create_new_node(end_pfn, &nid);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
node_set_online(nid);
}
}
@@ -969,7 +947,7 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
.priority = 1 /* Must run before sched domains notifier. */
};
-static void mark_reserved_regions_for_nid(int nid)
+static void __init mark_reserved_regions_for_nid(int nid)
{
struct pglist_data *node = NODE_DATA(nid);
struct memblock_region *reg;
@@ -1462,7 +1440,7 @@ int arch_update_cpu_topology(void)
{
int cpu, nid, old_nid;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
- struct sys_device *sysdev;
+ struct device *dev;
for_each_cpu(cpu,&cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
@@ -1483,9 +1461,9 @@ int arch_update_cpu_topology(void)
register_cpu_under_node(cpu, nid);
put_online_cpus();
- sysdev = get_cpu_sysdev(cpu);
- if (sysdev)
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return 1;
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index dc4a5f385e4..ff672bd8fea 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -94,11 +94,11 @@
srdi r15,r16,60 /* get region */
rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
- bne- dtlb_miss_fault_bolted
+ bne- dtlb_miss_fault_bolted /* Bail if fault addr is invalid */
rlwinm r10,r11,32-19,27,27
rlwimi r10,r11,32-16,19,19
- cmpwi r15,0
+ cmpwi r15,0 /* user vs kernel check */
ori r10,r10,_PAGE_PRESENT
oris r11,r10,_PAGE_ACCESSED@h
@@ -120,44 +120,38 @@ tlb_miss_common_bolted:
rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
cmpldi cr0,r14,0
clrrdi r15,r15,3
- beq tlb_miss_fault_bolted
+ beq tlb_miss_fault_bolted /* No PGDIR, bail */
BEGIN_MMU_FTR_SECTION
/* Set the TLB reservation and search for existing entry. Then load
* the entry.
*/
PPC_TLBSRX_DOT(0,r16)
- ldx r14,r14,r15
- beq normal_tlb_miss_done
+ ldx r14,r14,r15 /* grab pgd entry */
+ beq normal_tlb_miss_done /* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
- ldx r14,r14,r15
+ ldx r14,r14,r15 /* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
#ifndef CONFIG_PPC_64K_PAGES
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
-
- cmpldi cr0,r14,0
- beq tlb_miss_fault_bolted
-
- ldx r14,r14,r15
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */
+ ldx r14,r14,r15 /* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */
rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
clrrdi r15,r15,3
-
- cmpldi cr0,r14,0
- beq tlb_miss_fault_bolted
-
- ldx r14,r14,r15
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_bolted
+ ldx r14,r14,r15 /* Grab pmd entry */
rldicl r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
clrrdi r15,r15,3
-
- cmpldi cr0,r14,0
- beq tlb_miss_fault_bolted
-
- ldx r14,r14,r15
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_bolted
+ ldx r14,r14,r15 /* Grab PTE, normal (!huge) page */
/* Check if required permissions are met */
andc. r15,r11,r14
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 4e13d6f9023..df32a838dcf 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -52,7 +52,7 @@
* indirect page table entries.
*/
#ifdef CONFIG_PPC_BOOK3E_MMU
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
@@ -615,7 +615,6 @@ static void __early_init_mmu(int boot_cpu)
/* limit memory so we dont have linear faults */
memblock_enforce_memory_limit(linear_map_top);
- memblock_analyze();
patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 153022971da..a392d12dd21 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -1,19 +1,3 @@
-#config BUBINGA
-# bool "Bubinga"
-# depends on 40x
-# default n
-# select 405EP
-# help
-# This option enables support for the IBM 405EP evaluation board.
-
-#config CPCI405
-# bool "CPCI405"
-# depends on 40x
-# default n
-# select 405GP
-# help
-# This option enables support for the CPCI405 board.
-
config ACADIA
bool "Acadia"
depends on 40x
@@ -65,14 +49,6 @@ config MAKALU
help
This option enables support for the AMCC PPC405EX board.
-#config SYCAMORE
-# bool "Sycamore"
-# depends on 40x
-# default n
-# select 405GPR
-# help
-# This option enables support for the IBM PPC405GPr evaluation board.
-
config WALNUT
bool "Walnut"
depends on 40x
@@ -100,6 +76,16 @@ config XILINX_VIRTEX_GENERIC_BOARD
Most Virtex designs should use this unless it needs to do some
special configuration at board probe time.
+config OBS600
+ bool "OpenBlockS 600"
+ depends on 40x
+ default n
+ select 405EX
+ select PPC40x_SIMPLE
+ help
+ This option enables support for the PlatHome OpenBlockS 600 server.
+
+
config PPC40x_SIMPLE
bool "Simple PowerPC 40x board support"
depends on 40x
@@ -173,16 +159,11 @@ config IBM405_ERR77
config IBM405_ERR51
bool
-#config BIOS_FIXUP
-# bool
-# depends on BUBINGA || EP405 || SYCAMORE || WALNUT
-# default y
-
-#config PPC4xx_DMA
-# bool "PPC4xx DMA controller support"
-# depends on 4xx
-
-#config PPC4xx_EDMA
-# bool
-# depends on !STB03xxx && PPC4xx_DMA
-# default y
+config APM8018X
+ bool "APM8018X"
+ depends on 40x
+ default n
+ select PPC40x_SIMPLE
+ help
+ This option enables support for the AppliedMicro APM8018X evaluation
+ board.
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index e8dd5c5df7d..97612068fae 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -55,7 +55,9 @@ static const char *board[] __initdata = {
"amcc,haleakala",
"amcc,kilauea",
"amcc,makalu",
- "est,hotfoot"
+ "apm,klondike",
+ "est,hotfoot",
+ "plathome,obs600"
};
static int __init ppc40x_probe(void)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 762322ce24a..fcf6bf2ceee 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -75,7 +75,7 @@ config KATMAI
select PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PCC4xx_MSI
+ select PPC4xx_MSI
help
This option enables support for the AMCC PPC440SPe evaluation board.
@@ -186,6 +186,16 @@ config ISS4xx
help
This option enables support for the IBM ISS simulation environment
+config CURRITUCK
+ bool "IBM Currituck (476fpe) Support"
+ depends on PPC_47x
+ default n
+ select SWIOTLB
+ select 476FPE
+ select PPC4xx_PCI_EXPRESS
+ help
+ This option enables support for the IBM Currituck (476fpe) evaluation board
+
config ICON
bool "Icon"
depends on 44x
@@ -197,22 +207,6 @@ config ICON
help
This option enables support for the AMCC PPC440SPe evaluation board.
-#config LUAN
-# bool "Luan"
-# depends on 44x
-# default n
-# select 440SP
-# help
-# This option enables support for the IBM PPC440SP evaluation board.
-
-#config OCOTEA
-# bool "Ocotea"
-# depends on 44x
-# default n
-# select 440GX
-# help
-# This option enables support for the IBM PPC440GX evaluation board.
-
config XILINX_VIRTEX440_GENERIC_BOARD
bool "Generic Xilinx Virtex 5 FXT board support"
depends on 44x
@@ -308,6 +302,10 @@ config 460SX
select IBM_EMAC_ZMII
select IBM_EMAC_TAH
+config 476FPE
+ bool
+ select PPC_FPU
+
config APM821xx
bool
select PPC_FPU
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 553db600721..d03833abec0 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o
obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
+obj-$(CONFIG_CURRITUCK) += currituck.o
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
new file mode 100644
index 00000000000..3f6229b5dee
--- /dev/null
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -0,0 +1,204 @@
+/*
+ * Currituck board specific routines
+ *
+ * Copyright © 2011 Tony Breeds IBM Corporation
+ *
+ * Based on earlier code:
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003-2005 Zultys Technologies
+ *
+ * Rewritten and ported to the merged powerpc tree:
+ * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ * Copyright © 2011 David Kliekamp IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+#include <linux/pci.h>
+
+static __initdata struct of_device_id ppc47x_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,plb6", },
+ { .compatible = "ibm,opb", },
+ { .compatible = "ibm,ebc", },
+ {},
+};
+
+/* The EEPROM is missing and the default values are bogus. This forces USB
+ * into EHCI mode */
+static void __devinit quirk_ppc_currituck_usb_fixup(struct pci_dev *dev)
+{
+ if (of_machine_is_compatible("ibm,currituck")) {
+ pci_write_config_dword(dev, 0xe0, 0x0114231f);
+ pci_write_config_dword(dev, 0xe4, 0x00006c40);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
+
+static int __init ppc47x_device_probe(void)
+{
+ of_platform_bus_probe(NULL, ppc47x_of_bus, NULL);
+
+ return 0;
+}
+machine_device_initcall(ppc47x, ppc47x_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init ppc47x_init_irq(void)
+{
+ struct device_node *np;
+
+ /* Find top level interrupt controller */
+ for_each_node_with_property(np, "interrupt-controller") {
+ if (of_get_property(np, "interrupts", NULL) == NULL)
+ break;
+ }
+ if (np == NULL)
+ panic("Can't find top level interrupt controller");
+
+ /* Check type and do appropriate initialization */
+ if (of_device_is_compatible(np, "chrp,open-pic")) {
+ /* The MPIC driver will get everything it needs from the
+ * device-tree, just pass 0 to all arguments
+ */
+ struct mpic *mpic =
+ mpic_alloc(np, 0, 0, 0, 0, " MPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+ ppc_md.get_irq = mpic_get_irq;
+ } else
+ panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_ppc47x_setup_cpu(int cpu)
+{
+ mpic_setup_this_cpu();
+}
+
+static int __cpuinit smp_ppc47x_kick_cpu(int cpu)
+{
+ struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+ const u64 *spin_table_addr_prop;
+ u32 *spin_table;
+ extern void start_secondary_47x(void);
+
+ BUG_ON(cpunode == NULL);
+
+ /* Assume spin table. We could test for the enable-method in
+ * the device-tree but currently there's little point as it's
+ * our only supported method
+ */
+ spin_table_addr_prop =
+ of_get_property(cpunode, "cpu-release-addr", NULL);
+
+ if (spin_table_addr_prop == NULL) {
+ pr_err("CPU%d: Can't start, missing cpu-release-addr !\n",
+ cpu);
+ return 1;
+ }
+
+ /* Assume it's mapped as part of the linear mapping. This is a bit
+ * fishy but will work fine for now
+ *
+ * XXX: Is there any reason to assume differently?
+ */
+ spin_table = (u32 *)__va(*spin_table_addr_prop);
+ pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+ spin_table[3] = cpu;
+ smp_wmb();
+ spin_table[1] = __pa(start_secondary_47x);
+ mb();
+
+ return 0;
+}
+
+static struct smp_ops_t ppc47x_smp_ops = {
+ .probe = smp_mpic_probe,
+ .message_pass = smp_mpic_message_pass,
+ .setup_cpu = smp_ppc47x_setup_cpu,
+ .kick_cpu = smp_ppc47x_kick_cpu,
+ .give_timebase = smp_generic_give_timebase,
+ .take_timebase = smp_generic_take_timebase,
+};
+
+static void __init ppc47x_smp_init(void)
+{
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ smp_ops = &ppc47x_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init ppc47x_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init ppc47x_setup_arch(void)
+{
+
+ /* No need to check the DMA config as we /know/ our windows are all of
+ * RAM. Let's hope that doesn't change */
+#ifdef CONFIG_SWIOTLB
+ if (memblock_end_of_DRAM() > 0xffffffff) {
+ ppc_swiotlb_enable = 1;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ }
+#endif
+ ppc47x_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init ppc47x_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,currituck"))
+ return 0;
+
+ return 1;
+}
+
+/* The USB controller interrupt should have been hardware swizzled but it wasn't :( */
+static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
+{
+ if (dev->vendor == 0x1033 && (dev->device == 0x0035 ||
+ dev->device == 0x00e0)) {
+ dev->irq = irq_create_mapping(NULL, 47);
+ pr_info("%s: Mapping irq 47 %d\n", __func__, dev->irq);
+ }
+}
+
+define_machine(ppc47x) {
+ .name = "PowerPC 47x",
+ .probe = ppc47x_probe,
+ .progress = udbg_progress,
+ .init_IRQ = ppc47x_init_irq,
+ .setup_arch = ppc47x_setup_arch,
+ .pci_irq_fixup = ppc47x_pci_irq_fixup,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
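
smp_ppc47x_kick_cpu() above releases a secondary through a spin table: the CPU number is published in word 3 before the release address is written to word 1, with smp_wmb()/mb() enforcing that order. A rough sketch of what the waiting secondary is assumed to do; the secondary-side loop is boot-code behaviour and not part of this file:

/* Illustrative only: the consumer side of the spin-table handshake.
 * Word layout mirrors the writes in smp_ppc47x_kick_cpu():
 *   spin_table[3] = logical CPU number, written first
 *   spin_table[1] = physical entry point, written last */
static void example_secondary_spin(volatile u32 *spin_table)
{
        unsigned long entry;

        while ((entry = spin_table[1]) == 0)
                ;       /* released once word 1 goes non-zero */

        /* spin_table[3] already holds this CPU's number; control then
         * jumps to the physical address in 'entry' (start_secondary_47x
         * in this patch). */
}
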
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index 19395f18b1d..5b8cdbb82f8 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -71,7 +71,7 @@ static void __init iss4xx_init_irq(void)
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
- struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+ struct mpic *mpic = mpic_alloc(np, 0, 0, 0, 0,
" MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index b3ebce1aec0..c16999802ec 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -12,7 +12,6 @@ config MPC5121_ADS
bool "Freescale MPC5121E ADS"
depends on PPC_MPC512x
select DEFAULT_UIMAGE
- select MPC5121_ADS_CPLD
help
This option enables support for the MPC5121E ADS board.
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index aa0d84d2258..464ea8e0292 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -36,38 +36,7 @@ static void __init asp834x_setup_arch(void)
mpc834x_usb_cfg();
}
-static void __init asp834x_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- of_node_put(np);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
-
-static struct __initdata of_device_id asp8347_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init asp8347_declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, asp8347_ids, NULL);
- return 0;
-}
-machine_device_initcall(asp834x, asp8347_declare_of_platform_devices);
+machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -82,7 +51,7 @@ define_machine(asp834x) {
.name = "ASP8347E",
.probe = asp834x_probe,
.setup_arch = asp834x_setup_arch,
- .init_IRQ = asp834x_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index c55129f5760..65eb792a0d0 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -51,15 +51,14 @@
*/
static void __init mpc83xx_km_setup_arch(void)
{
+#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
+#endif
if (ppc_md.progress)
ppc_md.progress("kmpbec83xx_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
@@ -122,54 +121,7 @@ static void __init mpc83xx_km_setup_arch(void)
#endif /* CONFIG_QUICC_ENGINE */
}
-static struct of_device_id kmpbec83xx_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .type = "qe", },
- { .compatible = "fsl,qe", },
- {},
-};
-
-static int __init kmeter_declare_of_platform_devices(void)
-{
- /* Publish the QE devices */
- of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices);
-
-static void __init mpc83xx_km_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,pq2pro-pic");
- if (!np) {
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
- }
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
- of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return;
- }
- qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
- of_node_put(np);
-#endif /* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices);
/* list of the supported boards */
static char *board[] __initdata = {
@@ -198,7 +150,7 @@ define_machine(mpc83xx_km) {
.name = "mpc83xx-km-platform",
.probe = mpc83xx_km_probe,
.setup_arch = mpc83xx_km_setup_arch,
- .init_IRQ = mpc83xx_km_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index f01806c940e..125336f750c 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -11,10 +11,15 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
+#include <asm/ipic.h>
+#include <asm/qe_ic.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
#include "mpc83xx.h"
@@ -65,3 +70,75 @@ long __init mpc83xx_time_init(void)
return 0;
}
+
+void __init mpc83xx_ipic_init_IRQ(void)
+{
+ struct device_node *np;
+
+ /* look for fsl,pq2pro-pic, which is also compatible with fsl,ipic */
+ np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
+ if (!np)
+ np = of_find_node_by_type(NULL, "ipic");
+ if (!np)
+ return;
+
+ ipic_init(np, 0);
+
+ of_node_put(np);
+
+ /* Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ ipic_set_default_priority();
+}
+
+#ifdef CONFIG_QUICC_ENGINE
+void __init mpc83xx_qe_init_IRQ(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (!np) {
+ np = of_find_node_by_type(NULL, "qeic");
+ if (!np)
+ return;
+ }
+ qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
+ of_node_put(np);
+}
+
+void __init mpc83xx_ipic_and_qe_init_IRQ(void)
+{
+ mpc83xx_ipic_init_IRQ();
+ mpc83xx_qe_init_IRQ();
+}
+#endif /* CONFIG_QUICC_ENGINE */
+
+static struct of_device_id __initdata of_bus_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .compatible = "simple-bus" },
+ { .compatible = "gianfar" },
+ { .compatible = "gpio-leds", },
+ { .type = "qe", },
+ { .compatible = "fsl,qe", },
+ {},
+};
+
+int __init mpc83xx_declare_of_platform_devices(void)
+{
+ of_platform_bus_probe(NULL, of_bus_ids, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PCI
+void __init mpc83xx_setup_pci(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
+ mpc83xx_add_bridge(np);
+ for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
+ mpc83xx_add_bridge(np);
+}
+#endif
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index d0c4e15b779..4f2d9fea77b 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -27,36 +27,13 @@
*/
static void __init mpc830x_rdb_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8308-pcie")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc831x_usb_cfg();
}
-static void __init mpc830x_rdb_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
-
static const char *board[] __initdata = {
"MPC8308RDB",
"fsl,mpc8308rdb",
@@ -72,24 +49,13 @@ static int __init mpc830x_rdb_probe(void)
return of_flat_dt_match(of_get_flat_dt_root(), board);
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .compatible = "simple-bus" },
- { .compatible = "gianfar" },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
- return 0;
-}
-machine_device_initcall(mpc830x_rdb, declare_of_platform_devices);
+machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
define_machine(mpc830x_rdb) {
.name = "MPC830x RDB",
.probe = mpc830x_rdb_probe,
.setup_arch = mpc830x_rdb_setup_arch,
- .init_IRQ = mpc830x_rdb_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index f859ead49a8..fa25977c52d 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -28,38 +28,13 @@
*/
static void __init mpc831x_rdb_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
- for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc831x_usb_cfg();
}
-static void __init mpc831x_rdb_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
-
static const char *board[] __initdata = {
"MPC8313ERDB",
"fsl,mpc8315erdb",
@@ -74,25 +49,13 @@ static int __init mpc831x_rdb_probe(void)
return of_flat_dt_match(of_get_flat_dt_root(), board);
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .compatible = "simple-bus" },
- { .compatible = "gianfar" },
- { .compatible = "gpio-leds", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
- return 0;
-}
-machine_device_initcall(mpc831x_rdb, declare_of_platform_devices);
+machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices);
define_machine(mpc831x_rdb) {
.name = "MPC831x RDB",
.probe = mpc831x_rdb_probe,
.setup_arch = mpc831x_rdb_setup_arch,
- .init_IRQ = mpc831x_rdb_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 32a52896822..e36bc611dd6 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -72,10 +72,7 @@ static void __init mpc832x_sys_setup_arch(void)
of_node_put(np);
}
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
@@ -101,51 +98,7 @@ static void __init mpc832x_sys_setup_arch(void)
#endif /* CONFIG_QUICC_ENGINE */
}
-static struct of_device_id mpc832x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .type = "qe", },
- { .compatible = "fsl,qe", },
- {},
-};
-
-static int __init mpc832x_declare_of_platform_devices(void)
-{
- /* Publish the QE devices */
- of_platform_bus_probe(NULL, mpc832x_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc832x_mds, mpc832x_declare_of_platform_devices);
-
-static void __init mpc832x_sys_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
- of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return;
- }
- qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
- of_node_put(np);
-#endif /* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -161,7 +114,7 @@ define_machine(mpc832x_mds) {
.name = "MPC832x MDS",
.probe = mpc832x_sys_probe,
.setup_arch = mpc832x_sys_setup_arch,
- .init_IRQ = mpc832x_sys_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 17f99745f0e..eff5baabc3f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -193,17 +193,14 @@ machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
*/
static void __init mpc832x_rdb_setup_arch(void)
{
-#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
+#if defined(CONFIG_QUICC_ENGINE)
struct device_node *np;
#endif
if (ppc_md.progress)
ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
@@ -218,52 +215,7 @@ static void __init mpc832x_rdb_setup_arch(void)
#endif /* CONFIG_QUICC_ENGINE */
}
-static struct of_device_id mpc832x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .type = "qe", },
- { .compatible = "fsl,qe", },
- {},
-};
-
-static int __init mpc832x_declare_of_platform_devices(void)
-{
- /* Publish the QE devices */
- of_platform_bus_probe(NULL, mpc832x_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
-
-static void __init mpc832x_rdb_init_IRQ(void)
-{
-
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
- of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return;
- }
- qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
- of_node_put(np);
-#endif /* CONFIG_QUICC_ENGINE */
-}
+machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -279,7 +231,7 @@ define_machine(mpc832x_rdb) {
.name = "MPC832x RDB",
.probe = mpc832x_rdb_probe,
.setup_arch = mpc832x_rdb_setup_arch,
- .init_IRQ = mpc832x_rdb_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 6b45969567d..39849dd1b5b 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -41,13 +41,12 @@
static struct of_device_id __initdata mpc834x_itx_ids[] = {
{ .compatible = "fsl,pq2pro-localbus", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
{},
};
static int __init mpc834x_itx_declare_of_platform_devices(void)
{
+ mpc83xx_declare_of_platform_devices();
return of_platform_bus_probe(NULL, mpc834x_itx_ids, NULL);
}
machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
@@ -59,37 +58,14 @@ machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
*/
static void __init mpc834x_itx_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc834x_itx_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc834x_usb_cfg();
}
-static void __init mpc834x_itx_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
-
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
@@ -104,7 +80,7 @@ define_machine(mpc834x_itx) {
.name = "MPC834x ITX",
.probe = mpc834x_itx_probe,
.setup_arch = mpc834x_itx_setup_arch,
- .init_IRQ = mpc834x_itx_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 041c5177e73..5828d8e97c3 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -77,51 +77,15 @@ static int mpc834xemds_usb_cfg(void)
*/
static void __init mpc834x_mds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc834x_mds_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc834xemds_usb_cfg();
}
-static void __init mpc834x_mds_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
-
-static struct of_device_id mpc834x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init mpc834x_declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, mpc834x_ids, NULL);
- return 0;
-}
-machine_device_initcall(mpc834x_mds, mpc834x_declare_of_platform_devices);
+machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -137,7 +101,7 @@ define_machine(mpc834x_mds) {
.name = "MPC834x MDS",
.probe = mpc834x_mds_probe,
.setup_arch = mpc834x_mds_setup_arch,
- .init_IRQ = mpc834x_mds_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 934cc8c46bb..ad8e4bcd7d5 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -80,10 +80,7 @@ static void __init mpc836x_mds_setup_arch(void)
of_node_put(np);
}
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
@@ -144,23 +141,7 @@ static void __init mpc836x_mds_setup_arch(void)
#endif /* CONFIG_QUICC_ENGINE */
}
-static struct of_device_id mpc836x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .type = "qe", },
- { .compatible = "fsl,qe", },
- {},
-};
-
-static int __init mpc836x_declare_of_platform_devices(void)
-{
- /* Publish the QE devices */
- of_platform_bus_probe(NULL, mpc836x_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
+machine_device_initcall(mpc836x_mds, mpc83xx_declare_of_platform_devices);
#ifdef CONFIG_QE_USB
static int __init mpc836x_usb_cfg(void)
@@ -226,34 +207,6 @@ err:
machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
#endif /* CONFIG_QE_USB */
-static void __init mpc836x_mds_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
- of_node_put(np);
-
-#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np) {
- np = of_find_node_by_type(NULL, "qeic");
- if (!np)
- return;
- }
- qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
- of_node_put(np);
-#endif /* CONFIG_QUICC_ENGINE */
-}
-
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
@@ -268,7 +221,7 @@ define_machine(mpc836x_mds) {
.name = "MPC836x MDS",
.probe = mpc836x_mds_probe,
.setup_arch = mpc836x_mds_setup_arch,
- .init_IRQ = mpc836x_mds_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index b0090aac964..f8769d713d6 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -27,61 +27,19 @@
#include "mpc83xx.h"
-static struct of_device_id __initdata mpc836x_rdk_ids[] = {
- { .compatible = "simple-bus", },
- {},
-};
-
-static int __init mpc836x_rdk_declare_of_platform_devices(void)
-{
- return of_platform_bus_probe(NULL, mpc836x_rdk_ids, NULL);
-}
-machine_device_initcall(mpc836x_rdk, mpc836x_rdk_declare_of_platform_devices);
+machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices);
static void __init mpc836x_rdk_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
#endif
}
-static void __init mpc836x_rdk_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /*
- * Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
- of_node_put(np);
-#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
- if (!np)
- return;
-
- qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
- of_node_put(np);
-#endif
-}
-
/*
* Called very early, MMU is off, device-tree isn't unflattened.
*/
@@ -96,7 +54,7 @@ define_machine(mpc836x_rdk) {
.name = "MPC836x RDK",
.probe = mpc836x_rdk_probe,
.setup_arch = mpc836x_rdk_setup_arch,
- .init_IRQ = mpc836x_rdk_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_and_qe_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 83068322abd..e53a60b6c86 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -79,54 +79,14 @@ out:
*/
static void __init mpc837x_mds_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc837x_mds_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
- for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc837xmds_usb_cfg();
}
-static struct of_device_id mpc837x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init mpc837x_declare_of_platform_devices(void)
-{
- /* Publish platform_device */
- of_platform_bus_probe(NULL, mpc837x_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc837x_mds, mpc837x_declare_of_platform_devices);
-
-static void __init mpc837x_mds_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
+machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -142,7 +102,7 @@ define_machine(mpc837x_mds) {
.name = "MPC837x MDS",
.probe = mpc837x_mds_probe,
.setup_arch = mpc837x_mds_setup_arch,
- .init_IRQ = mpc837x_mds_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 7bafbf2ec0f..16c9c9cbbb7 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -50,56 +50,15 @@ static void mpc837x_rdb_sd_cfg(void)
*/
static void __init mpc837x_rdb_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
- for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie")
- mpc83xx_add_bridge(np);
-#endif
+ mpc83xx_setup_pci();
mpc837x_usb_cfg();
mpc837x_rdb_sd_cfg();
}
-static struct of_device_id mpc837x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- { .compatible = "gpio-leds", },
- {},
-};
-
-static int __init mpc837x_declare_of_platform_devices(void)
-{
- /* Publish platform_device */
- of_platform_bus_probe(NULL, mpc837x_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc837x_rdb, mpc837x_declare_of_platform_devices);
-
-static void __init mpc837x_rdb_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-}
+machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices);
static const char *board[] __initdata = {
"fsl,mpc8377rdb",
@@ -121,7 +80,7 @@ define_machine(mpc837x_rdb) {
.name = "MPC837x RDB/WLAN",
.probe = mpc837x_rdb_probe,
.setup_arch = mpc837x_rdb_setup_arch,
- .init_IRQ = mpc837x_rdb_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 82a434510d8..0cf74d7ea1c 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -70,5 +70,21 @@ extern long mpc83xx_time_init(void);
extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
extern int mpc831x_usb_cfg(void);
+extern void mpc83xx_ipic_init_IRQ(void);
+#ifdef CONFIG_QUICC_ENGINE
+extern void mpc83xx_qe_init_IRQ(void);
+extern void mpc83xx_ipic_and_qe_init_IRQ(void);
+#else
+static inline void __init mpc83xx_qe_init_IRQ(void) {}
+#define mpc83xx_ipic_and_qe_init_IRQ mpc83xx_ipic_init_IRQ
+#endif /* CONFIG_QUICC_ENGINE */
+
+#ifdef CONFIG_PCI
+extern void mpc83xx_setup_pci(void);
+#else
+#define mpc83xx_setup_pci() do {} while (0)
+#endif
+
+extern int mpc83xx_declare_of_platform_devices(void);
#endif /* __MPC83XX_H__ */
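
With these helpers exported, a board port reduces to a probe routine plus the shared callbacks; every file converted in this patch follows the same shape. A minimal sketch of a hypothetical new 83xx board built on them; the compatible string and machine name are placeholders:

/* Illustrative only: skeleton of an 83xx board file using the shared
 * helpers declared above.  "vendor,example-83xx" is made up. */
static void __init example83xx_setup_arch(void)
{
        if (ppc_md.progress)
                ppc_md.progress("example83xx_setup_arch()", 0);

        mpc83xx_setup_pci();            /* compiles to a no-op when CONFIG_PCI is off */
}

machine_device_initcall(example83xx, mpc83xx_declare_of_platform_devices);

static int __init example83xx_probe(void)
{
        return of_flat_dt_is_compatible(of_get_flat_dt_root(),
                                        "vendor,example-83xx");
}

define_machine(example83xx) {
        .name           = "Example 83xx",
        .probe          = example83xx_probe,
        .setup_arch     = example83xx_setup_arch,
        .init_IRQ       = mpc83xx_ipic_init_IRQ,
        .get_irq        = ipic_get_irq,
        .restart        = mpc83xx_restart,
        .time_init      = mpc83xx_time_init,
        .calibrate_decr = generic_calibrate_decr,
};
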
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index af41d8c810a..8a81d7640b1 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -48,52 +48,13 @@
*/
static void __init sbc834x_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("sbc834x_setup_arch()", 0);
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
- mpc83xx_add_bridge(np);
-#endif
-
+ mpc83xx_setup_pci();
}
-static void __init sbc834x_init_IRQ(void)
-{
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "ipic");
- if (!np)
- return;
-
- ipic_init(np, 0);
-
- /* Initialize the default interrupt mapping priorities,
- * in case the boot rom changed something on us.
- */
- ipic_set_default_priority();
-
- of_node_put(np);
-}
-
-static struct __initdata of_device_id sbc834x_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init sbc834x_declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, sbc834x_ids, NULL);
- return 0;
-}
-machine_device_initcall(sbc834x, sbc834x_declare_of_platform_devices);
+machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
@@ -102,14 +63,14 @@ static int __init sbc834x_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "SBC834x");
+ return of_flat_dt_is_compatible(root, "SBC834xE");
}
define_machine(sbc834x) {
- .name = "SBC834x",
+ .name = "SBC834xE",
.probe = sbc834x_probe,
.setup_arch = sbc834x_setup_arch,
- .init_IRQ = sbc834x_init_IRQ,
+ .init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 45023e26aea..d7946be298b 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,7 @@ config P3060_QDS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index bc5acb95917..9cb2d4320dc 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -3,6 +3,8 @@
#
obj-$(CONFIG_SMP) += smp.o
+obj-y += common.o
+
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
new file mode 100644
index 00000000000..9fef5302adc
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -0,0 +1,66 @@
+/*
+ * Routines common to most mpc85xx-based boards.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of_platform.h>
+
+#include <sysdev/cpm2_pic.h>
+
+#include "mpc85xx.h"
+
+static struct of_device_id __initdata mpc85xx_common_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .compatible = "simple-bus", },
+ { .name = "cpm", },
+ { .name = "localbus", },
+ { .compatible = "gianfar", },
+ { .compatible = "fsl,qe", },
+ { .compatible = "fsl,cpm2", },
+ { .compatible = "fsl,srio", },
+ {},
+};
+
+int __init mpc85xx_common_publish_devices(void)
+{
+ return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
+}
+#ifdef CONFIG_CPM2
+static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cascade_irq;
+
+ while ((cascade_irq = cpm2_get_irq()) >= 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+
+void __init mpc85xx_cpm2_pic_init(void)
+{
+ struct device_node *np;
+ int irq;
+
+ /* Setup CPM2 PIC */
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
+ if (np == NULL) {
+ printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
+ return;
+ }
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ of_node_put(np);
+ printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
+ return;
+ }
+
+ cpm2_pic_init(np);
+ of_node_put(np);
+ irq_set_chained_handler(irq, cpm2_cascade);
+}
+#endif
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 802ad110b75..07e3e6c4737 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -31,32 +31,18 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "smp.h"
void __init corenet_ds_pic_init(void)
{
struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
- unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN |
+ unsigned int flags = MPIC_BIG_ENDIAN |
MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU;
- np = of_find_node_by_type(np, "open-pic");
-
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
- mpic = mpic_alloc(np, r.start, flags, 0, 256, " OpenPIC ");
+ mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
@@ -65,10 +51,6 @@ void __init corenet_ds_pic_init(void)
/*
* Setup the architecture
*/
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
void __init corenet_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
@@ -77,9 +59,7 @@ void __init corenet_ds_setup_arch(void)
#endif
dma_addr_t max = 0xffffffff;
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
#ifdef CONFIG_PCI
for_each_node_by_type(np, "pci") {
@@ -112,7 +92,7 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
.compatible = "simple-bus"
},
{
- .compatible = "fsl,rapidio-delta",
+ .compatible = "fsl,srio",
},
{
.compatible = "fsl,p4080-pcie",
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index c46f9359be1..20f75d7819c 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -35,6 +35,7 @@
#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>
+#include "mpc85xx.h"
#define KSI8560_CPLD_HVR 0x04 /* Hardware Version Register */
#define KSI8560_CPLD_PVR 0x08 /* PLD Version Register */
@@ -54,60 +55,15 @@ static void machine_restart(char *cmd)
for (;;);
}
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cascade_irq;
-
- while ((cascade_irq = cpm2_get_irq()) >= 0)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
static void __init ksi8560_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-#ifdef CONFIG_CPM2
- int irq;
-#endif
-
- np = of_find_node_by_type(NULL, "open-pic");
-
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-#ifdef CONFIG_CPM2
- /* Setup CPM2 PIC */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
- if (np == NULL) {
- printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
- return;
- }
- irq = irq_of_parse_and_map(np, 0);
-
- cpm2_pic_init(np);
- of_node_put(np);
- irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+ mpc85xx_cpm2_pic_init();
}
#ifdef CONFIG_CPM2
@@ -215,22 +171,7 @@ static void ksi8560_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .type = "soc", },
- { .type = "simple-bus", },
- { .name = "cpm", },
- { .name = "localbus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(ksi8560, declare_of_platform_devices);
+machine_device_initcall(ksi8560, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index f79f2f10214..cf266826682 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -32,31 +32,15 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
void __init mpc8536_ds_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
}
@@ -104,19 +88,7 @@ static void __init mpc8536_ds_setup_arch(void)
printk("MPC8536 DS board from Freescale Semiconductor\n");
}
-static struct of_device_id __initdata mpc8536_ds_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init mpc8536_ds_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, mpc8536_ds_ids, NULL);
-}
-machine_device_initcall(mpc8536_ds, mpc8536_ds_publish_devices);
+machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
new file mode 100644
index 00000000000..2aa7c5dc2c7
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -0,0 +1,11 @@
+#ifndef MPC85xx_H
+#define MPC85xx_H
+extern int mpc85xx_common_publish_devices(void);
+
+#ifdef CONFIG_CPM2
+extern void mpc85xx_cpm2_pic_init(void);
+#else
+static inline void __init mpc85xx_cpm2_pic_init(void) {}
+#endif /* CONFIG_CPM2 */
+
+#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 3b2c9bb6619..3bebb5173bf 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -35,6 +35,8 @@
#include <sysdev/cpm2_pic.h>
#endif
+#include "mpc85xx.h"
+
#ifdef CONFIG_PCI
static int mpc85xx_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn)
@@ -46,63 +48,15 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
}
#endif /* CONFIG_PCI */
-#ifdef CONFIG_CPM2
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cascade_irq;
-
- while ((cascade_irq = cpm2_get_irq()) >= 0)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* CONFIG_CPM2 */
-
static void __init mpc85xx_ads_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-#ifdef CONFIG_CPM2
- int irq;
-#endif
-
- np = of_find_node_by_type(np, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-#ifdef CONFIG_CPM2
- /* Setup CPM2 PIC */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
- if (np == NULL) {
- printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
- return;
- }
- irq = irq_of_parse_and_map(np, 0);
-
- cpm2_pic_init(np);
- of_node_put(np);
- irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+ mpc85xx_cpm2_pic_init();
}
/*
@@ -221,23 +175,7 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .name = "soc", },
- { .type = "soc", },
- { .name = "cpm", },
- { .name = "localbus", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(mpc85xx_ads, declare_of_platform_devices);
+machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 66cb8d64079..40f03da616a 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -46,6 +46,8 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
/* CADMUS info */
/* xxx - galak, move into device tree */
#define CADMUS_BASE (0xf8004000)
@@ -177,7 +179,7 @@ static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
static struct irqaction mpc85xxcds_8259_irqaction = {
.handler = mpc85xx_8259_cascade_action,
- .flags = IRQF_SHARED,
+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
.name = "8259 cascade",
};
#endif /* PPC_I8259 */
@@ -186,30 +188,10 @@ static struct irqaction mpc85xxcds_8259_irqaction = {
static void __init mpc85xx_cds_pic_init(void)
{
struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-
- np = of_find_node_by_type(np, "open-pic");
-
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
-
- /* Return the mpic node */
- of_node_put(np);
-
mpic_init(mpic);
}
@@ -330,19 +312,7 @@ static int __init mpc85xx_cds_probe(void)
return of_flat_dt_is_compatible(root, "MPC85xxCDS");
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- return of_platform_bus_probe(NULL, of_bus_ids, NULL);
-}
-machine_device_initcall(mpc85xx_cds, declare_of_platform_devices);
+machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
define_machine(mpc85xx_cds) {
.name = "MPC85xx CDS",
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 1b9a8cf1873..eefbb91e1d6 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -35,6 +35,9 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
#undef DEBUG
@@ -60,43 +63,27 @@ static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
void __init mpc85xx_ds_pic_init(void)
{
struct mpic *mpic;
- struct resource r;
- struct device_node *np;
#ifdef CONFIG_PPC_I8259
+ struct device_node *np;
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
unsigned long root = of_get_flat_dt_root();
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY |
+ mpic = mpic_alloc(NULL, 0,
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
}
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
#ifdef CONFIG_PPC_I8259
@@ -152,9 +139,6 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
/*
* Setup the architecture
*/
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
static void __init mpc85xx_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
@@ -187,9 +171,7 @@ static void __init mpc85xx_ds_setup_arch(void)
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
#ifdef CONFIG_SWIOTLB
if (memblock_end_of_DRAM() > max) {
@@ -219,21 +201,9 @@ static int __init mpc8544_ds_probe(void)
return 0;
}
-static struct of_device_id __initdata mpc85xxds_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init mpc85xxds_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, mpc85xxds_ids, NULL);
-}
-machine_device_initcall(mpc8544_ds, mpc85xxds_publish_devices);
-machine_device_initcall(mpc8572_ds, mpc85xxds_publish_devices);
-machine_device_initcall(p2020_ds, mpc85xxds_publish_devices);
+machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
+machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
+machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index a23a3ff634c..1d15a0cd2c8 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -51,6 +51,9 @@
#include <asm/qe_ic.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
#undef DEBUG
#ifdef DEBUG
@@ -153,30 +156,7 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
* Setup the architecture
*
*/
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
-
#ifdef CONFIG_QUICC_ENGINE
-static struct of_device_id mpc85xx_qe_ids[] __initdata = {
- { .type = "qe", },
- { .compatible = "fsl,qe", },
- { },
-};
-
-static void __init mpc85xx_publish_qe_devices(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!of_device_is_available(np)) {
- of_node_put(np);
- return;
- }
-
- of_platform_bus_probe(NULL, mpc85xx_qe_ids, NULL);
-}
-
static void __init mpc85xx_mds_reset_ucc_phys(void)
{
struct device_node *np;
@@ -347,7 +327,6 @@ static void __init mpc85xx_mds_qeic_init(void)
of_node_put(np);
}
#else
-static void __init mpc85xx_publish_qe_devices(void) { }
static void __init mpc85xx_mds_qe_init(void) { }
static void __init mpc85xx_mds_qeic_init(void) { }
#endif /* CONFIG_QUICC_ENGINE */
@@ -381,9 +360,7 @@ static void __init mpc85xx_mds_setup_arch(void)
}
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
mpc85xx_mds_qe_init();
@@ -429,24 +406,11 @@ machine_arch_initcall(mpc8568_mds, board_fixups);
machine_arch_initcall(mpc8569_mds, board_fixups);
static struct of_device_id mpc85xx_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- { .compatible = "fsl,rapidio-delta", },
{ .compatible = "fsl,mpc8548-guts", },
{ .compatible = "gpio-leds", },
{},
};
-static struct of_device_id p1021_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
static int __init mpc85xx_publish_devices(void)
{
if (machine_is(mpc8568_mds))
@@ -454,23 +418,15 @@ static int __init mpc85xx_publish_devices(void)
if (machine_is(mpc8569_mds))
simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
+ mpc85xx_common_publish_devices();
of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
- mpc85xx_publish_qe_devices();
-
- return 0;
-}
-
-static int __init p1021_publish_devices(void)
-{
- of_platform_bus_probe(NULL, p1021_ids, NULL);
- mpc85xx_publish_qe_devices();
return 0;
}
machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices);
-machine_device_initcall(p1021_mds, p1021_publish_devices);
+machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices);
machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
@@ -478,26 +434,11 @@ machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier);
static void __init mpc85xx_mds_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np)
- return;
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
mpic_init(mpic);
mpc85xx_mds_qeic_init();
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index f5ff9110c97..ccf520e890b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -29,6 +29,9 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
#undef DEBUG
@@ -42,49 +45,28 @@
void __init mpc85xx_rdb_pic_init(void)
{
struct mpic *mpic;
- struct resource r;
- struct device_node *np;
unsigned long root = of_get_flat_dt_root();
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY |
+ mpic = mpic_alloc(NULL, 0,
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
}
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-
}
/*
* Setup the architecture
*/
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
static void __init mpc85xx_rdb_setup_arch(void)
{
#ifdef CONFIG_PCI
@@ -102,27 +84,12 @@ static void __init mpc85xx_rdb_setup_arch(void)
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
-
printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
}
-static struct of_device_id __initdata mpc85xxrdb_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init mpc85xxrdb_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL);
-}
-machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices);
-machine_device_initcall(p1020_rdb, mpc85xxrdb_publish_devices);
+machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
+machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index d7387fa7f53..538bc3f57e9 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -28,33 +28,18 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
void __init p1010_rdb_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
mpic_init(mpic);
-
}
@@ -81,18 +66,7 @@ static void __init p1010_rdb_setup_arch(void)
printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
}
-static struct of_device_id __initdata p1010rdb_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- {},
-};
-
-static int __init p1010rdb_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, p1010rdb_ids, NULL);
-}
-machine_device_initcall(p1010_rdb, p1010rdb_publish_devices);
+machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices);
machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
/*
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index fda15716fad..bb3d84f4046 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -26,6 +26,9 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/fsl_guts.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
@@ -238,38 +241,15 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
void __init p1022_ds_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np) {
- pr_err("Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- pr_err("Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
-
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
}
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
/*
* Setup the architecture
*/
@@ -309,9 +289,7 @@ static void __init p1022_ds_setup_arch(void)
diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
#ifdef CONFIG_SWIOTLB
if (memblock_end_of_DRAM() > max) {
@@ -325,10 +303,6 @@ static void __init p1022_ds_setup_arch(void)
}
static struct of_device_id __initdata p1022_ds_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
/* So that the DMA channel nodes can be probed individually: */
{ .compatible = "fsl,eloplus-dma", },
{},
@@ -336,6 +310,7 @@ static struct of_device_id __initdata p1022_ds_ids[] = {
static int __init p1022_ds_publish_devices(void)
{
+ mpc85xx_common_publish_devices();
return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
}
machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 835e0b335bf..d951e7027bb 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -30,19 +30,18 @@
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
+#include "smp.h"
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
/* ************************************************************************
*
* Setup the architecture
*
*/
-#ifdef CONFIG_SMP
-void __init mpc85xx_smp_init(void);
-#endif
-
static void __init mpc85xx_rds_setup_arch(void)
{
struct device_node *np;
@@ -87,53 +86,19 @@ static void __init mpc85xx_rds_setup_arch(void)
fsl_add_bridge(np, 0);
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
-}
-
-static struct of_device_id p1023_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- {},
-};
-
-
-static int __init p1023_publish_devices(void)
-{
- of_platform_bus_probe(NULL, p1023_ids, NULL);
-
- return 0;
}
-machine_device_initcall(p1023_rds, p1023_publish_devices);
+machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
static void __init mpc85xx_rds_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
mpic_init(mpic);
}
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
index 01dcf44871e..081cf4ac188 100644
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -70,7 +70,7 @@ define_machine(p3060_qds) {
.power_save = e500_idle,
};
-machine_device_initcall(p3060_qds, declare_of_platform_devices);
+machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 14632a97122..184a5078461 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -48,35 +48,16 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
static int sbc_rev;
static void __init sbc8548_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-
- np = of_find_node_by_type(np, "open-pic");
-
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
-
- /* Return the mpic node */
- of_node_put(np);
-
mpic_init(mpic);
}
@@ -149,21 +130,7 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .name = "soc", },
- { .type = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(sbc8548, declare_of_platform_devices);
+machine_device_initcall(sbc8548, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index cebd786dc33..940752e9305 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -32,68 +32,22 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>
#endif
-#ifdef CONFIG_CPM2
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cascade_irq;
-
- while ((cascade_irq = cpm2_get_irq()) >= 0)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* CONFIG_CPM2 */
-
static void __init sbc8560_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np = NULL;
-#ifdef CONFIG_CPM2
- int irq;
-#endif
-
- np = of_find_node_by_type(np, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-#ifdef CONFIG_CPM2
- /* Setup CPM2 PIC */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
- if (np == NULL) {
- printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
- return;
- }
- irq = irq_of_parse_and_map(np, 0);
-
- cpm2_pic_init(np);
- of_node_put(np);
- irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+ mpc85xx_cpm2_pic_init();
}
/*
@@ -208,23 +162,7 @@ static void sbc8560_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .name = "soc", },
- { .type = "soc", },
- { .name = "cpm", },
- { .name = "localbus", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(sbc8560, declare_of_platform_devices);
+machine_device_initcall(sbc8560, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 2df4785ffd4..ff4249044a3 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -27,6 +27,7 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
+#include "smp.h"
extern void __early_start(void);
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
new file mode 100644
index 00000000000..e2b44933ff1
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -0,0 +1,15 @@
+#ifndef POWERPC_85XX_SMP_H_
+#define POWERPC_85XX_SMP_H_ 1
+
+#include <linux/init.h>
+
+#ifdef CONFIG_SMP
+void __init mpc85xx_smp_init(void);
+#else
+static inline void mpc85xx_smp_init(void)
+{
+ /* Nothing to do */
+}
+#endif
+
+#endif /* not POWERPC_85XX_SMP_H_ */
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 747d8fb3ab8..18f635906b2 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -41,32 +41,17 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
#include "socrates_fpga_pic.h"
static void __init socrates_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
struct device_node *np;
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
np = of_find_compatible_node(NULL, NULL, "abb,socrates-fpga-pic");
@@ -96,17 +81,7 @@ static void __init socrates_setup_arch(void)
#endif
}
-static struct of_device_id __initdata socrates_of_bus_ids[] = {
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init socrates_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
-}
-machine_device_initcall(socrates, socrates_publish_devices);
+machine_device_initcall(socrates, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 5387e9f06bd..e9e5234b4e7 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -40,70 +40,21 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
-#include <sysdev/cpm2_pic.h>
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cascade_irq;
-
- while ((cascade_irq = cpm2_get_irq()) >= 0)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
#endif /* CONFIG_CPM2 */
static void __init stx_gp3_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-#ifdef CONFIG_CPM2
- int irq;
-#endif
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-#ifdef CONFIG_CPM2
- /* Setup CPM2 PIC */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
- if (np == NULL) {
- printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
- return;
- }
- irq = irq_of_parse_and_map(np, 0);
-
- if (irq == NO_IRQ) {
- of_node_put(np);
- printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
- return;
- }
-
- cpm2_pic_init(np);
- of_node_put(np);
- irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+ mpc85xx_cpm2_pic_init();
}
/*
@@ -144,19 +95,7 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(stx_gp3, declare_of_platform_devices);
+machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 325de772725..bf7c89fb75b 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -38,70 +38,21 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "mpc85xx.h"
+
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
-#include <sysdev/cpm2_pic.h>
-
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cascade_irq;
-
- while ((cascade_irq = cpm2_get_irq()) >= 0)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
#endif /* CONFIG_CPM2 */
static void __init tqm85xx_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-#ifdef CONFIG_CPM2
- int irq;
-#endif
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (!np) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Could not map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
-#ifdef CONFIG_CPM2
- /* Setup CPM2 PIC */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
- if (np == NULL) {
- printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
- return;
- }
- irq = irq_of_parse_and_map(np, 0);
-
- if (irq == NO_IRQ) {
- of_node_put(np);
- printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n");
- return;
- }
-
- cpm2_pic_init(np);
- of_node_put(np);
- irq_set_chained_handler(irq, cpm2_cascade);
-#endif
+ mpc85xx_cpm2_pic_init();
}
/*
@@ -173,19 +124,7 @@ static void __init tqm85xx_ti1520_fixup(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
tqm85xx_ti1520_fixup);
-static struct of_device_id __initdata of_bus_ids[] = {
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(tqm85xx, declare_of_platform_devices);
+machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices);
static const char *board[] __initdata = {
"tqc,tqm8540",
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index a9dc5e79512..3a69f8b77de 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -32,6 +32,9 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
/* A few bit definitions needed for fixups on some boards */
#define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */
@@ -40,29 +43,11 @@
void __init xes_mpc85xx_pic_init(void)
{
- struct mpic *mpic;
- struct resource r;
- struct device_node *np;
-
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL) {
- printk(KERN_ERR "Could not find open-pic node\n");
- return;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "Failed to map mpic register space\n");
- of_node_put(np);
- return;
- }
-
- mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
- of_node_put(np);
-
mpic_init(mpic);
}
@@ -136,9 +121,6 @@ static int primary_phb_addr;
/*
* Setup the architecture
*/
-#ifdef CONFIG_SMP
-extern void __init mpc85xx_smp_init(void);
-#endif
static void __init xes_mpc85xx_setup_arch(void)
{
#ifdef CONFIG_PCI
@@ -172,26 +154,12 @@ static void __init xes_mpc85xx_setup_arch(void)
}
#endif
-#ifdef CONFIG_SMP
mpc85xx_smp_init();
-#endif
}
-static struct of_device_id __initdata xes_mpc85xx_ids[] = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .compatible = "simple-bus", },
- { .compatible = "gianfar", },
- {},
-};
-
-static int __init xes_mpc85xx_publish_devices(void)
-{
- return of_platform_bus_probe(NULL, xes_mpc85xx_ids, NULL);
-}
-machine_device_initcall(xes_mpc8572, xes_mpc85xx_publish_devices);
-machine_device_initcall(xes_mpc8548, xes_mpc85xx_publish_devices);
-machine_device_initcall(xes_mpc8540, xes_mpc85xx_publish_devices);
+machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
+machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
+machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index b11c3535f35..569262ca499 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -161,7 +161,7 @@ mpc86xx_time_init(void)
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
- { .compatible = "fsl,rapidio-delta", },
+ { .compatible = "fsl,srio", },
{ .compatible = "gianfar", },
{},
};
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 8ef8960abda..52bbfa03153 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -31,26 +31,16 @@ static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
void __init mpc86xx_init_irq(void)
{
- struct mpic *mpic;
- struct device_node *np;
- struct resource res;
#ifdef CONFIG_PPC_I8259
+ struct device_node *np;
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
- /* Determine PIC address. */
- np = of_find_node_by_type(NULL, "open-pic");
- if (np == NULL)
- return;
- of_address_to_resource(np, 0, &res);
-
- mpic = mpic_alloc(np, res.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
- MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0,
+ MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
0, 256, " MPIC ");
- of_node_put(np);
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 100feed9794..0cfb46d54b8 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -208,6 +208,12 @@ config PPC_PASEMI_CPUFREQ
endmenu
+menu "CPUIdle driver"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
depends on 6xx && (PPC_PREP || PPC_PMAC)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index fbecae0fbb4..425db18580a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -174,7 +174,6 @@ config BOOKE
config FSL_BOOKE
bool
depends on (E200 || E500) && PPC32
- select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
default y
# this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -182,6 +181,7 @@ config PPC_FSL_BOOK3E
bool
select FSL_EMB_PERFMON
select PPC_SMP_MUXED_IPI
+ select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
default y if FSL_BOOKE
config PTE_64BIT
@@ -236,7 +236,7 @@ config VSX
config PPC_ICSWX
bool "Support for PowerPC icswx coprocessor instruction"
- depends on POWER4
+ depends on POWER4 || PPC_A2
default n
---help---
@@ -252,6 +252,25 @@ config PPC_ICSWX
If in doubt, say N here.
+config PPC_ICSWX_PID
+ bool "icswx requires direct PID management"
+ depends on PPC_ICSWX && POWER4
+ default y
+ ---help---
+ The PID register in server is used explicitly for ICSWX. In
+	  embedded systems PID management is done by the system.
+
+config PPC_ICSWX_USE_SIGILL
+ bool "Should a bad CT cause a SIGILL?"
+ depends on PPC_ICSWX
+ default n
+ ---help---
+ Should a bad CT used for "non-record form ICSWX" cause an
+	  illegal instruction signal or should it be silent as
+ architected.
+
+ If in doubt, say N here.
+
config SPE
bool "SPE Support"
depends on E200 || (E500 && !PPC_E500MC)
@@ -290,7 +309,7 @@ config PPC_BOOK3E_MMU
config PPC_MM_SLICES
bool
- default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+ default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
default n
config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 4d4c8c16912..94560db788b 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -46,7 +46,7 @@
*/
#include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <asm/spu.h>
@@ -59,8 +59,8 @@
#define TEMP_MIN 65
#define TEMP_MAX 125
-#define SYSDEV_PREFIX_ATTR(_prefix,_name,_mode) \
-struct sysdev_attribute attr_ ## _prefix ## _ ## _name = { \
+#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \
+struct device_attribute attr_ ## _prefix ## _ ## _name = { \
.attr = { .name = __stringify(_name), .mode = _mode }, \
.show = _prefix ## _show_ ## _name, \
.store = _prefix ## _store_ ## _name, \
@@ -76,36 +76,36 @@ static inline u8 temp_to_reg(u8 temp)
return ((temp - TEMP_MIN) >> 1) & 0x3f;
}
-static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev)
{
struct spu *spu;
- spu = container_of(sysdev, struct spu, sysdev);
+ spu = container_of(dev, struct spu, dev);
return cbe_get_pmd_regs(spu_devnode(spu));
}
/* returns the value for a given spu in a given register */
-static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg)
{
union spe_reg value;
struct spu *spu;
- spu = container_of(sysdev, struct spu, sysdev);
+ spu = container_of(dev, struct spu, dev);
value.val = in_be64(&reg->val);
return value.spe[spu->spe_id];
}
-static ssize_t spu_show_temp(struct sys_device *sysdev, struct sysdev_attribute *attr,
+static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 value;
struct cbe_pmd_regs __iomem *pmd_regs;
- pmd_regs = get_pmd_regs(sysdev);
+ pmd_regs = get_pmd_regs(dev);
- value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+ value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1);
return sprintf(buf, "%d\n", reg_to_temp(value));
}
@@ -147,48 +147,48 @@ static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char
return size;
}
-static ssize_t spu_show_throttle_end(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_end(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(get_pmd_regs(sysdev), buf, 0);
+ return show_throttle(get_pmd_regs(dev), buf, 0);
}
-static ssize_t spu_show_throttle_begin(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_begin(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(get_pmd_regs(sysdev), buf, 8);
+ return show_throttle(get_pmd_regs(dev), buf, 8);
}
-static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_show_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(get_pmd_regs(sysdev), buf, 16);
+ return show_throttle(get_pmd_regs(dev), buf, 16);
}
-static ssize_t spu_store_throttle_end(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_end(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(get_pmd_regs(sysdev), buf, size, 0);
+ return store_throttle(get_pmd_regs(dev), buf, size, 0);
}
-static ssize_t spu_store_throttle_begin(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_begin(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(get_pmd_regs(sysdev), buf, size, 8);
+ return store_throttle(get_pmd_regs(dev), buf, size, 8);
}
-static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t spu_store_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(get_pmd_regs(sysdev), buf, size, 16);
+ return store_throttle(get_pmd_regs(dev), buf, size, 16);
}
-static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos)
{
struct cbe_pmd_regs __iomem *pmd_regs;
u64 value;
- pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+ pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
value = in_be64(&pmd_regs->ts_ctsr2);
value = (value >> pos) & 0x3f;
@@ -199,64 +199,64 @@ static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
/* shows the temperature of the DTS on the PPE,
* located near the linear thermal sensor */
-static ssize_t ppe_show_temp0(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_temp0(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return ppe_show_temp(sysdev, buf, 32);
+ return ppe_show_temp(dev, buf, 32);
}
/* shows the temperature of the second DTS on the PPE */
-static ssize_t ppe_show_temp1(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_temp1(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return ppe_show_temp(sysdev, buf, 0);
+ return ppe_show_temp(dev, buf, 0);
}
-static ssize_t ppe_show_throttle_end(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_end(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32);
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32);
}
-static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_begin(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40);
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40);
}
-static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t ppe_show_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48);
+ return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48);
}
-static ssize_t ppe_store_throttle_end(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_end(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32);
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32);
}
-static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_begin(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40);
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40);
}
-static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev,
- struct sysdev_attribute *attr, const char *buf, size_t size)
+static ssize_t ppe_store_throttle_full_stop(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48);
+ return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48);
}
-static struct sysdev_attribute attr_spu_temperature = {
+static struct device_attribute attr_spu_temperature = {
.attr = {.name = "temperature", .mode = 0400 },
.show = spu_show_temp,
};
-static SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600);
-static SYSDEV_PREFIX_ATTR(spu, throttle_begin, 0600);
-static SYSDEV_PREFIX_ATTR(spu, throttle_full_stop, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600);
static struct attribute *spu_attributes[] = {
@@ -272,19 +272,19 @@ static struct attribute_group spu_attribute_group = {
.attrs = spu_attributes,
};
-static struct sysdev_attribute attr_ppe_temperature0 = {
+static struct device_attribute attr_ppe_temperature0 = {
.attr = {.name = "temperature0", .mode = 0400 },
.show = ppe_show_temp0,
};
-static struct sysdev_attribute attr_ppe_temperature1 = {
+static struct device_attribute attr_ppe_temperature1 = {
.attr = {.name = "temperature1", .mode = 0400 },
.show = ppe_show_temp1,
};
-static SYSDEV_PREFIX_ATTR(ppe, throttle_end, 0600);
-static SYSDEV_PREFIX_ATTR(ppe, throttle_begin, 0600);
-static SYSDEV_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600);
+static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
static struct attribute *ppe_attributes[] = {
&attr_ppe_temperature0.attr,
@@ -307,7 +307,7 @@ static int __init init_default_values(void)
{
int cpu;
struct cbe_pmd_regs __iomem *pmd_regs;
- struct sys_device *sysdev;
+ struct device *dev;
union ppe_spe_reg tpr;
union spe_reg str1;
u64 str2;
@@ -349,14 +349,14 @@ static int __init init_default_values(void)
for_each_possible_cpu (cpu) {
pr_debug("processing cpu %d\n", cpu);
- sysdev = get_cpu_sysdev(cpu);
+ dev = get_cpu_device(cpu);
- if (!sysdev) {
- pr_info("invalid sysdev pointer for cbe_thermal\n");
+ if (!dev) {
+ pr_info("invalid dev pointer for cbe_thermal\n");
return -EINVAL;
}
- pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+ pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
if (!pmd_regs) {
pr_info("invalid CBE regs pointer for cbe_thermal\n");
@@ -379,8 +379,8 @@ static int __init thermal_init(void)
int rc = init_default_values();
if (rc == 0) {
- spu_add_sysdev_attr_group(&spu_attribute_group);
- cpu_add_sysdev_attr_group(&ppe_attribute_group);
+ spu_add_dev_attr_group(&spu_attribute_group);
+ cpu_add_dev_attr_group(&ppe_attribute_group);
}
return rc;
@@ -389,8 +389,8 @@ module_init(thermal_init);
static void __exit thermal_exit(void)
{
- spu_remove_sysdev_attr_group(&spu_attribute_group);
- cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+ spu_remove_dev_attr_group(&spu_attribute_group);
+ cpu_remove_dev_attr_group(&ppe_attribute_group);
}
module_exit(thermal_exit);
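The conversion above moves these attributes from the old sysdev interface to plain struct device attributes: show/store callbacks now take a struct device and a struct device_attribute, and DEVICE_ATTR() generates a struct device_attribute named dev_attr_<name>. A minimal sketch of the resulting pattern, with invented "demo" names:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_temp_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        /* dev->id plays the role the old sysdev->id played above */
        return sprintf(buf, "cpu %d\n", dev->id);
}
static DEVICE_ATTR(temperature, 0400, demo_temp_show, NULL);

/* attached to an already-registered device with:
 *      device_create_file(dev, &dev_attr_temperature);
 */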
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 592c3d51b81..ae9fc7bc17d 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1037,6 +1037,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
/* The fixed mapping is only supported on axon machines */
np = of_find_node_by_name(NULL, "axon");
+ of_node_put(np);
+
if (!np) {
pr_debug("iommu: fixed mapping disabled, no axons found\n");
return -1;
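of_find_node_by_name() returns its result with an elevated reference count; in the hunk above the node is only needed as a presence check, so the reference is dropped immediately. A minimal sketch of that idiom (function name invented):

#include <linux/of.h>
#include <linux/types.h>

static bool demo_axon_present(void)
{
        struct device_node *np = of_find_node_by_name(NULL, "axon");

        of_node_put(np);        /* of_node_put(NULL) is a harmless no-op */
        return np != NULL;
}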
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 0fc9b725612..62002a7edfe 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -184,24 +184,10 @@ static int __init cell_publish_devices(void)
}
machine_subsys_initcall(cell, cell_publish_devices);
-static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct mpic *mpic = irq_desc_get_handler_data(desc);
- unsigned int virq;
-
- virq = mpic_get_one_irq(mpic);
- if (virq != NO_IRQ)
- generic_handle_irq(virq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
static void __init mpic_init_IRQ(void)
{
struct device_node *dn;
struct mpic *mpic;
- unsigned int virq;
for (dn = NULL;
(dn = of_find_node_by_name(dn, "interrupt-controller"));) {
@@ -211,19 +197,10 @@ static void __init mpic_init_IRQ(void)
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
- mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC ");
+ mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC ");
if (mpic == NULL)
continue;
mpic_init(mpic);
-
- virq = irq_of_parse_and_map(dn, 0);
- if (virq == NO_IRQ)
- continue;
-
- printk(KERN_INFO "%s : hooking up to IRQ %d\n",
- dn->full_name, virq);
- irq_set_handler_data(virq, mpic);
- irq_set_chained_handler(virq, cell_mpic_cascade);
}
}
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f5c5c762d5a..4a255cf8cd1 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e94d3ecdd8b..8b1213993b1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -519,31 +519,32 @@ void spu_init_channels(struct spu *spu)
}
EXPORT_SYMBOL_GPL(spu_init_channels);
-static struct sysdev_class spu_sysdev_class = {
+static struct bus_type spu_subsys = {
.name = "spu",
+ .dev_name = "spu",
};
-int spu_add_sysdev_attr(struct sysdev_attribute *attr)
+int spu_add_dev_attr(struct device_attribute *attr)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
- sysdev_create_file(&spu->sysdev, attr);
+ device_create_file(&spu->dev, attr);
mutex_unlock(&spu_full_list_mutex);
return 0;
}
-EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
+EXPORT_SYMBOL_GPL(spu_add_dev_attr);
-int spu_add_sysdev_attr_group(struct attribute_group *attrs)
+int spu_add_dev_attr_group(struct attribute_group *attrs)
{
struct spu *spu;
int rc = 0;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list) {
- rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
+ rc = sysfs_create_group(&spu->dev.kobj, attrs);
/* we're in trouble here, but try unwinding anyway */
if (rc) {
@@ -552,7 +553,7 @@ int spu_add_sysdev_attr_group(struct attribute_group *attrs)
list_for_each_entry_continue_reverse(spu,
&spu_full_list, full_list)
- sysfs_remove_group(&spu->sysdev.kobj, attrs);
+ sysfs_remove_group(&spu->dev.kobj, attrs);
break;
}
}
@@ -561,45 +562,45 @@ int spu_add_sysdev_attr_group(struct attribute_group *attrs)
return rc;
}
-EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);
-void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
+void spu_remove_dev_attr(struct device_attribute *attr)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
- sysdev_remove_file(&spu->sysdev, attr);
+ device_remove_file(&spu->dev, attr);
mutex_unlock(&spu_full_list_mutex);
}
-EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
-void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
+void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
struct spu *spu;
mutex_lock(&spu_full_list_mutex);
list_for_each_entry(spu, &spu_full_list, full_list)
- sysfs_remove_group(&spu->sysdev.kobj, attrs);
+ sysfs_remove_group(&spu->dev.kobj, attrs);
mutex_unlock(&spu_full_list_mutex);
}
-EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
-static int spu_create_sysdev(struct spu *spu)
+static int spu_create_dev(struct spu *spu)
{
int ret;
- spu->sysdev.id = spu->number;
- spu->sysdev.cls = &spu_sysdev_class;
- ret = sysdev_register(&spu->sysdev);
+ spu->dev.id = spu->number;
+ spu->dev.bus = &spu_subsys;
+ ret = device_register(&spu->dev);
if (ret) {
printk(KERN_ERR "Can't register SPU %d with sysfs\n",
spu->number);
return ret;
}
- sysfs_add_device_to_node(&spu->sysdev, spu->node);
+ sysfs_add_device_to_node(&spu->dev, spu->node);
return 0;
}
@@ -635,7 +636,7 @@ static int __init create_spu(void *data)
if (ret)
goto out_destroy;
- ret = spu_create_sysdev(spu);
+ ret = spu_create_dev(spu);
if (ret)
goto out_free_irqs;
@@ -692,10 +693,10 @@ static unsigned long long spu_acct_time(struct spu *spu,
}
-static ssize_t spu_stat_show(struct sys_device *sysdev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t spu_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct spu *spu = container_of(sysdev, struct spu, sysdev);
+ struct spu *spu = container_of(dev, struct spu, dev);
return sprintf(buf, "%s %llu %llu %llu %llu "
"%llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -714,7 +715,7 @@ static ssize_t spu_stat_show(struct sys_device *sysdev,
spu->stats.libassist);
}
-static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
+static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
@@ -813,8 +814,8 @@ static int __init init_spu_base(void)
if (!spu_management_ops)
goto out;
- /* create sysdev class for spus */
- ret = sysdev_class_register(&spu_sysdev_class);
+ /* create system subsystem for spus */
+ ret = subsys_system_register(&spu_subsys, NULL);
if (ret)
goto out;
@@ -823,7 +824,7 @@ static int __init init_spu_base(void)
if (ret < 0) {
printk(KERN_WARNING "%s: Error initializing spus\n",
__func__);
- goto out_unregister_sysdev_class;
+ goto out_unregister_subsys;
}
if (ret > 0)
@@ -833,15 +834,15 @@ static int __init init_spu_base(void)
xmon_register_spus(&spu_full_list);
crash_register_spus(&spu_full_list);
mutex_unlock(&spu_full_list_mutex);
- spu_add_sysdev_attr(&attr_stat);
+ spu_add_dev_attr(&dev_attr_stat);
register_syscore_ops(&spu_syscore_ops);
spu_init_affinity();
return 0;
- out_unregister_sysdev_class:
- sysdev_class_unregister(&spu_sysdev_class);
+ out_unregister_subsys:
+ bus_unregister(&spu_subsys);
out:
return ret;
}
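The spu sysdev class becomes a "system" subsystem here: a struct bus_type registered through subsys_system_register(), with each SPU hung off it via device_register(). A minimal sketch of that registration pattern under /sys/devices/system, using invented "demo" names:

#include <linux/device.h>
#include <linux/init.h>

static struct bus_type demo_subsys = {
        .name     = "demo",
        .dev_name = "demo",     /* instances appear as demo0, demo1, ... */
};

static struct device demo_device;

static int __init demo_subsys_init(void)
{
        int ret;

        ret = subsys_system_register(&demo_subsys, NULL);
        if (ret)
                return ret;

        demo_device.id  = 0;
        demo_device.bus = &demo_subsys;
        ret = device_register(&demo_device);   /* /sys/devices/system/demo/demo0 */
        if (ret)
                bus_unregister(&demo_subsys);
        return ret;
}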
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 75530d99eda..714bbfc3162 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -65,8 +65,8 @@ static inline void spufs_calls_put(struct spufs_calls *calls) { }
#endif /* CONFIG_SPU_FS_MODULE */
-asmlinkage long sys_spu_create(const char __user *name,
- unsigned int flags, mode_t mode, int neighbor_fd)
+SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
+ umode_t, mode, int, neighbor_fd)
{
long ret;
struct file *neighbor;
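sys_spu_create is converted to the SYSCALL_DEFINEn() family above, which emits the asmlinkage entry point and its related plumbing from a single macro invocation. A sketch of the macro's shape with an invented two-argument call (not a real syscall):

#include <linux/syscalls.h>
#include <linux/errno.h>

SYSCALL_DEFINE2(demo_op, unsigned int, cmd, unsigned long, arg)
{
        /* equivalent to: asmlinkage long sys_demo_op(unsigned int cmd,
         *                                            unsigned long arg)
         */
        return cmd ? 0 : -EINVAL;
}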
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index e481f6b9a78..d4a094ca96f 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -74,7 +74,6 @@ spufs_alloc_inode(struct super_block *sb)
static void spufs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
@@ -92,7 +91,7 @@ spufs_init_once(void *p)
}
static struct inode *
-spufs_new_inode(struct super_block *sb, int mode)
+spufs_new_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode;
@@ -124,7 +123,7 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr)
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
- const struct file_operations *fops, int mode,
+ const struct file_operations *fops, umode_t mode,
size_t size, struct spu_context *ctx)
{
static const struct inode_operations spufs_file_iops = {
@@ -194,7 +193,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
}
static int spufs_fill_dir(struct dentry *dir,
- const struct spufs_tree_descr *files, int mode,
+ const struct spufs_tree_descr *files, umode_t mode,
struct spu_context *ctx)
{
struct dentry *dentry, *tmp;
@@ -264,7 +263,7 @@ EXPORT_SYMBOL_GPL(spufs_context_fops);
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
- int mode)
+ umode_t mode)
{
int ret;
struct inode *inode;
@@ -447,7 +446,7 @@ spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
- struct vfsmount *mnt, int flags, int mode,
+ struct vfsmount *mnt, int flags, umode_t mode,
struct file *aff_filp)
{
int ret;
@@ -521,7 +520,7 @@ out:
}
static int
-spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
+spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int ret;
struct inode *inode;
@@ -584,7 +583,7 @@ out:
static int spufs_create_gang(struct inode *inode,
struct dentry *dentry,
- struct vfsmount *mnt, int mode)
+ struct vfsmount *mnt, umode_t mode)
{
int ret;
@@ -612,7 +611,7 @@ out:
static struct file_system_type spufs_type;
long spufs_create(struct path *path, struct dentry *dentry,
- unsigned int flags, mode_t mode, struct file *filp)
+ unsigned int flags, umode_t mode, struct file *filp)
{
int ret;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 099245f230b..67852ade4c0 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -237,7 +237,7 @@ struct spufs_inode_info {
struct spufs_tree_descr {
const char *name;
const struct file_operations *ops;
- int mode;
+ umode_t mode;
size_t size;
};
@@ -249,7 +249,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
extern struct spufs_calls spufs_calls;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
- mode_t mode, struct file *filp);
+ umode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
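The mode parameters and the spufs_tree_descr field switch from int/mode_t to umode_t, the kernel-internal type for file type and permission bits. A tiny illustrative helper (not from the patch):

#include <linux/types.h>
#include <linux/stat.h>

static umode_t demo_dir_mode(umode_t perms)
{
        return S_IFDIR | (perms & 0777);        /* type bits plus permission bits */
}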
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 71a5b520726..8591bb62d7f 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -60,7 +60,7 @@ out:
}
static long do_spu_create(const char __user *pathname, unsigned int flags,
- mode_t mode, struct file *neighbor)
+ umode_t mode, struct file *neighbor)
{
struct path path;
struct dentry *dentry;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 12278649841..f1f17bb2c33 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -435,8 +435,7 @@ static void __init chrp_find_openpic(void)
if (len > 1)
isu_size = iranges[3];
- chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY,
- isu_size, 0, " MPIC ");
+ chrp_mpic = mpic_alloc(np, opaddr, 0, isu_size, 0, " MPIC ");
if (chrp_mpic == NULL) {
printk(KERN_ERR "Failed to allocate MPIC structure\n");
goto bail;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 2e9bcf6444c..9cfcf20c056 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -148,30 +148,14 @@ static void __init holly_setup_arch(void)
static void __init holly_init_IRQ(void)
{
struct mpic *mpic;
- phys_addr_t mpic_paddr = 0;
- struct device_node *tsi_pic;
#ifdef CONFIG_PCI
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
struct device_node *cascade_node = NULL;
#endif
- tsi_pic = of_find_node_by_type(NULL, "open-pic");
- if (tsi_pic) {
- unsigned int size;
- const void *prop = of_get_property(tsi_pic, "reg", &size);
- mpic_paddr = of_translate_address(tsi_pic, prop);
- }
-
- if (mpic_paddr == 0) {
- printk(KERN_ERR "%s: No tsi108 PIC found !\n", __func__);
- return;
- }
-
- pr_debug("%s: tsi108 pic phys_addr = 0x%x\n", __func__, (u32) mpic_paddr);
-
- mpic = mpic_alloc(tsi_pic, mpic_paddr,
- MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
24,
NR_IRQS-4, /* num_sources used */
@@ -179,7 +163,7 @@ static void __init holly_init_IRQ(void)
BUG_ON(mpic == NULL);
- mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+ mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);
mpic_init(mpic);
@@ -204,7 +188,6 @@ static void __init holly_init_IRQ(void)
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
- of_node_put(tsi_pic);
}
void holly_show_cpuinfo(struct seq_file *m)
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 244f997de79..bcfad92c9ce 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -81,29 +81,19 @@ static void __init linkstation_setup_arch(void)
static void __init linkstation_init_IRQ(void)
{
struct mpic *mpic;
- struct device_node *dnp;
- const u32 *prop;
- int size;
- phys_addr_t paddr;
- dnp = of_find_node_by_type(NULL, "open-pic");
- if (dnp == NULL)
- return;
-
- prop = of_get_property(dnp, "reg", &size);
- paddr = (phys_addr_t)of_translate_address(dnp, prop);
-
- mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC ");
+ mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
+ 4, 32, " EPIC ");
BUG_ON(mpic == NULL);
/* PCI IRQs */
- mpic_assign_isu(mpic, 0, paddr + 0x10200);
+ mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
/* I2C */
- mpic_assign_isu(mpic, 1, paddr + 0x11000);
+ mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
/* ttyS0, ttyS1 */
- mpic_assign_isu(mpic, 2, paddr + 0x11100);
+ mpic_assign_isu(mpic, 2, mpic->paddr + 0x11100);
mpic_init(mpic);
}
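The board code above no longer looks up the "open-pic" node and translates its "reg" property itself; it passes a NULL node and address 0 to mpic_alloc() and reads the translated base back from mpic->paddr. For reference, a condensed sketch of the lookup the removed code performed (and which mpic_alloc() is now expected to do internally), built from standard OF helpers:

#include <linux/of.h>
#include <linux/of_address.h>

static u64 demo_find_openpic_paddr(void)
{
        struct device_node *np;
        const void *reg;
        u64 paddr = 0;

        np = of_find_node_by_type(NULL, "open-pic");
        if (!np)
                return 0;

        reg = of_get_property(np, "reg", NULL);
        if (reg)
                paddr = of_translate_address(np, reg);  /* CPU physical address */

        of_node_put(np);
        return paddr;
}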
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f8f33e16c6b..f3350d786f5 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -102,31 +102,14 @@ static void __init mpc7448_hpc2_setup_arch(void)
static void __init mpc7448_hpc2_init_IRQ(void)
{
struct mpic *mpic;
- phys_addr_t mpic_paddr = 0;
- struct device_node *tsi_pic;
#ifdef CONFIG_PCI
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
struct device_node *cascade_node = NULL;
#endif
- tsi_pic = of_find_node_by_type(NULL, "open-pic");
- if (tsi_pic) {
- unsigned int size;
- const void *prop = of_get_property(tsi_pic, "reg", &size);
- mpic_paddr = of_translate_address(tsi_pic, prop);
- }
-
- if (mpic_paddr == 0) {
- printk("%s: No tsi108 PIC found !\n", __func__);
- return;
- }
-
- DBG("%s: tsi108 pic phys_addr = 0x%x\n", __func__,
- (u32) mpic_paddr);
-
- mpic = mpic_alloc(tsi_pic, mpic_paddr,
- MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
24,
NR_IRQS-4, /* num_sources used */
@@ -134,7 +117,7 @@ static void __init mpc7448_hpc2_init_IRQ(void)
BUG_ON(mpic == NULL);
- mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+ mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);
mpic_init(mpic);
@@ -159,7 +142,6 @@ static void __init mpc7448_hpc2_init_IRQ(void)
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
- of_node_put(tsi_pic);
}
void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index f1eebcae9bf..afa63883496 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -83,35 +83,17 @@ static void __init storcenter_setup_arch(void)
static void __init storcenter_init_IRQ(void)
{
struct mpic *mpic;
- struct device_node *dnp;
- const void *prop;
- int size;
- phys_addr_t paddr;
-
- dnp = of_find_node_by_type(NULL, "open-pic");
- if (dnp == NULL)
- return;
-
- prop = of_get_property(dnp, "reg", &size);
- if (prop == NULL) {
- of_node_put(dnp);
- return;
- }
-
- paddr = (phys_addr_t)of_translate_address(dnp, prop);
- mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET,
- 16, 32, " OpenPIC ");
-
- of_node_put(dnp);
+ mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
+ 16, 32, " OpenPIC ");
BUG_ON(mpic == NULL);
/*
* 16 Serial Interrupts followed by 16 Internal Interrupts.
* I2C is the second internal, so it is at 17, 0x11020.
*/
- mpic_assign_isu(mpic, 0, paddr + 0x10200);
- mpic_assign_isu(mpic, 1, paddr + 0x11000);
+ mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
+ mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
mpic_init(mpic);
}
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 1b5dc1a2e14..6d8dadf19f0 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -79,24 +79,19 @@ void __init wii_memory_fixups(void)
BUG_ON(memblock.memory.cnt != 2);
BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
- p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
- p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+ /* trim unaligned tail */
+ memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
+ (phys_addr_t)ULLONG_MAX);
- wii_hole_start = p[0].base + p[0].size;
+ /* determine hole, add & reserve them */
+ wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
wii_hole_size = p[1].base - wii_hole_start;
-
- pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
- pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
- pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
-
- p[0].size += wii_hole_size + p[1].size;
-
- memblock.memory.cnt = 1;
- memblock_analyze();
-
- /* reserve the hole */
+ memblock_add(wii_hole_start, wii_hole_size);
memblock_reserve(wii_hole_start, wii_hole_size);
+ BUG_ON(memblock.memory.cnt != 1);
+ __memblock_dump_all();
+
/* allow ioremapping the address space in the hole */
__allow_ioremap_reserved = 1;
}
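The fixup above now removes everything above the page-aligned end of MEM2 with memblock_remove(), then registers the gap between the two banks with memblock_add() and immediately reserves it so the allocator never hands it out, instead of editing memblock internals directly. A minimal sketch of that carve-out pattern with made-up parameters:

#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static void __init demo_register_hole(phys_addr_t ram_end,
                                      phys_addr_t hole_start,
                                      phys_addr_t hole_size)
{
        /* discard anything above the page-aligned end of usable RAM */
        memblock_remove(ALIGN(ram_end, PAGE_SIZE), (phys_addr_t)ULLONG_MAX);

        /* make the hole known to memblock, but never allocate from it */
        memblock_add(hole_start, hole_size);
        memblock_reserve(hole_start, hole_size);
}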
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index ea0acbd8966..8fc62586a97 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -563,7 +563,8 @@ static void yield_shared_processor(void)
static void iseries_shared_idle(void)
{
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && !hvlpevent_is_pending()) {
local_irq_disable();
ppc64_runlatch_off();
@@ -577,7 +578,8 @@ static void iseries_shared_idle(void)
}
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
if (hvlpevent_is_pending())
process_iSeries_events();
@@ -593,7 +595,8 @@ static void iseries_dedicated_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
if (!need_resched()) {
while (!need_resched()) {
ppc64_runlatch_off();
@@ -610,7 +613,8 @@ static void iseries_dedicated_idle(void)
}
ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
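Both iSeries idle loops are bracketed the same way: stop the tick first, then tell RCU the CPU is idle, and on wakeup undo the calls in reverse order so RCU is watching again before the tick restarts. A stripped-down sketch of that bracketing (runlatch handling and event polling omitted):

#include <linux/tick.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void demo_idle_loop(void)
{
        while (1) {
                tick_nohz_idle_enter();
                rcu_idle_enter();

                while (!need_resched())
                        cpu_relax();    /* platform-specific wait goes here */

                rcu_idle_exit();
                tick_nohz_idle_exit();

                schedule();
        }
}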
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 7e2a5515ed7..02df49fb59f 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -24,7 +24,7 @@
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index dd2e48b2850..401e3f3f74c 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -207,6 +207,54 @@ static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose,
return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset);
}
+static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset,
+ int len, u32 *val)
+{
+ volatile void __iomem *addr;
+
+ addr = hose->cfg_addr;
+ addr += ((offset & ~3) << 2) + (4 - len - (offset & 3));
+
+ switch (len) {
+ case 1:
+ *val = in_8(addr);
+ break;
+ case 2:
+ *val = in_be16(addr);
+ break;
+ default:
+ *val = in_be32(addr);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset,
+ int len, u32 val)
+{
+ volatile void __iomem *addr;
+
+ addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3));
+
+ if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST)
+ return PCIBIOS_SUCCESSFUL;
+
+ switch (len) {
+ case 1:
+ out_8(addr, val);
+ break;
+ case 2:
+ out_be16(addr, val);
+ break;
+ default:
+ out_be32(addr, val);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
@@ -217,6 +265,9 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
+ if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
+ return u3_ht_root_read_config(hose, offset, len, val);
+
if (offset > 0xff)
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -252,6 +303,9 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
+ if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
+ return u3_ht_root_write_config(hose, offset, len, val);
+
if (offset > 0xff)
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -428,6 +482,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
* reg_property and using some accessor functions instead
*/
hose->cfg_data = ioremap(0xf2000000, 0x02000000);
+ hose->cfg_addr = ioremap(0xf8070000, 0x1000);
hose->first_busno = 0;
hose->last_busno = 0xef;
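The new root-port accessors above compute the register offset as ((offset & ~3) << 2) + (4 - len - (offset & 3)): each 4-byte block of config space is strided at 16 bytes with its bytes reversed, which is why the big-endian in_be*/out_be* accessors are used against it. A small helper to make the arithmetic concrete (illustrative only, not part of the patch):

static unsigned int demo_u3_ht_root_offset(unsigned int offset, unsigned int len)
{
        return ((offset & ~3u) << 2) + (4 - len - (offset & 3));
}

/* Examples: a 32-bit access at 0x04 lands at 0x10, a 16-bit access at 0x06
 * at 0x10, an 8-bit access at 0x07 at 0x10, and an 8-bit access at 0x04
 * at 0x13.
 */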
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 4c372047c94..0bcbfe7b2c5 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -221,7 +221,7 @@ static void __init maple_init_IRQ(void)
unsigned long openpic_addr = 0;
int naddr, n, i, opplen, has_isus = 0;
struct mpic *mpic;
- unsigned int flags = MPIC_PRIMARY;
+ unsigned int flags = 0;
/* Locate MPIC in the device-tree. Note that there is a bug
* in Maple device-tree where the type of the controller is
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 6f355821055..98b7a7c1317 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -224,7 +224,7 @@ static __init void pas_init_IRQ(void)
openpic_addr = of_read_number(opprop, naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
- mpic_flags = MPIC_PRIMARY | MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
+ mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
if (nmiprop)
@@ -234,7 +234,7 @@ static __init void pas_init_IRQ(void)
mpic_flags, 0, 0, "PASEMI-OPIC");
BUG_ON(!mpic);
- mpic_assign_isu(mpic, 0, openpic_addr + 0x10000);
+ mpic_assign_isu(mpic, 0, mpic->paddr + 0x10000);
mpic_init(mpic);
/* The NMI/MCK source needs to be prio 15 */
if (nmiprop) {
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 04af5f48b4e..1fc386a23f1 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -23,7 +23,7 @@
#include <linux/pmu.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/hardirq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 901bfbddc3d..7761aabfc29 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -52,13 +52,8 @@ struct device_node *of_irq_dflt_pic;
/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
-#define GC_LEVEL_MASK 0x3ff00000
-#define OHARE_LEVEL_MASK 0x1ff00000
-#define HEATHROW_LEVEL_MASK 0x1ff00000
-
static int max_irqs;
static int max_real_irqs;
-static u32 level_mask[4];
static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
@@ -217,8 +212,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id)
for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
- /* We must read level interrupts from the level register */
- bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+ bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
@@ -248,8 +242,7 @@ static unsigned int pmac_pic_get_irq(void)
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
- /* We must read level interrupts from the level register */
- bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
+ bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
@@ -284,19 +277,14 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- int level;
-
if (hw >= max_irqs)
return -EINVAL;
/* Mark level interrupts, set delayed disable for edge ones and set
* handlers
*/
- level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
- if (level)
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &pmac_pic,
- level ? handle_level_irq : handle_edge_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
return 0;
}
@@ -334,21 +322,14 @@ static void __init pmac_pic_probe_oldstyle(void)
if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
max_irqs = max_real_irqs = 32;
- level_mask[0] = GC_LEVEL_MASK;
} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
max_irqs = max_real_irqs = 32;
- level_mask[0] = OHARE_LEVEL_MASK;
-
/* We might have a second cascaded ohare */
slave = of_find_node_by_name(NULL, "pci106b,7");
- if (slave) {
+ if (slave)
max_irqs = 64;
- level_mask[1] = OHARE_LEVEL_MASK;
- }
} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
max_irqs = max_real_irqs = 64;
- level_mask[0] = HEATHROW_LEVEL_MASK;
- level_mask[1] = 0;
/* We might have a second cascaded heathrow */
slave = of_find_node_by_name(master, "mac-io");
@@ -363,11 +344,8 @@ static void __init pmac_pic_probe_oldstyle(void)
}
/* We found a slave */
- if (slave) {
+ if (slave)
max_irqs = 128;
- level_mask[2] = HEATHROW_LEVEL_MASK;
- level_mask[3] = 0;
- }
}
BUG_ON(master == NULL);
@@ -464,18 +442,6 @@ int of_irq_map_oldworld(struct device_node *device, int index,
}
#endif /* CONFIG_PPC32 */
-static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct mpic *mpic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq = mpic_get_one_irq(mpic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
@@ -498,14 +464,8 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
int master)
{
const char *name = master ? " MPIC 1 " : " MPIC 2 ";
- struct resource r;
struct mpic *mpic;
- unsigned int flags = master ? MPIC_PRIMARY : 0;
- int rc;
-
- rc = of_address_to_resource(np, 0, &r);
- if (rc)
- return NULL;
+ unsigned int flags = master ? 0 : MPIC_SECONDARY;
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
@@ -519,7 +479,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
if (master && (flags & MPIC_BIG_ENDIAN))
flags |= MPIC_U3_HT_IRQS;
- mpic = mpic_alloc(np, r.start, flags, 0, 0, name);
+ mpic = mpic_alloc(np, 0, flags, 0, 0, name);
if (mpic == NULL)
return NULL;
@@ -532,7 +492,6 @@ static int __init pmac_pic_probe_mpic(void)
{
struct mpic *mpic1, *mpic2;
struct device_node *np, *master = NULL, *slave = NULL;
- unsigned int cascade;
/* We can have up to 2 MPICs cascaded */
for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
@@ -568,27 +527,14 @@ static int __init pmac_pic_probe_mpic(void)
of_node_put(master);
- /* No slave, let's go out */
- if (slave == NULL)
- return 0;
-
- /* Get/Map slave interrupt */
- cascade = irq_of_parse_and_map(slave, 0);
- if (cascade == NO_IRQ) {
- printk(KERN_ERR "Failed to map cascade IRQ\n");
- return 0;
- }
-
- mpic2 = pmac_setup_one_mpic(slave, 0);
- if (mpic2 == NULL) {
- printk(KERN_ERR "Failed to setup slave MPIC\n");
+ /* Set up a cascaded controller, if present */
+ if (slave) {
+ mpic2 = pmac_setup_one_mpic(slave, 0);
+ if (mpic2 == NULL)
+ printk(KERN_ERR "Failed to setup slave MPIC\n");
of_node_put(slave);
- return 0;
}
- irq_set_handler_data(cascade, mpic2);
- irq_set_chained_handler(cascade, pmac_u3_cascade);
- of_node_put(slave);
return 0;
}
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 96580b189ec..970ea1de429 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -494,11 +494,15 @@ static int __init pmac_declare_of_platform_devices(void)
return -1;
np = of_find_node_by_name(NULL, "valkyrie");
- if (np)
+ if (np) {
of_platform_device_create(np, "valkyrie", NULL);
+ of_node_put(np);
+ }
np = of_find_node_by_name(NULL, "platinum");
- if (np)
+ if (np) {
of_platform_device_create(np, "platinum", NULL);
+ of_node_put(np);
+ }
np = of_find_node_by_type(NULL, "smu");
if (np) {
of_platform_device_create(np, "smu", NULL);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 9b6a820bdd7..44d769258eb 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(void)
if (psurge_secondary_virq)
rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
- IRQF_PERCPU, "IPI", NULL);
+ IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);
if (rc)
pr_err("Failed to setup secondary cpu IPI\n");
@@ -408,13 +408,13 @@ static int __init smp_psurge_kick_cpu(int nr)
static struct irqaction psurge_irqaction = {
.handler = psurge_ipi_intr,
- .flags = IRQF_PERCPU,
+ .flags = IRQF_PERCPU | IRQF_NO_THREAD,
.name = "primary IPI",
};
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
- if (cpu_nr != 0)
+ if (cpu_nr != 0 || !psurge_start)
return;
/* reset the entry point so if we get another intr we won't
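The psurge IPI handlers above gain IRQF_NO_THREAD on top of IRQF_PERCPU so they can never be moved into a threaded handler. A minimal registration sketch with invented names:

#include <linux/interrupt.h>

static irqreturn_t demo_ipi_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_request_ipi(unsigned int virq)
{
        return request_irq(virq, demo_ipi_handler,
                           IRQF_PERCPU | IRQF_NO_THREAD, "demo IPI", NULL);
}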
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 31853008b41..bcc3cb48a44 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -2,4 +2,4 @@ obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
obj-y += opal-rtc.o opal-nvram.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o
+obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 4a3f46d8533..3bb07e5e43c 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -99,3 +99,11 @@ OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL);
OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW);
OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL);
OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET);
+OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA);
+OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA);
+OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB);
+OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT);
+OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
+OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
+OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
new file mode 100644
index 00000000000..f31162cfdaa
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -0,0 +1,1330 @@
+/*
+ * Support PCI/PCIe on PowerNV platforms
+ *
+ * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/opal.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+struct resource_wrap {
+ struct list_head link;
+ resource_size_t size;
+ resource_size_t align;
+ struct pci_dev *dev; /* Set if it's a device */
+ struct pci_bus *bus; /* Set if it's a bridge */
+};
+
+static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
+ struct va_format *vaf)
+{
+ char pfix[32];
+
+ if (pe->pdev)
+ strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
+ else
+ sprintf(pfix, "%04x:%02x ",
+ pci_domain_nr(pe->pbus), pe->pbus->number);
+ return printk("pci %s%s: [PE# %.3d] %pV", level, pfix, pe->pe_number, vaf);
+}
+
+#define define_pe_printk_level(func, kern_level) \
+static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ int r; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ r = __pe_printk(kern_level, pe, &vaf); \
+ va_end(args); \
+ \
+ return r; \
+} \
+
+define_pe_printk_level(pe_err, KERN_ERR);
+define_pe_printk_level(pe_warn, KERN_WARNING);
+define_pe_printk_level(pe_info, KERN_INFO);
+
+
+/* Calculate resource usage & alignment requirement of a single
+ * device. This will also assign all resources within the device
+ * for a given type starting at 0 for the biggest one and then
+ * assigning in decreasing order of size.
+ */
+static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
+ resource_size_t *size,
+ resource_size_t *align)
+{
+ resource_size_t start;
+ struct resource *r;
+ int i;
+
+ pr_devel(" -> CDR %s\n", pci_name(dev));
+
+ *size = *align = 0;
+
+ /* Clear the resources out and mark them all unset */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & flags))
+ continue;
+ if (r->start) {
+ r->end -= r->start;
+ r->start = 0;
+ }
+ r->flags |= IORESOURCE_UNSET;
+ }
+
+ /* We currently keep all memory resources together, we
+ * will handle prefetch & 64-bit separately in the future
+ * but for now we stick everybody in M32
+ */
+ start = 0;
+ for (;;) {
+ resource_size_t max_size = 0;
+ int max_no = -1;
+
+ /* Find next biggest resource */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_UNSET) ||
+ !(r->flags & flags))
+ continue;
+ if (resource_size(r) > max_size) {
+ max_size = resource_size(r);
+ max_no = i;
+ }
+ }
+ if (max_no < 0)
+ break;
+ r = &dev->resource[max_no];
+ if (max_size > *align)
+ *align = max_size;
+ *size += max_size;
+ r->start = start;
+ start += max_size;
+ r->end = r->start + max_size - 1;
+ r->flags &= ~IORESOURCE_UNSET;
+ pr_devel(" -> R%d %016llx..%016llx\n",
+ max_no, r->start, r->end);
+ }
+ pr_devel(" <- CDR %s size=%llx align=%llx\n",
+ pci_name(dev), *size, *align);
+}
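/*
 * Worked example of the biggest-first layout computed above (illustrative,
 * not part of the patch), assuming power-of-two BAR sizes as PCI requires:
 * BARs of 1 MB, 64 KB and 4 KB are placed at 0x000000, 0x100000 and
 * 0x110000 respectively, so each BAR stays naturally aligned, *size ends
 * up 0x111000 and *align is the largest BAR, 1 MB.
 */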
+
+/* Allocate a resource "wrap" for a given device or bridge and
+ * insert it at the right position in the sorted list
+ */
+static void __devinit pnv_ioda_add_wrap(struct list_head *list,
+ struct pci_bus *bus,
+ struct pci_dev *dev,
+ resource_size_t size,
+ resource_size_t align)
+{
+ struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
+
+ w->size = size;
+ w->align = align;
+ w->dev = dev;
+ w->bus = bus;
+
+ list_for_each_entry(w1, list, link) {
+ if (w1->align < align) {
+ list_add_tail(&w->link, &w1->link);
+ return;
+ }
+ }
+ list_add_tail(&w->link, list);
+}
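/*
 * Illustrative sketch (not part of the patch) of the sorted-insert idiom
 * used by pnv_ioda_add_wrap() above and pnv_ioda_link_pe_by_weight() below:
 * walk the list until an entry with a smaller key is found and insert in
 * front of it, keeping the list in descending key order. Uses the
 * <linux/list.h> helpers; the "demo" names are invented.
 */
struct demo_item {
        struct list_head link;
        unsigned int key;
};

static void demo_insert_sorted(struct list_head *list, struct demo_item *item)
{
        struct demo_item *cur;

        list_for_each_entry(cur, list, link) {
                if (cur->key < item->key) {
                        /* list_add_tail() on cur->link places item just before cur */
                        list_add_tail(&item->link, &cur->link);
                        return;
                }
        }
        list_add_tail(&item->link, list);       /* smallest key so far: append */
}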
+
+/* Offset device resources of a given type */
+static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
+ unsigned int flags,
+ resource_size_t offset)
+{
+ struct resource *r;
+ int i;
+
+ pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
+
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ r = &dev->resource[i];
+ if (r->flags & flags) {
+ dev->resource[i].start += offset;
+ dev->resource[i].end += offset;
+ }
+ }
+
+ pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
+}
+
+/* Offset bus resources (& all children) of a given type */
+static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
+ unsigned int flags,
+ resource_size_t offset)
+{
+ struct resource *r;
+ struct pci_dev *dev;
+ struct pci_bus *cbus;
+ int i;
+
+ pr_devel(" -> OBR %s [%x] +%016llx\n",
+ bus->self ? pci_name(bus->self) : "root", flags, offset);
+
+ for (i = 0; i < 2; i++) {
+ r = bus->resource[i];
+ if (r && (r->flags & flags)) {
+ bus->resource[i]->start += offset;
+ bus->resource[i]->end += offset;
+ }
+ }
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ pnv_ioda_offset_dev(dev, flags, offset);
+ list_for_each_entry(cbus, &bus->children, node)
+ pnv_ioda_offset_bus(cbus, flags, offset);
+
+ pr_devel(" <- OBR %s [%x]\n",
+ bus->self ? pci_name(bus->self) : "root", flags);
+}
+
+/* This is the guts of our IODA resource allocation. This is called
+ * recursively for each bus in the system. It calculates all the
+ * necessary sizes and requirements for children and assigns them
+ * resources such that:
+ *
+ * - Each function fits in its own contiguous set of IO/M32
+ * segments
+ *
+ * - All segments behind a P2P bridge are contiguous and obey
+ * alignment constraints of those bridges
+ */
+static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
+ resource_size_t *size,
+ resource_size_t *align)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ resource_size_t dev_size, dev_align, start;
+ resource_size_t min_align, min_balign;
+ struct pci_dev *cdev;
+ struct pci_bus *cbus;
+ struct list_head head;
+ struct resource_wrap *w;
+ unsigned int bres;
+
+ *size = *align = 0;
+
+ pr_devel("-> CBR %s [%x]\n",
+ bus->self ? pci_name(bus->self) : "root", flags);
+
+ /* Calculate alignment requirements based on the type
+ * of resource we are working on
+ */
+ if (flags & IORESOURCE_IO) {
+ bres = 0;
+ min_align = phb->ioda.io_segsize;
+ min_balign = 0x1000;
+ } else {
+ bres = 1;
+ min_align = phb->ioda.m32_segsize;
+ min_balign = 0x100000;
+ }
+
+ /* Gather all our children resources ordered by alignment */
+ INIT_LIST_HEAD(&head);
+
+ /* - Busses */
+ list_for_each_entry(cbus, &bus->children, node) {
+ pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
+ pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
+ }
+
+ /* - Devices */
+ list_for_each_entry(cdev, &bus->devices, bus_list) {
+ pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
+ /* Align them to segment size */
+ if (dev_align < min_align)
+ dev_align = min_align;
+ pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
+ }
+ if (list_empty(&head))
+ goto empty;
+
+ /* Now we can do two things: assign offsets to them within that
+ * level and get our total alignment & size requirements. The
+ * assignment algorithm is going to be uber-trivial for now, we
+ * can try to be smarter later at filling out holes.
+ */
+ start = bus->self ? 0 : bus->resource[bres]->start;
+
+ /* Don't hand out IO 0 */
+ if ((flags & IORESOURCE_IO) && !bus->self)
+ start += 0x1000;
+
+ while(!list_empty(&head)) {
+ w = list_first_entry(&head, struct resource_wrap, link);
+ list_del(&w->link);
+ if (w->size) {
+ if (start) {
+ start = ALIGN(start, w->align);
+ if (w->dev)
+ pnv_ioda_offset_dev(w->dev,flags,start);
+ else if (w->bus)
+ pnv_ioda_offset_bus(w->bus,flags,start);
+ }
+ if (w->align > *align)
+ *align = w->align;
+ }
+ start += w->size;
+ kfree(w);
+ }
+ *size = start;
+
+ /* Align and setup bridge resources */
+ *align = max_t(resource_size_t, *align,
+ max_t(resource_size_t, min_align, min_balign));
+ *size = ALIGN(*size,
+ max_t(resource_size_t, min_align, min_balign));
+ empty:
+ /* Only setup P2P's, not the PHB itself */
+ if (bus->self) {
+ WARN_ON(bus->resource[bres] == NULL);
+ bus->resource[bres]->start = 0;
+ bus->resource[bres]->flags = (*size) ? flags : 0;
+ bus->resource[bres]->end = (*size) ? (*size - 1) : 0;
+
+ /* Clear prefetch bus resources for now */
+ bus->resource[2]->flags = 0;
+ }
+
+ pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
+ bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
+}
+
+static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
+{
+ struct device_node *np;
+
+ np = pci_device_to_OF_node(dev);
+ if (!np)
+ return NULL;
+ return PCI_DN(np);
+}
+
+static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ unsigned int pe, i;
+ resource_size_t pos;
+ struct resource io_res;
+ struct resource m32_res;
+ struct pci_bus_region region;
+ int rc;
+
+ /* Anything not referenced in the device-tree gets PE#0 */
+ pe = pdn ? pdn->pe_number : 0;
+
+ /* Calculate the device min/max */
+ io_res.start = m32_res.start = (resource_size_t)-1;
+ io_res.end = m32_res.end = 0;
+ io_res.flags = IORESOURCE_IO;
+ m32_res.flags = IORESOURCE_MEM;
+
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ struct resource *r = NULL;
+ if (dev->resource[i].flags & IORESOURCE_IO)
+ r = &io_res;
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ r = &m32_res;
+ if (!r)
+ continue;
+ if (dev->resource[i].start < r->start)
+ r->start = dev->resource[i].start;
+ if (dev->resource[i].end > r->end)
+ r->end = dev->resource[i].end;
+ }
+
+ /* Setup IO segments */
+ if (io_res.start < io_res.end) {
+ pcibios_resource_to_bus(dev, &region, &io_res);
+ pos = region.start;
+ i = pos / phb->ioda.io_segsize;
+ while(i < phb->ioda.total_pe && pos <= region.end) {
+ if (phb->ioda.io_segmap[i]) {
+ pr_err("%s: Trying to use IO seg #%d which is"
+ " already used by PE# %d\n",
+ pci_name(dev), i,
+ phb->ioda.io_segmap[i]);
+ /* XXX DO SOMETHING TO DISABLE DEVICE ? */
+ break;
+ }
+ phb->ioda.io_segmap[i] = pe;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
+ OPAL_IO_WINDOW_TYPE,
+ 0, i);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("%s: OPAL error %d setting up mapping"
+ " for IO seg# %d\n",
+ pci_name(dev), rc, i);
+ /* XXX DO SOMETHING TO DISABLE DEVICE ? */
+ break;
+ }
+ pos += phb->ioda.io_segsize;
+ i++;
+ };
+ }
+
+ /* Setup M32 segments */
+ if (m32_res.start < m32_res.end) {
+ pcibios_resource_to_bus(dev, &region, &m32_res);
+ pos = region.start;
+ i = pos / phb->ioda.m32_segsize;
+ while(i < phb->ioda.total_pe && pos <= region.end) {
+ if (phb->ioda.m32_segmap[i]) {
+ pr_err("%s: Trying to use M32 seg #%d which is"
+ " already used by PE# %d\n",
+ pci_name(dev), i,
+ phb->ioda.m32_segmap[i]);
+ /* XXX DO SOMETHING TO DISABLE DEVICE ? */
+ break;
+ }
+ phb->ioda.m32_segmap[i] = pe;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
+ OPAL_M32_WINDOW_TYPE,
+ 0, i);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("%s: OPAL error %d setting up mapping"
+ " for M32 seg# %d\n",
+ pci_name(dev), rc, i);
+ /* XXX DO SOMETHING TO DISABLE DEVICE ? */
+ break;
+ }
+ pos += phb->ioda.m32_segsize;
+ i++;
+ }
+ }
+}
+
+/* Check if a resource still fits in the total IO or M32 range
+ * for a given PHB
+ */
+static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
+ struct resource *r)
+{
+ struct resource *bounds;
+
+ if (r->flags & IORESOURCE_IO)
+ bounds = &hose->io_resource;
+ else if (r->flags & IORESOURCE_MEM)
+ bounds = &hose->mem_resources[0];
+ else
+ return 1;
+
+ if (r->start >= bounds->start && r->end <= bounds->end)
+ return 1;
+ r->flags = 0;
+ return 0;
+}
+
+static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pci_bus *cbus;
+ struct pci_dev *cdev;
+ unsigned int i;
+
+ /* We used to clear all device enables here. However it looks like
+ * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
+ * and shoot fatal errors to the PHB which in turns fences itself
+ * and we can't recover from that ... yet. So for now, let's leave
+ * the enables as-is and hope for the best.
+ */
+
+ /* Check if bus resources fit in our IO or M32 range */
+ for (i = 0; bus->self && (i < 2); i++) {
+ struct resource *r = bus->resource[i];
+ if (r && !pnv_ioda_resource_fit(hose, r))
+ pr_err("%s: Bus %d resource %d disabled, no room\n",
+ pci_name(bus->self), bus->number, i);
+ }
+
+ /* Update self if it's not a PHB */
+ if (bus->self)
+ pci_setup_bridge(bus);
+
+ /* Update child devices */
+ list_for_each_entry(cdev, &bus->devices, bus_list) {
+ /* Check if resource fits, if not, disabled it */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ struct resource *r = &cdev->resource[i];
+ if (!pnv_ioda_resource_fit(hose, r))
+ pr_err("%s: Resource %d disabled, no room\n",
+ pci_name(cdev), i);
+ }
+
+ /* Assign segments */
+ pnv_ioda_setup_pe_segments(cdev);
+
+ /* Update HW BARs */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ pci_update_resource(cdev, i);
+ }
+
+ /* Update child busses */
+ list_for_each_entry(cbus, &bus->children, node)
+ pnv_ioda_update_resources(cbus);
+}
+
+static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
+{
+ unsigned long pe;
+
+ do {
+ pe = find_next_zero_bit(phb->ioda.pe_alloc,
+ phb->ioda.total_pe, 0);
+ if (pe >= phb->ioda.total_pe)
+ return IODA_INVALID_PE;
+ } while(test_and_set_bit(pe, phb->ioda.pe_alloc));
+
+ phb->ioda.pe_array[pe].pe_number = pe;
+ return pe;
+}
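/*
 * Illustrative sketch (not part of the patch) of the lock-free allocation
 * idiom in pnv_ioda_alloc_pe() above: scan for a clear bit, then claim it
 * atomically with test_and_set_bit(); if another CPU won the race, rescan.
 * Uses the <linux/bitops.h> helpers; the "demo" names are invented.
 */
static int demo_alloc_id(unsigned long *bitmap, unsigned int nr_ids)
{
        unsigned long id;

        do {
                id = find_next_zero_bit(bitmap, nr_ids, 0);
                if (id >= nr_ids)
                        return -1;                      /* pool exhausted */
        } while (test_and_set_bit(id, bitmap));         /* raced: try again */

        return id;
}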
+
+static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
+{
+ WARN_ON(phb->ioda.pe_array[pe].pdev);
+
+ memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
+ clear_bit(pe, phb->ioda.pe_alloc);
+}
+
+/* Currently those 2 are only used when MSIs are enabled, this will change
+ * but in the meantime, we need to protect them to avoid warnings
+ */
+#ifdef CONFIG_PCI_MSI
+static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+ if (!pdn)
+ return NULL;
+ if (pdn->pe_number == IODA_INVALID_PE)
+ return NULL;
+ return &phb->ioda.pe_array[pdn->pe_number];
+}
+
+static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
+{
+ struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
+
+ while (!pe && dev->bus->self) {
+ dev = dev->bus->self;
+ pe = __pnv_ioda_get_one_pe(dev);
+ if (pe)
+ pe = pe->bus_pe;
+ }
+ return pe;
+}
+#endif /* CONFIG_PCI_MSI */
+
+static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ struct pci_dev *parent;
+ uint8_t bcomp, dcomp, fcomp;
+ long rc, rid_end, rid;
+
+ /* Bus validation ? */
+ if (pe->pbus) {
+ int count;
+
+ dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
+ parent = pe->pbus->self;
+ count = pe->pbus->subordinate - pe->pbus->secondary + 1;
+ switch(count) {
+ case 1: bcomp = OpalPciBusAll; break;
+ case 2: bcomp = OpalPciBus7Bits; break;
+ case 4: bcomp = OpalPciBus6Bits; break;
+ case 8: bcomp = OpalPciBus5Bits; break;
+ case 16: bcomp = OpalPciBus4Bits; break;
+ case 32: bcomp = OpalPciBus3Bits; break;
+ default:
+ pr_err("%s: Number of subordinate busses %d"
+ " unsupported\n",
+ pci_name(pe->pbus->self), count);
+ /* Do an exact match only */
+ bcomp = OpalPciBusAll;
+ }
+ rid_end = pe->rid + (count << 8);
+ } else {
+ parent = pe->pdev->bus->self;
+ bcomp = OpalPciBusAll;
+ dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
+ rid_end = pe->rid + 1;
+ }
+
+ /* Associate PE in PELT */
+ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+ bcomp, dcomp, fcomp, OPAL_MAP_PE);
+ if (rc) {
+ pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+ return -ENXIO;
+ }
+ opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+ /* Add to all parents PELT-V */
+ while (parent) {
+ struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+ if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+ rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+ pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
+ /* XXX What to do in case of error ? */
+ }
+ parent = parent->bus->self;
+ }
+ /* Setup reverse map */
+ for (rid = pe->rid; rid < rid_end; rid++)
+ phb->ioda.pe_rmap[rid] = pe->pe_number;
+
+ /* Setup one MVTs on IODA1 */
+ if (phb->type == PNV_PHB_IODA1) {
+ pe->mve_number = pe->pe_number;
+ rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
+ pe->pe_number);
+ if (rc) {
+ pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+ rc, pe->mve_number);
+ pe->mve_number = -1;
+ } else {
+ rc = opal_pci_set_mve_enable(phb->opal_id,
+ pe->mve_number, OPAL_ENABLE_MVE);
+ if (rc) {
+ pe_err(pe, "OPAL error %ld enabling MVE %d\n",
+ rc, pe->mve_number);
+ pe->mve_number = -1;
+ }
+ }
+ } else if (phb->type == PNV_PHB_IODA2)
+ pe->mve_number = 0;
+
+ return 0;
+}
+
+static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ struct pnv_ioda_pe *lpe;
+
+ list_for_each_entry(lpe, &phb->ioda.pe_list, link) {
+ if (lpe->dma_weight < pe->dma_weight) {
+ list_add_tail(&pe->link, &lpe->link);
+ return;
+ }
+ }
+ list_add_tail(&pe->link, &phb->ioda.pe_list);
+}
+
+static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
+{
+ /* This is quite simplistic. The "base" weight of a device
+ * is 10. 0 means no DMA is to be accounted for it.
+ */
+
+ /* If it's a bridge, no DMA */
+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return 0;
+
+ /* Reduce the weight of slow USB controllers */
+ if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
+ dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
+ dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ return 3;
+
+ /* Increase the weight of RAID (includes Obsidian) */
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
+ return 15;
+
+ /* Default */
+ return 10;
+}
+
+static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+
+ if (!pdn) {
+ pr_err("%s: Device tree node not associated properly\n",
+ pci_name(dev));
+ return NULL;
+ }
+ if (pdn->pe_number != IODA_INVALID_PE)
+ return NULL;
+
+ /* PE#0 has been pre-set */
+ if (dev->bus->number == 0)
+ pe_num = 0;
+ else
+ pe_num = pnv_ioda_alloc_pe(phb);
+ if (pe_num == IODA_INVALID_PE) {
+ pr_warning("%s: Not enough PE# available, disabling device\n",
+ pci_name(dev));
+ return NULL;
+ }
+
+ /* NOTE: We get only one ref to the pci_dev for the pdn, not for the
+ * pointer in the PE data structure, both should be destroyed at the
+ * same time. However, this needs to be looked at more closely again
+ * once we actually start removing things (Hotplug, SR-IOV, ...)
+ *
+ * At some point we want to remove the PDN completely anyways
+ */
+ pe = &phb->ioda.pe_array[pe_num];
+ pci_dev_get(dev);
+ pdn->pcidev = dev;
+ pdn->pe_number = pe_num;
+ pe->pdev = dev;
+ pe->pbus = NULL;
+ pe->tce32_seg = -1;
+ pe->mve_number = -1;
+ pe->rid = dev->bus->number << 8 | pdn->devfn;
+
+ pe_info(pe, "Associated device to PE\n");
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ if (pe_num)
+ pnv_ioda_free_pe(phb, pe_num);
+ pdn->pe_number = IODA_INVALID_PE;
+ pe->pdev = NULL;
+ pci_dev_put(dev);
+ return NULL;
+ }
+
+ /* Assign a DMA weight to the device */
+ pe->dma_weight = pnv_ioda_dma_weight(dev);
+ if (pe->dma_weight != 0) {
+ phb->ioda.dma_weight += pe->dma_weight;
+ phb->ioda.dma_pe_count++;
+ }
+
+ /* Link the PE */
+ pnv_ioda_link_pe_by_weight(phb, pe);
+
+ return pe;
+}
+
+static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+ if (pdn == NULL) {
+ pr_warn("%s: No device node associated with device !\n",
+ pci_name(dev));
+ continue;
+ }
+ pci_dev_get(dev);
+ pdn->pcidev = dev;
+ pdn->pe_number = pe->pe_number;
+ pe->dma_weight += pnv_ioda_dma_weight(dev);
+ if (dev->subordinate)
+ pnv_ioda_setup_same_PE(dev->subordinate, pe);
+ }
+}
+
+static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
+ struct pnv_ioda_pe *ppe)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_bus *bus = dev->subordinate;
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+
+ if (!bus) {
+ pr_warning("%s: Bridge without a subordinate bus !\n",
+ pci_name(dev));
+ return;
+ }
+ pe_num = pnv_ioda_alloc_pe(phb);
+ if (pe_num == IODA_INVALID_PE) {
+ pr_warning("%s: Not enough PE# available, disabling bus\n",
+ pci_name(dev));
+ return;
+ }
+
+ pe = &phb->ioda.pe_array[pe_num];
+ ppe->bus_pe = pe;
+ pe->pbus = bus;
+ pe->pdev = NULL;
+ pe->tce32_seg = -1;
+ pe->mve_number = -1;
+ pe->rid = bus->secondary << 8;
+ pe->dma_weight = 0;
+
+ pe_info(pe, "Secondary busses %d..%d associated with PE\n",
+ bus->secondary, bus->subordinate);
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ if (pe_num)
+ pnv_ioda_free_pe(phb, pe_num);
+ pe->pbus = NULL;
+ return;
+ }
+
+ /* Associate it with all child devices */
+ pnv_ioda_setup_same_PE(bus, pe);
+
+ /* Account for one DMA PE if at least one DMA capable device exists
+ * below the bridge
+ */
+ if (pe->dma_weight != 0) {
+ phb->ioda.dma_weight += pe->dma_weight;
+ phb->ioda.dma_pe_count++;
+ }
+
+ /* Link the PE */
+ pnv_ioda_link_pe_by_weight(phb, pe);
+}
+
+static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ struct pnv_ioda_pe *pe;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pe = pnv_ioda_setup_dev_PE(dev);
+ if (pe == NULL)
+ continue;
+ /* Leaving the PCIe domain ... single PE# */
+ if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ pnv_ioda_setup_bus_PE(dev, pe);
+ else if (dev->subordinate)
+ pnv_ioda_setup_PEs(dev->subordinate);
+ }
+}
+
+static void __devinit pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb,
+ struct pci_dev *dev)
+{
+ /* We delay DMA setup until after we have assigned all PE# */
+}
+
+static void __devinit pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
+ struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
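+ /* Point every device below this bus at the PE's 32-bit TCE table */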
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ if (dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ }
+}
+
+static void __devinit pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe,
+ unsigned int base,
+ unsigned int segs)
+{
+ struct page *tce_mem = NULL;
+ const __be64 *swinvp;
+ struct iommu_table *tbl;
+ unsigned int i;
+ int64_t rc;
+ void *addr;
+
+ /* 256M DMA window, 4K TCE pages, 8 bytes TCE */
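+ /* That is 64K TCE entries of 8 bytes, 512KB of table per segment */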
+#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
+
+ /* XXX FIXME: Handle 64-bit only DMA devices */
+ /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
+ /* XXX FIXME: Allocate multi-level tables on PHB3 */
+
+ /* We shouldn't already have a 32-bit DMA associated */
+ if (WARN_ON(pe->tce32_seg >= 0))
+ return;
+
+ /* Grab a 32-bit TCE table */
+ pe->tce32_seg = base;
+ pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
+ (base << 28), ((base + segs) << 28) - 1);
+
+ /* XXX Currently, we allocate one big contiguous table for the
+ * TCEs. We only really need one chunk per 256M of TCE space
+ * (i.e. per segment) but that's an optimization for later; it
+ * requires some added smarts with our get/put_tce implementation
+ */
+ tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
+ get_order(TCE32_TABLE_SIZE * segs));
+ if (!tce_mem) {
+ pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
+ goto fail;
+ }
+ addr = page_address(tce_mem);
+ memset(addr, 0, TCE32_TABLE_SIZE * segs);
+
+ /* Configure HW */
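+ /* Map each 256M segment to its own slice of the contiguous TCE table */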
+ for (i = 0; i < segs; i++) {
+ rc = opal_pci_map_pe_dma_window(phb->opal_id,
+ pe->pe_number,
+ base + i, 1,
+ __pa(addr) + TCE32_TABLE_SIZE * i,
+ TCE32_TABLE_SIZE, 0x1000);
+ if (rc) {
+ pe_err(pe, " Failed to configure 32-bit TCE table,"
+ " err %ld\n", rc);
+ goto fail;
+ }
+ }
+
+ /* Setup linux iommu table */
+ tbl = &pe->tce32_table;
+ pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
+ base << 28);
+
+ /* OPAL variant of P7IOC SW invalidated TCEs */
+ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
+ if (swinvp) {
+ /* We need a couple more fields -- an address and a data
+ * value to OR in. Since the bus is only printed out on table free
+ * errors, and on the first pass the data will be a relative
+ * bus number, print that out instead.
+ */
+ tbl->it_busno = 0;
+ tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
+ | TCE_PCI_SWINV_PAIR;
+ }
+ iommu_init_table(tbl, phb->hose->node);
+
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
+ return;
+ fail:
+ /* XXX Failure: Try to fallback to 64-bit only ? */
+ if (pe->tce32_seg >= 0)
+ pe->tce32_seg = -1;
+ if (tce_mem)
+ __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
+}
+
+static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
+{
+ struct pci_controller *hose = phb->hose;
+ unsigned int residual, remaining, segs, tw, base;
+ struct pnv_ioda_pe *pe;
+
+ /* If we have more PE# than segments available, hand out one
+ * per PE until we run out and let the rest fail. If not,
+ * then we assign at least one segment per PE, plus more based
+ * on the number of devices under that PE
+ */
+ if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
+ residual = 0;
+ else
+ residual = phb->ioda.tce32_count -
+ phb->ioda.dma_pe_count;
+
+ pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
+ hose->global_number, phb->ioda.tce32_count);
+ pr_info("PCI: %d PE# for a total weight of %d\n",
+ phb->ioda.dma_pe_count, phb->ioda.dma_weight);
+
+ /* Walk our PE list and configure their DMA segments, handing
+ * each one base segment plus any residual segments based on
+ * weight
+ */
+ remaining = phb->ioda.tce32_count;
+ tw = phb->ioda.dma_weight;
+ base = 0;
+ list_for_each_entry(pe, &phb->ioda.pe_list, link) {
+ if (!pe->dma_weight)
+ continue;
+ if (!remaining) {
+ pe_warn(pe, "No DMA32 resources available\n");
+ continue;
+ }
+ segs = 1;
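+ /* Extra segments are handed out in proportion to this PE's share
+ * of the total weight, rounded to nearest. For example, weight 3
+ * out of a total of 12 with 8 residual segments yields
+ * 1 + (3*8 + 12/2)/12 = 3 segments.
+ */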
+ if (residual) {
+ segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
+ if (segs > remaining)
+ segs = remaining;
+ }
+ pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
+ pe->dma_weight, segs);
+ pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
+ remaining -= segs;
+ base += segs;
+ }
+}
+
+#ifdef CONFIG_PCI_MSI
+static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg)
+{
+ struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+ unsigned int xive_num = hwirq - phb->msi_base;
+ uint64_t addr64;
+ uint32_t addr32, data;
+ int rc;
+
+ /* No PE assigned ? bail out ... no MSI for you ! */
+ if (pe == NULL)
+ return -ENXIO;
+
+ /* Check if we have an MVE */
+ if (pe->mve_number < 0)
+ return -ENXIO;
+
+ /* Assign XIVE to PE */
+ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
+ if (rc) {
+ pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
+ pci_name(dev), rc, xive_num);
+ return -EIO;
+ }
+
+ if (is_64) {
+ rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
+ &addr64, &data);
+ if (rc) {
+ pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
+ pci_name(dev), rc);
+ return -EIO;
+ }
+ msg->address_hi = addr64 >> 32;
+ msg->address_lo = addr64 & 0xfffffffful;
+ } else {
+ rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
+ &addr32, &data);
+ if (rc) {
+ pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
+ pci_name(dev), rc);
+ return -EIO;
+ }
+ msg->address_hi = 0;
+ msg->address_lo = addr32;
+ }
+ msg->data = data;
+
+ pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
+ " address=%x_%08x data=%x PE# %d\n",
+ pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
+ msg->address_hi, msg->address_lo, data, pe->pe_number);
+
+ return 0;
+}
+
+static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
+{
+ unsigned int bmap_size;
+ const __be32 *prop = of_get_property(phb->hose->dn,
+ "ibm,opal-msi-ranges", NULL);
+ if (!prop) {
+ /* BML Fallback */
+ prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
+ }
+ if (!prop)
+ return;
+
+ phb->msi_base = be32_to_cpup(prop);
+ phb->msi_count = be32_to_cpup(prop + 1);
+ bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
+ phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
+ if (!phb->msi_map) {
+ pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
+ phb->hose->global_number);
+ return;
+ }
+ phb->msi_setup = pnv_pci_ioda_msi_setup;
+ phb->msi32_support = 1;
+ pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
+ phb->msi_count, phb->msi_base);
+}
+#else
+static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
+#endif /* CONFIG_PCI_MSI */
+
+/* This is the starting point of our IODA specific resource
+ * allocation process
+ */
+static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose)
+{
+ resource_size_t size, align;
+ struct pci_bus *child;
+
+ /* Associate PEs per function */
+ pnv_ioda_setup_PEs(hose->bus);
+
+ /* Calculate all resources */
+ pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align);
+ pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align);
+
+ /* Apply them to HW */
+ pnv_ioda_update_resources(hose->bus);
+
+ /* Setup DMA */
+ pnv_ioda_setup_dma(hose->private_data);
+
+ /* Configure PCI Express settings */
+ list_for_each_entry(child, &hose->bus->children, node) {
+ struct pci_dev *self = child->self;
+ if (!self)
+ continue;
+ pcie_bus_configure_settings(child, self->pcie_mpss);
+ }
+}
+
+/* Prevent enabling devices for which we couldn't properly
+ * assign a PE
+ */
+static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
+{
+ struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ return -EINVAL;
+ return 0;
+}
+
+static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
+ u32 devfn)
+{
+ return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
+}
+
+void __init pnv_pci_init_ioda1_phb(struct device_node *np)
+{
+ struct pci_controller *hose;
+ static int primary = 1;
+ struct pnv_phb *phb;
+ unsigned long size, m32map_off, iomap_off, pemap_off;
+ const u64 *prop64;
+ u64 phb_id;
+ void *aux;
+ long rc;
+
+ pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);
+
+ prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
+ if (!prop64) {
+ pr_err(" Missing \"ibm,opal-phbid\" property !\n");
+ return;
+ }
+ phb_id = be64_to_cpup(prop64);
+ pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
+
+ phb = alloc_bootmem(sizeof(struct pnv_phb));
+ if (phb) {
+ memset(phb, 0, sizeof(struct pnv_phb));
+ phb->hose = hose = pcibios_alloc_controller(np);
+ }
+ if (!phb || !phb->hose) {
+ pr_err("PCI: Failed to allocate PCI controller for %s\n",
+ np->full_name);
+ return;
+ }
+
+ spin_lock_init(&phb->lock);
+ /* XXX Use device-tree */
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+ hose->private_data = phb;
+ phb->opal_id = phb_id;
+ phb->type = PNV_PHB_IODA1;
+
+ /* Detect specific models for error handling */
+ if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
+ phb->model = PNV_PHB_MODEL_P7IOC;
+ else
+ phb->model = PNV_PHB_MODEL_UNKNOWN;
+
+ /* We parse "ranges" now since we need to deduce the register base
+ * from the IO base
+ */
+ pci_process_bridge_OF_ranges(phb->hose, np, primary);
+ primary = 0;
+
+ /* Magic formula from Milton */
+ phb->regs = of_iomap(np, 0);
+ if (phb->regs == NULL)
+ pr_err(" Failed to map registers !\n");
+
+ /* XXX This is hack-a-thon. This needs to be changed so that:
+ * - we obtain stuff like PE# etc... from device-tree
+ * - we properly re-allocate M32 ourselves
+ * (the OFW one isn't very good)
+ */
+
+ /* Initialize more IODA stuff */
+ phb->ioda.total_pe = 128;
+
+ phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
+ /* OFW has already taken the top 64k of M32 space (MSI space) off */
+ phb->ioda.m32_size += 0x10000;
+
+ phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
+ phb->ioda.m32_pci_base = hose->mem_resources[0].start -
+ hose->pci_mem_offset;
+ phb->ioda.io_size = hose->pci_io_size;
+ phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
+ phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
+
+ /* Allocate aux data & arrays */
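+ /* One bootmem block holds the PE bitmap, the M32/IO segment maps and the PE array */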
+ size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
+ m32map_off = size;
+ size += phb->ioda.total_pe;
+ iomap_off = size;
+ size += phb->ioda.total_pe;
+ pemap_off = size;
+ size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
+ aux = alloc_bootmem(size);
+ memset(aux, 0, size);
+ phb->ioda.pe_alloc = aux;
+ phb->ioda.m32_segmap = aux + m32map_off;
+ phb->ioda.io_segmap = aux + iomap_off;
+ phb->ioda.pe_array = aux + pemap_off;
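+ /* Mark PE#0 as allocated, it is pre-assigned to devices on bus 0 */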
+ set_bit(0, phb->ioda.pe_alloc);
+
+ INIT_LIST_HEAD(&phb->ioda.pe_list);
+
+ /* Calculate how many 32-bit TCE segments we have */
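+ /* One 256M TCE segment per 256M of PCI space below the M32 window base */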
+ phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
+
+ /* Clear unusable m64 */
+ hose->mem_resources[1].flags = 0;
+ hose->mem_resources[1].start = 0;
+ hose->mem_resources[1].end = 0;
+ hose->mem_resources[2].flags = 0;
+ hose->mem_resources[2].start = 0;
+ hose->mem_resources[2].end = 0;
+
+#if 0
+ rc = opal_pci_set_phb_mem_window(opal->phb_id,
+ window_type,
+ window_num,
+ starting_real_address,
+ starting_pci_address,
+ segment_size);
+#endif
+
+ pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
+ phb->ioda.total_pe,
+ phb->ioda.m32_size, phb->ioda.m32_segsize,
+ phb->ioda.io_size, phb->ioda.io_segsize);
+
+ if (phb->regs) {
+ pr_devel(" BUID = 0x%016llx\n", in_be64(phb->regs + 0x100));
+ pr_devel(" PHB2_CR = 0x%016llx\n", in_be64(phb->regs + 0x160));
+ pr_devel(" IO_BAR = 0x%016llx\n", in_be64(phb->regs + 0x170));
+ pr_devel(" IO_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x178));
+ pr_devel(" IO_SAR = 0x%016llx\n", in_be64(phb->regs + 0x180));
+ pr_devel(" M32_BAR = 0x%016llx\n", in_be64(phb->regs + 0x190));
+ pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
+ pr_devel(" M32_SAR = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
+ }
+ phb->hose->ops = &pnv_pci_ops;
+
+ /* Setup RID -> PE mapping function */
+ phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
+
+ /* Setup TCEs */
+ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+
+ /* Setup MSI support */
+ pnv_pci_init_ioda_msis(phb);
+
+ /* We set both probe_only and PCI_REASSIGN_ALL_RSRC. This is an
+ * odd combination which essentially means that we skip all resource
+ * fixups and assignments in the generic code, and do it all
+ * ourselves here
+ */
+ pci_probe_only = 1;
+ ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
+ ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+
+ /* Reset IODA tables to a clean state */
+ rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
+ if (rc)
+ pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
+ opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
+}
+
+void __init pnv_pci_init_ioda_hub(struct device_node *np)
+{
+ struct device_node *phbn;
+ const u64 *prop64;
+ u64 hub_id;
+
+ pr_info("Probing IODA IO-Hub %s\n", np->full_name);
+
+ prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
+ if (!prop64) {
+ pr_err(" Missing \"ibm,opal-hubid\" property !\n");
+ return;
+ }
+ hub_id = be64_to_cpup(prop64);
+ pr_devel(" HUB-ID : 0x%016llx\n", hub_id);
+
+ /* Count child PHBs */
+ for_each_child_of_node(np, phbn) {
+ /* Look for IODA1 PHBs */
+ if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
+ pnv_pci_init_ioda1_phb(phbn);
+ }
+}
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 4c80f7c77d5..264967770c3 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -137,6 +137,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np,
phb->hose->private_data = phb;
phb->opal_id = phb_id;
phb->type = PNV_PHB_P5IOC2;
+ phb->model = PNV_PHB_MODEL_P5IOC2;
phb->regs = of_iomap(np, 0);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 85bb66d7f93..a70bc1e385e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -144,6 +144,112 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
}
#endif /* CONFIG_PCI_MSI */
+static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
+{
+ struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
+ int i;
+
+ pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);
+
+ pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus);
+ pr_info(" slotStatus = 0x%08x\n", data->slotStatus);
+ pr_info(" linkStatus = 0x%08x\n", data->linkStatus);
+ pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4);
+ pr_info(" sourceId = 0x%08x\n", data->sourceId);
+
+ pr_info(" errorClass = 0x%016llx\n", data->errorClass);
+ pr_info(" correlator = 0x%016llx\n", data->correlator);
+
+ pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr);
+ pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr);
+ pr_info(" lemFir = 0x%016llx\n", data->lemFir);
+ pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF = 0x%016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);
+
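+ /* Dump only the PEST entries that have a top bit set in PESTA or PESTB */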
+ for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
+ if ((data->pestA[i] >> 63) == 0 &&
+ (data->pestB[i] >> 63) == 0)
+ continue;
+ pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB = 0x%016llx\n", data->pestB[i]);
+ }
+}
+
+static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
+{
+ switch (phb->model) {
+ case PNV_PHB_MODEL_P7IOC:
+ pnv_pci_dump_p7ioc_diag_data(phb);
+ break;
+ default:
+ pr_warning("PCI %d: Can't decode this PHB diag data\n",
+ phb->hose->global_number);
+ }
+}
+
+static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
+{
+ unsigned long flags, rc;
+ int has_diag;
+
+ spin_lock_irqsave(&phb->lock, flags);
+
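+ /* Grab the PHB diag data first, then attempt to clear the freeze */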
+ rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
+ has_diag = (rc == OPAL_SUCCESS);
+
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ if (rc) {
+ pr_warning("PCI %d: Failed to clear EEH freeze state"
+ " for PE#%d, err %ld\n",
+ phb->hose->global_number, pe_no, rc);
+
+ /* For now, let's only display the diag buffer when we fail to clear
+ * the EEH status. We'll do more sensible things later when we have
+ * proper EEH support. We need to make sure we don't pollute ourselves
+ * with the normal errors generated when probing empty slots
+ */
+ if (has_diag)
+ pnv_pci_dump_phb_diag_data(phb);
+ else
+ pr_warning("PCI %d: No diag data available\n",
+ phb->hose->global_number);
+ }
+
+ spin_unlock_irqrestore(&phb->lock, flags);
+}
+
static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
u32 bdfn)
{
@@ -165,15 +271,8 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
}
cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
bdfn, pe_no, fstate);
- if (fstate != 0) {
- rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- if (rc) {
- pr_warning("PCI %d: Failed to clear EEH freeze state"
- " for PE#%d, err %lld\n",
- phb->hose->global_number, pe_no, rc);
- }
- }
+ if (fstate != 0)
+ pnv_pci_handle_eeh_config(phb, pe_no);
}
static int pnv_pci_read_config(struct pci_bus *bus,
@@ -257,12 +356,54 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
+
+static void pnv_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ unsigned long start, end, inc;
+
+ start = __pa(startp);
+ end = __pa(endp);
+
+ /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
+ if (tbl->it_busno) {
+ start <<= 12;
+ end <<= 12;
+ inc = 128 << 12;
+ start |= tbl->it_busno;
+ end |= tbl->it_busno;
+ }
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
+ }
+ /* Default (older HW) */
+ else
+ inc = 128;
+
+ end |= inc - 1; /* round up end to be different from start */
+
+ mb(); /* Ensure above stores are visible */
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
+ }
+ /* The iommu layer will do another mb() for us on build() and
+ * we don't care on free()
+ */
+}
+
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
u64 proto_tce;
- u64 *tcep;
+ u64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -270,25 +411,33 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tcep = ((u64 *)tbl->it_base) + index;
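+ /* Keep a pointer to the first TCE so the whole range can be invalidated below */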
+ tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+ rpn = __pa(uaddr) >> TCE_SHIFT;
- while (npages--) {
- /* can't move this out since we might cross LMB boundary */
- rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
- *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+ while (npages--)
+ *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);
+
+ /* Some implementations won't cache invalid TCEs and thus may not
+ * need that flush. We'll probably turn it_type into a bit mask
+ * of flags if that becomes the case
+ */
+ if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+ pnv_tce_invalidate(tbl, tces, tcep - 1);
- uaddr += TCE_PAGE_SIZE;
- tcep++;
- }
return 0;
}
static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
- u64 *tcep = ((u64 *)tbl->it_base) + index;
+ u64 *tcep, *tces;
+
+ tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
while (npages--)
*(tcep++) = 0;
+
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
+ pnv_tce_invalidate(tbl, tces, tcep - 1);
}
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
@@ -308,13 +457,14 @@ static struct iommu_table * __devinit
pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
struct iommu_table *tbl;
- const __be64 *basep;
+ const __be64 *basep, *swinvp;
const __be32 *sizep;
basep = of_get_property(hose->dn, "linux,tce-base", NULL);
sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL) {
- pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name);
+ pr_err("PCI: %s has missing tce entries !\n",
+ hose->dn->full_name);
return NULL;
}
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
@@ -323,6 +473,15 @@ pnv_pci_setup_bml_iommu(struct pci_controller *hose)
pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
be32_to_cpup(sizep), 0);
iommu_init_table(tbl, hose->node);
+
+ /* Deal with SW invalidated TCEs when needed (BML way) */
+ swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
+ NULL);
+ if (swinvp) {
+ tbl->it_busno = swinvp[1];
+ tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+ }
return tbl;
}
@@ -356,6 +515,13 @@ static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
+/* Fixup wrong class code in p7ioc root complex */
+static void __devinit pnv_p7ioc_rc_quirk(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
+
static int pnv_pci_probe_mode(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
@@ -400,12 +566,24 @@ void __init pnv_pci_init(void)
init_pci_config_tokens();
find_and_init_phbs();
#endif /* CONFIG_PPC_POWERNV_RTAS */
- } else {
- /* OPAL is here, do our normal stuff */
+ }
+ /* OPAL is here, do our normal stuff */
+ else {
+ int found_ioda = 0;
+
+ /* Look for IODA IO-Hubs. We don't support mixing IODA
+ * and p5ioc2 due to the need to change some global
+ * probing flags
+ */
+ for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+ pnv_pci_init_ioda_hub(np);
+ found_ioda = 1;
+ }
/* Look for p5ioc2 IO-Hubs */
- for_each_compatible_node(np, NULL, "ibm,p5ioc2")
- pnv_pci_init_p5ioc2_hub(np);
+ if (!found_ioda)
+ for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+ pnv_pci_init_p5ioc2_hub(np);
}
/* Setup the linkage between OF nodes and PHBs */
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index d4dbc495093..8bc47963464 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -9,9 +9,63 @@ enum pnv_phb_type {
PNV_PHB_IODA2,
};
+/* Precise PHB model for error management */
+enum pnv_phb_model {
+ PNV_PHB_MODEL_UNKNOWN,
+ PNV_PHB_MODEL_P5IOC2,
+ PNV_PHB_MODEL_P7IOC,
+};
+
+#define PNV_PCI_DIAG_BUF_SIZE 4096
+
+/* Data associated with a PE, including IOMMU tracking etc. */
+struct pnv_ioda_pe {
+ /* A PE can be associated with a single device or an
+ * entire bus (& children). In the former case, pdev
+ * is populated; in the latter case, pbus is.
+ */
+ struct pci_dev *pdev;
+ struct pci_bus *pbus;
+
+ /* Effective RID (device RID for a device PE and base bus
+ * RID with devfn 0 for a bus PE)
+ */
+ unsigned int rid;
+
+ /* PE number */
+ unsigned int pe_number;
+
+ /* "Weight" assigned to the PE for the sake of DMA resource
+ * allocations
+ */
+ unsigned int dma_weight;
+
+ /* If this is a PCI-E -> PCI-X bridge, this points to the
+ * corresponding bus PE
+ */
+ struct pnv_ioda_pe *bus_pe;
+
+ /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
+ int tce32_seg;
+ int tce32_segcount;
+ struct iommu_table tce32_table;
+
+ /* XXX TODO: Add support for additional 64-bit iommus */
+
+ /* MSIs. MVE index is identical for 32 and 64 bit MSI
+ * and -1 if not supported. (It's actually identical to the
+ * PE number)
+ */
+ int mve_number;
+
+ /* Link in list of PE#s */
+ struct list_head link;
+};
+
struct pnv_phb {
struct pci_controller *hose;
enum pnv_phb_type type;
+ enum pnv_phb_model model;
u64 opal_id;
void __iomem *regs;
spinlock_t lock;
@@ -34,7 +88,52 @@ struct pnv_phb {
struct {
struct iommu_table iommu_table;
} p5ioc2;
+
+ struct {
+ /* Global bridge info */
+ unsigned int total_pe;
+ unsigned int m32_size;
+ unsigned int m32_segsize;
+ unsigned int m32_pci_base;
+ unsigned int io_size;
+ unsigned int io_segsize;
+ unsigned int io_pci_base;
+
+ /* PE allocation bitmap */
+ unsigned long *pe_alloc;
+
+ /* M32 & IO segment maps */
+ unsigned int *m32_segmap;
+ unsigned int *io_segmap;
+ struct pnv_ioda_pe *pe_array;
+
+ /* Reverse map of PEs, will have to extend if
+ * we are to support more than 256 PEs, indexed
+ * by { bus, devfn }
+ */
+ unsigned char pe_rmap[0x10000];
+
+ /* 32-bit TCE tables allocation */
+ unsigned long tce32_count;
+
+ /* Total "weight" for the sake of DMA resource
+ * allocation
+ */
+ unsigned int dma_weight;
+ unsigned int dma_pe_count;
+
+ /* List of used PEs, sorted at
+ * boot for resource allocation purposes
+ */
+ struct list_head pe_list;
+ } ioda;
};
+
+ /* PHB status structure */
+ union {
+ unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
+ struct OpalIoP7IOCPhbErrorData p7ioc;
+ } diag;
};
extern struct pci_ops pnv_pci_ops;
@@ -43,6 +142,7 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset);
extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
+extern void pnv_pci_init_ioda_hub(struct device_node *np);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index e8773668524..17210c526c5 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -75,7 +75,7 @@ int __devinit pnv_smp_kick_cpu(int nr)
/* On OPAL v2 the CPU are still spinning inside OPAL itself,
* get them back now
*/
- if (firmware_has_feature(FW_FEATURE_OPALv2)) {
+ if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
rc = opal_start_cpu(pcpu, start_here);
if (rc != OPAL_SUCCESS)
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 1d6f4f478fe..617efa12a3a 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -31,18 +31,18 @@
#if defined(DEBUG)
#define DBG udbg_printf
+#define FAIL udbg_printf
#else
-#define DBG pr_debug
+#define DBG pr_devel
+#define FAIL pr_debug
#endif
/**
* struct ps3_bmp - a per cpu irq status and mask bitmap structure
* @status: 256 bit status bitmap indexed by plug
- * @unused_1:
+ * @unused_1: Alignment
* @mask: 256 bit mask bitmap indexed by plug
- * @unused_2:
- * @lock:
- * @ipi_debug_brk_mask:
+ * @unused_2: Alignment
*
* The HV maintains per SMT thread mappings of HV outlet to HV plug on
* behalf of the guest. These mappings are implemented as 256 bit guest
@@ -73,21 +73,24 @@ struct ps3_bmp {
unsigned long mask;
u64 unused_2[3];
};
- u64 ipi_debug_brk_mask;
- spinlock_t lock;
};
/**
* struct ps3_private - a per cpu data structure
* @bmp: ps3_bmp structure
+ * @bmp_lock: Synchronize access to bmp.
+ * @ipi_debug_brk_mask: Mask for debug break IPIs
* @ppe_id: HV logical_ppe_id
* @thread_id: HV thread_id
+ * @ipi_mask: Mask of IPI virqs
*/
struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
+ spinlock_t bmp_lock;
u64 ppe_id;
u64 thread_id;
+ unsigned long ipi_debug_brk_mask;
unsigned long ipi_mask;
};
@@ -105,7 +108,7 @@ static void ps3_chip_mask(struct irq_data *d)
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
- pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
+ DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
@@ -126,7 +129,7 @@ static void ps3_chip_unmask(struct irq_data *d)
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
- pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
+ DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
@@ -190,19 +193,19 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
*virq = irq_create_mapping(NULL, outlet);
if (*virq == NO_IRQ) {
- pr_debug("%s:%d: irq_create_mapping failed: outlet %lu\n",
+ FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
goto fail_create;
}
- pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
+ DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
result = irq_set_chip_data(*virq, pd);
if (result) {
- pr_debug("%s:%d: irq_set_chip_data failed\n",
+ FAIL("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
@@ -228,13 +231,13 @@ static int ps3_virq_destroy(unsigned int virq)
{
const struct ps3_private *pd = irq_get_chip_data(virq);
- pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
+ DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
- pr_debug("%s:%d <-\n", __func__, __LINE__);
+ DBG("%s:%d <-\n", __func__, __LINE__);
return 0;
}
@@ -257,7 +260,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
result = ps3_virq_setup(cpu, outlet, virq);
if (result) {
- pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
+ FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
goto fail_setup;
}
@@ -269,7 +272,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
outlet, 0);
if (result) {
- pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
+ FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EPERM;
goto fail_connect;
@@ -298,7 +301,7 @@ int ps3_irq_plug_destroy(unsigned int virq)
int result;
const struct ps3_private *pd = irq_get_chip_data(virq);
- pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
+ DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
ps3_chip_mask(irq_get_irq_data(virq));
@@ -306,7 +309,7 @@ int ps3_irq_plug_destroy(unsigned int virq)
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
if (result)
- pr_info("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
+ FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
ps3_virq_destroy(virq);
@@ -334,7 +337,7 @@ int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
result = lv1_construct_event_receive_port(&outlet);
if (result) {
- pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
+ FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
*virq = NO_IRQ;
return result;
@@ -360,14 +363,14 @@ int ps3_event_receive_port_destroy(unsigned int virq)
{
int result;
- pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
+ DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
if (result)
- pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
+ FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
/*
@@ -375,7 +378,7 @@ int ps3_event_receive_port_destroy(unsigned int virq)
* calls from interrupt context (smp_call_function) when kexecing.
*/
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
@@ -411,7 +414,7 @@ int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
if (result) {
- pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
+ FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
ps3_event_receive_port_destroy(*virq);
@@ -419,7 +422,7 @@ int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
return result;
}
- pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+ DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, *virq);
return 0;
@@ -433,14 +436,14 @@ int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
int result;
- pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+ DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, virq);
result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
if (result)
- pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
+ FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
@@ -455,7 +458,7 @@ int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
result = ps3_virq_destroy(virq);
BUG_ON(result);
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
@@ -480,7 +483,7 @@ int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
if (result) {
- pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
+ FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
@@ -510,7 +513,7 @@ int ps3_io_irq_destroy(unsigned int virq)
result = lv1_destruct_io_irq_outlet(outlet);
if (result)
- pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
+ FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
@@ -542,7 +545,7 @@ int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
if (result) {
- pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+ FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
@@ -562,7 +565,7 @@ int ps3_vuart_irq_destroy(unsigned int virq)
result = lv1_deconfigure_virtual_uart_irq();
if (result) {
- pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+ FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
@@ -595,7 +598,7 @@ int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
if (result) {
- pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
+ FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
@@ -626,7 +629,7 @@ int ps3_spe_irq_destroy(unsigned int virq)
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
const char* func, int line)
{
- pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
+ pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
func, line, header, cpu,
*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
*p & 0xffff);
@@ -635,7 +638,7 @@ static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
static void __maybe_unused _dump_256_bmp(const char *header,
const u64 *p, unsigned cpu, const char* func, int line)
{
- pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
+ pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
func, line, header, cpu, p[0], p[1], p[2], p[3]);
}
@@ -644,10 +647,10 @@ static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
{
unsigned long flags;
- spin_lock_irqsave(&pd->bmp.lock, flags);
+ spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
- _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
- spin_unlock_irqrestore(&pd->bmp.lock, flags);
+ _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
+ spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
@@ -656,9 +659,9 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
{
unsigned long flags;
- spin_lock_irqsave(&pd->bmp.lock, flags);
- _dump_64_bmp("mask", &pd->bmp.mask, pd->thread_id, func, line);
- spin_unlock_irqrestore(&pd->bmp.lock, flags);
+ spin_lock_irqsave(&pd->bmp_lock, flags);
+ _dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
+ spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#else
static void dump_bmp(struct ps3_private* pd) {};
@@ -667,7 +670,7 @@ static void dump_bmp(struct ps3_private* pd) {};
static int ps3_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
- pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
+ DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
@@ -690,10 +693,10 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
- pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
+ set_bit(63 - virq, &pd->ipi_debug_brk_mask);
- pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__,
- cpu, virq, pd->bmp.ipi_debug_brk_mask);
+ DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+ cpu, virq, pd->ipi_debug_brk_mask);
}
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
@@ -714,14 +717,14 @@ static unsigned int ps3_get_irq(void)
/* check for ipi break first to stop this cpu ASAP */
- if (x & pd->bmp.ipi_debug_brk_mask)
- x &= pd->bmp.ipi_debug_brk_mask;
+ if (x & pd->ipi_debug_brk_mask)
+ x &= pd->ipi_debug_brk_mask;
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
if (unlikely(plug == NO_IRQ)) {
- pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__,
+ DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
@@ -760,9 +763,9 @@ void __init ps3_init_IRQ(void)
lv1_get_logical_ppe_id(&pd->ppe_id);
pd->thread_id = get_hard_smp_processor_id(cpu);
- spin_lock_init(&pd->bmp.lock);
+ spin_lock_init(&pd->bmp_lock);
- pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
+ DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
__func__, __LINE__, pd->ppe_id, pd->thread_id,
ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
@@ -770,7 +773,7 @@ void __init ps3_init_IRQ(void)
pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
if (result)
- pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
+ FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
" %s\n", __func__, __LINE__,
ps3_result(result));
}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 72714ad2784..8bd6ba54269 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -319,7 +319,6 @@ static int __init ps3_mm_add_memory(void)
}
memblock_add(start_addr, map.r1.size);
- memblock_analyze();
result = online_pages(start_pfn, nr_pages);
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index ca40f6afd35..7bdfea336f5 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -44,7 +44,7 @@ static void _dump_field(const char *hdr, u64 n, const char *func, int line)
s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
s[i] = 0;
- pr_debug("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
+ pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
#endif
}
@@ -53,7 +53,7 @@ static void _dump_field(const char *hdr, u64 n, const char *func, int line)
static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
u64 n4, const char *func, int line)
{
- pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
_dump_field("n1: ", n1, func, line);
_dump_field("n2: ", n2, func, line);
_dump_field("n3: ", n3, func, line);
@@ -65,13 +65,13 @@ static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
u64 v1, u64 v2, const char *func, int line)
{
- pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
_dump_field("n1: ", n1, func, line);
_dump_field("n2: ", n2, func, line);
_dump_field("n3: ", n3, func, line);
_dump_field("n4: ", n4, func, line);
- pr_debug("%s:%d: v1: %016llx\n", func, line, v1);
- pr_debug("%s:%d: v2: %016llx\n", func, line, v2);
+ pr_devel("%s:%d: v1: %016llx\n", func, line, v1);
+ pr_devel("%s:%d: v2: %016llx\n", func, line, v2);
}
/**
@@ -131,11 +131,11 @@ static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
lpar_id = id;
}
- result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
+ result = lv1_read_repository_node(lpar_id, n1, n2, n3, n4, &v1,
&v2);
if (result) {
- pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
+ pr_warn("%s:%d: lv1_read_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
dump_node_name(lpar_id, n1, n2, n3, n4);
return -ENOENT;
@@ -149,10 +149,10 @@ static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
*_v2 = v2;
if (v1 && !_v1)
- pr_debug("%s:%d: warning: discarding non-zero v1: %016llx\n",
+ pr_devel("%s:%d: warning: discarding non-zero v1: %016llx\n",
__func__, __LINE__, v1);
if (v2 && !_v2)
- pr_debug("%s:%d: warning: discarding non-zero v2: %016llx\n",
+ pr_devel("%s:%d: warning: discarding non-zero v2: %016llx\n",
__func__, __LINE__, v2);
return 0;
@@ -323,16 +323,16 @@ int ps3_repository_find_device(struct ps3_repository_device *repo)
result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
if (result) {
- pr_debug("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
+ pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
return result;
}
- pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
+ pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
num_dev);
if (tmp.dev_index >= num_dev) {
- pr_debug("%s:%d: no device found\n", __func__, __LINE__);
+ pr_devel("%s:%d: no device found\n", __func__, __LINE__);
return -ENODEV;
}
@@ -340,7 +340,7 @@ int ps3_repository_find_device(struct ps3_repository_device *repo)
&tmp.dev_type);
if (result) {
- pr_debug("%s:%d read_dev_type failed\n", __func__, __LINE__);
+ pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__);
return result;
}
@@ -348,12 +348,12 @@ int ps3_repository_find_device(struct ps3_repository_device *repo)
&tmp.dev_id);
if (result) {
- pr_debug("%s:%d ps3_repository_read_dev_id failed\n", __func__,
+ pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__,
__LINE__);
return result;
}
- pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
+ pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
*repo = tmp;
@@ -367,14 +367,14 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
struct ps3_repository_device tmp;
unsigned int num_dev;
- pr_debug(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
+ pr_devel(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
bus_id, dev_id);
for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
result = ps3_repository_read_bus_id(tmp.bus_index,
&tmp.bus_id);
if (result) {
- pr_debug("%s:%u read_bus_id(%u) failed\n", __func__,
+ pr_devel("%s:%u read_bus_id(%u) failed\n", __func__,
__LINE__, tmp.bus_index);
return result;
}
@@ -382,23 +382,23 @@ int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
if (tmp.bus_id == bus_id)
goto found_bus;
- pr_debug("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
+ pr_devel("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
tmp.bus_id);
}
- pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
+ pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
return result;
found_bus:
result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type);
if (result) {
- pr_debug("%s:%u read_bus_type(%u) failed\n", __func__,
+ pr_devel("%s:%u read_bus_type(%u) failed\n", __func__,
__LINE__, tmp.bus_index);
return result;
}
result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
if (result) {
- pr_debug("%s:%u read_bus_num_dev failed\n", __func__,
+ pr_devel("%s:%u read_bus_num_dev failed\n", __func__,
__LINE__);
return result;
}
@@ -408,7 +408,7 @@ found_bus:
tmp.dev_index,
&tmp.dev_id);
if (result) {
- pr_debug("%s:%u read_dev_id(%u:%u) failed\n", __func__,
+ pr_devel("%s:%u read_dev_id(%u:%u) failed\n", __func__,
__LINE__, tmp.bus_index, tmp.dev_index);
return result;
}
@@ -416,21 +416,21 @@ found_bus:
if (tmp.dev_id == dev_id)
goto found_dev;
- pr_debug("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
+ pr_devel("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
tmp.dev_id);
}
- pr_debug(" <- %s:%u: dev not found\n", __func__, __LINE__);
+ pr_devel(" <- %s:%u: dev not found\n", __func__, __LINE__);
return result;
found_dev:
result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
&tmp.dev_type);
if (result) {
- pr_debug("%s:%u read_dev_type failed\n", __func__, __LINE__);
+ pr_devel("%s:%u read_dev_type failed\n", __func__, __LINE__);
return result;
}
- pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
+ pr_devel(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
__func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
tmp.dev_index, tmp.bus_id, tmp.dev_id);
*repo = tmp;
@@ -443,18 +443,18 @@ int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type,
int result = 0;
struct ps3_repository_device repo;
- pr_debug(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
+ pr_devel(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
repo.bus_type = bus_type;
result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
if (result) {
- pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
+ pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
return result;
}
result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
if (result) {
- pr_debug("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
+ pr_devel("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
repo.bus_index);
return result;
}
@@ -469,13 +469,13 @@ int __devinit ps3_repository_find_devices(enum ps3_bus_type bus_type,
result = callback(&repo);
if (result) {
- pr_debug("%s:%d: abort at callback\n", __func__,
+ pr_devel("%s:%d: abort at callback\n", __func__,
__LINE__);
break;
}
}
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
@@ -489,7 +489,7 @@ int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
for (i = from; i < 10; i++) {
error = ps3_repository_read_bus_type(i, &type);
if (error) {
- pr_debug("%s:%d read_bus_type failed\n",
+ pr_devel("%s:%d read_bus_type failed\n",
__func__, __LINE__);
*bus_index = UINT_MAX;
return error;
@@ -509,7 +509,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
int result = 0;
unsigned int res_index;
- pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
+ pr_devel("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
*interrupt_id = UINT_MAX;
@@ -521,7 +521,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
repo->dev_index, res_index, &t, &id);
if (result) {
- pr_debug("%s:%d read_dev_intr failed\n",
+ pr_devel("%s:%d read_dev_intr failed\n",
__func__, __LINE__);
return result;
}
@@ -535,7 +535,7 @@ int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
if (res_index == 10)
return -ENODEV;
- pr_debug("%s:%d: found intr_type %u at res_index %u\n",
+ pr_devel("%s:%d: found intr_type %u at res_index %u\n",
__func__, __LINE__, intr_type, res_index);
return result;
@@ -547,7 +547,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo,
int result = 0;
unsigned int res_index;
- pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
+ pr_devel("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
*bus_addr = *len = 0;
@@ -560,7 +560,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo,
repo->dev_index, res_index, &t, &a, &l);
if (result) {
- pr_debug("%s:%d read_dev_reg failed\n",
+ pr_devel("%s:%d read_dev_reg failed\n",
__func__, __LINE__);
return result;
}
@@ -575,7 +575,7 @@ int ps3_repository_find_reg(const struct ps3_repository_device *repo,
if (res_index == 10)
return -ENODEV;
- pr_debug("%s:%d: found reg_type %u at res_index %u\n",
+ pr_devel("%s:%d: found reg_type %u at res_index %u\n",
__func__, __LINE__, reg_type, res_index);
return result;
@@ -1009,7 +1009,7 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
int result = 0;
unsigned int res_index;
- pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+ pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
for (res_index = 0; res_index < 10; res_index++) {
@@ -1021,13 +1021,13 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
if (result) {
if (result != LV1_NO_ENTRY)
- pr_debug("%s:%d ps3_repository_read_dev_intr"
+ pr_devel("%s:%d ps3_repository_read_dev_intr"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
- pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+ pr_devel("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
intr_type, interrupt_id);
}
@@ -1042,18 +1042,18 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
if (result) {
if (result != LV1_NO_ENTRY)
- pr_debug("%s:%d ps3_repository_read_dev_reg"
+ pr_devel("%s:%d ps3_repository_read_dev_reg"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
- pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+ pr_devel("%s:%d (%u:%u) reg_type %u, bus_addr %llxh, len %llxh\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
reg_type, bus_addr, len);
}
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
@@ -1063,22 +1063,22 @@ static int dump_stor_dev_info(struct ps3_repository_device *repo)
unsigned int num_regions, region_index;
u64 port, blk_size, num_blocks;
- pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+ pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
result = ps3_repository_read_stor_dev_info(repo->bus_index,
repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
if (result) {
- pr_debug("%s:%d ps3_repository_read_stor_dev_info"
+ pr_devel("%s:%d ps3_repository_read_stor_dev_info"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
goto out;
}
- pr_debug("%s:%d (%u:%u): port %lu, blk_size %lu, num_blocks "
- "%lu, num_regions %u\n",
- __func__, __LINE__, repo->bus_index, repo->dev_index, port,
- blk_size, num_blocks, num_regions);
+ pr_devel("%s:%d (%u:%u): port %llu, blk_size %llu, num_blocks "
+ "%llu, num_regions %u\n",
+ __func__, __LINE__, repo->bus_index, repo->dev_index,
+ port, blk_size, num_blocks, num_regions);
for (region_index = 0; region_index < num_regions; region_index++) {
unsigned int region_id;
@@ -1088,19 +1088,20 @@ static int dump_stor_dev_info(struct ps3_repository_device *repo)
repo->dev_index, region_index, &region_id,
&region_start, &region_size);
if (result) {
- pr_debug("%s:%d ps3_repository_read_stor_dev_region"
+ pr_devel("%s:%d ps3_repository_read_stor_dev_region"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
- pr_debug("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
+ pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
- region_id, region_start, region_size);
+ region_id, (unsigned long)region_start,
+ (unsigned long)region_size);
}
out:
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
@@ -1109,7 +1110,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
{
int result = 0;
- pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
+ pr_devel(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
for (repo->dev_index = 0; repo->dev_index < num_dev;
repo->dev_index++) {
@@ -1118,7 +1119,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
repo->dev_index, &repo->dev_type);
if (result) {
- pr_debug("%s:%d ps3_repository_read_dev_type"
+ pr_devel("%s:%d ps3_repository_read_dev_type"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
@@ -1128,15 +1129,15 @@ static int dump_device_info(struct ps3_repository_device *repo,
repo->dev_index, &repo->dev_id);
if (result) {
- pr_debug("%s:%d ps3_repository_read_dev_id"
+ pr_devel("%s:%d ps3_repository_read_dev_id"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
continue;
}
- pr_debug("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__,
+ pr_devel("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__,
__LINE__, repo->bus_index, repo->dev_index,
- repo->dev_type, repo->dev_id);
+ repo->dev_type, (unsigned long)repo->dev_id);
ps3_repository_dump_resource_info(repo);
@@ -1144,7 +1145,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
dump_stor_dev_info(repo);
}
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
@@ -1153,7 +1154,7 @@ int ps3_repository_dump_bus_info(void)
int result = 0;
struct ps3_repository_device repo;
- pr_debug(" -> %s:%d\n", __func__, __LINE__);
+ pr_devel(" -> %s:%d\n", __func__, __LINE__);
memset(&repo, 0, sizeof(repo));
@@ -1164,7 +1165,7 @@ int ps3_repository_dump_bus_info(void)
&repo.bus_type);
if (result) {
- pr_debug("%s:%d read_bus_type(%u) failed\n",
+ pr_devel("%s:%d read_bus_type(%u) failed\n",
__func__, __LINE__, repo.bus_index);
break;
}
@@ -1173,32 +1174,32 @@ int ps3_repository_dump_bus_info(void)
&repo.bus_id);
if (result) {
- pr_debug("%s:%d read_bus_id(%u) failed\n",
+ pr_devel("%s:%d read_bus_id(%u) failed\n",
__func__, __LINE__, repo.bus_index);
continue;
}
if (repo.bus_index != repo.bus_id)
- pr_debug("%s:%d bus_index != bus_id\n",
+ pr_devel("%s:%d bus_index != bus_id\n",
__func__, __LINE__);
result = ps3_repository_read_bus_num_dev(repo.bus_index,
&num_dev);
if (result) {
- pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+ pr_devel("%s:%d read_bus_num_dev(%u) failed\n",
__func__, __LINE__, repo.bus_index);
continue;
}
- pr_debug("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
+ pr_devel("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
__func__, __LINE__, repo.bus_index, repo.bus_type,
- repo.bus_id, num_dev);
+ (unsigned long)repo.bus_id, num_dev);
dump_device_info(&repo, num_dev);
}
- pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
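The format-string changes in this hunk exist because the repository fields being printed are 64-bit: %llu/%llx match a value that is explicitly widened, while the old %lxh output is kept by narrowing the value to unsigned long. A minimal sketch of the two options, assuming a hypothetical u64 value (the helper name below is illustrative only, not part of the patch):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Illustrative helper, not part of the patch. */
static void print_region_start(u64 region_start)
{
	/* Widen explicitly and use the 64-bit specifier... */
	pr_devel("start %llxh\n", (unsigned long long)region_start);

	/* ...or keep an existing %lx format by casting down, as the hunk does. */
	pr_devel("start %lxh\n", (unsigned long)region_start);
}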
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index e8ec1b2bfff..2d664c5a83b 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -193,10 +193,12 @@ static int ps3_set_dabr(unsigned long dabr)
static void __init ps3_setup_arch(void)
{
+ u64 tmp;
DBG(" -> %s:%d\n", __func__, __LINE__);
- lv1_get_version_info(&ps3_firmware_version.raw);
+ lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
+
printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
ps3_firmware_version.major, ps3_firmware_version.minor,
ps3_firmware_version.rev);
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index efc1cd8c034..4b35166229f 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg)
" (%d)\n", __func__, __LINE__, cpu, msg, result);
}
-static int ps3_smp_probe(void)
+static int __init ps3_smp_probe(void)
{
int cpu;
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 451fad1c92a..e17fa1432d8 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -154,7 +154,7 @@ static unsigned long get_vas_id(void)
u64 id;
lv1_get_logical_ppe_id(&id);
- lv1_get_virtual_address_space_id_of_ppe(id, &id);
+ lv1_get_virtual_address_space_id_of_ppe(&id);
return id;
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index c81f6bb9c10..ae7b6d41fed 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -120,3 +120,12 @@ config DTL
which are accessible through a debugfs file.
Say N if you are unsure.
+
+config PSERIES_IDLE
+ tristate "Cpuidle driver for pSeries platforms"
+ depends on CPU_IDLE
+ depends on PPC_PSERIES
+ default y
+ help
+ Select this option to enable processor idle state management
+ through the cpuidle subsystem.
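(The cpuidle back end selected here is the new processor_idle.c driver added later in this series.)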
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 3556e402cbf..236db46b407 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
+obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 3cafc306b97..c638535753d 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -33,7 +33,7 @@
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
@@ -65,7 +65,7 @@ static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
-static struct sys_device cmm_sysdev;
+static struct device cmm_dev;
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
@@ -347,25 +347,25 @@ static int cmm_thread(void *dummy)
}
#define CMM_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+ static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
-static ssize_t show_oom_pages(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_oom_pages(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
-static ssize_t store_oom_pages(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_oom_pages(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val = simple_strtoul (buf, NULL, 10);
@@ -379,17 +379,18 @@ static ssize_t store_oom_pages(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(oom_freed_kb, S_IWUSR| S_IRUGO,
+static DEVICE_ATTR(oom_freed_kb, S_IWUSR | S_IRUGO,
show_oom_pages, store_oom_pages);
-static struct sysdev_attribute *cmm_attrs[] = {
- &attr_loaned_kb,
- &attr_loaned_target_kb,
- &attr_oom_freed_kb,
+static struct device_attribute *cmm_attrs[] = {
+ &dev_attr_loaned_kb,
+ &dev_attr_loaned_target_kb,
+ &dev_attr_oom_freed_kb,
};
-static struct sysdev_class cmm_sysdev_class = {
+static struct bus_type cmm_subsys = {
.name = "cmm",
+ .dev_name = "cmm",
};
/**
@@ -398,21 +399,21 @@ static struct sysdev_class cmm_sysdev_class = {
* Return value:
* 0 on success / other on failure
**/
-static int cmm_sysfs_register(struct sys_device *sysdev)
+static int cmm_sysfs_register(struct device *dev)
{
int i, rc;
- if ((rc = sysdev_class_register(&cmm_sysdev_class)))
+ if ((rc = subsys_system_register(&cmm_subsys, NULL)))
return rc;
- sysdev->id = 0;
- sysdev->cls = &cmm_sysdev_class;
+ dev->id = 0;
+ dev->bus = &cmm_subsys;
- if ((rc = sysdev_register(sysdev)))
- goto class_unregister;
+ if ((rc = device_register(dev)))
+ goto subsys_unregister;
for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
- if ((rc = sysdev_create_file(sysdev, cmm_attrs[i])))
+ if ((rc = device_create_file(dev, cmm_attrs[i])))
goto fail;
}
@@ -420,10 +421,10 @@ static int cmm_sysfs_register(struct sys_device *sysdev)
fail:
while (--i >= 0)
- sysdev_remove_file(sysdev, cmm_attrs[i]);
- sysdev_unregister(sysdev);
-class_unregister:
- sysdev_class_unregister(&cmm_sysdev_class);
+ device_remove_file(dev, cmm_attrs[i]);
+ device_unregister(dev);
+subsys_unregister:
+ bus_unregister(&cmm_subsys);
return rc;
}
@@ -431,14 +432,14 @@ class_unregister:
* cmm_unregister_sysfs - Unregister from sysfs
*
**/
-static void cmm_unregister_sysfs(struct sys_device *sysdev)
+static void cmm_unregister_sysfs(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
- sysdev_remove_file(sysdev, cmm_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&cmm_sysdev_class);
+ device_remove_file(dev, cmm_attrs[i]);
+ device_unregister(dev);
+ bus_unregister(&cmm_subsys);
}
/**
@@ -657,7 +658,7 @@ static int cmm_init(void)
if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
goto out_oom_notifier;
- if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+ if ((rc = cmm_sysfs_register(&cmm_dev)))
goto out_reboot_notifier;
if (register_memory_notifier(&cmm_mem_nb) ||
@@ -678,7 +679,7 @@ static int cmm_init(void)
out_unregister_notifier:
unregister_memory_notifier(&cmm_mem_nb);
unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
- cmm_unregister_sysfs(&cmm_sysdev);
+ cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
@@ -701,7 +702,7 @@ static void cmm_exit(void)
unregister_memory_notifier(&cmm_mem_nb);
unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
cmm_free_pages(loaned_pages);
- cmm_unregister_sysfs(&cmm_sysdev);
+ cmm_unregister_sysfs(&cmm_dev);
}
/**
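The cmm.c hunk above (and the pseries_energy.c and suspend.c hunks further down) all follow the same sysdev-to-struct-device migration: a struct bus_type registered with subsys_system_register() replaces the sysdev class, a plain struct device replaces the sys_device, and DEVICE_ATTR()/device_create_file() replace the SYSDEV_* helpers. A minimal sketch of that pattern, with placeholder "example" names that are not part of the patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static struct bus_type example_subsys = {
	.name = "example",
	.dev_name = "example",
};

static struct device example_dev;

static ssize_t value_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* shows up in sysfs as "value" */
}
static DEVICE_ATTR(value, S_IRUGO, value_show, NULL);

static int example_sysfs_register(void)
{
	int rc;

	rc = subsys_system_register(&example_subsys, NULL);
	if (rc)
		return rc;

	example_dev.id = 0;
	example_dev.bus = &example_subsys;
	rc = device_register(&example_dev);
	if (rc) {
		bus_unregister(&example_subsys);
		return rc;
	}

	/* /sys/devices/system/example/example0/value */
	return device_create_file(&example_dev, &dev_attr_value);
}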
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f106662f438..c9311cfdfca 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
if (opcode > MAX_HCALL_OPCODE)
return;
- h = &get_cpu_var(hcall_stats)[opcode / 4];
+ h = &__get_cpu_var(hcall_stats)[opcode / 4];
h->tb_start = mftb();
h->purr_start = mfspr(SPRN_PURR);
}
@@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
h->num_calls++;
h->tb_total += mftb() - h->tb_start;
h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
-
- put_cpu_var(hcall_stats);
}
static int __init hcall_inst_init(void)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b719d970973..c442f2b1980 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -52,13 +52,42 @@
#include "plpar_wrappers.h"
+static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ unsigned long start, end, inc;
+
+ start = __pa(startp);
+ end = __pa(endp);
+ inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */
+
+ /* If this is non-zero, change the format. We shift the
+ * address and or in the magic from the device tree. */
+ if (tbl->it_busno) {
+ start <<= 12;
+ end <<= 12;
+ inc <<= 12;
+ start |= tbl->it_busno;
+ end |= tbl->it_busno;
+ }
+
+ end |= inc - 1; /* round up end to be different than start */
+
+ mb(); /* Make sure TCEs in memory are written */
+ while (start <= end) {
+ out_be64(invalidate, start);
+ start += inc;
+ }
+}
+
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
u64 proto_tce;
- u64 *tcep;
+ u64 *tcep, *tces;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -66,7 +95,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((u64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
@@ -76,18 +105,24 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
uaddr += TCE_PAGE_SIZE;
tcep++;
}
+
+ if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+ tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
- u64 *tcep;
+ u64 *tcep, *tces;
- tcep = ((u64 *)tbl->it_base) + index;
+ tces = tcep = ((u64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
+
+ if (tbl->it_type == TCE_PCI_SWINV_FREE)
+ tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
@@ -425,7 +460,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table *tbl)
{
struct device_node *node;
- const unsigned long *basep;
+ const unsigned long *basep, *sw_inval;
const u32 *sizep;
node = phb->dn;
@@ -462,6 +497,22 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_index = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
+
+ sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
+ if (sw_inval) {
+ /*
+ * This property contains information on how to
+ * invalidate the TCE entry. The first property is
+ * the base MMIO address used to invalidate entries.
+ * The second property tells us the format of the TCE
+ * invalidate (whether it needs to be shifted) and
+ * some magic routing info to add to our invalidate
+ * command.
+ */
+ tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
+ tbl->it_busno = sw_inval[1]; /* overload this with magic */
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
+ }
}
/*
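The comment in the hunk above describes a two-cell property: cell 0 is the MMIO address that is written to invalidate TCEs, and cell 1 is the shift/routing magic that gets OR-ed into the invalidate command. A minimal sketch of how a consumer reads those cells, assuming a hypothetical struct and helper name (the real code overloads it_index and it_busno in struct iommu_table for the same purpose):

#include <linux/of.h>
#include <linux/io.h>
#include <linux/errno.h>

/* Illustrative container, not the real iommu_table. */
struct tce_swinv_example {
	unsigned long inval_reg;	/* ioremapped MMIO invalidate register */
	unsigned long magic;		/* shift/routing info from the DT */
};

static int example_parse_swinv(struct device_node *node,
			       struct tce_swinv_example *ex)
{
	const unsigned long *sw_inval;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (!sw_inval)
		return -ENODEV;

	ex->inval_reg = (unsigned long)ioremap(sw_inval[0], 8);
	ex->magic = sw_inval[1];
	return 0;
}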
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 27a49508b41..948e0e3b354 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -554,7 +554,10 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
goto out;
(*depth)++;
+ preempt_disable();
trace_hcall_entry(opcode, args);
+ if (opcode == H_CEDE)
+ rcu_idle_enter();
(*depth)--;
out:
@@ -575,7 +578,10 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
goto out;
(*depth)++;
+ if (opcode == H_CEDE)
+ rcu_idle_exit();
trace_hcall_exit(opcode, retval, retbuf);
+ preempt_enable();
(*depth)--;
out:
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index a76b22844d1..330a57b7c17 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -625,6 +625,8 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
{
static unsigned int oops_count = 0;
static bool panicking = false;
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
size_t text_len;
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
int rc = -1;
@@ -655,6 +657,9 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
if (clobbering_unread_rtas_event())
return;
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
if (big_oops_buf) {
text_len = capture_last_msgs(old_msgs, old_len,
new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
@@ -670,4 +675,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
(int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count);
+
+ spin_unlock_irqrestore(&lock, flags);
}
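The trylock added above guards a dump path that may be entered from several CPUs at once, or re-entered on panic: a second caller bails out instead of spinning on a lock the first dump still holds. A minimal sketch of the pattern in isolation; example_dump() is a placeholder, not part of the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_dump_lock);

static void example_dump(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&example_dump_lock, flags))
		return;		/* someone else is already dumping */

	/* ... capture and write the log here ... */

	spin_unlock_irqrestore(&example_dump_lock, flags);
}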
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
new file mode 100644
index 00000000000..085fd3f45ad
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -0,0 +1,329 @@
+/*
+ * processor_idle - idle state cpuidle driver.
+ * Adapted from drivers/idle/intel_idle.c and
+ * drivers/acpi/processor_idle.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu.h>
+
+#include <asm/paca.h>
+#include <asm/reg.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+#include "plpar_wrappers.h"
+#include "pseries.h"
+
+struct cpuidle_driver pseries_idle_driver = {
+ .name = "pseries_idle",
+ .owner = THIS_MODULE,
+};
+
+#define MAX_IDLE_STATE_COUNT 2
+
+static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
+static struct cpuidle_device __percpu *pseries_cpuidle_devices;
+static struct cpuidle_state *cpuidle_state_table;
+
+void update_smt_snooze_delay(int snooze)
+{
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+ if (drv)
+ drv->states[0].target_residency = snooze;
+}
+
+static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
+{
+
+ *kt_before = ktime_get_real();
+ *in_purr = mfspr(SPRN_PURR);
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
+{
+ get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->idle = 0;
+
+ return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
+}
+
+static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+ ktime_t kt_before;
+ unsigned long start_snooze;
+ long snooze = drv->states[0].target_residency;
+
+ idle_loop_prolog(&in_purr, &kt_before);
+
+ if (snooze) {
+ start_snooze = get_tb() + snooze * tb_ticks_per_usec;
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ while ((snooze < 0) || (get_tb() < start_snooze)) {
+ if (need_resched() || cpu_is_offline(dev->cpu))
+ goto out;
+ ppc64_runlatch_off();
+ HMT_low();
+ HMT_very_low();
+ }
+
+ HMT_medium();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
+ local_irq_disable();
+ }
+
+out:
+ HMT_medium();
+ dev->last_residency =
+ (int)idle_loop_epilog(in_purr, kt_before);
+ return index;
+}
+
+static int dedicated_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+ ktime_t kt_before;
+
+ idle_loop_prolog(&in_purr, &kt_before);
+ get_lppaca()->donate_dedicated_cpu = 1;
+
+ ppc64_runlatch_off();
+ HMT_medium();
+ cede_processor();
+
+ get_lppaca()->donate_dedicated_cpu = 0;
+ dev->last_residency =
+ (int)idle_loop_epilog(in_purr, kt_before);
+ return index;
+}
+
+static int shared_cede_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long in_purr;
+ ktime_t kt_before;
+
+ idle_loop_prolog(&in_purr, &kt_before);
+
+ /*
+ * Yield the processor to the hypervisor. We return if
+ * an external interrupt occurs (which are driven prior
+ * to returning here) or if a prod occurs from another
+ * processor. When returning here, external interrupts
+ * are enabled.
+ */
+ cede_processor();
+
+ dev->last_residency =
+ (int)idle_loop_epilog(in_purr, kt_before);
+ return index;
+}
+
+/*
+ * States for dedicated partition case.
+ */
+static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
+ { /* Snooze */
+ .name = "snooze",
+ .desc = "snooze",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &snooze_loop },
+ { /* CEDE */
+ .name = "CEDE",
+ .desc = "CEDE",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 10,
+ .enter = &dedicated_cede_loop },
+};
+
+/*
+ * States for shared partition case.
+ */
+static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
+ { /* Shared Cede */
+ .name = "Shared Cede",
+ .desc = "Shared Cede",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 0,
+ .target_residency = 0,
+ .enter = &shared_cede_loop },
+};
+
+int pseries_notify_cpuidle_add_cpu(int cpu)
+{
+ struct cpuidle_device *dev =
+ per_cpu_ptr(pseries_cpuidle_devices, cpu);
+ if (dev && cpuidle_get_driver()) {
+ cpuidle_disable_device(dev);
+ cpuidle_enable_device(dev);
+ }
+ return 0;
+}
+
+/*
+ * pseries_cpuidle_driver_init()
+ */
+static int pseries_cpuidle_driver_init(void)
+{
+ int idle_state;
+ struct cpuidle_driver *drv = &pseries_idle_driver;
+
+ drv->state_count = 0;
+
+ for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
+
+ if (idle_state > max_idle_state)
+ break;
+
+ /* is the state not enabled? */
+ if (cpuidle_state_table[idle_state].enter == NULL)
+ continue;
+
+ drv->states[drv->state_count] = /* structure copy */
+ cpuidle_state_table[idle_state];
+
+ if (cpuidle_state_table == dedicated_states)
+ drv->states[drv->state_count].target_residency =
+ __get_cpu_var(smt_snooze_delay);
+
+ drv->state_count += 1;
+ }
+
+ return 0;
+}
+
+/* pseries_idle_devices_uninit(void)
+ * unregister cpuidle devices and de-allocate memory
+ */
+static void pseries_idle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(pseries_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(pseries_cpuidle_devices);
+ return;
+}
+
+/* pseries_idle_devices_init()
+ * allocate, initialize and register cpuidle device
+ */
+static int pseries_idle_devices_init(void)
+{
+ int i;
+ struct cpuidle_driver *drv = &pseries_idle_driver;
+ struct cpuidle_device *dev;
+
+ pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (pseries_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(pseries_cpuidle_devices, i);
+ dev->state_count = drv->state_count;
+ dev->cpu = i;
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_DEBUG \
+ "cpuidle_register_device %d failed!\n", i);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * pseries_idle_probe()
+ * Choose state table for shared versus dedicated partition
+ */
+static int pseries_idle_probe(void)
+{
+
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return -ENODEV;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
+ if (max_idle_state == 0) {
+ printk(KERN_DEBUG "pseries processor idle disabled.\n");
+ return -EPERM;
+ }
+
+ if (get_lppaca()->shared_proc)
+ cpuidle_state_table = shared_states;
+ else
+ cpuidle_state_table = dedicated_states;
+
+ return 0;
+}
+
+static int __init pseries_processor_idle_init(void)
+{
+ int retval;
+
+ retval = pseries_idle_probe();
+ if (retval)
+ return retval;
+
+ pseries_cpuidle_driver_init();
+ retval = cpuidle_register_driver(&pseries_idle_driver);
+ if (retval) {
+ printk(KERN_DEBUG "Registration of pseries driver failed.\n");
+ return retval;
+ }
+
+ retval = pseries_idle_devices_init();
+ if (retval) {
+ pseries_idle_devices_uninit();
+ cpuidle_unregister_driver(&pseries_idle_driver);
+ return retval;
+ }
+
+ printk(KERN_DEBUG "pseries_idle_driver registered\n");
+
+ return 0;
+}
+
+static void __exit pseries_processor_idle_exit(void)
+{
+
+ pseries_idle_devices_uninit();
+ cpuidle_unregister_driver(&pseries_idle_driver);
+
+ return;
+}
+
+module_init(pseries_processor_idle_init);
+module_exit(pseries_processor_idle_exit);
+
+MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("Cpuidle driver for POWER");
+MODULE_LICENSE("GPL");
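For reference, this is roughly what one more entry in the dedicated_states[] table above would look like if MAX_IDLE_STATE_COUNT were raised; the "deep-cede" name and its latency/residency numbers are invented for the sketch and are not part of the patch, and the entry simply reuses the CEDE handler from the file above:

#include <linux/cpuidle.h>

static struct cpuidle_state example_deep_state = {
	.name			= "deep-cede",
	.desc			= "hypothetical deeper cede state",
	.flags			= CPUIDLE_FLAG_TIME_VALID,
	.exit_latency		= 10,	/* microseconds to come back out */
	.target_residency	= 100,	/* only worth entering if idle >= 100 us */
	.enter			= &dedicated_cede_loop,
};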
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 24c7162f11d..9a3dda07566 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -57,4 +57,7 @@ extern struct device_node *dlpar_configure_connector(u32);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
+/* Snooze Delay, pseries_idle */
+DECLARE_PER_CPU(long, smt_snooze_delay);
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index c8b3c69fe89..af281dce510 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -15,7 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/cputhreads.h>
@@ -184,7 +184,7 @@ static ssize_t get_best_energy_list(char *page, int activate)
return s-page;
}
-static ssize_t get_best_energy_data(struct sys_device *dev,
+static ssize_t get_best_energy_data(struct device *dev,
char *page, int activate)
{
int rc;
@@ -207,26 +207,26 @@ static ssize_t get_best_energy_data(struct sys_device *dev,
/* Wrapper functions */
-static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *page)
+static ssize_t cpu_activate_hint_list_show(struct device *dev,
+ struct device_attribute *attr, char *page)
{
return get_best_energy_list(page, 1);
}
-static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *page)
+static ssize_t cpu_deactivate_hint_list_show(struct device *dev,
+ struct device_attribute *attr, char *page)
{
return get_best_energy_list(page, 0);
}
-static ssize_t percpu_activate_hint_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *page)
+static ssize_t percpu_activate_hint_show(struct device *dev,
+ struct device_attribute *attr, char *page)
{
return get_best_energy_data(dev, page, 1);
}
-static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *page)
+static ssize_t percpu_deactivate_hint_show(struct device *dev,
+ struct device_attribute *attr, char *page)
{
return get_best_energy_data(dev, page, 0);
}
@@ -241,48 +241,48 @@ static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
* Per-cpu value of the hint
*/
-struct sysdev_class_attribute attr_cpu_activate_hint_list =
- _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+struct device_attribute attr_cpu_activate_hint_list =
+ __ATTR(pseries_activate_hint_list, 0444,
cpu_activate_hint_list_show, NULL);
-struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
- _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+struct device_attribute attr_cpu_deactivate_hint_list =
+ __ATTR(pseries_deactivate_hint_list, 0444,
cpu_deactivate_hint_list_show, NULL);
-struct sysdev_attribute attr_percpu_activate_hint =
- _SYSDEV_ATTR(pseries_activate_hint, 0444,
+struct device_attribute attr_percpu_activate_hint =
+ __ATTR(pseries_activate_hint, 0444,
percpu_activate_hint_show, NULL);
-struct sysdev_attribute attr_percpu_deactivate_hint =
- _SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+struct device_attribute attr_percpu_deactivate_hint =
+ __ATTR(pseries_deactivate_hint, 0444,
percpu_deactivate_hint_show, NULL);
static int __init pseries_energy_init(void)
{
int cpu, err;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
if (!check_for_h_best_energy()) {
printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
return 0;
}
/* Create the sysfs files */
- err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &attr_cpu_activate_hint_list.attr);
+ err = device_create_file(cpu_subsys.dev_root,
+ &attr_cpu_activate_hint_list);
if (!err)
- err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
- &attr_cpu_deactivate_hint_list.attr);
+ err = device_create_file(cpu_subsys.dev_root,
+ &attr_cpu_deactivate_hint_list);
if (err)
return err;
for_each_possible_cpu(cpu) {
- cpu_sys_dev = get_cpu_sysdev(cpu);
- err = sysfs_create_file(&cpu_sys_dev->kobj,
- &attr_percpu_activate_hint.attr);
+ cpu_dev = get_cpu_device(cpu);
+ err = device_create_file(cpu_dev,
+ &attr_percpu_activate_hint);
if (err)
break;
- err = sysfs_create_file(&cpu_sys_dev->kobj,
- &attr_percpu_deactivate_hint.attr);
+ err = device_create_file(cpu_dev,
+ &attr_percpu_deactivate_hint);
if (err)
break;
}
@@ -298,23 +298,20 @@ static int __init pseries_energy_init(void)
static void __exit pseries_energy_cleanup(void)
{
int cpu;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
if (!sysfs_entries)
return;
/* Remove the sysfs files */
- sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
- &attr_cpu_activate_hint_list.attr);
-
- sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
- &attr_cpu_deactivate_hint_list.attr);
+ device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list);
+ device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list);
for_each_possible_cpu(cpu) {
- cpu_sys_dev = get_cpu_sysdev(cpu);
- sysfs_remove_file(&cpu_sys_dev->kobj,
+ cpu_dev = get_cpu_device(cpu);
+ sysfs_remove_file(&cpu_dev->kobj,
&attr_percpu_activate_hint.attr);
- sysfs_remove_file(&cpu_sys_dev->kobj,
+ sysfs_remove_file(&cpu_dev->kobj,
&attr_percpu_deactivate_hint.attr);
}
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c3408ca8855..f79f1278dfc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
+#include <linux/cpuidle.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -74,9 +75,6 @@ EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
-static void pseries_shared_idle_sleep(void);
-static void pseries_dedicated_idle_sleep(void);
-
static struct device_node *pSeries_mpic_node;
static void pSeries_show_cpuinfo(struct seq_file *m)
@@ -192,8 +190,7 @@ static void __init pseries_mpic_init_IRQ(void)
BUG_ON(openpic_addr == 0);
/* Setup the openpic driver */
- mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
- MPIC_PRIMARY,
+ mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 0,
16, 250, /* isu size, irq count */
" MPIC ");
BUG_ON(mpic == NULL);
@@ -352,8 +349,25 @@ static int alloc_dispatch_log_kmem_cache(void)
}
early_initcall(alloc_dispatch_log_kmem_cache);
+static void pSeries_idle(void)
+{
+ /* Call into the cpuidle framework and, through it, the back-end
+ * pseries driver to enter an idle state.
+ */
+ if (cpuidle_idle_call()) {
+ /* On error, execute default handler
+ * to go into low thread priority and possibly
+ * low power mode.
+ */
+ HMT_low();
+ HMT_very_low();
+ }
+}
+
static void __init pSeries_setup_arch(void)
{
+ panic_timeout = 10;
+
/* Discover PIC type and setup ppc_md accordingly */
pseries_discover_pic();
@@ -374,18 +388,9 @@ static void __init pSeries_setup_arch(void)
pSeries_nvram_init();
- /* Choose an idle loop */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
vpa_init(boot_cpuid);
- if (get_lppaca()->shared_proc) {
- printk(KERN_DEBUG "Using shared processor idle loop\n");
- ppc_md.power_save = pseries_shared_idle_sleep;
- } else {
- printk(KERN_DEBUG "Using dedicated idle loop\n");
- ppc_md.power_save = pseries_dedicated_idle_sleep;
- }
- } else {
- printk(KERN_DEBUG "Using default idle loop\n");
+ ppc_md.power_save = pSeries_idle;
}
if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -586,80 +591,6 @@ static int __init pSeries_probe(void)
return 1;
}
-
-DECLARE_PER_CPU(long, smt_snooze_delay);
-
-static void pseries_dedicated_idle_sleep(void)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long start_snooze;
- unsigned long in_purr, out_purr;
- long snooze = __get_cpu_var(smt_snooze_delay);
-
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
- get_lppaca()->donate_dedicated_cpu = 1;
- in_purr = mfspr(SPRN_PURR);
-
- /*
- * We come in with interrupts disabled, and need_resched()
- * has been checked recently. If we should poll for a little
- * while, do so.
- */
- if (snooze) {
- start_snooze = get_tb() + snooze * tb_ticks_per_usec;
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while ((snooze < 0) || (get_tb() < start_snooze)) {
- if (need_resched() || cpu_is_offline(cpu))
- goto out;
- ppc64_runlatch_off();
- HMT_low();
- HMT_very_low();
- }
-
- HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
- local_irq_disable();
- if (need_resched() || cpu_is_offline(cpu))
- goto out;
- }
-
- cede_processor();
-
-out:
- HMT_medium();
- out_purr = mfspr(SPRN_PURR);
- get_lppaca()->wait_state_cycles += out_purr - in_purr;
- get_lppaca()->donate_dedicated_cpu = 0;
- get_lppaca()->idle = 0;
-}
-
-static void pseries_shared_idle_sleep(void)
-{
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- */
- cede_processor();
-
- get_lppaca()->idle = 0;
-}
-
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 26e93fd4c62..eadba9521a3 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -22,7 +22,7 @@
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
@@ -148,6 +148,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
set_cpu_current_state(cpu, CPU_STATE_ONLINE);
set_default_offline_state(cpu);
#endif
+ pseries_notify_cpuidle_add_cpu(cpu);
}
static int __devinit smp_pSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index d3de0849f29..b84a8b2238d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -26,7 +26,7 @@
#include <asm/rtas.h>
static u64 stream_id;
-static struct sys_device suspend_sysdev;
+static struct device suspend_dev;
static DECLARE_COMPLETION(suspend_work);
static struct rtas_suspend_me_data suspend_data;
static atomic_t suspending;
@@ -110,8 +110,8 @@ static int pseries_prepare_late(void)
/**
* store_hibernate - Initiate partition hibernation
- * @classdev: sysdev class struct
- * @attr: class device attribute struct
+ * @dev: subsys root device
+ * @attr: device attribute struct
* @buf: buffer
* @count: buffer size
*
@@ -121,8 +121,8 @@ static int pseries_prepare_late(void)
* Return value:
* number of bytes printed to buffer / other on failure
**/
-static ssize_t store_hibernate(struct sysdev_class *classdev,
- struct sysdev_class_attribute *attr,
+static ssize_t store_hibernate(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
@@ -148,10 +148,11 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
return rc;
}
-static SYSDEV_CLASS_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
+static DEVICE_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
-static struct sysdev_class suspend_sysdev_class = {
+static struct bus_type suspend_subsys = {
.name = "power",
+ .dev_name = "power",
};
static const struct platform_suspend_ops pseries_suspend_ops = {
@@ -167,23 +168,23 @@ static const struct platform_suspend_ops pseries_suspend_ops = {
* Return value:
* 0 on success / other on failure
**/
-static int pseries_suspend_sysfs_register(struct sys_device *sysdev)
+static int pseries_suspend_sysfs_register(struct device *dev)
{
int rc;
- if ((rc = sysdev_class_register(&suspend_sysdev_class)))
+ if ((rc = subsys_system_register(&suspend_subsys, NULL)))
return rc;
- sysdev->id = 0;
- sysdev->cls = &suspend_sysdev_class;
+ dev->id = 0;
+ dev->bus = &suspend_subsys;
- if ((rc = sysdev_class_create_file(&suspend_sysdev_class, &attr_hibernate)))
- goto class_unregister;
+ if ((rc = device_create_file(suspend_subsys.dev_root, &dev_attr_hibernate)))
+ goto subsys_unregister;
return 0;
-class_unregister:
- sysdev_class_unregister(&suspend_sysdev_class);
+subsys_unregister:
+ bus_unregister(&suspend_subsys);
return rc;
}
@@ -204,7 +205,7 @@ static int __init pseries_suspend_init(void)
if (suspend_data.token == RTAS_UNKNOWN_SERVICE)
return 0;
- if ((rc = pseries_suspend_sysfs_register(&suspend_sysdev)))
+ if ((rc = pseries_suspend_sysfs_register(&suspend_dev)))
return rc;
ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index bd560c786ed..57d22a2f4ba 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -1,20 +1,28 @@
config PPC_WSP
bool
select PPC_A2
+ select GENERIC_TBSYNC
+ select PPC_ICSWX
select PPC_SCOM
select PPC_XICS
select PPC_ICP_NATIVE
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
+ select PPC_WSP_COPRO
default n
menu "WSP platform selection"
depends on PPC_BOOK3E_64
config PPC_PSR2
- bool "PSR-2 platform"
- select GENERIC_TBSYNC
+ bool "PowerEN System Reference Platform 2"
+ select EPAPR_BOOT
+ select PPC_WSP
+ default y
+
+config PPC_CHROMA
+ bool "PowerEN PCIe Chroma Card"
select EPAPR_BOOT
select PPC_WSP
default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
index a1486b436f0..56817ac98fc 100644
--- a/arch/powerpc/platforms/wsp/Makefile
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -1,8 +1,10 @@
ccflags-y += -mno-minimal-toc
-obj-y += setup.o ics.o
-obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
+obj-y += setup.o ics.o wsp.o
+obj-$(CONFIG_PPC_PSR2) += psr2.o
+obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o
+obj-$(CONFIG_PPC_WSP) += opb_pic.o
obj-$(CONFIG_PPC_WSP) += scom_wsp.o
obj-$(CONFIG_SMP) += smp.o scom_smp.o
obj-$(CONFIG_PCI) += wsp_pci.o
-obj-$(CONFIG_PCI_MSI) += msi.o
\ No newline at end of file
+obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
new file mode 100644
index 00000000000..ca6fa26f6e6
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/chroma.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/time.h>
+
+#include <asm/machdep.h>
+#include <asm/system.h>
+#include <asm/udbg.h>
+
+#include "ics.h"
+#include "wsp.h"
+
+void __init chroma_setup_arch(void)
+{
+ wsp_setup_arch();
+ wsp_setup_h8();
+
+}
+
+static int __init chroma_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
+ return 0;
+
+ return 1;
+}
+
+define_machine(chroma_md) {
+ .name = "Chroma PCIe",
+ .probe = chroma_probe,
+ .setup_arch = chroma_setup_arch,
+ .restart = wsp_h8_restart,
+ .power_off = wsp_h8_power_off,
+ .halt = wsp_halt,
+ .calibrate_decr = generic_calibrate_decr,
+ .init_IRQ = wsp_setup_irq,
+ .progress = udbg_progress,
+ .power_save = book3e_idle,
+};
+
+machine_arch_initcall(chroma_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
new file mode 100644
index 00000000000..d18e6cc19df
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/h8.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/io.h>
+
+#include "wsp.h"
+
+/*
+ * The UART connection to the H8 is over ttyS1 which is just a 16550.
+ * We assume that FW has it setup right and no one messes with it.
+ */
+
+
+static u8 __iomem *h8;
+
+#define RBR 0 /* Receiver Buffer Register */
+#define THR 0 /* Transmitter Holding Register */
+#define LSR 5 /* Line Status Register */
+#define LSR_DR 0x01 /* LSR value for Data-Ready */
+#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */
+static void wsp_h8_putc(int c)
+{
+ u8 lsr;
+
+ do {
+ lsr = readb(h8 + LSR);
+ } while ((lsr & LSR_THRE) != LSR_THRE);
+ writeb(c, h8 + THR);
+}
+
+static int wsp_h8_getc(void)
+{
+ u8 lsr;
+
+ do {
+ lsr = readb(h8 + LSR);
+ } while ((lsr & LSR_DR) != LSR_DR);
+
+ return readb(h8 + RBR);
+}
+
+static void wsp_h8_puts(const char *s, int sz)
+{
+ int i;
+
+ for (i = 0; i < sz; i++) {
+ wsp_h8_putc(s[i]);
+
+ /* no flow control so wait for echo */
+ wsp_h8_getc();
+ }
+ wsp_h8_putc('\r');
+ wsp_h8_putc('\n');
+}
+
+static void wsp_h8_terminal_cmd(const char *cmd, int sz)
+{
+ hard_irq_disable();
+ wsp_h8_puts(cmd, sz);
+ /* should never return, but just in case */
+ for (;;)
+ continue;
+}
+
+
+void wsp_h8_restart(char *cmd)
+{
+ static const char restart[] = "warm-reset";
+
+ (void)cmd;
+ wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
+}
+
+void wsp_h8_power_off(void)
+{
+ static const char off[] = "power-off";
+
+ wsp_h8_terminal_cmd(off, sizeof(off) - 1);
+}
+
+static void __iomem *wsp_h8_getaddr(void)
+{
+ struct device_node *aliases;
+ struct device_node *uart;
+ struct property *path;
+ void __iomem *va = NULL;
+
+ /*
+ * there is nothing in the devtree to tell us which is mapped
+ * to the H8, but we know it is the second serial port.
+ */
+
+ aliases = of_find_node_by_path("/aliases");
+ if (aliases == NULL)
+ return NULL;
+
+ path = of_find_property(aliases, "serial1", NULL);
+ if (path == NULL)
+ goto out;
+
+ uart = of_find_node_by_path(path->value);
+ if (uart == NULL)
+ goto out;
+
+ va = of_iomap(uart, 0);
+
+ /* remove it so no one messes with it */
+ of_detach_node(uart);
+ of_node_put(uart);
+
+out:
+ of_node_put(aliases);
+
+ return va;
+}
+
+void __init wsp_setup_h8(void)
+{
+ h8 = wsp_h8_getaddr();
+
+ /* Devtree change? let's hard map it anyway */
+ if (h8 == NULL) {
+ pr_warn("UART to H8 could not be found");
+ h8 = ioremap(0xffc0008000ULL, 0x100);
+ }
+}
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index be05631a3c1..19f353dfcd0 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -320,7 +320,8 @@ void __init opb_pic_init(void)
}
/* Attach opb interrupt handler to new virtual IRQ */
- rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
+ rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
+ "OPB LS Cascade", opb);
if (rc) {
printk("opb: request_irq failed: %d\n", rc);
continue;
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 166f2e4b4be..0c1ae06d0be 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -14,10 +14,10 @@
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/time.h>
#include <asm/machdep.h>
#include <asm/system.h>
-#include <asm/time.h>
#include <asm/udbg.h>
#include "ics.h"
@@ -27,7 +27,8 @@
static void psr2_spin(void)
{
hard_irq_disable();
- for (;;) ;
+ for (;;)
+ continue;
}
static void psr2_restart(char *cmd)
@@ -35,65 +36,32 @@ static void psr2_restart(char *cmd)
psr2_spin();
}
-static int psr2_probe_devices(void)
-{
- struct device_node *np;
-
- /* Our RTC is a ds1500. It seems to be programatically compatible
- * with the ds1511 for which we have a driver so let's use that
- */
- np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
- if (np != NULL) {
- struct resource res;
- if (of_address_to_resource(np, 0, &res) == 0)
- platform_device_register_simple("ds1511", 0, &res, 1);
- }
- return 0;
-}
-machine_arch_initcall(psr2_md, psr2_probe_devices);
-
-static void __init psr2_setup_arch(void)
-{
- /* init to some ~sane value until calibrate_delay() runs */
- loops_per_jiffy = 50000000;
-
- scom_init_wsp();
-
- /* Setup SMP callback */
-#ifdef CONFIG_SMP
- a2_setup_smp();
-#endif
-#ifdef CONFIG_PCI
- wsp_setup_pci();
-#endif
-
-}
-
static int __init psr2_probe(void)
{
unsigned long root = of_get_flat_dt_root();
+ if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
+ /* chroma systems also claim they are psr2s */
+ return 0;
+ }
+
if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
return 0;
return 1;
}
-static void __init psr2_init_irq(void)
-{
- wsp_init_irq();
- opb_pic_init();
-}
-
define_machine(psr2_md) {
.name = "PSR2 A2",
.probe = psr2_probe,
- .setup_arch = psr2_setup_arch,
+ .setup_arch = wsp_setup_arch,
.restart = psr2_restart,
.power_off = psr2_spin,
.halt = psr2_spin,
.calibrate_decr = generic_calibrate_decr,
- .init_IRQ = psr2_init_irq,
+ .init_IRQ = wsp_setup_irq,
.progress = udbg_progress,
.power_save = book3e_idle,
};
+
+machine_arch_initcall(psr2_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
new file mode 100644
index 00000000000..d25cc96c21b
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include <asm/scom.h>
+
+#include "wsp.h"
+#include "ics.h"
+
+#define WSP_SOC_COMPATIBLE "ibm,wsp-soc"
+#define PBIC_COMPATIBLE "ibm,wsp-pbic"
+#define COPRO_COMPATIBLE "ibm,wsp-coprocessor"
+
+static int __init wsp_probe_buses(void)
+{
+ static __initdata struct of_device_id bus_ids[] = {
+ /*
+ * every node in between needs to be here or you won't
+ * find it
+ */
+ { .compatible = WSP_SOC_COMPATIBLE, },
+ { .compatible = PBIC_COMPATIBLE, },
+ { .compatible = COPRO_COMPATIBLE, },
+ {},
+ };
+ of_platform_bus_probe(NULL, bus_ids, NULL);
+
+ return 0;
+}
+
+void __init wsp_setup_arch(void)
+{
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+ scom_init_wsp();
+
+ /* Setup SMP callback */
+#ifdef CONFIG_SMP
+ a2_setup_smp();
+#endif
+#ifdef CONFIG_PCI
+ wsp_setup_pci();
+#endif
+}
+
+void __init wsp_setup_irq(void)
+{
+ wsp_init_irq();
+ opb_pic_init();
+}
+
+
+int __init wsp_probe_devices(void)
+{
+ struct device_node *np;
+
+ /* Our RTC is a ds1500. It seems to be programmatically compatible
+ * with the ds1511 for which we have a driver so let's use that
+ */
+ np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
+ if (np != NULL) {
+ struct resource res;
+ if (of_address_to_resource(np, 0, &res) == 0)
+ platform_device_register_simple("ds1511", 0, &res, 1);
+ }
+
+ wsp_probe_buses();
+
+ return 0;
+}
+
+void wsp_halt(void)
+{
+ u64 val;
+ scom_map_t m;
+ struct device_node *dn;
+ struct device_node *mine;
+ struct device_node *me;
+
+ me = of_get_cpu_node(smp_processor_id(), NULL);
+ mine = scom_find_parent(me);
+
+ /* This will halt all the A2s but not power off the chip */
+ for_each_node_with_property(dn, "scom-controller") {
+ if (dn == mine)
+ continue;
+ m = scom_map(dn, 0, 1);
+
+ /* read-modify-write it so the HW probe does not get
+ * confused */
+ val = scom_read(m, 0);
+ val |= 1;
+ scom_write(m, 0, val);
+ scom_unmap(m);
+ }
+ m = scom_map(mine, 0, 1);
+ val = scom_read(m, 0);
+ val |= 1;
+ scom_write(m, 0, val);
+ /* should never return */
+ scom_unmap(m);
+}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
index 33479818f62..10c1d1fff36 100644
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -6,15 +6,25 @@
/* Devtree compatible strings for major devices */
#define PCIE_COMPATIBLE "ibm,wsp-pciex"
+extern void wsp_setup_arch(void);
+extern void wsp_setup_irq(void);
+extern int wsp_probe_devices(void);
+extern void wsp_halt(void);
+
extern void wsp_setup_pci(void);
extern void scom_init_wsp(void);
extern void a2_setup_smp(void);
extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
struct device_node *np);
-int smp_a2_cpu_bootable(unsigned int nr);
-int __devinit smp_a2_kick_cpu(int nr);
+extern int smp_a2_cpu_bootable(unsigned int nr);
+extern int __devinit smp_a2_kick_cpu(int nr);
+
+extern void opb_pic_init(void);
-void opb_pic_init(void);
+/* chroma specific management */
+extern void wsp_h8_restart(char *cmd);
+extern void wsp_h8_power_off(void);
+extern void __init wsp_setup_h8(void);
#endif /* __WSP_H */
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
index d2571096c3e..7f5b8380886 100755
--- a/arch/powerpc/relocs_check.pl
+++ b/arch/powerpc/relocs_check.pl
@@ -32,8 +32,18 @@ while (<FD>) {
next if (!/\s+R_/);
# These relocations are okay
- next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or
- /R_PPC64_ADDR64\s+mach_/);
+ # On PPC64:
+ # R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64
+ # On PPC:
+ # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+ # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
+ # R_PPC_NONE
+
+ next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or
+ /\bR_PPC64_ADDR64\s+mach_/);
+ next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or
+ /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or
+ /\bR_PPC_NONE\b/);
# If we see this type of relcoation it's an idication that
# we /may/ be using an old version of binutils.
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 84e13253aec..5e37b471786 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,10 +17,11 @@ obj-$(CONFIG_FSL_SOC) += fsl_soc.o
obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
+obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
-obj-$(CONFIG_FSL_RIO) += fsl_rio.o
+obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ba427191906..1c16141c031 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -25,7 +25,6 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index af1a5df46b3..b6731e4a664 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -280,6 +280,7 @@ void __init ehv_pic_init(void)
if (!ehv_pic->irqhost) {
of_node_put(np);
+ kfree(ehv_pic);
return;
}
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
new file mode 100644
index 00000000000..b31f19f6103
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Freescale Integrated Flash Controller
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <asm/prom.h>
+#include <asm/fsl_ifc.h>
+
+struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+
+/*
+ * convert_ifc_address - convert the base address
+ * @addr_base: base address of the memory bank
+ */
+unsigned int convert_ifc_address(phys_addr_t addr_base)
+{
+ return addr_base & CSPR_BA;
+}
+EXPORT_SYMBOL(convert_ifc_address);
+
+/*
+ * fsl_ifc_find - find IFC bank
+ * @addr_base: base address of the memory bank
+ *
+ * This function walks the IFC banks, comparing the "Base address" field of
+ * the CSPR registers with the supplied addr_base argument. When the bases
+ * match, it returns the bank number (starting with 0); otherwise it returns
+ * an appropriate errno value.
+ */
+int fsl_ifc_find(phys_addr_t addr_base)
+{
+ int i = 0;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) {
+ __be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+ if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+ convert_ifc_address(addr_base))
+ return i;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fsl_ifc_find);
+
+static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+
+ /*
+ * Clear all the common status and event registers
+ */
+ if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* enable all error and events */
+ out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+
+ /* enable all error and event interrupts */
+ out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
+ out_be32(&ifc->cm_erattr0, 0x0);
+ out_be32(&ifc->cm_erattr1, 0x0);
+
+ return 0;
+}
+
+static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+
+ free_irq(ctrl->nand_irq, ctrl);
+ free_irq(ctrl->irq, ctrl);
+
+ irq_dispose_mapping(ctrl->nand_irq);
+ irq_dispose_mapping(ctrl->irq);
+
+ iounmap(ctrl->regs);
+
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ctrl);
+
+ return 0;
+}
+
+/*
+ * NAND events are split between an operational interrupt which only
+ * receives OPC, and an error interrupt that receives everything else,
+ * including non-NAND errors. Whichever interrupt gets to it first
+ * records the status and wakes the wait queue.
+ */
+static DEFINE_SPINLOCK(nand_irq_lock);
+
+static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&nand_irq_lock, flags);
+
+ stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+ if (stat) {
+ out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+ ctrl->nand_stat = stat;
+ wake_up(&ctrl->nand_wait);
+ }
+
+ spin_unlock_irqrestore(&nand_irq_lock, flags);
+
+ return stat;
+}
+
+static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+
+ if (check_nand_stat(ctrl))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/*
+ * NOTE: This interrupt is used to report ifc events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ u32 err_axiid, err_srcid, status, cs_err, err_addr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* read for chip select error */
+ cs_err = in_be32(&ifc->cm_evter_stat);
+ if (cs_err) {
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to "
+ "any memory bank 0x%08X\n", cs_err);
+ /* clear the chip select error */
+ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+
+ /* read error attribute registers print the error information */
+ status = in_be32(&ifc->cm_erattr0);
+ err_addr = in_be32(&ifc->cm_erattr1);
+
+ if (status & IFC_CM_ERATTR0_ERTYP_READ)
+ dev_err(ctrl->dev, "Read transaction error "
+ "CM_ERATTR0 0x%08X\n", status);
+ else
+ dev_err(ctrl->dev, "Write transaction error "
+ "CM_ERATTR0 0x%08X\n", status);
+
+ err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
+ IFC_CM_ERATTR0_ERAID_SHIFT;
+ dev_err(ctrl->dev, "AXI ID of the error "
+ "transaction 0x%08X\n", err_axiid);
+
+ err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
+ IFC_CM_ERATTR0_ESRCID_SHIFT;
+ dev_err(ctrl->dev, "SRC ID of the error "
+ "transaction 0x%08X\n", err_srcid);
+
+ dev_err(ctrl->dev, "Transaction Address corresponding to error "
+ "ERADDR 0x%08X\n", err_addr);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (check_nand_stat(ctrl))
+ ret = IRQ_HANDLED;
+
+ return ret;
+}
+
+/*
+ * fsl_ifc_ctrl_probe
+ *
+ * called by device layer when it finds a device matching
+ * one our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+*/
+static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev)
+{
+ int ret = 0;
+
+
+ dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+ fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL);
+ if (!fsl_ifc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+ /* IOMAP the entire IFC region */
+ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the Controller level irq */
+ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource "
+ "for IFC\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the nand machine irq */
+ fsl_ifc_ctrl_dev->nand_irq =
+ irq_of_parse_and_map(dev->dev.of_node, 1);
+ if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource "
+ "for NAND Machine\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ fsl_ifc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
+ if (ret < 0)
+ goto err;
+
+ init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
+
+ ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
+ "fsl-ifc", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->irq);
+ goto err_irq;
+ }
+
+ ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0,
+ "fsl-ifc-nand", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->nand_irq);
+ goto err_nandirq;
+ }
+
+ return 0;
+
+err_nandirq:
+ free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
+err_irq:
+ free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+err:
+ return ret;
+}
+
+static const struct of_device_id fsl_ifc_match[] = {
+ {
+ .compatible = "fsl,ifc",
+ },
+ {},
+};
+
+static struct platform_driver fsl_ifc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-ifc",
+ .of_match_table = fsl_ifc_match,
+ },
+ .probe = fsl_ifc_ctrl_probe,
+ .remove = fsl_ifc_ctrl_remove,
+};
+
+module_platform_driver(fsl_ifc_ctrl_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
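The NAND-machine interrupt path above only latches the event status into ctrl->nand_stat and wakes ctrl->nand_wait; the code that sleeps on that waitqueue lives in the IFC NAND chip driver, which is not part of this hunk. A minimal sketch of what such a waiter could look like (the helper name and the timeout value are assumptions, not taken from this patch):

	/*
	 * Sketch only: block until fsl_ifc_nand_irq()/check_nand_stat()
	 * posts an event, then hand the latched status back to the caller.
	 * The caller is expected to clear ctrl->nand_stat before starting
	 * the NAND operation.
	 */
	static u32 fsl_ifc_nand_wait_event(struct fsl_ifc_ctrl *ctrl)
	{
		long t;

		t = wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat != 0,
				       msecs_to_jiffies(250));
		if (!t)
			dev_err(ctrl->dev, "IFC NAND event timed out\n");

		return ctrl->nand_stat;
	}
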
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index c4d96fa32ba..483126d7b3c 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -328,9 +328,42 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
+ fsl_lbc_ctrl_dev = NULL;
return ret;
}
+#ifdef CONFIG_SUSPEND
+
+/* save lbc registers */
+static int fsl_lbc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs), GFP_KERNEL);
+ if (!ctrl->saved_regs)
+ return -ENOMEM;
+
+ _memcpy_fromio(ctrl->saved_regs, lbc, sizeof(struct fsl_lbc_regs));
+ return 0;
+}
+
+/* restore lbc registers */
+static int fsl_lbc_resume(struct platform_device *pdev)
+{
+ struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ if (ctrl->saved_regs) {
+ _memcpy_toio(lbc, ctrl->saved_regs,
+ sizeof(struct fsl_lbc_regs));
+ kfree(ctrl->saved_regs);
+ ctrl->saved_regs = NULL;
+ }
+ return 0;
+}
+#endif /* CONFIG_SUSPEND */
+
static const struct of_device_id fsl_lbc_match[] = {
{ .compatible = "fsl,elbc", },
{ .compatible = "fsl,pq3-localbus", },
@@ -345,6 +378,10 @@ static struct platform_driver fsl_lbc_ctrl_driver = {
.of_match_table = fsl_lbc_match,
},
.probe = fsl_lbc_ctrl_probe,
+#ifdef CONFIG_SUSPEND
+ .suspend = fsl_lbc_suspend,
+ .resume = fsl_lbc_resume,
+#endif
};
static int __init fsl_lbc_init(void)
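The new suspend/resume hooks above use the legacy platform_driver .suspend/.resume callbacks guarded by CONFIG_SUSPEND. For comparison only, the same register save/restore could also be expressed through dev_pm_ops; the sketch below is an alternative shape, not what the patch does, and the *_pm_* names are made up:

	/* Illustrative alternative wiring via dev_pm_ops. */
	static int fsl_lbc_pm_suspend(struct device *dev)
	{
		struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(dev);

		ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs),
					   GFP_KERNEL);
		if (!ctrl->saved_regs)
			return -ENOMEM;

		_memcpy_fromio(ctrl->saved_regs, ctrl->regs,
			       sizeof(struct fsl_lbc_regs));
		return 0;
	}

	static int fsl_lbc_pm_resume(struct device *dev)
	{
		struct fsl_lbc_ctrl *ctrl = dev_get_drvdata(dev);

		if (ctrl->saved_regs) {
			_memcpy_toio(ctrl->regs, ctrl->saved_regs,
				     sizeof(struct fsl_lbc_regs));
			kfree(ctrl->saved_regs);
			ctrl->saved_regs = NULL;
		}
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(fsl_lbc_pm_ops,
				 fsl_lbc_pm_suspend, fsl_lbc_pm_resume);
	/* then: .driver.pm = &fsl_lbc_pm_ops in fsl_lbc_ctrl_driver */
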
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e5c344d336e..ecb5c1946d2 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -23,6 +23,8 @@
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
+#include <asm/fsl_hcalls.h>
+
#include "fsl_msi.h"
#include "fsl_pci.h"
@@ -148,14 +150,49 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct device_node *np;
+ phandle phandle = 0;
int rc, hwirq = -ENOMEM;
unsigned int virq;
struct msi_desc *entry;
struct msi_msg msg;
struct fsl_msi *msi_data;
+ /*
+ * If the PCI node has an fsl,msi property, then we need to use it
+ * to find the specific MSI.
+ */
+ np = of_parse_phandle(hose->dn, "fsl,msi", 0);
+ if (np) {
+ if (of_device_is_compatible(np, "fsl,mpic-msi") ||
+ of_device_is_compatible(np, "fsl,vmpic-msi"))
+ phandle = np->phandle;
+ else {
+ dev_err(&pdev->dev,
+ "node %s has an invalid fsl,msi phandle %u\n",
+ hose->dn->full_name, np->phandle);
+ return -EINVAL;
+ }
+ }
+
list_for_each_entry(entry, &pdev->msi_list, list) {
+ /*
+ * Loop over all the MSI devices until we find one that has an
+ * available interrupt.
+ */
list_for_each_entry(msi_data, &msi_head, list) {
+ /*
+ * If the PCI node has an fsl,msi property, then we
+ * restrict our search to the corresponding MSI node.
+ * The simplest way is to skip over MSI nodes with the
+ * wrong phandle. Under the Freescale hypervisor, this
+ * has the additional benefit of skipping over MSI
+ * nodes that are not mapped in the PAMU.
+ */
+ if (phandle && (phandle != msi_data->phandle))
+ continue;
+
hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (hwirq >= 0)
break;
@@ -163,16 +200,14 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (hwirq < 0) {
rc = hwirq;
- pr_debug("%s: fail allocating msi interrupt\n",
- __func__);
+ dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
goto out_free;
}
virq = irq_create_mapping(msi_data->irqhost, hwirq);
if (virq == NO_IRQ) {
- pr_debug("%s: fail mapping hwirq 0x%x\n",
- __func__, hwirq);
+ dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
rc = -ENOSPC;
goto out_free;
@@ -201,6 +236,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 intr_index;
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data;
+ unsigned int ret;
cascade_data = irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
@@ -232,6 +268,14 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
case FSL_PIC_IP_IPIC:
msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
break;
+ case FSL_PIC_IP_VMPIC:
+ ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
+ if (ret) {
+ pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
+ "irq %u (ret=%u)\n", irq, ret);
+ msir_value = 0;
+ }
+ break;
}
while (msir_value) {
@@ -249,6 +293,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
+ case FSL_PIC_IP_VMPIC:
chip->irq_eoi(idata);
break;
case FSL_PIC_IP_IPIC:
@@ -278,7 +323,8 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
}
if (msi->bitmap.bitmap)
msi_bitmap_free(&msi->bitmap);
- iounmap(msi->msi_regs);
+ if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
+ iounmap(msi->msi_regs);
kfree(msi);
return 0;
@@ -350,25 +396,37 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
goto error_out;
}
- /* Get the MSI reg base */
- err = of_address_to_resource(dev->dev.of_node, 0, &res);
- if (err) {
- dev_err(&dev->dev, "%s resource error!\n",
+ /*
+ * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
+ * property. Instead, we use hypercalls to access the MSI.
+ */
+ if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
+ err = of_address_to_resource(dev->dev.of_node, 0, &res);
+ if (err) {
+ dev_err(&dev->dev, "invalid resource for node %s\n",
dev->dev.of_node->full_name);
- goto error_out;
- }
+ goto error_out;
+ }
- msi->msi_regs = ioremap(res.start, resource_size(&res));
- if (!msi->msi_regs) {
- dev_err(&dev->dev, "ioremap problem failed\n");
- goto error_out;
+ msi->msi_regs = ioremap(res.start, resource_size(&res));
+ if (!msi->msi_regs) {
+ dev_err(&dev->dev, "could not map node %s\n",
+ dev->dev.of_node->full_name);
+ goto error_out;
+ }
+ msi->msiir_offset =
+ features->msiir_offset + (res.start & 0xfffff);
}
msi->feature = features->fsl_pic_ip;
msi->irqhost->host_data = msi;
- msi->msiir_offset = features->msiir_offset + (res.start & 0xfffff);
+ /*
+ * Remember the phandle, so that we can match with any PCI nodes
+ * that have an "fsl,msi" property.
+ */
+ msi->phandle = dev->dev.of_node->phandle;
rc = fsl_msi_init_allocator(msi);
if (rc) {
@@ -437,6 +495,11 @@ static const struct fsl_msi_feature ipic_msi_feature = {
.msiir_offset = 0x38,
};
+static const struct fsl_msi_feature vmpic_msi_feature = {
+ .fsl_pic_ip = FSL_PIC_IP_VMPIC,
+ .msiir_offset = 0,
+};
+
static const struct of_device_id fsl_of_msi_ids[] = {
{
.compatible = "fsl,mpic-msi",
@@ -446,6 +509,10 @@ static const struct of_device_id fsl_of_msi_ids[] = {
.compatible = "fsl,ipic-msi",
.data = (void *)&ipic_msi_feature,
},
+ {
+ .compatible = "fsl,vmpic-msi",
+ .data = (void *)&vmpic_msi_feature,
+ },
{}
};
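The allocation loop in fsl_setup_msi_irqs() above restricts its search to the MSI bank whose device-tree node matches the PCI controller's "fsl,msi" phandle. Factored out for clarity, the matching step amounts to the helper sketched below (fsl_msi_find_bank() is not part of the patch; it only restates the loop's logic):

	/*
	 * Sketch: return the first MSI bank that matches the given phandle
	 * and still has a free hardware IRQ.  A phandle of 0 means "use any
	 * bank", which is the pre-existing behaviour.
	 */
	static struct fsl_msi *fsl_msi_find_bank(phandle ph, int *hwirq)
	{
		struct fsl_msi *msi_data;

		list_for_each_entry(msi_data, &msi_head, list) {
			if (ph && ph != msi_data->phandle)
				continue;	/* bound to a different PCI host */

			*hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (*hwirq >= 0)
				return msi_data;
		}
		return NULL;
	}
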
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 1313abbc520..f6c646a5254 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -13,15 +13,17 @@
#ifndef _POWERPC_SYSDEV_FSL_MSI_H
#define _POWERPC_SYSDEV_FSL_MSI_H
+#include <linux/of.h>
#include <asm/msi_bitmap.h>
#define NR_MSI_REG 8
#define IRQS_PER_MSI_REG 32
#define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG)
-#define FSL_PIC_IP_MASK 0x0000000F
-#define FSL_PIC_IP_MPIC 0x00000001
-#define FSL_PIC_IP_IPIC 0x00000002
+#define FSL_PIC_IP_MASK 0x0000000F
+#define FSL_PIC_IP_MPIC 0x00000001
+#define FSL_PIC_IP_IPIC 0x00000002
+#define FSL_PIC_IP_VMPIC 0x00000003
struct fsl_msi {
struct irq_host *irqhost;
@@ -36,6 +38,8 @@ struct fsl_msi {
struct msi_bitmap bitmap;
struct list_head list; /* support multiple MSI banks */
+
+ phandle phandle;
};
#endif /* _POWERPC_SYSDEV_FSL_MSI_H */
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4ce547e0047..3b61e8cf342 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -65,6 +65,30 @@ static int __init fsl_pcie_check_link(struct pci_controller *hose)
}
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+#define MAX_PHYS_ADDR_BITS 40
+static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+
+static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ /*
+ * Fix up PCI devices that can DMA above the physical address
+ * width of the SoC, so that any internal SoC address can be
+ * reached from across PCI if needed.
+ */
+ if ((dev->bus == &pci_bus_type) &&
+ dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
+ set_dma_ops(dev, &dma_direct_ops);
+ set_dma_offset(dev, pci64_dma_offset);
+ }
+
+ *dev->dma_mask = dma_mask;
+ return 0;
+}
+
static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
unsigned int index, const struct resource *res,
resource_size_t offset)
@@ -113,6 +137,8 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
char *name = hose->dn->full_name;
+ const u64 *reg;
+ int len;
pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
(u64)rsrc->start, (u64)resource_size(rsrc));
@@ -205,6 +231,33 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
/* Setup inbound mem window */
mem = memblock_end_of_DRAM();
+
+ /*
+ * The msi-address-64 property, if it exists, indicates the physical
+ * address of the MSIIR register. Normally, this register is located
+ * inside CCSR, so the ATMU that covers all of CCSR is used. But if
+ * this property exists, then we normally need to create a new ATMU
+ * for it. For now, however, we cheat. The only entity that creates
+ * this property is the Freescale hypervisor, and the address is
+ * specified in the partition configuration. Typically, the address
+ * is located in the page immediately after the end of DDR. If so, we
+ * can avoid allocating a new ATMU by extending the DDR ATMU by one
+ * page.
+ */
+ reg = of_get_property(hose->dn, "msi-address-64", &len);
+ if (reg && (len == sizeof(u64))) {
+ u64 address = be64_to_cpup(reg);
+
+ if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
+ pr_info("%s: extending DDR ATMU to cover MSIIR", name);
+ mem += PAGE_SIZE;
+ } else {
+ /* TODO: Create a new ATMU for MSIIR */
+ pr_warn("%s: msi-address-64 address of %llx is "
+ "unsupported\n", name, address);
+ }
+ }
+
sz = min(mem, paddr_lo);
mem_log = __ilog2_u64(sz);
@@ -228,6 +281,37 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
hose->dma_window_base_cur = 0x00000000;
hose->dma_window_size = (resource_size_t)sz;
+
+ /*
+ * If we have more than 4 GiB of memory, set up a second PCI inbound
+ * window to let 64-bit address capable devices work without SWIOTLB
+ * and access the full range of memory.
+ */
+ if (sz != mem) {
+ mem_log = __ilog2_u64(mem);
+
+ /* Size the window up if we don't fit in an exact power of 2 */
+ if ((1ull << mem_log) != mem)
+ mem_log++;
+
+ piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
+
+ /* Setup inbound memory window */
+ out_be32(&pci->piw[win_idx].pitar, 0x00000000);
+ out_be32(&pci->piw[win_idx].piwbear,
+ pci64_dma_offset >> 44);
+ out_be32(&pci->piw[win_idx].piwbar,
+ pci64_dma_offset >> 12);
+ out_be32(&pci->piw[win_idx].piwar, piwar);
+
+ /*
+ * install our own dma_set_mask handler to fixup dma_ops
+ * and dma_offset
+ */
+ ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
+
+ pr_info("%s: Setup 64-bit PCI DMA window\n", name);
+ }
} else {
u64 paddr = 0;
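With the second inbound ATMU window programmed above, a 64-bit capable PCI device only needs to raise its DMA mask for fsl_pci_dma_set_mask() to switch it to dma_direct_ops with the pci64_dma_offset applied. From a device driver's point of view the flow is the usual one; roughly (the driver and function names here are purely illustrative):

	/* In a PCI driver's probe(): ask for 64-bit DMA, fall back to 32-bit. */
	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		int err;

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			/* 32-bit addressing, bounced through SWIOTLB if needed */
			err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (err)
				return err;
		}

		/* ... normal device setup continues here ... */
		return 0;
	}
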
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 22ffccd8bef..a4c4f4a932d 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
* - Added Port-Write message handling
* - Added Machine Check exception handling
*
- * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
* Zhang Wei <wei.zhang@freescale.com>
*
* Copyright 2005 MontaVista Software, Inc.
@@ -28,240 +28,33 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/machdep.h>
-#include <asm/uaccess.h>
-#undef DEBUG_PW /* Port-Write debugging */
+#include "fsl_rio.h"
-/* RapidIO definition irq, which read from OF-tree */
-#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq)
-#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
-#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
-#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
-
-#define IPWSR_CLEAR 0x98
-#define OMSR_CLEAR 0x1cb3
-#define IMSR_CLEAR 0x491
-#define IDSR_CLEAR 0x91
-#define ODSR_CLEAR 0x1c00
-#define LTLEECSR_ENABLE_ALL 0xFFC000FC
-#define ESCSR_CLEAR 0x07120204
-#define IECSR_CLEAR 0x80000000
+#undef DEBUG_PW /* Port-Write debugging */
#define RIO_PORT1_EDCSR 0x0640
#define RIO_PORT2_EDCSR 0x0680
#define RIO_PORT1_IECSR 0x10130
#define RIO_PORT2_IECSR 0x101B0
-#define RIO_IM0SR 0x13064
-#define RIO_IM1SR 0x13164
-#define RIO_OM0SR 0x13004
-#define RIO_OM1SR 0x13104
-
-#define RIO_ATMU_REGS_OFFSET 0x10c00
-#define RIO_P_MSG_REGS_OFFSET 0x11000
-#define RIO_S_MSG_REGS_OFFSET 0x13000
+
#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
+#define ESCSR_CLEAR 0x07120204
#define RIO_PORT2_ESCSR 0x178
#define RIO_CCSR 0x15c
-#define RIO_LTLEDCSR 0x0608
#define RIO_LTLEDCSR_IER 0x80000000
#define RIO_LTLEDCSR_PRT 0x01000000
-#define RIO_LTLEECSR 0x060c
-#define RIO_EPWISR 0x10010
+#define IECSR_CLEAR 0x80000000
#define RIO_ISR_AACR 0x10120
#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
-#define RIO_MAINT_WIN_SIZE 0x400000
-#define RIO_DBELL_WIN_SIZE 0x1000
-
-#define RIO_MSG_OMR_MUI 0x00000002
-#define RIO_MSG_OSR_TE 0x00000080
-#define RIO_MSG_OSR_QOI 0x00000020
-#define RIO_MSG_OSR_QFI 0x00000010
-#define RIO_MSG_OSR_MUB 0x00000004
-#define RIO_MSG_OSR_EOMI 0x00000002
-#define RIO_MSG_OSR_QEI 0x00000001
-
-#define RIO_MSG_IMR_MI 0x00000002
-#define RIO_MSG_ISR_TE 0x00000080
-#define RIO_MSG_ISR_QFI 0x00000010
-#define RIO_MSG_ISR_DIQI 0x00000001
-
-#define RIO_IPWMR_SEN 0x00100000
-#define RIO_IPWMR_QFIE 0x00000100
-#define RIO_IPWMR_EIE 0x00000020
-#define RIO_IPWMR_CQ 0x00000002
-#define RIO_IPWMR_PWE 0x00000001
-
-#define RIO_IPWSR_QF 0x00100000
-#define RIO_IPWSR_TE 0x00000080
-#define RIO_IPWSR_QFI 0x00000010
-#define RIO_IPWSR_PWD 0x00000008
-#define RIO_IPWSR_PWB 0x00000004
-
-/* EPWISR Error match value */
-#define RIO_EPWISR_PINT1 0x80000000
-#define RIO_EPWISR_PINT2 0x40000000
-#define RIO_EPWISR_MU 0x00000002
-#define RIO_EPWISR_PW 0x00000001
-
-#define RIO_MSG_DESC_SIZE 32
-#define RIO_MSG_BUFFER_SIZE 4096
-#define RIO_MIN_TX_RING_SIZE 2
-#define RIO_MAX_TX_RING_SIZE 2048
-#define RIO_MIN_RX_RING_SIZE 2
-#define RIO_MAX_RX_RING_SIZE 2048
-
-#define DOORBELL_DMR_DI 0x00000002
-#define DOORBELL_DSR_TE 0x00000080
-#define DOORBELL_DSR_QFI 0x00000010
-#define DOORBELL_DSR_DIQI 0x00000001
-#define DOORBELL_TID_OFFSET 0x02
-#define DOORBELL_SID_OFFSET 0x04
-#define DOORBELL_INFO_OFFSET 0x06
-
-#define DOORBELL_MESSAGE_SIZE 0x08
-#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET))
-#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET))
-#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
-
-struct rio_atmu_regs {
- u32 rowtar;
- u32 rowtear;
- u32 rowbar;
- u32 pad2;
- u32 rowar;
- u32 pad3[3];
-};
-
-struct rio_msg_regs {
- u32 omr; /* 0xD_3000 - Outbound message 0 mode register */
- u32 osr; /* 0xD_3004 - Outbound message 0 status register */
- u32 pad1;
- u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue
- dequeue pointer address register */
- u32 pad2;
- u32 osar; /* 0xD_3014 - Outbound message 0 source address
- register */
- u32 odpr; /* 0xD_3018 - Outbound message 0 destination port
- register */
- u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes
- Register*/
- u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count
- register */
- u32 pad3;
- u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue
- enqueue pointer address register */
- u32 pad4[13];
- u32 imr; /* 0xD_3060 - Inbound message 0 mode register */
- u32 isr; /* 0xD_3064 - Inbound message 0 status register */
- u32 pad5;
- u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue
- pointer address register*/
- u32 pad6;
- u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue
- pointer address register */
- u32 pad7[226];
- u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */
- u32 odsr; /* 0xD_3404 - Outbound doorbell status register */
- u32 res0[4];
- u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port
- register */
- u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes
- register */
- u32 res1[3];
- u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold
- configuration register */
- u32 res2[12];
- u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */
- u32 dsr; /* 0xD_3464 - Inbound doorbell status register */
- u32 pad8;
- u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer
- address register */
- u32 pad9;
- u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer
- address register */
- u32 pad10[26];
- u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */
- u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */
- u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address
- register */
- u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address
- register */
-};
-
-struct rio_tx_desc {
- u32 res1;
- u32 saddr;
- u32 dport;
- u32 dattr;
- u32 res2;
- u32 res3;
- u32 dwcnt;
- u32 res4;
-};
-
-struct rio_dbell_ring {
- void *virt;
- dma_addr_t phys;
-};
-
-struct rio_msg_tx_ring {
- void *virt;
- dma_addr_t phys;
- void *virt_buffer[RIO_MAX_TX_RING_SIZE];
- dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
- int tx_slot;
- int size;
- void *dev_id;
-};
-
-struct rio_msg_rx_ring {
- void *virt;
- dma_addr_t phys;
- void *virt_buffer[RIO_MAX_RX_RING_SIZE];
- int rx_slot;
- int size;
- void *dev_id;
-};
-
-struct rio_port_write_msg {
- void *virt;
- dma_addr_t phys;
- u32 msg_count;
- u32 err_count;
- u32 discard_count;
-};
-
-struct rio_priv {
- struct device *dev;
- void __iomem *regs_win;
- struct rio_atmu_regs __iomem *atmu_regs;
- struct rio_atmu_regs __iomem *maint_atmu_regs;
- struct rio_atmu_regs __iomem *dbell_atmu_regs;
- void __iomem *dbell_win;
- void __iomem *maint_win;
- struct rio_msg_regs __iomem *msg_regs;
- struct rio_dbell_ring dbell_ring;
- struct rio_msg_tx_ring msg_tx_ring;
- struct rio_msg_rx_ring msg_rx_ring;
- struct rio_port_write_msg port_write_msg;
- int bellirq;
- int txirq;
- int rxirq;
- int pwirq;
- struct work_struct pw_work;
- struct kfifo pw_fifo;
- spinlock_t pw_fifo_lock;
-};
#define __fsl_read_rio_config(x, addr, err, op) \
__asm__ __volatile__( \
@@ -279,7 +72,12 @@ struct rio_priv {
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
-static void __iomem *rio_regs_win;
+void __iomem *rio_regs_win;
+void __iomem *rmu_regs_win;
+resource_size_t rio_law_start;
+
+struct fsl_rio_dbell *dbell;
+struct fsl_rio_pw *pw;
#ifdef CONFIG_E500
int fsl_rio_mcheck_exception(struct pt_regs *regs)
@@ -311,42 +109,6 @@ EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
#endif
/**
- * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
- * @mport: RapidIO master port info
- * @index: ID of RapidIO interface
- * @destid: Destination ID of target device
- * @data: 16-bit info field of RapidIO doorbell message
- *
- * Sends a MPC85xx doorbell message. Returns %0 on success or
- * %-EINVAL on failure.
- */
-static int fsl_rio_doorbell_send(struct rio_mport *mport,
- int index, u16 destid, u16 data)
-{
- struct rio_priv *priv = mport->priv;
- pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
- index, destid, data);
- switch (mport->phy_type) {
- case RIO_PHY_PARALLEL:
- out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
- out_be16(priv->dbell_win, data);
- break;
- case RIO_PHY_SERIAL:
- /* In the serial version silicons, such as MPC8548, MPC8641,
- * below operations is must be.
- */
- out_be32(&priv->msg_regs->odmr, 0x00000000);
- out_be32(&priv->msg_regs->odretcr, 0x00000004);
- out_be32(&priv->msg_regs->oddpr, destid << 16);
- out_be32(&priv->msg_regs->oddatr, data);
- out_be32(&priv->msg_regs->odmr, 0x00000001);
- break;
- }
-
- return 0;
-}
-
-/**
* fsl_local_config_read - Generate a MPC85xx local config space read
* @mport: RapidIO master port info
* @index: ID of RapdiIO interface
@@ -384,8 +146,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
{
struct rio_priv *priv = mport->priv;
pr_debug
- ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
- index, offset, data);
+ ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
+ index, offset, data);
out_be32(priv->regs_win + offset, data);
return 0;
@@ -413,8 +175,9 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
u32 rval, err = 0;
pr_debug
- ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
- index, destid, hopcount, offset, len);
+ ("fsl_rio_config_read:"
+ " index %d destid %d hopcount %d offset %8.8x len %d\n",
+ index, destid, hopcount, offset, len);
/* 16MB maintenance window possible */
/* allow only aligned access to maintenance registers */
@@ -423,7 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
- out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
+ out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
@@ -470,8 +233,9 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
struct rio_priv *priv = mport->priv;
u8 *data;
pr_debug
- ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
- index, destid, hopcount, offset, len, val);
+ ("fsl_rio_config_write:"
+ " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
+ index, destid, hopcount, offset, len, val);
/* 16MB maintenance windows possible */
/* allow only aligned access to maintenance registers */
@@ -480,7 +244,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
- out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
+ out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
@@ -500,590 +264,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
return 0;
}
-/**
- * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
- * @mport: Master port with outbound message queue
- * @rdev: Target of outbound message
- * @mbox: Outbound mailbox
- * @buffer: Message to add to outbound queue
- * @len: Length of message
- *
- * Adds the @buffer message to the MPC85xx outbound message queue. Returns
- * %0 on success or %-EINVAL on failure.
- */
-static int
-fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
- void *buffer, size_t len)
-{
- struct rio_priv *priv = mport->priv;
- u32 omr;
- struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
- + priv->msg_tx_ring.tx_slot;
- int ret = 0;
-
- pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
- "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
-
- if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Copy and clear rest of buffer */
- memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
- len);
- if (len < (RIO_MAX_MSG_SIZE - 4))
- memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
- + len, 0, RIO_MAX_MSG_SIZE - len);
-
- switch (mport->phy_type) {
- case RIO_PHY_PARALLEL:
- /* Set mbox field for message */
- desc->dport = mbox & 0x3;
-
- /* Enable EOMI interrupt, set priority, and set destid */
- desc->dattr = 0x28000000 | (rdev->destid << 2);
- break;
- case RIO_PHY_SERIAL:
- /* Set mbox field for message, and set destid */
- desc->dport = (rdev->destid << 16) | (mbox & 0x3);
-
- /* Enable EOMI interrupt and priority */
- desc->dattr = 0x28000000;
- break;
- }
-
- /* Set transfer size aligned to next power of 2 (in double words) */
- desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
-
- /* Set snooping and source buffer address */
- desc->saddr = 0x00000004
- | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];
-
- /* Increment enqueue pointer */
- omr = in_be32(&priv->msg_regs->omr);
- out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
-
- /* Go to next descriptor */
- if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
- priv->msg_tx_ring.tx_slot = 0;
-
- out:
- return ret;
-}
-
-/**
- * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles outbound message interrupts. Executes a register outbound
- * mailbox event handler and acks the interrupt occurrence.
- */
-static irqreturn_t
-fsl_rio_tx_handler(int irq, void *dev_instance)
-{
- int osr;
- struct rio_mport *port = (struct rio_mport *)dev_instance;
- struct rio_priv *priv = port->priv;
-
- osr = in_be32(&priv->msg_regs->osr);
-
- if (osr & RIO_MSG_OSR_TE) {
- pr_info("RIO: outbound message transmission error\n");
- out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
- goto out;
- }
-
- if (osr & RIO_MSG_OSR_QOI) {
- pr_info("RIO: outbound message queue overflow\n");
- out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
- goto out;
- }
-
- if (osr & RIO_MSG_OSR_EOMI) {
- u32 dqp = in_be32(&priv->msg_regs->odqdpar);
- int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
- port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
- slot);
-
- /* Ack the end-of-message interrupt */
- out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
- }
-
- out:
- return IRQ_HANDLED;
-}
-
-/**
- * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
- * @mport: Master port implementing the outbound message unit
- * @dev_id: Device specific pointer to pass on event
- * @mbox: Mailbox to open
- * @entries: Number of entries in the outbound mailbox ring
- *
- * Initializes buffer ring, request the outbound message interrupt,
- * and enables the outbound message unit. Returns %0 on success and
- * %-EINVAL or %-ENOMEM on failure.
- */
-static int
-fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
-{
- int i, j, rc = 0;
- struct rio_priv *priv = mport->priv;
-
- if ((entries < RIO_MIN_TX_RING_SIZE) ||
- (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Initialize shadow copy ring */
- priv->msg_tx_ring.dev_id = dev_id;
- priv->msg_tx_ring.size = entries;
-
- for (i = 0; i < priv->msg_tx_ring.size; i++) {
- priv->msg_tx_ring.virt_buffer[i] =
- dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
- &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
- if (!priv->msg_tx_ring.virt_buffer[i]) {
- rc = -ENOMEM;
- for (j = 0; j < priv->msg_tx_ring.size; j++)
- if (priv->msg_tx_ring.virt_buffer[j])
- dma_free_coherent(priv->dev,
- RIO_MSG_BUFFER_SIZE,
- priv->msg_tx_ring.
- virt_buffer[j],
- priv->msg_tx_ring.
- phys_buffer[j]);
- goto out;
- }
- }
-
- /* Initialize outbound message descriptor ring */
- priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
- priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
- &priv->msg_tx_ring.phys, GFP_KERNEL);
- if (!priv->msg_tx_ring.virt) {
- rc = -ENOMEM;
- goto out_dma;
- }
- memset(priv->msg_tx_ring.virt, 0,
- priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
- priv->msg_tx_ring.tx_slot = 0;
-
- /* Point dequeue/enqueue pointers at first entry in ring */
- out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
- out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);
-
- /* Configure for snooping */
- out_be32(&priv->msg_regs->osar, 0x00000004);
-
- /* Clear interrupt status */
- out_be32(&priv->msg_regs->osr, 0x000000b3);
-
- /* Hook up outbound message handler */
- rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
- "msg_tx", (void *)mport);
- if (rc < 0)
- goto out_irq;
-
- /*
- * Configure outbound message unit
- * Snooping
- * Interrupts (all enabled, except QEIE)
- * Chaining mode
- * Disable
- */
- out_be32(&priv->msg_regs->omr, 0x00100220);
-
- /* Set number of entries */
- out_be32(&priv->msg_regs->omr,
- in_be32(&priv->msg_regs->omr) |
- ((get_bitmask_order(entries) - 2) << 12));
-
- /* Now enable the unit */
- out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);
-
- out:
- return rc;
-
- out_irq:
- dma_free_coherent(priv->dev,
- priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
- priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
-
- out_dma:
- for (i = 0; i < priv->msg_tx_ring.size; i++)
- dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
- priv->msg_tx_ring.virt_buffer[i],
- priv->msg_tx_ring.phys_buffer[i]);
-
- return rc;
-}
-
-/**
- * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
- * @mport: Master port implementing the outbound message unit
- * @mbox: Mailbox to close
- *
- * Disables the outbound message unit, free all buffers, and
- * frees the outbound message interrupt.
- */
-static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
-{
- struct rio_priv *priv = mport->priv;
- /* Disable inbound message unit */
- out_be32(&priv->msg_regs->omr, 0);
-
- /* Free ring */
- dma_free_coherent(priv->dev,
- priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
- priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
-
- /* Free interrupt */
- free_irq(IRQ_RIO_TX(mport), (void *)mport);
-}
-
-/**
- * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles inbound message interrupts. Executes a registered inbound
- * mailbox event handler and acks the interrupt occurrence.
- */
-static irqreturn_t
-fsl_rio_rx_handler(int irq, void *dev_instance)
-{
- int isr;
- struct rio_mport *port = (struct rio_mport *)dev_instance;
- struct rio_priv *priv = port->priv;
-
- isr = in_be32(&priv->msg_regs->isr);
-
- if (isr & RIO_MSG_ISR_TE) {
- pr_info("RIO: inbound message reception error\n");
- out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
- goto out;
- }
-
- /* XXX Need to check/dispatch until queue empty */
- if (isr & RIO_MSG_ISR_DIQI) {
- /*
- * We implement *only* mailbox 0, but can receive messages
- * for any mailbox/letter to that mailbox destination. So,
- * make the callback with an unknown/invalid mailbox number
- * argument.
- */
- port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);
-
- /* Ack the queueing interrupt */
- out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
- }
-
- out:
- return IRQ_HANDLED;
-}
-
-/**
- * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
- * @mport: Master port implementing the inbound message unit
- * @dev_id: Device specific pointer to pass on event
- * @mbox: Mailbox to open
- * @entries: Number of entries in the inbound mailbox ring
- *
- * Initializes buffer ring, request the inbound message interrupt,
- * and enables the inbound message unit. Returns %0 on success
- * and %-EINVAL or %-ENOMEM on failure.
- */
-static int
-fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
-{
- int i, rc = 0;
- struct rio_priv *priv = mport->priv;
-
- if ((entries < RIO_MIN_RX_RING_SIZE) ||
- (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Initialize client buffer ring */
- priv->msg_rx_ring.dev_id = dev_id;
- priv->msg_rx_ring.size = entries;
- priv->msg_rx_ring.rx_slot = 0;
- for (i = 0; i < priv->msg_rx_ring.size; i++)
- priv->msg_rx_ring.virt_buffer[i] = NULL;
-
- /* Initialize inbound message ring */
- priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
- priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
- &priv->msg_rx_ring.phys, GFP_KERNEL);
- if (!priv->msg_rx_ring.virt) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Point dequeue/enqueue pointers at first entry in ring */
- out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
- out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);
-
- /* Clear interrupt status */
- out_be32(&priv->msg_regs->isr, 0x00000091);
-
- /* Hook up inbound message handler */
- rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
- "msg_rx", (void *)mport);
- if (rc < 0) {
- dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
- priv->msg_tx_ring.virt_buffer[i],
- priv->msg_tx_ring.phys_buffer[i]);
- goto out;
- }
-
- /*
- * Configure inbound message unit:
- * Snooping
- * 4KB max message size
- * Unmask all interrupt sources
- * Disable
- */
- out_be32(&priv->msg_regs->imr, 0x001b0060);
-
- /* Set number of queue entries */
- setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
-
- /* Now enable the unit */
- setbits32(&priv->msg_regs->imr, 0x1);
-
- out:
- return rc;
-}
-
-/**
- * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
- * @mport: Master port implementing the inbound message unit
- * @mbox: Mailbox to close
- *
- * Disables the inbound message unit, free all buffers, and
- * frees the inbound message interrupt.
- */
-static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
-{
- struct rio_priv *priv = mport->priv;
- /* Disable inbound message unit */
- out_be32(&priv->msg_regs->imr, 0);
-
- /* Free ring */
- dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
- priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
-
- /* Free interrupt */
- free_irq(IRQ_RIO_RX(mport), (void *)mport);
-}
-
-/**
- * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
- * @mport: Master port implementing the inbound message unit
- * @mbox: Inbound mailbox number
- * @buf: Buffer to add to inbound queue
- *
- * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
- * %0 on success or %-EINVAL on failure.
- */
-static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
-{
- int rc = 0;
- struct rio_priv *priv = mport->priv;
-
- pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
- priv->msg_rx_ring.rx_slot);
-
- if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
- printk(KERN_ERR
- "RIO: error adding inbound buffer %d, buffer exists\n",
- priv->msg_rx_ring.rx_slot);
- rc = -EINVAL;
- goto out;
- }
-
- priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
- if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
- priv->msg_rx_ring.rx_slot = 0;
-
- out:
- return rc;
-}
-
-/**
- * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
- * @mport: Master port implementing the inbound message unit
- * @mbox: Inbound mailbox number
- *
- * Gets the next available inbound message from the inbound message queue.
- * A pointer to the message is returned on success or NULL on failure.
- */
-static void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
-{
- struct rio_priv *priv = mport->priv;
- u32 phys_buf, virt_buf;
- void *buf = NULL;
- int buf_idx;
-
- phys_buf = in_be32(&priv->msg_regs->ifqdpar);
-
- /* If no more messages, then bail out */
- if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
- goto out2;
-
- virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
- - priv->msg_rx_ring.phys);
- buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
- buf = priv->msg_rx_ring.virt_buffer[buf_idx];
-
- if (!buf) {
- printk(KERN_ERR
- "RIO: inbound message copy failed, no buffers\n");
- goto out1;
- }
-
- /* Copy max message size, caller is expected to allocate that big */
- memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
-
- /* Clear the available buffer */
- priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;
-
- out1:
- setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);
-
- out2:
- return buf;
-}
-
-/**
- * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles doorbell interrupts. Parses a list of registered
- * doorbell event handlers and executes a matching event handler.
- */
-static irqreturn_t
-fsl_rio_dbell_handler(int irq, void *dev_instance)
-{
- int dsr;
- struct rio_mport *port = (struct rio_mport *)dev_instance;
- struct rio_priv *priv = port->priv;
-
- dsr = in_be32(&priv->msg_regs->dsr);
-
- if (dsr & DOORBELL_DSR_TE) {
- pr_info("RIO: doorbell reception error\n");
- out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
- goto out;
- }
-
- if (dsr & DOORBELL_DSR_QFI) {
- pr_info("RIO: doorbell queue full\n");
- out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
- }
-
- /* XXX Need to check/dispatch until queue empty */
- if (dsr & DOORBELL_DSR_DIQI) {
- u32 dmsg =
- (u32) priv->dbell_ring.virt +
- (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
- struct rio_dbell *dbell;
- int found = 0;
-
- pr_debug
- ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
- DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
-
- list_for_each_entry(dbell, &port->dbells, node) {
- if ((dbell->res->start <= DBELL_INF(dmsg)) &&
- (dbell->res->end >= DBELL_INF(dmsg))) {
- found = 1;
- break;
- }
- }
- if (found) {
- dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
- DBELL_INF(dmsg));
- } else {
- pr_debug
- ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
- DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
- }
- setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
- out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
- }
-
- out:
- return IRQ_HANDLED;
-}
-
-/**
- * fsl_rio_doorbell_init - MPC85xx doorbell interface init
- * @mport: Master port implementing the inbound doorbell unit
- *
- * Initializes doorbell unit hardware and inbound DMA buffer
- * ring. Called from fsl_rio_setup(). Returns %0 on success
- * or %-ENOMEM on failure.
- */
-static int fsl_rio_doorbell_init(struct rio_mport *mport)
-{
- struct rio_priv *priv = mport->priv;
- int rc = 0;
-
- /* Map outbound doorbell window immediately after maintenance window */
- priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
- RIO_DBELL_WIN_SIZE);
- if (!priv->dbell_win) {
- printk(KERN_ERR
- "RIO: unable to map outbound doorbell window\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /* Initialize inbound doorbells */
- priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
- DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
- if (!priv->dbell_ring.virt) {
- printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
- rc = -ENOMEM;
- iounmap(priv->dbell_win);
- goto out;
- }
-
- /* Point dequeue/enqueue pointers at first entry in ring */
- out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
- out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);
-
- /* Clear interrupt status */
- out_be32(&priv->msg_regs->dsr, 0x00000091);
-
- /* Hook up doorbell handler */
- rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
- "dbell_rx", (void *)mport);
- if (rc < 0) {
- iounmap(priv->dbell_win);
- dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
- priv->dbell_ring.virt, priv->dbell_ring.phys);
- printk(KERN_ERR
- "MPC85xx RIO: unable to request inbound doorbell irq");
- goto out;
- }
-
- /* Configure doorbells for snooping, 512 entries, and enable */
- out_be32(&priv->msg_regs->dmr, 0x00108161);
-
- out:
- return rc;
-}
-
-static void port_error_handler(struct rio_mport *port, int offset)
+void fsl_rio_port_error_handler(int offset)
{
/*XXX: Error recovery is not implemented, we just clear errors */
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
@@ -1098,263 +279,6 @@ static void port_error_handler(struct rio_mport *port, int offset)
out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
}
}
-
-static void msg_unit_error_handler(struct rio_mport *port)
-{
- struct rio_priv *priv = port->priv;
-
- /*XXX: Error recovery is not implemented, we just clear errors */
- out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
-
- out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
- out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
- out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
- out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
-
- out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
- out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
-
- out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
-}
-
-/**
- * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
- * @irq: Linux interrupt number
- * @dev_instance: Pointer to interrupt-specific data
- *
- * Handles port write interrupts. Parses a list of registered
- * port write event handlers and executes a matching event handler.
- */
-static irqreturn_t
-fsl_rio_port_write_handler(int irq, void *dev_instance)
-{
- u32 ipwmr, ipwsr;
- struct rio_mport *port = (struct rio_mport *)dev_instance;
- struct rio_priv *priv = port->priv;
- u32 epwisr, tmp;
-
- epwisr = in_be32(priv->regs_win + RIO_EPWISR);
- if (!(epwisr & RIO_EPWISR_PW))
- goto pw_done;
-
- ipwmr = in_be32(&priv->msg_regs->pwmr);
- ipwsr = in_be32(&priv->msg_regs->pwsr);
-
-#ifdef DEBUG_PW
- pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
- if (ipwsr & RIO_IPWSR_QF)
- pr_debug(" QF");
- if (ipwsr & RIO_IPWSR_TE)
- pr_debug(" TE");
- if (ipwsr & RIO_IPWSR_QFI)
- pr_debug(" QFI");
- if (ipwsr & RIO_IPWSR_PWD)
- pr_debug(" PWD");
- if (ipwsr & RIO_IPWSR_PWB)
- pr_debug(" PWB");
- pr_debug(" )\n");
-#endif
- /* Schedule deferred processing if PW was received */
- if (ipwsr & RIO_IPWSR_QFI) {
- /* Save PW message (if there is room in FIFO),
- * otherwise discard it.
- */
- if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
- priv->port_write_msg.msg_count++;
- kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
- RIO_PW_MSG_SIZE);
- } else {
- priv->port_write_msg.discard_count++;
- pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
- priv->port_write_msg.discard_count);
- }
- /* Clear interrupt and issue Clear Queue command. This allows
- * another port-write to be received.
- */
- out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
- out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
-
- schedule_work(&priv->pw_work);
- }
-
- if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
- priv->port_write_msg.err_count++;
- pr_debug("RIO: Port-Write Transaction Err (%d)\n",
- priv->port_write_msg.err_count);
- /* Clear Transaction Error: port-write controller should be
- * disabled when clearing this error
- */
- out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
- out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
- out_be32(&priv->msg_regs->pwmr, ipwmr);
- }
-
- if (ipwsr & RIO_IPWSR_PWD) {
- priv->port_write_msg.discard_count++;
- pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
- priv->port_write_msg.discard_count);
- out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
- }
-
-pw_done:
- if (epwisr & RIO_EPWISR_PINT1) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
- port_error_handler(port, 0);
- }
-
- if (epwisr & RIO_EPWISR_PINT2) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
- port_error_handler(port, 1);
- }
-
- if (epwisr & RIO_EPWISR_MU) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
- msg_unit_error_handler(port);
- }
-
- return IRQ_HANDLED;
-}
-
-static void fsl_pw_dpc(struct work_struct *work)
-{
- struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
- unsigned long flags;
- u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
-
- /*
- * Process port-write messages
- */
- spin_lock_irqsave(&priv->pw_fifo_lock, flags);
- while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
- RIO_PW_MSG_SIZE)) {
- /* Process one message */
- spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
-#ifdef DEBUG_PW
- {
- u32 i;
- pr_debug("%s : Port-Write Message:", __func__);
- for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
- if ((i%4) == 0)
- pr_debug("\n0x%02x: 0x%08x", i*4,
- msg_buffer[i]);
- else
- pr_debug(" 0x%08x", msg_buffer[i]);
- }
- pr_debug("\n");
- }
-#endif
- /* Pass the port-write message to RIO core for processing */
- rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
- spin_lock_irqsave(&priv->pw_fifo_lock, flags);
- }
- spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
-}
-
-/**
- * fsl_rio_pw_enable - enable/disable port-write interface init
- * @mport: Master port implementing the port write unit
- * @enable: 1=enable; 0=disable port-write message handling
- */
-static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
-{
- struct rio_priv *priv = mport->priv;
- u32 rval;
-
- rval = in_be32(&priv->msg_regs->pwmr);
-
- if (enable)
- rval |= RIO_IPWMR_PWE;
- else
- rval &= ~RIO_IPWMR_PWE;
-
- out_be32(&priv->msg_regs->pwmr, rval);
-
- return 0;
-}
-
-/**
- * fsl_rio_port_write_init - MPC85xx port write interface init
- * @mport: Master port implementing the port write unit
- *
- * Initializes port write unit hardware and DMA buffer
- * ring. Called from fsl_rio_setup(). Returns %0 on success
- * or %-ENOMEM on failure.
- */
-static int fsl_rio_port_write_init(struct rio_mport *mport)
-{
- struct rio_priv *priv = mport->priv;
- int rc = 0;
-
- /* Following configurations require a disabled port write controller */
- out_be32(&priv->msg_regs->pwmr,
- in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
-
- /* Initialize port write */
- priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
- RIO_PW_MSG_SIZE,
- &priv->port_write_msg.phys, GFP_KERNEL);
- if (!priv->port_write_msg.virt) {
- pr_err("RIO: unable allocate port write queue\n");
- return -ENOMEM;
- }
-
- priv->port_write_msg.err_count = 0;
- priv->port_write_msg.discard_count = 0;
-
- /* Point dequeue/enqueue pointers at first entry */
- out_be32(&priv->msg_regs->epwqbar, 0);
- out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
-
- pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
- in_be32(&priv->msg_regs->epwqbar),
- in_be32(&priv->msg_regs->pwqbar));
-
- /* Clear interrupt status IPWSR */
- out_be32(&priv->msg_regs->pwsr,
- (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
- /* Configure port write contoller for snooping enable all reporting,
- clear queue full */
- out_be32(&priv->msg_regs->pwmr,
- RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
-
-
- /* Hook up port-write handler */
- rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
- IRQF_SHARED, "port-write", (void *)mport);
- if (rc < 0) {
- pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
- goto err_out;
- }
- /* Enable Error Interrupt */
- out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
-
- INIT_WORK(&priv->pw_work, fsl_pw_dpc);
- spin_lock_init(&priv->pw_fifo_lock);
- if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
- pr_err("FIFO allocation failed\n");
- rc = -ENOMEM;
- goto err_out_irq;
- }
-
- pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
- in_be32(&priv->msg_regs->pwmr),
- in_be32(&priv->msg_regs->pwsr));
-
- return rc;
-
-err_out_irq:
- free_irq(IRQ_RIO_PW(mport), (void *)mport);
-err_out:
- dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
- priv->port_write_msg.virt,
- priv->port_write_msg.phys);
- return rc;
-}
-
static inline void fsl_rio_info(struct device *dev, u32 ccsr)
{
const char *str;
@@ -1411,16 +335,21 @@ int fsl_rio_setup(struct platform_device *dev)
struct rio_mport *port;
struct rio_priv *priv;
int rc = 0;
- const u32 *dt_range, *cell;
- struct resource regs;
+ const u32 *dt_range, *cell, *port_index;
+ u32 active_ports = 0;
+ struct resource regs, rmu_regs;
+ struct device_node *np, *rmu_node;
int rlen;
u32 ccsr;
- u64 law_start, law_size;
+ u64 range_start, range_size;
int paw, aw, sw;
+ u32 i;
+ static int tmp;
+ struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL};
if (!dev->dev.of_node) {
dev_err(&dev->dev, "Device OF-Node is NULL");
- return -EFAULT;
+ return -ENODEV;
}
rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
@@ -1429,37 +358,17 @@ int fsl_rio_setup(struct platform_device *dev)
dev->dev.of_node->full_name);
return -EFAULT;
}
- dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
+ dev_info(&dev->dev, "Of-device full name %s\n",
+ dev->dev.of_node->full_name);
dev_info(&dev->dev, "Regs: %pR\n", &regs);
- dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
- if (!dt_range) {
- dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
- dev->dev.of_node->full_name);
- return -EFAULT;
+ rio_regs_win = ioremap(regs.start, resource_size(&regs));
+ if (!rio_regs_win) {
+ dev_err(&dev->dev, "Unable to map rio register window\n");
+ rc = -ENOMEM;
+ goto err_rio_regs;
}
- /* Get node address wide */
- cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
- if (cell)
- aw = *cell;
- else
- aw = of_n_addr_cells(dev->dev.of_node);
- /* Get node size wide */
- cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
- if (cell)
- sw = *cell;
- else
- sw = of_n_size_cells(dev->dev.of_node);
- /* Get parent address wide wide */
- paw = of_n_addr_cells(dev->dev.of_node);
-
- law_start = of_read_number(dt_range + aw, paw);
- law_size = of_read_number(dt_range + aw + paw, sw);
-
- dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
- law_start, law_size);
-
ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
if (!ops) {
rc = -ENOMEM;
@@ -1479,143 +388,257 @@ int fsl_rio_setup(struct platform_device *dev)
ops->add_inb_buffer = fsl_add_inb_buffer;
ops->get_inb_message = fsl_get_inb_message;
- port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
- if (!port) {
+ rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
+ if (!rmu_node)
+ goto err_rmu;
+ rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
+ if (rc) {
+ dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+ rmu_node->full_name);
+ goto err_rmu;
+ }
+ rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
+ if (!rmu_regs_win) {
+ dev_err(&dev->dev, "Unable to map rmu register window\n");
rc = -ENOMEM;
- goto err_port;
+ goto err_rmu;
+ }
+ for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") {
+ rmu_np[tmp] = np;
+ tmp++;
}
- port->index = 0;
- priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
- if (!priv) {
- printk(KERN_ERR "Can't alloc memory for 'priv'\n");
+ /* set up doorbell node */
+ np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit");
+ if (!np) {
+ rc = -ENODEV;
+ goto err_dbell;
+ }
+ dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL);
+ if (!(dbell)) {
+ dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n");
rc = -ENOMEM;
- goto err_priv;
+ goto err_dbell;
}
+ dbell->dev = &dev->dev;
+ dbell->bellirq = irq_of_parse_and_map(np, 1);
+ dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq);
- INIT_LIST_HEAD(&port->dbells);
- port->iores.start = law_start;
- port->iores.end = law_start + law_size - 1;
- port->iores.flags = IORESOURCE_MEM;
- port->iores.name = "rio_io_win";
-
- if (request_resource(&iomem_resource, &port->iores) < 0) {
- dev_err(&dev->dev, "RIO: Error requesting master port region"
- " 0x%016llx-0x%016llx\n",
- (u64)port->iores.start, (u64)port->iores.end);
- rc = -ENOMEM;
- goto err_res;
+ aw = of_n_addr_cells(np);
+ dt_range = of_get_property(np, "reg", &rlen);
+ if (!dt_range) {
+ pr_err("%s: unable to find 'reg' property\n",
+ np->full_name);
+ rc = -ENOMEM;
+ goto err_pw;
}
+ range_start = of_read_number(dt_range, aw);
+ dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win +
+ (u32)range_start);
- priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
- priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
- priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
- priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
- dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
- priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
-
- rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
- strcpy(port->name, "RIO0 mport");
-
- priv->dev = &dev->dev;
-
- port->ops = ops;
- port->priv = priv;
- port->phys_efptr = 0x100;
-
- priv->regs_win = ioremap(regs.start, resource_size(&regs));
- rio_regs_win = priv->regs_win;
-
- /* Probe the master port phy type */
- ccsr = in_be32(priv->regs_win + RIO_CCSR);
- port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
- dev_info(&dev->dev, "RapidIO PHY type: %s\n",
- (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
- ((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
- "unknown"));
- /* Checking the port training status */
- if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
- dev_err(&dev->dev, "Port is not ready. "
- "Try to restart connection...\n");
- switch (port->phy_type) {
- case RIO_PHY_SERIAL:
+ /* set up port-write node */
+ np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit");
+ if (!np) {
+ rc = -ENODEV;
+ goto err_pw;
+ }
+ pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL);
+ if (!(pw)) {
+ dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n");
+ rc = -ENOMEM;
+ goto err_pw;
+ }
+ pw->dev = &dev->dev;
+ pw->pwirq = irq_of_parse_and_map(np, 0);
+ dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq);
+ aw = of_n_addr_cells(np);
+ dt_range = of_get_property(np, "reg", &rlen);
+ if (!dt_range) {
+ pr_err("%s: unable to find 'reg' property\n",
+ np->full_name);
+ rc = -ENOMEM;
+ goto err;
+ }
+ range_start = of_read_number(dt_range, aw);
+ pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start);
+
+ /* set up ports node */
+ for_each_child_of_node(dev->dev.of_node, np) {
+ port_index = of_get_property(np, "cell-index", NULL);
+ if (!port_index) {
+ dev_err(&dev->dev, "Can't get %s property 'cell-index'\n",
+ np->full_name);
+ continue;
+ }
+
+ dt_range = of_get_property(np, "ranges", &rlen);
+ if (!dt_range) {
+ dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Get node address width */
+ cell = of_get_property(np, "#address-cells", NULL);
+ if (cell)
+ aw = *cell;
+ else
+ aw = of_n_addr_cells(np);
+ /* Get node size width */
+ cell = of_get_property(np, "#size-cells", NULL);
+ if (cell)
+ sw = *cell;
+ else
+ sw = of_n_size_cells(np);
+ /* Get parent address width */
+ paw = of_n_addr_cells(np);
+ range_start = of_read_number(dt_range + aw, paw);
+ range_size = of_read_number(dt_range + aw + paw, sw);
+
+ dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n",
+ np->full_name, range_start, range_size);
+
+ port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
+ if (!port)
+ continue;
+
+ i = *port_index - 1;
+ port->index = (unsigned char)i;
+
+ priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->dev, "Can't alloc memory for 'priv'\n");
+ kfree(port);
+ continue;
+ }
+
+ INIT_LIST_HEAD(&port->dbells);
+ port->iores.start = range_start;
+ port->iores.end = port->iores.start + range_size - 1;
+ port->iores.flags = IORESOURCE_MEM;
+ port->iores.name = "rio_io_win";
+
+ if (request_resource(&iomem_resource, &port->iores) < 0) {
+ dev_err(&dev->dev, "RIO: Error requesting master port region"
+ " 0x%016llx-0x%016llx\n",
+ (u64)port->iores.start, (u64)port->iores.end);
+ kfree(priv);
+ kfree(port);
+ continue;
+ }
+ sprintf(port->name, "RIO mport %d", i);
+
+ priv->dev = &dev->dev;
+ port->ops = ops;
+ port->priv = priv;
+ port->phys_efptr = 0x100;
+ priv->regs_win = rio_regs_win;
+
+ /* Probe the master port phy type */
+ ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20);
+ port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
+ if (port->phy_type == RIO_PHY_PARALLEL) {
+ dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n");
+ release_resource(&port->iores);
+ kfree(priv);
+ kfree(port);
+ continue;
+ }
+ dev_info(&dev->dev, "RapidIO PHY type: Serial\n");
+ /* Checking the port training status */
+ if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) {
+ dev_err(&dev->dev, "Port %d is not ready. "
+				"Trying to restart connection...\n", i);
/* Disable ports */
- out_be32(priv->regs_win + RIO_CCSR, 0);
+ out_be32(priv->regs_win
+ + RIO_CCSR + i*0x20, 0);
/* Set 1x lane */
- setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
+ setbits32(priv->regs_win
+ + RIO_CCSR + i*0x20, 0x02000000);
/* Enable ports */
- setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
- break;
- case RIO_PHY_PARALLEL:
- /* Disable ports */
- out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
- /* Enable ports */
- out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
- break;
- }
- msleep(100);
- if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
- dev_err(&dev->dev, "Port restart failed.\n");
- rc = -ENOLINK;
- goto err;
+ setbits32(priv->regs_win
+ + RIO_CCSR + i*0x20, 0x00600000);
+ msleep(100);
+ if (in_be32((priv->regs_win
+ + RIO_ESCSR + i*0x20)) & 1) {
+ dev_err(&dev->dev,
+ "Port %d restart failed.\n", i);
+ release_resource(&port->iores);
+ kfree(priv);
+ kfree(port);
+ continue;
+ }
+			dev_info(&dev->dev, "Port %d restart succeeded\n", i);
}
- dev_info(&dev->dev, "Port restart success!\n");
- }
- fsl_rio_info(&dev->dev, ccsr);
+ fsl_rio_info(&dev->dev, ccsr);
- port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
+ port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
& RIO_PEF_CTLS) >> 4;
- dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
- port->sys_size ? 65536 : 256);
+ dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
+ port->sys_size ? 65536 : 256);
+
+ if (rio_register_mport(port)) {
+ release_resource(&port->iores);
+ kfree(priv);
+ kfree(port);
+ continue;
+ }
+ if (port->host_deviceid >= 0)
+ out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+ RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+ else
+ out_be32(priv->regs_win + RIO_GCCSR,
+ RIO_PORT_GEN_MASTER);
+
+ priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+ + ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET :
+ RIO_ATMU_REGS_PORT2_OFFSET));
- if (rio_register_mport(port))
+ priv->maint_atmu_regs = priv->atmu_regs + 1;
+
+ /* Set to receive any dist ID for serial RapidIO controller. */
+ if (port->phy_type == RIO_PHY_SERIAL)
+ out_be32((priv->regs_win
+ + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA);
+
+ /* Configure maintenance transaction window */
+ out_be32(&priv->maint_atmu_regs->rowbar,
+ port->iores.start >> 12);
+ out_be32(&priv->maint_atmu_regs->rowar,
+ 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
+
+ priv->maint_win = ioremap(port->iores.start,
+ RIO_MAINT_WIN_SIZE);
+
+ rio_law_start = range_start;
+
+ fsl_rio_setup_rmu(port, rmu_np[i]);
+
+ dbell->mport[i] = port;
+
+ active_ports++;
+ }
+
+ if (!active_ports) {
+ rc = -ENOLINK;
goto err;
+ }
- if (port->host_deviceid >= 0)
- out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
- RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
- else
- out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
-
- priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
- + RIO_ATMU_REGS_OFFSET);
- priv->maint_atmu_regs = priv->atmu_regs + 1;
- priv->dbell_atmu_regs = priv->atmu_regs + 2;
- priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
- ((port->phy_type == RIO_PHY_SERIAL) ?
- RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));
-
- /* Set to receive any dist ID for serial RapidIO controller. */
- if (port->phy_type == RIO_PHY_SERIAL)
- out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);
-
- /* Configure maintenance transaction window */
- out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
- out_be32(&priv->maint_atmu_regs->rowar,
- 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
-
- priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
-
- /* Configure outbound doorbell window */
- out_be32(&priv->dbell_atmu_regs->rowbar,
- (law_start + RIO_MAINT_WIN_SIZE) >> 12);
- out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
- fsl_rio_doorbell_init(port);
- fsl_rio_port_write_init(port);
+ fsl_rio_doorbell_init(dbell);
+ fsl_rio_port_write_init(pw);
return 0;
err:
- iounmap(priv->regs_win);
- release_resource(&port->iores);
-err_res:
- kfree(priv);
-err_priv:
- kfree(port);
-err_port:
+ kfree(pw);
+err_pw:
+ kfree(dbell);
+err_dbell:
+ iounmap(rmu_regs_win);
+err_rmu:
kfree(ops);
err_ops:
+ iounmap(rio_regs_win);
+err_rio_regs:
return rc;
}
@@ -1631,7 +654,7 @@ static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev)
static const struct of_device_id fsl_of_rio_rpn_ids[] = {
{
- .compatible = "fsl,rapidio-delta",
+ .compatible = "fsl,srio",
},
{},
};
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
new file mode 100644
index 00000000000..ae8e27405a0
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rio.h
@@ -0,0 +1,135 @@
+/*
+ * Freescale MPC85xx/MPC86xx RapidIO support
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
+ * Zhang Wei <wei.zhang@freescale.com>
+ * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
+ * Liu Gang <Gang.Liu@freescale.com>
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __FSL_RIO_H
+#define __FSL_RIO_H
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/kfifo.h>
+
+#define RIO_REGS_WIN(mport) (((struct rio_priv *)(mport->priv))->regs_win)
+
+#define RIO_MAINT_WIN_SIZE 0x400000
+#define RIO_LTLEDCSR 0x0608
+
+#define DOORBELL_ROWAR_EN 0x80000000
+#define DOORBELL_ROWAR_TFLOWLV 0x08000000 /* highest priority level */
+#define DOORBELL_ROWAR_PCI 0x02000000 /* PCI window */
+#define DOORBELL_ROWAR_NREAD 0x00040000 /* NREAD */
+#define DOORBELL_ROWAR_MAINTRD 0x00070000 /* maintenance read */
+#define DOORBELL_ROWAR_RES	0x00002000 /* wrtpy: reserved */
+#define DOORBELL_ROWAR_MAINTWD 0x00007000
+#define DOORBELL_ROWAR_SIZE 0x0000000b /* window size is 4k */
+
+#define RIO_ATMU_REGS_PORT1_OFFSET 0x10c00
+#define RIO_ATMU_REGS_PORT2_OFFSET 0x10e00
+#define RIO_S_DBELL_REGS_OFFSET 0x13400
+#define RIO_S_PW_REGS_OFFSET 0x134e0
+#define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40
+
+#define MAX_MSG_UNIT_NUM 2
+#define MAX_PORT_NUM 4
+
+struct rio_atmu_regs {
+ u32 rowtar;
+ u32 rowtear;
+ u32 rowbar;
+ u32 pad1;
+ u32 rowar;
+ u32 pad2[3];
+};
+
+struct rio_dbell_ring {
+ void *virt;
+ dma_addr_t phys;
+};
+
+struct rio_port_write_msg {
+ void *virt;
+ dma_addr_t phys;
+ u32 msg_count;
+ u32 err_count;
+ u32 discard_count;
+};
+
+struct fsl_rio_dbell {
+ struct rio_mport *mport[MAX_PORT_NUM];
+ struct device *dev;
+ struct rio_dbell_regs __iomem *dbell_regs;
+ struct rio_dbell_ring dbell_ring;
+ int bellirq;
+};
+
+struct fsl_rio_pw {
+ struct device *dev;
+ struct rio_pw_regs __iomem *pw_regs;
+ struct rio_port_write_msg port_write_msg;
+ int pwirq;
+ struct work_struct pw_work;
+ struct kfifo pw_fifo;
+ spinlock_t pw_fifo_lock;
+};
+
+struct rio_priv {
+ struct device *dev;
+ void __iomem *regs_win;
+ struct rio_atmu_regs __iomem *atmu_regs;
+ struct rio_atmu_regs __iomem *maint_atmu_regs;
+ void __iomem *maint_win;
+ void *rmm_handle; /* RapidIO message manager(unit) Handle */
+};
+
+extern void __iomem *rio_regs_win;
+extern void __iomem *rmu_regs_win;
+
+extern resource_size_t rio_law_start;
+
+extern struct fsl_rio_dbell *dbell;
+extern struct fsl_rio_pw *pw;
+
+extern int fsl_rio_setup_rmu(struct rio_mport *mport,
+ struct device_node *node);
+extern int fsl_rio_port_write_init(struct fsl_rio_pw *pw);
+extern int fsl_rio_pw_enable(struct rio_mport *mport, int enable);
+extern void fsl_rio_port_error_handler(int offset);
+extern int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell);
+
+extern int fsl_rio_doorbell_send(struct rio_mport *mport,
+ int index, u16 destid, u16 data);
+extern int fsl_add_outb_message(struct rio_mport *mport,
+ struct rio_dev *rdev,
+ int mbox, void *buffer, size_t len);
+extern int fsl_open_outb_mbox(struct rio_mport *mport,
+ void *dev_id, int mbox, int entries);
+extern void fsl_close_outb_mbox(struct rio_mport *mport, int mbox);
+extern int fsl_open_inb_mbox(struct rio_mport *mport,
+ void *dev_id, int mbox, int entries);
+extern void fsl_close_inb_mbox(struct rio_mport *mport, int mbox);
+extern int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf);
+extern void *fsl_get_inb_message(struct rio_mport *mport, int mbox);
+
+#endif
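
As a quick illustration of the doorbell entry point declared above, here is a minimal caller sketch; it assumes a valid mport obtained from normal RapidIO enumeration and an existing destination ID, neither of which is defined by this patch (in-tree drivers would usually reach this through the generic rio_send_doorbell() wrapper rather than calling the fsl_* helper directly):

/* Hypothetical caller sketch -- not part of this patch */
static int example_ring_doorbell(struct rio_mport *mport, u16 destid)
{
	/* the 2nd argument selects the outbound controller, the last one is
	 * the 16-bit info field delivered to the remote doorbell handler */
	return fsl_rio_doorbell_send(mport, mport->index, destid, 0x0001);
}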
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
new file mode 100644
index 00000000000..15485789e9d
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -0,0 +1,1104 @@
+/*
+ * Freescale MPC85xx/MPC86xx RapidIO RMU support
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
+ * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
+ * Zhang Wei <wei.zhang@freescale.com>
+ * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
+ * Liu Gang <Gang.Liu@freescale.com>
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include "fsl_rio.h"
+
+#define GET_RMM_HANDLE(mport) \
+ (((struct rio_priv *)(mport->priv))->rmm_handle)
+
+/* RapidIO IRQ definitions, read from the OF tree */
+#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
+#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
+#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
+#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
+
+#define RIO_MIN_TX_RING_SIZE 2
+#define RIO_MAX_TX_RING_SIZE 2048
+#define RIO_MIN_RX_RING_SIZE 2
+#define RIO_MAX_RX_RING_SIZE 2048
+
+#define RIO_IPWMR_SEN 0x00100000
+#define RIO_IPWMR_QFIE 0x00000100
+#define RIO_IPWMR_EIE 0x00000020
+#define RIO_IPWMR_CQ 0x00000002
+#define RIO_IPWMR_PWE 0x00000001
+
+#define RIO_IPWSR_QF 0x00100000
+#define RIO_IPWSR_TE 0x00000080
+#define RIO_IPWSR_QFI 0x00000010
+#define RIO_IPWSR_PWD 0x00000008
+#define RIO_IPWSR_PWB 0x00000004
+
+#define RIO_EPWISR 0x10010
+/* EPWISR Error match value */
+#define RIO_EPWISR_PINT1 0x80000000
+#define RIO_EPWISR_PINT2 0x40000000
+#define RIO_EPWISR_MU 0x00000002
+#define RIO_EPWISR_PW 0x00000001
+
+#define IPWSR_CLEAR 0x98
+#define OMSR_CLEAR 0x1cb3
+#define IMSR_CLEAR 0x491
+#define IDSR_CLEAR 0x91
+#define ODSR_CLEAR 0x1c00
+#define LTLEECSR_ENABLE_ALL 0xFFC000FC
+#define RIO_LTLEECSR 0x060c
+
+#define RIO_IM0SR 0x64
+#define RIO_IM1SR 0x164
+#define RIO_OM0SR 0x4
+#define RIO_OM1SR 0x104
+
+#define RIO_DBELL_WIN_SIZE 0x1000
+
+#define RIO_MSG_OMR_MUI 0x00000002
+#define RIO_MSG_OSR_TE 0x00000080
+#define RIO_MSG_OSR_QOI 0x00000020
+#define RIO_MSG_OSR_QFI 0x00000010
+#define RIO_MSG_OSR_MUB 0x00000004
+#define RIO_MSG_OSR_EOMI 0x00000002
+#define RIO_MSG_OSR_QEI 0x00000001
+
+#define RIO_MSG_IMR_MI 0x00000002
+#define RIO_MSG_ISR_TE 0x00000080
+#define RIO_MSG_ISR_QFI 0x00000010
+#define RIO_MSG_ISR_DIQI 0x00000001
+
+#define RIO_MSG_DESC_SIZE 32
+#define RIO_MSG_BUFFER_SIZE 4096
+
+#define DOORBELL_DMR_DI 0x00000002
+#define DOORBELL_DSR_TE 0x00000080
+#define DOORBELL_DSR_QFI 0x00000010
+#define DOORBELL_DSR_DIQI 0x00000001
+#define DOORBELL_TID_OFFSET 0x02
+#define DOORBELL_SID_OFFSET 0x04
+#define DOORBELL_INFO_OFFSET 0x06
+
+#define DOORBELL_MESSAGE_SIZE 0x08
+#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET))
+#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET))
+#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
+
+struct rio_msg_regs {
+ u32 omr;
+ u32 osr;
+ u32 pad1;
+ u32 odqdpar;
+ u32 pad2;
+ u32 osar;
+ u32 odpr;
+ u32 odatr;
+ u32 odcr;
+ u32 pad3;
+ u32 odqepar;
+ u32 pad4[13];
+ u32 imr;
+ u32 isr;
+ u32 pad5;
+ u32 ifqdpar;
+ u32 pad6;
+ u32 ifqepar;
+};
+
+struct rio_dbell_regs {
+ u32 odmr;
+ u32 odsr;
+ u32 pad1[4];
+ u32 oddpr;
+ u32 oddatr;
+ u32 pad2[3];
+ u32 odretcr;
+ u32 pad3[12];
+ u32 dmr;
+ u32 dsr;
+ u32 pad4;
+ u32 dqdpar;
+ u32 pad5;
+ u32 dqepar;
+};
+
+struct rio_pw_regs {
+ u32 pwmr;
+ u32 pwsr;
+ u32 epwqbar;
+ u32 pwqbar;
+};
+
+
+struct rio_tx_desc {
+ u32 pad1;
+ u32 saddr;
+ u32 dport;
+ u32 dattr;
+ u32 pad2;
+ u32 pad3;
+ u32 dwcnt;
+ u32 pad4;
+};
+
+struct rio_msg_tx_ring {
+ void *virt;
+ dma_addr_t phys;
+ void *virt_buffer[RIO_MAX_TX_RING_SIZE];
+ dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
+ int tx_slot;
+ int size;
+ void *dev_id;
+};
+
+struct rio_msg_rx_ring {
+ void *virt;
+ dma_addr_t phys;
+ void *virt_buffer[RIO_MAX_RX_RING_SIZE];
+ int rx_slot;
+ int size;
+ void *dev_id;
+};
+
+struct fsl_rmu {
+ struct rio_msg_regs __iomem *msg_regs;
+ struct rio_msg_tx_ring msg_tx_ring;
+ struct rio_msg_rx_ring msg_rx_ring;
+ int txirq;
+ int rxirq;
+};
+
+/**
+ * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles outbound message interrupts. Executes a registered outbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+fsl_rio_tx_handler(int irq, void *dev_instance)
+{
+ int osr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
+
+ osr = in_be32(&rmu->msg_regs->osr);
+
+ if (osr & RIO_MSG_OSR_TE) {
+ pr_info("RIO: outbound message transmission error\n");
+ out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
+ goto out;
+ }
+
+ if (osr & RIO_MSG_OSR_QOI) {
+ pr_info("RIO: outbound message queue overflow\n");
+ out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
+ goto out;
+ }
+
+ if (osr & RIO_MSG_OSR_EOMI) {
+ u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
+ int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
+ if (port->outb_msg[0].mcback != NULL) {
+ port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
+ -1,
+ slot);
+ }
+ /* Ack the end-of-message interrupt */
+ out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles inbound message interrupts. Executes a registered inbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+fsl_rio_rx_handler(int irq, void *dev_instance)
+{
+ int isr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
+
+ isr = in_be32(&rmu->msg_regs->isr);
+
+ if (isr & RIO_MSG_ISR_TE) {
+ pr_info("RIO: inbound message reception error\n");
+ out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
+ goto out;
+ }
+
+ /* XXX Need to check/dispatch until queue empty */
+ if (isr & RIO_MSG_ISR_DIQI) {
+ /*
+ * Can receive messages for any mailbox/letter to that
+ * mailbox destination. So, make the callback with an
+ * unknown/invalid mailbox number argument.
+ */
+ if (port->inb_msg[0].mcback != NULL)
+ port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
+ -1,
+ -1);
+
+ /* Ack the queueing interrupt */
+ out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles doorbell interrupts. Parses a list of registered
+ * doorbell event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+fsl_rio_dbell_handler(int irq, void *dev_instance)
+{
+ int dsr;
+ struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
+ int i;
+
+ dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
+
+ if (dsr & DOORBELL_DSR_TE) {
+ pr_info("RIO: doorbell reception error\n");
+ out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
+ goto out;
+ }
+
+ if (dsr & DOORBELL_DSR_QFI) {
+ pr_info("RIO: doorbell queue full\n");
+ out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
+ }
+
+ /* XXX Need to check/dispatch until queue empty */
+ if (dsr & DOORBELL_DSR_DIQI) {
+ u32 dmsg =
+ (u32) fsl_dbell->dbell_ring.virt +
+ (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
+ struct rio_dbell *dbell;
+ int found = 0;
+
+ pr_debug
+ ("RIO: processing doorbell,"
+ " sid %2.2x tid %2.2x info %4.4x\n",
+ DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+
+ for (i = 0; i < MAX_PORT_NUM; i++) {
+ if (fsl_dbell->mport[i]) {
+ list_for_each_entry(dbell,
+ &fsl_dbell->mport[i]->dbells, node) {
+ if ((dbell->res->start
+ <= DBELL_INF(dmsg))
+ && (dbell->res->end
+ >= DBELL_INF(dmsg))) {
+ found = 1;
+ break;
+ }
+ }
+ if (found && dbell->dinb) {
+ dbell->dinb(fsl_dbell->mport[i],
+ dbell->dev_id, DBELL_SID(dmsg),
+ DBELL_TID(dmsg),
+ DBELL_INF(dmsg));
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ pr_debug
+ ("RIO: spurious doorbell,"
+ " sid %2.2x tid %2.2x info %4.4x\n",
+ DBELL_SID(dmsg), DBELL_TID(dmsg),
+ DBELL_INF(dmsg));
+ }
+ setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
+ out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+void msg_unit_error_handler(void)
+{
+
+	/* XXX: Error recovery is not implemented, we just clear errors */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+ out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
+ out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
+ out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
+ out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
+
+ out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
+ out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
+
+ out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
+}
+
+/**
+ * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles port write interrupts. Parses a list of registered
+ * port write event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+fsl_rio_port_write_handler(int irq, void *dev_instance)
+{
+ u32 ipwmr, ipwsr;
+ struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
+ u32 epwisr, tmp;
+
+ epwisr = in_be32(rio_regs_win + RIO_EPWISR);
+ if (!(epwisr & RIO_EPWISR_PW))
+ goto pw_done;
+
+ ipwmr = in_be32(&pw->pw_regs->pwmr);
+ ipwsr = in_be32(&pw->pw_regs->pwsr);
+
+#ifdef DEBUG_PW
+ pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
+ if (ipwsr & RIO_IPWSR_QF)
+ pr_debug(" QF");
+ if (ipwsr & RIO_IPWSR_TE)
+ pr_debug(" TE");
+ if (ipwsr & RIO_IPWSR_QFI)
+ pr_debug(" QFI");
+ if (ipwsr & RIO_IPWSR_PWD)
+ pr_debug(" PWD");
+ if (ipwsr & RIO_IPWSR_PWB)
+ pr_debug(" PWB");
+ pr_debug(" )\n");
+#endif
+ /* Schedule deferred processing if PW was received */
+ if (ipwsr & RIO_IPWSR_QFI) {
+ /* Save PW message (if there is room in FIFO),
+ * otherwise discard it.
+ */
+ if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
+ pw->port_write_msg.msg_count++;
+ kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
+ RIO_PW_MSG_SIZE);
+ } else {
+ pw->port_write_msg.discard_count++;
+ pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+ pw->port_write_msg.discard_count);
+ }
+ /* Clear interrupt and issue Clear Queue command. This allows
+ * another port-write to be received.
+ */
+ out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
+ out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
+ schedule_work(&pw->pw_work);
+ }
+
+ if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+ pw->port_write_msg.err_count++;
+ pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+ pw->port_write_msg.err_count);
+ /* Clear Transaction Error: port-write controller should be
+ * disabled when clearing this error
+ */
+ out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+ out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
+ out_be32(&pw->pw_regs->pwmr, ipwmr);
+ }
+
+ if (ipwsr & RIO_IPWSR_PWD) {
+ pw->port_write_msg.discard_count++;
+ pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+ pw->port_write_msg.discard_count);
+ out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
+ }
+
+pw_done:
+ if (epwisr & RIO_EPWISR_PINT1) {
+ tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ fsl_rio_port_error_handler(0);
+ }
+
+ if (epwisr & RIO_EPWISR_PINT2) {
+ tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ fsl_rio_port_error_handler(1);
+ }
+
+ if (epwisr & RIO_EPWISR_MU) {
+ tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ msg_unit_error_handler();
+ }
+
+ return IRQ_HANDLED;
+}
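
One invented status value makes the dispatch above easier to follow:

/*
 * Example (illustrative value): epwisr == 0x80000001 sets both RIO_EPWISR_PW
 * and RIO_EPWISR_PINT1, so the port-write queue is drained first and then,
 * at pw_done, fsl_rio_port_error_handler(0) runs for port 1's error.
 */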
+
+static void fsl_pw_dpc(struct work_struct *work)
+{
+ struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
+ u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
+
+ /*
+ * Process port-write messages
+ */
+ while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer,
+ RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
+ /* Process one message */
+#ifdef DEBUG_PW
+ {
+ u32 i;
+ pr_debug("%s : Port-Write Message:", __func__);
+ for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
+ if ((i%4) == 0)
+ pr_debug("\n0x%02x: 0x%08x", i*4,
+ msg_buffer[i]);
+ else
+ pr_debug(" 0x%08x", msg_buffer[i]);
+ }
+ pr_debug("\n");
+ }
+#endif
+ /* Pass the port-write message to RIO core for processing */
+ rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+ }
+}
+
+/**
+ * fsl_rio_pw_enable - enable/disable the port-write interface
+ * @mport: Master port implementing the port write unit
+ * @enable: 1=enable; 0=disable port-write message handling
+ */
+int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+ u32 rval;
+
+ rval = in_be32(&pw->pw_regs->pwmr);
+
+ if (enable)
+ rval |= RIO_IPWMR_PWE;
+ else
+ rval &= ~RIO_IPWMR_PWE;
+
+ out_be32(&pw->pw_regs->pwmr, rval);
+
+ return 0;
+}
+
+/**
+ * fsl_rio_port_write_init - MPC85xx port write interface init
+ * @pw: pointer to the port-write unit state
+ *
+ * Initializes port write unit hardware and DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+
+int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
+{
+ int rc = 0;
+
+ /* Following configurations require a disabled port write controller */
+ out_be32(&pw->pw_regs->pwmr,
+ in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
+
+ /* Initialize port write */
+ pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
+ RIO_PW_MSG_SIZE,
+ &pw->port_write_msg.phys, GFP_KERNEL);
+ if (!pw->port_write_msg.virt) {
+		pr_err("RIO: unable to allocate port write queue\n");
+ return -ENOMEM;
+ }
+
+ pw->port_write_msg.err_count = 0;
+ pw->port_write_msg.discard_count = 0;
+
+ /* Point dequeue/enqueue pointers at first entry */
+ out_be32(&pw->pw_regs->epwqbar, 0);
+ out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
+
+ pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
+ in_be32(&pw->pw_regs->epwqbar),
+ in_be32(&pw->pw_regs->pwqbar));
+
+ /* Clear interrupt status IPWSR */
+ out_be32(&pw->pw_regs->pwsr,
+ (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+	/* Configure port write controller for snooping, enable all reporting,
+	 * and clear queue full */
+ out_be32(&pw->pw_regs->pwmr,
+ RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
+
+
+ /* Hook up port-write handler */
+ rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
+ IRQF_SHARED, "port-write", (void *)pw);
+ if (rc < 0) {
+		pr_err("MPC85xx RIO: unable to request port-write irq");
+ goto err_out;
+ }
+ /* Enable Error Interrupt */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
+
+ INIT_WORK(&pw->pw_work, fsl_pw_dpc);
+ spin_lock_init(&pw->pw_fifo_lock);
+ if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+ pr_err("FIFO allocation failed\n");
+ rc = -ENOMEM;
+ goto err_out_irq;
+ }
+
+ pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
+ in_be32(&pw->pw_regs->pwmr),
+ in_be32(&pw->pw_regs->pwsr));
+
+ return rc;
+
+err_out_irq:
+ free_irq(IRQ_RIO_PW(pw), (void *)pw);
+err_out:
+ dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
+ pw->port_write_msg.virt,
+ pw->port_write_msg.phys);
+ return rc;
+}
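
A back-of-the-envelope note on the FIFO sizing used above; the 64-byte RIO_PW_MSG_SIZE is the generic RapidIO value and is assumed here, not defined by this patch:

/*
 * Illustrative sizing, assuming RIO_PW_MSG_SIZE == 64:
 *   kfifo_alloc(..., RIO_PW_MSG_SIZE * 32, ...) reserves 64 * 32 = 2048 bytes,
 *   i.e. up to 32 port-write messages can queue between the interrupt handler
 *   and the fsl_pw_dpc() worker before new ones are dropped and
 *   discard_count is bumped.
 */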
+
+/**
+ * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell message
+ *
+ * Sends an MPC85xx doorbell message. Returns %0 on success or
+ * %-EINVAL on failure.
+ */
+int fsl_rio_doorbell_send(struct rio_mport *mport,
+ int index, u16 destid, u16 data)
+{
+ pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
+ index, destid, data);
+
+	/* On serial RapidIO silicon, such as the MPC8548 and MPC8641,
+	 * the operations below are required.
+	 */
+ out_be32(&dbell->dbell_regs->odmr, 0x00000000);
+ out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
+ out_be32(&dbell->dbell_regs->oddpr, destid << 16);
+ out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
+ out_be32(&dbell->dbell_regs->odmr, 0x00000001);
+
+ return 0;
+}
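
To make the register writes above concrete, one worked encoding with invented values:

/*
 * Example encoding (illustrative values only):
 *   index = 0, destid = 0x0004, data = 0xabcd
 *   oddpr  = 0x0004 << 16       = 0x00040000
 *   oddatr = (0 << 20) | 0xabcd = 0x0000abcd
 * Writing 1 back to odmr then launches the doorbell transaction.
 */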
+
+/**
+ * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ *
+ * Adds the @buffer message to the MPC85xx outbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int
+fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+ void *buffer, size_t len)
+{
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+ u32 omr;
+ struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
+ + rmu->msg_tx_ring.tx_slot;
+ int ret = 0;
+
+ pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
+ "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
+ if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Copy and clear rest of buffer */
+ memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
+ len);
+ if (len < (RIO_MAX_MSG_SIZE - 4))
+ memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
+ + len, 0, RIO_MAX_MSG_SIZE - len);
+
+ /* Set mbox field for message, and set destid */
+ desc->dport = (rdev->destid << 16) | (mbox & 0x3);
+
+ /* Enable EOMI interrupt and priority */
+ desc->dattr = 0x28000000 | ((mport->index) << 20);
+
+ /* Set transfer size aligned to next power of 2 (in double words) */
+ desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
+
+ /* Set snooping and source buffer address */
+ desc->saddr = 0x00000004
+ | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
+
+ /* Increment enqueue pointer */
+ omr = in_be32(&rmu->msg_regs->omr);
+ out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
+
+ /* Go to next descriptor */
+ if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
+ rmu->msg_tx_ring.tx_slot = 0;
+
+out:
+ return ret;
+}
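
The dwcnt rounding above, read with two sample lengths (purely illustrative numbers):

/*
 * Transfer-size rounding examples:
 *   len = 100 -> not a power of two -> get_bitmask_order(100) = 7
 *             -> dwcnt = 1 << 7 = 128 bytes
 *   len = 256 -> already a power of two -> dwcnt = 256 bytes
 */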
+
+/**
+ * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the outbound message interrupt,
+ * and enables the outbound message unit. Returns %0 on success and
+ * %-EINVAL or %-ENOMEM on failure.
+ */
+int
+fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+ int i, j, rc = 0;
+ struct rio_priv *priv = mport->priv;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+ if ((entries < RIO_MIN_TX_RING_SIZE) ||
+ (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize shadow copy ring */
+ rmu->msg_tx_ring.dev_id = dev_id;
+ rmu->msg_tx_ring.size = entries;
+
+ for (i = 0; i < rmu->msg_tx_ring.size; i++) {
+ rmu->msg_tx_ring.virt_buffer[i] =
+ dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
+ &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
+ if (!rmu->msg_tx_ring.virt_buffer[i]) {
+ rc = -ENOMEM;
+ for (j = 0; j < rmu->msg_tx_ring.size; j++)
+ if (rmu->msg_tx_ring.virt_buffer[j])
+ dma_free_coherent(priv->dev,
+ RIO_MSG_BUFFER_SIZE,
+ rmu->msg_tx_ring.
+ virt_buffer[j],
+ rmu->msg_tx_ring.
+ phys_buffer[j]);
+ goto out;
+ }
+ }
+
+ /* Initialize outbound message descriptor ring */
+ rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ &rmu->msg_tx_ring.phys, GFP_KERNEL);
+ if (!rmu->msg_tx_ring.virt) {
+ rc = -ENOMEM;
+ goto out_dma;
+ }
+ memset(rmu->msg_tx_ring.virt, 0,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
+ rmu->msg_tx_ring.tx_slot = 0;
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
+ out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
+
+ /* Configure for snooping */
+ out_be32(&rmu->msg_regs->osar, 0x00000004);
+
+ /* Clear interrupt status */
+ out_be32(&rmu->msg_regs->osr, 0x000000b3);
+
+ /* Hook up outbound message handler */
+ rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
+ "msg_tx", (void *)mport);
+ if (rc < 0)
+ goto out_irq;
+
+ /*
+ * Configure outbound message unit
+ * Snooping
+ * Interrupts (all enabled, except QEIE)
+ * Chaining mode
+ * Disable
+ */
+ out_be32(&rmu->msg_regs->omr, 0x00100220);
+
+ /* Set number of entries */
+ out_be32(&rmu->msg_regs->omr,
+ in_be32(&rmu->msg_regs->omr) |
+ ((get_bitmask_order(entries) - 2) << 12));
+
+ /* Now enable the unit */
+ out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
+
+out:
+ return rc;
+
+out_irq:
+ dma_free_coherent(priv->dev,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
+
+out_dma:
+ for (i = 0; i < rmu->msg_tx_ring.size; i++)
+ dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
+ rmu->msg_tx_ring.virt_buffer[i],
+ rmu->msg_tx_ring.phys_buffer[i]);
+
+ return rc;
+}
+
+/**
+ * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the outbound message unit, frees all buffers, and
+ * frees the outbound message interrupt.
+ */
+void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+ struct rio_priv *priv = mport->priv;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+	/* Disable outbound message unit */
+ out_be32(&rmu->msg_regs->omr, 0);
+
+ /* Free ring */
+ dma_free_coherent(priv->dev,
+ rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
+
+ /* Free interrupt */
+ free_irq(IRQ_RIO_TX(mport), (void *)mport);
+}
+
+/**
+ * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the inbound message interrupt,
+ * and enables the inbound message unit. Returns %0 on success
+ * and %-EINVAL or %-ENOMEM on failure.
+ */
+int
+fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+ int i, rc = 0;
+ struct rio_priv *priv = mport->priv;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+ if ((entries < RIO_MIN_RX_RING_SIZE) ||
+ (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize client buffer ring */
+ rmu->msg_rx_ring.dev_id = dev_id;
+ rmu->msg_rx_ring.size = entries;
+ rmu->msg_rx_ring.rx_slot = 0;
+ for (i = 0; i < rmu->msg_rx_ring.size; i++)
+ rmu->msg_rx_ring.virt_buffer[i] = NULL;
+
+ /* Initialize inbound message ring */
+ rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
+ rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ &rmu->msg_rx_ring.phys, GFP_KERNEL);
+ if (!rmu->msg_rx_ring.virt) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
+ out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);
+
+ /* Clear interrupt status */
+ out_be32(&rmu->msg_regs->isr, 0x00000091);
+
+ /* Hook up inbound message handler */
+ rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
+ "msg_rx", (void *)mport);
+ if (rc < 0) {
+		/* free the inbound descriptor ring allocated above */
+		dma_free_coherent(priv->dev,
+			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
+ goto out;
+ }
+
+ /*
+ * Configure inbound message unit:
+ * Snooping
+ * 4KB max message size
+ * Unmask all interrupt sources
+ * Disable
+ */
+ out_be32(&rmu->msg_regs->imr, 0x001b0060);
+
+ /* Set number of queue entries */
+ setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
+
+ /* Now enable the unit */
+ setbits32(&rmu->msg_regs->imr, 0x1);
+
+out:
+ return rc;
+}
+
+/**
+ * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, frees all buffers, and
+ * frees the inbound message interrupt.
+ */
+void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+ struct rio_priv *priv = mport->priv;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+ /* Disable inbound message unit */
+ out_be32(&rmu->msg_regs->imr, 0);
+
+ /* Free ring */
+ dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
+
+ /* Free interrupt */
+ free_irq(IRQ_RIO_RX(mport), (void *)mport);
+}
+
+/**
+ * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ *
+ * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+ int rc = 0;
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+
+ pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
+ rmu->msg_rx_ring.rx_slot);
+
+ if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
+ printk(KERN_ERR
+ "RIO: error adding inbound buffer %d, buffer exists\n",
+ rmu->msg_rx_ring.rx_slot);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
+ if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
+ rmu->msg_rx_ring.rx_slot = 0;
+
+out:
+ return rc;
+}
+
+/**
+ * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ *
+ * Gets the next available inbound message from the inbound message queue.
+ * A pointer to the message is returned on success or NULL on failure.
+ */
+void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
+{
+ struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
+ u32 phys_buf, virt_buf;
+ void *buf = NULL;
+ int buf_idx;
+
+ phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
+
+ /* If no more messages, then bail out */
+ if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
+ goto out2;
+
+ virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf
+ - rmu->msg_rx_ring.phys);
+ buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
+ buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
+
+ if (!buf) {
+ printk(KERN_ERR
+ "RIO: inbound message copy failed, no buffers\n");
+ goto out1;
+ }
+
+	/* Copy the maximum message size; the caller must allocate that much */
+ memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
+
+ /* Clear the available buffer */
+ rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
+
+out1:
+ setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
+
+out2:
+ return buf;
+}
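
One pass through the slot arithmetic above, assuming the usual 4096-byte RIO_MAX_MSG_SIZE (an assumption, not something this patch defines):

/*
 * Example (illustrative): if ifqdpar has advanced 0x3000 bytes past
 * msg_rx_ring.phys, then buf_idx = 0x3000 / 4096 = 3, so the message is
 * copied into the fourth client buffer posted via fsl_add_inb_buffer()
 * and that buffer is returned.
 */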
+
+/**
+ * fsl_rio_doorbell_init - MPC85xx doorbell interface init
+ * @dbell: pointer to the doorbell unit state
+ *
+ * Initializes doorbell unit hardware and inbound DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
+{
+ int rc = 0;
+
+ /* Initialize inbound doorbells */
+ dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
+ DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
+ if (!dbell->dbell_ring.virt) {
+		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
+ out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
+
+ /* Clear interrupt status */
+ out_be32(&dbell->dbell_regs->dsr, 0x00000091);
+
+ /* Hook up doorbell handler */
+ rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
+ "dbell_rx", (void *)dbell);
+ if (rc < 0) {
+ dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
+ dbell->dbell_ring.virt, dbell->dbell_ring.phys);
+ printk(KERN_ERR
+ "MPC85xx RIO: unable to request inbound doorbell irq");
+ goto out;
+ }
+
+ /* Configure doorbells for snooping, 512 entries, and enable */
+ out_be32(&dbell->dbell_regs->dmr, 0x00108161);
+
+out:
+ return rc;
+}
+
+int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
+{
+ struct rio_priv *priv;
+ struct fsl_rmu *rmu;
+ u64 msg_start;
+ const u32 *msg_addr;
+ int mlen;
+ int aw;
+
+ if (!mport || !mport->priv)
+ return -EINVAL;
+
+ priv = mport->priv;
+
+ if (!node) {
+ dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
+ priv->dev->of_node->full_name);
+ return -EINVAL;
+ }
+
+ rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
+ if (!rmu)
+ return -ENOMEM;
+
+ aw = of_n_addr_cells(node);
+ msg_addr = of_get_property(node, "reg", &mlen);
+ if (!msg_addr) {
+ pr_err("%s: unable to find 'reg' property of message-unit\n",
+ node->full_name);
+ kfree(rmu);
+ return -ENOMEM;
+ }
+ msg_start = of_read_number(msg_addr, aw);
+
+ rmu->msg_regs = (struct rio_msg_regs *)
+ (rmu_regs_win + (u32)msg_start);
+
+ rmu->txirq = irq_of_parse_and_map(node, 0);
+ rmu->rxirq = irq_of_parse_and_map(node, 1);
+ printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
+ node->full_name, rmu->txirq, rmu->rxirq);
+
+ priv->rmm_handle = rmu;
+
+ rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+
+ return 0;
+}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 8c7e8528e7c..4e9ccb1015d 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -154,7 +154,7 @@ static inline unsigned int mpic_processor_id(struct mpic *mpic)
{
unsigned int cpu = 0;
- if (mpic->flags & MPIC_PRIMARY)
+ if (!(mpic->flags & MPIC_SECONDARY))
cpu = hard_smp_processor_id();
return cpu;
@@ -315,29 +315,25 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
}
#ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
- struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
- const u32 *dbasep;
-
- dbasep = of_get_property(node, "dcr-reg", NULL);
-
- rb->dhost = dcr_map(node, *dbasep + offset, size);
+ phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
+ rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
BUG_ON(!DCR_MAP_OK(rb->dhost));
}
-static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+static inline void mpic_map(struct mpic *mpic,
phys_addr_t phys_addr, struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
if (mpic->flags & MPIC_USES_DCR)
- _mpic_map_dcr(mpic, node, rb, offset, size);
+ _mpic_map_dcr(mpic, rb, offset, size);
else
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
-#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
@@ -901,7 +897,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (vold != vnew)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
- return IRQ_SET_MASK_OK_NOCOPY;;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
void mpic_set_vector(unsigned int virq, unsigned int vector)
@@ -990,7 +986,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
#ifdef CONFIG_SMP
else if (hw >= mpic->ipi_vecs[0]) {
- WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+ WARN_ON(mpic->flags & MPIC_SECONDARY);
DBG("mpic: mapping as IPI\n");
irq_set_chip_data(virq, mpic);
@@ -1001,7 +997,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
#endif /* CONFIG_SMP */
if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
- WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+ WARN_ON(mpic->flags & MPIC_SECONDARY);
DBG("mpic: mapping as timer\n");
irq_set_chip_data(virq, mpic);
@@ -1115,17 +1111,28 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
+/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
+static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
+ unsigned int virq;
+
+ BUG_ON(!(mpic->flags & MPIC_SECONDARY));
+
+ virq = mpic_get_one_irq(mpic);
+ if (virq != NO_IRQ)
+ generic_handle_irq(virq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
static struct irq_host_ops mpic_host_ops = {
.match = mpic_host_match,
.map = mpic_host_map,
.xlate = mpic_host_xlate,
};
-static int mpic_reset_prohibited(struct device_node *node)
-{
- return node && of_get_property(node, "pic-no-reset", NULL);
-}
-
/*
* Exported functions
*/
@@ -1137,27 +1144,60 @@ struct mpic * __init mpic_alloc(struct device_node *node,
unsigned int irq_count,
const char *name)
{
- struct mpic *mpic;
- u32 greg_feature;
- const char *vers;
- int i;
- int intvec_top;
- u64 paddr = phys_addr;
+ int i, psize, intvec_top;
+ struct mpic *mpic;
+ u32 greg_feature;
+ const char *vers;
+ const u32 *psrc;
+
+ /* Default MPIC search parameters */
+ static const struct of_device_id __initconst mpic_device_id[] = {
+ { .type = "open-pic", },
+ { .compatible = "open-pic", },
+ {},
+ };
+
+ /*
+ * If we were not passed a device-tree node, then perform the default
+	 * search for a standardized OpenPIC.
+ */
+ if (node) {
+ node = of_node_get(node);
+ } else {
+ node = of_find_matching_node(NULL, mpic_device_id);
+ if (!node)
+ return NULL;
+ }
+
+ /* Pick the physical address from the device tree if unspecified */
+ if (!phys_addr) {
+ /* Check if it is DCR-based */
+ if (of_get_property(node, "dcr-reg", NULL)) {
+ flags |= MPIC_USES_DCR;
+ } else {
+ struct resource r;
+ if (of_address_to_resource(node, 0, &r))
+ goto err_of_node_put;
+ phys_addr = r.start;
+ }
+ }
mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
- return NULL;
+ goto err_of_node_put;
mpic->name = name;
+ mpic->node = node;
+ mpic->paddr = phys_addr;
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.name = name;
- if (flags & MPIC_PRIMARY)
+ if (!(flags & MPIC_SECONDARY))
mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
mpic->hc_ht_irq = mpic_irq_ht_chip;
mpic->hc_ht_irq.name = name;
- if (flags & MPIC_PRIMARY)
+ if (!(flags & MPIC_SECONDARY))
mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
@@ -1194,28 +1234,22 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->spurious_vec = intvec_top;
/* Check for "big-endian" in device-tree */
- if (node && of_get_property(node, "big-endian", NULL) != NULL)
+ if (of_get_property(mpic->node, "big-endian", NULL) != NULL)
mpic->flags |= MPIC_BIG_ENDIAN;
- if (node && of_device_is_compatible(node, "fsl,mpic"))
+ if (of_device_is_compatible(mpic->node, "fsl,mpic"))
mpic->flags |= MPIC_FSL;
/* Look for protected sources */
- if (node) {
- int psize;
- unsigned int bits, mapsize;
- const u32 *psrc =
- of_get_property(node, "protected-sources", &psize);
- if (psrc) {
- psize /= 4;
- bits = intvec_top + 1;
- mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
- mpic->protected = kzalloc(mapsize, GFP_KERNEL);
- BUG_ON(mpic->protected == NULL);
- for (i = 0; i < psize; i++) {
- if (psrc[i] > intvec_top)
- continue;
- __set_bit(psrc[i], mpic->protected);
- }
+ psrc = of_get_property(mpic->node, "protected-sources", &psize);
+ if (psrc) {
+ /* Allocate a bitmap with one bit per interrupt */
+ unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
+ mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL);
+ BUG_ON(mpic->protected == NULL);
+ for (i = 0; i < psize/sizeof(u32); i++) {
+ if (psrc[i] > intvec_top)
+ continue;
+ __set_bit(psrc[i], mpic->protected);
}
}
@@ -1224,42 +1258,32 @@ struct mpic * __init mpic_alloc(struct device_node *node,
#endif
/* default register type */
- mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
- mpic_access_mmio_be : mpic_access_mmio_le;
-
- /* If no physical address is passed in, a device-node is mandatory */
- BUG_ON(paddr == 0 && node == NULL);
+ if (flags & MPIC_BIG_ENDIAN)
+ mpic->reg_type = mpic_access_mmio_be;
+ else
+ mpic->reg_type = mpic_access_mmio_le;
- /* If no physical address passed in, check if it's dcr based */
- if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
+ /*
+ * An MPIC with a "dcr-reg" property must be accessed that way, but
+ * only if the kernel includes DCR support.
+ */
#ifdef CONFIG_PPC_DCR
- mpic->flags |= MPIC_USES_DCR;
+ if (flags & MPIC_USES_DCR)
mpic->reg_type = mpic_access_dcr;
#else
- BUG();
-#endif /* CONFIG_PPC_DCR */
- }
-
- /* If the MPIC is not DCR based, and no physical address was passed
- * in, try to obtain one
- */
- if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
- const u32 *reg = of_get_property(node, "reg", NULL);
- BUG_ON(reg == NULL);
- paddr = of_translate_address(node, reg);
- BUG_ON(paddr == OF_BAD_ADDR);
- }
+ BUG_ON(flags & MPIC_USES_DCR);
+#endif
/* Map the global registers */
- mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
- mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+ mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+ mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
/* Reset */
/* When using a device-node, reset requests are only honored if the MPIC
* is allowed to reset.
*/
- if (mpic_reset_prohibited(node))
+ if (of_get_property(mpic->node, "pic-no-reset", NULL))
mpic->flags |= MPIC_NO_RESET;
if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
@@ -1307,7 +1331,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
for_each_possible_cpu(i) {
unsigned int cpu = get_hard_smp_processor_id(i);
- mpic_map(mpic, node, paddr, &mpic->cpuregs[cpu],
+ mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
0x1000);
}
@@ -1315,16 +1339,21 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
- mpic_map(mpic, node, paddr, &mpic->isus[0],
+ mpic_map(mpic, mpic->paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
- mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
+ mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR,
isu_size ? isu_size : mpic->num_sources,
&mpic_host_ops,
flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+
+ /*
+	 * is very unlikely to fail but it ought to be fixed anyway.
+ * is very unlikely to fail but it ought to be fixed anyways.
+ */
if (mpic->irqhost == NULL)
return NULL;
@@ -1347,19 +1376,23 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
" max %d CPUs\n",
- name, vers, (unsigned long long)paddr, num_possible_cpus());
+ name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
mpic->next = mpics;
mpics = mpic;
- if (flags & MPIC_PRIMARY) {
+ if (!(flags & MPIC_SECONDARY)) {
mpic_primary = mpic;
irq_set_default_host(mpic->irqhost);
}
return mpic;
+
+err_of_node_put:
+ of_node_put(node);
+ return NULL;
}
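
With the default device-tree search added above, board code can now pass a NULL node and let mpic_alloc() locate the controller itself; a rough sketch with placeholder flags, not taken from any board file in this patch:

/* Hypothetical board-code sketch */
static void __init example_board_pic_init(void)
{
	struct mpic *mpic;

	/* NULL node and zero address: both are discovered from the tree */
	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);
}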
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
@@ -1369,7 +1402,7 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
BUG_ON(isu_num >= MPIC_MAX_ISU);
- mpic_map(mpic, mpic->irqhost->of_node,
+ mpic_map(mpic,
paddr, &mpic->isus[isu_num], 0,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
@@ -1385,8 +1418,7 @@ void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
void __init mpic_init(struct mpic *mpic)
{
- int i;
- int cpu;
+ int i, cpu;
BUG_ON(mpic->num_sources == 0);
@@ -1424,7 +1456,7 @@ void __init mpic_init(struct mpic *mpic)
/* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
- if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) {
+ if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
mpic_scan_ht_pics(mpic);
mpic_u3msi_init(mpic);
}
@@ -1471,6 +1503,17 @@ void __init mpic_init(struct mpic *mpic)
GFP_KERNEL);
BUG_ON(mpic->save_data == NULL);
#endif
+
+ /* Check if this MPIC is chained from a parent interrupt controller */
+ if (mpic->flags & MPIC_SECONDARY) {
+ int virq = irq_of_parse_and_map(mpic->node, 0);
+ if (virq != NO_IRQ) {
+ printk(KERN_INFO "%s: hooking up to IRQ %d\n",
+ mpic->node->full_name, virq);
+ irq_set_handler_data(virq, mpic);
+ irq_set_chained_handler(virq, &mpic_cascade);
+ }
+ }
}
void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
index 73b86cc5ea7..82e2cfe35c6 100644
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -179,12 +179,12 @@ static struct kobj_attribute cpm_idle_attr =
static void cpm_idle_config_sysfs(void)
{
- struct sys_device *sys_dev;
+ struct device *dev;
unsigned long ret;
- sys_dev = get_cpu_sysdev(0);
+ dev = get_cpu_device(0);
- ret = sysfs_create_file(&sys_dev->kobj,
+ ret = sysfs_create_file(&dev->kobj,
&cpm_idle_attr.attr);
if (ret)
printk(KERN_WARNING
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 862f11b3821..4f05f754234 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -185,9 +185,15 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
out:
dma_offset_set = 1;
pci_dram_offset = res->start;
+ hose->dma_window_base_cur = res->start;
+ hose->dma_window_size = resource_size(res);
printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
pci_dram_offset);
+	printk(KERN_INFO "4xx PCI DMA window base set to 0x%016llx\n",
+ (unsigned long long)hose->dma_window_base_cur);
+ printk(KERN_INFO "DMA window size 0x%016llx\n",
+ (unsigned long long)hose->dma_window_size);
return 0;
}
@@ -647,6 +653,7 @@ static unsigned int ppc4xx_pciex_port_count;
struct ppc4xx_pciex_hwops
{
+ bool want_sdr;
int (*core_init)(struct device_node *np);
int (*port_init_hw)(struct ppc4xx_pciex_port *port);
int (*setup_utl)(struct ppc4xx_pciex_port *port);
@@ -916,6 +923,7 @@ static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
+ .want_sdr = true,
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speA_pciex_init_port_hw,
.setup_utl = ppc440speA_pciex_init_utl,
@@ -924,6 +932,7 @@ static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
+ .want_sdr = true,
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speB_pciex_init_port_hw,
.setup_utl = ppc440speB_pciex_init_utl,
@@ -1034,6 +1043,7 @@ static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
+ .want_sdr = true,
.core_init = ppc460ex_pciex_core_init,
.port_init_hw = ppc460ex_pciex_init_port_hw,
.setup_utl = ppc460ex_pciex_init_utl,
@@ -1181,6 +1191,7 @@ done:
}
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
+ .want_sdr = true,
.core_init = ppc460sx_pciex_core_init,
.port_init_hw = ppc460sx_pciex_init_port_hw,
.setup_utl = ppc460sx_pciex_init_utl,
@@ -1276,6 +1287,7 @@ static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
+ .want_sdr = true,
.core_init = ppc405ex_pciex_core_init,
.port_init_hw = ppc405ex_pciex_init_port_hw,
.setup_utl = ppc405ex_pciex_init_utl,
@@ -1284,6 +1296,52 @@ static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
#endif /* CONFIG_40x */
+#ifdef CONFIG_476FPE
+static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
+{
+ return 4;
+}
+
+static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
+{
+ u32 timeout_ms = 20;
+ u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
+ void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
+ 0x1000);
+
+ printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
+
+ if (mbase == NULL) {
+ printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
+ port->index);
+ return;
+ }
+
+ while (timeout_ms--) {
+ val = in_le32(mbase + PECFG_TLDLP);
+
+ if ((val & mask) == mask)
+ break;
+ msleep(10);
+ }
+
+ if (val & PECFG_TLDLP_PRESENT) {
+ printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
+ port->link = 1;
+ } else
+ printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
+
+ iounmap(mbase);
+ return;
+}
+
+static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
+{
+ .core_init = ppc_476fpe_pciex_core_init,
+ .check_link = ppc_476fpe_pciex_check_link,
+};
+#endif /* CONFIG_476FPE */
+
/* Check that the core has been initialized and if not, do it */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
@@ -1309,6 +1367,10 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
#endif
+#ifdef CONFIG_476FPE
+ if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe"))
+ ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
+#endif
if (ppc4xx_pciex_hwops == NULL) {
printk(KERN_WARNING "PCIE: unknown host type %s\n",
np->full_name);
@@ -1617,6 +1679,10 @@ static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
| DCRO_PEGPL_OMRxMSKL_VAL);
+ else if (of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
+ sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
+ | DCRO_PEGPL_OMRxMSKL_VAL);
else
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
sa | DCRO_PEGPL_OMR1MSKL_UOT
@@ -1739,9 +1805,10 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(size));
if (res->flags & IORESOURCE_PREFETCH)
- sa |= 0x8;
+ sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
- if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
+ if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
+ of_device_is_compatible(port->node, "ibm,plb-pciex-476fpe"))
sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
@@ -1972,13 +2039,15 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
}
port->node = of_node_get(np);
- pval = of_get_property(np, "sdr-base", NULL);
- if (pval == NULL) {
- printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
- np->full_name);
- return;
+ if (ppc4xx_pciex_hwops->want_sdr) {
+ pval = of_get_property(np, "sdr-base", NULL);
+ if (pval == NULL) {
+ printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
+ np->full_name);
+ return;
+ }
+ port->sdr_base = *pval;
}
- port->sdr_base = *pval;
/* Check if device_type property is set to "pci" or "pci-endpoint".
* Resulting from this setup this PCIe port will be configured
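
The 476FPE hwops above set no want_sdr flag; instead ppc_476fpe_pciex_check_link() maps the port's config space and polls PECFG_TLDLP until the link-up and presence bits are both set, giving up after roughly 200 ms. Below is a minimal standalone sketch of that poll-with-timeout pattern in plain userspace C; the read_status() stub is a made-up stand-in for the ioremap/in_le32 access, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define LNKUP   0x00000008u	/* mirrors PECFG_TLDLP_LNKUP */
#define PRESENT 0x00000010u	/* mirrors PECFG_TLDLP_PRESENT */

/* Hypothetical stand-in for in_le32(mbase + PECFG_TLDLP). */
static uint32_t read_status(void)
{
	static int calls;
	return (++calls > 3) ? (LNKUP | PRESENT) : PRESENT;
}

/* Poll until both bits are set, or give up after tries * 10 ms. */
static bool wait_link_up(unsigned int tries)
{
	uint32_t val = 0, mask = LNKUP | PRESENT;

	while (tries--) {
		val = read_status();
		if ((val & mask) == mask)
			return true;
		usleep(10 * 1000);	/* msleep(10) equivalent */
	}
	return false;
}

int main(void)
{
	printf("link %s\n", wait_link_up(20) ? "is up" : "did not come up");
	return 0;
}
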
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index 32ce763a375..bb4821938ab 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -476,6 +476,13 @@
#define DCRO_PEGPL_OMR1MSKL_UOT 0x00000002
#define DCRO_PEGPL_OMR3MSKL_IO 0x00000002
+/* 476FPE */
+#define PCCFG_LCPA 0x270
+#define PECFG_TLDLP 0x3F8
+#define PECFG_TLDLP_LNKUP 0x00000008
+#define PECFG_TLDLP_PRESENT 0x00000010
+#define DCRO_PEGPL_476FPE_OMR1MSKL_UOT 0x00000004
+
/* SDR Bit Mappings */
#define PESDRx_RCSSET_HLDPLB 0x10000000
#define PESDRx_RCSSET_RSTGU 0x01000000
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
index e23f23cf9f5..521e67a49dc 100644
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -139,14 +139,10 @@ struct qe_pin {
struct qe_pin *qe_pin_request(struct device_node *np, int index)
{
struct qe_pin *qe_pin;
- struct device_node *gpio_np;
struct gpio_chip *gc;
struct of_mm_gpio_chip *mm_gc;
struct qe_gpio_chip *qe_gc;
int err;
- int size;
- const void *gpio_spec;
- const u32 *gpio_cells;
unsigned long flags;
qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
@@ -155,45 +151,25 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
return ERR_PTR(-ENOMEM);
}
- err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
- &gpio_np, &gpio_spec);
- if (err) {
- pr_debug("%s: can't parse gpios property\n", __func__);
+ err = of_get_gpio(np, index);
+ if (err < 0)
+ goto err0;
+ gc = gpio_to_chip(err);
+ if (WARN_ON(!gc))
goto err0;
- }
- if (!of_device_is_compatible(gpio_np, "fsl,mpc8323-qe-pario-bank")) {
+ if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
pr_debug("%s: tried to get a non-qe pin\n", __func__);
err = -EINVAL;
- goto err1;
- }
-
- gc = of_node_to_gpiochip(gpio_np);
- if (!gc) {
- pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpio_np->full_name);
- err = -ENODEV;
- goto err1;
- }
-
- gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
- if (!gpio_cells || size != sizeof(*gpio_cells) ||
- *gpio_cells != gc->of_gpio_n_cells) {
- pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpio_np->full_name);
- err = -EINVAL;
- goto err1;
+ goto err0;
}
- err = gc->of_xlate(gc, np, gpio_spec, NULL);
- if (err < 0)
- goto err1;
-
mm_gc = to_of_mm_gpio_chip(gc);
qe_gc = to_qe_gpio_chip(mm_gc);
spin_lock_irqsave(&qe_gc->lock, flags);
+ err -= gc->base;
if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
qe_pin->controller = qe_gc;
qe_pin->num = err;
@@ -206,8 +182,6 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
if (!err)
return qe_pin;
-err1:
- of_node_put(gpio_np);
err0:
kfree(qe_pin);
pr_debug("%s failed with status %d\n", __func__, err);
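
qe_pin_request() now relies on of_get_gpio(), which returns a global GPIO number, and recovers the chip-relative pin index with "err -= gc->base" before indexing pin_flags[]. The following is a hedged sketch of that global-number-to-chip-offset translation with an invented chip table in place of the kernel's gpio_to_chip(); it only illustrates the arithmetic.

#include <stdio.h>

/* Hypothetical stand-in for struct gpio_chip: base number and pin count. */
struct chip {
	const char *label;
	int base;
	int ngpio;
};

static struct chip chips[] = {
	{ "bank-a", 0,  32 },
	{ "bank-b", 32, 32 },
};

/* Find the chip owning a global GPIO number and the offset within it. */
static struct chip *global_to_chip(int gpio, int *offset)
{
	for (unsigned int i = 0; i < sizeof(chips) / sizeof(chips[0]); i++) {
		if (gpio >= chips[i].base &&
		    gpio < chips[i].base + chips[i].ngpio) {
			*offset = gpio - chips[i].base;	/* the "err -= gc->base" step */
			return &chips[i];
		}
	}
	return NULL;
}

int main(void)
{
	int off;
	struct chip *c = global_to_chip(37, &off);

	if (c)
		printf("gpio 37 -> %s pin %d\n", c->label, off);
	return 0;
}
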
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3363fbc964f..ceb09cbd232 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -216,7 +216,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
- if (!div16 && (divisor & 1))
+ if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 18e75ca19fe..73034bd203c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -22,7 +22,6 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
-#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
@@ -484,13 +483,14 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
return 0;
}
-static struct sysdev_class qe_ic_sysclass = {
+static struct bus_type qe_ic_subsys = {
.name = "qe_ic",
+ .dev_name = "qe_ic",
};
-static struct sys_device device_qe_ic = {
+static struct device device_qe_ic = {
.id = 0,
- .cls = &qe_ic_sysclass,
+ .bus = &qe_ic_subsys,
};
static int __init init_qe_ic_sysfs(void)
@@ -499,12 +499,12 @@ static int __init init_qe_ic_sysfs(void)
printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
- rc = sysdev_class_register(&qe_ic_sysclass);
+ rc = subsys_system_register(&qe_ic_subsys, NULL);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys class\n");
return -ENODEV;
}
- rc = sysdev_register(&device_qe_ic);
+ rc = device_register(&device_qe_ic);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys device\n");
return -ENODEV;
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 3330feca750..063c901b126 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -18,7 +18,6 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
-#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 9518d367a64..253dce98c16 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -27,33 +27,50 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
+ unsigned int ret = XICS_IRQ_SPURIOUS;
rc = plpar_hcall(H_XIRR, retbuf, cppr);
- if (rc != H_SUCCESS)
- panic(" bad return code xirr - rc = %lx\n", rc);
- return (unsigned int)retbuf[0];
-}
+ if (rc == H_SUCCESS) {
+ ret = (unsigned int)retbuf[0];
+ } else {
+ pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
+ __func__, cppr, rc);
+ WARN_ON_ONCE(1);
+ }
-static inline void icp_hv_set_xirr(unsigned int value)
-{
- long rc = plpar_hcall_norets(H_EOI, value);
- if (rc != H_SUCCESS)
- panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+ return ret;
}
static inline void icp_hv_set_cppr(u8 value)
{
long rc = plpar_hcall_norets(H_CPPR, value);
- if (rc != H_SUCCESS)
- panic("bad return code cppr - rc = %lx\n", rc);
+ if (rc != H_SUCCESS) {
+ pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
+ __func__, value, rc);
+ WARN_ON_ONCE(1);
+ }
+}
+
+static inline void icp_hv_set_xirr(unsigned int value)
+{
+ long rc = plpar_hcall_norets(H_EOI, value);
+ if (rc != H_SUCCESS) {
+ pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
+ __func__, value, rc);
+ WARN_ON_ONCE(1);
+ icp_hv_set_cppr(value >> 24);
+ }
}
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
- long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
- value);
- if (rc != H_SUCCESS)
- panic("bad return code qirr - rc = %lx\n", rc);
+ int hw_cpu = get_hard_smp_processor_id(n_cpu);
+ long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+ if (rc != H_SUCCESS) {
+ pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
+ "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
+ WARN_ON_ONCE(1);
+ }
}
static void icp_hv_eoi(struct irq_data *d)
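
Rather than panicking on a failed hcall, the icp-hv handlers now log the return code, fire WARN_ON_ONCE(), and fall back to a safe value (XICS_IRQ_SPURIOUS when H_XIRR fails). Here is a standalone sketch of that report-and-fall-back pattern; do_hcall() is a hypothetical stub standing in for plpar_hcall(), not the real hypervisor interface.

#include <stdio.h>

#define SPURIOUS 0xffffffffu	/* mirrors XICS_IRQ_SPURIOUS */

/* Hypothetical hcall stub: returns 0 on success and fills *out. */
static long do_hcall(unsigned int *out)
{
	*out = 0x42;
	return 0;	/* change to a non-zero rc to exercise the fallback */
}

static unsigned int get_irq(void)
{
	unsigned int ret = SPURIOUS;	/* safe default if the hcall fails */
	unsigned int val;
	long rc = do_hcall(&val);

	if (rc == 0)
		ret = val;
	else
		fprintf(stderr, "%s: hcall failed, rc=%ld\n", __func__, rc);
	return ret;
}

int main(void)
{
	printf("irq source: 0x%x\n", get_irq());
	return 0;
}
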
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 63762c672a0..d72eda6a4c0 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -137,7 +137,7 @@ static void xics_request_ipi(void)
* IPIs are marked IRQF_PERCPU. The handler was set in map.
*/
BUG_ON(request_irq(ipi, icp_ops->ipi_action,
- IRQF_PERCPU, "IPI", NULL));
+ IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
int __init xics_smp_probe(void)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 03a217ae3be..cb95eea74d3 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -228,13 +228,11 @@ Commands:\n\
t print backtrace\n\
x exit monitor and recover\n\
X exit monitor and dont recover\n"
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
" u dump segment table or SLB\n"
-#endif
-#ifdef CONFIG_PPC_STD_MMU_32
+#elif defined(CONFIG_PPC_STD_MMU_32)
" u dump segment registers\n"
-#endif
-#ifdef CONFIG_44x
+#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
" u dump TLB\n"
#endif
" ? help\n"
@@ -340,7 +338,7 @@ int cpus_are_in_xmon(void)
static inline int unrecoverable_excp(struct pt_regs *regs)
{
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOK3E)
+#if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E)
/* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */
return 0;
#else
@@ -885,13 +883,11 @@ cmds(struct pt_regs *excp)
case 'u':
dump_segments();
break;
-#endif
-#ifdef CONFIG_4xx
+#elif defined(CONFIG_4xx)
case 'u':
dump_tlb_44x();
break;
-#endif
-#ifdef CONFIG_PPC_BOOK3E
+#elif defined(CONFIG_PPC_BOOK3E)
case 'u':
dump_tlb_book3e();
break;
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index ae4b01060ed..9858476fa0f 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -1,6 +1,7 @@
-obj-y += kernel/
-obj-y += mm/
-obj-y += crypto/
-obj-y += appldata/
-obj-y += hypfs/
-obj-y += kvm/
+obj-y += kernel/
+obj-y += mm/
+obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_CRYPTO_HW) += crypto/
+obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
+obj-$(CONFIG_APPLDATA_BASE) += appldata/
+obj-$(CONFIG_MATHEMU) += math-emu/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 373679b3744..d1727584230 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -87,11 +87,13 @@ config S390
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
- select HAVE_GET_USER_PAGES_FAST
select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_RCU_TABLE_FREE if SMP
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
@@ -191,18 +193,13 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config SCHED_MC
- def_bool y
- prompt "Multi-core scheduler support"
- depends on SMP
- help
- Multi-core scheduler support improves the CPU scheduler's decision
- making when dealing with multi-core CPU chips at a cost of slightly
- increased overhead in some places.
+ def_bool n
config SCHED_BOOK
def_bool y
prompt "Book scheduler support"
- depends on SMP && SCHED_MC
+ depends on SMP
+ select SCHED_MC
help
Book scheduler support improves the CPU scheduler's decision making
when dealing with machines that have several books.
@@ -345,9 +342,6 @@ config WARN_DYNAMIC_STACK
Say N if you are unsure.
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
comment "Kernel preemption"
source "kernel/Kconfig.preempt"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 27a0b5df5ea..e9f35334169 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -99,7 +99,6 @@ core-y += arch/s390/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
-drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
# must be linked after kernel
drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 92f1cb745d6..4de031d6b76 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data)
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
- cputime_to_jiffies(kstat_cpu(i).cpustat.user);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
os_data->os_cpu[j].per_cpu_nice =
- cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
os_data->os_cpu[j].per_cpu_system =
- cputime_to_jiffies(kstat_cpu(i).cpustat.system);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
os_data->os_cpu[j].per_cpu_idle =
- cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
os_data->os_cpu[j].per_cpu_irq =
- cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
os_data->os_cpu[j].per_cpu_softirq =
- cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
os_data->os_cpu[j].per_cpu_iowait =
- cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
os_data->os_cpu[j].per_cpu_steal =
- cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+ cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
os_data->os_cpu[j].cpu_id = i;
j++;
}
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 635d677d328..f2737a005af 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -23,4 +23,4 @@ $(obj)/compressed/vmlinux: FORCE
install: $(CONFIGURE) $(obj)/image
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
- System.map Kerntypes "$(INSTALL_PATH)"
+ System.map "$(INSTALL_PATH)"
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 481f4f76f66..8a2a887478c 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -97,7 +97,7 @@ static void hypfs_delete_tree(struct dentry *root)
}
}
-static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
+static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
{
struct inode *ret = new_inode(sb);
@@ -107,7 +107,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
- if (mode & S_IFDIR)
+ if (S_ISDIR(mode))
set_nlink(ret, 2);
}
return ret;
@@ -259,9 +259,9 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
return 0;
}
-static int hypfs_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int hypfs_show_options(struct seq_file *s, struct dentry *root)
{
- struct hypfs_sb_info *hypfs_info = mnt->mnt_sb->s_fs_info;
+ struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info;
seq_printf(s, ",uid=%u", hypfs_info->uid);
seq_printf(s, ",gid=%u", hypfs_info->gid);
@@ -333,7 +333,7 @@ static void hypfs_kill_super(struct super_block *sb)
static struct dentry *hypfs_create_file(struct super_block *sb,
struct dentry *parent, const char *name,
- char *data, mode_t mode)
+ char *data, umode_t mode)
{
struct dentry *dentry;
struct inode *inode;
@@ -350,13 +350,13 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
dentry = ERR_PTR(-ENOMEM);
goto fail;
}
- if (mode & S_IFREG) {
+ if (S_ISREG(mode)) {
inode->i_fop = &hypfs_file_ops;
if (data)
inode->i_size = strlen(data);
else
inode->i_size = 0;
- } else if (mode & S_IFDIR) {
+ } else if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inc_nlink(parent->d_inode);
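
The hypfs changes replace "mode & S_IFDIR" / "mode & S_IFREG" with the S_ISDIR()/S_ISREG() macros, which compare the whole S_IFMT field. The plain bitmask test is unreliable because the file-type values overlap bit-wise: S_IFSOCK (0140000), for instance, contains both the S_IFDIR (0040000) and S_IFREG (0100000) bits. A tiny userspace demonstration:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t sock = S_IFSOCK | 0644;

	/* Naive bitmask test: wrongly claims a socket is both a dir and a file. */
	printf("mask test:  dir=%d reg=%d\n",
	       (sock & S_IFDIR) != 0, (sock & S_IFREG) != 0);

	/* S_ISxxx() compares the full S_IFMT field and gets it right. */
	printf("S_ISxxx():  dir=%d reg=%d\n",
	       S_ISDIR(sock) != 0, S_ISREG(sock) != 0);
	return 0;
}
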
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 08143487829..c23c3900c30 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -16,114 +16,100 @@
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
-typedef unsigned long long cputime_t;
-typedef unsigned long long cputime64_t;
+typedef unsigned long long __nocast cputime_t;
+typedef unsigned long long __nocast cputime64_t;
-#ifndef __s390x__
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+static inline unsigned long __div(unsigned long long n, unsigned long base)
{
+#ifndef __s390x__
register_pair rp;
rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd;
+#else /* __s390x__ */
+ return n / base;
+#endif /* __s390x__ */
}
-#else /* __s390x__ */
+#define cputime_one_jiffy jiffies_to_cputime(1)
+
+/*
+ * Convert cputime to jiffies and back.
+ */
+static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
+{
+ return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+}
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
- return n / base;
+ return (__force cputime_t)(jif * (4096000000ULL / HZ));
}
-#endif /* __s390x__ */
+static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+{
+ unsigned long long jif = (__force unsigned long long) cputime;
+ do_div(jif, 4096000000ULL / HZ);
+ return jif;
+}
-#define cputime_zero (0ULL)
-#define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ({ \
- unsigned long long __div = (__a); \
- do_div(__div,__n); \
- __div; \
-})
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
-#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
-
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime_to_cputime64(__ct) (__ct)
-
-static inline u64
-cputime64_to_jiffies64(cputime64_t cputime)
-{
- do_div(cputime, 4096000000ULL / HZ);
- return cputime;
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+ return (__force cputime64_t)(jif * (4096000000ULL / HZ));
}
/*
* Convert cputime to microseconds and back.
*/
-static inline unsigned int
-cputime_to_usecs(const cputime_t cputime)
+static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096);
+ return (__force unsigned long long) cputime >> 12;
}
-static inline cputime_t
-usecs_to_cputime(const unsigned int m)
+static inline cputime_t usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096;
+ return (__force cputime_t)(m * 4096ULL);
}
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
/*
* Convert cputime to milliseconds and back.
*/
-static inline unsigned int
-cputime_to_secs(const cputime_t cputime)
+static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
- return __div(cputime, 2048000000) >> 1;
+ return __div((__force unsigned long long) cputime, 2048000000) >> 1;
}
-static inline cputime_t
-secs_to_cputime(const unsigned int s)
+static inline cputime_t secs_to_cputime(const unsigned int s)
{
- return (cputime_t) s * 4096000000ULL;
+ return (__force cputime_t)(s * 4096000000ULL);
}
/*
* Convert cputime to timespec and back.
*/
-static inline cputime_t
-timespec_to_cputime(const struct timespec *value)
+static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
- return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
+ unsigned long long ret = value->tv_sec * 4096000000ULL;
+ return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
}
-static inline void
-cputime_to_timespec(const cputime_t cputime, struct timespec *value)
+static inline void cputime_to_timespec(const cputime_t cputime,
+ struct timespec *value)
{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
register_pair rp;
- rp.pair = cputime >> 1;
+ rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
- value->tv_sec = cputime / 4096000000ULL;
+ value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
+ value->tv_sec = __cputime / 4096000000ULL;
#endif
}
@@ -132,50 +118,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
* Since cputime and timeval have the same resolution (microseconds)
* this is easy.
*/
-static inline cputime_t
-timeval_to_cputime(const struct timeval *value)
+static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
- return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
+ unsigned long long ret = value->tv_sec * 4096000000ULL;
+ return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
}
-static inline void
-cputime_to_timeval(const cputime_t cputime, struct timeval *value)
+static inline void cputime_to_timeval(const cputime_t cputime,
+ struct timeval *value)
{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
register_pair rp;
- rp.pair = cputime >> 1;
+ rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = (cputime % 4096000000ULL) / 4096;
- value->tv_sec = cputime / 4096000000ULL;
+ value->tv_usec = (__cputime % 4096000000ULL) / 4096;
+ value->tv_sec = __cputime / 4096000000ULL;
#endif
}
/*
* Convert cputime to clock and back.
*/
-static inline clock_t
-cputime_to_clock_t(cputime_t cputime)
+static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
- return cputime_div(cputime, 4096000000ULL / USER_HZ);
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, 4096000000ULL / USER_HZ);
+ return clock;
}
-static inline cputime_t
-clock_t_to_cputime(unsigned long x)
+static inline cputime_t clock_t_to_cputime(unsigned long x)
{
- return (cputime_t) x * (4096000000ULL / USER_HZ);
+ return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
}
/*
* Convert cputime64 to clock.
*/
-static inline clock_t
-cputime64_to_clock_t(cputime64_t cputime)
+static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
- return cputime_div(cputime, 4096000000ULL / USER_HZ);
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, 4096000000ULL / USER_HZ);
+ return clock;
}
struct s390_idle_data {
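
The rewritten cputime helpers keep the CPU timer's fixed resolution of 2**-12 microseconds, i.e. 4096 ticks per microsecond and 4096000000 per second, and perform the conversions with plain shifts and divisions. A standalone sketch of the same arithmetic follows, assuming HZ = 100 for the jiffies conversion; it mirrors the formulas above but is not the kernel header.

#include <inttypes.h>
#include <stdio.h>

#define HZ 100ULL	/* assumed tick rate for the jiffies conversion */

/* 2**-12 microsecond units: 4096 per microsecond, 4096000000 per second. */
static uint64_t usecs_to_cputime(uint64_t us)   { return us * 4096ULL; }
static uint64_t cputime_to_usecs(uint64_t ct)   { return ct >> 12; }
static uint64_t secs_to_cputime(uint64_t s)     { return s * 4096000000ULL; }
static uint64_t cputime_to_jiffies(uint64_t ct) { return ct / (4096000000ULL / HZ); }

int main(void)
{
	uint64_t ct = secs_to_cputime(3) + usecs_to_cputime(250000);

	printf("3.25 s -> %" PRIu64 " cputime units\n", ct);
	printf("back to usecs:        %" PRIu64 "\n", cputime_to_usecs(ct));
	printf("as jiffies (HZ=%llu): %" PRIu64 "\n", HZ, cputime_to_jiffies(ct));
	return 0;
}
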
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 18124b75a7a..9d88db1f55d 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -73,7 +73,7 @@ typedef struct debug_info {
struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
struct debug_view* views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
- mode_t mode;
+ umode_t mode;
} debug_info_t;
typedef int (debug_header_proc_t) (debug_info_t* id,
@@ -124,7 +124,7 @@ debug_info_t *debug_register(const char *name, int pages, int nr_areas,
int buf_size);
debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
- int buf_size, mode_t mode, uid_t uid,
+ int buf_size, umode_t mode, uid_t uid,
gid_t gid);
void debug_unregister(debug_info_t* id);
diff --git a/arch/s390/include/asm/kdebug.h b/arch/s390/include/asm/kdebug.h
index 40db27cd6e6..5c1abd47612 100644
--- a/arch/s390/include/asm/kdebug.h
+++ b/arch/s390/include/asm/kdebug.h
@@ -22,6 +22,6 @@ enum die_val {
DIE_NMI_IPI,
};
-extern void die(const char *, struct pt_regs *, long);
+extern void die(struct pt_regs *, const char *);
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 9e13c7d56cc..707f2306725 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -97,47 +97,52 @@ struct _lowcore {
__u32 gpregs_save_area[16]; /* 0x0180 */
__u32 cregs_save_area[16]; /* 0x01c0 */
+ /* Save areas. */
+ __u32 save_area_sync[8]; /* 0x0200 */
+ __u32 save_area_async[8]; /* 0x0220 */
+ __u32 save_area_restart[1]; /* 0x0240 */
+ __u8 pad_0x0244[0x0248-0x0244]; /* 0x0244 */
+
/* Return psws. */
- __u32 save_area[16]; /* 0x0200 */
- psw_t return_psw; /* 0x0240 */
- psw_t return_mcck_psw; /* 0x0248 */
+ psw_t return_psw; /* 0x0248 */
+ psw_t return_mcck_psw; /* 0x0250 */
/* CPU time accounting values */
- __u64 sync_enter_timer; /* 0x0250 */
- __u64 async_enter_timer; /* 0x0258 */
- __u64 mcck_enter_timer; /* 0x0260 */
- __u64 exit_timer; /* 0x0268 */
- __u64 user_timer; /* 0x0270 */
- __u64 system_timer; /* 0x0278 */
- __u64 steal_timer; /* 0x0280 */
- __u64 last_update_timer; /* 0x0288 */
- __u64 last_update_clock; /* 0x0290 */
+ __u64 sync_enter_timer; /* 0x0258 */
+ __u64 async_enter_timer; /* 0x0260 */
+ __u64 mcck_enter_timer; /* 0x0268 */
+ __u64 exit_timer; /* 0x0270 */
+ __u64 user_timer; /* 0x0278 */
+ __u64 system_timer; /* 0x0280 */
+ __u64 steal_timer; /* 0x0288 */
+ __u64 last_update_timer; /* 0x0290 */
+ __u64 last_update_clock; /* 0x0298 */
/* Current process. */
- __u32 current_task; /* 0x0298 */
- __u32 thread_info; /* 0x029c */
- __u32 kernel_stack; /* 0x02a0 */
+ __u32 current_task; /* 0x02a0 */
+ __u32 thread_info; /* 0x02a4 */
+ __u32 kernel_stack; /* 0x02a8 */
/* Interrupt and panic stack. */
- __u32 async_stack; /* 0x02a4 */
- __u32 panic_stack; /* 0x02a8 */
+ __u32 async_stack; /* 0x02ac */
+ __u32 panic_stack; /* 0x02b0 */
/* Address space pointer. */
- __u32 kernel_asce; /* 0x02ac */
- __u32 user_asce; /* 0x02b0 */
- __u32 current_pid; /* 0x02b4 */
+ __u32 kernel_asce; /* 0x02b4 */
+ __u32 user_asce; /* 0x02b8 */
+ __u32 current_pid; /* 0x02bc */
/* SMP info area */
- __u32 cpu_nr; /* 0x02b8 */
- __u32 softirq_pending; /* 0x02bc */
- __u32 percpu_offset; /* 0x02c0 */
- __u32 ext_call_fast; /* 0x02c4 */
- __u64 int_clock; /* 0x02c8 */
- __u64 mcck_clock; /* 0x02d0 */
- __u64 clock_comparator; /* 0x02d8 */
- __u32 machine_flags; /* 0x02e0 */
- __u32 ftrace_func; /* 0x02e4 */
- __u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */
+ __u32 cpu_nr; /* 0x02c0 */
+ __u32 softirq_pending; /* 0x02c4 */
+ __u32 percpu_offset; /* 0x02c8 */
+ __u32 ext_call_fast; /* 0x02cc */
+ __u64 int_clock; /* 0x02d0 */
+ __u64 mcck_clock; /* 0x02d8 */
+ __u64 clock_comparator; /* 0x02e0 */
+ __u32 machine_flags; /* 0x02e8 */
+ __u32 ftrace_func; /* 0x02ec */
+ __u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
@@ -229,57 +234,62 @@ struct _lowcore {
psw_t mcck_new_psw; /* 0x01e0 */
psw_t io_new_psw; /* 0x01f0 */
- /* Entry/exit save area & return psws. */
- __u64 save_area[16]; /* 0x0200 */
- psw_t return_psw; /* 0x0280 */
- psw_t return_mcck_psw; /* 0x0290 */
+ /* Save areas. */
+ __u64 save_area_sync[8]; /* 0x0200 */
+ __u64 save_area_async[8]; /* 0x0240 */
+ __u64 save_area_restart[1]; /* 0x0280 */
+ __u8 pad_0x0288[0x0290-0x0288]; /* 0x0288 */
+
+ /* Return psws. */
+ psw_t return_psw; /* 0x0290 */
+ psw_t return_mcck_psw; /* 0x02a0 */
/* CPU accounting and timing values. */
- __u64 sync_enter_timer; /* 0x02a0 */
- __u64 async_enter_timer; /* 0x02a8 */
- __u64 mcck_enter_timer; /* 0x02b0 */
- __u64 exit_timer; /* 0x02b8 */
- __u64 user_timer; /* 0x02c0 */
- __u64 system_timer; /* 0x02c8 */
- __u64 steal_timer; /* 0x02d0 */
- __u64 last_update_timer; /* 0x02d8 */
- __u64 last_update_clock; /* 0x02e0 */
+ __u64 sync_enter_timer; /* 0x02b0 */
+ __u64 async_enter_timer; /* 0x02b8 */
+ __u64 mcck_enter_timer; /* 0x02c0 */
+ __u64 exit_timer; /* 0x02c8 */
+ __u64 user_timer; /* 0x02d0 */
+ __u64 system_timer; /* 0x02d8 */
+ __u64 steal_timer; /* 0x02e0 */
+ __u64 last_update_timer; /* 0x02e8 */
+ __u64 last_update_clock; /* 0x02f0 */
/* Current process. */
- __u64 current_task; /* 0x02e8 */
- __u64 thread_info; /* 0x02f0 */
- __u64 kernel_stack; /* 0x02f8 */
+ __u64 current_task; /* 0x02f8 */
+ __u64 thread_info; /* 0x0300 */
+ __u64 kernel_stack; /* 0x0308 */
/* Interrupt and panic stack. */
- __u64 async_stack; /* 0x0300 */
- __u64 panic_stack; /* 0x0308 */
+ __u64 async_stack; /* 0x0310 */
+ __u64 panic_stack; /* 0x0318 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0310 */
- __u64 user_asce; /* 0x0318 */
- __u64 current_pid; /* 0x0320 */
+ __u64 kernel_asce; /* 0x0320 */
+ __u64 user_asce; /* 0x0328 */
+ __u64 current_pid; /* 0x0330 */
/* SMP info area */
- __u32 cpu_nr; /* 0x0328 */
- __u32 softirq_pending; /* 0x032c */
- __u64 percpu_offset; /* 0x0330 */
- __u64 ext_call_fast; /* 0x0338 */
- __u64 int_clock; /* 0x0340 */
- __u64 mcck_clock; /* 0x0348 */
- __u64 clock_comparator; /* 0x0350 */
- __u64 vdso_per_cpu_data; /* 0x0358 */
- __u64 machine_flags; /* 0x0360 */
- __u64 ftrace_func; /* 0x0368 */
- __u64 gmap; /* 0x0370 */
- __u64 cmf_hpp; /* 0x0378 */
+ __u32 cpu_nr; /* 0x0338 */
+ __u32 softirq_pending; /* 0x033c */
+ __u64 percpu_offset; /* 0x0340 */
+ __u64 ext_call_fast; /* 0x0348 */
+ __u64 int_clock; /* 0x0350 */
+ __u64 mcck_clock; /* 0x0358 */
+ __u64 clock_comparator; /* 0x0360 */
+ __u64 vdso_per_cpu_data; /* 0x0368 */
+ __u64 machine_flags; /* 0x0370 */
+ __u64 ftrace_func; /* 0x0378 */
+ __u64 gmap; /* 0x0380 */
+ __u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */
/* Interrupt response block. */
- __u8 irb[64]; /* 0x0380 */
+ __u8 irb[64]; /* 0x0400 */
/* Per cpu primary space access list */
- __u32 paste[16]; /* 0x03c0 */
+ __u32 paste[16]; /* 0x0440 */
- __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */
+ __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 5325c89a584..0fbd1899c7b 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -19,7 +19,7 @@
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
-#define arch_irqsafe_cpu_to_op(pcp, val, op) \
+#define arch_this_cpu_to_op(pcp, val, op) \
do { \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ old__, new__, prev__; \
@@ -41,27 +41,27 @@ do { \
preempt_enable(); \
} while (0)
-#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
+#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
({ \
typedef typeof(pcp) pcp_op_T__; \
pcp_op_T__ ret__; \
@@ -79,10 +79,10 @@ do { \
ret__; \
})
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#include <asm-generic/percpu.h>
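
The renamed arch_this_cpu_to_op()/arch_this_cpu_cmpxchg() macros implement the per-cpu read-modify-write as a compare-and-swap retry loop: read the old value, compute the new one, retry if another update slipped in between. A generic sketch of that loop using C11 atomics (not the s390 cs/csg-based kernel macro itself):

#include <stdatomic.h>
#include <stdio.h>

/* CAS retry loop: add 'val' to *p, tolerating concurrent updates. */
static long cas_add(_Atomic long *p, long val)
{
	long old = atomic_load(p);
	long next;

	do {
		next = old + val;
		/* On failure, 'old' is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak(p, &old, next));

	return next;
}

int main(void)
{
	_Atomic long counter = 40;

	printf("counter = %ld\n", cas_add(&counter, 2));
	return 0;
}
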
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 524d23b8610..011358c1b18 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -128,28 +128,11 @@ static inline int is_zero_pfn(unsigned long pfn)
* effect, this also makes sure that 64 bit module code cannot be used
* as system call address.
*/
-
extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
+extern struct page *vmemmap;
-#ifndef __s390x__
-#define VMALLOC_SIZE (96UL << 20)
-#define VMALLOC_END 0x7e000000UL
-#define VMEM_MAP_END 0x80000000UL
-#else /* __s390x__ */
-#define VMALLOC_SIZE (128UL << 30)
-#define VMALLOC_END 0x3e000000000UL
-#define VMEM_MAP_END 0x40000000000UL
-#endif /* __s390x__ */
-
-/*
- * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
- * mapping. This needs to be calculated at compile time since the size of the
- * VMEM_MAP is static but the size of struct page can change.
- */
-#define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
-#define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
-#define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define vmemmap ((struct page *) VMALLOC_END)
+#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
/*
* A 31 bit pagetable entry of S390 has following format:
@@ -599,10 +582,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
skey = page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
- if (bits) {
- skey ^= bits;
- page_set_storage_key(address, skey, 1);
- }
+ if (bits & _PAGE_CHANGED)
+ page_set_storage_key(address, skey ^ bits, 1);
+ else if (bits)
+ page_reset_referenced(address);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
/* Get host changed & referenced bits from pgste */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5f33d37d032..27272f6a14c 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,8 +80,6 @@ struct thread_struct {
unsigned int acrs[NUM_ACRS];
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
- unsigned long prot_addr; /* address of protection-excep. */
- unsigned int trap_no;
unsigned long gmap_addr; /* address of last gmap fault. */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index a65846340d5..56da355678f 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -324,7 +324,8 @@ struct pt_regs
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned long orig_gpr2;
- unsigned int svc_code;
+ unsigned int int_code;
+ unsigned long int_parm_long;
};
/*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index e63d13dd3bf..d75c8e78f7e 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -352,7 +352,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
- * @queue_start_poll: polling handlers (one per input queue or NULL)
+ * @queue_start_poll_array: polling handlers (one per input queue or NULL)
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
@@ -372,7 +372,8 @@ struct qdio_initialize {
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
- void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
+ void (**queue_start_poll_array) (struct ccw_device *, int,
+ unsigned long);
int scan_threshold;
unsigned long int_parm;
void **input_sbal_addr_array;
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e3bffd4e2d6..7040b8567cd 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -56,6 +56,7 @@ enum {
ec_schedule = 0,
ec_call_function,
ec_call_function_single,
+ ec_stop_cpu,
};
/*
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ab47a69fdf0..c32e9123b40 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -23,7 +23,6 @@ extern void __cpu_die (unsigned int cpu);
extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
-extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h
index fdff1e995c7..67b5c1b14b5 100644
--- a/arch/s390/include/asm/socket.h
+++ b/arch/s390/include/asm/socket.h
@@ -70,4 +70,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 545d219e6a2..0fb34027d3f 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,8 +4,8 @@
#ifdef CONFIG_64BIT
#define SECTION_SIZE_BITS 28
-#define MAX_PHYSADDR_BITS 42
-#define MAX_PHYSMEM_BITS 42
+#define MAX_PHYSADDR_BITS 46
+#define MAX_PHYSMEM_BITS 46
#else
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index b239ff53b18..fb214dd9b7e 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -27,7 +27,7 @@ static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return test_tsk_thread_flag(task, TIF_SYSCALL) ?
- (regs->svc_code & 0xffff) : -1;
+ (regs->int_code & 0xffff) : -1;
}
static inline void syscall_rollback(struct task_struct *task,
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index ef573c1d71a..d73cc6b6000 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,8 +20,6 @@
struct task_struct;
-extern int sysctl_userprocess_debug;
-
extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a23183423b1..a73038155e0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 20 /* This task is single stepped */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
#define _TIF_SYSCALL (1<<TIF_SYSCALL)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#ifdef CONFIG_64BIT
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 005d77d8ae2..0837de80c35 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -4,6 +4,10 @@
#include <linux/cpumask.h>
#include <asm/sysinfo.h>
+struct cpu;
+
+#ifdef CONFIG_SCHED_BOOK
+
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
@@ -16,8 +20,6 @@ static inline const struct cpumask *cpu_coregroup_mask(int cpu)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define mc_capable() (1)
-#ifdef CONFIG_SCHED_BOOK
-
extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];
@@ -29,19 +31,45 @@ static inline const struct cpumask *cpu_book_mask(int cpu)
#define topology_book_id(cpu) (cpu_book_id[cpu])
#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
-#endif /* CONFIG_SCHED_BOOK */
-
+int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
+
+#else /* CONFIG_SCHED_BOOK */
+
+static inline void topology_schedule_update(void) { }
+static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
-#define POLARIZATION_UNKNWN (-1)
+#endif /* CONFIG_SCHED_BOOK */
+
+#define POLARIZATION_UNKNOWN (-1)
#define POLARIZATION_HRZ (0)
#define POLARIZATION_VL (1)
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
-#ifdef CONFIG_SMP
+extern int cpu_polarization[];
+
+static inline void cpu_set_polarization(int cpu, int val)
+{
+#ifdef CONFIG_SCHED_BOOK
+ cpu_polarization[cpu] = val;
+#endif
+}
+
+static inline int cpu_read_polarization(int cpu)
+{
+#ifdef CONFIG_SCHED_BOOK
+ return cpu_polarization[cpu];
+#else
+ return POLARIZATION_HRZ;
+#endif
+}
+
+#ifdef CONFIG_SCHED_BOOK
void s390_init_cpu_topology(void);
#else
static inline void s390_init_cpu_topology(void)
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index eeb52ccf499..05ebbcdbbf6 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -13,8 +13,6 @@
#ifndef __ASSEMBLY__
-typedef unsigned short umode_t;
-
/* A address type so that arithmetic can be done on it & it can be upgraded to
64 bit when necessary
*/
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 58de4c91c33..8a8008fe7b8 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -398,6 +398,7 @@
#define __ARCH_WANT_SYS_SIGNAL
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dd4f0764091..7d9ec924e7e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,8 @@ extra-y += head.o init_task.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
-obj-$(CONFIG_SMP) += smp.o topology.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SCHED_BOOK) += topology.o
obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
switch_cpu.o)
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 751318765e2..6e6a72e66d6 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,7 +45,8 @@ int main(void)
DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
- DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
+ DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
+ DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -108,7 +109,9 @@ int main(void)
DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
- DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
+ DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
+ DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
+ DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -150,7 +153,6 @@ int main(void)
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
- DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
#endif /* CONFIG_32BIT */
return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f8828d38fa6..3aa4d00aaf5 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn:
.previous
ENTRY(s390_base_ext_handler)
- stmg %r0,%r15,__LC_SAVE_AREA
+ stmg %r0,%r15,__LC_SAVE_AREA_ASYNC
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_ext_handler_fn
@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler)
ltgr %r1,%r1
jz 1f
basr %r14,%r1
-1: lmg %r0,%r15,__LC_SAVE_AREA
+1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
@@ -53,7 +53,7 @@ s390_base_ext_handler_fn:
.previous
ENTRY(s390_base_pgm_handler)
- stmg %r0,%r15,__LC_SAVE_AREA
+ stmg %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_pgm_handler_fn
@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler)
ltgr %r1,%r1
jz 1f
basr %r14,%r1
- lmg %r0,%r15,__LC_SAVE_AREA
+ lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn:
.previous
ENTRY(s390_base_ext_handler)
- stm %r0,%r15,__LC_SAVE_AREA
+ stm %r0,%r15,__LC_SAVE_AREA_ASYNC
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,2f-0b(%r13)
@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler)
ltr %r1,%r1
jz 1f
basr %r14,%r1
-1: lm %r0,%r15,__LC_SAVE_AREA
+1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpsw __LC_EXT_OLD_PSW
@@ -164,7 +164,7 @@ s390_base_ext_handler_fn:
.previous
ENTRY(s390_base_pgm_handler)
- stm %r0,%r15,__LC_SAVE_AREA
+ stm %r0,%r15,__LC_SAVE_AREA_SYNC
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,2f-0b(%r13)
@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler)
ltr %r1,%r1
jz 1f
basr %r14,%r1
- lm %r0,%r15,__LC_SAVE_AREA
+ lm %r0,%r15,__LC_SAVE_AREA_SYNC
lpsw __LC_PGM_OLD_PSW
1: lpsw disabled_wait_psw-0b(%r13)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 84a98289844..ab64bdbab2a 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -278,9 +278,6 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
{
if (call >> 16) /* hack for backward compatibility */
return -EINVAL;
-
- call &= 0xffff;
-
switch (call) {
case SEMTIMEDOP:
return compat_sys_semtimedop(first, compat_ptr(ptr),
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4f68c81d3ff..6fe78c2f95d 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -501,8 +501,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* We forgot to include these in the sigcontext.
To avoid breaking binary compatibility, they are passed as args. */
- regs->gprs[4] = current->thread.trap_no;
- regs->gprs[5] = current->thread.prot_addr;
+ if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGTRAP || sig == SIGFPE) {
+ /* set extra registers only for synchronous signals */
+ regs->gprs[4] = regs->int_code & 127;
+ regs->gprs[5] = regs->int_parm_long;
+ }
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
@@ -544,9 +548,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+ regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- regs->gprs[14] = (__u64) frame->retcode;
+ regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
(u16 __force __user *)(frame->retcode));
}
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 5ad6bc078bf..6848828b962 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -74,7 +74,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
- int nr_areas, int buf_size, mode_t mode);
+ int nr_areas, int buf_size, umode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static int debug_prolog_level_fn(debug_info_t * id,
@@ -330,7 +330,7 @@ debug_info_free(debug_info_t* db_info){
static debug_info_t*
debug_info_create(const char *name, int pages_per_area, int nr_areas,
- int buf_size, mode_t mode)
+ int buf_size, umode_t mode)
{
debug_info_t* rc;
@@ -688,7 +688,7 @@ debug_close(struct inode *inode, struct file *file)
*/
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
- int nr_areas, int buf_size, mode_t mode,
+ int nr_areas, int buf_size, umode_t mode,
uid_t uid, gid_t gid)
{
debug_info_t *rc = NULL;
@@ -1090,7 +1090,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
int rc = 0;
int i;
unsigned long flags;
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
if (!id)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 45df6d456aa..e2f847599c8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1578,10 +1578,15 @@ void show_code(struct pt_regs *regs)
ptr += sprintf(ptr, "%s Code:", mode);
hops = 0;
while (start < end && hops < 8) {
- *ptr++ = (start == 32) ? '>' : ' ';
+ opsize = insn_length(code[start]);
+ if (start + opsize == 32)
+ *ptr++ = '#';
+ else if (start == 32)
+ *ptr++ = '>';
+ else
+ *ptr++ = ' ';
addr = regs->psw.addr + start - 32;
ptr += sprintf(ptr, ONELONG, addr);
- opsize = insn_length(code[start]);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c9ffe002519..52098d6dfaa 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -434,18 +434,22 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
}
}
-static void __init setup_boot_command_line(void)
+static inline int has_ebcdic_char(const char *str)
{
int i;
- /* convert arch command line to ascii */
- for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
- if (COMMAND_LINE[i] & 0x80)
- break;
- if (i < ARCH_COMMAND_LINE_SIZE)
- EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
- COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
+ for (i = 0; str[i]; i++)
+ if (str[i] & 0x80)
+ return 1;
+ return 0;
+}
+static void __init setup_boot_command_line(void)
+{
+ COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+ /* convert arch command line to ascii if necessary */
+ if (has_ebcdic_char(COMMAND_LINE))
+ EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
strlcpy(boot_command_line, strstrip(COMMAND_LINE),
ARCH_COMMAND_LINE_SIZE);
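
setup_boot_command_line() now converts the command line from EBCDIC only when has_ebcdic_char() finds a byte with the top bit set, which never happens in printable 7-bit ASCII but does for EBCDIC letters and digits (all at 0x81 and above). A minimal sketch of the same detection; the EBCDIC sample string is just "root" spelled in code page 037 bytes for illustration.

#include <stdio.h>

/* Returns 1 if any byte has bit 0x80 set (never true for 7-bit ASCII). */
static int has_ebcdic_char(const char *str)
{
	for (int i = 0; str[i]; i++)
		if (str[i] & 0x80)
			return 1;
	return 0;
}

int main(void)
{
	const char ascii[]  = "root=/dev/dasda1";
	const char ebcdic[] = "\x99\x96\x96\xa3";	/* "root" in EBCDIC */

	printf("ascii:  %d\n", has_ebcdic_char(ascii));
	printf("ebcdic: %d\n", has_ebcdic_char(ebcdic));
	return 0;
}
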
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b13157057e0..3705700ed37 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -19,32 +19,22 @@
#include <asm/unistd.h>
#include <asm/page.h>
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS = STACK_FRAME_OVERHEAD
-SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0 = __PT_GPRS
+__PT_R1 = __PT_GPRS + 4
+__PT_R2 = __PT_GPRS + 8
+__PT_R3 = __PT_GPRS + 12
+__PT_R4 = __PT_GPRS + 16
+__PT_R5 = __PT_GPRS + 20
+__PT_R6 = __PT_GPRS + 24
+__PT_R7 = __PT_GPRS + 28
+__PT_R8 = __PT_GPRS + 32
+__PT_R9 = __PT_GPRS + 36
+__PT_R10 = __PT_GPRS + 40
+__PT_R11 = __PT_GPRS + 44
+__PT_R12 = __PT_GPRS + 48
+__PT_R13 = __PT_GPRS + 52
+__PT_R14 = __PT_GPRS + 56
+__PT_R15 = __PT_GPRS + 60
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT
#define BASED(name) name-system_call(%r13)
-#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Ltrace_irq_on_caller)
- basr %r14,%r1
+ l %r1,BASED(.Lhardirqs_on)
+ basr %r14,%r1 # call trace_hardirqs_on_caller
+#endif
.endm
.macro TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Ltrace_irq_off_caller)
- basr %r14,%r1
- .endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
+ l %r1,BASED(.Lhardirqs_off)
+ basr %r14,%r1 # call trace_hardirqs_off_caller
#endif
+ .endm
-#ifdef CONFIG_LOCKDEP
.macro LOCKDEP_SYS_EXIT
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- jz 0f
+#ifdef CONFIG_LOCKDEP
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jz .+10
l %r1,BASED(.Llockdep_sys_exit)
- basr %r14,%r1
-0:
- .endm
-#else
-#define LOCKDEP_SYS_EXIT
+ basr %r14,%r1 # call lockdep_sys_exit
#endif
-
-/*
- * Register usage in interrupt handlers:
- * R9 - pointer to current task structure
- * R13 - pointer to literal pool
- * R14 - return register for function calls
- * R15 - kernel stack pointer
- */
-
- .macro UPDATE_VTIME lc_from,lc_to,lc_sum
- lm %r10,%r11,\lc_from
- sl %r10,\lc_to
- sl %r11,\lc_to+4
- bc 3,BASED(0f)
- sl %r10,BASED(.Lc_1)
-0: al %r10,\lc_sum
- al %r11,\lc_sum+4
- bc 12,BASED(1f)
- al %r10,BASED(.Lc_1)
-1: stm %r10,%r11,\lc_sum
- .endm
-
- .macro SAVE_ALL_SVC psworg,savearea
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
- s %r15,BASED(.Lc_spsize) # make room for registers & psw
- .endm
-
- .macro SAVE_ALL_BASE savearea
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
.endm
- .macro SAVE_ALL_PGM psworg,savearea
- tm \psworg+1,0x01 # test problem state bit
+ .macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
- bnz BASED(1f)
- tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- bnz BASED(2f)
- la %r12,\psworg
- b BASED(stack_overflow)
-#else
- bz BASED(2f)
+ tml %r15,\stacksize - CONFIG_STACK_GUARD
+ la %r14,\savearea
+ jz stack_overflow
#endif
-1: l %r15,__LC_KERNEL_STACK # problem state -> load ksp
-2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
.endm
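
The new CHECK_STACK macro boils the old open-coded overflow tests down to a single test-under-mask: with a power-of-two stack and a guard area at its low end, the stack pointer has overflowed exactly when it has dropped into the guard bytes. A rough C sketch of that condition, with assumed stack and guard sizes (both powers of two), not values taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>

    #define STACK_SIZE  (1UL << 13)   /* assumed 8 KiB kernel stack */
    #define STACK_GUARD 256UL         /* assumed CONFIG_STACK_GUARD value */

    /* tml %r15,STACK_SIZE-STACK_GUARD ; jz stack_overflow is equivalent to: */
    static bool stack_overflowed(uintptr_t sp)
    {
            /* all bits selected by the mask are zero exactly when
             * (sp % STACK_SIZE) < STACK_GUARD, i.e. sp sits in the guard area */
            return (sp & (STACK_SIZE - STACK_GUARD)) == 0;
    }
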
- .macro SAVE_ALL_ASYNC psworg,savearea
- stm %r12,%r15,\savearea
- l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
- la %r12,\psworg
- tm \psworg+1,0x01 # test problem state bit
- bnz BASED(1f) # from user -> load async stack
- clc \psworg+4(4),BASED(.Lcritical_end)
- bhe BASED(0f)
- clc \psworg+4(4),BASED(.Lcritical_start)
- bl BASED(0f)
- l %r14,BASED(.Lcleanup_critical)
- basr %r14,%r14
- tm 1(%r12),0x01 # retest problem state after cleanup
- bnz BASED(1f)
-0: l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
+ .macro SWITCH_ASYNC savearea,stack,shift
+ tmh %r8,0x0001 # interrupting from user ?
+ jnz 1f
+ lr %r14,%r9
+ sl %r14,BASED(.Lcritical_start)
+ cl %r14,BASED(.Lcritical_length)
+ jhe 0f
+ la %r11,\savearea # inside critical section, do cleanup
+ bras %r14,cleanup_critical
+ tmh %r8,0x0001 # retest problem state after cleanup
+ jnz 1f
+0: l %r14,\stack # are we already on the target stack?
slr %r14,%r15
- sra %r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
- bnz BASED(1f)
- tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- bnz BASED(2f)
- b BASED(stack_overflow)
-#else
- bz BASED(2f)
-#endif
-1: l %r15,__LC_ASYNC_STACK
-2: s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ sra %r14,\shift
+ jnz 1f
+ CHECK_STACK 1<<\shift,\savearea
+ j 2f
+1: l %r15,\stack # load target stack
+2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
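
SWITCH_ASYNC replaces the old SAVE_ALL_ASYNC/CREATE_STACK_FRAME pair: after the critical-section cleanup it loads the target stack pointer only if the interrupted code was not already running on that stack, then carves out room for a register save area plus pt_regs and points %r11 at the pt_regs. A rough C sketch of the stack-switch decision; the frame and pt_regs sizes below are stand-ins, not taken from the patch:

    #include <stdint.h>

    #define STACK_FRAME_OVERHEAD 96   /* assumed 31-bit frame size */
    #define PT_SIZE              76   /* stand-in for __PT_SIZE */

    static uintptr_t switch_async(uintptr_t sp, uintptr_t stack_top, int shift)
    {
            /* (stack_top - sp) >> shift is zero exactly when sp already lies
             * in the 2^shift bytes just below stack_top, i.e. on that stack */
            if ((intptr_t)(stack_top - sp) >> shift)
                    sp = stack_top;                   /* switch stacks */
            sp -= STACK_FRAME_OVERHEAD + PT_SIZE;     /* room for frame + pt_regs */
            return sp;      /* new %r15; %r11 = sp + STACK_FRAME_OVERHEAD */
    }
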
- .macro CREATE_STACK_FRAME savearea
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
- stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+ .macro ADD64 high,low,timer
+ al \high,\timer
+ al \low,\timer+4
+ brc 12,.+8
+ ahi \high,1
.endm
- .macro RESTORE_ALL psworg,sync
- mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni \psworg+1,0xfd # clear wait state bit
- .endif
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
- stpt __LC_EXIT_TIMER
- lpsw \psworg # back to caller
+ .macro SUB64 high,low,timer
+ sl \high,\timer
+ sl \low,\timer+4
+ brc 3,.+8
+ ahi \high,-1
+ .endm
+
+ .macro UPDATE_VTIME high,low,enter_timer
+ lm \high,\low,__LC_EXIT_TIMER
+ SUB64 \high,\low,\enter_timer
+ ADD64 \high,\low,__LC_USER_TIMER
+ stm \high,\low,__LC_USER_TIMER
+ lm \high,\low,__LC_LAST_UPDATE_TIMER
+ SUB64 \high,\low,__LC_EXIT_TIMER
+ ADD64 \high,\low,__LC_SYSTEM_TIMER
+ stm \high,\low,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
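
ADD64 and SUB64 do 64-bit timer arithmetic with a pair of 32-bit registers, propagating the carry or borrow out of the low word by hand; UPDATE_VTIME then uses them to account the time since the last exit as user time and the rest as system time. A rough C sketch of the carry handling (the struct is a stand-in for the \high,\low register pair):

    #include <stdint.h>

    struct u64_pair { uint32_t hi, lo; };     /* stand-in for the register pair */

    static void add64(struct u64_pair *acc, uint64_t t)
    {
            uint32_t lo = acc->lo + (uint32_t)t;
            /* carry out of the low word iff the 32-bit sum wrapped */
            acc->hi += (uint32_t)(t >> 32) + (lo < acc->lo);
            acc->lo = lo;
    }

    static void sub64(struct u64_pair *acc, uint64_t t)
    {
            uint32_t lo = acc->lo - (uint32_t)t;
            /* borrow from the low word iff the 32-bit difference wrapped */
            acc->hi -= (uint32_t)(t >> 32) + (lo > acc->lo);
            acc->lo = lo;
    }
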
.macro REENABLE_IRQS
- mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
- ni __SF_EMPTY(%r15),0xbf
- ssm __SF_EMPTY(%r15)
+ st %r8,__LC_RETURN_PSW
+ ni __LC_RETURN_PSW,0xbf
+ ssm __LC_RETURN_PSW
.endm
.section .kprobes.text, "ax"
@@ -197,14 +145,13 @@ STACK_SIZE = 1 << STACK_SHIFT
* gpr2 = prev
*/
ENTRY(__switch_to)
- basr %r1,0
-0: l %r4,__THREAD_info(%r2) # get thread_info of prev
+ l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
- bz 1f-0b(%r1)
+ jz 0f
ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
-1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
+0: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
l %r15,__THREAD_ksp(%r3) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
@@ -224,48 +171,55 @@ __critical_start:
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-sysc_saveall:
- SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+3(%r12),_TIF_SYSCALL
+sysc_stm:
+ stm %r8,%r15,__LC_SAVE_AREA_SYNC
+ l %r12,__LC_THREAD_INFO
+ l %r13,__LC_SVC_NEW_PSW+4
+sysc_per:
+ l %r15,__LC_KERNEL_STACK
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+ UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
- xr %r7,%r7
- icm %r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0
- bnz BASED(sysc_nr_ok) # svc number > 0
+ oi __TI_flags+3(%r12),_TIF_SYSCALL
+ lh %r8,__PT_INT_CODE+2(%r11)
+ sla %r8,2 # shift and test for svc0
+ jnz sysc_nr_ok
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
- bnl BASED(sysc_nr_ok)
- sth %r1,SP_SVC_CODE+2(%r15)
- lr %r7,%r1 # copy svc number to %r7
+ jnl sysc_nr_ok
+ sth %r1,__PT_INT_CODE+2(%r11)
+ lr %r8,%r1
+ sla %r8,2
sysc_nr_ok:
- sll %r7,2 # svc number *4
- l %r10,BASED(.Lsysc_table)
+ l %r10,BASED(.Lsys_call_table) # 31 bit system call table
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ st %r2,__PT_ORIG_GPR2(%r11)
+ st %r7,STACK_FRAME_OVERHEAD(%r15)
+ l %r9,0(%r8,%r10) # get system call addr.
tm __TI_flags+2(%r12),_TIF_TRACE >> 8
- mvc SP_ARGS(4,%r15),SP_R7(%r15)
- l %r8,0(%r7,%r10) # get system call addr.
- bnz BASED(sysc_tracesys)
- basr %r14,%r8 # call sys_xxxx
- st %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ jnz sysc_tracesys
+ basr %r14,%r9 # call sys_xxxx
+ st %r2,__PT_R2(%r11) # store return value
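
The system call number is kept pre-shifted (number * 4) in %r8 so it doubles as the byte offset into the 31-bit call table; an immediate of zero means the number is passed in %r1, and an out-of-range %r1 simply dispatches as svc 0, which returns -ENOSYS. A rough C sketch of the lookup; the table size and pointer type are assumptions, not taken from the patch:

    #define NR_SYSCALLS 349                          /* assumed table size */
    typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);

    static sys_call_ptr_t pick_syscall(const sys_call_ptr_t table[],
                                       unsigned int svc_imm, unsigned long r1)
    {
            unsigned int nr = svc_imm;       /* 8-bit immediate from the svc insn */

            if (nr == 0 && r1 < NR_SYSCALLS) /* svc 0: real number in %r1 */
                    nr = r1;                 /* out of range -> stays 0 -> -ENOSYS */
            return table[nr];                /* asm keeps nr*4 in %r8 as the offset */
    }
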
sysc_return:
LOCKDEP_SYS_EXIT
sysc_tif:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- bno BASED(sysc_restore)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno sysc_restore
tm __TI_flags+3(%r12),_TIF_WORK_SVC
- bnz BASED(sysc_work) # there is work to do (signals etc.)
+ jnz sysc_work # check for work
ni __TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore:
- RESTORE_ALL __LC_RETURN_PSW,1
+ mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ stpt __LC_EXIT_TIMER
+ lm %r0,%r15,__PT_R0(%r11)
+ lpsw __LC_RETURN_PSW
sysc_done:
#
@@ -273,16 +227,16 @@ sysc_done:
#
sysc_work:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
- bo BASED(sysc_mcck_pending)
+ jo sysc_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- bo BASED(sysc_reschedule)
+ jo sysc_reschedule
tm __TI_flags+3(%r12),_TIF_SIGPENDING
- bo BASED(sysc_sigpending)
+ jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
- bo BASED(sysc_notify_resume)
+ jo sysc_notify_resume
tm __TI_flags+3(%r12),_TIF_PER_TRAP
- bo BASED(sysc_singlestep)
- b BASED(sysc_return) # beware of critical section cleanup
+ jo sysc_singlestep
+ j sysc_return # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
@@ -290,13 +244,13 @@ sysc_work:
sysc_reschedule:
l %r1,BASED(.Lschedule)
la %r14,BASED(sysc_return)
- br %r1 # call scheduler
+ br %r1 # call schedule
#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
- l %r1,BASED(.Ls390_handle_mcck)
+ l %r1,BASED(.Lhandle_mcck)
la %r14,BASED(sysc_return)
br %r1 # TIF bit will be cleared by handler
@@ -305,23 +259,24 @@ sysc_mcck_pending:
#
sysc_sigpending:
ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r12),_TIF_SYSCALL
- bno BASED(sysc_return)
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
- xr %r7,%r7 # svc 0 returns -ENOSYS
- clc SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2)
- bnl BASED(sysc_nr_ok) # invalid svc number -> do svc 0
- icm %r7,3,SP_SVC_CODE+2(%r15)# load new svc number
- b BASED(sysc_nr_ok) # restart svc
+ jno sysc_return
+ lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+ xr %r8,%r8 # svc 0 returns -ENOSYS
+ clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+ jnl sysc_nr_ok # invalid svc number -> do svc 0
+ lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
+ sla %r8,2
+ j sysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lr %r2,%r11 # pass pointer to pt_regs
l %r1,BASED(.Ldo_notify_resume)
la %r14,BASED(sysc_return)
br %r1 # call do_notify_resume
@@ -331,56 +286,57 @@ sysc_notify_resume:
#
sysc_singlestep:
ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
- la %r2,SP_PTREGS(%r15) # address of register-save area
- l %r1,BASED(.Lhandle_per) # load adr. of per handler
- la %r14,BASED(sysc_return) # load adr. of system return
- br %r1 # branch to do_per_trap
+ lr %r2,%r11 # pass pointer to pt_regs
+ l %r1,BASED(.Ldo_per_trap)
+ la %r14,BASED(sysc_return)
+ br %r1 # call do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
- l %r1,BASED(.Ltrace_entry)
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Ltrace_enter)
+ lr %r2,%r11 # pass pointer to pt_regs
la %r3,0
xr %r0,%r0
- icm %r0,3,SP_SVC_CODE(%r15)
- st %r0,SP_R2(%r15)
- basr %r14,%r1
+ icm %r0,3,__PT_INT_CODE+2(%r11)
+ st %r0,__PT_R2(%r11)
+ basr %r14,%r1 # call do_syscall_trace_enter
cl %r2,BASED(.Lnr_syscalls)
- bnl BASED(sysc_tracenogo)
- lr %r7,%r2
- sll %r7,2 # svc number *4
- l %r8,0(%r7,%r10)
+ jnl sysc_tracenogo
+ lr %r8,%r2
+ sll %r8,2
+ l %r9,0(%r8,%r10)
sysc_tracego:
- lm %r3,%r6,SP_R3(%r15)
- mvc SP_ARGS(4,%r15),SP_R7(%r15)
- l %r2,SP_ORIG_R2(%r15)
- basr %r14,%r8 # call sys_xxx
- st %r2,SP_R2(%r15) # store return value
+ lm %r3,%r7,__PT_R3(%r11)
+ st %r7,STACK_FRAME_OVERHEAD(%r15)
+ l %r2,__PT_ORIG_GPR2(%r11)
+ basr %r14,%r9 # call sys_xxx
+ st %r2,__PT_R2(%r11) # store return value
sysc_tracenogo:
tm __TI_flags+2(%r12),_TIF_TRACE >> 8
- bz BASED(sysc_return)
+ jz sysc_return
l %r1,BASED(.Ltrace_exit)
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lr %r2,%r11 # pass pointer to pt_regs
la %r14,BASED(sysc_return)
- br %r1
+ br %r1 # call do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ l %r12,__LC_THREAD_INFO
l %r13,__LC_SVC_NEW_PSW+4
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
- bo BASED(0f)
- st %r15,SP_R15(%r15) # store stack pointer for new kthread
-0: l %r1,BASED(.Lschedtail)
- basr %r14,%r1
+ tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
+ jo 0f
+ st %r15,__PT_R15(%r11) # store stack pointer for new kthread
+0: l %r1,BASED(.Lschedule_tail)
+ basr %r14,%r1 # call schedule_tail
TRACE_IRQS_ON
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- b BASED(sysc_tracenogo)
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ j sysc_tracenogo
#
# kernel_execve function needs to deal with pt_regs that is not
@@ -390,153 +346,98 @@ ENTRY(kernel_execve)
stm %r12,%r15,48(%r15)
lr %r14,%r15
l %r13,__LC_SVC_NEW_PSW+4
- s %r15,BASED(.Lc_spsize)
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
st %r14,__SF_BACKCHAIN(%r15)
- la %r12,SP_PTREGS(%r15)
+ la %r12,STACK_FRAME_OVERHEAD(%r15)
xc 0(__PT_SIZE,%r12),0(%r12)
l %r1,BASED(.Ldo_execve)
lr %r5,%r12
- basr %r14,%r1
+ basr %r14,%r1 # call do_execve
ltr %r2,%r2
- be BASED(0f)
- a %r15,BASED(.Lc_spsize)
+ je 0f
+ ahi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
lm %r12,%r15,48(%r15)
br %r14
# execve succeeded.
-0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
l %r15,__LC_KERNEL_STACK # load ksp
- s %r15,BASED(.Lc_spsize) # make room for registers & psw
- mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs
l %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
l %r1,BASED(.Lexecve_tail)
- basr %r14,%r1
- b BASED(sysc_return)
+ basr %r14,%r1 # call execve_tail
+ j sysc_return
/*
* Program check handler routine
*/
ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
stpt __LC_SYNC_ENTER_TIMER
- SAVE_ALL_BASE __LC_SAVE_AREA
- tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- bnz BASED(pgm_per) # got per exception -> special case
- SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- bz BASED(pgm_no_vtime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime:
- l %r3,__LC_PGM_ILC # load program interruption code
- l %r4,__LC_TRANS_EXC_CODE
- REENABLE_IRQS
- la %r8,0x7f
- nr %r8,%r3
- sll %r8,2
- l %r1,BASED(.Ljump_table)
- l %r1,0(%r8,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
- basr %r14,%r1 # branch to interrupt-handler
-pgm_exit:
- b BASED(sysc_return)
-
-#
-# handle per exception
-#
-pgm_per:
- tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- bnz BASED(pgm_per_std) # ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
- clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
- be BASED(pgm_svcper)
-# no interesting special case, ignore PER event
- lm %r12,%r15,__LC_SAVE_AREA
- lpsw 0x28
-
-#
-# Normal per exception
-#
-pgm_per_std:
- SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- bz BASED(pgm_no_vtime2)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime2:
+ stm %r8,%r15,__LC_SAVE_AREA_SYNC
+ l %r12,__LC_THREAD_INFO
+ l %r13,__LC_SVC_NEW_PSW+4
+ lm %r8,%r9,__LC_PGM_OLD_PSW
+ tmh %r8,0x0001 # test problem state bit
+ jnz 1f # -> fault in user space
+ tmh %r8,0x4000 # PER bit set in old PSW ?
+ jnz 0f # -> enabled, can't be a double fault
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jnz pgm_svcper # -> single stepped svc
+0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ j 2f
+1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ l %r15,__LC_KERNEL_STACK
+2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+ stm %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
+ mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jz 0f
l %r1,__TI_task(%r12)
- tm SP_PSW+1(%r15),0x01 # kernel per event ?
- bz BASED(kernel_per)
- mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+ tmh %r8,0x0001 # kernel per event ?
+ jz pgm_kprobe
+ oi __TI_flags+3(%r12),_TIF_PER_TRAP
mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
- oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
- l %r3,__LC_PGM_ILC # load program interruption code
- l %r4,__LC_TRANS_EXC_CODE
- REENABLE_IRQS
- la %r8,0x7f
- nr %r8,%r3 # clear per-event-bit and ilc
- be BASED(pgm_exit2) # only per or per+check ?
- sll %r8,2
+0: REENABLE_IRQS
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r1,BASED(.Ljump_table)
- l %r1,0(%r8,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
+ la %r10,0x7f
+ n %r10,__PT_INT_CODE(%r11)
+ je sysc_return
+ sll %r10,2
+ l %r1,0(%r10,%r1) # load address of handler routine
+ lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
-pgm_exit2:
- b BASED(sysc_return)
+ j sysc_return
#
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
#
-pgm_svcper:
- SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- l %r8,__TI_task(%r12)
- mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
- mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
- mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
- b BASED(sysc_do_svc)
+pgm_kprobe:
+ REENABLE_IRQS
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ l %r1,BASED(.Ldo_per_trap)
+ lr %r2,%r11 # pass pointer to pt_regs
+ basr %r14,%r1 # call do_per_trap
+ j sysc_return
#
-# per was called from kernel, must be kprobes
+# single stepped system call
#
-kernel_per:
- REENABLE_IRQS
- la %r2,SP_PTREGS(%r15) # address of register-save area
- l %r1,BASED(.Lhandle_per) # load adr. of per handler
- basr %r14,%r1 # branch to do_single_step
- b BASED(pgm_exit)
+pgm_svcper:
+ oi __TI_flags+3(%r12),_TIF_PER_TRAP
+ mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
+ mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+ lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
/*
* IO interrupt handler routine
@@ -545,28 +446,35 @@ kernel_per:
ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
- CREATE_STACK_FRAME __LC_SAVE_AREA+16
- mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- bz BASED(io_no_vtime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-io_no_vtime:
+ stm %r8,%r15,__LC_SAVE_AREA_ASYNC
+ l %r12,__LC_THREAD_INFO
+ l %r13,__LC_SVC_NEW_PSW+4
+ lm %r8,%r9,__LC_IO_OLD_PSW
+ tmh %r8,0x0001 # interrupting from user ?
+ jz io_skip
+ UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+io_skip:
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+ stm %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF
- l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
- la %r2,SP_PTREGS(%r15) # address of register-save area
- basr %r14,%r1 # branch to standard irq handler
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ l %r1,BASED(.Ldo_IRQ)
+ lr %r2,%r11 # pass pointer to pt_regs
+ basr %r14,%r1 # call do_IRQ
io_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
io_tif:
tm __TI_flags+3(%r12),_TIF_WORK_INT
- bnz BASED(io_work) # there is work to do (signals etc.)
+ jnz io_work # there is work to do (signals etc.)
io_restore:
- RESTORE_ALL __LC_RETURN_PSW,0
+ mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ ni __LC_RETURN_PSW+1,0xfd # clean wait state bit
+ stpt __LC_EXIT_TIMER
+ lm %r0,%r15,__PT_R0(%r11)
+ lpsw __LC_RETURN_PSW
io_done:
#
@@ -577,28 +485,29 @@ io_done:
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- bo BASED(io_work_user) # yes -> do resched & signal
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jo io_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- bnz BASED(io_restore) # preemption disabled
+ jnz io_restore # preemption disabled
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- bno BASED(io_restore)
+ jno io_restore
# switch to kernel stack
- l %r1,SP_R15(%r15)
- s %r1,BASED(.Lc_spsize)
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ l %r1,__PT_R15(%r11)
+ ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
- l %r1,BASED(.Lpreempt_schedule_irq)
+ l %r1,BASED(.Lpreempt_irq)
basr %r14,%r1 # call preempt_schedule_irq
- b BASED(io_return)
+ j io_return
#else
- b BASED(io_restore)
+ j io_restore
#endif
#
@@ -606,9 +515,10 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
- s %r1,BASED(.Lc_spsize)
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1
#
@@ -618,24 +528,24 @@ io_work_user:
#
io_work_tif:
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
- bo BASED(io_mcck_pending)
+ jo io_mcck_pending
tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
- bo BASED(io_reschedule)
+ jo io_reschedule
tm __TI_flags+3(%r12),_TIF_SIGPENDING
- bo BASED(io_sigpending)
+ jo io_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
- bo BASED(io_notify_resume)
- b BASED(io_return) # beware of critical section cleanup
+ jo io_notify_resume
+ j io_return # beware of critical section cleanup
#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
# TRACE_IRQS_ON already done at io_return
- l %r1,BASED(.Ls390_handle_mcck)
+ l %r1,BASED(.Lhandle_mcck)
basr %r14,%r1 # TIF bit will be cleared by handler
TRACE_IRQS_OFF
- b BASED(io_return)
+ j io_return
#
# _TIF_NEED_RESCHED is set, call schedule
@@ -643,37 +553,37 @@ io_mcck_pending:
io_reschedule:
# TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Lschedule)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
basr %r14,%r1 # call scheduler
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_return)
+ j io_return
#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
# TRACE_IRQS_ON already done at io_return
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal)
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ lr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # call do_signal
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_return)
+ j io_return
#
# _TIF_SIGPENDING is set, call do_signal
#
io_notify_resume:
# TRACE_IRQS_ON already done at io_return
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume)
- basr %r14,%r1 # call do_signal
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ lr %r2,%r11 # pass pointer to pt_regs
+ basr %r14,%r1 # call do_notify_resume
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_return)
+ j io_return
/*
* External interrupt handler routine
@@ -682,23 +592,25 @@ io_notify_resume:
ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
- CREATE_STACK_FRAME __LC_SAVE_AREA+16
- mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- bz BASED(ext_no_vtime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-ext_no_vtime:
+ stm %r8,%r15,__LC_SAVE_AREA_ASYNC
+ l %r12,__LC_THREAD_INFO
+ l %r13,__LC_SVC_NEW_PSW+4
+ lm %r8,%r9,__LC_EXT_OLD_PSW
+ tmh %r8,0x0001 # interrupting from user ?
+ jz ext_skip
+ UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ext_skip:
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+ stm %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF
- la %r2,SP_PTREGS(%r15) # address of register-save area
+ lr %r2,%r11 # pass pointer to pt_regs
l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
l %r4,__LC_EXT_PARAMS # get external parameters
l %r1,BASED(.Ldo_extint)
- basr %r14,%r1
- b BASED(io_return)
+ basr %r14,%r1 # call do_extint
+ j io_return
__critical_end:
@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
- SAVE_ALL_BASE __LC_SAVE_AREA+32
- la %r12,__LC_MCK_OLD_PSW
+ l %r12,__LC_THREAD_INFO
+ l %r13,__LC_SVC_NEW_PSW+4
+ lm %r8,%r9,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
- bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
- mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+ jo mcck_panic # yes -> rest of mcck code invalid
+ la %r14,__LC_CPU_TIMER_SAVE_AREA
+ mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- bo BASED(1f)
+ jo 3f
la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
- bl BASED(0f)
+ jl 0f
la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER
- bl BASED(0f)
+ jl 1f
la %r14,__LC_EXIT_TIMER
-0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
- bl BASED(0f)
+1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+ jl 2f
la %r14,__LC_LAST_UPDATE_TIMER
-0: spt 0(%r14)
+2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- bno BASED(mcck_int_main) # no -> skip cleanup critical
- tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
- bnz BASED(mcck_int_main) # from user -> load async stack
- clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
- bhe BASED(mcck_int_main)
- clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
- bl BASED(mcck_int_main)
- l %r14,BASED(.Lcleanup_critical)
- basr %r14,%r14
-mcck_int_main:
- l %r14,__LC_PANIC_STACK # are we already on the panic stack?
- slr %r14,%r15
- sra %r14,PAGE_SHIFT
- be BASED(0f)
- l %r15,__LC_PANIC_STACK # load panic stack
-0: s %r15,BASED(.Lc_spsize) # make room for registers & psw
- CREATE_STACK_FRAME __LC_SAVE_AREA+32
- mvc SP_PSW(8,%r15),0(%r12)
- l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
- bno BASED(mcck_no_vtime) # no -> skip cleanup critical
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- bz BASED(mcck_no_vtime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
-mcck_no_vtime:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Ls390_mcck)
- basr %r14,%r1 # call machine check handler
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- bno BASED(mcck_return)
+3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+ jno mcck_panic # no -> skip cleanup critical
+	tmh	%r8,0x0001		# interrupting from user ?
+ jz mcck_skip
+ UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+mcck_skip:
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
+ mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+ stm %r8,%r9,__PT_PSW(%r11)
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ l %r1,BASED(.Ldo_machine_check)
+ lr %r2,%r11 # pass pointer to pt_regs
+ basr %r14,%r1 # call s390_do_machine_check
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
- s %r1,BASED(.Lc_spsize)
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
lr %r15,%r1
- stosm __SF_EMPTY(%r15),0x04 # turn dat on
+ ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
- bno BASED(mcck_return)
+ jno mcck_return
TRACE_IRQS_OFF
- l %r1,BASED(.Ls390_handle_mcck)
- basr %r14,%r1 # call machine check handler
+ l %r1,BASED(.Lhandle_mcck)
+ basr %r14,%r1 # call s390_handle_mcck
TRACE_IRQS_ON
mcck_return:
- mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
+ mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
- bno BASED(0f)
- lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
+ jno 0f
+ lm %r0,%r15,__PT_R0(%r11)
stpt __LC_EXIT_TIMER
- lpsw __LC_RETURN_MCCK_PSW # back to caller
-0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
- lpsw __LC_RETURN_MCCK_PSW # back to caller
+ lpsw __LC_RETURN_MCCK_PSW
+0: lm %r0,%r15,__PT_R0(%r11)
+ lpsw __LC_RETURN_MCCK_PSW
- RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+mcck_panic:
+ l %r14,__LC_PANIC_STACK
+ slr %r14,%r15
+ sra %r14,PAGE_SHIFT
+ jz 0f
+ l %r15,__LC_PANIC_STACK
+0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ j mcck_skip
/*
* Restart interruption handler, kick starter for additional CPUs
@@ -799,18 +703,18 @@ restart_base:
stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
- l %r15,__LC_SAVE_AREA+60 # load ksp
+ l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
lam %a0,%a15,__LC_AREGS_SAVE_AREA
- lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+ lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone
l %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
- stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
+ ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
basr %r14,0
l %r14,restart_addr-.(%r14)
- basr %r14,%r14 # branch to start_secondary
+ basr %r14,%r14 # call start_secondary
restart_addr:
.long start_secondary
.align 8
@@ -835,19 +739,19 @@ restart_go:
# PSW restart interrupt handler
#
ENTRY(psw_restart_int_handler)
- st %r15,__LC_SAVE_AREA+48(%r0) # save r15
+ st %r15,__LC_SAVE_AREA_RESTART
basr %r15,0
0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack
l %r15,0(%r15)
- ahi %r15,-SP_SIZE # make room for pt_regs
- stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
- mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack
- mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+ ahi %r15,-__PT_SIZE # create pt_regs on stack
+ stm %r0,%r14,__PT_R0(%r15)
+ mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
+ mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
+ ahi %r15,-STACK_FRAME_OVERHEAD
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
basr %r14,0
1: l %r14,.Ldo_restart-1b(%r14)
basr %r14,%r14
-
basr %r14,0 # load disabled wait PSW if
2: lpsw restart_psw_crash-2b(%r14) # do_restart returns
.align 4
@@ -869,215 +773,174 @@ restart_psw_crash:
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
- sl %r15,BASED(.Lc_spsize)
- mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
- stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- la %r1,__LC_SAVE_AREA
- ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ?
- be BASED(0f)
- ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ?
- be BASED(0f)
- la %r1,__LC_SAVE_AREA+16
-0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
- l %r1,BASED(1f) # branch to kernel_stack_overflow
- la %r2,SP_PTREGS(%r15) # load pt_regs
- br %r1
+ ahi %r15,-__PT_SIZE # create pt_regs
+ stm %r0,%r7,__PT_R0(%r15)
+ stm %r8,%r9,__PT_PSW(%r15)
+ mvc __PT_R8(32,%r11),0(%r14)
+ lr %r15,%r11
+ ahi %r15,-STACK_FRAME_OVERHEAD
+ l %r1,BASED(1f)
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ lr %r2,%r11 # pass pointer to pt_regs
+ br %r1 # branch to kernel_stack_overflow
1: .long kernel_stack_overflow
#endif
-cleanup_table_system_call:
- .long system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_tif:
- .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
-cleanup_table_sysc_restore:
- .long sysc_restore + 0x80000000, sysc_done + 0x80000000
-cleanup_table_io_tif:
- .long io_tif + 0x80000000, io_restore + 0x80000000
-cleanup_table_io_restore:
- .long io_restore + 0x80000000, io_done + 0x80000000
+cleanup_table:
+ .long system_call + 0x80000000
+ .long sysc_do_svc + 0x80000000
+ .long sysc_tif + 0x80000000
+ .long sysc_restore + 0x80000000
+ .long sysc_done + 0x80000000
+ .long io_tif + 0x80000000
+ .long io_restore + 0x80000000
+ .long io_done + 0x80000000
cleanup_critical:
- clc 4(4,%r12),BASED(cleanup_table_system_call)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_system_call+4)
- bl BASED(cleanup_system_call)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
- bl BASED(cleanup_sysc_tif)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
- bl BASED(cleanup_sysc_restore)
-0:
- clc 4(4,%r12),BASED(cleanup_table_io_tif)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
- bl BASED(cleanup_io_tif)
-0:
- clc 4(4,%r12),BASED(cleanup_table_io_restore)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
- bl BASED(cleanup_io_restore)
-0:
- br %r14
+ cl %r9,BASED(cleanup_table) # system_call
+ jl 0f
+ cl %r9,BASED(cleanup_table+4) # sysc_do_svc
+ jl cleanup_system_call
+ cl %r9,BASED(cleanup_table+8) # sysc_tif
+ jl 0f
+ cl %r9,BASED(cleanup_table+12) # sysc_restore
+ jl cleanup_sysc_tif
+ cl %r9,BASED(cleanup_table+16) # sysc_done
+ jl cleanup_sysc_restore
+ cl %r9,BASED(cleanup_table+20) # io_tif
+ jl 0f
+ cl %r9,BASED(cleanup_table+24) # io_restore
+ jl cleanup_io_tif
+ cl %r9,BASED(cleanup_table+28) # io_done
+ jl cleanup_io_restore
+0: br %r14
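
cleanup_critical now walks one table of code addresses instead of per-range pairs: the interrupted address in %r9 is compared against consecutive boundaries, and the half-open interval it falls into selects the fixup routine. A rough C sketch of the dispatch; the labels merely stand in for the addresses the table holds:

    #include <stdint.h>

    enum cleanup { NONE, SYSTEM_CALL, SYSC_TIF, SYSC_RESTORE, IO_TIF, IO_RESTORE };

    static enum cleanup pick_cleanup(uintptr_t addr, const uintptr_t t[8])
    {
            /* t[] = { system_call, sysc_do_svc, sysc_tif, sysc_restore,
             *         sysc_done, io_tif, io_restore, io_done } */
            if (addr < t[0]) return NONE;
            if (addr < t[1]) return SYSTEM_CALL;   /* inside system_call entry */
            if (addr < t[2]) return NONE;
            if (addr < t[3]) return SYSC_TIF;      /* sysc_tif .. sysc_restore */
            if (addr < t[4]) return SYSC_RESTORE;  /* sysc_restore .. sysc_done */
            if (addr < t[5]) return NONE;
            if (addr < t[6]) return IO_TIF;        /* io_tif .. io_restore */
            if (addr < t[7]) return IO_RESTORE;    /* io_restore .. io_done */
            return NONE;
    }
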
cleanup_system_call:
- mvc __LC_RETURN_PSW(8),0(%r12)
- clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
- bh BASED(0f)
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
- c %r12,BASED(.Lmck_old_psw)
- be BASED(0f)
+ # check if stpt has been executed
+ cl %r9,BASED(cleanup_system_call_insn)
+ jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0: c %r12,BASED(.Lmck_old_psw)
- la %r12,__LC_SAVE_AREA+32
- be BASED(0f)
- la %r12,__LC_SAVE_AREA+16
-0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
- bhe BASED(cleanup_vtime)
- clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
- bh BASED(0f)
- mvc __LC_SAVE_AREA(16),0(%r12)
-0: st %r13,4(%r12)
- l %r15,__LC_KERNEL_STACK # problem state -> load ksp
- s %r15,BASED(.Lc_spsize) # make room for registers & psw
- st %r15,12(%r12)
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc 0(4,%r12),__LC_THREAD_INFO
- l %r12,__LC_THREAD_INFO
- mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+3(%r12),_TIF_SYSCALL
-cleanup_vtime:
- clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
- bhe BASED(cleanup_stime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
- clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
- bh BASED(cleanup_update)
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+ chi %r11,__LC_SAVE_AREA_ASYNC
+ je 0f
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: # check if stm has been executed
+ cl %r9,BASED(cleanup_system_call_insn+4)
+ jh 0f
+ mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
+0: # set up saved registers r12, and r13
+ st %r12,16(%r11) # r12 thread-info pointer
+ st %r13,20(%r11) # r13 literal-pool pointer
+ # check if the user time calculation has been done
+ cl %r9,BASED(cleanup_system_call_insn+8)
+ jh 0f
+ l %r10,__LC_EXIT_TIMER
+ l %r15,__LC_EXIT_TIMER+4
+ SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
+ ADD64 %r10,%r15,__LC_USER_TIMER
+ st %r10,__LC_USER_TIMER
+ st %r15,__LC_USER_TIMER+4
+0: # check if the system time calculation has been done
+ cl %r9,BASED(cleanup_system_call_insn+12)
+ jh 0f
+ l %r10,__LC_LAST_UPDATE_TIMER
+ l %r15,__LC_LAST_UPDATE_TIMER+4
+ SUB64 %r10,%r15,__LC_EXIT_TIMER
+ ADD64 %r10,%r15,__LC_SYSTEM_TIMER
+ st %r10,__LC_SYSTEM_TIMER
+ st %r15,__LC_SYSTEM_TIMER+4
+0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
- la %r12,__LC_RETURN_PSW
+ # set up saved register 11
+ l %r15,__LC_KERNEL_STACK
+ ahi %r15,-__PT_SIZE
+ st %r15,12(%r11) # r11 pt_regs pointer
+ # fill pt_regs
+ mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
+ stm %r0,%r7,__PT_R0(%r15)
+ mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ # setup saved register 15
+ ahi %r15,-STACK_FRAME_OVERHEAD
+ st %r15,28(%r11) # r15 stack pointer
+ # set new psw address and exit
+ l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
br %r14
cleanup_system_call_insn:
- .long sysc_saveall + 0x80000000
.long system_call + 0x80000000
- .long sysc_vtime + 0x80000000
- .long sysc_stime + 0x80000000
- .long sysc_update + 0x80000000
+ .long sysc_stm + 0x80000000
+ .long sysc_vtime + 0x80000000 + 36
+ .long sysc_vtime + 0x80000000 + 76
cleanup_sysc_tif:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
- la %r12,__LC_RETURN_PSW
+ l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000
br %r14
cleanup_sysc_restore:
- clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
- be BASED(2f)
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
- c %r12,BASED(.Lmck_old_psw)
- be BASED(0f)
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
- be BASED(2f)
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
- c %r12,BASED(.Lmck_old_psw)
- la %r12,__LC_SAVE_AREA+32
- be BASED(1f)
- la %r12,__LC_SAVE_AREA+16
-1: mvc 0(16,%r12),SP_R12(%r15)
- lm %r0,%r11,SP_R0(%r15)
- l %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+ cl %r9,BASED(cleanup_sysc_restore_insn)
+ jhe 0f
+ l %r9,12(%r11) # get saved pointer to pt_regs
+ mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
+ mvc 0(32,%r11),__PT_R8(%r9)
+ lm %r0,%r7,__PT_R0(%r9)
+0: lm %r8,%r9,__LC_RETURN_PSW
br %r14
cleanup_sysc_restore_insn:
.long sysc_done - 4 + 0x80000000
- .long sysc_done - 8 + 0x80000000
cleanup_io_tif:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
- la %r12,__LC_RETURN_PSW
+ l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000
br %r14
cleanup_io_restore:
- clc 4(4,%r12),BASED(cleanup_io_restore_insn)
- be BASED(1f)
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
- clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
- be BASED(1f)
- mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
- lm %r0,%r11,SP_R0(%r15)
- l %r15,SP_R15(%r15)
-1: la %r12,__LC_RETURN_PSW
+ cl %r9,BASED(cleanup_io_restore_insn)
+ jhe 0f
+ l %r9,12(%r11) # get saved r11 pointer to pt_regs
+ mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ mvc 0(32,%r11),__PT_R8(%r9)
+ lm %r0,%r7,__PT_R0(%r9)
+0: lm %r8,%r9,__LC_RETURN_PSW
br %r14
cleanup_io_restore_insn:
.long io_done - 4 + 0x80000000
- .long io_done - 8 + 0x80000000
/*
* Integer constants
*/
- .align 4
-.Lc_spsize: .long SP_SIZE
-.Lc_overhead: .long STACK_FRAME_OVERHEAD
-.Lnr_syscalls: .long NR_syscalls
-.L0x018: .short 0x018
-.L0x020: .short 0x020
-.L0x028: .short 0x028
-.L0x030: .short 0x030
-.L0x038: .short 0x038
-.Lc_1: .long 1
+ .align 4
+.Lnr_syscalls: .long NR_syscalls
/*
* Symbol constants
*/
-.Ls390_mcck: .long s390_do_machine_check
-.Ls390_handle_mcck:
- .long s390_handle_mcck
-.Lmck_old_psw: .long __LC_MCK_OLD_PSW
-.Ldo_IRQ: .long do_IRQ
-.Ldo_extint: .long do_extint
-.Ldo_signal: .long do_signal
-.Ldo_notify_resume:
- .long do_notify_resume
-.Lhandle_per: .long do_per_trap
-.Ldo_execve: .long do_execve
-.Lexecve_tail: .long execve_tail
-.Ljump_table: .long pgm_check_table
-.Lschedule: .long schedule
+.Ldo_machine_check: .long s390_do_machine_check
+.Lhandle_mcck: .long s390_handle_mcck
+.Ldo_IRQ: .long do_IRQ
+.Ldo_extint: .long do_extint
+.Ldo_signal: .long do_signal
+.Ldo_notify_resume: .long do_notify_resume
+.Ldo_per_trap: .long do_per_trap
+.Ldo_execve: .long do_execve
+.Lexecve_tail: .long execve_tail
+.Ljump_table: .long pgm_check_table
+.Lschedule: .long schedule
#ifdef CONFIG_PREEMPT
-.Lpreempt_schedule_irq:
- .long preempt_schedule_irq
+.Lpreempt_irq: .long preempt_schedule_irq
#endif
-.Ltrace_entry: .long do_syscall_trace_enter
-.Ltrace_exit: .long do_syscall_trace_exit
-.Lschedtail: .long schedule_tail
-.Lsysc_table: .long sys_call_table
+.Ltrace_enter: .long do_syscall_trace_enter
+.Ltrace_exit: .long do_syscall_trace_exit
+.Lschedule_tail: .long schedule_tail
+.Lsys_call_table: .long sys_call_table
+.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on_caller:
- .long trace_hardirqs_on_caller
-.Ltrace_irq_off_caller:
- .long trace_hardirqs_off_caller
+.Lhardirqs_on: .long trace_hardirqs_on_caller
+.Lhardirqs_off: .long trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit:
- .long lockdep_sys_exit
+.Llockdep_sys_exit: .long lockdep_sys_exit
#endif
-.Lcritical_start:
- .long __critical_start + 0x80000000
-.Lcritical_end:
- .long __critical_end + 0x80000000
-.Lcleanup_critical:
- .long cleanup_critical
+.Lcritical_start: .long __critical_start + 0x80000000
+.Lcritical_length: .long __critical_end - __critical_start
.section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index ef8fb1d6e8d..bf538aaf407 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -6,15 +6,15 @@
#include <asm/ptrace.h>
-extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
+extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
-void do_protection_exception(struct pt_regs *, long, unsigned long);
-void do_dat_exception(struct pt_regs *, long, unsigned long);
-void do_asce_exception(struct pt_regs *, long, unsigned long);
+void do_protection_exception(struct pt_regs *regs);
+void do_dat_exception(struct pt_regs *regs);
+void do_asce_exception(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
@@ -28,7 +28,7 @@ void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
void do_restart(void);
int __cpuinit start_secondary(void *cpuvoid);
void __init startup_init(void);
-void die(const char * str, struct pt_regs * regs, long err);
+void die(struct pt_regs *regs, const char *str);
void __init time_init(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 83a93747e2f..412a7b8783d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -19,32 +19,22 @@
#include <asm/unistd.h>
#include <asm/page.h>
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS = STACK_FRAME_OVERHEAD
-SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
-SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
-SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
-SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
-SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
-SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
-SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
-SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
-SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0 = __PT_GPRS
+__PT_R1 = __PT_GPRS + 8
+__PT_R2 = __PT_GPRS + 16
+__PT_R3 = __PT_GPRS + 24
+__PT_R4 = __PT_GPRS + 32
+__PT_R5 = __PT_GPRS + 40
+__PT_R6 = __PT_GPRS + 48
+__PT_R7 = __PT_GPRS + 56
+__PT_R8 = __PT_GPRS + 64
+__PT_R9 = __PT_GPRS + 72
+__PT_R10 = __PT_GPRS + 80
+__PT_R11 = __PT_GPRS + 88
+__PT_R12 = __PT_GPRS + 96
+__PT_R13 = __PT_GPRS + 104
+__PT_R14 = __PT_GPRS + 112
+__PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
@@ -59,154 +49,103 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#define BASED(name) name-system_call(%r13)
- .macro SPP newpp
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
- jz .+8
- .insn s,0xb2800000,\newpp
-#endif
- .endm
-
- .macro HANDLE_SIE_INTERCEPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- tm __TI_flags+6(%r12),_TIF_SIE>>8
- jz 0f
- SPP __LC_CMF_HPP # set host id
- clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
- jl 0f
- clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
- jhe 0f
- mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
-0:
-#endif
- .endm
-
-#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
brasl %r14,trace_hardirqs_on_caller
+#endif
.endm
.macro TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
brasl %r14,trace_hardirqs_off_caller
- .endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
#endif
+ .endm
-#ifdef CONFIG_LOCKDEP
.macro LOCKDEP_SYS_EXIT
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- jz 0f
+#ifdef CONFIG_LOCKDEP
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jz .+10
brasl %r14,lockdep_sys_exit
-0:
- .endm
-#else
-#define LOCKDEP_SYS_EXIT
#endif
-
- .macro UPDATE_VTIME lc_from,lc_to,lc_sum
- lg %r10,\lc_from
- slg %r10,\lc_to
- alg %r10,\lc_sum
- stg %r10,\lc_sum
.endm
-/*
- * Register usage in interrupt handlers:
- * R9 - pointer to current task structure
- * R13 - pointer to literal pool
- * R14 - return register for function calls
- * R15 - kernel stack pointer
- */
+ .macro SPP newpp
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
- .macro SAVE_ALL_SVC psworg,savearea
- stmg %r11,%r15,\savearea
- lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
- aghi %r15,-SP_SIZE # make room for registers & psw
- lg %r11,__LC_LAST_BREAK
+ .macro HANDLE_SIE_INTERCEPT scratch
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ tm __TI_flags+6(%r12),_TIF_SIE>>8
+ jz .+42
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ jz .+8
+ .insn s,0xb2800000,BASED(.Lhost_id) # set host id
+ lgr \scratch,%r9
+ slg \scratch,BASED(.Lsie_loop)
+ clg \scratch,BASED(.Lsie_length)
+ jhe .+10
+ lg %r9,BASED(.Lsie_loop)
+#endif
.endm
- .macro SAVE_ALL_PGM psworg,savearea
- stmg %r11,%r15,\savearea
- tm \psworg+1,0x01 # test problem state bit
+ .macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
- jnz 1f
- tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- jnz 2f
- la %r12,\psworg
- j stack_overflow
-#else
- jz 2f
+ tml %r15,\stacksize - CONFIG_STACK_GUARD
+ lghi %r14,\savearea
+ jz stack_overflow
#endif
-1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
-2: aghi %r15,-SP_SIZE # make room for registers & psw
- larl %r13,system_call
- lg %r11,__LC_LAST_BREAK
.endm
- .macro SAVE_ALL_ASYNC psworg,savearea
- stmg %r11,%r15,\savearea
- larl %r13,system_call
- lg %r11,__LC_LAST_BREAK
- la %r12,\psworg
- tm \psworg+1,0x01 # test problem state bit
- jnz 1f # from user -> load kernel stack
- clc \psworg+8(8),BASED(.Lcritical_end)
+ .macro SWITCH_ASYNC savearea,stack,shift
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz 1f
+ lgr %r14,%r9
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
jhe 0f
- clc \psworg+8(8),BASED(.Lcritical_start)
- jl 0f
+ lghi %r11,\savearea # inside critical section, do cleanup
brasl %r14,cleanup_critical
- tm 1(%r12),0x01 # retest problem state after cleanup
+ tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
+0: lg %r14,\stack # are we already on the target stack?
slgr %r14,%r15
- srag %r14,%r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
+ srag %r14,%r14,\shift
jnz 1f
- tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- jnz 2f
- j stack_overflow
-#else
- jz 2f
-#endif
-1: lg %r15,__LC_ASYNC_STACK # load async stack
-2: aghi %r15,-SP_SIZE # make room for registers & psw
- .endm
-
- .macro CREATE_STACK_FRAME savearea
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
- stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
+ CHECK_STACK 1<<\shift,\savearea
+ j 2f
+1: lg %r15,\stack # load target stack
+2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
- .macro RESTORE_ALL psworg,sync
- mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
- .if !\sync
- ni \psworg+1,0xfd # clear wait state bit
- .endif
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user
- lpswe \psworg # back to caller
+ .macro UPDATE_VTIME scratch,enter_timer
+ lg \scratch,__LC_EXIT_TIMER
+ slg \scratch,\enter_timer
+ alg \scratch,__LC_USER_TIMER
+ stg \scratch,__LC_USER_TIMER
+ lg \scratch,__LC_LAST_UPDATE_TIMER
+ slg \scratch,__LC_EXIT_TIMER
+ alg \scratch,__LC_SYSTEM_TIMER
+ stg \scratch,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro LAST_BREAK
- srag %r10,%r11,23
- jz 0f
- stg %r11,__TI_last_break(%r12)
-0:
+ .macro LAST_BREAK scratch
+ srag \scratch,%r10,23
+ jz .+10
+ stg %r10,__TI_last_break(%r12)
.endm
.macro REENABLE_IRQS
- mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
- ni __SF_EMPTY(%r15),0xbf
- ssm __SF_EMPTY(%r15)
+ stg %r8,__LC_RETURN_PSW
+ ni __LC_RETURN_PSW,0xbf
+ ssm __LC_RETURN_PSW
.endm
.section .kprobes.text, "ax"
@@ -245,55 +184,66 @@ __critical_start:
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-sysc_saveall:
- SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+7(%r12),_TIF_SYSCALL
+sysc_stmg:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+sysc_per:
+ lg %r15,__LC_KERNEL_STACK
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- LAST_BREAK
+ UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r13
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
- llgh %r7,SP_SVC_CODE+2(%r15)
- slag %r7,%r7,2 # shift and test for svc 0
+ oi __TI_flags+7(%r12),_TIF_SYSCALL
+ llgh %r8,__PT_INT_CODE+2(%r11)
+ slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
- llgfr %r1,%r1 # clear high word in r1
+ llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
jnl sysc_nr_ok
- sth %r1,SP_SVC_CODE+2(%r15)
- slag %r7,%r1,2 # shift and test for svc 0
+ sth %r1,__PT_INT_CODE+2(%r11)
+ slag %r8,%r1,2
sysc_nr_ok:
- larl %r10,sys_call_table
+ larl %r10,sys_call_table # 64 bit system call table
#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
+ tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
jno sysc_noemu
- larl %r10,sys_call_table_emu # use 31 bit emulation system calls
+ larl %r10,sys_call_table_emu # 31 bit system call table
sysc_noemu:
#endif
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stg %r2,__PT_ORIG_GPR2(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+	lgf	%r9,0(%r8,%r10)		# get system call addr.
tm __TI_flags+6(%r12),_TIF_TRACE >> 8
- mvc SP_ARGS(8,%r15),SP_R7(%r15)
- lgf %r8,0(%r7,%r10) # load address of system call routine
jnz sysc_tracesys
- basr %r14,%r8 # call sys_xxxx
- stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ basr %r14,%r9 # call sys_xxxx
+ stg %r2,__PT_R2(%r11) # store return value
sysc_return:
LOCKDEP_SYS_EXIT
sysc_tif:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno sysc_restore
tm __TI_flags+7(%r12),_TIF_WORK_SVC
- jnz sysc_work # there is work to do (signals etc.)
+ jnz sysc_work # check for work
ni __TI_flags+7(%r12),255-_TIF_SYSCALL
sysc_restore:
- RESTORE_ALL __LC_RETURN_PSW,1
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
sysc_done:
#
@@ -317,7 +267,7 @@ sysc_work:
#
sysc_reschedule:
larl %r14,sysc_return
- jg schedule # return point is sysc_return
+ jg schedule
#
# _TIF_MCCK_PENDING is set, call handler
@@ -331,33 +281,33 @@ sysc_mcck_pending:
#
sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
- la %r2,SP_PTREGS(%r15) # load pt_regs
- brasl %r14,do_signal # call do_signal
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
- lghi %r7,0 # svc 0 returns -ENOSYS
- lh %r1,SP_SVC_CODE+2(%r15) # load new svc number
+ lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lghi %r8,0 # svc 0 returns -ENOSYS
+ lh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
jnl sysc_nr_ok # invalid svc number -> do svc 0
- slag %r7,%r1,2
+ slag %r8,%r1,2
j sysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,sysc_return
- jg do_notify_resume # call do_notify_resume
+ jg do_notify_resume
#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
- la %r2,SP_PTREGS(%r15) # address of register-save area
- larl %r14,sysc_return # load adr. of system return
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,sysc_return
jg do_per_trap
#
@@ -365,41 +315,41 @@ sysc_singlestep:
# and after the system call
#
sysc_tracesys:
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0
- llgh %r0,SP_SVC_CODE+2(%r15)
- stg %r0,SP_R2(%r15)
+ llgh %r0,__PT_INT_CODE+2(%r11)
+ stg %r0,__PT_R2(%r11)
brasl %r14,do_syscall_trace_enter
lghi %r0,NR_syscalls
clgr %r0,%r2
jnh sysc_tracenogo
- sllg %r7,%r2,2 # svc number *4
- lgf %r8,0(%r7,%r10)
+ sllg %r8,%r2,2
+ lgf %r9,0(%r8,%r10)
sysc_tracego:
- lmg %r3,%r6,SP_R3(%r15)
- mvc SP_ARGS(8,%r15),SP_R7(%r15)
- lg %r2,SP_ORIG_R2(%r15)
- basr %r14,%r8 # call sys_xxx
- stg %r2,SP_R2(%r15) # store return value
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
+ basr %r14,%r9 # call sys_xxx
+ stg %r2,__PT_R2(%r11) # store return value
sysc_tracenogo:
tm __TI_flags+6(%r12),_TIF_TRACE >> 8
jz sysc_return
- la %r2,SP_PTREGS(%r15) # load pt_regs
- larl %r14,sysc_return # return point is sysc_return
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,sysc_return
jg do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
- lg %r13,__LC_SVC_NEW_PSW+8
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ lg %r12,__LC_THREAD_INFO
+ tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jo 0f
- stg %r15,SP_R15(%r15) # store stack pointer for new kthread
+ stg %r15,__PT_R15(%r11) # store stack pointer for new kthread
0: brasl %r14,schedule_tail
TRACE_IRQS_ON
- stosm 24(%r15),0x03 # reenable interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
j sysc_tracenogo
#
@@ -409,26 +359,26 @@ ENTRY(ret_from_fork)
ENTRY(kernel_execve)
stmg %r12,%r15,96(%r15)
lgr %r14,%r15
- aghi %r15,-SP_SIZE
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
stg %r14,__SF_BACKCHAIN(%r15)
- la %r12,SP_PTREGS(%r15)
+ la %r12,STACK_FRAME_OVERHEAD(%r15)
xc 0(__PT_SIZE,%r12),0(%r12)
lgr %r5,%r12
brasl %r14,do_execve
ltgfr %r2,%r2
je 0f
- aghi %r15,SP_SIZE
+ aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
lmg %r12,%r15,96(%r15)
br %r14
# execve succeeded.
-0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
lg %r15,__LC_KERNEL_STACK # load ksp
- aghi %r15,-SP_SIZE # make room for registers & psw
- lg %r13,__LC_SVC_NEW_PSW+8
- mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs
lg %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
brasl %r14,execve_tail
j sysc_return
@@ -437,127 +387,72 @@ ENTRY(kernel_execve)
*/
ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
stpt __LC_SYNC_ENTER_TIMER
- tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- jnz pgm_per # got per exception -> special case
- SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- HANDLE_SIE_INTERCEPT
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- jz pgm_no_vtime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- LAST_BREAK
-pgm_no_vtime:
- stg %r11,SP_ARGS(%r15)
- lgf %r3,__LC_PGM_ILC # load program interruption code
- lg %r4,__LC_TRANS_EXC_CODE
- REENABLE_IRQS
- lghi %r8,0x7f
- ngr %r8,%r3
- sll %r8,3
- larl %r1,pgm_check_table
- lg %r1,0(%r8,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
- basr %r14,%r1 # branch to interrupt-handler
-pgm_exit:
- j sysc_return
-
-#
-# handle per exception
-#
-pgm_per:
- tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- jnz pgm_per_std # ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
- clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
- je pgm_svcper
-# no interesting special case, ignore PER event
- lpswe __LC_PGM_OLD_PSW
-
-#
-# Normal per exception
-#
-pgm_per_std:
- SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- HANDLE_SIE_INTERCEPT
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- jz pgm_no_vtime2
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- LAST_BREAK
-pgm_no_vtime2:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14
+ tmhh %r8,0x0001 # test problem state bit
+ jnz 1f # -> fault in user space
+ tmhh %r8,0x4000 # PER bit set in old PSW ?
+ jnz 0f # -> enabled, can't be a double fault
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jnz pgm_svcper # -> single stepped svc
+0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ j 2f
+1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r14
+ lg %r15,__LC_KERNEL_STACK
+2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
+ mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+ stg %r10,__PT_ARGS(%r11)
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jz 0f
lg %r1,__TI_task(%r12)
- tm SP_PSW+1(%r15),0x01 # kernel per event ?
- jz kernel_per
- mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+ tmhh %r8,0x0001 # kernel per event ?
+ jz pgm_kprobe
+ oi __TI_flags+7(%r12),_TIF_PER_TRAP
mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
- oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
- lgf %r3,__LC_PGM_ILC # load program interruption code
- lg %r4,__LC_TRANS_EXC_CODE
- REENABLE_IRQS
- lghi %r8,0x7f
- ngr %r8,%r3 # clear per-event-bit and ilc
- je pgm_exit2
- sll %r8,3
+0: REENABLE_IRQS
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
- lg %r1,0(%r8,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
+ llgh %r10,__PT_INT_CODE+2(%r11)
+ nill %r10,0x007f
+ sll %r10,3
+ je sysc_return
+ lg %r1,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
-pgm_exit2:
j sysc_return
#
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
#
-pgm_svcper:
- SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SAVE_AREA
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- LAST_BREAK
- lg %r8,__TI_task(%r12)
- mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
- mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
- mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
- j sysc_do_svc
+pgm_kprobe:
+ REENABLE_IRQS
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_per_trap
+ j sysc_return
#
-# per was called from kernel, must be kprobes
+# single stepped system call
#
-kernel_per:
- REENABLE_IRQS
- la %r2,SP_PTREGS(%r15) # address of register-save area
- brasl %r14,do_per_trap
- j pgm_exit
+pgm_svcper:
+ oi __TI_flags+7(%r12),_TIF_PER_TRAP
+ mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ larl %r14,sysc_per
+ stg %r14,__LC_RETURN_PSW+8
+ lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs
/*
* IO interrupt handler routine
@@ -565,21 +460,25 @@ kernel_per:
ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
- CREATE_STACK_FRAME __LC_SAVE_AREA+40
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- HANDLE_SIE_INTERCEPT
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- jz io_no_vtime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
- LAST_BREAK
-io_no_vtime:
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_IO_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user?
+ jz io_skip
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
+io_skip:
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF
- la %r2,SP_PTREGS(%r15) # address of register-save area
- brasl %r14,do_IRQ # call standard irq handler
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_IRQ
io_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
@@ -587,7 +486,14 @@ io_tif:
tm __TI_flags+7(%r12),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_restore:
- RESTORE_ALL __LC_RETURN_PSW,0
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
io_done:
#
@@ -600,7 +506,7 @@ io_done:
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
jo io_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
@@ -609,10 +515,11 @@ io_work:
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno io_restore
# switch to kernel stack
- lg %r1,SP_R15(%r15)
- aghi %r1,-SP_SIZE
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ lg %r1,__PT_R15(%r11)
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical
@@ -628,9 +535,10 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
- aghi %r1,-SP_SIZE
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
#
@@ -663,9 +571,9 @@ io_mcck_pending:
#
io_reschedule:
# TRACE_IRQS_ON already done at io_return
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
brasl %r14,schedule # call scheduler
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return
@@ -674,10 +582,10 @@ io_reschedule:
#
io_sigpending:
# TRACE_IRQS_ON already done at io_return
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
- brasl %r14,do_signal # call do_signal
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return
@@ -686,10 +594,10 @@ io_sigpending:
#
io_notify_resume:
# TRACE_IRQS_ON already done at io_return
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
- brasl %r14,do_notify_resume # call do_notify_resume
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ ssm __LC_SVC_NEW_PSW # reenable interrupts
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_notify_resume
+ ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return
@@ -699,21 +607,24 @@ io_notify_resume:
ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
- CREATE_STACK_FRAME __LC_SAVE_AREA+40
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- HANDLE_SIE_INTERCEPT
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- jz ext_no_vtime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
- LAST_BREAK
-ext_no_vtime:
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_EXT_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user ?
+ jz ext_skip
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
+ext_skip:
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
TRACE_IRQS_OFF
lghi %r1,4096
- la %r2,SP_PTREGS(%r15) # address of register-save area
+ lgr %r2,%r11 # pass pointer to pt_regs
llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
llgf %r4,__LC_EXT_PARAMS # get external parameter
lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
@@ -730,81 +641,77 @@ ENTRY(mcck_int_handler)
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- stmg %r11,%r15,__LC_SAVE_AREA+80
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
larl %r13,system_call
- lg %r11,__LC_LAST_BREAK
- la %r12,__LC_MCK_OLD_PSW
+ lmg %r8,%r9,__LC_MCK_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14
tm __LC_MCCK_CODE,0x80 # system damage?
- jo mcck_int_main # yes -> rest of mcck code invalid
- la %r14,4095
- mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+ jo mcck_panic # yes -> rest of mcck code invalid
+ lghi %r14,__LC_CPU_TIMER_SAVE_AREA
+ mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- jo 1f
+ jo 3f
la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
jl 0f
la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER
- jl 0f
+ jl 1f
la %r14,__LC_EXIT_TIMER
-0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
- jl 0f
+1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
+ jl 2f
la %r14,__LC_LAST_UPDATE_TIMER
-0: spt 0(%r14)
+2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno mcck_int_main # no -> skip cleanup critical
- tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
- jnz mcck_int_main # from user -> load kernel stack
- clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
- jhe mcck_int_main
- clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
- jl mcck_int_main
- brasl %r14,cleanup_critical
-mcck_int_main:
- lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
- slgr %r14,%r15
- srag %r14,%r14,PAGE_SHIFT
- jz 0f
- lg %r15,__LC_PANIC_STACK # load panic stack
-0: aghi %r15,-SP_SIZE # make room for registers & psw
- CREATE_STACK_FRAME __LC_SAVE_AREA+80
- mvc SP_PSW(16,%r15),0(%r12)
- lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
- tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
- jno mcck_no_vtime # no -> no timer update
- HANDLE_SIE_INTERCEPT
- tm SP_PSW+1(%r15),0x01 # interrupting from user ?
- jz mcck_no_vtime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
- LAST_BREAK
-mcck_no_vtime:
- la %r2,SP_PTREGS(%r15) # load pt_regs
+3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+ jno mcck_panic # no -> skip cleanup critical
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
+ tm %r8,0x0001 # interrupting from user ?
+ jz mcck_skip
+ UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+ LAST_BREAK %r14
+mcck_skip:
+ lghi %r14,__LC_GPREGS_SAVE_AREA
+ mvc __PT_R0(128,%r11),0(%r14)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- aghi %r1,-SP_SIZE
- mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- stosm __SF_EMPTY(%r15),0x04 # turn dat on
+ ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
mcck_return:
- mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
stpt __LC_EXIT_TIMER
-0: lpswe __LC_RETURN_MCCK_PSW # back to caller
-mcck_done:
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0: lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_MCCK_PSW
+
+mcck_panic:
+ lg %r14,__LC_PANIC_STACK
+ slgr %r14,%r15
+ srag %r14,%r14,PAGE_SHIFT
+ jz 0f
+ lg %r15,__LC_PANIC_STACK
+0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ j mcck_skip
/*
* Restart interruption handler, kick starter for additional CPUs
@@ -818,17 +725,18 @@ restart_base:
stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
- lg %r15,__LC_SAVE_AREA+120 # load ksp
+ lghi %r10,__LC_GPREGS_SAVE_AREA
+ lg %r15,120(%r10) # load ksp
lghi %r10,__LC_CREGS_SAVE_AREA
- lctlg %c0,%c15,0(%r10) # get new ctl regs
+ lctlg %c0,%c15,0(%r10) # get new ctl regs
lghi %r10,__LC_AREGS_SAVE_AREA
lam %a0,%a15,0(%r10)
- lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+ lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone
lg %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
- stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
+ ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
brasl %r14,start_secondary
.align 8
restart_vtime:
@@ -852,16 +760,16 @@ restart_go:
# PSW restart interrupt handler
#
ENTRY(psw_restart_int_handler)
- stg %r15,__LC_SAVE_AREA+120(%r0) # save r15
+ stg %r15,__LC_SAVE_AREA_RESTART
larl %r15,restart_stack # load restart stack
lg %r15,0(%r15)
- aghi %r15,-SP_SIZE # make room for pt_regs
- stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack
- mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack
- mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0
+ aghi %r15,-__PT_SIZE # create pt_regs on stack
+ stmg %r0,%r14,__PT_R0(%r15)
+ mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+ mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
brasl %r14,do_restart
-
larl %r14,restart_psw_crash # load disabled wait PSW if
lpswe 0(%r14) # do_restart returns
.align 8
@@ -877,172 +785,153 @@ restart_psw_crash:
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r15,__LC_PANIC_STACK # change to panic stack
- aghi %r15,-SP_SIZE
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
- la %r1,__LC_SAVE_AREA
- chi %r12,__LC_SVC_OLD_PSW
- je 0f
- chi %r12,__LC_PGM_OLD_PSW
- je 0f
- la %r1,__LC_SAVE_AREA+40
-0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
- mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ lg %r11,__LC_PANIC_STACK # change to panic stack
+ aghi %r11,-__PT_SIZE # create pt_regs
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ lgr %r15,%r11
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
#endif
-cleanup_table_system_call:
- .quad system_call, sysc_do_svc
-cleanup_table_sysc_tif:
- .quad sysc_tif, sysc_restore
-cleanup_table_sysc_restore:
- .quad sysc_restore, sysc_done
-cleanup_table_io_tif:
- .quad io_tif, io_restore
-cleanup_table_io_restore:
- .quad io_restore, io_done
+ .align 8
+cleanup_table:
+ .quad system_call
+ .quad sysc_do_svc
+ .quad sysc_tif
+ .quad sysc_restore
+ .quad sysc_done
+ .quad io_tif
+ .quad io_restore
+ .quad io_done
cleanup_critical:
- clc 8(8,%r12),BASED(cleanup_table_system_call)
+ clg %r9,BASED(cleanup_table) # system_call
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_system_call+8)
+ clg %r9,BASED(cleanup_table+8) # sysc_do_svc
jl cleanup_system_call
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
+ clg %r9,BASED(cleanup_table+16) # sysc_tif
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
+ clg %r9,BASED(cleanup_table+24) # sysc_restore
jl cleanup_sysc_tif
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
+ clg %r9,BASED(cleanup_table+32) # sysc_done
jl cleanup_sysc_restore
-0:
- clc 8(8,%r12),BASED(cleanup_table_io_tif)
+ clg %r9,BASED(cleanup_table+40) # io_tif
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
+ clg %r9,BASED(cleanup_table+48) # io_restore
jl cleanup_io_tif
-0:
- clc 8(8,%r12),BASED(cleanup_table_io_restore)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
+ clg %r9,BASED(cleanup_table+56) # io_done
jl cleanup_io_restore
-0:
- br %r14
+0: br %r14
+
cleanup_system_call:
- mvc __LC_RETURN_PSW(16),0(%r12)
- clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+ # check if stpt has been executed
+ clg %r9,BASED(cleanup_system_call_insn)
jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
- cghi %r12,__LC_MCK_OLD_PSW
- je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0: cghi %r12,__LC_MCK_OLD_PSW
- la %r12,__LC_SAVE_AREA+80
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
- la %r12,__LC_SAVE_AREA+40
-0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
- jhe cleanup_vtime
- clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: # check if stmg has been executed
+ clg %r9,BASED(cleanup_system_call_insn+8)
jh 0f
- mvc __LC_SAVE_AREA(40),0(%r12)
-0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
- aghi %r15,-SP_SIZE # make room for registers & psw
- stg %r15,32(%r12)
- stg %r11,0(%r12)
- CREATE_STACK_FRAME __LC_SAVE_AREA
- mvc 8(8,%r12),__LC_THREAD_INFO
- lg %r12,__LC_THREAD_INFO
- mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC
- oi __TI_flags+7(%r12),_TIF_SYSCALL
-cleanup_vtime:
- clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
- jhe cleanup_stime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-cleanup_stime:
- clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
- jh cleanup_update
- UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-cleanup_update:
+ mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
+0: # check if base register setup + TIF bit load has been done
+ clg %r9,BASED(cleanup_system_call_insn+16)
+ jhe 0f
+ # set up saved registers r10 and r12
+ stg %r10,16(%r11) # r10 last break
+ stg %r12,32(%r11) # r12 thread-info pointer
+0: # check if the user time update has been done
+ clg %r9,BASED(cleanup_system_call_insn+24)
+ jh 0f
+ lg %r15,__LC_EXIT_TIMER
+ slg %r15,__LC_SYNC_ENTER_TIMER
+ alg %r15,__LC_USER_TIMER
+ stg %r15,__LC_USER_TIMER
+0: # check if the system time update has been done
+ clg %r9,BASED(cleanup_system_call_insn+32)
+ jh 0f
+ lg %r15,__LC_LAST_UPDATE_TIMER
+ slg %r15,__LC_EXIT_TIMER
+ alg %r15,__LC_SYSTEM_TIMER
+ stg %r15,__LC_SYSTEM_TIMER
+0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- srag %r12,%r11,23
- lg %r12,__LC_THREAD_INFO
+ # do LAST_BREAK
+ lg %r9,16(%r11)
+ srag %r9,%r9,23
jz 0f
- stg %r11,__TI_last_break(%r12)
-0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
- la %r12,__LC_RETURN_PSW
+ mvc __TI_last_break(8,%r12),16(%r11)
+0: # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
+ aghi %r15,-__PT_SIZE
+ stg %r15,24(%r11) # r11 pt_regs pointer
+ # fill pt_regs
+ mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r15)
+ mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ # set up saved register r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r15,56(%r11) # r15 stack pointer
+ # set new psw address and exit
+ larl %r9,sysc_do_svc
br %r14
cleanup_system_call_insn:
- .quad sysc_saveall
.quad system_call
- .quad sysc_vtime
- .quad sysc_stime
- .quad sysc_update
+ .quad sysc_stmg
+ .quad sysc_per
+ .quad sysc_vtime+18
+ .quad sysc_vtime+42
cleanup_sysc_tif:
- mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
- la %r12,__LC_RETURN_PSW
+ larl %r9,sysc_tif
br %r14
cleanup_sysc_restore:
- clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
- je 2f
- clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
- jhe 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
- cghi %r12,__LC_MCK_OLD_PSW
+ clg %r9,BASED(cleanup_sysc_restore_insn)
je 0f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
-0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
- cghi %r12,__LC_MCK_OLD_PSW
- la %r12,__LC_SAVE_AREA+80
- je 1f
- la %r12,__LC_SAVE_AREA+40
-1: mvc 0(40,%r12),SP_R11(%r15)
- lmg %r0,%r10,SP_R0(%r15)
- lg %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+ lg %r9,24(%r11) # get saved pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
cleanup_sysc_restore_insn:
.quad sysc_done - 4
- .quad sysc_done - 16
cleanup_io_tif:
- mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
- la %r12,__LC_RETURN_PSW
+ larl %r9,io_tif
br %r14
cleanup_io_restore:
- clc 8(8,%r12),BASED(cleanup_io_restore_insn)
- je 1f
- clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
- jhe 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
- mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
- lmg %r0,%r10,SP_R0(%r15)
- lg %r15,SP_R15(%r15)
-1: la %r12,__LC_RETURN_PSW
+ clg %r9,BASED(cleanup_io_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved r11 pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ ni __LC_RETURN_PSW+1,0xfd # clear wait state bit
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
cleanup_io_restore_insn:
.quad io_done - 4
- .quad io_done - 16
/*
* Integer constants
*/
- .align 4
+ .align 8
.Lcritical_start:
- .quad __critical_start
-.Lcritical_end:
- .quad __critical_end
+ .quad __critical_start
+.Lcritical_length:
+ .quad __critical_end - __critical_start
+
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
/*
@@ -1054,6 +943,7 @@ ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
oi __TI_flags+6(%r14),_TIF_SIE>>8
@@ -1070,7 +960,7 @@ sie_gmap:
SPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
sie_done:
- SPP __LC_CMF_HPP # set host id
+ SPP __SF_EMPTY+16(%r15) # set host id
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
sie_exit:
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -1093,8 +983,10 @@ sie_fault:
.align 8
.Lsie_loop:
.quad sie_loop
-.Lsie_done:
- .quad sie_done
+.Lsie_length:
+ .quad sie_done - sie_loop
+.Lhost_id:
+ .quad 0
.section __ex_table,"a"
.quad sie_loop,sie_fault
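
Before moving on to head.S, one remark on the rewritten critical-section cleanup in the entry64.S hunk above: the five separate cleanup_table_* pairs are replaced by one eight-entry cleanup_table, and %r9 (the interrupted instruction address) is compared against consecutive entries, so each pair of neighbouring labels forms a half-open range that selects one fixup routine. A rough stand-alone C sketch of that range check (label addresses invented, only their ascending order matters):

#include <stdio.h>

int main(void)
{
	/* system_call, sysc_do_svc, sysc_tif, sysc_restore, sysc_done,
	 * io_tif, io_restore, io_done: ascending order, values invented */
	unsigned long t[8] = { 0x100, 0x140, 0x200, 0x240, 0x280,
			       0x300, 0x340, 0x380 };
	unsigned long addr = 0x250;		/* stands in for %r9 */

	if (addr >= t[0] && addr < t[1])
		puts("cleanup_system_call");	/* system_call..sysc_do_svc */
	else if (addr >= t[2] && addr < t[3])
		puts("cleanup_sysc_tif");	/* sysc_tif..sysc_restore */
	else if (addr >= t[3] && addr < t[4])
		puts("cleanup_sysc_restore");	/* sysc_restore..sysc_done */
	else if (addr >= t[5] && addr < t[6])
		puts("cleanup_io_tif");		/* io_tif..io_restore */
	else if (addr >= t[6] && addr < t[7])
		puts("cleanup_io_restore");	/* io_restore..io_done */
	else
		puts("outside the critical ranges");
	return 0;
}

With addr = 0x250 the sketch prints cleanup_sysc_restore, matching the sysc_restore..sysc_done range handled by the assembly.
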
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 900068d2bf9..c27a0727f93 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -329,8 +329,8 @@ iplstart:
#
# reset files in VM reader
#
- stidp __LC_SAVE_AREA # store cpuid
- tm __LC_SAVE_AREA,0xff # running VM ?
+ stidp __LC_SAVE_AREA_SYNC # store cpuid
+ tm __LC_SAVE_AREA_SYNC,0xff # running VM ?
bno .Lnoreset
la %r2,.Lreset
lhi %r3,26
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3cd0f25ab01..47b168fb29c 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -208,6 +208,7 @@ void machine_kexec_cleanup(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_SYMBOL(lowcore_ptr);
+ VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 19b4568f4ce..22d502e885e 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -64,70 +64,82 @@ void detect_memory_layout(struct mem_chunk chunk[])
EXPORT_SYMBOL(detect_memory_layout);
/*
+ * Move memory chunks array from index "from" to index "to"
+ */
+static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
+{
+ int cnt = MEMORY_CHUNKS - to;
+
+ memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
+}
+
+/*
+ * Initialize memory chunk
+ */
+static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
+ unsigned long size, int type)
+{
+ chunk->type = type;
+ chunk->addr = addr;
+ chunk->size = size;
+}
+
+/*
* Create memory hole with given address, size, and type
*/
-void create_mem_hole(struct mem_chunk chunks[], unsigned long addr,
+void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
unsigned long size, int type)
{
- unsigned long start, end, new_size;
- int i;
+ unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
+ int i, ch_type;
for (i = 0; i < MEMORY_CHUNKS; i++) {
- if (chunks[i].size == 0)
- continue;
- if (addr + size < chunks[i].addr)
- continue;
- if (addr >= chunks[i].addr + chunks[i].size)
+ if (chunk[i].size == 0)
continue;
- start = max(addr, chunks[i].addr);
- end = min(addr + size, chunks[i].addr + chunks[i].size);
- new_size = end - start;
- if (new_size == 0)
- continue;
- if (start == chunks[i].addr &&
- end == chunks[i].addr + chunks[i].size) {
- /* Remove chunk */
- chunks[i].type = type;
- } else if (start == chunks[i].addr) {
- /* Make chunk smaller at start */
- if (i >= MEMORY_CHUNKS - 1)
- panic("Unable to create memory hole");
- memmove(&chunks[i + 1], &chunks[i],
- sizeof(struct mem_chunk) *
- (MEMORY_CHUNKS - (i + 1)));
- chunks[i + 1].addr = chunks[i].addr + new_size;
- chunks[i + 1].size = chunks[i].size - new_size;
- chunks[i].size = new_size;
- chunks[i].type = type;
- i += 1;
- } else if (end == chunks[i].addr + chunks[i].size) {
- /* Make chunk smaller at end */
- if (i >= MEMORY_CHUNKS - 1)
- panic("Unable to create memory hole");
- memmove(&chunks[i + 1], &chunks[i],
- sizeof(struct mem_chunk) *
- (MEMORY_CHUNKS - (i + 1)));
- chunks[i + 1].addr = start;
- chunks[i + 1].size = new_size;
- chunks[i + 1].type = type;
- chunks[i].size -= new_size;
+
+ /* Define chunk properties */
+ ch_start = chunk[i].addr;
+ ch_size = chunk[i].size;
+ ch_end = ch_start + ch_size - 1;
+ ch_type = chunk[i].type;
+
+ /* Is memory chunk hit by memory hole? */
+ if (addr + size <= ch_start)
+ continue; /* No: memory hole in front of chunk */
+ if (addr > ch_end)
+ continue; /* No: memory hole after chunk */
+
+ /* Yes: Define local hole properties */
+ lh_start = max(addr, chunk[i].addr);
+ lh_end = min(addr + size - 1, ch_end);
+ lh_size = lh_end - lh_start + 1;
+
+ if (lh_start == ch_start && lh_end == ch_end) {
+ /* Hole covers complete memory chunk */
+ mem_chunk_init(&chunk[i], lh_start, lh_size, type);
+ } else if (lh_end == ch_end) {
+ /* Hole starts in memory chunk and covers chunk end */
+ mem_chunk_move(chunk, i + 1, i);
+ mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
+ ch_type);
+ mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
i += 1;
+ } else if (lh_start == ch_start) {
+ /* Hole ends in memory chunk */
+ mem_chunk_move(chunk, i + 1, i);
+ mem_chunk_init(&chunk[i], lh_start, lh_size, type);
+ mem_chunk_init(&chunk[i + 1], lh_end + 1,
+ ch_size - lh_size, ch_type);
+ break;
} else {
- /* Create memory hole */
- if (i >= MEMORY_CHUNKS - 2)
- panic("Unable to create memory hole");
- memmove(&chunks[i + 2], &chunks[i],
- sizeof(struct mem_chunk) *
- (MEMORY_CHUNKS - (i + 2)));
- chunks[i + 1].addr = addr;
- chunks[i + 1].size = size;
- chunks[i + 1].type = type;
- chunks[i + 2].addr = addr + size;
- chunks[i + 2].size =
- chunks[i].addr + chunks[i].size - (addr + size);
- chunks[i + 2].type = chunks[i].type;
- chunks[i].size = addr - chunks[i].addr;
- i += 2;
+ /* Hole splits memory chunk */
+ mem_chunk_move(chunk, i + 2, i);
+ mem_chunk_init(&chunk[i], ch_start,
+ lh_start - ch_start, ch_type);
+ mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
+ mem_chunk_init(&chunk[i + 2], lh_end + 1,
+ ch_end - lh_end, ch_type);
+ break;
}
}
}
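
To make the rewritten create_mem_hole() above concrete, here is a stand-alone sketch of its "hole splits memory chunk" case. The field names mirror struct mem_chunk from the patch; the addresses, sizes and types are invented for the example, and the expected result is written out by hand rather than produced by calling the kernel function:

#include <stdio.h>

struct mem_chunk { unsigned long long addr, size; int type; };

int main(void)
{
	/* one 1 GiB chunk of type 0 ... */
	struct mem_chunk before = { 0x00000000ULL, 0x40000000ULL, 0 };
	/* ... with a 128 MiB hole of type 1 punched in at 256 MiB */
	unsigned long long hole_addr = 0x10000000ULL, hole_size = 0x08000000ULL;

	/* create_mem_hole() turns the single chunk into three entries: */
	struct mem_chunk after[3] = {
		{ before.addr, hole_addr - before.addr, before.type },
		{ hole_addr, hole_size, 1 },
		{ hole_addr + hole_size,
		  before.addr + before.size - (hole_addr + hole_size),
		  before.type },
	};

	for (int i = 0; i < 3; i++)
		printf("chunk %d: addr=%#010llx size=%#010llx type=%d\n",
		       i, after[i].addr, after[i].size, after[i].type);
	return 0;
}

The other cases either rewrite the chunk in place (hole covers the whole chunk) or shift the array by one entry instead of two, which is what the new mem_chunk_move() helper is for.
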
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9451b210a1b..3201ae44799 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -91,10 +91,12 @@ static void default_idle(void)
void cpu_idle(void)
{
for (;;) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched())
default_idle();
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 450931a45b6..573bc29551e 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
- if (addr == (addr_t) &dummy->regs.psw.addr)
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
-
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
/* Transfer 31 bit amode bit to psw mask. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
} else {
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
#endif
static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_last_break_get,
+ .set = s390_last_break_set,
},
#endif
[REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_compat_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_compat_last_break_get,
+ .set = s390_compat_last_break_set,
},
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_S390_SYSTEM_CALL,
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 732a793ec53..36b32658fb2 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -17,11 +17,11 @@
#
ENTRY(store_status)
/* Save register one and load save area base */
- stg %r1,__LC_SAVE_AREA+120(%r0)
+ stg %r1,__LC_SAVE_AREA_RESTART
lghi %r1,SAVE_AREA_BASE
/* General purpose registers */
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- lg %r2,__LC_SAVE_AREA+120(%r0)
+ lg %r2,__LC_SAVE_AREA_RESTART
stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
/* Control registers */
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e58a462949b..354de0763ef 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
@@ -94,6 +95,15 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
int __initdata memory_end_set;
unsigned long __initdata memory_end;
+unsigned long VMALLOC_START;
+EXPORT_SYMBOL(VMALLOC_START);
+
+unsigned long VMALLOC_END;
+EXPORT_SYMBOL(VMALLOC_END);
+
+struct page *vmemmap;
+EXPORT_SYMBOL(vmemmap);
+
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
@@ -277,6 +287,15 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+static int __init parse_vmalloc(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+ VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
+ return 0;
+}
+early_param("vmalloc", parse_vmalloc);
+
unsigned int user_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(user_mode);
@@ -382,7 +401,6 @@ setup_lowcore(void)
__ctl_set_bit(14, 29);
}
#else
- lc->cmf_hpp = -1ULL;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -478,8 +496,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
static void __init setup_memory_end(void)
{
- unsigned long memory_size;
- unsigned long max_mem;
+ unsigned long vmax, vmalloc_size, tmp;
int i;
@@ -489,12 +506,9 @@ static void __init setup_memory_end(void)
memory_end_set = 1;
}
#endif
- memory_size = 0;
+ real_memory_size = 0;
memory_end &= PAGE_MASK;
- max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
- memory_end = min(max_mem, memory_end);
-
/*
* Make sure all chunks are MAX_ORDER aligned so we don't need the
* extra checks that HOLES_IN_ZONE would require.
@@ -514,23 +528,48 @@ static void __init setup_memory_end(void)
chunk->addr = start;
chunk->size = end - start;
}
+ real_memory_size = max(real_memory_size,
+ chunk->addr + chunk->size);
}
+ /* Choose kernel address space layout: 2, 3, or 4 levels. */
+#ifdef CONFIG_64BIT
+ vmalloc_size = VMALLOC_END ?: 128UL << 30;
+ tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+ tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
+ if (tmp <= (1UL << 42))
+ vmax = 1UL << 42; /* 3-level kernel page table */
+ else
+ vmax = 1UL << 53; /* 4-level kernel page table */
+#else
+ vmalloc_size = VMALLOC_END ?: 96UL << 20;
+ vmax = 1UL << 31; /* 2-level kernel page table */
+#endif
+ /* vmalloc area is at the end of the kernel address space. */
+ VMALLOC_END = vmax;
+ VMALLOC_START = vmax - vmalloc_size;
+
+ /* Split remaining virtual space between 1:1 mapping & vmemmap array */
+ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+ tmp = VMALLOC_START - tmp * sizeof(struct page);
+ tmp &= ~((vmax >> 11) - 1); /* align to page table level */
+ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
+ vmemmap = (struct page *) tmp;
+
+ /* Take care that memory_end is set and <= vmemmap */
+ memory_end = min(memory_end ?: real_memory_size, tmp);
+
+ /* Fixup memory chunk array to fit into 0..memory_end */
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
- real_memory_size = max(real_memory_size,
- chunk->addr + chunk->size);
- if (chunk->addr >= max_mem) {
+ if (chunk->addr >= memory_end) {
memset(chunk, 0, sizeof(*chunk));
continue;
}
- if (chunk->addr + chunk->size > max_mem)
- chunk->size = max_mem - chunk->addr;
- memory_size = max(memory_size, chunk->addr + chunk->size);
+ if (chunk->addr + chunk->size > memory_end)
+ chunk->size = memory_end - chunk->addr;
}
- if (!memory_end)
- memory_end = memory_size;
}
void *restart_stack __attribute__((__section__(".data")));
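
The page-table depth choice in the setup_memory_end() hunk above comes down to whether the identity mapping, the vmemmap array and the vmalloc area together fit below 4 TiB (1UL << 42). A minimal sketch of that arithmetic for a hypothetical 64 GiB machine, assuming 4 KiB pages, a 64-byte struct page and the default 128 GiB vmalloc area from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 4096ULL;		/* 4 KiB pages */
	unsigned long long page_struct = 64ULL;		/* assumed sizeof(struct page) */
	unsigned long long memory = 64ULL << 30;	/* hypothetical 64 GiB */
	unsigned long long vmalloc_size = 128ULL << 30;	/* default from the patch */

	/* same formula as in setup_memory_end() */
	unsigned long long tmp = memory / page_size;
	tmp = tmp * (page_struct + page_size) + vmalloc_size;

	if (tmp <= (1ULL << 42))
		printf("%llu GiB needed -> 3-level kernel page table\n", tmp >> 30);
	else
		printf("%llu GiB needed -> 4-level kernel page table\n", tmp >> 30);
	return 0;
}

With these numbers only about 193 GiB of virtual space is needed, comfortably below 4 TiB, so the 3-level layout with VMALLOC_END at 1UL << 42 is chosen.
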
@@ -579,7 +618,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
*msg = "first memory chunk must be at least crashkernel size";
return 0;
}
- if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+ if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
return OLDMEM_BASE;
for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
@@ -654,7 +693,6 @@ static int __init verify_crash_base(unsigned long crash_base,
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
int type)
{
-
create_mem_hole(memory_chunk, addr, size, type);
}
@@ -820,7 +858,8 @@ setup_memory(void)
end_chunk = min(end_chunk, end_pfn);
if (start_chunk >= end_chunk)
continue;
- add_active_range(0, start_chunk, end_chunk);
+ memblock_add_node(PFN_PHYS(start_chunk),
+ PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
for (; pfn < end_chunk; pfn++)
page_set_storage_key(PFN_PHYS(pfn),
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 05a85bc14c9..a8ba840294f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -302,9 +302,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* We forgot to include these in the sigcontext.
To avoid breaking binary compatibility, they are passed as args. */
- regs->gprs[4] = current->thread.trap_no;
- regs->gprs[5] = current->thread.prot_addr;
- regs->gprs[6] = task_thread_info(current)->last_break;
+ if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGTRAP || sig == SIGFPE) {
+ /* set extra registers only for synchronous signals */
+ regs->gprs[4] = regs->int_code & 127;
+ regs->gprs[5] = regs->int_parm_long;
+ regs->gprs[6] = task_thread_info(current)->last_break;
+ }
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -434,13 +438,13 @@ void do_signal(struct pt_regs *regs)
* call information.
*/
current_thread_info()->system_call =
- test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0;
+ test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
if (current_thread_info()->system_call) {
- regs->svc_code = current_thread_info()->system_call;
+ regs->int_code = current_thread_info()->system_call;
/* Check for system call restarting. */
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
@@ -457,12 +461,12 @@ void do_signal(struct pt_regs *regs)
regs->gprs[2] = regs->orig_gpr2;
regs->psw.addr =
__rewind_psw(regs->psw,
- regs->svc_code >> 16);
+ regs->int_code >> 16);
break;
}
- /* No longer in a system call */
- clear_thread_flag(TIF_SYSCALL);
}
+ /* No longer in a system call */
+ clear_thread_flag(TIF_SYSCALL);
if ((is_compat_task() ?
handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,12 +490,13 @@ void do_signal(struct pt_regs *regs)
}
/* No handlers present - check for system call restart */
+ clear_thread_flag(TIF_SYSCALL);
if (current_thread_info()->system_call) {
- regs->svc_code = current_thread_info()->system_call;
+ regs->int_code = current_thread_info()->system_call;
switch (regs->gprs[2]) {
case -ERESTART_RESTARTBLOCK:
/* Restart with sys_restart_syscall */
- regs->svc_code = __NR_restart_syscall;
+ regs->int_code = __NR_restart_syscall;
/* fallthrough */
case -ERESTARTNOHAND:
case -ERESTARTSYS:
@@ -500,9 +505,6 @@ void do_signal(struct pt_regs *regs)
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
break;
- default:
- clear_thread_flag(TIF_SYSCALL);
- break;
}
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3ea872890da..2398ce6b15a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -69,9 +69,7 @@ enum s390_cpu_state {
};
DEFINE_MUTEX(smp_cpu_state_mutex);
-int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
-static int cpu_management;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -149,29 +147,59 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
sp -= sizeof(struct pt_regs);
regs = (struct pt_regs *) sp;
memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
- regs->psw = lc->psw_save_area;
+ regs->psw = current_lc->psw_save_area;
sp -= STACK_FRAME_OVERHEAD;
sf = (struct stack_frame *) sp;
- sf->back_chain = regs->gprs[15];
+ sf->back_chain = 0;
smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}
+static void smp_stop_cpu(void)
+{
+ while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
+ cpu_relax();
+}
+
void smp_send_stop(void)
{
- int cpu, rc;
+ cpumask_t cpumask;
+ int cpu;
+ u64 end;
/* Disable all interrupts/machine checks */
__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
trace_hardirqs_off();
- /* stop all processors */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- do {
- rc = sigp(cpu, sigp_stop);
- } while (rc == sigp_busy);
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
+ if (oops_in_progress) {
+ /*
+ * Give the other cpus the opportunity to complete
+ * outstanding interrupts before stopping them.
+ */
+ end = get_clock() + (1000000UL << 12);
+ for_each_cpu(cpu, &cpumask) {
+ set_bit(ec_stop_cpu, (unsigned long *)
+ &lowcore_ptr[cpu]->ext_call_fast);
+ while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
+ get_clock() < end)
+ cpu_relax();
+ }
+ while (get_clock() < end) {
+ for_each_cpu(cpu, &cpumask)
+ if (cpu_stopped(cpu))
+ cpumask_clear_cpu(cpu, &cpumask);
+ if (cpumask_empty(&cpumask))
+ break;
+ cpu_relax();
+ }
+ }
+ /* stop all processors */
+ for_each_cpu(cpu, &cpumask) {
+ while (sigp(cpu, sigp_stop) == sigp_busy)
+ cpu_relax();
while (!cpu_stopped(cpu))
cpu_relax();
}
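
A note on the timeout in the new smp_send_stop() above: get_clock() returns raw s390 TOD clock units, where bit 51 corresponds to one microsecond, i.e. 4096 units per microsecond, so "1000000UL << 12" should work out to roughly a one second grace period for the other cpus to drain their pending interrupts. A quick sanity check of that conversion:

#include <stdio.h>

int main(void)
{
	unsigned long long tod_per_us = 1ULL << 12;	/* TOD bit 51 = 1 us */
	unsigned long long grace = 1000000ULL << 12;	/* value from the patch */

	printf("grace period: %llu us (~%llu s)\n",
	       grace / tod_per_us, grace / tod_per_us / 1000000ULL);
	return 0;
}
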
@@ -187,7 +215,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
{
unsigned long bits;
- if (ext_int_code == 0x1202)
+ if ((ext_int_code & 0xffff) == 0x1202)
kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
else
kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
@@ -196,6 +224,9 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
*/
bits = xchg(&S390_lowcore.ext_call_fast, 0);
+ if (test_bit(ec_stop_cpu, &bits))
+ smp_stop_cpu();
+
if (test_bit(ec_schedule, &bits))
scheduler_ipi();
@@ -204,6 +235,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
+
}
/*
@@ -369,7 +401,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
- smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
if (!cpu_stopped(logical_cpu))
continue;
set_cpu_present(logical_cpu, true);
@@ -403,7 +435,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
- smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
set_cpu_present(logical_cpu, true);
if (cpu >= info->configured)
smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -656,7 +688,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
- sizeof(struct stack_frame));
memset(sf, 0, sizeof(struct stack_frame));
sf->gprs[9] = (unsigned long) sf;
- cpu_lowcore->save_area[15] = (unsigned long) sf;
+ cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
atomic_inc(&init_mm.context.attach_count);
asm volatile(
@@ -806,7 +838,7 @@ void __init smp_prepare_boot_cpu(void)
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
smp_cpu_state[0] = CPU_STATE_CONFIGURED;
- smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -831,8 +863,8 @@ int setup_profiling_timer(unsigned int multiplier)
}
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t cpu_configure_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t cpu_configure_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
ssize_t count;
@@ -842,8 +874,8 @@ static ssize_t cpu_configure_show(struct sys_device *dev,
return count;
}
-static ssize_t cpu_configure_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t cpu_configure_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int cpu = dev->id;
@@ -868,7 +900,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_STANDBY;
- smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ topology_expect_change();
}
}
break;
@@ -877,7 +910,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
- smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ topology_expect_change();
}
}
break;
@@ -889,52 +923,21 @@ out:
put_online_cpus();
return rc ? rc : count;
}
-static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
-static ssize_t cpu_polarization_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
-{
- int cpu = dev->id;
- ssize_t count;
-
- mutex_lock(&smp_cpu_state_mutex);
- switch (smp_cpu_polarization[cpu]) {
- case POLARIZATION_HRZ:
- count = sprintf(buf, "horizontal\n");
- break;
- case POLARIZATION_VL:
- count = sprintf(buf, "vertical:low\n");
- break;
- case POLARIZATION_VM:
- count = sprintf(buf, "vertical:medium\n");
- break;
- case POLARIZATION_VH:
- count = sprintf(buf, "vertical:high\n");
- break;
- default:
- count = sprintf(buf, "unknown\n");
- break;
- }
- mutex_unlock(&smp_cpu_state_mutex);
- return count;
-}
-static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
-
-static ssize_t show_cpu_address(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_cpu_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
-static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
-
+static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
- &attr_configure.attr,
+ &dev_attr_configure.attr,
#endif
- &attr_address.attr,
- &attr_polarization.attr,
+ &dev_attr_address.attr,
NULL,
};
@@ -942,8 +945,8 @@ static struct attribute_group cpu_common_attr_group = {
.attrs = cpu_common_attrs,
};
-static ssize_t show_capability(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_capability(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned int capability;
int rc;
@@ -953,10 +956,10 @@ static ssize_t show_capability(struct sys_device *dev,
return rc;
return sprintf(buf, "%u\n", capability);
}
-static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
+static DEVICE_ATTR(capability, 0444, show_capability, NULL);
-static ssize_t show_idle_count(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_idle_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle;
unsigned long long idle_count;
@@ -976,10 +979,10 @@ repeat:
goto repeat;
return sprintf(buf, "%llu\n", idle_count);
}
-static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
+static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
-static ssize_t show_idle_time(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_idle_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
@@ -1001,12 +1004,12 @@ repeat:
goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
-static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
- &attr_capability.attr,
- &attr_idle_count.attr,
- &attr_idle_time_us.attr,
+ &dev_attr_capability.attr,
+ &dev_attr_idle_count.attr,
+ &dev_attr_idle_time_us.attr,
NULL,
};
@@ -1019,7 +1022,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
{
unsigned int cpu = (unsigned int)(long)hcpu;
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
+ struct device *s = &c->dev;
struct s390_idle_data *idle;
int err = 0;
@@ -1045,7 +1048,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
+ struct device *s = &c->dev;
int rc;
c->hotpluggable = 1;
@@ -1055,11 +1058,20 @@ static int __devinit smp_add_present_cpu(int cpu)
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
- if (!cpu_online(cpu))
- goto out;
- rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- if (!rc)
- return 0;
+ if (cpu_online(cpu)) {
+ rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+ if (rc)
+ goto out_online;
+ }
+ rc = topology_cpu_init(c);
+ if (rc)
+ goto out_topology;
+ return 0;
+
+out_topology:
+ if (cpu_online(cpu))
+ sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+out_online:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
@@ -1098,8 +1110,8 @@ out:
return rc;
}
-static ssize_t __ref rescan_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t __ref rescan_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -1108,64 +1120,19 @@ static ssize_t __ref rescan_store(struct sysdev_class *class,
rc = smp_rescan_cpus();
return rc ? rc : count;
}
-static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
+static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
-static ssize_t dispatching_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- char *buf)
-{
- ssize_t count;
-
- mutex_lock(&smp_cpu_state_mutex);
- count = sprintf(buf, "%d\n", cpu_management);
- mutex_unlock(&smp_cpu_state_mutex);
- return count;
-}
-
-static ssize_t dispatching_store(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
- const char *buf,
- size_t count)
-{
- int val, rc;
- char delim;
-
- if (sscanf(buf, "%d %c", &val, &delim) != 1)
- return -EINVAL;
- if (val != 0 && val != 1)
- return -EINVAL;
- rc = 0;
- get_online_cpus();
- mutex_lock(&smp_cpu_state_mutex);
- if (cpu_management == val)
- goto out;
- rc = topology_set_cpu_management(val);
- if (!rc)
- cpu_management = val;
-out:
- mutex_unlock(&smp_cpu_state_mutex);
- put_online_cpus();
- return rc ? rc : count;
-}
-static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
- dispatching_store);
-
-static int __init topology_init(void)
+static int __init s390_smp_init(void)
{
- int cpu;
- int rc;
+ int cpu, rc;
register_cpu_notifier(&smp_cpu_nb);
-
#ifdef CONFIG_HOTPLUG_CPU
- rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
+ rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
if (rc)
return rc;
#endif
- rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
- if (rc)
- return rc;
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
@@ -1173,4 +1140,4 @@ static int __init topology_init(void)
}
return 0;
}
-subsys_initcall(topology_init);
+subsys_initcall(s390_smp_init);
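
For readers following the sysdev-to-device conversion in the smp.c hunks above, a minimal sketch of the pattern they converge on: a DEVICE_ATTR plus device_create_file() against cpu_subsys.dev_root replaces SYSDEV_CLASS_ATTR and sysdev_class_create_file(). This is illustration only, not part of the patch; the attribute name and value are invented.

	/*
	 * Illustration only: the struct device based attribute pattern
	 * the smp.c hunks convert to (invented attribute name/value).
	 */
	#include <linux/device.h>
	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}
	static DEVICE_ATTR(example, 0444, example_show, NULL);

	static int __init example_init(void)
	{
		/* cpu_subsys.dev_root replaces the old cpu_sysdev_class anchor */
		return device_create_file(cpu_subsys.dev_root, &dev_attr_example);
	}
	device_initcall(example_init);
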
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 476081440df..78ea1948ff5 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -60,74 +60,22 @@ out:
}
/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
*/
SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
unsigned long, third, void __user *, ptr)
{
- struct ipc_kludge tmp;
- int ret;
-
- switch (call) {
- case SEMOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- (unsigned)second, NULL);
- case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- (unsigned)second,
- (const struct timespec __user *) third);
- case SEMGET:
- return sys_semget(first, (int)second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void __user * __user *) ptr))
- return -EFAULT;
- return sys_semctl(first, (int)second, third, fourth);
- }
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- (size_t)second, third);
- break;
- case MSGRCV:
- if (!ptr)
- return -EINVAL;
- if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
- sizeof (struct ipc_kludge)))
- return -EFAULT;
- return sys_msgrcv (first, tmp.msgp,
- (size_t)second, tmp.msgtyp, third);
- case MSGGET:
- return sys_msgget((key_t)first, (int)second);
- case MSGCTL:
- return sys_msgctl(first, (int)second,
- (struct msqid_ds __user *)ptr);
-
- case SHMAT: {
- ulong raddr;
- ret = do_shmat(first, (char __user *)ptr,
- (int)second, &raddr);
- if (ret)
- return ret;
- return put_user (raddr, (ulong __user *) third);
- break;
- }
- case SHMDT:
- return sys_shmdt ((char __user *)ptr);
- case SHMGET:
- return sys_shmget(first, (size_t)second, third);
- case SHMCTL:
- return sys_shmctl(first, (int)second,
- (struct shmid_ds __user *) ptr);
- default:
- return -ENOSYS;
-
- }
-
- return -EINVAL;
+ if (call >> 16)
+ return -EINVAL;
+ /* The s390 sys_ipc variant has only five parameters instead of six
+ * like the generic variant. The only difference is the handling of
+ * the SEMTIMEDOP subcall where on s390 the third parameter is used
+ * as a pointer to a struct timespec where the generic variant uses
+ * the fifth parameter.
+ * Therefore we can call the generic variant by simply passing the
+ * third parameter also as fifth parameter.
+ */
+ return sys_ipc(call, first, second, third, ptr, third);
}
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ebbfab3c6e5..fa02f443f5f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -27,7 +27,7 @@
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <linux/time.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
@@ -1116,34 +1116,35 @@ out_unlock:
/*
* Sysfs interface functions
*/
-static struct sysdev_class etr_sysclass = {
- .name = "etr",
+static struct bus_type etr_subsys = {
+ .name = "etr",
+ .dev_name = "etr",
};
-static struct sys_device etr_port0_dev = {
+static struct device etr_port0_dev = {
.id = 0,
- .cls = &etr_sysclass,
+ .bus = &etr_subsys,
};
-static struct sys_device etr_port1_dev = {
+static struct device etr_port1_dev = {
.id = 1,
- .cls = &etr_sysclass,
+ .bus = &etr_subsys,
};
/*
- * ETR class attributes
+ * ETR subsys attributes
*/
-static ssize_t etr_stepping_port_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t etr_stepping_port_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", etr_port0.esw.p);
}
-static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
+static DEVICE_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
-static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t etr_stepping_mode_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
char *mode_str;
@@ -1157,12 +1158,12 @@ static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
return sprintf(buf, "%s\n", mode_str);
}
-static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
+static DEVICE_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
/*
* ETR port attributes
*/
-static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
+static inline struct etr_aib *etr_aib_from_dev(struct device *dev)
{
if (dev == &etr_port0_dev)
return etr_port0_online ? &etr_port0 : NULL;
@@ -1170,8 +1171,8 @@ static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
return etr_port1_online ? &etr_port1 : NULL;
}
-static ssize_t etr_online_show(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t etr_online_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
unsigned int online;
@@ -1180,8 +1181,8 @@ static ssize_t etr_online_show(struct sys_device *dev,
return sprintf(buf, "%i\n", online);
}
-static ssize_t etr_online_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t etr_online_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int value;
@@ -1218,20 +1219,20 @@ out:
return count;
}
-static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
+static DEVICE_ATTR(online, 0600, etr_online_show, etr_online_store);
-static ssize_t etr_stepping_control_show(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t etr_stepping_control_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
etr_eacr.e0 : etr_eacr.e1);
}
-static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
+static DEVICE_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
-static ssize_t etr_mode_code_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_mode_code_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
if (!etr_port0_online && !etr_port1_online)
/* Status word is not uptodate if both ports are offline. */
@@ -1240,10 +1241,10 @@ static ssize_t etr_mode_code_show(struct sys_device *dev,
etr_port0.esw.psc0 : etr_port0.esw.psc1);
}
-static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
+static DEVICE_ATTR(state_code, 0400, etr_mode_code_show, NULL);
-static ssize_t etr_untuned_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_untuned_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1252,10 +1253,10 @@ static ssize_t etr_untuned_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf1.u);
}
-static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
+static DEVICE_ATTR(untuned, 0400, etr_untuned_show, NULL);
-static ssize_t etr_network_id_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_network_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1264,10 +1265,10 @@ static ssize_t etr_network_id_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf1.net_id);
}
-static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
+static DEVICE_ATTR(network, 0400, etr_network_id_show, NULL);
-static ssize_t etr_id_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1276,10 +1277,10 @@ static ssize_t etr_id_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf1.etr_id);
}
-static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
+static DEVICE_ATTR(id, 0400, etr_id_show, NULL);
-static ssize_t etr_port_number_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_port_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1288,10 +1289,10 @@ static ssize_t etr_port_number_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf1.etr_pn);
}
-static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
+static DEVICE_ATTR(port, 0400, etr_port_number_show, NULL);
-static ssize_t etr_coupled_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_coupled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1300,10 +1301,10 @@ static ssize_t etr_coupled_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf3.c);
}
-static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
+static DEVICE_ATTR(coupled, 0400, etr_coupled_show, NULL);
-static ssize_t etr_local_time_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_local_time_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1312,10 +1313,10 @@ static ssize_t etr_local_time_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf3.blto);
}
-static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
+static DEVICE_ATTR(local_time, 0400, etr_local_time_show, NULL);
-static ssize_t etr_utc_offset_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t etr_utc_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct etr_aib *aib = etr_aib_from_dev(dev);
@@ -1324,64 +1325,64 @@ static ssize_t etr_utc_offset_show(struct sys_device *dev,
return sprintf(buf, "%i\n", aib->edf3.buo);
}
-static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
+static DEVICE_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
-static struct sysdev_attribute *etr_port_attributes[] = {
- &attr_online,
- &attr_stepping_control,
- &attr_state_code,
- &attr_untuned,
- &attr_network,
- &attr_id,
- &attr_port,
- &attr_coupled,
- &attr_local_time,
- &attr_utc_offset,
+static struct device_attribute *etr_port_attributes[] = {
+ &dev_attr_online,
+ &dev_attr_stepping_control,
+ &dev_attr_state_code,
+ &dev_attr_untuned,
+ &dev_attr_network,
+ &dev_attr_id,
+ &dev_attr_port,
+ &dev_attr_coupled,
+ &dev_attr_local_time,
+ &dev_attr_utc_offset,
NULL
};
-static int __init etr_register_port(struct sys_device *dev)
+static int __init etr_register_port(struct device *dev)
{
- struct sysdev_attribute **attr;
+ struct device_attribute **attr;
int rc;
- rc = sysdev_register(dev);
+ rc = device_register(dev);
if (rc)
goto out;
for (attr = etr_port_attributes; *attr; attr++) {
- rc = sysdev_create_file(dev, *attr);
+ rc = device_create_file(dev, *attr);
if (rc)
goto out_unreg;
}
return 0;
out_unreg:
for (; attr >= etr_port_attributes; attr--)
- sysdev_remove_file(dev, *attr);
- sysdev_unregister(dev);
+ device_remove_file(dev, *attr);
+ device_unregister(dev);
out:
return rc;
}
-static void __init etr_unregister_port(struct sys_device *dev)
+static void __init etr_unregister_port(struct device *dev)
{
- struct sysdev_attribute **attr;
+ struct device_attribute **attr;
for (attr = etr_port_attributes; *attr; attr++)
- sysdev_remove_file(dev, *attr);
- sysdev_unregister(dev);
+ device_remove_file(dev, *attr);
+ device_unregister(dev);
}
static int __init etr_init_sysfs(void)
{
int rc;
- rc = sysdev_class_register(&etr_sysclass);
+ rc = subsys_system_register(&etr_subsys, NULL);
if (rc)
goto out;
- rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
+ rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_port);
if (rc)
- goto out_unreg_class;
- rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
+ goto out_unreg_subsys;
+ rc = device_create_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
if (rc)
goto out_remove_stepping_port;
rc = etr_register_port(&etr_port0_dev);
@@ -1395,11 +1396,11 @@ static int __init etr_init_sysfs(void)
out_remove_port0:
etr_unregister_port(&etr_port0_dev);
out_remove_stepping_mode:
- sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
+ device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_mode);
out_remove_stepping_port:
- sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
-out_unreg_class:
- sysdev_class_unregister(&etr_sysclass);
+ device_remove_file(etr_subsys.dev_root, &dev_attr_stepping_port);
+out_unreg_subsys:
+ bus_unregister(&etr_subsys);
out:
return rc;
}
@@ -1599,14 +1600,15 @@ out_unlock:
}
/*
- * STP class sysfs interface functions
+ * STP subsys sysfs interface functions
*/
-static struct sysdev_class stp_sysclass = {
- .name = "stp",
+static struct bus_type stp_subsys = {
+ .name = "stp",
+ .dev_name = "stp",
};
-static ssize_t stp_ctn_id_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_ctn_id_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online)
@@ -1615,10 +1617,10 @@ static ssize_t stp_ctn_id_show(struct sysdev_class *class,
*(unsigned long long *) stp_info.ctnid);
}
-static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
+static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
-static ssize_t stp_ctn_type_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_ctn_type_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online)
@@ -1626,10 +1628,10 @@ static ssize_t stp_ctn_type_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", stp_info.ctn);
}
-static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
+static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
-static ssize_t stp_dst_offset_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_dst_offset_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x2000))
@@ -1637,10 +1639,10 @@ static ssize_t stp_dst_offset_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
}
-static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
+static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
-static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_leap_seconds_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x8000))
@@ -1648,10 +1650,10 @@ static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
}
-static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
+static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
-static ssize_t stp_stratum_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_stratum_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online)
@@ -1659,10 +1661,10 @@ static ssize_t stp_stratum_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
}
-static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
+static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
-static ssize_t stp_time_offset_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_time_offset_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x0800))
@@ -1670,10 +1672,10 @@ static ssize_t stp_time_offset_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", (int) stp_info.tto);
}
-static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
+static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
-static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_time_zone_offset_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online || !(stp_info.vbits & 0x4000))
@@ -1681,11 +1683,11 @@ static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
}
-static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
+static DEVICE_ATTR(time_zone_offset, 0400,
stp_time_zone_offset_show, NULL);
-static ssize_t stp_timing_mode_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_timing_mode_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online)
@@ -1693,10 +1695,10 @@ static ssize_t stp_timing_mode_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", stp_info.tmd);
}
-static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
+static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
-static ssize_t stp_timing_state_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_timing_state_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
if (!stp_online)
@@ -1704,17 +1706,17 @@ static ssize_t stp_timing_state_show(struct sysdev_class *class,
return sprintf(buf, "%i\n", stp_info.tst);
}
-static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
+static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
-static ssize_t stp_online_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_online_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", stp_online);
}
-static ssize_t stp_online_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t stp_online_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int value;
@@ -1736,47 +1738,47 @@ static ssize_t stp_online_store(struct sysdev_class *class,
}
/*
- * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
- * stp/online but attr_online already exists in this file ..
+ * Can't use DEVICE_ATTR because the attribute should be named
+ * stp/online but dev_attr_online already exists in this file ..
*/
-static struct sysdev_class_attribute attr_stp_online = {
+static struct device_attribute dev_attr_stp_online = {
.attr = { .name = "online", .mode = 0600 },
.show = stp_online_show,
.store = stp_online_store,
};
-static struct sysdev_class_attribute *stp_attributes[] = {
- &attr_ctn_id,
- &attr_ctn_type,
- &attr_dst_offset,
- &attr_leap_seconds,
- &attr_stp_online,
- &attr_stratum,
- &attr_time_offset,
- &attr_time_zone_offset,
- &attr_timing_mode,
- &attr_timing_state,
+static struct device_attribute *stp_attributes[] = {
+ &dev_attr_ctn_id,
+ &dev_attr_ctn_type,
+ &dev_attr_dst_offset,
+ &dev_attr_leap_seconds,
+ &dev_attr_stp_online,
+ &dev_attr_stratum,
+ &dev_attr_time_offset,
+ &dev_attr_time_zone_offset,
+ &dev_attr_timing_mode,
+ &dev_attr_timing_state,
NULL
};
static int __init stp_init_sysfs(void)
{
- struct sysdev_class_attribute **attr;
+ struct device_attribute **attr;
int rc;
- rc = sysdev_class_register(&stp_sysclass);
+ rc = subsys_system_register(&stp_subsys, NULL);
if (rc)
goto out;
for (attr = stp_attributes; *attr; attr++) {
- rc = sysdev_class_create_file(&stp_sysclass, *attr);
+ rc = device_create_file(stp_subsys.dev_root, *attr);
if (rc)
goto out_unreg;
}
return 0;
out_unreg:
for (; attr >= stp_attributes; attr--)
- sysdev_class_remove_file(&stp_sysclass, *attr);
- sysdev_class_unregister(&stp_sysclass);
+ device_remove_file(stp_subsys.dev_root, *attr);
+ bus_unregister(&stp_subsys);
out:
return rc;
}
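
The ETR and STP hunks above repeat one pattern: a sysdev_class becomes a bus_type registered with subsys_system_register(), and the former class attributes become device attributes created on the subsystem's dev_root. A condensed sketch with illustrative names, assuming the 3.3-era driver core API used throughout this patch:

	/*
	 * Condensed sketch (illustrative names) of the sysdev_class ->
	 * bus_type conversion applied to the ETR and STP interfaces above.
	 */
	#include <linux/device.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	static struct bus_type example_subsys = {
		.name		= "example",
		.dev_name	= "example",
	};

	static ssize_t status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "ok\n");
	}
	static DEVICE_ATTR(status, 0400, status_show, NULL);

	static int __init example_subsys_init(void)
	{
		int rc;

		rc = subsys_system_register(&example_subsys, NULL);
		if (rc)
			return rc;
		/* attributes on dev_root appear in /sys/devices/system/example/ */
		rc = device_create_file(example_subsys.dev_root, &dev_attr_status);
		if (rc)
			bus_unregister(&example_subsys);
		return rc;
	}
	device_initcall(example_subsys_init);

Because system subsystems register under /sys/devices/system/, the existing etr/ and stp/ sysfs paths stay where user space expects them.
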
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index fdb5b8cb260..7370a41948c 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,22 +1,22 @@
/*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/workqueue.h>
#include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
@@ -31,7 +31,6 @@ struct mask_info {
static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
@@ -41,11 +40,12 @@ static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
-#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -71,7 +71,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
struct mask_info *core,
- int z10)
+ int one_core_per_cpu)
{
unsigned int cpu;
@@ -85,18 +85,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) != rcpu)
continue;
-#ifdef CONFIG_SCHED_BOOK
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
-#endif
cpumask_set_cpu(lcpu, &core->mask);
- if (z10) {
+ if (one_core_per_cpu) {
cpu_core_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_core_id[lcpu] = core->id;
}
- smp_cpu_polarization[lcpu] = tl_cpu->pp;
+ cpu_set_polarization(lcpu, tl_cpu->pp);
}
}
return core;
@@ -111,13 +109,11 @@ static void clear_masks(void)
cpumask_clear(&info->mask);
info = info->next;
}
-#ifdef CONFIG_SCHED_BOOK
info = &book_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
-#endif
}
static union topology_entry *next_tle(union topology_entry *tle)
@@ -127,66 +123,75 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
-#ifdef CONFIG_SCHED_BOOK
- struct mask_info *book = &book_info;
- struct cpuid cpu_id;
-#else
- struct mask_info *book = NULL;
-#endif
struct mask_info *core = &core_info;
+ struct mask_info *book = &book_info;
union topology_entry *tle, *end;
- int z10 = 0;
-#ifdef CONFIG_SCHED_BOOK
- get_cpu_id(&cpu_id);
- z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
- spin_lock_irq(&topology_lock);
- clear_masks();
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
- if (z10) {
- switch (tle->nl) {
- case 1:
- book = book->next;
- book->id = tle->container.id;
- break;
- case 0:
- core = add_cpus_to_mask(&tle->cpu, book, core, z10);
- break;
- default:
- clear_masks();
- goto out;
- }
- tle = next_tle(tle);
- continue;
- }
-#endif
switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
case 2:
book = book->next;
book->id = tle->container.id;
break;
-#endif
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, core, z10);
+ add_cpus_to_mask(&tle->cpu, book, core, 0);
break;
default:
clear_masks();
- goto out;
+ return;
}
tle = next_tle(tle);
}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+ struct mask_info *core = &core_info;
+ struct mask_info *book = &book_info;
+ union topology_entry *tle, *end;
+
+ tle = info->tle;
+ end = (union topology_entry *)((unsigned long)info + info->length);
+ while (tle < end) {
+ switch (tle->nl) {
+ case 1:
+ book = book->next;
+ book->id = tle->container.id;
+ break;
+ case 0:
+ core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+ break;
+ default:
+ clear_masks();
+ return;
+ }
+ tle = next_tle(tle);
+ }
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ spin_lock_irq(&topology_lock);
+ clear_masks();
+ switch (cpu_id.machine) {
+ case 0x2097:
+ case 0x2098:
+ __tl_to_cores_z10(info);
+ break;
+ default:
+ __tl_to_cores_generic(info);
+ }
spin_unlock_irq(&topology_lock);
}
@@ -196,7 +201,7 @@ static void topology_update_polarization_simple(void)
mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
- smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+ cpu_set_polarization(cpu, POLARIZATION_HRZ);
mutex_unlock(&smp_cpu_state_mutex);
}
@@ -215,8 +220,7 @@ static int ptf(unsigned long fc)
int topology_set_cpu_management(int fc)
{
- int cpu;
- int rc;
+ int cpu, rc;
if (!MACHINE_HAS_TOPOLOGY)
return -EOPNOTSUPP;
@@ -227,7 +231,7 @@ int topology_set_cpu_management(int fc)
if (rc)
return -EBUSY;
for_each_possible_cpu(cpu)
- smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
return rc;
}
@@ -239,29 +243,25 @@ static void update_cpu_core_map(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
}
spin_unlock_irqrestore(&topology_lock, flags);
}
void store_topology(struct sysinfo_15_1_x *info)
{
-#ifdef CONFIG_SCHED_BOOK
int rc;
rc = stsi(info, 15, 1, 3);
if (rc != -ENOSYS)
return;
-#endif
stsi(info, 15, 1, 2);
}
int arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
- struct sys_device *sysdev;
+ struct device *dev;
int cpu;
if (!MACHINE_HAS_TOPOLOGY) {
@@ -273,8 +273,8 @@ int arch_update_cpu_topology(void)
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return 1;
}
@@ -296,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
set_topology_timer();
}
+static struct timer_list topology_timer =
+ TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
static void set_topology_timer(void)
{
- topology_timer.function = topology_timer_fn;
- topology_timer.data = 0;
- topology_timer.expires = jiffies + 60 * HZ;
- add_timer(&topology_timer);
+ if (atomic_add_unless(&topology_poll, -1, 0))
+ mod_timer(&topology_timer, jiffies + HZ / 10);
+ else
+ mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+ if (!MACHINE_HAS_TOPOLOGY)
+ return;
+ /* This is racy, but it doesn't matter since it is just a heuristic.
+ * Worst case is that we poll in a higher frequency for a bit longer.
+ */
+ if (atomic_read(&topology_poll) > 60)
+ return;
+ atomic_add(60, &topology_poll);
+ set_topology_timer();
}
static int __init early_parse_topology(char *p)
@@ -313,23 +331,6 @@ static int __init early_parse_topology(char *p)
}
early_param("topology", early_parse_topology);
-static int __init init_topology_update(void)
-{
- int rc;
-
- rc = 0;
- if (!MACHINE_HAS_TOPOLOGY) {
- topology_update_polarization_simple();
- goto out;
- }
- init_timer_deferrable(&topology_timer);
- set_topology_timer();
-out:
- update_cpu_core_map();
- return rc;
-}
-__initcall(init_topology_update);
-
static void __init alloc_masks(struct sysinfo_15_1_x *info,
struct mask_info *mask, int offset)
{
@@ -357,10 +358,108 @@ void __init s390_init_cpu_topology(void)
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
- printk(" %d", info->mag[i]);
- printk(" / %d\n", info->mnest);
+ printk(KERN_CONT " %d", info->mag[i]);
+ printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
alloc_masks(info, &book_info, 2);
-#endif
}
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ count = sprintf(buf, "%d\n", cpu_management);
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+
+static ssize_t dispatching_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int val, rc;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ rc = 0;
+ get_online_cpus();
+ mutex_lock(&smp_cpu_state_mutex);
+ if (cpu_management == val)
+ goto out;
+ rc = topology_set_cpu_management(val);
+ if (rc)
+ goto out;
+ cpu_management = val;
+ topology_expect_change();
+out:
+ mutex_unlock(&smp_cpu_state_mutex);
+ put_online_cpus();
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(dispatching, 0644, dispatching_show,
+ dispatching_store);
+
+static ssize_t cpu_polarization_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cpu = dev->id;
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ switch (cpu_read_polarization(cpu)) {
+ case POLARIZATION_HRZ:
+ count = sprintf(buf, "horizontal\n");
+ break;
+ case POLARIZATION_VL:
+ count = sprintf(buf, "vertical:low\n");
+ break;
+ case POLARIZATION_VM:
+ count = sprintf(buf, "vertical:medium\n");
+ break;
+ case POLARIZATION_VH:
+ count = sprintf(buf, "vertical:high\n");
+ break;
+ default:
+ count = sprintf(buf, "unknown\n");
+ break;
+ }
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+ &dev_attr_polarization.attr,
+ NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+ .attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+ return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+ if (!MACHINE_HAS_TOPOLOGY) {
+ topology_update_polarization_simple();
+ goto out;
+ }
+ set_topology_timer();
+out:
+ update_cpu_core_map();
+ return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+}
+device_initcall(topology_init);
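
The new topology_timer/topology_poll code above implements a small adaptive polling heuristic: re-arm every 60 seconds normally, but poll at HZ/10 for up to roughly 60 expirations after an expected change. A standalone sketch of the same idea, with illustrative names and the 3.2-era timer callback signature (unsigned long data argument) used in the patch:

	/*
	 * Standalone sketch (illustrative names) of the adaptive polling
	 * heuristic: slow 60s polling by default, fast HZ/10 polling while
	 * "fast" credits remain.
	 */
	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/atomic.h>

	static void poll_fn(unsigned long data);
	static struct timer_list poll_timer =
		TIMER_DEFERRED_INITIALIZER(poll_fn, 0, 0);
	static atomic_t fast_polls = ATOMIC_INIT(0);

	static void arm_poll_timer(void)
	{
		/* consume one "fast" credit if available, else fall back to slow */
		if (atomic_add_unless(&fast_polls, -1, 0))
			mod_timer(&poll_timer, jiffies + HZ / 10);
		else
			mod_timer(&poll_timer, jiffies + 60 * HZ);
	}

	static void poll_fn(unsigned long data)
	{
		/* ... rescan the topology here ... */
		arm_poll_timer();
	}

	void example_expect_change(void)
	{
		/* racy on purpose: worst case is a little extra fast polling */
		if (atomic_read(&fast_polls) > 60)
			return;
		atomic_add(60, &fast_polls);
		arm_poll_timer();
	}
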
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a9807dd8627..5ce3750b181 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -43,9 +43,9 @@
#include <asm/debug.h>
#include "entry.h"
-void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
+void (*pgm_check_table[128])(struct pt_regs *regs);
-int show_unhandled_signals;
+int show_unhandled_signals = 1;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -234,7 +234,7 @@ void show_regs(struct pt_regs *regs)
static DEFINE_SPINLOCK(die_lock);
-void die(const char * str, struct pt_regs * regs, long err)
+void die(struct pt_regs *regs, const char *str)
{
static int die_counter;
@@ -243,7 +243,7 @@ void die(const char * str, struct pt_regs * regs, long err)
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
- printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+ printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
@@ -254,7 +254,7 @@ void die(const char * str, struct pt_regs * regs, long err)
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
- notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+ notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
show_regs(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE);
@@ -267,8 +267,7 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-static void inline report_user_fault(struct pt_regs *regs, long int_code,
- int signr)
+static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
@@ -276,7 +275,7 @@ static void inline report_user_fault(struct pt_regs *regs, long int_code,
return;
if (!printk_ratelimit())
return;
- printk("User process fault: interruption code 0x%lX ", int_code);
+ printk("User process fault: interruption code 0x%X ", regs->int_code);
print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
show_regs(regs);
@@ -287,19 +286,28 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
-static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
- struct pt_regs *regs, siginfo_t *info)
+static inline void __user *get_psw_address(struct pt_regs *regs)
{
- if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
- pgm_int_code, signr) == NOTIFY_STOP)
+ return (void __user *)
+ ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+}
+
+static void __kprobes do_trap(struct pt_regs *regs,
+ int si_signo, int si_code, char *str)
+{
+ siginfo_t info;
+
+ if (notify_die(DIE_TRAP, str, regs, 0,
+ regs->int_code, si_signo) == NOTIFY_STOP)
return;
if (regs->psw.mask & PSW_MASK_PSTATE) {
- struct task_struct *tsk = current;
-
- tsk->thread.trap_no = pgm_int_code & 0xffff;
- force_sig_info(signr, info, tsk);
- report_user_fault(regs, pgm_int_code, signr);
+ info.si_signo = si_signo;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = get_psw_address(regs);
+ force_sig_info(si_signo, &info, current);
+ report_user_fault(regs, si_signo);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -311,18 +319,11 @@ static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
if (btt == BUG_TRAP_TYPE_WARN)
return;
- die(str, regs, pgm_int_code);
+ die(regs, str);
}
}
}
-static inline void __user *get_psw_address(struct pt_regs *regs,
- long pgm_int_code)
-{
- return (void __user *)
- ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
-}
-
void __kprobes do_per_trap(struct pt_regs *regs)
{
siginfo_t info;
@@ -339,26 +340,19 @@ void __kprobes do_per_trap(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
}
-static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+static void default_trap_handler(struct pt_regs *regs)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
- report_user_fault(regs, pgm_int_code, SIGSEGV);
+ report_user_fault(regs, SIGSEGV);
do_exit(SIGSEGV);
} else
- die("Unknown program exception", regs, pgm_int_code);
+ die(regs, "Unknown program exception");
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
-static void name(struct pt_regs *regs, long pgm_int_code, \
- unsigned long trans_exc_code) \
+static void name(struct pt_regs *regs) \
{ \
- siginfo_t info; \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = get_psw_address(regs, pgm_int_code); \
- do_trap(pgm_int_code, signr, str, regs, &info); \
+ do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@@ -388,42 +382,34 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
"translation exception")
-static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
- int fpc, long pgm_int_code)
+static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
- siginfo_t si;
-
- si.si_signo = SIGFPE;
- si.si_errno = 0;
- si.si_addr = location;
- si.si_code = 0;
+ int si_code = 0;
/* FPC[2] is Data Exception Code */
if ((fpc & 0x00000300) == 0) {
/* bits 6 and 7 of DXC are 0 iff IEEE exception */
if (fpc & 0x8000) /* invalid fp operation */
- si.si_code = FPE_FLTINV;
+ si_code = FPE_FLTINV;
else if (fpc & 0x4000) /* div by 0 */
- si.si_code = FPE_FLTDIV;
+ si_code = FPE_FLTDIV;
else if (fpc & 0x2000) /* overflow */
- si.si_code = FPE_FLTOVF;
+ si_code = FPE_FLTOVF;
else if (fpc & 0x1000) /* underflow */
- si.si_code = FPE_FLTUND;
+ si_code = FPE_FLTUND;
else if (fpc & 0x0800) /* inexact */
- si.si_code = FPE_FLTRES;
+ si_code = FPE_FLTRES;
}
- do_trap(pgm_int_code, SIGFPE,
- "floating point exception", regs, &si);
+ do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs)
{
siginfo_t info;
__u8 opcode[6];
__u16 __user *location;
int signal = 0;
- location = get_psw_address(regs, pgm_int_code);
+ location = get_psw_address(regs);
if (regs->psw.mask & PSW_MASK_PSTATE) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -467,44 +453,31 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
* If we get an illegal op in kernel mode, send it through the
* kprobes notifier. If kprobes doesn't pick it up, SIGILL
*/
- if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
+ if (notify_die(DIE_BPT, "bpt", regs, 0,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
#ifdef CONFIG_MATHEMU
if (signal == SIGFPE)
- do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, pgm_int_code);
- else if (signal == SIGSEGV) {
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *) location;
- do_trap(pgm_int_code, signal,
- "user address fault", regs, &info);
- } else
+ do_fp_trap(regs, current->thread.fp_regs.fpc);
+ else if (signal == SIGSEGV)
+ do_trap(regs, signal, SEGV_MAPERR, "user address fault");
+ else
#endif
- if (signal) {
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *) location;
- do_trap(pgm_int_code, signal,
- "illegal operation", regs, &info);
- }
+ if (signal)
+ do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
-void specification_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void specification_exception(struct pt_regs *regs)
{
__u8 opcode[6];
__u16 __user *location = NULL;
int signal = 0;
- location = (__u16 __user *) get_psw_address(regs, pgm_int_code);
+ location = (__u16 __user *) get_psw_address(regs);
if (regs->psw.mask & PSW_MASK_PSTATE) {
get_user(*((__u16 *) opcode), location);
@@ -539,30 +512,21 @@ void specification_exception(struct pt_regs *regs, long pgm_int_code,
signal = SIGILL;
if (signal == SIGFPE)
- do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, pgm_int_code);
- else if (signal) {
- siginfo_t info;
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPN;
- info.si_addr = location;
- do_trap(pgm_int_code, signal,
- "specification exception", regs, &info);
- }
+ do_fp_trap(regs, current->thread.fp_regs.fpc);
+ else if (signal)
+ do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
-static void data_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+static void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
int signal = 0;
- location = get_psw_address(regs, pgm_int_code);
+ location = get_psw_address(regs);
if (MACHINE_HAS_IEEE)
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -627,32 +591,18 @@ static void data_exception(struct pt_regs *regs, long pgm_int_code,
else
signal = SIGILL;
if (signal == SIGFPE)
- do_fp_trap(regs, location,
- current->thread.fp_regs.fpc, pgm_int_code);
- else if (signal) {
- siginfo_t info;
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPN;
- info.si_addr = location;
- do_trap(pgm_int_code, signal, "data exception", regs, &info);
- }
+ do_fp_trap(regs, current->thread.fp_regs.fpc);
+ else if (signal)
+ do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
-static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+static void space_switch_exception(struct pt_regs *regs)
{
- siginfo_t info;
-
/* Set user psw back to home space mode. */
if (regs->psw.mask & PSW_MASK_PSTATE)
regs->psw.mask |= PSW_ASC_HOME;
/* Send SIGILL. */
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_PRVOPC;
- info.si_addr = get_psw_address(regs, pgm_int_code);
- do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
+ do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
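
For reference, the reworked DO_ERROR_INFO() macro above reduces each program-check handler to a thin wrapper around do_trap(), which now builds the siginfo itself. Expanding the macro by hand for one of the handlers in this file (the message string is assumed from the handler name) gives roughly:

	/* Hand expansion of DO_ERROR_INFO(addressing_exception, SIGILL,
	 * ILL_ILLADR, ...) after the rework; the message string is assumed.
	 */
	static void addressing_exception(struct pt_regs *regs)
	{
		do_trap(regs, SIGILL, ILL_ILLADR, "addressing exception");
	}
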
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a9a301866b3..354dd39073e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -125,8 +125,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
return trans_exc_code != 3;
}
-static inline void report_user_fault(struct pt_regs *regs, long int_code,
- int signr, unsigned long address)
+static inline void report_user_fault(struct pt_regs *regs, long signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
@@ -134,10 +133,12 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code,
return;
if (!printk_ratelimit())
return;
- printk("User process fault: interruption code 0x%lX ", int_code);
+ printk(KERN_ALERT "User process fault: interruption code 0x%X ",
+ regs->int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
- printk("\n");
- printk("failing address: %lX\n", address);
+ printk(KERN_CONT "\n");
+ printk(KERN_ALERT "failing address: %lX\n",
+ regs->int_parm_long & __FAIL_ADDR_MASK);
show_regs(regs);
}
@@ -145,24 +146,18 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code,
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
-static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
- int si_code, unsigned long trans_exc_code)
+static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
struct siginfo si;
- unsigned long address;
- address = trans_exc_code & __FAIL_ADDR_MASK;
- current->thread.prot_addr = address;
- current->thread.trap_no = int_code;
- report_user_fault(regs, int_code, SIGSEGV, address);
+ report_user_fault(regs, SIGSEGV);
si.si_signo = SIGSEGV;
si.si_code = si_code;
- si.si_addr = (void __user *) address;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
}
-static noinline void do_no_context(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long address;
@@ -178,55 +173,48 @@ static noinline void do_no_context(struct pt_regs *regs, long int_code,
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- address = trans_exc_code & __FAIL_ADDR_MASK;
- if (!user_space_fault(trans_exc_code))
+ address = regs->int_parm_long & __FAIL_ADDR_MASK;
+ if (!user_space_fault(regs->int_parm_long))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
- die("Oops", regs, int_code);
+ die(regs, "Oops");
do_exit(SIGKILL);
}
-static noinline void do_low_address(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_low_address(struct pt_regs *regs)
{
/* Low-address protection hit in kernel mode means
NULL pointer write access in kernel mode. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
- die ("Low-address protection", regs, int_code);
+ die (regs, "Low-address protection");
do_exit(SIGKILL);
}
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs);
}
-static noinline void do_sigbus(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code)
+static noinline void do_sigbus(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- unsigned long address;
struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- address = trans_exc_code & __FAIL_ADDR_MASK;
- tsk->thread.prot_addr = address;
- tsk->thread.trap_no = int_code;
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
- si.si_addr = (void __user *) address;
+ si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGBUS, &si, tsk);
}
-static noinline void do_fault_error(struct pt_regs *regs, long int_code,
- unsigned long trans_exc_code, int fault)
+static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
int si_code;
@@ -238,24 +226,24 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
- do_sigsegv(regs, int_code, si_code, trans_exc_code);
+ do_sigsegv(regs, si_code);
return;
}
case VM_FAULT_BADCONTEXT:
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs);
break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
- do_no_context(regs, int_code, trans_exc_code);
+ do_no_context(regs);
else
- do_sigbus(regs, int_code, trans_exc_code);
+ do_sigbus(regs);
} else
BUG();
break;
@@ -273,12 +261,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline int do_exception(struct pt_regs *regs, int access,
- unsigned long trans_exc_code)
+static inline int do_exception(struct pt_regs *regs, int access)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
int fault;
@@ -288,6 +276,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
tsk = current;
mm = tsk->mm;
+ trans_exc_code = regs->int_parm_long;
/*
* Verify that the fault happened in user space, that
@@ -387,45 +376,46 @@ out:
return fault;
}
-void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void __kprobes do_protection_exception(struct pt_regs *regs)
{
+ unsigned long trans_exc_code;
int fault;
+ trans_exc_code = regs->int_parm_long;
/* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16);
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
- do_low_address(regs, pgm_int_code, trans_exc_code);
+ do_low_address(regs);
return;
}
- fault = do_exception(regs, VM_WRITE, trans_exc_code);
+ fault = do_exception(regs, VM_WRITE);
if (unlikely(fault))
- do_fault_error(regs, 4, trans_exc_code, fault);
+ do_fault_error(regs, fault);
}
-void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void __kprobes do_dat_exception(struct pt_regs *regs)
{
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
- fault = do_exception(regs, access, trans_exc_code);
+ fault = do_exception(regs, access);
if (unlikely(fault))
- do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
+ do_fault_error(regs, fault);
}
#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
- unsigned long trans_exc_code)
+void __kprobes do_asce_exception(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ unsigned long trans_exc_code;
+ trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
@@ -440,12 +430,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
- do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
+ do_sigsegv(regs, SEGV_MAPERR);
return;
}
no_context:
- do_no_context(regs, pgm_int_code, trans_exc_code);
+ do_no_context(regs);
}
#endif
@@ -459,14 +449,15 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
- uaddr &= PAGE_MASK;
+ regs.int_code = pgm_int_code;
+ regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
access = write ? VM_WRITE : VM_READ;
- fault = do_exception(&regs, access, uaddr | 2);
+ fault = do_exception(&regs, access);
if (unlikely(fault)) {
if (fault & VM_FAULT_OOM)
return -EFAULT;
else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(&regs, pgm_int_code, uaddr);
+ do_sigbus(&regs);
}
return fault ? -EFAULT : 0;
}
@@ -509,7 +500,7 @@ int pfault_init(void)
.reserved = __PF_RES_FIELD };
int rc;
- if (!MACHINE_IS_VM || pfault_disable)
+ if (pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
@@ -530,7 +521,7 @@ void pfault_fini(void)
.refversn = 2,
};
- if (!MACHINE_IS_VM || pfault_disable)
+ if (pfault_disable)
return;
asm volatile(
" diag %0,0,0x258\n"
@@ -643,8 +634,6 @@ static int __init pfault_irq_init(void)
{
int rc;
- if (!MACHINE_IS_VM)
- return 0;
rc = register_external_interrupt(0x2603, pfault_interrupt);
if (rc)
goto out_extint;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d4b9fb4d004..5d633019d8f 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -93,18 +93,22 @@ static unsigned long setup_zero_pages(void)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type;
+ unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
- S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
- /* A three level page table (4TB) is enough for the kernel space. */
- S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
+ if (VMALLOC_END > (1UL << 42)) {
+ asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION2_ENTRY_EMPTY;
+ } else {
+ asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION3_ENTRY_EMPTY;
+ }
#else
- S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+ asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
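
A quick sanity check of the 1UL << 42 threshold used in paging_init() above, assuming the z/Architecture table geometry (2048 entries per region/segment table, 256 per page table, 4 KB pages); illustration only, written as ordinary user-space C rather than kernel code:

	/* Illustration only: region-3 vs region-2 ASCE coverage. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long region3 = 2048ULL * 2048 * 256 * 4096;
		unsigned long long region2 = region3 * 2048;

		printf("region-3 ASCE covers %llu bytes (2^42, 4 TB)\n", region3);
		printf("region-2 ASCE covers %llu bytes (2^53, 8 PB)\n", region2);
		return 0;
	}

So the four-level (region-2) table is only selected once VMALLOC_END no longer fits within the 4 TB a three-level table can map.
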
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 301c84d3b54..9a4d02f64f1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -33,17 +33,6 @@
#define FRAG_MASK 0x03
#endif
-unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
-EXPORT_SYMBOL(VMALLOC_START);
-
-static int __init parse_vmalloc(char *arg)
-{
- if (!arg)
- return -EINVAL;
- VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
- return 0;
-}
-early_param("vmalloc", parse_vmalloc);
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
@@ -267,7 +256,10 @@ static int gmap_alloc_table(struct gmap *gmap,
struct page *page;
unsigned long *new;
+ /* since we don't free the gmap table until gmap_free we can unlock */
+ spin_unlock(&gmap->mm->page_table_lock);
page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ spin_lock(&gmap->mm->page_table_lock);
if (!page)
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
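
The gmap_alloc_table() hunk above drops mm->page_table_lock around a sleeping GFP_KERNEL allocation and re-takes it afterwards. A generic sketch of that pattern with illustrative names; as the comment in the hunk notes, this is only safe when the state protected by the lock cannot go away while the lock is dropped:

	/*
	 * Generic sketch (illustrative names) of the unlock-around-allocation
	 * pattern: GFP_KERNEL may sleep, so the spinlock is dropped for the
	 * allocation and re-taken before shared state is touched again.
	 */
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/spinlock.h>

	static int alloc_table_page(spinlock_t *lock, struct page **res,
				    unsigned int order)
	{
		struct page *page;

		spin_unlock(lock);
		page = alloc_pages(GFP_KERNEL, order);
		spin_lock(lock);
		if (!page)
			return -ENOMEM;
		*res = page;
		return 0;
	}
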
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index f43c0e4282a..9daee91e6c3 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -22,6 +22,7 @@
#include <asm/irq.h>
#include "hwsampler.h"
+#include "op_counter.h"
#define MAX_NUM_SDB 511
#define MIN_NUM_SDB 1
@@ -896,6 +897,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
if (sample_data_ptr->P == 1) {
/* userspace sample */
unsigned int pid = sample_data_ptr->prim_asn;
+ if (!counter_config.user)
+ goto skip_sample;
rcu_read_lock();
tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
if (tsk)
@@ -903,6 +906,8 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
rcu_read_unlock();
} else {
/* kernelspace sample */
+ if (!counter_config.kernel)
+ goto skip_sample;
regs = task_pt_regs(current);
}
@@ -910,7 +915,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
!sample_data_ptr->P, tsk);
mutex_unlock(&hws_sem);
-
+ skip_sample:
sample_data_ptr++;
}
}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 6efc18b5e60..2297be406c6 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -2,10 +2,11 @@
* arch/s390/oprofile/init.c
*
* S390 Version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
* Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
* Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
+ * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
*
* @remark Copyright 2002-2011 OProfile authors
*/
@@ -14,6 +15,8 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/module.h>
+#include <asm/processor.h>
#include "../../../drivers/oprofile/oprof.h"
@@ -22,6 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
#ifdef CONFIG_64BIT
#include "hwsampler.h"
+#include "op_counter.h"
#define DEFAULT_INTERVAL 4127518
@@ -35,16 +39,41 @@ static unsigned long oprofile_max_interval;
static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
-static int hwsampler_file;
+static int hwsampler_enabled;
static int hwsampler_running; /* start_mutex must be held to change */
+static int hwsampler_available;
static struct oprofile_operations timer_ops;
+struct op_counter_config counter_config;
+
+enum __force_cpu_type {
+ reserved = 0, /* do not force */
+ timer,
+};
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
+{
+ if (!strcmp(str, "timer")) {
+ force_cpu_type = timer;
+ printk(KERN_INFO "oprofile: forcing timer to be returned "
+ "as cpu type\n");
+ } else {
+ force_cpu_type = 0;
+ }
+
+ return 0;
+}
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
+MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling "
+ "(report cpu_type \"timer\")");
+
static int oprofile_hwsampler_start(void)
{
int retval;
- hwsampler_running = hwsampler_file;
+ hwsampler_running = hwsampler_enabled;
if (!hwsampler_running)
return timer_ops.start();
@@ -72,10 +101,16 @@ static void oprofile_hwsampler_stop(void)
return;
}
+/*
+ * File ops used for:
+ * /dev/oprofile/0/enabled
+ * /dev/oprofile/hwsampling/hwsampler (cpu_type = timer)
+ */
+
static ssize_t hwsampler_read(struct file *file, char __user *buf,
size_t count, loff_t *offset)
{
- return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset);
+ return oprofilefs_ulong_to_user(hwsampler_enabled, buf, count, offset);
}
static ssize_t hwsampler_write(struct file *file, char const __user *buf,
@@ -88,9 +123,12 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
if (oprofile_started)
/*
* safe to do without locking as we set
@@ -99,7 +137,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
*/
return -EBUSY;
- hwsampler_file = val;
+ hwsampler_enabled = val;
return count;
}
@@ -109,38 +147,311 @@ static const struct file_operations hwsampler_fops = {
.write = hwsampler_write,
};
+/*
+ * File ops used for:
+ * /dev/oprofile/0/count
+ * /dev/oprofile/hwsampling/hw_interval (cpu_type = timer)
+ *
+ * Make sure that the value is within the hardware range.
+ */
+
+static ssize_t hw_interval_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(oprofile_hw_interval, buf,
+ count, offset);
+}
+
+static ssize_t hw_interval_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+ if (val < oprofile_min_interval)
+ oprofile_hw_interval = oprofile_min_interval;
+ else if (val > oprofile_max_interval)
+ oprofile_hw_interval = oprofile_max_interval;
+ else
+ oprofile_hw_interval = val;
+
+ return count;
+}
+
+static const struct file_operations hw_interval_fops = {
+ .read = hw_interval_read,
+ .write = hw_interval_write,
+};
+
+/*
+ * File ops used for:
+ * /dev/oprofile/0/event
+ * Only a single event with number 0 is supported with this counter.
+ *
+ * /dev/oprofile/0/unit_mask
+ * This is a dummy file needed by the user space tools.
+ * No value other than 0 is accepted or returned.
+ */
+
+static ssize_t hwsampler_zero_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(0, buf, count, offset);
+}
+
+static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+ if (val != 0)
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations zero_fops = {
+ .read = hwsampler_zero_read,
+ .write = hwsampler_zero_write,
+};
+
+/* /dev/oprofile/0/kernel file ops. */
+
+static ssize_t hwsampler_kernel_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(counter_config.kernel,
+ buf, count, offset);
+}
+
+static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ counter_config.kernel = val;
+
+ return count;
+}
+
+static const struct file_operations kernel_fops = {
+ .read = hwsampler_kernel_read,
+ .write = hwsampler_kernel_write,
+};
+
+/* /dev/oprofile/0/user file ops. */
+
+static ssize_t hwsampler_user_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(counter_config.user,
+ buf, count, offset);
+}
+
+static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ counter_config.user = val;
+
+ return count;
+}
+
+static const struct file_operations user_fops = {
+ .read = hwsampler_user_read,
+ .write = hwsampler_user_write,
+};
+
+
+/*
+ * File ops used for: /dev/oprofile/timer/enabled
+ * The value always has to be the inverted value of hwsampler_enabled. So
+ * no separate variable is created. That way we do not need locking.
+ */
+
+static ssize_t timer_enabled_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(!hwsampler_enabled, buf, count, offset);
+}
+
+static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ /* Timer cannot be disabled without having hardware sampling. */
+ if (val == 0 && !hwsampler_available)
+ return -EINVAL;
+
+ if (oprofile_started)
+ /*
+ * safe to do without locking as we set
+ * hwsampler_running in start() when start_mutex is
+ * held
+ */
+ return -EBUSY;
+
+ hwsampler_enabled = !val;
+
+ return count;
+}
+
+static const struct file_operations timer_enabled_fops = {
+ .read = timer_enabled_read,
+ .write = timer_enabled_write,
+};
+
+
static int oprofile_create_hwsampling_files(struct super_block *sb,
- struct dentry *root)
+ struct dentry *root)
{
- struct dentry *hw_dir;
+ struct dentry *dir;
+
+ dir = oprofilefs_mkdir(sb, root, "timer");
+ if (!dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);
+
+ if (!hwsampler_available)
+ return 0;
/* reinitialize default values */
- hwsampler_file = 1;
+ hwsampler_enabled = 1;
+ counter_config.kernel = 1;
+ counter_config.user = 1;
- hw_dir = oprofilefs_mkdir(sb, root, "hwsampling");
- if (!hw_dir)
- return -EINVAL;
+ if (!force_cpu_type) {
+ /*
+ * Create the counter file system. A single virtual
+ * counter is created which can be used to
+ * enable/disable hardware sampling dynamically from
+ * user space. The user space will configure a single
+ * counter with a single event. The value of 'event'
+ * and 'unit_mask' are not evaluated by the kernel code
+ * and can only be set to 0.
+ */
+
+ dir = oprofilefs_mkdir(sb, root, "0");
+ if (!dir)
+ return -EINVAL;
- oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops);
- oprofilefs_create_ulong(sb, hw_dir, "hw_interval",
- &oprofile_hw_interval);
- oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval",
- &oprofile_min_interval);
- oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval",
- &oprofile_max_interval);
- oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks",
- &oprofile_sdbt_blocks);
+ oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
+ oprofilefs_create_file(sb, dir, "event", &zero_fops);
+ oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
+ oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
+ oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
+ oprofilefs_create_file(sb, dir, "user", &user_fops);
+ oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+ } else {
+ /*
+ * Hardware sampling can be used but the cpu_type is
+ * forced to timer in order to deal with legacy user
+ * space tools. The /dev/oprofile/hwsampling fs is
+ * provided in that case.
+ */
+ dir = oprofilefs_mkdir(sb, root, "hwsampling");
+ if (!dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, dir, "hwsampler",
+ &hwsampler_fops);
+ oprofilefs_create_file(sb, dir, "hw_interval",
+ &hw_interval_fops);
+ oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
+ &oprofile_min_interval);
+ oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
+ &oprofile_max_interval);
+ oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+ }
return 0;
}
static int oprofile_hwsampler_init(struct oprofile_operations *ops)
{
+ /*
+ * Initialize the timer mode infrastructure as well in order
+ * to be able to switch back dynamically. oprofile_timer_init
+ * is not supposed to fail.
+ */
+ if (oprofile_timer_init(ops))
+ BUG();
+
+ memcpy(&timer_ops, ops, sizeof(timer_ops));
+ ops->create_files = oprofile_create_hwsampling_files;
+
+ /*
+ * If the user space tools do not support newer cpu types,
+ * the force_cpu_type module parameter
+ * can be used to always return "timer" as cpu type.
+ */
+ if (force_cpu_type != timer) {
+ struct cpuid id;
+
+ get_cpu_id (&id);
+
+ switch (id.machine) {
+ case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
+ case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
+ default: return -ENODEV;
+ }
+ }
+
if (hwsampler_setup())
return -ENODEV;
/*
- * create hwsampler files only if hwsampler_setup() succeeds.
+ * Query the range for the sampling interval from the
+ * hardware.
*/
oprofile_min_interval = hwsampler_query_min_interval();
if (oprofile_min_interval == 0)
@@ -155,23 +466,17 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
if (oprofile_hw_interval > oprofile_max_interval)
oprofile_hw_interval = oprofile_max_interval;
- if (oprofile_timer_init(ops))
- return -ENODEV;
-
- printk(KERN_INFO "oprofile: using hardware sampling\n");
-
- memcpy(&timer_ops, ops, sizeof(timer_ops));
+ printk(KERN_INFO "oprofile: System z hardware sampling "
+ "facility found.\n");
ops->start = oprofile_hwsampler_start;
ops->stop = oprofile_hwsampler_stop;
- ops->create_files = oprofile_create_hwsampling_files;
return 0;
}
static void oprofile_hwsampler_exit(void)
{
- oprofile_timer_exit();
hwsampler_shutdown();
}
@@ -182,7 +487,15 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = s390_backtrace;
#ifdef CONFIG_64BIT
- return oprofile_hwsampler_init(ops);
+
+ /*
+ * -ENODEV is not reported to the caller. The module itself
+ * will use the timer mode sampling as fallback and this is
+ * always available.
+ */
+ hwsampler_available = oprofile_hwsampler_init(ops) == 0;
+
+ return 0;
#else
return -ENODEV;
#endif
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h
new file mode 100644
index 00000000000..1a8d3ca0901
--- /dev/null
+++ b/arch/s390/oprofile/op_counter.h
@@ -0,0 +1,23 @@
+/**
+ * arch/s390/oprofile/op_counter.h
+ *
+ * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
+ *
+ * @remark Copyright 2011 OProfile authors
+ */
+
+#ifndef OP_COUNTER_H
+#define OP_COUNTER_H
+
+struct op_counter_config {
+ /* `enabled' maps to the hwsampler_file variable. */
+ /* `count' maps to the oprofile_hw_interval variable. */
+ /* `event' and `unit_mask' are unused. */
+ unsigned long kernel;
+ unsigned long user;
+};
+
+extern struct op_counter_config counter_config;
+
+#endif /* OP_COUNTER_H */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 455ce2d7682..3df65d39abc 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -5,6 +5,9 @@ config SCORE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
choice
prompt "System type"
@@ -58,9 +61,6 @@ config 32BIT
config ARCH_FLATMEM_ENABLE
def_bool y
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
source "mm/Kconfig"
config MEMORY_START
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index 6f898c05787..b48459afefd 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -26,6 +26,7 @@
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
@@ -54,7 +55,8 @@ static void __init bootmem_init(void)
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
min_low_pfn, max_low_pfn);
- add_active_range(0, min_low_pfn, max_low_pfn);
+ memblock_add_node(PFN_PHYS(min_low_pfn),
+ PFN_PHYS(max_low_pfn - min_low_pfn), 0);
free_bootmem(PFN_PHYS(start_pfn),
(max_low_pfn - start_pfn) << PAGE_SHIFT);
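The hunk above replaces the removed add_active_range() with memblock_add_node(). A minimal sketch of the new registration step, assuming the memblock API of this kernel generation and using made-up names:

#include <linux/memblock.h>
#include <linux/pfn.h>

/* Register [start_pfn, end_pfn) with memblock, attributed to node nid. */
static void __init example_register_ram(int nid, unsigned long start_pfn,
					unsigned long end_pfn)
{
	memblock_add_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn), nid);
}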
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ead164080cf..3c8db65c89e 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -4,6 +4,7 @@ config SUPERH
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index ec8c84c14b1..895e337c79b 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
#define GBECONT 0xffc10100
#define GBECONT_RMII1 BIT(17)
#define GBECONT_RMII0 BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800)
+ if (((unsigned long)addr & 0x00000fff) < 0x0800)
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
else
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
},
};
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
{
- if ((addr & 0x00000fff) < 0x0800) {
+ if (((unsigned long)addr & 0x00000fff) < 0x0800) {
gpio_set_value(GPIO_PTT4, 1);
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
} else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
- .chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
- .chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
+ .chan_priv_tx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ },
+ .chan_priv_rx = {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ }
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 83cc704770d..b1cb2715ad6 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -12,18 +12,19 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/string.h>
#include <asm/dma.h>
-static struct sysdev_class dma_sysclass = {
+static struct bus_type dma_subsys = {
.name = "dma",
+ .dev_name = "dma",
};
-static ssize_t dma_show_devices(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_devices(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
ssize_t len = 0;
int i;
@@ -43,29 +44,29 @@ static ssize_t dma_show_devices(struct sys_device *dev,
return len;
}
-static SYSDEV_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
+static DEVICE_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
-static int __init dma_sysclass_init(void)
+static int __init dma_subsys_init(void)
{
int ret;
- ret = sysdev_class_register(&dma_sysclass);
+ ret = subsys_system_register(&dma_subsys, NULL);
if (unlikely(ret))
return ret;
- return sysfs_create_file(&dma_sysclass.kset.kobj, &attr_devices.attr);
+ return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
}
-postcore_initcall(dma_sysclass_init);
+postcore_initcall(dma_subsys_init);
-static ssize_t dma_show_dev_id(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_dev_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_channel *channel = to_dma_channel(dev);
return sprintf(buf, "%s\n", channel->dev_id);
}
-static ssize_t dma_store_dev_id(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t dma_store_dev_id(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
@@ -73,10 +74,10 @@ static ssize_t dma_store_dev_id(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
+static DEVICE_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
-static ssize_t dma_store_config(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t dma_store_config(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
@@ -88,17 +89,17 @@ static ssize_t dma_store_config(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config);
+static DEVICE_ATTR(config, S_IWUSR, NULL, dma_store_config);
-static ssize_t dma_show_mode(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t dma_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_channel *channel = to_dma_channel(dev);
return sprintf(buf, "0x%08x\n", channel->mode);
}
-static ssize_t dma_store_mode(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t dma_store_mode(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
@@ -106,38 +107,38 @@ static ssize_t dma_store_mode(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
#define dma_ro_attr(field, fmt) \
-static ssize_t dma_show_##field(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf)\
+static ssize_t dma_show_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf)\
{ \
struct dma_channel *channel = to_dma_channel(dev); \
return sprintf(buf, fmt, channel->field); \
} \
-static SYSDEV_ATTR(field, S_IRUGO, dma_show_##field, NULL);
+static DEVICE_ATTR(field, S_IRUGO, dma_show_##field, NULL);
dma_ro_attr(count, "0x%08x\n");
dma_ro_attr(flags, "0x%08lx\n");
int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
- struct sys_device *dev = &chan->dev;
+ struct device *dev = &chan->dev;
char name[16];
int ret;
dev->id = chan->vchan;
- dev->cls = &dma_sysclass;
+ dev->bus = &dma_subsys;
- ret = sysdev_register(dev);
+ ret = device_register(dev);
if (ret)
return ret;
- ret |= sysdev_create_file(dev, &attr_dev_id);
- ret |= sysdev_create_file(dev, &attr_count);
- ret |= sysdev_create_file(dev, &attr_mode);
- ret |= sysdev_create_file(dev, &attr_flags);
- ret |= sysdev_create_file(dev, &attr_config);
+ ret |= device_create_file(dev, &dev_attr_dev_id);
+ ret |= device_create_file(dev, &dev_attr_count);
+ ret |= device_create_file(dev, &dev_attr_mode);
+ ret |= device_create_file(dev, &dev_attr_flags);
+ ret |= device_create_file(dev, &dev_attr_config);
if (unlikely(ret)) {
dev_err(&info->pdev->dev, "Failed creating attrs\n");
@@ -150,17 +151,17 @@ int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
void dma_remove_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
- struct sys_device *dev = &chan->dev;
+ struct device *dev = &chan->dev;
char name[16];
- sysdev_remove_file(dev, &attr_dev_id);
- sysdev_remove_file(dev, &attr_count);
- sysdev_remove_file(dev, &attr_mode);
- sysdev_remove_file(dev, &attr_flags);
- sysdev_remove_file(dev, &attr_config);
+ device_remove_file(dev, &dev_attr_dev_id);
+ device_remove_file(dev, &dev_attr_count);
+ device_remove_file(dev, &dev_attr_mode);
+ device_remove_file(dev, &dev_attr_flags);
+ device_remove_file(dev, &dev_attr_config);
snprintf(name, sizeof(name), "dma%d", chan->chan);
sysfs_remove_link(&info->pdev->dev.kobj, name);
- sysdev_unregister(dev);
+ device_unregister(dev);
}
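The conversion above follows the generic sysdev-to-device pattern: a bus_type registered with subsys_system_register() stands in for the old sysdev_class, and DEVICE_ATTR()/device_create_file() replace their SYSDEV counterparts. A self-contained sketch of that pattern with invented names, not taken from the patch:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static struct bus_type example_subsys = {
	.name		= "example",
	.dev_name	= "example",
};

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->id);
}
static DEVICE_ATTR(id, S_IRUGO, example_show, NULL);

static int __init example_subsys_init(void)
{
	int ret;

	ret = subsys_system_register(&example_subsys, NULL);
	if (ret)
		return ret;
	/* attribute appears under /sys/devices/system/example/ */
	return device_create_file(example_subsys.dev_root, &dev_attr_id);
}
postcore_initcall(example_subsys_init);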
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 07373a07409..6aa2080c006 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <cpu/dma.h>
#include <asm-generic/dma.h>
@@ -91,7 +91,7 @@ struct dma_channel {
wait_queue_head_t wait_queue;
- struct sys_device dev;
+ struct device dev;
void *priv_data;
};
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
deleted file mode 100644
index e87063fad2e..00000000000
--- a/arch/sh/include/asm/memblock.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASM_SH_MEMBLOCK_H
-#define __ASM_SH_MEMBLOCK_H
-
-#endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d5089de1..20ee40af16e 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* Freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
/*
* _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index f0907995b4c..a8140f0bbf6 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -337,9 +337,9 @@ static struct kobj_type ktype_percpu_entry = {
.default_attrs = sq_sysfs_attrs,
};
-static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+static int __devinit sq_dev_add(struct device *dev)
{
- unsigned int cpu = sysdev->id;
+ unsigned int cpu = dev->id;
struct kobject *kobj;
int error;
@@ -348,25 +348,27 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
return -ENOMEM;
kobj = sq_kobject[cpu];
- error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
+ error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
"%s", "sq");
if (!error)
kobject_uevent(kobj, KOBJ_ADD);
return error;
}
-static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+static int __devexit sq_dev_remove(struct device *dev)
{
- unsigned int cpu = sysdev->id;
+ unsigned int cpu = dev->id;
struct kobject *kobj = sq_kobject[cpu];
kobject_put(kobj);
return 0;
}
-static struct sysdev_driver sq_sysdev_driver = {
- .add = sq_sysdev_add,
- .remove = __devexit_p(sq_sysdev_remove),
+static struct subsys_interface sq_interface = {
+ .name = "sq",
+ .subsys = &cpu_subsys,
+ .add_dev = sq_dev_add,
+ .remove_dev = __devexit_p(sq_dev_remove),
};
static int __init sq_api_init(void)
@@ -386,7 +388,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_bitmap))
goto out;
- ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
+ ret = subsys_interface_register(&sq_interface);
if (unlikely(ret != 0))
goto out;
@@ -401,7 +403,7 @@ out:
static void __exit sq_api_exit(void)
{
- sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
+ subsys_interface_unregister(&sq_interface);
kfree(sq_bitmap);
kmem_cache_destroy(sq_cache);
}
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index db4ecd731a0..406508d4ce7 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -89,7 +89,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -111,7 +112,8 @@ void cpu_idle(void)
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
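The idle-loop changes above replace tick_nohz_stop_sched_tick(1)/tick_nohz_restart_sched_tick() with explicit enter/exit pairs and inform RCU separately that the CPU is idle. A stripped-down sketch of the resulting bracketing (illustrative only, not the arch code):

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/tick.h>

static void example_cpu_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* stop the periodic tick */
		rcu_idle_enter();		/* RCU may now ignore this CPU */

		while (!need_resched())
			cpu_relax();		/* arch-specific idle goes here */

		rcu_idle_exit();		/* CPU is RCU-visible again */
		tick_nohz_idle_exit();		/* restart the periodic tick */
		schedule();
	}
}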
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index c5a33f007f8..9fea49f6e66 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -157,9 +157,6 @@ void __init reserve_crashkernel(void)
unsigned long long crash_size, crash_base;
int ret;
- /* this is necessary because of memblock_phys_mem_size() */
- memblock_analyze();
-
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 1a0e946679a..7b57bf1dc85 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -230,7 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
PAGE_KERNEL);
- add_active_range(nid, start_pfn, end_pfn);
+ memblock_set_node(PFN_PHYS(start_pfn),
+ PFN_PHYS(end_pfn - start_pfn), nid);
}
void __init __weak plat_early_device_setup(void)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index c3e61b36649..cb8f9920f4d 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -143,9 +143,6 @@ config MAX_ACTIVE_REGIONS
CPU_SUBTYPE_SH7785)
default "1"
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 939ca0f356f..82cc576fab1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -324,7 +324,6 @@ void __init paging_init(void)
unsigned long vaddr, end;
int nid;
- memblock_init();
sh_mv.mv_mem_init();
early_reserve_mem();
@@ -337,7 +336,7 @@ void __init paging_init(void)
sh_mv.mv_mem_reserve();
memblock_enforce_memory_limit(memory_limit);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index b4c2d2b946d..e4dd5d5a111 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = sh_backtrace;
return -ENODEV;
}
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a4644f5ea98..96657992a72 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -32,6 +32,7 @@ config SPARC
config SPARC32
def_bool !64BIT
+ select GENERIC_ATOMIC64
config SPARC64
def_bool 64BIT
@@ -44,6 +45,7 @@ config SPARC64
select HAVE_KPROBES
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
select HAVE_SYSCALL_WRAPPERS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -353,9 +355,6 @@ config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
-config ARCH_POPULATES_NODE_MAP
- def_bool y if SPARC64
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y if SPARC64
@@ -386,9 +385,7 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-if SPARC64
source "kernel/Kconfig.preempt"
-endif
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 5c3c8b69884..9dd0a769fa1 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
-#ifdef __KERNEL__
+#include <asm-generic/atomic64.h>
#include <asm/system.h>
@@ -52,112 +52,10 @@ extern void atomic_set(atomic_t *, int);
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-/* This is the old 24-bit implementation. It's still used internally
- * by some sparc-specific code, notably the semaphore implementation.
- */
-typedef struct { volatile int counter; } atomic24_t;
-
-#ifndef CONFIG_SMP
-
-#define ATOMIC24_INIT(i) { (i) }
-#define atomic24_read(v) ((v)->counter)
-#define atomic24_set(v, i) (((v)->counter) = i)
-
-#else
-/* We do the bulk of the actual work out of line in two common
- * routines in assembler, see arch/sparc/lib/atomic.S for the
- * "fun" details.
- *
- * For SMP the trick is you embed the spin lock byte within
- * the word, use the low byte so signedness is easily retained
- * via a quick arithmetic shift. It looks like this:
- *
- * ----------------------------------------
- * | signed 24-bit counter value | lock | atomic_t
- * ----------------------------------------
- * 31 8 7 0
- */
-
-#define ATOMIC24_INIT(i) { ((i) << 8) }
-
-static inline int atomic24_read(const atomic24_t *v)
-{
- int ret = v->counter;
-
- while(ret & 0xff)
- ret = v->counter;
-
- return ret >> 8;
-}
-
-#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
-#endif
-
-static inline int __atomic24_add(int i, atomic24_t *v)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g7");
-
- ptr = &v->counter;
- increment = i;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_add\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
- : "0" (increment), "r" (ptr)
- : "memory", "cc");
-
- return increment;
-}
-
-static inline int __atomic24_sub(int i, atomic24_t *v)
-{
- register volatile int *ptr asm("g1");
- register int increment asm("g2");
- register int tmp1 asm("g3");
- register int tmp2 asm("g4");
- register int tmp3 asm("g7");
-
- ptr = &v->counter;
- increment = i;
-
- __asm__ __volatile__(
- "mov %%o7, %%g4\n\t"
- "call ___atomic24_sub\n\t"
- " add %%o7, 8, %%o7\n"
- : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
- : "0" (increment), "r" (ptr)
- : "memory", "cc");
-
- return increment;
-}
-
-#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
-#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
-
-#define atomic24_dec_return(v) __atomic24_sub(1, (v))
-#define atomic24_inc_return(v) __atomic24_add(1, (v))
-
-#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
-#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
-
-#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
-#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
-
-#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
-
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#endif /* !(__KERNEL__) */
-
#endif /* !(__ARCH_SPARC_ATOMIC__) */
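With the sparc32 Kconfig now selecting GENERIC_ATOMIC64 and this header pulling in <asm-generic/atomic64.h>, callers that previously reached for the hand-rolled 24-bit atomics can use the stock 64-bit atomic API. A minimal usage sketch with a hypothetical counter, not from the patch:

#include <linux/atomic.h>

static atomic64_t example_events = ATOMIC64_INIT(0);

static long long example_count_event(void)
{
	/* serviced by the generic spinlock-backed atomic64 on sparc32 */
	return atomic64_add_return(1, &example_events);
}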
diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h
deleted file mode 100644
index c67b047ef85..00000000000
--- a/arch/sparc/include/asm/memblock.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _SPARC64_MEMBLOCK_H
-#define _SPARC64_MEMBLOCK_H
-
-#include <asm/oplib.h>
-
-#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
-
-#endif /* !(_SPARC64_MEMBLOCK_H) */
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 156707b0f18..bb5c2ac4055 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -8,14 +8,10 @@
#ifndef _SPARC_PAGE_H
#define _SPARC_PAGE_H
-#define PAGE_SHIFT 12
+#include <linux/const.h>
-#ifndef __ASSEMBLY__
-/* I have my suspicions... -DaveM */
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#else
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#endif
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <asm/btfixup.h>
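The PAGE_SIZE change above relies on _AC() from <linux/const.h>, which appends the UL suffix only when the header is compiled as C, so one definition serves both C and assembly. A tiny sketch of the idiom with a made-up constant:

#include <linux/const.h>

#define EXAMPLE_SHIFT	12
/* expands to (1UL << 12) in C and to (1 << 12) in assembly */
#define EXAMPLE_SIZE	(_AC(1, UL) << EXAMPLE_SHIFT)
#define EXAMPLE_MASK	(~(EXAMPLE_SIZE - 1))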
diff --git a/arch/sparc/include/asm/pgtsun4.h b/arch/sparc/include/asm/pgtsun4.h
deleted file mode 100644
index 5a0d661fb82..00000000000
--- a/arch/sparc/include/asm/pgtsun4.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * pgtsun4.h: Sun4 specific pgtable.h defines and code.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-#ifndef _SPARC_PGTSUN4C_H
-#define _SPARC_PGTSUN4C_H
-
-#include <asm/contregs.h>
-
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define SUN4C_PMD_SHIFT 23
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SUN4C_PGDIR_SHIFT 23
-#define SUN4C_PGDIR_SIZE (1UL << SUN4C_PGDIR_SHIFT)
-#define SUN4C_PGDIR_MASK (~(SUN4C_PGDIR_SIZE-1))
-#define SUN4C_PGDIR_ALIGN(addr) (((addr)+SUN4C_PGDIR_SIZE-1)&SUN4C_PGDIR_MASK)
-
-/* To represent how the sun4c mmu really lays things out. */
-#define SUN4C_REAL_PGDIR_SHIFT 18
-#define SUN4C_REAL_PGDIR_SIZE (1UL << SUN4C_REAL_PGDIR_SHIFT)
-#define SUN4C_REAL_PGDIR_MASK (~(SUN4C_REAL_PGDIR_SIZE-1))
-#define SUN4C_REAL_PGDIR_ALIGN(addr) (((addr)+SUN4C_REAL_PGDIR_SIZE-1)&SUN4C_REAL_PGDIR_MASK)
-
-/* 19 bit PFN on sun4 */
-#define SUN4C_PFN_MASK 0x7ffff
-
-/* Don't increase these unless the structures in sun4c.c are fixed */
-#define SUN4C_MAX_SEGMAPS 256
-#define SUN4C_MAX_CONTEXTS 16
-
-/*
- * To be efficient, and not have to worry about allocating such
- * a huge pgd, we make the kernel sun4c tables each hold 1024
- * entries and the pgd similarly just like the i386 tables.
- */
-#define SUN4C_PTRS_PER_PTE 1024
-#define SUN4C_PTRS_PER_PMD 1
-#define SUN4C_PTRS_PER_PGD 1024
-
-/*
- * Sparc SUN4C pte fields.
- */
-#define _SUN4C_PAGE_VALID 0x80000000
-#define _SUN4C_PAGE_SILENT_READ 0x80000000 /* synonym */
-#define _SUN4C_PAGE_DIRTY 0x40000000
-#define _SUN4C_PAGE_SILENT_WRITE 0x40000000 /* synonym */
-#define _SUN4C_PAGE_PRIV 0x20000000 /* privileged page */
-#define _SUN4C_PAGE_NOCACHE 0x10000000 /* non-cacheable page */
-#define _SUN4C_PAGE_PRESENT 0x08000000 /* implemented in software */
-#define _SUN4C_PAGE_IO 0x04000000 /* I/O page */
-#define _SUN4C_PAGE_FILE 0x02000000 /* implemented in software */
-#define _SUN4C_PAGE_READ 0x00800000 /* implemented in software */
-#define _SUN4C_PAGE_WRITE 0x00400000 /* implemented in software */
-#define _SUN4C_PAGE_ACCESSED 0x00200000 /* implemented in software */
-#define _SUN4C_PAGE_MODIFIED 0x00100000 /* implemented in software */
-
-#define _SUN4C_READABLE (_SUN4C_PAGE_READ|_SUN4C_PAGE_SILENT_READ|\
- _SUN4C_PAGE_ACCESSED)
-#define _SUN4C_WRITEABLE (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE|\
- _SUN4C_PAGE_MODIFIED)
-
-#define _SUN4C_PAGE_CHG_MASK (0xffff|_SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_MODIFIED)
-
-#define SUN4C_PAGE_NONE __pgprot(_SUN4C_PAGE_PRESENT)
-#define SUN4C_PAGE_SHARED __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE|\
- _SUN4C_PAGE_WRITE)
-#define SUN4C_PAGE_COPY __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_READONLY __pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_KERNEL __pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
- _SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
-
-/* SUN4C swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset. This gives us
- * 32 swapfiles of 4GB each. Encoding looks like:
- *
- * RRRRRRRRooooooooooooooooooottttt
- * fedcba9876543210fedcba9876543210
- *
- * The top 8 bits are reserved for protection and status bits, especially
- * FILE and PRESENT.
- */
-#define SUN4C_SWP_TYPE_MASK 0x1f
-#define SUN4C_SWP_OFF_MASK 0x7ffff
-#define SUN4C_SWP_OFF_SHIFT 5
-
-#ifndef __ASSEMBLY__
-
-static inline unsigned long sun4c_get_synchronous_error(void)
-{
- unsigned long sync_err;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (sync_err) :
- "r" (AC_SYNC_ERR), "i" (ASI_CONTROL));
- return sync_err;
-}
-
-static inline unsigned long sun4c_get_synchronous_address(void)
-{
- unsigned long sync_addr;
-
- __asm__ __volatile__("lda [%1] %2, %0\n\t" :
- "=r" (sync_addr) :
- "r" (AC_SYNC_VA), "i" (ASI_CONTROL));
- return sync_addr;
-}
-
-/* SUN4 pte, segmap, and context manipulation */
-static inline unsigned long sun4c_get_segmap(unsigned long addr)
-{
- register unsigned long entry;
-
- __asm__ __volatile__("\n\tlduha [%1] %2, %0\n\t" :
- "=r" (entry) :
- "r" (addr), "i" (ASI_SEGMAP));
- return entry;
-}
-
-static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
-{
- __asm__ __volatile__("\n\tstha %1, [%0] %2; nop; nop; nop;\n\t" : :
- "r" (addr), "r" (entry),
- "i" (ASI_SEGMAP)
- : "memory");
-}
-
-static inline unsigned long sun4c_get_pte(unsigned long addr)
-{
- register unsigned long entry;
-
- __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
- "=r" (entry) :
- "r" (addr), "i" (ASI_PTE));
- return entry;
-}
-
-static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
-{
- __asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
- "r" (addr),
- "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
- : "memory");
-}
-
-static inline int sun4c_get_context(void)
-{
- register int ctx;
-
- __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
- "=r" (ctx) :
- "r" (AC_CONTEXT), "i" (ASI_CONTROL));
-
- return ctx;
-}
-
-static inline int sun4c_set_context(int ctx)
-{
- __asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
- "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
- : "memory");
-
- return ctx;
-}
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_PGTSUN4_H) */
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index 98d6ebb922f..dbfc1a34b3a 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -20,7 +20,6 @@ typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
-typedef unsigned short __kernel_umode_t;
typedef unsigned int __kernel_nlink_t;
typedef int __kernel_daddr_t;
typedef long __kernel_off_t;
@@ -55,7 +54,6 @@ typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
-typedef unsigned short __kernel_umode_t;
typedef short __kernel_nlink_t;
typedef long __kernel_daddr_t;
typedef long __kernel_off_t;
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index e49b828a247..aa42fe30d5b 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -143,10 +143,11 @@ struct sigstack {
#define SA_ONSTACK _SV_SSTACK
#define SA_RESTART _SV_INTR
#define SA_ONESHOT _SV_RESET
-#define SA_NOMASK 0x20u
+#define SA_NODEFER 0x20u
#define SA_NOCLDWAIT 0x100u
#define SA_SIGINFO 0x200u
+#define SA_NOMASK SA_NODEFER
#define SIG_BLOCK 0x01 /* for blocking signals */
#define SIG_UNBLOCK 0x02 /* for unblocking signals */
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h
index 9d3fefcff2f..8af1b64168b 100644
--- a/arch/sparc/include/asm/socket.h
+++ b/arch/sparc/include/asm/socket.h
@@ -58,6 +58,9 @@
#define SO_RXQ_OVFL 0x0024
+#define SO_WIFI_STATUS 0x0025
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa575323341..c2a1080cdd3 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -95,7 +95,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
* Observe the order of get_free_pages() in alloc_thread_info_node().
* The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
*/
-#define THREAD_SIZE 8192
+#define THREAD_SIZE (2 * PAGE_SIZE)
/*
* Offsets in thread_info structure, used in assembly code
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
* TIF_NEED_RESCHED */
#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
-#define TIF_FREEZE 11 /* is freezing for suspend */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
_TIF_SIGPENDING | \
_TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#endif /* __KERNEL__ */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be1a53..01d057fe6a3 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 12 is available */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14
-#define TIF_FREEZE 15 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
_TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 91e5a034f98..383d156cde9 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -12,12 +12,6 @@
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
#endif /* defined(__sparc__) */
#endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 7429b47c3ac..381edcd5bc2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
dp->rcv_buf_len = 4096;
- dp->ds_states = kzalloc(sizeof(ds_states_template),
- GFP_KERNEL);
+ dp->ds_states = kmemdup(ds_states_template,
+ sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
- memcpy(dp->ds_states, ds_states_template,
- sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
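The ds.c hunk above (and the prom_common.c one further down) switches from a kzalloc()+memcpy() pair to kmemdup(), which allocates and copies in one step. A minimal sketch of the idiom, with hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_copy_states(const void *tmpl, size_t len, void **out)
{
	void *copy = kmemdup(tmpl, len, GFP_KERNEL);	/* alloc + memcpy in one */

	if (!copy)
		return -ENOMEM;
	*out = copy;	/* caller kfree()s it later */
	return 0;
}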
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b272cda35a0..af5755d20fb 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (!irq)
return -ENOMEM;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
return irq;
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 3739a06a76c..39d8b05201a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -95,12 +95,14 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched() && !cpu_is_offline(cpu))
sparc64_yield(cpu);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 46614807a57..741df916c12 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *new_val;
int err;
- new_val = kmalloc(len, GFP_KERNEL);
+ new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
- memcpy(new_val, val, len);
-
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index fe1e3fc31bc..ffb883ddd0f 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -84,7 +84,7 @@ static void prom_sync_me(void)
prom_printf("PROM SYNC COMMAND...\n");
show_free_areas(0);
- if(current->pid != 0) {
+ if (!is_idle_task(current)) {
local_irq_enable();
sys_sync();
local_irq_disable();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 75607724d29..3b1bd7c5016 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -840,7 +840,7 @@ static void tsb_sync(void *info)
struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
struct mm_struct *mm = info;
- /* It is not valid to test "currrent->active_mm == mm" here.
+ /* It is not valid to test "current->active_mm == mm" here.
*
* The value of "current" is not changed atomically with
* switch_mm(). But that's OK, we just need to check the
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 441521ad8a3..232df994953 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -368,11 +368,11 @@ static unsigned long mmap_rnd(void)
if (current->flags & PF_RANDOMIZE) {
unsigned long val = get_random_int();
if (test_thread_flag(TIF_32BIT))
- rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
+ rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else
- rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
+ rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
}
- return (rnd << PAGE_SHIFT) * 2;
+ return rnd << PAGE_SHIFT;
}
void arch_pick_mmap_layout(struct mm_struct *mm)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 7408201d7ef..654e8aad3bb 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -3,7 +3,7 @@
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#include <linux/sched.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
@@ -16,13 +16,13 @@
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
#define SHOW_MMUSTAT_ULONG(NAME) \
-static ssize_t show_##NAME(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
return sprintf(buf, "%lu\n", p->NAME); \
} \
-static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
+static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
@@ -58,38 +58,38 @@ SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
static struct attribute *mmu_stat_attrs[] = {
- &attr_immu_tsb_hits_ctx0_8k_tte.attr,
- &attr_immu_tsb_ticks_ctx0_8k_tte.attr,
- &attr_immu_tsb_hits_ctx0_64k_tte.attr,
- &attr_immu_tsb_ticks_ctx0_64k_tte.attr,
- &attr_immu_tsb_hits_ctx0_4mb_tte.attr,
- &attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
- &attr_immu_tsb_hits_ctx0_256mb_tte.attr,
- &attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
- &attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
- &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
- &attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
- &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
- &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
- &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
- &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
- &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
- &attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
- &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
- &attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
- &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
- &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
- &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
- &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
- &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
- &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
- &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
- &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
- &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
- &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
- &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
- &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
- &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
+ &dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
+ &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
+ &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
+ &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
+ &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
+ &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
+ &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
+ &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
+ &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
+ &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
+ &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
NULL,
};
@@ -139,15 +139,15 @@ static unsigned long write_mmustat_enable(unsigned long val)
return sun4v_mmustat_conf(ra, &orig_ra);
}
-static ssize_t show_mmustat_enable(struct sys_device *s,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mmustat_enable(struct device *s,
+ struct device_attribute *attr, char *buf)
{
unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
return sprintf(buf, "%lx\n", val);
}
-static ssize_t store_mmustat_enable(struct sys_device *s,
- struct sysdev_attribute *attr, const char *buf,
+static ssize_t store_mmustat_enable(struct device *s,
+ struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val, err;
@@ -163,39 +163,39 @@ static ssize_t store_mmustat_enable(struct sys_device *s,
return count;
}
-static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
+static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
static int mmu_stats_supported;
-static int register_mmu_stats(struct sys_device *s)
+static int register_mmu_stats(struct device *s)
{
if (!mmu_stats_supported)
return 0;
- sysdev_create_file(s, &attr_mmustat_enable);
+ device_create_file(s, &dev_attr_mmustat_enable);
return sysfs_create_group(&s->kobj, &mmu_stat_group);
}
#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_mmu_stats(struct sys_device *s)
+static void unregister_mmu_stats(struct device *s)
{
if (!mmu_stats_supported)
return;
sysfs_remove_group(&s->kobj, &mmu_stat_group);
- sysdev_remove_file(s, &attr_mmustat_enable);
+ device_remove_file(s, &dev_attr_mmustat_enable);
}
#endif
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
-static ssize_t show_##NAME(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
cpuinfo_sparc *c = &cpu_data(dev->id); \
return sprintf(buf, "%lu\n", c->MEMBER); \
}
#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
-static ssize_t show_##NAME(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
cpuinfo_sparc *c = &cpu_data(dev->id); \
return sprintf(buf, "%u\n", c->MEMBER); \
@@ -209,14 +209,14 @@ SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
-static struct sysdev_attribute cpu_core_attrs[] = {
- _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
- _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
- _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
- _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
- _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
- _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
- _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
+static struct device_attribute cpu_core_attrs[] = {
+ __ATTR(clock_tick, 0444, show_clock_tick, NULL),
+ __ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
+ __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
+ __ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
+ __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
+ __ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
+ __ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
};
static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -224,11 +224,11 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
+ struct device *s = &c->dev;
int i;
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
- sysdev_create_file(s, &cpu_core_attrs[i]);
+ device_create_file(s, &cpu_core_attrs[i]);
register_mmu_stats(s);
}
@@ -237,12 +237,12 @@ static void register_cpu_online(unsigned int cpu)
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
+ struct device *s = &c->dev;
int i;
unregister_mmu_stats(s);
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
- sysdev_remove_file(s, &cpu_core_attrs[i]);
+ device_remove_file(s, &cpu_core_attrs[i]);
}
#endif
diff --git a/arch/sparc/lib/atomic_32.S b/arch/sparc/lib/atomic_32.S
index 178cbb8ae1b..eb6c7359cbd 100644
--- a/arch/sparc/lib/atomic_32.S
+++ b/arch/sparc/lib/atomic_32.S
@@ -40,60 +40,5 @@ ___xchg32_sun4md:
mov %g4, %o7
#endif
- /* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
- * Really, some things here for SMP are overly clever, go read the header.
- */
- .globl ___atomic24_add
-___atomic24_add:
- rd %psr, %g3 ! Keep the code small, old way was stupid
- nop; nop; nop; ! Let the bits set
- or %g3, PSR_PIL, %g7 ! Disable interrupts
- wr %g7, 0x0, %psr ! Set %psr
- nop; nop; nop; ! Let the bits set
-#ifdef CONFIG_SMP
-1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 1b ! Nope...
- ld [%g1], %g7 ! Load locked atomic24_t
- sra %g7, 8, %g7 ! Get signed 24-bit integer
- add %g7, %g2, %g2 ! Add in argument
- sll %g2, 8, %g7 ! Transpose back to atomic24_t
- st %g7, [%g1] ! Clever: This releases the lock as well.
-#else
- ld [%g1], %g7 ! Load locked atomic24_t
- add %g7, %g2, %g2 ! Add in argument
- st %g2, [%g1] ! Store it back
-#endif
- wr %g3, 0x0, %psr ! Restore original PSR_PIL
- nop; nop; nop; ! Let the bits set
- jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
- mov %g4, %o7 ! Restore %o7
-
- .globl ___atomic24_sub
-___atomic24_sub:
- rd %psr, %g3 ! Keep the code small, old way was stupid
- nop; nop; nop; ! Let the bits set
- or %g3, PSR_PIL, %g7 ! Disable interrupts
- wr %g7, 0x0, %psr ! Set %psr
- nop; nop; nop; ! Let the bits set
-#ifdef CONFIG_SMP
-1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 1b ! Nope...
- ld [%g1], %g7 ! Load locked atomic24_t
- sra %g7, 8, %g7 ! Get signed 24-bit integer
- sub %g7, %g2, %g2 ! Subtract argument
- sll %g2, 8, %g7 ! Transpose back to atomic24_t
- st %g7, [%g1] ! Clever: This releases the lock as well
-#else
- ld [%g1], %g7 ! Load locked atomic24_t
- sub %g7, %g2, %g2 ! Subtract argument
- st %g2, [%g1] ! Store it back
-#endif
- wr %g3, 0x0, %psr ! Restore original PSR_PIL
- nop; nop; nop; ! Let the bits set
- jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
- mov %g4, %o7 ! Restore %o7
-
.globl __atomic_end
__atomic_end:
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1b30bb3bfdb..f73c2240fe6 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -62,8 +62,6 @@ extern void ___rw_read_enter(void);
extern void ___rw_read_try(void);
extern void ___rw_read_exit(void);
extern void ___rw_write_enter(void);
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);
/* Alias functions whose names begin with "." and export the aliases.
* The module references will be fixed up by module_frob_arch_sections.
@@ -97,10 +95,6 @@ EXPORT_SYMBOL(___rw_read_exit);
EXPORT_SYMBOL(___rw_write_enter);
#endif
-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2f482..8a7f81743c1 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
- else if ((insn & 0x80002000) == 0x80002000 &&
- (insn & 0x01800000) != 0x01800000) /* %LO */
+ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 8e073d80213..b3f5e7dfea5 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -790,7 +790,7 @@ static int find_node(unsigned long addr)
return -1;
}
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
@@ -808,7 +808,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
return start;
}
#else
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = 0;
return end;
@@ -816,7 +816,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
#endif
/* This must be invoked after performing all of the necessary
- * add_active_range() calls for 'nid'. We need to be able to get
+ * memblock_set_node() calls for 'nid'. We need to be able to get
* correct data from get_pfn_range_for_nid().
*/
static void __init allocate_node_data(int nid)
@@ -987,14 +987,11 @@ static void __init add_node_ranges(void)
this_end = memblock_nid_range(start, end, &nid);
- numadbg("Adding active range nid[%d] "
+ numadbg("Setting memblock NUMA node nid[%d] "
"start[%lx] end[%lx]\n",
nid, start, this_end);
- add_active_range(nid,
- start >> PAGE_SHIFT,
- this_end >> PAGE_SHIFT);
-
+ memblock_set_node(start, this_end - start, nid);
start = this_end;
}
}
@@ -1282,7 +1279,6 @@ static void __init bootmem_init_nonnuma(void)
{
unsigned long top_of_ram = memblock_end_of_DRAM();
unsigned long total_ram = memblock_phys_mem_size();
- struct memblock_region *reg;
numadbg("bootmem_init_nonnuma()\n");
@@ -1292,20 +1288,8 @@ static void __init bootmem_init_nonnuma(void)
(top_of_ram - total_ram) >> 20);
init_node_masks_nonnuma();
-
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
-
- if (!reg->size)
- continue;
-
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- add_active_range(0, start_pfn, end_pfn);
- }
-
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
allocate_node_data(0);
-
node_set_online(0);
}
@@ -1769,8 +1753,6 @@ void __init paging_init(void)
sun4v_ktsb_init();
}
- memblock_init();
-
/* Find available physical memory...
*
* Read it twice in order to work around a bug in openfirmware.
@@ -1796,7 +1778,7 @@ void __init paging_init(void)
memblock_enforce_memory_limit(cmdline_memory_size);
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
set_bit(0, mmu_context_bmap);
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 94e9a511de8..f80f8ceabc6 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -74,16 +74,6 @@ enum {
*/
void tile_irq_activate(unsigned int irq, int tile_irq_type);
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core. We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
void setup_irq_regs(void);
#endif /* _ASM_TILE_IRQ_H */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index aa0134db2dd..02e62806501 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* Remove an irq from the disabled mask. If we're in an interrupt
* context, defer enabling the HW interrupt until we leave.
*/
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+ get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
if (__get_cpu_var(irq_depth) == 0)
- unmask_irqs(1UL << irq);
+ unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(enable_percpu_irq);
/*
* Add an irq to the disabled mask. We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
* in an interrupt context, the return path is careful to avoid
* unmasking a newly disabled interrupt.
*/
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) |= (1UL << irq);
- mask_irqs(1UL << irq);
+ get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+ mask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(disable_percpu_irq);
/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
static struct irq_chip tile_irq_chip = {
.name = "tile_irq_chip",
+ .irq_enable = tile_irq_chip_enable,
+ .irq_disable = tile_irq_chip_disable,
.irq_ack = tile_irq_chip_ack,
.irq_eoi = tile_irq_chip_eoi,
.irq_mask = tile_irq_chip_mask,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658f2ce426a..b3ed19f8779 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 1b6244e69de..25567934a21 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/sections.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 9c45d8bbdf5..4c1ac6e5347 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -85,7 +85,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
if (cpu_is_offline(cpu))
BUG(); /* no HOTPLUG_CPU */
@@ -105,7 +106,8 @@ void cpu_idle(void)
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index b671a86f451..f862b005eb7 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -14,10 +14,11 @@
* /sys entry support.
*/
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/stat.h>
#include <hv/hypervisor.h>
/* Return a string queried from the hypervisor, truncated to page size. */
@@ -31,55 +32,55 @@ static ssize_t get_hv_confstr(char *page, int query)
return n;
}
-static ssize_t chip_width_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t chip_width_show(struct device *dev,
+ struct device_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", smp_width);
}
-static SYSDEV_CLASS_ATTR(chip_width, 0444, chip_width_show, NULL);
+static DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL);
-static ssize_t chip_height_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t chip_height_show(struct device *dev,
+ struct device_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", smp_height);
}
-static SYSDEV_CLASS_ATTR(chip_height, 0444, chip_height_show, NULL);
+static DEVICE_ATTR(chip_height, 0444, chip_height_show, NULL);
-static ssize_t chip_serial_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t chip_serial_show(struct device *dev,
+ struct device_attribute *attr,
char *page)
{
return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
}
-static SYSDEV_CLASS_ATTR(chip_serial, 0444, chip_serial_show, NULL);
+static DEVICE_ATTR(chip_serial, 0444, chip_serial_show, NULL);
-static ssize_t chip_revision_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t chip_revision_show(struct device *dev,
+ struct device_attribute *attr,
char *page)
{
return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
}
-static SYSDEV_CLASS_ATTR(chip_revision, 0444, chip_revision_show, NULL);
+static DEVICE_ATTR(chip_revision, 0444, chip_revision_show, NULL);
-static ssize_t type_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr,
char *page)
{
return sprintf(page, "tilera\n");
}
-static SYSDEV_CLASS_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR(type, 0444, type_show, NULL);
#define HV_CONF_ATTR(name, conf) \
- static ssize_t name ## _show(struct sysdev_class *dev, \
- struct sysdev_class_attribute *attr, \
+ static ssize_t name ## _show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
return get_hv_confstr(page, conf); \
} \
- static SYSDEV_CLASS_ATTR(name, 0444, name ## _show, NULL);
+ static DEVICE_ATTR(name, 0444, name ## _show, NULL);
HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER)
HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER)
@@ -95,15 +96,15 @@ HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
static struct attribute *board_attrs[] = {
- &attr_board_part.attr,
- &attr_board_serial.attr,
- &attr_board_revision.attr,
- &attr_board_description.attr,
- &attr_mezz_part.attr,
- &attr_mezz_serial.attr,
- &attr_mezz_revision.attr,
- &attr_mezz_description.attr,
- &attr_switch_control.attr,
+ &dev_attr_board_part.attr,
+ &dev_attr_board_serial.attr,
+ &dev_attr_board_revision.attr,
+ &dev_attr_board_description.attr,
+ &dev_attr_mezz_part.attr,
+ &dev_attr_mezz_serial.attr,
+ &dev_attr_mezz_revision.attr,
+ &dev_attr_mezz_description.attr,
+ &dev_attr_switch_control.attr,
NULL
};
@@ -150,12 +151,11 @@ hvconfig_bin_read(struct file *filp, struct kobject *kobj,
static int __init create_sysfs_entries(void)
{
- struct sysdev_class *cls = &cpu_sysdev_class;
int err = 0;
#define create_cpu_attr(name) \
if (!err) \
- err = sysfs_create_file(&cls->kset.kobj, &attr_##name.attr);
+ err = device_create_file(cpu_subsys.dev_root, &dev_attr_##name);
create_cpu_attr(chip_width);
create_cpu_attr(chip_height);
create_cpu_attr(chip_serial);
@@ -163,7 +163,7 @@ static int __init create_sysfs_entries(void)
#define create_hv_attr(name) \
if (!err) \
- err = sysfs_create_file(hypervisor_kobj, &attr_##name.attr);
+ err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name);
create_hv_attr(type);
create_hv_attr(version);
create_hv_attr(config_version);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index a87d2a859ba..2a81d32de0d 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 25b7b90fd62..c1eaaa1fcc2 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -54,7 +54,7 @@ static noinline void force_sig_info_fault(const char *type, int si_signo,
if (unlikely(tsk->pid < 2)) {
panic("Signal %d (code %d) at %#lx sent to %s!",
si_signo, si_code & 0xffff, address,
- tsk->pid ? "init" : "the idle task");
+ is_idle_task(tsk) ? "the idle task" : "init");
}
info.si_signo = si_signo;
@@ -515,7 +515,7 @@ no_context:
if (unlikely(tsk->pid < 2)) {
panic("Kernel page fault running %s!",
- tsk->pid ? "init" : "the idle task");
+ is_idle_task(tsk) ? "the idle task" : "init");
}
/*
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cbe6f4f9eca..1cc6ae477c9 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
VM_BUG_ON(!virt_addr_valid((void *)addr));
page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
- int pages = (1 << order);
homecache_change_page_home(page, order, initial_page_home());
- while (pages--)
- __free_page(page++);
+ if (order == 0) {
+ free_hot_cold_page(page, 0);
+ } else {
+ init_page_count(page);
+ __free_pages(page, order);
+ }
}
}
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad33fa..200c4ab1240 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 6
#define TIF_RESTORE_SIGMASK 7
-#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5338351aec..69f24905abd 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -246,10 +246,12 @@ void default_idle(void)
if (need_resched())
schedule();
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
nsecs = disable_timer();
idle_sleep(nsecs);
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
}
}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index a08d9fab81f..82a6e22f1f3 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -75,8 +75,6 @@ static struct clocksource itimer_clocksource = {
.rating = 300,
.read = itimer_read,
.mask = CLOCKSOURCE_MASK(64),
- .mult = 1000,
- .shift = 0,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -94,9 +92,9 @@ static void __init setup_itimer(void)
clockevent_delta2ns(60 * HZ, &itimer_clockevent);
itimer_clockevent.min_delta_ns =
clockevent_delta2ns(1, &itimer_clockevent);
- err = clocksource_register(&itimer_clocksource);
+ err = clocksource_register_hz(&itimer_clocksource, USEC_PER_SEC);
if (err) {
- printk(KERN_ERR "clocksource_register returned %d\n", err);
+ printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
return;
}
clockevents_register_device(&itimer_clockevent);
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
index 1a5c5a5eb39..adddf6d6407 100644
--- a/arch/unicore32/include/asm/io.h
+++ b/arch/unicore32/include/asm/io.h
@@ -37,15 +37,9 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
*/
#define ioremap(cookie, size) __uc32_ioremap(cookie, size)
#define ioremap_cached(cookie, size) __uc32_ioremap_cached(cookie, size)
+#define ioremap_nocache(cookie, size) __uc32_ioremap(cookie, size)
#define iounmap(cookie) __uc32_iounmap(cookie)
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#undef xlate_dev_mem_ptr
-#define xlate_dev_mem_ptr(p) __va(p)
-
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET (unsigned int)(PCI_IOBASE)
#define PIO_MASK (unsigned int)(IO_SPACE_LIMIT)
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e0486..89f7557583b 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_MEMDIE 18
-#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
/*
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index ba401df971e..52edc2b6287 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -55,7 +55,8 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
local_irq_disable();
stop_critical_timings();
@@ -63,7 +64,8 @@ void cpu_idle(void)
local_irq_enable();
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/unicore32/kernel/puv3-core.c b/arch/unicore32/kernel/puv3-core.c
index 1a505a78776..254adeecc61 100644
--- a/arch/unicore32/kernel/puv3-core.c
+++ b/arch/unicore32/kernel/puv3-core.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
index e731c561ed4..181108b8ecc 100644
--- a/arch/unicore32/kernel/puv3-nb0916.c
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/io.h>
@@ -124,7 +123,7 @@ int __init mach_nb0916_init(void)
if (request_irq(gpio_to_irq(GPI_LCD_CASE_OFF),
&nb0916_lcdcaseoff_handler,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"NB0916 lcd case off", NULL) < 0) {
printk(KERN_DEBUG "LCD-Case-OFF IRQ %d not available\n",
@@ -132,7 +131,7 @@ int __init mach_nb0916_init(void)
}
if (request_irq(gpio_to_irq(GPI_OTP_INT), &nb0916_overheat_handler,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"NB0916 overheating protection", NULL) < 0) {
printk(KERN_DEBUG "Overheating Protection IRQ %d not available\n",
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 471b6bca8da..87adbf5ebfe 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
+#include <asm/memblock.h>
#include "setup.h"
@@ -64,7 +65,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
*/
static struct resource mem_res[] = {
{
- .name = "Kernel text",
+ .name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
index b163fca5678..911b549a6df 100644
--- a/arch/unicore32/kernel/signal.c
+++ b/arch/unicore32/kernel/signal.c
@@ -63,10 +63,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0) {
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
}
err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
@@ -321,6 +318,7 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
+ sigset_t blocked;
int usig = sig;
int ret;
@@ -372,13 +370,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
/*
* Block the signal if we were successful.
*/
- spin_lock_irq(&tsk->sighand->siglock);
- sigorsets(&tsk->blocked, &tsk->blocked,
- &ka->sa.sa_mask);
+ sigorsets(&blocked, &tsk->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&tsk->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
+ sigaddset(&blocked, sig);
+ set_current_blocked(&blocked);
return 0;
}
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
index 080710c0924..d3824b2ff64 100644
--- a/arch/unicore32/kernel/time.c
+++ b/arch/unicore32/kernel/time.c
@@ -86,7 +86,7 @@ static struct clocksource cksrc_puv3_oscr = {
static struct irqaction puv3_timer_irq = {
.name = "ost0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = puv3_ost0_interrupt,
.dev_id = &ckevt_puv3_osmr0,
};
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 3b379cddbc6..de186bde897 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -26,6 +26,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/memblock.h>
#include <mach/map.h>
#include "mm.h"
@@ -245,7 +246,6 @@ void __init uc32_memblock_init(struct meminfo *mi)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
meminfo_cmp, NULL);
- memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -264,7 +264,7 @@ void __init uc32_memblock_init(struct meminfo *mi)
uc32_mm_memblock_reserve();
- memblock_analyze();
+ memblock_allow_resize();
memblock_dump_all();
}
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 3e5c3e5a0b4..43c20b40e44 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -25,6 +25,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/memblock.h>
#include <mach/map.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 08af6457de7..1a31254ceb8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,8 @@ config X86
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
@@ -60,6 +62,7 @@ config X86
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
+ select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_ARCH_JUMP_LABEL
select HAVE_TEXT_POKE_SMP
select HAVE_GENERIC_HARDIRQS
@@ -202,9 +205,6 @@ config ZONE_DMA32
bool
default X86_64
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config AUDIT_ARCH
bool
default X86_64
@@ -341,6 +341,7 @@ config X86_EXTENDED_PLATFORM
If you enable this option then you'll be able to select support
for the following (non-PC) 64 bit x86 platforms:
+ Numascale NumaChip
ScaleMP vSMP
SGI Ultraviolet
@@ -349,6 +350,18 @@ config X86_EXTENDED_PLATFORM
endif
# This is an alphabetically sorted list of 64 bit extended platforms
# Please maintain the alphabetic order if and when there are additions
+config X86_NUMACHIP
+ bool "Numascale NumaChip"
+ depends on X86_64
+ depends on X86_EXTENDED_PLATFORM
+ depends on NUMA
+ depends on SMP
+ depends on X86_X2APIC
+ depends on !EDAC_AMD64
+ ---help---
+ Adds support for Numascale NumaChip large-SMP systems. Needed to
+ enable more than ~168 cores.
+ If you don't have one of these, you should say N here.
config X86_VSMP
bool "ScaleMP vSMP"
@@ -388,7 +401,7 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
@@ -397,7 +410,10 @@ config X86_INTEL_MID
systems which do not have the PCI legacy interfaces (Moorestown,
Medfield). If you are building for a PC class system say N here.
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+ bool
config X86_MRST
bool "Moorestown MID platform"
@@ -409,6 +425,7 @@ config X86_MRST
select SPI
select INTEL_SCU_IPC
select X86_PLATFORM_DEVICES
+ select X86_INTEL_MID
---help---
Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
Internet Device(MID) platform. Moorestown consists of two chips:
@@ -1724,7 +1741,7 @@ source "drivers/sfi/Kconfig"
config X86_APM_BOOT
def_bool y
- depends on APM || APM_MODULE
+ depends on APM
menuconfig APM
tristate "APM (Advanced Power Management) BIOS support"
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index a6253ec1b28..3e274564f6b 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -134,7 +134,7 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rsp,0
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
+ movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
CFI_REGISTER rip,r10
pushq_cfi $__USER32_CS
/*CFI_REL_OFFSET cs,0*/
@@ -150,9 +150,8 @@ ENTRY(ia32_sysenter_target)
.section __ex_table,"a"
.quad 1b,ia32_badarg
.previous
- GET_THREAD_INFO(%r10)
- orl $TS_COMPAT,TI_status(%r10)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
@@ -162,13 +161,12 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
- GET_THREAD_INFO(%r10)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz sysexit_audit
sysexit_from_sys_call:
- andl $~TS_COMPAT,TI_status(%r10)
+ andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
@@ -205,7 +203,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
@@ -215,12 +213,11 @@ sysexit_from_sys_call:
movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
call audit_syscall_exit
- GET_THREAD_INFO(%r10)
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
cli
TRACE_IRQS_OFF
- testl %edi,TI_flags(%r10)
+ testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
@@ -238,7 +235,7 @@ sysexit_audit:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz sysenter_auditsys
#endif
SAVE_REST
@@ -309,9 +306,8 @@ ENTRY(ia32_cstar_target)
.section __ex_table,"a"
.quad 1b,ia32_badarg
.previous
- GET_THREAD_INFO(%r10)
- orl $TS_COMPAT,TI_status(%r10)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
@@ -321,13 +317,12 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
- GET_THREAD_INFO(%r10)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz sysretl_audit
sysretl_from_sys_call:
- andl $~TS_COMPAT,TI_status(%r10)
+ andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
RESTORE_ARGS 0,-ARG_SKIP,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
@@ -355,7 +350,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
@@ -420,9 +415,8 @@ ENTRY(ia32_syscall)
/* note the registers are not zero extended to the sf.
this could be a problem. */
SAVE_ARGS 0,1,0
- GET_THREAD_INFO(%r10)
- orl $TS_COMPAT,TI_status(%r10)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
@@ -459,8 +453,8 @@ quiet_ni_syscall:
CFI_ENDPROC
.macro PTREGSCALL label, func, arg
- .globl \label
-\label:
+ ALIGN
+GLOBAL(\label)
leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ia32_ptregs_common
@@ -477,7 +471,8 @@ quiet_ni_syscall:
PTREGSCALL stub32_vfork, sys_vfork, %rdi
PTREGSCALL stub32_iopl, sys_iopl, %rsi
-ENTRY(ia32_ptregs_common)
+ ALIGN
+ia32_ptregs_common:
popq %r11
CFI_ENDPROC
CFI_STARTPROC32 simple
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 091508b533b..952bd0100c5 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -4,10 +4,10 @@
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
-1: lock
+672: lock
.section .smp_locks,"a"
.balign 4
- .long 1b - .
+ .long 672b - .
.previous
.endm
#else
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1a6c09af048..3ab9bdd87e7 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -176,6 +176,7 @@ static inline u64 native_x2apic_icr_read(void)
}
extern int x2apic_phys;
+extern int x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
@@ -198,6 +199,9 @@ static inline void x2apic_force_phys(void)
x2apic_phys = 1;
}
#else
+static inline void disable_x2apic(void)
+{
+}
static inline void check_x2apic(void)
{
}
@@ -212,6 +216,7 @@ static inline void x2apic_force_phys(void)
{
}
+#define nox2apic 0
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
@@ -410,6 +415,7 @@ extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
+
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
diff --git a/arch/x86/include/asm/apic_flat_64.h b/arch/x86/include/asm/apic_flat_64.h
new file mode 100644
index 00000000000..a2d31279644
--- /dev/null
+++ b/arch/x86/include/asm/apic_flat_64.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_APIC_FLAT_64_H
+#define _ASM_X86_APIC_FLAT_64_H
+
+extern void flat_init_apic_ldr(void);
+
+#endif
+
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 3925d800786..134bba00df0 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -144,6 +144,7 @@
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
+#define XAPIC_ENABLE (1UL << 11)
#define X2APIC_ENABLE (1UL << 10)
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 1775d6e5920..b97596e2b68 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -380,6 +380,8 @@ static inline unsigned long __fls(unsigned long word)
return word;
}
+#undef ADDR
+
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
@@ -395,10 +397,25 @@ static inline unsigned long __fls(unsigned long word)
static inline int ffs(int x)
{
int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+ /*
+ * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
+ * dest reg is undefined if x==0, but their CPU architect says its
+ * value is written to set it to the same as before, except that the
+ * top 32 bits will be cleared.
+ *
+ * We cannot do this on 32 bits because at the very least some
+ * 486 CPUs did not behave this way.
+ */
+ long tmp = -1;
+ asm("bsfl %1,%0"
+ : "=r" (r)
+ : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
+ : "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
@@ -422,7 +439,22 @@ static inline int ffs(int x)
static inline int fls(int x)
{
int r;
-#ifdef CONFIG_X86_CMOV
+
+#ifdef CONFIG_X86_64
+ /*
+ * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
+ * dest reg is undefined if x==0, but their CPU architect says its
+ * value is written to set it to the same as before, except that the
+ * top 32 bits will be cleared.
+ *
+ * We cannot do this on 32 bits because at the very least some
+ * 486 CPUs did not behave this way.
+ */
+ long tmp = -1;
+ asm("bsrl %1,%0"
+ : "=r" (r)
+ : "rm" (x), "0" (tmp));
+#elif defined(CONFIG_X86_CMOV)
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
@@ -434,11 +466,35 @@ static inline int fls(int x)
#endif
return r + 1;
}
-#endif /* __KERNEL__ */
-
-#undef ADDR
-#ifdef __KERNEL__
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#ifdef CONFIG_X86_64
+static __always_inline int fls64(__u64 x)
+{
+ long bitpos = -1;
+ /*
+ * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
+ * dest reg is undefined if x==0, but their CPU architect says its
+ * value is written to set it to the same as before.
+ */
+ asm("bsrq %1,%0"
+ : "+r" (bitpos)
+ : "rm" (x));
+ return bitpos + 1;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
#include <asm-generic/bitops/find.h>
@@ -450,12 +506,6 @@ static inline int fls(int x)
#include <asm-generic/bitops/const_hweight.h>
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls64.h>
-
-#ifdef __KERNEL__
-
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf5a7a..0c9fa2745f1 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+ __compiletime_error("Bad argument size for add");
/*
* Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
#define __X86_CASE_Q -1 /* sizeof will never return -1 */
#endif
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns a the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock) \
+ ({ \
+ __typeof__ (*(ptr)) __ret = (arg); \
+ switch (sizeof(*(ptr))) { \
+ case __X86_CASE_B: \
+ asm volatile (lock #op "b %b0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_W: \
+ asm volatile (lock #op "w %w0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_L: \
+ asm volatile (lock #op "l %0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_Q: \
+ asm volatile (lock #op "q %q0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ default: \
+ __ ## op ## _wrong_size(); \
+ } \
+ __ret; \
+ })
+
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
* Since this is generally used to protect other memory information, we
* use "asm volatile" and "memory" clobbers to prevent gcc from moving
* information around.
*/
-#define __xchg(x, ptr, size) \
-({ \
- __typeof(*(ptr)) __x = (x); \
- switch (size) { \
- case __X86_CASE_B: \
- { \
- volatile u8 *__ptr = (volatile u8 *)(ptr); \
- asm volatile("xchgb %0,%1" \
- : "=q" (__x), "+m" (*__ptr) \
- : "0" (__x) \
- : "memory"); \
- break; \
- } \
- case __X86_CASE_W: \
- { \
- volatile u16 *__ptr = (volatile u16 *)(ptr); \
- asm volatile("xchgw %0,%1" \
- : "=r" (__x), "+m" (*__ptr) \
- : "0" (__x) \
- : "memory"); \
- break; \
- } \
- case __X86_CASE_L: \
- { \
- volatile u32 *__ptr = (volatile u32 *)(ptr); \
- asm volatile("xchgl %0,%1" \
- : "=r" (__x), "+m" (*__ptr) \
- : "0" (__x) \
- : "memory"); \
- break; \
- } \
- case __X86_CASE_Q: \
- { \
- volatile u64 *__ptr = (volatile u64 *)(ptr); \
- asm volatile("xchgq %0,%1" \
- : "=r" (__x), "+m" (*__ptr) \
- : "0" (__x) \
- : "memory"); \
- break; \
- } \
- default: \
- __xchg_wrong_size(); \
- } \
- __x; \
-})
-
-#define xchg(ptr, v) \
- __xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -165,46 +154,80 @@ extern void __xadd_wrong_size(void)
__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
-#define __xadd(ptr, inc, lock) \
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock) \
({ \
__typeof__ (*(ptr)) __ret = (inc); \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
- asm volatile (lock "xaddb %b0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
- : : "memory", "cc"); \
+ asm volatile (lock "addb %b1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
break; \
case __X86_CASE_W: \
- asm volatile (lock "xaddw %w0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
- : : "memory", "cc"); \
+ asm volatile (lock "addw %w1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
break; \
case __X86_CASE_L: \
- asm volatile (lock "xaddl %0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
- : : "memory", "cc"); \
+ asm volatile (lock "addl %1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
break; \
case __X86_CASE_Q: \
- asm volatile (lock "xaddq %q0, %1\n" \
- : "+r" (__ret), "+m" (*(ptr)) \
- : : "memory", "cc"); \
+ asm volatile (lock "addq %1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
break; \
default: \
- __xadd_wrong_size(); \
+ __add_wrong_size(); \
} \
__ret; \
})
/*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
*
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
*/
-#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
+
+#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
+({ \
+ bool __ret; \
+ __typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \
+ __typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \
+ BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
+ BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
+ VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
+ VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
+ asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
+ : "=a" (__ret), "+d" (__old2), \
+ "+m" (*(p1)), "+m" (*(p2)) \
+ : "i" (2 * sizeof(long)), "a" (__old1), \
+ "b" (__new1), "c" (__new2)); \
+ __ret; \
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double(, p1, p2, o1, o2, n1, n2)
#endif /* ASM_X86_CMPXCHG_H */
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index fbebb07dd80..53f4b219336 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -166,52 +166,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
#endif
-#define cmpxchg8b(ptr, o1, o2, n1, n2) \
-({ \
- char __ret; \
- __typeof__(o2) __dummy; \
- __typeof__(*(ptr)) __old1 = (o1); \
- __typeof__(o2) __old2 = (o2); \
- __typeof__(*(ptr)) __new1 = (n1); \
- __typeof__(o2) __new2 = (n2); \
- asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
- : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
- : "a" (__old1), "d"(__old2), \
- "b" (__new1), "c" (__new2) \
- : "memory"); \
- __ret; })
-
-
-#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
-({ \
- char __ret; \
- __typeof__(o2) __dummy; \
- __typeof__(*(ptr)) __old1 = (o1); \
- __typeof__(o2) __old2 = (o2); \
- __typeof__(*(ptr)) __new1 = (n1); \
- __typeof__(o2) __new2 = (n2); \
- asm volatile("cmpxchg8b %2; setz %1" \
- : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
- : "a" (__old), "d"(__old2), \
- "b" (__new1), "c" (__new2), \
- : "memory"); \
- __ret; })
-
-
-#define cmpxchg_double(ptr, o1, o2, n1, n2) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- VM_BUG_ON((unsigned long)(ptr) % 8); \
- cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
-})
-
-#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- VM_BUG_ON((unsigned long)(ptr) % 8); \
- cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
-})
-
#define system_has_cmpxchg_double() cpu_has_cx8
#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 285da02c38f..614be87f1a9 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -20,49 +20,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg16b(ptr, o1, o2, n1, n2) \
-({ \
- char __ret; \
- __typeof__(o2) __junk; \
- __typeof__(*(ptr)) __old1 = (o1); \
- __typeof__(o2) __old2 = (o2); \
- __typeof__(*(ptr)) __new1 = (n1); \
- __typeof__(o2) __new2 = (n2); \
- asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
- : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
- : "b"(__new1), "c"(__new2), \
- "a"(__old1), "d"(__old2)); \
- __ret; })
-
-
-#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
-({ \
- char __ret; \
- __typeof__(o2) __junk; \
- __typeof__(*(ptr)) __old1 = (o1); \
- __typeof__(o2) __old2 = (o2); \
- __typeof__(*(ptr)) __new1 = (n1); \
- __typeof__(o2) __new2 = (n2); \
- asm volatile("cmpxchg16b %2;setz %1" \
- : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
- : "b"(__new1), "c"(__new2), \
- "a"(__old1), "d"(__old2)); \
- __ret; })
-
-#define cmpxchg_double(ptr, o1, o2, n1, n2) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- VM_BUG_ON((unsigned long)(ptr) % 16); \
- cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
-})
-
-#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
-({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- VM_BUG_ON((unsigned long)(ptr) % 16); \
- cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
-})
-
#define system_has_cmpxchg_double() cpu_has_cx16
#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f3444f700f3..17c5d4bdee5 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -197,7 +197,10 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9a2d644c08e..ced283ac79d 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_X86_32
#include <linux/types.h>
+#include <linux/log2.h>
/*
* do_div() is NOT a C function. It wants to return
@@ -21,15 +22,20 @@
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
- asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
- __upper = __high; \
- if (__high) { \
- __upper = __high % (__base); \
- __high = __high / (__base); \
+ if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
+ __mod = n & (__base - 1); \
+ n >>= ilog2(__base); \
+ } else { \
+ asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
+ __upper = __high; \
+ if (__high) { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm("divl %2" : "=a" (__low), "=d" (__mod) \
+ : "rm" (__base), "0" (__low), "1" (__upper)); \
+ asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
} \
- asm("divl %2":"=a" (__low), "=d" (__mod) \
- : "rm" (__base), "0" (__low), "1" (__upper)); \
- asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 908b96957d8..37782566af2 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -117,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+extern u64 early_reserve_e820(u64 sizet, u64 align);
void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 55e4de613f0..da0b3ca815b 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -11,6 +11,7 @@ typedef struct {
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
+ unsigned int icr_read_retry_count;
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c9e09ea0564..6919e936345 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
-#define safe_address (kstat_cpu(0).cpustat.user)
+#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
#endif
/*
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 88c765e1641..74df3f1eddf 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -137,6 +137,13 @@ static inline int insn_is_avx(struct insn *insn)
return (insn->vex_prefix.value != 0);
}
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 4420993acc4..925b605eb5c 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -3,11 +3,15 @@
#include <linux/notifier.h>
-#define IPCMSG_VRTC 0xFA /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
+#define IPCMSG_WARM_RESET 0xF0
+#define IPCMSG_COLD_RESET 0xF1
+#define IPCMSG_SOFT_RESET 0xF2
+#define IPCMSG_COLD_BOOT 0xF3
+
+#define IPCMSG_VRTC 0xFA /* Set vRTC device */
+ /* Command id associated with message IPCMSG_VRTC */
+ #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
+ #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
/* Read single register */
int intel_scu_ipc_ioread8(u16 addr, u8 *data);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 345c99cef15..dffc38ee625 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,6 +5,7 @@ extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
+extern int iommu_group_mf;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index a026507893e..ab4092e3214 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+ int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
#endif
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4973f4dab9..52d6640a5ca 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -16,10 +16,12 @@
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
+#include <linux/irq_work.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
+#include <linux/perf_event.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -31,6 +33,8 @@
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
#define KVM_MMIO_SIZE 16
#define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
* One bit set per slot which has memory
* in this shadow page.
*/
- DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+ DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
int clear_spte_count;
#endif
- struct rcu_head rcu;
-};
+ int write_flooding_count;
-struct kvm_pv_mmu_op_buffer {
- void *ptr;
- unsigned len;
- unsigned processed;
- char buf[512] __aligned(sizeof(long));
+ struct rcu_head rcu;
};
struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */
};
+enum pmc_type {
+ KVM_PMC_GP = 0,
+ KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+ enum pmc_type type;
+ u8 idx;
+ u64 counter;
+ u64 eventsel;
+ struct perf_event *perf_event;
+ struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+ unsigned nr_arch_gp_counters;
+ unsigned nr_arch_fixed_counters;
+ unsigned available_event_types;
+ u64 fixed_ctr_ctrl;
+ u64 global_ctrl;
+ u64 global_status;
+ u64 global_ovf_ctrl;
+ u64 counter_bitmask[2];
+ u64 global_ctrl_mask;
+ u8 version;
+ struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+ struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+ struct irq_work irq_work;
+ u64 reprogram_pmi;
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu *walk_mmu;
- /* only needed in kvm_pv_mmu_op() path, but it's hot so
- * put it here to avoid allocation */
- struct kvm_pv_mmu_op_buffer mmu_op_buffer;
-
struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
- gfn_t last_pt_write_gfn;
- int last_pt_write_count;
- u64 *last_pte_updated;
- gfn_t last_pte_gfn;
-
struct fpu guest_fpu;
u64 xcr0;
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
unsigned access;
gfn_t mmio_gfn;
+ struct kvm_pmu pmu;
+
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
cpumask_var_t wbinvd_dirty_mask;
+ unsigned long last_retry_eip;
+ unsigned long last_retry_addr;
+
struct {
bool halted;
gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
- atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+ struct kvm_memory_slot *slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
- gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
+#define EMULTYPE_RETRY (1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes,
- bool guest_initiated);
+ const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+ return gpa;
+}
+
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae..88d0c3c74c1 100644
--- a/arch/x86/include/asm/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
@@ -15,7 +15,7 @@
#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
#define CALIBRATE_LATCH \
- ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
+ ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
static inline void mach_prepare_counter(void)
{
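
The only functional change in this hunk is the clock-rate symbol; the round-to-nearest division is unchanged. A stand-alone check of the resulting latch value, assuming the conventional 1193182 Hz i8253 input clock for PIT_TICK_RATE:

#include <stdio.h>

#define PIT_TICK_RATE       1193182UL   /* assumed i8253 input clock, Hz */
#define CALIBRATE_TIME_MSEC 30

int main(void)
{
        /* (rate * msec + 1000/2) / 1000 rounds to the nearest PIT tick */
        unsigned long latch = (PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000 / 2) / 1000;

        printf("CALIBRATE_LATCH = %lu ticks for a %d ms window\n",
               latch, CALIBRATE_TIME_MSEC);     /* prints 35795 */
        return 0;
}
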
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 01fdf5674e2..0e8e85bb7c5 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -81,8 +81,8 @@ static inline unsigned char current_lock_cmos_reg(void)
#else
#define lock_cmos_prefix(reg) do {} while (0)
#define lock_cmos_suffix(reg) do {} while (0)
-#define lock_cmos(reg)
-#define unlock_cmos()
+#define lock_cmos(reg) do { } while (0)
+#define unlock_cmos() do { } while (0)
#define do_i_have_lock_cmos() 0
#define current_lock_cmos_reg() 0
#endif
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 0e8ae57d365..f35ce43c1a7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -50,10 +50,11 @@
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
-#define MCJ_CTX_PROCESS 1 /* inject context: process */
-#define MCJ_CTX_IRQ 2 /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST 4 /* do NMI broadcasting */
-#define MCJ_EXCEPTION 8 /* raise as exception */
+#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
+#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
+#define MCJ_EXCEPTION 0x8 /* raise as exception */
+#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
/* Fields are zero when not available */
struct mce {
@@ -120,7 +121,8 @@ struct mce_log {
#ifdef __KERNEL__
-extern struct atomic_notifier_head x86_mce_decoder_chain;
+extern void mce_register_decode_chain(struct notifier_block *nb);
+extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/percpu.h>
#include <linux/init.h>
@@ -149,7 +151,7 @@ static inline void enable_p5_mce(void) {}
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct sys_device, mce_sysdev);
+DECLARE_PER_CPU(struct device, mce_device);
/*
* Maximum banks number.
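
With the notifier head no longer exported, decoders now go through the register/unregister helpers above. A minimal, hypothetical module sketch of such a consumer (the demo_* names are illustrative and not part of this patch):

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

/* Invoked for each logged MCE once hooked onto the decode chain. */
static int demo_mce_notify(struct notifier_block *nb,
                           unsigned long val, void *data)
{
        struct mce *m = data;

        pr_info("demo: MCE on CPU %d, bank %u, status %016llx\n",
                m->extcpu, m->bank, m->status);
        return NOTIFY_OK;
}

static struct notifier_block demo_mce_nb = {
        .notifier_call  = demo_mce_notify,
};

static int __init demo_init(void)
{
        mce_register_decode_chain(&demo_mce_nb);
        return 0;
}

static void __exit demo_exit(void)
{
        mce_unregister_decode_chain(&demo_mce_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
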
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
deleted file mode 100644
index 0cd3800f33b..00000000000
--- a/arch/x86/include/asm/memblock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-
-#define ARCH_DISCARD_MEMBLOCK
-
-u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-struct range;
-int __get_free_all_memory_range(struct range **range, int nodeid,
- unsigned long start_pfn, unsigned long end_pfn);
-int get_free_all_memory_range(struct range **rangep, int nodeid);
-
-void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn);
-u64 memblock_x86_hole_size(u64 start, u64 end);
-u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
-u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
-u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
-bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
-
-#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 24215072d0e..4ebe157bf73 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -48,6 +48,7 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
+extern void __exit exit_amd_microcode(void);
static inline void get_ucode_data(void *to, const u8 *from, size_t n)
{
@@ -59,6 +60,7 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
{
return NULL;
}
+static inline void __exit exit_amd_microcode(void) {}
#endif
#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index e6283129c82..0a0a9546043 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
};
extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
static inline enum mrst_cpu_type mrst_identify_cpu(void)
{
return __mrst_cpu_chip;
}
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
enum mrst_timer_options {
MRST_TIMER_DEFAULT,
MRST_TIMER_APBT_ONLY,
@@ -58,7 +67,7 @@ extern struct console early_mrst_console;
extern void mrst_early_console_init(void);
extern struct console early_hsu_console;
-extern void hsu_early_console_init(void);
+extern void hsu_early_console_init(const char *);
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274c..95203d40ffd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
return native_write_msr_safe(msr, low, high);
}
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
#define rdmsr_safe(msr, p1, p2) \
({ \
int __err; \
diff --git a/arch/x86/include/asm/numachip/numachip_csr.h b/arch/x86/include/asm/numachip/numachip_csr.h
new file mode 100644
index 00000000000..660f843df92
--- /dev/null
+++ b/arch/x86/include/asm/numachip/numachip_csr.h
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-Specific Header file
+ *
+ * Copyright (C) 2011 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
+#include <linux/swab.h>
+#include <asm/types.h>
+#include <asm/processor.h>
+
+#define CSR_NODE_SHIFT 16
+#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT)
+#define CSR_NODE_MASK 0x0fff /* 4K nodes */
+
+/* 32K CSR space, b15 indicates geo/non-geo */
+#define CSR_OFFSET_MASK 0x7fffUL
+
+/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */
+#define NUMACHIP_GCSR_BASE 0x3fff00000000ULL
+#define NUMACHIP_GCSR_LIM 0x3fff0fffffffULL
+#define NUMACHIP_GCSR_SIZE (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
+
+/*
+ * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
+ * when using the direct mapping on x86_64, both start and size needs to be
+ * aligned with PMD_SIZE which is 2M
+ */
+#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL
+#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL
+#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
+
+static inline void *gcsr_address(int node, unsigned long offset)
+{
+ return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
+ CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline void *lcsr_address(unsigned long offset)
+{
+ return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |
+ CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline unsigned int read_gcsr(int node, unsigned long offset)
+{
+ return swab32(readl(gcsr_address(node, offset)));
+}
+
+static inline void write_gcsr(int node, unsigned long offset, unsigned int val)
+{
+ writel(swab32(val), gcsr_address(node, offset));
+}
+
+static inline unsigned int read_lcsr(unsigned long offset)
+{
+ return swab32(readl(lcsr_address(offset)));
+}
+
+static inline void write_lcsr(unsigned long offset, unsigned int val)
+{
+ writel(swab32(val), lcsr_address(offset));
+}
+
+/* ========================================================================= */
+/* CSR_G0_STATE_CLEAR */
+/* ========================================================================= */
+
+#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
+union numachip_csr_g0_state_clear {
+ unsigned int v;
+ struct numachip_csr_g0_state_clear_s {
+ unsigned int _state:2;
+ unsigned int _rsvd_2_6:5;
+ unsigned int _lost:1;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G0_NODE_IDS */
+/* ========================================================================= */
+
+#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
+union numachip_csr_g0_node_ids {
+ unsigned int v;
+ struct numachip_csr_g0_node_ids_s {
+ unsigned int _initialid:16;
+ unsigned int _nodeid:12;
+ unsigned int _rsvd_28_31:4;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_GEN */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
+union numachip_csr_g3_ext_irq_gen {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_gen_s {
+ unsigned int _vector:8;
+ unsigned int _msgtype:3;
+ unsigned int _index:5;
+ unsigned int _destination_apic_id:16;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_STATUS */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
+union numachip_csr_g3_ext_irq_status {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_status_s {
+ unsigned int _result:32;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_DEST */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
+union numachip_csr_g3_ext_irq_dest {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_dest_s {
+ unsigned int _irq:8;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_NC_ATT_MAP_SELECT */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
+union numachip_csr_g3_nc_att_map_select {
+ unsigned int v;
+ struct numachip_csr_g3_nc_att_map_select_s {
+ unsigned int _upper_address_bits:4;
+ unsigned int _select_ram:4;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_NC_ATT_MAP_SELECT_0-255 */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
+
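
The accessors in this new header only compose an address (base | geo bit | node field | offset) and byte-swap the 32-bit register value. A small user-space sketch of the address arithmetic alone, mirroring gcsr_address() up to the __va() translation:

#include <stdio.h>
#include <stdint.h>

#define CSR_NODE_SHIFT      16
#define CSR_NODE_MASK       0x0fff
#define CSR_OFFSET_MASK     0x7fffUL
#define NUMACHIP_GCSR_BASE  0x3fff00000000ULL
#define CSR_G0_NODE_IDS     (0x008 + (0 << 12))

/* Physical address of a global CSR: base | geo bit | node field | offset. */
static uint64_t gcsr_phys(int node, unsigned long offset)
{
        return NUMACHIP_GCSR_BASE | (1ULL << 15) |
               ((uint64_t)(node & CSR_NODE_MASK) << CSR_NODE_SHIFT) |
               (offset & CSR_OFFSET_MASK);
}

int main(void)
{
        printf("node 3, CSR_G0_NODE_IDS -> %#llx\n",
               (unsigned long long)gcsr_phys(3, CSR_G0_NODE_IDS)); /* 0x3fff00038008 */
        return 0;
}
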
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3470c9d0ebb..7a11910a63c 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,22 +414,6 @@ do { \
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-
#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
@@ -445,29 +429,22 @@ do { \
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */
#ifdef CONFIG_X86_CMPXCHG64
-#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2) \
+#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
- char __ret; \
- typeof(o1) __o1 = o1; \
- typeof(o1) __n1 = n1; \
- typeof(o2) __o2 = o2; \
- typeof(o2) __n2 = n2; \
- typeof(o2) __dummy = n2; \
+ bool __ret; \
+ typeof(pcp1) __o1 = (o1), __n1 = (n1); \
+ typeof(pcp2) __o2 = (o2), __n2 = (n2); \
asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
- : "=a"(__ret), "=m" (pcp1), "=d"(__dummy) \
- : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2)); \
+ : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
+ : "b" (__n1), "c" (__n2), "a" (__o1)); \
__ret; \
})
-#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define __this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
+#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
/*
@@ -495,44 +472,28 @@ do { \
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-
/*
* Pretty complex macro to generate cmpxchg16 instruction. The instruction
* is not supported on early AMD64 processors so we must be able to emulate
* it in software. The address used in the cmpxchg16 instruction must be
* aligned to a 16 byte boundary.
*/
-#ifdef CONFIG_SMP
-#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
-#else
-#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
-#endif
-#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2) \
+#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
- char __ret; \
- typeof(o1) __o1 = o1; \
- typeof(o1) __n1 = n1; \
- typeof(o2) __o2 = o2; \
- typeof(o2) __n2 = n2; \
- typeof(o2) __dummy; \
- alternative_io(CMPXCHG16B_EMU_CALL, \
- "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \
+ bool __ret; \
+ typeof(pcp1) __o1 = (o1), __n1 = (n1); \
+ typeof(pcp2) __o2 = (o2), __n2 = (n2); \
+ alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
+ "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
X86_FEATURE_CX16, \
- ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \
- "S" (&pcp1), "b"(__n1), "c"(__n2), \
- "a"(__o1), "d"(__o2) : "memory"); \
+ ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \
+ "+m" (pcp2), "+d" (__o2)), \
+ "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
__ret; \
})
-#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define __this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
+#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
#endif
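
The double-width helpers above wrap cmpxchg8b/cmpxchg16b for two adjacent per-cpu words. As a rough user-space analogue of the underlying instruction (not the per-cpu API itself), GCC's generic atomics can emit cmpxchg16b on a 16-byte-aligned pair when built with -mcx16 (and possibly -latomic, depending on the toolchain):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two adjacent 64-bit words updated as one unit, as cmpxchg16b does. */
struct pair {
        uint64_t lo;
        uint64_t hi;
} __attribute__((aligned(16)));         /* cmpxchg16b needs 16-byte alignment */

static bool pair_cmpxchg(struct pair *p, struct pair old, struct pair new)
{
        return __atomic_compare_exchange(p, &old, &new, false,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
        struct pair p   = { 1, 2 };
        struct pair old = { 1, 2 };
        struct pair new = { 3, 4 };

        printf("swap %s, pair is now {%llu, %llu}\n",
               pair_cmpxchg(&p, old, new) ? "ok" : "failed",
               (unsigned long long)p.lo, (unsigned long long)p.hi);
        return 0;
}
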
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f61c62f7d5d..096c975e099 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -57,6 +57,7 @@
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
+#define ARCH_PERFMON_EVENTS_COUNT 7
/*
* Intel "Architectural Performance Monitoring" CPUID
@@ -72,6 +73,19 @@ union cpuid10_eax {
unsigned int full;
};
+union cpuid10_ebx {
+ struct {
+ unsigned int no_unhalted_core_cycles:1;
+ unsigned int no_instructions_retired:1;
+ unsigned int no_unhalted_reference_cycles:1;
+ unsigned int no_llc_reference:1;
+ unsigned int no_llc_misses:1;
+ unsigned int no_branch_instruction_retired:1;
+ unsigned int no_branch_misses_retired:1;
+ } split;
+ unsigned int full;
+};
+
union cpuid10_edx {
struct {
unsigned int num_counters_fixed:5;
@@ -81,6 +95,15 @@ union cpuid10_edx {
unsigned int full;
};
+struct x86_pmu_capability {
+ int version;
+ int num_counters_gp;
+ int num_counters_fixed;
+ int bit_width_gp;
+ int bit_width_fixed;
+ unsigned int events_mask;
+ int events_mask_len;
+};
/*
* Fixed-purpose performance events:
@@ -89,23 +112,24 @@ union cpuid10_edx {
/*
* All 3 fixed-mode PMCs are configured via this single MSR:
*/
-#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
/*
* The counts are available in three separate MSRs:
*/
/* Instr_Retired.Any: */
-#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
-#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
+#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
-#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
-#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
+#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
+#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
+#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
/*
* We model BTS tracing as another fixed-mode PMC.
@@ -202,6 +226,7 @@ struct perf_guest_switch_msr {
};
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
+extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
#else
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
@@ -209,6 +234,11 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
return NULL;
}
+static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+ memset(cap, 0, sizeof(*cap));
+}
+
static inline void perf_events_lapic_init(void) { }
#endif
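
The new cpuid10_ebx union and x86_pmu_capability repackage CPUID leaf 0xA, which perf_get_x86_pmu_capability() exposes to consumers elsewhere in this series. A user-space sketch of the same leaf decode (the EAX bit layout is assumed to match the kernel's cpuid10_eax):

#include <stdio.h>
#include <cpuid.h>

/* Assumed to mirror the kernel's union cpuid10_eax bit layout. */
union leaf_a_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        union leaf_a_eax a;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0xA not available");
                return 1;
        }

        a.full = eax;
        printf("arch PMU v%u: %u GP counters, %u bits wide\n",
               a.split.version_id, a.split.num_counters, a.split.bit_width);
        printf("EBX availability mask %#x (%u valid bits)\n",
               ebx, a.split.mask_length);
        return 0;
}
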
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18601c86fab..49afb3f41eb 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -703,7 +703,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
pte_update(mm, addr, ptep);
}
-#define flush_tlb_fix_spurious_fault(vma, address)
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 2dddb317bb3..f8ab3eaad12 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -6,6 +6,7 @@
* EFLAGS bits
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b650435ffb5..aa9088c2693 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
u16 apicid;
u16 initial_apicid;
u16 x86_clflush_size;
-#ifdef CONFIG_SMP
/* number of cores as seen by the OS: */
u16 booted_cores;
/* Physical processor id: */
@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
-#endif
u32 microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 972c260919a..a82c2bf504b 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
-#if (NR_CPUS < 256)
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
- asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
- : "+m" (lock->head_tail)
- :
- : "memory", "cc");
+ __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
- asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
- : "+m" (lock->head_tail)
- :
- : "memory", "cc");
-}
-#endif
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
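
Both NR_CPUS variants collapse into a single increment of tickets.head with the correct operand width. The head/tail ticket idea in isolation, as a user-space sketch with C11 atomics (not the kernel's arch_spinlock_t):

#include <stdatomic.h>
#include <stdio.h>

/*
 * Ticket lock: take a ticket from tail, spin until head matches, and
 * unlock by bumping head - the same idea the unified __ticket_spin_unlock
 * now expresses with a single __add on lock->tickets.head.
 */
struct ticket_lock {
        atomic_uint head;
        atomic_uint tail;
};

static void ticket_lock(struct ticket_lock *l)
{
        unsigned int me = atomic_fetch_add(&l->tail, 1);

        while (atomic_load(&l->head) != me)
                ;       /* spin (a real lock would cpu_relax() here) */
}

static void ticket_unlock(struct ticket_lock *l)
{
        atomic_fetch_add(&l->head, 1);  /* release the next waiter */
}

int main(void)
{
        struct ticket_lock l = { 0, 0 };

        ticket_lock(&l);
        puts("in critical section");
        ticket_unlock(&l);
        return 0;
}
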
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c2ff2a1d845..2d2f01ce6dc 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
+bool set_pm_idle_to_default(void);
void stop_this_cpu(void *dummy);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a1fe5c127b5..74047159d0a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,7 +40,8 @@ struct thread_info {
*/
__u8 supervisor_stack[0];
#endif
- int uaccess_err;
+ int sig_on_uaccess_error:1;
+ int uaccess_err:1; /* uaccess failed */
};
#define INIT_THREAD_INFO(tsk) \
@@ -90,7 +91,6 @@ struct thread_info {
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_DEBUG 21 /* uses debug registers */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-#define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
@@ -112,7 +112,6 @@ struct thread_info {
#define _TIF_FORK (1 << TIF_FORK)
#define _TIF_DEBUG (1 << TIF_DEBUG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
@@ -231,6 +230,12 @@ static inline struct thread_info *current_thread_info(void)
movq PER_CPU_VAR(kernel_stack),reg ; \
subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+/*
+ * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
+ * a certain register (to be used in assembler memory operands).
+ */
+#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+
#endif
#endif /* !X86_32 */
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fa7b9176b76..431793e5d48 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -32,6 +32,22 @@ extern int no_timer_check;
* (mathieu.desnoyers@polymtl.ca)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC. Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ * - sqazi@google.com
*/
DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
+ unsigned long long quot;
+ unsigned long long rem;
int cpu = smp_processor_id();
unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+ quot = (cyc >> CYC2NS_SCALE_FACTOR);
+ rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+ ns += quot * per_cpu(cyc2ns, cpu) +
+ ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
return ns;
}
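
The comment's point is purely about intermediate overflow: cyc * cyc2ns can exceed 64 bits even when the final ns value fits. A stand-alone demonstration of the decomposition, assuming the kernel's 10-bit scale factor:

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10  /* assumed, matches the kernel's value */

/* naive form: the product cyc * scale wraps modulo 2^64 before the shift */
static uint64_t cyc2ns_naive(uint64_t cyc, uint64_t scale)
{
        return (cyc * scale) >> CYC2NS_SCALE_FACTOR;
}

/* split cyc into quotient/remainder of 2^CYC2NS_SCALE_FACTOR first */
static uint64_t cyc2ns_split(uint64_t cyc, uint64_t scale)
{
        uint64_t quot = cyc >> CYC2NS_SCALE_FACTOR;
        uint64_t rem  = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);

        return quot * scale + ((rem * scale) >> CYC2NS_SCALE_FACTOR);
}

int main(void)
{
        uint64_t cyc   = 0xffffffffffffULL;     /* large cycle count */
        uint64_t scale = 1000000;               /* hypothetical cyc2ns value */

        printf("naive: %llu\n", (unsigned long long)cyc2ns_naive(cyc, scale));
        printf("split: %llu\n", (unsigned long long)cyc2ns_split(cyc, scale));
        return 0;
}
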
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c00692476e9..800f77c6005 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -130,10 +130,8 @@ extern void setup_node_to_cpumask_map(void);
.balance_interval = 1, \
}
-#ifdef CONFIG_X86_64
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
-#endif
#else /* !CONFIG_NUMA */
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 83e2efd181e..15d99153a96 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,6 +51,8 @@ extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern unsigned long native_calibrate_tsc(void);
+extern int tsc_clocksource_reliable;
+
/*
* Boot-time check whether the TSCs are synchronized across
* all CPUs/cores:
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 36361bf6fdd..8be5f54d936 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -462,7 +462,7 @@ struct __large_struct { unsigned long buf[100]; };
barrier();
#define uaccess_catch(err) \
- (err) |= current_thread_info()->uaccess_err; \
+ (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
current_thread_info()->uaccess_err = prev_err; \
} while (0)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 10474fb1185..cf1d73643f6 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -57,6 +57,7 @@
#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
+#define UV2_HUB_PART_NUMBER_X 0x1111
/* Compat: if this #define is present, UV headers support UV2 */
#define UV2_HUB_IS_SUPPORTED 1
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 1971e652d24..1ac860a0984 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -7,6 +7,7 @@
struct mpc_bus;
struct mpc_cpu;
struct mpc_table;
+struct cpuinfo_x86;
/**
* struct x86_init_mpparse - platform specific mpparse ops
@@ -147,6 +148,7 @@ struct x86_init_ops {
*/
struct x86_cpuinit_ops {
void (*setup_percpu_clockev)(void);
+ void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
};
/**
@@ -186,5 +188,6 @@ extern struct x86_msi_ops x86_msi;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
+extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
#endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4558f0d0822..ce664f33ea8 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -219,6 +219,8 @@ static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
+ int apic_id;
+ u8 enabled;
processor = (struct acpi_madt_local_x2apic *)header;
@@ -227,6 +229,8 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
+ apic_id = processor->local_apic_id;
+ enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
#ifdef CONFIG_X86_X2APIC
/*
* We need to register disabled CPU as well to permit
@@ -235,8 +239,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
* to not preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
- acpi_register_lapic(processor->local_apic_id, /* APIC ID */
- processor->lapic_flags & ACPI_MADT_ENABLED);
+ if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled)
+ printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ else
+ acpi_register_lapic(apic_id, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4c39baa8fac..013c1810ce7 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -123,16 +123,14 @@ int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
unsigned int mask;
- int cuid = 0;
+ int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
return 0;
pci_read_config_dword(link, 0x1d4, &mask);
-#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
-#endif
return (mask >> (4 * cuid)) & 0xf;
}
@@ -141,7 +139,7 @@ int amd_set_subcaches(int cpu, int mask)
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
unsigned int reg;
- int cuid = 0;
+ int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
return -EINVAL;
@@ -159,9 +157,7 @@ int amd_set_subcaches(int cpu, int mask)
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}
-#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
-#endif
mask <<= 4 * cuid;
mask |= (0xf ^ (1 << cuid)) << 26;
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3d2661ca654..6e76c191a83 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -88,13 +88,13 @@ static u32 __init allocate_aperture(void)
*/
addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
aper_size, aper_size);
- if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
+ if (!addr || addr + aper_size > GART_MAX_ADDR) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%lx,%uK)\n",
addr, aper_size>>10);
return 0;
}
- memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+ memblock_reserve(addr, aper_size);
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 767fd04f284..0ae0323b1f9 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
# APIC probe will depend on the listing order here
+obj-$(CONFIG_X86_NUMACHIP) += apic_numachip.o
obj-$(CONFIG_X86_UV) += x2apic_uv_x.o
obj-$(CONFIG_X86_X2APIC) += x2apic_phys.o
obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f98d84caf94..2eec05b6d1b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -146,16 +146,26 @@ __setup("apicpmtimer", setup_apicpmtimer);
int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/* x2apic enabled before OS handover */
-static int x2apic_preenabled;
+int x2apic_preenabled;
+static int x2apic_disabled;
+static int nox2apic;
static __init int setup_nox2apic(char *str)
{
if (x2apic_enabled()) {
- pr_warning("Bios already enabled x2apic, "
- "can't enforce nox2apic");
- return 0;
- }
+ int apicid = native_apic_msr_read(APIC_ID);
+
+ if (apicid >= 255) {
+ pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
+ apicid);
+ return 0;
+ }
+
+ pr_warning("x2apic already enabled. will disable it\n");
+ } else
+ setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+
+ nox2apic = 1;
- setup_clear_cpu_cap(X86_FEATURE_X2APIC);
return 0;
}
early_param("nox2apic", setup_nox2apic);
@@ -250,6 +260,7 @@ u32 native_safe_apic_wait_icr_idle(void)
send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
if (!send_status)
break;
+ inc_irq_stat(icr_read_retry_count);
udelay(100);
} while (timeout++ < 1000);
@@ -876,8 +887,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
- exit_idle();
irq_enter();
+ exit_idle();
local_apic_timer_interrupt();
irq_exit();
@@ -1431,6 +1442,45 @@ void __init bsp_end_local_APIC_setup(void)
}
#ifdef CONFIG_X86_X2APIC
+/*
+ * Need to disable xapic and x2apic at the same time and then enable xapic mode
+ */
+static inline void __disable_x2apic(u64 msr)
+{
+ wrmsrl(MSR_IA32_APICBASE,
+ msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
+ wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
+}
+
+static __init void disable_x2apic(void)
+{
+ u64 msr;
+
+ if (!cpu_has_x2apic)
+ return;
+
+ rdmsrl(MSR_IA32_APICBASE, msr);
+ if (msr & X2APIC_ENABLE) {
+ u32 x2apic_id = read_apic_id();
+
+ if (x2apic_id >= 255)
+ panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+
+ pr_info("Disabling x2apic\n");
+ __disable_x2apic(msr);
+
+ if (nox2apic) {
+ clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
+ setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+ }
+
+ x2apic_disabled = 1;
+ x2apic_mode = 0;
+
+ register_lapic_address(mp_lapic_addr);
+ }
+}
+
void check_x2apic(void)
{
if (x2apic_enabled()) {
@@ -1441,15 +1491,20 @@ void check_x2apic(void)
void enable_x2apic(void)
{
- int msr, msr2;
+ u64 msr;
+
+ rdmsrl(MSR_IA32_APICBASE, msr);
+ if (x2apic_disabled) {
+ __disable_x2apic(msr);
+ return;
+ }
if (!x2apic_mode)
return;
- rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
printk_once(KERN_INFO "Enabling x2apic\n");
- wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2);
+ wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
}
}
#endif /* CONFIG_X86_X2APIC */
@@ -1486,25 +1541,34 @@ void __init enable_IR_x2apic(void)
ret = save_ioapic_entries();
if (ret) {
pr_info("Saving IO-APIC state failed: %d\n", ret);
- goto out;
+ return;
}
local_irq_save(flags);
legacy_pic->mask_all();
mask_ioapic_entries();
+ if (x2apic_preenabled && nox2apic)
+ disable_x2apic();
+
if (dmar_table_init_ret)
ret = -1;
else
ret = enable_IR();
+ if (!x2apic_supported())
+ goto skip_x2apic;
+
if (ret < 0) {
/* IR is required if there is APIC ID > 255 even when running
* under KVM
*/
if (max_physical_apicid > 255 ||
- !hypervisor_x2apic_available())
- goto nox2apic;
+ !hypervisor_x2apic_available()) {
+ if (x2apic_preenabled)
+ disable_x2apic();
+ goto skip_x2apic;
+ }
/*
* without IR all CPUs can be addressed by IOAPIC/MSI
* only in physical mode
@@ -1512,8 +1576,10 @@ void __init enable_IR_x2apic(void)
x2apic_force_phys();
}
- if (ret == IRQ_REMAP_XAPIC_MODE)
- goto nox2apic;
+ if (ret == IRQ_REMAP_XAPIC_MODE) {
+ pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
+ goto skip_x2apic;
+ }
x2apic_enabled = 1;
@@ -1523,22 +1589,11 @@ void __init enable_IR_x2apic(void)
pr_info("Enabled x2apic\n");
}
-nox2apic:
+skip_x2apic:
if (ret < 0) /* IR enabling failed */
restore_ioapic_entries();
legacy_pic->restore_mask();
local_irq_restore(flags);
-
-out:
- if (x2apic_enabled || !x2apic_supported())
- return;
-
- if (x2apic_preenabled)
- panic("x2apic: enabled by BIOS but kernel init failed.");
- else if (ret == IRQ_REMAP_XAPIC_MODE)
- pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
- else if (ret < 0)
- pr_info("x2apic not enabled, IRQ remapping init failed\n");
}
#ifdef CONFIG_X86_64
@@ -1809,8 +1864,8 @@ void smp_spurious_interrupt(struct pt_regs *regs)
{
u32 v;
- exit_idle();
irq_enter();
+ exit_idle();
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
@@ -1846,8 +1901,8 @@ void smp_error_interrupt(struct pt_regs *regs)
"Illegal register address", /* APIC Error Bit 7 */
};
- exit_idle();
irq_enter();
+ exit_idle();
/* First tickle the hardware, only then report what went on. -- REW */
v0 = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index f7a41e4cae4..8c3cdded6f2 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -62,7 +62,7 @@ static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
-static void flat_init_apic_ldr(void)
+void flat_init_apic_ldr(void)
{
unsigned long val;
unsigned long num, id;
@@ -171,9 +171,14 @@ static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
return initial_apic_id >> index_msb;
}
+static int flat_probe(void)
+{
+ return 1;
+}
+
static struct apic apic_flat = {
.name = "flat",
- .probe = NULL,
+ .probe = flat_probe,
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
.apic_id_registered = flat_apic_id_registered,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
new file mode 100644
index 00000000000..09d3d8c1cd9
--- /dev/null
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -0,0 +1,294 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-Specific APIC Code
+ *
+ * Copyright (C) 2011 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+
+#include <asm/numachip/numachip_csr.h>
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <asm/apic_flat_64.h>
+
+static int numachip_system __read_mostly;
+
+static struct apic apic_numachip __read_mostly;
+
+static unsigned int get_apic_id(unsigned long x)
+{
+ unsigned long value;
+ unsigned int id;
+
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ id = ((x >> 24) & 0xffU) | ((value << 2) & 0x3f00U);
+
+ return id;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+ unsigned long x;
+
+ x = ((id & 0xffU) << 24);
+ return x;
+}
+
+static unsigned int read_xapic_id(void)
+{
+ return get_apic_id(apic_read(APIC_ID));
+}
+
+static int numachip_apic_id_registered(void)
+{
+ return physid_isset(read_xapic_id(), phys_cpu_present_map);
+}
+
+static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+ return initial_apic_id >> index_msb;
+}
+
+static const struct cpumask *numachip_target_cpus(void)
+{
+ return cpu_online_mask;
+}
+
+static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+ cpumask_clear(retmask);
+ cpumask_set_cpu(cpu, retmask);
+}
+
+static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+{
+ union numachip_csr_g3_ext_irq_gen int_gen;
+
+ int_gen.s._destination_apic_id = phys_apicid;
+ int_gen.s._vector = 0;
+ int_gen.s._msgtype = APIC_DM_INIT >> 8;
+ int_gen.s._index = 0;
+
+ write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+
+ int_gen.s._msgtype = APIC_DM_STARTUP >> 8;
+ int_gen.s._vector = start_rip >> 12;
+
+ write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+
+ atomic_set(&init_deasserted, 1);
+ return 0;
+}
+
+static void numachip_send_IPI_one(int cpu, int vector)
+{
+ union numachip_csr_g3_ext_irq_gen int_gen;
+ int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
+ int_gen.s._destination_apic_id = apicid;
+ int_gen.s._vector = vector;
+ int_gen.s._msgtype = (vector == NMI_VECTOR ? APIC_DM_NMI : APIC_DM_FIXED) >> 8;
+ int_gen.s._index = 0;
+
+ write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
+}
+
+static void numachip_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask)
+ numachip_send_IPI_one(cpu, vector);
+}
+
+static void numachip_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
+{
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ if (cpu != this_cpu)
+ numachip_send_IPI_one(cpu, vector);
+ }
+}
+
+static void numachip_send_IPI_allbutself(int vector)
+{
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != this_cpu)
+ numachip_send_IPI_one(cpu, vector);
+ }
+}
+
+static void numachip_send_IPI_all(int vector)
+{
+ numachip_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static void numachip_send_IPI_self(int vector)
+{
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+}
+
+static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+ int cpu;
+
+ /*
+ * We're using fixed IRQ delivery, can only return one phys APIC ID.
+ * May as well be the first.
+ */
+ cpu = cpumask_first(cpumask);
+ if (likely((unsigned)cpu < nr_cpu_ids))
+ return per_cpu(x86_cpu_to_apicid, cpu);
+
+ return BAD_APICID;
+}
+
+static unsigned int
+numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
+{
+ int cpu;
+
+ /*
+ * We're using fixed IRQ delivery, can only return one phys APIC ID.
+ * May as well be the first.
+ */
+ for_each_cpu_and(cpu, cpumask, andmask) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask))
+ break;
+ }
+ return per_cpu(x86_cpu_to_apicid, cpu);
+}
+
+static int __init numachip_probe(void)
+{
+ return apic == &apic_numachip;
+}
+
+static void __init map_csrs(void)
+{
+ printk(KERN_INFO "NumaChip: Mapping local CSR space (%016llx - %016llx)\n",
+ NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_BASE + NUMACHIP_LCSR_SIZE - 1);
+ init_extra_mapping_uc(NUMACHIP_LCSR_BASE, NUMACHIP_LCSR_SIZE);
+
+ printk(KERN_INFO "NumaChip: Mapping global CSR space (%016llx - %016llx)\n",
+ NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_BASE + NUMACHIP_GCSR_SIZE - 1);
+ init_extra_mapping_uc(NUMACHIP_GCSR_BASE, NUMACHIP_GCSR_SIZE);
+}
+
+static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
+{
+ c->phys_proc_id = node;
+ per_cpu(cpu_llc_id, smp_processor_id()) = node;
+}
+
+static int __init numachip_system_init(void)
+{
+ unsigned int val;
+
+ if (!numachip_system)
+ return 0;
+
+ x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
+
+ map_csrs();
+
+ val = read_lcsr(CSR_G0_NODE_IDS);
+ printk(KERN_INFO "NumaChip: Local NodeID = %08x\n", val);
+
+ return 0;
+}
+early_initcall(numachip_system_init);
+
+static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ if (!strncmp(oem_id, "NUMASC", 6)) {
+ numachip_system = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct apic apic_numachip __refconst = {
+
+ .name = "NumaConnect system",
+ .probe = numachip_probe,
+ .acpi_madt_oem_check = numachip_acpi_madt_oem_check,
+ .apic_id_registered = numachip_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ .irq_dest_mode = 0, /* physical */
+
+ .target_cpus = numachip_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = 0,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = numachip_vector_allocation_domain,
+ .init_apic_ldr = flat_init_apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = numachip_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xffU << 24,
+
+ .cpu_mask_to_apicid = numachip_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = numachip_send_IPI_mask,
+ .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = numachip_send_IPI_allbutself,
+ .send_IPI_all = numachip_send_IPI_all,
+ .send_IPI_self = numachip_send_IPI_self,
+
+ .wakeup_secondary_cpu = numachip_wakeup_secondary,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .inquire_remote_apic = NULL, /* REMRD not supported */
+
+ .read = native_apic_mem_read,
+ .write = native_apic_mem_write,
+ .icr_read = native_apic_icr_read,
+ .icr_write = native_apic_icr_write,
+ .wait_icr_idle = native_apic_wait_icr_idle,
+ .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+};
+apic_driver(apic_numachip);
+
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d939d7847e..fb072754bc1 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2421,8 +2421,8 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
unsigned vector, me;
ack_APIC_irq();
- exit_idle();
irq_enter();
+ exit_idle();
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -2948,6 +2948,10 @@ static inline void __init check_timer(void)
}
local_irq_disable();
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
+ if (x2apic_preenabled)
+ apic_printk(APIC_QUIET, KERN_INFO
+ "Perhaps problem with the pre-enabled x2apic mode\n"
+ "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
out:
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 62ae3001ae0..9d59bbacd4e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+ if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+ uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 452932d3473..5da1269e8dd 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -62,7 +62,8 @@ early_param("memory_corruption_check_size", set_corruption_check_size);
void __init setup_bios_corruption_check(void)
{
- u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */
+ phys_addr_t start, end;
+ u64 i;
if (memory_corruption_check == -1) {
memory_corruption_check =
@@ -82,28 +83,23 @@ void __init setup_bios_corruption_check(void)
corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
- while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
- u64 size;
- addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
+ for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
+ PAGE_SIZE, corruption_check_size);
+ end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
+ PAGE_SIZE, corruption_check_size);
+ if (start >= end)
+ continue;
- if (addr == MEMBLOCK_ERROR)
- break;
-
- if (addr >= corruption_check_size)
- break;
-
- if ((addr + size) > corruption_check_size)
- size = corruption_check_size - addr;
-
- memblock_x86_reserve_range(addr, addr + size, "SCAN RAM");
- scan_areas[num_scan_areas].addr = addr;
- scan_areas[num_scan_areas].size = size;
- num_scan_areas++;
+ memblock_reserve(start, end - start);
+ scan_areas[num_scan_areas].addr = start;
+ scan_areas[num_scan_areas].size = end - start;
/* Assume we've already mapped this early memory */
- memset(__va(addr), 0, size);
+ memset(__va(start), 0, end - start);
- addr += size;
+ if (++num_scan_areas >= MAX_SCAN_AREAS)
+ break;
}
if (num_scan_areas)
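
The rewritten loop intersects each free memblock region with the [PAGE_SIZE, corruption_check_size) scan window by clamping both ends and skipping empty results (page rounding elided here). The same interval-intersection pattern in isolation, with hypothetical regions and window:

#include <stdio.h>

typedef unsigned long long u64;

/* Clamp val into [lo, hi], like the kernel's clamp_t(). */
static u64 clamp_u64(u64 val, u64 lo, u64 hi)
{
        return val < lo ? lo : val > hi ? hi : val;
}

int main(void)
{
        const u64 page = 4096, window_end = 64 * 1024;  /* hypothetical window */
        /* hypothetical free regions as [start, end) pairs */
        u64 regions[][2] = { { 0, 8192 }, { 60000, 200000 }, { 500000, 600000 } };

        for (unsigned int i = 0; i < 3; i++) {
                u64 start = clamp_u64(regions[i][0], page, window_end);
                u64 end   = clamp_u64(regions[i][1], page, window_end);

                if (start >= end)
                        continue;       /* region lies outside the window */
                printf("would scan [%llu, %llu)\n", start, end);
        }
        return 0;
}
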
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c7e46cb3532..f4773f4aae3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -192,7 +191,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
valid_k7:
;
-#endif
}
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -353,6 +351,13 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
if (node == NUMA_NO_NODE)
node = per_cpu(cpu_llc_id, cpu);
+ /*
+ * If core numbers are inconsistent, it's likely a multi-fabric platform,
+ * so invoke platform-specific handler
+ */
+ if (c->phys_proc_id != node)
+ x86_cpuinit.fixup_cpu_id(c, node);
+
if (!node_online(node)) {
/*
* Two possibilities here:
@@ -442,8 +447,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
- u32 dummy;
-
early_init_amd_mc(c);
/*
@@ -473,12 +476,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
-
- rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
+ u32 dummy;
+
#ifdef CONFIG_SMP
unsigned long long value;
@@ -657,6 +660,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
}
}
+
+ rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e58d978e075..159103c0b1f 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -278,7 +278,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
}
#ifdef CONFIG_X86_32
/* Cyrix III family needs CX8 & PGE explicitly enabled. */
- if (c->x86_model >= 6 && c->x86_model <= 9) {
+ if (c->x86_model >= 6 && c->x86_model <= 13) {
rdmsr(MSR_VIA_FCR, lo, hi);
lo |= (1<<1 | 1<<7);
wrmsr(MSR_VIA_FCR, lo, hi);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index aa003b13a83..850f2963a42 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -676,9 +676,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (this_cpu->c_early_init)
this_cpu->c_early_init(c);
-#ifdef CONFIG_SMP
c->cpu_index = 0;
-#endif
filter_cpuid_features(c, false);
setup_smep(c);
@@ -764,10 +762,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
c->apicid = c->initial_apicid;
# endif
#endif
-
-#ifdef CONFIG_X86_HT
c->phys_proc_id = c->initial_apicid;
-#endif
}
setup_smep(c);
@@ -1141,6 +1136,15 @@ static void dbg_restore_debug_regs(void)
#endif /* ! CONFIG_KGDB */
/*
+ * Prints an error where the NUMA and configured core-number mismatch and the
+ * platform didn't override this to fix it up
+ */
+void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
+{
+ pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
+}
+
+/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 1b22dcc51af..8bacc7826fb 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -1,5 +1,4 @@
#ifndef ARCH_X86_CPU_H
-
#define ARCH_X86_CPU_H
struct cpu_model_info {
@@ -35,6 +34,4 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
-extern void get_cpu_cap(struct cpuinfo_x86 *c);
-
-#endif
+#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 523131213f0..3e6ff6cbf42 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -181,7 +181,6 @@ static void __cpuinit trap_init_f00f_bug(void)
static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -198,7 +197,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
"with B stepping processors.\n");
}
-#endif
}
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a3b0811693c..6b45e5e7a90 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -844,8 +844,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
#include <linux/kobject.h>
#include <linux/sysfs.h>
-
-extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
+#include <linux/cpu.h>
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
@@ -1073,9 +1072,9 @@ err_out:
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct device *dev)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
struct _cpuid4_info *this_leaf;
@@ -1087,7 +1086,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
- &sys_dev->kobj, "%s", "cache");
+ &dev->kobj, "%s", "cache");
if (retval < 0) {
cpuid4_cache_sysfs_exit(cpu);
return retval;
@@ -1124,9 +1123,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
return 0;
}
-static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuinit cache_remove_dev(struct device *dev)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
unsigned long i;
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
@@ -1145,17 +1144,17 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
+ dev = get_cpu_device(cpu);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- cache_add_dev(sys_dev);
+ cache_add_dev(dev);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- cache_remove_dev(sys_dev);
+ cache_remove_dev(dev);
break;
}
return NOTIFY_OK;
@@ -1174,9 +1173,9 @@ static int __cpuinit cache_sysfs_init(void)
for_each_online_cpu(i) {
int err;
- struct sys_device *sys_dev = get_cpu_sysdev(i);
+ struct device *dev = get_cpu_device(i);
- err = cache_add_dev(sys_dev);
+ err = cache_add_dev(dev);
if (err)
return err;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 319882ef848..fc4beb39357 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
+#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
@@ -92,6 +93,18 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
return NMI_HANDLED;
}
+static void mce_irq_ipi(void *info)
+{
+ int cpu = smp_processor_id();
+ struct mce *m = &__get_cpu_var(injectm);
+
+ if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
+ m->inject_flags & MCJ_EXCEPTION) {
+ cpumask_clear_cpu(cpu, mce_inject_cpumask);
+ raise_exception(m, NULL);
+ }
+}
+
/* Inject mce on current CPU */
static int raise_local(void)
{
@@ -139,9 +152,10 @@ static void raise_mce(struct mce *m)
return;
#ifdef CONFIG_X86_LOCAL_APIC
- if (m->inject_flags & MCJ_NMI_BROADCAST) {
+ if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
unsigned long start;
int cpu;
+
get_online_cpus();
cpumask_copy(mce_inject_cpumask, cpu_online_mask);
cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
@@ -151,13 +165,25 @@ static void raise_mce(struct mce *m)
MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
cpumask_clear_cpu(cpu, mce_inject_cpumask);
}
- if (!cpumask_empty(mce_inject_cpumask))
- apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
+ if (!cpumask_empty(mce_inject_cpumask)) {
+ if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
+ /*
+ * don't wait here: mce_irq_ipi() needs to run in
+ * sync with the raise_local() call that follows
+ */
+ preempt_disable();
+ smp_call_function_many(mce_inject_cpumask,
+ mce_irq_ipi, NULL, 0);
+ preempt_enable();
+ } else if (m->inject_flags & MCJ_NMI_BROADCAST)
+ apic->send_IPI_mask(mce_inject_cpumask,
+ NMI_VECTOR);
+ }
start = jiffies;
while (!cpumask_empty(mce_inject_cpumask)) {
if (!time_before(jiffies, start + 2*HZ)) {
printk(KERN_ERR
- "Timeout waiting for mce inject NMI %lx\n",
+ "Timeout waiting for mce inject %lx\n",
*cpumask_bits(mce_inject_cpumask));
break;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index fefcc69ee8b..ed44c8a6585 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,4 +1,4 @@
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <asm/mce.h>
enum severity_level {
@@ -17,7 +17,7 @@ enum severity_level {
struct mce_bank {
u64 ctl; /* subevents to enable */
unsigned char init; /* initialise bank? */
- struct sysdev_attribute attr; /* sysdev attribute */
+ struct device_attribute attr; /* device attribute */
char attrname[ATTR_LEN]; /* attribute name */
};
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2af127d4c3d..f22a9f7f639 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
@@ -95,13 +95,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;
-/*
- * CPU/chipset specific EDAC code can register a notifier call here to print
- * MCE errors in a human-readable form.
- */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
-EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
-
/* MCA banks polled by the period polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
@@ -109,6 +102,12 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
static DEFINE_PER_CPU(struct work_struct, mce_work);
+/*
+ * CPU/chipset specific EDAC code can register a notifier call here to print
+ * MCE errors in a human-readable form.
+ */
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
@@ -119,9 +118,7 @@ void mce_setup(struct mce *m)
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
-#ifdef CONFIG_SMP
m->socketid = cpu_data(m->extcpu).phys_proc_id;
-#endif
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
@@ -190,6 +187,57 @@ void mce_log(struct mce *mce)
set_bit(0, &mce_need_notify);
}
+static void drain_mcelog_buffer(void)
+{
+ unsigned int next, i, prev = 0;
+
+ next = rcu_dereference_check_mce(mcelog.next);
+
+ do {
+ struct mce *m;
+
+ /* drain what was logged during boot */
+ for (i = prev; i < next; i++) {
+ unsigned long start = jiffies;
+ unsigned retries = 1;
+
+ m = &mcelog.entry[i];
+
+ while (!m->finished) {
+ if (time_after_eq(jiffies, start + 2*retries))
+ retries++;
+
+ cpu_relax();
+
+ if (!m->finished && retries >= 4) {
+ pr_err("MCE: skipping error being logged currently!\n");
+ break;
+ }
+ }
+ smp_rmb();
+ atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+ }
+
+ memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
+ prev = next;
+ next = cmpxchg(&mcelog.next, prev, 0);
+ } while (next != prev);
+}
+
+
+void mce_register_decode_chain(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+ drain_mcelog_buffer();
+}
+EXPORT_SYMBOL_GPL(mce_register_decode_chain);
+
+void mce_unregister_decode_chain(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
+}
+EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
+
static void print_mce(struct mce *m)
{
int ret = 0;
@@ -1770,7 +1818,7 @@ static struct syscore_ops mce_syscore_ops = {
};
/*
- * mce_sysdev: Sysfs support
+ * mce_device: Sysfs support
*/
static void mce_cpu_restart(void *data)
@@ -1806,27 +1854,28 @@ static void mce_enable_ce(void *all)
__mcheck_cpu_init_timer();
}
-static struct sysdev_class mce_sysdev_class = {
+static struct bus_type mce_subsys = {
.name = "machinecheck",
+ .dev_name = "machinecheck",
};
-DEFINE_PER_CPU(struct sys_device, mce_sysdev);
+DEFINE_PER_CPU(struct device, mce_device);
__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
-static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
+static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
return container_of(attr, struct mce_bank, attr);
}
-static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t show_bank(struct device *s, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}
-static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t set_bank(struct device *s, struct device_attribute *attr,
const char *buf, size_t size)
{
u64 new;
@@ -1841,14 +1890,14 @@ static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
}
static ssize_t
-show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
+show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
strcpy(buf, mce_helper);
strcat(buf, "\n");
return strlen(mce_helper) + 1;
}
-static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
+static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
const char *buf, size_t siz)
{
char *p;
@@ -1863,8 +1912,8 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
return strlen(mce_helper) + !!p;
}
-static ssize_t set_ignore_ce(struct sys_device *s,
- struct sysdev_attribute *attr,
+static ssize_t set_ignore_ce(struct device *s,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
u64 new;
@@ -1887,8 +1936,8 @@ static ssize_t set_ignore_ce(struct sys_device *s,
return size;
}
-static ssize_t set_cmci_disabled(struct sys_device *s,
- struct sysdev_attribute *attr,
+static ssize_t set_cmci_disabled(struct device *s,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
u64 new;
@@ -1910,108 +1959,107 @@ static ssize_t set_cmci_disabled(struct sys_device *s,
return size;
}
-static ssize_t store_int_with_restart(struct sys_device *s,
- struct sysdev_attribute *attr,
+static ssize_t store_int_with_restart(struct device *s,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
- ssize_t ret = sysdev_store_int(s, attr, buf, size);
+ ssize_t ret = device_store_int(s, attr, buf, size);
mce_restart();
return ret;
}
-static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
-static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
-static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
-static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
+static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
+static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
+static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
+static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
-static struct sysdev_ext_attribute attr_check_interval = {
- _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
- store_int_with_restart),
+static struct dev_ext_attribute dev_attr_check_interval = {
+ __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
&check_interval
};
-static struct sysdev_ext_attribute attr_ignore_ce = {
- _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
+static struct dev_ext_attribute dev_attr_ignore_ce = {
+ __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
&mce_ignore_ce
};
-static struct sysdev_ext_attribute attr_cmci_disabled = {
- _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
+static struct dev_ext_attribute dev_attr_cmci_disabled = {
+ __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
&mce_cmci_disabled
};
-static struct sysdev_attribute *mce_sysdev_attrs[] = {
- &attr_tolerant.attr,
- &attr_check_interval.attr,
- &attr_trigger,
- &attr_monarch_timeout.attr,
- &attr_dont_log_ce.attr,
- &attr_ignore_ce.attr,
- &attr_cmci_disabled.attr,
+static struct device_attribute *mce_device_attrs[] = {
+ &dev_attr_tolerant.attr,
+ &dev_attr_check_interval.attr,
+ &dev_attr_trigger,
+ &dev_attr_monarch_timeout.attr,
+ &dev_attr_dont_log_ce.attr,
+ &dev_attr_ignore_ce.attr,
+ &dev_attr_cmci_disabled.attr,
NULL
};
-static cpumask_var_t mce_sysdev_initialized;
+static cpumask_var_t mce_device_initialized;
-/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_sysdev_create(unsigned int cpu)
+/* Per cpu device init. All of the cpus still share the same ctrl bank: */
+static __cpuinit int mce_device_create(unsigned int cpu)
{
- struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
+ struct device *dev = &per_cpu(mce_device, cpu);
int err;
int i, j;
if (!mce_available(&boot_cpu_data))
return -EIO;
- memset(&sysdev->kobj, 0, sizeof(struct kobject));
- sysdev->id = cpu;
- sysdev->cls = &mce_sysdev_class;
+ memset(&dev->kobj, 0, sizeof(struct kobject));
+ dev->id = cpu;
+ dev->bus = &mce_subsys;
- err = sysdev_register(sysdev);
+ err = device_register(dev);
if (err)
return err;
- for (i = 0; mce_sysdev_attrs[i]; i++) {
- err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
+ for (i = 0; mce_device_attrs[i]; i++) {
+ err = device_create_file(dev, mce_device_attrs[i]);
if (err)
goto error;
}
for (j = 0; j < banks; j++) {
- err = sysdev_create_file(sysdev, &mce_banks[j].attr);
+ err = device_create_file(dev, &mce_banks[j].attr);
if (err)
goto error2;
}
- cpumask_set_cpu(cpu, mce_sysdev_initialized);
+ cpumask_set_cpu(cpu, mce_device_initialized);
return 0;
error2:
while (--j >= 0)
- sysdev_remove_file(sysdev, &mce_banks[j].attr);
+ device_remove_file(dev, &mce_banks[j].attr);
error:
while (--i >= 0)
- sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
+ device_remove_file(dev, mce_device_attrs[i]);
- sysdev_unregister(sysdev);
+ device_unregister(dev);
return err;
}
-static __cpuinit void mce_sysdev_remove(unsigned int cpu)
+static __cpuinit void mce_device_remove(unsigned int cpu)
{
- struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
+ struct device *dev = &per_cpu(mce_device, cpu);
int i;
- if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
+ if (!cpumask_test_cpu(cpu, mce_device_initialized))
return;
- for (i = 0; mce_sysdev_attrs[i]; i++)
- sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);
+ for (i = 0; mce_device_attrs[i]; i++)
+ device_remove_file(dev, mce_device_attrs[i]);
for (i = 0; i < banks; i++)
- sysdev_remove_file(sysdev, &mce_banks[i].attr);
+ device_remove_file(dev, &mce_banks[i].attr);
- sysdev_unregister(sysdev);
- cpumask_clear_cpu(cpu, mce_sysdev_initialized);
+ device_unregister(dev);
+ cpumask_clear_cpu(cpu, mce_device_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
@@ -2061,7 +2109,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- mce_sysdev_create(cpu);
+ mce_device_create(cpu);
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
break;
@@ -2069,7 +2117,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD_FROZEN:
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
- mce_sysdev_remove(cpu);
+ mce_device_remove(cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
@@ -2103,7 +2151,7 @@ static __init void mce_init_banks(void)
for (i = 0; i < banks; i++) {
struct mce_bank *b = &mce_banks[i];
- struct sysdev_attribute *a = &b->attr;
+ struct device_attribute *a = &b->attr;
sysfs_attr_init(&a->attr);
a->attr.name = b->attrname;
@@ -2123,16 +2171,16 @@ static __init int mcheck_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
- zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);
+ zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
mce_init_banks();
- err = sysdev_class_register(&mce_sysdev_class);
+ err = subsys_system_register(&mce_subsys, NULL);
if (err)
return err;
for_each_online_cpu(i) {
- err = mce_sysdev_create(i);
+ err = mce_device_create(i);
if (err)
return err;
}
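
mce.c now keeps x86_mce_decoder_chain private and exports mce_register_decode_chain()/mce_unregister_decode_chain() instead; registration also drains MCEs that were logged before the decoder was loaded. A sketch of how a decoder module might use the new interface (the dec_* names are made up for illustration):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int dec_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = data;

	pr_info("MCE on CPU %u: bank %d, status 0x%llx\n",
		m->extcpu, m->bank, (unsigned long long)m->status);
	return NOTIFY_STOP;
}

static struct notifier_block dec_nb = {
	.notifier_call	= dec_notify,
};

static int __init dec_init(void)
{
	/* also replays anything already sitting in the mcelog buffer */
	mce_register_decode_chain(&dec_nb);
	return 0;
}

static void __exit dec_exit(void)
{
	mce_unregister_decode_chain(&dec_nb);
}

module_init(dec_init);
module_exit(dec_exit);
MODULE_LICENSE("GPL");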
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index f5474218cff..ba0b94a7e20 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -17,7 +17,6 @@
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
-#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
@@ -64,11 +63,9 @@ struct threshold_bank {
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
-#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
};
-#endif
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
@@ -202,10 +199,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (!block)
per_cpu(bank_map, cpu) |= (1 << bank);
-#ifdef CONFIG_SMP
if (shared_bank[bank] && c->cpu_core_id)
break;
-#endif
+
offset = setup_APIC_mce(offset,
(high & MASK_LVTOFF_HI) >> 20);
@@ -531,7 +527,6 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
sprintf(name, "threshold_bank%i", bank);
-#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
i = cpumask_first(cpu_llc_shared_mask(cpu));
@@ -548,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (!b)
goto out;
- err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj,
+ err = sysfs_create_link(&per_cpu(mce_device, cpu).kobj,
b->kobj, name);
if (err)
goto out;
@@ -558,7 +553,6 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
-#endif
b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
@@ -571,7 +565,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
- b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj);
+ b->kobj = kobject_create_and_add(name, &per_cpu(mce_device, cpu).kobj);
if (!b->kobj)
goto out_free;
@@ -591,7 +585,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (i == cpu)
continue;
- err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj,
+ err = sysfs_create_link(&per_cpu(mce_device, i).kobj,
b->kobj, name);
if (err)
goto out;
@@ -669,7 +663,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#ifdef CONFIG_SMP
/* sibling symlink */
if (shared_bank[bank] && b->blocks->cpu != cpu) {
- sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_device, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
@@ -681,7 +675,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
if (i == cpu)
continue;
- sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_device, i).kobj, name);
per_cpu(threshold_banks, i)[bank] = NULL;
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 787e06c84ea..67bb17a37a0 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
-#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
@@ -69,16 +68,16 @@ static atomic_t therm_throt_en = ATOMIC_INIT(0);
static u32 lvtthmr_init __read_mostly;
#ifdef CONFIG_SYSFS
-#define define_therm_throt_sysdev_one_ro(_name) \
- static SYSDEV_ATTR(_name, 0444, \
- therm_throt_sysdev_show_##_name, \
+#define define_therm_throt_device_one_ro(_name) \
+ static DEVICE_ATTR(_name, 0444, \
+ therm_throt_device_show_##_name, \
NULL) \
-#define define_therm_throt_sysdev_show_func(event, name) \
+#define define_therm_throt_device_show_func(event, name) \
\
-static ssize_t therm_throt_sysdev_show_##event##_##name( \
- struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t therm_throt_device_show_##event##_##name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
unsigned int cpu = dev->id; \
@@ -95,20 +94,20 @@ static ssize_t therm_throt_sysdev_show_##event##_##name( \
return ret; \
}
-define_therm_throt_sysdev_show_func(core_throttle, count);
-define_therm_throt_sysdev_one_ro(core_throttle_count);
+define_therm_throt_device_show_func(core_throttle, count);
+define_therm_throt_device_one_ro(core_throttle_count);
-define_therm_throt_sysdev_show_func(core_power_limit, count);
-define_therm_throt_sysdev_one_ro(core_power_limit_count);
+define_therm_throt_device_show_func(core_power_limit, count);
+define_therm_throt_device_one_ro(core_power_limit_count);
-define_therm_throt_sysdev_show_func(package_throttle, count);
-define_therm_throt_sysdev_one_ro(package_throttle_count);
+define_therm_throt_device_show_func(package_throttle, count);
+define_therm_throt_device_one_ro(package_throttle_count);
-define_therm_throt_sysdev_show_func(package_power_limit, count);
-define_therm_throt_sysdev_one_ro(package_power_limit_count);
+define_therm_throt_device_show_func(package_power_limit, count);
+define_therm_throt_device_one_ro(package_power_limit_count);
static struct attribute *thermal_throttle_attrs[] = {
- &attr_core_throttle_count.attr,
+ &dev_attr_core_throttle_count.attr,
NULL
};
@@ -223,36 +222,36 @@ static int thresh_event_valid(int event)
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+static __cpuinit int thermal_throttle_add_dev(struct device *dev,
unsigned int cpu)
{
int err;
struct cpuinfo_x86 *c = &cpu_data(cpu);
- err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
+ err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
if (err)
return err;
if (cpu_has(c, X86_FEATURE_PLN))
- err = sysfs_add_file_to_group(&sys_dev->kobj,
- &attr_core_power_limit_count.attr,
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_core_power_limit_count.attr,
thermal_attr_group.name);
if (cpu_has(c, X86_FEATURE_PTS)) {
- err = sysfs_add_file_to_group(&sys_dev->kobj,
- &attr_package_throttle_count.attr,
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_throttle_count.attr,
thermal_attr_group.name);
if (cpu_has(c, X86_FEATURE_PLN))
- err = sysfs_add_file_to_group(&sys_dev->kobj,
- &attr_package_power_limit_count.attr,
+ err = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_package_power_limit_count.attr,
thermal_attr_group.name);
}
return err;
}
-static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
+static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
- sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group);
+ sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}
/* Mutex protecting device creation against CPU hotplug: */
@@ -265,16 +264,16 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
int err = 0;
- sys_dev = get_cpu_sysdev(cpu);
+ dev = get_cpu_device(cpu);
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(sys_dev, cpu);
+ err = thermal_throttle_add_dev(dev, cpu);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -283,7 +282,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mutex_lock(&therm_cpu_lock);
- thermal_throttle_remove_dev(sys_dev);
+ thermal_throttle_remove_dev(dev);
mutex_unlock(&therm_cpu_lock);
break;
}
@@ -310,7 +309,7 @@ static __init int thermal_throttle_init_device(void)
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
+ err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -323,17 +322,6 @@ device_initcall(thermal_throttle_init_device);
#endif /* CONFIG_SYSFS */
-/*
- * Set up the most two significant bit to notify mce log that this thermal
- * event type.
- * This is a temp solution. May be changed in the future with mce log
- * infrasture.
- */
-#define CORE_THROTTLED (0)
-#define CORE_POWER_LIMIT ((__u64)1 << 62)
-#define PACKAGE_THROTTLED ((__u64)2 << 62)
-#define PACKAGE_POWER_LIMIT ((__u64)3 << 62)
-
static void notify_thresholds(__u64 msr_val)
{
/* check whether the interrupt handler is defined;
@@ -363,27 +351,23 @@ static void intel_thermal_interrupt(void)
if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
THERMAL_THROTTLING_EVENT,
CORE_LEVEL) != 0)
- mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
+ mce_log_therm_throt_event(msr_val);
if (this_cpu_has(X86_FEATURE_PLN))
- if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
- CORE_LEVEL) != 0)
- mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
+ CORE_LEVEL);
if (this_cpu_has(X86_FEATURE_PTS)) {
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
- if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
THERMAL_THROTTLING_EVENT,
- PACKAGE_LEVEL) != 0)
- mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
+ PACKAGE_LEVEL);
if (this_cpu_has(X86_FEATURE_PLN))
- if (therm_throt_process(msr_val &
+ therm_throt_process(msr_val &
PACKAGE_THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
- PACKAGE_LEVEL) != 0)
- mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
- | msr_val);
+ PACKAGE_LEVEL);
}
}
@@ -397,8 +381,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
- exit_idle();
irq_enter();
+ exit_idle();
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
irq_exit();
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index d746df2909c..aa578cadb94 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt;
asmlinkage void smp_threshold_interrupt(void)
{
- exit_idle();
irq_enter();
+ exit_idle();
inc_irq_stat(irq_threshold_count);
mce_threshold_vector();
irq_exit();
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index a71efcdbb09..97b26356e9e 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
if (tmp != mask_lo) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND);
mask_lo = tmp;
}
}
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+ wbinvd();
}
static void post_set(void) __releases(set_atomicity_lock)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 640891014b2..5adce1040b1 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
return -EOPNOTSUPP;
}
- /*
- * Do not allow config1 (extended registers) to propagate,
- * there's no sane user-space generalization yet:
- */
if (attr->type == PERF_TYPE_RAW)
- return 0;
+ return x86_pmu_extra_regs(event->attr.config, event);
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
@@ -488,18 +484,195 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
+/*
+ * Event scheduler state:
+ *
+ * Assign events by iterating over all events and counters, beginning
+ * with the events with the smallest weights. Keep the current iterator
+ * state in struct sched_state.
+ */
+struct sched_state {
+ int weight;
+ int event; /* event index */
+ int counter; /* counter index */
+ int unassigned; /* number of events to be assigned left */
+ unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+};
+
+/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
+#define SCHED_STATES_MAX 2
+
+struct perf_sched {
+ int max_weight;
+ int max_events;
+ struct event_constraint **constraints;
+ struct sched_state state;
+ int saved_states;
+ struct sched_state saved[SCHED_STATES_MAX];
+};
+
+/*
+ * Initialize the iterator that runs through all events and counters.
+ */
+static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
+ int num, int wmin, int wmax)
+{
+ int idx;
+
+ memset(sched, 0, sizeof(*sched));
+ sched->max_events = num;
+ sched->max_weight = wmax;
+ sched->constraints = c;
+
+ for (idx = 0; idx < num; idx++) {
+ if (c[idx]->weight == wmin)
+ break;
+ }
+
+ sched->state.event = idx; /* start with min weight */
+ sched->state.weight = wmin;
+ sched->state.unassigned = num;
+}
+
+static void perf_sched_save_state(struct perf_sched *sched)
+{
+ if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
+ return;
+
+ sched->saved[sched->saved_states] = sched->state;
+ sched->saved_states++;
+}
+
+static bool perf_sched_restore_state(struct perf_sched *sched)
+{
+ if (!sched->saved_states)
+ return false;
+
+ sched->saved_states--;
+ sched->state = sched->saved[sched->saved_states];
+
+ /* continue with next counter: */
+ clear_bit(sched->state.counter++, sched->state.used);
+
+ return true;
+}
+
+/*
+ * Select a counter for the current event to schedule. Return true on
+ * success.
+ */
+static bool __perf_sched_find_counter(struct perf_sched *sched)
+{
+ struct event_constraint *c;
+ int idx;
+
+ if (!sched->state.unassigned)
+ return false;
+
+ if (sched->state.event >= sched->max_events)
+ return false;
+
+ c = sched->constraints[sched->state.event];
+
+ /* Prefer fixed purpose counters */
+ if (x86_pmu.num_counters_fixed) {
+ idx = X86_PMC_IDX_FIXED;
+ for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
+ if (!__test_and_set_bit(idx, sched->state.used))
+ goto done;
+ }
+ }
+ /* Grab the first unused counter starting with idx */
+ idx = sched->state.counter;
+ for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+ if (!__test_and_set_bit(idx, sched->state.used))
+ goto done;
+ }
+
+ return false;
+
+done:
+ sched->state.counter = idx;
+
+ if (c->overlap)
+ perf_sched_save_state(sched);
+
+ return true;
+}
+
+static bool perf_sched_find_counter(struct perf_sched *sched)
+{
+ while (!__perf_sched_find_counter(sched)) {
+ if (!perf_sched_restore_state(sched))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Go through all unassigned events and find the next one to schedule.
+ * Take events with the least weight first. Return true on success.
+ */
+static bool perf_sched_next_event(struct perf_sched *sched)
+{
+ struct event_constraint *c;
+
+ if (!sched->state.unassigned || !--sched->state.unassigned)
+ return false;
+
+ do {
+ /* next event */
+ sched->state.event++;
+ if (sched->state.event >= sched->max_events) {
+ /* next weight */
+ sched->state.event = 0;
+ sched->state.weight++;
+ if (sched->state.weight > sched->max_weight)
+ return false;
+ }
+ c = sched->constraints[sched->state.event];
+ } while (c->weight != sched->state.weight);
+
+ sched->state.counter = 0; /* start with first counter */
+
+ return true;
+}
+
+/*
+ * Assign a counter for each event.
+ */
+static int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign)
+{
+ struct perf_sched sched;
+
+ perf_sched_init(&sched, constraints, n, wmin, wmax);
+
+ do {
+ if (!perf_sched_find_counter(&sched))
+ break; /* failed */
+ if (assign)
+ assign[sched.state.event] = sched.state.counter;
+ } while (perf_sched_next_event(&sched));
+
+ return sched.state.unassigned;
+}
+
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int i, j, w, wmax, num = 0;
+ int i, wmin, wmax, num = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
- for (i = 0; i < n; i++) {
+ for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
constraints[i] = c;
+ wmin = min(wmin, c->weight);
+ wmax = max(wmax, c->weight);
}
/*
@@ -525,59 +698,11 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (assign)
assign[i] = hwc->idx;
}
- if (i == n)
- goto done;
-
- /*
- * begin slow path
- */
-
- bitmap_zero(used_mask, X86_PMC_IDX_MAX);
-
- /*
- * weight = number of possible counters
- *
- * 1 = most constrained, only works on one counter
- * wmax = least constrained, works on any counter
- *
- * assign events to counters starting with most
- * constrained events.
- */
- wmax = x86_pmu.num_counters;
-
- /*
- * when fixed event counters are present,
- * wmax is incremented by 1 to account
- * for one more choice
- */
- if (x86_pmu.num_counters_fixed)
- wmax++;
-
- for (w = 1, num = n; num && w <= wmax; w++) {
- /* for each event */
- for (i = 0; num && i < n; i++) {
- c = constraints[i];
- hwc = &cpuc->event_list[i]->hw;
-
- if (c->weight != w)
- continue;
- for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
- if (!test_bit(j, used_mask))
- break;
- }
-
- if (j == X86_PMC_IDX_MAX)
- break;
+ /* slow path */
+ if (i != n)
+ num = perf_assign_events(constraints, n, wmin, wmax, assign);
- __set_bit(j, used_mask);
-
- if (assign)
- assign[i] = j;
- num--;
- }
- }
-done:
/*
* scheduling failed or is just a simulation,
* free resources if necessary
@@ -588,7 +713,7 @@ done:
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
/*
@@ -607,7 +732,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
if (is_x86_event(leader)) {
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = leader;
n++;
}
@@ -620,7 +745,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
continue;
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = event;
n++;
@@ -1123,6 +1248,7 @@ static void __init pmu_check_apic(void)
static int __init init_hw_perf_events(void)
{
+ struct x86_pmu_quirk *quirk;
struct event_constraint *c;
int err;
@@ -1151,8 +1277,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.quirks)
- x86_pmu.quirks();
+ for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
+ quirk->func();
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
@@ -1175,12 +1301,18 @@ static int __init init_hw_perf_events(void)
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
- 0, x86_pmu.num_counters);
+ 0, x86_pmu.num_counters, 0);
if (x86_pmu.event_constraints) {
+ /*
+ * The event on fixed counter 2 (REF_CYCLES) only works on that
+ * counter, so do not extend its mask to the generic counters
+ */
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK)
+ if (c->cmask != X86_RAW_EVENT_MASK
+ || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
continue;
+ }
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
@@ -1316,7 +1448,7 @@ static int validate_event(struct perf_event *event)
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
- ret = -ENOSPC;
+ ret = -EINVAL;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1473,7 @@ static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
- int ret = -ENOSPC, n;
+ int ret = -EINVAL, n;
fake_cpuc = allocate_fake_cpuc();
if (IS_ERR(fake_cpuc))
@@ -1570,3 +1702,15 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
return misc;
}
+
+void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+ cap->version = x86_pmu.version;
+ cap->num_counters_gp = x86_pmu.num_counters;
+ cap->num_counters_fixed = x86_pmu.num_counters_fixed;
+ cap->bit_width_gp = x86_pmu.cntval_bits;
+ cap->bit_width_fixed = x86_pmu.cntval_bits;
+ cap->events_mask = (unsigned int)x86_pmu.events_maskl;
+ cap->events_mask_len = x86_pmu.events_mask_len;
+}
+EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
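
The newly exported perf_get_x86_pmu_capability() simply copies the PMU geometry into a caller-supplied struct x86_pmu_capability. A hedged example of a consumer, assuming the declarations from <asm/perf_event.h>; the reporting function itself is made up:

#include <linux/kernel.h>
#include <asm/perf_event.h>

static void report_pmu_caps(void)
{
	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);

	pr_info("PMU v%d: %d GP counters (%d bits wide), %d fixed counters\n",
		cap.version, cap.num_counters_gp, cap.bit_width_gp,
		cap.num_counters_fixed);
}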
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index b9698d40ac4..8944062f46e 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -45,6 +45,7 @@ struct event_constraint {
u64 code;
u64 cmask;
int weight;
+ int overlap;
};
struct amd_nb {
@@ -151,15 +152,40 @@ struct cpu_hw_events {
void *kfree_on_online;
};
-#define __EVENT_CONSTRAINT(c, n, m, w) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
+ .overlap = (o), \
}
#define EVENT_CONSTRAINT(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+
+/*
+ * The overlap flag marks event constraints with overlapping counter
+ * masks. This is the case if the counter mask of such an event is not
+ * a subset of any other counter mask of a constraint with an equal or
+ * higher weight, e.g.:
+ *
+ * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
+ * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
+ * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
+ *
+ * The event scheduler may not select the correct counter in the first
+ * cycle because it needs to know which subsequent events will be
+ * scheduled. It may fail to schedule the events then. So we set the
+ * overlap flag for such constraints to give the scheduler a hint which
+ * events to select for counter rescheduling.
+ *
+ * Care must be taken as the rescheduling algorithm is O(n!), which
+ * dramatically increases scheduling cycles on an over-committed
+ * system. The number of such EVENT_CONSTRAINT_OVERLAP() macros
+ * and their counter masks must be kept to a minimum.
+ */
+#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
/*
* Constraint on the Event code.
@@ -235,6 +261,11 @@ union perf_capabilities {
u64 capabilities;
};
+struct x86_pmu_quirk {
+ struct x86_pmu_quirk *next;
+ void (*func)(void);
+};
+
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -259,6 +290,11 @@ struct x86_pmu {
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
+ union {
+ unsigned long events_maskl;
+ unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
+ };
+ int events_mask_len;
int apic;
u64 max_period;
struct event_constraint *
@@ -268,7 +304,7 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
- void (*quirks)(void);
+ struct x86_pmu_quirk *quirks;
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
@@ -309,6 +345,15 @@ struct x86_pmu {
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
+#define x86_add_quirk(func_) \
+do { \
+ static struct x86_pmu_quirk __quirk __initdata = { \
+ .func = func_, \
+ }; \
+ __quirk.next = x86_pmu.quirks; \
+ x86_pmu.quirks = &__quirk; \
+} while (0)
+
#define ERF_NO_HT_SHARING 1
#define ERF_HAS_RSP_1 2
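
x86_pmu.quirks changes from a single function pointer into a singly linked list of struct x86_pmu_quirk nodes, built with the x86_add_quirk() macro (each call pushes a static __initdata node onto the head of the list). Usage follows the Intel code later in this diff; the quirk body and the wrapper function below are only illustrative:

/* inside arch/x86/kernel/cpu/, where the local "perf_event.h" is available */
#include "perf_event.h"

static __init void my_model_quirk(void)
{
	/* illustrative only: e.g. disable PEBS on an affected model */
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init void my_pmu_setup(void)
{
	struct x86_pmu_quirk *quirk;

	/* queue the quirk; the node lives in static __initdata storage */
	x86_add_quirk(my_model_quirk);

	/* init_hw_perf_events() later runs the whole list like this: */
	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
		quirk->func();
}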
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index aeefd45697a..0397b23be8e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = {
static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index ab6343d2182..3b8a2d30d14 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
goto out;
}
- pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
- pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+ pr_info("IBS: LVT offset %d assigned\n", offset);
return 0;
out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
static __init int amd_ibs_init(void)
{
u32 caps;
- int ret;
+ int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- if (!ibs_eilvt_valid()) {
- ret = force_ibs_eilvt_setup();
- if (ret) {
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
- }
- }
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to the BIOS settings and try to use them.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
get_online_cpus();
ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
smp_call_function(setup_APIC_ibs, NULL, 1);
put_online_cpus();
- return perf_event_ibs_init();
+ ret = perf_event_ibs_init();
+out:
+ if (ret)
+ pr_err("Failed to setup IBS, %d\n", ret);
+ return ret;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2be5ebe9987..3bd37bdf1b8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -28,6 +28,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -45,12 +46,7 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /*
- * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
- * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
- * ratio between these counters.
- */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
@@ -68,7 +64,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
@@ -90,7 +86,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
@@ -102,7 +98,7 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
@@ -125,7 +121,7 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
@@ -1169,7 +1165,7 @@ again:
*/
c = &unconstrained;
} else if (intel_try_alt_er(event, orig_idx)) {
- raw_spin_unlock(&era->lock);
+ raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
}
raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1519,7 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.guest_get_msrs = intel_guest_get_msrs,
};
-static void intel_clovertown_quirks(void)
+static __init void intel_clovertown_quirk(void)
{
/*
* PEBS is unreliable due to:
@@ -1545,12 +1541,60 @@ static void intel_clovertown_quirks(void)
x86_pmu.pebs_constraints = NULL;
}
+static __init void intel_sandybridge_quirk(void)
+{
+ printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ x86_pmu.pebs = 0;
+ x86_pmu.pebs_constraints = NULL;
+}
+
+static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
+ { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
+ { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
+ { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
+ { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
+ { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
+ { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
+ { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
+};
+
+static __init void intel_arch_events_quirk(void)
+{
+ int bit;
+
+ /* disable events that cpuid reports as not present */
+ for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
+ intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
+ printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
+ intel_arch_events_map[bit].name);
+ }
+}
+
+static __init void intel_nehalem_quirk(void)
+{
+ union cpuid10_ebx ebx;
+
+ ebx.full = x86_pmu.events_maskl;
+ if (ebx.split.no_branch_misses_retired) {
+ /*
+ * Erratum AAJ80 detected; we work around it by using
+ * the BR_MISP_EXEC.ANY event. This will over-count
+ * branch-misses, but it's still much better than the
+ * architectural event which is often completely bogus:
+ */
+ intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+ ebx.split.no_branch_misses_retired = 0;
+ x86_pmu.events_maskl = ebx.full;
+ printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
+ }
+}
+
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
+ union cpuid10_ebx ebx;
unsigned int unused;
- unsigned int ebx;
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -1567,8 +1611,8 @@ __init int intel_pmu_init(void)
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
- cpuid(10, &eax.full, &ebx, &unused, &edx.full);
- if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+ cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+ if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
return -ENODEV;
version = eax.split.version_id;
@@ -1582,6 +1626,9 @@ __init int intel_pmu_init(void)
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.events_maskl = ebx.full;
+ x86_pmu.events_mask_len = eax.split.mask_length;
+
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
@@ -1601,6 +1648,8 @@ __init int intel_pmu_init(void)
intel_ds_init();
+ x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
+
/*
* Install the hw-cache-events table:
*/
@@ -1610,7 +1659,7 @@ __init int intel_pmu_init(void)
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
- x86_pmu.quirks = intel_clovertown_quirks;
+ x86_add_quirk(intel_clovertown_quirk);
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
@@ -1644,17 +1693,8 @@ __init int intel_pmu_init(void)
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
- if (ebx & 0x40) {
- /*
- * Erratum AAJ80 detected, we work it around by using
- * the BR_MISP_EXEC.ANY event. This will over-count
- * branch-misses, but it's still much better than the
- * architectural event which is often completely bogus:
- */
- intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+ x86_add_quirk(intel_nehalem_quirk);
- pr_cont("erratum AAJ80 worked around, ");
- }
pr_cont("Nehalem events, ");
break;
@@ -1694,6 +1734,7 @@ __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
+ x86_add_quirk(intel_sandybridge_quirk);
case 45: /* SandyBridge, "Romely-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -1730,5 +1771,6 @@ __init int intel_pmu_init(void)
break;
}
}
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index c0d238f49db..73da6b64f5b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
+ int is_64bit = 0;
/*
* We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} else
kaddr = (void *)to;
- kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+ is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+ insn_init(&insn, kaddr, is_64bit);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 492bf1358a7..ef484d9d0a2 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1268,7 +1268,7 @@ reserve:
}
done:
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c
index 5abbea297e0..7b3fe56b1c2 100644
--- a/arch/x86/kernel/cpu/powerflags.c
+++ b/arch/x86/kernel/cpu/powerflags.c
@@ -16,5 +16,6 @@ const char *const x86_power_flags[32] = {
"100mhzsteps",
"hwpstate",
"", /* tsc invariant mapped to constant_tsc */
- /* nothing */
+ "cpb", /* core performance boost */
+ "eff_freq_ro", /* Readonly aperf/mperf */
};
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 14b23140e81..8022c668148 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_x86 *c = v;
- unsigned int cpu = 0;
+ unsigned int cpu;
int i;
-#ifdef CONFIG_SMP
cpu = c->cpu_index;
-#endif
seq_printf(m, "processor\t: %u\n"
"vendor_id\t: %s\n"
"cpu family\t: %d\n"
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 212a6a42527..a524353d93f 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -177,7 +177,7 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier =
.notifier_call = cpuid_class_cpu_callback,
};
-static char *cpuid_devnode(struct device *dev, mode_t *mode)
+static char *cpuid_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
}
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 3b97a80ce32..c99f9ed013d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad EIP value.");
+ printk(KERN_CONT " Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 19853ad8afc..6d728d9284b 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(" Bad RIP value.");
+ printk(KERN_CONT " Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk("<%02x> ", c);
+ printk(KERN_CONT "<%02x> ", c);
else
- printk("%02x ", c);
+ printk(KERN_CONT "%02x ", c);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)
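
Both dumpstack hunks add KERN_CONT so that the byte-by-byte code dump continues the current log line instead of opening a new record for every printk(). The pattern, in a minimal self-contained form:

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_code_bytes(const u8 *buf, int len)
{
	int i;

	printk(KERN_INFO "Code:");
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);	/* continuation of the same line */
	printk(KERN_CONT "\n");
}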
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 303a0e48f07..8071e2f3d6e 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -738,35 +738,17 @@ core_initcall(e820_mark_nvs_memory);
/*
* pre allocated 4k and reserved it in memblock and e820_saved
*/
-u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
+u64 __init early_reserve_e820(u64 size, u64 align)
{
- u64 size = 0;
u64 addr;
- u64 start;
- for (start = startt; ; start += size) {
- start = memblock_x86_find_in_range_size(start, &size, align);
- if (start == MEMBLOCK_ERROR)
- return 0;
- if (size >= sizet)
- break;
+ addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ if (addr) {
+ e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
+ printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+ update_e820_saved();
}
-#ifdef CONFIG_X86_32
- if (start >= MAXMEM)
- return 0;
- if (start + size > MAXMEM)
- size = MAXMEM - start;
-#endif
-
- addr = round_down(start + size - sizet, align);
- if (addr < start)
- return 0;
- memblock_x86_reserve_range(addr, addr + sizet, "new next");
- e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
- update_e820_saved();
-
return addr;
}
@@ -1090,7 +1072,7 @@ void __init memblock_x86_fill(void)
* We are safe to enable resizing, because memblock_x86_fill()
* is called rather late for x86
*/
- memblock_can_resize = 1;
+ memblock_allow_resize();
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
@@ -1105,22 +1087,36 @@ void __init memblock_x86_fill(void)
memblock_add(ei->addr, ei->size);
}
- memblock_analyze();
memblock_dump_all();
}
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
- u64 free_size_pfn;
- u64 mem_size_pfn;
+ u64 nr_pages = 0, nr_free_pages = 0;
+ unsigned long start_pfn, end_pfn;
+ phys_addr_t start, end;
+ int i;
+ u64 u;
+
/*
* Find the amount of memory used below MAX_DMA_PFN: use memblock to
* get the free size in [0, MAX_DMA_PFN] first, and assume boot_mem
* will not take anything below MAX_DMA_PFN
*/
- mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
- free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
- set_dma_reserve(mem_size_pfn - free_size_pfn);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+ start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
+ end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+ nr_pages += end_pfn - start_pfn;
+ }
+
+ for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+ start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
+ end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
+ if (start_pfn < end_pfn)
+ nr_free_pages += end_pfn - start_pfn;
+ }
+
+ set_dma_reserve(nr_pages - nr_free_pages);
#endif
}
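
early_reserve_e820() now lets memblock do the allocation via __memblock_alloc_base() and only updates e820_saved itself, instead of open-coding a top-down search; its signature shrinks to (size, align). A hedged sketch of the new calling convention (the caller and the size are made up):

#include <linux/kernel.h>
#include <asm/e820.h>
#include <asm/page.h>

static void __init example_reserve(void)
{
	/* hypothetical: grab one page of early, page-aligned memory */
	u64 addr = early_reserve_e820(PAGE_SIZE, PAGE_SIZE);

	if (!addr)
		panic("example: cannot reserve early memory");
}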
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index cd28a350f7f..9d42a52d233 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -247,7 +247,7 @@ static int __init setup_early_printk(char *buf)
}
if (!strncmp(buf, "hsu", 3)) {
- hsu_early_console_init();
+ hsu_early_console_init(buf + 3);
early_console_register(&early_hsu_console, keep);
}
#endif
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f3f6f534400..22d0e21b4dd 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -625,6 +625,8 @@ work_notifysig: # deal with pending signals and
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace_sig
@@ -638,6 +640,8 @@ work_notifysig_v86:
#else
movl %esp, %eax
#endif
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace_sig
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index faf8d5e74b0..a20e1cb9dc8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -221,7 +221,7 @@ ENDPROC(native_usergs_sysret64)
/*CFI_REL_OFFSET ss,0*/
pushq_cfi %rax /* rsp */
CFI_REL_OFFSET rsp,0
- pushq_cfi $X86_EFLAGS_IF /* eflags - interrupts on */
+ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
/*CFI_REL_OFFSET rflags,0*/
pushq_cfi $__KERNEL_CS /* cs */
/*CFI_REL_OFFSET cs,0*/
@@ -411,7 +411,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
- je int_ret_from_sys_call
+ jz retint_restore_args
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
jnz int_ret_from_sys_call
@@ -465,7 +465,7 @@ ENTRY(system_call)
* after the swapgs, so that it can do the swapgs
* for the guest and jump here on syscall.
*/
-ENTRY(system_call_after_swapgs)
+GLOBAL(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -478,8 +478,7 @@ ENTRY(system_call_after_swapgs)
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
- GET_THREAD_INFO(%rcx)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz tracesys
system_call_fastpath:
cmpq $__NR_syscall_max,%rax
@@ -496,10 +495,9 @@ ret_from_sys_call:
/* edi: flagmask */
sysret_check:
LOCKDEP_SYS_EXIT
- GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- movl TI_flags(%rcx),%edx
+ movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
@@ -583,7 +581,7 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz auditsys
#endif
SAVE_REST
@@ -612,8 +610,6 @@ tracesys:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $3,CS-ARGOFFSET(%rsp)
- je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
GLOBAL(int_with_check)
@@ -953,6 +949,7 @@ END(common_interrupt)
ENTRY(\sym)
INTR_FRAME
pushq_cfi $~(\num)
+.Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -976,13 +973,21 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_SMP
-.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+ ALIGN
+ INTR_FRAME
+.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
-apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
- invalidate_interrupt\idx smp_invalidate_interrupt
+ENTRY(invalidate_interrupt\idx)
+ pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
+ jmp .Lcommon_invalidate_interrupt0
+ CFI_ADJUST_CFA_OFFSET -8
+END(invalidate_interrupt\idx)
.endif
.endr
+ CFI_ENDPROC
+apicinterrupt INVALIDATE_TLB_VECTOR_START, \
+ invalidate_interrupt0, smp_invalidate_interrupt
#endif
apicinterrupt THRESHOLD_APIC_VECTOR \
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index af0699ba48c..48d9d4ea102 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+ memblock_reserve(lowmem, 0x100000 - lowmem);
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3bb08509a7a..51ff18616d5 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -31,9 +31,8 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
- memblock_init();
-
- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -42,7 +41,7 @@ void __init i386_start_kernel(void)
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+ memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5655c2272ad..3a3b779f41d 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,9 +98,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
{
copy_bootdata(__va(real_mode_data));
- memblock_init();
-
- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
@@ -109,7 +108,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
- memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+ memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
#endif
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b946a9eac7d..ad0de0c2714 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -2,7 +2,6 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
-#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
@@ -32,8 +31,6 @@
#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
-#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
-
/*
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
@@ -55,6 +52,11 @@ struct hpet_dev {
char name[10];
};
+inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
+{
+ return container_of(evtdev, struct hpet_dev, evt);
+}
+
inline unsigned int hpet_readl(unsigned int a)
{
return readl(hpet_virt_address + a);
@@ -1049,6 +1051,14 @@ int hpet_rtc_timer_init(void)
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+static void hpet_disable_rtc_channel(void)
+{
+ unsigned long cfg;
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+}
+
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
@@ -1060,6 +1070,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
return 0;
hpet_rtc_flags &= ~bit_mask;
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
+
return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1138,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, delta;
+ unsigned int delta;
int lost_ints = -1;
- if (unlikely(!hpet_rtc_flags)) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 429e0c92924..7943e0c21bd 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -74,6 +74,10 @@ int arch_show_interrupts(struct seq_file *p, int prec)
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
seq_printf(p, " IRQ work interrupts\n");
+ seq_printf(p, "%*s: ", prec, "RTR");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
+ seq_printf(p, " APIC ICR read retries\n");
#endif
if (x86_platform_ipi_callback) {
seq_printf(p, "%*s: ", prec, "PLT");
@@ -136,6 +140,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += irq_stats(cpu)->irq_spurious_count;
sum += irq_stats(cpu)->apic_perf_irqs;
sum += irq_stats(cpu)->apic_irq_work_irqs;
+ sum += irq_stats(cpu)->icr_read_retry_count;
#endif
if (x86_platform_ipi_callback)
sum += irq_stats(cpu)->x86_platform_ipis;
@@ -181,8 +186,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
unsigned vector = ~regs->orig_ax;
unsigned irq;
- exit_idle();
irq_enter();
+ exit_idle();
irq = __this_cpu_read(vector_irq[vector]);
@@ -209,10 +214,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
ack_APIC_irq();
- exit_idle();
-
irq_enter();
+ exit_idle();
+
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index acf8fbf8fbd..69bca468c47 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
#ifdef CONFIG_DEBUG_STACKOVERFLOW
u64 curbase = (u64)task_stack_page(current);
+ if (user_mode_vm(regs))
+ return;
+
WARN_ONCE(regs->sp >= curbase &&
regs->sp <= curbase + THREAD_SIZE &&
regs->sp < curbase + sizeof(struct thread_info) +
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index b3300e6bace..313fb5cddbc 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -9,7 +9,7 @@
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index ea9d5f2f13e..2889b3d4388 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -50,7 +50,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
put_online_cpus();
}
-void arch_jump_label_transform_static(struct jump_entry *entry,
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
__jump_label_transform(entry, type, text_poke_early);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a9c2116001d..f0c6fd6f176 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -39,8 +39,6 @@
#include <asm/desc.h>
#include <asm/tlbflush.h>
-#define MMU_QUEUE_SIZE 1024
-
static int kvmapf = 1;
static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
-struct kvm_para_state {
- u8 mmu_queue[MMU_QUEUE_SIZE];
- int mmu_queue_len;
-};
-
-static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
-static struct kvm_para_state *kvm_para_state(void)
-{
- return &per_cpu(para_state, raw_smp_processor_id());
-}
-
/*
* No need for any "IO delay" on KVM
*/
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
}
}
-static void kvm_mmu_op(void *buffer, unsigned len)
-{
- int r;
- unsigned long a1, a2;
-
- do {
- a1 = __pa(buffer);
- a2 = 0; /* on i386 __pa() always returns <4G */
- r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
- buffer += r;
- len -= r;
- } while (len);
-}
-
-static void mmu_queue_flush(struct kvm_para_state *state)
-{
- if (state->mmu_queue_len) {
- kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
- state->mmu_queue_len = 0;
- }
-}
-
-static void kvm_deferred_mmu_op(void *buffer, int len)
-{
- struct kvm_para_state *state = kvm_para_state();
-
- if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
- kvm_mmu_op(buffer, len);
- return;
- }
- if (state->mmu_queue_len + len > sizeof state->mmu_queue)
- mmu_queue_flush(state);
- memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
- state->mmu_queue_len += len;
-}
-
-static void kvm_mmu_write(void *dest, u64 val)
-{
- __u64 pte_phys;
- struct kvm_mmu_op_write_pte wpte;
-
-#ifdef CONFIG_HIGHPTE
- struct page *page;
- unsigned long dst = (unsigned long) dest;
-
- page = kmap_atomic_to_page(dest);
- pte_phys = page_to_pfn(page);
- pte_phys <<= PAGE_SHIFT;
- pte_phys += (dst & ~(PAGE_MASK));
-#else
- pte_phys = (unsigned long)__pa(dest);
-#endif
- wpte.header.op = KVM_MMU_OP_WRITE_PTE;
- wpte.pte_val = val;
- wpte.pte_phys = pte_phys;
-
- kvm_deferred_mmu_op(&wpte, sizeof wpte);
-}
-
-/*
- * We only need to hook operations that are MMU writes. We hook these so that
- * we can use lazy MMU mode to batch these operations. We could probably
- * improve the performance of the host code if we used some of the information
- * here to simplify processing of batched writes.
- */
-static void kvm_set_pte(pte_t *ptep, pte_t pte)
-{
- kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
- kvm_mmu_write(pmdp, pmd_val(pmd));
-}
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
- kvm_mmu_write(ptep, pte_val(pte));
-}
-
-static void kvm_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- kvm_mmu_write(ptep, 0);
-}
-
-static void kvm_pmd_clear(pmd_t *pmdp)
-{
- kvm_mmu_write(pmdp, 0);
-}
-#endif
-
-static void kvm_set_pud(pud_t *pudp, pud_t pud)
-{
- kvm_mmu_write(pudp, pud_val(pud));
-}
-
-#if PAGETABLE_LEVELS == 4
-static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
-{
- kvm_mmu_write(pgdp, pgd_val(pgd));
-}
-#endif
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static void kvm_flush_tlb(void)
-{
- struct kvm_mmu_op_flush_tlb ftlb = {
- .header.op = KVM_MMU_OP_FLUSH_TLB,
- };
-
- kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
-}
-
-static void kvm_release_pt(unsigned long pfn)
-{
- struct kvm_mmu_op_release_pt rpt = {
- .header.op = KVM_MMU_OP_RELEASE_PT,
- .pt_phys = (u64)pfn << PAGE_SHIFT,
- };
-
- kvm_mmu_op(&rpt, sizeof rpt);
-}
-
-static void kvm_enter_lazy_mmu(void)
-{
- paravirt_enter_lazy_mmu();
-}
-
-static void kvm_leave_lazy_mmu(void)
-{
- struct kvm_para_state *state = kvm_para_state();
-
- mmu_queue_flush(state);
- paravirt_leave_lazy_mmu();
-}
-
static void __init paravirt_ops_setup(void)
{
pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
- if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
- pv_mmu_ops.set_pte = kvm_set_pte;
- pv_mmu_ops.set_pte_at = kvm_set_pte_at;
- pv_mmu_ops.set_pmd = kvm_set_pmd;
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
- pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
- pv_mmu_ops.pte_clear = kvm_pte_clear;
- pv_mmu_ops.pmd_clear = kvm_pmd_clear;
-#endif
- pv_mmu_ops.set_pud = kvm_set_pud;
-#if PAGETABLE_LEVELS == 4
- pv_mmu_ops.set_pgd = kvm_set_pgd;
-#endif
-#endif
- pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
- pv_mmu_ops.release_pte = kvm_release_pt;
- pv_mmu_ops.release_pmd = kvm_release_pt;
- pv_mmu_ops.release_pud = kvm_release_pt;
-
- pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
- pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
- }
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index d494799aafc..fe86493f3ed 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -1,14 +1,18 @@
/*
* AMD CPU Microcode Update Driver for Linux
- * Copyright (C) 2008 Advanced Micro Devices Inc.
+ * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
*
* Author: Peter Oruba <peter.oruba@amd.com>
*
* Based on work by:
* Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
*
- * This driver allows to upgrade microcode on AMD
- * family 0x10 and 0x11 processors.
+ * Maintainers:
+ * Andreas Herrmann <andreas.herrmann3@amd.com>
+ * Borislav Petkov <borislav.petkov@amd.com>
+ *
+ * This driver allows upgrading microcode on F10h AMD
+ * CPUs and later.
*
* Licensed under the terms of the GNU General Public
* License version 2. See file COPYING for details.
@@ -71,6 +75,9 @@ struct microcode_amd {
static struct equiv_cpu_entry *equiv_cpu_table;
+/* page-sized ucode patch buffer */
+void *patch;
+
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -86,27 +93,76 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
return 0;
}
-static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
- int rev)
+static unsigned int verify_ucode_size(int cpu, u32 patch_size,
+ unsigned int size)
{
- unsigned int current_cpu_id;
- u16 equiv_cpu_id = 0;
- unsigned int i = 0;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u32 max_size;
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+#define F15H_MPB_MAX_SIZE 4096
+
+ switch (c->x86) {
+ case 0x14:
+ max_size = F14H_MPB_MAX_SIZE;
+ break;
+ case 0x15:
+ max_size = F15H_MPB_MAX_SIZE;
+ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+ }
+
+ if (patch_size > min_t(u32, size, max_size)) {
+ pr_err("patch size mismatch\n");
+ return 0;
+ }
+
+ return patch_size;
+}
+
+static u16 find_equiv_id(void)
+{
+ unsigned int current_cpu_id, i = 0;
BUG_ON(equiv_cpu_table == NULL);
+
current_cpu_id = cpuid_eax(0x00000001);
while (equiv_cpu_table[i].installed_cpu != 0) {
- if (current_cpu_id == equiv_cpu_table[i].installed_cpu) {
- equiv_cpu_id = equiv_cpu_table[i].equiv_cpu;
- break;
- }
+ if (current_cpu_id == equiv_cpu_table[i].installed_cpu)
+ return equiv_cpu_table[i].equiv_cpu;
+
i++;
}
+ return 0;
+}
+/*
+ * we signal a good patch is found by returning its size > 0
+ */
+static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
+ unsigned int leftover_size, int rev,
+ unsigned int *current_size)
+{
+ struct microcode_header_amd *mc_hdr;
+ unsigned int actual_size;
+ u16 equiv_cpu_id;
+
+ /* size of the current patch we're staring at */
+ *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+
+ equiv_cpu_id = find_equiv_id();
if (!equiv_cpu_id)
return 0;
+ /*
+ * let's look at the patch header itself now
+ */
+ mc_hdr = (struct microcode_header_amd *)(ucode_ptr + SECTION_HDR_SIZE);
+
if (mc_hdr->processor_rev_id != equiv_cpu_id)
return 0;
@@ -120,7 +176,20 @@ static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
if (mc_hdr->patch_id <= rev)
return 0;
- return 1;
+ /*
+ * now that the header looks sane, verify its size
+ */
+ actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+ if (!actual_size)
+ return 0;
+
+ /* clear the patch buffer */
+ memset(patch, 0, PAGE_SIZE);
+
+ /* all looks ok, get the binary patch */
+ get_ucode_data(patch, ucode_ptr + SECTION_HDR_SIZE, actual_size);
+
+ return actual_size;
}
static int apply_microcode_amd(int cpu)
@@ -155,63 +224,6 @@ static int apply_microcode_amd(int cpu)
return 0;
}
-static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
-{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- u32 max_size, actual_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-
- switch (c->x86) {
- case 0x14:
- max_size = F14H_MPB_MAX_SIZE;
- break;
- case 0x15:
- max_size = F15H_MPB_MAX_SIZE;
- break;
- default:
- max_size = F1XH_MPB_MAX_SIZE;
- break;
- }
-
- actual_size = *(u32 *)(buf + 4);
-
- if (actual_size + SECTION_HDR_SIZE > size || actual_size > max_size) {
- pr_err("section size mismatch\n");
- return 0;
- }
-
- return actual_size;
-}
-
-static struct microcode_header_amd *
-get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
-{
- struct microcode_header_amd *mc = NULL;
- unsigned int actual_size = 0;
-
- if (*(u32 *)buf != UCODE_UCODE_TYPE) {
- pr_err("invalid type field in container file section header\n");
- goto out;
- }
-
- actual_size = verify_ucode_size(cpu, buf, size);
- if (!actual_size)
- goto out;
-
- mc = vzalloc(actual_size);
- if (!mc)
- goto out;
-
- get_ucode_data(mc, buf + SECTION_HDR_SIZE, actual_size);
- *mc_size = actual_size + SECTION_HDR_SIZE;
-
-out:
- return mc;
-}
-
static int install_equiv_cpu_table(const u8 *buf)
{
unsigned int *ibuf = (unsigned int *)buf;
@@ -247,36 +259,38 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct microcode_header_amd *mc_hdr = NULL;
- unsigned int mc_size, leftover;
+ unsigned int mc_size, leftover, current_size = 0;
int offset;
const u8 *ucode_ptr = data;
void *new_mc = NULL;
unsigned int new_rev = uci->cpu_sig.rev;
- enum ucode_state state = UCODE_OK;
+ enum ucode_state state = UCODE_ERROR;
offset = install_equiv_cpu_table(ucode_ptr);
if (offset < 0) {
pr_err("failed to create equivalent cpu table\n");
- return UCODE_ERROR;
+ goto out;
}
-
ucode_ptr += offset;
leftover = size - offset;
- while (leftover) {
- mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
- if (!mc_hdr)
- break;
+ if (*(u32 *)ucode_ptr != UCODE_UCODE_TYPE) {
+ pr_err("invalid type field in container file section header\n");
+ goto free_table;
+ }
- if (get_matching_microcode(cpu, mc_hdr, new_rev)) {
- vfree(new_mc);
+ while (leftover) {
+ mc_size = get_matching_microcode(cpu, ucode_ptr, leftover,
+ new_rev, &current_size);
+ if (mc_size) {
+ mc_hdr = patch;
+ new_mc = patch;
new_rev = mc_hdr->patch_id;
- new_mc = mc_hdr;
- } else
- vfree(mc_hdr);
+ goto out_ok;
+ }
- ucode_ptr += mc_size;
- leftover -= mc_size;
+ ucode_ptr += current_size;
+ leftover -= current_size;
}
if (!new_mc) {
@@ -284,19 +298,16 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
goto free_table;
}
- if (!leftover) {
- vfree(uci->mc);
- uci->mc = new_mc;
- pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
- cpu, uci->cpu_sig.rev, new_rev);
- } else {
- vfree(new_mc);
- state = UCODE_ERROR;
- }
+out_ok:
+ uci->mc = new_mc;
+ state = UCODE_OK;
+ pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
+ cpu, uci->cpu_sig.rev, new_rev);
free_table:
free_equiv_cpu_table();
+out:
return state;
}
@@ -337,7 +348,6 @@ static void microcode_fini_cpu_amd(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- vfree(uci->mc);
uci->mc = NULL;
}
@@ -351,5 +361,14 @@ static struct microcode_ops microcode_amd_ops = {
struct microcode_ops * __init init_amd_microcode(void)
{
+ patch = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!patch)
+ return NULL;
+
return &microcode_amd_ops;
}
+
+void __exit exit_amd_microcode(void)
+{
+ free_page((unsigned long)patch);
+}
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index f2d2a664e79..fda91c30710 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
return 0;
}
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
{
misc_deregister(&microcode_dev);
}
@@ -292,8 +292,8 @@ static int reload_for_cpu(int cpu)
return err;
}
-static ssize_t reload_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t reload_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
@@ -318,30 +318,30 @@ static ssize_t reload_store(struct sys_device *dev,
return ret;
}
-static ssize_t version_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}
-static ssize_t pf_show(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t pf_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
-static SYSDEV_ATTR(reload, 0200, NULL, reload_store);
-static SYSDEV_ATTR(version, 0400, version_show, NULL);
-static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL);
+static DEVICE_ATTR(reload, 0200, NULL, reload_store);
+static DEVICE_ATTR(version, 0400, version_show, NULL);
+static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
static struct attribute *mc_default_attrs[] = {
- &attr_reload.attr,
- &attr_version.attr,
- &attr_processor_flags.attr,
+ &dev_attr_reload.attr,
+ &dev_attr_version.attr,
+ &dev_attr_processor_flags.attr,
NULL
};
@@ -405,43 +405,45 @@ static enum ucode_state microcode_update_cpu(int cpu)
return ustate;
}
-static int mc_sysdev_add(struct sys_device *sys_dev)
+static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
- int err, cpu = sys_dev->id;
+ int err, cpu = dev->id;
if (!cpu_online(cpu))
return 0;
pr_debug("CPU%d added\n", cpu);
- err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+ err = sysfs_create_group(&dev->kobj, &mc_attr_group);
if (err)
return err;
if (microcode_init_cpu(cpu) == UCODE_ERROR) {
- sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+ sysfs_remove_group(&dev->kobj, &mc_attr_group);
return -EINVAL;
}
return err;
}
-static int mc_sysdev_remove(struct sys_device *sys_dev)
+static int mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
- int cpu = sys_dev->id;
+ int cpu = dev->id;
if (!cpu_online(cpu))
return 0;
pr_debug("CPU%d removed\n", cpu);
microcode_fini_cpu(cpu);
- sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+ sysfs_remove_group(&dev->kobj, &mc_attr_group);
return 0;
}
-static struct sysdev_driver mc_sysdev_driver = {
- .add = mc_sysdev_add,
- .remove = mc_sysdev_remove,
+static struct subsys_interface mc_cpu_interface = {
+ .name = "microcode",
+ .subsys = &cpu_subsys,
+ .add_dev = mc_device_add,
+ .remove_dev = mc_device_remove,
};
/**
@@ -464,9 +466,9 @@ static __cpuinit int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
+ dev = get_cpu_device(cpu);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -474,13 +476,13 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
pr_debug("CPU%d added\n", cpu);
- if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
+ if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/* Suspend is in progress, only remove the interface */
- sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
+ sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
break;
@@ -519,27 +521,23 @@ static int __init microcode_init(void)
microcode_pdev = platform_device_register_simple("microcode", -1,
NULL, 0);
- if (IS_ERR(microcode_pdev)) {
- microcode_dev_exit();
+ if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
- }
get_online_cpus();
mutex_lock(&microcode_mutex);
- error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
+ error = subsys_interface_register(&mc_cpu_interface);
mutex_unlock(&microcode_mutex);
put_online_cpus();
- if (error) {
- platform_device_unregister(microcode_pdev);
- return error;
- }
+ if (error)
+ goto out_pdev;
error = microcode_dev_init();
if (error)
- return error;
+ goto out_driver;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,11 +546,27 @@ static int __init microcode_init(void)
" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
return 0;
+
+out_driver:
+ get_online_cpus();
+ mutex_lock(&microcode_mutex);
+
+ subsys_interface_unregister(&mc_cpu_interface);
+
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
+
+out_pdev:
+ platform_device_unregister(microcode_pdev);
+ return error;
+
}
module_init(microcode_init);
static void __exit microcode_exit(void)
{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
microcode_dev_exit();
unregister_hotcpu_notifier(&mc_cpu_notifier);
@@ -561,7 +575,7 @@ static void __exit microcode_exit(void)
get_online_cpus();
mutex_lock(&microcode_mutex);
- sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+ subsys_interface_unregister(&mc_cpu_interface);
mutex_unlock(&microcode_mutex);
put_online_cpus();
@@ -570,6 +584,9 @@ static void __exit microcode_exit(void)
microcode_ops = NULL;
+ if (c->x86_vendor == X86_VENDOR_AMD)
+ exit_amd_microcode();
+
pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n");
}
module_exit(microcode_exit);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 9103b89c145..ca470e4c92d 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
}
#endif
+ set_bit(m->busid, mp_bus_not_pci);
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
- set_bit(m->busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early)
static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
- unsigned long size = get_mpc_size(mpf->physptr);
-
- memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+ memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}
static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
mpf, (u64)virt_to_phys(mpf));
mem = virt_to_phys(mpf);
- memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+ memblock_reserve(mem, sizeof(*mpf));
if (mpf->physptr)
smp_reserve_memory(mpf);
@@ -836,10 +834,8 @@ early_param("alloc_mptable", parse_alloc_mptable_opt);
void __init early_reserve_e820_mpc_new(void)
{
- if (enable_update_mptable && alloc_mptable) {
- u64 startt = 0;
- mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
- }
+ if (enable_update_mptable && alloc_mptable)
+ mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
}
static int __init update_mp_table(void)
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 12fcbe2c143..96356762a51 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -236,7 +236,7 @@ static struct notifier_block __refdata msr_class_cpu_notifier = {
.notifier_call = msr_class_cpu_callback,
};
-static char *msr_devnode(struct device *dev, mode_t *mode)
+static char *msr_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
}
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 80dc793b3f6..1c4d769e21e 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -45,6 +45,15 @@ int iommu_detected __read_mostly = 0;
*/
int iommu_pass_through __read_mostly;
+/*
+ * Group multi-function PCI devices into a single device-group for the
+ * iommu_device_group interface. This tells the iommu driver to pretend
+ * it cannot distinguish between functions of a device, exposing only one
+ * group for the device. Useful for disallowing use of individual PCI
+ * functions from userspace drivers.
+ */
+int iommu_group_mf __read_mostly;
+
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
/* Dummy device used for NULL arguments (normally ISA). */
@@ -169,6 +178,8 @@ static __init int iommu_setup(char *p)
#endif
if (!strncmp(p, "pt", 2))
iommu_pass_through = 1;
+ if (!strncmp(p, "group_mf", 8))
+ iommu_group_mf = 1;
gart_parse_options(p);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b9b3b1a5164..15763af7bfe 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -293,7 +293,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.orig_ax = -1;
regs.ip = (unsigned long) kernel_thread_helper;
regs.cs = __KERNEL_CS | get_kernel_rpl();
- regs.flags = X86_EFLAGS_IF | 0x2;
+ regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
@@ -403,6 +403,14 @@ void default_idle(void)
EXPORT_SYMBOL(default_idle);
#endif
+bool set_pm_idle_to_default(void)
+{
+ bool ret = !!pm_idle;
+
+ pm_idle = default_idle;
+
+ return ret;
+}
void stop_this_cpu(void *dummy)
{
local_irq_disable();
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 795b79f984c..485204f58cd 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -99,7 +99,8 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -116,7 +117,8 @@ void cpu_idle(void)
pm_idle();
start_critical_timings();
}
- tick_nohz_restart_sched_tick();
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3bd7e6eebf3..9b9fe4a85c8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -122,7 +122,7 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- tick_nohz_stop_sched_tick(1);
+ tick_nohz_idle_enter();
while (!need_resched()) {
rmb();
@@ -139,8 +139,14 @@ void cpu_idle(void)
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();
+
+ /* enter_idle() needs rcu for notifiers */
+ rcu_idle_enter();
+
if (cpuidle_idle_call())
pm_idle();
+
+ rcu_idle_exit();
start_critical_timings();
/* In many cases the interrupt that ended idle
@@ -149,7 +155,7 @@ void cpu_idle(void)
__exit_idle();
}
- tick_nohz_restart_sched_tick();
+ tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -293,13 +299,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
- p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
- memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
- IO_BITMAP_BYTES);
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 82528799c5d..89a04c7b5bb 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -749,7 +749,8 @@ put:
/*
* Handle PTRACE_POKEUSR calls for the debug register area.
*/
-int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
+static int ptrace_set_debugreg(struct task_struct *tsk, int n,
+ unsigned long val)
{
struct thread_struct *thread = &(tsk->thread);
int rc = 0;
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index b78643d0f9a..03920a15a63 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+ quirk_amd_nb_node);
+
#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e334be1182b..37a458b521a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
*/
/*
- * Some machines require the "reboot=b" commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" commandline options,
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_KBD) {
+ reboot_type = BOOT_KBD;
+ printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
{ /* Handle reboot issue on Acer Aspire one */
- .callback = set_bios_reboot,
+ .callback = set_kbd_reboot,
.ident = "Acer Aspire One A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
},
},
+ { /* Handle problems with rebooting on the OptiPlex 990. */
+ .callback = set_pci_reboot,
+ .ident = "Dell OptiPlex 990",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 348ce016a83..af6db6ec5b2 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -12,6 +12,7 @@
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/time.h>
+#include <asm/mrst.h>
#ifdef CONFIG_X86_32
/*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
if (of_have_populated_dt())
return 0;
+ /* Intel MID platforms don't have ioport rtc */
+ if (mrst_identify_cpu())
+ return -ENODEV;
+
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
"registered platform RTC device (no PNP device found)\n");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cf0ef986cb6..d05444ac2ae 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void)
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
- memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+ memblock_reserve(__pa(_brk_start),
+ __pa(_brk_end) - __pa(_brk_start));
/* Mark brk area as locked down and no longer taking any
new allocations */
@@ -331,13 +332,13 @@ static void __init relocate_initrd(void)
ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
PAGE_SIZE);
- if (ramdisk_here == MEMBLOCK_ERROR)
+ if (!ramdisk_here)
panic("Cannot find place for new RAMDISK of size %lld\n",
ramdisk_size);
/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+ memblock_reserve(ramdisk_here, area_size);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void)
initrd_start = 0;
if (ramdisk_size >= (end_of_lowmem>>1)) {
- memblock_x86_free_range(ramdisk_image, ramdisk_end);
+ memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
printk(KERN_ERR "initrd too large to handle, "
"disabling initrd\n");
return;
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void)
relocate_initrd();
- memblock_x86_free_range(ramdisk_image, ramdisk_end);
+ memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
{
struct setup_data *data;
u64 pa_data;
- char buf[32];
if (boot_params.hdr.version < 0x0209)
return;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
- sprintf(buf, "setup data %x", data->type);
- memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+ memblock_reserve(pa_data, sizeof(*data) + data->len);
pa_data = data->next;
early_iounmap(data, sizeof(*data));
}
@@ -554,7 +553,7 @@ static void __init reserve_crashkernel(void)
crash_base = memblock_find_in_range(alignment,
CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
- if (crash_base == MEMBLOCK_ERROR) {
+ if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
}
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void)
return;
}
}
- memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+ memblock_reserve(crash_base, crash_size);
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void)
addr = find_ibft_region(&size);
if (size)
- memblock_x86_reserve_range(addr, addr + size, "* ibft");
+ memblock_reserve(addr, size);
}
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 54ddaeb221c..46a01bdc27e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -682,7 +682,6 @@ static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
struct pt_regs *regs)
{
- sigset_t blocked;
int ret;
/* Are we from a system call? */
@@ -733,10 +732,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
*/
regs->flags &= ~X86_EFLAGS_TF;
- sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&blocked, sig);
- set_current_blocked(&blocked);
+ block_sigmask(ka, sig);
tracehook_signal_handler(sig, info, ka, regs,
test_thread_flag(TIF_SINGLESTEP));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f548cb4a95..e38e21754ee 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -840,7 +840,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
- !physid_isset(apicid, phys_cpu_present_map)) {
+ !physid_isset(apicid, phys_cpu_present_map) ||
+ (!x2apic_mode && apicid >= 255)) {
printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
return -EINVAL;
}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a91ae7709b4..a73b61055ad 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -14,11 +14,11 @@ void __init setup_trampolines(void)
/* Has to be in very low memory so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (mem == MEMBLOCK_ERROR)
+ if (!mem)
panic("Cannot allocate trampoline\n");
x86_trampoline_base = __va(mem);
- memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+ memblock_reserve(mem, size);
printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
x86_trampoline_base, (unsigned long long)mem, size);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a8e3eb83466..fa1191fb679 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -306,15 +306,10 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
== NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
-#ifdef CONFIG_KPROBES
+
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
return;
-#else
- if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
- == NOTIFY_STOP)
- return;
-#endif
preempt_conditional_sti(regs);
do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index db483369f10..2c9cf0fd78f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -35,7 +35,7 @@ static int __read_mostly tsc_unstable;
erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;
-static int tsc_clocksource_reliable;
+int tsc_clocksource_reliable;
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -178,11 +178,11 @@ static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
}
#define CAL_MS 10
-#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
+#define CAL_LATCH (PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS 1000
#define CAL2_MS 50
-#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
+#define CAL2_LATCH (PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS 5000
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 0aa5fed8b9e..9eba29b46cb 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -113,7 +113,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
if (unsynchronized_tsc())
return;
- if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+ if (tsc_clocksource_reliable) {
if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
pr_info(
"Skipped synchronization checks as TSC is reliable.\n");
@@ -172,7 +172,7 @@ void __cpuinit check_tsc_sync_target(void)
{
int cpus = 2;
- if (unsynchronized_tsc() || boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+ if (unsynchronized_tsc() || tsc_clocksource_reliable)
return;
/*
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e4d4a22e8b9..b07ba939356 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -57,7 +57,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
static int __init vsyscall_setup(char *str)
{
@@ -140,11 +140,40 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
+static bool write_ok_or_segv(unsigned long ptr, size_t size)
+{
+ /*
+ * XXX: if access_ok, get_user, and put_user handled
+ * sig_on_uaccess_error, this could go away.
+ */
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
+ siginfo_t info;
+ struct thread_struct *thread = &current->thread;
+
+ thread->error_code = 6; /* user fault, no page, write */
+ thread->cr2 = ptr;
+ thread->trap_no = 14;
+
+ memset(&info, 0, sizeof(info));
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void __user *)ptr;
+
+ force_sig_info(SIGSEGV, &info, current);
+ return false;
+ } else {
+ return true;
+ }
+}
+
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
struct task_struct *tsk;
unsigned long caller;
int vsyscall_nr;
+ int prev_sig_on_uaccess_error;
long ret;
/*
@@ -180,35 +209,65 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
if (seccomp_mode(&tsk->seccomp))
do_exit(SIGKILL);
+ /*
+ * With a real vsyscall, page faults cause SIGSEGV. We want to
+ * preserve that behavior to make writing exploits harder.
+ */
+ prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+ current_thread_info()->sig_on_uaccess_error = 1;
+
+ /*
+ * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+ * 64-bit, so we don't need to special-case it here. For all the
+ * vsyscalls, 0 means "don't write anything" not "write it at
+ * address 0".
+ */
+ ret = -EFAULT;
switch (vsyscall_nr) {
case 0:
+ if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
+ !write_ok_or_segv(regs->si, sizeof(struct timezone)))
+ break;
+
ret = sys_gettimeofday(
(struct timeval __user *)regs->di,
(struct timezone __user *)regs->si);
break;
case 1:
+ if (!write_ok_or_segv(regs->di, sizeof(time_t)))
+ break;
+
ret = sys_time((time_t __user *)regs->di);
break;
case 2:
+ if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+ !write_ok_or_segv(regs->si, sizeof(unsigned)))
+ break;
+
ret = sys_getcpu((unsigned __user *)regs->di,
(unsigned __user *)regs->si,
0);
break;
}
+ current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+
if (ret == -EFAULT) {
- /*
- * Bad news -- userspace fed a bad pointer to a vsyscall.
- *
- * With a real vsyscall, that would have caused SIGSEGV.
- * To make writing reliable exploits using the emulated
- * vsyscalls harder, generate SIGSEGV here as well.
- */
+ /* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall fault (exploit attempt?)");
- goto sigsegv;
+
+ /*
+ * If we failed to generate a signal for any reason,
+ * generate one here. (This should be impossible.)
+ */
+ if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+ !sigismember(&tsk->pending.signal, SIGSEGV)))
+ goto sigsegv;
+
+ return true; /* Don't emulate the ret. */
}
regs->ax = ret;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index c1d6cd54939..91f83e21b98 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -92,6 +92,7 @@ struct x86_init_ops x86_init __initdata = {
struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
.setup_percpu_clockev = setup_secondary_APIC_clock,
+ .fixup_cpu_id = x86_default_fixup_cpu_id,
};
static void default_nmi_init(void) { };
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ff5790d8e99..1a7fe868f37 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -35,6 +35,7 @@ config KVM
select KVM_MMIO
select TASKSTATS
select TASK_DELAY_ACCT
+ select PERF_EVENTS
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -52,6 +53,8 @@ config KVM
config KVM_INTEL
tristate "KVM for Intel processors support"
depends on KVM
+ # for perf_guest_get_msrs():
+ depends on CPU_SUP_INTEL
---help---
Provides support for KVM on Intel processors equipped with the VT
extensions.
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index f15501f431c..4f579e8dcac 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
kvm-$(CONFIG_KVM_ASYNC_PF) += $(addprefix ../../../virt/kvm/, async_pf.o)
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
- i8254.o timer.o
+ i8254.o timer.o cpuid.o pmu.o
kvm-intel-y += vmx.o
kvm-amd-y += svm.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
new file mode 100644
index 00000000000..89b02bfaaca
--- /dev/null
+++ b/arch/x86/kvm/cpuid.c
@@ -0,0 +1,670 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ * cpuid support routines
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ * Copyright IBM Corporation, 2008
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <asm/user.h>
+#include <asm/xsave.h>
+#include "cpuid.h"
+#include "lapic.h"
+#include "mmu.h"
+#include "trace.h"
+
+void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (!best)
+ return;
+
+ /* Update OSXSAVE bit */
+ if (cpu_has_xsave && best->function == 0x1) {
+ best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+ best->ecx |= bit(X86_FEATURE_OSXSAVE);
+ }
+
+ if (apic) {
+ if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+ apic->lapic_timer.timer_mode_mask = 3 << 17;
+ else
+ apic->lapic_timer.timer_mode_mask = 1 << 17;
+ }
+
+ kvm_pmu_cpuid_update(vcpu);
+}
+
+static int is_efer_nx(void)
+{
+ unsigned long long efer = 0;
+
+ rdmsrl_safe(MSR_EFER, &efer);
+ return efer & EFER_NX;
+}
+
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_cpuid_entry2 *e, *entry;
+
+ entry = NULL;
+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+ e = &vcpu->arch.cpuid_entries[i];
+ if (e->function == 0x80000001) {
+ entry = e;
+ break;
+ }
+ }
+ if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
+ entry->edx &= ~(1 << 20);
+ printk(KERN_INFO "kvm: guest NX capability removed\n");
+ }
+}
+
+/* when an old userspace process fills a new kernel module */
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid *cpuid,
+ struct kvm_cpuid_entry __user *entries)
+{
+ int r, i;
+ struct kvm_cpuid_entry *cpuid_entries;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -ENOMEM;
+ cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
+ if (!cpuid_entries)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(cpuid_entries, entries,
+ cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+ goto out_free;
+ for (i = 0; i < cpuid->nent; i++) {
+ vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
+ vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
+ vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
+ vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
+ vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
+ vcpu->arch.cpuid_entries[i].index = 0;
+ vcpu->arch.cpuid_entries[i].flags = 0;
+ vcpu->arch.cpuid_entries[i].padding[0] = 0;
+ vcpu->arch.cpuid_entries[i].padding[1] = 0;
+ vcpu->arch.cpuid_entries[i].padding[2] = 0;
+ }
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ cpuid_fix_nx_cap(vcpu);
+ r = 0;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+ kvm_update_cpuid(vcpu);
+
+out_free:
+ vfree(cpuid_entries);
+out:
+ return r;
+}
+
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+{
+ int r;
+
+ r = -E2BIG;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+ cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
+ vcpu->arch.cpuid_nent = cpuid->nent;
+ kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
+ kvm_update_cpuid(vcpu);
+ return 0;
+
+out:
+ return r;
+}
+
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+{
+ int r;
+
+ r = -E2BIG;
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
+ return 0;
+
+out:
+ cpuid->nent = vcpu->arch.cpuid_nent;
+ return r;
+}
+
+static void cpuid_mask(u32 *word, int wordnum)
+{
+ *word &= boot_cpu_data.x86_capability[wordnum];
+}
+
+static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ u32 index)
+{
+ entry->function = function;
+ entry->index = index;
+ cpuid_count(entry->function, entry->index,
+ &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+ entry->flags = 0;
+}
+
+static bool supported_xcr0_bit(unsigned bit)
+{
+ u64 mask = ((u64)1 << bit);
+
+ return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+}
+
+#define F(x) bit(X86_FEATURE_##x)
+
+static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ u32 index, int *nent, int maxnent)
+{
+ int r;
+ unsigned f_nx = is_efer_nx() ? F(NX) : 0;
+#ifdef CONFIG_X86_64
+ unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+ ? F(GBPAGES) : 0;
+ unsigned f_lm = F(LM);
+#else
+ unsigned f_gbpages = 0;
+ unsigned f_lm = 0;
+#endif
+ unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+
+ /* cpuid 1.edx */
+ const u32 kvm_supported_word0_x86_features =
+ F(FPU) | F(VME) | F(DE) | F(PSE) |
+ F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+ F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+ F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+ F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+ 0 /* Reserved, DS, ACPI */ | F(MMX) |
+ F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+ 0 /* HTT, TM, Reserved, PBE */;
+ /* cpuid 0x80000001.edx */
+ const u32 kvm_supported_word1_x86_features =
+ F(FPU) | F(VME) | F(DE) | F(PSE) |
+ F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+ F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+ F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+ F(PAT) | F(PSE36) | 0 /* Reserved */ |
+ f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+ F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
+ 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
+ /* cpuid 1.ecx */
+ const u32 kvm_supported_word4_x86_features =
+ F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
+ 0 /* DS-CPL, VMX, SMX, EST */ |
+ 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+ F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
+ 0 /* Reserved, DCA */ | F(XMM4_1) |
+ F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
+ 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+ F(F16C) | F(RDRAND);
+ /* cpuid 0x80000001.ecx */
+ const u32 kvm_supported_word6_x86_features =
+ F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+ F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+ F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+ 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_supported_word5_x86_features =
+ F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+ F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+ F(PMM) | F(PMM_EN);
+
+ /* cpuid 7.0.ebx */
+ const u32 kvm_supported_word9_x86_features =
+ F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+
+ r = -E2BIG;
+
+ if (*nent >= maxnent)
+ goto out;
+
+ do_cpuid_1_ent(entry, function, index);
+ ++*nent;
+
+ switch (function) {
+ case 0:
+ entry->eax = min(entry->eax, (u32)0xd);
+ break;
+ case 1:
+ entry->edx &= kvm_supported_word0_x86_features;
+ cpuid_mask(&entry->edx, 0);
+ entry->ecx &= kvm_supported_word4_x86_features;
+ cpuid_mask(&entry->ecx, 4);
+ /* we support x2apic emulation even if host does not support
+ * it since we emulate x2apic in software */
+ entry->ecx |= F(X2APIC);
+ break;
+ /* function 2 entries are STATEFUL. That is, repeated cpuid commands
+ * may return different values. This forces us to get_cpu() before
+ * issuing the first command, and also to emulate this annoying behavior
+ * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
+ case 2: {
+ int t, times = entry->eax & 0xff;
+
+ entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+ entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+ for (t = 1; t < times; ++t) {
+ if (*nent >= maxnent)
+ goto out;
+
+ do_cpuid_1_ent(&entry[t], function, 0);
+ entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+ ++*nent;
+ }
+ break;
+ }
+ /* function 4 has additional index. */
+ case 4: {
+ int i, cache_type;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ /* read more entries until cache_type is zero */
+ for (i = 1; ; ++i) {
+ if (*nent >= maxnent)
+ goto out;
+
+ cache_type = entry[i - 1].eax & 0x1f;
+ if (!cache_type)
+ break;
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
+ case 7: {
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ /* Mask ebx against host capability word 9 */
+ if (index == 0) {
+ entry->ebx &= kvm_supported_word9_x86_features;
+ cpuid_mask(&entry->ebx, 9);
+ } else
+ entry->ebx = 0;
+ entry->eax = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ }
+ case 9:
+ break;
+ case 0xa: { /* Architectural Performance Monitoring */
+ struct x86_pmu_capability cap;
+ union cpuid10_eax eax;
+ union cpuid10_edx edx;
+
+ perf_get_x86_pmu_capability(&cap);
+
+ /*
+		 * Only support a guest architectural PMU on a host
+		 * with an architectural PMU.
+ */
+ if (!cap.version)
+ memset(&cap, 0, sizeof(cap));
+
+ eax.split.version_id = min(cap.version, 2);
+ eax.split.num_counters = cap.num_counters_gp;
+ eax.split.bit_width = cap.bit_width_gp;
+ eax.split.mask_length = cap.events_mask_len;
+
+ edx.split.num_counters_fixed = cap.num_counters_fixed;
+ edx.split.bit_width_fixed = cap.bit_width_fixed;
+ edx.split.reserved = 0;
+
+ entry->eax = eax.full;
+ entry->ebx = cap.events_mask;
+ entry->ecx = 0;
+ entry->edx = edx.full;
+ break;
+ }
+ /* function 0xb has additional index. */
+ case 0xb: {
+ int i, level_type;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ /* read more entries until level_type is zero */
+ for (i = 1; ; ++i) {
+ if (*nent >= maxnent)
+ goto out;
+
+ level_type = entry[i - 1].ecx & 0xff00;
+ if (!level_type)
+ break;
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
+ case 0xd: {
+ int idx, i;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ for (idx = 1, i = 1; idx < 64; ++idx) {
+ if (*nent >= maxnent)
+ goto out;
+
+ do_cpuid_1_ent(&entry[i], function, idx);
+ if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
+ continue;
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ ++i;
+ }
+ break;
+ }
+ case KVM_CPUID_SIGNATURE: {
+ char signature[12] = "KVMKVMKVM\0\0";
+ u32 *sigptr = (u32 *)signature;
+ entry->eax = 0;
+ entry->ebx = sigptr[0];
+ entry->ecx = sigptr[1];
+ entry->edx = sigptr[2];
+ break;
+ }
+ case KVM_CPUID_FEATURES:
+ entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+ (1 << KVM_FEATURE_NOP_IO_DELAY) |
+ (1 << KVM_FEATURE_CLOCKSOURCE2) |
+ (1 << KVM_FEATURE_ASYNC_PF) |
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+ if (sched_info_on())
+ entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
+ entry->ebx = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ case 0x80000000:
+ entry->eax = min(entry->eax, 0x8000001a);
+ break;
+ case 0x80000001:
+ entry->edx &= kvm_supported_word1_x86_features;
+ cpuid_mask(&entry->edx, 1);
+ entry->ecx &= kvm_supported_word6_x86_features;
+ cpuid_mask(&entry->ecx, 6);
+ break;
+ case 0x80000008: {
+ unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+ unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ unsigned phys_as = entry->eax & 0xff;
+
+ if (!g_phys_as)
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->ebx = entry->edx = 0;
+ break;
+ }
+ case 0x80000019:
+ entry->ecx = entry->edx = 0;
+ break;
+ case 0x8000001a:
+ break;
+ case 0x8000001d:
+ break;
+	/* Add support for Centaur's CPUID instruction */
+ case 0xC0000000:
+		/* Just support up to 0xC0000004 now */
+ entry->eax = min(entry->eax, 0xC0000004);
+ break;
+ case 0xC0000001:
+ entry->edx &= kvm_supported_word5_x86_features;
+ cpuid_mask(&entry->edx, 5);
+ break;
+ case 3: /* Processor serial number */
+ case 5: /* MONITOR/MWAIT */
+ case 6: /* Thermal management */
+ case 0x80000007: /* Advanced power management */
+ case 0xC0000002:
+ case 0xC0000003:
+ case 0xC0000004:
+ default:
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
+ kvm_x86_ops->set_supported_cpuid(function, entry);
+
+ r = 0;
+
+out:
+ put_cpu();
+
+ return r;
+}
+
+#undef F
+
+struct kvm_cpuid_param {
+ u32 func;
+ u32 idx;
+ bool has_leaf_count;
+ bool (*qualifier)(struct kvm_cpuid_param *param);
+};
+
+static bool is_centaur_cpu(struct kvm_cpuid_param *param)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
+}
+
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+{
+ struct kvm_cpuid_entry2 *cpuid_entries;
+ int limit, nent = 0, r = -E2BIG, i;
+ u32 func;
+ static struct kvm_cpuid_param param[] = {
+ { .func = 0, .has_leaf_count = true },
+ { .func = 0x80000000, .has_leaf_count = true },
+ { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
+ { .func = KVM_CPUID_SIGNATURE },
+ { .func = KVM_CPUID_FEATURES },
+ };
+
+ if (cpuid->nent < 1)
+ goto out;
+ if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+ cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+ r = -ENOMEM;
+ cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+ if (!cpuid_entries)
+ goto out;
+
+ r = 0;
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ struct kvm_cpuid_param *ent = &param[i];
+
+ if (ent->qualifier && !ent->qualifier(ent))
+ continue;
+
+ r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
+ &nent, cpuid->nent);
+
+ if (r)
+ goto out_free;
+
+ if (!ent->has_leaf_count)
+ continue;
+
+ limit = cpuid_entries[nent - 1].eax;
+ for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
+ r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
+ &nent, cpuid->nent);
+
+ if (r)
+ goto out_free;
+ }
+
+ r = -EFAULT;
+ if (copy_to_user(entries, cpuid_entries,
+ nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out_free;
+ cpuid->nent = nent;
+ r = 0;
+
+out_free:
+ vfree(cpuid_entries);
+out:
+ return r;
+}
+
+static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+{
+ struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+ int j, nent = vcpu->arch.cpuid_nent;
+
+ e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+ /* when no next entry is found, the current entry[i] is reselected */
+ for (j = i + 1; ; j = (j + 1) % nent) {
+ struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+ if (ej->function == e->function) {
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+ return j;
+ }
+ }
+ return 0; /* silence gcc, even though control never reaches here */
+}
+
+/* find an entry with matching function, matching index (if needed), and that
+ * should be read next (if it's stateful) */
+static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
+ u32 function, u32 index)
+{
+ if (e->function != function)
+ return 0;
+ if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
+ return 0;
+ if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
+ !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
+ return 0;
+ return 1;
+}
+
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index)
+{
+ int i;
+ struct kvm_cpuid_entry2 *best = NULL;
+
+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+ struct kvm_cpuid_entry2 *e;
+
+ e = &vcpu->arch.cpuid_entries[i];
+ if (is_matching_cpuid_entry(e, function, index)) {
+ if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
+ move_to_next_stateful_cpuid_entry(vcpu, i);
+ best = e;
+ break;
+ }
+ }
+ return best;
+}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+
+int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best)
+ return best->eax & 0xff;
+not_found:
+ return 36;
+}
+
+/*
+ * If no match is found, check whether we exceed the vCPU's limit
+ * and return the content of the highest valid _standard_ leaf instead.
+ * This is to satisfy the CPUID specification.
+ */
+static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+ u32 function, u32 index)
+{
+ struct kvm_cpuid_entry2 *maxlevel;
+
+ maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+ if (!maxlevel || maxlevel->eax >= function)
+ return NULL;
+ if (function & 0x80000000) {
+ maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+ if (!maxlevel)
+ return NULL;
+ }
+ return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+}
+
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+ u32 function, index;
+ struct kvm_cpuid_entry2 *best;
+
+ function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
+ best = kvm_find_cpuid_entry(vcpu, function, index);
+
+ if (!best)
+ best = check_cpuid_limit(vcpu, function, index);
+
+ if (best) {
+ kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
+ }
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ trace_kvm_cpuid(function,
+ kvm_register_read(vcpu, VCPU_REGS_RAX),
+ kvm_register_read(vcpu, VCPU_REGS_RBX),
+ kvm_register_read(vcpu, VCPU_REGS_RCX),
+ kvm_register_read(vcpu, VCPU_REGS_RDX));
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
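
The KVM_CPUID_SIGNATURE case above packs "KVMKVMKVM\0\0" into ebx/ecx/edx. As a guest-side sketch only (not part of this patch), and assuming the conventional hypervisor base leaf 0x40000000 (the actual KVM_CPUID_SIGNATURE value is defined elsewhere and not shown in this hunk), detection looks roughly like this; on bare metal the leaf is undefined and the compare simply fails:

/* Minimal guest-side sketch: read the signature leaf filled in above. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	__cpuid(0x40000000, eax, ebx, ecx, edx);	/* assumed base leaf */
	memcpy(sig + 0, &ebx, 4);	/* "KVMK" */
	memcpy(sig + 4, &ecx, 4);	/* "VMKV" */
	memcpy(sig + 8, &edx, 4);	/* "M\0\0\0" */
	sig[12] = '\0';

	printf("hypervisor signature: \"%s\" -> %s\n", sig,
	       strcmp(sig, "KVMKVMKVM") == 0 ? "KVM" : "not KVM");
	return 0;
}
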
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
new file mode 100644
index 00000000000..5b97e1797a6
--- /dev/null
+++ b/arch/x86/kvm/cpuid.h
@@ -0,0 +1,46 @@
+#ifndef ARCH_X86_KVM_CPUID_H
+#define ARCH_X86_KVM_CPUID_H
+
+#include "x86.h"
+
+void kvm_update_cpuid(struct kvm_vcpu *vcpu);
+struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+ u32 function, u32 index);
+int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid *cpuid,
+ struct kvm_cpuid_entry __user *entries);
+int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
+int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
+
+
+static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_SMEP));
+}
+
+static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
+}
+
+#endif
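
The guest_cpuid_has_*() helpers above all follow the same pattern: look up the cached entry for (function, index) and test one feature bit. A standalone sketch of that pattern, using plain types and an assumed bit position (XSAVE as CPUID.1:ECX bit 26) instead of the kernel's X86_FEATURE_* machinery:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct cpuid_entry {
	uint32_t function, index;
	uint32_t eax, ebx, ecx, edx;
};

/* linear scan for (function, index), as kvm_find_cpuid_entry() does */
static const struct cpuid_entry *find_entry(const struct cpuid_entry *e,
					    size_t n, uint32_t fn, uint32_t idx)
{
	for (size_t i = 0; i < n; i++)
		if (e[i].function == fn && e[i].index == idx)
			return &e[i];
	return NULL;
}

static bool has_xsave(const struct cpuid_entry *e, size_t n)
{
	const struct cpuid_entry *best = find_entry(e, n, 1, 0);

	return best && (best->ecx & (1u << 26));	/* assumed XSAVE bit */
}
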
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f1e3be18a08..05a562b8502 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -125,8 +125,9 @@
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
+#define PageTable (1 << 29) /* instruction used to write page table */
/* Source 2 operand type */
-#define Src2Shift (29)
+#define Src2Shift (30)
#define Src2None (OpNone << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
@@ -1674,11 +1675,6 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_grp1a(struct x86_emulate_ctxt *ctxt)
-{
- return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
-}
-
static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->modrm_reg) {
@@ -1788,7 +1784,7 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
return rc;
}
-static int em_grp9(struct x86_emulate_ctxt *ctxt)
+static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
@@ -1831,6 +1827,24 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
return rc;
}
+static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
+{
+ /* Save real source value, then compare EAX against destination. */
+ ctxt->src.orig_val = ctxt->src.val;
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+ emulate_2op_SrcV(ctxt, "cmp");
+
+ if (ctxt->eflags & EFLG_ZF) {
+ /* Success: write back to memory. */
+ ctxt->dst.val = ctxt->src.orig_val;
+ } else {
+ /* Failure: write the value we saw to EAX. */
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
+ }
+ return X86EMUL_CONTINUE;
+}
+
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
@@ -2481,6 +2495,15 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_call(struct x86_emulate_ctxt *ctxt)
+{
+ long rel = ctxt->src.val;
+
+ ctxt->src.val = (unsigned long)ctxt->_eip;
+ jmp_rel(ctxt, rel);
+ return em_push(ctxt);
+}
+
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
@@ -2622,12 +2645,75 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
+{
+ u64 pmc;
+
+ if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
+ return emulate_gp(ctxt, 0);
+ ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
+ ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
+ return X86EMUL_CONTINUE;
+}
+
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src.val;
return X86EMUL_CONTINUE;
}
+static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+{
+ if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+ return emulate_gp(ctxt, 0);
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_dr_write(struct x86_emulate_ctxt *ctxt)
+{
+ unsigned long val;
+
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ val = ctxt->src.val & ~0ULL;
+ else
+ val = ctxt->src.val & ~0U;
+
+ /* #UD condition is already handled. */
+ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
+ return emulate_gp(ctxt, 0);
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
+{
+ u64 msr_data;
+
+ msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
+ | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
+ if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
+ return emulate_gp(ctxt, 0);
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
+{
+ u64 msr_data;
+
+ if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
+ return emulate_gp(ctxt, 0);
+
+ ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
+ ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
+ return X86EMUL_CONTINUE;
+}
+
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
@@ -2775,6 +2861,24 @@ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_in(struct x86_emulate_ctxt *ctxt)
+{
+ if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
+ &ctxt->dst.val))
+ return X86EMUL_IO_NEEDED;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_out(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
+ &ctxt->src.val, 1);
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
@@ -2794,6 +2898,69 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_bt(struct x86_emulate_ctxt *ctxt)
+{
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ /* only subword offset */
+ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
+
+ emulate_2op_SrcV_nobyte(ctxt, "bt");
+ return X86EMUL_CONTINUE;
+}
+
+static int em_bts(struct x86_emulate_ctxt *ctxt)
+{
+ emulate_2op_SrcV_nobyte(ctxt, "bts");
+ return X86EMUL_CONTINUE;
+}
+
+static int em_btr(struct x86_emulate_ctxt *ctxt)
+{
+ emulate_2op_SrcV_nobyte(ctxt, "btr");
+ return X86EMUL_CONTINUE;
+}
+
+static int em_btc(struct x86_emulate_ctxt *ctxt)
+{
+ emulate_2op_SrcV_nobyte(ctxt, "btc");
+ return X86EMUL_CONTINUE;
+}
+
+static int em_bsf(struct x86_emulate_ctxt *ctxt)
+{
+ u8 zf;
+
+ __asm__ ("bsf %2, %0; setz %1"
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
+
+ ctxt->eflags &= ~X86_EFLAGS_ZF;
+ if (zf) {
+ ctxt->eflags |= X86_EFLAGS_ZF;
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ }
+ return X86EMUL_CONTINUE;
+}
+
+static int em_bsr(struct x86_emulate_ctxt *ctxt)
+{
+ u8 zf;
+
+ __asm__ ("bsr %2, %0; setz %1"
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
+
+ ctxt->eflags &= ~X86_EFLAGS_ZF;
+ if (zf) {
+ ctxt->eflags |= X86_EFLAGS_ZF;
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ }
+ return X86EMUL_CONTINUE;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -2867,9 +3034,6 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
break;
}
case 4: {
- u64 cr4;
-
- cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
@@ -3003,6 +3167,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
+#define I2bvIP(_f, _e, _i, _p) \
+ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
@@ -3033,17 +3199,17 @@ static struct opcode group7_rm7[] = {
static struct opcode group1[] = {
I(Lock, em_add),
- I(Lock, em_or),
+ I(Lock | PageTable, em_or),
I(Lock, em_adc),
I(Lock, em_sbb),
- I(Lock, em_and),
+ I(Lock | PageTable, em_and),
I(Lock, em_sub),
I(Lock, em_xor),
I(0, em_cmp),
};
static struct opcode group1A[] = {
- D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
+ I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};
static struct opcode group3[] = {
@@ -3058,16 +3224,19 @@ static struct opcode group3[] = {
};
static struct opcode group4[] = {
- D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
+ I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+ I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
N, N, N, N, N, N,
};
static struct opcode group5[] = {
- D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
- D(SrcMem | ModRM | Stack),
+ I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+ I(DstMem | SrcNone | ModRM | Lock, em_grp45),
+ I(SrcMem | ModRM | Stack, em_grp45),
I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
- D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
- D(SrcMem | ModRM | Stack), N,
+ I(SrcMem | ModRM | Stack, em_grp45),
+ I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
+ I(SrcMem | ModRM | Stack, em_grp45), N,
};
static struct opcode group6[] = {
@@ -3096,18 +3265,21 @@ static struct group_dual group7 = { {
static struct opcode group8[] = {
N, N, N, N,
- D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
- D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
+ I(DstMem | SrcImmByte | ModRM, em_bt),
+ I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
+ I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
+ I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
};
static struct group_dual group9 = { {
- N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
+ N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static struct opcode group11[] = {
- I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
+ I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+ X7(D(Undefined)),
};
static struct gprefix pfx_0f_6f_0f_7f = {
@@ -3120,7 +3292,7 @@ static struct opcode opcode_table[256] = {
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
- I6ALU(Lock, em_or),
+ I6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
@@ -3132,7 +3304,7 @@ static struct opcode opcode_table[256] = {
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
- I6ALU(Lock, em_and), N, N,
+ I6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
@@ -3155,8 +3327,8 @@ static struct opcode opcode_table[256] = {
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
- D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
- D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
+ I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
+ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
@@ -3165,11 +3337,11 @@ static struct opcode opcode_table[256] = {
G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
G(DstMem | SrcImmByte | ModRM | Group, group1),
I2bv(DstMem | SrcReg | ModRM, em_test),
- I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
+ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
- I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
+ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
- I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+ I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
@@ -3182,7 +3354,7 @@ static struct opcode opcode_table[256] = {
II(ImplicitOps | Stack, em_popf, popf), N, N,
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
- I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
+ I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstDI | String, em_cmp),
/* 0xA8 - 0xAF */
@@ -3213,13 +3385,13 @@ static struct opcode opcode_table[256] = {
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte, em_loop)),
I(SrcImmByte, em_jcxz),
- D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
- D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
+ I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
+ I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
- D(SrcImm | Stack), D(SrcImm | ImplicitOps),
+ I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
- D2bvIP(SrcDX | DstAcc, in, check_perm_in),
- D2bvIP(SrcAcc | DstDX, out, check_perm_out),
+ I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
+ I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3242,15 +3414,15 @@ static struct opcode twobyte_table[256] = {
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
- DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
- DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
+ IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
+ IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
N, N, N, N,
N, N, N, N, N, N, N, N,
/* 0x30 - 0x3F */
- DI(ImplicitOps | Priv, wrmsr),
+ II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
- DI(ImplicitOps | Priv, rdmsr),
- DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
+ II(ImplicitOps | Priv, em_rdmsr, rdmsr),
+ IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | VendorSpecific, em_sysenter),
I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
N, N,
@@ -3275,26 +3447,28 @@ static struct opcode twobyte_table[256] = {
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
- DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
+ DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
D(DstMem | SrcReg | Src2ImmByte | ModRM),
D(DstMem | SrcReg | Src2CL | ModRM), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
- DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
+ DI(ImplicitOps, rsm),
+ I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
D(DstMem | SrcReg | Src2ImmByte | ModRM),
D(DstMem | SrcReg | Src2CL | ModRM),
D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
- D2bv(DstMem | SrcReg | ModRM | Lock),
+ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
- D(DstMem | SrcReg | ModRM | BitOp | Lock),
+ I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
- G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
- D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
+ G(BitOp, group8),
+ I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
+ I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xCF */
D2bv(DstMem | SrcReg | ModRM | Lock),
@@ -3320,6 +3494,7 @@ static struct opcode twobyte_table[256] = {
#undef D2bv
#undef D2bvIP
#undef I2bv
+#undef I2bvIP
#undef I6ALU
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
@@ -3697,6 +3872,11 @@ done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
+{
+ return ctxt->d & PageTable;
+}
+
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies for REPE
@@ -3720,7 +3900,6 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
- u64 msr_data;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
@@ -3854,15 +4033,6 @@ special_insn:
goto cannot_emulate;
ctxt->dst.val = (s32) ctxt->src.val;
break;
- case 0x6c: /* insb */
- case 0x6d: /* insw/insd */
- ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
- goto do_io_in;
- case 0x6e: /* outsb */
- case 0x6f: /* outsw/outsd */
- ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
- goto do_io_out;
- break;
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
jmp_rel(ctxt, ctxt->src.val);
@@ -3870,9 +4040,6 @@ special_insn:
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
- case 0x8f: /* pop (sole member of Grp1a) */
- rc = em_grp1a(ctxt);
- break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
break;
@@ -3905,38 +4072,11 @@ special_insn:
ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
rc = em_grp2(ctxt);
break;
- case 0xe4: /* inb */
- case 0xe5: /* in */
- goto do_io_in;
- case 0xe6: /* outb */
- case 0xe7: /* out */
- goto do_io_out;
- case 0xe8: /* call (near) */ {
- long int rel = ctxt->src.val;
- ctxt->src.val = (unsigned long) ctxt->_eip;
- jmp_rel(ctxt, rel);
- rc = em_push(ctxt);
- break;
- }
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
- case 0xec: /* in al,dx */
- case 0xed: /* in (e/r)ax,dx */
- do_io_in:
- if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
- &ctxt->dst.val))
- goto done; /* IO is needed */
- break;
- case 0xee: /* out dx,al */
- case 0xef: /* out dx,(e/r)ax */
- do_io_out:
- ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
- &ctxt->src.val, 1);
- ctxt->dst.type = OP_NONE; /* Disable writeback. */
- break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
@@ -3956,12 +4096,6 @@ special_insn:
case 0xfd: /* std */
ctxt->eflags |= EFLG_DF;
break;
- case 0xfe: /* Grp4 */
- rc = em_grp45(ctxt);
- break;
- case 0xff: /* Grp5 */
- rc = em_grp45(ctxt);
- break;
default:
goto cannot_emulate;
}
@@ -4036,49 +4170,6 @@ twobyte_insn:
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
- case 0x22: /* mov reg, cr */
- if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
- emulate_gp(ctxt, 0);
- rc = X86EMUL_PROPAGATE_FAULT;
- goto done;
- }
- ctxt->dst.type = OP_NONE;
- break;
- case 0x23: /* mov from reg to dr */
- if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
- ((ctxt->mode == X86EMUL_MODE_PROT64) ?
- ~0ULL : ~0U)) < 0) {
- /* #UD condition is already handled by the code above */
- emulate_gp(ctxt, 0);
- rc = X86EMUL_PROPAGATE_FAULT;
- goto done;
- }
-
- ctxt->dst.type = OP_NONE; /* no writeback */
- break;
- case 0x30:
- /* wrmsr */
- msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
- | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
- if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
- emulate_gp(ctxt, 0);
- rc = X86EMUL_PROPAGATE_FAULT;
- goto done;
- }
- rc = X86EMUL_CONTINUE;
- break;
- case 0x32:
- /* rdmsr */
- if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
- emulate_gp(ctxt, 0);
- rc = X86EMUL_PROPAGATE_FAULT;
- goto done;
- } else {
- ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
- ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
- }
- rc = X86EMUL_CONTINUE;
- break;
case 0x40 ... 0x4f: /* cmov */
ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
if (!test_cc(ctxt->b, ctxt->eflags))
@@ -4091,93 +4182,21 @@ twobyte_insn:
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
- case 0xa3:
- bt: /* bt */
- ctxt->dst.type = OP_NONE;
- /* only subword offset */
- ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
- emulate_2op_SrcV_nobyte(ctxt, "bt");
- break;
case 0xa4: /* shld imm8, r, r/m */
case 0xa5: /* shld cl, r, r/m */
emulate_2op_cl(ctxt, "shld");
break;
- case 0xab:
- bts: /* bts */
- emulate_2op_SrcV_nobyte(ctxt, "bts");
- break;
case 0xac: /* shrd imm8, r, r/m */
case 0xad: /* shrd cl, r, r/m */
emulate_2op_cl(ctxt, "shrd");
break;
case 0xae: /* clflush */
break;
- case 0xb0 ... 0xb1: /* cmpxchg */
- /*
- * Save real source value, then compare EAX against
- * destination.
- */
- ctxt->src.orig_val = ctxt->src.val;
- ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
- emulate_2op_SrcV(ctxt, "cmp");
- if (ctxt->eflags & EFLG_ZF) {
- /* Success: write back to memory. */
- ctxt->dst.val = ctxt->src.orig_val;
- } else {
- /* Failure: write the value we saw to EAX. */
- ctxt->dst.type = OP_REG;
- ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
- }
- break;
- case 0xb3:
- btr: /* btr */
- emulate_2op_SrcV_nobyte(ctxt, "btr");
- break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
- case 0xba: /* Grp8 */
- switch (ctxt->modrm_reg & 3) {
- case 0:
- goto bt;
- case 1:
- goto bts;
- case 2:
- goto btr;
- case 3:
- goto btc;
- }
- break;
- case 0xbb:
- btc: /* btc */
- emulate_2op_SrcV_nobyte(ctxt, "btc");
- break;
- case 0xbc: { /* bsf */
- u8 zf;
- __asm__ ("bsf %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- ctxt->dst.type = OP_NONE; /* Disable writeback. */
- }
- break;
- }
- case 0xbd: { /* bsr */
- u8 zf;
- __asm__ ("bsr %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- ctxt->dst.type = OP_NONE; /* Disable writeback. */
- }
- break;
- }
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
@@ -4194,9 +4213,6 @@ twobyte_insn:
ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
(u64) ctxt->src.val;
break;
- case 0xc7: /* Grp9 (cmpxchg8b) */
- rc = em_grp9(ctxt);
- break;
default:
goto cannot_emulate;
}
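
em_cmpxchg() above reproduces the architectural CMPXCHG r/m,r behaviour: compare the accumulator with the destination, store the source and set ZF on a match, otherwise load the old destination value into EAX. A behavioural model of just that rule, independent of the emulator's context structures:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

struct cpu_model { uint64_t rax; bool zf; };

static void cmpxchg_model(struct cpu_model *cpu, uint64_t *dst, uint64_t src)
{
	if (cpu->rax == *dst) {
		*dst = src;		/* success: write back to memory */
		cpu->zf = true;
	} else {
		cpu->rax = *dst;	/* failure: old value goes to EAX */
		cpu->zf = false;
	}
}

int main(void)
{
	struct cpu_model cpu = { .rax = 1 };
	uint64_t mem = 1;

	cmpxchg_model(&cpu, &mem, 2);		/* rax == mem: succeeds */
	assert(cpu.zf && mem == 2);

	cmpxchg_model(&cpu, &mem, 3);		/* rax == 1, mem == 2: fails */
	assert(!cpu.zf && cpu.rax == 2 && mem == 2);
	return 0;
}
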
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 76e3f1cd036..d68f99df690 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
+ struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
struct kvm_timer *pt = &ps->pit_timer;
s64 interval;
+ if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
+ return;
+
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -393,15 +397,11 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
case 1:
/* FIXME: enhance mode 4 precision */
case 4:
- if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
- create_pit_timer(ps, val, 0);
- }
+ create_pit_timer(kvm, val, 0);
break;
case 2:
case 3:
- if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
- create_pit_timer(ps, val, 1);
- }
+ create_pit_timer(kvm, val, 1);
break;
default:
destroy_pit_timer(kvm->arch.vpit);
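
create_pit_timer() above derives the timer period as val * NSEC_PER_SEC / KVM_PIT_FREQ. A quick sketch of that arithmetic, assuming the canonical 8254 input clock of 1193182 Hz (the exact KVM_PIT_FREQ constant is defined elsewhere and not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define PIT_FREQ_HZ	1193182ULL	/* assumed; see lead-in */

static uint64_t pit_interval_ns(uint32_t count)
{
	/* multiply in 64 bits first, as muldiv64() does, to avoid overflow */
	return count * NSEC_PER_SEC / PIT_FREQ_HZ;
}

int main(void)
{
	/* a reload count of 65536 gives the classic ~54.9 ms period */
	printf("count=65536 -> %llu ns\n",
	       (unsigned long long)pit_interval_ns(65536));
	/* a 100 Hz guest tick uses a count of 11932 (~10 ms) */
	printf("count=11932 -> %llu ns\n",
	       (unsigned long long)pit_interval_ns(11932));
	return 0;
}
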
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index cac4746d7ff..b6a73537e1e 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -262,9 +262,10 @@ int kvm_pic_read_irq(struct kvm *kvm)
void kvm_pic_reset(struct kvm_kpic_state *s)
{
- int irq;
- struct kvm_vcpu *vcpu0 = s->pics_state->kvm->bsp_vcpu;
+ int irq, i;
+ struct kvm_vcpu *vcpu;
u8 irr = s->irr, isr = s->imr;
+ bool found = false;
s->last_irr = 0;
s->irr = 0;
@@ -281,12 +282,19 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
s->special_fully_nested_mode = 0;
s->init4 = 0;
- for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
- if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
- if (irr & (1 << irq) || isr & (1 << irq)) {
- pic_clear_isr(s, irq);
- }
- }
+ kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+ if (kvm_apic_accept_pic_intr(vcpu)) {
+ found = true;
+ break;
+ }
+
+
+ if (!found)
+ return;
+
+ for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+ if (irr & (1 << irq) || isr & (1 << irq))
+ pic_clear_isr(s, irq);
}
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 54abb40199d..cfdc6e0ef00 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -38,6 +38,7 @@
#include "irq.h"
#include "trace.h"
#include "x86.h"
+#include "cpuid.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -1120,7 +1121,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
return 0;
}
-static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = apic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 138e8cc6fea..6f4ce2575d0 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,6 +34,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1b36cf3e3d..2a2a9b40db1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -59,15 +59,6 @@ enum {
AUDIT_POST_SYNC
};
-char *audit_point_name[] = {
- "pre page fault",
- "post page fault",
- "pre pte write",
- "post pte write",
- "pre sync",
- "post sync"
-};
-
#undef MMU_DEBUG
#ifdef MMU_DEBUG
@@ -87,9 +78,6 @@ static int dbg = 0;
module_param(dbg, bool, 0644);
#endif
-static int oos_shadow = 1;
-module_param(oos_shadow, bool, 0644);
-
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
@@ -593,6 +581,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
return 0;
}
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+ return cache->nobjs;
+}
+
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
struct kmem_cache *cache)
{
@@ -953,21 +946,35 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
}
}
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+ struct kvm_memory_slot *slot)
+{
+ struct kvm_lpage_info *linfo;
+
+ if (likely(level == PT_PAGE_TABLE_LEVEL))
+ return &slot->rmap[gfn - slot->base_gfn];
+
+ linfo = lpage_info_slot(gfn, slot, level);
+ return &linfo->rmap_pde;
+}
+
/*
* Take gfn and return the reverse mapping to it.
*/
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
slot = gfn_to_memslot(kvm, gfn);
- if (likely(level == PT_PAGE_TABLE_LEVEL))
- return &slot->rmap[gfn - slot->base_gfn];
+ return __gfn_to_rmap(kvm, gfn, level, slot);
+}
- linfo = lpage_info_slot(gfn, slot, level);
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_memory_cache *cache;
- return &linfo->rmap_pde;
+ cache = &vcpu->arch.mmu_pte_list_desc_cache;
+ return mmu_memory_cache_free_objects(cache);
}
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -1004,17 +1011,16 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
rmap_remove(kvm, sptep);
}
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+ struct kvm_memory_slot *slot)
{
unsigned long *rmapp;
u64 *spte;
int i, write_protected = 0;
- rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
-
+ rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
- BUG_ON(!spte);
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
if (is_writable_pte(*spte)) {
@@ -1027,12 +1033,11 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
/* check for huge page mappings */
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- rmapp = gfn_to_rmap(kvm, gfn, i);
+ rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
- BUG_ON(!spte);
BUG_ON(!(*spte & PT_PRESENT_MASK));
- BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+ BUG_ON(!is_large_pte(*spte));
pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
if (is_writable_pte(*spte)) {
drop_spte(kvm, spte);
@@ -1047,6 +1052,14 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
return write_protected;
}
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+}
+
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
@@ -1103,15 +1116,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
int (*handler)(struct kvm *kvm, unsigned long *rmapp,
unsigned long data))
{
- int i, j;
+ int j;
int ret;
int retval = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- struct kvm_memory_slot *memslot = &slots->memslots[i];
+ kvm_for_each_memslot(memslot, slots) {
unsigned long start = memslot->userspace_addr;
unsigned long end;
@@ -1324,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
PAGE_SIZE);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+ bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
sp->parent_ptes = 0;
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
@@ -1511,6 +1524,13 @@ static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
return ret;
}
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+static void mmu_audit_disable(void) { }
+#endif
+
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
@@ -1640,6 +1660,18 @@ static void init_shadow_page_table(struct kvm_mmu_page *sp)
sp->spt[i] = 0ull;
}
+static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
+{
+ sp->write_flooding_count = 0;
+}
+
+static void clear_sp_write_flooding_count(u64 *spte)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(spte));
+
+ __clear_sp_write_flooding_count(sp);
+}
+
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
@@ -1683,6 +1715,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
} else if (sp->unsync)
kvm_mmu_mark_parents_unsync(sp);
+ __clear_sp_write_flooding_count(sp);
trace_kvm_mmu_get_page(sp, false);
return sp;
}
@@ -1796,7 +1829,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
}
}
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *spte)
{
u64 pte;
@@ -1804,17 +1837,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
pte = *spte;
if (is_shadow_present_pte(pte)) {
- if (is_last_spte(pte, sp->role.level))
+ if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
- else {
+ if (is_large_pte(pte))
+ --kvm->stat.lpages;
+ } else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
}
- } else if (is_mmio_spte(pte))
+ return true;
+ }
+
+ if (is_mmio_spte(pte))
mmu_spte_clear_no_track(spte);
- if (is_large_pte(pte))
- --kvm->stat.lpages;
+ return false;
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
@@ -1831,15 +1868,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
mmu_page_remove_parent_pte(sp, parent_pte);
}
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.last_pte_updated = NULL;
-}
-
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 *parent_pte;
@@ -1899,7 +1927,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
}
sp->role.invalid = 1;
- kvm_mmu_reset_last_pte_updated(kvm);
return ret;
}
@@ -1985,7 +2012,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
}
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
struct kvm_mmu_page *sp;
struct hlist_node *node;
@@ -1994,7 +2021,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
r = 0;
-
+ spin_lock(&kvm->mmu_lock);
for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
sp->role.word);
@@ -2002,22 +2029,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- return r;
-}
-
-static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
-{
- struct kvm_mmu_page *sp;
- struct hlist_node *node;
- LIST_HEAD(invalid_list);
+ spin_unlock(&kvm->mmu_lock);
- for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
- pgprintk("%s: zap %llx %x\n",
- __func__, gfn, sp->role.word);
- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- }
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
@@ -2169,8 +2185,6 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
return 1;
if (!need_unsync && !s->unsync) {
- if (!oos_shadow)
- return 1;
need_unsync = true;
}
}
@@ -2191,11 +2205,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (set_mmio_spte(sptep, gfn, pfn, pte_access))
return 0;
- /*
- * We don't set the accessed bit, since we sometimes want to see
- * whether the guest actually used the pte (in order to detect
- * demand paging).
- */
spte = PT_PRESENT_MASK;
if (!speculative)
spte |= shadow_accessed_mask;
@@ -2346,10 +2355,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
}
}
kvm_release_pfn_clean(pfn);
- if (speculative) {
- vcpu->arch.last_pte_updated = sptep;
- vcpu->arch.last_pte_gfn = gfn;
- }
}
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2840,12 +2845,12 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
return;
vcpu_clear_mmio_info(vcpu, ~0ul);
- trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
sp = page_header(root);
mmu_sync_children(vcpu, sp);
- trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+ kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
return;
}
for (i = 0; i < 4; ++i) {
@@ -2857,7 +2862,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
mmu_sync_children(vcpu, sp);
}
}
- trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+ kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -3510,28 +3515,119 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
kvm_mmu_flush_tlb(vcpu);
}
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
+static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+ const u8 *new, int *bytes)
{
- u64 *spte = vcpu->arch.last_pte_updated;
+ u64 gentry;
+ int r;
+
+ /*
+	 * Assume the pte write is on a page table of the same type as
+	 * the current vcpu paging mode, since we update the sptes only
+	 * when they have the same mode.
+ */
+ if (is_pae(vcpu) && *bytes == 4) {
+ /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+ *gpa &= ~(gpa_t)7;
+ *bytes = 8;
+ r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
+ if (r)
+ gentry = 0;
+ new = (const u8 *)&gentry;
+ }
- return !!(spte && (*spte & shadow_accessed_mask));
+ switch (*bytes) {
+ case 4:
+ gentry = *(const u32 *)new;
+ break;
+ case 8:
+ gentry = *(const u64 *)new;
+ break;
+ default:
+ gentry = 0;
+ break;
+ }
+
+ return gentry;
}
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+/*
+ * If we're seeing too many writes to a page, it may no longer be a page table,
+ * or we may be forking, in which case it is better to unmap the page.
+ */
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
{
- u64 *spte = vcpu->arch.last_pte_updated;
+ /*
+	 * Skip write-flooding detection for an sp whose level is 1, because
+	 * it can become unsync, and then the guest page is not write-protected.
+ */
+ if (sp->role.level == 1)
+ return false;
- if (spte
- && vcpu->arch.last_pte_gfn == gfn
- && shadow_accessed_mask
- && !(*spte & shadow_accessed_mask)
- && is_shadow_present_pte(*spte))
- set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ return ++sp->write_flooding_count >= 3;
+}
+
+/*
+ * Misaligned accesses are too much trouble to fix up; also, they usually
+ * indicate a page is not used as a page table.
+ */
+static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
+ int bytes)
+{
+ unsigned offset, pte_size, misaligned;
+
+ pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+ gpa, bytes, sp->role.word);
+
+ offset = offset_in_page(gpa);
+ pte_size = sp->role.cr4_pae ? 8 : 4;
+
+ /*
+	 * Sometimes the OS only writes the last byte to update status
+	 * bits; for example, Linux's clear_bit() uses an andb instruction.
+ */
+ if (!(offset & (pte_size - 1)) && bytes == 1)
+ return false;
+
+ misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+ misaligned |= bytes < 4;
+
+ return misaligned;
+}
+
+static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
+{
+ unsigned page_offset, quadrant;
+ u64 *spte;
+ int level;
+
+ page_offset = offset_in_page(gpa);
+ level = sp->role.level;
+ *nspte = 1;
+ if (!sp->role.cr4_pae) {
+ page_offset <<= 1; /* 32->64 */
+ /*
+ * A 32-bit pde maps 4MB while the shadow pdes map
+ * only 2MB. So we need to double the offset again
+ * and zap two pdes instead of one.
+ */
+ if (level == PT32_ROOT_LEVEL) {
+ page_offset &= ~7; /* kill rounding error */
+ page_offset <<= 1;
+ *nspte = 2;
+ }
+ quadrant = page_offset >> PAGE_SHIFT;
+ page_offset &= ~PAGE_MASK;
+ if (quadrant != sp->role.quadrant)
+ return NULL;
+ }
+
+ spte = &sp->spt[page_offset / sizeof(*spte)];
+ return spte;
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes,
- bool guest_initiated)
+ const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
union kvm_mmu_page_role mask = { .word = 0 };
@@ -3539,8 +3635,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct hlist_node *node;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
- unsigned pte_size, page_offset, misaligned, quadrant, offset;
- int level, npte, invlpg_counter, r, flooded = 0;
+ int npte;
bool remote_flush, local_flush, zap_page;
/*
@@ -3551,112 +3646,45 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
return;
zap_page = remote_flush = local_flush = false;
- offset = offset_in_page(gpa);
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+ gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
/*
- * Assume that the pte write on a page table of the same type
- * as the current vcpu paging mode since we update the sptes only
- * when they have the same mode.
+	 * No need to care whether the memory allocation succeeded,
+	 * since pte prefetch is skipped if the cache does not have
+	 * enough objects.
*/
- if ((is_pae(vcpu) && bytes == 4) || !new) {
- /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
- if (is_pae(vcpu)) {
- gpa &= ~(gpa_t)7;
- bytes = 8;
- }
- r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
- if (r)
- gentry = 0;
- new = (const u8 *)&gentry;
- }
-
- switch (bytes) {
- case 4:
- gentry = *(const u32 *)new;
- break;
- case 8:
- gentry = *(const u64 *)new;
- break;
- default:
- gentry = 0;
- break;
- }
+ mmu_topup_memory_caches(vcpu);
spin_lock(&vcpu->kvm->mmu_lock);
- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
- gentry = 0;
- kvm_mmu_free_some_pages(vcpu);
++vcpu->kvm->stat.mmu_pte_write;
- trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
- if (guest_initiated) {
- kvm_mmu_access_page(vcpu, gfn);
- if (gfn == vcpu->arch.last_pt_write_gfn
- && !last_updated_pte_accessed(vcpu)) {
- ++vcpu->arch.last_pt_write_count;
- if (vcpu->arch.last_pt_write_count >= 3)
- flooded = 1;
- } else {
- vcpu->arch.last_pt_write_gfn = gfn;
- vcpu->arch.last_pt_write_count = 1;
- vcpu->arch.last_pte_updated = NULL;
- }
- }
+ kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
- pte_size = sp->role.cr4_pae ? 8 : 4;
- misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
- misaligned |= bytes < 4;
- if (misaligned || flooded) {
- /*
- * Misaligned accesses are too much trouble to fix
- * up; also, they usually indicate a page is not used
- * as a page table.
- *
- * If we're seeing too many writes to a page,
- * it may no longer be a page table, or we may be
- * forking, in which case it is better to unmap the
- * page.
- */
- pgprintk("misaligned: gpa %llx bytes %d role %x\n",
- gpa, bytes, sp->role.word);
+ spte = get_written_sptes(sp, gpa, &npte);
+
+ if (detect_write_misaligned(sp, gpa, bytes) ||
+ detect_write_flooding(sp, spte)) {
zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
&invalid_list);
++vcpu->kvm->stat.mmu_flooded;
continue;
}
- page_offset = offset;
- level = sp->role.level;
- npte = 1;
- if (!sp->role.cr4_pae) {
- page_offset <<= 1; /* 32->64 */
- /*
- * A 32-bit pde maps 4MB while the shadow pdes map
- * only 2MB. So we need to double the offset again
- * and zap two pdes instead of one.
- */
- if (level == PT32_ROOT_LEVEL) {
- page_offset &= ~7; /* kill rounding error */
- page_offset <<= 1;
- npte = 2;
- }
- quadrant = page_offset >> PAGE_SHIFT;
- page_offset &= ~PAGE_MASK;
- if (quadrant != sp->role.quadrant)
- continue;
- }
+
+ spte = get_written_sptes(sp, gpa, &npte);
+ if (!spte)
+ continue;
+
local_flush = true;
- spte = &sp->spt[page_offset / sizeof(*spte)];
while (npte--) {
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ vcpu->arch.mmu.base_role.word)
- & mask.word))
+ & mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (!remote_flush && need_remote_flush(entry, *spte))
remote_flush = true;
@@ -3665,7 +3693,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
}
mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
- trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
+ kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
spin_unlock(&vcpu->kvm->mmu_lock);
}
@@ -3679,9 +3707,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
- spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- spin_unlock(&vcpu->kvm->mmu_lock);
+
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3702,10 +3729,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+ return vcpu_match_mmio_gpa(vcpu, addr);
+
+ return vcpu_match_mmio_gva(vcpu, addr);
+}
+
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
void *insn, int insn_len)
{
- int r;
+ int r, emulation_type = EMULTYPE_RETRY;
enum emulation_result er;
r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3717,11 +3752,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
goto out;
}
- r = mmu_topup_memory_caches(vcpu);
- if (r)
- goto out;
+ if (is_mmio_page_fault(vcpu, cr2))
+ emulation_type = 0;
- er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+ er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
switch (er) {
case EMULATE_DONE:
@@ -3792,7 +3826,11 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+ vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+ vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.mmu.translate_gpa = translate_gpa;
+ vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
return alloc_mmu_pages(vcpu);
}
@@ -3852,14 +3890,14 @@ restart:
spin_unlock(&kvm->mmu_lock);
}
-static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
- struct list_head *invalid_list)
+static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
+ struct list_head *invalid_list)
{
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
+ kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
@@ -3874,15 +3912,15 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int idx, freed_pages;
+ int idx;
LIST_HEAD(invalid_list);
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
if (!kvm_freed && nr_to_scan > 0 &&
kvm->arch.n_used_mmu_pages > 0) {
- freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
- &invalid_list);
+ kvm_mmu_remove_some_alloc_mmu_pages(kvm,
+ &invalid_list);
kvm_freed = kvm;
}
nr_to_scan--;
@@ -3944,15 +3982,15 @@ nomem:
*/
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
- int i;
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++)
- nr_pages += slots->memslots[i].npages;
+ kvm_for_each_memslot(memslot, slots)
+ nr_pages += memslot->npages;
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3961,127 +3999,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
return nr_mmu_pages;
}
-static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
- unsigned len)
-{
- if (len > buffer->len)
- return NULL;
- return buffer->ptr;
-}
-
-static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
- unsigned len)
-{
- void *ret;
-
- ret = pv_mmu_peek_buffer(buffer, len);
- if (!ret)
- return ret;
- buffer->ptr += len;
- buffer->len -= len;
- buffer->processed += len;
- return ret;
-}
-
-static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
- gpa_t addr, gpa_t value)
-{
- int bytes = 8;
- int r;
-
- if (!is_long_mode(vcpu) && !is_pae(vcpu))
- bytes = 4;
-
- r = mmu_topup_memory_caches(vcpu);
- if (r)
- return r;
-
- if (!emulator_write_phys(vcpu, addr, &value, bytes))
- return -EFAULT;
-
- return 1;
-}
-
-static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
- (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
- return 1;
-}
-
-static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
-{
- spin_lock(&vcpu->kvm->mmu_lock);
- mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
- spin_unlock(&vcpu->kvm->mmu_lock);
- return 1;
-}
-
-static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
- struct kvm_pv_mmu_op_buffer *buffer)
-{
- struct kvm_mmu_op_header *header;
-
- header = pv_mmu_peek_buffer(buffer, sizeof *header);
- if (!header)
- return 0;
- switch (header->op) {
- case KVM_MMU_OP_WRITE_PTE: {
- struct kvm_mmu_op_write_pte *wpte;
-
- wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
- if (!wpte)
- return 0;
- return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
- wpte->pte_val);
- }
- case KVM_MMU_OP_FLUSH_TLB: {
- struct kvm_mmu_op_flush_tlb *ftlb;
-
- ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
- if (!ftlb)
- return 0;
- return kvm_pv_mmu_flush_tlb(vcpu);
- }
- case KVM_MMU_OP_RELEASE_PT: {
- struct kvm_mmu_op_release_pt *rpt;
-
- rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
- if (!rpt)
- return 0;
- return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
- }
- default: return 0;
- }
-}
-
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
- gpa_t addr, unsigned long *ret)
-{
- int r;
- struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
-
- buffer->ptr = buffer->buf;
- buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
- buffer->processed = 0;
-
- r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
- if (r)
- goto out;
-
- while (buffer->len) {
- r = kvm_pv_mmu_op_one(vcpu, buffer);
- if (r < 0)
- goto out;
- if (r == 0)
- break;
- }
-
- r = 1;
-out:
- *ret = buffer->processed;
- return r;
-}
-
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
struct kvm_shadow_walk_iterator iterator;
@@ -4110,12 +4027,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
void kvm_mmu_module_exit(void)
{
mmu_destroy_caches();
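
The kvm_mmu_calculate_mmu_pages() hunk above now walks memslots with kvm_for_each_memslot() but keeps the same sizing rule: the shadow-page budget is a fixed per-mille of guest memory, floored at a minimum. A minimal userspace sketch of that arithmetic (illustrative only, not part of the patch), with KVM_PERMILLE_MMU_PAGES and KVM_MIN_ALLOC_MMU_PAGES assumed to be 20 and 64 since their definitions are not shown in this diff:

#include <stdio.h>

#define KVM_PERMILLE_MMU_PAGES	20	/* assumed value, defined elsewhere */
#define KVM_MIN_ALLOC_MMU_PAGES	64	/* assumed value, defined elsewhere */

/* Mirror of the sizing formula visible in kvm_mmu_calculate_mmu_pages(). */
static unsigned int calculate_mmu_pages(unsigned long nr_guest_pages)
{
	unsigned int nr_mmu_pages;

	nr_mmu_pages = nr_guest_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	if (nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		nr_mmu_pages = KVM_MIN_ALLOC_MMU_PAGES;
	return nr_mmu_pages;
}

int main(void)
{
	/* A 4 GiB guest has 1048576 4 KiB pages -> 20971 shadow pages. */
	printf("%u\n", calculate_mmu_pages((4UL << 30) >> 12));
	return 0;
}
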
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 746ec259d02..fe15dcc07a6 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -19,6 +19,15 @@
#include <linux/ratelimit.h>
+char const *audit_point_name[] = {
+ "pre page fault",
+ "post page fault",
+ "pre pte write",
+ "post pte write",
+ "pre sync",
+ "post sync"
+};
+
#define audit_printk(kvm, fmt, args...) \
printk(KERN_ERR "audit: (%s) error: " \
fmt, audit_point_name[kvm->arch.audit_point], ##args)
@@ -224,7 +233,10 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
mmu_spte_walk(vcpu, audit_spte);
}
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
+static bool mmu_audit;
+static struct jump_label_key mmu_audit_key;
+
+static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
@@ -236,18 +248,18 @@ static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
audit_vcpu_spte(vcpu);
}
-static bool mmu_audit;
+static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
+{
+ if (static_branch((&mmu_audit_key)))
+ __kvm_mmu_audit(vcpu, point);
+}
static void mmu_audit_enable(void)
{
- int ret;
-
if (mmu_audit)
return;
- ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
- WARN_ON(ret);
-
+ jump_label_inc(&mmu_audit_key);
mmu_audit = true;
}
@@ -256,8 +268,7 @@ static void mmu_audit_disable(void)
if (!mmu_audit)
return;
- unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
- tracepoint_synchronize_unregister();
+ jump_label_dec(&mmu_audit_key);
mmu_audit = false;
}
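
The mmu_audit.c hunk above replaces the tracepoint-based hook with a jump label: kvm_mmu_audit() becomes an inline wrapper that tests mmu_audit_key with static_branch() and only then calls __kvm_mmu_audit(). A rough userspace analogue of that shape (illustrative sketch, not the kernel API), using an ordinary rarely-true flag where the kernel would patch the branch away entirely:

#include <stdbool.h>
#include <stdio.h>

static bool audit_enabled;		/* stands in for mmu_audit_key */

static void __do_audit(int point)	/* stands in for __kvm_mmu_audit() */
{
	printf("audit point %d\n", point);
}

/* Fast path: a predictable, almost-always-false test guards the slow path.
 * With static_branch()/jump_label_inc() the test is patched out entirely. */
static inline void do_audit(int point)
{
	if (__builtin_expect(audit_enabled, 0))
		__do_audit(point);
}

int main(void)
{
	do_audit(0);			/* disabled: no output */
	audit_enabled = true;		/* kernel equivalent: jump_label_inc(&mmu_audit_key) */
	do_audit(1);			/* prints "audit point 1" */
	return 0;
}
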
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index eed67f34146..89fb0e81322 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -243,25 +243,6 @@ TRACE_EVENT(
TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
__entry->access)
);
-
-TRACE_EVENT(
- kvm_mmu_audit,
- TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
- TP_ARGS(vcpu, audit_point),
-
- TP_STRUCT__entry(
- __field(struct kvm_vcpu *, vcpu)
- __field(int, audit_point)
- ),
-
- TP_fast_assign(
- __entry->vcpu = vcpu;
- __entry->audit_point = audit_point;
- ),
-
- TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
- audit_point_name[__entry->audit_point])
-);
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92994100638..15610285ebb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t table_gfn;
+ clear_sp_write_flooding_count(it.sptep);
drop_large_spte(vcpu, it.sptep);
sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_walk_next(&it)) {
gfn_t direct_gfn;
+ clear_sp_write_flooding_count(it.sptep);
validate_direct_spte(vcpu, it.sptep, direct_access);
drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(it.sptep, sp);
}
+ clear_sp_write_flooding_count(it.sptep);
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
user_fault, write_fault, emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
- if (!prefault) {
+ if (!prefault)
inject_page_fault(vcpu, &walker.fault);
- /* reset fork detector */
- vcpu->arch.last_pt_write_count = 0;
- }
+
return 0;
}
@@ -631,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
- trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+ kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -641,11 +642,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
sptep, *sptep, emulate);
- if (!emulate)
- vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
++vcpu->stat.pf_fixed;
- trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
+ kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
return emulate;
@@ -656,65 +654,66 @@ out_unlock:
return 0;
}
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+ int offset = 0;
+
+ WARN_ON(sp->role.level != 1);
+
+ if (PTTYPE == 32)
+ offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+ return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
- gpa_t pte_gpa = -1;
int level;
u64 *sptep;
- int need_flush = 0;
vcpu_clear_mmio_info(vcpu, gva);
- spin_lock(&vcpu->kvm->mmu_lock);
+ /*
+	 * No need to check the return value here; if this fails,
+	 * rmap_can_add() lets us skip the pte prefetch later.
+ */
+ mmu_topup_memory_caches(vcpu);
+ spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
- int offset, shift;
+ pt_element_t gpte;
+ gpa_t pte_gpa;
if (!sp->unsync)
break;
- shift = PAGE_SHIFT -
- (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
- offset = sp->role.quadrant << shift;
-
- pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+ pte_gpa = FNAME(get_level1_sp_gpa)(sp);
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
- if (is_shadow_present_pte(*sptep)) {
- if (is_large_pte(*sptep))
- --vcpu->kvm->stat.lpages;
- drop_spte(vcpu->kvm, sptep);
- need_flush = 1;
- } else if (is_mmio_spte(*sptep))
- mmu_spte_clear_no_track(sptep);
+ if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+ kvm_flush_remote_tlbs(vcpu->kvm);
- break;
+ if (!rmap_can_add(vcpu))
+ break;
+
+ if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+ sizeof(pt_element_t)))
+ break;
+
+ FNAME(update_pte)(vcpu, sp, sptep, &gpte);
}
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
-
- if (need_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
-
- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
spin_unlock(&vcpu->kvm->mmu_lock);
-
- if (pte_gpa == -1)
- return;
-
- if (mmu_topup_memory_caches(vcpu))
- return;
- kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -769,19 +768,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
- int i, offset, nr_present;
+ int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
- offset = nr_present = 0;
-
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
- if (PTTYPE == 32)
- offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
- first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+ first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
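
The paging_tmpl.h changes above fold the open-coded quadrant arithmetic that used to live in FNAME(invlpg) and FNAME(sync_page) into one helper, FNAME(get_level1_sp_gpa). A small self-check (illustrative, not part of the patch) that the removed formula and the new helper agree for 32-bit guests, assuming PAGE_SHIFT 12, PT_LEVEL_BITS 10, PT64_LEVEL_BITS 9 and 4-byte guest PTEs — constants defined elsewhere in the source, not in this diff:

#include <assert.h>
#include <stdio.h>

/* Assumed values for PTTYPE == 32. */
#define PAGE_SHIFT	12
#define PT_LEVEL_BITS	10
#define PT64_LEVEL_BITS	9
#define PTE_SIZE	4	/* sizeof(pt_element_t) for 32-bit guests */

/* Removed code: offset = quadrant << (PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level) */
static unsigned long old_offset(unsigned quadrant, int level)
{
	int shift = PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;

	return (unsigned long)quadrant << shift;
}

/* New helper: offset = (quadrant << PT64_LEVEL_BITS) * sizeof(pt_element_t), level 1 only */
static unsigned long new_offset(unsigned quadrant)
{
	return ((unsigned long)quadrant << PT64_LEVEL_BITS) * PTE_SIZE;
}

int main(void)
{
	unsigned quadrant;

	for (quadrant = 0; quadrant < 2; quadrant++) {
		assert(old_offset(quadrant, 1) == new_offset(quadrant));
		printf("quadrant %u -> byte offset %lu\n", quadrant, new_offset(quadrant));
	}
	return 0;
}
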
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644
index 00000000000..7aad5446f39
--- /dev/null
+++ b/arch/x86/kvm/pmu.c
@@ -0,0 +1,533 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ * Avi Kivity <avi@redhat.com>
+ * Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_arch_event_perf_mapping {
+ u8 eventsel;
+ u8 unit_mask;
+ unsigned event_type;
+ bool inexact;
+} arch_events[] = {
+ /* Index must match CPUID 0x0A.EBX bit vector */
+ [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+ [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
+ [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+ [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+ [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+};
+
+/* mapping between fixed pmc index and arch_events array */
+int fixed_pmc_events[] = {1, 0, 2};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+ return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+ return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+ return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+ u32 base)
+{
+ if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+ return &pmu->gp_counters[msr - base];
+ return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+ int base = MSR_CORE_PERF_FIXED_CTR0;
+ if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+ return &pmu->fixed_counters[msr - base];
+ return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+ return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+ if (idx < X86_PMC_IDX_FIXED)
+ return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+ else
+ return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+}
+
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.apic)
+ kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+ struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+ irq_work);
+ struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+ arch.pmu);
+
+ kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+ __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+ if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+ kvm_perf_overflow(perf_event, data, regs);
+ kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+ /*
+		 * Inject PMI. If the vcpu was in guest mode during the NMI,
+		 * the PMI can be injected on guest-mode re-entry. Otherwise
+		 * we cannot be sure that the vcpu was not executing a hlt
+		 * instruction at the time of the vmexit and will not re-enter
+		 * guest mode until it is woken up. So we should wake it, but
+		 * that is impossible from NMI context. Do it from irq work
+		 * instead.
+ */
+ if (!kvm_is_in_guest())
+ irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+ else
+ kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+ }
+}
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+ u64 counter, enabled, running;
+
+ counter = pmc->counter;
+
+ if (pmc->perf_event)
+ counter += perf_event_read_value(pmc->perf_event,
+ &enabled, &running);
+
+ /* FIXME: Scaling needed? */
+
+ return counter & pmc_bitmask(pmc);
+}
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+ if (pmc->perf_event) {
+ pmc->counter = read_pmc(pmc);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+ unsigned config, bool exclude_user, bool exclude_kernel,
+ bool intr)
+{
+ struct perf_event *event;
+ struct perf_event_attr attr = {
+ .type = type,
+ .size = sizeof(attr),
+ .pinned = true,
+ .exclude_idle = true,
+ .exclude_host = 1,
+ .exclude_user = exclude_user,
+ .exclude_kernel = exclude_kernel,
+ .config = config,
+ };
+
+ attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+ event = perf_event_create_kernel_counter(&attr, -1, current,
+ intr ? kvm_perf_overflow_intr :
+ kvm_perf_overflow, pmc);
+ if (IS_ERR(event)) {
+ printk_once("kvm: pmu event creation failed %ld\n",
+ PTR_ERR(event));
+ return;
+ }
+
+ pmc->perf_event = event;
+ clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+ u8 unit_mask)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arch_events); i++)
+ if (arch_events[i].eventsel == event_select
+ && arch_events[i].unit_mask == unit_mask
+ && (pmu->available_event_types & (1 << i)))
+ break;
+
+ if (i == ARRAY_SIZE(arch_events))
+ return PERF_COUNT_HW_MAX;
+
+ return arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+ unsigned config, type = PERF_TYPE_RAW;
+ u8 event_select, unit_mask;
+
+ pmc->eventsel = eventsel;
+
+ stop_counter(pmc);
+
+ if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+ return;
+
+ event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+ unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+ if (!(event_select & (ARCH_PERFMON_EVENTSEL_EDGE |
+ ARCH_PERFMON_EVENTSEL_INV |
+ ARCH_PERFMON_EVENTSEL_CMASK))) {
+ config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+ unit_mask);
+ if (config != PERF_COUNT_HW_MAX)
+ type = PERF_TYPE_HARDWARE;
+ }
+
+ if (type == PERF_TYPE_RAW)
+ config = eventsel & X86_RAW_EVENT_MASK;
+
+ reprogram_counter(pmc, type, config,
+ !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+ !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+ eventsel & ARCH_PERFMON_EVENTSEL_INT);
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+ unsigned en = en_pmi & 0x3;
+ bool pmi = en_pmi & 0x8;
+
+ stop_counter(pmc);
+
+ if (!en || !pmc_enabled(pmc))
+ return;
+
+ reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+ arch_events[fixed_pmc_events[idx]].event_type,
+ !(en & 0x2), /* exclude user */
+ !(en & 0x1), /* exclude kernel */
+ pmi);
+}
+
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+ return (ctrl >> (idx * 4)) & 0xf;
+}
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+ int i;
+
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+ u8 en_pmi = fixed_en_pmi(data, i);
+ struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+ if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+ continue;
+
+ reprogram_fixed_counter(pmc, en_pmi, i);
+ }
+
+ pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+ struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+ if (!pmc)
+ return;
+
+ if (pmc_is_gp(pmc))
+ reprogram_gp_counter(pmc, pmc->eventsel);
+ else {
+ int fidx = idx - X86_PMC_IDX_FIXED;
+ reprogram_fixed_counter(pmc,
+ fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+ }
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+ int bit;
+ u64 diff = pmu->global_ctrl ^ data;
+
+ pmu->global_ctrl = data;
+
+ for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+ reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ int ret;
+
+ switch (msr) {
+ case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ ret = pmu->version > 1;
+ break;
+ default:
+ ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+ || get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+ || get_fixed_pmc(pmu, msr);
+ break;
+ }
+ return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ switch (index) {
+ case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ *data = pmu->fixed_ctr_ctrl;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ *data = pmu->global_status;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ *data = pmu->global_ctrl;
+ return 0;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ *data = pmu->global_ovf_ctrl;
+ return 0;
+ default:
+ if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+ (pmc = get_fixed_pmc(pmu, index))) {
+ *data = read_pmc(pmc);
+ return 0;
+ } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+ *data = pmc->eventsel;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ switch (index) {
+ case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ if (pmu->fixed_ctr_ctrl == data)
+ return 0;
+ if (!(data & 0xfffffffffffff444)) {
+ reprogram_fixed_counters(pmu, data);
+ return 0;
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ break; /* RO MSR */
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (pmu->global_ctrl == data)
+ return 0;
+ if (!(data & pmu->global_ctrl_mask)) {
+ global_ctrl_changed(pmu, data);
+ return 0;
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+ pmu->global_status &= ~data;
+ pmu->global_ovf_ctrl = data;
+ return 0;
+ }
+ break;
+ default:
+ if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+ (pmc = get_fixed_pmc(pmu, index))) {
+ data = (s64)(s32)data;
+ pmc->counter += data - read_pmc(pmc);
+ return 0;
+ } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+ if (data == pmc->eventsel)
+ return 0;
+ if (!(data & 0xffffffff00200000ull)) {
+ reprogram_gp_counter(pmc, data);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ bool fast_mode = pmc & (1u << 31);
+ bool fixed = pmc & (1u << 30);
+ struct kvm_pmc *counters;
+ u64 ctr;
+
+ pmc &= (3u << 30) - 1;
+ if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+ return 1;
+ if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+ return 1;
+ counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+ ctr = read_pmc(&counters[pmc]);
+ if (fast_mode)
+ ctr = (u32)ctr;
+ *data = ctr;
+
+ return 0;
+}
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_cpuid_entry2 *entry;
+ unsigned bitmap_len;
+
+ pmu->nr_arch_gp_counters = 0;
+ pmu->nr_arch_fixed_counters = 0;
+ pmu->counter_bitmask[KVM_PMC_GP] = 0;
+ pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+ pmu->version = 0;
+
+ entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+ if (!entry)
+ return;
+
+ pmu->version = entry->eax & 0xff;
+ if (!pmu->version)
+ return;
+
+ pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
+ X86_PMC_MAX_GENERIC);
+ pmu->counter_bitmask[KVM_PMC_GP] =
+ ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
+ bitmap_len = (entry->eax >> 24) & 0xff;
+ pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
+
+ if (pmu->version == 1) {
+ pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
+ return;
+ }
+
+ pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+ X86_PMC_MAX_FIXED);
+ pmu->counter_bitmask[KVM_PMC_FIXED] =
+ ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
+ pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
+ | (((1ull << pmu->nr_arch_fixed_counters) - 1)
+ << X86_PMC_IDX_FIXED));
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ memset(pmu, 0, sizeof(*pmu));
+ for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ pmu->gp_counters[i].type = KVM_PMC_GP;
+ pmu->gp_counters[i].vcpu = vcpu;
+ pmu->gp_counters[i].idx = i;
+ }
+ for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+ pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+ pmu->fixed_counters[i].vcpu = vcpu;
+ pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+ }
+ init_irq_work(&pmu->irq_work, trigger_pmi);
+ kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ int i;
+
+ irq_work_sync(&pmu->irq_work);
+ for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ struct kvm_pmc *pmc = &pmu->gp_counters[i];
+ stop_counter(pmc);
+ pmc->counter = pmc->eventsel = 0;
+ }
+
+ for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+ stop_counter(&pmu->fixed_counters[i]);
+
+ pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+ pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_reset(vcpu);
+}
+
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ u64 bitmask;
+ int bit;
+
+ bitmask = pmu->reprogram_pmi;
+
+ for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+ struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+ if (unlikely(!pmc || !pmc->perf_event)) {
+ clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+ continue;
+ }
+
+ reprogram_idx(pmu, bit);
+ }
+}
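
The new pmu.c above backs each guest counter with a host perf event created via perf_event_create_kernel_counter() in reprogram_counter(). For readers unfamiliar with the perf side, here is a rough userspace counterpart (illustration only, not code used by KVM) that counts retired instructions with perf_event_open(), using the same perf_event_attr fields the patch fills in:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	volatile unsigned long x = 0;
	unsigned long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;		/* as in reprogram_counter() */
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)		/* measured workload */
		x += i;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
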
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e32243eac2f..5fa553babe5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1014,6 +1014,7 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+ set_intercept(svm, INTERCEPT_RDPMC);
set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT);
@@ -2770,6 +2771,19 @@ static int emulate_on_interception(struct vcpu_svm *svm)
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
+static int rdpmc_interception(struct vcpu_svm *svm)
+{
+ int err;
+
+ if (!static_cpu_has(X86_FEATURE_NRIPS))
+ return emulate_on_interception(svm);
+
+ err = kvm_rdpmc(&svm->vcpu);
+ kvm_complete_insn_gp(&svm->vcpu, err);
+
+ return 1;
+}
+
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
@@ -3190,6 +3204,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
+ [SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index ae432ea1cd8..6b85cc647f3 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -18,9 +18,10 @@
#include <linux/atomic.h>
#include "kvm_timer.h"
-static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
+enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
- int restart_timer = 0;
+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+ struct kvm_vcpu *vcpu = ktimer->vcpu;
wait_queue_head_t *q = &vcpu->wq;
/*
@@ -40,26 +41,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
if (ktimer->t_ops->is_periodic(ktimer)) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
- restart_timer = 1;
- }
-
- return restart_timer;
-}
-
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
-{
- int restart_timer;
- struct kvm_vcpu *vcpu;
- struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-
- vcpu = ktimer->vcpu;
- if (!vcpu)
- return HRTIMER_NORESTART;
-
- restart_timer = __kvm_timer_fn(vcpu, ktimer);
- if (restart_timer)
return HRTIMER_RESTART;
- else
+ } else
return HRTIMER_NORESTART;
}
-
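
The timer.c rewrite above folds __kvm_timer_fn() into kvm_timer_fn() but keeps the hrtimer contract: a periodic timer pushes its expiry forward and returns HRTIMER_RESTART, while a one-shot timer returns HRTIMER_NORESTART. As a loose userspace analogue (illustrative only), timerfd expresses the same periodic-versus-one-shot choice through it_interval:

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
		/* A non-zero it_interval re-arms the timer after each expiry,
		 * like returning HRTIMER_RESTART after hrtimer_add_expires_ns();
		 * a zero interval makes it one-shot (HRTIMER_NORESTART). */
		.it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
	};
	uint64_t expirations;
	int fd, i;

	fd = timerfd_create(CLOCK_MONOTONIC, 0);
	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}

	for (i = 0; i < 3; i++) {
		if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
			printf("tick %d (%llu expirations)\n", i,
			       (unsigned long long)expirations);
	}
	close(fd);
	return 0;
}
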
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 579a0b51696..906a7e84200 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -18,6 +18,7 @@
#include "irq.h"
#include "mmu.h"
+#include "cpuid.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
int save_nmsrs, index;
unsigned long *msr_bitmap;
- vmx_load_host_state(vmx);
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_RDPMC_EXITING |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 1;
/* Otherwise falls through */
default:
- vmx_load_host_state(to_vmx(vcpu));
if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
- vmx_load_host_state(to_vmx(vcpu));
data = msr->data;
break;
}
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
switch (msr_index) {
case MSR_EFER:
- vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data);
break;
#ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
- vmx_load_host_state(vmx);
msr->data = data;
break;
}
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING |
- CPU_BASED_INVLPG_EXITING;
+ CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_RDPMC_EXITING;
if (yield_on_hlt)
min |= CPU_BASED_HLT_EXITING;
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
gfn_t base_gfn;
slots = kvm_memslots(kvm);
- base_gfn = slots->memslots[0].base_gfn +
- kvm->memslots->memslots[0].npages - 3;
+ slot = id_to_memslot(slots, 0);
+ base_gfn = slot->base_gfn + slot->npages - 3;
+
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
- if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
- /* We can get here when nested_run_pending caused
- * vmx_interrupt_allowed() to return false. In this case, do
- * nothing - the interrupt will be injected later.
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+ /*
+ * We get here if vmx_interrupt_allowed() said we can't
+ * inject to L1 now because L2 must run. Ask L2 to exit
+ * right after entry, so we can inject to L1 more promptly.
*/
+ kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
return;
+ }
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
- struct vmcs12 *vmcs12;
- if (to_vmx(vcpu)->nested.nested_run_pending)
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ if (to_vmx(vcpu)->nested.nested_run_pending ||
+ (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_VALID_MASK))
return 0;
nested_vmx_vmexit(vcpu);
- vmcs12 = get_vmcs12(vcpu);
vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
vmcs12->vm_exit_intr_info = 0;
/* fall through to normal code, but now in L1, not L2 */
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+ int err;
+
+ err = kvm_rdpmc(vcpu);
+ kvm_complete_insn_gp(vcpu, err);
+
+ return 1;
+}
+
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
+ [EXIT_REASON_RDPMC] = handle_rdpmc,
[EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c38efd7b792..1171def5f96 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -26,6 +26,7 @@
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
+#include "cpuid.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -82,8 +83,6 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries);
static void process_nmi(struct kvm_vcpu *vcpu);
struct kvm_x86_ops *kvm_x86_ops;
@@ -574,58 +573,6 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
-static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
-
-static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static void update_cpuid(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
- struct kvm_lapic *apic = vcpu->arch.apic;
- u32 timer_mode_mask;
-
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
- if (!best)
- return;
-
- /* Update OSXSAVE bit */
- if (cpu_has_xsave && best->function == 0x1) {
- best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
- if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
- best->ecx |= bit(X86_FEATURE_OSXSAVE);
- }
-
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- best->function == 0x1) {
- best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
- timer_mode_mask = 3 << 17;
- } else
- timer_mode_mask = 1 << 17;
-
- if (apic)
- apic->lapic_timer.timer_mode_mask = timer_mode_mask;
-}
-
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -659,7 +606,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
- update_cpuid(vcpu);
+ kvm_update_cpuid(vcpu);
return 0;
}
@@ -813,6 +760,21 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu)
+{
+ u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ u64 data;
+ int err;
+
+ err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+ if (err)
+ return err;
+ kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+ return err;
+}
+EXPORT_SYMBOL_GPL(kvm_rdpmc);
+
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1362,12 +1324,11 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
if (page_num >= blob_size)
goto out;
r = -ENOMEM;
- page = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!page)
+ page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
+ if (IS_ERR(page)) {
+ r = PTR_ERR(page);
goto out;
- r = -EFAULT;
- if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
- goto out_free;
+ }
if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
goto out_free;
r = 0;
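
Several hunks in x86.c (xen_hvm_config(), msr_io(), and the KVM_SET_LAPIC/XSAVE/XCRS and IRQCHIP ioctls further down) replace the kmalloc()/kzalloc() + copy_from_user() pattern with memdup_user(), which returns either the copy or an encoded error pointer. A self-contained userspace analogue of that error-pointer idiom (the ERR_PTR helpers are re-implemented here only for illustration and are not the kernel's definitions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal re-implementation of the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Userspace stand-in for memdup_user(): duplicate len bytes or return ERR_PTR. */
static void *memdup_buf(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return ERR_PTR(-ENOMEM);
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char blob[] = "one page of the xen hvm blob";
	char *page = memdup_buf(blob, sizeof(blob));

	if (IS_ERR(page)) {
		fprintf(stderr, "memdup_buf: %ld\n", PTR_ERR(page));
		return 1;
	}
	printf("copied: %s\n", page);
	free(page);
	return 0;
}
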
@@ -1656,8 +1617,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
- case MSR_P6_EVNTSEL0:
- case MSR_P6_EVNTSEL1:
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
@@ -1669,8 +1628,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
- case MSR_P6_PERFCTR0:
- case MSR_P6_PERFCTR1:
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
@@ -1707,6 +1664,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
+ if (kvm_pmu_msr(vcpu, msr))
+ return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
@@ -1869,10 +1828,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_K8_SYSCFG:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
- case MSR_P6_PERFCTR0:
- case MSR_P6_PERFCTR1:
- case MSR_P6_EVNTSEL0:
- case MSR_P6_EVNTSEL1:
case MSR_K7_EVNTSEL0:
case MSR_K7_PERFCTR0:
case MSR_K8_INT_PENDING_MSG:
@@ -1983,6 +1938,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = 0xbe702111;
break;
default:
+ if (kvm_pmu_msr(vcpu, msr))
+ return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -2041,15 +1998,12 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
if (msrs.nmsrs >= MAX_IO_MSRS)
goto out;
- r = -ENOMEM;
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
- entries = kmalloc(size, GFP_KERNEL);
- if (!entries)
+ entries = memdup_user(user_msrs->entries, size);
+ if (IS_ERR(entries)) {
+ r = PTR_ERR(entries);
goto out;
-
- r = -EFAULT;
- if (copy_from_user(entries, user_msrs->entries, size))
- goto out_free;
+ }
r = n = __msr_io(vcpu, &msrs, entries, do_msr);
if (r < 0)
@@ -2135,6 +2089,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
+ case KVM_CAP_TSC_DEADLINE_TIMER:
+ r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+ break;
default:
r = 0;
break;
@@ -2266,466 +2223,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
}
-static int is_efer_nx(void)
-{
- unsigned long long efer = 0;
-
- rdmsrl_safe(MSR_EFER, &efer);
- return efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
- int i;
- struct kvm_cpuid_entry2 *e, *entry;
-
- entry = NULL;
- for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
- e = &vcpu->arch.cpuid_entries[i];
- if (e->function == 0x80000001) {
- entry = e;
- break;
- }
- }
- if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
- entry->edx &= ~(1 << 20);
- printk(KERN_INFO "kvm: guest NX capability removed\n");
- }
-}
-
-/* when an old userspace process fills a new kernel module */
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
- struct kvm_cpuid *cpuid,
- struct kvm_cpuid_entry __user *entries)
-{
- int r, i;
- struct kvm_cpuid_entry *cpuid_entries;
-
- r = -E2BIG;
- if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
- goto out;
- r = -ENOMEM;
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
- if (!cpuid_entries)
- goto out;
- r = -EFAULT;
- if (copy_from_user(cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry)))
- goto out_free;
- for (i = 0; i < cpuid->nent; i++) {
- vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
- vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
- vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
- vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
- vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
- vcpu->arch.cpuid_entries[i].index = 0;
- vcpu->arch.cpuid_entries[i].flags = 0;
- vcpu->arch.cpuid_entries[i].padding[0] = 0;
- vcpu->arch.cpuid_entries[i].padding[1] = 0;
- vcpu->arch.cpuid_entries[i].padding[2] = 0;
- }
- vcpu->arch.cpuid_nent = cpuid->nent;
- cpuid_fix_nx_cap(vcpu);
- r = 0;
- kvm_apic_set_version(vcpu);
- kvm_x86_ops->cpuid_update(vcpu);
- update_cpuid(vcpu);
-
-out_free:
- vfree(cpuid_entries);
-out:
- return r;
-}
-
-static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
-{
- int r;
-
- r = -E2BIG;
- if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
- goto out;
- r = -EFAULT;
- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
- goto out;
- vcpu->arch.cpuid_nent = cpuid->nent;
- kvm_apic_set_version(vcpu);
- kvm_x86_ops->cpuid_update(vcpu);
- update_cpuid(vcpu);
- return 0;
-
-out:
- return r;
-}
-
-static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
- struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
-{
- int r;
-
- r = -E2BIG;
- if (cpuid->nent < vcpu->arch.cpuid_nent)
- goto out;
- r = -EFAULT;
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
- goto out;
- return 0;
-
-out:
- cpuid->nent = vcpu->arch.cpuid_nent;
- return r;
-}
-
-static void cpuid_mask(u32 *word, int wordnum)
-{
- *word &= boot_cpu_data.x86_capability[wordnum];
-}
-
-static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- u32 index)
-{
- entry->function = function;
- entry->index = index;
- cpuid_count(entry->function, entry->index,
- &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
- entry->flags = 0;
-}
-
-static bool supported_xcr0_bit(unsigned bit)
-{
- u64 mask = ((u64)1 << bit);
-
- return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
-}
-
-#define F(x) bit(X86_FEATURE_##x)
-
-static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- u32 index, int *nent, int maxnent)
-{
- unsigned f_nx = is_efer_nx() ? F(NX) : 0;
-#ifdef CONFIG_X86_64
- unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
- ? F(GBPAGES) : 0;
- unsigned f_lm = F(LM);
-#else
- unsigned f_gbpages = 0;
- unsigned f_lm = 0;
-#endif
- unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
-
- /* cpuid 1.edx */
- const u32 kvm_supported_word0_x86_features =
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
- 0 /* Reserved, DS, ACPI */ | F(MMX) |
- F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
- 0 /* HTT, TM, Reserved, PBE */;
- /* cpuid 0x80000001.edx */
- const u32 kvm_supported_word1_x86_features =
- F(FPU) | F(VME) | F(DE) | F(PSE) |
- F(TSC) | F(MSR) | F(PAE) | F(MCE) |
- F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
- F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
- F(PAT) | F(PSE36) | 0 /* Reserved */ |
- f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
- 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
- /* cpuid 1.ecx */
- const u32 kvm_supported_word4_x86_features =
- F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
- 0 /* DS-CPL, VMX, SMX, EST */ |
- 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
- 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
- 0 /* Reserved, DCA */ | F(XMM4_1) |
- F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
- 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
- F(F16C) | F(RDRAND);
- /* cpuid 0x80000001.ecx */
- const u32 kvm_supported_word6_x86_features =
- F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
- F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
- F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
- 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
-
- /* cpuid 0xC0000001.edx */
- const u32 kvm_supported_word5_x86_features =
- F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
- F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
- F(PMM) | F(PMM_EN);
-
- /* cpuid 7.0.ebx */
- const u32 kvm_supported_word9_x86_features =
- F(SMEP) | F(FSGSBASE) | F(ERMS);
-
- /* all calls to cpuid_count() should be made on the same cpu */
- get_cpu();
- do_cpuid_1_ent(entry, function, index);
- ++*nent;
-
- switch (function) {
- case 0:
- entry->eax = min(entry->eax, (u32)0xd);
- break;
- case 1:
- entry->edx &= kvm_supported_word0_x86_features;
- cpuid_mask(&entry->edx, 0);
- entry->ecx &= kvm_supported_word4_x86_features;
- cpuid_mask(&entry->ecx, 4);
- /* we support x2apic emulation even if host does not support
- * it since we emulate x2apic in software */
- entry->ecx |= F(X2APIC);
- break;
- /* function 2 entries are STATEFUL. That is, repeated cpuid commands
- * may return different values. This forces us to get_cpu() before
- * issuing the first command, and also to emulate this annoying behavior
- * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
- case 2: {
- int t, times = entry->eax & 0xff;
-
- entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
- entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- for (t = 1; t < times && *nent < maxnent; ++t) {
- do_cpuid_1_ent(&entry[t], function, 0);
- entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
- ++*nent;
- }
- break;
- }
- /* function 4 has additional index. */
- case 4: {
- int i, cache_type;
-
- entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- /* read more entries until cache_type is zero */
- for (i = 1; *nent < maxnent; ++i) {
- cache_type = entry[i - 1].eax & 0x1f;
- if (!cache_type)
- break;
- do_cpuid_1_ent(&entry[i], function, i);
- entry[i].flags |=
- KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- ++*nent;
- }
- break;
- }
- case 7: {
- entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- /* Mask ebx against host capbability word 9 */
- if (index == 0) {
- entry->ebx &= kvm_supported_word9_x86_features;
- cpuid_mask(&entry->ebx, 9);
- } else
- entry->ebx = 0;
- entry->eax = 0;
- entry->ecx = 0;
- entry->edx = 0;
- break;
- }
- case 9:
- break;
- /* function 0xb has additional index. */
- case 0xb: {
- int i, level_type;
-
- entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- /* read more entries until level_type is zero */
- for (i = 1; *nent < maxnent; ++i) {
- level_type = entry[i - 1].ecx & 0xff00;
- if (!level_type)
- break;
- do_cpuid_1_ent(&entry[i], function, i);
- entry[i].flags |=
- KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- ++*nent;
- }
- break;
- }
- case 0xd: {
- int idx, i;
-
- entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
- do_cpuid_1_ent(&entry[i], function, idx);
- if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
- continue;
- entry[i].flags |=
- KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- ++*nent;
- ++i;
- }
- break;
- }
- case KVM_CPUID_SIGNATURE: {
- char signature[12] = "KVMKVMKVM\0\0";
- u32 *sigptr = (u32 *)signature;
- entry->eax = 0;
- entry->ebx = sigptr[0];
- entry->ecx = sigptr[1];
- entry->edx = sigptr[2];
- break;
- }
- case KVM_CPUID_FEATURES:
- entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
- (1 << KVM_FEATURE_NOP_IO_DELAY) |
- (1 << KVM_FEATURE_CLOCKSOURCE2) |
- (1 << KVM_FEATURE_ASYNC_PF) |
- (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
-
- if (sched_info_on())
- entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
-
- entry->ebx = 0;
- entry->ecx = 0;
- entry->edx = 0;
- break;
- case 0x80000000:
- entry->eax = min(entry->eax, 0x8000001a);
- break;
- case 0x80000001:
- entry->edx &= kvm_supported_word1_x86_features;
- cpuid_mask(&entry->edx, 1);
- entry->ecx &= kvm_supported_word6_x86_features;
- cpuid_mask(&entry->ecx, 6);
- break;
- case 0x80000008: {
- unsigned g_phys_as = (entry->eax >> 16) & 0xff;
- unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
- unsigned phys_as = entry->eax & 0xff;
-
- if (!g_phys_as)
- g_phys_as = phys_as;
- entry->eax = g_phys_as | (virt_as << 8);
- entry->ebx = entry->edx = 0;
- break;
- }
- case 0x80000019:
- entry->ecx = entry->edx = 0;
- break;
- case 0x8000001a:
- break;
- case 0x8000001d:
- break;
- /*Add support for Centaur's CPUID instruction*/
- case 0xC0000000:
- /*Just support up to 0xC0000004 now*/
- entry->eax = min(entry->eax, 0xC0000004);
- break;
- case 0xC0000001:
- entry->edx &= kvm_supported_word5_x86_features;
- cpuid_mask(&entry->edx, 5);
- break;
- case 3: /* Processor serial number */
- case 5: /* MONITOR/MWAIT */
- case 6: /* Thermal management */
- case 0xA: /* Architectural Performance Monitoring */
- case 0x80000007: /* Advanced power management */
- case 0xC0000002:
- case 0xC0000003:
- case 0xC0000004:
- default:
- entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
- break;
- }
-
- kvm_x86_ops->set_supported_cpuid(function, entry);
-
- put_cpu();
-}
-
-#undef F
-
-static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 __user *entries)
-{
- struct kvm_cpuid_entry2 *cpuid_entries;
- int limit, nent = 0, r = -E2BIG;
- u32 func;
-
- if (cpuid->nent < 1)
- goto out;
- if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
- cpuid->nent = KVM_MAX_CPUID_ENTRIES;
- r = -ENOMEM;
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
- if (!cpuid_entries)
- goto out;
-
- do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
- limit = cpuid_entries[0].eax;
- for (func = 1; func <= limit && nent < cpuid->nent; ++func)
- do_cpuid_ent(&cpuid_entries[nent], func, 0,
- &nent, cpuid->nent);
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
-
- do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
- limit = cpuid_entries[nent - 1].eax;
- for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
- do_cpuid_ent(&cpuid_entries[nent], func, 0,
- &nent, cpuid->nent);
-
-
-
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
-
- /* Add support for Centaur's CPUID instruction. */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
- do_cpuid_ent(&cpuid_entries[nent], 0xC0000000, 0,
- &nent, cpuid->nent);
-
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
-
- limit = cpuid_entries[nent - 1].eax;
- for (func = 0xC0000001;
- func <= limit && nent < cpuid->nent; ++func)
- do_cpuid_ent(&cpuid_entries[nent], func, 0,
- &nent, cpuid->nent);
-
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
- }
-
- do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
- cpuid->nent);
-
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
-
- do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
- cpuid->nent);
-
- r = -E2BIG;
- if (nent >= cpuid->nent)
- goto out_free;
-
- r = -EFAULT;
- if (copy_to_user(entries, cpuid_entries,
- nent * sizeof(struct kvm_cpuid_entry2)))
- goto out_free;
- cpuid->nent = nent;
- r = 0;
-
-out_free:
- vfree(cpuid_entries);
-out:
- return r;
-}
-
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
@@ -3043,13 +2540,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
- u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
- r = -ENOMEM;
- if (!u.lapic)
- goto out;
- r = -EFAULT;
- if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
+ u.lapic = memdup_user(argp, sizeof(*u.lapic));
+ if (IS_ERR(u.lapic)) {
+ r = PTR_ERR(u.lapic);
goto out;
+ }
+
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
if (r)
goto out;
@@ -3228,14 +2724,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_SET_XSAVE: {
- u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
- r = -ENOMEM;
- if (!u.xsave)
- break;
-
- r = -EFAULT;
- if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
- break;
+ u.xsave = memdup_user(argp, sizeof(*u.xsave));
+ if (IS_ERR(u.xsave)) {
+ r = PTR_ERR(u.xsave);
+ goto out;
+ }
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
@@ -3256,15 +2749,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_SET_XCRS: {
- u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
- r = -ENOMEM;
- if (!u.xcrs)
- break;
-
- r = -EFAULT;
- if (copy_from_user(u.xcrs, argp,
- sizeof(struct kvm_xcrs)))
- break;
+ u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
+ if (IS_ERR(u.xcrs)) {
+ r = PTR_ERR(u.xcrs);
+ goto out;
+ }
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
@@ -3461,16 +2950,59 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
return 0;
}
+/**
+ * write_protect_slot - write protect a slot for dirty logging
+ * @kvm: the kvm instance
+ * @memslot: the slot we protect
+ * @dirty_bitmap: the bitmap indicating which pages are dirty
+ * @nr_dirty_pages: the number of dirty pages
+ *
+ * We have two ways to find all sptes to protect:
+ * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
+ * checks ones that have a spte mapping a page in the slot.
+ * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ *
+ * Generally speaking, if the number of dirty pages is small compared to the
+ * number of shadow pages, the latter is preferable.
+ *
+ * Note that it is not a problem if another writer dirties a page, through a
+ * stale TLB entry, that is already marked dirty in the old bitmap: the page
+ * becomes write-protected again once we flush the TLB, and it is still
+ * reported as dirty to user space when the old bitmap is copied out.
+ */
+static void write_protect_slot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ unsigned long *dirty_bitmap,
+ unsigned long nr_dirty_pages)
+{
+ /* Not many dirty pages compared to # of shadow pages. */
+ if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
+ unsigned long gfn_offset;
+
+ for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
+ unsigned long gfn = memslot->base_gfn + gfn_offset;
+
+ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
+ spin_unlock(&kvm->mmu_lock);
+ }
+ kvm_flush_remote_tlbs(kvm);
+ } else {
+ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_slot_remove_write_access(kvm, memslot->id);
+ spin_unlock(&kvm->mmu_lock);
+ }
+}
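
The comment block above describes the cost model behind write_protect_slot(): walking the rmap once per dirty gfn scales with the number of dirty pages, while kvm_mmu_slot_remove_write_access() scales with the number of shadow pages, so the cheaper path is chosen by comparing the two counts. A trivial sketch of that decision (illustrative only; the real costs are per-spte work, not unit steps):

#include <stdio.h>

/* Pick the write-protection strategy the way write_protect_slot() does:
 * per-gfn rmap walks cost roughly nr_dirty_pages steps, while a full
 * shadow-page walk costs roughly n_used_mmu_pages steps. */
static const char *choose_strategy(unsigned long nr_dirty_pages,
				   unsigned long n_used_mmu_pages)
{
	if (nr_dirty_pages < n_used_mmu_pages)
		return "rmap write-protect per dirty gfn";
	return "kvm_mmu_slot_remove_write_access() over all shadow pages";
}

int main(void)
{
	printf("%s\n", choose_strategy(128, 4096));	/* few dirty pages */
	printf("%s\n", choose_strategy(65536, 4096));	/* mostly-dirty slot */
	return 0;
}
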
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r, i;
+ int r;
struct kvm_memory_slot *memslot;
- unsigned long n;
- unsigned long is_dirty = 0;
+ unsigned long n, nr_dirty_pages;
mutex_lock(&kvm->slots_lock);
@@ -3478,43 +3010,41 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots->memslots[log->slot];
+ memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
-
- for (i = 0; !is_dirty && i < n/sizeof(long); i++)
- is_dirty = memslot->dirty_bitmap[i];
+ nr_dirty_pages = memslot->nr_dirty_pages;
/* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
+ if (nr_dirty_pages) {
struct kvm_memslots *slots, *old_slots;
- unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap, *dirty_bitmap_head;
- dirty_bitmap = memslot->dirty_bitmap_head;
- if (memslot->dirty_bitmap == dirty_bitmap)
- dirty_bitmap += n / sizeof(long);
- memset(dirty_bitmap, 0, n);
+ dirty_bitmap = memslot->dirty_bitmap;
+ dirty_bitmap_head = memslot->dirty_bitmap_head;
+ if (dirty_bitmap == dirty_bitmap_head)
+ dirty_bitmap_head += n / sizeof(long);
+ memset(dirty_bitmap_head, 0, n);
r = -ENOMEM;
- slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
if (!slots)
goto out;
- memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
- slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
- slots->generation++;
+
+ memslot = id_to_memslot(slots, log->slot);
+ memslot->nr_dirty_pages = 0;
+ memslot->dirty_bitmap = dirty_bitmap_head;
+ update_memslots(slots, NULL);
old_slots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
- dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
kfree(old_slots);
- spin_lock(&kvm->mmu_lock);
- kvm_mmu_slot_remove_write_access(kvm, log->slot);
- spin_unlock(&kvm->mmu_lock);
+ write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
@@ -3659,14 +3189,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+ struct kvm_irqchip *chip;
- r = -ENOMEM;
- if (!chip)
+ chip = memdup_user(argp, sizeof(*chip));
+ if (IS_ERR(chip)) {
+ r = PTR_ERR(chip);
goto out;
- r = -EFAULT;
- if (copy_from_user(chip, argp, sizeof *chip))
- goto get_irqchip_out;
+ }
+
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
@@ -3685,14 +3215,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
+ struct kvm_irqchip *chip;
- r = -ENOMEM;
- if (!chip)
+ chip = memdup_user(argp, sizeof(*chip));
+ if (IS_ERR(chip)) {
+ r = PTR_ERR(chip);
goto out;
- r = -EFAULT;
- if (copy_from_user(chip, argp, sizeof *chip))
- goto set_irqchip_out;
+ }
+
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
@@ -3899,12 +3429,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
kvm_x86_ops->get_segment(vcpu, var, seg);
}
-static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
-{
- return gpa;
-}
-
-static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
gpa_t t_gpa;
struct x86_exception exception;
@@ -4088,7 +3613,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
if (ret < 0)
return 0;
- kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
+ kvm_mmu_pte_write(vcpu, gpa, val, bytes);
return 1;
}
@@ -4325,7 +3850,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
if (!exchanged)
return X86EMUL_CMPXCHG_FAILED;
- kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+ kvm_mmu_pte_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
@@ -4350,32 +3875,24 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
return r;
}
-
-static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
- int size, unsigned short port, void *val,
- unsigned int count)
+static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+ unsigned short port, void *val,
+ unsigned int count, bool in)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
- if (vcpu->arch.pio.count)
- goto data_avail;
-
- trace_kvm_pio(0, port, size, count);
+ trace_kvm_pio(!in, port, size, count);
vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = 1;
+ vcpu->arch.pio.in = in;
vcpu->arch.pio.count = count;
vcpu->arch.pio.size = size;
if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
- data_avail:
- memcpy(val, vcpu->arch.pio_data, size * count);
vcpu->arch.pio.count = 0;
return 1;
}
vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = KVM_EXIT_IO_IN;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = size;
vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
vcpu->run->io.count = count;
@@ -4384,36 +3901,37 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
return 0;
}
-static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
- int size, unsigned short port,
- const void *val, unsigned int count)
+static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port, void *val,
+ unsigned int count)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ int ret;
- trace_kvm_pio(1, port, size, count);
-
- vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = 0;
- vcpu->arch.pio.count = count;
- vcpu->arch.pio.size = size;
-
- memcpy(vcpu->arch.pio_data, val, size * count);
+ if (vcpu->arch.pio.count)
+ goto data_avail;
- if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+ ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+ if (ret) {
+data_avail:
+ memcpy(val, vcpu->arch.pio_data, size * count);
vcpu->arch.pio.count = 0;
return 1;
}
- vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = KVM_EXIT_IO_OUT;
- vcpu->run->io.size = size;
- vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
- vcpu->run->io.count = count;
- vcpu->run->io.port = port;
-
return 0;
}
+static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port,
+ const void *val, unsigned int count)
+{
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ memcpy(vcpu->arch.pio_data, val, size * count);
+ return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+}
+
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
return kvm_x86_ops->get_segment_base(vcpu, seg);
@@ -4628,6 +4146,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
}
+static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
+ u32 pmc, u64 *pdata)
+{
+ return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+}
+
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
emul_to_vcpu(ctxt)->arch.halt_request = 1;
@@ -4680,6 +4204,7 @@ static struct x86_emulate_ops emulate_ops = {
.set_dr = emulator_set_dr,
.set_msr = emulator_set_msr,
.get_msr = emulator_get_msr,
+ .read_pmc = emulator_read_pmc,
.halt = emulator_halt,
.wbinvd = emulator_wbinvd,
.fix_hypercall = emulator_fix_hypercall,
@@ -4837,6 +4362,50 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
return false;
}
+static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
+ unsigned long cr2, int emulation_type)
+{
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+
+ last_retry_eip = vcpu->arch.last_retry_eip;
+ last_retry_addr = vcpu->arch.last_retry_addr;
+
+ /*
+ * If the emulation was triggered by a #PF and the faulting
+ * instruction does not write a page table, the VM-exit was caused
+ * by write protection on a shadow page; we can zap that shadow
+ * page and retry the instruction directly.
+ *
+ * Note: if the guest uses a non-page-table-modifying instruction
+ * on the PDE that maps the instruction itself, zapping would unmap
+ * the instruction and send us into an infinite loop. To break out,
+ * we cache the last retried eip and fault address and give up when
+ * we see the same pair again.
+ */
+ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+
+ if (!(emulation_type & EMULTYPE_RETRY))
+ return false;
+
+ if (x86_page_table_writing_insn(ctxt))
+ return false;
+
+ if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+ return false;
+
+ vcpu->arch.last_retry_eip = ctxt->eip;
+ vcpu->arch.last_retry_addr = cr2;
+
+ if (!vcpu->arch.mmu.direct_map)
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+ kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+ return true;
+}
+
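/*
 * Illustrative sketch (not from the patch): retry_instruction() above caches
 * the last retried (eip, fault address) pair so that zap-and-retry is tried
 * only once per faulting instruction; a second identical fault falls back to
 * emulation instead of looping forever.  The guard below shows that pattern
 * with made-up names.
 */
#include <stdbool.h>
#include <stdio.h>

struct retry_state {
	unsigned long last_eip;
	unsigned long last_addr;
};

/* Allow a retry unless this exact (eip, addr) pair already failed once;
 * record the pair so the next identical fault gives up instead of looping. */
static bool may_retry(struct retry_state *st, unsigned long eip,
		      unsigned long addr)
{
	if (eip == st->last_eip && addr == st->last_addr) {
		st->last_eip = st->last_addr = 0;   /* reset, then fall back */
		return false;
	}
	st->last_eip = eip;
	st->last_addr = addr;
	return true;
}

int main(void)
{
	struct retry_state st = { 0, 0 };

	printf("%d\n", may_retry(&st, 0x1000, 0x2000));  /* 1: first fault, retry */
	printf("%d\n", may_retry(&st, 0x1000, 0x2000));  /* 0: same fault, give up */
	printf("%d\n", may_retry(&st, 0x1004, 0x2000));  /* 1: different eip */
	return 0;
}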
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
@@ -4878,6 +4447,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DONE;
}
+ if (retry_instruction(ctxt, cr2, emulation_type))
+ return EMULATE_DONE;
+
/* this is needed for vmware backdoor interface to work since it
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
@@ -5096,17 +4668,17 @@ static void kvm_timer_init(void)
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-static int kvm_is_in_guest(void)
+int kvm_is_in_guest(void)
{
- return percpu_read(current_vcpu) != NULL;
+ return __this_cpu_read(current_vcpu) != NULL;
}
static int kvm_is_user_mode(void)
{
int user_mode = 3;
- if (percpu_read(current_vcpu))
- user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+ if (__this_cpu_read(current_vcpu))
+ user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
return user_mode != 0;
}
@@ -5115,8 +4687,8 @@ static unsigned long kvm_get_guest_ip(void)
{
unsigned long ip = 0;
- if (percpu_read(current_vcpu))
- ip = kvm_rip_read(percpu_read(current_vcpu));
+ if (__this_cpu_read(current_vcpu))
+ ip = kvm_rip_read(__this_cpu_read(current_vcpu));
return ip;
}
@@ -5129,13 +4701,13 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
- percpu_write(current_vcpu, vcpu);
+ __this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
- percpu_write(current_vcpu, NULL);
+ __this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
@@ -5234,15 +4806,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
- unsigned long a1)
-{
- if (is_long_mode(vcpu))
- return a0;
- else
- return a0 | ((gpa_t)a1 << 32);
-}
-
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
u64 param, ingpa, outgpa, ret;
@@ -5338,9 +4901,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
- case KVM_HC_MMU_OP:
- r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
- break;
default:
ret = -KVM_ENOSYS;
break;
@@ -5370,125 +4930,6 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
-static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
-{
- struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- int j, nent = vcpu->arch.cpuid_nent;
-
- e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
- /* when no next entry is found, the current entry[i] is reselected */
- for (j = i + 1; ; j = (j + 1) % nent) {
- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
- if (ej->function == e->function) {
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- return j;
- }
- }
- return 0; /* silence gcc, even though control never reaches here */
-}
-
-/* find an entry with matching function, matching index (if needed), and that
- * should be read next (if it's stateful) */
-static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
- u32 function, u32 index)
-{
- if (e->function != function)
- return 0;
- if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
- return 0;
- if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
- !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
- return 0;
- return 1;
-}
-
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function, u32 index)
-{
- int i;
- struct kvm_cpuid_entry2 *best = NULL;
-
- for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
- struct kvm_cpuid_entry2 *e;
-
- e = &vcpu->arch.cpuid_entries[i];
- if (is_matching_cpuid_entry(e, function, index)) {
- if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
- move_to_next_stateful_cpuid_entry(vcpu, i);
- best = e;
- break;
- }
- }
- return best;
-}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-
-int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
- if (!best || best->eax < 0x80000008)
- goto not_found;
- best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best)
- return best->eax & 0xff;
-not_found:
- return 36;
-}
-
-/*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
- */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
- u32 function, u32 index)
-{
- struct kvm_cpuid_entry2 *maxlevel;
-
- maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
- if (!maxlevel || maxlevel->eax >= function)
- return NULL;
- if (function & 0x80000000) {
- maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
- if (!maxlevel)
- return NULL;
- }
- return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
-}
-
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
-{
- u32 function, index;
- struct kvm_cpuid_entry2 *best;
-
- function = kvm_register_read(vcpu, VCPU_REGS_RAX);
- index = kvm_register_read(vcpu, VCPU_REGS_RCX);
- kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
- best = kvm_find_cpuid_entry(vcpu, function, index);
-
- if (!best)
- best = check_cpuid_limit(vcpu, function, index);
-
- if (best) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
- }
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- trace_kvm_cpuid(function,
- kvm_register_read(vcpu, VCPU_REGS_RAX),
- kvm_register_read(vcpu, VCPU_REGS_RBX),
- kvm_register_read(vcpu, VCPU_REGS_RCX),
- kvm_register_read(vcpu, VCPU_REGS_RDX));
-}
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
-
/*
* Check if userspace requested an interrupt window, and that the
* interrupt window is open.
@@ -5649,6 +5090,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
+ bool req_immediate_exit = 0;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5688,7 +5130,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
-
+ req_immediate_exit =
+ kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
+ if (kvm_check_request(KVM_REQ_PMU, vcpu))
+ kvm_handle_pmu_event(vcpu);
+ if (kvm_check_request(KVM_REQ_PMI, vcpu))
+ kvm_deliver_pmi(vcpu);
}
r = kvm_mmu_reload(vcpu);
@@ -5739,6 +5186,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ if (req_immediate_exit)
+ smp_send_reschedule(vcpu->cpu);
+
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -5944,10 +5394,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (r <= 0)
goto out;
- if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
- kvm_register_write(vcpu, VCPU_REGS_RAX,
- kvm_run->hypercall.ret);
-
r = __vcpu_run(vcpu);
out:
@@ -6149,7 +5595,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
- update_cpuid(vcpu);
+ kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
@@ -6426,6 +5872,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
+ kvm_pmu_reset(vcpu);
+
return kvm_x86_ops->vcpu_reset(vcpu);
}
@@ -6474,10 +5922,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
- vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- vcpu->arch.mmu.translate_gpa = translate_gpa;
- vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
@@ -6514,6 +5958,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_mce_banks;
kvm_async_pf_hash_reset(vcpu);
+ kvm_pmu_init(vcpu);
return 0;
fail_free_mce_banks:
@@ -6532,6 +5977,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
int idx;
+ kvm_pmu_destroy(vcpu);
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d36fe237c66..cb80c293cdd 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -33,9 +33,6 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}
-struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function, u32 index);
-
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
@@ -125,4 +122,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);
+extern u64 host_xcr0;
+
#endif
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index 46fc4ee09fc..88ad5fbda6e 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -82,9 +82,16 @@ insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
const insn_attr_t *table;
if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
return 0;
- table = inat_avx_tables[vex_m][vex_p];
+ /* Check the master (no last-prefix) table first */
+ table = inat_avx_tables[vex_m][0];
if (!table)
return 0;
+ if (!inat_is_group(table[opcode]) && vex_p) {
+ /* If this is not a group, get attribute directly */
+ table = inat_avx_tables[vex_m][vex_p];
+ if (!table)
+ return 0;
+ }
return table[opcode];
}
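/*
 * Illustrative sketch (not from the patch): the inat.c hunk above makes AVX
 * attribute lookup consult the no-last-prefix "master" column first and only
 * fall through to the prefixed column when the master entry is not a group.
 * The toy tables below mirror that order (the vex_m dimension is dropped for
 * brevity; names are not the kernel's).
 */
#include <stdio.h>

#define NPFX 4          /* 0 = master column, 1..3 = 0x66/0xF3/0xF2 */
#define ATTR_GROUP 0x1  /* toy attribute bit: "this entry is a group" */

static const unsigned int *tables[NPFX];  /* one 256-entry column per prefix */

/* Master column first; use the prefixed column only for non-group entries. */
static unsigned int lookup(unsigned char opcode, unsigned int pfx)
{
	const unsigned int *t = tables[0];

	if (!t)
		return 0;
	if (!(t[opcode] & ATTR_GROUP) && pfx) {
		t = tables[pfx];
		if (!t)
			return 0;
	}
	return t[opcode];
}

int main(void)
{
	static unsigned int master[256], pfx66[256];

	master[0x10] = 0x100;              /* plain entry: prefixed column wins */
	pfx66[0x10]  = 0x200;
	master[0x71] = 0x300 | ATTR_GROUP; /* group entry: stay on the master column */
	tables[0] = master;
	tables[1] = pfx66;

	printf("%#x %#x\n", lookup(0x10, 1), lookup(0x71, 1));
	return 0;
}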
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 374562ed670..5a1f9f3e3fb 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -202,7 +202,7 @@ void insn_get_opcode(struct insn *insn)
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
- if (!inat_accept_vex(insn->attr))
+ if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
insn->attr = 0; /* This instruction is bad */
goto end; /* VEX has only 1 byte for opcode */
}
@@ -249,6 +249,8 @@ void insn_get_modrm(struct insn *insn)
pfx = insn_last_prefix(insn);
insn->attr = inat_get_group_attribute(mod, pfx,
insn->attr);
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+ insn->attr = 0; /* This is bad */
}
}
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 82004d2bf05..bd59090825d 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -164,15 +164,13 @@ EXPORT_SYMBOL(strchr);
size_t strlen(const char *s)
{
int d0;
- int res;
+ size_t res;
asm volatile("repne\n\t"
- "scasb\n\t"
- "notl %0\n\t"
- "decl %0"
+ "scasb"
: "=c" (res), "=&D" (d0)
: "1" (s), "a" (0), "0" (0xffffffffu)
: "memory");
- return res;
+ return ~res - 1;
}
EXPORT_SYMBOL(strlen);
#endif
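/*
 * Illustrative check (not from the patch): the strlen() rewrite above drops
 * the notl/decl pair from the asm and returns ~res - 1 in C instead.  REPNE
 * SCASB starts ECX at 0xffffffff and decrements it once per byte scanned,
 * including the terminating NUL, so ~ECX is length + 1.  The function below
 * models only that counter arithmetic, with no inline asm.
 */
#include <stdio.h>
#include <string.h>

static size_t scasb_strlen(const char *s)
{
	unsigned int counter = 0xffffffffu;  /* what ECX starts at */
	const char *p = s;

	do {
		counter--;                    /* one decrement per byte, NUL included */
	} while (*p++ != '\0');

	return (size_t)(~counter - 1);        /* same as the new return statement */
}

int main(void)
{
	const char *samples[] = { "", "x", "hello, world" };

	for (unsigned int i = 0; i < 3; i++)
		printf("%zu %zu\n", scasb_strlen(samples[i]), strlen(samples[i]));
	return 0;
}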
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index a793da5e560..5b83c51c12e 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1,5 +1,11 @@
# x86 Opcode Maps
#
+# This is (mostly) based on the following documentation:
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2
+# (#325383-040US, October 2011)
+# - Intel(R) Advanced Vector Extensions Programming Reference
+# (#319433-011, June 2011)
+#
#<Opcode maps>
# Table: table-name
# Referrer: escaped-name
@@ -15,10 +21,13 @@
# EndTable
#
# AVX Superscripts
-# (VEX): this opcode can accept VEX prefix.
-# (oVEX): this opcode requires VEX prefix.
-# (o128): this opcode only supports 128bit VEX.
-# (o256): this opcode only supports 256bit VEX.
+# (v): this opcode requires VEX prefix.
+# (v1): this opcode only supports 128bit VEX.
+#
+# Last Prefix Superscripts
+# - (66): the last prefix is 0x66
+# - (F3): the last prefix is 0xF3
+# - (F2): the last prefix is 0xF2
#
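# Illustrative aside (not from the patch): with the new superscripts, a line
# such as
#   58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1)
# lists the decodings of opcode 0x58 separated by '|': the unmarked form
# applies with no mandatory last prefix, and (66)/(F3)/(F2) pick the form for
# that prefix. Operand letters follow SDM Appendix A: V is an XMM/YMM register
# from ModRM.reg, W an XMM/YMM register or memory operand from ModRM.rm, and
# H the register encoded in VEX.vvvv.
#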
Table: one byte opcode
@@ -199,8 +208,8 @@ a0: MOV AL,Ob
a1: MOV rAX,Ov
a2: MOV Ob,AL
a3: MOV Ov,rAX
-a4: MOVS/B Xb,Yb
-a5: MOVS/W/D/Q Xv,Yv
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
a6: CMPS/B Xb,Yb
a7: CMPS/W/D Xv,Yv
a8: TEST AL,Ib
@@ -233,8 +242,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix)
-c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
c6: Grp11 Eb,Ib (1A)
c7: Grp11 Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -320,14 +329,19 @@ AVXcode: 1
# 3DNow! uses the last imm byte as opcode extension.
0f: 3DNow! Pq,Qq,Ib
# 0x0f 0x10-0x1f
-10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128)
-11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128)
-12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX)
-13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128)
-14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX)
-15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX)
-16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX)
-17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128)
+# NOTE: The Intel SDM opcode map lists vmovups and vmovupd without operands,
+# but they actually take operands. Also, vmovss and vmovsd only accept 128bit.
+# MOVSS/MOVSD have three forms in the SDM; this map shows only a typical one.
+# Many AVX instructions lack the v1 superscript, according to Intel AVX
+# Programming Reference A.1.
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
18: Grp16 (1A)
19:
1a:
@@ -345,14 +359,14 @@ AVXcode: 1
25:
26:
27:
-28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX)
-29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX)
-2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128)
-2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX)
-2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128)
-2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128)
-2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128)
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
# 0x0f 0x30-0x3f
30: WRMSR
31: RDTSC
@@ -388,65 +402,66 @@ AVXcode: 1
4e: CMOVLE/NG Gv,Ev
4f: CMOVNLE/G Gv,Ev
# 0x0f 0x50-0x5f
-50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX)
-51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128)
-52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128)
-53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128)
-54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX)
-55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX)
-56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX)
-57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX)
-58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128)
-59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128)
-5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128)
-5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX)
-5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128)
-5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128)
-5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128)
-5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128)
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
# 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128)
-61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128)
-62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128)
-63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128)
-64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128)
-65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128)
-66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128)
-67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128)
-68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128)
-69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128)
-6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128)
-6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128)
-6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128)
-6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128)
-6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128)
-6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX)
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3)
# 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128)
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
71: Grp12 (1A)
72: Grp13 (1A)
73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128)
-75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128)
-76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128)
-77: emms/vzeroupper/vzeroall (VEX)
-78: VMREAD Ed/q,Gd/q
-79: VMWRITE Gd/q,Ed/q
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: (v) is dropped because vzeroall and vzeroupper decode as emms without a VEX prefix.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy
+79: VMWRITE Gy,Ey
7a:
7b:
-7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX)
-7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX)
-7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128)
-7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
# 0x0f 0x80-0x8f
80: JO Jz (f64)
81: JNO Jz (f64)
-82: JB/JNAE/JC Jz (f64)
-83: JNB/JAE/JNC Jz (f64)
-84: JZ/JE Jz (f64)
-85: JNZ/JNE Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
86: JBE/JNA Jz (f64)
-87: JNBE/JA Jz (f64)
+87: JA/JNBE Jz (f64)
88: JS Jz (f64)
89: JNS Jz (f64)
8a: JP/JPE Jz (f64)
@@ -502,18 +517,18 @@ b8: JMPE | POPCNT Gv,Ev (F3)
b9: Grp10 (1A)
ba: Grp8 Ev,Ib (1A)
bb: BTC Ev,Gv
-bc: BSF Gv,Ev
-bd: BSR Gv,Ev
+bc: BSF Gv,Ev | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev | LZCNT Gv,Ev (F3)
be: MOVSX Gv,Eb
bf: MOVSX Gv,Ew
# 0x0f 0xc0-0xcf
c0: XADD Eb,Gb
c1: XADD Ev,Gv
-c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX)
-c3: movnti Md/q,Gd/q
-c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128)
-c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128)
-c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX)
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
c7: Grp9 (1A)
c8: BSWAP RAX/EAX/R8/R8D
c9: BSWAP RCX/ECX/R9/R9D
@@ -524,55 +539,55 @@ cd: BSWAP RBP/EBP/R13/R13D
ce: BSWAP RSI/ESI/R14/R14D
cf: BSWAP RDI/EDI/R15/R15D
# 0x0f 0xd0-0xdf
-d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX)
-d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128)
-d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128)
-d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128)
-d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128)
-d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128)
-d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128)
-d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128)
-d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128)
-da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128)
-db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128)
-dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128)
-dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128)
-de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128)
-df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128)
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1)
# 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128)
-e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128)
-e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128)
-e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128)
-e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128)
-e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128)
-e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX)
-e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX)
-e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128)
-e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128)
-ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128)
-eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128)
-ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128)
-ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128)
-ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128)
-ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128)
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1)
# 0x0f 0xf0-0xff
-f0: lddqu Vdq,Mdq (F2),(VEX)
-f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128)
-f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128)
-f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128)
-f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128)
-f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128)
-f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128)
-f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128)
-f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128)
-f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128)
-fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128)
-fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128)
-fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128)
-fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128)
-fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128)
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
ff:
EndTable
@@ -580,155 +595,193 @@ Table: 3-byte opcode 1 (0x0f 0x38)
Referrer: 3-byte escape 1
AVXcode: 2
# 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128)
-01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128)
-02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128)
-03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128)
-04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128)
-05: phsubw Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128)
-06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128)
-07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128)
-08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128)
-09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128)
-0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128)
-0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128)
-0c: Vpermilps /r (66),(oVEX)
-0d: Vpermilpd /r (66),(oVEX)
-0e: vtestps /r (66),(oVEX)
-0f: vtestpd /r (66),(oVEX)
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
# 0x0f 0x38 0x10-0x1f
10: pblendvb Vdq,Wdq (66)
11:
12:
-13:
+13: vcvtph2ps Vx,Wx,Ib (66),(v)
14: blendvps Vdq,Wdq (66)
15: blendvpd Vdq,Wdq (66)
-16:
-17: ptest Vdq,Wdq (66),(VEX)
-18: vbroadcastss /r (66),(oVEX)
-19: vbroadcastsd /r (66),(oVEX),(o256)
-1a: vbroadcastf128 /r (66),(oVEX),(o256)
+16: vpermps Vqq,Hqq,Wqq (66),(v)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v)
+1a: vbroadcastf128 Vqq,Mdq (66),(v)
1b:
-1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128)
-1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128)
-1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
1f:
# 0x0f 0x38 0x20-0x2f
-20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128)
-21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128)
-22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128)
-23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128)
-24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128)
-25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128)
+20: vpmovsxbw Vx,Ux/Mq (66),(v1)
+21: vpmovsxbd Vx,Ux/Md (66),(v1)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1)
+24: vpmovsxwq Vx,Ux/Md (66),(v1)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1)
26:
27:
-28: pmuldq Vdq,Wdq (66),(VEX),(o128)
-29: pcmpeqq Vdq,Wdq (66),(VEX),(o128)
-2a: movntdqa Vdq,Mdq (66),(VEX),(o128)
-2b: packusdw Vdq,Wdq (66),(VEX),(o128)
-2c: vmaskmovps(ld) /r (66),(oVEX)
-2d: vmaskmovpd(ld) /r (66),(oVEX)
-2e: vmaskmovps(st) /r (66),(oVEX)
-2f: vmaskmovpd(st) /r (66),(oVEX)
+28: vpmuldq Vx,Hx,Wx (66),(v1)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1)
+2a: vmovntdqa Vx,Mx (66),(v1)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
# 0x0f 0x38 0x30-0x3f
-30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128)
-31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128)
-32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128)
-33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128)
-34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128)
-35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128)
-36:
-37: pcmpgtq Vdq,Wdq (66),(VEX),(o128)
-38: pminsb Vdq,Wdq (66),(VEX),(o128)
-39: pminsd Vdq,Wdq (66),(VEX),(o128)
-3a: pminuw Vdq,Wdq (66),(VEX),(o128)
-3b: pminud Vdq,Wdq (66),(VEX),(o128)
-3c: pmaxsb Vdq,Wdq (66),(VEX),(o128)
-3d: pmaxsd Vdq,Wdq (66),(VEX),(o128)
-3e: pmaxuw Vdq,Wdq (66),(VEX),(o128)
-3f: pmaxud Vdq,Wdq (66),(VEX),(o128)
+30: vpmovzxbw Vx,Ux/Mq (66),(v1)
+31: vpmovzxbd Vx,Ux/Md (66),(v1)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1)
+34: vpmovzxwq Vx,Ux/Md (66),(v1)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1)
+36: vpermd Vqq,Hqq,Wqq (66),(v)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1)
+39: vpminsd Vx,Hx,Wx (66),(v1)
+3a: vpminuw Vx,Hx,Wx (66),(v1)
+3b: vpminud Vx,Hx,Wx (66),(v1)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1)
# 0x0f 0x38 0x40-0x8f
-40: pmulld Vdq,Wdq (66),(VEX),(o128)
-41: phminposuw Vdq,Wdq (66),(VEX),(o128)
-80: INVEPT Gd/q,Mdq (66)
-81: INVPID Gd/q,Mdq (66)
+40: vpmulld Vx,Hx,Wx (66),(v1)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42:
+43:
+44:
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v)
+5a: vbroadcasti128 Vqq,Mdq (66),(v)
+# Skip 0x5b-0x77
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+# Skip 0x7a-0x7f
+80: INVEPT Gy,Mdq (66)
+81: INVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
# 0x0f 0x38 0x90-0xbf (FMA)
-96: vfmaddsub132pd/ps /r (66),(VEX)
-97: vfmsubadd132pd/ps /r (66),(VEX)
-98: vfmadd132pd/ps /r (66),(VEX)
-99: vfmadd132sd/ss /r (66),(VEX),(o128)
-9a: vfmsub132pd/ps /r (66),(VEX)
-9b: vfmsub132sd/ss /r (66),(VEX),(o128)
-9c: vfnmadd132pd/ps /r (66),(VEX)
-9d: vfnmadd132sd/ss /r (66),(VEX),(o128)
-9e: vfnmsub132pd/ps /r (66),(VEX)
-9f: vfnmsub132sd/ss /r (66),(VEX),(o128)
-a6: vfmaddsub213pd/ps /r (66),(VEX)
-a7: vfmsubadd213pd/ps /r (66),(VEX)
-a8: vfmadd213pd/ps /r (66),(VEX)
-a9: vfmadd213sd/ss /r (66),(VEX),(o128)
-aa: vfmsub213pd/ps /r (66),(VEX)
-ab: vfmsub213sd/ss /r (66),(VEX),(o128)
-ac: vfnmadd213pd/ps /r (66),(VEX)
-ad: vfnmadd213sd/ss /r (66),(VEX),(o128)
-ae: vfnmsub213pd/ps /r (66),(VEX)
-af: vfnmsub213sd/ss /r (66),(VEX),(o128)
-b6: vfmaddsub231pd/ps /r (66),(VEX)
-b7: vfmsubadd231pd/ps /r (66),(VEX)
-b8: vfmadd231pd/ps /r (66),(VEX)
-b9: vfmadd231sd/ss /r (66),(VEX),(o128)
-ba: vfmsub231pd/ps /r (66),(VEX)
-bb: vfmsub231sd/ss /r (66),(VEX),(o128)
-bc: vfnmadd231pd/ps /r (66),(VEX)
-bd: vfnmadd231sd/ss /r (66),(VEX),(o128)
-be: vfnmsub231pd/ps /r (66),(VEX)
-bf: vfnmsub231sd/ss /r (66),(VEX),(o128)
+90: vgatherdd/q Vx,Hx,Wx (66),(v)
+91: vgatherqd/q Vx,Hx,Wx (66),(v)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
# 0x0f 0x38 0xc0-0xff
-db: aesimc Vdq,Wdq (66),(VEX),(o128)
-dc: aesenc Vdq,Wdq (66),(VEX),(o128)
-dd: aesenclast Vdq,Wdq (66),(VEX),(o128)
-de: aesdec Vdq,Wdq (66),(VEX),(o128)
-df: aesdeclast Vdq,Wdq (66),(VEX),(o128)
-f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2)
-f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
+f3: ANDN Gy,By,Ey (v)
+f4: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
Referrer: 3-byte escape 2
AVXcode: 3
# 0x0f 0x3a 0x00-0xff
-04: vpermilps /r,Ib (66),(oVEX)
-05: vpermilpd /r,Ib (66),(oVEX)
-06: vperm2f128 /r,Ib (66),(oVEX),(o256)
-08: roundps Vdq,Wdq,Ib (66),(VEX)
-09: roundpd Vdq,Wdq,Ib (66),(VEX)
-0a: roundss Vss,Wss,Ib (66),(VEX),(o128)
-0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128)
-0c: blendps Vdq,Wdq,Ib (66),(VEX)
-0d: blendpd Vdq,Wdq,Ib (66),(VEX)
-0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128)
-0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128)
-14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128)
-15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128)
-16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128)
-17: extractps Ed,Vdq,Ib (66),(VEX),(o128)
-18: vinsertf128 /r,Ib (66),(oVEX),(o256)
-19: vextractf128 /r,Ib (66),(oVEX),(o256)
-20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128)
-21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128)
-22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128)
-40: dpps Vdq,Wdq,Ib (66),(VEX)
-41: dppd Vdq,Wdq,Ib (66),(VEX),(o128)
-42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128)
-44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128)
-4a: vblendvps /r,Ib (66),(oVEX)
-4b: vblendvpd /r,Ib (66),(oVEX)
-4c: vpblendvb /r,Ib (66),(oVEX),(o128)
-60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128)
-61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128)
-62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128)
-63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128)
-df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128)
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03:
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66)
+09: vroundpd Vx,Wx,Ib (66)
+0a: vroundss Vss,Wss,Ib (66),(v1)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v)
+19: vextractf128 Wdq,Vqq,Ib (66),(v)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v)
+39: vextracti128 Wdq,Vqq,Ib (66),(v)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
EndTable
GrpTable: Grp1
@@ -790,7 +843,7 @@ GrpTable: Grp5
2: CALLN Ev (f64)
3: CALLF Ep
4: JMPN Ev (f64)
-5: JMPF Ep
+5: JMPF Mp
6: PUSH Ev (d64)
7:
EndTable
@@ -807,7 +860,7 @@ EndTable
GrpTable: Grp7
0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5:
@@ -824,44 +877,45 @@ EndTable
GrpTable: Grp9
1: CMPXCHG8B/16B Mq/Mdq
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3)
-7: VMPTRST Mq
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3)
EndTable
GrpTable: Grp10
EndTable
GrpTable: Grp11
+# Note: the operands are given by the group opcode
0: MOV
EndTable
GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128)
-4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128)
-6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp13
-2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128)
-4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128)
-6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128)
-3: psrldq Udq,Ib (66),(11B),(VEX),(o128)
-6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128)
-7: pslldq Udq,Ib (66),(11B),(VEX),(o128)
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
EndTable
GrpTable: Grp15
-0: fxsave
-1: fxstor
-2: ldmxcsr (VEX)
-3: stmxcsr (VEX)
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
5: XRSTOR | lfence (11B)
-6: mfence (11B)
+6: XSAVEOPT | mfence (11B)
7: clflush | sfence (11B)
EndTable
@@ -872,6 +926,12 @@ GrpTable: Grp16
3: prefetch T2
EndTable
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 3d11327c9ab..23d8e5fecf7 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
obj-$(CONFIG_ACPI_NUMA) += srat.o
obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
-obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
-
obj-$(CONFIG_MEMTEST) += memtest.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index d0474ad2a6e..1fb85dbe390 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -25,7 +25,7 @@ int fixup_exception(struct pt_regs *regs)
if (fixup) {
/* If fixup is less than 16, it means uaccess error */
if (fixup->fixup < 16) {
- current_thread_info()->uaccess_err = -EFAULT;
+ current_thread_info()->uaccess_err = 1;
regs->ip += fixup->fixup;
return 1;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5db0490deb0..9d74824a708 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -626,7 +626,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+ unsigned long address, int signal, int si_code)
{
struct task_struct *tsk = current;
unsigned long *stackend;
@@ -634,8 +634,17 @@ no_context(struct pt_regs *regs, unsigned long error_code,
int sig;
/* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs))
+ if (fixup_exception(regs)) {
+ if (current_thread_info()->sig_on_uaccess_error && signal) {
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code | PF_USER;
+ tsk->thread.cr2 = address;
+
+ /* XXX: hwpoison faults will set the wrong code. */
+ force_sig_info_fault(signal, si_code, address, tsk, 0);
+ }
return;
+ }
/*
* 32-bit:
@@ -755,7 +764,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (is_f00f_bug(regs, address))
return;
- no_context(regs, error_code, address);
+ no_context(regs, error_code, address, SIGSEGV, si_code);
}
static noinline void
@@ -819,7 +828,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
- no_context(regs, error_code, address);
+ no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
}
@@ -854,7 +863,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (!(fault & VM_FAULT_RETRY))
up_read(&current->mm->mmap_sem);
if (!(error_code & PF_USER))
- no_context(regs, error_code, address);
+ no_context(regs, error_code, address, 0, 0);
return 1;
}
if (!(fault & VM_FAULT_ERROR))
@@ -864,7 +873,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
- no_context(regs, error_code, address);
+ no_context(regs, error_code, address,
+ SIGSEGV, SEGV_MAPERR);
return 1;
}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ea305856151..dd74e46828c 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index b4996266210..f4f29b19fac 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
+ arch_flush_lazy_mmu_mode();
return (void *)vaddr;
}
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_DEBUG_HIGHMEM
else {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 87488b93a65..a298914058f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
good_end = max_pfn_mapped << PAGE_SHIFT;
base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
- if (base == MEMBLOCK_ERROR)
+ if (!base)
panic("Cannot find space for the kernel page tables");
pgt_buf_start = base >> PAGE_SHIFT;
@@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
void __init native_pagetable_reserve(u64 start, u64 end)
{
- memblock_x86_reserve_range(start, end, "PGTABLE");
+ memblock_reserve(start, end - start);
}
struct map_range {
@@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
* pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
* so that they can be reused for other purposes.
*
- * On native it just means calling memblock_x86_reserve_range, on Xen it
- * also means marking RW the pagetable pages that we allocated before
+ * On native it just means calling memblock_reserve, on Xen it also
+ * means marking RW the pagetable pages that we allocated before
* but that haven't been used.
*
* In fact on xen we mark RO the whole range pgt_buf_start -
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 29f7c6d9817..0c1da394a63 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page)
void __init add_highpages_with_active_regions(int nid,
unsigned long start_pfn, unsigned long end_pfn)
{
- struct range *range;
- int nr_range;
- int i;
-
- nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
- for (i = 0; i < nr_range; i++) {
- struct page *page;
- int node_pfn;
-
- for (node_pfn = range[i].start; node_pfn < range[i].end;
- node_pfn++) {
- if (!pfn_valid(node_pfn))
- continue;
- page = pfn_to_page(node_pfn);
- add_one_highpage_init(page);
- }
+ phys_addr_t start, end;
+ u64 i;
+
+ for_each_free_mem_range(i, nid, &start, &end, NULL) {
+ unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+ start_pfn, end_pfn);
+ unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+ start_pfn, end_pfn);
+ for ( ; pfn < e_pfn; pfn++)
+ if (pfn_valid(pfn))
+ add_one_highpage_init(pfn_to_page(pfn));
}
}
#else
@@ -650,18 +644,18 @@ void __init initmem_init(void)
highstart_pfn = highend_pfn = max_pfn;
if (max_pfn > max_low_pfn)
highstart_pfn = max_low_pfn;
- memblock_x86_register_active_regions(0, 0, highend_pfn);
- sparse_memory_present_with_active_regions(0);
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
num_physpages = highend_pfn;
high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
- memblock_x86_register_active_regions(0, 0, max_low_pfn);
- sparse_memory_present_with_active_regions(0);
num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
+
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ sparse_memory_present_with_active_regions(0);
+
#ifdef CONFIG_FLATMEM
max_mapnr = num_physpages;
#endif
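/*
 * Illustrative sketch (not from the patch): the add_highpages_with_active_regions()
 * hunk above clamps each free memblock range to the requested [start_pfn,
 * end_pfn) window with clamp_t() before registering pages.  The clamp is just
 * an interval intersection, sketched below with generic names.
 */
#include <stdio.h>

/* Intersect [lo, hi) with the window [wlo, whi); return 0 if the result is empty. */
static int clamp_range(unsigned long lo, unsigned long hi,
		       unsigned long wlo, unsigned long whi,
		       unsigned long *out_lo, unsigned long *out_hi)
{
	unsigned long l = lo > wlo ? lo : wlo;
	unsigned long h = hi < whi ? hi : whi;

	if (l >= h)
		return 0;
	*out_lo = l;
	*out_hi = h;
	return 1;
}

int main(void)
{
	unsigned long lo, hi;

	if (clamp_range(100, 300, 200, 1000, &lo, &hi))
		printf("[%lu, %lu)\n", lo, hi);     /* [200, 300) */
	if (!clamp_range(100, 150, 200, 1000, &lo, &hi))
		printf("empty\n");                  /* no overlap with the window */
	return 0;
}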
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bbaaa005bf0..a8a56ce3a96 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
- memblock_x86_register_active_regions(0, 0, max_pfn);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
deleted file mode 100644
index 992da5ec5a6..00000000000
--- a/arch/x86/mm/memblock.c
+++ /dev/null
@@ -1,348 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-/* Check for already reserved areas */
-bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
-{
- struct memblock_region *r;
- u64 addr = *addrp, last;
- u64 size = *sizep;
- bool changed = false;
-
-again:
- last = addr + size;
- for_each_memblock(reserved, r) {
- if (last > r->base && addr < r->base) {
- size = r->base - addr;
- changed = true;
- goto again;
- }
- if (last > (r->base + r->size) && addr < (r->base + r->size)) {
- addr = round_up(r->base + r->size, align);
- size = last - addr;
- changed = true;
- goto again;
- }
- if (last <= (r->base + r->size) && addr >= r->base) {
- *sizep = 0;
- return false;
- }
- }
- if (changed) {
- *addrp = addr;
- *sizep = size;
- }
- return changed;
-}
-
-/*
- * Find next free range after start, and size is returned in *sizep
- */
-u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
-{
- struct memblock_region *r;
-
- for_each_memblock(memory, r) {
- u64 ei_start = r->base;
- u64 ei_last = ei_start + r->size;
- u64 addr;
-
- addr = round_up(ei_start, align);
- if (addr < start)
- addr = round_up(start, align);
- if (addr >= ei_last)
- continue;
- *sizep = ei_last - addr;
- while (memblock_x86_check_reserved_size(&addr, sizep, align))
- ;
-
- if (*sizep)
- return addr;
- }
-
- return MEMBLOCK_ERROR;
-}
-
-static __init struct range *find_range_array(int count)
-{
- u64 end, size, mem;
- struct range *range;
-
- size = sizeof(struct range) * count;
- end = memblock.current_limit;
-
- mem = memblock_find_in_range(0, end, size, sizeof(struct range));
- if (mem == MEMBLOCK_ERROR)
- panic("can not find more space for range array");
-
- /*
- * This range is tempoaray, so don't reserve it, it will not be
- * overlapped because We will not alloccate new buffer before
- * We discard this one
- */
- range = __va(mem);
- memset(range, 0, size);
-
- return range;
-}
-
-static void __init memblock_x86_subtract_reserved(struct range *range, int az)
-{
- u64 final_start, final_end;
- struct memblock_region *r;
-
- /* Take out region array itself at first*/
- memblock_free_reserved_regions();
-
- memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
-
- for_each_memblock(reserved, r) {
- memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
- final_start = PFN_DOWN(r->base);
- final_end = PFN_UP(r->base + r->size);
- if (final_start >= final_end)
- continue;
- subtract_range(range, az, final_start, final_end);
- }
-
- /* Put region array back ? */
- memblock_reserve_reserved_regions();
-}
-
-struct count_data {
- int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct count_data *data = datax;
-
- data->nr++;
-
- return 0;
-}
-
-static int __init count_early_node_map(int nodeid)
-{
- struct count_data data;
-
- data.nr = 0;
- work_with_active_regions(nodeid, count_work_fn, &data);
-
- return data.nr;
-}
-
-int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- int count;
- struct range *range;
- int nr_range;
-
- count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
-
- range = find_range_array(count);
- nr_range = 0;
-
- /*
- * Use early_node_map[] and memblock.reserved.region to get range array
- * at first
- */
- nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
- subtract_range(range, count, 0, start_pfn);
- subtract_range(range, count, end_pfn, -1ULL);
-
- memblock_x86_subtract_reserved(range, count);
- nr_range = clean_sort_range(range, count);
-
- *rangep = range;
- return nr_range;
-}
-
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
-{
- unsigned long end_pfn = -1UL;
-
-#ifdef CONFIG_X86_32
- end_pfn = max_low_pfn;
-#endif
- return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
-}
-
-static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
-{
- int i, count;
- struct range *range;
- int nr_range;
- u64 final_start, final_end;
- u64 free_size;
- struct memblock_region *r;
-
- count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
-
- range = find_range_array(count);
- nr_range = 0;
-
- addr = PFN_UP(addr);
- limit = PFN_DOWN(limit);
-
- for_each_memblock(memory, r) {
- final_start = PFN_UP(r->base);
- final_end = PFN_DOWN(r->base + r->size);
- if (final_start >= final_end)
- continue;
- if (final_start >= limit || final_end <= addr)
- continue;
-
- nr_range = add_range(range, count, nr_range, final_start, final_end);
- }
- subtract_range(range, count, 0, addr);
- subtract_range(range, count, limit, -1ULL);
-
- /* Subtract memblock.reserved.region in range ? */
- if (!get_free)
- goto sort_and_count_them;
- for_each_memblock(reserved, r) {
- final_start = PFN_DOWN(r->base);
- final_end = PFN_UP(r->base + r->size);
- if (final_start >= final_end)
- continue;
- if (final_start >= limit || final_end <= addr)
- continue;
-
- subtract_range(range, count, final_start, final_end);
- }
-
-sort_and_count_them:
- nr_range = clean_sort_range(range, count);
-
- free_size = 0;
- for (i = 0; i < nr_range; i++)
- free_size += range[i].end - range[i].start;
-
- return free_size << PAGE_SHIFT;
-}
-
-u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
-{
- return __memblock_x86_memory_in_range(addr, limit, true);
-}
-
-u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
-{
- return __memblock_x86_memory_in_range(addr, limit, false);
-}
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
- if (start == end)
- return;
-
- if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
- return;
-
- memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
- memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
- if (start == end)
- return;
-
- if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
- return;
-
- memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
- memblock_free(start, end - start);
-}
-
-/*
- * Need to call this function after memblock_x86_register_active_regions,
- * so early_node_map[] is filled already.
- */
-u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
- u64 addr;
- addr = find_memory_core_early(nid, size, align, start, end);
- if (addr != MEMBLOCK_ERROR)
- return addr;
-
- /* Fallback, should already have start end within node range */
- return memblock_find_in_range(start, end, size, align);
-}
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn)
-{
- u64 align = PAGE_SIZE;
-
- *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
- *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
- /* Skip map entries smaller than a page */
- if (*ei_startpfn >= *ei_endpfn)
- return 0;
-
- /* Skip if map is outside the node */
- if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
- return 0;
-
- /* Check for overlaps */
- if (*ei_startpfn < start_pfn)
- *ei_startpfn = start_pfn;
- if (*ei_endpfn > last_pfn)
- *ei_endpfn = last_pfn;
-
- return 1;
-}
-
-/* Walk the memblock.memory map and register active regions within a node */
-void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- unsigned long ei_startpfn;
- unsigned long ei_endpfn;
- struct memblock_region *r;
-
- for_each_memblock(memory, r)
- if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long last_pfn = end >> PAGE_SHIFT;
- unsigned long ei_startpfn, ei_endpfn, ram = 0;
- struct memblock_region *r;
-
- for_each_memblock(memory, r)
- if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- ram += ei_endpfn - ei_startpfn;
-
- return end - start - ((u64)ram << PAGE_SHIFT);
-}
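The file deleted above held x86-only wrappers around memblock. For orientation, a minimal sketch of how those wrappers map onto the generic memblock calls used by the conversions in the rest of this diff (illustrative only, not part of the patch; the function name and the address constants are made up):

#include <linux/memblock.h>

static void __init memblock_api_sketch(void)
{
	phys_addr_t base = 0x1000000, size = 0x100000;

	/* memblock_x86_reserve_range(start, end, "name") becomes: */
	memblock_reserve(base, size);

	/* memblock_x86_free_range(start, end) becomes: */
	memblock_free(base, size);

	/* memblock_x86_register_active_regions(nid, spfn, epfn) becomes: */
	memblock_set_node(base, size, 0);

	/* allocation failure is now signalled by 0, not MEMBLOCK_ERROR */
	if (!memblock_find_in_range(base, base + size, 0x1000, 0x1000))
		return;
}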
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 92faf3a1c53..c80b9fb9573 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
(unsigned long long) pattern,
(unsigned long long) start_bad,
(unsigned long long) end_bad);
- memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+ memblock_reserve(start_bad, end_bad - start_bad);
}
static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -70,24 +70,19 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
static void __init do_one_pass(u64 pattern, u64 start, u64 end)
{
- u64 size = 0;
-
- while (start < end) {
- start = memblock_x86_find_in_range_size(start, &size, 1);
-
- /* done ? */
- if (start >= end)
- break;
- if (start + size > end)
- size = end - start;
-
- printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
- (unsigned long long) start,
- (unsigned long long) start + size,
- (unsigned long long) cpu_to_be64(pattern));
- memtest(pattern, start, size);
-
- start += size;
+ u64 i;
+ phys_addr_t this_start, this_end;
+
+ for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+ this_start = clamp_t(phys_addr_t, this_start, start, end);
+ this_end = clamp_t(phys_addr_t, this_end, start, end);
+ if (this_start < this_end) {
+ printk(KERN_INFO " %010llx - %010llx pattern %016llx\n",
+ (unsigned long long)this_start,
+ (unsigned long long)this_end,
+ (unsigned long long)cpu_to_be64(pattern));
+ memtest(pattern, this_start, this_end - this_start);
+ }
}
}
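The rewritten do_one_pass() above relies on the generic free-range iterator instead of the removed memblock_x86_find_in_range_size(). A standalone sketch of the same pattern (illustrative only, not part of the patch):

#include <linux/mm.h>
#include <linux/memblock.h>

static void __init dump_free_ranges(void)
{
	u64 i;
	phys_addr_t start, end;

	/* walk each range that is in memblock.memory but not reserved */
	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		printk(KERN_INFO "free: 0x%010llx-0x%010llx\n",
		       (unsigned long long)start,
		       (unsigned long long)end);
}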
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index fbeaaf41661..496f494593b 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
- const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
- const u64 nd_high = PFN_PHYS(max_pfn_mapped);
const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
bool remapped = false;
u64 nd_pa;
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
nd_pa = __pa(nd);
remapped = true;
} else {
- nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
- nd_size, SMP_CACHE_BYTES);
- if (nd_pa == MEMBLOCK_ERROR)
- nd_pa = memblock_find_in_range(nd_low, nd_high,
- nd_size, SMP_CACHE_BYTES);
- if (nd_pa == MEMBLOCK_ERROR) {
+ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa) {
pr_err("Cannot find %zu bytes in node %d\n",
nd_size, nid);
return;
}
- memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
nd = __va(nd_pa);
}
@@ -371,8 +364,7 @@ void __init numa_reset_distance(void)
/* numa_distance could be 1LU marking allocation failure, test cnt */
if (numa_distance_cnt)
- memblock_x86_free_range(__pa(numa_distance),
- __pa(numa_distance) + size);
+ memblock_free(__pa(numa_distance), size);
numa_distance_cnt = 0;
numa_distance = NULL; /* enable table creation */
}
@@ -395,13 +387,13 @@ static int __init numa_alloc_distance(void)
phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
size, PAGE_SIZE);
- if (phys == MEMBLOCK_ERROR) {
+ if (!phys) {
pr_warning("NUMA: Warning: can't allocate distance table!\n");
/* don't retry until explicitly reset */
numa_distance = (void *)1LU;
return -ENOMEM;
}
- memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+ memblock_reserve(phys, size);
numa_distance = __va(phys);
numa_distance_cnt = cnt;
@@ -482,8 +474,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
numaram = 0;
}
- e820ram = max_pfn - (memblock_x86_hole_size(0,
- PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+ e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
@@ -505,13 +497,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (WARN_ON(nodes_empty(node_possible_map)))
return -EINVAL;
- for (i = 0; i < mi->nr_blks; i++)
- memblock_x86_register_active_regions(mi->blk[i].nid,
- mi->blk[i].start >> PAGE_SHIFT,
- mi->blk[i].end >> PAGE_SHIFT);
-
- /* for out of order entries */
- sort_node_map();
+ for (i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *mb = &mi->blk[i];
+ memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+ }
/*
* If sections array is gonna be used for pfn -> nid mapping, check
@@ -545,6 +534,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
setup_node_data(nid, start, end);
}
+ /* Dump memblock with node info and return. */
+ memblock_dump_all();
return 0;
}
@@ -582,7 +573,7 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- remove_all_active_ranges();
+ WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance();
ret = init_func();
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3adebe7e536..534255a36b6 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
/* allocate node memory and the lowmem remap area */
node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
- if (node_pa == MEMBLOCK_ERROR) {
+ if (!node_pa) {
pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
size, nid);
return;
}
- memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+ memblock_reserve(node_pa, size);
remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
max_low_pfn << PAGE_SHIFT,
size, LARGE_PAGE_BYTES);
- if (remap_pa == MEMBLOCK_ERROR) {
+ if (!remap_pa) {
pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
size, nid);
- memblock_x86_free_range(node_pa, node_pa + size);
+ memblock_free(node_pa, size);
return;
}
- memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+ memblock_reserve(remap_pa, size);
remap_va = phys_to_virt(remap_pa);
/* perform actual remap */
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index dd27f401f0a..92e27119ee1 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void)
for_each_online_node(i)
pages += free_all_bootmem_node(NODE_DATA(i));
- pages += free_all_memory_core_early(MAX_NUMNODES);
+ pages += free_low_memory_core_early(MAX_NUMNODES);
return pages;
}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index d0ed086b624..46db56845f1 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
return -ENOENT;
}
+static u64 mem_hole_size(u64 start, u64 end)
+{
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = PFN_DOWN(end);
+
+ if (start_pfn < end_pfn)
+ return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+ return 0;
+}
+
/*
* Sets up nid to range from @start to @end. The return value is -errno if
* something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* Calculate target node size. x86_32 freaks on __udivdi3() so do
* the division in ulong number of pages and convert back.
*/
- size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+ size = max_addr - addr - mem_hole_size(addr, max_addr);
size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
/*
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* Continue to add memory to this fake node if its
* non-reserved memory is less than the per-node size.
*/
- while (end - start -
- memblock_x86_hole_size(start, end) < size) {
+ while (end - start - mem_hole_size(start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > limit) {
end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;
/*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end -
- memblock_x86_hole_size(end, limit) < size)
+ if (limit - end - mem_hole_size(end, limit) < size)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
u64 end = start + size;
- while (end - start - memblock_x86_hole_size(start, end) < size) {
+ while (end - start - mem_hole_size(start, end) < size) {
end += FAKE_NODE_MIN_SIZE;
if (end > max_addr) {
end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* creates a uniform distribution of node sizes across the entire
* machine (but not necessarily over physical nodes).
*/
- min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
- MAX_NUMNODES;
+ min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
min_size = max(min_size, FAKE_NODE_MIN_SIZE);
if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* this one must extend to the boundary.
*/
if (end < dma32_end && dma32_end - end -
- memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+ mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
end = dma32_end;
/*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
* next node, this one must extend to the end of the
* physical node.
*/
- if (limit - end -
- memblock_x86_hole_size(end, limit) < size)
+ if (limit - end - mem_hole_size(end, limit) < size)
end = limit;
ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
phys_size, PAGE_SIZE);
- if (phys == MEMBLOCK_ERROR) {
+ if (!phys) {
pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
goto no_emu;
}
- memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+ memblock_reserve(phys, phys_size);
phys_dist = __va(phys);
for (i = 0; i < numa_dist_cnt; i++)
@@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
/* free the copied physical distance table */
if (phys_dist)
- memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+ memblock_free(__pa(phys_dist), phys_size);
return;
no_emu:
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f9e526742fa..eda2acbb6e8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -998,7 +998,7 @@ out_err:
}
EXPORT_SYMBOL(set_memory_uc);
-int _set_memory_array(unsigned long *addr, int addrinarray,
+static int _set_memory_array(unsigned long *addr, int addrinarray,
unsigned long new_type)
{
int i, j;
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 81dbfdeb080..fd61b3fb734 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -69,6 +69,12 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain;
+ apic_id = pa->apic_id;
+ if (!cpu_has_x2apic && (apic_id >= 0xff)) {
+ printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n",
+ pxm, apic_id);
+ return;
+ }
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -76,7 +82,6 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
return;
}
- apic_id = pa->apic_id;
if (apic_id >= MAX_LOCAL_APIC) {
printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
return;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa10ed..7b65f752c5f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
break;
}
if (filter[i].jt != 0) {
- if (filter[i].jf)
- t_offset += is_near(f_offset) ? 2 : 6;
+ if (filter[i].jf && f_offset)
+ t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
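The one-byte change above looks like a jump-length fix: the unconditional jump emitted by EMIT_JMP() is 2 bytes when short and 5 bytes when near (6 bytes is the length of a conditional near jump), and EMIT_JMP() emits nothing at all when its offset is zero, which is presumably why the extra f_offset test is added before accounting for it.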
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
index 446902b2a6b..1599f568f0e 100644
--- a/arch/x86/oprofile/Makefile
+++ b/arch/x86/oprofile/Makefile
@@ -4,9 +4,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
- timer_int.o )
+ timer_int.o nmi_timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
op_model_ppro.o op_model_p4.o
-oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index cdfe4c54dec..9e138d00ad3 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -16,34 +16,23 @@
* with the NMI mode driver.
*/
+#ifdef CONFIG_X86_LOCAL_APIC
extern int op_nmi_init(struct oprofile_operations *ops);
-extern int op_nmi_timer_init(struct oprofile_operations *ops);
extern void op_nmi_exit(void);
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+#else
+static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
+static void op_nmi_exit(void) { }
+#endif
+extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- int ret;
-
- ret = -ENODEV;
-
-#ifdef CONFIG_X86_LOCAL_APIC
- ret = op_nmi_init(ops);
-#endif
-#ifdef CONFIG_X86_IO_APIC
- if (ret < 0)
- ret = op_nmi_timer_init(ops);
-#endif
ops->backtrace = x86_backtrace;
-
- return ret;
+ return op_nmi_init(ops);
}
-
void oprofile_arch_exit(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
op_nmi_exit();
-#endif
}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 75f9528e037..26b8a8514ee 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -595,24 +595,36 @@ static int __init p4_init(char **cpu_type)
return 0;
}
-static int force_arch_perfmon;
-static int force_cpu_type(const char *str, struct kernel_param *kp)
+enum __force_cpu_type {
+ reserved = 0, /* do not force */
+ timer,
+ arch_perfmon,
+};
+
+static int force_cpu_type;
+
+static int set_cpu_type(const char *str, struct kernel_param *kp)
{
- if (!strcmp(str, "arch_perfmon")) {
- force_arch_perfmon = 1;
+ if (!strcmp(str, "timer")) {
+ force_cpu_type = timer;
+ printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
+ } else if (!strcmp(str, "arch_perfmon")) {
+ force_cpu_type = arch_perfmon;
printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+ } else {
+ force_cpu_type = 0;
}
return 0;
}
-module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
+module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
- if (force_arch_perfmon && cpu_has_arch_perfmon)
+ if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
return 0;
/*
@@ -679,6 +691,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
if (!cpu_has_apic)
return -ENODEV;
+ if (force_cpu_type == timer)
+ return -ENODEV;
+
switch (vendor) {
case X86_VENDOR_AMD:
/* Needs to be at least an Athlon (or hammer in 32bit mode) */
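Taken together with the init.c and Makefile hunks above, this adds a way to force the generic timer mode from the command line; with oprofile built in that would presumably be something like:

	oprofile.cpu_type=timer

alongside the existing oprofile.cpu_type=arch_perfmon override handled by the same parameter callback.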
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
deleted file mode 100644
index 7f8052cd662..00000000000
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * @file nmi_timer_int.c
- *
- * @remark Copyright 2003 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo <zwane@linuxpower.ca>
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/rcupdate.h>
-#include <linux/kdebug.h>
-
-#include <asm/nmi.h>
-#include <asm/apic.h>
-#include <asm/ptrace.h>
-
-static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
-{
- oprofile_add_sample(regs, 0);
- return NMI_HANDLED;
-}
-
-static int timer_start(void)
-{
- if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
- 0, "oprofile-timer"))
- return 1;
- return 0;
-}
-
-
-static void timer_stop(void)
-{
- unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
- synchronize_sched(); /* Allow already-started NMIs to complete. */
-}
-
-
-int __init op_nmi_timer_init(struct oprofile_operations *ops)
-{
- ops->start = timer_start;
- ops->stop = timer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
- return 0;
-}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index db0e9a51e61..da8fe0535ff 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -44,7 +44,7 @@ static inline void set_bios_x(void)
pcibios_enabled = 1;
set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
if (__supported_pte_mask & _PAGE_NX)
- printk(KERN_INFO "PCI : PCI BIOS aera is rw and x. Use pci=nobios if you want it NX.\n");
+ printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
}
/*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 37718f0f053..4cf9bd0a165 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -238,7 +238,8 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
spin_lock_irqsave(&rtc_lock, flags);
efi_call_phys_prelog();
- status = efi_call_phys2(efi_phys.get_time, tm, tc);
+ status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+ virt_to_phys(tc));
efi_call_phys_epilog();
spin_unlock_irqrestore(&rtc_lock, flags);
return status;
@@ -352,8 +353,7 @@ void __init efi_memblock_x86_reserve_range(void)
boot_params.efi_info.efi_memdesc_size;
memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
- memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
- "EFI memmap");
+ memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
}
#if EFI_DEBUG
@@ -397,16 +397,14 @@ void __init efi_reserve_boot_services(void)
if ((start+size >= virt_to_phys(_text)
&& start <= virt_to_phys(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
- memblock_x86_check_reserved_size(&start, &size,
- 1<<EFI_PAGE_SHIFT)) {
+ memblock_is_region_reserved(start, size)) {
/* Could not reserve, skip it */
md->num_pages = 0;
memblock_dbg(PFX "Could not reserve boot range "
"[0x%010llx-0x%010llx]\n",
start, start+size-1);
} else
- memblock_x86_reserve_range(start, start+size,
- "EFI Boot");
+ memblock_reserve(start, size);
}
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index e36bf714cb7..40e446941dd 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -39,43 +39,14 @@
*/
static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
void efi_call_phys_prelog(void)
{
- unsigned long cr4;
- unsigned long temp;
struct desc_ptr gdt_descr;
local_irq_save(efi_rt_eflags);
- /*
- * If I don't have PAE, I should just duplicate two entries in page
- * directory. If I have PAE, I just need to duplicate one entry in
- * page directory.
- */
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- swapper_pg_dir[0].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- } else {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- efi_bak_pg_dir_pointer[1].pgd =
- swapper_pg_dir[pgd_index(0x400000)].pgd;
- swapper_pg_dir[pgd_index(0)].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- temp = PAGE_OFFSET + 0x400000;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- swapper_pg_dir[pgd_index(temp)].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
void efi_call_phys_epilog(void)
{
- unsigned long cr4;
struct desc_ptr gdt_descr;
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- } else {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- efi_bak_pg_dir_pointer[1].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(swapper_pg_dir);
__flush_tlb_all();
local_irq_restore(efi_rt_eflags);
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 25bfdbb5b13..3c6e328483c 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -245,16 +245,24 @@ struct console early_mrst_console = {
* Following is the early console based on Medfield HSU (High
* Speed UART) device.
*/
-#define HSU_PORT2_PADDR 0xffa28180
+#define HSU_PORT_BASE 0xffa28080
static void __iomem *phsu;
-void hsu_early_console_init(void)
+void hsu_early_console_init(const char *s)
{
+ unsigned long paddr, port = 0;
u8 lcr;
- phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
- HSU_PORT2_PADDR);
+ /*
+ * Select the early HSU console port if specified by user in the
+ * kernel command line.
+ */
+ if (*s && !kstrtoul(s, 10, &port))
+ port = clamp_val(port, 0, 2);
+
+ paddr = HSU_PORT_BASE + port * 0x80;
+ phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, paddr);
/* Disable FIFO */
writeb(0x0, phsu + UART_FCR);
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index b1489a06a49..ad4ec1cb097 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
EXPORT_SYMBOL_GPL(sfi_mrtc_array);
int sfi_mrtc_num;
+static void mrst_power_off(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+ else
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
/* parse all the mtimer info to a static mtimer array */
static int __init sfi_parse_mtmr(struct sfi_table_header *table)
{
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void)
return 0;
}
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
- intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
- intel_scu_ipc_simple_command(0xf1, 0);
-}
-
/*
* Moorestown does not have external NMI source nor port 0x61 to report
* NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
return max7315;
}
+static void *tca6416_platform_data(void *info)
+{
+ static struct pca953x_platform_data tca6416;
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ strcpy(i2c_info->type, "tca6416");
+ strcpy(base_pin_name, "tca6416_base");
+ strcpy(intr_pin_name, "tca6416_int");
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ tca6416.gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ tca6416.irq_base = -1;
+ }
+ return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("mpu3050_int");
+
+ if (intr == -1)
+ return NULL;
+
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ return NULL;
+}
+
static void __init *emc1403_platform_data(void *info)
{
static short intr2nd_pdata;
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
static const struct devs_id __initconst device_ids[] = {
{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+ {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+ {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+ {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
/* MSIC subdevices */
{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
index 309c70fb775..5d4ba301e77 100644
--- a/arch/x86/platform/uv/uv_sysfs.c
+++ b/arch/x86/platform/uv/uv_sysfs.c
@@ -19,7 +19,7 @@
* Copyright (c) Russ Anderson
*/
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index f8208267733..d511aa97533 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -18,14 +18,21 @@ chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
-posttest: $(obj)/test_get_len vmlinux
+quiet_cmd_sanitytest = TEST $@
+ cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
+
+posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
+ $(call cmd,sanitytest)
-hostprogs-y := test_get_len
+hostprogs-y += test_get_len insn_sanity
# -I needed for generated C source and C source which in the kernel tree.
HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+
# Dependencies are also needed.
$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index eaf11f52fc0..5f6a5b6c3a1 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -47,7 +47,7 @@ BEGIN {
sep_expr = "^\\|$"
group_expr = "^Grp[0-9A-Za-z]+"
- imm_expr = "^[IJAO][a-z]"
+ imm_expr = "^[IJAOL][a-z]"
imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
@@ -59,6 +59,7 @@ BEGIN {
imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
imm_flag["Ob"] = "INAT_MOFFSET"
imm_flag["Ov"] = "INAT_MOFFSET"
+ imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
@@ -70,8 +71,12 @@ BEGIN {
lprefix3_expr = "\\(F2\\)"
max_lprefix = 4
- vexok_expr = "\\(VEX\\)"
- vexonly_expr = "\\(oVEX\\)"
+ # All opcodes starting with lower-case 'v' or with (v1) superscript
+ # accepts VEX prefix
+ vexok_opcode_expr = "^v.*"
+ vexok_expr = "\\(v1\\)"
+ # All opcodes with (v) superscript supports *only* VEX prefix
+ vexonly_expr = "\\(v\\)"
prefix_expr = "\\(Prefix\\)"
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
@@ -85,8 +90,8 @@ BEGIN {
prefix_num["SEG=GS"] = "INAT_PFX_GS"
prefix_num["SEG=SS"] = "INAT_PFX_SS"
prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
- prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2"
- prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3"
+ prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+ prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
clear_vars()
}
@@ -310,12 +315,10 @@ function convert_operands(count,opnd, i,j,imm,mod)
if (match(opcode, fpu_expr))
flags = add_flags(flags, "INAT_MODRM")
- # check VEX only code
+ # check VEX codes
if (match(ext, vexonly_expr))
flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
-
- # check VEX only code
- if (match(ext, vexok_expr))
+ else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
flags = add_flags(flags, "INAT_VEXOK")
# check prefixes
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
new file mode 100644
index 00000000000..cc2f8c13128
--- /dev/null
+++ b/arch/x86/tools/insn_sanity.c
@@ -0,0 +1,275 @@
+/*
+ * x86 decoder sanity test - based on test_get_insn.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ * Copyright (C) Hitachi, Ltd., 2011
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define unlikely(cond) (cond)
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
+#include <asm/insn.h>
+#include <inat.c>
+#include <insn.c>
+
+/*
+ * Test of instruction analysis against tampering.
+ * Feed random binary to instruction decoder and ensure not to
+ * access out-of-instruction-buffer.
+ */
+
+#define DEFAULT_MAX_ITER 10000
+#define INSN_NOP 0x90
+
+static const char *prog; /* Program name */
+static int verbose; /* Verbosity */
+static int x86_64; /* x86-64 bit mode flag */
+static unsigned int seed; /* Random seed */
+static unsigned long iter_start; /* Start of iteration number */
+static unsigned long iter_end = DEFAULT_MAX_ITER; /* End of iteration number */
+static FILE *input_file; /* Input file name */
+
+static void usage(const char *err)
+{
+ if (err)
+ fprintf(stderr, "Error: %s\n\n", err);
+ fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
+ fprintf(stderr, "\t-y 64bit mode\n");
+ fprintf(stderr, "\t-n 32bit mode\n");
+ fprintf(stderr, "\t-v Verbosity(-vv dumps any decoded result)\n");
+ fprintf(stderr, "\t-s Give a random seed (and iteration number)\n");
+ fprintf(stderr, "\t-m Give a maximum iteration number\n");
+ fprintf(stderr, "\t-i Give an input file with decoded binary\n");
+ exit(1);
+}
+
+static void dump_field(FILE *fp, const char *name, const char *indent,
+ struct insn_field *field)
+{
+ fprintf(fp, "%s.%s = {\n", indent, name);
+ fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n",
+ indent, field->value, field->bytes[0], field->bytes[1],
+ field->bytes[2], field->bytes[3]);
+ fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent,
+ field->got, field->nbytes);
+}
+
+static void dump_insn(FILE *fp, struct insn *insn)
+{
+ fprintf(fp, "Instruction = {\n");
+ dump_field(fp, "prefixes", "\t", &insn->prefixes);
+ dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix);
+ dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix);
+ dump_field(fp, "opcode", "\t", &insn->opcode);
+ dump_field(fp, "modrm", "\t", &insn->modrm);
+ dump_field(fp, "sib", "\t", &insn->sib);
+ dump_field(fp, "displacement", "\t", &insn->displacement);
+ dump_field(fp, "immediate1", "\t", &insn->immediate1);
+ dump_field(fp, "immediate2", "\t", &insn->immediate2);
+ fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
+ insn->attr, insn->opnd_bytes, insn->addr_bytes);
+ fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
+ insn->length, insn->x86_64, insn->kaddr);
+}
+
+static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter,
+ unsigned char *insn_buf, struct insn *insn)
+{
+ int i;
+
+ fprintf(fp, "%s:\n", msg);
+
+ dump_insn(fp, insn);
+
+ fprintf(fp, "You can reproduce this with below command(s);\n");
+
+ /* Input a decoded instruction sequence directly */
+ fprintf(fp, " $ echo ");
+ for (i = 0; i < MAX_INSN_SIZE; i++)
+ fprintf(fp, " %02x", insn_buf[i]);
+ fprintf(fp, " | %s -i -\n", prog);
+
+ if (!input_file) {
+ fprintf(fp, "Or \n");
+ /* Give a seed and iteration number */
+ fprintf(fp, " $ %s -s 0x%x,%lu\n", prog, seed, nr_iter);
+ }
+}
+
+static void init_random_seed(void)
+{
+ int fd;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0)
+ goto fail;
+
+ if (read(fd, &seed, sizeof(seed)) != sizeof(seed))
+ goto fail;
+
+ close(fd);
+ return;
+fail:
+ usage("Failed to open /dev/urandom");
+}
+
+/* Read given instruction sequence from the input file */
+static int read_next_insn(unsigned char *insn_buf)
+{
+ char buf[256] = "", *tmp;
+ int i;
+
+ tmp = fgets(buf, ARRAY_SIZE(buf), input_file);
+ if (tmp == NULL || feof(input_file))
+ return 0;
+
+ for (i = 0; i < MAX_INSN_SIZE; i++) {
+ insn_buf[i] = (unsigned char)strtoul(tmp, &tmp, 16);
+ if (*tmp != ' ')
+ break;
+ }
+
+ return i;
+}
+
+static int generate_insn(unsigned char *insn_buf)
+{
+ int i;
+
+ if (input_file)
+ return read_next_insn(insn_buf);
+
+ /* Fills buffer with random binary up to MAX_INSN_SIZE */
+ for (i = 0; i < MAX_INSN_SIZE - 1; i += 2)
+ *(unsigned short *)(&insn_buf[i]) = random() & 0xffff;
+
+ while (i < MAX_INSN_SIZE)
+ insn_buf[i++] = random() & 0xff;
+
+ return i;
+}
+
+static void parse_args(int argc, char **argv)
+{
+ int c;
+ char *tmp = NULL;
+ int set_seed = 0;
+
+ prog = argv[0];
+ while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) {
+ switch (c) {
+ case 'y':
+ x86_64 = 1;
+ break;
+ case 'n':
+ x86_64 = 0;
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'i':
+ if (strcmp("-", optarg) == 0)
+ input_file = stdin;
+ else
+ input_file = fopen(optarg, "r");
+ if (!input_file)
+ usage("Failed to open input file");
+ break;
+ case 's':
+ seed = (unsigned int)strtoul(optarg, &tmp, 0);
+ if (*tmp == ',') {
+ optarg = tmp + 1;
+ iter_start = strtoul(optarg, &tmp, 0);
+ }
+ if (*tmp != '\0' || tmp == optarg)
+ usage("Failed to parse seed");
+ set_seed = 1;
+ break;
+ case 'm':
+ iter_end = strtoul(optarg, &tmp, 0);
+ if (*tmp != '\0' || tmp == optarg)
+ usage("Failed to parse max_iter");
+ break;
+ default:
+ usage(NULL);
+ }
+ }
+
+ /* Check errors */
+ if (iter_end < iter_start)
+ usage("Max iteration number must be bigger than iter-num");
+
+ if (set_seed && input_file)
+ usage("Don't use input file (-i) with random seed (-s)");
+
+ /* Initialize random seed */
+ if (!input_file) {
+ if (!set_seed) /* No seed is given */
+ init_random_seed();
+ srand(seed);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct insn insn;
+ int insns = 0;
+ int errors = 0;
+ unsigned long i;
+ unsigned char insn_buf[MAX_INSN_SIZE * 2];
+
+ parse_args(argc, argv);
+
+ /* Prepare stop bytes with NOPs */
+ memset(insn_buf + MAX_INSN_SIZE, INSN_NOP, MAX_INSN_SIZE);
+
+ for (i = 0; i < iter_end; i++) {
+ if (generate_insn(insn_buf) <= 0)
+ break;
+
+ if (i < iter_start) /* Skip to given iteration number */
+ continue;
+
+ /* Decode an instruction */
+ insn_init(&insn, insn_buf, x86_64);
+ insn_get_length(&insn);
+
+ if (insn.next_byte <= insn.kaddr ||
+ insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
+ /* Access out-of-range memory */
+ dump_stream(stderr, "Error: Found an access violation", i, insn_buf, &insn);
+ errors++;
+ } else if (verbose && !insn_complete(&insn))
+ dump_stream(stdout, "Info: Found an undecodable input", i, insn_buf, &insn);
+ else if (verbose >= 2)
+ dump_insn(stdout, &insn);
+ insns++;
+ }
+
+ fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+
+ return errors ? 1 : 0;
+}
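The Makefile hunk above wires this checker into posttest as "insn_sanity $(posttest_64bit) -m 1000000". Run by hand it might look like this (illustrative; the seed value is made up):

	$ ./insn_sanity -y -m 1000000
	$ ./insn_sanity -y -s 0x12345678,100

The second form replays a failure from a reported seed and iteration number, matching the reproduction hint printed by dump_stream().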
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 26c731a106a..fdce49c7aff 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -29,7 +29,8 @@ config XEN_PVHVM
config XEN_MAX_DOMAIN_MEMORY
int
- default 128
+ default 500 if X86_64
+ default 64 if X86_32
depends on XEN
help
This only affects the sizing of some bss arrays, the unused
@@ -48,3 +49,4 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
+
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index 7c0fedd98ea..ef1db1900d8 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -109,7 +109,7 @@ static const struct file_operations u32_array_fops = {
.llseek = no_llseek,
};
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
u32 *array, unsigned elements)
{
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index e2813208483..78d25499be5 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,7 +3,7 @@
struct dentry * __init xen_init_debugfs(void);
-struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
struct dentry *parent,
u32 *array, unsigned elements);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1f928659c33..12eb07bfb26 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1215,8 +1215,6 @@ asmlinkage void __init xen_start_kernel(void)
local_irq_disable();
early_boot_irqs_disabled = true;
- memblock_init();
-
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
xen_ident_map_ISA();
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 5a40d24ba33..3a5f55d5190 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -54,6 +54,20 @@ static int map_pte_fn(pte_t *pte, struct page *pmd_page,
return 0;
}
+/*
+ * This function is used to map shared frames to store grant status. It is
+ * different from map_pte_fn above, the frames type here is uint64_t.
+ */
+static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+ uint64_t **frames = (uint64_t **)data;
+
+ set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+ (*frames)++;
+ return 0;
+}
+
static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
unsigned long addr, void *data)
{
@@ -64,10 +78,10 @@ static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
- struct grant_entry **__shared)
+ void **__shared)
{
int rc;
- struct grant_entry *shared = *__shared;
+ void *shared = *__shared;
if (shared == NULL) {
struct vm_struct *area =
@@ -83,8 +97,30 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
return rc;
}
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
- unsigned long nr_gframes)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+ unsigned long max_nr_gframes,
+ grant_status_t **__shared)
+{
+ int rc;
+ grant_status_t *shared = *__shared;
+
+ if (shared == NULL) {
+ /* No need to pass in PTE as we are going to do it
+ * in apply_to_page_range anyhow. */
+ struct vm_struct *area =
+ alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
+ BUG_ON(area == NULL);
+ shared = area->addr;
+ *__shared = shared;
+ }
+
+ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+ PAGE_SIZE * nr_gframes,
+ map_pte_fn_status, &frames);
+ return rc;
+}
+
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
{
apply_to_page_range(&init_mm, (unsigned long)shared,
PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 87f6673b120..58a0e46c404 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1774,10 +1774,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
__xen_write_cr3(true, __pa(pgd));
xen_mc_issue(PARAVIRT_LAZY_CPU);
- memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
- __pa(xen_start_info->pt_base +
- xen_start_info->nr_pt_frames * PAGE_SIZE),
- "XEN PAGETABLES");
+ memblock_reserve(__pa(xen_start_info->pt_base),
+ xen_start_info->nr_pt_frames * PAGE_SIZE);
return pgd;
}
@@ -1853,10 +1851,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
PFN_DOWN(__pa(initial_page_table)));
xen_write_cr3(__pa(initial_page_table));
- memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
- __pa(xen_start_info->pt_base +
- xen_start_info->nr_pt_frames * PAGE_SIZE),
- "XEN PAGETABLES");
+ memblock_reserve(__pa(xen_start_info->pt_base),
+ xen_start_info->nr_pt_frames * PAGE_SIZE);
return initial_page_table;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 38d0af4fefe..e03c6369217 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -75,7 +75,7 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
if (i == XEN_EXTRA_MEM_MAX_REGIONS)
printk(KERN_WARNING "Warning: not enough extra memory regions\n");
- memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+ memblock_reserve(start, size);
xen_max_p2m_pfn = PFN_DOWN(start + size);
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;
- ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
- if (ret > 0)
- max_pages = ret;
+ /*
+ * For the initial domain we use the maximum reservation as
+ * the maximum page.
+ *
+ * For guest domains the current maximum reservation reflects
+ * the current maximum rather than the static maximum. In this
+ * case the e820 map provided to us will cover the static
+ * maximum region.
+ */
+ if (xen_initial_domain()) {
+ ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+ if (ret > 0)
+ max_pages = ret;
+ }
+
return min(max_pages, MAX_DOMAIN_PAGES);
}
@@ -299,9 +311,8 @@ char * __init xen_memory_setup(void)
* - xen_start_info
* See comment above "struct start_info" in <xen/interface/xen.h>
*/
- memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
- __pa(xen_start_info->pt_base),
- "XEN START INFO");
+ memblock_reserve(__pa(xen_start_info->mfn_list),
+ xen_start_info->pt_base - xen_start_info->mfn_list);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
@@ -410,6 +421,6 @@ void __init xen_arch_setup(void)
#endif
disable_cpuidle();
boot_option_idle_override = IDLE_HALT;
-
+ WARN_ON(set_pm_idle_to_default());
fiddle_vdso();
}
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h
index cbdf2ffaacf..bb06968be22 100644
--- a/arch/xtensa/include/asm/socket.h
+++ b/arch/xtensa/include/asm/socket.h
@@ -73,4 +73,7 @@
#define SO_RXQ_OVFL 40
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8accb0b0..6abbedd09d8 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE 17 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
index b1c981e39b5..6d4db7e8ffa 100644
--- a/arch/xtensa/include/asm/types.h
+++ b/arch/xtensa/include/asm/types.h
@@ -23,8 +23,6 @@
#ifndef __ASSEMBLY__
-typedef unsigned short umode_t;
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index f3e5eb43f71..ac62f9cf1e1 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -41,14 +41,6 @@ static struct clocksource ccount_clocksource = {
.rating = 200,
.read = ccount_read,
.mask = CLOCKSOURCE_MASK(32),
- /*
- * With a shift of 22 the lower limit of the cpu clock is
- * 1MHz, where NSEC_PER_CCOUNT is 1000 or a bit less than
- * 2^10: Since we have 32 bits and the multiplicator can
- * already take up as much as 10 bits, this leaves us with
- * remaining upper 22 bits.
- */
- .shift = 22,
};
static irqreturn_t timer_interrupt(int irq, void *dev_id);
@@ -66,10 +58,7 @@ void __init time_init(void)
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
(int)(ccount_per_jiffy/(10000/HZ))%100);
#endif
- ccount_clocksource.mult =
- clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ,
- ccount_clocksource.shift);
- clocksource_register(&ccount_clocksource);
+ clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
/* Initialize the linux timer interrupt. */
diff --git a/block/Kconfig b/block/Kconfig
index e97934eecec..09acf1b3990 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,6 +99,12 @@ config BLK_DEV_THROTTLING
See Documentation/cgroups/blkio-controller.txt for more information.
+menu "Partition Types"
+
+source "block/partitions/Kconfig"
+
+endmenu
+
endif # BLOCK
config BLOCK_COMPAT
diff --git a/block/Makefile b/block/Makefile
index 514c6e4f427..39b76ba66ff 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,8 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
+ blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \
+ partition-generic.o partitions/
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906..b8c143d68ee 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+ struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+ struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
struct cgroup_subsys blkio_subsys = {
.name = "blkio",
.create = blkiocg_create,
- .can_attach_task = blkiocg_can_attach_task,
- .attach_task = blkiocg_attach_task,
+ .can_attach = blkiocg_can_attach,
+ .attach = blkiocg_attach,
.destroy = blkiocg_destroy,
.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
@@ -1626,30 +1628,39 @@ done:
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
+ struct task_struct *task;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- task_lock(tsk);
- ioc = tsk->io_context;
- if (ioc && atomic_read(&ioc->nr_tasks) > 1)
- ret = -EINVAL;
- task_unlock(tsk);
-
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ task_lock(task);
+ ioc = task->io_context;
+ if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+ ret = -EINVAL;
+ task_unlock(task);
+ if (ret)
+ break;
+ }
return ret;
}
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
+ struct task_struct *task;
struct io_context *ioc;
- task_lock(tsk);
- ioc = tsk->io_context;
- if (ioc)
- ioc->cgroup_changed = 1;
- task_unlock(tsk);
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ task_lock(task);
+ ioc = task->io_context;
+ if (ioc)
+ ioc->cgroup_changed = 1;
+ task_unlock(task);
+ }
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd..15de223c7f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all)
blk_throtl_drain(q);
- __blk_run_queue(q);
+ /*
+ * This function might be called on a queue which failed
+ * driver init after queue creation. Some drivers
+ * (e.g. fd) get unhappy in such cases. Kick queue iff
+ * dispatch queue has something on it.
+ */
+ if (!list_empty(&q->queue_head))
+ __blk_run_queue(q);
if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
+ q->node = node_id;
err = bdi_init(&q->backing_dev_info);
if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+ q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);
@@ -563,18 +571,9 @@ struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
{
- return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
- spinlock_t *lock, int node_id)
-{
if (!q)
return NULL;
- q->node = node_id;
if (blk_init_free_list(q))
return NULL;
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
return NULL;
}
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
diff --git a/block/blk-map.c b/block/blk-map.c
index 164cd005970..623e1cd4cff 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
+ if (!reading)
bio->bi_rw |= REQ_WRITE;
if (do_copy)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e74d6d13838..4af6f5cc116 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- int tag = rq->tag;
+ unsigned tag = rq->tag; /* negative tags invalid */
- BUG_ON(tag == -1);
-
- if (unlikely(tag >= bqt->max_depth)) {
- /*
- * This can happen after tag depth has been reduced.
- * But tag shouldn't be larger than real_max_depth.
- */
- WARN_ON(tag >= bqt->real_max_depth);
- return;
- }
+ BUG_ON(tag >= bqt->real_max_depth);
list_del_init(&rq->queuelist);
rq->cmd_flags &= ~REQ_QUEUED;
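The single BUG_ON above is sufficient because rq->tag is an int and an untagged request carries -1: assigning that to an unsigned variable converts it to UINT_MAX, which is always >= real_max_depth, so the old explicit tag == -1 test is subsumed by the range check. A stand-alone illustration of the wrap-around (userspace sketch, not kernel code):

#include <assert.h>
#include <limits.h>

int main(void)
{
	int tag = -1;		/* rq->tag of an untagged request */
	unsigned utag = tag;	/* the hunk's "unsigned tag = rq->tag" idiom */

	assert(utag == UINT_MAX);	/* so any "tag >= real_max_depth" check fires */
	return 0;
}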
diff --git a/block/bsg.c b/block/bsg.c
index 702f1316bb8..9651ec7b87c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1070,7 +1070,7 @@ EXPORT_SYMBOL_GPL(bsg_register_queue);
static struct cdev bsg_cdev;
-static char *bsg_devnode(struct device *dev, mode_t *mode)
+static char *bsg_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 16ace89613b..3548705b04e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
/*
* reposition in fifo if next is older than rq
*/
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
cfq_remove_request(next);
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(next), rq_is_sync(next));
+
+ cfqq = RQ_CFQQ(next);
+ /*
+ * all requests of this queue are merged to other queues, delete it
+ * from the service tree. If it's the active_queue,
+ * cfq_dispatch_requests() will choose to expire it or do idle
+ */
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+ cfqq != cfqd->active_queue)
+ cfq_del_cfqq_rr(cfqd, cfqq);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -3184,7 +3196,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
}
}
- if (ret)
+ if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");
return ret;
@@ -3200,6 +3212,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
+ int ret;
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -3207,6 +3220,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (!ioc)
return NULL;
+retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
@@ -3215,7 +3229,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic == NULL)
goto err;
- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+ if (ret == -EEXIST) {
+ /* someone has linked cic to ioc already */
+ cfq_cic_free(cic);
+ goto retry;
+ } else if (ret)
goto err_free;
out:
@@ -4036,6 +4055,11 @@ static void *cfq_init_queue(struct request_queue *q)
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
+
+ spin_lock(&cic_index_lock);
+ ida_remove(&cic_index_ida, cfqd->cic_index);
+ spin_unlock(&cic_index_lock);
+
kfree(cfqd);
return NULL;
}
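The cfq_get_io_context() change above handles two tasks racing to link a cic for the same io_context: the loser sees -EEXIST from cfq_cic_link(), frees its own cic and retries the lookup, which now finds the winner's copy. The general shape of that idiom (illustrative sketch; registry_lookup/registry_insert/alloc_thing/free_thing are hypothetical stand-ins for cfq_cic_lookup/cfq_cic_link/cfq_alloc_io_context/cfq_cic_free):

static struct thing *get_or_create(struct registry *reg, gfp_t gfp)
{
	struct thing *t;
	int ret;

retry:
	t = registry_lookup(reg);
	if (t)
		return t;

	t = alloc_thing(gfp);
	if (!t)
		return NULL;

	ret = registry_insert(reg, t);
	if (ret == -EEXIST) {
		/* someone else published theirs first: drop ours, use theirs */
		free_thing(t);
		goto retry;
	}
	if (ret) {
		free_thing(t);
		return NULL;
	}
	return t;
}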
diff --git a/block/genhd.c b/block/genhd.c
index 02e9fca8082..83e7c04015e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/kobj_map.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
@@ -507,7 +506,7 @@ static int exact_lock(dev_t devt, void *data)
return 0;
}
-void register_disk(struct gendisk *disk)
+static void register_disk(struct gendisk *disk)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
@@ -1109,7 +1108,7 @@ struct class block_class = {
.name = "block",
};
-static char *block_devnode(struct device *dev, mode_t *mode)
+static char *block_devnode(struct device *dev, umode_t *mode)
{
struct gendisk *disk = dev_to_disk(dev);
diff --git a/block/ioctl.c b/block/ioctl.c
index ca939fc1030..4828fa34981 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,7 +5,7 @@
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
@@ -180,6 +180,26 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
/*
+ * Is it an unrecognized ioctl? The correct returns are either
+ * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
+ * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
+ * code before returning.
+ *
+ * Confused drivers sometimes return EINVAL, which is wrong. It
+ * means "I understood the ioctl command, but the parameters to
+ * it were wrong".
+ *
+ * We should aim to just fix the broken drivers, the EINVAL case
+ * should go away.
+ */
+static inline int is_unrecognized_ioctl(int ret)
+{
+ return ret == -EINVAL ||
+ ret == -ENOTTY ||
+ ret == -ENOIOCTLCMD;
+}
+
+/*
* always keep this in sync with compat_blkdev_ioctl()
*/
int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
@@ -196,8 +216,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EACCES;
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
+ if (!is_unrecognized_ioctl(ret))
return ret;
fsync_bdev(bdev);
@@ -206,8 +225,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKROSET:
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
+ if (!is_unrecognized_ioctl(ret))
return ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
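is_unrecognized_ioctl() above encodes the convention the comment spells out; for a block driver the practical rule is to return -ENOTTY for commands it does not implement and keep -EINVAL for bad arguments to commands it does. A minimal sketch (the command name and bound are hypothetical, not from the patch):

/*
 * Unknown commands return -ENOTTY so blkdev_ioctl()'s fallback path runs;
 * -EINVAL only means "known command, bad parameters".
 */
static int exampleblk_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXAMPLEBLK_SET_FOO:		/* hypothetical command */
		if (arg > EXAMPLEBLK_FOO_MAX)	/* hypothetical bound */
			return -EINVAL;
		return 0;
	default:
		return -ENOTTY;
	}
}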
diff --git a/fs/partitions/check.c b/block/partition-generic.c
index e3c63d1c5e1..d06ec1c829c 100644
--- a/fs/partitions/check.c
+++ b/block/partition-generic.c
@@ -1,6 +1,4 @@
/*
- * fs/partitions/check.c
- *
* Code extracted from drivers/block/genhd.c
* Copyright (C) 1991-1998 Linus Torvalds
* Re-organised Feb 1998 Russell King
@@ -9,8 +7,6 @@
* block drivers, which allows all the partition code to
* be grouped in one location, and it to be mostly self
* contained.
- *
- * Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
*/
#include <linux/init.h>
@@ -22,98 +18,11 @@
#include <linux/genhd.h>
#include <linux/blktrace_api.h>
-#include "check.h"
-
-#include "acorn.h"
-#include "amiga.h"
-#include "atari.h"
-#include "ldm.h"
-#include "mac.h"
-#include "msdos.h"
-#include "osf.h"
-#include "sgi.h"
-#include "sun.h"
-#include "ibm.h"
-#include "ultrix.h"
-#include "efi.h"
-#include "karma.h"
-#include "sysv68.h"
+#include "partitions/check.h"
#ifdef CONFIG_BLK_DEV_MD
extern void md_autodetect_dev(dev_t dev);
#endif
-
-int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
-
-static int (*check_part[])(struct parsed_partitions *) = {
- /*
- * Probe partition formats with tables at disk address 0
- * that also have an ADFS boot block at 0xdc0.
- */
-#ifdef CONFIG_ACORN_PARTITION_ICS
- adfspart_check_ICS,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_POWERTEC
- adfspart_check_POWERTEC,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_EESOX
- adfspart_check_EESOX,
-#endif
-
- /*
- * Now move on to formats that only have partition info at
- * disk address 0xdc0. Since these may also have stale
- * PC/BIOS partition tables, they need to come before
- * the msdos entry.
- */
-#ifdef CONFIG_ACORN_PARTITION_CUMANA
- adfspart_check_CUMANA,
-#endif
-#ifdef CONFIG_ACORN_PARTITION_ADFS
- adfspart_check_ADFS,
-#endif
-
-#ifdef CONFIG_EFI_PARTITION
- efi_partition, /* this must come before msdos */
-#endif
-#ifdef CONFIG_SGI_PARTITION
- sgi_partition,
-#endif
-#ifdef CONFIG_LDM_PARTITION
- ldm_partition, /* this must come before msdos */
-#endif
-#ifdef CONFIG_MSDOS_PARTITION
- msdos_partition,
-#endif
-#ifdef CONFIG_OSF_PARTITION
- osf_partition,
-#endif
-#ifdef CONFIG_SUN_PARTITION
- sun_partition,
-#endif
-#ifdef CONFIG_AMIGA_PARTITION
- amiga_partition,
-#endif
-#ifdef CONFIG_ATARI_PARTITION
- atari_partition,
-#endif
-#ifdef CONFIG_MAC_PARTITION
- mac_partition,
-#endif
-#ifdef CONFIG_ULTRIX_PARTITION
- ultrix_partition,
-#endif
-#ifdef CONFIG_IBM_PARTITION
- ibm_partition,
-#endif
-#ifdef CONFIG_KARMA_PARTITION
- karma_partition,
-#endif
-#ifdef CONFIG_SYSV68_PARTITION
- sysv68_partition,
-#endif
- NULL
-};
/*
* disk_name() is used by partition check code and the genhd driver.
@@ -155,65 +64,6 @@ const char *__bdevname(dev_t dev, char *buffer)
EXPORT_SYMBOL(__bdevname);
-static struct parsed_partitions *
-check_partition(struct gendisk *hd, struct block_device *bdev)
-{
- struct parsed_partitions *state;
- int i, res, err;
-
- state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
- if (!state)
- return NULL;
- state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
- if (!state->pp_buf) {
- kfree(state);
- return NULL;
- }
- state->pp_buf[0] = '\0';
-
- state->bdev = bdev;
- disk_name(hd, 0, state->name);
- snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
- if (isdigit(state->name[strlen(state->name)-1]))
- sprintf(state->name, "p");
-
- state->limit = disk_max_parts(hd);
- i = res = err = 0;
- while (!res && check_part[i]) {
- memset(&state->parts, 0, sizeof(state->parts));
- res = check_part[i++](state);
- if (res < 0) {
- /* We have hit an I/O error which we don't report now.
- * But record it, and let the others do their job.
- */
- err = res;
- res = 0;
- }
-
- }
- if (res > 0) {
- printk(KERN_INFO "%s", state->pp_buf);
-
- free_page((unsigned long)state->pp_buf);
- return state;
- }
- if (state->access_beyond_eod)
- err = -ENOSPC;
- if (err)
- /* The partition is unrecognized. So report I/O errors if there were any */
- res = err;
- if (!res)
- strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
- else if (warn_no_part)
- strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
-
- printk(KERN_INFO "%s", state->pp_buf);
-
- free_page((unsigned long)state->pp_buf);
- kfree(state);
- return ERR_PTR(res);
-}
-
static ssize_t part_partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/fs/partitions/Kconfig b/block/partitions/Kconfig
index cb5f0a3f1b0..cb5f0a3f1b0 100644
--- a/fs/partitions/Kconfig
+++ b/block/partitions/Kconfig
diff --git a/fs/partitions/Makefile b/block/partitions/Makefile
index 03af8eac51d..03af8eac51d 100644
--- a/fs/partitions/Makefile
+++ b/block/partitions/Makefile
diff --git a/fs/partitions/acorn.c b/block/partitions/acorn.c
index fbeb697374d..fbeb697374d 100644
--- a/fs/partitions/acorn.c
+++ b/block/partitions/acorn.c
diff --git a/fs/partitions/acorn.h b/block/partitions/acorn.h
index ede82852969..ede82852969 100644
--- a/fs/partitions/acorn.h
+++ b/block/partitions/acorn.h
diff --git a/fs/partitions/amiga.c b/block/partitions/amiga.c
index 70cbf44a156..70cbf44a156 100644
--- a/fs/partitions/amiga.c
+++ b/block/partitions/amiga.c
diff --git a/fs/partitions/amiga.h b/block/partitions/amiga.h
index d094585cada..d094585cada 100644
--- a/fs/partitions/amiga.h
+++ b/block/partitions/amiga.h
diff --git a/fs/partitions/atari.c b/block/partitions/atari.c
index 9875b05e80a..9875b05e80a 100644
--- a/fs/partitions/atari.c
+++ b/block/partitions/atari.c
diff --git a/fs/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f3..fe2d32a89f3 100644
--- a/fs/partitions/atari.h
+++ b/block/partitions/atari.h
diff --git a/block/partitions/check.c b/block/partitions/check.c
new file mode 100644
index 00000000000..bc908672c97
--- /dev/null
+++ b/block/partitions/check.c
@@ -0,0 +1,166 @@
+/*
+ * fs/partitions/check.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ *
+ * We now have independent partition support from the
+ * block drivers, which allows all the partition code to
+ * be grouped in one location, and it to be mostly self
+ * contained.
+ *
+ * Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
+ */
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
+
+#include "check.h"
+
+#include "acorn.h"
+#include "amiga.h"
+#include "atari.h"
+#include "ldm.h"
+#include "mac.h"
+#include "msdos.h"
+#include "osf.h"
+#include "sgi.h"
+#include "sun.h"
+#include "ibm.h"
+#include "ultrix.h"
+#include "efi.h"
+#include "karma.h"
+#include "sysv68.h"
+
+int warn_no_part = 1; /*This is ugly: should make genhd removable media aware*/
+
+static int (*check_part[])(struct parsed_partitions *) = {
+ /*
+ * Probe partition formats with tables at disk address 0
+ * that also have an ADFS boot block at 0xdc0.
+ */
+#ifdef CONFIG_ACORN_PARTITION_ICS
+ adfspart_check_ICS,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+ adfspart_check_POWERTEC,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+ adfspart_check_EESOX,
+#endif
+
+ /*
+ * Now move on to formats that only have partition info at
+ * disk address 0xdc0. Since these may also have stale
+ * PC/BIOS partition tables, they need to come before
+ * the msdos entry.
+ */
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+ adfspart_check_CUMANA,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+ adfspart_check_ADFS,
+#endif
+
+#ifdef CONFIG_EFI_PARTITION
+ efi_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_SGI_PARTITION
+ sgi_partition,
+#endif
+#ifdef CONFIG_LDM_PARTITION
+ ldm_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_MSDOS_PARTITION
+ msdos_partition,
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ osf_partition,
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ sun_partition,
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ amiga_partition,
+#endif
+#ifdef CONFIG_ATARI_PARTITION
+ atari_partition,
+#endif
+#ifdef CONFIG_MAC_PARTITION
+ mac_partition,
+#endif
+#ifdef CONFIG_ULTRIX_PARTITION
+ ultrix_partition,
+#endif
+#ifdef CONFIG_IBM_PARTITION
+ ibm_partition,
+#endif
+#ifdef CONFIG_KARMA_PARTITION
+ karma_partition,
+#endif
+#ifdef CONFIG_SYSV68_PARTITION
+ sysv68_partition,
+#endif
+ NULL
+};
+
+struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
+{
+ struct parsed_partitions *state;
+ int i, res, err;
+
+ state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!state->pp_buf) {
+ kfree(state);
+ return NULL;
+ }
+ state->pp_buf[0] = '\0';
+
+ state->bdev = bdev;
+ disk_name(hd, 0, state->name);
+ snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
+ if (isdigit(state->name[strlen(state->name)-1]))
+ sprintf(state->name, "p");
+
+ state->limit = disk_max_parts(hd);
+ i = res = err = 0;
+ while (!res && check_part[i]) {
+ memset(&state->parts, 0, sizeof(state->parts));
+ res = check_part[i++](state);
+ if (res < 0) {
+ /* We have hit an I/O error which we don't report now.
+ * But record it, and let the others do their job.
+ */
+ err = res;
+ res = 0;
+ }
+
+ }
+ if (res > 0) {
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
+ return state;
+ }
+ if (state->access_beyond_eod)
+ err = -ENOSPC;
+ if (err)
+ /* The partition is unrecognized. So report I/O errors if there were any */
+ res = err;
+ if (!res)
+ strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
+ else if (warn_no_part)
+ strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
+
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
+ kfree(state);
+ return ERR_PTR(res);
+}
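With check_partition() made non-static here and declared in block/partitions/check.h in the next hunk, the generic partition code calls it from partition-generic.c. A condensed caller sketch (illustrative, not the real rescan_partitions(); note that pp_buf has already been printed and freed by the time a valid state is returned):

static int example_scan(struct gendisk *disk, struct block_device *bdev)
{
	struct parsed_partitions *state;
	int p;

	state = check_partition(disk, bdev);
	if (!state)			/* no memory or no table: nothing to add */
		return 0;
	if (IS_ERR(state))		/* table could not be read */
		return PTR_ERR(state);

	for (p = 1; p < state->limit; p++)
		if (state->parts[p].size)
			pr_info(" p%d: start %llu, %llu sectors\n", p,
				(unsigned long long)state->parts[p].from,
				(unsigned long long)state->parts[p].size);

	kfree(state);
	return 0;
}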
diff --git a/fs/partitions/check.h b/block/partitions/check.h
index d68bf4dc3bc..52b100311ec 100644
--- a/fs/partitions/check.h
+++ b/block/partitions/check.h
@@ -22,6 +22,9 @@ struct parsed_partitions {
char *pp_buf;
};
+struct parsed_partitions *
+check_partition(struct gendisk *, struct block_device *);
+
static inline void *read_part_sector(struct parsed_partitions *state,
sector_t n, Sector *p)
{
diff --git a/fs/partitions/efi.c b/block/partitions/efi.c
index 6296b403c67..6296b403c67 100644
--- a/fs/partitions/efi.c
+++ b/block/partitions/efi.c
diff --git a/fs/partitions/efi.h b/block/partitions/efi.h
index b69ab729558..b69ab729558 100644
--- a/fs/partitions/efi.h
+++ b/block/partitions/efi.h
diff --git a/fs/partitions/ibm.c b/block/partitions/ibm.c
index d513a07f44b..d513a07f44b 100644
--- a/fs/partitions/ibm.c
+++ b/block/partitions/ibm.c
diff --git a/fs/partitions/ibm.h b/block/partitions/ibm.h
index 08fb0804a81..08fb0804a81 100644
--- a/fs/partitions/ibm.h
+++ b/block/partitions/ibm.h
diff --git a/fs/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706..0ea19312706 100644
--- a/fs/partitions/karma.c
+++ b/block/partitions/karma.c
diff --git a/fs/partitions/karma.h b/block/partitions/karma.h
index c764b2e9df2..c764b2e9df2 100644
--- a/fs/partitions/karma.h
+++ b/block/partitions/karma.h
diff --git a/fs/partitions/ldm.c b/block/partitions/ldm.c
index bd8ae788f68..bd8ae788f68 100644
--- a/fs/partitions/ldm.c
+++ b/block/partitions/ldm.c
diff --git a/fs/partitions/ldm.h b/block/partitions/ldm.h
index 374242c0971..374242c0971 100644
--- a/fs/partitions/ldm.h
+++ b/block/partitions/ldm.h
diff --git a/fs/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c..11f688bd76c 100644
--- a/fs/partitions/mac.c
+++ b/block/partitions/mac.c
diff --git a/fs/partitions/mac.h b/block/partitions/mac.h
index 3c7d9843638..3c7d9843638 100644
--- a/fs/partitions/mac.h
+++ b/block/partitions/mac.h
diff --git a/fs/partitions/msdos.c b/block/partitions/msdos.c
index 5f79a6677c6..5f79a6677c6 100644
--- a/fs/partitions/msdos.c
+++ b/block/partitions/msdos.c
diff --git a/fs/partitions/msdos.h b/block/partitions/msdos.h
index 38c781c490b..38c781c490b 100644
--- a/fs/partitions/msdos.h
+++ b/block/partitions/msdos.h
diff --git a/fs/partitions/osf.c b/block/partitions/osf.c
index 764b86a0196..764b86a0196 100644
--- a/fs/partitions/osf.c
+++ b/block/partitions/osf.c
diff --git a/fs/partitions/osf.h b/block/partitions/osf.h
index 20ed2315ec1..20ed2315ec1 100644
--- a/fs/partitions/osf.h
+++ b/block/partitions/osf.h
diff --git a/fs/partitions/sgi.c b/block/partitions/sgi.c
index ea8a86dceaf..ea8a86dceaf 100644
--- a/fs/partitions/sgi.c
+++ b/block/partitions/sgi.c
diff --git a/fs/partitions/sgi.h b/block/partitions/sgi.h
index b9553ebdd5a..b9553ebdd5a 100644
--- a/fs/partitions/sgi.h
+++ b/block/partitions/sgi.h
diff --git a/fs/partitions/sun.c b/block/partitions/sun.c
index b5b6fcfb3d3..b5b6fcfb3d3 100644
--- a/fs/partitions/sun.c
+++ b/block/partitions/sun.c
diff --git a/fs/partitions/sun.h b/block/partitions/sun.h
index 2424baa8319..2424baa8319 100644
--- a/fs/partitions/sun.h
+++ b/block/partitions/sun.h
diff --git a/fs/partitions/sysv68.c b/block/partitions/sysv68.c
index 9627ccffc1c..9627ccffc1c 100644
--- a/fs/partitions/sysv68.c
+++ b/block/partitions/sysv68.c
diff --git a/fs/partitions/sysv68.h b/block/partitions/sysv68.h
index bf2f5ffa97a..bf2f5ffa97a 100644
--- a/fs/partitions/sysv68.h
+++ b/block/partitions/sysv68.h
diff --git a/fs/partitions/ultrix.c b/block/partitions/ultrix.c
index 8dbaf9f77a9..8dbaf9f77a9 100644
--- a/fs/partitions/ultrix.c
+++ b/block/partitions/ultrix.c
diff --git a/fs/partitions/ultrix.h b/block/partitions/ultrix.h
index a3cc00b2bde..a3cc00b2bde 100644
--- a/fs/partitions/ultrix.h
+++ b/block/partitions/ultrix.h
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 527a857d10b..ae9c3ceb286 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -329,7 +329,6 @@ config CRYPTO_CRC32C_INTEL
config CRYPTO_GHASH
tristate "GHASH digest algorithm"
- select CRYPTO_SHASH
select CRYPTO_GF128MUL
help
GHASH is message digest algorithm for GCM (Galois/Counter Mode).
@@ -477,7 +476,6 @@ config CRYPTO_WP512
config CRYPTO_GHASH_CLMUL_NI_INTEL
tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
depends on X86 && 64BIT
- select CRYPTO_SHASH
select CRYPTO_CRYPTD
help
GHASH is message digest algorithm for GCM (Galois/Counter Mode).
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b5e6f243f74..5afe5d1f199 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -116,6 +116,8 @@ source "drivers/vlynq/Kconfig"
source "drivers/virtio/Kconfig"
+source "drivers/hv/Kconfig"
+
source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
@@ -132,8 +134,6 @@ source "drivers/iommu/Kconfig"
source "drivers/virt/Kconfig"
-source "drivers/hv/Kconfig"
-
source "drivers/devfreq/Kconfig"
endmenu
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index de0e3df7677..7556913aba4 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -374,7 +374,7 @@ config ACPI_CUSTOM_METHOD
depends on DEBUG_FS
default n
help
- This debug facility allows ACPI AML methods to me inserted and/or
+ This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
Documentation/acpi/method-customizing.txt.
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c2793a82f12..d707756228c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -356,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
* Value - Value to write to the register, in bit
- * position zero. The bit is automaticallly
+ * position zero. The bit is automatically
* shifted to the correct position.
*
* RETURN: Status
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca..6a9e3bad13f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,8 +932,10 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
struct pstore_info *psi);
@@ -986,17 +988,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1012,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,10 +1050,12 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7711d94a040..86933ca8b47 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -873,7 +873,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
static const struct battery_file {
struct file_operations ops;
- mode_t mode;
+ umode_t mode;
const char *name;
} acpi_battery_file[] = {
FILE_DESCRIPTION_RO(info),
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c47ae9793a..b258cab9061 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@ int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
- mode_t mode = 0400;
+ umode_t mode = 0400;
if (ec_device_count == 0) {
acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 2672c798272..7aff6312ce7 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -596,6 +596,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (ACPI_SUCCESS(status)) {
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ pcie_clear_aspm(root->bus);
+ }
} else {
dev_info(root->bus->bridge,
"ACPI _OSC request failed (%s), "
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d7bc9f6b6c..20a68ca386d 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -446,7 +446,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
int result = 0;
- struct sys_device *sysdev;
+ struct device *dev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
@@ -491,8 +491,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
per_cpu(processors, pr->id) = pr;
- sysdev = get_cpu_sysdev(pr->id);
- if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+ dev = get_cpu_device(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
result = -EFAULT;
goto err_free_cpumask;
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 870550d6a4b..3b599abf2b4 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
#include <asm/uaccess.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db..0a7ed69546b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
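For reference on how the new DMI entries take effect: dmi_check_system() runs an entry's .callback when all of its .matches strings match, and a zero return lets it keep scanning further quirk entries. A sketch of the callback shape (illustrative; the real init_nvs_nosave() is defined elsewhere in sleep.c and is not shown in this hunk):

static int __init example_dmi_quirk(const struct dmi_system_id *d)
{
	pr_info("Applying example quirk for %s\n", d->ident);
	return 0;	/* non-zero would stop dmi_check_system() early */
}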
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e80113..54eaf96ab21 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -52,6 +52,10 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
#else
@@ -109,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
return ret;
}
-static int amba_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare NULL
-#define amba_pm_complete NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -155,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -189,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
return ret;
}
-static int amba_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_SUSPEND */
#define amba_pm_suspend NULL
#define amba_pm_resume NULL
-#define amba_pm_suspend_noirq NULL
-#define amba_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
@@ -234,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -268,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -302,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -336,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
return ret;
}
-static int amba_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
#define amba_pm_poweroff NULL
#define amba_pm_restore NULL
-#define amba_pm_freeze_noirq NULL
-#define amba_pm_thaw_noirq NULL
-#define amba_pm_poweroff_noirq NULL
-#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -402,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
- .prepare = amba_pm_prepare,
- .complete = amba_pm_complete,
.suspend = amba_pm_suspend,
.resume = amba_pm_resume,
.freeze = amba_pm_freeze,
.thaw = amba_pm_thaw,
.poweroff = amba_pm_poweroff,
.restore = amba_pm_restore,
- .suspend_noirq = amba_pm_suspend_noirq,
- .resume_noirq = amba_pm_resume_noirq,
- .freeze_noirq = amba_pm_freeze_noirq,
- .thaw_noirq = amba_pm_thaw_noirq,
- .poweroff_noirq = amba_pm_poweroff_noirq,
- .restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cf26222a93c..d07bf0366d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -52,7 +52,8 @@
#define DRV_VERSION "3.0"
enum {
- AHCI_PCI_BAR = 5,
+ AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_STANDARD = 5,
};
enum board_ids {
@@ -375,6 +376,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+ /* ST Microelectronics */
+ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
+
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
@@ -622,6 +626,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+ */
+ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+ return 0;
+
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1026,6 +1037,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
+ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
@@ -1057,6 +1069,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
+ /* The Connext uses non-standard BAR */
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
@@ -1065,7 +1081,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1108,7 +1124,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1172,8 +1188,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+ ata_port_pbar_desc(ap, ahci_pci_bar,
0x100 + ap->port_no * 0x80, "port");
/* set enclosure management message type */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 43b875810d1..48be4e18916 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -202,6 +202,71 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ahci_suspend(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+ int rc;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(dev, "firmware update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ /*
+ * AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+
+ rc = ata_host_suspend(host, PMSG_SUSPEND);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->suspend)
+ return pdata->suspend(dev);
+ return 0;
+}
+
+static int ahci_resume(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ int rc;
+
+ if (pdata && pdata->resume) {
+ rc = pdata->resume(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static struct dev_pm_ops ahci_pm_ops = {
+ .suspend = &ahci_suspend,
+ .resume = &ahci_resume,
+};
+#endif
+
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci", },
{},
@@ -214,6 +279,9 @@ static struct platform_driver ahci_driver = {
.name = "ahci",
.owner = THIS_MODULE,
.of_match_table = ahci_of_match,
+#ifdef CONFIG_PM
+ .pm = &ahci_pm_ops,
+#endif
},
.id_table = ahci_devtype,
};
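The new ahci_suspend()/ahci_resume() above also invoke optional ahci_platform_data hooks: pdata->suspend() after the host has been suspended and pdata->resume() before the controller is reinitialised. A board file could plug in like this (sketch; the myboard_* names and the clock handling are hypothetical):

static int myboard_ahci_suspend(struct device *dev)
{
	/* e.g. gate the SATA controller/PHY clock */
	return 0;
}

static int myboard_ahci_resume(struct device *dev)
{
	/* e.g. ungate the clock before the controller is re-initialised */
	return 0;
}

static struct ahci_platform_data myboard_ahci_pdata = {
	.suspend = myboard_ahci_suspend,
	.resume  = myboard_ahci_resume,
};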
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3c92dbd751e..a72bfd0ecfe 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -746,9 +746,6 @@ static void ahci_start_port(struct ata_port *ap)
/* enable FIS reception */
ahci_start_fis_rx(ap);
- /* enable DMA */
- ahci_start_engine(ap);
-
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
ata_for_each_link(link, ap, EDGE) {
@@ -2022,7 +2019,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ahci_power_down(ap);
else {
ata_port_err(ap, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
+ ata_port_freeze(ap);
}
return rc;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c04ad68cb60..11c9aea4f4f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -66,6 +66,7 @@
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -3248,10 +3249,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
if (libata_dma_mask & mode_mask)
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+ dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
+ dev->udma_mask);
else
dma_mask = 0;
@@ -5234,73 +5235,55 @@ bool ata_link_offline(struct ata_link *link)
}
#ifdef CONFIG_PM
-static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
{
+ struct ata_link *link;
unsigned long flags;
- int i, rc;
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
- struct ata_link *link;
+ int rc;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
- */
- if (ap->pflags & ATA_PFLAG_PM_PENDING) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
+ /* Previous resume operation might still be in
+ * progress. Wait for PM_PENDING to clear.
+ */
+ if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ }
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
+ /* request PM ops to EH */
+ spin_lock_irqsave(ap->lock, flags);
- ap->pm_mesg = mesg;
- if (wait) {
- rc = 0;
- ap->pm_result = &rc;
- }
+ ap->pm_mesg = mesg;
+ if (wait) {
+ rc = 0;
+ ap->pm_result = &rc;
+ }
- ap->pflags |= ATA_PFLAG_PM_PENDING;
- ata_for_each_link(link, ap, HOST_FIRST) {
- link->eh_info.action |= action;
- link->eh_info.flags |= ehi_flags;
- }
+ ap->pflags |= ATA_PFLAG_PM_PENDING;
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ link->eh_info.action |= action;
+ link->eh_info.flags |= ehi_flags;
+ }
- ata_port_schedule_eh(ap);
+ ata_port_schedule_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* wait and check result */
- if (wait) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- if (rc)
- return rc;
- }
+ /* wait and check result */
+ if (wait) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
- return 0;
+ return rc;
}
-/**
- * ata_host_suspend - suspend host
- * @host: host to suspend
- * @mesg: PM message
- *
- * Suspend @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and waits for EH
- * to finish.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
{
+ struct ata_port *ap = to_ata_port(dev);
unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
@@ -5315,31 +5298,108 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
if (mesg.event == PM_EVENT_SUSPEND)
ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
- rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
- if (rc == 0)
- host->dev->power.power_state = mesg;
+ rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
return rc;
}
+static int ata_port_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_SUSPEND);
+}
+
+static int ata_port_do_freeze(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
+ return ata_port_suspend_common(dev, PMSG_FREEZE);
+}
+
+static int ata_port_poweroff(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+}
+
+static int ata_port_resume_common(struct device *dev)
+{
+ struct ata_port *ap = to_ata_port(dev);
+ int rc;
+
+ rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+ return rc;
+}
+
+static int ata_port_resume(struct device *dev)
+{
+ int rc;
+
+ rc = ata_port_resume_common(dev);
+ if (!rc) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return rc;
+}
+
+static int ata_port_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops ata_port_pm_ops = {
+ .suspend = ata_port_suspend,
+ .resume = ata_port_resume,
+ .freeze = ata_port_do_freeze,
+ .thaw = ata_port_resume,
+ .poweroff = ata_port_poweroff,
+ .restore = ata_port_resume,
+
+ .runtime_suspend = ata_port_suspend,
+ .runtime_resume = ata_port_resume_common,
+ .runtime_idle = ata_port_runtime_idle,
+};
+
+/**
+ * ata_host_suspend - suspend host
+ * @host: host to suspend
+ * @mesg: PM message
+ *
+ * Suspend @host. Actual operation is performed by port suspend.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+ host->dev->power.power_state = mesg;
+ return 0;
+}
+
/**
* ata_host_resume - resume host
* @host: host to resume
*
- * Resume @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and returns.
- * Note that all resume operations are performed parallelly.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
+ * Resume @host. Actual operation is performed by port resume.
*/
void ata_host_resume(struct ata_host *host)
{
- ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
}
#endif
+struct device_type ata_port_type = {
+ .name = "ata_port",
+#ifdef CONFIG_PM
+ .pm = &ata_port_pm_ops,
+#endif
+};
+
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2a5412e7e9c..508a60bfe5c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3381,6 +3381,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
if (!shost)
goto err_alloc;
+ shost->eh_noresume = 1;
*(struct ata_port **)&shost->hostdata[0] = ap;
ap->scsi_host = shost;
@@ -3398,7 +3399,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ rc = scsi_add_host(ap->scsi_host, &ap->tdev);
if (rc)
goto err_add;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4cadfa28f94..9691dd0966d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -929,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
bytes = (bc_hi << 8) | bc_lo;
/* shall be cleared to zero, indicating xfer of data */
- if (unlikely(ireason & (1 << 0)))
+ if (unlikely(ireason & ATAPI_COD))
goto atapi_check;
/* make sure transfer direction matches expected */
- i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+ i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
if (unlikely(do_write != i_write))
goto atapi_check;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ce9dc6207f3..9a7f0ea565d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -32,6 +32,7 @@
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -279,6 +280,7 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
+ dev->type = &ata_port_type;
dev->parent = get_device(parent);
dev->release = ata_tport_release;
@@ -289,6 +291,9 @@ int ata_tport_add(struct device *parent,
goto tport_err;
}
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
transport_add_device(dev);
transport_configure_device(dev);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 773de97988a..814486d35c4 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -58,6 +58,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
+extern struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index e8574bba3ee..048589fad2c 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -963,17 +963,7 @@ static struct platform_driver arasan_cf_driver = {
},
};
-static int __init arasan_cf_init(void)
-{
- return platform_driver_register(&arasan_cf_driver);
-}
-module_init(arasan_cf_init);
-
-static void __exit arasan_cf_exit(void)
-{
- platform_driver_unregister(&arasan_cf_driver);
-}
-module_exit(arasan_cf_exit);
+module_platform_driver(arasan_cf_driver);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index a76f24a8e5d..a7d91a72ee3 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -360,7 +360,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
- if (!irq) {
+ if (!gpio_is_valid(irq)) {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
@@ -414,8 +414,8 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- ret = ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
- irq ? ata_sff_interrupt : NULL,
+ return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+ gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
if (!ret)
@@ -454,20 +454,7 @@ static struct platform_driver pata_at91_driver = {
},
};
-static int __init pata_at91_init(void)
-{
- return platform_driver_register(&pata_at91_driver);
-}
-
-static void __exit pata_at91_exit(void)
-{
- platform_driver_unregister(&pata_at91_driver);
-}
-
-
-module_init(pata_at91_init);
-module_exit(pata_at91_exit);
-
+module_platform_driver(pata_at91_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index bd987bb082e..d6a4677fdf7 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -418,14 +418,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(tcyc_tdvs<<8 | tdvs));
ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
-
- /* Enable host ATAPI Ultra DMA interrupts */
- ATAPI_SET_INT_MASK(base,
- ATAPI_GET_INT_MASK(base)
- | UDMAIN_DONE_MASK
- | UDMAOUT_DONE_MASK
- | UDMAIN_TERM_MASK
- | UDMAOUT_TERM_MASK);
}
}
}
@@ -470,10 +462,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
-
- /* Enable host ATAPI Multi DMA interrupts */
- ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
- | MULTI_DONE_MASK | MULTI_TERM_MASK);
SSYNC();
}
}
@@ -1153,15 +1141,11 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
{
unsigned char host_stat = 0;
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- unsigned short int_status = ATAPI_GET_INT_STATUS(base);
- if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
+ if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
host_stat |= ATA_DMA_ACTIVE;
- if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
- ATAPI_DEV_INT))
+ if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
host_stat |= ATA_DMA_INTR;
- if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
- host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 628c8fae593..7a402c75ab9 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -1,6 +1,7 @@
/*
* pata_cs5536.c - CS5536 PATA for new ATA layer
* (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ * (C) 2011 Bartlomiej Zolnierkiewicz
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -55,24 +56,16 @@ MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
#define DRV_VERSION "0.0.8"
enum {
- CFG = 0,
- DTC = 1,
- CAST = 2,
- ETC = 3,
-
- MSR_IDE_BASE = 0x51300000,
- MSR_IDE_CFG = (MSR_IDE_BASE + 0x10),
- MSR_IDE_DTC = (MSR_IDE_BASE + 0x12),
- MSR_IDE_CAST = (MSR_IDE_BASE + 0x13),
- MSR_IDE_ETC = (MSR_IDE_BASE + 0x14),
-
+ MSR_IDE_CFG = 0x51300010,
PCI_IDE_CFG = 0x40,
- PCI_IDE_DTC = 0x48,
- PCI_IDE_CAST = 0x4c,
- PCI_IDE_ETC = 0x50,
- IDE_CFG_CHANEN = 0x2,
- IDE_CFG_CABLE = 0x10000,
+ CFG = 0,
+ DTC = 2,
+ CAST = 3,
+ ETC = 4,
+
+ IDE_CFG_CHANEN = (1 << 1),
+ IDE_CFG_CABLE = (1 << 17) | (1 << 16),
IDE_D0_SHIFT = 24,
IDE_D1_SHIFT = 16,
@@ -84,45 +77,50 @@ enum {
IDE_CAST_CMD_MASK = 0xff,
IDE_CAST_CMD_SHIFT = 24,
- IDE_ETC_NODMA = 0x03,
-};
-
-static const u32 msr_reg[4] = {
- MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
-};
-
-static const u8 pci_reg[4] = {
- PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+ IDE_ETC_UDMA_MASK = 0xc0,
};
-static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy __maybe_unused;
- rdmsr(msr_reg[reg], *val, dummy);
+ rdmsr(MSR_IDE_CFG + reg, *val, dummy);
return 0;
}
- return pci_read_config_dword(pdev, pci_reg[reg], val);
+ return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}
-static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
if (unlikely(use_msr)) {
- wrmsr(msr_reg[reg], val, 0);
+ wrmsr(MSR_IDE_CFG + reg, val, 0);
return 0;
}
- return pci_write_config_dword(pdev, pci_reg[reg], val);
+ return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
+
+static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
+{
+ struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ u32 dtc;
+
+ cs5536_read(pdev, DTC, &dtc);
+ dtc &= ~(IDE_DRV_MASK << dshift);
+ dtc |= tim << dshift;
+ cs5536_write(pdev, DTC, dtc);
}
/**
* cs5536_cable_detect - detect cable type
* @ap: Port to detect on
*
- * Perform cable detection for ATA66 capable cable. Return a libata
- * cable type.
+ * Perform cable detection for ATA66 capable cable.
+ *
+ * Returns a cable type.
*/
static int cs5536_cable_detect(struct ata_port *ap)
@@ -132,7 +130,7 @@ static int cs5536_cable_detect(struct ata_port *ap)
cs5536_read(pdev, CFG, &cfg);
- if (cfg & (IDE_CFG_CABLE << ap->port_no))
+ if (cfg & IDE_CFG_CABLE)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
@@ -162,19 +160,15 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
- int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
- u32 dtc, cast, etc;
+ u32 cast;
if (pair)
cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
- cs5536_read(pdev, DTC, &dtc);
- cs5536_read(pdev, CAST, &cast);
- cs5536_read(pdev, ETC, &etc);
+ cs5536_program_dtc(adev, drv_timings[mode]);
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= drv_timings[mode] << dshift;
+ cs5536_read(pdev, CAST, &cast);
cast &= ~(IDE_CAST_DRV_MASK << cshift);
cast |= addr_timings[mode] << cshift;
@@ -182,12 +176,7 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
- etc &= ~(IDE_DRV_MASK << dshift);
- etc |= IDE_ETC_NODMA << dshift;
-
- cs5536_write(pdev, DTC, dtc);
cs5536_write(pdev, CAST, cast);
- cs5536_write(pdev, ETC, etc);
}
/**
@@ -208,25 +197,21 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 dtc, etc;
+ u32 etc;
int mode = adev->dma_mode;
int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- if (mode >= XFER_UDMA_0) {
- cs5536_read(pdev, ETC, &etc);
+ cs5536_read(pdev, ETC, &etc);
+ if (mode >= XFER_UDMA_0) {
etc &= ~(IDE_DRV_MASK << dshift);
etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
-
- cs5536_write(pdev, ETC, etc);
} else { /* MWDMA */
- cs5536_read(pdev, DTC, &dtc);
-
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
-
- cs5536_write(pdev, DTC, dtc);
+ etc &= ~(IDE_ETC_UDMA_MASK << dshift);
+ cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
}
+
+ cs5536_write(pdev, ETC, etc);
}
static struct scsi_host_template cs5536_sht = {
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index ca9d9caedfa..c5af97f5107 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -235,17 +235,7 @@ static struct platform_driver pata_imx_driver = {
},
};
-static int __init pata_imx_init(void)
-{
- return platform_driver_register(&pata_imx_driver);
-}
-
-static void __exit pata_imx_exit(void)
-{
- platform_driver_unregister(&pata_imx_driver);
-}
-module_init(pata_imx_init);
-module_exit(pata_imx_exit);
+module_platform_driver(pata_imx_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("low-level driver for iMX PATA");
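These hunks (and the similar ones below) replace hand-rolled module init/exit pairs with module_platform_driver(). As a rough sketch only, module_platform_driver(my_driver) generates boilerplate roughly equivalent to the following; the real macro lives in <linux/platform_device.h>:

/* Rough sketch of what module_platform_driver(my_driver) replaces. */
static int __init my_driver_init(void)
{
	return platform_driver_register(&my_driver);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
	platform_driver_unregister(&my_driver);
}
module_exit(my_driver_exit);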
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 15b64311fe0..badb1789a91 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -205,21 +205,10 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.remove = __devexit_p(ixp4xx_pata_remove),
};
-static int __init ixp4xx_pata_init(void)
-{
- return platform_driver_register(&ixp4xx_pata_platform_driver);
-}
-
-static void __exit ixp4xx_pata_exit(void)
-{
- platform_driver_unregister(&ixp4xx_pata_platform_driver);
-}
+module_platform_driver(ixp4xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(ixp4xx_pata_init);
-module_exit(ixp4xx_pata_exit);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3e1746314f2..00748ae1a01 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -897,26 +897,7 @@ static struct platform_driver mpc52xx_ata_of_platform_driver = {
},
};
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_ata_init(void)
-{
- printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
- return platform_driver_register(&mpc52xx_ata_of_platform_driver);
-}
-
-static void __exit
-mpc52xx_ata_exit(void)
-{
- platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
-}
-
-module_init(mpc52xx_ata_init);
-module_exit(mpc52xx_ata_exit);
+module_platform_driver(mpc52xx_ata_of_platform_driver);
MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 2a472c5bb7d..1654dc27e7f 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -12,8 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/ata_platform.h>
static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
@@ -22,7 +21,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
- struct resource irq_res;
+ struct resource *irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
@@ -51,11 +50,9 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
}
}
- ret = of_irq_to_resource(dn, 0, &irq_res);
- if (!ret)
- irq_res.start = irq_res.end = 0;
- else
- irq_res.flags = 0;
+ irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+ if (irq_res)
+ irq_res->flags = 0;
prop = of_get_property(dn, "reg-shift", NULL);
if (prop)
@@ -75,7 +72,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
- return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
reg_shift, pio_mask);
}
@@ -101,17 +98,7 @@ static struct platform_driver pata_of_platform_driver = {
.remove = __devexit_p(pata_of_platform_remove),
};
-static int __init pata_of_platform_init(void)
-{
- return platform_driver_register(&pata_of_platform_driver);
-}
-module_init(pata_of_platform_init);
-
-static void __exit pata_of_platform_exit(void)
-{
- platform_driver_unregister(&pata_of_platform_driver);
-}
-module_exit(pata_of_platform_exit);
+module_platform_driver(pata_of_platform_driver);
MODULE_DESCRIPTION("OF-platform PATA driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index b86d7e22595..5ff31b68135 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -132,20 +132,9 @@ static struct platform_driver palmld_pata_platform_driver = {
.remove = __devexit_p(palmld_pata_remove),
};
-static int __init palmld_pata_init(void)
-{
- return platform_driver_register(&palmld_pata_platform_driver);
-}
-
-static void __exit palmld_pata_exit(void)
-{
- platform_driver_unregister(&palmld_pata_platform_driver);
-}
+module_platform_driver(palmld_pata_platform_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PalmLD PATA driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(palmld_pata_init);
-module_exit(palmld_pata_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 2067308f683..f1848aeda78 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -256,17 +256,7 @@ static struct platform_driver pata_platform_driver = {
},
};
-static int __init pata_platform_init(void)
-{
- return platform_driver_register(&pata_platform_driver);
-}
-
-static void __exit pata_platform_exit(void)
-{
- platform_driver_unregister(&pata_platform_driver);
-}
-module_init(pata_platform_init);
-module_exit(pata_platform_exit);
+module_platform_driver(pata_platform_driver);
module_param(pio_mask, int, 0);
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index b4ede40f8ae..0bb0fb7b26b 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -390,18 +390,7 @@ static struct platform_driver pxa_ata_driver = {
},
};
-static int __init pxa_ata_init(void)
-{
- return platform_driver_register(&pxa_ata_driver);
-}
-
-static void __exit pxa_ata_exit(void)
-{
- platform_driver_unregister(&pxa_ata_driver);
-}
-
-module_init(pxa_ata_init);
-module_exit(pxa_ata_exit);
+module_platform_driver(pxa_ata_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 1b9d10d9c5d..9417101bd5c 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -188,9 +188,6 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
.remove = __devexit_p(rb532_pata_driver_remove),
@@ -200,27 +197,13 @@ static struct platform_driver rb532_pata_platform_driver = {
},
};
-/* ------------------------------------------------------------------------ */
-
#define DRV_INFO DRV_DESC " version " DRV_VERSION
-static int __init rb532_pata_module_init(void)
-{
- printk(KERN_INFO DRV_INFO "\n");
-
- return platform_driver_register(&rb532_pata_platform_driver);
-}
-
-static void __exit rb532_pata_module_exit(void)
-{
- platform_driver_unregister(&rb532_pata_platform_driver);
-}
+module_platform_driver(rb532_pata_platform_driver);
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-
-module_init(rb532_pata_module_init);
-module_exit(rb532_pata_module_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 5c4237452f5..69f7cde49c6 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1777,18 +1777,7 @@ static struct platform_driver sata_dwc_driver = {
.remove = sata_dwc_remove,
};
-static int __init sata_dwc_init(void)
-{
- return platform_driver_register(&sata_dwc_driver);
-}
-
-static void __exit sata_dwc_exit(void)
-{
- platform_driver_unregister(&sata_dwc_driver);
-}
-
-module_init(sata_dwc_init);
-module_exit(sata_dwc_exit);
+module_platform_driver(sata_dwc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 78ae7b67b09..5a2c95ba050 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1452,21 +1452,9 @@ static struct platform_driver fsl_sata_driver = {
#endif
};
-static int __init sata_fsl_init(void)
-{
- platform_driver_register(&fsl_sata_driver);
- return 0;
-}
-
-static void __exit sata_fsl_exit(void)
-{
- platform_driver_unregister(&fsl_sata_driver);
-}
+module_platform_driver(fsl_sata_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
MODULE_VERSION("1.10");
-
-module_init(sata_fsl_init);
-module_exit(sata_fsl_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 0b8b8b488ee..38950ea8398 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3988,7 +3988,7 @@ static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -3998,7 +3998,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -4019,6 +4019,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
+ const struct mbus_dram_target_info *dram;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
@@ -4072,8 +4073,9 @@ static int mv_platform_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
@@ -4141,17 +4143,18 @@ static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
+ const struct mbus_dram_target_info *dram;
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
- const struct mv_sata_platform_data *mv_platform_data = \
- pdev->dev.platform_data;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
/* initialize adapter */
ret = mv_init_host(host);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3d0c2b0fed9..9e373ba2030 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
// get real pkt length pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
skb_trim(skb, length);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 21cf46f4524..e95c67edb2c 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -174,4 +174,15 @@ config SYS_HYPERVISOR
source "drivers/base/regmap/Kconfig"
+config DMA_SHARED_BUFFER
+ bool "Buffer framework to be shared between drivers"
+ default n
+ select ANON_INODES
+ depends on EXPERIMENTAL
+ help
+ This option enables the framework for buffer-sharing between
+ multiple drivers. A buffer is associated with a file using driver
+	  API extensions; the file's descriptor can then be passed on to other
+	  drivers.
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 99a375ad2cc..2c8272dd93c 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -3,16 +3,17 @@
obj-y := core.o sys.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
- attribute_container.o transport_class.o
+ attribute_container.o transport_class.o \
+ topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
-obj-$(CONFIG_SMP) += topology.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 21c1b96c34c..7a6ae422876 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -4,7 +4,9 @@
* struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
*
* @subsys - the struct kset that defines this subsystem
- * @devices_kset - the list of devices associated
+ * @devices_kset - the subsystem's 'devices' directory
+ * @interfaces - list of subsystem interfaces associated
+ * @mutex - protect the devices and interfaces lists.
*
* @drivers_kset - the list of drivers associated
* @klist_devices - the klist to iterate over the @devices_kset
@@ -14,10 +16,8 @@
* @bus - pointer back to the struct bus_type that this structure is associated
* with.
*
- * @class_interfaces - list of class_interfaces associated
* @glue_dirs - "glue" directory to put in-between the parent device to
* avoid namespace conflicts
- * @class_mutex - mutex to protect the children, devices, and interfaces lists.
* @class - pointer back to the struct class that this structure is associated
* with.
*
@@ -28,6 +28,8 @@
struct subsys_private {
struct kset subsys;
struct kset *devices_kset;
+ struct list_head interfaces;
+ struct mutex mutex;
struct kset *drivers_kset;
struct klist klist_devices;
@@ -36,9 +38,7 @@ struct subsys_private {
unsigned int drivers_autoprobe:1;
struct bus_type *bus;
- struct list_head class_interfaces;
struct kset glue_dirs;
- struct mutex class_mutex;
struct class *class;
};
#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
@@ -94,7 +94,6 @@ extern int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
-extern int system_bus_init(void);
extern int cpu_dev_init(void);
extern int bus_add_device(struct device *dev);
@@ -116,6 +115,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
+/* /sys/devices directory */
extern struct kset *devices_kset;
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 000e7b2006f..99dc5921e1d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -16,9 +16,14 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/mutex.h>
#include "base.h"
#include "power/power.h"
+/* /sys/devices/system */
+/* FIXME: make static after drivers/base/sys.c is deleted */
+struct kset *system_kset;
+
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
@@ -360,6 +365,47 @@ struct device *bus_find_device_by_name(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device_by_name);
+/**
+ * subsys_find_device_by_id - find a device with a specific enumeration number
+ * @subsys: subsystem
+ * @id: index 'id' in struct device
+ * @hint: device to check first
+ *
+ * Check the hint's next object; if it is a match, return it directly.
+ * Otherwise, fall back to a full list search. Either way, a reference to
+ * the returned object is taken.
+ */
+struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
+ struct device *hint)
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!subsys)
+ return NULL;
+
+ if (hint) {
+ klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
+ dev = next_device(&i);
+ if (dev && dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ klist_iter_exit(&i);
+ }
+
+ klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
+ while ((dev = next_device(&i))) {
+ if (dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ }
+ klist_iter_exit(&i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
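A minimal usage sketch for the new helper; foo_subsys, the id scheme and the helper name are hypothetical, not part of this patch:

/* Hypothetical: find the device enumerated as <dev_name><id> on a subsystem,
 * using a previously found device as a hint to skip the full list walk. */
static void foo_poke_device(struct bus_type *foo_subsys, unsigned int id,
			    struct device *hint)
{
	struct device *dev;

	dev = subsys_find_device_by_id(foo_subsys, id, hint);
	if (!dev)
		return;				/* no device with that id */

	dev_info(dev, "found device with id %u\n", id);
	put_device(dev);			/* drop the reference taken for us */
}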
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -487,38 +533,59 @@ out_put:
void bus_probe_device(struct device *dev)
{
struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
int ret;
- if (bus && bus->p->drivers_autoprobe) {
+ if (!bus)
+ return;
+
+ if (bus->p->drivers_autoprobe) {
ret = device_attach(dev);
WARN_ON(ret < 0);
}
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->add_dev)
+ sif->add_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
}
/**
* bus_remove_device - remove device from bus
* @dev: device to be removed
*
- * - Remove symlink from bus's directory.
+ * - Remove device from all interfaces.
+ * - Remove symlink from bus' directory.
* - Delete device from bus's list.
* - Detach from its driver.
* - Drop reference taken in bus_add_device().
*/
void bus_remove_device(struct device *dev)
{
- if (dev->bus) {
- sysfs_remove_link(&dev->kobj, "subsystem");
- sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev_name(dev));
- device_remove_attrs(dev->bus, dev);
- if (klist_node_attached(&dev->p->knode_bus))
- klist_del(&dev->p->knode_bus);
-
- pr_debug("bus: '%s': remove device %s\n",
- dev->bus->name, dev_name(dev));
- device_release_driver(dev);
- bus_put(dev->bus);
- }
+ struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
+
+ if (!bus)
+ return;
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->remove_dev)
+ sif->remove_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
+
+ sysfs_remove_link(&dev->kobj, "subsystem");
+ sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
+ dev_name(dev));
+ device_remove_attrs(dev->bus, dev);
+ if (klist_node_attached(&dev->p->knode_bus))
+ klist_del(&dev->p->knode_bus);
+
+ pr_debug("bus: '%s': remove device %s\n",
+ dev->bus->name, dev_name(dev));
+ device_release_driver(dev);
+ bus_put(dev->bus);
}
static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
@@ -847,14 +914,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
/**
- * bus_register - register a bus with the system.
+ * __bus_register - register a driver-core subsystem
* @bus: bus.
*
* Once we have that, we registered the bus with the kobject
* infrastructure, then register the children subsystems it has:
- * the devices and drivers that belong to the bus.
+ * the devices and drivers that belong to the subsystem.
*/
-int bus_register(struct bus_type *bus)
+int __bus_register(struct bus_type *bus, struct lock_class_key *key)
{
int retval;
struct subsys_private *priv;
@@ -898,6 +965,8 @@ int bus_register(struct bus_type *bus)
goto bus_drivers_fail;
}
+ INIT_LIST_HEAD(&priv->interfaces);
+ __mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
@@ -927,7 +996,7 @@ out:
bus->p = NULL;
return retval;
}
-EXPORT_SYMBOL_GPL(bus_register);
+EXPORT_SYMBOL_GPL(__bus_register);
/**
* bus_unregister - remove a bus from the system
@@ -939,6 +1008,8 @@ EXPORT_SYMBOL_GPL(bus_register);
void bus_unregister(struct bus_type *bus)
{
pr_debug("bus: '%s': unregistering\n", bus->name);
+ if (bus->dev_root)
+ device_unregister(bus->dev_root);
bus_remove_attrs(bus);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
@@ -1028,10 +1099,194 @@ void bus_sort_breadthfirst(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
+/**
+ * subsys_dev_iter_init - initialize subsys device iterator
+ * @iter: subsys iterator to initialize
+ * @subsys: the subsystem to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize subsys iterator @iter such that it iterates over devices
+ * of @subsys. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->p->knode_bus;
+ klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
+
+/**
+ * subsys_dev_iter_next - iterate to the next device
+ * @iter: subsys iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator is advanced to the next device or exited. The caller is
+ * free to do whatever it wants with the device, including
+ * calling back into subsys code.
+ */
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ for (;;) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = container_of(knode, struct device_private, knode_bus)->device;
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
+
+/**
+ * subsys_dev_iter_exit - finish iteration
+ * @iter: subsys iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
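A minimal sketch of the iterator trio in use; foo_subsys and foo_list_devices are placeholders for any registered subsystem:

static void foo_list_devices(struct bus_type *foo_subsys)
{
	struct subsys_dev_iter iter;
	struct device *dev;

	subsys_dev_iter_init(&iter, foo_subsys, NULL, NULL);
	while ((dev = subsys_dev_iter_next(&iter)))
		dev_info(dev, "device id %u\n", dev->id);
	subsys_dev_iter_exit(&iter);	/* always exit, even after an early break */
}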
+
+int subsys_interface_register(struct subsys_interface *sif)
+{
+ struct bus_type *subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif || !sif->subsys)
+ return -ENODEV;
+
+ subsys = bus_get(sif->subsys);
+ if (!subsys)
+ return -EINVAL;
+
+ mutex_lock(&subsys->p->mutex);
+ list_add_tail(&sif->node, &subsys->p->interfaces);
+ if (sif->add_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->add_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(subsys_interface_register);
+
+void subsys_interface_unregister(struct subsys_interface *sif)
+{
+ struct bus_type *subsys = sif->subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif)
+ return;
+
+ mutex_lock(&subsys->p->mutex);
+ list_del_init(&sif->node);
+ if (sif->remove_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->remove_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ bus_put(subsys);
+}
+EXPORT_SYMBOL_GPL(subsys_interface_unregister);
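A hedged consumer sketch for the new interface hooks. The callback return type is assumed to be int (the calls above ignore it), and everything named foo_* is illustrative; only the .subsys, .add_dev and .remove_dev fields used above are relied on:

static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "foo interface attached\n");
	return 0;
}

static int foo_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "foo interface detached\n");
	return 0;
}

static struct subsys_interface foo_interface = {
	.subsys		= &cpu_subsys,	/* exported by the cpu.c conversion below */
	.add_dev	= foo_add_dev,
	.remove_dev	= foo_remove_dev,
};

/* subsys_interface_register(&foo_interface) calls foo_add_dev() for every
 * device already on the subsystem and for each one added afterwards. */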
+
+static void system_root_device_release(struct device *dev)
+{
+ kfree(dev);
+}
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys - system subsystem
+ * @groups - default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new; it exists only for
+ * compatibility with bad ideas. New subsystems should be plain subsystems,
+ * and their subsystem-wide attributes should be added to the subsystem
+ * directory itself rather than to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ struct device *dev;
+ int err;
+
+ err = bus_register(subsys);
+ if (err < 0)
+ return err;
+
+ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_dev;
+ }
+
+ err = dev_set_name(dev, "%s", subsys->name);
+ if (err < 0)
+ goto err_name;
+
+ dev->kobj.parent = &system_kset->kobj;
+ dev->groups = groups;
+ dev->release = system_root_device_release;
+
+ err = device_register(dev);
+ if (err < 0)
+ goto err_dev_reg;
+
+ subsys->dev_root = dev;
+ return 0;
+
+err_dev_reg:
+ put_device(dev);
+ dev = NULL;
+err_name:
+ kfree(dev);
+err_dev:
+ bus_unregister(subsys);
+ return err;
+}
+EXPORT_SYMBOL_GPL(subsys_system_register);
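A minimal sketch of registering such a compatibility subsystem; the foo names are placeholders, and passing NULL for the root-device attribute groups is assumed to be acceptable:

static struct bus_type foo_subsys = {
	.name		= "foo",
	.dev_name	= "foo",	/* devices show up as foo0, foo1, ... */
};

static int __init foo_dev_init(void)
{
	/* creates /sys/devices/system/foo as the subsystem's root device */
	return subsys_system_register(&foo_subsys, NULL);
}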
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
if (!bus_kset)
return -ENOMEM;
+
+ system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
+ if (!system_kset)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/base/class.c b/drivers/base/class.c
index b80d91cc8c3..03243d4002f 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -184,9 +184,9 @@ int __class_register(struct class *cls, struct lock_class_key *key)
if (!cp)
return -ENOMEM;
klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
- INIT_LIST_HEAD(&cp->class_interfaces);
+ INIT_LIST_HEAD(&cp->interfaces);
kset_init(&cp->glue_dirs);
- __mutex_init(&cp->class_mutex, "struct class mutex", key);
+ __mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error) {
kfree(cp);
@@ -460,15 +460,15 @@ int class_interface_register(struct class_interface *class_intf)
if (!parent)
return -EINVAL;
- mutex_lock(&parent->p->class_mutex);
- list_add_tail(&class_intf->node, &parent->p->class_interfaces);
+ mutex_lock(&parent->p->mutex);
+ list_add_tail(&class_intf->node, &parent->p->interfaces);
if (class_intf->add_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->class_mutex);
+ mutex_unlock(&parent->p->mutex);
return 0;
}
@@ -482,7 +482,7 @@ void class_interface_unregister(struct class_interface *class_intf)
if (!parent)
return;
- mutex_lock(&parent->p->class_mutex);
+ mutex_lock(&parent->p->mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
@@ -490,7 +490,7 @@ void class_interface_unregister(struct class_interface *class_intf)
class_intf->remove_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->class_mutex);
+ mutex_unlock(&parent->p->mutex);
class_put(parent);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d8b3d89db04..4a67cc0c8b3 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -118,6 +118,56 @@ static const struct sysfs_ops dev_sysfs_ops = {
.store = dev_attr_store,
};
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+ssize_t device_store_ulong(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ char *end;
+ unsigned long new = simple_strtoul(buf, &end, 0);
+ if (end == buf)
+ return -EINVAL;
+ *(unsigned long *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_ulong);
+
+ssize_t device_show_ulong(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_ulong);
+
+ssize_t device_store_int(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ char *end;
+ long new = simple_strtol(buf, &end, 0);
+ if (end == buf || new > INT_MAX || new < INT_MIN)
+ return -EINVAL;
+ *(int *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_int);
+
+ssize_t device_show_int(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_int);
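A hedged sketch of wiring a driver variable to these helpers. The dev_ext_attribute layout (a device_attribute plus a var pointer) is inferred from the to_ext_attr() casts above; the foo names are illustrative:

static unsigned long foo_timeout_ms = 100;

static struct dev_ext_attribute dev_attr_foo_timeout = {
	.attr	= __ATTR(foo_timeout_ms, 0644,
			 device_show_ulong, device_store_ulong),
	.var	= &foo_timeout_ms,
};

/* exposed with device_create_file(dev, &dev_attr_foo_timeout.attr);
 * reads print the value in hex, writes accept any simple_strtoul base-0 input */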
/**
* device_release - free device structure.
@@ -198,7 +248,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
- mode_t mode = 0;
+ umode_t mode = 0;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
@@ -464,7 +514,7 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
static struct device_attribute devt_attr =
__ATTR(dev, S_IRUGO, show_dev, NULL);
-/* kset to create /sys/devices/ */
+/* /sys/devices/ */
struct kset *devices_kset;
/**
@@ -711,6 +761,10 @@ static struct kobject *get_device_parent(struct device *dev,
return k;
}
+ /* subsystems can specify a default root directory for their devices */
+ if (!parent && dev->bus && dev->bus->dev_root)
+ return &dev->bus->dev_root->kobj;
+
if (parent)
return &parent->kobj;
return NULL;
@@ -731,14 +785,6 @@ static void cleanup_device_parent(struct device *dev)
cleanup_glue_dir(dev, dev->kobj.parent);
}
-static void setup_parent(struct device *dev, struct device *parent)
-{
- struct kobject *kobj;
- kobj = get_device_parent(dev, parent);
- if (kobj)
- dev->kobj.parent = kobj;
-}
-
static int device_add_class_symlinks(struct device *dev)
{
int error;
@@ -891,6 +937,7 @@ int device_private_init(struct device *dev)
int device_add(struct device *dev)
{
struct device *parent = NULL;
+ struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
@@ -914,6 +961,10 @@ int device_add(struct device *dev)
dev->init_name = NULL;
}
+ /* subsystems can specify simple device enumeration */
+ if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
+ dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+
if (!dev_name(dev)) {
error = -EINVAL;
goto name_error;
@@ -922,7 +973,9 @@ int device_add(struct device *dev)
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
parent = get_device(dev->parent);
- setup_parent(dev, parent);
+ kobj = get_device_parent(dev, parent);
+ if (kobj)
+ dev->kobj.parent = kobj;
/* use parent numa_node */
if (parent)
@@ -982,17 +1035,17 @@ int device_add(struct device *dev)
&parent->p->klist_children);
if (dev->class) {
- mutex_lock(&dev->class->p->class_mutex);
+ mutex_lock(&dev->class->p->mutex);
/* tie the class to the device */
klist_add_tail(&dev->knode_class,
&dev->class->p->klist_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf,
- &dev->class->p->class_interfaces, node)
+ &dev->class->p->interfaces, node)
if (class_intf->add_dev)
class_intf->add_dev(dev, class_intf);
- mutex_unlock(&dev->class->p->class_mutex);
+ mutex_unlock(&dev->class->p->mutex);
}
done:
put_device(dev);
@@ -1107,15 +1160,15 @@ void device_del(struct device *dev)
if (dev->class) {
device_remove_class_symlinks(dev);
- mutex_lock(&dev->class->p->class_mutex);
+ mutex_lock(&dev->class->p->mutex);
/* notify any interfaces that the device is now gone */
list_for_each_entry(class_intf,
- &dev->class->p->class_interfaces, node)
+ &dev->class->p->interfaces, node)
if (class_intf->remove_dev)
class_intf->remove_dev(dev, class_intf);
/* remove the device from the class list */
klist_del(&dev->knode_class);
- mutex_unlock(&dev->class->p->class_mutex);
+ mutex_unlock(&dev->class->p->mutex);
}
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
@@ -1182,7 +1235,7 @@ static struct device *next_device(struct klist_iter *i)
* freed by the caller.
*/
const char *device_get_devnode(struct device *dev,
- mode_t *mode, const char **tmp)
+ umode_t *mode, const char **tmp)
{
char *s;
@@ -1743,8 +1796,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 251acea3d35..9a5578efbc9 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -1,8 +1,7 @@
/*
- * drivers/base/cpu.c - basic CPU class support
+ * CPU subsystem support
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -14,40 +13,40 @@
#include "base.h"
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];
-
-struct sysdev_class cpu_sysdev_class = {
+struct bus_type cpu_subsys = {
.name = "cpu",
- .attrs = cpu_sysdev_class_attrs,
+ .dev_name = "cpu",
};
-EXPORT_SYMBOL(cpu_sysdev_class);
+EXPORT_SYMBOL_GPL(cpu_subsys);
-static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_online(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
+ return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}
-static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t __ref store_online(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t ret;
cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
- ret = cpu_down(cpu->sysdev.id);
+ ret = cpu_down(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
- ret = cpu_up(cpu->sysdev.id);
+ ret = cpu_up(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
@@ -60,44 +59,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
ret = count;
return ret;
}
-static SYSDEV_ATTR(online, 0644, show_online, store_online);
+static DEVICE_ATTR(online, 0644, show_online, store_online);
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
- sysdev_create_file(&cpu->sysdev, &attr_online);
+ device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
- int logical_cpu = cpu->sysdev.id;
+ int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
- sysdev_remove_file(&cpu->sysdev, &attr_online);
+ device_remove_file(&cpu->dev, &dev_attr_online);
- sysdev_unregister(&cpu->sysdev);
+ device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_probe_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_probe(buf, count);
}
-static ssize_t cpu_release_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_release_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_release(buf, count);
}
-static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -109,15 +108,15 @@ static inline void register_cpu_control(struct cpu *cpu)
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
-static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t rc;
unsigned long long addr;
int cpunum;
- cpunum = cpu->sysdev.id;
+ cpunum = cpu->dev.id;
/*
* Might be reading other cpu's data based on which cpu read thread
@@ -129,7 +128,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
-static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif
/*
@@ -137,12 +136,12 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*/
struct cpu_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
const struct cpumask *const * const map;
};
-static ssize_t show_cpus_attr(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_cpus_attr(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
@@ -153,10 +152,10 @@ static ssize_t show_cpus_attr(struct sysdev_class *class,
return n;
}
-#define _CPU_ATTR(name, map) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }
+#define _CPU_ATTR(name, map) \
+ { __ATTR(name, 0444, show_cpus_attr, NULL), map }
-/* Keep in sync with cpu_sysdev_class_attrs */
+/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
@@ -166,19 +165,19 @@ static struct cpu_attr cpu_attrs[] = {
/*
* Print values for NR_CPUS and offlined cpus
*/
-static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_kernel_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
return n;
}
-static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;
-static ssize_t print_cpus_offline(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_offline(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t offline;
@@ -205,7 +204,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
n += snprintf(&buf[n], len - n, "\n");
return n;
}
-static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
/*
* register_cpu - Setup a sysfs device for a CPU.
@@ -218,57 +217,73 @@ static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
- cpu->node_id = cpu_to_node(num);
- cpu->sysdev.id = num;
- cpu->sysdev.cls = &cpu_sysdev_class;
-
- error = sysdev_register(&cpu->sysdev);
+ cpu->node_id = cpu_to_node(num);
+ cpu->dev.id = num;
+ cpu->dev.bus = &cpu_subsys;
+ error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
if (!error)
- per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
#ifdef CONFIG_KEXEC
if (!error)
- error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
+ error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
return error;
}
-struct sys_device *get_cpu_sysdev(unsigned cpu)
+struct device *get_cpu_device(unsigned cpu)
{
if (cpu < nr_cpu_ids && cpu_possible(cpu))
return per_cpu(cpu_sys_devices, cpu);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(get_cpu_sysdev);
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ &dev_attr_probe.attr,
+ &dev_attr_release.attr,
+#endif
+ &cpu_attrs[0].attr.attr,
+ &cpu_attrs[1].attr.attr,
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+ NULL
+};
+
+static struct attribute_group cpu_root_attr_group = {
+ .attrs = cpu_root_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &cpu_root_attr_group,
+ NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
int __init cpu_dev_init(void)
{
int err;
- err = sysdev_class_register(&cpu_sysdev_class);
+ err = subsys_system_register(&cpu_subsys, cpu_root_attr_groups);
+ if (err)
+ return err;
+
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (!err)
- err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
+ err = sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
-
return err;
}
-
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
- &attr_probe,
- &attr_release,
-#endif
- &cpu_attrs[0].attr,
- &cpu_attrs[1].attr,
- &cpu_attrs[2].attr,
- &attr_kernel_max,
- &attr_offline,
- NULL
-};
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 65cd7483245..524bf96c289 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(devm_kzalloc);
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with dev_kzalloc().
+ * Free memory allocated with devm_kzalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a4760e095ff..8493536ea55 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -40,7 +40,7 @@ static struct req {
struct completion done;
int err;
const char *name;
- mode_t mode; /* 0 => delete */
+ umode_t mode; /* 0 => delete */
struct device *dev;
} *requests;
@@ -142,7 +142,7 @@ int devtmpfs_delete_node(struct device *dev)
return req.err;
}
-static int dev_mkdir(const char *name, mode_t mode)
+static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
@@ -189,7 +189,7 @@ static int create_path(const char *nodepath)
return err;
}
-static int handle_create(const char *nodename, mode_t mode, struct device *dev)
+static int handle_create(const char *nodename, umode_t mode, struct device *dev)
{
struct dentry *dentry;
struct path path;
@@ -378,7 +378,7 @@ int devtmpfs_mount(const char *mntdir)
static DECLARE_COMPLETION(setup_done);
-static int handle(const char *name, mode_t mode, struct device *dev)
+static int handle(const char *name, umode_t mode, struct device *dev)
{
if (mode)
return handle_create(name, mode, dev);
@@ -413,10 +413,9 @@ static int devtmpfsd(void *p)
}
spin_lock(&req_lock);
}
- set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&req_lock);
schedule();
- __set_current_state(TASK_RUNNING);
}
return 0;
out:
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
new file mode 100644
index 00000000000..e38ad243b4b
--- /dev/null
+++ b/drivers/base/dma-buf.c
@@ -0,0 +1,291 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to the linaro-mm-sig list, and especially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+
+static inline int is_dma_buf_file(struct file *);
+
+static int dma_buf_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ dmabuf->ops->release(dmabuf);
+ kfree(dmabuf);
+ return 0;
+}
+
+static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_release,
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+ return file->f_op == &dma_buf_fops;
+}
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator specific data and ops to the buffer.
+ *
+ * @priv: [in] Attach private data of allocator to this buffer
+ * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
+ * @size: [in] Size of the buffer
+ * @flags: [in] mode flags for the file.
+ *
+ * Returns, on success, a newly created dma_buf object, which wraps the
+ * supplied private data and dma_buf_ops operations. If the required ops are
+ * missing or the struct dma_buf allocation fails, returns an ERR_PTR-encoded
+ * negative error.
+ *
+ */
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+ size_t size, int flags)
+{
+ struct dma_buf *dmabuf;
+ struct file *file;
+
+ if (WARN_ON(!priv || !ops
+ || !ops->map_dma_buf
+ || !ops->unmap_dma_buf
+ || !ops->release)) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+ if (dmabuf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dmabuf->priv = priv;
+ dmabuf->ops = ops;
+ dmabuf->size = size;
+
+ file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+
+ dmabuf->file = file;
+
+ mutex_init(&dmabuf->lock);
+ INIT_LIST_HEAD(&dmabuf->attachments);
+
+ return dmabuf;
+}
+EXPORT_SYMBOL_GPL(dma_buf_export);
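An exporter-side sketch under stated assumptions: my_buffer, my_dma_buf_ops and the O_RDWR flag choice are illustrative; only the dma_buf_* calls come from this file:

static int my_export_fd(struct my_buffer *buf)
{
	struct dma_buf *dmabuf;

	/* wrap the allocator's buffer; ops must provide map/unmap/release */
	dmabuf = dma_buf_export(buf, &my_dma_buf_ops, buf->size, O_RDWR);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* hand userspace a file descriptor it can pass to other drivers */
	return dma_buf_fd(dmabuf);
}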
+
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * @dmabuf: [in] pointer to dma_buf for which fd is required.
+ *
+ * On success, returns the associated fd. Otherwise, returns a negative error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf)
+{
+ int error, fd;
+
+ if (!dmabuf || !dmabuf->file)
+ return -EINVAL;
+
+ error = get_unused_fd();
+ if (error < 0)
+ return error;
+ fd = error;
+
+ fd_install(fd, dmabuf->file);
+
+ return fd;
+}
+EXPORT_SYMBOL_GPL(dma_buf_fd);
+
+/**
+ * dma_buf_get - returns the dma_buf structure related to an fd
+ * @fd: [in] fd associated with the dma_buf to be returned
+ *
+ * On success, returns the dma_buf structure associated with the fd; the
+ * file's refcount is increased via fget(). Returns ERR_PTR otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (!is_dma_buf_file(file)) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file->private_data;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf: [in] buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput()
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf || !dmabuf->file))
+ return;
+
+ fput(dmabuf->file);
+}
+EXPORT_SYMBOL_GPL(dma_buf_put);
+
+/**
+ * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Returns struct dma_buf_attachment * for this attachment; may return negative
+ * error codes.
+ *
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ struct dma_buf_attachment *attach;
+ int ret;
+
+ if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
+ if (attach == NULL)
+ goto err_alloc;
+
+ mutex_lock(&dmabuf->lock);
+
+ attach->dev = dev;
+ attach->dmabuf = dmabuf;
+ if (dmabuf->ops->attach) {
+ ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ if (ret)
+ goto err_attach;
+ }
+ list_add(&attach->node, &dmabuf->attachments);
+
+ mutex_unlock(&dmabuf->lock);
+ return attach;
+
+err_alloc:
+ return ERR_PTR(-ENOMEM);
+err_attach:
+ kfree(attach);
+ mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_attach);
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
+ * optionally calls detach() of dma_buf_ops for device-specific detach
+ * @dmabuf: [in] buffer to detach from.
+ * @attach: [in] attachment to be detached; it is freed after this call.
+ *
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+ if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+ return;
+
+ mutex_lock(&dmabuf->lock);
+ list_del(&attach->node);
+ if (dmabuf->ops->detach)
+ dmabuf->ops->detach(dmabuf, attach);
+
+ mutex_unlock(&dmabuf->lock);
+ kfree(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Returns the sg_table containing the buffer's scatterlist; may return NULL
+ * or ERR_PTR.
+ *
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table = ERR_PTR(-EINVAL);
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->map_dma_buf)
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ mutex_unlock(&attach->dmabuf->lock);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases the usecount of the buffer;
+ * might deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ *
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table)
+{
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table
+ || !attach->dmabuf->ops))
+ return;
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->unmap_dma_buf)
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
+ mutex_unlock(&attach->dmabuf->lock);
+
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
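An importer-side sketch tying the calls above together; error handling is minimal and the my_* names are illustrative:

static int my_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto out_detach;
	}

	/* ... program the device using the scatterlist in sgt ... */

	dma_buf_unmap_attachment(attach, sgt);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return ret;
}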
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4e7df..26ab358dac6 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
int loading = simple_strtol(buf, NULL, 10);
int i;
+ mutex_lock(&fw_lock);
+
+ if (!fw_priv->fw)
+ goto out;
+
switch (loading) {
case 1:
- mutex_lock(&fw_lock);
- if (!fw_priv->fw) {
- mutex_unlock(&fw_lock);
- break;
- }
firmware_free_data(fw_priv->fw);
memset(fw_priv->fw, 0, sizeof(struct firmware));
/* If the pages are not owned by 'struct firmware' */
@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
- mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_load_abort(fw_priv);
break;
}
-
+out:
+ mutex_unlock(&fw_lock);
return count;
}
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
return 0;
}
+ read_lock_usermodehelper();
+
if (WARN_ON(usermodehelper_is_disabled())) {
dev_err(device, "firmware: %s will not be loaded\n", name);
retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_destroy_instance(fw_priv);
out:
+ read_unlock_usermodehelper();
+
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c8a934e7942..c16f0b808a1 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -31,7 +31,6 @@ void __init driver_init(void)
* core core pieces.
*/
platform_bus_init();
- system_bus_init();
cpu_dev_init();
memory_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8272d92d22c..f17e3ea041c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/memory.c - basic Memory class support
+ * Memory subsystem support
*
* Written by Matt Tolentino <matthew.e.tolentino@intel.com>
* Dave Hansen <haveblue@us.ibm.com>
@@ -10,7 +10,6 @@
* SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
@@ -38,26 +37,9 @@ static inline int base_memory_block_id(int section_nr)
return section_nr / sections_per_block;
}
-static struct sysdev_class memory_sysdev_class = {
+static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
-};
-
-static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-{
- return MEMORY_CLASS_NAME;
-}
-
-static int memory_uevent(struct kset *kset, struct kobject *obj,
- struct kobj_uevent_env *env)
-{
- int retval = 0;
-
- return retval;
-}
-
-static const struct kset_uevent_ops memory_uevent_ops = {
- .name = memory_uevent_name,
- .uevent = memory_uevent,
+ .dev_name = MEMORY_CLASS_NAME,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -96,21 +78,21 @@ int register_memory(struct memory_block *memory)
{
int error;
- memory->sysdev.cls = &memory_sysdev_class;
- memory->sysdev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
- error = sysdev_register(&memory->sysdev);
+ error = device_register(&memory->dev);
return error;
}
static void
unregister_memory(struct memory_block *memory)
{
- BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
+ BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->sysdev.kobj);
- sysdev_unregister(&memory->sysdev);
+ kobject_put(&memory->dev.kobj);
+ device_unregister(&memory->dev);
}
unsigned long __weak memory_block_size_bytes(void)
@@ -138,22 +120,22 @@ static unsigned long get_memory_block_size(void)
* uses.
*/
-static ssize_t show_mem_start_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_start_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
return sprintf(buf, "%08lx\n", phys_index);
}
-static ssize_t show_mem_end_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_end_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->end_section_nr / sections_per_block;
@@ -163,13 +145,13 @@ static ssize_t show_mem_end_phys_index(struct sys_device *dev,
/*
* Show whether the section of memory is likely to be hot-removable
*/
-static ssize_t show_mem_removable(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_removable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long i, pfn;
int ret = 1;
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
for (i = 0; i < sections_per_block; i++) {
pfn = section_nr_to_pfn(mem->start_section_nr + i);
@@ -182,11 +164,11 @@ static ssize_t show_mem_removable(struct sys_device *dev,
/*
* online, offline, going offline, etc.
*/
-static ssize_t show_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
ssize_t len = 0;
/*
@@ -324,13 +306,13 @@ out:
}
static ssize_t
-store_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf, size_t count)
+store_mem_state(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
struct memory_block *mem;
int ret = -EINVAL;
- mem = container_of(dev, struct memory_block, sysdev);
+ mem = container_of(dev, struct memory_block, dev);
if (!strncmp(buf, "online", min((int)count, 6)))
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -351,41 +333,41 @@ store_mem_state(struct sys_device *dev,
* s.t. if I offline all of these sections I can then
* remove the physical device?
*/
-static ssize_t show_phys_device(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_phys_device(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
return sprintf(buf, "%d\n", mem->phys_device);
}
-static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
-static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
-static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
#define mem_create_simple_file(mem, attr_name) \
- sysdev_create_file(&mem->sysdev, &attr_##attr_name)
+ device_create_file(&mem->dev, &dev_attr_##attr_name)
#define mem_remove_simple_file(mem, attr_name) \
- sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
+ device_remove_file(&mem->dev, &dev_attr_##attr_name)
/*
* Block size attribute stuff
*/
static ssize_t
-print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+print_block_size(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lx\n", get_memory_block_size());
}
-static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static int block_size_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &attr_block_size_bytes.attr);
+ return device_create_file(memory_subsys.dev_root,
+ &dev_attr_block_size_bytes);
}
/*
@@ -396,7 +378,7 @@ static int block_size_init(void)
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
-memory_probe_store(struct class *class, struct class_attribute *attr,
+memory_probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
@@ -423,12 +405,11 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
out:
return ret;
}
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
static int memory_probe_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_probe.attr);
+ return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
}
#else
static inline int memory_probe_init(void)
@@ -444,8 +425,8 @@ static inline int memory_probe_init(void)
/* Soft offline a page */
static ssize_t
-store_soft_offline_page(struct class *class,
- struct class_attribute *attr,
+store_soft_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -463,8 +444,8 @@ store_soft_offline_page(struct class *class,
/* Forcibly offline a page, including killing processes. */
static ssize_t
-store_hard_offline_page(struct class *class,
- struct class_attribute *attr,
+store_hard_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -478,18 +459,18 @@ store_hard_offline_page(struct class *class,
return ret ? ret : count;
}
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
int err;
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_soft_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_soft_offline_page);
if (!err)
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_hard_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_hard_offline_page);
return err;
}
#else
@@ -509,31 +490,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
struct memory_block *hint)
{
- struct kobject *kobj;
- struct sys_device *sysdev;
- struct memory_block *mem;
- char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
int block_id = base_memory_block_id(__section_nr(section));
+ struct device *hintdev = hint ? &hint->dev : NULL;
+ struct device *dev;
- kobj = hint ? &hint->sysdev.kobj : NULL;
-
- /*
- * This only works because we know that section == sysdev->id
- * slightly redundant with sysdev_register()
- */
- sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
-
- kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
- if (!kobj)
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+ if (hint)
+ put_device(&hint->dev);
+ if (!dev)
return NULL;
-
- sysdev = container_of(kobj, struct sys_device, kobj);
- mem = container_of(sysdev, struct memory_block, sysdev);
-
- return mem;
+ return container_of(dev, struct memory_block, dev);
}
/*
@@ -542,7 +515,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
* this gets to be a real problem, we can always use a radix
* tree or something here.
*
- * This could be made generic for all sysdev classes.
+ * This could be made generic for all device subsystems.
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
@@ -598,7 +571,7 @@ static int add_memory_section(int nid, struct mem_section *section,
mem = find_memory_block(section);
if (mem) {
mem->section_count++;
- kobject_put(&mem->sysdev.kobj);
+ kobject_put(&mem->dev.kobj);
} else
ret = init_memory_block(&mem, section, state);
@@ -631,7 +604,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
unregister_memory(mem);
kfree(mem);
} else
- kobject_put(&mem->sysdev.kobj);
+ kobject_put(&mem->dev.kobj);
mutex_unlock(&mem_sysfs_mutex);
return 0;
@@ -664,8 +637,7 @@ int __init memory_dev_init(void)
int err;
unsigned long block_sz;
- memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
- ret = sysdev_class_register(&memory_sysdev_class);
+ ret = subsys_system_register(&memory_subsys, NULL);
if (ret)
goto out;
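Both this memory.c conversion and the node.c one that follows swap the embedded struct sys_device for a struct device and keep recovering the enclosing object with container_of(). The short userland sketch below shows just that idiom under made-up types; it is not kernel code:

/* Minimal userland illustration of the container_of() idiom the conversion
 * above relies on: given a pointer to the embedded member, recover the
 * enclosing structure. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device {
	int id;
};

struct memory_block_example {
	unsigned long start_section_nr;
	struct fake_device dev;		/* embedded, like memory_block.dev */
};

static unsigned long start_section_of(struct fake_device *dev)
{
	struct memory_block_example *mem =
		container_of(dev, struct memory_block_example, dev);

	return mem->start_section_nr;
}

int main(void)
{
	struct memory_block_example mb = {
		.start_section_nr = 42,
		.dev = { .id = 0 },
	};

	printf("%lu\n", start_section_of(&mb.dev));
	return 0;
}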
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5693ecee9a4..44f427a6611 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -1,8 +1,7 @@
/*
- * drivers/base/node.c - basic Node class support
+ * Basic Node interface support
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -19,18 +18,16 @@
#include <linux/swap.h>
#include <linux/slab.h>
-static struct sysdev_class_attribute *node_state_attrs[];
-
-static struct sysdev_class node_class = {
+static struct bus_type node_subsys = {
.name = "node",
- .attrs = node_state_attrs,
+ .dev_name = "node",
};
-static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
+ const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -44,23 +41,23 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
return len;
}
-static inline ssize_t node_read_cpumask(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 0, buf);
}
-static inline ssize_t node_read_cpulist(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpulist(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 1, buf);
}
-static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
-static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
#define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_meminfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n;
int nid = dev->id;
@@ -157,10 +154,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
}
#undef K
-static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
-static ssize_t node_read_numastat(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_numastat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf,
"numa_hit %lu\n"
@@ -176,10 +173,10 @@ static ssize_t node_read_numastat(struct sys_device * dev,
node_page_state(dev->id, NUMA_LOCAL),
node_page_state(dev->id, NUMA_OTHER));
}
-static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
-static ssize_t node_read_vmstat(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t node_read_vmstat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nid = dev->id;
int i;
@@ -191,10 +188,10 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
return n;
}
-static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
-static ssize_t node_read_distance(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_distance(struct device *dev,
+ struct device_attribute *attr, char * buf)
{
int nid = dev->id;
int len = 0;
@@ -212,7 +209,7 @@ static ssize_t node_read_distance(struct sys_device * dev,
len += sprintf(buf + len, "\n");
return len;
}
-static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
#ifdef CONFIG_HUGETLBFS
/*
@@ -230,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_HIGH_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -266,17 +263,17 @@ int register_node(struct node *node, int num, struct node *parent)
{
int error;
- node->sysdev.id = num;
- node->sysdev.cls = &node_class;
- error = sysdev_register(&node->sysdev);
+ node->dev.id = num;
+ node->dev.bus = &node_subsys;
+ error = device_register(&node->dev);
if (!error){
- sysdev_create_file(&node->sysdev, &attr_cpumap);
- sysdev_create_file(&node->sysdev, &attr_cpulist);
- sysdev_create_file(&node->sysdev, &attr_meminfo);
- sysdev_create_file(&node->sysdev, &attr_numastat);
- sysdev_create_file(&node->sysdev, &attr_distance);
- sysdev_create_file(&node->sysdev, &attr_vmstat);
+ device_create_file(&node->dev, &dev_attr_cpumap);
+ device_create_file(&node->dev, &dev_attr_cpulist);
+ device_create_file(&node->dev, &dev_attr_meminfo);
+ device_create_file(&node->dev, &dev_attr_numastat);
+ device_create_file(&node->dev, &dev_attr_distance);
+ device_create_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_register_node(node);
@@ -296,17 +293,17 @@ int register_node(struct node *node, int num, struct node *parent)
*/
void unregister_node(struct node *node)
{
- sysdev_remove_file(&node->sysdev, &attr_cpumap);
- sysdev_remove_file(&node->sysdev, &attr_cpulist);
- sysdev_remove_file(&node->sysdev, &attr_meminfo);
- sysdev_remove_file(&node->sysdev, &attr_numastat);
- sysdev_remove_file(&node->sysdev, &attr_distance);
- sysdev_remove_file(&node->sysdev, &attr_vmstat);
+ device_remove_file(&node->dev, &dev_attr_cpumap);
+ device_remove_file(&node->dev, &dev_attr_cpulist);
+ device_remove_file(&node->dev, &dev_attr_meminfo);
+ device_remove_file(&node->dev, &dev_attr_numastat);
+ device_remove_file(&node->dev, &dev_attr_distance);
+ device_remove_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
- sysdev_unregister(&node->sysdev);
+ device_unregister(&node->dev);
}
struct node node_devices[MAX_NUMNODES];
@@ -317,41 +314,41 @@ struct node node_devices[MAX_NUMNODES];
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ ret = sysfs_create_link(&node_devices[nid].dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ kobject_name(&node_devices[nid].dev.kobj));
return 0;
}
@@ -393,15 +390,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
- &mem_blk->sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
+ ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
- return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -434,10 +431,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
- sysfs_remove_link(&mem_blk->sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ sysfs_remove_link(&mem_blk->dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
@@ -468,7 +465,7 @@ static int link_mem_sections(int nid)
}
if (mem_blk)
- kobject_put(&mem_blk->sysdev.kobj);
+ kobject_put(&mem_blk->dev.kobj);
return err;
}
@@ -596,19 +593,19 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
}
struct node_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
enum node_states state;
};
-static ssize_t show_node_state(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_node_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
return print_nodes_state(na->state, buf);
}
#define _NODE_ATTR(name, state) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+ { __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
_NODE_ATTR(possible, N_POSSIBLE),
@@ -620,17 +617,26 @@ static struct node_attr node_state_attr[] = {
#endif
};
-static struct sysdev_class_attribute *node_state_attrs[] = {
- &node_state_attr[0].attr,
- &node_state_attr[1].attr,
- &node_state_attr[2].attr,
- &node_state_attr[3].attr,
+static struct attribute *node_state_attrs[] = {
+ &node_state_attr[0].attr.attr,
+ &node_state_attr[1].attr.attr,
+ &node_state_attr[2].attr.attr,
+ &node_state_attr[3].attr.attr,
#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr,
+ &node_state_attr[4].attr.attr,
#endif
NULL
};
+static struct attribute_group memory_root_attr_group = {
+ .attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
@@ -639,7 +645,7 @@ static int __init register_node_type(void)
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
- ret = sysdev_class_register(&node_class);
+ ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
if (!ret) {
hotplug_memory_notifier(node_memory_callback,
NODE_CALLBACK_PRI);
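The node.c hunk also replaces the sysdev class-attribute array with a NULL-terminated struct attribute pointer array wrapped in an attribute_group handed to subsys_system_register(). A small userland sketch of that array shape, with invented names, follows:

/* Userland sketch of the NULL-terminated attribute-array shape that the
 * node_state_attrs[] conversion above moves to: entries point at the inner
 * 'attr' member of a typed wrapper, and consumers walk until NULL. */
#include <stdio.h>

struct attribute_example {
	const char *name;
};

struct node_attr_example {
	struct attribute_example attr;
	int state;
};

static struct node_attr_example node_state_attr_ex[] = {
	{ { "possible" }, 0 },
	{ { "online"   }, 1 },
};

static struct attribute_example *node_state_attrs_ex[] = {
	&node_state_attr_ex[0].attr,
	&node_state_attr_ex[1].attr,
	NULL,			/* terminator, as an attribute group expects */
};

int main(void)
{
	struct attribute_example **p;

	for (p = node_state_attrs_ex; *p; p++)
		printf("%s\n", (*p)->name);
	return 0;
}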
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895543e..f0c605e99ad 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -383,7 +383,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device *platform_device_register_full(
- struct platform_device_info *pdevinfo)
+ const struct platform_device_info *pdevinfo)
{
int ret = -ENOMEM;
struct platform_device *pdev;
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
return ret;
}
-int platform_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
return ret;
}
-int platform_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
return ret;
}
-int platform_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
return ret;
}
-int platform_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
return ret;
}
-int platform_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
return ret;
}
-int platform_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
return ret;
}
-int platform_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_HIBERNATE_CALLBACKS */
static const struct dev_pm_ops platform_dev_pm_ops = {
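The platform.c hunk deletes the thin *_noirq wrappers whose only job was to call the driver's dev_pm_ops callback when the whole pointer chain was non-NULL. A userland sketch of that guarded optional-callback pattern, with illustrative types, is shown below:

/* Userland sketch of the guarded optional-callback pattern the deleted
 * platform_pm_*_noirq() wrappers implemented: call a function pointer only
 * if every link in the chain is non-NULL, otherwise report success. */
#include <stdio.h>

struct pm_ops_example {
	int (*suspend_noirq)(void *dev);
};

struct driver_example {
	const struct pm_ops_example *pm;
};

static int call_suspend_noirq(const struct driver_example *drv, void *dev)
{
	if (drv && drv->pm && drv->pm->suspend_noirq)
		return drv->pm->suspend_noirq(dev);
	return 0;	/* no callback: nothing to do */
}

static int demo_suspend_noirq(void *dev)
{
	printf("suspend_noirq on %p\n", dev);
	return 0;
}

int main(void)
{
	struct pm_ops_example ops = { .suspend_noirq = demo_suspend_noirq };
	struct driver_example drv = { .pm = &ops };
	int x;

	printf("%d %d\n", call_suspend_noirq(&drv, &x),
	       call_suspend_noirq(NULL, &x));
	return 0;
}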
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd1790..2e58ebb1f6c 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5..92e6a904806 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } else { \
+ __routine = dev_gpd_data(dev)->ops.callback; \
+ if (__routine) \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
+({ \
+ ktime_t __start = ktime_get(); \
+ type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
+ s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
+ struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
+ if (__elapsed > __gpd_data->td.field) { \
+ __gpd_data->td.field = __elapsed; \
+ dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ __elapsed); \
+ } \
+ __retval; \
+})
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+ stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+ start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
+ ktime_t time_start = ktime_get();
+ s64 elapsed_ns;
+
ret = genpd->power_on(genpd);
if (ret)
goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_on_latency_ns) {
+ genpd->power_on_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-on latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
int ret = 0;
if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_suspend) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- ret = drv->pm->runtime_suspend(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ ret = genpd_save_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
if (!gpd_data->need_restore)
return;
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_resume) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- drv->pm->runtime_resume(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
if (genpd->power_off) {
+ ktime_t time_start;
+ s64 elapsed_ns;
+
if (atomic_read(&genpd->sd_count) > 0) {
ret = -EBUSY;
goto out;
}
+ time_start = ktime_get();
+
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_set_active(genpd);
goto out;
}
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_off_latency_ns) {
+ genpd->power_off_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-off latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd->power_off_time = ktime_get();
+
+ /* Update PM QoS information for devices in the domain. */
+ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+ pm_runtime_update_max_time_suspended(pdd->dev,
+ td->start_latency_ns +
+ td->restore_state_latency_ns +
+ genpd->power_on_latency_ns);
+ }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ bool (*stop_ok)(struct device *__dev);
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (genpd->stop_device) {
- int ret = genpd->stop_device(dev);
- if (ret)
- return ret;
- }
+ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+ if (stop_ok && !stop_ok(dev))
+ return -EBUSY;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_update_max_time_suspended(dev,
+ dev_gpd_data(dev)->td.start_latency_ns);
/*
* If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_unlock(&genpd->lock);
out:
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
return 0;
}
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ active_wakeup = genpd_dev_active_wakeup(genpd, dev);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
* so pm_genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+ return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}
/**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_suspend_noirq(dev);
+ ret = genpd_suspend_late(genpd, dev);
if (ret)
return ret;
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_resume_noirq(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+ return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}
/**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+ return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}
/**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_freeze_noirq(dev);
+ ret = genpd_freeze_late(genpd, dev);
if (ret)
return ret;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
return 0;
}
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_thaw_noirq(dev);
+ return genpd_thaw_early(genpd, dev);
}
/**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off)
- return 0;
-
- ret = pm_generic_poweroff_noirq(dev);
- if (ret)
- return ret;
-
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
- return 0;
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
-
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}
/**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
-
- return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
+ genpd_start_dev(genpd, dev);
- return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
-#define pm_genpd_dev_poweroff_noirq NULL
-#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
-#define pm_genpd_restore NULL
#define pm_genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
/**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
* @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
*/
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct gpd_timing_data *td)
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
gpd_data->base.dev = dev;
gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ if (td)
+ gpd_data->td = *td;
out:
genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data && ops))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (clear_td)
+ gpd_data->td = (struct gpd_timing_data){ 0 };
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.save_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend)
+ return drv->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.restore_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_resume)
+ return drv->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+ return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+ return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+ return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+ return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+ return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+ return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+ return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+ return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
+ genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
- genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->dev_ops.save_state = pm_genpd_default_save_state;
+ genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+ genpd->dev_ops.suspend = pm_genpd_default_suspend;
+ genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+ genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+ genpd->dev_ops.resume = pm_genpd_default_resume;
+ genpd->dev_ops.freeze = pm_genpd_default_freeze;
+ genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+ genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+ genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
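GENPD_DEV_TIMED_CALLBACK() introduced above times each device callback and records the worst latency observed in the device's timing data. The fragment below is a simplified userland analogue using clock_gettime(); the field and function names are invented:

/* Simplified userland sketch of the timed-callback bookkeeping added above:
 * run a callback, measure how long it took, and remember the worst case. */
#include <stdio.h>
#include <time.h>

struct timing_data_example {
	long long stop_latency_ns;	/* worst observed latency */
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int timed_call(int (*cb)(void *), void *dev,
		      struct timing_data_example *td)
{
	long long start = now_ns();
	int ret = cb(dev);
	long long elapsed = now_ns() - start;

	if (elapsed > td->stop_latency_ns) {
		td->stop_latency_ns = elapsed;
		fprintf(stderr, "stop latency exceeded, new value %lld ns\n",
			elapsed);
	}
	return ret;
}

static int fake_stop(void *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	struct timing_data_example td = { 0 };
	int dev;

	timed_call(fake_stop, &dev, &td);
	printf("worst stop latency: %lld ns\n", td.stop_latency_ns);
	return 0;
}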
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 00000000000..51527ee92d1
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+ return true;
+
+ return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+ && td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_dev_off_time_ns;
+ s64 off_on_time_ns;
+ ktime_t time_now = ktime_get();
+
+ off_on_time_ns = genpd->power_off_latency_ns +
+ genpd->power_on_latency_ns;
+ /*
+ * It doesn't make sense to remove power from the domain if saving
+ * the state of all devices in it and the power off/power on operations
+ * take too much time.
+ *
+ * All devices in this domain have been stopped already at this point.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ if (pdd->dev->driver)
+ off_on_time_ns +=
+ to_gpd_data(pdd)->td.save_state_latency_ns;
+ }
+
+ /*
+ * Check if subdomains can be off for enough time.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ struct generic_pm_domain *sd = link->slave;
+ s64 sd_max_off_ns = sd->max_off_time_ns;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+ sd->power_off_time));
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+ }
+
+ /*
+ * Check if the devices in the domain can be off for enough time.
+ */
+ min_dev_off_time_ns = -1;
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ struct device *dev = pdd->dev;
+ s64 dev_off_time_ns;
+
+ if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ continue;
+
+ td = &to_gpd_data(pdd)->td;
+ dev_off_time_ns = dev->power.max_time_suspended_ns -
+ (td->start_latency_ns + td->restore_state_latency_ns +
+ ktime_to_ns(ktime_sub(time_now,
+ dev->power.suspend_time)));
+ if (dev_off_time_ns <= off_on_time_ns)
+ return false;
+
+ if (min_dev_off_time_ns > dev_off_time_ns
+ || min_dev_off_time_ns < 0)
+ min_dev_off_time_ns = dev_off_time_ns;
+ }
+
+ if (min_dev_off_time_ns < 0) {
+ /*
+ * There are no latency constraints, so the domain can spend
+ * arbitrary time in the "off" state.
+ */
+ genpd->max_off_time_ns = -1;
+ return true;
+ }
+
+ /*
+ * The difference between the computed minimum delta and the time needed
+ * to turn the domain on is the maximum theoretical time this domain can
+ * spend in the "off" state.
+ */
+ min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+ /*
+ * If the difference between the computed minimum delta and the time
+ * needed to turn the domain off and back on is smaller than the
+ * domain's power break even time, removing power from the domain is not
+ * worth it.
+ */
+ if (genpd->break_even_ns >
+ min_dev_off_time_ns - genpd->power_off_latency_ns)
+ return false;
+
+ genpd->max_off_time_ns = min_dev_off_time_ns;
+ return true;
+}
+
+struct dev_power_governor simple_qos_governor = {
+ .stop_ok = default_stop_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+/**
+ * pm_genpd_gov_always_on - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .stop_ok = default_stop_ok,
+};
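default_stop_ok() and default_power_down_ok() in this new file decide whether stopping a device or powering off a whole domain is worth the latency cost. The function below is a reduced, userland arithmetic sketch of the power-down decision under simplified assumptions (no subdomains, no elapsed-time correction); it is not the governor itself:

/* Reduced arithmetic sketch of the power-down decision above: powering the
 * domain off is only worthwhile if every device can tolerate the combined
 * off/on latency and the remaining slack beats the break-even time. */
#include <stdbool.h>
#include <stdio.h>

static bool power_down_ok_example(long long power_off_ns, long long power_on_ns,
				  const long long *max_suspended_ns, int n_devs,
				  long long break_even_ns)
{
	long long off_on = power_off_ns + power_on_ns;
	long long min_off = -1;
	int i;

	for (i = 0; i < n_devs; i++) {
		if (max_suspended_ns[i] < 0)
			continue;	/* no constraint from this device */
		if (max_suspended_ns[i] <= off_on)
			return false;	/* cannot even fit the off/on cycle */
		if (min_off < 0 || max_suspended_ns[i] < min_off)
			min_off = max_suspended_ns[i];
	}

	if (min_off < 0)
		return true;		/* no constraints at all */

	/* Slack left after paying for the off/on cycle must beat break-even. */
	return min_off - power_on_ns - power_off_ns > break_even_ns;
}

int main(void)
{
	long long limits[] = { 500000, 2000000 };

	printf("%d\n", power_down_ok_example(100000, 150000, limits, 2, 50000));
	return 0;
}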
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49..10bdd793f0b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
* @event: PM transition of the system under way.
* @bool: Whether or not this is the "noirq" stage.
*
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code. Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code. Return 0 if the callback is
+ * not present.
*/
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
- if (!pm || pm_runtime_suspended(dev))
+ if (!pm)
return 0;
switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
case PM_EVENT_HIBERNATE:
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
+ case PM_EVENT_RESUME:
+ callback = noirq ? pm->resume_noirq : pm->resume;
+ break;
case PM_EVENT_THAW:
callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
+ case PM_EVENT_RESTORE:
+ callback = noirq ? pm->restore_noirq : pm->restore;
+ break;
default:
callback = NULL;
break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw);
/**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined. If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
- int ret;
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
-
- if (!callback)
- return 0;
-
- ret = callback(dev);
- if (!ret && !noirq && pm_runtime_enabled(dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
-}
-
-/**
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = pm_generic_prepare,
- .suspend = pm_generic_suspend,
- .suspend_noirq = pm_generic_suspend_noirq,
- .resume = pm_generic_resume,
- .resume_noirq = pm_generic_resume_noirq,
- .freeze = pm_generic_freeze,
- .freeze_noirq = pm_generic_freeze_noirq,
- .thaw = pm_generic_thaw,
- .thaw_noirq = pm_generic_thaw_noirq,
- .poweroff = pm_generic_poweroff,
- .poweroff_noirq = pm_generic_poweroff_noirq,
- .restore = pm_generic_restore,
- .restore_noirq = pm_generic_restore_noirq,
- .complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
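The generic_ops.c rework above and the main.c hunk that follows converge on the same shape: map a PM event to a callback pointer once, then invoke it through one helper that treats a missing callback as success. A self-contained userland sketch of that dispatch, with invented event values, is given below:

/* Userland sketch of the event-to-callback dispatch now used by
 * __pm_generic_call() and by pm_op()/pm_noirq_op() in the next hunk:
 * select a callback for the event, then run it through one helper that
 * treats a missing callback as success. */
#include <stdio.h>

enum pm_event_example { EV_SUSPEND, EV_RESUME, EV_FREEZE, EV_THAW };

struct dev_pm_ops_example {
	int (*suspend)(void *dev);
	int (*resume)(void *dev);
	int (*freeze)(void *dev);
	int (*thaw)(void *dev);
};

typedef int (*pm_callback_example_t)(void *dev);

static pm_callback_example_t pm_op_example(const struct dev_pm_ops_example *ops,
					   enum pm_event_example event)
{
	switch (event) {
	case EV_SUSPEND:	return ops->suspend;
	case EV_RESUME:		return ops->resume;
	case EV_FREEZE:		return ops->freeze;
	case EV_THAW:		return ops->thaw;
	}
	return NULL;
}

static int run_callback_example(pm_callback_example_t cb, void *dev)
{
	return cb ? cb(dev) : 0;	/* no callback means nothing to do */
}

static int demo_resume(void *dev)
{
	printf("resume %p\n", dev);
	return 0;
}

int main(void)
{
	struct dev_pm_ops_example ops = { .resume = demo_resume };
	int dev;

	return run_callback_example(pm_op_example(&ops, EV_RESUME), &dev);
}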
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438..e2cc3d2e0ec 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"
+typedef int (*pm_callback_t)(struct device *);
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
ktime_t calltime = ktime_set(0, 0);
if (initcall_debug) {
- pr_info("calling %s+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
}
/**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend) {
- error = ops->suspend(dev);
- suspend_report_result(ops->suspend, error);
- }
- break;
+ return ops->suspend;
case PM_EVENT_RESUME:
- if (ops->resume) {
- error = ops->resume(dev);
- suspend_report_result(ops->resume, error);
- }
- break;
+ return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze) {
- error = ops->freeze(dev);
- suspend_report_result(ops->freeze, error);
- }
- break;
+ return ops->freeze;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff) {
- error = ops->poweroff(dev);
- suspend_report_result(ops->poweroff, error);
- }
- break;
+ return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw) {
- error = ops->thaw(dev);
- suspend_report_result(ops->thaw, error);
- }
+ return ops->thaw;
break;
case PM_EVENT_RESTORE:
- if (ops->restore) {
- error = ops->restore(dev);
- suspend_report_result(ops->restore, error);
- }
- break;
+ return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
}
- initcall_debug_report(dev, calltime, error);
-
- return error;
+ return NULL;
}
/**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int pm_noirq_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
- if (initcall_debug) {
- pr_info("calling %s+ @ %i, parent: %s\n",
- dev_name(dev), task_pid_nr(current),
- dev->parent ? dev_name(dev->parent) : "none");
- calltime = ktime_get();
- }
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend_noirq) {
- error = ops->suspend_noirq(dev);
- suspend_report_result(ops->suspend_noirq, error);
- }
- break;
+ return ops->suspend_noirq;
case PM_EVENT_RESUME:
- if (ops->resume_noirq) {
- error = ops->resume_noirq(dev);
- suspend_report_result(ops->resume_noirq, error);
- }
- break;
+ return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze_noirq) {
- error = ops->freeze_noirq(dev);
- suspend_report_result(ops->freeze_noirq, error);
- }
- break;
+ return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff_noirq) {
- error = ops->poweroff_noirq(dev);
- suspend_report_result(ops->poweroff_noirq, error);
- }
- break;
+ return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw_noirq) {
- error = ops->thaw_noirq(dev);
- suspend_report_result(ops->thaw_noirq, error);
- }
- break;
+ return ops->thaw_noirq;
case PM_EVENT_RESTORE:
- if (ops->restore_noirq) {
- error = ops->restore_noirq(dev);
- suspend_report_result(ops->restore_noirq, error);
- }
- break;
+ return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
- }
-
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- printk("initcall %s_i+ returned %d after %Ld usecs\n",
- dev_name(dev), error,
- (unsigned long long)ktime_to_ns(delta) >> 10);
}
- return error;
+ return NULL;
}
static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev);
+
+ pm_dev_dbg(dev, state, info);
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
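
The hunks above turn pm_op() and pm_noirq_op() into pure selectors and move the timing, suspend_report_result() and NULL handling into the new dpm_run_callback() helper. The snippet below only illustrates the resulting contract; it is not part of the patch, would have to live in drivers/base/power/main.c where the selectors are static, and assumes CONFIG_SUSPEND and CONFIG_HIBERNATE_CALLBACKS are enabled.

/* Illustration only: the selectors are now plain table lookups, and a
 * NULL callback passed to dpm_run_callback() is a successful no-op. */
static void pm_op_illustration(struct device *dev, pm_message_t state)
{
	const struct dev_pm_ops *ops = dev->bus->pm;

	WARN_ON(pm_op(ops, PMSG_SUSPEND) != ops->suspend);
	WARN_ON(pm_op(ops, PMSG_RESUME) != ops->resume);
	WARN_ON(pm_noirq_op(ops, PMSG_FREEZE) != ops->freeze_noirq);
	WARN_ON(pm_noirq_op(ops, PMSG_HIBERNATE) != ops->poweroff_noirq);

	WARN_ON(dpm_run_callback(NULL, dev, state, NULL) != 0);
}
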
/*------------------------- Resume routines -------------------------*/
/**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+ info = "EARLY power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "EARLY type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
+ info = "EARLY type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "EARLY class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
+ info = "EARLY class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
+ info = "EARLY bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "EARLY driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
TRACE_RESUME(error);
return error;
}
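
The practical effect of the new "EARLY driver" fallback above, and of the analogous fallbacks added to the other phases below, is that a driver's own dev_pm_ops is consulted whenever the power domain, type, class and bus supply no callback for the phase. A minimal, hypothetical driver relying on that could look like this (all "foo" names are invented):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_resume(struct device *dev)
{
	/* restore hardware state here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.resume		= foo_resume,
	.resume_noirq	= foo_resume,	/* reached via the "EARLY driver" path */
};

static struct device_driver foo_driver = {
	.name	= "foo",
	.pm	= &foo_pm_ops,
};
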
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
- int error;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
- error = cb(dev);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
bool put = false;
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
put = true;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
} else if (dev->class->resume) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_resume(dev, dev->class->resume);
+ info = "legacy class ";
+ callback = dev->class->resume;
goto End;
}
}
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
}
}
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
End:
+ error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
+ void (*callback)(struct device *) = NULL;
+ char *info = NULL;
+
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pm_domain->ops.complete)
- dev->pm_domain->ops.complete(dev);
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "completing type ");
- if (dev->type->pm->complete)
- dev->type->pm->complete(dev);
+ info = "completing type ";
+ callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "completing class ");
- if (dev->class->pm->complete)
- dev->class->pm->complete(dev);
+ info = "completing class ";
+ callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "completing ");
- if (dev->bus->pm->complete)
- dev->bus->pm->complete(dev);
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
}
device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error;
+ pm_callback_t callback = NULL;
+ char *info = NULL;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
- if (error)
- return error;
+ info = "LATE power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "LATE type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- return error;
+ info = "LATE type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- return error;
+ info = "LATE class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "LATE ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- return error;
+ info = "LATE bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
- return 0;
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "LATE driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
}
/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
- pm_dev_dbg(dev, state, "legacy ");
+ pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend);
+ goto End;
}
}
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
End:
if (!error) {
dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
+ int (*callback)(struct device *) = NULL;
+ char *info = NULL;
int error = 0;
device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = device_may_wakeup(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pm_domain->ops.prepare)
- error = dev->pm_domain->ops.prepare(dev);
- suspend_report_result(dev->pm_domain->ops.prepare, error);
- if (error)
- goto End;
+ info = "preparing power domain ";
+ callback = dev->pm_domain->ops.prepare;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "preparing type ");
- if (dev->type->pm->prepare)
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing type ";
+ callback = dev->type->pm->prepare;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "preparing class ");
- if (dev->class->pm->prepare)
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing class ";
+ callback = dev->class->pm->prepare;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "preparing ");
- if (dev->bus->pm->prepare)
- error = dev->bus->pm->prepare(dev);
- suspend_report_result(dev->bus->pm->prepare, error);
+ info = "preparing bus ";
+ callback = dev->bus->pm->prepare;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "preparing driver ";
+ callback = dev->driver->pm->prepare;
+ }
+
+ if (callback) {
+ error = callback(dev);
+ suspend_report_result(callback, error);
}
- End:
device_unlock(dev);
return error;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c50fc4..c5d35883746 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c = dev->power.constraints;
+
+ return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
*/
s32 dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c;
unsigned long flags;
- s32 ret = 0;
+ s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
-
- c = dev->power.constraints;
- if (c)
- ret = pm_qos_read_value(c);
-
+ ret = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int error = -ENODEV;
+
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ if (ancestor)
+ error = dev_pm_qos_add_request(ancestor, req, value);
+
+ if (error)
+ req->dev = NULL;
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
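
dev_pm_qos_add_ancestor_request() walks up the device tree to the first ancestor whose ignore_children flag is clear and attaches the latency request there, or clears req->dev and returns an error if no such ancestor exists. A hypothetical caller, in which only the dev_pm_qos_* names are taken from the API:

#include <linux/device.h>
#include <linux/pm_qos.h>

struct foo_chip {				/* invented driver data */
	struct dev_pm_qos_request latency_req;
};

static void foo_constrain_parent(struct device *dev, struct foo_chip *foo)
{
	/* Ask the nearest power-relevant ancestor to resume within ~100 us
	 * while incoming data is expected; failure only means no suitable
	 * ancestor was found. */
	if (dev_pm_qos_add_ancestor_request(dev, &foo->latency_req, 100) < 0)
		dev_dbg(dev, "no ancestor for PM QoS latency request\n");
}
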
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8..541f821d4ea 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_idle;
+
if (callback)
__rpm_callback(callback, dev);
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
+struct rpm_qos_data {
+ ktime_t time_now;
+ s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+ struct rpm_qos_data *qos = data;
+ unsigned long flags;
+ s64 delta_ns;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.max_time_suspended_ns < 0)
+ goto out;
+
+ delta_ns = dev->power.max_time_suspended_ns -
+ ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+ if (delta_ns <= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+ qos->constraint_ns = delta_ns;
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
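
A worked example of the arithmetic in rpm_update_qos_constraint(), with purely illustrative numbers:

/*
 * Suppose a child suspended at time T with a 5 ms budget and the parent
 * starts suspending 2 ms later:
 *
 *	child->power.suspend_time		= T
 *	child->power.max_time_suspended_ns	= 5,000,000
 *	qos->time_now				= T + 2,000,000
 *
 *	delta_ns = 5,000,000 - 2,000,000 = 3,000,000
 *
 * so this child still tolerates 3 ms of parent suspend time.  Walking all
 * children keeps the smallest positive delta_ns in qos->constraint_ns; a
 * non-positive delta_ns returns -EBUSY and aborts the parent's suspend.
 */
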
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
+ struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ qos.constraint_ns = __dev_pm_qos_read_value(dev);
+ if (qos.constraint_ns < 0) {
+ /* Negative constraint means "never suspend". */
+ retval = -EPERM;
+ goto out;
+ }
+ qos.constraint_ns *= NSEC_PER_USEC;
+ qos.time_now = ktime_get();
+
__update_runtime_status(dev, RPM_SUSPENDING);
+ if (!dev->power.ignore_children) {
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = device_for_each_child(dev, &qos,
+ rpm_update_qos_constraint);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ if (retval)
+ goto fail;
+ }
+
+ dev->power.suspend_time = qos.time_now;
+ dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_suspend;
+
retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = false;
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ if (retval)
+ goto fail;
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
- wake_up_all(&dev->power.wait_queue);
- goto out;
- }
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
+
+ fail:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+ dev->power.deferred_resume = false;
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ wake_up_all(&dev->power.wait_queue);
+ goto out;
}
/**
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_resume;
+
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+ if (dev->power.max_time_suspended_ns > delta_ns)
+ dev->power.max_time_suspended_ns -= delta_ns;
+ else
+ dev->power.max_time_suspended_ns = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
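
pm_runtime_update_max_time_suspended() is meant for callers that know how much of a device's suspend-time budget has already been consumed, for example by a measured power-up latency. A hypothetical caller; only the pm_runtime_* name is real:

#include <linux/pm_runtime.h>

static void foo_account_power_on(struct device *dev, s64 measured_latency_ns)
{
	/* Charge the observed start-up latency against whatever suspend
	 * budget the device has left. */
	pm_runtime_update_max_time_suspended(dev, measured_latency_ns);
}
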
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 2fc6a66f39a..0f6c7fb418e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -13,3 +13,6 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+
+config REGMAP_IRQ
+ bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0573c8a9dac..defd57963c8 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 348ff02eb93..1a02b7537c8 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -74,6 +74,7 @@ struct regmap {
struct reg_default *reg_defaults;
const void *reg_defaults_raw;
void *cache;
+ bool cache_dirty;
};
struct regcache_ops {
@@ -105,7 +106,7 @@ static inline void regmap_debugfs_exit(struct regmap *map) { }
#endif
/* regcache core declarations */
-int regcache_init(struct regmap *map);
+int regcache_init(struct regmap *map, const struct regmap_config *config);
void regcache_exit(struct regmap *map);
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value);
@@ -118,10 +119,7 @@ unsigned int regcache_get_val(const void *base, unsigned int idx,
bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val);
-extern struct regcache_ops regcache_indexed_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
deleted file mode 100644
index 507731ad8ec..00000000000
--- a/drivers/base/regmap/regcache-indexed.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Register cache access API - indexed caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int regcache_indexed_read(struct regmap *map, unsigned int reg,
- unsigned int *value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret >= 0)
- *value = map->reg_defaults[ret].def;
-
- return ret;
-}
-
-static int regcache_indexed_write(struct regmap *map, unsigned int reg,
- unsigned int value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret < 0)
- return regcache_insert_reg(map, reg, value);
- map->reg_defaults[ret].def = value;
- return 0;
-}
-
-static int regcache_indexed_sync(struct regmap *map)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = _regmap_write(map, map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret < 0)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- }
- return 0;
-}
-
-struct regcache_ops regcache_indexed_ops = {
- .type = REGCACHE_INDEXED,
- .name = "indexed",
- .read = regcache_indexed_read,
- .write = regcache_indexed_write,
- .sync = regcache_indexed_sync
-};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 066aeece362..b7d16143ede 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -15,6 +15,8 @@
#include "internal.h"
+static int regcache_lzo_exit(struct regmap *map);
+
struct regcache_lzo_ctx {
void *wmem;
void *dst;
@@ -27,7 +29,7 @@ struct regcache_lzo_ctx {
};
#define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(void)
+static int regcache_lzo_block_count(struct regmap *map)
{
return LZO_BLOCK_NUM;
}
@@ -106,19 +108,22 @@ static inline int regcache_lzo_get_blkindex(struct regmap *map,
unsigned int reg)
{
return (reg * map->cache_word_size) /
- DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static inline int regcache_lzo_get_blkpos(struct regmap *map,
unsigned int reg)
{
- return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+ return reg % (DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map)) /
map->cache_word_size);
}
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
- return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ return DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static int regcache_lzo_init(struct regmap *map)
@@ -131,7 +136,7 @@ static int regcache_lzo_init(struct regmap *map)
ret = 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
GFP_KERNEL);
if (!map->cache)
@@ -190,7 +195,7 @@ static int regcache_lzo_init(struct regmap *map)
return 0;
err:
- regcache_exit(map);
+ regcache_lzo_exit(map);
return ret;
}
@@ -203,7 +208,7 @@ static int regcache_lzo_exit(struct regmap *map)
if (!lzo_blocks)
return 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
/*
* the pointer to the bitmap used for syncing the cache
* is shared amongst all lzo_blocks. Ensure it is freed
@@ -351,7 +356,7 @@ static int regcache_lzo_sync(struct regmap *map)
}
struct regcache_ops regcache_lzo_ops = {
- .type = REGCACHE_LZO,
+ .type = REGCACHE_COMPRESSED,
.name = "lzo",
.init = regcache_lzo_init,
.exit = regcache_lzo_exit,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e31498499b0..32620c4f168 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -11,12 +11,15 @@
*/
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <linux/rbtree.h>
+#include <linux/seq_file.h>
#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
/* the actual rbtree node holding this block */
@@ -124,6 +127,60 @@ static int regcache_rbtree_insert(struct rb_root *root,
return 1;
}
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+ struct regmap *map = s->private;
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct regcache_rbtree_node *n;
+ struct rb_node *node;
+ unsigned int base, top;
+ int nodes = 0;
+ int registers = 0;
+
+ mutex_lock(&map->lock);
+
+ for (node = rb_first(&rbtree_ctx->root); node != NULL;
+ node = rb_next(node)) {
+ n = container_of(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(n, &base, &top);
+ seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+
+ nodes++;
+ registers += top - base + 1;
+ }
+
+ seq_printf(s, "%d nodes, %d registers, average %d registers\n",
+ nodes, registers, registers / nodes);
+
+ mutex_unlock(&map->lock);
+
+ return 0;
+}
+
+static int rbtree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rbtree_show, inode->i_private);
+}
+
+static const struct file_operations rbtree_fops = {
+ .open = rbtree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+ debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#else
+static void rbtree_debugfs_init(struct regmap *map)
+{
+}
+#endif
+
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
@@ -146,10 +203,12 @@ static int regcache_rbtree_init(struct regmap *map)
goto err;
}
+ rbtree_debugfs_init(map);
+
return 0;
err:
- regcache_exit(map);
+ regcache_rbtree_exit(map);
return ret;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 666f6f5011d..1ead66186b7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -19,7 +19,6 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
- &regcache_indexed_ops,
&regcache_rbtree_ops,
&regcache_lzo_ops,
};
@@ -61,8 +60,10 @@ static int regcache_hw_init(struct regmap *map)
map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
GFP_KERNEL);
- if (!map->reg_defaults)
- return -ENOMEM;
+ if (!map->reg_defaults) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
/* fill the reg_defaults */
map->num_reg_defaults = count;
@@ -77,9 +78,15 @@ static int regcache_hw_init(struct regmap *map)
}
return 0;
+
+err_free:
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
-int regcache_init(struct regmap *map)
+int regcache_init(struct regmap *map, const struct regmap_config *config)
{
int ret;
int i;
@@ -100,6 +107,12 @@ int regcache_init(struct regmap *map)
return -EINVAL;
}
+ map->num_reg_defaults = config->num_reg_defaults;
+ map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+ map->reg_defaults_raw = config->reg_defaults_raw;
+ map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -112,10 +125,10 @@ int regcache_init(struct regmap *map)
* won't vanish from under us. We'll need to make
* a copy of it.
*/
- if (map->reg_defaults) {
+ if (config->reg_defaults) {
if (!map->num_reg_defaults)
return -EINVAL;
- tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
sizeof(struct reg_default), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
@@ -136,9 +149,18 @@ int regcache_init(struct regmap *map)
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
- return map->cache_ops->init(map);
+ ret = map->cache_ops->init(map);
+ if (ret)
+ goto err_free;
}
return 0;
+
+err_free:
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
void regcache_exit(struct regmap *map)
@@ -171,16 +193,21 @@ void regcache_exit(struct regmap *map)
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
+ int ret;
+
if (map->cache_type == REGCACHE_NONE)
return -ENOSYS;
BUG_ON(!map->cache_ops);
- if (!regmap_readable(map, reg))
- return -EIO;
+ if (!regmap_volatile(map, reg)) {
+ ret = map->cache_ops->read(map, reg, value);
- if (!regmap_volatile(map, reg))
- return map->cache_ops->read(map, reg, value);
+ if (ret == 0)
+ trace_regmap_reg_read_cache(map->dev, reg, *value);
+
+ return ret;
+ }
return -EINVAL;
}
@@ -241,6 +268,8 @@ int regcache_sync(struct regmap *map)
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map->dev, name, "start");
+ if (!map->cache_dirty)
+ goto out;
if (map->cache_ops->sync) {
ret = map->cache_ops->sync(map);
} else {
@@ -291,6 +320,23 @@ void regcache_cache_only(struct regmap *map, bool enable)
EXPORT_SYMBOL_GPL(regcache_cache_only);
/**
+ * regcache_mark_dirty: Mark the register cache as dirty
+ *
+ * @map: map to mark
+ *
+ * Mark the register cache as dirty, for example due to the device
+ * having been powered down for suspend. If the cache is not marked
+ * as dirty then the cache sync will be suppressed.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+ mutex_lock(&map->lock);
+ map->cache_dirty = true;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
+
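
As the kerneldoc above says, regcache_mark_dirty() is what keeps the new cache_dirty check in regcache_sync() from suppressing a sync that is actually needed. A typical pairing in a driver whose device loses register state while powered off might look like this; the "foo" names and power helpers are invented:

#include <linux/regmap.h>

struct foo {
	struct regmap *regmap;
};

static void foo_power_off(struct foo *foo) { /* drop a regulator, etc. */ }
static void foo_power_on(struct foo *foo)  { /* re-enable the supply */ }

static int foo_suspend(struct foo *foo)
{
	regcache_cache_only(foo->regmap, true);
	regcache_mark_dirty(foo->regmap);
	foo_power_off(foo);
	return 0;
}

static int foo_resume(struct foo *foo)
{
	foo_power_on(foo);
	regcache_cache_only(foo->regmap, false);
	return regcache_sync(foo->regmap);	/* restores cached values */
}
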
+/**
* regcache_cache_bypass: Put a register map into cache bypass mode
*
* @map: map to configure
@@ -381,22 +427,3 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
else
return -ENOENT;
}
-
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val)
-{
- void *tmp;
-
- tmp = krealloc(map->reg_defaults,
- (map->num_reg_defaults + 1) * sizeof(struct reg_default),
- GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- map->reg_defaults = tmp;
- map->num_reg_defaults++;
- map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
- map->reg_defaults[map->num_reg_defaults - 1].def = val;
- sort(map->reg_defaults, map->num_reg_defaults,
- sizeof(struct reg_default), regcache_default_cmp, NULL);
- return 0;
-}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 00000000000..428836fc583
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,302 @@
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+ struct mutex lock;
+
+ struct regmap *map;
+ struct regmap_irq_chip *chip;
+
+ int irq_base;
+
+ void *status_reg_buf;
+ unsigned int *status_buf;
+ unsigned int *mask_buf;
+ unsigned int *mask_buf_def;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+ int irq)
+{
+ return &data->chip->irqs[irq - data->irq_base];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ int i, ret;
+
+ /*
+ * If there's been a change in the mask write it back to the
+ * hardware. We rely on the use of the regmap core cache to
+ * suppress pointless writes.
+ */
+ for (i = 0; i < d->chip->num_regs; i++) {
+ ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ d->chip->mask_base + i);
+ }
+
+ mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+}
+
+static struct irq_chip regmap_irq_chip = {
+ .name = "regmap",
+ .irq_bus_lock = regmap_irq_lock,
+ .irq_bus_sync_unlock = regmap_irq_sync_unlock,
+ .irq_disable = regmap_irq_disable,
+ .irq_enable = regmap_irq_enable,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ bool handled = false;
+
+ ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
+ chip->num_regs);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+ * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+
+ data->status_buf[i] &= ~data->mask_buf[i];
+
+ if (data->status_buf[i] && chip->ack_base) {
+ ret = regmap_write(map, chip->ack_base + i,
+ data->status_buf[i]);
+ if (ret != 0)
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ chip->ack_base + i, ret);
+ }
+ }
+
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (data->status_buf[chip->irqs[i].reg_offset] &
+ chip->irqs[i].mask) {
+ handle_nested_irq(data->irq_base + i);
+ handled = true;
+ }
+ }
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+/**
+ * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ *
+ * map: The regmap for the device.
+ * irq: The IRQ the device uses to signal interrupts
+ * irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * chip: Configuration for the interrupt controller.
+ * data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache. The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data *d;
+ int cur_irq, i;
+ int ret = -ENOMEM;
+
+ irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+ if (irq_base < 0) {
+ dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ return irq_base;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_buf)
+ goto err_alloc;
+
+ d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+
+ d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf)
+ goto err_alloc;
+
+ d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf_def)
+ goto err_alloc;
+
+ d->map = map;
+ d->chip = chip;
+ d->irq_base = irq_base;
+ mutex_init(&d->lock);
+
+ for (i = 0; i < chip->num_irqs; i++)
+ d->mask_buf_def[chip->irqs[i].reg_offset]
+ |= chip->irqs[i].mask;
+
+ /* Mask all the interrupts by default */
+ for (i = 0; i < chip->num_regs; i++) {
+ d->mask_buf[i] = d->mask_buf_def[i];
+ ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ chip->mask_base + i, ret);
+ goto err_alloc;
+ }
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = irq_base;
+ cur_irq < chip->num_irqs + irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, d);
+ irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+ chip->name, d);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
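
A hypothetical client of the new interrupt controller support, showing the data the implementation above consumes. Register addresses and all "foo" names are invented; -1 as irq_base requests dynamically allocated interrupt descriptors.

#include <linux/interrupt.h>
#include <linux/regmap.h>

static struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* e.g. GPIO event */
	{ .reg_offset = 0, .mask = BIT(1) },	/* e.g. RTC alarm  */
};

static struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x10,			/* invented addresses */
	.mask_base	= 0x11,
	.ack_base	= 0x12,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

static int foo_add_irqs(struct regmap *map, int irq,
			struct regmap_irq_chip_data **irq_data)
{
	int ret;

	/* IRQF_ONESHOT keeps the line masked until the thread has run. */
	ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, -1,
				  &foo_irq_chip, irq_data);
	if (ret)
		return ret;

	return regmap_irq_chip_get_base(*irq_data);
}
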
+/**
+ * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+ if (!d)
+ return;
+
+ free_irq(irq, d);
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+ return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bf441db1ee9..be10a4ff660 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -64,6 +64,18 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (!regmap_volatile(map, reg + i))
+ return false;
+
+ return true;
+}
+
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -78,6 +90,16 @@ static void regmap_format_7_9_write(struct regmap *map,
*out = cpu_to_be16((reg << 9) | val);
}
+static void regmap_format_10_14_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = (val >> 8) | (reg << 6);
+ out[0] = reg >> 2;
+}
+
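
A worked example of the 10/14 packing above, with illustrative values:

/*
 * reg = 0x2a5 (10 bits), val = 0x1234 (14 bits):
 *
 *	out[0] = reg >> 2		 = 0xa9		(reg[9:2])
 *	out[1] = (val >> 8) | (reg << 6) = 0x52		(reg[1:0], val[13:8])
 *	out[2] = val			 = 0x34		(val[7:0])
 *
 * i.e. the three bytes on the wire carry rrrrrrrr rrvvvvvv vvvvvvvv.
 */
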
static void regmap_format_8(void *buf, unsigned int val)
{
u8 *b = buf;
@@ -127,7 +149,7 @@ struct regmap *regmap_init(struct device *dev,
int ret = -EINVAL;
if (!bus || !config)
- return NULL;
+ goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
@@ -147,12 +169,6 @@ struct regmap *regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->reg_defaults = config->reg_defaults;
- map->num_reg_defaults = config->num_reg_defaults;
- map->num_reg_defaults_raw = config->num_reg_defaults_raw;
- map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
- map->cache_word_size = config->val_bits / 8;
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
@@ -182,6 +198,16 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 10:
+ switch (config->val_bits) {
+ case 14:
+ map->format.format_write = regmap_format_10_14_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
case 8:
map->format.format_reg = regmap_format_8;
break;
@@ -215,14 +241,16 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- ret = regcache_init(map);
- if (ret < 0)
- goto err_map;
-
regmap_debugfs_init(map);
+ ret = regcache_init(map, config);
+ if (ret < 0)
+ goto err_free_workbuf;
+
return map;
+err_free_workbuf:
+ kfree(map->work_buf);
err_map:
kfree(map);
err:
@@ -231,6 +259,39 @@ err:
EXPORT_SYMBOL_GPL(regmap_init);
/**
+ * regmap_reinit_cache(): Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration. Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache. This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ regcache_exit(map);
+
+ map->max_register = config->max_register;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->cache_type = config->cache_type;
+
+ ret = regcache_init(map, config);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+
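
A hypothetical use of regmap_reinit_cache(): re-initialising the cache for a device whose register defaults depend on a revision read at runtime. Everything except the regmap API is invented.

#include <linux/regmap.h>

#define FOO_REG_REVISION	0x00		/* invented register */

static const struct regmap_config foo_config_a = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.cache_type	= REGCACHE_RBTREE,
	/* .reg_defaults / .num_reg_defaults for revision A */
};

static const struct regmap_config foo_config_b = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.cache_type	= REGCACHE_RBTREE,
	/* .reg_defaults / .num_reg_defaults for revision B */
};

static int foo_setup_cache(struct regmap *map)
{
	unsigned int rev;
	int ret;

	ret = regmap_read(map, FOO_REG_REVISION, &rev);
	if (ret)
		return ret;

	/* Swap in register defaults matching the silicon actually found. */
	return regmap_reinit_cache(map, rev ? &foo_config_b : &foo_config_a);
}
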
+/**
* regmap_exit(): Free a previously allocated register map
*/
void regmap_exit(struct regmap *map)
@@ -306,8 +367,10 @@ int _regmap_write(struct regmap *map, unsigned int reg,
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
- if (map->cache_only)
+ if (map->cache_only) {
+ map->cache_dirty = true;
return 0;
+ }
}
trace_regmap_reg_write(map->dev, reg, val);
@@ -375,9 +438,11 @@ EXPORT_SYMBOL_GPL(regmap_write);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- WARN_ON(map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -422,15 +487,15 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
{
int ret;
- if (!map->format.parse_val)
- return -EINVAL;
-
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
return 0;
}
+ if (!map->format.parse_val)
+ return -EINVAL;
+
if (map->cache_only)
return -EBUSY;
@@ -481,15 +546,11 @@ EXPORT_SYMBOL_GPL(regmap_read);
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- int i;
- bool vol = true;
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
-
- WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -517,16 +578,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
{
int ret, i;
size_t val_bytes = map->format.val_bytes;
- bool vol = true;
+ bool vol = regmap_volatile_range(map, reg, val_count);
if (!map->format.parse_val)
return -EINVAL;
- /* Is this a block of volatile registers? */
- for (i = 0; i < val_count; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
-
if (vol || map->cache_type == REGCACHE_NONE) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
@@ -546,40 +602,73 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
-/**
- * remap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
{
int ret;
- unsigned int tmp;
+ unsigned int tmp, orig;
mutex_lock(&map->lock);
- ret = _regmap_read(map, reg, &tmp);
+ ret = _regmap_read(map, reg, &orig);
if (ret != 0)
goto out;
- tmp &= ~mask;
+ tmp = orig & ~mask;
tmp |= val & mask;
- ret = _regmap_write(map, reg, tmp);
+ if (tmp != orig) {
+ ret = _regmap_write(map, reg, tmp);
+ *change = true;
+ } else {
+ *change = false;
+ }
out:
mutex_unlock(&map->lock);
return ret;
}
+
+/**
+ * regmap_update_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ bool change;
+ return _regmap_update_bits(map, reg, mask, val, &change);
+}
EXPORT_SYMBOL_GPL(regmap_update_bits);
+/**
+ * regmap_update_bits_check: Perform a read/modify/write cycle on the
+ * register map and report if updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return _regmap_update_bits(map, reg, mask, val, change);
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+
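
A hypothetical caller of the new _check variant, skipping follow-up work when the read/modify/write turned out to be a no-op. Register and bit names are invented.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/regmap.h>

#define FOO_REG_CTRL	0x01		/* invented register */
#define FOO_CTRL_EN	BIT(0)

static int foo_enable(struct regmap *map)
{
	bool changed;
	int ret;

	ret = regmap_update_bits_check(map, FOO_REG_CTRL,
				       FOO_CTRL_EN, FOO_CTRL_EN, &changed);
	if (ret)
		return ret;

	/* Only pay the settle-time cost if the bit actually toggled. */
	if (changed)
		usleep_range(1000, 2000);

	return 0;
}
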
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 9dff77bfe1e..409f5ce7882 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -126,7 +126,7 @@ void sysdev_class_remove_file(struct sysdev_class *c,
}
EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
-static struct kset *system_kset;
+extern struct kset *system_kset;
int sysdev_class_register(struct sysdev_class *cls)
{
@@ -331,14 +331,6 @@ void sysdev_unregister(struct sys_device *sysdev)
EXPORT_SYMBOL_GPL(sysdev_register);
EXPORT_SYMBOL_GPL(sysdev_unregister);
-int __init system_bus_init(void)
-{
- system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
- return -ENOMEM;
- return 0;
-}
-
#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)
ssize_t sysdev_store_ulong(struct sys_device *sysdev,
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index f6f37a05a0c..ae989c57cd5 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
@@ -32,14 +31,14 @@
#include <linux/topology.h>
#define define_one_ro_named(_name, _func) \
-static SYSDEV_ATTR(_name, 0444, _func, NULL)
+ static DEVICE_ATTR(_name, 0444, _func, NULL)
#define define_one_ro(_name) \
-static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
+ static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
#define define_id_show_func(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
@@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
#ifdef arch_provides_topology_pointers
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return show_cpumap(0, topology_##name(cpu), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
unsigned int cpu = dev->id; \
@@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
#else
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
return show_cpumap(0, topology_##name(dev->id), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
@@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list, show_book_cpumask_list);
#endif
static struct attribute *default_attrs[] = {
- &attr_physical_package_id.attr,
- &attr_core_id.attr,
- &attr_thread_siblings.attr,
- &attr_thread_siblings_list.attr,
- &attr_core_siblings.attr,
- &attr_core_siblings_list.attr,
+ &dev_attr_physical_package_id.attr,
+ &dev_attr_core_id.attr,
+ &dev_attr_thread_siblings.attr,
+ &dev_attr_thread_siblings_list.attr,
+ &dev_attr_core_siblings.attr,
+ &dev_attr_core_siblings_list.attr,
#ifdef CONFIG_SCHED_BOOK
- &attr_book_id.attr,
- &attr_book_siblings.attr,
- &attr_book_siblings_list.attr,
+ &dev_attr_book_id.attr,
+ &dev_attr_book_siblings.attr,
+ &dev_attr_book_siblings_list.attr,
#endif
NULL
};
@@ -146,16 +145,16 @@ static struct attribute_group topology_attr_group = {
/* Add/Remove cpu_topology interface for CPU device */
static int __cpuinit topology_add_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- return sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+ return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
static void __cpuinit topology_remove_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
+ sysfs_remove_group(&dev->kobj, &topology_attr_group);
}
static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 30a3085d335..fda56bde36b 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1b51d8b7ac8..443b83a2fd7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pr_debug("Switched to core: 0x%X\n", core->id.id);
}
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
+ switch (core->id.id) {
+ case BCMA_CORE_CHIPCOMMON:
+ return 3 * BCMA_CORE_SIZE;
+ case BCMA_CORE_PCIE:
+ return 2 * BCMA_CORE_SIZE;
+ }
+
if (core->bus->mapped_core != core)
bcma_host_pci_switch_core(core);
+ return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread8(core->bus->mmio + offset);
}
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread16(core->bus->mmio + offset);
}
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread32(core->bus->mmio + offset);
}
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
u8 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite8(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
u16 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite16(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
u32 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite32(value, core->bus->mmio + offset);
}
@@ -224,6 +234,41 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ /* Host specific */
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ return 0;
+}
+
+static int bcma_host_pci_resume(struct pci_dev *dev)
+{
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+ int err;
+
+ /* Host specific */
+ pci_set_power_state(dev, 0);
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ pci_restore_state(dev);
+
+ /* Bus specific */
+ err = bcma_bus_resume(bus);
+ if (err)
+ return err;
+
+ return 0;
+}
+#else /* CONFIG_PM */
+# define bcma_host_pci_suspend NULL
+# define bcma_host_pci_resume NULL
+#endif /* CONFIG_PM */
+
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -239,6 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
+ .suspend = bcma_host_pci_suspend,
+ .resume = bcma_host_pci_resume,
};
int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 70c84b95109..10f92b371e5 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -240,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return 0;
}
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ return 0;
+}
+#endif
+
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index d7292390d23..6f230fb087c 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
u16 v;
int i;
+ bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+ SSB_SPROM_REVISION_REV;
+
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
+ bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+ bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+ bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+ bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+ bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+ bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+ bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+ bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+ bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+ bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+ bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+ bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+ bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+ bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+ bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+ bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+ bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+ bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}
int bcma_sprom_get(struct bcma_bus *bus)
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8eba86bba59..386146d792d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -63,7 +63,7 @@
#include <linux/mutex.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 5f8e39c43ae..e86d2062a16 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -270,7 +270,7 @@ static const struct file_operations aoe_fops = {
.llseek = noop_llseek,
};
-static char *aoe_devnode(struct device *dev, mode_t *mode)
+static char *aoe_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d22119d49e5..ec246437f5a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -17,7 +17,7 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
-#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
+#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -402,14 +402,13 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
error = -EBUSY;
if (bdev->bd_openers <= 1) {
/*
- * Invalidate the cache first, so it isn't written
- * back to the device.
+ * Kill the cache first, so it isn't written back to the
+ * device.
*
* Another thread might instantiate more buffercache here,
* but there is not much we can do to close that race.
*/
- invalidate_bh_lrus();
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ kill_bdev(bdev);
brd_free_pages(brd);
error = 0;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac30a7a..587cce57ada 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9955a53733b..510fb10ec45 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -188,7 +188,6 @@ static int print_unex = 1;
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
-#include <linux/buffer_head.h> /* for invalidate_buffers() */
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a9338..f00257782fc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -69,7 +69,6 @@
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
@@ -422,7 +421,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -797,7 +796,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a63b0a2b780..d59edeabd93 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2817,7 +2817,7 @@ static const struct block_device_operations pktcdvd_ops = {
.check_events = pkt_check_events,
};
-static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
+static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b..148ab944378 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
}
/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
@@ -2424,64 +2401,6 @@ err_unlock:
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fd5adcd5594..6d5a914b961 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <asm/macintosh.h>
#include <asm/mac_via.h>
#define CARDNAME "swim"
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17a..89ddab127e3 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f759ad4584c..37c794d3126 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -613,7 +613,7 @@ static void frontend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
/*
* Ensure we connect even when two watches fire in
- * close successsion and we miss the intermediate value
+ * close succession and we miss the intermediate value
* of frontend_state.
*/
if (dev->state == XenbusStateConnected)
@@ -787,17 +787,14 @@ static const struct xenbus_device_id xen_blkbk_ids[] = {
};
-static struct xenbus_driver xen_blkbk = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = xen_blkbk_ids,
+static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
.otherend_changed = frontend_changed
-};
+);
int xen_blkif_xenbus_init(void)
{
- return xenbus_register_backend(&xen_blkbk);
+ return xenbus_register_backend(&xen_blkbk_driver);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec590841..9fd3ee203b1 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1437,16 +1437,13 @@ static const struct xenbus_device_id blkfront_ids[] = {
{ "" }
};
-static struct xenbus_driver blkfront = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = blkfront_ids,
+static DEFINE_XENBUS_DRIVER(blkfront, ,
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
.otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
-};
+);
static int __init xlblk_init(void)
{
@@ -1461,7 +1458,7 @@ static int __init xlblk_init(void)
return -ENODEV;
}
- ret = xenbus_register_frontend(&blkfront);
+ ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
return ret;
@@ -1474,7 +1471,7 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront);
+ return xenbus_unregister_driver(&blkfront_driver);
}
module_exit(xlblk_exit);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index fb1975d82a7..1a17e338735 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -456,7 +456,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (ace->irq == NO_IRQ)
+ if (!ace->irq)
/* No IRQ assigned, so need to poll */
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
@@ -1034,12 +1034,12 @@ static int __devinit ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq != NO_IRQ) {
+ if (ace->irq) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = NO_IRQ;
+ ace->irq = 0;
}
}
@@ -1086,7 +1086,7 @@ static void __devexit ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq != NO_IRQ)
+ if (ace->irq)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1156,7 +1156,7 @@ static int __devinit ace_probe(struct platform_device *dev)
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
u32 id = dev->id;
- int irq = NO_IRQ;
+ int irq = 0;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c2..5ccf142ef0b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 106beb194f3..07f14d10ea4 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -30,6 +30,7 @@
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.0"
+#define ATH3K_FIRMWARE "ath3k-1.fw"
#define ATH3K_DNLOAD 0x01
#define ATH3K_GETSTATE 0x05
@@ -400,9 +401,15 @@ static int ath3k_probe(struct usb_interface *intf,
return 0;
}
- if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- BT_ERR("Error loading firmware");
- return -EIO;
+ ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ BT_ERR("Firmware file \"%s\" not found",
+ ATH3K_FIRMWARE);
+ else
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+ ATH3K_FIRMWARE, ret);
+ return ret;
}
ret = ath3k_load_firmware(udev, firmware);
@@ -423,22 +430,10 @@ static struct usb_driver ath3k_driver = {
.id_table = ath3k_table,
};
-static int __init ath3k_init(void)
-{
- BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
- return usb_register(&ath3k_driver);
-}
-
-static void __exit ath3k_exit(void)
-{
- usb_deregister(&ath3k_driver);
-}
-
-module_init(ath3k_init);
-module_exit(ath3k_exit);
+module_usb_driver(ath3k_driver);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 54952ab800b..1e742a50e2c 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -281,26 +281,7 @@ static struct usb_driver bcm203x_driver = {
.id_table = bcm203x_table,
};
-static int __init bcm203x_init(void)
-{
- int err;
-
- BT_INFO("Broadcom Blutonium firmware driver ver %s", VERSION);
-
- err = usb_register(&bcm203x_driver);
- if (err < 0)
- BT_ERR("Failed to register USB driver");
-
- return err;
-}
-
-static void __exit bcm203x_exit(void)
-{
- usb_deregister(&bcm203x_driver);
-}
-
-module_init(bcm203x_init);
-module_exit(bcm203x_exit);
+module_usb_driver(bcm203x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 61b591470a9..a323baee51b 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -751,9 +751,7 @@ static void bfusb_disconnect(struct usb_interface *intf)
bfusb_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
@@ -764,26 +762,7 @@ static struct usb_driver bfusb_driver = {
.id_table = bfusb_table,
};
-static int __init bfusb_init(void)
-{
- int err;
-
- BT_INFO("BlueFRITZ! USB driver ver %s", VERSION);
-
- err = usb_register(&bfusb_driver);
- if (err < 0)
- BT_ERR("Failed to register BlueFRITZ! USB driver");
-
- return err;
-}
-
-static void __exit bfusb_exit(void)
-{
- usb_deregister(&bfusb_driver);
-}
-
-module_init(bfusb_init);
-module_exit(bfusb_exit);
+module_usb_driver(bfusb_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index aed1904ea67..c6a0c610374 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -844,9 +844,7 @@ static int bluecard_close(bluecard_info_t *info)
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 751b338d904..62831603de5 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -521,20 +521,7 @@ static struct usb_driver bpa10x_driver = {
.id_table = bpa10x_table,
};
-static int __init bpa10x_init(void)
-{
- BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&bpa10x_driver);
-}
-
-static void __exit bpa10x_exit(void)
-{
- usb_deregister(&bpa10x_driver);
-}
-
-module_init(bpa10x_init);
-module_exit(bpa10x_exit);
+module_usb_driver(bpa10x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4fc01949d39..0c97e5d514b 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -636,9 +636,7 @@ static int bt3c_close(bt3c_info_t *info)
bt3c_hci_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c8616..6c3defa5084 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
add_wait_queue(&thread->wait_q, &wait);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2c..27b74b0d547 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 526b61807d9..200b3a2877d 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -565,9 +565,7 @@ static int btuart_close(btuart_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc375b3..55ac349695c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,6 +101,7 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x413c, 0x8197) },
{ } /* Terminating entry */
@@ -315,7 +316,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -400,7 +402,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -506,15 +509,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
- urb->dev = data->udev;
- urb->pipe = pipe;
- urb->context = hdev;
- urb->complete = btusb_isoc_complete;
- urb->interval = data->isoc_rx_ep->bInterval;
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+ hdev, data->isoc_rx_ep->bInterval);
urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
- urb->transfer_buffer = buf;
- urb->transfer_buffer_length = size;
__fill_isoc_descriptor(urb, size,
le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
@@ -523,7 +521,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -727,6 +726,9 @@ static int btusb_send_frame(struct sk_buff *skb)
usb_fill_bulk_urb(urb, data->udev, pipe,
skb->data, skb->len, btusb_tx_complete, skb);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ urb->transfer_flags = URB_ISO_ASAP;
+
hdev->stat.acl_tx++;
break;
@@ -770,16 +772,17 @@ skip_waking:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p submission failed", hdev->name, urb);
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
@@ -1223,20 +1226,7 @@ static struct usb_driver btusb_driver = {
.supports_autosuspend = 1,
};
-static int __init btusb_init(void)
-{
- BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&btusb_driver);
-}
-
-static void __exit btusb_exit(void)
-{
- usb_deregister(&btusb_driver);
-}
-
-module_init(btusb_init);
-module_exit(btusb_exit);
+module_usb_driver(btusb_driver);
module_param(ignore_dga, bool, 0644);
MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 5e4c2de9fc3..969bb22e493 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -551,9 +551,7 @@ static int dtl1_close(dtl1_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 67c180c2c1e..2ed6ab1c6e1 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
#define VERSION "1.3"
+static bool amp;
+
struct vhci_data {
struct hci_dev *hdev;
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
hdev->bus = HCI_VIRTUAL;
hdev->driver_data = data;
+ if (amp)
+ hdev->dev_type = HCI_AMP;
+
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
@@ -264,10 +269,7 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- if (hci_unregister_dev(hdev) < 0) {
- BT_ERR("Can't unregister HCI device %s", hdev->name);
- }
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
file->private_data = NULL;
@@ -306,6 +308,9 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index f997c27d79e..2118211aff9 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -267,7 +267,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/buffer_head.h>
#include <linux/major.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index b072648dc3f..17e05d1076b 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
switch (*bridge_agpstat & 7) {
case 4:
*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
"Fixing up support for x2 & x1\n");
break;
case 2:
*bridge_agpstat |= AGPSTAT2_1X;
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
"Fixing up support for x1\n");
break;
default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
} else {
- printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
if (!(*bridge_agpstat & AGPSTAT3_8X)) {
printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
*bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->driver->cache_flush();
#ifdef CONFIG_X86
if (set_memory_uc((unsigned long)table, 1 << page_order))
- printk(KERN_WARNING "Could not set GATT table memory to UC!");
+ printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
bridge->gatt_table = (void *)table;
#else
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 52e08ca3ccd..3d3c1e6703b 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -95,6 +95,8 @@ static struct amba_id nmk_rng_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
static struct amba_driver nmk_rng_driver = {
.drv = {
.owner = THIS_MODULE,
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 3ed20e8abc0..cdd4c09fda9 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -560,7 +560,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_CONTROL(BT_H_BUSY); /* set */
/*
- * Uncached, ordered writes should just proceeed serially but
+ * Uncached, ordered writes should just proceed serially but
* some BMCs don't clear B2H_ATN with one hit. Fast-path a
* workaround without too much penalty to the general case.
*/
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c..34767a6d7f4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+ * The timer was not initialized, that means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's info. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so must say no heartbeat to avoid a
+ * deadlock on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 14517903371..d6e9d081c8b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -847,7 +847,7 @@ static const struct file_operations kmsg_fops = {
static const struct memdev {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
@@ -901,7 +901,7 @@ static const struct file_operations memory_fops = {
.llseek = noop_llseek,
};
-static char *mem_devnode(struct device *dev, mode_t *mode)
+static char *mem_devnode(struct device *dev, umode_t *mode)
{
if (mode && devlist[MINOR(dev->devt)].mode)
*mode = devlist[MINOR(dev->devt)].mode;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 778273c9324..522136d4084 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -258,7 +258,7 @@ int misc_deregister(struct miscdevice *misc)
EXPORT_SYMBOL(misc_register);
EXPORT_SYMBOL(misc_deregister);
-static char *misc_devnode(struct device *dev, mode_t *mode)
+static char *misc_devnode(struct device *dev, umode_t *mode)
{
struct miscdevice *c = dev_get_drvdata(dev);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6035ab8d5ef..85da8740586 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -624,8 +624,8 @@ static struct timer_rand_state input_timer_state;
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
struct {
- cycles_t cycles;
long jiffies;
+ unsigned cycles;
unsigned num;
} sample;
long delta, delta2, delta3;
@@ -637,7 +637,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
goto out;
sample.jiffies = jiffies;
- sample.cycles = get_cycles();
+
+ /* Use arch random value, fall back to cycles */
+ if (!arch_get_random_int(&sample.cycles))
+ sample.cycles = get_cycles();
+
sample.num = num;
mix_pool_bytes(&input_pool, &sample, sizeof(sample));
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b6de2c04714..54a3a6d0981 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -308,7 +308,7 @@ static const struct file_operations raw_ctl_fops = {
static struct cdev raw_cdev;
-static char *raw_devnode(struct device *dev, mode_t *mode)
+static char *raw_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index cf3ee008dca..4dc019408fa 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -329,7 +329,7 @@ static struct device_attribute srom_dev_attrs[] = {
__ATTR_NULL
};
-static char *srom_devnode(struct device *dev, mode_t *mode)
+static char *srom_devnode(struct device *dev, umode_t *mode)
{
*mode = S_IRUGO | S_IWUSR;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 35309274ad6..9b3cd08cd0e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -3,5 +3,8 @@ config CLKDEV_LOOKUP
bool
select HAVE_CLK
+config HAVE_CLK_PREPARE
+ bool
+
config HAVE_MACH_CLKDEV
bool
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index effe7974aa9..6b5cf02c35c 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
#ifndef CONFIG_X86_64
#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
- ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
+ ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
/*
* Some boards have the PMTMR running way too fast. We check
* the PMTMR rate against PIT channel 2 to catch these cases.
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 59feefe0e3e..fb6b6d28b60 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -58,25 +58,15 @@ static struct clocksource clocksource_dbx500_prcmu = {
};
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace dbx500_prcmu_sched_clock_read(void)
{
- u32 cyc;
-
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
- cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+ return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
}
-static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
-{
- u32 cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
- update_sched_clock(&cd, cyc, (u32)~0);
-}
#endif
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
@@ -97,7 +87,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- init_sched_clock(&cd, clksrc_dbx500_prcmu_update_sched_clock,
+ setup_sched_clock(dbx500_prcmu_sched_clock_read,
32, RATE_32K);
#endif
clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 27c49e60b7d..e7cab2da910 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -53,7 +53,7 @@ static cycle_t i8253_read(struct clocksource *cs)
count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
- if (count > LATCH) {
+ if (count > PIT_LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
@@ -114,8 +114,8 @@ static void init_pit_timer(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
/* binary, mode 2, LSB/MSB, ch 0 */
outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- outb_p(LATCH >> 8 , PIT_CH0); /* MSB */
+ outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */
+ outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */
break;
case CLOCK_EVT_MODE_SHUTDOWN:
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 79c47e88d5d..55d0f95f82f 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -59,7 +59,6 @@ static struct clocksource clksrc = {
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 18,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -256,7 +255,6 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
- clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
@@ -292,7 +290,7 @@ static int __init tcb_clksrc_init(void)
__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
/* and away we go! */
- clocksource_register(&clksrc);
+ clocksource_register_hz(&clksrc, divided_rate);
/* channel 2: periodic and oneshot timer support */
setup_clkevents(tc, clk32k_divisor_idx);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 987a165ede2..8c2df3499da 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -679,7 +679,7 @@ static struct kobj_type ktype_cpufreq = {
*/
static int cpufreq_add_dev_policy(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
int ret = 0;
#ifdef CONFIG_SMP
@@ -728,7 +728,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&sys_dev->kobj,
+ ret = sysfs_create_link(&dev->kobj,
&managed_policy->kobj,
"cpufreq");
if (ret)
@@ -761,7 +761,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
for_each_cpu(j, policy->cpus) {
struct cpufreq_policy *managed_policy;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
if (j == cpu)
continue;
@@ -770,8 +770,8 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
- cpu_sys_dev = get_cpu_sysdev(j);
- ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+ cpu_dev = get_cpu_device(j);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
if (ret) {
cpufreq_cpu_put(managed_policy);
@@ -783,7 +783,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
static int cpufreq_add_dev_interface(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
@@ -793,7 +793,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &sys_dev->kobj, "cpufreq");
+ &dev->kobj, "cpufreq");
if (ret)
return ret;
@@ -866,9 +866,9 @@ err_out_kobj_put:
* with with cpu hotplugging and all hell will break loose. Tried to clean this
* mess up, but more thorough testing is needed. - Mathieu
*/
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int ret = 0, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
@@ -947,7 +947,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_policy(cpu, policy, dev);
if (ret) {
if (ret > 0)
/* This is a managed cpu, symlink created,
@@ -956,7 +956,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
goto err_unlock_policy;
}
- ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
@@ -999,15 +999,15 @@ module_out:
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
#ifdef CONFIG_SMP
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
unsigned int j;
#endif
@@ -1032,7 +1032,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
pr_debug("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &sys_dev->kobj;
+ kobj = &dev->kobj;
cpufreq_cpu_put(data);
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
@@ -1071,8 +1071,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
strncpy(per_cpu(cpufreq_cpu_governor, j),
data->governor->name, CPUFREQ_NAME_LEN);
#endif
- cpu_sys_dev = get_cpu_sysdev(j);
- kobj = &cpu_sys_dev->kobj;
+ cpu_dev = get_cpu_device(j);
+ kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
lock_policy_rwsem_write(cpu);
@@ -1112,11 +1112,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(cpumask_weight(data->cpus) > 1)) {
/* first sibling now owns the new sysfs dir */
cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
+ cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
/* finally remove our own symlink */
lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, sif);
}
#endif
@@ -1128,9 +1128,9 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
}
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int retval;
if (cpu_is_offline(cpu))
@@ -1139,7 +1139,7 @@ static int cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- retval = __cpufreq_remove_dev(sys_dev);
+ retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1271,9 +1271,11 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
-static struct sysdev_driver cpufreq_sysdev_driver = {
- .add = cpufreq_add_dev,
- .remove = cpufreq_remove_dev,
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
};
@@ -1765,25 +1767,25 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
- if (sys_dev) {
+ dev = get_cpu_device(cpu);
+ if (dev) {
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -1830,8 +1832,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = sysdev_driver_register(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+ ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_null_driver;
@@ -1850,7 +1851,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
driver_data->name);
- goto err_sysdev_unreg;
+ goto err_if_unreg;
}
}
@@ -1858,9 +1859,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("driver %s up and running\n", driver_data->name);
return 0;
-err_sysdev_unreg:
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@@ -1887,7 +1887,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+ subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1907,8 +1907,7 @@ static int __init cpufreq_core_init(void)
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
- cpufreq_global_kobject = kobject_create_and_add("cpufreq",
- &cpu_sysdev_class.kset.kobj);
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f..235a340e81f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
.freq_step = 5,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d..3d679eee70a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
- iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
- j_dbs_info->prev_cpu_iowait);
+ iowait_time = (unsigned int)
+ (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
j_dbs_info->prev_cpu_iowait = cur_iowait_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->cpu = cpu;
this_dbs_info->rate_mult = 1;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e84..b40ee1403be 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
@@ -61,9 +60,8 @@ static int cpufreq_stats_update(unsigned int cpu)
spin_lock(&cpufreq_stats_lock);
stat = per_cpu(cpufreq_stats_table, cpu);
if (stat->time_in_state)
- stat->time_in_state[stat->last_index] =
- cputime64_add(stat->time_in_state[stat->last_index],
- cputime_sub(cur_time, stat->last_time));
+ stat->time_in_state[stat->last_index] +=
+ cur_time - stat->last_time;
stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 06ce2680d00..59f4261c753 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -291,10 +291,10 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
- if (!sys_dev)
+ if (!dev)
return -EINVAL;
if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
@@ -303,7 +303,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(sys_dev))) {
+ if ((ret = cpuidle_add_sysfs(cpu_dev))) {
module_put(cpuidle_driver->owner);
return ret;
}
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
@@ -354,7 +354,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(sys_dev);
+ cpuidle_remove_sysfs(cpu_dev);
list_del(&dev->device_list);
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
@@ -411,7 +411,7 @@ static int __init cpuidle_init(void)
if (cpuidle_disabled())
return -ENODEV;
- ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+ ret = cpuidle_add_interface(cpu_subsys.dev_root);
if (ret)
return ret;
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 38c3fd8b9d7..7db186685c2 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,7 +5,7 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/sysdev.h>
+#include <linux/device.h>
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
@@ -23,11 +23,11 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
-extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
-extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_interface(struct device *dev);
+extern void cpuidle_remove_interface(struct device *dev);
extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct sys_device *sysdev);
-extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+extern int cpuidle_add_sysfs(struct device *dev);
+extern void cpuidle_remove_sysfs(struct device *dev);
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 1e756e160dc..3fe41fe4851 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -22,8 +22,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-static ssize_t show_available_governors(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_available_governors(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t i = 0;
@@ -42,8 +42,8 @@ out:
return i;
}
-static ssize_t show_current_driver(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_driver(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -59,8 +59,8 @@ static ssize_t show_current_driver(struct sysdev_class *class,
return ret;
}
-static ssize_t show_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_governor(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -75,8 +75,8 @@ static ssize_t show_current_governor(struct sysdev_class *class,
return ret;
}
-static ssize_t store_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t store_current_governor(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
@@ -109,50 +109,48 @@ static ssize_t store_current_governor(struct sysdev_class *class,
return count;
}
-static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
- NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-static struct attribute *cpuclass_default_attrs[] = {
- &attr_current_driver.attr,
- &attr_current_governor_ro.attr,
+static struct attribute *cpuidle_default_attrs[] = {
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor_ro.attr,
NULL
};
-static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
- NULL);
-static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_governor, 0644, show_current_governor,
+ store_current_governor);
-static struct attribute *cpuclass_switch_attrs[] = {
- &attr_available_governors.attr,
- &attr_current_driver.attr,
- &attr_current_governor.attr,
+static struct attribute *cpuidle_switch_attrs[] = {
+ &dev_attr_available_governors.attr,
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor.attr,
NULL
};
-static struct attribute_group cpuclass_attr_group = {
- .attrs = cpuclass_default_attrs,
+static struct attribute_group cpuidle_attr_group = {
+ .attrs = cpuidle_default_attrs,
.name = "cpuidle",
};
/**
- * cpuidle_add_class_sysfs - add CPU global sysfs attributes
+ * cpuidle_add_interface - add CPU global sysfs attributes
*/
-int cpuidle_add_class_sysfs(struct sysdev_class *cls)
+int cpuidle_add_interface(struct device *dev)
{
if (sysfs_switch)
- cpuclass_attr_group.attrs = cpuclass_switch_attrs;
+ cpuidle_attr_group.attrs = cpuidle_switch_attrs;
- return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
+ return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
/**
- * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
+ * cpuidle_remove_interface - remove CPU global sysfs attributes
*/
-void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
+void cpuidle_remove_interface(struct device *dev)
{
- sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
+ sysfs_remove_group(&dev->kobj, &cpuidle_attr_group);
}
struct cpuidle_attr {
@@ -365,16 +363,16 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-int cpuidle_add_sysfs(struct sys_device *sysdev)
+int cpuidle_add_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
int error;
dev = per_cpu(cpuidle_devices, cpu);
- error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
+ error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
kobject_uevent(&dev->kobj, KOBJ_ADD);
@@ -383,11 +381,11 @@ int cpuidle_add_sysfs(struct sys_device *sysdev)
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-void cpuidle_remove_sysfs(struct sys_device *sysdev)
+void cpuidle_remove_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
dev = per_cpu(cpuidle_devices, cpu);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f2144..dcd8babae9e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
else
op.config |= CFG_MID_FRAG;
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ if (first_block) {
+ writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
}
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f049103708..464fa2147df 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
comment "DEVFREQ Drivers"
+config ARM_EXYNOS4_BUS_DEVFREQ
+ bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+ and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+ It reads PPMU counters of memory controllers and adjusts
+ the operating frequencies and voltages with OPP support.
+ To operate with optimal voltages, ASV support is required
+ (CONFIG_EXYNOS_ASV).
+
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89e970..8c464234f7e 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9cb8c..c189b82f5ec 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!IS_ERR(devfreq)) {
dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
err = -EINVAL;
- goto out;
+ goto err_out;
}
}
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
dev_err(dev, "%s: Unable to create devfreq for the device\n",
__func__);
err = -ENOMEM;
- goto out;
+ goto err_out;
}
mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->next_polling);
}
mutex_unlock(&devfreq_list_lock);
- goto out;
+out:
+ return devfreq;
+
err_init:
device_unregister(&devfreq->dev);
err_dev:
mutex_unlock(&devfreq->lock);
kfree(devfreq);
-out:
- if (err)
- return ERR_PTR(err);
- else
- return devfreq;
+err_out:
+ return ERR_PTR(err);
}
/**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 00000000000..6460577d670
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4210_memorybus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ * This version supports EXYNOS4210 only. This changes bus frequencies
+ * and vddint voltages. Exynos4412/4212 should be able to be supported
+ * with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT 1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+ TYPE_BUSF_EXYNOS4210,
+ TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO 40
+
+enum ppmu_counter {
+ PPMU_PMNCNT0 = 0,
+ PPMU_PMCCNT1,
+ PPMU_PMNCNT2,
+ PPMU_PMNCNT3,
+ PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+ void __iomem *hw_base;
+ unsigned int ccnt;
+ unsigned int event;
+ unsigned int count[PPMU_PMNCNT_MAX];
+ bool ccnt_overflow;
+ bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ LV_2,
+ LV_3,
+ LV_4,
+ _LV_END
+};
+#define EX4210_LV_MAX LV_2
+#define EX4x12_LV_MAX LV_4
+#define EX4210_LV_NUM (LV_2 + 1)
+#define EX4x12_LV_NUM (LV_4 + 1)
+
+struct busfreq_data {
+ enum exynos4_busf_type type;
+ struct device *dev;
+ struct devfreq *devfreq;
+ bool disabled;
+ struct regulator *vdd_int;
+ struct regulator *vdd_mif; /* Exynos4412/4212 only */
+ struct opp *curr_opp;
+ struct exynos4_ppmu dmc[2];
+
+ struct notifier_block pm_notifier;
+ struct mutex lock;
+
+ /* Dividers calculated at boot/probe-time */
+ unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+ unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+ {LV_0, 400000, 1150000},
+ {LV_1, 267000, 1050000},
+ {LV_2, 133000, 1025000},
+ {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob clock for exynos4x12 MIF/INT
+ * clock and voltage of both mif/int are controlled.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+ {LV_0, 400000, 1100000},
+ {LV_1, 267000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 950000},
+ {LV_4, 100000, 950000},
+ {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12. LV_x is not meant to represent
+ * the current performance. (MIF does)
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+ {LV_0, 200000, 1000000},
+ {LV_1, 160000, 950000},
+ {LV_2, 133000, 925000},
+ {LV_3, 100000, 900000},
+ {0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+ {1150000, 1050000, 1050000},
+ {1125000, 1025000, 1025000},
+ {1100000, 1000000, 1000000},
+ {1075000, 975000, 975000},
+ {1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+ /* 400 267 160 133 100 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+ {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+ /* 200 160 133 100 */
+ {1000000, 950000, 925000, 900000}, /* ASV0 */
+ {975000, 925000, 925000, 900000}, /* ASV1 */
+ {950000, 925000, 900000, 875000}, /* ASV2 */
+ {950000, 900000, 900000, 875000}, /* ASV3 */
+ {925000, 875000, 875000, 875000}, /* ASV4 */
+ {900000, 850000, 850000, 850000}, /* ASV5 */
+ {900000, 850000, 850000, 850000}, /* ASV6 */
+ {900000, 850000, 850000, 850000}, /* ASV7 */
+ {900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+ /* DMC L1: 266.7MHz */
+ { 4, 1, 1, 2, 1, 1, 3, 1 },
+ /* DMC L2: 133MHz */
+ { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+ /* ACLK200 L1: 160MHz */
+ { 4, 7, 5, 6, 1 },
+ /* ACLK200 L2: 133MHz */
+ { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+ /* ACLK_GDL/R L3: 133MHz */
+ { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP}
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1, 1, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 1, 1, 2, 1, 1},
+ /* DMC L2: 160MHz */
+ {5, 1, 1, 4, 1, 1},
+ /* DMC L3: 133MHz */
+ {5, 1, 1, 5, 1, 1},
+ /* DMC L4: 100MHz */
+ {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+ /*
+ * Clock divider value for following
+ * { G2DACP, DIVC2C, DIVC2C_ACLK }
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 2, 1},
+ /* DMC L2: 160MHz */
+ {5, 4, 1},
+ /* DMC L3: 133MHz */
+ {5, 5, 1},
+ /* DMC L4: 100MHz */
+ {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+ DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+ /*
+ * Clock divider value for following
+ * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+ */
+
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 160MHz */
+ {4, 4, 5},
+ /* SCLK_MFC: 133MHz */
+ {5, 5, 5},
+ /* SCLK_MFC: 100MHz */
+ {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4210_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ break;
+
+ if (index == EX4210_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+ tmp = data->top_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ return 0;
+}
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ break;
+
+ if (index == EX4x12_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - DMC1 */
+ tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+ tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+ S5P_CLKDIV_DMC1_C2C_MASK |
+ S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+ S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][1] <<
+ S5P_CLKDIV_DMC1_C2C_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][2] <<
+ S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+ } while (tmp & 0x111111);
+
+ /* Change Divider - TOP */
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+ S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ (exynos4x12_clkdiv_top[index][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4x12_clkdiv_top[index][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4x12_clkdiv_top[index][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4x12_clkdiv_top[index][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - MFC */
+ tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+ tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+ S5P_CLKDIV_MFC_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_MFC);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+ } while (tmp & 0x1);
+
+ /* Change Divider - JPEG */
+ tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+ tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+ S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1);
+
+ /* Change Divider - FIMC0~3 */
+ tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+ tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+ S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1111);
+
+ return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+ /* Reset PPMU */
+ __raw_writel(0x8000000f, ppmu_base + 0xf010);
+ __raw_writel(0x8000000f, ppmu_base + 0xf050);
+ __raw_writel(0x6, ppmu_base + 0xf000);
+ __raw_writel(0x0, ppmu_base + 0xf100);
+
+ /* Set PPMU Event */
+ data->dmc[i].event = 0x6;
+ __raw_writel(((data->dmc[i].event << 12) | 0x1),
+ ppmu_base + 0xfc);
+
+ /* Start PPMU */
+ __raw_writel(0x1, ppmu_base + 0xf000);
+ }
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+ u32 overflow;
+
+ /* Stop PPMU */
+ __raw_writel(0x0, ppmu_base + 0xf000);
+
+ /* Update local data from PPMU */
+ overflow = __raw_readl(ppmu_base + 0xf050);
+
+ data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+ data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+ for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+ data->dmc[i].count[j] = __raw_readl(
+ ppmu_base + (0xf110 + (0x10 * j)));
+ data->dmc[i].count_overflow[j] = overflow & (1 << j);
+ }
+ }
+
+ busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+ int i = 0;
+
+ while (exynos4x12_intclk_table[i].clk) {
+ if (exynos4x12_intclk_table[i].clk <= mifclk)
+ return i;
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+ struct opp *oldopp)
+{
+ int err = 0, tmp;
+ unsigned long volt = opp_get_voltage(opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ /* OPP represents DMC clock + INT voltage */
+ err = regulator_set_voltage(data->vdd_int, volt,
+ MAX_SAFEVOLT);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ /* OPP represents MIF clock + MIF voltage */
+ err = regulator_set_voltage(data->vdd_mif, volt,
+ MAX_SAFEVOLT);
+ if (err)
+ break;
+
+ tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ if (tmp < 0) {
+ err = tmp;
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ }
+ err = regulator_set_voltage(data->vdd_int,
+ exynos4x12_intclk_table[tmp].volt,
+ MAX_SAFEVOLT);
+ /* Try to recover */
+ if (err)
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+ int err = 0;
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq);
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+ unsigned long freq = opp_get_freq(opp);
+
+ if (old_freq == freq)
+ return 0;
+
+ dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+ mutex_lock(&data->lock);
+
+ if (data->disabled)
+ goto out;
+
+ if (old_freq < freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ if (old_freq != freq) {
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto out;
+
+ if (old_freq > freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ data->curr_opp = opp;
+out:
+ mutex_unlock(&data->lock);
+ return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+ u64 p0 = data->dmc[0].count[0];
+ u64 p1 = data->dmc[1].count[0];
+
+ p0 *= data->dmc[1].ccnt;
+ p1 *= data->dmc[0].ccnt;
+
+ if (data->dmc[1].ccnt == 0)
+ return 0;
+
+ if (p0 > p1)
+ return 0;
+ return 1;
+}
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ int busier_dmc;
+ int cycles_x2 = 2; /* 2 x cycles */
+ void __iomem *addr;
+ u32 timing;
+ u32 memctrl;
+
+ exynos4_read_ppmu(data);
+ busier_dmc = exynos4_get_busier_dmc(data);
+ stat->current_frequency = opp_get_freq(data->curr_opp);
+
+ if (busier_dmc)
+ addr = S5P_VA_DMC1;
+ else
+ addr = S5P_VA_DMC0;
+
+ memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+ timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+ switch ((memctrl >> 8) & 0xf) {
+ case 0x4: /* DDR2 */
+ cycles_x2 = ((timing >> 16) & 0xf) * 2;
+ break;
+ case 0x5: /* LPDDR2 */
+ case 0x6: /* DDR3 */
+ cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+ break;
+ default:
+ pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+ (memctrl >> 8) & 0xf);
+ return -EINVAL;
+ }
+
+ /* Number of cycles spent on memory access */
+ stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+ stat->total_time = data->dmc[busier_dmc].ccnt;
+
+ /* If the counters have overflown, retry */
+ if (data->dmc[busier_dmc].ccnt_overflow ||
+ data->dmc[busier_dmc].count_overflow[0])
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void exynos4_bus_exit(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+ .initial_freq = 400000,
+ .polling_ms = 50,
+ .target = exynos4_bus_target,
+ .get_dev_status = exynos4_bus_get_dev_status,
+ .exit = exynos4_bus_exit,
+};
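
Note: the devfreq core calls get_dev_status() roughly every polling_ms, and the simple_ondemand governor derives a load figure from the returned counters before asking target() for a new frequency. A minimal sketch of that load calculation, assuming the profile above (the governor's exact thresholds are not shown here):

struct devfreq_dev_status stat;

/* after exynos4_bus_get_dev_status(dev, &stat) succeeds ... */
unsigned long load_pct = stat.total_time ?
		stat.busy_time * 100 / stat.total_time : 0;
/* e.g. busy_time = 6000, total_time = 20000  ->  load_pct = 30 */
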
+
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+ u32 tmp;
+ int mgrp;
+ int i, err = 0;
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK |
+ S5P_CLKDIV_DMC0_COPY2_MASK |
+ S5P_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][6] <<
+ S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][7] <<
+ S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4210_clkdiv_top[i][0] <<
+ S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ (exynos4210_clkdiv_top[i][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4210_clkdiv_top[i][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4210_clkdiv_top[i][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4210_clkdiv_top[i][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ data->top_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+ tmp = 0; /* Use max voltages for reliability when the ASV group is unknown */
+#endif
+
+ pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+ /* Use merged grouping for voltage */
+ switch (tmp) {
+ case 0:
+ mgrp = 0;
+ break;
+ case 1:
+ case 2:
+ mgrp = 1;
+ break;
+ case 3:
+ case 4:
+ mgrp = 2;
+ break;
+ case 5:
+ case 6:
+ mgrp = 3;
+ break;
+ case 7:
+ mgrp = 4;
+ break;
+ default:
+ pr_warn("Unknown ASV Group. Use max voltage.\n");
+ mgrp = 0;
+ }
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++)
+ exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ exynos4210_busclk_table[i].volt);
+ if (err) {
+ dev_err(data->dev, "Cannot add opp entries.\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+ unsigned int i;
+ unsigned int tmp;
+ int ret;
+
+ /* Enable pause function for DREX2 DVFS */
+ tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+ tmp |= DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+ tmp = 0; /* Use max voltages for reliability when the ASV group is unknown */
+#endif
+
+ if (tmp > 8)
+ tmp = 0;
+ pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4x12_mif_step_50[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4x12_int_volt[tmp][i];
+ }
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ exynos4x12_mifclk_table[i].volt);
+ if (ret) {
+ dev_err(data->dev, "Fail to add opp entries.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ pm_notifier);
+ struct opp *opp;
+ unsigned long maxfreq = ULONG_MAX;
+ int err = 0;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /* Set the fastest frequency and deactivate DVFS */
+ mutex_lock(&data->lock);
+
+ data->disabled = true;
+
+ opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto unlock;
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto unlock;
+
+ data->curr_opp = opp;
+unlock:
+ mutex_unlock(&data->lock);
+ if (err)
+ return err;
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /* Reactivate */
+ mutex_lock(&data->lock);
+ data->disabled = false;
+ mutex_unlock(&data->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+ struct busfreq_data *data;
+ struct opp *opp;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ data->type = pdev->id_entry->driver_data;
+ data->dmc[0].hw_base = S5P_VA_DMC0;
+ data->dmc[1].hw_base = S5P_VA_DMC1;
+ data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+ data->dev = dev;
+ mutex_init(&data->lock);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_init_tables(data);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_init_tables(data);
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ }
+ if (err)
+ goto err_regulator;
+
+ data->vdd_int = regulator_get(dev, "vdd_int");
+ if (IS_ERR(data->vdd_int)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+ err = PTR_ERR(data->vdd_int);
+ goto err_regulator;
+ }
+ if (data->type == TYPE_BUSF_EXYNOS4x12) {
+ data->vdd_mif = regulator_get(dev, "vdd_mif");
+ if (IS_ERR(data->vdd_mif)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+ err = PTR_ERR(data->vdd_mif);
+ regulator_put(data->vdd_int);
+ goto err_regulator;
+ }
+ }
+
+ opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+ exynos4_devfreq_profile.initial_freq);
+ err = PTR_ERR(opp);
+ goto err_opp_add;
+ }
+ data->curr_opp = opp;
+
+ platform_set_drvdata(pdev, data);
+
+ busfreq_mon_reset(data);
+
+ data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+ &devfreq_simple_ondemand, NULL);
+ if (IS_ERR(data->devfreq)) {
+ err = PTR_ERR(data->devfreq);
+ goto err_opp_add;
+ }
+
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ err = register_pm_notifier(&data->pm_notifier);
+ if (err) {
+ dev_err(dev, "Failed to setup pm notifier\n");
+ goto err_devfreq_add;
+ }
+
+ return 0;
+err_devfreq_add:
+ devfreq_remove_device(data->devfreq);
+err_opp_add:
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ regulator_put(data->vdd_int);
+err_regulator:
+ kfree(data);
+ return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&data->pm_notifier);
+ devfreq_remove_device(data->devfreq);
+ regulator_put(data->vdd_int);
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ kfree(data);
+
+ return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ busfreq_mon_reset(data);
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+ .resume = exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+ { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+ { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { },
+};
+
+static struct platform_driver exynos4_busfreq_driver = {
+ .probe = exynos4_busfreq_probe,
+ .remove = __devexit_p(exynos4_busfreq_remove),
+ .id_table = exynos4_busfreq_id,
+ .driver = {
+ .name = "exynos4-busfreq",
+ .owner = THIS_MODULE,
+ .pm = &exynos4_busfreq_pm,
+ },
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+ return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+ platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
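
Note: this driver binds by platform device name through the id table above; a board file would typically instantiate it along the lines of the following sketch (the device variable name and placement are illustrative, the device names come from exynos4_busfreq_id):

static struct platform_device exynos4_busfreq_device = {
	/* or "exynos4212-busfreq" / "exynos4412-busfreq" on 4x12 boards */
	.name	= "exynos4210-busfreq",
	.id	= -1,
};

/* in the board's machine init code */
platform_device_register(&exynos4_busfreq_device);
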
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469f5cf..5a99bb3f255 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MX3
+ depends on SOC_IMX31 || SOC_IMX35
select DMA_ENGINE
default y
help
@@ -216,7 +216,7 @@ config PCH_DMA
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b7cbd1ab1db..0698695e8bf 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2054,6 +2054,8 @@ static struct amba_id pl08x_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl08x_ids);
+
static struct amba_driver pl08x_amba_driver = {
.drv.name = DRIVER_NAME,
.id_table = pl08x_ids,
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5..2b8661b54ea 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
+static void dmatest_callback(void *arg)
{
- complete(completion);
+ struct dmatest_done *done = arg;
+
+ done->done = true;
+ wake_up_all(done->wait);
}
/*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
*/
static int dmatest_func(void *data)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
+ struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
- set_freezable_with_signal();
+ set_freezable();
ret = -ENOMEM;
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
- struct completion cmp;
- unsigned long start, tmo, end = 0 /* compiler... */;
- bool reload = true;
u8 align = 0;
total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
continue;
}
- init_completion(&cmp);
+ done.done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &cmp;
+ tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- do {
- start = jiffies;
- if (reload)
- end = start + msecs_to_jiffies(timeout);
- else if (end <= start)
- end = start + 1;
- tmo = wait_for_completion_interruptible_timeout(&cmp,
- end - start);
- reload = try_to_freeze();
- } while (tmo == -ERESTARTSYS);
+ wait_event_freezable_timeout(done_wait, done.done,
+ msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (tmo == 0) {
+ if (!done.done) {
+ /*
+ * We're leaving the timed out dma operation with
+ * dangling pointer to done_wait. To make this
+ * correct, we'll need to allocate wait_done for
+ * each test iteration and perform "who's gonna
+ * free it this time?" dancing. For now, just
+ * leave it dangling.
+ */
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
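
Note: the dmatest change above replaces struct completion with an on-stack wait queue plus a done flag so the wait can use wait_event_freezable_timeout(). The same pattern in isolation (a sketch, not additional driver code):

DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_done done = { .wait = &done_wait };

/* completion side, e.g. the DMA callback */
done.done = true;
wake_up_all(done.wait);

/* waiting side: sleeps, but stays freezable across suspend */
wait_event_freezable_timeout(done_wait, done.done,
			     msecs_to_jiffies(timeout));
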
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 9a353c2216d..e779b434af4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1250,7 +1250,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->xor_base;
u32 win_enable = 0;
@@ -1264,7 +1264,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -1290,7 +1290,7 @@ static struct platform_driver mv_xor_driver = {
static int mv_xor_shared_probe(struct platform_device *pdev)
{
- struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct mv_xor_shared_private *msp;
struct resource *res;
@@ -1323,8 +1323,9 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (msd != NULL && msd->dram != NULL)
- mv_xor_conf_mbus_windows(msp, msd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(msp, dram);
return 0;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b4588bdd98b..fc903c0ed23 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -334,7 +334,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
goto err_irq;
}
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_clk;
@@ -372,7 +372,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
- clk_disable(mxs_dma->clk);
+ clk_disable_unprepare(mxs_dma->clk);
}
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -578,7 +578,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_out;
@@ -604,7 +604,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
- clk_disable(mxs_dma->clk);
+ clk_disable_unprepare(mxs_dma->clk);
return 0;
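
Note: clk_prepare_enable()/clk_disable_unprepare() used above are the common clock framework helpers that bundle the two-step prepare/enable sequence. Roughly equivalent open-coded form (sketch):

int ret;

ret = clk_prepare(clk);		/* may sleep */
if (ret)
	return ret;
ret = clk_enable(clk);		/* safe in atomic context */
if (ret)
	clk_unprepare(clk);

/* ... and the reverse on teardown */
clk_disable(clk);
clk_unprepare(clk);
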
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 571041477ab..09adcfcd953 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -19,6 +19,7 @@
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/of.h>
#define NR_DEFAULT_DESC 16
@@ -116,6 +117,9 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+/* forward declaration */
+static struct amba_driver pl330_driver;
+
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
@@ -267,6 +271,32 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+bool pl330_filter(struct dma_chan *chan, void *param)
+{
+ u8 *peri_id;
+
+ if (chan->device->dev->driver != &pl330_driver.drv)
+ return false;
+
+#ifdef CONFIG_OF
+ if (chan->device->dev->of_node) {
+ const __be32 *prop_value;
+ phandle phandle;
+ struct device_node *node;
+
+ prop_value = ((struct property *)param)->value;
+ phandle = be32_to_cpup(prop_value++);
+ node = of_find_node_by_phandle(phandle);
+ return ((chan->private == node) &&
+ (chan->chan_id == be32_to_cpup(prop_value)));
+ }
+#endif
+
+ peri_id = chan->private;
+ return *peri_id == (unsigned)param;
+}
+EXPORT_SYMBOL(pl330_filter);
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
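
Note: pl330_filter() added above is meant to be passed to the generic dmaengine channel-request API. A usage sketch from a hypothetical client, assuming peri_id holds the platform-assigned peripheral request id:

dma_cap_mask_t mask;
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, pl330_filter,
			   (void *)(unsigned long)peri_id);
if (!chan)
	return -EBUSY;	/* no matching PL330 channel available */
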
@@ -497,7 +527,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac)
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
struct dma_pl330_dmac *pdmac = pch->dmac;
- struct dma_pl330_peri *peri = pch->chan.private;
+ u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
@@ -522,13 +552,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- if (peri) {
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = pch->chan.chan_id;
- } else {
- desc->req.rqtype = MEMTOMEM;
- desc->req.peri = 0;
- }
+ desc->req.peri = peri_id ? pch->chan.chan_id : 0;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -615,12 +639,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
case DMA_TO_DEVICE:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
case DMA_FROM_DEVICE:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
@@ -646,16 +672,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct pl330_info *pi;
int burst;
if (unlikely(!pch || !len))
return NULL;
- if (peri && peri->rqtype != MEMTOMEM)
- return NULL;
-
pi = &pch->dmac->pif;
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
@@ -664,6 +686,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = MEMTOMEM;
/* Select max possible burst size */
burst = pi->pcfg.data_bus_width / 8;
@@ -692,25 +715,14 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
{
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
int i;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len || !peri))
+ if (unlikely(!pch || !sgl || !sg_len))
return NULL;
- /* Make sure the direction is consistent */
- if ((direction == DMA_TO_DEVICE &&
- peri->rqtype != MEMTODEV) ||
- (direction == DMA_FROM_DEVICE &&
- peri->rqtype != DEVTOMEM)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
- __func__, __LINE__);
- return NULL;
- }
-
addr = pch->fifo_addr;
first = NULL;
@@ -750,11 +762,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (direction == DMA_TO_DEVICE) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
@@ -856,32 +870,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
+ (u8)pi->pcfg.num_chan);
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
pch = &pdmac->peripherals[i];
- if (pdat) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
-
- switch (peri->rqtype) {
- case MEMTOMEM:
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- dma_cap_set(DMA_CYCLIC, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
- }
- pch->chan.private = peri;
- } else {
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- pch->chan.private = NULL;
- }
+ if (!adev->dev.of_node)
+ pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
+ else
+ pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
@@ -894,6 +892,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
pd->dev = &adev->dev;
+ if (pdat) {
+ pd->cap_mask = pdat->cap_mask;
+ } else {
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ if (pi->pcfg.num_peri) {
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ }
+ }
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
@@ -990,6 +997,8 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index fe90cd4a7eb..e48ab3108ad 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -32,7 +32,6 @@
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <linux/edac.h>
@@ -243,8 +242,8 @@ struct edac_device_ctl_info {
*/
struct edac_dev_sysfs_attribute *sysfs_attributes;
- /* pointer to main 'edac' class in sysfs */
- struct sysdev_class *edac_class;
+ /* pointer to main 'edac' subsys in sysfs */
+ struct bus_type *edac_subsys;
/* the internal state of this controller instance */
int op_state;
@@ -342,7 +341,7 @@ struct edac_pci_ctl_info {
int pci_idx;
- struct sysdev_class *edac_class; /* pointer to class */
+ struct bus_type *edac_subsys; /* pointer to subsystem */
/* the internal state of this controller instance */
int op_state;
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index c3f67437afb..4b154593343 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -23,7 +23,6 @@
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 86649df0028..b4ea185cceb 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,5 +1,5 @@
/*
- * file for managing the edac_device class of devices for EDAC
+ * file for managing the edac_device subsystem of devices for EDAC
*
* (C) 2007 SoftwareBitMaker
*
@@ -230,21 +230,21 @@ static struct kobj_type ktype_device_ctrl = {
*/
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
int err;
debugf1("%s()\n", __func__);
/* get the /sys/devices/system/edac reference */
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class error\n", __func__);
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys error\n", __func__);
err = -ENODEV;
goto err_out;
}
- /* Point to the 'edac_class' this instance 'reports' to */
- edac_dev->edac_class = edac_class;
+ /* Point to the 'edac_subsys' this instance 'reports' to */
+ edac_dev->edac_subsys = edac_subsys;
/* Init the devices's kobject */
memset(&edac_dev->kobj, 0, sizeof(struct kobject));
@@ -261,7 +261,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* register */
err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
- &edac_class->kset.kobj,
+ &edac_subsys->dev_root->kobj,
"%s", edac_dev->name);
if (err) {
debugf1("%s()Failed to register '.../edac/%s'\n",
@@ -284,7 +284,7 @@ err_kobj_reg:
module_put(edac_dev->owner);
err_mod_get:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
err_out:
return err;
@@ -308,7 +308,7 @@ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
* b) 'kfree' the memory
*/
kobject_put(&dev->kobj);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
/* edac_dev -> instance information */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d69144a0904..ca6c04d350e 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -25,7 +25,6 @@
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 29ffa350bfb..d56e63477d5 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1021,19 +1021,19 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
int edac_sysfs_setup_mc_kset(void)
{
int err = -EINVAL;
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
debugf1("%s()\n", __func__);
- /* get the /sys/devices/system/edac class reference */
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class error=%d\n", __func__, err);
+ /* get the /sys/devices/system/edac subsys reference */
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys error=%d\n", __func__, err);
goto fail_out;
}
/* Init the MC's kobject */
- mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+ mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
if (!mc_kset) {
err = -ENOMEM;
debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
@@ -1045,7 +1045,7 @@ int edac_sysfs_setup_mc_kset(void)
return 0;
fail_kset:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
fail_out:
return err;
@@ -1059,6 +1059,6 @@ fail_out:
void edac_sysfs_teardown_mc_kset(void)
{
kset_unregister(mc_kset);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 17aabb7b90e..00f81b47a51 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,8 +10,6 @@
#ifndef __EDAC_MODULE_H__
#define __EDAC_MODULE_H__
-#include <linux/sysdev.h>
-
#include "edac_core.h"
/*
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2b378207d57..63af1c5673d 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 495198ad059..97f5064e399 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -338,12 +338,12 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
* edac_pci_main_kobj_setup()
*
* setup the sysfs for EDAC PCI attributes
- * assumes edac_class has already been initialized
+ * assumes edac_subsys has already been initialized
*/
static int edac_pci_main_kobj_setup(void)
{
int err;
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
debugf0("%s()\n", __func__);
@@ -354,9 +354,9 @@ static int edac_pci_main_kobj_setup(void)
/* First time, so create the main kobject and its
* controls and attributes
*/
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class\n", __func__);
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys\n", __func__);
err = -ENODEV;
goto decrement_count_fail;
}
@@ -381,7 +381,7 @@ static int edac_pci_main_kobj_setup(void)
 /* Instantiate the pci object */
err = kobject_init_and_add(edac_pci_top_main_kobj,
&ktype_edac_pci_main_kobj,
- &edac_class->kset.kobj, "pci");
+ &edac_subsys->dev_root->kobj, "pci");
if (err) {
debugf1("Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
@@ -404,7 +404,7 @@ kzalloc_fail:
module_put(THIS_MODULE);
mod_get_fail:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
decrement_count_fail:
/* if are on this error exit, nothing to tear down */
@@ -432,7 +432,7 @@ static void edac_pci_main_kobj_teardown(void)
__func__);
kobject_put(edac_pci_top_main_kobj);
}
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
/*
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 86ad2eee120..670c4481453 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -26,7 +26,7 @@ EXPORT_SYMBOL_GPL(edac_handlers);
int edac_err_assert = 0;
EXPORT_SYMBOL_GPL(edac_err_assert);
-static atomic_t edac_class_valid = ATOMIC_INIT(0);
+static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
/*
* called to determine if there is an EDAC driver interested in
@@ -54,36 +54,37 @@ EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
* sysfs object: /sys/devices/system/edac
* need to export to other files
*/
-struct sysdev_class edac_class = {
+struct bus_type edac_subsys = {
.name = "edac",
+ .dev_name = "edac",
};
-EXPORT_SYMBOL_GPL(edac_class);
+EXPORT_SYMBOL_GPL(edac_subsys);
/* return pointer to the 'edac' node in sysfs */
-struct sysdev_class *edac_get_sysfs_class(void)
+struct bus_type *edac_get_sysfs_subsys(void)
{
int err = 0;
- if (atomic_read(&edac_class_valid))
+ if (atomic_read(&edac_subsys_valid))
goto out;
/* create the /sys/devices/system/edac directory */
- err = sysdev_class_register(&edac_class);
+ err = subsys_system_register(&edac_subsys, NULL);
if (err) {
printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
return NULL;
}
out:
- atomic_inc(&edac_class_valid);
- return &edac_class;
+ atomic_inc(&edac_subsys_valid);
+ return &edac_subsys;
}
-EXPORT_SYMBOL_GPL(edac_get_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
-void edac_put_sysfs_class(void)
+void edac_put_sysfs_subsys(void)
{
/* last user unregisters it */
- if (atomic_dec_and_test(&edac_class_valid))
- sysdev_class_unregister(&edac_class);
+ if (atomic_dec_and_test(&edac_subsys_valid))
+ bus_unregister(&edac_subsys);
}
-EXPORT_SYMBOL_GPL(edac_put_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 70ad8923f1d..8568d9b6187 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2234,7 +2234,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_unregister_decode_chain(&i7_mce_dec);
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2336,7 +2336,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a5da732fe5b..4184e0171f0 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -277,11 +277,9 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
- int row, multi_chan, chan;
+ int row, chan;
unsigned long offst, page;
- multi_chan = mci->csrows[0].nr_channels - 1;
-
if (!(info->errsts2 & 0x0003))
return 0;
@@ -294,20 +292,30 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
}
page = (unsigned long) info->eap;
- if (info->xeap & 1)
- page |= 0x100000000ul;
- chan = page & 1;
page >>= 1;
- offst = page & ((1 << PAGE_SHIFT) - 1);
- page >>= PAGE_SHIFT;
+ if (info->xeap & 1)
+ page |= 0x80000000;
+ page >>= (PAGE_SHIFT - 1);
row = edac_mc_find_csrow_by_page(mci, page);
+ if (row == -1) {
+ i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
+ "\tXEAP=%u\n"
+ "\t EAP=0x%08x\n"
+ "\tPAGE=0x%08x\n",
+ (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
+ return 0;
+ }
+ chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+ offst = info->eap
+ & ((1 << PAGE_SHIFT) -
+ (1 << mci->csrows[row].grain));
+
if (info->errsts & 0x0002)
edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
else
edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- multi_chan ? chan : 0,
- "i82975x CE");
+ chan, "i82975x CE");
return 1;
}
@@ -410,7 +418,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index d0864d9c38a..bd926ea2e00 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -884,7 +884,7 @@ static int __init mce_amd_init(void)
pr_info("MCE: In-kernel MCE decoding enabled.\n");
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
}
@@ -893,7 +893,7 @@ early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_unregister_decode_chain(&amd_mce_dec_nb);
kfree(fam_ops);
}
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 73c3e26a0bc..885e8ad8fdc 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -11,7 +11,6 @@
*/
#include <linux/kobject.h>
-#include <linux/sysdev.h>
#include <linux/edac.h>
#include <linux/module.h>
#include <asm/mce.h>
@@ -116,14 +115,14 @@ static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc,
static int __init edac_init_mce_inject(void)
{
- struct sysdev_class *edac_class = NULL;
+ struct bus_type *edac_subsys = NULL;
int i, err = 0;
- edac_class = edac_get_sysfs_class();
- if (!edac_class)
+ edac_subsys = edac_get_sysfs_subsys();
+ if (!edac_subsys)
return -EINVAL;
- mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj);
+ mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj);
if (!mce_kobj) {
printk(KERN_ERR "Error creating a mce kset.\n");
err = -ENOMEM;
@@ -147,7 +146,7 @@ err_sysfs_create:
kobject_del(mce_kobj);
err_mce_kobj:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
return err;
}
@@ -161,7 +160,7 @@ static void __exit edac_exit_mce_inject(void)
kobject_del(mce_kobj);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
module_init(edac_init_mce_inject);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9c..73464a62adf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,p1020-memory-controller", },
{ .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
- { .compatible = "fsl,p4080-memory-controller", },
+ { .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 38400963e24..fc757069c6a 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -142,7 +142,7 @@
/*
* The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly acccessed and have a base and length defined by the
+ * indirectly accessed and have a base and length defined by the
* device tree. The base can be anything; however, we expect the
* length to be precisely two registers, the first for the address
* window and the second for the data window.
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 7a402bfbee7..1dc118d83cc 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mce->cpuvendor, mce->cpuid, mce->time,
mce->socketid, mce->apicid);
-#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
return NOTIFY_DONE;
-#endif
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
@@ -1661,8 +1659,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1731,8 +1728,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- atomic_notifier_chain_register(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87f..d25599f2a3f 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -490,7 +495,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return 0;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
@@ -560,7 +566,7 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
- efi_pstore_write(type, &id, (unsigned int)id, 0, psi);
+ efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
return 0;
}
@@ -576,12 +582,14 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
return 0;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c4e7c59d1c6..91ddf0f7a1b 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -345,7 +345,8 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
/* The size reported is the min of all of our buffers */
- *data_size = min(*data_size, gsmi_dev.data_buf->length);
+ *data_size = min_t(unsigned long, *data_size,
+ gsmi_dev.data_buf->length);
*data_size = min_t(unsigned long, *data_size, param.data_len);
/* Copy data back to return buffer. */
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb10790..3ee852c9925 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -433,11 +433,11 @@ static int __init ibft_check_device(void)
 * Helper routines to determine whether the entry is valid
* in the proper iBFT structure.
*/
-static mode_t ibft_check_nic_for(void *data, int type)
+static umode_t ibft_check_nic_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_ETH_INDEX:
@@ -488,11 +488,11 @@ static mode_t ibft_check_nic_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_tgt_for(void *data, int type)
+static umode_t __init ibft_check_tgt_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_TGT_INDEX:
@@ -524,11 +524,11 @@ static mode_t __init ibft_check_tgt_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_initiator_for(void *data, int type)
+static umode_t __init ibft_check_initiator_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_initiator *init = entry->initiator;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_INI_INDEX:
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+ /*
+ * On UEFI systems, setup_arch()/find_ibft_region() is called
+ * before the ACPI tables are parsed and only does the legacy
+ * memory-scan finding, so look for the iBFT in the ACPI tables here.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd..4da4eb9ae92 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951..1eedb6f7fda 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
#include <linux/module.h>
#include <linux/sigma.h>
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+ size_t payload = 0;
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ payload = sigma_action_len(sa);
+ break;
+ default:
+ break;
+ }
+
+ payload = ALIGN(payload, 2);
+
+ return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
size_t len = sigma_action_len(sa);
- int ret = 0;
+ int ret;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
ret = i2c_master_send(client, (void *)&sa->addr, len);
if (ret < 0)
return -EINVAL;
break;
-
case SIGMA_ACTION_DELAY:
- ret = 0;
udelay(len);
len = 0;
break;
-
case SIGMA_ACTION_END:
- return 1;
-
+ return 0;
default:
return -EINVAL;
}
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
+ return 1;
}
static int
process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
{
- pr_debug("%s: processing %p\n", __func__, ssfw);
+ struct sigma_action *sa;
+ size_t size;
+ int ret;
+
+ while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+ sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+ size = sigma_action_size(sa);
+ ssfw->pos += size;
+ if (ssfw->pos > ssfw->fw->size || size == 0)
+ break;
+
+ ret = process_sigma_action(client, sa);
- while (1) {
- int ret = process_sigma_action(client, ssfw);
pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
+
+ if (ret <= 0)
return ret;
}
+
+ if (ssfw->pos != ssfw->fw->size)
+ return -EINVAL;
+
+ return 0;
}
int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
/* then verify the header */
ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
+
+ /*
+ * Reject too small or unreasonable large files. The upper limit has been
+ * chosen a bit arbitrarily, but it should be enough for all practical
+ * purposes and having the limit makes it easier to avoid integer
+ * overflows later in the loading process.
+ */
+ if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
goto done;
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
goto done;
- crc = crc32(0, fw->data, fw->size);
+ crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
+ if (crc != le32_to_cpu(ssfw_head->crc))
goto done;
ssfw.pos = sizeof(*ssfw_head);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8482a23887d..573532f7553 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,7 +70,7 @@ config GPIO_GENERIC
config GPIO_DA9052
tristate "Dialog DA9052 GPIO"
- depends on PMIC_DA9052
+ depends on PMIC_DA9052 && BROKEN
help
Say yes here to enable the GPIO driver for the DA9052 chip.
@@ -141,6 +141,12 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device
+config GPIO_PXA
+ bool "PXA GPIO support"
+ depends on ARCH_PXA || ARCH_MMP
+ help
+ Say yes here to support the PXA GPIO device
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -170,15 +176,6 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
-config GPIO_U300
- bool "ST-Ericsson U300 COH 901 335/571 GPIO"
- depends on GPIOLIB && ARCH_U300
- help
- Say yes here to support GPIO interface on ST-Ericsson U300.
- The names of the two IP block variants supported are
- COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
- ports of 8 GPIO pins each.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on PCI
@@ -356,7 +353,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
+ depends on PCI && X86 && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -387,7 +384,7 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
+ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
depends on PCI && X86
select GENERIC_IRQ_CHIP
help
@@ -395,11 +392,12 @@ config GPIO_PCH
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7223 and ML7831.
ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ ML7831 IOH is for general purpose use.
+ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8d..62e641e79e8 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
@@ -40,7 +40,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
-obj-$(CONFIG_PLAT_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
@@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
-obj-$(CONFIG_MACH_U300) += gpio-u300.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 9f278153700..2f263cc3256 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -193,17 +193,7 @@ static struct platform_driver adp5520_gpio_driver = {
.remove = __devexit_p(adp5520_gpio_remove),
};
-static int __init adp5520_gpio_init(void)
-{
- return platform_driver_register(&adp5520_gpio_driver);
-}
-module_init(adp5520_gpio_init);
-
-static void __exit adp5520_gpio_exit(void)
-{
- platform_driver_unregister(&adp5520_gpio_driver);
-}
-module_exit(adp5520_gpio_exit);
+module_platform_driver(adp5520_gpio_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("GPIO ADP5520 Driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3525ad91877..9ad1703d140 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -418,9 +418,8 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
if (ret)
goto err_irq;
- dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
- gc->base, gc->base + gc->ngpio - 1,
- pdata->irq_base, client->name, revid);
+ dev_info(&client->dev, "IRQ Base: %d Rev.: %d\n",
+ pdata->irq_base, revid);
if (pdata->setup) {
ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index ec57936aef6..5ca4098ba09 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -223,9 +223,6 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
goto err_release_mem;
}
- printk(KERN_INFO "bt8xxgpio: Abusing BT8xx card for GPIOs %d to %d\n",
- bg->gpio.base, bg->gpio.base + BT8XXGPIO_NR_GPIOS - 1);
-
return 0;
err_release_mem:
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba56ad..19eda1bbe34 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -347,7 +347,6 @@ static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
if (err)
goto release_region;
- dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
return 0;
release_region:
@@ -382,18 +381,7 @@ static struct platform_driver cs5535_gpio_driver = {
.remove = __devexit_p(cs5535_gpio_remove),
};
-static int __init cs5535_gpio_init(void)
-{
- return platform_driver_register(&cs5535_gpio_driver);
-}
-
-static void __exit cs5535_gpio_exit(void)
-{
- platform_driver_unregister(&cs5535_gpio_driver);
-}
-
-module_init(cs5535_gpio_init);
-module_exit(cs5535_gpio_exit);
+module_platform_driver(cs5535_gpio_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13..56dd047d584 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
@@ -259,17 +254,7 @@ static struct platform_driver da9052_gpio_driver = {
},
};
-static int __init da9052_gpio_init(void)
-{
- return platform_driver_register(&da9052_gpio_driver);
-}
-module_init(da9052_gpio_init);
-
-static void __exit da9052_gpio_exit(void)
-{
- return platform_driver_unregister(&da9052_gpio_driver);
-}
-module_exit(da9052_gpio_exit);
+module_platform_driver(da9052_gpio_driver);
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 4e24436b0f8..e38dd0c3197 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -524,17 +524,7 @@ static struct platform_driver bgpio_driver = {
.remove = __devexit_p(bgpio_pdev_remove),
};
-static int __init bgpio_platform_init(void)
-{
- return platform_driver_register(&bgpio_driver);
-}
-module_init(bgpio_platform_init);
-
-static void __exit bgpio_platform_exit(void)
-{
- platform_driver_unregister(&bgpio_driver);
-}
-module_exit(bgpio_platform_exit);
+module_platform_driver(bgpio_driver);
#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac077e5d..f2f000dd70b 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -201,8 +201,6 @@ static int __devinit ttl_probe(struct platform_device *pdev)
goto out_iounmap_regs;
}
- dev_info(&pdev->dev, "module %d: registered GPIO device\n",
- pdata->modno);
return 0;
out_iounmap_regs:
@@ -239,20 +237,9 @@ static struct platform_driver ttl_driver = {
.remove = __devexit_p(ttl_remove),
};
-static int __init ttl_init(void)
-{
- return platform_driver_register(&ttl_driver);
-}
-
-static void __exit ttl_exit(void)
-{
- platform_driver_unregister(&ttl_driver);
-}
+module_platform_driver(ttl_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ttl");
-
-module_init(ttl_init);
-module_exit(ttl_exit);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e7386925..461958fc226 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e1..5cd04b65c55 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 1ebedfb6d46..839624f9fe6 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -1150,8 +1150,8 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
nmk_gpio_init_irq(nmk_chip);
- dev_info(&dev->dev, "Bits %i-%i at address %p\n",
- nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ dev_info(&dev->dev, "at address %p\n",
+ nmk_chip->addr);
return 0;
out_free:
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 147df8ae79d..d3f3e8f5456 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
* Translate OpenFirmware node properties into platform_data
* WARNING: This is DEPRECATED and will be removed eventually!
*/
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
*invert = *val;
}
#else
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
*gpio_base = -1;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 3e1f1ecd07b..2d1de9e7e9b 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -290,10 +290,7 @@ static int pcf857x_probe(struct i2c_client *client,
* methods can't be called from sleeping contexts.
*/
- dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
- gpio->chip.base,
- gpio->chip.base + gpio->chip.ngpio - 1,
- client->name,
+ dev_info(&client->dev, "%s\n",
client->irq ? " (irq ignored)" : "");
/* Let platform code set up the GPIOs and their users.
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index a6008e123d0..f0603297f82 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -49,8 +49,8 @@ struct pch_regs {
enum pch_type_t {
INTEL_EG20T_PCH,
- OKISEMI_ML7223m_IOH, /* OKISEMI ML7223 IOH PCIe Bus-m */
- OKISEMI_ML7223n_IOH /* OKISEMI ML7223 IOH PCIe Bus-n */
+ OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
+ OKISEMI_ML7223n_IOH /* LAPIS Semiconductor ML7223 IOH PCIe Bus-n */
};
/* Specifies number of GPIO PINS */
@@ -524,6 +524,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1..8f79c03049f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
@@ -350,6 +346,8 @@ static struct amba_id pl061_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
+
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index ee137712f9d..b2d3ee1d183 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -11,14 +11,46 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <mach/gpio-pxa.h>
+/*
+ * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
+ * one set of registers. The register offsets are organized below:
+ *
+ * GPLR GPDR GPSR GPCR GRER GFER GEDR
+ * BANK 0 - 0x0000 0x000C 0x0018 0x0024 0x0030 0x003C 0x0048
+ * BANK 1 - 0x0004 0x0010 0x001C 0x0028 0x0034 0x0040 0x004C
+ * BANK 2 - 0x0008 0x0014 0x0020 0x002C 0x0038 0x0044 0x0050
+ *
+ * BANK 3 - 0x0100 0x010C 0x0118 0x0124 0x0130 0x013C 0x0148
+ * BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C
+ * BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150
+ *
+ * NOTE:
+ * BANK 3 is only available on PXA27x and later processors.
+ * BANK 4 and 5 are only available on PXA935
+ */
+
+#define GPLR_OFFSET 0x00
+#define GPDR_OFFSET 0x0C
+#define GPSR_OFFSET 0x18
+#define GPCR_OFFSET 0x24
+#define GRER_OFFSET 0x30
+#define GFER_OFFSET 0x3C
+#define GEDR_OFFSET 0x48
+#define GAFR_OFFSET 0x54
+#define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */
+
+#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
int pxa_last_gpio;
@@ -39,8 +71,20 @@ struct pxa_gpio_chip {
#endif
};
+enum {
+ PXA25X_GPIO = 0,
+ PXA26X_GPIO,
+ PXA27X_GPIO,
+ PXA3XX_GPIO,
+ PXA93X_GPIO,
+ MMP_GPIO = 0x10,
+ MMP2_GPIO,
+};
+
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
+static int gpio_type;
+static void __iomem *gpio_reg_base;
#define for_each_gpio_chip(i, c) \
for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
@@ -55,6 +99,122 @@ static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
return &pxa_gpio_chips[gpio_to_bank(gpio)];
}
+static inline int gpio_is_pxa_type(int type)
+{
+ return (type & MMP_GPIO) == 0;
+}
+
+static inline int gpio_is_mmp_type(int type)
+{
+ return (type & MMP_GPIO) != 0;
+}
+
+/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2 inverted,
+ * as well as their Alternate Function value being '1' for GPIO in GAFRx.
+ */
+static inline int __gpio_is_inverted(int gpio)
+{
+ if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
+ return 1;
+ return 0;
+}
+
+/*
+ * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
+ * function of a GPIO, and GPDRx cannot be altered once configured. It
+ * is attributed as "occupied" here (I know this terminology isn't
+ * accurate, you are welcome to propose a better one :-)
+ */
+static inline int __gpio_is_occupied(unsigned gpio)
+{
+ struct pxa_gpio_chip *pxachip;
+ void __iomem *base;
+ unsigned long gafr = 0, gpdr = 0;
+ int ret, af = 0, dir = 0;
+
+ pxachip = gpio_to_pxachip(gpio);
+ base = gpio_chip_base(&pxachip->chip);
+ gpdr = readl_relaxed(base + GPDR_OFFSET);
+
+ switch (gpio_type) {
+ case PXA25X_GPIO:
+ case PXA26X_GPIO:
+ case PXA27X_GPIO:
+ gafr = readl_relaxed(base + GAFR_OFFSET);
+ af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
+ dir = gpdr & GPIO_bit(gpio);
+
+ if (__gpio_is_inverted(gpio))
+ ret = (af != 1) || (dir == 0);
+ else
+ ret = (af != 0) || (dir != 0);
+ break;
+ default:
+ ret = gpdr & GPIO_bit(gpio);
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_ARCH_PXA
+static inline int __pxa_gpio_to_irq(int gpio)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return PXA_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __pxa_irq_to_gpio(int irq)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return irq - PXA_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
+static inline int __pxa_irq_to_gpio(int irq) { return -1; }
+#endif
+
+#ifdef CONFIG_ARCH_MMP
+static inline int __mmp_gpio_to_irq(int gpio)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return MMP_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __mmp_irq_to_gpio(int irq)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return irq - MMP_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
+static inline int __mmp_irq_to_gpio(int irq) { return -1; }
+#endif
+
+static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio, ret;
+
+ gpio = chip->base + offset;
+ ret = __pxa_gpio_to_irq(gpio);
+ if (ret >= 0)
+ return ret;
+ return __mmp_gpio_to_irq(gpio);
+}
+
+int pxa_irq_to_gpio(int irq)
+{
+ int ret;
+
+ ret = __pxa_irq_to_gpio(irq);
+ if (ret >= 0)
+ return ret;
+ return __mmp_irq_to_gpio(irq);
+}
+
static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
void __iomem *base = gpio_chip_base(chip);
@@ -63,12 +223,12 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&gpio_lock, flags);
- value = __raw_readl(base + GPDR_OFFSET);
+ value = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
value |= mask;
else
value &= ~mask;
- __raw_writel(value, base + GPDR_OFFSET);
+ writel_relaxed(value, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -81,16 +241,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
uint32_t tmp, mask = 1 << offset;
unsigned long flags;
- __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
spin_lock_irqsave(&gpio_lock, flags);
- tmp = __raw_readl(base + GPDR_OFFSET);
+ tmp = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
tmp &= ~mask;
else
tmp |= mask;
- __raw_writel(tmp, base + GPDR_OFFSET);
+ writel_relaxed(tmp, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -98,16 +258,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
+ return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- __raw_writel(1 << offset, gpio_chip_base(chip) +
+ writel_relaxed(1 << offset, gpio_chip_base(chip) +
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
-static int __init pxa_init_gpio_chip(int gpio_end)
+static int __devinit pxa_init_gpio_chip(int gpio_end)
{
int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
struct pxa_gpio_chip *chips;
@@ -122,7 +282,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
struct gpio_chip *c = &chips[i].chip;
sprintf(chips[i].label, "gpio-%d", i);
- chips[i].regbase = GPIO_BANK(i);
+ chips[i].regbase = gpio_reg_base + BANK_OFF(i);
c->base = gpio;
c->label = chips[i].label;
@@ -131,6 +291,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
c->direction_output = pxa_gpio_direction_output;
c->get = pxa_gpio_get;
c->set = pxa_gpio_set;
+ c->to_irq = pxa_gpio_to_irq;
/* number of GPIOs on last bank may be less than 32 */
c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -147,18 +308,18 @@ static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
uint32_t grer, gfer;
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
grer |= c->irq_edge_rise & c->irq_mask;
gfer |= c->irq_edge_fall & c->irq_mask;
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct pxa_gpio_chip *c;
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
unsigned long gpdr, mask = GPIO_bit(gpio);
c = gpio_to_pxachip(gpio);
@@ -176,12 +337,12 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
- gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
+ gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
if (__gpio_is_inverted(gpio))
- __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
else
- __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);
if (type & IRQ_TYPE_EDGE_RISING)
c->irq_edge_rise |= mask;
@@ -212,9 +373,9 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
for_each_gpio_chip(gpio, c) {
gpio_base = c->chip.base;
- gedr = __raw_readl(c->regbase + GEDR_OFFSET);
+ gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
gedr = gedr & c->irq_mask;
- __raw_writel(gedr, c->regbase + GEDR_OFFSET);
+ writel_relaxed(gedr, c->regbase + GEDR_OFFSET);
n = find_first_bit(&gedr, BITS_PER_LONG);
while (n < BITS_PER_LONG) {
@@ -229,29 +390,29 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
- __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
+ writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
uint32_t grer, gfer;
c->irq_mask &= ~GPIO_bit(gpio);
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
c->irq_mask |= GPIO_bit(gpio);
@@ -266,34 +427,143 @@ static struct irq_chip pxa_muxed_gpio_chip = {
.irq_set_type = pxa_gpio_irq_type,
};
-void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
+static int pxa_gpio_nums(void)
{
- struct pxa_gpio_chip *c;
- int gpio, irq;
+ int count = 0;
+
+#ifdef CONFIG_ARCH_PXA
+ if (cpu_is_pxa25x()) {
+#ifdef CONFIG_CPU_PXA26x
+ count = 89;
+ gpio_type = PXA26X_GPIO;
+#elif defined(CONFIG_PXA25x)
+ count = 84;
+ gpio_type = PXA26X_GPIO;
+#endif /* CONFIG_CPU_PXA26x */
+ } else if (cpu_is_pxa27x()) {
+ count = 120;
+ gpio_type = PXA27X_GPIO;
+ } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
+ count = 191;
+ gpio_type = PXA93X_GPIO;
+ } else if (cpu_is_pxa3xx()) {
+ count = 127;
+ gpio_type = PXA3XX_GPIO;
+ }
+#endif /* CONFIG_ARCH_PXA */
+
+#ifdef CONFIG_ARCH_MMP
+ if (cpu_is_pxa168() || cpu_is_pxa910()) {
+ count = 127;
+ gpio_type = MMP_GPIO;
+ } else if (cpu_is_mmp2()) {
+ count = 191;
+ gpio_type = MMP2_GPIO;
+ }
+#endif /* CONFIG_ARCH_MMP */
+ return count;
+}
- pxa_last_gpio = end;
+static int __devinit pxa_gpio_probe(struct platform_device *pdev)
+{
+ struct pxa_gpio_chip *c;
+ struct resource *res;
+ struct clk *clk;
+ int gpio, irq, ret;
+ int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+
+ pxa_last_gpio = pxa_gpio_nums();
+ if (!pxa_last_gpio)
+ return -EINVAL;
+
+ irq0 = platform_get_irq_byname(pdev, "gpio0");
+ irq1 = platform_get_irq_byname(pdev, "gpio1");
+ irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
+ if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
+ || (irq_mux <= 0))
+ return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ gpio_reg_base = ioremap(res->start, resource_size(res));
+ if (!gpio_reg_base)
+ return -EINVAL;
+
+ if (irq0 > 0)
+ gpio_offset = 2;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
+ PTR_ERR(clk));
+ iounmap(gpio_reg_base);
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ clk_unprepare(clk);
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
/* Initialize GPIO chips */
- pxa_init_gpio_chip(end);
+ pxa_init_gpio_chip(pxa_last_gpio);
/* clear all GPIO edge detects */
for_each_gpio_chip(gpio, c) {
- __raw_writel(0, c->regbase + GFER_OFFSET);
- __raw_writel(0, c->regbase + GRER_OFFSET);
- __raw_writel(~0,c->regbase + GEDR_OFFSET);
+ writel_relaxed(0, c->regbase + GFER_OFFSET);
+ writel_relaxed(0, c->regbase + GRER_OFFSET);
+ writel_relaxed(~0,c->regbase + GEDR_OFFSET);
+ /* unmask GPIO edge detect for AP side */
+ if (gpio_is_mmp_type(gpio_type))
+ writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
}
- for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
+#ifdef CONFIG_ARCH_PXA
+ irq = gpio_to_irq(0);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);
+
+ irq = gpio_to_irq(1);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
+#endif
+
+ for (irq = gpio_to_irq(gpio_offset);
+ irq <= gpio_to_irq(pxa_last_gpio); irq++) {
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- /* Install handler for GPIO>=2 edge detect interrupts */
- irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
- pxa_muxed_gpio_chip.irq_set_wake = fn;
+ irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
+ return 0;
}
+static struct platform_driver pxa_gpio_driver = {
+ .probe = pxa_gpio_probe,
+ .driver = {
+ .name = "pxa-gpio",
+ },
+};
+
+static int __init pxa_gpio_init(void)
+{
+ return platform_driver_register(&pxa_gpio_driver);
+}
+postcore_initcall(pxa_gpio_init);
+
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
@@ -301,13 +571,13 @@ static int pxa_gpio_suspend(void)
int gpio;
for_each_gpio_chip(gpio, c) {
- c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
- c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
- c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
- c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
+ c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
+ c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
+ c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
+ c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);
/* Clear GPIO transition detect bits */
- __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
+ writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
}
return 0;
}
@@ -319,12 +589,12 @@ static void pxa_gpio_resume(void)
for_each_gpio_chip(gpio, c) {
/* restore level with set/clear */
- __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
- __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
+ writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
+ writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);
- __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
- __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
- __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
+ writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
}
}
#else
@@ -336,3 +606,10 @@ struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+
+static int __init pxa_gpio_sysinit(void)
+{
+ register_syscore_ops(&pxa_gpio_syscore_ops);
+ return 0;
+}
+postcore_initcall(pxa_gpio_sysinit);
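The bank comment and BANK_OFF() macro added above are what every readl_relaxed()/writel_relaxed() in this file indexes through: banks 0-2 sit at 4-byte strides from the base, banks 3-5 at 4-byte strides from base + 0x100, and each register type adds its own offset. A minimal sketch of the address computation (the helper name is hypothetical; BANK_OFF and GPDR_OFFSET come from the patch):

/* Direction register of a given bank, e.g. bank 1 -> base + 0x10,
 * bank 4 -> base + 0x110, matching the table in the comment above. */
static void __iomem *bank_gpdr(void __iomem *gpio_reg_base, int bank)
{
        return gpio_reg_base + BANK_OFF(bank) + GPDR_OFFSET;
}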
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 2762698e020..e97016af644 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -227,18 +227,7 @@ static struct platform_driver rdc321x_gpio_driver = {
.remove = __devexit_p(rdc321x_gpio_remove),
};
-static int __init rdc321x_gpio_init(void)
-{
- return platform_driver_register(&rdc321x_gpio_driver);
-}
-
-static void __exit rdc321x_gpio_exit(void)
-{
- platform_driver_unregister(&rdc321x_gpio_driver);
-}
-
-module_init(rdc321x_gpio_init);
-module_exit(rdc321x_gpio_exit);
+module_platform_driver(rdc321x_gpio_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x GPIO driver");
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 86625185271..a7661773c05 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -22,8 +22,11 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
#include <asm/irq.h>
@@ -230,7 +233,7 @@ static int samsung_gpio_setcfg_2bit(struct samsung_gpio_chip *chip,
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
*
- * The reverse of samsung_gpio_setcfg_2bit(). Will return a value whicg
+ * The reverse of samsung_gpio_setcfg_2bit(). Will return a value which
* could be directly passed back to samsung_gpio_setcfg_2bit(), from the
* S3C_GPIO_SPECIAL() macro.
*/
@@ -467,33 +470,42 @@ static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
#endif
static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
- {
+ [0] = {
.cfg_eint = 0x0,
- }, {
+ },
+ [1] = {
.cfg_eint = 0x3,
- }, {
+ },
+ [2] = {
.cfg_eint = 0x7,
- }, {
+ },
+ [3] = {
.cfg_eint = 0xF,
- }, {
+ },
+ [4] = {
.cfg_eint = 0x0,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [5] = {
.cfg_eint = 0x2,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [6] = {
.cfg_eint = 0x3,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [7] = {
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [8] = {
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
- }, {
+ },
+ [9] = {
.cfg_eint = 0x3,
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
@@ -2374,6 +2386,63 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
#endif
};
+#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
+static int exynos4_gpio_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
+{
+ const __be32 *gpio = gpio_spec;
+ const u32 n = be32_to_cpup(gpio);
+ unsigned int pin = gc->base + be32_to_cpu(gpio[0]);
+
+ if (WARN_ON(gc->of_gpio_n_cells < 4))
+ return -EINVAL;
+
+ if (n > gc->ngpio)
+ return -EINVAL;
+
+ if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(be32_to_cpu(gpio[1]))))
+ pr_warn("gpio_xlate: failed to set pin function\n");
+ if (s3c_gpio_setpull(pin, be32_to_cpu(gpio[2])))
+ pr_warn("gpio_xlate: failed to set pin pull up/down\n");
+ if (s5p_gpio_set_drvstr(pin, be32_to_cpu(gpio[3])))
+ pr_warn("gpio_xlate: failed to set pin drive strength\n");
+
+ return n;
+}
+
+static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
+ { .compatible = "samsung,exynos4-gpio", },
+ {}
+};
+
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ struct gpio_chip *gc = &chip->chip;
+ u64 address;
+
+ if (!of_have_populated_dt())
+ return;
+
+ address = chip->base ? base + ((u32)chip->base & 0xfff) : base + offset;
+ gc->of_node = of_find_matching_node_by_address(NULL,
+ exynos4_gpio_dt_match, address);
+ if (!gc->of_node) {
+ pr_info("gpio: device tree node not found for gpio controller"
+ " with base address %08llx\n", address);
+ return;
+ }
+ gc->of_gpio_n_cells = 4;
+ gc->of_xlate = exynos4_gpio_xlate;
+}
+#elif defined(CONFIG_ARCH_EXYNOS4)
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ return;
+}
+#endif /* defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) */
+
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
@@ -2455,6 +2524,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO1, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_1, nr_chips, S5P_VA_GPIO1);
@@ -2467,6 +2540,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO2, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_2, nr_chips, S5P_VA_GPIO2);
@@ -2479,6 +2556,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO3, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_3, nr_chips, S5P_VA_GPIO3);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 16351584549..8cadf4d683a 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -297,18 +297,7 @@ static struct platform_driver sch_gpio_driver = {
.remove = __devexit_p(sch_gpio_remove),
};
-static int __init sch_gpio_init(void)
-{
- return platform_driver_register(&sch_gpio_driver);
-}
-
-static void __exit sch_gpio_exit(void)
-{
- platform_driver_unregister(&sch_gpio_driver);
-}
-
-module_init(sch_gpio_init);
-module_exit(sch_gpio_exit);
+module_platform_driver(sch_gpio_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("GPIO interface for Intel Poulsbo SCH");
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index c593bd46bfb..031c6adf5b6 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -359,18 +359,7 @@ static struct platform_driver timbgpio_platform_driver = {
/*--------------------------------------------------------------------------*/
-static int __init timbgpio_init(void)
-{
- return platform_driver_register(&timbgpio_platform_driver);
-}
-
-static void __exit timbgpio_exit(void)
-{
- platform_driver_unregister(&timbgpio_platform_driver);
-}
-
-module_init(timbgpio_init);
-module_exit(timbgpio_exit);
+module_platform_driver(timbgpio_platform_driver);
MODULE_DESCRIPTION("Timberdale GPIO driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1392c..26405efe0f9 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -103,23 +103,12 @@ static struct platform_driver ucb1400_gpio_driver = {
},
};
-static int __init ucb1400_gpio_init(void)
-{
- return platform_driver_register(&ucb1400_gpio_driver);
-}
-
-static void __exit ucb1400_gpio_exit(void)
-{
- platform_driver_unregister(&ucb1400_gpio_driver);
-}
-
void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
{
ucbdata = data;
}
-module_init(ucb1400_gpio_init);
-module_exit(ucb1400_gpio_exit);
+module_platform_driver(ucb1400_gpio_driver);
MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98723cb9ac6..82d5c20ad3c 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -571,15 +571,4 @@ static struct platform_driver giu_device_driver = {
},
};
-static int __init vr41xx_giu_init(void)
-{
- return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
- platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
+module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index ef5aabd8b8b..76ebfe5ff70 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -315,17 +315,7 @@ static struct platform_driver vx855gpio_driver = {
.remove = __devexit_p(vx855gpio_remove),
};
-static int vx855gpio_init(void)
-{
- return platform_driver_register(&vx855gpio_driver);
-}
-module_init(vx855gpio_init);
-
-static void vx855gpio_exit(void)
-{
- platform_driver_unregister(&vx855gpio_driver);
-}
-module_exit(vx855gpio_exit);
+module_platform_driver(vx855gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 96198f3fab7..92ea5350dfe 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -117,6 +117,60 @@ static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
#ifdef CONFIG_DEBUG_FS
+static const char *wm8994_gpio_fn(u16 fn)
+{
+ switch (fn) {
+ case WM8994_GP_FN_PIN_SPECIFIC:
+ return "pin-specific";
+ case WM8994_GP_FN_GPIO:
+ return "GPIO";
+ case WM8994_GP_FN_SDOUT:
+ return "SDOUT";
+ case WM8994_GP_FN_IRQ:
+ return "IRQ";
+ case WM8994_GP_FN_TEMPERATURE:
+ return "Temperature";
+ case WM8994_GP_FN_MICBIAS1_DET:
+ return "MICBIAS1 detect";
+ case WM8994_GP_FN_MICBIAS1_SHORT:
+ return "MICBIAS1 short";
+ case WM8994_GP_FN_MICBIAS2_DET:
+ return "MICBIAS2 detect";
+ case WM8994_GP_FN_MICBIAS2_SHORT:
+ return "MICBIAS2 short";
+ case WM8994_GP_FN_FLL1_LOCK:
+ return "FLL1 lock";
+ case WM8994_GP_FN_FLL2_LOCK:
+ return "FLL2 lock";
+ case WM8994_GP_FN_SRC1_LOCK:
+ return "SRC1 lock";
+ case WM8994_GP_FN_SRC2_LOCK:
+ return "SRC2 lock";
+ case WM8994_GP_FN_DRC1_ACT:
+ return "DRC1 activity";
+ case WM8994_GP_FN_DRC2_ACT:
+ return "DRC2 activity";
+ case WM8994_GP_FN_DRC3_ACT:
+ return "DRC3 activity";
+ case WM8994_GP_FN_WSEQ_STATUS:
+ return "Write sequencer";
+ case WM8994_GP_FN_FIFO_ERROR:
+ return "FIFO error";
+ case WM8994_GP_FN_OPCLK:
+ return "OPCLK";
+ case WM8994_GP_FN_THW:
+ return "Thermal warning";
+ case WM8994_GP_FN_DCS_DONE:
+ return "DC servo";
+ case WM8994_GP_FN_FLL1_OUT:
+ return "FLL1 output";
+ case WM8994_GP_FN_FLL2_OUT:
+ return "FLL1 output";
+ default:
+ return "Unknown";
+ }
+}
+
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -148,8 +202,29 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- /* No decode yet; note that GPIO2 is special */
- seq_printf(s, "(%x)\n", reg);
+ if (reg & WM8994_GPN_DIR)
+ seq_printf(s, "in ");
+ else
+ seq_printf(s, "out ");
+
+ if (reg & WM8994_GPN_PU)
+ seq_printf(s, "pull up ");
+
+ if (reg & WM8994_GPN_PD)
+ seq_printf(s, "pull down ");
+
+ if (reg & WM8994_GPN_POL)
+ seq_printf(s, "inverted ");
+ else
+ seq_printf(s, "noninverted ");
+
+ if (reg & WM8994_GPN_OP_CFG)
+ seq_printf(s, "open drain ");
+ else
+ seq_printf(s, "CMOS ");
+
+ seq_printf(s, "%s (%x)\n",
+ wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
}
}
#else
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 0ce6ac9898b..79b0fe8a725 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -206,7 +206,6 @@ static int __devinit xgpio_of_probe(struct device_node *np)
np->full_name, status);
return status;
}
- pr_info("XGpio: %s: registered\n", np->full_name);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a971e3d043b..17fdf4b6af9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -114,7 +114,7 @@ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpio_desc[gpio].chip;
}
@@ -1089,6 +1089,10 @@ unlock:
if (status)
goto fail;
+ pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic");
+
return 0;
fail:
/* failures here can mean systems won't boot... */
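The new gpiochip_add() message relies on GCC's conditional with an omitted middle operand: `chip->label ? : "generic"` yields chip->label when it is non-NULL and "generic" otherwise, i.e. shorthand for:

const char *label = chip->label ? chip->label : "generic";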
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1368826ef28..2418429a983 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -162,3 +162,6 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c0496f66070..0cde1b80fdb 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6d440fb894c..325365f6d35 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
break;
}
}
+
+ mutex_unlock(&dev->struct_mutex);
+
if (request->handle == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8323fc38984..5e818a808ac 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
+#include "drm_fourcc.h"
struct drm_prop_enum_list {
int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
struct drm_mode_set set;
int ret;
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
}
}
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ }
+
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ mutex_lock(&dev->mode_config.mutex);
+
+ plane->dev = dev;
+ drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ mutex_unlock(&dev->mode_config.mutex);
+ return -ENOMEM;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on
+ * display hardware, might be convenient to allow sharing programming
+ * for the scanout engine with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
+ struct drm_plane *plane, *plt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
@@ -1471,6 +1548,245 @@ out:
}
/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
 + * Return a plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = plane->gamma_size;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
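Both drm_mode_getplane_res() and drm_mode_getplane() follow the usual two-pass DRM ioctl convention: userspace calls once with a zero count to learn how much space is needed, then again with a suitably sized buffer. A minimal user-space sketch, assuming the request is exposed as DRM_IOCTL_MODE_GETPLANERESOURCES (error paths shortened):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static uint32_t *get_plane_ids(int fd, uint32_t *count)
{
        struct drm_mode_get_plane_res res = { 0 };
        uint32_t *ids;

        /* Pass 1: count_planes is 0, so the kernel only reports the count. */
        if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
                return NULL;

        /* Pass 2: hand in a buffer large enough for count_planes IDs. */
        ids = calloc(res.count_planes, sizeof(*ids));
        if (!ids)
                return NULL;
        res.plane_id_ptr = (uintptr_t)ids;
        if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res)) {
                free(ids);
                return NULL;
        }

        *count = res.count_planes;
        return ids;
}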
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ obj = drm_mode_object_find(dev, plane_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
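The src_x/src_y/src_w/src_h checks above treat those fields as unsigned 16.16 fixed-point values; that is why the debug message prints the fractional part as ((v & 0xffff) * 15625) >> 10, which equals frac * 1000000 / 65536, i.e. the fraction in millionths. A minimal sketch of the conversion (helper name hypothetical):

#include <stdint.h>

/* Split a 16.16 fixed-point plane coordinate into whole units and the
 * fraction expressed in millionths, as the DRM_DEBUG_KMS above does. */
static void fixed16_parts(uint32_t v, uint32_t *whole, uint32_t *micro)
{
        *whole = v >> 16;
        *micro = ((v & 0xffff) * 15625) >> 10; /* == (v & 0xffff) * 1000000 / 65536 */
}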
+
+/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
+ if (!req->flags)
return -EINVAL;
- }
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
- DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
@@ -1664,6 +1976,42 @@ out:
return ret;
}
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_RGB332;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
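drm_mode_legacy_fb_format() is what keeps the old bpp/depth-based ADDFB ioctl working on top of the FOURCC-based drm_mode_fb_cmd2 path used internally. A small spot-check of the mapping, read straight from the switch above (illustrative only; assumes kernel context with drm_fourcc.h available):

static void legacy_fb_format_examples(void)
{
        WARN_ON(drm_mode_legacy_fb_format(32, 24) != DRM_FORMAT_XRGB8888);
        WARN_ON(drm_mode_legacy_fb_format(32, 32) != DRM_FORMAT_ARGB8888);
        WARN_ON(drm_mode_legacy_fb_format(16, 16) != DRM_FORMAT_RGB565);
        WARN_ON(drm_mode_legacy_fb_format(16, 15) != DRM_FORMAT_XRGB1555);
        WARN_ON(drm_mode_legacy_fb_format(8, 8) != DRM_FORMAT_RGB332);
}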
+
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+ if (IS_ERR(fb)) {
+ DRM_ERROR("could not create framebuffer\n");
+ ret = PTR_ERR(fb);
+ goto out;
+ }
+
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
+ DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
+ DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ ret = format_check(r);
+ if (ret) {
+ DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
+ mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
found = 1;
if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
+ r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
+ uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->flags = property->flags;
if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_ENUM) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+ blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void *blob_ptr;
+ void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
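A minimal sketch of how the two conversion helpers added above relate; only drm_mode_legacy_fb_format() and drm_fb_get_bpp_depth() come from the hunks above, the wrapper function itself is hypothetical:

/*
 * Illustrative sketch, not part of the patch: the legacy ADDFB request only
 * carries bpp/depth, which drm_mode_addfb() converts to a FOURCC code, while
 * helpers that still think in bpp/depth terms can map the FOURCC back.
 */
static void legacy_format_roundtrip(void)
{
	unsigned int depth;
	int bpp;
	uint32_t fmt;

	/* a 32 bpp / depth 24 legacy request becomes DRM_FORMAT_XRGB8888 */
	fmt = drm_mode_legacy_fb_format(32, 24);

	/* the helper recovers depth = 24, bpp = 32 from the FOURCC */
	drm_fb_get_bpp_depth(fmt, &depth, &bpp);
}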
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe..84a4a809793 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_fourcc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
@@ -456,6 +457,30 @@ done:
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
@@ -510,8 +535,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
@@ -687,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
@@ -824,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
EXPORT_SYMBOL(drm_helper_connector_dpms);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
+ int i;
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
- fb->pitch = mode_cmd->pitch;
- fb->bits_per_pixel = mode_cmd->bpp;
- fb->depth = mode_cmd->depth;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
return 0;
}
@@ -985,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
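A hedged sketch of how a driver-side fb_create() implementation might use the new helper when validating a multi-planar drm_mode_fb_cmd2; the validation function below is hypothetical, only drm_format_num_planes() and the drm_mode_fb_cmd2 fields come from the patch:

static int example_check_planes(const struct drm_mode_fb_cmd2 *cmd)
{
	int i;
	int num_planes = drm_format_num_planes(cmd->pixel_format);

	/* every plane the format uses needs a buffer handle and a pitch */
	for (i = 0; i < num_planes; i++) {
		if (!cmd->handles[i] || !cmd->pitches[i])
			return -EINVAL;
	}

	return 0;
}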
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 40c187c60f4..ebf7d3f68fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3e927ce7557..ece03fc2d38 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -508,25 +508,10 @@ static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
- u8 rev = ext[0x01], d = ext[0x02];
+ u8 d = ext[0x02];
u8 *det_base = ext + d;
- switch (rev) {
- case 0:
- /* can't happen */
- return;
- case 1:
- /* have to infer how many blocks we have, check pixel clock */
- for (i = 0; i < 6; i++)
- if (det_base[18*i] || det_base[18*i+1])
- n++;
- break;
- default:
- /* explicit count */
- n = min(ext[0x03] & 0x0f, 6);
- break;
- }
-
+ n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 * mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < drm_num_cea_modes) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 * cea = drm_find_cea_extension(edid);
+ u8 * db, dbl;
+ int modes = 0;
+
+ if (cea && cea[1] >= 3) {
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+ if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+ modes += do_cea_modes (connector, db+1, dbl);
+ }
+ }
+
+ return modes;
+}
+
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
{
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
-
- switch ((db[0] & 0xe0) >> 5) {
- case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
- sad_count = dbl / 3;
- memcpy(eld + 20 + mnl, &db[1], dbl);
- break;
- case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
- eld[7] = db[1];
- break;
- case VENDOR_BLOCK:
- /* HDMI Vendor-Specific Data Block */
- if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
- parse_hdmi_vsdb(connector, db);
- break;
- default:
- break;
+ if (cea[1] >= 3)
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+
+ switch ((db[0] & 0xe0) >> 5) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
}
- }
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
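As a sketch of the VIC lookup that do_cea_modes() performs above; the wrapper below is hypothetical, while edid_cea_modes[] and drm_num_cea_modes come from the drm_edid_modes.h hunk that follows:

static const struct drm_display_mode *example_vic_to_mode(u8 svd)
{
	/* short video descriptors carry a 7-bit, 1-based VIC */
	u8 vic = (svd & 127) - 1;

	if (vic >= drm_num_cea_modes)
		return NULL;

	return &edid_cea_modes[vic];
}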
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 5f2064489fd..a91ffb11722 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -378,3 +378,287 @@ static const struct {
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@240Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@240Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
+ sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 80fe39d98b0..aada26f63de 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
void *panic_str)
{
+ /*
+ * It's a waste of time and effort to switch back to text console
+ * if the kernel should reboot before panic messages can be seen.
+ */
+ if (panic_timeout < 0)
+ return 0;
+
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4911e1d1dcf..c00cf154cc0 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
goto out;
old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
+ filp->f_op = fops_get(dev->driver->fops);
if (filp->f_op == NULL) {
filp->f_op = old_fops;
goto out;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 904d7e9c8e4..956fd38d7c9 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
int i;
idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (idx < 0)
return -EINVAL;
- }
i = 0;
+ mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
int i;
idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
i = 0;
+
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx) {
client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
memset(stats, 0, sizeof(*stats));
- mutex_lock(&dev->struct_mutex);
-
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
stats->count = dev->counters;
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 632ae243ede..c79c713eeba 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -33,6 +33,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45f442..00000000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas HellstrĂśm <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0f9ef9bf673..62c3675045a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -72,7 +72,7 @@ static int drm_class_resume(struct device *dev)
return 0;
}
-static char *drm_devnode(struct device *dev, mode_t *mode)
+static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 847466aab43..f9aaa56eae0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -18,3 +18,10 @@ config DRM_EXYNOS_FIMD
help
Choose this option if you want to use Exynos FIMD for DRM.
If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+ tristate "Exynos DRM HDMI"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos HDMI for DRM.
+ If M is selected, the module will be called exynos_drm_hdmi
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 0496d3ff268..395e69c9a96 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -5,7 +5,10 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
- exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+ exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 00000000000..84b614fe26f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ hdmi_attach_ddc_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_ddc "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_ddc "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+ {"s5p_ddc", 0},
+ { },
+};
+
+struct i2c_driver ddc_driver = {
+ .driver = {
+ .name = "s5p_ddc",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ddc_idtable,
+ .probe = s5p_ddc_probe,
+ .remove = __devexit_p(s5p_ddc_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
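A hedged sketch of how the exported ddc_driver is presumably hooked up from the HDMI side; i2c_add_driver() is the standard I2C registration call, and the init function name below is hypothetical:

static int __init example_ddc_init(void)
{
	/* let the "s5p_ddc" I2C device bind to this driver */
	return i2c_add_driver(&ddc_driver);
}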
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc..3cf785c5818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,83 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
- return ERR_PTR(-ENOMEM);
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ return NULL;
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
- return ERR_PTR(-ENOMEM);
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ return NULL;
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01..c913f2bad76 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,12 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e76872..d620b078425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb..e3861ac4929 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,16 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -71,11 +51,13 @@ struct exynos_drm_crtc_pos {
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occured through
* this pipe value.
+ * @dpms: store the crtc dpms value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct exynos_drm_overlay overlay;
unsigned int pipe;
+ unsigned int dpms;
};
static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -85,30 +67,35 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
+ int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ buffer = exynos_drm_fb_buffer(fb, i);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
- return -EFAULT;
- }
-
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr[i] = buffer->dma_addr;
+ overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->vaddr[i],
+ (unsigned long)overlay->dma_addr[i]);
+ }
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -119,7 +106,8 @@ static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitch;
+ overlay->pitch = fb->pitches[0];
+ overlay->pixel_format = fb->pixel_format;
/* set overlay range to be displayed. */
overlay->crtc_x = pos->crtc_x;
@@ -171,9 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (exynos_crtc->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
- /* TODO */
+ mutex_unlock(&dev->struct_mutex);
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +201,34 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ /*
+ * when set_crtc is requested from user space or at boot time,
+ * crtc->commit can be called without a preceding dpms call, so if
+ * dpms is not already on, crtc->dpms should be called with
+ * DRM_MODE_DPMS_ON so that the hardware is powered up.
+ */
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+ int mode = DRM_MODE_DPMS_ON;
+
+ /*
+ * enable hardware (power on) for all hdmi encoders connected
+ * to the current crtc.
+ */
+ exynos_drm_crtc_dpms(crtc, mode);
+ /*
+ * enable dma for all encoders connected to the current crtc
+ * and the lcd panel.
+ */
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_dpms_from_crtc);
+ }
+
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
@@ -342,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
}
exynos_crtc->pipe = nr;
+ exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
crtc = &exynos_crtc->drm_crtc;
private->crtc[nr] = crtc;
@@ -355,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_enable_vblank);
@@ -367,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
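exynos_drm_overlay_update() above clips the overlay against the active mode: the visible width and height are the smaller of the requested size and the space left between the crtc offset and hdisplay/vdisplay. A standalone sketch of that clipping arithmetic, with made-up numbers rather than real mode data:

	#include <stdio.h>

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	/* clip a requested overlay rectangle to the active display size */
	static void clip_overlay(unsigned int hdisplay, unsigned int vdisplay,
				 unsigned int crtc_x, unsigned int crtc_y,
				 unsigned int crtc_w, unsigned int crtc_h,
				 unsigned int *actual_w, unsigned int *actual_h)
	{
		*actual_w = min_u(hdisplay - crtc_x, crtc_w);
		*actual_h = min_u(vdisplay - crtc_y, crtc_h);
	}

	int main(void)
	{
		unsigned int w, h;

		/* a 1280x720 overlay placed at (800, 400) on a 1920x1080 mode */
		clip_overlay(1920, 1080, 800, 400, 1280, 720, &w, &h);
		printf("visible: %ux%u\n", w, h);	/* 1120x680 */
		return 0;
	}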
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2..25f72a62cb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c1..35889ca255e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -35,13 +36,16 @@
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
-#define DRIVER_NAME "exynos-drm"
+#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+#define VBLANK_OFF_DELAY 50000
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -61,6 +65,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -73,6 +80,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_crtc;
}
+ for (nr = 0; nr < MAX_PLANE; nr++) {
+ ret = exynos_plane_init(dev, nr);
+ if (ret)
+ goto err_crtc;
+ }
+
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
goto err_crtc;
@@ -96,6 +109,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_drm_device;
}
+ drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
return 0;
err_drm_device:
@@ -116,6 +131,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
@@ -158,6 +174,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = exynos_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
};
static struct drm_driver exynos_drm_driver = {
@@ -177,15 +205,7 @@ static struct drm_driver exynos_drm_driver = {
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
.ioctls = exynos_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = exynos_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- },
+ .fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
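The change above pulls the file operations out of the drm_driver initializer into a shared static const table. A minimal userspace sketch of the same ops-table idiom, using invented names instead of the DRM structures:

	#include <stdio.h>

	struct file_ops {                  /* stand-in for struct file_operations */
		int (*open)(const char *name);
		int (*release)(const char *name);
	};

	static int demo_open(const char *name)
	{
		printf("open %s\n", name);
		return 0;
	}

	static int demo_release(const char *name)
	{
		printf("release %s\n", name);
		return 0;
	}

	/* one shared, read-only ops table instead of an inline initializer */
	static const struct file_ops demo_fops = {
		.open    = demo_open,
		.release = demo_release,
	};

	struct driver {                    /* stand-in for struct drm_driver */
		const char *name;
		const struct file_ops *fops;
	};

	static struct driver demo_driver = {
		.name = "demo",
		.fops = &demo_fops,
	};

	int main(void)
	{
		demo_driver.fops->open(demo_driver.name);
		demo_driver.fops->release(demo_driver.name);
		return 0;
	}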
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae7..e685e1e3305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,14 +29,20 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
+#define MAX_PLANE 5
+#define MAX_FB_BUFFER 3
+#define DEFAULT_ZPOS -1
struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
+extern unsigned int drm_vblank_offdelay;
+
/* this enumerates display type. */
enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_NONE,
@@ -56,8 +62,8 @@ enum exynos_drm_output_type {
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay);
- void (*commit)(struct device *subdrv_dev);
- void (*disable)(struct device *subdrv_dev);
+ void (*commit)(struct device *subdrv_dev, int zpos);
+ void (*disable)(struct device *subdrv_dev, int zpos);
};
/*
@@ -79,9 +85,11 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus (dma-accessible) addresses of the memory regions
+ * allocated for an overlay.
+ * @vaddr: array of virtual memory addresses of this overlay.
+ * @zpos: order of the overlay layer (z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
* @index_color: if using color key feature then this value would be used
@@ -108,8 +116,10 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
- void __iomem *vaddr;
+ uint32_t pixel_format;
+ dma_addr_t dma_addr[MAX_FB_BUFFER];
+ void __iomem *vaddr[MAX_FB_BUFFER];
+ int zpos;
bool default_win;
bool color_key;
@@ -130,7 +140,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -143,6 +153,8 @@ struct exynos_drm_display {
/*
* Exynos drm manager ops
*
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
@@ -150,6 +162,8 @@ struct exynos_drm_display {
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
+ void (*dpms)(struct device *subdrv_dev, int mode);
+ void (*apply)(struct device *subdrv_dev);
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
@@ -178,7 +192,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
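The header above adds a zpos field with DEFAULT_ZPOS (-1) as a "no explicit layer" sentinel; the fimd callbacks later map that sentinel onto their default hardware window and range-check the result. A compact sketch of that resolution step, assuming a hypothetical WINDOWS_NR and an exclusive upper bound:

	#include <stdio.h>

	#define DEFAULT_ZPOS	(-1)	/* caller did not pick a layer explicitly */
	#define WINDOWS_NR	5	/* hypothetical number of hardware windows */

	/* map a requested zpos onto a valid hardware window, or -1 on error */
	static int resolve_window(int zpos, int default_win)
	{
		int win = zpos;

		if (win == DEFAULT_ZPOS)
			win = default_win;

		if (win < 0 || win >= WINDOWS_NR)
			return -1;

		return win;
	}

	int main(void)
	{
		printf("%d\n", resolve_window(DEFAULT_ZPOS, 3));	/* 3 */
		printf("%d\n", resolve_window(1, 3));			/* 1 */
		printf("%d\n", resolve_window(7, 3));			/* -1: out of range */
		return 0;
	}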
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67..86b93dde219 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -42,30 +42,70 @@
* @drm_encoder: encoder object.
* @manager: specific encoder has its own manager to control a hardware
* appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
*/
struct exynos_drm_encoder {
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
+ int dpms;
};
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
- DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+
+ DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+
+ if (exynos_encoder->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->apply)
+ manager_ops->apply(manager->dev);
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
static bool
exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -116,15 +156,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -152,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
exynos_encoder->manager->pipe = -1;
drm_encoder_cleanup(encoder);
- encoder->dev->mode_config.num_encoder--;
kfree(exynos_encoder);
}
@@ -182,6 +217,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
}
+ exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
exynos_encoder->manager = manager;
encoder = &exynos_encoder->drm_encoder;
encoder->possible_crtcs = possible_crtcs;
@@ -208,10 +244,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if the crtc is detached from the encoder, check the pipe;
+ * otherwise check the crtc attached to the encoder.
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -245,13 +294,83 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
manager_ops->disable_vblank(manager->dev);
}
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
- overlay_ops->commit(manager->dev);
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ int crtc = *(int *)data;
+ int zpos = DEFAULT_ZPOS;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
+
+ exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_encoder_dpms(encoder, mode);
+
+ exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct drm_connector *connector;
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops && manager_ops->dpms)
+ manager_ops->dpms(manager->dev, mode);
+
+ /*
+ * set the current dpms mode on the connector attached to the
+ * current encoder; connector->dpms is checked later in
+ * drm_helper_connector_dpms().
+ */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ connector->dpms = mode;
+
+ /*
+ * if this condition holds, the crtc has already been detached from
+ * the encoder and the final detach step has completed, so clear the
+ * pipe in the manager to prevent repeated calls.
+ */
+ if (mode > DRM_MODE_DPMS_ON) {
+ if (!encoder->crtc)
+ manager->pipe = -1;
+ }
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +380,24 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev, zpos);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
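The crtc and encoder code above caches the last DPMS mode and returns early when the requested mode matches it, doing the apply/power work only on a real transition. A self-contained sketch of that guard; the names and printouts are illustrative, not the kernel API:

	#include <stdio.h>
	#include <stdbool.h>

	enum dpms_mode { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

	struct encoder_state {
		enum dpms_mode dpms;
	};

	/* returns true if any hardware work was actually performed */
	static bool set_dpms(struct encoder_state *enc, enum dpms_mode mode)
	{
		if (enc->dpms == mode)
			return false;		/* already in the requested mode */

		if (mode == DPMS_ON)
			printf("apply timings, power display on\n");
		else
			printf("power display off\n");

		enc->dpms = mode;
		return true;
	}

	int main(void)
	{
		struct encoder_state enc = { .dpms = DPMS_OFF };

		set_dpms(&enc, DPMS_ON);	/* does work */
		set_dpms(&enc, DPMS_ON);	/* skipped: same mode */
		set_dpms(&enc, DPMS_OFF);	/* does work */
		return 0;
	}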
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a..97b087a51cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -39,7 +39,13 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
void (*fn)(struct drm_encoder *, void *));
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+ void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd524..3733fe6723d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,9 +29,10 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -40,15 +41,11 @@
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer obejct.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem objects, each containing a gem object.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -59,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
- /*
- * default framebuffer has no gem object so
- * a buffer of the default framebuffer should be released at here.
- */
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
-
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -79,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
return drm_gem_handle_create(file_priv,
- &exynos_fb->exynos_gem_obj->base, handle);
+ &exynos_fb->exynos_gem_obj[0]->base, handle);
}
static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -100,146 +90,110 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.dirty = exynos_drm_fb_dirty,
};
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
- struct drm_framebuffer *fb;
- struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- struct drm_gem_object *obj;
- unsigned int size;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- mode_cmd->pitch = max(mode_cmd->pitch,
- mode_cmd->width * (mode_cmd->bpp >> 3));
-
- DRM_LOG_KMS("drm fb create(%dx%d)\n",
- mode_cmd->width, mode_cmd->height);
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
- fb = &exynos_fb->fb;
- ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- DRM_ERROR("failed to initialize framebuffer.\n");
- goto err_init;
+ DRM_ERROR("failed to initialize framebuffer\n");
+ return ERR_PTR(ret);
}
- DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
- size = mode_cmd->pitch * mode_cmd->height;
+ return &exynos_fb->fb;
+}
- /*
- * mode_cmd->handle could be NULL at booting time or
- * with user request. if NULL, a new buffer or a gem object
- * would be allocated.
- */
- if (!mode_cmd->handle) {
- if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
-
- /*
- * in case that file_priv is NULL, it allocates
- * only buffer and this buffer would be used
- * for default framebuffer.
- */
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
- goto err_buffer;
- }
-
- exynos_fb->entry = entry;
-
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
-
- goto out;
- } else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
- if (IS_ERR(exynos_gem_obj)) {
- ret = PTR_ERR(exynos_gem_obj);
- goto err_buffer;
- }
- }
- } else {
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- goto err_buffer;
- }
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct exynos_drm_fb *exynos_fb;
+ int nr;
+ int i;
- exynos_gem_obj = to_exynos_gem_obj(obj);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- drm_gem_object_unreference_unlocked(obj);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ return ERR_PTR(-ENOENT);
}
- /*
- * if got a exynos_gem_obj from either a handle or
- * a new creation then exynos_fb->exynos_gem_obj is NULL
- * so that default framebuffer has no its own gem object,
- * only its own buffer object.
- */
- exynos_fb->entry = exynos_gem_obj->entry;
-
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
- (unsigned int)&exynos_gem_obj->base);
+ drm_gem_object_unreference_unlocked(obj);
-out:
- exynos_fb->exynos_gem_obj = exynos_gem_obj;
+ fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(fb))
+ return fb;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ exynos_fb = to_exynos_fb(fb);
+ nr = exynos_drm_format_num_buffers(fb->pixel_format);
- return fb;
-
-err_buffer:
- drm_framebuffer_cleanup(fb);
-
-err_init:
- kfree(exynos_fb);
+ for (i = 1; i < nr; i++) {
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ exynos_drm_fb_destroy(fb);
+ return ERR_PTR(-ENOENT);
+ }
- return ERR_PTR(ret);
-}
+ drm_gem_object_unreference_unlocked(obj);
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+ }
- return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+ return fb;
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ if (index >= MAX_FB_BUFFER)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ buffer = exynos_fb->exynos_gem_obj[index]->buffer;
+ if (!buffer)
+ return NULL;
+
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
+
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
- return entry;
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
- .fb_create = exynos_drm_fb_create,
+ .fb_create = exynos_user_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index eb35931d302..3ecb30d9355 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -28,9 +28,27 @@
#ifndef _EXYNOS_DRM_FB_H_
#define _EXYNOS_DRM_FB_H
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12MT:
+ return 2;
+ case DRM_FORMAT_YUV420M:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
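exynos_drm_format_num_buffers() above maps a fourcc pixel format to the number of memory planes (and therefore GEM handles) it needs. The sketch below mirrors that switch with locally defined stand-in constants, since the real DRM_FORMAT_* codes live in drm_fourcc.h:

	#include <stdio.h>

	/* local stand-ins for the DRM fourcc codes used in the header above */
	enum pixel_format {
		FMT_XRGB8888,	/* single-plane RGB */
		FMT_NV12M,	/* Y plane + interleaved CbCr plane */
		FMT_YUV420M,	/* Y, Cb and Cr in three separate planes */
	};

	/* number of memory planes (and therefore GEM handles) a format needs */
	static int format_num_buffers(enum pixel_format format)
	{
		switch (format) {
		case FMT_NV12M:
			return 2;
		case FMT_YUV420M:
			return 3;
		default:
			return 1;
		}
	}

	int main(void)
	{
		printf("XRGB8888: %d plane(s)\n", format_num_buffers(FMT_XRGB8888));
		printf("NV12M:    %d plane(s)\n", format_num_buffers(FMT_NV12M));
		printf("YUV420M:  %d plane(s)\n", format_num_buffers(FMT_YUV420M));
		return 0;
	}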
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a771..d7ae29d2f3d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,7 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -42,8 +42,8 @@
drm_fb_helper)
struct exynos_drm_fbdev {
- struct drm_fb_helper drm_fb_helper;
- struct drm_framebuffer *fb;
+ struct drm_fb_helper drm_fb_helper;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
};
static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -85,36 +85,32 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
- struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_fb->fb = fb;
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
-
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ /* RGB formats use only one buffer */
+ buffer = exynos_drm_fb_buffer(fb, 0);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
- offset += fbi->var.yoffset * fb->pitch;
+ offset += fbi->var.yoffset * fb->pitches[0];
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -125,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_device *dev = helper->dev;
struct fb_info *fbi;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct platform_device *pdev = dev->platformdev;
+ unsigned long size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -139,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
@@ -151,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ exynos_gem_obj = exynos_drm_gem_create(dev, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto out;
+ }
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
- ret = PTR_ERR(exynos_fbdev->fb);
+ ret = PTR_ERR(helper->fb);
goto out;
}
- helper->fb = exynos_fbdev->fb;
helper->fbdev = fbi;
fbi->par = helper;
@@ -171,10 +179,11 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
- if (ret < 0)
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
+ if (ret < 0) {
fb_dealloc_cmap(&fbi->cmap);
+ goto out;
+ }
/*
* if failed, all resources allocated above would be released by
@@ -207,36 +216,43 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
{
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
- struct drm_framebuffer *fb = exynos_fbdev->fb;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_framebuffer *fb = helper->fb;
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ unsigned long size;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (helper->fb != fb) {
- DRM_ERROR("drm framebuffer is different\n");
- return -EINVAL;
- }
-
if (exynos_drm_fbdev_is_samefb(fb, sizes))
return 0;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ if (exynos_fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(exynos_fbdev->exynos_gem_obj);
if (fb->funcs->destroy)
fb->funcs->destroy(fb);
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR(exynos_fbdev->fb)) {
- DRM_ERROR("failed to allocate fb.\n");
- return PTR_ERR(exynos_fbdev->fb);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ exynos_gem_obj = exynos_drm_gem_create(dev, size);
+ if (IS_ERR(exynos_gem_obj))
+ return PTR_ERR(exynos_gem_obj);
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR_OR_NULL(helper->fb)) {
+ DRM_ERROR("failed to create drm framebuffer.\n");
+ return PTR_ERR(helper->fb);
}
- helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -369,6 +385,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
+ if (fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
exynos_drm_fbdev_destroy(dev, private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;
@@ -405,6 +424,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +445,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
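The fbdev path above computes the line pitch as width times bytes per pixel and the panning offset as xoffset * (bpp >> 3) + yoffset * pitch. The same arithmetic in isolation, with hypothetical values:

	#include <stdio.h>

	/* byte offset of pixel (x, y) inside a linear framebuffer */
	static unsigned long fb_byte_offset(unsigned int x, unsigned int y,
					    unsigned int bits_per_pixel,
					    unsigned int pitch)
	{
		return (unsigned long)x * (bits_per_pixel >> 3) +
		       (unsigned long)y * pitch;
	}

	int main(void)
	{
		unsigned int width = 1280, bpp = 32;
		unsigned int pitch = width * (bpp >> 3);	/* 5120 bytes per line */

		/* panning to (10, 20) skips 20 full lines plus 10 pixels */
		printf("pitch  = %u\n", pitch);
		printf("offset = %lu\n", fb_byte_offset(10, 20, bpp, pitch)); /* 102440 */
		return 0;
	}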
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9..ca83139cd30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <drm/exynos_drm.h>
#include <plat/regs-fb-v4.h>
@@ -64,10 +65,11 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
+ bool enabled;
};
struct fimd_context {
@@ -84,6 +86,8 @@ struct fimd_context {
unsigned long irq_flags;
u32 vidcon0;
u32 vidcon1;
+ bool suspended;
+ struct mutex lock;
struct fb_videomode *timing;
};
@@ -119,12 +123,12 @@ static int fimd_display_power_on(struct device *dev, int mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* TODO */
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -132,12 +136,68 @@ static struct exynos_drm_display fimd_display = {
.power_on = fimd_display_power_on,
};
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ mutex_lock(&ctx->lock);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /*
+ * enable the fimd hardware only if it is currently suspended.
+ *
+ * note: fimd_dpms is also called at boot time, so without this
+ * check clk_enable could be called twice.
+ */
+ if (ctx->suspended)
+ pm_runtime_get_sync(subdrv_dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ pm_runtime_put_sync(subdrv_dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+ struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+ struct fimd_win_data *win_data;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+ ovl_ops->commit(subdrv_dev, i);
+ }
+
+ if (mgr_ops && mgr_ops->commit)
+ mgr_ops->commit(subdrv_dev);
+}
+
static void fimd_commit(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fb_videomode *timing = ctx->timing;
u32 val;
+ if (ctx->suspended)
+ return;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
/* setup polarity values from machine code. */
@@ -184,6 +244,9 @@ static int fimd_enable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return -EPERM;
+
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -208,6 +271,9 @@ static void fimd_disable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -219,6 +285,8 @@ static void fimd_disable_vblank(struct device *dev)
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .dpms = fimd_dpms,
+ .apply = fimd_apply,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
@@ -229,6 +297,7 @@ static void fimd_win_mode_set(struct device *dev,
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
+ int win;
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -238,12 +307,19 @@ static void fimd_win_mode_set(struct device *dev,
return;
}
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win > WINDOWS_NR)
+ return;
+
offset = overlay->fb_x * (overlay->bpp >> 3);
offset += overlay->fb_y * overlay->pitch;
DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
- win_data = &ctx->win_data[ctx->default_win];
+ win_data = &ctx->win_data[win];
win_data->offset_x = overlay->crtc_x;
win_data->offset_y = overlay->crtc_y;
@@ -251,8 +327,8 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
- win_data->vaddr = overlay->vaddr + offset;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -263,7 +339,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -346,15 +422,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
- int win = ctx->default_win;
+ int win = zpos;
unsigned long val, alpha, size;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
@@ -376,16 +458,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -437,22 +519,32 @@ static void fimd_win_commit(struct device *dev)
if (win != 0)
fimd_win_set_colkey(dev, win);
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val |= WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
/* Enable DMA channel and unprotect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = true;
}
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
- int win = ctx->default_win;
+ int win = zpos;
u32 val;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
@@ -473,6 +565,8 @@ static void fimd_win_disable(struct device *dev)
val &= ~SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = false;
}
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -508,9 +602,17 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
wake_up_interruptible(&e->base.file_priv->event_wait);
}
- if (is_checked)
+ if (is_checked) {
drm_vblank_put(drm_dev, crtc);
+ /*
+ * don't turn vblank off here if vblank_disable_allowed is 1,
+ * because vblank will be turned off by the timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
+ }
+
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -528,9 +630,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /* check whether the crtc is already detached from the encoder */
+ if (manager->pipe < 0)
+ goto out;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+out:
return IRQ_HANDLED;
}
@@ -551,7 +658,7 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/*
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
* by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
+ * vblank event.(after drm_vblank_put function is called)
*/
drm_dev->vblank_disable_allowed = 1;
@@ -704,9 +811,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->irq = res->start;
- for (win = 0; win < WINDOWS_NR; win++)
- fimd_clear_win(ctx, win);
-
ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret < 0) {
dev_err(dev, "irq request failed.\n");
@@ -731,10 +835,20 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
+ mutex_init(&ctx->lock);
+
platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ for (win = 0; win < WINDOWS_NR; win++)
+ fimd_clear_win(ctx, win);
+
exynos_drm_subdrv_register(subdrv);
return 0;
@@ -762,14 +876,25 @@ err_clk_get:
static int __devexit fimd_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_subdrv_unregister(&ctx->subdrv);
+ if (ctx->suspended)
+ goto out;
+
clk_disable(ctx->lcd_clk);
clk_disable(ctx->bus_clk);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_sync(dev);
+
+out:
+ pm_runtime_disable(dev);
+
clk_put(ctx->lcd_clk);
clk_put(ctx->bus_clk);
@@ -783,12 +908,102 @@ static int __devexit fimd_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = pm_runtime_suspend(dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_resume(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to resume runtime pm.\n");
+ return ret;
+ }
+
+ pm_runtime_disable(dev);
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to active runtime pm.\n");
+ pm_runtime_enable(dev);
+ pm_runtime_suspend(dev);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ clk_disable(ctx->lcd_clk);
+ clk_disable(ctx->bus_clk);
+
+ ctx->suspended = true;
+ return 0;
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = clk_enable(ctx->bus_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ clk_disable(ctx->bus_clk);
+ return ret;
+ }
+
+ ctx->suspended = false;
+
+ /* if vblank was enabled before suspend, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ fimd_enable_vblank(dev);
+
+ fimd_apply(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+ SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
static struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = __devexit_p(fimd_remove),
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
+ .pm = &fimd_pm_ops,
},
};
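fimd_win_commit() above programs the window's DMA range as a start address plus a size derived from fb_width * ovl_height * (bpp >> 3). The same calculation on its own, with a hypothetical base address and geometry:

	#include <stdio.h>

	int main(void)
	{
		unsigned long dma_addr  = 0x20000000UL;	/* hypothetical buffer base */
		unsigned int fb_width   = 1280;
		unsigned int ovl_height = 720;
		unsigned int bpp        = 32;

		/* the window scans ovl_height full framebuffer lines */
		unsigned long size  = (unsigned long)fb_width * ovl_height * (bpp >> 3);
		unsigned long start = dma_addr;
		unsigned long end   = dma_addr + size;

		printf("start = 0x%lx, end = 0x%lx, size = 0x%lx\n", start, end, size);
		return 0;
	}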
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906e..025abb3e3b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -55,104 +55,128 @@ static unsigned int convert_to_vm_err_msg(int msg)
return out_msg;
}
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ unsigned int *handle)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ int ret;
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+ /*
+ * allocate an id in the idr table where the obj is registered;
+ * the handle holds the id that user space sees.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
- int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- size = roundup(size, PAGE_SIZE);
+ if (!exynos_gem_obj)
+ return;
+
+ obj = &exynos_gem_obj->base;
+
+ DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+ exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(exynos_gem_obj);
+}
+
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+ int ret;
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
- DRM_ERROR("failed to allocate exynos gem object.\n");
- return ERR_PTR(-ENOMEM);
+ DRM_ERROR("failed to allocate exynos gem object\n");
+ return NULL;
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object\n");
+ kfree(exynos_gem_obj);
+ return NULL;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
- ret = drm_gem_create_mmap_offset(obj);
- if (ret < 0) {
- DRM_ERROR("failed to allocate mmap offset.\n");
- goto err_create_mmap_offset;
- }
-
- /*
- * allocate a id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, obj, handle);
- if (ret)
- goto err_handle_create;
-
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
-
return exynos_gem_obj;
+}
-err_handle_create:
- drm_gem_free_mmap_offset(obj);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned long size)
+{
+ struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
-err_create_mmap_offset:
- drm_gem_object_release(obj);
+ size = roundup(size, PAGE_SIZE);
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
- kfree(exynos_gem_obj);
+ exynos_gem_obj = exynos_drm_gem_init(dev, size);
+ if (!exynos_gem_obj) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ exynos_gem_obj->buffer = buffer;
- return ERR_PTR(ret);
+ return exynos_gem_obj;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
@@ -171,36 +195,37 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= (VM_IO | VM_RESERVED);
+ /* in case of direct mapping, always having non-cachable attribute */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_file = filp;
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information to physically continuous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -218,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
@@ -264,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
-
DRM_DEBUG_KMS("%s\n", __FILE__);
- DRM_DEBUG_KMS("handle count = %d\n",
- atomic_read(&gem_obj->handle_count));
-
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
-
- /* release file pointer to gem object. */
- drm_gem_object_release(gem_obj);
-
- exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
-
- kfree(exynos_gem_obj);
+ exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -302,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset)
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -329,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
- *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
-
- drm_gem_object_unreference(obj);
+ if (!exynos_gem_obj->base.map_list.map) {
+ ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (ret)
+ goto out;
+ }
+ *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ unsigned int handle)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+	 * obj->refcount and obj->handle_count are decreased and,
+	 * if both of them reach 0, exynos_drm_gem_free_object()
+	 * is called back to release the resources.
+ */
+ ret = drm_gem_handle_delete(file_priv, handle);
+ if (ret < 0) {
+ DRM_ERROR("failed to delete drm_gem_handle.\n");
+ return ret;
+ }
return 0;
}
@@ -360,7 +407,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -388,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle)
-{
- int ret;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
MODULE_LICENSE("GPL");
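
The refactor above splits buffer allocation from handle creation: exynos_drm_gem_create() only builds the gem object and its backing buffer, while exynos_drm_gem_handle_create() exposes a handle to userspace and the ioctls tear the object down again if that second step fails. Below is a minimal sketch of an in-kernel caller that only needs the buffer and no userspace handle; it is not part of the patch and the example_* names are invented, only the exynos_drm_gem_* calls come from the code above.

/* sketch only, not part of the patch */
#include "drmP.h"
#include "exynos_drm_gem.h"

static struct exynos_drm_gem_obj *example_alloc_scanout(struct drm_device *dev,
							unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;

	/* allocates the gem object plus a contiguous exynos_drm_gem_buf */
	exynos_gem_obj = exynos_drm_gem_create(dev, size);
	if (IS_ERR(exynos_gem_obj))
		return exynos_gem_obj;

	/* exynos_gem_obj->buffer now points at the allocated memory region */
	return exynos_gem_obj;
}

static void example_free_scanout(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	/* releases the gem object together with its backing buffer */
	exynos_drm_gem_destroy(exynos_gem_obj);
}
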
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277..67cdc916870 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (as used by the DMA engine) of the allocated
+ *	memory region.
+ *	- a physical address without an IOMMU, a device address with one.
+ * @size: size of the allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to an exynos_drm_gem_buf object.
+ *	- contains the information for the memory region allocated
+ *	by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -44,14 +60,16 @@
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
- struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct drm_gem_object base;
+ struct exynos_drm_gem_buf *buffer;
};
-/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+/* destroy a gem object together with its buffer */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a new gem object and allocate a buffer for it */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ unsigned long size);
/*
* request gem object creation and buffer allocation as the size
@@ -59,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
* height and bpp.
*/
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
-/* unmap a buffer from user space. */
-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -77,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args);
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset);
-
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/*
* destroy memory region allocated.
@@ -102,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
* would be released by drm_gem_handle_delete().
*/
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle);
+ struct drm_device *dev,
+ unsigned int handle);
+
+/* page fault handler and mmap fault address(virtual) to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags and we can change the vm attribute to other one at here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
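
For reference, the dumb-buffer entry points declared above are reached through the generic DRM ioctls. The following userspace sketch shows the whole path end to end; it is not part of the patch and assumes only the standard kernel DRM uapi headers, nothing here is exynos-specific.

/* userspace sketch, error handling trimmed */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int example_map_dumb_fb(int fd, uint32_t width, uint32_t height, void **out)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width = width;
	create.height = height;
	create.bpp = 32;
	/* handled by exynos_drm_gem_dumb_create() */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0)
		return -1;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	/* handled by exynos_drm_gem_dumb_map_offset() */
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0)
		return -1;

	/* map.offset is the fake mmap offset built from map_list.hash.key;
	 * faults on this mapping land in exynos_drm_gem_fault(). */
	*out = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)map.offset);
	return *out == MAP_FAILED ? -1 : 0;
}
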
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644
index 00000000000..ed8a319ed84
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev) to_context(dev)
+#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
+ struct drm_hdmi_context, subdrv);
+
+/* these callback pointers should be set by the specific drivers. */
+static struct exynos_hdmi_display_ops *hdmi_display_ops;
+static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
+static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
+
+struct drm_hdmi_context {
+ struct exynos_drm_subdrv subdrv;
+ struct exynos_drm_hdmi_context *hdmi_ctx;
+ struct exynos_drm_hdmi_context *mixer_ctx;
+ struct work_struct work;
+};
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (display_ops)
+ hdmi_display_ops = display_ops;
+}
+EXPORT_SYMBOL(exynos_drm_display_ops_register);
+
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops)
+ hdmi_manager_ops = manager_ops;
+}
+EXPORT_SYMBOL(exynos_drm_manager_ops_register);
+
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (overlay_ops)
+ hdmi_overlay_ops = overlay_ops;
+}
+EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->is_connected)
+ return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+ return false;
+}
+
+static int drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector, u8 *edid, int len)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->get_edid)
+ return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
+ connector, edid, len);
+
+ return 0;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->check_timing)
+ return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
+ timing);
+
+ return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->power_on)
+ return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+ return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .is_connected = drm_hdmi_is_connected,
+ .get_edid = drm_hdmi_get_edid,
+ .check_timing = drm_hdmi_check_timing,
+ .power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
+ return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
+ manager->pipe);
+
+ return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
+ return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
+ hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->commit)
+ hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (hdmi_manager_ops && hdmi_manager_ops->disable)
+ hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+ break;
+ default:
+		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+ .dpms = drm_hdmi_dpms,
+ .enable_vblank = drm_hdmi_enable_vblank,
+ .disable_vblank = drm_hdmi_disable_vblank,
+ .mode_set = drm_hdmi_mode_set,
+ .commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+ struct exynos_drm_overlay *overlay)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
+ hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
+ hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
+ hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+ .mode_set = drm_mixer_mode_set,
+ .commit = drm_mixer_commit,
+ .disable = drm_mixer_disable,
+};
+
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+ struct drm_hdmi_context *ctx;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_common_hdmi_pd *pd;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ pd = pdev->dev.platform_data;
+
+ if (!pd) {
+ DRM_DEBUG_KMS("platform data is null.\n");
+ return -EFAULT;
+ }
+	/* choose bluescreen (fake) color */
+ if (!pd->hdmi_dev) {
+ DRM_DEBUG_KMS("hdmi device is null.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->mixer_dev) {
+ DRM_DEBUG_KMS("mixer device is null.\n");
+ return -EFAULT;
+ }
+
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi driver.\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mixer_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register mixer driver.\n");
+ goto err_hdmidrv;
+ }
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->hdmi_dev);
+ if (!ctx->hdmi_ctx) {
+ DRM_DEBUG_KMS("hdmi context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->hdmi_ctx->drm_dev = drm_dev;
+
+ ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->mixer_dev);
+ if (!ctx->mixer_ctx) {
+ DRM_DEBUG_KMS("mixer context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->mixer_ctx->drm_dev = drm_dev;
+
+ return 0;
+
+err_mixerdrv:
+ platform_driver_unregister(&mixer_driver);
+err_hdmidrv:
+ platform_driver_unregister(&hdmi_driver);
+ return ret;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ platform_driver_unregister(&hdmi_driver);
+ platform_driver_unregister(&mixer_driver);
+}
+
+static void exynos_drm_hdmi_late_probe(struct work_struct *work)
+{
+ struct drm_hdmi_context *ctx = container_of(work,
+ struct drm_hdmi_context, work);
+
+ /*
+	 * exynos_drm_subdrv_register() calls subdrv->probe(), so it must not
+	 * run from our own probe context; that is why it is deferred here.
+	 *
+	 * Note: subdrv->probe() calls platform_driver_register() to probe
+	 * the hdmi and mixer drivers.
+ */
+ exynos_drm_subdrv_register(&ctx->subdrv);
+}
+
+static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_subdrv *subdrv;
+ struct drm_hdmi_context *ctx;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ subdrv = &ctx->subdrv;
+
+ subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
+ subdrv->manager.pipe = -1;
+ subdrv->manager.ops = &drm_hdmi_manager_ops;
+ subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
+ subdrv->manager.display_ops = &drm_hdmi_display_ops;
+ subdrv->manager.dev = dev;
+
+ platform_set_drvdata(pdev, subdrv);
+
+ INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
+
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+ struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+ kfree(ctx);
+
+ return 0;
+}
+
+static struct platform_driver exynos_drm_common_hdmi_driver = {
+ .probe = exynos_drm_hdmi_probe,
+ .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .driver = {
+ .name = "exynos-drm-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+
+static int __init exynos_drm_hdmi_init(void)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit exynos_drm_hdmi_exit(void)
+{
+ platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+}
+
+module_init(exynos_drm_hdmi_init);
+module_exit(exynos_drm_hdmi_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
+MODULE_LICENSE("GPL");
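
hdmi_subdrv_probe() above refuses to run unless its platform data names the real hdmi and mixer devices. The sketch below shows how a board file might wire that up; it is not part of the patch, the board_* names are invented, and it assumes exynos_drm_common_hdmi_pd is the two-pointer (struct device *hdmi_dev, *mixer_dev) structure used by the probe code above.

/* board-file sketch, not part of the patch */
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>

static struct exynos_drm_common_hdmi_pd drm_hdmi_pd;

static struct platform_device exynos_drm_hdmi_device = {
	.name		= "exynos-drm-hdmi",
	.dev		= {
		.platform_data = &drm_hdmi_pd,
	},
};

static int __init board_register_drm_hdmi(struct platform_device *hdmi_pdev,
					  struct platform_device *mixer_pdev)
{
	/* hdmi_subdrv_probe() bails out if either pointer is NULL */
	drm_hdmi_pd.hdmi_dev = &hdmi_pdev->dev;
	drm_hdmi_pd.mixer_dev = &mixer_pdev->dev;

	return platform_device_register(&exynos_drm_hdmi_device);
}
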
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644
index 00000000000..3c29f790ee4
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -0,0 +1,73 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of a specific device driver.
+ *	this context is either a hdmi_context or a mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+ struct drm_device *drm_dev;
+ void *ctx;
+};
+
+struct exynos_hdmi_display_ops {
+ bool (*is_connected)(void *ctx);
+ int (*get_edid)(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len);
+ int (*check_timing)(void *ctx, void *timing);
+ int (*power_on)(void *ctx, int mode);
+};
+
+struct exynos_hdmi_manager_ops {
+ void (*mode_set)(void *ctx, void *mode);
+ void (*commit)(void *ctx);
+ void (*disable)(void *ctx);
+};
+
+struct exynos_hdmi_overlay_ops {
+ int (*enable_vblank)(void *ctx, int pipe);
+ void (*disable_vblank)(void *ctx);
+ void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+ void (*win_commit)(void *ctx, int zpos);
+ void (*win_disable)(void *ctx, int zpos);
+};
+
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops);
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops);
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops);
+
+#endif
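
The ops tables declared above are filled in by the concrete hdmi and mixer drivers and handed to the common layer through the *_ops_register() calls; the exynos_hdmi.c changes below do exactly that for the display and manager ops. The sketch here shows the mixer side, which is not part of this hunk (every example_* symbol is invented; only the header types and the register call come from the patch).

/* sketch only, not part of the patch */
#include "drmP.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"

static int example_mixer_enable_vblank(void *ctx, int pipe)
{
	/* unmask the mixer's vsync interrupt for this crtc pipe */
	return 0;
}

static void example_mixer_disable_vblank(void *ctx)
{
	/* mask the vsync interrupt again */
}

static void example_mixer_win_commit(void *ctx, int zpos)
{
	/* push the window at @zpos to the hardware */
}

static struct exynos_hdmi_overlay_ops example_overlay_ops = {
	.enable_vblank	= example_mixer_enable_vblank,
	.disable_vblank	= example_mixer_disable_vblank,
	.win_commit	= example_mixer_win_commit,
};

static int __init example_mixer_register(void)
{
	/* the drm_hdmi_* wrappers only call ops that are non-NULL */
	exynos_drm_overlay_ops_register(&example_overlay_ops);
	return 0;
}
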
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 00000000000..bdcf770aa22
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "exynos_drm.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+struct exynos_plane {
+ struct drm_plane base;
+ struct exynos_drm_overlay overlay;
+ bool enabled;
+};
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+ struct exynos_drm_crtc_pos pos;
+ unsigned int x = src_x >> 16;
+ unsigned int y = src_y >> 16;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+ pos.crtc_x = crtc_x;
+ pos.crtc_y = crtc_y;
+ pos.crtc_w = crtc_w;
+ pos.crtc_h = crtc_h;
+
+ pos.fb_x = x;
+ pos.fb_y = y;
+
+ /* TODO: scale feature */
+ ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+ if (ret < 0)
+ return ret;
+
+ exynos_drm_fn_encoder(crtc, overlay,
+ exynos_drm_encoder_crtc_mode_set);
+ exynos_drm_fn_encoder(crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_plane_commit);
+
+ exynos_plane->enabled = true;
+
+ return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!exynos_plane->enabled)
+ return 0;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_disable);
+
+ exynos_plane->enabled = false;
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ exynos_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(exynos_plane);
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+ .update_plane = exynos_update_plane,
+ .disable_plane = exynos_disable_plane,
+ .destroy = exynos_plane_destroy,
+};
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+{
+ struct exynos_plane *exynos_plane;
+ uint32_t possible_crtcs;
+
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane)
+ return -ENOMEM;
+
+ /* all CRTCs are available */
+ possible_crtcs = (1 << MAX_CRTC) - 1;
+
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ /* TODO: format */
+ return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, NULL, 0, false);
+}
+
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_plane_set_zpos *zpos_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct exynos_plane *exynos_plane;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
+ if (zpos_req->zpos != DEFAULT_ZPOS) {
+ DRM_ERROR("zpos not within limits\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, zpos_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ zpos_req->plane_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ plane = obj_to_plane(obj);
+ exynos_plane = container_of(plane, struct exynos_plane, base);
+
+ exynos_plane->overlay.zpos = zpos_req->zpos;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 00000000000..16b71f8217e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr);
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
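
exynos_plane_init() is meant to be called once per extra hardware window when the core driver loads. A minimal sketch of that loop follows; the calling site is not part of this patch, and it assumes MAX_PLANE (the same limit checked by the zpos ioctl above) is the number of windows to expose.

/* sketch only; the calling site is not part of this patch */
#include "drmP.h"
#include "exynos_drm_plane.h"

static int example_create_planes(struct drm_device *dev)
{
	unsigned int nr;
	int ret;

	for (nr = 0; nr < MAX_PLANE; nr++) {
		ret = exynos_plane_init(dev, nr);
		if (ret)
			return ret;
	}

	return 0;
}
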
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 00000000000..f48f7ce92f5
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#define HDMI_OVERLAY_NUMBER 3
+#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+static const u8 hdmiphy_conf27[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf27_027[32] = {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v_blank = {0x0d, 0x6a, 0x01},
+ .h_v_line = {0x0d, 0xa2, 0x35},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00},
+ .h_sync_gen = {0x0e, 0x30, 0x11},
+ .v_sync_gen1 = {0x0f, 0x90, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v_blank = {0xee, 0xf2, 0x00},
+ .h_v_line = {0xee, 0x22, 0x67},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x6c, 0x50, 0x02},
+ .v_sync_gen1 = {0x0a, 0x50, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x71, 0x01, 0x01, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x0E, 0xEA, 0x08},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0x38, 0x87, 0x73},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x0e, 0xea, 0x08},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0xa4, 0x44, 0x4a},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_conf hdmi_confs[] = {
+ { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
+ { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+};
+
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+ return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_id, u8 value)
+{
+ writeb(value, hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+ u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(hdata->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_0);
+ DUMPREG(HDMI_V_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_2);
+ DUMPREG(HDMI_H_V_LINE_0);
+ DUMPREG(HDMI_H_V_LINE_1);
+ DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F_0);
+ DUMPREG(HDMI_V_BLANK_F_1);
+ DUMPREG(HDMI_V_BLANK_F_2);
+ DUMPREG(HDMI_H_SYNC_GEN_0);
+ DUMPREG(HDMI_H_SYNC_GEN_1);
+ DUMPREG(HDMI_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static int hdmi_conf_index(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == mode->hdisplay &&
+ hdmi_confs[i].height == mode->vdisplay &&
+ hdmi_confs[i].vrefresh == mode->vrefresh &&
+ hdmi_confs[i].interlace ==
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+ true : false))
+ return i;
+
+ return -1;
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+
+ if (val)
+ return true;
+
+ return false;
+}
+
+static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len)
+{
+ struct edid *raw_edid;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!hdata->ddc_port)
+ return -ENODEV;
+
+ raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+ if (raw_edid) {
+ memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
+ * EDID_LENGTH, len));
+ DRM_DEBUG_KMS("width[%d] x height[%d]\n",
+ raw_edid->width_cm, raw_edid->height_cm);
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+ struct fb_videomode *check_timing = timing;
+ int i;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+ check_timing->yres, check_timing->refresh,
+ check_timing->vmode);
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == check_timing->xres &&
+ hdmi_confs[i].height == check_timing->yres &&
+ hdmi_confs[i].vrefresh == check_timing->refresh &&
+ hdmi_confs[i].interlace ==
+ ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int hdmi_display_power_on(void *ctx, int mode)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG_KMS("hdmi [on]\n");
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("hdmi [off]\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_hdmi_display_ops display_ops = {
+ .is_connected = hdmi_is_connected,
+ .get_edid = hdmi_get_edid,
+ .check_timing = hdmi_check_timing,
+ .power_on = hdmi_display_power_on,
+};
+
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* resetting HDMI core */
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* enable HPD interrupts */
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+ /* choose HDMI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ /* disable bluescreen */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ /* choose bluescreen (fecal) color */
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+ /* force RGB, look to CEA-861-D, table 7 for more detail */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+ hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_timing_apply(struct hdmi_context *hdata,
+ const struct hdmi_preset_conf *conf)
+{
+ const struct hdmi_core_regs *core = &conf->core;
+ const struct hdmi_tg_regs *tg = &conf->tg;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+ if (core->int_pro_mode[0])
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+ HDMI_FIELD_EN);
+ else
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+ u8 buffer[2];
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* operation mode */
+ buffer[0] = 0x1f;
+ buffer[1] = 0x00;
+
+ if (hdata->hdmiphy_port)
+ i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+ /* reset hdmiphy */
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+}
+
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+ u8 buffer[32];
+ u8 operation[2];
+ u8 read_buffer[32] = {0, };
+ int ret;
+ int i;
+
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("hdmiphy is not attached\n");
+ return;
+ }
+
+ /* pixel clock */
+ memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+ ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+ if (ret != 32) {
+ DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+ return;
+ }
+
+ mdelay(10);
+
+ /* operation mode */
+ operation[0] = 0x1f;
+ operation[1] = 0x80;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+ if (ret != 2) {
+ DRM_ERROR("failed to enable hdmiphy\n");
+ return;
+ }
+
+ ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+ if (ret < 0) {
+ DRM_ERROR("failed to read hdmiphy config\n");
+ return;
+ }
+
+ for (i = 0; i < ret; i++)
+ DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+ "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_preset_conf *conf =
+ hdmi_confs[hdata->cur_conf].conf;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmiphy_conf_reset(hdata);
+ hdmiphy_conf_apply(hdata);
+
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+ /* setting core registers */
+ hdmi_timing_apply(hdata, conf);
+
+ hdmi_regs_dump(hdata, "start");
+}
+
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ int conf_idx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ conf_idx = hdmi_conf_index(mode);
+ if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+}
+
+static void hdmi_commit(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_conf_apply(hdata);
+
+ hdata->enabled = true;
+}
+
+static void hdmi_disable(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->enabled) {
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ }
+}
+
+static struct exynos_hdmi_manager_ops manager_ops = {
+ .mode_set = hdmi_mode_set,
+ .commit = hdmi_commit,
+ .disable = hdmi_disable,
+};
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void hdmi_hotplug_func(struct work_struct *work)
+{
+ struct hdmi_context *hdata =
+ container_of(work, struct hdmi_context, hotplug_work);
+ struct exynos_drm_hdmi_context *ctx =
+ (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+ u32 intc_flag;
+
+ intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
+ /* clearing flags for HPD plug/unplug */
+ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+ DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_UNPLUG);
+ }
+ if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+ DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_PLUG);
+ }
+
+ if (ctx->drm_dev && hdata->hpd_handle)
+ queue_work(hdata->wq, &hdata->hotplug_work);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+{
+ struct device *dev = hdata->dev;
+ struct hdmi_resources *res = &hdata->res;
+ static char *supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ DRM_DEBUG_KMS("HDMI resource init\n");
+
+ memset(res, 0, sizeof *res);
+
+ /* get clocks, power */
+ res->hdmi = clk_get(dev, "hdmi");
+ if (IS_ERR_OR_NULL(res->hdmi)) {
+ DRM_ERROR("failed to get clock 'hdmi'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+ goto fail;
+ }
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+ goto fail;
+ }
+ res->hdmiphy = clk_get(dev, "hdmiphy");
+ if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ DRM_ERROR("failed to get clock 'hdmiphy'\n");
+ goto fail;
+ }
+
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ sizeof res->regul_bulk[0], GFP_KERNEL);
+ if (!res->regul_bulk) {
+ DRM_ERROR("failed to get memory for regulators\n");
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ DRM_ERROR("failed to get regulators\n");
+ goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return 0;
+fail:
+ DRM_ERROR("HDMI resource init - failed\n");
+ return -ENODEV;
+}
+
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof *res);
+
+ return 0;
+}
+
+static void hdmi_resource_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn HDMI power on */
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ /* power-on hdmi physical interface */
+ clk_enable(res->hdmiphy);
+ /* turn clocks on */
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+}
+
+static void hdmi_resource_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn clocks off */
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ /* power-off hdmiphy */
+ clk_disable(res->hdmiphy);
+ /* turn HDMI power off */
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+ if (ddc)
+ hdmi_ddc = ddc;
+}
+EXPORT_SYMBOL(hdmi_attach_ddc_client);
+
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+ if (hdmiphy)
+ hdmi_hdmiphy = hdmiphy;
+}
+EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct hdmi_context *hdata;
+ struct exynos_drm_hdmi_pdata *pdata;
+ struct resource *res;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ DRM_ERROR("no platform data specified\n");
+ return -EINVAL;
+ }
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata) {
+ DRM_ERROR("out of memory\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)hdata;
+ hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ hdata->default_win = pdata->default_win;
+ hdata->default_timing = &pdata->timing;
+ hdata->default_bpp = pdata->bpp;
+ hdata->dev = dev;
+
+ ret = hdmi_resources_init(hdata);
+ if (ret) {
+ ret = -EINVAL;
+ /* release any clocks and regulators acquired before the failure */
+ goto err_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("failed to find registers\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!hdata->regs_res) {
+ DRM_ERROR("failed to claim register region\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs = ioremap(res->start, resource_size(res));
+ if (!hdata->regs) {
+ DRM_ERROR("failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_region;
+ }
+
+ /* DDC i2c driver */
+ if (i2c_add_driver(&ddc_driver)) {
+ DRM_ERROR("failed to register ddc i2c driver\n");
+ ret = -ENOENT;
+ goto err_iomap;
+ }
+
+ hdata->ddc_port = hdmi_ddc;
+
+ /* hdmiphy i2c driver */
+ if (i2c_add_driver(&hdmiphy_driver)) {
+ DRM_ERROR("failed to register hdmiphy i2c driver\n");
+ ret = -ENOENT;
+ goto err_ddc;
+ }
+
+ hdata->hdmiphy_port = hdmi_hdmiphy;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ DRM_ERROR("get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto err_hdmiphy;
+ }
+
+ /* create workqueue and hotplug work */
+ hdata->wq = alloc_workqueue("exynos-drm-hdmi",
+ WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+ if (hdata->wq == NULL) {
+ DRM_ERROR("Failed to create workqueue.\n");
+ ret = -ENOMEM;
+ goto err_hdmiphy;
+ }
+ INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
+
+ /* register hpd interrupt */
+ ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
+ drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("request interrupt failed.\n");
+ goto err_workqueue;
+ }
+ hdata->irq = res->start;
+
+ /* register specific callbacks to common hdmi. */
+ exynos_drm_display_ops_register(&display_ops);
+ exynos_drm_manager_ops_register(&manager_ops);
+
+ hdmi_resource_poweron(hdata);
+
+ return 0;
+
+err_workqueue:
+ destroy_workqueue(hdata->wq);
+err_hdmiphy:
+ i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+ i2c_del_driver(&ddc_driver);
+err_iomap:
+ iounmap(hdata->regs);
+err_req_region:
+ release_resource(hdata->regs_res);
+ kfree(hdata->regs_res);
+err_resource:
+ hdmi_resources_cleanup(hdata);
+err_data:
+ kfree(hdata);
+ kfree(drm_hdmi_ctx);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+ struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_resource_poweroff(hdata);
+
+ disable_irq(hdata->irq);
+ /* pass the same cookie that was handed to request_irq() */
+ free_irq(hdata->irq, ctx);
+
+ cancel_work_sync(&hdata->hotplug_work);
+ destroy_workqueue(hdata->wq);
+
+ hdmi_resources_cleanup(hdata);
+
+ iounmap(hdata->regs);
+
+ release_resource(hdata->regs_res);
+ kfree(hdata->regs_res);
+
+ /* hdmiphy i2c driver */
+ i2c_del_driver(&hdmiphy_driver);
+ /* DDC i2c driver */
+ i2c_del_driver(&ddc_driver);
+
+ kfree(hdata);
+
+ return 0;
+}
+
+struct platform_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = __devexit_p(hdmi_remove),
+ .driver = {
+ .name = "exynos4-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+EXPORT_SYMBOL(hdmi_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644
index 00000000000..31d6cf84c1a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+struct hdmi_conf {
+ int width;
+ int height;
+ int vrefresh;
+ bool interlace;
+ const u8 *hdmiphy_data;
+ const struct hdmi_preset_conf *conf;
+};
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct fb_videomode *default_timing;
+ unsigned int default_win;
+ unsigned int default_bpp;
+ bool hpd_handle;
+ bool enabled;
+
+ struct resource *regs_res;
+ /** base address of HDMI registers */
+ void __iomem *regs;
+ /** HDMI hotplug interrupt */
+ unsigned int irq;
+ /** workqueue for delayed work */
+ struct workqueue_struct *wq;
+ /** hotplug handling work */
+ struct work_struct hotplug_work;
+
+ struct i2c_client *ddc_port;
+ struct i2c_client *hdmiphy_port;
+
+ /** current hdmiphy conf index */
+ int cur_conf;
+ /** other resources */
+ struct hdmi_resources res;
+
+ void *parent_ctx;
+};
+
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644
index 00000000000..9fe2995ab9f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+static int hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ hdmi_attach_hdmiphy_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int hdmiphy_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+ { "s5p_hdmiphy", 0 },
+ { },
+};
+
+struct i2c_driver hdmiphy_driver = {
+ .driver = {
+ .name = "s5p-hdmiphy",
+ .owner = THIS_MODULE,
+ },
+ .id_table = hdmiphy_id,
+ .probe = hdmiphy_probe,
+ .remove = __devexit_p(hdmiphy_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 00000000000..ac24cff3977
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1070 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+ u32 reg_id, u32 val, u32 mask)
+{
+ u32 old = mixer_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+ int reg_id, const u8 *data, unsigned int size)
+{
+ /* ensure the coefficient table size is a multiple of 4 bytes */
+ BUG_ON(size & 3);
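+ /* pack four 8-bit coefficients, MSB first, into each 32-bit register word */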
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_reg_write(res, reg_id, val);
+ }
+}
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+ vp_filter_set(res, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ vp_filter_set(res, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ vp_filter_set(res, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ /* block update on vsync */
+ mixer_reg_writemask(res, MXR_STATUS, enable ?
+ MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+ vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+ /* choosing between interlace and progressive mode */
+ val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+ MXR_CFG_SCAN_PROGRASSIVE);
+
+ /* choose between HD and SD mode based on the vertical resolution */
+ if (height == 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (height == 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (height == 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (height == 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+ if (height == 480) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 576) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 720) {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else if (height == 1080) {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ }
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val = enable ? ~0 : 0;
+
+ switch (win) {
+ case 0:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ break;
+ case 1:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ break;
+ case 2:
+ vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+ break;
+ }
+}
+
+static void mixer_run(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, full_height, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ unsigned int buf_num = 0; /* avoids an uninitialized read on the disable path */
+ dma_addr_t luma_addr[2], chroma_addr[2];
+ bool tiled_mode = false;
+ bool crcb_mode = false;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_NV12MT:
+ tiled_mode = true;
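+ /* fall through - NV12MT uses the same two-plane setup as NV12M */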
+ case DRM_FORMAT_NV12M:
+ crcb_mode = false;
+ buf_num = 2;
+ break;
+ /* TODO: single buffer format NV12, NV21 */
+ default:
+ /* ignore pixel format at disable time */
+ if (!win_data->dma_addr)
+ break;
+
+ DRM_ERROR("pixel format for vp is wrong [%d].\n",
+ win_data->pixel_format);
+ return;
+ }
+
+ full_width = win_data->fb_width;
+ full_height = win_data->fb_height;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* scaling ratio, 16.16 fixed point: (src << 16) / dst (1:1 here, since src == dst) */
+ x_ratio = (width << 16) / width;
+ y_ratio = (height << 16) / height;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ if (buf_num == 2) {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->chroma_dma_addr;
+ } else {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->dma_addr
+ + (full_width * full_height);
+ }
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ ctx->interlace = true;
+ if (tiled_mode) {
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + full_width;
+ chroma_addr[1] = chroma_addr[0] + full_width;
+ }
+ } else {
+ ctx->interlace = false;
+ luma_addr[1] = 0;
+ chroma_addr[1] = 0;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* interlace or progressive scan mode */
+ val = (ctx->interlace ? ~0 : 0);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+ /* setup format */
+ val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height));
+ /* the 4:2:0 chroma plane is half the luma height; program it that way to avoid distortion */
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height / 2));
+
+ vp_reg_write(res, VP_SRC_WIDTH, width);
+ vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(src_x_offset));
+ vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+
+ vp_reg_write(res, VP_DST_WIDTH, width);
+ vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ if (ctx->interlace) {
+ vp_reg_write(res, VP_DST_HEIGHT, height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ } else {
+ vp_reg_write(res, VP_DST_HEIGHT, height);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ }
+
+ vp_reg_write(res, VP_H_RATIO, x_ratio);
+ vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+ vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ /* set buffer address to vp */
+ vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ vp_regs_dump(ctx);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ dma_addr_t dma_addr;
+ unsigned int fmt;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
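+ /* graphic layer pixel-format codes, written via MXR_GRP_CFG_FORMAT_VAL() */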
+ #define RGB565 4
+ #define ARGB1555 5
+ #define ARGB4444 6
+ #define ARGB8888 7
+
+ switch (win_data->bpp) {
+ case 16:
+ fmt = ARGB4444;
+ break;
+ case 32:
+ fmt = ARGB8888;
+ break;
+ default:
+ fmt = ARGB8888;
+ }
+
+ dma_addr = win_data->dma_addr;
+ full_width = win_data->fb_width;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* 2x scaling feature */
+ x_ratio = 0;
+ y_ratio = 0;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ /* fold the source x/y offset into the DMA base so the hardware scans from (0, 0) */
+ dma_addr = dma_addr
+ + (src_x_offset * win_data->bpp >> 3)
+ + (src_y_offset * full_width * win_data->bpp >> 3);
+ src_x_offset = 0;
+ src_y_offset = 0;
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ ctx->interlace = true;
+ else
+ ctx->interlace = false;
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* setup format */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+
+ val = MXR_GRP_WH_WIDTH(width);
+ val |= MXR_GRP_WH_HEIGHT(height);
+ val |= MXR_GRP_WH_H_SCALE(x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(y_ratio);
+ mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(src_x_offset);
+ val |= MXR_GRP_SXY_SY(src_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(dst_x_offset);
+ val |= MXR_GRP_DXY_DY(dst_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+ /* set buffer address to mixer */
+ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
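+ /* use the pixel clock as the default parent of sclk_hdmi */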
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int tries = 100;
+
+ vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ mdelay(10);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_ctx->pipe = pipe;
+
+ /* enable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+ MXR_INT_EN_VSYNC);
+
+ return 0;
+}
+
+static void mixer_disable_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(void *ctx,
+ struct exynos_drm_overlay *overlay)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct hdmi_win_data *win_data;
+ int win;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+ overlay->fb_width, overlay->fb_height,
+ overlay->fb_x, overlay->fb_y,
+ overlay->crtc_width, overlay->crtc_height,
+ overlay->crtc_x, overlay->crtc_y);
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ win_data = &mixer_ctx->win_data[win];
+
+ win_data->dma_addr = overlay->dma_addr[0];
+ win_data->vaddr = overlay->vaddr[0];
+ win_data->chroma_dma_addr = overlay->dma_addr[1];
+ win_data->chroma_vaddr = overlay->vaddr[1];
+ win_data->pixel_format = overlay->pixel_format;
+ win_data->bpp = overlay->bpp;
+
+ win_data->crtc_x = overlay->crtc_x;
+ win_data->crtc_y = overlay->crtc_y;
+ win_data->crtc_width = overlay->crtc_width;
+ win_data->crtc_height = overlay->crtc_height;
+
+ win_data->fb_x = overlay->fb_x;
+ win_data->fb_y = overlay->fb_y;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+
+ win_data->mode_width = overlay->mode_width;
+ win_data->mode_height = overlay->mode_height;
+
+ win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ if (win > 1)
+ vp_video_buffer(mixer_ctx, win);
+ else
+ mixer_graph_buffer(mixer_ctx, win);
+}
+
+static void mixer_win_disable(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+ unsigned long flags;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(mixer_ctx, false);
+
+ mixer_cfg_layer(mixer_ctx, win, false);
+
+ mixer_vsync_set_update(mixer_ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static struct exynos_hdmi_overlay_ops overlay_ops = {
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
+/* for pageflip event */
+static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+ /* ignore events that belong to a different crtc */
+ if (crtc != e->pipe)
+ continue;
+
+ is_checked = true;
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ if (is_checked)
+ drm_vblank_put(drm_dev, crtc);
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+ struct mixer_context *ctx =
+ (struct mixer_context *)drm_hdmi_ctx->ctx;
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val, val_base;
+
+ spin_lock(&res->reg_slock);
+
+ /* read the interrupt status so the VSYNC flag can be handled and cleared */
+ val = mixer_reg_read(res, MXR_INT_STATUS);
+
+ /* handling VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+ /* in interlaced mode, the flip completes only after the shadow register latches the new base */
+ if (ctx->interlace) {
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (ctx->win_data[0].dma_addr != val_base)
+ goto out;
+
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (ctx->win_data[1].dma_addr != val_base)
+ goto out;
+ }
+
+ drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ }
+
+out:
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+ /* the vsync interrupt uses different bits for status and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mixer_reg_write(res, MXR_INT_STATUS, val);
+
+ spin_unlock(&res->reg_slock);
+
+ return IRQ_HANDLED;
+}
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
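+ /* route the mixer output to the HDMI path */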
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* set the default layer priority: layer1 > video > layer0,
+ * since the typical usage scenario is:
+ * layer0 - framebuffer
+ * video - video overlay
+ * layer1 - OSD
+ */
+ val = MXR_LAYER_CFG_GRP0_VAL(1);
+ val |= MXR_LAYER_CFG_VP_VAL(2);
+ val |= MXR_LAYER_CFG_GRP1_VAL(3);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* set the background color to black */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_resource_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_win_reset(ctx);
+}
+
+static void mixer_resource_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("resume - start\n");
+
+ mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("suspend - start\n");
+
+ mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ .runtime_suspend = mixer_runtime_suspend,
+ .runtime_resume = mixer_runtime_resume,
+};
+
+static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
+{
+ struct mixer_context *mixer_ctx =
+ (struct mixer_context *)ctx->ctx;
+ struct device *dev = &pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+ int ret;
+
+ mixer_res->dev = dev;
+ spin_lock_init(&mixer_res->reg_slock);
+
+ mixer_res->mixer = clk_get(dev, "mixer");
+ if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+ dev_err(dev, "failed to get clock 'mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->vp = clk_get(dev, "vp");
+ if (IS_ERR_OR_NULL(mixer_res->vp)) {
+ dev_err(dev, "failed to get clock 'vp'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
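+ /* source sclk_mixer from sclk_hdmi */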
+ clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+ mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->mixer_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->vp_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_vp_regs;
+ }
+
+ ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_vp_regs;
+ }
+ mixer_res->irq = res->start;
+
+ return 0;
+
+fail_vp_regs:
+ iounmap(mixer_res->vp_regs);
+
+fail_mixer_regs:
+ iounmap(mixer_res->mixer_regs);
+
+fail:
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+ clk_put(mixer_res->sclk_dac);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+ clk_put(mixer_res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+ clk_put(mixer_res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(mixer_res->vp))
+ clk_put(mixer_res->vp);
+ if (!IS_ERR_OR_NULL(mixer_res->mixer))
+ clk_put(mixer_res->mixer);
+ mixer_res->dev = NULL;
+ return ret;
+}
+
+static void mixer_resources_cleanup(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ disable_irq(res->irq);
+ free_irq(res->irq, ctx);
+
+ iounmap(res->vp_regs);
+ iounmap(res->mixer_regs);
+}
+
+static int __devinit mixer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *ctx;
+ int ret;
+
+ dev_info(dev, "probe start\n");
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ /* acquire resources: regs, irqs, clocks */
+ ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+ if (ret)
+ goto fail;
+
+ /* register mixer-specific callbacks with the common hdmi driver */
+ exynos_drm_overlay_ops_register(&overlay_ops);
+
+ mixer_resource_poweron(ctx);
+
+ return 0;
+
+fail:
+ dev_info(dev, "probe failed\n");
+ kfree(ctx);
+ kfree(drm_hdmi_ctx);
+ return ret;
+}
+
+static int __devexit mixer_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx =
+ platform_get_drvdata(pdev);
+ struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+
+ dev_info(dev, "remove sucessful\n");
+
+ mixer_resource_poweroff(ctx);
+ mixer_resources_cleanup(ctx);
+
+ return 0;
+}
+
+struct platform_driver mixer_driver = {
+ .driver = {
+ .name = "s5p-mixer",
+ .owner = THIS_MODULE,
+ .pm = &mixer_pm_ops,
+ },
+ .probe = mixer_probe,
+ .remove = __devexit_p(mixer_remove),
+};
+EXPORT_SYMBOL(mixer_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 00000000000..cebacfefc07
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+#define HDMI_OVERLAY_NUMBER 3
+
+struct hdmi_win_data {
+ dma_addr_t dma_addr;
+ void __iomem *vaddr;
+ dma_addr_t chroma_dma_addr;
+ void __iomem *chroma_vaddr;
+ uint32_t pixel_format;
+ unsigned int bpp;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int scan_flags;
+};
+
+struct mixer_resources {
+ struct device *dev;
+ /** interrupt index */
+ int irq;
+ /** pointer to Mixer registers */
+ void __iomem *mixer_regs;
+ /** pointer to Video Processor registers */
+ void __iomem *vp_regs;
+ /** spinlock for protection of registers */
+ spinlock_t reg_slock;
+ /** other resources */
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+struct mixer_context {
+ unsigned int default_win;
+ struct fb_videomode *default_timing;
+ unsigned int default_bpp;
+
+ /** mixer interrupt */
+ unsigned int irq;
+ /** current crtc pipe for vblank */
+ int pipe;
+ /** interlace scan mode */
+ bool interlace;
+ /** vp enabled status */
+ bool vp_enabled;
+
+ /** mixer and vp resources */
+ struct mixer_resources mixer_res;
+
+ /** overlay window data */
+ struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
+};
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 00000000000..72e6b52be74
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,147 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_ACR_CON HDMI_CORE_BASE(0x0180)
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
+#define HDMI_AUI_CON HDMI_CORE_BASE(0x0360)
+#define HDMI_SPD_CON HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+#define HDMI_FIELD_EN (1 << 1)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 00000000000..fd2f4d14cf6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+#define MXR_CM_COEFF_Y 0x0080
+#define MXR_CM_COEFF_CB 0x0084
+#define MXR_CM_COEFF_CR 0x0088
+#define MXR_GRAPHIC0_BASE_S 0x2024
+#define MXR_GRAPHIC1_BASE_S 0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i) (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i) (0x2024 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_RGB601_0_255 (0 << 9)
+#define MXR_CFG_RGB601_16_235 (1 << 9)
+#define MXR_CFG_RGB709_0_255 (2 << 9)
+#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK 0x600
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_OUT_MASK (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 00000000000..10b737af0a7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
new file mode 100644
index 00000000000..754e14bdc80
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -0,0 +1,27 @@
+config DRM_GMA500
+ tristate "Intel GMA5/600 KMS Framebuffer"
+ depends on DRM && PCI && X86 && EXPERIMENTAL
+ select FB_CFB_COPYAREA
+ select FB_CFB_FILLRECT
+ select FB_CFB_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for an experimental 2D KMS framebuffer driver for the
+ Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+ devices.
+
+config DRM_GMA600
+ bool "Intel GMA600 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+ platforms with LVDS ports. HDMI and MIPI are not currently
+ supported.
+
+config DRM_GMA3600
+ bool "Intel GMA3600/3650 support (Experimental)"
+ depends on DRM_GMA500
+ help
+ Say yes to include basic support for Intel GMA3600/3650 (Intel
+ Cedar Trail) platforms.
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 00000000000..81c103be5e2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,40 @@
+#
+# KMS driver for the GMA500
+#
+ccflags-y += -Iinclude/drm
+
+gma500_gfx-y += gem_glue.o \
+ accel_2d.o \
+ backlight.o \
+ framebuffer.o \
+ gem.o \
+ gtt.o \
+ intel_bios.o \
+ intel_i2c.o \
+ intel_gmbus.o \
+ intel_opregion.o \
+ mmu.o \
+ power.o \
+ psb_drv.o \
+ psb_intel_display.o \
+ psb_intel_lvds.o \
+ psb_intel_modes.o \
+ psb_intel_sdvo.o \
+ psb_lid.o \
+ psb_irq.o \
+ psb_device.o \
+ mid_bios.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+ oaktrail_crtc.o \
+ oaktrail_lvds.o \
+ oaktrail_hdmi.o \
+ oaktrail_hdmi_i2c.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644
index 00000000000..d5ef1a5793c
--- /dev/null
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ * psb_spank - reset the 2D engine
+ * @dev_priv: our PSB DRM device
+ *
+ * Soft reset the graphics engine and then reload the necessary registers.
+ * We use this at initialisation time but it will become relevant for
+ * accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+ PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+ _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+ _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+ PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+ msleep(1);
+
+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+ wmb();
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ wmb();
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ msleep(1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ * psb2_2d_wait_available - wait for FIFO room
+ * @dev_priv: our DRM device
+ * @size: size (in dwords) of the command we want to issue
+ *
+ * Wait until there is room to load the FIFO with our data. If the
+ * device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+ unsigned size)
+{
+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ unsigned long t = jiffies + HZ;
+
+ while (avail < size) {
+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+ if (time_after(jiffies, t)) {
+ psb_spank(dev_priv);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+/**
+ * psbfb_2d_submit - submit a 2D command
+ * @dev_priv: our DRM device
+ * @cmdbuf: command to issue
+ * @size: length (in dwords)
+ *
+ * Issue one or more 2D commands to the accelerator. This needs to be
+ * serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+ unsigned size)
+{
+ int ret = 0;
+ int i;
+ unsigned submit_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ while (size > 0) {
+ submit_size = (size < 0x60) ? size : 0x60;
+ size -= submit_size;
+ ret = psb_2d_wait_available(dev_priv, submit_size);
+ if (ret)
+ break;
+
+ submit_size <<= 2;
+
+ for (i = 0; i < submit_size; i += 4)
+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+ }
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return ret;
+}
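
The submit path above never writes more than 0x60 dwords before waiting for FIFO space again, and converts the chunk size to a byte offset with the <<= 2 shift. Below is a minimal standalone sketch of just that chunking arithmetic; the 0x60 limit is taken from the function above, while the buffer contents, sizes and the printf "hardware" are made up for illustration.

#include <stdio.h>

#define FIFO_CHUNK_DWORDS 0x60	/* largest burst issued per FIFO wait, as above */

/* Mocked "hardware": just report how many dwords each burst carries. */
static void submit_chunk(const unsigned int *cmd, unsigned int dwords)
{
	printf("burst of %u dwords starting with 0x%08x\n", dwords, cmd[0]);
}

static void submit(const unsigned int *cmdbuf, unsigned int size)
{
	while (size > 0) {
		unsigned int chunk = (size < FIFO_CHUNK_DWORDS) ? size : FIFO_CHUNK_DWORDS;

		/* The driver waits here (psb_2d_wait_available) until the
		 * FIFO has room for 'chunk' dwords, then writes chunk << 2
		 * bytes to the slave port. */
		submit_chunk(cmdbuf, chunk);
		cmdbuf += chunk;
		size -= chunk;
	}
}

int main(void)
{
	unsigned int cmds[200];
	unsigned int i;

	for (i = 0; i < 200; i++)
		cmds[i] = i;
	submit(cmds, 200);	/* expect bursts of 96, 96 and 8 dwords */
	return 0;
}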
+
+
+/**
+ * psb_accel_2d_copy_direction - compute blit order
+ * @xdir: X direction of move
+ * @ydir: Y direction of move
+ *
+ * Compute the correct order settings to ensure that an overlapping blit
+ * correctly copies all the pixels.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+ if (xdir < 0)
+ return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+ PSB_2D_COPYORDER_TR2BL;
+ else
+ return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+ PSB_2D_COPYORDER_TL2BR;
+}
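
The order matters because the source and destination rectangles may overlap: when the destination lies to the right of (or below) the source, the copy has to run backwards so unread source pixels are not overwritten first, the familiar memcpy-versus-memmove problem. A standalone sketch applying the same decision to a one-dimensional overlapping copy; the enum values are placeholders, not the real PSB_2D_COPYORDER_* bits.

#include <stdio.h>

/* Placeholder order codes - the real PSB_2D_COPYORDER_* values differ. */
enum copy_order { TL2BR, TR2BL, BL2TR, BR2TL };

static enum copy_order copy_direction(int xdir, int ydir)
{
	if (xdir < 0)
		return (ydir < 0) ? BR2TL : TR2BL;
	else
		return (ydir < 0) ? BL2TR : TL2BR;
}

int main(void)
{
	char row[] = "ABCDEFGH";
	/* Copy 5 pixels from x=0 to x=2: the regions overlap and
	 * xdir = 0 - 2 < 0, so we must walk right-to-left (the
	 * TR2BL/BR2TL cases above). */
	enum copy_order order = copy_direction(0 - 2, 0);
	int i;

	if (order == TR2BL || order == BR2TL)
		for (i = 4; i >= 0; i--)
			row[2 + i] = row[0 + i];
	else
		for (i = 0; i <= 4; i++)
			row[2 + i] = row[0 + i];

	printf("%s\n", row);	/* ABABCDEH: destination holds ABCDE intact */
	return 0;
}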
+
+/**
+ * psb_accel_2d_copy - accelerated 2D copy
+ * @dev_priv: our DRM device
+ * @src_offset: source offset in bytes
+ * @src_stride: source stride in bytes
+ * @src_format: psb 2D format defines
+ * @dst_offset: destination offset in bytes
+ * @dst_stride: destination stride in bytes
+ * @dst_format: psb 2D format defines
+ * @src_x: source X offset in pixels
+ * @src_y: source Y offset in pixels
+ * @dst_x: destination X offset in pixels
+ * @dst_y: destination Y offset in pixels
+ * @size_x: width of the copied area in pixels
+ * @size_y: height of the copied area in pixels
+ *
+ * Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+ uint32_t src_offset, uint32_t src_stride,
+ uint32_t src_format, uint32_t dst_offset,
+ uint32_t dst_stride, uint32_t dst_format,
+ uint16_t src_x, uint16_t src_y,
+ uint16_t dst_x, uint16_t dst_y,
+ uint16_t size_x, uint16_t size_y)
+{
+ uint32_t blit_cmd;
+ uint32_t buffer[10];
+ uint32_t *buf;
+ uint32_t direction;
+
+ buf = buffer;
+
+ direction =
+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_TR2BL) {
+ src_x += size_x - 1;
+ dst_x += size_x - 1;
+ }
+ if (direction == PSB_2D_COPYORDER_BR2TL ||
+ direction == PSB_2D_COPYORDER_BL2TR) {
+ src_y += size_y - 1;
+ dst_y += size_y - 1;
+ }
+
+ blit_cmd =
+ PSB_2D_BLIT_BH |
+ PSB_2D_ROT_NONE |
+ PSB_2D_DSTCK_DISABLE |
+ PSB_2D_SRCCK_DISABLE |
+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+ *buf++ = PSB_2D_FENCE_BH;
+ *buf++ =
+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+ PSB_2D_DST_STRIDE_SHIFT);
+ *buf++ = dst_offset;
+ *buf++ =
+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+ PSB_2D_SRC_STRIDE_SHIFT);
+ *buf++ = src_offset;
+ *buf++ =
+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+ *buf++ = blit_cmd;
+ *buf++ =
+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+ PSB_2D_DST_YSTART_SHIFT);
+ *buf++ =
+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+ PSB_2D_DST_YSIZE_SHIFT);
+ *buf++ = PSB_2D_FLUSH_BH;
+
+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
+ * @info: our framebuffer
+ * @a: copyarea parameters from the framebuffer core
+ *
+ * Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+ const struct fb_copyarea *a)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ uint32_t stride;
+ uint32_t src_format;
+ uint32_t dst_format;
+
+ if (!fb)
+ return;
+
+ offset = psbfb->gtt->offset;
+ stride = fb->pitches[0];
+
+ switch (fb->depth) {
+ case 8:
+ src_format = PSB_2D_SRC_332RGB;
+ dst_format = PSB_2D_DST_332RGB;
+ break;
+ case 15:
+ src_format = PSB_2D_SRC_555RGB;
+ dst_format = PSB_2D_DST_555RGB;
+ break;
+ case 16:
+ src_format = PSB_2D_SRC_565RGB;
+ dst_format = PSB_2D_DST_565RGB;
+ break;
+ case 24:
+ case 32:
+ /* this is wrong but since we don't do blending it's okay */
+ src_format = PSB_2D_SRC_8888ARGB;
+ dst_format = PSB_2D_DST_8888ARGB;
+ break;
+ default:
+ /* software fallback */
+ cfb_copyarea(info, a);
+ return;
+ }
+
+ if (!gma_power_begin(dev, false)) {
+ cfb_copyarea(info, a);
+ return;
+ }
+ psb_accel_2d_copy(dev_priv,
+ offset, stride, src_format,
+ offset, stride, dst_format,
+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+ gma_power_end(dev);
+}
+
+/**
+ * psbfb_copyarea - 2D copy interface
+ * @info: our framebuffer
+ * @region: region to copy
+ *
+ * Copy an area of the framebuffer console either by the accelerator
+ * or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ if (unlikely(info->state != FBINFO_STATE_RUNNING))
+ return;
+
+ /* Avoid the 8 pixel erratum */
+ if (region->width == 8 || region->height == 8 ||
+ (info->flags & FBINFO_HWACCEL_DISABLED))
+ return cfb_copyarea(info, region);
+
+ psbfb_copyarea_accel(info, region);
+}
+
+/**
+ * psbfb_sync - synchronize 2D
+ * @info: our framebuffer
+ *
+ * Wait for the 2D engine to quiesce so that we can do CPU
+ * access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long _end = jiffies + DRM_HZ;
+ int busy = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->lock_2d, flags);
+ /*
+ * First idle the 2D engine.
+ */
+
+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+ goto out;
+
+ do {
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+
+ if (busy)
+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+ if (busy)
+ goto out;
+
+ do {
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+ cpu_relax();
+ } while (busy && !time_after_eq(jiffies, _end));
+ if (busy)
+ busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+ _PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+ spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+ return (busy) ? -EBUSY : 0;
+}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
new file mode 100644
index 00000000000..20793951fca
--- /dev/null
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -0,0 +1,49 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->ops->backlight_init(dev);
+#else
+ return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ backlight_update_status(dev_priv->backlight_device);
+ backlight_device_unregister(dev_priv->backlight_device);
+ }
+#endif
+}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644
index 00000000000..4a5b099c3bc
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -0,0 +1,351 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+static void cdv_disable_vga(struct drm_device *dev)
+{
+ u8 sr1;
+ u32 vga_reg;
+
+ vga_reg = VGACNTRL;
+
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ udelay(300);
+
+ REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+ REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ cdv_disable_vga(dev);
+
+ cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+ cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+ /* These bits indicate HDMI not SDVO on CDV */
+ if (REG_READ(SDVOB) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+ if (REG_READ(SDVOC) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int cdv_brightness;
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return cdv_brightness;
+}
+
+
+static int cdv_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ /* FIXME */
+ }
+ return 0;
+}
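
The range check above is plain integer arithmetic on the core clock and the VBT backlight frequency. A standalone worked example, assuming a 200 MHz core clock and a bl_max_freq of 200 (both made-up inputs; the macros are copied from above): the computed value is 31250, which sits inside the 0x2..0xFFFE window.

#include <stdio.h>
#include <stdint.h>

#define BLC_PWM_FREQ_CALC_CONSTANT	32
#define MHz				1000000
#define PSB_BLC_PWM_PRECISION_FACTOR	10
#define PSB_BLC_MAX_PWM_REG_FREQ	0xFFFE
#define PSB_BLC_MIN_PWM_REG_FREQ	0x2

int main(void)
{
	/* Assumed inputs: 200 MHz core clock, VBT backlight frequency 200. */
	unsigned long core_clock = 200;
	uint16_t bl_max_freq = 200;
	uint32_t value;

	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
	value *= PSB_BLC_PWM_PRECISION_FACTOR;
	value /= bl_max_freq;
	value /= PSB_BLC_PWM_PRECISION_FACTOR;

	printf("PWM register value %u, in range: %s\n", (unsigned)value,
	       (value <= PSB_BLC_MAX_PWM_REG_FREQ &&
		value >= PSB_BLC_MIN_PWM_REG_FREQ) ? "yes" : "no");
	return 0;
}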
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+ int level = bd->props.brightness;
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+ cdv_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+ .get_brightness = cdv_get_brightness,
+ .update_status = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ cdv_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &cdv_ops, &props);
+ if (IS_ERR(cdv_backlight_device))
+ return PTR_ERR(cdv_backlight_device);
+
+ ret = cdv_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(cdv_backlight_device);
+ cdv_backlight_device = NULL;
+ return ret;
+ }
+ cdv_backlight_device->props.brightness = 100;
+ cdv_backlight_device->props.max_brightness = 100;
+ backlight_update_status(cdv_backlight_device);
+ dev_priv->backlight_device = cdv_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Cedarview specific chip logic and low level methods
+ * for power management
+ *
+ * FIXME: we need to implement the apm/ospm base management bits
+ * for this and the MID devices.
+ */
+
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
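
Both helpers drive the message bus through the host bridge's 0xD0 (command) and 0xD4 (data) config registers; the command word packs an opcode (0x10 read, 0x11 write) in bits 31:24, the port in 23:16, the register offset in 15:8, and byte enables in the low bits. A tiny sketch that only builds and prints those command words; the port 4 / offset 0x20 inputs are hypothetical, while the field layout is taken from the two functions above.

#include <stdio.h>
#include <stdint.h>

/* Build a message-bus command word: opcode | port | offset (| byte enables). */
static uint32_t mcr_encode(uint32_t opcode, uint32_t port, uint32_t offset,
			   uint32_t byte_en)
{
	return (opcode << 24) | (port << 16) | (offset << 8) | byte_en;
}

int main(void)
{
	/* Hypothetical punit port 4, register offset 0x20. */
	uint32_t rd = mcr_encode(0x10, 4, 0x20, 0x00);	/* read, as in CDV_MSG_READ32 */
	uint32_t wr = mcr_encode(0x11, 4, 0x20, 0xF0);	/* write, all byte enables */

	printf("read  MCR = 0x%08x\n", rd);	/* 0x10042000 */
	printf("write MCR = 0x%08x\n", wr);	/* 0x110420f0 */
	return 0;
}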
+
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_GFX_MASK 0x3
+#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
+#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
+
+static void cdv_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt;
+ int i;
+
+ dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_APMBA) & 0xFFFF;
+ dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_OSPMBA) & 0xFFFF;
+
+ /* Force power on for now */
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+ break;
+ udelay(10);
+ }
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+ outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+ break;
+ udelay(10);
+ }
+}
+
+/**
+ * cdv_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ *
+ * FIXME: review
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+/**
+ * cdv_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ *
+ * FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ dev_priv->core_freq = 266;
+ break;
+ default:
+ dev_priv->core_freq = 0;
+ }
+}
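
With the missing break added above, the switch is simply a lookup on the low three bits of the word read back through the 0xD0/0xD4 pair. The same decode as a table, with a made-up strap value:

#include <stdio.h>
#include <stdint.h>

/* Same decode as the switch above: the low 3 bits of the strap word. */
static const unsigned int cdv_core_freq_mhz[8] = {
	100, 133, 150, 178, 200, 266, 266, 266
};

int main(void)
{
	uint32_t clock = 0x5;	/* pretend value read back through 0xD0/0xD4 */

	printf("core clock: %u MHz\n", cdv_core_freq_mhz[clock & 0x07]);
	return 0;
}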
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+ cdv_get_core_freq(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+ .name = "GMA3600/3650",
+ .accel_2d = 0,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+ .chip_setup = cdv_chip_setup,
+
+ .crtc_helper = &cdv_intel_helper_funcs,
+ .crtc_funcs = &cdv_intel_crtc_funcs,
+
+ .output_init = cdv_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = cdv_backlight_init,
+#endif
+
+ .init_pm = cdv_init_pm,
+ .save_regs = cdv_save_display_registers,
+ .restore_regs = cdv_restore_display_registers,
+ .power_down = cdv_power_down,
+ .power_up = cdv_power_up,
+};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644
index 00000000000..2a88b7beb55
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+ int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ /* FIXME: msleep ?? */
+ mdelay(20);
+}
+
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644
index 00000000000..6d0f10b7569
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 temp, reg;
+ reg = ADPA;
+
+ temp = REG_READ(reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int max_clock = 0;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* The lowest clock for CDV is 20000 kHz */
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ /* The max clock for CDV is 355 MHz instead of 400 MHz */
+ max_clock = 355000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
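
A quick worked check of those limits: a 1920x1080 mode at 148500 kHz clears both clock tests (above 20000, below 355000) but is still rejected because hdisplay exceeds 1680. A standalone sketch with simplified status codes (the real values come from DRM's mode status enum):

#include <stdio.h>

enum { MODE_OK, MODE_CLOCK_LOW, MODE_CLOCK_HIGH, MODE_PANEL };	/* simplified */

static int crt_mode_valid(int clock_khz, int hdisplay, int vdisplay)
{
	if (clock_khz < 20000)
		return MODE_CLOCK_LOW;
	if (clock_khz > 355000)
		return MODE_CLOCK_HIGH;
	if (hdisplay > 1680 || vdisplay > 1050)
		return MODE_PANEL;
	return MODE_OK;
}

int main(void)
{
	/* 1920x1080@60 (148.5 MHz): clock is fine, the size check rejects it. */
	printf("%d\n", crt_mode_valid(148500, 1920, 1080));	/* 3 = MODE_PANEL */
	return 0;
}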
+
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc =
+ to_psb_intel_crtc(crtc);
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+ u32 adpa_reg;
+
+ if (psb_intel_crtc->pipe == 0)
+ dpll_md_reg = DPLL_A_MD;
+ else
+ dpll_md_reg = DPLL_B_MD;
+
+ adpa_reg = ADPA;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ {
+ dpll_md = REG_READ(dpll_md_reg);
+ REG_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ if (psb_intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if a CRT is connected, false otherwise.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+ bool force)
+{
+ struct drm_device *dev = connector->dev;
+ u32 hotplug_en;
+ int i, tries = 0;
+ bool ret = false;
+ u32 adpa_orig;
+
+ /* disable the DAC when doing the hotplug detection */
+
+ adpa_orig = REG_READ(ADPA);
+
+ REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+
+ /*
+ * On CDV, the CRT detect sequence needs to be done twice
+ * to get a reliable result.
+ */
+ tries = 2;
+
+ hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(REG_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
+
+ if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+ CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* Restore the saved ADPA */
+ REG_WRITE(ADPA, adpa_orig);
+ return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+ struct drm_connector *connector, bool force)
+{
+ if (cdv_intel_crt_detect_hotplug(connector, force))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+ .dpms = cdv_intel_crt_dpms,
+ .mode_fixup = cdv_intel_crt_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .commit = psb_intel_encoder_commit,
+ .mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cdv_intel_crt_destroy,
+ .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_crt_connector_helper_funcs = {
+ .mode_valid = cdv_intel_crt_mode_valid,
+ .get_modes = cdv_intel_crt_get_modes,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+ .destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+
+ struct psb_intel_connector *psb_intel_connector;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ u32 i2c_reg;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ drm_connector_init(dev, connector,
+ &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ encoder = &psb_intel_encoder->base;
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+
+ /* Set up the DDC bus. */
+ i2c_reg = GPIOA;
+ /* Remove the following code for CDV */
+ /*
+ if (dev_priv->crt_ddc_bus != 0)
+ i2c_reg = dev_priv->crt_ddc_bus;
+ */
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ i2c_reg, "CRTDDC_A");
+ if (!psb_intel_encoder->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ goto failed_ddc;
+ }
+
+ psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ /*
+ psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+ psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+
+ return;
+failed_ddc:
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ drm_connector_cleanup(&psb_intel_connector->base);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+ return;
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644
index 00000000000..18d11525095
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -0,0 +1,1508 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+struct cdv_intel_range_t {
+ int min, max;
+};
+
+struct cdv_intel_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct cdv_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+#define INTEL_P2_NUM 2
+
+struct cdv_intel_limit_t {
+ struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct cdv_intel_p2_t p2;
+};
+
+#define CDV_LIMIT_SINGLE_LVDS_96 0
+#define CDV_LIMIT_SINGLE_LVDS_100 1
+#define CDV_LIMIT_DAC_HDMI_27 2
+#define CDV_LIMIT_DAC_HDMI_96 3
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+ { /* CDV_SINGLE_LVDS_96MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ .p2 = {.dot_limit = 200000,
+ .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_SINGLE_LVDS_100MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ /* The single-channel range is 25-112 MHz, and dual-channel
+ * is 80-224 MHz. Prefer single channel as much as possible.
+ */
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_DAC_HDMI_27MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+ { /* CDV_DAC_HDMI_96MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+};
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) \
+ msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+
+static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before read\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after read\n");
+ return ret;
+ }
+
+ *val = REG_READ(SB_DATA);
+
+ return 0;
+}
+
+static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+ int ret;
+ static bool dpio_debug = true;
+ u32 temp;
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+ DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+ }
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before write\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_DATA, val);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after write\n");
+ return ret;
+ }
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+ }
+
+ return 0;
+}
+
+/* Reset the DPIO configuration register. The BIOS does this at every
+ * mode set.
+ */
+static void cdv_sb_reset(struct drm_device *dev)
+{
+
+ REG_WRITE(DPIO_CFG, 0);
+ REG_READ(DPIO_CFG);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus. They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+ struct cdv_intel_clock_t *clock)
+{
+ struct psb_intel_crtc *psb_crtc =
+ to_psb_intel_crtc(crtc);
+ int pipe = psb_crtc->pipe;
+ u32 m, n_vco, p;
+ int ret = 0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ u32 ref_value;
+
+ cdv_sb_reset(dev);
+
+ if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+ DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+ return -EBUSY;
+ }
+
+ /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+ ref_value = 0x68A701;
+
+ cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+ /* We don't know what the other fields of these regs are, so
+ * leave them in place.
+ */
+ ret = cdv_sb_read(dev, SB_M(pipe), &m);
+ if (ret)
+ return ret;
+ m &= ~SB_M_DIVIDER_MASK;
+ m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+ ret = cdv_sb_write(dev, SB_M(pipe), m);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+ if (ret)
+ return ret;
+
+ /* Follow the BIOS to program the N_DIVIDER REG */
+ n_vco &= 0xFFFF;
+ n_vco |= 0x107;
+ n_vco &= ~(SB_N_VCO_SEL_MASK |
+ SB_N_DIVIDER_MASK |
+ SB_N_CB_TUNE_MASK);
+
+ n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
+ if (clock->vco < 2250000) {
+ n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 2750000) {
+ n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 3300000) {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+ } else {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+ }
+
+ ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_P(pipe), &p);
+ if (ret)
+ return ret;
+ p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+ p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+ switch (clock->p2) {
+ case 5:
+ p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+ break;
+ case 10:
+ p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+ break;
+ case 14:
+ p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+ break;
+ case 7:
+ p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+ break;
+ default:
+ DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+ return -EINVAL;
+ }
+ ret = cdv_sb_write(dev, SB_P(pipe), p);
+ if (ret)
+ return ret;
+
+ /* Always program the lane registers for pipe A */
+ if (pipe == 0) {
+ /* Program the Lane0/1 for HDMI B */
+ u32 lane_reg, lane_value;
+
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ /* Program the Lane2/3 for HDMI C */
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+
+ return 0;
+}
+
+/*
+ * Returns whether any encoder on the specified pipe is of the specified type
+ */
+bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct cdv_intel_limit_t *limit;
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+ * Now only single-channel LVDS is supported on CDV. If it is
+ * incorrect, please add the dual-channel LVDS.
+ */
+ if (refclk == 96000)
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter */
+static void cdv_intel_clock(struct drm_device *dev,
+ int refclk, struct cdv_intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = (refclk * clock->m) / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
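
The derived values follow directly: m = m2 + 2, p = p1 * p2, vco = refclk * m / n and dot = vco / p. A worked example against the single-channel LVDS / 96 MHz limits above, with hand-picked divider values (n = 3, m2 = 96, p1 = 2, p2 = 14): vco = 96000 * 98 / 3 = 3136000 kHz and dot = 3136000 / 28 = 112000 kHz, both inside the table's ranges. The same arithmetic as a standalone sketch:

#include <stdio.h>

struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

static void derive(int refclk, struct clk *c)
{
	c->m = c->m2 + 2;	/* m1 is reserved as 0 on CDV */
	c->p = c->p1 * c->p2;
	c->vco = (refclk * c->m) / c->n;
	c->dot = c->vco / c->p;
}

int main(void)
{
	/* Hand-picked candidate inside the CDV_LIMIT_SINGLE_LVDS_96 ranges. */
	struct clk c = { .n = 3, .m1 = 0, .m2 = 96, .p1 = 2, .p2 = 14 };

	derive(96000, &c);	/* refclk in kHz, as in the limit tables */
	printf("vco=%d kHz dot=%d kHz\n", c.vco, c.dot);	/* 3136000, 112000 */
	return 0;
}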
+
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+ const struct cdv_intel_limit_t *limit,
+ struct cdv_intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ /* unnecessary to check the range of m(m1/M2)/n again */
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cdv_intel_clock_t clock;
+ const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+ int err = target;
+
+
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ clock.m1 = 0;
+ /* m1 is reserved as 0 in CDV, n is a ring counter.
+ So skip the m1 loop */
+ for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+ clock.m2++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ cdv_intel_clock(dev, refclk, &clock);
+
+ if (!cdv_intel_PLL_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
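
The search itself is an exhaustive sweep over n, m2 and p1 (m1 stays 0), keeping the candidate whose derived dot clock lands closest to the target. A self-contained sketch of the same loop against the single-channel LVDS / 96 MHz limits, assuming the fixed p2 = 14 branch (panel off, target below the dot limit) and a made-up 65000 kHz target:

#include <stdio.h>
#include <stdlib.h>

struct range { int min, max; };
struct clk { int n, m2, p1, p2, m, p, vco, dot; };

/* Ranges copied from the CDV_LIMIT_SINGLE_LVDS_96 entry above. */
static const struct range r_dot = { 20000, 115500 };
static const struct range r_vco = { 1800000, 3600000 };
static const struct range r_n = { 2, 6 };
static const struct range r_m2 = { 58, 158 };
static const struct range r_p = { 28, 140 };
static const struct range r_p1 = { 2, 10 };

static void derive(int refclk, struct clk *c)
{
	c->m = c->m2 + 2;	/* m1 is reserved as 0 on CDV */
	c->p = c->p1 * c->p2;
	c->vco = (refclk * c->m) / c->n;
	c->dot = c->vco / c->p;
}

static int in_range(int v, const struct range *r)
{
	return v >= r->min && v <= r->max;
}

int main(void)
{
	int refclk = 96000, target = 65000;	/* e.g. a 65 MHz panel clock */
	struct clk c = { .p2 = 14 }, best = { 0 };
	int err = target;

	for (c.n = r_n.min; c.n <= r_n.max; c.n++)
		for (c.m2 = r_m2.min; c.m2 <= r_m2.max; c.m2++)
			for (c.p1 = r_p1.min; c.p1 <= r_p1.max; c.p1++) {
				derive(refclk, &c);
				if (!in_range(c.p, &r_p) ||
				    !in_range(c.vco, &r_vco) ||
				    !in_range(c.dot, &r_dot))
					continue;
				if (abs(c.dot - target) < err) {
					best = c;
					err = abs(c.dot - target);
				}
			}

	/* Should report n=5 m2=140 p1=3, dot 64914 kHz (error 86 kHz). */
	printf("n=%d m2=%d p1=%d -> dot=%d kHz (error %d kHz)\n",
	       best.n, best.m2, best.p1, best.dot, err);
	return 0;
}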
+
+int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
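
The scanout offset written to DSPxBASE is ordinary linear-framebuffer arithmetic: y times the stride plus x times the bytes per pixel. A worked example with an assumed 32 bpp framebuffer and a 7680-byte pitch (1920 * 4); the pan coordinates are made up.

#include <stdio.h>

int main(void)
{
	/* Assumed framebuffer: 32 bpp, pitch 7680 bytes (1920 * 4). */
	unsigned int pitch = 7680, bpp = 32;
	unsigned int x = 100, y = 50;
	unsigned long offset = (unsigned long)y * pitch + x * (bpp / 8);

	printf("panning to (%u,%u) -> byte offset %lu\n", x, y, offset);
	/* 50 * 7680 + 100 * 4 = 384400 */
	return 0;
}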
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Jim Bish - switch plane and pipe per Scott */
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Jim Bish - changed pipe/plane here as well. */
+
+ /* Wait for vblank for the disable to take effect */
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ cdv_intel_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+ /* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit see cdv_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 0x3;
+}
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ struct cdv_intel_clock_t clock;
+ u32 dpll = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+ /* Hack: use a 27 MHz reference clock for both CRT and HDMI */
+ if (is_crt || is_hdmi)
+ refclk = 27000;
+
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ return 0;
+ }
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_SYNCLOCK_ENABLE;
+ dpll |= DPLL_VGA_MODE_DIS;
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ /* dpll |= (2 << 11); */
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(dpll_reg);
+
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+
+ udelay(150);
+
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds |=
+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+ LVDS_PIPEB_SELECT;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ REG_WRITE(dpll_reg,
+ (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
+
+ if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ dev_err(dev->dev, "Failed to get DPLL lock\n");
+ return -EBUSY;
+ }
+
+ {
+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ }
+
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(pipesrc_reg,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ cdv_intel_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
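+	/* Write the gamma entries directly while the device is powered; otherwise cache them for the next restore */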
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->save_palette_a[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+/**
+ * Save HW states of the given CRTC
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+ /*NOTE: DSPSIZE DSPPOS only for psb*/
+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given CRTC
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private * dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ DRM_DEBUG(
+ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+ REG_READ(pipeA ? FPA0 : FPB0),
+ REG_READ(pipeA ? FPA1 : FPB1),
+ REG_READ(pipeA ? DPLL_A : DPLL_B),
+ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+ REG_READ(pipeA ? DSPABASE : DSPBBASE)
+ );
+
+ DRM_DEBUG(
+ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ DRM_DEBUG("write dpll: %x\n",
+ REG_READ(pipeA ? DPLL_A : DPLL_B));
+ udelay(150);
+ }
+
+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+ REG_READ(pipeA ? FPA0 : FPB0);
+
+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+ REG_READ(pipeA ? FPA1 : FPB1);
+
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+
+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+	/* If we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ /* turn off the cursor */
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "buffer is to small\n");
+ return -ENOMEM;
+ }
+
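+	/* The GEM object is embedded in a gtt_range; recover it so the buffer can be pinned into the GTT */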
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = obj;
+ }
+ return 0;
+}
+
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t adder;
+
+
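+	/* The cursor position registers encode negative coordinates as sign plus magnitude */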
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ adder = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
+ for (i = start; i < end; i++) {
+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ cdv_intel_crtc_load_lut(crtc);
+}
+
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+ int ret = 0;
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+
+ ret = drm_crtc_helper_set_config(set);
+
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
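+	/* dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2) */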
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ u32 dpll;
+ u32 fp;
+ struct cdv_intel_clock_t clock;
+ bool is_lvds;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = (pipe == 0) ?
+ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA0 :
+ dev_priv->saveFPB0;
+ else
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA1 :
+ dev_priv->saveFPB1;
+
+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ }
+
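+	/* Decode the FP and DPLL divisor fields back into a clock description */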
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ if (clock.p1 == 0) {
+ clock.p1 = 4;
+ dev_err(dev->dev, "PLL %d\n", dpll);
+ }
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ gma_power_end(dev);
+ } else {
+ htot = (pipe == 0) ?
+ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ hsync = (pipe == 0) ?
+ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ vtot = (pipe == 0) ?
+ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ vsync = (pipe == 0) ?
+ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
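+	/* The timing registers store each value minus one, so add one back */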
+ mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ kfree(psb_intel_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = cdv_intel_crtc_dpms,
+ .mode_fixup = cdv_intel_crtc_mode_fixup,
+ .mode_set = cdv_intel_crtc_mode_set,
+ .mode_set_base = cdv_intel_pipe_set_base,
+ .prepare = cdv_intel_crtc_prepare,
+ .commit = cdv_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+ .save = cdv_intel_crtc_save,
+ .restore = cdv_intel_crtc_restore,
+ .cursor_set = cdv_intel_crtc_cursor_set,
+ .cursor_move = cdv_intel_crtc_cursor_move,
+ .gamma_set = cdv_intel_crtc_gamma_set,
+ .set_config = cdv_crtc_set_config,
+ .destroy = cdv_intel_crtc_destroy,
+};
+
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a hardware defect on Oaktrail.
+ */
+void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ uint32_t control;
+ uint32_t base;
+
+ switch (pipe) {
+ case 0:
+ control = CURACNTR;
+ base = CURABASE;
+ break;
+ case 1:
+ control = CURBCNTR;
+ base = CURBBASE;
+ break;
+ case 2:
+ control = CURCCNTR;
+ base = CURCBASE;
+ break;
+ default:
+ return;
+ }
+
+ REG_WRITE(control, 0);
+ REG_WRITE(base, 0);
+}
+
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644
index 00000000000..50d7cfb5166
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ * We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE (1 << 7)
+#define HDMI_AUDIO_ENABLE (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
+/* hdmi-b control bits */
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+ u32 hdmi_reg;
+ u32 save_HDMIB;
+ bool has_hdmi_sink;
+ bool has_hdmi_audio;
+	/* Should be set when a hotplug event is detected */
+ bool hdmi_device_connected;
+ struct mdfld_hdmi_i2c *i2c_bus;
+ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
+ struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ u32 hdmib;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+ hdmib = (2 << 10);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->pipe == 1)
+ hdmib |= HDMIB_PIPE_B_SELECT;
+
+ if (hdmi_priv->has_hdmi_audio) {
+ hdmib |= HDMI_AUDIO_ENABLE;
+ hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+ }
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ u32 hdmib;
+
+ hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+ else
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+ hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_connector *psb_intel_connector =
+ to_psb_intel_connector(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
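+	/* Read the EDID over DDC; only a digital sink is reported as connected */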
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+ hdmi_priv->has_hdmi_sink = false;
+ hdmi_priv->has_hdmi_audio = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_audio =
+ drm_detect_monitor_audio(edid);
+ }
+
+ psb_intel_connector->base.display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+ bool centre;
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property, &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property, value))
+ return -1;
+
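+		/* A switch to or from no-scale needs a full mode set; other scaling changes only reprogram the encoder */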
+ centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (centre) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+ encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+ return -1;
+ } else {
+ struct drm_encoder_helper_funcs *helpers
+ = encoder->helper_private;
+ helpers->mode_set(encoder, &crtc->saved_mode,
+ &crtc->saved_adjusted_mode);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct edid *edid = NULL;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
+ * will go beyond the stolen memory size allocated to the framebuffer
+ */
+ if (mode->hdisplay > 1680)
+ return MODE_PANEL;
+ if (mode->vdisplay > 1050)
+ return MODE_PANEL;
+ return MODE_OK;
+}
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+ .dpms = cdv_hdmi_dpms,
+ .mode_fixup = cdv_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = cdv_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_hdmi_connector_helper_funcs = {
+ .get_modes = cdv_hdmi_get_modes,
+ .mode_valid = cdv_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_hdmi_save,
+ .restore = cdv_hdmi_restore,
+ .detect = cdv_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_hdmi_set_property,
+ .destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int reg)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct mid_intel_hdmi_priv *hdmi_priv;
+ int ddc_bus;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+
+ if (!psb_intel_connector)
+ goto err_connector;
+
+ hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+ if (!hdmi_priv)
+ goto err_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &cdv_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ hdmi_priv->hdmi_reg = reg;
+ hdmi_priv->has_hdmi_sink = false;
+ psb_intel_encoder->dev_priv = hdmi_priv;
+
+ drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_hdmi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+
+ switch (reg) {
+ case SDVOB:
+ ddc_bus = GPIOE;
+ break;
+ case SDVOC:
+ ddc_bus = GPIOD;
+ break;
+ default:
+ DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+ goto failed_ddc;
+ break;
+ }
+
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+ if (!psb_intel_encoder->i2c_bus) {
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ goto failed_ddc;
+ }
+
+ hdmi_priv->hdmi_i2c_adapter =
+ &(psb_intel_encoder->i2c_bus->adapter);
+ hdmi_priv->dev = dev;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_ddc:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+err_priv:
+ kfree(psb_intel_connector);
+err_connector:
+ kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644
index 00000000000..50e744be985
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+ /**
+ * Saved LVDO output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 retval;
+
+ if (gma_power_begin(dev, false)) {
+ retval = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ retval = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return retval;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
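+	/* Scale the 0..100 brightness level into the 8-bit value the backlight controller expects */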
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+ return 0;
+
+ DRM_ERROR("I2C transfer error\n");
+ return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+	/* BLC_PWM_CTL should be initialized during backlight device init */
+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->lvds_bl) {
+ DRM_ERROR("NO LVDS Backlight Info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ cdv_lvds_i2c_set_brightness(dev, level);
+ else
+ cdv_lvds_pwm_set_brightness(dev, level);
+}
+
+/**
+ * Sets the backlight level.
+ *
+ * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl =
+ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+ struct drm_encoder *encoder, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
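+	/* Drive the panel power sequencer and poll PP_STATUS until the panel reaches the requested state */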
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ cdv_intel_lvds_set_backlight(dev,
+ dev_priv->mode_dev.backlight_duty_cycle);
+ } else {
+ cdv_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ if (mode == DRM_MODE_DPMS_ON)
+ cdv_intel_lvds_set_power(dev, encoder, true);
+ else
+ cdv_intel_lvds_set_power(dev, encoder, false);
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ cdv_intel_lvds_set_power(dev, encoder, false);
+
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ cdv_intel_lvds_get_max_backlight(dev);
+
+ cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED.
+ * This connector should only have
+ * been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int ret;
+
+ ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+	/* Didn't get an EDID, so set wide sync ranges so that all
+	 * modes get handed to valid_mode for checking
+	 */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int cdv_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property,
+ &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ return -1;
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv =
+ encoder->dev->dev_private;
+ struct backlight_device *bd =
+ dev_priv->backlight_device;
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+#endif
+ }
+ } else if (!strcmp(property->name, "DPMS") && encoder) {
+ struct drm_encoder_helper_funcs *helpers =
+ encoder->helper_private;
+ helpers->dpms(encoder, value);
+ }
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+ cdv_intel_lvds_helper_funcs = {
+ .dpms = cdv_intel_lvds_encoder_dpms,
+ .mode_fixup = cdv_intel_lvds_mode_fixup,
+ .prepare = cdv_intel_lvds_prepare,
+ .mode_set = cdv_intel_lvds_mode_set,
+ .commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_lvds_connector_helper_funcs = {
+ .get_modes = cdv_intel_lvds_get_modes,
+ .mode_valid = cdv_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_intel_lvds_save,
+ .restore = cdv_intel_lvds_restore,
+ .detect = cdv_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_lvds_set_property,
+ .destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+ .destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct cdv_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv)
+ goto failed_lvds_priv;
+
+ psb_intel_encoder->dev_priv = lvds_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+
+
+ drm_connector_init(dev, connector,
+ &cdv_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ /**
+ * Set up I2C bus
+	 * FIXME: destroy i2c_bus on exit
+ */
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ GPIOB,
+ "LVDSBLC_B");
+ if (!psb_intel_encoder->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ GPIOC,
+ "LVDSDDC_C");
+ if (!psb_intel_encoder->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "DDC bus registration " "failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ psb_intel_ddc_get_modes(connector,
+ &psb_intel_encoder->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this?*/
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ cdv_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ DRM_DEBUG
+ ("Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ printk(KERN_ERR "Failed find\n");
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+failed_ddc:
+ printk(KERN_ERR "Failed DDC\n");
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+failed_blc_i2c:
+ printk(KERN_ERR "Failed BLC\n");
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(lvds_priv);
+failed_lvds_priv:
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644
index 00000000000..791c0ef1a65
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -0,0 +1,831 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+ .destroy = psb_user_framebuffer_destroy,
+ .create_handle = psb_user_framebuffer_create_handle,
+};
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+ uint32_t v;
+
+ if (!fb)
+ return -ENOMEM;
+
+ if (regno > 255)
+ return 1;
+
+ red = CMAP_TOHW(red, info->var.red.length);
+ blue = CMAP_TOHW(blue, info->var.blue.length);
+ green = CMAP_TOHW(green, info->var.green.length);
+ transp = CMAP_TOHW(transp, info->var.transp.length);
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
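+	/* Only the 16-entry pseudo palette used by the console code is stored */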
+ if (regno < 16) {
+ switch (fb->bits_per_pixel) {
+ case 16:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ case 24:
+ case 32:
+ ((uint32_t *) info->pseudo_palette)[regno] = v;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_device *dev = psbfb->base.dev;
+
+ /*
+ * We have to poke our nose in here. The core fb code assumes
+ * panning is part of the hardware that can be invoked before
+ * the actual fb is mapped. In our case that isn't quite true.
+ */
+ if (psbfb->gtt->npage) {
+ /* GTT roll shifts in 4K pages, we need to shift the right
+ number of pages */
+ int pages = info->fix.line_length >> 12;
+ psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ }
+ return 0;
+}
+
+void psbfb_suspend(struct drm_device *dev)
+{
+	struct drm_framebuffer *fb;
+
+	console_lock();
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		struct psb_framebuffer *psbfb = to_psb_fb(fb);
+		struct fb_info *info = psbfb->fbdev;
+ fb_set_suspend(info, 1);
+ drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ console_unlock();
+}
+
+void psbfb_resume(struct drm_device *dev)
+{
+	struct drm_framebuffer *fb;
+
+	console_lock();
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		struct psb_framebuffer *psbfb = to_psb_fb(fb);
+		struct fb_info *info = psbfb->fbdev;
+ fb_set_suspend(info, 0);
+ drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ console_unlock();
+ drm_helper_disable_unused_functions(dev);
+}
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct psb_framebuffer *psbfb = vma->vm_private_data;
+ struct drm_device *dev = psbfb->base.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int page_num;
+ int i;
+ unsigned long address;
+ int ret;
+ unsigned long pfn;
+ /* FIXME: assumes fb at stolen base which may not be true */
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+
+ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ address = (unsigned long)vmf->virtual_address;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
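+	/* Map each page of the physically contiguous stolen memory backing the framebuffer */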
+ for (i = 0; i < page_num; i++) {
+ pfn = (phys_addr >> PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, address, pfn);
+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ break;
+ else if (unlikely(ret != 0)) {
+ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ return ret;
+ }
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ }
+ return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct psbfb_vm_ops = {
+ .fault = psbfb_vm_fault,
+ .open = psbfb_vm_open,
+ .close = psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct psb_fbdev *fbdev = info->par;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+
+ if (!psbfb->addr_space)
+ psbfb->addr_space = vma->vm_file->f_mapping;
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psbfb_vm_ops;
+ vma->vm_private_data = (void *)psbfb;
+ vma->vm_flags |= VM_RESERVED | VM_IO |
+ VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+
+static struct fb_ops psbfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = psbfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_roll_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = psbfb_pan,
+ .fb_mmap = psbfb_mmap,
+ .fb_sync = psbfb_sync,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_unaccel_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ * psb_framebuffer_init - initialize a framebuffer
+ * @dev: our DRM device
+ * @fb: framebuffer to set up
+ * @mode_cmd: mode description
+ * @gt: backing object
+ *
+ * Configure and fill in the boilerplate for our frame buffer. Return
+ * 0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+ struct psb_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ u32 bpp, depth;
+ int ret;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (mode_cmd->pitches[0] & 63)
+ return -EINVAL;
+ switch (bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
+ return 0;
+}
+
+/**
+ * psb_framebuffer_create - create a framebuffer backed by gt
+ * @dev: our DRM device
+ * @mode_cmd: the description of the requested mode
+ * @gt: the backing object
+ *
+ * Create a framebuffer object backed by the gt, and fill in the
+ * boilerplate required
+ *
+ * TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+ (struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct gtt_range *gt)
+{
+ struct psb_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+ return &fb->base;
+}
+
+/**
+ * psbfb_alloc - allocate frame buffer memory
+ * @dev: the DRM device
+ * @aligned_size: space needed
+ *
+ * Allocate the frame buffer. In the usual case we get a GTT range that
+ * is stolen memory backed and life is simple. If there isn't sufficient
+ * stolen memory we fail, as we don't have the virtual mapping space to
+ * really vmap a GEM backed object and the kernel console code can't
+ * handle non-linear framebuffers.
+ *
+ * Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+ struct gtt_range *backing;
+ /* Begin by trying to use stolen memory backing */
+ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+ if (backing) {
+ if (drm_gem_private_object_init(dev,
+ &backing->gem, aligned_size) == 0)
+ return backing;
+ psb_gtt_free_range(dev, backing);
+ }
+ return NULL;
+}
+
+/**
+ * psbfb_create - create a framebuffer
+ * @fbdev: the framebuffer device
+ * @sizes: specification of the layout
+ *
+ * Create a framebuffer to the specifications provided
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ int size;
+ int ret;
+ struct gtt_range *backing;
+ u32 bpp, depth;
+ int gtt_roll = 0;
+ int pitch_lines = 0;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ bpp = sizes->surface_bpp;
+
+ /* No 24bit packed */
+ if (bpp == 24)
+ bpp = 32;
+
+ do {
+ /*
+ * Acceleration via the GTT requires the pitch to be
+ * power-of-two aligned. Preferably a page, but less
+ * is OK with some fonts
+ */
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+ depth = sizes->surface_depth;
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the fb in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+
+ if (pitch_lines)
+ pitch_lines *= 2;
+ else
+ pitch_lines = 1;
+ gtt_roll++;
+ } while (backing == NULL && pitch_lines <= 16);
+
+ /* The final pitch we accepted if we succeeded */
+ pitch_lines /= 2;
+
+ if (backing == NULL) {
+ /*
+ * We couldn't get the space we wanted, fall back to the
+ * display engine requirement instead. The HW requires
+ * the pitch to be 64 byte aligned
+ */
+
+ gtt_roll = 0; /* Don't use GTT accelerated scrolling */
+ pitch_lines = 64;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+ if (backing == NULL)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+ info->par = fbdev;
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+ if (ret)
+ goto out_unref;
+
+ fb = &psbfb->base;
+ psbfb->fbdev = info;
+
+ fbdev->psb_fb_helper.fb = fb;
+ fbdev->psb_fb_helper.fbdev = info;
+
+ strcpy(info->fix.id, "psbfb");
+
+ info->flags = FBINFO_DEFAULT;
+ if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+ info->fbops = &psbfb_ops;
+ else if (gtt_roll) { /* GTT rolling seems best */
+ info->fbops = &psbfb_roll_ops;
+ info->flags |= FBINFO_HWACCEL_YPAN;
+ } else /* Software */
+ info->fbops = &psbfb_unaccel_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ info->fix.smem_start = dev->mode_config.fb_base;
+ info->fix.smem_len = size;
+ info->fix.ywrapstep = gtt_roll;
+ info->fix.ypanstep = 0;
+
+ /* Accesses stolen memory directly */
+ info->screen_base = (char *)dev_priv->vram_addr +
+ backing->offset;
+ info->screen_size = size;
+
+ if (dev_priv->gtt.stolen_size) {
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+ }
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+ info->pixmap.size = 64 * 1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
+ dev_info(dev->dev, "allocated %dx%d fb\n",
+ psbfb->base.width, psbfb->base.height);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+out_unref:
+ if (backing->stolen)
+ psb_gtt_free_range(dev, backing);
+ else
+ drm_gem_object_unreference(&backing->gem);
+out_err1:
+ mutex_unlock(&dev->struct_mutex);
+ psb_gtt_free_range(dev, backing);
+ return ret;
+}
+
+/**
+ * psb_user_framebuffer_create - create framebuffer
+ * @dev: our DRM device
+ * @filp: client file
+ * @cmd: mode request
+ *
+ * Create a new framebuffer backed by a userspace GEM object
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+ (struct drm_device *dev, struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *cmd)
+{
+ struct gtt_range *r;
+ struct drm_gem_object *obj;
+
+ /*
+ * Find the GEM object and thus the gtt range object that is
+ * to back this space
+ */
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ /* Let the core code do all the work */
+ r = container_of(obj, struct gtt_range, gem);
+ return psb_framebuffer_create(dev, cmd, r);
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, int regno)
+{
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = psbfb_create(psb_fbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+ .gamma_set = psbfb_gamma_set,
+ .gamma_get = psbfb_gamma_get,
+ .fb_probe = psbfb_probe,
+};
+
+int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+ struct fb_info *info;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+ if (fbdev->psb_fb_helper.fbdev) {
+ info = fbdev->psb_fb_helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ drm_fb_helper_fini(&fbdev->psb_fb_helper);
+ drm_framebuffer_cleanup(&psbfb->base);
+
+ if (psbfb->gtt)
+ drm_gem_object_unreference(&psbfb->gtt->gem);
+ return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+ struct psb_fbdev *fbdev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "no memory\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->fbdev = fbdev;
+ fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+ drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+ return 0;
+}
+
+void psb_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->fbdev)
+ return;
+
+ psb_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+ drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ * psb_user_framebuffer_create_handle - add handle to a framebuffer
+ * @fb: framebuffer
+ * @file_priv: our DRM file
+ * @handle: returned handle
+ *
+ * Our framebuffer object is a GTT range which also contains a GEM
+ * object. We need to turn it into a handle for userspace. GEM will do
+ * the work for us
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ * psb_user_framebuffer_destroy - destruct user created fb
+ * @fb: framebuffer
+ *
+ * User framebuffers are backed by GEM objects so all we have to do is
+ * clean up a bit and drop the reference, GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ struct drm_device *dev = fb->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+ struct drm_crtc *crtc;
+ int reset = 0;
+
+ /* Should never get stolen memory for a user fb */
+ WARN_ON(r->stolen);
+
+ /* Check if we are erroneously live */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->fb == fb)
+ reset = 1;
+
+ if (reset)
+ /*
+ * Now force a sane response before we permit the DRM CRTC
+ * layer to do stupid things like blank the display. Instead
+ * we reset this framebuffer as if the user had forced a reset.
+ * We must do this before the cleanup so that the DRM layer
+ * doesn't get a chance to stick its oar in where it isn't
+ * wanted.
+ */
+ drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+
+ /* Let DRM do its clean up */
+ drm_framebuffer_cleanup(fb);
+ /* We are no longer using the resource in GEM */
+ drm_gem_object_unreference_unlocked(&r->gem);
+ kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+ .fb_create = psb_user_framebuffer_create,
+ .output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_property *backlight;
+
+ if (dev_priv->backlight_property)
+ return 0;
+
+ backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "backlight", 2);
+ backlight->values[0] = 0;
+ backlight->values[1] = 100;
+
+ dev_priv->backlight_property = backlight;
+
+ return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+
+ drm_mode_create_scaling_mode_property(dev);
+ psb_create_backlight_property(dev);
+
+ dev_priv->ops->output_init(dev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &psb_intel_encoder->base;
+ int crtc_mask = 0, clone_mask = 0;
+
+ /* valid crtcs */
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+ break;
+ case INTEL_OUTPUT_SDVO:
+ crtc_mask = ((1 << 0) | (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ break;
+ case INTEL_OUTPUT_LVDS:
+ if (IS_MRST(dev))
+ crtc_mask = (1 << 0);
+ else
+ crtc_mask = (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ break;
+ case INTEL_OUTPUT_MIPI:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ crtc_mask = (1 << 2);
+ clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ break;
+ case INTEL_OUTPUT_HDMI:
+ if (IS_MFLD(dev))
+ crtc_mask = (1 << 1);
+ else
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_HDMI);
+ break;
+ }
+ encoder->possible_crtcs = crtc_mask;
+ encoder->possible_clones =
+ psb_intel_connector_clones(dev, clone_mask);
+ }
+}
+
+void psb_modeset_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ int i;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
+
+ /* set memory base */
+ /* Oaktrail and Poulsbo should use BAR 2 */
+ pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+ &(dev->mode_config.fb_base));
+
+ /* num pipes is 2 for PSB but 1 for Mrst */
+ for (i = 0; i < dev_priv->num_pipe; i++)
+ psb_intel_crtc_init(dev, i, mode_dev);
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ psb_setup_outputs(dev);
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+ mutex_lock(&dev->struct_mutex);
+
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644
index 00000000000..989558a9e6e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+struct psb_framebuffer {
+ struct drm_framebuffer base;
+ struct address_space *addr_space;
+ struct fb_info *fbdev;
+ struct gtt_range *gtt;
+};
+
+struct psb_fbdev {
+ struct drm_fb_helper psb_fb_helper;
+ struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
+
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
new file mode 100644
index 00000000000..9fbb86868e2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -0,0 +1,292 @@
+/*
+ * psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ * - we need to work out if the MMU is relevant (eg for
+ * accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+
+int psb_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL;
+}
+
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+ struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+ drm_gem_object_release_wrap(obj);
+ /* This must occur last as it frees up the memory of the GEM object */
+ psb_gtt_free_range(obj->dev, gtt);
+}
+
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -EINVAL;
+}
+
+/**
+ * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ * @offset: returned mmap offset for the object
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ int ret = 0;
+ struct drm_gem_object *obj;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ /* What validation is needed here ? */
+
+ /* Make it mmapable */
+ if (!obj->map_list.map) {
+ ret = gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ }
+ /* GEM should really work out the hash offsets for us */
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * psb_gem_create - create a mappable object
+ * @file: the DRM file of the client
+ * @dev: our device
+ * @size: the size requested
+ * @handlep: returned handle (opaque number)
+ *
+ * Create a GEM object, fill in the boilerplate and attach a handle to
+ * it so that userspace can speak about it. This does the core work
+ * for the various methods that do/will create GEM objects for things
+ */
+static int psb_gem_create(struct drm_file *file,
+ struct drm_device *dev, uint64_t size, uint32_t *handlep)
+{
+ struct gtt_range *r;
+ int ret;
+ u32 handle;
+
+ size = roundup(size, PAGE_SIZE);
+
+ /* Allocate our object - for now a direct gtt range which is not
+ stolen memory backed */
+ r = psb_gtt_alloc_range(dev, size, "gem", 0);
+ if (r == NULL) {
+ dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+ return -ENOSPC;
+ }
+ /* Initialize the extra goodies GEM needs to do all the hard work */
+ if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+ psb_gtt_free_range(dev, r);
+ /* GEM doesn't give an error code so use -ENOMEM */
+ dev_err(dev->dev, "GEM init failed for %lld\n", size);
+ return -ENOMEM;
+ }
+ /* Give the object a handle so we can carry it more easily */
+ ret = drm_gem_handle_create(file, &r->gem, &handle);
+ if (ret) {
+ dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+ &r->gem, size);
+ drm_gem_object_release(&r->gem);
+ psb_gtt_free_range(dev, r);
+ return ret;
+ }
+ /* We have the initial and handle reference but need only one now */
+ drm_gem_object_unreference(&r->gem);
+ *handlep = handle;
+ return 0;
+}
+
+/**
+ * psb_gem_dumb_create - create a dumb buffer
+ * @drm_file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return psb_gem_create(file, dev, args->size, &args->handle);
+}
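+
+/*
+ * Example (illustrative values): a 1024x600 dumb buffer at 32bpp gives
+ *
+ *	pitch = ALIGN(1024 * 4, 64) = 4096 bytes
+ *	size  = 4096 * 600 = 2457600 bytes
+ *
+ * which psb_gem_create() then rounds up to whole pages before
+ * allocating the GTT range.
+ */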
+
+/**
+ * psb_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via psb_gem_dumb_create, at least
+ * we hope it was created that way. i915 seems to assume the caller
+ * does the checking but that might be worth review ! FIXME
+ */
+int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * psb_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * This code eventually needs to handle faulting objects in and out
+ * of the GTT and repacking it when we run out of space. We can put
+ * that off for now given our current simple uses.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj;
+ struct gtt_range *r;
+ int ret;
+ unsigned long pfn;
+ pgoff_t page_offset;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+
+ obj = vma->vm_private_data; /* GEM object */
+ dev = obj->dev;
+ dev_priv = dev->dev_private;
+
+ r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ something from beneath our feet */
+ mutex_lock(&dev->struct_mutex);
+
+ /* For now the mmap pins the object and it stays pinned. As things
+ stand that will do us no harm */
+ if (r->mmapping == 0) {
+ ret = psb_gtt_pin(r);
+ if (ret < 0) {
+ dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ goto fail;
+ }
+ r->mmapping = 1;
+ }
+
+ /* Page relative to the VMA start - we must calculate this ourselves
+ because vmf->pgoff is the fake GEM offset */
+ page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+ >> PAGE_SHIFT;
+
+ /* CPU view of the page, don't go via the GART for CPU writes */
+ if (r->stolen)
+ pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ else
+ pfn = page_to_pfn(r->pages[page_offset]);
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+ int size, u32 *handle)
+{
+ struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+ if (gtt == NULL)
+ return -ENOMEM;
+ if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+ goto free_gtt;
+ if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+ return 0;
+free_gtt:
+ psb_gtt_free_range(dev, gtt);
+ return -ENOMEM;
+}
+
+/*
+ * GEM interfaces for our specific client
+ */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_create *args = data;
+ int ret;
+ if (args->flags & GMA_GEM_CREATE_STOLEN) {
+ ret = psb_gem_create_stolen(file, dev, args->size,
+ &args->handle);
+ if (ret == 0)
+ return 0;
+ /* Fall through */
+ args->flags &= ~GMA_GEM_CREATE_STOLEN;
+ }
+ return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_mmap *args = data;
+ return dev->driver->dumb_map_offset(file, dev,
+ args->handle, &args->offset);
+}
+
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
new file mode 100644
index 00000000000..daac1212065
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.c
@@ -0,0 +1,89 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+ /* Remove the list map if one is present */
+ if (obj->map_list.map) {
+ struct drm_gem_mm *mm = obj->dev->mm_private;
+ struct drm_map_list *list = &obj->map_list;
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+ }
+ drm_gem_object_release(obj);
+}
+
+/**
+ * gem_create_mmap_offset - invent an mmap offset
+ * @obj: our object
+ *
+ * Standard implementation of offset generation for mmap as is
+ * duplicated in several drivers. This belongs in GEM.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret;
+
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (list->map == NULL)
+ return -ENOMEM;
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+ obj->name);
+ ret = -ENOSPC;
+ goto free_it;
+ }
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto free_it;
+ }
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ dev_err(dev->dev, "failed to add to map hash\n");
+ goto free_mm;
+ }
+ return 0;
+
+free_mm:
+ drm_mm_put_block(list->file_offset_node);
+free_it:
+ kfree(list->map);
+ list->map = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644
index 00000000000..ce5ce30f74d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gem_glue.h
@@ -0,0 +1,2 @@
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 00000000000..e770bd190a5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+
+
+/*
+ * GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ * psb_gtt_mask_pte - generate GTT pte entry
+ * @pfn: page number to encode
+ * @type: type of memory in the GTT
+ *
+ * Set the GTT entry for the appropriate memory type.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
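+
+/*
+ * Example (illustrative): an ordinary uncached, writable page at pfn
+ * 0x1234 encodes as
+ *
+ *	psb_gtt_mask_pte(0x1234, 0) == (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID
+ *
+ * while PSB_MMU_CACHED_MEMORY or PSB_MMU_RO_MEMORY types additionally
+ * set PSB_PTE_CACHED or PSB_PTE_RO in the entry.
+ */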
+
+/**
+ * psb_gtt_entry - find the GTT entries for a gtt_range
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Given a gtt_range object return the GTT offset of the page table
+ * entries for this gtt_range
+ */
+u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long offset;
+
+ offset = r->resource.start - dev_priv->gtt_mem->start;
+
+ return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ * psb_gtt_insert - put an object into the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Take our preallocated GTT range and insert the GEM object into
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+{
+ u32 *gtt_slot, pte;
+ struct page **pages;
+ int i;
+
+ if (r->pages == NULL) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(r->stolen); /* refcount these maybe ? */
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pages = r->pages;
+
+ /* Make sure changes are visible to the GPU */
+ set_pages_array_uc(pages, r->npage);
+
+ /* Write our page entries into the GTT itself */
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ /* Make sure all the entries are set before we return */
+ ioread32(gtt_slot - 1);
+
+ return 0;
+}
+
+/**
+ * psb_gtt_remove - remove an object from the GTT
+ * @dev: our DRM device
+ * @r: our GTT range
+ *
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
+ */
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 *gtt_slot, pte;
+ int i;
+
+ WARN_ON(r->stolen);
+
+ gtt_slot = psb_gtt_entry(dev, r);
+ pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+
+ for (i = 0; i < r->npage; i++)
+ iowrite32(pte, gtt_slot++);
+ ioread32(gtt_slot - 1);
+ set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ * psb_gtt_roll - set scrolling position
+ * @dev: our DRM device
+ * @r: the gtt mapping we are using
+ * @roll: roll offset
+ *
+ * Roll an existing pinned mapping by moving the pages through the GTT.
+ * This allows us to implement hardware scrolling on the consoles without
+ * a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+ u32 *gtt_slot, pte;
+ int i;
+
+ if (roll >= r->npage) {
+ WARN_ON(1);
+ return;
+ }
+
+ r->roll = roll;
+
+ /* Not currently in the GTT - no worry, we will write the mapping at
+ the right position when it gets pinned */
+ if (!r->stolen && !r->in_gart)
+ return;
+
+ gtt_slot = psb_gtt_entry(dev, r);
+
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ ioread32(gtt_slot - 1);
+}
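+
+/*
+ * Example (illustrative): with npage = 4 and roll = 2 the loops above
+ * rewrite the GTT slots in the order pages[2], pages[3], pages[0],
+ * pages[1], so the visible start of the framebuffer is rotated by two
+ * pages without copying any pixel data.
+ */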
+
+/**
+ * psb_gtt_attach_pages - attach and pin GEM pages
+ * @gt: the gtt range
+ *
+ * Pin and build an in kernel list of the pages that back our GEM object.
+ * While we hold this the pages cannot be swapped out. This is protected
+ * via the gtt mutex which the caller must hold.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ int i;
+ struct page *p;
+ int pages = gt->gem.size / PAGE_SIZE;
+
+ WARN_ON(gt->pages);
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = gt->gem.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+ if (gt->pages == NULL)
+ return -ENOMEM;
+ gt->npage = pages;
+
+ for (i = 0; i < pages; i++) {
+ /* FIXME: needs updating as per mail from Hugh Dickins */
+ p = read_cache_page_gfp(mapping, i,
+ __GFP_COLD | GFP_KERNEL);
+ if (IS_ERR(p))
+ goto err;
+ gt->pages[i] = p;
+ }
+ return 0;
+
+err:
+ while (i--)
+ page_cache_release(gt->pages[i]);
+ kfree(gt->pages);
+ gt->pages = NULL;
+ return PTR_ERR(p);
+}
+
+/**
+ * psb_gtt_detach_pages - unpin and release GEM pages
+ * @gt: the gtt range
+ *
+ * Undo the effect of psb_gtt_attach_pages. At this point the pages
+ * must have been removed from the GTT as they could now be paged out
+ * and move bus address. This is protected via the gtt mutex which the
+ * caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+ int i;
+ for (i = 0; i < gt->npage; i++) {
+ /* FIXME: do we need to force dirty */
+ set_page_dirty(gt->pages[i]);
+ page_cache_release(gt->pages[i]);
+ }
+ kfree(gt->pages);
+ gt->pages = NULL;
+}
+
+/**
+ * psb_gtt_pin - pin pages into the GTT
+ * @gt: range to pin
+ *
+ * Pin a set of pages into the GTT. The pins are refcounted so that
+ * multiple pins need multiple unpins to undo.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+ int ret = 0;
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ ret = psb_gtt_attach_pages(gt);
+ if (ret < 0)
+ goto out;
+ ret = psb_gtt_insert(dev, gt);
+ if (ret < 0) {
+ psb_gtt_detach_pages(gt);
+ goto out;
+ }
+ }
+ gt->in_gart++;
+out:
+ mutex_unlock(&dev_priv->gtt_mutex);
+ return ret;
+}
+
+/**
+ * psb_gtt_unpin - Drop a GTT pin requirement
+ * @gt: range to pin
+ *
+ * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ * will be removed from the GTT which will also drop the page references
+ * and allow the VM to clean up or page stuff.
+ *
+ * Non GEM backed objects treat this as a no-op as they are always GTT
+ * backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+ struct drm_device *dev = gt->gem.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->gtt_mutex);
+
+ WARN_ON(!gt->in_gart);
+
+ gt->in_gart--;
+ if (gt->in_gart == 0 && gt->stolen == 0) {
+ psb_gtt_remove(dev, gt);
+ psb_gtt_detach_pages(gt);
+ }
+ mutex_unlock(&dev_priv->gtt_mutex);
+}
+
+/*
+ * GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ * psb_gtt_alloc_range - allocate GTT address space
+ * @dev: Our DRM device
+ * @len: length (bytes) of address space required
+ * @name: resource name
+ * @backed: resource should be backed by stolen pages
+ *
+ * Ask the kernel core to find us a suitable range of addresses
+ * to use for a GTT mapping.
+ *
+ * Returns a gtt_range structure describing the object, or NULL on
+ * error. On successful return the resource is both allocated and marked
+ * as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gtt_range *gt;
+ struct resource *r = dev_priv->gtt_mem;
+ int ret;
+ unsigned long start, end;
+
+ if (backed) {
+ /* The start of the GTT is the stolen pages */
+ start = r->start;
+ end = r->start + dev_priv->gtt.stolen_size - 1;
+ } else {
+ /* The rest we will use for GEM backed objects */
+ start = r->start + dev_priv->gtt.stolen_size;
+ end = r->end;
+ }
+
+ gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+ if (gt == NULL)
+ return NULL;
+ gt->resource.name = name;
+ gt->stolen = backed;
+ gt->in_gart = backed;
+ gt->roll = 0;
+ /* Ensure this is set for non GEM objects */
+ gt->gem.dev = dev;
+ ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+ len, start, end, PAGE_SIZE, NULL, NULL);
+ if (ret == 0) {
+ gt->offset = gt->resource.start - r->start;
+ return gt;
+ }
+ kfree(gt);
+ return NULL;
+}
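+
+/*
+ * Illustrative sketch (fb_size and bo_size are placeholders): the
+ * framebuffer path asks for a stolen memory backed range while GEM
+ * objects come from the rest of the aperture:
+ *
+ *	struct gtt_range *fb = psb_gtt_alloc_range(dev, fb_size, "fb", 1);
+ *	struct gtt_range *bo = psb_gtt_alloc_range(dev, bo_size, "gem", 0);
+ *
+ * Either call returns NULL if no suitable address space is available.
+ */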
+
+/**
+ * psb_gtt_free_range - release GTT address space
+ * @dev: our DRM device
+ * @gt: a mapping created with psb_gtt_alloc_range
+ *
+ * Release a resource that was allocated with psb_gtt_alloc_range. If the
+ * object has been pinned by mmap users we clean this up here currently.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+ /* Undo the mmap pin if we are destroying the object */
+ if (gt->mmapping) {
+ psb_gtt_unpin(gt);
+ gt->mmapping = 0;
+ }
+ WARN_ON(gt->in_gart && !gt->stolen);
+ release_resource(&gt->resource);
+ kfree(gt);
+}
+
+void psb_gtt_alloc(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ init_rwsem(&dev_priv->gtt.sem);
+}
+
+void psb_gtt_takedown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gtt_map) {
+ iounmap(dev_priv->gtt_map);
+ dev_priv->gtt_map = NULL;
+ }
+ if (dev_priv->gtt_initialized) {
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl);
+ PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+ }
+ if (dev_priv->vram_addr)
+ iounmap(dev_priv->vram_addr);
+}
+
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned gtt_pages;
+ unsigned long stolen_size, vram_stolen_size;
+ unsigned i, num_pages;
+ unsigned pfn_base;
+ uint32_t vram_pages;
+ uint32_t dvmt_mode = 0;
+ struct psb_gtt *pg;
+
+ int ret = 0;
+ uint32_t pte;
+
+ mutex_init(&dev_priv->gtt_mutex);
+
+ psb_gtt_alloc(dev);
+ pg = &dev_priv->gtt;
+
+ /* Enable the GTT */
+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+ dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ (void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+ /* The root resource we allocate address space from */
+ dev_priv->gtt_initialized = 1;
+
+ pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+ /*
+ * The video MMU has a hw bug when accessing 0xD0000000.
+ * Make the GATT start at 0xE0000000 instead. This doesn't
+ * actually matter for us but may do if the video acceleration
+ * ever gets opened up.
+ */
+ pg->mmu_gatt_start = 0xE0000000;
+
+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ >> PAGE_SHIFT;
+ /* Some CDV firmware doesn't report this currently, in which case
+ the system has 64 GTT pages */
+ if (pg->gtt_start == 0 || gtt_pages == 0) {
+ dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+ gtt_pages = 64;
+ pg->gtt_start = dev_priv->pge_ctl;
+ }
+
+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+ >> PAGE_SHIFT;
+ dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+ if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+ static struct resource fudge; /* Preferably peppermint */
+ /* This can occur on CDV SDV systems. Fudge it in this case.
+ We really don't care what imaginary space is being allocated
+ at this point */
+ dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+ pg->gatt_start = 0x40000000;
+ pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ /* This is a little confusing but in fact the GTT is providing
+ a view from the GPU into memory and not vice versa. As such
+ this is really allocating space that is not the same as the
+ CPU address space on CDV */
+ fudge.start = 0x40000000;
+ fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+ fudge.name = "fudge";
+ fudge.flags = IORESOURCE_MEM;
+ dev_priv->gtt_mem = &fudge;
+ }
+
+ pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+ vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+ - PAGE_SIZE;
+
+ stolen_size = vram_stolen_size;
+
+ printk(KERN_INFO "Stolen memory information\n");
+ printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
+ printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+ vram_stolen_size/1024);
+ dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
+ printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
+ (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+
+ if (resume && (gtt_pages != pg->gtt_pages) &&
+ (stolen_size != pg->stolen_size)) {
+ dev_err(dev->dev, "GTT resume error.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ pg->gtt_pages = gtt_pages;
+ pg->stolen_size = stolen_size;
+ dev_priv->vram_stolen_size = vram_stolen_size;
+
+ /*
+ * Map the GTT and the stolen memory area
+ */
+ dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ gtt_pages << PAGE_SHIFT);
+ if (!dev_priv->gtt_map) {
+ dev_err(dev->dev, "Failure to map gtt.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+ if (!dev_priv->vram_addr) {
+ dev_err(dev->dev, "Failure to map stolen base.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /*
+ * Insert vram stolen pages into the GTT
+ */
+
+ pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+ printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages, pfn_base << PAGE_SHIFT, 0);
+ for (i = 0; i < num_pages; ++i) {
+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
+ iowrite32(pte, dev_priv->gtt_map + i);
+ }
+
+ /*
+ * Init rest of GTT to the scratch page to avoid accidents or scribbles
+ */
+
+ pfn_base = page_to_pfn(dev_priv->scratch_page);
+ pte = psb_gtt_mask_pte(pfn_base, 0);
+ for (; i < gtt_pages; ++i)
+ iowrite32(pte, dev_priv->gtt_map + i);
+
+ (void) ioread32(dev_priv->gtt_map + i - 1);
+ return 0;
+
+out_err:
+ psb_gtt_takedown(dev);
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 00000000000..aa1742387f5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and unneeded stuff */
+struct psb_gtt {
+ uint32_t gatt_start;
+ uint32_t mmu_gatt_start;
+ uint32_t gtt_start;
+ uint32_t gtt_phys_start;
+ unsigned gtt_pages;
+ unsigned gatt_pages;
+ unsigned long stolen_size;
+ unsigned long vram_stolen_size;
+ struct rw_semaphore sem;
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+ struct resource resource; /* Resource for our allocation */
+ u32 offset; /* GTT offset of our object */
+ struct drm_gem_object gem; /* GEM high level stuff */
+ int in_gart; /* Currently in the GART (ref ct) */
+ bool stolen; /* Backed from stolen RAM */
+ bool mmapping; /* Is mmappable */
+ struct page **pages; /* Backing pages if present */
+ int npage; /* Number of backing pages */
+ int roll; /* Roll applied to the GTT entries */
+};
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+ const char *name, int backed);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+ struct gtt_range *gt, int roll);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644
index 00000000000..d4d0c5b8bf9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
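+
+/*
+ * Illustrative layout assumed by the walk above: after the BDB header
+ * each section is laid out as
+ *
+ *	[u8 id][u16 size, little endian][size bytes of payload]
+ *
+ * and find_section() returns a pointer to the payload of the first
+ * section whose id matches, or NULL if the id is not present.
+ */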
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+ struct bdb_lvds_backlight *lvds_bl;
+ u8 p_type = 0;
+ void *bl_start = NULL;
+ struct bdb_lvds_options *lvds_opts
+ = find_section(bdb, BDB_LVDS_OPTIONS);
+
+ dev_priv->lvds_bl = NULL;
+
+ if (lvds_opts)
+ p_type = lvds_opts->panel_type;
+ else
+ return;
+
+ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+ if (!lvds_bl) {
+ dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ return;
+ }
+ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+ dev_priv->lvds_bl = lvds_bl;
+}
+
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_lvds_options *lvds_options;
+ struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_entry *entry;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ /* Defaults if we can't find VBT info */
+ dev_priv->lvds_dither = 0;
+ dev_priv->lvds_vbt = 0;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+
+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ dvo_timing = &entry->dvo_timing;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+ GFP_KERNEL);
+ if (panel_fixed_mode == NULL) {
+ dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ return;
+ }
+
+ dev_priv->lvds_vbt = 1;
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+ if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+ } else {
+ dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_priv->lvds_vbt = 0;
+ kfree(panel_fixed_mode);
+ }
+ return;
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode,
+ dvo_timing + sdvo_lvds_options->panel_type);
+
+ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ return;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_features *general;
+
+ /* Set sensible defaults in case we can't find the general block */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ dev_priv->lvds_use_ssc = general->enable_ssc;
+
+ if (dev_priv->lvds_use_ssc) {
+ dev_priv->lvds_ssc_freq
+ = general->ssc_freq ? 100 : 96;
+ }
+ }
+}
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other intel_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+bool psb_intel_init_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ struct vbt_header *vbt = NULL;
+ struct bdb_header *bdb;
+ u8 __iomem *bios;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_backlight_data(dev_priv, bdb);
+
+ pci_unmap_rom(pdev, bios);
+
+ return 0;
+}
+
+/**
+ * Destroy and free VBT data
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *sdvo_lvds_vbt_mode =
+ dev_priv->sdvo_lvds_vbt_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode =
+ dev_priv->lfp_lvds_vbt_mode;
+ struct bdb_lvds_backlight *lvds_bl =
+ dev_priv->lvds_bl;
+
+ /* free sdvo panel mode */
+ if (sdvo_lvds_vbt_mode) {
+ dev_priv->sdvo_lvds_vbt_mode = NULL;
+ kfree(sdvo_lvds_vbt_mode);
+ }
+
+ if (lfp_lvds_vbt_mode) {
+ dev_priv->lfp_lvds_vbt_mode = NULL;
+ kfree(lfp_lvds_vbt_mode);
+ }
+
+ if (lvds_bl) {
+ dev_priv->lvds_bl = NULL;
+ kfree(lvds_bl);
+ }
+}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644
index 00000000000..70f1bf01818
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+ u8 signature[20]; /**< Always starts with '$VBT' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
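
As a hedged aside (not applied by this patch): the comment before the ID list describes each BDB block as one ID byte followed by a two-byte little-endian size. A minimal lookup over those blocks, modelled on the find_section() helper in intel_bios.c and assuming bdb already points at a mapped struct bdb_header, might look like this:

static void *example_find_section(struct bdb_header *bdb, int section_id)
{
	u8 *base = (u8 *)bdb;
	int index = bdb->header_size;	/* blocks start after the header */
	u16 total = bdb->bdb_size;

	/* Each block: 1 byte ID, 2 byte little-endian size, then the data */
	while (index + 3 < total) {
		u8 current_id = base[index];
		u16 current_size = base[index + 1] | (base[index + 2] << 8);

		if (current_id == section_id)
			return base + index + 3;
		index += current_size + 3;
	}
	return NULL;
}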
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd8:3; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /* device info */
+ u8 tv_or_lvds_info[33];
+ u8 dev1[33];
+ u8 dev2[33];
+ u8 dev3[33];
+ u8 dev4[33];
+ /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+
+struct bdb_lvds_backlight {
+ u8 type:2;
+ u8 pol:1;
+ u8 gpio:3;
+ u8 gmbus:2;
+ u16 freq;
+ u8 minbrightness;
+ u8 i2caddr;
+ u8 brightnesscmd;
+ /*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+extern bool psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
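
A hedged illustration (not something this patch adds) of how the GR18 scratch bits above might be decoded: check the driver-control bit first, then compare the hotkey field against the GR18_HK_* codes.

static const char *example_gr18_event(u32 gr18)
{
	/* Bit 7 clear means the VBIOS, not the driver, owns the event */
	if (!(gr18 & GR18_DRIVER_SWITCH_EN))
		return "VBIOS controlled";

	switch (gr18 & GR18_HOTKEY_MASK) {
	case GR18_HK_NONE:		return "no hotkey";
	case GR18_HK_TOGGLE_DISP:	return "display toggle";
	case GR18_HK_DISP_SWITCH:	return "display switch (see SWF14)";
	case GR18_HK_PFIT:		return "panel fit request";
	default:			return "other/multiple";
	}
}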
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 00000000000..147584ac8d0
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
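
The two macros above poll COND until it becomes true or MS milliseconds pass, sleeping W ms between polls in the non-atomic variant and returning -ETIMEDOUT on expiry. A hedged usage sketch, mirroring the GMBUS polling later in this file:

/* Illustrative only: wait up to 50 ms for the GMBUS hardware-ready bit */
static int example_wait_hw_ready(struct drm_device *dev)
{
	if (wait_for(REG_READ(GMBUS2) & GMBUS_HW_RDY, 50))
		return -ETIMEDOUT;
	return 0;
}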
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_psb_private *dev_priv;
+ u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+ REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
+ /* FIXME: We are never Pineview, right?
+
+ u32 val;
+
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = REG_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ REG_WRITE(DSPCLK_GATE_D, val);
+
+ return;
+ */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved = REG_READ(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int get_clock(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | clock_bits);
+ REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | data_bits);
+ REG_READ(gpio->reg);
+}
+
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
+
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "gma500 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ struct drm_device *dev = dev_priv->dev;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = 0;
+
+ REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = REG_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+
+ while (len) {
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_READ(GMBUS2+reg_offset);
+ }
+ }
+
+ if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+ }
+
+ goto done;
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer, till then let it sleep.
+ */
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+ return i;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+/**
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+ };
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "gma500 gmbus %s",
+ names[i]);
+
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
+
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (i--) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 Khz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
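
Per the table in the comment, speed is the raw two-bit rate code that lands in bits 9:8 of GMBUS0 on the next transfer. A hedged usage sketch (the loop bound and gmbus array come from the setup code above):

/* Illustrative only: select the 400 kHz rate code (0x2) on every pair */
static void example_set_gmbus_400khz(struct drm_psb_private *dev_priv)
{
	int i;

	for (i = 0; i < GMBUS_NUM_PORTS; i++)
		gma_intel_gmbus_set_speed(&dev_priv->gmbus[i].adapter, 0x2);
}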
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->gmbus == NULL)
+ return;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+}
diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644
index 00000000000..98a28c20955
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 val;
+
+ val = REG_READ(chan->reg);
+ return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, clock_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+ REG_WRITE(chan->reg, reserved | clock_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct psb_intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ u32 reserved = 0, data_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved =
+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits =
+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(chan->reg, reserved | data_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ * %GPIOA
+ * %GPIOB
+ * %GPIOC
+ * %GPIOD
+ * %GPIOE
+ * %GPIOF
+ * %GPIOG
+ * %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name)
+{
+ struct psb_intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ goto out_free;
+
+ chan->drm_dev = dev;
+ chan->reg = reg;
+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+ chan->adapter.owner = THIS_MODULE;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 20;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+ if (i2c_bit_add_bus(&chan->adapter))
+ goto out_free;
+
+ /* JJJ: raise SCL and SDA? */
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(20);
+
+ return chan;
+
+out_free:
+ kfree(chan);
+ return NULL;
+}
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @output: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+ if (!chan)
+ return;
+
+ i2c_del_adapter(&chan->adapter);
+ kfree(chan);
+}
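
A hedged sketch of the typical create/use/destroy pattern for these helpers, e.g. DDC probing on GPIOA; the register choice and bus name here are illustrative, and the real callers live in the output code elsewhere in the driver:

static void example_ddc_probe(struct drm_device *dev)
{
	struct psb_intel_i2c_chan *ddc;

	/* Bit-banged DDC bus on the GPIOA pair */
	ddc = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
	if (!ddc)
		return;

	/* ... use &ddc->adapter with drm_get_edid() or i2c_transfer() ... */

	psb_intel_i2c_destroy(ddc);
}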
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
new file mode 100644
index 00000000000..d946bc1b17b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_opregion.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * FIXME: resolve with the i915 version
+ */
+
+#include "psb_drv.h"
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+struct opregion_apci {
+ /*FIXME: add it later*/
+} __packed;
+
+struct opregion_swsci {
+ /*FIXME: add it later*/
+} __packed;
+
+struct opregion_acpi {
+ /*FIXME: add it later*/
+} __packed;
+
+int gma_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 opregion_phy;
+ void *base;
+ u32 *lid_state;
+
+ dev_priv->lid_state = NULL;
+
+ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
+ if (opregion_phy == 0)
+ return -ENOTSUPP;
+
+ base = ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ lid_state = base + 0x01ac;
+
+ dev_priv->lid_state = lid_state;
+ dev_priv->lid_last_state = readl(lid_state);
+ return 0;
+}
+
+int gma_intel_opregion_exit(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->lid_state)
+ iounmap(dev_priv->lid_state);
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644
index 00000000000..5eee9ad80da
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -0,0 +1,263 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "mid_bios.h"
+
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ uint32_t fuse_value = 0;
+ uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK 0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+ if (pci_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+ if (IS_MRST(dev))
+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+ DRM_INFO("internal display is %s\n",
+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+ /* Prevent runtime suspend at start */
+ if (dev_priv->iLVDS_enable) {
+ dev_priv->is_lvds_on = true;
+ dev_priv->is_mipi_on = false;
+ } else {
+ dev_priv->is_mipi_on = true;
+ dev_priv->is_lvds_on = false;
+ }
+
+ dev_priv->video_device_fuse = fuse_value;
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ dev_dbg(dev->dev, "SKU value is 0x%x.\n", fuse_value);
+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+ dev_priv->fuse_reg_value = fuse_value;
+
+ switch (fuse_value_tmp) {
+ case FB_SKU_100:
+ dev_priv->core_freq = 200;
+ break;
+ case FB_SKU_100L:
+ dev_priv->core_freq = 100;
+ break;
+ case FB_SKU_83:
+ dev_priv->core_freq = 166;
+ break;
+ default:
+ dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+ fuse_value_tmp);
+ dev_priv->core_freq = 0;
+ }
+ dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+ pci_dev_put(pci_root);
+}
+
+/*
+ * Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+ uint32_t platform_rev_id = 0;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+ pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+ dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+ pci_dev_put(pci_gfx_root);
+ dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+ dev_priv->platform_rev_id);
+}
+
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+ u32 addr;
+ u16 new_size;
+ u8 *vbt_virtual;
+ u8 bpi;
+ u8 number_desc = 0;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info ti;
+ void *pGCT;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+ pci_dev_put(pci_gfx_root);
+
+ dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+ /* check for platform config address == 0. */
+ /* this means fw doesn't support vbt */
+
+ if (addr == 0) {
+ vbt->size = 0;
+ return;
+ }
+
+ /* get the virtual address of the vbt */
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL) {
+ vbt->size = 0;
+ return;
+ }
+
+ memcpy(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual); /* Free virtual address space */
+
+ /* No matching signature, don't process the data */
+ if (memcmp(vbt->signature, "$GCT", 4)) {
+ vbt->size = 0;
+ return;
+ }
+
+ dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+
+ switch (vbt->revision) {
+ case 0:
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 1:
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 0x10:
+ /*header definition changed from rev 01 (v2) to rev 10h. */
+ /*so, some values have changed location*/
+ new_size = vbt->checksum; /*checksum contains lo size byte*/
+ /*LSB of oaktrail_gct contains hi size byte*/
+ new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
+
+ vbt->checksum = vbt->size; /*size contains the checksum*/
+ if (new_size > 0xff)
+ vbt->size = 0xff; /*restrict size to 255*/
+ else
+ vbt->size = new_size;
+
+ /* number of descriptors defined in the GCT */
+ number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
+ bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
+ vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+ GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+ pGCT = vbt->oaktrail_gct;
+ pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+ dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+
+ /*copy the GCT display timings into a temp structure*/
+ memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+
+ /*now copy the temp struct into the dev_priv->gct_data*/
+ dp_ti->pixel_clock = ti.pixel_clock;
+ dp_ti->hactive_hi = ti.hactive_hi;
+ dp_ti->hactive_lo = ti.hactive_lo;
+ dp_ti->hblank_hi = ti.hblank_hi;
+ dp_ti->hblank_lo = ti.hblank_lo;
+ dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti.vactive_hi;
+ dp_ti->vactive_lo = ti.vactive_lo;
+ dp_ti->vblank_hi = ti.vblank_hi;
+ dp_ti->vblank_lo = ti.vblank_lo;
+ dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+
+ /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ *((u8 *)pGCT + 0x0d);
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+ (*((u8 *)pGCT + 0x0e)) << 8;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown revision of GCT!\n");
+ vbt->size = 0;
+ }
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ mid_get_fuse_settings(dev);
+ mid_get_vbt_data(dev_priv);
+ mid_get_pci_revID(dev_priv);
+ return 0;
+}
diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644
index 00000000000..00e7d564b7e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mid_bios.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 00000000000..c904d73b1de
--- /dev/null
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,858 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller needs to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
+
+struct psb_mmu_driver {
+ /* protects driver- and pd structures. Always take in read mode
+ * before taking the page table spinlock.
+ */
+ struct rw_semaphore sem;
+
+ /* protects page tables, directory tables and pt tables.
+ * and pt structures.
+ */
+ spinlock_t lock;
+
+ atomic_t needs_tlbflush;
+
+ uint8_t __iomem *register_map;
+ struct psb_mmu_pd *default_pd;
+ /*uint32_t bif_ctrl;*/
+ int has_clflush;
+ int clflush_add;
+ unsigned long clflush_mask;
+
+ struct drm_psb_private *dev_priv;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+ struct psb_mmu_pd *pd;
+ uint32_t index;
+ uint32_t count;
+ struct page *p;
+ uint32_t *v;
+};
+
+struct psb_mmu_pd {
+ struct psb_mmu_driver *driver;
+ int hw_context;
+ struct psb_mmu_pt **tables;
+ struct page *p;
+ struct page *dummy_pt;
+ struct page *dummy_page;
+ uint32_t pd_mask;
+ uint32_t invalid_pde;
+ uint32_t invalid_pte;
+};
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+ return offset >> PSB_PDE_SHIFT;
+}
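
For orientation (shift values assumed, not taken from this patch): with 4 KiB pages and the 1024-entry tables implied by the 0x3FF mask and the 1024-slot tables array, PSB_PTE_SHIFT and PSB_PDE_SHIFT would be 12 and 22, so a 32-bit MMU offset splits as in the worked example below.

/*
 * Illustrative split of offset 0x12345678, assuming PSB_PDE_SHIFT == 22
 * and PSB_PTE_SHIFT == 12:
 *   page directory index:  0x12345678 >> 22           = 0x48
 *   page table index:     (0x12345678 >> 12) & 0x3ff  = 0x345
 *   offset within page:    0x12345678 & 0xfff         = 0x678
 */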
+
+static inline void psb_clflush(void *addr)
+{
+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+ void *addr)
+{
+ if (!driver->has_clflush)
+ return;
+
+ mb();
+ psb_clflush(addr);
+ mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+{
+ uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
+ int i;
+ uint8_t *clf;
+
+ clf = kmap_atomic(page, KM_USER0);
+ mb();
+ for (i = 0; i < clflush_count; ++i) {
+ psb_clflush(clf);
+ clf += clflush_add;
+ }
+ mb();
+ kunmap_atomic(clf, KM_USER0);
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver,
+ struct page *page[], unsigned long num_pages)
+{
+ int i;
+
+ if (!driver->has_clflush)
+ return ;
+
+ for (i = 0; i < num_pages; i++)
+ psb_page_clflush(driver, *page++);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+ int force)
+{
+ atomic_set(&driver->needs_tlbflush, 0);
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+ down_write(&driver->sem);
+ psb_mmu_flush_pd_locked(driver, force);
+ up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+{
+ if (rc_prot)
+ down_write(&driver->sem);
+ if (rc_prot)
+ up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+ /*ttm_tt_cache_flush(&pd->p, 1);*/
+ psb_pages_clflush(pd->driver, &pd->p, 1);
+ down_write(&pd->driver->sem);
+ wmb();
+ psb_mmu_flush_pd_locked(pd->driver, 1);
+ pd->hw_context = hw_context;
+ up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+ unsigned long end)
+{
+
+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+ return (addr < end) ? addr : end;
+}
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+ uint32_t mask = PSB_PTE_VALID;
+
+ if (type & PSB_MMU_CACHED_MEMORY)
+ mask |= PSB_PTE_CACHED;
+ if (type & PSB_MMU_RO_MEMORY)
+ mask |= PSB_PTE_RO;
+ if (type & PSB_MMU_WO_MEMORY)
+ mask |= PSB_PTE_WO;
+
+ return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults, int invalid_type)
+{
+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ uint32_t *v;
+ int i;
+
+ if (!pd)
+ return NULL;
+
+ pd->p = alloc_page(GFP_DMA32);
+ if (!pd->p)
+ goto out_err1;
+ pd->dummy_pt = alloc_page(GFP_DMA32);
+ if (!pd->dummy_pt)
+ goto out_err2;
+ pd->dummy_page = alloc_page(GFP_DMA32);
+ if (!pd->dummy_page)
+ goto out_err3;
+
+ if (!trap_pagefaults) {
+ pd->invalid_pde =
+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+ invalid_type);
+ pd->invalid_pte =
+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+ invalid_type);
+ } else {
+ pd->invalid_pde = 0;
+ pd->invalid_pte = 0;
+ }
+
+ v = kmap(pd->dummy_pt);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pte;
+
+ kunmap(pd->dummy_pt);
+
+ v = kmap(pd->p);
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ v[i] = pd->invalid_pde;
+
+ kunmap(pd->p);
+
+ clear_page(kmap(pd->dummy_page));
+ kunmap(pd->dummy_page);
+
+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+ if (!pd->tables)
+ goto out_err4;
+
+ pd->hw_context = -1;
+ pd->pd_mask = PSB_PTE_VALID;
+ pd->driver = driver;
+
+ return pd;
+
+out_err4:
+ __free_page(pd->dummy_page);
+out_err3:
+ __free_page(pd->dummy_pt);
+out_err2:
+ __free_page(pd->p);
+out_err1:
+ kfree(pd);
+ return NULL;
+}
+
+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+ __free_page(pt->p);
+ kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_driver *driver = pd->driver;
+ struct psb_mmu_pt *pt;
+ int i;
+
+ down_write(&driver->sem);
+ if (pd->hw_context != -1)
+ psb_mmu_flush_pd_locked(driver, 1);
+
+ /* Should take the spinlock here, but we don't need to do that
+ since we have the semaphore in write mode. */
+
+ for (i = 0; i < 1024; ++i) {
+ pt = pd->tables[i];
+ if (pt)
+ psb_mmu_free_pt(pt);
+ }
+
+ vfree(pd->tables);
+ __free_page(pd->dummy_page);
+ __free_page(pd->dummy_pt);
+ __free_page(pd->p);
+ kfree(pd);
+ up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+ void *v;
+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
+ spinlock_t *lock = &pd->driver->lock;
+ uint8_t *clf;
+ uint32_t *ptes;
+ int i;
+
+ if (!pt)
+ return NULL;
+
+ pt->p = alloc_page(GFP_DMA32);
+ if (!pt->p) {
+ kfree(pt);
+ return NULL;
+ }
+
+ spin_lock(lock);
+
+ v = kmap_atomic(pt->p, KM_USER0);
+ clf = (uint8_t *) v;
+ ptes = (uint32_t *) v;
+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+ *ptes++ = pd->invalid_pte;
+
+
+ if (pd->driver->has_clflush && pd->hw_context != -1) {
+ mb();
+ for (i = 0; i < clflush_count; ++i) {
+ psb_clflush(clf);
+ clf += clflush_add;
+ }
+ mb();
+ }
+
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(lock);
+
+ pt->count = 0;
+ pt->pd = pd;
+ pt->index = 0;
+
+ return pt;
+}
+
+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ uint32_t *v;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ while (!pt) {
+ spin_unlock(lock);
+ pt = psb_mmu_alloc_pt(pd);
+ if (!pt)
+ return NULL;
+ spin_lock(lock);
+
+ if (pd->tables[index]) {
+ spin_unlock(lock);
+ psb_mmu_free_pt(pt);
+ spin_lock(lock);
+ pt = pd->tables[index];
+ continue;
+ }
+
+ v = kmap_atomic(pd->p, KM_USER0);
+ pd->tables[index] = pt;
+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+ pt->index = index;
+ kunmap_atomic((void *) v, KM_USER0);
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+ }
+ pt->v = kmap_atomic(pt->p, KM_USER0);
+ return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+ unsigned long addr)
+{
+ uint32_t index = psb_mmu_pd_index(addr);
+ struct psb_mmu_pt *pt;
+ spinlock_t *lock = &pd->driver->lock;
+
+ spin_lock(lock);
+ pt = pd->tables[index];
+ if (!pt) {
+ spin_unlock(lock);
+ return NULL;
+ }
+ pt->v = kmap_atomic(pt->p, KM_USER0);
+ return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+ struct psb_mmu_pd *pd = pt->pd;
+ uint32_t *v;
+
+ kunmap_atomic(pt->v, KM_USER0);
+ if (pt->count == 0) {
+ v = kmap_atomic(pd->p, KM_USER0);
+ v[pt->index] = pd->invalid_pde;
+ pd->tables[pt->index] = NULL;
+
+ if (pd->hw_context != -1) {
+ psb_mmu_clflush(pd->driver,
+ (void *) &v[pt->index]);
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+ }
+ kunmap_atomic(pt->v, KM_USER0);
+ spin_unlock(&pd->driver->lock);
+ psb_mmu_free_pt(pt);
+ return;
+ }
+ spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+ unsigned long addr, uint32_t pte)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+ unsigned long addr)
+{
+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+ uint32_t mmu_offset, uint32_t gtt_start,
+ uint32_t gtt_pages)
+{
+ uint32_t *v;
+ uint32_t start = psb_mmu_pd_index(mmu_offset);
+ struct psb_mmu_driver *driver = pd->driver;
+ int num_pages = gtt_pages;
+
+ down_read(&driver->sem);
+ spin_lock(&driver->lock);
+
+ v = kmap_atomic(pd->p, KM_USER0);
+ v += start;
+
+ while (gtt_pages--) {
+ *v++ = gtt_start | pd->pd_mask;
+ gtt_start += PAGE_SIZE;
+ }
+
+ /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+ psb_pages_clflush(pd->driver, &pd->p, num_pages);
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(&driver->lock);
+
+ if (pd->hw_context != -1)
+ atomic_set(&pd->driver->needs_tlbflush, 1);
+
+ up_read(&pd->driver->sem);
+ psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ /* down_read(&driver->sem); */
+ pd = driver->default_pd;
+ /* up_read(&driver->sem); */
+
+ return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+{
+ struct psb_mmu_pd *pd;
+
+ pd = psb_mmu_get_default_pd(driver);
+ return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+ psb_mmu_free_pagedir(driver->default_pd);
+ kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+ int trap_pagefaults,
+ int invalid_type,
+ struct drm_psb_private *dev_priv)
+{
+ struct psb_mmu_driver *driver;
+
+ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return NULL;
+ driver->dev_priv = dev_priv;
+
+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+ invalid_type);
+ if (!driver->default_pd)
+ goto out_err1;
+
+ spin_lock_init(&driver->lock);
+ init_rwsem(&driver->sem);
+ down_write(&driver->sem);
+ driver->register_map = registers;
+ atomic_set(&driver->needs_tlbflush, 1);
+
+ driver->has_clflush = 0;
+
+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+ uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+ /*
+ * clflush size is determined at kernel setup for x86_64
+ * but not for i386. We have to do it here.
+ */
+
+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+ clflush_size = ((misc >> 8) & 0xff) * 8;
+ driver->has_clflush = 1;
+ driver->clflush_add =
+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
+ driver->clflush_mask = driver->clflush_add - 1;
+ driver->clflush_mask = ~driver->clflush_mask;
+ }
+
+ up_write(&driver->sem);
+ return driver;
+
+out_err1:
+ kfree(driver);
+ return NULL;
+}
+
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long clflush_add = pd->driver->clflush_add;
+ unsigned long clflush_mask = pd->driver->clflush_mask;
+
+ if (!pd->driver->has_clflush) {
+ /*ttm_tt_cache_flush(&pd->p, num_pages);*/
+ psb_pages_clflush(pd->driver, &pd->p, num_pages);
+ return;
+ }
+
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+ mb();
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_clflush(&pt->v
+ [psb_mmu_pt_index(addr)]);
+ } while (addr +=
+ clflush_add,
+ (addr & clflush_mask) < next);
+
+ psb_mmu_pt_unmap_unlock(pt);
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ mb();
+}
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages)
+{
+ struct psb_mmu_pt *pt;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt)
+ goto out;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 0);
+
+ return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+ uint32_t num_pages, uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+
+ if (hw_tile_stride)
+ rows = num_pages / desired_tile_stride;
+ else
+ desired_tile_stride = num_pages;
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ /* down_read(&pd->driver->sem); */
+
+ /* Make sure we only need to flush this processor's cache */
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_map_lock(pd, addr);
+ if (!pt)
+ continue;
+ do {
+ psb_mmu_invalidate_pte(pt, addr);
+ --pt->count;
+
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+ address += row_add;
+ }
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ /* up_read(&pd->driver->sem); */
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+ unsigned long address, uint32_t num_pages,
+ int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long f_address = address;
+ int ret = 0;
+
+ down_read(&pd->driver->sem);
+
+ addr = address;
+ end = addr + (num_pages << PAGE_SHIFT);
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ do {
+ pte = psb_mmu_mask_pte(start_pfn++, type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 1);
+
+ return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type)
+{
+ struct psb_mmu_pt *pt;
+ uint32_t rows = 1;
+ uint32_t i;
+ uint32_t pte;
+ unsigned long addr;
+ unsigned long end;
+ unsigned long next;
+ unsigned long add;
+ unsigned long row_add;
+ unsigned long f_address = address;
+ int ret = 0;
+
+ if (hw_tile_stride) {
+ if (num_pages % desired_tile_stride != 0)
+ return -EINVAL;
+ rows = num_pages / desired_tile_stride;
+ } else {
+ desired_tile_stride = num_pages;
+ }
+
+ add = desired_tile_stride << PAGE_SHIFT;
+ row_add = hw_tile_stride << PAGE_SHIFT;
+
+ down_read(&pd->driver->sem);
+
+ for (i = 0; i < rows; ++i) {
+
+ addr = address;
+ end = addr + add;
+
+ do {
+ next = psb_pd_addr_end(addr, end);
+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+ if (!pt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ do {
+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++), type);
+ psb_mmu_set_pte(pt, addr, pte);
+ pt->count++;
+ } while (addr += PAGE_SIZE, addr < next);
+ psb_mmu_pt_unmap_unlock(pt);
+
+ } while (addr = next, next != end);
+
+ address += row_add;
+ }
+out:
+ if (pd->hw_context != -1)
+ psb_mmu_flush_ptes(pd, f_address, num_pages,
+ desired_tile_stride, hw_tile_stride);
+
+ up_read(&pd->driver->sem);
+
+ if (pd->hw_context != -1)
+ psb_mmu_flush(pd->driver, 1);
+
+ return ret;
+}
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn)
+{
+ int ret;
+ struct psb_mmu_pt *pt;
+ uint32_t tmp;
+ spinlock_t *lock = &pd->driver->lock;
+
+ down_read(&pd->driver->sem);
+ pt = psb_mmu_pt_map_lock(pd, virtual);
+ if (!pt) {
+ uint32_t *v;
+
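+ /*
+  * No page table is resident for this address, so inspect the raw
+  * page-directory entry under the driver lock instead.
+  */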
+ spin_lock(lock);
+ v = kmap_atomic(pd->p, KM_USER0);
+ tmp = v[psb_mmu_pd_index(virtual)];
+ kunmap_atomic(v, KM_USER0);
+ spin_unlock(lock);
+
+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+ !(pd->invalid_pte & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
+ goto out;
+ }
+ tmp = pt->v[psb_mmu_pt_index(virtual)];
+ if (!(tmp & PSB_PTE_VALID)) {
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ *pfn = tmp >> PAGE_SHIFT;
+ }
+ psb_mmu_pt_unmap_unlock(pt);
+out:
+ up_read(&pd->driver->sem);
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644
index 00000000000..2da1f368f14
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -0,0 +1,252 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_vbt {
+ s8 signature[4]; /*4 bytes,"$GCT" */
+ u8 revision;
+ u8 size;
+ u8 checksum;
+ void *oaktrail_gct;
+} __packed;
+
+struct oaktrail_timing_info {
+ u16 pixel_clock;
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_offset_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_offset_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_offset_hi:2;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 height_mm_hi:4;
+ u8 width_mm_hi:4;
+ u8 hborder;
+ u8 vborder;
+ u8 unknown0:1;
+ u8 hsync_positive:1;
+ u8 vsync_positive:1;
+ u8 separate_sync:2;
+ u8 stereo:1;
+ u8 unknown6:1;
+ u8 interlaced:1;
+} __packed;
+
+struct gct_r10_timing_info {
+ u16 pixel_clock;
+ u32 hactive_lo:8;
+ u32 hactive_hi:4;
+ u32 hblank_lo:8;
+ u32 hblank_hi:4;
+ u32 hsync_offset_lo:8;
+ u16 hsync_offset_hi:2;
+ u16 hsync_pulse_width_lo:8;
+ u16 hsync_pulse_width_hi:2;
+ u16 hsync_positive:1;
+ u16 rsvd_1:3;
+ u8 vactive_lo:8;
+ u16 vactive_hi:4;
+ u16 vblank_lo:8;
+ u16 vblank_hi:4;
+ u16 vsync_offset_lo:4;
+ u16 vsync_offset_hi:2;
+ u16 vsync_pulse_width_lo:4;
+ u16 vsync_pulse_width_hi:2;
+ u16 vsync_positive:1;
+ u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+ u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+ /* 0x61190 if MIPI */
+ u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+ /* Register 0x61210 */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+ u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+ u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+ u16 Panel_MIPI_Display_Descriptor;
+ /*16 bits, Defined as follows: */
+ /* if MIPI, 0x0000 if LVDS */
+ /* Bit 0, Type, 2 bits, */
+ /* 0: Type-1, */
+ /* 1: Type-2, */
+ /* 2: Type-3, */
+ /* 3: Type-4 */
+ /* Bit 2, Pixel Format, 4 bits */
+ /* Bit0: 16bpp (not supported in LNC), */
+ /* Bit1: 18bpp loosely packed, */
+ /* Bit2: 18bpp packed, */
+ /* Bit3: 24bpp */
+ /* Bit 6, Reserved, 2 bits, 00b */
+ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+ /* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+ struct {
+ u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+ u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+ u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+ /* 1: Burst and non-burst */
+ /* 2/3: Reserved */
+ u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+ u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+ u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+ u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+ u16 Rsvd:5;/*5 bits,00000b */
+ } panelrx;
+ u16 panel_receiver;
+} __packed;
+
+struct oaktrail_gct_v1 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_v2 {
+ union { /*8 bits,Defined as follows: */
+ struct {
+ u8 PanelType:4; /*4 bits, Bit field for panels*/
+ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
+ /*2 bits,Specifies which of the*/
+ u8 BootPanelIndex:2;
+ /* 4 panels to use by default*/
+ u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+ /* the 4 MIPI DSI receivers to use*/
+ } PD;
+ u8 PanelDescriptor;
+ };
+ struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct oaktrail_gct_data {
+ u8 bpi; /* boot panel index, number of panel used during boot */
+ u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+ struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+ u32 Panel_Port_Control;
+ u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+ u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+ u32 PP_Cycle_Delay;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC 0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 0x3
+#define MODE_SETTING_IN_DSR 0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+
+#define GCT_R10_HEADER_SIZE 16
+#define GCT_R10_DISPLAY_DESC_SIZE 28
+
+/*
+ * Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+ struct pci_dev *dev;
+ void __iomem *regs;
+ unsigned int mmio, mmio_len;
+ int dpms_mode;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ /* register state */
+ u32 saveDPLL_CTRL;
+ u32 saveDPLL_DIV_CTRL;
+ u32 saveDPLL_ADJUST;
+ u32 saveDPLL_UPDATE;
+ u32 saveDPLL_CLK_ENABLE;
+ u32 savePCH_HTOTAL_B;
+ u32 savePCH_HBLANK_B;
+ u32 savePCH_HSYNC_B;
+ u32 savePCH_VTOTAL_B;
+ u32 savePCH_VBLANK_B;
+ u32 savePCH_VSYNC_B;
+ u32 savePCH_PIPEBCONF;
+ u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644
index 00000000000..9d12a3ee160
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct oaktrail_limit_t {
+ struct psb_intel_range_t dot, m, p1;
+};
+
+struct oaktrail_clock_t {
+ /* derived values */
+ int dot;
+ int m;
+ int p1;
+};
+
+#define MRST_LIMIT_LVDS_100L 0
+#define MRST_LIMIT_LVDS_83 1
+#define MRST_LIMIT_LVDS_100 2
+
+#define MRST_DOT_MIN 19750
+#define MRST_DOT_MAX 120000
+#define MRST_M_MIN_100L 20
+#define MRST_M_MIN_100 10
+#define MRST_M_MIN_83 12
+#define MRST_M_MAX_100L 34
+#define MRST_M_MAX_100 17
+#define MRST_M_MAX_83 20
+#define MRST_P1_MIN 2
+#define MRST_P1_MAX_0 7
+#define MRST_P1_MAX_1 8
+
+static const struct oaktrail_limit_t oaktrail_limits[] = {
+ { /* MRST_LIMIT_LVDS_100L */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ },
+ { /* MRST_LIMIT_LVDS_83 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+ },
+ { /* MRST_LIMIT_LVDS_100 */
+ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+ },
+};
+
+#define MRST_M_MIN 10
+static const u32 oaktrail_m_converts[] = {
+ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+{
+ const struct oaktrail_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+ switch (dev_priv->core_freq) {
+ case 100:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+ break;
+ case 166:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+ break;
+ case 200:
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+ break;
+ }
+ } else {
+ limit = NULL;
+ dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+ }
+
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+{
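+ /* dot clock = (refclk * m) / (14 * p1), in the same units as refclk */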
+ clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
+
+void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+{
+ pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
+ prefix, clock->dot, clock->m, clock->p1);
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given refclk,
+ * or FALSE. Divisor values are the actual divisors, not register encodings.
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+ struct oaktrail_clock_t *best_clock)
+{
+ struct oaktrail_clock_t clock;
+ const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ oaktrail_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+ return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+ /* Wait for the pipe disable to take effect. */
+ psb_intel_wait_for_vblank(dev);
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ /*Set FIFO Watermarks*/
+ REG_WRITE(DSPARB, 0x3FFF);
+ REG_WRITE(DSPFW1, 0x3F88080A);
+ REG_WRITE(DSPFW2, 0x0b060808);
+ REG_WRITE(DSPFW3, 0x0);
+ REG_WRITE(DSPFW4, 0x08030404);
+ REG_WRITE(DSPFW5, 0x04040404);
+ REG_WRITE(DSPFW6, 0x78);
+ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+ /* Must write Bit 14 of the Chicken Bit Register */
+
+ gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
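+ /* Bits 30:29 of PFIT_CONTROL select the pipe (per the shift and mask below) */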
+ return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk = 0;
+ struct oaktrail_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_mipi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct psb_intel_encoder *psb_intel_encoder = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_connector *connector;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&psb_intel_crtc->saved_mode,
+ mode,
+ sizeof(struct drm_display_mode));
+ memcpy(&psb_intel_crtc->saved_adjusted_mode,
+ adjusted_mode,
+ sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (oaktrail_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) |
+ (mode->crtc_vdisplay - 1));
+
+ if (psb_intel_encoder)
+ drm_connector_property_get_value(connector,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+ /* Moorestown doesn't have register support for centering so
+ * we need to mess with the h/vblank and h/vsync start and
+ * ends to get centering */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay -
+ mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay -
+ mode->crtc_vdisplay) / 2;
+
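+ /*
+  * Center the requested mode inside the panel's native timing: the
+  * offsets are half the difference between the native and requested
+  * active sizes and are subtracted from the blank/sync positions below.
+  */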
+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg,
+ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+ REG_WRITE(hsync_reg,
+ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+ REG_WRITE(vblank_reg,
+ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+ REG_WRITE(vsync_reg,
+ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ } else {
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+
+ if (is_mipi)
+ goto oaktrail_crtc_mode_set_exit;
+
+ refclk = dev_priv->core_freq * 1000;
+
+ dpll = 0; /*BIT16 = 0 for 100MHz reference */
+
+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+ if (!ok) {
+ dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
+ } else {
+ dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
+ "m = %x, p1 = %x.\n", clock.dot, clock.m,
+ clock.p1);
+ }
+
+ fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+ dpll |= DPLL_VGA_MODE_DIS;
+
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ if (is_lvds)
+ dpll |= DPLLA_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply -
+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+
+ /* compute bitmask from p1 value */
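+ /* p1 == 2 selects bit 17; each larger divisor shifts the set bit up by one */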
+ dpll |= (1 << (clock.p1 - 2)) << 17;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ mrstPrintPll("chosen", &clock);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Check the DPLLA lock bit PIPEACONF[29] */
+ udelay(150);
+ }
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ psb_intel_wait_for_vblank(dev);
+
+oaktrail_crtc_mode_set_exit:
+ gma_power_end(dev);
+ return 0;
+}
+
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+
+ int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+ .dpms = oaktrail_crtc_dpms,
+ .mode_fixup = oaktrail_crtc_mode_fixup,
+ .mode_set = oaktrail_crtc_mode_set,
+ .mode_set_base = oaktrail_pipe_set_base,
+ .prepare = oaktrail_crtc_prepare,
+ .commit = oaktrail_crtc_commit,
+};
+
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644
index 00000000000..63aea2f010d
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -0,0 +1,512 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+static int oaktrail_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->iLVDS_enable)
+ oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+ else
+ dev_err(dev->dev, "DSI is not supported\n");
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+ return 0;
+}
+
+/*
+ * Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
+
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
+ u32 max_pwm_blc;
+
+ /* Only brightness levels in the 1-100% range are valid */
+ if (level < 1)
+ level = 1;
+
+ if (gma_power_begin(dev, 0)) {
+ /* Calculate and set the brightness value */
+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+ blc_pwm_ctl = level * max_pwm_blc / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj1;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* force PWM bit on */
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
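+ /* The period read earlier stays in the top 16 bits; only the low
+  16-bit duty cycle changes with the requested level */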
+ REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+ gma_power_end(dev);
+ }
+ oaktrail_brightness = level;
+ return 0;
+}
+
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return oaktrail_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+ bl_max_freq = 256;
+ /* this needs to be set elsewhere */
+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
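+ /*
+  * 'value' is the PWM modulation frequency divider derived from the core
+  * clock; it is programmed below as both the period (high 16 bits) and
+  * the duty cycle (low 16 bits), i.e. full duty cycle at init.
+  */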
+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+ return -ERANGE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_ops = {
+ .get_brightness = oaktrail_get_brightness,
+ .update_status = oaktrail_set_brightness,
+};
+
+int oaktrail_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+ NULL, (void *)dev, &oaktrail_ops, &props);
+
+ if (IS_ERR(oaktrail_backlight_device))
+ return PTR_ERR(oaktrail_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret < 0) {
+ backlight_device_unregister(oaktrail_backlight_device);
+ return ret;
+ }
+ oaktrail_backlight_device->props.brightness = 100;
+ oaktrail_backlight_device->props.max_brightness = 100;
+ backlight_update_status(oaktrail_backlight_device);
+ dev_priv->backlight_device = oaktrail_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Moorestown specific chip logic and low level methods
+ * for power management
+ */
+
+static void oaktrail_init_pm(struct drm_device *dev)
+{
+}
+
+/**
+ * oaktrail_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+ u32 pp_stat;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+
+ /* Save cursor regs */
+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+ /* Save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_save(dev);
+
+ /* Save performance state */
+ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+ /* LVDS state */
+ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+ /* HW overlay */
+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+ /* DPST registers */
+ dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ if (dev_priv->iLVDS_enable) {
+ /* Shut down the panel */
+ PSB_WVDC32(0, PP_CONTROL);
+
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x80000000);
+
+ /* Turn off the plane */
+ PSB_WVDC32(0x58000000, DSPACNTR);
+ /* Trigger the plane disable */
+ PSB_WVDC32(0, DSPASURF);
+
+ /* Wait ~4 ticks */
+ msleep(4);
+
+ /* Turn off pipe */
+ PSB_WVDC32(0x0, PIPEACONF);
+ /* Wait ~8 ticks */
+ msleep(8);
+
+ /* Turn off PLLs */
+ PSB_WVDC32(0, MRST_DPLL_A);
+ }
+ return 0;
+}
+
+/**
+ * oaktrail_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_stat;
+ int i;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+ /* Make sure the VGA plane is off. It initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ /* set the plls */
+ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+
+ /* Actually enable it */
+ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+
+ /* Restore performance mode*/
+ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+
+ /* Enable the pipe*/
+ if (dev_priv->iLVDS_enable)
+ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+
+ /* Set up the plane*/
+ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+
+ /* Enable the plane */
+ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+
+ /* Enable Cursor A */
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+ /* Restore palette (gamma) */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ oaktrail_hdmi_restore(dev);
+
+ if (dev_priv->iLVDS_enable) {
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+ }
+
+ /* Wait for cycle delay */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x08000000);
+
+ /* Wait for panel power up */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x10000000);
+
+ /* Restore HW overlay */
+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+ /* DPST registers */
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+ HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+ HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+ return 0;
+}
+
+/**
+ * oaktrail_power_down - power down the display island
+ * @dev: our DRM device
+ *
+ * Power down the display interface of our device
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask;
+ u32 pwr_sts;
+
+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == pwr_mask)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+/*
+ * oaktrail_power_up
+ *
+ * Restore power to the specified island(s) (powergating)
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ u32 pwr_sts, pwr_cnt;
+
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~pwr_mask;
+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == 0)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+ int ret;
+
+ ret = mid_chip_setup(dev);
+ if (ret < 0)
+ return ret;
+ if (vbt->size == 0) {
+ /* Now pull the BIOS data */
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ }
+ return 0;
+}
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+
+ oaktrail_hdmi_teardown(dev);
+ if (vbt->size == 0)
+ psb_intel_destroy_bios(dev);
+}
+
+const struct psb_ops oaktrail_chip_ops = {
+ .name = "Oaktrail",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = oaktrail_chip_setup,
+ .chip_teardown = oaktrail_teardown,
+ .crtc_helper = &oaktrail_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = oaktrail_backlight_init,
+#endif
+
+ .init_pm = oaktrail_init_pm,
+ .save_regs = oaktrail_save_display_registers,
+ .restore_regs = oaktrail_restore_display_registers,
+ .power_down = oaktrail_power_down,
+ .power_up = oaktrail_power_up,
+
+ .i2c_bus = 1,
+};
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644
index 00000000000..36878a60080
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+
+#define HDMI_HICR 0x1004
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_DETECT_HDP (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN (1 << 7)
+#define HDMI_MODE_OUTPUT (1 << 0)
+#define HDMI_HBLANK_A 0x3100
+
+#define HDMI_AUDIO_CTRL 0x4000
+#define HDMI_ENABLE_AUDIO (1 << 0)
+
+#define PCH_HTOTAL_B 0x3100
+#define PCH_HBLANK_B 0x3104
+#define PCH_HSYNC_B 0x3108
+#define PCH_VTOTAL_B 0x310C
+#define PCH_VBLANK_B 0x3110
+#define PCH_VSYNC_B 0x3114
+#define PCH_PIPEBSRC 0x311C
+
+#define PCH_PIPEB_DSL 0x3800
+#define PCH_PIPEB_SLC 0x3804
+#define PCH_PIPEBCONF 0x3808
+#define PCH_PIPEBSTAT 0x3824
+
+#define CDVO_DFT 0x5000
+#define CDVO_SLEWRATE 0x5004
+#define CDVO_STRENGTH 0x5008
+#define CDVO_RCOMP 0x500C
+
+#define DPLL_CTRL 0x6000
+#define DPLL_PDIV_SHIFT 16
+#define DPLL_PDIV_MASK (0xf << 16)
+#define DPLL_PWRDN (1 << 4)
+#define DPLL_RESET (1 << 3)
+#define DPLL_FASTEN (1 << 2)
+#define DPLL_ENSTAT (1 << 1)
+#define DPLL_DITHEN (1 << 0)
+
+#define DPLL_DIV_CTRL 0x6004
+#define DPLL_CLKF_MASK 0xffffffc0
+#define DPLL_CLKR_MASK (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP (1 << 31)
+#define DPLL_SEL_HDMI (1 << 8)
+#define DPLL_EN_HDMI (1 << 1)
+#define DPLL_EN_VGA (1 << 0)
+
+#define DPLL_ADJUST 0x600C
+#define DPLL_STATUS 0x6010
+#define DPLL_UPDATE 0x6014
+#define DPLL_DFT 0x6020
+
+struct intel_range {
+ int min, max;
+};
+
+struct oaktrail_hdmi_limit {
+ struct intel_range vco, np, nr, nf;
+};
+
+struct oaktrail_hdmi_clock {
+ int np;
+ int nr;
+ int nf;
+ int dot;
+};
+
+#define VCO_MIN 320000
+#define VCO_MAX 1650000
+#define NP_MIN 1
+#define NP_MAX 15
+#define NR_MIN 1
+#define NR_MAX 64
+#define NF_MIN 2
+#define NF_MAX 4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+ .vco = { .min = VCO_MIN, .max = VCO_MAX },
+ .np = { .min = NP_MIN, .max = NP_MAX },
+ .nr = { .min = NR_MIN, .max = NR_MAX },
+ .nf = { .min = NF_MIN, .max = NF_MAX },
+};
+
+static void wait_for_vblank(struct drm_device *dev)
+{
+ /* FIXME: Can we do this as a sleep ? */
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+static void scu_busy_loop(void *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+ udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+ /* break if scu doesn't reset busy bit after huge retry */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+ void *base;
+ /* FIXME: at least make these defines */
+ unsigned int scu_ipc_mmio = 0xff11c000;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map SCU mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(HDMI_HCR, 0x67);
+ HDMI_READ(HDMI_HCR);
+
+ HDMI_WRITE(0x51a8, 0x10);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(0x51a8, 0x0);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+
+ HDMI_WRITE(HDMI_HCR, 0x47);
+ HDMI_READ(HDMI_HCR);
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ /* Disable VGACNTRL */
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ /* wait for pipe off */
+ udelay(150);
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ /* wait for dpll off */
+ udelay(150);
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+ psb_intel_crtc_load_lut(crtc);
+ }
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+ /* LNC Chicken Bits */
+ REG_WRITE(0x70400, 0x4000);
+}
+
+
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ static int dpms_mode = -1;
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ if (dpms_mode == mode)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ temp = 0x0;
+ else
+ temp = 0x99;
+
+ dpms_mode = mode;
+ HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
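+ /*
+  * Rescale the horizontal total from the mode's pixel clock to the fixed
+  * 200 MHz dot clock the HDMI unit appears to run at (hence the
+  * 200 * 1000 factor, with mode->clock in kHz).
+  */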
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
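+ /*
+  * Pick np from the allowed VCO range, then nr from the reference clock,
+  * and finally nf so that refclk * nf / (np * nr) works out to roughly
+  * ten times the target pixel clock; nr and nf are converted to their
+  * register encodings (nr - 1, nf << 14) at the end.
+  */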
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct oaktrail_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* XXX: Disable the panel fitter if it was on our pipe */
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* reset controller: FIXME - can we sort out the ioremap mess ? */
+ iounmap(hdmi_dev->regs);
+ oaktrail_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Setting DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (hdmi_dev->regs == NULL) {
+ DRM_ERROR("failed to do hdmi mmio mapping\n");
+ return -ENOMEM;
+ }
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ return 0;
+}
+
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HSR);
+ DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+ if ((temp & HDMI_DETECT_HDP) != 0)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ return status;
+}
+
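+/*
+ * Hard-coded fallback EDID block (the monitor name string in it reads
+ * "DELL 2709W"), used instead of a DDC read - see the FIXME in
+ * oaktrail_hdmi_get_modes().
+ */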
+static const unsigned char raw_edid[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+ 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+ 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+ 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+ 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+ 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ struct drm_display_mode *mode, *t;
+ int i = 0, ret = 0;
+
+ i2c_adap = i2c_get_adapter(3);
+ if (i2c_adap == NULL) {
+ DRM_ERROR("No ddc adapter available!\n");
+ edid = (struct edid *)raw_edid;
+ } else {
+ edid = (struct edid *)raw_edid;
+ /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+ }
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ }
+
+ /*
+ * prune modes that require a frame buffer bigger than the stolen memory
+ */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+ i++;
+ drm_mode_remove(connector, mode);
+ }
+ }
+ return ret - i;
+}
+
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ oaktrail_hdmi_audio_enable(dev);
+ return;
+}
+
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+ .dpms = oaktrail_hdmi_dpms,
+ .mode_fixup = oaktrail_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = oaktrail_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ oaktrail_hdmi_connector_helper_funcs = {
+ .get_modes = oaktrail_hdmi_get_modes,
+ .mode_valid = oaktrail_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = oaktrail_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = oaktrail_hdmi_destroy,
+};
+
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+ .destroy = oaktrail_hdmi_enc_destroy,
+};
+
+void oaktrail_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &oaktrail_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, encoder,
+ &oaktrail_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+ drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_sysfs_connector_add(connector);
+
+ return;
+
+failed_connector:
+ kfree(psb_intel_encoder);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+ {}
+};
+
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+ if (!pdev)
+ return;
+
+ hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev->dev, "failed to allocate memory\n");
+ goto out;
+ }
+
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hdmi controller\n");
+ goto free;
+ }
+
+ hdmi_dev->mmio = pci_resource_start(pdev, 0);
+ hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (!hdmi_dev->regs) {
+ dev_err(dev->dev, "failed to map hdmi mmio\n");
+ goto free;
+ }
+
+ hdmi_dev->dev = pdev;
+ pci_set_drvdata(pdev, hdmi_dev);
+
+ /* Initialize i2c controller */
+ ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+ if (ret)
+ dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+ dev_priv->hdmi_priv = hdmi_dev;
+ oaktrail_hdmi_audio_disable(dev);
+ return;
+
+free:
+ kfree(hdmi_dev);
+out:
+ return;
+}
+
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct pci_dev *pdev;
+
+ if (hdmi_dev) {
+ pdev = hdmi_dev->dev;
+ pci_set_drvdata(pdev, NULL);
+ oaktrail_hdmi_i2c_exit(pdev);
+ iounmap(hdmi_dev->regs);
+ kfree(hdmi_dev);
+ pci_dev_put(pdev);
+ }
+}
+
+/* save HDMI register state */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+ hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+ hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+ hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+ hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+ /* pipe B */
+ dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+ dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
+ dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
+ dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
+ dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
+ dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
+ dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
+ dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+
+ hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+ hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+ hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+ hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+ hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
+ hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+ hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+ hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
+
+ /* plane */
+ dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+ dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+ dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+ dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+ dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+
+ /* cursor B */
+ dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+ /* save palette */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+ PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+ PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+ DRM_UDELAY(150);
+
+ /* pipe */
+ PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+ PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+ PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+ PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
+ PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+ PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+ PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
+
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+ PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
+
+ PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+ /* plane */
+ PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+ PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+ PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+
+ /* cursor B */
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+ /* restore palette */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644
index 00000000000..705440874ac
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_DETECT_HDP (1 << 6)
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+#define HDMI_HICR 0x1004
+#define HDMI_INTR_I2C_ERROR (1 << 4)
+#define HDMI_INTR_I2C_FULL (1 << 3)
+#define HDMI_INTR_I2C_DONE (1 << 2)
+#define HDMI_INTR_HPD (1 << 0)
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_HI2CRDB0 0x1200
+#define HDMI_HI2CHCR 0x1240
+#define HI2C_HDCP_WRITE (0 << 2)
+#define HI2C_HDCP_RI_READ (1 << 2)
+#define HI2C_HDCP_READ (2 << 2)
+#define HI2C_EDID_READ (3 << 2)
+#define HI2C_READ_CONTINUE (1 << 1)
+#define HI2C_ENABLE_TRANSACTION (1 << 0)
+
+#define HDMI_ICRH 0x1100
+#define HDMI_HI2CTDR0 0x1244
+#define HDMI_HI2CTDR1 0x1248
+
+#define I2C_STAT_INIT 0
+#define I2C_READ_DONE 1
+#define I2C_TRANSACTION_DONE 2
+
+struct hdmi_i2c_dev {
+ struct i2c_adapter *adap;
+ struct mutex i2c_lock;
+ struct completion complete;
+ int status;
+ struct i2c_msg *msg;
+ int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HICR);
+ temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+ HDMI_WRITE(HDMI_HICR, temp);
+ HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ HDMI_WRITE(HDMI_HICR, 0x0);
+ HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ i2c_dev->status = I2C_STAT_INIT;
+ i2c_dev->msg = pmsg;
+ i2c_dev->buf_offset = 0;
+ INIT_COMPLETION(i2c_dev->complete);
+
+ /* Enable I2C transaction */
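+ /* The transfer length is placed at bit 20; HI2C_EDID_READ selects an
+ EDID read and HI2C_ENABLE_TRANSACTION kicks it off */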
+ temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+ HDMI_WRITE(HDMI_HI2CHCR, temp);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ while (i2c_dev->status != I2C_TRANSACTION_DONE)
+ wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+ 10 * HZ);
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ /*
+ * XXX: i2c writes don't seem to be needed for the EDID probe, so do nothing
+ */
+ return 0;
+}
+
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+ struct i2c_msg *pmsg,
+ int num)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ int i, err = 0;
+
+ mutex_lock(&i2c_dev->i2c_lock);
+
+ /* Enable i2c unit */
+ HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+ /* Enable irq */
+ hdmi_i2c_irq_enable(hdmi_dev);
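+ /* Walk the message list; only reads are implemented (xfer_write is a no-op) */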
+ for (i = 0; i < num; i++) {
+ if (pmsg->len && pmsg->buf) {
+ if (pmsg->flags & I2C_M_RD)
+ err = xfer_read(adap, pmsg);
+ else
+ err = xfer_write(adap, pmsg);
+ }
+ pmsg++; /* next message */
+ }
+
+ /* Disable irq */
+ hdmi_i2c_irq_disable(hdmi_dev);
+
+ mutex_unlock(&i2c_dev->i2c_lock);
+
+ return i;
+}
+
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+ .master_xfer = oaktrail_hdmi_i2c_access,
+ .functionality = oaktrail_hdmi_i2c_func,
+};
+
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+ .name = "oaktrail_hdmi_i2c",
+ .nr = 3,
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_DDC,
+ .algo = &oaktrail_hdmi_i2c_algorithm,
+};
+
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ u8 *buf = msg->buf;
+ u32 temp;
+ int i, offset;
+
+ offset = i2c_dev->buf_offset;
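+ /* Drain the 16 dword (64 byte) read FIFO into the message buffer */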
+ for (i = 0; i < 0x10; i++) {
+ temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+ memcpy(buf + (offset + i * 4), &temp, 4);
+ }
+ i2c_dev->buf_offset += (0x10 * 4);
+
+ /* clearing read buffer full intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+ HDMI_READ(HDMI_HISR);
+
+ /* continue read transaction */
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_READ_DONE;
+ return;
+}
+
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ /* clear transaction done intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+ HDMI_READ(HDMI_HISR);
+
+
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_TRANSACTION_DONE;
+ return;
+}
+
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev = dev;
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 stat;
+
+ stat = HDMI_READ(HDMI_HISR);
+
+ if (stat & HDMI_INTR_HPD) {
+ HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+ HDMI_READ(HDMI_HISR);
+ }
+
+ if (stat & HDMI_INTR_I2C_FULL)
+ hdmi_i2c_read(hdmi_dev);
+
+ if (stat & HDMI_INTR_I2C_DONE)
+ hdmi_i2c_transaction_done(hdmi_dev);
+
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Select alternate function 2 of GPIO pins 52 and 53,
+ * which are used by the HDMI I2C logic
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+ void __iomem *base;
+ unsigned int gpio_base = 0xff12c000;
+ int gpio_len = 0x1000;
+ u32 temp;
+
+ base = ioremap((resource_size_t)gpio_base, gpio_len);
+ if (base == NULL) {
+ DRM_ERROR("gpio ioremap fail\n");
+ return;
+ }
+
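+ /* Set bits 9 and 11 in the pad register at offset 0x44 to route the
+ pins to their HDMI I2C (alternate function 2) role */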
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+ writel((temp | 0x00000a00), (base + 0x44));
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+ iounmap(base);
+}
+
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+ int ret;
+
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+ if (i2c_dev == NULL) {
+ DRM_ERROR("Can't allocate interface\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+ init_completion(&i2c_dev->complete);
+ mutex_init(&i2c_dev->i2c_lock);
+ i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+ hdmi_dev->i2c_dev = i2c_dev;
+
+ /* Enable HDMI I2C function on gpio */
+ oaktrail_hdmi_i2c_gpio_fix();
+
+ /* request irq */
+ ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+ goto err;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+ return ret;
+
+err:
+ kfree(i2c_dev);
+exit:
+ return ret;
+}
+
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+ struct oaktrail_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ hdmi_dev = pci_get_drvdata(dev);
+ if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
+ DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+
+ i2c_dev = hdmi_dev->i2c_dev;
+ kfree(i2c_dev);
+ free_irq(dev->irq, hdmi_dev);
+}
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644
index 00000000000..238bbe10530
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/mrst.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * The max/min PWM frequency is in BPCR[31:17]. The smallest value that
+ * fits in the 15-bit field is 1 (not 0); it is then shifted left by one
+ * bit to give the actual 16-bit value that the 15 bits correspond to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * Sets the power state for the panel.
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+ struct psb_intel_encoder *psb_intel_encoder,
+ bool on)
+{
+ u32 pp_status;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+ dev_priv->is_lvds_on = true;
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, true);
+ } else {
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, false);
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ dev_priv->is_lvds_on = false;
+ pm_request_idle(&dev->pdev->dev);
+ }
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ else
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ u32 lvds_port;
+ uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+ lvds_port = (REG_READ(LVDS) &
+ (~LVDS_PIPEB_SELECT)) |
+ LVDS_PORT_EN |
+ LVDS_BORDER_EN;
+
+ /* Enable dithering if the firmware asks for it on Moorestown
+ or the BIOS does on Oaktrail */
+ if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(LVDS, lvds_port);
+
+ /* Find the connector we're trying to set up */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ break;
+ }
+
+ /* list_for_each_entry() never yields NULL; check whether we ran off
+ the end of the list instead */
+ if (&connector->head == &mode_config->connector_list) {
+ DRM_ERROR("Couldn't find connector when setting mode");
+ gma_power_end(dev);
+ return;
+ }
+
+ drm_connector_property_get_value(
+ connector,
+ dev->mode_config.scaling_mode_property,
+ &v);
+
+ if (v == DRM_MODE_SCALE_NO_SCALE)
+ REG_WRITE(PFIT_CONTROL, 0);
+ else if (v == DRM_MODE_SCALE_ASPECT) {
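+ /* Compare the mode and panel aspect ratios (by cross-multiplying
+ the sizes) to choose plain scaling, pillarbox or letterbox */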
+ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ else if ((adjusted_mode->crtc_hdisplay *
+ mode->vdisplay) > (mode->hdisplay *
+ adjusted_mode->crtc_vdisplay))
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_PILLARBOX);
+ else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+ PFIT_SCALING_MODE_LETTERBOX);
+ } else
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+ } else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+ gma_power_end(dev);
+}
+
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ gma_power_end(dev);
+}
+
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ ret = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return ret;
+}
+
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ oaktrail_lvds_get_max_backlight(dev);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+ .dpms = oaktrail_lvds_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = oaktrail_lvds_prepare,
+ .mode_set = oaktrail_lvds_mode_set,
+ .commit = oaktrail_lvds_commit,
+};
+
+static struct drm_display_mode lvds_configuration_modes[] = {
+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
+ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+ 846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+ /* hard coded fixed mode for LVDS 800x480 */
+ { DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+ 802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+ 1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+ 1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+ { DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+ 1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+ /* hard coded fixed mode for LVDS 1024x768 */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+ /* hard coded fixed mode for LVDS 1366x768 */
+ { DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+ 1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+};
+
+/* Returns the panel fixed mode from configuration. */
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+ mode_dev->panel_fixed_mode = NULL;
+
+ /* Use the firmware provided data on Moorestown */
+ if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return;
+
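+ /* Assemble the panel mode from the GCT DTD timing fields */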
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay +
+ ((ti->hsync_offset_hi << 8) | ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start +
+ ((ti->hsync_pulse_width_hi << 8) | ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay +
+ ((ti->hblank_hi << 8) | ti->hblank_lo);
+ mode->vsync_start = mode->vdisplay +
+ ((ti->vsync_offset_hi << 4) | ti->vsync_offset_lo);
+ mode->vsync_end = mode->vsync_start +
+ ((ti->vsync_pulse_width_hi << 4) | ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay +
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+#if 0
+ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+ printk(KERN_INFO "htotal is %d\n", mode->htotal);
+ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+ printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+ mode_dev->panel_fixed_mode = mode;
+ }
+
+ /* Use the BIOS VBT mode if available */
+ if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+ mode_dev->vbt_mode);
+
+ /* Then try the LVDS VBT mode */
+ if (mode_dev->panel_fixed_mode == NULL)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+ /* Then guess */
+ if (mode_dev->panel_fixed_mode == NULL)
+ mode_dev->panel_fixed_mode
+ = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+ drm_mode_set_name(mode_dev->panel_fixed_mode);
+ drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct edid *edid;
+ int ret = 0;
+ struct i2c_adapter *i2c_adap;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
+ return;
+
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ dev_priv->is_lvds_on = true;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ mode_dev->panel_wants_dither = false;
+ if (dev_priv->vbt_data.size != 0x00)
+ mode_dev->panel_wants_dither = (dev_priv->gct_data.
+ Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+ if (dev_priv->lvds_dither)
+ mode_dev->panel_wants_dither = 1;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+ if (i2c_adap == NULL)
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ if (i2c_adap) {
+ edid = drm_get_edid(connector, i2c_adap);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ }
+ /*
+ * If we didn't get an EDID, try getting the panel timing
+ * from the configuration data
+ */
+ oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+
+/* failed_ddc: */
+
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
+}
+
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
new file mode 100644
index 00000000000..94025693bae
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.c
@@ -0,0 +1,316 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex; /* Serialize power ops */
+static spinlock_t power_ctrl_lock; /* Serialize power claim */
+
+/**
+ * gma_power_init - initialise power manager
+ * @dev: our device
+ *
+ * Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: Move APM/OSPM base into relevant device code */
+ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+ dev_priv->ospm_base &= 0xffff;
+
+ dev_priv->display_power = true; /* We start active */
+ dev_priv->display_count = 0; /* Currently no users */
+ dev_priv->suspended = false; /* And not suspended */
+ spin_lock_init(&power_ctrl_lock);
+ mutex_init(&power_mutex);
+
+ dev_priv->ops->init_pm(dev);
+}
+
+/**
+ * gma_power_uninit - end power manager
+ * @dev: device to end for
+ *
+ * Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+ pm_runtime_disable(&dev->pdev->dev);
+ pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ * gma_suspend_display - suspend the display logic
+ * @dev: our DRM device
+ *
+ * Suspend the display logic of the graphics interface
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->suspended)
+ return;
+ dev_priv->ops->save_regs(dev);
+ dev_priv->ops->power_down(dev);
+ dev_priv->display_power = false;
+}
+
+/**
+ * gma_resume_display - resume display side logic
+ * @pdev: our PCI device
+ *
+ * Resume the display hardware restoring state and enabling
+ * as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->suspended == false)
+ return;
+
+ /* turn on the display power island */
+ dev_priv->ops->power_up(dev);
+ dev_priv->suspended = false;
+ dev_priv->display_power = true;
+
+ PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
+ dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+ dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ * gma_suspend_pci - suspend PCI side
+ * @pdev: PCI device
+ *
+ * Perform the suspend processing on our PCI device state
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int bsm, vbt;
+
+ if (dev_priv->suspended)
+ return;
+
+ pci_save_state(pdev);
+ pci_read_config_dword(pdev, 0x5C, &bsm);
+ dev_priv->saveBSM = bsm;
+ pci_read_config_dword(pdev, 0xFC, &vbt);
+ dev_priv->saveVBT = vbt;
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+ pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ dev_priv->suspended = true;
+}
+
+/**
+ * gma_resume_pci - resume helper
+ * @dev: our PCI device
+ *
+ * Perform the resume processing on our PCI device state - rewrite
+ * register state and re-enable the PCI device
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->suspended)
+ return true;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+ pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+ /* restoring MSI address and data in PCIx space */
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+ pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+ ret = pci_enable_device(pdev);
+
+ if (ret != 0)
+ dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+ else
+ dev_priv->suspended = false;
+ return !dev_priv->suspended;
+}
+
+/**
+ * gma_power_suspend - bus callback for suspend
+ * @_dev: our device
+ *
+ * Called back by the PCI layer during a suspend of the system. We
+ * perform the necessary shut down steps and save enough state that
+ * we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&power_mutex);
+ if (!dev_priv->suspended) {
+ if (dev_priv->display_count) {
+ mutex_unlock(&power_mutex);
+ return -EBUSY;
+ }
+ psb_irq_uninstall(dev);
+ gma_suspend_display(dev);
+ gma_suspend_pci(pdev);
+ }
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_resume - resume power
+ * @_dev: our device
+ *
+ * Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ mutex_lock(&power_mutex);
+ gma_resume_pci(pdev);
+ gma_resume_display(pdev);
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ mutex_unlock(&power_mutex);
+ return 0;
+}
+
+/**
+ * gma_power_is_on - return true if power is on
+ * @dev: our DRM device
+ *
+ * Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->display_power;
+}
+
+/**
+ * gma_power_begin - begin requiring power
+ * @dev: our DRM device
+ * @force_on: true to force power on
+ *
+ * Begin an action that requires the display power island is enabled.
+ * We refcount the islands.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&power_ctrl_lock, flags);
+ /* Power already on ? */
+ if (dev_priv->display_power) {
+ dev_priv->display_count++;
+ pm_runtime_get(&dev->pdev->dev);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+ if (force_on == false)
+ goto out_false;
+
+ /* Ok power up needed */
+ ret = gma_resume_pci(dev->pdev);
+ if (ret) {
+ psb_irq_preinstall(dev);
+ psb_irq_postinstall(dev);
+ pm_runtime_get(&dev->pdev->dev);
+ dev_priv->display_count++;
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return true;
+ }
+out_false:
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ return false;
+}
+
+/**
+ * gma_power_end - end use of power
+ * @dev: Our DRM device
+ *
+ * Indicate that one of our gma_power_begin() requested periods when
+ * the display island power is needed has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ spin_lock_irqsave(&power_ctrl_lock, flags);
+ dev_priv->display_count--;
+ WARN_ON(dev_priv->display_count < 0);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
+ pm_runtime_put(&dev->pdev->dev);
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+ return gma_power_suspend(dev);
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+ return gma_power_resume(dev);
+}
+
+int psb_runtime_idle(struct device *dev)
+{
+ struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_psb_private *dev_priv = drmdev->dev_private;
+ if (dev_priv->display_count)
+ return 0;
+ else
+ return 1;
+}
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
new file mode 100644
index 00000000000..1969d2ecb32
--- /dev/null
+++ b/drivers/gpu/drm/gma500/power.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ * Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
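+
+/*
+ * A minimal usage sketch (this mirrors what the display code in this
+ * driver does around register access): take a reference on the power
+ * island, touch the hardware, then drop the reference:
+ *
+ *     if (gma_power_begin(dev, true)) {
+ *             ... REG_READ()/REG_WRITE() ...
+ *             gma_power_end(dev);
+ *     }
+ */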
+
+/*
+ * Use this function for an instantaneous check that the hardware is on.
+ * Only use it where you know the mutex is already held, such as in irq
+ * install/uninstall, and you need to prevent a deadlock. Otherwise use
+ * gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644
index 00000000000..e5f5906172b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -0,0 +1,328 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static int psb_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+ psb_intel_sdvo_init(dev, SDVOB);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return psb_brightness;
+}
+
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
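+ /*
+ * Derive the BLC_PWM_CTL modulation frequency field from the core
+ * clock: core clock in Hz / BLC_PWM_FREQ_CALC_CONSTANT / bl_max_freq,
+ * scaled by the PWM precision factor around the divide.
+ */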
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+ }
+ return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(psb_backlight_device);
+ int level = bd->props.brightness;
+
+ /* Percentages of 1-100% are valid */
+ if (level < 1)
+ level = 1;
+
+ psb_intel_lvds_set_brightness(dev, level);
+ psb_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+ .get_brightness = psb_get_brightness,
+ .update_status = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ psb_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &psb_ops, &props);
+ if (IS_ERR(psb_backlight_device))
+ return PTR_ERR(psb_backlight_device);
+
+ ret = psb_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(psb_backlight_device);
+ psb_backlight_device = NULL;
+ return ret;
+ }
+ psb_backlight_device->props.brightness = 100;
+ psb_backlight_device->props.max_brightness = 100;
+ backlight_update_status(psb_backlight_device);
+ dev_priv->backlight_device = psb_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Poulsbo specific chip logic and low level methods
+ * for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+ gating &= ~3; /* Disable 2D clock gating */
+ gating |= 1;
+ PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+ PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ * psb_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Save crtc and output state */
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->save(crtc);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->save(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+}
+
+/**
+ * psb_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+ /*make sure VGA plane is off. it initializes to on after reset!*/
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->restore(crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->restore(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+static void psb_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ dev_priv->core_freq = 266;
+ break;
+ default:
+ dev_priv->core_freq = 0;
+ }
+}
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+ psb_get_core_freq(dev);
+ gma_intel_setup_gmbus(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+static void psb_chip_teardown(struct drm_device *dev)
+{
+ gma_intel_teardown_gmbus(dev);
+}
+
+const struct psb_ops psb_chip_ops = {
+ .name = "Poulsbo",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = PSB_SGX_OFFSET,
+ .chip_setup = psb_chip_setup,
+ .chip_teardown = psb_chip_teardown,
+
+ .crtc_helper = &psb_intel_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = psb_backlight_init,
+#endif
+
+ .init_pm = psb_init_pm,
+ .save_regs = psb_save_display_registers,
+ .restore_regs = psb_restore_display_registers,
+ .power_down = psb_power_down,
+ .power_up = psb_power_up,
+};
+
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 00000000000..f14768f2b36
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,703 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+
+
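+/*
+ * Each PCI ID entry carries a pointer to its psb_ops structure in the
+ * driver_data field; psb_driver_load() recovers it through the chipset
+ * argument.
+ */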
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+ { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ /* Atom E620 */
+ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+ { 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_ADB \
+ DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION \
+ DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+ struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY \
+ DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+ struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_GAMMA \
+ DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+ struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL \
+ DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+ uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
+ DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+ struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE \
+ DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+ DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+ psb_intel_get_pipe_from_crtc_id, 0),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static void psb_lastclose(struct drm_device *dev)
+{
+ return;
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ uint32_t stolen_gtt;
+
+ int ret = -ENOMEM;
+
+ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+ dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+
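+ /*
+ * Work out how many pages of GTT entries (4 bytes each) the stolen
+ * area consumes, clamp that to the GTT size, and start handing out
+ * GATT space beyond the range those entries map (each page of
+ * entries covers 1024 pages of memory).
+ */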
+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stolen_gtt =
+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+ (stolen_gtt << PAGE_SHIFT) * 1024;
+
+ if (1 || drm_debug) {
+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+ _PSB_CC_REVISION_MAJOR_SHIFT,
+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+ _PSB_CC_REVISION_MINOR_SHIFT);
+ DRM_INFO
+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+ _PSB_CC_REVISION_DESIGNER_SHIFT);
+ }
+
+
+ spin_lock_init(&dev_priv->irqmask_lock);
+ spin_lock_init(&dev_priv->lock_2d);
+
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+ PSB_RSGX32(PSB_CR_BIF_BANK1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+ PSB_CR_BIF_CTRL);
+ psb_spank(dev_priv);
+
+ /* mmu_gatt ?? */
+ PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ return 0;
+out_err:
+ psb_do_takedown(dev);
+ return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* Kill vblank etc here */
+
+ gma_backlight_exit(dev);
+
+ psb_modeset_cleanup(dev);
+
+ if (dev_priv) {
+ psb_lid_timer_takedown(dev_priv);
+ gma_intel_opregion_exit(dev);
+
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
+ psb_do_takedown(dev);
+
+
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+
+ /*destroy VBT data*/
+ psb_intel_destroy_bios(dev);
+ }
+
+ gma_power_uninit(dev);
+
+ return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct drm_psb_private *dev_priv;
+ unsigned long resource_start;
+ struct psb_gtt *pg;
+ unsigned long irqflags;
+ int ret = -ENOMEM;
+ uint32_t tt_pages;
+ struct drm_connector *connector;
+ struct psb_intel_encoder *psb_intel_encoder;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev_priv->ops = (struct psb_ops *)chipset;
+ dev_priv->dev = dev;
+ dev->dev_private = (void *) dev_priv;
+
+ if (!IS_PSB(dev)) {
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ }
+
+ dev_priv->num_pipe = dev_priv->ops->pipes;
+
+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+ dev_priv->vdc_reg =
+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+ if (!dev_priv->vdc_reg)
+ goto out_err;
+
+ dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+ PSB_SGX_SIZE);
+ if (!dev_priv->sgx_reg)
+ goto out_err;
+
+ ret = dev_priv->ops->chip_setup(dev);
+ if (ret)
+ goto out_err;
+
+ /* Init OSPM support */
+ gma_power_init(dev);
+
+ ret = -ENOMEM;
+
+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+ if (!dev_priv->scratch_page)
+ goto out_err;
+
+ set_pages_uc(dev_priv->scratch_page, 1);
+
+ ret = psb_gtt_init(dev, 0);
+ if (ret)
+ goto out_err;
+
+ dev_priv->mmu = psb_mmu_driver_init((void *)0,
+ drm_psb_trap_pagefaults, 0,
+ dev_priv);
+ if (!dev_priv->mmu)
+ goto out_err;
+
+ pg = &dev_priv->gtt;
+
+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+
+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+ if (!dev_priv->pf_pd)
+ goto out_err;
+
+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+ ret = psb_do_init(dev);
+ if (ret)
+ return ret;
+
+ PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+ PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+/* igd_opregion_init(&dev_priv->opregion_dev); */
+ acpi_video_register();
+ if (dev_priv->lid_state)
+ psb_lid_timer_init(dev_priv);
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Install interrupt handlers prior to powering off SGX or else we will
+ * crash.
+ */
+ dev_priv->vdc_irq_mask = 0;
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+ dev_priv->pipestat[2] = 0;
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_install(dev);
+
+ dev->vblank_disable_allowed = 1;
+
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+ psb_modeset_init(dev);
+ psb_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ case INTEL_OUTPUT_MIPI:
+ ret = gma_backlight_init(dev);
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+#if 0
+ /*enable runtime pm at last*/
+ pm_runtime_enable(&dev->pdev->dev);
+ pm_runtime_set_active(&dev->pdev->dev);
+#endif
+ /*Intel drm driver load is done, continue doing pvr load*/
+ return 0;
+out_err:
+ psb_driver_unload(dev);
+ return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ if (bd) {
+ bd->props.brightness = bd->ops->get_brightness(bd);
+ backlight_update_status(bd);
+ }
+#endif
+}
+
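+/*
+ * blc_adj1/blc_adj2 below are backlight adjustment factors supplied by
+ * userspace; the exact scaling they apply is left to the backlight code.
+ * After updating them the backlight is re-read so the change takes effect.
+ */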
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj2 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj1 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
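+/*
+ * Copy a 256-entry gamma LUT supplied by userspace into the CRTC driving
+ * the given connector and reload the hardware palette.
+ */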
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_dpst_lut_arg *lut_arg = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct psb_intel_crtc *psb_intel_crtc;
+ int i = 0;
+ int32_t obj_id;
+
+ obj_id = lut_arg->output_id;
+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ dev_dbg(dev->dev, "Invalid Connector object.\n");
+ return -EINVAL;
+ }
+
+ connector = obj_to_connector(obj);
+ crtc = connector->encoder->crtc;
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ for (i = 0; i < 256; i++)
+ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+ psb_intel_crtc_load_lut(crtc);
+
+ return 0;
+}
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ uint32_t obj_id;
+ uint16_t op;
+ struct drm_mode_modeinfo *umode;
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_mode_operation_arg *arg;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ int ret = 0;
+ int resp = MODE_OK;
+
+ arg = (struct drm_psb_mode_operation_arg *)data;
+ obj_id = arg->obj_id;
+ op = arg->operation;
+
+ switch (op) {
+ case PSB_MODE_OPERATION_MODE_VALID:
+ umode = &arg->mode;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, obj_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto mode_op_out;
+ }
+
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto mode_op_out;
+ }
+
+ /* drm_crtc_convert_umode(mode, umode); */
+ {
+ mode->clock = umode->clock;
+ mode->hdisplay = umode->hdisplay;
+ mode->hsync_start = umode->hsync_start;
+ mode->hsync_end = umode->hsync_end;
+ mode->htotal = umode->htotal;
+ mode->hskew = umode->hskew;
+ mode->vdisplay = umode->vdisplay;
+ mode->vsync_start = umode->vsync_start;
+ mode->vsync_end = umode->vsync_end;
+ mode->vtotal = umode->vtotal;
+ mode->vscan = umode->vscan;
+ mode->vrefresh = umode->vrefresh;
+ mode->flags = umode->flags;
+ mode->type = umode->type;
+ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ }
+
+ connector_funcs = (struct drm_connector_helper_funcs *)
+ connector->helper_private;
+
+ if (connector_funcs->mode_valid) {
+ resp = connector_funcs->mode_valid(connector, mode);
+ arg->data = resp;
+ }
+
+ /*do some clean up work*/
+ if (mode)
+ drm_mode_destroy(dev, mode);
+mode_op_out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+
+ default:
+ dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ struct drm_psb_stolen_memory_arg *arg = data;
+
+ arg->base = dev_priv->stolen_base;
+ arg->size = dev_priv->vram_stolen_size;
+
+ return 0;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+ return 0;
+}
+
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ static unsigned int runtime_allowed;
+
+ if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+ runtime_allowed++;
+ pm_runtime_allow(&dev->pdev->dev);
+ dev_priv->rpm_enabled = 1;
+ }
+ return drm_ioctl(filp, cmd, arg);
+ /* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ * - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+ .resume = gma_power_resume,
+ .suspend = gma_power_suspend,
+ .runtime_suspend = psb_runtime_suspend,
+ .runtime_resume = psb_runtime_resume,
+ .runtime_idle = psb_runtime_idle,
+};
+
+static struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
+ .load = psb_driver_load,
+ .unload = psb_driver_unload,
+
+ .ioctls = psb_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+ .device_is_agp = psb_driver_device_is_agp,
+ .irq_preinstall = psb_irq_preinstall,
+ .irq_postinstall = psb_irq_postinstall,
+ .irq_uninstall = psb_irq_uninstall,
+ .irq_handler = psb_irq_handler,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
+ .lastclose = psb_lastclose,
+ .open = psb_driver_open,
+ .preclose = psb_driver_preclose,
+ .postclose = psb_driver_close,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+
+ .gem_init_object = psb_gem_init_object,
+ .gem_free_object = psb_gem_free_object,
+ .gem_vm_ops = &psb_gem_vm_ops,
+ .dumb_create = psb_gem_dumb_create,
+ .dumb_map_offset = psb_gem_dumb_map_gtt,
+ .dumb_destroy = psb_gem_dumb_destroy,
+ .fops = &psb_gem_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = PSB_DRM_DRIVER_DATE,
+ .major = PSB_DRM_DRIVER_MAJOR,
+ .minor = PSB_DRM_DRIVER_MINOR,
+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = psb_probe,
+ .remove = psb_remove,
+ .driver.pm = &psb_pm_ops,
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+ return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+ drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644
index 00000000000..eb1568a0da9
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -0,0 +1,956 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include "drm_global.h"
+#include "gem_glue.h"
+#include "gma_drm.h"
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "gtt.h"
+#include "power.h"
+#include "oaktrail.h"
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE 2
+
+enum {
+ CHIP_PSB_8108 = 0, /* Poulsbo */
+ CHIP_PSB_8109 = 1, /* Poulsbo */
+ CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
+ CHIP_MFLD_0130 = 3, /* Medfield */
+};
+
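+/* Identify the chip family from the (masked) PCI device ID */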
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+
+/*
+ * Driver definitions
+ */
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
+
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+/*
+ * Hardware offsets
+ */
+#define PSB_VDC_OFFSET 0x00000000
+#define PSB_VDC_SIZE 0x000080000
+#define MRST_MMIO_SIZE 0x0000C0000
+#define MDFLD_MMIO_SIZE 0x000100000
+#define PSB_SGX_SIZE 0x8000
+#define PSB_SGX_OFFSET 0x00040000
+#define MRST_SGX_OFFSET 0x00080000
+/*
+ * PCI resource identifiers
+ */
+#define PSB_MMIO_RESOURCE 0
+#define PSB_GATT_RESOURCE 2
+#define PSB_GTT_RESOURCE 3
+/*
+ * PCI configuration
+ */
+#define PSB_GMCH_CTRL 0x52
+#define PSB_BSM 0x5C
+#define _PSB_GMCH_ENABLED 0x4
+#define PSB_PGETBL_CTL 0x2020
+#define _PSB_PGETBL_ENABLED 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT 0x4000
+
+/* To get rid of */
+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/*
+ * SGX side MMU definitions (these can probably go)
+ */
+
+/*
+ * Flags for external memory type field.
+ */
+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
+/*
+ * PTE's and PDE's
+ */
+#define PSB_PDE_MASK 0x003FFFFF
+#define PSB_PDE_SHIFT 22
+#define PSB_PTE_SHIFT 12
+/*
+ * Cache control
+ */
+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
+#define PSB_PTE_WO 0x0002 /* Write only */
+#define PSB_PTE_RO 0x0004 /* Read only */
+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
+
+/*
+ * VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING 0x2064
+#define PSB_TOPAZ_CLOCKGATING 0x2068
+#define PSB_HWSTAM 0x2098
+#define PSB_INSTPM 0x20C0
+#define PSB_INT_IDENTITY_R 0x20A4
+#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
+#define _PSB_DPST_PIPEB_FLAG (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
+#define _PSB_DPST_PIPEA_FLAG (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
+#define _MDFLD_MIPIA_FLAG (1<<16)
+#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_SGX_FLAG (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \
+ _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits excepts the vblank irqs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+ _MDFLD_PIPEB_EVENT_FLAG | \
+ _PSB_PIPEA_EVENT_FLAG | \
+ _PSB_VSYNC_PIPEA_FLAG | \
+ _MDFLD_MIPIA_FLAG | \
+ _MDFLD_MIPIC_FLAG)
+#define PSB_INT_MASK_R 0x20A8
+#define PSB_INT_ENABLE_R 0x20A0
+
+#define _PSB_MMU_ER_MASK 0x0001FF00
+#define _PSB_MMU_ER_HOST (1 << 16)
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST 1
+#define PSB_UIRQ_OOM_REPLY 2
+#define PSB_UIRQ_FIRE_TA_REPLY 3
+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 (1 << 0)
+#define MDFLD_DSR_2D_3D_2 (1 << 1)
+#define MDFLD_DSR_CURSOR_0 (1 << 2)
+#define MDFLD_DSR_CURSOR_2 (1 << 3)
+#define MDFLD_DSR_OVERLAY_0 (1 << 4)
+#define MDFLD_DSR_OVERLAY_2 (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE (1 << 31)
+#define MDFLD_DSR_FULLSCREEN (1 << 30)
+#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+
+#define PSB_PWR_STATE_ON 1
+#define PSB_PWR_STATE_OFF 2
+
+#define PSB_PMPOLICY_NOPM 0
+#define PSB_PMPOLICY_CLOCKGATING 1
+#define PSB_PMPOLICY_POWERDOWN 2
+
+#define PSB_PMSTATE_POWERUP 0
+#define PSB_PMSTATE_CLOCKGATED 1
+#define PSB_PMSTATE_POWERDOWN 2
+#define PSB_PCIx_MSI_ADDR_LOC 0x94
+#define PSB_PCIx_MSI_DATA_LOC 0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ int enabled;
+};
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 i2c_speed;
+ u8 ddc_pin;
+};
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+};
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE 3
+
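+/*
+ * Per-device private state. The large block of save* fields is register
+ * save/restore space used by the suspend/resume and power management
+ * paths.
+ */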
+struct drm_psb_private {
+ struct drm_device *dev;
+ const struct psb_ops *ops;
+
+ struct psb_gtt gtt;
+
+ /* GTT Memory manager */
+ struct psb_gtt_mm *gtt_mm;
+ struct page *scratch_page;
+ u32 *gtt_map;
+ uint32_t stolen_base;
+ void *vram_addr;
+ unsigned long vram_stolen_size;
+ int gtt_initialized;
+ u16 gmch_ctrl; /* Saved GTT setup */
+ u32 pge_ctl;
+
+ struct mutex gtt_mutex;
+ struct resource *gtt_mem; /* Our PCI resource */
+
+ struct psb_mmu_driver *mmu;
+ struct psb_mmu_pd *pf_pd;
+
+ /*
+ * Register base
+ */
+
+ uint8_t *sgx_reg;
+ uint8_t *vdc_reg;
+ uint32_t gatt_free_offset;
+
+ /*
+ * Fencing / irq.
+ */
+
+ uint32_t vdc_irq_mask;
+ uint32_t pipestat[PSB_NUM_PIPE];
+
+ spinlock_t irqmask_lock;
+
+ /*
+ * Power
+ */
+
+ bool suspended;
+ bool display_power;
+ int display_count;
+
+ /*
+ * Modesetting
+ */
+ struct psb_intel_mode_device mode_dev;
+
+ struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+ struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+ uint32_t num_pipe;
+
+ /*
+ * OSPM info (Power management base) (can go ?)
+ */
+ uint32_t ospm_base;
+
+ /*
+ * Sizes info
+ */
+
+ u32 fuse_reg_value;
+ u32 video_device_fuse;
+
+ /* PCI revision ID for B0:D2:F0 */
+ uint8_t platform_rev_id;
+
+ /* gmbus */
+ struct intel_gmbus *gmbus;
+
+ /* Used by SDVO */
+ int crt_ddc_pin;
+ /* FIXME: The mappings should be parsed from bios but for now we can
+ pretend there are no mappings available */
+ struct sdvo_device_mapping sdvo_mappings[2];
+ u32 hotplug_supported_mask;
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ /*
+ * LVDS info
+ */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *lfp_lvds_vbt_mode;
+ struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+ struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+ struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ int lvds_ssc_freq;
+ bool is_lvds_on;
+ bool is_mipi_on;
+ u32 mipi_ctrl_display;
+
+ unsigned int core_freq;
+ uint32_t iLVDS_enable;
+
+ /* Runtime PM state */
+ int rpm_enabled;
+
+ /* MID specific */
+ struct oaktrail_vbt vbt_data;
+ struct oaktrail_gct_data gct_data;
+
+ /* MIPI Panel type etc */
+ int panel_id;
+ bool dual_mipi; /* dual display - DPI & DBI */
+ bool dpi_panel_on; /* The DPI panel power is on */
+ bool dpi_panel_on2; /* The DPI panel power is on */
+ bool dbi_panel_on; /* The DBI panel power is on */
+ bool dbi_panel_on2; /* The DBI panel power is on */
+ u32 dsr_fb_update; /* DSR FB update counter */
+
+ /* Moorestown HDMI state */
+ struct oaktrail_hdmi_dev *hdmi_priv;
+
+ /* Moorestown pipe config register value cache */
+ uint32_t pipeconf;
+ uint32_t pipeconf1;
+ uint32_t pipeconf2;
+
+ /* Moorestown plane control register value cache */
+ uint32_t dspcntr;
+ uint32_t dspcntr1;
+ uint32_t dspcntr2;
+
+ /* Moorestown MM backlight cache */
+ uint8_t saveBKLTCNT;
+ uint8_t saveBKLTREQ;
+ uint8_t saveBKLTBRTL;
+
+ /*
+ * Register state
+ */
+ uint32_t saveDSPACNTR;
+ uint32_t saveDSPBCNTR;
+ uint32_t savePIPEACONF;
+ uint32_t savePIPEBCONF;
+ uint32_t savePIPEASRC;
+ uint32_t savePIPEBSRC;
+ uint32_t saveFPA0;
+ uint32_t saveFPA1;
+ uint32_t saveDPLL_A;
+ uint32_t saveDPLL_A_MD;
+ uint32_t saveHTOTAL_A;
+ uint32_t saveHBLANK_A;
+ uint32_t saveHSYNC_A;
+ uint32_t saveVTOTAL_A;
+ uint32_t saveVBLANK_A;
+ uint32_t saveVSYNC_A;
+ uint32_t saveDSPASTRIDE;
+ uint32_t saveDSPASIZE;
+ uint32_t saveDSPAPOS;
+ uint32_t saveDSPABASE;
+ uint32_t saveDSPASURF;
+ uint32_t saveDSPASTATUS;
+ uint32_t saveFPB0;
+ uint32_t saveFPB1;
+ uint32_t saveDPLL_B;
+ uint32_t saveDPLL_B_MD;
+ uint32_t saveHTOTAL_B;
+ uint32_t saveHBLANK_B;
+ uint32_t saveHSYNC_B;
+ uint32_t saveVTOTAL_B;
+ uint32_t saveVBLANK_B;
+ uint32_t saveVSYNC_B;
+ uint32_t saveDSPBSTRIDE;
+ uint32_t saveDSPBSIZE;
+ uint32_t saveDSPBPOS;
+ uint32_t saveDSPBBASE;
+ uint32_t saveDSPBSURF;
+ uint32_t saveDSPBSTATUS;
+ uint32_t saveVCLK_DIVISOR_VGA0;
+ uint32_t saveVCLK_DIVISOR_VGA1;
+ uint32_t saveVCLK_POST_DIV;
+ uint32_t saveVGACNTRL;
+ uint32_t saveADPA;
+ uint32_t saveLVDS;
+ uint32_t saveDVOA;
+ uint32_t saveDVOB;
+ uint32_t saveDVOC;
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePaletteA[256];
+ uint32_t savePaletteB[256];
+ uint32_t saveBLC_PWM_CTL2;
+ uint32_t saveBLC_PWM_CTL;
+ uint32_t saveCLOCKGATING;
+ uint32_t saveDSPARB;
+ uint32_t saveDSPATILEOFF;
+ uint32_t saveDSPBTILEOFF;
+ uint32_t saveDSPAADDR;
+ uint32_t saveDSPBADDR;
+ uint32_t savePFIT_AUTO_RATIOS;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t savePP_ON_DELAYS;
+ uint32_t savePP_OFF_DELAYS;
+ uint32_t savePP_DIVISOR;
+ uint32_t saveBSM;
+ uint32_t saveVBT;
+ uint32_t saveBCLRPAT_A;
+ uint32_t saveBCLRPAT_B;
+ uint32_t saveDSPALINOFF;
+ uint32_t saveDSPBLINOFF;
+ uint32_t savePERF_MODE;
+ uint32_t saveDSPFW1;
+ uint32_t saveDSPFW2;
+ uint32_t saveDSPFW3;
+ uint32_t saveDSPFW4;
+ uint32_t saveDSPFW5;
+ uint32_t saveDSPFW6;
+ uint32_t saveCHICKENBIT;
+ uint32_t saveDSPACURSOR_CTRL;
+ uint32_t saveDSPBCURSOR_CTRL;
+ uint32_t saveDSPACURSOR_BASE;
+ uint32_t saveDSPBCURSOR_BASE;
+ uint32_t saveDSPACURSOR_POS;
+ uint32_t saveDSPBCURSOR_POS;
+ uint32_t save_palette_a[256];
+ uint32_t save_palette_b[256];
+ uint32_t saveOV_OVADD;
+ uint32_t saveOV_OGAMC0;
+ uint32_t saveOV_OGAMC1;
+ uint32_t saveOV_OGAMC2;
+ uint32_t saveOV_OGAMC3;
+ uint32_t saveOV_OGAMC4;
+ uint32_t saveOV_OGAMC5;
+ uint32_t saveOVC_OVADD;
+ uint32_t saveOVC_OGAMC0;
+ uint32_t saveOVC_OGAMC1;
+ uint32_t saveOVC_OGAMC2;
+ uint32_t saveOVC_OGAMC3;
+ uint32_t saveOVC_OGAMC4;
+ uint32_t saveOVC_OGAMC5;
+
+ /* MSI reg save */
+ uint32_t msi_addr;
+ uint32_t msi_data;
+
+ /* Medfield specific register save state */
+ uint32_t saveHDMIPHYMISCCTL;
+ uint32_t saveHDMIB_CONTROL;
+ uint32_t saveDSPCCNTR;
+ uint32_t savePIPECCONF;
+ uint32_t savePIPECSRC;
+ uint32_t saveHTOTAL_C;
+ uint32_t saveHBLANK_C;
+ uint32_t saveHSYNC_C;
+ uint32_t saveVTOTAL_C;
+ uint32_t saveVBLANK_C;
+ uint32_t saveVSYNC_C;
+ uint32_t saveDSPCSTRIDE;
+ uint32_t saveDSPCSIZE;
+ uint32_t saveDSPCPOS;
+ uint32_t saveDSPCSURF;
+ uint32_t saveDSPCSTATUS;
+ uint32_t saveDSPCLINOFF;
+ uint32_t saveDSPCTILEOFF;
+ uint32_t saveDSPCCURSOR_CTRL;
+ uint32_t saveDSPCCURSOR_BASE;
+ uint32_t saveDSPCCURSOR_POS;
+ uint32_t save_palette_c[256];
+ uint32_t saveOV_OVADD_C;
+ uint32_t saveOV_OGAMC0_C;
+ uint32_t saveOV_OGAMC1_C;
+ uint32_t saveOV_OGAMC2_C;
+ uint32_t saveOV_OGAMC3_C;
+ uint32_t saveOV_OGAMC4_C;
+ uint32_t saveOV_OGAMC5_C;
+
+ /* DSI register save */
+ uint32_t saveDEVICE_READY_REG;
+ uint32_t saveINTR_EN_REG;
+ uint32_t saveDSI_FUNC_PRG_REG;
+ uint32_t saveHS_TX_TIMEOUT_REG;
+ uint32_t saveLP_RX_TIMEOUT_REG;
+ uint32_t saveTURN_AROUND_TIMEOUT_REG;
+ uint32_t saveDEVICE_RESET_REG;
+ uint32_t saveDPI_RESOLUTION_REG;
+ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+ uint32_t saveINIT_COUNT_REG;
+ uint32_t saveMAX_RET_PAK_REG;
+ uint32_t saveVIDEO_FMT_REG;
+ uint32_t saveEOT_DISABLE_REG;
+ uint32_t saveLP_BYTECLK_REG;
+ uint32_t saveHS_LS_DBI_ENABLE_REG;
+ uint32_t saveTXCLKESC_REG;
+ uint32_t saveDPHY_PARAM_REG;
+ uint32_t saveMIPI_CONTROL_REG;
+ uint32_t saveMIPI;
+ uint32_t saveMIPI_C;
+
+ /* DPST register save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+ uint32_t savePWM_CONTROL_LOGIC;
+
+ /*
+ * DSI info.
+ */
+ void *dbi_dsr_info;
+ void *dbi_dpu_info;
+ void *dsi_configs[2];
+ /*
+ * LID-Switch
+ */
+ spinlock_t lid_lock;
+ struct timer_list lid_timer;
+ struct psb_intel_opregion opregion;
+ u32 *lid_state;
+ u32 lid_last_state;
+
+ /*
+ * Watchdog
+ */
+
+ uint32_t apm_reg;
+ uint16_t apm_base;
+
+ /*
+ * Used for modifying backlight from
+ * xrandr -- consider removing and using HAL instead
+ */
+ struct backlight_device *backlight_device;
+ struct drm_property *backlight_property;
+ uint32_t blc_adj1;
+ uint32_t blc_adj2;
+
+ void *fbdev;
+
+ /* 2D acceleration */
+ spinlock_t lock_2d;
+};
+
+
+/*
+ * Operations for each board type
+ */
+
+struct psb_ops {
+ const char *name;
+ unsigned int accel_2d:1;
+ int pipes; /* Number of output pipes */
+ int crtcs; /* Number of CRTCs */
+ int sgx_offset; /* Base offset of SGX device */
+
+ /* Sub functions */
+ struct drm_crtc_helper_funcs const *crtc_helper;
+ struct drm_crtc_funcs const *crtc_funcs;
+
+ /* Setup hooks */
+ int (*chip_setup)(struct drm_device *dev);
+ void (*chip_teardown)(struct drm_device *dev);
+
+ /* Display management hooks */
+ int (*output_init)(struct drm_device *dev);
+ /* Power management hooks */
+ void (*init_pm)(struct drm_device *dev);
+ int (*save_regs)(struct drm_device *dev);
+ int (*restore_regs)(struct drm_device *dev);
+ int (*power_up)(struct drm_device *dev);
+ int (*power_down)(struct drm_device *dev);
+
+ void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ /* Backlight */
+ int (*backlight_init)(struct drm_device *dev);
+#endif
+ int i2c_bus; /* I2C bus identifier for Moorestown */
+};
+
+
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+ return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ * MMU stuff.
+ */
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+ int trap_pagefaults,
+ int invalid_type,
+ struct drm_psb_private *dev_priv);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+ *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+ uint32_t gtt_start, uint32_t gtt_pages);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+ int trap_pagefaults,
+ int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+ unsigned long address,
+ uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+ uint32_t start_pfn,
+ unsigned long address,
+ uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+ unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+ unsigned long address, uint32_t num_pages,
+ uint32_t desired_tile_stride,
+ uint32_t hw_tile_stride);
+/*
+ *psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/*
+ * intel_opregion.c
+ */
+extern int gma_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_exit(struct drm_device *dev);
+
+/*
+ * framebuffer.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+/*
+ * accel_2d.c
+ */
+extern void psbfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern int psb_gem_init_object(struct drm_gem_object *obj);
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/*
+ * Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT (1 << 1)
+#define PSB_D_IRQ (1 << 2)
+#define PSB_D_ENTRY (1 << 3)
+/* debug the get H/V BP/FP count */
+#define PSB_D_HV (1 << 4)
+#define PSB_D_DBI_BF (1 << 5)
+#define PSB_D_PM (1 << 6)
+#define PSB_D_RENDER (1 << 7)
+#define PSB_D_REG (1 << 8)
+#define PSB_D_MSVDX (1 << 9)
+#define PSB_D_TOPAZ (1 << 10)
+
+extern int drm_psb_no_fb;
+extern int drm_idle_check_interval;
+
+/*
+ * Utilities
+ */
+
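+/*
+ * Message bus helpers: a command word (opcode << 24 | port << 16 |
+ * offset << 8) is written to config register 0xD0 of the host bridge at
+ * 0:0.0, and the data is then transferred through register 0xD4.
+ */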
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return ioread32(dev_priv->vdc_reg + reg);
+}
+
+#define REG_READ(reg) REGISTER_READ(dev, (reg))
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+ uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+ uint32_t reg, uint32_t val)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
+
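+/* Raw MMIO accessors; these expect a dev_priv pointer to be in scope */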
+#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs) \
+({ \
+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
+ printk(KERN_ERR \
+ "access sgx when it's off!! (READ) %s, %d\n", \
+ __FILE__, __LINE__); \
+ melay(1000); \
+ } \
+ ioread32(dev_priv->sgx_reg + (_offs)); \
+})
+#else
+#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644
index 00000000000..49e983508d5
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -0,0 +1,1446 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct psb_intel_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+#define INTEL_P2_NUM 2
+
+struct psb_intel_limit_t {
+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct psb_intel_p2_t p2;
+};
+
+#define I8XX_DOT_MIN 25000
+#define I8XX_DOT_MAX 350000
+#define I8XX_VCO_MIN 930000
+#define I8XX_VCO_MAX 1400000
+#define I8XX_N_MIN 3
+#define I8XX_N_MAX 16
+#define I8XX_M_MIN 96
+#define I8XX_M_MAX 140
+#define I8XX_M1_MIN 18
+#define I8XX_M1_MAX 26
+#define I8XX_M2_MIN 6
+#define I8XX_M2_MAX 16
+#define I8XX_P_MIN 4
+#define I8XX_P_MAX 128
+#define I8XX_P1_MIN 2
+#define I8XX_P1_MAX 33
+#define I8XX_P1_LVDS_MIN 1
+#define I8XX_P1_LVDS_MAX 6
+#define I8XX_P2_SLOW 4
+#define I8XX_P2_FAST 2
+#define I8XX_P2_LVDS_SLOW 14
+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
+#define I8XX_P2_SLOW_LIMIT 165000
+
+#define I9XX_DOT_MIN 20000
+#define I9XX_DOT_MAX 400000
+#define I9XX_VCO_MIN 1400000
+#define I9XX_VCO_MAX 2800000
+#define I9XX_N_MIN 3
+#define I9XX_N_MAX 8
+#define I9XX_M_MIN 70
+#define I9XX_M_MAX 120
+#define I9XX_M1_MIN 10
+#define I9XX_M1_MAX 20
+#define I9XX_M2_MIN 5
+#define I9XX_M2_MAX 9
+#define I9XX_P_SDVO_DAC_MIN 5
+#define I9XX_P_SDVO_DAC_MAX 80
+#define I9XX_P_LVDS_MIN 7
+#define I9XX_P_LVDS_MAX 98
+#define I9XX_P1_MIN 1
+#define I9XX_P1_MAX 8
+#define I9XX_P2_SDVO_DAC_SLOW 10
+#define I9XX_P2_SDVO_DAC_FAST 5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
+#define I9XX_P2_LVDS_SLOW 14
+#define I9XX_P2_LVDS_FAST 7
+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC 0
+#define INTEL_LIMIT_I8XX_LVDS 1
+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
+#define INTEL_LIMIT_I9XX_LVDS 3
+
+static const struct psb_intel_limit_t psb_intel_limits[] = {
+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
+ },
+ { /* INTEL_LIMIT_I8XX_LVDS */
+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
+ },
+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
+ I9XX_P2_SDVO_DAC_FAST},
+ },
+ { /* INTEL_LIMIT_I9XX_LVDS */
+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+ /* The single-channel range is 25-112MHz, and dual-channel
+ * is 80-224MHz. Prefer single channel as much as possible.
+ */
+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+ },
+};
+
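+/*
+ * Pick the PLL limit table: LVDS outputs use the i9xx LVDS limits,
+ * everything else the i9xx SDVO/DAC limits.
+ */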
+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+{
+ const struct psb_intel_limit_t *limit;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ else
+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+static void psb_intel_clock(struct drm_device *dev, int refclk,
+ struct psb_intel_clock_t *clock)
+{
+ return i9xx_clock(refclk, clock);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+ struct psb_intel_clock_t *clock)
+{
+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid("m1 out of range\n");
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk,
+ struct psb_intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_clock_t clock;
+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+ int err = target;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+ clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ psb_intel_clock(dev, refclk, &clock);
+
+ if (!psb_intel_PLL_is_valid
+ (crtc, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ mdelay(20);
+}
+
+int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_dbg(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ psb_gtt_unpin(psbfb->gtt);
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+
+ if (0 /* FIXMEAC - check what PSB needs */) {
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+ } else {
+ REG_WRITE(dspbase, start + offset);
+ REG_READ(dspbase);
+ }
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ psb_intel_wait_for_vblank(dev);
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ /*Set FIFO Watermarks*/
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare, see psb_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit, see psb_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ /* Must be on PIPE 1 for PSB */
+ return 1;
+}
+
+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ struct psb_intel_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ /* No scan out no play */
+ if (crtc->fb == NULL) {
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ return 0;
+ }
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ return 0;
+ }
+
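+ /* Pack the divisors into the FP register value: N from bit 16 up,
+ * M1 in bits 15:8 and M2 in bits 7:0, matching the FP_*_DIV_MASK
+ * decode in psb_intel_crtc_clock_get(). */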
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_lvds) {
+ dpll |= DPLLB_MODE_LVDS;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ } else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int sdvo_pixel_multiply =
+ adjusted_mode->clock / mode->clock;
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |=
+ (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 1)) << 16;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (psb_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ drm_mode_debug_printmodeline(mode);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ udelay(150);
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds &= ~LVDS_PIPEB_SELECT;
+ if (pipe == 1)
+ lvds |= LVDS_PIPEB_SELECT;
+
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ /* write it again -- the BIOS does, after all */
+ REG_WRITE(dpll_reg, dpll);
+
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
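+ /* The timing registers store (value - 1), with the active/start
+ * count in the low 16 bits and the total/end count in the high
+ * 16 bits. */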
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(pipesrc_reg,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+
+ psb_intel_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
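+ /* Each palette entry packs the adjusted R, G and B values
+ * as 0x00RRGGBB. */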
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->save_palette_a[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+/**
+ * Save HW states of the given crtc
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+ /*NOTE: DSPSIZE DSPPOS only for psb*/
+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private * dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+ }
+
+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+ REG_READ(pipeA ? FPA0 : FPB0);
+
+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+ REG_READ(pipeA ? FPA1 : FPB1);
+
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+
+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+ psb_intel_wait_for_vblank(dev);
+
+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+ psb_intel_wait_for_vblank(dev);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ /* if we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ /* turn off the cursor */
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "buffer is to small\n");
+ return -ENOMEM;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old bo */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ }
+
+ psb_intel_crtc->cursor_obj = obj;
+ return 0;
+}
+
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+
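+ /* The cursor position registers use sign-magnitude encoding:
+ * set the sign bit and program the absolute value for negative
+ * coordinates. */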
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t type, uint32_t size)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
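+ /* The hardware palette is 8 bits per channel, so drop the low
+ * byte of each 16-bit gamma value. */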
+ for (i = 0; i < 256; i++) {
+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+}
+
+static int psb_crtc_set_config(struct drm_mode_set *set)
+{
+ int ret;
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+ ret = drm_crtc_helper_set_config(set);
+ pm_runtime_allow(&dev->pdev->dev);
+ return ret;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ u32 dpll;
+ u32 fp;
+ struct psb_intel_clock_t clock;
+ bool is_lvds;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = (pipe == 0) ?
+ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA0 :
+ dev_priv->saveFPB0;
+ else
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA1 :
+ dev_priv->saveFPB1;
+
+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ }
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ gma_power_end(dev);
+ } else {
+ htot = (pipe == 0) ?
+ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ hsync = (pipe == 0) ?
+ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ vtot = (pipe == 0) ?
+ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ vsync = (pipe == 0) ?
+ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
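+ /* The timing registers hold (value - 1); undo that here, mirroring
+ * the encoding used in psb_intel_crtc_mode_set(). */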
+ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gtt_range *gt;
+
+ /* Unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+ kfree(psb_intel_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .dpms = psb_intel_crtc_dpms,
+ .mode_fixup = psb_intel_crtc_mode_fixup,
+ .mode_set = psb_intel_crtc_mode_set,
+ .mode_set_base = psb_intel_pipe_set_base,
+ .prepare = psb_intel_crtc_prepare,
+ .commit = psb_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+ .save = psb_intel_crtc_save,
+ .restore = psb_intel_crtc_restore,
+ .cursor_set = psb_intel_crtc_cursor_set,
+ .cursor_move = psb_intel_crtc_cursor_move,
+ .gamma_set = psb_intel_crtc_gamma_set,
+ .set_config = psb_crtc_set_config,
+ .destroy = psb_intel_crtc_destroy,
+};
+
+/*
+ * Set the default value of cursor control and base register
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+ u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+
+ REG_WRITE(control[pipe], 0);
+ REG_WRITE(base[pipe], 0);
+}
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc;
+ int i;
+ uint16_t *r_base, *g_base, *b_base;
+
+ /* We allocate an extra array of drm_connector pointers
+ * for fbdev after the crtc */
+ psb_intel_crtc =
+ kzalloc(sizeof(struct psb_intel_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+ if (psb_intel_crtc == NULL)
+ return;
+
+ psb_intel_crtc->crtc_state =
+ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+ if (!psb_intel_crtc->crtc_state) {
+ dev_err(dev->dev, "Crtc state error: No memory\n");
+ kfree(psb_intel_crtc);
+ return;
+ }
+
+ /* Set the CRTC operations from the chip specific data */
+ drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+ psb_intel_crtc->pipe = pipe;
+ psb_intel_crtc->plane = pipe;
+
+ r_base = psb_intel_crtc->base.gamma_store;
+ g_base = r_base + 256;
+ b_base = g_base + 256;
+ for (i = 0; i < 256; i++) {
+ psb_intel_crtc->lut_r[i] = i;
+ psb_intel_crtc->lut_g[i] = i;
+ psb_intel_crtc->lut_b[i] = i;
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+
+ psb_intel_crtc->lut_adj[i] = 0;
+ }
+
+ psb_intel_crtc->mode_dev = mode_dev;
+ psb_intel_crtc->cursor_addr = 0;
+
+ drm_crtc_helper_add(&psb_intel_crtc->base,
+ dev_priv->ops->crtc_helper);
+
+ /* Set up the array of drm_connector pointers */
+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+ &psb_intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+ &psb_intel_crtc->base;
+ psb_intel_crtc->mode_set.connectors =
+ (struct drm_connector **) (psb_intel_crtc + 1);
+ psb_intel_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, pipe);
+}
+
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+ struct drm_mode_object *drmmode_obj;
+ struct psb_intel_crtc *crtc;
+
+ if (!dev_priv) {
+ dev_err(dev->dev, "called with no initialization\n");
+ return -EINVAL;
+ }
+
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+
+ if (!drmmode_obj) {
+ dev_err(dev->dev, "no such CRTC id\n");
+ return -EINVAL;
+ }
+
+ crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ if (psb_intel_crtc->pipe == pipe)
+ break;
+ }
+ return crtc;
+}
+
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+ int index_mask = 0;
+ struct drm_connector *connector;
+ int entry = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ if (type_mask & (1 << psb_intel_encoder->type))
+ index_mask |= (1 << entry);
+ entry++;
+ }
+ return index_mask;
+}
+
+
+void psb_intel_modeset_cleanup(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+
+/*
+ * The current Intel driver doesn't take advantage of encoders;
+ * always give back the encoder for the connector.
+ */
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ return &psb_intel_encoder->base;
+}
+
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644
index 00000000000..535b49a5e40
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -0,0 +1,28 @@
+/* copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t type, uint32_t size);
+void psb_intel_crtc_destroy(struct drm_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644
index 00000000000..f40535e5668
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* These are outputs from the chip - integrated only;
+ * external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
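+/*
+ * The pixel multiplier is stored in the low nibble of
+ * mode->private_flags (the shift is zero), so the setter below just
+ * ORs the value in and the getter masks it back out.
+ */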
+
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+ >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
+/*
+ * Holds information usually kept in the device driver's private data,
+ * since it needs to be shared across multiple drivers' private data.
+ */
+struct psb_intel_mode_device {
+
+ /*
+ * Abstracted memory manager operations
+ */
+ size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+ /*
+ * Cursor (Can go ?)
+ */
+ int cursor_needs_physical;
+
+ /*
+ * LVDS info
+ */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *panel_fixed_mode2;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+ uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+ /* for getting at dev. private (mmio etc.) */
+ struct drm_device *drm_dev;
+ u32 reg; /* GPIO reg */
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ u8 slave_addr;
+};
+
+struct psb_intel_encoder {
+ struct drm_encoder base;
+ int type;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct psb_intel_encoder *);
+ int crtc_mask;
+ int clone_mask;
+ void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+ /* FIXME: Either make SDVO and LVDS store its i2c here or give CDV its
+ own set of output privates */
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+struct psb_intel_connector {
+ struct drm_connector base;
+ struct psb_intel_encoder *encoder;
+};
+
+struct psb_intel_crtc_state {
+ uint32_t saveDSPCNTR;
+ uint32_t savePIPECONF;
+ uint32_t savePIPESRC;
+ uint32_t saveDPLL;
+ uint32_t saveFP0;
+ uint32_t saveFP1;
+ uint32_t saveHTOTAL;
+ uint32_t saveHBLANK;
+ uint32_t saveHSYNC;
+ uint32_t saveVTOTAL;
+ uint32_t saveVBLANK;
+ uint32_t saveVSYNC;
+ uint32_t saveDSPSTRIDE;
+ uint32_t saveDSPSIZE;
+ uint32_t saveDSPPOS;
+ uint32_t saveDSPBASE;
+ uint32_t savePalette[256];
+};
+
+struct psb_intel_crtc {
+ struct drm_crtc base;
+ int pipe;
+ int plane;
+ uint32_t cursor_addr;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ u8 lut_adj[256];
+ struct psb_intel_framebuffer *fbdev_fb;
+ /* a mode_set for fbdev users on this crtc */
+ struct drm_mode_set mode_set;
+
+ /* GEM object that holds our cursor */
+ struct drm_gem_object *cursor_obj;
+
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode saved_adjusted_mode;
+
+ struct psb_intel_mode_device *mode_dev;
+
+ /*crtc mode setting flags*/
+ u32 mode_flags;
+
+ /* Saved Crtc HW states */
+ struct psb_intel_crtc_state *crtc_state;
+};
+
+#define to_psb_intel_crtc(x) \
+ container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_connector(x) \
+ container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x) \
+ container_of(x, struct psb_intel_encoder, base)
+#define to_psb_intel_framebuffer(x) \
+ container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+ const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+ struct drm_connector *connector)
+{
+ return to_psb_intel_connector(connector)->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+ struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+ *connector);
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+ int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+ int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+ int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+ struct drm_framebuffer *fb);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+ *dev, struct
+ drm_mode_fb_cmd
+ *mode_cmd,
+ void *mm_private);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644
index 00000000000..a25e4ca5e91
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+ /*
+ * Saved LVDS output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = REG_READ(BLC_PWM_CTL);
+ gma_power_end(dev);
+ } else /* Powered off, use the saved value */
+ ret = dev_priv->saveBLC_PWM_CTL;
+
+ /* The top 15 bits hold the frequency mask */
+ ret = (ret & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+ ret *= 2; /* Return a 16-bit range as needed for setting */
+ if (ret == 0)
+ dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+ REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+ return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (i.e. blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
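+ /* For example, level 50 scales to 127 on the 0-255 brightness
+ * scale (before any polarity inversion below). */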
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+ dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+ dev_priv->lvds_bl->brightnesscmd,
+ blc_i2c_brightness);
+ return 0;
+ }
+
+ dev_err(dev->dev, "I2C transfer error\n");
+ return -1;
+}
+
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+ /* BLC_PWM_CTL should be initialised during backlight device init */
+ BUG_ON(max_pwm_blc == 0);
+
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
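+ /* Illustrative example: with max_pwm_blc = 4096, level 50 yields a
+ * duty cycle of 2048, i.e. roughly 50% (before any polarity
+ * inversion below). */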
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "NO LVDS backlight info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ psb_lvds_i2c_set_brightness(dev, level);
+ else
+ psb_lvds_pwm_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "set power, chip off!\n");
+ return;
+ }
+
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ psb_intel_lvds_set_backlight(dev,
+ mode_dev->backlight_duty_cycle);
+ } else {
+ psb_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ psb_intel_lvds_set_power(dev, true);
+ else
+ psb_intel_lvds_set_power(dev, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+ lvds_priv->saveLVDS = REG_READ(LVDS);
+ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ /*
+ * If the light is off at server startup,
+ * just make it full brightness
+ */
+ if (dev_priv->backlight_duty_cycle == 0)
+ dev_priv->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+}
+
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ u32 pp_status;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv =
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ lvds_priv->savePP_ON,
+ lvds_priv->savePP_OFF,
+ lvds_priv->saveLVDS,
+ lvds_priv->savePP_CONTROL,
+ lvds_priv->savePP_CYCLE,
+ lvds_priv->saveBLC_PWM_CTL);
+
+ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+ REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+ } else {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+}
+
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct drm_display_mode *fixed_mode =
+ dev_priv->mode_dev.panel_fixed_mode;
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct psb_intel_crtc *psb_intel_crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+ /* PSB requires the LVDS to be on pipe B; MRST has only one pipe anyway */
+ if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
+ return false;
+ }
+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+ printk(KERN_ERR "Must use PIPE A\n");
+ return false;
+ }
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ psb_intel_lvds_set_power(dev, false);
+
+ gma_power_end(dev);
+}
+
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ psb_intel_lvds_get_max_backlight(dev);
+
+ psb_intel_lvds_set_power(dev, true);
+}
+
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED. This connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ int ret = 0;
+
+ if (!IS_MRST(dev))
+ ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Didn't get an EDID, so set wide sync ranges to get all modes
+ * handed to valid_mode for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!encoder)
+ return -1;
+
+ if (!strcmp(property->name, "scaling mode")) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curval;
+
+ if (!crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property,
+ &curval))
+ goto set_prop_error;
+
+ if (curval == value)
+ goto set_prop_done;
+
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ goto set_prop_error;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ goto set_prop_error;
+ }
+ } else if (!strcmp(property->name, "backlight")) {
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ goto set_prop_error;
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *devp =
+ encoder->dev->dev_private;
+ struct backlight_device *bd = devp->backlight_device;
+ if (bd) {
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+ }
+#endif
+ }
+ } else if (!strcmp(property->name, "DPMS")) {
+ struct drm_encoder_helper_funcs *hfuncs
+ = encoder->helper_private;
+ hfuncs->dpms(encoder, value);
+ }
+
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+ .dpms = psb_intel_lvds_encoder_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+ .prepare = psb_intel_lvds_prepare,
+ .mode_set = psb_intel_lvds_mode_set,
+ .commit = psb_intel_lvds_commit,
+};
+
+const struct drm_connector_helper_funcs
+ psb_intel_lvds_connector_helper_funcs = {
+ .get_modes = psb_intel_lvds_get_modes,
+ .mode_valid = psb_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = psb_intel_lvds_save,
+ .restore = psb_intel_lvds_restore,
+ .detect = psb_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_lvds_set_property,
+ .destroy = psb_intel_lvds_destroy,
+};
+
+
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+ .destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
+ struct psb_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ psb_intel_encoder =
+ kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+
+ if (!psb_intel_encoder) {
+ dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+ return;
+ }
+
+ psb_intel_connector =
+ kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+
+ if (!psb_intel_connector) {
+ kfree(psb_intel_encoder);
+ dev_err(dev->dev, "psb_intel_connector allocation error\n");
+ return;
+ }
+
+ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv) {
+ dev_err(dev->dev, "LVDS private allocation error\n");
+ goto failed_connector;
+ }
+
+ psb_intel_encoder->dev_priv = lvds_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &psb_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, encoder,
+ &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &psb_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ /*
+ * Set up I2C bus
+ * FIXME: destroy i2c_bus on exit
+ */
+ lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+ if (!lvds_priv->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ lvds_priv->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!lvds_priv->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "DDC bus registration " "failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this? */
+ if (mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+ if (!mode_dev->panel_fixed_mode)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ psb_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+ /*
+ * Blacklist machines with BIOSes that list an LVDS panel without
+ * actually having one.
+ */
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+ if (lvds_priv->i2c_bus)
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+failed_connector:
+ kfree(psb_intel_connector);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644
index 00000000000..4fca0d6feeb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe - check whether a DDC-capable display responds
+ * @adapter: I2C adapter to probe
+ *
+ * Perform a dummy one-byte write followed by a read at the EDID address
+ * (0x50) and report whether the transfer completed.
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+ u8 out_buf[] = { 0x0, 0x0 };
+ u8 buf[2];
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(adapter, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Fetch the EDID information from @connector using the DDC bus and add the
+ * resulting modes to the connector's probed mode list.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644
index 00000000000..fcc0af03d68
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -0,0 +1,1309 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
+
+#define BLC_PWM_CTL 0x61254
+#define BLC_PWM_CTL2 0x61250
+#define BLC_PWM_CTL_C 0x62254
+#define BLC_PWM_CTL2_C 0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
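+/*
+ * Illustrative sketch only (not part of the register definitions): the LVDS
+ * code reads these fields roughly as follows, with the maximum backlight
+ * level derived from the modulation frequency field and the current level
+ * from the duty cycle field:
+ *
+ *	u32 blc = REG_READ(BLC_PWM_CTL);
+ *	u32 max = ((blc & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ *			BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ *	u32 level = blc & BACKLIGHT_DUTY_CYCLE_MASK;
+ */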
+
+#define I915_GCFGC 0xf0
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+
+#define I855_HPLLCC 0xc0
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define HTOTAL_C 0x62000
+#define HBLANK_C 0x62004
+#define HSYNC_C 0x62008
+#define VTOTAL_C 0x6200c
+#define VBLANK_C 0x62010
+#define VSYNC_C 0x62014
+#define PIPECSRC 0x6201c
+#define BCLRPAT_C 0x62020
+#define VSYNCSHIFT_C 0x62028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
+
+#define PFIT_CONTROL 0x61230
+#define PFIT_ENABLE (1 << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_LOCK (1 << 15) /* CDV */
+
+/*
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
+ * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
+ * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
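+/*
+ * Illustrative example: for the 65 MHz dotclock case described above, a 2x
+ * multiplier is programmed as the factor minus one, i.e.
+ *
+ *	dpll_md |= (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ */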
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This must be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST 0x606c
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1 << 31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1 << 30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_DIV_SHIFT 0
+
+#define PORT_HOTPLUG_EN 0x61110
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
+
+#define PORT_HOTPLUG_STAT 0x61114
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
+#define SDVO_AUDIO_ENABLE (1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK (1 << 17)
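+/*
+ * Illustrative read-modify-write sketch: when reprogramming SDVOB, the
+ * preserved bits are carried over from the current register value, e.g.
+ *
+ *	u32 val = REG_READ(SDVOB) & SDVOB_PRESERVE_MASK;
+ *	REG_WRITE(SDVOB, val | SDVO_ENABLE);
+ */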
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS 0x61180
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN (1 << 31)
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT (1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN (1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1 << 31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1 << 30)
+#define PIPECONF_ACTIVE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSIPLL_LOCK (1 << 29)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_DSR (1 << 26)
+#define PIPEACONF_PIPE_LOCKED (1 << 25)
+#define PIPEACONF_PALETTE 0
+#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPEACONF_GAMMA (1 << 24)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_PLANE_OFF (1 << 19)
+#define PIPECONF_CURSOR_OFF (1 << 18)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1 << 31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1 << 24)
+#define PIPEBCONF_PALETTE 0
+
+#define PIPECCONF 0x72008
+
+#define PIPEBGCMAXRED 0x71010
+#define PIPEBGCMAXGREEN 0x71014
+#define PIPEBGCMAXBLUE 0x71018
+
+#define PIPEASTAT 0x70024
+#define PIPEBSTAT 0x71024
+#define PIPECSTAT 0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
+#define PIPE_VBLANK_CLEAR (1 << 1)
+#define PIPE_VBLANK_STATUS (1 << 1)
+#define PIPE_TE_STATUS (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_VSYNC_CLEAR (1UL << 9)
+#define PIPE_VSYNC_STATUS (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
+#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define PIPE_VSYNC_ENABL (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
+ PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
+#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+#define HISTOGRAM_INT_CONTROL 0x61268
+#define HISTOGRAM_BIN_DATA 0x61264
+#define HISTOGRAM_LOGIC_CONTROL 0x61260
+#define PWM_CONTROL_LOGIC 0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
+#define PWM_LOGIC_ENABLE (1UL << 31)
+#define PWM_PHASEIN_ENABLE (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT 0x00001f00
+#define PWM_PHASEIN_INC 0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
+#define DPST_YUV_LUMA_MODE 0
+
+struct dpst_ie_histogram_control {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t bin_reg_index:7;
+ uint32_t reserved:4;
+ uint32_t bin_reg_func_select:1;
+ uint32_t sync_to_phase_in:1;
+ uint32_t alt_enhancement_mode:2;
+ uint32_t reserved1:1;
+ uint32_t sync_to_phase_in_count:8;
+ uint32_t histogram_mode_select:1;
+ uint32_t reserved2:4;
+ uint32_t ie_pipe_assignment:1;
+ uint32_t ie_mode_table_enabled:1;
+ uint32_t ie_histogram_enable:1;
+ };
+ };
+};
+
+struct dpst_guardband {
+ union {
+ uint32_t data;
+ struct {
+ uint32_t guardband:22;
+ uint32_t guardband_interrupt_delay:8;
+ uint32_t interrupt_status:1;
+ uint32_t interrupt_enable:1;
+ };
+ };
+};
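+/*
+ * Illustrative sketch of how these unions are intended to be used: read the
+ * register into .data, adjust the bitfields, then write .data back, e.g.
+ *
+ *	struct dpst_guardband gb;
+ *	gb.data = REG_READ(HISTOGRAM_INT_CONTROL);
+ *	gb.interrupt_enable = 1;
+ *	REG_WRITE(HISTOGRAM_INT_CONTROL, gb.data);
+ */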
+
+#define PIPEAFRAMEHIGH 0x70040
+#define PIPEAFRAMEPIXEL 0x70044
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPEBFRAMEPIXEL 0x71044
+#define PIPECFRAMEHIGH 0x72040
+#define PIPECFRAMEPIXEL 0x72044
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
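+/*
+ * Illustrative sketch: the 24-bit frame counter is split across the two
+ * registers above, so a pipe A frame count can be assembled roughly as
+ *
+ *	u32 high = REG_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK;
+ *	u32 low = (REG_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *			PIPE_FRAME_LOW_SHIFT;
+ *	u32 frame = (high << 8) | low;
+ */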
+
+#define DSPARB 0x70030
+#define DSPFW1 0x70034
+#define DSPFW2 0x70038
+#define DSPFW3 0x7003c
+#define DSPFW4 0x70050
+#define DSPFW5 0x70054
+#define DSPFW6 0x70058
+#define DSPCHICKENBIT 0x70400
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DSPCCNTR 0x72180
+#define DISPLAY_PLANE_ENABLE (1 << 31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_15_16BPP (0x4 << 26)
+#define DISPPLANE_16BPP (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
+#define DISPPLANE_32BPP (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
+#define DISPPLANE_SEL_PIPE_POS 24
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+#define DISPPLANE_BOTTOM (4)
+
+#define DSPABASE 0x70184
+#define DSPALINOFF 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBLINOFF 0x71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPCBASE 0x72184
+#define DSPCLINOFF 0x72184
+#define DSPCSTRIDE 0x72188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+#define DSPCPOS 0x7218C
+#define DSPCSIZE 0x72190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define DSPCSURF 0x7219C
+#define DSPCTILEOFF 0x721A4
+#define DSPCKEYMAXVAL 0x721A0
+#define DSPCKEYMINVAL 0x72194
+#define DSPCKEYMSK 0x72198
+
+#define VGACNTRL 0x71400
+#define VGA_DISP_DISABLE (1 << 31)
+#define VGA_2X_MODE (1 << 30)
+#define VGA_PIPE_B_SELECT (1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET 0x08000
+#define OV_OVADD 0x30000
+#define OV_DOVASTA 0x30008
+# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS 6
+# define OV_PIPE_A 0
+# define OV_PIPE_C 1
+#define OV_OGAMC5 0x30010
+#define OV_OGAMC4 0x30014
+#define OV_OGAMC3 0x30018
+#define OV_OGAMC2 0x3001C
+#define OV_OGAMC1 0x30020
+#define OV_OGAMC0 0x30024
+#define OVC_OVADD 0x38000
+#define OVC_DOVCSTA 0x38008
+#define OVC_OGAMC5 0x38010
+#define OVC_OGAMC4 0x38014
+#define OVC_OGAMC3 0x38018
+#define OVC_OGAMC2 0x3801C
+#define OVC_OGAMC1 0x38020
+#define OVC_OGAMC0 0x38024
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0 0x71410
+#define SWF1 0x71414
+#define SWF2 0x71418
+#define SWF3 0x7141c
+#define SWF4 0x71420
+#define SWF5 0x71424
+#define SWF6 0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00 0x70410
+#define SWF01 0x70414
+#define SWF02 0x70418
+#define SWF03 0x7041c
+#define SWF04 0x70420
+#define SWF05 0x70424
+#define SWF06 0x70428
+
+#define SWF10 SWF0
+#define SWF11 SWF1
+#define SWF12 SWF2
+#define SWF13 SWF3
+#define SWF14 SWF4
+#define SWF15 SWF5
+#define SWF16 SWF6
+
+#define SWF30 0x72414
+#define SWF31 0x72418
+#define SWF32 0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+#define PALETTE_C 0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR 0x70080
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURABASE 0x70084
+#define CURAPOS 0x70088
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
+#define CURBCNTR 0x700c0
+#define CURBBASE 0x700c4
+#define CURBPOS 0x700c8
+#define CURCCNTR 0x700e0
+#define CURCBASE 0x700e4
+#define CURCPOS 0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A 0x0f014
+#define MDFLD_DPLL_B 0x0f018
+#define MDFLD_INPUT_REF_SEL (1 << 14)
+#define MDFLD_VCO_SEL (1 << 16)
+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
+#define MDFLD_PLL_LATCHEN (1 << 28)
+#define MDFLD_PWR_GATE_EN (1 << 30)
+#define MDFLD_P1_MASK (0x1FF << 17)
+#define MRST_FPA0 0x0f040
+#define MRST_FPA1 0x0f044
+#define MDFLD_DPLL_DIV0 0x0f048
+#define MDFLD_DPLL_DIV1 0x0f04c
+#define MRST_PERF_MODE 0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL 0x61134
+#define HDMI_PHY_POWER_DOWN 0x7f
+#define HDMIB_CONTROL 0x61140
+#define HDMIB_PORT_EN (1 << 31)
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+#define HDMIB_NULL_PACKET (1 << 9)
+#define HDMIB_HDCP_PORT (1 << 5)
+
+/* #define LVDS 0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
+
+#define MIPI 0x61190
+#define MIPI_C 0x62190
+#define MIPI_PORT_EN (1 << 31)
+/* Turns on border drawing to allow centered display. */
+#define SEL_FLOPPED_HSTX (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE (1 << 16)
+#define MIPI_BORDER_EN (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE 0x1
+#define MIPIA_2LANE_MIPIC_2LANE 0x2
+#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
+#define TE_TRIGGER_GPIO_PIN (1 << 3)
+#define MIPI_TE_COUNT 0x61194
+
+/* #define PP_CONTROL 0x61204 */
+#define POWER_DOWN_ON_RESET (1 << 1)
+
+/* #define PFIT_CONTROL 0x61230 */
+#define PFIT_PIPE_SELECT (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT (29)
+
+/* #define BLC_PWM_CTL 0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE (1 << 30)
+/* #define DSPACNTR 0x70180 */
+
+#define MRST_DSPABASE 0x7019c
+#define MRST_DSPBBASE 0x7119c
+#define MDFLD_DSPCBASE 0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ * MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET 0x800
+
+#define DEVICE_READY_REG 0xb000
+#define LP_OUTPUT_HOLD (1 << 16)
+#define EXIT_ULPS_DEV_READY 0x3
+#define LP_OUTPUT_HOLD_RELEASE 0x810000
+# define ENTERING_ULPS (2 << 1)
+# define EXITING_ULPS (1 << 1)
+# define ULPS_MASK (3 << 1)
+# define BUS_POSSESSION (1 << 3)
+#define INTR_STAT_REG 0xb004
+#define RX_SOT_ERROR (1 << 0)
+#define RX_SOT_SYNC_ERROR (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_FALSE_CONTROL_ERROR (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
+#define RX_CHECKSUM_ERROR (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
+#define RX_DSI_VC_ID_INVALID (1 << 11)
+#define TX_FALSE_CONTROL_ERROR (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
+#define TX_CHECKSUM_ERROR (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
+#define TX_DSI_VC_ID_INVALID (1 << 17)
+#define HIGH_CONTENTION (1 << 18)
+#define LOW_CONTENTION (1 << 19)
+#define DPI_FIFO_UNDER_RUN (1 << 20)
+#define HS_TX_TIMEOUT (1 << 21)
+#define LP_RX_TIMEOUT (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define SPL_PKT_SENT (1 << 30)
+#define INTR_EN_REG 0xb008
+#define DSI_FUNC_PRG_REG 0xb00c
+#define DPI_CHANNEL_NUMBER_POS 0x03
+#define DBI_CHANNEL_NUMBER_POS 0x05
+#define FMT_DPI_POS 0x07
+#define FMT_DBI_POS 0x0A
+#define DBI_DATA_WIDTH_POS 0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
+ * 666 FORMAT
+ */
+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED 0x00 /* command mode
+ * is not supported
+ */
+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
+#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
+
+#define HS_TX_TIMEOUT_REG 0xb010
+#define LP_RX_TIMEOUT_REG 0xb014
+#define TURN_AROUND_TIMEOUT_REG 0xb018
+#define DEVICE_RESET_REG 0xb01C
+#define DPI_RESOLUTION_REG 0xb020
+#define RES_V_POS 0x10
+#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
+#define VERT_SYNC_PAD_COUNT_REG 0xb038
+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
+#define DPI_CONTROL_REG 0xb048
+#define DPI_SHUT_DOWN (1 << 0)
+#define DPI_TURN_ON (1 << 1)
+#define DPI_COLOR_MODE_ON (1 << 2)
+#define DPI_COLOR_MODE_OFF (1 << 3)
+#define DPI_BACK_LIGHT_ON (1 << 4)
+#define DPI_BACK_LIGHT_OFF (1 << 5)
+#define DPI_LP (1 << 6)
+#define DPI_DATA_REG 0xb04c
+#define DPI_BACK_LIGHT_ON_DATA 0x07
+#define DPI_BACK_LIGHT_OFF_DATA 0x17
+#define INIT_COUNT_REG 0xb050
+#define MAX_RET_PAK_REG 0xb054
+#define VIDEO_FMT_REG 0xb058
+#define COMPLETE_LAST_PCKT (1 << 2)
+#define EOT_DISABLE_REG 0xb05c
+#define ENABLE_CLOCK_STOPPING (1 << 1)
+#define LP_BYTECLK_REG 0xb060
+#define LP_GEN_DATA_REG 0xb064
+#define HS_GEN_DATA_REG 0xb068
+#define LP_GEN_CTRL_REG 0xb06C
+#define HS_GEN_CTRL_REG 0xb070
+#define DCS_CHANNEL_NUMBER_POS 0x6
+#define MCS_COMMANDS_POS 0x8
+#define WORD_COUNTS_POS 0x8
+#define MCS_PARAMETER_POS 0x10
+#define GEN_FIFO_STAT_REG 0xb074
+#define HS_DATA_FIFO_FULL (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define HS_LS_DBI_ENABLE_REG 0xb078
+#define TXCLKESC_REG 0xb07c
+#define DPHY_PARAM_REG 0xb080
+#define DBI_BW_CTRL_REG 0xb084
+#define CLK_LANE_SWT_REG 0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG 0xb104
+#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG 0xb108
+#define MIPI_DATA_LENGTH_REG 0xb10C
+#define MIPI_COMMAND_ADDRESS_REG 0xb110
+#define MIPI_COMMAND_LENGTH_REG 0xb114
+#define MIPI_READ_DATA_RETURN_REG0 0xb118
+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
+#define MIPI_READ_DATA_RETURN_REG2 0xb120
+#define MIPI_READ_DATA_RETURN_REG3 0xb124
+#define MIPI_READ_DATA_RETURN_REG4 0xb128
+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
+#define MIPI_READ_DATA_RETURN_REG6 0xb130
+#define MIPI_READ_DATA_RETURN_REG7 0xb134
+#define MIPI_READ_DATA_VALID_REG 0xb138
+
+/* DBI COMMANDS */
+#define soft_reset 0x01
+/*
+ * The display module performs a software reset.
+ * Registers are written with their SW Reset default values.
+ */
+#define get_power_mode 0x0a
+/*
+ * The display module returns the current power mode
+ */
+#define get_address_mode 0x0b
+/*
+ * The display module returns the current status.
+ */
+#define get_pixel_format 0x0c
+/*
+ * This command gets the pixel format for the RGB image data
+ * used by the interface.
+ */
+#define get_display_mode 0x0d
+/*
+ * The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode 0x0e
+/*
+ * The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result 0x0f
+/*
+ * The display module returns the self-diagnostic results following
+ * a Sleep Out command.
+ */
+#define enter_sleep_mode 0x10
+/*
+ * This command causes the display module to enter the Sleep mode.
+ * In this mode, all unnecessary blocks inside the display module are
+ * disabled except interface communication. This is the lowest power
+ * mode the display module supports.
+ */
+#define exit_sleep_mode 0x11
+/*
+ * This command causes the display module to exit Sleep mode.
+ * All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode 0x12
+/*
+ * This command causes the display module to enter the Partial Display
+ * Mode. The Partial Display Mode window is described by the
+ * set_partial_area command.
+ */
+#define enter_normal_mode 0x13
+/*
+ * This command causes the display module to enter the Normal mode.
+ * Normal Mode is defined as Partial Display mode and Scroll mode are off
+ */
+#define exit_invert_mode 0x20
+/*
+ * This command causes the display module to stop inverting the image
+ * data on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define enter_invert_mode 0x21
+/*
+ * This command causes the display module to invert the image data only on
+ * the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_gamma_curve 0x26
+/*
+ * This command selects the desired gamma curve for the display device.
+ * Four fixed gamma curves are defined in section DCS spec.
+ */
+#define set_display_off 0x28
+/*
+ * This command causes the display module to stop displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_display_on 0x29
+/*
+ * This command causes the display module to start displaying the image data
+ * on the display device. The frame memory contents remain unchanged.
+ * No status bits are changed.
+ */
+#define set_column_address 0x2a
+/*
+ * This command defines the column extent of the frame memory accessed by
+ * the host processor with the read_memory_continue and
+ * write_memory_continue commands.
+ * No status bits are changed.
+ */
+#define set_page_addr 0x2b
+/*
+ * This command defines the page extent of the frame memory accessed by
+ * the host processor with the write_memory_continue and
+ * read_memory_continue command.
+ * No status bits are changed.
+ */
+#define write_mem_start 0x2c
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory starting at the pixel location specified
+ * by preceding set_column_address and set_page_address commands.
+ */
+#define set_partial_area 0x30
+/*
+ * This command defines the Partial Display mode's display area.
+ * There are two parameters associated with this command, the first
+ * defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ * refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area 0x33
+/*
+ * This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off 0x34
+/*
+ * This command turns off the display module's Tearing Effect output
+ * signal on the TE signal line.
+ */
+#define set_tear_on 0x35
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line.
+ */
+#define set_address_mode 0x36
+/*
+ * This command sets the data order for transfers from the host processor
+ * to the display module's frame memory, bits B[7:5] and B3, and from the
+ * display module's frame memory to the display device, bits B[2:0] and B4.
+ */
+#define set_scroll_start 0x37
+/*
+ * This command sets the start of the vertical scrolling area in the frame
+ * memory. The vertical scrolling area is fully defined when this command
+ * is used with the set_scroll_area command. The set_scroll_start command
+ * has one parameter, the Vertical Scroll Pointer. The VSP defines the
+ * line in the frame memory that is written to the display device as the
+ * first line of the vertical scroll area.
+ */
+#define exit_idle_mode 0x38
+/*
+ * This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode 0x39
+/*
+ * This command causes the display module to enter Idle Mode.
+ * In Idle Mode, color expression is reduced. Colors are shown on the
+ * display device using the MSB of each of the R, G and B color
+ * components in the frame memory
+ */
+#define set_pixel_format 0x3a
+/*
+ * This command sets the pixel format for the RGB image data used by the
+ * interface.
+ * Bits D[6:4] DPI Pixel Format Definition
+ * Bits D[2:0] DBI Pixel Format Definition
+ * Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp 0x1
+#define DCS_PIXEL_FORMAT_8bpp 0x2
+#define DCS_PIXEL_FORMAT_12bpp 0x3
+#define DCS_PIXEL_FORMAT_16bpp 0x5
+#define DCS_PIXEL_FORMAT_18bpp 0x6
+#define DCS_PIXEL_FORMAT_24bpp 0x7
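+/*
+ * Illustrative example: per the bit layout described above (D[6:4] for DPI,
+ * D[2:0] for DBI), a set_pixel_format parameter selecting 18bpp on both
+ * interfaces would be
+ *
+ *	(DCS_PIXEL_FORMAT_18bpp << 4) | DCS_PIXEL_FORMAT_18bpp
+ */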
+
+#define write_mem_cont 0x3c
+
+/*
+ * This command transfers image data from the host processor to the
+ * display module's frame memory continuing from the pixel location
+ * following the previous write_memory_continue or write_memory_start
+ * command.
+ */
+#define set_tear_scanline 0x44
+/*
+ * This command turns on the display module's Tearing Effect output signal
+ * on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline 0x45
+/*
+ * The display module returns the current scanline, N, used to update the
+ * display device. The total number of scanlines on a display device is
+ * defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
+ * the first line of V Sync and is denoted as Line 0.
+ * When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0 0x03 /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1 0x13 /* generic short write, 1 parameter */
+#define GEN_SHORT_WRITE_2 0x23 /* generic short write, 2 parameters */
+#define GEN_READ_0 0x04 /* generic read, no parameters */
+#define GEN_READ_1 0x14 /* generic read, 1 parameter */
+#define GEN_READ_2 0x24 /* generic read, 2 parameters */
+#define GEN_LONG_WRITE 0x29 /* generic long write */
+#define MCS_SHORT_WRITE_0 0x05 /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1 0x15 /* MCS short write, 1 parameter */
+#define MCS_READ 0x06 /* MCS read, no parameters */
+#define MCS_LONG_WRITE 0x39 /* MCS long write */
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile 0x50
+#define write_display_brightness 0x51
+#define write_ctrl_display 0x53
+#define write_ctrl_cabc 0x55
+ #define UI_IMAGE 0x01
+ #define STILL_IMAGE 0x02
+ #define MOVING_IMAGE 0x03
+#define write_hysteresis 0x57
+#define write_gamma_setting 0x58
+#define write_cabc_min_bright 0x5e
+#define write_kbbc_profile 0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ * This command is used to control ambient light, panel backlight
+ * brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON (1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON (1 << 4)
+#define DISPLAY_DIMMING_ON (1 << 3)
+#define BACKLIGHT_ON (1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO (1 << 1)
+#define GAMMA_AUTO (1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP 0x1
+#define DCS_PIXEL_FORMAT_8BPP 0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data 0xfc
+#define diag_res_data 0x00
+#define disp_mode_data 0x23
+#define pxl_fmt_data 0x77
+#define pwr_mode_data 0x74
+#define sig_mode_data 0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1 0xff
+#define scanline_data2 0xff
+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
+ * with Sync Pulse
+ */
+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
+ * with Sync events
+ */
+#define BURST_MODE 0x03 /* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */
+ /* Allocate at least
+ * 0x100 Byte with 32
+ * byte alignment
+ */
+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
+ * 0x100 Byte with 32
+ * byte alignment
+ */
+#define DBI_CB_TIME_OUT 0xFFFF
+
+#define GEN_FB_TIME_OUT 2000
+
+#define SKU_83 0x01
+#define SKU_100 0x02
+#define SKU_100L 0x04
+#define SKU_BYPASS 0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
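+/*
+ * Illustrative examples, assuming the FIELD_MASK/FIELD_SHIFT naming used by
+ * the sideband definitions below:
+ *
+ *	pkt = SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ *	      SET_FIELD(SB_DEST_DPLL, SB_DEST);
+ *	opcode = GET_FIELD(pkt, SB_OPCODE);
+ *
+ * _PIPE() picks the per-pipe variant of a register, so SB_M(0) == _SB_M_A
+ * and SB_M(1) == _SB_M_B.
+ */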
+
+/* PCI config space */
+
+#define SB_PCKT 0x02100 /* cedarview */
+# define SB_OPCODE_MASK PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT 16
+# define SB_OPCODE_READ 0
+# define SB_OPCODE_WRITE 1
+# define SB_DEST_MASK PSB_MASK(15, 8)
+# define SB_DEST_SHIFT 8
+# define SB_DEST_DPLL 0x88
+# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT 4
+# define SB_BUSY (1 << 0)
+
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA 0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR 0x02108 /* cedarview */
+#define DPIO_CFG 0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1 (1 << 3)
+# define DPIO_MODE_SELECT_0 (1 << 2)
+# define DPIO_SFR_BYPASS (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N (1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A 0x8008
+#define _SB_M_B 0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT 24
+
+#define _SB_N_VCO_A 0x8014
+#define _SB_N_VCO_B 0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT 30
+#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT 26
+#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT 24
+
+#define _SB_REF_A 0x8018
+#define _SB_REF_B 0x8038
+#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A 0x801c
+#define _SB_P_B 0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT 30
+#define SB_P2_10 0 /* HDMI, DP, DAC */
+#define SB_P2_5 1 /* DAC */
+#define SB_P2_14 2 /* LVDS single */
+#define SB_P2_7 3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT 12
+
+#define PSB_LANE0 0x120
+#define PSB_LANE1 0x220
+#define PSB_LANE2 0x2320
+#define PSB_LANE3 0x2420
+
+#define LANE_PLL_MASK (0x7 << 20)
+#define LANE_PLL_ENABLE (0x3 << 20)
+
+
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 00000000000..4882b29119e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2617 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct psb_intel_sdvo {
+ struct psb_intel_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct psb_intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+	 * For a multifunction SDVO device,
+	 * the currently attached outputs.
+ */
+ uint16_t attached_output;
+
+ /**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bits per color mode.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+	/* Index of the currently selected TV format */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+	 * Fixed panel mode for the SDVO LVDS output
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /* Input timings for adjusted_mode */
+ struct psb_intel_sdvo_dtd input_dtd;
+};
+
+struct psb_intel_sdvo_connector {
+ struct psb_intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ int force_audio;
+
+	/* All TV formats currently supported by the device */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+	/* Properties for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+	/* Property shared by SDVO-TV and LVDS */
+ struct drm_property *brightness;
+
+	/* Current settings for the above properties */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+	/* Ranges for the margin properties */
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(psb_intel_attached_encoder(connector),
+ struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+ cval = REG_READ(SDVOC);
+ } else {
+ bval = REG_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++)
+ {
+ REG_WRITE(SDVOB, bval);
+ REG_READ(SDVOB);
+ REG_WRITE(SDVOC, cval);
+ REG_READ(SDVOC);
+ }
+}
+
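+/*
+ * Read one byte from an SDVO register: an i2c write of the register address
+ * followed by a single-byte read, both addressed to the SDVO slave.
+ */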
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+	/* Op codes for the SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+	/* HDMI op codes */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg) (reg == SDVOB)
+#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(psb_intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ DRM_LOG_KMS(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
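+/*
+ * Wire format used by psb_intel_sdvo_write_cmd() below, as implemented by the
+ * code (a sketch derived from the function, not quoted from the SDVO
+ * specification): a command with N argument bytes is issued as N+1 two-byte
+ * register writes,
+ *
+ *	{ SDVO_I2C_ARG_0 - 0,     args[0]   }
+ *	...
+ *	{ SDVO_I2C_ARG_0 - (N-1), args[N-1] }
+ *	{ SDVO_I2C_OPCODE,        cmd       }
+ *
+ * followed by a write of the SDVO_I2C_CMD_STATUS register address and a
+ * one-byte read; psb_intel_sdvo_read_response() then polls that status
+ * register until the command completes.
+ */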
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
+
+ psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 *i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ return true;
+}
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 5;
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+	 * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ udelay(15);
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ }
+ DRM_LOG_KMS("\n");
+ return true;
+
+log_fail:
+ DRM_LOG_KMS("... failed\n");
+ return false;
+}
+
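+/*
+ * A hedged note on the thresholds below: mode->clock is in kHz, so the
+ * returned multiplier keeps clock * multiplier at roughly 100 MHz or more
+ * (e.g. a 65000 kHz mode is sent with a 2x multiplier, 130 MHz on the SDVO
+ * link). mode_set later programs the matching SDVO_CLOCK_RATE_MULT_* value,
+ * and the SDVO device factors the multiplier back out.
+ */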
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_set_target_input_args targets = {0};
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct psb_intel_sdvo_get_trained_inputs_response response;
+
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct psb_intel_sdvo_pixel_clock_range clocks;
+
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct psb_intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (psb_intel_sdvo->is_lvds &&
+ (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
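+/*
+ * Packing example, worked from the code below (values chosen purely for
+ * illustration): with crtc_hdisplay = 1280 (0x500) and a horizontal blank
+ * length of 384 (0x180), part1.h_active = 0x00, part1.h_blank = 0x80 and
+ * part1.h_high = 0x51 (high nibble = bits 11:8 of the width, low nibble =
+ * bits 11:8 of the blank length). The vertical fields follow the same split,
+ * and part1.clock stores the dot clock in 10 kHz units.
+ */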
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+ dtd->part1.clock = mode->clock / 10;
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
+
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct psb_intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_encode encode;
+
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ psb_intel_sdvo_read_response(encoder, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ DRM_INFO("HDMI is not supported yet");
+
+ return false;
+#if 0
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo_dtd output_dtd;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return false;
+
+ psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+	/* Reset the input timing to the screen. Assume input 0 is always used. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return false;
+
+ if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+ &psb_intel_sdvo->input_dtd))
+ return false;
+
+ psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (psb_intel_sdvo->is_tv) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (psb_intel_sdvo->is_lvds) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 sdvox;
+ struct psb_intel_sdvo_in_out_map in_out;
+ struct psb_intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = psb_intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+	/* We have already derived the input timing in mode_fixup and filled
+	 * it into adjusted_mode.
+ */
+ if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+ input_dtd = psb_intel_sdvo->input_dtd;
+ } else {
+ /* Set the output timing to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+ }
+
+	/* Set the input timing to the screen. Assume input 0 is always used. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return;
+
+ if (psb_intel_sdvo->has_hdmi_monitor) {
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+ psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+ } else
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (psb_intel_sdvo->is_tv &&
+ !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+ return;
+
+ (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+ switch (psb_intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+ if (psb_intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+ if (psb_intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ /* FIXME: Check if this is needed for PSB
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ */
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG("DPMS_ON");
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG("DPMS_OFF");
+ break;
+ default:
+ DRM_DEBUG("DPMS: %d", mode);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ psb_intel_wait_for_vblank(dev);
+
+ status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+		 * A lot of SDVO devices fail to notify of sync, but if the
+		 * status is a success we can assume the sync succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(psb_intel_sdvo));
+ }
+
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+ }
+ return;
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (psb_intel_sdvo->is_lvds) {
+ if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+/* Currently unused */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct psb_intel_sdvo *iout = NULL;
+ struct psb_intel_sdvo *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_psb_intel_sdvo(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ DRM_DEBUG_KMS("\n");
+
+ if (!connector)
+ return 0;
+
+ psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ if (on) {
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+ return caps & -caps;
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+ u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+ /*
+		 * Don't use DDC bus 1 to fetch the EDID; that bus is
+		 * reserved for the SDVO SPD ROM.
+ */
+ for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ psb_intel_sdvo->ddc_bus = ddc;
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+		 * If we found the EDID on another bus, keep using that bus;
+		 * otherwise restore the original DDC bus.
+ */
+ if (edid == NULL)
+ psb_intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (psb_intel_sdvo->is_hdmi) {
+ psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ if (psb_intel_sdvo_connector->force_audio)
+ psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+ }
+
+ return status;
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+
+ /* add 30ms delay when the output type might be TV */
+ if (psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ mdelay(30);
+
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ psb_intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ psb_intel_sdvo->attached_output = response;
+
+ psb_intel_sdvo->has_hdmi_monitor = false;
+ psb_intel_sdvo->has_hdmi_audio = false;
+
+ if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(psb_intel_sdvo_connector))
+ ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+		/* If we have an EDID, check that it matches the connector type */
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
+
+	/* Update encoder flags, e.g. the TV clock requirement for SDVO TV */
+ if (ret == connector_status_connected) {
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->is_lvds = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+ return;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+ if (list_empty(&connector->probed_modes) == false)
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ psb_intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
+ psb_intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_lvds_modes(connector);
+ else
+ psb_intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (psb_intel_sdvo_connector->left)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+ if (psb_intel_sdvo_connector->right)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+ if (psb_intel_sdvo_connector->top)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+ if (psb_intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+ if (psb_intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+ if (psb_intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+ if (psb_intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+ if (psb_intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+ if (psb_intel_sdvo_connector->hue)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+ if (psb_intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+ if (psb_intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+ if (psb_intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+ if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+ if (psb_intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+ if (psb_intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+ if (psb_intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+ if (psb_intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (psb_intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ psb_intel_sdvo_connector->tv_format);
+
+ psb_intel_sdvo_destroy_enhance_property(connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!psb_intel_sdvo->is_hdmi)
+ return false;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == psb_intel_sdvo_connector->force_audio)
+ return 0;
+
+ psb_intel_sdvo_connector->force_audio = i;
+
+ if (i == 0)
+ has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ psb_intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!psb_intel_sdvo->color_range)
+ return 0;
+
+ psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
+#define CHECK_PROPERTY(name, NAME) \
+ if (psb_intel_sdvo_connector->name == property) { \
+ if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ psb_intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
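+/*
+ * Expansion sketch: CHECK_PROPERTY(hue, HUE) compares 'property' against
+ * psb_intel_sdvo_connector->hue, returns early if the value is unchanged,
+ * rejects values above max_hue, records the new cur_hue, selects
+ * SDVO_CMD_SET_HUE as 'cmd' and jumps to set_value, where the two-byte
+ * value is sent to the device.
+ */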
+
+ if (property == psb_intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (psb_intel_sdvo->tv_format_index ==
+ psb_intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+ temp_value = val;
+ if (psb_intel_sdvo_connector->left == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->right, val);
+ if (psb_intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->right == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->left, val);
+ if (psb_intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->top == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->bottom, val);
+ if (psb_intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->bottom == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->top, val);
+ if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (psb_intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+ .dpms = psb_intel_sdvo_dpms,
+ .mode_fixup = psb_intel_sdvo_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = psb_intel_sdvo_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = psb_intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_sdvo_set_property,
+ .destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+ .get_modes = psb_intel_sdvo_get_modes,
+ .mode_valid = psb_intel_sdvo_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+ if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+ .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+ /* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+ * We need to figure out if this is true for all available Poulsbo
+ * hardware, or if we need to fiddle with the disabled guessing code below.
+ * The problem might go away if we can parse SDVO mappings from the BIOS. */
+ sdvo->ddc_bus = 2;
+
+#if 0
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
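+
+/* Worked example (illustrative): a BIOS mapping whose ddc_pin has a high
+ * nibble of 2 makes the code above select ddc_bus = 1 << 2, which corresponds
+ * to SDVO_CONTROL_BUS_DDC2 in psb_intel_sdvo_regs.h.
+ */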
+
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
+
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
+ }
+
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+ gma_intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (IS_SDVOB(sdvo_reg)) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+ /* No SDVO device info was found for the other DVO port either,
+ * so fall back to the mapping assumption used before BIOS parsing.
+ */
+ if (IS_SDVOB(sdvo_reg))
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+ struct psb_intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &psb_intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &psb_intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+ /* FIXME: We don't support HDMI at the moment
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ */
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ psb_intel_sdvo->is_hdmi = true;
+ }
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (psb_intel_sdvo->is_hdmi)
+ psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ psb_intel_sdvo->controlled_output |= type;
+ psb_intel_sdvo_connector->output_flag = type;
+
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+ if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+ goto err;
+
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+ psb_intel_sdvo);
+ return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+ psb_intel_sdvo->is_lvds = false;
+
+ /* In SDVO, an XXX1 function block may not exist unless the corresponding XXX0 block does. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ psb_intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(psb_intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+ return true;
+}
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+ return false;
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ psb_intel_sdvo_connector->format_supported_num = 0;
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+ psb_intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", psb_intel_sdvo_connector->format_supported_num);
+ if (!psb_intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ psb_intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ psb_intel_sdvo_connector->tv_format, 0);
+ return true;
+}
+
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ psb_intel_sdvo_connector->max_##name = data_value[0]; \
+ psb_intel_sdvo_connector->cur_##name = response; \
+ psb_intel_sdvo_connector->name = \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ if (!psb_intel_sdvo_connector->name) return false; \
+ psb_intel_sdvo_connector->name->values[0] = 0; \
+ psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
+ drm_connector_attach_property(connector, \
+ psb_intel_sdvo_connector->name, \
+ psb_intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while(0)
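+
+/* Illustrative summary (not compiled): ENHANCEMENT(brightness, BRIGHTNESS)
+ * queries SDVO_CMD_GET_MAX_BRIGHTNESS and SDVO_CMD_GET_BRIGHTNESS, creates a
+ * DRM range property "brightness" spanning 0..max, attaches it to the
+ * connector at the current value, and caches max_brightness/cur_brightness
+ * for psb_intel_sdvo_set_property() to use later.
+ */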
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ /* When horizontal overscan is supported, add the left/right margin properties */
+ if (enhancements.overscan_h) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_hscan = data_value[0];
+ psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+ psb_intel_sdvo_connector->left =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ if (!psb_intel_sdvo_connector->left)
+ return false;
+
+ psb_intel_sdvo_connector->left->values[0] = 0;
+ psb_intel_sdvo_connector->left->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->left,
+ psb_intel_sdvo_connector->left_margin);
+
+ psb_intel_sdvo_connector->right =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ if (!psb_intel_sdvo_connector->right)
+ return false;
+
+ psb_intel_sdvo_connector->right->values[0] = 0;
+ psb_intel_sdvo_connector->right->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->right,
+ psb_intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_vscan = data_value[0];
+ psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+ psb_intel_sdvo_connector->top =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ if (!psb_intel_sdvo_connector->top)
+ return false;
+
+ psb_intel_sdvo_connector->top->values[0] = 0;
+ psb_intel_sdvo_connector->top->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->top,
+ psb_intel_sdvo_connector->top_margin);
+
+ psb_intel_sdvo_connector->bottom =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ if (!psb_intel_sdvo_connector->bottom)
+ return false;
+
+ psb_intel_sdvo_connector->bottom->values[0] = 0;
+ psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->bottom,
+ psb_intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_dot_crawl = 1;
+ psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ psb_intel_sdvo_connector->dot_crawl =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ if (!psb_intel_sdvo_connector->dot_crawl)
+ return false;
+
+ psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
+ psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->dot_crawl,
+ psb_intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+ union {
+ struct psb_intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+ enhancements.response = 0;
+ psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+ .master_xfer = psb_intel_sdvo_ddc_proxy_xfer,
+ .functionality = psb_intel_sdvo_ddc_proxy_func
+};
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
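+
+/* Usage sketch (assumed, based on how the DDC proxy is consumed by the EDID
+ * helpers in this driver): handing &psb_intel_sdvo->ddc to the core DRM EDID
+ * code routes every transfer through psb_intel_sdvo_ddc_proxy_xfer(), which
+ * first issues the control-bus switch for sdvo->ddc_bus, e.g.:
+ *
+ * struct edid *edid = drm_get_edid(connector, &psb_intel_sdvo->ddc);
+ */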
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ int i;
+
+ psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+ if (!psb_intel_sdvo)
+ return false;
+
+ psb_intel_sdvo->sdvo_reg = sdvo_reg;
+ psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+ if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+ kfree(psb_intel_sdvo);
+ return false;
+ }
+
+ /* encoder type will be decided later */
+ psb_intel_encoder = &psb_intel_sdvo->base;
+ psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+ }
+
+ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+ drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+ /* In the default case sdvo lvds is false */
+ if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+ goto err;
+
+ if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
+ psb_intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+
+ psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ goto err;
+
+ if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+ &psb_intel_sdvo->pixel_clock_min,
+ &psb_intel_sdvo->pixel_clock_max))
+ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(psb_intel_sdvo),
+ psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+ psb_intel_sdvo->caps.device_rev_id,
+ psb_intel_sdvo->pixel_clock_min / 1000,
+ psb_intel_sdvo->pixel_clock_max / 1000,
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err:
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ kfree(psb_intel_sdvo);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 00000000000..600e79744d6
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct psb_intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset, stored at bits 6-7 of this byte */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
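+
+/* Decoding sketch (illustrative; the nibble assignment is an assumption drawn
+ * from the field comments above): the 12-bit timing values are typically
+ * reassembled as
+ *
+ * hdisplay = part1.h_active | ((part1.h_high & 0xf0) << 4);
+ * hblank = part1.h_blank | ((part1.h_high & 0x0f) << 8);
+ *
+ * with the vertical fields following the same pattern via v_high.
+ */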
+
+struct psb_intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
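+
+/* Command flow sketch (illustrative outline, not a literal transcript of the
+ * command helpers): arguments are written to the SDVO_I2C_ARG_n registers,
+ * the opcode to SDVO_I2C_OPCODE, then SDVO_I2C_CMD_STATUS is polled until it
+ * is no longer SDVO_CMD_STATUS_PENDING, and any reply bytes are read back
+ * from the SDVO_I2C_RETURN_n registers.
+ */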
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct psb_intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct psb_intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with a square pixel aspect ratio that can be
+ scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for the encoder and monitor, relying
+ on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state; returns info for the encoder and monitor, relying on the
+ last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
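+
+/* Reconstruction sketch (follows from the comment above): each delay is a
+ * 10-bit millisecond value rebuilt from its low byte and 2-bit high field,
+ * e.g. for a struct psb_sdvo_panel_power_sequencing seq:
+ *
+ * unsigned int t0_ms = seq.t0 | (seq.t0_high << 8);
+ */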
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct psb_intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct psb_intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct psb_intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644
index 00000000000..7be802baceb
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -0,0 +1,564 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+
+/*
+ * inline functions
+ */
+
+static inline u32
+psb_pipestat(int pipe)
+{
+ if (pipe == 0)
+ return PIPEASTAT;
+ if (pipe == 1)
+ return PIPEBSTAT;
+ if (pipe == 2)
+ return PIPECSTAT;
+ BUG();
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_PIPEA_EVENT_FLAG;
+ if (pipe == 1)
+ return _MDFLD_PIPEB_EVENT_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_EVENT_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+ if (pipe == 0)
+ return _PSB_VSYNC_PIPEA_FLAG;
+ if (pipe == 1)
+ return _PSB_VSYNC_PIPEB_FLAG;
+ if (pipe == 2)
+ return _MDFLD_PIPEC_VBLANK_FLAG;
+ BUG();
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+ if (pipe == 0)
+ return PIPEACONF;
+ if (pipe == 1)
+ return PIPEBCONF;
+ if (pipe == 2)
+ return PIPECCONF;
+ BUG();
+}
+
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal |= (mask | (mask >> 16));
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
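+
+/* Note (assumption about the PIPExSTAT layout implied by the mask | (mask >> 16)
+ * write above): the interrupt enable bits occupy the high 16 bits and the
+ * matching status bits sit 16 bits lower, so a single write both enables the
+ * interrupt and clears any stale status for it.
+ */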
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = psb_pipestat(pipe);
+ dev_priv->pipestat[pipe] &= ~mask;
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 writeVal = PSB_RVDC32(reg);
+ writeVal &= ~mask;
+ PSB_WVDC32(writeVal, reg);
+ (void) PSB_RVDC32(reg);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask |= pipe_event;
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+}
+
+void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+ if (dev_priv->pipestat[pipe] == 0) {
+ if (gma_power_begin(dev_priv->dev, false)) {
+ u32 pipe_event = mid_pipe_event(pipe);
+ dev_priv->vdc_irq_mask &= ~pipe_event;
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ gma_power_end(dev_priv->dev);
+ }
+ }
+}
+
+/**
+ * Display controller interrupt handler for pipe event.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+
+ uint32_t pipe_stat_val = 0;
+ uint32_t pipe_stat_reg = psb_pipestat(pipe);
+ uint32_t pipe_enable = dev_priv->pipestat[pipe];
+ uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+ uint32_t pipe_clear;
+ uint32_t i = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
+ pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+ pipe_stat_val &= pipe_enable | pipe_status;
+ pipe_stat_val &= pipe_stat_val >> 16;
+
+ spin_unlock(&dev_priv->irqmask_lock);
+
+ /* Clear the 2nd level interrupt status bits
+ * Sometimes the bits are very sticky so we repeat until they unstick */
+ for (i = 0; i < 0xffff; i++) {
+ PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+ pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+ if (pipe_clear == 0)
+ break;
+ }
+
+ if (pipe_clear)
+ dev_err(dev->dev,
+ "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+ __func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+ if (pipe_stat_val & PIPE_VBLANK_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stat_val & PIPE_TE_STATUS)
+ drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+ mid_pipe_event_handler(dev, 0);
+
+ if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+ mid_pipe_event_handler(dev, 1);
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+ int handled = 0;
+
+ spin_lock(&dev_priv->irqmask_lock);
+
+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+ if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ dsp_int = 1;
+
+ /* FIXME: Handle Medfield
+ if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+ dsp_int = 1;
+ */
+
+ if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+ sgx_int = 1;
+
+ vdc_stat &= dev_priv->vdc_irq_mask;
+ spin_unlock(&dev_priv->irqmask_lock);
+
+ if (dsp_int && gma_power_is_on(dev)) {
+ psb_vdc_interrupt(dev, vdc_stat);
+ handled = 1;
+ }
+
+ if (sgx_int) {
+ /* Not expected - we have it masked, shut it up */
+ u32 s, s2;
+ s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+ s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+ PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+ PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+ /* If s & _PSB_CE_TWOD_COMPLETE then a 2D operation has completed, but
+ we may as well keep polling even if we add handling for that. */
+ handled = 1;
+ }
+
+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+ DRM_READMEMORYBARRIER();
+
+ if (!handled)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (gma_power_is_on(dev))
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ if (dev->vblank_enabled[0])
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ if (dev->vblank_enabled[1])
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ /* FIXME: Handle Medfield irq mask
+ if (dev->vblank_enabled[1])
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+ if (dev->vblank_enabled[2])
+ dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+ */
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank_enabled[0])
+ psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[1])
+ psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[2])
+ psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+ if (dev->vblank_enabled[0])
+ psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[1])
+ psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ if (dev->vblank_enabled[2])
+ psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+ _PSB_IRQ_MSVDX_FLAG |
+ _LNC_IRQ_TOPAZ_FLAG;
+
+ /* These two registers are safe even if display island is off */
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+ wmb();
+
+ /* This register is safe even if display island is off */
+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+ | PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+ HISTOGRAM_INT_CONTROL);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+ PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ /* enable DPST */
+ mid_enable_pipe_event(dev_priv, 0);
+ psb_irq_turn_on_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ u32 hist_reg;
+ u32 pwm_reg;
+
+ if (gma_power_begin(dev, false)) {
+ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+ psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+ PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+ PWM_CONTROL_LOGIC);
+ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ gma_power_end(dev);
+ }
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ mid_disable_pipe_event(dev_priv, 0);
+ psb_irq_turn_off_dpst(dev);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+ unsigned int *sequence, atomic_t *counter)
+{
+ unsigned int cur_vblank;
+ int ret = 0;
+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+ (((cur_vblank = atomic_read(counter))
+ - *sequence) <= (1 << 23)));
+ *sequence = cur_vblank;
+
+ return ret;
+}
+#endif
+
+/*
+ * Enable the VBLANK interrupt on the given pipe
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ return 0;
+}
+
+/*
+ * Disable the VBLANK interrupt on the given pipe
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+ if (pipe == 0)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+ else if (pipe == 1)
+ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ uint32_t high_frame = PIPEAFRAMEHIGH;
+ uint32_t low_frame = PIPEAFRAMEPIXEL;
+ uint32_t pipeconf_reg = PIPEACONF;
+ uint32_t reg_val = 0;
+ uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ high_frame = PIPEBFRAMEHIGH;
+ low_frame = PIPEBFRAMEPIXEL;
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ high_frame = PIPECFRAMEHIGH;
+ low_frame = PIPECFRAMEPIXEL;
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+ return 0;
+ }
+
+ if (!gma_power_begin(dev, false))
+ return 0;
+
+ reg_val = REG_READ(pipeconf_reg);
+
+ if (!(reg_val & PIPEACONF_ENABLE)) {
+ dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+ pipe);
+ goto psb_get_vblank_counter_exit;
+ }
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+ PIPE_FRAME_LOW_SHIFT);
+ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+ PIPE_FRAME_HIGH_SHIFT);
+ } while (high1 != high2);
+
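+ /* The frame count is split across registers: high bits here, low byte in the frame/pixel register */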
+ count = (high1 << 8) | low;
+
+psb_get_vblank_counter_exit:
+
+ gma_power_end(dev);
+
+ return count;
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644
index 00000000000..216fda38b57
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Benjamin Defnet <benjamin.r.defnet@intel.com>
+ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _SYSIRQ_H_
+#define _SYSIRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+#endif /* _SYSIRQ_H_ */
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644
index 00000000000..b867aabe6bf
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -0,0 +1,88 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+static void psb_lid_timer_func(unsigned long data)
+{
+ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+ u32 *lid_state = dev_priv->lid_state;
+ u32 pp_status;
+
+ if (readl(lid_state) == dev_priv->lid_last_state)
+ goto lid_timer_schedule;
+
+ if ((readl(lid_state)) & 0x01) {
+ /* lid state is open */
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ /* FIXME: should restore the previous backlight level */
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ psb_intel_lvds_set_brightness(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+ }
+ dev_priv->lid_last_state = readl(lid_state);
+
+lid_timer_schedule:
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+ if (!timer_pending(lid_timer)) {
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+ add_timer(lid_timer);
+ }
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+ struct timer_list *lid_timer = &dev_priv->lid_timer;
+ unsigned long irq_flags;
+
+ spin_lock_init(&dev_priv->lid_lock);
+ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+ init_timer(lid_timer);
+
+ lid_timer->data = (unsigned long)dev_priv;
+ lid_timer->function = psb_lid_timer_func;
+ lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+ add_timer(lid_timer);
+ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+ del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644
index 00000000000..b81c7c1e9c2
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_reg.h
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL 0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
+
+#define PSB_CR_CORE_ID 0x0010
+#define _PSB_CC_ID_ID_SHIFT (16)
+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT (0)
+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION 0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
+
+#define PSB_CR_SOFT_RESET 0x0080
+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
+#define _PSB_CS_RESET_USE_RESET (1 << 4)
+#define _PSB_CS_RESET_TA_RESET (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
+
+#define PSB_CR_EVENT_STATUS2 0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
+
+#define PSB_CR_EVENT_STATUS 0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
+#define _PSB_CE_SW_EVENT (1 << 14)
+#define _PSB_CE_TA_FINISHED (1 << 13)
+#define _PSB_CE_TA_TERMINATE (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK 0x0007FFFF
+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0 0x0A0C
+#define PSB_CR_USE_CODE_BASE1 0x0A10
+#define PSB_CR_USE_CODE_BASE2 0x0A14
+#define PSB_CR_USE_CODE_BASE3 0x0A18
+#define PSB_CR_USE_CODE_BASE4 0x0A1C
+#define PSB_CR_USE_CODE_BASE5 0x0A20
+#define PSB_CR_USE_CODE_BASE6 0x0A24
+#define PSB_CR_USE_CODE_BASE7 0x0A28
+#define PSB_CR_USE_CODE_BASE8 0x0A2C
+#define PSB_CR_USE_CODE_BASE9 0x0A30
+#define PSB_CR_USE_CODE_BASE10 0x0A34
+#define PSB_CR_USE_CODE_BASE11 0x0A38
+#define PSB_CR_USE_CODE_BASE12 0x0A3C
+#define PSB_CR_USE_CODE_BASE13 0x0A40
+#define PSB_CR_USE_CODE_BASE14 0x0A44
+#define PSB_CR_USE_CODE_BASE15 0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT (25)
+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX (0)
+#define _PSB_CUC_DM_PIXEL (1)
+#define _PSB_CUC_DM_RESERVED (2)
+#define _PSB_CUC_DM_EDM (3)
+
+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER 0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK 0x0AC8
+#define _PSB_CE_KICK_NOW (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
+
+#define PSB_CR_BIF_CTRL 0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
+#define _PSB_CB_CTRL_INVALDC (1 << 3)
+#define _PSB_CB_CTRL_FLUSH (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT 0x0C04
+
+#define PSB_CR_BIF_FAULT 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
+
+#define PSB_CR_BIF_BANK0 0x0C78
+#define PSB_CR_BIF_BANK1 0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
+
+#define PSB_CR_2D_SOCIF 0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS 0x0E04
+#define _PSB_C2B_STATUS_BUSY (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define PSB_2D_CLIP_BH (0x00000000)
+#define PSB_2D_PAT_BH (0x10000000)
+#define PSB_2D_CTRL_BH (0x20000000)
+#define PSB_2D_SRC_OFF_BH (0x30000000)
+#define PSB_2D_MASK_OFF_BH (0x40000000)
+#define PSB_2D_RESERVED1_BH (0x50000000)
+#define PSB_2D_RESERVED2_BH (0x60000000)
+#define PSB_2D_FENCE_BH (0x70000000)
+#define PSB_2D_BLIT_BH (0x80000000)
+#define PSB_2D_SRC_SURF_BH (0x90000000)
+#define PSB_2D_DST_SURF_BH (0xA0000000)
+#define PSB_2D_PAT_SURF_BH (0xB0000000)
+#define PSB_2D_SRC_PAL_BH (0xC0000000)
+#define PSB_2D_PAT_PAL_BH (0xD0000000)
+#define PSB_2D_MASK_SURF_BH (0xE0000000)
+#define PSB_2D_FLUSH_BH (0xF0000000)
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX (1)
+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT (0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT (12)
+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT (0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT (12)
+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT (0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT (5)
+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT (10)
+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT (15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL (0x00000001)
+#define PSB_2D_DSTCK_CTRL (0x00000002)
+#define PSB_2D_ALPHA_CTRL (0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
+#define PSB_2D_CK_COL_SHIFT (0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT (0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT (12)
+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK (3 << 25)
+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE (0 << 25)
+#define PSB_2D_ROT_90DEGS (1 << 25)
+#define PSB_2D_ROT_180DEGS (2 << 25)
+#define PSB_2D_ROT_270DEGS (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE (0x00000000)
+#define PSB_2D_DSTCK_PASS (0x00200000)
+#define PSB_2D_DSTCK_REJECT (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE (0x00000000)
+#define PSB_2D_SRCCK_PASS (0x00080000)
+#define PSB_2D_SRCCK_REJECT (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK (0x00010000)
+#define PSB_2D_USE_PAT (0x00010000)
+#define PSB_2D_USE_FILL (0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT (8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT (0)
+
+#define PSB_2D_ROP4_MASK (0x0000FFFF)
+/*
+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
+ * Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT (0)
+/*
+ * DWORD1: (Always Present)
+ * X Start (Dest)
+ * Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT (12)
+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT (0)
+/*
+ * DWORD2: (Always Present)
+ * X Size (Dest)
+ * Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT (12)
+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT (0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
+#define PSB_2D_SRC_1_PAL (0x00000000)
+#define PSB_2D_SRC_2_PAL (0x00008000)
+#define PSB_2D_SRC_4_PAL (0x00010000)
+#define PSB_2D_SRC_8_PAL (0x00018000)
+#define PSB_2D_SRC_8_ALPHA (0x00020000)
+#define PSB_2D_SRC_4_ALPHA (0x00028000)
+#define PSB_2D_SRC_332RGB (0x00030000)
+#define PSB_2D_SRC_4444ARGB (0x00038000)
+#define PSB_2D_SRC_555RGB (0x00040000)
+#define PSB_2D_SRC_1555ARGB (0x00048000)
+#define PSB_2D_SRC_565RGB (0x00050000)
+#define PSB_2D_SRC_0888ARGB (0x00058000)
+#define PSB_2D_SRC_8888ARGB (0x00060000)
+#define PSB_2D_SRC_8888UYVY (0x00068000)
+#define PSB_2D_SRC_RESERVED (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
+#define PSB_2D_PAT_1_PAL (0x00000000)
+#define PSB_2D_PAT_2_PAL (0x00008000)
+#define PSB_2D_PAT_4_PAL (0x00010000)
+#define PSB_2D_PAT_8_PAL (0x00018000)
+#define PSB_2D_PAT_8_ALPHA (0x00020000)
+#define PSB_2D_PAT_4_ALPHA (0x00028000)
+#define PSB_2D_PAT_332RGB (0x00030000)
+#define PSB_2D_PAT_4444ARGB (0x00038000)
+#define PSB_2D_PAT_555RGB (0x00040000)
+#define PSB_2D_PAT_1555ARGB (0x00048000)
+#define PSB_2D_PAT_565RGB (0x00050000)
+#define PSB_2D_PAT_0888ARGB (0x00058000)
+#define PSB_2D_PAT_8888ARGB (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
+#define PSB_2D_DST_332RGB (0x00030000)
+#define PSB_2D_DST_4444ARGB (0x00038000)
+#define PSB_2D_DST_555RGB (0x00040000)
+#define PSB_2D_DST_1555ARGB (0x00048000)
+#define PSB_2D_DST_565RGB (0x00050000)
+#define PSB_2D_DST_0888ARGB (0x00058000)
+#define PSB_2D_DST_8888ARGB (0x00060000)
+#define PSB_2D_DST_8888AYUV (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT (0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN (1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS (0x0000)
+#define PSB_2D_ROP3_SRC (0xCC)
+#define PSB_2D_ROP3_PAT (0xF0)
+#define PSB_2D_ROP3_DST (0xAA)
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES 2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER 1
+#define PSB_RETURN 2
+#define PSB_TA 3
+
+/* Power management */
+#define PSB_PUNIT_PORT 0x04
+#define PSB_OSPMBA 0x78
+#define PSB_APMBA 0x7a
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PWRGT_VID_ENC_MASK 0x30
+#define PSB_PWRGT_VID_DEC_MASK 0xc
+#define PSB_PWRGT_GL3_MASK 0xc0
+
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
+#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK 0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0 0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 8f371e8d630..f7c17b23983 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I810_WRITE(0x02080, 0x1ffff000);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -888,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -1225,12 +1223,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
}
-}
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- i810_reclaim_buffers(dev, file_priv);
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ i810_driver_reclaim_buffers(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ } else {
+ /* master disappeared, clean up stuff anyway and hope nothing
+ * goes wrong */
+ i810_driver_reclaim_buffers(dev, file_priv);
+ }
+
}
int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d4266bdf6fb..053f1ee5839 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
+static const struct file_operations i810_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -52,20 +63,9 @@ static struct drm_driver driver = {
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &i810_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f48179..6e0acad9e0f 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
/* i810_dma.c */
extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ae6a7c5020..808b255d7fc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_sprite.o \
intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc9..11807989f91 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
@@ -1000,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_drpc_info(struct seq_file *m, void *unused)
+static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -1067,6 +1068,90 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return 0;
}
+static int gen6_drpc_info(struct seq_file *m)
+{
+
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1;
+ int count = 0, ret;
+
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ if (atomic_read(&dev_priv->forcewake_count)) {
+ seq_printf(m, "RC information inaccurate because userspace "
+ "holds a reference \n");
+ } else {
+ /* NB: we cannot use forcewake, else we read the wrong values */
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+ seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+ }
+
+ gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_printf(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_printf(m, "Core Power Down\n");
+ else
+ seq_printf(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_printf(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_printf(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_printf(m, "RC7\n");
+ break;
+ default:
+ seq_printf(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return gen6_drpc_info(m);
+ else
+ return ironlake_drpc_info(m);
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93..5f4d5893e98 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1454,6 +1457,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1495,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
@@ -2295,6 +2308,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2..8f7187915b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
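+ /* Drain any pending ack, request the wake (upper 16 bits are the write-enable mask), then wait for the ack */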
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -788,6 +810,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -821,21 +858,7 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -900,12 +923,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b97980..602bc80baab 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -206,6 +207,8 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -221,6 +224,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -334,6 +339,8 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+ uint32_t last_acthd_bsd;
+ uint32_t last_acthd_blt;
uint32_t last_instdone;
uint32_t last_instdone1;
@@ -347,6 +354,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -710,6 +718,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -998,11 +1007,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@@ -1308,6 +1317,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1366,8 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8359dc77704..e55badb2d86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
|| atomic_read(&dev_priv->mm.wedged));
ring->irq_put(ring);
- } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
ret = -EBUSY;
ring->waiting_seqno = 0;
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000)) {
+ ret = -EBUSY;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f..65e1f0043f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Disable semaphores on SNB */
+ if (INTEL_INFO(dev)->gen == 6)
+ return 0;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -954,6 +971,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+ return 0;
+
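+ /* One MI_LOAD_REGISTER_IMM per SO buffer: zero all four GEN7 stream-out write offsets */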
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
@@ -967,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
+ u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -1004,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1017,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring,
- I915_EXEC_CONSTANTS_MASK << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
@@ -1159,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b40004b5597..5d433fc11ac 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
- crtc->y * crtc->fb->pitch +
+ crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}
- if (IS_GEN6(dev) &&
- (tmp & RING_WAIT_SEMAPHORE)) {
- DRM_ERROR("Kicking stuck semaphore on %s\n",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
- return true;
- }
return false;
}
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1;
+ uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
bool err = false;
if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
}
if (INTEL_INFO(dev)->gen < 4) {
- acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
} else {
- acthd = I915_READ(ACTHD_I965);
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
+ acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+ acthd_bsd = HAS_BSD(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+ acthd_blt = HAS_BLT(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_acthd_bsd == acthd_bsd &&
+ dev_priv->last_acthd_blt == acthd_blt &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->hangcheck_count = 0;
dev_priv->last_acthd = acthd;
+ dev_priv->last_acthd_bsd = acthd_bsd;
+ dev_priv->last_acthd_blt = acthd_blt;
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
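The hangcheck rework above stops reading ACTHD registers directly and instead samples the active-head pointer of every initialized ring, escalating only when none of them has moved. A condensed sketch of the per-tick comparison (helper name hypothetical, fields as used in the hunk):

static bool no_ring_progress(drm_i915_private_t *dev_priv,
                             u32 acthd, u32 acthd_bsd, u32 acthd_blt)
{
        return dev_priv->last_acthd == acthd &&
               dev_priv->last_acthd_bsd == acthd_bsd &&
               dev_priv->last_acthd_blt == acthd_blt;
}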
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc82400..c3afb783cb9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,6 +442,7 @@
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -2321,6 +2322,7 @@
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@@ -2459,6 +2461,8 @@
#define WM3_LP_ILK 0x45110
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
@@ -2675,6 +2679,140 @@
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_RGBX (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -2882,6 +3020,10 @@
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
+#define IVB_CHICKEN3 0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
@@ -3303,10 +3445,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3447,8 +3589,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
@@ -3478,7 +3642,11 @@
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
@@ -3535,6 +3703,14 @@
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -3547,17 +3723,23 @@
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define GEN5_HDMIW_HDMIEDID_A 0xE2050
-#define GEN5_AUD_CNTL_ST_A 0xE20B4
-#define GEN5_ELD_BUFFER_SIZE (0x1f << 10)
-#define GEN5_ELD_ADDRESS (0x1f << 5)
-#define GEN5_ELD_ACK (1 << 4)
-#define GEN5_AUD_CNTL_ST2 0xE20C0
-#define GEN5_ELD_VALIDB (1 << 0)
-#define GEN5_CP_READYB (1 << 1)
-
-#define GEN7_HDMIW_HDMIEDID_A 0xE5050
-#define GEN7_AUD_CNTRL_ST_A 0xE50B4
-#define GEN7_AUD_CNTRL_ST2 0xE50C0
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+/* These are the 4 32-bit write offset registers for each stream
+ * output buffer. Each determines the offset from the
+ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#endif /* _I915_REG_H_ */
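The DVS and SPR sprite register blocks added above follow the file's usual paired-register pattern: the pipe A and pipe B instances sit a fixed stride apart, and _PIPE() selects between them by pipe index. The macro itself is defined elsewhere in i915_reg.h; the shape assumed by the new DVSCNTR()/SPRCTL()-style wrappers is:

#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

/* e.g. DVSCNTR(0) resolves to _DVSACNTR (0x72180) and
 * DVSCNTR(1) to _DVSBCNTR (0x73180) */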
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a383..2a3f707caab 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val;
+ u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+ pll_sel = TRANSC_DPLL_ENABLE;
if (pipe > 1)
return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
+ if (pipe == 0)
+ pll_sel |= TRANSC_DPLLA_SEL;
+ else if (pipe == 1)
+ pll_sel |= TRANSC_DPLLB_SEL;
+
+
+ if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ return;
+
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 fbc_ctl, fbc_ctl2;
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitch < cfb_pitch)
- cfb_pitch = fb->pitch;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-static void sandybridge_update_wm(struct drm_device *dev)
+void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (!single_plane_enabled(enabled))
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
@@ -4619,6 +4628,149 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+
+ line_time_us = (sprite_width * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for the sprite */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -4659,6 +4811,16 @@ static void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -4670,6 +4832,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4844,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4923,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5189,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5137,7 +5317,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5480,7 +5660,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -5804,14 +5984,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
-
drm_vblank_post_modeset(dev, pipe);
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ if (ret)
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ else
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
return ret;
}
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5828,6 +6039,12 @@ static void g4x_write_eld(struct drm_connector *connector,
else
eldv = G4X_ELDV_DEVCTG;
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
@@ -5858,14 +6075,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_cntl_st;
int aud_cntrl_st2;
- if (IS_IVYBRIDGE(connector->dev)) {
- hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
- aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN5_AUD_CNTL_ST_A;
- aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
i = to_intel_crtc(crtc)->pipe;
@@ -5879,14 +6096,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
- eldv = GEN5_ELD_VALIDB;
- eldv |= GEN5_ELD_VALIDB << 4;
- eldv |= GEN5_ELD_VALIDB << 8;
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
- eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
}
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
@@ -5894,13 +6122,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!eld[0])
return;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
-
i = I915_READ(aud_cntl_st);
- i &= ~GEN5_ELD_ADDRESS;
+ i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
@@ -6280,7 +6503,7 @@ static struct drm_display_mode load_detect_mode = {
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
@@ -6322,7 +6545,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -6331,9 +6554,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
- mode_cmd.depth = depth;
- mode_cmd.bpp = bpp;
- mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = 0;
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
@@ -6354,11 +6577,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
fb = &dev_priv->fbdev->ifb.base;
- if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
return NULL;
- if (obj->base.size < mode->vdisplay * fb->pitch)
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
return fb;
@@ -6991,7 +7214,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7008,7 +7231,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -7032,7 +7255,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7046,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
@@ -7079,7 +7302,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7114,7 +7337,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(fb->pitches[0] | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7150,7 +7373,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto out;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
- intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
@@ -7189,11 +7412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7212,10 +7440,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7238,7 +7462,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7470,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7574,7 +7799,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
@@ -7582,21 +7807,25 @@ int intel_framebuffer_init(struct drm_device *dev,
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
- if (mode_cmd->pitch & 63)
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
- case 8:
- case 16:
- /* Only pre-ILK can handle 5:5:5 */
- if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ /* RGB formats are common across chipsets */
break;
-
- case 24:
- case 32:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
break;
default:
+ DRM_ERROR("unsupported pixel format\n");
return -EINVAL;
}
@@ -7614,11 +7843,12 @@ int intel_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -7887,6 +8117,31 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Disable RC6 on Sandybridge
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +8178,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -7950,7 +8205,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_HW_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -8205,6 +8460,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -8372,7 +8631,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8750,34 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the BIOS hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero, which will be
+ * (correctly) interpreted by the test below as MT forcewake being
+ * disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
@@ -8510,6 +8797,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8523,6 +8811,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8706,7 +8995,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
drm_mode_config_init(dev);
@@ -8736,6 +9025,12 @@ void intel_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
+ if (HAS_PCH_SPLIT(dev)) {
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_ERROR("plane %d init failed: %d\n",
+ i, ret);
+ }
}
/* Just disable it once at startup */
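As a cross-check on sandybridge_compute_sprite_wm() above, the small-buffer method can be pulled out as a standalone sketch. The numbers in the trailing comment are purely illustrative and are not the values held in sandybridge_display_wm_info:

static int sprite_wm_small_buffer(int clock_khz, int pixel_size,
                                  int latency_ns, int sprite_width,
                                  int fifo_size, int cacheline_size,
                                  int guard_size)
{
        int entries = ((clock_khz * pixel_size / 1000) * latency_ns) / 1000;
        int tlb_miss = fifo_size * cacheline_size - sprite_width * 8;

        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cacheline_size);
        return entries + guard_size;
}

/* e.g. clock_khz=148500, pixel_size=4, latency_ns=700, sprite_width=1920,
 * fifo_size=512, cacheline_size=64, guard_size=2 gives
 * DIV_ROUND_UP(415 + 17408, 64) + 2 = 281, which the caller then clamps
 * to the plane's max_wm. */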
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad93..db3b461ad41 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* don't miss the required PLL settings for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
@@ -1833,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
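The check added to intel_dp_mode_valid() above no longer rejects a mode as soon as its 24bpp bandwidth exceeds the link; it retries the same arithmetic at 18bpp (6bpc per channel) and, if that fits, tags the mode with INTEL_MODE_DP_FORCE_6BPC so the mode-set path dithers the pipe down instead. A worked example with hypothetical numbers (the 432000 link budget is illustrative, standing in for whatever intel_dp_max_data_rate() returns for the configured lanes and link clock):

/*
 *   24bpp: (200000 * 24 + 9) / 10 = 480000  -> over budget
 *   18bpp: (200000 * 18 + 9) / 10 = 360000  -> fits
 * so a 200 MHz mode survives with INTEL_MODE_DP_FORCE_6BPC set rather
 * than returning MODE_CLOCK_HIGH.
 */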
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73d..1348705faf6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
+#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ if (W && drm_can_sleep()) msleep(W); \
} \
ret__; \
})
@@ -47,13 +48,6 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define MSLEEP(x) do { \
- if (in_dbg_master()) \
- mdelay(x); \
- else \
- msleep(x); \
-} while (0)
-
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@@ -110,6 +104,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -176,10 +171,32 @@ struct intel_crtc {
bool use_pll_a;
};
+struct intel_plane {
+ struct drm_plane base;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool primary_disabled;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define DIP_HEADER_SIZE 5
@@ -289,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -359,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -379,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
#endif /* __INTEL_DRV_H__ */
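The _wait_for() tweak above swaps the old in_atomic()/in_dbg_master() test for drm_can_sleep(), but the calling convention is unchanged: a non-zero return means the condition never became true within the timeout. A hypothetical caller, built only from registers that appear elsewhere in this patch:

static void example_wait_pll_off(struct drm_i915_private *dev_priv, int pipe)
{
        /* poll for up to 50 ms, msleep()ing between reads only when
         * drm_can_sleep() says sleeping is safe */
        if (wait_for((I915_READ(PCH_DPLL(pipe)) & DPLL_VCO_ENABLE) == 0, 50))
                DRM_ERROR("PCH PLL %c did not turn off\n", pipe ? 'B' : 'A');
}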
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ec49bae7338..571375a3eef 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
+
+ /* Be sure to shut off any planes that may be active */
+ list_for_each_entry(plane, &config->plane_list, head)
+ plane->funcs->disable_plane(plane);
}
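The intelfb_create() hunk above is representative of the drm_mode_fb_cmd to drm_mode_fb_cmd2 switch made throughout the patch: the old bpp/depth/pitch triple becomes a FourCC pixel_format plus per-plane pitches[]. A minimal sketch of filling the new structure for a linear 32bpp/depth-24 buffer (helper name hypothetical; drm_mode_legacy_fb_format() is the same translation the hunk itself uses):

static void fill_fb_cmd2(struct drm_mode_fb_cmd2 *cmd,
                         unsigned int width, unsigned int height)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->width = width;
        cmd->height = height;
        /* 32bpp at depth 24 maps to DRM_FORMAT_XRGB8888 */
        cmd->pixel_format = drm_mode_legacy_fb_format(32, 24);
        /* single plane, 64-byte aligned stride, as in intelfb_create() */
        cmd->pitches[0] = ALIGN(width * 4, 64);
}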
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d4f5a0b2120..64541f7ef90 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~SDVO_ENABLE;
+ temp &= ~enable_bits;
} else {
- temp |= SDVO_ENABLE;
+ temp |= enable_bits;
}
I915_WRITE(intel_hdmi->sdvox_reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520d..e44191132ac 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a..04d79fd1dc9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
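The intel_panel.c changes above replace the IS_PINEVIEW special cases with a single gen < 4 rule: on the older parts the duty-cycle value is stored shifted up by one bit, so the maximum is read with a 17-bit shift and levels are written shifted up by one. With a hypothetical BLC_PWM_CTL value of 0x12345678 that works out to:

/*
 *   max = 0x12345678 >> 17 = 0x091a   (gen >= 4 would use >> 16 = 0x1234)
 *   get = (ctl & BACKLIGHT_DUTY_CYCLE_MASK) >> 1
 *   set = I915_WRITE(BLC_PWM_CTL,
 *                    (ctl & ~BACKLIGHT_DUTY_CYCLE_MASK) | (level << 1))
 */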
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca70e2f1044..77e729d4e4f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
+ if (INTEL_INFO(dev)->gen >= 6) {
+ I915_WRITE(INSTPM,
+ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+ }
+
return ret;
}
@@ -787,6 +792,17 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
+gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ /* The BLT ring on IVB appears to have broken synchronization
+ * between the seqno write and the interrupt, so that the
+ * interrupt appears first. Returning false here makes
+ * i915_wait_request() do a polling loop, instead.
+ */
+ return false;
+}
+
+static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
struct drm_device *dev = ring->dev;
@@ -1119,7 +1135,16 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
}
trace_i915_ring_wait_begin(ring);
- end = jiffies + 3 * HZ;
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
+ else
+ end = jiffies + 3 * HZ;
+
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
@@ -1552,5 +1577,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
*ring = gen6_blt_ring;
+ if (IS_GEN7(dev))
+ ring->irq_get = gen7_blt_ring_get_irq;
+
return intel_init_ring_buffer(dev, ring);
}
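The gen7_blt_ring_get_irq() comment above depends on the waiter having a polling path when no interrupt can be armed. A minimal sketch of such a fallback, assuming only the ring callbacks visible in this file; the helper itself is hypothetical, not part of the patch:

        static int wait_seqno_polled(struct intel_ring_buffer *ring,
                                     u32 seqno, unsigned long timeout)
        {
                unsigned long end = jiffies + timeout;

                do {
                        /* get_seqno() returns the last seqno the ring completed */
                        if (i915_seqno_passed(ring->get_seqno(ring), seqno))
                                return 0;
                        cpu_relax();
                } while (time_before(jiffies, end));

                return -EBUSY;  /* give up, the hangcheck timer takes over */
        }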
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aef..f7b9268df26 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 4aa6f343e49..6b7b22f4d63 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 00000000000..d13989fda50
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ int pixel_size;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ sprctl |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* must disable */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+ sprctl |= SPRITE_DEST_KEY;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ dev_priv->sprite_scaling_enabled = true;
+ sandybridge_update_wm(dev);
+ intel_wait_for_vblank(dev, pipe);
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ sandybridge_update_wm(dev);
+ }
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(SPRLINOFF(pipe), offset);
+ }
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_WRITE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe, pixel_size;
+ u32 dvscntr, dvsscale = 0;
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_RGBX;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ /* must disable */
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(DVSLINOFF(pipe), offset);
+ }
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_WRITE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ int ret = 0;
+ int x = src_x >> 16, y = src_y >> 16;
+ int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+ bool disable_primary = false;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ return -EINVAL;
+
+ if (crtc_x >= primary_w || crtc_y >= primary_h)
+ return -EINVAL;
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe)
+ return -EINVAL;
+
+ /*
+ * Clamp the width & height into the visible area. Note we don't
+ * try to scale the source if part of the visible region is offscreen.
+ * The caller must handle that by adjusting source offset and size.
+ */
+ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+ crtc_w += crtc_x;
+ crtc_x = 0;
+ }
+ if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+ goto out;
+ if ((crtc_x + crtc_w) > primary_w)
+ crtc_w = primary_w - crtc_x;
+
+ if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+ crtc_h += crtc_y;
+ crtc_y = 0;
+ }
+ if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+ goto out;
+ if (crtc_y + crtc_h > primary_h)
+ crtc_h = primary_h - crtc_y;
+
+ if (!crtc_w || !crtc_h) /* Again, nothing to display */
+ goto out;
+
+ /*
+ * We can take a larger source and scale it down, but
+ * only so much... 16x is the max on SNB.
+ */
+ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+ return -EINVAL;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ if ((crtc_x == 0) && (crtc_y == 0) &&
+ (crtc_w == primary_w) && (crtc_h == primary_h))
+ disable_primary = true;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ DRM_ERROR("failed to pin object\n");
+ goto out_unlock;
+ }
+
+ intel_plane->obj = obj;
+
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary && intel_plane->primary_disabled) {
+ intel_enable_primary(crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+ crtc_w, crtc_h, x, y, src_w, src_h);
+
+ if (disable_primary) {
+ intel_disable_primary(crtc);
+ intel_plane->primary_disabled = true;
+ }
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ mutex_unlock(&dev->struct_mutex);
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ mutex_lock(&dev->struct_mutex);
+ }
+ i915_gem_object_unpin(old_obj);
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (intel_plane->primary_disabled) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(intel_plane->obj);
+ intel_plane->obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+out:
+
+ return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ DRM_ERROR("new plane code only for SNB+\n");
+ return -ENODEV;
+ }
+
+ intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ if (!intel_plane)
+ return -ENOMEM;
+
+ if (IS_GEN6(dev)) {
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = snb_update_plane;
+ intel_plane->disable_plane = snb_disable_plane;
+ intel_plane->update_colorkey = snb_update_colorkey;
+ intel_plane->get_colorkey = snb_get_colorkey;
+ } else if (IS_GEN7(dev)) {
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+ }
+
+ intel_plane->pipe = pipe;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs, snb_plane_formats,
+ ARRAY_SIZE(snb_plane_formats), false);
+ if (ret)
+ kfree(intel_plane);
+
+ return ret;
+}
+
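A worked example of the max_downscale check in intel_update_plane() above; the helper is made up for illustration, and the limits come from intel_plane_init(): 16 on SNB, 2 on IVB.

        static bool sprite_downscale_ok(u32 src_w, u32 src_h,
                                        u32 crtc_w, u32 crtc_h,
                                        int max_downscale)
        {
                /* Integer downscale factor: a 3840x2160 source shown in a
                 * 960x540 window gives (3840*2160)/(960*540) = 16, right at
                 * the SNB limit; shrinking it any further is rejected. */
                return (src_w * src_h) / (crtc_w * crtc_h) <= max_downscale;
        }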
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 33daa29eea6..f9a925d5881 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
+static const struct file_operations mga_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mga_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &mga_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 35ef5b1e356..9f27e3d9e69 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_ramht.o \
+ nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o \
+ nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
- nv84_crypt.o \
+ nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
+ nv84_bsp.o \
+ nv84_vp.o \
+ nv98_ppp.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5fc201b49d3..e5cbead85e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,6 +27,7 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
@@ -34,9 +35,6 @@
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
#define EDID1_LEN 128
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40) {
- if (port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
- }
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
if (i2c_index == 0xff) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
- int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = dcb->i2c_default_indices;
+ int idx = dcb_entry_idx_from_crtchead(dev);
+ i2c_index = NV_I2C_DEFAULT(0);
if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
- shift = 4;
-
- i2c_index = (default_indices >> shift) & 0xf;
+ i2c_index = NV_I2C_DEFAULT(1);
}
- if (i2c_index == 0x80) /* g80+ */
- i2c_index = dcb->i2c_default_indices & 0xf;
- else
- if (i2c_index == 0x81)
- i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
- if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
- NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
- return NULL;
- }
-
- /* Make sure i2c table entry has been parsed, it may not
- * have been if this is a bus not referenced by a DCB encoder
- */
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
switch (cond) {
case 0:
- {
- struct dcb_connector_table_entry *ent =
- &bios->dcb.connector.entry[dcb->connector];
-
- if (ent->type != DCB_CONNECTOR_eDP)
+ entry = dcb_conn(dev, dcb->connector);
+ if (!entry || entry[0] != DCB_CONNECTOR_eDP)
iexec->execute = false;
- }
break;
case 1:
case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 1;
}
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- u32 r, s, v;
-
- /* Not a clue, needs de-magicing */
- r = nv50_gpio_ctl[gpio->line >> 4];
- s = (gpio->line & 0x0f);
- v = bios_rd32(bios, r) & ~(0x00010001 << s);
- switch ((gpio->entry & 0x06000000) >> 25) {
- case 1:
- v |= (0x00000001 << s);
- break;
- case 2:
- v |= (0x00010000 << s);
- break;
- default:
- break;
- }
-
- bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- u32 v, i;
-
- v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
- v &= 0xffffff00;
- v |= (gpio->entry & 0x00ff0000) >> 16;
- bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
- i = (gpio->entry & 0x1f000000) >> 24;
- if (i) {
- v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
- v &= 0xffffff00;
- v |= gpio->line;
- bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
- }
-}
-
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* each GPIO according to various values listed in each entry
*/
- struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int i;
-
- if (dev_priv->card_type < NV_50) {
- NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
- return 1;
- }
-
- if (!iexec->execute)
- return 1;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
- BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
- BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
- offset, gpio->tag, gpio->state_default);
-
- if (!bios->execute)
- continue;
-
- pgpio->set(bios->dev, gpio->tag, gpio->state_default);
- if (dev_priv->card_type < NV_D0)
- init_gpio_unknv50(bios, gpio);
- else
- init_gpio_unknvd0(bios, gpio);
- }
+ if (iexec->execute && bios->execute)
+ nouveau_gpio_reset(bios->dev);
return 1;
}
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
break;
}
- /* Dell Latitude D620 reports a too-high value for the dual-link
- * transition freq, causing us to program the panel incorrectly.
- *
- * It doesn't appear the VBIOS actually uses its transition freq
- * (90000kHz), instead it uses the "Number of LVDS channels" field
- * out of the panel ID structure (http://www.spwg.org/).
- *
- * For the moment, a quirk will do :)
- */
- if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
- bios->fp.duallink_transition_clk = 80000;
-
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
break;
}
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
{ PLL_CORE , 0x004028 },
{ PLL_SHADER, 0x004020 },
{ PLL_MEMORY, 0x004008 },
- { PLL_UNK05 , 0x004030 },
+ { PLL_VDEC , 0x004030 },
{ PLL_UNK41 , 0x00e818 },
{ PLL_VPLL0 , 0x614100 },
{ PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
struct nvbios *bios = &dev_priv->vbios;
u8 entries, *entry;
+ if (bios->type != NVBIOS_BIT)
+ return -ENODEV;
+
entries = bios->data[bios->offset + 10];
entry = &bios->data[bios->offset + 12];
while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
bit->version = entry[1];
bit->length = ROM16(entry[2]);
bit->offset = ROM16(entry[4]);
- bit->data = ROMPTR(bios, entry[4]);
+ bit->data = ROMPTR(dev, entry[4]);
return 0;
}
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- if (bios->data[legacy_i2c_offset + 4])
- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- if (bios->data[legacy_i2c_offset + 5])
- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- if (bios->data[legacy_i2c_offset + 6])
- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- if (bios->data[legacy_i2c_offset + 7])
- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
- if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
- NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
- return NULL;
- }
-
- return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- int i;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- if (bios->dcb.gpio.entry[i].tag != tag)
- continue;
+ u8 *dcb = NULL;
- return &bios->dcb.gpio.entry[i];
+ if (dev_priv->card_type > NV_04)
+ dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+ if (!dcb) {
+ NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+ return NULL;
}
- return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_entry *e;
- u8 headerlen, entries, recordlen;
- u8 *dcb, *gpio = NULL, *entry;
- int i;
-
- dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x41) {
+ NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+ return NULL;
+ } else
if (dcb[0] >= 0x30) {
- gpio = ROMPTR(bios, dcb[10]);
- if (!gpio)
- goto no_table;
-
- headerlen = gpio[1];
- entries = gpio[2];
- recordlen = gpio[3];
+ if (ROM32(dcb[6]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
- gpio = ROMPTR(bios, dcb[-15]);
- if (!gpio)
- goto no_table;
-
- headerlen = 3;
- entries = gpio[2];
- recordlen = gpio[1];
+ if (dcb[0] >= 0x20) {
+ if (ROM32(dcb[4]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22) {
- /* No GPIO table present, parse the TVDAC GPIO data. */
- uint8_t *tvdac_gpio = &dcb[-5];
-
- if (tvdac_gpio[0] & 1) {
- e = new_gpio_entry(bios);
- e->tag = DCB_GPIO_TVDAC0;
- e->line = tvdac_gpio[1] >> 4;
- e->invert = tvdac_gpio[0] & 2;
- }
-
- goto no_table;
+ if (dcb[0] >= 0x15) {
+ if (!memcmp(&dcb[-7], "DEV_REC", 7))
+ return dcb;
} else {
- NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
- goto no_table;
- }
-
- entry = gpio + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- e = new_gpio_entry(bios);
- if (!e)
- break;
-
- if (gpio[0] < 0x40) {
- e->entry = ROM16(entry[0]);
- e->tag = (e->entry & 0x07e0) >> 5;
- if (e->tag == 0x3f) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x001f);
- e->invert = ((e->entry & 0xf800) >> 11) != 4;
- } else {
- e->entry = ROM32(entry[0]);
- e->tag = (e->entry & 0x0000ff00) >> 8;
- if (e->tag == 0xff) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x0000001f) >> 0;
- if (gpio[0] == 0x40) {
- e->state_default = (e->entry & 0x01000000) >> 24;
- e->state[0] = (e->entry & 0x18000000) >> 27;
- e->state[1] = (e->entry & 0x60000000) >> 29;
- } else {
- e->state_default = (e->entry & 0x00000080) >> 7;
- e->state[0] = (entry[4] >> 4) & 3;
- e->state[1] = (entry[4] >> 6) & 3;
- }
- }
- }
-
-no_table:
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- e = new_gpio_entry(bios);
- if (e) {
- e->tag = DCB_GPIO_TVDAC0;
- e->line = 4;
- }
- }
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_connector_table_entry *cte;
-
- if (index >= bios->dcb.connector.entries)
- return NULL;
-
- cte = &bios->dcb.connector.entry[index];
- if (cte->type == 0xff)
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+ * always has the same single (crt) entry, even when tv-out
+ * present, so the conclusion is this version cannot really
+ * be used.
+ *
+ * v1.2 tables (some NV6/10, and NV15+) normally have the
+ * same 5 entries, which are not specific to the card and so
+ * no use.
+ *
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c
+ * table pointer, so use the indices parsed in
+ * parse_bmp_structure.
+ *
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
return NULL;
+ }
- return cte;
+ NV_WARNONCE(dev, "DCB header validation failed\n");
+ return NULL;
}
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
{
- struct dcb_table *dcb = &bios->dcb;
- unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].connector == index)
- encoders |= (1 << dcb->entry[i].type);
- }
-
- if (encoders & (1 << OUTPUT_DP)) {
- if (encoders & (1 << OUTPUT_TMDS))
- type = DCB_CONNECTOR_DP;
- else
- type = DCB_CONNECTOR_eDP;
- } else
- if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DCB_CONNECTOR_DVI_I;
- else
- type = DCB_CONNECTOR_DVI_D;
- } else
- if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DCB_CONNECTOR_VGA;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30) {
+ if (idx < dcb[2])
+ return dcb + dcb[1] + (idx * dcb[3]);
} else
- if (encoders & (1 << OUTPUT_LVDS)) {
- type = DCB_CONNECTOR_LVDS;
+ if (dcb && dcb[0] >= 0x20) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 8 + (idx * 8);
+ if (i2c && ent < i2c)
+ return ent;
} else
- if (encoders & (1 << OUTPUT_TV)) {
- type = DCB_CONNECTOR_TV_0;
+ if (dcb && dcb[0] >= 0x15) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 4 + (idx * 10);
+ if (i2c && ent < i2c)
+ return ent;
}
- return type;
+ return NULL;
}
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
- struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
- struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+ int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+ int ret, idx = -1;
+ u8 *outp = NULL;
+ while ((outp = dcb_outp(dev, ++idx))) {
+ if (ROM32(outp[0]) == 0x00000000)
+ break; /* seen on an NV11 with DCB v1.5 */
+ if (ROM32(outp[0]) == 0xffffffff)
+ break; /* seen on an NV17 with DCB v2.0 */
+
+ if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+ continue;
+ if ((outp[0] & 0x0f) == OUTPUT_EOL)
+ break;
- /* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
+ ret = exec(dev, data, idx, outp);
+ if (ret)
+ return ret;
}
- /* Gigabyte GV-NX86T512H */
- if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
- }
+ return 0;
}
-static const u8 hpd_gpio[16] = {
- 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
{
- struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->dcb.connector;
- struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
- uint8_t *entry;
- int i;
-
- if (!bios->dcb.connector_table_ptr) {
- NV_DEBUG_KMS(dev, "No DCB connector table present\n");
- return;
- }
-
- NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
- conntab[0], conntab[1], conntab[2], conntab[3]);
- if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
- (conntab[3] != 2 && conntab[3] != 4)) {
- NV_ERROR(dev, " Unknown! Please report.\n");
- return;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+ u8 *conntab = ROMPTR(dev, dcb[0x14]);
+ if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+ return conntab;
}
+ return NULL;
+}
- ct->entries = conntab[2];
-
- entry = conntab + conntab[1];
- cte = &ct->entry[0];
- for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
- cte->index = i;
- if (conntab[3] == 2)
- cte->entry = ROM16(entry[0]);
- else
- cte->entry = ROM32(entry[0]);
-
- cte->type = (cte->entry & 0x000000ff) >> 0;
- cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
- cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
- cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
- if (cte->type == 0xff)
- continue;
-
- apply_dcb_connector_quirks(bios, i);
-
- NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
- i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
- /* check for known types, fallback to guessing the type
- * from attached encoders if we hit an unknown.
- */
- switch (cte->type) {
- case DCB_CONNECTOR_VGA:
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- case DCB_CONNECTOR_DVI_I:
- case DCB_CONNECTOR_DVI_D:
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_DP:
- case DCB_CONNECTOR_eDP:
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- break;
- default:
- cte->type = divine_connector_type(bios, cte->index);
- NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
- break;
- }
-
- if (nouveau_override_conntype) {
- int type = divine_connector_type(bios, cte->index);
- if (type != cte->type)
- NV_WARN(dev, " -> type 0x%02x\n", cte->type);
- }
-
- }
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+ u8 *conntab = dcb_conntab(dev);
+ if (conntab && idx < conntab[2])
+ return conntab + conntab[1] + (idx * conntab[3]);
+ return NULL;
}
static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (dcb->version >= 0x40)
- entry->connector = (conn >> 12) & 0xf;
+ entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
- uint32_t conn, uint32_t conf)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
- bool ret;
-
- if (dcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
- else
- ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
- if (!ret)
- return ret;
-
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
- return true;
-}
-
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#endif
/* Make up some sane defaults */
- fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+ bios->legacy.i2c_indices.crt, 1, 1);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ fabricate_dcb_output(dcb, OUTPUT_TV,
+ bios->legacy.i2c_indices.tv,
all_heads, 0);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
- fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ fabricate_dcb_output(dcb, OUTPUT_TMDS,
+ bios->legacy.i2c_indices.panel,
all_heads, 1);
}
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &bios->dcb;
- uint16_t dcbptr = 0, i2ctabptr = 0;
- uint8_t *dcbtable;
- uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
- bool configblock = true;
- int recordlength = 8, confofs = 4;
- int i;
-
- /* get the offset from 0x36 */
- if (dev_priv->card_type > NV_04) {
- dcbptr = ROM16(bios->data[0x36]);
- if (dcbptr == 0x0000)
- NV_WARN(dev, "No output data (DCB) found in BIOS\n");
- }
-
- /* this situation likely means a really old card, pre DCB */
- if (dcbptr == 0x0) {
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
- }
-
- dcbtable = &bios->data[dcbptr];
-
- /* get DCB version */
- dcb->version = dcbtable[0];
- NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- dcb->version >> 4, dcb->version & 0xf);
-
- if (dcb->version >= 0x20) { /* NV17+ */
- uint32_t sig;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+ u32 conn = ROM32(outp[0]);
+ bool ret;
- if (dcb->version >= 0x30) { /* NV40+ */
- headerlen = dcbtable[1];
- entries = dcbtable[2];
- recordlength = dcbtable[3];
- i2ctabptr = ROM16(dcbtable[4]);
- sig = ROM32(dcbtable[6]);
- dcb->gpio_table_ptr = ROM16(dcbtable[10]);
- dcb->connector_table_ptr = ROM16(dcbtable[20]);
- } else {
- i2ctabptr = ROM16(dcbtable[2]);
- sig = ROM32(dcbtable[4]);
- headerlen = 8;
- }
+ if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+ struct dcb_entry *entry = new_dcb_entry(dcb);
- if (sig != 0x4edcbdcb) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%08X)\n", sig);
- return -EINVAL;
- }
- } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
- char sig[8] = { 0 };
+ NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
- strncpy(sig, (char *)&dcbtable[-7], 7);
- i2ctabptr = ROM16(dcbtable[2]);
- recordlength = 10;
- confofs = 6;
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+ if (!ret)
+ return 1; /* stop parsing */
- if (strcmp(sig, "DEV_REC")) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%s)\n", sig);
- return -EINVAL;
- }
- } else {
- /*
- * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
- * has the same single (crt) entry, even when tv-out present, so
- * the conclusion is this version cannot really be used.
- * v1.2 tables (some NV6/10, and NV15+) normally have the same
- * 5 entries, which are not specific to the card and so no use.
- * v1.2 does have an I2C table that read_dcb_i2c_table can
- * handle, but cards exist (nv11 in #14821) with a bad i2c table
- * pointer, so use the indices parsed in parse_bmp_structure.
- * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ /* Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
*/
- NV_TRACEWARN(dev, "No useful information in BIOS output table; "
- "adding all possible outputs\n");
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
+ if (entry->type == OUTPUT_TV &&
+ entry->location == DCB_LOC_ON_CHIP)
+ entry->i2c_index = 0x0f;
}
- if (!i2ctabptr)
- NV_WARN(dev, "No pointer to DCB I2C port table\n");
- else {
- dcb->i2c_table = &bios->data[i2ctabptr];
- if (dcb->version >= 0x30)
- dcb->i2c_default_indices = dcb->i2c_table[4];
+ return 0;
+}
- /*
- * Parse the "management" I2C bus, used for hardware
- * monitoring and some external TMDS transmitters.
- */
- if (dcb->version >= 0x22) {
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf :
- 2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+ struct dcb_table *dcbt = &bios->dcb;
+ u8 map[16] = { };
+ int i, idx = 0;
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- idx, &dcb->i2c[idx]);
- }
+ /* heuristic: if we ever get a non-zero connector field, assume
+ * that all the indices are valid and we don't need to fake them.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector)
+ return;
}
- if (entries > DCB_MAX_NUM_ENTRIES)
- entries = DCB_MAX_NUM_ENTRIES;
-
- for (i = 0; i < entries; i++) {
- uint32_t connection, config = 0;
-
- connection = ROM32(dcbtable[headerlen + recordlength * i]);
- if (configblock)
- config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
- /* seen on an NV11 with DCB v1.5 */
- if (connection == 0x00000000)
- break;
+ /* no useful connector info available, we need to make it up
+ * ourselves. the rule here is: anything on the same i2c bus
+ * is considered to be on the same connector. any output
+ * without an associated i2c bus is assigned its own unique
+ * connector index.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ u8 i2c = dcbt->entry[i].i2c_index;
+ if (i2c == 0x0f) {
+ dcbt->entry[i].connector = idx++;
+ } else {
+ if (!map[i2c])
+ map[i2c] = ++idx;
+ dcbt->entry[i].connector = map[i2c] - 1;
+ }
+ }
- /* seen on an NV17 with DCB v2.0 */
- if (connection == 0xffffffff)
- break;
+ /* if we created more than one connector, destroy the connector
+ * table - just in case it has random, rather than stub, entries.
+ */
+ if (i > 1) {
+ u8 *conntab = dcb_conntab(bios->dev);
+ if (conntab)
+ conntab[0] = 0x00;
+ }
+}
- if ((connection & 0x0000000f) == 0x0000000f)
- continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ u8 *dcbt, *conn;
+ int idx;
+
+ dcbt = dcb_table(dev);
+ if (!dcbt) {
+ /* handle pre-DCB boards */
+ if (bios->type == NVBIOS_BMP) {
+ fabricate_dcb_encoder_table(dev, bios);
+ return 0;
+ }
- if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
- continue;
+ return -EINVAL;
+ }
- NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
- dcb->entries, connection, config);
+ NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
- if (!parse_dcb_entry(dev, dcb, connection, config))
- break;
- }
+ dcb->version = dcbt[0];
+ dcb_outp_foreach(dev, NULL, parse_dcb_entry);
/*
* apart for v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (!dcb->entries)
return -ENXIO;
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
- return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
- /*
- * DCB 3.0 also has the table in most cases, but there are some cards
- * where the table is filled with stub entries, and the DCB entriy
- * indices are all 0. We don't need the connector indices on pre-G80
- * chips (yet?) so limit the use to DCB 4.0 and above.
- */
- if (dcb->version >= 0x40)
- return;
-
- dcb->connector.entries = 0;
-
- /*
- * No known connector info before v3.0, so make it up. the rule here
- * is: anything on the same i2c bus is considered to be on the same
- * connector. any output without an associated i2c bus is assigned
- * its own unique connector index.
- */
- for (i = 0; i < dcb->entries; i++) {
- /*
- * Ignore the I2C index for on-chip TV-out, as there
- * are cards with bogus values (nv31m in bug 23212),
- * and it's otherwise useless.
- */
- if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP)
- dcb->entry[i].i2c_index = 0xf;
- i2c = dcb->entry[i].i2c_index;
-
- if (i2c_conn[i2c]) {
- dcb->entry[i].connector = i2c_conn[i2c] - 1;
- continue;
+ /* dump connector table entries to log, if any exist */
+ idx = -1;
+ while ((conn = dcb_conn(dev, ++idx))) {
+ if (conn[0] != 0xff) {
+ NV_TRACE(dev, "DCB conn %02d: ", idx);
+ if (dcb_conntab(dev)[3] < 4)
+ printk("%04x\n", ROM16(conn[0]));
+ else
+ printk("%08x\n", ROM32(conn[0]));
}
-
- dcb->entry[i].connector = dcb->connector.entries++;
- if (i2c != 0xf)
- i2c_conn[i2c] = dcb->connector.entries;
- }
-
- /* Fake the connector table as well as just connector indices */
- for (i = 0; i < dcb->connector.entries; i++) {
- dcb->connector.entry[i].index = i;
- dcb->connector.entry[i].type = divine_connector_type(bios, i);
- dcb->connector.entry[i].gpio_tag = 0xff;
- }
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
}
+ dcb_fake_connectors(bios);
+ return 0;
}
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
return ret;
}
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_i2c_entry *entry;
- int i;
-
- entry = &bios->dcb.i2c[0];
- for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
- nouveau_i2c_fini(dev, entry);
-}
-
static bool
nouveau_bios_posted(struct drm_device *dev)
{
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios);
+ ret = nouveau_i2c_init(dev);
if (ret)
return ret;
- fixup_legacy_i2c(bios);
- fixup_legacy_connector(bios);
+ ret = nouveau_mxm_init(dev);
+ if (ret)
+ return ret;
+
+ ret = parse_dcb_table(dev, bios);
+ if (ret)
+ return ret;
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
@@ -6971,5 +6498,6 @@ nouveau_bios_init(struct drm_device *dev)
void
nouveau_bios_takedown(struct drm_device *dev)
{
- nouveau_bios_i2c_devices_takedown(dev);
+ nouveau_mxm_fini(dev);
+ nouveau_i2c_fini(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8adb69e4a6b..1e382ad5a2b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,14 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({ \
+ struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+ ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+})
struct bit_entry {
uint8_t id;
@@ -48,30 +53,12 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
-struct dcb_i2c_entry {
- uint32_t entry;
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
-
enum dcb_gpio_tag {
DCB_GPIO_TVDAC0 = 0xc,
DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
- uint32_t entry;
- uint8_t state_default;
- uint8_t state[2];
-};
-
-struct dcb_gpio_table {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+ DCB_GPIO_PWM_FAN = 0x9,
+ DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_UNUSED = 0xff
};
enum dcb_connector_type {
@@ -90,20 +77,6 @@ enum dcb_connector_type {
DCB_CONNECTOR_NONE = 0xff
};
-struct dcb_connector_table_entry {
- uint8_t index;
- uint32_t entry;
- enum dcb_connector_type type;
- uint8_t index2;
- uint8_t gpio_tag;
- void *drm;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
enum dcb_type {
OUTPUT_ANALOG = 0,
OUTPUT_TV = 1,
@@ -111,6 +84,7 @@ enum dcb_type {
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+ OUTPUT_UNUSED = 15,
OUTPUT_ANY = -1
};
@@ -155,18 +129,8 @@ struct dcb_entry {
struct dcb_table {
uint8_t version;
-
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
- uint8_t *i2c_table;
- uint8_t i2c_default_indices;
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
- uint16_t gpio_table_ptr;
- struct dcb_gpio_table gpio;
- uint16_t connector_table_ptr;
- struct dcb_connector_table connector;
};
enum nouveau_or {
@@ -195,7 +159,7 @@ enum pll_types {
PLL_SHADER = 0x02,
PLL_UNK03 = 0x03,
PLL_MEMORY = 0x04,
- PLL_UNK05 = 0x05,
+ PLL_VDEC = 0x05,
PLL_UNK40 = 0x40,
PLL_UNK41 = 0x41,
PLL_UNK42 = 0x42,
@@ -333,4 +297,11 @@ struct nvbios {
} legacy;
};
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+ int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
#endif
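The header above drops the old fixed-size DCB/GPIO/connector structures in favour of raw-table accessors. As a minimal sketch of how the new dcb_outp_foreach() iterator declared above might be used (the helper names here are hypothetical and not part of this patch; only the prototypes come from the header):

/* Hypothetical helper: count DCB output entries via the new iterator. */
static int
count_one_outp(struct drm_device *dev, void *data, int idx, u8 *outp)
{
	int *count = data;

	(*count)++;
	return 0; /* a non-zero return would presumably stop the walk */
}

static int
count_dcb_outputs(struct drm_device *dev)
{
	int count = 0;

	dcb_outp_foreach(dev, &count, count_one_outp);
	return count;
}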
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7cc37e69086..724b41a2b9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ size_t acc_size;
int ret;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
+ acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+ sizeof(struct nouveau_bo));
+
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
*mem = val;
}
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
- return ttm_agp_backend_init(bdev, dev->agp->bridge);
+ return ttm_agp_tt_create(bdev, dev->agp->bridge,
+ size, page_flags, dummy_read_page);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
- return nouveau_sgdma_init_ttm(dev);
+ return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+ dummy_read_page);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
if (mem->mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, node);
else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
- node, node->pages);
+ nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
return 0;
}
@@ -801,19 +809,18 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
nouveau_vm_map(vma, new_mem->mm_node);
} else
- if (new_mem->mem_type == TTM_PL_TT &&
+ if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
nouveau_vm_map_sg(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
- node, node->pages);
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1044,8 +1051,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
nouveau_fence_unref(&old_fence);
}
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate((void *)ttm, dev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+ while (i--) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ttm_dma->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate((void *)ttm, dev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm_dma->dma_address[i]) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
- .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .ttm_tt_create = &nouveau_ttm_tt_create,
+ .ttm_tt_populate = &nouveau_ttm_tt_populate,
+ .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1184,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
else
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+ nouveau_vm_map_sg(vma, 0, size, node);
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
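The population path added above maps every TTM page for DMA and must unwind cleanly on a partial failure. A self-contained sketch of that unwind idiom, assuming the same PCI DMA calls used in nouveau_ttm_tt_populate() (the helper itself is hypothetical, not part of this patch); note that "while (i--)" walks back over indices [0, i), including index 0, without risking an unsigned underflow:

static int
map_ttm_pages(struct pci_dev *pdev, struct page **pages,
	      dma_addr_t *dma, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		dma[i] = pci_map_page(pdev, pages[i], 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pdev, dma[i])) {
			/* unmap everything mapped so far, including index 0 */
			while (i--)
				pci_unmap_page(pdev, dma[i], PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
	}
	return 0;
}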
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index bb6ec9ef867..a018defb762 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_dma_pre_init(chan);
chan->user_put = 0x40;
chan->user_get = 0x44;
+ if (dev_priv->card_type >= NV_50)
+ chan->user_get_hi = 0x60;
/* disable the fifo caches */
pfifo->reassign(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index cea6696b190..f3ce34be082 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -35,6 +35,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
-/*TODO: This could use improvement, and learn to handle the fixed
- * BIOS tables etc. It's fine currently, for its only user.
- */
-int
-nouveau_connector_bpp(struct drm_connector *connector)
-{
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
-
- if (nv_connector->edid && nv_connector->edid->revision >= 4) {
- u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
- if (bpc > 4)
- return bpc;
- }
-
- return 18;
-}
-
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_nouveau_private *dev_priv;
- struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
- pgpio = &dev_priv->engine.gpio;
- if (pgpio->irq_unregister) {
- pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug, connector);
}
kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
+ if (dev_priv->card_type >= NV_50) {
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ } else
if (nv_encoder->dcb->type == OUTPUT_LVDS ||
nv_encoder->dcb->type == OUTPUT_TMDS) {
connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
struct nouveau_encoder *nv_encoder;
int type;
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -420,15 +406,21 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
+ struct nouveau_crtc *nv_crtc;
int ret;
+ nv_crtc = NULL;
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
/* Scaling mode */
if (property == dev->mode_config.scaling_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
bool modeset = false;
switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
modeset = true;
nv_connector->scaling_mode = value;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
if (!nv_crtc)
return 0;
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
if (!ret)
return -EINVAL;
} else {
- ret = nv_crtc->set_scale(nv_crtc, value, true);
+ ret = nv_crtc->set_scale(nv_crtc, true);
if (ret)
return ret;
}
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
return 0;
}
- /* Dithering */
- if (property == dev->mode_config.dithering_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
+ /* Underscan */
+ if (property == disp->underscan_property) {
+ if (nv_connector->underscan != value) {
+ nv_connector->underscan = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
- if (value == DRM_MODE_DITHERING_ON)
- nv_connector->use_dithering = true;
- else
- nv_connector->use_dithering = false;
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_hborder_property) {
+ if (nv_connector->underscan_hborder != value) {
+ nv_connector->underscan_hborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_vborder_property) {
+ if (nv_connector->underscan_vborder != value) {
+ nv_connector->underscan_vborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ /* Dithering */
+ if (property == disp->dithering_mode) {
+ nv_connector->dithering_mode = value;
+ if (!nv_crtc || !nv_crtc->set_dither)
+ return 0;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ return nv_crtc->set_dither(nv_crtc, true);
+ }
+ if (property == disp->dithering_depth) {
+ nv_connector->dithering_depth = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
- return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
- true);
+ return nv_crtc->set_dither(nv_crtc, true);
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
return modes;
}
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct drm_display_mode *mode = nv_connector->native_mode;
+ bool duallink;
+
+ /* if the edid is feeling nice enough to provide this info, use it */
+ if (nv_connector->edid && connector->display_info.bpc)
+ return;
+
+ /* if not, we're out of options unless we're LVDS, default to 6bpc */
+ connector->display_info.bpc = 6;
+ if (nv_encoder->dcb->type != OUTPUT_LVDS)
+ return;
+
+ /* LVDS: panel straps */
+ if (bios->fp_no_ddc) {
+ if (bios->fp.if_is_24bit)
+ connector->display_info.bpc = 8;
+ return;
+ }
+
+ /* LVDS: DDC panel, need to first determine the number of links to
+ * know which if_is_24bit flag to check...
+ */
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+ duallink = ((u8 *)nv_connector->edid)[121] == 2;
+ else
+ duallink = mode->clock >= bios->fp.duallink_transition_clk;
+
+ if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+ ( duallink && (bios->fp.strapless_is_24bit & 2)))
+ connector->display_info.bpc = 8;
+}
+
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
* the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = 1;
}
+ /* Determine LVDS colour depth, must happen after determining
+ * "native" mode as some VBIOS tables require us to use the
+ * pixel clock as part of the lookup...
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
if (nv_encoder->dcb->type == OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
- nv_connector->dcb->type == DCB_CONNECTOR_eDP)
+ if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+ nv_connector->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case OUTPUT_DP:
max_clock = nv_encoder->dp.link_nr;
max_clock *= nv_encoder->dp.link_bw;
- clock = clock * nouveau_connector_bpp(connector) / 10;
+ clock = clock * (connector->display_info.bpc * 3) / 10;
break;
default:
BUG_ON(1);
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+ switch (dcb) {
+ case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
+ case DCB_CONNECTOR_TV_0 :
+ case DCB_CONNECTOR_TV_1 :
+ case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
+ case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
+ case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
+ case DCB_CONNECTOR_LVDS :
+ case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+ case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
+ case DCB_CONNECTOR_HDMI_0 :
+ case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
+ default:
+ break;
+ }
+
+ return DRM_MODE_CONNECTOR_Unknown;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = NULL;
- struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
int type, ret = 0;
+ bool dummy;
NV_DEBUG_KMS(dev, "\n");
- if (index >= dev_priv->vbios.dcb.connector.entries)
- return ERR_PTR(-EINVAL);
-
- dcb = &dev_priv->vbios.dcb.connector.entry[index];
- if (dcb->drm)
- return dcb->drm;
-
- switch (dcb->type) {
- case DCB_CONNECTOR_VGA:
- type = DRM_MODE_CONNECTOR_VGA;
- break;
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- case DCB_CONNECTOR_DVI_I:
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case DCB_CONNECTOR_DVI_D:
- type = DRM_MODE_CONNECTOR_DVID;
- break;
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- type = DRM_MODE_CONNECTOR_HDMIA;
- break;
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- type = DRM_MODE_CONNECTOR_LVDS;
- funcs = &nouveau_connector_funcs_lvds;
- break;
- case DCB_CONNECTOR_DP:
- type = DRM_MODE_CONNECTOR_DisplayPort;
- break;
- case DCB_CONNECTOR_eDP:
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default:
- NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
- return ERR_PTR(-EINVAL);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ nv_connector = nouveau_connector(connector);
+ if (nv_connector->index == index)
+ return connector;
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
- nv_connector->dcb = dcb;
+
connector = &nv_connector->base;
+ nv_connector->index = index;
+
+ /* attempt to parse vbios connector type and hotplug gpio */
+ nv_connector->dcb = dcb_conn(dev, index);
+ if (nv_connector->dcb) {
+ static const u8 hpd[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+ };
+
+ u32 entry = ROM16(nv_connector->dcb[0]);
+ if (dcb_conntab(dev)[3] >= 4)
+ entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+ nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
+ nv_connector->hpd = hpd[nv_connector->hpd];
+
+ nv_connector->type = nv_connector->dcb[0];
+ if (drm_conntype_from_dcb(nv_connector->type) ==
+ DRM_MODE_CONNECTOR_Unknown) {
+ NV_WARN(dev, "unknown connector type %02x\n",
+ nv_connector->type);
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ }
- /* defaults, will get overridden in detect() */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
+ /* Gigabyte NX85T */
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
- drm_connector_init(dev, connector, funcs, type);
- drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* Gigabyte GV-NX86T512H */
+ if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
+ } else {
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ nv_connector->hpd = DCB_GPIO_UNUSED;
+ }
+
+ /* no vbios data, or an unknown dcb connector type - attempt to
+ * figure out something suitable ourselves
+ */
+ if (nv_connector->type == DCB_CONNECTOR_NONE) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+ u32 encoders = 0;
+ int i;
+
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector == nv_connector->index)
+ encoders |= (1 << dcbt->entry[i].type);
+ }
- /* Check if we need dithering enabled */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- bool dummy, is_24bit = false;
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ nv_connector->type = DCB_CONNECTOR_DP;
+ else
+ nv_connector->type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ else
+ nv_connector->type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ nv_connector->type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ nv_connector->type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ nv_connector->type = DCB_CONNECTOR_TV_0;
+ }
+ }
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+ type = drm_conntype_from_dcb(nv_connector->type);
+ if (type == DRM_MODE_CONNECTOR_LVDS) {
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling "
- "LVDS\n");
- goto fail;
+ NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
- nv_connector->use_dithering = !is_24bit;
+ funcs = &nouveau_connector_funcs_lvds;
+ } else {
+ funcs = &nouveau_connector_funcs;
}
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_init(dev, connector, funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
/* Init DVI-I specific properties */
- if (dcb->type == DCB_CONNECTOR_DVI_I) {
- drm_mode_create_dvi_i_properties(dev);
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I)
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs */
+ if (disp->underscan_property &&
+ (nv_connector->type == DCB_CONNECTOR_DVI_D ||
+ nv_connector->type == DCB_CONNECTOR_DVI_I ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
+ nv_connector->type == DCB_CONNECTOR_DP)) {
+ drm_connector_attach_property(connector,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_connector_attach_property(connector,
+ disp->underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(connector,
+ disp->underscan_vborder_property,
+ 0);
}
- switch (dcb->type) {
+ switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
- drm_connector_attach_property(connector,
- dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ?
- DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_mode,
+ nv_connector->dithering_mode);
+ }
+ if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_depth,
+ nv_connector->dithering_depth);
+ }
break;
}
- if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
- pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug,
+ connector);
+ if (ret == 0)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
}
drm_sysfs_connector_add(connector);
-
- dcb->drm = connector;
- return dcb->drm;
-
-fail:
- drm_connector_cleanup(connector);
- kfree(connector);
- return ERR_PTR(ret);
-
+ return connector;
}
static void
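The hotplug-GPIO lookup in nouveau_connector_create() above packs its decoding into two dense lines. A hypothetical stand-alone version of the same decoding, for clarity only (the hpd[] table and the 0x07033000 mask are taken verbatim from the code above; the helper is not part of this patch):

/* Decode the hotplug GPIO function tag from a DCB connector entry. */
static u8
conn_entry_to_hpd(u32 entry)
{
	static const u8 hpd[16] = {
		0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
	};

	/* ffs() is 1-based; 0 (no bit set) indexes the 0xff "unused" slot */
	return hpd[ffs((entry & 0x07033000) >> 12)];
}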
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 711b1e9203a..e4857021304 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,13 +30,43 @@
#include "drm_edid.h"
#include "nouveau_i2c.h"
+enum nouveau_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+};
+
struct nouveau_connector {
struct drm_connector base;
+ enum dcb_connector_type type;
+ u8 index;
+ u8 *dcb;
+ u8 hpd;
- struct dcb_connector_table_entry *dcb;
-
+ int dithering_mode;
+ int dithering_depth;
int scaling_mode;
- bool use_dithering;
+ enum nouveau_underscan_type underscan;
+ u32 underscan_hborder;
+ u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
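For reference, the composite dithering modes defined above evaluate to 0x11 (dynamic 2x2), 0x19 (static 2x2) and 0x21 (temporal): bit 0 carries the on/off state and the upper bits select the pattern, which per the comment above is intended to mirror the nv50/nvd0 hardware encoding.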
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index bf8e1289953..686f6b4a1da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -32,8 +32,6 @@ struct nouveau_crtc {
int index;
- struct drm_display_mode *mode;
-
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+ int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, bool update);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e1592368cc..fa2ec491f6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, "channel id : %d\n", chan->id);
seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " base: 0x%010llx\n", chan->pushbuf_base);
seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+ { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb427..3cb52bc52b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,8 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
int
nouveau_framebuffer_init(struct drm_device *dev,
struct nouveau_framebuffer *nv_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
if (!tile_flags) {
if (dev_priv->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
else
- nv_fb->r_pitch = 0x01000000 | fb->pitch;
+ nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
} else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
- nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
}
}
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
@@ -147,11 +149,186 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
return &nouveau_fb->base;
}
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+
+struct drm_prop_enum_list {
+ u8 gen_mask;
+ int type;
+ char *name;
+};
+
+static struct drm_prop_enum_list underscan[] = {
+ { 6, UNDERSCAN_AUTO, "auto" },
+ { 6, UNDERSCAN_OFF, "off" },
+ { 6, UNDERSCAN_ON, "on" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_mode[] = {
+ { 7, DITHERING_MODE_AUTO, "auto" },
+ { 7, DITHERING_MODE_OFF, "off" },
+ { 1, DITHERING_MODE_ON, "on" },
+ { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+ { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+ { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_depth[] = {
+ { 6, DITHERING_DEPTH_AUTO, "auto" },
+ { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+ { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+ {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do { \
+ struct drm_prop_enum_list *l = (list); \
+ int c = 0; \
+ while (l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) \
+ c++; \
+ l++; \
+ } \
+ if (c) { \
+ p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
+ l = (list); \
+ c = 0; \
+ while (p && l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) { \
+ drm_property_add_enum(p, c, l->type, l->name); \
+ c++; \
+ } \
+ l++; \
+ } \
+ } \
+} while(0)
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+ int ret;
+
+ ret = disp->init(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+
+ /* disable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+ }
+
+ drm_kms_helper_poll_disable(dev);
+ disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ int ret, gen;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
+
+ if (dev_priv->card_type < NV_50)
+ gen = 0;
+ else
+ if (dev_priv->card_type < NV_D0)
+ gen = 1;
+ else
+ gen = 2;
+
+ PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+ PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+ PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+ disp->underscan_hborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
+ disp->underscan_hborder_property->values[0] = 0;
+ disp->underscan_hborder_property->values[1] = 128;
+
+ disp->underscan_vborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
+ disp->underscan_vborder_property->values[0] = 0;
+ disp->underscan_vborder_property->values[1] = 128;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ if (dev_priv->card_type < NV_10) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+
+ drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_disable(dev);
+
+ ret = disp->create(dev);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+
+ drm_vblank_cleanup(dev);
+
+ disp->destroy(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+}
+
int
nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
@@ -294,7 +471,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
@@ -305,7 +482,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
- ret = nv50_display_flip_next(crtc, fb, chan);
+ if (dev_priv->card_type >= NV_D0)
+ ret = nvd0_display_flip_next(crtc, fb, chan, 0);
+ else
+ ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
nouveau_channel_put(&chan);
goto fail_unreserve;
@@ -369,3 +549,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
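As a worked example of the dumb-buffer sizing in nouveau_display_dumb_create() above, take a hypothetical 1920x1080 buffer at 32 bpp: pitch = roundup(1920 * 4, 256) = 7680 bytes, and size = roundup(7680 * 1080, PAGE_SIZE) = 8294400 bytes, which already happens to be a multiple of a 4 KiB page.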
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 00bc6eaad55..4c2e4e5925f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
* -EBUSY if timeout exceeded
*/
static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
- uint32_t val;
+ uint64_t val;
val = nvchan_rd32(chan, chan->user_get);
+ if (chan->user_get_hi)
+ val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
- uint32_t cnt = 0, prev_get = 0;
- int ret;
+ uint64_t prev_get = 0;
+ int ret, cnt = 0;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
- uint32_t prev_get = 0, cnt = 0;
- int get;
+ uint64_t prev_get = 0;
+ int cnt = 0, get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de5efe71fef..9b93b703cea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -29,6 +29,7 @@
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
/******************************************************************************
* aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
u8 *
nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
struct bit_entry d;
u8 *table;
int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
return NULL;
}
- table = ROMPTR(bios, d.data[0]);
+ table = ROMPTR(dev, d.data[0]);
if (!table) {
NV_ERROR(dev, "displayport table pointer invalid\n");
return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
}
for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
return table;
}
@@ -336,7 +335,6 @@ struct dp_state {
static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int or = dp->or, link = dp->link;
u8 *entry, sink[2];
u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
* table, that has (among other things) pointers to more scripts that
* need to be executed, this time depending on link speed.
*/
- entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+ entry = ROMPTR(dev, dp->entry[10]);
if (entry) {
if (dp->table[0] < 0x30) {
while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
- dp.auxch = auxch->rd;
+ dp.auxch = auxch->drive;
dp.or = nv_encoder->or;
dp.link = !(nv_encoder->dcb->sorconf.link & 1);
dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
* we take during link training (DP_SET_POWER is one), we need
* to ignore them for the moment to avoid races.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
/* enable down-spreading, if possible */
if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
return true;
}
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
if (!auxch)
return false;
- ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+ ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
if (ret)
return false;
@@ -684,7 +680,7 @@ int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr)
{
- return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
+ return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb129526..e4a7cfe7898 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
int nouveau_ctxfw;
module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n");
+int nouveau_mxmdcb = 1;
+module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(dev, "Disabling fbcon acceleration...\n");
- nouveau_fbcon_save_disable_accel(dev);
+ NV_INFO(dev, "Disabling display...\n");
+ nouveau_display_fini(dev);
+
+ NV_INFO(dev, "Disabling fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ret = dev_priv->eng[e]->fini(dev, e, true);
if (ret) {
- NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+ NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
goto out_abort;
}
}
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pci_set_power_state(pdev, PCI_D3hot);
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 1);
- console_unlock();
- nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- nouveau_fbcon_save_disable_accel(dev);
-
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
- nouveau_pm_resume(dev);
-
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ nouveau_pm_resume(dev);
+
NV_INFO(dev, "Restoring mode...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- engine->display.init(dev);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+ nouveau_fbcon_set_suspend(dev, 0);
+ nouveau_fbcon_zfill_all(dev);
- nv_crtc->cursor.set_offset(nv_crtc, offset);
- nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
- }
+ nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
nv_crtc->lut.depth = 0;
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 0);
- console_unlock();
+ drm_helper_resume_force_mode(dev);
- nouveau_fbcon_zfill_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
- drm_helper_resume_force_mode(dev);
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
- nouveau_fbcon_restore_accel(dev);
return 0;
}
+static const struct file_operations nouveau_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,26 +425,16 @@ static struct drm_driver driver = {
.disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = nouveau_ttm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = nouveau_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &nouveau_driver_fops,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098..38134a9c757 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -163,6 +163,9 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
+#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16
@@ -229,6 +232,7 @@ struct nouveau_channel {
/* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
+ uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
struct nouveau_vma pushbuf_vma;
- uint32_t pushbuf_base;
+ uint64_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
- int (*init)(struct drm_device *);
void (*destroy)(struct drm_device *);
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+
+ struct drm_property *dithering_mode;
+ struct drm_property *dithering_depth;
+ struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
};
struct nouveau_gpio_engine {
- void *priv;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- int (*get)(struct drm_device *, enum dcb_gpio_tag);
- int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
- int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ spinlock_t lock;
+ struct list_head isr;
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+ int (*drive)(struct drm_device *, int line, int dir, int out);
+ int (*sense)(struct drm_device *, int line);
+ void (*irq_enable)(struct drm_device *, int line, bool);
};
struct nouveau_pm_voltage_level {
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
u32 copy;
u32 daemon;
u32 vdec;
- u32 unk05; /* nv50:nva3, roughly.. */
+ u32 dom6;
u32 unka0; /* nva3:nvc0 */
u32 hub01; /* nvc0- */
u32 hub06; /* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
int nr_timing;
};
+struct nouveau_pm_fan {
+ u32 min_duty;
+ u32 max_duty;
+ u32 pwm_freq;
+};
+
struct nouveau_pm_engine {
struct nouveau_pm_voltage voltage;
struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
struct nouveau_pm_memtimings memtimings;
struct nouveau_pm_temp_sensor_constants sensor_constants;
struct nouveau_pm_threshold_temp threshold_temp;
+ struct nouveau_pm_fan fan;
+ u32 pwm_divisor;
struct nouveau_pm_level boot;
struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
struct device *hwmon;
struct notifier_block acpi_nb;
- int (*clock_get)(struct drm_device *, u32 id);
- void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
- void (*clock_set)(struct drm_device *, void *);
-
int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
- void (*clocks_set)(struct drm_device *, void *);
+ int (*clocks_set)(struct drm_device *, void *);
int (*voltage_get)(struct drm_device *);
int (*voltage_set)(struct drm_device *, int voltage);
- int (*fanspeed_get)(struct drm_device *);
- int (*fanspeed_set)(struct drm_device *, int fanspeed);
+ int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+ int (*pwm_set)(struct drm_device *, int line, u32, u32);
int (*temp_get)(struct drm_device *);
};
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
struct nouveau_vm *chan_vm;
struct nvbios vbios;
+ u8 *mxms;
+ struct list_head i2c_ports;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1072,8 +1086,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
struct dcb_entry *, int crtc);
extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
- enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1103,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
enum LVDS_script, int pxclk);
bool bios_encoder_match(struct dcb_entry *, u32 hash);
+/* nouveau_mxm.c */
+int nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
/* nouveau_ttm.c */
int nouveau_ttm_global_init(struct drm_nouveau_private *);
void nouveau_ttm_global_release(struct drm_nouveau_private *);
int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
/* nouveau_dp.c */
int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr);
@@ -1222,6 +1241,9 @@ extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
+/* nv98_crypt.c */
+extern int nv98_crypt_create(struct drm_device *dev);
+
/* nva3_copy.c */
extern int nva3_copy_create(struct drm_device *dev);
@@ -1234,6 +1256,17 @@ extern int nv31_mpeg_create(struct drm_device *dev);
/* nv50_mpeg.c */
extern int nv50_mpeg_create(struct drm_device *dev);
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int nv98_ppp_create(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1344,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_display_early_init(struct drm_device *);
extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
/* nvd0_display.c */
extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *, u32 swap_interval);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,31 +1451,40 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
/* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
int nouveau_vblank_enable(struct drm_device *dev, int crtc);
void nouveau_vblank_disable(struct drm_device *dev, int crtc);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
int *N1, int *M1, int *N2, int *M2, int *P);
int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1559,6 +1607,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do { \
+ static int _warned = 0; \
+ if (!_warned) { \
+ NV_WARN(d, fmt, ##arg); \
+ _warned = 1; \
+ } \
+} while(0)
/* nouveau_reg_debug bitmask */
enum {
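
The nouveau_pm hooks at the top of this header hunk drop the percentage-based fanspeed_get/fanspeed_set pair in favour of raw pwm_get/pwm_set hooks that take a GPIO line and two u32 values. A minimal caller sketch, assuming those two values are a PWM divider and duty count and that the engine struct is reached via dev_priv->engine.pm (both assumptions, not spelled out in this hunk):

/* hypothetical helper on top of the new hooks; the 'divs'/'duty' naming is
 * illustrative only */
static int
example_fan_set_percent(struct drm_device *dev, int line, int percent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	u32 divs, duty;
	int ret;

	if (!pm->pwm_get || !pm->pwm_set)
		return -ENODEV;

	ret = pm->pwm_get(dev, line, &divs, &duty);	/* current divider/duty */
	if (ret < 0)
		return ret;

	duty = divs * percent / 100;			/* rescale duty */
	return pm->pwm_set(dev, line, divs, duty);
}
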
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 95c843e684b..f3fb649fe45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+ struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3a4cc32b9e4..9892218d745 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
+#include <linux/console.h>
#include "drmP.h"
#include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct pci_dev *pdev = dev->pdev;
struct device *device = &pdev->dev;
int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
- size = mode_cmd.pitch * mode_cmd.height;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ console_lock();
+ if (state == 0)
+ nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_restore_accel(dev);
+ console_unlock();
}
void nouveau_fbcon_zfill_all(struct drm_device *dev)
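
The fbcon changes above follow the drm core's switch from drm_mode_fb_cmd (bpp/depth/pitch) to drm_mode_fb_cmd2 (per-plane pitches[] plus a FOURCC pixel_format), and the suspend hook now takes console_lock() around the accel save/restore and the fb_set_suspend() call. A short sketch of the new fill with concrete numbers for a 1024x768, 32bpp/depth-24 console (values purely illustrative):

struct drm_mode_fb_cmd2 mode_cmd = {};

mode_cmd.width        = 1024;
mode_cmd.height       = 768;
mode_cmd.pitches[0]   = roundup(1024 * (32 >> 3), 256);    /* 4096 bytes */
mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24); /* XRGB8888 */
/* size = 4096 * 768 = 3 MiB, then rounded up to PAGE_SIZE */
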
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644
index 00000000000..a580cc62337
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_gpio.h"
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (dcb) {
+ if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+ return ROMPTR(dev, dcb[0x0a]);
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+ return ROMPTR(dev, dcb[-15]);
+ }
+ return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+ u8 *table = dcb_gpio_table(dev);
+ if (table) {
+ *version = table[0];
+ if (*version < 0x30 && ent < table[2])
+ return table + 3 + (ent * table[1]);
+ else if (ent < table[2])
+ return table + table[1] + (ent * table[3]);
+ }
+ return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+ struct gpio_func *gpio)
+{
+ u8 *table, *entry, version;
+ int i = -1;
+
+ if (line == 0xff && func == 0xff)
+ return -EINVAL;
+
+ while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+ if (version < 0x40) {
+ u16 data = ROM16(entry[0]);
+ *gpio = (struct gpio_func) {
+ .line = (data & 0x001f) >> 0,
+ .func = (data & 0x07e0) >> 5,
+ .log[0] = (data & 0x1800) >> 11,
+ .log[1] = (data & 0x6000) >> 13,
+ };
+ } else
+ if (version < 0x41) {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x1f,
+ .func = entry[1],
+ .log[0] = (entry[3] & 0x18) >> 3,
+ .log[1] = (entry[3] & 0x60) >> 5,
+ };
+ } else {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x3f,
+ .func = entry[1],
+ .log[0] = (entry[4] & 0x30) >> 4,
+ .log[1] = (entry[4] & 0xc0) >> 6,
+ };
+ }
+
+ if ((line == 0xff || line == gpio->line) &&
+ (func == 0xff || func == gpio->func))
+ return 0;
+ }
+
+ /* DCB 2.2, fixed TVDAC GPIO data */
+ if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = table[-4] >> 4,
+ .log[0] = !!(table[-5] & 2),
+ .log[1] = !(table[-5] & 2),
+ };
+ return 0;
+ }
+ }
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = 4,
+ .log[0] = 0,
+ .log[1] = 1,
+ };
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ int dir = !!(gpio.log[state] & 0x02);
+ int out = !!(gpio.log[state] & 0x01);
+ ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ ret = nouveau_gpio_sense(dev, idx, gpio.line);
+ if (ret >= 0)
+ ret = (ret == (gpio.log[1] & 1));
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ if (idx == 0 && pgpio->irq_enable)
+ pgpio->irq_enable(dev, gpio.line, on);
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct gpio_isr {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ int idx;
+ struct gpio_func func;
+ void (*handler)(void *, int);
+ void *data;
+ bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+ struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+ struct drm_device *dev = isr->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ unsigned long flags;
+ int state;
+
+ state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+ if (state >= 0)
+ isr->handler(isr->data, state);
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ isr->inhibit = false;
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+
+ if (idx != 0)
+ return;
+
+ spin_lock(&pgpio->lock);
+ list_for_each_entry(isr, &pgpio->isr, head) {
+ if (line_mask & (1 << isr->func.line)) {
+ if (isr->inhibit)
+ continue;
+ isr->inhibit = true;
+ schedule_work(&isr->work);
+ }
+ }
+ spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+ unsigned long flags;
+ int ret;
+
+ isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+ if (!isr)
+ return -ENOMEM;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+ if (ret) {
+ kfree(isr);
+ return ret;
+ }
+
+ INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+ isr->dev = dev;
+ isr->handler = handler;
+ isr->data = data;
+ isr->idx = idx;
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_add(&isr->head, &pgpio->isr);
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+ return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr, *tmp;
+ struct gpio_func func;
+ unsigned long flags;
+ LIST_HEAD(tofree);
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+ if (ret == 0) {
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+ if (memcmp(&isr->func, &func, sizeof(func)) ||
+ isr->idx != idx ||
+ isr->handler != handler || isr->data != data)
+ continue;
+ list_move(&isr->head, &tofree);
+ }
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+
+ list_for_each_entry_safe(isr, tmp, &tofree, head) {
+ flush_work_sync(&isr->work);
+ kfree(isr);
+ }
+ }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ INIT_LIST_HEAD(&pgpio->isr);
+ spin_lock_init(&pgpio->lock);
+
+ return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ nouveau_gpio_fini(dev);
+ BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ int ret = 0;
+
+ if (pgpio->init)
+ ret = pgpio->init(dev);
+
+ return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ if (pgpio->fini)
+ pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 *entry, version;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+ u8 func = 0xff, line, defs, unk0, unk1;
+ if (version >= 0x41) {
+ defs = !!(entry[0] & 0x80);
+ line = entry[0] & 0x3f;
+ func = entry[1];
+ unk0 = entry[2];
+ unk1 = entry[3] & 0x1f;
+ } else
+ if (version >= 0x40) {
+ line = entry[0] & 0x1f;
+ func = entry[1];
+ defs = !!(entry[3] & 0x01);
+ unk0 = !!(entry[3] & 0x02);
+ unk1 = !!(entry[3] & 0x04);
+ } else {
+ break;
+ }
+
+ if (func == 0xff)
+ continue;
+
+ nouveau_gpio_func_set(dev, func, defs);
+
+ if (dev_priv->card_type >= NV_D0) {
+ nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ static const u32 regs[] = { 0xe100, 0xe28c };
+ u32 val = (unk1 << 16) | unk0;
+ u32 reg = regs[line >> 4]; line &= 0x0f;
+
+ nv_mask(dev, reg, 0x00010001 << line, val << line);
+ }
+ }
+}
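
nouveau_gpio.c replaces the old per-chipset tag-based get/set entry points with a generic DCB lookup (nouveau_gpio_find) layered over low-level drive/sense hooks; the logic-to-electrical translation comes from the entry's log[] field. A minimal usage sketch through the inline helpers declared in nouveau_gpio.h (next hunk), using DCB_GPIO_TVDAC0 only because it already appears in the lookup code above:

static void
example_tvdac0_gpio(struct drm_device *dev)
{
	if (!nouveau_gpio_func_valid(dev, DCB_GPIO_TVDAC0))
		return;

	/* drive the function to logic state 1, then read it back */
	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, 1);
	if (nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0) == 1)
		NV_DEBUG(dev, "TVDAC0 GPIO asserted\n");
}
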
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644
index 00000000000..64c5cb077ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+struct gpio_func {
+ u8 func;
+ u8 line;
+ u8 log[2];
+};
+
+/* nouveau_gpio.c */
+int nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int nouveau_gpio_drive(struct drm_device *, int idx, int line,
+ int dir, int out);
+int nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+ struct gpio_func *);
+int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+ struct gpio_func func;
+ return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+ return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+ return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
+
+#endif
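
The isr_add/isr_del pair gives callers level-change notification without touching chipset registers directly: the handler is deferred to a workqueue and receives the already-translated logic state. A sketch of wiring up a hotplug line, where 'hpd_tag' stands in for whatever DCB function tag the connector code would resolve (hypothetical here):

static void
example_hpd_handler(void *data, int state)
{
	struct drm_device *dev = data;
	NV_DEBUG(dev, "hotplug line changed, now %d\n", state);
}

static int
example_hpd_setup(struct drm_device *dev, u8 hpd_tag)
{
	int ret;

	ret = nouveau_gpio_isr_add(dev, 0, hpd_tag, 0xff,
				   example_hpd_handler, dev);
	if (ret == 0)
		ret = nouveau_gpio_irq(dev, 0, hpd_tag, 0xff, true);
	return ret;
}
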
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644
index 00000000000..59ea1c14eca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+static bool
+hdmi_sor(struct drm_encoder *encoder)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ if (dev_priv->chipset < 0xa3)
+ return false;
+ return true;
+}
+
+static inline u32
+hdmi_base(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ if (!hdmi_sor(encoder))
+ return 0x616500 + (nv_crtc->index * 0x800);
+ return 0x61c500 + (nv_encoder->or * 0x800);
+}
+
+static void
+hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
+{
+ nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+}
+
+static u32
+hdmi_rd32(struct drm_encoder *encoder, u32 reg)
+{
+ return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+}
+
+static u32
+hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
+{
+ u32 tmp = hdmi_rd32(encoder, reg);
+ hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
+ return tmp;
+}
+
+static void
+nouveau_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
+ }
+}
+
+static void
+nouveau_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+ int i;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid)) {
+ nouveau_audio_disconnect(encoder);
+ return;
+ }
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
+ nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+ }
+ }
+}
+
+static void
+nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
+{
+ /* calculate checksum for the infoframe */
+ u8 sum = 0, i;
+ for (i = 0; i < frame[2]; i++)
+ sum += frame[i];
+ frame[3] = 256 - sum;
+
+ /* disable infoframe, and write header */
+ hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
+ hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
+
+ /* register scans tell me the audio infoframe has only one set of
+ * subpack regs, according to tegra (gee nvidia, it'd be nice if we
+ * could get those docs too!), the hdmi block pads out the rest of
+ * the packet on its own.
+ */
+ if (ctrl == 0x020)
+ frame[2] = 6;
+
+ /* write out checksum and data, weird weird 7 byte register pairs */
+ for (i = 0; i < frame[2] + 1; i += 7) {
+ u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
+ u32 *subpack = (u32 *)&frame[3 + i];
+ hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
+ hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
+ }
+
+ /* enable the infoframe */
+ hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
+}
+
+static void
+nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
+ const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
+ const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
+ u8 frame[20];
+
+ frame[0x00] = 0x82; /* AVI infoframe */
+ frame[0x01] = 0x02; /* version */
+ frame[0x02] = 0x0d; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
+ frame[0x05] = (C << 6) | (M << 4) | R;
+ frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
+ frame[0x07] = VIC;
+ frame[0x08] = PR;
+ frame[0x09] = bar_top & 0xff;
+ frame[0x0a] = bar_top >> 8;
+ frame[0x0b] = bar_bottom & 0xff;
+ frame[0x0c] = bar_bottom >> 8;
+ frame[0x0d] = bar_left & 0xff;
+ frame[0x0e] = bar_left >> 8;
+ frame[0x0f] = bar_right & 0xff;
+ frame[0x10] = bar_right >> 8;
+ frame[0x11] = 0x00;
+ frame[0x12] = 0x00;
+ frame[0x13] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x020, frame);
+}
+
+static void
+nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
+ const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
+ u8 frame[12];
+
+ frame[0x00] = 0x84; /* Audio infoframe */
+ frame[0x01] = 0x01; /* version */
+ frame[0x02] = 0x0a; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (CT << 4) | CC;
+ frame[0x05] = (SF << 2) | ceaSS;
+ frame[0x06] = FMT;
+ frame[0x07] = CA;
+ frame[0x08] = (DM_INH << 7) | (LSV << 3);
+ frame[0x09] = 0x00;
+ frame[0x0a] = 0x00;
+ frame[0x0b] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x000, frame);
+}
+
+static void
+nouveau_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ nouveau_audio_disconnect(encoder);
+
+ /* disable audio and avi infoframes */
+ hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
+ hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
+
+ /* disable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
+}
+
+void
+nouveau_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 max_ac_packet, rekey;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!mode || !nv_connector || !nv_connector->edid ||
+ !drm_detect_hdmi_monitor(nv_connector->edid)) {
+ nouveau_hdmi_disconnect(encoder);
+ return;
+ }
+
+ nouveau_hdmi_video_infoframe(encoder, mode);
+ nouveau_hdmi_audio_infoframe(encoder, mode);
+
+ hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* value matches nvidia binary driver, and tegra constant */
+ rekey = 56;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* enable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
+ 0x1f000000 | /* unknown */
+ max_ac_packet << 16 |
+ rekey);
+
+ nouveau_audio_mode_set(encoder, mode);
+}
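
For reference, the max_ac_packet arithmetic in nouveau_hdmi_mode_set() works out as follows for a standard 1920x1080 CEA timing (hdisplay 1920, htotal 2200); this is just that timing plugged into the code above:

max_ac_packet  = 2200 - 1920;	/* 280 pixels of horizontal blanking */
max_ac_packet -= 56;		/* rekey */
max_ac_packet -= 18;		/* constant from tegra */
max_ac_packet /= 32;		/* -> 6, shifted left 16 into the 0x0a4 control word */
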
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644
index 00000000000..697687593a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+struct hwsq_ucode {
+ u8 data[0x200];
+ union {
+ u8 *u08;
+ u16 *u16;
+ u32 *u32;
+ } ptr;
+ u16 len;
+
+ u32 reg;
+ u32 val;
+};
+
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+ hwsq->ptr.u08 = hwsq->data;
+ hwsq->reg = 0xffffffff;
+ hwsq->val = 0xffffffff;
+}
+
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+ do {
+ *hwsq->ptr.u08++ = 0x7f;
+ hwsq->len = hwsq->ptr.u08 - hwsq->data;
+ } while (hwsq->len & 3);
+ hwsq->ptr.u08 = hwsq->data;
+}
+
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+ u32 shift = 0;
+ while (usec & ~3) {
+ usec >>= 2;
+ shift++;
+ }
+
+ *hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+ flag += 0x80;
+ if (val >= 0)
+ flag += 0x20;
+ if (val >= 1)
+ flag += 0x20;
+ *hwsq->ptr.u08++ = flag;
+}
+
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+ *hwsq->ptr.u08++ = 0x5f;
+ *hwsq->ptr.u08++ = v0;
+ *hwsq->ptr.u08++ = v1;
+}
+
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+ if (val != hwsq->val) {
+ if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x42;
+ *hwsq->ptr.u16++ = (val & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe2;
+ *hwsq->ptr.u32++ = val;
+ }
+
+ hwsq->val = val;
+ }
+
+ if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x40;
+ *hwsq->ptr.u16++ = (reg & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe0;
+ *hwsq->ptr.u32++ = reg;
+ }
+ hwsq->reg = reg;
+}
+
+#endif
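
A usage sketch for the HWSQ ucode builder above: emit a flag write, a delay and two register writes, then terminate. The register addresses and flag number are placeholders, not values taken from this patch:

struct hwsq_ucode hwsq;

hwsq_init(&hwsq);
hwsq_setf(&hwsq, 0x04, 0);		/* drive flag 4 low (opcode 0xa4) */
hwsq_usec(&hwsq, 8);			/* wait roughly 8us */
hwsq_wr32(&hwsq, 0x00c040, 0x1234);	/* long form: 0xe2 value + 0xe0 register */
hwsq_wr32(&hwsq, 0x00c044, 0x1234);	/* value cached, short 0x40 register form */
hwsq_fini(&hwsq);			/* pad with 0x7f to a 32-bit boundary */
/* hwsq.data[0..hwsq.len) is now ready to be uploaded to the HWSQ engine */
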
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index d39b2202b19..820ae7f5204 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,262 +29,465 @@
#include "nouveau_i2c.h"
#include "nouveau_hw.h"
+#define T_TIMEOUT 2200000
+#define T_RISEFALL 1000
+#define T_HOLD 5000
+
static void
-nv04_i2c_setscl(void *data, int state)
+i2c_drive_scl(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static void
-nv04_i2c_setsda(void *data, int state)
+i2c_drive_sda(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x01);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x10);
+ }
+ return 0;
}
static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x02);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x20);
+ }
+ return 0;
}
static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ udelay((nsec + 500) / 1000);
}
-static void
-nv4e_i2c_setsda(void *data, int state)
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
+ u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+ i2c_drive_scl(port, 1);
+ do {
+ i2c_delay(port, T_RISEFALL);
+ } while (!i2c_sense_scl(port) && --timeout);
- val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ return timeout != 0;
}
static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int ret = 0;
+
+ port->state = i2c_sense_scl(port);
+ port->state |= i2c_sense_sda(port) << 1;
+ if (port->state != 3) {
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 1);
+ if (!i2c_raise_scl(port))
+ ret = -EBUSY;
+ }
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return ret;
}
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_RISEFALL);
+
+ i2c_drive_scl(port, 1);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_HOLD);
}
-static const uint32_t nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ i2c_drive_sda(port, sda);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return 0;
+}
static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int sda;
+
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 2);
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+
+ sda = i2c_sense_sda(port);
+
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return sda;
}
-static void
-nv50_i2c_setscl(void *data, int state)
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, bit;
+
+ *byte = 0;
+ for (i = 7; i >= 0; i--) {
+ bit = i2c_bitr(port);
+ if (bit < 0)
+ return bit;
+ *byte |= bit << i;
+ }
- nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+ return i2c_bitw(port, last ? 1 : 0);
}
-static void
-nv50_i2c_setsda(void *data, int state)
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, ret;
+ for (i = 7; i >= 0; i--) {
+ ret = i2c_bitw(port, !!(byte & (1 << i)));
+ if (ret < 0)
+ return ret;
+ }
- nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
- i2c->data = state;
+ ret = i2c_bitr(port);
+ if (ret == 1) /* nack */
+ ret = -EIO;
+ return ret;
}
static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+ u32 addr = msg->addr << 1;
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+ return i2c_put_byte(port, addr);
}
static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+ struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+ struct i2c_msg *msg = msgs;
+ int ret = 0, mcnt = num;
+
+ while (!ret && mcnt--) {
+ u8 remaining = msg->len;
+ u8 *ptr = msg->buf;
+
+ ret = i2c_start(port);
+ if (ret == 0)
+ ret = i2c_addr(port, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ while (!ret && remaining--)
+ ret = i2c_get_byte(port, ptr++, !remaining);
+ } else {
+ while (!ret && remaining--)
+ ret = i2c_put_byte(port, *ptr++);
+ }
+
+ msg++;
+ }
+
+ i2c_stop(port);
+ return (ret < 0) ? ret : num;
}
-int
-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c;
- int ret;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm i2c_bit_algo = {
+ .master_xfer = i2c_bit_xfer,
+ .functionality = i2c_bit_func
+};
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
- if (entry->chan)
- return -EEXIST;
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+ u8 *dcb = dcb_table(dev), *i2c = NULL;
+ if (dcb) {
+ if (dcb[0] >= 0x15)
+ i2c = ROMPTR(dev, dcb[2]);
+ if (dcb[0] >= 0x30)
+ i2c = ROMPTR(dev, dcb[4]);
+ }
- if (dev_priv->card_type >= NV_50 &&
- dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
- NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
- return -EINVAL;
+ /* early revisions had no version number, use dcb version */
+ if (i2c) {
+ *version = dcb[0];
+ if (*version >= 0x30)
+ *version = i2c[0];
}
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
- if (i2c == NULL)
- return -ENOMEM;
-
- switch (entry->port_type) {
- case 0:
- i2c->bit.setsda = nv04_i2c_setsda;
- i2c->bit.setscl = nv04_i2c_setscl;
- i2c->bit.getsda = nv04_i2c_getsda;
- i2c->bit.getscl = nv04_i2c_getscl;
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- case 4:
- i2c->bit.setsda = nv4e_i2c_setsda;
- i2c->bit.setscl = nv4e_i2c_setscl;
- i2c->bit.getsda = nv4e_i2c_getsda;
- i2c->bit.getscl = nv4e_i2c_getscl;
- i2c->rd = 0x600800 + entry->read;
- i2c->wr = 0x600800 + entry->write;
- break;
- case 5:
- i2c->bit.setsda = nv50_i2c_setsda;
- i2c->bit.setscl = nv50_i2c_setscl;
- if (dev_priv->card_type < NV_D0) {
- i2c->bit.getsda = nv50_i2c_getsda;
- i2c->bit.getscl = nv50_i2c_getscl;
- i2c->rd = nv50_i2c_port[entry->read];
- i2c->wr = i2c->rd;
- } else {
- i2c->bit.getsda = nvd0_i2c_getsda;
- i2c->bit.getscl = nvd0_i2c_getscl;
- i2c->rd = 0x00d014 + (entry->read * 0x20);
- i2c->wr = i2c->rd;
- }
- break;
- case 6:
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- default:
- NV_ERROR(dev, "DCB I2C port type %d unknown\n",
- entry->port_type);
- kfree(i2c);
- return -EINVAL;
+ return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_i2c_chan *port;
+ u8 *i2c, *entry, legacy[2][4] = {};
+ u8 version, entries, recordlen;
+ int ret, i;
+
+ INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+ i2c = i2c_table(dev, &version);
+ if (!i2c) {
+ u8 *bmp = &bios->data[bios->offset];
+ if (bios->type != NVBIOS_BMP)
+ return -ENODEV;
+
+ legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+ legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+ legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+ legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+ /* BMP (from v4.0) has i2c info in the structure, it's in a
+ * fixed location on earlier VBIOS
+ */
+ if (bmp[5] < 4)
+ i2c = &bios->data[0x48];
+ else
+ i2c = &bmp[0x36];
+
+ if (i2c[4]) legacy[0][0] = i2c[4];
+ if (i2c[5]) legacy[0][1] = i2c[5];
+ if (i2c[6]) legacy[1][0] = i2c[6];
+ if (i2c[7]) legacy[1][1] = i2c[7];
}
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "nouveau-%s-%d", pci_name(dev->pdev), index);
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.dev.parent = &dev->pdev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
- if (entry->port_type < 6) {
- i2c->adapter.algo_data = &i2c->bit;
- i2c->bit.udelay = 40;
- i2c->bit.timeout = usecs_to_jiffies(5000);
- i2c->bit.data = i2c;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ if (i2c && version >= 0x30) {
+ entry = i2c[1] + i2c;
+ entries = i2c[2];
+ recordlen = i2c[3];
+ } else
+ if (i2c) {
+ entry = i2c;
+ entries = 16;
+ recordlen = 4;
} else {
- i2c->adapter.algo = &nouveau_dp_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ entry = legacy[0];
+ entries = 2;
+ recordlen = 4;
}
- if (ret) {
- NV_ERROR(dev, "Failed to register i2c %d\n", index);
- kfree(i2c);
- return ret;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (port == NULL) {
+ nouveau_i2c_fini(dev);
+ return -ENOMEM;
+ }
+
+ port->type = entry[3];
+ if (version < 0x30) {
+ port->type &= 0x07;
+ if (port->type == 0x07)
+ port->type = 0xff;
+ }
+
+ if (port->type == 0xff) {
+ kfree(port);
+ continue;
+ }
+
+ switch (port->type) {
+ case 0: /* NV04:NV50 */
+ port->drive = entry[0];
+ port->sense = entry[1];
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 4: /* NV4E */
+ port->drive = 0x600800 + entry[1];
+ port->sense = port->drive;
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 5: /* NV50- */
+ port->drive = entry[0] & 0x0f;
+ if (dev_priv->card_type < NV_D0) {
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+ break;
+ port->drive = nv50_i2c_port[port->drive];
+ port->sense = port->drive;
+ } else {
+ port->drive = 0x00d014 + (port->drive * 0x20);
+ port->sense = port->drive;
+ }
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 6: /* NV50- DP AUX */
+ port->drive = entry[0];
+ port->sense = port->drive;
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ break;
+ default:
+ break;
+ }
+
+ if (!port->adapter.algo) {
+ NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+ i, port->type, port->drive, port->sense);
+ kfree(port);
+ continue;
+ }
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), i);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &dev->pdev->dev;
+ port->dev = dev;
+ port->index = i;
+ port->dcb = ROM32(entry[0]);
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ ret = i2c_add_adapter(&port->adapter);
+ if (ret) {
+ NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+ kfree(port);
+ continue;
+ }
+
+ list_add_tail(&port->head, &dev_priv->i2c_ports);
}
- entry->chan = i2c;
return 0;
}
void
-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+nouveau_i2c_fini(struct drm_device *dev)
{
- if (!entry->chan)
- return;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *port, *tmp;
- i2c_del_adapter(&entry->chan->adapter);
- kfree(entry->chan);
- entry->chan = NULL;
+ list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
}
struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, int index)
+nouveau_i2c_find(struct drm_device *dev, u8 index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
+ struct nouveau_i2c_chan *port;
+
+ if (index == NV_I2C_DEFAULT(0) ||
+ index == NV_I2C_DEFAULT(1)) {
+ u8 version, *i2c = i2c_table(dev, &version);
+ if (i2c && version >= 0x30) {
+ if (index == NV_I2C_DEFAULT(0))
+ index = (i2c[4] & 0x0f);
+ else
+ index = (i2c[4] & 0xf0) >> 4;
+ } else {
+ index = 2;
+ }
+ }
- if (index >= DCB_MAX_NUM_I2C_ENTRIES)
- return NULL;
+ list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+ if (port->index == index)
+ break;
+ }
- if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
- uint32_t reg = 0xe500, val;
+ if (&port->head == &dev_priv->i2c_ports)
+ return NULL;
- if (i2c->port_type == 6) {
- reg += i2c->read * 0x50;
+ if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+ u32 reg = 0x00e500, val;
+ if (port->type == 6) {
+ reg += port->drive * 0x50;
val = 0x2002;
} else {
- reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
val = 0xe001;
}
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
nv_mask(dev, reg + 0x00, 0x0000f003, val);
}
- if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
- return NULL;
- return i2c->chan;
+ return port;
}
bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
- NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+ if (!i2c) {
+ NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+ return -ENODEV;
+ }
- for (i = 0; i2c && info[i].addr; i++) {
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+ for (i = 0; info[i].addr; i++) {
if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
(!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
}
NV_DEBUG(dev, "No devices found.\n");
-
return -ENODEV;
}
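
Every port registered by nouveau_i2c_init() is a plain struct i2c_adapter, either bit-banged through i2c_bit_algo or routed to the DP AUX algorithm, so callers simply go through the regular i2c core. A sketch of a one-byte DDC read; 0x50 is the usual EDID slave address, and 'index' may also be one of the NV_I2C_DEFAULT() values from nouveau_i2c.h:

static int
example_ddc_read_byte(struct drm_device *dev, u8 index, u8 offset, u8 *val)
{
	struct nouveau_i2c_chan *port = nouveau_i2c_find(dev, index);
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	if (!port)
		return -ENODEV;

	return i2c_transfer(&port->adapter, msgs, ARRAY_SIZE(msgs)) == 2 ?
	       0 : -EIO;
}
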
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 422b62fd827..4d2e4e9031b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -27,20 +27,25 @@
#include <linux/i2c-algo-bit.h>
#include "drm_dp_helper.h"
-struct dcb_i2c_entry;
+#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_PORT_NUM 0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- struct i2c_algo_bit_data bit;
- unsigned rd;
- unsigned wr;
- unsigned data;
+ struct list_head head;
+ u8 index;
+ u8 type;
+ u32 dcb;
+ u32 drive;
+ u32 sense;
+ u32 state;
};
-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+int nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct i2c_board_info *info,
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec480770..c3a5745e9c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
+ ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ /* Reset to default value. */
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+ }
+
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
return;
if (P.version == 1)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
else
if (P.version == 2)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
else {
NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
}
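
The first nouveau_mem.c hunk above sets the coherent DMA mask alongside the streaming mask and, if the wide mask can't be honoured, quietly falls back to 32 bits rather than failing device init, since coherent buffers can still be allocated below 4GiB. The pattern in isolation ('pdev' standing for dev->pdev):

if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_bits)))
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
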
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644
index 00000000000..8bccddf4eff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
+#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
+
+static u8 *
+mxms_data(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+	return dev_priv->mxms;
+}
+
+static u16
+mxms_version(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ u16 version = (mxms[4] << 8) | mxms[5];
+	switch (version) {
+ case 0x0200:
+ case 0x0201:
+ case 0x0300:
+ return version;
+ default:
+ break;
+ }
+
+ MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
+ return 0x0000;
+}
+
+static u16
+mxms_headerlen(struct drm_device *dev)
+{
+ return 8;
+}
+
+static u16
+mxms_structlen(struct drm_device *dev)
+{
+ return *(u16 *)&mxms_data(dev)[6];
+}
+
+static bool
+mxms_checksum(struct drm_device *dev)
+{
+ u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
+ u8 *mxms = mxms_data(dev), sum = 0;
+ while (size--)
+ sum += *mxms++;
+ if (sum) {
+ MXM_DBG(dev, "checksum invalid\n");
+ return false;
+ }
+ return true;
+}
+
+static bool
+mxms_valid(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ if (*(u32 *)mxms != 0x5f4d584d) {
+ MXM_DBG(dev, "signature invalid\n");
+ return false;
+ }
+
+ if (!mxms_version(dev) || !mxms_checksum(dev))
+ return false;
+
+ return true;
+}
+
+static bool
+mxms_foreach(struct drm_device *dev, u8 types,
+ bool (*exec)(struct drm_device *, u8 *, void *), void *info)
+{
+ u8 *mxms = mxms_data(dev);
+ u8 *desc = mxms + mxms_headerlen(dev);
+ u8 *fini = desc + mxms_structlen(dev) - 1;
+ while (desc < fini) {
+ u8 type = desc[0] & 0x0f;
+ u8 headerlen = 0;
+ u8 recordlen = 0;
+ u8 entries = 0;
+
+ switch (type) {
+ case 0: /* Output Device Structure */
+ if (mxms_version(dev) >= 0x0300)
+ headerlen = 8;
+ else
+ headerlen = 6;
+ break;
+ case 1: /* System Cooling Capability Structure */
+ case 2: /* Thermal Structure */
+ case 3: /* Input Power Structure */
+ headerlen = 4;
+ break;
+ case 4: /* GPIO Device Structure */
+ headerlen = 4;
+ recordlen = 2;
+ entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
+ break;
+ case 5: /* Vendor Specific Structure */
+ headerlen = 8;
+ break;
+ case 6: /* Backlight Control Structure */
+ if (mxms_version(dev) >= 0x0300) {
+ headerlen = 4;
+ recordlen = 8;
+ entries = (desc[1] & 0xf0) >> 4;
+ } else {
+ headerlen = 8;
+ }
+ break;
+ case 7: /* Fan Control Structure */
+ headerlen = 8;
+ recordlen = 4;
+ entries = desc[1] & 0x07;
+ break;
+ default:
+ MXM_DBG(dev, "unknown descriptor type %d\n", type);
+ return false;
+ }
+
+ if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
+ static const char * mxms_desc_name[] = {
+ "ODS", "SCCS", "TS", "IPS",
+ "GSD", "VSS", "BCS", "FCS",
+ };
+ u8 *dump = desc;
+ int i, j;
+
+ MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
+ for (j = headerlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ dump += headerlen;
+
+ for (i = 0; i < entries; i++, dump += recordlen) {
+ MXM_DBG(dev, " ");
+ for (j = recordlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ }
+ }
+
+ if (types & (1 << type)) {
+ if (!exec(dev, desc, info))
+ return false;
+ }
+
+ desc += headerlen + (entries * recordlen);
+ }
+
+ return true;
+}
+
+static u8 *
+mxm_table(struct drm_device *dev, u8 *size)
+{
+ struct bit_entry x;
+
+ if (bit_table(dev, 'x', &x)) {
+ MXM_DBG(dev, "BIT 'x' table not present\n");
+ return NULL;
+ }
+
+ if (x.version != 1 || x.length < 3) {
+ MXM_MSG(dev, "BIT x table %d/%d unknown\n",
+ x.version, x.length);
+ return NULL;
+ }
+
+ *size = x.length;
+ return x.data;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv96_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+ 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8
+mxm_sor_map(struct drm_device *dev, u8 conn)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 6) {
+ u8 *map = ROMPTR(dev, mxm[4]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (conn < map[3])
+ return map[map[1] + conn];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
+ }
+ }
+
+ if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
+ return nv84_sor_map[conn];
+ if (dev_priv->chipset == 0x92)
+ return nv92_sor_map[conn];
+ if (dev_priv->chipset == 0x94)
+ return nv94_sor_map[conn];
+ if (dev_priv->chipset == 0x96)
+ return nv96_sor_map[conn];
+ if (dev_priv->chipset == 0x98)
+ return nv98_sor_map[conn];
+
+ MXM_MSG(dev, "missing sor map\n");
+ return 0x00;
+}
+
+static u8
+mxm_ddc_map(struct drm_device *dev, u8 port)
+{
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 8) {
+ u8 *map = ROMPTR(dev, mxm[6]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (port < map[3])
+ return map[map[1] + port];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
+ }
+ }
+
+ /* v2.x: directly write port as dcb i2cidx */
+ return (port << 4) | port;
+}
+
+struct mxms_odev {
+ u8 outp_type;
+ u8 conn_type;
+ u8 ddc_port;
+ u8 dig_conn;
+};
+
+static void
+mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
+{
+ u64 data = ROM32(pdata[0]);
+ if (mxms_version(dev) >= 0x0300)
+ data |= (u64)ROM16(pdata[4]) << 32;
+
+ desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+ desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
+ desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+ desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
+}
+
+struct context {
+ u32 *outp;
+ struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ struct mxms_odev desc;
+
+ mxms_output_device(dev, data, &desc);
+ if (desc.outp_type == 2 &&
+ desc.dig_conn == ctx->desc.dig_conn)
+ return false;
+ return true;
+}
+
+static bool
+mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ u64 desc = *(u64 *)data;
+
+ mxms_output_device(dev, data, &ctx->desc);
+
+ /* match dcb encoder type to mxm-ods device type */
+ if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+ return true;
+
+ /* digital outputs need some extra matching here: a table in the
+ * vbios provides the mapping from the mxm digital connection enum
+ * values to SOR/link
+ */
+ if ((desc & 0x00000000000000f0) >= 0x20) {
+ /* check against sor index */
+ u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
+ if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+ return true;
+
+ /* check dcb entry has a compatible link field */
+ link = (link & 0x30) >> 4;
+ if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+ return true;
+ }
+
+ /* mark this descriptor accounted for by setting invalid device type,
+ * except of course some manufacturers don't follow the spec properly and
+ * we need to avoid killing off the TMDS function on DP connectors
+ * if MXM-SIS is missing an entry for it.
+ */
+ data[0] &= ~0xf0;
+ if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+ mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
+ data[0] |= 0x20; /* modify descriptor to match TMDS now */
+ } else {
+ data[0] |= 0xf0;
+ }
+
+ return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
+{
+ struct context ctx = { .outp = (u32 *)dcbe };
+ u8 type, i2cidx, link;
+ u8 *conn;
+
+ /* look for an output device structure that matches this dcb entry.
+ * if one isn't found, disable it.
+ */
+ if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
+ MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
+ idx, ctx.outp[0], ctx.outp[1]);
+ ctx.outp[0] |= 0x0000000f;
+ return 0;
+ }
+
+ /* modify the output's ddc/aux port, there's a pointer to a table
+ * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+ * vbios mxm table
+ */
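+ /* the map byte packs two nibbles (presumably ddc low, aux high);
+ * DP outputs take the high nibble as-is, everything else shifts
+ * the low nibble into the dcb i2c_index field
+ */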
+ i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
+ if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
+ i2cidx = (i2cidx & 0x0f) << 4;
+ else
+ i2cidx = (i2cidx & 0xf0);
+
+ if (i2cidx != 0xf0) {
+ ctx.outp[0] &= ~0x000000f0;
+ ctx.outp[0] |= i2cidx;
+ }
+
+ /* override dcb sorconf.link, based on what mxm data says */
+ switch (ctx.desc.outp_type) {
+ case 0x00: /* Analog CRT */
+ case 0x01: /* Analog TV/HDTV */
+ break;
+ default:
+ link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
+ ctx.outp[1] &= ~0x00000030;
+ ctx.outp[1] |= link;
+ break;
+ }
+
+ /* we may need to fixup various other vbios tables based on what
+ * the descriptor says the connector type should be.
+ *
+ * in a lot of cases, the vbios tables will claim DVI-I is possible,
+ * and the mxm data says the connector is really HDMI. another
+ * common example is DP->eDP.
+ */
+ conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
+ type = conn[0];
+ switch (ctx.desc.conn_type) {
+ case 0x01: /* LVDS */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+ /* XXX: modify default link width in LVDS table */
+ break;
+ case 0x02: /* HDMI */
+ type = DCB_CONNECTOR_HDMI_1;
+ break;
+ case 0x03: /* DVI-D */
+ type = DCB_CONNECTOR_DVI_D;
+ break;
+ case 0x0e: /* eDP, falls through to DPint */
+ ctx.outp[1] |= 0x00010000;
+ case 0x07: /* DP internal: unclear what this is, seen on HP8670w */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+ type = DCB_CONNECTOR_eDP;
+ break;
+ default:
+ break;
+ }
+
+ if (mxms_version(dev) >= 0x0300)
+ conn[0] = type;
+
+ return 0;
+}
+
+static bool
+mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
+{
+ u64 desc = *(u64 *)data;
+ if ((desc & 0xf0) != 0xf0)
+ MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
+ return true;
+}
+
+static void
+mxm_dcb_sanitise(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (!dcb || dcb[0] != 0x40) {
+ MXM_DBG(dev, "unsupported DCB version\n");
+ return;
+ }
+
+ dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
+ mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
+}
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
+ u8 offset, u8 size, u8 *data)
+{
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+ { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c = NULL;
+ u8 i2cidx, mxms[6], addr, size;
+
+ i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
+ if (i2cidx < 0x0f)
+ i2c = nouveau_i2c_find(dev, i2cidx);
+ if (!i2c)
+ return false;
+
+ addr = 0x54;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
+ addr = 0x56;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
+ return false;
+ }
+
+ dev_priv->mxms = mxms;
+ size = mxms_headerlen(dev) + mxms_structlen(dev);
+ dev_priv->mxms = kmalloc(size, GFP_KERNEL);
+
+ if (dev_priv->mxms &&
+ mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
+ return true;
+
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
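+ /* MXM _DSM MUID, i.e. UUID 4004a400-917d-4cf2-b89c-79b62fd55665
+ * stored in ACPI (mixed-endian) byte order
+ */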
+ static char muid[] = {
+ 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+ 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+ };
+ u32 mxms_args[] = { 0x00000000 };
+ union acpi_object args[4] = {
+ /* _DSM MUID */
+ { .buffer.type = 3,
+ .buffer.length = sizeof(muid),
+ .buffer.pointer = muid,
+ },
+ /* spec says this can be zero to mean "highest revision", but
+ * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports..
+ */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+ },
+ /* MXMS function */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = 0x00000010,
+ },
+ /* Pointer to MXMS arguments */
+ { .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(mxms_args),
+ .buffer.pointer = (char *)mxms_args,
+ },
+ };
+ struct acpi_object_list list = { ARRAY_SIZE(args), args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_handle handle;
+ int ret;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle)
+ return false;
+
+ ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+ if (ret) {
+ MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ } else
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static bool
+mxm_shadow_wmi(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+ struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ if (!wmi_has_guid(WMI_WMMX_GUID))
+ return false;
+
+ status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+ if (ACPI_FAILURE(status)) {
+ MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+struct mxm_shadow_h {
+ const char *name;
+ bool (*exec)(struct drm_device *, u8 version);
+} _mxm_shadow[] = {
+ { "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+ { "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+ { "WMI", mxm_shadow_wmi },
+#endif
+ {}
+};
+
+static int
+mxm_shadow(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mxm_shadow_h *shadow = _mxm_shadow;
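+ /* try each shadow method in turn (ROM, then ACPI _DSM, then WMI
+ * where built in) until one produces an MXMS image that validates
+ */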
+ do {
+ MXM_DBG(dev, "checking %s\n", shadow->name);
+ if (shadow->exec(dev, version)) {
+ if (mxms_valid(dev))
+ return 0;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ }
+ } while ((++shadow)->name);
+ return -ENOENT;
+}
+
+int
+nouveau_mxm_init(struct drm_device *dev)
+{
+ u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
+ if (!mxm || !mxm[0]) {
+ MXM_MSG(dev, "no VBIOS data, nothing to do\n");
+ return 0;
+ }
+
+ MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
+
+ if (mxm_shadow(dev, mxm[0])) {
+ MXM_MSG(dev, "failed to locate valid SIS\n");
+ return -EINVAL;
+ }
+
+ MXM_MSG(dev, "MXMS Version %d.%d\n",
+ mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
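+ /* types == 0 matches nothing, so this pass only dumps the
+ * descriptors when DRM driver debugging is enabled
+ */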
+ mxms_foreach(dev, 0, NULL, NULL);
+
+ if (nouveau_mxmdcb)
+ mxm_dcb_sanitise(dev);
+ return 0;
+}
+
+void
+nouveau_mxm_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6abdbe6530a..2ef883c4bbc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
- uint32_t offset;
+ uint64_t offset;
int target, ret;
mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540ae..cc419fae794 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
/* map display semaphore buffers into channel's vm */
- if (dev_priv->card_type >= NV_D0)
- return 0;
-
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
-
- ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
- &chan->dispc_vma[i]);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
if (ret)
return ret;
}
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
struct nv50_display *disp = nv50_display(dev);
-
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nv50_display_crtc *dispc = &disp->crtc[i];
nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 33d03fbf00d..58f497343ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bmp[0x73]);
+ perf = ROMPTR(dev, bmp[0x73]);
if (!perf) {
NV_DEBUG(dev, "No memclock table pointer found.\n");
return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
* ramcfg to select the correct subentry
*/
if (P->version == 2) {
- u8 *tmap = ROMPTR(bios, P->data[4]);
+ u8 *tmap = ROMPTR(dev, P->data[4]);
if (!tmap) {
NV_DEBUG(dev, "no timing map pointer\n");
return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
u8 *vmap;
int id;
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
return;
}
- vmap = ROMPTR(bios, P->data[32]);
+ vmap = ROMPTR(dev, P->data[32]);
if (!vmap) {
NV_DEBUG(dev, "volt map table pointer invalid\n");
return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, P.data[0]);
+ perf = ROMPTR(dev, P.data[0]);
version = perf[0];
headerlen = perf[1];
if (version < 0x40) {
recordlen = perf[3] + (perf[4] * perf[5]);
entries = perf[2];
+
+ pm->pwm_divisor = ROM16(perf[6]);
} else {
recordlen = perf[2] + (perf[3] * perf[4]);
entries = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
if (!perf) {
NV_DEBUG(dev, "perf table pointer invalid\n");
return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->memory = ROM16(entry[11]) * 1000;
else
perflvl->memory = ROM16(entry[11]) * 2000;
-
break;
case 0x25:
perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->core = ROM16(entry[8]) * 1000;
perflvl->shader = ROM16(entry[10]) * 1000;
perflvl->memory = ROM16(entry[12]) * 1000;
- /*XXX: confirm on 0x35 */
- perflvl->unk05 = ROM16(entry[16]) * 1000;
+ perflvl->vdec = ROM16(entry[16]) * 1000;
+ perflvl->dom6 = ROM16(entry[20]) * 1000;
break;
case 0x40:
#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a539fd25792..9064d7f1979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -35,22 +36,95 @@
#include <linux/hwmon-sysfs.h>
static int
-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u8 id, u32 khz)
+nouveau_pwmfan_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- void *pre_state;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
- if (khz == 0)
- return 0;
+ if (!pm->pwm_get)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
+ if (ret == 0) {
+ divs = max(divs, duty);
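+ /* NV40-class boards, and GPIOs whose logic appears to be inverted,
+ * report the duty relative to the 'off' level, so flip it before
+ * converting to a percentage
+ */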
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return nouveau_gpio_func_get(dev, gpio.func) * 100;
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pwmfan_set(struct drm_device *dev, int percent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
+
+ if (!pm->pwm_set)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ divs = pm->pwm_divisor;
+ if (pm->fan.pwm_freq) {
+ /*XXX: PNVIO clock more than likely... */
+ divs = 135000 / pm->fan.pwm_freq;
+ if (dev_priv->chipset < 0xa3)
+ divs /= 4;
+ }
+
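+ /* scale the requested percentage to a duty value, rounding up so
+ * a small non-zero request doesn't truncate to a stopped fan
+ */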
+ duty = ((divs * percent) + 99) / 100;
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
- pre_state = pm->clock_pre(dev, perflvl, id, khz);
- if (IS_ERR(pre_state))
- return PTR_ERR(pre_state);
+ return pm->pwm_set(dev, gpio.line, divs, duty);
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ /*XXX: not on all boards; on recent boards we should probably
+ * control this based on temperature... or maybe on some other
+ * factor we don't know about?
+ */
+ if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+ ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+ if (ret && ret != -ENODEV) {
+ NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pm->voltage.supported && pm->voltage_set) {
+ if (perflvl->volt_min && b->volt_min > a->volt_min) {
+ ret = pm->voltage_set(dev, perflvl->volt_min);
+ if (ret) {
+ NV_ERROR(dev, "voltage set failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
- if (pre_state)
- pm->clock_set(dev, pre_state);
return 0;
}
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ void *state;
int ret;
if (perflvl == pm->cur)
return 0;
- if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
- ret = pm->voltage_set(dev, perflvl->volt_min);
- if (ret) {
- NV_ERROR(dev, "voltage_set %d failed: %d\n",
- perflvl->volt_min, ret);
- }
- }
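+ /* raise fan speed / voltage before reclocking when the new level
+ * needs more; the matching call after the clock change handles
+ * any decreases
+ */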
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+ if (ret)
+ return ret;
- if (pm->clocks_pre) {
- void *state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state))
- return PTR_ERR(state);
- pm->clocks_set(dev, state);
- } else
- if (pm->clock_set) {
- nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
- nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
- nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
- nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
- }
+ state = pm->clocks_pre(dev, perflvl);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+ pm->clocks_set(dev, state);
+
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+ if (ret)
+ return ret;
pm->cur = perflvl;
return 0;
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
memset(perflvl, 0, sizeof(*perflvl));
- if (pm->clocks_get) {
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
- } else
- if (pm->clock_get) {
- ret = pm->clock_get(dev, PLL_CORE);
- if (ret > 0)
- perflvl->core = ret;
-
- ret = pm->clock_get(dev, PLL_MEMORY);
- if (ret > 0)
- perflvl->memory = ret;
-
- ret = pm->clock_get(dev, PLL_SHADER);
- if (ret > 0)
- perflvl->shader = ret;
-
- ret = pm->clock_get(dev, PLL_UNK05);
- if (ret > 0)
- perflvl->unk05 = ret;
- }
+ ret = pm->clocks_get(dev, perflvl);
+ if (ret)
+ return ret;
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
}
+ ret = nouveau_pwmfan_get(dev);
+ if (ret > 0)
+ perflvl->fanspeed = ret;
+
return 0;
}
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
nouveau_hwmon_show_update_rate,
NULL, 0);
+static ssize_t
+nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct gpio_func gpio;
+ u32 cycles, cur, prev;
+ u64 start;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
+ if (ret)
+ return ret;
+
+ /* Monitor the GPIO input 0x3b for 250ms.
+ * When the fan spins, it changes the value of GPIO FAN_SENSE.
+ * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
+ */
+ start = ptimer->read(dev);
+ prev = nouveau_gpio_sense(dev, 0, gpio.line);
+ cycles = 0;
+ do {
+ cur = nouveau_gpio_sense(dev, 0, gpio.line);
+ if (prev != cur) {
+ cycles++;
+ prev = cur;
+ }
+
+ usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+ } while (ptimer->read(dev) - start < 250000000);
+
+ /* interpolate to get rpm: 4 edges per rotation over a 250ms window, so rpm = (cycles / 4) * 240 */
+ return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+ NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ int ret;
+
+ ret = nouveau_pwmfan_get(dev);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret = -ENODEV;
+ long value;
+
+ if (nouveau_perflvl_wr != 7777)
+ return -EPERM;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < pm->fan.min_duty)
+ value = pm->fan.min_duty;
+ if (value > pm->fan.max_duty)
+ value = pm->fan.max_duty;
+
+ ret = nouveau_pwmfan_set(dev, value);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0,
+ nouveau_hwmon_set_pwm0, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.min_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < 0)
+ value = 0;
+
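+ /* keep at least a 10% gap below pwm0_max so the controllable
+ * range stays usable
+ */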
+ if (pm->fan.max_duty - value < 10)
+ value = pm->fan.max_duty - 10;
+
+ if (value < 10)
+ pm->fan.min_duty = 10;
+ else
+ pm->fan.min_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_min,
+ nouveau_hwmon_set_pwm0_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.max_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < 0)
+ value = 0;
+
+ if (value - pm->fan.min_duty < 10)
+ value = pm->fan.min_duty + 10;
+
+ if (value > 100)
+ pm->fan.max_duty = 100;
+ else
+ pm->fan.max_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_max,
+ nouveau_hwmon_set_pwm0_max, 0);
+
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+ &sensor_dev_attr_fan0_input.dev_attr.attr,
+ NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+ &sensor_dev_attr_pwm0.dev_attr.attr,
+ &sensor_dev_attr_pwm0_min.dev_attr.attr,
+ &sensor_dev_attr_pwm0_max.dev_attr.attr,
+ NULL
+};
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+ .attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+ .attrs = hwmon_pwm_fan_attributes,
+};
#endif
static int
nouveau_hwmon_init(struct drm_device *dev)
{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct device *hwmon_dev;
- int ret;
+ int ret = 0;
if (!pm->temp_get)
return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
+
+ /* default sysfs entries */
ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
- NV_ERROR(dev,
- "Unable to create hwmon sysfs file: %d\n", ret);
- hwmon_device_unregister(hwmon_dev);
- return ret;
+ if (ret)
+ goto error;
+ }
+
+ /* if the card has a pwm fan */
+ /*XXX: incorrect, need better detection for this, some boards have
+ * the gpio entries for pwm fan control even when there's no
+ * actual fan connected to it... therm table? */
+ if (nouveau_pwmfan_get(dev) >= 0) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_pwm_fan_attrgroup);
+ if (ret)
+ goto error;
+ }
+
+ /* if the card can read the fan rpm */
+ if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_fan_rpm_attrgroup);
+ if (ret)
+ goto error;
}
pm->hwmon = hwmon_dev;
-#endif
+
+ return 0;
+
+error:
+ NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ pm->hwmon = NULL;
+ return ret;
+#else
+ pm->hwmon = NULL;
return 0;
+#endif
}
static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
if (pm->hwmon) {
sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+
hwmon_device_unregister(pm->hwmon);
}
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 8ac02cdd03a..2f8e14fbcff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
void nouveau_mem_timing_fini(struct drm_device *);
/* nv04_pm.c */
-int nv04_pm_clock_get(struct drm_device *, u32 id);
-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv04_pm_clock_set(struct drm_device *, void *);
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
/* nv40_pm.c */
int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nv50_pm.c */
-int nv50_pm_clock_get(struct drm_device *, u32 id);
-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv50_pm_clock_set(struct drm_device *, void *);
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nva3_pm.c */
int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nva3_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
/* nvc0_pm.c */
int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
/* nouveau_temp.c */
void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe4..47f245edf53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,88 +8,30 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- struct ttm_backend backend;
+ /* this has to be the first field so populate/unpopulate in
+ * nouveau_bo.c work properly, otherwise they would have to move here
+ */
+ struct ttm_dma_tt ttm;
struct drm_device *dev;
-
- dma_addr_t *pages;
- unsigned nr_pages;
- bool unmap_pages;
-
u64 offset;
- bool bound;
};
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
- nvbe->pages = dma_addrs;
- nvbe->nr_pages = num_pages;
- nvbe->unmap_pages = true;
-
- /* this code path isn't called and is incorrect anyways */
- if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
- nvbe->unmap_pages = false;
- return 0;
- }
-
- for (i = 0; i < num_pages; i++) {
- nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
- nvbe->nr_pages = --i;
- be->func->clear(be);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- if (nvbe->bound)
- be->func->unbind(be);
-
- if (nvbe->unmap_pages) {
- while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
- if (be) {
+ if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
-
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
- }
+ ttm_dma_tt_fini(&nvbe->ttm);
+ kfree(nvbe);
}
}
static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -99,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = nvbe->pages[i];
+ for (i = 0; i < ttm->num_pages; i++) {
+ dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -109,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
}
- nvbe->bound = true;
return 0;
}
static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -124,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
NV_DEBUG(dev, "\n");
- if (!nvbe->bound)
+ if (ttm->state != tt_bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
+ for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -158,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -175,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = true;
return 0;
}
static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -194,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
- nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -270,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -305,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = true;
+ nv44_sgdma_flush(ttm);
return 0;
}
static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -339,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = false;
+ nv44_sgdma_flush(ttm);
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
+
/* noop: bound in move_notify() */
- node->pages = nvbe->pages;
- nvbe->pages = (dma_addr_t *)node;
- nvbe->bound = true;
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
- nvbe->pages = node->pages;
- node->pages = NULL;
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -395,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
return NULL;
nvbe->dev = dev;
+ nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- nvbe->backend.func = dev_priv->gart_info.func;
- return &nvbe->backend;
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(nvbe);
+ return NULL;
+ }
+ return &nvbe->ttm.ttm;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index d8831ab42bb..f5e98910d17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = NULL;
- engine->gpio.set = NULL;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->vram.init = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.init = nv10_gpio_init;
+ engine->gpio.fini = nv10_gpio_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->gpio.irq_enable = nv10_gpio_irq_enable;
engine->pm.clocks_get = nv40_pm_clocks_get;
engine->pm.clocks_pre = nv40_pm_clocks_pre;
engine->pm.clocks_set = nv40_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv40_pm_pwm_get;
+ engine->pm.pwm_set = nv40_pm_pwm_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nv50_gpio_fini;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0xaa:
case 0xac:
case 0x50:
- engine->pm.clock_get = nv50_pm_clock_get;
- engine->pm.clock_pre = nv50_pm_clock_pre;
- engine->pm.clock_set = nv50_pm_clock_set;
+ engine->pm.clocks_get = nv50_pm_clocks_get;
+ engine->pm.clocks_pre = nv50_pm_clocks_pre;
+ engine->pm.clocks_set = nv50_pm_clocks_set;
break;
default:
engine->pm.clocks_get = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
engine->vram.init = nv50_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nvc0_vram_flags_valid;
engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
break;
case 0xd0:
engine->instmem.init = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
- engine->display.init = nvd0_display_init;
engine->display.destroy = nvd0_display_destroy;
+ engine->display.init = nvd0_display_init;
+ engine->display.fini = nvd0_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nvd0_gpio_get;
- engine->gpio.set = nvd0_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nvd0_gpio_drive;
+ engine->gpio.sense = nvd0_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
@@ -615,7 +615,7 @@ nouveau_card_init(struct drm_device *dev)
goto out_gart;
/* PGPIO */
- ret = engine->gpio.init(dev);
+ ret = nouveau_gpio_create(dev);
if (ret)
goto out_mc;
@@ -648,6 +648,7 @@ nouveau_card_init(struct drm_device *dev)
nv50_graph_create(dev);
break;
case NV_C0:
+ case NV_D0:
nvc0_graph_create(dev);
break;
default:
@@ -663,6 +664,11 @@ nouveau_card_init(struct drm_device *dev)
case 0xa0:
nv84_crypt_create(dev);
break;
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ nv98_crypt_create(dev);
+ break;
}
switch (dev_priv->card_type) {
@@ -684,15 +690,25 @@ nouveau_card_init(struct drm_device *dev)
break;
}
+ if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ nv98_ppp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x84) {
+ nv50_mpeg_create(dev);
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x50) {
+ nv50_mpeg_create(dev);
+ } else
if (dev_priv->card_type == NV_40 ||
dev_priv->chipset == 0x31 ||
dev_priv->chipset == 0x34 ||
- dev_priv->chipset == 0x36)
+ dev_priv->chipset == 0x36) {
nv31_mpeg_create(dev);
- else
- if (dev_priv->card_type == NV_50 &&
- (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
- nv50_mpeg_create(dev);
+ }
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
if (dev_priv->eng[e]) {
@@ -712,27 +728,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fifo;
- /* initialise general modesetting */
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dithering_property(dev);
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- if (dev_priv->card_type < NV_10) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
- } else
- if (dev_priv->card_type < NV_50) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
- } else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
- ret = engine->display.create(dev);
+ ret = nouveau_display_create(dev);
if (ret)
goto out_irq;
@@ -752,12 +748,11 @@ nouveau_card_init(struct drm_device *dev)
}
if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ ret = nouveau_display_init(dev);
if (ret)
goto out_chan;
nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
}
return 0;
@@ -768,7 +763,7 @@ out_fence:
nouveau_fence_fini(dev);
out_disp:
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
+ nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
out_fifo:
@@ -788,7 +783,7 @@ out_engine:
out_timer:
engine->timer.takedown(dev);
out_gpio:
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
out_mc:
engine->mc.takedown(dev);
out_gart:
@@ -818,9 +813,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
int e;
if (dev->mode_config.num_crtc) {
- drm_kms_helper_poll_fini(dev);
nouveau_fbcon_fini(dev);
- drm_vblank_cleanup(dev);
+ nouveau_display_fini(dev);
}
if (dev_priv->channel) {
@@ -829,8 +823,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
- drm_mode_config_cleanup(dev);
+ nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
@@ -843,7 +836,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
engine->fb.takedown(dev);
engine->timer.takedown(dev);
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
engine->mc.takedown(dev);
engine->display.late_takedown(dev);
@@ -1110,13 +1103,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->noaccel = !!nouveau_noaccel;
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
-#if 0
- case 0xXX: /* known broken */
+ case 0xd9: /* known broken */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
break;
-#endif
default:
dev_priv->noaccel = false;
break;
@@ -1238,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = dev_priv->card_type < NV_D0;
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 5a46446dd5a..0f5a3016055 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
temps->down_clock = 100;
temps->fan_boost = 90;
+ /* Set the default range for the pwm fan */
+ pm->fan.min_duty = 30;
+ pm->fan.max_duty = 100;
+
/* Set the known default values to setup the temperature sensor */
if (dev_priv->card_type >= NV_40) {
switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
case 0x13:
sensor->slope_div = value;
break;
+ case 0x22:
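+ /* low byte: minimum fan duty (%), high byte: maximum fan duty (%) */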
+ pm->fan.min_duty = value & 0xff;
+ pm->fan.max_duty = (value & 0xff00) >> 8;
+ break;
+ case 0x26:
+ pm->fan.pwm_freq = value;
+ break;
}
temp += recordlen;
}
nouveau_temp_safety_checks(dev);
+
+ /* check the fan min/max settings */
+ if (pm->fan.min_duty < 10)
+ pm->fan.min_duty = 10;
+ if (pm->fan.max_duty > 100)
+ pm->fan.max_duty = 100;
+ if (pm->fan.max_duty < pm->fan.min_duty)
+ pm->fan.max_duty = pm->fan.min_duty;
}
static int
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
static void
nouveau_temp_probe_i2c(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
{ I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
{ I2C_BOARD_INFO("lm99", 0x4c) },
{ }
};
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf : 2);
nouveau_i2c_identify(dev, "monitoring device", info,
- probe_monitoring_device, idx);
+ probe_monitoring_device, NV_I2C_DEFAULT(0));
}
void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
return;
if (P.version == 1)
- temp = ROMPTR(bios, P.data[12]);
+ temp = ROMPTR(dev, P.data[12]);
else if (P.version == 2)
- temp = ROMPTR(bios, P.data[16]);
+ temp = ROMPTR(dev, P.data[16]);
else
NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index ef0832b29ad..2bf6c0350b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem, dma_addr_t *list)
+ struct nouveau_mem *mem)
{
struct nouveau_vm *vm = vma->vm;
+ dma_addr_t *list = mem->pages;
int big = vma->node->type != vm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 6ce995f7797..4fb6e728734 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *, dma_addr_t *);
+ struct nouveau_mem *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 86d03e15735..b010cb997b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
u8 vid = 0;
int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
if (!(volt->vid_mask & (1 << i)))
continue;
- vid |= gpio->get(dev, vidtag[i]) << i;
+ vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
}
return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
int vid, i;
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
if (!(volt->vid_mask & (1 << i)))
continue;
- gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+ nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
}
return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
return;
if (P.version == 1)
- volt = ROMPTR(bios, P.data[16]);
+ volt = ROMPTR(dev, P.data[16]);
else
if (P.version == 2)
- volt = ROMPTR(bios, P.data[12]);
+ volt = ROMPTR(dev, P.data[12]);
else {
NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
}
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+ volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
}
if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+ if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 5e45398a9e2..728d07584d3 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
/* framebuffer can be larger than crtc scanout area. */
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
regp->ramdac_gen_ctrl);
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
- regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+ regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index e000455e06d..8300266ffae 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -32,6 +32,7 @@
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nvreg.h"
int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
- gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 12098bf839c..2258746016f 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
+ struct drm_connector *connector = &nv_connector->base;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
output_mode->clock > 165000)
regp->fp_control |= (2 << 24);
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- bool duallink, dummy;
+ bool duallink = false, dummy;
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ duallink = (((u8 *)nv_connector->edid)[121] == 2);
+ } else {
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
+ }
- nouveau_bios_parse_lvds_table(dev, output_mode->clock,
- &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
/* Output property. */
- if (nv_connector->use_dithering) {
+ if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+ (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+ encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
if (dev_priv->chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 6bd8518d7b2..7047d37e8da 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+void
+nv04_display_fini(struct drm_device *dev)
+{
+}
+
static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 9ae92a87b8c..6e7589918fa 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -27,68 +27,111 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
-struct nv04_pm_state {
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ int ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_CORE);
+ if (ret < 0)
+ return ret;
+ perflvl->core = ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+ if (ret < 0)
+ return ret;
+ perflvl->memory = ret;
+
+ return 0;
+}
+
+struct nv04_pm_clock {
struct pll_lims pll;
struct nouveau_pll_vals calc;
};
-int
-nv04_pm_clock_get(struct drm_device *dev, u32 id)
+struct nv04_pm_state {
+ struct nv04_pm_clock core;
+ struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
{
- return nouveau_hw_get_clock(dev, id);
+ int ret;
+
+ ret = get_pll_limits(dev, id, &clk->pll);
+ if (ret)
+ return ret;
+
+ ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
}
void *
-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv04_pm_state *state;
+ struct nv04_pm_state *info;
int ret;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
- }
+ ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+ if (ret)
+ goto error;
- ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
- if (!ret) {
- kfree(state);
- return ERR_PTR(-EINVAL);
+ if (perflvl->memory) {
+ ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+ if (ret)
+ goto error;
}
- return state;
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
}
-void
-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv04_pm_state *state = pre_state;
- u32 reg = state->pll.reg;
+ u32 reg = clk->pll.reg;
/* thank the insane nouveau_hw_setpll() interface for this */
if (dev_priv->card_type >= NV_40)
reg += 4;
- nouveau_hw_setpll(dev, reg, &state->calc);
+ nouveau_hw_setpll(dev, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv04_pm_state *state = pre_state;
+
+ prog_pll(dev, &state->core);
- if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
- if (dev_priv->card_type == NV_20)
- nv_mask(dev, 0x1002c4, 0, 1 << 20);
+ if (state->memory.pll.reg) {
+ prog_pll(dev, &state->memory);
+ if (dev_priv->card_type < NV_30) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
- /* Reset the DLLs */
- nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ }
}
- if (reg == NV_PRAMDAC_NVPLL_COEFF)
- ptimer->init(dev);
+ ptimer->init(dev);
kfree(state);
+ return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 263301b809d..55c945290e5 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_hw.h"
int
nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
/* determine base clock for timer source */
if (dev_priv->chipset < 0x40) {
- n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+ n = nouveau_hw_get_clock(dev, PLL_CORE);
} else
if (dev_priv->chipset == 0x40) {
/*XXX: figure this out */
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 007fc29e2f8..550ad3fcf0a 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -27,66 +27,97 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
-static bool
-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
- uint32_t *mask)
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
{
- if (ent->line < 2) {
- *reg = NV_PCRTC_GPIO;
- *shift = ent->line * 16;
- *mask = 0x11;
-
- } else if (ent->line < 10) {
- *reg = NV_PCRTC_GPIO_EXT;
- *shift = (ent->line - 2) * 4;
- *mask = 0x3;
+ if (line < 2) {
+ line = line * 16;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+ return !!(line & 0x0100);
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+ return !!(line & 0x04);
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+ return !!(line & 0x04);
+ }
- } else if (ent->line < 14) {
- *reg = NV_PCRTC_850;
- *shift = (ent->line - 10) * 4;
- *mask = 0x3;
+ return -EINVAL;
+}
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 reg, mask, data;
+
+ if (line < 2) {
+ line = line * 16;
+ reg = NV_PCRTC_GPIO;
+ mask = 0x00000011;
+ data = (dir << 4) | out;
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ reg = NV_PCRTC_GPIO_EXT;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ reg = NV_PCRTC_850;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
} else {
- return false;
+ return -EINVAL;
}
- return true;
+ mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+ NVWriteCRTC(dev, 0, reg, mask | (data << line));
+ return 0;
}
-int
-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
+ u32 mask = 0x00010001 << line;
- if (!ent)
- return -ENODEV;
+ nv_wr32(dev, 0x001104, mask);
+ nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+ u32 intr = nv_rd32(dev, 0x1104);
+ u32 hi = (intr & 0x0000ffff) >> 0;
+ u32 lo = (intr & 0xffff0000) >> 16;
- value = NVReadCRTC(dev, 0, reg) >> shift;
+ nouveau_gpio_isr(dev, 0, hi | lo);
- return (ent->invert ? 1 : 0) ^ (value & 1);
+ nv_wr32(dev, 0x001104, intr);
}
int
-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_init(struct drm_device *dev)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
-
- if (!ent)
- return -ENODEV;
-
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
-
- value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
- mask = ~(mask << shift);
-
- NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
-
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001100, 0xffffffff);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nv_wr32(dev, 0x001104, 0xffffffff);
+ nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
return 0;
}
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nouveau_irq_unregister(dev, 28);
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 3900cebba56..696d7e7dc2a 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- gpio->set(dev, DCB_GPIO_TVDAC1, true);
- gpio->set(dev, DCB_GPIO_TVDAC0, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -383,8 +381,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index e676b0d5347..c7615381c5d 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
return true;
}
-void
+int
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
- int i;
+ int i, ret = -EAGAIN;
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
goto resume;
+ ret = 0;
+
/* set engine clocks */
nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
+}
+
+int
+nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+ if (line == 2) {
+ u32 reg = nv_rd32(dev, 0x0010f0);
+ if (reg & 0x80000000) {
+ *duty = (reg & 0x7fff0000) >> 16;
+ *divs = (reg & 0x00007fff);
+ return 0;
+ }
+ } else
+ if (line == 9) {
+ u32 reg = nv_rd32(dev, 0x0015f4);
+ if (reg & 0x80000000) {
+ *divs = nv_rd32(dev, 0x0015f8);
+ *duty = (reg & 0x7fffffff);
+ return 0;
+ }
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return -EINVAL;
+}
+
+int
+nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ if (line == 2) {
+ nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ } else
+ if (line == 9) {
+ nv_wr32(dev, 0x0015f8, divs);
+ nv_wr32(dev, 0x0015f4, duty | 0x80000000);
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 882080e0b4f..8f6c2ace3ad 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
}
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ int head = nv_crtc->index, ret;
+ u32 mode = 0x00;
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(dev, "no space while setting dither\n");
- return ret;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
- if (on)
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
- else
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ OUT_RING (evo, mode);
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
+ }
}
- return 0;
+ return ret;
}
struct nouveau_connector *
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_connector *nv_connector =
- nouveau_crtc_connector_get(nv_crtc);
- struct drm_device *dev = nv_crtc->base.dev;
+ struct nouveau_connector *nv_connector;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *native_mode = NULL;
- struct drm_display_mode *mode = &nv_crtc->base.mode;
- uint32_t outX, outY, horiz, vert;
- int ret;
+ struct drm_display_mode *umode = &crtc->mode;
+ struct drm_display_mode *omode;
+ int scaling_mode, ret;
+ u32 ctrl = 0, oX, oY;
NV_DEBUG_KMS(dev, "\n");
- switch (scaling_mode) {
- case DRM_MODE_SCALE_NONE:
- break;
- default:
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(dev, "No native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
+ } else {
+ scaling_mode = nv_connector->scaling_mode;
+ }
+
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
+ if (scaling_mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
} else {
- native_mode = nv_connector->native_mode;
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
- break;
}
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
switch (scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
case DRM_MODE_SCALE_ASPECT:
- horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
- vert = (native_mode->vdisplay << 19) / mode->vdisplay;
-
- if (vert > horiz) {
- outX = (mode->hdisplay * horiz) >> 19;
- outY = (mode->vdisplay * horiz) >> 19;
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
} else {
- outX = (mode->hdisplay * vert) >> 19;
- outY = (mode->vdisplay * vert) >> 19;
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native_mode->hdisplay;
- outY = native_mode->vdisplay;
- break;
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_NONE:
default:
- outX = mode->hdisplay;
- outY = mode->vdisplay;
break;
}
- ret = RING_SPACE(evo, update ? 7 : 5);
+ if (umode->hdisplay != oX || umode->vdisplay != oY ||
+ umode->flags & DRM_MODE_FLAG_INTERLACE ||
+ umode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
+
+ ret = RING_SPACE(evo, 5);
if (ret)
return ret;
- /* Got a better name for SCALER_ACTIVE? */
- /* One day i've got to really figure out why this is needed. */
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- mode->hdisplay != outX || mode->vdisplay != outY) {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
- } else {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
- }
-
+ OUT_RING (evo, ctrl);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING(evo, outY << 16 | outX);
- OUT_RING(evo, outY << 16 | outX);
+ OUT_RING (evo, oY << 16 | oX);
+ OUT_RING (evo, oY << 16 | oX);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ nv50_display_flip_stop(crtc);
+ nv50_display_sync(dev);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
return 0;
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- kfree(nv_crtc->mode);
kfree(nv_crtc);
}
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
-static int
-nv50_crtc_wait_complete(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- u64 start;
- int ret;
-
- ret = RING_SPACE(evo, 6);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
-
- nv_wo32(disp->ntfy, 0x000, 0x00000000);
- FIRE_RING (evo);
-
- start = ptimer->read(dev);
- do {
- if (nv_ro32(disp->ntfy, 0x000))
- return 0;
- } while (ptimer->read(dev) - start < 2000000000ULL);
-
- return -EBUSY;
-}
-
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_crtc_wait_complete(crtc);
+ nv50_display_sync(dev);
nv50_display_flip_next(crtc, crtc->fb, NULL);
}
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector = NULL;
- uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
- uint32_t hunk1, vunk1, vunk2a, vunk2b;
+ u32 head = nv_crtc->index * 0x400;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
int ret;
- /* Find the connector attached to this CRTC */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
-
- *nv_crtc->mode = *adjusted_mode;
-
- NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* hw timing description looks like this:
+ *
+ * <sync> <back porch> <---------display---------> <front porch>
+ * ______
+ * |____________|---------------------------|____________|
+ *
+ * ^ synce ^ blanke ^ blanks ^ active
+ *
+ * interlaced modes also have 2 additional values pointing at the end
+ * and start of the next field's blanking period.
+ */
- hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
- vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
- hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
- vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
- /* I can't give this a proper name, anyone else can? */
- hunk1 = adjusted_mode->htotal -
- adjusted_mode->hsync_start + adjusted_mode->hdisplay;
- vunk1 = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- /* Another strange value, this time only for interlaced adjusted_modes. */
- vunk2a = 2 * adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- vunk2b = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vtotal;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vsync_dur /= 2;
- vsync_start_to_end /= 2;
- vunk1 /= 2;
- vunk2a /= 2;
- vunk2b /= 2;
- /* magic */
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- vsync_start_to_end -= 1;
- vunk1 -= 1;
- vunk2a -= 1;
- vunk2b -= 1;
- }
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
}
- ret = RING_SPACE(evo, 17);
- if (ret)
- return ret;
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
- OUT_RING(evo, adjusted_mode->clock | 0x800000);
- OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
- OUT_RING(evo, 0);
- OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
- OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
- OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
- (hsync_start_to_end - 1));
- OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
- OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
- } else {
- OUT_RING(evo, 0);
- OUT_RING(evo, 0);
+ ret = RING_SPACE(evo, 18);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ OUT_RING (evo, 0x00800000 | mode->clock);
+ OUT_RING (evo, (ilace == 2) ? 2 : 0);
+ BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ OUT_RING (evo, 0x00000000); /* border colour */
+ OUT_RING (evo, (vactive << 16) | hactive);
+ OUT_RING (evo, ( vsynce << 16) | hsynce);
+ OUT_RING (evo, (vblanke << 16) | hblanke);
+ OUT_RING (evo, (vblanks << 16) | hblanks);
+ OUT_RING (evo, (vblan2e << 16) | vblan2s);
+ BEGIN_RING(evo, 0, 0x082c + head, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ OUT_RING (evo, 0x00000311); /* makes sync channel work */
+ BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
+ BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ OUT_RING (evo, 0x00000000); /* screen position */
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
- OUT_RING(evo, 0);
-
- /* This is the actual resolution of the mode. */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
- OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
-
- nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
- nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nv_crtc->set_dither(nv_crtc, false);
+ nv_crtc->set_scale(nv_crtc, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- ret = nv50_crtc_wait_complete(crtc);
+ ret = nv50_display_sync(crtc->dev);
if (ret)
return ret;
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
if (ret)
return ret;
- return nv50_crtc_wait_complete(crtc);
+ return nv50_display_sync(crtc->dev);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
- nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
- if (!nv_crtc->mode) {
- kfree(nv_crtc);
- return -ENOMEM;
- }
-
/* Default CLUT parameters, will be activated on the hw upon
* first mode set.
*/
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
if (ret) {
- kfree(nv_crtc->mode);
kfree(nv_crtc);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 808f3ec8f82..a0f2bebf49e 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -200,11 +200,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
static void
-nv50_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.save = nv50_dac_save,
.restore = nv50_dac_restore,
.mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_prepare,
+ .prepare = nv50_dac_disconnect,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
.get_crtc = nv50_dac_crtc_get,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d6..7ba28e08ee3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
+static int
+evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
+{
+ int ret = 0;
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x610304 + (ch * 0x08), data);
+ nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
+ if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
+ ret = -EBUSY;
+ if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
+ NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
+ return ret;
+}
+
int
nv50_display_early_init(struct drm_device *dev)
{
+ u32 ctrl = nv_rd32(dev, 0x610200);
+ int i;
+
+ /* check if master evo channel is already active, as good a sign as any
+ * that the display engine is in a weird state (hibernate/kexec), if
+ * it is, do our best to reset the display engine...
+ */
+ if ((ctrl & 0x00000003) == 0x00000003) {
+ NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
+
+ /* deactivate both heads first, PDISP will disappear forever
+ * (well, until you power cycle) on some boards as soon as
+ * PMC_ENABLE is hit unless they are..
+ */
+ for (i = 0; i < 2; i++) {
+ evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
+ evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
+ }
+ evo_icmd(dev, 0, 0x0080, 0);
+
+ /* reset PDISP */
+ nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
+ }
+
return 0;
}
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
}
int
-nv50_display_init(struct drm_device *dev)
+nv50_display_sync(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct drm_connector *connector;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+ }
+
+ return -EBUSY;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
struct nouveau_channel *evo;
int ret, i;
u32 val;
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
-
ret = nv50_evo_init(dev);
if (ret)
return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 15);
+ ret = RING_SPACE(evo, 3);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NvEvoSync);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
- OUT_RING(evo, 0);
- /* required to make display sync channels not hate life */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- FIRE_RING(evo);
- if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
- NV_ERROR(dev, "evo pushbuf stalled\n");
-
+ OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING (evo, NvEvoSync);
- return 0;
+ return nv50_display_sync(dev);
}
-static int nv50_display_disable(struct drm_device *dev)
+void
+nv50_display_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
/* disable interrupts. */
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-
- /* disable hotplug interrupts */
- nv_wr32(dev, 0xe054, 0xffffffff);
- nv_wr32(dev, 0xe050, 0x00000000);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe074, 0xffffffff);
- nv_wr32(dev, 0xe070, 0x00000000);
- }
- return 0;
}
-int nv50_display_create(struct drm_device *dev)
+int
+nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
- ret = nv50_display_init(dev);
+ ret = nv50_evo_create(dev);
if (ret) {
nv50_display_destroy(dev);
return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- nv50_display_disable(dev);
+ nv50_evo_destroy(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
} else {
/* determine number of lvds links */
if (nv_connector && nv_connector->edid &&
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
/* http://www.spwg.org */
if (((u8 *)nv_connector->edid)[121] == 2)
script |= 0x0100;
@@ -616,7 +654,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -708,7 +746,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
if (crtc >= 0) {
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
pclk &= 0x003fffff;
-
- nv50_crtc_set_clock(dev, crtc, pclk);
+ if (pclk)
+ nv50_crtc_set_clock(dev, crtc, pclk);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
tmp &= ~0x000000f;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c2da503a22a..95874f7c043 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_init(struct drm_device *dev);
+void nv50_display_fini(struct drm_device *dev);
void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_sync(struct drm_device *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
+int nv50_evo_create(struct drm_device *dev);
+void nv50_evo_destroy(struct drm_device *dev);
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c99d9751880..9b962e989d7 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
-static void
+void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
nv50_evo_channel_del(&disp->master);
}
-static int
+int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
int ret, i;
- if (!disp->master) {
- ret = nv50_evo_create(dev);
- if (ret)
- return ret;
- }
-
ret = nv50_evo_channel_init(disp->master);
if (ret)
return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
if (disp->master)
nv50_evo_channel_fini(disp->master);
-
- nv50_evo_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index c34a074f7ea..3bc2a565c20 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
unsigned long flags;
int ret;
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x7c, 0x30000001);
nv_wo32(ramfc, 0x78, 0x00000000);
nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
- nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
nv_wo32(chan->ramin, 0, chan->id);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 793a5ccca12..f429e6a8ca7 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -25,229 +25,95 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
-static void nv50_gpio_isr(struct drm_device *dev);
-static void nv50_gpio_isr_bh(struct work_struct *work);
-
-struct nv50_gpio_priv {
- struct list_head handlers;
- spinlock_t lock;
-};
-
-struct nv50_gpio_handler {
- struct drm_device *dev;
- struct list_head head;
- struct work_struct work;
- bool inhibit;
-
- struct dcb_gpio_entry *gpio;
-
- void (*handler)(void *data, int state);
- void *data;
-};
-
static int
-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (gpio->line >= 32)
+ if (line >= 32)
return -EINVAL;
- *reg = nv50_gpio_reg[gpio->line >> 3];
- *shift = (gpio->line & 7) << 2;
+ *reg = nv50_gpio_reg[line >> 3];
+ *shift = (line & 7) << 2;
return 0;
}
int
-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) >> (s + 2);
- return ((v & 1) == (gpio->state[1] & 1));
+ nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+ return 0;
}
int
-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv50_gpio_sense(struct drm_device *dev, int line)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) & ~(0x3 << s);
- v |= (gpio->state[state] ^ 2) << s;
- nv_wr32(dev, r, v);
- return 0;
+ return !!(nv_rd32(dev, reg) & (4 << shift));
}
-int
-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg = line < 16 ? 0xe050 : 0xe070;
+ u32 mask = 0x00010001 << (line & 0xf);
- v = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
- v &= 0x00004000;
- return (!!v == (gpio->state[1] & 1));
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
}
int
-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- v = gpio->state[state] ^ 2;
-
- nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+ nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
return 0;
}
int
-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
+nvd0_gpio_sense(struct drm_device *dev, int line)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- struct dcb_gpio_entry *gpio;
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
- if (!gpioh)
- return -ENOMEM;
-
- INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
- gpioh->dev = dev;
- gpioh->gpio = gpio;
- gpioh->handler = handler;
- gpioh->data = data;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&gpioh->head, &priv->handlers);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
}
-void
-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh, *tmp;
- struct dcb_gpio_entry *gpio;
- LIST_HEAD(tofree);
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
- if (gpioh->gpio != gpio ||
- gpioh->handler != handler ||
- gpioh->data != data)
- continue;
- list_move(&gpioh->head, &tofree);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
- flush_work_sync(&gpioh->work);
- kfree(gpioh);
- }
-}
-
-bool
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
-{
- struct dcb_gpio_entry *gpio;
- u32 reg, mask;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return false;
-
- reg = gpio->line < 16 ? 0xe050 : 0xe070;
- mask = 0x00010001 << (gpio->line & 0xf);
-
- nv_wr32(dev, reg + 4, mask);
- reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
- return (reg & mask) == mask;
-}
-
-static int
-nv50_gpio_create(struct drm_device *dev)
+static void
+nv50_gpio_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo;
- INIT_LIST_HEAD(&priv->handlers);
- spin_lock_init(&priv->lock);
- pgpio->priv = priv;
- return 0;
-}
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-static void
-nv50_gpio_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ nouveau_gpio_isr(dev, 0, hi | lo);
- kfree(pgpio->priv);
- pgpio->priv = NULL;
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int ret;
-
- if (!pgpio->priv) {
- ret = nv50_gpio_create(dev);
- if (ret)
- return ret;
- }
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe070, 0x00000000);
nouveau_irq_unregister(dev, 21);
-
- nv50_gpio_destroy(dev);
-}
-
-static void
-nv50_gpio_isr_bh(struct work_struct *work)
-{
- struct nv50_gpio_handler *gpioh =
- container_of(work, struct nv50_gpio_handler, work);
- struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- unsigned long flags;
- int state;
-
- state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
- if (state < 0)
- return;
-
- gpioh->handler(gpioh->data, state);
-
- spin_lock_irqsave(&priv->lock, flags);
- gpioh->inhibit = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- u32 intr0, intr1 = 0;
- u32 hi, lo, ch;
-
- intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- ch = hi | lo;
-
- nv_wr32(dev, 0xe054, intr0);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, intr1);
-
- spin_lock(&priv->lock);
- list_for_each_entry(gpioh, &priv->handlers, head) {
- if (!(ch & (1 << gpioh->gpio->line)))
- continue;
-
- if (gpioh->inhibit)
- continue;
- gpioh->inhibit = true;
-
- schedule_work(&gpioh->work);
- }
- spin_unlock(&priv->lock);
}
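
The reworked nv50_gpio_isr() above no longer walks a private handler list; it folds the two per-bank interrupt words into a single 32-bit line mask and hands that to the common nouveau_gpio_isr() helper. Below is a minimal user-space sketch of that bit shuffle, assuming the register layout implied by the hunk: bits 0-15 of each word are "went high" events, bits 16-31 are "went low" events, with bank 0 covering lines 0-15 and bank 1 covering lines 16-31.

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_gpio_intr(uint32_t intr0, uint32_t intr1)
{
	/* bank 0 (lines 0-15): low half = went high, high half = went low;
	 * bank 1 (lines 16-31) is assumed to use the same layout */
	uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);

	return hi | lo; /* one bit per line with any pending event */
}

int main(void)
{
	uint32_t intr0 = 1u << 3;  /* line 3 went high (bank 0, low half)  */
	uint32_t intr1 = 1u << 20; /* line 20 went low (bank 1, high half) */

	printf("line mask: 0x%08x\n", (unsigned)fold_gpio_intr(intr0, intr1));
	return 0;
}
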
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ac601f7c4e1..33d5711a918 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
}
break;
case 7: /* MP error */
- if (ustatus & 0x00010000) {
+ if (ustatus & 0x04030000) {
nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
+ ustatus &= ~0x04030000;
}
break;
case 8: /* TPDMA error */
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 3d5a86b9828..03937212e9d 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,122 +25,745 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
+#include "nouveau_hw.h"
#include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
-struct nv50_pm_state {
- struct nouveau_pm_level *perflvl;
- struct pll_lims pll;
- enum pll_types type;
- int N, M, P;
+enum clk_src {
+ clk_src_crystal,
+ clk_src_href,
+ clk_src_hclk,
+ clk_src_hclkm3,
+ clk_src_hclkm3d2,
+ clk_src_host,
+ clk_src_nvclk,
+ clk_src_sclk,
+ clk_src_mclk,
+ clk_src_vdec,
+ clk_src_dom6
};
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ case 0xa0:
+ return nv_rd32(dev, 0x004700);
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ return nv_rd32(dev, 0x004800);
+ default:
+ return 0x00000000;
+ }
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 coef, ref = read_clk(dev, clk_src_crystal);
+ u32 rsel = nv_rd32(dev, 0x00e18c);
+ int P, N, M, id;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ switch (base) {
+ case 0x4020:
+ case 0x4028: id = !!(rsel & 0x00000004); break;
+ case 0x4008: id = !!(rsel & 0x00000008); break;
+ case 0x4030: id = 0; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+ ref *= (coef & 0x01000000) ? 2 : 4;
+ P = (coef & 0x00070000) >> 16;
+ N = ((coef & 0x0000ff00) >> 8) + 1;
+ M = ((coef & 0x000000ff) >> 0) + 1;
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ coef = nv_rd32(dev, 0x00e81c);
+ P = (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ rsel = nv_rd32(dev, 0x00c050);
+ switch (base) {
+ case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+ case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+ case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+ case 0x4030: rsel = 3; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ switch (rsel) {
+ case 0: id = 1; break;
+ case 1: return read_clk(dev, clk_src_crystal);
+ case 2: return read_clk(dev, clk_src_href);
+ case 3: id = 0; break;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
+ P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ P += (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (M)
+ return (ref * N / M) >> P;
+ return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+ u32 src, mast = nv_rd32(dev, 0x00c040);
+
+ switch (base) {
+ case 0x004028:
+ src = !!(mast & 0x00200000);
+ break;
+ case 0x004020:
+ src = !!(mast & 0x00400000);
+ break;
+ case 0x004008:
+ src = !!(mast & 0x00010000);
+ break;
+ case 0x004030:
+ src = !!(mast & 0x02000000);
+ break;
+ case 0x00e810:
+ return read_clk(dev, clk_src_crystal);
+ default:
+ NV_ERROR(dev, "bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ if (src)
+ return read_clk(dev, clk_src_href);
+ return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, base + 0);
+ u32 coef = nv_rd32(dev, base + 4);
+ u32 ref = read_pll_ref(dev, base);
+ u32 clk = 0;
+ int N1, N2, M1, M2;
+
+ if (base == 0x004028 && (mast & 0x00100000)) {
+		/* oddly, appears to only disable the post-divider on nva0 */
+ if (dev_priv->chipset != 0xa0)
+ return read_clk(dev, clk_src_dom6);
+ }
+
+ N2 = (coef & 0xff000000) >> 24;
+ M2 = (coef & 0x00ff0000) >> 16;
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clk = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ clk = clk * N2 / M2;
+ else
+ clk = 0;
+ }
+ }
+
+ return clk;
+}
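
read_pll() above reduces to the usual two-stage PLL arithmetic: the first VCO multiplies the reference clock by N1/M1, and the second stage, when enabled, multiplies again by N2/M2. A standalone sketch of just that arithmetic, with the coefficient-register layout taken from the hunk and the control-register enable checks collapsed into two booleans (an assumption; the real function also special-cases the 0x004028 bypass):

#include <stdint.h>
#include <stdio.h>

static uint32_t pll_khz(uint32_t ref_khz, uint32_t coef, int vco1_on, int vco2_on)
{
	uint32_t N2 = (coef & 0xff000000) >> 24;
	uint32_t M2 = (coef & 0x00ff0000) >> 16;
	uint32_t N1 = (coef & 0x0000ff00) >> 8;
	uint32_t M1 = (coef & 0x000000ff);
	uint32_t clk = 0;

	if (vco1_on && M1) {
		clk = ref_khz * N1 / M1;               /* first VCO */
		if (vco2_on)
			clk = M2 ? clk * N2 / M2 : 0;  /* optional second stage */
	}
	return clk;
}

int main(void)
{
	/* 27 MHz crystal, N1=100, M1=3, second stage bypassed: 900 MHz */
	printf("%u kHz\n", (unsigned)pll_khz(27000, (100 << 8) | 3, 1, 0));
	return 0;
}
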
+
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 P = 0;
+
+ switch (src) {
+ case clk_src_crystal:
+ return dev_priv->crystal;
+ case clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case clk_src_hclk:
+ return read_clk(dev, clk_src_href) * 27778 / 10000;
+ case clk_src_hclkm3:
+ return read_clk(dev, clk_src_hclk) * 3;
+ case clk_src_hclkm3d2:
+ return read_clk(dev, clk_src_hclk) * 3 / 2;
+ case clk_src_host:
+ switch (mast & 0x30000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x10000000: break;
+ case 0x20000000: /* !0x50 */
+ case 0x30000000: return read_clk(dev, clk_src_hclk);
+ }
+ break;
+ case clk_src_nvclk:
+ if (!(mast & 0x00100000))
+ P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+ switch (mast & 0x00000003) {
+ case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000001: return read_clk(dev, clk_src_dom6);
+ case 0x00000002: return read_pll(dev, 0x004020) >> P;
+ case 0x00000003: return read_pll(dev, 0x004028) >> P;
+ }
+ break;
+ case clk_src_sclk:
+ P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000080)
+ return read_clk(dev, clk_src_host) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(dev, 0x004028) >> P;
+ case 0x00000030: return read_pll(dev, 0x004020) >> P;
+ }
+ break;
+ case clk_src_mclk:
+ P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
+ if (nv_rd32(dev, 0x004008) & 0x00000200) {
+ switch (mast & 0x0000c000) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00008000:
+ case 0x0000c000:
+ return read_clk(dev, clk_src_href) >> P;
+ }
+ } else {
+ return read_pll(dev, 0x004008) >> P;
+ }
+ break;
+ case clk_src_vdec:
+ P = (read_div(dev) & 0x00000700) >> 8;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+			if (dev_priv->chipset == 0xa0) /* unexpected; reason unknown */
+ return read_clk(dev, clk_src_nvclk) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ if (mast & 0x01000000)
+ return read_pll(dev, 0x004028) >> P;
+ return read_pll(dev, 0x004030) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ }
+ break;
+ case 0x98:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ return read_clk(dev, clk_src_hclkm3d2) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_mclk) >> P;
+ }
+ break;
+ }
+ break;
+ case clk_src_dom6:
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ return read_pll(dev, 0x00e810) >> 2;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ P = (read_div(dev) & 0x00000007) >> 0;
+ switch (mast & 0x0c000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x04000000: break;
+ case 0x08000000: return read_clk(dev, clk_src_hclk);
+ case 0x0c000000:
+ return read_clk(dev, clk_src_hclkm3) >> P;
+ }
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
int
-nv50_pm_clock_get(struct drm_device *dev, u32 id)
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct pll_lims pll;
- int P, N, M, ret;
- u32 reg0, reg1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return 0;
- ret = get_pll_limits(dev, id, &pll);
+ perflvl->core = read_clk(dev, clk_src_nvclk);
+ perflvl->shader = read_clk(dev, clk_src_sclk);
+ perflvl->memory = read_clk(dev, clk_src_mclk);
+ if (dev_priv->chipset != 0x50) {
+ perflvl->vdec = read_clk(dev, clk_src_vdec);
+ perflvl->dom6 = read_clk(dev, clk_src_dom6);
+ }
+
+ return 0;
+}
+
+struct nv50_pm_state {
+ struct hwsq_ucode mclk_hwsq;
+ u32 mscript;
+
+ u32 emast;
+ u32 nctrl;
+ u32 ncoef;
+ u32 sctrl;
+ u32 scoef;
+
+ u32 amast;
+ u32 pdivs;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+ u32 clk, int *N1, int *M1, int *log2P)
+{
+ struct nouveau_pll_vals coef;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, pll);
if (ret)
- return ret;
+ return 0;
+
+ pll->vco2.maxfreq = 0;
+ pll->refclk = read_pll_ref(dev, reg);
+ if (!pll->refclk)
+ return 0;
- reg0 = nv_rd32(dev, pll.reg + 0);
- reg1 = nv_rd32(dev, pll.reg + 4);
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+ if (ret == 0)
+ return 0;
- if ((reg0 & 0x80000000) == 0) {
- if (id == PLL_SHADER) {
- NV_DEBUG(dev, "Shader PLL is disabled. "
- "Shader clock is twice the core\n");
- ret = nv50_pm_clock_get(dev, PLL_CORE);
- if (ret > 0)
- return ret << 1;
- } else if (id == PLL_MEMORY) {
- NV_DEBUG(dev, "Memory PLL is disabled. "
- "Memory clock is equal to the ref_clk\n");
- return pll.refclk;
+ *N1 = coef.N1;
+ *M1 = coef.M1;
+ *log2P = coef.log2P;
+ return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
}
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
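
calc_div() above searches the 3-bit post-divider for the power-of-two division that lands closest to the requested clock: it walks down to the first candidate at or below the target, keeps the value one step above as the alternative, and returns whichever is nearer, leaving the chosen shift in *div. A standalone rework of the same search, with a made-up example input:

#include <stdint.h>
#include <stdio.h>

static uint32_t pick_div(uint32_t src, uint32_t target, int *div)
{
	uint32_t clk0 = src, clk1 = src;

	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0); /* candidate one step above */
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int main(void)
{
	int div;
	/* e.g. derive a ~300 MHz vdec clock from a 450 MHz core clock */
	uint32_t out = pick_div(450000, 300000, &div);

	printf("chose %u kHz with divider 2^%d\n", (unsigned)out, div);
	return 0;
}
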
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+ return ((a / 1000) == (b / 1000));
+}
+
+static int
+calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pll_lims pll;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, 0x004008);
+ u32 coef = nv_rd32(dev, 0x00400c);
+ u32 orig = ctrl;
+ u32 crtc_mask = 0;
+ int N, M, P;
+ int ret, i;
+
+ /* use pcie refclock if possible, otherwise use mpll */
+ ctrl &= ~0x81ff0200;
+ if (clk_same(freq, read_clk(dev, clk_src_href))) {
+ ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+ } else {
+ ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+ if (ret == 0)
+ return -EINVAL;
+
+ ctrl |= 0x80000000 | (P << 22) | (P << 16);
+ ctrl |= pll.log2p_bias << 19;
+ coef = (N << 8) | M;
+ }
+
+ mast &= ~0xc0000000; /* get MCLK_2 from HREF */
+ mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+ /* determine active crtcs */
+ for (i = 0; i < 2; i++) {
+ if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
+ crtc_mask |= (1 << i);
+ }
+
+ /* build the ucode which will reclock the memory for us */
+ hwsq_init(hwsq);
+ if (crtc_mask) {
+ hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+ hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
}
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+ hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x01); /* purpose unknown */
+
+ /* prepare memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
+ hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
- P = (reg0 & 0x00070000) >> 16;
- N = (reg1 & 0x0000ff00) >> 8;
- M = (reg1 & 0x000000ff);
+ /* reclock memory */
+ hwsq_wr32(hwsq, 0xc040, mast);
+ hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
+ hwsq_wr32(hwsq, 0x400c, coef);
+ hwsq_wr32(hwsq, 0x4008, ctrl);
- return ((pll.refclk * N / M) >> P);
+ /* restart memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
+ hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
+ hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
+
+ hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+ hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x00); /* purpose unknown; reverse of the (0x00, 0x01) op above? */
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+ hwsq_fini(hwsq);
+ return 0;
}
void *
-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv50_pm_state *state;
- int dummy, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info;
+ struct pll_lims pll;
+ int ret = -EINVAL;
+ int N, M, P1, P2;
+ u32 clk, out;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return ERR_PTR(-ENODEV);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- state->type = id;
- state->perflvl = perflvl;
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret < 0) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ /* core: for the moment at least, always use nvpll */
+ clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast = 0x00000003;
+ info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->ncoef = (N << 8) | M;
+
+	/* shader: tie to nvclk if possible, otherwise use spll.  We have to
+	 * be very careful that the shader clock is at least twice the core
+	 * clock, or some chipsets will be very unhappy.  Most or all of
+	 * these cases should be handled by tying to nvclk, but there may be
+	 * corner cases.
+	 */
+ if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+ info->emast |= 0x00000020;
+ info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = nv_rd32(dev, 0x004024);
+ } else {
+ clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast |= 0x00000030;
+ info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = (N << 8) | M;
+ }
+
+ /* memory: build hwsq ucode which we'll use to reclock memory */
+ info->mclk_hwsq.len = 0;
+ if (perflvl->memory) {
+ clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
+ if (clk < 0) {
+ ret = clk;
+ goto error;
+ }
+
+ info->mscript = perflvl->memscript;
+ }
+
+ /* vdec: avoid modifying xpll until we know exactly how the other
+ * clock domains work, i suspect at least some of them can also be
+ * tied to xpll...
+ */
+ info->amast = nv_rd32(dev, 0x00c040);
+ info->pdivs = read_div(dev);
+ if (perflvl->vdec) {
+ /* see how close we can get using nvclk as a source */
+ clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+ /* see how close we can get using xpll/hclk as a source */
+ if (dev_priv->chipset != 0x98)
+ out = read_pll(dev, 0x004030);
+ else
+ out = read_clk(dev, clk_src_hclkm3d2);
+ out = calc_div(out, perflvl->vdec, &P2);
+
+ /* select whichever gets us closest */
+ info->amast &= ~0x00000c00;
+ info->pdivs &= ~0x00000700;
+ if (abs((int)perflvl->vdec - clk) <=
+ abs((int)perflvl->vdec - out)) {
+ if (dev_priv->chipset != 0x98)
+ info->amast |= 0x00000c00;
+ info->pdivs |= P1 << 8;
+ } else {
+ info->amast |= 0x00000800;
+ info->pdivs |= P2 << 8;
+ }
+ }
+
+	/* dom6: unclear what this is, but we're limited to various
+	 * combinations of the host clock frequency.
+	 */
+ if (perflvl->dom6) {
+ info->amast &= ~0x0c000000;
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+ info->amast |= 0x00000000;
+ } else
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+ info->amast |= 0x08000000;
+ } else {
+ clk = read_clk(dev, clk_src_hclk) * 3;
+ clk = calc_div(clk, perflvl->dom6, &P1);
+
+ info->amast |= 0x0c000000;
+ info->pdivs = (info->pdivs & ~0x00000007) | P1;
+ }
}
- ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
- &dummy, &dummy, &state->P);
- if (ret < 0) {
- kfree(state);
- return ERR_PTR(ret);
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
+}
+
+static int
+prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 hwsq_data, hwsq_kick;
+ int i;
+
+ if (dev_priv->chipset < 0x90) {
+ hwsq_data = 0x001400;
+ hwsq_kick = 0x00000003;
+ } else {
+ hwsq_data = 0x080000;
+ hwsq_kick = 0x00000001;
}
- return state;
+ /* upload hwsq ucode */
+ nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
+ nv_wr32(dev, 0x001304, 0x00000000);
+ for (i = 0; i < hwsq->len / 4; i++)
+ nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+ nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+
+ /* launch, and wait for completion */
+ nv_wr32(dev, 0x00130c, hwsq_kick);
+ if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
+ NV_ERROR(dev, "hwsq ucode exec timed out\n");
+ NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+ for (i = 0; i < hwsq->len / 4; i++) {
+ NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+ nv_rd32(dev, 0x001400 + (i * 4)));
+ }
+
+ return -EIO;
+ }
+
+ return 0;
}
-void
-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
{
- struct nv50_pm_state *state = pre_state;
- struct nouveau_pm_level *perflvl = state->perflvl;
- u32 reg = state->pll.reg, tmp;
- struct bit_entry BIT_M;
- u16 script;
- int N = state->N;
- int M = state->M;
- int P = state->P;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info = data;
+ struct bit_entry M;
+ int ret = 0;
- if (state->type == PLL_MEMORY && perflvl->memscript &&
- bit_table(dev, 'M', &BIT_M) == 0 &&
- BIT_M.version == 1 && BIT_M.length >= 0x0b) {
- script = ROM16(BIT_M.data[0x05]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x07]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x09]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
+ /* halt and idle execution engines */
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+ goto error;
- nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
+ /* memory: it is *very* important we change this first, the ucode
+ * we build in pre() now has hardcoded 0xc040 values, which can't
+ * change before we execute it or the engine clocks may end up
+ * messed up.
+ */
+ if (info->mclk_hwsq.len) {
+		/* execute memory-related scripts from the vbios; exact purpose unknown */
+ if (!bit_table(dev, 'M', &M) && M.version == 1) {
+ if (M.length >= 6)
+ nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+ if (M.length >= 8)
+ nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+ if (M.length >= 10)
+ nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+ nouveau_bios_init_exec(dev, info->mscript);
+ }
+
+ ret = prog_mclk(dev, &info->mclk_hwsq);
+ if (ret)
+ goto resume;
}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
+ /* reclock vdec/dom6 */
+ nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
+ switch (dev_priv->chipset) {
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
+ break;
+ default:
+ nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
+ break;
}
+ nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
- tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
- tmp |= 0x80000000 | (P << 16);
- nv_wr32(dev, reg + 0, tmp);
- nv_wr32(dev, reg + 4, (N << 8) | M);
+ /* core/shader: make sure sclk/nvclk are disconnected from their
+ * plls (nvclk to dom6, sclk to hclk), modify the plls, and
+ * reconnect sclk/nvclk to their new clock source
+ */
+ if (dev_priv->chipset < 0x92)
+ nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
+ else
+ nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
+ nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
+ nv_wr32(dev, 0x004024, info->scoef);
+ nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
+ nv_wr32(dev, 0x00402c, info->ncoef);
+ nv_mask(dev, 0x00c040, 0x00100033, info->emast);
+
+ goto resume;
+error:
+ ret = -EBUSY;
+resume:
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+ kfree(info);
+ return ret;
+}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
+static int
+pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
+{
+ if (*line == 0x04) {
+ *ctrl = 0x00e100;
+ *line = 4;
+ *indx = 0;
+ } else
+ if (*line == 0x09) {
+ *ctrl = 0x00e100;
+ *line = 9;
+ *indx = 1;
+ } else
+ if (*line == 0x10) {
+ *ctrl = 0x00e28c;
+ *line = 0;
+ *indx = 0;
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int
+nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ if (nv_rd32(dev, ctrl) & (1 << line)) {
+ *divs = nv_rd32(dev, 0x00e114 + (id * 8));
+ *duty = nv_rd32(dev, 0x00e118 + (id * 8));
+ return 0;
}
- kfree(state);
+ return -EINVAL;
}
+int
+nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
+ nv_wr32(dev, 0x00e114 + (id * 8), divs);
+ nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
+ return 0;
+}
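
The PWM helpers at the end of the file move raw (divs, duty) pairs to and from the fan controller: nv50_pm_pwm_get() reports them only when the line is already in PWM mode, and nv50_pm_pwm_set() switches the line to PWM mode and latches the new values. A hedged sketch of how a caller might map those raw values to a fan-speed percentage; the percentage helpers are hypothetical and not part of the driver, which only deals in the raw pair, but they show the relationship the two registers encode (duty counts out of divs):

#include <stdint.h>
#include <stdio.h>

static uint32_t duty_from_percent(uint32_t divs, uint32_t percent)
{
	if (percent > 100)
		percent = 100;
	return (uint32_t)((uint64_t)divs * percent / 100);
}

static uint32_t percent_from_duty(uint32_t divs, uint32_t duty)
{
	return divs ? (uint32_t)((uint64_t)duty * 100 / divs) : 0;
}

int main(void)
{
	uint32_t divs = 0x0840; /* made-up divider value */
	uint32_t duty = duty_from_percent(divs, 40);

	printf("duty 0x%04x -> %u%%\n", (unsigned)duty,
	       (unsigned)percent_from_duty(divs, duty));
	return 0;
}
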
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 2633aa8554e..c4423ba9c9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
+ nouveau_hdmi_mode_set(encoder, NULL);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_sor_disconnect(encoder);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ /* avoid race between link training and supervisor intr */
+ nv50_display_sync(encoder->dev);
+ }
}
static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
{
struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+ nv_encoder->crtc = encoder->crtc;
switch (nv_encoder->dcb->type) {
case OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
- if (adjusted_mode->clock < 165000)
+ if (mode->clock < 165000)
mode_ctl = 0x0100;
else
mode_ctl = 0x0500;
} else
mode_ctl = 0x0200;
+
+ nouveau_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_DP:
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
mode_ctl |= 0x00020000;
} else {
- nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
mode_ctl |= 0x00050000;
}
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while connecting SOR\n");
+ nv_encoder->crtc = NULL;
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
-
- nv_encoder->crtc = encoder->crtc;
}
static struct drm_crtc *
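
The mode_set change above recomputes the DP data rate from the adjusted mode rather than the CRTC's stored mode; the arithmetic itself is unchanged: pixel clock times bits per pixel (18 for 6 bpc, 24 for 8 bpc) divided by 8. A standalone sketch of that calculation, with an assumed example pixel clock:

#include <stdint.h>
#include <stdio.h>

static uint32_t dp_datarate(uint32_t pixel_clock_khz, int bpc)
{
	int bpp = bpc * 3; /* three colour components per pixel */

	return pixel_clock_khz * bpp / 8;
}

int main(void)
{
	uint32_t clock = 154000; /* roughly 1920x1200@60 */

	printf("6 bpc: %u\n", (unsigned)dp_datarate(clock, 6));
	printf("8 bpc: %u\n", (unsigned)dp_datarate(clock, 8));
	return 0;
}
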
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 40b84f22d81..6f38ceae3aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
phys |= 0x60;
else if (coverage <= 64 * 1024 * 1024)
phys |= 0x40;
- else if (coverage < 128 * 1024 * 1024)
+ else if (coverage <= 128 * 1024 * 1024)
phys |= 0x20;
}
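
The one-character nv50_vm.c fix makes the 128 MiB coverage bound inclusive, matching the 32 MiB and 64 MiB cases. A standalone sketch of the resulting mapping; the first bound is not visible in the hunk and is assumed to be 32 MiB, and the flag values are treated as opaque:

#include <stdint.h>
#include <stdio.h>

#define MiB (1024u * 1024u)

static uint32_t coverage_flags(uint32_t coverage)
{
	if (coverage <= 32 * MiB)        /* assumed bound, not shown in the hunk */
		return 0x60;
	else if (coverage <= 64 * MiB)
		return 0x40;
	else if (coverage <= 128 * MiB)  /* was "<" before the fix */
		return 0x20;
	return 0x00;
}

int main(void)
{
	printf("128 MiB -> 0x%02x\n", (unsigned)coverage_flags(128 * MiB));
	return 0;
}
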
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644
index 00000000000..74875739bcc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_bsp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ as well; as soon as this becomes
+ * more than just an enable/disable stub this needs to be split out to
+ * nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+ return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, BSP);
+
+ kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+ struct nv84_bsp_engine *pbsp;
+
+ pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+ if (!pbsp)
+ return -ENOMEM;
+
+ pbsp->base.destroy = nv84_bsp_destroy;
+ pbsp->base.init = nv84_bsp_init;
+ pbsp->base.fini = nv84_bsp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+ return 0;
+}
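
nv84_bsp.c and the nv84_vp/nv98_crypt/nv98_ppp stubs that follow all share one pattern: init pulses the engine's bit in the master-enable register (clear, then set) to reset the unit, and fini clears the bit only if it is currently set. A standalone sketch of that pattern, with the MMIO access through nv_rd32()/nv_mask() modelled as a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint32_t pmc_enable = 0xffffffff; /* stand-in for register 0x000200 */

static uint32_t fake_mask(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t old = *reg;

	*reg = (old & ~mask) | (val & mask);
	return old;
}

static void engine_init(uint32_t bit)
{
	fake_mask(&pmc_enable, bit, 0);   /* hold the engine in reset */
	fake_mask(&pmc_enable, bit, bit); /* and release it again */
}

static void engine_fini(uint32_t bit)
{
	if (!(pmc_enable & bit))
		return;                   /* already disabled */
	fake_mask(&pmc_enable, bit, 0);
}

int main(void)
{
	engine_init(0x00008000);          /* BSP enable bit from the diff */
	engine_fini(0x00008000);
	printf("PMC enable: 0x%08x\n", (unsigned)pmc_enable);
	return 0;
}
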
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644
index 00000000000..6570d300ab8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_vp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ as well; as soon as this becomes
+ * more than just an enable/disable stub this needs to be split out to
+ * nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+ return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, VP);
+
+ kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+ struct nv84_vp_engine *pvp;
+
+ pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+ if (!pvp)
+ return -ENOMEM;
+
+ pvp->base.destroy = nv84_vp_destroy;
+ pvp->base.init = nv84_vp_init;
+ pvp->base.fini = nv84_vp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644
index 00000000000..db94ff0a9fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_crypt_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00004000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+ return 0;
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+ kfree(pcrypt);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+ struct nv98_crypt_engine *pcrypt;
+
+ pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+ if (!pcrypt)
+ return -ENOMEM;
+
+ pcrypt->base.destroy = nv98_crypt_destroy;
+ pcrypt->base.init = nv98_crypt_init;
+ pcrypt->base.fini = nv98_crypt_fini;
+
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644
index 00000000000..a987dd6e003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_ppp.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_ppp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, PPP);
+
+ kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+ struct nv98_ppp_engine *pppp;
+
+ pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+ if (!pppp)
+ return -ENOMEM;
+
+ pppp->base.destroy = nv98_ppp_destroy;
+ pppp->base.init = nv98_ppp_init;
+ pppp->base.fini = nv98_ppp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index eaf35f8321e..abc36626fef 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -31,8 +31,9 @@
*/
ifdef(`NVA3',
-.section nva3_pcopy_data,
-.section nvc0_pcopy_data
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
)
ctx_object: .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query: .b32 0
ctx_dma_src: .b32 0
ctx_dma_dst: .b32 0
,)
-.equ ctx_dma_count 3
+.equ #ctx_dma_count 3
ctx_query_address_high: .b32 0
ctx_query_address_low: .b32 0
ctx_query_counter: .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt: .b32 0
dispatch_table:
// mthd 0x0000, NAME
.b16 0x000 1
-.b32 ctx_object ~0xffffffff
+.b32 #ctx_object ~0xffffffff
// mthd 0x0100, NOP
.b16 0x040 1
-.b32 0x00010000 + cmd_nop ~0xffffffff
+.b32 0x00010000 + #cmd_nop ~0xffffffff
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
-.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
+.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
ifdef(`NVA3', `
// mthd 0x0180-0x018c, DMA_
-.b16 0x060 ctx_dma_count
+.b16 0x060 #ctx_dma_count
dispatch_dma:
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
',)
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
-.b32 ctx_src_tile_mode ~0x00000fff
-.b32 ctx_src_xsize ~0x0007ffff
-.b32 ctx_src_ysize ~0x00001fff
-.b32 ctx_src_zsize ~0x000007ff
-.b32 ctx_src_zoff ~0x00000fff
-.b32 ctx_src_xoff ~0x0007ffff
-.b32 ctx_src_yoff ~0x00001fff
+.b32 #ctx_src_tile_mode ~0x00000fff
+.b32 #ctx_src_xsize ~0x0007ffff
+.b32 #ctx_src_ysize ~0x00001fff
+.b32 #ctx_src_zsize ~0x000007ff
+.b32 #ctx_src_zoff ~0x00000fff
+.b32 #ctx_src_xoff ~0x0007ffff
+.b32 #ctx_src_yoff ~0x00001fff
// mthd 0x0220-0x0238, DST_TILE
.b16 0x88 7
-.b32 ctx_dst_tile_mode ~0x00000fff
-.b32 ctx_dst_xsize ~0x0007ffff
-.b32 ctx_dst_ysize ~0x00001fff
-.b32 ctx_dst_zsize ~0x000007ff
-.b32 ctx_dst_zoff ~0x00000fff
-.b32 ctx_dst_xoff ~0x0007ffff
-.b32 ctx_dst_yoff ~0x00001fff
+.b32 #ctx_dst_tile_mode ~0x00000fff
+.b32 #ctx_dst_xsize ~0x0007ffff
+.b32 #ctx_dst_ysize ~0x00001fff
+.b32 #ctx_dst_zsize ~0x000007ff
+.b32 #ctx_dst_zoff ~0x00000fff
+.b32 #ctx_dst_xoff ~0x0007ffff
+.b32 #ctx_dst_yoff ~0x00001fff
// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
.b16 0xc0 2
-.b32 0x00010000 + cmd_exec ~0xffffffff
-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + #cmd_exec ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
-.b32 ctx_src_address_high ~0x000000ff
-.b32 ctx_src_address_low ~0xfffffff0
-.b32 ctx_dst_address_high ~0x000000ff
-.b32 ctx_dst_address_low ~0xfffffff0
-.b32 ctx_src_pitch ~0x0007ffff
-.b32 ctx_dst_pitch ~0x0007ffff
-.b32 ctx_xcnt ~0x0000ffff
-.b32 ctx_ycnt ~0x00001fff
-.b32 ctx_format ~0x0333ffff
-.b32 ctx_swz_const0 ~0xffffffff
-.b32 ctx_swz_const1 ~0xffffffff
-.b32 ctx_query_address_high ~0x000000ff
-.b32 ctx_query_address_low ~0xffffffff
-.b32 ctx_query_counter ~0xffffffff
+.b32 #ctx_src_address_high ~0x000000ff
+.b32 #ctx_src_address_low ~0xfffffff0
+.b32 #ctx_dst_address_high ~0x000000ff
+.b32 #ctx_dst_address_low ~0xfffffff0
+.b32 #ctx_src_pitch ~0x0007ffff
+.b32 #ctx_dst_pitch ~0x0007ffff
+.b32 #ctx_xcnt ~0x0000ffff
+.b32 #ctx_ycnt ~0x00001fff
+.b32 #ctx_format ~0x0333ffff
+.b32 #ctx_swz_const0 ~0xffffffff
+.b32 #ctx_swz_const1 ~0xffffffff
+.b32 #ctx_query_address_high ~0x000000ff
+.b32 #ctx_query_address_low ~0xffffffff
+.b32 #ctx_query_counter ~0xffffffff
.b16 0x800 0
ifdef(`NVA3',
-.section nva3_pcopy_code,
-.section nvc0_pcopy_code
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
)
main:
@@ -143,12 +145,12 @@ main:
mov $sp $r0
// setup i0 handler and route fifo and ctxswitch to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
movw $r2 0xfff3
sethi $r2 0
- iowr I[$r2 + 0x300] $r2
+ iowr I[$r1 + 0x300] $r2
// enable interrupts
or $r2 0xc
@@ -164,19 +166,19 @@ main:
bset $flags $p0
spin:
sleep $p0
- bra spin
+ bra #spin
// i0 handler
ih:
iord $r1 I[$r0 + 0x200]
and $r2 $r1 0x00000008
- bra e ih_no_chsw
- call chsw
+ bra e #ih_no_chsw
+ call #chsw
ih_no_chsw:
and $r2 $r1 0x00000004
- bra e ih_no_cmd
- call dispatch
+ bra e #ih_no_cmd
+ call #dispatch
ih_no_cmd:
and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
sethi $r4 0x60000
// swap!
- bra $p1 swctx_load
+ bra $p1 #swctx_load
xdst $r0 $r4
- bra swctx_done
+ bra #swctx_done
swctx_load:
xdld $r0 $r4
swctx_done:
@@ -251,9 +253,9 @@ chsw:
// if it's active, unload it and return
xbit $r15 $r3 0x1e
- bra e chsw_no_unload
+ bra e #chsw_no_unload
bclr $flags $p1
- call swctx
+ call #swctx
bclr $r3 0x1e
iowr I[$r2] $r3
mov $r4 1
@@ -266,20 +268,20 @@ chsw:
// is there a channel waiting to be loaded?
xbit $r13 $r3 0x1e
- bra e chsw_finish_load
+ bra e #chsw_finish_load
bset $flags $p1
- call swctx
+ call #swctx
ifdef(`NVA3',
// load dma objects back into TARGET regs
- mov $r5 ctx_dma
- mov $r6 ctx_dma_count
+ mov $r5 #ctx_dma
+ mov $r6 #ctx_dma_count
chsw_load_ctx_dma:
ld b32 $r7 D[$r5 + $r6 * 4]
add b32 $r8 $r6 0x180
shl b32 $r8 8
iowr I[$r8] $r7
sub b32 $r6 1
- bra nc chsw_load_ctx_dma
+ bra nc #chsw_load_ctx_dma
,)
chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
shl b32 $r2 0x10
// lookup method in the dispatch table, ILLEGAL_MTHD if not found
- mov $r5 dispatch_table
+ mov $r5 #dispatch_table
clear b32 $r6
clear b32 $r7
dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
ld b16 $r7 D[$r5 + 2]
add b32 $r5 4
cmpu b32 $r4 $r6
- bra c dispatch_illegal_mthd
+ bra c #dispatch_illegal_mthd
add b32 $r7 $r6
cmpu b32 $r4 $r7
- bra c dispatch_valid_mthd
+ bra c #dispatch_valid_mthd
sub b32 $r7 $r6
shl b32 $r7 3
add b32 $r5 $r7
- bra dispatch_loop
+ bra #dispatch_loop
// ensure no bits set in reserved fields, INVALID_BITFIELD
dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
ld b32 $r5 D[$r4 + 4]
and $r5 $r3
cmpu b32 $r5 0
- bra ne dispatch_invalid_bitfield
+ bra ne #dispatch_invalid_bitfield
// depending on dispatch flags: execute method, or save data as state
ld b16 $r5 D[$r4 + 0]
ld b16 $r6 D[$r4 + 2]
cmpu b32 $r6 0
- bra ne dispatch_cmd
+ bra ne #dispatch_cmd
st b32 D[$r5] $r3
- bra dispatch_done
+ bra #dispatch_done
dispatch_cmd:
bclr $flags $p1
call $r5
- bra $p1 dispatch_error
- bra dispatch_done
+ bra $p1 #dispatch_error
+ bra #dispatch_done
dispatch_invalid_bitfield:
or $r2 2
@@ -353,7 +355,7 @@ dispatch:
iord $r2 I[$r0 + 0x200]
and $r2 0x40
cmpu b32 $r2 0
- bra ne hostirq_wait
+ bra ne #hostirq_wait
dispatch_done:
mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
// $r2: hostirq state
// $r3: data
cmd_dma:
- sub b32 $r4 dispatch_dma
+ sub b32 $r4 #dispatch_dma
shr b32 $r4 1
bset $r3 0x1e
- st b32 D[$r4 + ctx_dma] $r3
+ st b32 D[$r4 + #ctx_dma] $r3
add b32 $r4 0x600
shl b32 $r4 6
iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
st b32 D[$sp + 0x0c] $r0
// extract cpp, src_ncomp and dst_ncomp from FORMAT
- ld b32 $r4 D[$r0 + ctx_format]
+ ld b32 $r4 D[$r0 + #ctx_format]
extr $r5 $r4 16:17
add b32 $r5 1
extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
clear b32 $r11
bpc_loop:
cmpu b8 $r10 4
- bra nc cmp_c0
+ bra nc #cmp_c0
mulu $r12 $r10 $r5
add b32 $r12 $r11
bset $flags $p2
- bra bpc_next
+ bra #bpc_next
cmp_c0:
- bra ne cmp_c1
+ bra ne #cmp_c1
mov $r12 0x10
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_c1:
cmpu b8 $r10 6
- bra nc cmp_zero
+ bra nc #cmp_zero
mov $r12 0x14
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_zero:
mov $r12 0x80
bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
add b32 $r8 1
add b32 $r11 1
cmpu b32 $r11 $r5
- bra c bpc_loop
+ bra c #bpc_loop
add b32 $r9 1
cmpu b32 $r9 $r7
- bra c ncomp_loop
+ bra c #ncomp_loop
// SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
mulu $r6 $r5
- st b32 D[$r0 + ctx_src_cpp] $r6
- ld b32 $r8 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + #ctx_xcnt]
mulu $r6 $r8
- bra $p2 dst_xcnt
+ bra $p2 #dst_xcnt
clear b32 $r6
dst_xcnt:
mulu $r7 $r5
- st b32 D[$r0 + ctx_dst_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
mulu $r7 $r8
mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
iowr I[$r5 + 0x000] $r6
iowr I[$r5 + 0x100] $r7
add b32 $r5 0x800
- ld b32 $r6 D[$r0 + ctx_dst_cpp]
+ ld b32 $r6 D[$r0 + #ctx_dst_cpp]
sub b32 $r6 1
shl b32 $r6 8
- ld b32 $r7 D[$r0 + ctx_src_cpp]
+ ld b32 $r7 D[$r0 + #ctx_src_cpp]
sub b32 $r7 1
or $r6 $r7
iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
ld b32 $r6 D[$sp + 0x0c]
iowr I[$r5 + 0x300] $r6
add b32 $r5 0x400
- ld b32 $r6 D[$r0 + ctx_swz_const0]
+ ld b32 $r6 D[$r0 + #ctx_swz_const0]
iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$r0 + ctx_swz_const1]
+ ld b32 $r6 D[$r0 + #ctx_swz_const1]
iowr I[$r5 + 0x100] $r6
add $sp 0x10
ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
//
cmd_exec_set_surface_tiled:
// translate TILE_MODE into Tp, Th, Td shift values
- ld b32 $r7 D[$r5 + ctx_src_tile_mode]
+ ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
extr $r9 $r7 8:11
extr $r8 $r7 4:7
ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
)
extr $r7 $r7 0:3
cmp b32 $r7 0xe
- bra ne xtile64
+ bra ne #xtile64
mov $r7 4
- bra xtileok
+ bra #xtileok
xtile64:
xbit $r7 $flags $p2
add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
// Op = (x * cpp) & ((1 << Tp) - 1)
// Tx = (x * cpp) >> Tp
- ld b32 $r10 D[$r5 + ctx_src_xoff]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r10 D[$r5 + #ctx_src_xoff]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r10 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
// Tyo = y & ((1 << Th) - 1)
// Ty = y >> Th
- ld b32 $r13 D[$r5 + ctx_src_yoff]
+ ld b32 $r13 D[$r5 + #ctx_src_yoff]
mov $r14 1
shl b32 $r14 $r8
sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
add b32 $r12 $r11
// nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
- ld b32 $r15 D[$r5 + ctx_src_xsize]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r15 D[$r5 + #ctx_src_xsize]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r15 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
push $r15
// nTy = (h + ((1 << Th) - 1)) >> Th
- ld b32 $r15 D[$r5 + ctx_src_ysize]
+ ld b32 $r15 D[$r5 + #ctx_src_ysize]
mov $r11 1
shl b32 $r11 $r8
sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
// Tz = z >> Td
// Op += Tzo << Tys
// Ts = Tys + Td
- ld b32 $r8 D[$r5 + ctx_src_zoff]
+ ld b32 $r8 D[$r5 + #ctx_src_zoff]
mov $r14 1
shl b32 $r14 $r9
sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
// SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
// CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
- ld b32 $r7 D[$r5 + ctx_src_address_low]
- ld b32 $r8 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r8 D[$r5 + #ctx_src_address_high]
add b32 $r10 $r12
add b32 $r7 $r10
adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
xbit $r6 $flags $p2
add b32 $r6 0x202
shl b32 $r6 8
- ld b32 $r7 D[$r5 + ctx_src_address_low]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_high]
shl b32 $r7 16
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_pitch]
+ ld b32 $r7 D[$r5 + #ctx_src_pitch]
iowr I[$r6 + 0x000] $r7
ret
@@ -697,7 +699,7 @@ cmd_exec_wait:
loop:
iord $r1 I[$r0]
and $r1 1
- bra ne loop
+ bra ne #loop
pop $r1
pop $r0
ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
cmd_exec_query:
// if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
xbit $r4 $r3 13
- bra ne query_counter
- call cmd_exec_wait
+ bra ne #query_counter
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
add b32 $r5 4
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0xc
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
// write COUNTER
query_counter:
- call cmd_exec_wait
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0x4
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
mov $r5 0x00001110
sethi $r5 0x13120000
iowr I[$r4 + 0x100] $r5
- ld b32 $r5 D[$r0 + ctx_query_counter]
+ ld b32 $r5 D[$r0 + #ctx_query_counter]
add b32 $r4 0x500
iowr I[$r4 + 0x000] $r5
mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
// $r2: hostirq state
// $r3: data
cmd_exec:
- call cmd_exec_wait
+ call #cmd_exec_wait
// if format requested, call function to calculate it, otherwise
// fill in cpp/xcnt for both surfaces as if (cpp == 1)
xbit $r15 $r3 0
- bra e cmd_exec_no_format
- call cmd_exec_set_format
+ bra e #cmd_exec_no_format
+ call #cmd_exec_set_format
mov $r4 0x200
- bra cmd_exec_init_src_surface
+ bra #cmd_exec_init_src_surface
cmd_exec_no_format:
mov $r6 0x810
shl b32 $r6 6
mov $r7 1
- st b32 D[$r0 + ctx_src_cpp] $r7
- st b32 D[$r0 + ctx_dst_cpp] $r7
- ld b32 $r7 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + #ctx_xcnt]
iowr I[$r6 + 0x000] $r7
iowr I[$r6 + 0x100] $r7
clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
bclr $flags $p2
clear b32 $r5
xbit $r15 $r3 4
- bra e src_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_init_dst_surface
+ bra e #src_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_init_dst_surface
src_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 7
cmd_exec_init_dst_surface:
bset $flags $p2
- mov $r5 ctx_dst_address_high - ctx_src_address_high
+ mov $r5 #ctx_dst_address_high - #ctx_src_address_high
xbit $r15 $r3 8
- bra e dst_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_kick
+ bra e #dst_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_kick
dst_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 8
cmd_exec_kick:
mov $r5 0x800
shl b32 $r5 6
- ld b32 $r6 D[$r0 + ctx_ycnt]
+ ld b32 $r6 D[$r0 + #ctx_ycnt]
iowr I[$r5 + 0x100] $r6
mov $r6 0x0041
// SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
// if requested, queue up a QUERY write after the copy has completed
xbit $r15 $r3 12
- bra e cmd_exec_done
- call cmd_exec_query
+ bra e #cmd_exec_done
+ call #cmd_exec_query
cmd_exec_done:
ret
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 2731de22ebe..1f33fbdc00b 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 618c144b7a3..9e636e6ef6d 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
return false;
}
-void
+int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nva3_pm_state *info = pre_state;
unsigned long flags;
+ int ret = -EAGAIN;
/* prevent any new grctx switches from starting */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
nv_wr32(dev, 0x100210, 0x80000000);
}
+ ret = 0;
+
cleanup:
/* unfreeze PFIFO */
nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
}
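
The nva3_pm.c change turns nva3_pm_clocks_set() into an int-returning function: it starts with -EAGAIN, jumps to the shared cleanup path on failure, and only overwrites the code with 0 once reclocking has succeeded, so callers can now tell whether the perflvl actually took effect. A minimal sketch of that return-code plumbing, with the idle wait replaced by a stub predicate:

#include <errno.h>
#include <stdio.h>

static int engines_idle(void)
{
	return 1; /* pretend PFIFO/PGRAPH went idle */
}

static int clocks_set(void)
{
	int ret = -EAGAIN;

	if (!engines_idle())
		goto cleanup;

	/* ... reprogram the PLLs here ... */

	ret = 0;
cleanup:
	/* the unfreeze/cleanup path always runs, whatever happened above */
	return ret;
}

int main(void)
{
	printf("clocks_set() = %d\n", clocks_set());
	return 0;
}
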
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index 419903880e9..a8d17458ced 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd2..8ee3963f903 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
@@ -873,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
case 0xcf: /* 4/0/0/0, 3 */
priv->magic_not_rop_nr = 0x03;
break;
+ case 0xd9: /* 1/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
}
if (!priv->magic_not_rop_nr) {
NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
priv->tp_nr[3], priv->rop_nr);
- /* use 0xc3's values... */
- priv->magic_not_rop_nr = 0x03;
+ priv->magic_not_rop_nr = 0x00;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
index 2a4b6dc8f9d..e6b228844a3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -71,9 +71,9 @@ queue_put:
ld b32 $r9 D[$r13 + 0x4] // PUT
xor $r8 8
cmpu b32 $r8 $r9
- bra ne queue_put_next
+ bra ne #queue_put_next
mov $r15 E_CMD_OVERFLOW
- call error
+ call #error
ret
// store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
ld b32 $r8 D[$r13 + 0x0] // GET
ld b32 $r9 D[$r13 + 0x4] // PUT
cmpu b32 $r8 $r9
- bra e queue_get_done
+ bra e #queue_get_done
// fetch first cmd/data pair
and $r9 $r8 7
shl b32 $r9 3
@@ -135,9 +135,9 @@ nv_rd32:
nv_rd32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_rd32_wait
+ bra ne #nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
- call wait_doneo
+ call #wait_doneo
iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
ret
@@ -157,7 +157,7 @@ nv_wr32:
nv_wr32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_wr32_wait
+ bra ne #nv_wr32_wait
ret
// (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
shl b32 $r8 6
iord $r8 I[$r8 + 0x000] // DONE
xbit $r8 $r8 $r10
- bra $2 wait_done_$1
+ bra $2 #wait_done_$1
trace_clr(T_WAIT)
ret
')
@@ -216,7 +216,7 @@ mmctx_size:
add b32 $r9 $r8
add b32 $r14 4
cmpu b32 $r14 $r15
- bra ne nv_mmctx_size_loop
+ bra ne #nv_mmctx_size_loop
mov b32 $r15 $r9
ret
@@ -238,12 +238,12 @@ mmctx_xfer:
shl b32 $r8 6
clear b32 $r9
or $r11 $r11
- bra e mmctx_base_disabled
+ bra e #mmctx_base_disabled
iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
- bra e mmctx_multi_disabled
+ bra e #mmctx_multi_disabled
iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
bset $r9 1 // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
mmctx_wait_free:
iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
and $r14 0x1f
- bra e mmctx_wait_free
+ bra e #mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
iowr I[$r8 + 0x300] $r14
add b32 $r12 4
cmpu b32 $r12 $r13
- bra ne mmctx_exec_loop
+ bra ne #mmctx_exec_loop
xbit $r11 $r10 2
- bra ne mmctx_stop
+ bra ne #mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
and $r11 0x1f
cmpu b32 $r11 0x10
- bra ne mmctx_fini_wait
+ bra ne #mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
- call wait_donez
- bra mmctx_done
+ call #wait_donez
+ bra #mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
// wait for STOP_TRIGGER to clear
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
xbit $r11 $r11 18
- bra ne mmctx_stop_wait
+ bra ne #mmctx_stop_wait
mmctx_done:
trace_clr(T_MMCTX)
ret
@@ -305,7 +305,7 @@ mmctx_xfer:
strand_wait:
push $r10
mov $r10 2
- call wait_donez
+ call #wait_donez
pop $r10
ret
@@ -316,7 +316,7 @@ strand_pre:
sethi $r8 0x20000
mov $r9 0xc
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
sethi $r8 0x20000
mov $r9 0xd
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
mov $r12 0xb
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call strand_wait
+ call #strand_wait
iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
mov $r12 0xa
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call strand_wait
+ call #strand_wait
ret
// Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
//
strand_ctx_init:
trace_set(T_STRINIT)
- call strand_pre
+ call #strand_pre
mov $r14 3
- call strand_set
+ call #strand_set
mov $r10 0x46fc
sethi $r10 0x20000
add b32 $r11 $r10 0x400
iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
mov $r12 1
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call strand_wait
+ call #strand_wait
sub b32 $r12 $r0 1
iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
mov $r12 2
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call strand_wait
- call strand_post
+ call #strand_wait
+ call #strand_post
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
add b32 $r14 $r10
add b32 $r8 4
sub b32 $r9 1
- bra ne ctx_init_strand_loop
+ bra ne #ctx_init_strand_loop
shl b32 $r14 8
sub b32 $r15 $r14 $r15
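The queue_put/queue_get routines retargeted above implement a small command ring; a hedged C rendering of the same discipline (eight cmd/data slots, with GET/PUT counted so that "full" and "empty" stay distinguishable) might look like:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative C, not the microcode: the hunks above show the full test
 * (PUT == GET ^ 8), the empty test (GET == PUT) and 8-byte slots indexed
 * by the low three bits; the modulo-16 wrap on increment is assumed. */
struct cmd_queue {
	uint32_t get;
	uint32_t put;
	struct { uint32_t cmd, data; } slot[8];
};

static bool queue_put(struct cmd_queue *q, uint32_t cmd, uint32_t data)
{
	if (q->put == (q->get ^ 8))
		return false;			/* E_CMD_OVERFLOW in the fuc */
	q->slot[q->put & 7].cmd  = cmd;
	q->slot[q->put & 7].data = data;
	q->put = (q->put + 1) & 0xf;
	return true;
}

static bool queue_get(struct cmd_queue *q, uint32_t *cmd, uint32_t *data)
{
	if (q->get == q->put)
		return false;			/* queue empty */
	*cmd  = q->slot[q->get & 7].cmd;
	*data = q->slot[q->get & 7].data;
	q->get = (q->get + 1) & 0xf;
	return true;
}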
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 636fe9812f7..91d44ea662d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
case 0xc1:
return 0x9197;
case 0xc8:
+ case 0xd9:
return 0x9297;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 96b0b93d94c..de77842b31c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+ }
nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+ nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+ }
nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->chipset != 0xc1) {
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
- } else {
+ if (dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x405800, 0x0f8000bf);
nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x08000000);
+ } else
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
}
- nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xc1) {
+ if (dev_priv->chipset == 0xd9)
+ nv_wr32(dev, 0x4064bc, 0x00000000);
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x4064c0, 0x80140078);
nv_wr32(dev, 0x4064c4, 0x0086ffff);
}
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
+ if (chipset == 0xd9) {
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x1043e005);
+ nv_wr32(dev, 0x408908, 0x00c8102f);
+ } else
+ if (chipset == 0xc1) {
+ nv_wr32(dev, 0x408808, 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ } else {
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ }
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
}
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418408, 0x00000000);
nv_wr32(dev, 0x41840c, 0x00001008);
nv_wr32(dev, 0x418410, 0x0fff0fff);
- nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
nv_wr32(dev, 0x418450, 0x00000000);
nv_wr32(dev, 0x418454, 0x00000000);
nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418700, 0x00000002);
nv_wr32(dev, 0x418704, 0x00000080);
nv_wr32(dev, 0x418708, 0x00000000);
- nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
nv_wr32(dev, 0x418710, 0x00000000);
- nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
nv_wr32(dev, 0x418808, 0x00000000);
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x418830, 0x10000001);
+ else
+ nv_wr32(dev, 0x418830, 0x00000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x4188fc, 0x20100008);
+ else if (chipset == 0xc1)
+ nv_wr32(dev, 0x4188fc, 0x00100018);
+ else
+ nv_wr32(dev, 0x4188fc, 0x00100000);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
}
- nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
nv_wr32(dev, 0x418b08, 0x0a418820);
nv_wr32(dev, 0x418b0c, 0x062080e6);
nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419818, 0x00000000);
nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419864, 0x00000129);
+ else
+ nv_wr32(dev, 0x419864, 0x0000012a);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (chipset != 0xc0 && chipset != 0xc8)
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x00419ac4, 0x0017f440);
+ else if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419be0, 0x00400001);
+ else
+ nv_wr32(dev, 0x419be0, 0x00000001);
nv_wr32(dev, 0x419be4, 0x00000000);
- nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- if (chipset == 0xce || chipset == 0xcf)
+ if (dev_priv->chipset == 0xd9) {
+ nv_wr32(dev, 0x419c24, 0x00084210);
+ nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
nv_wr32(dev, 0x419cb0, 0x00020048);
- else
+ } else
+ if (chipset == 0xce || chipset == 0xcf) {
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ } else {
nv_wr32(dev, 0x419cb0, 0x00060048);
+ }
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419d20, 0x12180000);
+ else
+ nv_wr32(dev, 0x419d20, 0x02180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000215, 0x00000040);
nv_icmd(dev, 0x00000216, 0x00000040);
nv_icmd(dev, 0x00000217, 0x00000040);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0400; i <= 0x0417; i++)
+ nv_icmd(dev, i, 0x00000040);
+ }
nv_icmd(dev, 0x00000218, 0x0000c080);
nv_icmd(dev, 0x00000219, 0x0000c080);
nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000021d, 0x0000c080);
nv_icmd(dev, 0x0000021e, 0x0000c080);
nv_icmd(dev, 0x0000021f, 0x0000c080);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0440; i <= 0x0457; i++)
+ nv_icmd(dev, i, 0x0000c080);
+ }
nv_icmd(dev, 0x000000ad, 0x0000013e);
nv_icmd(dev, 0x000000e1, 0x00000010);
nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1)
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9)
nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000957, 0x00000003);
nv_icmd(dev, 0x0000095e, 0x20164010);
nv_icmd(dev, 0x0000095f, 0x00000020);
+ if (dev_priv->chipset == 0xd9)
+ nv_icmd(dev, 0x0000097d, 0x00000020);
nv_icmd(dev, 0x00000683, 0x00000006);
nv_icmd(dev, 0x00000685, 0x003fffff);
nv_icmd(dev, 0x00000687, 0x00000c48);
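The loops added to nvc0_grctx_generate_90c0() fold the chipset check into the loop condition, so they only execute on 0xd9; a more explicit but equivalent spelling (sketch only, behaviour unchanged) is:

	if (dev_priv->chipset == 0xd9) {
		for (i = 0; i < 4; i++) {
			nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
			/* ...and the remaining 0x27xx methods at the same
			 * 0x40 stride, exactly as written in the patch... */
		}
	}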
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 06f5e26d1e0..15272be33b6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -32,7 +32,7 @@
* - watchdog timer around ctx operations
*/
-.section nvc0_grgpc_data
+.section #nvc0_grgpc_data
include(`nvc0_graph.fuc')
gpc_id: .b32 0
gpc_mmio_list_head: .b32 0
@@ -48,40 +48,45 @@ cmd_queue: queue_init
// chipset descriptions
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc1_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc1_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvcf_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
mmctx_data(0x000c6c, 1);
nvc1_gpc_mmio_tail:
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
// TPC mmio lists
nvc0_tpc_mmio_head:
mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
mmctx_data(0x000544, 1)
nvc1_tpc_mmio_tail:
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
-.section nvc0_grgpc_code
-bra init
+.section #nvc0_grgpc_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -160,10 +219,10 @@ error:
push $r14
mov $r14 -0x67ec // 0x9814
sethi $r14 0x400000
- call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
add b32 $r14 0x41c
mov $r15 1
- call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
pop $r14
ret
@@ -190,7 +249,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
- st b32 D[$r0 + tpc_count] $r2
- st b32 D[$r0 + tpc_mask] $r3
+ st b32 D[$r0 + #tpc_count] $r2
+ st b32 D[$r0 + #tpc_mask] $r3
add b32 $r1 0x400
iord $r2 I[$r1 + 0x000] // MYINDEX
- st b32 D[$r0 + gpc_id] $r2
+ st b32 D[$r0 + #gpc_id] $r2
// find context data for this chipset
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r1 chipsets - 12
+ mov $r1 #chipsets - 12
init_find_chipset:
add b32 $r1 12
ld b32 $r3 D[$r1 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -253,19 +312,19 @@ init:
clear b32 $r15
ld b16 $r14 D[$r1 + 4]
ld b16 $r15 D[$r1 + 6]
- st b16 D[$r0 + gpc_mmio_list_head] $r14
- st b16 D[$r0 + gpc_mmio_list_tail] $r15
- call mmctx_size
+ st b16 D[$r0 + #gpc_mmio_list_head] $r14
+ st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+ call #mmctx_size
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size, store the list pointers
ld b16 $r14 D[$r1 + 8]
ld b16 $r15 D[$r1 + 10]
- st b16 D[$r0 + tpc_mmio_list_head] $r14
- st b16 D[$r0 + tpc_mmio_list_tail] $r15
- call mmctx_size
- ld b32 $r14 D[$r0 + tpc_count]
+ st b16 D[$r0 + #tpc_mmio_list_head] $r14
+ st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+ call #mmctx_size
+ ld b32 $r14 D[$r0 + #tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r3 $r15
// save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
main:
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// 0x0000-0x0003 are all context transfers
cmpu b32 $r14 0x04
- bra nc main_not_ctx_xfer
+ bra nc #main_not_ctx_xfer
// fetch $flags and mask off $p1/$p2
mov $r1 $flags
mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
or $r1 $r14
mov $flags $r1
// transfer context data
- call ctx_xfer
- bra main
+ call #ctx_xfer
+ bra #main
main_not_ctx_xfer:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
// interrupt handler
ih:
@@ -342,13 +401,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
//
hub_barrier_done:
mov $r15 1
- ld b32 $r14 D[$r0 + gpc_id]
+ ld b32 $r14 D[$r0 + #gpc_id]
shl b32 $r15 $r14
mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
sethi $r14 0x400000
- call nv_wr32
+ call #nv_wr32
ret
// Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0xa20
iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
ret
@@ -413,8 +472,8 @@ ctx_xfer:
mov $r1 0xa04
shl b32 $r1 6
iowr I[$r1 + 0x000] $r15// MEM_BASE
- bra not $p1 ctx_xfer_not_load
- call ctx_redswitch
+ bra not $p1 #ctx_xfer_not_load
+ call #ctx_redswitch
ctx_xfer_not_load:
// strands
@@ -422,7 +481,7 @@ ctx_xfer:
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
or $r10 2 // first
mov $r11 0x0000
sethi $r11 0x500000
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
- ld b32 $r12 D[$r0 + gpc_mmio_list_head]
- ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
mov $r11 0x4000
sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
- ld b32 $r12 D[$r0 + tpc_mmio_list_head]
- ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
- ld b32 $r15 D[$r0 + tpc_mask]
+ ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + #tpc_mask]
mov $r14 0x800 // stride = 0x800
- call mmctx_xfer
+ call #mmctx_xfer
// wait for strands to finish
- call strand_wait
+ call #strand_wait
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
// strand commands
- bra $p1 ctx_xfer_post
- bra not $p2 ctx_xfer_done
+ bra $p1 #ctx_xfer_post
+ bra not $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xd
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call strand_wait
+ call #strand_wait
// mark completion in HUB's barrier
ctx_xfer_done:
- call hub_barrier_done
+ call #hub_barrier_done
ret
.align 256
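The chipset table the GPC microcode searches in init_find_chipset is twelve bytes per entry (a chipset id word followed by four 16-bit mmio-list bounds), terminated by a zero id, and now carries the new 0xd9 entry pointing at the nvd9_* lists. A rough C equivalent of that walk (types and names here are illustrative, not the microcode's):

#include <stdint.h>
#include <stddef.h>

struct gpc_chipset_desc {
	uint32_t chipset;			/* .b8 <id> 0 0 0 */
	uint16_t gpc_mmio_head, gpc_mmio_tail;
	uint16_t tpc_mmio_head, tpc_mmio_tail;
};

static const struct gpc_chipset_desc *
find_chipset(const struct gpc_chipset_desc *tbl, uint32_t chipset)
{
	for (; tbl->chipset; tbl++) {
		if (tbl->chipset == chipset)
			return tbl;
	}
	return NULL;		/* unknown chipset: the microcode just returns */
}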
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 6f820324480..a988b8ad00a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000c1,
- 0x012000bc,
- 0x01840120,
+ 0x012c00c8,
+ 0x01f80194,
0x000000c3,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c4,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c8,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000ce,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000cf,
- 0x011c00bc,
- 0x017c0120,
+ 0x012800c8,
+ 0x01f00194,
+ 0x000000d9,
+ 0x0194012c,
+ 0x025401f8,
0x00000000,
0x00000380,
0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
0x08001000,
0x00001014,
0x00000c6c,
+ 0x00000380,
+ 0x04000400,
+ 0x0800040c,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c6c,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
0x00000018,
0x0000003c,
0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
0x000006e0,
0x000004bc,
0x00000544,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x000002c4,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x08000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x00000544,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x000006e0,
+ 0x08000750,
};
uint32_t nvc0_grgpc_code[] = {
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index e4f8c7e89dd..98acddb2c5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -27,7 +27,7 @@
* m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
*/
-.section nvc0_grhub_data
+.section #nvc0_grhub_data
include(`nvc0_graph.fuc')
gpc_count: .b32 0
rop_count: .b32 0
@@ -39,26 +39,29 @@ ctx_current: .b32 0
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc1_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
mmctx_data(0x4064c0, 2)
nvc1_hub_mmio_tail:
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
.align 256
chan_data:
chan_mmio_count: .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address: .b32 0
.align 256
xfer_data: .b32 0
-.section nvc0_grhub_code
-bra init
+.section #nvc0_grhub_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -157,7 +202,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
// fetch enabled GPC/ROP counts
mov $r14 -0x69fc // 0x409604
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
extr $r1 $r15 16:20
- st b32 D[$r0 + rop_count] $r1
+ st b32 D[$r0 + #rop_count] $r1
and $r15 0x1f
- st b32 D[$r0 + gpc_count] $r15
+ st b32 D[$r0 + #gpc_count] $r15
// set BAR_REQMASK to GPC mask
mov $r1 1
@@ -220,14 +265,14 @@ init:
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r15 chipsets - 8
+ mov $r15 #chipsets - 8
init_find_chipset:
add b32 $r15 8
ld b32 $r3 D[$r15 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -239,9 +284,9 @@ init:
ld b16 $r14 D[$r15 + 4]
ld b16 $r15 D[$r15 + 6]
sethi $r14 0
- st b32 D[$r0 + hub_mmio_list_head] $r14
- st b32 D[$r0 + hub_mmio_list_tail] $r15
- call mmctx_size
+ st b32 D[$r0 + #hub_mmio_list_head] $r14
+ st b32 D[$r0 + #hub_mmio_list_tail] $r15
+ call #mmctx_size
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
// when it has completed, and return the size of its context data
// in GPCn_CC_SCRATCH[1]
//
- ld b32 $r3 D[$r0 + gpc_count]
+ ld b32 $r3 D[$r0 + #gpc_count]
mov $r4 0x2000
sethi $r4 0x500000
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
- call nv_wr32 // CC_SCRATCH[1] = ctx offset
+ call #nv_wr32 // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x800
mov b32 $r15 $r2
- call nv_wr32 // CC_SCRATCH[0] = chipset
+ call #nv_wr32 // CC_SCRATCH[0] = chipset
add b32 $r14 $r4 0x10c
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
add b32 $r14 $r4 0x104
- call nv_wr32 // ENTRY
+ call #nv_wr32 // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
- call nv_wr32 // CTRL
+ call #nv_wr32 // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 31
- bra e init_gpc_wait
+ bra e #init_gpc_wait
add b32 $r14 $r4 0x804
- call nv_rd32
+ call #nv_rd32
add b32 $r1 $r15
// next!
add b32 $r4 0x8000
sub b32 $r3 1
- bra ne init_gpc
+ bra ne #init_gpc
// save context size, and tell host we're ready
mov $r2 0x800
@@ -322,13 +367,13 @@ main:
// sleep until we have something to do
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
- bra ne main_not_ctx_switch
+ bra ne #main_not_ctx_switch
trace_set(T_AUTO)
mov $r1 0xb00
shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
iord $r1 I[$r1 + 0x000] // CHAN_CUR
xbit $r3 $r1 31
- bra e chsw_no_prev
+ bra e #chsw_no_prev
xbit $r3 $r2 31
- bra e chsw_prev_no_next
+ bra e #chsw_prev_no_next
push $r2
mov b32 $r2 $r1
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE);
pop $r2
trace_set(T_LOAD);
bset $flags $p1
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_LOAD);
- bra chsw_done
+ bra #chsw_done
chsw_prev_no_next:
push $r2
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
pop $r2
mov $r1 0xb00
shl b32 $r1 6
iowr I[$r1] $r2
- bra chsw_done
+ bra #chsw_done
chsw_no_prev:
xbit $r3 $r2 31
- bra e chsw_done
+ bra e #chsw_done
bset $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
// ack the context switch request
chsw_done:
@@ -377,32 +422,32 @@ main:
mov $r2 1
iowr I[$r1 + 0x000] $r2 // 0x409b0c
trace_clr(T_AUTO)
- bra main
+ bra #main
// request to set current channel? (*not* a context switch)
main_not_ctx_switch:
cmpu b32 $r14 0x0001
- bra ne main_not_ctx_chan
+ bra ne #main_not_ctx_chan
mov b32 $r2 $r15
- call ctx_chan
- bra main_done
+ call #ctx_chan
+ bra #main_done
// request to store current channel context?
main_not_ctx_chan:
cmpu b32 $r14 0x0002
- bra ne main_not_ctx_save
+ bra ne #main_not_ctx_save
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE)
- bra main_done
+ bra #main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
main_done:
mov $r1 0x820
@@ -410,7 +455,7 @@ main:
clear b32 $r2
bset $r2 31
iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
- bra main
+ bra #main
// interrupt handler
ih:
@@ -427,13 +472,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
// context switch request?
ih_no_fifo:
and $r11 $r10 0x00000100
- bra e ih_no_ctxsw
+ bra e #ih_no_ctxsw
// enqueue a context switch for later processing
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
mov $r14 0x4001
- call queue_put
+ call #queue_put
// anything we didn't handle, bring it to the host's attention
ih_no_ctxsw:
mov $r11 0x104
not b32 $r11
and $r11 $r10 $r11
- bra e ih_no_other
+ bra e #ih_no_other
mov $r10 0xc1c
shl b32 $r10 6
iowr I[$r10] $r11 // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
mov $r14 0x4160
sethi $r14 0x400000
mov $r15 1
- call nv_wr32
+ call #nv_wr32
ctx_4160s_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 4
- bra e ctx_4160s_wait
+ bra e #ctx_4160s_wait
ret
// Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
mov $r14 0x4160
sethi $r14 0x400000
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
ret
// Again, not real sure
@@ -503,7 +548,7 @@ ctx_4170s:
mov $r14 0x4170
sethi $r14 0x400000
or $r15 0x10
- call nv_wr32
+ call #nv_wr32
ret
// Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
ctx_4170w:
mov $r14 0x4170
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
and $r15 0x10
- bra ne ctx_4170w
+ bra ne #ctx_4170w
ret
// Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0x770
iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
ret
@@ -546,10 +591,10 @@ ctx_86c:
iowr I[$r14] $r15 // HUB(0x86c) = val
mov $r14 -0x75ec
sethi $r14 0x400000
- call nv_wr32 // ROP(0xa14) = val
+ call #nv_wr32 // ROP(0xa14) = val
mov $r14 -0x5794
sethi $r14 0x410000
- call nv_wr32 // GPC(0x86c) = val
+ call #nv_wr32 // GPC(0x86c) = val
ret
// ctx_load - loads a channel's ctxctl data, and selects its vm

@@ -561,7 +606,7 @@ ctx_load:
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa24
shl b32 $r1 6
iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
ctx_chan_wait_0:
iord $r4 I[$r1 + 0x100]
and $r4 0x1f
- bra ne ctx_chan_wait_0
+ bra ne #ctx_chan_wait_0
iowr I[$r3 + 0x000] $r2 // CHAN_CUR
// load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
sethi $r2 0x80000000
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
mov $r1 0x10 // chan + 0x0210
- mov $r2 xfer_data
+ mov $r2 #xfer_data
sethi $r2 0x00020000 // 16 bytes
xdld $r1 $r2
xdwait
trace_clr(T_LCHAN)
// update current context
- ld b32 $r1 D[$r0 + xfer_data + 4]
+ ld b32 $r1 D[$r0 + #xfer_data + 4]
shl b32 $r1 24
- ld b32 $r2 D[$r0 + xfer_data + 0]
+ ld b32 $r2 D[$r0 + #xfer_data + 0]
shr b32 $r2 8
or $r1 $r2
- st b32 D[$r0 + ctx_current] $r1
+ st b32 D[$r0 + #ctx_current] $r1
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
mov $r1 0xa20
shl b32 $r1 6
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
- mov $r1 chan_data
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
xdwait
@@ -635,10 +680,10 @@ ctx_load:
// In: $r2 channel address
//
ctx_chan:
- call ctx_4160s
- call ctx_load
+ call #ctx_4160s
+ call #ctx_load
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
ctx_chan_wait:
iord $r2 I[$r1 + 0x000]
or $r2 $r2
- bra ne ctx_chan_wait
- call ctx_4160c
+ bra ne #ctx_chan_wait
+ call #ctx_4160c
ret
// Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
//
ctx_mmio_exec:
// set transfer base to be the mmio list
- ld b32 $r3 D[$r0 + chan_mmio_address]
+ ld b32 $r3 D[$r0 + #chan_mmio_address]
mov $r2 0xa04
shl b32 $r2 6
iowr I[$r2 + 0x000] $r3 // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
ctx_mmio_loop:
// fetch next 256 bytes of mmio list if necessary
and $r4 $r3 0xff
- bra ne ctx_mmio_pull
- mov $r5 xfer_data
+ bra ne #ctx_mmio_pull
+ mov $r5 #xfer_data
sethi $r5 0x00060000 // 256 bytes
xdld $r3 $r5
xdwait
// execute a single list entry
ctx_mmio_pull:
- ld b32 $r14 D[$r4 + xfer_data + 0x00]
- ld b32 $r15 D[$r4 + xfer_data + 0x04]
- call nv_wr32
+ ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+ call #nv_wr32
// next!
add b32 $r3 8
sub b32 $r1 1
- bra ne ctx_mmio_loop
+ bra ne #ctx_mmio_loop
// set transfer base back to the current context
ctx_mmio_done:
- ld b32 $r3 D[$r0 + ctx_current]
+ ld b32 $r3 D[$r0 + #ctx_current]
iowr I[$r2 + 0x000] $r3 // MEM_BASE
// disable the mmio list now, we don't need/want to execute it again
- st b32 D[$r0 + chan_mmio_count] $r0
- mov $r1 chan_data
+ st b32 D[$r0 + #chan_mmio_count] $r0
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdst $r0 $r1
xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
- bra not $p1 ctx_xfer_pre
- bra $p2 ctx_xfer_pre_load
+ bra not $p1 #ctx_xfer_pre
+ bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
- call ctx_86c
- call ctx_4160s
- bra not $p1 ctx_xfer_exec
+ call #ctx_86c
+ call #ctx_4160s
+ bra not $p1 #ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
- call ctx_4170s
- call ctx_4170w
- call ctx_redswitch
+ call #ctx_4170s
+ call #ctx_4170w
+ call #ctx_redswitch
clear b32 $r15
- call ctx_4170s
- call ctx_load
+ call #ctx_4170s
+ call #ctx_load
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
- ld b32 $r1 D[$r0 + ctx_current]
+ ld b32 $r1 D[$r0 + #ctx_current]
mov $r2 0x414
shl b32 $r2 6
iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
mov $r14 -0x5b00
sethi $r14 0x410000
mov b32 $r15 $r1
- call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
add b32 $r14 4
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
- call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
xbit $r10 $flags $p1 // direction
or $r10 6 // first, last
mov $r11 0 // base = 0
- ld b32 $r12 D[$r0 + hub_mmio_list_head]
- ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
- call wait_doneo
+ call #wait_doneo
// wait for strand xfer to complete
- call strand_wait
+ call #strand_wait
// post-op
- bra $p1 ctx_xfer_post
+ bra $p1 #ctx_xfer_post
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
ctx_xfer_post_save_wait:
iord $r2 I[$r1]
or $r2 $r2
- bra ne ctx_xfer_post_save_wait
+ bra ne #ctx_xfer_post_save_wait
- bra $p2 ctx_xfer_done
+ bra $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r15 2
- call ctx_4170s
+ call #ctx_4170s
clear b32 $r15
- call ctx_86c
- call strand_post
- call ctx_4170w
+ call #ctx_86c
+ call #strand_post
+ call #ctx_4170w
clear b32 $r15
- call ctx_4170s
+ call #ctx_4170s
- bra not $p1 ctx_xfer_no_post_mmio
- ld b32 $r1 D[$r0 + chan_mmio_count]
+ bra not $p1 #ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + #chan_mmio_count]
or $r1 $r1
- bra e ctx_xfer_no_post_mmio
- call ctx_mmio_exec
+ bra e #ctx_xfer_no_post_mmio
+ call #ctx_mmio_exec
ctx_xfer_no_post_mmio:
- call ctx_4160c
+ call #ctx_4160c
ctx_xfer_done:
ret
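For reference, the init_gpc loop in the hub microcode (unchanged in substance by this patch apart from the '#' label syntax) hands each GPC its context offset and the chipset id through CC_SCRATCH, starts it, and accumulates the context size each GPC reports. A hedged C rendering, using the driver's nv_wr32()/nv_rd32() helpers and the register offsets named in the comments above, would be roughly:

/* Illustrative only; the real work is done by the fuc running inside
 * PGRAPH.  Offsets come from the comments in the hunk. */
static u32
init_gpcs(struct drm_device *dev, u32 gpc_count, u32 chipset, u32 ctx_offset)
{
	u32 base = 0x502000;		/* GPC0, stride 0x8000 per GPC */
	u32 offset = ctx_offset;
	u32 gpc;

	for (gpc = 0; gpc < gpc_count; gpc++, base += 0x8000) {
		nv_wr32(dev, base + 0x804, offset);	/* CC_SCRATCH[1] = ctx offset */
		nv_wr32(dev, base + 0x800, chipset);	/* CC_SCRATCH[0] = chipset */
		nv_wr32(dev, base + 0x10c, 0);
		nv_wr32(dev, base + 0x104, 0);		/* ENTRY */
		nv_wr32(dev, base + 0x100, 2);		/* CTRL_START_TRIGGER */

		while (!(nv_rd32(dev, base + 0x800) & 0x80000000))
			;		/* GPC sets bit 31 when it has completed */
		offset += nv_rd32(dev, base + 0x804);	/* its context size */
	}
	return offset;
}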
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index 241d3263f1e..c5ed307abeb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x01340098,
+ 0x013c00a0,
0x000000c1,
- 0x01380098,
+ 0x014000a0,
0x000000c3,
- 0x01340098,
+ 0x013c00a0,
0x000000c4,
- 0x01340098,
+ 0x013c00a0,
0x000000c8,
- 0x01340098,
+ 0x013c00a0,
0x000000ce,
- 0x01340098,
+ 0x013c00a0,
0x000000cf,
- 0x01340098,
+ 0x013c00a0,
+ 0x000000d9,
+ 0x01dc0140,
0x00000000,
0x0417e91c,
0x04400204,
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
0x0c408900,
0x00408980,
0x044064c0,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x24404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x04404178,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x104064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 929aded35cb..e9992f62c1c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
perflvl->vdec = read_clk(dev, 0x0e);
return 0;
}
+
+struct nvc0_pm_clock {
+ u32 freq;
+ u32 ssel;
+ u32 mdiv;
+ u32 dsrc;
+ u32 ddiv;
+ u32 coef;
+};
+
+struct nvc0_pm_state {
+ struct nvc0_pm_clock eng[16];
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+ u32 div = min((ref * 2) / freq, (u32)65);
+ if (div < 2)
+ div = 2;
+
+ *ddiv = div - 2;
+ return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+ u32 sclk;
+
+ /* use one of the fixed frequencies if possible */
+ *ddiv = 0x00000000;
+ switch (freq) {
+ case 27000:
+ case 108000:
+ *dsrc = 0x00000000;
+ if (freq == 108000)
+ *dsrc |= 0x00030000;
+ return freq;
+ case 100000:
+ *dsrc = 0x00000002;
+ return freq;
+ default:
+ *dsrc = 0x00000003;
+ break;
+ }
+
+ /* otherwise, calculate the closest divider */
+ sclk = read_vco(dev, clk);
+ if (clk < 7)
+ sclk = calc_div(dev, clk, sclk, freq, ddiv);
+ return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+ struct pll_lims limits;
+ int N, M, P, ret;
+
+ ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+ if (ret)
+ return 0;
+
+ limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+ if (!limits.refclk)
+ return 0;
+
+ ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+ if (ret <= 0)
+ return 0;
+
+ *coef = (P << 16) | (N << 8) | M;
+ return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks. For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists. You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+ u32 src0, div0, div1D, div1P = 0;
+ u32 clk0, clk1 = 0;
+
+ /* invalid clock domain */
+ if (!freq)
+ return 0;
+
+ /* first possible path, using only dividers */
+ clk0 = calc_src(dev, clk, freq, &src0, &div0);
+ clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+ /* see if we can get any closer using PLLs */
+ if (clk0 != freq) {
+ if (clk < 7)
+ clk1 = calc_pll(dev, clk, freq, &info->coef);
+ else
+ clk1 = read_pll(dev, 0x1370e0);
+ clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+ }
+
+ /* select the method which gets closest to target freq */
+ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+ info->dsrc = src0;
+ if (div0) {
+ info->ddiv |= 0x80000000;
+ info->ddiv |= div0 << 8;
+ info->ddiv |= div0;
+ }
+ if (div1D) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1D;
+ }
+ info->ssel = 0;
+ info->freq = clk0;
+ } else {
+ if (div1P) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1P << 8;
+ }
+ info->ssel = (1 << clk);
+ info->freq = clk1;
+ }
+
+ return 0;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_pm_state *info;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ /* NFI why this is still in the performance table, the ROPCs appear
+ * to get their clock from clock 2 ("hub07", actually hub05 on this
+ * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
+ * are always the same freq with the binary driver even when the
+ * performance table says they should differ.
+ */
+ if (dev_priv->chipset == 0xd9)
+ perflvl->rop = 0;
+
+ if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+ (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+ (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+ (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+ (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+ (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+ (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+ (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+
+ return info;
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+ /* program dividers at 137160/1371d0 first */
+ if (clk < 7 && !info->ssel) {
+ nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+ }
+
+ /* switch clock to non-pll mode */
+ nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
+ nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+
+ /* reprogram pll */
+ if (clk < 7) {
+ /* make sure it's disabled first... */
+ u32 base = 0x137000 + (clk * 0x20);
+ u32 ctrl = nv_rd32(dev, base + 0x00);
+ if (ctrl & 0x00000001) {
+ nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+ }
+ /* program it to new values, if necessary */
+ if (info->ssel) {
+ nv_wr32(dev, base + 0x04, info->coef);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
+ nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
+ nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+ }
+ }
+
+ /* select pll/non-pll mode, and program final clock divider */
+ nv_mask(dev, 0x137100, (1 << clk), info->ssel);
+ nv_wait(dev, 0x137100, (1 << clk), info->ssel);
+ nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+ struct nvc0_pm_state *info = data;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (!info->eng[i].freq)
+ continue;
+ prog_clk(dev, i, &info->eng[i]);
+ }
+
+ kfree(info);
+ return 0;
+}
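The divider path added above works against a doubled reference clock, so the effective divider has half-step resolution and the register field stores (div - 2). A standalone illustration of calc_div()'s arithmetic (not driver code; the example frequencies are made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t calc_div(uint32_t ref, uint32_t freq, uint32_t *ddiv)
{
	uint32_t div = (ref * 2) / freq;
	if (div > 65)
		div = 65;
	if (div < 2)
		div = 2;
	*ddiv = div - 2;		/* value programmed into the register */
	return (ref * 2) / div;		/* frequency actually achieved */
}

int main(void)
{
	uint32_t ddiv;
	/* e.g. a 1620 MHz VCO (in kHz, as the driver uses) divided down
	 * towards a 405 MHz target lands exactly: div = 8, ddiv = 6 */
	uint32_t out = calc_div(1620000, 405000, &ddiv);
	printf("out=%u kHz ddiv=%u\n", out, ddiv);
	return 0;
}

calc_clk() then keeps whichever of this divider-only path and the PLL path lands closer to the requested frequency.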
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d7..d2ba2f07400 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -35,12 +35,34 @@
#include "nouveau_fb.h"
#include "nv50_display.h"
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+struct evo {
+ int idx;
+ dma_addr_t handle;
+ u32 *ptr;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
struct nvd0_display {
struct nouveau_gpuobj *mem;
- struct {
- dma_addr_t handle;
- u32 *ptr;
- } evo[1];
+ struct nouveau_bo *sync;
+ struct evo evo[9];
struct tasklet_struct tasklet;
u32 modeset;
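The channel-index and sync-offset macros introduced above leave room for four CRTCs (master plus per-CRTC flip and overlay DMA channels make up the nine in EVO_DMA_NR, with the immediate-overlay and cursor channels beyond them); this throwaway program (not driver code) just prints how they expand:

#include <stdio.h>

#define EVO_MASTER       (0x00)
#define EVO_FLIP(c)      (0x01 + (c))
#define EVO_OVLY(c)      (0x05 + (c))
#define EVO_SYNC(c, o)   ((c) * 0x0100 + (o))
#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)

int main(void)
{
	int c;
	for (c = 0; c < 4; c++)
		printf("crtc%d: flip ch %d, ovly ch %d, sem0 0x%03x, sem1 0x%03x\n",
		       c, EVO_FLIP(c), EVO_OVLY(c),
		       EVO_FLIP_SEM0(c), EVO_FLIP_SEM1(c));
	return 0;
}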
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
return dev_priv->engine.display.priv;
}
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
static inline int
evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
{
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
put = 0;
}
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+ NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
+
return disp->evo[id].ptr + put;
}
@@ -91,40 +125,264 @@ static void
evo_kick(u32 *push, struct drm_device *dev, int id)
{
struct nvd0_display *disp = nvd0_display(dev);
+
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
+ u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
+ u32 *cur = disp->evo[id].ptr + curp;
+
+ while (cur < push)
+ NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
+ NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
+ }
+
nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
+static int
+evo_init_dma(struct drm_device *dev, int ch)
{
- return nouveau_encoder(encoder)->crtc;
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 flags;
+
+ flags = 0x00000000;
+ if (ch == EVO_MASTER)
+ flags |= 0x01000000;
+
+ nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+ nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
+ nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_dma(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static inline void
+evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
+{
+ nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+}
+
+static int
+evo_init_pio(struct drm_device *dev, int ch)
+{
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_pio(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev, int ch)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 *push = evo_wait(dev, ch, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, ch);
+ if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+ return nvd0_display(dev)->sync;
+}
+
+void
+nvd0_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u32 *push;
+
+ push = evo_wait(crtc->dev, evo->idx, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+ }
+}
+
+int
+nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan, u32 swap_interval)
+{
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u64 offset;
+ u32 *push;
+ int ret;
+
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(crtc->dev, evo->idx, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ ret = RING_SPACE(chan, 10);
+ if (ret)
+ return ret;
+
+ offset = chan->dispc_vma[nv_crtc->index].offset;
+ offset += evo->sem.offset;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | evo->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
+ 0xf00d0000 | evo->sem.value);
+ evo_sync(crtc->dev, EVO_MASTER);
+ }
+
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, evo->sem.offset);
+ evo_data(push, 0xf00d0000 | evo->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+
+ evo->sem.offset ^= 0x10;
+ evo->sem.value++;
+ return 0;
}
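The last two statements of nvd0_display_flip_next() are the whole of the flip-semaphore bookkeeping: each CRTC alternates between two semaphore slots 0x10 apart in the shared sync buffer and increments the expected value, so consecutive flips chain through EVO_FLIP_SEM0/SEM1. A minimal sketch of just that state (kernel-style types; illustrative only):

struct flip_sem {
	u32 offset;	/* starts at EVO_FLIP_SEM0(crtc), toggles to SEM1 */
	u16 value;
};

static void flip_sem_advance(struct flip_sem *sem)
{
	/* the flip just queued was given sem->offset, 0xf00d0000 | sem->value
	 * and 0x74b1e000 (see the evo methods and the render-channel/CPU
	 * writes above); hand the other slot and the next value to the
	 * following flip */
	sem->offset ^= 0x10;
	sem->value++;
}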
/******************************************************************************
* CRTC
*****************************************************************************/
static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push, mode;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
- mode = 0x00000000;
- if (on) {
- /* 0x11: 6bpc dynamic 2x2
- * 0x13: 8bpc dynamic 2x2
- * 0x19: 6bpc static 2x2
- * 0x1b: 8bpc static 2x2
- * 0x21: 6bpc temporal
- * 0x23: 8bpc temporal
- */
- mode = 0x00000011;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
+
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
}
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
return 0;
}
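
A worked example of the AUTO path above, as a sketch under the assumption that the DITHERING_* constants encode the same raw mode values the removed comment table listed (0x11 = 6bpc dynamic 2x2, 0x13 = 8bpc dynamic 2x2, and so on); the EX_* names below are illustrative placeholders, not the driver's enums:

#include <linux/types.h>

#define EX_MODE_DYNAMIC2X2	0x11	/* assumed: 6bpc dynamic 2x2 base value */
#define EX_DEPTH_8BPC		0x02	/* assumed: selects the 8bpc variant */

/* depth-24 framebuffer on a 6bpc panel, both properties left on AUTO */
static u32 example_auto_dither(void)
{
	u32 mode = 0x00;

	if (24 > 6 * 3)			/* fb carries more bits than the sink */
		mode = EX_MODE_DYNAMIC2X2;
	/* sink bpc < 8, so no 8bpc depth bits are added */
	return mode;			/* 0x11, matching the value the old code hard-wired */
}
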
static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_display_mode *mode = &nv_crtc->base.mode;
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_crtc *crtc = &nv_crtc->base;
struct nouveau_connector *nv_connector;
- u32 *push, outX, outY;
-
- outX = mode->hdisplay;
- outY = mode->vdisplay;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- struct drm_display_mode *native = nv_connector->native_mode;
- u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
- u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
-
- switch (type) {
- case DRM_MODE_SCALE_ASPECT:
- if (xratio > yratio) {
- outX = (mode->hdisplay * yratio) >> 19;
- outY = (mode->vdisplay * yratio) >> 19;
- } else {
- outX = (mode->hdisplay * xratio) >> 19;
- outY = (mode->vdisplay * xratio) >> 19;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native->hdisplay;
- outY = native->vdisplay;
- break;
- default:
- break;
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
+
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
+ break;
+ default:
+ break;
}
- push = evo_wait(dev, 0, 16);
+ push = evo_wait(dev, EVO_MASTER, 8);
if (push) {
evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+ evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
+ evo_kick(push, dev, EVO_MASTER);
if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+ nvd0_display_flip_stop(crtc);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
- evo_kick(push, dev, 0);
}
return 0;
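
The CENTER/ASPECT branch above uses 19-bit fixed point with round-to-nearest. As a standalone sketch of that arithmetic (the helper is illustrative): letterboxing a 1280x1024 user mode onto a 1920x1200 panel gives aspect = (1280 << 19) / 1024 = 0xa0000 (1.25), so oX = ((1200 * 0xa0000) + 0x50000) >> 19 = 1500, i.e. a 1500x1200 image centred on the panel.

#include <linux/types.h>

/* shrink (oX, oY) to the user mode's aspect ratio, 19-bit fixed point */
static void example_scale_aspect(u32 uw, u32 uh, u32 *oX, u32 *oY)
{
	if (*oY < *oX) {
		u32 aspect = (uw << 19) / uh;
		*oX = ((*oY * aspect) + (aspect / 2)) >> 19;
	} else {
		u32 aspect = (uh << 19) / uw;
		*oY = ((*oX * aspect) + (aspect / 2)) >> 19;
	}
}
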
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
u32 *push;
- push = evo_wait(fb->dev, 0, 16);
+ push = evo_wait(fb->dev, EVO_MASTER, 16);
if (push) {
evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, fb->dev, 0);
+ evo_kick(push, fb->dev, EVO_MASTER);
}
nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, 0, 16);
+ u32 *push = evo_wait(dev, EVO_MASTER, 16);
if (push) {
if (show) {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
}
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 2);
+ nvd0_display_flip_stop(crtc);
+
+ push = evo_wait(crtc->dev, EVO_MASTER, 2);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
evo_data(push, 0x03000000);
evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 32);
+ push = evo_wait(crtc->dev, EVO_MASTER, 32);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
evo_data(push, NvEvoVRAM);
evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+ nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector;
- u32 htotal = mode->htotal;
- u32 vtotal = mode->vtotal;
- u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
- u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
- u32 hfrntp = mode->hsync_start - mode->hdisplay;
- u32 vfrntp = mode->vsync_start - mode->vdisplay;
- u32 hbackp = mode->htotal - mode->hsync_end;
- u32 vbackp = mode->vtotal - mode->vsync_end;
- u32 hss2be = hsyncw + hbackp;
- u32 vss2be = vsyncw + vbackp;
- u32 hss2de = htotal - hfrntp;
- u32 vss2de = vtotal - vfrntp;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 magic = 0x31ec6000;
u32 syncs, *push;
int ret;
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ magic |= 0x00000001;
+ }
+
syncs = 0x00000001;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
syncs |= 0x00000008;
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
if (ret)
return ret;
- push = evo_wait(crtc->dev, 0, 64);
+ push = evo_wait(crtc->dev, EVO_MASTER, 64);
if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
evo_data(push, 0x00000000);
- evo_data(push, (vtotal << 16) | htotal);
- evo_data(push, (vsyncw << 16) | hsyncw);
- evo_data(push, (vss2be << 16) | hss2be);
- evo_data(push, (vss2de << 16) | hss2de);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000); /* ??? */
evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
evo_data(push, mode->clock * 1000);
evo_data(push, 0x00200000); /* ??? */
evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
evo_data(push, syncs);
- evo_kick(push, crtc->dev, 0);
+ evo_data(push, magic);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
- nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nvd0_crtc_set_dither(nv_crtc, false);
+ nvd0_crtc_set_scale(nv_crtc, false);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
return 0;
}
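
As a cross-check of the blanking arithmetic above, a sketch working through CEA-861 1080p60 (1920x1080, hsync 2008..2052, htotal 2200; vsync 1084..1089, vtotal 1125; progressive, no doublescan). The standalone helper is illustrative only:

#include <linux/types.h>

static void example_1080p60_blanking(void)
{
	/* horizontal: hdisplay 1920, hsync 2008..2052, htotal 2200 */
	u32 hsynce  = 2052 - 2008 - 1;		/*   43 */
	u32 hbackp  = 2200 - 2052;		/*  148 */
	u32 hblanke = hsynce + hbackp;		/*  191 */
	u32 hfrontp = 2008 - 1920;		/*   88 */
	u32 hblanks = 2200 - hfrontp - 1;	/* 2111 */

	/* vertical: vdisplay 1080, vsync 1084..1089, vtotal 1125 */
	u32 vsynce  = (1089 - 1084) - 1;	/*    4 */
	u32 vbackp  = 1125 - 1089;		/*   36 */
	u32 vblanke = vsynce + vbackp;		/*   40 */
	u32 vfrontp = 1084 - 1080;		/*    4 */
	u32 vblanks = 1125 - vfrontp - 1;	/* 1120 */

	(void)hblanke; (void)hblanks; (void)vblanke; (void)vblanks;
}
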
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
return 0;
}
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
enum mode_set_atomic state)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
return 0;
}
@@ -472,10 +790,10 @@ static int
nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- const u32 data = (y << 16) | x;
+ int ch = EVO_CURS(nv_crtc->index);
- nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
- nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+ evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+ evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
return 0;
}
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
.gamma_set = nvd0_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = nvd0_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
};
static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(encoder->dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
evo_data(push, 1 << nv_crtc->index);
evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, encoder->dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = NULL;
@@ -760,6 +1079,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
}
/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int i, or = nv_encoder->or * 0x30;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+ }
+}
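
The ELD upload above goes through a single indexed data register: each 32-bit write carries the byte index in bits 15:8 and the ELD byte in bits 7:0, with eld[2] (the baseline ELD length in 4-byte units, hence the eld[2] * 4 bound) sizing the payload and the remainder of the 0x60-byte window zero-padded. A sketch of the word packing; the helper name is illustrative:

#include <linux/types.h>

static u32 example_eld_word(unsigned int index, u8 byte)
{
	return (index << 8) | byte;	/* e.g. index 4, byte 0x09 -> 0x00000409 */
}
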
+
+static void
+nvd0_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ int or = nv_encoder->or * 0x30;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
+ return;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* AVI InfoFrame */
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x61671c + head, 0x000d0282);
+ nv_wr32(dev, 0x616720 + head, 0x0000006f);
+ nv_wr32(dev, 0x616724 + head, 0x00000000);
+ nv_wr32(dev, 0x616728 + head, 0x00000000);
+ nv_wr32(dev, 0x61672c + head, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x6167ac + head, 0x00000010);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+ max_ac_packet << 16);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+
+ nvd0_audio_mode_set(encoder, mode);
+}
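
The max_ac_packet value above appears to bound how many 32-pixel data-island packets fit in one horizontal blanking period once the 56-pixel rekey window and an 18-pixel margin (both noted in the code as constants taken from the binary driver / Tegra) are subtracted. A sketch working through CEA 1080p60, where hblank = 2200 - 1920 = 280; the helper is illustrative:

#include <linux/types.h>

static u32 example_max_ac_packet(u32 htotal, u32 hdisplay, u32 rekey)
{
	u32 hblank = htotal - hdisplay;		/* 2200 - 1920 = 280 for 1080p60 */

	return (hblank - rekey - 18) / 32;	/* (280 - 56 - 18) / 32 = 6 */
}
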
+
+static void
+nvd0_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+
+ nvd0_audio_disconnect(encoder);
+
+ nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+}
+
+/******************************************************************************
* SOR
*****************************************************************************/
static void
@@ -780,7 +1201,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
@@ -829,7 +1250,8 @@ static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct drm_display_mode *mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
or_config = (mode_ctrl & 0x00000f00) >> 8;
if (mode->clock >= 165000)
or_config |= 0x0100;
+
+ nvd0_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_LVDS:
or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
if (bios->fp.if_is_24bit)
or_config |= 0x0200;
} else {
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
or_config |= 0x0100;
} else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
evo_data(push, mode_ctrl);
evo_data(push, or_config);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
+ nvd0_hdmi_disconnect(encoder);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
struct nvd0_display *disp = nvd0_display(dev);
u32 intr = nv_rd32(dev, 0x610088);
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x61008c);
+ nv_wr32(dev, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
if (intr & 0x00000002) {
u32 stat = nv_rd32(dev, 0x61009c);
int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
/******************************************************************************
* Init
*****************************************************************************/
-static void
+void
nvd0_display_fini(struct drm_device *dev)
{
int i;
- /* fini cursors */
- for (i = 14; i >= 13; i--) {
- if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
- continue;
-
- nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
- nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
- nv_mask(dev, 0x610090, 1 << i, 0x00000000);
- nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+ /* fini cursors + overlays + flips */
+ for (i = 1; i >= 0; i--) {
+ evo_fini_pio(dev, EVO_CURS(i));
+ evo_fini_pio(dev, EVO_OIMM(i));
+ evo_fini_dma(dev, EVO_OVLY(i));
+ evo_fini_dma(dev, EVO_FLIP(i));
}
/* fini master */
- if (nv_rd32(dev, 0x610490) & 0x00000010) {
- nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
- nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
- nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
- nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
- }
+ evo_fini_dma(dev, EVO_MASTER);
}
int
nvd0_display_init(struct drm_device *dev)
{
struct nvd0_display *disp = nvd0_display(dev);
+ int ret, i;
u32 *push;
- int i;
if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
}
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
/* init master */
- nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
- nv_wr32(dev, 0x610498, 0x00010000);
- nv_wr32(dev, 0x61049c, 0x00000001);
- nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
- nv_wr32(dev, 0x640000, 0x00000000);
- nv_wr32(dev, 0x610490, 0x01000013);
- if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "PDISP: master 0x%08x\n",
- nv_rd32(dev, 0x610490));
- return -EBUSY;
+ ret = evo_init_dma(dev, EVO_MASTER);
+ if (ret)
+ goto error;
+
+ /* init flips + overlays + cursors */
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
+ (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
+ (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
+ (ret = evo_init_pio(dev, EVO_CURS(i))))
+ goto error;
}
- nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
- /* init cursors */
- for (i = 13; i <= 14; i++) {
- nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
- if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
- NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
- nv_rd32(dev, 0x610490 + (i * 0x10)));
- return -EBUSY;
- }
-
- nv_mask(dev, 0x610090, 1 << i, 1 << i);
- nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+ push = evo_wait(dev, EVO_MASTER, 32);
+ if (!push) {
+ ret = -EBUSY;
+ goto error;
}
-
- push = evo_wait(dev, 0, 32);
- if (!push)
- return -EBUSY;
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
evo_data(push, 0x80000000);
evo_mthd(push, 0x008c, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
- return 0;
+error:
+ if (ret)
+ nvd0_display_fini(dev);
+ return ret;
}
void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvd0_display *disp = nvd0_display(dev);
struct pci_dev *pdev = dev->pdev;
+ int i;
- nvd0_display_fini(dev);
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
+ }
- pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
nouveau_gpuobj_ref(NULL, &disp->mem);
+ nouveau_bo_unmap(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
nouveau_irq_unregister(dev, 26);
dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nvd0_display_intr);
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
+
+ if (ret)
+ goto out;
+
/* hash table and dma objects for the memory areas we care about */
ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
if (ret)
goto out;
- nv_wo32(disp->mem, 0x1000, 0x00000049);
- nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
- nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
- nv_wo32(disp->mem, 0x100c, 0x00000000);
- nv_wo32(disp->mem, 0x1010, 0x00000000);
- nv_wo32(disp->mem, 0x1014, 0x00000000);
- nv_wo32(disp->mem, 0x0000, NvEvoSync);
- nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1020, 0x00000049);
- nv_wo32(disp->mem, 0x1024, 0x00000000);
- nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x102c, 0x00000000);
- nv_wo32(disp->mem, 0x1030, 0x00000000);
- nv_wo32(disp->mem, 0x1034, 0x00000000);
- nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
- nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1040, 0x00000009);
- nv_wo32(disp->mem, 0x1044, 0x00000000);
- nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x104c, 0x00000000);
- nv_wo32(disp->mem, 0x1050, 0x00000000);
- nv_wo32(disp->mem, 0x1054, 0x00000000);
- nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
- nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1060, 0x0fe00009);
- nv_wo32(disp->mem, 0x1064, 0x00000000);
- nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x106c, 0x00000000);
- nv_wo32(disp->mem, 0x1070, 0x00000000);
- nv_wo32(disp->mem, 0x1074, 0x00000000);
- nv_wo32(disp->mem, 0x0018, NvEvoFB32);
- nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
-
- pinstmem->flush(dev);
+ /* create evo dma channels */
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ u64 offset = disp->sync->bo.offset;
+ u32 dmao = 0x1000 + (i * 0x100);
+ u32 hash = 0x0000 + (i * 0x040);
+
+ evo->idx = i;
+ evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
+ evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
+ if (!evo->ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
- /* push buffers for evo channels */
- disp->evo[0].ptr =
- pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
- if (!disp->evo[0].ptr) {
- ret = -ENOMEM;
- goto out;
+ nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
+ nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
+ nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
+ nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
+ ((dmao + 0x00) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
+ nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
+ ((dmao + 0x20) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
+ nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
+ nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
+ ((dmao + 0x40) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
+ nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
+ nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
+ ((dmao + 0x60) << 9));
}
- ret = nvd0_display_init(dev);
- if (ret)
- goto out;
+ pinstmem->flush(dev);
out:
if (ret)
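
Each EVO channel in the loop above gets its DMA objects at 0x1000 + i * 0x100 and a hash slot at i * 0x40, and the data word of every hash entry packs a valid bit, the owning channel and the object's offset within disp->mem. A sketch of that packing, with the field interpretation inferred from the code above (helper name illustrative): for channel 1 and its sync object at 0x1100, 0x00000001 | (1 << 27) | (0x1100 << 9) = 0x08220001.

#include <linux/types.h>

static u32 example_evo_hash_data(u32 chan, u32 dmao)
{
	return 0x00000001	/* entry valid */
	     | (chan << 27)	/* owning EVO channel */
	     | (dmao << 9);	/* DMA object offset within disp->mem */
}
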
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4c8796ba6dd..6a5f4395838 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
};
+static const struct file_operations r128_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = r128_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r128_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
-
+ .fops = &r128_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf8b4bc3e73..2139fe893ec 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
+ radeon_semaphore.o radeon_sa.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file

+CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 14cc88aaf3a..d1bd239cd9e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
+ else if (!drm_can_sleep())
+ mdelay(count);
else
msleep(count);
}
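
The atom_op_delay() change above avoids msleep() when the caller is not allowed to sleep and busy-waits with mdelay() instead. A hedged sketch of the same pattern; drm_can_sleep() is the helper the hunk itself uses (provided by the DRM headers), the wrapper below is illustrative:

#include <linux/delay.h>

/* drm_can_sleep() is assumed to be available from the DRM helper headers */
static void example_safe_delay_ms(unsigned int ms)
{
	if (!drm_can_sleep())
		mdelay(ms);	/* atomic context: spin */
	else
		msleep(ms);	/* process context: sleep */
}
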
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f..0fda830ef80 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -554,7 +554,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector)
+ if (connector && connector->display_info.bpc)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
@@ -1153,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
@@ -1322,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
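
For the macro-tiling hunk in dce4_crtc_do_set_base() above: bits 7:4 of tile_config select the bank count (0 = 4, 1 = 8, 2 = 16 banks) and bits 15:12 the tile split (0 = 1KB, 1 = 2KB, 2 = 4KB rows), which are then folded into the EVERGREEN_GRPH_* framebuffer format. A small decoding sketch whose defaults mirror the switch statements above; it is illustrative, not driver code:

#include <linux/types.h>

static void example_decode_tile_config(u32 tile_config,
				       u32 *banks, u32 *split_kb)
{
	static const u32 bank_tbl[]  = { 4, 8, 16 };
	static const u32 split_tbl[] = { 1, 2, 4 };
	u32 b = (tile_config & 0x00f0) >> 4;
	u32 s = (tile_config & 0xf000) >> 12;

	*banks    = bank_tbl[b <= 2 ? b : 1];	/* default: 8 banks */
	*split_kb = split_tbl[s <= 2 ? s : 0];	/* default: 1KB rows */
}
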
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 39c04c1b847..f1f06ca9f1f 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -409,8 +409,6 @@ int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +432,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else if (radeon_connector->use_digital)
+ if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +443,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +457,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ else if (drm_detect_monitor_audio(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335d..636660fca8c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
@@ -82,6 +84,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +102,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
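
The page-flip hunk above replaces an unbounded while loop on GRPH_SURFACE_UPDATE_PENDING with a poll bounded by rdev->usec_timeout, so a stuck update can no longer spin forever. The same pattern as a standalone sketch; the read callback is an illustrative placeholder, not radeon's register API:

#include <linux/delay.h>
#include <linux/types.h>

typedef u32 (*example_read_fn)(void);

static bool example_poll_flag(example_read_fn read_reg, u32 mask,
			      unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_reg() & mask)
			return true;	/* flag went high in time */
		udelay(1);
	}
	return false;			/* give up instead of hanging */
}
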
@@ -1306,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
@@ -1355,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
- radeon_ring_write(rdev, evergreen_default_state[i]);
+ radeon_ring_write(ring, evergreen_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1437,13 +1448,13 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1451,8 +1462,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1470,16 +1481,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
@@ -2348,7 +2359,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -2361,19 +2372,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2465,7 +2476,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0,
+ CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cayman_cp_int_cntl_setup(rdev, 1, 0);
+ cayman_cp_int_cntl_setup(rdev, 2, 0);
+ } else
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2510,6 +2527,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -2534,11 +2552,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- if (rdev->irq.sw_int) {
- DRM_DEBUG("evergreen_irq_set: sw int\n");
- cp_int_cntl |= RB_INT_ENABLE;
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* enable CP interrupts on all rings */
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+ cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+ cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+ }
+ } else {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
}
+
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2598,7 +2633,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
- WREG32(CP_INT_CNTL, cp_int_cntl);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+ cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+ cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+ } else
+ WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3013,11 +3053,24 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ if (rdev->family >= CHIP_CAYMAN) {
+ switch (src_data) {
+ case 0:
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 2:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ }
+ } else
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3047,6 +3100,7 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -3101,6 +3155,12 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3110,7 +3170,9 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3120,6 +3182,22 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -3139,31 +3217,30 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
return r;
}
int evergreen_suspend(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ ring->ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -3238,8 +3315,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3248,43 +3325,52 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
}
}
+
return 0;
}
void evergreen_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 914e5af8416..2379849515c 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,6 +49,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, pitch);
- radeon_ring_write(rdev, slice);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, cb_color_info);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, pitch);
+ radeon_ring_write(ring, slice);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, cb_color_info);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
* to the RB directly. For IBs, the CP programs this as part of the
* surface_sync packet.
*/
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 1);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 2);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
SQ_VTCX_SEL_Z(SQ_SEL_Z) |
SQ_VTCX_SEL_W(SQ_SEL_W);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0x580);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1); /* size */
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, sq_vtx_constant_word3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0x580);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1); /* size */
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, sq_vtx_constant_word3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word7);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word7);
}
/* emits 12 */
@@ -225,6 +230,7 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
x2 = 2;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
int dwords;
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
/* disable dyn gprs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
/* setup LDS */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0x10001000);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0x10001000);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_3);
}
/* CONTEXT_CONTROL */
- radeon_ring_write(rdev, 0xc0012800);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(ring, 0xc0012800);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* SET_SAMPLER */
- radeon_ring_write(rdev, 0xc0036e00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000012);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0036e00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000012);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
}
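The hunks above are a mechanical refactor: each blit emit helper now resolves the GFX ring once via rdev->ring[RADEON_RING_TYPE_GFX_INDEX] and passes that struct radeon_ring pointer to radeon_ring_write(), rather than handing the whole radeon_device to the write helper. A minimal standalone sketch of the resulting call pattern (the demo_* names are invented for illustration, not driver API):

#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_RINGS 3
#define DEMO_RING_GFX  0

struct demo_ring {
	uint32_t buf[256];
	unsigned wptr;
};

struct demo_device {
	struct demo_ring ring[DEMO_NUM_RINGS];
};

/* The refactor moves the ring lookup out of the write helper:
 * callers pick the ring once and the helper only touches that ring. */
static void demo_ring_write(struct demo_ring *ring, uint32_t v)
{
	ring->buf[ring->wptr++ % 256] = v;
}

static void demo_emit_packet(struct demo_device *dev)
{
	struct demo_ring *ring = &dev->ring[DEMO_RING_GFX];

	demo_ring_write(ring, 0xc0012800);	/* header dword */
	demo_ring_write(ring, 0x80000000);	/* payload */
	demo_ring_write(ring, 0x80000000);
}

int main(void)
{
	struct demo_device dev = { 0 };

	demo_emit_packet(&dev);
	printf("emitted %u dwords\n", dev.ring[DEMO_RING_GFX].wptr);
	return 0;
}

Keeping the lookup in the caller means the same write helper can serve the additional Cayman rings introduced elsewhere in this patch.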
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 38e1bda73d3..f7442e62c03 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
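These helpers replace the open-coded if/else ladders in the register checks further down; the ADDR_SURF_* encodings they return are added to evergreend.h later in this patch (bank counts 2/4/8/16 encode as 0..3, tile splits 1KB/2KB/4KB as 4..6). A quick standalone check of the bank mapping, with those encodings inlined for illustration:

#include <stdio.h>
#include <stdint.h>

/* Encodings from the evergreend.h hunk below. */
#define ADDR_SURF_2_BANK  0
#define ADDR_SURF_4_BANK  1
#define ADDR_SURF_8_BANK  2
#define ADDR_SURF_16_BANK 3

/* Same mapping as evergreen_cs_get_num_banks(): unknown counts fall
 * back to the 8-bank encoding, matching the driver's default case. */
static uint32_t demo_get_num_banks(uint32_t nbanks)
{
	switch (nbanks) {
	case 2:  return ADDR_SURF_2_BANK;
	case 4:  return ADDR_SURF_4_BANK;
	case 16: return ADDR_SURF_16_BANK;
	default: return ADDR_SURF_8_BANK;
	}
}

int main(void)
{
	unsigned banks[] = { 2, 4, 8, 16, 32 };

	for (unsigned i = 0; i < sizeof(banks) / sizeof(banks[0]); i++)
		printf("%u banks -> encoding %u\n",
		       banks[i], demo_get_num_banks(banks[i]));
	return 0;
}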
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -481,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
}
}
break;
@@ -611,20 +649,15 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_INFO:
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
@@ -633,20 +666,15 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_INFO:
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1317,11 +1355,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
}
texture = reloc->robj;
/* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
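The evergreen_cs_parse() hunk above switches the tracker setup from pre-decoded config fields to decoding the packed tile_config word (Cayman or Evergreen, depending on family). A standalone rendering of the same bitfield decode, handy for sanity-checking values seen in logs; the sample word in main() is made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct tiling_info {
	unsigned npipes;
	unsigned nbanks;
	unsigned group_size;
	unsigned row_size;
};

/* Mirrors the switch statements added to evergreen_cs_parse():
 * bits [3:0]   -> number of pipes (1/2/4/8)
 * bits [7:4]   -> number of banks (4/8/16)
 * bits [11:8]  -> tiling group size in bytes (256/512)
 * bits [15:12] -> row size selector (1/2/4)
 * Unknown encodings fall back to the same defaults the driver uses. */
static struct tiling_info decode_tile_config(uint32_t tile_config)
{
	struct tiling_info ti;

	switch (tile_config & 0xf) {
	case 0:  ti.npipes = 1; break;
	case 2:  ti.npipes = 4; break;
	case 3:  ti.npipes = 8; break;
	default: ti.npipes = 2; break;
	}

	switch ((tile_config & 0xf0) >> 4) {
	case 0:  ti.nbanks = 4;  break;
	case 2:  ti.nbanks = 16; break;
	default: ti.nbanks = 8;  break;
	}

	ti.group_size = ((tile_config & 0xf00) >> 8) == 0 ? 256 : 512;

	switch ((tile_config & 0xf000) >> 12) {
	case 0:  ti.row_size = 1; break;
	case 2:  ti.row_size = 4; break;
	default: ti.row_size = 2; break;
	}

	return ti;
}

int main(void)
{
	/* Hypothetical tile_config value, for illustration only. */
	struct tiling_info ti = decode_tile_config(0x00000121);

	printf("pipes=%u banks=%u group=%u row=%u\n",
	       ti.npipes, ti.nbanks, ti.group_size, ti.row_size);
	return 0;
}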
@@ -1475,3 +1572,241 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+ /* context regs are fine */
+ if (reg >= 0x28000)
+ return true;
+
+ /* check config regs */
+ switch (reg) {
+ case GRBM_GFX_INDEX:
+ case VGT_VTX_VECT_EJECT_REG:
+ case VGT_CACHE_INVALIDATION:
+ case VGT_GS_VERTEX_REUSE:
+ case VGT_PRIMITIVE_TYPE:
+ case VGT_INDEX_TYPE:
+ case VGT_NUM_INDICES:
+ case VGT_NUM_INSTANCES:
+ case VGT_COMPUTE_DIM_X:
+ case VGT_COMPUTE_DIM_Y:
+ case VGT_COMPUTE_DIM_Z:
+ case VGT_COMPUTE_START_X:
+ case VGT_COMPUTE_START_Y:
+ case VGT_COMPUTE_START_Z:
+ case VGT_COMPUTE_INDEX:
+ case VGT_COMPUTE_THREAD_GROUP_SIZE:
+ case VGT_HS_OFFCHIP_PARAM:
+ case PA_CL_ENHANCE:
+ case PA_SU_LINE_STIPPLE_VALUE:
+ case PA_SC_LINE_STIPPLE_STATE:
+ case PA_SC_ENHANCE:
+ case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+ case SQ_DYN_GPR_SIMD_LOCK_EN:
+ case SQ_CONFIG:
+ case SQ_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+ case SQ_CONST_MEM_BASE:
+ case SQ_STATIC_THREAD_MGMT_1:
+ case SQ_STATIC_THREAD_MGMT_2:
+ case SQ_STATIC_THREAD_MGMT_3:
+ case SPI_CONFIG_CNTL:
+ case SPI_CONFIG_CNTL_1:
+ case TA_CNTL_AUX:
+ case DB_DEBUG:
+ case DB_DEBUG2:
+ case DB_DEBUG3:
+ case DB_DEBUG4:
+ case DB_WATERMARKS:
+ case TD_PS_BORDER_COLOR_INDEX:
+ case TD_PS_BORDER_COLOR_RED:
+ case TD_PS_BORDER_COLOR_GREEN:
+ case TD_PS_BORDER_COLOR_BLUE:
+ case TD_PS_BORDER_COLOR_ALPHA:
+ case TD_VS_BORDER_COLOR_INDEX:
+ case TD_VS_BORDER_COLOR_RED:
+ case TD_VS_BORDER_COLOR_GREEN:
+ case TD_VS_BORDER_COLOR_BLUE:
+ case TD_VS_BORDER_COLOR_ALPHA:
+ case TD_GS_BORDER_COLOR_INDEX:
+ case TD_GS_BORDER_COLOR_RED:
+ case TD_GS_BORDER_COLOR_GREEN:
+ case TD_GS_BORDER_COLOR_BLUE:
+ case TD_GS_BORDER_COLOR_ALPHA:
+ case TD_HS_BORDER_COLOR_INDEX:
+ case TD_HS_BORDER_COLOR_RED:
+ case TD_HS_BORDER_COLOR_GREEN:
+ case TD_HS_BORDER_COLOR_BLUE:
+ case TD_HS_BORDER_COLOR_ALPHA:
+ case TD_LS_BORDER_COLOR_INDEX:
+ case TD_LS_BORDER_COLOR_RED:
+ case TD_LS_BORDER_COLOR_GREEN:
+ case TD_LS_BORDER_COLOR_BLUE:
+ case TD_LS_BORDER_COLOR_ALPHA:
+ case TD_CS_BORDER_COLOR_INDEX:
+ case TD_CS_BORDER_COLOR_RED:
+ case TD_CS_BORDER_COLOR_GREEN:
+ case TD_CS_BORDER_COLOR_BLUE:
+ case TD_CS_BORDER_COLOR_ALPHA:
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+ case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, end_reg, reg, i;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_INDEX_BUFFER_SIZE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_MODE_CONTROL:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ case PACKET3_INDEX_BASE:
+ case PACKET3_DRAW_INDEX_2:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_DRAW_INDEX_OFFSET:
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_DRAW_INDEX:
+ case PACKET3_DRAW_INDEX_AUTO:
+ case PACKET3_DRAW_INDEX_IMMD:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+ case PACKET3_MPEG_INDEX:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_BOOL_CONST:
+ case PACKET3_SET_LOOP_CONST:
+ case PACKET3_SET_RESOURCE:
+ case PACKET3_SET_SAMPLER:
+ case PACKET3_SET_CTL_CONST:
+ case PACKET3_SET_RESOURCE_OFFSET:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_RESOURCE_INDIRECT:
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int ret = 0;
+ u32 idx = 0;
+ struct radeon_cs_packet pkt;
+
+ do {
+ pkt.idx = idx;
+ pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.one_reg_wr = 0;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ dev_err(rdev->dev, "Packet0 not allowed!\n");
+ ret = -EINVAL;
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ idx += pkt.count + 2;
+ } while (idx < ib->length_dw);
+
+ return ret;
+}
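evergreen_ib_parse() above walks the indirect buffer one PM4 packet at a time and advances by pkt.count + 2 dwords, because a type-3 header encodes the payload length minus one. A standalone sketch of that header layout; the DEMO_* macros mirror the driver's CP_PACKET_GET_TYPE/COUNT/OPCODE and PACKET3 helpers as I read them, so treat the exact bit positions as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Type-3 (PM4) header layout assumed by the parser above:
 *   bits 31:30  packet type (3)
 *   bits 29:16  count = number of payload dwords - 1
 *   bits 15:8   opcode
 * These mirror the CP_PACKET_GET_* macros used in the driver. */
#define DEMO_GET_TYPE(h)    (((h) >> 30) & 0x3)
#define DEMO_GET_COUNT(h)   (((h) >> 16) & 0x3fff)
#define DEMO_GET_OPCODE(h)  (((h) >> 8) & 0xff)
#define DEMO_PACKET3(op, n) ((3u << 30) | (((n) & 0x3fff) << 16) | ((op) << 8))

int main(void)
{
	/* e.g. PACKET3_SURFACE_SYNC (0x43) with 4 payload dwords */
	uint32_t hdr = DEMO_PACKET3(0x43, 3);

	printf("type=%u opcode=0x%02x payload=%u total=%u dwords\n",
	       DEMO_GET_TYPE(hdr), DEMO_GET_OPCODE(hdr),
	       DEMO_GET_COUNT(hdr) + 1, DEMO_GET_COUNT(hdr) + 2);
	return 0;
}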
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c345..4215de95477 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,6 +35,14 @@
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE 0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
+
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
@@ -42,6 +50,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +80,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
@@ -191,4 +228,9 @@
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE 0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
+
#endif
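The new EVERGREEN_GRPH_* fields above let display code describe a scanout surface's tiling (bank count, bank width/height, tile split, macro-tile aspect) directly in GRPH_CONTROL. A standalone composition of such a value using only encodings from this hunk; the particular combination is illustrative rather than taken from the driver, and the array-mode value 4 is the 2D-tiled-thin1 encoding from the existing EVERGREEN_GRPH_ARRAY_* defines:

#include <stdint.h>
#include <stdio.h>

/* Field encodings copied from the evergreen_reg.h hunk above. */
#define EVERGREEN_GRPH_NUM_BANKS(x)             (((x) & 0x3) << 2)
#define EVERGREEN_ADDR_SURF_8_BANK              2
#define EVERGREEN_GRPH_BANK_WIDTH(x)            (((x) & 0x3) << 6)
#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1        0
#define EVERGREEN_GRPH_BANK_HEIGHT(x)           (((x) & 0x3) << 11)
#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2       1
#define EVERGREEN_GRPH_TILE_SPLIT(x)            (((x) & 0x7) << 13)
#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB      4
#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)     (((x) & 0x3) << 18)
#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
#define EVERGREEN_GRPH_ARRAY_MODE(x)            (((x) & 0x7) << 20)

int main(void)
{
	/* Illustrative 2D-tiled scanout setup; array mode 4 is the
	 * 2D-tiled-thin1 value used elsewhere in the driver. */
	uint32_t grph_control =
		EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK) |
		EVERGREEN_GRPH_BANK_WIDTH(EVERGREEN_ADDR_SURF_BANK_WIDTH_1) |
		EVERGREEN_GRPH_BANK_HEIGHT(EVERGREEN_ADDR_SURF_BANK_HEIGHT_2) |
		EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB) |
		EVERGREEN_GRPH_MACRO_TILE_ASPECT(EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2) |
		EVERGREEN_GRPH_ARRAY_MODE(4);

	printf("GRPH_CONTROL = 0x%08x\n", grph_control);
	return 0;
}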
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d..b502216d42a 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -242,6 +242,7 @@
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
@@ -319,6 +320,8 @@
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
@@ -337,6 +340,10 @@
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
+#define SQ_STATIC_THREAD_MGMT_1 0x8E20
+#define SQ_STATIC_THREAD_MGMT_2 0x8E24
+#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
#define SQ_MS_FIFO_SIZES 0x8CF0
@@ -691,6 +698,7 @@
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
@@ -768,6 +776,8 @@
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
+#define VGT_VTX_VECT_EJECT_REG 0x88b0
+
#define SQ_CONST_MEM_BASE 0x8df8
#define SQ_ESGS_RING_BASE 0x8c40
@@ -892,13 +902,36 @@
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
-#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_INDEX_TYPE 0x895C
+
+#define VGT_NUM_INDICES 0x8970
+
+#define VGT_COMPUTE_DIM_X 0x8990
+#define VGT_COMPUTE_DIM_Y 0x8994
+#define VGT_COMPUTE_DIM_Z 0x8998
+#define VGT_COMPUTE_START_X 0x899C
+#define VGT_COMPUTE_START_Y 0x89A0
+#define VGT_COMPUTE_START_Z 0x89A4
+#define VGT_COMPUTE_INDEX 0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
+#define VGT_HS_OFFCHIP_PARAM 0x89B0
+
+#define DB_DEBUG 0x9830
+#define DB_DEBUG2 0x9834
+#define DB_DEBUG3 0x9838
+#define DB_DEBUG4 0x983C
+#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +984,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1193,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
@@ -1158,8 +1218,40 @@
#define SQ_VTX_CONSTANT_WORD6_0 0x30018
#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
+#define TD_PS_BORDER_COLOR_INDEX 0xA400
+#define TD_PS_BORDER_COLOR_RED 0xA404
+#define TD_PS_BORDER_COLOR_GREEN 0xA408
+#define TD_PS_BORDER_COLOR_BLUE 0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA 0xA410
+#define TD_VS_BORDER_COLOR_INDEX 0xA414
+#define TD_VS_BORDER_COLOR_RED 0xA418
+#define TD_VS_BORDER_COLOR_GREEN 0xA41C
+#define TD_VS_BORDER_COLOR_BLUE 0xA420
+#define TD_VS_BORDER_COLOR_ALPHA 0xA424
+#define TD_GS_BORDER_COLOR_INDEX 0xA428
+#define TD_GS_BORDER_COLOR_RED 0xA42C
+#define TD_GS_BORDER_COLOR_GREEN 0xA430
+#define TD_GS_BORDER_COLOR_BLUE 0xA434
+#define TD_GS_BORDER_COLOR_ALPHA 0xA438
+#define TD_HS_BORDER_COLOR_INDEX 0xA43C
+#define TD_HS_BORDER_COLOR_RED 0xA440
+#define TD_HS_BORDER_COLOR_GREEN 0xA444
+#define TD_HS_BORDER_COLOR_BLUE 0xA448
+#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
+#define TD_LS_BORDER_COLOR_INDEX 0xA450
+#define TD_LS_BORDER_COLOR_RED 0xA454
+#define TD_LS_BORDER_COLOR_GREEN 0xA458
+#define TD_LS_BORDER_COLOR_BLUE 0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA 0xA460
+#define TD_CS_BORDER_COLOR_INDEX 0xA464
+#define TD_CS_BORDER_COLOR_RED 0xA468
+#define TD_CS_BORDER_COLOR_GREEN 0xA46C
+#define TD_CS_BORDER_COLOR_BLUE 0xA470
+#define TD_CS_BORDER_COLOR_ALPHA 0xA474
+
/* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0e579985746..32113729540 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
- int r;
+ int i, r;
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
- /* disable context1-7 */
+
+ WREG32(0x15D4, 0);
+ WREG32(0x15D8, 0);
+ WREG32(0x15DC, 0);
+
+ /* empty context1-7 */
+ for (i = 1; i < 8; i++) {
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-7 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl)
+{
+ u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+ WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
/*
* CP.
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
+
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
static int cayman_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cayman_cp_enable(rdev, true);
- r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < cayman_default_size; i++)
- radeon_ring_write(rdev, cayman_default_state[i]);
+ radeon_ring_write(ring, cayman_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
/* XXX init other rings */
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp.wptr = 0;
- WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ ring->rptr = RREG32(CP_RB0_RPTR);
/* ring1 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp1.wptr = 0;
- WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
- rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ ring->rptr = RREG32(CP_RB1_RPTR);
/* ring2 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp2.wptr = 0;
- WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
- rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ ring->rptr = RREG32(CP_RB2_RPTR);
/* start the rings */
cayman_cp_start(rdev);
- rdev->cp.ready = true;
- rdev->cp1.ready = true;
- rdev->cp2.ready = true;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
/* this only test cp0 */
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
- rdev->cp.ready = false;
- rdev->cp1.ready = false;
- rdev->cp2.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
/* XXX deal with CP0,1,2 */
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
static int cayman_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
+ r = radeon_vm_manager_start(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
DRM_ERROR("cayman startup failed on resume\n");
return r;
}
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
- return r;
- }
-
return r;
-
}
int cayman_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ radeon_vm_manager_suspend(rdev);
+ r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
-
return 0;
}
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
*/
int cayman_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* This don't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+ }
+
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- }
- }
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+ /* number of VMs */
+ rdev->vm_manager.nvm = 8;
+ /* base offset of vram pages */
+ rdev->vm_manager.vram_base_offset = 0;
+ return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+ return 0;
+}
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ if (vm->id == -1)
+ return;
+
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags)
+{
+ uint32_t r600_flags = 0;
+
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ r600_flags |= R600_PTE_SYSTEM;
+ r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags)
+{
+ void __iomem *ptr = (void *)vm->pt;
+
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= flags;
+ writeq(addr, ptr + (pfn * 8));
+}
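cayman_vm_page_flags() and cayman_vm_set_page() above translate the generic RADEON_VM_PAGE_* flags into R600-style PTE bits and store the page-aligned address ORed with those bits as one 64-bit entry per pfn. A self-contained sketch of that entry layout; the R600_PTE_* values are copied from the hunk above, while the page-table array and demo_* names are stand-ins for illustration:

#include <stdint.h>
#include <stdio.h>

/* PTE bits, as defined in the ni.c hunk above. */
#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

/* Write one page-table entry: the page-aligned address ORed with the
 * permission bits, one 64-bit word per pfn (mirrors cayman_vm_set_page). */
static void demo_set_page(uint64_t *pt, unsigned pfn,
			  uint64_t addr, uint32_t flags)
{
	pt[pfn] = (addr & 0xFFFFFFFFFFFFF000ULL) | flags;
}

int main(void)
{
	uint64_t pt[16] = { 0 };	/* stand-in for the VM page table */
	uint32_t flags = R600_PTE_VALID | R600_PTE_SYSTEM |
			 R600_PTE_SNOOPED | R600_PTE_READABLE |
			 R600_PTE_WRITEABLE;

	demo_set_page(pt, 3, 0x12345678ABCDULL, flags);
	printf("pte[3] = 0x%016llx\n", (unsigned long long)pt[3]);
	return 0;
}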
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 4672869cdb2..f9df2a645e7 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,9 @@
#define CAYMAN_MAX_TCC_MASK 0xFF
#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_GFX_CNTL 0x0E44
+#define RINGID(x) (((x) & 0x3) << 0)
+#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -219,6 +222,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_COHER_CNTL2 0x85E8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -394,6 +398,12 @@
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
+
+#define CP_INT_CNTL 0xC124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
@@ -411,6 +421,10 @@
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
+#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+
/*
* PM4
*/
@@ -445,6 +459,7 @@
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
@@ -494,7 +509,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
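The DATA_SEL/INT_SEL comments above describe the tail of the EVENT_WRITE_EOP packet that cayman_fence_ring_emit() builds earlier in this patch: low fence address, then the high address bits combined with DATA_SEL(1) (write the 32-bit fence value) and INT_SEL(2) (raise the interrupt once the write is confirmed), then the sequence number. A standalone sketch assembling those six dwords; the event and selector encodings are copied from the hunks above, the local PACKET3() mirrors the driver's header macro, and the address and sequence values are made up:

#include <stdint.h>
#include <stdio.h>

/* Encodings copied from the nid.h hunk above. */
#define PACKET3_EVENT_WRITE_EOP       0x47
#define EVENT_TYPE(x)                 ((x) << 0)
#define EVENT_INDEX(x)                ((x) << 8)
#define DATA_SEL(x)                   ((x) << 29)
#define INT_SEL(x)                    ((x) << 24)
#define CACHE_FLUSH_AND_INV_EVENT_TS  (0x14 << 0)
/* Mirrors the driver's PACKET3() header macro. */
#define PACKET3(op, n) ((3u << 30) | (((n) & 0x3fff) << 16) | (((op) & 0xff) << 8))

int main(void)
{
	uint64_t fence_addr = 0x0000000812340000ULL;	/* illustrative */
	uint32_t fence_seq  = 42;			/* illustrative */
	uint32_t pkt[6];

	/* Same six dwords cayman_fence_ring_emit() writes to the ring. */
	pkt[0] = PACKET3(PACKET3_EVENT_WRITE_EOP, 4);
	pkt[1] = EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5);
	pkt[2] = (uint32_t)fence_addr;
	pkt[3] = (uint32_t)((fence_addr >> 32) & 0xff) | DATA_SEL(1) | INT_SEL(2);
	pkt[4] = fence_seq;
	pkt[5] = 0;

	for (int i = 0; i < 6; i++)
		printf("dw%d = 0x%08x\n", i, pkt[i]);
	return 0;
}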
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea4990..3ec81c3d510 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
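The page-flip fix above turns an unbounded busy-wait into the same bounded poll the ring tests in this patch use; a generic sketch of that shape (poll_reg_bit is a made-up name):

/* Sketch only: poll a status bit for at most rdev->usec_timeout
 * microseconds instead of spinning forever. */
static bool poll_reg_bit(struct radeon_device *rdev, u32 reg, u32 mask)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(reg) & mask)
			return true;
		udelay(1);
	}
	return false;	/* caller decides what a timeout means */
}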
@@ -662,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
@@ -734,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
@@ -806,25 +811,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ /* Unused on older asics, since we don't have semaphores or multiple rings */
+ BUG();
}
int r100_copy_blit(struct radeon_device *rdev,
@@ -833,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -850,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
- r = radeon_ring_lock(rdev, ndw);
+ r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
@@ -864,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
/* pages are in Y direction - height
page width in X direction - width */
- radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(ring,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
@@ -877,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
- radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
- radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
- }
- radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
@@ -917,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
void r100_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
@@ -1030,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
@@ -1055,7 +1074,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
- r = radeon_ring_init(rdev, ring_size);
+ r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+ 0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
@@ -1064,7 +1085,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_blksz = 9;
/* cp will read 128 bytes at a time (4 dwords) */
max_fetch = 1;
- rdev->cp.align_mask = 16 - 1;
+ ring->align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
@@ -1094,13 +1115,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
- DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
- WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1116,7 +1137,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1125,12 +1146,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev);
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, ring);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
- rdev->cp.ready = true;
+ ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1142,7 +1163,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1150,7 +1171,7 @@ void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1160,13 +1181,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-void r100_cp_commit(struct radeon_device *rdev)
-{
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(RADEON_CP_RB_WPTR);
-}
-
-
/*
* CS functions
*/
@@ -2094,9 +2108,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
}
@@ -2121,20 +2135,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
* false positive when CP is just given nothing to do.
*
**/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
- if (cp->rptr != lockup->last_cp_rptr) {
+ if (ring->rptr != lockup->last_cp_rptr) {
/* CP is still working, no lockup */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
@@ -2147,31 +2161,32 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
return false;
}
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
}
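A compact sketch of the heuristic behind r100_gpu_cp_is_lockup above: progress of the read pointer resets the clock, and only a stall longer than some threshold counts as a hang. The helper name and the explicit timeout parameter are illustrative; the threshold used by the real check is not visible in this hunk.

/* Illustrative only. */
static bool rptr_stalled(struct r100_gpu_lockup *lockup,
			 struct radeon_ring *ring, unsigned timeout_msec)
{
	if (ring->rptr != lockup->last_cp_rptr) {
		/* CP still consuming packets: remember where and when. */
		lockup->last_cp_rptr = ring->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	return jiffies_to_msecs(jiffies - lockup->last_jiffies) >= timeout_msec;
}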
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
+ u16 tmp16;
/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
@@ -2182,8 +2197,8 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+ pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
mdelay(1);
}
@@ -2574,21 +2589,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
- radeon_ring_free_size(rdev);
+ radeon_ring_free_size(rdev, ring);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
}
return 0;
}
@@ -3630,7 +3646,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
}
}
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
@@ -3643,15 +3659,15 @@ int r100_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET0(scratch, 0));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(scratch, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3672,9 +3688,11 @@ int r100_ring_test(struct radeon_device *rdev)
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
- radeon_ring_write(rdev, ib->gpu_addr);
- radeon_ring_write(rdev, ib->length_dw);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, ib->length_dw);
}
int r100_ib_test(struct radeon_device *rdev)
@@ -3691,7 +3709,7 @@ int r100_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
if (r) {
return r;
}
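With per-ring IB pools, getting an indirect buffer now names the ring and a size. A hedged sketch of the surrounding test flow; the helper name is invented, the radeon_ib_schedule/radeon_ib_free signatures are assumed from the rest of the driver, and error paths are trimmed.

/* Sketch: allocate a small IB on the GFX ring, fill it with a scratch
 * write, submit it and release it. */
static int submit_scratch_ib(struct radeon_device *rdev, uint32_t scratch)
{
	struct radeon_ib *ib;
	int r;

	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
	if (r)
		return r;
	ib->ptr[0] = PACKET0(scratch, 0);
	ib->ptr[1] = 0xDEADBEEF;
	ib->length_dw = 2;
	r = radeon_ib_schedule(rdev, ib);
	radeon_ib_free(rdev, &ib);
	return r;
}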
@@ -3735,34 +3753,16 @@ int r100_ib_test(struct radeon_device *rdev)
void r100_ib_fini(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
radeon_ib_pool_fini(rdev);
}
-int r100_ib_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- r = r100_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- return 0;
-}
-
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP; we shouldn't need to do that, but better safe than
* sorry
*/
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
@@ -3900,6 +3900,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3909,11 +3915,18 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -3936,11 +3949,14 @@ int r100_resume(struct radeon_device *rdev)
r100_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r100_startup(rdev);
}
int r100_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -4059,7 +4075,14 @@ int r100_init(struct radeon_device *rdev)
return r;
}
r100_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r100_startup(rdev);
if (r) {
/* Something went wrong with the accel init; stop accel */
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a1f3ba063c2..eba4cbfa78f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (1 << 16));
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ radeon_ring_write(ring, PACKET0(0x720, 2));
+ radeon_ring_write(ring, src_offset);
+ radeon_ring_write(ring, dst_offset);
+ radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c93bc64707e..3fc0d29a5f3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+ radeon_ring_write(ring, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
void r300_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
break;
}
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
- radeon_ring_write(rdev, gb_tile_config);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(ring, gb_tile_config);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
- radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(ring,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
}
int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (p->keep_tiling_flags) {
+ if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -1434,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1539,7 +1558,14 @@ int r300_init(struct radeon_device *rdev)
return r;
}
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r300_startup(rdev);
if (r) {
/* Something went wrong with the accel init; stop accel */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 417fab81812..666e28fe509 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
- radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -297,11 +314,14 @@ int r420_resume(struct radeon_device *rdev)
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r420_startup(rdev);
}
int r420_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -414,7 +434,14 @@ int r420_init(struct radeon_device *rdev)
return r;
}
r420_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init; stop accel */
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index fc437059918..3bd8f1b1c60 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -573,6 +573,7 @@
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
@@ -633,6 +634,7 @@
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3081d07f8de..4ae1615e752 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -223,6 +235,8 @@ int r520_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return r520_startup(rdev);
}
@@ -292,7 +306,14 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r520_startup(rdev);
if (r) {
/* Something went wrong with the accel init; stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9cdda0b3b08..4f08e5e6ee9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
int r600_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
if (rdev->family >= CHIP_RV770) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
} else {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x3);
+ radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
}
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
int r600_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
r600_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
}
-void r600_cp_commit(struct radeon_device *rdev)
-{
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
- rdev->cp.ring_size = ring_size;
- rdev->cp.align_mask = 16 - 1;
+ ring->ring_size = ring_size;
+ ring->align_mask = 16 - 1;
}
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
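Ring creation is now explicit about which ring and which hardware pointers back it. The following sketch is assembled from the r600_init/r600_startup hunks later in this patch; the wrapper name setup_gfx_ring is invented here.

/* Sketch: size the GFX ring, then bind it to its RPTR/WPTR registers,
 * writeback slot, pointer mask and NOP packet. */
static int setup_gfx_ring(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);
	return radeon_ring_init(rdev, ring, ring->ring_size,
				RADEON_WB_CP_RPTR_OFFSET,
				R600_CP_RB_RPTR, R600_CP_RB_WPTR,
				0, 0xfffff, RADEON_CP_PACKET2);
}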
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
}
}
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
+ unsigned i, ridx = radeon_ring_index(rdev, ring);
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 3);
+ r = radeon_ring_lock(rdev, ring, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test succeeded in %d usecs\n", i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
} else {
- DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ridx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,63 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
if (rdev->wb.use_event) {
- u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
- (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(rdev, addr & 0xffffffff);
- radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
} else {
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(rdev, RB_INT_STAT);
+ radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(ring, RB_INT_STAT);
}
}
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
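The new semaphore emit is symmetric: the same packet either signals or waits depending on emit_wait, which is how two rings can be ordered against each other. A hedged usage sketch (the helper name is illustrative):

/* Sketch: make 'consumer' wait until 'producer' has signalled the
 * shared semaphore. */
static void order_rings(struct radeon_device *rdev,
			struct radeon_ring *producer,
			struct radeon_ring *consumer,
			struct radeon_semaphore *sem)
{
	r600_semaphore_ring_emit(rdev, producer, sem, false);	/* signal */
	r600_semaphore_ring_emit(rdev, consumer, sem, true);	/* wait */
}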
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2409,6 +2419,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
int r600_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -2447,6 +2458,12 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2456,7 +2473,10 @@ int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+
if (r)
return r;
r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -2494,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio resume failed\n");
@@ -2518,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -2595,8 +2622,8 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2632,24 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r)
@@ -2643,12 +2664,13 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2662,18 +2684,20 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
{
struct radeon_ib *ib;
uint32_t scratch;
@@ -2687,7 +2711,7 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, ring, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2728,7 +2752,7 @@ int r600_ib_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test succeeded in %u usecs\n", i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -3075,7 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3483,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3520,6 @@ restart_ih:
*/
#if defined(CONFIG_DEBUG_FS)
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- unsigned count, i, j;
-
- radeon_ring_free_size(rdev);
- count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
- seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
- seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
- seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
- seq_printf(m, "%u dwords in ring\n", count);
- i = rdev->cp.rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
- i = (i + 1) & rdev->cp.ptr_mask;
- }
- return 0;
-}
-
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3533,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
- {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 846fae57639..ba66f3093d4 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -36,7 +36,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
+ u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ if (ASIC_IS_DCE4(rdev)) {
+ if (enable) {
+ value |= 0x81000000; /* Required to enable audio */
+ value |= 0x0e1000f0; /* fglrx sets that too */
+ }
+ WREG32(EVERGREEN_AUDIO_ENABLE, value);
+ } else {
+ WREG32_P(R600_AUDIO_ENABLE,
+ enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
rdev->audio_enabled = enable;
}
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
return;
}
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
+ if (ASIC_IS_DCE4(rdev)) {
+ /* TODO: other PLLs? */
+ WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+ /* Some magic trigger or src sel? */
+ WREG32_P(0x5ac, 0x01, ~0x77);
+ } else {
+ switch (dig->dig_encoder) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ default:
+ dev_err(rdev->dev,
+ "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e09d2818f94..d996f438113 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,6 +50,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(rdev, 2 << 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+ radeon_ring_write(ring, 2 << 0);
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (pitch << 0) | (slice << 10));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 2);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0x460);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1);
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, 1 << 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0x460);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1);
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, 1 << 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
/* emits 12 */
@@ -241,43 +246,45 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,24 +448,24 @@ set_default_state(struct radeon_device *rdev)
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}
static uint32_t i2f(uint32_t input)
@@ -611,16 +619,17 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
{
int r;
- r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+ &rdev->r600_blit.vb_ib, size);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
- rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_total = size;
rdev->r600_blit.vb_used = 0;
return 0;
}
@@ -679,15 +688,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;
int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
- r = r600_vb_ib_get(rdev);
- if (r)
- return r;
-
/* num loops */
while (num_gpu_pages) {
num_gpu_pages -=
@@ -696,10 +702,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
num_loops++;
}
+ /* 48 bytes for vertex per loop */
+ r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+ if (r)
+ return r;
+
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring_size);
+ r = radeon_ring_lock(rdev, ring, ring_size);
if (r)
return r;
@@ -718,7 +729,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
if (fence)
r = radeon_fence_emit(rdev, fence);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c9db4931913..84c54625095 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
dev_priv->ring.size_l2qw);
#endif
- RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+ RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb1acffd243..38ce5d0427e 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1625,7 +1625,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e788d8..0b592067145 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
frame[0xD] = (right_bar >> 8);
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be fine; Intel
+	 * uses the same ones.  The checksum function also seems to be OK,
+	 * since it works for the audio infoframe.  However, the calculated
+	 * value is always lower by 2 than what fglrx programs, which breaks
+	 * the display on TVs that strictly check the checksum.  Hack it
+	 * manually here to work around this issue. */
+ frame[0x0] += 2;
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
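For reference, the checksum convention the comment above alludes to is the standard HDMI infoframe rule: header bytes plus payload plus checksum must sum to zero modulo 256. The sketch below is only an illustration of that rule, not the driver's r600_hdmi_infoframe_checksum() (whose body is not part of this hunk); the function name is hypothetical.

/* Illustrative only: generic HDMI infoframe checksum rule.
 * Assumes <linux/types.h> for u8.  The driver's own helper may differ.
 */
static u8 hdmi_infoframe_checksum_example(u8 type, u8 version, u8 length,
					  const u8 *payload)
{
	u8 sum = type + version + length;
	int i;

	for (i = 0; i < length; i++)
		sum += payload[i];

	/* pick the byte that makes the whole frame sum to 0 (mod 256) */
	return 0x100 - sum;
}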
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u16 eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ };
+
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
/* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
+ dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
+ return;
+ }
+ radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
+ eg_offsets[dig->dig_encoder];
+ radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
+ + EVERGREEN_HDMI_CONFIG_OFFSET;
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* TODO */
+ } else if (rdev->family >= CHIP_R600) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740) {
-
+ && rdev->family != CHIP_RS740
+ && !ASIC_IS_DCE4(rdev)) {
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* disable polling */
r600_audio_disable_polling(encoder);
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, 0,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, 0,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bfe1b5d92af..3ee1fd7ef39 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -831,6 +831,8 @@
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76b5c7..73e05cb85ec 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,21 @@ extern int radeon_msi;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX 0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode those limits for now */
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+
/*
* Errata workarounds.
*/
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
- rwlock_t lock;
struct list_head created;
- struct list_head emited;
+ struct list_head emitted;
struct list_head signaled;
bool initialized;
};
@@ -210,21 +226,26 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- bool emited;
+ bool emitted;
bool signaled;
+ /* RB, DMA, etc. */
+ int ring;
+ struct radeon_semaphore *semaphore;
};
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
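The fence interface above is now per-ring; a minimal usage sketch follows, assuming the caller satisfies whatever locking radeon_fence_emit() requires. The helper name wait_for_gfx_work() is hypothetical and error handling is reduced to the basics.

/* Minimal sketch: fence the work already queued on the GFX ring and
 * block until it has retired.  Real callers also handle lockups and
 * interruptible waits.
 */
static int wait_for_gfx_work(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;

	r = radeon_fence_emit(rdev, fence);
	if (!r)
		r = radeon_fence_wait(fence, false);	/* false: not interruptible */

	radeon_fence_unref(&fence);
	return r;
}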
/*
* Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
bool initialized;
};
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+ /* bo list is protected by bo being reserved */
+ struct list_head bo_list;
+ /* vm list is protected by vm mutex */
+ struct list_head vm_list;
+ /* constant after initialization */
+ struct radeon_vm *vm;
+ struct radeon_bo *bo;
+ uint64_t soffset;
+ uint64_t eoffset;
+ uint32_t flags;
+ bool valid;
+};
+
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
@@ -259,6 +295,10 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+	/* list of all virtual addresses to which this bo
+	 * is associated
+	 */
+ struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
@@ -274,6 +314,48 @@ struct radeon_bo_list {
u32 tiling_flags;
};
+/* sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, such as
+ * the indirect buffer or semaphore code, which both have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size.  If so, we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting on
+ * each sub-object until we reach object_offset + object_size >=
+ * alloc_size; that object then becomes the sub-object we return.
+ * (A simplified sketch of the end-of-buffer check follows the
+ * structures below.)
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be any holes (all objects use
+ * the same alignment).
+ */
+struct radeon_sa_manager {
+ struct radeon_bo *bo;
+ struct list_head sa_bo;
+ unsigned size;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ uint32_t domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+ struct list_head list;
+ struct radeon_sa_manager *manager;
+ unsigned offset;
+ unsigned size;
+};
+
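As noted in the comment above, allocation first tries the space after the last sub-object; the sketch below expresses that test using only the fields introduced in this hunk. The helper name sa_fits_at_end() is illustrative and not part of the driver.

/* Illustrative only: would a request of 'size' bytes fit after the last
 * sub-allocation?  manager->sa_bo is kept sorted by offset, so its list
 * tail is the highest allocation.
 */
static bool sa_fits_at_end(struct radeon_sa_manager *manager, unsigned size)
{
	struct radeon_sa_bo *last;

	if (list_empty(&manager->sa_bo))
		return size <= manager->size;

	last = list_entry(manager->sa_bo.prev, struct radeon_sa_bo, list);
	return manager->size - (last->offset + last->size) >= size;
}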
/*
* GEM objects.
*/
@@ -303,6 +385,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
uint32_t handle);
/*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define RADEON_SEMAPHORE_BO_SIZE 256
+
+struct radeon_semaphore_driver {
+ rwlock_t lock;
+ struct list_head bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+ struct list_head list;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ struct radeon_semaphore_bo *bo;
+};
+
+struct radeon_semaphore_bo {
+ struct list_head list;
+ struct radeon_ib *ib;
+ struct list_head free;
+ struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+ unsigned nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore);
+
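A rough sketch of the intended use of the semaphore API above: one ring signals, another waits. The emits belong inside the usual per-ring lock/commit sequence (see radeon_ring_lock()/radeon_ring_unlock_commit() further down), which is omitted here, and real code must keep the semaphore alive until both rings have actually executed the packets; the helper name below is hypothetical.

/* Illustrative only: make ring_wait block until ring_signal reaches the
 * semaphore.  Ring locking and the lifetime handling that keeps 'sem'
 * valid until the GPU has consumed it are glossed over.
 */
static int sync_rings_example(struct radeon_device *rdev,
			      int ring_signal, int ring_wait)
{
	struct radeon_semaphore *sem = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	radeon_semaphore_emit_signal(rdev, ring_signal, sem);
	radeon_semaphore_emit_wait(rdev, ring_wait, sem);

	radeon_semaphore_free(rdev, sem);
	return 0;
}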
+/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -310,6 +432,7 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
struct radeon_gart {
dma_addr_t table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
unsigned table_size;
struct page **pages;
dma_addr_t *pages_addr;
- bool *ttm_alloced;
bool ready;
};
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
struct radeon_irq {
bool installed;
- bool sw_int;
+ bool sw_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
bool pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
wait_queue_head_t idle_queue;
bool hdmi[RADEON_MAX_HDMI_BLOCKS];
spinlock_t sw_lock;
- int sw_refcount;
+ int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[RADEON_MAX_CRTCS];
int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +574,23 @@ struct radeon_irq {
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
- * CP & ring.
+ * CP & rings.
*/
+
struct radeon_ib {
- struct list_head list;
+ struct radeon_sa_bo sa_bo;
unsigned idx;
+ uint32_t length_dw;
uint64_t gpu_addr;
- struct radeon_fence *fence;
uint32_t *ptr;
- uint32_t length_dw;
- bool free;
+ struct radeon_fence *fence;
+ unsigned vm_id;
};
/*
@@ -475,20 +598,22 @@ struct radeon_ib {
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
- struct mutex mutex;
- struct radeon_bo *robj;
- struct list_head bogus_ib;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct mutex mutex;
+ struct radeon_sa_manager sa_manager;
+ struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+ bool ready;
+ unsigned head_id;
};
-struct radeon_cp {
+struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
+ unsigned rptr_reg;
unsigned wptr;
unsigned wptr_old;
+ unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -497,6 +622,61 @@ struct radeon_cp {
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
+ u32 ptr_reg_shift;
+ u32 ptr_reg_mask;
+ u32 nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+ struct list_head list;
+ struct list_head va;
+ int id;
+ unsigned last_pfn;
+ u64 pt_gpu_addr;
+ u64 *pt;
+ struct radeon_sa_bo sa_bo;
+ struct mutex mutex;
+ /* last fence for cs using this vm */
+ struct radeon_fence *fence;
+};
+
+struct radeon_vm_funcs {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+	/* cs mutex must be locked for schedule_ib */
+ int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+ void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+ void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+ uint32_t (*page_flags)(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+ void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+ struct list_head lru_vm;
+ uint32_t use_bitmap;
+ struct radeon_sa_manager sa_manager;
+ uint32_t max_pfn;
+ /* fields constant after init */
+ const struct radeon_vm_funcs *funcs;
+ /* number of VMIDs */
+ unsigned nvm;
+ /* vram base address for page table entry */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+};
+
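The radeon_vm_funcs table above is the per-ASIC backend; generic VM code is expected to dispatch through rdev->vm_manager.funcs roughly as sketched below. The wrapper name is illustrative, not necessarily the driver's.

/* Illustrative only: generic code reaches the ASIC-specific page-table
 * update through the vm_manager function table.
 */
static void vm_set_page_example(struct radeon_device *rdev,
				struct radeon_vm *vm,
				unsigned pfn, uint64_t addr, uint32_t flags)
{
	flags = rdev->vm_manager.funcs->page_flags(rdev, vm, flags);
	rdev->vm_manager.funcs->set_page(rdev, vm, pfn, addr, flags);
}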
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+ struct radeon_vm vm;
};
/*
@@ -506,6 +686,7 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
void r600_blit_suspend(struct radeon_device *rdev);
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
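With the ring now passed explicitly, the usual emit pattern looks roughly like the sketch below; the dwords written are just the ring's NOP value, and error handling is minimal.

/* Illustrative only: reserve space on the GFX ring, write a few NOPs,
 * then commit.  radeon_ring_unlock_commit() bumps the write pointer and
 * drops the ring mutex taken by radeon_ring_lock().
 */
static int ring_emit_example(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	r = radeon_ring_lock(rdev, ring, 3);
	if (r)
		return r;

	radeon_ring_write(ring, ring->nop);
	radeon_ring_write(ring, ring->nop);
	radeon_ring_write(ring, ring->nop);

	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}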
/*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
+ int kpage_idx[2];
+ uint32_t *kpage[2];
uint32_t *kdata;
- void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
+ void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
};
struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ bool sync_to_ring[RADEON_NUM_RINGS];
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
+ int chunk_flags_idx;
struct radeon_ib *ib;
void *track;
unsigned family;
int parser_error;
- bool keep_tiling_flags;
+ u32 cs_flags;
+ u32 ring;
+ s32 priority;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *cpA,
+ struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
/*
* Debugfs
*/
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
- void (*cp_commit)(struct radeon_device *rdev);
void (*ring_start)(struct radeon_device *rdev);
- int (*ring_test)(struct radeon_device *rdev);
- void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ } ring[RADEON_NUM_RINGS];
+
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
- void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- struct radeon_fence_driver fence_drv;
- struct radeon_cp cp;
- /* cayman compute rings */
- struct radeon_cp cp1;
- struct radeon_cp cp2;
+ rwlock_t fence_lock;
+ struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
+ struct radeon_semaphore_driver semaphore_drv;
+ struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+ /* debugfs */
+ struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+ /* virtual memory */
+ struct radeon_vm_manager vm_manager;
+ /* ring used for bo copies */
+ u32 copy_ring;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-
#if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
/*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo);
+
+
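Taken together, the VM helpers above suggest a per-client flow along the lines of the sketch below. This is a sketch only: the virtual offset and flags are placeholders, and the locking plus the ttm_mem_reg needed by radeon_vm_bo_update_pte() are glossed over.

/* Illustrative only: create a VM, reserve a virtual range for a BO,
 * bind the VM to a hardware VMID, then tear everything down.
 */
static int vm_usage_example(struct radeon_device *rdev, struct radeon_bo *bo)
{
	struct radeon_vm vm;
	int r;

	r = radeon_vm_init(rdev, &vm);
	if (r)
		return r;

	r = radeon_vm_bo_add(rdev, &vm, bo, 1 << 20, 0);	/* placeholder offset/flags */
	if (r)
		goto fini;

	r = radeon_vm_bind(rdev, &vm);		/* acquire a VMID + page table space */
	if (!r)
		radeon_vm_unbind(rdev, &vm);
fini:
	radeon_vm_fini(rdev, &vm);
	return r;
}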
+/*
* R600 vram scratch functions
*/
int r600_vram_scratch_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7..3516a6081dc 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2e1eae114e..36a6192ce86 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
- .cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static const struct radeon_vm_funcs cayman_vm_funcs = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .bind = &cayman_vm_bind,
+ .unbind = &cayman_vm_unbind,
+ .tlb_flush = &cayman_vm_tlb_flush,
+ .page_flags = &cayman_vm_page_flags,
+ .set_page = &cayman_vm_set_page,
+};
+
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 2;
+ /* set the ring used for bo copies */
+ rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
+ rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
default:
/* FIXME: not supported yet */
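
The per-ASIC callbacks that used to be single fields (cp_commit, ring_ib_execute, fence_ring_emit) are folded above into a per-ring table indexed by RADEON_RING_TYPE_GFX_INDEX and the Cayman CP1/CP2 slots. A minimal sketch, not taken from this hunk, of how such a table might be dispatched; the helper names are assumptions, the struct members come from the patch:

/* Sketch (not from this hunk): dispatching through the per-ring table. */
static inline void radeon_asic_ib_execute(struct radeon_device *rdev,
                                          int ring, struct radeon_ib *ib)
{
        rdev->asic->ring[ring].ib_execute(rdev, ib);
}

static inline void radeon_asic_emit_fence(struct radeon_device *rdev,
                                          int ring,
                                          struct radeon_fence *fence)
{
        rdev->asic->ring[ring].emit_fence(rdev, fence);
}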
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 59914842a72..6304aef0d9b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_init(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
-void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r600_gpu_is_lockup(struct radeon_device *rdev);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev);
+int r600_ib_test(struct radeon_device *rdev, int ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
/*
* cayman
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
#endif
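
The new radeon_vm_funcs table (installed as rdev->vm_manager.funcs for Cayman in the previous file) gives the core VM code a single indirection point instead of direct calls into cayman_vm_*(). A hedged sketch of a caller going through it; the wrapper name is an assumption:

/* Sketch: core VM code reaching the ASIC hooks through the funcs table. */
static int radeon_vm_hw_bind(struct radeon_device *rdev,
                             struct radeon_vm *vm, int id)
{
        if (!rdev->vm_manager.funcs)
                return -ENOSYS; /* family without VM support */
        return rdev->vm_manager.funcs->bind(rdev, vm, id);
}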
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d24baf30efc..5082d17d14d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2560,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ if (rdev->pm.default_power_state_index >= 0)
+ rdev->pm.current_vddc =
+ rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ else
+ rdev->pm.current_vddc = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 17e1a9b2d8f..815f2341ab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r)
return r;
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
break;
case 6:
/* GTT to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 7:
/* VRAM to GTT, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 8:
/* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
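
With fences now bound to a ring at creation time, a single benchmark iteration looks roughly like the sketch below; the helper name and the trimmed error handling are illustrative only, the copy_blit callback and RADEON_GPU_PAGE_SIZE come from the driver:

/* Sketch: one timed move using the fence-per-ring API. */
static int radeon_benchmark_one_copy(struct radeon_device *rdev,
                                     unsigned size, uint64_t saddr,
                                     uint64_t daddr)
{
        struct radeon_fence *fence;
        int r;

        r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
        if (r)
                return r;
        r = rdev->asic->copy_blit(rdev, saddr, daddr,
                                  size / RADEON_GPU_PAGE_SIZE, fence);
        if (!r)
                r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
}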
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 29afd71e084..435a3d970ab 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < p->nrelocs; j++) {
+ for (j = 0; j < i; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].flags = r->flags;
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
- }
+
+ if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
+ struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+ if (!radeon_fence_signaled(fence)) {
+ p->sync_to_ring[fence->ring] = true;
+ }
+ }
+ } else
+ p->relocs[i].handle = 0;
}
return radeon_bo_list_validate(&p->validated);
}
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+ p->priority = priority;
+
+ switch (ring) {
+ default:
+ DRM_ERROR("unknown ring id: %d\n", ring);
+ return -EINVAL;
+ case RADEON_CS_RING_GFX:
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ case RADEON_CS_RING_COMPUTE:
+ /* for now */
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ }
+ return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+ int i, r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+ continue;
+
+ if (!p->ib->fence->semaphore) {
+ r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+ if (r)
+ return r;
+ }
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ }
+ return 0;
+}
+
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i, flags = 0;
+ unsigned size, i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
if (!cs->num_chunks) {
return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->idx = 0;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
+ p->chunk_flags_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
+ p->cs_flags = 0;
p->nchunks = cs->num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
- !p->chunks[i].length_dw) {
- return -EINVAL;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags_idx = i;
+ /* zero length flags aren't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+ if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+ (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
size = p->chunks[i].length_dw * sizeof(uint32_t);
p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- flags = p->chunks[i].kdata[0];
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
}
- } else {
- p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
- return -ENOMEM;
- }
- p->chunks[i].kpage_idx[0] = -1;
- p->chunks[i].kpage_idx[1] = -1;
- p->chunks[i].last_copied_page = -1;
- p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
}
}
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
return -EINVAL;
}
- p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+ if (radeon_cs_get_ring(p, ring, priority)) {
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
+ return -EINVAL;
+ }
+
+
+ /* deal with non-vm */
+ if ((p->chunk_ib_idx != -1) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+ (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ return -ENOMEM;
+ }
+ p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+ p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+ p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+ p->chunks[p->chunk_ib_idx].last_page_index =
+ ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+ }
+
return 0;
}
@@ -224,11 +317,139 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_ib_free(parser->rdev, &parser->ib);
}
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ /* Copy the packet into the IB, the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached).
+ */
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ r = radeon_cs_parse(parser);
+ if (r || parser->parser_error) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_finish_pages(parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = 0;
+ r = radeon_ib_schedule(rdev, parser->ib);
+ if (r) {
+ DRM_ERROR("Failed to schedule IB !\n");
+ }
+ return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ int r;
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ bo = lobj->bo;
+ r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
+ r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ if (r) {
+ return r;
+ }
+
+ mutex_lock(&vm->mutex);
+ r = radeon_vm_bind(rdev, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_bo_vm_update_pte(parser, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = vm->id;
+ /* the ib pool is bound at 0 in the virtual address space, so gpu_addr
+ * is the offset inside the pool bo
+ */
+ parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+ r = radeon_ib_schedule(rdev, parser->ib);
+out:
+ if (!r) {
+ if (vm->fence) {
+ radeon_fence_unref(&vm->fence);
+ }
+ vm->fence = radeon_fence_ref(parser->ib->fence);
+ }
+ mutex_unlock(&fpriv->vm.mutex);
+ return r;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_cs_parser parser;
- struct radeon_cs_chunk *ib_chunk;
int r;
radeon_mutex_lock(&rdev->cs_mutex);
@@ -245,13 +466,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- r = radeon_ib_get(rdev, &parser.ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
r = radeon_cs_parser_relocs(&parser);
if (r) {
if (r != -ERESTARTSYS)
@@ -260,29 +474,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- r = radeon_cs_parse(&parser);
- if (r || parser.parser_error) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
- r = radeon_cs_finish_pages(&parser);
+ r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
+ goto out;
}
- r = radeon_ib_schedule(rdev, parser.ib);
+ r = radeon_cs_ib_vm_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Failed to schedule IB !\n");
+ goto out;
}
+out:
radeon_cs_parser_fini(&parser, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
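
The FLAGS chunk gained two optional dwords: dword 1 selects the target ring and dword 2 a priority, both defaulting to the GFX ring and 0 when absent. A sketch of the chunk payload the parser above expects; the RADEON_CS_* names come from the hunk, the array itself is illustrative:

/* Sketch: dword layout of the RADEON_CHUNK_ID_FLAGS chunk.
 * length_dw may be 1, 2 or 3; missing dwords use the defaults. */
static const uint32_t flags_chunk[3] = {
        RADEON_CS_USE_VM,       /* dword 0: cs_flags                     */
        RADEON_CS_RING_GFX,     /* dword 1: ring, optional (default GFX) */
        0,                      /* dword 2: priority, optional           */
};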
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c4d00a17141..0afb13bd8dc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
else {
- /* often unreliable on AGP */
if (rdev->flags & RADEON_IS_AGP) {
+ /* often unreliable on AGP */
+ rdev->wb.enabled = false;
+ } else if (rdev->family < CHIP_R300) {
+ /* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
@@ -718,17 +721,24 @@ int radeon_device_init(struct radeon_device *rdev,
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
- mutex_init(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ mutex_init(&rdev->ring[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_drv.lock);
+ rwlock_init(&rdev->fence_lock);
+ rwlock_init(&rdev->semaphore_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
+ INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ /* initialize vm here */
+ rdev->vm_manager.use_bitmap = 1;
+ rdev->vm_manager.max_pfn = 1 << 20;
+ INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
+ dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
+ r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+ printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+ }
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
- if (radeon_testing) {
+ if ((radeon_testing & 1)) {
radeon_test_moves(rdev);
}
+ if ((radeon_testing & 2)) {
+ radeon_test_syncing(rdev);
+ }
if (radeon_benchmarking) {
radeon_benchmark(rdev, radeon_benchmarking);
}
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
+ radeon_debugfs_remove_files(rdev);
}
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int r;
+ int i, r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -887,7 +909,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
- radeon_fence_wait_last(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ radeon_fence_wait_last(rdev, i);
radeon_save_bios_scratch_regs(rdev);
@@ -986,36 +1009,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/*
* Debugfs
*/
-struct radeon_debugfs {
- struct drm_info_list *files;
- unsigned num_files;
-};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-static unsigned _radeon_debugfs_count = 0;
-
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
- for (i = 0; i < _radeon_debugfs_count; i++) {
- if (_radeon_debugfs[i].files == files) {
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ if (rdev->debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
- i = _radeon_debugfs_count + 1;
+ i = rdev->debugfs_count + 1;
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
"RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
- _radeon_debugfs[_radeon_debugfs_count].files = files;
- _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
- _radeon_debugfs_count = i;
+ rdev->debugfs[rdev->debugfs_count].files = files;
+ rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+ rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->control->debugfs_root,
@@ -1027,6 +1043,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->control);
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->primary);
+ }
+#endif
+}
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
@@ -1035,11 +1067,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
- unsigned i;
-
- for (i = 0; i < _radeon_debugfs_count; i++) {
- drm_debugfs_remove_files(_radeon_debugfs[i].files,
- _radeon_debugfs[i].num_files, minor);
- }
}
#endif
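
debugfs bookkeeping moves from a driver-global array into rdev->debugfs[], with radeon_debugfs_remove_files() tearing everything down at device fini. A hedged sketch of a component registering its files against the per-device list; the info callback and file name are hypothetical:

/* Sketch: per-component debugfs registration against rdev->debugfs[]. */
static int radeon_debugfs_foo_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct radeon_device *rdev = node->minor->dev->dev_private;

        seq_printf(m, "num crtc: %d\n", rdev->num_crtc);
        return 0;
}

static struct drm_info_list radeon_debugfs_foo_list[] = {
        {"radeon_foo_info", radeon_debugfs_foo_info, 0, NULL},
};

static int radeon_debugfs_foo_init(struct radeon_device *rdev)
{
        return radeon_debugfs_add_files(rdev, radeon_debugfs_foo_list,
                                        ARRAY_SIZE(radeon_debugfs_foo_list));
}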
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a22d6e6a49a..d3ffc18774a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
- pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+ pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
@@ -1081,7 +1081,7 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
void
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
rfb->obj = obj;
@@ -1092,15 +1092,15 @@ radeon_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handle);
+ "can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71499fc3daf..31da622eef6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -54,9 +54,10 @@
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
* 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * 2.13.0 - virtual memory support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 12
+#define KMS_DRIVER_MINOR 13
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+static const struct file_operations radeon_driver_old_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &radeon_driver_old_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
return radeon_resume_kms(dev);
}
+static const struct file_operations radeon_driver_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
+ .gem_open_object = radeon_gem_object_open,
+ .gem_close_object = radeon_gem_object_close,
.dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = radeon_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
- },
-
+ .fops = &radeon_driver_kms_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a92..4b27efa4405 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
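
radeon_encoder_get_dp_bridge_encoder_id() now returns the bridge object id (or ENCODER_OBJECT_ID_NONE) instead of a bool, so callers compare against the sentinel. A minimal sketch of such a caller; the wrapper name is an assumption:

/* Sketch: adapting a caller to the new return type. */
static bool radeon_has_dp_bridge(struct drm_encoder *encoder)
{
        /* returns the bridge's object id, or ENCODER_OBJECT_ID_NONE
         * when no TRAVIS/NUTMEG bridge sits in the path */
        return radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
               ENCODER_OBJECT_ID_NONE;
}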
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0b7b486c97e..cf2bf35b56b 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/* need to align pitch with crtc limits */
- mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+ fb_tiled) * ((bpp + 1) / 8);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitch * height;
+ size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd->pitch);
+ mode_cmd->pitches[0]);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
@@ -201,8 +205,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
rbo = gem_to_radeon_bo(gobj);
@@ -228,7 +232,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -271,7 +275,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
- DRM_INFO(" pitch is %d\n", fb->pitch);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
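
The fbdev path switches from drm_mode_fb_cmd to drm_mode_fb_cmd2: bpp/depth become a fourcc pixel_format and pitch/handle become per-plane arrays. A hedged sketch of the mapping for a 32bpp buffer; the helper is hypothetical:

/* Sketch: filling drm_mode_fb_cmd2 the way the old bpp/depth fields did. */
static void radeon_fill_fb_cmd2(struct drm_mode_fb_cmd2 *cmd,
                                u32 width, u32 height, u32 handle)
{
        u32 bpp = 32, depth = 24;

        memset(cmd, 0, sizeof(*cmd));
        cmd->width = width;
        cmd->height = height;
        /* bpp/depth pair is now encoded as a fourcc pixel format */
        cmd->pixel_format = drm_mode_legacy_fb_format(bpp, depth);
        /* pitch and handle became per-plane arrays; plane 0 is enough here */
        cmd->pitches[0] = width * ((bpp + 1) / 8);
        cmd->handles[0] = handle;
}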
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9ed8a..64ea3dd9e6f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,32 +40,24 @@
#include "radeon.h"
#include "radeon_trace.h"
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
- } else
- WREG32(rdev->fence_drv.scratch_reg, seq);
+ *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+ } else {
+ WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+ }
}
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
- u32 seq;
+ u32 seq = 0;
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
- } else
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+ } else {
+ seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+ }
return seq;
}
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (fence->emited) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (fence->emitted) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready)
+ fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+ if (!rdev->ring[fence->ring].ready)
/* FIXME: cp is not running, assume everything is done right
* away
*/
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
else
- radeon_fence_ring_emit(rdev, fence);
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emited = true;
- list_move_tail(&fence->list, &rdev->fence_drv.emited);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ fence->emitted = true;
+ list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- seq = radeon_fence_read(rdev);
- if (seq != rdev->fence_drv.last_seq) {
- rdev->fence_drv.last_seq = seq;
- rdev->fence_drv.last_jiffies = jiffies;
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ seq = radeon_fence_read(rdev, ring);
+ if (seq != rdev->fence_drv[ring].last_seq) {
+ rdev->fence_drv[ring].last_seq = seq;
+ rdev->fence_drv[ring].last_jiffies = jiffies;
+ rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
- cjiffies -= rdev->fence_drv.last_jiffies;
- if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+ cjiffies -= rdev->fence_drv[ring].last_jiffies;
+ if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
/* update the timeout */
- rdev->fence_drv.last_timeout -= cjiffies;
+ rdev->fence_drv[ring].last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
- rdev->fence_drv.last_timeout = 1;
+ rdev->fence_drv[ring].last_timeout = 1;
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
- rdev->fence_drv.last_jiffies = cjiffies;
+ rdev->fence_drv[ring].last_jiffies = cjiffies;
}
return false;
}
n = NULL;
- list_for_each(i, &rdev->fence_drv.emited) {
+ list_for_each(i, &rdev->fence_drv[ring].emitted) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_move_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv[ring].signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
- } while (i != &rdev->fence_drv.emited);
+ } while (i != &rdev->fence_drv[ring].emitted);
wake = true;
}
return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
list_del(&fence->list);
- fence->emited = false;
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ fence->emitted = false;
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ if (fence->semaphore)
+ radeon_semaphore_free(fence->rdev, fence->semaphore);
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
{
unsigned long irq_flags;
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emited = false;
+ (*fence)->emitted = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
+ (*fence)->ring = ring;
+ (*fence)->semaphore = NULL;
INIT_LIST_HEAD(&(*fence)->list);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (fence->rdev->gpu_lockup)
return true;
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
if (fence->rdev->shutdown) {
signaled = true;
}
- if (!fence->emited) {
- WARN(1, "Querying an unemited fence : %p !\n", fence);
+ if (!fence->emitted) {
+ WARN(1, "Querying an unemitted fence : %p !\n", fence);
signaled = true;
}
if (!signaled) {
- radeon_fence_poll_locked(fence->rdev);
+ radeon_fence_poll_locked(fence->rdev, fence->ring);
signaled = fence->signaled;
}
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
return signaled;
}
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
- timeout = rdev->fence_drv.last_timeout;
+ timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv.last_seq;
+ seq = rdev->fence_drv[fence->ring].last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
if (unlikely(r < 0)) {
return r;
}
} else {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
timeout = r;
goto retry;
}
- /* don't protect read access to rdev->fence_drv.last_seq
+ /* don't protect read access to rdev->fence_drv[t].last_seq
* if we experiencing a lockup the value doesn't change
*/
- if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ if (seq == rdev->fence_drv[fence->ring].last_seq &&
+ radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv.last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.next,
+ fence = list_entry(rdev->fence_drv[ring].emitted.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.prev,
+ fence = list_entry(rdev->fence_drv[ring].emitted.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
@@ -347,39 +345,95 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
bool wake;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ wake = radeon_fence_poll_locked(rdev, ring);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (wake) {
- wake_up_all(&rdev->fence_drv.queue);
+ wake_up_all(&rdev->fence_drv[ring].queue);
}
}
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (!rdev->fence_drv[ring].initialized)
+ return 0;
+
+ if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+ /* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
+ uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
- if (r) {
- dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- return r;
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ if (rdev->wb.use_event) {
+ rdev->fence_drv[ring].scratch_reg = 0;
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ } else {
+ r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+ if (r) {
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return r;
+ }
+ index = RADEON_WB_SCRATCH_OFFSET +
+ rdev->fence_drv[ring].scratch_reg -
+ rdev->scratch.reg_base;
}
- radeon_fence_write(rdev, 0);
- atomic_set(&rdev->fence_drv.seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv.created);
- INIT_LIST_HEAD(&rdev->fence_drv.emited);
- INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- init_waitqueue_head(&rdev->fence_drv.queue);
- rdev->fence_drv.initialized = true;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+ radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ rdev->fence_drv[ring].initialized = true;
+ DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+ rdev->fence_drv[ring].scratch_reg = -1;
+ rdev->fence_drv[ring].cpu_addr = NULL;
+ rdev->fence_drv[ring].gpu_addr = 0;
+ atomic_set(&rdev->fence_drv[ring].seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+ init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ int ring;
+
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ radeon_fence_driver_init_ring(rdev, ring);
+ }
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -389,14 +443,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
-
- if (!rdev->fence_drv.initialized)
- return;
- wake_up_all(&rdev->fence_drv.queue);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.initialized = false;
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_wait_last(rdev, ring);
+ wake_up_all(&rdev->fence_drv[ring].queue);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[ring].initialized = false;
+ }
}
@@ -410,14 +468,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_fence *fence;
-
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev));
- if (!list_empty(&rdev->fence_drv.emited)) {
- fence = list_entry(rdev->fence_drv.emited.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emited fence %p with 0x%08X\n",
- fence, fence->seq);
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!rdev->fence_drv[i].initialized)
+ continue;
+
+ seq_printf(m, "--- ring %d ---\n", i);
+ seq_printf(m, "Last signaled fence 0x%08X\n",
+ radeon_fence_read(rdev, i));
+ if (!list_empty(&rdev->fence_drv[i].emitted)) {
+ fence = list_entry(rdev->fence_drv[i].emitted.prev,
+ struct radeon_fence, list);
+ seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+ fence, fence->seq);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index ba7ab79e12c..010dad8b66a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- if (!rdev->gart.ttm_alloced[p])
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we reverted the patch using dma_addr in TTM for now but this
- * code stops building on alpha so just comment it out for now */
- if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
- rdev->gart.ttm_alloced[p] = true;
- rdev->gart.pages_addr[p] = dma_addr[i];
- } else {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
- }
- }
+ rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
- rdev->gart.num_cpu_pages, GFP_KERNEL);
- if (rdev->gart.ttm_alloced == NULL) {
- radeon_gart_fini(rdev);
- return -ENOMEM;
- }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
- kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
- rdev->gart.ttm_alloced = NULL;
radeon_dummy_page_fini(rdev);
}
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ int r;
+
+ rdev->vm_manager.enabled = false;
+
+ /* mark first vm as always in use, it's the system one */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ rdev->vm_manager.max_pfn * 8,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
+
+ r = rdev->vm_manager.funcs->init(rdev);
+ if (r == 0)
+ rdev->vm_manager.enabled = true;
+
+ return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ if (vm->id == -1) {
+ return;
+ }
+
+ /* wait for vm use to end */
+ if (vm->fence) {
+ radeon_fence_wait(vm->fence, false);
+ radeon_fence_unref(&vm->fence);
+ }
+
+ /* hw unbind */
+ rdev->vm_manager.funcs->unbind(rdev, vm);
+ rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+ list_del_init(&vm->list);
+ vm->id = -1;
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ vm->pt = NULL;
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL)
+ return;
+ radeon_vm_manager_suspend(rdev);
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL) {
+ return -EINVAL;
+ }
+ return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm, *tmp;
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ /* unbind all active vm */
+ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+ radeon_vm_unbind_locked(rdev, vm);
+ }
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ mutex_lock(&vm->mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex must be lock & vm mutex must be lock */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+ unsigned i;
+ int id = -1, r;
+
+ if (vm == NULL) {
+ return -EINVAL;
+ }
+
+ if (vm->id != -1) {
+ /* update lru */
+ list_del_init(&vm->list);
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return 0;
+ }
+
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+ RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+ RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (list_empty(&rdev->vm_manager.lru_vm)) {
+ return r;
+ }
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry;
+ }
+ vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+ vm->pt += (vm->sa_bo.offset >> 3);
+ vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+ vm->pt_gpu_addr += vm->sa_bo.offset;
+ memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+ /* search for free vm */
+ for (i = 0; i < rdev->vm_manager.nvm; i++) {
+ if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+ id = i;
+ break;
+ }
+ }
+ /* evict vm if necessary */
+ if (id == -1) {
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry_id;
+ }
+
+ /* do hw bind */
+ r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+ if (r) {
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ return r;
+ }
+ rdev->vm_manager.use_bitmap |= 1 << id;
+ vm->id = id;
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+ &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
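
The bind path above combines two reclaim loops: if the sub-allocator cannot fit the page table it evicts the least-recently-used VM and retries, and if no hardware id is free in use_bitmap it evicts and rescans. The following standalone sketch shows the same evict-and-retry shape with hypothetical, simplified types; it is an illustration, not part of this patch.

/* Illustrative sketch only -- simplified stand-ins for the driver's structures. */
#include <stdint.h>

#define NUM_IDS 8

struct vm_slot {
	int id;                     /* -1 while unbound                  */
	struct vm_slot *lru_next;   /* next entry toward most recent use */
};

struct vm_mgr {
	uint32_t use_bitmap;        /* bit i set => hardware id i in use */
	struct vm_slot *lru_head;   /* least recently used binding       */
};

/* Unbind the LRU entry so its id can be reused (mirrors the unbind step). */
static void evict_lru(struct vm_mgr *m)
{
	struct vm_slot *victim = m->lru_head;

	m->lru_head = victim->lru_next;
	m->use_bitmap &= ~(1u << victim->id);
	victim->id = -1;
}

/* Find a free id, evicting LRU bindings until one turns up. */
static int bind_id(struct vm_mgr *m, struct vm_slot *vm)
{
	for (;;) {
		for (int i = 0; i < NUM_IDS; i++) {
			if (!(m->use_bitmap & (1u << i))) {
				m->use_bitmap |= 1u << i;
				vm->id = i;
				return i;
			}
		}
		/* every id taken: steal the oldest binding and rescan */
		evict_lru(m);
	}
}
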
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ struct list_head *head;
+ uint64_t size = radeon_bo_size(bo), last_offset = 0;
+ unsigned last_pfn;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return -ENOMEM;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = offset;
+ bo_va->eoffset = offset + size;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+ /* make sure the object fits at this offset */
+ if (bo_va->soffset >= bo_va->eoffset) {
+ kfree(bo_va);
+ return -EINVAL;
+ }
+
+ last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ kfree(bo_va);
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vm->mutex);
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ kfree(bo_va);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+ list_add(&bo_va->vm_list, head);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+ return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+ struct ttm_mem_reg *mem,
+ unsigned pfn)
+{
+ u64 addr = 0;
+
+ switch (mem->mem_type) {
+ case TTM_PL_VRAM:
+ addr = (mem->start << PAGE_SHIFT);
+ addr += pfn * RADEON_GPU_PAGE_SIZE;
+ addr += rdev->vm_manager.vram_base_offset;
+ break;
+ case TTM_PL_TT:
+ /* offset inside page table */
+ addr = mem->start << PAGE_SHIFT;
+ addr += pfn * RADEON_GPU_PAGE_SIZE;
+ addr = addr >> PAGE_SHIFT;
+ /* page table offset */
+ addr = rdev->gart.pages_addr[addr];
+ /* in case cpu page size != gpu page size */
+ addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+ break;
+ default:
+ break;
+ }
+ return addr;
+}
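
For the TTM_PL_TT case, radeon_vm_get_addr turns the BO offset into a CPU page index, looks up that page's DMA address in the GART table, and re-adds the offset inside the CPU page so the scheme still works when the CPU page is larger than the GPU page. A minimal sketch of that arithmetic, assuming 4 KiB CPU pages and using a stand-in pages_addr[] array (both assumptions of this example, not the patch):

/* Sketch of the TTM_PL_TT arithmetic above. */
#include <stdint.h>

#define CPU_PAGE_SHIFT 12u                   /* 4 KiB CPU pages (assumed) */
#define CPU_PAGE_SIZE  (1u << CPU_PAGE_SHIFT)
#define GPU_PAGE_SIZE  4096u                 /* RADEON_GPU_PAGE_SIZE      */

static uint64_t gtt_addr(const uint64_t *pages_addr,
			 uint64_t bo_start_page, unsigned pfn)
{
	/* byte offset of this GPU page inside the GTT placement */
	uint64_t off = (bo_start_page << CPU_PAGE_SHIFT) +
		       pfn * (uint64_t)GPU_PAGE_SIZE;
	/* DMA address of the CPU page backing that offset */
	uint64_t dma = pages_addr[off >> CPU_PAGE_SHIFT];

	/* re-add the offset within the CPU page; non-zero only when the
	 * CPU page is larger than the GPU page (e.g. 64 KiB pages) */
	return dma + (pfn * (uint64_t)GPU_PAGE_SIZE) % CPU_PAGE_SIZE;
}
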
+
+/* object have to be reserved & cs mutex took & vm mutex took */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct radeon_bo_va *bo_va;
+ unsigned ngpu_pages, i;
+ uint64_t addr = 0, pfn;
+ uint32_t flags;
+
+ /* nothing to do if vm isn't bound */
+ if (vm->id == -1)
+ return 0;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (bo_va->valid)
+ return 0;
+
+ ngpu_pages = radeon_bo_ngpu_pages(bo);
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ }
+ }
+ pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+ flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
+ for (i = 0, addr = 0; i < ngpu_pages; i++) {
+ if (mem && bo_va->valid) {
+ addr = radeon_vm_get_addr(rdev, mem, i);
+ }
+ rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+ }
+ rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+ return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL)
+ return 0;
+
+ list_del(&bo_va->bo_list);
+ mutex_lock(&vm->mutex);
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+
+ kfree(bo_va);
+ return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ int r;
+
+ vm->id = -1;
+ vm->fence = NULL;
+ mutex_init(&vm->mutex);
+ INIT_LIST_HEAD(&vm->list);
+ INIT_LIST_HEAD(&vm->va);
+ vm->last_pfn = 0;
+ /* map the ib pool buffer at 0 in virtual address space, set
+ * read only
+ */
+ r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+ return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ mutex_lock(&vm->mutex);
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+
+ /* remove all bo */
+ r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ if (!r) {
+ bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ list_del_init(&bo_va->bo_list);
+ list_del_init(&bo_va->vm_list);
+ radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ kfree(bo_va);
+ }
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+ mutex_unlock(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa1ca2dea42..7337850af2f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
radeon_bo_force_delete(rdev);
}
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and
+ * open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va, *tmp;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return;
+ }
+
+ if (radeon_bo_reserve(rbo, false)) {
+ return;
+ }
+ list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ /* remove from this vm address space */
+ mutex_lock(&vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+ }
+ radeon_bo_unreserve(rbo);
+}
+
/*
* GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
+ unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
- RADEON_IB_POOL_SIZE*64*1024;
+ args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+ for(i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
@@ -352,6 +392,109 @@ out:
return r;
}
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_va *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv = filp->driver_priv;
+ struct radeon_bo *rbo;
+ struct radeon_bo_va *bo_va;
+ u32 invalid_flags;
+ int r = 0;
+
+ if (!rdev->vm_manager.enabled) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOTTY;
+ }
+
+ /* !! DON'T REMOVE !!
+ * We don't support vm_id yet. To be sure we don't have broken
+ * userspace, reject anyone trying to use a non-zero value; that way
+ * we can later use those fields without breaking existing userspace.
+ */
+ if (args->vm_id) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ if (args->offset < RADEON_VA_RESERVED_SIZE) {
+ dev_err(&dev->pdev->dev,
+ "offset 0x%lX is in reserved area 0x%X\n",
+ (unsigned long)args->offset,
+ RADEON_VA_RESERVED_SIZE);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ /* don't remove: we need to force userspace to set the snooped flag,
+ * otherwise we will end up with broken userspace and won't be able
+ * to enable this feature without adding a new interface
+ */
+ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+ if ((args->flags & invalid_flags)) {
+ dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ args->flags, invalid_flags);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+ if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+ dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ case RADEON_VA_UNMAP:
+ break;
+ default:
+ dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ args->operation);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOENT;
+ }
+ rbo = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+ }
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ bo_va = radeon_bo_va(rbo, &fpriv->vm);
+ if (bo_va) {
+ args->operation = RADEON_VA_RESULT_VA_EXIST;
+ args->offset = bo_va->soffset;
+ goto out;
+ }
+ r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+ args->offset, args->flags);
+ break;
+ case RADEON_VA_UNMAP:
+ r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+ break;
+ default:
+ break;
+ }
+ args->operation = RADEON_VA_RESULT_OK;
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ }
+out:
+ radeon_bo_unreserve(rbo);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 8f86aeb2669..be38921bf76 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
unsigned i;
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned i;
dev->max_vblank_count = 0x001fffff;
- rdev->irq.sw_int = true;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = true;
radeon_irq_set(rdev);
return 0;
}
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
return;
}
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -194,26 +198,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
- rdev->irq.sw_int = true;
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+ rdev->irq.sw_int[ring] = true;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
- rdev->irq.sw_int = false;
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+ rdev->irq.sw_int[ring] = false;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index be2c1224e68..d3352889a87 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_VA_START:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_VA_RESERVED_SIZE;
+ break;
+ case RADEON_INFO_IB_VM_MAX_SIZE:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_IB_VM_MAX_SIZE;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
return 0;
}
-
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ file_priv->driver_priv = NULL;
+
+ /* new GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN) {
+ struct radeon_fpriv *fpriv;
+ int r;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv)) {
+ return -ENOMEM;
+ }
+
+ r = radeon_vm_init(rdev, &fpriv->vm);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
+
+ file_priv->driver_priv = fpriv;
+ }
return 0;
}
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* new GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ file_priv->driver_priv = NULL;
+ }
}
void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index daadf211104..25a19c48307 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
crtc_offset_cntl = 0;
- pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 2c2e75ef8a3..08ff857c8fd 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -643,7 +643,7 @@ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green
u16 *blue, int regno);
void radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1c851521f45..d45df176359 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+ /* remove from all vm address space */
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+}
+
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
unsigned long max_size = 0;
+ size_t acc_size;
int r;
size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
return -ENOMEM;
}
+ acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+ sizeof(struct radeon_bo));
+
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL, size,
- &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, 0, !kernel, NULL,
+ acc_size, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
return;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
+ radeon_vm_bo_invalidate(rbo->rdev, rbo);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
}
return 0;
}
+
+/* object has to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index b07f0f9b862..cde43030887 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
return !!atomic_read(&bo->tbo.reserved);
}
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+ return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+ return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
+ struct radeon_vm *vm);
+
+/*
+ * sub allocation
+ */
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+ struct radeon_sa_bo *sa_bo);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 78a665bd951..095148e29a1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- mutex_lock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_lock(&rdev->ring[i].mutex);
+ }
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
- if (rdev->cp.ready) {
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ if (ring->ready) {
struct radeon_fence *fence;
- radeon_ring_alloc(rdev, 64);
- radeon_fence_create(rdev, &fence);
+ radeon_ring_alloc(rdev, ring, 64);
+ radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev);
+ radeon_ring_commit(rdev, ring);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- mutex_unlock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_unlock(&rdev->ring[i].mutex);
+ }
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
- unsigned long irq_flags;
int not_processed = 0;
+ int i;
- read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (!list_empty(&rdev->fence_drv.emited)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv.emited) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
}
- read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 49d58202202..e8bc70933d1 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,7 @@
#include "atom.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+int radeon_debugfs_ring_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
- if (rdev->cp.count_dw <= 0) {
+ if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#endif
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
- struct radeon_ib *ib, *n;
-
- list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
- list_del(&ib->list);
- vfree(ib->ptr);
- kfree(ib);
- }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *bib;
-
- bib = kmalloc(sizeof(*bib), GFP_KERNEL);
- if (bib == NULL)
- return;
- bib->ptr = vmalloc(ib->length_dw * 4);
- if (bib->ptr == NULL) {
- kfree(bib);
- return;
+ bool done = false;
+
+ /* only free IBs which have been emitted */
+ if (ib->fence && ib->fence->emitted) {
+ if (radeon_fence_signaled(ib->fence)) {
+ radeon_fence_unref(&ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo);
+ done = true;
+ }
}
- memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
- bib->length_dw = ib->length_dw;
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
- mutex_unlock(&rdev->ib_pool.mutex);
+ return done;
}
-/*
- * IB.
- */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size)
{
struct radeon_fence *fence;
- struct radeon_ib *nib;
- int r = 0, i, c;
+ unsigned cretry = 0;
+ int r = 0, i, idx;
*ib = NULL;
- r = radeon_fence_create(rdev, &fence);
+ /* align size on 256 bytes */
+ size = ALIGN(size, 256);
+
+ r = radeon_fence_create(rdev, &fence, ring);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
+
mutex_lock(&rdev->ib_pool.mutex);
- for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
- i &= (RADEON_IB_POOL_SIZE - 1);
- if (rdev->ib_pool.ibs[i].free) {
- nib = &rdev->ib_pool.ibs[i];
- break;
- }
- }
- if (nib == NULL) {
- /* This should never happen, it means we allocated all
- * IB and haven't scheduled one yet, return EBUSY to
- * userspace hoping that on ioctl recall we get better
- * luck
- */
- dev_err(rdev->dev, "no free indirect buffer !\n");
+ idx = rdev->ib_pool.head_id;
+retry:
+ if (cretry > 5) {
+ dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
- return -EBUSY;
+ return -ENOMEM;
}
- rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- nib->free = false;
- if (nib->fence) {
- mutex_unlock(&rdev->ib_pool.mutex);
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
- nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
- mutex_lock(&rdev->ib_pool.mutex);
- nib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+ cretry++;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+ if (rdev->ib_pool.ibs[idx].fence == NULL) {
+ r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+ &rdev->ib_pool.ibs[idx].sa_bo,
+ size, 256);
+ if (!r) {
+ *ib = &rdev->ib_pool.ibs[idx];
+ (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+ (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+ (*ib)->fence = fence;
+ (*ib)->vm_id = 0;
+ /* IBs are most likely to be allocated in a ring fashion,
+ * thus rdev->ib_pool.head_id should be the id of the
+ * oldest ib
+ */
+ rdev->ib_pool.head_id = (1 + idx);
+ rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+ }
}
- mutex_lock(&rdev->ib_pool.mutex);
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ }
+ /* this should be a rare event, i.e. all IBs scheduled but none signaled yet.
+ */
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
+ r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+ if (!r) {
+ goto retry;
+ }
+ /* an error happened */
+ break;
+ }
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
}
- radeon_fence_unref(&nib->fence);
- nib->fence = fence;
- nib->length_dw = 0;
mutex_unlock(&rdev->ib_pool.mutex);
- *ib = nib;
- return 0;
+ radeon_fence_unref(&fence);
+ return r;
}
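
radeon_ib_get above treats the fixed IB pool as a ring: starting at head_id it tries to reclaim each slot whose fence has signaled, takes the first free one, and only falls back to blocking on an emitted fence when every slot is busy. A stripped-down sketch of that scan order, using a hypothetical slot type rather than the driver's structures:

/* Illustrative only: 'struct slot' is a stand-in for the IB pool entries. */
#include <stdbool.h>

#define POOL_SIZE 16	/* a power of two, like RADEON_IB_POOL_SIZE */

struct slot {
	bool busy;		/* an IB currently owns this slot      */
	bool fence_signaled;	/* its fence signaled, may be reclaimed */
};

/* Return a slot index, or -1 when every slot is still in flight. */
static int pool_get(struct slot *pool, unsigned *head)
{
	unsigned idx = *head;

	for (int i = 0; i < POOL_SIZE; i++) {
		if (pool[idx].busy && pool[idx].fence_signaled)
			pool[idx].busy = false;		/* lazy reclaim */
		if (!pool[idx].busy) {
			pool[idx].busy = true;
			/* keep *head pointing at the oldest allocation */
			*head = (idx + 1) & (POOL_SIZE - 1);
			return idx;
		}
		idx = (idx + 1) & (POOL_SIZE - 1);
	}
	return -1;	/* caller would block on the oldest emitted fence */
}
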
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,255 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- if (!tmp->fence->emited)
- radeon_fence_unref(&tmp->fence);
mutex_lock(&rdev->ib_pool.mutex);
- tmp->free = true;
+ if (tmp->fence && !tmp->fence->emitted) {
+ radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ radeon_fence_unref(&tmp->fence);
+ }
mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
int r = 0;
- if (!ib->length_dw || !rdev->cp.ready) {
+ if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib);
+ radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
radeon_fence_emit(rdev, ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- /* once scheduled IB is considered free and protected by the fence */
- ib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- void *ptr;
- uint64_t gpu_addr;
- int i;
- int r = 0;
+ int i, r;
- if (rdev->ib_pool.robj)
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (rdev->ib_pool.ready) {
+ mutex_unlock(&rdev->ib_pool.mutex);
return 0;
- INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
- /* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
- if (r) {
- DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
- return r;
}
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->ib_pool.robj);
- DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
- return r;
- }
- r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GEM_DOMAIN_GTT);
if (r) {
- DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+ mutex_unlock(&rdev->ib_pool.mutex);
return r;
}
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- unsigned offset;
- offset = i * 64 * 1024;
- rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
- rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ rdev->ib_pool.ibs[i].fence = NULL;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- rdev->ib_pool.ibs[i].free = true;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
+
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
- return r;
+ if (radeon_debugfs_ring_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
+ mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- int r;
- struct radeon_bo *robj;
+ unsigned i;
- if (!rdev->ib_pool.ready) {
- return;
- }
mutex_lock(&rdev->ib_pool.mutex);
- radeon_ib_bogus_cleanup(rdev);
- robj = rdev->ib_pool.robj;
- rdev->ib_pool.robj = NULL;
- mutex_unlock(&rdev->ib_pool.mutex);
-
- if (robj) {
- r = radeon_bo_reserve(robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(robj);
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
+ if (rdev->ib_pool.ready) {
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
- radeon_bo_unref(&robj);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+ rdev->ib_pool.ready = false;
}
+ mutex_unlock(&rdev->ib_pool.mutex);
}
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+}
+
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
/*
* Ring.
*/
-void radeon_ring_free_size(struct radeon_device *rdev)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
- if (rdev->wb.enabled)
- rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
- else {
- if (rdev->family >= CHIP_R600)
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- else
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ /* r1xx-r5xx only has CP ring */
+ if (rdev->family < CHIP_R600)
+ return RADEON_RING_TYPE_GFX_INDEX;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
+ return CAYMAN_RING_TYPE_CP1_INDEX;
+ else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
+ return CAYMAN_RING_TYPE_CP2_INDEX;
}
+ return RADEON_RING_TYPE_GFX_INDEX;
+}
+
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
- rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
- rdev->cp.ring_free_dw -= rdev->cp.wptr;
- rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
- if (!rdev->cp.ring_free_dw) {
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+ ring->ring_free_dw -= ring->wptr;
+ ring->ring_free_dw &= ring->ptr_mask;
+ if (!ring->ring_free_dw) {
+ ring->ring_free_dw = ring->ring_size / 4;
}
}
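
The free-space computation above relies on ring_size/4 being a power of two, so the rptr/wptr subtraction can simply wrap through ptr_mask. A small worked example with assumed ring parameters:

#include <stdio.h>

int main(void)
{
	/* assume a 16 KiB ring: 4096 dwords, so ptr_mask = 4095 */
	unsigned ring_dw = 4096, ptr_mask = ring_dw - 1;
	unsigned rptr = 100, wptr = 4000;

	/* same arithmetic as radeon_ring_free_size() */
	unsigned free_dw = (rptr + ring_dw - wptr) & ptr_mask;

	printf("%u free dwords\n", free_dw);	/* prints "196 free dwords" */

	/* when rptr == wptr the mask yields 0, which the driver treats as
	 * a completely empty ring and resets to ring_size / 4 */
	return 0;
}
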
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- while (ndw > (rdev->cp.ring_free_dw - 1)) {
- radeon_ring_free_size(rdev);
- if (ndw < rdev->cp.ring_free_dw) {
+ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ while (ndw > (ring->ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev, ring);
+ if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev);
+ r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
- rdev->cp.count_dw = ndw;
- rdev->cp.wptr_old = rdev->cp.wptr;
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
return 0;
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
- mutex_lock(&rdev->cp.mutex);
- r = radeon_ring_alloc(rdev, ndw);
+ mutex_lock(&ring->mutex);
+ r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&ring->mutex);
return r;
}
return 0;
}
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;
/* We pad to match fetch size */
- count_dw_pad = (rdev->cp.align_mask + 1) -
- (rdev->cp.wptr & rdev->cp.align_mask);
+ count_dw_pad = (ring->align_mask + 1) -
+ (ring->wptr & ring->align_mask);
for (i = 0; i < count_dw_pad; i++) {
- radeon_ring_write(rdev, 2 << 30);
+ radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
- radeon_cp_commit(rdev);
+ WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ (void)RREG32(ring->wptr_reg);
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- radeon_ring_commit(rdev);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_ring_commit(rdev, ring);
+ mutex_unlock(&ring->mutex);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
- rdev->cp.wptr = rdev->cp.wptr_old;
- mutex_unlock(&rdev->cp.mutex);
+ ring->wptr = ring->wptr_old;
+ mutex_unlock(&ring->mutex);
}
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
- rdev->cp.ring_size = ring_size;
+ ring->ring_size = ring_size;
+ ring->rptr_offs = rptr_offs;
+ ring->rptr_reg = rptr_reg;
+ ring->wptr_reg = wptr_reg;
+ ring->ptr_reg_shift = ptr_reg_shift;
+ ring->ptr_reg_mask = ptr_reg_mask;
+ ring->nop = nop;
/* Allocate ring buffer */
- if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+ if (ring->ring_obj == NULL) {
+ r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.ring_obj);
+ &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &ring->gpu_addr);
if (r) {
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_bo_kmap(rdev->cp.ring_obj,
- (void **)&rdev->cp.ring);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ r = radeon_bo_kmap(ring->ring_obj,
+ (void **)&ring->ring);
+ radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
- rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ptr_mask = (ring->ring_size / 4) - 1;
+ ring->ring_free_dw = ring->ring_size / 4;
return 0;
}
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&rdev->cp.mutex);
- ring_obj = rdev->cp.ring_obj;
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
- mutex_unlock(&rdev->cp.mutex);
+ mutex_lock(&ring->mutex);
+ ring_obj = ring->ring_obj;
+ ring->ring = NULL;
+ ring->ring_obj = NULL;
+ mutex_unlock(&ring->mutex);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +432,83 @@ void radeon_ring_fini(struct radeon_device *rdev)
}
}
-
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_ib *ib = node->info_ent->data;
- unsigned i;
-
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ridx = *(int*)node->info_ent->data;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev, ring);
+ count = (ring->ring_size / 4) - ring->ring_free_dw;
+ seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
+ seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ i = ring->rptr;
+ for (j = 0; j <= count; j++) {
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
}
return 0;
}
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+};
+
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_device *rdev = node->info_ent->data;
- struct radeon_ib *ib;
+ struct radeon_ib *ib = node->info_ent->data;
unsigned i;
- mutex_lock(&rdev->ib_pool.mutex);
- if (list_empty(&rdev->ib_pool.bogus_ib)) {
- mutex_unlock(&rdev->ib_pool.mutex);
- seq_printf(m, "no bogus IB recorded\n");
+ if (ib == NULL) {
return 0;
}
- ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
- list_del_init(&ib->list);
- mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB %04u\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
- vfree(ib->ptr);
- kfree(ib);
return 0;
}
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
- {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
+int radeon_debugfs_ring_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
+ ARRAY_SIZE(radeon_debugfs_ring_info_list));
+#else
+ return 0;
#endif
+}
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
- int r;
- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
- r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
- if (r)
- return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 00000000000..4cce47e7dc0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain)
+{
+ int r;
+
+ sa_manager->bo = NULL;
+ sa_manager->size = size;
+ sa_manager->domain = domain;
+ INIT_LIST_HEAD(&sa_manager->sa_bo);
+
+ r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (!list_empty(&sa_manager->sa_bo)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
+ list_del_init(&sa_bo->list);
+ }
+ radeon_bo_unref(&sa_manager->bo);
+ sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ /* map the buffer */
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(sa_manager->bo);
+ dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ radeon_bo_unreserve(sa_manager->bo);
+ return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (!r) {
+ radeon_bo_kunmap(sa_manager->bo);
+ radeon_bo_unpin(sa_manager->bo);
+ radeon_bo_unreserve(sa_manager->bo);
+ }
+ return r;
+}
+
+/*
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the
+ * highest offset).
+ *
+ * When allocating a new object we first check whether there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we place the new object there.
+ *
+ * When there is not enough room at the end, we walk the list looking
+ * for the first gap between consecutive sub-objects that is large
+ * enough, and place the new sub-object in that hole.
+ *
+ * Alignment can't be bigger than the page size.
+ */
+int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ struct radeon_sa_bo *tmp;
+ struct list_head *head;
+ unsigned offset = 0, wasted = 0;
+
+ BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(size > sa_manager->size);
+
+ /* nothing allocated yet ? */
+ head = sa_manager->sa_bo.prev;
+ if (list_empty(&sa_manager->sa_bo)) {
+ goto out;
+ }
+
+ /* look for a hole big enough */
+ offset = 0;
+ list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+ /* room before this object ? */
+ if ((tmp->offset - offset) >= size) {
+ head = tmp->list.prev;
+ goto out;
+ }
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ }
+ /* room at the end ? */
+ head = sa_manager->sa_bo.prev;
+ tmp = list_entry(head, struct radeon_sa_bo, list);
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ if ((sa_manager->size - offset) < size) {
+ /* failed to find something big enough */
+ return -ENOMEM;
+ }
+
+out:
+ sa_bo->manager = sa_manager;
+ sa_bo->offset = offset;
+ sa_bo->size = size;
+ list_add(&sa_bo->list, head);
+ return 0;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+ list_del_init(&sa_bo->list);
+}
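
radeon_sa_bo_new() above is a single first-fit pass over a list kept sorted by
offset. A minimal standalone sketch of that scan, using hypothetical stand-in
types rather than the driver's structures (align must be non-zero):

struct sa_node {
	unsigned offset, size;
	struct sa_node *next;	/* kept sorted by offset */
};

/* First-fit: try the hole in front of each node, then the tail of the
 * buffer. Returns 0 and the chosen offset, or -1 when nothing fits. */
static int sa_find_offset(struct sa_node *head, unsigned total,
			  unsigned size, unsigned align, unsigned *out)
{
	unsigned offset = 0;
	struct sa_node *n;

	for (n = head; n; n = n->next) {
		if (n->offset >= offset && n->offset - offset >= size) {
			*out = offset;		/* hole before this node */
			return 0;
		}
		offset = n->offset + n->size;
		if (offset % align)		/* pad up to alignment */
			offset += align - (offset % align);
	}
	if (offset <= total && total - offset >= size) {
		*out = offset;			/* room at the end */
		return 0;
	}
	return -1;
}
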
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 00000000000..61dd4e3c920
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ int r, i;
+
+
+ bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
+ if (bo == NULL) {
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&bo->free);
+ INIT_LIST_HEAD(&bo->list);
+ bo->nused = 0;
+
+ r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
+ kfree(bo);
+ return r;
+ }
+ gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ gpu_addr += bo->ib->sa_bo.offset;
+ cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ cpu_ptr += (bo->ib->sa_bo.offset >> 2);
+ for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
+ bo->semaphores[i].gpu_addr = gpu_addr;
+ bo->semaphores[i].cpu_ptr = cpu_ptr;
+ bo->semaphores[i].bo = bo;
+ list_add_tail(&bo->semaphores[i].list, &bo->free);
+ gpu_addr += 8;
+ cpu_ptr += 2;
+ }
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ return 0;
+}
+
+static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
+ struct radeon_semaphore_bo *bo)
+{
+ radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+ radeon_fence_unref(&bo->ib->fence);
+ list_del(&bo->list);
+ kfree(bo);
+}
+
+void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+
+ if (list_empty(&rdev->semaphore_drv.bo)) {
+ return;
+ }
+ /* only shrink if the first bo has a free semaphore */
+ bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
+ if (list_empty(&bo->free)) {
+ return;
+ }
+ list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (bo->nused)
+ continue;
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+}
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ bool do_retry = true;
+ int r;
+
+retry:
+ *semaphore = NULL;
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
+ if (list_empty(&bo->free))
+ continue;
+ *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
+ (*semaphore)->cpu_ptr[0] = 0;
+ (*semaphore)->cpu_ptr[1] = 0;
+ list_del(&(*semaphore)->list);
+ bo->nused++;
+ break;
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+
+ if (*semaphore == NULL) {
+ if (do_retry) {
+ do_retry = false;
+ r = radeon_semaphore_add_bo(rdev);
+ if (r)
+ return r;
+ goto retry;
+ }
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore)
+{
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ semaphore->bo->nused--;
+ list_add_tail(&semaphore->list, &semaphore->bo->free);
+ radeon_semaphore_shrink_locked(rdev);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ /* forcibly free everything */
+ list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (!list_empty(&bo->free)) {
+ dev_err(rdev->dev, "still in use semaphore\n");
+ }
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
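
The intended calling pattern for this file's API is: create a semaphore, emit a
wait packet on the consuming ring, emit a signal packet on the producing ring,
and free it once the GPU has executed both packets. A hedged sketch of that
flow (the ring indices and the dword count passed to radeon_ring_lock() are
placeholders; the tests in radeon_test.c below additionally wait on fences
before freeing, which real callers should do as well):

static int example_ring_handoff(struct radeon_device *rdev,
				int waiter, int signaler)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	r = radeon_ring_lock(rdev, &rdev->ring[waiter], 8);
	if (r)
		goto out;
	radeon_semaphore_emit_wait(rdev, waiter, sem);	/* waiter stalls here */
	radeon_ring_unlock_commit(rdev, &rdev->ring[waiter]);

	r = radeon_ring_lock(rdev, &rdev->ring[signaler], 8);
	if (r)
		goto out;
	radeon_semaphore_emit_signal(rdev, signaler, sem); /* releases the waiter */
	radeon_ring_unlock_commit(rdev, &rdev->ring[signaler]);
out:
	/* simplified: real code waits for the rings (e.g. via fences)
	 * before returning the semaphore to the pool */
	radeon_semaphore_free(rdev, sem);
	return r;
}
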
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 602fa3541c4..dc5dcf483aa 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->ring[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
+
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB)
+{
+ struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int r;
+
+ r = radeon_fence_create(rdev, &fence1, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fence2, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence1);
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence2);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence1)) {
+ DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence1, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence2)) {
+ DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence2, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fence1)
+ radeon_fence_unref(&fence1);
+
+ if (fence2)
+ radeon_fence_unref(&fence2);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_ring_sync2(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB,
+ struct radeon_ring *ringC)
+{
+ struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int ridxC = radeon_ring_index(rdev, ringC);
+ bool sigA, sigB;
+ int i, r;
+
+ r = radeon_fence_create(rdev, &fenceA, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fenceB, ridxB);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fenceA);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
+ radeon_fence_emit(rdev, fenceB);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fenceA)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+ if (radeon_fence_signaled(fenceB)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ for (i = 0; i < 30; ++i) {
+ mdelay(100);
+ sigA = radeon_fence_signaled(fenceA);
+ sigB = radeon_fence_signaled(fenceB);
+ if (sigA || sigB)
+ break;
+ }
+
+ if (!sigA && !sigB) {
+ DRM_ERROR("Neither fence A nor B has been signaled\n");
+ goto out_cleanup;
+ } else if (sigA && sigB) {
+ DRM_ERROR("Both fence A and B has been signaled\n");
+ goto out_cleanup;
+ }
+
+ DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ mdelay(1000);
+
+ r = radeon_fence_wait(fenceA, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence A\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fenceB, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence B\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fenceA)
+ radeon_fence_unref(&fenceA);
+
+ if (fenceB)
+ radeon_fence_unref(&fenceB);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+ int i, j, k;
+
+ for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ringA = &rdev->ring[i];
+ if (!ringA->ready)
+ continue;
+
+ for (j = 0; j < i; ++j) {
+ struct radeon_ring *ringB = &rdev->ring[j];
+ if (!ringB->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+ radeon_test_ring_sync(rdev, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+ radeon_test_ring_sync(rdev, ringB, ringA);
+
+ for (k = 0; k < j; ++k) {
+ struct radeon_ring *ringC = &rdev->ring[k];
+ if (!ringC->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+ radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+ radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+ radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+ radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+ radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+ radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0b5468bfaf5..c421e77ace7 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
}
}
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
- struct radeon_device *rdev;
-
- rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
- } else
-#endif
- {
- return radeon_ttm_backend_create(rdev);
- }
-}
-
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->cp.ready == false)
+ if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
- int r;
+ int r, i;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
if (unlikely(r)) {
return r;
}
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->cp.ready) {
- DRM_ERROR("Trying to move memory with CP turned off.\n");
+ if (!rdev->ring[rdev->copy_ring].ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+ /* sync other rings */
+ if (rdev->family >= CHIP_R600) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == rdev->copy_ring || !rdev->ring[i].ready)
+ continue;
+
+ if (!fence->semaphore) {
+ r = radeon_semaphore_create(rdev, &fence->semaphore);
+ /* FIXME: handle semaphore error */
+ if (r)
+ continue;
+ }
+
+ r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+ r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+ }
+ }
+
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+ if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ u64 offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_tt *gtt = (void*)ttm;
+ int r;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ ttm->num_pages, (unsigned)gtt->offset);
+ return r;
+ }
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+ size, page_flags, dummy_read_page);
+ }
+#endif
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->ttm.ttm.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
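/*
 * Illustration (not driver code) of the unwind shape in
 * radeon_ttm_tt_populate() above: when mapping page i fails, every page
 * mapped so far, including page 0, has to be unmapped before returning,
 * which is what a post-decrement "while (i--)" rollback loop gives you
 * (a pre-decrement "--i" form would skip index 0).
 */
#include <stdio.h>

static int do_step(int i)    { return i == 3 ? -1 : 0; } /* step 3 fails */
static void undo_step(int i) { printf("undo %d\n", i); }

int main(void)
{
	int i, n = 5;

	for (i = 0; i < n; i++) {
		if (do_step(i)) {
			while (i--)          /* rolls back 2, 1 and 0 */
				undo_step(i);
			return 1;
		}
	}
	return 0;
}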
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (gtt->ttm.dma_address[i]) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
- .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+ .ttm_tt_create = &radeon_ttm_tt_create,
+ .ttm_tt_populate = &radeon_ttm_tt_populate,
+ .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
}
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
- struct ttm_backend backend;
- struct radeon_device *rdev;
- unsigned long num_pages;
- struct page **pages;
- struct page *dummy_read_page;
- dma_addr_t *dma_addrs;
- bool populated;
- bool bound;
- unsigned offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
- unsigned long num_pages,
- struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = pages;
- gtt->dma_addrs = dma_addrs;
- gtt->num_pages = num_pages;
- gtt->dummy_read_page = dummy_read_page;
- gtt->populated = true;
- return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = NULL;
- gtt->dma_addrs = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
- struct ttm_mem_reg *bo_mem)
-{
- struct radeon_ttm_backend *gtt;
- int r;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->offset = bo_mem->start << PAGE_SHIFT;
- if (!gtt->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- gtt->num_pages, bo_mem, backend);
- }
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages, gtt->dma_addrs);
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- gtt->num_pages, gtt->offset);
- return r;
- }
- gtt->bound = true;
- return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
- gtt->bound = false;
- return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- if (gtt->bound) {
- radeon_ttm_backend_unbind(backend);
- }
- kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
- .populate = &radeon_ttm_backend_populate,
- .clear = &radeon_ttm_backend_clear,
- .bind = &radeon_ttm_backend_bind,
- .unbind = &radeon_ttm_backend_unbind,
- .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
- if (gtt == NULL) {
- return NULL;
- }
- gtt->backend.bdev = &rdev->mman.bdev;
- gtt->backend.flags = 0;
- gtt->backend.func = &radeon_backend_func;
- gtt->rdev = rdev;
- gtt->pages = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
- return &gtt->backend;
-}
-
#define RADEON_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i].data = NULL;
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+ radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+ }
+#endif
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 06b90c87f8f..b0ce84a20a6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
@@ -447,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs400_startup(rdev);
}
int rs400_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -530,7 +546,14 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs400_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f6..803e0d3c177 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -319,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
void rs600_bm_disable(struct radeon_device *rdev)
{
- u32 tmp;
+ u16 tmp;
/* disable bus mastering */
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_read_config_word(rdev->pdev, 0x4, &tmp);
pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
}
@@ -544,7 +549,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
@@ -637,7 +642,7 @@ int rs600_irq_process(struct radeon_device *rdev)
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
@@ -844,6 +849,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -853,15 +864,21 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -886,11 +903,14 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs600_startup(rdev);
}
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -971,7 +991,14 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs600_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index a9049ed1a51..4f24a0fa8c8 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -663,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rs690_startup(rdev);
}
int rs690_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -749,7 +764,14 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs690_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 6613ee9ecca..880637fd194 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
void rv515_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
- radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
- radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+ radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
- radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
- radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
- radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
- radeon_ring_write(rdev, PACKET0(0x20C8, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+ radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+ radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+ radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+ radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+ radeon_ring_write(ring, PACKET0(0x20C8, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -428,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+
+ rdev->accel_working = true;
return rv515_startup(rdev);
}
@@ -524,7 +539,14 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv515_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab8..a1668b659dd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -352,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
/*
@@ -1038,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1077,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1086,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
@@ -1096,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -1110,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- return r;
- }
-
r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "radeon: audio init failed\n");
@@ -1135,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -1210,8 +1231,8 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1220,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r) {
@@ -1260,11 +1275,12 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 5468d1cd329..89afe0b8364 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
+static const struct file_operations savage_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
.reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &savage_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index a9c5716bea4..06da063ece2 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -48,9 +48,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret)
- kfree(dev_priv);
+ idr_init(&dev->object_name_idr);
return ret;
}
@@ -59,32 +57,60 @@ static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
+
kfree(dev_priv);
return 0;
}
+static const struct file_operations sis_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
+ .open = sis_driver_open,
+ .postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &sis_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 194303c177a..573758b2d2d 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,7 +44,7 @@ enum sis_family {
SIS_CHIP_315 = 1,
};
-#include "drm_sman.h"
+#include "drm_mm.h"
#define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
- struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
+ struct drm_mm vram_mm;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63412c..dd4a316c3d7 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,40 +41,18 @@
#define AGP_TYPE 1
+struct sis_memblock {
+ struct drm_mm_node mm_node;
+ struct sis_memreq req;
+ struct list_head owner_list;
+};
+
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct sis_memreq req;
-
- req.size = size;
- sis_malloc(&req);
- if (req.size == 0)
- return NULL;
- else
- return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
- sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
- ;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
- return ~((unsigned long)ref);
-}
-
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t *fb = data;
- int ret;
mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- {
- struct drm_sman_mm sman_mm;
- sman_mm.private = (void *)0xFFFFFFFF;
- sman_mm.allocate = sis_sman_mm_allocate;
- sman_mm.free = sis_sman_mm_free;
- sman_mm.destroy = sis_sman_mm_destroy;
- sman_mm.offset = sis_sman_mm_offset;
- ret =
- drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
- }
-#else
- ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
- fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ /* Unconditionally init the drm_mm, even though we don't use it when the
+ * fb sis driver is available - this makes cleanup easier. */
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct sis_memblock *item;
+ struct sis_file_private *file_priv = file->driver_priv;
+ unsigned long offset;
mutex_lock(&dev->struct_mutex);
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
return -EINVAL;
}
- mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
- (unsigned long)file_priv);
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((pool == 0) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
- mem->free = item->user_hash.key;
- mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+ mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+ if (pool == AGP_TYPE) {
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
} else {
- mem->offset = 0;
- mem->size = 0;
- mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ item->req.size = mem->size;
+ sis_malloc(&item->req);
+ if (item->req.size == 0)
+ retval = -ENOMEM;
+ offset = item->req.offset;
+#else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
+#endif
+ }
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((pool == 0) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ (offset << SIS_MM_ALIGN_SHIFT);
+ mem->free = user_key;
+ mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->free = 0;
+
DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
mem->offset);
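
/*
 * The "again:" retry above is the two-step idr idiom of this kernel
 * generation: idr_pre_get() preallocates memory up front, and
 * idr_get_new_above() is retried when a concurrent allocation consumed
 * that preallocation (-EAGAIN). Wrapped as a hypothetical helper, not
 * part of this driver, the shape is:
 */
static int idr_insert_retry(struct idr *idr, void *ptr, int start, int *id)
{
	int ret;

	do {
		if (idr_pre_get(idr, GFP_KERNEL) == 0)
			return -ENOMEM;          /* preallocation failed */
		ret = idr_get_new_above(idr, ptr, start, id);
	} while (ret == -EAGAIN);                /* prealloc was used up, retry */

	return ret;
}
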
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int ret;
+ struct sis_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+ obj = idr_find(&dev_priv->object_idr, mem->free);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->free);
+ list_del(&obj->owner_list);
+ if (drm_mm_node_allocated(&obj->mm_node))
+ drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(obj->req.offset);
+#endif
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("free = 0x%lx\n", mem->free);
- return ret;
+ return 0;
}
static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t *agp = data;
- int ret;
dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
- agp->size >> SIS_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
dev_priv->mmio = NULL;
mutex_unlock(&dev->struct_mutex);
}
void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_sis_private_t *dev_priv = dev->dev_private;
+ struct sis_file_private *file_priv = file->driver_priv;
+ struct sis_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ if (drm_mm_node_allocated(&entry->mm_node))
+ drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(entry->req.offset);
+#endif
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
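The SiS allocation and reclaim paths above track each allocation in a small bookkeeping object carrying the drm_mm node, the legacy sisfb request and the per-file ownership link. A minimal sketch of that structure, inferred from the fields used above (the sis_memreq type for the sis_malloc()/sis_free() path is an assumption):

struct sis_memblock {
	struct drm_mm_node mm_node;	/* used when DRM manages VRAM/AGP via drm_mm */
	struct sis_memreq req;		/* used when the sisfb driver hands out VRAM */
	struct list_head owner_list;	/* linked into file_priv->obj_list for reclaim */
};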
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index cda29911e33..1613c78544c 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
};
+static const struct file_operations tdfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f3cf6f02c99..b2b33dde2af 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
ttm_bo_manager.o
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f681c..747c1413fc9 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -31,6 +31,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
@@ -40,45 +41,33 @@
#include <asm/agp.h>
struct ttm_agp_backend {
- struct ttm_backend backend;
+ struct ttm_tt ttm;
struct agp_memory *mem;
struct agp_bridge_data *bridge;
};
-static int ttm_agp_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct page **cur_page, **last_page = pages + num_pages;
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
+ int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ unsigned i;
- mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+ mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
mem->page_count = 0;
- for (cur_page = pages; cur_page < last_page; ++cur_page) {
- struct page *page = *cur_page;
+ for (i = 0; i < ttm->num_pages; i++) {
+ struct page *page = ttm->pages[i];
+
if (!page)
- page = dummy_read_page;
+ page = ttm->dummy_read_page;
mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
- return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct drm_mm_node *node = bo_mem->mm_node;
- struct agp_memory *mem = agp_be->mem;
- int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
- int ret;
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
return ret;
}
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
- else
- return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct agp_memory *mem = agp_be->mem;
-
- if (mem) {
- ttm_agp_unbind(backend);
- agp_free_memory(mem);
+ if (agp_be->mem) {
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ agp_free_memory(agp_be->mem);
+ agp_be->mem = NULL;
}
- agp_be->mem = NULL;
+ return 0;
}
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem)
- ttm_agp_clear(backend);
+ ttm_agp_unbind(ttm);
+ ttm_tt_fini(ttm);
kfree(agp_be);
}
static struct ttm_backend_func ttm_agp_func = {
- .populate = ttm_agp_populate,
- .clear = ttm_agp_clear,
.bind = ttm_agp_bind,
.unbind = ttm_agp_unbind,
.destroy = ttm_agp_destroy,
};
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct ttm_agp_backend *agp_be;
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->backend.func = &ttm_agp_func;
- agp_be->backend.bdev = bdev;
- return &agp_be->backend;
+ agp_be->ttm.func = &ttm_agp_func;
+
+ if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ return NULL;
+ }
+
+ return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
}
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
#endif
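With the backend folded into struct ttm_tt, an AGP-based driver is expected to route its new per-driver hooks through the helpers exported above. A hedged sketch of that glue; the foo_* names and the agp_bridge field are placeholders, only the ttm_agp_tt_* calls come from this patch:

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct foo_device *foo = container_of(bdev, struct foo_device, bdev);

	/* allocates the backend and runs ttm_tt_init() for us */
	return ttm_agp_tt_create(bdev, foo->agp_bridge, size,
				 page_flags, dummy_read_page);
}

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_agp_tt_populate(ttm);	/* thin wrapper around ttm_pool_populate() */
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_agp_tt_unpopulate(ttm);
}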
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0bb0f5f713e..2f0eab66ece 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
+ size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->destroy)
bo->destroy(bo);
else {
- ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
kfree(bo);
}
+ ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
- case ttm_bo_type_user:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_USER,
- glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL)) {
- ret = -ENOMEM;
- break;
- }
-
- ret = ttm_tt_set_user(bo->ttm, current,
- bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0)) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
ret = -EINVAL;
@@ -419,9 +404,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, mem);
-
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -434,6 +416,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret)
goto out_err;
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
+
moved:
if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
@@ -472,6 +457,9 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
+ if (bo->bdev->driver->move_notify)
+ bo->bdev->driver->move_notify(bo, NULL);
+
if (bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
@@ -913,16 +901,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
}
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
- bool disallow_fixed,
uint32_t mem_type,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
- return false;
-
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
@@ -967,7 +951,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->placement[i],
&cur_flags);
@@ -1015,7 +998,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->busy_placement[i],
&cur_flags))
@@ -1185,6 +1167,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
{
int ret = 0;
unsigned long num_pages;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (ret) {
+ printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ kfree(bo);
+ return -ENOMEM;
+ }
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1248,34 @@ out_err:
}
EXPORT_SYMBOL(ttm_bo_init);
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
{
- size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
- PAGE_MASK;
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
- return glob->ttm_bo_size + 2 * page_array_size;
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
+ return size;
}
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
@@ -1276,10 +1289,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
{
struct ttm_buffer_object *bo;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
int ret;
- size_t acc_size =
- ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;
@@ -1465,13 +1478,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
goto out_no_shrink;
}
- glob->ttm_bo_extra_size =
- ttm_round_pot(sizeof(struct ttm_tt)) +
- ttm_round_pot(sizeof(struct ttm_backend));
-
- glob->ttm_bo_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct ttm_buffer_object));
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
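ttm_bo_size() is gone; drivers now size the memory-accounting charge themselves with the two exported helpers and hand the result to ttm_bo_init(). A hedged sketch of how a driver might do that for its own buffer-object subclass (struct foo_bo and the use_dma_alloc flag are hypothetical):

static size_t foo_bo_acc_size(struct ttm_bo_device *bdev, unsigned long size,
			      bool use_dma_alloc)
{
	/* the charge covers the subclass struct, the page directory and the (dma_)ttm */
	if (use_dma_alloc)
		return ttm_bo_dma_acc_size(bdev, size, sizeof(struct foo_bo));
	return ttm_bo_acc_size(bdev, size, sizeof(struct foo_bo));
}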
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcaea583..f8187ead7b3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
- struct page *d = ttm_tt_get_page(ttm, page);
+ struct page *d = ttm->pages[page];
void *dst;
if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
- struct page *s = ttm_tt_get_page(ttm, page);
+ struct page *s = ttm->pages[page];
void *src;
if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (old_iomap == NULL && ttm == NULL)
goto out2;
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
add = 0;
dir = 1;
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
+ fbo->acc_size = 0;
*new_obj = fbo;
return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
{
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
- struct page *d;
- int i;
+ int ret;
BUG_ON(!ttm);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
*/
map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm_tt_get_page(ttm, start_page);
+ map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
- /*
- * Populate the part we're mapping;
- */
- for (i = start_page; i < start_page + num_pages; ++i) {
- d = ttm_tt_get_page(ttm, i);
- if (!d)
- return -ENOMEM;
- }
-
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924aceb..54412848de8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+ /* Allocate all pages at once, most common usage */
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_FAULT_OOM;
+ goto out_io_unlock;
+ }
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
-
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
- page = ttm_tt_get_page(ttm, page_offset);
+ page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd82dc0..9eba8e9a4e9 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
zone->name, (unsigned long long) zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
+ ttm_dma_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93daac3..499debda791 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages, int ttm_flags,
- enum ttm_caching_state cstate, unsigned count)
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
@@ -660,17 +662,67 @@ out:
return count;
}
+/* Put all pages in the pages array back into the correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ unsigned i;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ __free_page(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ list_add_tail(&pages[i]->lru, &pool->list);
+ pages[i] = NULL;
+ pool->npages++;
+ }
+ }
+ /* Check that we don't go over the pool limit */
+ npages = 0;
+ if (pool->npages > _manager->options.max_size) {
+ npages = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (npages)
+ ttm_page_pool_free(pool, npages);
+}
+
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
-int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count,
- dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = GFP_USER;
+ unsigned count;
int r;
/* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < count; ++r) {
+ for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
return -ENOMEM;
}
- list_add(&p->lru, pages);
+ pages[r] = p;
}
return 0;
}
-
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */
- count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+ count = 0;
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, pages, lru) {
+ list_for_each_entry(p, &plist, lru) {
clear_page(page_address(p));
}
}
/* If pool didn't have enough pages allocate new one. */
- if (count > 0) {
+ if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
- r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
if (r) {
/* If there are any pages in the list put them back into
 * the pool. */
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate, NULL);
+ ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
-
return 0;
}
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct page *p, *tmp;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
-
- list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
- }
- /* Make the pages list empty */
- INIT_LIST_HEAD(pages);
- return;
- }
- if (page_count == 0) {
- list_for_each_entry_safe(p, tmp, pages, lru) {
- ++page_count;
- }
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- list_splice_init(pages, &pool->list);
- pool->npages += page_count;
- /* Check that we don't go over the pool limit */
- page_count = 0;
- if (pool->npages > _manager->options.max_size) {
- page_count = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (page_count < NUM_PAGES_TO_ALLOC)
- page_count = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (page_count)
- ttm_page_pool_free(pool, page_count);
-}
-
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
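ttm_pool_populate() and ttm_pool_unpopulate() are the entry points that drivers without AGP or DMA needs wire directly into the bdev->driver->ttm_tt_populate()/ttm_tt_unpopulate() hooks called earlier in the series. A minimal sketch, with the foo_ prefix as a placeholder:

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
	/* fills ttm->pages[] from the global wc/uc/cached page pools */
	return ttm_pool_populate(ttm);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}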
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 00000000000..37ead6995c8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1143 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ * the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ * when freed).
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 4
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED (0)
+#define IS_WC (1<<1)
+#define IS_UC (1<<2)
+#define IS_CACHED (1<<3)
+#define IS_DMA32 (1<<4)
+
+enum pool_type {
+ POOL_IS_UNDEFINED,
+ POOL_IS_WC = IS_WC,
+ POOL_IS_UC = IS_UC,
+ POOL_IS_CACHED = IS_CACHED,
+ POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+ POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+ POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ * - generic (not restricted to DMA32):
+ * - write combined, uncached, cached.
+ * - dma32 (up to 2^32 - so up to 4GB):
+ * - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with the irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important and
+ * it matches the order in which the TTM pages were put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ * Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+ struct list_head pools; /* The 'struct device->dma_pools link */
+ enum pool_type type;
+ spinlock_t lock;
+ struct list_head inuse_list;
+ struct list_head free_list;
+ struct device *dev;
+ unsigned size;
+ unsigned npages_free;
+ unsigned npages_in_use;
+ unsigned long nfrees; /* Stats when shrunk. */
+ unsigned long nrefills; /* Stats when grown. */
+ gfp_t gfp_flags;
+ char name[13]; /* "cached dma32" */
+ char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ * via the DMA API, it will be -1.
+ */
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ struct page *p;
+ dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is in the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _mutex->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+ struct list_head pools;
+ struct device *dev;
+ struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total amount of pools in existence.
+ * @shrinker: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+ struct mutex lock;
+ struct list_head pools;
+ struct ttm_pool_opts options;
+ unsigned npools;
+ struct shrinker mm_shrink;
+ struct kobject kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+ struct page **pages, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ if (pool->type & IS_UC) {
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to uc!\n",
+ pool->dev_name, cpages);
+ }
+ if (pool->type & IS_WC) {
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to wc!\n",
+ pool->dev_name, cpages);
+ }
+ return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+ dma_addr_t dma = d_page->dma;
+ dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+ kfree(d_page);
+ d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+ struct dma_page *d_page;
+
+ d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+ if (!d_page)
+ return NULL;
+
+ d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+ &d_page->dma,
+ pool->gfp_flags);
+ if (d_page->vaddr)
+ d_page->p = virt_to_page(d_page->vaddr);
+ else {
+ kfree(d_page);
+ d_page = NULL;
+ }
+ return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+ enum pool_type type = IS_UNDEFINED;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ type |= IS_DMA32;
+ if (cstate == tt_cached)
+ type |= IS_CACHED;
+ else if (cstate == tt_uncached)
+ type |= IS_UC;
+ else
+ type |= IS_WC;
+
+ return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages_free -= freed_pages;
+ pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+ struct page *pages[], unsigned npages)
+{
+ struct dma_page *d_page, *tmp;
+
+ /* Don't set WB on WB page pool. */
+ if (npages && !(pool->type & IS_CACHED) &&
+ set_pages_array_wb(pages, npages))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
+
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: the pool to free pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+ if (nr_free > 1) {
+ pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
+ }
+#endif
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err(TTM_PFX
+ "%s: Failed to allocate memory for pool free operation.\n",
+ pool->dev_name);
+ return 0;
+ }
+ INIT_LIST_HEAD(&d_pages);
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ /* We pick the oldest ones off the list */
+ list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+ page_list) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ /* Move the dma_page from one list to another. */
+ list_move(&dma_p->page_list, &d_pages);
+
+ pages_to_free[freed_pages++] = dma_p->p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+ freed_pages);
+
+ INIT_LIST_HEAD(&d_pages);
+
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because the code
+ * after the loop expects the spinlock to be held, while at
+ * this point it has already been dropped.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+ struct device_pools *p;
+ struct dma_pool *pool;
+
+ if (!dev)
+ return;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry_reverse(p, &_manager->pools, pools) {
+ if (p->dev != dev)
+ continue;
+ pool = p->pool;
+ if (pool->type != type)
+ continue;
+
+ list_del(&p->pools);
+ kfree(p);
+ _manager->npools--;
+ break;
+ }
+ list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock.. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+ * struct device have been dropped - so nobody should be
+ * touching it. In case somebody is trying to _add_ we are
+ * guarded by the mutex. */
+ list_del(&pool->pools);
+ kfree(pool);
+ break;
+ }
+ mutex_unlock(&_manager->lock);
+}
+
+/*
+ * On freeing of the 'struct device' this destructor is run,
+ * although the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ if (pool)
+ ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+ enum pool_type type)
+{
+ char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ struct device_pools *sec_pool = NULL;
+ struct dma_pool *pool = NULL, **ptr;
+ unsigned i;
+ int ret = -ENODEV;
+ char *p;
+
+ if (!dev)
+ return NULL;
+
+ ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ret = -ENOMEM;
+
+ pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!pool)
+ goto err_mem;
+
+ sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!sec_pool)
+ goto err_mem;
+
+ INIT_LIST_HEAD(&sec_pool->pools);
+ sec_pool->dev = dev;
+ sec_pool->pool = pool;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->inuse_list);
+ INIT_LIST_HEAD(&pool->pools);
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->npages_free = pool->npages_in_use = 0;
+ pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->size = PAGE_SIZE;
+ pool->type = type;
+ pool->nrefills = 0;
+ p = pool->name;
+ for (i = 0; i < 5; i++) {
+ if (type & t[i]) {
+ p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ "%s", n[i]);
+ }
+ }
+ *p = 0;
+ /* We copy the name for pr_* calls because by the time dma_pool_destroy
+ * is called the kobj->name has already been deallocated. */
+ snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+ dev_driver_string(dev), dev_name(dev));
+ mutex_lock(&_manager->lock);
+ /* You can get the dma_pool from either the global: */
+ list_add(&sec_pool->pools, &_manager->pools);
+ _manager->npools++;
+ /* or from 'struct device': */
+ list_add(&pool->pools, &dev->dma_pools);
+ mutex_unlock(&_manager->lock);
+
+ *ptr = pool;
+ devres_add(dev, ptr);
+
+ return pool;
+err_mem:
+ devres_free(ptr);
+ kfree(sec_pool);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+ enum pool_type type)
+{
+ struct dma_pool *pool, *tmp, *found = NULL;
+
+ if (type == IS_UNDEFINED)
+ return found;
+
+ /* NB: We iterate on the 'struct dev' which has no spinlock, but
+ * it does have a kref which we have taken. The kref is taken during
+ * graphic driver loading - in the drm_pci_init it calls either
+ * pci_dev_get or pci_register_driver which both end up taking a kref
+ * on 'struct device'.
+ *
+ * On teardown, the graphics drivers end up quiescing the TTM (put_pages)
+ * and calling the devres destructors: ttm_dma_pool_release. The nice
+ * thing is that at that point there are no pages associated with the
+ * driver, so this function will not be called.
+ */
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ found = pool;
+ break;
+ }
+ return found;
+}
+
+/*
+ * Free the pages that failed to change their caching state. Pages that did
+ * change their caching state are put back into the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+ struct list_head *d_pages,
+ struct page **failed_pages,
+ unsigned cpages)
+{
+ struct dma_page *d_page, *tmp;
+ struct page *p;
+ unsigned i = 0;
+
+ p = failed_pages[0];
+ if (!p)
+ return;
+ /* Find the failed page. */
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ if (d_page->p != p)
+ continue;
+ /* .. and then progress over the full list. */
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ if (++i < cpages)
+ p = failed_pages[i];
+ else
+ break;
+ }
+
+}
+
+/*
+ * Allocate 'count' pages and put them on the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+ struct list_head *d_pages,
+ unsigned count)
+{
+ struct page **caching_array;
+ struct dma_page *dma_p;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ pr_err(TTM_PFX
+ "%s: Unable to allocate table for new pages.",
+ pool->dev_name);
+ return -ENOMEM;
+ }
+
+ if (count > 1) {
+ pr_debug("%s: (%s:%d) Getting %d pages\n",
+ pool->dev_name, pool->name, current->pid,
+ count);
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ dma_p = __ttm_dma_alloc_page(pool);
+ if (!dma_p) {
+ pr_err(TTM_PFX "%s: Unable to get page %u.\n",
+ pool->dev_name, i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of highmem pages should never be dma32, so we
+ * should be fine in such a case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+ /* Note: Cannot hold the spinlock */
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r) {
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+ list_add(&dma_p->page_list, d_pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array, cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(pool, d_pages,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+ return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+ unsigned long *irq_flags)
+{
+ unsigned count = _manager->options.small;
+ int r = pool->npages_free;
+
+ if (count > pool->npages_free) {
+ struct list_head d_pages;
+
+ INIT_LIST_HEAD(&d_pages);
+
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ /* Returns how many more are necessary to fulfill the
+ * request. */
+ r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ if (!r) {
+ /* Add the fresh pages to the end. */
+ list_splice(&d_pages, &pool->free_list);
+ ++pool->nrefills;
+ pool->npages_free += count;
+ r = count;
+ } else {
+ struct dma_page *d_page;
+ unsigned cpages = 0;
+
+ pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
+
+ list_for_each_entry(d_page, &d_pages, page_list) {
+ cpages++;
+ }
+ list_splice_tail(&d_pages, &pool->free_list);
+ pool->npages_free += cpages;
+ r = cpages;
+ }
+ }
+ return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+ struct ttm_dma_tt *ttm_dma,
+ unsigned index)
+{
+ struct dma_page *d_page;
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ unsigned long irq_flags;
+ int count, r = -ENOMEM;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+ pool->npages_in_use += 1;
+ pool->npages_free -= 1;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+}
+
+/*
+ * On success the pages list will hold 'count' correctly cached pages.
+ * On failure the negative return value (-ENOMEM, etc.) is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct dma_pool *pool;
+ enum pool_type type;
+ unsigned i;
+ gfp_t gfp_flags;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ pool = ttm_dma_pool_init(dev, gfp_flags, type);
+ if (IS_ERR_OR_NULL(pool)) {
+ return -ENOMEM;
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+ struct device_pools *p;
+ unsigned total = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ total += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return total;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct dma_pool *pool;
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+ unsigned count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ WARN_ON(!pool);
+ return;
+ }
+ is_cached = (ttm_dma_find_pool(pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+ /* make sure the pages array matches the list and count the pages */
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ ttm->pages[count] = d_page->p;
+ count++;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ if (is_cached) {
+ pool->nfrees += count;
+ } else {
+ pool->npages_free += count;
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
+ npages = count;
+ if (pool->npages_free > _manager->options.max_size) {
+ npages = pool->npages_free - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (is_cached) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p);
+ ttm_dma_page_put(pool, d_page);
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink the pool if necessary (only for !is_cached pools) */
+ if (npages)
+ ttm_dma_page_pool_free(pool, npages);
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned idx = 0;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+
+ if (list_empty(&_manager->pools))
+ return 0;
+
+ mutex_lock(&_manager->lock);
+ pool_offset = pool_offset % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+ unsigned nr_free;
+
+ if (!p->dev)
+ continue;
+ if (shrink_pages == 0)
+ break;
+ /* Do it in round-robin fashion. */
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid, nr_free,
+ shrink_pages);
+ }
+ mutex_unlock(&_manager->lock);
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret = -ENOMEM;
+
+ WARN_ON(_manager);
+
+ printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ goto err_manager;
+
+ mutex_init(&_manager->lock);
+ INIT_LIST_HEAD(&_manager->pools);
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ /* This takes care of auto-freeing the _manager */
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "dma_pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ goto err;
+ }
+ ttm_dma_pool_mm_shrink_init(_manager);
+ return 0;
+err_manager:
+ kfree(_manager);
+ _manager = NULL;
+err:
+ return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+ struct device_pools *p, *t;
+
+ printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+ ttm_dma_pool_mm_shrink_fini(_manager);
+
+ list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+ dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+ current->pid);
+ WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+ ttm_dma_pool_match, p->pool));
+ ttm_dma_free_pool(p->dev, p->pool->type);
+ }
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct device_pools *p;
+ struct dma_pool *pool = NULL;
+ char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+ "name", "virt", "busaddr"};
+
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+ h[0], h[1], h[2], h[3], h[4], h[5]);
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools) {
+ struct device *dev = p->dev;
+ if (!dev)
+ continue;
+ pool = p->pool;
+ seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+ pool->name, pool->nrefills,
+ pool->nfrees, pool->npages_in_use,
+ pool->npages_free,
+ pool->dev_name);
+ }
+ mutex_unlock(&_manager->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
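Drivers that need bus addresses embed struct ttm_dma_tt (the ttm plus dma_address[] and pages_list) and call the two populate entry points above from their hooks. A hedged sketch; the foo_* names and the way the struct device is looked up are assumptions:

static int foo_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *dma_ttm = container_of(ttm, struct ttm_dma_tt, ttm);
	struct foo_device *foo = foo_device_from_bdev(ttm->bdev);	/* hypothetical helper */

	/* fills ttm->pages[] and dma_ttm->dma_address[] from the per-device pools */
	return ttm_dma_populate(dma_ttm, foo->dev);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *dma_ttm = container_of(ttm, struct ttm_dma_tt, ttm);
	struct foo_device *foo = foo_device_from_bdev(ttm->bdev);

	ttm_dma_unpopulate(dma_ttm, foo->dev);
}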
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f9cc548d6d9..2f75d203a2b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,139 +43,20 @@
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
- ttm->dma_address = drm_calloc_large(ttm->num_pages,
- sizeof(*ttm->dma_address));
-}
-
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
-{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
- drm_free_large(ttm->dma_address);
- ttm->dma_address = NULL;
-}
-
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
- int write;
- int dirty;
- struct page *page;
- int i;
- struct ttm_backend *be = ttm->be;
-
- BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
- write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
- dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
- if (be)
- be->func->clear(be);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (page == NULL)
- continue;
-
- if (page == ttm->dummy_read_page) {
- BUG_ON(write);
- continue;
- }
-
- if (write && dirty && !PageReserved(page))
- set_page_dirty_lock(page);
-
- ttm->pages[i] = NULL;
- ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
- put_page(page);
- }
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- struct page *p;
- struct list_head h;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- int ret;
-
- while (NULL == (p = ttm->pages[index])) {
-
- INIT_LIST_HEAD(&h);
-
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
- &ttm->dma_address[index]);
-
- if (ret != 0)
- return NULL;
-
- p = list_first_entry(&h, struct page, lru);
-
- ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
- if (unlikely(ret != 0))
- goto out_err;
-
- if (PageHighMem(p))
- ttm->pages[--ttm->first_himem_page] = p;
- else
- ttm->pages[++ttm->last_lomem_page] = p;
- }
- return p;
-out_err:
- put_page(p);
- return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
- int ret;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return NULL;
- }
- return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct page *page;
- unsigned long i;
- struct ttm_backend *be;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return ret;
- }
-
- be = ttm->be;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = __ttm_tt_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
- }
-
- be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page, ttm->dma_address);
- ttm->state = tt_unbound;
- return 0;
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+ ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address));
}
-EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
- int i;
- unsigned count = 0;
- struct list_head h;
- struct page *cur_page;
- struct ttm_backend *be = ttm->be;
-
- INIT_LIST_HEAD(&h);
-
- if (be)
- be->func->clear(be);
- for (i = 0; i < ttm->num_pages; ++i) {
-
- cur_page = ttm->pages[i];
- ttm->pages[i] = NULL;
- if (cur_page) {
- if (page_count(cur_page) != 1)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- cur_page);
- list_add(&cur_page->lru, &h);
- count++;
- }
- }
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
- ttm->dma_address);
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
-}
-
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct ttm_backend *be;
-
if (unlikely(ttm == NULL))
return;
- be = ttm->be;
- if (likely(be != NULL)) {
- be->func->destroy(be);
- ttm->be = NULL;
+ if (ttm->state == tt_bound) {
+ ttm_tt_unbind(ttm);
}
if (likely(ttm->pages != NULL)) {
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm_tt_free_user_pages(ttm);
- else
- ttm_tt_free_alloced_pages(ttm);
-
- ttm_tt_free_page_directory(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
- kfree(ttm);
+ ttm->swap_storage = NULL;
+ ttm->func->destroy(ttm);
}
-int ttm_tt_set_user(struct ttm_tt *ttm,
- struct task_struct *tsk,
- unsigned long start, unsigned long num_pages)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct mm_struct *mm = tsk->mm;
- int ret;
- int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
- BUG_ON(num_pages != ttm->num_pages);
- BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
- /**
- * Account user pages as lowmem pages for now.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(tsk, mm, start, num_pages,
- write, 0, ttm->pages, NULL);
- up_read(&mm->mmap_sem);
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- if (ret != num_pages && write) {
- ttm_tt_free_user_pages(ttm);
- ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return -ENOMEM;
}
-
- ttm->tsk = tsk;
- ttm->start = start;
- ttm->state = tt_unbound;
-
return 0;
}
+EXPORT_SYMBOL(ttm_tt_init);
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags, struct page *dummy_read_page)
+void ttm_tt_fini(struct ttm_tt *ttm)
{
- struct ttm_bo_driver *bo_driver = bdev->driver;
- struct ttm_tt *ttm;
-
- if (!bo_driver)
- return NULL;
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
- ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
- if (!ttm)
- return NULL;
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
-
ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
- return NULL;
- }
- ttm->be = bo_driver->create_ttm_backend_entry(bdev);
- if (!ttm->be) {
- ttm_tt_destroy(ttm);
- printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
- return NULL;
+ return -ENOMEM;
}
- ttm->state = tt_unpopulated;
- return ttm;
+ return 0;
}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+ drm_free_large(ttm_dma->dma_address);
+ ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
- struct ttm_backend *be = ttm->be;
if (ttm->state == tt_bound) {
- ret = be->func->unbind(be);
+ ret = ttm->func->unbind(ttm);
BUG_ON(ret);
ttm->state = tt_unbound;
}
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
- struct ttm_backend *be;
if (!ttm)
return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- be = ttm->be;
-
- ret = ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
- ret = be->func->bind(be, bo_mem);
+ ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0))
return ret;
ttm->state = tt_bound;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
int i;
int ret = -ENOMEM;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
- ttm->num_pages);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
- return 0;
- }
-
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
ret = PTR_ERR(from_page);
goto out_err;
}
- to_page = __ttm_tt_get_page(ttm, i);
+ to_page = ttm->pages[i];
if (unlikely(to_page == NULL))
goto out_err;
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
- ttm_tt_free_alloced_pages(ttm);
return ret;
}
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
- /*
- * For user buffers, just unpin the pages, as there should be
- * vma references.
- */
-
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ttm_tt_free_user_pages(ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- ttm->swap_storage = NULL;
- return 0;
- }
-
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm_tt_free_alloced_pages(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
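
With ttm_backend folded into ttm_tt, a driver now embeds struct ttm_tt in its own type, points ttm->func at bind/unbind/destroy callbacks, and lets ttm_tt_init()/ttm_tt_fini() manage the page directory. A minimal sketch of that lifecycle, using only the signatures visible in this patch (the foo_* names are hypothetical; the vmwgfx conversion further down is the real example):

struct foo_ttm_tt {
    struct ttm_tt ttm;
    /* driver-private binding state would live here */
};

static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
    /* map ttm->pages into the GPU aperture; bo_mem->start is the offset */
    return 0;
}

static int foo_ttm_unbind(struct ttm_tt *ttm)
{
    /* tear the mapping down again */
    return 0;
}

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
    struct foo_ttm_tt *foo = container_of(ttm, struct foo_ttm_tt, ttm);

    ttm_tt_fini(ttm);   /* frees the pages array allocated by ttm_tt_init() */
    kfree(foo);
}

static struct ttm_backend_func foo_ttm_func = {
    .bind = foo_ttm_bind,
    .unbind = foo_ttm_unbind,
    .destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
    struct foo_ttm_tt *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

    if (!foo)
        return NULL;
    foo->ttm.func = &foo_ttm_func;
    if (ttm_tt_init(&foo->ttm, bdev, size, page_flags, dummy_read_page)) {
        kfree(foo);
        return NULL;
    }
    return &foo->ttm;
}

ttm_tt_destroy() first unbinds, then calls the driver's ttm_tt_unpopulate hook, and finally ttm->func->destroy(), so foo_ttm_destroy() is the single place that releases the wrapper.
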
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index a83e86d3956..02661f35f7a 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -30,16 +30,52 @@
#include "drm_pciids.h"
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
+static const struct file_operations via_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
+ .open = via_driver_open,
+ .postclose = via_driver_postclose,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.ioctls = via_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &via_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9cf87d91232..88edacc9300 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,7 +24,7 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#include "drm_sman.h"
+#include "drm_mm.h"
#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
uint32_t irq_pending_mask;
int *irq_map;
unsigned int idle_fault;
- struct drm_sman sman;
int vram_initialized;
+ struct drm_mm vram_mm;
int agp_initialized;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
unsigned long vram_offset;
unsigned long agp_offset;
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6cca9a709f7..a2ab3436515 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
+ idr_init(&dev->object_name_idr);
ret = drm_vblank_init(dev, 1);
if (ret) {
- drm_sman_takedown(&dev_priv->sman);
kfree(dev_priv);
return ret;
}
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 6cc2dadae3e..a3574d09a07 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -28,26 +28,22 @@
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
-#include "drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+struct via_memblock {
+ struct drm_mm_node mm_node;
+ struct list_head owner_list;
+};
+
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_agp_t *agp = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
- agp->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_fb_t *fb = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
- fb->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
mutex_unlock(&dev->struct_mutex);
}
int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_via_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct via_memblock *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
unsigned long tmpSize;
if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
return -EINVAL;
}
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
+
tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
- (unsigned long)file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
- mem->index = item->user_hash.key;
- } else {
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
+ if (mem->type == VIA_MEM_AGP)
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+ mem->index = user_key;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
+ DRM_DEBUG("Video memory allocation failed\n");
+
return retval;
}
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
drm_via_mem_t *mem = data;
- int ret;
+ struct via_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+ obj = idr_find(&dev_priv->object_idr, mem->index);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->index);
+ list_del(&obj->owner_list);
+ drm_mm_remove_node(&obj->mm_node);
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
+
DRM_DEBUG("free = 0x%lx\n", mem->index);
- return ret;
+ return 0;
}
void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_via_private_t *dev_priv = dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
+ struct via_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ drm_mm_remove_node(&entry->mm_node);
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
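
The idr_pre_get()/idr_get_new_above() pair used above is the standard retry pattern of this kernel's idr API: preallocate a layer, ask for an id above 0, and loop on -EAGAIN when a racing allocation consumed the preallocation. Roughly, as a self-contained helper (a sketch that assumes the caller serializes access, as via_mem_alloc() does under struct_mutex; foo_idr_alloc is a hypothetical name):

static int foo_idr_alloc(struct idr *idr, void *ptr)
{
    int id, ret;

again:
    if (idr_pre_get(idr, GFP_KERNEL) == 0)
        return -ENOMEM;        /* could not preallocate a layer */

    ret = idr_get_new_above(idr, ptr, 1, &id);
    if (ret == -EAGAIN)
        goto again;            /* preallocation raced away, retry */
    if (ret)
        return ret;

    return id;                 /* id >= 1, the userspace-visible handle */
}
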
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed90823..1e2c0fb7f78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
-struct vmw_ttm_backend {
- struct ttm_backend backend;
- struct page **pages;
- unsigned long num_pages;
+struct vmw_ttm_tt {
+ struct ttm_tt ttm;
struct vmw_private *dev_priv;
int gmr_id;
};
-static int vmw_ttm_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = pages;
- vmw_be->num_pages = num_pages;
-
- return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_be->gmr_id = bo_mem->start;
- return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
- vmw_be->num_pages, vmw_be->gmr_id);
+ return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+ ttm->num_pages, vmw_be->gmr_id);
}
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = NULL;
- vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ ttm_tt_fini(ttm);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
- .populate = vmw_ttm_populate,
- .clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct vmw_ttm_backend *vmw_be;
+ struct vmw_ttm_tt *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
- vmw_be->backend.func = &vmw_ttm_func;
+ vmw_be->ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
- return &vmw_be->backend;
+ if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(vmw_be);
+ return NULL;
+ }
+
+ return &vmw_be->ttm;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
}
struct ttm_bo_driver vmw_bo_driver = {
- .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .ttm_tt_create = &vmw_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
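
All of the converted callbacks above recover the driver wrapper from the embedded struct ttm_tt with container_of(). The idiom itself is plain pointer arithmetic; a tiny standalone illustration (userspace C with made-up types, not vmwgfx code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int pages; };

struct wrapper {
    struct base base;   /* embedded, like struct ttm_tt inside vmw_ttm_tt */
    int gmr_id;
};

int main(void)
{
    struct wrapper w = { .base = { .pages = 4 }, .gmr_id = 7 };
    struct base *b = &w.base;                  /* what the core hands back */
    struct wrapper *back = container_of(b, struct wrapper, base);

    printf("gmr_id=%d pages=%d\n", back->gmr_id, back->base.pages);
    return 0;
}
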
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dff8fc76715..f390f5f9cb6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
.resume = vmw_pm_resume,
};
+static const struct file_operations vmwgfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = vmw_fops_poll,
+ .read = vmw_fops_read,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
.postclose = vmw_postclose,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
- .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
+ .fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bd..dc279706ca7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a..a0c2f12b1e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ hwversion = ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
+
if (hwversion == 0)
return false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1..66917c6c381 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
- param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ param->value =
+ ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
-
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d757..0af6ebdf205 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+ int num_rects,
+ struct vmw_clip_rect clip,
+ SVGASignedRect *out_rects,
+ int *out_num)
+{
+ int i, k;
+
+ for (i = 0, k = 0; i < num_rects; i++) {
+ int x1 = max_t(int, clip.x1, rects[i].x1);
+ int y1 = max_t(int, clip.y1, rects[i].y1);
+ int x2 = min_t(int, clip.x2, rects[i].x2);
+ int y2 = min_t(int, clip.y2, rects[i].y2);
+
+ if (x1 >= x2)
+ continue;
+ if (y1 >= y2)
+ continue;
+
+ out_rects[k].left = x1;
+ out_rects[k].top = y1;
+ out_rects[k].right = x2;
+ out_rects[k].bottom = y2;
+ k++;
+ }
+
+ *out_num = k;
+}
+
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+ int ret;
+
+ kmap_offset = 0;
+ kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+ hotspotX, hotspotY);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ return ret;
+}
+
+
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- handle, &surface);
- if (!ret) {
- if (!surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
- } else {
- ret = vmw_user_dmabuf_lookup(tfile,
- handle, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
- }
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ handle, &surface, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
}
}
+ /* need to do this before taking down old image */
+ if (surface && !surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
+ return -EINVAL;
+ }
+
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
-
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- kmap_offset = 0;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- vmw_cursor_update_image(dev_priv, virtual, 64, 64,
- du->hotspot_x, du->hotspot_y);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
+ ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+ du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
- struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
-
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
}
+ /* setup blits pointer */
+ blits = (SVGASignedRect *)&cmd[1];
+
+ /* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
- blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- blits[i].left = clips_ptr->x1 - left;
- blits[i].right = clips_ptr->x2 - left;
- blits[i].top = clips_ptr->y1 - top;
- blits[i].bottom = clips_ptr->y2 - top;
+ tmp[i].x1 = clips_ptr->x1 - left;
+ tmp[i].x2 = clips_ptr->x2 - left;
+ tmp[i].y1 = clips_ptr->y1 - top;
+ tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
- int clip_x1 = left - unit->crtc.x;
- int clip_y1 = top - unit->crtc.y;
- int clip_x2 = right - unit->crtc.x;
- int clip_y2 = bottom - unit->crtc.y;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left - unit->crtc.x;
+ clip.y1 = top - unit->crtc.y;
+ clip.x2 = right - unit->crtc.x;
+ clip.y2 = bottom - unit->crtc.y;
/* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
+
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ /* Surface must be marked as a scanout. */
+ if (unlikely(!surface->scanout))
+ return -EINVAL;
+
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -610,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitch = mode_cmd->pitch;
+ vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
@@ -724,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitch;
+ cmd->body.bytesPerLine = framebuffer->base.pitches[0];
cmd->body.ptr.gmrId = framebuffer->user_handle;
cmd->body.ptr.offset = 0;
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+ int move_x, move_y;
/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
+ /* clip size to crtc size */
+ clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+ clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+ /* translate both src and dest to bring clip into screen */
+ move_x = min_t(int, clip_x1, 0);
+ move_y = min_t(int, clip_y1, 0);
+
+ /* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
- blits[hit_num].body.destRect.left = clip_x1;
- blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+ blits[hit_num].body.destRect.left = clip_x1 - move_x;
+ blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
@@ -966,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitch = mode_cmd->pitch;
+ vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
@@ -995,7 +1085,7 @@ out_err1:
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd2)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1003,17 +1093,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- u64 required_size;
+ struct drm_mode_fb_cmd mode_cmd;
int ret;
+ mode_cmd.width = mode_cmd2->width;
+ mode_cmd.height = mode_cmd2->height;
+ mode_cmd.pitch = mode_cmd2->pitches[0];
+ mode_cmd.handle = mode_cmd2->handles[0];
+ drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+ &mode_cmd.bpp);
+
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
* requested framebuffer.
*/
- required_size = mode_cmd->pitch * mode_cmd->height;
- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode_cmd.pitch,
+ mode_cmd.height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1027,7 +1125,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
@@ -1037,42 +1135,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- mode_cmd->handle, &surface);
+ /* returns either a dmabuf or surface */
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ mode_cmd.handle,
+ &surface, &bo);
if (ret)
- goto try_dmabuf;
-
- if (!surface->scanout)
- goto err_not_scanout;
-
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
- &vfb, mode_cmd);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
- return &vfb->base;
-
-try_dmabuf:
- DRM_INFO("%s: trying buffer\n", __func__);
-
- ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
- if (ret) {
- DRM_ERROR("failed to find buffer: %i\n", ret);
- return ERR_PTR(-ENOENT);
- }
-
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ goto err_out;
+
+ /* Create the new framebuffer depending on what we got back */
+ if (bo)
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ &mode_cmd);
+ else if (surface)
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+ surface, &vfb, &mode_cmd);
+ else
+ BUG();
- /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
- vmw_dmabuf_unreference(&bo);
+err_out:
+ /* vmw_user_lookup_handle takes one ref so does new_fb */
+ if (bo)
+ vmw_dmabuf_unreference(&bo);
+ if (surface)
+ vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1167,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
-
-err_not_scanout:
- DRM_ERROR("surface not marked as scanout\n");
- /* vmw_user_surface_lookup takes one ref */
- vmw_surface_unreference(&surface);
- ttm_base_object_unref(&user_obj);
-
- return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1183,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@@ -1127,60 +1206,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
+ }
+
+ left = clips->x;
+ right = clips->x + clips->w;
+ top = clips->y;
+ bottom = clips->y + clips->h;
+
+ for (i = 1; i < num_clips; i++) {
+ left = min_t(int, left, (int)clips[i].x);
+ right = max_t(int, right, (int)clips[i].x + clips[i].w);
+ top = min_t(int, top, (int)clips[i].y);
+ bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = 0;
- cmd->body.srcRect.right = surface->sizes[0].width;
- cmd->body.srcRect.top = 0;
- cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
for (i = 0; i < num_clips; i++) {
- blits[i].left = clips[i].x;
- blits[i].right = clips[i].x + clips[i].w;
- blits[i].top = clips[i].y;
- blits[i].bottom = clips[i].y + clips[i].h;
+ tmp[i].x1 = clips[i].x - left;
+ tmp[i].x2 = clips[i].x + clips[i].w - left;
+ tmp[i].y1 = clips[i].y - top;
+ tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
- int clip_x1 = destX - unit->crtc.x;
- int clip_y1 = destY - unit->crtc.y;
- int clip_x2 = clip_x1 + surface->sizes[0].width;
- int clip_y2 = clip_y1 + surface->sizes[0].height;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left + destX - unit->crtc.x;
+ clip.y1 = top + destY - unit->crtc.y;
+ clip.x2 = right + destX - unit->crtc.x;
+ clip.y2 = bottom + destY - unit->crtc.y;
/* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -1189,6 +1303,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -1240,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
cmd->body.format.colorDepth = vfb->base.depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitch;
+ cmd->body.bytesPerLine = vfb->base.pitches[0];
cmd->body.ptr.gmrId = vfb->user_handle;
cmd->body.ptr.offset = 0;
@@ -1809,7 +1925,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -1824,10 +1941,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
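
vmw_clip_cliprects(), added earlier in this patch, is a plain rectangle intersection: each dirty rect is clamped against the per-unit clip and empty results are dropped before being emitted as blits. The same arithmetic as a standalone sketch (userspace C, hypothetical rect type):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* returns 1 and writes *out if a and b overlap, 0 otherwise */
static int clip_rect(struct rect a, struct rect b, struct rect *out)
{
    out->x1 = a.x1 > b.x1 ? a.x1 : b.x1;
    out->y1 = a.y1 > b.y1 ? a.y1 : b.y1;
    out->x2 = a.x2 < b.x2 ? a.x2 : b.x2;
    out->y2 = a.y2 < b.y2 ? a.y2 : b.y2;
    return out->x1 < out->x2 && out->y1 < out->y2;
}

int main(void)
{
    struct rect clip = { 0, 0, 1024, 768 };       /* crtc in dest coords */
    struct rect dirty = { 900, 700, 1200, 900 };  /* dirty region */
    struct rect hit;

    if (clip_rect(clip, dirty, &hit))
        printf("blit %d,%d-%d,%d\n", hit.x1, hit.y1, hit.x2, hit.y2);
    return 0;
}
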
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd96..a4f7f034996 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,6 +29,7 @@
#define VMWGFX_KMS_H_
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "vmwgfx_drv.h"
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -62,9 +63,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
+
/**
* Base class display unit.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e392849..f77b184be80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
+ struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -94,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
@@ -102,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
+
+ /* Find the first du with a cursor. */
+ list_for_each_entry(entry, &lds->active, active) {
+ du = &entry->base;
+
+ if (!du->cursor_dmabuf)
+ continue;
+
+ ret = vmw_cursor_update_dmabuf(dev_priv,
+ du->cursor_dmabuf,
+ 64, 64,
+ du->hotspot_x,
+ du->hotspot_y);
+ if (ret == 0)
+ break;
+
+ DRM_ERROR("Could not update cursor image\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb3..a37abb581cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
+{
+ int ret;
+
+ BUG_ON(*out_surf || *out_buf);
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+ if (!ret)
+ return 0;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ return ret;
+}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -1517,29 +1540,10 @@ out_bad_surface:
/**
* Buffer management.
*/
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
-{
- static size_t bo_user_size = ~0;
-
- size_t page_array_size =
- (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
-
- if (unlikely(bo_user_size == ~0)) {
- bo_user_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- }
-
- return bo_user_size + page_array_size;
-}
-
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -1550,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
BUG_ON(!bo_free);
- acc_size =
- vmw_dmabuf_acc_size(bdev->glob,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0)) {
- /* we must free the bo here as
- * ttm_buffer_object_init does so as well */
- bo_free(&vmw_bo->base);
- return ret;
- }
-
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1582,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 22a4a051f22..a421abdd1ab 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -31,6 +31,11 @@ config HID
If unsure, say Y.
+config HID_BATTERY_STRENGTH
+ bool
+ depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ default y
+
config HIDRAW
bool "/dev/hidraw raw HID device support"
depends on HID
@@ -335,6 +340,7 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- 3M PCT touch screens
- ActionStar dual touch panels
+ - Atmel panels
- Cando dual touch panels
- Chunghwa panels
- CVTouch panels
@@ -349,12 +355,15 @@ config HID_MULTITOUCH
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
+ - PixArt optical touch screen
- Pixcir dual touch panels
+ - Quanta panels
- eGalax dual-touch panels, including the Joojoo and Wetab tablets
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
- XAT optical touch panels
+ - Xiroku optical touch panels
If unsure, say N.
@@ -466,12 +475,6 @@ config HID_PRIMAX
Support for Primax devices that are not fully compliant with the
HID standard.
-config HID_QUANTA
- tristate "Quanta Optical Touch panels"
- depends on USB_HID
- ---help---
- Support for Quanta Optical Touch dual-touch panels.
-
config HID_ROCCAT
tristate "Roccat special event support"
depends on USB_HID
@@ -492,6 +495,13 @@ config HID_ROCCAT_ARVO
---help---
Support for Roccat Arvo keyboard.
+config HID_ROCCAT_ISKU
+ tristate "Roccat Isku keyboard support"
+ depends on USB_HID
+ depends on HID_ROCCAT
+ ---help---
+ Support for Roccat Isku keyboard.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
@@ -560,6 +570,12 @@ config GREENASIA_FF
(like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
and want to enable force feedback support for it.
+config HID_HYPERV_MOUSE
+ tristate "Microsoft Hyper-V mouse driver"
+ depends on HYPERV
+ ---help---
+ Select this option to enable the Hyper-V mouse driver.
+
config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
@@ -620,9 +636,19 @@ config HID_WIIMOTE
depends on BT_HIDP
depends on LEDS_CLASS
select POWER_SUPPLY
+ select INPUT_FF_MEMLESS
---help---
Support for the Nintendo Wii Remote bluetooth device.
+config HID_WIIMOTE_EXT
+ bool "Nintendo Wii Remote Extension support"
+ depends on HID_WIIMOTE
+ default HID_WIIMOTE
+ ---help---
+ Support for extension controllers of the Nintendo Wii Remote. Say yes
+ here if you want to use the Nintendo Motion+, Nunchuck or Classic
+ extension controllers with your Wii Remote.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1e0d2a638b2..8aefdc963cc 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -25,6 +25,14 @@ ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
+hid-wiimote-y := hid-wiimote-core.o
+ifdef CONFIG_HID_WIIMOTE_EXT
+ hid-wiimote-y += hid-wiimote-ext.o
+endif
+ifdef CONFIG_DEBUG_FS
+ hid-wiimote-y += hid-wiimote-debug.o
+endif
+
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
@@ -38,6 +46,7 @@ obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
+obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -51,7 +60,6 @@ obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
-obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
@@ -59,6 +67,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
+obj-$(CONFIG_HID_ROCCAT_ISKU) += hid-roccat-isku.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279..af08ce7207d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -90,7 +90,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
struct hid_field *field;
if (report->maxfield == HID_MAX_FIELDS) {
- dbg_hid("too many fields in report\n");
+ hid_err(report->device, "too many fields in report\n");
return NULL;
}
@@ -121,7 +121,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
usage = parser->local.usage[0];
if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- dbg_hid("collection stack overflow\n");
+ hid_err(parser->device, "collection stack overflow\n");
return -1;
}
@@ -129,7 +129,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
collection = kmalloc(sizeof(struct hid_collection) *
parser->device->collection_size * 2, GFP_KERNEL);
if (collection == NULL) {
- dbg_hid("failed to reallocate collection array\n");
+ hid_err(parser->device, "failed to reallocate collection array\n");
return -1;
}
memcpy(collection, parser->device->collection,
@@ -165,7 +165,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
static int close_collection(struct hid_parser *parser)
{
if (!parser->collection_stack_ptr) {
- dbg_hid("collection stack underflow\n");
+ hid_err(parser->device, "collection stack underflow\n");
return -1;
}
parser->collection_stack_ptr--;
@@ -197,7 +197,7 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
static int hid_add_usage(struct hid_parser *parser, unsigned usage)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
- dbg_hid("usage index exceeded\n");
+ hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
@@ -222,12 +222,13 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
- dbg_hid("hid_register_report failed\n");
+ hid_err(parser->device, "hid_register_report failed\n");
return -1;
}
if (parser->global.logical_maximum < parser->global.logical_minimum) {
- dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
+ hid_err(parser->device, "logical range invalid %d %d\n",
+ parser->global.logical_minimum, parser->global.logical_maximum);
return -1;
}
@@ -307,7 +308,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_PUSH:
if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
- dbg_hid("global environment stack overflow\n");
+ hid_err(parser->device, "global environment stack overflow\n");
return -1;
}
@@ -318,7 +319,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_POP:
if (!parser->global_stack_ptr) {
- dbg_hid("global environment stack underflow\n");
+ hid_err(parser->device, "global environment stack underflow\n");
return -1;
}
@@ -362,8 +363,8 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 32) {
- dbg_hid("invalid report_size %d\n",
+ if (parser->global.report_size > 96) {
+ hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
}
@@ -372,7 +373,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
parser->global.report_count = item_udata(item);
if (parser->global.report_count > HID_MAX_USAGES) {
- dbg_hid("invalid report_count %d\n",
+ hid_err(parser->device, "invalid report_count %d\n",
parser->global.report_count);
return -1;
}
@@ -381,13 +382,13 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_ID:
parser->global.report_id = item_udata(item);
if (parser->global.report_id == 0) {
- dbg_hid("report_id 0 is invalid\n");
+ hid_err(parser->device, "report_id 0 is invalid\n");
return -1;
}
return 0;
default:
- dbg_hid("unknown global tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
return -1;
}
}
@@ -414,14 +415,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* items and the first delimiter set.
*/
if (parser->local.delimiter_depth != 0) {
- dbg_hid("nested delimiters\n");
+ hid_err(parser->device, "nested delimiters\n");
return -1;
}
parser->local.delimiter_depth++;
parser->local.delimiter_branch++;
} else {
if (parser->local.delimiter_depth < 1) {
- dbg_hid("bogus close delimiter\n");
+ hid_err(parser->device, "bogus close delimiter\n");
return -1;
}
parser->local.delimiter_depth--;
@@ -506,7 +507,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- dbg_hid("unknown main item tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
@@ -678,12 +679,12 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
while ((start = fetch_item(start, end, &item)) != NULL) {
if (item.format != HID_ITEM_FORMAT_SHORT) {
- dbg_hid("unexpected long global item\n");
+ hid_err(device, "unexpected long global item\n");
goto err;
}
if (dispatch_type[item.type](parser, &item)) {
- dbg_hid("item %u %u %u %u parsing failed\n",
+ hid_err(device, "item %u %u %u %u parsing failed\n",
item.format, (unsigned)item.size,
(unsigned)item.type, (unsigned)item.tag);
goto err;
@@ -691,11 +692,11 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (start == end) {
if (parser->collection_stack_ptr) {
- dbg_hid("unbalanced collection at end of report description\n");
+ hid_err(device, "unbalanced collection at end of report description\n");
goto err;
}
if (parser->local.delimiter_depth) {
- dbg_hid("unbalanced delimiter at end of report description\n");
+ hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
vfree(parser);
@@ -703,7 +704,7 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
}
}
- dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
vfree(parser);
return ret;
@@ -873,7 +874,7 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
ret = hdrv->event(hid, field, usage, value);
if (ret != 0) {
if (ret < 0)
- dbg_hid("%s's event failed with %d\n",
+ hid_err(hid, "%s's event failed with %d\n",
hdrv->name, ret);
return;
}
@@ -995,12 +996,13 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
hid_dump_input(field->report->device, field->usage + offset, value);
if (offset >= field->report_count) {
- dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
+ hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
+ offset, field->report_count);
return -1;
}
if (field->logical_minimum < 0) {
if (value != snto32(s32ton(value, size), size)) {
- dbg_hid("value %d is out of range\n", value);
+ hid_err(field->report->device, "value %d is out of range\n", value);
return -1;
}
}
@@ -1157,7 +1159,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-static const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
@@ -1404,11 +1406,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
@@ -1423,6 +1427,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1498,11 +1503,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1544,11 +1553,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
@@ -1768,11 +1787,12 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ee80d733801..01dd9a7daf7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -114,6 +114,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0xbd, "FlareRelease"},
{0, 0xbe, "LandingGear"},
{0, 0xbf, "ToeBrake"},
+ { 6, 0, "GenericDeviceControls" },
+ {0, 0x20, "BatteryStrength" },
+ {0, 0x21, "WirelessChannel" },
+ {0, 0x22, "WirelessID" },
+ {0, 0x23, "DiscoverWirelessControl" },
+ {0, 0x24, "SecurityCodeCharacterEntered" },
+ {0, 0x25, "SecurityCodeCharactedErased" },
+ {0, 0x26, "SecurityCodeCleared" },
{ 7, 0, "Keyboard" },
{ 8, 0, "LED" },
{0, 0x01, "NumLock"},
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 9bdde867a02..2630d483d26 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -140,7 +140,7 @@ err:
}
static const struct hid_device_id ems_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
{ }
};
MODULE_DEVICE_TABLE(hid, ems_devices);
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/hid/hid-hyperv.c
index ccd39c70c52..0c33ae9cf0f 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/hid/hid-hyperv.c
@@ -14,11 +14,8 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -118,7 +115,6 @@ struct synthhid_input_report {
#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
-#define NBITS(x) (((x)/BITS_PER_LONG)+1)
enum pipe_prot_msg_type {
PIPE_MESSAGE_INVALID,
@@ -148,7 +144,8 @@ struct mousevsc_prt_msg {
*/
struct mousevsc_dev {
struct hv_device *device;
- unsigned char init_complete;
+ bool init_complete;
+ bool connected;
struct mousevsc_prt_msg protocol_req;
struct mousevsc_prt_msg protocol_resp;
/* Synchronize the request/response if needed */
@@ -159,12 +156,11 @@ struct mousevsc_dev {
unsigned char *report_desc;
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
- int connected;
struct hid_device *hid_device;
};
-static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
+static struct mousevsc_dev *mousevsc_alloc_device(struct hv_device *device)
{
struct mousevsc_dev *input_dev;
@@ -176,11 +172,12 @@ static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
input_dev->device = device;
hv_set_drvdata(device, input_dev);
init_completion(&input_dev->wait_event);
+ input_dev->init_complete = false;
return input_dev;
}
-static void free_input_device(struct mousevsc_dev *device)
+static void mousevsc_free_device(struct mousevsc_dev *device)
{
kfree(device->hid_desc);
kfree(device->report_desc);
@@ -188,7 +185,6 @@ static void free_input_device(struct mousevsc_dev *device)
kfree(device);
}
-
static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct synthhid_device_info *device_info)
{
@@ -196,14 +192,12 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct hid_descriptor *desc;
struct mousevsc_prt_msg ack;
- /* Assume success for now */
- input_device->dev_info_status = 0;
-
- memcpy(&input_device->hid_dev_info, &device_info->hid_dev_info,
- sizeof(struct hv_input_dev_info));
+ input_device->dev_info_status = -ENOMEM;
+ input_device->hid_dev_info = device_info->hid_dev_info;
desc = &device_info->hid_descriptor;
- WARN_ON(desc->bLength == 0);
+ if (desc->bLength == 0)
+ goto cleanup;
input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC);
@@ -213,13 +207,18 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->hid_desc, desc, desc->bLength);
input_device->report_desc_size = desc->desc[0].wDescriptorLength;
- if (input_device->report_desc_size == 0)
+ if (input_device->report_desc_size == 0) {
+ input_device->dev_info_status = -EINVAL;
goto cleanup;
+ }
+
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
- if (!input_device->report_desc)
+ if (!input_device->report_desc) {
+ input_device->dev_info_status = -ENOMEM;
goto cleanup;
+ }
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
@@ -242,22 +241,14 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
(unsigned long)&ack,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- complete(&input_device->wait_event);
- return;
+ if (!ret)
+ input_device->dev_info_status = 0;
cleanup:
- kfree(input_device->hid_desc);
- input_device->hid_desc = NULL;
-
- kfree(input_device->report_desc);
- input_device->report_desc = NULL;
-
- input_device->dev_info_status = -1;
complete(&input_device->wait_event);
+
+ return;
}
static void mousevsc_on_receive(struct hv_device *device,
@@ -274,10 +265,22 @@ static void mousevsc_on_receive(struct hv_device *device,
if (pipe_msg->type != PIPE_MESSAGE_DATA)
return;
- hid_msg = (struct synthhid_msg *)&pipe_msg->data[0];
+ hid_msg = (struct synthhid_msg *)pipe_msg->data;
switch (hid_msg->header.type) {
case SYNTH_HID_PROTOCOL_RESPONSE:
+ /*
+ * While it will be impossible for us to protect against
+ * malicious/buggy hypervisor/host, add a check here to
+ * ensure we don't corrupt memory.
+ */
+ if ((pipe_msg->size + sizeof(struct pipe_prt_msg)
+ - sizeof(unsigned char))
+ > sizeof(struct mousevsc_prt_msg)) {
+ WARN_ON(1);
+ break;
+ }
+
memcpy(&input_dev->protocol_resp, pipe_msg,
pipe_msg->size + sizeof(struct pipe_prt_msg) -
sizeof(unsigned char));
@@ -292,11 +295,11 @@ static void mousevsc_on_receive(struct hv_device *device,
* hid desc and report desc
*/
mousevsc_on_receive_device_info(input_dev,
- (struct synthhid_device_info *)&pipe_msg->data[0]);
+ (struct synthhid_device_info *)pipe_msg->data);
break;
case SYNTH_HID_INPUT_REPORT:
input_report =
- (struct synthhid_input_report *)&pipe_msg->data[0];
+ (struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
hid_input_report(input_dev->hid_device,
@@ -313,73 +316,60 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
- const int packetSize = 0x100;
- int ret = 0;
- struct hv_device *device = (struct hv_device *)context;
-
+ const int packet_size = 0x100;
+ int ret;
+ struct hv_device *device = context;
u32 bytes_recvd;
u64 req_id;
- unsigned char packet[0x100];
struct vmpacket_descriptor *desc;
- unsigned char *buffer = packet;
- int bufferlen = packetSize;
+ unsigned char *buffer;
+ int bufferlen = packet_size;
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer,
bufferlen, &bytes_recvd, &req_id);
- if (ret == 0) {
- if (bytes_recvd > 0) {
- desc = (struct vmpacket_descriptor *)buffer;
-
- switch (desc->type) {
- case VM_PKT_COMP:
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(
- device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type,
- req_id,
- bytes_recvd);
- break;
- }
-
- /* reset */
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
- } else {
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
+ switch (ret) {
+ case 0:
+ if (bytes_recvd <= 0) {
+ kfree(buffer);
+ return;
+ }
+ desc = (struct vmpacket_descriptor *)buffer;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(device, desc);
+ break;
+
+ default:
+ pr_err("unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
break;
}
- } else if (ret == -ENOBUFS) {
+
+ break;
+
+ case -ENOBUFS:
+ kfree(buffer);
/* Handle large packet */
bufferlen = bytes_recvd;
- buffer = kzalloc(bytes_recvd, GFP_ATOMIC);
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (buffer == NULL) {
- buffer = packet;
- bufferlen = packetSize;
- break;
- }
+ if (!buffer)
+ return;
+
+ break;
}
} while (1);
- return;
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
@@ -390,19 +380,15 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
-
request = &input_dev->protocol_req;
-
memset(request, 0, sizeof(struct mousevsc_prt_msg));
request->type = PIPE_MESSAGE_DATA;
request->size = sizeof(struct synthhid_protocol_request);
-
request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
request->request.header.size = sizeof(unsigned int);
request->request.version_requested.version = SYNTHHID_INPUT_VERSION;
-
ret = vmbus_sendpacket(device->channel, request,
sizeof(struct pipe_prt_msg) -
sizeof(unsigned char) +
@@ -410,11 +396,11 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
+ if (ret)
goto cleanup;
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -422,14 +408,14 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
response = &input_dev->protocol_resp;
if (!response->response.approved) {
- pr_err("synthhid protocol request failed (version %d)",
+ pr_err("synthhid protocol request failed (version %d)\n",
SYNTHHID_INPUT_VERSION);
ret = -ENODEV;
goto cleanup;
}
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -438,11 +424,9 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
* We should have gotten the device attr, hid desc and report
* desc at this point
*/
- if (input_dev->dev_info_status)
- ret = -ENOMEM;
+ ret = input_dev->dev_info_status;
cleanup:
-
return ret;
}
@@ -451,61 +435,40 @@ static int mousevsc_hid_open(struct hid_device *hid)
return 0;
}
+static int mousevsc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
static void mousevsc_hid_close(struct hid_device *hid)
{
}
+static void mousevsc_hid_stop(struct hid_device *hid)
+{
+}
+
static struct hid_ll_driver mousevsc_ll_driver = {
.open = mousevsc_hid_open,
.close = mousevsc_hid_close,
+ .start = mousevsc_hid_start,
+ .stop = mousevsc_hid_stop,
};
static struct hid_driver mousevsc_hid_driver;
-static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
-{
- struct hid_device *hid_dev;
- struct mousevsc_dev *input_device = hv_get_drvdata(dev);
-
- hid_dev = hid_allocate_device();
- if (IS_ERR(hid_dev))
- return;
-
- hid_dev->ll_driver = &mousevsc_ll_driver;
- hid_dev->driver = &mousevsc_hid_driver;
-
- if (hid_parse_report(hid_dev, packet, len))
- return;
-
- hid_dev->bus = BUS_VIRTUAL;
- hid_dev->vendor = input_device->hid_dev_info.vendor;
- hid_dev->product = input_device->hid_dev_info.product;
- hid_dev->version = input_device->hid_dev_info.version;
-
- sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
-
- if (!hidinput_connect(hid_dev, 0)) {
- hid_dev->claimed |= HID_CLAIMED_INPUT;
-
- input_device->connected = 1;
-
- }
-
- input_device->hid_device = hid_dev;
-}
-
-static int mousevsc_on_device_add(struct hv_device *device)
+static int mousevsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
{
- int ret = 0;
+ int ret;
struct mousevsc_dev *input_dev;
+ struct hid_device *hid_dev;
- input_dev = alloc_input_device(device);
+ input_dev = mousevsc_alloc_device(device);
if (!input_dev)
return -ENOMEM;
- input_dev->init_complete = false;
-
ret = vmbus_open(device->channel,
INPUTVSC_SEND_RING_BUFFER_SIZE,
INPUTVSC_RECV_RING_BUFFER_SIZE,
@@ -515,54 +478,78 @@ static int mousevsc_on_device_add(struct hv_device *device)
device
);
- if (ret != 0) {
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err0;
ret = mousevsc_connect_to_vsp(device);
- if (ret != 0) {
- vmbus_close(device->channel);
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err1;
/* workaround SA-167 */
if (input_dev->report_desc[14] == 0x25)
input_dev->report_desc[14] = 0x29;
- reportdesc_callback(device, input_dev->report_desc,
- input_dev->report_desc_size);
+ hid_dev = hid_allocate_device();
+ if (IS_ERR(hid_dev)) {
+ ret = PTR_ERR(hid_dev);
+ goto probe_err1;
+ }
+
+ hid_dev->ll_driver = &mousevsc_ll_driver;
+ hid_dev->driver = &mousevsc_hid_driver;
+ hid_dev->bus = BUS_VIRTUAL;
+ hid_dev->vendor = input_dev->hid_dev_info.vendor;
+ hid_dev->product = input_dev->hid_dev_info.product;
+ hid_dev->version = input_dev->hid_dev_info.version;
+ input_dev->hid_device = hid_dev;
+
+ sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
+
+ ret = hid_add_device(hid_dev);
+ if (ret)
+ goto probe_err1;
+
+ ret = hid_parse_report(hid_dev, input_dev->report_desc,
+ input_dev->report_desc_size);
+
+ if (ret) {
+ hid_err(hid_dev, "parse failed\n");
+ goto probe_err2;
+ }
+ ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+
+ if (ret) {
+ hid_err(hid_dev, "hw start failed\n");
+ goto probe_err2;
+ }
+
+ input_dev->connected = true;
input_dev->init_complete = true;
return ret;
-}
-static int mousevsc_probe(struct hv_device *dev,
- const struct hv_vmbus_device_id *dev_id)
-{
+probe_err2:
+ hid_destroy_device(hid_dev);
- return mousevsc_on_device_add(dev);
+probe_err1:
+ vmbus_close(device->channel);
+probe_err0:
+ mousevsc_free_device(input_dev);
+
+ return ret;
}
+
static int mousevsc_remove(struct hv_device *dev)
{
struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
vmbus_close(dev->channel);
-
- if (input_dev->connected) {
- hidinput_disconnect(input_dev->hid_device);
- input_dev->connected = 0;
- hid_destroy_device(input_dev->hid_device);
- }
-
- free_input_device(input_dev);
+ hid_destroy_device(input_dev->hid_device);
+ mousevsc_free_device(input_dev);
return 0;
}
@@ -577,7 +564,7 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver mousevsc_drv = {
- .name = "mousevsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b6..b8574cddd95 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -21,6 +21,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
+#define USB_DEVICE_ID_3M3266 0x0506
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -124,6 +125,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
@@ -145,6 +147,9 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_VENDOR_ID_ATMEL 0x03eb
+#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
+
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
@@ -230,11 +235,14 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -266,7 +274,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
@@ -356,6 +364,9 @@
#define USB_VENDOR_ID_HANVON 0x20b3
#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
+#define USB_VENDOR_ID_HANVON_ALT 0x22ed
+#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
+
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
@@ -571,6 +582,11 @@
#define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+#define USB_VENDOR_ID_PIXART 0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
+
#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
@@ -581,11 +597,14 @@
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
#define USB_VENDOR_ID_QUANTA 0x0408
-#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
@@ -679,6 +698,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
#define USB_VENDOR_ID_WALTOP 0x172f
#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
@@ -707,6 +727,17 @@
#define USB_VENDOR_ID_XAT 0x2505
#define USB_DEVICE_ID_XAT_CSR 0x0220
+#define USB_VENDOR_ID_XIROKU 0x1477
+#define USB_DEVICE_ID_XIROKU_SPX 0x1006
+#define USB_DEVICE_ID_XIROKU_MPX 0x1007
+#define USB_DEVICE_ID_XIROKU_CSR 0x100e
+#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
+#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
+#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
+#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
+#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
+#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
+
#define USB_VENDOR_ID_YEALINK 0x6993
#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f333139d1a4..9333d692a78 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -32,6 +32,8 @@
#include <linux/hid.h>
#include <linux/hid-debug.h>
+#include "hid-ids.h"
+
#define unk KEY_UNKNOWN
static const unsigned char hid_keyboard[256] = {
@@ -271,6 +273,161 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
return logical_extents / physical_extents;
}
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+static enum power_supply_property hidinput_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_STATUS
+};
+
+#define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */
+#define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */
+
+static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ {}
+};
+
+static unsigned find_battery_quirk(struct hid_device *hdev)
+{
+ unsigned quirks = 0;
+ const struct hid_device_id *match;
+
+ match = hid_match_id(hdev, hid_battery_quirks);
+ if (match != NULL)
+ quirks = match->driver_data;
+
+ return quirks;
+}
+
+static int hidinput_get_battery_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct hid_device *dev = container_of(psy, struct hid_device, battery);
+ int ret = 0;
+ __u8 buf[2] = {};
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 1;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
+ buf, sizeof(buf),
+ dev->battery_report_type);
+
+ if (ret != 2) {
+ if (ret >= 0)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dev->battery_min < dev->battery_max &&
+ buf[1] >= dev->battery_min &&
+ buf[1] <= dev->battery_max)
+ val->intval = (100 * (buf[1] - dev->battery_min)) /
+ (dev->battery_max - dev->battery_min);
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = dev->name;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+{
+ struct power_supply *battery = &dev->battery;
+ int ret;
+ unsigned quirks;
+ s32 min, max;
+
+ if (field->usage->hid != HID_DC_BATTERYSTRENGTH)
+ return false; /* no match */
+
+ if (battery->name != NULL)
+ goto out; /* already initialized? */
+
+ battery->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq);
+ if (battery->name == NULL)
+ goto out;
+
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = hidinput_battery_props;
+ battery->num_properties = ARRAY_SIZE(hidinput_battery_props);
+ battery->use_for_apm = 0;
+ battery->get_property = hidinput_get_battery_property;
+
+ quirks = find_battery_quirk(dev);
+
+ hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
+ dev->bus, dev->vendor, dev->product, dev->version, quirks);
+
+ min = field->logical_minimum;
+ max = field->logical_maximum;
+
+ if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+ min = 0;
+ max = 100;
+ }
+
+ if (quirks & HID_BATTERY_QUIRK_FEATURE)
+ report_type = HID_FEATURE_REPORT;
+
+ dev->battery_min = min;
+ dev->battery_max = max;
+ dev->battery_report_type = report_type;
+ dev->battery_report_id = field->report->id;
+
+ ret = power_supply_register(&dev->dev, battery);
+ if (ret != 0) {
+ hid_warn(dev, "can't register power supply: %d\n", ret);
+ kfree(battery->name);
+ battery->name = NULL;
+ }
+
+out:
+ return true;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+ if (!dev->battery.name)
+ return;
+
+ power_supply_unregister(&dev->battery);
+ kfree(dev->battery.name);
+ dev->battery.name = NULL;
+}
+#else /* !CONFIG_HID_BATTERY_STRENGTH */
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field)
+{
+ return false;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+}
+#endif /* CONFIG_HID_BATTERY_STRENGTH */
+
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage)
{
@@ -629,6 +786,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case HID_UP_GENDEVCTRLS:
+ if (hidinput_setup_battery(device, HID_INPUT_REPORT, field))
+ goto ignore;
+ else
+ goto unknown;
+ break;
+
case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */
set_bit(EV_REP, input->evbit);
switch (usage->hid & HID_USAGE) {
@@ -822,6 +986,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
+ /* Ignore out-of-range values as per HID specification, section 5.10 */
+ if (value < field->logical_minimum || value > field->logical_maximum) {
+ dbg_hid("Ignoring out-of-range value %x\n", value);
+ return;
+ }
+
/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
@@ -861,6 +1031,48 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
}
EXPORT_SYMBOL_GPL(hidinput_find_field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED)
+ return field;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hidinput_get_led_field);
+
+unsigned int hidinput_count_leds(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+ unsigned int count = 0;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED &&
+ field->value[j])
+ count += 1;
+ }
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(hidinput_count_leds);
+
static int hidinput_open(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -882,15 +1094,17 @@ static void report_features(struct hid_device *hid)
struct hid_report *rep;
int i, j;
- if (!drv->feature_mapping)
- return;
-
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list)
for (i = 0; i < rep->maxfield; i++)
- for (j = 0; j < rep->field[i]->maxusage; j++)
- drv->feature_mapping(hid, rep->field[i],
- rep->field[i]->usage + j);
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ /* Verify if Battery Strength feature is available */
+ hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
+
+ if (drv->feature_mapping)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
+ }
}
/*
@@ -1010,6 +1224,8 @@ void hidinput_disconnect(struct hid_device *hid)
{
struct hid_input *hidinput, *next;
+ hidinput_cleanup_battery(hid);
+
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
input_unregister_device(hidinput->input);
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 103f30d93f7..6ecc9e22044 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -430,7 +430,7 @@ int lg4ff_init(struct hid_device *hid)
}
/* Add the device to device_list */
- entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
if (!entry) {
hid_err(hid, "Cannot add device, insufficient memory.\n");
return -ENOMEM;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f1c909f1b23..24fc4423b93 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -50,7 +50,6 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ALWAYS_VALID (1 << 4)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
@@ -60,9 +59,19 @@ struct mt_slot {
bool seen_in_this_frame;/* has this slot been updated */
};
+struct mt_class {
+ __s32 name; /* MT_CLS */
+ __s32 quirks;
+ __s32 sn_move; /* Signal/noise ratio for move events */
+ __s32 sn_width; /* Signal/noise ratio for width events */
+ __s32 sn_height; /* Signal/noise ratio for height events */
+ __s32 sn_pressure; /* Signal/noise ratio for pressure events */
+ __u8 maxcontacts;
+};
+
struct mt_device {
struct mt_slot curdata; /* placeholder of incoming data */
- struct mt_class *mtclass; /* our mt device class */
+ struct mt_class mtclass; /* our mt device class */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
int last_mt_collection; /* last known mt-related collection */
@@ -74,30 +83,23 @@ struct mt_device {
struct mt_slot *slots;
};
-struct mt_class {
- __s32 name; /* MT_CLS */
- __s32 quirks;
- __s32 sn_move; /* Signal/noise ratio for move events */
- __s32 sn_width; /* Signal/noise ratio for width events */
- __s32 sn_height; /* Signal/noise ratio for height events */
- __s32 sn_pressure; /* Signal/noise ratio for pressure events */
- __u8 maxcontacts;
-};
-
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
#define MT_CLS_SERIAL 0x0002
#define MT_CLS_CONFIDENCE 0x0003
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
+#define MT_CLS_CONFIDENCE_CONTACT_ID 0x0004
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
+#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
/* vendor specific classes */
#define MT_CLS_3M 0x0101
#define MT_CLS_CYPRESS 0x0102
#define MT_CLS_EGALAX 0x0103
+#define MT_CLS_EGALAX_SERIAL 0x0104
#define MT_DEFAULT_MAXCONTACT 10
@@ -133,13 +135,16 @@ static int find_slot_from_contactid(struct mt_device *td)
return -1;
}
-struct mt_class mt_classes[] = {
+static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
+ { .name = MT_CLS_CONFIDENCE_CONTACT_ID,
+ .quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
+ MT_QUIRK_SLOT_IS_CONTACTID },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE },
@@ -155,6 +160,9 @@ struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
+ { .name = MT_CLS_INRANGE_CONTACTNUMBER,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER },
/*
* vendor specific classes
@@ -171,9 +179,13 @@ struct mt_class mt_classes[] = {
.maxcontacts = 10 },
{ .name = MT_CLS_EGALAX,
.quirks = MT_QUIRK_SLOT_IS_CONTACTID |
- MT_QUIRK_VALID_IS_INRANGE |
- MT_QUIRK_EGALAX_XYZ_FIXUP,
- .maxcontacts = 2,
+ MT_QUIRK_VALID_IS_INRANGE,
+ .sn_move = 4096,
+ .sn_pressure = 32,
+ },
+ { .name = MT_CLS_EGALAX_SERIAL,
+ .quirks = MT_QUIRK_SLOT_IS_CONTACTID |
+ MT_QUIRK_ALWAYS_VALID,
.sn_move = 4096,
.sn_pressure = 32,
},
@@ -181,6 +193,44 @@ struct mt_class mt_classes[] = {
{ }
};
+static ssize_t mt_show_quirks(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%u\n", td->mtclass.quirks);
+}
+
+static ssize_t mt_set_quirks(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ td->mtclass.quirks = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_quirks.attr,
+ NULL
+};
+
+static struct attribute_group mt_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -192,9 +242,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
break;
case HID_DG_CONTACTMAX:
td->maxcontacts = field->value[0];
- if (td->mtclass->maxcontacts)
+ if (td->mtclass.maxcontacts)
/* check if the maxcontacts is given by the class */
- td->maxcontacts = td->mtclass->maxcontacts;
+ td->maxcontacts = td->mtclass.maxcontacts;
break;
}
@@ -214,8 +264,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
struct mt_device *td = hid_get_drvdata(hdev);
- struct mt_class *cls = td->mtclass;
- __s32 quirks = cls->quirks;
+ struct mt_class *cls = &td->mtclass;
/* Only map fields from TouchScreen or TouchPad collections.
* We need to ignore fields that belong to other collections
@@ -227,13 +276,17 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
else
return 0;
+ /* eGalax devices provide a Digitizer.Stylus input which overrides
+ * the correct Digitizers.Finger X/Y ranges.
+ * Let's just ignore this input. */
+ if (field->physical == HID_DG_STYLUS)
+ return -1;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
set_abs(hi->input, ABS_MT_POSITION_X, field,
@@ -246,8 +299,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_GD_Y:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
set_abs(hi->input, ABS_MT_POSITION_Y, field,
@@ -315,8 +366,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_DG_TIPPRESSURE:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_minimum = 0;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
@@ -363,7 +412,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int mt_compute_slot(struct mt_device *td)
{
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
return td->curdata.contactid;
@@ -407,7 +456,7 @@ static void mt_emit_event(struct mt_device *td, struct input_dev *input)
for (i = 0; i < td->maxcontacts; ++i) {
struct mt_slot *s = &(td->slots[i]);
- if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+ if ((td->mtclass.quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
!s->seen_in_this_frame) {
s->touch_state = false;
}
@@ -444,7 +493,7 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct mt_device *td = hid_get_drvdata(hid);
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
@@ -552,7 +601,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
- td->mtclass = mtclass;
+ td->mtclass = *mtclass;
td->inputmode = -1;
td->last_mt_collection = -1;
hid_set_drvdata(hdev, td);
@@ -574,6 +623,8 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto fail;
}
+ ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+
mt_set_input_mode(hdev);
return 0;
@@ -594,6 +645,7 @@ static int mt_reset_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
kfree(td->slots);
kfree(td);
@@ -609,12 +661,20 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_3M,
HID_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M2256) },
+ { .driver_data = MT_CLS_3M,
+ HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
+ /* Atmel panels */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
+ USB_DEVICE_ID_ATMEL_MULTITOUCH) },
+
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
@@ -645,23 +705,32 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
/* eGalax devices (resistive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
/* eGalax devices (capacitive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
/* Elo TouchSystems IntelliTouch Plus panel */
{ .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
@@ -678,6 +747,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Hanvon panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+
/* Ideacom panel */
{ .driver_data = MT_CLS_SERIAL,
HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
@@ -722,6 +796,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
USB_DEVICE_ID_PENMOUNT_PCI) },
+ /* PixArt optical touch screen */
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+
/* PixCir-based panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
@@ -730,6 +815,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+ /* Quanta-based panels */
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
@@ -758,6 +854,35 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Xiroku */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 01e7d2cd7c2..12f9777c385 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -633,7 +633,7 @@ struct picolcd_fb_cleanup_item {
struct picolcd_fb_cleanup_item *next;
};
static struct picolcd_fb_cleanup_item *fb_pending;
-DEFINE_SPINLOCK(fb_pending_lock);
+static DEFINE_SPINLOCK(fb_pending_lock);
static void picolcd_fb_do_cleanup(struct work_struct *data)
{
@@ -658,7 +658,7 @@ static void picolcd_fb_do_cleanup(struct work_struct *data)
} while (item);
}
-DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
+static DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
static int picolcd_fb_open(struct fb_info *info, int u)
{
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 070f93a5c11..47ed74c46b6 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -9,10 +9,10 @@
* - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT)
*
* 0e8f:0003 "GreenAsia Inc. USB Joystick "
- * - tested with König Gaming gamepad
+ * - tested with König Gaming gamepad
*
* 0e8f:0003 "GASIA USB Gamepad"
- * - another version of the König gamepad
+ * - another version of the König gamepad
*
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
deleted file mode 100644
index 87a54df4d4a..00000000000
--- a/drivers/hid/hid-quanta.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * HID driver for Quanta Optical Touch dual-touch panels
- *
- * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
- *
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
-MODULE_DESCRIPTION("Quanta dual-touch panel");
-MODULE_LICENSE("GPL");
-
-#include "hid-ids.h"
-
-struct quanta_data {
- __u16 x, y;
- __u8 id;
- bool valid; /* valid finger data, or just placeholder? */
- bool first; /* is this the first finger in this frame? */
- bool activity_now; /* at least one active finger in this frame? */
- bool activity; /* at least one active finger previously? */
-};
-
-static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- switch (usage->hid & HID_USAGE_PAGE) {
-
- case HID_UP_GENDESK:
- switch (usage->hid) {
- case HID_GD_X:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_X,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- case HID_GD_Y:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_Y,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- }
- return 0;
-
- case HID_UP_DIGITIZER:
- switch (usage->hid) {
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- case HID_DG_INPUTMODE:
- case HID_DG_DEVICEINDEX:
- case HID_DG_CONTACTCOUNT:
- case HID_DG_CONTACTMAX:
- case HID_DG_TIPPRESSURE:
- case HID_DG_WIDTH:
- case HID_DG_HEIGHT:
- return -1;
- case HID_DG_INRANGE:
- /* touchscreen emulation */
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- return 1;
- case HID_DG_CONTACTID:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TRACKING_ID);
- return 1;
- }
- return 0;
-
- case 0xff000000:
- /* ignore vendor-specific features */
- return -1;
- }
-
- return 0;
-}
-
-static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- clear_bit(usage->code, *bit);
-
- return 0;
-}
-
-/*
- * this function is called when a whole finger has been parsed,
- * so that it can decide what to send to the input layer.
- */
-static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
-{
-
- td->first = !td->first; /* touchscreen emulation */
-
- if (!td->valid) {
- /*
- * touchscreen emulation: if no finger in this frame is valid
- * and there previously was finger activity, this is a release
- */
- if (!td->first && !td->activity_now && td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 0);
- td->activity = false;
- }
- return;
- }
-
- input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
-
- input_mt_sync(input);
- td->valid = false;
-
- /* touchscreen emulation: if first active finger in this frame... */
- if (!td->activity_now) {
- /* if there was no previous activity, emit touch event */
- if (!td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 1);
- td->activity = true;
- }
- td->activity_now = true;
- /* and in any case this is our preferred finger */
- input_event(input, EV_ABS, ABS_X, td->x);
- input_event(input, EV_ABS, ABS_Y, td->y);
- }
-}
-
-
-static int quanta_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- struct quanta_data *td = hid_get_drvdata(hid);
-
- if (hid->claimed & HID_CLAIMED_INPUT) {
- struct input_dev *input = field->hidinput->input;
-
- switch (usage->hid) {
- case HID_DG_INRANGE:
- td->valid = !!value;
- break;
- case HID_GD_X:
- td->x = value;
- break;
- case HID_GD_Y:
- td->y = value;
- quanta_filter_event(td, input);
- break;
- case HID_DG_CONTACTID:
- td->id = value;
- break;
- case HID_DG_CONTACTCOUNT:
- /* touch emulation: this is the last field in a frame */
- td->first = false;
- td->activity_now = false;
- break;
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- /* avoid interference from generic hidinput handling */
- break;
-
- default:
- /* fallback to the generic hidinput handling */
- return 0;
- }
- }
-
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
-
- return 1;
-}
-
-static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
-{
- int ret;
- struct quanta_data *td;
-
- td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
- if (!td) {
- hid_err(hdev, "cannot allocate Quanta Touch data\n");
- return -ENOMEM;
- }
- td->valid = false;
- td->activity = false;
- td->activity_now = false;
- td->first = false;
- hid_set_drvdata(hdev, td);
-
- ret = hid_parse(hdev);
- if (!ret)
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
-
- if (ret)
- kfree(td);
-
- return ret;
-}
-
-static void quanta_remove(struct hid_device *hdev)
-{
- hid_hw_stop(hdev);
- kfree(hid_get_drvdata(hdev));
- hid_set_drvdata(hdev, NULL);
-}
-
-static const struct hid_device_id quanta_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
- { }
-};
-MODULE_DEVICE_TABLE(hid, quanta_devices);
-
-static const struct hid_usage_id quanta_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
-static struct hid_driver quanta_driver = {
- .name = "quanta-touch",
- .id_table = quanta_devices,
- .probe = quanta_probe,
- .remove = quanta_remove,
- .input_mapping = quanta_input_mapping,
- .input_mapped = quanta_input_mapped,
- .usage_table = quanta_grabbed_usages,
- .event = quanta_event,
-};
-
-static int __init quanta_init(void)
-{
- return hid_register_driver(&quanta_driver);
-}
-
-static void __exit quanta_exit(void)
-{
- hid_unregister_driver(&quanta_driver);
-}
-
-module_init(quanta_init);
-module_exit(quanta_exit);
-
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index b07e7f96a35..a6d93992c75 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -49,12 +49,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
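
The hunk above (and the identical one in hid-roccat-kone.c further down) collapses a kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one step. A minimal userspace sketch of the same collapse, with the hypothetical xmemdup() standing in for the kernel-only kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for the kernel-only kmemdup(): allocate and copy
 * in a single call, returning NULL on allocation failure. */
static void *xmemdup(const void *src, size_t size)
{
        void *buf = malloc(size);

        if (buf)
                memcpy(buf, src, size);
        return buf;
}

int main(void)
{
        const unsigned char report[] = { 0x04, 0x01, 0x00 };   /* arbitrary bytes */
        unsigned char *buf;

        /*
         * Old shape in roccat_common_send()/kone_send():
         *         buf = kmalloc(size, GFP_KERNEL);
         *         if (buf == NULL)
         *                 return -ENOMEM;
         *         memcpy(buf, data, size);
         * New shape:
         *         buf = kmemdup(data, size, GFP_KERNEL);
         * Either way the caller gets a writable copy for usb_control_msg().
         */
        buf = xmemdup(report, sizeof(report));
        if (!buf)
                return EXIT_FAILURE;

        printf("copied %zu bytes, first byte 0x%02x\n", sizeof(report), buf[0]);
        free(buf);
        return 0;
}
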
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
new file mode 100644
index 00000000000..0e4a0ab4714
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.c
@@ -0,0 +1,487 @@
+/*
+ * Roccat Isku driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Isku is a gamer keyboard with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-isku.h"
+
+static struct class *isku_class;
+
+static void isku_profile_activated(struct isku_device *isku, uint new_profile)
+{
+ isku->actual_profile = new_profile;
+}
+
+static int isku_receive(struct usb_device *usb_dev, uint command,
+ void *buf, uint size)
+{
+ return roccat_common_receive(usb_dev, command, buf, size);
+}
+
+static int isku_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct isku_control control;
+
+ do {
+ msleep(50);
+ retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
+ &control, sizeof(struct isku_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ISKU_CONTROL_VALUE_STATUS_OK:
+ return 0;
+ case ISKU_CONTROL_VALUE_STATUS_WAIT:
+ continue;
+ case ISKU_CONTROL_VALUE_STATUS_INVALID:
+ /* seems to be critical - replug necessary */
+ case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
+ return -EINVAL;
+ default:
+ hid_err(usb_dev, "isku_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+static int isku_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ return isku_receive_control_status(usb_dev);
+}
+
+static int isku_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct isku_actual_profile buf;
+ int retval;
+
+ retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct isku_actual_profile));
+ return retval ? retval : buf.actual_profile;
+}
+
+static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
+{
+ struct isku_actual_profile buf;
+
+ buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
+ buf.size = sizeof(struct isku_actual_profile);
+ buf.actual_profile = new_profile;
+ return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ sizeof(struct isku_actual_profile));
+}
+
+static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct isku_device *isku =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
+}
+
+static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct isku_device *isku;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+ struct isku_roccat_report roccat_report;
+
+ dev = dev->parent->parent;
+ isku = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ if (profile > 4)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+
+ retval = isku_set_actual_profile(usb_dev, profile);
+ if (retval) {
+ mutex_unlock(&isku->isku_lock);
+ return retval;
+ }
+
+ isku_profile_activated(isku, profile);
+
+ roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
+ roccat_report.data1 = profile + 1;
+ roccat_report.data2 = 0;
+ roccat_report.profile = profile + 1;
+ roccat_report_event(isku->chrdev_minor, (uint8_t const *)&roccat_report);
+
+ mutex_unlock(&isku->isku_lock);
+
+ return size;
+}
+
+static struct device_attribute isku_attributes[] = {
+ __ATTR(actual_profile, 0660,
+ isku_sysfs_show_actual_profile,
+ isku_sysfs_set_actual_profile),
+ __ATTR_NULL
+};
+
+static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define ISKU_SYSFS_W(thingy, THINGY) \
+static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_write(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_R(thingy, THINGY) \
+static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_read(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_RW(thingy, THINGY) \
+ISKU_SYSFS_R(thingy, THINGY) \
+ISKU_SYSFS_W(thingy, THINGY)
+
+#define ISKU_BIN_ATTR_RW(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
+#define ISKU_BIN_ATTR_R(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+}
+
+#define ISKU_BIN_ATTR_W(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
+ISKU_SYSFS_RW(macro, MACRO)
+ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
+ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
+ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
+ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
+ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
+ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
+ISKU_SYSFS_RW(light, LIGHT)
+ISKU_SYSFS_RW(key_mask, KEY_MASK)
+ISKU_SYSFS_RW(last_set, LAST_SET)
+ISKU_SYSFS_W(talk, TALK)
+ISKU_SYSFS_R(info, INFO)
+ISKU_SYSFS_W(control, CONTROL)
+
+static struct bin_attribute isku_bin_attributes[] = {
+ ISKU_BIN_ATTR_RW(macro),
+ ISKU_BIN_ATTR_RW(keys_function),
+ ISKU_BIN_ATTR_RW(keys_easyzone),
+ ISKU_BIN_ATTR_RW(keys_media),
+ ISKU_BIN_ATTR_RW(keys_thumbster),
+ ISKU_BIN_ATTR_RW(keys_macro),
+ ISKU_BIN_ATTR_RW(keys_capslock),
+ ISKU_BIN_ATTR_RW(light),
+ ISKU_BIN_ATTR_RW(key_mask),
+ ISKU_BIN_ATTR_RW(last_set),
+ ISKU_BIN_ATTR_W(talk),
+ ISKU_BIN_ATTR_R(info),
+ ISKU_BIN_ATTR_W(control),
+ __ATTR_NULL
+};
+
+static int isku_init_isku_device_struct(struct usb_device *usb_dev,
+ struct isku_device *isku)
+{
+ int retval;
+
+ mutex_init(&isku->isku_lock);
+
+ retval = isku_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ isku_profile_activated(isku, retval);
+
+ return 0;
+}
+
+static int isku_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct isku_device *isku;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ isku = kzalloc(sizeof(*isku), GFP_KERNEL);
+ if (!isku) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, isku);
+
+ retval = isku_init_isku_device_struct(usb_dev, isku);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct isku_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(isku_class, hdev,
+ sizeof(struct isku_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ isku->chrdev_minor = retval;
+ isku->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(isku);
+ return retval;
+}
+
+static void isku_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return;
+
+ isku = hid_get_drvdata(hdev);
+ if (isku->roccat_claimed)
+ roccat_disconnect(isku->chrdev_minor);
+ kfree(isku);
+}
+
+static int isku_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = isku_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install keyboard\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void isku_remove(struct hid_device *hdev)
+{
+ isku_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void isku_keep_values_up_to_date(struct isku_device *isku,
+ u8 const *data)
+{
+ struct isku_report_button const *button_report;
+
+ switch (data[0]) {
+ case ISKU_REPORT_NUMBER_BUTTON:
+ button_report = (struct isku_report_button const *)data;
+ switch (button_report->event) {
+ case ISKU_REPORT_BUTTON_EVENT_PROFILE:
+ isku_profile_activated(isku, button_report->data1 - 1);
+ break;
+ }
+ break;
+ }
+}
+
+static void isku_report_to_chrdev(struct isku_device const *isku,
+ u8 const *data)
+{
+ struct isku_roccat_report roccat_report;
+ struct isku_report_button const *button_report;
+
+ if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct isku_report_button const *)data;
+
+ roccat_report.event = button_report->event;
+ roccat_report.data1 = button_report->data1;
+ roccat_report.data2 = button_report->data2;
+ roccat_report.profile = isku->actual_profile + 1;
+ roccat_report_event(isku->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int isku_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return 0;
+
+ if (isku == NULL)
+ return 0;
+
+ isku_keep_values_up_to_date(isku, data);
+
+ if (isku->roccat_claimed)
+ isku_report_to_chrdev(isku, data);
+
+ return 0;
+}
+
+static const struct hid_device_id isku_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, isku_devices);
+
+static struct hid_driver isku_driver = {
+ .name = "isku",
+ .id_table = isku_devices,
+ .probe = isku_probe,
+ .remove = isku_remove,
+ .raw_event = isku_raw_event
+};
+
+static int __init isku_init(void)
+{
+ int retval;
+ isku_class = class_create(THIS_MODULE, "isku");
+ if (IS_ERR(isku_class))
+ return PTR_ERR(isku_class);
+ isku_class->dev_attrs = isku_attributes;
+ isku_class->dev_bin_attrs = isku_bin_attributes;
+
+ retval = hid_register_driver(&isku_driver);
+ if (retval)
+ class_destroy(isku_class);
+ return retval;
+}
+
+static void __exit isku_exit(void)
+{
+ hid_unregister_driver(&isku_driver);
+ class_destroy(isku_class);
+}
+
+module_init(isku_init);
+module_exit(isku_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Isku driver");
+MODULE_LICENSE("GPL v2");
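
The new driver exposes its settings through sysfs: actual_profile is a plain text attribute handled by isku_sysfs_show/set_actual_profile(), and the macro, keys_*, light, key_mask and last_set files are fixed-size binary attributes routed through isku_sysfs_read()/isku_sysfs_write(). A hedged userspace sketch of switching profiles through the text attribute; the attribute path is passed on the command line because it depends on how the device enumerates:

#include <stdio.h>
#include <stdlib.h>

/*
 * Usage: ./isku_profile <path-to-actual_profile-attribute> <profile 0-4>
 * The attribute path is not hard-coded because it depends on the bus,
 * vendor/product ids and enumeration order of the keyboard.
 */
int main(int argc, char **argv)
{
        FILE *f;
        int profile;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <actual_profile path> <0-4>\n", argv[0]);
                return EXIT_FAILURE;
        }

        f = fopen(argv[1], "r+");
        if (!f) {
                perror("open actual_profile");
                return EXIT_FAILURE;
        }

        if (fscanf(f, "%d", &profile) == 1)
                printf("current profile: %d\n", profile);

        /* The store handler parses a decimal value and rejects anything
         * above 4 with -EINVAL. */
        rewind(f);
        fprintf(f, "%s\n", argv[2]);

        fclose(f);
        return 0;
}
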
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
new file mode 100644
index 00000000000..075f6efaec5
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.h
@@ -0,0 +1,147 @@
+#ifndef __HID_ROCCAT_ISKU_H
+#define __HID_ROCCAT_ISKU_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ ISKU_PROFILE_NUM = 5,
+ ISKU_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct isku_control {
+ uint8_t command; /* ISKU_COMMAND_CONTROL */
+ uint8_t value;
+ uint8_t request;
+} __packed;
+
+enum isku_control_values {
+ ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
+ ISKU_CONTROL_VALUE_STATUS_OK = 1,
+ ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
+ ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
+};
+
+struct isku_actual_profile {
+ uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t actual_profile;
+} __packed;
+
+struct isku_key_mask {
+ uint8_t command; /* ISKU_COMMAND_KEY_MASK */
+ uint8_t size; /* 6 */
+ uint8_t profile_number; /* 0-4 */
+ uint8_t mask;
+ uint16_t checksum;
+} __packed;
+
+struct isku_keys_function {
+ uint8_t data[0x29];
+} __packed;
+
+struct isku_keys_easyzone {
+ uint8_t data[0x41];
+} __packed;
+
+struct isku_keys_media {
+ uint8_t data[0x1d];
+} __packed;
+
+struct isku_keys_thumbster {
+ uint8_t data[0x17];
+} __packed;
+
+struct isku_keys_macro {
+ uint8_t data[0x23];
+} __packed;
+
+struct isku_keys_capslock {
+ uint8_t data[0x6];
+} __packed;
+
+struct isku_macro {
+ uint8_t data[0x823];
+} __packed;
+
+struct isku_light {
+ uint8_t data[0xa];
+} __packed;
+
+struct isku_info {
+ uint8_t data[2];
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __packed;
+
+struct isku_talk {
+ uint8_t data[0x10];
+} __packed;
+
+struct isku_last_set {
+ uint8_t data[0x14];
+} __packed;
+
+enum isku_commands {
+ ISKU_COMMAND_CONTROL = 0x4,
+ ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
+ ISKU_COMMAND_KEY_MASK = 0x7,
+ ISKU_COMMAND_KEYS_FUNCTION = 0x8,
+ ISKU_COMMAND_KEYS_EASYZONE = 0x9,
+ ISKU_COMMAND_KEYS_MEDIA = 0xa,
+ ISKU_COMMAND_KEYS_THUMBSTER = 0xb,
+ ISKU_COMMAND_KEYS_MACRO = 0xd,
+ ISKU_COMMAND_MACRO = 0xe,
+ ISKU_COMMAND_INFO = 0xf,
+ ISKU_COMMAND_LIGHT = 0x10,
+ ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
+ ISKU_COMMAND_LAST_SET = 0x14,
+ ISKU_COMMAND_15 = 0x15,
+ ISKU_COMMAND_TALK = 0x16,
+ ISKU_COMMAND_FIRMWARE_WRITE = 0x1b,
+ ISKU_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
+struct isku_report_button {
+ uint8_t number; /* ISKU_REPORT_NUMBER_BUTTON */
+ uint8_t zero;
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+};
+
+enum isku_report_numbers {
+ ISKU_REPORT_NUMBER_BUTTON = 3,
+};
+
+enum isku_report_button_events {
+ ISKU_REPORT_BUTTON_EVENT_PROFILE = 0x2,
+};
+
+struct isku_roccat_report {
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t profile;
+} __packed;
+
+struct isku_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex isku_lock;
+
+ int actual_profile;
+};
+
+#endif
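
The header's __packed report structures double as the wire format: isku_sysfs_read()/isku_sysfs_write() in the driver above reject any transfer whose length differs from sizeof() of the corresponding struct. A small standalone check of two of the documented sizes, using local copies of the structs since the kernel header is not visible to userspace:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local copies of two of the packed reports above, for illustration only. */
struct isku_actual_profile {
        uint8_t command;        /* ISKU_COMMAND_ACTUAL_PROFILE */
        uint8_t size;           /* always 3 */
        uint8_t actual_profile;
} __attribute__((packed));

struct isku_key_mask {
        uint8_t command;        /* ISKU_COMMAND_KEY_MASK */
        uint8_t size;           /* 6 */
        uint8_t profile_number; /* 0-4 */
        uint8_t mask;
        uint16_t checksum;
} __attribute__((packed));

int main(void)
{
        /* The binary sysfs attributes only accept whole-report transfers,
         * so the packed sizes must match the sizes noted in the comments. */
        assert(sizeof(struct isku_actual_profile) == 3);
        assert(sizeof(struct isku_key_mask) == 6);
        printf("packed report sizes match the documented wire sizes\n");
        return 0;
}
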
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index e2072afb34b..40090d60215 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -78,12 +78,10 @@ static int kone_send(struct usb_device *usb_dev, uint usb_command,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index c40afc57fc8..f23456b1fd4 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -3,7 +3,7 @@
*
* Based on hid-gyration.c
*
- * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
+ * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
*/
/*
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 17bb88f782b..f2183486a9b 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -9,6 +9,7 @@
* Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
* Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
+ * Copyright (c) 2011 Przemysław Firszt <przemo@firszt.eu>
*/
/*
@@ -33,6 +34,7 @@
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ __u8 features;
unsigned char high_speed;
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
int battery_capacity;
@@ -107,6 +109,19 @@ static int wacom_ac_get_property(struct power_supply *psy,
}
#endif
+static void wacom_set_features(struct hid_device *hdev)
+{
+ int ret;
+ __u8 rep_data[2];
+
+ /*set high speed, tablet mode*/
+ rep_data[0] = 0x03;
+ rep_data[1] = 0x20;
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ return;
+}
+
static void wacom_poke(struct hid_device *hdev, u8 speed)
{
struct wacom_data *wdata = hid_get_drvdata(hdev);
@@ -177,26 +192,13 @@ static ssize_t wacom_store_speed(struct device *dev,
static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
wacom_show_speed, wacom_store_speed);
-static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *raw_data, int size)
+static int wacom_gr_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct hid_input *hidinput;
- struct input_dev *input;
- unsigned char *data = (unsigned char *) raw_data;
int tool, x, y, rw;
- if (!(hdev->claimed & HID_CLAIMED_INPUT))
- return 0;
-
tool = 0;
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- /* Check if this is a tablet report */
- if (data[0] != 0x03)
- return 0;
-
/* Get X & Y positions */
x = le16_to_cpu(*(__le16 *) &data[2]);
y = le16_to_cpu(*(__le16 *) &data[4]);
@@ -304,6 +306,121 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
return 1;
}
+static void wacom_i4_parse_pen_report(struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ __u16 x, y, pressure;
+ __u32 id;
+
+ switch (data[1]) {
+ case 0x80: /* Out of proximity report */
+ wdata->tool = 0;
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, wdata->tool, 0);
+ input_sync(input);
+ break;
+ case 0xC2: /* Tool report */
+ id = ((data[2] << 4) | (data[3] >> 4) |
+ ((data[7] & 0x0f) << 20) |
+ ((data[8] & 0xf0) << 12)) & 0xfffff;
+
+ switch (id) {
+ case 0x802:
+ wdata->tool = BTN_TOOL_PEN;
+ break;
+ case 0x80A:
+ wdata->tool = BTN_TOOL_RUBBER;
+ break;
+ }
+ break;
+ default: /* Position/pressure report */
+ x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1);
+ y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01);
+ pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5)
+ | (data[1] & 0x01);
+
+ input_report_key(input, BTN_TOUCH, pressure > 1);
+
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
+ input_report_key(input, wdata->tool, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_sync(input);
+ break;
+ }
+
+ return;
+}
+
+static void wacom_i4_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ switch (data[0]) {
+ case 0x00: /* Empty report */
+ break;
+ case 0x02: /* Pen report */
+ wacom_i4_parse_pen_report(wdata, input, data);
+ break;
+ case 0x03: /* Features Report */
+ wdata->features = data[2];
+ break;
+ case 0x0C: /* Button report */
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]);
+ break;
+ }
+}
+
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ struct hid_input *hidinput;
+ struct input_dev *input;
+ unsigned char *data = (unsigned char *) raw_data;
+ int i;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT))
+ return 0;
+
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input = hidinput->input;
+
+ /* Check if this is a tablet report */
+ if (data[0] != 0x03)
+ return 0;
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ return wacom_gr_parse_report(hdev, wdata, input, data);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ i = 1;
+
+ switch (data[0]) {
+ case 0x04:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ /* fall through */
+ case 0x03:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d size:%d\n",
+ data[0], data[1], size);
+ return 0;
+ }
+ }
+ return 1;
+}
+
static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
int *max)
@@ -338,10 +455,19 @@ static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
__set_bit(BTN_TOOL_RUBBER, input->keybit);
__set_bit(BTN_TOOL_MOUSE, input->keybit);
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 40640, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0);
+ break;
+ }
return 0;
}
@@ -378,8 +504,16 @@ static int wacom_probe(struct hid_device *hdev,
hid_warn(hdev,
"can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode 2 with high reporting speed */
- wacom_poke(hdev, 1);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ wdata->features = 0;
+ wacom_set_features(hdev);
+ break;
+ }
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
wdata->battery.properties = wacom_battery_props;
@@ -441,6 +575,7 @@ static void wacom_remove(struct hid_device *hdev)
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
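
wacom_i4_parse_pen_report() above reassembles a 20-bit Intuos4 tool id from nibbles scattered across bytes 2, 3, 7 and 8 of the pen report. A standalone sketch of that bit arithmetic; the sample report bytes are made up purely to exercise the shifts:

#include <stdint.h>
#include <stdio.h>

/* Same nibble reassembly as the 0xC2 tool-report branch above. */
static uint32_t i4_tool_id(const unsigned char *data)
{
        return ((data[2] << 4) | (data[3] >> 4) |
                ((data[7] & 0x0f) << 20) |
                ((data[8] & 0xf0) << 12)) & 0xfffff;
}

int main(void)
{
        unsigned char report[10] = { 0 };

        /* Encode id 0x802 (the pen tool) into the nibbles the driver reads. */
        report[2] = 0x80;
        report[3] = 0x20;

        printf("tool id: 0x%03x\n", (unsigned int)i4_tool_id(report)); /* 0x802 */
        return 0;
}
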
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote-core.c
index 76739c07fa3..61881b35c67 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -20,91 +20,7 @@
#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-
-#define WIIMOTE_VERSION "0.2"
-#define WIIMOTE_NAME "Nintendo Wii Remote"
-#define WIIMOTE_BUFSIZE 32
-
-struct wiimote_buf {
- __u8 data[HID_MAX_BUFFER_SIZE];
- size_t size;
-};
-
-struct wiimote_state {
- spinlock_t lock;
- __u8 flags;
- __u8 accel_split[2];
-
- /* synchronous cmd requests */
- struct mutex sync;
- struct completion ready;
- int cmd;
- __u32 opt;
-
- /* results of synchronous requests */
- __u8 cmd_battery;
- __u8 cmd_err;
-};
-
-struct wiimote_data {
- struct hid_device *hdev;
- struct input_dev *input;
- struct led_classdev *leds[4];
- struct input_dev *accel;
- struct input_dev *ir;
- struct power_supply battery;
-
- spinlock_t qlock;
- __u8 head;
- __u8 tail;
- struct wiimote_buf outq[WIIMOTE_BUFSIZE];
- struct work_struct worker;
-
- struct wiimote_state state;
-};
-
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
-#define WIIPROTO_FLAG_RUMBLE 0x10
-#define WIIPROTO_FLAG_ACCEL 0x20
-#define WIIPROTO_FLAG_IR_BASIC 0x40
-#define WIIPROTO_FLAG_IR_EXT 0x80
-#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
-#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
- WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
-#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
- WIIPROTO_FLAG_IR_FULL)
-
-/* return flag for led \num */
-#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
-
-enum wiiproto_reqs {
- WIIPROTO_REQ_NULL = 0x0,
- WIIPROTO_REQ_RUMBLE = 0x10,
- WIIPROTO_REQ_LED = 0x11,
- WIIPROTO_REQ_DRM = 0x12,
- WIIPROTO_REQ_IR1 = 0x13,
- WIIPROTO_REQ_SREQ = 0x15,
- WIIPROTO_REQ_WMEM = 0x16,
- WIIPROTO_REQ_RMEM = 0x17,
- WIIPROTO_REQ_IR2 = 0x1a,
- WIIPROTO_REQ_STATUS = 0x20,
- WIIPROTO_REQ_DATA = 0x21,
- WIIPROTO_REQ_RETURN = 0x22,
- WIIPROTO_REQ_DRM_K = 0x30,
- WIIPROTO_REQ_DRM_KA = 0x31,
- WIIPROTO_REQ_DRM_KE = 0x32,
- WIIPROTO_REQ_DRM_KAI = 0x33,
- WIIPROTO_REQ_DRM_KEE = 0x34,
- WIIPROTO_REQ_DRM_KAE = 0x35,
- WIIPROTO_REQ_DRM_KIE = 0x36,
- WIIPROTO_REQ_DRM_KAIE = 0x37,
- WIIPROTO_REQ_DRM_E = 0x3d,
- WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
- WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
-};
+#include "hid-wiimote.h"
enum wiiproto_keys {
WIIPROTO_KEY_LEFT,
@@ -139,52 +55,6 @@ static enum power_supply_property wiimote_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY
};
-/* requires the state.lock spinlock to be held */
-static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- return wdata->state.cmd == cmd && wdata->state.opt == opt;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
-{
- wdata->state.cmd = WIIPROTO_REQ_NULL;
- complete(&wdata->state.ready);
-}
-
-static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
-{
- return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- INIT_COMPLETION(wdata->state.ready);
- wdata->state.cmd = cmd;
- wdata->state.opt = opt;
-}
-
-static inline void wiimote_cmd_release(struct wiimote_data *wdata)
-{
- mutex_unlock(&wdata->state.sync);
-}
-
-static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
-{
- int ret;
-
- ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
- if (ret < 0)
- return -ERESTARTSYS;
- else if (ret == 0)
- return -EIO;
- else
- return 0;
-}
-
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -329,6 +199,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
static __u8 select_drm(struct wiimote_data *wdata)
{
__u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+ bool ext = wiiext_active(wdata);
if (ir == WIIPROTO_FLAG_IR_BASIC) {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
@@ -340,14 +211,21 @@ static __u8 select_drm(struct wiimote_data *wdata)
} else if (ir == WIIPROTO_FLAG_IR_FULL) {
return WIIPROTO_REQ_DRM_SKAI1;
} else {
- if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
- return WIIPROTO_REQ_DRM_KA;
- else
- return WIIPROTO_REQ_DRM_K;
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KAE;
+ else
+ return WIIPROTO_REQ_DRM_KA;
+ } else {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KE;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
}
-static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
+void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
{
__u8 cmd[3];
@@ -358,6 +236,7 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wdata->state.drm = drm;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -440,8 +319,33 @@ static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
wiimote_queue(wdata, cmd, sizeof(cmd));
}
+void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset,
+ __u16 size)
+{
+ __u8 cmd[7];
+
+ if (size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size);
+ return;
+ }
+
+ cmd[0] = WIIPROTO_REQ_RMEM;
+ cmd[1] = 0;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = (size >> 8) & 0xff;
+ cmd[6] = size & 0xff;
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
 /* requires the cmd-mutex to be held */
-static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
const __u8 *wmem, __u8 size)
{
unsigned long flags;
@@ -459,6 +363,36 @@ static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
return ret;
}
+/* requires the cmd-mutex to be held */
+ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem,
+ __u8 size)
+{
+ unsigned long flags;
+ ssize_t ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = size;
+ wdata->state.cmd_read_buf = rmem;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff);
+ wiiproto_req_rreg(wdata, offset, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (!ret) {
+ if (wdata->state.cmd_read_size == 0)
+ ret = -EIO;
+ else
+ ret = wdata->state.cmd_read_size;
+ }
+
+ return ret;
+}
+
static int wiimote_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -862,6 +796,8 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ wiiext_event(wdata, payload[2] & 0x02);
+
if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
wdata->state.cmd_battery = payload[5];
wiimote_cmd_complete(wdata);
@@ -870,7 +806,23 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
{
+ __u16 offset = payload[3] << 8 | payload[4];
+ __u8 size = (payload[2] >> 4) + 1;
+ __u8 err = payload[2] & 0x0f;
+
handler_keys(wdata, payload);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
+ if (err)
+ size = 0;
+ else if (size > wdata->state.cmd_read_size)
+ size = wdata->state.cmd_read_size;
+
+ wdata->state.cmd_read_size = size;
+ if (wdata->state.cmd_read_buf)
+ memcpy(wdata->state.cmd_read_buf, &payload[5], size);
+ wiimote_cmd_complete(wdata);
+ }
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -898,6 +850,7 @@ static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
@@ -914,6 +867,7 @@ static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -924,12 +878,14 @@ static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[7], false);
ir_to_input3(wdata, &payload[9], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[12]);
}
static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
+ wiiext_handle(wdata, &payload[5]);
}
static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -941,10 +897,12 @@ static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[10], false);
ir_to_input3(wdata, &payload[12], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[15]);
}
static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
{
+ wiiext_handle(wdata, payload);
}
static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
@@ -1182,6 +1140,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
spin_lock_init(&wdata->state.lock);
init_completion(&wdata->state.ready);
mutex_init(&wdata->state.sync);
+ wdata->state.drm = WIIPROTO_REQ_DRM_K;
return wdata;
@@ -1196,6 +1155,8 @@ err:
static void wiimote_destroy(struct wiimote_data *wdata)
{
+ wiidebug_deinit(wdata);
+ wiiext_deinit(wdata);
wiimote_leds_destroy(wdata);
power_supply_unregister(&wdata->battery);
@@ -1214,6 +1175,8 @@ static int wiimote_hid_probe(struct hid_device *hdev,
struct wiimote_data *wdata;
int ret;
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
wdata = wiimote_create(hdev);
if (!wdata) {
hid_err(hdev, "Can't alloc device\n");
@@ -1267,6 +1230,14 @@ static int wiimote_hid_probe(struct hid_device *hdev,
if (ret)
goto err_free;
+ ret = wiiext_init(wdata);
+ if (ret)
+ goto err_free;
+
+ ret = wiidebug_init(wdata);
+ if (ret)
+ goto err_free;
+
hid_info(hdev, "New device registered\n");
/* by default set led1 after device initialization */
@@ -1343,4 +1314,3 @@ module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
-MODULE_VERSION(WIIMOTE_VERSION);
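
The new wiiproto_req_rmem() builds a 7-byte read-memory request: the 0x17 report id, a flag byte whose 0x04 bit selects register space instead of EEPROM (plus the preserved rumble bit), a 24-bit big-endian offset and a 16-bit big-endian length. A standalone sketch of the packing, reusing the 0xa400fe extension-id register that the extension code reads:

#include <stdint.h>
#include <stdio.h>

#define WIIPROTO_REQ_RMEM 0x17

/* Packs a read-memory request the way wiiproto_req_rmem() does, minus
 * the rumble-bit bookkeeping done by wiiproto_keep_rumble(). */
static void pack_rmem(uint8_t cmd[7], int eeprom, uint32_t offset, uint16_t size)
{
        cmd[0] = WIIPROTO_REQ_RMEM;
        cmd[1] = eeprom ? 0x00 : 0x04;  /* 0x04 selects register space */
        cmd[2] = (offset >> 16) & 0xff;
        cmd[3] = (offset >> 8) & 0xff;
        cmd[4] = offset & 0xff;
        cmd[5] = (size >> 8) & 0xff;
        cmd[6] = size & 0xff;
}

int main(void)
{
        uint8_t cmd[7];
        int i;

        /* Read the 2-byte extension id at register 0xa400fe. */
        pack_rmem(cmd, 0, 0xa400fe, 2);
        for (i = 0; i < 7; i++)
                printf("%02x ", cmd[i]);
        printf("\n");   /* 17 04 a4 00 fe 00 02 */
        return 0;
}
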
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
new file mode 100644
index 00000000000..17dabc1f339
--- /dev/null
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -0,0 +1,227 @@
+/*
+ * Debug support for HID Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "hid-wiimote.h"
+
+struct wiimote_debug {
+ struct wiimote_data *wdata;
+ struct dentry *eeprom;
+ struct dentry *drm;
+};
+
+static int wiidebug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+ loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ struct wiimote_data *wdata = dbg->wdata;
+ unsigned long flags;
+ ssize_t ret;
+ char buf[16];
+ __u16 size;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0xffffff)
+ return 0;
+ if (s > 16)
+ s = 16;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = s;
+ wdata->state.cmd_read_buf = buf;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, *off & 0xffff);
+ wiiproto_req_reeprom(wdata, *off, s);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret)
+ size = wdata->state.cmd_read_size;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+ else if (size == 0)
+ return -EIO;
+
+ if (copy_to_user(u, buf, size))
+ return -EFAULT;
+
+ *off += size;
+ ret = size;
+
+ return ret;
+}
+
+static const struct file_operations wiidebug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_eeprom_open,
+ .read = wiidebug_eeprom_read,
+ .llseek = generic_file_llseek,
+};
+
+static const char *wiidebug_drmmap[] = {
+ [WIIPROTO_REQ_NULL] = "NULL",
+ [WIIPROTO_REQ_DRM_K] = "K",
+ [WIIPROTO_REQ_DRM_KA] = "KA",
+ [WIIPROTO_REQ_DRM_KE] = "KE",
+ [WIIPROTO_REQ_DRM_KAI] = "KAI",
+ [WIIPROTO_REQ_DRM_KEE] = "KEE",
+ [WIIPROTO_REQ_DRM_KAE] = "KAE",
+ [WIIPROTO_REQ_DRM_KIE] = "KIE",
+ [WIIPROTO_REQ_DRM_KAIE] = "KAIE",
+ [WIIPROTO_REQ_DRM_E] = "E",
+ [WIIPROTO_REQ_DRM_SKAI1] = "SKAI1",
+ [WIIPROTO_REQ_DRM_SKAI2] = "SKAI2",
+ [WIIPROTO_REQ_MAX] = NULL
+};
+
+static int wiidebug_drm_show(struct seq_file *f, void *p)
+{
+ struct wiimote_debug *dbg = f->private;
+ const char *str = NULL;
+ unsigned long flags;
+ __u8 drm;
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ drm = dbg->wdata->state.drm;
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ if (drm < WIIPROTO_REQ_MAX)
+ str = wiidebug_drmmap[drm];
+ if (!str)
+ str = "unknown";
+
+ seq_printf(f, "%s\n", str);
+
+ return 0;
+}
+
+static int wiidebug_drm_open(struct inode *i, struct file *f)
+{
+ return single_open(f, wiidebug_drm_show, i->i_private);
+}
+
+static ssize_t wiidebug_drm_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ unsigned long flags;
+ char buf[16];
+ ssize_t len;
+ int i;
+
+ if (s == 0)
+ return -EINVAL;
+
+ len = min((size_t) 15, s);
+ if (copy_from_user(buf, u, len))
+ return -EFAULT;
+
+ buf[15] = 0;
+
+ for (i = 0; i < WIIPROTO_REQ_MAX; ++i) {
+ if (!wiidebug_drmmap[i])
+ continue;
+ if (!strcasecmp(buf, wiidebug_drmmap[i]))
+ break;
+ }
+
+ if (i == WIIPROTO_REQ_MAX)
+ i = simple_strtoul(buf, NULL, 10);
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ wiiproto_req_drm(dbg->wdata, (__u8) i);
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ return len;
+}
+
+static const struct file_operations wiidebug_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_drm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = wiidebug_drm_write,
+ .release = single_release,
+};
+
+int wiidebug_init(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ dbg->wdata = wdata;
+
+ dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
+ if (!dbg->eeprom)
+ goto err;
+
+ dbg->drm = debugfs_create_file("drm", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
+ if (!dbg->drm)
+ goto err_drm;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = dbg;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_drm:
+ debugfs_remove(dbg->eeprom);
+err:
+ kfree(dbg);
+ return ret;
+}
+
+void wiidebug_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg = wdata->debug;
+ unsigned long flags;
+
+ if (!dbg)
+ return;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ debugfs_remove(dbg->drm);
+ debugfs_remove(dbg->eeprom);
+ kfree(dbg);
+}
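
The debugfs file operations above give each wiimote two files under its HID debug directory: eeprom, which reads raw EEPROM in chunks of at most 16 bytes, and drm, which reports the current data reporting mode and accepts a mode name such as "KA" (or a decimal number) on write. A hedged userspace sketch; the debugfs mount point and device directory name are assumptions and vary per connection:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Usage: ./wiidbg <hid-debugfs-directory>
 * e.g. something like /sys/kernel/debug/hid/0005:057E:0306.0001 -- the
 * exact name depends on the connection -- containing the 'eeprom' and
 * 'drm' files created above.
 */
int main(int argc, char **argv)
{
        char path[256];
        char buf[16];
        ssize_t n;
        int fd;

        if (argc != 2)
                return 1;

        /* Dump the first chunk of EEPROM (the read handler caps it at 16 bytes). */
        snprintf(path, sizeof(path), "%s/eeprom", argv[1]);
        fd = open(path, O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf));
                printf("read %zd eeprom bytes\n", n);
                close(fd);
        }

        /* Request the keys+accelerometer reporting mode by name. */
        snprintf(path, sizeof(path), "%s/drm", argv[1]);
        fd = open(path, O_WRONLY);
        if (fd >= 0) {
                if (write(fd, "KA", 2) < 0)
                        perror("write drm");
                close(fd);
        }
        return 0;
}
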
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
new file mode 100644
index 00000000000..aa958706c0e
--- /dev/null
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -0,0 +1,752 @@
+/*
+ * HID driver for Nintendo Wiimote extension devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "hid-wiimote.h"
+
+struct wiimote_ext {
+ struct wiimote_data *wdata;
+ struct work_struct worker;
+ struct input_dev *input;
+ struct input_dev *mp_input;
+
+ atomic_t opened;
+ atomic_t mp_opened;
+ bool plugged;
+ bool mp_plugged;
+ bool motionp;
+ __u8 ext_type;
+};
+
+enum wiiext_type {
+ WIIEXT_NONE, /* placeholder */
+ WIIEXT_CLASSIC, /* Nintendo classic controller */
+ WIIEXT_NUNCHUCK, /* Nintendo nunchuck controller */
+};
+
+enum wiiext_keys {
+ WIIEXT_KEY_C,
+ WIIEXT_KEY_Z,
+ WIIEXT_KEY_A,
+ WIIEXT_KEY_B,
+ WIIEXT_KEY_X,
+ WIIEXT_KEY_Y,
+ WIIEXT_KEY_ZL,
+ WIIEXT_KEY_ZR,
+ WIIEXT_KEY_PLUS,
+ WIIEXT_KEY_MINUS,
+ WIIEXT_KEY_HOME,
+ WIIEXT_KEY_LEFT,
+ WIIEXT_KEY_RIGHT,
+ WIIEXT_KEY_UP,
+ WIIEXT_KEY_DOWN,
+ WIIEXT_KEY_LT,
+ WIIEXT_KEY_RT,
+ WIIEXT_KEY_COUNT
+};
+
+static __u16 wiiext_keymap[] = {
+ BTN_C, /* WIIEXT_KEY_C */
+ BTN_Z, /* WIIEXT_KEY_Z */
+ BTN_A, /* WIIEXT_KEY_A */
+ BTN_B, /* WIIEXT_KEY_B */
+ BTN_X, /* WIIEXT_KEY_X */
+ BTN_Y, /* WIIEXT_KEY_Y */
+ BTN_TL2, /* WIIEXT_KEY_ZL */
+ BTN_TR2, /* WIIEXT_KEY_ZR */
+ KEY_NEXT, /* WIIEXT_KEY_PLUS */
+ KEY_PREVIOUS, /* WIIEXT_KEY_MINUS */
+ BTN_MODE, /* WIIEXT_KEY_HOME */
+ KEY_LEFT, /* WIIEXT_KEY_LEFT */
+ KEY_RIGHT, /* WIIEXT_KEY_RIGHT */
+ KEY_UP, /* WIIEXT_KEY_UP */
+ KEY_DOWN, /* WIIEXT_KEY_DOWN */
+ BTN_TL, /* WIIEXT_KEY_LT */
+ BTN_TR, /* WIIEXT_KEY_RT */
+};
+
+/* disable all extensions */
+static void ext_disable(struct wiimote_ext *ext)
+{
+ unsigned long flags;
+ __u8 wmem = 0x55;
+
+ if (!wiimote_cmd_acquire(ext->wdata)) {
+ wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = false;
+ ext->ext_type = WIIEXT_NONE;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static bool motionp_read(struct wiimote_ext *ext)
+{
+ __u8 rmem[2], wmem;
+ ssize_t ret;
+ bool avail = false;
+
+ if (!atomic_read(&ext->mp_opened))
+ return false;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return false;
+
+ /* initialize motion plus */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa600f0, &wmem, sizeof(wmem));
+ if (ret)
+ goto error;
+
+ /* read motion plus ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa600fe, rmem, 2);
+ if (ret == 2 || rmem[1] == 0x5)
+ avail = true;
+
+error:
+ wiimote_cmd_release(ext->wdata);
+ return avail;
+}
+
+static __u8 ext_read(struct wiimote_ext *ext)
+{
+ ssize_t ret;
+ __u8 rmem[2], wmem;
+ __u8 type = WIIEXT_NONE;
+
+ if (!ext->plugged || !atomic_read(&ext->opened))
+ return WIIEXT_NONE;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return WIIEXT_NONE;
+
+ /* initialize extension */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ if (!ret) {
+ /* disable encryption */
+ wmem = 0x0;
+ wiimote_cmd_write(ext->wdata, 0xa400fb, &wmem, sizeof(wmem));
+ }
+
+ /* read extension ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa400fe, rmem, 2);
+ if (ret == 2) {
+ if (rmem[0] == 0 && rmem[1] == 0)
+ type = WIIEXT_NUNCHUCK;
+ else if (rmem[0] == 0x01 && rmem[1] == 0x01)
+ type = WIIEXT_CLASSIC;
+ }
+
+ wiimote_cmd_release(ext->wdata);
+
+ return type;
+}
+
+static void ext_enable(struct wiimote_ext *ext, bool motionp, __u8 ext_type)
+{
+ unsigned long flags;
+ __u8 wmem;
+ int ret;
+
+ if (motionp) {
+ if (wiimote_cmd_acquire(ext->wdata))
+ return;
+
+ if (ext_type == WIIEXT_CLASSIC)
+ wmem = 0x07;
+ else if (ext_type == WIIEXT_NUNCHUCK)
+ wmem = 0x05;
+ else
+ wmem = 0x04;
+
+ ret = wiimote_cmd_write(ext->wdata, 0xa600fe, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ if (ret)
+ return;
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = motionp;
+ ext->ext_type = ext_type;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static void wiiext_worker(struct work_struct *work)
+{
+ struct wiimote_ext *ext = container_of(work, struct wiimote_ext,
+ worker);
+ bool motionp;
+ __u8 ext_type;
+
+ ext_disable(ext);
+ motionp = motionp_read(ext);
+ ext_type = ext_read(ext);
+ ext_enable(ext, motionp, ext_type);
+}
+
+/* schedule work only once, otherwise mark for reschedule */
+static void wiiext_schedule(struct wiimote_ext *ext)
+{
+ queue_work(system_nrt_wq, &ext->worker);
+}
+
+/*
+ * Reacts on extension port events
+ * Whenever the driver gets an event from the wiimote that an extension has been
+ * plugged or unplugged, this function shall be called. It checks what extensions
+ * are connected and initializes and activates them.
+ * This can be called in atomic context. The initialization is done in a
+ * separate worker thread. The state.lock spinlock must be held by the caller.
+ */
+void wiiext_event(struct wiimote_data *wdata, bool plugged)
+{
+ if (!wdata->ext)
+ return;
+
+ if (wdata->ext->plugged == plugged)
+ return;
+
+ wdata->ext->plugged = plugged;
+
+ if (!plugged)
+ wdata->ext->mp_plugged = false;
+
+ /*
+ * We need to call wiiext_schedule(wdata->ext) here, however, the
+ * extension initialization logic is not fully understood and so
+ * automatic initialization is not supported, yet.
+ */
+}
+
+/*
+ * Returns true if the current DRM mode should contain extension data and false
+ * if there is no interest in extension data.
+ * All supported extensions send 6 byte extension data so any DRM that contains
+ * extension bytes is fine.
+ * The caller must hold the state.lock spinlock.
+ */
+bool wiiext_active(struct wiimote_data *wdata)
+{
+ if (!wdata->ext)
+ return false;
+
+ return wdata->ext->motionp || wdata->ext->ext_type;
+}
+
+static void handler_motionp(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s32 x, y, z;
+ bool plugged;
+
+ /* | 8 7 6 5 4 3 | 2 | 1 |
+ * -----+------------------------------+-----+-----+
+ * 1 | Yaw Speed <7:0> |
+ * 2 | Roll Speed <7:0> |
+ * 3 | Pitch Speed <7:0> |
+ * -----+------------------------------+-----+-----+
+ * 4 | Yaw Speed <13:8> | Yaw |Pitch|
+ * -----+------------------------------+-----+-----+
+ * 5 | Roll Speed <13:8> |Roll | Ext |
+ * -----+------------------------------+-----+-----+
+ * 6 | Pitch Speed <13:8> | 1 | 0 |
+ * -----+------------------------------+-----+-----+
+ * The single bits Yaw, Roll, Pitch in the lower right corner specify
+ * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
+ * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a
+ * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
+ * and 9 for slow.
+ * If the wiimote is not rotating the sensor reports 2^13 = 8192.
+ * Ext specifies whether an extension is connected to the motionp.
+ */
+
+ x = payload[0];
+ y = payload[1];
+ z = payload[2];
+
+ x |= (((__u16)payload[3]) << 6) & 0xff00;
+ y |= (((__u16)payload[4]) << 6) & 0xff00;
+ z |= (((__u16)payload[5]) << 6) & 0xff00;
+
+ x -= 8192;
+ y -= 8192;
+ z -= 8192;
+
+ if (!(payload[3] & 0x02))
+ x *= 18;
+ else
+ x *= 9;
+ if (!(payload[4] & 0x02))
+ y *= 18;
+ else
+ y *= 9;
+ if (!(payload[3] & 0x01))
+ z *= 18;
+ else
+ z *= 9;
+
+ input_report_abs(ext->mp_input, ABS_RX, x);
+ input_report_abs(ext->mp_input, ABS_RY, y);
+ input_report_abs(ext->mp_input, ABS_RZ, z);
+ input_sync(ext->mp_input);
+
+ plugged = payload[5] & 0x01;
+ if (plugged != ext->mp_plugged)
+ ext->mp_plugged = plugged;
+}
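
A standalone sketch of the Motion Plus decoding that handler_motionp() and its byte-layout comment describe: 14-bit yaw/roll/pitch values centred on 8192, multiplied by 18 in the fast (2000 deg/s) range or 9 in the slow (440 deg/s) range. The sample payload is made up and simply encodes the at-rest value on every axis:

#include <stdint.h>
#include <stdio.h>

static void decode_motionp(const uint8_t *p, int32_t out[3])
{
        int32_t x = p[0] | ((((uint16_t)p[3]) << 6) & 0xff00);
        int32_t y = p[1] | ((((uint16_t)p[4]) << 6) & 0xff00);
        int32_t z = p[2] | ((((uint16_t)p[5]) << 6) & 0xff00);

        x -= 8192;
        y -= 8192;
        z -= 8192;

        out[0] = x * ((p[3] & 0x02) ? 9 : 18);  /* yaw   */
        out[1] = y * ((p[4] & 0x02) ? 9 : 18);  /* roll  */
        out[2] = z * ((p[3] & 0x01) ? 9 : 18);  /* pitch */
}

int main(void)
{
        /* At rest each axis reports 8192 (0x2000): low byte 0x00 and the
         * upper six bits (0x20) stored in bits <7:2> of bytes 4-6. */
        const uint8_t at_rest[6] = { 0x00, 0x00, 0x00, 0x80, 0x80, 0x80 };
        int32_t v[3];

        decode_motionp(at_rest, v);
        printf("yaw %d roll %d pitch %d\n", v[0], v[1], v[2]);  /* 0 0 0 */
        return 0;
}
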
+
+static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s16 x, y, z, bx, by;
+
+ /* Byte | 8 7 | 6 5 | 4 3 | 2 | 1 |
+ * -----+----------+---------+---------+----+-----+
+ * 1 | Button X <7:0> |
+ * 2 | Button Y <7:0> |
+ * -----+----------+---------+---------+----+-----+
+ * 3 | Speed X <9:2> |
+ * 4 | Speed Y <9:2> |
+ * 5 | Speed Z <9:2> |
+ * -----+----------+---------+---------+----+-----+
+ * 6 | Z <1:0> | Y <1:0> | X <1:0> | BC | BZ |
+ * -----+----------+---------+---------+----+-----+
+ * Button X/Y is the analog stick. Speed X, Y and Z are the
+ * accelerometer data in the same format as the wiimote's accelerometer.
+ * The 6th byte contains the LSBs of the accelerometer data.
+ * BC and BZ are the C and Z buttons: 0 means pressed
+ *
+ * If reported interleaved with motionp, then the layout changes. The
+ * 5th and 6th byte changes to:
+ * -----+-----------------------------------+-----+
+ * 5 | Speed Z <9:3> | EXT |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * 6 |Z <2:1> |Y <1>|X <1>| BC | BZ | 0 | 0 |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * All three accelerometer values lose their LSB. The other data is
+ * still available but slightly moved.
+ *
+ * Center data for button values is 128. Center value for accelerometer
+ * values is 512 / 0x200
+ */
+
+ bx = payload[0];
+ by = payload[1];
+ bx -= 128;
+ by -= 128;
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ if (ext->motionp) {
+ x |= (payload[5] >> 3) & 0x02;
+ y |= (payload[5] >> 4) & 0x02;
+ z &= ~0x4;
+ z |= (payload[5] >> 5) & 0x06;
+ } else {
+ x |= (payload[5] >> 2) & 0x03;
+ y |= (payload[5] >> 4) & 0x03;
+ z |= (payload[5] >> 6) & 0x03;
+ }
+
+ x -= 0x200;
+ y -= 0x200;
+ z -= 0x200;
+
+ input_report_abs(ext->input, ABS_HAT0X, bx);
+ input_report_abs(ext->input, ABS_HAT0Y, by);
+
+ input_report_abs(ext->input, ABS_RX, x);
+ input_report_abs(ext->input, ABS_RY, y);
+ input_report_abs(ext->input, ABS_RZ, z);
+
+ if (ext->motionp) {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ } else {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
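
For comparison, a standalone sketch of the plain (non Motion Plus) Nunchuck layout documented in handler_nunchuck() above: stick centred on 128, 10-bit accelerometer values centred on 0x200 with their two LSBs packed into byte 6, and active-low C/Z buttons per that comment. The sample payload is made up:

#include <stdint.h>
#include <stdio.h>

static void decode_nunchuck(const uint8_t *p, int16_t stick[2],
                            int16_t accel[3], int *btn_c, int *btn_z)
{
        stick[0] = (int16_t)(p[0] - 128);       /* stick X */
        stick[1] = (int16_t)(p[1] - 128);       /* stick Y */

        accel[0] = ((p[2] << 2) | ((p[5] >> 2) & 0x03)) - 0x200;
        accel[1] = ((p[3] << 2) | ((p[5] >> 4) & 0x03)) - 0x200;
        accel[2] = ((p[4] << 2) | ((p[5] >> 6) & 0x03)) - 0x200;

        /* The layout comment says 0 means pressed, so invert here. */
        *btn_z = !(p[5] & 0x01);
        *btn_c = !(p[5] & 0x02);
}

int main(void)
{
        /* Stick centred, accelerometer at 0x200 on every axis, neither
         * button pressed (both button bits set). */
        const uint8_t idle[6] = { 128, 128, 0x80, 0x80, 0x80, 0x03 };
        int16_t stick[2], accel[3];
        int c, z;

        decode_nunchuck(idle, stick, accel, &c, &z);
        printf("stick %d,%d accel %d,%d,%d C=%d Z=%d\n",
               stick[0], stick[1], accel[0], accel[1], accel[2], c, z);
        return 0;
}
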
+
+static void handler_classic(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s8 rx, ry, lx, ly, lt, rt;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <5:4> | LX <5:0> |
+ * 2 | RX <3:2> | LY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 |RX<1>| LT <5:4> | RY <5:1> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <3:1> | RT <5:1> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | BDL | BDU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ * RX and RY are right analog stick
+ * LX and LY are left analog stick
+ * LT is left trigger, RT is right trigger
+ * BLT is 0 if left trigger is fully pressed
+ * BRT is 0 if right trigger is fully pressed
+ * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
+ * BZL is left Z button and BZR is right Z button
+ * B-, BH, B+ are +, HOME and - buttons
+ * BB, BY, BA, BX are the B, Y, A and X buttons
+ * LSB of RX, RY, LT, and RT are not transmitted and always 0.
+ *
+ * With motionp enabled it changes slightly to this:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <4:3> | LX <5:1> | BDU |
+ * 2 | RX <2:1> | LY <5:1> | BDL |
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 |RX<0>| LT <4:3> | RY <4:0> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <2:0> | RT <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | EXT |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | 0 | 0 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * Only the LSBs of LX and LY are lost. BDU and BDL are moved, the rest
+ * is the same as before.
+ */
+
+ if (ext->motionp) {
+ lx = payload[0] & 0x3e;
+ ly = payload[1] & 0x3e;
+ } else {
+ lx = payload[0] & 0x3f;
+ ly = payload[1] & 0x3f;
+ }
+
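+ /* reassemble RX, RY, LT and RT from the pieces spread over bytes 1-4 */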
+ rx = (payload[0] >> 3) & 0x18;
+ rx |= (payload[1] >> 5) & 0x06;
+ rx |= (payload[2] >> 7) & 0x01;
+ ry = payload[2] & 0x1f;
+
+ rt = payload[3] & 0x1f;
+ lt = (payload[2] >> 2) & 0x18;
+ lt |= (payload[3] >> 5) & 0x07;
+
+ rx <<= 1;
+ ry <<= 1;
+ rt <<= 1;
+ lt <<= 1;
+
+ input_report_abs(ext->input, ABS_HAT1X, lx - 0x20);
+ input_report_abs(ext->input, ABS_HAT1Y, ly - 0x20);
+ input_report_abs(ext->input, ABS_HAT2X, rx - 0x20);
+ input_report_abs(ext->input, ABS_HAT2Y, ry - 0x20);
+ input_report_abs(ext->input, ABS_HAT3X, rt - 0x20);
+ input_report_abs(ext->input, ABS_HAT3Y, lt - 0x20);
+
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RIGHT],
+ !(payload[4] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_DOWN],
+ !(payload[4] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LT],
+ !(payload[4] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_MINUS],
+ !(payload[4] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_HOME],
+ !(payload[4] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_PLUS],
+ !(payload[4] & 0x04));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RT],
+ !(payload[4] & 0x02));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZL],
+ !(payload[5] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_B],
+ !(payload[5] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_Y],
+ !(payload[5] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_A],
+ !(payload[5] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_X],
+ !(payload[5] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZR],
+ !(payload[5] & 0x04));
+
+ if (ext->motionp) {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !(payload[0] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !(payload[1] & 0x01));
+ } else {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !(payload[5] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
+
+/* call this with state.lock spinlock held */
+void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload)
+{
+ struct wiimote_ext *ext = wdata->ext;
+
+ if (!ext)
+ return;
+
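+ /* in interleaved mode, bit 1 of byte 6 is zero for extension frames, so a set bit marks Motion+ data */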
+ if (ext->motionp && (payload[5] & 0x02)) {
+ handler_motionp(ext, payload);
+ } else if (ext->ext_type == WIIEXT_NUNCHUCK) {
+ handler_nunchuck(ext, payload);
+ } else if (ext->ext_type == WIIEXT_CLASSIC) {
+ handler_classic(ext, payload);
+ }
+}
+
+static ssize_t wiiext_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ __u8 type = WIIEXT_NONE;
+ bool motionp = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ if (wdata->ext) {
+ motionp = wdata->ext->motionp;
+ type = wdata->ext->ext_type;
+ }
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (type == WIIEXT_NUNCHUCK) {
+ if (motionp)
+ return sprintf(buf, "motionp+nunchuck\n");
+ else
+ return sprintf(buf, "nunchuck\n");
+ } else if (type == WIIEXT_CLASSIC) {
+ if (motionp)
+ return sprintf(buf, "motionp+classic\n");
+ else
+ return sprintf(buf, "classic\n");
+ } else {
+ if (motionp)
+ return sprintf(buf, "motionp\n");
+ else
+ return sprintf(buf, "none\n");
+ }
+}
+
+static DEVICE_ATTR(extension, S_IRUGO, wiiext_show, NULL);
+
+static int wiiext_input_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_input_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+static int wiiext_mp_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->mp_opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_mp_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->mp_opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+/* Initializes the extension driver of a wiimote */
+int wiiext_init(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext;
+ unsigned long flags;
+ int ret, i;
+
+ ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->wdata = wdata;
+ INIT_WORK(&ext->worker, wiiext_worker);
+
+ ext->input = input_allocate_device();
+ if (!ext->input) {
+ ret = -ENOMEM;
+ goto err_input;
+ }
+
+ input_set_drvdata(ext->input, ext);
+ ext->input->open = wiiext_input_open;
+ ext->input->close = wiiext_input_close;
+ ext->input->dev.parent = &wdata->hdev->dev;
+ ext->input->id.bustype = wdata->hdev->bus;
+ ext->input->id.vendor = wdata->hdev->vendor;
+ ext->input->id.product = wdata->hdev->product;
+ ext->input->id.version = wdata->hdev->version;
+ ext->input->name = WIIMOTE_NAME " Extension";
+
+ set_bit(EV_KEY, ext->input->evbit);
+ for (i = 0; i < WIIEXT_KEY_COUNT; ++i)
+ set_bit(wiiext_keymap[i], ext->input->keybit);
+
+ set_bit(EV_ABS, ext->input->evbit);
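+ /* HAT0 = nunchuck stick, HAT1/HAT2 = classic sticks, HAT3 = classic triggers, RX/RY/RZ = nunchuck accelerometer */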
+ set_bit(ABS_HAT0X, ext->input->absbit);
+ set_bit(ABS_HAT0Y, ext->input->absbit);
+ set_bit(ABS_HAT1X, ext->input->absbit);
+ set_bit(ABS_HAT1Y, ext->input->absbit);
+ set_bit(ABS_HAT2X, ext->input->absbit);
+ set_bit(ABS_HAT2Y, ext->input->absbit);
+ set_bit(ABS_HAT3X, ext->input->absbit);
+ set_bit(ABS_HAT3Y, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_HAT0X, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT0Y, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT1X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT1Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3Y, -30, 30, 1, 1);
+ set_bit(ABS_RX, ext->input->absbit);
+ set_bit(ABS_RY, ext->input->absbit);
+ set_bit(ABS_RZ, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RZ, -500, 500, 2, 4);
+
+ ret = input_register_device(ext->input);
+ if (ret) {
+ input_free_device(ext->input);
+ goto err_input;
+ }
+
+ ext->mp_input = input_allocate_device();
+ if (!ext->mp_input) {
+ ret = -ENOMEM;
+ goto err_mp;
+ }
+
+ input_set_drvdata(ext->mp_input, ext);
+ ext->mp_input->open = wiiext_mp_open;
+ ext->mp_input->close = wiiext_mp_close;
+ ext->mp_input->dev.parent = &wdata->hdev->dev;
+ ext->mp_input->id.bustype = wdata->hdev->bus;
+ ext->mp_input->id.vendor = wdata->hdev->vendor;
+ ext->mp_input->id.product = wdata->hdev->product;
+ ext->mp_input->id.version = wdata->hdev->version;
+ ext->mp_input->name = WIIMOTE_NAME " Motion+";
+
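+ /* Motion+ data is reported on its own input device with a wider ABS range */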
+ set_bit(EV_ABS, ext->mp_input->evbit);
+ set_bit(ABS_RX, ext->mp_input->absbit);
+ set_bit(ABS_RY, ext->mp_input->absbit);
+ set_bit(ABS_RZ, ext->mp_input->absbit);
+ input_set_abs_params(ext->mp_input, ABS_RX, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RY, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RZ, -160000, 160000, 4, 8);
+
+ ret = input_register_device(ext->mp_input);
+ if (ret) {
+ input_free_device(ext->mp_input);
+ goto err_mp;
+ }
+
+ ret = device_create_file(&wdata->hdev->dev, &dev_attr_extension);
+ if (ret)
+ goto err_dev;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = ext;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_dev:
+ input_unregister_device(ext->mp_input);
+err_mp:
+ input_unregister_device(ext->input);
+err_input:
+ kfree(ext);
+ return ret;
+}
+
+/* Deinitializes the extension driver of a wiimote */
+void wiiext_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext = wdata->ext;
+ unsigned long flags;
+
+ if (!ext)
+ return;
+
+ /*
+ * We first unset wdata->ext to avoid further input from the wiimote
+ * core. The worker thread does not access this pointer so it is not
+ * affected by this.
+ * We kill the worker after this so it does not get respawned during
+ * deinitialization.
+ */
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ device_remove_file(&wdata->hdev->dev, &dev_attr_extension);
+ input_unregister_device(ext->mp_input);
+ input_unregister_device(ext->input);
+
+ cancel_work_sync(&ext->worker);
+ kfree(ext);
+}
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
new file mode 100644
index 00000000000..c81dbeb086c
--- /dev/null
+++ b/drivers/hid/hid-wiimote.h
@@ -0,0 +1,208 @@
+#ifndef __HID_WIIMOTE_H
+#define __HID_WIIMOTE_H
+
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
+
+/* return flag for led \num */
+#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
+
+struct wiimote_buf {
+ __u8 data[HID_MAX_BUFFER_SIZE];
+ size_t size;
+};
+
+struct wiimote_state {
+ spinlock_t lock;
+ __u8 flags;
+ __u8 accel_split[2];
+ __u8 drm;
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
+ __u8 *cmd_read_buf;
+ __u8 cmd_read_size;
+};
+
+struct wiimote_data {
+ struct hid_device *hdev;
+ struct input_dev *input;
+ struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
+ struct wiimote_ext *ext;
+ struct wiimote_debug *debug;
+
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+ struct work_struct worker;
+
+ struct wiimote_state state;
+};
+
+enum wiiproto_reqs {
+ WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
+ WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
+ WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
+ WIIPROTO_REQ_RETURN = 0x22,
+ WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
+ WIIPROTO_REQ_MAX
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+ dev))
+
+extern void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm);
+extern int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size);
+extern ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset,
+ __u8 *rmem, __u8 size);
+
+#define wiiproto_req_rreg(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), false, (os), (sz))
+#define wiiproto_req_reeprom(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), true, (os), (sz))
+extern void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, __u16 size);
+
+#ifdef CONFIG_HID_WIIMOTE_EXT
+
+extern int wiiext_init(struct wiimote_data *wdata);
+extern void wiiext_deinit(struct wiimote_data *wdata);
+extern void wiiext_event(struct wiimote_data *wdata, bool plugged);
+extern bool wiiext_active(struct wiimote_data *wdata);
+extern void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload);
+
+#else
+
+static inline int wiiext_init(void *u) { return 0; }
+static inline void wiiext_deinit(void *u) { }
+static inline void wiiext_event(void *u, bool p) { }
+static inline bool wiiext_active(void *u) { return false; }
+static inline void wiiext_handle(void *u, const __u8 *p) { }
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+extern int wiidebug_init(struct wiimote_data *wdata);
+extern void wiidebug_deinit(struct wiimote_data *wdata);
+
+#else
+
+static inline int wiidebug_init(void *u) { return 0; }
+static inline void wiidebug_deinit(void *u) { }
+
+#endif
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
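+ /* reset the completion before recording the new pending command */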
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
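+ /* wait up to one second (HZ jiffies) for the pending request to complete */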
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
+#endif
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b403fcef0b8..5bf91dbad59 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -197,16 +197,24 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
if (!hid)
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -215,6 +223,7 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
WARN_ON(hid == NULL);
if (!hid)
@@ -222,10 +231,17 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -304,30 +320,21 @@ static int hid_submit_out(struct hid_device *hid)
report = usbhid->out[usbhid->outtail].report;
raw_report = usbhid->out[usbhid->outtail].raw_report;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
-
- /*
- * if the device hasn't been woken, we leave the output
- * to resume()
- */
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
+ 1 + (report->id > 0);
+ usbhid->urbout->dev = hid_to_usb_dev(hid);
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
- dbg_hid("submitting out urb\n");
+ dbg_hid("submitting out urb\n");
- if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
- hid_err(hid, "usb_submit_urb(out) failed\n");
- usb_autopm_put_interface_async(usbhid->intf);
- return -1;
- }
- usbhid->last_out = jiffies;
+ r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(out) failed: %d\n", r);
+ return r;
}
-
+ usbhid->last_out = jiffies;
return 0;
}
@@ -343,50 +350,48 @@ static int hid_submit_ctrl(struct hid_device *hid)
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (dir == USB_DIR_OUT) {
- usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
- usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
- } else {
- int maxpacket, padlen;
-
- usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
- maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe, 0);
- if (maxpacket > 0) {
- padlen = DIV_ROUND_UP(len, maxpacket);
- padlen *= maxpacket;
- if (padlen > usbhid->bufsize)
- padlen = usbhid->bufsize;
- } else
- padlen = 0;
- usbhid->urbctrl->transfer_buffer_length = padlen;
- }
- usbhid->urbctrl->dev = hid_to_usb_dev(hid);
-
- usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
- usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
- usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) | report->id);
- usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
- usbhid->cr->wLength = cpu_to_le16(len);
-
- dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
- usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
- usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
-
- if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
- usb_autopm_put_interface_async(usbhid->intf);
- hid_err(hid, "usb_submit_urb(ctrl) failed\n");
- return -1;
- }
- usbhid->last_ctrl = jiffies;
+ len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ if (dir == USB_DIR_OUT) {
+ usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
+ usbhid->urbctrl->transfer_buffer_length = len;
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ } else {
+ int maxpacket, padlen;
+
+ usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
+ maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
+ usbhid->urbctrl->pipe, 0);
+ if (maxpacket > 0) {
+ padlen = DIV_ROUND_UP(len, maxpacket);
+ padlen *= maxpacket;
+ if (padlen > usbhid->bufsize)
+ padlen = usbhid->bufsize;
+ } else
+ padlen = 0;
+ usbhid->urbctrl->transfer_buffer_length = padlen;
}
-
+ usbhid->urbctrl->dev = hid_to_usb_dev(hid);
+
+ usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
+ usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT :
+ HID_REQ_GET_REPORT;
+ usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
+ report->id);
+ usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
+ usbhid->cr->wLength = cpu_to_le16(len);
+
+ dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
+ usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
+ "Get_Report",
+ usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
+
+ r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r);
+ return r;
+ }
+ usbhid->last_ctrl = jiffies;
return 0;
}
@@ -423,11 +428,8 @@ static void hid_irq_out(struct urb *urb)
else
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (usbhid->outhead != usbhid->outtail) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->outhead != usbhid->outtail && !hid_submit_out(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock_irqrestore(&usbhid->lock, flags);
return;
}
@@ -474,13 +476,9 @@ static void hid_ctrl(struct urb *urb)
else
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (usbhid->ctrlhead != usbhid->ctrltail) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->ctrlhead != usbhid->ctrltail && !hid_submit_ctrl(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock(&usbhid->lock);
- usb_autopm_put_interface_async(usbhid->intf);
return;
}
@@ -515,9 +513,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * But if still suspended, leave urb enqueued, don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid))
+ if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -549,9 +561,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * If already suspended, leave urb enqueued, but don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid))
+ if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -576,6 +602,30 @@ void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, uns
}
EXPORT_SYMBOL_GPL(usbhid_submit_report);
+/* Workqueue routine to send requests to change LEDs */
+static void hid_led(struct work_struct *work)
+{
+ struct usbhid_device *usbhid =
+ container_of(work, struct usbhid_device, led_work);
+ struct hid_device *hid = usbhid->hid;
+ struct hid_field *field;
+ unsigned long flags;
+
+ field = hidinput_get_led_field(hid);
+ if (!field) {
+ hid_warn(hid, "LED event field not found\n");
+ return;
+ }
+
+ spin_lock_irqsave(&usbhid->lock, flags);
+ if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
+ usbhid->ledcount = hidinput_count_leds(hid);
+ hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
+ __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ }
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+}
+
static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -595,17 +645,15 @@ static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, un
return -1;
}
+ spin_lock_irqsave(&usbhid->lock, flags);
hid_set_field(field, offset, value);
- if (value) {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount++;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- } else {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount--;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- }
- usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+
+ /*
+ * Defer performing requested LED action.
+ * This makes it more likely to gather all LED changes into a single URB.
+ */
+ schedule_work(&usbhid->led_work);
return 0;
}
@@ -1100,7 +1148,7 @@ static void usbhid_stop(struct hid_device *hid)
return;
clear_bit(HID_STARTED, &usbhid->iofl);
- spin_lock_irq(&usbhid->lock); /* Sync with error handler */
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
usb_kill_urb(usbhid->urbin);
@@ -1234,6 +1282,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
+ INIT_WORK(&usbhid->led_work, hid_led);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1266,6 +1316,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
del_timer_sync(&usbhid->io_retry);
cancel_work_sync(&usbhid->reset_work);
+ cancel_work_sync(&usbhid->led_work);
}
static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1367,16 +1418,6 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
return -EIO;
}
- if (!ignoreled && PMSG_IS_AUTO(message)) {
- spin_lock_irq(&usbhid->lock);
- if (test_bit(HID_LED_ON, &usbhid->iofl)) {
- spin_unlock_irq(&usbhid->lock);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
- }
- spin_unlock_irq(&usbhid->lock);
- }
-
hid_cancel_delayed_stuff(usbhid);
hid_cease_io(usbhid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 5028d60a22a..c831af93748 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
+ { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
@@ -67,6 +68,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4ef02b269a7..7c297d305d5 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -859,7 +859,7 @@ static const struct file_operations hiddev_fops = {
.llseek = noop_llseek,
};
-static char *hiddev_devnode(struct device *dev, mode_t *mode)
+static char *hiddev_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1673cac93d7..cb8f703efde 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -55,7 +55,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_STARTED 8
#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
-#define HID_LED_ON 11
/*
* USB-specific HID struct, to be pointed to
@@ -97,6 +96,8 @@ struct usbhid_device {
struct work_struct reset_work; /* Task context for resets */
wait_queue_head_t wait; /* For sleeping */
int ledcount; /* counting the number of active leds */
+
+ struct work_struct led_work; /* Task context for setting LEDs */
};
#define hid_to_usb_dev(hid_dev) \
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 065817329f0..796086980f4 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -64,6 +64,32 @@ static const unsigned char usb_kbd_keycode[256] = {
150,158,159,128,136,177,178,176,142,152,173,140
};
+
+/**
+ * struct usb_kbd - state of each attached keyboard
+ * @dev: input device associated with this keyboard
+ * @usbdev: usb device associated with this keyboard
+ * @old: data received in the past from the @irq URB representing which
+ * keys were pressed. By comparing with the current list of keys
+ * that are pressed, we are able to see key releases.
+ * @irq: URB for receiving a list of keys that are pressed when a
+ * new key is pressed or a key that was pressed is released.
+ * @led: URB for sending LEDs (e.g. numlock, ...)
+ * @newleds: data that will be sent with the @led URB representing which LEDs
+ * should be on
+ * @name: Name of the keyboard. @dev's name field points to this buffer
+ * @phys: Physical path of the keyboard. @dev's phys field points to this
+ * buffer
+ * @new: Buffer for the @irq URB
+ * @cr: Control request for @led URB
+ * @leds: Buffer for the @led URB
+ * @new_dma: DMA address for @irq URB
+ * @leds_dma: DMA address for @led URB
+ * @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted
+ * @led_urb_submitted: indicates whether @led is in progress, i.e. it has been
+ * submitted and its completion handler has not returned yet
+ * without resubmitting @led
+ */
struct usb_kbd {
struct input_dev *dev;
struct usb_device *usbdev;
@@ -78,6 +104,10 @@ struct usb_kbd {
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
+
+ spinlock_t leds_lock;
+ bool led_urb_submitted;
+
};
static void usb_kbd_irq(struct urb *urb)
@@ -136,44 +166,66 @@ resubmit:
static int usb_kbd_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
+ unsigned long flags;
struct usb_kbd *kbd = input_get_drvdata(dev);
if (type != EV_LED)
return -1;
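+ /* leds_lock guards newleds and led_urb_submitted against the URB completion handler */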
+ spin_lock_irqsave(&kbd->leds_lock, flags);
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
- if (kbd->led->status == -EINPROGRESS)
+ if (kbd->led_urb_submitted) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
- if (*(kbd->leds) == kbd->newleds)
+ if (*(kbd->leds) == kbd->newleds) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
*(kbd->leds) = kbd->newleds;
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
-
+ else
+ kbd->led_urb_submitted = true;
+
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
return 0;
}
static void usb_kbd_led(struct urb *urb)
{
+ unsigned long flags;
struct usb_kbd *kbd = urb->context;
if (urb->status)
hid_warn(urb->dev, "led urb status %d received\n",
urb->status);
- if (*(kbd->leds) == kbd->newleds)
+ spin_lock_irqsave(&kbd->leds_lock, flags);
+
+ if (*(kbd->leds) == kbd->newleds) {
+ kbd->led_urb_submitted = false;
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return;
+ }
*(kbd->leds) = kbd->newleds;
+
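+ /* LED state changed while the URB was in flight; update the buffer and resubmit */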
kbd->led->dev = kbd->usbdev;
- if (usb_submit_urb(kbd->led, GFP_ATOMIC))
+ if (usb_submit_urb(kbd->led, GFP_ATOMIC)) {
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
+ kbd->led_urb_submitted = false;
+ }
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
}
static int usb_kbd_open(struct input_dev *dev)
@@ -252,6 +304,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
kbd->usbdev = dev;
kbd->dev = input_dev;
+ spin_lock_init(&kbd->leds_lock);
if (dev->manufacturer)
strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
@@ -334,6 +387,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
if (kbd) {
usb_kill_urb(kbd->irq);
input_unregister_device(kbd->dev);
+ usb_kill_urb(kbd->led);
usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
kfree(kbd);
}
@@ -354,19 +408,4 @@ static struct usb_driver usb_kbd_driver = {
.id_table = usb_kbd_id_table,
};
-static int __init usb_kbd_init(void)
-{
- int result = usb_register(&usb_kbd_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit usb_kbd_exit(void)
-{
- usb_deregister(&usb_kbd_driver);
-}
-
-module_init(usb_kbd_init);
-module_exit(usb_kbd_exit);
+module_usb_driver(usb_kbd_driver);
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 79b2bf81a05..0f6be45d43d 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -241,19 +241,4 @@ static struct usb_driver usb_mouse_driver = {
.id_table = usb_mouse_id_table,
};
-static int __init usb_mouse_init(void)
-{
- int retval = usb_register(&usb_mouse_driver);
- if (retval == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return retval;
-}
-
-static void __exit usb_mouse_exit(void)
-{
- usb_deregister(&usb_mouse_driver);
-}
-
-module_init(usb_mouse_init);
-module_exit(usb_mouse_exit);
+module_usb_driver(usb_mouse_driver);
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 9fa09ac000a..70f5dde1cc5 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -1,3 +1,5 @@
+menu "Microsoft Hyper-V guest support"
+
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on X86 && ACPI && PCI
@@ -11,4 +13,4 @@ config HYPERV_UTILS
help
Select this option to enable the Hyper-V Utilities.
-
+endmenu
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 12b85ff957f..36484db36ba 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -223,6 +223,17 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
vmbus_device_unregister(channel->device_obj);
}
+void vmbus_free_channels(void)
+{
+ struct vmbus_channel *channel;
+
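+ /* walk the connection's channel list, unregistering and freeing each entry */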
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ vmbus_device_unregister(channel->device_obj);
+ kfree(channel->device_obj);
+ free_channel(channel);
+ }
+}
+
/*
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
@@ -287,6 +298,7 @@ static void vmbus_process_offer(struct work_struct *work)
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&newchannel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ kfree(newchannel->device_obj);
free_channel(newchannel);
} else {
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 0fb100ed91a..12aa97f31f9 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -164,11 +164,6 @@ int hv_init(void)
max_leaf = query_hypervisor_info();
- rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
-
- if (hv_context.guestid != 0)
- goto cleanup;
-
/* Write our OS info */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
hv_context.guestid = HV_LINUX_GUEST_ID;
@@ -237,6 +232,9 @@ void hv_cleanup(void)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
+ /* Reset our OS id */
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+
kfree(hv_context.signal_event_buffer);
hv_context.signal_event_buffer = NULL;
hv_context.signal_event_param = NULL;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 89f52440fcf..0e8343f585b 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0aee1122734..6d7d286d544 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -611,6 +611,7 @@ void vmbus_device_unregister(struct hv_device *device_obj);
struct vmbus_channel *relid2channel(u32 relid);
+void vmbus_free_channels(void);
/* Connection interface */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0c048dd8013..a220e5746d6 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -62,6 +62,14 @@ struct hv_device_info {
struct hv_dev_port_info outbound;
};
+static int vmbus_exists(void)
+{
+ if (hv_acpi_dev == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
static void get_channel_info(struct hv_device *device,
struct hv_device_info *info)
@@ -590,6 +598,10 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
pr_info("registering driver %s\n", hv_driver->name);
+ ret = vmbus_exists();
+ if (ret < 0)
+ return ret;
+
hv_driver->driver.name = hv_driver->name;
hv_driver->driver.owner = owner;
hv_driver->driver.mod_name = mod_name;
@@ -614,8 +626,8 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
- driver_unregister(&hv_driver->driver);
-
+ if (!vmbus_exists())
+ driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
@@ -776,11 +788,23 @@ static int __init hv_acpi_init(void)
cleanup:
acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ hv_acpi_dev = NULL;
return ret;
}
+static void __exit vmbus_exit(void)
+{
+
+ free_irq(irq, hv_acpi_dev);
+ vmbus_free_channels();
+ bus_unregister(&hv_bus);
+ hv_cleanup();
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+}
+
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
-module_init(hv_acpi_init);
+subsys_initcall(hv_acpi_init);
+module_exit(vmbus_exit);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 91be41f6080..cb351d35838 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -367,11 +367,11 @@ config SENSORS_F71882FG
will be called f71882fg.
config SENSORS_F75375S
- tristate "Fintek F75375S/SP and F75373"
+ tristate "Fintek F75375S/SP, F75373 and F75387"
depends on I2C
help
If you say yes here you get support for hardware monitoring
- features of the Fintek F75375S/SP and F75373
+ features of the Fintek F75375S/SP, F75373 and F75387
This driver can also be built as a module. If so, the module
will be called f75375s.
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 66f67293341..522860ab6ce 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -170,7 +170,7 @@ static ssize_t set_avg_interval(struct device *dev,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -311,7 +311,7 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
int res;
unsigned long temp;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e8537..5d760f3d21c 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index b2cacbe707a..ceb24a36517 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -125,7 +125,7 @@ static ssize_t adcxx_set_max(struct device *dev,
struct adcxx *adc = spi_get_drvdata(spi);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
if (mutex_lock_interruptible(&adc->lock))
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0683e6be662..e6291dafa4c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -767,7 +767,7 @@ static ssize_t set_update_interval(struct device *dev,
int i, err;
u8 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 9e234b981b8..3f63f5f9741 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -503,7 +503,7 @@ static ssize_t chassis_clear(struct device *dev,
struct adm9240_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e18..04450f8bf5d 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 5cc3e3784b4..5b02f7a9101 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -197,7 +197,7 @@ static ssize_t adt7411_set_bit(struct device *dev,
int ret;
unsigned long flag;
- ret = strict_strtoul(buf, 0, &flag);
+ ret = kstrtoul(buf, 0, &flag);
if (ret || flag > 1)
return -EINVAL;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 2af0c7b6b4e..7a1494846cf 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -833,7 +833,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -871,7 +871,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -935,7 +935,7 @@ static ssize_t set_volt_max(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -977,7 +977,7 @@ static ssize_t set_volt_min(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -1066,7 +1066,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp ||
+ if (kstrtol(buf, 10, &temp) || !temp ||
!fan_enabled(data, attr->index))
return -EINVAL;
@@ -1115,7 +1115,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -1147,7 +1147,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1177,7 +1177,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1209,7 +1209,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1243,7 +1243,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -1289,7 +1289,7 @@ static ssize_t set_pwm_tmax(struct device *dev,
int tmin, trange_value;
long trange;
- if (strict_strtol(buf, 10, &trange))
+ if (kstrtol(buf, 10, &trange))
return -EINVAL;
/* trange = tmax - tmin */
@@ -1330,7 +1330,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -1387,7 +1387,7 @@ static ssize_t set_pwm_auto(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
switch (temp) {
@@ -1446,7 +1446,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c6d1ce059ae..5e10c79f2df 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -449,7 +449,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 60000);
@@ -478,7 +478,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, -1, 10);
@@ -511,7 +511,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -545,7 +545,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -600,7 +600,7 @@ static ssize_t set_fan_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -637,7 +637,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -682,7 +682,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -714,7 +714,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -746,7 +746,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -779,7 +779,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -822,7 +822,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -859,7 +859,7 @@ static ssize_t set_pwm_auto(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
if (attr->index % 2)
@@ -919,7 +919,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index b5fcd87931c..7dab3547fee 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -343,7 +343,7 @@ static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
unsigned char reg;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -432,7 +432,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -546,7 +546,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -602,7 +602,7 @@ static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -653,7 +653,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
unsigned char reg = 0;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -758,7 +758,7 @@ static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -781,7 +781,7 @@ static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -819,7 +819,7 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
int out;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
@@ -853,7 +853,7 @@ static ssize_t set_pwm_at_crit(struct device *dev,
struct adt7475_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -883,7 +883,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
struct adt7475_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val < 0 || val > 255)
return -EINVAL;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 4033974d1bb..89a6b9da0ec 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -238,7 +238,7 @@ static ssize_t set_temp(
int ix = to_sensor_dev_attr(attr)->index;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = SENSORS_LIMIT(val / 1000, -128, 127);
@@ -327,7 +327,7 @@ static ssize_t set_pwm1(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -356,7 +356,7 @@ static ssize_t set_pwm1_enable(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
@@ -477,7 +477,7 @@ static ssize_t set_temp_auto_point_temp(
u8 reg;
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -556,7 +556,7 @@ static ssize_t set_pwm1_auto_point_pwm(
struct amc6821_data *data = i2c_get_clientdata(client);
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -623,7 +623,7 @@ static ssize_t set_fan(
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
int ix = to_sensor_dev_attr(attr)->index;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = 1 > val ? 0xFFFF : 6000000/val;
@@ -665,7 +665,7 @@ static ssize_t set_fan1_div(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 4c0743660e9..b9895531240 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -782,7 +782,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+ if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
@@ -822,7 +822,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
unsigned long input;
u16 val;
- if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+ if (kstrtoul(sysfsbuf, 10, &input) < 0)
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
@@ -977,7 +977,7 @@ static ssize_t applesmc_key_at_index_store(struct device *dev,
{
unsigned long newkey;
- if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+ if (kstrtoul(sysfsbuf, 10, &newkey) < 0
|| newkey >= smcreg.key_count)
return -EINVAL;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index d2596cec18b..3efd3244998 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -188,7 +188,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 255);
@@ -221,7 +221,7 @@ static ssize_t store_bitmask(struct device *dev,
long reqval;
u8 currval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
@@ -265,7 +265,7 @@ static ssize_t store_fan16(struct device *dev,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
/* If a minimum RPM of zero is requested, then we set the register to
@@ -338,7 +338,7 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
long reqval;
u8 nr = sda->index;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
@@ -371,7 +371,7 @@ static ssize_t store_temp8(struct device *dev,
long reqval;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -127000, 127000);
@@ -427,7 +427,7 @@ static ssize_t store_temp62(struct device *dev,
long reqval, i, f;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -32000, 31750);
@@ -482,7 +482,7 @@ static ssize_t store_ap2_temp(struct device *dev,
int i;
u8 currval, newval = 0;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -538,7 +538,7 @@ static ssize_t store_pwm_ac(struct device *dev,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
};
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
if (reqval > 31)
@@ -601,7 +601,7 @@ static ssize_t store_pwm_enable(struct device *dev,
long reqval;
u8 currval, config, altbit, newval, minoff = 255;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
switch (reqval) {
@@ -675,7 +675,7 @@ static ssize_t store_pwm_freq(struct device *dev,
u8 currval, newval = 255;
int i;
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_freq_map); i++) {
@@ -724,7 +724,7 @@ static ssize_t store_pwm_ast(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
@@ -771,7 +771,7 @@ static ssize_t store_temp_st(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 104b3767516..1fdef885341 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#else
-#define TO_PHYS_ID(cpu) (cpu)
-#define TO_CORE_ID(cpu) (cpu)
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
/*
* Per-Core Temperature Data
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index d9c59271391..d9803958e49 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1223,7 +1223,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
}
static struct attribute *dme1737_pwm_chmod_attr[];
-static void dme1737_chmod_file(struct device*, struct attribute*, mode_t);
+static void dme1737_chmod_file(struct device*, struct attribute*, umode_t);
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1961,7 +1961,7 @@ static inline void dme1737_sio_outb(int sio_cip, int reg, int val)
static int dme1737_i2c_get_features(int, struct dme1737_data*);
static void dme1737_chmod_file(struct device *dev,
- struct attribute *attr, mode_t mode)
+ struct attribute *attr, umode_t mode)
{
if (sysfs_chmod_file(&dev->kobj, attr, mode)) {
dev_warn(dev, "Failed to change permissions of %s.\n",
@@ -1971,7 +1971,7 @@ static void dme1737_chmod_file(struct device *dev,
static void dme1737_chmod_group(struct device *dev,
const struct attribute_group *group,
- mode_t mode)
+ umode_t mode)
{
struct attribute **attr;
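
The mode_t to umode_t changes in dme1737 (and in the .is_visible callbacks further down) follow the kernel-wide switch of sysfs file-mode parameters to umode_t. A minimal sketch of a runtime permission change with the new type, assuming an illustrative helper name:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch: change an attribute's permissions at runtime using umode_t,
 * mirroring the dme1737_chmod_file() helper above. Name is illustrative.
 */
static void example_chmod_attr(struct device *dev, struct attribute *attr,
			       umode_t mode)
{
	if (sysfs_chmod_file(&dev->kobj, attr, mode))
		dev_warn(dev, "Failed to change permissions of %s\n",
			 attr->name);
}
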
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 225ae4f3658..300c3d4d67d 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -161,7 +161,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
struct ds620_data *data = i2c_get_clientdata(client);
- res = strict_strtol(buf, 10, &val);
+ res = kstrtol(buf, 10, &val);
if (res)
return res;
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index cd2a6e437ae..270ffab711c 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -80,7 +80,7 @@ static ssize_t store_temp(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
retval = i2c_smbus_write_byte_data(client, sda->index,
DIV_ROUND_CLOSEST(val, 1000));
@@ -98,7 +98,7 @@ static ssize_t store_bit(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
@@ -151,7 +151,7 @@ static ssize_t store_hyst(struct device *dev,
int hyst;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index af914ad93ec..848a2b0bc83 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -244,7 +244,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -268,7 +268,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -314,7 +314,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
int new_range_bits, old_div = 8 / data->fan_multiplier;
long new_div;
- int status = strict_strtol(buf, 10, &new_div);
+ int status = kstrtol(buf, 10, &new_div);
if (status < 0)
return -EINVAL;
@@ -388,7 +388,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long rpm_target;
- int result = strict_strtol(buf, 10, &rpm_target);
+ int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
return -EINVAL;
@@ -434,7 +434,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
long new_value;
u8 conf_reg;
- int result = strict_strtol(buf, 10, &new_value);
+ int result = kstrtol(buf, 10, &new_value);
if (result < 0)
return -EINVAL;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 0064432f361..6ebb9b738c9 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -212,7 +212,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -249,7 +249,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -291,7 +291,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
int err;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f..f2359a0093b 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 59dd881c71d..e50305819f0 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1333,7 +1333,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1367,7 +1367,7 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1420,7 +1420,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
int err;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1454,7 +1454,7 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1524,7 +1524,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1566,7 +1566,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1609,7 +1609,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1670,7 +1670,7 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1737,7 +1737,7 @@ static ssize_t store_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1788,7 +1788,7 @@ static ssize_t store_simple_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1835,7 +1835,7 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1915,7 +1915,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1969,7 +1969,7 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2015,7 +2015,7 @@ static ssize_t store_pwm_interpolate(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -2055,7 +2055,7 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2106,7 +2106,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 95cbfb3a707..eedf574ab53 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -1,16 +1,19 @@
/*
- * f75375s.c - driver for the Fintek F75375/SP and F75373
- * hardware monitoring features
+ * f75375s.c - driver for the Fintek F75375/SP, F75373 and
+ * F75387SG/RG hardware monitoring features
* Copyright (C) 2006-2007 Riku Voipio
*
* Datasheets available at:
*
* f75375:
- * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
+ * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
*
* f75373:
* http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
*
+ * f75387:
+ * http://www.fintek.com.tw/files/productfiles/F75387_V027P.pdf
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -40,7 +43,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
-enum chips { f75373, f75375 };
+enum chips { f75373, f75375, f75387 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
@@ -59,6 +62,7 @@ enum chips { f75373, f75375 };
#define F75375_REG_VOLT_LOW(nr) (0x21 + (nr) * 2)
#define F75375_REG_TEMP(nr) (0x14 + (nr))
+#define F75387_REG_TEMP11_LSB(nr) (0x1a + (nr))
#define F75375_REG_TEMP_HIGH(nr) (0x28 + (nr) * 2)
#define F75375_REG_TEMP_HYST(nr) (0x29 + (nr) * 2)
@@ -78,8 +82,11 @@ enum chips { f75373, f75375 };
#define F75375_REG_PWM1_DROP_DUTY 0x6B
#define F75375_REG_PWM2_DROP_DUTY 0x6C
-#define FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75375_FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75387_FAN_CTRL_LINEAR(nr) (1 + ((nr) * 4))
#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
+#define F75387_FAN_DUTY_MODE(nr) (2 + ((nr) * 4))
+#define F75387_FAN_MANU_MODE(nr) ((nr) * 4)
/*
* Data structures and manipulation thereof
@@ -102,13 +109,18 @@ struct f75375_data {
u8 in_min[4];
u16 fan[2];
u16 fan_min[2];
- u16 fan_full[2];
- u16 fan_exp[2];
+ u16 fan_max[2];
+ u16 fan_target[2];
u8 fan_timer;
u8 pwm[2];
u8 pwm_mode[2];
u8 pwm_enable[2];
- s8 temp[2];
+ /*
+ * f75387: For remote temperature reading, it uses signed 11-bit
+ * values with LSB = 0.125 degree Celsius, left-justified in 16-bit
+ * registers. For original 8-bit temp readings, the LSB just is 0.
+ */
+ s16 temp11[2];
s8 temp_high[2];
s8 temp_max_hyst[2];
};
@@ -122,6 +134,7 @@ static int f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
{ "f75375", f75375 },
+ { "f75387", f75387 },
{ }
};
MODULE_DEVICE_TABLE(i2c, f75375_id);
@@ -146,8 +159,8 @@ static inline int f75375_read8(struct i2c_client *client, u8 reg)
/* in most cases, should be called while holding update_lock */
static inline u16 f75375_read16(struct i2c_client *client, u8 reg)
{
- return ((i2c_smbus_read_byte_data(client, reg) << 8)
- | i2c_smbus_read_byte_data(client, reg + 1));
+ return (i2c_smbus_read_byte_data(client, reg) << 8)
+ | i2c_smbus_read_byte_data(client, reg + 1);
}
static inline void f75375_write8(struct i2c_client *client, u8 reg,
@@ -181,11 +194,11 @@ static struct f75375_data *f75375_update_device(struct device *dev)
f75375_read8(client, F75375_REG_TEMP_HIGH(nr));
data->temp_max_hyst[nr] =
f75375_read8(client, F75375_REG_TEMP_HYST(nr));
- data->fan_full[nr] =
+ data->fan_max[nr] =
f75375_read16(client, F75375_REG_FAN_FULL(nr));
data->fan_min[nr] =
f75375_read16(client, F75375_REG_FAN_MIN(nr));
- data->fan_exp[nr] =
+ data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
data->pwm[nr] = f75375_read8(client,
F75375_REG_FAN_PWM_DUTY(nr));
@@ -205,8 +218,14 @@ static struct f75375_data *f75375_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
- data->temp[nr] =
- f75375_read8(client, F75375_REG_TEMP(nr));
+ /* assign MSB, therefore shift it by 8 bits */
+ data->temp11[nr] =
+ f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
+ if (data->kind == f75387)
+ /* merge F75387's temperature LSB (11-bit) */
+ data->temp11[nr] |=
+ f75375_read8(client,
+ F75387_REG_TEMP11_LSB(nr));
data->fan[nr] =
f75375_read16(client, F75375_REG_FAN(nr));
}
@@ -226,14 +245,14 @@ static inline u16 rpm_from_reg(u16 reg)
{
if (reg == 0 || reg == 0xffff)
return 0;
- return (1500000 / reg);
+ return 1500000 / reg;
}
static inline u16 rpm_to_reg(int rpm)
{
if (rpm < 367 || rpm > 0xffff)
return 0xffff;
- return (1500000 / rpm);
+ return 1500000 / rpm;
}
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
@@ -242,7 +261,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = rpm_to_reg(val);
@@ -251,17 +275,22 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_exp(struct device *dev, struct device_attribute *attr,
+static ssize_t set_fan_target(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
- data->fan_exp[nr] = rpm_to_reg(val);
- f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_exp[nr]);
+ data->fan_target[nr] = rpm_to_reg(val);
+ f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_target[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -272,7 +301,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
@@ -294,28 +328,54 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
struct f75375_data *data = i2c_get_clientdata(client);
u8 fanmode;
- if (val < 0 || val > 4)
+ if (val < 0 || val > 3)
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode &= ~(3 << FAN_CTRL_MODE(nr));
-
- switch (val) {
- case 0: /* Full speed */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- data->pwm[nr] = 255;
- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
- data->pwm[nr]);
- break;
- case 1: /* PWM */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- break;
- case 2: /* AUTOMATIC*/
- fanmode |= (2 << FAN_CTRL_MODE(nr));
- break;
- case 3: /* fan speed */
- break;
+ if (data->kind == f75387) {
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
+ fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ data->pwm[nr] = 255;
+ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+ data->pwm[nr]);
+ break;
+ case 1: /* PWM */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ break;
+ }
+ } else {
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ data->pwm[nr] = 255;
+ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+ data->pwm[nr]);
+ break;
+ case 1: /* PWM */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+ fanmode |= (2 << FAN_CTRL_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ break;
+ }
}
+
f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
data->pwm_enable[nr] = val;
return 0;
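
The F75387 uses two bits per fan in the fan timer register, a "manual" bit and a "duty-cycle" bit, instead of the single two-bit field of the F75375/F75373. A sketch that factors the encoding from the switch above into a standalone helper (helper name is illustrative; the F75387_* macros are the ones defined earlier in this diff):

/* Sketch of the F75387 per-fan mode-bit encoding used in the switch above. */
static u8 f75387_encode_mode(u8 fanmode, int nr, int pwm_enable)
{
	fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
	fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));

	switch (pwm_enable) {
	case 0:				/* full speed */
	case 1:				/* manual PWM duty cycle */
		fanmode |= 1 << F75387_FAN_MANU_MODE(nr);
		fanmode |= 1 << F75387_FAN_DUTY_MODE(nr);
		break;
	case 2:				/* automatic */
		fanmode |= 1 << F75387_FAN_DUTY_MODE(nr);
		break;
	case 3:				/* fan speed mode */
		fanmode |= 1 << F75387_FAN_MANU_MODE(nr);
		break;
	}
	return fanmode;
}
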
@@ -327,8 +387,12 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- int err = 0;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
err = set_pwm_enable_direct(client, nr, val);
@@ -342,20 +406,39 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- u8 conf = 0;
+ unsigned long val;
+ int err;
+ u8 conf;
+ char reg, ctrl;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
if (!(val == 0 || val == 1))
return -EINVAL;
+ /* F75373 does not support DC (linear voltage) fan control mode */
+ if (data->kind == f75373 && val == 0)
+ return -EINVAL;
+
+ /* take care for different registers */
+ if (data->kind == f75387) {
+ reg = F75375_REG_FAN_TIMER;
+ ctrl = F75387_FAN_CTRL_LINEAR(nr);
+ } else {
+ reg = F75375_REG_CONFIG1;
+ ctrl = F75375_FAN_CTRL_LINEAR(nr);
+ }
+
mutex_lock(&data->update_lock);
- conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf &= ~(1 << FAN_CTRL_LINEAR(nr));
+ conf = f75375_read8(client, reg);
+ conf &= ~(1 << ctrl);
if (val == 0)
- conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
+ conf |= (1 << ctrl);
- f75375_write8(client, F75375_REG_CONFIG1, conf);
+ f75375_write8(client, reg, conf);
data->pwm_mode[nr] = val;
mutex_unlock(&data->update_lock);
return count;
@@ -410,7 +493,13 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
@@ -425,7 +514,13 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
@@ -435,13 +530,14 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
}
#define TEMP_FROM_REG(val) ((val) * 1000)
#define TEMP_TO_REG(val) ((val) / 1000)
+#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t show_temp11(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
+ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[nr]));
}
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
@@ -466,7 +562,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
@@ -481,7 +583,13 @@ static ssize_t set_temp_max_hyst(struct device *dev,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
@@ -502,8 +610,8 @@ static ssize_t show_##thing(struct device *dev, struct device_attribute *attr, \
show_fan(fan);
show_fan(fan_min);
-show_fan(fan_full);
-show_fan(fan_exp);
+show_fan(fan_max);
+show_fan(fan_target);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO|S_IWUSR,
@@ -525,28 +633,28 @@ static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 3);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_full, S_IRUGO, show_fan_full, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, show_fan_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan1_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_full, S_IRUGO, show_fan_full, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_max, S_IRUGO, show_fan_max, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan2_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 1);
+static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR,
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR,
@@ -568,13 +676,13 @@ static struct attribute *f75375_attributes[] = {
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_full.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_exp.dev_attr.attr,
+ &sensor_dev_attr_fan1_target.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_full.dev_attr.attr,
+ &sensor_dev_attr_fan2_max.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
- &sensor_dev_attr_fan2_exp.dev_attr.attr,
+ &sensor_dev_attr_fan2_target.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
@@ -604,6 +712,51 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
struct f75375s_platform_data *f75375s_pdata)
{
int nr;
+
+ if (!f75375s_pdata) {
+ u8 conf, mode;
+ int nr;
+
+ conf = f75375_read8(client, F75375_REG_CONFIG1);
+ mode = f75375_read8(client, F75375_REG_FAN_TIMER);
+ for (nr = 0; nr < 2; nr++) {
+ if (data->kind == f75387) {
+ bool manu, duty;
+
+ if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
+ duty = ((mode >> F75387_FAN_DUTY_MODE(nr)) & 1);
+ if (manu && duty)
+ /* speed */
+ data->pwm_enable[nr] = 3;
+ else if (!manu && duty)
+ /* automatic */
+ data->pwm_enable[nr] = 2;
+ else
+ /* manual */
+ data->pwm_enable[nr] = 1;
+ } else {
+ if (!(conf & (1 << F75375_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ switch ((mode >> FAN_CTRL_MODE(nr)) & 3) {
+ case 0: /* speed */
+ data->pwm_enable[nr] = 3;
+ break;
+ case 1: /* automatic */
+ data->pwm_enable[nr] = 2;
+ break;
+ default: /* manual */
+ data->pwm_enable[nr] = 1;
+ break;
+ }
+ }
+ }
+ return;
+ }
+
set_pwm_enable_direct(client, 0, f75375s_pdata->pwm_enable[0]);
set_pwm_enable_direct(client, 1, f75375s_pdata->pwm_enable[1]);
for (nr = 0; nr < 2; nr++) {
@@ -624,14 +777,16 @@ static int f75375_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- if (!(data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL)))
+ data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
- if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
+ err = sysfs_create_group(&client->dev.kobj, &f75375_group);
+ if (err)
goto exit_free;
if (data->kind == f75375) {
@@ -653,8 +808,7 @@ static int f75375_probe(struct i2c_client *client,
goto exit_remove;
}
- if (f75375s_pdata != NULL)
- f75375_init(client, data, f75375s_pdata);
+ f75375_init(client, data, f75375s_pdata);
return 0;
@@ -685,10 +839,15 @@ static int f75375_detect(struct i2c_client *client,
vendid = f75375_read16(client, F75375_REG_VENDOR);
chipid = f75375_read16(client, F75375_CHIP_ID);
- if (chipid == 0x0306 && vendid == 0x1934)
+ if (vendid != 0x1934)
+ return -ENODEV;
+
+ if (chipid == 0x0306)
name = "f75375";
- else if (chipid == 0x0204 && vendid == 0x1934)
+ else if (chipid == 0x0204)
name = "f75373";
+ else if (chipid == 0x0410)
+ name = "f75387";
else
return -ENODEV;
@@ -711,7 +870,7 @@ static void __exit sensors_f75375_exit(void)
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("F75373/F75375 hardware monitoring driver");
+MODULE_DESCRIPTION("F75373/F75375/F75387 hardware monitoring driver");
module_init(sensors_f75375_init);
module_exit(sensors_f75375_exit);
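
On the F75387 the remote temperature is an 11-bit signed value, left-justified in a 16-bit register pair with an LSB of 0.125 degrees Celsius; the TEMP11_FROM_REG() macro added above converts that to millidegrees. A small user-space worked example of the conversion, with register values chosen purely for illustration:

#include <stdio.h>

/* Worked example of the 11-bit handling added above:
 * MSB register = 0x2A (42 C), LSB register = 0x60 (top 3 bits = 0.375 C),
 * so reg = 0x2A60 = 10848 and 10848 / 32 * 125 = 42375 millidegrees C.
 */
#define TEMP11_FROM_REG(reg)	((reg) / 32 * 125)

int main(void)
{
	short reg = (0x2A << 8) | 0x60;	/* MSB << 8 | LSB, as in the driver */

	printf("%d millidegrees C\n", TEMP11_FROM_REG(reg));	/* 42375 */
	return 0;
}
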
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 1d6a6fa31fb..781277ddbaa 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -166,7 +166,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
struct g760a_data *data = g760a_update_client(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743a..2ce8c44a0e0 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -224,7 +224,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int speed_index;
int ret = count;
- if (strict_strtoul(buf, 10, &pwm) || pwm > 255)
+ if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
mutex_lock(&fan_data->lock);
@@ -257,7 +257,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
if (fan_data->pwm_enable == val)
@@ -314,7 +314,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
unsigned long rpm;
int ret = count;
- if (strict_strtoul(buf, 10, &rpm))
+ if (kstrtoul(buf, 10, &rpm))
return -EINVAL;
mutex_lock(&fan_data->lock);
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 6a967d7dbde..cc2981f749a 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -904,7 +904,7 @@ static ssize_t aem_set_power_period(struct device *dev,
unsigned long temp;
int res;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d912649fac5..38c0b87676d 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -444,7 +444,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -463,7 +463,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -539,7 +539,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -557,7 +557,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -604,7 +604,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
@@ -718,7 +718,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -751,7 +751,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
int min;
u8 old;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -820,7 +820,7 @@ static ssize_t set_pwm_enable(struct device *dev,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 2)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
return -EINVAL;
/* Check trip points before switching to automatic mode */
@@ -866,7 +866,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -900,7 +900,7 @@ static ssize_t set_pwm_freq(struct device *dev,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
/* Search for the nearest available frequency */
@@ -949,7 +949,7 @@ static ssize_t set_pwm_temp_map(struct device *dev,
return -EINVAL;
}
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
switch (val) {
@@ -1001,7 +1001,7 @@ static ssize_t set_auto_pwm(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1034,7 +1034,7 @@ static ssize_t set_auto_temp(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
+ if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1126,7 +1126,7 @@ static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1180,7 +1180,7 @@ static ssize_t clear_intrusion(struct device *dev, struct device_attribute
long val;
int config;
- if (strict_strtol(buf, 10, &val) < 0 || val != 0)
+ if (kstrtol(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1231,7 +1231,7 @@ static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0
+ if (kstrtol(buf, 10, &val) < 0
|| (val != 0 && val != 1))
return -EINVAL;
@@ -1278,7 +1278,7 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->vrm = val;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 2d3d72805ff..28c09eead36 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -309,7 +309,7 @@ static ssize_t set_##value(struct device *dev, \
struct jc42_data *data = i2c_get_clientdata(client); \
int err, ret = count; \
long val; \
- if (strict_strtol(buf, 10, &val) < 0) \
+ if (kstrtol(buf, 10, &val) < 0) \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
@@ -337,7 +337,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
int err;
int ret = count;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
diff = jc42_temp_from_reg(data->temp_crit) - val;
@@ -413,7 +413,7 @@ static struct attribute *jc42_attributes[] = {
NULL
};
-static mode_t jc42_attribute_mode(struct kobject *kobj,
+static umode_t jc42_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
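
jc42 here, and max1668/max6650 below, switch their attribute_group .is_visible callbacks from mode_t to umode_t to match the updated prototype in <linux/sysfs.h>. A minimal sketch of the callback pattern with the new return type; the data structure, flag, and index threshold are illustrative only:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct example_data {
	bool has_extra;		/* hypothetical feature flag */
};

/* Sketch: hide optional attributes when the chip variant lacks the feature;
 * returning 0 hides the file, returning attr->mode keeps its permissions.
 */
static umode_t example_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct example_data *data = dev_get_drvdata(dev);

	if (index >= 4 && !data->has_extra)	/* indices are illustrative */
		return 0;
	return attr->mode;
}
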
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d4340..5253d23361d 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9e64d96620d..9c8093c4b30 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -50,7 +50,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
short value;
- int status = strict_strtol(buf, 10, &temp);
+ int status = kstrtol(buf, 10, &temp);
if (status < 0)
return status;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 1888dd0fc05..b3311b1d3d9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -93,6 +93,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
@@ -107,7 +111,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
int error;
- error = strict_strtol(buf, 10, &temp);
+ error = kstrtol(buf, 10, &temp);
if (error)
return error;
@@ -402,6 +406,7 @@ static struct lm75_data *lm75_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
+ struct lm75_data *ret = data;
mutex_lock(&data->update_lock);
@@ -414,19 +419,23 @@ static struct lm75_data *lm75_update_device(struct device *dev)
int status;
status = lm75_read_value(client, LM75_REG_TEMP[i]);
- if (status < 0)
- dev_dbg(&client->dev, "reg %d, err %d\n",
- LM75_REG_TEMP[i], status);
- else
- data->temp[i] = status;
+ if (unlikely(status < 0)) {
+ dev_dbg(dev,
+ "LM75: Failed to read value: reg %d, error %d\n",
+ LM75_REG_TEMP[i], status);
+ ret = ERR_PTR(status);
+ data->valid = 0;
+ goto abort;
+ }
+ data->temp[i] = status;
}
data->last_updated = jiffies;
data->valid = 1;
}
+abort:
mutex_unlock(&data->update_lock);
-
- return data;
+ return ret;
}
/*-----------------------------------------------------------------------*/
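
lm75_update_device() now reports a failed register read by clearing data->valid and returning ERR_PTR(status), and show_temp() checks IS_ERR() before dereferencing; ltc4261 below gains the same valid-flag reset. A minimal sketch of that error-propagation pattern in isolation, with illustrative names (example_read_reg() is a hypothetical register read):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct example_data {
	char valid;
	int value;
};

/* The update helper returns either valid data or an encoded errno;
 * callers test with IS_ERR() and propagate PTR_ERR().
 */
static struct example_data *example_update(struct device *dev)
{
	struct example_data *data = dev_get_drvdata(dev);
	int status = example_read_reg(data);	/* hypothetical */

	if (status < 0) {
		data->valid = 0;		/* force a re-read next time */
		return ERR_PTR(status);
	}
	data->value = status;
	data->valid = 1;
	return data;
}

static ssize_t show_example(struct device *dev, struct device_attribute *da,
			    char *buf)
{
	struct example_data *data = example_update(dev);

	if (IS_ERR(data))
		return PTR_ERR(data);
	return sprintf(buf, "%d\n", data->value);
}
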
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index e547a3eb4de..89aa9098ba5 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,6 +1,6 @@
/*
lm75.h - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
+ monitoring
Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
This program is free software; you can redistribute it and/or modify
@@ -37,7 +37,7 @@
static inline u16 LM75_TEMP_TO_REG(long temp)
{
int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
- ntemp += (ntemp<0 ? -250 : 250);
+ ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
@@ -47,4 +47,3 @@ static inline int LM75_TEMP_FROM_REG(u16 reg)
guarantee arithmetic shift and preserve the sign */
return ((s16)reg / 128) * 500;
}
-
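
The lm75.h changes above are whitespace-only; the conversion logic is unchanged: the register holds a 9-bit, left-justified value with 0.5 degree per LSB, and LM75_TEMP_TO_REG() rounds to the nearest half degree. A small user-space worked example (chip-range clamping omitted for brevity):

#include <stdio.h>

#define LM75_TEMP_TO_REG(t) \
	((unsigned short)((((t) + ((t) < 0 ? -250 : 250)) / 500) << 7))
#define LM75_TEMP_FROM_REG(r)	(((short)(r) / 128) * 500)

int main(void)
{
	long temp = 25250;				/* 25.25 C in millidegrees */
	unsigned short reg = LM75_TEMP_TO_REG(temp);

	printf("reg=0x%04x back=%d\n", reg, LM75_TEMP_FROM_REG(reg));
	/* prints: reg=0x1980 back=25500 (rounded to the nearest 0.5 C) */
	return 0;
}
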
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 18a0e6c5fe8..0891b38ffec 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -66,19 +66,19 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
these macros are called: arguments may be evaluated more than once.
Fixing this is just not worth it. */
-#define IN_TO_REG(val) (SENSORS_LIMIT(((val)+5)/10,0,255))
-#define IN_FROM_REG(val) ((val)*10)
+#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254);
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
-#define FAN_FROM_REG(val,div) ((val)==0?-1:\
- (val)==255?0:1350000/((div)*(val)))
+#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : 1350000/((div) * (val)))
static inline long TEMP_FROM_REG(u16 temp)
{
@@ -93,10 +93,11 @@ static inline long TEMP_FROM_REG(u16 temp)
return res / 10;
}
-#define TEMP_LIMIT_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
+ (val) - 0x100 : (val)) * 1000)
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val)<0?\
- ((val)-500)/1000:((val)+500)/1000,0,255)
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
+ ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
@@ -164,7 +165,8 @@ static struct i2c_driver lm80_driver = {
*/
#define show_in(suffix, value) \
-static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -175,14 +177,14 @@ show_in(max, in_max)
show_in(input, in)
#define set_in(suffix, value, reg) \
-static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtol(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock);\
data->value[nr] = IN_TO_REG(val); \
lm80_write_value(client, reg(nr), data->value[nr]); \
@@ -193,7 +195,8 @@ set_in(min, in_min, LM80_REG_IN_MIN)
set_in(max, in_max, LM80_REG_IN_MAX)
#define show_fan(suffix, value) \
-static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_fan_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -245,10 +248,18 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
- case 1: data->fan_div[nr] = 0; break;
- case 2: data->fan_div[nr] = 1; break;
- case 4: data->fan_div[nr] = 2; break;
- case 8: data->fan_div[nr] = 3; break;
+ case 1:
+ data->fan_div[nr] = 0;
+ break;
+ case 2:
+ data->fan_div[nr] = 1;
+ break;
+ case 4:
+ data->fan_div[nr] = 2;
+ break;
+ case 8:
+ data->fan_div[nr] = 3;
+ break;
default:
dev_err(&client->dev, "fan_div value %ld not "
"supported. Choose one of 1, 2, 4 or 8!\n", val);
@@ -268,14 +279,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_temp_input1(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp));
}
#define show_temp(suffix, value) \
-static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct lm80_data *data = lm80_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \
@@ -286,13 +299,13 @@ show_temp(os_max, temp_os_max);
show_temp(os_hyst, temp_os_hyst);
#define set_temp(suffix, value, reg) \
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtoul(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock); \
data->value = TEMP_LIMIT_TO_REG(val); \
lm80_write_value(client, reg, data->value); \
@@ -366,13 +379,13 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, set_fan_div, 1);
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max,
- set_temp_hot_max);
+ set_temp_hot_max);
static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst,
- set_temp_hot_hyst);
+ set_temp_hot_hyst);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max,
- set_temp_os_max);
+ set_temp_os_max);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst,
- set_temp_os_hyst);
+ set_temp_os_hyst);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -459,7 +472,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
- return -ENODEV;
+ return -ENODEV;
}
strlcpy(info->type, "lm80", I2C_NAME_SIZE);
@@ -490,7 +503,8 @@ static int lm80_probe(struct i2c_client *client,
data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group)))
+ err = sysfs_create_group(&client->dev.kobj, &lm80_group);
+ if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
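
The lm80 macro cleanups above are formatting-only; the fan conversion still encodes the register as roughly 1350000 / (rpm * divisor), rounded to the nearest count, with 0 and 255 as sentinel values. A small user-space worked example (the clamping and the rpm == 0 sentinel of the kernel FAN_TO_REG are omitted here):

#include <stdio.h>

#define FAN_TO_REG(rpm, div) \
	((1350000 + (rpm) * (div) / 2) / ((rpm) * (div)))
#define FAN_FROM_REG(val, div) \
	((val) == 0 ? -1 : (val) == 255 ? 0 : 1350000 / ((div) * (val)))

int main(void)
{
	unsigned int rpm = 3000, div = 2;
	unsigned int reg = FAN_TO_REG(rpm, div);	/* 1353000 / 6000 = 225 */

	printf("reg=%u rpm=%d\n", reg, FAN_FROM_REG(reg, div));
	/* prints: reg=225 rpm=3000 */
	return 0;
}
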
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 615bc4f4e53..bdfd675488a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -730,7 +730,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -798,7 +798,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -859,7 +859,7 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
int err;
int temp;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -912,7 +912,7 @@ static ssize_t set_update_interval(struct device *dev,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1080,7 +1080,7 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 513901d592a..70bca671e08 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -169,7 +169,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
int shift;
u8 mask = to_sensor_dev_attr(attr)->index;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -216,7 +216,7 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val < -128000)
return -EINVAL;
@@ -254,7 +254,7 @@ static ssize_t set_max(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val >= 256000)
return -EINVAL;
@@ -290,7 +290,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->interval = val * HZ / 1000;
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index dce9e68241e..5e5fc1b0ace 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -254,7 +254,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
int index = to_sensor_dev_attr(attr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -279,7 +279,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -316,7 +316,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -363,7 +363,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 4b50601027d..ce5235560f0 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -85,6 +85,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
"Failed to read ADC value: error %d\n",
val);
ret = ERR_PTR(val);
+ data->valid = 0;
goto abort;
}
data->regs[i] = val;
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index c97b78ef911..84ef3a89870 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -6,7 +6,7 @@
* Copyright (C) 2004-2005 Richard Purdie
*
* Copyright (C) 2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 385886a4f22..f8e323ac6cb 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -230,7 +230,7 @@ static ssize_t max16065_set_limit(struct device *dev,
int err;
int limit;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (unlikely(err < 0))
return err;
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 20d1b2ddffb..6914195cfd3 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -335,10 +335,10 @@ static struct attribute *max1668_attribute_unique[] = {
NULL
};
-static mode_t max1668_attribute_mode(struct kobject *kobj,
+static umode_t max1668_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
- int ret = S_IRUGO;
+ umode_t ret = S_IRUGO;
if (read_only)
return ret;
if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f20d9978ee7..e10a092c603 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -208,7 +208,7 @@ static ssize_t set_temp_max(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_temp_crit(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -275,7 +275,7 @@ static ssize_t set_temp_emergency(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -308,7 +308,7 @@ static ssize_t set_pwm(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index e855d3b0bd1..209e8a526eb 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -234,7 +234,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct max6642_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index ece3aafa54b..2fc034aeca0 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -464,7 +464,7 @@ static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO2);
-static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dce..9b382ec2c3b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 8da2181630b..cb35461d52d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -418,7 +418,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
unsigned long val;
int iobase = data->address[LD_FAN];
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->lock);
@@ -572,7 +572,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
int nr = to_sensor_dev_attr(devattr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 2)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 2)
return -EINVAL;
/* Can't go to automatic mode if it isn't configured */
if (val == 2 && !(data->pwm_auto_ok & (1 << nr)))
@@ -604,7 +604,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
int iobase = data->address[LD_FAN];
u8 mode;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 0xff)
return -EINVAL;
mutex_lock(&data->lock);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 4b26f51920b..cfec923f42b 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,8 +19,8 @@ config SENSORS_PMBUS
default y
help
If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
- BMR453, BMR454, NCP4200, and NCP4208.
+ PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
+ NCP4200, and NCP4208.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -113,8 +113,9 @@ config SENSORS_ZL6100
default n
help
If you say yes here you get hardware monitoring support for Intersil
- ZL2004, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105 Digital
- DC/DC Controllers.
+ ZL2004, ZL2005, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105
+ Digital DC/DC Controllers, as well as for Ericsson BMR450, BMR451,
+ BMR462, BMR463, and BMR464.
This driver can also be built as a module. If so, the module will
be called zl6100.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a4d9d502..81c7c2ead6f 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -170,35 +170,71 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static const struct i2c_device_id adm1275_id[] = {
+ { "adm1275", adm1275 },
+ { "adm1276", adm1276 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adm1275_id);
+
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
struct pmbus_driver_info *info;
struct adm1275_data *data;
+ const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
- data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+ dev_err(&client->dev, "Unsupported Manufacturer ID\n");
+ return -ENODEV;
+ }
- config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0) {
- ret = config;
- goto err_mem;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ for (mid = adm1275_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
}
+ if (id->driver_data != mid->driver_data)
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ id->name, mid->name);
+
+ config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ if (config < 0)
+ return config;
+
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
- if (device_config < 0) {
- ret = device_config;
- goto err_mem;
- }
+ if (device_config < 0)
+ return device_config;
+
+ data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = mid->driver_data;
- data->id = id->driver_data;
info = &data->info;
info->pages = 1;
@@ -233,7 +269,7 @@ static int adm1275_probe(struct i2c_client *client,
if (device_config & ADM1275_IOUT_WARN2_SELECT)
data->have_oc_fault = true;
- switch (id->driver_data) {
+ switch (data->id) {
case adm1275:
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
@@ -281,13 +317,6 @@ static int adm1275_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id adm1275_id[] = {
- { "adm1275", adm1275 },
- { "adm1276", adm1276 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adm1275_id);
-
static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 995e873197e..18a385e753d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -200,8 +200,6 @@ static int pmbus_remove(struct i2c_client *client)
*/
static const struct i2c_device_id pmbus_id[] = {
{"adp4000", 1},
- {"bmr450", 1},
- {"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
{"ncp4200", 1},
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 2bc980006f8..48c7b4a716a 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -28,7 +28,7 @@
#include <linux/delay.h>
#include "pmbus.h"
-enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
struct zl6100_data {
int id;
@@ -38,8 +38,11 @@ struct zl6100_data {
#define to_zl6100_data(x) container_of(x, struct zl6100_data, info)
+#define ZL6100_MFR_CONFIG 0xd0
#define ZL6100_DEVICE_ID 0xe4
+#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+
#define ZL6100_WAIT_TIME 1000 /* uS */
static ushort delay = ZL6100_WAIT_TIME;
@@ -65,6 +68,19 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
if (page || reg >= PMBUS_VIRT_BASE)
return -ENXIO;
+ if (data->id == zl2005) {
+ /*
+ * Limit register detection is not reliable on ZL2005.
+ * Make sure registers are not erroneously detected.
+ */
+ switch (reg) {
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ return -ENXIO;
+ }
+ }
+
zl6100_wait(data);
ret = pmbus_read_word_data(client, page, reg);
data->access = ktime_get();
@@ -122,7 +138,13 @@ static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
}
static const struct i2c_device_id zl6100_id[] = {
+ {"bmr450", zl2005},
+ {"bmr451", zl2005},
+ {"bmr462", zl2008},
+ {"bmr463", zl2008},
+ {"bmr464", zl2008},
{"zl2004", zl2004},
+ {"zl2005", zl2005},
{"zl2006", zl2006},
{"zl2008", zl2008},
{"zl2105", zl2105},
@@ -143,7 +165,7 @@ static int zl6100_probe(struct i2c_client *client,
const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA
+ I2C_FUNC_SMBUS_READ_WORD_DATA
| I2C_FUNC_SMBUS_READ_BLOCK_DATA))
return -ENODEV;
@@ -177,8 +199,9 @@ static int zl6100_probe(struct i2c_client *client,
data->id = mid->driver_data;
/*
- * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+ * ZL2005, ZL2008, ZL2105, and ZL6100 are known to require a wait time
* between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+ * Other chips have not yet been tested.
*
* Only clear the wait time for chips known to be safe. The wait time
* can be cleared later for additional chips if tests show that it
@@ -190,12 +213,9 @@ static int zl6100_probe(struct i2c_client *client,
/*
* Since there was a direct I2C device access above, wait before
* accessing the chip again.
- * Set the timestamp, wait, then set it again. This should provide
- * enough buffer time to be safe.
*/
data->access = ktime_get();
zl6100_wait(data);
- data->access = ktime_get();
info = &data->info;
@@ -203,7 +223,16 @@ static int zl6100_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
| PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+ ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+ if (ret < 0)
+ goto err_mem;
+ if (ret & ZL6100_MFR_XTEMP_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
info->read_word_data = zl6100_read_word_data;
info->read_byte_data = zl6100_read_byte_data;
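
The wait logic referred to in the zl6100 comments above enforces a minimum quiet time between bus transactions by timestamping each access. A rough sketch of that idea, with made-up field and helper names rather than the driver's own:

struct example_data {
	ktime_t last_access;		/* time of the previous bus transaction */
	unsigned int min_gap_us;	/* required quiet time in microseconds */
};

static void example_wait(struct example_data *data)
{
	/* Sleep off whatever remains of the required inter-access gap */
	s64 delta = ktime_us_delta(ktime_get(), data->last_access);

	if (delta < data->min_gap_us)
		udelay(data->min_gap_us - delta);
}

/* Wrapping a register read: */
example_wait(data);
ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
data->last_access = ktime_get();
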
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752..f6c26d19f52 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c2..79b6dabe316 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79f..9d5236fb09b 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index fe4104c6b76..6ddeae04905 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -683,7 +683,7 @@ static ssize_t sht15_store_heater(struct device *dev,
long value;
u8 status;
- if (strict_strtol(buf, 10, &value))
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
mutex_lock(&data->read_lock);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 643aa8c9453..c08eee21d76 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -112,7 +112,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
long val;
int status;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
val = SENSORS_LIMIT(val, -256000, 255000);
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ad8d535235c..8b9a77486d5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -334,7 +334,7 @@ static ssize_t store_temp_min(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -361,7 +361,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -388,7 +388,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_crit_temp_to_register(val, data->config);
@@ -413,7 +413,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (data->config & TMP401_CONFIG_RANGE)
@@ -447,7 +447,7 @@ static ssize_t reset_temp_history(struct device *dev,
{
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 1) {
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 0517a8f09d3..c48381f2cd0 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -157,7 +157,7 @@ static ssize_t show_fault(struct device *dev,
return sprintf(buf, "0\n");
}
-static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
+static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
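
The mode_t to umode_t changes in these is_visible callbacks track the same type change in struct attribute_group; the callback still returns either 0 to hide an attribute or the attribute's mode bits to expose it. Illustrative shape only, with hypothetical attribute and data names:

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct example_data *data = dev_get_drvdata(dev);

	/* Hide the optional attribute when the hardware lacks the feature */
	if (a == &dev_attr_optional.attr && !data->has_feature)
		return 0;

	return a->mode;
}

static const struct attribute_group example_group = {
	.attrs		= example_attributes,
	.is_visible	= example_is_visible,
};
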
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b16..0018c7dd009 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dc..b9a87e89bab 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 93f5fc7d605..0e0af044522 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -937,7 +937,7 @@ store_in_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1054,7 +1054,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
unsigned int reg;
u8 new_div;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1199,7 +1199,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
int err; \
long val; \
- err = strict_strtol(buf, 10, &val); \
+ err = kstrtol(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1324,7 +1324,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1351,7 +1351,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1376,7 +1376,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1430,7 +1430,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1455,7 +1455,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1556,7 +1556,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = SENSORS_LIMIT(val, 1, 255); \
@@ -1595,7 +1595,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
@@ -1702,7 +1702,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
unsigned long val;
u16 reg, mask;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mask = to_sensor_dev_attr_2(attr)->nr;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8c2844e5691..6e5d0ae594b 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -711,7 +711,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -756,7 +756,7 @@ static ssize_t store_pwmenable(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- int ret = strict_strtoul(buf, 10, &val);
+ int ret = kstrtoul(buf, 10, &val);
if (ret || val < 1 || val > 3)
return -EINVAL;
@@ -819,7 +819,7 @@ static ssize_t store_temp_target(struct device *dev,
unsigned long val;
u8 target_mask;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -863,7 +863,7 @@ static ssize_t store_temp_tolerance(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index f3e7130c4cd..9ded133e43f 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -749,7 +749,7 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 854f9117f1a..3cc6fef2208 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -450,7 +450,7 @@ store_chassis_clear(struct device *dev,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 845232d7f61..3ee398d0e4c 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -730,7 +730,7 @@ store_beep(struct device *dev, struct device_attribute *attr,
u8 beep_bit = 1 << shift;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -755,7 +755,7 @@ store_chassis_clear(struct device *dev,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val != 0)
+ if (kstrtoul(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -801,7 +801,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
val = fan_to_reg(val);
@@ -863,7 +863,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -924,7 +924,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val < 1 || val > 2)
return -EINVAL;
@@ -1021,7 +1021,7 @@ store_temp_src(struct device *dev, struct device_attribute *attr,
unsigned long channel;
u8 val = index / 2;
- if (strict_strtoul(buf, 10, &channel) < 0 ||
+ if (kstrtoul(buf, 10, &channel) < 0 ||
channel < 1 || channel > 14)
return -EINVAL;
@@ -1088,7 +1088,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long tmp;
- if (strict_strtoul(buf, 10, &tmp) < 0)
+ if (kstrtoul(buf, 10, &tmp) < 0)
return -EINVAL;
switch (nr) {
@@ -1149,7 +1149,7 @@ store_fanin(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1198,7 +1198,7 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1257,7 +1257,7 @@ store_sf4_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1293,7 +1293,7 @@ store_sf4_temp(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1333,7 +1333,7 @@ store_temp(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1394,7 +1394,7 @@ store_dts_ext(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1436,7 +1436,7 @@ store_temp_mode(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if ((val != 4) && (val != 3))
return -EINVAL;
@@ -1512,7 +1512,7 @@ store_in(struct device *dev, struct device_attribute *attr,
u8 tmp;
u8 lsb_idx;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val = in_to_reg(index, val);
@@ -1569,7 +1569,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a47..9b598ed2602 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca8..3ff67edbdc4 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2d3657ab125..5244c4724df 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -137,6 +138,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
sizeof(adap->name));
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
@@ -144,6 +146,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
return 0;
@@ -187,6 +190,14 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_i2c_of_match[] = {
+ { .compatible = "snps,designware-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -195,6 +206,7 @@ static struct platform_driver dw_i2c_driver = {
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dw_i2c_of_match),
},
};
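
The DesignWare change above follows the stock recipe for adding device-tree probing to an existing platform I2C driver: declare an of_device_id table under CONFIG_OF, wire it up with of_match_ptr() (which compiles to NULL when CONFIG_OF is off), propagate pdev->dev.of_node to the adapter, and call of_i2c_register_devices() after i2c_add_numbered_adapter() so child nodes become I2C clients. A condensed sketch with placeholder names:

#ifdef CONFIG_OF
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-i2c", },	/* placeholder compatible string */
	{ },
};
MODULE_DEVICE_TABLE(of, example_of_match);
#endif

static struct platform_driver example_i2c_driver = {
	.probe		= example_probe,
	.remove		= __devexit_p(example_remove),
	.driver		= {
		.name		= "example-i2c",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(example_of_match),
	},
};
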
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 76366716a85..7eb19a5222f 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -515,20 +515,7 @@ static struct usb_driver diolan_u2c_driver = {
.id_table = diolan_u2c_table,
};
-static int __init diolan_u2c_init(void)
-{
- /* register this driver with the USB subsystem */
- return usb_register(&diolan_u2c_driver);
-}
-
-static void __exit diolan_u2c_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&diolan_u2c_driver);
-}
-
-module_init(diolan_u2c_init);
-module_exit(diolan_u2c_exit);
+module_usb_driver(diolan_u2c_driver);
MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
MODULE_DESCRIPTION(DRIVER_NAME " driver");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aea..18936ac9d51 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
+ pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc..03b61577888 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446..fa23faa20f0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index fac67394084..93709fbe30e 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -276,8 +276,6 @@ static int puv3_i2c_resume(struct platform_device *dev)
#define puv3_i2c_resume NULL
#endif
-MODULE_ALIAS("platform:puv3_i2c");
-
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
.remove = __devexit_p(puv3_i2c_remove),
@@ -289,18 +287,8 @@ static struct platform_driver puv3_i2c_driver = {
}
};
-static int __init puv3_i2c_init(void)
-{
- return platform_driver_register(&puv3_i2c_driver);
-}
-
-static void __exit puv3_i2c_exit(void)
-{
- platform_driver_unregister(&puv3_i2c_driver);
-}
-
-module_init(puv3_i2c_init);
-module_exit(puv3_i2c_exit);
+module_platform_driver(puv3_i2c_driver);
MODULE_DESCRIPTION("PKUnity v3 I2C driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:puv3_i2c");
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a0..4c171808168 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 46b6500c547..6381604696d 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -558,7 +558,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
.functionality = tegra_i2c_func,
};
-static int tegra_i2c_probe(struct platform_device *pdev)
+static int __devinit tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -636,7 +636,10 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_clk_rate = be32_to_cpup(prop);
}
- if (pdev->id == 3)
+ if (pdev->dev.of_node)
+ i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
+ "nvidia,tegra20-i2c-dvc");
+ else if (pdev->id == 3)
i2c_dev->is_dvc = 1;
init_completion(&i2c_dev->msg_complete);
@@ -690,7 +693,7 @@ err_iounmap:
return ret;
}
-static int tegra_i2c_remove(struct platform_device *pdev)
+static int __devexit tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -742,6 +745,7 @@ static int tegra_i2c_resume(struct platform_device *pdev)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
{ .compatible = "nvidia,tegra20-i2c", },
+ { .compatible = "nvidia,tegra20-i2c-dvc", },
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index d03b04002f0..f07307ff360 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -262,20 +262,7 @@ static struct usb_driver i2c_tiny_usb_driver = {
.id_table = i2c_tiny_usb_table,
};
-static int __init usb_i2c_tiny_usb_init(void)
-{
- /* register this driver with the USB subsystem */
- return usb_register(&i2c_tiny_usb_driver);
-}
-
-static void __exit usb_i2c_tiny_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&i2c_tiny_usb_driver);
-}
-
-module_init(usb_i2c_tiny_usb_init);
-module_exit(usb_i2c_tiny_usb_exit);
+module_usb_driver(i2c_tiny_usb_driver);
/* ----- end of usb layer ------------------------------------------------ */
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4bb68f35caf..ac083a28ae0 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -426,7 +426,7 @@ static void xiic_process(struct xiic_i2c *i2c)
xiic_wakeup(i2c, STATE_ERROR);
} else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
- /* Transmit register/FIFO is empty or ˝ empty */
+ /* Transmit register/FIFO is empty or ½ empty */
clr = pend &
(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 6dede8f366c..41d41552947 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -314,7 +314,7 @@ static int __init at91_ide_probe(struct platform_device *pdev)
apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
/* with GPIO interrupt we have to do quirks in handler */
- if (board->irq_pin >= PIN_BASE)
+ if (gpio_is_valid(board->irq_pin))
host->irq_handler = at91_irq_handler;
host->ports[0]->select_data = board->chipselect;
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index eb0e2ccc79a..73d45315940 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 127;
dev->tx_queue_len = 10;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd7..1612cfd50f3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+ struct neighbour *n;
+ int ret;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ if (!n || !(n->nud_state & NUD_VALID)) {
+ if (n)
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, n->ha);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
- struct neighbour *neigh;
struct flowi4 fl4;
int ret;
@@ -214,18 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
goto put;
}
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
- ret = -ENODATA;
- if (neigh)
- goto release;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
- neigh_release(neigh);
+ ret = dst_fetch_ha(&rt->dst, addr);
put:
ip_rt_put(rt);
out:
@@ -238,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct rdma_dev_addr *addr)
{
struct flowi6 fl6;
- struct neighbour *neigh;
struct dst_entry *dst;
int ret;
memset(&fl6, 0, sizeof fl6);
- ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -258,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
src_in->sin6_family = AF_INET6;
- ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+ src_in->sin6_addr = fl6.saddr;
}
if (dst->dev->flags & IFF_LOOPBACK) {
@@ -274,15 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
- neigh = dst_get_neighbour(dst);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- if (neigh)
- neigh_event_send(neigh, NULL);
- ret = -ENODATA;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ ret = dst_fetch_ha(dst, addr);
put:
dst_release(dst);
return ret;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8b72f39202f..c889aaef341 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3659,7 +3659,7 @@ static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj
};
-static char *cm_devnode(struct device *dev, mode_t *mode)
+static char *cm_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 505db2a59e7..7da9b210234 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -799,6 +799,7 @@ struct cm_apr_msg {
u8 info_length;
u8 ap_status;
+ __be16 rsvd;
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af..e3e470fecaa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1110,7 +1110,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
} else {
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr);
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
if (cma_zero_addr(src)) {
dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ ((struct sockaddr_in *)src)->sin_addr =
+ ((struct sockaddr_in *)dst)->sin_addr;
} else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ ((struct sockaddr_in6 *)src)->sin6_addr =
+ ((struct sockaddr_in6 *)dst)->sin6_addr;
}
}
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
@@ -2920,7 +2926,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- multicast->rec.mlid);
+ be16_to_cpu(multicast->rec.mlid));
mutex_unlock(&id_priv->qp_mutex);
memset(&event, 0, sizeof event);
@@ -3181,7 +3187,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
if (id->qp)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
- mc->multicast.ib->rec.mlid);
+ be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b8a0b4a7811..06f08713f48 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -106,9 +106,6 @@ enum {
IB_UCM_MAX_DEVICES = 32
};
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
-
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static void ib_ucm_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 07db22997e9..f0d588f8859 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1175,7 +1175,7 @@ static void ib_umad_remove_one(struct ib_device *device)
kref_put(&umad_dev->ref, ib_umad_release_dev);
}
-static char *umad_devnode(struct device *dev, mode_t *mode)
+static char *umad_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 254f1649c73..b930da4c0c6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -241,11 +241,24 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
+static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
+{
+ struct ib_uobject *uobj;
+
+ uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
+ return uobj ? uobj->object : NULL;
+}
+
static void put_qp_read(struct ib_qp *qp)
{
put_uobj_read(qp->uobject);
}
+static void put_qp_write(struct ib_qp *qp)
+{
+ put_uobj_write(qp->uobject);
+}
+
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
@@ -2375,7 +2388,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2404,7 +2417,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
kfree(mcast);
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
@@ -2422,7 +2435,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2441,14 +2454,14 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
}
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
-int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_uverbs_create_xsrq *cmd,
- struct ib_udata *udata)
+static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_uverbs_create_xsrq *cmd,
+ struct ib_udata *udata)
{
struct ib_uverbs_create_srq_resp resp;
struct ib_usrq_object *obj;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 87963674637..604556d73d2 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -846,7 +846,7 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kfree(uverbs_dev);
}
-static char *uverbs_devnode(struct device *dev, mode_t *mode)
+static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e60..740dcc065cf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *child_ep, *parent_ep = ctx;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1375,8 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ l2t = t3_l2t_get(tdev, dst, NULL);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1887,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct neighbour *neigh;
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
@@ -1945,11 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
-
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c55..0668bb3472d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1554,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
return;
}
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+ struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+ struct neighbour *n;
+ int err, step;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ err = -ENODEV;
+ if (!n)
+ goto out;
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ struct net_device *pdev;
+
+ pdev = ip_dev_find(&init_net, peer_ip);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, n->dev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(n->dev);
+ ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(n->dev) * step];
+
+ if (clear_mpa_v1) {
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
+ }
+ }
+ err = 0;
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep, *parent_ep;
@@ -1561,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
- struct l2t_entry *l2t;
struct rtable *rt;
__be32 local_ip, peer_ip;
__be16 local_port, peer_port;
- struct net_device *pdev;
- u32 tx_chan, smac_idx;
- u16 rss_qid;
- u32 mtu;
- int step;
- int txq_idx, ctrlq_idx;
+ int err;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1594,47 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- if (neigh->dev->flags & IFF_LOOPBACK) {
- pdev = ip_dev_find(&init_net, peer_ip);
- BUG_ON(!pdev);
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
- mtu = pdev->mtu;
- tx_chan = cxgb4_port_chan(pdev);
- smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(pdev) * step;
- ctrlq_idx = cxgb4_port_idx(pdev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
- mtu = dst_mtu(dst);
- tx_chan = cxgb4_port_chan(neigh->dev);
- smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!l2t) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
dst_release(dst);
goto reject;
}
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ err = import_ep(child_ep, peer_ip, dst, dev, false);
+ if (err) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
- cxgb4_l2t_release(l2t);
dst_release(dst);
+ kfree(child_ep);
goto reject;
}
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1647,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
- child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- child_ep->tx_chan = tx_chan;
- child_ep->smac_idx = smac_idx;
- child_ep->rss_qid = rss_qid;
- child_ep->mtu = mtu;
- child_ep->txq_idx = txq_idx;
- child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- tx_chan, smac_idx, rss_qid);
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1788,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
static int c4iw_reconnect(struct c4iw_ep *ep)
{
- int err = 0;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
@@ -1820,45 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- ep->com.cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!ep->l2t) {
+ err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, false);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
@@ -2234,13 +2222,10 @@ err:
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2301,47 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- ep->retry_with_mpa_v1 = 0;
- ep->tried_with_mpa_v1 = 0;
- }
- if (!ep->l2t) {
+ err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, true);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e..0f1607c8325 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 31ae1b108ae..b7d4216db3c 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -46,7 +46,7 @@
static struct super_block *ipath_super;
static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -61,7 +61,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -76,7 +76,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 4b8f9c49397..a251becdaa9 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -126,7 +126,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e8df155bc3b..5ecf38d9726 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -715,13 +715,17 @@ repoll:
}
wc->slid = be16_to_cpu(cqe->rlid);
- wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+ if (rdma_port_get_link_layer(wc->qp->device,
+ (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
+ else
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f36da994a85..95c94d8f025 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa..7b445df6a66 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
- return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
- MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
}
err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
memcpy(gids, gw->gids, sizeof gw->gids);
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
printk(KERN_WARNING "set port command failed\n");
else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_ib_version);
+ if (mlx4_is_mfunc(dev)) {
+ printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+ return NULL;
+ }
+
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
@@ -1244,7 +1250,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a16f0c8e6f3..aa2aefa4236 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -962,7 +962,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (is_eth) {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+ ((port - 1) << 6) | ((ah->sl & 7) << 3);
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
@@ -1437,7 +1437,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
u16 pcp;
sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
- pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+ pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a3..425065b36b8 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ rcu_read_lock();
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (neigh) {
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
neigh->ha, ETH_ALEN)) {
/* Mac address same as in nes_arp_table */
- neigh_release(neigh);
ip_rt_put(rt);
return rc;
}
@@ -1373,13 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
NES_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
}
- neigh_release(neigh);
}
-
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
ip_rt_put(rt);
return rc;
}
@@ -2836,6 +2834,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
issue_disconn = 1;
issue_close = 1;
nesqp->cm_id = NULL;
+ del_timer(&nesqp->terminate_timer);
if (nesqp->flush_issued == 0) {
nesqp->flush_issued = 1;
issue_flush = 1;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 7c0ff19ce38..055f4b545df 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1529,7 +1529,7 @@ int nes_init_phy(struct nes_device *nesdev)
} else {
/* setup 10G MDIO operation */
tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
+ tx_config |= 0x1D;
}
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -3619,10 +3619,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- if (nesqp->term_flags) {
- nes_terminate_done(nesqp, 0);
- return;
- }
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index c00d2f3f896..4b3fa711a24 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_pauseparam = nes_netdev_set_pauseparam,
};
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index cd10968bfa2..8b4c2ff5488 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -56,7 +56,7 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
u32 mh_detected;
u32 mh_pauses_sent;
-u32 nes_set_pau(struct nes_device *nesdev)
+static u32 nes_set_pau(struct nes_device *nesdev)
{
u32 ret = 0;
u32 counter;
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index 21f374aa063..a5356cb4252 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -97,7 +97,7 @@ struct qib_chippport_specific {
u64 iblnkerrsnap;
u64 ibcctrl; /* kr_ibcctrl shadow */
u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- u64 chase_end;
+ unsigned long chase_end;
u32 last_delay_mult;
};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index c90a55f4120..6fc9365ba8a 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -371,9 +371,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
lnh == QIB_LRH_GRH,
qp,
be32_to_cpu(ohdr->bth[0]));
- if (ruc_res) {
+ if (ruc_res)
goto unlock;
- }
/* Only deal with RDMA Writes for now */
if (opcode <
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b4..a7403248d83 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index df7fa251dcd..05e0f17c5b4 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -47,7 +47,7 @@ static struct super_block *qib_super;
#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -67,7 +67,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mtime = inode->i_atime;
inode->i_ctime = inode->i_atime;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -82,7 +82,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 781a802a321..4f18e2d332d 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 439d3c503cd..3c722f79d6f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1051,7 +1051,7 @@ static void reenable_7220_chase(unsigned long opaque)
static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
@@ -1066,9 +1066,9 @@ static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
case IB_7220_LT_STATE_CFGWAITRMT:
case IB_7220_LT_STATE_TXREVLANES:
case IB_7220_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end)) {
+ time_after(tnow, ppd->cpspec->chase_end)) {
ppd->cpspec->chase_end = 0;
qib_set_ib_7220_lstate(ppd,
QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2725,9 +2725,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95d..41e92089e41 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -615,8 +615,8 @@ struct qib_chippport_specific {
u64 ibmalfsnap;
u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- u64 qdr_dfe_time;
- u64 chase_end;
+ unsigned long qdr_dfe_time;
+ unsigned long chase_end;
u32 autoneg_tries;
u32 recovery_init;
u32 qdr_dfe_on;
@@ -1672,7 +1672,8 @@ static void reenable_chase(unsigned long opaque)
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
-static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
+ u8 ibclt)
{
ppd->cpspec->chase_end = 0;
@@ -1688,7 +1689,7 @@ static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
@@ -1703,9 +1704,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
case IB_7322_LT_STATE_CFGWAITRMT:
case IB_7322_LT_STATE_TXREVLANES:
case IB_7322_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end))
+ time_after(tnow, ppd->cpspec->chase_end))
disable_chase(ppd, tnow, ibclt);
else if (!ppd->cpspec->chase_end)
ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
@@ -2307,19 +2308,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2378,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -2714,7 +2715,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
pins >>= SYM_LSB(EXTStatus, GPIOIn);
if (!(pins & mask)) {
++handled;
- qd->t_insert = get_jiffies_64();
+ qd->t_insert = jiffies;
queue_work(ib_wq, &qd->work);
}
}
@@ -3602,7 +3603,7 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
if (qib_rcvhdrcnt)
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
else
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
dd->num_pports > 1 ? 1024U : 2048U);
}
@@ -4082,10 +4083,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
*/
if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
adjust_rcv_timeout(rcd, npkts);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@ -4794,7 +4797,7 @@ static void qib_get_7322_faststats(unsigned long opaque)
(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
QIBL_LINKACTIVE)) &&
ppd->cpspec->qdr_dfe_time &&
- time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
ppd->cpspec->qdr_dfe_on = 0;
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
@@ -5240,8 +5243,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* schedule the qsfp refresh which should turn the link
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ qd->t_insert = jiffies;
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
@@ -5592,7 +5595,7 @@ static void qsfp_7322_event(struct work_struct *work)
{
struct qib_qsfp_data *qd;
struct qib_pportdata *ppd;
- u64 pwrup;
+ unsigned long pwrup;
unsigned long flags;
int ret;
u32 le2;
@@ -5620,8 +5623,7 @@ static void qsfp_7322_event(struct work_struct *work)
* to insertion.
*/
while (1) {
- u64 now = get_jiffies_64();
- if (time_after64(now, pwrup))
+ if (time_is_before_jiffies(pwrup))
break;
msleep(20);
}
@@ -7506,7 +7508,7 @@ static int serdes_7322_init_old(struct qib_pportdata *ppd)
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
- u64 tstart;
+ unsigned long tend;
u32 le_val, rxcaldone;
int chan, chan_done = (1 << SERDES_CHANS) - 1;
@@ -7611,10 +7613,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
msleep(20);
/* Start Calibration */
ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tstart = get_jiffies_64();
- while (chan_done &&
- !time_after64(get_jiffies_64(),
- tstart + msecs_to_jiffies(500))) {
+ tend = jiffies + msecs_to_jiffies(500);
+ while (chan_done && !time_is_before_jiffies(tend)) {
msleep(20);
for (chan = 0; chan < SERDES_CHANS; ++chan) {
rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 58b0f8ad4a2..cf0cd30adc8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1015,7 +1015,7 @@ static int __devinit qib_init_one(struct pci_dev *,
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
-static const struct pci_device_id qib_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 97a8bdf68e6..f695061d688 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -560,9 +560,9 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
* BIOS may not set PCIe bus-utilization parameters for best performance.
* Check and optionally adjust them to maximize our throughput.
*/
-static int qib_pcie_caps;
+static int qib_pcie_caps = 0x51;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f..fa71b1e666c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 46002a9417c..91908f533a2 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -177,7 +177,7 @@ struct qib_qsfp_data {
struct qib_pportdata *ppd;
struct work_struct work;
struct qib_qsfp_cache cache;
- u64 t_insert;
+ unsigned long t_insert;
u8 modpresent;
};
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index de1a4b2f33c..ac065dd6b69 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -300,7 +300,7 @@ bail:
}
static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
+ const char *where)
{
int ret, chn, baduns;
u64 val;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 78fbd56879d..dae51604cfc 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -150,7 +150,7 @@ static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
* For userland compatibility, these offsets must remain fixed.
* They are strings for QIB_STATUS_*
*/
-static const char *qib_status_str[] = {
+static const char * const qib_status_str[] = {
"Initted",
"",
"",
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a894762da46..7b6c3bffa9d 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -913,8 +913,8 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
__raw_writel(last, piobuf);
}
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
{
struct qib_verbs_txreq *tx;
unsigned long flags;
@@ -926,8 +926,9 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
struct list_head *l = dev->txreq_free.next;
list_del(l);
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- *retp = 0;
} else {
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
qp->s_flags |= QIB_S_WAIT_TX;
list_add_tail(&qp->iowait, &dev->txwait);
}
- tx = NULL;
qp->s_flags &= ~QIB_S_BUSY;
- *retp = -EBUSY;
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ tx = ERR_PTR(-EBUSY);
}
+ return tx;
+}
- spin_unlock(&dev->pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ /* assume the list non empty */
+ if (likely(!list_empty(&dev->txreq_free))) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ } else {
+ /* call slow path to get the extra lock */
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = __get_txreq(dev, qp);
+ }
return tx;
}
@@ -1122,9 +1142,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
goto bail;
}
- tx = get_txreq(dev, qp, &ret);
- if (!tx)
- goto bail;
+ tx = get_txreq(dev, qp);
+ if (IS_ERR(tx))
+ goto bail_tx;
control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@ unaligned:
ibp->n_unaligned++;
bail:
return ret;
+bail_tx:
+ ret = PTR_ERR(tx);
+ goto bail;
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997..4115be54ba3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b600023..3514ca05dee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,15 +555,14 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
- struct neighbour *n;
unsigned long flags;
- n = dst_get_neighbour(skb_dst(skb));
neigh = ipoib_neigh_alloc(n, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
@@ -636,16 +635,14 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n;
/* Look up path record for unicasts */
- n = dst_get_neighbour(dst);
if (n->ha[4] != 0xff) {
- neigh_add_path(skb, dev);
+ neigh_add_path(skb, n, dev);
return;
}
@@ -720,13 +717,19 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
- if (likely(skb_dst(skb)))
- n = dst_get_neighbour(skb_dst(skb));
-
+ rcu_read_lock();
+ if (likely(skb_dst(skb))) {
+ n = dst_get_neighbour_noref(skb_dst(skb));
+ if (!n) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+ }
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -748,18 +751,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_del(&neigh->list);
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1218,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a9768635..f7ff9dd66cd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,8 +725,10 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n && !*to_ipoib_neigh(n)) {
struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
skb->dev);
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7e7373a700e..9a43cb07f29 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -638,7 +638,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
iser_conn_terminate(ib_conn);
}
-static mode_t iser_attr_is_visible(int param_type, int param)
+static umode_t iser_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf25347b01..76457d50bc3 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -369,7 +369,7 @@ static int evdev_fetch_next_event(struct evdev_client *client,
spin_lock_irq(&client->buffer_lock);
- have_event = client->head != client->tail;
+ have_event = client->packet_head != client->tail;
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
@@ -391,14 +391,13 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
if (count < input_event_size())
return -EINVAL;
- if (client->packet_head == client->tail && evdev->exist &&
- (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
-
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail || !evdev->exist);
- if (retval)
- return retval;
+ if (!(file->f_flags & O_NONBLOCK)) {
+ retval = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail ||
+ !evdev->exist);
+ if (retval)
+ return retval;
+ }
if (!evdev->exist)
return -ENODEV;
@@ -412,6 +411,9 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
retval += input_event_size();
}
+ if (retval == 0 && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
return retval;
}
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 7dfe1009fae..7f161d93203 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -84,10 +84,12 @@ static ssize_t input_polldev_set_poll(struct device *dev,
{
struct input_polled_dev *polldev = dev_get_drvdata(dev);
struct input_dev *input = polldev->input;
- unsigned long interval;
+ unsigned int interval;
+ int err;
- if (strict_strtoul(buf, 0, &interval))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &interval);
+ if (err)
+ return err;
if (interval < polldev->poll_interval_min)
return -EINVAL;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index da38d97a51b..1f78c957a75 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1624,7 +1624,7 @@ static struct device_type input_dev_type = {
#endif
};
-static char *input_devnode(struct device *dev, mode_t *mode)
+static char *input_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d72887585a1..32bbd4c77b7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1041,18 +1041,7 @@ static struct usb_driver xpad_driver = {
.id_table = xpad_table,
};
-static int __init usb_xpad_init(void)
-{
- return usb_register(&xpad_driver);
-}
-
-static void __exit usb_xpad_exit(void)
-{
- usb_deregister(&xpad_driver);
-}
-
-module_init(usb_xpad_init);
-module_exit(usb_xpad_exit);
+module_usb_driver(xpad_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 615c21f2a55..cdc385b2cf7 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -221,6 +221,22 @@ config KEYBOARD_TCA6416
To compile this driver as a module, choose M here: the
module will be called tca6416_keypad.
+config KEYBOARD_TCA8418
+ tristate "TCA8418 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through TCA8418 keypad decoder.
+
+ Say Y here if your device has keys connected to
+ TCA8418 keypad decoder.
+
+ If enabled the complete TCA8418 device will be managed through
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tca8418_keypad.
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -425,9 +441,10 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on SAMSUNG_DEV_KEYPAD
+ depends on HAVE_CLK
help
- Say Y here if you want to use the Samsung keypad.
+ Say Y here if you want to use the keypad on your Samsung mobile
+ device.
To compile this driver as a module, choose M here: the
module will be called samsung-keypad.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd476f..df7061f1291 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
+obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 3db8006dac3..e9e8674dfda 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -202,18 +202,7 @@ static struct platform_driver adp5520_keys_driver = {
.probe = adp5520_keys_probe,
.remove = __devexit_p(adp5520_keys_remove),
};
-
-static int __init adp5520_keys_init(void)
-{
- return platform_driver_register(&adp5520_keys_driver);
-}
-module_init(adp5520_keys_init);
-
-static void __exit adp5520_keys_exit(void)
-{
- platform_driver_unregister(&adp5520_keys_driver);
-}
-module_exit(adp5520_keys_exit);
+module_platform_driver(adp5520_keys_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Keys ADP5520 Driver");
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 79172af164f..6df5f6aa790 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -259,19 +259,6 @@ static struct platform_driver amikbd_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init amikbd_init(void)
-{
- return platform_driver_probe(&amikbd_driver, amikbd_probe);
-}
-
-module_init(amikbd_init);
-
-static void __exit amikbd_exit(void)
-{
- platform_driver_unregister(&amikbd_driver);
-}
-
-module_exit(amikbd_exit);
+module_platform_driver(amikbd_driver);
MODULE_ALIAS("platform:amiga-keyboard");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 19cfc0cf558..e05a2e7073c 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1305,7 +1305,7 @@ static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_extra;
unsigned char old_set;
@@ -1313,7 +1313,11 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->extra != value) {
@@ -1389,11 +1393,15 @@ static ssize_t atkbd_show_scroll(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_scroll;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->scroll != value) {
@@ -1433,7 +1441,7 @@ static ssize_t atkbd_show_set(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
unsigned char old_set;
bool old_extra;
@@ -1441,7 +1449,11 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || (value != 2 && value != 3))
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 2 && value != 3)
return -EINVAL;
if (atkbd->set != value) {
@@ -1484,14 +1496,18 @@ static ssize_t atkbd_show_softrepeat(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softrepeat, old_softraw;
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softrepeat != value) {
@@ -1534,11 +1550,15 @@ static ssize_t atkbd_show_softraw(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softraw;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softraw != value) {
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 7d989603f87..8eb9116e0a5 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -384,7 +384,7 @@ static int bfin_kpad_resume(struct platform_device *pdev)
# define bfin_kpad_resume NULL
#endif
-struct platform_driver bfin_kpad_device_driver = {
+static struct platform_driver bfin_kpad_device_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -394,19 +394,7 @@ struct platform_driver bfin_kpad_device_driver = {
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
-
-static int __init bfin_kpad_init(void)
-{
- return platform_driver_register(&bfin_kpad_device_driver);
-}
-
-static void __exit bfin_kpad_exit(void)
-{
- platform_driver_unregister(&bfin_kpad_device_driver);
-}
-
-module_init(bfin_kpad_init);
-module_exit(bfin_kpad_exit);
+module_platform_driver(bfin_kpad_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index 9d82b3aeff5..46982524755 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -328,18 +328,7 @@ static struct platform_driver davinci_ks_driver = {
},
.remove = __devexit_p(davinci_ks_remove),
};
-
-static int __init davinci_ks_init(void)
-{
- return platform_driver_probe(&davinci_ks_driver, davinci_ks_probe);
-}
-module_init(davinci_ks_init);
-
-static void __exit davinci_ks_exit(void)
-{
- platform_driver_unregister(&davinci_ks_driver);
-}
-module_exit(davinci_ks_exit);
+module_platform_driver(davinci_ks_driver);
MODULE_AUTHOR("Miguel Aguilar");
MODULE_DESCRIPTION("Texas Instruments DaVinci Key Scan Driver");
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 4662c5da801..0ba69f3fcb5 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -390,19 +390,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.suspend = ep93xx_keypad_suspend,
.resume = ep93xx_keypad_resume,
};
-
-static int __init ep93xx_keypad_init(void)
-{
- return platform_driver_register(&ep93xx_keypad_driver);
-}
-
-static void __exit ep93xx_keypad_exit(void)
-{
- platform_driver_unregister(&ep93xx_keypad_driver);
-}
-
-module_init(ep93xx_keypad_init);
-module_exit(ep93xx_keypad_exit);
+module_platform_driver(ep93xx_keypad_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 4c17aff2065..20c8ab17221 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -241,19 +241,7 @@ static struct platform_driver gpio_keys_polled_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init gpio_keys_polled_init(void)
-{
- return platform_driver_register(&gpio_keys_polled_driver);
-}
-
-static void __exit gpio_keys_polled_exit(void)
-{
- platform_driver_unregister(&gpio_keys_polled_driver);
-}
-
-module_init(gpio_keys_polled_init);
-module_exit(gpio_keys_polled_exit);
+module_platform_driver(gpio_keys_polled_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ccebd2d0915..fb87b3bcadb 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -619,19 +619,7 @@ static struct platform_driver imx_keypad_driver = {
.probe = imx_keypad_probe,
.remove = __devexit_p(imx_keypad_remove),
};
-
-static int __init imx_keypad_init(void)
-{
- return platform_driver_register(&imx_keypad_driver);
-}
-
-static void __exit imx_keypad_exit(void)
-{
- platform_driver_unregister(&imx_keypad_driver);
-}
-
-module_init(imx_keypad_init);
-module_exit(imx_keypad_exit);
+module_platform_driver(imx_keypad_driver);
MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
MODULE_DESCRIPTION("IMX Keypad Port Driver");
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 7197c569874..24f3ea01c4d 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -260,19 +260,7 @@ static struct platform_driver jornada680kbd_driver = {
.probe = jornada680kbd_probe,
.remove = __devexit_p(jornada680kbd_remove),
};
-
-static int __init jornada680kbd_init(void)
-{
- return platform_driver_register(&jornada680kbd_driver);
-}
-
-static void __exit jornada680kbd_exit(void)
-{
- platform_driver_unregister(&jornada680kbd_driver);
-}
-
-module_init(jornada680kbd_init);
-module_exit(jornada680kbd_exit);
+module_platform_driver(jornada680kbd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver");
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 0aa6740e60d..eeafc30b207 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -174,16 +174,4 @@ static struct platform_driver jornada720_kbd_driver = {
.probe = jornada720_kbd_probe,
.remove = __devexit_p(jornada720_kbd_remove),
};
-
-static int __init jornada720_kbd_init(void)
-{
- return platform_driver_register(&jornada720_kbd_driver);
-}
-
-static void __exit jornada720_kbd_exit(void)
-{
- platform_driver_unregister(&jornada720_kbd_driver);
-}
-
-module_init(jornada720_kbd_init);
-module_exit(jornada720_kbd_exit);
+module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 82d1dc8badd..21823bfc791 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -545,13 +545,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm8323_pwm *pwm = cdev_to_pwm(led_cdev);
- int ret;
- unsigned long time;
+ int ret, time;
- ret = strict_strtoul(buf, 10, &time);
+ ret = kstrtoint(buf, 10, &time);
/* Numbers only, please. */
if (ret)
- return -EINVAL;
+ return ret;
pwm->fade_time = time;
@@ -613,9 +612,9 @@ static ssize_t lm8323_set_disable(struct device *dev,
{
struct lm8323_chip *lm = dev_get_drvdata(dev);
int ret;
- unsigned long i;
+ unsigned int i;
- ret = strict_strtoul(buf, 10, &i);
+ ret = kstrtouint(buf, 10, &i);
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e2ae657717e..9b223d73de3 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -496,19 +496,7 @@ static struct platform_driver matrix_keypad_driver = {
#endif
},
};
-
-static int __init matrix_keypad_init(void)
-{
- return platform_driver_register(&matrix_keypad_driver);
-}
-
-static void __exit matrix_keypad_exit(void)
-{
- platform_driver_unregister(&matrix_keypad_driver);
-}
-
-module_init(matrix_keypad_init);
-module_exit(matrix_keypad_exit);
+module_platform_driver(matrix_keypad_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index fcdec5e2b29..5a71e55c9c5 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -379,7 +379,7 @@ static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
};
#endif
-struct platform_driver ske_keypad_driver = {
+static struct platform_driver ske_keypad_driver = {
.driver = {
.name = "nmk-ske-keypad",
.owner = THIS_MODULE,
@@ -390,18 +390,7 @@ struct platform_driver ske_keypad_driver = {
.probe = ske_keypad_probe,
.remove = __devexit_p(ske_keypad_remove),
};
-
-static int __init ske_keypad_init(void)
-{
- return platform_driver_probe(&ske_keypad_driver, ske_keypad_probe);
-}
-module_init(ske_keypad_init);
-
-static void __exit ske_keypad_exit(void)
-{
- platform_driver_unregister(&ske_keypad_driver);
-}
-module_exit(ske_keypad_exit);
+module_platform_driver(ske_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>");
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 323bcdfff24..6b630d9d3df 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -473,20 +473,7 @@ static struct platform_driver omap_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap_kp_init(void)
-{
- printk(KERN_INFO "OMAP Keypad Driver\n");
- return platform_driver_register(&omap_kp_driver);
-}
-
-static void __exit omap_kp_exit(void)
-{
- platform_driver_unregister(&omap_kp_driver);
-}
-
-module_init(omap_kp_init);
-module_exit(omap_kp_exit);
+module_platform_driver(omap_kp_driver);
MODULE_AUTHOR("Timo Teräs");
MODULE_DESCRIPTION("OMAP Keypad Driver");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c51a3c4a7fe..d5c5d77f4b8 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -335,18 +335,7 @@ static struct platform_driver omap4_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap4_keypad_init(void)
-{
- return platform_driver_register(&omap4_keypad_driver);
-}
-module_init(omap4_keypad_init);
-
-static void __exit omap4_keypad_exit(void)
-{
- platform_driver_unregister(&omap4_keypad_driver);
-}
-module_exit(omap4_keypad_exit);
+module_platform_driver(omap4_keypad_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP4 Keypad Driver");
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index 1f1a5563f60..abe728c7b88 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -163,18 +163,7 @@ static struct platform_driver opencores_kbd_device_driver = {
.name = "opencores-kbd",
},
};
-
-static int __init opencores_kbd_init(void)
-{
- return platform_driver_register(&opencores_kbd_device_driver);
-}
-module_init(opencores_kbd_init);
-
-static void __exit opencores_kbd_exit(void)
-{
- platform_driver_unregister(&opencores_kbd_device_driver);
-}
-module_exit(opencores_kbd_exit);
+module_platform_driver(opencores_kbd_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index e7cc51d0fb3..01a1c9f8a38 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -780,18 +780,7 @@ static struct platform_driver pmic8xxx_kp_driver = {
.pm = &pm8xxx_kp_pm_ops,
},
};
-
-static int __init pmic8xxx_kp_init(void)
-{
- return platform_driver_register(&pmic8xxx_kp_driver);
-}
-module_init(pmic8xxx_kp_init);
-
-static void __exit pmic8xxx_kp_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_kp_driver);
-}
-module_exit(pmic8xxx_kp_exit);
+module_platform_driver(pmic8xxx_kp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8XXX keypad driver");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index eca6ae63de1..29fe1b2be1c 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -602,19 +602,7 @@ static struct platform_driver pxa27x_keypad_driver = {
#endif
},
};
-
-static int __init pxa27x_keypad_init(void)
-{
- return platform_driver_register(&pxa27x_keypad_driver);
-}
-
-static void __exit pxa27x_keypad_exit(void)
-{
- platform_driver_unregister(&pxa27x_keypad_driver);
-}
-
-module_init(pxa27x_keypad_init);
-module_exit(pxa27x_keypad_exit);
+module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 35451bf780c..d7f1134b789 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -195,18 +195,7 @@ static struct platform_driver pxa930_rotary_driver = {
.probe = pxa930_rotary_probe,
.remove = __devexit_p(pxa930_rotary_remove),
};
-
-static int __init pxa930_rotary_init(void)
-{
- return platform_driver_register(&pxa930_rotary_driver);
-}
-module_init(pxa930_rotary_init);
-
-static void __exit pxa930_rotary_exit(void)
-{
- platform_driver_unregister(&pxa930_rotary_driver);
-}
-module_exit(pxa930_rotary_exit);
+module_platform_driver(pxa930_rotary_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index f689f49e310..17ba7f9f80f 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -20,9 +20,13 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/sched.h>
-#include <plat/keypad.h>
+#include <linux/input/samsung-keypad.h>
#define SAMSUNG_KEYIFCON 0x00
#define SAMSUNG_KEYIFSTSCLR 0x04
@@ -63,36 +67,33 @@ enum samsung_keypad_type {
struct samsung_keypad {
struct input_dev *input_dev;
+ struct platform_device *pdev;
struct clk *clk;
void __iomem *base;
wait_queue_head_t wait;
bool stopped;
+ bool wake_enabled;
int irq;
+ enum samsung_keypad_type type;
unsigned int row_shift;
unsigned int rows;
unsigned int cols;
unsigned int row_state[SAMSUNG_MAX_COLS];
+#ifdef CONFIG_OF
+ int row_gpios[SAMSUNG_MAX_ROWS];
+ int col_gpios[SAMSUNG_MAX_COLS];
+#endif
unsigned short keycodes[];
};
-static int samsung_keypad_is_s5pv210(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- enum samsung_keypad_type type =
- platform_get_device_id(pdev)->driver_data;
-
- return type == KEYPAD_TYPE_S5PV210;
-}
-
static void samsung_keypad_scan(struct samsung_keypad *keypad,
unsigned int *row_state)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int col;
unsigned int val;
for (col = 0; col < keypad->cols; col++) {
- if (samsung_keypad_is_s5pv210(dev)) {
+ if (keypad->type == KEYPAD_TYPE_S5PV210) {
val = S5PV210_KEYIFCOLEN_MASK;
val &= ~(1 << col) << 8;
} else {
@@ -158,6 +159,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
unsigned int val;
bool key_down;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
do {
val = readl(keypad->base + SAMSUNG_KEYIFSTSCLR);
/* Clear interrupt. */
@@ -172,6 +175,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
} while (key_down && !keypad->stopped);
+ pm_runtime_put_sync(&keypad->pdev->dev);
+
return IRQ_HANDLED;
}
@@ -179,6 +184,8 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Tell IRQ thread that it may poll the device. */
keypad->stopped = false;
@@ -191,12 +198,16 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
/* KEYIFCOL reg clear. */
writel(0, keypad->base + SAMSUNG_KEYIFCOL);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static void samsung_keypad_stop(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Signal IRQ thread to stop polling and disable the handler. */
keypad->stopped = true;
wake_up(&keypad->wait);
@@ -217,6 +228,8 @@ static void samsung_keypad_stop(struct samsung_keypad *keypad)
* re-enable the handler.
*/
enable_irq(keypad->irq);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static int samsung_keypad_open(struct input_dev *input_dev)
@@ -235,6 +248,126 @@ static void samsung_keypad_close(struct input_dev *input_dev)
samsung_keypad_stop(keypad);
}
+#ifdef CONFIG_OF
+static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
+ struct device *dev)
+{
+ struct samsung_keypad_platdata *pdata;
+ struct matrix_keymap_data *keymap_data;
+ uint32_t *keymap, num_rows = 0, num_cols = 0;
+ struct device_node *np = dev->of_node, *key_np;
+ unsigned int key_count = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
+ of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
+ if (!num_rows || !num_cols) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return NULL;
+ }
+ pdata->rows = num_rows;
+ pdata->cols = num_cols;
+
+ keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
+ if (!keymap_data) {
+ dev_err(dev, "could not allocate memory for keymap data\n");
+ return NULL;
+ }
+ pdata->keymap_data = keymap_data;
+
+ for_each_child_of_node(np, key_np)
+ key_count++;
+
+ keymap_data->keymap_size = key_count;
+ keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
+ if (!keymap) {
+ dev_err(dev, "could not allocate memory for keymap\n");
+ return NULL;
+ }
+ keymap_data->keymap = keymap;
+
+ for_each_child_of_node(np, key_np) {
+ u32 row, col, key_code;
+ of_property_read_u32(key_np, "keypad,row", &row);
+ of_property_read_u32(key_np, "keypad,column", &col);
+ of_property_read_u32(key_np, "linux,code", &key_code);
+ *keymap++ = KEY(row, col, key_code);
+ }
+
+ if (of_get_property(np, "linux,input-no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,input-wakeup", NULL))
+ pdata->wakeup = true;
+
+ return pdata;
+}
+
+static void samsung_keypad_parse_dt_gpio(struct device *dev,
+ struct samsung_keypad *keypad)
+{
+ struct device_node *np = dev->of_node;
+ int gpio, ret, row, col;
+
+ for (row = 0; row < keypad->rows; row++) {
+ gpio = of_get_named_gpio(np, "row-gpios", row);
+ keypad->row_gpios[row] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad row[%d]: invalid gpio %d\n",
+ row, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-row");
+ if (ret)
+ dev_err(dev, "keypad row[%d] gpio request failed\n",
+ row);
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ gpio = of_get_named_gpio(np, "col-gpios", col);
+ keypad->col_gpios[col] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad column[%d]: invalid gpio %d\n",
+ col, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-col");
+ if (ret)
+ dev_err(dev, "keypad column[%d] gpio request failed\n",
+ col);
+ }
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < keypad->rows; cnt++)
+ if (gpio_is_valid(keypad->row_gpios[cnt]))
+ gpio_free(keypad->row_gpios[cnt]);
+
+ for (cnt = 0; cnt < keypad->cols; cnt++)
+ if (gpio_is_valid(keypad->col_gpios[cnt]))
+ gpio_free(keypad->col_gpios[cnt]);
+}
+#else
+static
+struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+}
+#endif
+
static int __devinit samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
@@ -246,7 +379,10 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
unsigned int keymap_size;
int error;
- pdata = pdev->dev.platform_data;
+ if (pdev->dev.of_node)
+ pdata = samsung_keypad_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
return -EINVAL;
@@ -298,11 +434,23 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
keypad->input_dev = input_dev;
+ keypad->pdev = pdev;
keypad->row_shift = row_shift;
keypad->rows = pdata->rows;
keypad->cols = pdata->cols;
+ keypad->stopped = true;
init_waitqueue_head(&keypad->wait);
+ if (pdev->dev.of_node) {
+#ifdef CONFIG_OF
+ samsung_keypad_parse_dt_gpio(&pdev->dev, keypad);
+ keypad->type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s5pv210-keypad");
+#endif
+ } else {
+ keypad->type = platform_get_device_id(pdev)->driver_data;
+ }
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
@@ -337,18 +485,29 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+ platform_set_drvdata(pdev, keypad);
+ pm_runtime_enable(&pdev->dev);
+
error = input_register_device(keypad->input_dev);
if (error)
goto err_free_irq;
- device_init_wakeup(&pdev->dev, pdata->wakeup);
- platform_set_drvdata(pdev, keypad);
+ if (pdev->dev.of_node) {
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data);
+ devm_kfree(&pdev->dev, (void *)pdata);
+ }
return 0;
err_free_irq:
free_irq(keypad->irq, keypad);
+ pm_runtime_disable(&pdev->dev);
+ device_init_wakeup(&pdev->dev, 0);
+ platform_set_drvdata(pdev, NULL);
err_put_clk:
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
err_unmap_base:
iounmap(keypad->base);
err_free_mem:
@@ -362,6 +521,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -374,6 +534,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
free_irq(keypad->irq, keypad);
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
iounmap(keypad->base);
kfree(keypad);
@@ -381,11 +542,57 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_RUNTIME
+static int samsung_keypad_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+ int error;
+
+ if (keypad->stopped)
+ return 0;
+
+ /* This may fail on some SoCs due to lack of controller support */
+ error = enable_irq_wake(keypad->irq);
+ if (!error)
+ keypad->wake_enabled = true;
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val |= SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ clk_disable(keypad->clk);
+
+ return 0;
+}
+
+static int samsung_keypad_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ if (keypad->stopped)
+ return 0;
+
+ clk_enable(keypad->clk);
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ if (keypad->wake_enabled)
+ disable_irq_wake(keypad->irq);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
bool enable)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int val;
clk_enable(keypad->clk);
@@ -393,11 +600,11 @@ static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
val = readl(keypad->base + SAMSUNG_KEYIFCON);
if (enable) {
val |= SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
enable_irq_wake(keypad->irq);
} else {
val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
disable_irq_wake(keypad->irq);
}
writel(val, keypad->base + SAMSUNG_KEYIFCON);
@@ -440,11 +647,23 @@ static int samsung_keypad_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops samsung_keypad_pm_ops = {
- .suspend = samsung_keypad_suspend,
- .resume = samsung_keypad_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(samsung_keypad_suspend, samsung_keypad_resume)
+ SET_RUNTIME_PM_OPS(samsung_keypad_runtime_suspend,
+ samsung_keypad_runtime_resume, NULL)
};
+
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_keypad_dt_match[] = {
+ { .compatible = "samsung,s3c6410-keypad" },
+ { .compatible = "samsung,s5pv210-keypad" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
+#else
+#define samsung_keypad_dt_match NULL
#endif
static struct platform_device_id samsung_keypad_driver_ids[] = {
@@ -465,27 +684,14 @@ static struct platform_driver samsung_keypad_driver = {
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = samsung_keypad_dt_match,
.pm = &samsung_keypad_pm_ops,
-#endif
},
.id_table = samsung_keypad_driver_ids,
};
-
-static int __init samsung_keypad_init(void)
-{
- return platform_driver_register(&samsung_keypad_driver);
-}
-module_init(samsung_keypad_init);
-
-static void __exit samsung_keypad_exit(void)
-{
- platform_driver_unregister(&samsung_keypad_driver);
-}
-module_exit(samsung_keypad_exit);
+module_platform_driver(samsung_keypad_driver);
MODULE_DESCRIPTION("Samsung keypad driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-keypad");
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 934aeb583b3..da54ad5db15 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -337,19 +337,7 @@ static struct platform_driver sh_keysc_device_driver = {
.pm = &sh_keysc_dev_pm_ops,
}
};
-
-static int __init sh_keysc_init(void)
-{
- return platform_driver_register(&sh_keysc_device_driver);
-}
-
-static void __exit sh_keysc_exit(void)
-{
- platform_driver_unregister(&sh_keysc_device_driver);
-}
-
-module_init(sh_keysc_init);
-module_exit(sh_keysc_exit);
+module_platform_driver(sh_keysc_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH KEYSC Keypad Driver");
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index d712dffd215..c88bd63dc9c 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -326,18 +326,7 @@ static struct platform_driver spear_kbd_driver = {
#endif
},
};
-
-static int __init spear_kbd_init(void)
-{
- return platform_driver_register(&spear_kbd_driver);
-}
-module_init(spear_kbd_init);
-
-static void __exit spear_kbd_exit(void)
-{
- platform_driver_unregister(&spear_kbd_driver);
-}
-module_exit(spear_kbd_exit);
+module_platform_driver(spear_kbd_driver);
MODULE_AUTHOR("Rajeev Kumar");
MODULE_DESCRIPTION("SPEAr Keyboard Driver");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ab7610ca10e..9397cf9c625 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -368,18 +368,7 @@ static struct platform_driver stmpe_keypad_driver = {
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
-
-static int __init stmpe_keypad_init(void)
-{
- return platform_driver_register(&stmpe_keypad_driver);
-}
-module_init(stmpe_keypad_init);
-
-static void __exit stmpe_keypad_exit(void)
-{
- platform_driver_unregister(&stmpe_keypad_driver);
-}
-module_exit(stmpe_keypad_exit);
+module_platform_driver(stmpe_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPExxxx keypad driver");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index f60c9e82f20..2dee3e4e7c6 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -74,11 +74,13 @@
/**
* struct tc_keypad - data structure used by keypad driver
+ * @tc3589x: pointer to tc35893
* @input: pointer to input device object
* @board: keypad platform device
* @krow: number of rows
* @kcol: number of columns
* @keymap: matrix scan code table for keycodes
+ * @keypad_stopped: holds keypad status
*/
struct tc_keypad {
struct tc3589x *tc3589x;
@@ -453,18 +455,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.probe = tc3589x_keypad_probe,
.remove = __devexit_p(tc3589x_keypad_remove),
};
-
-static int __init tc3589x_keypad_init(void)
-{
- return platform_driver_register(&tc3589x_keypad_driver);
-}
-module_init(tc3589x_keypad_init);
-
-static void __exit tc3589x_keypad_exit(void)
-{
- return platform_driver_unregister(&tc3589x_keypad_driver);
-}
-module_exit(tc3589x_keypad_exit);
+module_platform_driver(tc3589x_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
new file mode 100644
index 00000000000..958ec107bfb
--- /dev/null
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -0,0 +1,430 @@
+/*
+ * Driver for TCA8418 I2C keyboard
+ *
+ * Copyright (C) 2011 Fuel7, Inc. All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/tca8418_keypad.h>
+
+/* TCA8418 hardware limits */
+#define TCA8418_MAX_ROWS 8
+#define TCA8418_MAX_COLS 10
+
+/* TCA8418 register offsets */
+#define REG_CFG 0x01
+#define REG_INT_STAT 0x02
+#define REG_KEY_LCK_EC 0x03
+#define REG_KEY_EVENT_A 0x04
+#define REG_KEY_EVENT_B 0x05
+#define REG_KEY_EVENT_C 0x06
+#define REG_KEY_EVENT_D 0x07
+#define REG_KEY_EVENT_E 0x08
+#define REG_KEY_EVENT_F 0x09
+#define REG_KEY_EVENT_G 0x0A
+#define REG_KEY_EVENT_H 0x0B
+#define REG_KEY_EVENT_I 0x0C
+#define REG_KEY_EVENT_J 0x0D
+#define REG_KP_LCK_TIMER 0x0E
+#define REG_UNLOCK1 0x0F
+#define REG_UNLOCK2 0x10
+#define REG_GPIO_INT_STAT1 0x11
+#define REG_GPIO_INT_STAT2 0x12
+#define REG_GPIO_INT_STAT3 0x13
+#define REG_GPIO_DAT_STAT1 0x14
+#define REG_GPIO_DAT_STAT2 0x15
+#define REG_GPIO_DAT_STAT3 0x16
+#define REG_GPIO_DAT_OUT1 0x17
+#define REG_GPIO_DAT_OUT2 0x18
+#define REG_GPIO_DAT_OUT3 0x19
+#define REG_GPIO_INT_EN1 0x1A
+#define REG_GPIO_INT_EN2 0x1B
+#define REG_GPIO_INT_EN3 0x1C
+#define REG_KP_GPIO1 0x1D
+#define REG_KP_GPIO2 0x1E
+#define REG_KP_GPIO3 0x1F
+#define REG_GPI_EM1 0x20
+#define REG_GPI_EM2 0x21
+#define REG_GPI_EM3 0x22
+#define REG_GPIO_DIR1 0x23
+#define REG_GPIO_DIR2 0x24
+#define REG_GPIO_DIR3 0x25
+#define REG_GPIO_INT_LVL1 0x26
+#define REG_GPIO_INT_LVL2 0x27
+#define REG_GPIO_INT_LVL3 0x28
+#define REG_DEBOUNCE_DIS1 0x29
+#define REG_DEBOUNCE_DIS2 0x2A
+#define REG_DEBOUNCE_DIS3 0x2B
+#define REG_GPIO_PULL1 0x2C
+#define REG_GPIO_PULL2 0x2D
+#define REG_GPIO_PULL3 0x2E
+
+/* TCA8418 bit definitions */
+#define CFG_AI BIT(7)
+#define CFG_GPI_E_CFG BIT(6)
+#define CFG_OVR_FLOW_M BIT(5)
+#define CFG_INT_CFG BIT(4)
+#define CFG_OVR_FLOW_IEN BIT(3)
+#define CFG_K_LCK_IEN BIT(2)
+#define CFG_GPI_IEN BIT(1)
+#define CFG_KE_IEN BIT(0)
+
+#define INT_STAT_CAD_INT BIT(4)
+#define INT_STAT_OVR_FLOW_INT BIT(3)
+#define INT_STAT_K_LCK_INT BIT(2)
+#define INT_STAT_GPI_INT BIT(1)
+#define INT_STAT_K_INT BIT(0)
+
+/* TCA8418 register masks */
+#define KEY_LCK_EC_KEC 0x7
+#define KEY_EVENT_CODE 0x7f
+#define KEY_EVENT_VALUE 0x80
+
+
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+
+struct tca8418_keypad {
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int keypad_mask; /* Mask for keypad col/rol regs */
+ unsigned int irq;
+ unsigned int row_shift;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ /* Flexible array member, must be at end of struct */
+ unsigned short keymap[];
+};
+
+/*
+ * Write a byte to the TCA8418
+ */
+static int tca8418_write_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 val)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(keypad_data->client, reg, val);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Read a byte from the TCA8418
+ */
+static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_byte_data(keypad_data->client, reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, error: %d\n",
+ __func__, reg, error);
+ return error;
+ }
+
+ *val = (u8)error;
+
+ return 0;
+}
+
+static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+{
+ int error, col, row;
+ u8 reg, state, code;
+
+ /* Initial read of the key event FIFO */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+
+ /* Assume that key code 0 signifies empty FIFO */
+ while (error >= 0 && reg > 0) {
+ state = reg & KEY_EVENT_VALUE;
+ code = reg & KEY_EVENT_CODE;
+
+ row = code / TCA8418_MAX_COLS;
+ col = code % TCA8418_MAX_COLS;
+
+ row = (col) ? row : row - 1;
+ col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
+
+ code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+ input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad_data->input,
+ keypad_data->keymap[code], state);
+
+ /* Read for next loop */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ }
+
+ if (error < 0)
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+
+ input_sync(keypad_data->input);
+}
+
+/*
+ * Threaded IRQ handler and this can (and will) sleep.
+ */
+static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
+{
+ struct tca8418_keypad *keypad_data = dev_id;
+ u8 reg;
+ int error;
+
+ error = tca8418_read_byte(keypad_data, REG_INT_STAT, &reg);
+ if (error) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_INT_STAT\n");
+ goto exit;
+ }
+
+ if (reg & INT_STAT_OVR_FLOW_INT)
+ dev_warn(&keypad_data->client->dev, "overflow occurred\n");
+
+ if (reg & INT_STAT_K_INT)
+ tca8418_read_keypad(keypad_data);
+
+exit:
+ /* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
+ reg = 0xff;
+ error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
+ if (error)
+ dev_err(&keypad_data->client->dev,
+ "unable to clear REG_INT_STAT\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Configure the TCA8418 for keypad operation
+ */
+static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+{
+ int reg, error;
+
+ /* Write config register, if this fails assume device not present */
+ error = tca8418_write_byte(keypad_data, REG_CFG,
+ CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+ if (error < 0)
+ return -ENODEV;
+
+
+ /* Assemble a mask for row and column registers */
+ reg = ~(~0 << keypad_data->rows);
+ reg += (~(~0 << keypad_data->cols)) << 8;
+ keypad_data->keypad_mask = reg;
+
+ /* Set registers to keypad mode */
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO3, reg >> 16);
+
+ /* Enable column debouncing */
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+
+ return error;
+}
+
+static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct tca8418_keypad_platform_data *pdata =
+ client->dev.platform_data;
+ struct tca8418_keypad *keypad_data;
+ struct input_dev *input;
+ int error, row_shift, max_keys;
+
+ /* Copy the platform data */
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->keymap_data) {
+ dev_err(&client->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
+ dev_err(&client->dev, "invalid rows\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
+ dev_err(&client->dev, "invalid columns\n");
+ return -EINVAL;
+ }
+
+ /* Check i2c driver capabilities */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ row_shift = get_count_order(pdata->cols);
+ max_keys = pdata->rows << row_shift;
+
+ /* Allocate memory for keypad_data, keymap and input device */
+ keypad_data = kzalloc(sizeof(*keypad_data) +
+ max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ if (!keypad_data)
+ return -ENOMEM;
+
+ keypad_data->rows = pdata->rows;
+ keypad_data->cols = pdata->cols;
+ keypad_data->client = client;
+ keypad_data->row_shift = row_shift;
+
+ /* Initialize the chip or fail if chip isn't present */
+ error = tca8418_configure(keypad_data);
+ if (error < 0)
+ goto fail1;
+
+ /* Configure input device */
+ input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+ keypad_data->input = input;
+
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x001;
+ input->id.version = 0x0001;
+
+ input->keycode = keypad_data->keymap;
+ input->keycodesize = sizeof(keypad_data->keymap[0]);
+ input->keycodemax = max_keys;
+
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ input_set_drvdata(input, keypad_data);
+
+ matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+ input->keycode, input->keybit);
+
+ if (pdata->irq_is_gpio)
+ client->irq = gpio_to_irq(client->irq);
+
+ error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING,
+ client->name, keypad_data);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ client->irq, error);
+ goto fail2;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail3;
+ }
+
+ i2c_set_clientdata(client, keypad_data);
+ return 0;
+
+fail3:
+ free_irq(client->irq, keypad_data);
+fail2:
+ input_free_device(input);
+fail1:
+ kfree(keypad_data);
+ return error;
+}
+
+static int __devexit tca8418_keypad_remove(struct i2c_client *client)
+{
+ struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
+
+ free_irq(keypad_data->client->irq, keypad_data);
+
+ input_unregister_device(keypad_data->input);
+
+ kfree(keypad_data);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca8418_keypad_driver = {
+ .driver = {
+ .name = TCA8418_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tca8418_keypad_probe,
+ .remove = __devexit_p(tca8418_keypad_remove),
+ .id_table = tca8418_id,
+};
+
+static int __init tca8418_keypad_init(void)
+{
+ return i2c_add_driver(&tca8418_keypad_driver);
+}
+subsys_initcall(tca8418_keypad_init);
+
+static void __exit tca8418_keypad_exit(void)
+{
+ i2c_del_driver(&tca8418_keypad_driver);
+}
+module_exit(tca8418_keypad_exit);
+
+MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
+MODULE_DESCRIPTION("Keypad driver for TCA8418");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index cf3228b0ab9..a136e2e832b 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <mach/clk.h>
@@ -52,6 +53,7 @@
/* KBC Interrupt Register */
#define KBC_INT_0 0x4
#define KBC_INT_FIFO_CNT_INT_STATUS (1 << 2)
+#define KBC_INT_KEYPRESS_INT_STATUS (1 << 0)
#define KBC_ROW_CFG0_0 0x8
#define KBC_COL_CFG0_0 0x18
@@ -74,15 +76,17 @@ struct tegra_kbc {
unsigned int cp_to_wkup_dly;
bool use_fn_map;
bool use_ghost_filter;
+ bool keypress_caused_wake;
const struct tegra_kbc_platform_data *pdata;
unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
+ u32 wakeup_key;
struct timer_list timer;
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] = {
+static const u32 tegra_kbc_default_keymap[] __devinitdata = {
KEY(0, 2, KEY_W),
KEY(0, 3, KEY_S),
KEY(0, 4, KEY_A),
@@ -217,7 +221,8 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(31, 4, KEY_HELP),
};
-static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+static const
+struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
.keymap = tegra_kbc_default_keymap,
.keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
};
@@ -409,6 +414,9 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
*/
tegra_kbc_set_fifo_interrupt(kbc, false);
mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+ } else if (val & KBC_INT_KEYPRESS_INT_STATUS) {
+ /* We can be here only through system resume path */
+ kbc->keypress_caused_wake = true;
}
spin_unlock_irqrestore(&kbc->lock, flags);
@@ -576,6 +584,56 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
return true;
}
+#ifdef CONFIG_OF
+static struct tegra_kbc_platform_data * __devinit
+tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+{
+ struct tegra_kbc_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ u32 prop;
+ int i;
+
+ if (!np)
+ return NULL;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (!of_property_read_u32(np, "debounce-delay", &prop))
+ pdata->debounce_cnt = prop;
+
+ if (!of_property_read_u32(np, "repeat-delay", &prop))
+ pdata->repeat_cnt = prop;
+
+ if (of_find_property(np, "needs-ghost-filter", NULL))
+ pdata->use_ghost_filter = true;
+
+ if (of_find_property(np, "wakeup-source", NULL))
+ pdata->wakeup = true;
+
+ /*
+ * All currently known keymaps with device tree support use the same
+ * pin_cfg, so set it up here.
+ */
+ for (i = 0; i < KBC_MAX_ROW; i++) {
+ pdata->pin_cfg[i].num = i;
+ pdata->pin_cfg[i].is_row = true;
+ }
+
+ for (i = 0; i < KBC_MAX_COL; i++) {
+ pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
+ pdata->pin_cfg[KBC_MAX_ROW + i].is_row = false;
+ }
+
+ return pdata;
+}
+#else
+static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
+ struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
@@ -590,21 +648,28 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
unsigned int scan_time_rows;
if (!pdata)
- return -EINVAL;
+ pdata = tegra_kbc_dt_parse_pdata(pdev);
- if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+ if (!pdata)
return -EINVAL;
+ if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
+ err = -EINVAL;
+ goto err_free_pdata;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
@@ -674,9 +739,10 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
+ kbc->wakeup_key = pdata->wakeup_key;
- err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
- pdev->name, kbc);
+ err = request_irq(kbc->irq, tegra_kbc_isr,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
goto err_put_clk;
@@ -706,6 +772,9 @@ err_free_mem_region:
err_free_mem:
input_free_device(input_dev);
kfree(kbc);
+err_free_pdata:
+ if (!pdev->dev.platform_data)
+ kfree(pdata);
return err;
}
@@ -715,6 +784,8 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
struct resource *res;
+ platform_set_drvdata(pdev, NULL);
+
free_irq(kbc->irq, pdev);
clk_put(kbc->clk);
@@ -723,9 +794,14 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- kfree(kbc);
+ /*
+ * If we do not have platform data attached to the device we
+ * allocated it ourselves and thus need to free it.
+ */
+ if (!pdev->dev.platform_data)
+ kfree(kbc->pdata);
- platform_set_drvdata(pdev, NULL);
+ kfree(kbc);
return 0;
}
@@ -754,6 +830,8 @@ static int tegra_kbc_suspend(struct device *dev)
tegra_kbc_setup_wakekeys(kbc, true);
msleep(30);
+ kbc->keypress_caused_wake = false;
+ enable_irq(kbc->irq);
enable_irq_wake(kbc->irq);
} else {
if (kbc->idev->users)
@@ -780,7 +858,19 @@ static int tegra_kbc_resume(struct device *dev)
tegra_kbc_set_fifo_interrupt(kbc, true);
- enable_irq(kbc->irq);
+ if (kbc->keypress_caused_wake && kbc->wakeup_key) {
+ /*
+ * We can't report events directly from the ISR
+ * because timekeeping is stopped when processing
+ * wakeup request and we get a nasty warning when
+ * we try to call do_gettimeofday() in evdev
+ * handler.
+ */
+ input_report_key(kbc->idev, kbc->wakeup_key, 1);
+ input_sync(kbc->idev);
+ input_report_key(kbc->idev, kbc->wakeup_key, 0);
+ input_sync(kbc->idev);
+ }
} else {
if (kbc->idev->users)
err = tegra_kbc_start(kbc);
@@ -793,6 +883,12 @@ static int tegra_kbc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+static const struct of_device_id tegra_kbc_of_match[] = {
+ { .compatible = "nvidia,tegra20-kbc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
+
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
.remove = __devexit_p(tegra_kbc_remove),
@@ -800,20 +896,10 @@ static struct platform_driver tegra_kbc_driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
.pm = &tegra_kbc_pm_ops,
+ .of_match_table = tegra_kbc_of_match,
},
};
-
-static void __exit tegra_kbc_exit(void)
-{
- platform_driver_unregister(&tegra_kbc_driver);
-}
-module_exit(tegra_kbc_exit);
-
-static int __init tegra_kbc_init(void)
-{
- return platform_driver_register(&tegra_kbc_driver);
-}
-module_init(tegra_kbc_init);
+module_platform_driver(tegra_kbc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 66e55e5cfdd..fb39c94b6fd 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -322,19 +322,7 @@ static struct platform_driver keypad_driver = {
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
-
-static int __init keypad_init(void)
-{
- return platform_driver_register(&keypad_driver);
-}
-
-static void __exit keypad_exit(void)
-{
- platform_driver_unregister(&keypad_driver);
-}
-
-module_init(keypad_init);
-module_exit(keypad_exit);
+module_platform_driver(keypad_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Keypad Driver");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a26922cf0e8..a588578037e 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -460,18 +460,7 @@ static struct platform_driver twl4030_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init twl4030_kp_init(void)
-{
- return platform_driver_register(&twl4030_kp_driver);
-}
-module_init(twl4030_kp_init);
-
-static void __exit twl4030_kp_exit(void)
-{
- platform_driver_unregister(&twl4030_kp_driver);
-}
-module_exit(twl4030_kp_exit);
+module_platform_driver(twl4030_kp_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TWL4030 Keypad Driver");
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 318586dadac..99bbb7e775a 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -262,19 +262,7 @@ static struct platform_driver w90p910_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90p910_keypad_init(void)
-{
- return platform_driver_register(&w90p910_keypad_driver);
-}
-
-static void __exit w90p910_keypad_exit(void)
-{
- platform_driver_unregister(&w90p910_keypad_driver);
-}
-
-module_init(w90p910_keypad_init);
-module_exit(w90p910_keypad_exit);
+module_platform_driver(w90p910_keypad_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 keypad driver");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 3dca3c14510..f2e0cbc5ab6 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -137,18 +137,7 @@ static struct platform_driver pm860x_onkey_driver = {
.probe = pm860x_onkey_probe,
.remove = __devexit_p(pm860x_onkey_remove),
};
-
-static int __init pm860x_onkey_init(void)
-{
- return platform_driver_register(&pm860x_onkey_driver);
-}
-module_init(pm860x_onkey_init);
-
-static void __exit pm860x_onkey_exit(void)
-{
- platform_driver_unregister(&pm860x_onkey_driver);
-}
-module_exit(pm860x_onkey_exit);
+module_platform_driver(pm860x_onkey_driver);
MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 22d875fde53..7b46781c30c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -179,6 +179,31 @@ config INPUT_APANEL
To compile this driver as a module, choose M here: the module will
be called apanel.
+config INPUT_GP2A
+ tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
+ depends on I2C
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap002a00f.
+
+config INPUT_GPIO_TILT_POLLED
+ tristate "Polled GPIO tilt switch"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for tilt switches connected
+ to GPIO pins that are not capable of generating interrupts.
+
+ The list of gpios to use and the mapping of their states
+ to specific angles is done via platform data.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio_tilt_polled.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a244fc6a781..46671a875b9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 3d3288a78fd..79d90163363 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -139,18 +139,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
-
-static int __init ab8500_ponkey_init(void)
-{
- return platform_driver_register(&ab8500_ponkey_driver);
-}
-module_init(ab8500_ponkey_init);
-
-static void __exit ab8500_ponkey_exit(void)
-{
- platform_driver_unregister(&ab8500_ponkey_driver);
-}
-module_exit(ab8500_ponkey_exit);
+module_platform_driver(ab8500_ponkey_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index f29de22fdda..34d401efd4a 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -122,7 +122,6 @@ static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &adxl34x_spi_pm,
},
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 09244804fb9..1cf72fe513e 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -452,10 +452,10 @@ static ssize_t adxl34x_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -541,10 +541,10 @@ static ssize_t adxl34x_rate_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned char val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtou8(buf, 10, &val);
if (error)
return error;
@@ -576,10 +576,10 @@ static ssize_t adxl34x_autosleep_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -623,13 +623,13 @@ static ssize_t adxl34x_write_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
/*
* This allows basic ADXL register write access for debug purposes.
*/
- error = strict_strtoul(buf, 16, &val);
+ error = kstrtouint(buf, 16, &val);
if (error)
return error;
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 8d345e87075..f63341f20b9 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -42,13 +42,13 @@ static int ati_remote2_set_mask(const char *val,
const struct kernel_param *kp,
unsigned int max)
{
- unsigned long mask;
+ unsigned int mask;
int ret;
if (!val)
return -EINVAL;
- ret = strict_strtoul(val, 0, &mask);
+ ret = kstrtouint(val, 0, &mask);
if (ret)
return ret;
@@ -720,11 +720,12 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
int r;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ r = kstrtouint(buf, 0, &mask);
+ if (r)
+ return r;
if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
return -EINVAL;
@@ -769,10 +770,12 @@ static ssize_t ati_remote2_store_mode_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
+ int err;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &mask);
+ if (err)
+ return err;
if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
return -EINVAL;
@@ -1010,23 +1013,4 @@ static int ati_remote2_post_reset(struct usb_interface *interface)
return r;
}
-static int __init ati_remote2_init(void)
-{
- int r;
-
- r = usb_register(&ati_remote2_driver);
- if (r)
- printk(KERN_ERR "ati_remote2: usb_register() = %d\n", r);
- else
- printk(KERN_INFO "ati_remote2: " DRIVER_DESC " " DRIVER_VERSION "\n");
-
- return r;
-}
-
-static void __exit ati_remote2_exit(void)
-{
- usb_deregister(&ati_remote2_driver);
-}
-
-module_init(ati_remote2_init);
-module_exit(ati_remote2_exit);
+module_usb_driver(ati_remote2_driver);
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index d00edc9f39d..1c4146fccfd 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -264,18 +264,7 @@ static struct platform_driver bfin_rotary_device_driver = {
#endif
},
};
-
-static int __init bfin_rotary_init(void)
-{
- return platform_driver_register(&bfin_rotary_device_driver);
-}
-module_init(bfin_rotary_init);
-
-static void __exit bfin_rotary_exit(void)
-{
- platform_driver_unregister(&bfin_rotary_device_driver);
-}
-module_exit(bfin_rotary_exit);
+module_platform_driver(bfin_rotary_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608e..06517e60e50 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index fd8407a2963..53e43d29514 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -163,16 +163,4 @@ static struct platform_driver cobalt_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init cobalt_buttons_init(void)
-{
- return platform_driver_register(&cobalt_buttons_driver);
-}
-
-static void __exit cobalt_buttons_exit(void)
-{
- platform_driver_unregister(&cobalt_buttons_driver);
-}
-
-module_init(cobalt_buttons_init);
-module_exit(cobalt_buttons_exit);
+module_platform_driver(cobalt_buttons_driver);
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 7283dd2a1ad..35083c6836c 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -267,17 +267,6 @@ static struct platform_driver dm355evm_keys_driver = {
.name = "dm355evm_keys",
},
};
-
-static int __init dm355evm_keys_init(void)
-{
- return platform_driver_register(&dm355evm_keys_driver);
-}
-module_init(dm355evm_keys_init);
-
-static void __exit dm355evm_keys_exit(void)
-{
- platform_driver_unregister(&dm355evm_keys_driver);
-}
-module_exit(dm355evm_keys_exit);
+module_platform_driver(dm355evm_keys_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
new file mode 100644
index 00000000000..71fba8c2fc6
--- /dev/null
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
+ *
+ * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
+ * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/gp2ap002a00f.h>
+
+struct gp2a_data {
+ struct input_dev *input;
+ const struct gp2a_platform_data *pdata;
+ struct i2c_client *i2c_client;
+};
+
+enum gp2a_addr {
+ GP2A_ADDR_PROX = 0x0,
+ GP2A_ADDR_GAIN = 0x1,
+ GP2A_ADDR_HYS = 0x2,
+ GP2A_ADDR_CYCLE = 0x3,
+ GP2A_ADDR_OPMOD = 0x4,
+ GP2A_ADDR_CON = 0x6
+};
+
+enum gp2a_controls {
+ /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
+ GP2A_CTRL_SSD = 0x01
+};
+
+static int gp2a_report(struct gp2a_data *dt)
+{
+ int vo = gpio_get_value(dt->pdata->vout_gpio);
+
+ input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
+ input_sync(dt->input);
+
+ return 0;
+}
+
+static irqreturn_t gp2a_irq(int irq, void *handle)
+{
+ struct gp2a_data *dt = handle;
+
+ gp2a_report(dt);
+
+ return IRQ_HANDLED;
+}
+
+static int gp2a_enable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ GP2A_CTRL_SSD);
+}
+
+static int gp2a_disable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ 0x00);
+}
+
+static int gp2a_device_open(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_enable(dt);
+ if (error < 0) {
+ dev_err(&dt->i2c_client->dev,
+ "unable to activate, err %d\n", error);
+ return error;
+ }
+
+ gp2a_report(dt);
+
+ return 0;
+}
+
+static void gp2a_device_close(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_disable(dt);
+ if (error < 0)
+ dev_err(&dt->i2c_client->dev,
+ "unable to deactivate, err %d\n", error);
+}
+
+static int __devinit gp2a_initialize(struct gp2a_data *dt)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
+ 0x08);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
+ 0xc2);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
+ 0x04);
+ if (error < 0)
+ return error;
+
+ error = gp2a_disable(dt);
+
+ return error;
+}
+
+static int __devinit gp2a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct gp2a_platform_data *pdata = client->dev.platform_data;
+ struct gp2a_data *dt;
+ int error;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->hw_setup) {
+ error = pdata->hw_setup(client);
+ if (error < 0)
+ return error;
+ }
+
+ error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
+ if (error)
+ goto err_hw_shutdown;
+
+ dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
+ if (!dt) {
+ error = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ dt->pdata = pdata;
+ dt->i2c_client = client;
+
+ error = gp2a_initialize(dt);
+ if (error < 0)
+ goto err_free_mem;
+
+ dt->input = input_allocate_device();
+ if (!dt->input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_set_drvdata(dt->input, dt);
+
+ dt->input->open = gp2a_device_open;
+ dt->input->close = gp2a_device_close;
+ dt->input->name = GP2A_I2C_NAME;
+ dt->input->id.bustype = BUS_I2C;
+ dt->input->dev.parent = &client->dev;
+
+ input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
+
+ error = request_threaded_irq(client->irq, NULL, gp2a_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ GP2A_I2C_NAME, dt);
+ if (error) {
+ dev_err(&client->dev, "irq request failed\n");
+ goto err_free_input_dev;
+ }
+
+ error = input_register_device(dt->input);
+ if (error) {
+ dev_err(&client->dev, "device registration failed\n");
+ goto err_free_irq;
+ }
+
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ i2c_set_clientdata(client, dt);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, dt);
+err_free_input_dev:
+ input_free_device(dt->input);
+err_free_mem:
+ kfree(dt);
+err_free_gpio:
+ gpio_free(pdata->vout_gpio);
+err_hw_shutdown:
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+ return error;
+}
+
+static int __devexit gp2a_remove(struct i2c_client *client)
+{
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ const struct gp2a_platform_data *pdata = dt->pdata;
+
+ device_init_wakeup(&client->dev, false);
+
+ free_irq(client->irq, dt);
+
+ input_unregister_device(dt->input);
+ kfree(dt);
+
+ gpio_free(pdata->vout_gpio);
+
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gp2a_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ enable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_disable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+
+static int gp2a_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_enable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
+
+static const struct i2c_device_id gp2a_i2c_id[] = {
+ { GP2A_I2C_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver gp2a_i2c_driver = {
+ .driver = {
+ .name = GP2A_I2C_NAME,
+ .owner = THIS_MODULE,
+ .pm = &gp2a_pm,
+ },
+ .probe = gp2a_probe,
+ .remove = __devexit_p(gp2a_remove),
+ .id_table = gp2a_i2c_id,
+};
+
+static int __init gp2a_init(void)
+{
+ return i2c_add_driver(&gp2a_i2c_driver);
+}
+
+static void __exit gp2a_exit(void)
+{
+ i2c_del_driver(&gp2a_i2c_driver);
+}
+
+module_init(gp2a_init);
+module_exit(gp2a_exit);
+
+MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
+MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
new file mode 100644
index 00000000000..277a0574c19
--- /dev/null
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -0,0 +1,213 @@
+/*
+ * Driver for tilt switches connected via GPIO lines
+ * not capable of generating interrupts
+ *
+ * Copyright (C) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on: drivers/input/keyboard/gpio_keys_polled.c
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/input/gpio_tilt.h>
+
+#define DRV_NAME "gpio-tilt-polled"
+
+struct gpio_tilt_polled_dev {
+ struct input_polled_dev *poll_dev;
+ struct device *dev;
+ const struct gpio_tilt_platform_data *pdata;
+
+ int last_state;
+
+ int threshold;
+ int count;
+};
+
+static void gpio_tilt_polled_poll(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+ struct input_dev *input = dev->input;
+ struct gpio_tilt_state *tilt_state = NULL;
+ int state, i;
+
+ if (tdev->count < tdev->threshold) {
+ tdev->count++;
+ } else {
+ state = 0;
+ for (i = 0; i < pdata->nr_gpios; i++)
+ state |= (!!gpio_get_value(pdata->gpios[i].gpio) << i);
+
+ if (state != tdev->last_state) {
+ for (i = 0; i < pdata->nr_states; i++)
+ if (pdata->states[i].gpios == state)
+ tilt_state = &pdata->states[i];
+
+ if (tilt_state) {
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_report_abs(input,
+ pdata->axes[i].axis,
+ tilt_state->axes[i]);
+
+ input_sync(input);
+ }
+
+ tdev->count = 0;
+ tdev->last_state = state;
+ }
+ }
+}
+
+static void gpio_tilt_polled_open(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->enable)
+ pdata->enable(tdev->dev);
+
+ /* report initial state of the axes */
+ tdev->last_state = -1;
+ tdev->count = tdev->threshold;
+ gpio_tilt_polled_poll(tdev->poll_dev);
+}
+
+static void gpio_tilt_polled_close(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->disable)
+ pdata->disable(tdev->dev);
+}
+
+static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+{
+ const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_tilt_polled_dev *tdev;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ int error, i;
+
+ if (!pdata || !pdata->poll_interval)
+ return -EINVAL;
+
+ tdev = kzalloc(sizeof(struct gpio_tilt_polled_dev), GFP_KERNEL);
+ if (!tdev) {
+ dev_err(dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ error = gpio_request_array(pdata->gpios, pdata->nr_gpios);
+ if (error) {
+ dev_err(dev,
+ "Could not request tilt GPIOs: %d\n", error);
+ goto err_free_tdev;
+ }
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ dev_err(dev, "no memory for polled device\n");
+ error = -ENOMEM;
+ goto err_free_gpios;
+ }
+
+ poll_dev->private = tdev;
+ poll_dev->poll = gpio_tilt_polled_poll;
+ poll_dev->poll_interval = pdata->poll_interval;
+ poll_dev->open = gpio_tilt_polled_open;
+ poll_dev->close = gpio_tilt_polled_close;
+
+ input = poll_dev->input;
+
+ input->name = pdev->name;
+ input->phys = DRV_NAME"/input0";
+ input->dev.parent = &pdev->dev;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ __set_bit(EV_ABS, input->evbit);
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_set_abs_params(input, pdata->axes[i].axis,
+ pdata->axes[i].min, pdata->axes[i].max,
+ pdata->axes[i].fuzz, pdata->axes[i].flat);
+
+ tdev->threshold = DIV_ROUND_UP(pdata->debounce_interval,
+ pdata->poll_interval);
+
+ tdev->poll_dev = poll_dev;
+ tdev->dev = dev;
+ tdev->pdata = pdata;
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ error);
+ goto err_free_polldev;
+ }
+
+ platform_set_drvdata(pdev, tdev);
+
+ return 0;
+
+err_free_polldev:
+ input_free_polled_device(poll_dev);
+err_free_gpios:
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+err_free_tdev:
+ kfree(tdev);
+
+ return error;
+}
+
+static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+{
+ struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ platform_set_drvdata(pdev, NULL);
+
+ input_unregister_polled_device(tdev->poll_dev);
+ input_free_polled_device(tdev->poll_dev);
+
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+
+ kfree(tdev);
+
+ return 0;
+}
+
+static struct platform_driver gpio_tilt_polled_driver = {
+ .probe = gpio_tilt_polled_probe,
+ .remove = __devexit_p(gpio_tilt_polled_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(gpio_tilt_polled_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Polled GPIO tilt driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 302ab46ce75..50e28306830 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -168,16 +168,5 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.remove = __devexit_p(ixp4xx_spkr_remove),
.shutdown = ixp4xx_spkr_shutdown,
};
+module_platform_driver(ixp4xx_spkr_platform_driver);
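+/*
+ * module_platform_driver() from <linux/platform_device.h> generates the
+ * same registration boilerplate that the lines removed below used to
+ * spell out by hand, roughly:
+ *
+ *	static int __init xxx_driver_init(void)
+ *	{
+ *		return platform_driver_register(&xxx_driver);
+ *	}
+ *	module_init(xxx_driver_init);
+ *
+ *	static void __exit xxx_driver_exit(void)
+ *	{
+ *		platform_driver_unregister(&xxx_driver);
+ *	}
+ *	module_exit(xxx_driver_exit);
+ *
+ * The same pattern applies to the other module_platform_driver() and
+ * module_usb_driver() conversions in this series; "xxx_driver" here is
+ * just a placeholder name.
+ */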
-static int __init ixp4xx_spkr_init(void)
-{
- return platform_driver_register(&ixp4xx_spkr_platform_driver);
-}
-
-static void __exit ixp4xx_spkr_exit(void)
-{
- platform_driver_unregister(&ixp4xx_spkr_platform_driver);
-}
-
-module_init(ixp4xx_spkr_init);
-module_exit(ixp4xx_spkr_exit);
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index fc62256c963..d99151a8bf1 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -580,26 +580,7 @@ static struct usb_driver keyspan_driver =
.id_table = keyspan_table
};
-static int __init usb_keyspan_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&keyspan_driver);
- if (result)
- err("usb_register failed. Error number %d\n", result);
-
- return result;
-}
-
-static void __exit usb_keyspan_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&keyspan_driver);
-}
-
-module_init(usb_keyspan_init);
-module_exit(usb_keyspan_exit);
+module_usb_driver(keyspan_driver);
MODULE_DEVICE_TABLE(usb, keyspan_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 7de0ded4ccc..23cf0827104 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -166,18 +166,7 @@ static struct platform_driver max8925_onkey_driver = {
.probe = max8925_onkey_probe,
.remove = __devexit_p(max8925_onkey_remove),
};
-
-static int __init max8925_onkey_init(void)
-{
- return platform_driver_register(&max8925_onkey_driver);
-}
-module_init(max8925_onkey_init);
-
-static void __exit max8925_onkey_exit(void)
-{
- platform_driver_unregister(&max8925_onkey_driver);
-}
-module_exit(max8925_onkey_exit);
+module_platform_driver(max8925_onkey_driver);
MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 09b05228865..8428f1e8e83 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -255,7 +255,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver mc13783_pwrbutton_driver = {
+static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
.remove = __devexit_p(mc13783_pwrbutton_remove),
.driver = {
@@ -264,17 +264,7 @@ struct platform_driver mc13783_pwrbutton_driver = {
},
};
-static int __init mc13783_pwrbutton_init(void)
-{
- return platform_driver_register(&mc13783_pwrbutton_driver);
-}
-module_init(mc13783_pwrbutton_init);
-
-static void __exit mc13783_pwrbutton_exit(void)
-{
- platform_driver_unregister(&mc13783_pwrbutton_driver);
-}
-module_exit(mc13783_pwrbutton_exit);
+module_platform_driver(mc13783_pwrbutton_driver);
MODULE_ALIAS("platform:mc13783-pwrbutton");
MODULE_DESCRIPTION("MC13783 Power Button");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index f71dc728da5..208d1a1cc7f 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -41,18 +41,67 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#define MPU3050_CHIP_ID_REG 0x00
#define MPU3050_CHIP_ID 0x69
-#define MPU3050_XOUT_H 0x1D
-#define MPU3050_PWR_MGM 0x3E
-#define MPU3050_PWR_MGM_POS 6
-#define MPU3050_PWR_MGM_MASK 0x40
#define MPU3050_AUTO_DELAY 1000
#define MPU3050_MIN_VALUE -32768
#define MPU3050_MAX_VALUE 32767
+#define MPU3050_DEFAULT_POLL_INTERVAL 200
+#define MPU3050_DEFAULT_FS_RANGE 3
+
+/* Register map */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_PWR_MGM 0x3E
+#define MPU3050_PWR_MGM_POS 6
+
+/* Register bits */
+
+/* DLPF_FS_SYNC */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+/* INT_CFG */
+#define MPU3050_RAW_RDY_EN 0x01
+#define MPU3050_MPU_RDY_EN 0x02
+#define MPU3050_LATCH_INT_EN 0x04
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL 0x07
+#define MPU3050_PWR_MGM_STBY_ZG 0x08
+#define MPU3050_PWR_MGM_STBY_YG 0x10
+#define MPU3050_PWR_MGM_STBY_XG 0x20
+#define MPU3050_PWR_MGM_SLEEP 0x40
+#define MPU3050_PWR_MGM_RESET 0x80
+#define MPU3050_PWR_MGM_MASK 0x40
+
struct axis_data {
s16 x;
s16 y;
@@ -148,9 +197,20 @@ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
static int mpu3050_input_open(struct input_dev *input)
{
struct mpu3050_sensor *sensor = input_get_drvdata(input);
+ int error;
pm_runtime_get(sensor->dev);
+ /* Enable interrupts */
+ error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
+ MPU3050_LATCH_INT_EN |
+ MPU3050_RAW_RDY_EN |
+ MPU3050_MPU_RDY_EN);
+ if (error < 0) {
+ pm_runtime_put(sensor->dev);
+ return error;
+ }
+
return 0;
}
@@ -192,6 +252,51 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
}
/**
+ * mpu3050_hw_init - initialize hardware
+ * @sensor: the sensor
+ *
+ * Called during device probe; configures the sampling method.
+ */
+static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+{
+ struct i2c_client *client = sensor->client;
+ int ret;
+ u8 reg;
+
+ /* Reset */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MPU3050_PWR_MGM_CLKSEL;
+ ret |= MPU3050_PWR_MGM_PLL_Z;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Output frequency divider: sets the sample period to the default poll interval */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV,
+ MPU3050_DEFAULT_POLL_INTERVAL - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set low pass filter and full scale */
+ reg = MPU3050_DEFAULT_FS_RANGE;
+ reg |= MPU3050_DLPF_CFG_42HZ << 3;
+ reg |= MPU3050_EXT_SYNC_NONE << 5;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
* mpu3050_probe - device detection callback
* @client: i2c client of found device
* @id: id match information
@@ -256,10 +361,14 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
pm_runtime_set_active(&client->dev);
+ error = mpu3050_hw_init(sensor);
+ if (error)
+ goto err_pm_set_suspended;
+
error = request_threaded_irq(client->irq,
NULL, mpu3050_interrupt_thread,
IRQF_TRIGGER_RISING,
- "mpu_int", sensor);
+ "mpu3050", sensor);
if (error) {
dev_err(&client->dev,
"can't get IRQ %d, error %d\n", client->irq, error);
@@ -348,11 +457,18 @@ static const struct i2c_device_id mpu3050_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
+static const struct of_device_id mpu3050_of_match[] = {
+ { .compatible = "invn,mpu3050", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_of_match);
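+/*
+ * With the OF match table above a board's device tree can instantiate
+ * the sensor from its I2C controller node, along the lines of:
+ *
+ *	gyroscope@68 {
+ *		compatible = "invn,mpu3050";
+ *		reg = <0x68>;
+ *		interrupt-parent = <&gpio>;
+ *		interrupts = <12 1>;
+ *	};
+ *
+ * Only the "invn,mpu3050" compatible string comes from this driver; the
+ * node name, unit address and interrupt specifier above are examples
+ * and depend on the board and its interrupt controller.
+ */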
+
static struct i2c_driver mpu3050_i2c_driver = {
.driver = {
.name = "mpu3050",
.owner = THIS_MODULE,
.pm = &mpu3050_pm,
+ .of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
.remove = __devexit_p(mpu3050_remove),
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index 99335c28625..e09b4fe8191 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -125,19 +125,7 @@ static struct platform_driver pcap_keys_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init pcap_keys_init(void)
-{
- return platform_driver_register(&pcap_keys_device_driver);
-};
-
-static void __exit pcap_keys_exit(void)
-{
- platform_driver_unregister(&pcap_keys_device_driver);
-};
-
-module_init(pcap_keys_init);
-module_exit(pcap_keys_exit);
+module_platform_driver(pcap_keys_device_driver);
MODULE_DESCRIPTION("Motorola PCAP2 input events driver");
MODULE_AUTHOR("Ilya Petrov <ilya.muromec@gmail.com>");
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 95562735728..53891de80b0 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -113,18 +113,7 @@ static struct platform_driver pcf50633_input_driver = {
.probe = pcf50633_input_probe,
.remove = __devexit_p(pcf50633_input_remove),
};
-
-static int __init pcf50633_input_init(void)
-{
- return platform_driver_register(&pcf50633_input_driver);
-}
-module_init(pcf50633_input_init);
-
-static void __exit pcf50633_input_exit(void)
-{
- platform_driver_unregister(&pcf50633_input_driver);
-}
-module_exit(pcf50633_input_exit);
+module_platform_driver(pcf50633_input_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 input driver");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 34f4d2e0f50..b2484aa07f3 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -134,17 +134,5 @@ static struct platform_driver pcspkr_platform_driver = {
.remove = __devexit_p(pcspkr_remove),
.shutdown = pcspkr_shutdown,
};
+module_platform_driver(pcspkr_platform_driver);
-
-static int __init pcspkr_init(void)
-{
- return platform_driver_register(&pcspkr_platform_driver);
-}
-
-static void __exit pcspkr_exit(void)
-{
- platform_driver_unregister(&pcspkr_platform_driver);
-}
-
-module_init(pcspkr_init);
-module_exit(pcspkr_exit);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 43192930824..dfbfb463ea5 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -277,18 +277,7 @@ static struct platform_driver pm8xxx_vib_driver = {
.pm = &pm8xxx_vib_pm_ops,
},
};
-
-static int __init pm8xxx_vib_init(void)
-{
- return platform_driver_register(&pm8xxx_vib_driver);
-}
-module_init(pm8xxx_vib_init);
-
-static void __exit pm8xxx_vib_exit(void)
-{
- platform_driver_unregister(&pm8xxx_vib_driver);
-}
-module_exit(pm8xxx_vib_exit);
+module_platform_driver(pm8xxx_vib_driver);
MODULE_ALIAS("platform:pm8xxx_vib");
MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b3cfb9c71e6..0f83d0f1d01 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -213,18 +213,7 @@ static struct platform_driver pmic8xxx_pwrkey_driver = {
.pm = &pm8xxx_pwr_key_pm_ops,
},
};
-
-static int __init pmic8xxx_pwrkey_init(void)
-{
- return platform_driver_register(&pmic8xxx_pwrkey_driver);
-}
-module_init(pmic8xxx_pwrkey_init);
-
-static void __exit pmic8xxx_pwrkey_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_pwrkey_driver);
-}
-module_exit(pmic8xxx_pwrkey_exit);
+module_platform_driver(pmic8xxx_pwrkey_driver);
MODULE_ALIAS("platform:pmic8xxx_pwrkey");
MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index f45947190e4..538f7049ec6 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -441,18 +441,7 @@ static struct usb_driver powermate_driver = {
.id_table = powermate_devices,
};
-static int __init powermate_init(void)
-{
- return usb_register(&powermate_driver);
-}
-
-static void __exit powermate_cleanup(void)
-{
- usb_deregister(&powermate_driver);
-}
-
-module_init(powermate_init);
-module_exit(powermate_cleanup);
+module_usb_driver(powermate_driver);
MODULE_AUTHOR( "William R Sowerbutts" );
MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" );
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 57c294f0719..fc84c8a5114 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -180,18 +180,7 @@ static struct platform_driver pwm_beeper_driver = {
.pm = PWM_BEEPER_PM_OPS,
},
};
-
-static int __init pwm_beeper_init(void)
-{
- return platform_driver_register(&pwm_beeper_driver);
-}
-module_init(pwm_beeper_init);
-
-static void __exit pwm_beeper_exit(void)
-{
- platform_driver_unregister(&pwm_beeper_driver);
-}
-module_exit(pwm_beeper_exit);
+module_platform_driver(pwm_beeper_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PWM beeper driver");
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e2c7f622a0b..aeb02bcf723 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -100,19 +100,7 @@ static struct platform_driver rb532_button_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rb532_button_init(void)
-{
- return platform_driver_register(&rb532_button_driver);
-}
-
-static void __exit rb532_button_exit(void)
-{
- platform_driver_unregister(&rb532_button_driver);
-}
-
-module_init(rb532_button_init);
-module_exit(rb532_button_exit);
+module_platform_driver(rb532_button_driver);
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 2be21694fac..f07f784198b 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -284,19 +284,7 @@ static struct platform_driver rotary_encoder_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init rotary_encoder_init(void)
-{
- return platform_driver_register(&rotary_encoder_driver);
-}
-
-static void __exit rotary_encoder_exit(void)
-{
- platform_driver_unregister(&rotary_encoder_driver);
-}
-
-module_init(rotary_encoder_init);
-module_exit(rotary_encoder_exit);
+module_platform_driver(rotary_encoder_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DESCRIPTION("GPIO rotary encoder driver");
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 1a80c0dab83..5d9fd557119 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -164,17 +164,6 @@ static struct platform_driver sgi_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init sgi_buttons_init(void)
-{
- return platform_driver_register(&sgi_buttons_driver);
-}
-
-static void __exit sgi_buttons_exit(void)
-{
- platform_driver_unregister(&sgi_buttons_driver);
-}
+module_platform_driver(sgi_buttons_driver);
MODULE_LICENSE("GPL");
-module_init(sgi_buttons_init);
-module_exit(sgi_buttons_exit);
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 38e4b507b94..19a68828cd8 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -107,25 +107,14 @@ static int __exit twl4030_pwrbutton_remove(struct platform_device *pdev)
}
static struct platform_driver twl4030_pwrbutton_driver = {
+ .probe = twl4030_pwrbutton_probe,
.remove = __exit_p(twl4030_pwrbutton_remove),
.driver = {
.name = "twl4030_pwrbutton",
.owner = THIS_MODULE,
},
};
-
-static int __init twl4030_pwrbutton_init(void)
-{
- return platform_driver_probe(&twl4030_pwrbutton_driver,
- twl4030_pwrbutton_probe);
-}
-module_init(twl4030_pwrbutton_init);
-
-static void __exit twl4030_pwrbutton_exit(void)
-{
- platform_driver_unregister(&twl4030_pwrbutton_driver);
-}
-module_exit(twl4030_pwrbutton_exit);
+module_platform_driver(twl4030_pwrbutton_driver);
MODULE_ALIAS("platform:twl4030_pwrbutton");
MODULE_DESCRIPTION("Triton2 Power Button");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 3c1a432c14d..37651373a95 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -278,21 +278,9 @@ static struct platform_driver twl4030_vibra_driver = {
#endif
},
};
-
-static int __init twl4030_vibra_init(void)
-{
- return platform_driver_register(&twl4030_vibra_driver);
-}
-module_init(twl4030_vibra_init);
-
-static void __exit twl4030_vibra_exit(void)
-{
- platform_driver_unregister(&twl4030_vibra_driver);
-}
-module_exit(twl4030_vibra_exit);
+module_platform_driver(twl4030_vibra_driver);
MODULE_ALIAS("platform:twl4030-vibra");
-
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ad153a417ee..45874fed523 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -410,18 +410,7 @@ static struct platform_driver twl6040_vibra_driver = {
.pm = &twl6040_vibra_pm_ops,
},
};
-
-static int __init twl6040_vibra_init(void)
-{
- return platform_driver_register(&twl6040_vibra_driver);
-}
-module_init(twl6040_vibra_init);
-
-static void __exit twl6040_vibra_exit(void)
-{
- platform_driver_unregister(&twl6040_vibra_driver);
-}
-module_exit(twl6040_vibra_exit);
+module_platform_driver(twl6040_vibra_driver);
MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index c3d7ba5f5b4..47f18d6bce4 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -145,18 +145,7 @@ static struct platform_driver wm831x_on_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init wm831x_on_init(void)
-{
- return platform_driver_register(&wm831x_on_driver);
-}
-module_init(wm831x_on_init);
-
-static void __exit wm831x_on_exit(void)
-{
- platform_driver_unregister(&wm831x_on_driver);
-}
-module_exit(wm831x_on_exit);
+module_platform_driver(wm831x_on_driver);
MODULE_ALIAS("platform:wm831x-on");
MODULE_DESCRIPTION("WM831x ON pin");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index ad2e51c04db..02ca8680ea5 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -361,15 +361,12 @@ static const struct xenbus_device_id xenkbd_ids[] = {
{ "" }
};
-static struct xenbus_driver xenkbd_driver = {
- .name = "vkbd",
- .owner = THIS_MODULE,
- .ids = xenkbd_ids,
+static DEFINE_XENBUS_DRIVER(xenkbd, ,
.probe = xenkbd_probe,
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
-};
+);
static int __init xenkbd_init(void)
{
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 41201c6b5e6..f4776e7f8c1 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -988,22 +988,7 @@ static struct usb_driver yealink_driver = {
.id_table = usb_table,
};
-static int __init yealink_dev_init(void)
-{
- int ret = usb_register(&yealink_driver);
- if (ret == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return ret;
-}
-
-static void __exit yealink_dev_exit(void)
-{
- usb_deregister(&yealink_driver);
-}
-
-module_init(yealink_dev_init);
-module_exit(yealink_dev_exit);
+module_usb_driver(yealink_driver);
MODULE_DEVICE_TABLE (usb, usb_table);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 003587c71f4..bd87380bd87 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -17,13 +17,63 @@
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include "psmouse.h"
#include "alps.h"
-#define ALPS_OLDPROTO 0x01 /* old style input */
+/*
+ * Definitions for ALPS version 3 and 4 command mode protocol
+ */
+#define ALPS_V3_X_MAX 2000
+#define ALPS_V3_Y_MAX 1400
+
+#define ALPS_BITMAP_X_BITS 15
+#define ALPS_BITMAP_Y_BITS 11
+
+#define ALPS_CMD_NIBBLE_10 0x01f2
+
+static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
+ { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
+static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
+ { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -35,30 +85,33 @@
6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
- { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
- { { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
- { { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 }, /* HP ze1115 */
- { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
- { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
- { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
- { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
- { { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
- { { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
- { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
- { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
- { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
+ { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Satellite Pro M10 */
+ { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */
+ { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */
+ { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
+ { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
+ { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
+ { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
+ { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
+ { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
+ { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
+ { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
+ { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
- { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
- { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
};
/*
@@ -67,42 +120,7 @@ static const struct alps_model_info alps_model_data[] = {
* isn't valid per PS/2 spec.
*/
-/*
- * PS/2 packet format
- *
- * byte 0: 0 0 YSGN XSGN 1 M R L
- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- *
- * Note that the device never signals overflow condition.
- *
- * ALPS absolute Mode - new format
- *
- * byte 0: 1 ? ? ? 1 ? ? ?
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
- * byte 3: 0 y9 y8 y7 1 M R L
- * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * Dualpoint device -- interleaved packet format
- *
- * byte 0: 1 1 0 0 1 1 1 1
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 0 fin ges
- * byte 3: 0 0 YSGN XSGN 1 1 1 1
- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- * byte 6: 0 y9 y8 y7 1 m r l
- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * CAPITALS = stick, miniscules = touchpad
- *
- * ?'s can have different meanings on different models,
- * such as wheel rotation, extra buttons, stick buttons
- * on a dualpoint, etc.
- */
+/* Packet formats are described in Documentation/input/alps.txt */
static bool alps_is_valid_first_byte(const struct alps_model_info *model,
unsigned char data)
@@ -137,7 +155,7 @@ static void alps_report_buttons(struct psmouse *psmouse,
input_sync(dev2);
}
-static void alps_process_packet(struct psmouse *psmouse)
+static void alps_process_packet_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
@@ -147,7 +165,7 @@ static void alps_process_packet(struct psmouse *psmouse)
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if (model->flags & ALPS_OLDPROTO) {
+ if (model->proto_version == ALPS_PROTO_V1) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
middle = 0;
@@ -239,6 +257,403 @@ static void alps_process_packet(struct psmouse *psmouse)
input_sync(dev);
}
+/*
+ * Process bitmap data from v3 and v4 protocols. Returns the number of
+ * fingers detected. A return value of 0 means at least one of the
+ * bitmaps was empty.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of all contacts.
+ * These points are returned in x1, y1, x2, and y2 when the return value
+ * is greater than 0.
+ */
+static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+ int *x1, int *y1, int *x2, int *y2)
+{
+ struct alps_bitmap_point {
+ int start_bit;
+ int num_bits;
+ };
+
+ int fingers_x = 0, fingers_y = 0, fingers;
+ int i, bit, prev_bit;
+ struct alps_bitmap_point x_low = {0,}, x_high = {0,};
+ struct alps_bitmap_point y_low = {0,}, y_high = {0,};
+ struct alps_bitmap_point *point;
+
+ if (!x_map || !y_map)
+ return 0;
+
+ *x1 = *y1 = *x2 = *y2 = 0;
+
+ prev_bit = 0;
+ point = &x_low;
+ for (i = 0; x_map != 0; i++, x_map >>= 1) {
+ bit = x_map & 1;
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_x++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &x_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * y bitmap is reversed for what we need (lower positions are in
+ * higher bits), so we process from the top end.
+ */
+ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+ prev_bit = 0;
+ point = &y_low;
+ for (i = 0; y_map != 0; i++, y_map <<= 1) {
+ bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_y++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &y_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * Fingers can overlap, so we use the maximum count of fingers
+ * on either axis as the finger count.
+ */
+ fingers = max(fingers_x, fingers_y);
+
+ /*
+ * If total fingers is > 1 but either axis reports only a single
+ * contact, we have overlapping or adjacent fingers. For the
+ * purposes of creating a bounding box, divide the single contact
+ * (roughly) equally between the two points.
+ */
+ if (fingers > 1) {
+ if (fingers_x == 1) {
+ i = x_low.num_bits / 2;
+ x_low.num_bits = x_low.num_bits - i;
+ x_high.start_bit = x_low.start_bit + i;
+ x_high.num_bits = max(i, 1);
+ } else if (fingers_y == 1) {
+ i = y_low.num_bits / 2;
+ y_low.num_bits = y_low.num_bits - i;
+ y_high.start_bit = y_low.start_bit + i;
+ y_high.num_bits = max(i, 1);
+ }
+ }
+
+ *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+
+ if (fingers > 1) {
+ *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+ }
+
+ return fingers;
+}
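+
+/*
+ * Worked example: an x bitmap of 0x030c has set bits 2-3 and 8-9, so
+ * fingers_x = 2 with x_low = {2, 2} and x_high = {8, 2}.  With
+ * ALPS_V3_X_MAX = 2000 and ALPS_BITMAP_X_BITS = 15 the bounding box
+ * edges come out as
+ *
+ *	x1 = 2000 * (2 * 2 + 2 - 1) / (2 * 14) = 357
+ *	x2 = 2000 * (2 * 8 + 2 - 1) / (2 * 14) = 1214
+ *
+ * i.e. each contact is placed at the centre of its run of set bits,
+ * scaled to the absolute coordinate range.  The y axis works the same
+ * way except that the bitmap is scanned from the high end.
+ */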
+
+static void alps_set_slot(struct input_dev *dev, int slot, bool active,
+ int x, int y)
+{
+ input_mt_slot(dev, slot);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ if (active) {
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ }
+}
+
+static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
+ int x1, int y1, int x2, int y2)
+{
+ alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
+ alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
+}
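+
+/*
+ * For example, two contacts with a bounding box of (300, 400)-(1200, 900)
+ * are reported as slot 0 = (300, 400) and slot 1 = (1200, 900); with a
+ * single contact slot 1 is marked inactive.  This is the usual semi-MT
+ * convention of reporting only the corners of the bounding box rather
+ * than tracking individual fingers.
+ */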
+
+static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = priv->dev2;
+ int x, y, z, left, right, middle;
+
+ /* Sanity check packet */
+ if (!(packet[0] & 0x40)) {
+ psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
+ return;
+ }
+
+ /*
+ * There's a special packet that seems to indicate the end
+ * of a stream of trackstick data. Filter these out.
+ */
+ if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
+ return;
+
+ x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
+ y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
+ z = (packet[4] & 0x7c) >> 2;
+
+ /*
+ * The x and y values tend to be quite large, and when used
+ * alone the trackstick is difficult to use. Scale them down
+ * to compensate.
+ */
+ x /= 8;
+ y /= 8;
+
+ input_report_rel(dev, REL_X, x);
+ input_report_rel(dev, REL_Y, -y);
+
+ /*
+ * Most ALPS models report the trackstick buttons in the touchpad
+ * packets, but a few report them here. No reliable way has been
+ * found to differentiate between the models upfront, so we enable
+ * the quirk in response to seeing a button press in the trackstick
+ * packet.
+ */
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
+ (left || right || middle))
+ priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;
+
+ if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+ }
+
+ input_sync(dev);
+ return;
+}
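+
+/*
+ * Decoding sketch for the function above with a hypothetical packet:
+ * packet[0] = 0x60, packet[1] = 0x70, packet[2] = 0x08 gives
+ * x = (s8)0xf0 = -16 and y = (s8)0x08 = 8 (bits 5 and 4 of byte 0
+ * supply the sign bits); after the divide-by-8 scaling this is
+ * reported as REL_X = -2 and REL_Y = -1, since y is inverted on
+ * reporting.
+ */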
+
+static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
+ int x, y, z;
+ int left, right, middle;
+ int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ int fingers = 0, bmap_fingers;
+ unsigned int x_bitmap, y_bitmap;
+
+ /*
+ * There's no single feature of touchpad position and bitmap packets
+ * that can be used to distinguish between them. We rely on the fact
+ * that a bitmap packet should always follow a position packet with
+ * bit 6 of packet[4] set.
+ */
+ if (priv->multi_packet) {
+ /*
+ * Sometimes a position packet will indicate a multi-packet
+ * sequence, but then what follows is another position
+ * packet. Check for this, and when it happens process the
+ * position packet as usual.
+ */
+ if (packet[0] & 0x40) {
+ fingers = (packet[5] & 0x3) + 1;
+ x_bitmap = ((packet[4] & 0x7e) << 8) |
+ ((packet[1] & 0x7f) << 2) |
+ ((packet[0] & 0x30) >> 4);
+ y_bitmap = ((packet[3] & 0x70) << 4) |
+ ((packet[2] & 0x7f) << 1) |
+ (packet[4] & 0x01);
+
+ bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ &x1, &y1, &x2, &y2);
+
+ /*
+ * We shouldn't report more than one finger if
+ * we don't have two coordinates.
+ */
+ if (fingers > 1 && bmap_fingers < 2)
+ fingers = bmap_fingers;
+
+ /* Now process position packet */
+ packet = priv->multi_data;
+ } else {
+ priv->multi_packet = 0;
+ }
+ }
+
+ /*
+ * Bit 6 of byte 0 is not usually set in position packets. The only
+ * times it seems to be set is in situations where the data is
+ * suspect anyway, e.g. a palm resting flat on the touchpad. Given
+ * this combined with the fact that this bit is useful for filtering
+ * out misidentified bitmap packets, we reject anything with this
+ * bit set.
+ */
+ if (packet[0] & 0x40)
+ return;
+
+ if (!priv->multi_packet && (packet[4] & 0x40)) {
+ priv->multi_packet = 1;
+ memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
+ return;
+ }
+
+ priv->multi_packet = 0;
+
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ /*
+ * Sometimes the hardware sends a single packet with z = 0
+ * in the middle of a stream. Real releases generate packets
+ * with x, y, and z all zero, so these seem to be flukes.
+ * Ignore them.
+ */
+ if (x && y && !z)
+ return;
+
+ /*
+ * If we don't have MT data or the bitmaps were empty, we have
+ * to rely on ST data.
+ */
+ if (!fingers) {
+ x1 = x;
+ y1 = y;
+ fingers = z > 0 ? 1 : 0;
+ }
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+ input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_sync(dev);
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
+ left = packet[3] & 0x10;
+ right = packet[3] & 0x20;
+ middle = packet[3] & 0x40;
+
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+ input_sync(dev2);
+ }
+}
+
+static void alps_process_packet_v3(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ /*
+ * v3 protocol packets come in three types, two representing
+ * touchpad data and one representing trackstick data.
+ * Trackstick packets seem to be distinguished by always
+ * having 0x3f in the last byte. This value has never been
+ * observed in the last byte of either of the other types
+ * of packets.
+ */
+ if (packet[5] == 0x3f) {
+ alps_process_trackstick_packet_v3(psmouse);
+ return;
+ }
+
+ alps_process_touchpad_packet_v3(psmouse);
+}
+
+static void alps_process_packet_v4(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ int x, y, z;
+ int left, right;
+
+ left = packet[4] & 0x01;
+ right = packet[4] & 0x02;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_report_key(dev, BTN_TOOL_FINGER, z > 0);
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
+ input_sync(dev);
+}
+
+static void alps_process_packet(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ alps_process_packet_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ alps_process_packet_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ alps_process_packet_v4(psmouse);
+ break;
+ }
+}
+
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -344,7 +759,7 @@ static void alps_flush_packet(unsigned long data)
serio_pause_rx(psmouse->ps2dev.serio);
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
/*
 * We did not receive any more data in a reasonable amount of time.
@@ -395,8 +810,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- /* Bytes 2 - 6 should have 0 in the highest bit */
- if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
+ /* Bytes 2 - pktsize should have 0 in the highest bit */
+ if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
@@ -404,7 +819,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
alps_process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
@@ -412,11 +827,127 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_GOOD_DATA;
}
+static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int command;
+ unsigned char *param;
+ unsigned char dummy[4];
+
+ BUG_ON(nibble > 0xf);
+
+ command = priv->nibble_commands[nibble].command;
+ param = (command & 0x0f00) ?
+ dummy : (unsigned char *)&priv->nibble_commands[nibble].data;
+
+ if (ps2_command(ps2dev, param, command))
+ return -1;
+
+ return 0;
+}
+
+static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int i, nibble;
+
+ if (ps2_command(ps2dev, NULL, priv->addr_command))
+ return -1;
+
+ for (i = 12; i >= 0; i -= 4) {
+ nibble = (addr >> i) & 0xf;
+ if (alps_command_mode_send_nibble(psmouse, nibble))
+ return -1;
+ }
+
+ return 0;
+}
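+
+/*
+ * Addressing sketch: with the v3 nibble table above, selecting register
+ * 0x0163 sends the address command (PSMOUSE_CMD_RESET_WRAP for v3,
+ * PSMOUSE_CMD_DISABLE for v4) followed by one command per address
+ * nibble, most significant nibble first:
+ *
+ *	0x0 -> PSMOUSE_CMD_SETPOLL
+ *	0x1 -> PSMOUSE_CMD_RESET_DIS
+ *	0x6 -> PSMOUSE_CMD_SETRATE, data 0x3c
+ *	0x3 -> PSMOUSE_CMD_SETRATE, data 0x0a
+ */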
+
+static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ /*
+ * The address being read is returned in the first two bytes
+ * of the result. Check that this address matches the expected
+ * address.
+ */
+ if (addr != ((param[0] << 8) | param[1]))
+ return -1;
+
+ return param[2];
+}
+
+static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_read_reg(psmouse, addr);
+}
+
+static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
+{
+ if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
+ return -1;
+ if (alps_command_mode_send_nibble(psmouse, value & 0xf))
+ return -1;
+ return 0;
+}
+
+static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
+ u8 value)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_write_reg(psmouse, value);
+}
+
+static int alps_enter_command_mode(struct psmouse *psmouse,
+ unsigned char *resp)
+{
+ unsigned char param[4];
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "failed to enter command mode\n");
+ return -1;
+ }
+
+ if (param[0] != 0x88 || param[1] != 0x07) {
+ psmouse_dbg(psmouse,
+ "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+ return -1;
+ }
+
+ if (resp)
+ *resp = param[2];
+ return 0;
+}
+
+static inline int alps_exit_command_mode(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
+ return -1;
+ return 0;
+}
+
static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
unsigned char param[4];
+ const struct alps_model_info *model = NULL;
int i;
/*
@@ -464,12 +995,41 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
*version = (param[0] << 8) | (param[1] << 4) | i;
}
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
if (!memcmp(param, alps_model_data[i].signature,
- sizeof(alps_model_data[i].signature)))
- return alps_model_data + i;
+ sizeof(alps_model_data[i].signature))) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
- return NULL;
+ if (model && model->proto_version > ALPS_PROTO_V2) {
+ /*
+ * Need to check command mode response to identify
+ * model
+ */
+ model = NULL;
+ if (alps_enter_command_mode(psmouse, param)) {
+ psmouse_warn(psmouse,
+ "touchpad failed to enter command mode\n");
+ } else {
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
+ alps_model_data[i].command_mode_resp == param[0]) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
+ alps_exit_command_mode(psmouse);
+
+ if (!model)
+ psmouse_dbg(psmouse,
+ "Unknown command mode response %2.2x\n",
+ param[0]);
+ }
+ }
+
+ return model;
}
/*
@@ -477,7 +1037,7 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
* subsequent commands. It looks like glidepad is behind stickpointer,
* I'd thought it would be other way around...
*/
-static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
@@ -494,7 +1054,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
return 0;
}
-static int alps_absolute_mode(struct psmouse *psmouse)
+static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
@@ -565,17 +1125,17 @@ static int alps_tap_mode(struct psmouse *psmouse, int enable)
static int alps_poll(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char buf[6];
+ unsigned char buf[sizeof(psmouse->packet)];
bool poll_failed;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, true);
+ alps_passthrough_mode_v2(psmouse, true);
poll_failed = ps2_command(&psmouse->ps2dev, buf,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, false);
+ alps_passthrough_mode_v2(psmouse, false);
if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
return -1;
@@ -592,13 +1152,13 @@ static int alps_poll(struct psmouse *psmouse)
return 0;
}
-static int alps_hw_init(struct psmouse *psmouse)
+static int alps_hw_init_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, true)) {
+ alps_passthrough_mode_v2(psmouse, true)) {
return -1;
}
@@ -607,13 +1167,13 @@ static int alps_hw_init(struct psmouse *psmouse)
return -1;
}
- if (alps_absolute_mode(psmouse)) {
+ if (alps_absolute_mode_v1_v2(psmouse)) {
psmouse_err(psmouse, "Failed to enable absolute mode\n");
return -1;
}
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, false)) {
+ alps_passthrough_mode_v2(psmouse, false)) {
return -1;
}
@@ -626,6 +1186,297 @@ static int alps_hw_init(struct psmouse *psmouse)
return 0;
}
+/*
+ * Enable or disable passthrough mode to the trackstick. Must be in
+ * command mode when calling this function.
+ */
+static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ return -1;
+
+ if (enable)
+ reg_val |= 0x01;
+ else
+ reg_val &= ~0x01;
+
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v3(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x06;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ /* Check for trackstick */
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ goto error;
+ if (reg_val & 0x80) {
+ if (alps_passthrough_mode_v3(psmouse, true))
+ goto error;
+ if (alps_exit_command_mode(psmouse))
+ goto error;
+
+ /*
+ * E7 report for the trackstick
+ *
+ * There have been reports of failures that seem to trace back
+ * to the above trackstick check failing. When these occur
+ * this E7 report fails, so when that happens we continue
+ * with the assumption that there isn't a trackstick after
+ * all.
+ */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_warn(psmouse, "trackstick E7 report failed\n");
+ } else {
+ psmouse_dbg(psmouse,
+ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+
+ /*
+ * Not sure what this does, but it is absolutely
+ * essential. Without it, the touchpad does not
+ * work at all and the trackstick just emits normal
+ * PS/2 packets.
+ */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ alps_command_mode_send_nibble(psmouse, 0x9) ||
+ alps_command_mode_send_nibble(psmouse, 0x4)) {
+ psmouse_err(psmouse,
+ "Error sending magic E6 sequence\n");
+ goto error_passthrough;
+ }
+ }
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error_passthrough;
+ if (alps_passthrough_mode_v3(psmouse, false))
+ goto error;
+ }
+
+ if (alps_absolute_mode_v3(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0006);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0007);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0144) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x04))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0159) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0163) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0162) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
+ goto error;
+
+ /*
+ * This ensures the trackstick packets are in the format
+ * supported by this driver. If bit 1 isn't set the packet
+ * format is different.
+ */
+ if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error_passthrough:
+ /* Something failed while in passthrough mode, so try to get out */
+ if (!alps_enter_command_mode(psmouse, NULL))
+ alps_passthrough_mode_v3(psmouse, false);
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v4(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x02;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v4(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v4_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_DISABLE;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ if (alps_absolute_mode_v4(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /*
+ * This sequence changes the output from a 9-byte to an
+ * 8-byte format. All the same data seems to be present,
+ * just in a more compact format.
+ */
+ param[0] = 0xc8;
+ param[1] = 0x64;
+ param[2] = 0x50;
+ if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
+ return -1;
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+static int alps_hw_init(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+ int ret = -1;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ ret = alps_hw_init_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ ret = alps_hw_init_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ ret = alps_hw_init_v4(psmouse);
+ break;
+ }
+
+ return ret;
+}
+
static int alps_reconnect(struct psmouse *psmouse)
{
const struct alps_model_info *model;
@@ -666,6 +1517,8 @@ int alps_init(struct psmouse *psmouse)
psmouse->private = priv;
+ psmouse_reset(psmouse);
+
model = alps_get_model(psmouse, &version);
if (!model)
goto init_fail;
@@ -693,8 +1546,29 @@ int alps_init(struct psmouse *psmouse)
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+ break;
+ case ALPS_PROTO_V3:
+ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, 2);
+ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+
+ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+ /* fall through */
+ case ALPS_PROTO_V4:
+ input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+ break;
+ }
+
input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
if (model->flags & ALPS_WHEEL) {
@@ -737,7 +1611,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->poll = alps_poll;
psmouse->disconnect = alps_disconnect;
psmouse->reconnect = alps_reconnect;
- psmouse->pktsize = 6;
+ psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 904ed8b3c8b..a00a4ab92a0 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,20 +12,39 @@
#ifndef _ALPS_H
#define _ALPS_H
+#define ALPS_PROTO_V1 0
+#define ALPS_PROTO_V2 1
+#define ALPS_PROTO_V3 2
+#define ALPS_PROTO_V4 3
+
struct alps_model_info {
unsigned char signature[3];
+ unsigned char command_mode_resp; /* v3/v4 only */
+ unsigned char proto_version;
unsigned char byte0, mask0;
unsigned char flags;
};
+struct alps_nibble_commands {
+ int command;
+ unsigned char data;
+};
+
struct alps_data {
struct input_dev *dev2; /* Relative device */
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
+ const struct alps_nibble_commands *nibble_commands;
+ int addr_command; /* Command to set register address */
int prev_fin; /* Finger bit from previous packet */
+ int multi_packet; /* Multi-packet data in progress */
+ unsigned char multi_data[6]; /* Saved multi-packet data */
+ u8 quirks;
struct timer_list timer;
};
+#define ALPS_QUIRK_TRACKSTICK_BUTTONS 1 /* trackstick buttons in trackstick packet */
+
#ifdef CONFIG_MOUSE_PS2_ALPS
int alps_detect(struct psmouse *psmouse, bool set_properties);
int alps_init(struct psmouse *psmouse);
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index ff5f61a0fd3..39be7b82c04 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -140,25 +140,13 @@ static int __exit amimouse_remove(struct platform_device *pdev)
}
static struct platform_driver amimouse_driver = {
+ .probe = amimouse_probe,
.remove = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
.owner = THIS_MODULE,
},
};
-
-static int __init amimouse_init(void)
-{
- return platform_driver_probe(&amimouse_driver, amimouse_probe);
-}
-
-module_init(amimouse_init);
-
-static void __exit amimouse_exit(void)
-{
- platform_driver_unregister(&amimouse_driver);
-}
-
-module_exit(amimouse_exit);
+module_platform_driver(amimouse_driver);
MODULE_ALIAS("platform:amiga-mouse");
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index b77f9991278..0acbc7d50d0 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -938,15 +938,4 @@ static struct usb_driver atp_driver = {
.id_table = atp_table,
};
-static int __init atp_init(void)
-{
- return usb_register(&atp_driver);
-}
-
-static void __exit atp_exit(void)
-{
- usb_deregister(&atp_driver);
-}
-
-module_init(atp_init);
-module_exit(atp_exit);
+module_usb_driver(atp_driver);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 5ec617e28f7..cf87f8b18e3 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -940,16 +940,4 @@ static struct usb_driver bcm5974_driver = {
.supports_autosuspend = 1,
};
-static int __init bcm5974_init(void)
-{
- return usb_register(&bcm5974_driver);
-}
-
-static void __exit bcm5974_exit(void)
-{
- usb_deregister(&bcm5974_driver);
-}
-
-module_init(bcm5974_init);
-module_exit(bcm5974_exit);
-
+module_usb_driver(bcm5974_driver);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e2a9867c19d..d2c0db159b1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -43,6 +43,24 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
}
/*
+ * V3 and later support this fast command
+ */
+static int elantech_send_cmd(struct psmouse *psmouse, unsigned char c,
+ unsigned char *param)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ ps2_command(ps2dev, NULL, c) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
* A retrying version of ps2_command
*/
static int elantech_ps2_command(struct psmouse *psmouse,
@@ -863,13 +881,13 @@ static int elantech_set_range(struct psmouse *psmouse,
i = (etd->fw_version > 0x020800 &&
etd->fw_version < 0x020900) ? 1 : 2;
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
fixed_dpi = param[1] & 0x10;
if (((etd->fw_version >> 16) == 0x14) && fixed_dpi) {
- if (synaptics_send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
return -1;
*x_max = (etd->capabilities[1] - i) * param[1] / 2;
@@ -888,7 +906,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 3:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -896,7 +914,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 4:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -913,6 +931,30 @@ static int elantech_set_range(struct psmouse *psmouse,
}
/*
+ * dpi = (value from firmware) * 10 + 790
+ * We also have to convert dpi to dots/mm (*10/254 to avoid floating point).
+ */
+static unsigned int elantech_convert_res(unsigned int val)
+{
+ return (val * 10 + 790) * 10 / 254;
+}
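For reference, a minimal user-space sketch of the same conversion (the firmware value here is hypothetical, not taken from any real device): a reported nibble of 5 works out to 840 dpi and, with the driver's integer-only math, about 33 dots/mm.

#include <stdio.h>

/* Same integer-only conversion as elantech_convert_res() above. */
static unsigned int convert_res(unsigned int val)
{
	return (val * 10 + 790) * 10 / 254;
}

int main(void)
{
	unsigned int val = 5;	/* hypothetical firmware nibble */

	printf("dpi=%u dots/mm=%u\n", val * 10 + 790, convert_res(val));
	return 0;
}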
+
+static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ unsigned int *x_res,
+ unsigned int *y_res)
+{
+ unsigned char param[3];
+
+ if (elantech_send_cmd(psmouse, ETP_RESOLUTION_QUERY, param))
+ return -1;
+
+ *x_res = elantech_convert_res(param[1] & 0x0f);
+ *y_res = elantech_convert_res((param[1] & 0xf0) >> 4);
+
+ return 0;
+}
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -920,6 +962,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
+ unsigned int x_res = 0, y_res = 0;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -967,10 +1010,20 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
case 4:
+ if (elantech_get_resolution_v4(psmouse, &x_res, &y_res)) {
+ /*
+ * if query failed, print a warning and leave the values
+ * zero to resemble synaptics.c behavior.
+ */
+ psmouse_warn(psmouse, "couldn't query resolution data.\n");
+ }
+
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -983,6 +1036,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1031,16 +1086,13 @@ static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
struct elantech_data *etd = psmouse->private;
struct elantech_attr_data *attr = data;
unsigned char *reg = (unsigned char *) etd + attr->field_offset;
- unsigned long value;
+ unsigned char value;
int err;
- err = strict_strtoul(buf, 16, &value);
+ err = kstrtou8(buf, 16, &value);
if (err)
return err;
- if (value > 0xff)
- return -EINVAL;
-
/* Do we need to preserve some bits for version 2 hardware too? */
if (etd->hw_version == 1) {
if (attr->reg == 0x10)
@@ -1233,9 +1285,11 @@ static int elantech_set_properties(struct elantech_data *etd)
}
}
- /*
- * Turn on packet checking by default.
- */
+ /* Decide which send_cmd we're going to use early */
+ etd->send_cmd = etd->hw_version >= 3 ? elantech_send_cmd :
+ synaptics_send_cmd;
+
+ /* Turn on packet checking by default */
etd->paritycheck = 1;
/*
@@ -1291,7 +1345,7 @@ int elantech_init(struct psmouse *psmouse)
"assuming hardware version %d (with firmware version 0x%02x%02x%02x)\n",
etd->hw_version, param[0], param[1], param[2]);
- if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
+ if (etd->send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
etd->capabilities)) {
psmouse_err(psmouse, "failed to query capabilities.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 9e5f1aabea7..46db3be45ac 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -20,6 +20,7 @@
#define ETP_FW_VERSION_QUERY 0x01
#define ETP_CAPABILITIES_QUERY 0x02
#define ETP_SAMPLE_QUERY 0x03
+#define ETP_RESOLUTION_QUERY 0x04
/*
* Command values for register reading or writing
@@ -135,6 +136,7 @@ struct elantech_data {
unsigned int width;
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
};
#ifdef CONFIG_MOUSE_PS2_ELANTECH
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 58902fbb989..a9ad8e1402b 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -178,18 +178,7 @@ static struct platform_driver gpio_mouse_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init gpio_mouse_init(void)
-{
- return platform_driver_register(&gpio_mouse_device_driver);
-}
-module_init(gpio_mouse_init);
-
-static void __exit gpio_mouse_exit(void)
-{
- platform_driver_unregister(&gpio_mouse_device_driver);
-}
-module_exit(gpio_mouse_exit);
+module_platform_driver(gpio_mouse_device_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 0470dd46b56..1c5d521de60 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -789,11 +789,14 @@ static ssize_t hgpk_set_powered(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (value != priv->powered) {
@@ -881,11 +884,14 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value != 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 1)
return -EINVAL;
/*
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index faac2c3bef7..84de2fc6acc 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -155,9 +155,14 @@ static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse,
static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
ps2pp_set_smartscroll(psmouse, value);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9f352fbd7b4..de7e8bc17b1 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -127,7 +127,7 @@ struct psmouse_protocol {
* relevant events to the input module once full packet has arrived.
*/
-static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
unsigned char *packet = psmouse->packet;
@@ -418,6 +418,49 @@ int psmouse_reset(struct psmouse *psmouse)
return 0;
}
+/*
+ * Here we set the mouse resolution.
+ */
+
+void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+ static const unsigned char params[] = { 0, 1, 2, 2, 3 };
+ unsigned char p;
+
+ if (resolution == 0 || resolution > 200)
+ resolution = 200;
+
+ p = params[resolution / 50];
+ ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
+ psmouse->resolution = 25 << p;
+}
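As an illustration only (the requested value of 100 is assumed, not device-specific): psmouse_set_resolution() quantizes the request in steps of 50 via the params[] table and records the value actually programmed as 25 << p, so a request of 100 maps to SETRES argument 2 and is reported back as 100.

#include <stdio.h>

int main(void)
{
	static const unsigned char params[] = { 0, 1, 2, 2, 3 };
	unsigned int resolution = 100;	/* hypothetical requested resolution */
	int p = params[resolution / 50];

	printf("SETRES arg=%d, reported resolution=%d\n", p, 25 << p);
	return 0;
}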
+
+/*
+ * Here we set the mouse report rate.
+ */
+
+static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
+ unsigned char r;
+ int i = 0;
+
+ while (rates[i] > rate) i++;
+ r = rates[i];
+ ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
+ psmouse->rate = r;
+}
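A small sketch of the same selection rule, with an assumed request of 70 pps: the loop picks the highest standard PS/2 report rate that does not exceed the request, here 60 pps.

#include <stdio.h>

int main(void)
{
	static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
	unsigned int rate = 70;	/* hypothetical requested rate */
	int i = 0;

	/* walk down the table until a supported rate <= request is found */
	while (rates[i] > rate)
		i++;

	printf("requested %u pps, programmed %u pps\n", rate, (unsigned int)rates[i]);
	return 0;
}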
+
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+ return ps2_command(&psmouse->ps2dev, psmouse->packet,
+ PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
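The poll command packs the number of bytes to read into the high byte of the word passed to ps2_command(); a standalone sketch, reusing the PSMOUSE_CMD_POLL value defined in psmouse.h, shows the default 3-byte packet yielding command word 0x03eb.

#include <stdio.h>

#define PSMOUSE_CMD_POLL	0x00eb	/* low byte: command, high byte: bytes to receive */

int main(void)
{
	unsigned int pktsize = 3;	/* default set by psmouse_apply_defaults() */

	printf("poll command word: 0x%04x\n", PSMOUSE_CMD_POLL | (pktsize << 8));
	return 0;
}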
+
/*
* Genius NetMouse magic init.
@@ -603,6 +646,56 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
}
/*
+ * Apply default settings to the psmouse structure. Most of them will
+ * be overridden by individual protocol initialization routines.
+ */
+
+static void psmouse_apply_defaults(struct psmouse *psmouse)
+{
+ struct input_dev *input_dev = psmouse->dev;
+
+ memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
+ memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
+ memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
+ memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
+ memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_REL, input_dev->evbit);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+
+ psmouse->set_rate = psmouse_set_rate;
+ psmouse->set_resolution = psmouse_set_resolution;
+ psmouse->poll = psmouse_poll;
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ psmouse->reconnect = NULL;
+ psmouse->disconnect = NULL;
+ psmouse->cleanup = NULL;
+ psmouse->pt_activate = NULL;
+ psmouse->pt_deactivate = NULL;
+}
+
+/*
+ * Apply default settings to the psmouse structure and call the
+ * specified protocol detection or initialization routine.
+ */
+static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
+ bool set_properties),
+ struct psmouse *psmouse, bool set_properties)
+{
+ if (set_properties)
+ psmouse_apply_defaults(psmouse);
+
+ return detect(psmouse, set_properties);
+}
+
+/*
* psmouse_extensions() probes for any extensions to the basic PS/2 protocol
* the mouse may have.
*/
@@ -616,7 +709,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* We always check for lifebook because it does not disturb mouse
* (it only checks DMI information).
*/
- if (lifebook_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
if (max_proto > PSMOUSE_IMEX) {
if (!set_properties || lifebook_init(psmouse) == 0)
return PSMOUSE_LIFEBOOK;
@@ -628,15 +721,18 @@ static int psmouse_extensions(struct psmouse *psmouse,
* upsets the thinkingmouse).
*/
- if (max_proto > PSMOUSE_IMEX && thinking_detect(psmouse, set_properties) == 0)
+ if (max_proto > PSMOUSE_IMEX &&
+ psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
return PSMOUSE_THINKPS;
+ }
/*
* Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
* support is disabled in config - we need to know if it is synaptics so we
* can reset it properly after probing for intellimouse.
*/
- if (max_proto > PSMOUSE_PS2 && synaptics_detect(psmouse, set_properties) == 0) {
+ if (max_proto > PSMOUSE_PS2 &&
+ psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
@@ -667,7 +763,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
*/
if (max_proto > PSMOUSE_IMEX) {
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
- if (alps_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(alps_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || alps_init(psmouse) == 0)
return PSMOUSE_ALPS;
/*
@@ -681,7 +778,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try OLPC HGPK touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- hgpk_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
if (!set_properties || hgpk_init(psmouse) == 0)
return PSMOUSE_HGPK;
/*
@@ -694,7 +791,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try Elantech touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- elantech_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
if (!set_properties || elantech_init(psmouse) == 0)
return PSMOUSE_ELANTECH;
/*
@@ -703,18 +800,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
max_proto = PSMOUSE_IMEX;
}
-
if (max_proto > PSMOUSE_IMEX) {
- if (genius_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(genius_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_GENPS;
- if (ps2pp_init(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(ps2pp_init,
+ psmouse, set_properties) == 0)
return PSMOUSE_PS2PP;
- if (trackpoint_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(trackpoint_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TRACKPOINT;
- if (touchkit_ps2_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(touchkit_ps2_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
@@ -723,7 +823,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Trackpoint devices (causing TP_READ_ID command to time out).
*/
if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(fsp_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || fsp_init(psmouse) == 0)
return PSMOUSE_FSP;
/*
@@ -741,17 +842,23 @@ static int psmouse_extensions(struct psmouse *psmouse,
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
psmouse_reset(psmouse);
- if (max_proto >= PSMOUSE_IMEX && im_explorer_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMEX &&
+ psmouse_do_detect(im_explorer_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMEX;
+ }
- if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMPS &&
+ psmouse_do_detect(intellimouse_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMPS;
+ }
/*
* Okay, all failed, we have a standard mouse here. The number of the buttons
* is still a question, though. We assume 3.
*/
- ps2bare_detect(psmouse, set_properties);
+ psmouse_do_detect(ps2bare_detect, psmouse, set_properties);
if (synaptics_hardware) {
/*
@@ -819,6 +926,13 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.detect = synaptics_detect,
.init = synaptics_init,
},
+ {
+ .type = PSMOUSE_SYNAPTICS_RELATIVE,
+ .name = "SynRelPS/2",
+ .alias = "synaptics-relative",
+ .detect = synaptics_detect,
+ .init = synaptics_init_relative,
+ },
#endif
#ifdef CONFIG_MOUSE_PS2_ALPS
{
@@ -958,39 +1072,6 @@ static int psmouse_probe(struct psmouse *psmouse)
}
/*
- * Here we set the mouse resolution.
- */
-
-void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
-{
- static const unsigned char params[] = { 0, 1, 2, 2, 3 };
- unsigned char p;
-
- if (resolution == 0 || resolution > 200)
- resolution = 200;
-
- p = params[resolution / 50];
- ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
- psmouse->resolution = 25 << p;
-}
-
-/*
- * Here we set the mouse report rate.
- */
-
-static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
-{
- static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
- unsigned char r;
- int i = 0;
-
- while (rates[i] > rate) i++;
- r = rates[i];
- ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
- psmouse->rate = r;
-}
-
-/*
* psmouse_initialize() initializes the mouse to a sane state.
*/
@@ -1035,16 +1116,6 @@ static void psmouse_deactivate(struct psmouse *psmouse)
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
}
-/*
- * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
- */
-
-static int psmouse_poll(struct psmouse *psmouse)
-{
- return ps2_command(&psmouse->ps2dev, psmouse->packet,
- PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
-}
-
/*
* psmouse_resync() attempts to re-validate current protocol.
@@ -1245,18 +1316,9 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] =
- BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
- input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-
- psmouse->set_rate = psmouse_set_rate;
- psmouse->set_resolution = psmouse_set_resolution;
- psmouse->poll = psmouse_poll;
- psmouse->protocol_handler = psmouse_process_byte;
- psmouse->pktsize = 3;
-
if (proto && (proto->detect || proto->init)) {
+ psmouse_apply_defaults(psmouse);
+
if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
@@ -1558,13 +1620,12 @@ static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char
static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
{
unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
- unsigned long value;
-
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ unsigned int value;
+ int err;
- if ((unsigned int)value != value)
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
@@ -1671,10 +1732,12 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_rate(psmouse, value);
return count;
@@ -1682,10 +1745,12 @@ static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const
static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_resolution(psmouse, value);
return count;
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 9b84b0c4e37..6a417092d01 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -8,6 +8,7 @@
#define PSMOUSE_CMD_SETSTREAM 0x00ea
#define PSMOUSE_CMD_SETPOLL 0x00f0
#define PSMOUSE_CMD_POLL 0x00eb /* caller sets number of bytes to receive */
+#define PSMOUSE_CMD_RESET_WRAP 0x00ec
#define PSMOUSE_CMD_GETID 0x02f2
#define PSMOUSE_CMD_SETRATE 0x10f3
#define PSMOUSE_CMD_ENABLE 0x00f4
@@ -93,6 +94,7 @@ enum psmouse_type {
PSMOUSE_HGPK,
PSMOUSE_ELANTECH,
PSMOUSE_FSP,
+ PSMOUSE_SYNAPTICS_RELATIVE,
PSMOUSE_AUTO /* This one should always be last */
};
@@ -102,6 +104,7 @@ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
int psmouse_reset(struct psmouse *psmouse);
void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state);
void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse);
struct psmouse_attribute {
struct device_attribute dattr;
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index ee3b0ca9d59..a9e4bfdf31f 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -250,19 +250,7 @@ static struct platform_driver pxa930_trkball_driver = {
.probe = pxa930_trkball_probe,
.remove = __devexit_p(pxa930_trkball_remove),
};
-
-static int __init pxa930_trkball_init(void)
-{
- return platform_driver_register(&pxa930_trkball_driver);
-}
-
-static void __exit pxa930_trkball_exit(void)
-{
- platform_driver_unregister(&pxa930_trkball_driver);
-}
-
-module_init(pxa930_trkball_init);
-module_exit(pxa930_trkball_exit);
+module_platform_driver(pxa930_trkball_driver);
MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index c5b12d2e955..e36847de761 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
/* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
};
int val;
- if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+ if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
return -EIO;
*btn = buttons[(val & 0x30) >> 4];
@@ -408,7 +408,7 @@ static int fsp_onpad_hscr(struct psmouse *psmouse, bool enable)
static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long reg, val;
+ int reg, val;
char *rest;
ssize_t retval;
@@ -416,7 +416,11 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
if (rest == buf || *rest != ' ' || reg > 0xff)
return -EINVAL;
- if (strict_strtoul(rest + 1, 16, &val) || val > 0xff)
+ retval = kstrtoint(rest + 1, 16, &val);
+ if (retval)
+ return retval;
+
+ if (val > 0xff)
return -EINVAL;
if (fsp_reg_write_enable(psmouse, true))
@@ -448,10 +452,13 @@ static ssize_t fsp_attr_set_getreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct fsp_data *pad = psmouse->private;
- unsigned long reg;
- int val;
+ int reg, val, err;
+
+ err = kstrtoint(buf, 16, &reg);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 16, &reg) || reg > 0xff)
+ if (reg > 0xff)
return -EINVAL;
if (fsp_reg_read(psmouse, reg, &val))
@@ -480,9 +487,13 @@ static ssize_t fsp_attr_show_pagereg(struct psmouse *psmouse,
static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ int val, err;
+
+ err = kstrtoint(buf, 16, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 16, &val) || val > 0xff)
+ if (val > 0xff)
return -EINVAL;
if (fsp_page_reg_write(psmouse, val))
@@ -505,9 +516,14 @@ static ssize_t fsp_attr_show_vscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_vscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val > 1)
return -EINVAL;
fsp_onpad_vscr(psmouse, val);
@@ -529,9 +545,14 @@ static ssize_t fsp_attr_show_hscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_hscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (val > 1)
return -EINVAL;
fsp_onpad_hscr(psmouse, val);
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index ed1395ac7b8..2e4af24f8c1 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
/* Finger-sensing Pad control registers */
#define FSP_REG_SYSCTL1 0x10
#define FSP_BIT_EN_REG_CLK BIT(5)
+#define FSP_REG_TMOD_STATUS 0x20
#define FSP_REG_OPC_QDOWN 0x31
#define FSP_BIT_EN_OPC_TAG BIT(7)
#define FSP_REG_OPTZ_XLO 0x34
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5d..8081a0a5d60 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -268,19 +269,49 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
return 0;
}
-static int synaptics_set_absolute_mode(struct psmouse *psmouse)
+static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
{
+ static unsigned char param = 0xc8;
struct synaptics_data *priv = psmouse->private;
- priv->mode = SYN_BIT_ABSOLUTE_MODE;
- if (SYN_ID_MAJOR(priv->identity) >= 4)
+ if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+ return 0;
+
+ if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
+ return -1;
+
+ if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ /* Advanced gesture mode also sends multi-finger data */
+ priv->capabilities |= BIT(1);
+
+ return 0;
+}
+
+static int synaptics_set_mode(struct psmouse *psmouse)
+{
+ struct synaptics_data *priv = psmouse->private;
+
+ priv->mode = 0;
+ if (priv->absolute_mode)
+ priv->mode |= SYN_BIT_ABSOLUTE_MODE;
+ if (priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ if (psmouse->rate >= 80)
+ priv->mode |= SYN_BIT_HIGH_RATE;
if (SYN_CAP_EXTENDED(priv->capabilities))
priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
+ if (priv->absolute_mode &&
+ synaptics_set_advanced_gesture_mode(psmouse)) {
+ psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ return -1;
+ }
+
return 0;
}
@@ -299,26 +330,6 @@ static void synaptics_set_rate(struct psmouse *psmouse, unsigned int rate)
synaptics_mode_cmd(psmouse, priv->mode);
}
-static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
-{
- static unsigned char param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
-
- if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
- return 0;
-
- if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
- return -1;
- if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
- return -1;
-
- /* Advanced gesture mode also sends multi finger data */
- priv->capabilities |= BIT(1);
-
- return 0;
-}
-
/*****************************************************************************
* Synaptics pass-through PS/2 port support
****************************************************************************/
@@ -1142,8 +1153,24 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
{
int i;
+ /* Things that apply to both modes */
__set_bit(INPUT_PROP_POINTER, dev->propbit);
+ __set_bit(EV_KEY, dev->evbit);
+ __set_bit(BTN_LEFT, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+
+ if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
+ __set_bit(BTN_MIDDLE, dev->keybit);
+ if (!priv->absolute_mode) {
+ /* Relative mode */
+ __set_bit(EV_REL, dev->evbit);
+ __set_bit(REL_X, dev->relbit);
+ __set_bit(REL_Y, dev->relbit);
+ return;
+ }
+
+ /* Absolute mode */
__set_bit(EV_ABS, dev->evbit);
set_abs_position_params(dev, priv, ABS_X, ABS_Y);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
@@ -1169,20 +1196,14 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
if (SYN_CAP_PALMDETECT(priv->capabilities))
input_set_abs_params(dev, ABS_TOOL_WIDTH, 0, 15, 0, 0);
- __set_bit(EV_KEY, dev->evbit);
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- __set_bit(BTN_LEFT, dev->keybit);
- __set_bit(BTN_RIGHT, dev->keybit);
if (SYN_CAP_MULTIFINGER(priv->capabilities)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
- __set_bit(BTN_MIDDLE, dev->keybit);
-
if (SYN_CAP_FOUR_BUTTON(priv->capabilities) ||
SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
__set_bit(BTN_FORWARD, dev->keybit);
@@ -1204,10 +1225,58 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
}
}
+static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse,
+ void *data, char *buf)
+{
+ struct synaptics_data *priv = psmouse->private;
+
+ return sprintf(buf, "%c\n", priv->disable_gesture ? '1' : '0');
+}
+
+static ssize_t synaptics_set_disable_gesture(struct psmouse *psmouse,
+ void *data, const char *buf,
+ size_t len)
+{
+ struct synaptics_data *priv = psmouse->private;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
+ return -EINVAL;
+
+ if (value == priv->disable_gesture)
+ return len;
+
+ priv->disable_gesture = value;
+ if (value)
+ priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ else
+ priv->mode &= ~SYN_BIT_DISABLE_GESTURE;
+
+ if (synaptics_mode_cmd(psmouse, priv->mode))
+ return -EIO;
+
+ return len;
+}
+
+PSMOUSE_DEFINE_ATTR(disable_gesture, S_IWUSR | S_IRUGO, NULL,
+ synaptics_show_disable_gesture,
+ synaptics_set_disable_gesture);
+
static void synaptics_disconnect(struct psmouse *psmouse)
{
+ struct synaptics_data *priv = psmouse->private;
+
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ device_remove_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+
synaptics_reset(psmouse);
- kfree(psmouse->private);
+ kfree(priv);
psmouse->private = NULL;
}
@@ -1220,6 +1289,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
@@ -1234,17 +1313,11 @@ static int synaptics_reconnect(struct psmouse *psmouse)
return -1;
}
- if (synaptics_set_absolute_mode(psmouse)) {
+ if (synaptics_set_mode(psmouse)) {
psmouse_err(psmouse, "Unable to initialize device.\n");
return -1;
}
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse,
- "Advanced gesture mode reconnect failed.\n");
- return -1;
- }
-
if (old_priv.identity != priv->identity ||
old_priv.model_id != priv->model_id ||
old_priv.capabilities != priv->capabilities ||
@@ -1321,20 +1394,18 @@ void __init synaptics_module_init(void)
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
}
-int synaptics_init(struct psmouse *psmouse)
+static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
{
struct synaptics_data *priv;
+ int err = -1;
/*
- * The OLPC XO has issues with Synaptics' absolute mode; similarly to
- * the HGPK, it quickly degrades and the hardware becomes jumpy and
- * overly sensitive. Not only that, but the constant packet spew
- * (even at a lowered 40pps rate) overloads the EC such that key
- * presses on the keyboard are missed. Given all of that, don't
- * even attempt to use Synaptics mode. Relative mode seems to work
- * just fine.
+ * The OLPC XO has issues with Synaptics' absolute mode; the constant
+ * packet spew overloads the EC such that key presses on the keyboard
+ * are missed. Given that, don't even attempt to use Absolute mode.
+ * Relative mode seems to work just fine.
*/
- if (broken_olpc_ec) {
+ if (absolute_mode && broken_olpc_ec) {
psmouse_info(psmouse,
"OLPC XO detected, not enabling Synaptics protocol.\n");
return -ENODEV;
@@ -1351,13 +1422,12 @@ int synaptics_init(struct psmouse *psmouse)
goto init_fail;
}
- if (synaptics_set_absolute_mode(psmouse)) {
- psmouse_err(psmouse, "Unable to initialize device.\n");
- goto init_fail;
- }
+ priv->absolute_mode = absolute_mode;
+ if (SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ priv->disable_gesture = true;
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ if (synaptics_set_mode(psmouse)) {
+ psmouse_err(psmouse, "Unable to initialize device.\n");
goto init_fail;
}
@@ -1382,12 +1452,19 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->model = ((priv->model_id & 0x00ff0000) >> 8) |
(priv->model_id & 0x000000ff);
- psmouse->protocol_handler = synaptics_process_byte;
+ if (absolute_mode) {
+ psmouse->protocol_handler = synaptics_process_byte;
+ psmouse->pktsize = 6;
+ } else {
+ /* Relative mode follows standard PS/2 mouse protocol */
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ }
+
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
psmouse->cleanup = synaptics_reset;
- psmouse->pktsize = 6;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;
@@ -1406,11 +1483,32 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->rate = 40;
}
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity)) {
+ err = device_create_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+ if (err) {
+ psmouse_err(psmouse,
+ "Failed to create disable_gesture attribute (%d)",
+ err);
+ goto init_fail;
+ }
+ }
+
return 0;
init_fail:
kfree(priv);
- return -1;
+ return err;
+}
+
+int synaptics_init(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, true);
+}
+
+int synaptics_init_relative(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, false);
}
bool synaptics_supported(void)
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 622aea8dd7e..fd26ccca13d 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -100,6 +100,7 @@
#define SYN_ID_MINOR(i) (((i) >> 16) & 0xff)
#define SYN_ID_FULL(i) ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
#define SYN_ID_IS_SYNAPTICS(i) ((((i) >> 8) & 0xff) == 0x47)
+#define SYN_ID_DISGEST_SUPPORTED(i) (SYN_ID_MAJOR(i) >= 4)
/* synaptics special commands */
#define SYN_PS_SET_MODE2 0x14
@@ -159,6 +160,9 @@ struct synaptics_data {
unsigned char mode; /* current mode byte */
int scroll;
+ bool absolute_mode; /* run in Absolute mode */
+ bool disable_gesture; /* disable gestures */
+
struct serio *pt_port; /* Pass-through serio port */
struct synaptics_mt_state mt_state; /* Current mt finger state */
@@ -175,6 +179,7 @@ struct synaptics_data {
void synaptics_module_init(void);
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
+int synaptics_init_relative(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
bool synaptics_supported(void);
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 54b2fa892e1..22b21801813 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -89,10 +89,12 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned char value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 255)
- return -EINVAL;
+ err = kstrtou8(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
trackpoint_write(&psmouse->ps2dev, attr->command, value);
@@ -115,9 +117,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ if (value > 1)
return -EINVAL;
if (attr->inverted)
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index d363dc4571a..35864c6130b 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -196,18 +196,7 @@ static struct platform_driver altera_ps2_driver = {
.of_match_table = altera_ps2_match,
},
};
-
-static int __init altera_ps2_init(void)
-{
- return platform_driver_register(&altera_ps2_driver);
-}
-module_init(altera_ps2_init);
-
-static void __exit altera_ps2_exit(void)
-{
- platform_driver_unregister(&altera_ps2_driver);
-}
-module_exit(altera_ps2_exit);
+module_platform_driver(altera_ps2_driver);
MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 12abc50508e..8407d5b0ced 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,6 +195,8 @@ static struct amba_id amba_kmi_idtable[] = {
{ 0, 0 }
};
+MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
+
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index 95280f9207e..421a7442e46 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -358,19 +358,7 @@ static struct platform_driver psif_driver = {
.suspend = psif_suspend,
.resume = psif_resume,
};
-
-static int __init psif_init(void)
-{
- return platform_driver_probe(&psif_driver, psif_probe);
-}
-
-static void __exit psif_exit(void)
-{
- platform_driver_unregister(&psif_driver);
-}
-
-module_init(psif_init);
-module_exit(psif_exit);
+module_platform_driver(psif_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Atmel AVR32 PSIF PS/2 driver");
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d37a48e099d..86564414b75 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(void)
+static void i8042_controller_reset(bool force_reset)
{
i8042_flush();
@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
* Reset the controller if requested.
*/
- if (i8042_reset)
+ if (i8042_reset || force_reset)
i8042_controller_selftest();
/*
@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
* upsetting it.
*/
-static int i8042_pm_reset(struct device *dev)
+static int i8042_pm_suspend(struct device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(true);
return 0;
}
@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
return 0;
}
+static int i8042_pm_reset(struct device *dev)
+{
+ i8042_controller_reset(false);
+
+ return 0;
+}
+
static int i8042_pm_restore(struct device *dev)
{
return i8042_controller_resume(false);
}
static const struct dev_pm_ops i8042_pm_ops = {
- .suspend = i8042_pm_reset,
+ .suspend = i8042_pm_suspend,
.resume = i8042_pm_resume,
.thaw = i8042_pm_thaw,
.poweroff = i8042_pm_reset,
@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
static void i8042_shutdown(struct platform_device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(false);
}
static int __init i8042_create_kbd_port(void)
@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
out_fail:
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return error;
@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return 0;
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 7ec3c97dc1b..8b44ddc8041 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -143,16 +143,4 @@ static struct platform_driver rpckbd_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rpckbd_init(void)
-{
- return platform_driver_register(&rpckbd_driver);
-}
-
-static void __exit rpckbd_exit(void)
-{
- platform_driver_unregister(&rpckbd_driver);
-}
-
-module_init(rpckbd_init);
-module_exit(rpckbd_exit);
+module_platform_driver(rpckbd_driver);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d64c5a43aaa..d96d4c2a76a 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -253,7 +253,7 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
+ if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -369,19 +369,7 @@ static struct platform_driver xps2_of_driver = {
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
-
-static int __init xps2_init(void)
-{
- return platform_driver_register(&xps2_of_driver);
-}
-
-static void __exit xps2_cleanup(void)
-{
- platform_driver_unregister(&xps2_of_driver);
-}
-
-module_init(xps2_init);
-module_exit(xps2_cleanup);
+module_platform_driver(xps2_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx XPS PS/2 driver");
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index d94f7e9aa99..f8b0b1df913 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -269,19 +269,4 @@ static struct usb_driver usb_acecad_driver = {
.id_table = usb_acecad_id_table,
};
-static int __init usb_acecad_init(void)
-{
- int result = usb_register(&usb_acecad_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit usb_acecad_exit(void)
-{
- usb_deregister(&usb_acecad_driver);
-}
-
-module_init(usb_acecad_init);
-module_exit(usb_acecad_exit);
+module_usb_driver(usb_acecad_driver);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 6d89fd1842c..205d16aab44 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1198,9 +1198,9 @@ static ssize_t
store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long x;
+ int x;
- if (strict_strtol(buf, 10, &x)) {
+ if (kstrtoint(buf, 10, &x)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1240,9 +1240,9 @@ static ssize_t
store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long y;
+ int y;
- if (strict_strtol(buf, 10, &y)) {
+ if (kstrtoint(buf, 10, &y)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1277,12 +1277,13 @@ static ssize_t
store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long j;
+ int err, j;
- if (strict_strtol(buf, 10, &j))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &j);
+ if (err)
+ return err;
- aiptek->newSetting.jitterDelay = (int)j;
+ aiptek->newSetting.jitterDelay = j;
return count;
}
@@ -1306,12 +1307,13 @@ static ssize_t
store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long d;
+ int err, d;
- if (strict_strtol(buf, 10, &d))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &d);
+ if (err)
+ return err;
- aiptek->newSetting.programmableDelay = (int)d;
+ aiptek->newSetting.programmableDelay = d;
return count;
}
@@ -1557,11 +1559,13 @@ static ssize_t
store_tabletWheel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long w;
+ int err, w;
- if (strict_strtol(buf, 10, &w)) return -EINVAL;
+ err = kstrtoint(buf, 10, &w);
+ if (err)
+ return err;
- aiptek->newSetting.wheel = (int)w;
+ aiptek->newSetting.wheel = w;
return count;
}
@@ -1919,21 +1923,7 @@ static struct usb_driver aiptek_driver = {
.id_table = aiptek_ids,
};
-static int __init aiptek_init(void)
-{
- int result = usb_register(&aiptek_driver);
- if (result == 0) {
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_AUTHOR "\n");
- }
- return result;
-}
-
-static void __exit aiptek_exit(void)
-{
- usb_deregister(&aiptek_driver);
-}
+module_usb_driver(aiptek_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
@@ -1943,6 +1933,3 @@ module_param(programmableDelay, int, 0);
MODULE_PARM_DESC(programmableDelay, "delay used during tablet programming");
module_param(jitterDelay, int, 0);
MODULE_PARM_DESC(jitterDelay, "stylus/mouse settlement delay");
-
-module_init(aiptek_init);
-module_exit(aiptek_exit);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 8ea6afe2e99..89a297801dc 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -1022,33 +1022,7 @@ static struct usb_driver gtco_driverinfo_table = {
.disconnect = gtco_disconnect,
};
-/*
- * Register this module with the USB subsystem
- */
-static int __init gtco_init(void)
-{
- int error;
-
- error = usb_register(&gtco_driverinfo_table);
- if (error) {
- err("usb_register() failed rc=0x%x", error);
- return error;
- }
-
- printk("GTCO usb driver version: %s", GTCO_VERSION);
- return 0;
-}
-
-/*
- * Deregister this module with the USB subsystem
- */
-static void __exit gtco_exit(void)
-{
- usb_deregister(&gtco_driverinfo_table);
-}
-
-module_init(gtco_init);
-module_exit(gtco_exit);
+module_usb_driver(gtco_driverinfo_table);
MODULE_DESCRIPTION("GTCO digitizer USB driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6504b627b23..b2db3cfe308 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -432,15 +432,4 @@ static struct usb_driver hanwang_driver = {
.id_table = hanwang_ids,
};
-static int __init hanwang_init(void)
-{
- return usb_register(&hanwang_driver);
-}
-
-static void __exit hanwang_exit(void)
-{
- usb_deregister(&hanwang_driver);
-}
-
-module_init(hanwang_init);
-module_exit(hanwang_exit);
+module_usb_driver(hanwang_driver);
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 290f4e57b58..85a5b40333a 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -198,22 +198,4 @@ static struct usb_driver kbtab_driver = {
.id_table = kbtab_ids,
};
-static int __init kbtab_init(void)
-{
- int retval;
- retval = usb_register(&kbtab_driver);
- if (retval)
- goto out;
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-out:
- return retval;
-}
-
-static void __exit kbtab_exit(void)
-{
- usb_deregister(&kbtab_driver);
-}
-
-module_init(kbtab_init);
-module_exit(kbtab_exit);
+module_usb_driver(kbtab_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 1c1b7b43cf9..2a97b7e76db 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -28,7 +28,9 @@
#define HID_USAGE_Y_TILT 0x3e
#define HID_USAGE_FINGER 0x22
#define HID_USAGE_STYLUS 0x20
-#define HID_COLLECTION 0xc0
+#define HID_COLLECTION 0xa1
+#define HID_COLLECTION_LOGICAL 0x02
+#define HID_COLLECTION_END 0xc0
enum {
WCM_UNDEFINED = 0,
@@ -66,7 +68,8 @@ static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
do {
retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_REPORT,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
(type << 8) + id,
intf->altsetting[0].desc.bInterfaceNumber,
buf, size, 100);
@@ -164,7 +167,70 @@ static void wacom_close(struct input_dev *dev)
usb_autopm_put_interface(wacom->intf);
}
-static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
+static int wacom_parse_logical_collection(unsigned char *report,
+ struct wacom_features *features)
+{
+ int length = 0;
+
+ if (features->type == BAMBOO_PT) {
+
+ /* Logical collection is only used by 3rd gen Bamboo Touch */
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+ features->device_type = BTN_TOOL_DOUBLETAP;
+
+ /*
+ * Stylus and Touch share the same active area,
+ * so compute the physical size based on the stylus
+ * data before it is overwritten.
+ */
+ features->x_phy =
+ (features->x_max * features->x_resolution) / 100;
+ features->y_phy =
+ (features->y_max * features->y_resolution) / 100;
+
+ features->x_max = features->y_max =
+ get_unaligned_le16(&report[10]);
+
+ length = 11;
+ }
+ return length;
+}
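To make the byte handling above concrete, a standalone sketch with made-up values (not taken from a real Bamboo descriptor): get_unaligned_le16() reads a little-endian 16-bit quantity, so report bytes 0x60 0x0f at offset 10 would give a shared X/Y logical maximum of 3936; the physical-size line only repeats the driver's arithmetic with hypothetical x_max and x_resolution.

#include <stdio.h>

/* user-space stand-in for the kernel's get_unaligned_le16() */
static unsigned int le16(const unsigned char *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	unsigned char report[12] = { 0 };
	unsigned int x_max = 14720, x_resolution = 100;	/* hypothetical stylus data */

	report[10] = 0x60;	/* made-up Logical Maximum, little endian */
	report[11] = 0x0f;

	printf("logical max=%u x_phy=%u\n",
	       le16(&report[10]), x_max * x_resolution / 100);
	return 0;
}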
+
+/*
+ * The Interface Descriptor of Wacom devices can be incomplete and
+ * inconsistent, so the wacom_features table is used to store the stylus
+ * device's packet lengths, various maximum values, and tablet
+ * resolution based on product IDs.
+ *
+ * For devices that contain 2 interfaces, the wacom_features table is
+ * inaccurate for the touch interface. Since the Interface Descriptor
+ * for touch interfaces has fairly complete data, this function exists
+ * to query the tablet for this missing information instead of hard
+ * coding an additional table.
+ *
+ * A typical Interface Descriptor for a stylus will contain a
+ * boot mouse application collection that is not of interest, and this
+ * function will ignore it.
+ *
+ * It also contains a digitizer application collection that is not of
+ * interest either, since any information it contains would duplicate
+ * what is in wacom_features. Usually it defines a report of an array
+ * of bytes that could be used as the maximum length of the stylus packet
+ * returned. If it happens to define a Digitizer-Stylus Physical Collection,
+ * the X and Y logical values contain valid data, but they are ignored.
+ *
+ * A typical Interface Descriptor for a touch interface will contain a
+ * Digitizer-Finger Physical Collection which defines both the logical
+ * X/Y maximum and the physical size of the tablet. Since touch
+ * interfaces haven't supported pressure or distance, this is enough
+ * information to override invalid values in the wacom_features table.
+ *
+ * 3rd gen Bamboo Touch no longer defines a Digitizer-Finger Physical
+ * Collection. Instead it defines a Logical Collection with a single
+ * Logical Maximum for both X and Y.
+ */
+static int wacom_parse_hid(struct usb_interface *intf,
+ struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -244,8 +310,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->x_max =
get_unaligned_le16(&report[i + 3]);
@@ -287,8 +351,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
@@ -302,6 +364,11 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
i++;
break;
+ /*
+ * Requiring the Stylus Usage makes the parser ignore boot mouse
+ * X/Y values and some cases of invalid Digitizer X/Y
+ * values that are commonly reported.
+ */
case HID_USAGE_STYLUS:
pen = 1;
i++;
@@ -309,10 +376,20 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
}
break;
- case HID_COLLECTION:
+ case HID_COLLECTION_END:
/* reset UsagePage and Finger */
finger = usage = 0;
break;
+
+ case HID_COLLECTION:
+ i++;
+ switch (report[i]) {
+ case HID_COLLECTION_LOGICAL:
+ i += wacom_parse_logical_collection(&report[i],
+ features);
+ break;
+ }
+ break;
}
}
@@ -348,7 +425,8 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
WAC_HID_FEATURE_REPORT,
report_id, rep_data, 4, 1);
} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
- } else if (features->type != TABLETPC) {
+ } else if (features->type != TABLETPC &&
+ features->device_type == BTN_TOOL_PEN) {
do {
rep_data[0] = 2;
rep_data[1] = 2;
@@ -485,7 +563,8 @@ static int wacom_led_control(struct wacom *wacom)
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type == WACOM_21UX2)
+ if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+ wacom->wacom_wac.features.type == WACOM_24HD)
led = (wacom->led.select[1] << 4) | 0x40;
led |= wacom->led.select[0] | 0x4;
@@ -704,6 +783,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
wacom->led.select[0] = 0;
wacom->led.select[1] = 0;
@@ -738,6 +818,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
sysfs_remove_group(&wacom->intf->dev.kobj,
&cintiq_led_attr_group);
@@ -919,21 +1000,4 @@ static struct usb_driver wacom_driver = {
.supports_autosuspend = 1,
};
-static int __init wacom_init(void)
-{
- int result;
-
- result = usb_register(&wacom_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit wacom_exit(void)
-{
- usb_deregister(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
+module_usb_driver(wacom_driver);
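The HID_COLLECTION redefinition in this file follows the HID report-descriptor item encoding: a Collection item has tag 0xa1 and is followed by a one-byte collection type (0x02 = Logical), while End Collection is the separate 0xc0 item, which is why the parser now treats the two distinctly. A hypothetical descriptor fragment of the kind wacom_parse_hid() walks for a 3rd-gen Bamboo Touch (values illustrative only, not taken from a real device):

/* Hypothetical HID report-descriptor fragment: report[i] == 0xa1 starts
 * a collection, report[i + 1] gives its type, and 0xc0 closes it. */
static const unsigned char example_logical_collection[] = {
	0xa1, 0x02,		/* Collection (Logical)              */
	0x26, 0xe8, 0x0e,	/* Logical Maximum (0x0ee8), 2 bytes */
	/* ... report size/count, usages ...                         */
	0xc0,			/* End Collection                    */
};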
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e77..88672ec296c 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -452,7 +452,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- features->type == WACOM_21UX2) {
+ features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -519,6 +519,56 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == WACOM_24HD) {
+ input_report_key(input, BTN_0, (data[6] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x02));
+ input_report_key(input, BTN_2, (data[6] & 0x04));
+ input_report_key(input, BTN_3, (data[6] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x10));
+ input_report_key(input, BTN_5, (data[6] & 0x20));
+ input_report_key(input, BTN_6, (data[6] & 0x40));
+ input_report_key(input, BTN_7, (data[6] & 0x80));
+ input_report_key(input, BTN_8, (data[8] & 0x01));
+ input_report_key(input, BTN_9, (data[8] & 0x02));
+ input_report_key(input, BTN_A, (data[8] & 0x04));
+ input_report_key(input, BTN_B, (data[8] & 0x08));
+ input_report_key(input, BTN_C, (data[8] & 0x10));
+ input_report_key(input, BTN_X, (data[8] & 0x20));
+ input_report_key(input, BTN_Y, (data[8] & 0x40));
+ input_report_key(input, BTN_Z, (data[8] & 0x80));
+
+ /*
+ * Three "buttons" are available on the 24HD which are
+ * physically implemented as a touchstrip. Each button
+ * is approximately 3 bits wide with a 2 bit spacing.
+ * The raw touchstrip bits are stored at:
+ * ((data[3] & 0x1f) << 8) | data[4]
+ */
+ input_report_key(input, KEY_PROG1, data[4] & 0x07);
+ input_report_key(input, KEY_PROG2, data[4] & 0xE0);
+ input_report_key(input, KEY_PROG3, data[3] & 0x1C);
+
+ if (data[1] & 0x80) {
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
+ } else {
+ /* Out of proximity, clear wheel value. */
+ input_report_abs(input, ABS_WHEEL, 0);
+ }
+
+ if (data[2] & 0x80) {
+ input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
+ } else {
+ /* Out of proximity, clear second wheel value. */
+ input_report_abs(input, ABS_THROTTLE, 0);
+ }
+
+ if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else {
if (features->type == WACOM_21UX2) {
input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -799,6 +849,9 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
unsigned char *data = wacom->data;
int i;
+ if (data[0] != 0x02)
+ return 0;
+
for (i = 0; i < 2; i++) {
int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
bool touch = data[offset + 3] & 0x80;
@@ -837,18 +890,77 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
return 0;
}
+static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+ int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
+ bool touch = data[1] & 0x80;
+
+ touch = touch && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, slot_id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+ if (touch) {
+ int x = (data[2] << 4) | (data[4] >> 4);
+ int y = (data[3] << 4) | (data[4] & 0x0f);
+ int w = data[6];
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+ }
+}
+
+static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
+}
+
+static int wacom_bpt3_touch(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ unsigned char *data = wacom->data;
+ int count = data[1] & 0x03;
+ int i;
+
+ if (data[0] != 0x02)
+ return 0;
+
+ /* data has up to 7 fixed sized 8-byte messages starting at data[2] */
+ for (i = 0; i < count; i++) {
+ int offset = (8 * i) + 2;
+ int msg_id = data[offset];
+
+ if (msg_id >= 2 && msg_id <= 17)
+ wacom_bpt3_touch_msg(wacom, data + offset);
+ else if (msg_id == 128)
+ wacom_bpt3_button_msg(wacom, data + offset);
+
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ input_sync(input);
+
+ return 0;
+}
+
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
- /*
- * Similar to Graphire protocol, data[1] & 0x20 is proximity and
- * data[1] & 0x18 is tool ID. 0x30 is safety check to ignore
- * 2 unused tool ID's.
- */
- prox = (data[1] & 0x30) == 0x30;
+ if (data[0] != 0x02)
+ return 0;
+
+ prox = (data[1] & 0x20) == 0x20;
/*
* All reports shared between PEN and RUBBER tool must be
@@ -912,7 +1024,9 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
{
if (len == WACOM_PKGLEN_BBTOUCH)
return wacom_bpt_touch(wacom);
- else if (len == WACOM_PKGLEN_BBFUN)
+ else if (len == WACOM_PKGLEN_BBTOUCH3)
+ return wacom_bpt3_touch(wacom);
+ else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN)
return wacom_bpt_pen(wacom);
return 0;
@@ -955,6 +1069,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case CINTIQ:
case WACOM_BEE:
case WACOM_21UX2:
+ case WACOM_24HD:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1031,9 +1146,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->type == BAMBOO_PT)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
- /* quirks for bamboo touch */
+ /* quirk for bamboo touch with 2 low res touches */
if (features->type == BAMBOO_PT &&
- features->device_type == BTN_TOOL_DOUBLETAP) {
+ features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->x_max <<= 5;
features->y_max <<= 5;
features->x_fuzz <<= 5;
@@ -1110,6 +1225,26 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case WACOM_24HD:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+
+ for (i = 0; i < 10; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+ wacom_setup_cintiq(wacom_wac);
+ break;
+
case WACOM_21UX2:
__set_bit(BTN_A, input_dev->keybit);
__set_bit(BTN_B, input_dev->keybit);
@@ -1240,7 +1375,21 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- input_mt_init_slots(input_dev, 2);
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ __set_bit(BTN_TOOL_TRIPLETAP,
+ input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP,
+ input_dev->keybit);
+
+ input_mt_init_slots(input_dev, 16);
+
+ input_set_abs_params(input_dev,
+ ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ } else {
+ input_mt_init_slots(input_dev, 2);
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, features->x_max,
features->x_fuzz, 0);
@@ -1425,6 +1574,9 @@ static const struct wacom_features wacom_features_0xBB =
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF4 =
+ { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1470,6 +1622,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1506,6 +1661,15 @@ static const struct wacom_features wacom_features_0xDA =
static struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDD =
+ { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDE =
+ { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDF =
+ { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1601,6 +1765,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xD8) },
{ USB_DEVICE_WACOM(0xDA) },
{ USB_DEVICE_WACOM(0xDB) },
+ { USB_DEVICE_WACOM(0xDD) },
+ { USB_DEVICE_WACOM(0xDE) },
+ { USB_DEVICE_WACOM(0xDF) },
{ USB_DEVICE_WACOM(0xF0) },
{ USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
@@ -1611,7 +1778,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
+ { USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
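In wacom_bpt3_touch_msg() above, each 8-byte touch record packs two 12-bit coordinates into data[2..4]: data[2] and data[3] carry the high 8 bits of X and Y, and the two nibbles of data[4] supply the low 4 bits of each. A small worked decode, using a made-up packet:

/* Worked decode of a hypothetical 3rd-gen Bamboo touch record,
 * mirroring the bit packing used by wacom_bpt3_touch_msg(). */
#include <stdio.h>

int main(void)
{
	unsigned char data[] = { 0x05, 0x80, 0xab, 0x34, 0xc7, 0x00, 0x2a };

	int x = (data[2] << 4) | (data[4] >> 4);   /* 0xab0 | 0xc  -> 0xabc */
	int y = (data[3] << 4) | (data[4] & 0x0f); /* 0x340 | 0x7  -> 0x347 */
	int w = data[6];                           /* touch major: 0x2a     */

	printf("x=%#x y=%#x w=%d\n", x, y, w);
	return 0;
}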
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 53eb71b6833..050acaefee7 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 32
+#define WACOM_PKGLEN_MAX 64
/* packet length for individual models */
#define WACOM_PKGLEN_PENPRTN 7
@@ -22,6 +22,8 @@
#define WACOM_PKGLEN_TPC1FG 5
#define WACOM_PKGLEN_TPC2FG 14
#define WACOM_PKGLEN_BBTOUCH 20
+#define WACOM_PKGLEN_BBTOUCH3 64
+#define WACOM_PKGLEN_BBPEN 10
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -57,6 +59,7 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ WACOM_24HD,
WACOM_21UX2,
CINTIQ,
WACOM_BEE,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index b3aebc2166b..05f30b73c3c 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -217,18 +217,7 @@ static struct platform_driver pm860x_touch_driver = {
.probe = pm860x_touch_probe,
.remove = __devexit_p(pm860x_touch_remove),
};
-
-static int __init pm860x_touch_init(void)
-{
- return platform_driver_register(&pm860x_touch_driver);
-}
-module_init(pm860x_touch_init);
-
-static void __exit pm860x_touch_exit(void)
-{
- platform_driver_unregister(&pm860x_touch_driver);
-}
-module_exit(pm860x_touch_exit);
+module_platform_driver(pm860x_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 3488ffe1fa0..4af2a18eb3b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -98,6 +98,19 @@ config TOUCHSCREEN_ATMEL_MXT
To compile this driver as a module, choose M here: the
module will be called atmel_mxt_ts.
+config TOUCHSCREEN_AUO_PIXCIR
+ tristate "AUO in-cell touchscreen using Pixcir ICs"
+ depends on I2C
+ depends on GPIOLIB
+ help
+ Say Y here if you have a AUO display with in-cell touchscreen
+ using Pixcir ICs.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called auo-pixcir-ts.
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -177,6 +190,16 @@ config TOUCHSCREEN_EETI
To compile this driver as a module, choose M here: the
module will be called eeti_ts.
+config TOUCHSCREEN_EGALAX
+ tristate "EETI eGalax multi-touch panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C connected EETI
+ eGalax multi-touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called egalax_ts.
+
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -435,6 +458,18 @@ config TOUCHSCREEN_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_ts.
+config TOUCHSCREEN_PIXCIR
+ tristate "PIXCIR I2C touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a pixcir i2c touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pixcir_i2c_ts.
+
config TOUCHSCREEN_WM831X
tristate "Support for WM831x touchscreen controllers"
depends on MFD_WM831X
@@ -541,6 +576,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- GoTop Super_Q2/GogoPen/PenPower tablets
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
+ - Elo TouchSystems 2700 IntelliTouch
Have a look at <http://linux.chapter7.ch/touchkit/> for
a usage description and the required user-space stuff.
@@ -620,6 +656,11 @@ config TOUCHSCREEN_USB_JASTEC
bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
+config TOUCHSCREEN_USB_ELO
+ default y
+ bool "Elo TouchSystems 2700 IntelliTouch controller device support" if EXPERT
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
config TOUCHSCREEN_USB_E2I
default y
bool "e2i Touchscreen controller (e.g. from Mimo 740)"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f957676035a..496091e8846 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
+obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 400131df677..49a36df0b75 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -488,10 +488,10 @@ static ssize_t ad7877_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -518,10 +518,10 @@ static ssize_t ad7877_dac_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -548,10 +548,10 @@ static ssize_t ad7877_gpio3_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -579,10 +579,10 @@ static ssize_t ad7877_gpio4_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -612,10 +612,10 @@ static struct attribute *ad7877_attributes[] = {
NULL
};
-static mode_t ad7877_attr_is_visible(struct kobject *kobj,
+static umode_t ad7877_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_aux3.attr) {
if (gpio3)
@@ -853,7 +853,6 @@ static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ad7877_pm,
},
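The strict_strtoul → kstrtouint conversions in this file let the sysfs store helpers parse directly into an unsigned int instead of going through an unsigned long; the error-checking pattern stays the same. A minimal sketch of the resulting store-callback shape (the "example_" names are illustrative, not part of the driver):

/* Minimal sysfs store callback using kstrtouint(); example_store and
 * example_set are hypothetical names for illustration only. */
static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned int val;
	int error;

	error = kstrtouint(buf, 10, &val);	/* base 10, result in val */
	if (error)
		return error;			/* e.g. -EINVAL or -ERANGE */

	example_set(dev, val);
	return count;
}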
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index c789b974c79..0dac6712f42 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -16,30 +16,6 @@
#define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_i2c_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_i2c_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
-
/* All registers are word-sized.
* AD7879 uses a high-byte first convention.
*/
@@ -47,7 +23,7 @@ static int ad7879_i2c_read(struct device *dev, u8 reg)
{
struct i2c_client *client = to_i2c_client(dev);
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int ad7879_i2c_multi_read(struct device *dev,
@@ -68,7 +44,7 @@ static int ad7879_i2c_write(struct device *dev, u8 reg, u16 val)
{
struct i2c_client *client = to_i2c_client(dev);
- return i2c_smbus_write_word_data(client, reg, swab16(val));
+ return i2c_smbus_write_word_swapped(client, reg, val);
}
static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
@@ -119,7 +95,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.owner = THIS_MODULE,
- .pm = &ad7879_i2c_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
.remove = __devexit_p(ad7879_i2c_remove),
@@ -141,4 +117,3 @@ module_exit(ad7879_i2c_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7879(-1) touchscreen I2C bus driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad7879");
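The register accessors above switch to i2c_smbus_read_word_swapped()/i2c_smbus_write_word_swapped(), which are thin helpers for high-byte-first 16-bit registers like the AD7879's: they perform the byte swap internally, so the explicit swab16() calls disappear. Roughly, the read helper behaves like this sketch:

/* Approximate behaviour of i2c_smbus_read_word_swapped(): read a 16-bit
 * SMBus word and swap it to handle high-byte-first devices. */
static s32 read_word_swapped_sketch(const struct i2c_client *client, u8 reg)
{
	s32 value = i2c_smbus_read_word_data(client, reg);

	return value < 0 ? value : swab16(value);
}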
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index b1643c8fa7c..9b2e1c2b197 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -22,30 +22,6 @@
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_spi_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_spi_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
-
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
@@ -174,9 +150,8 @@ static int __devexit ad7879_spi_remove(struct spi_device *spi)
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
- .pm = &ad7879_spi_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
.remove = __devexit_p(ad7879_spi_remove),
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 3b2e9ed2aee..e2482b40da5 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -281,8 +281,11 @@ static void ad7879_close(struct input_dev* input)
__ad7879_disable(ts);
}
-void ad7879_suspend(struct ad7879 *ts)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_suspend(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (!ts->suspended && !ts->disabled && ts->input->users)
@@ -291,11 +294,14 @@ void ad7879_suspend(struct ad7879 *ts)
ts->suspended = true;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_suspend);
-void ad7879_resume(struct ad7879 *ts)
+static int ad7879_resume(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (ts->suspended && !ts->disabled && ts->input->users)
@@ -304,8 +310,13 @@ void ad7879_resume(struct ad7879 *ts)
ts->suspended = false;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_resume);
+#endif
+
+SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
+EXPORT_SYMBOL(ad7879_pm_ops);
static void ad7879_toggle(struct ad7879 *ts, bool disable)
{
@@ -340,10 +351,10 @@ static ssize_t ad7879_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7879 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
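SIMPLE_DEV_PM_OPS() above builds a struct dev_pm_ops whose suspend/resume callbacks are only wired in under CONFIG_PM_SLEEP; exporting that single object is what lets the I2C and SPI glue drivers drop their duplicated wrappers and simply point .driver.pm at it. A compressed sketch of the pattern, with hypothetical "foo" names:

/* Shared dev_pm_ops pattern ("foo" names are hypothetical): the core
 * defines and exports one dev_pm_ops, each bus glue driver references it
 * instead of rolling its own suspend/resume wrappers. */
#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct foo *chip = dev_get_drvdata(dev);

	return foo_power_down(chip);	/* hypothetical helper */
}

static int foo_resume(struct device *dev)
{
	struct foo *chip = dev_get_drvdata(dev);

	return foo_power_up(chip);	/* hypothetical helper */
}
#endif

SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
EXPORT_SYMBOL(foo_pm_ops);

/* in foo-i2c.c / foo-spi.c:  .driver = { ..., .pm = &foo_pm_ops }, */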
diff --git a/drivers/input/touchscreen/ad7879.h b/drivers/input/touchscreen/ad7879.h
index 6b45a27236c..6fd13c48d37 100644
--- a/drivers/input/touchscreen/ad7879.h
+++ b/drivers/input/touchscreen/ad7879.h
@@ -21,8 +21,8 @@ struct ad7879_bus_ops {
int (*write)(struct device *dev, u8 reg, u16 val);
};
-void ad7879_suspend(struct ad7879 *);
-void ad7879_resume(struct ad7879 *);
+extern const struct dev_pm_ops ad7879_pm_ops;
+
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned irq,
const struct ad7879_bus_ops *bops);
void ad7879_remove(struct ad7879 *);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index de31ec6fe9e..23fd9018565 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -602,10 +602,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ads7846 *ts = dev_get_drvdata(dev);
- unsigned long i;
+ unsigned int i;
+ int err;
- if (strict_strtoul(buf, 10, &i))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &i);
+ if (err)
+ return err;
if (i)
ads7846_disable(ts);
@@ -1424,7 +1426,6 @@ static int __devexit ads7846_remove(struct spi_device *spi)
static struct spi_driver ads7846_driver = {
.driver = {
.name = "ads7846",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ads7846_pm,
},
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 8034cbb20f7..d016cb26d12 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -429,18 +429,7 @@ static struct platform_driver atmel_wm97xx_driver = {
.suspend = atmel_wm97xx_suspend,
.resume = atmel_wm97xx_resume,
};
-
-static int __init atmel_wm97xx_init(void)
-{
- return platform_driver_probe(&atmel_wm97xx_driver, atmel_wm97xx_probe);
-}
-module_init(atmel_wm97xx_init);
-
-static void __exit atmel_wm97xx_exit(void)
-{
- platform_driver_unregister(&atmel_wm97xx_driver);
-}
-module_exit(atmel_wm97xx_exit);
+module_platform_driver(atmel_wm97xx_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("wm97xx continuous touch driver for Atmel AT91 and AVR32");
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 122a8788365..201b2d2ec1b 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -351,20 +351,7 @@ static struct platform_driver atmel_tsadcc_driver = {
.name = "atmel_tsadcc",
},
};
-
-static int __init atmel_tsadcc_init(void)
-{
- return platform_driver_register(&atmel_tsadcc_driver);
-}
-
-static void __exit atmel_tsadcc_exit(void)
-{
- platform_driver_unregister(&atmel_tsadcc_driver);
-}
-
-module_init(atmel_tsadcc_init);
-module_exit(atmel_tsadcc_exit);
-
+module_platform_driver(atmel_tsadcc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel TouchScreen Driver");
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
new file mode 100644
index 00000000000..94fb9fbb08a
--- /dev/null
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -0,0 +1,652 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * loosely based on auo_touch.c from Dell Streak vendor-kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input/auo-pixcir-ts.h>
+
+/*
+ * Coordinate calculation:
+ * X1 = X1_LSB + X1_MSB*256
+ * Y1 = Y1_LSB + Y1_MSB*256
+ * X2 = X2_LSB + X2_MSB*256
+ * Y2 = Y2_LSB + Y2_MSB*256
+ */
+#define AUO_PIXCIR_REG_X1_LSB 0x00
+#define AUO_PIXCIR_REG_X1_MSB 0x01
+#define AUO_PIXCIR_REG_Y1_LSB 0x02
+#define AUO_PIXCIR_REG_Y1_MSB 0x03
+#define AUO_PIXCIR_REG_X2_LSB 0x04
+#define AUO_PIXCIR_REG_X2_MSB 0x05
+#define AUO_PIXCIR_REG_Y2_LSB 0x06
+#define AUO_PIXCIR_REG_Y2_MSB 0x07
+
+#define AUO_PIXCIR_REG_STRENGTH 0x0d
+#define AUO_PIXCIR_REG_STRENGTH_X1_LSB 0x0e
+#define AUO_PIXCIR_REG_STRENGTH_X1_MSB 0x0f
+
+#define AUO_PIXCIR_REG_RAW_DATA_X 0x2b
+#define AUO_PIXCIR_REG_RAW_DATA_Y 0x4f
+
+#define AUO_PIXCIR_REG_X_SENSITIVITY 0x6f
+#define AUO_PIXCIR_REG_Y_SENSITIVITY 0x70
+#define AUO_PIXCIR_REG_INT_SETTING 0x71
+#define AUO_PIXCIR_REG_INT_WIDTH 0x72
+#define AUO_PIXCIR_REG_POWER_MODE 0x73
+
+#define AUO_PIXCIR_REG_VERSION 0x77
+#define AUO_PIXCIR_REG_CALIBRATE 0x78
+
+#define AUO_PIXCIR_REG_TOUCHAREA_X1 0x1e
+#define AUO_PIXCIR_REG_TOUCHAREA_Y1 0x1f
+#define AUO_PIXCIR_REG_TOUCHAREA_X2 0x20
+#define AUO_PIXCIR_REG_TOUCHAREA_Y2 0x21
+
+#define AUO_PIXCIR_REG_EEPROM_CALIB_X 0x42
+#define AUO_PIXCIR_REG_EEPROM_CALIB_Y 0xad
+
+#define AUO_PIXCIR_INT_TPNUM_MASK 0xe0
+#define AUO_PIXCIR_INT_TPNUM_SHIFT 5
+#define AUO_PIXCIR_INT_RELEASE (1 << 4)
+#define AUO_PIXCIR_INT_ENABLE (1 << 3)
+#define AUO_PIXCIR_INT_POL_HIGH (1 << 2)
+#define AUO_PIXCIR_INT_MODE_MASK 0x03
+
+/*
+ * Power modes:
+ * active: scan speed 60Hz
+ * sleep: scan speed 10Hz, can be auto-activated, wakes up on 1st touch
+ * deep sleep: scan speed 1Hz, can only be entered or left manually.
+ */
+#define AUO_PIXCIR_POWER_ACTIVE 0x00
+#define AUO_PIXCIR_POWER_SLEEP 0x01
+#define AUO_PIXCIR_POWER_DEEP_SLEEP 0x02
+#define AUO_PIXCIR_POWER_MASK 0x03
+
+#define AUO_PIXCIR_POWER_ALLOW_SLEEP (1 << 2)
+#define AUO_PIXCIR_POWER_IDLE_TIME(ms) ((ms & 0xf) << 4)
+
+#define AUO_PIXCIR_CALIBRATE 0x03
+
+#define AUO_PIXCIR_EEPROM_CALIB_X_LEN 62
+#define AUO_PIXCIR_EEPROM_CALIB_Y_LEN 36
+
+#define AUO_PIXCIR_RAW_DATA_X_LEN 18
+#define AUO_PIXCIR_RAW_DATA_Y_LEN 11
+
+#define AUO_PIXCIR_STRENGTH_ENABLE (1 << 0)
+
+/* Touchscreen absolute values */
+#define AUO_PIXCIR_REPORT_POINTS 2
+#define AUO_PIXCIR_MAX_AREA 0xff
+#define AUO_PIXCIR_PENUP_TIMEOUT_MS 10
+
+struct auo_pixcir_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32];
+
+ /* special handling for touch_indicate interrupt mode */
+ bool touch_ind_mode;
+
+ wait_queue_head_t wait;
+ bool stopped;
+};
+
+struct auo_point_t {
+ int coord_x;
+ int coord_y;
+ int area_major;
+ int area_minor;
+ int orientation;
+};
+
+static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
+ struct auo_point_t *point)
+{
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ uint8_t raw_coord[8];
+ uint8_t raw_area[4];
+ int i, ret;
+
+ /* touch coordinates */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_X1_LSB,
+ 8, raw_coord);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read coordinate, %d\n", ret);
+ return ret;
+ }
+
+ /* touch area */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_TOUCHAREA_X1,
+ 4, raw_area);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not read touch area, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ point[i].coord_x =
+ raw_coord[4 * i + 1] << 8 | raw_coord[4 * i];
+ point[i].coord_y =
+ raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
+
+ if (point[i].coord_x > pdata->x_max ||
+ point[i].coord_y > pdata->y_max) {
+ dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+ point[i].coord_x, point[i].coord_y);
+ point[i].coord_x = point[i].coord_y = 0;
+ }
+
+ /* determine touch major, minor and orientation */
+ point[i].area_major = max(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].area_minor = min(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].orientation = raw_area[2 * i] > raw_area[2 * i + 1];
+ }
+
+ return 0;
+}
+
+static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
+{
+ struct auo_pixcir_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
+ int i;
+ int ret;
+ int fingers = 0;
+ int abs = -1;
+
+ while (!ts->stopped) {
+
+ /* check for an up event in touch_ind_mode */
+ if (ts->touch_ind_mode) {
+ if (gpio_get_value(pdata->gpio_int) == 0) {
+ input_mt_sync(ts->input);
+ input_report_key(ts->input, BTN_TOUCH, 0);
+ input_sync(ts->input);
+ break;
+ }
+ }
+
+ ret = auo_pixcir_collect_data(ts, point);
+ if (ret < 0) {
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ continue;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ if (point[i].coord_x > 0 || point[i].coord_y > 0) {
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ point[i].coord_x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ point[i].coord_y);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ point[i].area_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+ point[i].area_minor);
+ input_report_abs(ts->input, ABS_MT_ORIENTATION,
+ point[i].orientation);
+ input_mt_sync(ts->input);
+
+ /* use first finger as source for singletouch */
+ if (fingers == 0)
+ abs = i;
+
+ /* number of touch points could also be queried
+ * via i2c but would require an additional call
+ */
+ fingers++;
+ }
+ }
+
+ input_report_key(ts->input, BTN_TOUCH, fingers > 0);
+
+ if (abs > -1) {
+ input_report_abs(ts->input, ABS_X, point[abs].coord_x);
+ input_report_abs(ts->input, ABS_Y, point[abs].coord_y);
+ }
+
+ input_sync(ts->input);
+
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set the power mode of the device.
+ * Valid modes are
+ * - AUO_PIXCIR_POWER_ACTIVE
+ * - AUO_PIXCIR_POWER_SLEEP - automatically left on first touch
+ * - AUO_PIXCIR_POWER_DEEP_SLEEP
+ */
+static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_POWER_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ ret &= ~AUO_PIXCIR_POWER_MASK;
+ ret |= mode;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_POWER_MODE, ret);
+ if (ret) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+ int int_setting)
+{
+ struct i2c_client *client = ts->client;
+ struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ret &= ~AUO_PIXCIR_INT_MODE_MASK;
+ ret |= int_setting;
+ ret |= AUO_PIXCIR_INT_POL_HIGH; /* always use high for interrupts */
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+
+ return 0;
+}
+
+/* control the generation of interrupts on the device side */
+static int auo_pixcir_int_toggle(struct auo_pixcir_ts *ts, bool enable)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ if (enable)
+ ret |= AUO_PIXCIR_INT_ENABLE;
+ else
+ ret &= ~AUO_PIXCIR_INT_ENABLE;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_start(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_ACTIVE);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not set power mode, %d\n",
+ ret);
+ return ret;
+ }
+
+ ts->stopped = false;
+ mb();
+ enable_irq(client->irq);
+
+ ret = auo_pixcir_int_toggle(ts, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not enable interrupt, %d\n",
+ ret);
+ disable_irq(client->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_stop(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_int_toggle(ts, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not disable interrupt, %d\n",
+ ret);
+ return ret;
+ }
+
+ /* disable receiving of interrupts */
+ disable_irq(client->irq);
+ ts->stopped = true;
+ mb();
+ wake_up(&ts->wait);
+
+ return auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_DEEP_SLEEP);
+}
+
+static int auo_pixcir_input_open(struct input_dev *dev)
+{
+ struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+ int ret;
+
+ ret = auo_pixcir_start(ts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void auo_pixcir_input_close(struct input_dev *dev)
+{
+ struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+
+ auo_pixcir_stop(ts);
+
+ return;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int auo_pixcir_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ /* when configured as a wakeup source, the device should always wake
+ * the system, therefore start the device if necessary
+ */
+ if (device_may_wakeup(&client->dev)) {
+ /* need to start device if not open, to be wakeup source */
+ if (!input->users) {
+ ret = auo_pixcir_start(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ enable_irq_wake(client->irq);
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_SLEEP);
+ } else if (input->users) {
+ ret = auo_pixcir_stop(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+
+static int auo_pixcir_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+
+ /* need to stop device if it was not open on suspend */
+ if (!input->users) {
+ ret = auo_pixcir_stop(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ /* device wakes automatically from SLEEP */
+ } else if (input->users) {
+ ret = auo_pixcir_start(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
+ auo_pixcir_resume);
+
+static int __devinit auo_pixcir_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_pixcir_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ ts = kzalloc(sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ret = gpio_request(pdata->gpio_int, "auo_pixcir_ts_int");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_int, ret);
+ goto err_gpio_int;
+ }
+
+ if (pdata->init_hw)
+ pdata->init_hw(client);
+
+ ts->client = client;
+ ts->touch_ind_mode = 0;
+ init_waitqueue_head(&ts->wait);
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "could not allocate input device\n");
+ goto err_input_alloc;
+ }
+
+ ts->input = input_dev;
+
+ input_dev->name = "AUO-Pixcir touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ input_dev->open = auo_pixcir_input_open;
+ input_dev->close = auo_pixcir_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_VERSION);
+ if (ret < 0)
+ goto err_fw_vers;
+ dev_info(&client->dev, "firmware version 0x%X\n", ret);
+
+ ret = auo_pixcir_int_config(ts, pdata->int_setting);
+ if (ret)
+ goto err_fw_vers;
+
+ input_set_drvdata(ts->input, ts);
+ ts->stopped = true;
+
+ ret = request_threaded_irq(client->irq, NULL, auo_pixcir_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ input_dev->name, ts);
+ if (ret) {
+ dev_err(&client->dev, "irq %d requested failed\n", client->irq);
+ goto err_fw_vers;
+ }
+
+ /* stop device and put it into deep sleep until it is opened */
+ ret = auo_pixcir_stop(ts);
+ if (ret < 0)
+ goto err_input_register;
+
+ ret = input_register_device(input_dev);
+ if (ret) {
+ dev_err(&client->dev, "could not register input device\n");
+ goto err_input_register;
+ }
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+
+err_input_register:
+ free_irq(client->irq, ts);
+err_fw_vers:
+ input_free_device(input_dev);
+err_input_alloc:
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+ gpio_free(pdata->gpio_int);
+err_gpio_int:
+ kfree(ts);
+
+ return ret;
+}
+
+static int __devexit auo_pixcir_remove(struct i2c_client *client)
+{
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input);
+
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+
+ gpio_free(pdata->gpio_int);
+
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id auo_pixcir_idtable[] = {
+ { "auo_pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, auo_pixcir_idtable);
+
+static struct i2c_driver auo_pixcir_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_pixcir_ts",
+ .pm = &auo_pixcir_pm_ops,
+ },
+ .probe = auo_pixcir_probe,
+ .remove = __devexit_p(auo_pixcir_remove),
+ .id_table = auo_pixcir_idtable,
+};
+
+static int __init auo_pixcir_init(void)
+{
+ return i2c_add_driver(&auo_pixcir_driver);
+}
+module_init(auo_pixcir_init);
+
+static void __exit auo_pixcir_exit(void)
+{
+ i2c_del_driver(&auo_pixcir_driver);
+}
+module_exit(auo_pixcir_exit);
+
+MODULE_DESCRIPTION("AUO-PIXCIR touchscreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 2b72a5923c1..36b65cf10d7 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -379,18 +379,7 @@ static struct platform_driver da9034_touch_driver = {
.probe = da9034_touch_probe,
.remove = __devexit_p(da9034_touch_remove),
};
-
-static int __init da9034_touch_init(void)
-{
- return platform_driver_register(&da9034_touch_driver);
-}
-module_init(da9034_touch_init);
-
-static void __exit da9034_touch_exit(void)
-{
- platform_driver_unregister(&da9034_touch_driver);
-}
-module_exit(da9034_touch_exit);
+module_platform_driver(da9034_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
new file mode 100644
index 00000000000..eadcc2e83c7
--- /dev/null
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -0,0 +1,303 @@
+/*
+ * Driver for EETI eGalax Multiple Touch Controller
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * based on max11801_ts.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The EETI eGalax serial touch screen controller is an I2C-based
+ * multiple touch screen controller; it supports 5-point multi-touch. */
+
+/* TODO:
+ - auto idle mode support
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/input/mt.h>
+
+/*
+ * Mouse Mode: some panels may configure the controller to mouse mode,
+ * which can only report one point at a given time.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_MOUSE 0x1
+/*
+ * Vendor Mode: this mode is used to transfer some vendor specific
+ * messages.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_VENDOR 0x3
+/* Multiple Touch Mode */
+#define REPORT_MODE_MTTOUCH 0x4
+
+#define MAX_SUPPORT_POINTS 5
+
+#define EVENT_VALID_OFFSET 7
+#define EVENT_VALID_MASK (0x1 << EVENT_VALID_OFFSET)
+#define EVENT_ID_OFFSET 2
+#define EVENT_ID_MASK (0xf << EVENT_ID_OFFSET)
+#define EVENT_IN_RANGE (0x1 << 1)
+#define EVENT_DOWN_UP (0X1 << 0)
+
+#define MAX_I2C_DATA_LEN 10
+
+#define EGALAX_MAX_X 32760
+#define EGALAX_MAX_Y 32760
+#define EGALAX_MAX_TRIES 100
+
+struct egalax_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+};
+
+static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
+{
+ struct egalax_ts *ts = dev_id;
+ struct input_dev *input_dev = ts->input_dev;
+ struct i2c_client *client = ts->client;
+ u8 buf[MAX_I2C_DATA_LEN];
+ int id, ret, x, y, z;
+ int tries = 0;
+ bool down, valid;
+ u8 state;
+
+ do {
+ ret = i2c_master_recv(client, buf, MAX_I2C_DATA_LEN);
+ } while (ret == -EAGAIN && tries++ < EGALAX_MAX_TRIES);
+
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (buf[0] != REPORT_MODE_MTTOUCH) {
+ /* ignore mouse events and vendor events */
+ return IRQ_HANDLED;
+ }
+
+ state = buf[1];
+ x = (buf[3] << 8) | buf[2];
+ y = (buf[5] << 8) | buf[4];
+ z = (buf[7] << 8) | buf[6];
+
+ valid = state & EVENT_VALID_MASK;
+ id = (state & EVENT_ID_MASK) >> EVENT_ID_OFFSET;
+ down = state & EVENT_DOWN_UP;
+
+ if (!valid || id > MAX_SUPPORT_POINTS) {
+ dev_dbg(&client->dev, "point invalid\n");
+ return IRQ_HANDLED;
+ }
+
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
+
+ dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d",
+ down ? "down" : "up", id, x, y, z);
+
+ if (down) {
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, z);
+ }
+
+ input_mt_report_pointer_emulation(input_dev, true);
+ input_sync(input_dev);
+
+ return IRQ_HANDLED;
+}
+
+/* wake up the controller via a falling edge on the interrupt gpio. */
+static int egalax_wake_up_device(struct i2c_client *client)
+{
+ int gpio = irq_to_gpio(client->irq);
+ int ret;
+
+ ret = gpio_request(gpio, "egalax_irq");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "request gpio failed, cannot wake up controller: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* wake up the controller via a falling edge on the IRQ gpio. */
+ gpio_direction_output(gpio, 0);
+ gpio_set_value(gpio, 1);
+
+ /* the controller should be woken up now, return the gpio to irq duty. */
+ gpio_direction_input(gpio);
+ gpio_free(gpio);
+
+ return 0;
+}
+
+static int __devinit egalax_firmware_version(struct i2c_client *client)
+{
+ static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
+ int ret;
+
+ ret = i2c_master_send(client, cmd, MAX_I2C_DATA_LEN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit egalax_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct egalax_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+ int error;
+
+ ts = kzalloc(sizeof(struct egalax_ts), GFP_KERNEL);
+ if (!ts) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_ts;
+ }
+
+ ts->client = client;
+ ts->input_dev = input_dev;
+
+ /* controller may be in sleep, wake it up. */
+ egalax_wake_up_device(client);
+
+ ret = egalax_firmware_version(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read firmware version\n");
+ error = -EIO;
+ goto err_free_dev;
+ }
+
+ input_dev->name = "EETI eGalax Touch Screen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_set_abs_params(input_dev,
+ ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev,
+ ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS);
+
+ input_set_drvdata(input_dev, ts);
+
+ error = request_threaded_irq(client->irq, NULL, egalax_ts_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "egalax_ts", ts);
+ if (error < 0) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(ts->input_dev);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, ts);
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, ts);
+err_free_dev:
+ input_free_device(input_dev);
+err_free_ts:
+ kfree(ts);
+
+ return error;
+}
+
+static __devexit int egalax_ts_remove(struct i2c_client *client)
+{
+ struct egalax_ts *ts = i2c_get_clientdata(client);
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id egalax_ts_id[] = {
+ { "egalax_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, egalax_ts_id);
+
+#ifdef CONFIG_PM_SLEEP
+static int egalax_ts_suspend(struct device *dev)
+{
+ static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
+ 0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
+ };
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
+ return ret > 0 ? 0 : ret;
+}
+
+static int egalax_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return egalax_wake_up_device(client);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+
+static struct i2c_driver egalax_ts_driver = {
+ .driver = {
+ .name = "egalax_ts",
+ .owner = THIS_MODULE,
+ .pm = &egalax_ts_pm_ops,
+ },
+ .id_table = egalax_ts_id,
+ .probe = egalax_ts_probe,
+ .remove = __devexit_p(egalax_ts_remove),
+};
+
+static int __init egalax_ts_init(void)
+{
+ return i2c_add_driver(&egalax_ts_driver);
+}
+
+static void __exit egalax_ts_exit(void)
+{
+ i2c_del_driver(&egalax_ts_driver);
+}
+
+module_init(egalax_ts_init);
+module_exit(egalax_ts_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Touchscreen driver for EETI eGalax touch controller");
+MODULE_LICENSE("GPL");
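In egalax_ts_interrupt() above, the per-contact state byte buf[1] carries a valid flag (bit 7), a 4-bit contact id (bits 2..5) and the down/up bit (bit 0), while the coordinates are little-endian 16-bit values in buf[2..5]. A worked decode using a made-up report:

/* Worked decode of a hypothetical eGalax MT report, mirroring the bit
 * layout used by egalax_ts_interrupt(). */
#include <stdio.h>

#define EVENT_VALID_MASK	(0x1 << 7)
#define EVENT_ID_MASK		(0xf << 2)
#define EVENT_DOWN_UP		(0x1 << 0)

int main(void)
{
	/* buf[0] = 0x04 (REPORT_MODE_MTTOUCH), buf[1] = state byte */
	unsigned char buf[10] = { 0x04, 0x8d, 0x34, 0x12, 0xcd, 0x09, 0x40, 0x00 };

	int valid = !!(buf[1] & EVENT_VALID_MASK);	/* bit 7 set -> valid   */
	int id    = (buf[1] & EVENT_ID_MASK) >> 2;	/* 0x8d -> id 3         */
	int down  = buf[1] & EVENT_DOWN_UP;		/* bit 0 -> contact down */
	int x     = (buf[3] << 8) | buf[2];		/* 0x1234 = 4660        */
	int y     = (buf[5] << 8) | buf[4];		/* 0x09cd = 2509        */

	printf("valid=%d id=%d down=%d x=%d y=%d\n", valid, id, down, x, y);
	return 0;
}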
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 62811de6f18..81e33862394 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -47,12 +47,6 @@ static int invert_y;
module_param(invert_y, bool, 0644);
MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
-static struct pnp_device_id pnp_ids[] = {
- { .id = "PNP0cc0" },
- { .id = "" }
-};
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
-
static irqreturn_t htcpen_interrupt(int irq, void *handle)
{
struct input_dev *htcpen_dev = handle;
@@ -237,6 +231,7 @@ static struct dmi_system_id __initdata htcshift_dmi_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
static int __init htcpen_isa_init(void)
{
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 327695268e0..3cd7a837f82 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -664,18 +664,7 @@ static struct platform_driver mrstouch_driver = {
.probe = mrstouch_probe,
.remove = __devexit_p(mrstouch_remove),
};
-
-static int __init mrstouch_init(void)
-{
- return platform_driver_register(&mrstouch_driver);
-}
-module_init(mrstouch_init);
-
-static void __exit mrstouch_exit(void)
-{
- platform_driver_unregister(&mrstouch_driver);
-}
-module_exit(mrstouch_exit);
+module_platform_driver(mrstouch_driver);
MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 50076c2d59e..c3848ad2325 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -172,16 +172,4 @@ static struct platform_driver jornada720_ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init jornada720_ts_init(void)
-{
- return platform_driver_register(&jornada720_ts_driver);
-}
-
-static void __exit jornada720_ts_exit(void)
-{
- platform_driver_unregister(&jornada720_ts_driver);
-}
-
-module_init(jornada720_ts_init);
-module_exit(jornada720_ts_exit);
+module_platform_driver(jornada720_ts_driver);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 0a484ed5295..afcd0691ec6 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -392,18 +392,7 @@ static struct platform_driver lpc32xx_ts_driver = {
.pm = LPC32XX_TS_PM_OPS,
},
};
-
-static int __init lpc32xx_ts_init(void)
-{
- return platform_driver_register(&lpc32xx_ts_driver);
-}
-module_init(lpc32xx_ts_init);
-
-static void __exit lpc32xx_ts_exit(void)
-{
- platform_driver_unregister(&lpc32xx_ts_driver);
-}
-module_exit(lpc32xx_ts_exit);
+module_platform_driver(lpc32xx_ts_driver);
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com");
MODULE_DESCRIPTION("LPC32XX TSC Driver");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index e966c29ff1b..7d2b2136e5a 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -302,19 +302,7 @@ static struct platform_driver mainstone_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init mainstone_wm97xx_init(void)
-{
- return platform_driver_register(&mainstone_wm97xx_driver);
-}
-
-static void __exit mainstone_wm97xx_exit(void)
-{
- platform_driver_unregister(&mainstone_wm97xx_driver);
-}
-
-module_init(mainstone_wm97xx_init);
-module_exit(mainstone_wm97xx_exit);
+module_platform_driver(mainstone_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index ede02743eac..68f86f7dabb 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -240,18 +240,7 @@ static struct platform_driver mc13783_ts_driver = {
.name = MC13783_TS_NAME,
},
};
-
-static int __init mc13783_ts_init(void)
-{
- return platform_driver_probe(&mc13783_ts_driver, &mc13783_ts_probe);
-}
-module_init(mc13783_ts_init);
-
-static void __exit mc13783_ts_exit(void)
-{
- platform_driver_unregister(&mc13783_ts_driver);
-}
-module_exit(mc13783_ts_exit);
+module_platform_driver(mc13783_ts_driver);
MODULE_DESCRIPTION("MC13783 input touchscreen driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 5803bd0c1cc..5226194aa78 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -36,7 +36,6 @@
struct migor_ts_priv {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
int irq;
};
@@ -44,15 +43,24 @@ static const u_int8_t migor_ts_ena_seq[17] = { 0x33, 0x22, 0x11,
0x01, 0x06, 0x07, };
static const u_int8_t migor_ts_dis_seq[17] = { };
-static void migor_ts_poscheck(struct work_struct *work)
+static irqreturn_t migor_ts_isr(int irq, void *dev_id)
{
- struct migor_ts_priv *priv = container_of(work,
- struct migor_ts_priv,
- work.work);
+ struct migor_ts_priv *priv = dev_id;
unsigned short xpos, ypos;
unsigned char event;
u_int8_t buf[16];
+ /*
+ * The touch screen controller chip is hooked up to the CPU
+ * using I2C and a single interrupt line. The interrupt line
+ * is pulled low whenever someone taps the screen. To deassert
+ * the interrupt line we need to acknowledge the interrupt by
+ * communicating with the controller over the slow I2C bus.
+ *
+ * Since the I2C bus controller may sleep, we are using a
+ * threaded IRQ handler here.
+ */
+
memset(buf, 0, sizeof(buf));
/* Set Index 0 */
@@ -72,41 +80,25 @@ static void migor_ts_poscheck(struct work_struct *work)
xpos = ((buf[11] & 0x03) << 8 | buf[10]);
event = buf[12];
- if (event == EVENT_PENDOWN || event == EVENT_REPEAT) {
+ switch (event) {
+ case EVENT_PENDOWN:
+ case EVENT_REPEAT:
input_report_key(priv->input, BTN_TOUCH, 1);
input_report_abs(priv->input, ABS_X, ypos); /*X-Y swap*/
input_report_abs(priv->input, ABS_Y, xpos);
input_sync(priv->input);
- } else if (event == EVENT_PENUP) {
+ break;
+
+ case EVENT_PENUP:
input_report_key(priv->input, BTN_TOUCH, 0);
input_sync(priv->input);
+ break;
}
- out:
- enable_irq(priv->irq);
-}
-
-static irqreturn_t migor_ts_isr(int irq, void *dev_id)
-{
- struct migor_ts_priv *priv = dev_id;
-
- /* the touch screen controller chip is hooked up to the cpu
- * using i2c and a single interrupt line. the interrupt line
- * is pulled low whenever someone taps the screen. to deassert
- * the interrupt line we need to acknowledge the interrupt by
- * communicating with the controller over the slow i2c bus.
- *
- * we can't acknowledge from interrupt context since the i2c
- * bus controller may sleep, so we just disable the interrupt
- * here and handle the acknowledge using delayed work.
- */
-
- disable_irq_nosync(irq);
- schedule_delayed_work(&priv->work, HZ / 20);
+ out:
return IRQ_HANDLED;
}
-
static int migor_ts_open(struct input_dev *dev)
{
struct migor_ts_priv *priv = input_get_drvdata(dev);
@@ -131,15 +123,6 @@ static void migor_ts_close(struct input_dev *dev)
disable_irq(priv->irq);
- /* cancel pending work and wait for migor_ts_poscheck() to finish */
- if (cancel_delayed_work_sync(&priv->work)) {
- /*
- * if migor_ts_poscheck was canceled we need to enable IRQ
- * here to balance disable done in migor_ts_isr.
- */
- enable_irq(priv->irq);
- }
-
/* disable controller */
i2c_master_send(client, migor_ts_dis_seq, sizeof(migor_ts_dis_seq));
@@ -154,23 +137,20 @@ static int migor_ts_probe(struct i2c_client *client,
int error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "failed to allocate driver data\n");
- error = -ENOMEM;
- goto err0;
- }
-
- dev_set_drvdata(&client->dev, priv);
-
input = input_allocate_device();
- if (!input) {
- dev_err(&client->dev, "Failed to allocate input device.\n");
+ if (!priv || !input) {
+ dev_err(&client->dev, "failed to allocate memory\n");
error = -ENOMEM;
- goto err1;
+ goto err_free_mem;
}
+ priv->client = client;
+ priv->input = input;
+ priv->irq = client->irq;
+
input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ __set_bit(BTN_TOUCH, input->keybit);
input_set_abs_params(input, ABS_X, 95, 955, 0, 0);
input_set_abs_params(input, ABS_Y, 85, 935, 0, 0);
@@ -184,39 +164,34 @@ static int migor_ts_probe(struct i2c_client *client,
input_set_drvdata(input, priv);
- priv->client = client;
- priv->input = input;
- INIT_DELAYED_WORK(&priv->work, migor_ts_poscheck);
- priv->irq = client->irq;
-
- error = input_register_device(input);
- if (error)
- goto err1;
-
- error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW,
- client->name, priv);
+ error = request_threaded_irq(priv->irq, NULL, migor_ts_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, priv);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err2;
+ goto err_free_mem;
}
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, priv);
device_init_wakeup(&client->dev, 1);
+
return 0;
- err2:
- input_unregister_device(input);
- input = NULL; /* so we dont try to free it below */
- err1:
+ err_free_irq:
+ free_irq(priv->irq, priv);
+ err_free_mem:
input_free_device(input);
kfree(priv);
- err0:
- dev_set_drvdata(&client->dev, NULL);
return error;
}
static int migor_ts_remove(struct i2c_client *client)
{
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
free_irq(priv->irq, priv);
input_unregister_device(priv->input);
@@ -230,7 +205,7 @@ static int migor_ts_remove(struct i2c_client *client)
static int migor_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
enable_irq_wake(priv->irq);
@@ -241,7 +216,7 @@ static int migor_ts_suspend(struct device *dev)
static int migor_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
disable_irq_wake(priv->irq);
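The migor_ts rework above is the standard move from "disable the IRQ and defer to a workqueue" to a threaded interrupt handler: with a NULL hard handler and IRQF_ONESHOT, the core keeps the level-triggered line masked until the thread function returns, so the handler may sleep on the I2C bus and no enable_irq()/disable_irq() balancing is needed. A condensed sketch of that pattern, where struct bar_ts and bar_ts_read_and_report() are illustrative placeholders:

#include <linux/interrupt.h>

struct bar_ts {
	int irq;
	/* ... device state ... */
};

static void bar_ts_read_and_report(struct bar_ts *ts);	/* placeholder */

/* Runs in process context, so sleeping I2C transfers are fine here. */
static irqreturn_t bar_ts_thread_fn(int irq, void *dev_id)
{
	struct bar_ts *ts = dev_id;

	bar_ts_read_and_report(ts);	/* ack the chip, report coordinates */

	return IRQ_HANDLED;		/* the core unmasks the line on return */
}

static int bar_ts_request_irq(struct bar_ts *ts)
{
	/*
	 * NULL hard handler + IRQF_ONESHOT: the line stays masked while
	 * the thread function runs, which prevents an interrupt storm on
	 * a level-triggered (IRQF_TRIGGER_LOW) line.
	 */
	return request_threaded_irq(ts->irq, NULL, bar_ts_thread_fn,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    "bar_ts", ts);
}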
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index ea6ef16e59b..f57aeb80f7e 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -252,19 +252,7 @@ static struct platform_driver pcap_ts_driver = {
.pm = PCAP_TS_PM_OPS,
},
};
-
-static int __init pcap_ts_init(void)
-{
- return platform_driver_register(&pcap_ts_driver);
-}
-
-static void __exit pcap_ts_exit(void)
-{
- platform_driver_unregister(&pcap_ts_driver);
-}
-
-module_init(pcap_ts_init);
-module_exit(pcap_ts_exit);
+module_platform_driver(pcap_ts_driver);
MODULE_DESCRIPTION("Motorola PCAP2 touchscreen driver");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
new file mode 100644
index 00000000000..d5ac09a1ee5
--- /dev/null
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for Pixcir I2C touchscreen controllers.
+ *
+ * Copyright (C) 2010-2011 Pixcir, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/pixcir_ts.h>
+
+struct pixcir_i2c_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ const struct pixcir_ts_platform_data *chip;
+ bool exiting;
+};
+
+static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
+{
+ struct pixcir_i2c_ts_data *tsdata = data;
+ u8 rdbuf[10], wrbuf[1] = { 0 };
+ u8 touch;
+ int ret;
+
+ ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf));
+ if (ret != sizeof(wrbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_send failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf));
+ if (ret != sizeof(rdbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_recv failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ touch = rdbuf[0];
+ if (touch) {
+ u16 posx1 = (rdbuf[3] << 8) | rdbuf[2];
+ u16 posy1 = (rdbuf[5] << 8) | rdbuf[4];
+ u16 posx2 = (rdbuf[7] << 8) | rdbuf[6];
+ u16 posy2 = (rdbuf[9] << 8) | rdbuf[8];
+
+ input_report_key(tsdata->input, BTN_TOUCH, 1);
+ input_report_abs(tsdata->input, ABS_X, posx1);
+ input_report_abs(tsdata->input, ABS_Y, posy1);
+
+ input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1);
+ input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1);
+ input_mt_sync(tsdata->input);
+
+ if (touch == 2) {
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_X, posx2);
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_Y, posy2);
+ input_mt_sync(tsdata->input);
+ }
+ } else {
+ input_report_key(tsdata->input, BTN_TOUCH, 0);
+ }
+
+ input_sync(tsdata->input);
+}
+
+static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
+{
+ struct pixcir_i2c_ts_data *tsdata = dev_id;
+
+ while (!tsdata->exiting) {
+ pixcir_ts_poscheck(tsdata);
+
+ if (tsdata->chip->attb_read_val())
+ break;
+
+ msleep(20);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pixcir_i2c_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pixcir_i2c_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
+ pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
+
+static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+ struct pixcir_i2c_ts_data *tsdata;
+ struct input_dev *input;
+ int error;
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data not defined\n");
+ return -EINVAL;
+ }
+
+ tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsdata || !input) {
+ dev_err(&client->dev, "Failed to allocate driver data!\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsdata->client = client;
+ tsdata->input = input;
+ tsdata->chip = pdata;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+
+ input_set_drvdata(input, tsdata);
+
+ error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
+ IRQF_TRIGGER_FALLING,
+ client->name, tsdata);
+ if (error) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+ goto err_free_mem;
+ }
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, tsdata);
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, tsdata);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsdata);
+ return error;
+}
+
+static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+{
+ struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
+
+ device_init_wakeup(&client->dev, 0);
+
+ tsdata->exiting = true;
+ mb();
+ free_irq(client->irq, tsdata);
+
+ input_unregister_device(tsdata->input);
+ kfree(tsdata);
+
+ return 0;
+}
+
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+ { "pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+static struct i2c_driver pixcir_i2c_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pixcir_ts",
+ .pm = &pixcir_dev_pm_ops,
+ },
+ .probe = pixcir_i2c_ts_probe,
+ .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .id_table = pixcir_i2c_ts_id,
+};
+
+static int __init pixcir_i2c_ts_init(void)
+{
+ return i2c_add_driver(&pixcir_i2c_ts_driver);
+}
+module_init(pixcir_i2c_ts_init);
+
+static void __exit pixcir_i2c_ts_exit(void)
+{
+ i2c_del_driver(&pixcir_i2c_ts_driver);
+}
+module_exit(pixcir_i2c_ts_exit);
+
+MODULE_AUTHOR("Jianchun Bian <jcbian@pixcir.com.cn>, Dequan Meng <dqmeng@pixcir.com.cn>");
+MODULE_DESCRIPTION("Pixcir I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 64ce697a345..bf1a0640006 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -432,19 +432,7 @@ static struct platform_driver s3c_ts_driver = {
.probe = s3c2410ts_probe,
.remove = __devexit_p(s3c2410ts_remove),
};
-
-static int __init s3c2410ts_init(void)
-{
- return platform_driver_register(&s3c_ts_driver);
-}
-
-static void __exit s3c2410ts_exit(void)
-{
- platform_driver_unregister(&s3c_ts_driver);
-}
-
-module_init(s3c2410ts_init);
-module_exit(s3c2410ts_exit);
+module_platform_driver(s3c_ts_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, "
"Ben Dooks <ben@simtec.co.uk>, "
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab371358b3..8825fe37d43 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +47,7 @@ struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
struct st1232_ts_finger finger[MAX_FINGERS];
+ struct dev_pm_qos_request low_latency_req;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
}
/* SYN_MT_REPORT only if no contact */
- if (!count)
+ if (!count) {
input_mt_sync(input_dev);
+ if (ts->low_latency_req.dev) {
+ dev_pm_qos_remove_request(&ts->low_latency_req);
+ ts->low_latency_req.dev = NULL;
+ }
+ } else if (!ts->low_latency_req.dev) {
+ /* First contact, request 100 us latency. */
+ dev_pm_qos_add_ancestor_request(&ts->client->dev,
+ &ts->low_latency_req, 100);
+ }
/* SYN_REPORT */
input_sync(input_dev);
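The st1232 hunk above holds a device PM QoS request only while at least one contact is active: on the first touch it asks the nearest power-managed ancestor of the I2C client (typically the bus controller) to keep resume latency within 100 us, and it drops the request once all contacts are released, using low_latency_req.dev as the "request is active" marker. A condensed sketch of that on/off pattern; struct example_ts, example_update_latency_request() and count are placeholders standing in for the driver's private data and its contact count:

#include <linux/i2c.h>
#include <linux/pm_qos.h>

struct example_ts {
	struct i2c_client *client;
	struct dev_pm_qos_request low_latency_req;
};

static void example_update_latency_request(struct example_ts *ts, int count)
{
	if (count) {
		if (!ts->low_latency_req.dev)
			/* first contact: cap ancestor resume latency at 100 us */
			dev_pm_qos_add_ancestor_request(&ts->client->dev,
							&ts->low_latency_req,
							100);
	} else if (ts->low_latency_req.dev) {
		/* last contact released: drop the constraint again */
		dev_pm_qos_remove_request(&ts->low_latency_req);
		ts->low_latency_req.dev = NULL;
	}
}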
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index ae88e13c99f..692b685720c 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -379,20 +379,7 @@ static struct platform_driver stmpe_ts_driver = {
.probe = stmpe_input_probe,
.remove = __devexit_p(stmpe_ts_remove),
};
-
-static int __init stmpe_ts_init(void)
-{
- return platform_driver_register(&stmpe_ts_driver);
-}
-
-module_init(stmpe_ts_init);
-
-static void __exit stmpe_ts_exit(void)
-{
- platform_driver_unregister(&stmpe_ts_driver);
-}
-
-module_exit(stmpe_ts_exit);
+module_platform_driver(stmpe_ts_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 0e8f63e5b36..7e748809735 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -378,19 +378,7 @@ static struct platform_driver tsc_driver = {
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
-
-static int __init tsc_init(void)
-{
- return platform_driver_register(&tsc_driver);
-}
-
-static void __exit tsc_exit(void)
-{
- platform_driver_unregister(&tsc_driver);
-}
-
-module_init(tsc_init);
-module_exit(tsc_exit);
+module_platform_driver(tsc_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 43031492d73..6c6f6d8ea9b 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -371,18 +371,7 @@ static struct platform_driver tps6507x_ts_driver = {
.probe = tps6507x_ts_probe,
.remove = __devexit_p(tps6507x_ts_remove),
};
-
-static int __init tps6507x_ts_init(void)
-{
- return platform_driver_register(&tps6507x_ts_driver);
-}
-module_init(tps6507x_ts_init);
-
-static void __exit tps6507x_ts_exit(void)
-{
- platform_driver_unregister(&tps6507x_ts_driver);
-}
-module_exit(tps6507x_ts_exit);
+module_platform_driver(tps6507x_ts_driver);
MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index cbf0ff32267..067d9566299 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -450,13 +450,13 @@ static struct attribute *tsc2005_attrs[] = {
NULL
};
-static mode_t tsc2005_attr_is_visible(struct kobject *kobj,
+static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_selftest.attr) {
if (!ts->set_reset)
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 3b5b5df04dd..d2b57536fee 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -20,24 +20,24 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/input.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/suspend.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/ucb1400.h>
+#define UCB1400_TS_POLL_PERIOD 10 /* ms */
+
static int adcsync;
static int ts_delay = 55; /* us */
static int ts_delay_pressure; /* us */
/* Switch to interrupt mode. */
-static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
+static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_TS_CR,
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_INT);
@@ -47,13 +47,15 @@ static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
* Switch to pressure mode, and read pressure. We don't need to wait
* here, since both plates are being driven.
*/
-static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+
udelay(ts_delay_pressure);
+
return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
}
@@ -63,7 +65,7 @@ static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -86,7 +88,7 @@ static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
+static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -107,7 +109,7 @@ static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
* Switch to X plate resistance mode. Set MX to ground, PX to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -119,7 +121,7 @@ static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
* Switch to Y plate resistance mode. Set MY to ground, PY to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -127,26 +129,26 @@ static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
return ucb1400_adc_read(ucb->ac97, 0, adcsync);
}
-static inline int ucb1400_ts_pen_up(struct snd_ac97 *ac97)
+static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
{
- unsigned short val = ucb1400_reg_read(ac97, UCB_TS_CR);
+ unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);
return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
}
-static inline void ucb1400_ts_irq_enable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, 0);
- ucb1400_reg_write(ac97, UCB_IE_FAL, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
}
-static inline void ucb1400_ts_irq_disable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_FAL, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
}
-static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 y)
+static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
{
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
@@ -162,7 +164,7 @@ static void ucb1400_ts_event_release(struct input_dev *idev)
input_sync(idev);
}
-static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
+static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
{
unsigned int isr;
@@ -171,32 +173,34 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
if (isr & UCB_IE_TSPX)
- ucb1400_ts_irq_disable(ucb->ac97);
+ ucb1400_ts_irq_disable(ucb);
else
- dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
- enable_irq(ucb->irq);
+ dev_dbg(&ucb->ts_idev->dev,
+ "ucb1400: unexpected IE_STATUS = %#x\n", isr);
}
-static int ucb1400_ts_thread(void *_ucb)
+/*
+ * A restriction with interrupts exists when using the ucb1400, as
+ * the codec read/write routines may sleep while waiting for codec
+ * access completion and uses semaphores for access control to the
+ * AC97 bus. Therefore the driver is forced to use threaded interrupt
+ * handler.
+ */
+static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
- struct ucb1400_ts *ucb = _ucb;
- struct task_struct *tsk = current;
- int valid = 0;
- struct sched_param param = { .sched_priority = 1 };
+ struct ucb1400_ts *ucb = devid;
+ unsigned int x, y, p;
+ bool penup;
- sched_setscheduler(tsk, SCHED_FIFO, &param);
+ if (unlikely(irqnr != ucb->irq))
+ return IRQ_NONE;
- set_freezable();
- while (!kthread_should_stop()) {
- unsigned int x, y, p;
- long timeout;
+ ucb1400_clear_pending_irq(ucb);
- ucb->ts_restart = 0;
+ /* Start with a small delay before checking pendown state */
+ msleep(UCB1400_TS_POLL_PERIOD);
- if (ucb->irq_pending) {
- ucb->irq_pending = 0;
- ucb1400_handle_pending_irq(ucb);
- }
+ while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
ucb1400_adc_enable(ucb->ac97);
x = ucb1400_ts_read_xpos(ucb);
@@ -204,91 +208,62 @@ static int ucb1400_ts_thread(void *_ucb)
p = ucb1400_ts_read_pressure(ucb);
ucb1400_adc_disable(ucb->ac97);
- /* Switch back to interrupt mode. */
- ucb1400_ts_mode_int(ucb->ac97);
-
- msleep(10);
-
- if (ucb1400_ts_pen_up(ucb->ac97)) {
- ucb1400_ts_irq_enable(ucb->ac97);
-
- /*
- * If we spat out a valid sample set last time,
- * spit out a "pen off" sample here.
- */
- if (valid) {
- ucb1400_ts_event_release(ucb->ts_idev);
- valid = 0;
- }
-
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- valid = 1;
- ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
- timeout = msecs_to_jiffies(10);
- }
+ ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
- wait_event_freezable_timeout(ucb->ts_wait,
- ucb->irq_pending || ucb->ts_restart ||
- kthread_should_stop(), timeout);
+ wait_event_timeout(ucb->ts_wait, ucb->stopped,
+ msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
}
- /* Send the "pen off" if we are stopping with the pen still active */
- if (valid)
- ucb1400_ts_event_release(ucb->ts_idev);
+ ucb1400_ts_event_release(ucb->ts_idev);
- ucb->ts_task = NULL;
- return 0;
+ if (!ucb->stopped) {
+ /* Switch back to interrupt mode. */
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+ }
+
+ return IRQ_HANDLED;
}
-/*
- * A restriction with interrupts exists when using the ucb1400, as
- * the codec read/write routines may sleep while waiting for codec
- * access completion and uses semaphores for access control to the
- * AC97 bus. A complete codec read cycle could take anywhere from
- * 60 to 100uSec so we *definitely* don't want to spin inside the
- * interrupt handler waiting for codec access. So, we handle the
- * interrupt by scheduling a RT kernel thread to run in process
- * context instead of interrupt context.
- */
-static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
+static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
{
- struct ucb1400_ts *ucb = devid;
+ /* Signal IRQ thread to stop polling and disable the handler. */
+ ucb->stopped = true;
+ mb();
+ wake_up(&ucb->ts_wait);
+ disable_irq(ucb->irq);
- if (irqnr == ucb->irq) {
- disable_irq_nosync(ucb->irq);
- ucb->irq_pending = 1;
- wake_up(&ucb->ts_wait);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ ucb1400_ts_irq_disable(ucb);
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+}
+
+/* Must be called with ts->lock held */
+static void ucb1400_ts_start(struct ucb1400_ts *ucb)
+{
+ /* Tell IRQ thread that it may poll the device. */
+ ucb->stopped = false;
+ mb();
+
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+
+ enable_irq(ucb->irq);
}
static int ucb1400_ts_open(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- int ret = 0;
- BUG_ON(ucb->ts_task);
+ ucb1400_ts_start(ucb);
- ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
- if (IS_ERR(ucb->ts_task)) {
- ret = PTR_ERR(ucb->ts_task);
- ucb->ts_task = NULL;
- }
-
- return ret;
+ return 0;
}
static void ucb1400_ts_close(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- if (ucb->ts_task)
- kthread_stop(ucb->ts_task);
-
- ucb1400_ts_irq_disable(ucb->ac97);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+ ucb1400_ts_stop(ucb);
}
#ifndef NO_IRQ
@@ -299,7 +274,8 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
+static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+ struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -321,7 +297,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
UCB_ADC_DAT_VALID)) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
+ dev_err(&pdev->dev, "timed out in IRQ probe\n");
probe_irq_off(mask);
return -ENODEV;
}
@@ -342,11 +318,11 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
return 0;
}
-static int ucb1400_ts_probe(struct platform_device *dev)
+static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
{
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
u16 fcsr;
- struct ucb1400_ts *ucb = dev->dev.platform_data;
ucb->ts_idev = input_allocate_device();
if (!ucb->ts_idev) {
@@ -356,27 +332,19 @@ static int ucb1400_ts_probe(struct platform_device *dev)
/* Only in case the IRQ line wasn't supplied, try detecting it */
if (ucb->irq < 0) {
- error = ucb1400_ts_detect_irq(ucb);
+ error = ucb1400_ts_detect_irq(ucb, pdev);
if (error) {
- printk(KERN_ERR "UCB1400: IRQ probe failed\n");
+ dev_err(&pdev->dev, "IRQ probe failed\n");
goto err_free_devs;
}
}
+ dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq);
init_waitqueue_head(&ucb->ts_wait);
- error = request_irq(ucb->irq, ucb1400_hard_irq, IRQF_TRIGGER_RISING,
- "UCB1400", ucb);
- if (error) {
- printk(KERN_ERR "ucb1400: unable to grab irq%d: %d\n",
- ucb->irq, error);
- goto err_free_devs;
- }
- printk(KERN_DEBUG "UCB1400: found IRQ %d\n", ucb->irq);
-
input_set_drvdata(ucb->ts_idev, ucb);
- ucb->ts_idev->dev.parent = &dev->dev;
+ ucb->ts_idev->dev.parent = &pdev->dev;
ucb->ts_idev->name = "UCB1400 touchscreen interface";
ucb->ts_idev->id.vendor = ucb1400_reg_read(ucb->ac97,
AC97_VENDOR_ID1);
@@ -398,12 +366,23 @@ static int ucb1400_ts_probe(struct platform_device *dev)
x_res = ucb1400_ts_read_xres(ucb);
y_res = ucb1400_ts_read_yres(ucb);
ucb1400_adc_disable(ucb->ac97);
- printk(KERN_DEBUG "UCB1400: x/y = %d/%d\n", x_res, y_res);
+ dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);
input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);
+ ucb1400_ts_stop(ucb);
+
+ error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "UCB1400", ucb);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to grab irq%d: %d\n", ucb->irq, error);
+ goto err_free_devs;
+ }
+
error = input_register_device(ucb->ts_idev);
if (error)
goto err_free_irq;
@@ -416,56 +395,61 @@ err_free_devs:
input_free_device(ucb->ts_idev);
err:
return error;
-
}
-static int ucb1400_ts_remove(struct platform_device *dev)
+static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
free_irq(ucb->irq, ucb);
input_unregister_device(ucb->ts_idev);
+
return 0;
}
-#ifdef CONFIG_PM
-static int ucb1400_ts_resume(struct platform_device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int ucb1400_ts_suspend(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
-
- if (ucb->ts_task) {
- /*
- * Restart the TS thread to ensure the
- * TS interrupt mode is set up again
- * after sleep.
- */
- ucb->ts_restart = 1;
- wake_up(&ucb->ts_wait);
- }
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+ ucb1400_ts_stop(ucb);
+
+ mutex_unlock(&idev->mutex);
+ return 0;
+}
+
+static int ucb1400_ts_resume(struct device *dev)
+{
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+ ucb1400_ts_start(ucb);
+
+ mutex_unlock(&idev->mutex);
return 0;
}
-#else
-#define ucb1400_ts_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
+ ucb1400_ts_suspend, ucb1400_ts_resume);
+
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = ucb1400_ts_remove,
- .resume = ucb1400_ts_resume,
+ .remove = __devexit_p(ucb1400_ts_remove),
.driver = {
.name = "ucb1400_ts",
+ .owner = THIS_MODULE,
+ .pm = &ucb1400_ts_pm_ops,
},
};
-
-static int __init ucb1400_ts_init(void)
-{
- return platform_driver_register(&ucb1400_ts_driver);
-}
-
-static void __exit ucb1400_ts_exit(void)
-{
- platform_driver_unregister(&ucb1400_ts_driver);
-}
+module_platform_driver(ucb1400_ts_driver);
module_param(adcsync, bool, 0444);
MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");
@@ -479,8 +463,5 @@ MODULE_PARM_DESC(ts_delay_pressure,
"delay between panel setup and pressure read."
" Default = 0us.");
-module_init(ucb1400_ts_init);
-module_exit(ucb1400_ts_exit);
-
MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 73fd6642b68..06cef3ccc63 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -16,6 +16,7 @@
* - JASTEC USB touch controller/DigiTech DTR-02U
* - Zytronic capacitive touchscreen
* - NEXIO/iNexio
+ * - Elo TouchSystems 2700 IntelliTouch
*
* Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
* Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -138,6 +139,7 @@ enum {
DEVTYPE_ZYTRONIC,
DEVTYPE_TC45USB,
DEVTYPE_NEXIO,
+ DEVTYPE_ELO,
};
#define USB_DEVICE_HID_CLASS(vend, prod) \
@@ -239,6 +241,10 @@ static const struct usb_device_id usbtouch_devices[] = {
.driver_info = DEVTYPE_NEXIO},
#endif
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
+#endif
+
{}
};
@@ -945,6 +951,24 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
/*****************************************************************************
+ * ELO part
+ */
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+
+static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
+{
+ dev->x = (pkt[3] << 8) | pkt[2];
+ dev->y = (pkt[5] << 8) | pkt[4];
+ dev->touch = pkt[6] > 0;
+ dev->press = pkt[6];
+
+ return 1;
+}
+#endif
+
+
+/*****************************************************************************
* the different device descriptors
*/
#ifdef MULTI_PACKET
@@ -953,6 +977,18 @@ static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
#endif
static struct usbtouch_device_info usbtouch_dev_info[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ [DEVTYPE_ELO] = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = elo_read_data,
+ },
+#endif
+
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
[DEVTYPE_EGALAX] = {
.min_xc = 0x0,
@@ -1580,18 +1616,7 @@ static struct usb_driver usbtouch_driver = {
.supports_autosuspend = 1,
};
-static int __init usbtouch_init(void)
-{
- return usb_register(&usbtouch_driver);
-}
-
-static void __exit usbtouch_cleanup(void)
-{
- usb_deregister(&usbtouch_driver);
-}
-
-module_init(usbtouch_init);
-module_exit(usbtouch_cleanup);
+module_usb_driver(usbtouch_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 217aa51135c..9396b21d0e8 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -331,19 +331,7 @@ static struct platform_driver w90x900ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90x900ts_init(void)
-{
- return platform_driver_register(&w90x900ts_driver);
-}
-
-static void __exit w90x900ts_exit(void)
-{
- platform_driver_unregister(&w90x900ts_driver);
-}
-
-module_init(w90x900ts_init);
-module_exit(w90x900ts_exit);
+module_platform_driver(w90x900ts_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 9175d49d254..4bc851a9dc3 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -401,18 +401,7 @@ static struct platform_driver wm831x_ts_driver = {
.probe = wm831x_ts_probe,
.remove = __devexit_p(wm831x_ts_remove),
};
-
-static int __init wm831x_ts_init(void)
-{
- return platform_driver_register(&wm831x_ts_driver);
-}
-module_init(wm831x_ts_init);
-
-static void __exit wm831x_ts_exit(void)
-{
- platform_driver_unregister(&wm831x_ts_driver);
-}
-module_exit(wm831x_ts_exit);
+module_platform_driver(wm831x_ts_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index f6328c0cded..bf0869a7a78 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -192,8 +193,8 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev)
else
gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
- wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
- irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+ wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+ irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
WM97XX_GPIO_POL_HIGH,
@@ -223,19 +224,7 @@ static struct platform_driver zylonite_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init zylonite_wm97xx_init(void)
-{
- return platform_driver_register(&zylonite_wm97xx_driver);
-}
-
-static void __exit zylonite_wm97xx_exit(void)
-{
- platform_driver_unregister(&zylonite_wm97xx_driver);
-}
-
-module_init(zylonite_wm97xx_init);
-module_exit(zylonite_wm97xx_exit);
+module_platform_driver(zylonite_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5414253b185..6bea6962f8e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -34,7 +34,9 @@ config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
select PCI_MSI
- select PCI_IOV
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
select IOMMU_API
depends on X86_64 && PCI && ACPI
---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
information to userspace via debugfs.
If unsure, say N.
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+ depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+ select MMU_NOTIFIER
+ ---help---
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+ the PCI PRI and PASID interface.
+
# Intel IOMMU support
config DMAR_TABLE
bool
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2f4448794bc..0e36b4934af 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a8521..cce1f03b889 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
@@ -28,6 +29,8 @@
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
@@ -41,6 +44,24 @@
#define LOOP_TIMEOUT 100000
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
/* A list of preallocated protection domains */
@@ -59,6 +80,9 @@ static struct protection_domain *pt_domain;
static struct iommu_ops amd_iommu_ops;
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -67,6 +91,7 @@ struct iommu_cmd {
};
static void update_domain(struct protection_domain *domain);
+static int __init alloc_passthrough_domain(void);
/****************************************************************************
*
@@ -147,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
return dev->archdata.iommu;
}
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+ static const int caps[] = {
+ PCI_EXT_CAP_ID_ATS,
+ PCI_EXT_CAP_ID_PRI,
+ PCI_EXT_CAP_ID_PASID,
+ };
+ int i, pos;
+
+ for (i = 0; i < 3; ++i) {
+ pos = pci_find_ext_capability(pdev, caps[i]);
+ if (pos == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = get_dev_data(&pdev->dev);
+
+ return dev_data->errata & (1 << erratum) ? true : false;
+}
+
/*
* In this function the list of preallocated protection domains is traversed to
* find the domain for a specific device
@@ -204,6 +256,7 @@ static bool check_device(struct device *dev)
static int iommu_init_device(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
u16 alias;
@@ -228,6 +281,13 @@ static int iommu_init_device(struct device *dev)
dev_data->alias_data = alias_data;
}
+ if (pci_iommuv2_capable(pdev)) {
+ struct amd_iommu *iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ dev_data->iommu_v2 = iommu->is_iommu_v2;
+ }
+
dev->archdata.iommu = dev_data;
return 0;
@@ -317,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
+DECLARE_STATS_COUNTER(complete_ppr);
+DECLARE_STATS_COUNTER(invalidate_iotlb);
+DECLARE_STATS_COUNTER(invalidate_iotlb_all);
+DECLARE_STATS_COUNTER(pri_requests);
+
static struct dentry *stats_dir;
static struct dentry *de_fflush;
@@ -351,6 +416,10 @@ static void amd_iommu_stats_init(void)
amd_iommu_stats_add(&domain_flush_all);
amd_iommu_stats_add(&alloced_io_mem);
amd_iommu_stats_add(&total_map_requests);
+ amd_iommu_stats_add(&complete_ppr);
+ amd_iommu_stats_add(&invalidate_iotlb);
+ amd_iommu_stats_add(&invalidate_iotlb_all);
+ amd_iommu_stats_add(&pri_requests);
}
#endif
@@ -365,8 +434,8 @@ static void dump_dte_entry(u16 devid)
{
int i;
- for (i = 0; i < 8; ++i)
- pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+ for (i = 0; i < 4; ++i)
+ pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
amd_iommu_dev_table[devid].data[i]);
}
@@ -461,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+{
+ struct amd_iommu_fault fault;
+ volatile u64 *raw;
+ int i;
+
+ INC_STATS_COUNTER(pri_requests);
+
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is written to
+ * memory. If this happens we need to wait for the entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+ pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+ return;
+ }
+
+ fault.address = raw[1];
+ fault.pasid = PPR_PASID(raw[0]);
+ fault.device_id = PPR_DEVID(raw[0]);
+ fault.tag = PPR_TAG(raw[0]);
+ fault.flags = PPR_FLAGS(raw[0]);
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0;
+
+ atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+ unsigned long flags;
+ u32 head, tail;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ while (head != tail) {
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, head);
+
+ /* Update and refresh ring-buffer state */
+ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+ }
+
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu)
+ for_each_iommu(iommu) {
iommu_poll_events(iommu);
+ iommu_poll_ppr_log(iommu);
+ }
return IRQ_HANDLED;
}
@@ -595,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+ u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = pasid & PASID_MASK;
+ cmd->data[1] = domid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int qdep, u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = devid;
+ cmd->data[0] |= (pasid & 0xff) << 16;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ cmd->data[3] = upper_32_bits(address);
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int status, int tag, bool gn)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->data[0] = devid;
+ if (gn) {
+ cmd->data[1] = pasid & PASID_MASK;
+ cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
+ cmd->data[3] = tag & 0x1ff;
+ cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+ CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
static void build_inv_all(struct iommu_cmd *cmd)
{
memset(cmd, 0, sizeof(*cmd));
@@ -1496,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain)
domain->pt_root = NULL;
}
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_page((unsigned long)ptr);
+ }
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_gcr3_tbl_level1(ptr);
+ }
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+ if (domain->glx == 2)
+ free_gcr3_tbl_level2(domain->gcr3_tbl);
+ else if (domain->glx == 1)
+ free_gcr3_tbl_level1(domain->gcr3_tbl);
+ else if (domain->glx != 0)
+ BUG();
+
+ free_page((unsigned long)domain->gcr3_tbl);
+}
+
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
@@ -1582,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain)
static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
- u64 pte_root = virt_to_phys(domain->pt_root);
- u32 flags = 0;
+ u64 pte_root = 0;
+ u64 flags = 0;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ pte_root = virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
+ flags = amd_iommu_dev_table[devid].data[1];
+
if (ats)
flags |= DTE_FLAG_IOTLB;
- amd_iommu_dev_table[devid].data[3] |= flags;
- amd_iommu_dev_table[devid].data[2] = domain->id;
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ u64 gcr3 = __pa(domain->gcr3_tbl);
+ u64 glx = domain->glx;
+ u64 tmp;
+
+ pte_root |= DTE_FLAG_GV;
+ pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+ /* First mask out possible old values for GCR3 table */
+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+ flags &= ~tmp;
+
+ tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ flags &= ~tmp;
+
+ /* Encode GCR3 table into DTE */
+ tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+ pte_root |= tmp;
+
+ tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+ flags |= tmp;
+
+ tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+ flags |= tmp;
+ }
+
+ flags &= ~(0xffffUL);
+ flags |= domain->id;
+
+ amd_iommu_dev_table[devid].data[1] = flags;
+ amd_iommu_dev_table[devid].data[0] = pte_root;
}
static void clear_dte_entry(u16 devid)
@@ -1603,7 +1872,6 @@ static void clear_dte_entry(u16 devid)
/* remove entry from the device table seen by the hardware */
amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
amd_iommu_dev_table[devid].data[1] = 0;
- amd_iommu_dev_table[devid].data[2] = 0;
amd_iommu_apply_erratum_63(devid);
}
@@ -1696,6 +1964,93 @@ out_unlock:
return ret;
}
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+ pci_disable_ats(pdev);
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control |= PCI_PRI_CTRL_RESET;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+ bool reset_enable;
+ int reqs, ret;
+
+ /* FIXME: Hardcode number of outstanding requests for now */
+ reqs = 32;
+ if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+ reqs = 1;
+ reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+ /* Only allow access to user-accessible pages */
+ ret = pci_enable_pasid(pdev, 0);
+ if (ret)
+ goto out_err;
+
+ /* First reset the PRI state of the device */
+ ret = pci_reset_pri(pdev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PRI */
+ ret = pci_enable_pri(pdev, reqs);
+ if (ret)
+ goto out_err;
+
+ if (reset_enable) {
+ ret = pri_reset_while_enabled(pdev);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+
+ return ret;
+}
+
+/* FIXME: Move this to PCI code */
+#define PCI_PRI_TLP_OFF (1 << 2)
+
+bool pci_pri_tlp_required(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+
+ return (control & PCI_PRI_TLP_OFF) ? true : false;
+}
+
/*
* If a device is not yet associated with a domain, this function does
* assigns it visible for the hardware
@@ -1710,7 +2065,18 @@ static int attach_device(struct device *dev,
dev_data = get_dev_data(dev);
- if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ if (!dev_data->iommu_v2 || !dev_data->passthrough)
+ return -EINVAL;
+
+ if (pdev_iommuv2_enable(pdev) != 0)
+ return -EINVAL;
+
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ } else if (amd_iommu_iotlb_sup &&
+ pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
dev_data->ats.enabled = true;
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
}
@@ -1760,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data)
* passthrough domain if it is detached from any other domain.
* Make sure we can deassign from the pt_domain itself.
*/
- if (iommu_pass_through &&
+ if (dev_data->passthrough &&
(dev_data->domain == NULL && domain != pt_domain))
__attach_device(dev_data, pt_domain);
}
@@ -1770,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
*/
static void detach_device(struct device *dev)
{
+ struct protection_domain *domain;
struct iommu_dev_data *dev_data;
unsigned long flags;
dev_data = get_dev_data(dev);
+ domain = dev_data->domain;
/* lock device table */
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- if (dev_data->ats.enabled) {
+ if (domain->flags & PD_IOMMUV2_MASK)
+ pdev_iommuv2_disable(to_pci_dev(dev));
+ else if (dev_data->ats.enabled)
pci_disable_ats(to_pci_dev(dev));
- dev_data->ats.enabled = false;
- }
+
+ dev_data->ats.enabled = false;
}
/*
@@ -1818,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev)
static int device_change_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct device *dev = data;
- u16 devid;
- struct protection_domain *domain;
struct dma_ops_domain *dma_domain;
+ struct protection_domain *domain;
+ struct iommu_dev_data *dev_data;
+ struct device *dev = data;
struct amd_iommu *iommu;
unsigned long flags;
+ u16 devid;
if (!check_device(dev))
return 0;
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
+ dev_data = get_dev_data(dev);
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -1838,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb,
if (!domain)
goto out;
- if (iommu_pass_through)
+ if (dev_data->passthrough)
break;
detach_device(dev);
break;
@@ -2434,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
*/
static void prealloc_protection_domains(void)
{
- struct pci_dev *dev = NULL;
+ struct iommu_dev_data *dev_data;
struct dma_ops_domain *dma_dom;
+ struct pci_dev *dev = NULL;
u16 devid;
for_each_pci_dev(dev) {
@@ -2444,6 +2817,16 @@ static void prealloc_protection_domains(void)
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
+ /* Make sure passthrough domain is allocated */
+ alloc_passthrough_domain();
+ dev_data->passthrough = true;
+ attach_device(&dev->dev, pt_domain);
+ pr_info("AMD-Vi: Using passthrough domain for device %s\n",
+ dev_name(&dev->dev));
+ }
+
/* Is there already any domain for it? */
if (domain_for_device(&dev->dev))
continue;
@@ -2474,6 +2857,7 @@ static struct dma_map_ops amd_iommu_dma_ops = {
static unsigned device_dma_ops_init(void)
{
+ struct iommu_dev_data *dev_data;
struct pci_dev *pdev = NULL;
unsigned unhandled = 0;
@@ -2483,7 +2867,12 @@ static unsigned device_dma_ops_init(void)
continue;
}
- pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ dev_data = get_dev_data(&pdev->dev);
+
+ if (!dev_data->passthrough)
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ pdev->dev.archdata.dma_ops = &nommu_dma_ops;
}
return unhandled;
@@ -2610,6 +2999,20 @@ out_err:
return NULL;
}
+static int __init alloc_passthrough_domain(void)
+{
+ if (pt_domain != NULL)
+ return 0;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode = PAGE_MODE_NONE;
+
+ return 0;
+}
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
struct protection_domain *domain;
@@ -2623,6 +3026,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
if (!domain->pt_root)
goto out_free;
+ domain->iommu_domain = dom;
+
dom->priv = domain;
return 0;
@@ -2645,7 +3050,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- free_pagetable(domain);
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
protection_domain_free(domain);
@@ -2702,13 +3111,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- unsigned long page_size = 0x1000UL << gfp_order;
struct protection_domain *domain = dom->priv;
int prot = 0;
int ret;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
+
if (iommu_prot & IOMMU_READ)
prot |= IOMMU_PROT_IR;
if (iommu_prot & IOMMU_WRITE)
@@ -2721,13 +3132,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
return ret;
}
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ size_t page_size)
{
struct protection_domain *domain = dom->priv;
- unsigned long page_size, unmap_size;
+ size_t unmap_size;
- page_size = 0x1000UL << gfp_order;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +3147,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
domain_flush_tlb_pde(domain);
- return get_order(unmap_size);
+ return unmap_size;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2746,6 +3158,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
phys_addr_t paddr;
u64 *pte, __pte;
+ if (domain->mode == PAGE_MODE_NONE)
+ return iova;
+
pte = fetch_pte(domain, iova);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2773,6 +3188,26 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid;
+
+ if (!dev_data)
+ return -ENODEV;
+
+ if (pdev->is_virtfn || !iommu_group_mf)
+ devid = dev_data->devid;
+ else
+ devid = calc_devid(pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+
+ *groupid = amd_iommu_alias_table[devid];
+
+ return 0;
+}
+
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
@@ -2782,6 +3217,8 @@ static struct iommu_ops amd_iommu_ops = {
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
+ .device_group = amd_iommu_device_group,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
/*****************************************************************************
@@ -2796,21 +3233,23 @@ static struct iommu_ops amd_iommu_ops = {
int __init amd_iommu_init_passthrough(void)
{
- struct amd_iommu *iommu;
+ struct iommu_dev_data *dev_data;
struct pci_dev *dev = NULL;
+ struct amd_iommu *iommu;
u16 devid;
+ int ret;
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode |= PAGE_MODE_NONE;
+ ret = alloc_passthrough_domain();
+ if (ret)
+ return ret;
for_each_pci_dev(dev) {
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ dev_data->passthrough = true;
+
devid = get_device_id(&dev->dev);
iommu = amd_iommu_rlookup_table[devid];
@@ -2820,7 +3259,375 @@ int __init amd_iommu_init_passthrough(void)
attach_device(&dev->dev, pt_domain);
}
+ amd_iommu_stats_init();
+
pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
return 0;
}
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /* Update data structure */
+ domain->mode = PAGE_MODE_NONE;
+ domain->updated = true;
+
+ /* Make changes visible to IOMMUs */
+ update_domain(domain);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ free_pagetable(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int levels, ret;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ /* Number of GCR3 table levels required */
+ for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+ levels += 1;
+
+ if (levels > amd_iommu_max_glx_val)
+ return -EINVAL;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /*
+ * Spare us the sanity checks on whether the devices already in the
+ * domain support IOMMUv2. Just force that the domain has no
+ * devices attached when it is switched into IOMMUv2 mode.
+ */
+ ret = -EBUSY;
+ if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ goto out;
+
+ ret = -ENOMEM;
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ goto out;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+ domain->updated = true;
+
+ update_domain(domain);
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
+
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+ u64 address, bool size)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int i, ret;
+
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return -EINVAL;
+
+ build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+ /*
+ * IOMMU TLB needs to be flushed before Device TLB to
+ * prevent device TLB refill from IOMMU TLB
+ */
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (domain->dev_iommu[i] == 0)
+ continue;
+
+ ret = iommu_queue_command(amd_iommus[i], &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until IOMMU TLB flushes are complete */
+ domain_flush_complete(domain);
+
+ /* Now flush device TLBs */
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu;
+ int qdep;
+
+ BUG_ON(!dev_data->ats.enabled);
+
+ qdep = dev_data->ats.qdep;
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+ qdep, address, size);
+
+ ret = iommu_queue_command(iommu, &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until all device TLBs are flushed */
+ domain_flush_complete(domain);
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+ u64 address)
+{
+ INC_STATS_COUNTER(invalidate_iotlb);
+
+ return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_page(domain, pasid, address);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+ INC_STATS_COUNTER(invalidate_iotlb_all);
+
+ return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_tlb(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = __pa(root) | GCR3_VALID;
+ }
+
+ root = __va(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+ unsigned long cr3)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+ if (pte == NULL)
+ return 0;
+
+ *pte = 0;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __set_gcr3(domain, pasid, cr3);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __clear_gcr3(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ INC_STATS_COUNTER(complete_ppr);
+
+ dev_data = get_dev_data(&pdev->dev);
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+ struct protection_domain *domain;
+
+ domain = get_domain(&pdev->dev);
+ if (IS_ERR(domain))
+ return NULL;
+
+ /* Only return IOMMUv2 domains */
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return NULL;
+
+ return domain->iommu_domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ dev_data = get_dev_data(&pdev->dev);
+ dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
+
+int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info)
+{
+ int max_pasids;
+ int pos;
+
+ if (pdev == NULL || info == NULL)
+ return -EINVAL;
+
+ if (!amd_iommu_v2_supported())
+ return -EINVAL;
+
+ memset(info, 0, sizeof(*info));
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (pos) {
+ int features;
+
+ max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+ max_pasids = min(max_pasids, (1 << 20));
+
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+ info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+ features = pci_pasid_features(pdev);
+ if (features & PCI_PASID_CAP_EXEC)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+ if (features & PCI_PASID_CAP_PRIV)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
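For reference, a minimal sketch of how a peripheral driver might consume amd_iommu_device_info() before opting into IOMMUv2. The caller function and its policy are illustrative assumptions, not part of this patch; only the flags and max_pasids fields and the AMD_IOMMU_DEVICE_FLAG_* names come from the code above (declared via <linux/amd-iommu.h>).

	/* Illustrative caller - not part of this patch */
	static int mydrv_check_iommuv2(struct pci_dev *pdev)
	{
		struct amd_iommu_device_info info;
		int ret;

		ret = amd_iommu_device_info(pdev, &info);
		if (ret)
			return ret;

		/* Demand paging needs ATS, PRI and PASID support together */
		if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) ||
		    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) ||
		    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
			return -ENODEV;

		/* info.max_pasids bounds how many PASIDs may later be bound */
		return info.max_pasids;
	}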
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d2410f420..bdea288dc18 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
+#include <linux/export.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
/*
* The ACPI table parsing functions set this variable on an error
*/
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+ u32 ctrl;
+
+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~CTRL_INV_TO_MASK;
+ ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+ writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
+/* allocates the memory where the IOMMU will log its PPR requests to */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+ iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(PPR_LOG_SIZE));
+
+ if (iommu->ppr_log == NULL)
+ return NULL;
+
+ return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+ if (iommu->ppr_log == NULL)
+ return;
+
+ free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+ if (!iommu_feature(iommu, FEATURE_GT))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+ amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}
static int get_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+ return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
iommu->features = ((u64)high << 32) | low;
+ if (iommu_feature(iommu, FEATURE_GT)) {
+ int glxval;
+ u32 pasids;
+ u64 shift;
+
+ shift = iommu->features & FEATURE_PASID_MASK;
+ shift >>= FEATURE_PASID_SHIFT;
+ pasids = (1 << shift);
+
+ amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+ glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval >>= FEATURE_GLXVAL_SHIFT;
+
+ if (amd_iommu_max_glx_val == -1)
+ amd_iommu_max_glx_val = glxval;
+ else
+ amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+ }
+
+ if (iommu_feature(iommu, FEATURE_GT) &&
+ iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->is_iommu_v2 = true;
+ amd_iommu_v2_present = true;
+ }
+
if (!is_rd890_iommu(iommu->dev))
return;
@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_command_buffer(iommu);
free_event_buffer(iommu);
+ free_ppr_log(iommu);
iommu_unmap_mmio_space(iommu);
}
@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
init_iommu_from_acpi(iommu, h);
init_iommu_devices(iommu);
+ if (iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->ppr_log = alloc_ppr_log(iommu);
+ if (!iommu->ppr_log)
+ return -ENOMEM;
+ }
+
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
iommu->int_enabled = true;
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+ if (iommu->ppr_log != NULL)
+ iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
return 0;
}
@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
* make IOMMU memory accesses cache coherent
*/
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+ /* Set IOTLB invalidation timeout to 1s */
+ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
+ iommu_enable_ppr_log(iommu);
+ iommu_enable_gt(iommu);
iommu_set_exclusion_range(iommu);
iommu_init_msi(iommu);
iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)
/* re-load the hardware */
enable_iommus();
-
- /*
- * we have to flush after the IOMMUs are enabled because a
- * disabled IOMMU will never execute the commands we send
- */
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
}
static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
amd_iommu_unmap_flush = true;
if (strncmp(str, "off", 3) == 0)
amd_iommu_disabled = true;
+ if (strncmp(str, "force_isolation", 15) == 0)
+ amd_iommu_force_isolation = true;
}
return 1;
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
0,
0);
+
+bool amd_iommu_v2_supported(void)
+{
+ return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 7ffaa64410b..1a7f41c6cc6 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag);
+
#ifndef CONFIG_AMD_IOMMU_STATS
static inline void amd_iommu_stats_init(void) { }
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 5b9c5075e81..2452f3b7173 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -69,11 +69,14 @@
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
+#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
+#define MMIO_PPR_HEAD_OFFSET 0x2030
+#define MMIO_PPR_TAIL_OFFSET 0x2038
/* Extended Feature Bits */
@@ -87,8 +90,17 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
+#define FEATURE_PASID_SHIFT 32
+#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT 14
+#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK 0x000fffff
+
/* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
@@ -115,6 +127,7 @@
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
+#define CONTROL_INV_TIMEOUT 0x05ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
@@ -122,18 +135,34 @@
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
+#define CONTROL_PPR_EN 0x0fULL
+#define CONTROL_GT_EN 0x10ULL
+
+#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE 0
+#define CTRL_INV_TO_1MS 1
+#define CTRL_INV_TO_10MS 2
+#define CTRL_INV_TO_100MS 3
+#define CTRL_INV_TO_1S 4
+#define CTRL_INV_TO_10S 5
+#define CTRL_INV_TO_100S 6
/* command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08
#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04
+
+#define PPR_STATUS_MASK 0xf
+#define PPR_STATUS_SHIFT 12
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
@@ -165,6 +194,23 @@
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES 512
+#define PPR_LOG_SIZE_SHIFT 56
+#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE 16
+#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x) ((x) & 0xffffULL)
+#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT 0x01
+
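To make the PPR log field layout concrete, here is a short illustrative decode of one log entry using the macros above. The helper is not part of this patch, and the placement of these fields in the first 64-bit word of the 16-byte entry is an assumption for the example. PPR_PASID() stitches the 20-bit PASID back together from its two fields (bits 16-31 and bits 42-45).

	/* Illustrative decode of a PPR log entry (fields assumed in raw[0]) */
	static void decode_ppr_entry(u64 *raw)
	{
		u16 devid = PPR_DEVID(raw[0]);	/* requesting PCI device        */
		u16 tag   = PPR_TAG(raw[0]);	/* tag to echo in the PPR reply */
		u32 pasid = PPR_PASID(raw[0]);	/* (PASID2 << 16) | PASID1      */
		u16 flags = PPR_FLAGS(raw[0]);	/* PPR_FAULT_* bits             */

		pr_debug("PPR: devid=%#x pasid=%#x tag=%#x flags=%#x\n",
			 devid, pasid, tag, flags);
	}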
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -230,7 +276,24 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
-#define DTE_FLAG_IOTLB 0x01
+#define DTE_FLAG_IOTLB (0x01UL << 32)
+#define DTE_FLAG_GV (0x01ULL << 55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
+
+#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A 0
+#define DTE_GCR3_INDEX_B 1
+#define DTE_GCR3_INDEX_C 1
+
+#define DTE_GCR3_SHIFT_A 58
+#define DTE_GCR3_SHIFT_B 16
+#define DTE_GCR3_SHIFT_C 43
+
+#define GCR3_VALID 0x01ULL
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
@@ -257,6 +320,7 @@
domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
+#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -285,6 +349,29 @@ extern bool amd_iommu_iotlb_sup;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+ u64 address; /* IO virtual address of the fault */
+ u32 pasid; /* Address space identifier */
+ u16 device_id; /* Originating PCI device id */
+ u16 tag; /* PPR tag */
+ u16 flags; /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC (1 << 1)
+#define PPR_FAULT_READ (1 << 2)
+#define PPR_FAULT_WRITE (1 << 5)
+#define PPR_FAULT_USER (1 << 6)
+#define PPR_FAULT_RSVD (1 << 7)
+#define PPR_FAULT_GN (1 << 8)
+
+struct iommu_domain;
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -297,11 +384,15 @@ struct protection_domain {
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
+ int glx; /* Number of levels for GCR3 table */
+ u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
void *priv; /* private data */
+ struct iommu_domain *iommu_domain; /* Pointer to generic
+ domain structure */
};
@@ -315,10 +406,15 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
u16 devid; /* PCI Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ bool passthrough; /* Default for device is pt_domain */
struct {
bool enabled;
int qdep;
} ats; /* ATS state */
+ bool pri_tlp; /* PASID TLB required for
+ PPR completions */
+ u32 errata; /* Bitmap for errata to apply */
};
/*
@@ -399,6 +495,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* IOMMUv2 */
+ bool is_iommu_v2;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -431,6 +530,9 @@ struct amd_iommu {
/* MSI number for event interrupt */
u16 evt_msi_num;
+ /* Base of the PPR log, if present */
+ u8 *ppr_log;
+
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
@@ -484,7 +586,7 @@ extern struct list_head amd_iommu_pd_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u32 data[8];
+ u64 data[4];
};
/*
@@ -549,6 +651,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
*/
extern bool amd_iommu_unmap_flush;
+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
/* takes bus and device/function and returns the device id
* FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
new file mode 100644
index 00000000000..8add9f125d3
--- /dev/null
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu_proto.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+
+#define MAX_DEVICES 0x10000
+#define PRI_QUEUE_SIZE 512
+
+struct pri_queue {
+ atomic_t inflight;
+ bool finish;
+ int status;
+};
+
+struct pasid_state {
+ struct list_head list; /* For global state-list */
+ atomic_t count; /* Reference count */
+ struct task_struct *task; /* Task bound to this PASID */
+ struct mm_struct *mm; /* mm_struct for the faults */
+ struct mmu_notifier mn; /* mmu_notifier handle */
+ struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
+ struct device_state *device_state; /* Link to our device_state */
+ int pasid; /* PASID index */
+ spinlock_t lock; /* Protect pri_queues */
+ wait_queue_head_t wq; /* To wait for count == 0 */
+};
+
+struct device_state {
+ atomic_t count;
+ struct pci_dev *pdev;
+ struct pasid_state **states;
+ struct iommu_domain *domain;
+ int pasid_levels;
+ int max_pasids;
+ amd_iommu_invalid_ppr_cb inv_ppr_cb;
+ amd_iommu_invalidate_ctx inv_ctx_cb;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+};
+
+struct fault {
+ struct work_struct work;
+ struct device_state *dev_state;
+ struct pasid_state *state;
+ struct mm_struct *mm;
+ u64 address;
+ u16 devid;
+ u16 pasid;
+ u16 tag;
+ u16 finish;
+ u16 flags;
+};
+
+struct device_state **state_table;
+static spinlock_t state_lock;
+
+/* List and lock for all pasid_states */
+static LIST_HEAD(pasid_state_list);
+static DEFINE_SPINLOCK(ps_lock);
+
+static struct workqueue_struct *iommu_wq;
+
+/*
+ * Empty page table - Used between
+ * mmu_notifier_invalidate_range_start and
+ * mmu_notifier_invalidate_range_end
+ */
+static u64 *empty_page_table;
+
+static void free_pasid_states(struct device_state *dev_state);
+static void unbind_pasid(struct device_state *dev_state, int pasid);
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
+
+static u16 device_id(struct pci_dev *pdev)
+{
+ u16 devid;
+
+ devid = pdev->bus->number;
+ devid = (devid << 8) | pdev->devfn;
+
+ return devid;
+}
+
+static struct device_state *get_device_state(u16 devid)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ dev_state = state_table[devid];
+ if (dev_state != NULL)
+ atomic_inc(&dev_state->count);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+ /*
+ * First detach the device from its domain - no more PRI requests will arrive
+ * from that device after it is unbound from the IOMMUv2 domain.
+ */
+ iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+
+ /* Everything is down now, free the IOMMUv2 domain */
+ iommu_domain_free(dev_state->domain);
+
+ /* Finally get rid of the device-state */
+ kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+ if (atomic_dec_and_test(&dev_state->count))
+ wake_up(&dev_state->wq);
+}
+
+static void put_device_state_wait(struct device_state *dev_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (!atomic_dec_and_test(&dev_state->count))
+ schedule();
+ finish_wait(&dev_state->wq, &wait);
+
+ free_device_state(dev_state);
+}
+
+static struct notifier_block profile_nb = {
+ .notifier_call = task_exit,
+};
+
+static void link_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ list_add_tail(&pasid_state->list, &pasid_state_list);
+ spin_unlock(&ps_lock);
+}
+
+static void __unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ list_del(&pasid_state->list);
+}
+
+static void unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ __unlink_pasid_state(pasid_state);
+ spin_unlock(&ps_lock);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+ int pasid, bool alloc)
+{
+ struct pasid_state **root, **ptr;
+ int level, index;
+
+ level = dev_state->pasid_levels;
+ root = dev_state->states;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ ptr = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (*ptr == NULL) {
+ if (!alloc)
+ return NULL;
+
+ *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (*ptr == NULL)
+ return NULL;
+ }
+
+ root = (struct pasid_state **)*ptr;
+ level -= 1;
+ }
+
+ return ptr;
+}
+
+static int set_pasid_state(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ ret = -ENOMEM;
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ if (*ptr != NULL)
+ goto out_unlock;
+
+ *ptr = pasid_state;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ *ptr = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+ int pasid)
+{
+ struct pasid_state **ptr, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = *ptr;
+ if (ret)
+ atomic_inc(&ret->count);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+ kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+ if (atomic_dec_and_test(&pasid_state->count)) {
+ put_device_state(pasid_state->device_state);
+ wake_up(&pasid_state->wq);
+ }
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ if (atomic_dec_and_test(&pasid_state->count))
+ put_device_state(pasid_state->device_state);
+ else
+ schedule();
+
+ finish_wait(&pasid_state->wq, &wait);
+ mmput(pasid_state->mm);
+ free_pasid_state(pasid_state);
+}
+
+static void __unbind_pasid(struct pasid_state *pasid_state)
+{
+ struct iommu_domain *domain;
+
+ domain = pasid_state->device_state->domain;
+
+ amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+ clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
+
+ /* Make sure no more pending faults are in the queue */
+ flush_workqueue(iommu_wq);
+
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+ put_pasid_state(pasid_state); /* Reference taken in bind() function */
+}
+
+static void unbind_pasid(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state *pasid_state;
+
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ return;
+
+ unlink_pasid_state(pasid_state);
+ __unbind_pasid(pasid_state);
+ put_pasid_state_wait(pasid_state); /* Reference taken in this function */
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ free_page((unsigned long)tbl[i]);
+ }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+ struct pasid_state **ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ ptr = (struct pasid_state **)tbl[i];
+ free_pasid_states_level1(ptr);
+ }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+ struct pasid_state *pasid_state;
+ int i;
+
+ for (i = 0; i < dev_state->max_pasids; ++i) {
+ pasid_state = get_pasid_state(dev_state, i);
+ if (pasid_state == NULL)
+ continue;
+
+ put_pasid_state(pasid_state);
+ unbind_pasid(dev_state, i);
+ }
+
+ if (dev_state->pasid_levels == 2)
+ free_pasid_states_level2(dev_state->states);
+ else if (dev_state->pasid_levels == 1)
+ free_pasid_states_level1(dev_state->states);
+ else if (dev_state->pasid_levels != 0)
+ BUG();
+
+ free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct pasid_state, mn);
+}
+
+static void __mn_flush_page(struct mmu_notifier *mn,
+ unsigned long address)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
+}
+
+static int mn_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+
+ return 0;
+}
+
+static void mn_change_pte(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(empty_page_table));
+}
+
+static void mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(pasid_state->mm->pgd));
+}
+
+static struct mmu_notifier_ops iommu_mn = {
+ .clear_flush_young = mn_clear_flush_young,
+ .change_pte = mn_change_pte,
+ .invalidate_page = mn_invalidate_page,
+ .invalidate_range_start = mn_invalidate_range_start,
+ .invalidate_range_end = mn_invalidate_range_end,
+};
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+ u16 tag, int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ pasid_state->pri[tag].status = status;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ u16 tag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+ pasid_state->pri[tag].finish) {
+ amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+ pasid_state->pri[tag].status, tag);
+ pasid_state->pri[tag].finish = false;
+ pasid_state->pri[tag].status = PPR_SUCCESS;
+ }
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void do_fault(struct work_struct *work)
+{
+ struct fault *fault = container_of(work, struct fault, work);
+ int npages, write;
+ struct page *page;
+
+ write = !!(fault->flags & PPR_FAULT_WRITE);
+
+ npages = get_user_pages(fault->state->task, fault->state->mm,
+ fault->address, 1, write, 0, &page, NULL);
+
+ if (npages == 1) {
+ put_page(page);
+ } else if (fault->dev_state->inv_ppr_cb) {
+ int status;
+
+ status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+ fault->pasid,
+ fault->address,
+ fault->flags);
+ switch (status) {
+ case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+ set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_INVALID:
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_FAIL:
+ set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+ break;
+ default:
+ BUG();
+ }
+ } else {
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ }
+
+ finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+ put_pasid_state(fault->state);
+
+ kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct amd_iommu_fault *iommu_fault;
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ unsigned long flags;
+ struct fault *fault;
+ bool finish;
+ u16 tag;
+ int ret;
+
+ iommu_fault = data;
+ tag = iommu_fault->tag & 0x1ff;
+ finish = (iommu_fault->tag >> 9) & 1;
+
+ ret = NOTIFY_DONE;
+ dev_state = get_device_state(iommu_fault->device_id);
+ if (dev_state == NULL)
+ goto out;
+
+ pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+ if (pasid_state == NULL) {
+ /* We know the device but not the PASID -> send INVALID */
+ amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+ PPR_INVALID, tag);
+ goto out_drop_state;
+ }
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ atomic_inc(&pasid_state->pri[tag].inflight);
+ if (finish)
+ pasid_state->pri[tag].finish = true;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+ fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+ if (fault == NULL) {
+ /* We are OOM - send success and let the device re-fault */
+ finish_pri_tag(dev_state, pasid_state, tag);
+ goto out_drop_state;
+ }
+
+ fault->dev_state = dev_state;
+ fault->address = iommu_fault->address;
+ fault->state = pasid_state;
+ fault->tag = tag;
+ fault->finish = finish;
+ fault->flags = iommu_fault->flags;
+ INIT_WORK(&fault->work, do_fault);
+
+ queue_work(iommu_wq, &fault->work);
+
+ ret = NOTIFY_OK;
+
+out_drop_state:
+ put_device_state(dev_state);
+
+out:
+ return ret;
+}
+
+static struct notifier_block ppr_nb = {
+ .notifier_call = ppr_notifier,
+};
+
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct pasid_state *pasid_state;
+ struct task_struct *task;
+
+ task = data;
+
+ /*
+ * Using this notifier is a hack - but there is no other choice
+ * at the moment. What I really want is a sleeping notifier that
+ * is called when an MM goes down. But such a notifier doesn't
+ * exist yet. The notifier needs to sleep because it has to make
+ * sure that the device does not use the PASID and the address
+ * space anymore before it is destroyed. This includes waiting
+ * for pending PRI requests to pass the workqueue. The
+ * MMU-Notifiers would be a good fit, but they use RCU and so
+ * they are not allowed to sleep. Let's see how we can solve this
+ * in a more intelligent way in the future.
+ */
+again:
+ spin_lock(&ps_lock);
+ list_for_each_entry(pasid_state, &pasid_state_list, list) {
+ struct device_state *dev_state;
+ int pasid;
+
+ if (pasid_state->task != task)
+ continue;
+
+ /* Drop Lock and unbind */
+ spin_unlock(&ps_lock);
+
+ dev_state = pasid_state->device_state;
+ pasid = pasid_state->pasid;
+
+ if (pasid_state->device_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid);
+
+ unbind_pasid(dev_state, pasid);
+
+ /* Task may be in the list multiple times */
+ goto again;
+ }
+ spin_unlock(&ps_lock);
+
+ return NOTIFY_OK;
+}
+
+int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+ struct task_struct *task)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ u16 devid;
+ int ret;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+
+ if (dev_state == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ ret = -ENOMEM;
+ pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+ if (pasid_state == NULL)
+ goto out;
+
+ atomic_set(&pasid_state->count, 1);
+ init_waitqueue_head(&pasid_state->wq);
+ pasid_state->task = task;
+ pasid_state->mm = get_task_mm(task);
+ pasid_state->device_state = dev_state;
+ pasid_state->pasid = pasid;
+ pasid_state->mn.ops = &iommu_mn;
+
+ if (pasid_state->mm == NULL)
+ goto out_free;
+
+ mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
+
+ ret = set_pasid_state(dev_state, pasid_state, pasid);
+ if (ret)
+ goto out_unregister;
+
+ ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+ __pa(pasid_state->mm->pgd));
+ if (ret)
+ goto out_clear_state;
+
+ link_pasid_state(pasid_state);
+
+ return 0;
+
+out_clear_state:
+ clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+out_free:
+ free_pasid_state(pasid_state);
+
+out:
+ put_device_state(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+{
+ struct device_state *dev_state;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+ if (dev_state == NULL)
+ return;
+
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ unbind_pasid(dev_state, pasid);
+
+out:
+ put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ int ret, tmp;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ devid = device_id(pdev);
+
+ dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+ if (dev_state == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_state->lock);
+ init_waitqueue_head(&dev_state->wq);
+ dev_state->pdev = pdev;
+
+ tmp = pasids;
+ for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+ dev_state->pasid_levels += 1;
+
+ atomic_set(&dev_state->count, 1);
+ dev_state->max_pasids = pasids;
+
+ ret = -ENOMEM;
+ dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+ if (dev_state->states == NULL)
+ goto out_free_dev_state;
+
+ dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+ if (dev_state->domain == NULL)
+ goto out_free_states;
+
+ amd_iommu_domain_direct_map(dev_state->domain);
+
+ ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+ if (ret)
+ goto out_free_domain;
+
+ ret = iommu_attach_device(dev_state->domain, &pdev->dev);
+ if (ret != 0)
+ goto out_free_domain;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (state_table[devid] != NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ ret = -EBUSY;
+ goto out_free_domain;
+ }
+
+ state_table[devid] = dev_state;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+
+out_free_domain:
+ iommu_domain_free(dev_state->domain);
+
+out_free_states:
+ free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+ kfree(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
+
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ dev_state = state_table[devid];
+ if (dev_state == NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+
+ state_table[devid] = NULL;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ put_device_state_wait(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
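Taken together, amd_iommu_init_device(), amd_iommu_bind_pasid(), amd_iommu_unbind_pasid() and amd_iommu_free_device() form the lifecycle a device driver would follow. The sketch below is illustrative only: the driver name, the PASID count of 16 and the use of PASID 0 for the current task are placeholders, not anything mandated by this patch.

	/* Illustrative lifecycle - not part of this patch */
	static int mydrv_enable_demand_paging(struct pci_dev *pdev)
	{
		int ret;

		/* Allocate IOMMUv2 state for up to 16 PASIDs on this device */
		ret = amd_iommu_init_device(pdev, 16);
		if (ret)
			return ret;

		/* Bind PASID 0 to the current task's address space */
		ret = amd_iommu_bind_pasid(pdev, 0, current);
		if (ret)
			amd_iommu_free_device(pdev);

		return ret;
	}

	static void mydrv_disable_demand_paging(struct pci_dev *pdev)
	{
		amd_iommu_unbind_pasid(pdev, 0);
		amd_iommu_free_device(pdev);	/* drops any remaining PASID state */
	}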
+
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ppr_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ctx_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+static int __init amd_iommu_v2_init(void)
+{
+ size_t state_table_size;
+ int ret;
+
+ pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>");
+
+ spin_lock_init(&state_lock);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(state_table_size));
+ if (state_table == NULL)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ iommu_wq = create_workqueue("amd_iommu_v2");
+ if (iommu_wq == NULL)
+ goto out_free;
+
+ ret = -ENOMEM;
+ empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
+ if (empty_page_table == NULL)
+ goto out_destroy_wq;
+
+ amd_iommu_register_ppr_notifier(&ppr_nb);
+ profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
+
+ return 0;
+
+out_destroy_wq:
+ destroy_workqueue(iommu_wq);
+
+out_free:
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ return ret;
+}
+
+static void __exit amd_iommu_v2_exit(void)
+{
+ struct device_state *dev_state;
+ size_t state_table_size;
+ int i;
+
+ profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
+ amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+ flush_workqueue(iommu_wq);
+
+ /*
+ * The loop below might call flush_workqueue(), so call
+ * destroy_workqueue() after it
+ */
+ for (i = 0; i < MAX_DEVICES; ++i) {
+ dev_state = get_device_state(i);
+
+ if (dev_state == NULL)
+ continue;
+
+ WARN_ON_ONCE(1);
+
+ put_device_state(dev_state);
+ amd_iommu_free_device(dev_state->pdev);
+ }
+
+ destroy_workqueue(iommu_wq);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ free_page((unsigned long)empty_page_table);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c4..c9c6053198d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -78,6 +79,24 @@
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is a power-of-two multiple of 4KiB and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are a power-of-two multiple of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
+
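The splitting behaviour referred to above can be pictured with a small helper: given a bitmap of supported page sizes, the core would repeatedly map the largest size that respects both the remaining length and the current IOVA alignment. This is only a sketch of the idea and makes no claim about the IOMMU core's actual implementation; __fls() and __ffs() are the usual kernel bit helpers.

	/* Illustrative only: largest page size allowed by a pgsize bitmap */
	static size_t pgsize_for(unsigned long iova, size_t remaining,
				 unsigned long pgsize_bitmap)
	{
		unsigned long sizes = pgsize_bitmap;

		/* drop sizes larger than the remaining length */
		sizes &= (1UL << (__fls(remaining) + 1)) - 1;

		/* drop sizes larger than the iova alignment */
		if (iova)
			sizes &= (1UL << (__ffs(iova) + 1)) - 1;

		return sizes ? 1UL << __fls(sizes) : 0;
	}

With INTEL_IOMMU_PGSIZES as defined here, every power-of-two size from 4KiB upwards is advertised, so an order-aligned request is mapped in a single call, which preserves the traditional behaviour described in the comment.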
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
@@ -405,6 +424,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -2185,18 +2207,6 @@ static inline void iommu_prepare_isa(void)
static int md_domain_init(struct dmar_domain *domain, int guest_width);
-static int __init si_domain_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- int *ret = datax;
-
- *ret = iommu_domain_identity_map(si_domain,
- (uint64_t)start_pfn << PAGE_SHIFT,
- (uint64_t)end_pfn << PAGE_SHIFT);
- return *ret;
-
-}
-
static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
@@ -2228,9 +2238,15 @@ static int __init si_domain_init(int hw)
return 0;
for_each_online_node(nid) {
- work_with_active_regions(nid, si_domain_work_fn, &ret);
- if (ret)
- return ret;
+ unsigned long start_pfn, end_pfn;
+ int i;
+
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ ret = iommu_domain_identity_map(si_domain,
+ PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -3524,7 +3540,7 @@ found:
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3663,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
+ size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int prot = 0;
- size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
int order;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return order;
+ return PAGE_SIZE << order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4060,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+/*
+ * Group numbers are arbitrary. Devices with the same group number
+ * indicate that the IOMMU cannot differentiate between them. To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level at which we're able to differentiate devices
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *bridge;
+ union {
+ struct {
+ u8 devfn;
+ u8 bus;
+ u16 segment;
+ } pci;
+ u32 group;
+ } id;
+
+ if (iommu_no_mapping(dev))
+ return -ENODEV;
+
+ id.pci.segment = pci_domain_nr(pdev->bus);
+ id.pci.bus = pdev->bus->number;
+ id.pci.devfn = pdev->devfn;
+
+ if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+ return -ENODEV;
+
+ bridge = pci_find_upstream_pcie_bridge(pdev);
+ if (bridge) {
+ if (pci_is_pcie(bridge)) {
+ id.pci.bus = bridge->subordinate->number;
+ id.pci.devfn = 0;
+ } else {
+ id.pci.bus = bridge->bus->number;
+ id.pci.devfn = bridge->devfn;
+ }
+ }
+
+ if (!pdev->is_virtfn && iommu_group_mf)
+ id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+ *groupid = id.group;
+
+ return 0;
+}
+
static struct iommu_ops intel_iommu_ops = {
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
@@ -4069,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
+ .device_group = intel_iommu_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
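
For illustration only: the union in intel_iommu_device_group() above overlays a u32 on {devfn, bus, segment}, so two devices that alias to the same seg/bus/devfn get the same group number. A hedged user-space sketch of the same packing (the layout follows host byte order; on little-endian the value equals (segment << 16) | (bus << 8) | devfn):

#include <stdio.h>
#include <stdint.h>

union group_id {
	struct {
		uint8_t devfn;
		uint8_t bus;
		uint16_t segment;
	} pci;
	uint32_t group;
};

int main(void)
{
	/* segment 0, bus 3, slot 2 function 0 (devfn 0x10) */
	union group_id id = { .pci = { .devfn = 0x10, .bus = 0x03, .segment = 0 } };

	/* devices that alias to the same seg/bus/devfn print the same number */
	printf("group id: 0x%08x\n", (unsigned int)id.group);	/* 0x00000310 on little-endian */
	return 0;
}
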
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f31..6777ca04947 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df55..2198b2dbbcd 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
@@ -25,8 +27,59 @@
#include <linux/errno.h>
#include <linux/iommu.h>
+static ssize_t show_iommu_group(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid))
+ return 0;
+
+ return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ return device_create_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int remove_iommu_group(struct device *dev)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ device_remove_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int iommu_device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ return add_iommu_group(dev, NULL);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ return remove_iommu_group(dev);
+
+ return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+ .notifier_call = iommu_device_notifier,
+};
+
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
+ bus_register_notifier(bus, &iommu_device_nb);
+ bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
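
The notifier and bus walk above expose the group number as a per-device sysfs attribute named "iommu_group". A hedged user-space sketch of reading it; the device path below is only an example and depends entirely on the system:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:02.0/iommu_group";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* no IOMMU, or device has no group */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("iommu group: %s\n", buf);
	fclose(f);
	return 0;
}
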
/**
@@ -90,7 +143,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot)
{
- size_t size;
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
if (unlikely(domain->ops->map == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+ "0x%x\n", iova, (unsigned long)paddr,
+ (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+ (unsigned long)paddr, (unsigned long)size);
+
+ while (size) {
+ unsigned long pgsize, addr_merge = iova | paddr;
+ unsigned int pgsize_idx;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* need to consider alignment requirements ? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by both iova and paddr */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
- BUG_ON(!IS_ALIGNED(iova | paddr, size));
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= domain->ops->pgsize_bitmap;
- return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+ /* make sure we're still sane */
+ BUG_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+ (unsigned long)paddr, pgsize);
+
+ ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ iommu_unmap(domain, orig_iova, orig_size - size);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
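
A worked example of the page-size selection inside the new iommu_map() loop, as stand-alone user-space C (assumes a 64-bit unsigned long and GCC builtins; the SZ_* values and pick_pgsize() are illustrative names, not kernel symbols). For a 68 KiB region at offset 0 with a 4K|64K|1M bitmap, the loop maps one 64 KiB page and then one 4 KiB page:

#include <stdio.h>

#define SZ_4K	0x00001000UL
#define SZ_64K	0x00010000UL
#define SZ_1M	0x00100000UL

static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
				 unsigned long size, unsigned long bitmap)
{
	unsigned long addr_merge = iova | paddr;
	unsigned long mask;
	int idx = 63 - __builtin_clzl(size);		/* like __fls(size) */

	if (addr_merge) {
		int align = __builtin_ctzl(addr_merge);	/* like __ffs(addr_merge) */
		if (align < idx)
			idx = align;
	}

	mask = (1UL << (idx + 1)) - 1;		/* every size <= 2^idx */
	mask &= bitmap;				/* keep hardware-supported sizes only */
	return 1UL << (63 - __builtin_clzl(mask));	/* pick the biggest remaining */
}

int main(void)
{
	unsigned long bitmap = SZ_4K | SZ_64K | SZ_1M;

	/* 68 KiB at offset 0: one 64 KiB page, then one 4 KiB page */
	printf("%#lx\n", pick_pgsize(0x0, 0x0, 0x11000, bitmap));		/* 0x10000 */
	printf("%#lx\n", pick_pgsize(0x10000, 0x10000, 0x1000, bitmap));	/* 0x1000 */
	return 0;
}
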
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
- size_t size;
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
if (unlikely(domain->ops->unmap == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+ iova, (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+ (unsigned long)size);
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t left = size - unmapped;
+
+ unmapped_page = domain->ops->unmap(domain, iova, left);
+ if (!unmapped_page)
+ break;
+
+ pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+ (unsigned long)unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
- BUG_ON(!IS_ALIGNED(iova, size));
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+ return dev->bus->iommu_ops->device_group(dev, groupid);
- return domain->ops->unmap(domain, iova, gfp_order);
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(iommu_unmap);
+EXPORT_SYMBOL_GPL(iommu_device_group);
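
A hedged stand-alone model of the size-based iommu_unmap() contract introduced above: the loop keeps calling the driver until 'size' bytes are unmapped or a hole is hit, and the caller gets back the number of bytes actually unmapped. fake_driver_unmap() is an illustrative stand-in, not a kernel function:

#include <stdio.h>

#define PG 0x1000UL

/* illustrative stand-in for domain->ops->unmap(): only the first 2 pages are mapped */
static unsigned long fake_driver_unmap(unsigned long iova, unsigned long left)
{
	(void)left;
	return (iova < 2 * PG) ? PG : 0;	/* 0 means "hole here, stop" */
}

static unsigned long model_iommu_unmap(unsigned long iova, unsigned long size)
{
	unsigned long unmapped = 0;

	while (unmapped < size) {
		unsigned long chunk = fake_driver_unmap(iova, size - unmapped);

		if (!chunk)
			break;
		iova += chunk;
		unmapped += chunk;
	}
	return unmapped;	/* bytes actually unmapped, like iommu_unmap() */
}

int main(void)
{
	/* ask for 16 KiB starting at 0; only 8 KiB is mapped, so 8 KiB comes back */
	printf("unmapped %#lx bytes\n", model_iommu_unmap(0, 4 * PG));
	return 0;
}
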
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2e28f..08a90b88e40 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
static int msm_iommu_tex_class[4];
DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t len, int prot)
{
struct msm_priv *priv;
unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- size_t len = 0x1000UL << order;
int ret = 0, tex, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
{
struct msm_priv *priv;
unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_table;
unsigned long *sl_pte;
unsigned long sl_offset;
- size_t len = 0x1000UL << order;
int i, ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
ret = __flush_iotlb(domain);
- /*
- * the IOMMU API requires us to return the order of the unmapped
- * page (on success).
- */
- if (!ret)
- ret = order;
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.iova_to_phys = msm_iommu_iova_to_phys,
- .domain_has_cap = msm_iommu_domain_has_cap
+ .domain_has_cap = msm_iommu_domain_has_cap,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2bf758..d8edd979d01 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
__i++)
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
/**
* struct omap_iommu_domain - omap iommu domain
* @pgtable: the page table
@@ -86,20 +89,24 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_save_ctx(struct omap_iommu *obj)
+void omap_iommu_save_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_restore_ctx(struct omap_iommu *obj)
+void omap_iommu_restore_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -820,35 +827,23 @@ static int device_match_by_alias(struct device *dev, void *data)
}
/**
- * omap_find_iommu_device() - find an omap iommu device by name
- * @name: name of the iommu device
- *
- * The generic iommu API requires the caller to provide the device
- * he wishes to attach to a certain iommu domain.
- *
- * Drivers generally should not bother with this as it should just
- * be taken care of by the DMA-API using dev_archdata.
- *
- * This function is provided as an interim solution until the latter
- * materializes, and omap3isp is fully migrated to the DMA-API.
- */
-struct device *omap_find_iommu_device(const char *name)
-{
- return driver_find_device(&omap_iommu_driver.driver, NULL,
- (void *)name,
- device_match_by_alias);
-}
-EXPORT_SYMBOL_GPL(omap_find_iommu_device);
-
-/**
* omap_iommu_attach() - attach iommu device to an iommu domain
- * @dev: target omap iommu device
+ * @name: name of target omap iommu device
* @iopgd: page table
**/
-static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
int err = -ENOMEM;
- struct omap_iommu *obj = to_iommu(dev);
+ struct device *dev;
+ struct omap_iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL,
+ (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return NULL;
+
+ obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
@@ -1019,12 +1014,11 @@ static void iopte_cachep_ctor(void *iopte)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t bytes = PAGE_SIZE << order;
struct iotlb_entry e;
int omap_pgsz;
u32 ret, flags;
@@ -1049,19 +1043,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return ret;
}
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+ size_t size)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t unmap_size;
-
- dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
- unmap_size = iopgtable_clear_entry(oiommu, da);
+ dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
- return unmap_size ? get_order(unmap_size) : -EINVAL;
+ return iopgtable_clear_entry(oiommu, da);
}
static int
@@ -1069,6 +1060,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
spin_lock(&omap_domain->lock);
@@ -1081,14 +1073,14 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* get a handle to and enable the omap iommu */
- oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+ oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
if (IS_ERR(oiommu)) {
ret = PTR_ERR(oiommu);
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
- omap_domain->iommu_dev = oiommu;
+ omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
oiommu->domain = domain;
out:
@@ -1100,7 +1092,8 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu *oiommu = to_iommu(dev);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
spin_lock(&omap_domain->lock);
@@ -1114,7 +1107,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
- omap_domain->iommu_dev = NULL;
+ omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
out:
spin_unlock(&omap_domain->lock);
@@ -1183,14 +1176,14 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
else if (iopte_is_large(*pte))
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
- dev_err(dev, "bogus pte 0x%x", *pte);
+ dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
else if (iopgd_is_super(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
- dev_err(dev, "bogus pgd 0x%x", *pgd);
+ dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
}
return ret;
@@ -1211,6 +1204,7 @@ static struct iommu_ops omap_iommu_ops = {
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.domain_has_cap = omap_iommu_domain_has_cap,
+ .pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
static int __init omap_iommu_init(void)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46be456fcc0..2e10c3e0a7a 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -231,12 +231,14 @@ static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
/**
* omap_find_iovm_area - find iovma which includes @da
+ * @dev: client device
* @da: iommu device virtual address
*
* Find the existing iovma starting at @da
*/
-struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct iovm_struct *area;
mutex_lock(&obj->mmap_lock);
@@ -343,14 +345,15 @@ static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
/**
* omap_da_to_va - convert (d) to (v)
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
* @va: mpu virtual address
*
* Returns mpu virtual addr which corresponds to a given device virtual addr
*/
-void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+void *omap_da_to_va(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va = NULL;
struct iovm_struct *area;
@@ -410,7 +413,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
unsigned int i, j;
struct scatterlist *sg;
u32 da = new->da_start;
- int order;
if (!domain || !sgt)
return -EINVAL;
@@ -429,12 +431,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
if (bytes_to_iopgsz(bytes) < 0)
goto err_out;
- order = get_order(bytes);
-
pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
i, da, pa, bytes);
- err = iommu_map(domain, da, pa, order, flags);
+ err = iommu_map(domain, da, pa, bytes, flags);
if (err)
goto err_out;
@@ -449,10 +449,9 @@ err_out:
size_t bytes;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
/* ignore failures.. we're already handling one */
- iommu_unmap(domain, da, order);
+ iommu_unmap(domain, da, bytes);
da += bytes;
}
@@ -467,7 +466,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
size_t total = area->da_end - area->da_start;
const struct sg_table *sgt = area->sgt;
struct scatterlist *sg;
- int i, err;
+ int i;
+ size_t unmapped;
BUG_ON(!sgtable_ok(sgt));
BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +475,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
start = area->da_start;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes;
- int order;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
- err = iommu_unmap(domain, start, order);
- if (err < 0)
+ unmapped = iommu_unmap(domain, start, bytes);
+ if (unmapped < bytes)
break;
dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
@@ -582,16 +580,18 @@ __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
/**
* omap_iommu_vmap - (d)-(p)-(v) address mapper
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @sgt: address of scatter gather table
* @flags: iovma and page property
*
* Creates 1-n-1 mapping with given @sgt and returns @da.
* All @sgt element must be io page size aligned.
*/
-u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
const struct sg_table *sgt, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
size_t bytes;
void *va = NULL;
@@ -622,15 +622,17 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmap);
/**
* omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @da: iommu device virtual address
*
* Free the iommu virtually contiguous memory area starting at
* @da, which was returned by 'omap_iommu_vmap()'.
*/
struct sg_table *
-omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
/*
* 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
@@ -647,7 +649,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
* omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
- * @obj: objective iommu
+ * @dev: client device
* @da: contiguous iommu virtual memory
* @bytes: allocation size
* @flags: iovma and page property
@@ -656,9 +658,10 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
size_t bytes, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va;
struct sg_table *sgt;
@@ -698,15 +701,16 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
/**
* omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
*
* Frees the iommu virtually continuous memory area starting at
* @da, as obtained from 'omap_iommu_vmalloc()'.
*/
-void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
const u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
sgt = unmap_vm_area(domain, obj, da, vfree,
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e46777..9021182c4b7 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
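
A hedged user-space sketch of what the added strnlen() checks catch: if an id field fills its buffer with no NUL terminator, strnlen() returns the full buffer size and the ioctl is rejected before any later string copy could run off the end. The array below is illustrative, not the real ioctl structure:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char msn[4] = { 'a', 'b', 'c', 'd' };	/* deliberately not NUL-terminated */

	if (strnlen(msn, sizeof(msn)) == sizeof(msn))
		printf("rejected: id is not NUL-terminated\n");
	return 0;
}
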
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 04231cb2f03..1793ba1b6a8 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
{
isdn_if *iif;
- pr_info("ISDN4Linux interface\n");
-
iif = kmalloc(sizeof *iif, GFP_KERNEL);
if (!iif) {
pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
*/
void gigaset_isdn_regdrv(void)
{
+ pr_info("ISDN4Linux interface\n");
/* nothing to do */
}
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 71a8eb6ef71..0e1f4d5b977 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2154,30 +2154,4 @@ static struct usb_driver hfcsusb_drv = {
.disconnect = hfcsusb_disconnect,
};
-static int __init
-hfcsusb_init(void)
-{
- printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
- hfcsusb_rev, debug, poll);
-
- if (usb_register(&hfcsusb_drv)) {
- printk(KERN_INFO DRIVER_NAME
- ": Unable to register hfcsusb module at usb stack\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit
-hfcsusb_cleanup(void)
-{
- if (debug & DBG_HFC_CALL_TRACE)
- printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);
-
- /* unregister Hardware */
- usb_deregister(&hfcsusb_drv); /* release our driver */
-}
-
-module_init(hfcsusb_init);
-module_exit(hfcsusb_cleanup);
+module_usb_driver(hfcsusb_drv);
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index 26264abf1f5..f55d29d6082 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -333,7 +333,7 @@ static void __devinit en_cs_init(struct IsdnCard *card,
cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
/* Reset an */
- cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
+ cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
/* 20 ms Pause */
mdelay(20);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e02..2339d7396b9 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff203a42186..897a77dfa9d 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -347,7 +347,8 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
+ depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
+ MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2
default y
help
This option enables support for the dual-GPIO LED found on the
@@ -387,6 +388,14 @@ config LEDS_RENESAS_TPU
pin function. The latter to support brightness control.
Brightness control is supported but hardware blinking is not.
+config LEDS_TCA6507
+ tristate "LED Support for TCA6507 I2C chip"
+ depends on LEDS_CLASS && I2C
+ help
+ This option enables support for LEDs connected to TCA6507
+ LED driver chips accessed via the I2C bus.
+ The driver supports brightness control and hardware-assisted blinking.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e4f6bf56888..5c9dc4b000d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6d5628bb060..0c8739c448b 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -15,7 +15,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6f1ff93d7ce..46b4c766335 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 0810604dc70..4ca00624bd1 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -238,17 +238,7 @@ static struct platform_driver pm860x_led_driver = {
.remove = pm860x_led_remove,
};
-static int __devinit pm860x_led_init(void)
-{
- return platform_driver_register(&pm860x_led_driver);
-}
-module_init(pm860x_led_init);
-
-static void __devexit pm860x_led_exit(void)
-{
- platform_driver_unregister(&pm860x_led_driver);
-}
-module_exit(pm860x_led_exit);
+module_platform_driver(pm860x_led_driver);
MODULE_DESCRIPTION("LED driver for Marvell PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 7ba4c7b5b97..b1400db3f83 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -213,17 +213,7 @@ static struct platform_driver adp5520_led_driver = {
.remove = __devexit_p(adp5520_led_remove),
};
-static int __init adp5520_led_init(void)
-{
- return platform_driver_register(&adp5520_led_driver);
-}
-module_init(adp5520_led_init);
-
-static void __exit adp5520_led_exit(void)
-{
- platform_driver_unregister(&adp5520_led_driver);
-}
-module_exit(adp5520_led_exit);
+module_platform_driver(adp5520_led_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 8c00937bf7e..07428357c83 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -118,18 +118,7 @@ static struct platform_driver ams_delta_led_driver = {
},
};
-static int __init ams_delta_led_init(void)
-{
- return platform_driver_register(&ams_delta_led_driver);
-}
-
-static void __exit ams_delta_led_exit(void)
-{
- platform_driver_unregister(&ams_delta_led_driver);
-}
-
-module_init(ams_delta_led_init);
-module_exit(ams_delta_led_exit);
+module_platform_driver(ams_delta_led_driver);
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
MODULE_DESCRIPTION("Amstrad Delta LED driver");
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 48d9fe61bdf..525a9249283 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -179,21 +179,9 @@ static struct platform_driver asic3_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-asic3");
-
-static int __init asic3_led_init(void)
-{
- return platform_driver_register(&asic3_led_driver);
-}
-
-static void __exit asic3_led_exit(void)
-{
- platform_driver_unregister(&asic3_led_driver);
-}
-
-module_init(asic3_led_init);
-module_exit(asic3_led_exit);
+module_platform_driver(asic3_led_driver);
MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
MODULE_DESCRIPTION("HTC ASIC3 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-asic3");
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 109c875ea23..800243b6037 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -134,29 +134,18 @@ static int __exit pwmled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:leds-atmel-pwm");
-
static struct platform_driver pwmled_driver = {
.driver = {
.name = "leds-atmel-pwm",
.owner = THIS_MODULE,
},
/* REVISIT add suspend() and resume() methods */
+ .probe = pwmled_probe,
.remove = __exit_p(pwmled_remove),
};
-static int __init modinit(void)
-{
- return platform_driver_probe(&pwmled_driver, pwmled_probe);
-}
-module_init(modinit);
-
-static void __exit modexit(void)
-{
- platform_driver_unregister(&pwmled_driver);
-}
-module_exit(modexit);
+module_platform_driver(pwmled_driver);
MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-atmel-pwm");
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index ea2185531f8..591cbdf5a04 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -688,8 +688,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
i2c_set_clientdata(client, led);
/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
- gpio_request(pdata->reset_gpio, "RGB_RESETB");
- gpio_direction_output(pdata->reset_gpio, 1);
+ gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
/* Tacss = min 0.1ms */
udelay(100);
@@ -813,17 +812,7 @@ static struct i2c_driver bd2802_i2c_driver = {
.id_table = bd2802_id,
};
-static int __init bd2802_init(void)
-{
- return i2c_add_driver(&bd2802_i2c_driver);
-}
-module_init(bd2802_init);
-
-static void __exit bd2802_exit(void)
-{
- i2c_del_driver(&bd2802_i2c_driver);
-}
-module_exit(bd2802_exit);
+module_i2c_driver(bd2802_i2c_driver);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index da5fb016b1a..6a8725cc7b4 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -75,9 +75,6 @@ static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:cobalt-qube-leds");
-
static struct platform_driver cobalt_qube_led_driver = {
.probe = cobalt_qube_led_probe,
.remove = __devexit_p(cobalt_qube_led_remove),
@@ -87,19 +84,9 @@ static struct platform_driver cobalt_qube_led_driver = {
},
};
-static int __init cobalt_qube_led_init(void)
-{
- return platform_driver_register(&cobalt_qube_led_driver);
-}
-
-static void __exit cobalt_qube_led_exit(void)
-{
- platform_driver_unregister(&cobalt_qube_led_driver);
-}
-
-module_init(cobalt_qube_led_init);
-module_exit(cobalt_qube_led_exit);
+module_platform_driver(cobalt_qube_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Front LED support for Cobalt Server");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_ALIAS("platform:cobalt-qube-leds");
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index f28931cf678..d9cd73ebd6c 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -158,17 +158,7 @@ static struct platform_driver da903x_led_driver = {
.remove = __devexit_p(da903x_led_remove),
};
-static int __init da903x_led_init(void)
-{
- return platform_driver_register(&da903x_led_driver);
-}
-module_init(da903x_led_init);
-
-static void __exit da903x_led_exit(void)
-{
- platform_driver_unregister(&da903x_led_driver);
-}
-module_exit(da903x_led_exit);
+module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 31cf0d60a9a..d56c14269ff 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -131,18 +131,7 @@ static struct spi_driver dac124s085_driver = {
},
};
-static int __init dac124s085_leds_init(void)
-{
- return spi_register_driver(&dac124s085_driver);
-}
-
-static void __exit dac124s085_leds_exit(void)
-{
- spi_unregister_driver(&dac124s085_driver);
-}
-
-module_init(dac124s085_leds_init);
-module_exit(dac124s085_leds_exit);
+module_spi_driver(dac124s085_driver);
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_DESCRIPTION("DAC124S085 LED driver");
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 49aceffaa5b..b9053fa6e25 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -224,20 +224,7 @@ static struct platform_driver fsg_led_driver = {
},
};
-
-static int __init fsg_led_init(void)
-{
- return platform_driver_register(&fsg_led_driver);
-}
-
-static void __exit fsg_led_exit(void)
-{
- platform_driver_unregister(&fsg_led_driver);
-}
-
-
-module_init(fsg_led_init);
-module_exit(fsg_led_exit);
+module_platform_driver(fsg_led_driver);
MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 399a86f2013..7df74cb97e7 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -293,21 +293,9 @@ static struct platform_driver gpio_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-gpio");
-
-static int __init gpio_led_init(void)
-{
- return platform_driver_register(&gpio_led_driver);
-}
-
-static void __exit gpio_led_exit(void)
-{
- platform_driver_unregister(&gpio_led_driver);
-}
-
-module_init(gpio_led_init);
-module_exit(gpio_led_exit);
+module_platform_driver(gpio_led_driver);
MODULE_AUTHOR("Raphael Assenat <raph@8d.com>, Trent Piepho <tpiepho@freescale.com>");
MODULE_DESCRIPTION("GPIO LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-gpio");
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index bcfbd3a60ea..366b6055e33 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -79,9 +79,6 @@ static int hp6xxled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:hp6xx-led");
-
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.remove = hp6xxled_remove,
@@ -91,19 +88,9 @@ static struct platform_driver hp6xxled_driver = {
},
};
-static int __init hp6xxled_init(void)
-{
- return platform_driver_register(&hp6xxled_driver);
-}
-
-static void __exit hp6xxled_exit(void)
-{
- platform_driver_unregister(&hp6xxled_driver);
-}
-
-module_init(hp6xxled_init);
-module_exit(hp6xxled_exit);
+module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hp6xx-led");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 0630e4f4b28..45e6878d737 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -457,18 +457,7 @@ static struct i2c_driver lm3530_i2c_driver = {
},
};
-static int __init lm3530_init(void)
-{
- return i2c_add_driver(&lm3530_i2c_driver);
-}
-
-static void __exit lm3530_exit(void)
-{
- i2c_del_driver(&lm3530_i2c_driver);
-}
-
-module_init(lm3530_init);
-module_exit(lm3530_exit);
+module_i2c_driver(lm3530_i2c_driver);
MODULE_DESCRIPTION("Back Light driver for LM3530");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 9010c054615..b8f9f0a5d43 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -453,18 +453,7 @@ static struct i2c_driver lp3944_driver = {
.id_table = lp3944_id,
};
-static int __init lp3944_module_init(void)
-{
- return i2c_add_driver(&lp3944_driver);
-}
-
-static void __exit lp3944_module_exit(void)
-{
- i2c_del_driver(&lp3944_driver);
-}
-
-module_init(lp3944_module_init);
-module_exit(lp3944_module_exit);
+module_i2c_driver(lp3944_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("LP3944 Fun Light Chip");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index cb641f1b334..d62a7982a5e 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -797,25 +797,7 @@ static struct i2c_driver lp5521_driver = {
.id_table = lp5521_id,
};
-static int __init lp5521_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5521_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5521 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5521_exit(void)
-{
- i2c_del_driver(&lp5521_driver);
-}
-
-module_init(lp5521_init);
-module_exit(lp5521_exit);
+module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
MODULE_DESCRIPTION("LP5521 LED engine");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 5971e309b23..73e791ae725 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -870,8 +870,6 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
return 0;
}
-static struct i2c_driver lp5523_driver;
-
static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1021,25 +1019,7 @@ static struct i2c_driver lp5523_driver = {
.id_table = lp5523_id,
};
-static int __init lp5523_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5523_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5523 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5523_exit(void)
-{
- i2c_del_driver(&lp5523_driver);
-}
-
-module_init(lp5523_init);
-module_exit(lp5523_exit);
+module_i2c_driver(lp5523_driver);
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
MODULE_DESCRIPTION("LP5523 LED engine");
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 53f67b8ce55..e311a96c446 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -199,21 +199,9 @@ static struct platform_driver lt3593_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-lt3593");
-
-static int __init lt3593_led_init(void)
-{
- return platform_driver_register(&lt3593_led_driver);
-}
-
-static void __exit lt3593_led_exit(void)
-{
- platform_driver_unregister(&lt3593_led_driver);
-}
-
-module_init(lt3593_led_init);
-module_exit(lt3593_led_exit);
+module_platform_driver(lt3593_led_driver);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-lt3593");
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index b3393a9f213..8bc49154155 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -275,7 +275,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ if (pdata->num_leds < 1 || pdata->num_leds > (MC13783_LED_MAX + 1)) {
dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
return -EINVAL;
}
@@ -385,17 +385,7 @@ static struct platform_driver mc13783_led_driver = {
.remove = __devexit_p(mc13783_led_remove),
};
-static int __init mc13783_led_init(void)
-{
- return platform_driver_register(&mc13783_led_driver);
-}
-module_init(mc13783_led_init);
-
-static void __exit mc13783_led_exit(void)
-{
- platform_driver_unregister(&mc13783_led_driver);
-}
-module_exit(mc13783_led_exit);
+module_platform_driver(mc13783_led_driver);
MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index f2e51c13439..d8433f2d53b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -81,35 +81,23 @@ static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
/* Configure address GPIOs. */
for (i = 0; i < gpio_ext->num_addr; i++) {
- err = gpio_request(gpio_ext->addr[i], "GPIO extension addr");
+ err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension addr");
if (err)
goto err_free_addr;
- err = gpio_direction_output(gpio_ext->addr[i], 0);
- if (err) {
- gpio_free(gpio_ext->addr[i]);
- goto err_free_addr;
- }
}
/* Configure data GPIOs. */
for (i = 0; i < gpio_ext->num_data; i++) {
- err = gpio_request(gpio_ext->data[i], "GPIO extension data");
+ err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension data");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->data[i], 0);
- if (err) {
- gpio_free(gpio_ext->data[i]);
- goto err_free_data;
- }
}
/* Configure "enable select" GPIO. */
- err = gpio_request(gpio_ext->enable, "GPIO extension enable");
+ err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
+ "GPIO extension enable");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->enable, 0);
- if (err) {
- gpio_free(gpio_ext->enable);
- goto err_free_data;
- }
return 0;
@@ -429,21 +417,10 @@ static struct platform_driver netxbig_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-netxbig");
-static int __init netxbig_led_init(void)
-{
- return platform_driver_register(&netxbig_led_driver);
-}
-
-static void __exit netxbig_led_exit(void)
-{
- platform_driver_unregister(&netxbig_led_driver);
-}
-
-module_init(netxbig_led_init);
-module_exit(netxbig_led_exit);
+module_platform_driver(netxbig_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-netxbig");
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 37b7d0cfe58..2f0a14421a7 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -323,21 +323,10 @@ static struct platform_driver ns2_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-ns2");
-
-static int __init ns2_led_init(void)
-{
- return platform_driver_register(&ns2_led_driver);
-}
-static void __exit ns2_led_exit(void)
-{
- platform_driver_unregister(&ns2_led_driver);
-}
-
-module_init(ns2_led_init);
-module_exit(ns2_led_exit);
+module_platform_driver(ns2_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("Network Space v2 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ns2");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index a2c874623e3..ceccab44b5b 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -489,20 +489,8 @@ static int pca9532_remove(struct i2c_client *client)
return 0;
}
-static int __init pca9532_init(void)
-{
- return i2c_add_driver(&pca9532_driver);
-}
-
-static void __exit pca9532_exit(void)
-{
- i2c_del_driver(&pca9532_driver);
-}
+module_i2c_driver(pca9532_driver);
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCA 9532 LED dimmer");
-
-module_init(pca9532_init);
-module_exit(pca9532_exit);
-
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 66aa3e8e786..dcc3bc3d38d 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -371,18 +371,7 @@ static struct i2c_driver pca955x_driver = {
.id_table = pca955x_id,
};
-static int __init pca955x_leds_init(void)
-{
- return i2c_add_driver(&pca955x_driver);
-}
-
-static void __exit pca955x_leds_exit(void)
-{
- i2c_del_driver(&pca955x_driver);
-}
-
-module_init(pca955x_leds_init);
-module_exit(pca955x_leds_exit);
+module_i2c_driver(pca955x_driver);
MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
MODULE_DESCRIPTION("PCA955x LED driver");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 666daf77872..3ed92f34bd4 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -135,18 +135,7 @@ static struct platform_driver led_pwm_driver = {
},
};
-static int __init led_pwm_init(void)
-{
- return platform_driver_register(&led_pwm_driver);
-}
-
-static void __exit led_pwm_exit(void)
-{
- platform_driver_unregister(&led_pwm_driver);
-}
-
-module_init(led_pwm_init);
-module_exit(led_pwm_exit);
+module_platform_driver(led_pwm_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("PWM LED driver for PXA");
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index c3525f37f73..a7815b6cd85 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -57,21 +57,9 @@ static struct platform_driver rb532_led_driver = {
},
};
-static int __init rb532_led_init(void)
-{
- return platform_driver_register(&rb532_led_driver);
-}
-
-static void __exit rb532_led_exit(void)
-{
- platform_driver_unregister(&rb532_led_driver);
-}
-
-module_init(rb532_led_init);
-module_exit(rb532_led_exit);
-
-MODULE_ALIAS("platform:rb532-led");
+module_platform_driver(rb532_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("User LED support for Routerboard532");
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
+MODULE_ALIAS("platform:rb532-led");
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 8497f56f8e4..df7e963bddd 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -229,17 +229,7 @@ static struct platform_driver regulator_led_driver = {
.remove = __devexit_p(regulator_led_remove),
};
-static int __init regulator_led_init(void)
-{
- return platform_driver_register(&regulator_led_driver);
-}
-module_init(regulator_led_init);
-
-static void __exit regulator_led_exit(void)
-{
- platform_driver_unregister(&regulator_led_driver);
-}
-module_exit(regulator_led_exit);
+module_platform_driver(regulator_led_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("Regulator driven LED driver");
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 3ee540eb127..32fe337d5c6 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -339,18 +339,7 @@ static struct platform_driver r_tpu_device_driver = {
}
};
-static int __init r_tpu_init(void)
-{
- return platform_driver_register(&r_tpu_device_driver);
-}
-
-static void __exit r_tpu_exit(void)
-{
- platform_driver_unregister(&r_tpu_device_driver);
-}
-
-module_init(r_tpu_init);
-module_exit(r_tpu_exit);
+module_platform_driver(r_tpu_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas TPU LED Driver");
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 29f8b0f0e2c..bd0a5ed49c4 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -121,18 +121,7 @@ static struct platform_driver s3c24xx_led_driver = {
},
};
-static int __init s3c24xx_led_init(void)
-{
- return platform_driver_register(&s3c24xx_led_driver);
-}
-
-static void __exit s3c24xx_led_exit(void)
-{
- platform_driver_unregister(&s3c24xx_led_driver);
-}
-
-module_init(s3c24xx_led_init);
-module_exit(s3c24xx_led_exit);
+module_platform_driver(s3c24xx_led_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX LED driver");
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
new file mode 100644
index 00000000000..133f89fb707
--- /dev/null
+++ b/drivers/leds/leds-tca6507.c
@@ -0,0 +1,779 @@
+/*
+ * leds-tca6507
+ *
+ * The TCA6507 is a programmable LED controller that can drive 7
+ * separate lines either by holding them low, or by pulsing them
+ * with modulated width.
+ * The modulation can be varied in a simple pattern to produce a blink or
+ * double-blink.
+ *
+ * This driver can configure each line either as a 'GPIO' which is out-only
+ * (no pull-up) or as an LED with variable brightness and hardware-assisted
+ * blinking.
+ *
+ * Apart from OFF and ON there are three programmable brightness levels which
+ * can be programmed from 0 to 15 and indicate how many 500usec intervals in
+ * each 8msec the LED is 'on'. The levels are named MASTER, BANK0 and
+ * BANK1.
+ *
+ * There are two different blink rates that can be programmed, each with
+ * separate time for rise, on, fall, off and second-off. Thus if 3 or more
+ * different non-trivial rates are required, software must be used for the extra
+ * rates. The two different blink rates must align with the two levels BANK0 and
+ * BANK1.
+ * This driver does not support double-blink so 'second-off' always matches
+ * 'off'.
+ *
+ * Only 16 different times can be programmed in a roughly logarithmic scale from
+ * 64ms to 16320ms. To be precise the possible times are:
+ * 0, 64, 128, 192, 256, 384, 512, 768,
+ * 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+ *
+ * Times that cannot be closely matched with these must be
+ * handled in software. This driver allows 12.5% error in matching.
+ *
+ * This driver does not allow rise/fall rates to be set explicitly. When trying
+ * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
+ * 'hold' times are chosen to get a close match. If the target delay is even,
+ * the 'change' number will be the smaller; if odd, the 'hold' number will be
+ * the smaller.
+ *
+ * Choosing pairs of delays with 12.5% errors allows us to match delays in the
+ * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings. For example
+ * 1536 == 1536+0, 1024+512, or 768+768. This driver will always choose the
+ * pairing with the least maximum - 768+768 in this case. Other pairings are
+ * not available.
+ *
+ * Access to the 3 levels and 2 blinks is on a first-come, first-served basis.
+ * Access can be shared by multiple LEDs if they have the same level and
+ * either the same blink rates, or some don't blink.
+ * When an LED changes, it relinquishes access and tries again, so it might
+ * lose access to hardware blink.
+ * If a blink engine cannot be allocated, software blink is used.
+ * If the desired brightness cannot be allocated, the closest available non-zero
+ * brightness is used. As 'full' is always available, the worst case would be
+ * to have two different blink rates at '1', with Max at '2', then other LEDs
+ * will have to choose between '2' and '16'. Hopefully this is not likely.
+ *
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
+ * and LEDs using the blink. It can only be reprogrammed when the appropriate
+ * counter is zero. The MASTER level has a single usage count.
+ *
+ * Each LED has programmable 'on' and 'off' times in milliseconds. With each
+ * there is a flag saying if it was explicitly requested or defaulted.
+ * Similarly the banks know if each time was explicit or a default. Defaults
+ * are permitted to be changed freely - they are not recognised when matching.
+ *
+ *
+ * A leds-tca6507 device must be provided with platform data. This data
+ * lists, for each output: the name, default trigger, and whether the signal
+ * is being used as a GPIO rather than an LED. 'struct led_platform_data'
+ * is used for this. If 'name' is NULL, the output isn't used. If 'flags'
+ * is TCA6507_MAKE_GPIO, the output is a GPO.
+ * The "struct led_platform_data" can be embedded in a
+ * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPIOs,
+ * and a 'setup' callback which is called once the GPIOs are available.
+ *
+ */
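
A hedged stand-alone demonstration of the pairing rule described in the comment above: among sums within 12.5% of the target, the pairing with the smallest maximum wins, so 1536 ms resolves to 768+768. This is user-space C for illustration only; it mirrors, but is not, the driver's choose_times():

#include <stdio.h>
#include <stdlib.h>

static const int time_codes[16] = {
	0, 64, 128, 192, 256, 384, 512, 768,
	1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
};

int main(void)
{
	int msec = 1536, best_i = -1, best_j = -1, best_diff = 1 << 30;
	int i, j;

	for (i = 0; i < 16; i++)		/* i ascends, so on a tie the   */
		for (j = 0; j <= i; j++) {	/* smallest maximum is kept     */
			int sum = time_codes[i] + time_codes[j];
			int d = abs(sum - msec);

			if (8 * d > msec)	/* outside the 12.5% window */
				continue;
			if (d < best_diff) {
				best_diff = d;
				best_i = i;
				best_j = j;
			}
		}
	if (best_i < 0)
		return 1;			/* no pairing close enough */
	printf("%dms -> %d + %d\n", msec,
	       time_codes[best_i], time_codes[best_j]);	/* 1536ms -> 768 + 768 */
	return 0;
}
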
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/leds-tca6507.h>
+
+/* LED select registers determine the source that drives LED outputs */
+#define TCA6507_LS_LED_OFF 0x0 /* Output HI-Z (off) */
+#define TCA6507_LS_LED_OFF1 0x1 /* Output HI-Z (off) - not used */
+#define TCA6507_LS_LED_PWM0 0x2 /* Output LOW with Bank0 rate */
+#define TCA6507_LS_LED_PWM1 0x3 /* Output LOW with Bank1 rate */
+#define TCA6507_LS_LED_ON 0x4 /* Output LOW (on) */
+#define TCA6507_LS_LED_MIR 0x5 /* Output LOW with Master Intensity */
+#define TCA6507_LS_BLINK0 0x6 /* Blink at Bank0 rate */
+#define TCA6507_LS_BLINK1 0x7 /* Blink at Bank1 rate */
+
+enum {
+ BANK0,
+ BANK1,
+ MASTER,
+};
+static int bank_source[3] = {
+ TCA6507_LS_LED_PWM0,
+ TCA6507_LS_LED_PWM1,
+ TCA6507_LS_LED_MIR,
+};
+static int blink_source[2] = {
+ TCA6507_LS_BLINK0,
+ TCA6507_LS_BLINK1,
+};
+
+/* PWM registers */
+#define TCA6507_REG_CNT 11
+
+/*
+ * 0x00, 0x01, 0x02 encode the TCA6507_LS_* values, each output
+ * owns one bit in each register
+ */
+#define TCA6507_FADE_ON 0x03
+#define TCA6507_FULL_ON 0x04
+#define TCA6507_FADE_OFF 0x05
+#define TCA6507_FIRST_OFF 0x06
+#define TCA6507_SECOND_OFF 0x07
+#define TCA6507_MAX_INTENSITY 0x08
+#define TCA6507_MASTER_INTENSITY 0x09
+#define TCA6507_INITIALIZE 0x0A
+
+#define INIT_CODE 0x8
+
+#define TIMECODES 16
+static int time_codes[TIMECODES] = {
+ 0, 64, 128, 192, 256, 384, 512, 768,
+ 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+};
+
+/* Convert an led.brightness level (0..255) to a TCA6507 level (0..15) */
+static inline int TO_LEVEL(int brightness)
+{
+ return brightness >> 4;
+}
+
+/* ...and convert back */
+static inline int TO_BRIGHT(int level)
+{
+ if (level)
+ return (level << 4) | 0xf;
+ return 0;
+}
+
+#define NUM_LEDS 7
+struct tca6507_chip {
+ int reg_set; /* One bit per register where
+ * a '1' means the register
+ * should be written */
+ u8 reg_file[TCA6507_REG_CNT];
+ /* Bank 2 is Master Intensity and doesn't use times */
+ struct bank {
+ int level;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int time_use, level_use;
+ } bank[3];
+ struct i2c_client *client;
+ struct work_struct work;
+ spinlock_t lock;
+
+ struct tca6507_led {
+ struct tca6507_chip *chip;
+ struct led_classdev led_cdev;
+ int num;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int bank; /* Bank used, or -1 */
+ int blink; /* Set if hardware-blinking */
+ } leds[NUM_LEDS];
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ const char *gpio_name[NUM_LEDS];
+ int gpio_map[NUM_LEDS];
+#endif
+};
+
+static const struct i2c_device_id tca6507_id[] = {
+ { "tca6507" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6507_id);
+
+static int choose_times(int msec, int *c1p, int *c2p)
+{
+ /*
+ * Choose two timecodes which add to 'msec' as near as possible.
+ * The first returned is the 'on' or 'off' time. The second is to be
+ * used as a 'fade-on' or 'fade-off' time. If 'msec' is even,
+ * the first will not be smaller than the second. If 'msec' is odd,
+ * the first will not be larger than the second.
+ * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
+ * otherwise return the sum that was achieved, plus 1 if the first is
+ * smaller.
+ * If two possibilities are equally good (e.g. 512+0, 256+256), prefer the
+ * pair with the larger fade component (here 256+256) so that more of the
+ * change-time is visible (i.e. the transition is softer).
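+ * For example (numbers purely illustrative): msec = 600 gives a window of
+ * 525..675ms; the best available pair is 384 + 192 = 576, so *c1p indexes
+ * 384, *c2p indexes 192, and 576 is returned.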
+ */
+ int c1, c2;
+ int tmax = msec * 9 / 8;
+ int tmin = msec * 7 / 8;
+ int diff = 65536;
+
+ /* We start at '1' to ensure we never even think of choosing a
+ * total time of '0'.
+ */
+ for (c1 = 1; c1 < TIMECODES; c1++) {
+ int t = time_codes[c1];
+ if (t*2 < tmin)
+ continue;
+ if (t > tmax)
+ break;
+ for (c2 = 0; c2 <= c1; c2++) {
+ int tt = t + time_codes[c2];
+ int d;
+ if (tt < tmin)
+ continue;
+ if (tt > tmax)
+ break;
+ /* This works! */
+ d = abs(msec - tt);
+ if (d >= diff)
+ continue;
+ /* Best yet */
+ *c1p = c1;
+ *c2p = c2;
+ diff = d;
+ if (d == 0)
+ return msec;
+ }
+ }
+ if (diff < 65536) {
+ int actual;
+ if (msec & 1) {
+ c1 = *c2p;
+ *c2p = *c1p;
+ *c1p = c1;
+ }
+ actual = time_codes[*c1p] + time_codes[*c2p];
+ if (*c1p < *c2p)
+ return actual + 1;
+ else
+ return actual;
+ }
+ /* No close match */
+ return -EINVAL;
+}
+
+/*
+ * Update the register file with the appropriate 3-bit state for
+ * the given led.
+ */
+static void set_select(struct tca6507_chip *tca, int led, int val)
+{
+ int mask = (1 << led);
+ int bit;
+
+ for (bit = 0; bit < 3; bit++) {
+ int n = tca->reg_file[bit] & ~mask;
+ if (val & (1 << bit))
+ n |= mask;
+ if (tca->reg_file[bit] != n) {
+ tca->reg_file[bit] = n;
+ tca->reg_set |= (1 << bit);
+ }
+ }
+}
+
+/* Update the register file with the appropriate 4-bit code for
+ * one bank or the other. This can be used for timers, for levels, or
+ * for initialisation.
+ */
+static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
+{
+ int mask = 0xF;
+ int n;
+ if (bank) {
+ mask <<= 4;
+ new <<= 4;
+ }
+ n = tca->reg_file[reg] & ~mask;
+ n |= new;
+ if (tca->reg_file[reg] != n) {
+ tca->reg_file[reg] = n;
+ tca->reg_set |= 1 << reg;
+ }
+}
+
+/* Update brightness level. */
+static void set_level(struct tca6507_chip *tca, int bank, int level)
+{
+ switch (bank) {
+ case BANK0:
+ case BANK1:
+ set_code(tca, TCA6507_MAX_INTENSITY, bank, level);
+ break;
+ case MASTER:
+ set_code(tca, TCA6507_MASTER_INTENSITY, 0, level);
+ break;
+ }
+ tca->bank[bank].level = level;
+}
+
+/* Record all relevant time codes for a given bank */
+static void set_times(struct tca6507_chip *tca, int bank)
+{
+ int c1, c2;
+ int result;
+
+ result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose on times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].ontime);
+ set_code(tca, TCA6507_FADE_ON, bank, c2);
+ set_code(tca, TCA6507_FULL_ON, bank, c1);
+ tca->bank[bank].ontime = result;
+
+ result = choose_times(tca->bank[bank].offtime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].offtime);
+ set_code(tca, TCA6507_FADE_OFF, bank, c2);
+ set_code(tca, TCA6507_FIRST_OFF, bank, c1);
+ set_code(tca, TCA6507_SECOND_OFF, bank, c1);
+ tca->bank[bank].offtime = result;
+
+ set_code(tca, TCA6507_INITIALIZE, bank, INIT_CODE);
+}
+
+/* Write out all registers of the tca6507 that need updating */
+static void tca6507_work(struct work_struct *work)
+{
+ struct tca6507_chip *tca = container_of(work, struct tca6507_chip,
+ work);
+ struct i2c_client *cl = tca->client;
+ int set;
+ u8 file[TCA6507_REG_CNT];
+ int r;
+
+ spin_lock_irq(&tca->lock);
+ set = tca->reg_set;
+ memcpy(file, tca->reg_file, TCA6507_REG_CNT);
+ tca->reg_set = 0;
+ spin_unlock_irq(&tca->lock);
+
+ for (r = 0; r < TCA6507_REG_CNT; r++)
+ if (set & (1<<r))
+ i2c_smbus_write_byte_data(cl, r, file[r]);
+}
+
+static void led_release(struct tca6507_led *led)
+{
+ /* If the LED owns any resources, release them. */
+ struct tca6507_chip *tca = led->chip;
+ if (led->bank >= 0) {
+ struct bank *b = tca->bank + led->bank;
+ if (led->blink)
+ b->time_use--;
+ b->level_use--;
+ }
+ led->blink = 0;
+ led->bank = -1;
+}
+
+static int led_prepare(struct tca6507_led *led)
+{
+ /* Assign this led to a bank, configuring that bank if necessary. */
+ int level = TO_LEVEL(led->led_cdev.brightness);
+ struct tca6507_chip *tca = led->chip;
+ int c1, c2;
+ int i;
+ struct bank *b;
+ int need_init = 0;
+
+ led->led_cdev.brightness = TO_BRIGHT(level);
+ if (level == 0) {
+ set_select(tca, led->num, TCA6507_LS_LED_OFF);
+ return 0;
+ }
+
+ if (led->ontime == 0 || led->offtime == 0) {
+ /*
+ * Just set the brightness, choosing the first usable bank.
+ * If none is a perfect match, choose the best.
+ * Count backwards so we check MASTER bank first
+ * to avoid wasting a timer.
+ */
+ int best = -1;/* full-on */
+ int diff = 15-level;
+
+ if (level == 15) {
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ return 0;
+ }
+
+ for (i = MASTER; i >= BANK0; i--) {
+ int d;
+ if (tca->bank[i].level == level ||
+ tca->bank[i].level_use == 0) {
+ best = i;
+ break;
+ }
+ d = abs(level - tca->bank[i].level);
+ if (d < diff) {
+ diff = d;
+ best = i;
+ }
+ }
+ if (best == -1) {
+ /* Best brightness is full-on */
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ led->led_cdev.brightness = LED_FULL;
+ return 0;
+ }
+
+ if (!tca->bank[best].level_use)
+ set_level(tca, best, level);
+
+ tca->bank[best].level_use++;
+ led->bank = best;
+ set_select(tca, led->num, bank_source[best]);
+ led->led_cdev.brightness = TO_BRIGHT(tca->bank[best].level);
+ return 0;
+ }
+
+ /*
+ * We have on/off time so we need to try to allocate a timing bank.
+ * First check if times are compatible with hardware and give up if
+ * not.
+ */
+ if (choose_times(led->ontime, &c1, &c2) < 0)
+ return -EINVAL;
+ if (choose_times(led->offtime, &c1, &c2) < 0)
+ return -EINVAL;
+
+ for (i = BANK0; i <= BANK1; i++) {
+ if (tca->bank[i].level_use == 0)
+ /* not in use - it is ours! */
+ break;
+ if (tca->bank[i].level != level)
+ /* Incompatible level - skip */
+ /* FIXME: if the timer matches we should perhaps
+ * consider this bank anyway...
+ */
+ continue;
+
+ if (tca->bank[i].time_use == 0)
+ /* Timer not in use, and level matches - use it */
+ break;
+
+ if (!(tca->bank[i].on_dflt ||
+ led->on_dflt ||
+ tca->bank[i].ontime == led->ontime))
+ /* on time is incompatible */
+ continue;
+
+ if (!(tca->bank[i].off_dflt ||
+ led->off_dflt ||
+ tca->bank[i].offtime == led->offtime))
+ /* off time is incompatible */
+ continue;
+
+ /* looks like a suitable match */
+ break;
+ }
+
+ if (i > BANK1)
+ /* Nothing matches - how sad */
+ return -EINVAL;
+
+ b = &tca->bank[i];
+ if (b->level_use == 0)
+ set_level(tca, i, level);
+ b->level_use++;
+ led->bank = i;
+
+ if (b->on_dflt ||
+ !led->on_dflt ||
+ b->time_use == 0) {
+ b->ontime = led->ontime;
+ b->on_dflt = led->on_dflt;
+ need_init = 1;
+ }
+
+ if (b->off_dflt ||
+ !led->off_dflt ||
+ b->time_use == 0) {
+ b->offtime = led->offtime;
+ b->off_dflt = led->off_dflt;
+ need_init = 1;
+ }
+
+ if (need_init)
+ set_times(tca, i);
+
+ led->ontime = b->ontime;
+ led->offtime = b->offtime;
+
+ b->time_use++;
+ led->blink = 1;
+ led->led_cdev.brightness = TO_BRIGHT(b->level);
+ set_select(tca, led->num, blink_source[i]);
+ return 0;
+}
+
+static int led_assign(struct tca6507_led *led)
+{
+ struct tca6507_chip *tca = led->chip;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ led_release(led);
+ err = led_prepare(led);
+ if (err) {
+ /*
+ * Can only fail on timer setup. In that case we need to
+ * re-establish a steady level.
+ */
+ led->ontime = 0;
+ led->offtime = 0;
+ led_prepare(led);
+ }
+ spin_unlock_irqrestore(&tca->lock, flags);
+
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+ return err;
+}
+
+static void tca6507_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+ led->led_cdev.brightness = brightness;
+ led->ontime = 0;
+ led->offtime = 0;
+ led_assign(led);
+}
+
+static int tca6507_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+
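+ /* A delay of 0 means "use a sensible default". A non-zero value is
+ * recorded as an explicit request unless it arrived via the led_cdev's
+ * own blink_delay fields, in which case the previous default/explicit
+ * status is kept.
+ */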
+ if (*delay_on == 0)
+ led->on_dflt = 1;
+ else if (delay_on != &led_cdev->blink_delay_on)
+ led->on_dflt = 0;
+ led->ontime = *delay_on;
+
+ if (*delay_off == 0)
+ led->off_dflt = 1;
+ else if (delay_off != &led_cdev->blink_delay_off)
+ led->off_dflt = 0;
+ led->offtime = *delay_off;
+
+ if (led->ontime == 0)
+ led->ontime = 512;
+ if (led->offtime == 0)
+ led->offtime = 512;
+
+ if (led->led_cdev.brightness == LED_OFF)
+ led->led_cdev.brightness = LED_FULL;
+ if (led_assign(led) < 0) {
+ led->ontime = 0;
+ led->offtime = 0;
+ led->led_cdev.brightness = LED_OFF;
+ return -EINVAL;
+ }
+ *delay_on = led->ontime;
+ *delay_off = led->offtime;
+ return 0;
+}
+
+#ifdef CONFIG_GPIOLIB
+static void tca6507_gpio_set_value(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ struct tca6507_chip *tca = container_of(gc, struct tca6507_chip, gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ /*
+ * 'OFF' is floating high, and 'ON' is pulled down, so it has the
+ * inverse sense of 'val'.
+ */
+ set_select(tca, tca->gpio_map[offset],
+ val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
+ spin_unlock_irqrestore(&tca->lock, flags);
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+}
+
+static int tca6507_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ tca6507_gpio_set_value(gc, offset, val);
+ return 0;
+}
+
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ int err;
+ int i = 0;
+ int gpios = 0;
+
+ for (i = 0; i < NUM_LEDS; i++)
+ if (pdata->leds.leds[i].name && pdata->leds.leds[i].flags) {
+ /* Configure as a gpio */
+ tca->gpio_name[gpios] = pdata->leds.leds[i].name;
+ tca->gpio_map[gpios] = i;
+ gpios++;
+ }
+
+ if (!gpios)
+ return 0;
+
+ tca->gpio.label = "gpio-tca6507";
+ tca->gpio.names = tca->gpio_name;
+ tca->gpio.ngpio = gpios;
+ tca->gpio.base = pdata->gpio_base;
+ tca->gpio.owner = THIS_MODULE;
+ tca->gpio.direction_output = tca6507_gpio_direction_output;
+ tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.dev = &client->dev;
+ err = gpiochip_add(&tca->gpio);
+ if (err) {
+ tca->gpio.ngpio = 0;
+ return err;
+ }
+ if (pdata->setup)
+ pdata->setup(tca->gpio.base, tca->gpio.ngpio);
+ return 0;
+}
+
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+ if (tca->gpio.ngpio) {
+ int err = gpiochip_remove(&tca->gpio);
+ if (err)
+ dev_err(&tca->client->dev, "%s failed, %d\n",
+ "gpiochip_remove()", err);
+ }
+}
+#else /* CONFIG_GPIOLIB */
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ return 0;
+}
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+}
+#endif /* CONFIG_GPIOLIB */
+
+static int __devinit tca6507_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6507_chip *tca;
+ struct i2c_adapter *adapter;
+ struct tca6507_platform_data *pdata;
+ int err;
+ int i = 0;
+
+ adapter = to_i2c_adapter(client->dev.parent);
+ pdata = client->dev.platform_data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -EIO;
+
+ if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
+ dev_err(&client->dev, "Need %d entries in platform-data list\n",
+ NUM_LEDS);
+ return -ENODEV;
+ }
+ err = -ENOMEM;
+ tca = kzalloc(sizeof(*tca), GFP_KERNEL);
+ if (!tca)
+ goto exit;
+
+ tca->client = client;
+ INIT_WORK(&tca->work, tca6507_work);
+ spin_lock_init(&tca->lock);
+ i2c_set_clientdata(client, tca);
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ struct tca6507_led *l = tca->leds + i;
+
+ l->chip = tca;
+ l->num = i;
+ if (pdata->leds.leds[i].name && !pdata->leds.leds[i].flags) {
+ l->led_cdev.name = pdata->leds.leds[i].name;
+ l->led_cdev.default_trigger
+ = pdata->leds.leds[i].default_trigger;
+ l->led_cdev.brightness_set = tca6507_brightness_set;
+ l->led_cdev.blink_set = tca6507_blink_set;
+ l->bank = -1;
+ err = led_classdev_register(&client->dev,
+ &l->led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+ }
+ err = tca6507_probe_gpios(client, tca, pdata);
+ if (err)
+ goto exit;
+ /* set all registers to known state - zero */
+ tca->reg_set = 0x7f;
+ schedule_work(&tca->work);
+
+ return 0;
+exit:
+ while (i--)
+ if (tca->leds[i].led_cdev.name)
+ led_classdev_unregister(&tca->leds[i].led_cdev);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+ return err;
+}
+
+static int __devexit tca6507_remove(struct i2c_client *client)
+{
+ int i;
+ struct tca6507_chip *tca = i2c_get_clientdata(client);
+ struct tca6507_led *tca_leds = tca->leds;
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ if (tca_leds[i].led_cdev.name)
+ led_classdev_unregister(&tca_leds[i].led_cdev);
+ }
+ tca6507_remove_gpio(tca);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+
+ return 0;
+}
+
+static struct i2c_driver tca6507_driver = {
+ .driver = {
+ .name = "leds-tca6507",
+ .owner = THIS_MODULE,
+ },
+ .probe = tca6507_probe,
+ .remove = __devexit_p(tca6507_remove),
+ .id_table = tca6507_id,
+};
+
+static int __init tca6507_leds_init(void)
+{
+ return i2c_add_driver(&tca6507_driver);
+}
+
+static void __exit tca6507_leds_exit(void)
+{
+ i2c_del_driver(&tca6507_driver);
+}
+
+module_init(tca6507_leds_init);
+module_exit(tca6507_leds_exit);
+
+MODULE_AUTHOR("NeilBrown <neilb@suse.de>");
+MODULE_DESCRIPTION("TCA6507 LED/GPO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index b1eb34c3e81..74a24cf897c 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -237,7 +237,8 @@ static int wm831x_status_probe(struct platform_device *pdev)
goto err;
}
- drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
+ GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, drvdata);
@@ -300,7 +301,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
err_led:
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
err:
return ret;
}
@@ -311,7 +311,6 @@ static int wm831x_status_remove(struct platform_device *pdev)
device_remove_file(drvdata->cdev.dev, &dev_attr_src);
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
return 0;
}
@@ -325,17 +324,7 @@ static struct platform_driver wm831x_status_driver = {
.remove = wm831x_status_remove,
};
-static int __devinit wm831x_status_init(void)
-{
- return platform_driver_register(&wm831x_status_driver);
-}
-module_init(wm831x_status_init);
-
-static void wm831x_status_exit(void)
-{
- platform_driver_unregister(&wm831x_status_driver);
-}
-module_exit(wm831x_status_exit);
+module_platform_driver(wm831x_status_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x status LED driver");
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 4a127657835..918d4baff1c 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -227,7 +227,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
goto err_isink;
}
- led = kzalloc(sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_dcdc;
@@ -259,12 +259,10 @@ static int wm8350_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
- goto err_led;
+ goto err_dcdc;
return 0;
- err_led:
- kfree(led);
err_dcdc:
regulator_put(dcdc);
err_isink:
@@ -281,7 +279,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
wm8350_led_disable(led);
regulator_put(led->dcdc);
regulator_put(led->isink);
- kfree(led);
return 0;
}
@@ -295,17 +292,7 @@ static struct platform_driver wm8350_led_driver = {
.shutdown = wm8350_led_shutdown,
};
-static int __devinit wm8350_led_init(void)
-{
- return platform_driver_register(&wm8350_led_driver);
-}
-module_init(wm8350_led_init);
-
-static void wm8350_led_exit(void)
-{
- platform_driver_unregister(&wm8350_led_driver);
-}
-module_exit(wm8350_led_exit);
+module_platform_driver(wm8350_led_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 LED driver");
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 0dc30ffde5a..595d7319701 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -381,6 +381,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@@ -392,6 +397,7 @@ static struct virtio_config_ops lguest_config_ops = {
.reset = lg_reset,
.find_vqs = lg_find_vqs,
.del_vqs = lg_del_vqs,
+ .bus_name = lg_bus_name,
};
/*
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 65af42f2d59..39809035320 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -697,7 +697,7 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
* interrupts are enabled. We always leave interrupts enabled while
* running the Guest.
*/
- regs->eflags = X86_EFLAGS_IF | 0x2;
+ regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
/*
* The "Extended Instruction Pointer" register says where the Guest is
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c139777..6dc26b61219 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
*/
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
- cputime64_t retval;
+ u64 retval;
- retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
- kstat_cpu(cpu).cpustat.iowait);
+ retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+ kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
- retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+ retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
return retval;
}
@@ -220,13 +220,11 @@ static void rackmeter_do_timer(struct work_struct *work)
int i, offset, load, cumm, pause;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
- rcpu->prev_wall);
+ total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
rcpu->prev_wall = cur_jiffies;
total_idle_ticks = get_cpu_idle_time(cpu);
- idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
- rcpu->prev_idle);
+ idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 116a49ce74b..54ac7ffacb4 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -32,7 +32,6 @@
#include <linux/completion.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
-#include <linux/sysdev.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
@@ -681,9 +680,6 @@ static struct platform_driver smu_of_platform_driver =
static int __init smu_init_sysfs(void)
{
/*
- * Due to sysfs bogosity, a sysdev is not a real device, so
- * we should in fact create both if we want sysdev semantics
- * for power management.
* For now, we don't power manage machines with an SMU chip,
* I'm a bit too far from figuring out how that works with those
* new chipsets, but that will come back and bite us
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721b..cdf36b1e9aa 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1147,12 +1149,12 @@ void bitmap_daemon_work(struct mddev *mddev)
return;
}
if (time_before(jiffies, bitmap->daemon_lastrun
- + bitmap->mddev->bitmap_info.daemon_sleep))
+ + mddev->bitmap_info.daemon_sleep))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
- bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
goto done;
}
bitmap->allclean = 1;
@@ -1204,7 +1206,7 @@ void bitmap_daemon_work(struct mddev *mddev)
* sure that events_cleared is up-to-date.
*/
if (bitmap->need_sync &&
- bitmap->mddev->bitmap_info.external == 0) {
+ mddev->bitmap_info.external == 0) {
bitmap_super_t *sb;
bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1268,8 +1270,8 @@ void bitmap_daemon_work(struct mddev *mddev)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout =
- bitmap->mddev->bitmap_info.daemon_sleep;
+ mddev->thread->timeout =
+ mddev->bitmap_info.daemon_sleep;
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
@@ -1588,7 +1587,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
}
if (!*bmc) {
struct page *page;
- *bmc = 1 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2 | (needed ? NEEDED_MASK : 0);
bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4720f68f817..b89c548ec3f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -14,7 +14,6 @@
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08c..627456542fb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e..ca8527fe77e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,8 +36,7 @@
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev */
+#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -570,7 +569,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -1714,6 +1713,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
+ set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
@@ -1767,6 +1768,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
}
+ if (test_bit(Replacement, &rdev->flags))
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
@@ -2546,7 +2550,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -2559,6 +2564,15 @@ state_show(struct md_rdev *rdev, char *page)
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
+ if (test_bit(WantReplacement, &rdev->flags)) {
+ len += sprintf(page+len, "%swant_replacement", sep);
+ sep = ",";
+ }
+ if (test_bit(Replacement, &rdev->flags)) {
+ len += sprintf(page+len, "%sreplacement", sep);
+ sep = ",";
+ }
+
return len+sprintf(page+len, "\n");
}
@@ -2627,6 +2641,42 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "want_replacement")) {
+ /* Any non-spare device that is not a replacement can
+ * become want_replacement at any time, but we then need to
+ * check if recovery is needed.
+ */
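+ /* (Typically requested from user-space by writing the string to the
+ * rdev's sysfs 'state' attribute, e.g.
+ * "echo want_replacement > /sys/block/mdX/md/dev-YYY/state".)
+ */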
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Replacement, &rdev->flags))
+ set_bit(WantReplacement, &rdev->flags);
+ set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+ md_wakeup_thread(rdev->mddev->thread);
+ err = 0;
+ } else if (cmd_match(buf, "-want_replacement")) {
+ /* Clearing 'want_replacement' is always allowed.
+ * Once replacement starts it is too late, though.
+ */
+ err = 0;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else if (cmd_match(buf, "replacement")) {
+ /* Can only set a device as a replacement when the array has not
+ * yet been started. Once running, replacement is automatic
+ * from spares, or by assigning 'slot'.
+ */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ set_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
+ } else if (cmd_match(buf, "-replacement")) {
+ /* Similarly, can only clear Replacement before start */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ clear_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2688,7 +2738,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
- hot_remove_disk(rdev->mddev, rdev->raid_disk);
+ hot_remove_disk(rdev->mddev, rdev);
if (err)
return err;
sysfs_unlink_rdev(rdev->mddev, rdev);
@@ -2696,7 +2746,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
- struct md_rdev *rdev2;
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
@@ -2710,10 +2759,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
- list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
- if (rdev2->raid_disk == slot)
- return -EEXIST;
-
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
@@ -3788,6 +3833,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4534,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4563,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -6036,8 +6098,15 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
struct mddev *mddev = NULL;
int ro;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ switch (cmd) {
+ case RAID_VERSION:
+ case GET_ARRAY_INFO:
+ case GET_DISK_INFO:
+ break;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ }
/*
* Commands dealing with the RAID driver but not any
@@ -6697,8 +6766,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
- } else if (rdev->raid_disk < 0)
+ }
+ if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
+ if (test_bit(Replacement, &rdev->flags))
+ seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
@@ -7320,30 +7392,27 @@ static int remove_and_add_spares(struct mddev *mddev)
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
- if (mddev->degraded) {
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags))
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ spares++;
+ if (rdev->raid_disk < 0
+ && !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
+ if (mddev->pers->
+ hot_add_disk(mddev, rdev) == 0) {
+ if (sysfs_link_rdev(mddev, rdev))
+ /* failure here is OK */;
spares++;
- if (rdev->raid_disk < 0
- && !test_bit(Faulty, &rdev->flags)) {
- rdev->recovery_offset = 0;
- if (mddev->pers->
- hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
- spares++;
- md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ md_new_event(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
}
@@ -7458,7 +7527,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(Faulty, &rdev->flags) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
@@ -7840,6 +7909,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf742d9306e..44c63dfeeb2 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -72,34 +72,7 @@ struct md_rdev {
* This reduces the burden of testing multiple flags in many cases
*/
- unsigned long flags;
-#define Faulty 1 /* device is known to have a fault */
-#define In_sync 2 /* device is in_sync with rest of array */
-#define WriteMostly 4 /* Avoid reading if at all possible */
-#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occurred but has not yet
- * been acknowledged by the metadata
- * handler, so don't allow writes
- * until it is cleared */
-#define WriteErrorSeen 9 /* A write error has been seen on this
- * device
- */
-#define FaultRecorded 10 /* Intermediate state for clearing
- * Blocked. The Fault is/will-be
- * recorded in the metadata, but that
- * metadata hasn't been stored safely
- * on disk yet.
- */
-#define BlockedBadBlocks 11 /* A writer is blocked because they
- * found an unacknowledged bad-block.
- * This can safely be cleared at any
- * time, and the writer will re-check.
- * It may be set at any time, and at
- * worst the writer will timeout and
- * re-check. So setting it as
- * accurately as possible is good, but
- * not absolutely critical.
- */
+ unsigned long flags; /* bit set of 'enum flag_bits' bits. */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -152,6 +125,44 @@ struct md_rdev {
sector_t size; /* in sectors */
} badblocks;
};
+enum flag_bits {
+ Faulty, /* device is known to have a fault */
+ In_sync, /* device is in_sync with rest of array */
+ WriteMostly, /* Avoid reading if at all possible */
+ AutoDetected, /* added by auto-detect */
+ Blocked, /* An error occurred but has not yet
+ * been acknowledged by the metadata
+ * handler, so don't allow writes
+ * until it is cleared */
+ WriteErrorSeen, /* A write error has been seen on this
+ * device
+ */
+ FaultRecorded, /* Intermediate state for clearing
+ * Blocked. The Fault is/will-be
+ * recorded in the metadata, but that
+ * metadata hasn't been stored safely
+ * on disk yet.
+ */
+ BlockedBadBlocks, /* A writer is blocked because they
+ * found an unacknowledged bad-block.
+ * This can safely be cleared at any
+ * time, and the writer will re-check.
+ * It may be set at any time, and at
+ * worst the writer will timeout and
+ * re-check. So setting it as
+ * accurately as possible is good, but
+ * not absolutely critical.
+ */
+ WantReplacement, /* This device is a candidate to be
+ * hot-replaced, either because it has
+ * reported some faults, or because
+ * of an explicit request.
+ */
+ Replacement, /* This device is a replacement for
+ * a want_replacement device with the same
+ * raid_disk number.
+ */
+};
#define BB_LEN_MASK (0x00000000000001FFULL)
#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
@@ -428,7 +439,7 @@ struct md_personality
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
- int (*hot_remove_disk) (struct mddev *mddev, int number);
+ int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
int (*resize) (struct mddev *mddev, sector_t sectors);
@@ -482,15 +493,20 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ } else
+ return 0;
}
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
}
/*
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 5899246fa37..a222f516660 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -292,17 +292,16 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int multipath_remove_disk(struct mddev *mddev, int number)
+static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct multipath_info *p = conf->multipaths + number;
print_multipath_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified"
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ede2461e79c..cc24f0cb7ee 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -135,7 +135,7 @@ out_free_pages:
put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < pi->raid_disks )
+ while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
r1bio_pool_free(r1_bio, data);
return NULL;
@@ -164,7 +164,7 @@ static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
int i;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio **bio = r1_bio->bios + i;
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
@@ -185,7 +185,7 @@ static void put_buf(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
int i;
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio *bio = r1_bio->bios[i];
if (bio->bi_end_io)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
@@ -277,13 +277,14 @@ static inline void update_head_pos(int disk, struct r1bio *r1_bio)
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
int mirror;
- int raid_disks = r1_bio->mddev->raid_disks;
+ struct r1conf *conf = r1_bio->mddev->private;
+ int raid_disks = conf->raid_disks;
- for (mirror = 0; mirror < raid_disks; mirror++)
+ for (mirror = 0; mirror < raid_disks * 2; mirror++)
if (r1_bio->bios[mirror] == bio)
break;
- BUG_ON(mirror == raid_disks);
+ BUG_ON(mirror == raid_disks * 2);
update_head_pos(mirror, r1_bio);
return mirror;
@@ -390,6 +391,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
if (!uptodate) {
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ conf->mddev->recovery);
+
set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
@@ -505,7 +511,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
start_disk = conf->last_used;
}
- for (i = 0 ; i < conf->raid_disks ; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
sector_t dist;
sector_t first_bad;
int bad_sectors;
@@ -609,7 +615,7 @@ int md_raid1_congested(struct mddev *mddev, int bits)
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -974,7 +980,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
- disks = conf->raid_disks;
+ disks = conf->raid_disks * 2;
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
@@ -988,7 +994,8 @@ read_again:
}
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (i < conf->raid_disks)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
continue;
}
@@ -1263,6 +1270,25 @@ static int raid1_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
+ struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+ if (repl
+ && repl->recovery_offset == MaxSector
+ && !test_bit(Faulty, &repl->flags)
+ && !test_and_set_bit(In_sync, &repl->flags)) {
+ /* replacement has just become active */
+ if (!rdev ||
+ !test_and_clear_bit(In_sync, &rdev->flags))
+ count++;
+ if (rdev) {
+ /* Replaced device not technically
+ * faulty, but we need to be sure
+ * it gets removed and never re-added
+ */
+ set_bit(Faulty, &rdev->flags);
+ sysfs_notify_dirent_safe(
+ rdev->sysfs_state);
+ }
+ }
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
@@ -1286,7 +1312,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int mirror = 0;
struct mirror_info *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -1294,8 +1320,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- for (mirror = first; mirror <= last; mirror++)
- if ( !(p=conf->mirrors+mirror)->rdev) {
+ for (mirror = first; mirror <= last; mirror++) {
+ p = conf->mirrors+mirror;
+ if (!p->rdev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1322,21 +1349,35 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p[conf->raid_disks].rdev == NULL) {
+ /* Add this device as a replacement */
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
+ break;
+ }
+ }
md_integrity_add_rdev(rdev, mddev);
print_conf(conf);
return err;
}
-static int raid1_remove_disk(struct mddev *mddev, int number)
+static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct mirror_info *p = conf->mirrors+ number;
+ if (rdev != p->rdev)
+ p = conf->mirrors + conf->raid_disks + number;
+
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
@@ -1358,7 +1399,21 @@ static int raid1_remove_disk(struct mddev *mddev, int number)
err = -EBUSY;
p->rdev = rdev;
goto abort;
- }
+ } else if (conf->mirrors[conf->raid_disks + number].rdev) {
+ /* We just removed a device that is being replaced.
+ * Move down the replacement. We drain all IO before
+ * doing this to avoid confusion.
+ */
+ struct md_rdev *repl =
+ conf->mirrors[conf->raid_disks + number].rdev;
+ raise_barrier(conf);
+ clear_bit(Replacement, &repl->flags);
+ p->rdev = repl;
+ conf->mirrors[conf->raid_disks + number].rdev = NULL;
+ lower_barrier(conf);
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ clear_bit(WantReplacement, &rdev->flags);
err = md_integrity_register(mddev);
}
abort:
@@ -1411,6 +1466,10 @@ static void end_sync_write(struct bio *bio, int error)
} while (sectors_to_go > 0);
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
} else if (is_badblock(conf->mirrors[mirror].rdev,
r1_bio->sector,
@@ -1441,8 +1500,13 @@ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -1493,7 +1557,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
}
}
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
} while (!success && d != r1_bio->read_disk);
@@ -1510,7 +1574,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
mdname(mddev),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
- for (d = 0; d < conf->raid_disks; d++) {
+ for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
@@ -1536,7 +1600,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
/* write it back and re-read */
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1551,7 +1615,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
d = start;
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1584,7 +1648,7 @@ static int process_checks(struct r1bio *r1_bio)
int primary;
int i;
- for (primary = 0; primary < conf->raid_disks; primary++)
+ for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
r1_bio->bios[primary]->bi_end_io = NULL;
@@ -1592,7 +1656,7 @@ static int process_checks(struct r1bio *r1_bio)
break;
}
r1_bio->read_disk = primary;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
struct bio *pbio = r1_bio->bios[primary];
@@ -1656,7 +1720,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
int i;
- int disks = conf->raid_disks;
+ int disks = conf->raid_disks * 2;
struct bio *bio, *wbio;
bio = r1_bio->bios[r1_bio->read_disk];
@@ -1737,7 +1801,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
success = 1;
else {
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
}
} while (!success && d != read_disk);
@@ -1753,7 +1817,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
start = d;
while (d != read_disk) {
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1765,7 +1829,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
while (d != read_disk) {
char b[BDEVNAME_SIZE];
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1887,7 +1951,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
{
int m;
int s = r1_bio->sectors;
- for (m = 0; m < conf->raid_disks ; m++) {
+ for (m = 0; m < conf->raid_disks * 2 ; m++) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
@@ -1909,7 +1973,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
- for (m = 0; m < conf->raid_disks ; m++)
+ for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
rdev_clear_badblocks(rdev,
@@ -2184,7 +2248,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
r1_bio->state = 0;
set_bit(R1BIO_IsSync, &r1_bio->state);
- for (i=0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
bio = r1_bio->bios[i];
@@ -2203,7 +2267,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
- still_degraded = 1;
+ if (i < conf->raid_disks)
+ still_degraded = 1;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
@@ -2254,7 +2319,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
* need to mark them bad on all write targets
*/
int ok = 1;
- for (i = 0 ; i < conf->raid_disks ; i++)
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++)
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
struct md_rdev *rdev =
rcu_dereference(conf->mirrors[i].rdev);
@@ -2323,7 +2388,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
len = sync_blocks<<9;
}
- for (i=0 ; i < conf->raid_disks; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io) {
page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
@@ -2356,7 +2421,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors);
@@ -2393,7 +2458,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf)
goto abort;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)
+ * mddev->raid_disks * 2,
GFP_KERNEL);
if (!conf->mirrors)
goto abort;
@@ -2405,7 +2471,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks;
+ conf->poolinfo->raid_disks = mddev->raid_disks * 2;
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free,
conf->poolinfo);
@@ -2414,14 +2480,20 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo->mddev = mddev;
+ err = -EINVAL;
spin_lock_init(&conf->device_lock);
list_for_each_entry(rdev, &mddev->disks, same_set) {
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
- disk = conf->mirrors + disk_idx;
+ if (test_bit(Replacement, &rdev->flags))
+ disk = conf->mirrors + conf->raid_disks + disk_idx;
+ else
+ disk = conf->mirrors + disk_idx;
+ if (disk->rdev)
+ goto abort;
disk->rdev = rdev;
disk->head_position = 0;
@@ -2437,11 +2509,27 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ err = -EIO;
conf->last_used = -1;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
disk = conf->mirrors + i;
+ if (i < conf->raid_disks &&
+ disk[conf->raid_disks].rdev) {
+ /* This slot has a replacement. */
+ if (!disk->rdev) {
+ /* No original, just make the replacement
+ * a recovering spare
+ */
+ disk->rdev =
+ disk[conf->raid_disks].rdev;
+ disk[conf->raid_disks].rdev = NULL;
+ } else if (!test_bit(In_sync, &disk->rdev->flags))
+ /* Original is not in_sync - bad */
+ goto abort;
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
@@ -2455,7 +2543,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->last_used = i;
}
- err = -EIO;
if (conf->last_used < 0) {
printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
@@ -2665,7 +2752,7 @@ static int raid1_reshape(struct mddev *mddev)
if (!newpoolinfo)
return -ENOMEM;
newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks;
+ newpoolinfo->raid_disks = raid_disks * 2;
newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free, newpoolinfo);
@@ -2673,7 +2760,8 @@ static int raid1_reshape(struct mddev *mddev)
kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
+ newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+ GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
mempool_destroy(newpool);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c732b6cce93..80ded139314 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -12,6 +12,9 @@ struct mirror_info {
* pool was allocated for, so they know how much to allocate and free.
* mddev->raid_disks cannot be used, as it can change while a pool is active
* These two datums are stored in a kmalloced struct.
+ * The 'raid_disks' here is twice the raid_disks in r1conf.
+ * This allows space so that each 'real' device can have a replacement in the
+ * second half of the array.
*/
struct pool_info {
@@ -21,7 +24,9 @@ struct pool_info {
struct r1conf {
struct mddev *mddev;
- struct mirror_info *mirrors;
+ struct mirror_info *mirrors; /* twice 'raid_disks' to
+ * allow for replacements.
+ */
int raid_disks;
/* When choose the best device for a read (read_balance())
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 685ddf325ee..6e8aa213f0d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -73,7 +73,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
struct r10conf *conf = data;
int size = offsetof(struct r10bio, devs[conf->copies]);
- /* allocate a r10bio with room for raid_disks entries in the bios array */
+ /* allocate a r10bio with room for raid_disks entries in the
+ * bios array */
return kzalloc(size, gfp_flags);
}
@@ -123,12 +124,19 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
+ if (!conf->have_replacement)
+ continue;
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ if (!bio)
+ goto out_free_bio;
+ r10_bio->devs[j].repl_bio = bio;
}
/*
* Allocate RESYNC_PAGES data pages and attach them
* where needed.
*/
for (j = 0 ; j < nalloc; j++) {
+ struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
@@ -143,6 +151,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
goto out_free_pages;
bio->bi_io_vec[i].bv_page = page;
+ if (rbio)
+ rbio->bi_io_vec[i].bv_page = page;
}
}
@@ -156,8 +166,11 @@ out_free_pages:
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < nalloc )
+ while (++j < nalloc) {
bio_put(r10_bio->devs[j].bio);
+ if (r10_bio->devs[j].repl_bio)
+ bio_put(r10_bio->devs[j].repl_bio);
+ }
r10bio_pool_free(r10_bio, conf);
return NULL;
}
@@ -178,6 +191,9 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
}
bio_put(bio);
}
+ bio = r10bio->devs[j].repl_bio;
+ if (bio)
+ bio_put(bio);
}
r10bio_pool_free(r10bio, conf);
}
@@ -191,6 +207,10 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
+ bio = &r10_bio->devs[i].repl_bio;
+ if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
+ bio_put(*bio);
+ *bio = NULL;
}
}
@@ -275,19 +295,27 @@ static inline void update_head_pos(int slot, struct r10bio *r10_bio)
* Find the disk number which triggered given bio
*/
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
- struct bio *bio, int *slotp)
+ struct bio *bio, int *slotp, int *replp)
{
int slot;
+ int repl = 0;
- for (slot = 0; slot < conf->copies; slot++)
+ for (slot = 0; slot < conf->copies; slot++) {
if (r10_bio->devs[slot].bio == bio)
break;
+ if (r10_bio->devs[slot].repl_bio == bio) {
+ repl = 1;
+ break;
+ }
+ }
BUG_ON(slot == conf->copies);
update_head_pos(slot, r10_bio);
if (slotp)
*slotp = slot;
+ if (replp)
+ *replp = repl;
return r10_bio->devs[slot].devnum;
}
@@ -296,11 +324,13 @@ static void raid10_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
+ struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
+ rdev = r10_bio->devs[slot].rdev;
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
@@ -318,7 +348,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
} else {
/*
* oops, read error - keep the refcount on the rdev
@@ -327,7 +357,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
printk_ratelimited(KERN_ERR
"md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(conf->mirrors[dev].rdev->bdev, b),
+ bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
@@ -366,17 +396,35 @@ static void raid10_end_write_request(struct bio *bio, int error)
int dev;
int dec_rdev = 1;
struct r10conf *conf = r10_bio->mddev->private;
- int slot;
+ int slot, repl;
+ struct md_rdev *rdev = NULL;
- dev = find_bio_disk(conf, r10_bio, bio, &slot);
+ dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[dev].replacement;
+ if (!rdev) {
+ smp_rmb();
+ repl = 0;
+ rdev = conf->mirrors[dev].rdev;
+ }
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- dec_rdev = 0;
+ if (repl)
+ /* Never record new bad blocks to replacement,
+ * just fail it.
+ */
+ md_error(rdev->mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ dec_rdev = 0;
+ }
} else {
/*
* Set R10BIO_Uptodate in our master bio, so that
@@ -393,12 +441,15 @@ static void raid10_end_write_request(struct bio *bio, int error)
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(conf->mirrors[dev].rdev,
+ if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors)) {
bio_put(bio);
- r10_bio->devs[slot].bio = IO_MADE_GOOD;
+ if (repl)
+ r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+ else
+ r10_bio->devs[slot].bio = IO_MADE_GOOD;
dec_rdev = 0;
set_bit(R10BIO_MadeGood, &r10_bio->state);
}
@@ -414,7 +465,6 @@ static void raid10_end_write_request(struct bio *bio, int error)
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
-
/*
* RAID10 layout manager
* As well as the chunksize and raid_disks count, there are two
@@ -562,14 +612,16 @@ static int raid10_mergeable_bvec(struct request_queue *q,
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_sectors)
+static struct md_rdev *read_balance(struct r10conf *conf,
+ struct r10bio *r10_bio,
+ int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
int sectors = r10_bio->sectors;
int best_good_sectors;
sector_t new_distance, best_dist;
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *best_rdev;
int do_balance;
int best_slot;
@@ -578,6 +630,7 @@ static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_s
retry:
sectors = r10_bio->sectors;
best_slot = -1;
+ best_rdev = NULL;
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
@@ -599,10 +652,16 @@ retry:
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (rdev == NULL)
continue;
- if (!test_bit(In_sync, &rdev->flags))
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!test_bit(In_sync, &rdev->flags) &&
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
continue;
dev_sector = r10_bio->devs[slot].addr;
@@ -627,6 +686,7 @@ retry:
if (good_sectors > best_good_sectors) {
best_good_sectors = good_sectors;
best_slot = slot;
+ best_rdev = rdev;
}
if (!do_balance)
/* Must read from here */
@@ -655,16 +715,15 @@ retry:
if (new_distance < best_dist) {
best_dist = new_distance;
best_slot = slot;
+ best_rdev = rdev;
}
}
- if (slot == conf->copies)
+ if (slot >= conf->copies) {
slot = best_slot;
+ rdev = best_rdev;
+ }
if (slot >= 0) {
- disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
- if (!rdev)
- goto retry;
atomic_inc(&rdev->nr_pending);
if (test_bit(Faulty, &rdev->flags)) {
/* Cannot risk returning a device that failed
@@ -675,11 +734,11 @@ retry:
}
r10_bio->read_slot = slot;
} else
- disk = -1;
+ rdev = NULL;
rcu_read_unlock();
*max_sectors = best_good_sectors;
- return disk;
+ return rdev;
}
static int raid10_congested(void *data, int bits)
@@ -846,7 +905,6 @@ static void unfreeze_array(struct r10conf *conf)
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
- struct mirror_info *mirror;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
@@ -945,27 +1003,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/*
* read balancing logic:
*/
- int disk;
+ struct md_rdev *rdev;
int slot;
read_again:
- disk = read_balance(conf, r10_bio, &max_sectors);
- slot = r10_bio->read_slot;
- if (disk < 0) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
raid_end_bio_io(r10_bio);
return;
}
- mirror = conf->mirrors + disk;
+ slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
read_bio->bi_sector = r10_bio->devs[slot].addr +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
+ rdev->data_offset;
+ read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
@@ -1025,6 +1083,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
+ r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
retry_write:
blocked_rdev = NULL;
@@ -1034,12 +1093,25 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[d].replacement);
+ if (rdev == rrdev)
+ rrdev = NULL;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
atomic_inc(&rdev->nr_pending);
blocked_rdev = rdev;
break;
}
+ if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+
r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].repl_bio = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
@@ -1088,6 +1160,10 @@ retry_write:
}
r10_bio->devs[i].bio = bio;
atomic_inc(&rdev->nr_pending);
+ if (rrdev) {
+ r10_bio->devs[i].repl_bio = bio;
+ atomic_inc(&rrdev->nr_pending);
+ }
}
rcu_read_unlock();
@@ -1096,11 +1172,23 @@ retry_write:
int j;
int d;
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
if (r10_bio->devs[j].bio) {
d = r10_bio->devs[j].devnum;
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}
+ if (r10_bio->devs[j].repl_bio) {
+ struct md_rdev *rdev;
+ d = r10_bio->devs[j].devnum;
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ /* Race with remove_disk */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ rdev_dec_pending(rdev, mddev);
+ }
+ }
allow_barrier(conf);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
@@ -1147,6 +1235,31 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+
+ if (!r10_bio->devs[i].repl_bio)
+ continue;
+
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ /* We are actively writing to the original device
+ * so it cannot disappear, so the replacement cannot
+ * become NULL here
+ */
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ conf->mirrors[d].replacement->data_offset);
+ mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua;
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1309,9 +1422,27 @@ static int raid10_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
@@ -1353,8 +1484,25 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct mirror_info *p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
- if (p->rdev)
- continue;
+ if (p->rdev) {
+ if (!test_bit(WantReplacement, &p->rdev->flags) ||
+ p->replacement != NULL)
+ continue;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1385,40 +1533,61 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int raid10_remove_disk(struct mddev *mddev, int number)
+static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
- struct mirror_info *p = conf->mirrors+ number;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
+ struct mirror_info *p = conf->mirrors + number;
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove faulty devices in recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != p->recovery_disabled &&
- enough(conf, -1)) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- }
- err = md_integrity_register(mddev);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove non-faulty devices if recovery
+ * is not possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != p->recovery_disabled &&
+ (!p->replacement || p->replacement == rdev) &&
+ enough(conf, -1)) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ goto abort;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see neither -- if they are careful.
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just removed the Replacement as faulty.
+ * Clear the flag just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
+
+ err = md_integrity_register(mddev);
+
abort:
print_conf(conf);
@@ -1432,7 +1601,7 @@ static void end_sync_read(struct bio *bio, int error)
struct r10conf *conf = r10_bio->mddev->private;
int d;
- d = find_bio_disk(conf, r10_bio, bio, NULL);
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -1493,19 +1662,34 @@ static void end_sync_write(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
int slot;
-
- d = find_bio_disk(conf, r10_bio, bio, &slot);
+ int repl;
+ struct md_rdev *rdev = NULL;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- } else if (is_badblock(conf->mirrors[d].rdev,
+ if (repl)
+ md_error(mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ }
+ } else if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors))
set_bit(R10BIO_MadeGood, &r10_bio->state);
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+ rdev_dec_pending(rdev, mddev);
end_sync_request(r10_bio);
}
@@ -1609,6 +1793,29 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
generic_make_request(tbio);
}
+ /* Now write out to any replacement devices
+ * that are active
+ */
+ for (i = 0; i < conf->copies; i++) {
+ int j, d;
+ int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
+
+ tbio = r10_bio->devs[i].repl_bio;
+ if (!tbio || !tbio->bi_end_io)
+ continue;
+ if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+ && r10_bio->devs[i].bio != fbio)
+ for (j = 0; j < vcnt; j++)
+ memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+ page_address(fbio->bi_io_vec[j].bv_page),
+ PAGE_SIZE);
+ d = r10_bio->devs[i].devnum;
+ atomic_inc(&r10_bio->remaining);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ tbio->bi_size >> 9);
+ generic_make_request(tbio);
+ }
+
done:
if (atomic_dec_and_test(&r10_bio->remaining)) {
md_done_sync(mddev, r10_bio->sectors, 1);
@@ -1668,8 +1875,13 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
s << 9,
bio->bi_io_vec[idx].bv_page,
WRITE, false);
- if (!ok)
+ if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
}
if (!ok) {
/* We don't worry if we cannot set a bad block -
@@ -1709,7 +1921,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio;
+ struct bio *wbio, *wbio2;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
@@ -1721,12 +1933,20 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* share the pages with the first bio
* and submit the write request
*/
- wbio = r10_bio->devs[1].bio;
d = r10_bio->devs[1].devnum;
-
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- generic_make_request(wbio);
+ wbio = r10_bio->devs[1].bio;
+ wbio2 = r10_bio->devs[1].repl_bio;
+ if (wbio->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ generic_make_request(wbio);
+ }
+ if (wbio2 && wbio2->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ wbio2->bi_size >> 9);
+ generic_make_request(wbio2);
+ }
}
@@ -1779,8 +1999,12 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -2060,10 +2284,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
int slot = r10_bio->read_slot;
- int mirror = r10_bio->devs[slot].devnum;
struct bio *bio;
struct r10conf *conf = mddev->private;
- struct md_rdev *rdev;
+ struct md_rdev *rdev = r10_bio->devs[slot].rdev;
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
@@ -2081,15 +2304,15 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
}
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+ rdev_dec_pending(rdev, mddev);
bio = r10_bio->devs[slot].bio;
bdevname(bio->bi_bdev, b);
r10_bio->devs[slot].bio =
mddev->ro ? IO_BLOCKED : NULL;
read_more:
- mirror = read_balance(conf, r10_bio, &max_sectors);
- if (mirror == -1) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (rdev == NULL) {
printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
mdname(mddev), b,
@@ -2103,7 +2326,6 @@ read_more:
if (bio)
bio_put(bio);
slot = r10_bio->read_slot;
- rdev = conf->mirrors[mirror].rdev;
printk_ratelimited(
KERN_ERR
"md/raid10:%s: %s: redirecting"
@@ -2117,6 +2339,7 @@ read_more:
r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = bio;
+ r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -2187,6 +2410,22 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->sectors, 0))
md_error(conf->mddev, rdev);
}
+ rdev = conf->mirrors[dev].replacement;
+ if (r10_bio->devs[m].repl_bio == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE,
+ &r10_bio->devs[m].repl_bio->bi_flags)) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ } else {
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors, 0))
+ md_error(conf->mddev, rdev);
+ }
}
put_buf(r10_bio);
} else {
@@ -2209,6 +2448,15 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
}
rdev_dec_pending(rdev, conf->mddev);
}
+ bio = r10_bio->devs[m].repl_bio;
+ rdev = conf->mirrors[dev].replacement;
+ if (rdev && bio == IO_MADE_GOOD) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -2272,9 +2520,14 @@ static void raid10d(struct mddev *mddev)
static int init_resync(struct r10conf *conf)
{
int buffs;
+ int i;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
BUG_ON(conf->r10buf_pool);
+ conf->have_replacement = 0;
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->have_replacement = 1;
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool)
return -ENOMEM;
@@ -2355,9 +2608,22 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1);
}
- } else /* completed sync */
+ } else {
+ /* completed sync */
+ if ((!mddev->bitmap || conf->fullsync)
+ && conf->have_replacement
+ && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /* Completed a full sync so the replacements
+ * are now fully recovered.
+ */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->mirrors[i].replacement
+ ->recovery_offset
+ = MaxSector;
+ }
conf->fullsync = 0;
-
+ }
bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
@@ -2414,23 +2680,30 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
-
- if (conf->mirrors[i].rdev == NULL ||
- test_bit(In_sync, &conf->mirrors[i].rdev->flags))
+ struct mirror_info *mirror = &conf->mirrors[i];
+
+ if ((mirror->rdev == NULL ||
+ test_bit(In_sync, &mirror->rdev->flags))
+ &&
+ (mirror->replacement == NULL ||
+ test_bit(Faulty,
+ &mirror->replacement->flags)))
continue;
still_degraded = 0;
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
- /* Unless we are doing a full sync, we only need
- * to recover the block if it is set in the bitmap
+ /* Unless we are doing a full sync or a replacement,
+ * we only need to recover the block if it is set in
+ * the bitmap
*/
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
+ mirror->replacement == NULL &&
!conf->fullsync) {
/* yep, skip the sync_blocks here, but don't assume
* that there will never be anything to do here
@@ -2500,33 +2773,60 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr +
- conf->mirrors[d].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[d].rdev->bdev;
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- atomic_inc(&r10_bio->remaining);
- /* and we write to 'i' */
+ bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&rdev->nr_pending);
+ /* and we write to 'i' (if not in_sync) */
for (k=0; k<conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
BUG_ON(k == conf->copies);
- bio = r10_bio->devs[1].bio;
- bio->bi_next = biolist;
- biolist = bio;
- bio->bi_private = r10_bio;
- bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
to_addr = r10_bio->devs[k].addr;
- bio->bi_sector = to_addr +
- conf->mirrors[i].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
r10_bio->devs[0].devnum = d;
r10_bio->devs[0].addr = from_addr;
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
+ rdev = mirror->rdev;
+ if (!test_bit(In_sync, &rdev->flags)) {
+ bio = r10_bio->devs[1].bio;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr
+ + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
+ } else
+ r10_bio->devs[1].bio->bi_end_io = NULL;
+
+ /* and maybe write to replacement */
+ bio = r10_bio->devs[1].repl_bio;
+ if (bio)
+ bio->bi_end_io = NULL;
+ rdev = mirror->replacement;
+ /* Note: if rdev != NULL, then bio
+ * cannot be NULL as r10buf_pool_alloc will
+ * have allocated it.
+ * So the second test here is pointless.
+ * But it keeps semantic-checkers happy, and
+ * this comment keeps human reviewers
+ * happy.
+ */
+ if (rdev == NULL || bio == NULL ||
+ test_bit(Faulty, &rdev->flags))
+ break;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
break;
}
if (j == conf->copies) {
@@ -2544,8 +2844,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
- if (!rdev_set_badblocks(
- conf->mirrors[i].rdev,
+ if (!test_bit(In_sync,
+ &mirror->rdev->flags)
+ && !rdev_set_badblocks(
+ mirror->rdev,
+ r10_bio->devs[k].addr,
+ max_sync, 0))
+ any_working = 0;
+ if (mirror->replacement &&
+ !rdev_set_badblocks(
+ mirror->replacement,
r10_bio->devs[k].addr,
max_sync, 0))
any_working = 0;
@@ -2556,7 +2864,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
printk(KERN_INFO "md/raid10:%s: insufficient "
"working devices for recovery.\n",
mdname(mddev));
- conf->mirrors[i].recovery_disabled
+ mirror->recovery_disabled
= mddev->recovery_disabled;
}
break;
@@ -2605,6 +2913,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t first_bad, sector;
int bad_sectors;
+ if (r10_bio->devs[i].repl_bio)
+ r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2635,6 +2946,27 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
+
+ if (conf->mirrors[d].replacement == NULL ||
+ test_bit(Faulty,
+ &conf->mirrors[d].replacement->flags))
+ continue;
+
+ /* Need to set up for writing to the replacement */
+ bio = r10_bio->devs[i].repl_bio;
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ sector = r10_bio->devs[i].addr;
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = sector +
+ conf->mirrors[d].replacement->data_offset;
+ bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ count++;
}
if (count < 2) {
@@ -2643,6 +2975,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (r10_bio->devs[i].bio->bi_end_io)
rdev_dec_pending(conf->mirrors[d].rdev,
mddev);
+ if (r10_bio->devs[i].repl_bio &&
+ r10_bio->devs[i].repl_bio->bi_end_io)
+ rdev_dec_pending(
+ conf->mirrors[d].replacement,
+ mddev);
}
put_buf(r10_bio);
biolist = NULL;
@@ -2896,6 +3233,16 @@ static int run(struct mddev *mddev)
continue;
disk = conf->mirrors + disk_idx;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto out_free_conf;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto out_free_conf;
+ disk->rdev = rdev;
+ }
+
disk->rdev = rdev;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -2923,6 +3270,13 @@ static int run(struct mddev *mddev)
disk = conf->mirrors + i;
+ if (!disk->rdev && disk->replacement) {
+ /* The replacement is all we have - use it */
+ disk->rdev = disk->replacement;
+ disk->replacement = NULL;
+ clear_bit(Replacement, &disk->rdev->flags);
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7facfdf841f..7c615613c38 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,7 +2,7 @@
#define _RAID10_H
struct mirror_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
sector_t head_position;
int recovery_disabled; /* matches
* mddev->recovery_disabled
@@ -18,12 +18,13 @@ struct r10conf {
spinlock_t device_lock;
/* geometry */
- int near_copies; /* number of copies laid out raid0 style */
+ int near_copies; /* number of copies laid out
+ * raid0 style */
int far_copies; /* number of copies laid out
* at large strides across drives
*/
- int far_offset; /* far_copies are offset by 1 stripe
- * instead of many
+ int far_offset; /* far_copies are offset by 1
+ * stripe instead of many
*/
int copies; /* near_copies * far_copies.
* must be <= raid_disks
@@ -34,10 +35,11 @@ struct r10conf {
* 1 stripe.
*/
- sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+ sector_t dev_sectors; /* temp copy of
+ * mddev->dev_sectors */
- int chunk_shift; /* shift from chunks to sectors */
- sector_t chunk_mask;
+ int chunk_shift; /* shift from chunks to sectors */
+ sector_t chunk_mask;
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
@@ -45,20 +47,22 @@ struct r10conf {
int pending_count;
spinlock_t resync_lock;
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending;
+ int nr_waiting;
+ int nr_queued;
+ int barrier;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
* (fresh device added).
* Cleared when a sync completes.
*/
-
+ int have_replacement; /* There is at least one
+ * replacement device.
+ */
wait_queue_head_t wait_barrier;
- mempool_t *r10bio_pool;
- mempool_t *r10buf_pool;
+ mempool_t *r10bio_pool;
+ mempool_t *r10buf_pool;
struct page *tmppage;
/* When taking over an array from a different personality, we store
@@ -98,11 +102,18 @@ struct r10bio {
* When resyncing we also use one for each copy.
* When reconstructing, we use 2 bios, one for read, one for write.
* We choose the number when they are allocated.
+ * We sometimes need an extra bio to write to the replacement.
*/
struct {
- struct bio *bio;
- sector_t addr;
- int devnum;
+ struct bio *bio;
+ union {
+ struct bio *repl_bio; /* used for resync and
+ * writes */
+ struct md_rdev *rdev; /* used for reads
+ * (read_slot >= 0) */
+ };
+ sector_t addr;
+ int devnum;
} devs[0];
};
@@ -121,17 +132,19 @@ struct r10bio {
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r10bio.state */
-#define R10BIO_Uptodate 0
-#define R10BIO_IsSync 1
-#define R10BIO_IsRecover 2
-#define R10BIO_Degraded 3
+enum r10bio_state {
+ R10BIO_Uptodate,
+ R10BIO_IsSync,
+ R10BIO_IsRecover,
+ R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
-#define R10BIO_ReadError 4
+ R10BIO_ReadError,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
-#define R10BIO_MadeGood 5
-#define R10BIO_WriteError 6
+ R10BIO_MadeGood,
+ R10BIO_WriteError,
+};
#endif
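The raid10.h hunk above overlays repl_bio and rdev in an anonymous union inside devs[], discriminated by the request's read_slot: a read stores the rdev it took a reference on (read_slot >= 0), while writes and resync store the extra bio destined for the replacement, and make_request() sets read_slot = -1 on the write path so the cleanup code knows which member is live. The fragment below only illustrates that discriminated-union pattern with made-up placeholder types; it is not the kernel structure.

#include <stdio.h>

/* Invented placeholder types standing in for struct bio / struct md_rdev. */
struct bio      { const char *name; };
struct md_rdev  { const char *name; };

/* Simplified per-copy slot: only one union member is live at a time. */
struct copy_slot {
    struct bio *bio;
    union {
        struct bio     *repl_bio;   /* live when read_slot < 0 (writes/resync) */
        struct md_rdev *rdev;       /* live when read_slot >= 0 (reads)        */
    };
};

struct request_state {
    int read_slot;                  /* discriminator for the union above */
    struct copy_slot devs[2];
};

/* Cleanup must consult read_slot before touching the union. */
static void release(struct request_state *r)
{
    for (int i = 0; i < 2; i++) {
        if (r->read_slot >= 0 && i == r->read_slot)
            printf("drop reference on rdev %s\n", r->devs[i].rdev->name);
        else if (r->read_slot < 0 && r->devs[i].repl_bio)
            printf("free replacement bio %s\n", r->devs[i].repl_bio->name);
    }
}

int main(void)
{
    struct md_rdev d = { "sda" };
    struct bio     b = { "repl-write" };

    struct request_state read_req  = { .read_slot = 0 };
    read_req.devs[0].rdev = &d;      /* read path records the chosen rdev */
    release(&read_req);

    struct request_state write_req = { .read_slot = -1 };
    write_req.devs[1].repl_bio = &b; /* write path records the extra bio */
    release(&write_req);
    return 0;
}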
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e2609217..360f2b98f62 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -370,12 +370,10 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int has_failed(struct r5conf *conf)
+static int calc_degraded(struct r5conf *conf)
{
- int degraded;
+ int degraded, degraded2;
int i;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
rcu_read_lock();
degraded = 0;
@@ -399,14 +397,14 @@ static int has_failed(struct r5conf *conf)
degraded++;
}
rcu_read_unlock();
- if (degraded > conf->max_degraded)
- return 1;
+ if (conf->raid_disks == conf->previous_raid_disks)
+ return degraded;
rcu_read_lock();
- degraded = 0;
+ degraded2 = 0;
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || test_bit(Faulty, &rdev->flags))
- degraded++;
+ degraded2++;
else if (test_bit(In_sync, &rdev->flags))
;
else
@@ -416,9 +414,22 @@ static int has_failed(struct r5conf *conf)
* almost certainly hasn't.
*/
if (conf->raid_disks <= conf->previous_raid_disks)
- degraded++;
+ degraded2++;
}
rcu_read_unlock();
+ if (degraded2 > degraded)
+ return degraded2;
+ return degraded;
+}
+
+static int has_failed(struct r5conf *conf)
+{
+ int degraded;
+
+ if (conf->mddev->reshape_position == MaxSector)
+ return conf->mddev->degraded > conf->max_degraded;
+
+ degraded = calc_degraded(conf);
if (degraded > conf->max_degraded)
return 1;
return 0;
@@ -492,8 +503,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
for (i = disks; i--; ) {
int rw;
- struct bio *bi;
- struct md_rdev *rdev;
+ int replace_only = 0;
+ struct bio *bi, *rbi;
+ struct md_rdev *rdev, *rrdev = NULL;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -501,27 +513,57 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rw = WRITE;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
rw = READ;
- else
+ else if (test_and_clear_bit(R5_WantReplace,
+ &sh->dev[i].flags)) {
+ rw = WRITE;
+ replace_only = 1;
+ } else
continue;
bi = &sh->dev[i].req;
+ rbi = &sh->dev[i].rreq; /* For writing to replacement */
bi->bi_rw = rw;
- if (rw & WRITE)
+ rbi->bi_rw = rw;
+ if (rw & WRITE) {
bi->bi_end_io = raid5_end_write_request;
- else
+ rbi->bi_end_io = raid5_end_write_request;
+ } else
bi->bi_end_io = raid5_end_read_request;
rcu_read_lock();
+ rrdev = rcu_dereference(conf->disks[i].replacement);
+ smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (!rdev) {
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+ if (rw & WRITE) {
+ if (replace_only)
+ rdev = NULL;
+ if (rdev == rrdev)
+ /* We raced and saw duplicates */
+ rrdev = NULL;
+ } else {
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+ if (rrdev)
+ atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
/* We have already checked bad blocks for reads. Now
- * need to check for writes.
+ * need to check for writes. We never accept write errors
+ * on the replacement, so we don't need to check rrdev.
*/
while ((rw & WRITE) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -551,7 +593,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
}
if (rdev) {
- if (s->syncing || s->expanding || s->expanded)
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -563,16 +606,38 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset;
bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
+ if (rrdev)
+ set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
generic_make_request(bi);
- } else {
+ }
+ if (rrdev) {
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
+ md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
+
+ set_bit(STRIPE_IO_STARTED, &sh->state);
+
+ rbi->bi_bdev = rrdev->bdev;
+ pr_debug("%s: for %llu schedule op %ld on "
+ "replacement disc %d\n",
+ __func__, (unsigned long long)sh->sector,
+ rbi->bi_rw, i);
+ atomic_inc(&sh->count);
+ rbi->bi_sector = sh->sector + rrdev->data_offset;
+ rbi->bi_flags = 1 << BIO_UPTODATE;
+ rbi->bi_idx = 0;
+ rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ rbi->bi_io_vec[0].bv_offset = 0;
+ rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_next = NULL;
+ generic_make_request(rbi);
+ }
+ if (!rdev && !rrdev) {
if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
@@ -1583,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL;
for (i=0 ; i<disks; i++)
@@ -1597,11 +1662,23 @@ static void raid5_end_read_request(struct bio * bi, int error)
BUG();
return;
}
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ /* If replacement finished while this request was outstanding,
+ * 'replacement' might be NULL already.
+ * In that case it moved down to 'rdev'.
+ * rdev is not removed until all requests are finished.
+ */
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ rdev = conf->disks[i].rdev;
if (uptodate) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- rdev = conf->disks[i].rdev;
+ /* Note that this cannot happen on a
+ * replacement device. We just fail those on
+ * any error
+ */
printk_ratelimited(
KERN_INFO
"md/raid:%s: read error corrected"
@@ -1614,16 +1691,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
- if (atomic_read(&conf->disks[i].rdev->read_errors))
- atomic_set(&conf->disks[i].rdev->read_errors, 0);
+ if (atomic_read(&rdev->read_errors))
+ atomic_set(&rdev->read_errors, 0);
} else {
- const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+ const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
- rdev = conf->disks[i].rdev;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
- if (conf->mddev->degraded >= conf->max_degraded)
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error on replacement device "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+ else if (conf->mddev->degraded >= conf->max_degraded)
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1657,7 +1742,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
md_error(conf->mddev, rdev);
}
}
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -1668,14 +1753,30 @@ static void raid5_end_write_request(struct bio *bi, int error)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
+ struct md_rdev *uninitialized_var(rdev);
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
sector_t first_bad;
int bad_sectors;
+ int replacement = 0;
- for (i=0 ; i<disks; i++)
- if (bi == &sh->dev[i].req)
+ for (i = 0 ; i < disks; i++) {
+ if (bi == &sh->dev[i].req) {
+ rdev = conf->disks[i].rdev;
break;
-
+ }
+ if (bi == &sh->dev[i].rreq) {
+ rdev = conf->disks[i].replacement;
+ if (rdev)
+ replacement = 1;
+ else
+ /* rdev was removed and 'replacement'
+ * replaced it. rdev is not removed
+ * until all requests are finished.
+ */
+ rdev = conf->disks[i].rdev;
+ break;
+ }
+ }
pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
@@ -1684,21 +1785,33 @@ static void raid5_end_write_request(struct bio *bi, int error)
return;
}
- if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
- set_bit(R5_WriteError, &sh->dev[i].flags);
- } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
- &first_bad, &bad_sectors))
- set_bit(R5_MadeGood, &sh->dev[i].flags);
+ if (replacement) {
+ if (!uptodate)
+ md_error(conf->mddev, rdev);
+ else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ } else {
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ } else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGood, &sh->dev[i].flags);
+ }
+ rdev_dec_pending(rdev, conf->mddev);
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
-
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
@@ -1709,12 +1822,15 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->req.bi_io_vec = &dev->vec;
dev->req.bi_vcnt++;
dev->req.bi_max_vecs++;
+ dev->req.bi_private = sh;
dev->vec.bv_page = dev->page;
- dev->vec.bv_len = STRIPE_SIZE;
- dev->vec.bv_offset = 0;
- dev->req.bi_sector = sh->sector;
- dev->req.bi_private = sh;
+ bio_init(&dev->rreq);
+ dev->rreq.bi_io_vec = &dev->rvec;
+ dev->rreq.bi_vcnt++;
+ dev->rreq.bi_max_vecs++;
+ dev->rreq.bi_private = sh;
+ dev->rvec.bv_page = dev->page;
dev->flags = 0;
dev->sector = compute_blocknr(sh, i, previous);
@@ -1724,18 +1840,15 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
+ unsigned long flags;
pr_debug("raid456: error called\n");
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery was running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- }
+ spin_lock_irqsave(&conf->device_lock, flags);
+ clear_bit(In_sync, &rdev->flags);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2362,8 +2475,9 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
clear_bit(STRIPE_SYNCING, &sh->state);
s->syncing = 0;
+ s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
- * For recover we need to record a bad block on all
+ * For recover/replace we need to record a bad block on all
* non-sync devices, or abort the recovery
*/
if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
@@ -2373,12 +2487,18 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->disks[i].rdev;
- if (!rdev
- || test_bit(Faulty, &rdev->flags)
- || test_bit(In_sync, &rdev->flags))
- continue;
- if (!rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ rdev = conf->disks[i].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
abort = 1;
}
if (abort) {
@@ -2387,6 +2507,22 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
}
}
+static int want_replace(struct stripe_head *sh, int disk_idx)
+{
+ struct md_rdev *rdev;
+ int rv = 0;
+ /* Doing recovery so rcu locking not required */
+ rdev = sh->raid_conf->disks[disk_idx].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && (rdev->recovery_offset <= sh->sector
+ || rdev->mddev->recovery_cp <= sh->sector))
+ rv = 1;
+
+ return rv;
+}
+
/* fetch_block - checks the given member device to see if its data needs
* to be read or computed to satisfy a request.
*
@@ -2406,6 +2542,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
s->syncing || s->expanding ||
+ (s->replacing && want_replace(sh, disk_idx)) ||
(s->failed >= 1 && fdev[0]->toread) ||
(s->failed >= 2 && fdev[1]->toread) ||
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
@@ -2959,22 +3096,18 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
}
-
/*
* handle_stripe - do things to a stripe.
*
- * We lock the stripe and then examine the state of various bits
- * to see what needs to be done.
+ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
+ * state of various bits to see what needs to be done.
* Possible results:
- * return some read request which now have data
- * return some write requests which are safely on disc
+ * return some read requests which now have data
+ * return some write requests which are safely on storage
* schedule a read on some buffers
* schedule a write of some buffers
* return confirmation of parity correctness
*
- * buffers are taken off read_list or write_list, and bh_cache buffers
- * get BH_Lock set before the stripe lock is released.
- *
*/
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
@@ -2983,10 +3116,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
int disks = sh->disks;
struct r5dev *dev;
int i;
+ int do_recovery = 0;
memset(s, 0, sizeof(*s));
- s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
s->failed_num[0] = -1;
@@ -3004,7 +3137,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
dev = &sh->dev[i];
pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
+ i, dev->flags,
+ dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read
*
* new wantfill requests are only permitted while
@@ -3035,7 +3169,23 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
if (dev->written)
s->written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
+ /* Prefer to use the replacement for reads, but only
+ * if it is recovered enough and has no bad blocks.
+ */
+ rdev = rcu_dereference(conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
+ !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_ReadRepl, &dev->flags);
+ else {
+ if (rdev)
+ set_bit(R5_NeedReplace, &dev->flags);
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ clear_bit(R5_ReadRepl, &dev->flags);
+ }
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,26 +3213,50 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
- if (test_bit(R5_WriteError, &dev->flags)) {
- clear_bit(R5_Insync, &dev->flags);
- if (!test_bit(Faulty, &rdev->flags)) {
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ /* This flag does not apply to '.replacement',
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 == rdev)
+ clear_bit(R5_Insync, &dev->flags);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
- if (!test_bit(Faulty, &rdev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ /* This flag does not apply to '.replacement',
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_MadeGood, &dev->flags);
}
+ if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev2->nr_pending);
+ } else
+ clear_bit(R5_MadeGoodRepl, &dev->flags);
+ }
if (!test_bit(R5_Insync, &dev->flags)) {
/* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
@@ -3094,9 +3268,25 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (s->failed < 2)
s->failed_num[s->failed] = i;
s->failed++;
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
}
}
spin_unlock_irq(&conf->device_lock);
+ if (test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* If there is a failed device being replaced,
+ * we must be recovering.
+ * else if we are after recovery_cp, we must be syncing
+ * else we can only be replacing
+ * sync and recovery both need to read all devices, and so
+ * use the same flag.
+ */
+ if (do_recovery ||
+ sh->sector >= conf->mddev->recovery_cp)
+ s->syncing = 1;
+ else
+ s->replacing = 1;
+ }
rcu_read_unlock();
}
@@ -3138,7 +3328,7 @@ static void handle_stripe(struct stripe_head *sh)
if (unlikely(s.blocked_rdev)) {
if (s.syncing || s.expanding || s.expanded ||
- s.to_write || s.written) {
+ s.replacing || s.to_write || s.written) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
}
@@ -3164,7 +3354,7 @@ static void handle_stripe(struct stripe_head *sh)
sh->reconstruct_state = 0;
if (s.to_read+s.to_write+s.written)
handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
- if (s.syncing)
+ if (s.syncing + s.replacing)
handle_failed_sync(conf, sh, &s);
}
@@ -3195,7 +3385,9 @@ static void handle_stripe(struct stripe_head *sh)
*/
if (s.to_read || s.non_overwrite
|| (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
handle_stripe_fill(sh, &s, disks);
/* Now we check to see if any write operations have recently
@@ -3257,7 +3449,20 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}
- if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ if (s.replacing && s.locked == 0
+ && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ /* Write out to replacement devices where possible */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
+ test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ set_bit(R5_WantReplace, &sh->dev[i].flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ s.locked++;
+ }
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+ if ((s.syncing || s.replacing) && s.locked == 0 &&
+ test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
@@ -3355,6 +3560,15 @@ finish:
STRIPE_SECTORS);
rdev_dec_pending(rdev, conf->mddev);
}
+ if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ /* rdev has been moved down */
+ rdev = conf->disks[i].rdev;
+ rdev_clear_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (s.ops_request)
@@ -3578,6 +3792,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
int dd_idx;
struct bio* align_bi;
struct md_rdev *rdev;
+ sector_t end_sector;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("chunk_aligned_read : non aligned\n");
@@ -3602,9 +3817,19 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
0,
&dd_idx, NULL);
+ end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
rcu_read_lock();
- rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags)) {
+ rdev = rcu_dereference(conf->disks[dd_idx].replacement);
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ rdev->recovery_offset < end_sector) {
+ rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+ if (rdev &&
+ (test_bit(Faulty, &rdev->flags) ||
+ !(test_bit(In_sync, &rdev->flags) ||
+ rdev->recovery_offset >= end_sector)))
+ rdev = NULL;
+ }
+ if (rdev) {
sector_t first_bad;
int bad_sectors;
@@ -4129,7 +4354,6 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
-
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
@@ -4200,7 +4424,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
raid5_set_bi_hw_segments(raid_bio, scnt);
@@ -4627,7 +4850,15 @@ static struct r5conf *setup_conf(struct mddev *mddev)
continue;
disk = conf->disks + raid_disk;
- disk->rdev = rdev;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto abort;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto abort;
+ disk->rdev = rdev;
+ }
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
@@ -4716,6 +4947,7 @@ static int run(struct mddev *mddev)
int dirty_parity_disks = 0;
struct md_rdev *rdev;
sector_t reshape_offset = 0;
+ int i;
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid:%s: not clean"
@@ -4805,12 +5037,25 @@ static int run(struct mddev *mddev)
conf->thread = NULL;
mddev->private = conf;
- /*
- * 0 for a fully functional array, 1 or 2 for a degraded array.
- */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk < 0)
+ for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
+ i++) {
+ rdev = conf->disks[i].rdev;
+ if (!rdev && conf->disks[i].replacement) {
+ /* The replacement is all we have yet */
+ rdev = conf->disks[i].replacement;
+ conf->disks[i].replacement = NULL;
+ clear_bit(Replacement, &rdev->flags);
+ conf->disks[i].rdev = rdev;
+ }
+ if (!rdev)
continue;
+ if (conf->disks[i].replacement &&
+ conf->reshape_progress != MaxSector) {
+ /* replacements and reshape simply do not mix. */
+ printk(KERN_ERR "md: cannot handle concurrent "
+ "replacement and reshape.\n");
+ goto abort;
+ }
if (test_bit(In_sync, &rdev->flags)) {
working_disks++;
continue;
@@ -4844,8 +5089,10 @@ static int run(struct mddev *mddev)
dirty_parity_disks++;
}
- mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
- - working_disks);
+ /*
+ * 0 for a fully functional array, 1 or 2 for a degraded array.
+ */
+ mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
printk(KERN_ERR "md/raid:%s: not enough operational devices"
@@ -5008,7 +5255,25 @@ static int raid5_spare_active(struct mddev *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
- if (tmp->rdev
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active. */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
@@ -5017,49 +5282,68 @@ static int raid5_spare_active(struct mddev *mddev)
}
}
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
return count;
}
-static int raid5_remove_disk(struct mddev *mddev, int number)
+static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
struct disk_info *p = conf->disks + number;
print_raid5_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (number >= conf->raid_disks &&
- conf->reshape_progress == MaxSector)
- clear_bit(In_sync, &rdev->flags);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove non-faulty devices if recovery
- * isn't possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- !has_failed(conf) &&
- number < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- }
+ if (number >= conf->raid_disks &&
+ conf->reshape_progress == MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove non-faulty devices if recovery
+ * isn't possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
+ !has_failed(conf) &&
+ (!p->replacement || p->replacement == rdev) &&
+ number < conf->raid_disks) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see neither - if they are careful
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just removed the Replacement as faulty-
+ * clear the bit just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
abort:
print_raid5_conf(conf);
@@ -5095,8 +5379,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk = rdev->saved_raid_disk;
else
disk = first;
- for ( ; disk <= last ; disk++)
- if ((p=conf->disks + disk)->rdev == NULL) {
+ for ( ; disk <= last ; disk++) {
+ p = conf->disks + disk;
+ if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
err = 0;
@@ -5105,6 +5390,17 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p->replacement == NULL) {
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = disk;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
+ }
print_raid5_conf(conf);
return err;
}
@@ -5278,8 +5574,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices.
*/
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- - added_devices;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
mddev->raid_disks = conf->raid_disks;
@@ -5348,17 +5643,15 @@ static void raid5_finish_reshape(struct mddev *mddev)
revalidate_disk(mddev->gendisk);
} else {
int d;
- mddev->degraded = conf->raid_disks;
- for (d = 0; d < conf->raid_disks ; d++)
- if (conf->disks[d].rdev &&
- test_bit(In_sync,
- &conf->disks[d].rdev->flags))
- mddev->degraded--;
+ spin_lock_irq(&conf->device_lock);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
d++) {
struct md_rdev *rdev = conf->disks[d].rdev;
- if (rdev && raid5_remove_disk(mddev, d) == 0) {
+ if (rdev &&
+ raid5_remove_disk(mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
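
[Note: the raid5.c hunks above all revolve around the new per-slot "replacement" device: raid5_spare_active() promotes a fully recovered replacement, and raid5_remove_disk() slides the replacement into the rdev slot once the original device is gone. The sketch below is not kernel code; it is a simplified, hypothetical illustration of that hand-off, only to make the ordering (publish the new pointer, barrier, then clear the old one) easier to follow.]

/* Illustrative sketch only -- not the kernel's raid5.c. It mimics the
 * hand-off in raid5_remove_disk(): once the old device is gone, the
 * replacement becomes the slot's primary device. A reader following
 * either pointer must always see at least one non-NULL device.
 */
#include <stddef.h>

struct fake_rdev { int flags; };

struct fake_slot {
	struct fake_rdev *rdev;        /* primary device in this slot */
	struct fake_rdev *replacement; /* hot replacement, if any     */
};

static void promote_replacement(struct fake_slot *p)
{
	if (!p->replacement)
		return;
	/* Publish the replacement as the primary first ... */
	p->rdev = p->replacement;
	/* ... make that store visible before dropping the old pointer
	 * (the kernel code above uses smp_mb() here), ...
	 */
	__sync_synchronize();
	/* ... and only then forget the replacement slot. */
	p->replacement = NULL;
}
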
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e10c5531f9c..8d8e13934a4 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -27,7 +27,7 @@
* The possible state transitions are:
*
* Empty -> Want - on read or write to get old data for parity calc
- * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
+ * Empty -> Dirty - on compute_parity to satisfy write/sync request.
* Empty -> Clean - on compute_block when computing a block for failed drive
* Want -> Empty - on failed read
* Want -> Clean - on successful completion of read request
@@ -226,8 +226,11 @@ struct stripe_head {
#endif
} ops;
struct r5dev {
- struct bio req;
- struct bio_vec vec;
+ /* rreq and rvec are used for the replacement device when
+ * writing data to both devices.
+ */
+ struct bio req, rreq;
+ struct bio_vec vec, rvec;
struct page *page;
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
@@ -239,7 +242,13 @@ struct stripe_head {
* for handle_stripe.
*/
struct stripe_head_state {
- int syncing, expanding, expanded;
+ /* 'syncing' means that we need to read all devices, either
+ * to check/correct parity, or to reconstruct a missing device.
+ * 'replacing' means we are replacing one or more drives and
+ * the source is valid at this point so we don't need to
+ * read all devices, just the replacement targets.
+ */
+ int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
int failed_num[2];
@@ -252,38 +261,41 @@ struct stripe_head_state {
int handle_bad_blocks;
};
-/* Flags */
-#define R5_UPTODATE 0 /* page contains current data */
-#define R5_LOCKED 1 /* IO has been submitted on "req" */
-#define R5_OVERWRITE 2 /* towrite covers whole page */
+/* Flags for struct r5dev.flags */
+enum r5dev_flags {
+ R5_UPTODATE, /* page contains current data */
+ R5_LOCKED, /* IO has been submitted on "req" */
+ R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
+ R5_OVERWRITE, /* towrite covers whole page */
/* and some that are internal to handle_stripe */
-#define R5_Insync 3 /* rdev && rdev->in_sync at start */
-#define R5_Wantread 4 /* want to schedule a read */
-#define R5_Wantwrite 5
-#define R5_Overlap 7 /* There is a pending overlapping request on this block */
-#define R5_ReadError 8 /* seen a read error here recently */
-#define R5_ReWrite 9 /* have tried to over-write the readerror */
+ R5_Insync, /* rdev && rdev->in_sync at start */
+ R5_Wantread, /* want to schedule a read */
+ R5_Wantwrite,
+ R5_Overlap, /* There is a pending overlapping request
+ * on this block */
+ R5_ReadError, /* seen a read error here recently */
+ R5_ReWrite, /* have tried to over-write the readerror */
-#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-#define R5_WantFUA 14 /* Write should be FUA */
-#define R5_WriteError 15 /* got a write error - need to record it */
-#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
-/*
- * Write method
- */
-#define RECONSTRUCT_WRITE 1
-#define READ_MODIFY_WRITE 2
-/* not a write method, but a compute_parity mode */
-#define CHECK_PARITY 3
-/* Additional compute_parity mode -- updates the parity w/o LOCKING */
-#define UPDATE_PARITY 4
+ R5_Expanded, /* This block now has post-expand data */
+ R5_Wantcompute, /* compute_block in progress treat as
+ * uptodate
+ */
+ R5_Wantfill, /* dev->toread contains a bio that needs
+ * filling
+ */
+ R5_Wantdrain, /* dev->towrite needs to be drained */
+ R5_WantFUA, /* Write should be FUA */
+ R5_WriteError, /* got a write error - need to record it */
+ R5_MadeGood, /* A bad block has been fixed by writing to it */
+ R5_ReadRepl, /* Will/did read from replacement rather than orig */
+ R5_MadeGoodRepl,/* A bad block on the replacement device has been
+ * fixed by writing to it */
+ R5_NeedReplace, /* This device has a replacement which is not
+ * up-to-date at this stripe. */
+ R5_WantReplace, /* We need to update the replacement, we have read
+ * data in, and now is a good time to write it out.
+ */
+};
/*
* Stripe state
@@ -311,13 +323,14 @@ enum {
/*
* Operation request flags
*/
-#define STRIPE_OP_BIOFILL 0
-#define STRIPE_OP_COMPUTE_BLK 1
-#define STRIPE_OP_PREXOR 2
-#define STRIPE_OP_BIODRAIN 3
-#define STRIPE_OP_RECONSTRUCT 4
-#define STRIPE_OP_CHECK 5
-
+enum {
+ STRIPE_OP_BIOFILL,
+ STRIPE_OP_COMPUTE_BLK,
+ STRIPE_OP_PREXOR,
+ STRIPE_OP_BIODRAIN,
+ STRIPE_OP_RECONSTRUCT,
+ STRIPE_OP_CHECK,
+};
/*
* Plugging:
*
@@ -344,13 +357,12 @@ enum {
struct disk_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
};
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
- struct disk_info *spare;
int chunk_sectors;
int level, algorithm;
int max_degraded;
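
[Note: the raid5.h hunk converts the R5_* flag #defines into an enum whose values are still plain bit numbers, so they keep working with the generic bitops helpers used throughout the raid5.c hunks above (set_bit/clear_bit/test_bit on dev->flags). A minimal stand-alone sketch of that pattern follows, using hypothetical flag names rather than the real R5_* set.]

/* User-space sketch of the "enum as bit numbers" pattern. In the kernel
 * the helpers are set_bit()/test_bit() on an unsigned long flags word;
 * these macros only emulate them to show why an enum of small integers
 * is a drop-in replacement for the old #define bit numbers.
 */
#include <stdio.h>

enum demo_flags {            /* hypothetical names, not the R5_* flags */
	DEMO_UPTODATE,       /* bit 0 */
	DEMO_LOCKED,         /* bit 1 */
	DEMO_REPLACEMENT,    /* bit 2 */
};

#define SET_BIT(nr, word)   (*(word) |= (1UL << (nr)))
#define TEST_BIT(nr, word)  ((*(word) >> (nr)) & 1UL)

int main(void)
{
	unsigned long flags = 0;

	SET_BIT(DEMO_REPLACEMENT, &flags);
	printf("replacement set: %lu, locked set: %lu\n",
	       TEST_BIT(DEMO_REPLACEMENT, &flags),
	       TEST_BIT(DEMO_LOCKED, &flags));
	return 0;
}
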
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf75cd0..5d02221e99d 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
+ u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe2387e2..4fc29730a12 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
LP_Fc = 0;
- LO_Frac = params->frequency + 4000000;
+ LO_Frac = params->frequency + 3000000;
break;
case BANDWIDTH_7_MHZ:
LP_Fc = 1;
diff --git a/drivers/media/dvb/b2c2/flexcop-usb.c b/drivers/media/dvb/b2c2/flexcop-usb.c
index bedcfb67162..26c666dd351 100644
--- a/drivers/media/dvb/b2c2/flexcop-usb.c
+++ b/drivers/media/dvb/b2c2/flexcop-usb.c
@@ -583,25 +583,7 @@ static struct usb_driver flexcop_usb_driver = {
.id_table = flexcop_usb_table,
};
-/* module stuff */
-static int __init flexcop_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&flexcop_usb_driver))) {
- err("usb_register failed. (%d)", result);
- return result;
- }
- return 0;
-}
-
-static void __exit flexcop_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&flexcop_usb_driver);
-}
-
-module_init(flexcop_usb_module_init);
-module_exit(flexcop_usb_module_exit);
+module_usb_driver(flexcop_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
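
[Note: this is the first of many identical conversions in the hunks that follow: a hand-written module_init()/module_exit() pair that only calls usb_register()/usb_deregister() is replaced by the module_usb_driver() helper. Roughly, and only as an illustration of the idea rather than the exact macro text in include/linux/usb.h, the macro stands in for boilerplate of this shape.]

/* Rough illustration of what module_usb_driver(my_driver) replaces.
 * It mirrors the removed functions above: register on load, propagate
 * the error code, deregister on unload.
 */
static int __init my_driver_init(void)
{
	return usb_register(&my_driver);   /* propagate any error code */
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
	usb_deregister(&my_driver);        /* unregister on module unload */
}
module_exit(my_driver_exit);

[One visible difference: the converted drivers no longer print a driver-specific error message when usb_register() fails; the generated init function simply returns the error.]
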
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index ba9a643b9c6..d1e91bc80e7 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -1480,7 +1480,7 @@ static const struct file_operations ddb_fops = {
.open = ddb_open,
};
-static char *ddb_devnode(struct device *device, mode_t *mode)
+static char *ddb_devnode(struct device *device, umode_t *mode)
{
struct ddb *dev = dev_get_drvdata(device);
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index f7328777595..00a67326c19 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -450,7 +450,7 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static char *dvb_devnode(struct device *dev, mode_t *mode)
+static char *dvb_devnode(struct device *dev, umode_t *mode)
{
struct dvb_device *dvbdev = dev_get_drvdata(dev);
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 2aef3c89e9f..8d7fef84afd 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -183,26 +183,7 @@ static struct usb_driver a800_driver = {
.id_table = a800_table,
};
-/* module stuff */
-static int __init a800_module_init(void)
-{
- int result;
- if ((result = usb_register(&a800_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit a800_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&a800_driver);
-}
-
-module_init (a800_module_init);
-module_exit (a800_module_exit);
+module_usb_driver(a800_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index c6c275bac08..56cbd3636c3 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -1713,25 +1713,7 @@ static struct usb_driver af9015_usb_driver = {
.id_table = af9015_usb_table,
};
-/* module stuff */
-static int __init af9015_usb_module_init(void)
-{
- int ret;
- ret = usb_register(&af9015_usb_driver);
- if (ret)
- err("module init failed:%d", ret);
-
- return ret;
-}
-
-static void __exit af9015_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&af9015_usb_driver);
-}
-
-module_init(af9015_usb_module_init);
-module_exit(af9015_usb_module_exit);
+module_usb_driver(af9015_usb_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Afatech AF9015 DVB-T");
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 5f2278b73ee..b39f14f85e7 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -1091,26 +1091,7 @@ static struct usb_driver anysee_driver = {
.id_table = anysee_table,
};
-/* module stuff */
-static int __init anysee_module_init(void)
-{
- int ret;
-
- ret = usb_register(&anysee_driver);
- if (ret)
- err("%s: usb_register failed. Error number %d", __func__, ret);
-
- return ret;
-}
-
-static void __exit anysee_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&anysee_driver);
-}
-
-module_init(anysee_module_init);
-module_exit(anysee_module_exit);
+module_usb_driver(anysee_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/au6610.c b/drivers/media/dvb/dvb-usb/au6610.c
index b77994967b9..16210c06030 100644
--- a/drivers/media/dvb/dvb-usb/au6610.c
+++ b/drivers/media/dvb/dvb-usb/au6610.c
@@ -244,26 +244,7 @@ static struct usb_driver au6610_driver = {
.id_table = au6610_table,
};
-/* module stuff */
-static int __init au6610_module_init(void)
-{
- int ret;
-
- ret = usb_register(&au6610_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit au6610_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&au6610_driver);
-}
-
-module_init(au6610_module_init);
-module_exit(au6610_module_exit);
+module_usb_driver(au6610_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index bf67b4dfd82..5e45ae60542 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -1174,28 +1174,7 @@ static struct usb_driver az6027_usb_driver = {
.id_table = az6027_usb_table,
};
-/* module stuff */
-static int __init az6027_usb_module_init(void)
-{
- int result;
-
- result = usb_register(&az6027_usb_driver);
- if (result) {
- err("usb_register failed. (%d)", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit az6027_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&az6027_usb_driver);
-}
-
-module_init(az6027_usb_module_init);
-module_exit(az6027_usb_module_exit);
+module_usb_driver(az6027_usb_driver);
MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>");
MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)");
diff --git a/drivers/media/dvb/dvb-usb/ce6230.c b/drivers/media/dvb/dvb-usb/ce6230.c
index 57afb5a9157..fa637255729 100644
--- a/drivers/media/dvb/dvb-usb/ce6230.c
+++ b/drivers/media/dvb/dvb-usb/ce6230.c
@@ -317,27 +317,7 @@ static struct usb_driver ce6230_driver = {
.id_table = ce6230_table,
};
-/* module stuff */
-static int __init ce6230_module_init(void)
-{
- int ret;
- deb_info("%s:\n", __func__);
- ret = usb_register(&ce6230_driver);
- if (ret)
- err("usb_register failed with error:%d", ret);
-
- return ret;
-}
-
-static void __exit ce6230_module_exit(void)
-{
- deb_info("%s:\n", __func__);
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ce6230_driver);
-}
-
-module_init(ce6230_module_init);
-module_exit(ce6230_module_exit);
+module_usb_driver(ce6230_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Intel CE6230 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index f9d905002ec..0a98548ecd1 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -247,25 +247,7 @@ static struct usb_driver cinergyt2_driver = {
.id_table = cinergyt2_usb_table
};
-static int __init cinergyt2_usb_init(void)
-{
- int err;
-
- err = usb_register(&cinergyt2_driver);
- if (err) {
- err("usb_register() failed! (err %i)\n", err);
- return err;
- }
- return 0;
-}
-
-static void __exit cinergyt2_usb_exit(void)
-{
- usb_deregister(&cinergyt2_driver);
-}
-
-module_init(cinergyt2_usb_init);
-module_exit(cinergyt2_usb_exit);
+module_usb_driver(cinergyt2_driver);
MODULE_DESCRIPTION("Terratec Cinergy T2 DVB-T driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 9f2a02c4837..949ea1bc0aa 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -2034,26 +2034,7 @@ static struct usb_driver cxusb_driver = {
.id_table = cxusb_table,
};
-/* module stuff */
-static int __init cxusb_module_init(void)
-{
- int result;
- if ((result = usb_register(&cxusb_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit cxusb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&cxusb_driver);
-}
-
-module_init (cxusb_module_init);
-module_exit (cxusb_module_exit);
+module_usb_driver(cxusb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 156cbfc9c79..206999476f0 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -832,27 +832,7 @@ static struct usb_driver dib0700_driver = {
.id_table = dib0700_usb_id_table,
};
-/* module stuff */
-static int __init dib0700_module_init(void)
-{
- int result;
- info("loaded with support for %d different device-types", dib0700_device_count);
- if ((result = usb_register(&dib0700_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dib0700_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dib0700_driver);
-}
-
-module_init (dib0700_module_init);
-module_exit (dib0700_module_exit);
+module_usb_driver(dib0700_driver);
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 7270791f834..a4ac37e0e98 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -463,26 +463,7 @@ static struct usb_driver dibusb_driver = {
.id_table = dibusb_dib3000mb_table,
};
-/* module stuff */
-static int __init dibusb_module_init(void)
-{
- int result;
- if ((result = usb_register(&dibusb_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dibusb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dibusb_driver);
-}
-
-module_init (dibusb_module_init);
-module_exit (dibusb_module_exit);
+module_usb_driver(dibusb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index 9c165e2569d..9d1a59d09c5 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -141,26 +141,7 @@ static struct usb_driver dibusb_mc_driver = {
.id_table = dibusb_dib3000mc_table,
};
-/* module stuff */
-static int __init dibusb_mc_module_init(void)
-{
- int result;
- if ((result = usb_register(&dibusb_mc_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dibusb_mc_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dibusb_mc_driver);
-}
-
-module_init (dibusb_mc_module_init);
-module_exit (dibusb_mc_module_exit);
+module_usb_driver(dibusb_mc_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index f7184111aa6..0a9a79820f2 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -346,26 +346,7 @@ static struct usb_driver digitv_driver = {
.id_table = digitv_table,
};
-/* module stuff */
-static int __init digitv_module_init(void)
-{
- int result;
- if ((result = usb_register(&digitv_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit digitv_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&digitv_driver);
-}
-
-module_init (digitv_module_init);
-module_exit (digitv_module_exit);
+module_usb_driver(digitv_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 106dfd55ff9..66f205c112b 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -360,26 +360,7 @@ static struct usb_driver dtt200u_usb_driver = {
.id_table = dtt200u_usb_table,
};
-/* module stuff */
-static int __init dtt200u_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&dtt200u_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dtt200u_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dtt200u_usb_driver);
-}
-
-module_init(dtt200u_usb_module_init);
-module_exit(dtt200u_usb_module_exit);
+module_usb_driver(dtt200u_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
diff --git a/drivers/media/dvb/dvb-usb/dtv5100.c b/drivers/media/dvb/dvb-usb/dtv5100.c
index 7373132163d..3d11df41cac 100644
--- a/drivers/media/dvb/dvb-usb/dtv5100.c
+++ b/drivers/media/dvb/dvb-usb/dtv5100.c
@@ -217,26 +217,7 @@ static struct usb_driver dtv5100_driver = {
.id_table = dtv5100_table,
};
-/* module stuff */
-static int __init dtv5100_module_init(void)
-{
- int ret;
-
- ret = usb_register(&dtv5100_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit dtv5100_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dtv5100_driver);
-}
-
-module_init(dtv5100_module_init);
-module_exit(dtv5100_module_exit);
+module_usb_driver(dtv5100_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index f103ec1fe82..ff941d20e6b 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1928,22 +1928,7 @@ static struct usb_driver dw2102_driver = {
.id_table = dw2102_table,
};
-static int __init dw2102_module_init(void)
-{
- int ret = usb_register(&dw2102_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit dw2102_module_exit(void)
-{
- usb_deregister(&dw2102_driver);
-}
-
-module_init(dw2102_module_init);
-module_exit(dw2102_module_exit);
+module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
diff --git a/drivers/media/dvb/dvb-usb/ec168.c b/drivers/media/dvb/dvb-usb/ec168.c
index 78442fe4aa5..b4989ba8897 100644
--- a/drivers/media/dvb/dvb-usb/ec168.c
+++ b/drivers/media/dvb/dvb-usb/ec168.c
@@ -428,27 +428,7 @@ static struct usb_driver ec168_driver = {
.id_table = ec168_id,
};
-/* module stuff */
-static int __init ec168_module_init(void)
-{
- int ret;
- deb_info("%s:\n", __func__);
- ret = usb_register(&ec168_driver);
- if (ret)
- err("module init failed:%d", ret);
-
- return ret;
-}
-
-static void __exit ec168_module_exit(void)
-{
- deb_info("%s:\n", __func__);
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ec168_driver);
-}
-
-module_init(ec168_module_init);
-module_exit(ec168_module_exit);
+module_usb_driver(ec168_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("E3C EC168 DVB-T USB2.0 driver");
diff --git a/drivers/media/dvb/dvb-usb/friio.c b/drivers/media/dvb/dvb-usb/friio.c
index b092dc2137c..474a17e4db0 100644
--- a/drivers/media/dvb/dvb-usb/friio.c
+++ b/drivers/media/dvb/dvb-usb/friio.c
@@ -514,28 +514,7 @@ static struct usb_driver friio_driver = {
.id_table = friio_table,
};
-
-/* module stuff */
-static int __init friio_module_init(void)
-{
- int ret;
-
- ret = usb_register(&friio_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-
-static void __exit friio_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&friio_driver);
-}
-
-module_init(friio_module_init);
-module_exit(friio_module_exit);
+module_usb_driver(friio_driver);
MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index 63681df244c..c1f5582e1cd 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -209,26 +209,7 @@ static struct usb_driver gl861_driver = {
.id_table = gl861_table,
};
-/* module stuff */
-static int __init gl861_module_init(void)
-{
- int ret;
-
- ret = usb_register(&gl861_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit gl861_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&gl861_driver);
-}
-
-module_init(gl861_module_init);
-module_exit(gl861_module_exit);
+module_usb_driver(gl861_driver);
MODULE_AUTHOR("Carl Lundqvist <comabug@gmail.com>");
MODULE_DESCRIPTION("Driver MSI Mega Sky 580 DVB-T USB2.0 / GL861");
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 5f71284703d..5d0384dd45b 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -320,26 +320,7 @@ static struct usb_driver gp8psk_usb_driver = {
.id_table = gp8psk_usb_table,
};
-/* module stuff */
-static int __init gp8psk_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&gp8psk_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit gp8psk_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&gp8psk_usb_driver);
-}
-
-module_init(gp8psk_usb_module_init);
-module_exit(gp8psk_usb_module_exit);
+module_usb_driver(gp8psk_usb_driver);
MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
MODULE_DESCRIPTION("Driver for Genpix DVB-S");
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index c4622618714..67094b879bb 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -675,26 +675,7 @@ static struct usb_driver it913x_driver = {
.id_table = it913x_table,
};
-/* module stuff */
-static int __init it913x_module_init(void)
-{
- int result = usb_register(&it913x_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit it913x_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&it913x_driver);
-}
-
-module_init(it913x_module_init);
-module_exit(it913x_module_exit);
+module_usb_driver(it913x_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("it913x USB 2 Driver");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index b9228240f5c..1a876a65ed5 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -1289,26 +1289,7 @@ static struct usb_driver lme2510_driver = {
.id_table = lme2510_table,
};
-/* module stuff */
-static int __init lme2510_module_init(void)
-{
- int result = usb_register(&lme2510_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit lme2510_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&lme2510_driver);
-}
-
-module_init(lme2510_module_init);
-module_exit(lme2510_module_exit);
+module_usb_driver(lme2510_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index a1e1287c949..288af29a8bb 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -1086,27 +1086,7 @@ static struct usb_driver m920x_driver = {
.id_table = m920x_table,
};
-/* module stuff */
-static int __init m920x_module_init(void)
-{
- int ret;
-
- if ((ret = usb_register(&m920x_driver))) {
- err("usb_register failed. Error number %d", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void __exit m920x_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&m920x_driver);
-}
-
-module_init (m920x_module_init);
-module_exit (m920x_module_exit);
+module_usb_driver(m920x_driver);
MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>");
MODULE_DESCRIPTION("DVB Driver for ULI M920x");
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index b5c98da5d9e..825a8b242e0 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -1055,24 +1055,7 @@ static struct usb_driver mxl111sf_driver = {
.id_table = mxl111sf_table,
};
-static int __init mxl111sf_module_init(void)
-{
- int result = usb_register(&mxl111sf_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit mxl111sf_module_exit(void)
-{
- usb_deregister(&mxl111sf_driver);
-}
-
-module_init(mxl111sf_module_init);
-module_exit(mxl111sf_module_exit);
+module_usb_driver(mxl111sf_driver);
MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index 21384da6570..6c55384e2fc 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -225,26 +225,7 @@ static struct usb_driver nova_t_driver = {
.id_table = nova_t_table,
};
-/* module stuff */
-static int __init nova_t_module_init(void)
-{
- int result;
- if ((result = usb_register(&nova_t_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit nova_t_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&nova_t_driver);
-}
-
-module_init (nova_t_module_init);
-module_exit (nova_t_module_exit);
+module_usb_driver(nova_t_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 98fd9a6092b..c8a95042dfb 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -574,22 +574,7 @@ static struct usb_driver opera1_driver = {
.id_table = opera1_table,
};
-static int __init opera1_module_init(void)
-{
- int result = 0;
- if ((result = usb_register(&opera1_driver))) {
- err("usb_register failed. Error number %d", result);
- }
- return result;
-}
-
-static void __exit opera1_module_exit(void)
-{
- usb_deregister(&opera1_driver);
-}
-
-module_init(opera1_module_init);
-module_exit(opera1_module_exit);
+module_usb_driver(opera1_driver);
MODULE_AUTHOR("Mario Hlawitschka (c) dh1pa@amsat.org");
MODULE_AUTHOR("Marco Gittler (c) g.marco@freenet.de");
diff --git a/drivers/media/dvb/dvb-usb/pctv452e.c b/drivers/media/dvb/dvb-usb/pctv452e.c
index f9aec5cb6e7..f526eb05cc7 100644
--- a/drivers/media/dvb/dvb-usb/pctv452e.c
+++ b/drivers/media/dvb/dvb-usb/pctv452e.c
@@ -1055,22 +1055,7 @@ static struct usb_driver pctv452e_usb_driver = {
.id_table = pctv452e_usb_table,
};
-static int __init pctv452e_usb_init(void)
-{
- int ret = usb_register(&pctv452e_usb_driver);
- if (ret)
- err("%s: usb_register failed! Error %d", __FILE__, ret);
-
- return ret;
-}
-
-static void __exit pctv452e_usb_exit(void)
-{
- usb_deregister(&pctv452e_usb_driver);
-}
-
-module_init(pctv452e_usb_init);
-module_exit(pctv452e_usb_exit);
+module_usb_driver(pctv452e_usb_driver);
MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>");
MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>");
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 0998fe96195..acefaa89cc5 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -781,25 +781,7 @@ static struct usb_driver technisat_usb2_driver = {
.id_table = technisat_usb2_id_table,
};
-/* module stuff */
-static int __init technisat_usb2_module_init(void)
-{
- int result = usb_register(&technisat_usb2_driver);
- if (result) {
- err("usb_register failed. Code %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit technisat_usb2_module_exit(void)
-{
- usb_deregister(&technisat_usb2_driver);
-}
-
-module_init(technisat_usb2_module_init);
-module_exit(technisat_usb2_module_exit);
+module_usb_driver(technisat_usb2_driver);
MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device");
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index ea4eab8b396..56acf8e55d5 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -799,26 +799,7 @@ static struct usb_driver ttusb2_driver = {
.id_table = ttusb2_table,
};
-/* module stuff */
-static int __init ttusb2_module_init(void)
-{
- int result;
- if ((result = usb_register(&ttusb2_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit ttusb2_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ttusb2_driver);
-}
-
-module_init (ttusb2_module_init);
-module_exit (ttusb2_module_exit);
+module_usb_driver(ttusb2_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index 463673a5c2b..9b042292e78 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -143,26 +143,7 @@ static struct usb_driver umt_driver = {
.id_table = umt_table,
};
-/* module stuff */
-static int __init umt_module_init(void)
-{
- int result;
- if ((result = usb_register(&umt_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit umt_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&umt_driver);
-}
-
-module_init (umt_module_init);
-module_exit (umt_module_exit);
+module_usb_driver(umt_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 45e31f22481..07c673a6e76 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -436,26 +436,7 @@ static struct usb_driver vp702x_usb_driver = {
.id_table = vp702x_usb_table,
};
-/* module stuff */
-static int __init vp702x_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&vp702x_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit vp702x_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&vp702x_usb_driver);
-}
-
-module_init(vp702x_usb_module_init);
-module_exit(vp702x_usb_module_exit);
+module_usb_driver(vp702x_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 90873af5682..d750724132e 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -294,26 +294,7 @@ static struct usb_driver vp7045_usb_driver = {
.id_table = vp7045_usb_table,
};
-/* module stuff */
-static int __init vp7045_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&vp7045_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit vp7045_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&vp7045_usb_driver);
-}
-
-module_init(vp7045_usb_module_init);
-module_exit(vp7045_usb_module_exit);
+module_usb_driver(vp7045_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 51c7121b321..b1fe5137df0 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -557,26 +557,7 @@ static struct usb_driver smsusb_driver = {
.resume = smsusb_resume,
};
-static int __init smsusb_module_init(void)
-{
- int rc = usb_register(&smsusb_driver);
- if (rc)
- sms_err("usb_register failed. Error number %d", rc);
-
- sms_debug("");
-
- return rc;
-}
-
-static void __exit smsusb_module_exit(void)
-{
- /* Regular USB Cleanup */
- usb_deregister(&smsusb_driver);
- sms_info("end");
-}
-
-module_init(smsusb_module_init);
-module_exit(smsusb_module_exit);
+module_usb_driver(smsusb_driver);
MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 420bb42d523..e90192fdde1 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1794,26 +1794,7 @@ static struct usb_driver ttusb_driver = {
.id_table = ttusb_table,
};
-static int __init ttusb_init(void)
-{
- int err;
-
- if ((err = usb_register(&ttusb_driver)) < 0) {
- printk("%s: usb_register failed! Error number %d",
- __FILE__, err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ttusb_exit(void)
-{
- usb_deregister(&ttusb_driver);
-}
-
-module_init(ttusb_init);
-module_exit(ttusb_exit);
+module_usb_driver(ttusb_driver);
MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>");
MODULE_DESCRIPTION("TTUSB DVB Driver");
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index f893bffa08a..504c8123033 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1756,26 +1756,7 @@ static struct usb_driver ttusb_dec_driver = {
.id_table = ttusb_dec_table,
};
-static int __init ttusb_dec_init(void)
-{
- int result;
-
- if ((result = usb_register(&ttusb_dec_driver)) < 0) {
- printk("%s: initialisation failed: error %d.\n", __func__,
- result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit ttusb_dec_exit(void)
-{
- usb_deregister(&ttusb_dec_driver);
-}
-
-module_init(ttusb_dec_init);
-module_exit(ttusb_dec_exit);
+module_usb_driver(ttusb_dec_driver);
MODULE_AUTHOR("Alex Woods <linux-dvb@giblets.org>");
MODULE_DESCRIPTION(DRIVER_NAME);
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 25e58cbf35f..f36905b6364 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -624,21 +624,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
return 0;
}
-static int __init dsbr100_init(void)
-{
- int retval = usb_register(&usb_dsbr100_driver);
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return retval;
-}
-
-static void __exit dsbr100_exit(void)
-{
- usb_deregister(&usb_dsbr100_driver);
-}
-
-module_init (dsbr100_init);
-module_exit (dsbr100_exit);
+module_usb_driver(usb_dsbr100_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 1742bd8110b..a860a72a58e 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -659,25 +659,4 @@ err:
return retval;
}
-static int __init amradio_init(void)
-{
- int retval = usb_register(&usb_amradio_driver);
-
- pr_info(KBUILD_MODNAME
- ": version " DRIVER_VERSION " " DRIVER_DESC "\n");
-
- if (retval)
- pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", retval);
-
- return retval;
-}
-
-static void __exit amradio_exit(void)
-{
- usb_deregister(&usb_amradio_driver);
-}
-
-module_init(amradio_init);
-module_exit(amradio_exit);
-
+module_usb_driver(usb_amradio_driver);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index a6ad707fae9..b7debb67932 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -861,33 +861,7 @@ static struct usb_driver si470x_usb_driver = {
.supports_autosuspend = 1,
};
-
-
-/**************************************************************************
- * Module Interface
- **************************************************************************/
-
-/*
- * si470x_module_init - module init
- */
-static int __init si470x_module_init(void)
-{
- printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
- return usb_register(&si470x_usb_driver);
-}
-
-
-/*
- * si470x_module_exit - module exit
- */
-static void __exit si470x_module_exit(void)
-{
- usb_deregister(&si470x_usb_driver);
-}
-
-
-module_init(si470x_module_init);
-module_exit(si470x_module_exit);
+module_usb_driver(si470x_usb_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 303f22ea04c..baf907b3ce7 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
- unsigned char old_data[2]; /* Detect duplicate events */
+ unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
- unsigned char data1, data2;
+ unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
- {KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
- {KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
- {KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
- {KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
+ {KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
+ {KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
+ {KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
+ {KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
- {KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
- {KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
- {KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
- {KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
+ {KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
+ {KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
+ {KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
+ {KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
- {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
- {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
- {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
- {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+ {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+ {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+ {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+ {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
- {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
- {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+ {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+ {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
- {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+ {KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@@ -397,25 +397,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
- int i;
-
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- /*
- * Decide if the table entry matches the remote input.
- */
- if (ati_remote_tbl[i].data1 == d1 &&
- ati_remote_tbl[i].data2 == d2)
- return i;
-
- }
- return -1;
-}
-
-/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
- unsigned char scancode[2];
+ unsigned char scancode;
+ int i;
+
+ /*
+ * data[0] = 0x14
+ * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+ * data[2] = the key code (with toggle bit in MSB with some models)
+ * data[3] = channel << 4 (the low 4 bits must be zero)
+ */
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
+ if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+ dbginfo(&ati_remote->interface->dev,
+ "wrong checksum in input: %02x %02x %02x %02x\n",
+ data[0], data[1], data[2], data[3]);
+ return;
+ }
+
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
/*
- * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
- * so we have to clear them. The first bit is a bit tricky as the
- * "non-toggled" state depends on remote_num, so we xor it with the
- * second bit which is only used for toggle.
+ * MSB is a toggle code, though only used by some devices
+ * (e.g. SnapStream Firefly)
*/
- scancode[0] ^= (data[2] & 0x80);
+ scancode = data[2] & 0x7f;
- scancode[1] = data[2] & ~0x80;
-
- /* Look up event code index in mouse translation table. */
- index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+ remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
- remote_num, data[1], data[2], scancode[0], scancode[1]);
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
- if (ati_remote->old_data[0] == data[1] &&
- ati_remote->old_data[1] == data[2] &&
+ if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
- u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
- rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
}
}
@@ -908,38 +899,7 @@ static void ati_remote_disconnect(struct usb_interface *interface)
kfree(ati_remote);
}
-/*
- * ati_remote_init
- */
-static int __init ati_remote_init(void)
-{
- int result;
-
- result = usb_register(&ati_remote_driver);
- if (result)
- printk(KERN_ERR KBUILD_MODNAME
- ": usb_register error #%d\n", result);
- else
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- return result;
-}
-
-/*
- * ati_remote_exit
- */
-static void __exit ati_remote_exit(void)
-{
- usb_deregister(&ati_remote_driver);
-}
-
-/*
- * module specification
- */
-
-module_init(ati_remote_init);
-module_exit(ati_remote_exit);
+module_usb_driver(ati_remote_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
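
[Note: the ati_remote.c hunks replace the old two-byte scancode scheme with a single byte taken from data[2], and add a checksum test based on the packet layout documented in the new comment (data[1] must equal data[2] + data[3] + 0xd5, modulo 256). Below is a small stand-alone sketch of that validation for a hypothetical 4-byte packet; it is illustrative, not the driver's actual receive path.]

/* Sketch of the packet check added above; returns the 7-bit scancode
 * or -1 for a malformed packet.
 */
#include <stdio.h>

static int decode_packet(const unsigned char data[4])
{
	if (data[0] != 0x14)                        /* fixed header byte    */
		return -1;
	if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff))
		return -1;                          /* checksum mismatch    */
	if (data[3] & 0x0f)                         /* low nibble must be 0 */
		return -1;
	return data[2] & 0x7f;                      /* strip the toggle bit */
}

int main(void)
{
	unsigned char pkt[4] = { 0x14, (0x0d + 0x10 + 0xd5) & 0xff, 0x0d, 0x10 };

	printf("scancode: %d\n", decode_packet(pkt)); /* prints 13 (the 0x0d entry) */
	return 0;
}
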
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cf10ecf5ace..860c112e0fd 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -324,7 +324,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
return dev->extra_buf2_address + r_pointer;
}
- dbg("attempt to read beyong ring bufer end");
+ dbg("attempt to read beyond ring buffer end");
return 0;
}
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index fd108d90f75..6f978e85db8 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -227,7 +227,7 @@ struct ene_device {
/* TX buffer */
unsigned *tx_buffer; /* input samples buffer*/
- int tx_pos; /* position in that bufer */
+ int tx_pos; /* position in that buffer */
int tx_len; /* current len of tx buffer */
int tx_done; /* done transmitting */
/* one more sample pending*/
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6ed96465137..3aeb29a7ce1 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2458,23 +2458,4 @@ static int imon_resume(struct usb_interface *intf)
return rc;
}
-static int __init imon_init(void)
-{
- int rc;
-
- rc = usb_register(&imon_driver);
- if (rc) {
- pr_err("usb register failed(%d)\n", rc);
- rc = -ENODEV;
- }
-
- return rc;
-}
-
-static void __exit imon_exit(void)
-{
- usb_deregister(&imon_driver);
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b2605c4..81506440ede 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
#include <media/rc-map.h>
static struct rc_map_table ati_x10[] = {
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xc500, KEY_A },
- { 0xc601, KEY_B },
- { 0xde19, KEY_C },
- { 0xe01b, KEY_D },
- { 0xe621, KEY_E },
- { 0xe823, KEY_F },
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x00, KEY_A },
+ { 0x01, KEY_B },
+ { 0x19, KEY_C },
+ { 0x1b, KEY_D },
+ { 0x21, KEY_E },
+ { 0x23, KEY_F },
- { 0xdd18, KEY_KPENTER }, /* "check" */
- { 0xdb16, KEY_MENU }, /* "menu" */
- { 0xc702, KEY_POWER }, /* Power */
- { 0xc803, KEY_TV }, /* TV */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xca05, KEY_WWW }, /* WEB */
- { 0xcb06, KEY_BOOKMARKS }, /* "book" */
- { 0xcc07, KEY_EDIT }, /* "hand" */
- { 0xe11c, KEY_COFFEE }, /* "timer" */
- { 0xe520, KEY_FRONT }, /* "max" */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe31e, KEY_OK }, /* "OK" */
- { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
- { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
- { 0xcf0a, KEY_MUTE }, /* MUTE */
- { 0xd00b, KEY_CHANNELUP }, /* CH + */
- { 0xd10c, KEY_CHANNELDOWN },/* CH - */
- { 0xec27, KEY_RECORD }, /* ( o) red */
- { 0xea25, KEY_PLAY }, /* ( >) */
- { 0xe924, KEY_REWIND }, /* (<<) */
- { 0xeb26, KEY_FORWARD }, /* (>>) */
- { 0xed28, KEY_STOP }, /* ([]) */
- { 0xee29, KEY_PAUSE }, /* ('') */
- { 0xf02b, KEY_PREVIOUS }, /* (<-) */
- { 0xef2a, KEY_NEXT }, /* (>+) */
- { 0xf22d, KEY_INFO }, /* PLAYING */
- { 0xf32e, KEY_HOME }, /* TOP */
- { 0xf42f, KEY_END }, /* END */
- { 0xf530, KEY_SELECT }, /* SELECT */
+ { 0x18, KEY_KPENTER }, /* "check" */
+ { 0x16, KEY_MENU }, /* "menu" */
+ { 0x02, KEY_POWER }, /* Power */
+ { 0x03, KEY_TV }, /* TV */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x05, KEY_WWW }, /* WEB */
+ { 0x06, KEY_BOOKMARKS }, /* "book" */
+ { 0x07, KEY_EDIT }, /* "hand" */
+ { 0x1c, KEY_COFFEE }, /* "timer" */
+ { 0x20, KEY_FRONT }, /* "max" */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1a, KEY_UP }, /* up */
+ { 0x1e, KEY_OK }, /* "OK" */
+ { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0x08, KEY_VOLUMEUP }, /* VOL - */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+ { 0x0b, KEY_CHANNELUP }, /* CH + */
+ { 0x0c, KEY_CHANNELDOWN },/* CH - */
+ { 0x27, KEY_RECORD }, /* ( o) red */
+ { 0x25, KEY_PLAY }, /* ( >) */
+ { 0x24, KEY_REWIND }, /* (<<) */
+ { 0x26, KEY_FORWARD }, /* (>>) */
+ { 0x28, KEY_STOP }, /* ([]) */
+ { 0x29, KEY_PAUSE }, /* ('') */
+ { 0x2b, KEY_PREVIOUS }, /* (<-) */
+ { 0x2a, KEY_NEXT }, /* (>+) */
+ { 0x2d, KEY_INFO }, /* PLAYING */
+ { 0x2e, KEY_HOME }, /* TOP */
+ { 0x2f, KEY_END }, /* END */
+ { 0x30, KEY_SELECT }, /* SELECT */
};
static struct rc_map_list ati_x10_map = {
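
[Note: the keymap tables are trimmed to match the new single-byte scancodes: each old entry packed data[1] and data[2] into a 16-bit code, while the new entries keep only the low byte, e.g. 0xd20d becomes 0x0d. A quick illustrative check of that relationship on a few entries from the table above:]

/* Illustrative only: every new single-byte scancode in this table is
 * the low byte of the old 16-bit code.
 */
#include <assert.h>

int main(void)
{
	unsigned int old_codes[] = { 0xd20d, 0xd30e, 0xe31e, 0xf530 };
	unsigned int new_codes[] = { 0x0d,   0x0e,   0x1e,   0x30   };
	unsigned int i;

	for (i = 0; i < sizeof(old_codes) / sizeof(old_codes[0]); i++)
		assert((old_codes[i] & 0xff) == new_codes[i]);
	return 0;
}
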
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc01d11..479cdb89781 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
- { 0xf12c, KEY_TV }, /* TV */
- { 0xf22d, KEY_VCR }, /* VCR */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
- { 0xf32e, KEY_RADIO }, /* RADIO */
- { 0xca05, KEY_DIRECTORY }, /* PHOTO */
- { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
- { 0xf530, KEY_LIST }, /* CHANNEL-LST */
-
- { 0xe01b, KEY_SETUP }, /* SETUP */
- { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
- { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
- { 0xce09, KEY_VOLUMEUP }, /* VOL + */
- { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
- { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
- { 0xc500, KEY_MUTE }, /* MUTE */
-
- { 0xf732, KEY_RED }, /* red */
- { 0xf833, KEY_GREEN }, /* green */
- { 0xf934, KEY_YELLOW }, /* yellow */
- { 0xfa35, KEY_BLUE }, /* blue */
- { 0xdb16, KEY_TEXT }, /* TXT */
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
- { 0xe520, KEY_DELETE }, /* DELETE */
-
- { 0xfb36, KEY_KEYBOARD }, /* RENAME */
- { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
-
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe31e, KEY_OK }, /* OK */
-
- { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
- { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
-
- { 0xe924, KEY_REWIND }, /* rewind (<<) */
- { 0xea25, KEY_PLAY }, /* play ( >) */
- { 0xeb26, KEY_FORWARD }, /* forward (>>) */
- { 0xec27, KEY_RECORD }, /* record ( o) */
- { 0xed28, KEY_STOP }, /* stop ([]) */
- { 0xee29, KEY_PAUSE }, /* pause ('') */
-
- { 0xe621, KEY_PREVIOUS }, /* prev */
- { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
- { 0xe823, KEY_NEXT }, /* next */
- { 0xde19, KEY_MENU }, /* MENU */
- { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
-
- { 0xc702, KEY_POWER }, /* POWER */
+ { 0x2c, KEY_TV }, /* TV */
+ { 0x2d, KEY_VCR }, /* VCR */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x06, KEY_AUDIO }, /* MUSIC */
+
+ { 0x2e, KEY_RADIO }, /* RADIO */
+ { 0x05, KEY_DIRECTORY }, /* PHOTO */
+ { 0x2f, KEY_INFO }, /* TV-PREVIEW */
+ { 0x30, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0x1b, KEY_SETUP }, /* SETUP */
+ { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0x08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0x09, KEY_VOLUMEUP }, /* VOL + */
+ { 0x0b, KEY_CHANNELUP }, /* CHAN + */
+ { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0x00, KEY_MUTE }, /* MUTE */
+
+ { 0x32, KEY_RED }, /* red */
+ { 0x33, KEY_GREEN }, /* green */
+ { 0x34, KEY_YELLOW }, /* yellow */
+ { 0x35, KEY_BLUE }, /* blue */
+ { 0x16, KEY_TEXT }, /* TXT */
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0x20, KEY_DELETE }, /* DELETE */
+
+ { 0x36, KEY_KEYBOARD }, /* RENAME */
+ { 0x18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0x1a, KEY_UP }, /* up */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x1e, KEY_OK }, /* OK */
+
+ { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0x38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0x24, KEY_REWIND }, /* rewind (<<) */
+ { 0x25, KEY_PLAY }, /* play ( >) */
+ { 0x26, KEY_FORWARD }, /* forward (>>) */
+ { 0x27, KEY_RECORD }, /* record ( o) */
+ { 0x28, KEY_STOP }, /* stop ([]) */
+ { 0x29, KEY_PAUSE }, /* pause ('') */
+
+ { 0x21, KEY_PREVIOUS }, /* prev */
+ { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0x23, KEY_NEXT }, /* next */
+ { 0x19, KEY_MENU }, /* MENU */
+ { 0x3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef146520931..c7f33ec719b 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
#include <media/rc-map.h>
static struct rc_map_table snapstream_firefly[] = {
- { 0xf12c, KEY_ZOOM }, /* Maximize */
- { 0xc702, KEY_CLOSE },
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xdb16, KEY_BACK },
- { 0xdd18, KEY_KPENTER }, /* ent */
-
- { 0xce09, KEY_VOLUMEUP },
- { 0xcd08, KEY_VOLUMEDOWN },
- { 0xcf0a, KEY_MUTE },
- { 0xd00b, KEY_CHANNELUP },
- { 0xd10c, KEY_CHANNELDOWN },
- { 0xc500, KEY_VENDOR }, /* firefly */
-
- { 0xf32e, KEY_INFO },
- { 0xf42f, KEY_OPTION },
-
- { 0xe21d, KEY_LEFT },
- { 0xe41f, KEY_RIGHT },
- { 0xe722, KEY_DOWN },
- { 0xdf1a, KEY_UP },
- { 0xe31e, KEY_OK },
-
- { 0xe11c, KEY_MENU },
- { 0xe520, KEY_EXIT },
-
- { 0xec27, KEY_RECORD },
- { 0xea25, KEY_PLAY },
- { 0xed28, KEY_STOP },
- { 0xe924, KEY_REWIND },
- { 0xeb26, KEY_FORWARD },
- { 0xee29, KEY_PAUSE },
- { 0xf02b, KEY_PREVIOUS },
- { 0xef2a, KEY_NEXT },
-
- { 0xcb06, KEY_AUDIO }, /* Music */
- { 0xca05, KEY_IMAGES }, /* Photos */
- { 0xc904, KEY_DVD },
- { 0xc803, KEY_TV },
- { 0xcc07, KEY_VIDEO },
-
- { 0xc601, KEY_HELP },
- { 0xf22d, KEY_MODE }, /* Mouse */
-
- { 0xde19, KEY_A },
- { 0xe01b, KEY_B },
- { 0xe621, KEY_C },
- { 0xe823, KEY_D },
+ { 0x2c, KEY_ZOOM }, /* Maximize */
+ { 0x02, KEY_CLOSE },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x16, KEY_BACK },
+ { 0x18, KEY_KPENTER }, /* ent */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0a, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x00, KEY_VENDOR }, /* firefly */
+
+ { 0x2e, KEY_INFO },
+ { 0x2f, KEY_OPTION },
+
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x22, KEY_DOWN },
+ { 0x1a, KEY_UP },
+ { 0x1e, KEY_OK },
+
+ { 0x1c, KEY_MENU },
+ { 0x20, KEY_EXIT },
+
+ { 0x27, KEY_RECORD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_PREVIOUS },
+ { 0x2a, KEY_NEXT },
+
+ { 0x06, KEY_AUDIO }, /* Music */
+ { 0x05, KEY_IMAGES }, /* Photos */
+ { 0x04, KEY_DVD },
+ { 0x03, KEY_TV },
+ { 0x07, KEY_VIDEO },
+
+ { 0x01, KEY_HELP },
+ { 0x2d, KEY_MODE }, /* Mouse */
+
+ { 0x19, KEY_A },
+ { 0x1b, KEY_B },
+ { 0x21, KEY_C },
+ { 0x23, KEY_D },
};
static struct rc_map_list snapstream_firefly_map = {
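
Editorial note on the three X10 keymap conversions above (rc-ati-x10.c, rc-medion-x10.c, rc-snapstream-firefly.c): each new table entry keeps only the low byte of the former 16-bit scancode. In every removed entry the high byte equals the low byte plus 0xc5 (for example 0xd20d is 0x0d with 0x0d + 0xc5 = 0xd2 prepended), so dropping it loses no information. The small standalone program below checks that relationship over a sample of the removed values; the array is illustrative and not part of any driver.

#include <stdio.h>

/* A few of the removed 16-bit scancodes from rc-ati-x10.c (illustrative sample). */
static const unsigned short old_codes[] = {
	0xd20d, 0xdc17, 0xc500, 0xe823, 0xf530,
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(old_codes) / sizeof(old_codes[0]); i++) {
		unsigned char lo = old_codes[i] & 0xff;	/* the new table entry */
		unsigned char hi = old_codes[i] >> 8;	/* the dropped byte */

		/* The dropped high byte is always lo + 0xc5, i.e. redundant. */
		printf("0x%04x -> 0x%02x (hi == lo + 0xc5: %s)\n",
		       old_codes[i], lo,
		       hi == (unsigned char)(lo + 0xc5) ? "yes" : "no");
	}
	return 0;
}
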
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 60d3c1e0971..20bb12d6fbb 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1448,25 +1448,7 @@ static struct usb_driver mceusb_dev_driver = {
.id_table = mceusb_dev_table
};
-static int __init mceusb_dev_init(void)
-{
- int ret;
-
- ret = usb_register(&mceusb_dev_driver);
- if (ret < 0)
- printk(KERN_ERR DRIVER_NAME
- ": usb register failed, result = %d\n", ret);
-
- return ret;
-}
-
-static void __exit mceusb_dev_exit(void)
-{
- usb_deregister(&mceusb_dev_driver);
-}
-
-module_init(mceusb_dev_init);
-module_exit(mceusb_dev_exit);
+module_usb_driver(mceusb_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
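
Editorial note: the change above is the first of many in this series that replace hand-rolled usb_register()/usb_deregister() init and exit functions with module_usb_driver(), a helper macro from include/linux/usb.h. Roughly (a paraphrase of the expansion, not the exact macro text; the generated function names are derived from the driver variable), module_usb_driver(mceusb_dev_driver) expands to:

static int __init mceusb_dev_driver_init(void)
{
	return usb_register(&mceusb_dev_driver);
}
module_init(mceusb_dev_driver_init);

static void __exit mceusb_dev_driver_exit(void)
{
	usb_deregister(&mceusb_dev_driver);
}
module_exit(mceusb_dev_driver_exit);

The only functional difference from the removed code is that the per-driver "usb register failed" printk goes away; the return value of usb_register() is still propagated as the module init result.
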
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 29f900065d8..f5db8b949bc 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -715,7 +715,7 @@ static void ir_close(struct input_dev *idev)
}
/* class for /sys/class/rc */
-static char *ir_devnode(struct device *dev, mode_t *mode)
+static char *ir_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 61287fcca61..07322fb75ef 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1300,25 +1300,7 @@ static struct usb_driver redrat3_dev_driver = {
.id_table = redrat3_dev_table
};
-static int __init redrat3_dev_init(void)
-{
- int ret;
-
- ret = usb_register(&redrat3_dev_driver);
- if (ret < 0)
- pr_err(DRIVER_NAME
- ": usb register failed, result = %d\n", ret);
-
- return ret;
-}
-
-static void __exit redrat3_dev_exit(void)
-{
- usb_deregister(&redrat3_dev_driver);
-}
-
-module_init(redrat3_dev_init);
-module_exit(redrat3_dev_exit);
+module_usb_driver(redrat3_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index e435d94c077..b1d29d09eea 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -523,33 +523,7 @@ static int streamzap_resume(struct usb_interface *intf)
return 0;
}
-/**
- * streamzap_init
- */
-static int __init streamzap_init(void)
-{
- int ret;
-
- /* register this driver with the USB subsystem */
- ret = usb_register(&streamzap_driver);
- if (ret < 0)
- printk(KERN_ERR DRIVER_NAME ": usb register failed, "
- "result = %d\n", ret);
-
- return ret;
-}
-
-/**
- * streamzap_exit
- */
-static void __exit streamzap_exit(void)
-{
- usb_deregister(&streamzap_driver);
-}
-
-
-module_init(streamzap_init);
-module_exit(streamzap_exit);
+module_usb_driver(streamzap_driver);
MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 13f54b51194..e7f7a57bf68 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1176,6 +1176,6 @@ wbcir_exit(void)
module_init(wbcir_init);
module_exit(wbcir_exit);
-MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923fc46..1c6015a04f9 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+ { USB_DEVICE(0x2040, 0x7260),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7213),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 53dae2a8272..60b021e7986 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1385,26 +1385,4 @@ static struct usb_driver cx231xx_usb_driver = {
.id_table = cx231xx_id_table,
};
-static int __init cx231xx_module_init(void)
-{
- int result;
-
- printk(KERN_INFO DRIVER_NAME " v4l2 driver loaded.\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&cx231xx_usb_driver);
- if (result)
- cx231xx_err(DRIVER_NAME
- " usb_register failed. Error number %d.\n", result);
-
- return result;
-}
-
-static void __exit cx231xx_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&cx231xx_usb_driver);
-}
-
-module_init(cx231xx_module_init);
-module_exit(cx231xx_module_exit);
+module_usb_driver(cx231xx_usb_driver);
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 10550bd93b0..25036cb11be 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <mach/hardware.h>
#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
/* Maximum channel allowed */
#define VPIF_NUM_CHANNELS (4)
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 064550f5ce4..a693d4ebda5 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -27,7 +27,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
-#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index 5d1936dafed..56879d1a068 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -22,6 +22,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 9b747c266af..93807dcf944 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -3325,26 +3325,4 @@ static struct usb_driver em28xx_usb_driver = {
.id_table = em28xx_id_table,
};
-static int __init em28xx_module_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&em28xx_usb_driver);
- if (result)
- em28xx_err(DRIVER_NAME
- " usb_register failed. Error number %d.\n", result);
-
- printk(KERN_INFO DRIVER_NAME " driver loaded\n");
-
- return result;
-}
-
-static void __exit em28xx_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&em28xx_usb_driver);
-}
-
-module_init(em28xx_module_init);
-module_exit(em28xx_module_exit);
+module_usb_driver(em28xx_usb_driver);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index d3777c86e1d..40f214ab924 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -2680,27 +2680,4 @@ static struct usb_driver et61x251_usb_driver = {
.disconnect = et61x251_usb_disconnect,
};
-/*****************************************************************************/
-
-static int __init et61x251_module_init(void)
-{
- int err = 0;
-
- KDBG(2, ET61X251_MODULE_NAME " v" ET61X251_MODULE_VERSION);
- KDBG(3, ET61X251_MODULE_AUTHOR);
-
- if ((err = usb_register(&et61x251_usb_driver)))
- KDBG(1, "usb_register() failed");
-
- return err;
-}
-
-
-static void __exit et61x251_module_exit(void)
-{
- usb_deregister(&et61x251_usb_driver);
-}
-
-
-module_init(et61x251_module_init);
-module_exit(et61x251_module_exit);
+module_usb_driver(et61x251_usb_driver);
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 6ae26160b81..636627b57dc 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -288,15 +288,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 4c56dbef6d9..ea17b5d94ea 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1067,15 +1067,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index f9b86b2484b..8f33bbd091a 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2132,15 +2132,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 0357d6d461d..81a4adbd9f7 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -895,16 +895,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index ea48200fd3a..0107513cd72 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -290,16 +290,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index 2ced3b73994..a8f54c20e58 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -524,22 +524,7 @@ static struct usb_driver sd_driver = {
/*====================== Init and Exit module functions ====================*/
-static int __init sd_mod_init(void)
-{
- PDEBUG(D_PROBE, "driver startup - version %s", DRIVER_VERSION);
-
- if (usb_register(&sd_driver) < 0)
- return -1;
- return 0;
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
/*==========================================================================*/
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c7ffe..2ca10dfec91 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
/* do the specific subdriver stuff before endpoint selection */
- gspca_dev->alt = 0;
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
@@ -957,7 +957,7 @@ retry:
ret = -EIO;
goto out;
}
- alt = ep_tb[--alt_idx].alt;
+ gspca_dev->alt = ep_tb[--alt_idx].alt;
}
}
out:
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 8e3dabe3007..5ab3f7e1276 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -582,16 +582,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 4fe51fda80f..e8e8f2fe916 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -413,16 +413,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index f3f7fe0ec4b..b1da7f4096c 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -634,15 +634,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 67533e5582a..9fe3816b2aa 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -404,19 +404,7 @@ static struct usb_driver sd_driver = {
.disconnect = m5602_disconnect
};
-/* -- module insert / remove -- */
-static int __init mod_m5602_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit mod_m5602_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(mod_m5602_init);
-module_exit(mod_m5602_exit);
+module_usb_driver(sd_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index ef45fa57575..5c2ea05c46b 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -517,15 +517,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 473e813b680..d73e5bd3dbf 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1259,15 +1259,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 7681814e594..d4bec932177 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -2118,18 +2118,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(webcam, int, 0644);
MODULE_PARM_DESC(webcam,
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 6a01b35a947..08b8ce1dee1 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -5056,18 +5056,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(frame_rate, int, 0644);
MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 76907eced4a..04753391de3 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,16 +1533,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b3b1ea60a84..f30060d5063 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1432,16 +1432,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 1600df152fd..ece8b1e82a1 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -569,15 +569,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 1c44f78ff9e..2811195258c 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1220,15 +1220,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 7509d05dc06..1ac111176ff 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -868,15 +868,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index 3b71bbcd977..1494e1829d3 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -766,15 +766,4 @@ static struct usb_driver sd_driver = {
.post_reset = sd_post_reset,
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 48aae3926a3..478533cb115 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -737,16 +737,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 86e07a139a1..33cabc342dc 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2554,15 +2554,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 146b459b08d..ddb392dc4f2 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1527,15 +1527,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c746bf19ca1..afa3186b803 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3104,15 +3104,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 695673106e7..070b9c33b51 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -590,15 +590,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index bb82c94ece1..103984708c7 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1092,15 +1092,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 7aaac72aee9..9c16821addd 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2188,15 +2188,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 16722dc6039..1320f35e39f 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -815,15 +815,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 89fec4c500a..54eed87672d 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -714,21 +714,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index a44fe3d2596..df4e1699646 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1544,15 +1544,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index c82fd53cef9..259a0c73c66 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1106,15 +1106,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index df805f79828..2fe3c29bd6b 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -432,16 +432,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index c2c056056e0..ae783634712 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -339,16 +339,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index e4255b4905e..1a8ba9b3550 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1197,15 +1197,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 42a7a28a6c8..4ae7cc8f463 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -519,15 +519,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index 4dcc7e37f9f..461ed645f30 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -355,15 +355,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index b1fca7db101..0ab425fbea9 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -612,18 +612,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c8909772435..c80f0c0c75b 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1211,15 +1211,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 90f0877eb59..ea44deb66af 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1438,15 +1438,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index 29596c59837..b2695b1dc60 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -4971,18 +4971,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(force_sensor, int, 0644);
MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 933ef2ca658..c8922c5ffbf 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -418,16 +418,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 7ee2c8271dc..208f6b2d512 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4230,15 +4230,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 81dd4c99d02..d12ea1518ac 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -368,16 +368,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 3aed42acdb5..fbb6ed25ec3 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3325,15 +3325,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 30ea1e47949..0202fead6b9 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -7050,18 +7050,7 @@ static struct usb_driver sd_driver = {
#endif
};
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(force_sensor, int, 0644);
MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 441dacf642b..3f1a5b1beeb 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -452,26 +452,7 @@ static struct usb_driver hdpvr_usb_driver = {
.id_table = hdpvr_table,
};
-static int __init hdpvr_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&hdpvr_usb_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit hdpvr_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&hdpvr_usb_driver);
-}
-
-module_init(hdpvr_init);
-module_exit(hdpvr_exit);
+module_usb_driver(hdpvr_usb_driver);
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2.1");
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8914f..82c8817bd32 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,7 +162,6 @@ struct m5mols_version {
* @pad: media pad
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
- * @code: current code
* @irq_waitq: waitqueue for the capture
* @work_irq: workqueue for the IRQ
* @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
struct media_pad pad;
struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
int res_type;
- enum v4l2_mbus_pixelcode code;
wait_queue_head_t irq_waitq;
struct work_struct work_irq;
unsigned long flags;
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab3700647..e0f09e53180 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
int ret = -EINVAL;
u8 reg;
- if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
return ret;
ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
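
Editorial note: the m5mols_mode() hunk above replaces a condition that can never be true with a proper range check. Assuming REG_PARAMETER < REG_CAPTURE (which is what a bounds check implies), no value satisfies both mode < REG_PARAMETER and mode > REG_CAPTURE, so the old test silently accepted every mode. A tiny standalone illustration follows; the enum values are placeholders, not the driver's actual register definitions.

#include <stdio.h>

/* Placeholder values; the real constants live in the m5mols register headers. */
enum { REG_PARAMETER = 1, REG_MONITOR = 2, REG_CAPTURE = 3 };

static int valid_old(unsigned char mode)	/* pre-fix: never rejects anything */
{
	return !(mode < REG_PARAMETER && mode > REG_CAPTURE);
}

static int valid_new(unsigned char mode)	/* post-fix: proper bounds check */
{
	return !(mode < REG_PARAMETER || mode > REG_CAPTURE);
}

int main(void)
{
	for (unsigned char mode = 0; mode <= 4; mode++)
		printf("mode %u: old says %s, new says %s\n", mode,
		       valid_old(mode) ? "valid" : "invalid",
		       valid_new(mode) ? "valid" : "invalid");
	return 0;
}
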
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct m5mols_info *info = to_m5mols(sd);
struct v4l2_mbus_framefmt *format;
- if (fmt->pad != 0)
- return -EINVAL;
-
format = __find_format(info, fh, fmt->which, info->res_type);
if (!format)
return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
u32 resolution = 0;
int ret;
- if (fmt->pad != 0)
- return -EINVAL;
-
ret = __find_resolution(sd, format, &type, &resolution);
if (ret < 0)
return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (!sfmt)
return 0;
- *sfmt = m5mols_default_ffmt[type];
- sfmt->width = format->width;
- sfmt->height = format->height;
+
+ format->code = m5mols_default_ffmt[type].code;
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ *sfmt = *format;
info->resolution = resolution;
- info->code = format->code;
info->res_type = type;
}
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
{
struct m5mols_info *info = to_m5mols(sd);
+ u32 code = info->ffmt[info->res_type].code;
if (enable) {
int ret = -EINVAL;
- if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
return ret;
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb95f2..398f96ffd35 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
+ mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
if (ret) {
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3c0ca..7b34b11daf2 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret)
+ if (ret) {
kfree(priv);
+ return ret;
+ }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f142d..ee0d0b39cd1 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
vid_dev->displays[vid_dev->num_displays++] = dssdev;
}
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f634b..6a6cf388bae 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86c936..3ea38a8def8 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
unsigned long flags;
struct sgdma_state *sg_state;
- if ((sglen < 0) || ((sglen > 0) & !sglist))
+ if ((sglen < 0) || ((sglen > 0) && !sglist))
return -EINVAL;
spin_lock_irqsave(&sgdma->lock, flags);
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index b818cacf420..d4c48ef227f 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -80,13 +80,6 @@
#include "isph3a.h"
#include "isphist.h"
-/*
- * this is provided as an interim solution until omap3isp doesn't need
- * any omap-specific iommu API
- */
-#define to_iommu(dev) \
- (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
-
static unsigned int autoidle;
module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -1114,8 +1107,7 @@ isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
static void isp_save_ctx(struct isp_device *isp)
{
isp_save_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_save_ctx(isp->iommu);
+ omap_iommu_save_ctx(isp->dev);
}
/*
@@ -1128,8 +1120,7 @@ static void isp_save_ctx(struct isp_device *isp)
static void isp_restore_ctx(struct isp_device *isp)
{
isp_restore_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_restore_ctx(isp->iommu);
+ omap_iommu_restore_ctx(isp->dev);
omap3isp_ccdc_restore_context(isp);
omap3isp_preview_restore_context(isp);
}
@@ -1983,7 +1974,7 @@ static int isp_remove(struct platform_device *pdev)
isp_cleanup_modules(isp);
omap3isp_get(isp);
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
iommu_domain_free(isp->domain);
omap3isp_put(isp);
@@ -2131,17 +2122,6 @@ static int isp_probe(struct platform_device *pdev)
}
}
- /* IOMMU */
- isp->iommu_dev = omap_find_iommu_device("isp");
- if (!isp->iommu_dev) {
- dev_err(isp->dev, "omap_find_iommu_device failed\n");
- ret = -ENODEV;
- goto error_isp;
- }
-
- /* to be removed once iommu migration is complete */
- isp->iommu = to_iommu(isp->iommu_dev);
-
isp->domain = iommu_domain_alloc(pdev->dev.bus);
if (!isp->domain) {
dev_err(isp->dev, "can't alloc iommu domain\n");
@@ -2149,7 +2129,7 @@ static int isp_probe(struct platform_device *pdev)
goto error_isp;
}
- ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+ ret = iommu_attach_device(isp->domain, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
goto free_domain;
@@ -2188,7 +2168,7 @@ error_modules:
error_irq:
free_irq(isp->irq_num, isp);
detach_dev:
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
free_domain:
iommu_domain_free(isp->domain);
error_isp:
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 705946ef4d6..d96603eb0d1 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -212,9 +212,7 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
- struct omap_iommu *iommu;
struct iommu_domain *domain;
- struct device *iommu_dev;
struct isp_platform_callback platform_cb;
};
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a357..d341ba12593 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
if (req->table)
- omap_iommu_vfree(isp->domain, isp->iommu, req->table);
+ omap_iommu_vfree(isp->domain, isp->dev, req->table);
kfree(req);
}
@@ -438,7 +438,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1;
- req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
req->config.size, IOMMU_FLAG);
if (IS_ERR_VALUE(req->table)) {
req->table = 0;
@@ -446,7 +446,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
goto done;
}
- req->iovm = omap_find_iovm_area(isp->iommu, req->table);
+ req->iovm = omap_find_iovm_area(isp->dev, req->table);
if (req->iovm == NULL) {
ret = -ENOMEM;
goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
- table = omap_da_to_va(isp->iommu, req->table);
+ table = omap_da_to_va(isp->dev, req->table);
if (copy_from_user(table, config->lsc, req->config.size)) {
ret = -EFAULT;
goto done;
@@ -734,15 +734,15 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
* already done by omap_iommu_vmalloc().
*/
size = ccdc->fpc.fpnum * 4;
- table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+ table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
0, size, IOMMU_FLAG);
if (IS_ERR_VALUE(table_new))
return -ENOMEM;
- if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
+ if (copy_from_user(omap_da_to_va(isp->dev, table_new),
(__force void __user *)
ccdc->fpc.fpcaddr, size)) {
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
table_new);
return -EFAULT;
}
@@ -753,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc_configure_fpc(ccdc);
if (table_old != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, table_old);
+ omap_iommu_vfree(isp->domain, isp->dev, table_old);
}
return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
struct isp_pipeline *pipe =
to_isp_pipeline(&ccdc->video_out.video.entity);
- struct video_device *vdev = &ccdc->subdev.devnode;
+ struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
@@ -2309,7 +2309,7 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+ omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
mutex_destroy(&ccdc->ioctl_lock);
}
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c5..11871ecc6d2 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,7 @@ static void isp_stat_bufs_free(struct ispstat *stat)
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents,
DMA_FROM_DEVICE);
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
buf->iommu_addr);
} else {
if (!buf->virt_addr)
@@ -400,7 +400,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
struct iovm_struct *iovm;
WARN_ON(buf->dma_addr);
- buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev,
@@ -410,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
return -ENOMEM;
}
- iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
+ iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) {
@@ -419,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
}
buf->iovm = iovm;
- buf->virt_addr = omap_da_to_va(stat->isp->iommu,
+ buf->virt_addr = omap_da_to_va(stat->isp->dev,
(u32)buf->iommu_addr);
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d1000723c5a..bd3aebafafa 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -452,7 +453,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
sgt->nents = sglen;
sgt->orig_nents = sglen;
- da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
+ da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
if (IS_ERR_VALUE(da))
kfree(sgt);
@@ -468,7 +469,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
struct sg_table *sgt;
- sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
+ sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
kfree(sgt);
}
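
The omap3isp hunks above track an OMAP IOMMU API change: the iovmm helpers (omap_iommu_vmap/vunmap/vmalloc/vfree, omap_find_iovm_area, omap_da_to_va) now take the client's struct device instead of an omap_iommu handle kept by the driver. Below is a hedged sketch of a map/unmap pair modelled on ispmmu_vmap()/ispmmu_vunmap() above; the function names, the flags parameter and the header choices are assumptions for illustration, not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <plat/iovmm.h>		/* omap_iommu_vmap()/vunmap(); path assumed */

/* Map a scatter-gather table into the device's IOMMU address space. */
static dma_addr_t example_iommu_map(struct iommu_domain *domain,
				    struct device *dev,
				    struct sg_table *sgt, u32 flags)
{
	/* da = 0 lets the allocator pick the device virtual address */
	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, flags);

	if (IS_ERR_VALUE(da))
		return 0;	/* sketch only: 0 means "no mapping" here */
	return da;
}

/* Undo the mapping; omap_iommu_vunmap() hands back the sg_table to free. */
static void example_iommu_unmap(struct iommu_domain *domain,
				struct device *dev, dma_addr_t da)
{
	struct sg_table *sgt = omap_iommu_vunmap(domain, dev, (u32)da);

	kfree(sgt);
}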
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b1d4c..6806345ec2f 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 803c9c82e49..c1bef618766 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -2682,25 +2682,7 @@ static struct usb_driver s2255_driver = {
.id_table = s2255_table,
};
-static int __init usb_s2255_init(void)
-{
- int result;
- /* register this driver with the USB subsystem */
- result = usb_register(&s2255_driver);
- if (result)
- pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", result);
- dprintk(2, "%s\n", __func__);
- return result;
-}
-
-static void __exit usb_s2255_exit(void)
-{
- usb_deregister(&s2255_driver);
-}
-
-module_init(usb_s2255_init);
-module_exit(usb_s2255_exit);
+module_usb_driver(s2255_driver);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
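
The s2255 change above (and the identical conversions further down for sn9c102, stk-webcam, tm6000 and zr364xx) replaces hand-rolled module init/exit functions with the module_usb_driver() helper, which generates the module_init()/module_exit() pair calling usb_register() and usb_deregister(). A minimal hedged sketch of the pattern; the driver name, callbacks and IDs below are made-up placeholders.

#include <linux/module.h>
#include <linux/usb.h>

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
}

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* made-up VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};

/* Expands to module_init()/module_exit() wrapping usb_register()/usb_deregister() */
module_usb_driver(example_driver);

MODULE_LICENSE("GPL");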
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0cd9b..2cc3b916672 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
return 0;
spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
fimc_hw_set_rotation(ctx);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
fimc_hw_set_out_dma(ctx);
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock(&ctx->slock);
return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
int min_bufs;
int ret;
- fimc_hw_reset(fimc);
vid_cap->frame_count = 0;
ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
- if (fimc->id == 1 && var->pix_hoff)
+ if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
set_frame_bounds(ff, mf->width, mf->height);
+ fimc->vid_cap.mf = *mf;
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
media_entity_cleanup(&sd->entity);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- sd = NULL;
+ fimc->vid_cap.subdev = NULL;
}
/* Set default format at the sensor and host interface */
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db38b2..07c6254faee 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
.name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = S5P_FIMC_RGB565,
.memplanes = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && variant->pix_hoff)
+ if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
- mod_y = mod_x;
+ mod_y = ffs(variant->min_vsize_align) - 1;
}
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
- halign = ffs(min_size) - 1;
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
/* Enable clocks and perform basic initialization */

clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
/* Resume the capture or mem-to-mem device */
if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
return 0;
}
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
spin_unlock_irqrestore(&fimc->slock, flags);
if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- fimc_runtime_suspend(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[0],
};
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
+ .min_vsize_align = 1,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
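
The try_fmt and try_crop hunks above stop keying the vertical alignment off the FIMC instance id and instead derive it from the new min_vsize_align field, converting the pixel alignment to the order expected by v4l_bound_align_image() with ffs(). A small sketch of that conversion, with illustrative naming:

#include <linux/bitops.h>
#include <linux/types.h>

/* ffs(16) == 5, so a 16-pixel alignment becomes order 4, i.e. sizes are
 * rounded to multiples of 1 << 4. min_align is assumed to be a power of two. */
static unsigned int example_align_order(u16 min_align)
{
	return ffs(min_align) - 1;
}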
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936dad5b1..c7f01c47b20 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
* @hor_offs_align: horizontal pixel offset alignment
+ * @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
+ u16 min_vsize_align;
u16 out_buf_count;
};
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1de91..615c862f036 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
+ i2c_put_adapter(adapter);
v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
return NULL;
}
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter;
if (!client)
return;
v4l2_device_unregister_subdev(sd);
+ adapter = client->adapter;
i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
static int fimc_md_register_video_nodes(struct fimc_md *fmd)
{
+ struct video_device *vdev;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
if (!fmd->fimc[i])
continue;
- if (fmd->fimc[i]->m2m.vfd)
- ret = video_register_device(fmd->fimc[i]->m2m.vfd,
- VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- if (fmd->fimc[i]->vid_cap.vfd)
- ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
- VFL_TYPE_GRABBER, -1);
+ vdev = fmd->fimc[i]->m2m.vfd;
+ if (vdev) {
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+ }
+
+ vdev = fmd->fimc[i]->vid_cap.vfd;
+ if (vdev == NULL)
+ continue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
}
return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (WARN(csis == NULL,
"MIPI-CSI interface specified "
"but s5p-csis module is not loaded!\n"))
- continue;
+ return -EINVAL;
ret = media_entity_create_link(&sensor->entity, 0,
&csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
struct fimc_md *fmd;
int ret;
- if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
- return -EINVAL;
-
fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err3;
+ if (pdev->dev.platform_data) {
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ }
ret = fimc_md_create_links(fmd);
if (ret)
goto err3;
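
The fimc_md_unregister_sensor() change above fixes an ordering problem: i2c_unregister_device() may free the client, so the adapter pointer has to be cached before that call and released afterwards (the registration path also gains the matching i2c_put_adapter() on failure). A hedged sketch of the safe teardown order; the function name is hypothetical.

#include <linux/i2c.h>

static void example_unregister_i2c_sensor(struct i2c_client *client)
{
	struct i2c_adapter *adapter;

	if (!client)
		return;

	adapter = client->adapter;	/* grab it while the client is still valid */
	i2c_unregister_device(client);	/* may free the client */
	if (adapter)
		i2c_put_adapter(adapter);	/* balance the earlier i2c_get_adapter() */
}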
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e3416..44f5c2d1920 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
+
+ if (dev->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
}
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = 0;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+ S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+ S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+ S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+ S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
fimc_hw_set_scaler(ctx);
cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CIEXTEN);
} else {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
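
Both scaler hunks above switch the S5P_CISCCTRL updates to a read-modify-write: the current value is read, only the scaler-related fields are cleared, and the new ratios are ORed back in, so unrelated bits programmed elsewhere survive. A generic sketch of the idiom; the register offset, mask and field macro are hypothetical.

#include <linux/io.h>

#define EXAMPLE_CTRL_REG	0x58
#define EXAMPLE_CTRL_MASK	0x0000ff00
#define EXAMPLE_CTRL_VAL(x)	(((x) & 0xff) << 8)

static void example_update_ctrl(void __iomem *regs, unsigned int val)
{
	u32 cfg = readl(regs + EXAMPLE_CTRL_REG);

	cfg &= ~EXAMPLE_CTRL_MASK;	/* clear only the field we own */
	cfg |= EXAMPLE_CTRL_VAL(val);	/* leave all other bits untouched */
	writel(cfg, regs + EXAMPLE_CTRL_REG);
}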
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb77d4b..dff9dc79879 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
.num_planes = 1,
},
{
- .name = "H264 Encoded Stream",
+ .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_FIMV_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4bc1d..b47d0c06ecf 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682629c..c51decfcae1 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
- if (csi2_sd)
- csi2_sd->grp_id = (long)icd;
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
{
if (pcdev->csi2_pdev) {
struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
return csi2_sd;
}
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Try 2560x1920, 1280x960, 640x480, 320x240 */
mf.width = 2560 >> shift;
mf.height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
bool ceu_1to1;
int ret;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
s_mbus_fmt, mf);
if (ret < 0)
return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
}
if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
*/
mf.width = 2560;
mf.height = 1920;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->parent,
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f0473ed3..8a652b53ff7 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
.flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
struct v4l2_mbus_config cfg;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 16cb07c5c27..7025be12928 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -3420,27 +3420,4 @@ static struct usb_driver sn9c102_usb_driver = {
.disconnect = sn9c102_usb_disconnect,
};
-/*****************************************************************************/
-
-static int __init sn9c102_module_init(void)
-{
- int err = 0;
-
- KDBG(2, SN9C102_MODULE_NAME " v" SN9C102_MODULE_VERSION);
- KDBG(3, SN9C102_MODULE_AUTHOR);
-
- if ((err = usb_register(&sn9c102_usb_driver)))
- KDBG(1, "usb_register() failed");
-
- return err;
-}
-
-
-static void __exit sn9c102_module_exit(void)
-{
- usb_deregister(&sn9c102_usb_driver);
-}
-
-
-module_init(sn9c102_module_init);
-module_exit(sn9c102_module_exit);
+module_usb_driver(sn9c102_usb_driver);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c3895..62e4312515c 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
}
sd = soc_camera_to_subdev(icd);
- sd->grp_id = (long)icd;
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
goto ectrl;
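
The soc_camera, sh_mobile_ceu and sh_mobile_csi2 changes above stop abusing v4l2_subdev.grp_id as a pointer: the host now stores its per-device context with v4l2_set_subdev_hostdata() and keeps grp_id as a plain group identifier via soc_camera_grp_id(), while the sub-device side reads the context back with v4l2_get_subdev_hostdata(). A short sketch of the two sides; the function names here are illustrative.

#include <media/soc_camera.h>
#include <media/v4l2-subdev.h>

static void example_attach(struct v4l2_subdev *sd,
			   struct soc_camera_device *icd)
{
	sd->grp_id = soc_camera_grp_id(icd);	/* numeric id, not a cast pointer */
	v4l2_set_subdev_hostdata(sd, icd);	/* context travels separately */
}

static struct soc_camera_device *example_get_icd(struct v4l2_subdev *sd)
{
	return v4l2_get_subdev_hostdata(sd);
}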
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index cbc105f975d..b7fb5a5cad7 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1377,25 +1377,4 @@ static struct usb_driver stk_camera_driver = {
#endif
};
-
-static int __init stk_camera_init(void)
-{
- int result;
-
- result = usb_register(&stk_camera_driver);
- if (result)
- STK_ERROR("usb_register failed ! Error number %d\n", result);
-
-
- return result;
-}
-
-static void __exit stk_camera_exit(void)
-{
- usb_deregister(&stk_camera_driver);
-}
-
-module_init(stk_camera_init);
-module_exit(stk_camera_exit);
-
-
+module_usb_driver(stk_camera_driver);
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index ec2578a0fdf..ff939bc0e0b 100644
--- a/drivers/media/video/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -1371,31 +1371,7 @@ static struct usb_driver tm6000_usb_driver = {
.id_table = tm6000_id_table,
};
-static int __init tm6000_module_init(void)
-{
- int result;
-
- printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
- (TM6000_VERSION >> 16) & 0xff,
- (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&tm6000_usb_driver);
- if (result)
- printk(KERN_ERR "tm6000"
- " usb_register failed. Error number %d.\n", result);
-
- return result;
-}
-
-static void __exit tm6000_module_exit(void)
-{
- /* deregister at USB subsystem */
- usb_deregister(&tm6000_usb_driver);
-}
-
-module_init(tm6000_module_init);
-module_exit(tm6000_module_exit);
+module_usb_driver(tm6000_usb_driver);
MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter");
MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index d4d05d2ace6..f6d26419445 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1550,7 +1550,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
if (zoran_formats[i].flags & flag && num++ == fmt->index) {
strncpy(fmt->description, zoran_formats[i].name,
sizeof(fmt->description) - 1);
- /* fmt struct pre-zeroed, so adding '\0' not neeed */
+ /* fmt struct pre-zeroed, so adding '\0' not needed */
fmt->pixelformat = zoran_formats[i].fourcc;
if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index e78cf94f491..cd2e39fc4bf 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1695,28 +1695,7 @@ static struct usb_driver zr364xx_driver = {
.id_table = device_table
};
-
-static int __init zr364xx_init(void)
-{
- int retval;
- retval = usb_register(&zr364xx_driver);
- if (retval)
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed!\n");
- else
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
- return retval;
-}
-
-
-static void __exit zr364xx_exit(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC " module unloaded\n");
- usb_deregister(&zr364xx_driver);
-}
-
-
-module_init(zr364xx_init);
-module_exit(zr364xx_exit);
+module_usb_driver(zr364xx_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 22027e7946f..d9bcfba6b04 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
#define MPI_MANUFACTPAGE_DEVID_SAS1066E (0x005A)
#define MPI_MANUFACTPAGE_DEVID_SAS1068 (0x0054)
#define MPI_MANUFACTPAGE_DEVID_SAS1068E (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP (0x0059)
#define MPI_MANUFACTPAGE_DEVID_SAS1078 (0x0062)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index fd6222882a0..19fb21b8f0c 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -857,7 +857,7 @@ typedef struct _EVENT_DATA_SAS_DISCOVERY
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_MASK (0xFFFF0000)
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_SHIFT (16)
-/* SAS Discovery Errror Event data */
+/* SAS Discovery Error Event data */
typedef struct _EVENT_DATA_DISCOVERY_ERROR
{
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e9c6a6047a0..a7dc4672d99 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
-static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+ [MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
MptDriverClass[cb_idx] = dclass;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx = cb_idx;
- memcpy(MptCallbacksName[cb_idx], func_name,
- strlen(func_name) > 50 ? 50 : strlen(func_name));
+ strlcpy(MptCallbacksName[cb_idx], func_name,
+ MPT_MAX_CALLBACKNAME_LEN+1);
break;
}
}
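
The mptbase.c hunk above replaces the hand-clamped memcpy(), which did not NUL-terminate the destination, with strlcpy() into rows sized by the new callback-name length define. A small sketch of the bounded-copy pattern; the array dimensions are illustrative.

#include <linux/string.h>

#define EXAMPLE_NAME_LEN 49

static char example_names[16][EXAMPLE_NAME_LEN + 1];

static void example_set_name(unsigned int idx, const char *name)
{
	/* strlcpy() always NUL-terminates and never overruns the row */
	strlcpy(example_names[idx], name, sizeof(example_names[idx]));
}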
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4d24dc081a..76c05bc24cb 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -89,6 +89,7 @@
*/
#define MPT_MAX_ADAPTERS 18
#define MPT_MAX_PROTOCOL_DRIVERS 16
+#define MPT_MAX_CALLBACKNAME_LEN 49
#define MPT_MAX_BUS 1 /* Do not change */
#define MPT_MAX_FC_DEVICES 255
#define MPT_MAX_SCSI_DEVICES 16
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9d950429854..551262e4b96 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 07dbeaf9df9..6d115c7208a 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -56,7 +56,7 @@
/* Structure used to define /proc entries */
typedef struct _i2o_proc_entry_t {
char *name; /* entry name */
- mode_t mode; /* mode */
+ umode_t mode; /* mode */
const struct file_operations *fops; /* open function */
} i2o_proc_entry;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f1391c21ef2..c8322eefc86 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -328,6 +328,34 @@ config PMIC_DA903X
individual components like LCD backlight, voltage regulators,
LEDs and battery-charger under the corresponding menus.
+config PMIC_DA9052
+ bool
+ select MFD_CORE
+
+config MFD_DA9052_SPI
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on SPI_MASTER=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using SPI. This driver provides common support
+ for accessing the device; additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_DA9052_I2C
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on I2C=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using I2C. This driver provides common support
+ for accessing the device; additional drivers must be enabled in
+ order to use the functionality of the device.
+
config PMIC_ADP5520
bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
depends on I2C=y
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2292eb7524..d5f574306c7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -67,6 +67,11 @@ endif
obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+
+obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
+obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
+obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb8195..b7b2d3483fd 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,7 +4,7 @@
* Debugfs support for the AB5500 MFD driver
*/
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e9173804ed..d3d572b2317 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+#ifdef CONFIG_DEBUG_FS
static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
.flags = IORESOURCE_IRQ,
},
};
+#endif
static struct resource __devinitdata ab8500_usb_resources[] = {
{
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d88483112..8d816cce832 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
ret = __adp5520_read(chip->client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = __adp5520_write(chip->client, reg, reg_val);
}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37fd59..1924b857a0f 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __da903x_write(chip->client, reg, reg_val);
}
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ free_irq(client->irq, chip);
kfree(chip);
return 0;
}
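
The adp5520 and da903x set_bits fixes above address the same bug: with a multi-bit mask, testing "(reg_val & bit_mask) == 0" skips the write when only some of the requested bits are already set, leaving the rest clear. Comparing against the full mask rewrites the register whenever any requested bit is still missing. A hedged sketch; example_read() and example_write() are hypothetical register accessors.

#include <linux/types.h>

int example_read(u8 reg, u8 *val);	/* hypothetical accessors */
int example_write(u8 reg, u8 val);

static int example_set_bits(u8 reg, u8 bit_mask)
{
	u8 val;
	int ret;

	ret = example_read(reg, &val);
	if (ret)
		return ret;

	/* write back only if at least one requested bit is still clear */
	if ((val & bit_mask) != bit_mask) {
		val |= bit_mask;
		ret = example_write(reg, val);
	}

	return ret;
}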
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
new file mode 100644
index 00000000000..5ddde2a9176
--- /dev/null
+++ b/drivers/mfd/da9052-core.c
@@ -0,0 +1,694 @@
+/*
+ * Device access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static bool da9052_reg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_INTERFACE_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_SECOND_A_REG:
+ case DA9052_SECOND_B_REG:
+ case DA9052_SECOND_C_REG:
+ case DA9052_SECOND_D_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct resource da9052_rtc_resource = {
+ .name = "ALM",
+ .start = DA9052_IRQ_ALARM,
+ .end = DA9052_IRQ_ALARM,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_onkey_resource = {
+ .name = "ONKEY",
+ .start = DA9052_IRQ_NONKEY,
+ .end = DA9052_IRQ_NONKEY,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_bat_resources[] = {
+ {
+ .name = "BATT TEMP",
+ .start = DA9052_IRQ_TBAT,
+ .end = DA9052_IRQ_TBAT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN DET",
+ .start = DA9052_IRQ_DCIN,
+ .end = DA9052_IRQ_DCIN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN REM",
+ .start = DA9052_IRQ_DCINREM,
+ .end = DA9052_IRQ_DCINREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS DET",
+ .start = DA9052_IRQ_VBUS,
+ .end = DA9052_IRQ_VBUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS REM",
+ .start = DA9052_IRQ_VBUSREM,
+ .end = DA9052_IRQ_VBUSREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CHG END",
+ .start = DA9052_IRQ_CHGEND,
+ .end = DA9052_IRQ_CHGEND,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource da9052_tsi_resources[] = {
+ {
+ .name = "PENDWN",
+ .start = DA9052_IRQ_PENDOWN,
+ .end = DA9052_IRQ_PENDOWN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "TSIRDY",
+ .start = DA9052_IRQ_TSIREADY,
+ .end = DA9052_IRQ_TSIREADY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell __devinitdata da9052_subdev_info[] = {
+ {
+ .name = "da9052-regulator",
+ .id = 1,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 2,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 3,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 4,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 5,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 6,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 7,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 8,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 9,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 10,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 11,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 12,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 13,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 14,
+ },
+ {
+ .name = "da9052-onkey",
+ .resources = &da9052_onkey_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-rtc",
+ .resources = &da9052_rtc_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-gpio",
+ },
+ {
+ .name = "da9052-hwmon",
+ },
+ {
+ .name = "da9052-leds",
+ },
+ {
+ .name = "da9052-wled1",
+ },
+ {
+ .name = "da9052-wled2",
+ },
+ {
+ .name = "da9052-wled3",
+ },
+ {
+ .name = "da9052-tsi",
+ .resources = da9052_tsi_resources,
+ .num_resources = ARRAY_SIZE(da9052_tsi_resources),
+ },
+ {
+ .name = "da9052-bat",
+ .resources = da9052_bat_resources,
+ .num_resources = ARRAY_SIZE(da9052_bat_resources),
+ },
+ {
+ .name = "da9052-watchdog",
+ },
+};
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+struct regmap_config da9052_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .max_register = DA9052_PAGE1_CON_REG,
+ .readable_reg = da9052_reg_readable,
+ .writeable_reg = da9052_reg_writeable,
+ .volatile_reg = da9052_reg_volatile,
+};
+EXPORT_SYMBOL_GPL(da9052_regmap_config);
+
+int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
+{
+ struct da9052_pdata *pdata = da9052->dev->platform_data;
+ struct irq_desc *desc;
+ int ret;
+
+ mutex_init(&da9052->io_lock);
+
+ if (pdata && pdata->init != NULL)
+ pdata->init(da9052);
+
+ da9052->chip_id = chip_id;
+
+ if (!pdata || !pdata->irq_base)
+ da9052->irq_base = -1;
+ else
+ da9052->irq_base = pdata->irq_base;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ da9052->irq_base, &da9052_regmap_irq_chip,
+ NULL);
+ if (ret < 0)
+ goto regmap_err;
+
+ desc = irq_to_desc(da9052->chip_irq);
+ da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);
+
+ ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ARRAY_SIZE(da9052_subdev_info), NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ mfd_remove_devices(da9052->dev);
+regmap_err:
+ return ret;
+}
+
+void da9052_device_exit(struct da9052 *da9052)
+{
+ regmap_del_irq_chip(da9052->chip_irq,
+ irq_get_irq_data(da9052->irq_base)->chip_data);
+ mfd_remove_devices(da9052->dev);
+}
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 MFD Core");
+MODULE_LICENSE("GPL");
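
da9052-core.c above describes the register map to regmap through readable/writeable/volatile callbacks, so the rbtree cache knows which registers may be served from cache and which (status, events, ADC results, RTC counters) must always hit the hardware; the same config is shared by the I2C and SPI transports, and the event/mask registers are additionally exposed as a regmap IRQ chip. A much smaller hedged sketch of that callback-driven regmap_config; the register names and addresses are hypothetical.

#include <linux/device.h>
#include <linux/regmap.h>

#define EXAMPLE_CTRL_REG	0x01	/* cached read/write register */
#define EXAMPLE_STATUS_REG	0x02	/* read-only, always read from hardware */
#define EXAMPLE_MAX_REG		0x7f

static bool example_readable_reg(struct device *dev, unsigned int reg)
{
	return reg <= EXAMPLE_MAX_REG;
}

static bool example_writeable_reg(struct device *dev, unsigned int reg)
{
	return reg == EXAMPLE_CTRL_REG;
}

static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == EXAMPLE_STATUS_REG;	/* bypass the cache */
}

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.cache_type	= REGCACHE_RBTREE,
	.max_register	= EXAMPLE_MAX_REG,
	.readable_reg	= example_readable_reg,
	.writeable_reg	= example_writeable_reg,
	.volatile_reg	= example_volatile_reg,
};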
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
new file mode 100644
index 00000000000..44b97c70a61
--- /dev/null
+++ b/drivers/mfd/da9052-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * I2C access for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+{
+ int reg_val, ret;
+
+ ret = regmap_read(da9052->regmap, DA9052_CONTROL_B_REG, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
+ reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+ ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
+ reg_val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit da9052_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct da9052 *da9052;
+ int ret;
+
+ da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ if (!da9052)
+ return -ENOMEM;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
+ __func__);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ da9052->dev = &client->dev;
+ da9052->chip_irq = client->irq;
+
+ i2c_set_clientdata(client, da9052);
+
+ da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_i2c_enable_multiwrite(da9052);
+ if (ret < 0)
+ goto err;
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_i2c_remove(struct i2c_client *client)
+{
+ struct da9052 *da9052 = i2c_get_clientdata(client);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct i2c_driver da9052_i2c_driver = {
+ .probe = da9052_i2c_probe,
+ .remove = da9052_i2c_remove,
+ .id_table = da9052_i2c_id,
+ .driver = {
+ .name = "da9052",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&da9052_i2c_driver);
+ if (ret != 0) {
+ pr_err("DA9052 I2C registration failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_i2c_init);
+
+static void __exit da9052_i2c_exit(void)
+{
+ i2c_del_driver(&da9052_i2c_driver);
+}
+module_exit(da9052_i2c_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("I2C driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
new file mode 100644
index 00000000000..cdbc7cad326
--- /dev/null
+++ b/drivers/mfd/da9052-spi.c
@@ -0,0 +1,115 @@
+/*
+ * SPI access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+
+static int da9052_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+
+ if (!da9052)
+ return -ENOMEM;
+
+ spi->mode = SPI_MODE_0 | SPI_CPOL;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ da9052->dev = &spi->dev;
+ da9052->chip_irq = spi->irq;
+
+ dev_set_drvdata(&spi->dev, da9052);
+
+ da9052_regmap_config.read_flag_mask = 1;
+ da9052_regmap_config.write_flag_mask = 0;
+
+ da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_spi_remove(struct spi_device *spi)
+{
+ struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct spi_device_id da9052_spi_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct spi_driver da9052_spi_driver = {
+ .probe = da9052_spi_probe,
+ .remove = __devexit_p(da9052_spi_remove),
+ .id_table = da9052_spi_id,
+ .driver = {
+ .name = "da9052",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&da9052_spi_driver);
+ if (ret != 0) {
+ pr_err("Failed to register DA9052 SPI driver, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_spi_init);
+
+static void __exit da9052_spi_exit(void)
+{
+ spi_unregister_driver(&da9052_spi_driver);
+}
+module_exit(da9052_spi_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("SPI driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
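
The SPI transport above reuses the shared da9052_regmap_config but sets read_flag_mask = 1 and write_flag_mask = 0 before regmap_init_spi(); regmap ORs the read flag into the register byte, which presumably matches how the DA9052 distinguishes reads from writes on the wire. A hedged sketch of the same setup with hypothetical names.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

static struct regmap_config example_spi_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_spi_setup_regmap(struct spi_device *spi,
				    struct regmap **map)
{
	example_spi_regmap_config.read_flag_mask = 1;	/* reads send reg | 0x01 */
	example_spi_regmap_config.write_flag_mask = 0;	/* writes send reg as-is */

	*map = regmap_init_spi(spi, &example_spi_regmap_config);
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	return 0;
}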
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a25ab9c6b5a..af8e0efedbe 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2102,14 +2102,11 @@ static struct irq_chip prcmu_irq_chip = {
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
-
- if (cpu_is_u8500v1()) {
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
- } else if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
- int version;
+ u32 version;
version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
prcmu_version.project_number = version & 0xFF;
prcmu_version.api_version = (version >> 8) & 0xFF;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee533eac..ef39528088f 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
*/
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 86e14583a08..3f565ef3e14 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -27,8 +27,9 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
-#define USBHS_DRIVER_NAME "usbhs-omap"
+#define USBHS_DRIVER_NAME "usbhs_omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
#define OMAP_OHCI_DEVICE "ohci-omap3"
@@ -147,9 +148,6 @@
struct usbhs_hcd_omap {
- struct clk *usbhost_ick;
- struct clk *usbhost_hs_fck;
- struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -159,8 +157,7 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
+ struct clk *ehci_logic_fck;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -169,7 +166,6 @@ struct usbhs_hcd_omap {
u32 usbhs_rev;
spinlock_t lock;
- int count;
};
/*-------------------------------------------------------------------------*/
@@ -319,269 +315,6 @@ err_end:
return ret;
}
-/**
- * usbhs_omap_probe - initialize TI-based HCDs
- *
- * Allocates basic resources for this USB host controller.
- */
-static int __devinit usbhs_omap_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
- struct usbhs_hcd_omap *omap;
- struct resource *res;
- int ret = 0;
- int i;
-
- if (!pdata) {
- dev_err(dev, "Missing platform data\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- spin_lock_init(&omap->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- omap->platdata.port_mode[i] = pdata->port_mode[i];
-
- omap->platdata.ehci_data = pdata->ehci_data;
- omap->platdata.ohci_data = pdata->ohci_data;
-
- omap->usbhost_ick = clk_get(dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- dev_err(dev, "usbhost_ick failed error:%d\n", ret);
- goto err_end;
- }
-
- omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
- if (IS_ERR(omap->usbhost_hs_fck)) {
- ret = PTR_ERR(omap->usbhost_hs_fck);
- dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
- goto err_usbhost_ick;
- }
-
- omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
- if (IS_ERR(omap->usbhost_fs_fck)) {
- ret = PTR_ERR(omap->usbhost_fs_fck);
- dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
- goto err_usbhost_hs_fck;
- }
-
- omap->usbtll_fck = clk_get(dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- dev_err(dev, "usbtll_fck failed error:%d\n", ret);
- goto err_usbhost_fs_fck;
- }
-
- omap->usbtll_ick = clk_get(dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- dev_err(dev, "usbtll_ick failed error:%d\n", ret);
- goto err_usbtll_fck;
- }
-
- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_usbtll_ick;
- }
-
- omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
- if (IS_ERR(omap->xclk60mhsp1_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp1_ck);
- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
- }
-
- omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
- if (IS_ERR(omap->xclk60mhsp2_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp2_ck);
- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(omap->usbhost_p1_fck)) {
- ret = PTR_ERR(omap->usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
- if (IS_ERR(omap->usbtll_p1_fck)) {
- ret = PTR_ERR(omap->usbtll_p1_fck);
- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
- }
-
- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(omap->usbhost_p2_fck)) {
- ret = PTR_ERR(omap->usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbtll_p1_fck;
- }
-
- omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
- if (IS_ERR(omap->usbtll_p2_fck)) {
- ret = PTR_ERR(omap->usbtll_p2_fck);
- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
- }
-
- omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
- if (IS_ERR(omap->init_60m_fclk)) {
- ret = PTR_ERR(omap->init_60m_fclk);
- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbtll_p2_fck;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_init_60m_fclk;
- }
-
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_init_60m_fclk;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_tll;
- }
-
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll;
- }
-
- platform_set_drvdata(pdev, omap);
-
- ret = omap_usbhs_alloc_children(pdev);
- if (ret) {
- dev_err(dev, "omap_usbhs_alloc_children failed\n");
- goto err_alloc;
- }
-
- goto end_probe;
-
-err_alloc:
- iounmap(omap->tll_base);
-
-err_tll:
- iounmap(omap->uhh_base);
-
-err_init_60m_fclk:
- clk_put(omap->init_60m_fclk);
-
-err_usbtll_p2_fck:
- clk_put(omap->usbtll_p2_fck);
-
-err_usbhost_p2_fck:
- clk_put(omap->usbhost_p2_fck);
-
-err_usbtll_p1_fck:
- clk_put(omap->usbtll_p1_fck);
-
-err_usbhost_p1_fck:
- clk_put(omap->usbhost_p1_fck);
-
-err_xclk60mhsp2_ck:
- clk_put(omap->xclk60mhsp2_ck);
-
-err_utmi_p2_fck:
- clk_put(omap->utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
- clk_put(omap->xclk60mhsp1_ck);
-
-err_utmi_p1_fck:
- clk_put(omap->utmi_p1_fck);
-
-err_usbtll_ick:
- clk_put(omap->usbtll_ick);
-
-err_usbtll_fck:
- clk_put(omap->usbtll_fck);
-
-err_usbhost_fs_fck:
- clk_put(omap->usbhost_fs_fck);
-
-err_usbhost_hs_fck:
- clk_put(omap->usbhost_hs_fck);
-
-err_usbhost_ick:
- clk_put(omap->usbhost_ick);
-
-err_end:
- kfree(omap);
-
-end_probe:
- return ret;
-}
-
-/**
- * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
- * @pdev: USB Host Controller being removed
- *
- * Reverses the effect of usbhs_omap_probe().
- */
-static int __devexit usbhs_omap_remove(struct platform_device *pdev)
-{
- struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
-
- if (omap->count != 0) {
- dev_err(&pdev->dev,
- "Either EHCI or OHCI is still using usbhs core\n");
- return -EBUSY;
- }
-
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
- clk_put(omap->init_60m_fclk);
- clk_put(omap->usbtll_p2_fck);
- clk_put(omap->usbhost_p2_fck);
- clk_put(omap->usbtll_p1_fck);
- clk_put(omap->usbhost_p1_fck);
- clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->utmi_p2_fck);
- clk_put(omap->xclk60mhsp1_ck);
- clk_put(omap->utmi_p1_fck);
- clk_put(omap->usbtll_ick);
- clk_put(omap->usbtll_fck);
- clk_put(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_ick);
- kfree(omap);
-
- return 0;
-}
-
static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
{
switch (pmode) {
@@ -689,30 +422,85 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
}
}
-static int usbhs_enable(struct device *dev)
+static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- int ret = 0;
- unsigned long timeout;
- unsigned reg;
+ unsigned long flags;
+
+ dev_dbg(dev, "usbhs_runtime_resume\n");
- dev_dbg(dev, "starting TI HSUSB Controller\n");
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
return -ENODEV;
}
spin_lock_irqsave(&omap->lock, flags);
- if (omap->count > 0)
- goto end_count;
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost_hs_fck);
- clk_enable(omap->usbhost_fs_fck);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbtll_ick);
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_enable(omap->ehci_logic_fck);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_enable(omap->usbhost_p1_fck);
+ clk_enable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_enable(omap->usbhost_p2_fck);
+ clk_enable(omap->usbtll_p2_fck);
+ }
+ clk_enable(omap->utmi_p1_fck);
+ clk_enable(omap->utmi_p2_fck);
+
+ spin_unlock_irqrestore(&omap->lock, flags);
+
+ return 0;
+}
+
+static int usbhs_runtime_suspend(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
+
+ dev_dbg(dev, "usbhs_runtime_suspend\n");
+
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_disable(omap->usbhost_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_disable(omap->usbhost_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
+ }
+ clk_disable(omap->utmi_p2_fck);
+ clk_disable(omap->utmi_p1_fck);
+
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_disable(omap->ehci_logic_fck);
+
+ spin_unlock_irqrestore(&omap->lock, flags);
+
+ return 0;
+}
+
+static void omap_usbhs_init(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
+ unsigned reg;
+
+ dev_dbg(dev, "starting TI HSUSB Controller\n");
+
+ pm_runtime_get_sync(dev);
+ spin_lock_irqsave(&omap->lock, flags);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -736,50 +524,6 @@ static int usbhs_enable(struct device *dev)
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
- /* perform TLL soft reset, and wait until reset is complete */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
- /* Wait for TLL reset to complete */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_tll;
- }
- }
-
- dev_dbg(dev, "TLL RESET DONE\n");
-
- /* (1<<3) = no idle mode only for initial debugging */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
-
- /* Put UHH in NoIdle/NoStandby mode */
- reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- if (is_omap_usbhs_rev1(omap)) {
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
-
-
- } else if (is_omap_usbhs_rev2(omap)) {
- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
- }
-
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -825,49 +569,6 @@ static int usbhs_enable(struct device *dev)
reg &= ~OMAP4_P1_MODE_CLEAR;
reg &= ~OMAP4_P2_MODE_CLEAR;
- if (is_ehci_phy_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->xclk60mhsp1_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p1_fck);
- clk_enable(omap->usbtll_p1_fck);
- }
-
- if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->xclk60mhsp2_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p2_fck);
- clk_enable(omap->usbtll_p2_fck);
- }
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
-
if (is_ehci_tll_mode(pdata->port_mode[0]) ||
(is_ohci_port(pdata->port_mode[0])))
reg |= OMAP4_P1_MODE_TLL;
@@ -913,12 +614,15 @@ static int usbhs_enable(struct device *dev)
(pdata->ehci_data->reset_gpio_port[1], 1);
}
-end_count:
- omap->count++;
spin_unlock_irqrestore(&omap->lock, flags);
- return 0;
+ pm_runtime_put_sync(dev);
+}
+
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
-err_tll:
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -926,123 +630,272 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
-
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
- spin_unlock_irqrestore(&omap->lock, flags);
- return ret;
}
-static void usbhs_disable(struct device *dev)
+
+/**
+ * usbhs_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller.
+ */
+static int __devinit usbhs_omap_probe(struct platform_device *pdev)
{
- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- unsigned long timeout;
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_hcd_omap *omap;
+ struct resource *res;
+ int ret = 0;
+ int i;
- dev_dbg(dev, "stopping TI HSUSB Controller\n");
+ if (!pdata) {
+ dev_err(dev, "Missing platform data\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- spin_lock_irqsave(&omap->lock, flags);
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- if (omap->count == 0)
- goto end_disble;
+ spin_lock_init(&omap->lock);
- omap->count--;
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ omap->platdata.port_mode[i] = pdata->port_mode[i];
+
+ omap->platdata.ehci_data = pdata->ehci_data;
+ omap->platdata.ohci_data = pdata->ohci_data;
- if (omap->count != 0)
- goto end_disble;
+ pm_runtime_enable(dev);
- /* Reset OMAP modules for insmod/rmmod to work */
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- is_omap_usbhs_rev2(omap) ?
- OMAP4_UHH_SYSCONFIG_SOFTRESET :
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- timeout = jiffies + msecs_to_jiffies(100);
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
+ is_ehci_hsic_mode(i)) {
+ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_warn(dev, "ehci_logic_fck failed:%d\n",
+ ret);
+ }
+ break;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_fck)) {
+ ret = PTR_ERR(omap->utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_end;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
+ omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(omap->xclk60mhsp1_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_fck)) {
+ ret = PTR_ERR(omap->utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
+ omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(omap->xclk60mhsp2_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(omap->usbhost_p1_fck)) {
+ ret = PTR_ERR(omap->usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
}
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+ omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
+ if (IS_ERR(omap->usbtll_p1_fck)) {
+ ret = PTR_ERR(omap->usbtll_p1_fck);
+ dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(omap->usbhost_p2_fck)) {
+ ret = PTR_ERR(omap->usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbtll_p1_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
+ if (IS_ERR(omap->usbtll_p2_fck)) {
+ ret = PTR_ERR(omap->usbtll_p2_fck);
+ dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
}
- if (is_omap_usbhs_rev2(omap)) {
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(omap->usbtll_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(omap->usbtll_p2_fck);
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
+ omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(omap->init_60m_fclk)) {
+ ret = PTR_ERR(omap->init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbtll_p2_fck;
}
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
+ if (is_ehci_phy_mode(pdata->port_mode[0])) {
+ /* for OMAP3, the clk set parent fails */
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->xclk60mhsp1_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- /* The gpio_free migh sleep; so unlock the spinlock */
- spin_unlock_irqrestore(&omap->lock, flags);
+ if (is_ehci_phy_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->xclk60mhsp2_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_init_60m_fclk;
+ }
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_init_60m_fclk;
}
- return;
-end_disble:
- spin_unlock_irqrestore(&omap->lock, flags);
-}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_tll;
+ }
-int omap_usbhs_enable(struct device *dev)
-{
- return usbhs_enable(dev->parent);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll;
+ }
+
+ platform_set_drvdata(pdev, omap);
+
+ ret = omap_usbhs_alloc_children(pdev);
+ if (ret) {
+ dev_err(dev, "omap_usbhs_alloc_children failed\n");
+ goto err_alloc;
+ }
+
+ omap_usbhs_init(dev);
+
+ goto end_probe;
+
+err_alloc:
+ iounmap(omap->tll_base);
+
+err_tll:
+ iounmap(omap->uhh_base);
+
+err_init_60m_fclk:
+ clk_put(omap->init_60m_fclk);
+
+err_usbtll_p2_fck:
+ clk_put(omap->usbtll_p2_fck);
+
+err_usbhost_p2_fck:
+ clk_put(omap->usbhost_p2_fck);
+
+err_usbtll_p1_fck:
+ clk_put(omap->usbtll_p1_fck);
+
+err_usbhost_p1_fck:
+ clk_put(omap->usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(omap->xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(omap->utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(omap->xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(omap->utmi_p1_fck);
+
+err_end:
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(dev);
+ kfree(omap);
+
+end_probe:
+ return ret;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_enable);
-void omap_usbhs_disable(struct device *dev)
+/**
+ * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
- usbhs_disable(dev->parent);
+ struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+
+ omap_usbhs_deinit(&pdev->dev);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ clk_put(omap->init_60m_fclk);
+ clk_put(omap->usbtll_p2_fck);
+ clk_put(omap->usbhost_p2_fck);
+ clk_put(omap->usbtll_p1_fck);
+ clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->xclk60mhsp2_ck);
+ clk_put(omap->utmi_p2_fck);
+ clk_put(omap->xclk60mhsp1_ck);
+ clk_put(omap->utmi_p1_fck);
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(&pdev->dev);
+ kfree(omap);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_disable);
+
+static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+ .runtime_suspend = usbhs_runtime_suspend,
+ .runtime_resume = usbhs_runtime_resume,
+};
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
+ .pm = &usbhsomap_dev_pm_ops,
},
.remove = __exit_p(usbhs_omap_remove),
};
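The omap-usb-host conversion above replaces the open-coded clock handling and private usage count with runtime PM: runtime_suspend/runtime_resume callbacks go into a dev_pm_ops, probe calls pm_runtime_enable(), and hardware accesses are bracketed by pm_runtime_get_sync()/pm_runtime_put_sync(). A minimal sketch of that wiring for a hypothetical platform device of the same era; the driver name and callback bodies are placeholders, not the omap code:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "runtime suspend: gate the functional clocks here\n");
	return 0;
}

static int demo_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "runtime resume: ungate the functional clocks here\n");
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.runtime_suspend = demo_runtime_suspend,
	.runtime_resume  = demo_runtime_resume,
};

static int __devinit demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* Touch the hardware only between get_sync() and put_sync(). */
	pm_runtime_get_sync(&pdev->dev);
	/* ... program registers ... */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

static int __devexit demo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= __devexit_p(demo_remove),
	.driver	= {
		.name	= "demo-rpm",
		.owner	= THIS_MODULE,
		.pm	= &demo_pm_ops,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");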
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c24..a5ddf31b60c 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf2f65..c1da84bc157 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
goto out;
}
- data &= mask;
+ data &= ~mask;
err = tps65910_i2c_write(tps65910, reg, 1, &data);
if (err)
dev_err(tps65910->dev, "write to reg %x failed\n", reg);
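Both MFD fixes above correct read-modify-write helpers for multi-bit masks: a set-bits helper must write back whenever any requested bit is still clear, i.e. (val & mask) != mask, and a clear-bits helper must AND with the inverted mask. A small standalone illustration of the two operations and the partially-set case the old checks missed (plain C, not the driver code):

#include <assert.h>
#include <stdint.h>

static uint8_t set_bits(uint8_t val, uint8_t mask)
{
	/* Write back only if at least one requested bit is not yet set. */
	if ((val & mask) != mask)
		val |= mask;
	return val;
}

static uint8_t clear_bits(uint8_t val, uint8_t mask)
{
	return val & ~mask;	/* the old tps65910 code did "val & mask" */
}

int main(void)
{
	/* Partially set mask: the old "(val & mask) == 0" test skipped this. */
	assert(set_bits(0x01, 0x03) == 0x03);
	assert(clear_bits(0x0f, 0x03) == 0x0c);
	return 0;
}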
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd66021af..61e70cfaa77 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/*
* [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/* [MSG1] fill the register address data */
msg = &twl->xfer_msg[0];
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8cc6c3..29f11e0765f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@ struct sih_agent {
u32 edge_change;
struct mutex irq_lock;
+ char *irq_name;
};
/*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
{
struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, read ISR error %d\n",
sih->name, isr);
/* REVISIT: recover; eventually mask it all, etc */
- return;
+ return IRQ_HANDLED;
}
while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
}
+ return IRQ_HANDLED;
}
static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
activate_irq(irq);
}
- status = irq_base;
twl4030_irq_next += i;
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
- irq_set_chained_handler(irq, handle_twl4030_sih);
+ agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ agent->irq_name ?: sih->name, NULL);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
- return status;
+ return status < 0 ? status : irq_base;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
- status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
- "TWL4030-PIH", NULL);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+ IRQF_ONESHOT,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
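The twl4030-irq hunks above convert the SIH demultiplexer from a chained flow handler into a threaded interrupt: the handler takes the irqreturn_t signature, the line is claimed with request_threaded_irq(), and the irq name is built with kasprintf(). A minimal sketch of that pattern for a hypothetical chip; the flags and dev_id handling here are illustrative, not a copy of the twl4030 code:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_chip {
	int irq;
	char *irq_name;
};

/* Runs in task context with IRQs enabled, so it may sleep (e.g. for I2C). */
static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct demo_chip *chip = data;

	/* ... read the chip's status register and dispatch sub-interrupts ... */
	(void)chip;
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct demo_chip *chip, const char *name)
{
	chip->irq_name = kasprintf(GFP_KERNEL, "demo_%s", name);

	/*
	 * No hard-irq handler; IRQF_ONESHOT keeps the line masked until
	 * the thread has finished.
	 */
	return request_threaded_irq(chip->irq, NULL, demo_irq_thread,
				    IRQF_ONESHOT,
				    chip->irq_name ?: name, chip);
}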
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb09..c6b456ad734 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
static const unsigned max_i2c_errors = 100;
int ret;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
int i;
union {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba132837..61894fced8e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
switch (wm8994->type) {
case WM8958:
+ case WM1811:
ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
if (ret < 0) {
dev_err(dev, "Failed to read power status: %d\n", ret);
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index a39e0555df6..83adab69bfd 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (I2C bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,7 +11,6 @@
#include "ad525x_dpot.h"
-/* ------------------------------------------------------------------------- */
/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
@@ -60,18 +59,13 @@ static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
.bops = &bops,
};
- struct ad_dpot_id dpot_id = {
- .name = (char *) &id->name,
- .devid = id->driver_data,
- };
-
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(&client->dev, "SMBUS Word Data not Supported\n");
return -EIO;
}
- return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+ return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 7f9a55afe05..822749e41fe 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (SPI bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,40 +11,6 @@
#include "ad525x_dpot.h"
-static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
- {.name = "ad5160", .devid = AD5160_ID},
- {.name = "ad5161", .devid = AD5161_ID},
- {.name = "ad5162", .devid = AD5162_ID},
- {.name = "ad5165", .devid = AD5165_ID},
- {.name = "ad5200", .devid = AD5200_ID},
- {.name = "ad5201", .devid = AD5201_ID},
- {.name = "ad5203", .devid = AD5203_ID},
- {.name = "ad5204", .devid = AD5204_ID},
- {.name = "ad5206", .devid = AD5206_ID},
- {.name = "ad5207", .devid = AD5207_ID},
- {.name = "ad5231", .devid = AD5231_ID},
- {.name = "ad5232", .devid = AD5232_ID},
- {.name = "ad5233", .devid = AD5233_ID},
- {.name = "ad5235", .devid = AD5235_ID},
- {.name = "ad5260", .devid = AD5260_ID},
- {.name = "ad5262", .devid = AD5262_ID},
- {.name = "ad5263", .devid = AD5263_ID},
- {.name = "ad5290", .devid = AD5290_ID},
- {.name = "ad5291", .devid = AD5291_ID},
- {.name = "ad5292", .devid = AD5292_ID},
- {.name = "ad5293", .devid = AD5293_ID},
- {.name = "ad7376", .devid = AD7376_ID},
- {.name = "ad8400", .devid = AD8400_ID},
- {.name = "ad8402", .devid = AD8402_ID},
- {.name = "ad8403", .devid = AD8403_ID},
- {.name = "adn2850", .devid = ADN2850_ID},
- {.name = "ad5270", .devid = AD5270_ID},
- {.name = "ad5271", .devid = AD5271_ID},
- {}
-};
-
-/* ------------------------------------------------------------------------- */
-
/* SPI bus functions */
static int write8(void *client, u8 val)
{
@@ -109,36 +75,16 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d8 = write16,
.write_r8d16 = write24,
};
-
-static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
- char *name)
-{
- while (id->name && id->name[0]) {
- if (strcmp(name, id->name) == 0)
- return id;
- id++;
- }
- return NULL;
-}
-
static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
{
- char *name = spi->dev.platform_data;
- const struct ad_dpot_id *dpot_id;
-
struct ad_dpot_bus_data bdata = {
.client = spi,
.bops = &bops,
};
- dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
-
- if (dpot_id == NULL) {
- dev_err(&spi->dev, "%s not in supported device list", name);
- return -ENODEV;
- }
-
- return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+ return ad_dpot_probe(&spi->dev, &bdata,
+ spi_get_device_id(spi)->driver_data,
+ spi_get_device_id(spi)->name);
}
static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
@@ -146,14 +92,47 @@ static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
return ad_dpot_remove(&spi->dev);
}
+static const struct spi_device_id ad_dpot_spi_id[] = {
+ {"ad5160", AD5160_ID},
+ {"ad5161", AD5161_ID},
+ {"ad5162", AD5162_ID},
+ {"ad5165", AD5165_ID},
+ {"ad5200", AD5200_ID},
+ {"ad5201", AD5201_ID},
+ {"ad5203", AD5203_ID},
+ {"ad5204", AD5204_ID},
+ {"ad5206", AD5206_ID},
+ {"ad5207", AD5207_ID},
+ {"ad5231", AD5231_ID},
+ {"ad5232", AD5232_ID},
+ {"ad5233", AD5233_ID},
+ {"ad5235", AD5235_ID},
+ {"ad5260", AD5260_ID},
+ {"ad5262", AD5262_ID},
+ {"ad5263", AD5263_ID},
+ {"ad5290", AD5290_ID},
+ {"ad5291", AD5291_ID},
+ {"ad5292", AD5292_ID},
+ {"ad5293", AD5293_ID},
+ {"ad7376", AD7376_ID},
+ {"ad8400", AD8400_ID},
+ {"ad8402", AD8402_ID},
+ {"ad8403", AD8403_ID},
+ {"adn2850", ADN2850_ID},
+ {"ad5270", AD5270_ID},
+ {"ad5271", AD5271_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
static struct spi_driver ad_dpot_spi_driver = {
.driver = {
.name = "ad_dpot",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad_dpot_spi_probe,
.remove = __devexit_p(ad_dpot_spi_remove),
+ .id_table = ad_dpot_spi_id,
};
static int __init ad_dpot_spi_init(void)
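The ad525x SPI patch above drops the private name-matching table in favour of the SPI core's id matching: the driver exports a spi_device_id table through MODULE_DEVICE_TABLE(spi, ...) and probe recovers the matched entry with spi_get_device_id(). A stripped-down sketch of that shape; the device names and id values below are invented placeholders:

#include <linux/module.h>
#include <linux/spi/spi.h>

static int __devinit demo_spi_probe(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);

	dev_info(&spi->dev, "probed %s (devid 0x%lx)\n",
		 id->name, id->driver_data);
	return 0;
}

static int __devexit demo_spi_remove(struct spi_device *spi)
{
	return 0;
}

static const struct spi_device_id demo_spi_ids[] = {
	{ "demo_pot_a", 0x100 },
	{ "demo_pot_b", 0x200 },
	{ }
};
MODULE_DEVICE_TABLE(spi, demo_spi_ids);

static struct spi_driver demo_spi_driver = {
	.driver = {
		.name	= "demo_pot",
		.owner	= THIS_MODULE,
	},
	.probe		= demo_spi_probe,
	.remove		= __devexit_p(demo_spi_remove),
	.id_table	= demo_spi_ids,
};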
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 7cb911028d0..1d1d4261591 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Licensed under the GPL-2 or later.
*/
@@ -76,8 +76,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "0.2"
-
#include "ad525x_dpot.h"
/*
@@ -687,8 +685,9 @@ inline void ad_dpot_remove_files(struct device *dev,
}
}
-__devinit int ad_dpot_probe(struct device *dev,
- struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
+int __devinit ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, unsigned long devid,
+ const char *name)
{
struct dpot_data *data;
@@ -704,13 +703,13 @@ __devinit int ad_dpot_probe(struct device *dev,
mutex_init(&data->update_lock);
data->bdata = *bdata;
- data->devid = id->devid;
+ data->devid = devid;
- data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+ data->max_pos = 1 << DPOT_MAX_POS(devid);
data->rdac_mask = data->max_pos - 1;
- data->feat = DPOT_FEAT(data->devid);
- data->uid = DPOT_UID(data->devid);
- data->wipers = DPOT_WIPERS(data->devid);
+ data->feat = DPOT_FEAT(devid);
+ data->uid = DPOT_UID(devid);
+ data->wipers = DPOT_WIPERS(devid);
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i)) {
@@ -731,7 +730,7 @@ __devinit int ad_dpot_probe(struct device *dev,
}
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
- id->name, data->max_pos);
+ name, data->max_pos);
return 0;
@@ -745,7 +744,7 @@ exit_free:
dev_set_drvdata(dev, NULL);
exit:
dev_err(dev, "failed to create client for %s ID 0x%lX\n",
- id->name, id->devid);
+ name, devid);
return err;
}
EXPORT_SYMBOL(ad_dpot_probe);
@@ -770,4 +769,3 @@ MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
"Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 82b2cb77ae1..6bd1eba23bc 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -208,12 +208,8 @@ struct ad_dpot_bus_data {
const struct ad_dpot_bus_ops *bops;
};
-struct ad_dpot_id {
- char *name;
- unsigned long devid;
-};
-
-int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+ unsigned long devid, const char *name);
int ad_dpot_remove(struct device *dev);
#endif
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 5f898cb706a..b29a2be2459 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -216,7 +216,7 @@ static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
*temperature = (x1+x2+8) >> 4;
exit:
- return status;;
+ return status;
}
/*
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 7b33de95c4b..0ff4b02177b 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
eeprom->reg_data_out = 0;
eeprom->reg_data_clock = 0;
eeprom->reg_chip_select = 1;
+ eeprom->drive_data = 1;
eeprom->register_write(eeprom);
/*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 1;
/*
* Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 0;
/*
* Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ /* create command to enable/disable */
+
+ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+ command <<= (eeprom->width - 2);
+
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and wait
+ * for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we sleep between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+ int timeout = 100;
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+ command |= addr;
+
+ /* send write command */
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /* send data */
+ eeprom_93cx6_write_bits(eeprom, data, 16);
+
+ /* get ready to check for busy */
+ eeprom->drive_data = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+ /* wait at least 250ns for DO to become the busy signal */
+ usleep_range(1000, 2000);
+
+ /* wait for DO to go high to signify finish */
+
+ while (true) {
+ eeprom->register_read(eeprom);
+
+ if (eeprom->reg_data_out)
+ break;
+
+ usleep_range(1000, 2000);
+
+ if (--timeout <= 0) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ break;
+ }
+ }
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
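The two exports above add a write path to the bit-banging 93cx6 library: writes have to be bracketed by eeprom_93cx6_wren() calls, and eeprom_93cx6_write() sleeps while polling DO until the part reports completion. A hedged usage sketch from a hypothetical caller, assuming the matching declarations land in <linux/eeprom_93cx6.h> as part of this series; the register_read/register_write callbacks and the hardware behind them are invented:

#include <linux/types.h>
#include <linux/eeprom_93cx6.h>

/* Hypothetical callbacks translating the reg_* fields to device registers. */
static void demo_eeprom_reg_read(struct eeprom_93cx6 *eeprom)
{
	/* read the device's EEPROM control register and update
	 * eeprom->reg_data_out from the DO pin state */
}

static void demo_eeprom_reg_write(struct eeprom_93cx6 *eeprom)
{
	/* drive CS/CLK/DI and the data direction (eeprom->drive_data)
	 * from the eeprom->reg_* fields */
}

static void demo_store_word(void *priv, u8 addr, u16 value)
{
	struct eeprom_93cx6 eeprom = {
		.data		= priv,
		.register_read	= demo_eeprom_reg_read,
		.register_write	= demo_eeprom_reg_write,
		.width		= PCI_EEPROM_WIDTH_93C46,
	};

	eeprom_93cx6_wren(&eeprom, true);	/* unlock writes */
	eeprom_93cx6_write(&eeprom, addr, value);
	eeprom_93cx6_wren(&eeprom, false);	/* lock again */
}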
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 5c766b4fb23..7d56f45dee1 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 3dd2dfb8da1..d7b2ca358b2 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 6cbba1afef3..fc9fc9d4e08 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 76bfda1ffaa..8e540f4e9d5 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 1bc4306572a..90746378f9b 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index bf2c738d2b7..2e9566dab2b 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 4d8a4e248b3..9b083448814 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 89947723a27..35361753b48 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index 4b2398e27fd..5319ea261c0 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index 766766523a6..e97848f51b3 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index a234d965243..1ccedb71e72 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* This driver is based on code originally written by Pete Reynolds
* and others.
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 2de487ac788..232034f5da4 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index 00dbf1d4373..a7729ef76ac 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* Originally written by Pete Reynolds
*/
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 93baa350d69..1dcb9ae1905 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 307aada5fff..3d6cce663be 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -158,7 +158,7 @@ static int als_set_default_config(struct i2c_client *client)
dev_err(&client->dev, "default write failed.");
return retval;
}
- return 0;;
+ return 0;
}
static int isl29020_probe(struct i2c_client *client,
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 7768b87d995..950dbe9ecb3 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -324,7 +324,7 @@ static const struct file_operations gru_fops = {
static struct proc_entry {
char *name;
- int mode;
+ umode_t mode;
const struct file_operations *fops;
struct proc_dir_entry *entry;
} proc_files[] = {
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 42f067347bc..3fac67a5204 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -576,7 +576,7 @@ xpnet_init(void)
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
- xpnet_device->features = NETIF_F_NO_CSUM;
+ xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index ba168a7d54d..2b62232c2c6 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -137,6 +137,8 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
* st_reg_complete -
* to call registration complete callbacks
* of all protocol stack drivers
+ * This function is called with the spin lock held; protocol drivers are
+ * expected only to complete their waits and do nothing more.
*/
void st_reg_complete(struct st_data_s *st_gdata, char err)
{
@@ -538,11 +540,12 @@ long st_register(struct st_proto_s *new_proto)
set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_kim_recv;
+ /* enable the ST LL - to set default chip state */
+ st_ll_enable(st_gdata);
+
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* enable the ST LL - to set default chip state */
- st_ll_enable(st_gdata);
/* this may take a while to complete
* since it involves BT fw download
*/
@@ -553,10 +556,13 @@ long st_register(struct st_proto_s *new_proto)
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
st_reg_complete(st_gdata, err);
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
}
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
@@ -576,10 +582,10 @@ long st_register(struct st_proto_s *new_proto)
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
pr_err(" proto %d already registered ",
new_proto->chnl_id);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EALREADY;
}
- spin_lock_irqsave(&st_gdata->lock, flags);
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
@@ -619,7 +625,7 @@ long st_unregister(struct st_proto_s *proto)
spin_lock_irqsave(&st_gdata->lock, flags);
- if (st_gdata->list[proto->chnl_id] == NULL) {
+ if (st_gdata->is_registered[proto->chnl_id] == false) {
pr_err(" chnl_id %d not registered", proto->chnl_id);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EPROTONOSUPPORT;
@@ -629,6 +635,10 @@ long st_unregister(struct st_proto_s *proto)
remove_channel_from_table(st_gdata, proto);
spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /* paranoid check */
+ if (st_gdata->protos_registered < ST_EMPTY)
+ st_gdata->protos_registered = ST_EMPTY;
+
if ((st_gdata->protos_registered == ST_EMPTY) &&
(!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_info(" all chnl_ids unregistered ");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 43ef8d162f2..a7a861ceee2 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -469,37 +469,21 @@ long st_kim_start(void *kim_data)
/* wait for ldisc to be installed */
err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
msecs_to_jiffies(LDISC_TIME));
- if (!err) { /* timeout */
- pr_err("line disc installation timed out ");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* the following wait is never going to be completed,
- * since the ldisc was never installed, hence serving
- * as a mdelay of LDISC_TIME msecs */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -ETIMEDOUT;
+ if (!err) {
+ /* ldisc installation timeout,
+ * flush uart, power cycle BT_EN */
+ pr_err("ldisc installation timeout");
+ err = st_kim_stop(kim_gdata);
continue;
} else {
/* ldisc installed now */
- pr_info(" line discipline installed ");
+ pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
+ /* ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN */
pr_err("download firmware failed");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* this wait might be completed, though in the
- * tty_close() since the ldisc is already
- * installed */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -EINVAL;
+ err = st_kim_stop(kim_gdata);
continue;
} else { /* on success don't retry */
break;
@@ -510,8 +494,14 @@ long st_kim_start(void *kim_data)
}
/**
- * st_kim_stop - called from ST Core, on the last un-registration
- * toggle low the chip enable gpio
+ * st_kim_stop - stop communication with the chip.
+ * This can be called from ST Core/KIM on either
+ * (a) the last un-register, when the chip need not be powered thereafter, or
+ * (b) a failure to install the ldisc or download firmware.
+ * The function is responsible for (a) notifying UIM about the un-installation,
+ * (b) flushing the UART if the ldisc was installed,
+ * (c) resetting BT_EN, i.e. pulling nshutdown low at the end, and
+ * (d) invoking the platform's chip-disable routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -519,12 +509,16 @@ long st_kim_stop(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
struct ti_st_plat_data *pdata =
kim_gdata->kim_pdev->dev.platform_data;
+ struct tty_struct *tty = kim_gdata->core_data->tty;
INIT_COMPLETION(kim_gdata->ldisc_installed);
- /* Flush any pending characters in the driver and discipline. */
- tty_ldisc_flush(kim_gdata->core_data->tty);
- tty_driver_flush_buffer(kim_gdata->core_data->tty);
+ if (tty) { /* can be called before ldisc is installed */
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ tty->ops->flush_buffer(tty);
+ }
/* send uninstall notification to UIM */
pr_info("ldisc_install = 0");
@@ -579,6 +573,28 @@ static ssize_t show_install(struct device *dev,
return sprintf(buf, "%d\n", kim_data->ldisc_install);
}
+#ifdef DEBUG
+static ssize_t store_dev_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing dev name >%s<", buf);
+ strncpy(kim_data->dev_name, buf, count);
+ pr_debug("stored dev name >%s<", kim_data->dev_name);
+ return count;
+}
+
+static ssize_t store_baud_rate(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing baud rate >%s<", buf);
+ sscanf(buf, "%ld", &kim_data->baud_rate);
+ pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
+ return count;
+}
+#endif /* if DEBUG */
+
static ssize_t show_dev_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -605,10 +621,18 @@ static struct kobj_attribute ldisc_install =
__ATTR(install, 0444, (void *)show_install, NULL);
static struct kobj_attribute uart_dev_name =
+#ifdef DEBUG /* TODO: move this to debug-fs if possible */
+__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
+#else
__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
+#endif
static struct kobj_attribute uart_baud_rate =
+#ifdef DEBUG /* TODO: move to debugfs */
+__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
+#else
__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
+#endif
static struct kobj_attribute uart_flow_cntrl =
__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
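The st_kim hunks above make dev_name and baud_rate writable only in DEBUG builds by pairing each show routine with a store routine and widening the sysfs mode under #ifdef DEBUG. A condensed sketch of the same show/store pattern using a plain device attribute; the attribute name and backing field are placeholders, and the driver itself uses kobj_attribute rather than DEVICE_ATTR:

#include <linux/device.h>
#include <linux/kernel.h>

struct demo_data {
	long rate;
};

static ssize_t show_rate(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct demo_data *d = dev_get_drvdata(dev);

	return sprintf(buf, "%ld\n", d->rate);
}

#ifdef DEBUG
static ssize_t store_rate(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct demo_data *d = dev_get_drvdata(dev);

	if (sscanf(buf, "%ld", &d->rate) != 1)
		return -EINVAL;
	return count;
}

/* writable only in debug builds, read-only otherwise */
static DEVICE_ATTR(rate, 0644, show_rate, store_rate);
#else
static DEVICE_ATTR(rate, 0444, show_rate, NULL);
#endif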
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f9530..1e0e27cbe98 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in the CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b038c4a9468..e99bdc18002 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2949,7 +2949,7 @@ static void mmc_test_free_dbgfs_file(struct mmc_card *card)
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
- const char *name, mode_t mode, const struct file_operations *fops)
+ const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e7..950b97d7412 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require a longer data read timeout than indicated in the CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough; the value can
+ * be increased further if other problematic cards turn up.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send power notify command only if card
+ * is mmc and notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+ * For eMMC 4.5 devices, send the notify command
+ * before sleep, because in the sleep state eMMC 4.5
+ * devices respond only to the RESET and AWAKE commands
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb38748..d31c78b72b0 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
- /*
- * Enable runtime power management by default. This flag was added due
- * to runtime power management causing disruption for some users, but
- * the power on/off code has been improved since then.
- *
- * We'll enable this flag by default as an experiment, and if no
- * problems are reported, we will follow up later and remove the flag
- * altogether.
- */
- host->caps = MMC_CAP_POWER_OFF_CARD;
-
return host;
free:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279..d240427c124 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a8b4d2aa18e..f437c3e6f3a 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -741,7 +741,7 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
/* maybe switch power to the card */
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
gpio_set_value(host->board->vcc_pin, 0);
@@ -897,7 +897,7 @@ static int at91_mci_get_ro(struct mmc_host *mmc)
{
struct at91mci_host *host = mmc_priv(mmc);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
return !!gpio_get_value(host->board->wp_pin);
/*
* Board doesn't support read only detection; let the mmc core
@@ -991,21 +991,21 @@ static int __init at91_mci_probe(struct platform_device *pdev)
* Reserve GPIOs ... board init code makes sure these pins are set
* up as GPIOs with the right direction (input, except for vcc)
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = gpio_request(host->board->det_pin, "mmc_detect");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
goto fail4b;
}
}
- if (host->board->wp_pin) {
+ if (gpio_is_valid(host->board->wp_pin)) {
ret = gpio_request(host->board->wp_pin, "mmc_wp");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
goto fail4;
}
}
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
@@ -1057,7 +1057,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Add host to MMC layer
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
host->present = !gpio_get_value(host->board->det_pin);
}
else
@@ -1068,7 +1068,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* monitor card insertion/removal if we can
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = request_irq(gpio_to_irq(host->board->det_pin),
at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
if (ret)
@@ -1087,13 +1087,13 @@ fail0:
fail1:
clk_put(host->mci_clk);
fail2:
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
fail3:
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
fail4:
- if (host->board->det_pin)
+ if (gpio_is_valid(host->board->det_pin))
gpio_free(host->board->det_pin);
fail4b:
if (host->buffer)
@@ -1125,7 +1125,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
host->buffer, host->physical_address);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (device_can_wakeup(&pdev->dev))
free_irq(gpio_to_irq(host->board->det_pin), host);
device_init_wakeup(&pdev->dev, 0);
@@ -1140,9 +1140,9 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
clk_disable(host->mci_clk); /* Disable the peripheral clock */
clk_put(host->mci_clk);
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
iounmap(host->baseaddr);
@@ -1163,7 +1163,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
enable_irq_wake(host->board->det_pin);
if (mmc)
@@ -1178,7 +1178,7 @@ static int at91_mci_resume(struct platform_device *pdev)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
disable_irq_wake(host->board->det_pin);
if (mmc)
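
The at91_mci conversion above matters because GPIO 0 is a legal pin number, so truth-testing board->*_pin treats it as "absent"; boards mark unused pins with a negative number, and drivers should test with gpio_is_valid(). A small sketch under that assumption (example_claim_detect_pin() is a made-up helper):

#include <linux/gpio.h>

static int example_claim_detect_pin(int det_pin)
{
	/* covers -EINVAL, -1 and any other "no pin wired" marker */
	if (!gpio_is_valid(det_pin))
		return 0;

	return gpio_request(det_pin, "mmc_detect");
}
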
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50b5f9926f6..fa8dd2fda4b 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
if (!cmd->data || cmd->error) {
- if (host->data)
+ if (host->data) {
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
mmci_stop_data(host);
+ }
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
data = host->data;
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+ MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
cmd = host->cmd;
@@ -1496,6 +1502,8 @@ static struct amba_id mmci_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, mmci_ids);
+
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
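
MODULE_DEVICE_TABLE(amba, mmci_ids) added above exports the ID table as module aliases so udev/modprobe can autoload the driver when a matching PrimeCell is discovered. A generic sketch of the pattern (example_ids and the ID value are hypothetical, not mmci's real IDs):

#include <linux/amba/bus.h>
#include <linux/module.h>

static struct amba_id example_ids[] = {
	{
		.id	= 0x00041234,	/* hypothetical peripheral ID */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
/* emits the modalias strings used for automatic module loading */
MODULE_DEVICE_TABLE(amba, example_ids);
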
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 211a4959c29..eeb8cd125b0 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -679,8 +679,9 @@ static const struct mmc_host_ops mvsd_ops = {
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
-static void __init mv_conf_mbus_windows(struct mvsd_host *host,
- struct mbus_dram_target_info *dram)
+static void __init
+mv_conf_mbus_windows(struct mvsd_host *host,
+ const struct mbus_dram_target_info *dram)
{
void __iomem *iobase = host->base;
int i;
@@ -691,7 +692,7 @@ static void __init mv_conf_mbus_windows(struct mvsd_host *host,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
@@ -705,6 +706,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mvsdio_platform_data *mvsd_data;
+ const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
@@ -755,8 +757,9 @@ static int __init mvsd_probe(struct platform_device *pdev)
}
/* (Re-)program MBUS remapping windows if we are asked to. */
- if (mvsd_data->dram != NULL)
- mv_conf_mbus_windows(host, mvsd_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(host, dram);
mvsd_power_down(host);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d..8e0fbe99404 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99b449d26a4..973011f9a29 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -713,7 +713,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = PTR_ERR(host->clk);
goto out_iounmap;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
mxs_mmc_reset(host);
@@ -772,7 +772,7 @@ out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
out_iounmap:
iounmap(host->base);
@@ -798,7 +798,7 @@ static int mxs_mmc_remove(struct platform_device *pdev)
if (host->dmach)
dma_release_channel(host->dmach);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
iounmap(host->base);
@@ -819,7 +819,7 @@ static int mxs_mmc_suspend(struct device *dev)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return ret;
}
@@ -830,7 +830,7 @@ static int mxs_mmc_resume(struct device *dev)
struct mxs_mmc_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
ret = mmc_resume_host(mmc);
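
The mxs-mmc hunks above are the standard common-clock conversion: clk_prepare_enable() pairs the (possibly sleeping) prepare step with enable, and every enable is balanced by clk_disable_unprepare() on the error, remove and suspend paths. A hedged sketch of the probe-time pattern (example_clock_setup() is a made-up helper):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clock_setup(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	*out = clk;
	return 0;
}
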
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 2dba999caf2..887c0e598cf 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/omap.c
*
* Copyright (C) 2004 Nokia Corporation
- * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
+ * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
* Misc hacks here and there by Tony Lindgren <tony@atomide.com>
* Other hacks (DMA, SD, etc) by David Brownell
*
@@ -1634,4 +1634,4 @@ module_exit(mmc_omap_exit);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("Juha YrjÜlä");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c822..d1fb561e089 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
@@ -1988,6 +1991,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
+ mmc->pm_caps = mmc_slot(host).pm_caps;
+
omap_hsmmc_conf_bus_power(host);
/* Select DMA lines */
@@ -2176,13 +2181,7 @@ static int omap_hsmmc_suspend(struct device *dev)
cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
- if (ret == 0) {
- omap_hsmmc_disable_irq(host);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- if (host->got_dbclk)
- clk_disable(host->dbclk);
- } else {
+ if (ret) {
host->suspended = 0;
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev,
@@ -2191,9 +2190,20 @@ static int omap_hsmmc_suspend(struct device *dev)
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
+ goto err;
}
- pm_runtime_put_sync(host->dev);
+
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+ omap_hsmmc_disable_irq(host);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+ }
+ if (host->got_dbclk)
+ clk_disable(host->dbclk);
+
}
+err:
+ pm_runtime_put_sync(host->dev);
return ret;
}
@@ -2213,7 +2223,8 @@ static int omap_hsmmc_resume(struct device *dev)
if (host->got_dbclk)
clk_enable(host->dbclk);
- omap_hsmmc_conf_bus_power(host);
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+ omap_hsmmc_conf_bus_power(host);
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev, host->slot_id);
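
The MMC_PM_KEEP_POWER checks above only take effect when an SDIO function driver has asked the host to keep the card powered across suspend (typically for wake-capable WLAN cards). A sketch of the function-driver side, assuming a hypothetical example_sdio_suspend() callback:

#include <linux/errno.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	/* bail out if the host cannot keep VCC up while suspended */
	if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
		return -ENOSYS;

	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}
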
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621c..b4257e70061 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
.remove = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_cns3xxx_init(void)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dca442..a81312c91f7 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_dove_init(void)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f00ea8..38ebc4ea259 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_imx_init(void)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d003e58..01e5f627e0f 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_init(void)
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794a4f6..3619adc7d9f 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
.name = "sdhci-hlwd",
.owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_hlwd_init(void)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c2f7e..6878a94626b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
- int (*suspend) (struct sdhci_pci_chip *,
- pm_message_t);
+ int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
};
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
jmicron_enable_mmc(slot->host, 0);
}
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i;
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!slot)
continue;
- ret = sdhci_suspend_host(slot->host, state);
+ ret = sdhci_suspend_host(slot->host);
if (ret) {
for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
pci_set_power_state(pdev, PCI_D3hot);
} else {
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
}
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- pm_message_t state = { .event = PM_EVENT_SUSPEND };
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
.runtime_suspend = sdhci_pci_runtime_suspend,
.runtime_resume = sdhci_pci_runtime_resume,
.runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = __devexit_p(sdhci_pci_remove),
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
.driver = {
.pm = &sdhci_pci_pm_ops
},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea0558..03970bcb349 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, state);
+ return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f4084..37e0e184a0b 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
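
All of the sdhci platform glue drivers below are converted the same way: the legacy platform_driver .suspend/.resume methods go away and the shared dev_pm_ops is hooked up through .driver.pm, with SDHCI_PLTFM_PMOPS collapsing to NULL when CONFIG_PM is disabled. A sketch of the resulting driver shape (the driver name and struct are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

static struct platform_driver example_sdhci_driver = {
	.driver = {
		.name	= "sdhci-example",
		.owner	= THIS_MODULE,
		.pm	= SDHCI_PLTFM_PMOPS,	/* NULL without CONFIG_PM */
	},
	/* .probe and .remove omitted in this sketch */
};
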
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d30c7b..7a039c3cb1f 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav2_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3e7a5..15673a7ee6a 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav3_init(void)
{
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc..9a20d1f55bb 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -435,14 +435,11 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
- char *name = pdata->clocks[ptr];
-
- if (name == NULL)
- continue;
+ char name[14];
+ snprintf(name, 14, "mmc_busclk.%d", ptr);
clk = clk_get(dev, name);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to get clock %s\n", name);
continue;
}
@@ -622,33 +619,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, pm);
+ return sdhci_suspend_host(host);
}
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
#else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .pm = SDHCI_S3C_PMOPS,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e861fc..e2e18d3f949 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_tegra_init(void)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea32354..19ed580f2ca 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2327,7 +2327,7 @@ out:
#ifdef CONFIG_PM
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b65460d8..a04d4d0c6fd 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e20..d5505f3fe2a 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda16..4208b395806 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
pdata->power) {
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f08f944ac53..c0105a2e269 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -562,17 +562,7 @@ static struct usb_driver ushc_driver = {
.disconnect = ushc_disconnect,
};
-static int __init ushc_init(void)
-{
- return usb_register(&ushc_driver);
-}
-module_init(ushc_init);
-
-static void __exit ushc_exit(void)
-{
- usb_deregister(&ushc_driver);
-}
-module_exit(ushc_exit);
+module_usb_driver(ushc_driver);
MODULE_DESCRIPTION("USB SD Host Controller driver");
MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65183d..2ec978bc32b 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
static int firmware_rom_wait_states = 0x1C;
#endif
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 318a869286a..1be62184140 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -140,6 +140,14 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 9aaac3ac89f..f90135429dc 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 89a02f6f65d..5a3942bf109 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
size_t sz;
int ret;
- ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
int ret, i;
memset(iis, 0, sizeof(*iis));
- ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
if (ret < 0)
goto failed;
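
The afs.c and ar7part.c hunks replace direct calls through mtd->read with the mtd_read() accessor, which keeps call sites uniform while the mtd_info method table is being restructured. A hedged sketch of the wrapper in use (struct example_header and example_read_header() are placeholders):

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

struct example_header {
	u32 magic;
	u32 length;
};

static int example_read_header(struct mtd_info *mtd, loff_t ofs,
			       struct example_header *hdr)
{
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, ofs, sizeof(*hdr), &retlen, (u_char *)hdr);
	if (ret < 0)
		return ret;
	if (retlen != sizeof(*hdr))	/* short read */
		return -EIO;
	return 0;
}
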
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index f40ea454755..94539312995 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -73,8 +73,8 @@ static int create_mtd_partitions(struct mtd_info *master,
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
- master->read(master, offset,
- sizeof(header), &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
@@ -95,16 +95,16 @@ static int create_mtd_partitions(struct mtd_info *master,
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
@@ -114,8 +114,7 @@ static int create_mtd_partitions(struct mtd_info *master,
break;
}
- master->read(master, root_offset,
- sizeof(header), &len, (u8 *)&header);
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644
index 00000000000..608321ee056
--- /dev/null
+++ b/drivers/mtd/bcm63xxpart.c
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFEs do not have the cfe-v string, so check for magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ int namelen = 0;
+ int i;
+ u32 computed_crc;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
+ nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = kerneladdr + kernellen;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+ sparelen = master->size - spareaddr - nvramlen;
+ rootfslen = spareaddr - rootfsaddr;
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ sparelen = master->size - cfelen - nvramlen;
+ }
+
+ /* Determine number of partitions */
+ namelen = 8;
+ if (rootfslen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+ if (kernellen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ parts[curpart].name = "kernel";
+ parts[curpart].offset = kerneladdr;
+ parts[curpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ parts[curpart].name = "rootfs";
+ parts[curpart].offset = rootfsaddr;
+ parts[curpart].size = rootfslen;
+ if (sparelen > 0)
+ parts[curpart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+
+ /* Global partition "linux" to make firmware upgrades easy */
+ curpart++;
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %lx and length %lx\n", i,
+ parts[i].name, (long unsigned int)(parts[i].offset),
+ (long unsigned int)(parts[i].size));
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+};
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ return register_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
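
A parser registered with register_mtd_parser() only runs when a flash map or NAND driver lists its name while registering the MTD device. A sketch of how a BCM63xx flash driver might invoke the parser added above (the probe list and helper name are assumptions, not code from this series):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *example_part_probes[] = { "bcm63xxpart", NULL };

static int example_register_flash(struct mtd_info *mtd)
{
	/* no parser data and no static fallback partition table here */
	return mtd_device_parse_register(mtd, example_part_probes,
					 NULL, NULL, 0);
}
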
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 179814a95f3..85e80180b65 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -139,8 +139,9 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
}
/* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
@@ -698,7 +699,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
- ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
@@ -707,7 +709,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
- ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
@@ -721,7 +724,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
- ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 283d887f782..37b05c3f279 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -191,6 +191,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOC2000
tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -213,6 +214,7 @@ config MTD_DOC2000
config MTD_DOC2001
tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -234,6 +236,7 @@ config MTD_DOC2001
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -251,6 +254,8 @@ config MTD_DOC2001PLUS
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
@@ -259,6 +264,13 @@ config MTD_DOCG3
M-Systems and now Sandisk. The support is very experimental,
and doesn't give access to any write operations.
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
config MTD_DOCPROBE
tristate
select MTD_DOCECC
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f23169d4..e7e46d1e746 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,7 +14,6 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -288,7 +287,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
+ dev->mtd.writev = mtd_writev;
dev->mtd.sync = block2mtd_sync;
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e9fad915121..b1cdf647901 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,23 +562,14 @@ void DoC2k_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
-
this->curfloor = -1;
this->curchip = -1;
mutex_init(&this->lock);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index a3f7a27499b..7543b98f46c 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -343,25 +343,17 @@ void DoCMil_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
-
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 99351bc3e0e..177510d0e7e 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,23 +467,14 @@ void DoCMilPlus_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index bdcf5df982e..ad11ef0a81f 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -29,6 +29,9 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -41,11 +44,7 @@
*
* As no specification is available from M-Systems/Sandisk, this drivers lacks
* several functions available on the chip, as :
- * - block erase
- * - page write
* - IPL write
- * - ECC fixing (lack of BCH algorith understanding)
- * - powerdown / powerup
*
* The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
* the driver assumes a 16bits data bus.
@@ -53,8 +52,7 @@
* DocG3 relies on 2 ECC algorithms, which are handled in hardware :
* - a 1 byte Hamming code stored in the OOB for each page
* - a 7 bytes BCH code stored in the OOB for each page
- * The BCH part is only used for check purpose, no correction is available as
- * some information is missing. What is known is that :
+ * The BCH ECC is :
* - BCH is in GF(2^14)
* - BCH is over data of 520 bytes (512 page + 7 page_info bytes
* + 1 hamming byte)
@@ -63,6 +61,30 @@
*
*/
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (byte 0 until byte 6, and byte 15)
+ * @oobavail: 8 available bytes remaining after ECC toll
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
+/**
+ * struct docg3_bch - BCH engine
+ */
+static struct bch_control *docg3_bch;
+
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
u8 val = readb(docg3->base + reg);
@@ -82,7 +104,7 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
writeb(val, docg3->base + reg);
- trace_docg3_io(1, 16, reg, val);
+ trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
@@ -143,7 +165,7 @@ static void doc_delay(struct docg3 *docg3, int nbNOPs)
{
int i;
- doc_dbg("NOP x %d\n", nbNOPs);
+ doc_vdbg("NOP x %d\n", nbNOPs);
for (i = 0; i < nbNOPs; i++)
doc_writeb(docg3, 0, DOC_NOP);
}
@@ -196,8 +218,8 @@ static int doc_reset_seq(struct docg3 *docg3)
/**
* doc_read_data_area - Read data from data area
* @docg3: the device
- * @buf: the buffer to fill in
- * @len: the lenght to read
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
* @first: first time read, DOC_READADDRESS should be set
*
* Reads bytes from flash data. Handles the single byte / even bytes reads.
@@ -218,8 +240,10 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst16 = buf;
for (i = 0; i < len4; i += 2) {
data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
- *dst16 = data16;
- dst16++;
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
}
if (cdr) {
@@ -229,26 +253,84 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst8 = (u8 *)dst16;
for (i = 0; i < cdr; i++) {
data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
- *dst8 = data8;
- dst8++;
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
}
}
}
/**
- * doc_set_data_mode - Sets the flash to reliable data mode
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_data_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
* The reliable data mode is a bit slower than the fast mode, but less errors
* occur. Entering the reliable mode cannot be done without entering the fast
* mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is a logical and between bytes from page 0 and page 1 (which is
+ * consistent with the fact that writing to a page is _clearing_ bits of that
+ * page).
*/
static void doc_set_reliable_mode(struct docg3 *docg3)
{
- doc_dbg("doc_set_reliable_mode()\n");
- doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
- doc_flash_command(docg3, DOC_CMD_FAST_MODE);
- doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
doc_delay(docg3, 2);
}
@@ -325,6 +407,37 @@ static int doc_set_extra_page_mode(struct docg3 *docg3)
}
/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
* doc_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
@@ -360,34 +473,80 @@ static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
if (ret)
goto out;
- sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
- doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
- doc_delay(docg3, 1);
+ doc_setup_addr_sector(docg3, sector);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
out:
return ret;
}
+
/**
* doc_read_page_ecc_init - Initialize hardware ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
* The function does initialize the hardware ECC engine to compute the Hamming
- * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes).
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
*
* Return 0 if succeeded, -EIO on error
*/
@@ -403,6 +562,106 @@ static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
}
/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function does initialize the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (as when
+ * reading OOB only or write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix if need be read data from flash
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in an inverted order in comparison to
+ * the BCH library, the function reverses the order of bits (i.e. bit7 and bit0,
+ * bit6 and bit1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
+
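
For readers unfamiliar with the bit-order trick above, here is a minimal user-space sketch of the errorpos[] mapping, checked only against the sample values in the comments; the helper name is made up and is not part of the driver:

#include <stdio.h>

/* Map a bit position reported by the BCH library (bit order reversed per
 * byte) to the bit order used by the docg3 hardware, exactly as the loop
 * over errorpos[] does: keep the byte index, mirror the bit index. */
static int docg3_example_fix_bitpos(int pos)
{
	return (pos & ~7) | (7 - (pos & 7));
}

int main(void)
{
	/* Bit 0 of byte 0 maps to bit 7 of byte 0, and so on. */
	printf("%d %d %d\n",
	       docg3_example_fix_bitpos(0),   /* -> 7  */
	       docg3_example_fix_bitpos(7),   /* -> 0  */
	       docg3_example_fix_bitpos(10)); /* -> 13 */
	return 0;
}
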
+
+/**
* doc_read_page_prepare - Prepares reading data from a flash page
* @docg3: the device
* @block0: the first plane block index on flash memory
@@ -488,16 +747,40 @@ static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
}
/**
- * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
* @docg3: the device
- * @syns: the array of 7 integers where the syndroms will be stored
+ * @hwecc: the array of 7 bytes where the hardware ECC will be stored
*/
-static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
{
int i;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
- syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
}
/**
@@ -510,8 +793,7 @@ static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
*/
static void doc_read_page_finish(struct docg3 *docg3)
{
- doc_writeb(docg3, 0, DOC_DATAEND);
- doc_delay(docg3, 2);
+ doc_page_finish(docg3);
doc_set_device_id(docg3, 0);
}
@@ -523,18 +805,29 @@ static void doc_read_page_finish(struct docg3 *docg3)
* @block1: second plane block index calculated
* @page: page calculated
* @ofs: offset in page
+ * @reliable: 0 if the docg3 is in normal mode, 1 if in fast mode, 2 if in
+ * reliable mode
+ *
+ * The calculation depends on the normal/fast/reliable mode. In normal mode, all
+ * 64 pages of a block are available. In fast and reliable modes, only 32 pages
+ * per block are addressed (in reliable mode, pages 2*n and 2*n+1 are clones).
*/
static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
- int *ofs)
+ int *ofs, int reliable)
{
- uint sector;
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
sector = from / DOC_LAYOUT_PAGE_SIZE;
- *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
- * DOC_LAYOUT_NBPLANES;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
*block1 = *block0 + 1;
- *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+ *page = sector % pages_biblock;
*page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
if (sector % 2)
*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
else
@@ -542,99 +835,124 @@ static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
}
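
A worked example of the address split above may help. This user-space sketch assumes the layout values implied by the code and comments (512-byte pages, 64 pages per block, 2 planes, 16-byte page OOB slice); the *_EX constants are illustrative stand-ins, not the real macros:

#include <stdio.h>

#define PAGE_SIZE_EX      512	/* assumed DOC_LAYOUT_PAGE_SIZE */
#define PAGES_PER_BLK_EX  64	/* assumed DOC_LAYOUT_PAGES_PER_BLOCK */
#define NBPLANES_EX       2	/* assumed DOC_LAYOUT_NBPLANES */
#define PAGE_OOB_SIZE_EX  16	/* assumed DOC_LAYOUT_PAGE_OOB_SIZE */

static void split(long long from, int reliable)
{
	unsigned int sector, pages_biblock, block0, block1, page, ofs;

	pages_biblock = PAGES_PER_BLK_EX * NBPLANES_EX;
	if (reliable == 1 || reliable == 2)
		pages_biblock /= 2;

	sector = from / PAGE_SIZE_EX;
	block0 = sector / pages_biblock * NBPLANES_EX;
	block1 = block0 + 1;
	page = (sector % pages_biblock) / NBPLANES_EX;
	if (reliable == 1 || reliable == 2)
		page *= 2;
	ofs = (sector % 2) ? PAGE_OOB_SIZE_EX : 0;

	printf("from=%lld reliable=%d -> blocks (%u,%u) page %u ofs %u\n",
	       from, reliable, block0, block1, page, ofs);
}

int main(void)
{
	split(65536, 0);	/* normal mode:   blocks (2,3), page 0 */
	split(65536, 2);	/* reliable mode: blocks (4,5), page 0 */
	return 0;
}
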
/**
- * doc_read - Read bytes from flash
+ * doc_read_oob - Read out of band bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
+ * @ops: the mtd oob structure
*
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
+ * Reads flash memory OOB area of pages.
*
* Returns 0 if the read succeeded, or -EIO, -EINVAL if an error occurred
*/
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, readlen, ret, ofs = 0;
- int syn[DOC_ECC_BCH_SIZE], eccconf1;
- u8 oob[DOC_LAYOUT_OOB_SIZE];
+ int block0, block1, page, ret, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
+ (from % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
ret = -EINVAL;
- doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
- if (from % DOC_LAYOUT_PAGE_SIZE)
- goto err;
- if (len % 4)
- goto err;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from + len, &block0, &block1, &page, &ofs,
+ docg3->reliable);
if (block1 > docg3->max_block)
goto err;
- *retlen = 0;
+ ops->oobretlen = 0;
+ ops->retlen = 0;
ret = 0;
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
- while (!ret && len > 0) {
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ while (!ret && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
goto err;
- ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
- if (ret < readlen)
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ if (ret < nbdata)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- oob, 0);
- if (ret < DOC_LAYOUT_OOB_SIZE)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ NULL, 0);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ if (ret < nboob)
goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0);
- *retlen += readlen;
- buf += readlen;
- len -= readlen;
-
- ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
- if (ofs == 0)
- page += 2;
- if (page > DOC_ADDR_PAGE_MASK) {
- page = 0;
- block0 += 2;
- block1 += 2;
- }
-
- /*
- * There should be a BCH bitstream fixing algorithm here ...
- * By now, a page read failure is triggered by BCH error
- */
- doc_get_hw_bch_syndroms(docg3, syn);
+ doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
- doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[0], oob[1], oob[2], oob[3], oob[4],
- oob[5], oob[6]);
- doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
- doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[8], oob[9], oob[10], oob[11], oob[12],
- oob[13], oob[14]);
- doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
+ oobbuf[4], oobbuf[5], oobbuf[6]);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
+ oobbuf[12], oobbuf[13], oobbuf[14]);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
- doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
-
- ret = -EBADMSG;
- if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
- if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
- goto err_in_read;
- if (is_prot_seq_error(docg3))
- goto err_in_read;
+ doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
+ hwecc[5], hwecc[6]);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ ret = -EUCLEAN;
+ }
}
+
doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
}
- return 0;
+ return ret;
err_in_read:
doc_read_page_finish(docg3);
err:
@@ -642,54 +960,33 @@ err:
}
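
As a hedged caller-side sketch (not part of the patch), assuming a struct mtd_info already registered for one floor, a single page plus its 16 OOB bytes could be read through this path roughly as follows; -EUCLEAN and -EBADMSG propagate exactly as set in the loop above:

/* Hypothetical test helper, not part of the patch. */
static int docg3_example_read_first_page(struct mtd_info *mtd)
{
	struct mtd_oob_ops ops;
	u8 data[512], oob[16];

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_PLACE_OOB;	/* raw OOB placement, no autolayout */
	ops.datbuf = data;
	ops.len = sizeof(data);
	ops.oobbuf = oob;
	ops.ooblen = sizeof(oob);
	ops.ooboffs = 0;

	/* -EUCLEAN: bitflips were corrected; -EBADMSG: uncorrectable page */
	return mtd_read_oob(mtd, 0, &ops);
}
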
/**
- * doc_read_oob - Read out of band bytes from flash
+ * doc_read - Read bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @ops: the mtd oob structure
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
*
- * Reads flash memory OOB area of pages.
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
*
* Returns 0 if the read succeeded, or -EIO, -EINVAL if an error occurred
*/
-static int doc_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ofs, ret;
- u8 *buf = ops->oobbuf;
- size_t len = ops->ooblen;
-
- doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
- if (len != DOC_LAYOUT_OOB_SIZE)
- return -EINVAL;
-
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- buf += ops->ooboffs;
- break;
- default:
- break;
- }
+ struct mtd_oob_ops ops;
+ size_t ret;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
- if (block1 > docg3->max_block)
- return -EINVAL;
-
- ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_PAGE_SIZE);
- if (!ret)
- ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
- if (!ret)
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- buf, 1);
- doc_read_page_finish(docg3);
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
- if (ret > 0)
- ops->oobretlen = ret;
- else
- ops->oobretlen = 0;
- return (ret > 0) ? 0 : ret;
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
}
static int doc_reload_bbt(struct docg3 *docg3)
@@ -726,7 +1023,8 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ofs, is_good;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
from, block0, block1, page, ofs);
@@ -739,6 +1037,7 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
return !is_good;
}
+#if 0
/**
* doc_get_erase_count - Get block erase count
* @docg3: the device
@@ -758,7 +1057,7 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
if (from % DOC_LAYOUT_PAGE_SIZE)
return -EINVAL;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
if (block1 > docg3->max_block)
return -EINVAL;
@@ -780,6 +1079,558 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
return max(plane1_erase_count, plane2_erase_count);
}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after an erase or write operation, and
+ * check the erase/write status.
+ *
+ * Returns 0 on success, -EIO on an erase/write failure, -EAGAIN if the chip
+ * timed out
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int status, ret = 0;
+
+ if (!doc_is_ready(docg3))
+ usleep_range(3000, 3000);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
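
If finer-grained reporting were ever wanted, the status byte could be decoded with the DOC_PLANES_STATUS_* masks defined later in docg3.h; a hypothetical helper, not part of the patch:

/* Hypothetical helper: report which plane(s) failed an erase/write. */
static void doc_example_report_status(struct docg3 *docg3, u8 status)
{
	if (!(status & DOC_PLANES_STATUS_FAIL))
		return;
	if (status & DOC_PLANES_STATUS_PLANE0_KO)
		doc_err("operation failed on plane 0\n");
	if (status & DOC_PLANES_STATUS_PLANE1_KO)
		doc_err("operation failed on plane 1\n");
}
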
+
+/**
+ * doc_erase_block - Erase a pair of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erases both blocks, and returns the operation status
+ *
+ * Returns 0 if the erase succeeded, -EIO on an erase failure, -EAGAIN if the
+ * chip was not ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erases a bunch of contiguous blocks, by pairs, as an "mtd" page of 1024 bytes
+ * is split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if the erase succeeded, -EINVAL on an addressing error, -EIO on an
+ * erase failure
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld)\n", info->addr, info->len);
+ doc_set_device_id(docg3, docg3->device_id);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (block1 > docg3->max_block || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
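
A hedged caller-side sketch of the erase path (assuming a registered struct mtd_info for one floor): erasing the first block pair through the generic mtd_erase() wrapper, which lands in doc_erase() above:

/* Hypothetical test helper, not part of the patch. */
static int docg3_example_erase_first_block_pair(struct mtd_info *mtd)
{
	struct erase_info instr;
	int ret;

	memset(&instr, 0, sizeof(instr));
	instr.mtd = mtd;
	instr.addr = 0;			/* must be block-pair aligned */
	instr.len = mtd->erasesize;	/* one pair of blocks, one per plane */

	ret = mtd_erase(mtd, &instr);
	if (ret || instr.state != MTD_ERASE_DONE)
		pr_err("docg3 example: erase failed (%d)\n", ret);
	return ret;
}
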
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken, the
+ * remaining ones being filled with the hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Writes one full page (i.e. one page split over two planes), of 512 bytes,
+ * with the OOB data. The OOB ECC is automatically computed by the hardware
+ * Hamming and BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess the autoecc mode from the mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 on success, -EINVAL if the oob mode is invalid
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16-byte OOB area from 8 non-ECC bytes
+ * @dst: the target 16-byte OOB buffer
+ * @oobsrc: the source 8-byte non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
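
A small illustration of the byte mapping performed above, assuming DOC_LAYOUT_OOB_PAGEINFO_SZ is 7 and DOC_LAYOUT_OOB_UNUSED_OFS is 15 (values inferred from the read-path debug output, not shown in this hunk):

/* Hypothetical illustration, not part of the patch. */
static void docg3_example_autooob(void)
{
	u8 src[8]  = { 'I', 'N', 'F', 'O', '0', '1', '2', 0x5a };
	u8 dst[16] = { 0 };

	doc_fill_autooob(dst, src);
	/*
	 * Assuming the macro values named above:
	 *   dst[0..6]  = src[0..6]   page info bytes
	 *   dst[7]     = 0           filled later by the hardware Hamming
	 *   dst[8..14] = 0           filled later by the hardware BCH ECC
	 *   dst[15]    = src[7]      the "unused" byte (0x5a here)
	 */
}
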
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the OOB
+ * in temporary storage. This is very dangerous, as 2 concurrent
+ * applications could store an OOB, and then write their pages (which will
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 on success, -EINVAL if the ops content is invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either writes the OOB data into a temporary buffer, for the subsequent page
+ * write. The provided OOB should be 16 bytes long. If a data buffer is provided
+ * as well, the page write is issued.
+ * Or provides data without OOB, in which case an all-zeroed OOB is used (the
+ * ECC is still filled in if asked for).
+ *
+ * Returns 0 on success, -EINVAL if the data or OOB length is invalid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ oobdelta = 0;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
+ docg3->reliable);
+ if (block1 > docg3->max_block)
+ goto err;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+err:
+ doc_set_device_id(docg3, 0);
+ return ret;
+}
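
A hedged caller-side sketch of the new write path (struct mtd_info assumed registered): one 512-byte page written with the 8 caller-controlled OOB bytes in MTD_OPS_AUTO_OOB mode, leaving the Hamming and BCH bytes to the hardware; the oobavail value of 8 is an assumption:

/* Hypothetical test helper, not part of the patch. */
static int docg3_example_write_first_page(struct mtd_info *mtd)
{
	struct mtd_oob_ops ops;
	u8 data[512], oob[8];

	memset(data, 0xa5, sizeof(data));
	memset(oob, 0xff, sizeof(oob));

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_AUTO_OOB;	/* bytes 8-14 filled by the hardware ECC */
	ops.datbuf = data;
	ops.len = sizeof(data);
	ops.oobbuf = oob;
	ops.ooblen = sizeof(oob);	/* assumed mtd->ecclayout->oobavail == 8 */

	return mtd_write_oob(mtd, 0, &ops);
}
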
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
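
Since the protection-key attributes above are written from user space, a rough user-space sketch follows; the sysfs path is an assumption (it depends on where the "docg3" platform device is registered) and the key value is made up:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path of the docg3 platform device; adjust to the real one. */
	const char *attr = "/sys/devices/platform/docg3/f0_dps0_protection_key";
	const char key[8] = "12345678";	/* 8 bytes = DOC_LAYOUT_DPS_KEY_LENGTH */
	int fd = open(attr, O_WRONLY);

	if (fd < 0 || write(fd, key, sizeof(key)) != sizeof(key))
		perror("dps0 key insert");
	if (fd >= 0)
		close(fd);
	return 0;
}
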
/*
* Debug sysfs entries
@@ -852,13 +1703,15 @@ static int dbg_protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int protect = doc_register_readb(docg3, DOC_PROTECTION);
- int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
- int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
- int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
- int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
- int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
- int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
@@ -947,52 +1800,54 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = "DiskOnChip G3";
+ mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ docg3->device_id);
docg3->max_block = 2047;
break;
}
mtd->type = MTD_NANDFLASH;
- /*
- * Once write methods are added, the correct flags will be set.
- * mtd->flags = MTD_CAP_NANDFLASH;
- */
- mtd->flags = MTD_CAP_ROM;
+ mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = NULL;
- mtd->point = NULL;
- mtd->unpoint = NULL;
+ mtd->erase = doc_erase;
mtd->read = doc_read;
- mtd->write = NULL;
+ mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
+ mtd->write_oob = doc_write_oob;
mtd->block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
- * @pdev: platform device
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
*
- * Probes for a G3 chip at the specified IO space in the platform data
- * ressources.
+ * Checks whether a device is available at the specified IO range and floor.
*
- * Returns 0 on success, -ENOMEM, -ENXIO on error
+ * Returns a mtd_info struct if there is a device, NULL if none was found, or an
+ * ERR_PTR if a memory allocation failed. If floor 0 is checked, a reset of the
+ * ASIC is launched.
*/
-static int __init docg3_probe(struct platform_device *pdev)
+static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
+ struct device *dev)
{
- struct device *dev = &pdev->dev;
- struct docg3 *docg3;
- struct mtd_info *mtd;
- struct resource *ress;
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
ret = -ENOMEM;
docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
@@ -1002,69 +1857,218 @@ static int __init docg3_probe(struct platform_device *pdev)
if (!mtd)
goto nomem2;
mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
- ret = -ENXIO;
- ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ress) {
- dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
- }
- docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
-
- docg3->dev = &pdev->dev;
- docg3->device_id = 0;
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->base = base;
doc_set_device_id(docg3, docg3->device_id);
- doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
chip_id = doc_register_readw(docg3, DOC_CHIPID);
chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
- ret = -ENODEV;
+ ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
- doc_info("No device found at IO addr %p\n",
- (void *)ress->start);
- goto nochipfound;
+ goto nomem3;
}
switch (chip_id) {
case DOC_CHIPID_G3:
- doc_info("Found a G3 DiskOnChip at addr %p\n",
- (void *)ress->start);
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
- goto nochipfound;
+ goto nomem3;
}
doc_set_driver_info(chip_id, mtd);
- platform_set_drvdata(pdev, mtd);
- ret = -ENOMEM;
- bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
- 8 * DOC_LAYOUT_PAGE_SIZE);
- docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
- if (!docg3->bbt)
- goto nochipfound;
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
+ return mtd;
- ret = mtd_device_parse_register(mtd, part_probes,
- NULL, NULL, 0);
- if (ret)
- goto register_error;
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
- doc_dbg_register(docg3);
- return 0;
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
-register_error:
+ mtd_device_unregister(mtd);
kfree(docg3->bbt);
-nochipfound:
- iounmap(docg3->base);
-noress:
+ kfree(docg3);
+ kfree(mtd->name);
kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awakens the docg3 floors
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put the docg3 floors in low power mode
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if the suspend succeeded, -EIO if the chip refused to suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor, found = 0;
+ struct mtd_info **docg3_floors;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ goto noress;
+ }
+ base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!docg3_floors)
+ goto nomem1;
+ docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!docg3_bch)
+ goto nomem2;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(base, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ docg3_floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ found++;
+ }
+
+ ret = doc_register_sysfs(pdev, docg3_floors);
+ if (ret)
+ goto err_probe;
+ if (!found)
+ goto notfound;
+
+ platform_set_drvdata(pdev, docg3_floors);
+ doc_dbg_register(docg3_floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(docg3_bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
nomem2:
- kfree(docg3);
+ kfree(docg3_floors);
nomem1:
+ iounmap(base);
+noress:
return ret;
}
@@ -1076,15 +2080,20 @@ nomem1:
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct docg3 *docg3 = mtd->priv;
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = docg3_floors[0]->priv;
+ void __iomem *base = docg3->base;
+ int floor;
+ doc_unregister_sysfs(pdev, docg3_floors);
doc_dbg_unregister(docg3);
- mtd_device_unregister(mtd);
- iounmap(docg3->base);
- kfree(docg3->bbt);
- kfree(docg3);
- kfree(mtd);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
+
+ kfree(docg3_floors);
+ free_bch(docg3_bch);
+ iounmap(base);
return 0;
}
@@ -1093,6 +2102,8 @@ static struct platform_driver g3_driver = {
.name = "docg3",
.owner = THIS_MODULE,
},
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
.remove = __exit_p(docg3_release),
};
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index 0d407be2459..db0da436b49 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -51,10 +51,19 @@
#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
#define DOC_LAYOUT_BLOCK_SIZE \
(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
#define DOC_ECC_BCH_SIZE 7
#define DOC_ECC_BCH_COVERED_BYTES \
(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
- DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
/*
* Blocks distribution
@@ -80,6 +89,7 @@
#define DOC_CHIPID_G3 0x200
#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
/*
* Flash registers
*/
@@ -105,9 +115,11 @@
#define DOC_ECCCONF1 0x1042
#define DOC_ECCPRESET 0x1044
#define DOC_HAMMINGPARITY 0x1046
-#define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1))
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
#define DOC_DPS0_ADDRLOW 0x1060
#define DOC_DPS0_ADDRHIGH 0x1062
#define DOC_DPS1_ADDRLOW 0x1064
@@ -117,6 +129,7 @@
#define DOC_ASICMODECONFIRM 0x1072
#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
/*
* Flash sequences
@@ -124,11 +137,14 @@
*/
#define DOC_SEQ_RESET 0x00
#define DOC_SEQ_PAGE_SIZE_532 0x03
-#define DOC_SEQ_SET_MODE 0x09
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
#define DOC_SEQ_READ 0x12
#define DOC_SEQ_SET_PLANE1 0x0e
#define DOC_SEQ_SET_PLANE2 0x10
#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
/*
* Flash commands
@@ -143,7 +159,10 @@
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOC_CMD_PROG_CYCLE1 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_FAST_MODE 0xa2
@@ -174,6 +193,7 @@
/*
* Flash register : DOC_ECCCONF0
*/
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
#define DOC_ECCCONF0_READ_MODE 0x8000
#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
@@ -185,7 +205,7 @@
*/
#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
#define DOC_ECCCONF1_UNKOWN1 0x40
-#define DOC_ECCCONF1_UNKOWN2 0x20
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
#define DOC_ECCCONF1_UNKOWN3 0x10
#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
@@ -223,13 +243,46 @@
#define DOC_READADDR_ONE_BYTE 0x4000
#define DOC_READADDR_ADDR_MASK 0x1fff
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
/**
* struct docg3 - DiskOnChip driver private data
* @dev: the device currently under control
* @base: mapped IO space
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ *
+ * @reliable: if 0, the docg3 is in normal mode, if 1 in fast mode, if 2 in
+ * reliable mode
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that pages 2*n and 2*n+1 are clones.
* @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (i.e. in the
+ * next page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7 and 15, and fill the others with HW
+ * ECC; if 0, use all 16 bytes
+ * @oob_write_buf: prepared OOB for next page_write
* @debugfs_root: debugfs root node
*/
struct docg3 {
@@ -237,8 +290,12 @@ struct docg3 {
void __iomem *base;
unsigned int device_id:4;
unsigned int if_cfg:1;
+ unsigned int reliable:2;
int max_block;
u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
struct dentry *debugfs_root;
};
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 45116bb3029..706b847b46b 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -241,8 +241,7 @@ static void __init DoC_Probe(unsigned long physadr)
return;
}
docfound = 1;
- mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
+ mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
iounmap(docptr);
@@ -250,10 +249,6 @@ static void __init DoC_Probe(unsigned long physadr)
}
this = (struct DiskOnChip *)(&mtd[1]);
-
- memset((char *)mtd,0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct DiskOnChip));
-
mtd->priv = this;
this->virtadr = docptr;
this->physadr = physadr;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 884904d3f9d..7c60dddbefc 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -992,7 +992,6 @@ static int __devexit m25p_remove(struct spi_device *spi)
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = m25p_ids,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d75c7af18a6..236057ead0d 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -936,7 +936,6 @@ static int __devexit dataflash_remove(struct spi_device *spi)
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = dataflash_dt_ids,
},
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index d38ef3bffe8..5fc198350b9 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -378,7 +378,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
- int ret, i;
+ int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -444,7 +444,6 @@ static int __devexit sst25l_remove(struct spi_device *spi)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index c7382bb686c..19d637266fc 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
- (unsigned char *)&header);
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
if (err)
return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
- (unsigned char *)&header);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
if (ret)
goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t), &retval,
- (unsigned char *)part->bam_cache);
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
- ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ ret = mtd_erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
@@ -422,8 +422,8 @@ static int prepare_xfer(partition_t *part, int i)
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
- &retlen, (u_char *)&header);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
if (ret) {
return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&ctl);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
if (ret)
return ret;
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
- &retlen, (u_char *) &unit);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
@@ -550,9 +550,11 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
}
/* Write the BAM to the transfer unit */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
- (u_char *)part->bam_cache);
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
@@ -560,8 +562,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
- &retlen, (u_char *)&srcunitswap);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -648,8 +650,7 @@ static int reclaim_block(partition_t *part)
if (queued) {
pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
@@ -747,10 +748,11 @@ static uint32_t find_free(partition_t *part)
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mbd.mtd->read(part->mbd.mtd,
- part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -810,8 +812,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
- &retlen, (u_char *) buffer);
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -849,8 +851,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&old_addr);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
@@ -886,8 +888,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&le_virt_addr);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -946,8 +948,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
- buffer);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
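
All the ftl.c hunks above follow one mechanical pattern: a call through the mtd_info method pointer becomes a call to the corresponding mtd_*() wrapper with identical arguments. Schematically, using the variables from scan_header():

	/* old style: call through the function pointer on mtd_info */
	err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header),
				  &ret, (unsigned char *)&header);

	/* new style: same arguments through the mtd_read() wrapper */
	err = mtd_read(part->mbd.mtd, offset, sizeof(header),
		       &ret, (unsigned char *)&header);
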
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index dd034efd187..28646c95cfb 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -158,7 +158,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -178,7 +178,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -199,7 +199,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
@@ -343,14 +343,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd,
- (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret != -EIO)
pr_debug("INFTL: error went away on retry?\n");
}
@@ -914,7 +917,7 @@ foundit:
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
if (ret < 0 && !mtd_is_bitflip(ret))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 2ff601f816c..4adc0374fb6 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -73,8 +73,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = mtd->read(mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -118,8 +118,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
- mtd->read(mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
@@ -220,7 +220,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
@@ -306,7 +306,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
@@ -342,7 +343,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -393,7 +394,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
- mtd->erase(inftl->mbd.mtd, instr);
+ mtd_erase(inftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -423,7 +424,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1dca31d9a8b..536bbceaeaa 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -70,19 +70,12 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->erase = lpddr_erase;
mtd->write = lpddr_write_buffers;
mtd->writev = lpddr_writev;
- mtd->read_oob = NULL;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
mtd->lock = lpddr_lock;
mtd->unlock = lpddr_unlock;
- mtd->suspend = NULL;
- mtd->resume = NULL;
if (map_is_linear(map)) {
mtd->point = lpddr_point;
mtd->unpoint = lpddr_unpoint;
}
- mtd->block_isbad = NULL;
- mtd->block_markbad = NULL;
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8e0c4bf9f7f..6c5c431c64a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -242,15 +242,6 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_BCM963XX
- tristate "Map driver for Broadcom BCM963xx boards"
- depends on BCM63XX
- select MTD_MAP_BANK_WIDTH_2
- select MTD_CFI_I1
- help
- Support for parsing CFE image tag and creating MTD partitions on
- Broadcom BCM63xx boards.
-
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 45dcb8b14f2..68a9a91d344 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -55,6 +55,5 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
deleted file mode 100644
index 736ca10ca9f..00000000000
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
- * Mike Albon <malbon@openwrt.org>
- * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH 2 /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
- .name = "bcm963xx",
- .bankwidth = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
- struct mtd_partition **pparts)
-{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
- struct mtd_partition *parts;
- int ret;
- size_t retlen;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
- int namelen = 0;
- int i;
- char *boardid;
- char *tagversion;
-
- /* Allocate memory for buffer */
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
- &retlen, (void *)buf);
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
-
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
- tagversion = &(buf->tag_version[0]);
- boardid = &(buf->board_id[0]);
-
- printk(KERN_INFO PFX "CFE boot tag found with version %s "
- "and board type %s\n", tagversion, boardid);
-
- kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
- spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
- sparelen = master->size - spareaddr - master->erasesize;
- rootfslen = spareaddr - rootfsaddr;
-
- /* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
- nrparts++;
- namelen += 6;
- };
- if (kernellen > 0) {
- nrparts++;
- namelen += 6;
- };
-
- /* Ask kernel for more memory */
- parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- vfree(buf);
- return -ENOMEM;
- };
-
- /* Start building partition list */
- parts[curpart].name = "CFE";
- parts[curpart].offset = 0;
- parts[curpart].size = master->erasesize;
- curpart++;
-
- if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
- curpart++;
- };
-
- if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
- curpart++;
- };
-
- parts[curpart].name = "nvram";
- parts[curpart].offset = master->size - master->erasesize;
- parts[curpart].size = master->erasesize;
-
- /* Global partition "linux" to make easy firmware upgrade */
- curpart++;
- parts[curpart].name = "linux";
- parts[curpart].offset = parts[0].size;
- parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
- for (i = 0; i < nrparts; i++)
- printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
- "length %lx\n", i, parts[i].name,
- (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
-
- printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
- spareaddr, sparelen);
- *pparts = parts;
- vfree(buf);
-
- return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
- int idoffset = 0x4e0;
- static char idstring[8] = "CFE1CFE1";
- char buf[9];
- int ret;
- size_t retlen;
-
- ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
- buf[retlen] = 0;
- printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
- return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
- int err = 0;
- int parsed_nr_parts = 0;
- char *part_type;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no resource supplied\n");
- return -ENODEV;
- }
-
- bcm963xx_map.phys = r->start;
- bcm963xx_map.size = resource_size(r);
- bcm963xx_map.virt = ioremap(r->start, resource_size(r));
- if (!bcm963xx_map.virt) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -EIO;
- }
-
- dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
- bcm963xx_map.size, bcm963xx_map.phys);
-
- simple_map_init(&bcm963xx_map);
-
- bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
- if (!bcm963xx_mtd_info) {
- dev_err(&pdev->dev, "failed to probe using CFI\n");
- bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
- if (bcm963xx_mtd_info)
- goto probe_ok;
- dev_err(&pdev->dev, "failed to probe using JEDEC\n");
- err = -EIO;
- goto err_probe;
- }
-
-probe_ok:
- bcm963xx_mtd_info->owner = THIS_MODULE;
-
- /* This is mutually exclusive */
- if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
- dev_info(&pdev->dev, "CFE bootloader detected\n");
- if (parsed_nr_parts == 0) {
- int ret = parse_cfe_partitions(bcm963xx_mtd_info,
- &parsed_parts);
- if (ret > 0) {
- part_type = "CFE";
- parsed_nr_parts = ret;
- }
- }
- } else {
- dev_info(&pdev->dev, "unsupported bootloader\n");
- err = -ENODEV;
- goto err_probe;
- }
-
- return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
-
-err_probe:
- iounmap(bcm963xx_map.virt);
- return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
- if (bcm963xx_mtd_info) {
- mtd_device_unregister(bcm963xx_mtd_info);
- map_destroy(bcm963xx_mtd_info);
- }
-
- if (bcm963xx_map.virt) {
- iounmap(bcm963xx_map.virt);
- bcm963xx_map.virt = 0;
- }
-
- return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
- .probe = bcm963xx_probe,
- .remove = bcm963xx_remove,
- .driver = {
- .name = "bcm963xx-flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
- return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
- platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6d6b2b5674e..650126c361f 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -190,17 +190,7 @@ static struct platform_driver bfin_flash_driver = {
},
};
-static int __init bfin_flash_init(void)
-{
- return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
- platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 1ec66f031c5..33cce895859 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -279,17 +279,7 @@ static struct platform_driver gpio_flash_driver = {
},
};
-static int __init gpio_flash_init(void)
-{
- return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
- platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 437fcd2f352..fc7d4d0d9a4 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -246,18 +246,8 @@ static struct platform_driver ixp2000_flash_driver = {
},
};
-static int __init ixp2000_flash_init(void)
-{
- return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
- platform_driver_unregister(&ixp2000_flash_driver);
-}
+module_platform_driver(ixp2000_flash_driver);
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 30409015a3d..8b5410162d7 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -270,19 +270,7 @@ static struct platform_driver ixp4xx_flash_driver = {
},
};
-static int __init ixp4xx_flash_init(void)
-{
- return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
- platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 4f10e27ada5..7b889de9477 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -159,7 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
err = -ENXIO;
- goto err_unmap;
+ goto err_free;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -179,8 +179,6 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_unmap:
- iounmap(ltq_mtd->map->virt);
err_free:
kfree(ltq_mtd->map);
err_out:
@@ -198,8 +196,6 @@ ltq_mtd_remove(struct platform_device *pdev)
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- if (ltq_mtd->map->virt)
- iounmap(ltq_mtd->map->virt);
kfree(ltq_mtd->map);
kfree(ltq_mtd);
}
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 119baa7d747..8fed58e3a4a 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -223,17 +223,7 @@ static struct platform_driver latch_addr_flash_driver = {
},
};
-static int __init latch_addr_flash_init(void)
-{
- return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
- platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 66e8200079c..abc562653b3 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -85,6 +85,7 @@ static int physmap_flash_probe(struct platform_device *dev)
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
+ const char **part_types;
int err = 0;
int i;
int devices_found = 0;
@@ -171,7 +172,9 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+ mtd_device_parse_register(info->cmtd, part_types, 0,
physmap_data->parts, physmap_data->nr_parts);
return 0;
@@ -187,9 +190,8 @@ static void physmap_flash_shutdown(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend && info->mtd[i]->resume)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7d65f9d3e69..2e6fb6831d5 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -338,18 +338,7 @@ static struct platform_driver of_flash_driver = {
.remove = of_flash_remove,
};
-static int __init of_flash_init(void)
-{
- return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
- platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f55348972..45876d0e5b8 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17df9fc..436d121185b 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
@@ -125,8 +125,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
@@ -142,18 +142,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.shutdown = pxa2xx_flash_shutdown,
};
-static int __init init_pxa2xx_flash(void)
-{
- return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
- platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 0237f197fd1..3da63fc6f16 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -119,9 +119,8 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
- if (info->mtd->suspend && info->mtd->resume)
- if (info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define rbtx4939_flash_shutdown NULL
@@ -137,18 +136,7 @@ static struct platform_driver rbtx4939_flash_driver = {
},
};
-static int __init rbtx4939_flash_init(void)
-{
- return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
- platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RBTX4939 MTD map driver");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fa9c0a9670c..50282199770 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -377,8 +377,8 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define sa1100_mtd_shutdown NULL
@@ -394,18 +394,7 @@ static struct platform_driver sa1100_mtd_driver = {
},
};
-static int __init sa1100_mtd_init(void)
-{
- return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
- platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index d88c8426bb0..934a72c8007 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -204,8 +204,7 @@ scb2_flash_remove(struct pci_dev *dev)
return;
/* disable flash writes */
- if (scb2_mtd->lock)
- scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 2d66234f57c..175e537b444 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -158,15 +158,4 @@ static struct platform_driver uflash_driver = {
.remove = __devexit_p(uflash_remove),
};
-static int __init uflash_init(void)
-{
- return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
- platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ed8b5e744b1..424ca5f93c6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&dev->lock);
- if (dev->open++)
+ if (dev->open)
goto unlock;
kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
goto error_release;
unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 7c1dc908a17..af6591237b9 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -85,7 +85,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -102,7 +102,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
* Next, write the data to flash.
*/
- ret = mtd->write(mtd, pos, len, &retlen, buf);
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
@@ -152,7 +152,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->write(mtd, pos, len, &retlen, buf);
+ return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +184,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
- ret = mtd->read(mtd, sect_start, sect_size,
- &retlen, mtdblk->cache_data);
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
if (ret)
return ret;
if (retlen != sect_size)
@@ -222,7 +222,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->read(mtd, pos, len, &retlen, buf);
+ return mtd_read(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +241,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
- ret = mtd->read(mtd, pos, size, &retlen, buf);
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
return ret;
if (retlen != size)
@@ -322,8 +322,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
if (!--mtdblk->count) {
/* It was the last usage. Free the cache */
- if (mbd->mtd->sync)
- mbd->mtd->sync(mbd->mtd);
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
@@ -341,9 +340,7 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
-
- if (dev->mtd->sync)
- dev->mtd->sync(dev->mtd);
+ mtd_sync(dev->mtd);
return 0;
}
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 0470a6e8630..92759a9d298 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -30,7 +30,7 @@ static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
@@ -40,7 +40,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index e7dc732ddab..50c6a1e7f67 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -51,7 +51,7 @@ struct mtd_file_info {
enum mtd_file_modes mode;
};
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -77,7 +77,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
@@ -142,11 +142,11 @@ static int mtd_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&mtd_mutex);
return ret;
-} /* mtd_open */
+} /* mtdchar_open */
/*====================================================================*/
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -154,8 +154,8 @@ static int mtd_close(struct inode *inode, struct file *file)
pr_debug("MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & FMODE_WRITE) && mtd->sync)
- mtd->sync(mtd);
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
iput(mfi->ino);
@@ -164,7 +164,7 @@ static int mtd_close(struct inode *inode, struct file *file)
kfree(mfi);
return 0;
-} /* mtd_close */
+} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
@@ -184,11 +184,12 @@ static int mtd_close(struct inode *inode, struct file *file)
* alignment requirements are not met in the NAND subdriver.
*/
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- size_t retlen=0;
+ size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
@@ -212,10 +213,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_OTP_USER:
- ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
@@ -226,12 +229,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
ops.oobbuf = NULL;
ops.len = len;
- ret = mtd->read_oob(mtd, *ppos, &ops);
+ ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
/* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
@@ -265,9 +268,10 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
kfree(kbuf);
return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -306,11 +310,8 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ret = -EROFS;
break;
case MTD_FILE_MODE_OTP_USER:
- if (!mtd->write_user_prot_reg) {
- ret = -EOPNOTSUPP;
- break;
- }
- ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
@@ -323,13 +324,13 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ops.ooboffs = 0;
ops.len = len;
- ret = mtd->write_oob(mtd, *ppos, &ops);
+ ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
if (!ret) {
*ppos += retlen;
@@ -345,7 +346,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
kfree(kbuf);
return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
/*======================================================================
@@ -361,20 +362,22 @@ static void mtdchar_erase_callback (struct erase_info *instr)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
int ret = 0;
+ /*
+ * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
+ * operations are supported.
+ */
+ if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
switch (mode) {
case MTD_OTP_FACTORY:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_USER;
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
default:
ret = -EINVAL;
@@ -387,7 +390,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -424,7 +427,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return PTR_ERR(ops.oobbuf);
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->write_oob(mtd, start, &ops);
+ ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
@@ -436,7 +439,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
}
-static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -447,13 +450,8 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_WRITE, ptr,
- length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
@@ -469,7 +467,7 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->read_oob(mtd, start, &ops);
+ ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
@@ -530,7 +528,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_ioctl_arg a;
@@ -566,7 +564,7 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
}
}
-static int mtd_write_ioctl(struct mtd_info *mtd,
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
@@ -607,7 +605,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
ops.oobbuf = NULL;
}
- ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
kfree(ops.datbuf);
kfree(ops.oobbuf);
@@ -615,7 +613,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
return ret;
}
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -729,7 +727,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- ret = mtd->erase(mtd, erase);
+ ret = mtd_erase(mtd, erase);
if (!ret) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -755,7 +753,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
@@ -769,7 +767,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
@@ -782,7 +780,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -796,7 +794,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -804,7 +802,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case MEMWRITE:
{
- ret = mtd_write_ioctl(mtd,
+ ret = mtdchar_write_ioctl(mtd,
(struct mtd_write_req __user *)arg);
break;
}
@@ -816,10 +814,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->lock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->lock(mtd, einfo.start, einfo.length);
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
@@ -830,10 +825,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->unlock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->unlock(mtd, einfo.start, einfo.length);
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
@@ -844,10 +836,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->is_locked)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
@@ -878,10 +867,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
+ return mtd_block_isbad(mtd, offs);
break;
}
@@ -891,10 +877,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
+ return mtd_block_markbad(mtd, offs);
break;
}
@@ -919,17 +902,15 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = -EOPNOTSUPP;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- if (mtd->get_fact_prot_info)
- ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ ret = mtd_get_fact_prot_info(mtd, buf, 4096);
break;
case MTD_FILE_MODE_OTP_USER:
- if (mtd->get_user_prot_info)
- ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ ret = mtd_get_user_prot_info(mtd, buf, 4096);
break;
default:
+ ret = -EINVAL;
break;
}
if (ret >= 0) {
@@ -953,9 +934,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
#endif
@@ -999,7 +978,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
case MTD_FILE_MODE_RAW:
- if (!mtd->read_oob || !mtd->write_oob)
+ if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
@@ -1014,7 +993,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case BLKPG:
{
- ret = mtd_blkpg_ioctl(mtd,
+ ret = mtdchar_blkpg_ioctl(mtd,
(struct blkpg_ioctl_arg __user *)arg);
break;
}
@@ -1033,12 +1012,12 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return ret;
} /* memory_ioctl */
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
int ret;
mutex_lock(&mtd_mutex);
- ret = mtd_ioctl(file, cmd, arg);
+ ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&mtd_mutex);
return ret;
@@ -1055,7 +1034,7 @@ struct mtd_oob_buf32 {
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -1074,7 +1053,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start,
+ ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
@@ -1089,13 +1068,13 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start,
+ ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
default:
- ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&mtd_mutex);
@@ -1111,7 +1090,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
* mappings)
*/
#ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
@@ -1119,32 +1098,28 @@ static unsigned long mtd_get_unmapped_area(struct file *file,
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
- if (mtd->get_unmapped_area) {
- unsigned long offset;
-
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset = pgoff << PAGE_SHIFT;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
- /* can't map directly */
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENOSYS : ret;
}
#endif
/*
* set up a mapping for shared memory segments
*/
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
@@ -1185,18 +1160,18 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
- .llseek = mtd_lseek,
- .read = mtd_read,
- .write = mtd_write,
- .unlocked_ioctl = mtd_unlocked_ioctl,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = mtd_compat_ioctl,
+ .compat_ioctl = mtdchar_compat_ioctl,
#endif
- .open = mtd_open,
- .release = mtd_close,
- .mmap = mtd_mmap,
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = mtd_get_unmapped_area,
+ .get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 6df4d4d4eb9..1ed5103b219 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -91,7 +91,7 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Entire transaction goes into this subdev */
size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
@@ -148,7 +148,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->write(subdev, to, size, &retsize, buf);
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
@@ -227,8 +227,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to, &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to,
+ &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
@@ -273,7 +274,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
- err = subdev->read_oob(subdev, from, &devops);
+ err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
@@ -333,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
- err = subdev->write_oob(subdev, to, &devops);
+ err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.oobretlen;
if (err)
return err;
@@ -379,7 +380,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
- err = mtd->erase(mtd, erase);
+ err = mtd_erase(mtd, erase);
if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -554,12 +555,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->lock) {
- err = subdev->lock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -594,12 +592,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->unlock) {
- err = subdev->unlock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -619,7 +614,7 @@ static void concat_sync(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->sync(subdev);
+ mtd_sync(subdev);
}
}
@@ -630,7 +625,7 @@ static int concat_suspend(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if ((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
@@ -643,7 +638,7 @@ static void concat_resume(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->resume(subdev);
+ mtd_resume(subdev);
}
}
@@ -652,7 +647,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
- if (!concat->subdev[0]->block_isbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return res;
if (ofs > mtd->size)
@@ -666,7 +661,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- res = subdev->block_isbad(subdev, ofs);
+ res = mtd_block_isbad(subdev, ofs);
break;
}
@@ -678,7 +673,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!concat->subdev[0]->block_markbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return 0;
if (ofs > mtd->size)
@@ -692,7 +687,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- err = subdev->block_markbad(subdev, ofs);
+ err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
@@ -725,11 +720,7 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
if (offset + len > subdev->size)
return (unsigned long) -EINVAL;
- if (subdev->get_unmapped_area)
- return subdev->get_unmapped_area(subdev, len, offset,
- flags);
-
- break;
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b01993ea260..6ae9ca01388 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,8 @@ static LIST_HEAD(mtd_notifiers);
*/
static void mtd_release(struct device *dev)
{
- dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
if (index)
@@ -116,27 +117,24 @@ static void mtd_release(struct device *dev)
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->suspend)
- return mtd->suspend(mtd);
- else
- return 0;
+ return mtd_suspend(mtd);
}
static int mtd_cls_resume(struct device *dev)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
-
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
if (mtd && mtd->resume)
- mtd->resume(mtd);
+ mtd_resume(mtd);
return 0;
}
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
@@ -172,7 +170,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
@@ -182,7 +180,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
@@ -193,7 +191,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
@@ -203,7 +201,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
@@ -213,7 +211,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -224,7 +222,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
@@ -234,7 +232,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
@@ -245,7 +243,7 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
@@ -338,9 +336,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
@@ -516,7 +514,6 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
-
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
@@ -532,6 +529,7 @@ void register_mtd_user (struct mtd_notifier *new)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -542,7 +540,6 @@ void register_mtd_user (struct mtd_notifier *new)
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
-
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
@@ -558,7 +555,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
mutex_unlock(&mtd_table_mutex);
return 0;
}
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
@@ -571,7 +568,6 @@ int unregister_mtd_user (struct mtd_notifier *old)
* both, return the num'th driver only if its address matches. Return
* error code if not.
*/
-
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
@@ -604,6 +600,7 @@ out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
@@ -624,6 +621,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd->usecount++;
return 0;
}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -633,7 +631,6 @@ int __get_mtd_device(struct mtd_info *mtd)
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
-
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
@@ -662,6 +659,7 @@ out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
@@ -670,6 +668,7 @@ void put_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
@@ -681,39 +680,65 @@ void __put_mtd_device(struct mtd_info *mtd)
module_put(mtd->owner);
}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
-/* default_mtd_writev - default mtd writev method for MTD devices that
- * don't implement their own
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
- if(!mtd->write) {
- ret = -EROFS;
- } else {
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- if (retlen)
- *retlen = totlen;
+ *retlen = totlen;
return ret;
}
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!mtd->writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
@@ -758,15 +783,6 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
*/
return kmalloc(*size, GFP_KERNEL);
}
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1e2fa623670..db8e8272d69 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -169,8 +169,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
+ while (mtd_can_have_bb(mtd)) {
+ ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
@@ -199,8 +199,8 @@ badblock:
return;
}
- if (mtd->block_markbad && ret == -EIO) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
+ if (mtd_can_have_bb(mtd) && ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
@@ -221,12 +221,16 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
- if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
- else
- ret = mtd->write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -253,10 +257,13 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_can_have_bb(mtd) &&
+ mtd_block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
- ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
- &retlen, (u_char *) &count[0]);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
(ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
@@ -327,13 +334,8 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS) {
- if (!cxt->mtd->panic_write)
- printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
- else
- mtdoops_write(cxt, 1);
- return;
- }
+ if (reason != KMSG_DUMP_OOPS)
+ mtdoops_write(cxt, 1);
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
@@ -369,7 +371,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
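Beyond the straight mtd->op to mtd_op() conversions, the mtdoops hunks adopt mtd_can_have_bb() in place of testing whether ->block_isbad is populated. A condensed sketch of that bad-block pattern (the helper name and loop bounds are illustrative, not taken from the patch):

#include <linux/mtd/mtd.h>

/*
 * Illustrative only: pick the next non-bad eraseblock at or after
 * @start, mirroring the checks mtdoops now performs through the
 * mtd_can_have_bb()/mtd_block_isbad() helpers.
 */
static int example_next_good_block(struct mtd_info *mtd, loff_t start,
                                   loff_t *out)
{
        loff_t ofs;

        for (ofs = start; ofs < mtd->size; ofs += mtd->erasesize) {
                int bad = mtd_can_have_bb(mtd) ?
                          mtd_block_isbad(mtd, ofs) : 0;

                if (bad < 0)
                        return bad;     /* propagate I/O errors */
                if (!bad) {
                        *out = ofs;     /* found a usable block */
                        return 0;
                }
        }
        return -ENOSPC;
}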
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a0bd2de4752..a3d44c3416b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -70,8 +70,7 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read(part->master, from + part->offset,
- len, retlen, buf);
+ res = mtd_read(part->master, from + part->offset, len, retlen, buf);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -89,15 +88,15 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
+ return mtd_point(part->master, from + part->offset, len, retlen,
+ virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- part->master->unpoint(part->master, from + part->offset, len);
+ mtd_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -108,8 +107,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return part->master->get_unmapped_area(part->master, len, offset,
- flags);
+ return mtd_get_unmapped_area(part->master, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -140,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ res = mtd_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -154,30 +152,28 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info(part->master, buf, len);
+ return mtd_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info(part->master, buf, len);
+ return mtd_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -190,8 +186,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_write(part->master, to + part->offset, len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -204,8 +199,8 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_panic_write(part->master, to + part->offset, len, retlen,
+ buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -220,22 +215,21 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->write_oob(part->master, to + part->offset, ops);
+ return mtd_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg(part->master, from, len);
+ return mtd_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
@@ -244,8 +238,8 @@ static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev(part->master, vecs, count,
- to + part->offset, retlen);
+ return mtd_writev(part->master, vecs, count, to + part->offset,
+ retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -257,7 +251,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
+ ret = mtd_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -285,7 +279,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->lock(part->master, ofs + part->offset, len);
+ return mtd_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -293,7 +287,7 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->unlock(part->master, ofs + part->offset, len);
+ return mtd_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -301,25 +295,25 @@ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->is_locked(part->master, ofs + part->offset, len);
+ return mtd_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->sync(part->master);
+ mtd_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return part->master->suspend(part->master);
+ return mtd_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->resume(part->master);
+ mtd_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
@@ -328,7 +322,7 @@ static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
+ return mtd_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -341,7 +335,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- res = part->master->block_markbad(part->master, ofs);
+ res = mtd_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -559,8 +553,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
+ if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
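The partition wrappers above keep the existing error convention: mtd_read() can return a correctable-bitflip indication that mtd_is_bitflip() recognises, and part_read() only bumps the ECC statistics in that case. A condensed sketch of that convention for a generic caller (buffer and length handling assumed for illustration):

#include <linux/mtd/mtd.h>

/*
 * Condensed sketch of the read-error convention kept by the
 * conversions above: a negative return that mtd_is_bitflip()
 * recognises means the data was corrected and is still usable.
 */
static int example_read_chunk(struct mtd_info *mtd, loff_t from,
                              size_t len, u_char *buf)
{
        size_t retlen;
        int ret;

        ret = mtd_read(mtd, from, len, &retlen, buf);
        if (ret < 0 && !mtd_is_bitflip(ret))
                return ret;             /* hard I/O or ECC failure */
        if (retlen != len)
                return -EIO;            /* short read */

        /*
         * ret may still carry the bitflip indication; callers such as
         * part_read() just bump their ECC statistics and carry on.
         */
        return 0;
}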
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index bd9590c723e..c92f0f6bc13 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
eb->root = NULL;
/* badblocks not supported */
- if (!d->mtd->block_markbad)
+ if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = d->mtd->block_markbad(d->mtd, offset);
+ ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,7 +312,7 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = d->mtd->read_oob(d->mtd, from, ops);
+ int ret = mtd_read_oob(d->mtd, from, ops);
if (mtd_is_bitflip(ret))
return ret;
@@ -343,7 +343,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
- if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
@@ -403,7 +403,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
- ret = d->mtd->write_oob(d->mtd, offset , &ops);
+ ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
@@ -567,7 +567,7 @@ retry:
erase.len = mtd->erasesize;
erase.priv = (u_long)&wq;
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
@@ -689,7 +689,7 @@ retry:
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
@@ -736,7 +736,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd->write_oob(mtd, pos, &ops);
+ ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
pos = base;
for (i = 0; i < mtd_pages; i++) {
- ret = mtd->read_oob(mtd, pos, &ops);
+ ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -1047,8 +1047,7 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- if (d->mtd->sync)
- d->mtd->sync(d->mtd);
+ mtd_sync(d->mtd);
return 0;
}
@@ -1059,9 +1058,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
badcnt = 0;
- if (mtd->block_isbad)
+ if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
@@ -1161,7 +1160,7 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
if (mtd_is_bitflip(ret)) {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index cce7b70824c..31b034b7eba 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -110,7 +110,7 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
+ depends on ARCH_OMAP2PLUS
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
@@ -420,7 +420,6 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
- select MTD_PARTITIONS
select MTD_CMDLINE_PARTS
help
Enables NAND Flash support for IMX23 or IMX28.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index eb40ea829ab..6a5ff64a139 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -717,17 +717,6 @@ static struct usb_driver alauda_driver = {
.id_table = alauda_table,
};
-static int __init alauda_init(void)
-{
- return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
- usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e6b498c9be..3197e9764fc 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -280,17 +280,7 @@ static struct platform_driver ams_delta_nand_driver = {
},
};
-static int __init ams_delta_nand_init(void)
-{
- return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
- platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 23e5d77c39f..4dd056e2e16 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -113,7 +113,7 @@ static int cpu_has_dma(void)
*/
static void atmel_nand_enable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 0);
}
@@ -122,7 +122,7 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
*/
static void atmel_nand_disable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 1);
}
@@ -492,7 +492,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = host->io_base;
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (host->board->rdy_pin)
+ if (gpio_is_valid(host->board->rdy_pin))
nand_chip->dev_ready = atmel_nand_device_ready;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -530,7 +530,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (gpio_get_value(host->board->det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = -ENXIO;
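The atmel_nand hunks switch from truth tests on the pin numbers to gpio_is_valid(), so that GPIO 0 counts as a wired pin and only negative numbers mean "not connected". A one-function illustration of the idiom (the active-low polarity below is an assumption for the example):

#include <linux/gpio.h>

/*
 * gpio_is_valid() treats negative numbers as "no GPIO wired", so a
 * board that really uses GPIO 0 is no longer skipped by accident.
 */
static void example_assert_enable(int enable_pin)
{
        if (gpio_is_valid(enable_pin))
                gpio_set_value(enable_pin, 0);
}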
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 46b58d67284..50387fd4009 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -546,18 +546,7 @@ static struct platform_driver nand_driver = {
.resume = bcm_umi_nand_resume,
};
-static int __init nand_init(void)
-{
- return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
- platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
+module_platform_driver(nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index c153e1f77f9..6e566156956 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -675,7 +675,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
- ret = davinci_aemif_setup_timing(info->timing, info->base,
+ ret = 0;
+ if (info->timing)
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 5780dbab611..df921e7a496 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1072,7 +1072,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
@@ -1097,7 +1097,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
/* Only one mediaheader was found. We want buf to contain a
mediaheader on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index eedd8ee2c9a..7195ee6efe1 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -166,15 +166,22 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
elbc_fcm_ctrl->page = page_addr;
- out_be32(&lbc->fbar,
- page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
if (priv->page_size) {
+ /*
+ * large page size chip: FPAR[PI] holds the lowest 6 bits,
+ * FBAR[BLK] holds the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
+ /*
+ * small page size chip: FPAR[PI] holds the lowest 5 bits,
+ * FBAR[BLK] holds the remaining bits.
+ */
+ out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
@@ -349,20 +356,22 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
fsl_elbc_run_command(mtd);
return;
- /* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* nand_get_flash_type() reads 8 bytes of entire ID string */
- out_be32(&lbc->fbcr, 8);
- elbc_fcm_ctrl->read_bytes = 8;
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * Although it is currently 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM).
+ */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
- elbc_fcm_ctrl->mdr = 0;
-
+ elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
@@ -407,9 +416,17 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
page_addr, column);
elbc_fcm_ctrl->column = column;
- elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -434,16 +451,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
- if (column >= mtd->writesize) {
+ if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
- column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- elbc_fcm_ctrl->oob = 1;
- } else {
- WARN_ON(column != 0);
+ else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- }
}
out_be32(&lbc->fcr, fcr);
@@ -463,7 +476,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
- out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
else
out_be32(&lbc->fbcr, 0);
@@ -659,9 +673,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
if (chip->pagemask & 0xff000000)
al++;
- /* add to ECCM mode set in fsl_elbc_init */
- priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
- (al << FMR_AL_SHIFT);
+ priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
@@ -764,8 +776,10 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- /* Set the ECCM according to the settings in bootloader.*/
- priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
@@ -971,18 +985,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
.remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_nand_init(void)
-{
- return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index b4f3cc9f32f..45df542b9c6 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -353,17 +353,7 @@ static struct platform_driver of_fun_driver = {
.remove = __devexit_p(fun_remove),
};
-static int __init fun_module_init(void)
-{
- return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
- platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 2c2060b2800..27000a5f5f4 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -27,6 +27,9 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
@@ -171,6 +174,96 @@ static int gpio_nand_devready(struct mtd_info *mtd)
return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ u64 addr;
+
+ if (!r || of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev->platform_data) {
+ memcpy(plat, dev->platform_data, sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
static int __devexit gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
@@ -178,7 +271,7 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
nand_release(&gpiomtd->mtd_info);
- res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res = gpio_nand_get_io_sync(dev);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, resource_size(res));
@@ -226,9 +319,10 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
struct gpiomtd *gpiomtd;
struct nand_chip *this;
struct resource *res0, *res1;
- int ret;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
- if (!dev->dev.platform_data)
+ if (!dev->dev.of_node && !dev->dev.platform_data)
return -EINVAL;
res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -248,7 +342,7 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
goto err_map;
}
- res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res1 = gpio_nand_get_io_sync(dev);
if (res1) {
gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
if (!gpiomtd->io_sync) {
@@ -257,7 +351,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
}
}
- memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+ ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ if (ret)
+ goto err_nce;
ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
@@ -316,8 +412,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ ppdata.of_node = dev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (ret)
+ goto err_wp;
platform_set_drvdata(dev, gpiomtd);
return 0;
@@ -352,6 +452,7 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .of_match_table = gpio_nand_id_table,
},
};
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index de4db7604a3..2a56fc6f399 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -126,7 +126,7 @@ int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
ret = gpmi_reset_block(r->gpmi_regs, false);
@@ -146,7 +146,7 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -202,7 +202,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
@@ -229,7 +229,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
r->bch_regs + HW_BCH_CTRL_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -704,7 +704,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
int ret;
/* Enable the clock. */
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret) {
pr_err("We failed in enable the clk\n");
goto err_out;
@@ -773,7 +773,7 @@ err_out:
void gpmi_end(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
}
/* Clears a BCH interrupt. */
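The gpmi-lib.c hunks above switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), which combine the (possibly sleeping) prepare step with the enable. A minimal sketch of the pairing, with the surrounding register programming assumed for illustration:

#include <linux/clk.h>

/*
 * Sketch of the pairing the gpmi code now uses: clk_prepare_enable()
 * may sleep, so it is called from process context, and every call is
 * balanced by clk_disable_unprepare().
 */
static int example_run_with_clock(struct clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* ... program the controller registers here ... */

        clk_disable_unprepare(clk);
        return 0;
}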
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b63420f0..493ec2fcf97 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
-
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e2664073a89..ac3b9f255e0 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -423,17 +423,7 @@ static struct platform_driver jz_nand_driver = {
},
};
-static int __init jz_nand_init(void)
-{
- return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
- platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 5ede6470634..c240cf1af96 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -879,19 +879,7 @@ static struct platform_driver mpc5121_nfc_driver = {
},
};
-static int __init mpc5121_nfc_init(void)
-{
- return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
- platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3ed9c5e4d34..35b4565050f 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3132,8 +3132,8 @@ ident_done:
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices; stored in first two pages
* of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
- * only the first page.
+ * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+ * All others scan only the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
@@ -3143,7 +3143,8 @@ ident_done:
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
*maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD)) ||
+ *maf_id == NAND_MFR_AMD ||
+ *maf_id == NAND_MFR_MACRONIX)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 69148ae3bf5..20a112f591f 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -201,7 +201,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
from += marker_len;
marker_len = 0;
}
- res = mtd->read(mtd, from, len, &retlen, buf);
+ res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at "
@@ -298,7 +298,7 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
if (td->options & NAND_BBT_VERSION)
len++;
- return mtd->read(mtd, offs, len, &retlen, buf);
+ return mtd_read(mtd, offs, len, &retlen, buf);
}
/* Scan read raw data from flash */
@@ -317,7 +317,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
- res = mtd->read_oob(mtd, offs, &ops);
+ res = mtd_read_oob(mtd, offs, &ops);
if (res)
return res;
@@ -350,7 +350,7 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -434,7 +434,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -756,7 +756,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Make it block aligned */
to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
- res = mtd->read(mtd, to, len, &retlen, buf);
+ res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block "
@@ -769,7 +769,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 00cf1b0d605..af4fe8ca7b5 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -73,11 +73,12 @@ struct nand_flash_dev nand_flash_ids[] = {
#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
+ /* 512 Megabit */
{"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
@@ -176,6 +177,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
{NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_MACRONIX, "Macronix"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 34c03be7730..261f478f8cc 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -737,7 +737,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
return -EINVAL;
}
offset = erase_block_no * ns->geom.secsz;
- if (mtd->block_markbad(mtd, offset)) {
+ if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee1713907b9..ec688548c88 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
- ppdata->of_node = flash_np;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
@@ -294,18 +294,7 @@ static struct platform_driver ndfc_driver = {
.remove = __devexit_p(ndfc_remove),
};
-static int __init ndfc_nand_init(void)
-{
- return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
- platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b463ecfb4c1..a86aa812ca1 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -201,7 +201,7 @@ static int nomadik_nand_suspend(struct device *dev)
struct nomadik_nand_host *host = dev_get_drvdata(dev);
int ret = 0;
if (host)
- ret = host->mtd.suspend(&host->mtd);
+ ret = mtd_suspend(&host->mtd);
return ret;
}
@@ -209,7 +209,7 @@ static int nomadik_nand_resume(struct device *dev)
{
struct nomadik_nand_host *host = dev_get_drvdata(dev);
if (host)
- host->mtd.resume(&host->mtd);
+ mtd_resume(&host->mtd);
return 0;
}
@@ -228,19 +228,7 @@ static struct platform_driver nomadik_nand_driver = {
},
};
-static int __init nand_nomadik_init(void)
-{
- pr_info("Nomadik NAND driver\n");
- return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
- platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
+module_platform_driver(nomadik_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index fa8faedfad6..8febe46e110 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -364,18 +364,7 @@ static struct platform_driver nuc900_nand_driver = {
},
};
-static int __init nuc900_nand_init(void)
-{
- return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
- platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index f745f00f316..b3a883e2a22 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1145,20 +1145,7 @@ static struct platform_driver omap_nand_driver = {
},
};
-static int __init omap_nand_init(void)
-{
- pr_info("%s driver initializing\n", DRIVER_NAME);
-
- return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
- platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a97264ececd..974dbf8251c 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -230,17 +230,7 @@ static struct platform_driver pasemi_nand_driver =
.remove = pasemi_nand_remove,
};
-static int __init pasemi_nand_init(void)
-{
- return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
- platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index ea8e1234e0e..7f2da695335 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -148,18 +148,7 @@ static struct platform_driver plat_nand_driver = {
},
};
-static int __init plat_nand_init(void)
-{
- return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
- platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9eb7f879969..8544d6bf50a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1258,7 +1258,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->suspend(mtd);
+ mtd_suspend(mtd);
}
return 0;
@@ -1291,7 +1291,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
nand_writel(info, NDSR, NDSR_MASK);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->resume(mtd);
+ mtd_resume(mtd);
}
return 0;
@@ -1311,17 +1311,7 @@ static struct platform_driver pxa3xx_nand_driver = {
.resume = pxa3xx_nand_resume,
};
-static int __init pxa3xx_nand_init(void)
-{
- return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
- platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 619d2a50478..b175c0fd8b9 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -230,17 +230,7 @@ static struct platform_driver sharpsl_nand_driver = {
.remove = __devexit_p(sharpsl_nand_remove),
};
-static int __init sharpsl_nand_init(void)
-{
- return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
- platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 32ae5af7444..774c3c26671 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -55,7 +55,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
ops.datbuf = NULL;
- ret = mtd->write_oob(mtd, ofs, &ops);
+ ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 0fb24f9c232..e02b08bcf0c 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -273,18 +273,7 @@ static struct platform_driver socrates_nand_driver = {
.remove = __devexit_p(socrates_nand_remove),
};
-static int __init socrates_nand_init(void)
-{
- return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
- platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index beebd95f769..6caa0cd9d6a 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -533,18 +533,7 @@ static struct platform_driver tmio_driver = {
.resume = tmio_resume,
};
-static int __init tmio_init(void)
-{
- return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
- platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ace46fdaef5..c7c4f1d11c7 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -298,11 +298,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, res->start,
- resource_size(res), dev_name(&dev->dev)))
- return -EBUSY;
- drvdata->base = devm_ioremap(&dev->dev, res->start,
- resource_size(res));
+ drvdata->base = devm_request_and_ioremap(&dev->dev, res);
if (!drvdata->base)
return -EBUSY;
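The txx9ndfmc hunk above folds the devm_request_mem_region()/devm_ioremap() pair into devm_request_and_ioremap(), which claims the region and maps it in one managed step. A probe-side sketch of the pattern (resource index 0 is assumed for the example):

#include <linux/io.h>
#include <linux/platform_device.h>

/*
 * Managed claim-and-map of a platform MEM resource; both the region
 * and the mapping are released automatically on driver detach.
 */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return NULL;

        /* NULL means the region was busy or the ioremap failed. */
        return devm_request_and_ioremap(&pdev->dev, res);
}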
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index cda77b562ad..a75382aff5f 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,7 +56,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd_can_have_bb(mtd)) {
printk(KERN_ERR
"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
@@ -153,7 +153,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~mask, &ops);
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -174,7 +174,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -198,7 +198,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
@@ -423,12 +423,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512, &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
- + (block * 512), 512, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
@@ -771,7 +776,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ac4092591ae..51b9d6af307 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf);
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -242,7 +242,8 @@ The new DiskOnChip driver already scanned the bad block table. Just query it.
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
#endif
- if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
@@ -274,7 +275,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -326,7 +327,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
instr->mtd = nftl->mbd.mtd;
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk("Error while formatting block %d\n", block);
@@ -355,7 +356,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
- nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 7813095264a..0ccd5bff254 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -115,21 +115,9 @@ static struct platform_driver generic_onenand_driver = {
.remove = __devexit_p(generic_onenand_remove),
};
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
-static int __init generic_onenand_init(void)
-{
- return platform_driver_register(&generic_onenand_driver);
-}
-
-static void __exit generic_onenand_exit(void)
-{
- platform_driver_unregister(&generic_onenand_driver);
-}
-
-module_init(generic_onenand_init);
-module_exit(generic_onenand_exit);
+module_platform_driver(generic_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a8394730b4b..a061bc163da 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2633,7 +2633,6 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2645,7 +2644,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = this->block_markbad(mtd, ofs);
+ ret = mtd_block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 5474547eafc..fa1ee43f735 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1133,18 +1133,7 @@ static struct platform_driver s3c_onenand_driver = {
.remove = __devexit_p(s3c_onenand_remove),
};
-static int __init s3c_onenand_init(void)
-{
- return platform_driver_register(&s3c_onenand_driver);
-}
-
-static void __exit s3c_onenand_exit(void)
-{
- platform_driver_unregister(&s3c_onenand_driver);
-}
-
-module_init(s3c_onenand_init);
-module_exit(s3c_onenand_exit);
+module_platform_driver(s3c_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index e366b1d84ea..48970c14bef 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +89,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
} else {
offset = directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
@@ -104,8 +104,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
- ret = master->read(master, offset,
- master->erasesize, &retlen, (void *)buf);
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
if (ret)
goto out;
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 73ae217a425..233b946e5d6 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -200,9 +200,9 @@ static int scan_header(struct partition *part)
part->sector_map[i] = -1;
for (i=0, blocks_found=0; i<part->total_blocks; i++) {
- rc = part->mbd.mtd->read(part->mbd.mtd,
- i * part->block_size, part->header_size,
- &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -250,8 +250,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
addr = part->sector_map[sector];
if (addr != -1) {
- rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
- &retlen, (u_char*)buf);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -304,9 +304,8 @@ static void erase_callback(struct erase_info *erase)
part->blocks[i].used_sectors = 0;
part->blocks[i].erases++;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- part->blocks[i].offset, sizeof(magic), &retlen,
- (u_char*)&magic);
+ rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+ &retlen, (u_char *)&magic);
if (!rc && retlen != sizeof(magic))
rc = -EIO;
@@ -342,7 +341,7 @@ static int erase_block(struct partition *part, int block)
part->blocks[block].state = BLOCK_ERASING;
part->blocks[block].free_sectors = 0;
- rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ rc = mtd_erase(part->mbd.mtd, erase);
if (rc) {
printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -372,9 +371,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
if (!map)
goto err2;
- rc = part->mbd.mtd->read(part->mbd.mtd,
- part->blocks[block_no].offset, part->header_size,
- &retlen, (u_char*)map);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -413,8 +411,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
}
continue;
}
- rc = part->mbd.mtd->read(part->mbd.mtd, addr,
- SECTOR_SIZE, &retlen, sector_data);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -450,8 +448,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
int rc;
/* we have a race if sync doesn't exist */
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
score = 0x7fffffff; /* MAX_INT */
best_block = -1;
@@ -563,8 +560,9 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
}
}
- rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
- part->header_size, &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -595,8 +593,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
addr = part->blocks[block].offset +
(HEADER_MAP_OFFSET + offset) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(del), &retlen, (u_char*)&del);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
if (!rc && retlen != sizeof(del))
rc = -EIO;
@@ -668,8 +666,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
block->offset;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -688,8 +686,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
part->header_cache[i + HEADER_MAP_OFFSET] = entry;
addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(entry), &retlen, (u_char*)&entry);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
if (!rc && retlen != sizeof(entry))
rc = -EIO;
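Every converted call in rfd_ftl.c keeps the same short-transfer idiom: the wrapper returns 0 on success but may move fewer bytes than requested, so the caller promotes a short transfer to -EIO. A condensed sketch of that idiom, assuming the same 3.3-era API; the helper name is illustrative:

static int mtd_read_exact(struct mtd_info *mtd, loff_t from, size_t len,
			  u_char *buf)
{
	size_t retlen;
	int rc = mtd_read(mtd, from, len, &retlen, buf);

	/* A short read is reported as success by the driver, so turn it
	 * into an I/O error here, as the hunks above do. */
	if (!rc && retlen != len)
		rc = -EIO;
	return rc;
}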
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index fddb714e323..072ed5970e2 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -25,7 +25,7 @@
struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
-module_param(cache_timeout, bool, S_IRUGO);
+module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
"Timeout (in ms) for cache flush (1000 ms default");
@@ -278,7 +278,7 @@ again:
/* Unfortunately, oob read will _always_ succeed,
despite card removal..... */
- ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
@@ -343,7 +343,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
- ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
@@ -479,7 +479,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
return -EIO;
}
- if (mtd->erase(mtd, &erase)) {
+ if (mtd_erase(mtd, &erase)) {
sm_printk("erase of block %d in zone %d failed",
block, zone_num);
goto error;
@@ -645,8 +645,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
return -ENODEV;
- /* We use these functions for IO */
- if (!mtd->read_oob || !mtd->write_oob)
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
return -ENODEV;
/* Find geometry information */
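The sm_ftl.c hunk also fixes a mismatched module_param() declaration: cache_timeout is an int but was registered as bool, which the stricter bool-parameter checking introduced around this time complains about. A small sketch of the matching forms, with illustrative names; only the description string is taken from the driver above:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int cache_timeout_ms = 1000;		/* integer knob */
module_param(cache_timeout_ms, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout_ms,
		 "Timeout (in ms) for cache flush (1000 ms default)");

static bool use_cache = true;			/* boolean knob */
module_param(use_cache, bool, S_IRUGO);
MODULE_PARM_DESC(use_cache, "Enable the write cache.");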
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 976e3d28b96..ab2a52a039c 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
* is not SSFDC formatted
*/
for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
- if (!mtd->block_isbad(mtd, offset)) {
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
- sect_buf);
+ if (!mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
/* CIS pattern match on the sector buffer */
if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -156,7 +156,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
size_t retlen;
loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
if (ret < 0 || retlen != SECTOR_SIZE)
return -1;
@@ -175,7 +175,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
ops.oobbuf = buf;
ops.datbuf = NULL;
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
if (ret < 0 || ops.oobretlen != OOB_SIZE)
return -1;
@@ -255,7 +255,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
phys_block++) {
offset = (unsigned long)phys_block * ssfdc->erase_size;
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
continue; /* skip bad blocks */
ret = read_raw_oob(mtd, offset, oob_buf);
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 933f7e5f32d..ed9b62827f1 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -78,7 +78,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -139,7 +139,7 @@ static int write_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: writeoob failed at %#llx\n",
(long long)addr);
@@ -192,7 +192,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -219,7 +219,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
printk(PRINT_PREF "error: readoob failed at "
"%#llx\n", (long long)addr);
@@ -284,7 +284,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -329,7 +329,7 @@ static int is_block_bad(int ebnum)
int ret;
loff_t addr = ebnum * mtd->erasesize;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -524,7 +524,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to start write past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, addr0, &ops);
+ err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -544,7 +544,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to start read past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, addr0, &ops);
+ err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -568,7 +568,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -588,7 +588,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -612,7 +612,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -632,7 +632,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -670,7 +670,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
@@ -698,7 +698,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
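The mtd_oobtest hunks only swap the method-pointer calls for mtd_read_oob()/mtd_write_oob(); the struct mtd_oob_ops setup around them is unchanged. A minimal sketch of a raw OOB read with that wrapper, assuming the 3.3-era MTD_OPS_* mode names; the function and field values are illustrative, not taken from the test:

static int read_oob_page(struct mtd_info *mtd, loff_t addr, u8 *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_PLACE_OOB,	/* raw OOB placement */
		.ooblen	= mtd->oobsize,
		.oobbuf	= oobbuf,
		.datbuf	= NULL,			/* OOB only, no main data */
	};
	int err = mtd_read_oob(mtd, addr, &ops);

	if (!err && ops.oobretlen != mtd->oobsize)
		err = -EIO;
	return err;
}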
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index afafb6935fd..252ddb092fb 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -77,7 +77,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -95,12 +95,12 @@ static int erase_eraseblock(int ebnum)
static int write_eraseblock(int ebnum)
{
int err = 0;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, mtd->erasesize);
cond_resched();
- err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -111,7 +111,7 @@ static int write_eraseblock(int ebnum)
static int verify_eraseblock(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr0, addrn;
loff_t addr = ebnum * mtd->erasesize;
@@ -127,7 +127,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -135,7 +135,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -144,8 +144,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -163,7 +162,7 @@ static int verify_eraseblock(int ebnum)
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
unsigned long oldnext = next;
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -171,7 +170,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -180,8 +179,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -203,7 +201,7 @@ static int verify_eraseblock(int ebnum)
static int crosstest(void)
{
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
@@ -228,9 +226,8 @@ static int crosstest(void)
addrn -= mtd->erasesize;
/* Read 2nd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -241,9 +238,8 @@ static int crosstest(void)
}
/* Read 3rd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -254,10 +250,9 @@ static int crosstest(void)
}
/* Read first page to pp2 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp2);
+ err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -268,10 +263,9 @@ static int crosstest(void)
}
/* Read last page to pp3 */
- read = 0;
addr = addrn - pgsize;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp3);
+ err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -282,10 +276,9 @@ static int crosstest(void)
}
/* Read first page again to pp4 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp4);
+ err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -309,7 +302,7 @@ static int crosstest(void)
static int erasecrosstest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
@@ -335,7 +328,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -344,7 +337,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -368,7 +361,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -382,7 +375,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -405,7 +398,7 @@ static int erasecrosstest(void)
static int erasetest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
@@ -425,7 +418,7 @@ static int erasetest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -438,7 +431,7 @@ static int erasetest(void)
return err;
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- err = mtd->read(mtd, addr0, pgsize, &read, twopages);
+ err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -469,7 +462,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 550fe51225a..121aba189ce 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -44,7 +44,7 @@ static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, ret, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
@@ -52,7 +52,7 @@ static int read_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
memset(buf, 0 , pgcnt);
- ret = mtd->read(mtd, addr, pgsize, &read, buf);
+ ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
@@ -74,7 +74,7 @@ static int read_eraseblock_by_page(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
- ret = mtd->read_oob(mtd, addr, &ops);
+ ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
printk(PRINT_PREF "error: read oob failed at "
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -148,8 +148,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 493b367bdd3..2aec4f3b72b 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -79,7 +79,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -105,7 +105,7 @@ static int multiblock_erase(int ebnum, int blocks)
ei.addr = addr;
ei.len = mtd->erasesize * blocks;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
@@ -139,11 +139,11 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
printk(PRINT_PREF "error: write failed at %#llx\n", addr);
if (!err)
@@ -155,13 +155,13 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock_by_page(int ebnum)
{
- size_t written = 0;
+ size_t written;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -178,13 +178,13 @@ static int write_eraseblock_by_page(int ebnum)
static int write_eraseblock_by_2pages(int ebnum)
{
- size_t written = 0, sz = pgsize * 2;
+ size_t written, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->write(mtd, addr, sz, &written, buf);
+ err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -196,7 +196,7 @@ static int write_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -210,11 +210,11 @@ static int write_eraseblock_by_2pages(int ebnum)
static int read_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
+ err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -229,13 +229,13 @@ static int read_eraseblock(int ebnum)
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -255,13 +255,13 @@ static int read_eraseblock_by_page(int ebnum)
static int read_eraseblock_by_2pages(int ebnum)
{
- size_t read = 0, sz = pgsize * 2;
+ size_t read, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->read(mtd, addr, sz, &read, buf);
+ err = mtd_read(mtd, addr, sz, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -276,7 +276,7 @@ static int read_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -296,7 +296,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -336,8 +336,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
goto out;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 52ffd9120e0..7b33f22d0b5 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -112,7 +112,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -140,7 +140,7 @@ static int is_block_bad(int ebnum)
static int do_read(void)
{
- size_t read = 0;
+ size_t read;
int eb = rand_eb();
int offs = rand_offs();
int len = rand_len(offs), err;
@@ -153,7 +153,7 @@ static int do_read(void)
len = mtd->erasesize - offs;
}
addr = eb * mtd->erasesize + offs;
- err = mtd->read(mtd, addr, len, &read, readbuf);
+ err = mtd_read(mtd, addr, len, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
@@ -169,7 +169,7 @@ static int do_read(void)
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
- size_t written = 0;
+ size_t written;
loff_t addr;
offs = offsets[eb];
@@ -192,7 +192,7 @@ static int do_write(void)
}
}
addr = eb * mtd->erasesize + offs;
- err = mtd->write(mtd, addr, len, &written, writebuf);
+ err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
printk(PRINT_PREF "error: write failed at 0x%llx\n",
(long long)addr);
@@ -227,8 +227,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
@@ -284,6 +283,12 @@ static int __init mtd_stresstest_init(void)
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
+ if (ebcnt < 2) {
+ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
/* Read or write up 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
@@ -322,6 +327,7 @@ out:
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
+out_put_mtd:
put_mtd_device(mtd);
if (err)
printk(PRINT_PREF "error %d occurred\n", err);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 1a05bfac4ee..9667bf53528 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -80,7 +80,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -115,12 +115,12 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
addr += subpgsize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -150,7 +150,7 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock2(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -158,7 +158,7 @@ static int write_eraseblock2(int ebnum)
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
set_random_data(writebuf, subpgsize * k);
- err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -189,14 +189,13 @@ static void print_subpage(unsigned char *p)
static int verify_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -223,8 +222,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -252,7 +250,7 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock2(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -261,8 +259,7 @@ static int verify_eraseblock2(int ebnum)
break;
set_random_data(writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -288,15 +285,14 @@ static int verify_eraseblock2(int ebnum)
static int verify_eraseblock_ff(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
memset(writebuf, 0xff, subpgsize);
for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -344,7 +340,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 03ab649a696..b65861bc7b8 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -105,7 +105,7 @@ static inline int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -127,7 +127,7 @@ static inline int erase_eraseblock(int ebnum)
static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
- size_t read = 0;
+ size_t read;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -137,7 +137,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
}
retry:
- err = mtd->read(mtd, addr, len, &read, check_buf);
+ err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
printk(PRINT_PREF "single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
@@ -181,7 +181,7 @@ retry:
static inline int write_pattern(int ebnum, void *buf)
{
int err;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -189,7 +189,7 @@ static inline int write_pattern(int ebnum, void *buf)
addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
- err = mtd->write(mtd, addr, len, &written, buf);
+ err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
printk(PRINT_PREF "error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
@@ -290,10 +290,9 @@ static int __init tort_init(void)
* Check if there is a bad eraseblock among those we are going to test.
*/
memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
- if (mtd->block_isbad) {
+ if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
- err = mtd->block_isbad(mtd,
- (loff_t)i * mtd->erasesize);
+ err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
printk(PRINT_PREF "block_isbad() returned %d "
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6c3fb5ab20f..115749f20f9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -664,7 +664,7 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (mtd_can_have_bb(ubi->mtd))
ubi->bad_allowed = 1;
if (ubi->mtd->type == MTD_NORFLASH) {
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index ab80c0debac..e2cdebf4084 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -216,7 +216,7 @@ void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
buf = vmalloc(len);
if (!buf)
return;
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fb7f19b62d9..cd26da8ad22 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reason for the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
- return MOVE_CANCEL_RACE;
+ return MOVE_RETRY;
}
/*
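The eba.c hunk changes what ubi_eba_copy_leb() reports when leb_write_trylock() finds the LEB already locked: instead of MOVE_CANCEL_RACE it returns the new MOVE_RETRY value (added in the ubi.h hunk further down), because the contention may just be ordinary I/O and the move is worth retrying. A minimal sketch of that trylock decision, with illustrative names:

static int try_copy_leb_sketch(struct mutex *leb_lock)
{
	if (!mutex_trylock(leb_lock))
		return MOVE_RETRY;	/* contended: let the caller retry later */

	/* ... copy the LEB contents here ... */

	mutex_unlock(leb_lock);
	return 0;
}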
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index f20b6f22f24..5cde4e5ca3e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -170,7 +170,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
@@ -289,7 +289,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
}
addr = (loff_t)pnum * ubi->peb_size + offset;
- err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
"%zd bytes", err, len, pnum, offset, written);
@@ -361,7 +361,7 @@ retry:
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
- err = ubi->mtd->erase(ubi->mtd, &ei);
+ err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d while erasing PEB %d, retry",
@@ -525,11 +525,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* the header comment in scan.c for more information).
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err) {
addr += ubi->vid_hdr_aloffset;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
- (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err)
return 0;
}
@@ -635,7 +634,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
if (ubi->bad_allowed) {
int ret;
- ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
ubi_err("error %d while checking if PEB %d is bad",
ret, pnum);
@@ -670,7 +669,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
if (!ubi->bad_allowed)
return 0;
- err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
return err;
@@ -1357,7 +1356,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
if (err && !mtd_is_bitflip(err))
goto out_free;
@@ -1421,7 +1420,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 1a35fc5e3b4..9fdb35367fe 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -714,9 +714,7 @@ int ubi_sync(int ubi_num)
if (!ubi)
return -ENODEV;
- if (ubi->mtd->sync)
- ubi->mtd->sync(ubi->mtd);
-
+ mtd_sync(ubi->mtd);
ubi_put_device(ubi);
return 0;
}
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dc64c767fd2..d51d75d3444 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -120,6 +120,7 @@ enum {
* PEB
* MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
*/
enum {
MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
MOVE_CANCEL_BITFLIPS,
+ MOVE_RETRY,
};
/**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42c684cf368..0696e36b053 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1;
goto out_not_moved;
}
-
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode.
*/
goto out_ro;
- }
/* It is %-EIO, the PEB went bad */
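The wl.c hunks do two related things: the wear-leveling worker maps MOVE_RETRY to a re-scrub of the same PEB, and erase_worker() now frees the wear-leveling entry only after the retry branch, because the transient-error path re-queues that same entry. A generic sketch of the free-ordering rule; every name here is a hypothetical stand-in, not a UBI symbol:

struct peb_entry { int pnum; };
static int requeue_erase(struct peb_entry *e);	/* hypothetical re-queue helper */

static int erase_error_path_sketch(struct peb_entry *e, int err)
{
	/* Transient errors: the entry is handed back to the work queue,
	 * so it must stay allocated. */
	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY)
		return requeue_erase(e);

	/* Terminal failure: nothing will touch 'e' again, free it before
	 * deciding between marking the PEB bad and going read-only. */
	kfree(e);
	return err;
}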
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 654a5e94e0e..b98285446a5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@ config IFB
'ifb1' etc.
Look at the iproute2 documentation directory for usage etc
+source "drivers/net/team/Kconfig"
+
config MACVLAN
tristate "MAC-VLAN support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -241,6 +243,8 @@ source "drivers/atm/Kconfig"
source "drivers/net/caif/Kconfig"
+source "drivers/net/dsa/Kconfig"
+
source "drivers/net/ethernet/Kconfig"
source "drivers/net/fddi/Kconfig"
@@ -338,4 +342,6 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+source "drivers/net/hyperv/Kconfig"
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd2b13..a6b8ce11a22 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -66,3 +68,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
+
+obj-$(CONFIG_HYPERV_NET) += hyperv/
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff..84fb6349a59 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644
index 027a0ee7d85..00000000000
--- a/drivers/net/bonding/bond_ipv6.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
- struct inet6_dev *idev;
-
- if (!dev)
- return;
-
- idev = in6_dev_get(dev);
- if (!idev)
- return;
-
- read_lock_bh(&idev->lock);
- if (!list_empty(&idev->addr_list)) {
- struct inet6_ifaddr *ifa
- = list_first_entry(&idev->addr_list,
- struct inet6_ifaddr, if_list);
- ipv6_addr_copy(addr, &ifa->addr);
- } else
- ipv6_addr_set(addr, 0, 0, 0, 0);
-
- read_unlock_bh(&idev->lock);
-
- in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
- struct in6_addr *daddr,
- int router,
- unsigned short vlan_id)
-{
- struct in6_addr mcaddr;
- struct icmp6hdr icmp6h = {
- .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
- };
- struct sk_buff *skb;
-
- icmp6h.icmp6_router = router;
- icmp6h.icmp6_solicited = 0;
- icmp6h.icmp6_override = 1;
-
- addrconf_addr_solict_mult(daddr, &mcaddr);
-
- pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
-
- skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
- ND_OPT_TARGET_LL_ADDR);
-
- if (!skb) {
- pr_err("NA packet allocation failed\n");
- return;
- }
-
- if (vlan_id) {
- /* The Ethernet header is not present yet, so it is
- * too early to insert a VLAN tag. Force use of an
- * out-of-line tag here and let dev_hard_start_xmit()
- * insert it if the slave hardware can't.
- */
- skb = __vlan_hwaccel_put_tag(skb, vlan_id);
- if (!skb) {
- pr_err("failed to insert VLAN tag\n");
- return;
- }
- }
-
- ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master. This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
- struct slave *slave = bond->curr_active_slave;
- struct vlan_entry *vlan;
- struct inet6_dev *idev;
- int is_router;
-
- pr_debug("%s: bond %s slave %s\n", bond->dev->name,
- __func__, slave ? slave->dev->name : "NULL");
-
- if (!slave || !bond->send_unsol_na ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
- return;
-
- bond->send_unsol_na--;
-
- idev = in6_dev_get(bond->dev);
- if (!idev)
- return;
-
- is_router = !!idev->cnf.forwarding;
-
- in6_dev_put(idev);
-
- if (!ipv6_addr_any(&bond->master_ipv6))
- bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
- bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
- vlan->vlan_id);
- }
- }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
- struct bonding *bond;
- struct vlan_entry *vlan;
- struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
- if (bond->dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&bond->master_ipv6))
- ipv6_addr_copy(&bond->master_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&bond->master_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(bond->dev,
- &bond->master_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&vlan->vlan_ipv6))
- ipv6_addr_copy(&vlan->vlan_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&vlan->vlan_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(vlan_dev,
- &vlan->vlan_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
- .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
- register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
- unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c57725648..435984ad8b2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
* @bond_dev: bonding net device that got called
* @vid: vlan id being added
*/
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *stop_at;
int i, res;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_add_vid) {
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
- }
+ res = vlan_vid_add(slave->dev, vid);
+ if (res)
+ goto unwind;
}
res = bond_add_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ vlan_vid_del(slave->dev, vid);
+
+ return res;
}
/**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
* @bond_dev: bonding net device that got called
* @vid: vlan id being removed
*/
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i, res;
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_kill_vid) {
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- }
- }
+ bond_for_each_slave(bond, slave, i)
+ vlan_vid_del(slave->dev, vid);
res = bond_del_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
}
static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
{
struct vlan_entry *vlan;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_add_vid))
- return;
+ int res;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ if (res)
+ pr_warning("%s: Failed to add vlan id %d to device %s\n",
+ bond->dev->name, vlan->vlan_id,
+ slave_dev->name);
+ }
}
static void bond_del_vlans_from_slave(struct bonding *bond,
struct net_device *slave_dev)
{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_kill_vid))
- return;
-
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ vlan_vid_del(slave_dev, vlan->vlan_id);
}
}
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct slave *slave;
struct bonding *bond = netdev_priv(dev);
- u32 mask;
+ netdev_features_t mask;
int i;
read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- u32 vlan_features = BOND_VLAN_FEATURES;
+ netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"but new slave device does not support netpoll.\n",
bond_dev->name);
res = -EBUSY;
- goto err_close;
+ goto err_detach;
}
}
#endif
@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = bond_create_slave_symlinks(bond_dev, slave_dev);
if (res)
- goto err_close;
+ goto err_detach;
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_dest_symlinks:
bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_detach:
+ write_lock_bh(&bond->lock);
+ bond_detach_slave(bond, new_slave);
+ write_unlock_bh(&bond->lock);
+
err_close:
dev_close(slave_dev);
@@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
- u32 old_features = bond_dev->features;
+ netdev_features_t old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2553,30 +2558,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3322,6 +3303,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about primary address */
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3329,7 +3314,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3345,8 +3330,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -4360,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->features |= bond_dev->hw_features;
}
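In bond_main.c the VLAN add/kill handlers now return an int and go through the 8021q helpers: bond_vlan_rx_add_vid() propagates the first vlan_vid_add() failure and unwinds the slaves that already accepted the VID. A generic sketch of that add-then-unwind pattern over a plain array (the bond_for_each_slave_* iterators are bonding-internal); vlan_vid_add()/vlan_vid_del() are the real helpers, the rest is illustrative:

static int add_vid_to_all(struct net_device **slaves, int count,
			  unsigned short vid)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = vlan_vid_add(slaves[i], vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* Roll back only the devices that already accepted the VID. */
	while (--i >= 0)
		vlan_vid_del(slaves[i], vid);
	return err;
}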
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4ef7e2fd9fe..aef42f04532 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <linux/sysdev.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 073352517ad..0a4fc62a381 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
dev_dbg(&cfhsi->ndev->dev, "%s.\n",
__func__);
-
- ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
- if (ret) {
- dev_warn(&cfhsi->ndev->dev,
- "%s: can't wake up HSI interface: %d.\n",
- __func__, ret);
- return ret;
- }
-
do {
ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
} while (1);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
return ret;
}
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Create HSI frame. */
len = cfhsi_tx_frm(desc, cfhsi);
- BUG_ON(!len);
+ WARN_ON(!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 23406e62c0b..8a3054b8481 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
/*This list is protected by the rtnl lock. */
static LIST_HEAD(ser_list);
-static int ser_loop;
+static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
skb_pull(skb, tty_wr);
if (skb->len == 0) {
struct sk_buff *tmp = skb_dequeue(&ser->head);
- BUG_ON(tmp != skb);
+ WARN_ON(tmp != skb);
if (in_interrupt())
dev_kfree_skb_irq(skb);
else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
ser = tty->disc_data;
BUG_ON(ser == NULL);
- BUG_ON(ser->tty != tty);
+ WARN_ON(ser->tty != tty);
handle_tx(ser);
}
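The caif_hsi.c and caif_serial.c hunks downgrade several BUG_ON() checks to WARN_ON(): the condition is still unexpected, but the driver can carry on, so a logged backtrace is preferable to halting the kernel. A small sketch of the shape, with a hypothetical context struct:

struct ser_ctx { struct sk_buff_head head; };

static void complete_tx_skb(struct ser_ctx *ctx, struct sk_buff *skb)
{
	struct sk_buff *done = skb_dequeue(&ctx->head);

	WARN_ON(done != skb);	/* inconsistent queue state, but survivable */
	if (done)
		dev_kfree_skb(done);
}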
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb24ed..5b2041319a3 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue, if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
- BUG_ON(skb == NULL);
+
+ if (skb == NULL) {
+ pr_info("OOM: Try next frame in descriptor\n");
+ break;
+ }
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
pck_desc++;
}
+ spin_lock_irqsave(&pshm_drv->lock, flags);
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
if (skb == NULL)
goto send_msg;
-
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
+ spin_lock_irqsave(&pshm_drv->lock, flags);
}
/*
* We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
- unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
skb_queue_tail(&pshm_drv->sk_qhead, skb);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
/* Schedule Tx work queue for deferred processing of skbs */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
+ spin_lock_init(&pshm_drv->lock);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 05e791f46ae..96391c36fa7 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-static int spi_loop;
+static bool spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx,
+ cfspi->xfer.va_tx[0],
(cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
netif_stop_queue(dev);
return 0;
}
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_start_xmit = cfspi_xmit
-};
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
{
+ int res = 0;
struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->destructor = free_netdev;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- ndev = alloc_netdev(sizeof(struct cfspi),
- "cfspi%d", cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
/* Set flow info. */
cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
cfspi->slave_talked = false;
}
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
/* Allocate DMA buffers. */
- cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
- if (!cfspi->xfer.va_tx) {
+ cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+ if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
- goto err_dma_alloc_tx;
+ goto err_dma_alloc_tx_0;
}
cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
/* Schedule the work queue. */
queue_work(cfspi->wq, &cfspi->work);
+ return 0;
+
+ err_create_wq:
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+ return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+
+ /* Remove from list. */
+ spin_lock(&cfspi_list_lock);
+ list_del(&cfspi->list);
+ spin_unlock(&cfspi_list_lock);
+
+ cfspi->ndev = NULL;
+ /* Free DMA buffers. */
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ /* Destroy debugfs directory and files. */
+ dev_debugfs_rem(cfspi);
+ return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+ .ndo_open = cfspi_open,
+ .ndo_stop = cfspi_close,
+ .ndo_init = cfspi_init,
+ .ndo_uninit = cfspi_uninit,
+ .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfspi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+ dev->tx_queue_len = 0;
+ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfspi->qhead);
+ skb_queue_head_init(&cfspi->chead);
+ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfspi->cfdev.use_frag = false;
+ cfspi->cfdev.use_stx = false;
+ cfspi->cfdev.use_fcs = false;
+ cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+ struct cfspi *cfspi = NULL;
+ struct net_device *ndev;
+ struct cfspi_dev *dev;
+ int res;
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+ ndev = alloc_netdev(sizeof(struct cfspi),
+ "cfspi%d", cfspi_setup);
+	if (!ndev)
+		return -ENOMEM;
+
+ cfspi = netdev_priv(ndev);
+ netif_stop_queue(ndev);
+ cfspi->ndev = ndev;
+ cfspi->pdev = pdev;
+
+ /* Assign the SPI device. */
+ cfspi->dev = dev;
+ /* Assign the device ifc to this SPI interface. */
+ dev->ifc = &cfspi->ifc;
+
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
return res;
err_net_reg:
- dev_debugfs_rem(cfspi);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- err_create_wq:
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
free_netdev(ndev);
return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
int cfspi_spi_remove(struct platform_device *pdev)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
- struct cfspi_dev *dev;
-
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
- spin_lock(&cfspi_list_lock);
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- /* Find the corresponding device. */
- if (cfspi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- /* Free DMA buffers. */
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- unregister_netdev(cfspi->ndev);
- spin_unlock(&cfspi_list_lock);
- return 0;
- }
- }
- spin_unlock(&cfspi_list_lock);
- return -ENODEV;
+ /* Everything is done in cfspi_uninit(). */
+ return 0;
}
static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
list_for_each_safe(list_node, n, &cfspi_list) {
cfspi = list_entry(list_node, struct cfspi, list);
- platform_device_unregister(cfspi->pdev);
+ unregister_netdev(cfspi->ndev);
}
/* Destroy sysfs files. */
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f6c98fb4a51..ab45758c49a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/c_can/Kconfig"
+source "drivers/net/can/cc770/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 24ebfe8d758..938be37b670 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,6 +14,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 044ea0647b0..6ea905c2cf6 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
.id_table = at91_can_id_table,
};
-static int __init at91_can_module_init(void)
-{
- return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
- platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a1c5abc38cd..349e0fabb63 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
},
};
-static int __init bfin_can_init(void)
-{
- return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
- platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 0b5c6f8bdd3..5e1a5ff6476 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
.remove = __devexit_p(c_can_plat_remove),
};
-static int __init c_can_plat_init(void)
-{
- return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
- platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644
index 00000000000..22c07a8c8b4
--- /dev/null
+++ b/drivers/net/can/cc770/Kconfig
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+ tristate "Bosch CC770 and Intel AN82527 devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+ tristate "ISA Bus based legacy CC770 driver"
+ ---help---
+ This driver adds legacy support for CC770 and AN82527 chips
+ connected to the ISA bus using I/O port, memory mapped or
+ indirect access.
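+
+	  For example (single-device setup, mirroring the module
+	  documentation in cc770_isa.c): insmod cc770_isa.ko port=0x310 irq=7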
+
+config CAN_CC770_PLATFORM
+ tristate "Generic Platform Bus based CC770 driver"
+ ---help---
+ This driver adds support for the CC770 and AN82527 chips
+	  connected to the "platform bus" (the Linux abstraction for
+	  devices attached directly to the processor).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644
index 00000000000..9fb8321b33e
--- /dev/null
+++ b/drivers/net/can/cc770/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644
index 00000000000..76689674764
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.c
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses additional functionality 3 on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames and
+ * therefore it is set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load, which makes it the natural
+ * choice for this use case. The frame type (EFF/SFF) for message
+ * object 15 can be defined via the kernel module parameter
+ * "msgobj15_eff". If not equal to 0, it will receive 29-bit EFF frames,
+ * otherwise 11-bit SFF frames.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+ "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+ "without using additional functions");
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allow them to be configured and assigned to the
+ * real message objects.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+ [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+ [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+ [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+ CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
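+/*
+ * Mapping sketch (follows from the formula below and obj2msgobj()):
+ * interrupt id 2 denotes message object 15, i.e. object index 0, while
+ * interrupt ids 16..13 denote message objects 14..11, i.e. object
+ * indices 1..4. Interrupt id 1 (status) is handled separately.
+ */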
+static inline int intid2obj(unsigned int intid)
+{
+ if (intid == 2)
+ return 0;
+ else
+ return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 msgcfg;
+ unsigned char obj_flags;
+ unsigned int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ obj_flags = priv->obj_flags[o];
+ mo = obj2msgobj(o);
+
+ if (obj_flags & CC770_OBJ_FLAG_RX) {
+ /*
+ * We don't need extra objects for RTR and EFF if
+ * the additional CC770 functions are enabled.
+ */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (o > 0)
+ continue;
+ netdev_dbg(dev, "Message object %d for "
+ "RX data, RTR, SFF and EFF\n", mo);
+ } else {
+ netdev_dbg(dev,
+ "Message object %d for RX %s %s\n",
+ mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+ "RTR" : "data",
+ obj_flags & CC770_OBJ_FLAG_EFF ?
+ "EFF" : "SFF");
+ }
+
+ if (obj_flags & CC770_OBJ_FLAG_EFF)
+ msgcfg = MSGCFG_XTD;
+ else
+ msgcfg = 0;
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ msgcfg |= MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ else
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ } else {
+ netdev_dbg(dev, "Message object %d for "
+ "TX data, RTR, SFF and EFF\n", mo);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+ int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ mo = obj2msgobj(o);
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+ if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+ continue;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ } else {
+ /* Clear message object for send */
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+	/* Enable configuration, put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Disable all used message objects */
+ disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register and pre-set last error code */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+	/* Enable all used message objects */
+ enable_all_objs(dev);
+
+ /*
+ * Clear bus-off, interrupts only for errors,
+ * not for status change
+ */
+ cc770_write_reg(priv, control, priv->control_normal_mode);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+ int mo, id, data;
+
+ /* Enable configuration and put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+ /* Set CLKOUT divider and slew rates */
+ cc770_write_reg(priv, clkout, priv->clkout);
+
+ /* Configure CPU interface / CLKOUT enable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /* Set bus configuration */
+ cc770_write_reg(priv, bus_config, priv->bus_config);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Clear and invalidate message objects */
+ for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_UNC | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_RES | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ for (data = 0; data < 8; data++)
+ cc770_write_reg(priv, msgobj[mo].data[data], 0);
+ for (id = 0; id < 4; id++)
+ cc770_write_reg(priv, msgobj[mo].id[id], 0);
+ cc770_write_reg(priv, msgobj[mo].config, 0);
+ }
+
+ /* Set all global ID masks to "don't care" */
+ cc770_write_reg(priv, global_mask_std[0], 0);
+ cc770_write_reg(priv, global_mask_std[1], 0);
+ cc770_write_reg(priv, global_mask_ext[0], 0);
+ cc770_write_reg(priv, global_mask_ext[1], 0);
+ cc770_write_reg(priv, global_mask_ext[2], 0);
+ cc770_write_reg(priv, global_mask_ext[3], 0);
+
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off, disable ints */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+ /* Configure cpu interface / CLKOUT disable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /*
+ * Check if hardware reset is still inactive or maybe there
+ * is no chip in this address space
+ */
+ if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+ netdev_info(dev, "probing @0x%p failed (reset)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Write and read back test pattern (some arbitrary values) */
+ cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+ cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+ cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+ if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+ (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+ (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+ netdev_info(dev, "probing @0x%p failed (pattern)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Check if this chip is a CC770 supporting additional functions */
+ if (cc770_read_reg(priv, control) & CTRL_EAF)
+ priv->control_normal_mode |= CTRL_EAF;
+
+ return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* leave reset mode */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ set_reset_mode(dev);
+
+ /* leave reset mode */
+ set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ cc770_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
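+	/*
+	 * Illustrative numbers (not from the original patch): with
+	 * brp=4, sjw=1, prop_seg+phase_seg1=13, phase_seg2=2 and triple
+	 * sampling disabled, this yields btr0=0x03 and btr1=0x1c.
+	 */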
+
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+ cc770_write_reg(priv, bit_timing_0, btr0);
+ cc770_write_reg(priv, bit_timing_1, btr1);
+
+ return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ bec->txerr = cc770_read_reg(priv, tx_error_counter);
+ bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+ return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ u8 dlc, rtr;
+ u32 id;
+ int i;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ netif_stop_queue(dev);
+
+ dlc = cf->can_dlc;
+ id = cf->can_id;
+ if (cf->can_id & CAN_RTR_FLAG)
+ rtr = 0;
+ else
+ rtr = MSGCFG_DIR;
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
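+	/*
+	 * The identifier is stored left-justified in the id[] bytes:
+	 * a 29-bit extended id is shifted up by 3 bits, an 11-bit
+	 * standard id by 5 bits.
+	 */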
+ if (id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config,
+ (dlc << 4) | rtr | MSGCFG_XTD);
+ cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+ cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+ cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+ } else {
+ id &= CAN_SFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+ cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+ }
+
+ for (i = 0; i < dlc; i++)
+ cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+ stats->tx_bytes += dlc;
+
+ can_put_echo_skb(skb, dev, 0);
+
+ /*
+	 * HM: We had some cases of repeated IRQs, so make sure the
+	 * INT is acknowledged. It is already done further up, but
+	 * doing it again here fixed the issue.
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 config;
+ u32 id;
+ int i;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (!skb)
+ return;
+
+ config = cc770_read_reg(priv, msgobj[mo].config);
+
+ if (ctrl1 & RMTPND_SET) {
+ /*
+ * Unfortunately, the chip does not store the real message
+ * identifier of the received remote transmission request
+ * frame. Therefore we set it to 0.
+ */
+ cf->can_id = CAN_RTR_FLAG;
+ if (config & MSGCFG_XTD)
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_dlc = 0;
+ } else {
+ if (config & MSGCFG_XTD) {
+ id = cc770_read_reg(priv, msgobj[mo].id[3]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+ id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+ id >>= 3;
+ id |= CAN_EFF_FLAG;
+ } else {
+ id = cc770_read_reg(priv, msgobj[mo].id[1]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+ id >>= 5;
+ }
+
+ cf->can_id = id;
+ cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+ }
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 lec;
+
+ netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Use extended functions of the CC770 */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+ cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+ }
+
+ if (status & STAT_BOFF) {
+ /* Disable interrupts */
+ cc770_write_reg(priv, control, CTRL_INI);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(dev);
+ } else if (status & STAT_WARN) {
+ cf->can_id |= CAN_ERR_CRTL;
+		/* Only the CC770 shows error passive */
+ if (cf->data[7] > 127) {
+ cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+ CAN_ERR_CRTL_TX_PASSIVE;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ } else {
+ cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+ CAN_ERR_CRTL_TX_WARNING;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ }
+ } else {
+		/* Back to error active */
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ lec = status & STAT_LEC_MASK;
+ if (lec < 7 && lec > 0) {
+ if (lec == STAT_LEC_ACK) {
+ cf->can_id |= CAN_ERR_ACK;
+ } else {
+ cf->can_id |= CAN_ERR_PROT;
+ switch (lec) {
+ case STAT_LEC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case STAT_LEC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case STAT_LEC_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case STAT_LEC_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case STAT_LEC_CRC:
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+ }
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 status;
+
+ status = cc770_read_reg(priv, status);
+ /* Reset the status register including RXOK and TXOK */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ if (status & (STAT_WARN | STAT_BOFF) ||
+ (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+ cc770_err(dev, status);
+ return status & STAT_BOFF;
+ }
+
+ return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+ if (!(ctrl1 & NEWDAT_SET)) {
+ /* Check for RTR if additional functions are enabled */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+ INTPND_SET))
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (ctrl1 & MSGLST_SET) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+ if (mo < MSGOBJ_LAST)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_UNC | RMTPND_UNC);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl0, ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+ if (!(ctrl0 & INTPND_SET))
+ break;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+
+ /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ /*
+ * We had some cases of repeated IRQ so make sure the
+ * INT is acknowledged
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 intid;
+ int o, n = 0;
+
+ /* Shared interrupts and IRQ off? */
+ if (priv->can.state == CAN_STATE_STOPPED)
+ return IRQ_NONE;
+
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
+ while (n < CC770_MAX_IRQ) {
+ /* Read the highest pending interrupt request */
+ intid = cc770_read_reg(priv, interrupt);
+ if (!intid)
+ break;
+ n++;
+
+ if (intid == 1) {
+ /* Exit in case of bus-off */
+ if (cc770_status_interrupt(dev))
+ break;
+ } else {
+ o = intid2obj(intid);
+
+ if (o >= CC770_OBJ_MAX) {
+ netdev_err(dev, "Unexpected interrupt id %d\n",
+ intid);
+ continue;
+ }
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+ cc770_rtr_interrupt(dev, o);
+ else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+ cc770_rx_interrupt(dev, o);
+ else
+ cc770_tx_interrupt(dev, o);
+ }
+ }
+
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+ if (n >= CC770_MAX_IRQ)
+ netdev_dbg(dev, "%d messages handled in ISR", n);
+
+ return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+ dev->name, dev);
+ if (err) {
+ close_candev(dev);
+ return -EAGAIN;
+ }
+
+ /* init and start chip */
+ cc770_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ set_reset_mode(dev);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+
+ dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+ CC770_ECHO_SKB_MAX);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &cc770_bittiming_const;
+ priv->can.do_set_bittiming = cc770_set_bittiming;
+ priv->can.do_set_mode = cc770_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+ if (sizeof_priv)
+ priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+ .ndo_open = cc770_open,
+ .ndo_stop = cc770_close,
+ .ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = cc770_probe_chip(dev);
+ if (err)
+ return err;
+
+ dev->netdev_ops = &cc770_netdev_ops;
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+
+ /* Should we use additional functions? */
+ if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+ priv->can.do_get_berr_counter = cc770_get_berr_counter;
+ priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+ netdev_dbg(dev, "i82527 mode with additional functions\n");
+ } else {
+ priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+ netdev_dbg(dev, "strict i82527 compatibility mode\n");
+ }
+
+ chipset_init(priv);
+ set_reset_mode(dev);
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+ set_reset_mode(dev);
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+ if (msgobj15_eff) {
+ cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+ cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+ }
+
+ pr_info("CAN netdevice driver\n");
+
+ return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+ pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644
index 00000000000..a1739db98d9
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.h
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+ u8 ctrl0;
+ u8 ctrl1;
+ u8 id[4];
+ u8 config;
+ u8 data[8];
+ u8 dontuse; /* padding */
+} __packed;
+
+struct cc770_regs {
+ union {
+ struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+ struct {
+ u8 control; /* Control Register */
+ u8 status; /* Status Register */
+ u8 cpu_interface; /* CPU Interface Register */
+ u8 dontuse1;
+ u8 high_speed_read[2]; /* High Speed Read */
+ u8 global_mask_std[2]; /* Standard Global Mask */
+ u8 global_mask_ext[4]; /* Extended Global Mask */
+ u8 msg15_mask[4]; /* Message 15 Mask */
+ u8 dontuse2[15];
+ u8 clkout; /* Clock Out Register */
+ u8 dontuse3[15];
+ u8 bus_config; /* Bus Configuration Register */
+ u8 dontuse4[15];
+ u8 bit_timing_0; /* Bit Timing Register byte 0 */
+ u8 dontuse5[15];
+ u8 bit_timing_1; /* Bit Timing Register byte 1 */
+ u8 dontuse6[15];
+ u8 interrupt; /* Interrupt Register */
+ u8 dontuse7[15];
+ u8 rx_error_counter; /* Receive Error Counter */
+ u8 dontuse8[15];
+ u8 tx_error_counter; /* Transmit Error Counter */
+ u8 dontuse9[31];
+ u8 p1_conf;
+ u8 dontuse10[15];
+ u8 p2_conf;
+ u8 dontuse11[15];
+ u8 p1_in;
+ u8 dontuse12[15];
+ u8 p2_in;
+ u8 dontuse13[15];
+ u8 p1_out;
+ u8 dontuse14[15];
+ u8 p2_out;
+ u8 dontuse15[15];
+ u8 serial_reset_addr;
+ };
+ };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI 0x01 /* Initialization */
+#define CTRL_IE 0x02 /* Interrupt Enable */
+#define CTRL_SIE 0x04 /* Status Interrupt Enable */
+#define CTRL_EIE 0x08 /* Error Interrupt Enable */
+#define CTRL_EAF 0x20 /* Enable additional functions */
+#define CTRL_CCE 0x40 /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01 /* Stuff error */
+#define STAT_LEC_FORM 0x02 /* Form error */
+#define STAT_LEC_ACK 0x03 /* Acknowledgement error */
+#define STAT_LEC_BIT1 0x04 /* Bit1 error */
+#define STAT_LEC_BIT0 0x05 /* Bit0 error */
+#define STAT_LEC_CRC 0x06 /* CRC error */
+#define STAT_LEC_MASK 0x07 /* Last Error Code mask */
+#define STAT_TXOK 0x08 /* Transmit Message Successfully */
+#define STAT_RXOK 0x10 /* Receive Message Successfully */
+#define STAT_WAKE 0x20 /* Wake Up Status */
+#define STAT_WARN 0x40 /* Warning Status */
+#define STAT_BOFF 0x80 /* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES 0x01 /* No Interrupt pending */
+#define INTPND_SET 0x02 /* Interrupt pending */
+#define INTPND_UNC 0x03
+#define RXIE_RES 0x04 /* Receive Interrupt Disable */
+#define RXIE_SET 0x08 /* Receive Interrupt Enable */
+#define RXIE_UNC 0x0c
+#define TXIE_RES 0x10 /* Transmit Interrupt Disable */
+#define TXIE_SET 0x20 /* Transmit Interrupt Enable */
+#define TXIE_UNC 0x30
+#define MSGVAL_RES 0x40 /* Message Invalid */
+#define MSGVAL_SET 0x80 /* Message Valid */
+#define MSGVAL_UNC 0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES 0x01 /* No New Data */
+#define NEWDAT_SET 0x02 /* New Data */
+#define NEWDAT_UNC 0x03
+#define MSGLST_RES 0x04 /* No Message Lost */
+#define MSGLST_SET 0x08 /* Message Lost */
+#define MSGLST_UNC 0x0c
+#define CPUUPD_RES 0x04 /* No CPU Updating */
+#define CPUUPD_SET 0x08 /* CPU Updating */
+#define CPUUPD_UNC 0x0c
+#define TXRQST_RES 0x10 /* No Transmission Request */
+#define TXRQST_SET 0x20 /* Transmission Request */
+#define TXRQST_UNC 0x30
+#define RMTPND_RES 0x40 /* No Remote Request Pending */
+#define RMTPND_SET 0x80 /* Remote Request Pending */
+#define RMTPND_UNC 0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD 0x04 /* Extended Identifier */
+#define MSGCFG_DIR 0x08 /* Direction is Transmit */
+
+#define MSGOBJ_FIRST 1
+#define MSGOBJ_LAST 15
+
+#define CC770_IO_SIZE 0x100
+#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX 1
+
+#define cc770_read_reg(priv, member) \
+ priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value) \
+ priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
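+
+/*
+ * Usage sketch: cc770_read_reg(priv, msgobj[1].ctrl0) reads the byte at
+ * register offset 0x10, i.e. control register 0 of message object 1.
+ */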
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX 0x01
+#define CC770_OBJ_FLAG_RTR 0x02
+#define CC770_OBJ_FLAG_EFF 0x04
+
+enum {
+ CC770_OBJ_RX0 = 0, /* for receiving normal messages */
+ CC770_OBJ_RX1, /* for receiving normal messages */
+ CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */
+ CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */
+ CC770_OBJ_TX, /* for sending messages */
+ CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */
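+/* e.g. obj2msgobj(CC770_OBJ_RX0) == 15, obj2msgobj(CC770_OBJ_TX) == 11 */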
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+ struct can_priv can; /* must be the first member */
+ struct sk_buff *echo_skb;
+
+	/* the lower layer is responsible for appropriate locking */
+ u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+ void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+ void (*pre_irq)(const struct cc770_priv *priv);
+ void (*post_irq)(const struct cc770_priv *priv);
+
+ void *priv; /* for board-specific data */
+ struct net_device *dev;
+
+ void __iomem *reg_base; /* ioremap'ed address to registers */
+ unsigned long irq_flags; /* for request_irq() */
+
+ unsigned char obj_flags[CC770_OBJ_MAX];
+ u8 control_normal_mode; /* Control register for normal mode */
+ u8 cpu_interface; /* CPU interface register */
+ u8 clkout; /* Clock out register */
+	u8 bus_config;		/* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644
index 00000000000..4be5fe2c40a
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameter can be defined:
+ *
+ * clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ * cir: CPU interface register (default=0x40 [DSC])
+ * bcr: Bus configuration register (default=0x40 [CBY])
+ * cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT 16000000 /* 16 MHz */
+#define COR_DEFAULT 0x00
+#define BCR_DEFAULT BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+ "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE 0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+ int reg)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ outb(val, base + 1);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ void __iomem *base = NULL;
+ int iosize = CC770_IOSIZE;
+ int idx = pdev->id;
+ int err;
+ u32 clktmp;
+
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ if (mem[idx]) {
+ if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ base = ioremap_nocache(mem[idx], iosize);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ } else {
+ if (indirect[idx] > 0 ||
+ (indirect[idx] == -1 && indirect[0] > 0))
+ iosize = CC770_IOSIZE_INDIRECT;
+ if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap;
+ }
+ priv = netdev_priv(dev);
+
+ dev->irq = irq[idx];
+ priv->irq_flags = IRQF_SHARED;
+ if (mem[idx]) {
+ priv->reg_base = base;
+ dev->base_addr = mem[idx];
+ priv->read_reg = cc770_isa_mem_read_reg;
+ priv->write_reg = cc770_isa_mem_write_reg;
+ } else {
+ priv->reg_base = (void __iomem *)port[idx];
+ dev->base_addr = port[idx];
+
+ if (iosize == CC770_IOSIZE_INDIRECT) {
+ priv->read_reg = cc770_isa_port_read_reg_indirect;
+ priv->write_reg = cc770_isa_port_write_reg_indirect;
+ } else {
+ priv->read_reg = cc770_isa_port_read_reg;
+ priv->write_reg = cc770_isa_port_write_reg;
+ }
+ }
+
+ if (clk[idx])
+ clktmp = clk[idx];
+ else if (clk[0])
+ clktmp = clk[0];
+ else
+ clktmp = CLK_DEFAULT;
+ priv->can.clock.freq = clktmp;
+
+ if (cir[idx] != 0xff) {
+ priv->cpu_interface = cir[idx];
+ } else if (cir[0] != 0xff) {
+ priv->cpu_interface = cir[0];
+ } else {
+ /* The system clock may not exceed 10 MHz */
+ if (clktmp > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ clktmp /= 2;
+ }
+ /* The memory clock may not exceed 8 MHz */
+ if (clktmp > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+ }
+
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ if (bcr[idx] != 0xff)
+ priv->bus_config = bcr[idx];
+ else if (bcr[0] != 0xff)
+ priv->bus_config = bcr[0];
+ else
+ priv->bus_config = BCR_DEFAULT;
+
+ if (cor[idx] != 0xff)
+ priv->clkout = cor[idx];
+ else if (cor[0] != 0xff)
+ priv->clkout = cor[0];
+ else
+ priv->clkout = COR_DEFAULT;
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register device (err=%d)\n", err);
+ goto exit_unmap;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+ return 0;
+
+ exit_unmap:
+ if (mem[idx])
+ iounmap(base);
+ exit_release:
+ if (mem[idx])
+ release_mem_region(mem[idx], iosize);
+ else
+ release_region(port[idx], iosize);
+ exit:
+ return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
+
+ unregister_cc770dev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (mem[idx]) {
+ iounmap(priv->reg_base);
+ release_mem_region(mem[idx], CC770_IOSIZE);
+ } else {
+ if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+ release_region(port[idx], CC770_IOSIZE_INDIRECT);
+ else
+ release_region(port[idx], CC770_IOSIZE);
+ }
+ free_cc770dev(dev);
+
+ return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+ .probe = cc770_isa_probe,
+ .remove = __devexit_p(cc770_isa_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cc770_isa_init(void)
+{
+ int idx, err;
+
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ cc770_isa_devs[idx] =
+ platform_device_alloc(KBUILD_MODNAME, idx);
+ if (!cc770_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(cc770_isa_devs[idx]);
+ if (err) {
+ platform_device_put(cc770_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("insufficient parameters supplied\n");
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&cc770_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+
+ return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+ int idx;
+
+ platform_driver_unregister(&cc770_isa_driver);
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644
index 00000000000..53115eee807
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data are used you should have similar definitions
+ * in your board-specific code:
+ *
+ * static struct cc770_platform_data myboard_cc770_pdata = {
+ * .osc_freq = 16000000,
+ * .cir = 0x41,
+ * .cor = 0x20,
+ * .bcr = 0x40,
+ * };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ * can@3,100 {
+ * compatible = "bosch,cc770";
+ * reg = <3 0x100 0x80>;
+ * interrupts = <2 0>;
+ * interrupt-parent = <&mpic>;
+ * bosch,external-clock-frequency = <16000000>;
+ * };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK 16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+ u8 val)
+{
+ iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const u32 *prop;
+ int prop_size;
+ u32 clkext;
+
+ prop = of_get_property(np, "bosch,external-clock-frequency",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ clkext = *prop;
+ else
+ clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+ priv->can.clock.freq = clkext;
+
+ /* The system clock may not exceed 10 MHz */
+ if (priv->can.clock.freq > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ priv->can.clock.freq /= 2;
+ }
+
+ /* The memory clock may not exceed 8 MHz */
+ if (priv->can.clock.freq > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+
+ if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ priv->cpu_interface |= CPUIF_DMC;
+ if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ priv->cpu_interface |= CPUIF_MUX;
+
+ if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ priv->bus_config |= BUSCFG_CBY;
+ if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ priv->bus_config |= BUSCFG_DR0;
+ if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ priv->bus_config |= BUSCFG_DR1;
+ if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ priv->bus_config |= BUSCFG_DT1;
+ if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ priv->bus_config |= BUSCFG_POL;
+
+ prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+ u32 cdv = clkext / *prop;
+ int slew;
+
+ if (cdv > 0 && cdv < 16) {
+ priv->cpu_interface |= CPUIF_CEN;
+ priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+ prop = of_get_property(np, "bosch,slew-rate",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32))) {
+ slew = *prop;
+ } else {
+ /* Determine default slew rate */
+ slew = (CLKOUT_SL_MASK >>
+ CLKOUT_SL_SHIFT) -
+ ((cdv * clkext - 1) / 8000000);
+ if (slew < 0)
+ slew = 0;
+ }
+ priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+ CLKOUT_SL_MASK;
+ } else {
+ dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+ }
+ }
+
+ return 0;
+}
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+
+ struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+ priv->can.clock.freq = pdata->osc_freq;
+ priv->clkout = pdata->cor;
+ priv->bus_config = pdata->bcr;
+ priv->cpu_interface = pdata->cir;
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ struct resource *mem;
+ resource_size_t mem_size;
+ void __iomem *base;
+ int err, irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq <= 0)
+ return -ENODEV;
+
+ mem_size = resource_size(mem);
+ if (!request_mem_region(mem->start, mem_size, pdev->name))
+ return -EBUSY;
+
+ base = ioremap(mem->start, mem_size);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap_mem;
+ }
+
+ dev->irq = irq;
+ priv = netdev_priv(dev);
+ priv->read_reg = cc770_platform_read_reg;
+ priv->write_reg = cc770_platform_write_reg;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = base;
+
+ if (pdev->dev.of_node)
+ err = cc770_get_of_node_data(pdev, priv);
+ else if (pdev->dev.platform_data)
+ err = cc770_get_platform_data(pdev, priv);
+ else
+ err = -ENODEV;
+ if (err)
+ goto exit_free_cc770;
+
+ dev_dbg(&pdev->dev,
+ "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+ "bus_config=0x%02x clkout=0x%02x\n",
+ priv->reg_base, dev->irq, priv->can.clock.freq,
+ priv->cpu_interface, priv->bus_config, priv->clkout);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register CC770 device (err=%d)\n", err);
+ goto exit_free_cc770;
+ }
+
+ return 0;
+
+exit_free_cc770:
+ free_cc770dev(dev);
+exit_unmap_mem:
+ iounmap(base);
+exit_release_mem:
+ release_mem_region(mem->start, mem_size);
+
+ return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_cc770dev(dev);
+ iounmap(priv->reg_base);
+ free_cc770dev(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+ {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */
+ {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cc770_platform_table,
+ },
+ .probe = cc770_platform_probe,
+ .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
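As the header comment of this file describes, the non-DT case expects board code to register a matching platform device that carries a struct cc770_platform_data plus memory and IRQ resources, which the probe above retrieves with platform_get_resource()/platform_get_irq(). A hedged sketch of such a board definition (addresses, IRQ number and register values below are placeholders, not taken from any real board):

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/can/platform/cc770.h>

/* All values here are illustrative only. */
static struct cc770_platform_data myboard_cc770_pdata = {
	.osc_freq = 16000000,
	.cir = 0x41,
	.cor = 0x20,
	.bcr = 0x40,
};

static struct resource myboard_cc770_resources[] = {
	{
		.start = 0xf0000100,	/* hypothetical register window */
		.end   = 0xf000017f,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 42,		/* hypothetical interrupt line */
		.end   = 42,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device myboard_cc770_device = {
	.name		= "cc770_platform",	/* must match DRV_NAME above */
	.id		= 0,
	.resource	= myboard_cc770_resources,
	.num_resources	= ARRAY_SIZE(myboard_cc770_resources),
	.dev		= {
		.platform_data = &myboard_cc770_pdata,
	},
};
/* registered from board init via platform_device_register(&myboard_cc770_device); */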
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 25695bde054..120f1ab5a2c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index e02337953f4..7fd8089946f 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -802,7 +802,7 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
err = open_candev(dev);
if (err)
@@ -824,7 +824,7 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
@@ -838,7 +838,7 @@ static int flexcan_close(struct net_device *dev)
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
close_candev(dev);
@@ -877,7 +877,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -911,7 +911,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
out:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
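clk_prepare_enable() and clk_disable_unprepare(), used in the hunks above, are the common clock framework helpers that pair the possibly sleeping prepare/unprepare step with enable/disable. They behave essentially as in the following sketch; the real static inlines live in include/linux/clk.h, and the example_ prefix here is only to avoid pretending these are the verbatim definitions:

#include <linux/clk.h>

static inline int example_clk_prepare_enable(struct clk *clk)
{
	int ret = clk_prepare(clk);	/* may sleep */

	if (ret)
		return ret;
	ret = clk_enable(clk);		/* safe in atomic context */
	if (ret)
		clk_unprepare(clk);
	return ret;
}

static inline void example_clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}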
@@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = {
.remove = __devexit_p(flexcan_remove),
};
-static int __init flexcan_init(void)
-{
- pr_info("%s netdevice driver\n", DRV_NAME);
- return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
- platform_driver_unregister(&flexcan_driver);
- pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
"Marc Kleine-Budde <kernel@pengutronix.de>");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32778d56d33..08c893cb789 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
.remove = __devexit_p(ican3_remove),
};
-static int __init ican3_init(void)
-{
- return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
- platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
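The repeated module_platform_driver() conversions in this series replace hand-written init/exit pairs with a single macro invocation; for a given driver it generates roughly the boilerplate sketched below (shown here for ican3_driver; see include/linux/platform_device.h for the exact macro):

/* Roughly what module_platform_driver(ican3_driver) expands to: */
static int __init ican3_driver_init(void)
{
	return platform_driver_register(&ican3_driver);
}
module_init(ican3_driver_init);

static void __exit ican3_driver_exit(void)
{
	platform_driver_unregister(&ican3_driver);
}
module_exit(ican3_driver_exit);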
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5fedc337556..5caa572d71e 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
#endif
};
-static int __init mpc5xxx_can_init(void)
-{
- return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
- platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ec4a3119e2c..1c82dd8b896 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
priv->open_time = jiffies;
- clrbits8(&regs->canctl1, MSCAN_LISTEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setbits8(&regs->canctl1, MSCAN_LISTEN);
+ else
+ clrbits8(&regs->canctl1, MSCAN_LISTEN);
ret = mscan_start(dev);
if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index fe9e64d476e..36e9d594069 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -6,7 +6,6 @@ if CAN_SJA1000
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
---help---
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a4..2c7f5036f57 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 496223e9e2f..90c5c2dfd2f 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
static int __devinitdata irq[MAXDEV];
static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
module_param_array(mem, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
#define SJA1000_IOSIZE 0x20
#define SJA1000_IOSIZE_INDIRECT 0x02
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
{
return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
outb(val, base + 1);
}
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
- if (port[idx] || mem[idx]) {
- if (irq[idx])
- return 1;
- } else if (idx)
- return 0;
-
- dev_err(pdev, "insufficient parameters supplied\n");
- return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sja1000_priv *priv;
void __iomem *base = NULL;
int iosize = SJA1000_IOSIZE;
+ int idx = pdev->id;
int err;
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+
if (mem[idx]) {
if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
else
priv->can.clock.freq = CLK_DEFAULT / 2;
- if (ocr[idx] != -1)
- priv->ocr = ocr[idx] & 0xff;
- else if (ocr[0] != -1)
- priv->ocr = ocr[0] & 0xff;
+ if (ocr[idx] != 0xff)
+ priv->ocr = ocr[idx];
+ else if (ocr[0] != 0xff)
+ priv->ocr = ocr[0];
else
priv->ocr = OCR_DEFAULT;
- if (cdr[idx] != -1)
- priv->cdr = cdr[idx] & 0xff;
- else if (cdr[0] != -1)
- priv->cdr = cdr[0] & 0xff;
+ if (cdr[idx] != 0xff)
+ priv->cdr = cdr[idx];
+ else if (cdr[0] != 0xff)
+ priv->cdr = cdr[0];
else
priv->cdr = CDR_DEFAULT;
- dev_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, pdev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_sja1000dev(dev);
if (err) {
- dev_err(pdev, "registering %s failed (err=%d)\n",
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
goto exit_unmap;
}
- dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+ dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
return err;
}
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
{
- struct net_device *dev = dev_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
unregister_sja1000dev(dev);
- dev_set_drvdata(pdev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
if (mem[idx]) {
iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
return 0;
}
-static struct isa_driver sja1000_isa_driver = {
- .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
.remove = __devexit_p(sja1000_isa_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
static int __init sja1000_isa_init(void)
{
- int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+ int idx, err;
+
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ sja1000_isa_devs[idx] =
+ platform_device_alloc(DRV_NAME, idx);
+ if (!sja1000_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(sja1000_isa_devs[idx]);
+ if (err) {
+ platform_device_put(sja1000_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("%s: insufficient parameters supplied\n",
+ DRV_NAME);
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&sja1000_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("Legacy %s driver for max. %d devices registered\n",
+ DRV_NAME, MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
- if (!err)
- printk(KERN_INFO
- "Legacy %s driver for max. %d devices registered\n",
- DRV_NAME, MAXDEV);
return err;
}
static void __exit sja1000_isa_exit(void)
{
- isa_unregister_driver(&sja1000_isa_driver);
+ int idx;
+
+ platform_driver_unregister(&sja1000_isa_driver);
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
}
module_init(sja1000_isa_init);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index c3dd9d09be5..f2683eb6a3d 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
.remove = __devexit_p(sja1000_ofp_remove),
};
-static int __init sja1000_ofp_init(void)
-{
- return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
- return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d9fadc489b3..4f50145f648 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
},
};
-static int __init sp_init(void)
-{
- return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a979b006f45..3f1ebcc2cb8 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 09a8b86cf1a..a7c77c744ee 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
.remove = __devexit_p(softing_pdev_remove),
};
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
- return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
- platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
+MODULE_ALIAS("platform:softing");
MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2adc294f512..df809e3f130 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = {
.resume = ti_hecc_resume,
};
-static int __init ti_hecc_init_driver(void)
-{
- printk(KERN_INFO DRV_DESC "\n");
- return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
- printk(KERN_INFO DRV_DESC " unloaded\n");
- platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index a72c7bfb409..9697c14b8dc 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1115,28 +1115,4 @@ static struct usb_driver ems_usb_driver = {
.id_table = ems_usb_table,
};
-static int __init ems_usb_init(void)
-{
- int err;
-
- printk(KERN_INFO "CPC-USB kernel driver loaded\n");
-
- /* register this driver with the USB subsystem */
- err = usb_register(&ems_usb_driver);
-
- if (err) {
- err("usb_register failed. Error number %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ems_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&ems_usb_driver);
-}
-
-module_init(ems_usb_init);
-module_exit(ems_usb_exit);
+module_usb_driver(ems_usb_driver);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index eb8b0e60028..92774637aad 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1108,25 +1108,4 @@ static struct usb_driver esd_usb2_driver = {
.id_table = esd_usb2_table,
};
-static int __init esd_usb2_init(void)
-{
- int err;
-
- /* register this driver with the USB subsystem */
- err = usb_register(&esd_usb2_driver);
-
- if (err) {
- err("usb_register failed. Error number %d\n", err);
- return err;
- }
-
- return 0;
-}
-module_init(esd_usb2_init);
-
-static void __exit esd_usb2_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&esd_usb2_driver);
-}
-module_exit(esd_usb2_exit);
+module_usb_driver(esd_usb2_driver);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f93e2d6fc88..ea2d9428593 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
* See Documentation/networking/can.txt for details.
*/
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, S_IRUGO);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 00000000000..dd151d53d50
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+ depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+ tristate
+ default n
+
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+ bool
+ default n
+
+config NET_DSA_MV88E6131
+ tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_MV88E6XXX_NEED_PPU
+ select NET_DSA_TAG_DSA
+ ---help---
+ This enables support for the Marvell 88E6085/6095/6095F/6131
+ ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+ tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6123/6161/6165
+ ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 00000000000..f3bda05536c
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
diff --git a/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 8f4ff5a2c81..7fc4e81d4d4 100644
--- a/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -11,7 +11,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
#define REG_PORT(p) (8 + (p))
#define REG_GLOBAL 0x0f
@@ -286,3 +286,8 @@ static void __exit mv88e6060_cleanup(void)
unregister_switch_driver(&mv88e6060_switch_driver);
}
module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 52faaa21a4d..c0a458fc698 100644
--- a/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -11,7 +11,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
#include "mv88e6xxx.h"
static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
@@ -419,7 +419,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
}
-static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.tag_protocol = cpu_to_be16(ETH_P_EDSA),
.priv_size = sizeof(struct mv88e6xxx_priv_state),
.probe = mv88e6123_61_65_probe,
@@ -433,15 +433,6 @@ static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
.get_sset_count = mv88e6123_61_65_get_sset_count,
};
-static int __init mv88e6123_61_65_init(void)
-{
- register_switch_driver(&mv88e6123_61_65_switch_driver);
- return 0;
-}
-module_init(mv88e6123_61_65_init);
-
-static void __exit mv88e6123_61_65_cleanup(void)
-{
- unregister_switch_driver(&mv88e6123_61_65_switch_driver);
-}
-module_exit(mv88e6123_61_65_cleanup);
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
diff --git a/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 9bd1061fa4e..e0eb6824383 100644
--- a/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -11,7 +11,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
#include "mv88e6xxx.h"
/*
@@ -415,7 +415,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds)
return ARRAY_SIZE(mv88e6131_hw_stats);
}
-static struct dsa_switch_driver mv88e6131_switch_driver = {
+struct dsa_switch_driver mv88e6131_switch_driver = {
.tag_protocol = cpu_to_be16(ETH_P_DSA),
.priv_size = sizeof(struct mv88e6xxx_priv_state),
.probe = mv88e6131_probe,
@@ -429,15 +429,7 @@ static struct dsa_switch_driver mv88e6131_switch_driver = {
.get_sset_count = mv88e6131_get_sset_count,
};
-static int __init mv88e6131_init(void)
-{
- register_switch_driver(&mv88e6131_switch_driver);
- return 0;
-}
-module_init(mv88e6131_init);
-
-static void __exit mv88e6131_cleanup(void)
-{
- unregister_switch_driver(&mv88e6131_switch_driver);
-}
-module_exit(mv88e6131_cleanup);
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
diff --git a/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index efe661a9def..5467c040824 100644
--- a/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -11,7 +11,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
#include "mv88e6xxx.h"
/*
@@ -520,3 +520,30 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
mutex_unlock(&ps->stats_mutex);
}
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+ return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
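IS_ENABLED(CONFIG_FOO) is true when the option is built in or modular, so the registrations above are compiled in whenever either sub-driver is selected, whether =y or =m. Conceptually (a sketch only; the authoritative macros are in include/linux/kconfig.h):

/* Conceptual sketch; see include/linux/kconfig.h for the real definitions. */
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))
/* IS_BUILTIN(CONFIG_FOO): 1 if CONFIG_FOO=y, else 0 */
/* IS_MODULE(CONFIG_FOO):  1 if CONFIG_FOO=m, else 0 */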
diff --git a/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 61156ca26a0..fc2cd7b90e8 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -71,6 +71,9 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data);
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
#define REG_READ(addr, reg) \
({ \
int __ret; \
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index a7c5e8831e8..087648ea1ed 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
- dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 972f80ecc51..da410f03686 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
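The get_drvinfo conversions throughout this series replace unbounded strcpy()/sprintf() with strlcpy()/snprintf() bounded by sizeof() of the destination, since the struct ethtool_drvinfo fields are fixed-size char arrays. A minimal sketch of the resulting pattern (the driver and bus strings are placeholders; the field names are the real ethtool ones):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* Bounded copies: size by the destination, never the source. */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "BUS 0x%lx", dev->base_addr);
}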
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b42c06baba8..8153a3e0a1a 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strcpy(info->bus_info, dev_name(vp->gendev));
+ strlcpy(info->bus_info, dev_name(vp->gendev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "EISA 0x%lx %d",
- dev->base_addr, dev->irq);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "EISA 0x%lx %d", dev->base_addr, dev->irq);
}
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 20ea07508ac..6d6bc754b1a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if(tp->card_state == Sleeping) {
- strcpy(info->fw_version, "Sleep image");
+ strlcpy(info->fw_version, "Sleep image",
+ sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strcpy(info->fw_version, "Unknown runtime");
+ strlcpy(info->fw_version, "Unknown runtime",
+ sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
- snprintf(info->fw_version, 32, "%02x.%03x.%03x",
- sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
- sleep_ver & 0xfff);
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%02x.%03x.%03x", sleep_ver >> 24,
+ (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
}
}
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4c78f..ef325ffa1b5 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -14,8 +14,6 @@
#define TX_PAGES 12 /* Two Tx slots */
-#define ETHER_ADDR_LEN 6
-
/* The 8390 specific per-packet-header format. */
struct e8390_pkt_hdr {
unsigned char status; /* status */
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 547737340cb..3ad5d2f9a49 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e9f8432f55b..9e8ba4f5636 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -735,15 +735,14 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] =
ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
ax_reset_8390(dev);
@@ -991,18 +990,7 @@ static struct platform_driver axdrv = {
.resume = ax_resume,
};
-static int __init axdrv_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575ecff..6428f9e7a55 100644
--- a/drivers/net/ethernet/8390/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- for (i = 0; i < ETHER_ADDR_LEN ; i++)
+ for (i = 0; i < ETH_ALEN ; i++)
dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
/* Check the Racal vendor ID as well. */
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index eeac843dcd2..d42938b6b59 100644
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
/* Retrieve and checksum the station address. */
outw(MAC_Page, ioaddr + HP_PAGING);
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for(i = 0; i < ETH_ALEN; i++) {
unsigned char inval = inb(ioaddr + 8 + i);
dev->dev_addr[i] = inval;
checksum += inval;
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4a7c0..113f1e075a2 100644
--- a/drivers/net/ethernet/8390/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for(i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + i);
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 3dac937a67c..5370c884620 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
if (!dev)
return -ENOMEM;
- for(j = 0; j < ETHER_ADDR_LEN; j++)
+ for (j = 0; j < ETH_ALEN; j++)
dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
/* We must set the 8390 for word mode. */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d20177..69490ae018e 100644
--- a/drivers/net/ethernet/8390/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
|| inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
|| inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
printk("lne390.c: card not found");
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
printk(" (invalid prefix).\n");
return -ENODEV;
}
#endif
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
0xa+revision, ioaddr/0x1000, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index cd36a6a5f40..9b9c77d5a65 100644
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093b3af..f92ea2a65a5 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
#ifdef CONFIG_PLAT_MAPPI
outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ioaddr + E8390_CMD); /* 0x61 */
- for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i]
= inb_p(ioaddr + EN1_PHYS_SHIFT(i));
}
#else
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i];
}
#endif
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc699634..922b32036c6 100644
--- a/drivers/net/ethernet/8390/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
dev->base_addr = base_addr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 39923425ba2..3fab04a0034 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2aee88..2a3e8057fea 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
#endif
port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
edev->slot, ifmap[port_index], dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a84bc7..3b903759980 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = stnic_eadr[i];
/* Set the base address to point to the NIC, not the "real" base! */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 3aa9fe9999b..bcd27323b20 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
if (i)
return i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 597f4d45c63..3474a61d470 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde04026..08d5f038887 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,10 +10,11 @@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_ATMEL) += cadence/
+obj-$(CONFIG_NET_CADENCE) += cadence/
obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 6d9f6911000..cb4f38a17f2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
#endif /* VLAN_SUPPORT */
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 442fefa4f2c..c885aa905de 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
.remove = __devexit_p(greth_of_remove),
};
-static int __init greth_init(void)
-{
- return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
- platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a9745f4ddbf..33e0a8c20f6 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i );
/* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strcpy (info->driver, MODULE_NAME);
- strcpy (info->version, MODULE_VERS);
- sprintf(info->fw_version,"%u",chip_version);
- strcpy (info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], lp->mmio + PADR + i );
spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
}
/* Initializing MAC address */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = readb(lp->mmio + PADR + i);
/* Setting user defined parametrs */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a12dd..8baa3527ba7 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -586,7 +586,6 @@ typedef enum {
#define PKT_BUFF_SZ 1536
#define MIN_PKT_LEN 60
-#define ETH_ADDR_LEN 6
#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
@@ -808,8 +807,8 @@ typedef enum {
static int card_idx;
static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
static unsigned int chip_version;
#endif /* _AMD8111E_H */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 4865ff14beb..cc9262be69c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1339,18 +1339,7 @@ static struct platform_driver au1000_eth_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
- return platform_driver_register(&au1000_eth_driver);
-}
-static void __exit au1000_exit_module(void)
-{
- platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 3accd5d21b0..6be0dd67631 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -160,8 +160,6 @@ Include Files
Defines
---------------------------------------------------------------------------- */
-#define ETHER_ADDR_LEN ETH_ALEN
- /* 6 bytes in an Ethernet Address */
#define MACE_LADRF_LEN 8
/* 8 bytes in Logical Address Filter */
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
}
}
/* Set PADR register */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
/* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
/* Read the ethernet address from the CIS. */
len = pcmcia_get_tuple(link, 0x80, &buf);
- if (!buf || len < ETHER_ADDR_LEN) {
+ if (!buf || len < ETH_ALEN) {
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int adr[ETH_ALEN] = {0}; /* Ethernet address */
struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
netdev_for_each_mc_addr(ha, dev) {
- memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+ memcpy(adr, ha->addr, ETH_ALEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f92bc6e3482..20e6dab0186 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 8fda457f94c..7ea16d32a5f 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
.remove = __devexit_p(sunlance_sbus_remove),
};
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
- return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
- platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d0aaf..0a9326aa58b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 02c7ed8d9ec..b8591246eb4 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
}
}
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1c_vlan_mode(netdev, features);
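The u32 to netdev_features_t changes in the atl drivers track the core networking switch to a 64-bit feature-flag type; callbacks and locals holding feature masks must use netdev_features_t so flags above bit 31 are not truncated. An illustrative ndo_fix_features-style callback using the wide type throughout, mirroring the RX/TX VLAN coupling these drivers enforce:

#include <linux/netdevice.h>

/* netdev_features_t is typedef'd to u64 in include/linux/netdev_features.h. */
static netdev_features_t example_fix_features(struct net_device *netdev,
					      netdev_features_t features)
{
	/* Keep RX and TX VLAN acceleration in sync, as the atl drivers do. */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}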
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438d365..6e61f9f9ebb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl1e_driver_name, 32);
- strncpy(drvinfo->version, atl1e_driver_version, 32);
- strncpy(drvinfo->fw_version, "L1e", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1e_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl1e_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 95483bcac1d..c915c087381 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
return 0;
}
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1e_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 33a4e35f5ee..9bd20497664 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1feae5928a4..071f4c85896 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl2_driver_name, 32);
- strncpy(drvinfo->version, atl2_driver_version, 32);
- strncpy(drvinfo->fw_version, "L2", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl2_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl2_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b5745..8ff7411094d 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atlx_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc12..3fb66d09ece 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
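
b44_tx() is called from the driver's interrupt path, where a plain dev_kfree_skb() is not allowed; dev_kfree_skb_irq() queues the skb to be freed later in softirq context instead. A hedged sketch of how the freeing variants are usually chosen:

    if (in_irq() || irqs_disabled())
            dev_kfree_skb_irq(skb);   /* defer the actual free to softirq */
    else
            dev_kfree_skb(skb);       /* fine in process/softirq context */

dev_kfree_skb_any() makes exactly this decision internally and is the usual choice when the calling context is not known in advance.
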
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a11a8ad9422..d44331eb07f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1469,7 +1469,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
return 0;
}
-static struct ethtool_ops bcm_enet_ethtool_ops = {
+static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
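
Tables of function pointers that are never written at runtime can be declared const so they land in read-only data; struct net_device already stores ethtool_ops as a pointer to const, so no other change is needed. Roughly (foo_* names are placeholders):

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_drvinfo = foo_get_drvinfo,
            .get_link    = ethtool_op_get_link,
    };

    /* during probe */
    netdev->ethtool_ops = &foo_ethtool_ops;
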
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c7235804..021fb818007 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -57,11 +57,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.1.11"
-#define DRV_MODULE_RELDATE "July 20, 2011"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION "2.2.1"
+#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
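
rcu_assign_pointer() contains a write barrier so readers can never observe a pointer to a half-initialised object; when the value being stored is NULL there is nothing to order against, so the cheaper RCU_INIT_POINTER() is sufficient (and keeps sparse quiet). The two sides of the pattern, sketched around an RCU-protected ops pointer like bp->cnic_ops:

    /* registration: publish a fully initialised structure */
    rcu_assign_pointer(bp->cnic_ops, ops);

    /* unregistration: clearing to NULL needs no ordering */
    RCU_INIT_POINTER(bp->cnic_ops, NULL);
    mutex_unlock(&bp->cnic_lock);
    synchronize_rcu();      /* wait for in-flight readers before teardown */
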
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
if (bp->autoneg & AUTONEG_SPEED) {
u32 adv_reg, adv1000_reg;
- u32 new_adv_reg = 0;
- u32 new_adv1000_reg = 0;
+ u32 new_adv = 0;
+ u32 new_adv1000 = 0;
bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- if (bp->advertising & ADVERTISED_10baseT_Half)
- new_adv_reg |= ADVERTISE_10HALF;
- if (bp->advertising & ADVERTISED_10baseT_Full)
- new_adv_reg |= ADVERTISE_10FULL;
- if (bp->advertising & ADVERTISED_100baseT_Half)
- new_adv_reg |= ADVERTISE_100HALF;
- if (bp->advertising & ADVERTISED_100baseT_Full)
- new_adv_reg |= ADVERTISE_100FULL;
- if (bp->advertising & ADVERTISED_1000baseT_Full)
- new_adv1000_reg |= ADVERTISE_1000FULL;
+ new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+ new_adv |= ADVERTISE_CSMA;
+ new_adv |= bnx2_phy_get_pause_adv(bp);
- new_adv_reg |= ADVERTISE_CSMA;
+ new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
- new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
- if ((adv1000_reg != new_adv1000_reg) ||
- (adv_reg != new_adv_reg) ||
+ if ((adv1000_reg != new_adv1000) ||
+ (adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
- bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
- bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+ bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
BMCR_ANENABLE);
}
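
The open-coded ADVERTISED_* to ADVERTISE_* translation is replaced by the ethtool_adv_to_mii_adv_t() and ethtool_adv_to_mii_ctrl1000_t() helpers from linux/mii.h. A small illustration of what they compute (values shown assume no pause bits are advertised):

    #include <linux/ethtool.h>
    #include <linux/mii.h>

    u32 advertising = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full |
                      ADVERTISED_1000baseT_Full;

    /* 10/100 bits destined for the MII_ADVERTISE register */
    u32 adv = ethtool_adv_to_mii_adv_t(advertising);
    /* adv == ADVERTISE_10FULL | ADVERTISE_100FULL */

    /* gigabit bits destined for the MII_CTRL1000 register */
    u32 ctrl1000 = ethtool_adv_to_mii_ctrl1000_t(advertising);
    /* ctrl1000 == ADVERTISE_1000FULL */
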
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
- unsigned long align;
- skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
- if (skb == NULL) {
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
return -ENOMEM;
- }
- if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
- skb_reserve(skb, BNX2_RX_ALIGN - align);
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ mapping = dma_map_single(&bp->pdev->dev,
+ get_l2_fhdr(data),
+ bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- rx_buf->skb = skb;
- rx_buf->desc = (struct l2_fhdr *) skb->data;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
int tx_pkt = 0, index;
+ unsigned int tx_bytes = 0;
struct netdev_queue *txq;
index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
+ tx_bytes += skb->len;
dev_kfree_skb(skb);
tx_pkt++;
if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
+ netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
txr->hw_tx_cons = hw_cons;
txr->tx_cons = sw_cons;
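
The netdev_tx_sent_queue()/netdev_tx_completed_queue() calls added in this file wire the driver up to byte queue limits (BQL): the stack tracks bytes in flight per TX queue and throttles the queue before hardware queueing latency grows unbounded. The overall pattern, kept deliberately minimal:

    /* ndo_start_xmit(): account the bytes handed to the hardware */
    netdev_tx_sent_queue(txq, skb->len);

    /* TX completion path: report what was reclaimed in this pass */
    netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

    /* ring teardown: clear BQL state, otherwise the queue may stay
     * throttled across a down/up cycle
     */
    netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue_index));
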
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
- struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ u8 *data, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
rxr->rx_prod_bseq += bp->rx_buf_use_size;
- prod_rx_buf->skb = skb;
- prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+ prod_rx_buf->data = data;
if (cons == prod)
return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
+ struct sk_buff *skb;
- err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+ err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
- return err;
+ return NULL;
}
- skb_reserve(skb, BNX2_RX_OFFSET);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
-
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto error;
+ }
+ skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
if (hdr_len == 0) {
skb_put(skb, len);
- return 0;
+ return skb;
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
- return 0;
+ return skb;
}
rx_pg = &rxr->rx_pg_ring[pg_cons];
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
- return err;
+ return NULL;
}
dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
rxr->rx_pg_cons = pg_cons;
}
- return 0;
+ return skb;
}
static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u8 *data;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
- skb = rx_buf->skb;
- prefetchw(skb);
-
- next_rx_buf =
- &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
- prefetch(next_rx_buf->desc);
+ data = rx_buf->data;
+ rx_buf->data = NULL;
- rx_buf->skb = NULL;
+ rx_hdr = get_l2_fhdr(data);
+ prefetch(rx_hdr);
dma_addr = dma_unmap_addr(rx_buf, mapping);
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = rx_buf->desc;
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(get_l2_fhdr(next_rx_buf->data));
+
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME))) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
if (pg_ring_used) {
int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
len -= 4;
if (len <= bp->rx_copy_thresh) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + 6);
- if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ skb = netdev_alloc_skb(bp->dev, len + 6);
+ if (skb == NULL) {
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb,
- BNX2_RX_OFFSET - 6,
- new_skb->data, len + 6);
- skb_reserve(new_skb, 6);
- skb_put(new_skb, len);
+ memcpy(skb->data,
+ (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+ len + 6);
+ skb_reserve(skb, 6);
+ skb_put(skb, len);
- bnx2_reuse_rx_skb(bp, rxr, skb,
+ bnx2_reuse_rx_data(bp, rxr, data,
sw_ring_cons, sw_ring_prod);
- skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
- dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
- goto next_rx;
-
+ } else {
+ skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+ (sw_ring_cons << 16) | sw_ring_prod);
+ if (!skb)
+ goto next_rx;
+ }
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
- sizeof(struct skb_shared_info);
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
}
bp->rx_buf_use_size = rx_size;
- /* hw alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+ /* hw alignment + build_skb() overhead */
+ bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
}
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring_idx; j++) {
struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
- dev_kfree_skb(skb);
+ kfree(data);
}
for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
- struct sk_buff *skb, *rx_skb;
+ struct sk_buff *skb;
+ u8 *data;
unsigned char *packet;
u16 rx_start_idx, rx_idx;
dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
- rx_skb = rx_buf->skb;
+ data = rx_buf->data;
- rx_hdr = rx_buf->desc;
- skb_reserve(rx_skb, BNX2_RX_OFFSET);
+ rx_hdr = get_l2_fhdr(data);
+ data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
for (i = 14; i < pkt_size; i++) {
- if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+ if (*(data + i) != (unsigned char) (i & 0xff)) {
goto loopback_test_done;
}
}
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+ netdev_tx_sent_queue(txq, skb->len);
+
prod = NEXT_TX_BD(prod);
txr->tx_prod_bseq += skb->len;
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->bus_info, pci_name(bp->pdev));
- strcpy(info->fw_version, bp->fw_version);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
return 0;
}
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
}
static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7d6aa..1db2d51ba3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc()'ed data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_bd {
- struct sk_buff *skb;
- struct l2_fhdr *desc;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+ return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
struct sw_pg {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
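
get_l2_fhdr() recomputes the hardware header location from the raw buffer because, with build_skb(), the RX ring only holds kmalloc()'ed data and the sk_buff is created after DMA completes. A simplified sketch of that pattern (frame_len is a placeholder; the exact bnx2 sizing also folds in BNX2_RX_ALIGN):

    unsigned int size;
    u8 *data;
    struct sk_buff *skb;

    /* headroom + frame, plus the skb_shared_info that build_skb()
     * expects at the tail of the same allocation
     */
    size = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    data = kmalloc(size, GFP_ATOMIC);
    if (!data)
            return -ENOMEM;

    /* ... hardware DMAs the frame into data + NET_SKB_PAD ... */

    skb = build_skb(data);          /* no copy: skb->head == data */
    if (!skb) {
            kfree(data);
            return -ENOMEM;
    }
    skb_reserve(skb, NET_SKB_PAD);  /* skip the reserved headroom */
    skb_put(skb, frame_len);
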
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212ac98..8c73d34b2ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.30-0"
-#define DRV_MODULE_RELDATE "2011/10/25"
+#define DRV_MODULE_VERSION "1.70.35-0"
+#define DRV_MODULE_RELDATE "2011/11/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
/* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc()'ed data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_rx_bd {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -411,8 +416,7 @@ union db_prod {
/* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
- BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
@@ -425,8 +429,8 @@ union host_hc_status_block {
struct bnx2x_agg_info {
/*
- * First aggregation buffer is an skb, the following - are pages.
- * We will preallocate the skbs for each aggregation when
+ * First aggregation buffer is a data buffer, the following ones are pages.
+ * We will preallocate the data buffer for each aggregation when
* we open the interface and will replace the BD at the consumer
* with this one when we receive the TPA_START CQE in order to
* keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
u16 parsing_flags;
u16 vlan_tag;
u16 len_on_bd;
+ u32 rxhash;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
__le16 fp_hc_idx;
u8 index; /* number in fp array */
+ u8 rx_queue; /* index for skb_record */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_MODE(bp) (bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+ u32 boot_mode;
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+
+ union drv_info_to_mcp drv_info_to_mcp;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
enum {
BNX2X_PORT_QUERY_IDX,
BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
BNX2X_FIRST_QUEUE_QUERY_IDX,
};
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+ struct stats_query_entry query[FP_SB_MAX_E1x+
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
};
struct bnx2x_fw_stats_data {
struct stats_counter storm_counters;
struct per_port_stats port;
struct per_pf_stats pf;
+ struct fcoe_statistics_params fcoe;
struct per_queue_stats queue_stats[1];
};
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
+ BNX2X_SP_RTNL_FAN_FAILURE,
};
@@ -1186,10 +1200,20 @@ struct bnx2x {
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
- L1_CACHE_SHIFT : 8)
- /* FW use 2 Cache lines Alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
+
+ /* FW uses 2 cache lines alignment for start packet and size
+ *
+ * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END \
+ max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@ struct bnx2x {
#define NO_ISCSI_OOO_FLAG (1 << 13)
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
+#define BC_SUPPORTS_PFC_STATS (1 << 17)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
-#define RSS_FLAGS(bp) \
- (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
- (bp->multi_mode << \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+int bnx2x_close(struct net_device *dev);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44edb06..2b731b25359 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
* @to: destination FP index
*
* Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy'ing the entire
+ * source onto the target.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
- struct napi_struct orig_napi = to_fp->napi;
+
+ /* Copy the NAPI object as it has been already initialized */
+ from_fp->napi = to_fp->napi;
+
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
-
- /* Restore the NAPI object as it has been already initialized */
- to_fp->napi = orig_napi;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
* return idx of last bd freed
*/
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
- u16 idx)
+ u16 idx, unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
+ if (skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
struct netdev_queue *txq;
u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
" pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
- bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+ &pkts_compl, &bytes_compl);
+
sw_cons++;
}
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
txdata->tx_pkt_cons = sw_cons;
txdata->tx_bd_cons = bd_cons;
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+ const struct eth_fast_path_rx_cqe *cqe)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ return le32_to_cpu(cqe->rss_hash_result);
+ return 0;
+}
+
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod,
+ u16 cons, u16 prod,
struct eth_fast_path_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (tpa_info->tpa_state != BNX2X_TPA_STOP)
BNX2X_ERR("start of bin not in stop [%d]\n", queue);
- /* Try to map an empty skb from the aggregation info */
+ /* Try to map an empty data buffer from the aggregation info */
mapping = dma_map_single(&bp->pdev->dev,
- first_buf->skb->data,
+ first_buf->data + NET_SKB_PAD,
fp->rx_buf_size, DMA_FROM_DEVICE);
/*
* ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
/* Move the BD from the consumer to the producer */
- bnx2x_reuse_rx_skb(fp, cons, prod);
+ bnx2x_reuse_rx_data(fp, cons, prod);
tpa_info->tpa_state = BNX2X_TPA_ERROR;
return;
}
- /* move empty skb from pool to prod */
- prod_rx_buf->skb = first_buf->skb;
+ /* move empty data from pool to prod */
+ prod_rx_buf->data = first_buf->data;
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
- /* point prod_bd to new skb */
+ /* point prod_bd to new data */
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u8 pad = tpa_info->placement_offset;
+ u32 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = NULL;
+ u8 *data = rx_buf->data;
/* alloc new skb */
- struct sk_buff *new_skb;
+ u8 *new_data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (old_tpa_state == BNX2X_TPA_ERROR)
goto drop;
- /* Try to allocate the new skb */
- new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ /* Try to allocate the new data */
+ new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
+ if (likely(new_data))
+ skb = build_skb(data);
- if (likely(new_skb)) {
- prefetch(skb);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
#endif
- skb_reserve(skb, pad);
+ skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
+ skb->rxhash = tpa_info->rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
- /* put new skb in bin */
- rx_buf->skb = new_skb;
+ /* put new data in bin */
+ rx_buf->data = new_data;
return;
}
@@ -537,19 +565,6 @@ drop:
fp->eth_q_stats.rx_skb_alloc_failed++;
}
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
- struct sk_buff *skb)
-{
- /* Set Toeplitz hash from CQE */
- if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->fast_path_cqe.status_flags &
- ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
- skb->rxhash =
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
+ u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
- /* Prefetch the page containing the BD descriptor
- at producer's index. It will be needed when new skb is
- allocated */
- prefetch((void *)(PAGE_ALIGN((unsigned long)
- (&fp->rx_desc_ring[bd_prod])) -
- PAGE_SIZE + 1));
-
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp = &cqe->fast_path_cqe;
cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
+ }
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ data = rx_buf->data;
- /* this is an rx packet */
- } else {
- rx_buf = &fp->rx_buf_ring[bd_cons];
- skb = rx_buf->skb;
- prefetch(skb);
-
- if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
- /* sanity check */
- if (fp->disable_tpa &&
- (CQE_TYPE_START(cqe_fp_type) ||
- CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
- CQE_TYPE(cqe_fp_type));
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
#endif
- if (CQE_TYPE_START(cqe_fp_type)) {
- u16 queue = cqe_fp->queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_start on queue %d\n",
- queue);
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
- bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod,
- cqe_fp);
-
- /* Set Toeplitz hash for LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- goto next_rx;
-
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
-
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ bnx2x_tpa_start(fp, queue,
+ bd_cons, bd_prod,
+ cqe_fp);
+ goto next_rx;
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
}
- /* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
- pad = cqe_fp->placement_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev,
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ pad += NET_SKB_PAD;
+ prefetch(data + pad); /* speedup eth_type_trans() */
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
- /* is this an error packet? */
- if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ /* Since we don't have a jumbo ring
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ if (skb == NULL) {
DP(NETIF_MSG_RX_ERR,
- "ERROR flags %x rx packet %u\n",
- cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ "ERROR packet dropped because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
-
- /* Since we don't have a jumbo ring
- * copy small packets if mtu > 1500
- */
- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
- (len <= RX_COPY_THRESH)) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + pad);
- if (new_skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped "
- "because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- goto reuse_rx;
- }
-
- /* aligned copy */
- skb_copy_from_linear_data_offset(skb, pad,
- new_skb->data + pad, len);
- skb_reserve(new_skb, pad);
- skb_put(new_skb, len);
-
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
- skb = new_skb;
-
- } else
- if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ memcpy(skb->data, data + pad, len);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ } else {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
+ skb = build_skb(data);
+ if (unlikely(!skb)) {
+ kfree(data);
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto next_rx;
+ }
skb_reserve(skb, pad);
- skb_put(skb, len);
-
} else {
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
}
+ }
- skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, bp->dev);
- /* Set Toeplitz hash for a none-LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
+ /* Set Toeplitz hash for a non-LRO skb */
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
- skb_checksum_none_assert(skb);
+ skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
}
- skb_record_rx_queue(skb, fp->index);
+ skb_record_rx_queue(skb, fp->rx_queue);
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@ reuse_rx:
next_rx:
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
bd_cons = NEXT_RX_IDX(bd_cons);
bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->skb = netdev_alloc_skb(bp->dev,
- fp->rx_buf_size);
- if (!first_buf->skb) {
+ first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+ GFP_ATOMIC);
+ if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
"disabling TPA on this "
@@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 bd_cons = txdata->tx_bd_cons;
u16 sw_prod = txdata->tx_pkt_prod;
u16 sw_cons = txdata->tx_pkt_cons;
while (sw_cons != sw_prod) {
- bd_cons = bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(sw_cons));
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev, txdata->txq_index));
}
}
}
@@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
for (i = 0; i < NUM_RX_BD; i++) {
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
+ rx_buf->data = NULL;
+ kfree(data);
}
}
@@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
break;
}
+#ifdef BCM_CNIC
+ /* override in ISCSI SD mod */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->num_queues = 1;
+#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
}
@@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ u32 mtu;
/* Always use a mini-jumbo MTU for the FCoE L2 ring */
if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
* IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
* overrun attack.
*/
- fp->rx_buf_size =
- BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
else
- fp->rx_buf_size =
- bp->dev->mtu + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = bp->dev->mtu;
+ fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+ IP_HEADER_ALIGNMENT_PADDING +
+ ETH_OVREHEAD +
+ mtu +
+ BNX2X_FW_RX_ALIGN_END;
+ /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
}
}
@@ -1541,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
for (i = 0; i < sizeof(ind_table); i++)
ind_table[i] =
- bp->fp->cl_id + (i % num_eth_queues);
+ bp->fp->cl_id +
+ ethtool_rxfh_indir_default(i, num_eth_queues);
}
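
ethtool_rxfh_indir_default(i, n) evaluates to i modulo n, so the RSS indirection table is spread evenly across the n ethernet RX queues, matching the default that ethtool itself would program. Roughly (ind_table_size and base_cl_id are placeholders):

    /* with num_eth_queues == 4 this yields 0,1,2,3,0,1,2,3,... */
    for (i = 0; i < ind_table_size; i++)
            ind_table[i] = base_cl_id +
                           ethtool_rxfh_indir_default(i, num_eth_queues);
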
/*
@@ -1929,13 +1934,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
}
- if (!bp->port.pmf)
+ if (bp->port.pmf)
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+ else
bnx2x__link_status_update(bp);
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
#ifdef BCM_CNIC
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2808,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
"dropping packet...\n");
@@ -2810,7 +2820,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
first_bd->nbd = cpu_to_le16(nbd);
bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(txdata->tx_pkt_prod));
+ TX_BD(txdata->tx_pkt_prod),
+ &pkts_compl, &bytes_compl);
return NETDEV_TX_OK;
}
@@ -2871,6 +2882,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+ netdev_tx_sent_queue(txq, skb->len);
+
txdata->tx_pkt_prod++;
/*
* Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2994,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
return -EINVAL;
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ return -EINVAL;
+#endif
+
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
if (rc)
@@ -3098,7 +3116,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
- /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp)) {
+ rx_ring_size = MIN_RX_SIZE_NONTPA;
+ bp->rx_ring_size = rx_ring_size;
+ } else
+#endif
if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3131,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
MIN_RX_SIZE_TPA, rx_ring_size);
bp->rx_ring_size = rx_ring_size;
- } else
+ } else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
/* Common */
@@ -3278,14 +3301,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
- tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
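
kcalloc(n, size, flags) zeroes the allocation like kzalloc() but additionally returns NULL if n * size would overflow, which is why it is the preferred form for array allocations such as the fp and MSI-X tables here:

    /* instead of kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL) */
    tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
            goto alloc_err;
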
@@ -3409,7 +3432,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return bnx2x_reload_if_running(dev);
}
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -3420,7 +3444,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
return features;
}
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663da18..bf27c54ff2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
* bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
return 0;
}
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
- if (unlikely(skb == NULL))
+ data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ if (unlikely(data == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
+ kfree(data);
return -ENOMEM;
}
- rx_buf->skb = skb;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
return 0;
}
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
* we are just moving one from cons to prod
* we are not creating a new mapping,
* so there is no need to check for dma_mapping_error().
*/
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
u16 cons, u16 prod)
{
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
- prod_rx_buf->skb = cons_rx_buf->skb;
+ prod_rx_buf->data = cons_rx_buf->data;
*prod_bd = *cons_bd;
}
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
for (i = 0; i < last; i++) {
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
struct sw_rx_bd *first_buf = &tpa_info->first_buf;
- struct sk_buff *skb = first_buf->skb;
+ u8 *data = first_buf->data;
- if (skb == NULL) {
+ if (data == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- first_buf->skb = NULL;
+ kfree(data);
+ first_buf->data = NULL;
}
}
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
fp->eth_q_stats.rx_skb_alloc_failed++;
continue;
}
@@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
unsigned long q_type = 0;
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
/** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
@@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
return max_cfg;
}
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp: driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp: driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp: driver handle
+ * @flags: flags to update
+ * @set: set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+ if (is_valid_ether_addr(addr))
+ return true;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ return true;
+#endif
+ return false;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd7485ab1..5051cf3deb2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
- if (SHMEM2_HAS(bp, drv_flags)) {
- u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- drv_flags = SHMEM2_RD(bp, drv_flags);
-
- if (set)
- SET_FLAGS(drv_flags, flags);
- else
- RESET_FLAGS(drv_flags, flags);
-
- SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- }
-}
-
static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
/* mark DCBX result for PMF migration */
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
#ifdef BCM_DCBNL
- /**
+ /*
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
- bnx2x_dcbx_stop_hw_tx(bp);
-
- /* reconfigure the netdevice with the results of the new
+ /*
+ * reconfigure the netdevice with the results of the new
* dcbx negotiation.
*/
bnx2x_dcbx_update_tc_mapping(bp);
+ /*
+ * allow other functions to update their netdevices
+ * accordingly
+ */
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ bnx2x_dcbx_stop_hw_tx(bp);
+
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_dcbx_update_ets_params(bp);
bnx2x_dcbx_resume_hw_tx(bp);
+
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
/*For IEEE admin_recommendation_bw_precentage
*For IEEE admin_recommendation_ets_pg */
af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
if (dp->admin_priority_app_table[i].valid) {
struct bnx2x_admin_priority_app_table *table =
dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
/* if we need to syncronize DCBX result from prev PMF
- * read it from shmem and update bp accordingly
+ * read it from shmem and update bp and netdev accordingly
*/
if (SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
bp->dcbx_error);
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
}
}
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
int i, ff;
/* iterate over the app entries looking for idtype and idval */
- for (i = 0, ff = -1; i < 4; i++) {
+ for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
struct bnx2x_admin_priority_app_table *app_ent =
&bp->dcbx_config_params.admin_priority_app_table[i];
if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
if (ff < 0 && !app_ent->valid)
ff = i;
}
- if (i < 4)
+ if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
/* if found overwrite up */
bp->dcbx_config_params.
admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bca6f2..2ab9254e2d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
u32 app_id;
};
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
- struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ struct bnx2x_admin_priority_app_table
+ admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
u32 admin_default_priority;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b27a55..a688b9d975a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -107,6 +107,10 @@ static const struct {
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(mf_tag_discard),
4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(pfc_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_received" },
+ { STATS_OFFSET32(pfc_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successuly */
+ /* Save new config in case command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -761,8 +765,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 phy_fw_ver[PHY_FW_VER_LEN];
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
@@ -773,14 +777,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- strncpy(info->fw_version, bp->fw_ver, 32);
+ strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
- strcpy(info->bus_info, pci_name(bp->pdev));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1744,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
+ u8 *data;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
@@ -1748,8 +1754,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
- bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
- LOOPBACK_XMAC : LOOPBACK_BMAC;
+ if (CHIP_IS_E3(bp)) {
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (bp->port.supported[cfg_idx] &
+ (SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ bp->link_params.loopback_mode = LOOPBACK_XMAC;
+ else
+ bp->link_params.loopback_mode = LOOPBACK_UMAC;
+ } else
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@@ -1784,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ netdev_tx_sent_queue(txq, skb->len);
+
pkt_prod = txdata->tx_pkt_prod++;
tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1883,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp_rx->rx_buf_size, DMA_FROM_DEVICE);
- skb = rx_buf->skb;
- skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
for (i = ETH_HLEN; i < pkt_size; i++)
- if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ if (*(data + i) != (unsigned char) (i & 0xff))
goto test_loopback_rx_exit;
rc = 0;
@@ -2285,18 +2302,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
- struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+ 0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
- size_t copy_size =
- min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -2309,33 +2328,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
* align the returned table to the Client ID of the leading RSS
* queue.
*/
- for (i = 0; i < copy_size; i++)
- indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
- indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
- const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
- u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
- /* validate the size */
- if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
- return -EINVAL;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
- /* validate the indices */
- if (indir->ring_index[i] >= num_eth_queues)
- return -EINVAL;
/*
* The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2350,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ ind_table[i] = indir[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2383,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index fc754cb6cc0..3e30c8642c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
u32 extended_dev_info_shared_addr;
u32 ncsi_oem_data_addr;
- u32 ocsd_host_addr;
- u32 ocbb_host_addr;
- u32 ocsd_req_update_interval;
+ u32 ocsd_host_addr; /* initialized by option ROM */
+ u32 ocbb_host_addr; /* initialized by option ROM */
+ u32 ocsd_req_update_interval; /* initialized by option ROM */
+ u32 temperature_in_half_celsius;
+ u32 glob_struct_in_host;
+
+ u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ u32 extended_dev_info_shared_cfg_size;
+
+ u32 dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ u32 multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ u32 drv_info_host_addr_lo;
+ u32 drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT 0
+#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
};
@@ -2501,14 +2536,18 @@ struct mac_stx {
#define MAC_STX_IDX_MAX 2
struct host_port_stats {
- u32 host_port_stats_start;
+ u32 host_port_stats_counter;
struct mac_stx mac_stx[MAC_STX_IDX_MAX];
u32 brb_drop_hi;
u32 brb_drop_lo;
- u32 host_port_stats_end;
+ u32 not_used; /* obsolete */
+ u32 pfc_frames_tx_hi;
+ u32 pfc_frames_tx_lo;
+ u32 pfc_frames_rx_hi;
+ u32 pfc_frames_rx_lo;
};
@@ -2548,6 +2587,118 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EUI-48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 0
#define BCM_5710_FW_REVISION_VERSION 29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
/*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
*/
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+*/
struct cfc_del_event_data {
u32 cid;
u32 reserved0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9..4df9505b67b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,7 +27,6 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
-
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -163,6 +162,11 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
/* BRB thresholds for E2*/
#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
@@ -177,6 +181,12 @@
#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
+
/* BRB thresholds for E3A0 */
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
@@ -190,6 +200,11 @@
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
/* BRB thresholds for E3B0 2 port mode*/
#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
@@ -239,18 +254,29 @@
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
/* only for E3B0*/
#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
+#define PFC_E3B0_4P_LB_GUART 120
#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
+#define DEFAULT_E3B0_LB_GUART 40
+
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
+/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
u32 min_w_val = 0;
/* Calculate min_w_val.*/
if (vars->link_up) {
- if (SPEED_20000 == vars->line_speed)
+ if (vars->line_speed == SPEED_20000)
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
* port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
* In 2 port mode port0 has COS0-5 that can be used for WFQ.
* In 4 port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_weight = PBF_REG_COS0_WEIGHT_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
******************************************************************************/
static int bnx2x_ets_e3b0_get_total_bw(
const struct link_params *params,
- const struct bnx2x_ets_params *ets_params,
+ struct bnx2x_ets_params *ets_params,
u16 *total_bw)
{
struct bnx2x *bp = params->bp;
u8 cos_idx = 0;
+ u8 is_bw_cos_exist = 0;
*total_bw = 0 ;
+
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
- if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+ "was set to 0\n");
+ /*
+ * This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
*total_bw +=
ets_params->cos[cos_idx].params.bw_params.bw;
}
}
/* Check total BW is valid */
- if ((100 != *total_bw) || (0 == *total_bw)) {
- if (0 == *total_bw) {
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
return -EINVAL;
}
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW should be 100\n");
- /**
- * We can handle a case whre the BW isn't 100 this can happen
- * if the TC are joined.
- */
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
}
return 0;
}
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
- if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
"the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri > max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter Illegal strict priority\n");
+ "parameter Illegal strict priority\n");
return -EINVAL;
}
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
/* Set all the strict priority first */
for (i = 0; i < max_num_of_cos; i++) {
- if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
- if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
sp_pri_to_cos[i], pri_set);
pri_bitmask = 1 << sp_pri_to_cos[i];
/* COS is used remove it from bitmap.*/
- if (0 == (pri_bitmask & cos_bit_to_set)) {
+ if (!(pri_bitmask & cos_bit_to_set)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
******************************************************************************/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params)
+ struct bnx2x_ets_params *ets_params)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
/* Prepare BW parameters*/
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
- /**
- * Upper bound is set according to current link speed (min_w_val
- * should be the same for upper bound and COS credit val).
+ /*
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
*/
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
cos_bw_bitmap |= (1 << cos_entry);
- /**
+ /*
* The function also sets the BW in HW(not the mappin
* yet)
*/
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
"bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
sp_pri_to_cos);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
cos_sp_bitmap,
cos_bw_bitmap);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
return bnx2x_status;
}
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- if ((0 == total_bw) ||
- (0 == cos0_bw) ||
- (0 == cos1_bw)) {
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
* dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
*/
- val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/******************************************************************/
/* PFC section */
/******************************************************************/
-
static void bnx2x_update_pfc_xmac(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
if (!vars->link_up)
return;
- if (MAC_TYPE_EMAC == vars->mac_type) {
+ if (vars->mac_type == MAC_TYPE_EMAC) {
DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
udelay(40);
}
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
}
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
- u32 port4mode_ovwr_val;
- /* Check 4-port override enabled */
- port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
- if (port4mode_ovwr_val & (1<<0)) {
- /* Return 4-port mode override value */
- return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
- }
- /* Return 4-port mode from input pin */
- return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
/* Define the XMAC mode */
static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
{
struct bnx2x *bp = params->bp;
u32 is_port4mode = bnx2x_is_4_port_mode(bp);
- /**
- * In 4-port mode, need to set the mode only once, so if XMAC is
- * already out of reset, it means the mode has already been set,
- * and it must not* reset the XMAC again, since it controls both
- * ports of the path
- **/
+ /*
+ * In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not* reset the XMAC again, since it controls both
+ * ports of the path
+ */
if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
return 0;
}
+
static int bnx2x_emac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-
/* PFC BRB internal port configuration params */
struct bnx2x_pfc_brb_threshold_val {
u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
};
struct bnx2x_pfc_brb_e3b0_val {
+ u32 per_class_guaranty_mode;
+ u32 lb_guarantied_hyst;
u32 full_lb_xoff_th;
u32 full_lb_xon_threshold;
u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
struct bnx2x_pfc_brb_th_val {
struct bnx2x_pfc_brb_threshold_val pauseable_th;
struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val default_class0;
+ struct bnx2x_pfc_brb_threshold_val default_class1;
+
};
static int bnx2x_pfc_brb_get_config_params(
struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+ config_val->default_class1.pause_xoff = 0;
+ config_val->default_class1.pause_xon = 0;
+ config_val->default_class1.full_xoff = 0;
+ config_val->default_class1.full_xon = 0;
+
if (CHIP_IS_E2(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+ /* pause able*/
config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3A0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+ /* pause able */
config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3B0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
if (params->phy[INT_PHY].flags &
FLAGS_4_PORT_MODE) {
config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non pause able*/
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
} else
return -EINVAL;
return 0;
}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- u32 cos0_pauseable,
- u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+ const u8 pfc_enabled)
{
- if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ if (pfc_enabled && pfc_params) {
+ e3b0_val->per_class_guaranty_mode = 1;
+ e3b0_val->lb_guarantied_hyst = 80;
+
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (pfc_params->cos0_pauseable !=
+ pfc_params->cos1_pauseable) {
+ /* nonpauseable= Lossy + pauseable = Lossless*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (pfc_params->cos0_pauseable) {
+ /* Lossless +Lossless*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy +Lossy*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+ } else {
+ e3b0_val->per_class_guaranty_mode = 0;
+ e3b0_val->lb_guarantied_hyst = 0;
e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
+ DEFAULT_E3B0_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (cos0_pauseable != cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
}
}
static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
struct bnx2x *bp = params->bp;
struct bnx2x_pfc_brb_th_val config_val = { {0} };
struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
+ &config_val.pauseable_th;
struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- int set_pfc = params->feature_config_flags &
+ const int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
+ const u8 pfc_enabled = (set_pfc && pfc_params);
int bnx2x_status = 0;
u8 port = params->port;
/* default - pause configuration */
reg_th_config = &config_val.pauseable_th;
bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
- if (set_pfc && pfc_params)
+ if (pfc_enabled) {
/* First COS */
- if (!pfc_params->cos0_pauseable)
+ if (pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class0;
/*
* The number of free blocks below which the pause signal to class 0
* of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
- if (set_pfc && pfc_params) {
+ if (pfc_enabled) {
/* Second COS */
if (pfc_params->cos1_pauseable)
reg_th_config = &config_val.pauseable_th;
else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class1;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+ if (CHIP_IS_E3B0(bp)) {
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params,
+ pfc_enabled);
+
+ REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+ e3b0_val.per_class_guaranty_mode);
+
/*
- * The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- **/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
+ * The hysteresis on the guarantied buffer space for the Lb
+ * port before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+ e3b0_val.lb_guarantied_hyst);
+
/*
- * The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
/*
- * The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
/*
- * The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of blocks guarantied for the MAC #n port. n=0,1
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
+ /* The number of blocks guarantied for the LB port.*/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
- if (CHIP_IS_E3B0(bp)) {
- /*Should be done by init tool */
- /*
- * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
- * reset value
- * 944
- */
-
- /**
- * The hysteresis on the guarantied buffer space for the Lb port
- * before signaling XON.
- **/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params->cos0_pauseable,
- pfc_params->cos1_pauseable);
- /**
- * The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /**
- * The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /**
- * The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /*The number of blocks guarantied for the LB port.*/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /**
- * The number of blocks guarantied for the MAC #n port.
- */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /**
- * The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /**
- * The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
-
- }
+ /*
+ * The number of blocks guarantied for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The number of blocks guarantied for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ /*
+ * The number of blocks guarantied for class #t in MAC1.t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
}
return bnx2x_status;
@@ -2515,7 +2619,7 @@ int bnx2x_update_pfc(struct link_params *params,
/* update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
if (!vars->link_up)
@@ -2533,7 +2637,6 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_emac_enable(params, vars, 0);
return bnx2x_status;
}
-
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
else
@@ -3053,7 +3156,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
-
} else {
/* data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3192,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
-
-
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3339,7 +3439,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
@@ -3942,13 +4042,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
struct link_params *params,
- u8 fiber_mode)
+ u8 fiber_mode,
+ u8 always_autoneg)
{
struct bnx2x *bp = params->bp;
u16 val16, digctrl_kx1, digctrl_kx2;
- u8 lane;
-
- lane = bnx2x_get_warpcore_lane(phy, params);
/* Clear XFI clock comp in non-10G single lane mode. */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4054,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
- if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4065,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- val16 &= 0xcfbf;
+ val16 &= 0xcebf;
switch (phy->req_line_speed) {
case SPEED_10:
break;
@@ -4043,9 +4141,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
-
-
- /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
struct link_params *params,
u16 lane)
@@ -4250,7 +4346,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
vars->phy_flags |= PHY_SGMII_FLAG;
DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
bnx2x_warpcore_clear_regs(phy, params, lane);
- bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4374,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
}
bnx2x_warpcore_set_sgmii_speed(phy,
params,
- fiber_mode);
+ fiber_mode,
+ 0);
}
break;
@@ -4291,7 +4388,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
bnx2x_warpcore_set_10G_XFI(phy, params, 0);
} else if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ bnx2x_warpcore_set_sgmii_speed(
+ phy, params, 1, 0);
}
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4526,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
/* Switch back to 4-copy registers */
bnx2x_set_aer_mmd(params, phy);
- /* Global loopback, not recommended. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
} else {
/* 10G & 20G */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4542,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
}
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
- u8 port = params->port;
- u32 sync_offset, media_types;
- /* Update PHY configuration */
- set_phy_vars(params, vars);
-
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
- vars->phy_flags = PHY_XGXS_FLAG;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4563,7 +4644,23 @@ void bnx2x_link_status_update(struct link_params *params,
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_sync_link(params, vars);
/* Sync media type */
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
@@ -4602,7 +4699,6 @@ void bnx2x_link_status_update(struct link_params *params,
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
-
static void bnx2x_set_master_ln(struct link_params *params,
struct bnx2x_phy *phy)
{
@@ -4676,11 +4772,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
* Each two bits represents a lane number:
* No swap is 0123 => 0x1b no need to enable the swap
*/
- u16 ser_lane, rx_lane_swap, tx_lane_swap;
+ u16 rx_lane_swap, tx_lane_swap;
- ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5449,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5495,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
-
u8 lane;
u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
int rc = 0;
@@ -6678,7 +6768,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
return rc;
}
-
/*****************************************************************************/
/* External Phy section */
/*****************************************************************************/
@@ -8103,7 +8192,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
+ struct bnx2x *bp = params->bp;
bnx2x_warpcore_power_module(params, phy, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
}
static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9137,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"8727 Power fault has been detected on port %d\n",
oc_port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has"
- " been detected and the power to "
- "that SFP+ module has been removed"
- " to prevent failure of the card."
- " Please remove the SFP+ module and"
- " restart the system to clear this"
- " error.\n",
+ netdev_err(bp->dev, "Error: Power fault on Port %d has "
+ "been detected and the power to "
+ "that SFP+ module has been removed "
+ "to prevent failure of the card. "
+ "Please remove the SFP+ module and "
+ "restart the system to clear this "
+ "error.\n",
oc_port);
/* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
@@ -9228,7 +9325,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, offset;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9360,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_REG_8481_LED3_BLINK,
0);
- bnx2x_cl45_read(bp, phy,
+ /* Configure the blink rate to ~15.9 Hz */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
- val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+ MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, offset, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_DEVAD, offset, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -9283,7 +9388,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
u16 tmp_req_line_speed;
tmp_req_line_speed = phy->req_line_speed;
@@ -9378,6 +9483,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1<<15 | 1<<9 | 7<<0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1<<8) | (1<<7);
DP(NETIF_MSG_LINK, "Setting 100M force\n");
}
if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,9 +9522,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 10G\n");
/* Restart autoneg for 10G*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ &an_10g_val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ an_10g_val | 0x1000);
bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
- 0x3200);
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
} else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
@@ -9449,74 +9564,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ u16 fw_cmd,
+ u16 cmd_args[])
{
u32 idx;
- u32 pair_swap;
u16 val;
- u16 data;
struct bnx2x *bp = params->bp;
- /* Do pair swap */
-
- /* Check for configuration. */
- pair_swap = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
- PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
- if (pair_swap == 0)
- return 0;
-
- data = (u16)pair_swap;
-
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
msleep(1);
}
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ if (idx >= PHY84833_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
+ /* Prepare argument(s) and issue command */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- data);
- /* Issue pair swap command */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
msleep(1);
}
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
+ /* Gather returning data */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 pair_swap;
+ u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ int status;
+ struct bnx2x *bp = params->bp;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ /* Only the second argument is used for this command */
+ data[1] = (u16)pair_swap;
+
+ status = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_PAIR_SWAP, data);
+ if (status == 0)
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+ return status;
+}
+
static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
u32 shmem_base_path[],
u32 chip_id)
@@ -9579,24 +9715,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 chip_id)
-{
- u8 reset_gpios;
-
- reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
- udelay(10);
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- msleep(800);
- DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
- reset_gpios);
-
- return 0;
-}
-
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9605,8 +9723,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
u16 val;
- u16 temp;
- u32 actual_phy_selection, cms_enable, idx;
+ u32 actual_phy_selection, cms_enable;
+ u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
msleep(1);
@@ -9625,6 +9743,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, 0x8000);
+ }
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode */
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
@@ -9633,26 +9758,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_CTL_DEVAD,
MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
- }
-
- bnx2x_wait_reset_complete(bp, phy, params);
-
- /* Wait for GPHY to come out of reset */
- msleep(50);
-
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
- /*
- * BCM84823 requires that XGXS links up first @ 10G for normal behavior
- */
- temp = vars->line_speed;
- vars->line_speed = SPEED_10000;
- bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
- bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
- vars->line_speed = temp;
-
- /* Set dual-media configuration according to configuration */
+ } else {
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal
+ * behavior.
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9700,64 +9818,18 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* AutogrEEEn */
if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- /* Ensure that f/w is ready */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
- break;
- usleep_range(1000, 1000);
- }
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
- return -EINVAL;
- }
-
- /* Select EEE mode */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG3,
- 0x2);
-
- /* Set Idle and Latency */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA3_REG,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA4_REG,
- PHY84833_CONSTANT_LATENCY);
-
- /* Send EEE instruction to command register */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_SET_EEE_MODE);
-
- /* Ensure that the command has completed */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
- break;
- usleep_range(1000, 1000);
- }
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
- return -EINVAL;
- }
-
- /* Reset command handler */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- }
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED)
+ cmd_args[0] = 0x2;
+ else
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args);
+ if (rc != 0)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
@@ -10144,8 +10216,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
usleep_range(1000, 1000);
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -10327,6 +10401,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
return 0;
}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -11103,7 +11214,7 @@ static struct bnx2x_phy phy_54618se = {
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
@@ -11181,7 +11292,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
offsetof(struct shmem_region,
dev_info.port_feature_config[port].link_config)) &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
- chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
if (USES_WARPCORE(bp)) {
u32 serdes_net_if;
@@ -11360,6 +11473,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
return -EINVAL;
default:
*phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return -EINVAL;
return 0;
}
@@ -11533,7 +11650,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
int bnx2x_phy_probe(struct link_params *params)
{
- u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u8 phy_index, actual_phy_idx;
u32 phy_config_swapped, sync_offset, media_types;
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy;
@@ -11544,7 +11661,6 @@ int bnx2x_phy_probe(struct link_params *params)
for (phy_index = INT_PHY; phy_index < MAX_PHYS;
phy_index++) {
- link_cfg_idx = LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == EXT_PHY1)
@@ -12210,6 +12326,63 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
return 0;
}
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[],
+ u8 phy_index,
+ u32 chip_id)
+{
+ u8 reset_gpios;
+ struct bnx2x_phy phy;
+ u32 shmem_base, shmem2_base, cnt;
+ s8 port = 0;
+ u16 val;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* This PHY is for E2 and E3. */
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+
+		/* Wait for the FW to complete its initialization. */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, &phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &val);
+ if (!(val & (1<<15)))
+ break;
+ msleep(1);
+ }
+ if (cnt >= 1000)
+ DP(NETIF_MSG_LINK,
+ "84833 Cmn reset timeout (%d)\n", port);
+
+ /* Put the port in super isolate mode. */
+ bnx2x_cl45_read(bp, &phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
+
+ return 0;
+}
+
+
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
@@ -12244,7 +12417,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
- rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
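A minimal sketch of the firmware mailbox handshake implemented by the new bnx2x_84833_cmd_hdlr() above. This is illustrative only: mdio_read()/mdio_write() are assumed stand-ins for the driver's bnx2x_cl45_read()/bnx2x_cl45_write() CL45 accessors, and the register/status names are the ones introduced in this patch (see bnx2x_reg.h further down).

/* Sketch only; assumes the driver's register definitions plus
 * linux/types.h and linux/delay.h.  mdio_read()/mdio_write() are
 * hypothetical helpers, not driver API.
 */
extern u16 mdio_read(u32 reg);
extern void mdio_write(u32 reg, u16 val);

static int phy84833_fw_cmd_sketch(u16 fw_cmd, u16 args[PHY84833_CMDHDLR_MAX_ARGS])
{
	u16 status = 0;
	int i;

	/* 1. Force the handler open and wait for OPEN_FOR_CMDS */
	mdio_write(MDIO_84833_CMD_HDLR_STATUS,
		   PHY84833_STATUS_CMD_OPEN_OVERRIDE);
	for (i = 0; i < PHY84833_CMDHDLR_WAIT; i++) {
		status = mdio_read(MDIO_84833_CMD_HDLR_STATUS);
		if (status == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
			break;
		msleep(1);
	}
	if (status != PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
		return -EINVAL;

	/* 2. Load up to five arguments, then write the opcode */
	for (i = 0; i < PHY84833_CMDHDLR_MAX_ARGS; i++)
		mdio_write(MDIO_84833_CMD_HDLR_DATA1 + i, args[i]);
	mdio_write(MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);

	/* 3. Poll for completion and read back the (possibly updated) args */
	for (i = 0; i < PHY84833_CMDHDLR_WAIT; i++) {
		status = mdio_read(MDIO_84833_CMD_HDLR_STATUS);
		if (status == PHY84833_STATUS_CMD_COMPLETE_PASS ||
		    status == PHY84833_STATUS_CMD_COMPLETE_ERROR)
			break;
		msleep(1);
	}
	if (status != PHY84833_STATUS_CMD_COMPLETE_PASS)
		return -EINVAL;
	for (i = 0; i < PHY84833_CMDHDLR_MAX_ARGS; i++)
		args[i] = mdio_read(MDIO_84833_CMD_HDLR_DATA1 + i);

	/* 4. Hand the mailbox back to firmware */
	mdio_write(MDIO_84833_CMD_HDLR_STATUS,
		   PHY84833_STATUS_CMD_CLEAR_COMPLETE);
	return 0;
}

Both the pair-swap and AutogrEEEn paths above reduce to this sequence, differing only in the opcode and the argument payload.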
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 2a46e633abe..e02a68a7fb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings.*/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params);
+ struct bnx2x_ets_params *ets_params);
/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e949f..ffeaaa95ed9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
- return 2 * vn + BP_PORT(bp);
-}
-
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
"rate shaping and fairness are disabled\n");
}
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
- int func;
- int vn;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
- if (vn == BP_VN(bp))
- continue;
-
- func = func_by_vn(bp, vn);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-}
-
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
if (bp->state != BNX2X_STATE_OPEN)
return;
+ /* read updated dcb configuration */
+ bnx2x_dcbx_pmf_update(bp);
+
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
- /* Statistics are not supported for CNIC Clients at the moment */
- if (IS_FCOE_FP(fp))
- return false;
-#endif
- return true;
-}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
@@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
* parent connection). The statistics are zeroed when the parent
* connection is initialized.
*/
- if (stat_counter_valid(bp, fp)) {
- __set_bit(BNX2X_Q_FLG_STATS, &flags);
- if (zero_stats)
- __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
- }
+
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
return flags;
}
@@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* This should be a maximum number of data bytes that may be
* placed on the BD (not including paddings).
*/
- rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
- IP_HEADER_ALIGNMENT_PADDING;
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+ struct eth_stats_info *ether_stat =
+ &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+ /* leave last char as NULL */
+ memcpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN - 1);
+
+ bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
+
+ ether_stat->mtu_size = bp->dev->mtu;
+
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ if (bp->dev->features & NETIF_F_TSO)
+ ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+ ether_stat->feature_flags |= bp->common.boot_mode;
+
+ ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = bp->tx_ring_size;
+ ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct fcoe_stats_info *fcoe_stat =
+ &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+ memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+ fcoe_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+ /* insert FCoE stats from ramrod response */
+ if (!NO_FCOE(bp)) {
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ tstorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
+ ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+ }
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct iscsi_stats_info *iscsi_stat =
+ &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+ memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+ iscsi_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
/* called due to MCP event (on pmf):
* reread new bandwidth configuration
* configure FW
@@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+ enum drv_info_opcode op_code;
+ u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(bp);
+ break;
+ case FCOE_STATS_OPCODE:
+ bnx2x_drv_info_fcoe_stat(bp);
+ break;
+ case ISCSI_STATS_OPCODE:
+ bnx2x_drv_info_iscsi_stat(bp);
+ break;
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ /* if we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(bp, drv_info_host_addr_lo,
+ U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+ SHMEM2_WR(bp, drv_info_host_addr_hi,
+ U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
" the driver to shutdown the card to prevent permanent"
" damage. Please contact OEM Support for assistance\n");
+
+ /*
+	 * Schedule device reset (unload).
+	 * Some boards consume enough power with the driver up to overheat
+	 * if the fan fails.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
u8 cos;
unsigned long q_type = 0;
u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+ fp->rx_queue = fp_idx;
fp->cid = fp_idx;
fp->cl_id = bnx2x_fp_cl_id(fp);
fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
int num_groups;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
- /* number of eth_queues */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
/* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + num_eth_queues */
- bp->fw_stats_num = 2 + num_queue_stats;
+ * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+ * num of queues
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
/* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* STATS_QUERY_CMD_COUNT rules. The real number or requests is
* configured in the stats_query_header.
*/
- num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
- (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+ num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
*
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
+ *
+	 * memory for the FCoE offloaded statistics is counted anyway,
+	 * even if those stats will not be sent.
*/
bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
@@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+ DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ return 0;
+ }
+#endif
+
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@ sp_rtnl_not_reset:
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+ /*
+	 * In case of fan failure we need to reset it if the "stop on error"
+	 * debug flag is set, since we are trying to prevent permanent
+	 * overheating damage.
+ */
+ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ netif_device_detach(bp->dev);
+ bnx2x_close(bp->dev);
+ }
+
sp_rtnl_exit:
rtnl_unlock();
}
@@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
- u32 val, val2, val3, val4, id;
+ u32 val, val2, val3, val4, id, boot_mode;
u16 pmc;
/* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+ BC_SUPPORTS_PFC_STATS : 0;
+
+ boot_mode = SHMEM_RD(bp,
+ dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+ PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+ switch (boot_mode) {
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+ break;
+ }
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+#ifdef BCM_CNIC
int port = BP_PORT(bp);
- int func = BP_ABS_FUNC(bp);
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
- u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[port].max_fcoe_conn);
- /* Get the number of maximum allowed iSCSI and FCoE connections */
+ /* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_FLAG;
+#else
+ bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
}
}
- BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
- bp->cnic_eth_dev.max_iscsi_conn,
- bp->cnic_eth_dev.max_fcoe_conn);
+ BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
/*
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-}
+#else
+ bp->flags |= NO_FCOE_FLAG;
#endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ /*
+	 * iSCSI may be dynamically disabled, but reading the
+	 * info here lets the driver decrease its memory usage
+	 * if the feature is disabled for good.
+ */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_get_fcoe_info(bp);
+}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
@@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ /*
+	 * iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
* FCoE MAC then the appropriate feature should be disabled.
*/
if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
fip_mac);
} else
bp->flags |= NO_FCOE_FLAG;
+ } else { /* SD mode */
+ if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ }
}
#endif
} else {
@@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
#endif
- if (!is_valid_ether_addr(bp->dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: "
"%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
-#ifdef BCM_CNIC
bnx2x_get_cnic_info(bp);
-#endif
/* Get current FW pulse sequence */
if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
int cnt, i, block_end, rodi;
- char vpd_data[BNX2X_VPD_LEN+1];
+ char vpd_start[BNX2X_VPD_LEN+1];
char str_id_reg[VENDOR_ID_LEN+1];
char str_id_cap[VENDOR_ID_LEN+1];
+ char *vpd_data;
+ char *vpd_extended_data = NULL;
u8 len;
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
if (cnt < BNX2X_VPD_LEN)
goto out_not_found;
- i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ /* VPD RO tag should be first tag after identifier string, hence
+ * we should be able to find it in first BNX2X_VPD_LEN chars
+ */
+ i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
-
block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_data[i]);
+ pci_vpd_lrdt_size(&vpd_start[i]);
i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > BNX2X_VPD_LEN)
- goto out_not_found;
+ if (block_end > BNX2X_VPD_LEN) {
+ vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+ if (vpd_extended_data == NULL)
+ goto out_not_found;
+
+ /* read rest of vpd image into vpd_extended_data */
+ memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+ cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+ block_end - BNX2X_VPD_LEN,
+ vpd_extended_data + BNX2X_VPD_LEN);
+ if (cnt < (block_end - BNX2X_VPD_LEN))
+ goto out_not_found;
+ vpd_data = vpd_extended_data;
+ } else
+ vpd_data = vpd_start;
+
+ /* now vpd_data holds full vpd content in both cases */
rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
bp->fw_ver[len] = ' ';
}
}
+ kfree(vpd_extended_data);
return;
}
out_not_found:
+ kfree(vpd_extended_data);
return;
}
@@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->multi_mode = multi_mode;
+ bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+ bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
/* Set TPA flags */
- if (disable_tpa) {
+ if (bp->disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
bp->dev->features &= ~NETIF_F_LRO;
} else {
bp->flags |= TPA_ENABLE_FLAG;
bp->dev->features |= NETIF_F_LRO;
}
- bp->disable_tpa = disable_tpa;
if (CHIP_IS_E1(bp))
bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+ /* handle ISCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev)
}
#endif
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ return -EADDRNOTAVAIL;
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_select_queue = bnx2x_select_queue,
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = bnx2x_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
@@ -10823,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x and E3*/
- if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ /* disable FCOE L2 queue for E1x */
+ if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
#endif
@@ -11486,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
smp_mb__after_atomic_inc();
break;
}
+ case DRV_CTL_ULP_REGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
+ case DRV_CTL_ULP_UNREGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_mutex);
cp->drv_state = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
kfree(bp->cnic_kwq);
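The group-count arithmetic in bnx2x_alloc_fw_stats_mem() above is a plain ceiling division of bp->fw_stats_num by STATS_QUERY_CMD_COUNT. A hedged sketch of the equivalence, using DIV_ROUND_UP() from linux/kernel.h; the value 16 in the worked example is an assumption for illustration, not taken from the driver headers:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Equivalent to:
 *	num_groups = (fw_stats_num / STATS_QUERY_CMD_COUNT) +
 *		     ((fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 * Worked example, assuming STATS_QUERY_CMD_COUNT were 16:
 *	fw_stats_num = 18  ->  18/16 + 1 = 2  ==  DIV_ROUND_UP(18, 16)
 */
static inline int fw_stats_num_groups(int fw_stats_num, int cmds_per_group)
{
	return DIV_ROUND_UP(fw_stats_num, cmds_per_group);
}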
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0..44609de4e5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -160,8 +160,11 @@
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -1619,6 +1622,14 @@
register bits. */
#define MISC_REG_LCPLL_CTRL_1 0xa2a4
#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c
/* [RW 4] Interrupt mask register #0 read/write */
#define MISC_REG_MISC_INT_MASK 0xa388
/* [RW 1] Parity mask register #0 read/write */
@@ -1754,6 +1765,7 @@
* is compared to the value on ctrl_md_devad. Drives output
* misc_xgxs0_phy_addr. Global register. */
#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+#define MISC_REG_WC0_RESET 0xac30
/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
side. This should be less than or equal to phy_port_mode; if some of the
ports are not used. This enables reduction of frequency on the core side.
@@ -6823,11 +6835,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
@@ -6838,26 +6852,35 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
/* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
/* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED 0x0001
-#define PHY84833_CMD_IN_PROGRESS 0x0002
-#define PHY84833_CMD_COMPLETE_PASS 0x0004
-#define PHY84833_CMD_COMPLETE_ERROR 0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
-#define PHY84833_CMD_SYSTEM_BOOT 0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
@@ -6990,6 +7013,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 14517691f8d..5ac616093f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,6 +30,8 @@
#define BNX2X_MAX_EMUL_MULTI 16
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/**** Exe Queue interfaces ****/
/**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ u8 *next = buf;
+ int counter = 0;
+
+ /* traverse list */
+ list_for_each_entry(pos, &o->head, link) {
+ if (counter < n) {
+ /* place leading zeroes in buffer */
+ memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+			/* place mac after leading zeroes */
+ memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+ ETH_ALEN);
+
+ /* calculate address of next element and
+ * advance counter
+ */
+ counter++;
+ next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+ counter, next, pos->u.mac.mac);
+ }
+ }
+ return counter * ETH_ALEN;
+}
+
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
mac_obj->check_move = bnx2x_check_move;
mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
if (!list_empty(&o->registry.exact_match.macs))
return 0;
- elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
if (!elem) {
BNX2X_ERR("Failed to allocate registry memory\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2e9f1..992308ff82e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
/* RAMROD command to be used */
int ramrod_cmd;
+ /* copy first n elements onto preallocated buffer
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+	 *			so buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+ int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf);
+
/**
* Checks if ADD-ramrod with the given params may be performed.
*
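The buffer layout behind the new get_n_elements() callback (documented above, implemented as bnx2x_get_n_elements() in bnx2x_sp.c) packs each six-byte MAC into an 8-byte, 4-byte-aligned slot: MAC_LEADING_ZERO_CNT = ALIGN(6, 4) - 6 = 2 zero bytes of padding, then the MAC. A small self-contained sketch of that packing; the SKETCH_* names are invented for the example:

#include <string.h>

#define SKETCH_ETH_ALEN		6
#define SKETCH_SLOT_SIZE	8	/* ALIGN(ETH_ALEN, sizeof(u32)) */
#define SKETCH_PAD		(SKETCH_SLOT_SIZE - SKETCH_ETH_ALEN)	/* 2 */

/* Packs n MACs the way bnx2x_get_n_elements() lays them out and returns
 * the number of MAC bytes copied (padding not counted), matching the
 * callback's return convention.
 */
static int pack_macs_sketch(unsigned char *buf, int n,
			    const unsigned char macs[][SKETCH_ETH_ALEN])
{
	int i;

	for (i = 0; i < n; i++) {
		unsigned char *slot = buf + i * SKETCH_SLOT_SIZE;

		memset(slot, 0, SKETCH_PAD);		/* leading zeroes */
		memcpy(slot + SKETCH_PAD, macs[i], SKETCH_ETH_ALEN);
	}
	return n * SKETCH_ETH_ALEN;
}

A caller requesting n MACs therefore needs an n * 8 byte buffer; bnx2x_drv_info_ether_stat() above passes ether_stat->mac_local with n == DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED (3).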
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a771bf..bc0121ac291 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+ u16 res = sizeof(struct host_port_stats) >> 2;
+
+ /* if PFC stats are not supported by the MFW, don't DMA them */
+ if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+ res -= (sizeof(u32)*4) >> 2;
+
+ return res;
+}
+
/*
* Init service functions
*/
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
DMAE_LEN32_RD_MAX * 4);
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
DMAE_LEN32_RD_MAX * 4);
- dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+ pstats->pfc_frames_tx_hi,
+ diff.lo, new->tx_stat_gtpp_lo,
+ pstats->pfc_frames_tx_lo);
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+ ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+ pstats->pfc_frames_tx_lo, diff.lo);
+
+ DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+ pstats->pfc_frames_rx_hi,
+ diff.lo, new->rx_stat_grpp_lo,
+ pstats->pfc_frames_rx_lo);
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+ ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+ pstats->pfc_frames_rx_lo, diff.lo);
}
estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
- pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+ pstats->host_port_stats_counter++;
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
if (bp->func_stx) {
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
enum bnx2x_stats_state state;
if (unlikely(bp->panic))
return;
- bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
+ bnx2x_stats_stm[state][event].action(bp);
+
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
@@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
int i;
+ int first_queue_query_index;
struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+ /**** FCoE FW statistics data ****/
+ if (!NO_FCOE(bp)) {
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+ cur_query_entry =
+ &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_FCOE;
+		/* For FCoE, the query index is a don't care */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+
/**** Clients' queries ****/
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ /* first queue query index depends on whether the FCoE offloaded request will
+ * be included in the ramrod
+ */
+ if (!NO_FCOE(bp))
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+ else
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
for_each_eth_queue(bp, i) {
cur_query_entry =
&bp->fw_stats_req->
- query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+ query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_data_offset += sizeof(struct per_queue_stats);
}
+
+ /* add FCoE queue query if needed */
+ if (!NO_FCOE(bp)) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
}
void bnx2x_stats_init(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f6afe..683deb05310 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
+
+ /* PFC */
+ u32 pfc_frames_received_hi;
+ u32 pfc_frames_received_lo;
+ u32 pfc_frames_sent_hi;
+ u32 pfc_frames_sent_lo;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c693983..dd3a0a232ea 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
return io->data;
}
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ if (reg)
+ info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+ else
+ info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+ info.data.ulp_type = ulp_type;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
}
read_unlock(&cnic_dev_lock);
- rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+ RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
+ cnic_ulp_ctl(dev, ulp_type, true);
+
return 0;
}
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+ cnic_ulp_ctl(dev, ulp_type, false);
+
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1342,7 +1361,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
if (ret == 1)
return 0;
- return -EBUSY;
+ return ret;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
@@ -1830,7 +1849,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
done:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
@@ -1928,7 +1947,7 @@ destroy_reply:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
@@ -2494,6 +2513,57 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
return ret;
}
+static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct kcqe kcqe;
+ struct kcqe *cqes[1];
+ u32 cid;
+ u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+ int ulp_type;
+
+ cid = kwqe->kwqe_info0;
+ memset(&kcqe, 0, sizeof(kcqe));
+
+ if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+ ulp_type = CNIC_ULP_ISCSI;
+ if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
+ cid = kwqe->kwqe_info1;
+
+ kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
+ kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
+ kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ kcqe.kcqe_info2 = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
+
+ } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
+ struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
+ u32 kcqe_op;
+
+ ulp_type = CNIC_ULP_L4;
+ if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ else
+ return;
+
+ kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
+ KCQE_FLAGS_LAYER_MASK_L4;
+ l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ l4kcqe->cid = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
+ } else {
+ return;
+ }
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
+}
+
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
@@ -2551,9 +2621,17 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
opcode);
break;
}
- if (ret < 0)
+ if (ret < 0) {
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
+
+ /* Possibly a bnx2x parity error; send a completion
+ * to the ULP drivers with an error code to speed up
+ * cleanup and reset recovery.
+ */
+ if (ret == -EIO || ret == -EAGAIN)
+ cnic_bnx2x_kwqe_err(dev, kwqe);
+ }
i += work;
}
return 0;
@@ -3052,9 +3130,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
}
}
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+ int rc;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ if (ulp_ops && ulp_ops->cnic_get_stats)
+ rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+ else
+ rc = -ENODEV;
+ mutex_unlock(&cnic_lock);
+ return rc;
+}
+
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
+ int ulp_type = CNIC_ULP_ISCSI;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3195,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
}
break;
}
+ case CNIC_CTL_FCOE_STATS_GET_CMD:
+ ulp_type = CNIC_ULP_FCOE;
+ /* fall through */
+ case CNIC_CTL_ISCSI_STATS_GET_CMD:
+ cnic_hold(dev);
+ cnic_copy_ulp_stats(dev, ulp_type);
+ cnic_put(dev);
+ break;
+
default:
return -EINVAL;
}
@@ -3475,7 +3579,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ fl6.daddr = dst_addr->sin6_addr;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
@@ -3804,6 +3908,9 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+ if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+ set_bit(SK_F_HW_ERR, &csk->flags);
+
cp->close_conn(csk, opcode);
break;
@@ -3931,7 +4038,9 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
if (cnic_ready_to_close(csk, opcode)) {
- if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ if (test_bit(SK_F_HW_ERR, &csk->flags))
+ close_complete = 1;
+ else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
else
close_complete = 1;
@@ -4824,6 +4933,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
int func = CNIC_FUNC(cp), ret;
u32 pfid;
+ dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
@@ -5134,7 +5244,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
- rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
@@ -5288,6 +5398,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 239de898f07..86936f6b6db 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -85,6 +85,7 @@
/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0dbf9..1517763d4e5 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.7"
-#define CNIC_MODULE_RELDATE "July 20, 2011"
+#define CNIC_MODULE_VERSION "2.5.8"
+#define CNIC_MODULE_RELDATE "Jan 3, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -86,6 +86,8 @@ struct kcqe {
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
+#define CNIC_CTL_FCOE_STATS_GET_CMD 5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD 6
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
@@ -96,6 +98,8 @@ struct kcqe {
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
+#define DRV_CTL_ULP_REGISTER_CMD 0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f
struct cnic_ctl_completion {
u32 cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
+ int ulp_type;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
+ union drv_info_to_mcp *addr_drv_info_to_mcp;
};
struct cnic_sockaddr {
@@ -255,6 +261,7 @@ struct cnic_sock {
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
+#define SK_F_HW_ERR 8
atomic_t ref_count;
u32 state;
@@ -297,6 +304,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ union drv_info_to_mcp *stats_addr;
+
void *cnic_priv;
};
@@ -326,6 +335,7 @@ struct cnic_ulp_ops {
void (*cm_remote_abort)(struct cnic_sock *);
int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
+ int (*cnic_get_stats)(void *ulp_ctx);
struct module *owner;
atomic_t ref_count;
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f279fc..8fa7abc53ec 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@ enum sbmac_state {
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
-#define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
@@ -266,7 +265,7 @@ struct sbmac_softc {
int sbm_pause; /* current pause setting */
int sbm_link; /* current link state */
- unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+ unsigned char sbm_hwaddr[ETH_ALEN];
struct sbmacdma sbm_txdma; /* only channel 0 for now */
struct sbmacdma sbm_rxdma;
@@ -2676,15 +2675,4 @@ static struct platform_driver sbmac_driver = {
},
};
-static int __init sbmac_init_module(void)
-{
- return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
- platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6..076e02a415a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 121
+#define TG3_MIN_NUM 122
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "November 2, 2011"
+#define DRV_MODULE_RELDATE "December 7, 2011"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
-#define TG3_RX_OFFSET(tp) 0
+#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX 4096
+#define TG3_TX_BD_DMA_MAX_2K 2048
+#define TG3_TX_BD_DMA_MAX_4K 4096
#define TG3_RAW_IP_ALIGN 2
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
}
}
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
- u16 miireg;
-
- if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
- miireg = ADVERTISE_PAUSE_CAP;
- else if (flow_ctrl & FLOW_CTRL_TX)
- miireg = ADVERTISE_PAUSE_ASYM;
- else if (flow_ctrl & FLOW_CTRL_RX)
- miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- else
- miireg = 0;
-
- return miireg;
-}
-
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_1000XPAUSE) {
- if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_1000XPAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+ if (lcladv & ADVERTISE_1000XPAUSE)
+ cap = FLOW_CTRL_RX;
+ if (rmtadv & ADVERTISE_1000XPAUSE)
cap = FLOW_CTRL_TX;
}
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->duplex == DUPLEX_HALF)
mac_mode |= MAC_MODE_HALF_DUPLEX;
else {
- lcl_adv = tg3_advert_flowctrl_1000T(
+ lcl_adv = mii_advertise_flowctrl(
tp->link_config.flowctrl);
if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
if (tp->link_config.active_speed == SPEED_1000 &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ tg3_flag(tp, 57765_CLASS)) &&
!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
- if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
u32 val, new_adv;
new_adv = ADVERTISE_CSMA;
- if (advertise & ADVERTISED_10baseT_Half)
- new_adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- new_adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- new_adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- new_adv |= ADVERTISE_100FULL;
-
- new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+ new_adv |= mii_advertise_flowctrl(flowctrl);
err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (err)
goto done;
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- goto done;
-
- new_adv = 0;
- if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000HALF;
- if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000FULL;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
- new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
- err = tg3_writephy(tp, MII_CTRL1000, new_adv);
- if (err)
- goto done;
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
+ case ASIC_REV_57766:
case ASIC_REV_5719:
/* If we advertised any eee advertisements above... */
if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
- u32 adv_reg, all_mask = 0;
+ u32 advmsk, tgtadv, advertising;
- if (mask & ADVERTISED_10baseT_Half)
- all_mask |= ADVERTISE_10HALF;
- if (mask & ADVERTISED_10baseT_Full)
- all_mask |= ADVERTISE_10FULL;
- if (mask & ADVERTISED_100baseT_Half)
- all_mask |= ADVERTISE_100HALF;
- if (mask & ADVERTISED_100baseT_Full)
- all_mask |= ADVERTISE_100FULL;
+ advertising = tp->link_config.advertising;
+ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
- if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
- return 0;
+ advmsk = ADVERTISE_ALL;
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ }
- if ((adv_reg & ADVERTISE_ALL) != all_mask)
- return 0;
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return false;
+
+ if ((*lcladv & advmsk) != tgtadv)
+ return false;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
- all_mask = 0;
- if (mask & ADVERTISED_1000baseT_Half)
- all_mask |= ADVERTISE_1000HALF;
- if (mask & ADVERTISED_1000baseT_Full)
- all_mask |= ADVERTISE_1000FULL;
+ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
- return 0;
+ return false;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
- if (tg3_ctrl != all_mask)
- return 0;
+ if (tg3_ctrl != tgtadv)
+ return false;
}
- return 1;
+ return true;
}
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
- u32 curadv, reqadv;
+ u32 lpeth = 0;
- if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
- return 1;
-
- curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 val;
- if (tp->link_config.active_duplex == DUPLEX_FULL) {
- if (curadv != reqadv)
- return 0;
+ if (tg3_readphy(tp, MII_STAT1000, &val))
+ return false;
- if (tg3_flag(tp, PAUSE_AUTONEG))
- tg3_readphy(tp, MII_LPA, rmtadv);
- } else {
- /* Reprogram the advertisement register, even if it
- * does not affect the current link. If the link
- * gets renegotiated in the future, we can save an
- * additional renegotiation cycle by advertising
- * it correctly in the first place.
- */
- if (curadv != reqadv) {
- *lcladv &= ~(ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
- }
+ lpeth = mii_stat1000_to_ethtool_lpa_t(val);
}
- return 1;
+ if (tg3_readphy(tp, MII_LPA, rmtadv))
+ return false;
+
+ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+ tp->link_config.rmt_adv = lpeth;
+
+ return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+ tp->link_config.rmt_adv = 0;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
if ((bmcr & BMCR_ANENABLE) &&
- tg3_copper_is_advertising_all(tp,
- tp->link_config.advertising)) {
- if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
- &rmt_adv))
- current_link_up = 1;
- }
+ tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+ current_link_up = 1;
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
if (current_link_up == 1 &&
- tp->link_config.active_duplex == DUPLEX_FULL)
+ tp->link_config.active_duplex == DUPLEX_FULL) {
+ u32 reg, bit;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ reg = MII_TG3_FET_GEN_STAT;
+ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+ } else {
+ reg = MII_TG3_EXT_STAT;
+ bit = MII_TG3_EXT_STAT_MDIX;
+ }
+
+ if (!tg3_readphy(tp, reg, &val) && (val & bit))
+ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
}
relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
if (rxflags & MR_LP_ADV_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
udelay(40);
current_link_up = 0;
+ tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
/* do nothing, just check for link up at the end */
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 adv, new_adv;
+ u32 adv, newadv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
- new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
- ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM |
- ADVERTISE_SLCT);
+ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
- new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000XHALF;
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
-
- if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, newadv);
bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
tg3_writephy(tp, MII_BMCR, bmcr);
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
+
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
}
+ pkts_compl++;
+ bytes_compl += skb->len;
+
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
+ netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
- if (!ri->skb)
+ if (!ri->data)
return;
pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(ri->skb);
- ri->skb = NULL;
+ kfree(ri->data);
+ ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
u32 opaque_key, u32 dest_idx_unmasked)
{
struct tg3_rx_buffer_desc *desc;
struct ring_info *map;
- struct sk_buff *skb;
+ u8 *data;
dma_addr_t mapping;
- int skb_size, dest_idx;
+ int skb_size, data_size, dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
- skb_size = tp->rx_pkt_map_sz;
+ data_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
- skb_size = TG3_RX_JMB_MAP_SZ;
+ data_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
- if (skb == NULL)
+ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc(skb_size, GFP_ATOMIC);
+ if (!data)
return -ENOMEM;
- skb_reserve(skb, TG3_RX_OFFSET(tp));
-
- mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ mapping = pci_map_single(tp->pdev,
+ data + TG3_RX_OFFSET(tp),
+ data_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- map->skb = skb;
+ map->data = data;
dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
- return skb_size;
+ return data_size;
}
/* We only need to move over in the address because the other
* members of the RX descriptor are invariant. See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
*/
static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
return;
}
- dest_map->skb = src_map->skb;
+ dest_map->data = src_map->data;
dma_unmap_addr_set(dest_map, mapping,
dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
*/
smp_wmb();
- src_map->skb = NULL;
+ src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ u8 *data;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &jmb_prod_idx;
} else
goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ prefetch(data + TG3_RX_OFFSET(tp));
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
*post_ptr);
if (skb_size < 0)
goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- /* Ensure that the update to the skb happens
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
smp_wmb();
- ri->skb = NULL;
+ ri->data = NULL;
- skb_put(skb, len);
} else {
- struct sk_buff *copy_skb;
-
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len +
- TG3_RAW_IP_ALIGN);
- if (copy_skb == NULL)
+ skb = netdev_alloc_skb(tp->dev,
+ len + TG3_RAW_IP_ALIGN);
+ if (skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
- skb_put(copy_skb, len);
+ skb_reserve(skb, TG3_RAW_IP_ALIGN);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
+ memcpy(skb->data,
+ data + TG3_RX_OFFSET(tp),
+ len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
- /* We'll reuse the original ring buffer. */
- skb = copy_skb;
}
+ skb_put(skb, len);
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_std_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_std_buffers[i].skb) {
+ if (dpr->rx_std_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_jmb_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_jmb_buffers[i].skb) {
+ if (dpr->rx_jmb_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
bool hwbug = false;
if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- hwbug = 1;
+ hwbug = true;
if (tg3_4g_overflow_test(map, len))
- hwbug = 1;
+ hwbug = true;
if (tg3_40bit_overflow_test(tp, map, len))
- hwbug = 1;
+ hwbug = true;
- if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ if (tp->dma_limit) {
u32 prvidx = *entry;
u32 tmp_flag = flags & ~TXD_FLAG_END;
- while (len > TG3_TX_BD_DMA_MAX && *budget) {
- u32 frag_len = TG3_TX_BD_DMA_MAX;
- len -= TG3_TX_BD_DMA_MAX;
+ while (len > tp->dma_limit && *budget) {
+ u32 frag_len = tp->dma_limit;
+ len -= tp->dma_limit;
/* Avoid the 8byte DMA problem */
if (len <= 8) {
- len += TG3_TX_BD_DMA_MAX / 2;
- frag_len = TG3_TX_BD_DMA_MAX / 2;
+ len += tp->dma_limit / 2;
+ frag_len = tp->dma_limit / 2;
}
tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
*budget -= 1;
*entry = NEXT_TX(*entry);
} else {
- hwbug = 1;
+ hwbug = true;
tnapi->tx_buffers[prvidx].fragmented = false;
}
}
@@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ netdev_sent_queue(tp->dev, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
return 0;
}
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
}
}
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) & tp->rx_std_ring_mask)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
for (i = 0; i <= tp->rx_std_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX standard ring. Only "
"%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX jumbo ring. Only %d "
"out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
}
+ netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
if (tnapi->hw_status)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
}
- if (tp->hw_stats)
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return err;
}
@@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tg3_flag(tp, PCI_EXPRESS))
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
- else {
- pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- tp->pci_cacheline_sz);
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
+ if (!tg3_flag(tp, PCI_EXPRESS)) {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
/* Clear error status */
pci_write_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
return 0;
}
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+ struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+ struct tg3_ethtool_stats *);
+
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
@@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
+ if (tp->hw_stats) {
+ /* Save the stats across chip resets... */
+ tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+ tg3_get_estats(tp, &tp->estats_prev);
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ }
+
if (err)
return err;
@@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
return;
- if (!tg3_flag(tp, 5705_PLUS))
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
- else
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
@@ -8231,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] =
+ ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return;
+
+ if (tp->irq_cnt <= 2) {
+ memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+ return;
+ }
+
+ /* Validate table against current IRQ count */
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ break;
+ }
+
+ if (i != TG3_RSS_INDIR_TBL_SIZE)
+ tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ u32 val = tp->rss_ind_tbl[i];
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= tp->rss_ind_tbl[i];
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+}
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -8337,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tg3_flag(tp, 57765_CLASS)) {
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
@@ -8425,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ if (!tg3_flag(tp, 57765_CLASS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -8581,10 +8618,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = TG3_RX_STD_MAX_SIZE_5700;
- else
- val = TG3_RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_RING_SIZE(tp);
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
@@ -8661,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
@@ -8924,28 +8961,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
- int i = 0;
- u32 reg = MAC_RSS_INDIR_TBL_0;
-
- if (tp->irq_cnt == 2) {
- for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
- tw32(reg, 0x0);
- reg += 4;
- }
- } else {
- u32 val;
-
- while (i < TG3_RSS_INDIR_TBL_SIZE) {
- val = i % (tp->irq_cnt - 1);
- i++;
- for (; i % 8; i++) {
- val <<= 4;
- val |= (i % (tp->irq_cnt - 1));
- }
- tw32(reg, val);
- reg += 4;
- }
- }
+ tg3_rss_write_indir_tbl(tp);
/* Setup the "secret" hash key. */
tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9018,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (tg3_flag(tp, 57765_CLASS))
val = 1;
else
val = 2;
@@ -9217,7 +9233,7 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9669,6 +9685,8 @@ static int tg3_open(struct net_device *dev)
*/
tg3_ints_init(tp);
+ tg3_rss_check_indir_tbl(tp);
+
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
@@ -9700,8 +9718,8 @@ static int tg3_open(struct net_device *dev)
tg3_free_rings(tp);
} else {
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9782,10 +9800,6 @@ err_out1:
return err;
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
- struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
static int tg3_close(struct net_device *dev)
{
int i;
@@ -9817,10 +9831,9 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
- memcpy(&tp->estats_prev, tg3_get_estats(tp),
- sizeof(tp->estats_prev));
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_napi_fini(tp);
@@ -9868,9 +9881,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
estats->member = old_estats->member + \
get_stat64(&hw_stats->member)
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+ struct tg3_ethtool_stats *estats)
{
- struct tg3_ethtool_stats *estats = &tp->estats;
struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -10318,12 +10331,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev)) {
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
+ cmd->lp_advertising = tp->link_config.rmt_adv;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
} else {
ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10449,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->fw_version, tp->fw_ver);
- strcpy(info->bus_info, pci_name(tp->pdev));
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10611,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
- if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX)
epause->rx_pause = 1;
else
epause->rx_pause = 0;
- if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX)
epause->tx_pause = 1;
else
epause->tx_pause = 0;
@@ -10715,6 +10736,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (netif_running(tp->dev))
+ info->data = tp->irq_cnt;
+ else {
+ info->data = num_online_cpus();
+ if (info->data > TG3_IRQ_MAX_VECS_RSS)
+ info->data = TG3_IRQ_MAX_VECS_RSS;
+ }
+
+ /* The first interrupt vector only
+ * handles link interrupts.
+ */
+ info->data -= 1;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+ u32 size = 0;
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ size = TG3_RSS_INDIR_TBL_SIZE;
+
+ return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ indir[i] = tp->rss_ind_tbl[i];
+
+ return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ size_t i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] = indir[i];
+
+ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+ return 0;
+
+ /* It is legal to write the indirection
+ * table while the device is running.
+ */
+ tg3_full_lock(tp, 0);
+ tg3_rss_write_indir_tbl(tp);
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -10769,7 +10862,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
- memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11446,7 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
@@ -11400,8 +11494,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
- struct sk_buff *skb, *rx_skb;
- u8 *tx_data;
+ struct sk_buff *skb;
+ u8 *tx_data, *rx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11663,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
}
if (opaque_key == RXD_OPAQUE_RING_STD) {
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ rx_data = tpr->rx_std_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
mapping);
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
mapping);
} else
@@ -11582,15 +11676,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
PCI_DMA_FROMDEVICE);
+ rx_data += TG3_RX_OFFSET(tp);
for (i = data_off; i < rx_len; i++, val++) {
- if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ if (*(rx_data + i) != (u8) (val & 0xff))
goto out;
}
}
err = 0;
- /* tg3_free_rings will unmap and free the rx_skb */
+ /* tg3_free_rings will unmap and free the rx_data */
out:
return err;
}
@@ -11943,6 +12038,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
+ .get_rxnfc = tg3_get_rxnfc,
+ .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
+ .get_rxfh_indir = tg3_get_rxfh_indir,
+ .set_rxfh_indir = tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12711,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13317,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
+ u32 adv = ADVERTISED_Autoneg;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13420,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
- u32 bmsr, mask;
+ u32 bmsr, dummy;
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13433,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_phy_set_wirespeed(tp);
- mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
- if (!tg3_copper_is_advertising_all(tp, mask)) {
+ if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
tp->link_config.flowctrl);
@@ -13460,6 +13555,17 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+ strcpy(tp->board_part_number, "BCM57762");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+ strcpy(tp->board_part_number, "BCM57766");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+ strcpy(tp->board_part_number, "BCM57782");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+ strcpy(tp->board_part_number, "BCM57786");
+ else
+ goto nomatch;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
@@ -13798,7 +13904,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN15_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -13945,7 +14055,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5717_PLUS);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- tg3_flag(tp, 5717_PLUS))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tg3_flag_set(tp, 57765_CLASS);
+
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14110,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tp->fw_needed) {
+ /* For firmware TSO, assume ASF is disabled.
+ * We'll disable TSO later if we discover ASF
+ * is enabled in tg3_get_eeprom_hw_cfg().
+ */
tg3_flag_set(tp, TSO_CAPABLE);
- else {
+ } else {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -14027,6 +14144,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
+ tg3_rss_init_dflt_indir_tbl(tp);
}
}
@@ -14034,9 +14152,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
- tg3_flag_set(tp, 4K_FIFO_LIMIT);
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
- if (tg3_flag(tp, 5717_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14178,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tp->pcie_readrq = 2048;
-
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ int readrq = pcie_get_readrq(tp->pdev);
+ if (readrq > 2048)
+ pcie_set_readrq(tp->pdev, 2048);
+ }
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14394,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
@@ -14311,7 +14438,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14675,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tg3_flag_clear(tp, POLL_SERDES);
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
+ tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -15313,7 +15440,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
- u32 features = 0;
+ netdev_features_t features = 0;
printk_once(KERN_INFO "%s\n", version);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd049a3..aea8f72c24f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -31,6 +31,8 @@
#define TG3_RX_RET_MAX_SIZE_5705 512
#define TG3_RX_RET_MAX_SIZE_5717 4096
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
/* First 256 bytes are a mirror of PCI config space. */
#define TG3PCI_VENDOR 0x00000000
#define TG3PCI_VENDOR_BROADCOM 0x14e4
@@ -57,6 +59,10 @@
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
#define TG3PCI_DEVICE_TIGON3_5719 0x1657
#define TG3PCI_DEVICE_TIGON3_5720 0x165f
+#define TG3PCI_DEVICE_TIGON3_57762 0x1682
+#define TG3PCI_DEVICE_TIGON3_57766 0x1686
+#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
+#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -168,6 +174,7 @@
#define ASIC_REV_57765 0x57785
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
+#define ASIC_REV_57766 0x57766
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -1340,6 +1347,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_JMB_2K_MMRR 0x00800000
#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
@@ -2174,6 +2182,7 @@
#define MII_TG3_EXT_CTRL_TBI 0x8000
#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_MDIX 0x2000
#define MII_TG3_EXT_STAT_LPASS 0x0100
#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2286,9 @@
#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+#define MII_TG3_FET_GEN_STAT 0x1c
+#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses the new build_skb() API: the RX ring buffer holds only
+ * a pointer to kmalloc()'d data, and the skb is built only after the
+ * hardware has filled the frame.
*/
struct ring_info {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
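The following sketch is not part of the patch; it illustrates the build_skb() receive pattern the comment above describes, using the single-argument build_skb() available in kernels of this patch's vintage. The helper name example_rx_one() and its parameters are invented for illustration; the headroom mirrors the rx_offset change elsewhere in this patch.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/*
 * Illustrative only: consume one hardware-filled RX buffer.  'data' is the
 * kmalloc()'d pointer kept in ring_info->data, 'mapping' its DMA address,
 * 'buf_sz' the mapped length, 'pkt_len' the frame length reported by the NIC.
 */
static void example_rx_one(struct napi_struct *napi, struct device *dev,
			   u8 *data, dma_addr_t mapping,
			   unsigned int buf_sz, unsigned int pkt_len)
{
	struct sk_buff *skb;

	dma_unmap_single(dev, mapping, buf_sz, DMA_FROM_DEVICE);

	skb = build_skb(data);			/* wrap the buffer, no copy */
	if (!skb) {
		kfree(data);
		return;
	}

	/* skip the headroom left in front of the frame (see rx_offset) */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb);
}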
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
#define DUPLEX_INVALID 0xff
#define AUTONEG_INVALID 0xff
u16 active_speed;
+ u32 rmt_adv;
/* When we go in and out of low power mode we need
* to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
TG3_FLAG_NVRAM_BUFFERED,
TG3_FLAG_SUPPORT_MSI,
TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
TG3_FLAG_PCIX_MODE,
TG3_FLAG_PCI_HIGH_SPEED,
TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
TG3_FLAG_TSO_BUG,
- TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
TG3_FLAG_HW_TSO_1,
- TG3_FLAG_5705_PLUS,
- TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
- TG3_FLAG_USING_MSI,
- TG3_FLAG_USING_MSIX,
TG3_FLAG_ICH_WORKAROUND,
- TG3_FLAG_5780_CLASS,
- TG3_FLAG_HW_TSO_2,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
TG3_FLAG_RGMII_EXT_IBND_RX_EN,
TG3_FLAG_RGMII_EXT_IBND_TX_EN,
TG3_FLAG_CLKREQ_BUG,
- TG3_FLAG_5755_PLUS,
TG3_FLAG_NO_NVRAM,
TG3_FLAG_ENABLE_RSS,
TG3_FLAG_ENABLE_TSS,
TG3_FLAG_SHORT_DMA_BUG,
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
- TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
- TG3_FLAG_5717_PLUS,
TG3_FLAG_4K_FIFO_LIMIT,
TG3_FLAG_RESET_TASK_PENDING,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_57765_CLASS,
+ TG3_FLAG_5717_PLUS,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
+ u32 dma_limit;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
unsigned long rx_dropped;
unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
- struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;
u32 phy_otp;
u32 setlpicnt;
+ u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
#define TG3_BPN_SIZE 24
char board_part_number[TG3_BPN_SIZE];
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 74d3abca196..6027302ae73 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BNA) += bna.o
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 8e627186507..29f284f79e0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -185,6 +185,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
+ * bfa_cee_get_attr()
+ *
+ * @brief Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] cee Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+ if (!bfa_nw_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+
+ if (cee->get_attr_pending == true)
+ return BFA_STATUS_DEVBUSY;
+
+ cee->get_attr_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/**
* bfa_cee_isrs()
*
* @brief Handles Mail-box interrupts for CEE module.
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e98d59..93fde633d6f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
u64 dma_pa);
void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+ struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 2f12d68021d..871c6309334 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -219,41 +219,39 @@ enum {
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block {
- u8 version; /*!< manufacturing block version */
- u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
- u16 mfgsize; /*!< mfg block size */
- u16 u16_chksum; /*!< old u16 checksum */
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
- u8 mfg_day; /*!< manufacturing day */
- u8 mfg_month; /*!< manufacturing month */
- u16 mfg_year; /*!< manufacturing year */
- u64 mfg_wwn; /*!< wwn base for this adapter */
- u8 num_wwn; /*!< number of wwns assigned */
- u8 mfg_speeds; /*!< speeds allowed for this adapter */
- u8 rsv[2];
- char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
- char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
- char
- supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
- char
- supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /*!< mac address */
- u8 num_mac; /*!< number of mac addresses */
- u8 rsv2;
- u32 card_type; /*!< card type */
- char cap_nic; /*!< capability nic */
- char cap_cna; /*!< capability cna */
- char cap_hba; /*!< capability hba */
- char cap_fc16g; /*!< capability fc 16g */
- char cap_sriov; /*!< capability sriov */
- char cap_mezz; /*!< capability mezz */
- u8 rsv3;
- u8 mfg_nports; /*!< number of ports */
- char media[8]; /*!< xfi/xaui */
- char initial_mode[8];/*!< initial mode: hba/cna/nic */
- u8 rsv4[84];
- u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+ u8 version; /* manufacturing block version */
+ u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
+ u16 mfgsize; /* mfg block size */
+ u16 u16_chksum; /* old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u64 mfg_wwn; /* wwn base for this adapter */
+ u8 num_wwn; /* number of wwns assigned */
+ u8 mfg_speeds; /* speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /* base mac address */
+ u8 num_mac; /* number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /* card type */
+ char cap_nic; /* capability nic */
+ char cap_cna; /* capability cna */
+ char cap_hba; /* capability hba */
+ char cap_fc16g; /* capability fc 16g */
+ char cap_sriov; /* capability sriov */
+ char cap_mezz; /* capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /* number of ports */
+ char media[8]; /* xfi/xaui */
+ char initial_mode[8]; /* initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
BFA_MODE_NIC = 3
};
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE 0x400000
+#define BFA_FLASH_PART_MFG 7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b0307a00a10..abfad275b5f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
+ bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] tbuf app memory to store data from smem
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+ u32 pgnum, loff, r32;
+ int i, len;
+ u32 *buf = tbuf;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ return 1;
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32);
+ for (i = 0; i < len; i++) {
+ r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ buf[i] = be32_to_cpu(r32);
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * release semaphore
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+ return 0;
+}
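A small user-space illustration (not driver code) of the page/offset split that the loop above performs when reading SMEM through the host window: once the page offset wraps to zero, the next window must be selected via host_page_num_fn. The 32 KB window size is an assumption for this sketch; the real values come from the driver's PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF() macros.

#include <stdio.h>

#define SMEM_WIN_SZ 0x8000u		/* assumed 32 KB host window */

int main(void)
{
	unsigned int soff = 0x7ff8;	/* arbitrary flat SMEM offset */
	unsigned int i;

	/* the third word crosses into the next window (page 1, offset 0) */
	for (i = 0; i < 4; i++, soff += 4)
		printf("word %u: page %u, page offset 0x%04x\n",
		       i, soff / SMEM_WIN_SZ, soff % SMEM_WIN_SZ);
	return 0;
}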
+
+/**
+ * Read the current firmware trace from SMEM into the caller's buffer.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+ int tlen, status = 0;
+
+ tlen = *trclen;
+ if (tlen > BNA_DBG_FWTRC_LEN)
+ tlen = BNA_DBG_FWTRC_LEN;
+
+ status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+ *trclen = tlen;
+ return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = 0;
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+ }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+ bfa_nw_ioc_debug_save_ftrc(ioc);
}
/**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
@@ -2172,6 +2293,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
}
/**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
msecs_to_jiffies(BFA_IOC_POLL_TOV));
}
}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer should be big enough to hold both the MFG block and
+ * the ASIC block (64k) at the same time, and it should be 2k aligned so
+ * that no write segment crosses a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_flash *flash = cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+ struct bfi_flash_write_req *msg =
+ (struct bfi_flash_write_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
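A host-side sketch (not driver code) of the chunking that bfa_flash_write_send() performs: the transfer is cut into DMA-buffer-sized pieces, residue/offset track progress, and 'last' marks the final request (in the driver the next piece is sent from the write-response handler rather than a loop). The 64 KB chunk size is a stand-in for BFA_FLASH_DMA_BUF_SZ.

#include <stdio.h>

#define CHUNK_SZ 65536u			/* stand-in for BFA_FLASH_DMA_BUF_SZ */

static void send_write_req(unsigned int off, unsigned int len, int last)
{
	printf("write req: offset=%u len=%u last=%d\n", off, len, last);
}

static void flash_update(unsigned int total)
{
	unsigned int residue = total, offset = 0;

	while (residue) {
		unsigned int len = residue < CHUNK_SZ ? residue : CHUNK_SZ;

		send_write_req(offset, len, len == residue);
		residue -= len;
		offset += len;
	}
}

int main(void)
{
	flash_update(150000);	/* three requests; only the last is flagged */
	return 0;
}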
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash *flash = cbarg;
+ struct bfi_flash_read_req *msg =
+ (struct bfi_flash_read_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+ struct bfa_flash *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp *query;
+ struct bfi_flash_write_rsp *write;
+ struct bfi_flash_read_rsp *read;
+ struct bfi_mbmsg *msg;
+ } m;
+
+ m.msg = msg;
+
+ /* receiving response after ioc failure */
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+ return;
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr *attr, *f;
+
+ attr = (struct bfa_flash_attr *) flash->ubuf;
+ f = (struct bfa_flash_attr *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_write_send(flash);
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ case BFI_FLASH_I2H_EVENT:
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+ return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+ flash->ioc = ioc;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ struct bfi_flash_query_req *msg =
+ (struct bfi_flash_query_req *) flash->mb.msg;
+
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
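These flash calls are asynchronous; status is delivered through the bfa_cb_flash callback. A condensed sketch of the calling pattern, pairing the call with a completion the way bnad_get_debug_drvinfo() further down drives bfa_nw_cee_get_attr() and bfa_nw_flash_get_attr(); example_flash_read() itself is not part of the patch.

#include <linux/completion.h>
#include <linux/spinlock.h>
#include "bnad.h"	/* struct bnad, bnad_iocmd_comp, bnad_cb_completion */

static int example_flash_read(struct bnad *bnad, u32 part_type,
			      void *buf, u32 len, u32 offset)
{
	struct bnad_iocmd_comp fcomp;
	unsigned long flags;
	enum bfa_status ret;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;
	init_completion(&fcomp.comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_read_part(&bnad->bna.flash, part_type, 0,
				     buf, len, offset,
				     bnad_cb_completion, &fcomp);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (ret != BFA_STATUS_OK)
		return ret;

	wait_for_completion(&fcomp.comp);	/* bnad_cb_completion() fires here */
	return fcomp.comp_status;
}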
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index ca158d1eaef..3b4460fdc14 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,8 @@
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_POLL_TOV 200 /* msecs */
+#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+ BFI_IOC_TRC_HDR_SZ)
/**
* PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
struct bfa_ioc_regs {
void __iomem *hfn_mbox_cmd;
void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+ struct bfa_ioc *ioc; /* back pointer to ioc */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ enum bfa_status status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ bfa_cb_flash cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd mb; /* mailbox */
+ struct bfa_ioc_notify ioc_notify; /* ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+ struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+u32 bfa_nw_flash_meminfo(void);
+void bfa_nw_flash_attach(struct bfa_flash *flash,
+ struct bfa_ioc *ioc, void *dev);
+void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 7a1393aabd4..0d9df695397 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -83,6 +83,14 @@ union bfi_addr_u {
} a32;
};
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
*/
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
+#define BFI_IOC_TRC_ENT_SZ 16
+#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
#define BFI_IOC_MD5SUM_SZ 4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
u16 len;
};
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
#pragma pack()
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 26f5c5abfd1..9ccc586e376 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
/**
* Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
kva += bfa_nw_cee_meminfo();
dma += bfa_nw_cee_meminfo();
+ bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+ bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+ kva += bfa_nw_flash_meminfo();
+ dma += bfa_nw_flash_meminfo();
+
bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
bfa_msgq_memclaim(&bna->msgq, kva, dma);
bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
(bfa_nw_cee_meminfo() +
- bfa_msgq_meminfo()), PAGE_SIZE);
+ bfa_nw_flash_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
/* DMA memory for retrieving IOC attributes */
res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
/* Virtual memory for retreiving fw_trc */
res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
/* DMA memory for retreiving stats */
res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d090fbfb12f..e8d3ab7ea6c 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -427,7 +427,7 @@ struct bna_ethport {
/* Doorbell structure */
struct bna_ib_dbell {
- void *__iomem doorbell_addr;
+ void __iomem *doorbell_addr;
u32 doorbell_ack;
};
@@ -463,7 +463,7 @@ struct bna_tcb {
u32 consumer_index;
volatile u32 *hw_consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
struct bna_ib_dbell *i_dbell;
int page_idx;
int page_count;
@@ -599,7 +599,7 @@ struct bna_rcb {
u32 producer_index;
u32 consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
int page_idx;
int page_count;
/* Control path */
@@ -966,6 +966,7 @@ struct bna {
struct bna_ioceth ioceth;
struct bfa_cee cee;
+ struct bfa_flash flash;
struct bfa_msgq msgq;
struct bna_ethport ethport;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7f3091e7eb4..be7d91e4b78 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+ " Range[false:0|true:1]");
+
/*
* Global variables
*/
u32 bnad_rxqs_per_cq = 2;
-
+static u32 bna_id;
+static struct mutex bnad_list_mutex;
+static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
@@ -75,6 +82,23 @@ do { \
#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_add_tail(&bnad->list_entry, &bnad_list);
+ bnad->id = bna_id++;
+ mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_del(&bnad->list_entry);
+ mutex_unlock(&bnad_list_mutex);
+}
+
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -723,7 +747,7 @@ void
bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status link_status)
{
- bool link_up = 0;
+ bool link_up = false;
link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
complete(&bnad->bnad_completions.mtu_comp);
}
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+ struct bnad_iocmd_comp *iocmd_comp =
+ (struct bnad_iocmd_comp *)arg;
+
+ iocmd_comp->comp_status = (u32) status;
+ complete(&iocmd_comp->comp);
+}
+
/* Resource allocation, free functions */
static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
-static void
+static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
-static void
+static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
+ mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
+ mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
goto disable_device;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = 1;
+ *using_dac = true;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
if (err)
goto release_regions;
}
- *using_dac = 0;
+ *using_dac = false;
}
pci_set_master(pdev);
return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
return err;
}
bnad = netdev_priv(netdev);
-
bnad_lock_init(bnad);
+ bnad_add_to_list(bnad);
mutex_lock(&bnad->conf_mutex);
/*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set link to down state */
netif_carrier_off(netdev);
+ /* Setup the debugfs node for this bfad */
+ if (bna_debugfs_enable)
+ bnad_debugfs_init(bnad);
+
/* Get resource requirement form bna */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
res_free:
bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5487ca42d01..55824d92699 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -124,6 +124,12 @@ enum bnad_link_state {
BNAD_LS_UP = 1
};
+struct bnad_iocmd_comp {
+ struct bnad *bnad;
+ struct completion comp;
+ int comp_status;
+};
+
struct bnad_completion {
struct completion ioc_comp;
struct completion ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
struct bnad {
struct net_device *netdev;
+ u32 id;
+ struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,12 +328,26 @@ struct bnad {
char adapter_name[BNAD_NAME_LEN];
char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
+
+ /* debugfs specific data */
+ char *regdata;
+ u32 reglen;
+ struct dentry *bnad_dentry_files[5];
+ struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+ struct bfa_ioc_attr ioc_attr;
+ struct bfa_cee_attr cee_attr;
+ struct bfa_flash_attr flash_attr;
+ u32 cee_status;
+ u32 flash_status;
};
/*
* EXTERN VARIABLES
*/
-extern struct firmware *bfi_fw;
+extern const struct firmware *bfi_fw;
extern u32 bnad_rxqs_per_cq;
/*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
extern int bnad_enable_default_bcast(struct bnad *bnad);
extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
+/* Debugfs */
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
+
/**
* MACROS
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644
index 00000000000..592ad3929f5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debugfs interface
+ *
+ * To access the interface, the debugfs file system must be mounted (if it
+ * is not already) using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ * - bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ * fwtrc:  To collect the current firmware trace.
+ * fwsave: To collect the firmware trace saved after a firmware crash.
+ * regwr:  To write one word to a chip register.
+ * regrd:  To read one or more words from chip registers.
+ */
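A user-space sketch (not part of the patch) of driving the regrd node: write "address:count" in hex, which bnad_debugfs_write_regrd() below parses with sscanf("%x:%x"), rewind, then read back the raw 32-bit register values. The pci_dev name is a placeholder and the offset is arbitrary.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* placeholder device name; pick yours from /sys/kernel/debug/bna/ */
	const char *node = "/sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd";
	uint32_t regs[4];
	ssize_t n;
	int i, fd;

	fd = open(node, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ask for 4 words starting at (arbitrary) chip offset 0x0 */
	if (write(fd, "0:4\n", 4) < 0) {
		perror("write");
		close(fd);
		return 1;
	}

	lseek(fd, 0, SEEK_SET);		/* rewind before reading the result */
	n = read(fd, regs, sizeof(regs));
	for (i = 0; i < n / 4; i++)
		printf("word %d: 0x%08x\n", i, (unsigned)regs[i]);

	close(fd);
	return 0;
}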
+
+struct bnad_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bnad %s: Failed to collect fwtrc\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to collect fwsave\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *reg_debug;
+
+ reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!reg_debug)
+ return -ENOMEM;
+
+ reg_debug->i_private = inode->i_private;
+
+ file->private_data = reg_debug;
+
+ return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+ struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+ struct bnad_iocmd_comp fcomp;
+ unsigned long flags = 0;
+ int ret = BFA_STATUS_FAILED;
+
+ /* Get IOC info */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Retrieve CEE related info */
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->cee_status = fcomp.comp_status;
+
+ /* Retrieve flash partition info */
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->flash_status = fcomp.comp_status;
+out:
+ return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *drv_info;
+ int rc;
+
+ drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!drv_info)
+ return -ENOMEM;
+
+ drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+ drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+ if (!drv_info->debug_buffer) {
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to allocate drv info buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ mutex_lock(&bnad->conf_mutex);
+ rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+ drv_info->buffer_len);
+ mutex_unlock(&bnad->conf_mutex);
+ if (rc != BFA_STATUS_OK) {
+ kfree(drv_info->debug_buffer);
+ drv_info->debug_buffer = NULL;
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to collect drvinfo\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = drv_info;
+
+ return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t pos = file->f_pos;
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return -EINVAL;
+
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ break;
+ case 1:
+ file->f_pos += offset;
+ break;
+ case 2:
+ file->f_pos = debug->buffer_len - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+ file->f_pos = pos;
+ return -EINVAL;
+ }
+
+ return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug || !debug->debug_buffer)
+ return 0;
+
+ return simple_read_from_buffer(buf, nbytes, pos,
+ debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ (0x40000)
+#define BFA_REG_CB_ADDRSZ (0x20000)
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+ u8 area;
+
+ /* check [16:15] */
+ area = (offset >> 15) & 0x7;
+ if (area == 0) {
+ /* PCIe core register */
+ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else if (area == 0x1) {
+ /* CB 32 KB memory page */
+ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else {
+ /* CB register space 64KB */
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ ssize_t rc;
+
+ if (!bnad->regdata)
+ return 0;
+
+ rc = simple_read_from_buffer(buf, nbytes, pos,
+ bnad->regdata, bnad->reglen);
+
+ if ((*pos + nbytes) >= bnad->reglen) {
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ }
+
+ return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, len, rc, i;
+ u32 *regbuf;
+ void __iomem *rb, *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+
+ kfree(kern_buf);
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+
+ bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+ if (!bnad->regdata) {
+ pr_warn("bna %s: Failed to allocate regrd buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ bnad->reglen = len << 2;
+ rb = bfa_ioc_bar0(ioc);
+ addr &= BFA_REG_ADDRMSK(ioc);
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, len);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ return -EINVAL;
+ }
+
+ reg_addr = rb + addr;
+ regbuf = (u32 *)bnad->regdata;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < len; i++) {
+ *regbuf = readl(reg_addr);
+ regbuf++;
+ reg_addr += sizeof(u32);
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, val, rc;
+ void __iomem *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ kfree(kern_buf);
+
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, 1);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ return -EINVAL;
+ }
+
+ reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ writel(val, reg_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ kfree(debug->debug_buffer);
+
+ file->private_data = NULL;
+ kfree(debug);
+ debug = NULL;
+ return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwtrc,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwsave,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read_regrd,
+ .write = bnad_debugfs_write_regrd,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .write = bnad_debugfs_write_regwr,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_drvinfo,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+ { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+ { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+ { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+ const struct bnad_debugfs_entry *file;
+ char name[64];
+ int i;
+
+ /* Setup the BNA debugfs root directory */
+ if (!bna_debugfs_root) {
+ bna_debugfs_root = debugfs_create_dir("bna", NULL);
+ atomic_set(&bna_debugfs_port_count, 0);
+ if (!bna_debugfs_root) {
+ pr_warn("BNA: debugfs root dir creation failed\n");
+ return;
+ }
+ }
+
+ /* Setup the pci_dev debugfs directory for the port */
+ snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+ if (!bnad->port_debugfs_root) {
+ bnad->port_debugfs_root =
+ debugfs_create_dir(name, bna_debugfs_root);
+ if (!bnad->port_debugfs_root) {
+ pr_warn("bna pci_dev %s: root dir creation failed\n",
+ pci_name(bnad->pcidev));
+ return;
+ }
+
+ atomic_inc(&bna_debugfs_port_count);
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ file = &bnad_debugfs_files[i];
+ bnad->bnad_dentry_files[i] =
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
+ if (!bnad->bnad_dentry_files[i]) {
+ pr_warn(
+ "BNA pci_dev:%s: create %s entry failed\n",
+ pci_name(bnad->pcidev), file->name);
+ return;
+ }
+ }
+ }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ if (bnad->bnad_dentry_files[i]) {
+ debugfs_remove(bnad->bnad_dentry_files[i]);
+ bnad->bnad_dentry_files[i] = NULL;
+ }
+ }
+
+ /* Remove the pci_dev debugfs directory for the port */
+ if (bnad->port_debugfs_root) {
+ debugfs_remove(bnad->port_debugfs_root);
+ bnad->port_debugfs_root = NULL;
+ atomic_dec(&bna_debugfs_port_count);
+ }
+
+ /* Remove the BNA debugfs root directory */
+ if (atomic_read(&bna_debugfs_port_count) == 0) {
+ debugfs_remove(bna_debugfs_root);
+ bna_debugfs_root = NULL;
+ }
+}
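
The regrd and regwr files accept a hex "offset:len" (or "offset:value") pair, as parsed by the sscanf() calls above; regrd then returns the captured register words on a subsequent read. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and using a hypothetical port at PCI address 0000:03:00.0 (the directory name comes from pci_name()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; adjust the PCI address for the actual port */
	const char *path = "/sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd";
	const char *cmd = "0:4";	/* read 4 words starting at offset 0 */
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0)	/* parsed by sscanf("%x:%x") */
		return 1;
	n = pread(fd, buf, sizeof(buf), 0);	/* 4 x 32-bit register words */
	for (i = 0; i < n; i++)
		printf("%02x%c", buf[i], (i % 4 == 3) ? '\n' : ' ');
	close(fd);
	return 0;
}
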
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index fd3dcc1e914..9b44ec8096b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -38,7 +38,7 @@
sizeof(struct bnad_drv_stats) / sizeof(u64) + \
offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strcpy(drvinfo->driver, BNAD_NAME);
- strcpy(drvinfo->version, BNAD_VERSION);
+ strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
- sizeof(drvinfo->fw_version) - 1);
+ strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -934,7 +935,144 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
}
}
-static struct ethtool_ops bnad_ethtool_ops = {
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+ u32 *base_offset)
+{
+ struct bfa_flash_attr *flash_attr;
+ struct bnad_iocmd_comp fcomp;
+ u32 i, flash_part = 0, ret;
+ unsigned long flags = 0;
+
+ flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+ if (!flash_attr)
+ return -ENOMEM;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ kfree(flash_attr);
+ goto out_err;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+
+ /* Check for the flash type & base offset value */
+ if (ret == BFA_STATUS_OK) {
+ for (i = 0; i < flash_attr->npart; i++) {
+ if (offset >= flash_attr->part[i].part_off &&
+ offset < (flash_attr->part[i].part_off +
+ flash_attr->part[i].part_size)) {
+ flash_part = flash_attr->part[i].part_type;
+ *base_offset = flash_attr->part[i].part_off;
+ break;
+ }
+ }
+ }
+ kfree(flash_attr);
+ return flash_part;
+out_err:
+ return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+ return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash read request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EFAULT;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash update request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EINVAL;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
+static const struct ethtool_ops bnad_ethtool_ops = {
.get_settings = bnad_get_settings,
.set_settings = bnad_set_settings,
.get_drvinfo = bnad_get_drvinfo,
@@ -948,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
.set_pauseparam = bnad_set_pauseparam,
.get_strings = bnad_get_strings,
.get_ethtool_stats = bnad_get_ethtool_stats,
- .get_sset_count = bnad_get_sset_count
+ .get_sset_count = bnad_get_sset_count,
+ .get_eeprom_len = bnad_get_eeprom_len,
+ .get_eeprom = bnad_get_eeprom,
+ .set_eeprom = bnad_set_eeprom,
};
void
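
The get_eeprom/set_eeprom hooks added above expose the adapter flash through the standard ethtool EEPROM ioctls; the magic field must carry the PCI IDs as vendor | (device << 16), as checked in bnad_get_eeprom(). A minimal read sketch, with placeholder IDs that would need to match the actual adapter (e.g. taken from lspci -nn), taking the interface name as argv[1]:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	unsigned int vendor = 0x1657, device = 0x0014; /* placeholders: read from lspci -nn */
	struct ethtool_eeprom *ee;
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	ee = calloc(1, sizeof(*ee) + 256);
	ee->cmd = ETHTOOL_GEEPROM;
	ee->magic = vendor | (device << 16);	/* checked in bnad_get_eeprom() */
	ee->offset = 0;
	ee->len = 256;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)ee;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("ETHTOOL_GEEPROM");
	else
		printf("read %u bytes of flash from %s\n", ee->len, argv[1]);

	free(ee);
	close(fd);
	return 0;
}
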
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1b3e90dfbd9..32e8f178ab7 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -43,8 +43,7 @@ extern char bfa_version[];
#pragma pack(1)
-#define MAC_ADDRLEN (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index 725b9fff337..cfc22a64157 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -16,6 +16,7 @@
* www.brocade.com
*/
#include <linux/firmware.h>
+#include "bnad.h"
#include "bfi.h"
#include "cna.h"
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index b48378a41e4..db931916da0 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -5,8 +5,8 @@
config HAVE_NET_MACB
bool
-config NET_ATMEL
- bool "Atmel devices"
+config NET_CADENCE
+ bool "Cadence devices"
default y
depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
@@ -21,7 +21,7 @@ config NET_ATMEL
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_ATMEL
+if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
@@ -33,14 +33,16 @@ config ARM_AT91_ETHER
ethernet support, then you should always answer Y to this.
config MACB
- tristate "Atmel MACB support"
+ tristate "Cadence MACB/GEM support"
depends on HAVE_NET_MACB
select PHYLIB
---help---
- The Atmel MACB ethernet interface is found on many AT32 and AT91
- parts. Say Y to include support for the MACB chip.
+ The Cadence MACB ethernet interface is found on many Atmel AT32 and
+ AT91 parts. This driver also supports the Cadence GEM (Gigabit
+ Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode
+ is not yet supported. Say Y to include support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
will be called macb.
-endif # NET_ATMEL
+endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 56624d30348..1a5b6efa012 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>
@@ -255,8 +256,7 @@ static void enable_phyirq(struct net_device *dev)
unsigned int dsintr, irq_number;
int status;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
/*
* PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
* or board does not have it connected.
@@ -265,6 +265,7 @@ static void enable_phyirq(struct net_device *dev)
return;
}
+ irq_number = lp->board_data.phy_irq_pin;
status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
if (status) {
printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -319,8 +320,7 @@ static void disable_phyirq(struct net_device *dev)
unsigned int dsintr;
unsigned int irq_number;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
del_timer_sync(&lp->check_timer);
return;
}
@@ -365,6 +365,7 @@ static void disable_phyirq(struct net_device *dev)
disable_mdi();
spin_unlock_irq(&lp->lock);
+ irq_number = lp->board_data.phy_irq_pin;
free_irq(irq_number, dev); /* Free interrupt handler */
}
@@ -984,7 +985,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
struct platform_device *pdev, struct clk *ether_clk)
{
- struct at91_eth_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = pdev->dev.platform_data;
struct net_device *dev;
struct at91_private *lp;
unsigned int val;
@@ -1077,7 +1078,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
netif_carrier_off(dev); /* will be enabled in open() */
/* If board has no PHY IRQ, use a timer to poll the PHY */
- if (!lp->board_data.phy_irq_pin) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
init_timer(&lp->check_timer);
lp->check_timer.data = (unsigned long)dev;
lp->check_timer.function = at91ether_check_link;
@@ -1169,7 +1170,8 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(dev);
- if (lp->board_data.phy_irq_pin >= 32)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin) &&
+ lp->board_data.phy_irq_pin >= 32)
gpio_free(lp->board_data.phy_irq_pin);
unregister_netdev(dev);
@@ -1188,11 +1190,12 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
disable_irq(phy_irq);
+ }
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
@@ -1206,7 +1209,6 @@ static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
clk_enable(lp->ether_clk);
@@ -1214,8 +1216,10 @@ static int at91ether_resume(struct platform_device *pdev)
netif_device_attach(net_dev);
netif_start_queue(net_dev);
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
enable_irq(phy_irq);
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 353f4dab62b..3725fbb0def 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -85,7 +85,9 @@ struct recv_desc_bufs
struct at91_private
{
struct mii_if_info mii; /* ethtool support */
- struct at91_eth_data board_data; /* board-specific configuration */
+ struct macb_platform_data board_data; /* board-specific
+ * configuration (shared with
+ * macb for common data) */
struct clk *ether_clk; /* clock */
/* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a437b46e549..f3d5c65d99c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1,5 +1,5 @@
/*
- * Atmel MACB Ethernet Controller driver
+ * Cadence MACB/GEM Ethernet Controller driver
*
* Copyright (C) 2004-2006 Atmel Corporation
*
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -19,11 +20,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
-
-#include <mach/board.h>
-#include <mach/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include "macb.h"
@@ -60,9 +62,9 @@ static void __macb_set_hwaddr(struct macb *bp)
u16 top;
bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
- macb_writel(bp, SA1B, bottom);
+ macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
- macb_writel(bp, SA1T, top);
+ macb_or_gem_writel(bp, SA1T, top);
}
static void __init macb_get_hwaddr(struct macb *bp)
@@ -71,8 +73,8 @@ static void __init macb_get_hwaddr(struct macb *bp)
u16 top;
u8 addr[6];
- bottom = macb_readl(bp, SA1B);
- top = macb_readl(bp, SA1T);
+ bottom = macb_or_gem_readl(bp, SA1B);
+ top = macb_or_gem_readl(bp, SA1T);
addr[0] = bottom & 0xff;
addr[1] = (bottom >> 8) & 0xff;
@@ -84,7 +86,7 @@ static void __init macb_get_hwaddr(struct macb *bp)
if (is_valid_ether_addr(addr)) {
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
} else {
- dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
+ netdev_info(bp->dev, "invalid hw address, using random\n");
random_ether_addr(bp->dev->dev_addr);
}
}
@@ -178,11 +180,12 @@ static void macb_handle_link_change(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -191,25 +194,21 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev;
- struct eth_platform_data *pdata;
int ret;
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
- printk (KERN_ERR "%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
- pdata = bp->pdev->dev.platform_data;
/* TODO : add pin_irq */
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
- pdata && pdata->is_rmii ?
- PHY_INTERFACE_MODE_RMII :
- PHY_INTERFACE_MODE_MII);
+ bp->phy_interface);
if (ret) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return ret;
}
@@ -228,7 +227,7 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
int err = -ENXIO, i;
/* Enable management port */
@@ -285,8 +284,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
u32 __iomem *reg = bp->regs + MACB_PFR;
- u32 *p = &bp->hw_stats.rx_pause_frames;
- u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+ u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -303,14 +302,13 @@ static void macb_tx(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
- dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
- (unsigned long)status);
+ netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
int i;
- printk(KERN_ERR "%s: TX %s, resetting buffers\n",
- bp->dev->name, status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ netdev_err(bp->dev, "TX %s, resetting buffers\n",
+ status & MACB_BIT(UND) ?
+ "underrun" : "retry limit exceeded");
/* Transfer ongoing, disable transmitter, to avoid confusion */
if (status & MACB_BIT(TGO))
@@ -369,8 +367,8 @@ static void macb_tx(struct macb *bp)
if (!(bufstat & MACB_BIT(TX_USED)))
break;
- dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
+ netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
+ tail, skb->data);
dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
@@ -395,8 +393,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
- dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ first_frag, last_frag, len);
skb = dev_alloc_skb(len + RX_OFFSET);
if (!skb) {
@@ -437,8 +435,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += len;
- dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -515,8 +513,8 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, budget);
work_done = macb_rx(bp, budget);
if (work_done < budget) {
@@ -565,8 +563,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
- dev_dbg(&bp->pdev->dev,
- "scheduling RX softirq\n");
+ netdev_dbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
@@ -582,16 +579,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
- bp->hw_stats.rx_overruns++;
+ if (macb_is_gem(bp))
+ bp->hw_stats.gem.rx_overruns++;
+ else
+ bp->hw_stats.macb.rx_overruns++;
}
if (status & MACB_BIT(HRESP)) {
/*
- * TODO: Reset the hardware, and maybe move the printk
- * to a lower-priority context as well (work queue?)
+ * TODO: Reset the hardware, and maybe move the
+ * netdev_err to a lower-priority context as well
+ * (work queue?)
*/
- printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
- dev->name);
+ netdev_err(dev, "DMA bus error: HRESP not OK\n");
}
status = macb_readl(bp, ISR);
@@ -626,16 +626,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
#ifdef DEBUG
- int i;
- dev_dbg(&bp->pdev->dev,
- "start_xmit: len %u head %p data %p tail %p end %p\n",
- skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
- dev_dbg(&bp->pdev->dev,
- "data:");
- for (i = 0; i < 16; i++)
- printk(" %02x", (unsigned int)skb->data[i]);
- printk("\n");
+ netdev_dbg(bp->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 16, true);
#endif
len = skb->len;
@@ -645,21 +641,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
- dev_err(&bp->pdev->dev,
- "BUG! Tx Ring full when queue awake!\n");
- dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
- bp->tx_head, bp->tx_tail);
+ netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
return NETDEV_TX_BUSY;
}
entry = bp->tx_head;
- dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+ netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
bp->tx_skb[entry].skb = skb;
bp->tx_skb[entry].mapping = mapping;
- dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
- skb->data, (unsigned long)mapping);
+ netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
ctrl |= MACB_BIT(TX_LAST);
@@ -723,27 +718,27 @@ static int macb_alloc_consistent(struct macb *bp)
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
size = TX_RING_BYTES;
bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->tx_ring_dma, GFP_KERNEL);
if (!bp->tx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
size = RX_RING_SIZE * RX_BUFFER_SIZE;
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
@@ -797,6 +792,84 @@ static void macb_reset_hw(struct macb *bp)
macb_readl(bp, ISR);
}
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+ if (pclk_hz <= 20000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV32);
+ else if (pclk_hz <= 120000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV48);
+ else if (pclk_hz <= 160000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV64);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+ return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz;
+
+ if (macb_is_gem(bp))
+ return gem_mdc_clk_div(bp);
+
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+ return config;
+}
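+
+/*
+ * Example: a 125 MHz pclk selects MACB_CLK_DIV64 here and GEM_CLK_DIV64
+ * above, giving an MDC of roughly 2 MHz, safely below the 2.5 MHz MDIO
+ * limit.
+ */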
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program. We decode the design configuration register to find the
+ * maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+ if (!macb_is_gem(bp))
+ return 0;
+
+ switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+ case 4:
+ return GEM_BF(DBW, GEM_DBW128);
+ case 2:
+ return GEM_BF(DBW, GEM_DBW64);
+ case 1:
+ default:
+ return GEM_BF(DBW, GEM_DBW32);
+ }
+}
+
+/*
+ * Configure the receive DMA engine to use the correct receive buffer size.
+ * This is a configurable parameter for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+ u32 dmacfg;
+
+ if (macb_is_gem(bp)) {
+ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+ dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+ gem_writel(bp, DMACFG, dmacfg);
+ }
+}
+
static void macb_init_hw(struct macb *bp)
{
u32 config;
@@ -804,7 +877,7 @@ static void macb_init_hw(struct macb *bp)
macb_reset_hw(bp);
__macb_set_hwaddr(bp);
- config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+ config = macb_mdc_clk_div(bp);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -812,8 +885,11 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
config |= MACB_BIT(NBC); /* No BroadCast */
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ macb_configure_dma(bp);
+
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
macb_writel(bp, TBQP, bp->tx_ring_dma);
@@ -909,8 +985,8 @@ static void macb_sethashtable(struct net_device *dev)
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
- macb_writel(bp, HRB, mc_filter[0]);
- macb_writel(bp, HRT, mc_filter[1]);
+ macb_or_gem_writel(bp, HRB, mc_filter[0]);
+ macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/*
@@ -932,8 +1008,8 @@ static void macb_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
/* Enable all multicast mode */
- macb_writel(bp, HRB, -1);
- macb_writel(bp, HRT, -1);
+ macb_or_gem_writel(bp, HRB, -1);
+ macb_or_gem_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
} else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
@@ -941,8 +1017,8 @@ static void macb_set_rx_mode(struct net_device *dev)
cfg |= MACB_BIT(NCFGR_MTI);
} else if (dev->flags & (~IFF_ALLMULTI)) {
/* Disable all multicast mode */
- macb_writel(bp, HRB, 0);
- macb_writel(bp, HRT, 0);
+ macb_or_gem_writel(bp, HRB, 0);
+ macb_or_gem_writel(bp, HRT, 0);
cfg &= ~MACB_BIT(NCFGR_MTI);
}
@@ -954,7 +1030,7 @@ static int macb_open(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
int err;
- dev_dbg(&bp->pdev->dev, "open\n");
+ netdev_dbg(bp->dev, "open\n");
/* if the phy is not yet registered, retry later */
if (!bp->phy_dev)
@@ -965,9 +1041,8 @@ static int macb_open(struct net_device *dev)
err = macb_alloc_consistent(bp);
if (err) {
- printk(KERN_ERR
- "%s: Unable to allocate DMA memory (error %d)\n",
- dev->name, err);
+ netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+ err);
return err;
}
@@ -1005,11 +1080,62 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static void gem_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + GEM_OTX;
+ u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+ struct net_device_stats *nstat = &bp->stats;
+
+ gem_update_stats(bp);
+
+ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+ hwstat->rx_alignment_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->tx_errors = (hwstat->tx_late_collisions +
+ hwstat->tx_excessive_collisions +
+ hwstat->tx_underrun +
+ hwstat->tx_carrier_sense_errors);
+ nstat->multicast = hwstat->rx_multicast_frames;
+ nstat->collisions = (hwstat->tx_single_collision_frames +
+ hwstat->tx_multiple_collision_frames +
+ hwstat->tx_excessive_collisions);
+ nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+ nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+ return nstat;
+}
+
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
- struct macb_stats *hwstat = &bp->hw_stats;
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ if (macb_is_gem(bp))
+ return gem_get_stats(bp);
/* read stats from hardware */
macb_update_stats(bp);
@@ -1117,14 +1243,59 @@ static const struct net_device_ops macb_netdev_ops = {
#endif
};
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,at32ap7000-macb" },
+ { .compatible = "cdns,at91sam9260-macb" },
+ { .compatible = "cdns,macb" },
+ { .compatible = "cdns,pc302-gem" },
+ { .compatible = "cdns,gem" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ struct device_node *np = bp->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac) {
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+#else
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ return -ENODEV;
+}
+#endif
+
static int __init macb_probe(struct platform_device *pdev)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
struct phy_device *phydev;
- unsigned long pclk_hz;
u32 config;
int err = -ENXIO;
@@ -1152,28 +1323,19 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
-#if defined(CONFIG_ARCH_AT91)
- bp->pclk = clk_get(&pdev->dev, "macb_clk");
+ bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
clk_enable(bp->pclk);
-#else
- bp->pclk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- goto err_out_free_dev;
- }
+
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
-
- clk_enable(bp->pclk);
clk_enable(bp->hclk);
-#endif
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1185,9 +1347,8 @@ static int __init macb_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
if (err) {
- printk(KERN_ERR
- "%s: Unable to request IRQ %d (error %d)\n",
- dev->name, dev->irq, err);
+ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+ dev->irq, err);
goto err_out_iounmap;
}
@@ -1198,31 +1359,37 @@ static int __init macb_probe(struct platform_device *pdev)
dev->base_addr = regs->start;
/* Set MII management clock divider */
- pclk_hz = clk_get_rate(bp->pclk);
- if (pclk_hz <= 20000000)
- config = MACB_BF(CLK, MACB_CLK_DIV8);
- else if (pclk_hz <= 40000000)
- config = MACB_BF(CLK, MACB_CLK_DIV16);
- else if (pclk_hz <= 80000000)
- config = MACB_BF(CLK, MACB_CLK_DIV32);
- else
- config = MACB_BF(CLK, MACB_CLK_DIV64);
+ config = macb_mdc_clk_div(bp);
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- macb_get_hwaddr(bp);
- pdata = pdev->dev.platform_data;
+ err = macb_get_hwaddr_dt(bp);
+ if (err < 0)
+ macb_get_hwaddr(bp);
+
+ err = macb_get_phy_mode_dt(pdev);
+ if (err < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata && pdata->is_rmii)
+ bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ bp->phy_interface = err;
+ }
- if (pdata && pdata->is_rmii)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
+ macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+ MACB_BIT(CLKEN)));
#else
- macb_writel(bp, USRIO, 0);
+ macb_or_gem_writel(bp, USRIO, 0);
#endif
else
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, MACB_BIT(CLKEN));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
- macb_writel(bp, USRIO, MACB_BIT(MII));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
bp->tx_pending = DEF_TX_RING_PENDING;
@@ -1239,13 +1406,13 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+ macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+ dev->irq, dev->dev_addr);
phydev = bp->phy_dev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
- phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -1256,14 +1423,10 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
-#endif
clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
@@ -1289,10 +1452,8 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
@@ -1310,9 +1471,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_device_detach(netdev);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
-#endif
clk_disable(bp->pclk);
return 0;
@@ -1324,9 +1483,7 @@ static int macb_resume(struct platform_device *pdev)
struct macb *bp = netdev_priv(netdev);
clk_enable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
clk_enable(bp->hclk);
-#endif
netif_device_attach(netdev);
@@ -1344,6 +1501,7 @@ static struct platform_driver macb_driver = {
.driver = {
.name = "macb",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(macb_dt_ids),
},
};
@@ -1361,6 +1519,6 @@ module_init(macb_init);
module_exit(macb_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d3212f6db70..335e288f531 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -59,6 +59,24 @@
#define MACB_TPQ 0x00bc
#define MACB_USRIO 0x00c0
#define MACB_WOL 0x00c4
+#define MACB_MID 0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR 0x0004
+#define GEM_USRIO 0x000c
+#define GEM_DMACFG 0x0010
+#define GEM_HRB 0x0080
+#define GEM_HRT 0x0084
+#define GEM_SA1B 0x0088
+#define GEM_SA1T 0x008C
+#define GEM_OTX 0x0100
+#define GEM_DCFG1 0x0280
+#define GEM_DCFG2 0x0284
+#define GEM_DCFG3 0x0288
+#define GEM_DCFG4 0x028c
+#define GEM_DCFG5 0x0290
+#define GEM_DCFG6 0x0294
+#define GEM_DCFG7 0x0298
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0
@@ -126,6 +144,21 @@
#define MACB_IRXFCS_OFFSET 19
#define MACB_IRXFCS_SIZE 1
+/* GEM specific NCFGR bitfields. */
+#define GEM_CLK_OFFSET 18
+#define GEM_CLK_SIZE 3
+#define GEM_DBW_OFFSET 21
+#define GEM_DBW_SIZE 2
+
+/* Constants for data bus width. */
+#define GEM_DBW32 0
+#define GEM_DBW64 1
+#define GEM_DBW128 2
+
+/* Bitfields in DMACFG. */
+#define GEM_RXBS_OFFSET 16
+#define GEM_RXBS_SIZE 8
+
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0
#define MACB_NSR_LINK_SIZE 1
@@ -228,12 +261,30 @@
#define MACB_WOL_MTI_OFFSET 19
#define MACB_WOL_MTI_SIZE 1
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET 16
+#define MACB_IDNUM_SIZE 16
+#define MACB_REV_OFFSET 0
+#define MACB_REV_SIZE 16
+
+/* Bitfields in DCFG1. */
+#define GEM_DBWDEF_OFFSET 25
+#define GEM_DBWDEF_SIZE 3
+
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
#define MACB_CLK_DIV32 2
#define MACB_CLK_DIV64 3
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8 0
+#define GEM_CLK_DIV16 1
+#define GEM_CLK_DIV32 2
+#define GEM_CLK_DIV48 3
+#define GEM_CLK_DIV64 4
+#define GEM_CLK_DIV96 5
+
/* Constants for MAN register */
#define MACB_MAN_SOF 1
#define MACB_MAN_WRITE 1
@@ -254,11 +305,52 @@
<< MACB_##name##_OFFSET)) \
| MACB_BF(name,value))
+#define GEM_BIT(name) \
+ (1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value) \
+ (((value) & ((1 << GEM_##name##_SIZE) - 1)) \
+ << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+ (((value) >> GEM_##name##_OFFSET) \
+ & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old) \
+ (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \
+ << GEM_##name##_OFFSET)) \
+ | GEM_BF(name, value))
+
/* Register access macros */
#define macb_readl(port,reg) \
__raw_readl((port)->regs + MACB_##reg)
#define macb_writel(port,reg,value) \
__raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg) \
+ __raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros. These perform the operation on the correct
+ * register depending on whether the device is a GEM or a MACB. For registers
+ * and bitfields that are common across both devices, use macb_{read,write}l
+ * to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+ ({ \
+ if (macb_is_gem((__bp))) \
+ gem_writel((__bp), __reg, __value); \
+ else \
+ macb_writel((__bp), __reg, __value); \
+ })
+
+#define macb_or_gem_readl(__bp, __reg) \
+ ({ \
+ u32 __v; \
+ if (macb_is_gem((__bp))) \
+ __v = gem_readl((__bp), __reg); \
+ else \
+ __v = macb_readl((__bp), __reg); \
+ __v; \
+ })
struct dma_desc {
u32 addr;
@@ -358,6 +450,54 @@ struct macb_stats {
u32 tx_pause_frames;
};
+struct gem_stats {
+ u32 tx_octets_31_0;
+ u32 tx_octets_47_32;
+ u32 tx_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_byte_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_than_1518_byte_frames;
+ u32 tx_underrun;
+ u32 tx_single_collision_frames;
+ u32 tx_multiple_collision_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_deferred_frames;
+ u32 tx_carrier_sense_errors;
+ u32 rx_octets_31_0;
+ u32 rx_octets_47_32;
+ u32 rx_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_byte_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_than_1518_byte_frames;
+ u32 rx_undersized_frames;
+ u32 rx_oversize_frames;
+ u32 rx_jabbers;
+ u32 rx_frame_check_sequence_errors;
+ u32 rx_length_field_frame_errors;
+ u32 rx_symbol_errors;
+ u32 rx_alignment_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_ip_header_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+};
+
struct macb {
void __iomem *regs;
@@ -376,7 +516,10 @@ struct macb {
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
- struct macb_stats hw_stats;
+ union {
+ struct macb_stats macb;
+ struct gem_stats gem;
+ } hw_stats;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
@@ -389,6 +532,13 @@ struct macb {
unsigned int link;
unsigned int speed;
unsigned int duplex;
+
+ phy_interface_t phy_interface;
};
+static inline bool macb_is_gem(struct macb *bp)
+{
+ return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
#endif /* _MACB_H */
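
The GEM_BF/GEM_BFEXT/GEM_BFINS helpers added above follow the same pattern as the existing MACB_* macros. A small standalone sketch using the CLK field (offset 18, width 3, as defined in this header) shows how a field is replaced without disturbing the rest of the register:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GEM_CLK_OFFSET	18
#define GEM_CLK_SIZE	3
#define GEM_CLK_DIV48	3

#define GEM_BF(name, value) \
	(((value) & ((1 << GEM_##name##_SIZE) - 1)) << GEM_##name##_OFFSET)
#define GEM_BFEXT(name, value) \
	(((value) >> GEM_##name##_OFFSET) & ((1 << GEM_##name##_SIZE) - 1))
#define GEM_BFINS(name, value, old) \
	(((old) & ~(((1 << GEM_##name##_SIZE) - 1) << GEM_##name##_OFFSET)) \
	 | GEM_BF(name, value))

int main(void)
{
	uint32_t ncfgr = 0xffc3ffff;	/* arbitrary NCFGR snapshot */

	/* replace only the 3-bit CLK field, bits 20:18 */
	ncfgr = GEM_BFINS(CLK, GEM_CLK_DIV48, ncfgr);
	assert(GEM_BFEXT(CLK, ncfgr) == GEM_CLK_DIV48);
	printf("NCFGR = 0x%08x\n", ncfgr);
	return 0;
}
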
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644
index 00000000000..aba435c3d4a
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+ tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+ depends on HAS_IOMEM
+ select CRC32
+ help
+ This is the driver for the XGMAC Ethernet IP block found on Calxeda
+ Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644
index 00000000000..f0ef08067f9
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644
index 00000000000..1fce186a903
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
+#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
+#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
+#define XGMAC_VERSION 0x00000020 /* Version */
+#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
+#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
+#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
+#define XGMAC_DEBUG 0x00000038 /* Debug */
+#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH 16
+#define XGMAC_OMR 0x00000400
+#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
+#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
+#define XGMAC_MMC_TXBCFRAME_G 0x00000824
+#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME 0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G 0x00000918
+#define XGMAC_MMC_RXMCFRAME_G 0x00000920
+#define XGMAC_MMC_RXCRCERR 0x00000928
+#define XGMAC_MMC_RXRUNT 0x00000930
+#define XGMAC_MMC_RXJABBER 0x00000934
+#define XGMAC_MMC_RXUCFRAME_G 0x00000970
+#define XGMAC_MMC_RXLENGTHERR 0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW 0x00000990
+#define XGMAC_MMC_RXVLANFRAME 0x00000998
+#define XGMAC_MMC_RXWATCHDOG 0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
+#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
+#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
+#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE 0x80000000
+#define XGMAC_MAX_FILTER_ADDR 31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET 0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
+#define XGMAC_PMT_MAGIC_PKT 0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN 0x00000001
+
+#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G 0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G 0x00000000
+#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK 0x18000000
+#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
+#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT 16
+#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_8PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+ DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+ DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+ DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK 0x00030000
+#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
+#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
+#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS 0x00000001
+#define RXDESC_CRC_ERR 0x00000002
+#define RXDESC_RX_ERR 0x00000008
+#define RXDESC_RX_WDOG 0x00000010
+#define RXDESC_FRAME_TYPE 0x00000020
+#define RXDESC_GIANT_FRAME 0x00000080
+#define RXDESC_LAST_SEG 0x00000100
+#define RXDESC_FIRST_SEG 0x00000200
+#define RXDESC_VLAN_FRAME 0x00000400
+#define RXDESC_OVERFLOW_ERR 0x00000800
+#define RXDESC_LENGTH_ERR 0x00001000
+#define RXDESC_SA_FILTER_FAIL 0x00002000
+#define RXDESC_DESCRIPTOR_ERR 0x00004000
+#define RXDESC_ERROR_SUMMARY 0x00008000
+#define RXDESC_FRAME_LEN_OFFSET 16
+#define RXDESC_FRAME_LEN_MASK 0x3fff0000
+#define RXDESC_DA_FILTER_FAIL 0x40000000
+
+#define RXDESC1_END_RING 0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP 0x00000001
+#define RXDESC_IP_PAYLOAD_TCP 0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR 0x00000008
+#define RXDESC_IP_PAYLOAD_ERR 0x00000010
+#define RXDESC_IPV4_PACKET 0x00000040
+#define RXDESC_IPV6_PACKET 0x00000080
+#define TXDESC_UNDERFLOW_ERR 0x00000001
+#define TXDESC_JABBER_TIMEOUT 0x00000002
+#define TXDESC_LOCAL_FAULT 0x00000004
+#define TXDESC_REMOTE_FAULT 0x00000008
+#define TXDESC_VLAN_FRAME 0x00000010
+#define TXDESC_FRAME_FLUSHED 0x00000020
+#define TXDESC_IP_HEADER_ERR 0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
+#define TXDESC_ERROR_SUMMARY 0x00008000
+#define TXDESC_SA_CTRL_INSERT 0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED 0x00100000
+#define TXDESC_END_RING 0x00200000
+#define TXDESC_CSUM_IP 0x00400000
+#define TXDESC_CSUM_IP_PAYLD 0x00800000
+#define TXDESC_CSUM_ALL 0x00C00000
+#define TXDESC_CRC_EN_REPLACE 0x01000000
+#define TXDESC_CRC_EN_APPEND 0x02000000
+#define TXDESC_DISABLE_PAD 0x04000000
+#define TXDESC_FIRST_SEG 0x10000000
+#define TXDESC_LAST_SEG 0x20000000
+#define TXDESC_INTERRUPT 0x40000000
+
+#define DESC_OWN 0x80000000
+#define DESC_BUFFER1_SZ_MASK 0x00001fff
+#define DESC_BUFFER2_SZ_MASK 0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
+struct xgmac_dma_desc {
+ __le32 flags;
+ __le32 buf_size;
+ __le32 buf1_addr; /* Buffer 1 Address Pointer */
+ __le32 buf2_addr; /* Buffer 2 Address Pointer */
+ __le32 ext_status;
+ __le32 res[3];
+};
+
+struct xgmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ unsigned long tx_local_fault;
+ unsigned long tx_remote_fault;
+ /* Receive errors */
+ unsigned long rx_watchdog;
+ unsigned long rx_da_filter_fail;
+ unsigned long rx_sa_filter_fail;
+ unsigned long rx_payload_error;
+ unsigned long rx_ip_header_error;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_underflow;
+ unsigned long tx_process_stopped;
+ unsigned long rx_buf_unav;
+ unsigned long rx_process_stopped;
+ unsigned long tx_early;
+ unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+ struct xgmac_dma_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int rx_tail;
+ unsigned int rx_head;
+
+ struct xgmac_dma_desc *dma_tx;
+ struct sk_buff **tx_skbuff;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ void __iomem *base;
+ struct sk_buff_head rx_recycle;
+ unsigned int dma_buf_sz;
+ dma_addr_t dma_rx_phy;
+ dma_addr_t dma_tx_phy;
+
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ struct xgmac_extra_stats xstats;
+
+ spinlock_t stats_lock;
+ int pmt_irq;
+ char rx_pause;
+ char tx_pause;
+ int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU 9000
+#define PAUSE_TIME 0x400
+
+#define DMA_RX_RING_SZ 256
+#define DMA_TX_RING_SZ 128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH (DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
+
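The helpers above rely on the CIRC_SPACE()/CIRC_CNT() macros from <linux/circ_buf.h>, which only work when the ring size is a power of two. The userspace sketch below (an editor's illustration, not part of the patch; the toy RING_SZ of 8 is an assumption for readability) spells out the same index arithmetic, including the convention that one slot is always kept free so head == tail unambiguously means "empty".

#include <stdio.h>

#define RING_SZ 8	/* must be a power of two, like DMA_TX_RING_SZ */

#define ring_incr(n)		(((n) + 1) & (RING_SZ - 1))
#define ring_cnt(h, t)		(((h) - (t)) & (RING_SZ - 1))	/* entries in use */
#define ring_space(h, t)	ring_cnt((t), ((h) + 1))	/* free entries */

int main(void)
{
	unsigned int head = 0, tail = 0, i;

	for (i = 0; i < 5; i++)		/* producer posts 5 descriptors */
		head = ring_incr(head);
	printf("used=%u free=%u\n", ring_cnt(head, tail), ring_space(head, tail));

	for (i = 0; i < 2; i++)		/* consumer reclaims 2 descriptors */
		tail = ring_incr(tail);
	printf("used=%u free=%u\n", ring_cnt(head, tail), ring_space(head, tail));
	return 0;
}

With DMA_TX_RING_SZ = 128 as defined above, TX_THRESH works out to 32 free descriptors before the transmit queue is woken again.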
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+ if (buf_sz > MAX_DESC_BUF_SZ)
+ p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+ (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+ else
+ p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+ u32 len = le32_to_cpu(p->buf_size);
+ return (len & DESC_BUFFER1_SZ_MASK) +
+ ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
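desc_set_buf_len() spreads one large buffer across the descriptor's two buffer-size fields, and desc_get_buf_len() recombines them. The standalone sketch below (an editor's illustration, not part of the patch; the sample lengths are arbitrary) performs the same split and round trip on a plain 32-bit value, with the little-endian conversions omitted.

#include <stdio.h>
#include <stdint.h>

#define MAX_BUF_SZ	(0x2000 - 8)	/* 8184 bytes per buffer, as above */
#define BUF1_SZ_MASK	0x00001fff
#define BUF2_SZ_MASK	0x1fff0000
#define BUF2_SZ_OFFSET	16

static uint32_t set_buf_len(int len)
{
	if (len > MAX_BUF_SZ)
		return MAX_BUF_SZ |
		       ((uint32_t)(len - MAX_BUF_SZ) << BUF2_SZ_OFFSET);
	return (uint32_t)len;
}

static int get_buf_len(uint32_t field)
{
	return (field & BUF1_SZ_MASK) +
	       ((field & BUF2_SZ_MASK) >> BUF2_SZ_OFFSET);
}

int main(void)
{
	int lens[] = { 1536, 9018 };	/* standard frame, jumbo frame */

	for (unsigned int i = 0; i < 2; i++) {
		uint32_t f = set_buf_len(lens[i]);

		printf("len %5d -> field 0x%08x -> len %5d\n",
		       lens[i], (unsigned)f, get_buf_len(f));
	}
	return 0;
}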
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+ int buf_sz)
+{
+ struct xgmac_dma_desc *end = p + ring_size - 1;
+
+ memset(p, 0, sizeof(*p) * ring_size);
+
+ for (; p <= end; p++)
+ desc_set_buf_len(p, buf_sz);
+
+ end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+ memset(p, 0, sizeof(*p) * ring_size);
+ p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+ /* Clear all fields and set the owner */
+ p->flags = cpu_to_le32(DESC_OWN);
+}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ tmpflags |= flags | DESC_OWN;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ p->buf1_addr = cpu_to_le32(paddr);
+ if (len > MAX_DESC_BUF_SZ)
+ p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ desc_set_buf_len(p, len);
+ desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+ u32 data = le32_to_cpu(p->flags);
+ u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
+ if (data & RXDESC_FRAME_TYPE)
+ len -= ETH_FCS_LEN;
+
+ return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ int timeout = 1000;
+ u32 reg = readl(ioaddr + XGMAC_OMR);
+ writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+ while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+ udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ u32 status = le32_to_cpu(p->flags);
+
+ if (!(status & TXDESC_ERROR_SUMMARY))
+ return 0;
+
+ netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+ if (status & TXDESC_JABBER_TIMEOUT)
+ x->tx_jabber++;
+ if (status & TXDESC_FRAME_FLUSHED)
+ x->tx_frame_flushed++;
+ if (status & TXDESC_UNDERFLOW_ERR)
+ xgmac_dma_flush_tx_fifo(priv->base);
+ if (status & TXDESC_IP_HEADER_ERR)
+ x->tx_ip_header_error++;
+ if (status & TXDESC_LOCAL_FAULT)
+ x->tx_local_fault++;
+ if (status & TXDESC_REMOTE_FAULT)
+ x->tx_remote_fault++;
+ if (status & TXDESC_PAYLOAD_CSUM_ERR)
+ x->tx_payload_error++;
+
+ return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ int ret = CHECKSUM_UNNECESSARY;
+ u32 status = le32_to_cpu(p->flags);
+ u32 ext_status = le32_to_cpu(p->ext_status);
+
+ if (status & RXDESC_DA_FILTER_FAIL) {
+ netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+ x->rx_da_filter_fail++;
+ return -1;
+ }
+
+ /* Check if packet has checksum already */
+ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+ !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+ ret = CHECKSUM_NONE;
+
+ netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+ (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+ if (!(status & RXDESC_ERROR_SUMMARY))
+ return ret;
+
+ /* Handle any errors */
+ if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+ RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+ return -1;
+
+ if (status & RXDESC_EXT_STATUS) {
+ if (ext_status & RXDESC_IP_HEADER_ERR)
+ x->rx_ip_header_error++;
+ if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+ x->rx_payload_error++;
+ netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+ ext_status);
+ return CHECKSUM_NONE;
+ }
+
+ return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_CONTROL);
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ writel(value, ioaddr + XGMAC_CONTROL);
+
+ value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ value = readl(ioaddr + XGMAC_CONTROL);
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+ writel(value, ioaddr + XGMAC_CONTROL);
+}
+
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 data;
+
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+ lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
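The two helpers above pack a 6-byte Ethernet address into the ADDR_HIGH/ADDR_LOW register pair and read it back. The userspace sketch below (an editor's illustration, not part of the patch; the sample address is made up) models the registers as two 32-bit variables so the byte layout and the round trip are easy to verify.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	uint8_t out[6];
	uint32_t hi, lo;

	/* pack: bytes 0-3 into the low word, bytes 4-5 into the high word */
	hi = ((uint32_t)addr[5] << 8) | addr[4];
	lo = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
	     ((uint32_t)addr[1] << 8) | addr[0];

	/* unpack, mirroring xgmac_get_mac_addr() */
	out[0] = lo & 0xff;
	out[1] = (lo >> 8) & 0xff;
	out[2] = (lo >> 16) & 0xff;
	out[3] = (lo >> 24) & 0xff;
	out[4] = hi & 0xff;
	out[5] = (hi >> 8) & 0xff;

	printf("hi=0x%04x lo=0x%08x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       (unsigned)hi, (unsigned)lo,
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}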
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+ u32 reg;
+ unsigned int flow = 0;
+
+ priv->rx_pause = rx;
+ priv->tx_pause = tx;
+
+ if (rx || tx) {
+ if (rx)
+ flow |= XGMAC_FLOW_CTRL_RFE;
+ if (tx)
+ flow |= XGMAC_FLOW_CTRL_TFE;
+
+ flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+ flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg |= XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ } else {
+ writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg &= ~XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ }
+
+ return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+ struct xgmac_dma_desc *p;
+ dma_addr_t paddr;
+
+ while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+ int entry = priv->rx_head;
+ struct sk_buff *skb;
+
+ p = priv->dma_rx + entry;
+
+ if (priv->rx_skbuff[entry] != NULL)
+ continue;
+
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+
+ netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+ priv->rx_head, priv->rx_tail);
+
+ priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+ /* Ensure descriptor is in memory before handing to h/w */
+ wmb();
+ desc_set_rx_owner(p);
+ }
+}
+
+/**
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int bfsize;
+
+ /* Set the buffer size according to the MTU;
+ * for jumbo frames the buffer size must be bumped up accordingly.
+ */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+ 64);
+
+ netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+ priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ return -ENOMEM;
+
+ priv->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma_rx;
+
+ priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skb;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx)
+ goto err_dma_tx;
+
+ netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ priv->rx_tail = 0;
+ priv->rx_head = 0;
+ priv->dma_buf_sz = bfsize;
+ desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+ xgmac_rx_refill(priv);
+
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+ writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+ return 0;
+
+err_dma_tx:
+ kfree(priv->tx_skbuff);
+err_tx_skb:
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+ kfree(priv->rx_skbuff);
+ return -ENOMEM;
+}
+
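The receive buffer size chosen at the top of xgmac_dma_desc_rings_init() is the MTU plus the Ethernet header, FCS, NET_IP_ALIGN and a 64-byte pad, rounded up to a 64-byte boundary. The sketch below (an editor's illustration, not part of the patch) evaluates that expression for a standard and a jumbo MTU, assuming the usual ETH_HLEN, ETH_FCS_LEN and NET_IP_ALIGN values of 14, 4 and 2 found on most architectures.

#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define NET_IP_ALIGN	2
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int mtus[] = { 1500, 9000 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int bfsize = ALIGN_UP(mtus[i] + ETH_HLEN + ETH_FCS_LEN +
					       NET_IP_ALIGN + 64, 64);

		printf("mtu %4d -> dma_buf_sz %u\n", mtus[i], bfsize);
	}
	return 0;
}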
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+ int i;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->rx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_RX_RING_SZ; i++) {
+ if (priv->rx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_rx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+ int i, f;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->tx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_TX_RING_SZ; i++) {
+ if (priv->tx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_tx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+ p = priv->dma_tx + i++;
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ xgmac_free_rx_skbufs(priv);
+ xgmac_free_tx_skbufs(priv);
+
+ /* Free the consistent memory allocated for descriptor rings */
+ if (priv->dma_tx) {
+ dma_free_coherent(priv->device,
+ DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ priv->dma_tx = NULL;
+ }
+ if (priv->dma_rx) {
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ priv->dma_rx = NULL;
+ }
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx_complete:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+ int i;
+ void __iomem *ioaddr = priv->base;
+
+ writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+ while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+ unsigned int entry = priv->tx_tail;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (desc_get_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ if (desc_get_tx_ls(p))
+ desc_get_tx_status(priv, p);
+
+ netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+ priv->tx_head, priv->tx_tail);
+
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ if (!skb)
+ continue;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
+ DMA_TX_RING_SZ);
+ p = priv->dma_tx + priv->tx_tail;
+
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ /*
+ * If there's room in the recycle queue and the skb is the
+ * right size, put it back into the pool so the receive
+ * path can reuse it.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ DMA_RX_RING_SZ) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+ }
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+ TX_THRESH)
+ netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+ u32 reg, value, inten;
+
+ netif_stop_queue(priv->dev);
+
+ inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ reg = readl(priv->base + XGMAC_DMA_CONTROL);
+ writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+ do {
+ value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+ } while (value && (value != 0x600000));
+
+ xgmac_free_tx_skbufs(priv);
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+ writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+ priv->base + XGMAC_DMA_STATUS);
+ writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+ netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+ u32 value, ctrl;
+ int limit;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Save the ctrl register value */
+ ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+ /* SW reset */
+ value = DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+ limit = 15000;
+ while (limit-- &&
+ (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ cpu_relax();
+ if (limit < 0)
+ return -EBUSY;
+
+ value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+ (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+ DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ /* XGMAC requires AXI bus init. This is a 'magic number' for now */
+ writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+ ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+ XGMAC_CONTROL_CAR;
+ if (dev->features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ value = DMA_CONTROL_DFF;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ /* Set the HW DMA mode and the COE */
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ ioaddr + XGMAC_OMR);
+
+ /* Reset the MMC counters */
+ writel(1, ioaddr + XGMAC_MMC_CTRL);
+ return 0;
+}
+
+/**
+ * xgmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success, or a negative error code from errno.h on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+ int ret;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Check that the MAC address is valid. If it's not, fall back
+ * to a randomly generated one so the device can still come up.
+ * A real address can be set with the following linux command:
+ * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ skb_queue_head_init(&priv->rx_recycle);
+ memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+ /* Initialize the XGMAC and descriptors */
+ xgmac_hw_init(dev);
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+ xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+ ret = xgmac_dma_desc_rings_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the MAC Rx/Tx */
+ xgmac_mac_enable(ioaddr);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/**
+ * xgmac_stop - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+ napi_disable(&priv->napi);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Disable the MAC core */
+ xgmac_mac_disable(priv->base);
+
+ /* Release and free the Rx/Tx resources */
+ xgmac_free_dma_desc_rings(priv);
+
+ return 0;
+}
+
+/**
+ * xgmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int entry;
+ int i;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct xgmac_dma_desc *desc, *first;
+ unsigned int desc_flags;
+ unsigned int len;
+ dma_addr_t paddr;
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ (nfrags + 1)) {
+ writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+ priv->base + XGMAC_DMA_INTR_ENA);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+ TXDESC_CSUM_ALL : 0;
+ entry = priv->tx_head;
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+ len = skb_headlen(skb);
+ paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ priv->tx_skbuff[entry] = skb;
+ desc_set_buf_addr_and_size(desc, paddr, len);
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+
+ desc_set_buf_addr_and_size(desc, paddr, len);
+ if (i < (nfrags - 1))
+ desc_set_tx_owner(desc, desc_flags);
+ }
+
+ /* Interrupt on completion only for the last segment */
+ if (desc != first)
+ desc_set_tx_owner(desc, desc_flags |
+ TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ else
+ desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+ /* Set owner on first desc last to avoid race condition */
+ wmb();
+ desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+ priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+ return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+ unsigned int entry;
+ unsigned int count = 0;
+ struct xgmac_dma_desc *p;
+
+ while (count < limit) {
+ int ip_checksum;
+ struct sk_buff *skb;
+ int frame_len;
+
+ writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+ priv->base + XGMAC_DMA_STATUS);
+
+ entry = priv->rx_tail;
+ p = priv->dma_rx + entry;
+ if (desc_get_owner(p))
+ break;
+
+ count++;
+ priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+ /* read the status of the incoming frame */
+ ip_checksum = desc_get_rx_status(priv, p);
+ if (ip_checksum < 0)
+ continue;
+
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+ break;
+ }
+ priv->rx_skbuff[entry] = NULL;
+
+ frame_len = desc_get_rx_frame_len(p);
+ netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+ frame_len, ip_checksum);
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ frame_len, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = ip_checksum;
+ if (ip_checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ xgmac_rx_refill(priv);
+
+ writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+ return count;
+}
+
+/**
+ * xgmac_poll - xgmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * It also handles TX completions.
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+ struct xgmac_priv *priv = container_of(napi,
+ struct xgmac_priv, napi);
+ int work_done = 0;
+
+ xgmac_tx_complete(priv);
+ work_done = xgmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ }
+ return work_done;
+}
+
+/**
+ * xgmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ xgmac_tx_err(priv);
+}
+
+/**
+ * xgmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+ int i;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ unsigned int value = 0;
+ u32 hash_filter[XGMAC_NUM_HASH];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ bool use_hash = false;
+
+ netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+ netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+ return;
+ }
+
+ memset(hash_filter, 0, sizeof(hash_filter));
+
+ if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_uc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ value |= XGMAC_FRAME_FILTER_PM;
+ goto out;
+ }
+
+ if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_mc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+out:
+ for (i = 0; i < XGMAC_NUM_HASH; i++)
+ writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+ writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
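When the perfect-filter slots run out, xgmac_set_rx_mode() falls back to the hash filter: the top nine bits of the inverted CRC select one bit out of a 512-bit table, with the upper four of those bits picking the register and the lower five the bit position. The sketch below (an editor's illustration, not part of the patch) stubs the CRC with an arbitrary value and assumes a 16-word table, i.e. that XGMAC_NUM_HASH covers all 512 bits, purely to show the index arithmetic.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend this is ~ether_crc(ETH_ALEN, addr) */
	uint32_t inverted_crc = 0xdeadbeef;	/* placeholder value */
	uint32_t hash_filter[16] = { 0 };	/* assumed XGMAC_NUM_HASH words */

	uint32_t bit_nr = inverted_crc >> 23;	/* keep the top 9 bits */
	uint32_t reg = bit_nr >> 5;		/* upper 4 bits: which register */
	uint32_t bit = bit_nr & 31;		/* lower 5 bits: which bit */

	hash_filter[reg] |= 1u << bit;
	printf("bit_nr=%u -> XGMAC_HASH(%u), bit %u, word=0x%08x\n",
	       (unsigned)bit_nr, (unsigned)reg, (unsigned)bit,
	       (unsigned)hash_filter[reg]);
	return 0;
}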
+/**
+ * xgmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network
+ * layer to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success, or a negative error code from errno.h on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ int old_mtu;
+
+ if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+ netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+ return -EINVAL;
+ }
+
+ old_mtu = dev->mtu;
+ dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* Stop everything, get ready to change the MTU */
+ if (!netif_running(dev))
+ return 0;
+
+ /* Bring the interface down and then back up */
+ xgmac_stop(dev);
+ return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ if (intr_status & XGMAC_INT_STAT_PMT) {
+ netdev_dbg(priv->dev, "received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT */
+ readl(ioaddr + XGMAC_PMT);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool tx_err = false;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ struct xgmac_extra_stats *x = &priv->xstats;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+ /* It displays the DMA process states (CSR5 register) */
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ if (intr_status & DMA_STATUS_TJT) {
+ netdev_err(priv->dev, "transmit jabber\n");
+ x->tx_jabber++;
+ }
+ if (intr_status & DMA_STATUS_RU)
+ x->rx_buf_unav++;
+ if (intr_status & DMA_STATUS_RPS) {
+ netdev_err(priv->dev, "receive process stopped\n");
+ x->rx_process_stopped++;
+ }
+ if (intr_status & DMA_STATUS_ETI) {
+ netdev_err(priv->dev, "transmit early interrupt\n");
+ x->tx_early++;
+ }
+ if (intr_status & DMA_STATUS_TPS) {
+ netdev_err(priv->dev, "transmit process stopped\n");
+ x->tx_process_stopped++;
+ tx_err = true;
+ }
+ if (intr_status & DMA_STATUS_FBI) {
+ netdev_err(priv->dev, "fatal bus error\n");
+ x->fatal_bus_error++;
+ tx_err = true;
+ }
+
+ if (tx_err)
+ xgmac_tx_err(priv);
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+ writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xgmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *base = priv->base;
+ u32 count;
+
+ spin_lock_bh(&priv->stats_lock);
+ writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+ storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+ storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+ storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+ storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+ storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+ storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+ storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+ storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+ storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+ count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+ storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+ storage->tx_packets = count;
+ storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+ writel(0, base + XGMAC_MMC_CTRL);
+ spin_unlock_bh(&priv->stats_lock);
+ return storage;
+}
+
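xgmac_get_stats64() freezes the MMC counters, then widens each byte counter by combining its _LO and _HI registers into a 64-bit value. The sketch below (an editor's illustration, not part of the patch; the register reads are stubbed with made-up numbers) shows that combination in isolation.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lo = 0x89abcdef;	/* pretend XGMAC_MMC_RXOCTET_G_LO */
	uint32_t hi = 0x00000012;	/* pretend XGMAC_MMC_RXOCTET_G_HI */

	uint64_t rx_bytes = lo;
	rx_bytes |= (uint64_t)hi << 32;

	printf("rx_bytes = 0x%016llx\n", (unsigned long long)rx_bytes);
	return 0;
}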
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+ return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+ u32 ctrl;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ ctrl = readl(ioaddr + XGMAC_CONTROL);
+ if (features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ else
+ ctrl &= ~XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+ .ndo_open = xgmac_open,
+ .ndo_start_xmit = xgmac_xmit,
+ .ndo_stop = xgmac_stop,
+ .ndo_change_mtu = xgmac_change_mtu,
+ .ndo_set_rx_mode = xgmac_set_rx_mode,
+ .ndo_tx_timeout = xgmac_tx_timeout,
+ .ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgmac_poll_controller,
+#endif
+ .ndo_set_mac_address = xgmac_set_mac_address,
+ .ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = 0;
+ cmd->duplex = DUPLEX_FULL;
+ ethtool_cmd_speed_set(cmd, 10000);
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ pause->rx_pause = priv->rx_pause;
+ pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+ bool is_reg;
+};
+
+#define XGMAC_STAT(m) \
+ { #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset) \
+ { #m, reg_offset, true }
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+ XGMAC_STAT(tx_frame_flushed),
+ XGMAC_STAT(tx_payload_error),
+ XGMAC_STAT(tx_ip_header_error),
+ XGMAC_STAT(tx_local_fault),
+ XGMAC_STAT(tx_remote_fault),
+ XGMAC_STAT(tx_early),
+ XGMAC_STAT(tx_process_stopped),
+ XGMAC_STAT(tx_jabber),
+ XGMAC_STAT(rx_buf_unav),
+ XGMAC_STAT(rx_process_stopped),
+ XGMAC_STAT(rx_payload_error),
+ XGMAC_STAT(rx_ip_header_error),
+ XGMAC_STAT(rx_da_filter_fail),
+ XGMAC_STAT(rx_sa_filter_fail),
+ XGMAC_STAT(fatal_bus_error),
+ XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+ XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+ XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+ XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+ XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
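The XGMAC_STAT()/XGMAC_HW_STAT() macros build a table whose entries hold either an offsetof() into struct xgmac_priv or a raw register offset, and xgmac_get_ethtool_stats() walks it with pointer arithmetic. The reduced userspace sketch below (an editor's illustration, not part of the patch; the demo structure and field names are invented) shows the offsetof()-driven half of that scheme.

#include <stdio.h>
#include <stddef.h>

struct demo_stats {
	unsigned long tx_jabber;
	unsigned long rx_buf_unav;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;
};

static const struct demo_stat_desc demo_table[] = {
	{ "tx_jabber",   offsetof(struct demo_stats, tx_jabber) },
	{ "rx_buf_unav", offsetof(struct demo_stats, rx_buf_unav) },
};

int main(void)
{
	struct demo_stats s = { .tx_jabber = 3, .rx_buf_unav = 7 };
	char *base = (char *)&s;

	for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		printf("%-12s %lu\n", demo_table[i].name,
		       *(unsigned long *)(base + demo_table[i].offset));
	return 0;
}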
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void *p = priv;
+ int i;
+
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ if (xgmac_gstrings_stats[i].is_reg)
+ *data++ = readl(priv->base +
+ xgmac_gstrings_stats[i].stat_offset);
+ else
+ *data++ = *(u32 *)(p +
+ xgmac_gstrings_stats[i].stat_offset);
+ }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return XGMAC_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ memcpy(p, xgmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -ENOTSUPP;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ priv->wolopts = wol->wolopts;
+
+ if (wol->wolopts) {
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(dev->irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(dev->irq);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops xgmac_ethtool_ops = {
+ .get_settings = xgmac_ethtool_getsettings,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = xgmac_get_pauseparam,
+ .set_pauseparam = xgmac_set_pauseparam,
+ .get_ethtool_stats = xgmac_get_ethtool_stats,
+ .get_strings = xgmac_get_strings,
+ .get_wol = xgmac_get_wol,
+ .set_wol = xgmac_set_wol,
+ .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct xgmac_priv *priv = NULL;
+ u32 uid;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ ether_setup(ndev);
+ ndev->netdev_ops = &xgmac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ spin_lock_init(&priv->stats_lock);
+
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ netdev_err(ndev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+ uid = readl(priv->base + XGMAC_VERSION);
+ netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq == -ENXIO) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = ndev->irq;
+ goto err_irq;
+ }
+
+ ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ ndev->irq, ret);
+ goto err_irq;
+ }
+
+ priv->pmt_irq = platform_get_irq(pdev, 1);
+ if (priv->pmt_irq == -ENXIO) {
+ netdev_err(ndev, "No pmt irq resource\n");
+ ret = priv->pmt_irq;
+ goto err_pmt_irq;
+ }
+
+ ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ priv->pmt_irq, ret);
+ goto err_pmt_irq;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ if (device_can_wakeup(priv->device))
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Get the MAC address */
+ xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ netdev_warn(ndev, "MAC address %pM not valid",
+ ndev->dev_addr);
+
+ netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ ret = register_netdev(ndev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ netif_napi_del(&priv->napi);
+ free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ iounmap(priv->base);
+err_io:
+ free_netdev(ndev);
+err_alloc:
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+/**
+ * xgmac_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC, frees the IRQ lines,
+ * unregisters the net device, unmaps the registers and releases the
+ * allocated resources.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+
+ xgmac_mac_disable(priv->base);
+
+ /* Free the IRQ lines */
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->pmt_irq, ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi);
+
+ iounmap(priv->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ if (mode & WAKE_UCAST)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+ writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ u32 value;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&priv->napi);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ if (device_may_wakeup(priv->device)) {
+ /* Stop TX/RX DMA Only */
+ value = readl(priv->base + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+ xgmac_pmt(priv->base, priv->wolopts);
+ } else
+ xgmac_mac_disable(priv->base);
+
+ return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ void __iomem *ioaddr = priv->base;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ xgmac_pmt(ioaddr, 0);
+
+ /* Enable the MAC and DMA */
+ xgmac_mac_enable(ioaddr);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ netif_device_attach(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+ { .compatible = "calxeda,hb-xgmac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+ .driver = {
+ .name = "calxedaxgmac",
+ .of_match_table = xgmac_of_match,
+ },
+ .probe = xgmac_probe,
+ .remove = xgmac_remove,
+ .driver.pm = XGMAC_PM_OPS,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index ca26d97171b..1d17c92f2dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
}
static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv;
if (changed & NETIF_F_HW_VLAN_RX)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index f9b60230004..47a84359d4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
/*
* Enable/disable VLAN acceleration.
*/
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
struct sge *sge = adapter->sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980bcdd6..b9bf16b385f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 4d15c8f99c3..857cc254cab 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
- if (!fw_vers)
- strcpy(info->fw_version, "N/A");
- else {
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%s %u.%u.%u TP %u.%u.%u",
G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
G_TP_VERSION_MAJOR(tp_vers),
G_TP_VERSION_MINOR(tp_vers),
G_TP_VERSION_MICRO(tp_vers));
- }
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
}
}
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
t3_synchronize_rx(adapter, pi);
}
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
cxgb_vlan_mode(dev, features);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 90ff1318cc0..65e4b280619 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour(nr->new));
+ cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
break;
}
default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
static void cxgb_neigh_update(struct neighbour *neigh)
{
- struct net_device *dev = neigh->dev;
+ struct net_device *dev;
+ if (!neigh)
+ return;
+ dev = neigh->dev;
if (dev && (is_offloading(dev))) {
struct t3cdev *tdev = dev2t3cdev(dev);
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
struct net_device *olddev, *newdev;
+ struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = dst_get_neighbour(old)->dev;
- newdev = dst_get_neighbour(new)->dev;
+ n = dst_get_neighbour_noref(old);
+ if (!n)
+ return;
+ olddev = n->dev;
+
+ n = dst_get_neighbour_noref(new);
+ if (!n)
+ return;
+ newdev = n->dev;
+
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ e = t3_l2t_get(tdev, new, newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- rcu_assign_pointer(dev->l2opt, NULL);
+ RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
rcu_read_lock();
d = L2DATA(tdev);
rcu_read_unlock();
- rcu_assign_pointer(tdev->l2opt, NULL);
+ RCU_INIT_POINTER(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 70fec8b1140..3fa3c8833ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
spin_unlock(&e->lock);
}
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev)
{
struct l2t_entry *e = NULL;
+ struct neighbour *neigh;
+ struct port_info *p;
struct l2t_data *d;
int hash;
- u32 addr = *(u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- struct port_info *p = netdev_priv(dev);
- int smt_idx = p->port_id;
+ u32 addr;
+ int ifidx;
+ int smt_idx;
rcu_read_lock();
+ neigh = dst_get_neighbour_noref(dst);
+ if (!neigh)
+ goto done_rcu;
+
+ addr = *(u32 *) neigh->primary_key;
+ ifidx = neigh->dev->ifindex;
+
+ if (!dev)
+ dev = neigh->dev;
+ p = netdev_priv(dev);
+ smt_idx = p->port_id;
+
d = L2DATA(cdev);
if (!d)
goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
- goto done;
+ goto done_unlock;
}
/* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
-done:
+done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
rcu_read_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f54796e2c..c4e86436975 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c8f42afa3c..e83d12c7bf2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
-static int vf_acls;
+static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
@@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return err;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
int err;
if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features)
return err;
}
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
{
const struct port_info *pi = netdev_priv(dev);
- unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+ unsigned int n = pi->rss_size;
- p->size = pi->rss_size;
while (n--)
- p->ring_index[n] = pi->rss[n];
+ p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev,
- const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
- if (p->size != pi->rss_size)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- if (p->ring_index[i] >= pi->nqsets)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- pi->rss[i] = p->ring_index[i];
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
return write_rss(pi, pi->rss);
return 0;
@@ -1964,7 +1963,7 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return -EOPNOTSUPP;
}
-static struct ethtool_ops cxgb_ethtool_ops = {
+static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
.get_drvinfo = get_drvinfo,
@@ -1990,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
+ .get_rxfh_indir_size = get_rss_table_size,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.flash_device = set_flash,
@@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap)
if (!pi->rss)
return -ENOMEM;
for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = j % pi->nqsets;
+ pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev,
{
int func, i, err;
struct port_info *pi;
- unsigned int highdma = 0;
+ bool highdma = false;
struct adapter *adapter = NULL;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = NETIF_F_HIGHDMA;
+ highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3637,9 @@ static int __devinit init_one(struct pci_dev *pdev,
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->features |= netdev->hw_features | highdma;
+ if (highdma)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
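
For readers following the ethtool changes in the cxgb4 hunks above: the driver moves from the old ethtool_rxfh_indir struct to the split interface in which the core first queries the table size and then hands the get/set callbacks a bare u32 array of that length (filled by default via ethtool_rxfh_indir_default()). A minimal sketch of that interface, with a hypothetical "example" driver and fields rather than cxgb4's own, not taken from this patch:

	static u32 example_get_rxfh_indir_size(struct net_device *dev)
	{
		const struct example_port *p = netdev_priv(dev);

		return p->rss_size;		/* number of indirection entries */
	}

	static int example_get_rxfh_indir(struct net_device *dev, u32 *indir)
	{
		const struct example_port *p = netdev_priv(dev);
		unsigned int i;

		/* the core sized indir[] using the callback above */
		for (i = 0; i < p->rss_size; i++)
			indir[i] = p->rss[i];
		return 0;
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_rxfh_indir_size	= example_get_rxfh_indir_size,
		.get_rxfh_indir		= example_get_rxfh_indir,
	};
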
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c7cba..2dae7959f00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN; /* failures are expected */
+ gfp |= __GFP_NOWARN | __GFP_COLD;
#if FL_PG_ORDER > 0
/*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
#endif
while (n--) {
- pg = __netdev_alloc_page(adap->port[0], gfp);
+ pg = alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- netdev_free_page(adap->port[0], pg);
+ put_page(pg);
goto out;
}
*d++ = cpu_to_be64(mapping);
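
The sge.c hunk above drops the netdev page wrappers in favour of plain page allocation. A hedged sketch of the resulting allocate/map/unwind pattern, using a generic struct device pointer and a hypothetical helper name rather than the driver's own (0 is used here as a failure sentinel purely for brevity):

	static dma_addr_t example_alloc_rx_page(struct device *dev, struct page **pgp)
	{
		struct page *pg = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_COLD);
		dma_addr_t mapping;

		if (!pg)
			return 0;
		mapping = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mapping)) {
			put_page(pg);	/* plain put_page(), no netdev_free_page() */
			return 0;
		}
		*pgp = pg;
		return mapping;
	}
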
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index da9072bfca8..e53365a7148 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, DRV_VERSION);
- strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
@@ -1561,7 +1564,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-static struct ethtool_ops cxgb4vf_ethtool_ops = {
+static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_settings = cxgb4vf_get_settings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
@@ -2000,7 +2003,7 @@ static const struct file_operations interfaces_proc_fops = {
*/
struct cxgb4vf_debugfs_entry {
const char *name; /* name of debugfs node */
- mode_t mode; /* file system mode */
+ umode_t mode; /* file system mode */
const struct file_operations *fops;
};
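
Several drivers in this section (cxgb4, cxgb4vf, dm9000) make the same mechanical change: feature masks move from u32 to the wider netdev_features_t type so the feature-bit space can grow past 32 bits. A hedged sketch of the resulting ndo_set_features shape, with hypothetical "example" names for everything not shown in the patch:

	static int example_set_features(struct net_device *dev,
					netdev_features_t features)
	{
		netdev_features_t changed = dev->features ^ features;

		if (changed & NETIF_F_RXCSUM)
			/* hypothetical helper standing in for the device programming */
			example_program_rx_csum(dev, !!(features & NETIF_F_RXCSUM));

		return 0;
	}
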
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55ad102..0bd585bba39 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __netdev_alloc_page(adapter->port[0],
- gfp | __GFP_NOWARN);
+ page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -664,7 +663,7 @@ alloc_small_pages:
dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- netdev_free_page(adapter->port[0], page);
+ put_page(page);
break;
}
*d++ = cpu_to_be64(dma_addr);
@@ -684,7 +683,7 @@ out:
/*
* Update our accounting state to incorporate the new Free List
* buffers, tell the hardware about them and return the number of
- * bufers which we were able to allocate.
+ * buffers which we were able to allocate.
*/
cred = fl->avail - cred;
fl->pend_cred += cred;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b3c0e..bf0fc56dba1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
}
/* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
+ err = enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
/* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
+ err = enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
int enic_dev_enable2(struct enic *enic, int active)
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 1f83a4747ba..da1cba3c410 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
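
The enic prototype change above tracks a netdev core change in this series: the VLAN filter hooks now return int, so a failed filter update is reported to the stack instead of being silently dropped. A minimal sketch of the new hook shape; the "example_*" names and the command lock are placeholders, not enic's:

	static int example_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	{
		struct example_priv *priv = netdev_priv(netdev);
		int err;

		spin_lock(&priv->cmd_lock);
		err = example_hw_add_vlan_filter(priv, vid);	/* hypothetical helper */
		spin_unlock(&priv->cmd_lock);

		return err;			/* propagated to the caller by the core */
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_vlan_rx_add_vid	= example_vlan_rx_add_vid,
		/* ... other hooks ... */
	};
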
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c3786fda11d..2fd9db4b1be 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
enic_dev_fw_info(enic, &fw_info);
- strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, fw_info->fw_version,
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
@@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
#endif
/* Allocate structure for port profiles */
- enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
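
Two recurring cleanups in this series meet in the enic hunk above: strlcpy() always NUL-terminates within the given size (strncpy() may not when the source fills the buffer exactly), and kcalloc(n, size, flags) is the overflow-checked way to zero-allocate an array. A short illustration with made-up names; "struct item" is hypothetical:

	static struct item *example_alloc_items(const char *name, size_t nitems)
	{
		char buf[16];

		strlcpy(buf, name, sizeof(buf));	/* always NUL-terminated, unlike strncpy() */
		pr_debug("allocating %zu items for %s\n", nitems, buf);

		/* n * size is checked for overflow, memory comes back zeroed */
		return kcalloc(nitems, sizeof(struct item), GFP_KERNEL);
	}
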
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf6..f801754c71a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
return mii_nway_restart(&dm->mii);
}
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+ netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
@@ -613,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
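
The dm9000 hunk above is worth calling out as a bug fix rather than a cleanup: "dm->wake_state & !opts" applies a bitwise AND to the boolean !opts (0 or 1), so it only ever tests bit 0 of wake_state, whereas the corrected "dm->wake_state && !opts" expresses the intended condition, wake currently armed and no WoL options left. A tiny illustration with made-up values:

	unsigned long wake_state = 0x4;	/* some non-zero wake source, bit 0 clear */
	unsigned long opts = 0;		/* user cleared all WoL options */

	/* old test:  wake_state & !opts   ->  0x4 & 1  ->  0, wake never disarmed   */
	/* new test:  wake_state && !opts  ->  true, irq_set_irq_wake(irq, 0) runs   */
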
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1427739d9a5..1eb46a0bb48 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(de->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
info->eedump_len = DE_EEPROM_SIZE;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 871bcaa7068..4d71f5ae20c 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
u_long iobase = 0; /* Clear upper 32 bits in Alphas */
int i, j;
struct de4x5_private *lp = netdev_priv(dev);
- struct list_head *walk;
-
- list_for_each(walk, &pdev->bus_list) {
- struct pci_dev *this_dev = pci_dev_b(walk);
-
- /* Skip the pci_bus list entry */
- if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+ struct pci_dev *this_dev;
+ list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
vendor = this_dev->vendor;
device = this_dev->device << 8;
if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
char *p, *q, t;
- lp->params.fdx = 0;
+ lp->params.fdx = false;
lp->params.autosense = AUTO;
if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
t = *q;
*q = '\0';
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
if (strstr(p, "TP")) {
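
The de4x5 hunk above is the standard conversion from an open-coded list_for_each() plus pci_dev_b() container lookup to list_for_each_entry(), which also removes the need to skip the list head explicitly. A minimal sketch of the idiom, with a hypothetical function name:

	static void example_walk_bus_siblings(struct pci_dev *pdev)
	{
		struct pci_dev *this_dev;

		/* walks every pci_dev on the same bus; the list head is skipped for us */
		list_for_each_entry(this_dev, &pdev->bus->devices, bus_list)
			pr_debug("sibling %04x:%04x\n",
				 this_dev->vendor, this_dev->device);
	}
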
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 17b11ee1745..51f7542eb45 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9656dd0647d..4eb0d76145c 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7a44a7a6adc..48b0b6566ee 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d01219ba22..52da7b2fe3b 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a65398d01..c24fab1e9cb 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
#include "de600.h"
-static unsigned int check_lost = 1;
+static bool check_lost = true;
module_param(check_lost, bool, 0);
MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dcd7f7a71ad..28a3a9b50b8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1540c..ce88c0f399f 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, "0");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +977,7 @@ static struct platform_driver dnet_driver = {
},
};
-static int __init dnet_init(void)
-{
- return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
- platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
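
The dnet hunk above is the module_platform_driver() conversion applied to many drivers in this release; the macro generates the trivial init/exit pair that used to be written by hand. Roughly, and modulo the exact generated symbol names, module_platform_driver(dnet_driver) stands in for:

	static int __init dnet_driver_init(void)
	{
		return platform_driver_register(&dnet_driver);
	}
	module_init(dnet_driver_init);

	static void __exit dnet_driver_exit(void)
	{
		platform_driver_unregister(&dnet_driver);
	}
	module_exit(dnet_driver_exit);
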
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 644e8fed836..cbdec2536da 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -40,6 +40,7 @@
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
@@ -50,6 +51,7 @@
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
+ case OC_DEVICE_ID5:
+ return OC_NAME_SH;
default:
return BE_NAME;
}
@@ -288,14 +292,14 @@ struct be_drv_stats {
};
struct be_vf_cfg {
- unsigned char vf_mac_addr[ETH_ALEN];
- u32 vf_if_handle;
- u32 vf_pmac_id;
- u16 vf_vlan_tag;
- u32 vf_tx_rate;
+ unsigned char mac_addr[ETH_ALEN];
+ int if_handle;
+ int pmac_id;
+ u16 vlan_tag;
+ u32 tx_rate;
};
-#define BE_INVALID_PMAC_ID 0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT 1
struct be_adapter {
struct pci_dev *pdev;
@@ -345,13 +349,16 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
- u32 if_handle; /* Used to configure filtering */
+ int if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
+ bool ue_detected;
+ bool fw_timeout;
u32 port_num;
bool promiscuous;
bool wol;
@@ -359,7 +366,6 @@ struct be_adapter {
u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
- bool ue_detected;
bool stats_cmd_sent;
int link_speed;
u8 port_type;
@@ -369,16 +375,20 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- bool be3_native;
- bool sriov_enabled;
- struct be_vf_cfg *vf_cfg;
+ u32 num_vfs;
u8 is_virtfn;
+ struct be_vf_cfg *vf_cfg;
+ bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
+#define sriov_enabled(adapter) (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i) \
+ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+ i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
@@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
return adapter->num_rx_qs > 1;
}
+static inline bool be_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
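
A usage sketch, not taken from the patch, of the helpers the be.h hunk above introduces: sriov_enabled() now keys off adapter->num_vfs rather than a separate flag, and for_all_vfs() walks the per-VF config array. "adapter" is assumed to be a struct be_adapter * already in scope, and the -1 sentinel is only an illustration:

	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (sriov_enabled(adapter)) {
		for_all_vfs(adapter, vf_cfg, vf)
			vf_cfg->pmac_id = -1;	/* e.g. mark "no MAC programmed yet" */
	}
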
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2c7b36673df..0fcb4562479 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (adapter->eeh_err) {
- dev_info(&adapter->pdev->dev,
- "Error in Card Detected! Cannot issue commands\n");
+ if (be_error(adapter))
return;
- }
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@ done:
static void be_async_link_state_process(struct be_adapter *adapter,
struct be_async_event_link_state *evt)
{
- be_link_status_update(adapter, evt->port_link_status);
+ /* When link status changes, link speed must be re-queried from FW */
+ adapter->link_speed = -1;
+
+ /* For the initial link status do not rely on the ASYNC event as
+ * it may not be received in some cases.
+ */
+ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+ be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- if (adapter->eeh_err)
- return -EIO;
-
for (i = 0; i < mcc_timeout; i++) {
+ if (be_error(adapter))
+ return -EIO;
+
num = be_process_mcc(adapter, &status);
if (num)
be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
udelay(100);
}
if (i == mcc_timeout) {
- dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
return -1;
}
return status;
@@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
- if (adapter->eeh_err) {
- dev_err(&adapter->pdev->dev,
- "Error detected in card.Cannot issue commands\n");
- return -EIO;
- }
-
do {
+ if (be_error(adapter))
+ return -EIO;
+
ready = ioread32(db);
- if (ready == 0xffffffff) {
- dev_err(&adapter->pdev->dev,
- "pci slot disconnected\n");
+ if (ready == 0xffffffff)
return -1;
- }
ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
if (msecs > 4000) {
- dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
be_detect_dump_ue(adapter);
return -1;
}
@@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle)
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 1;
} else {
req->if_id = cpu_to_le16((u16) if_handle);
+ req->pmac_id = cpu_to_le32(pmac_id);
req->permanent = 0;
}
@@ -695,12 +693,15 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
int status;
+ if (pmac_id == -1)
+ return 0;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
- wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
ctxt = &req->context;
@@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1136,16 +1139,13 @@ err:
}
/* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
- if (!interface_id)
+ if (interface_id == -1)
return 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u32 dom)
+ u16 *link_speed, u8 *link_status, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
spin_lock_bh(&adapter->mcc_lock);
+ if (link_status)
+ *link_status = LINK_DOWN;
+
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
@@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
}
req = embedded_payload(wrb);
+ if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ req->hdr.version = 1;
+
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
@@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_speed = le16_to_cpu(resp->link_speed);
+ if (link_speed)
+ *link_speed = le16_to_cpu(resp->link_speed);
if (mac_speed)
*mac_speed = resp->mac_speed;
}
+ if (link_status)
+ *link_status = resp->logical_link_status;
}
err:
@@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
- 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+ 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+ 0x3ea83c02, 0x4a110304};
int status;
if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@ err_unlock:
return status;
}
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_read_object *req;
+ struct lancer_cmd_resp_read_object *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
+
+ req->desired_read_len = cpu_to_le32(data_size);
+ req->read_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+ status = be_mcc_notify_wait(adapter);
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_read = le32_to_cpu(resp->actual_read_len);
+ *eof = le32_to_cpu(resp->eof);
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
@@ -2238,3 +2295,99 @@ err:
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_list *req;
+ int status;
+ int mac_count;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_mac_list *resp =
+ embedded_payload(wrb);
+ int i;
+ u8 *ctxt = &resp->context[0][0];
+ status = -EIO;
+ mac_count = resp->mac_count;
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ for (i = 0; i < mac_count; i++) {
+ if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+ act, ctxt)) {
+ *pmac_id = AMAP_GET_BITS
+ (struct amap_get_mac_list_context,
+ macid, ctxt);
+ status = 0;
+ break;
+ }
+ ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_mac_list *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+ &cmd.dma, GFP_KERNEL);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
+
+ req->hdr.domain = domain;
+ req->mac_count = mac_count;
+ if (mac_count)
+ memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, cmd.size,
+ cmd.va, cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a35cd03fac4..dca89249088 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_GET_MAC_LIST 147
+#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_ETH_RSS_CONFIG 1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
u8 type;
u8 permanent;
u16 if_id;
+ u32 pmac_id;
} __packed;
struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@ struct be_cmd_resp_link_status {
u8 mgmt_mac_duplex;
u8 mgmt_mac_speed;
u16 link_speed;
- u32 rsvd0;
+ u8 logical_link_status;
+ u8 rsvd1[3];
} __packed;
/******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object {
u32 actual_write_len;
};
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK (32*1024)
+#define LANCER_READ_FILE_EOF_MASK 0x80000000
+
+#define LANCER_FW_DUMP_FILE "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+ struct be_cmd_req_hdr hdr;
+ u32 desired_read_len;
+ u32 read_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_read_len;
+ u32 eof;
+};
+
/************************ WOL *******************************/
struct be_cmd_req_acpi_wol_magic_config{
struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/******************** GET/SET_MACLIST **************************/
+#define BE_MAX_MAC 64
+struct amap_get_mac_list_context {
+ u8 macid[31];
+ u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+ struct be_cmd_resp_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle);
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- u32 pmac_id, u32 domain);
+ int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
- u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u8 *link_status, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
u32 data_size, u32 data_offset,
const char *obj_name,
u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bf8153ea4ed..6db6b6ae5e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
memset(fw_on_flash, 0 , sizeof(fw_on_flash));
be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VER);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
strcat(drvinfo->fw_version, "]");
}
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+ u32 data_read = 0, eof;
+ u8 addn_status;
+ struct be_dma_mem data_len_cmd;
+ int status;
+
+ memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+ /* data_offset and data_size should be 0 to get reg len */
+ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+ file_name, &data_read, &eof, &addn_status);
+
+ return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
+{
+ struct be_dma_mem read_cmd;
+ u32 read_len = 0, total_read_len = 0, chunk_size;
+ u32 eof = 0;
+ u8 addn_status;
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+ read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+ &read_cmd.dma);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading dump\n");
+ return -ENOMEM;
+ }
+
+ while ((total_read_len < buf_len) && !eof) {
+ chunk_size = min_t(u32, (buf_len - total_read_len),
+ LANCER_READ_FILE_CHUNK);
+ chunk_size = ALIGN(chunk_size, 4);
+ status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+ total_read_len, file_name, &read_len,
+ &eof, &addn_status);
+ if (!status) {
+ memcpy(buf + total_read_len, read_cmd.va, read_len);
+ total_read_len += read_len;
+ eof &= LANCER_READ_FILE_EOF_MASK;
+ } else {
+ status = -EIO;
+ break;
+ }
+ }
+ pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
+
+ return status;
+}
+
static int
be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
- if (be_physfn(adapter))
- be_cmd_get_reg_len(adapter, &log_size);
-
+ if (be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ log_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ be_cmd_get_reg_len(adapter, &log_size);
+ }
return log_size;
}
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
if (be_physfn(adapter)) {
memset(buf, 0, regs->len);
- be_cmd_get_regs(adapter, regs->len, buf);
+ if (lancer_chip(adapter))
+ lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ regs->len, buf);
+ else
+ be_cmd_get_regs(adapter, regs->len, buf);
}
}
@@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
+ u8 link_status;
int status;
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &mac_speed,
- &link_speed, 0);
+ &link_speed, &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
@@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
- ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+ ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
@@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, 0) != 0) {
+ &qos_link_speed, NULL, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
@@ -660,7 +727,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
static int
be_get_eeprom_len(struct net_device *netdev)
{
- return BE_READ_SEEPROM_LEN;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_PF_FILE);
+ else
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_VF_FILE);
+ } else {
+ return BE_READ_SEEPROM_LEN;
+ }
}
static int
@@ -675,6 +752,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!eeprom->len)
return -EINVAL;
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+ eeprom->len, data);
+ else
+ return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+ eeprom->len, data);
+ }
+
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bf266a00c77..a6bcdb5cd2b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
-static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
status = be_cmd_mac_addr_query(adapter, current_mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (status)
goto err;
@@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
struct be_drv_stats *drvs = &adapter->drv_stats;
be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
return stats;
}
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
struct net_device *netdev = adapter->netdev;
- /* when link status changes, link speed must be re-queried from card */
- adapter->link_speed = -1;
- if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
- netif_carrier_on(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
- } else {
+ if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
netif_carrier_off(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
+
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
}
static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u8 vlan_prio;
+ u16 vlan_tag;
+
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+
+ return vlan_tag;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
- u8 vlan_prio = 0;
- u16 vlan_tag = 0;
+ u16 vlan_tag;
memset(hdr, 0, sizeof(*hdr));
@@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (vlan_tx_tag_present(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
- vlan_tag = vlan_tx_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- /* If vlan priority provided by OS is NOT in available bmap */
- if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
- vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
@@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
+ /* For vlan tagged pkts, BE
+ * 1) calculates checksum even when CSO is not requested
+ * 2) calculates checksum wrongly for padded pkt less than
+ * 60 bytes long.
+ * As a workaround disable TX vlan offloading in such cases.
+ */
+ if (unlikely(vlan_tx_tag_present(skb) &&
+ (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb->vlan_tci = 0;
+ }
+
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
txq->head = start;
dev_kfree_skb_any(skb);
}
+tx_drop:
return NETDEV_TX_OK;
}
@@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
*/
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
int status = 0;
- u32 if_handle;
if (vf) {
- if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
- vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
- status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+ status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+ 1, 1, 0);
}
/* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
return status;
}
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added++;
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added++;
+ else
+ adapter->vlan_tag[vid] = 0;
+ret:
+ return status;
}
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added--;
-
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added--;
+ else
+ adapter->vlan_tag[vid] = 1;
+ret:
+ return status;
}
static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@ done:
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- status = be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
mac, vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
return status;
}
@@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= num_vfs)
+ if (vf >= adapter->num_vfs)
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
- vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->tx_rate = vf_cfg->tx_rate;
+ vi->vlan = vf_cfg->vlan_tag;
vi->qos = 0;
- memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+ memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
}
@@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (vlan > 4095))
+ if (vf >= adapter->num_vfs || vlan > 4095)
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vf_cfg[vf].vlan_tag = vlan;
adapter->vlans_added++;
} else {
- adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vf_cfg[vf].vlan_tag = 0;
adapter->vlans_added--;
}
@@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (rate < 0))
+ if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate > 10000)
- rate = 10000;
+ if (rate < 100 || rate > 10000) {
+ dev_err(&adapter->pdev->dev,
+ "tx rate must be between 100 and 10000 Mbps\n");
+ return -EINVAL;
+ }
- adapter->vf_cfg[vf].vf_tx_rate = rate;
status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
- dev_info(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
+ else
+ adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
@@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if ((num_vfs && adapter->sriov_enabled) ||
- be_is_mc(adapter) ||
+ if (sriov_enabled(adapter) || be_is_mc(adapter) ||
lancer_chip(adapter) || !be_physfn(adapter) ||
adapter->generation == BE_GEN2)
return 1;
@@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
u8 i;
adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS)
+ if (adapter->num_tx_qs != MAX_TX_QS) {
+ rtnl_lock();
netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_qs);
+ rtnl_unlock();
+ }
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_queue_alloc(adapter, q, TX_Q_LEN,
sizeof(struct be_eth_wrb)))
goto err;
-
- if (be_cmd_txq_create(adapter, q, cq))
- goto err;
}
return 0;
@@ -1728,8 +1786,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && be_physfn(adapter) &&
- !be_is_mc(adapter)) {
+ !sriov_enabled(adapter) && be_physfn(adapter) &&
+ !be_is_mc(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1987,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2024,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
napi_complete(napi);
+ /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+ if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
+ be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ }
+
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
adapter->drv_stats.tx_events++;
return 1;
@@ -1982,6 +2048,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2077,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->ue_detected = true;
adapter->eeh_err = true;
- dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable error in the card\n");
}
if (ue_lo) {
@@ -2036,53 +2106,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->ue_detected)
- be_detect_dump_ue(adapter);
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
- be_post_rx_frags(rxo, GFP_KERNEL);
- }
- }
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
static void be_msix_disable(struct be_adapter *adapter)
{
if (msix_enabled(adapter)) {
@@ -2119,27 +2142,28 @@ done:
static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
+
#ifdef CONFIG_PCI_IOV
if (be_physfn(adapter) && num_vfs) {
int status, pos;
- u16 nvfs;
+ u16 dev_vfs;
pos = pci_find_ext_capability(adapter->pdev,
PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(adapter->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+ pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
- if (num_vfs > nvfs) {
+ adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+ if (adapter->num_vfs != num_vfs)
dev_info(&adapter->pdev->dev,
- "Device supports %d VFs and not %d\n",
- nvfs, num_vfs);
- num_vfs = nvfs;
- }
+ "Device supports %d VFs and not %d\n",
+ adapter->num_vfs, num_vfs);
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- adapter->sriov_enabled = status ? false : true;
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status)
+ adapter->num_vfs = 0;
- if (adapter->sriov_enabled) {
+ if (adapter->num_vfs) {
adapter->vf_cfg = kcalloc(num_vfs,
sizeof(struct be_vf_cfg),
GFP_KERNEL);
@@ -2154,10 +2178,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
- if (adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
pci_disable_sriov(adapter->pdev);
kfree(adapter->vf_cfg);
- adapter->sriov_enabled = false;
+ adapter->num_vfs = 0;
}
#endif
}
@@ -2351,8 +2375,8 @@ static int be_close(struct net_device *netdev)
static int be_rx_queues_setup(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
- int rc, i;
- u8 rsstable[MAX_RSS_QS];
+ int rc, i, j;
+ u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2388,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
+ for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= 128)
+ break;
+ rsstable[j + i] = rxo->rss_id;
+ }
+ }
+ rc = be_cmd_rss_config(adapter, rsstable, 128);
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
if (rc)
return rc;
}
@@ -2386,6 +2414,7 @@ static int be_open(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq;
struct be_rx_obj *rxo;
+ u8 link_status;
int status, i;
status = be_rx_queues_setup(adapter);
@@ -2409,6 +2438,11 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
+ status = be_cmd_link_status_query(adapter, NULL, NULL,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+
return 0;
err:
be_close(adapter->netdev);
@@ -2465,19 +2499,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
be_vf_eth_addr_generate(adapter, mac);
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id,
- vf + 1);
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
+
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address add failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n", vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
mac[5] += 1;
}
@@ -2486,24 +2525,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
static void be_vf_clear(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 vf;
- for (vf = 0; vf < num_vfs; vf++) {
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
- }
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter))
+ be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+ else
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+ be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+ }
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter) && adapter->sriov_enabled)
+ if (sriov_enabled(adapter))
be_vf_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -2511,61 +2549,94 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
-
- adapter->be3_native = false;
- adapter->promiscuous = false;
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
return 0;
}
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ int vf;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ vf_cfg->if_handle = -1;
+ vf_cfg->pmac_id = -1;
+ }
+}
+
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
u16 lnk_speed;
int status;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- for (vf = 0; vf < num_vfs; vf++) {
+ be_vf_setup_init(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &adapter->vf_cfg[vf].vf_if_handle,
- NULL, vf+1);
+ &vf_cfg->if_handle, NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto err;
- }
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
- for (vf = 0; vf < num_vfs; vf++) {
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
- vf + 1);
+ NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ vf_cfg->tx_rate = lnk_speed * 10;
}
return 0;
err:
return status;
}
+static void be_setup_init(struct be_adapter *adapter)
+{
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+ adapter->if_handle = -1;
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
+ adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+ u32 pmac_id;
+ int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id, 0);
+do_none:
+ return status;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status;
+ int status, i;
u8 mac[ETH_ALEN];
+ struct be_tx_obj *txo;
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
- adapter->link_speed = -1;
+ be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
@@ -2583,7 +2654,7 @@ static int be_setup(struct be_adapter *adapter)
memset(mac, 0, ETH_ALEN);
status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0);
+ true /*permanent */, 0, 0);
if (status)
return status;
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2663,8 @@ static int be_setup(struct be_adapter *adapter)
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2675,23 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto err;
- /* For BEx, the VF's permanent mac queried from card is incorrect.
- * Query the mac configued by the PF using if_handle
- */
- if (!be_physfn(adapter) && !lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ for_all_tx_queues(adapter, txo, i) {
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ goto err;
+ }
+
+ /* The VF's permanent mac queried from card is incorrect.
+ * For BEx: Query the mac configured by the PF using if_handle
+ * For Lancer: Get and use mac_list to obtain mac address.
+ */
+ if (!be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ status = be_configure_mac_from_list(adapter, mac);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (!status) {
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2707,21 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
+
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
}
pcie_set_readrq(adapter->pdev, 4096);
- if (be_physfn(adapter) && adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
status = be_vf_setup(adapter);
if (status)
goto err;
@@ -2647,6 +2733,19 @@ err:
return status;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i)
+ event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2981,7 +3080,7 @@ fw_exit:
return status;
}
-static struct net_device_ops be_netdev_ops = {
+static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
@@ -2995,7 +3094,10 @@ static struct net_device_ops be_netdev_ops = {
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
- .ndo_get_vf_config = be_get_vf_config
+ .ndo_get_vf_config = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = be_netpoll,
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3344,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
+ case OC_DEVICE_ID5:
adapter->generation = BE_GEN3;
break;
case OC_DEVICE_ID3:
@@ -3267,7 +3370,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
static int lancer_wait_ready(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
int status = 0, i;
@@ -3276,7 +3379,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
if (sliport_status & SLIPORT_STATUS_RDY_MASK)
break;
- msleep(20);
+ msleep(1000);
}
if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3416,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
return status;
}
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status;
+
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in error state."
+ "Trying to recover.\n");
+
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
+
+ netif_device_detach(adapter->netdev);
+
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
+
+ be_clear(adapter);
+
+ adapter->fw_timeout = false;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
+ }
+
+ netif_device_attach(adapter->netdev);
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery succeeded\n");
+ }
+ return;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (lancer_chip(adapter))
+ lancer_test_and_recover_fn_err(adapter);
+
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -3365,7 +3566,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_sriov;
if (lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
if (status) {
dev_err(&pdev->dev, "Adapter in non recoverable error\n");
goto ctrl_clean;
@@ -3559,6 +3765,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
dev_info(&adapter->pdev->dev, "EEH reset\n");
adapter->eeh_err = false;
+ adapter->ue_detected = false;
+ adapter->fw_timeout = false;
status = pci_enable_device(pdev);
if (status)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 251b635fe75..60f0e788cc2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
},
};
-static int __init ethoc_init(void)
-{
- return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
- platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 61d2bddec1f..c82d444b582 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b29..3574e1499df 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -21,9 +21,10 @@ config NET_VENDOR_FREESCALE
if NET_VENDOR_FREESCALE
config FEC
- bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || ARCH_MXS)
+ ARCH_MXC || SOC_IMX28)
+ default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a159..ddcbbb34d1b 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#elif defined (CONFIG_M5272C3)
#define FEC_FLASHMAC (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC 0xffc0406b
+#define FEC_FLASHMAC 0xffc0406b
#else
#define FEC_FLASHMAC 0
#endif
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -254,11 +255,13 @@ struct fec_enet_private {
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
-#define FEC_MII_TIMEOUT 1000 /* us */
+#define FEC_MII_TIMEOUT 30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static int mii_cnt;
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -515,6 +518,7 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -531,8 +535,10 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
}
@@ -818,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
iap = (unsigned char *)FEC_FLASHMAC;
#else
if (pdata)
- memcpy(iap, pdata->mac, ETH_ALEN);
+ iap = (unsigned char *)&pdata->mac;
#endif
}
@@ -837,7 +843,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -865,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
fec_restart(ndev, phy_dev->duplex);
+ /* prevent unnecessary second fec_restart() below */
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -953,7 +961,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -972,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
if (phy_id >= PHY_MAX_ADDR) {
- printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", ndev->name);
+ printk(KERN_INFO
+ "%s: no PHY, assuming direct connection to switch\n",
+ ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -998,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ printk(KERN_INFO
+ "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -1031,10 +1041,14 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
- fep->mii_bus = fec0_mii_bus;
- return 0;
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
}
fep->mii_timeout = 0;
@@ -1063,7 +1077,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1079,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ mii_cnt++;
+
/* save fec0 mii_bus */
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1111,11 @@ err_out:
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- if (fep->phy_dev)
- phy_disconnect(fep->phy_dev);
- mdiobus_unregister(fep->mii_bus);
- kfree(fep->mii_bus->irq);
- mdiobus_free(fep->mii_bus);
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+ }
}
static int fec_enet_get_settings(struct net_device *ndev,
@@ -1136,7 +1152,7 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strcpy(info->bus_info, dev_name(&ndev->dev));
}
-static struct ethtool_ops fec_enet_ethtool_ops = {
+static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
@@ -1521,6 +1537,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1565,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
@@ -1571,8 +1589,12 @@ fec_probe(struct platform_device *pdev)
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
- if (i && irq < 0)
- break;
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
while (--i >= 0) {
@@ -1583,12 +1605,12 @@ fec_probe(struct platform_device *pdev)
}
}
- fep->clk = clk_get(&pdev->dev, "fec_clk");
+ fep->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(fep->clk)) {
ret = PTR_ERR(fep->clk);
goto failed_clk;
}
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
ret = fec_enet_init(ndev);
if (ret)
@@ -1611,7 +1633,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1635,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct resource *r;
+ int i;
- fec_stop(ndev);
+ unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- clk_disable(fep->clk);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
iounmap(fep->hwp);
- unregister_netdev(ndev);
free_netdev(ndev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1664,7 +1691,7 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
return 0;
}
@@ -1675,7 +1702,7 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5bf5471f06f..910a8e18a9a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
.remove = fs_enet_remove,
};
-static int __init fs_init(void)
-{
- return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
- platform_driver_unregister(&fs_enet_driver);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
}
#endif
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b5d0a..0f2d1a71090 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_bb_init(void)
-{
- return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
- platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c35d8..55bb867258e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_fec_init(void)
-{
- return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
- platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e..9eb815941df 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
- return i;
-}
-
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,23 +359,12 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
-
goto err_free_irqs;
+ } else {
+ out_be32(tbipa, tbiaddr);
}
- out_be32(tbipa, tbiaddr);
-
err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -480,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
.remove = fsl_pq_mdio_remove,
};
-int __init fsl_pq_mdio_init(void)
-{
- return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
-void fsl_pq_mdio_exit(void)
-{
- platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 83199fd0d62..e01cdaa722a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags =
@@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
}
/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
@@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
- u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
int idx;
- char tmpbuf[MAC_ADDR_LEN];
+ char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
@@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
/* Now copy it into the mac registers backwards, cuz */
/* little endian is silly */
- for (idx = 0; idx < MAC_ADDR_LEN; idx++)
- tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
gfar_write(macptr, *((u32 *) (tmpbuf)));
@@ -3281,16 +3281,4 @@ static struct platform_driver gfar_driver = {
.remove = gfar_remove,
};
-static int __init gfar_init(void)
-{
- return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
- platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa43773e8e..fe7ac3a8319 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -74,9 +74,6 @@ struct ethtool_rx_list {
* will be the next highest multiple of 512 bytes. */
#define INCREMENTAL_BUFFER_SIZE 512
-
-#define MAC_ADDR_LEN 6
-
#define PHY_INIT_TIMEOUT 100000
#define GFAR_PHY_CHANGE_TIME 2
@@ -1179,9 +1176,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 212736bab6b..5a3b2e5b288 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return err;
}
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0, i = 0;
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
/* We need a copy of the filer table because
* we want to change its order */
- temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
- memcpy(temp_table, tab, sizeof(*temp_table));
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
sizeof(struct gfar_mask_entry), GFP_KERNEL);
@@ -1693,8 +1692,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
ret = gfar_set_hash_opts(priv, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
- cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+ cmd->fs.location >= MAX_FILER_IDX) {
ret = -EINVAL;
break;
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8aebc89..83e0ed757e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
.remove = gianfar_ptp_remove,
};
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
- return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
- platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index b5dc0273a1d..ba2dc083bfc 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
- return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+ return memcmp(addr1, addr2, ETH_ALEN);
}
#ifdef DEBUG
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad145e..2e395a2566b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/if_ether.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define ENET_NUM_OCTETS_PER_ADDRESS 6
#define ENET_GROUP_ADDR 0x01 /* Group address mask
for ethernet
addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
/* UCC GETH 82xx Ethernet Address Container */
struct enet_addr_container {
- u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 address[ETH_ALEN]; /* ethernet address */
enum ucc_geth_enet_address_recognition_location location; /* location in
82xx address
recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
u16 cpucount[NUM_TX_QUEUES];
u16 __iomem *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
- u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
u8 numGroupAddrInHash;
u8 numIndAddrInHash;
u8 numIndAddrInReg;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 15416752c13..ee84b472cee 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 067c46069a1..114cda7721f 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
static void eepro_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VERSION);
- sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+ "ISA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops eepro_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984e..6650068c996 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b..3554414eb5e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2113,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2131,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2139,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
- return;
+ return err;
}
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2164,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2172,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
+ return err;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2434,7 +2446,8 @@ static int ehea_open(struct net_device *dev)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ed79b2d3ad3..2abce965c7b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
+ busy_phy_map &= ~(1 << dev->phy.address);
+ DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b1cd41b9c61..e877371680a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df38..acc31af6594 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 8fd80a00b89..075451d0207 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
}
/* The last cycle is a tri-state, so read from the PHY. */
- for (j = 7; j < 8; j++) {
- for (i = 0; i < p[j].len; i++) {
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
- p[j].field |= ((ipg_r8(PHY_CTRL) &
- IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
- }
- }
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+ ipg_r8(PHY_CTRL);
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
}
static void ipg_set_led_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5a2fdf7a00c..9436397e572 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(nic->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev),
+ sizeof(info->bus_info));
}
#define E100_PHY_REGS 0x1C
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2b223ac99c4..3103f0b6bf5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000_driver_name, 32);
- strncpy(drvinfo->version, e1000_driver_version, 32);
+ strlcpy(drvinfo->driver, e1000_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000_driver_version,
+ sizeof(drvinfo->version));
- sprintf(firmware_version, "N/A");
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a8403668..f6c4d7e2560 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
/* MAC decode size is 128K - This is the size of BAR0 */
#define MAC_DECODE_SIZE (128 * 1024)
@@ -813,8 +812,7 @@ struct e1000_ffvt_entry {
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
-extern void __iomem *ce4100_gbe_mdio_base_virt;
-#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt)
#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
@@ -1344,6 +1342,7 @@ struct e1000_hw_stats {
struct e1000_hw {
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
+ void __iomem *ce4100_gbe_mdio_base_virt;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
u32 phy_init_script;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cf480b55462..669ca3800c0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -33,11 +33,6 @@
#include <linux/bitops.h>
#include <linux/if_vlan.h>
-/* Intel Media SOC GbE MDIO physical base address */
-static unsigned long ce4100_gbe_mdio_base_phy;
-/* Intel Media SOC GbE MDIO virtual base address */
-void __iomem *ce4100_gbe_mdio_base_virt;
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
@@ -167,9 +162,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -806,7 +802,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
}
}
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -820,10 +817,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_RX)
e1000_vlan_mode(netdev, features);
@@ -1051,11 +1049,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = -EIO;
if (hw->mac_type == e1000_ce4100) {
- ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
- ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ hw->ce4100_gbe_mdio_base_virt =
+ ioremap(pci_resource_start(pdev, BAR_1),
pci_resource_len(pdev, BAR_1));
- if (!ce4100_gbe_mdio_base_virt)
+ if (!hw->ce4100_gbe_mdio_base_virt)
goto err_mdio_ioremap;
}
@@ -1182,7 +1180,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
- adapter->quad_port_a = 1;
+ adapter->quad_port_a = true;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
@@ -1246,7 +1244,7 @@ err_eeprom:
err_dma:
err_sw_init:
err_mdio_ioremap:
- iounmap(ce4100_gbe_mdio_base_virt);
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1283,6 +1281,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+ if (hw->mac_type == e1000_ce4100)
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -1676,7 +1676,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* need this to apply a workaround later in the send path. */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
- adapter->pcix_82544 = 1;
+ adapter->pcix_82544 = true;
ew32(TCTL, tctl);
@@ -1999,7 +1999,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
writel(0, hw->hw_addr + tx_ring->tdh);
writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2848,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
size -= 4;
}
@@ -3216,7 +3216,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(tso)) {
if (likely(hw->mac_type != e1000_82544))
- tx_ring->last_tx_tso = 1;
+ tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4577,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
e1000_irq_enable(adapter);
}
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4601,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
e1000_irq_enable(adapter);
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4610,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4622,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
e1000_write_vfta(hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4647,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, false);
+
+ return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
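[Annotation] These hunks follow the tree-wide change of ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid from void to int so filter-programming errors can reach the caller. A minimal sketch of the new shape; the adapter struct and the write helper are hypothetical stand-ins for the driver's own.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct example_adapter {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);	/* vids currently in use */
};

/* hypothetical helper that programs the hardware VLAN filter table */
static int example_write_vfta(struct net_device *netdev, u16 vid, bool add)
{
	return 0;
}

static int example_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct example_adapter *adapter = netdev_priv(netdev);

	if (example_write_vfta(netdev, vid, true))
		return -EIO;		/* errors now propagate to the core */

	set_bit(vid, adapter->active_vlans);
	return 0;			/* success must return 0 */
}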
@@ -4716,8 +4721,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
- mutex_lock(&adapter->mutex);
-
if (netif_running(netdev)) {
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -4725,10 +4728,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
- if (retval) {
- mutex_unlock(&adapter->mutex);
+ if (retval)
return retval;
- }
#endif
status = er32(STATUS);
@@ -4783,8 +4784,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- mutex_unlock(&adapter->mutex);
-
pci_disable_device(pdev);
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9fe18d1d53d..f478a22ed57 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,7 @@ struct e1000_adapter {
u32 txd_cmd;
bool detect_tx_hung;
+ bool tx_hang_recheck;
u8 tx_timeout_factor;
u32 tx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 69c9d219914..fb2c28e799a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version));
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
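[Annotation] The get_drvinfo hunk replaces strncpy, which does not guarantee NUL termination when the source fills the buffer, with strlcpy, and formats the firmware version directly into drvinfo->fw_version instead of a stack buffer. For illustration only, a user-space sketch of strlcpy's contract: always terminate, and return the source length so truncation can be detected. The kernel provides its own strlcpy(); this is not it.

#include <stddef.h>
#include <string.h>

static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = (len >= size) ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';	/* always NUL-terminated */
	}
	return len;			/* >= size means the copy was truncated */
}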
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a855db1ad24..3911401ed65 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
regs[n] = __er32(hw, E1000_TARC(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 2; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
/*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
reginfo->name; reginfo++) {
e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
/* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
- printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
- printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
- printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->length, buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp,
- buffer_info->skb);
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ next_desc = " NTC";
else
- printk(KERN_CONT "\n");
+ next_desc = "";
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print Rx Ring Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
- printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("Queue [NTU] [NTC]\n");
+ pr_info(" %5d %5X %5X\n",
+ 0, rx_ring->next_to_use, rx_ring->next_to_clean);
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
* 24 | Buffer Address 3 [63:0] |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
- "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext Pkt Split format\n");
+ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 13 12 8 7 4 3 0
@@ -352,35 +339,40 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
- "[ l3 l2 l1 hs] [reserved ] ---------------- "
- "[bi->skb] <-- Ext Rx Write-Back format\n");
+ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
phys_to_virt(buffer_info->dma),
adapter->rx_ps_bsize0, true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
break;
default:
@@ -407,9 +392,7 @@ rx_ring_summary:
* 8 | Reserved |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
- "[reserved 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext (Read) format\n");
+ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
/* Extended Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 24 23 4 3 0
@@ -423,29 +406,37 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
- "[vt ln xe xs] "
- "[bi->skb] <-- Ext (Write-Back) format\n");
+ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
adapter->rx_buffer_len,
true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
}
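[Annotation] The dump-routine conversion above replaces printk(KERN_INFO)/printk(KERN_CONT) sequences with single pr_info() calls, so each descriptor or register line is emitted atomically and cannot be interleaved by concurrent printk() callers. A minimal sketch of the pattern; the helper name is hypothetical.

#include <linux/types.h>
#include <linux/printk.h>

static void example_dump_pair(const char *name, u32 reg0, u32 reg1)
{
	/*
	 * One complete line per call instead of a KERN_INFO header
	 * followed by several KERN_CONT fragments.
	 */
	pr_info("%-15s %08x %08x\n", name, reg0, reg1);
}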
@@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev,
buffer_info->dma,
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
+ struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
+ if (!adapter->tx_hang_recheck &&
+ (adapter->flags2 & FLAG2_DMA_BURST)) {
+ /* May be blocked on write-back; flush pending descriptor
+ * write-backs to memory, then detect again
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ adapter->tx_hang_recheck = true;
+ return;
+ }
+ /* Real hang detected */
+ adapter->tx_hang_recheck = false;
+ netif_stop_queue(netdev);
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
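[Annotation] The print_hang_task change above makes hang reporting a two-pass check: on the first pass with DMA burst enabled it only forces pending descriptor write-backs out (TIDV | FPD) and arms tx_hang_recheck; only if the task fires again without the cleanup path clearing the flag is a real hang declared and the queue stopped. A condensed, generic sketch of that state machine, with a stubbed hypothetical flush helper in place of the register pokes.

#include <linux/netdevice.h>

struct example_adapter {
	struct net_device *netdev;
	bool tx_hang_recheck;	/* set after the first, flush-only pass */
};

static void example_flush_tx_writebacks(struct example_adapter *adapter)
{
	/* hypothetical: force pending descriptor write-backs (e.g. TIDV | FPD) */
}

static void example_check_tx_hang(struct example_adapter *adapter)
{
	if (!adapter->tx_hang_recheck) {
		/* pass 1: descriptors may simply be awaiting write-back */
		example_flush_tx_writebacks(adapter);
		adapter->tx_hang_recheck = true;	/* re-check next watchdog run */
		return;
	}

	/* pass 2: still not cleaned after the flush, treat as a real hang */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(adapter->netdev);
}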
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
}
e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
#define TX_WAKE_THRESHOLD 32
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i
*/
- adapter->detect_tx_hung = 0;
+ adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ !(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task);
- netif_stop_queue(netdev);
- }
+ else
+ adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
unsigned int i, j;
u32 length, staterr;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- e_dbg("Packet Split buffers didn't pick up the full "
- "packet\n");
+ e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
- e_dbg("Last part of the packet spanning multiple "
- "descriptors\n");
+ e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
return;
}
/* MSI-X failed, so fall through and try MSI */
- e_err("Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
+ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
adapter->flags |= FLAG_MSI_ENABLED;
} else {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_err("Failed to initialize MSI interrupts. Falling "
- "back to legacy interrupts.\n");
+ e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
/* Fall through */
case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
e1000_put_txbuf(adapter, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
@@ -2518,7 +2522,7 @@ clean_rx:
return work_done;
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
/* add VID to filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000e_release_hw_control(adapter);
- return;
+ return 0;
}
/* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* update_mc_addr_list expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int rar_entries = hw->mac.rar_entry_count;
+ int count = 0;
+
+ /* save a rar entry for our hardware address */
+ rar_entries--;
+
+ /* save a rar entry for the LAA workaround */
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+ rar_entries--;
+
+ /* return -ENOMEM to indicate there is not enough space for all addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ /*
+ * write the addresses in reverse order to avoid write
+ * combining
+ */
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ e1000e_rar_set(hw, ha->addr, rar_entries--);
+ count++;
+ }
+ }
+
+ /* zero out the remaining RAR entries not used above */
+ for (; rar_entries > 0; rar_entries--) {
+ ew32(RAH(rar_entries), 0);
+ ew32(RAL(rar_entries), 0);
+ }
+ e1e_flush();
+
+ return count;
}
/**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list;
u32 rctl;
/* Check for Promiscuous and All Multicast modes */
-
rctl = er32(RCTL);
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- rctl &= ~E1000_RCTL_VFE;
/* Do not hardware filter VLANs in promisc mode */
e1000e_vlan_filter_disable(adapter);
} else {
+ int count;
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
- rctl &= ~E1000_RCTL_UPE;
} else {
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = e1000e_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- }
-
- ew32(RCTL, rctl);
-
- if (!netdev_mc_empty(netdev)) {
- int i = 0;
-
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
- /* prepare a packed array of only addresses. */
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
- } else {
/*
- * if we're called from probe, we might not have
- * anything to do here, so clear out the list
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
*/
- e1000_update_mc_addr_list(hw, NULL, 0);
+ count = e1000e_write_uc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_UPE;
}
+ ew32(RCTL, rctl);
+
if (netdev->features & NETIF_F_HW_VLAN_RX)
e1000e_vlan_strip_enable(adapter);
else
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
- e1000_set_multi(adapter->netdev);
+ e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
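[Annotation] The large hunk above splits e1000_set_multi() into e1000e_write_mc_addr_list()/e1000e_write_uc_addr_list() plus an e1000e_set_rx_mode() that falls back to multicast/unicast promiscuity when the MTA hash or RAR slots cannot hold the address lists. A stripped-down sketch of that fallback decision, kept free of the driver's register macros; the two writer prototypes are hypothetical stand-ins for the functions added above.

#include <linux/netdevice.h>

int example_write_mc_addr_list(struct net_device *netdev);	/* hypothetical */
int example_write_uc_addr_list(struct net_device *netdev);	/* hypothetical */

/* returns the promiscuity bits the (hypothetical) MAC should enable */
static u32 example_rx_mode_flags(struct net_device *netdev)
{
#define EX_UPE	BIT(0)	/* unicast promiscuous */
#define EX_MPE	BIT(1)	/* multicast promiscuous */
	u32 flags = 0;

	if (netdev->flags & IFF_PROMISC)
		return EX_UPE | EX_MPE;

	/* fall back to multicast promiscuity if the hash table cannot be used */
	if ((netdev->flags & IFF_ALLMULTI) ||
	    example_write_mc_addr_list(netdev) < 0)
		flags |= EX_MPE;

	/* fall back to unicast promiscuity if the RAR table is too small */
	if (example_write_uc_addr_list(netdev) < 0)
		flags |= EX_UPE;

	return flags;
}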
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
clear_bit(__E1000_DOWN, &adapter->state);
- napi_enable(&adapter->napi);
if (adapter->msix_entries)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
e1e_flush();
usleep_range(10000, 20000);
- napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
e1000_irq_enable(adapter);
+ adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
+ napi_disable(&adapter->napi);
+
if (!test_bit(__E1000_DOWN, &adapter->state)) {
e1000e_down(adapter);
e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "Rx/Tx" :
- ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
- ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- bool link_active = 0;
+ bool link_active = false;
s32 ret_val = 0;
/*
@@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
} else {
- link_active = 1;
+ link_active = true;
}
break;
case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
+ bool txb2b = true;
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work)
e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
- e_info("Autonegotiated half duplex but"
- " link partner cannot autoneg. "
- " Try forcing full duplex if "
- "link gets many collisions.\n");
+ e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 10;
break;
}
@@ -4473,7 +4544,7 @@ link_up:
e1000e_flush_descriptors(adapter);
/* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
+ adapter->detect_tx_hung = true;
/*
* With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
+ netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(adapter, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
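[Annotation] The netdev_sent_queue() call added here completes the byte queue limits (BQL) wiring in this series: bytes are reported when descriptors are posted, netdev_completed_queue() credits them back in e1000_clean_tx_irq(), and netdev_reset_queue() clears the accounting when the ring is flushed. A minimal single-queue sketch of how the three calls pair up; transmit and cleanup bodies are elided.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post descriptors to the hardware ... */
	netdev_sent_queue(dev, skb->len);	/* account bytes handed to HW */
	return NETDEV_TX_OK;
}

static void example_clean_tx(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	/* credit completed work back so BQL can size the in-flight limit */
	netdev_completed_queue(dev, pkts, bytes);
}

static void example_reset_ring(struct net_device *dev)
{
	/* descriptors were dropped, not completed: restart the accounting */
	netdev_reset_queue(dev);
}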
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((adapter->hw.mac.type == e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
(new_mtu > ETH_DATA_LEN)) {
- e_err("Jumbo Frames not supported on 82579 when CRC "
- "stripping is disabled.\n");
+ e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
return -EINVAL;
}
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (wufc) {
e1000_setup_rctl(adapter);
- e1000_set_multi(netdev);
+ e1000e_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
phy_data & E1000_WUS_MC ? "Multicast Packet" :
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ? "Link Status "
- " Change" : "other");
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
- .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
.ndo_do_ioctl = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_TSO6 |
NETIF_F_HW_CSUM);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
- adapter->fc_autoneg = 1;
+ adapter->fc_autoneg = true;
adapter->hw.fc.requested_mode = e1000_fc_default;
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7881fb95a25..b8e20f037d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -29,6 +29,8 @@
* e1000_82576
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
* Check for invalid size
*/
if ((hw->mac.type == e1000_82576) && (size > 15)) {
- printk("igb: The NVM size is not valid, "
- "defaulting to 32K.\n");
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15;
}
nvm->word_size = 1 << size;
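[Annotation] The pr_fmt() definition added at the top of e1000_82575.c makes every subsequent pr_*() call in the file prefix its output with the module name, which is why the explicit "igb: " prefix could be dropped from the NVM-size warning. A short sketch of the convention; the message and size parameter are illustrative.

/* must be defined before any include that pulls in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void example_warn_nvm(unsigned int size)
{
	/* logs as "<modname>: The NVM size is not valid, ..." */
	pr_notice("The NVM size is not valid, defaulting to 32K (got %u)\n",
		  size);
}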
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c69feebf265..3d12e67eebb 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,4 +447,9 @@ static inline s32 igb_get_phy_info(struct e1000_hw *hw)
return 0;
}
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
#endif /* _IGB_H_ */
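[Annotation] The new txring_txq() helper maps a Tx ring to its struct netdev_queue so igb_main.c can use the per-queue BQL variants (netdev_tx_sent_queue(), netdev_tx_completed_queue(), netdev_tx_reset_queue()) rather than the single-queue calls used in e1000e. A small sketch of how such an accessor is typically consumed; the ring layout is the hypothetical minimum needed for the example.

#include <linux/netdevice.h>

struct example_ring {
	struct net_device *netdev;
	u8 queue_index;
};

static inline struct netdev_queue *example_txq(const struct example_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

static void example_report_sent(struct example_ring *ring, unsigned int bytes)
{
	/* per-queue BQL accounting for a multiqueue device */
	netdev_tx_sent_queue(example_txq(ring), bytes);
}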
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43873eba2f6..7998bf4d594 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -36,6 +36,7 @@
#include <linux/ethtool.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include "igb.h"
@@ -148,7 +149,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ ecmd->advertising = (ADVERTISED_TP |
+ ADVERTISED_Pause);
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +167,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ ADVERTISED_Autoneg |
+ ADVERTISED_Pause);
ecmd->port = PORT_FIBRE;
}
@@ -673,25 +676,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u16 eeprom_data;
- strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -2162,6 +2162,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2188,6 +2201,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
};
void igb_set_ethtool_ops(struct net_device *netdev)
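[Annotation] The new .begin/.complete ethtool hooks keep the adapter resumed for the duration of any ethtool operation now that igb supports runtime PM: .begin takes a runtime-PM reference, .complete drops it. A minimal sketch of the pattern; the adapter layout is the hypothetical minimum for the example.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

struct example_adapter {
	struct pci_dev *pdev;
};

static int example_ethtool_begin(struct net_device *netdev)
{
	struct example_adapter *adapter = netdev_priv(netdev);

	/* resume the device (and block runtime suspend) before the ioctl */
	pm_runtime_get_sync(&adapter->pdev->dev);
	return 0;
}

static void example_ethtool_complete(struct net_device *netdev)
{
	struct example_adapter *adapter = netdev_priv(netdev);

	/* drop the reference; the device may autosuspend again */
	pm_runtime_put(&adapter->pdev->dev);
}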
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ced544499f1..01e5e89ef95 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -51,6 +53,7 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -145,9 +148,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
@@ -170,8 +173,18 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *, pm_message_t);
-static int igb_resume(struct pci_dev *);
+static int igb_suspend(struct device *);
+static int igb_resume(struct device *);
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+#endif
+static const struct dev_pm_ops igb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+ SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle)
+};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
@@ -212,9 +225,7 @@ static struct pci_driver igb_driver = {
.probe = igb_probe,
.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igb_suspend,
- .resume = igb_resume,
+ .driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
.err_handler = &igb_err_handler
@@ -325,16 +336,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
regs[n] = rd32(E1000_TXDCTL(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, rd32(reginfo->ofs));
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 4; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/*
@@ -359,18 +367,15 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start "
+ "last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
reginfo->name; reginfo++) {
igb_regdump(hw, reginfo);
@@ -381,18 +386,17 @@ static void igb_dump(struct igb_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
@@ -414,36 +418,38 @@ static void igb_dump(struct igb_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
- "[PlPOCIStDDM Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
+ "[bi->dma ] leng ntw timestamp "
+ "bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
struct igb_tx_buffer *buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
- buffer_info->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
- else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "",
@@ -456,11 +462,11 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO " %5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -492,36 +498,43 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
- "<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
- "<-- Adv Rx Write-Back format\n");
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
+ "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+ "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
struct igb_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX -------"
+ "--------- %p%s\n", "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb);
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX"
+ " %p%s\n", "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb);
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter)) {
print_hex_dump(KERN_INFO, "",
@@ -538,14 +551,6 @@ rx_ring_summary:
PAGE_SIZE/2, true);
}
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
}
}
@@ -599,10 +604,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
+ pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
- printk(KERN_INFO "%s\n", igb_copyright);
+ pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
@@ -1502,6 +1507,7 @@ void igb_power_up_link(struct igb_adapter *adapter)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
+ igb_reset_phy(&adapter->hw);
}
/**
@@ -1742,7 +1748,8 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(hw);
}
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1763,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
@@ -2113,6 +2121,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
default:
break;
}
+
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
@@ -2152,6 +2162,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ pm_runtime_get_noresume(&pdev->dev);
+
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2474,16 +2486,22 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int igb_open(struct net_device *netdev)
+static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
int i;
/* disallow open during test */
- if (test_bit(__IGB_TESTING, &adapter->state))
+ if (test_bit(__IGB_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
return -EBUSY;
+ }
+
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
@@ -2529,6 +2547,9 @@ static int igb_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
/* start the watchdog. */
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
@@ -2543,10 +2564,17 @@ err_setup_rx:
igb_free_all_tx_resources(adapter);
err_setup_tx:
igb_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
+static int igb_open(struct net_device *netdev)
+{
+ return __igb_open(netdev, false);
+}
+
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
@@ -2558,21 +2586,32 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int igb_close(struct net_device *netdev)
+static int __igb_close(struct net_device *netdev, bool suspending)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
- igb_down(adapter);
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
+ igb_down(adapter);
igb_free_irq(adapter);
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
+static int igb_close(struct net_device *netdev)
+{
+ return __igb_close(netdev, false);
+}
+
/**
* igb_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -3203,6 +3242,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -3632,6 +3672,9 @@ static void igb_watchdog_task(struct work_struct *work)
link = igb_has_link(adapter);
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
hw->mac.ops.get_speed_and_duplex(hw,
@@ -3640,23 +3683,23 @@ static void igb_watchdog_task(struct work_struct *work)
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+ "Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & E1000_CTRL_RFCE) ? "RX" :
+ (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
- printk(KERN_INFO "igb: %s The network adapter "
- "link speed was downshifted "
- "because it overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_LINK_THROTTLE)) {
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
}
/* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3729,10 @@ static void igb_watchdog_task(struct work_struct *work)
adapter->link_duplex = 0;
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
- printk(KERN_ERR "igb: %s The network adapter "
- "was stopped because it "
- "overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_PWR_DOWN)) {
+ netdev_err(netdev, "The network adapter was "
+ "stopped because it overheated\n");
}
/* Links status message must follow this format */
@@ -3704,6 +3746,9 @@ static void igb_watchdog_task(struct work_struct *work)
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
}
}
@@ -4241,6 +4286,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
frag++;
}
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
/* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
tx_desc->read.cmd_type_len = cmd_type;
@@ -5780,6 +5827,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
i += tx_ring->count;
tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->tx_syncp);
@@ -6138,7 +6187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = netdev_alloc_page(rx_ring->netdev);
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6516,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return 0;
}
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6543,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
igb_rlpml_set(adapter);
}
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6556,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6575,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -6582,13 +6635,14 @@ err_inval:
return -EINVAL;
}
-static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
- u32 wufc = adapter->wol;
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
@@ -6596,7 +6650,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev))
- igb_close(netdev);
+ __igb_close(netdev, true);
igb_clear_interrupt_scheme(adapter);
@@ -6655,12 +6709,13 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igb_suspend(struct device *dev)
{
int retval;
bool wake;
+ struct pci_dev *pdev = to_pci_dev(dev);
- retval = __igb_shutdown(pdev, &wake);
+ retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return retval;
@@ -6674,8 +6729,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int igb_resume(struct pci_dev *pdev)
+static int igb_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6696,7 +6752,18 @@ static int igb_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (!rtnl_is_locked()) {
+ /*
+ * shut up ASSERT_RTNL() warning in
+ * netif_set_real_num_tx/rx_queues.
+ */
+ rtnl_lock();
+ err = igb_init_interrupt_scheme(adapter);
+ rtnl_unlock();
+ } else {
+ err = igb_init_interrupt_scheme(adapter);
+ }
+ if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -6709,23 +6776,61 @@ static int igb_resume(struct pci_dev *pdev)
wr32(E1000_WUS, ~0);
- if (netif_running(netdev)) {
- err = igb_open(netdev);
+ if (netdev->flags & IFF_UP) {
+ err = __igb_open(netdev, true);
if (err)
return err;
}
netif_device_attach(netdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (!igb_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake, 1);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
return 0;
}
+
+static int igb_runtime_resume(struct device *dev)
+{
+ return igb_resume(dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
#endif
static void igb_shutdown(struct pci_dev *pdev)
{
bool wake;
- __igb_shutdown(pdev, &wake);
+ __igb_shutdown(pdev, &wake, 0);
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, wake);
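
The hunks above add runtime PM callbacks next to the existing system suspend/resume pair. A minimal sketch of how such callbacks are typically wired into a PCI driver through dev_pm_ops; the my_* names, the empty bodies, and the 5-second idle delay only mirror the shape above and are not the driver's code:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_suspend(struct device *dev)         { return 0; }
static int my_resume(struct device *dev)          { return 0; }
static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev)  { return 0; }

static int my_runtime_idle(struct device *dev)
{
        /* schedule our own delayed suspend and veto the immediate one */
        pm_schedule_suspend(dev, 5000);         /* 5 s, in milliseconds */
        return -EBUSY;
}

static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume,
                           my_runtime_idle)
};

static struct pci_driver my_pci_driver = {
        .name   = "my_pci_drv",
        .driver = { .pm = &my_pm_ops },
        /* .id_table and .probe omitted from this sketch */
};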
@@ -7064,15 +7169,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
wr32(E1000_DMCTXTH, 0);
/*
- * DMA Coalescing high water mark needs to be higher
- * than the RX threshold. set hwm to PBA - 2 * max
- * frame size
+ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, but never below PBA - 6KB.
*/
- hwm = pba - (2 * adapter->max_frame_size);
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = rd32(E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+ wr32(E1000_FCRTC, reg);
+
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, but never below PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- dmac_thr = pba - 4;
-
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
@@ -7088,7 +7206,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* coalescing(smart fifb)-UTRESH=0
*/
wr32(E1000_DMCRTRH, 0);
- wr32(E1000_FCRTC, hwm);
reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
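
To make the new water-mark arithmetic concrete, take hypothetical values of pba = 34 (KB of packet buffer) and max_frame_size = 1522 bytes, with C integer division: hwm = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081 sixteen-byte units (about 32.5 KB), which is above the 64 * (34 - 6) = 1792 floor and is therefore kept; dmac_thr = 34 - 1522 / 512 = 34 - 2 = 32 KB, again above the 34 - 10 = 24 KB floor. The values are chosen only for illustration.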
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2c25858cc0f..7b600a1f636 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32] = "N/A";
- strncpy(drvinfo->driver, igbvf_driver_name, 32);
- strncpy(drvinfo->version, igbvf_driver_version, 32);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igbvf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = igbvf_get_regs_len(netdev);
drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
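
The strncpy-to-strlcpy conversions here (and the matching ixgb, ixgbe, ixgbevf, jme, mv643xx and skge hunks later in this diff) follow one pattern: size each copy to the destination field and guarantee NUL termination. A minimal, hypothetical sketch; the identifiers and the private struct are made up:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/string.h>

static const char my_driver_name[]    = "my_eth";
static const char my_driver_version[] = "1.0";

struct my_adapter {
        struct pci_dev *pdev;
};

static void my_get_drvinfo(struct net_device *netdev,
                           struct ethtool_drvinfo *drvinfo)
{
        struct my_adapter *adapter = netdev_priv(netdev);

        /* strlcpy() truncates safely and always NUL-terminates */
        strlcpy(drvinfo->driver, my_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, my_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
}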
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index cca78124be3..fd3da3076c2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size);
}
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (hw->mac.ops.set_vfta(hw, vid, true))
+ if (hw->mac.ops.set_vfta(hw, vid, true)) {
dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
- else
- set_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ set_bit(vid, adapter->active_vlans);
+ return 0;
}
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__IGBVF_DOWN, &adapter->state))
igbvf_irq_enable(adapter);
- if (hw->mac.ops.set_vfta(hw, vid, false))
+ if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);
- else
- clear_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ clear_bit(vid, adapter->active_vlans);
+ return 0;
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
- dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex"));
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = {
static int __init igbvf_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- igbvf_driver_string, igbvf_driver_version);
- printk(KERN_INFO "%s\n", igbvf_copyright);
+ pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+ pr_info("%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
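
The pr_fmt()/pr_info() conversion above relies on pr_fmt() being defined before any printk helpers are pulled in, so every pr_*() call in the file gets the module-name prefix. A small stand-alone sketch; the module body and version string are made up:

/* must precede the first include that drags in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>

static int __init my_init(void)
{
        pr_info("version %s\n", "1.0");     /* logs "<modname>: version 1.0" */
        return 0;
}

static void __exit my_exit(void)
{
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");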
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 9dfce7dff79..dbb7dd2f8e3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ixgb_driver_name, 32);
- strncpy(drvinfo->version, ixgb_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgb_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgb_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGB_STATS_LEN;
drvinfo->regdump_len = ixgb_get_regs_len(netdev);
drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e21148f8b16..9bd5faf64a8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
err = pci_enable_msi(adapter->pdev);
if (!err) {
- adapter->have_msi = 1;
+ adapter->have_msi = true;
irq_flags = 0;
}
/* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
}
}
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
}
static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
return 0;
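
The u32-to-netdev_features_t changes in this file (and the igb, ixgbe, ixgbevf, jme and mv643xx ones elsewhere in this diff) all track the same API change: feature masks now use a dedicated type. A hedged sketch of the callback pair; the my_* names and the example constraint are illustrative, not ixgb's logic:

#include <linux/netdevice.h>

static netdev_features_t my_fix_features(struct net_device *netdev,
                                         netdev_features_t features)
{
        /* example constraint: Rx checksum offload requires Rx VLAN strip */
        if (!(features & NETIF_F_HW_VLAN_RX))
                features &= ~NETIF_F_RXCSUM;
        return features;
}

static int my_set_features(struct net_device *netdev,
                           netdev_features_t features)
{
        netdev_features_t changed = features ^ netdev->features;

        if (!(changed & NETIF_F_RXCSUM))
                return 0;

        /* ...reprogram the hardware for the new Rx checksum setting... */
        return 0;
}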
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
-static void
+static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta |= (1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta &= ~(1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8368d5cf68..258164d6d45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -560,6 +560,7 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+extern char ixgbe_default_device_descr[];
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -627,6 +628,8 @@ extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4ae26a748da..772072147be 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index f1365fef4ed..a3aa6333073 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
}
return 0;
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
- bool link_up = 0;
+ bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 33b93ffb87c..da31735311f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort a bad configuration */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
- adapter->dcb_set_bitmap |= BIT_PFC;
+ adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort bad configurations */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (ret)
return DCB_NO_HW_CHG;
-#ifdef IXGBE_FCOE
- if (up && !(up & (1 << adapter->fcoe.up)))
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
- /*
- * Only take down the adapter if an app change occurred. FCoE
- * may shuffle tx rings in this case and this can not be done
- * without a reset currently.
- */
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- adapter->fcoe.up = ffs(up) - 1;
-
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- }
-#endif
-
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
-#ifdef IXGBE_FCOE
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- ret = DCB_HW_CHG_RST;
- }
-#endif
-
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+ /* Reprogram FCoE hardware offloads when the traffic class
+ * that FCoE is using changes. This happens if the APP info
+ * changes or the up2tc mapping is updated.
+ */
+ if ((up && !(up & (1 << adapter->fcoe.up))) ||
+ (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+ adapter->fcoe.up = ffs(up) - 1;
+ ixgbe_dcbnl_devreset(netdev);
+ ret = DCB_HW_CHG_RST;
+ }
+#endif
+
adapter->dcb_set_bitmap = 0x00;
return ret;
}
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_stop(dev);
-
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
- adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+ u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+ adapter->dcb_set_bitmap |= mask;
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 70d58c3849b..da7e580f517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u32 nvm_track_id;
- strncpy(drvinfo->driver, ixgbe_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
nvm_track_id = (adapter->eeprom_verh << 16) |
adapter->eeprom_verl;
- snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
nvm_track_id);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1959,12 +1955,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
/* WOL not supported except for the following */
switch(hw->device_id) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ /* Only these subdevices support WOL */
+ switch (hw->subsystem_device_id) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0) {
+ wol->supported = 0;
+ break;
+ }
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ retval = 0;
+ break;
+ default:
wol->supported = 0;
break;
}
- retval = 0;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index df3b1be69d8..d18d6157dd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -855,3 +855,86 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
}
return rc;
}
+
+/**
+ * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
+ * @netdev: ixgbe adapter
+ * @info: HBA information
+ *
+ * Returns ixgbe HBA information
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, pos;
+ u8 buf[8];
+
+ if (!info)
+ return -EINVAL;
+
+ /* Don't return information on unsupported devices */
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
+ return -EINVAL;
+
+ /* Manufacturer */
+ snprintf(info->manufacturer, sizeof(info->manufacturer),
+ "Intel Corporation");
+
+ /* Serial Number */
+
+ /* Get the PCI-e Device Serial Number Capability */
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
+
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "Unknown");
+
+ /* Hardware Version */
+ snprintf(info->hardware_version,
+ sizeof(info->hardware_version),
+ "Rev %d", hw->revision_id);
+ /* Driver Name/Version */
+ snprintf(info->driver_version,
+ sizeof(info->driver_version),
+ "%s v%s",
+ ixgbe_driver_name,
+ ixgbe_driver_version);
+ /* Firmware Version */
+ snprintf(info->firmware_version,
+ sizeof(info->firmware_version),
+ "0x%08x",
+ (adapter->eeprom_verh << 16) |
+ adapter->eeprom_verl);
+
+ /* Model */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel 82599");
+ } else {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel X540");
+ }
+
+ /* Model Description */
+ snprintf(info->model_description,
+ sizeof(info->model_description),
+ "%s",
+ ixgbe_default_device_descr);
+
+ return 0;
+}
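
The new ixgbe_fcoe_get_hbainfo() above builds its serial number string from the PCIe Device Serial Number extended capability. A generic, hypothetical helper showing just that read, separated from the FCoE plumbing:

#include <linux/pci.h>

static int my_read_dsn(struct pci_dev *pdev, u8 dsn[8])
{
        int pos, i;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
        if (!pos)
                return -ENODEV;

        /* the 64-bit serial number starts 4 bytes into the capability */
        for (i = 0; i < 8; i++)
                pci_read_config_byte(pdev, pos + 4 + i, &dsn[i]);

        return 0;
}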
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8ef92d1a6aa..1ee5d0fbb90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -55,6 +55,8 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
+char ixgbe_default_device_descr[] =
+ "Intel(R) 10 Gigabit Network Connection";
#define MAJ 3
#define MIN 6
#define BUILD 7
@@ -106,6 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
/* required last entry */
{0, }
};
@@ -146,7 +149,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
- /* flush memory to make sure state is correct before next watchog */
+ /* flush memory to make sure state is correct before next watchdog */
smp_mb__before_clear_bit();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
@@ -1140,7 +1143,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
if (ring_is_ps_enabled(rx_ring)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
rx_ring->rx_stats.alloc_rx_page_failed++;
goto no_buffers;
@@ -2156,7 +2159,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
- * therefore no explict interrupt disable is necessary */
+ * therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
/*
@@ -3044,7 +3047,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3056,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add VID to filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3069,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* remove VID from filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3602,7 +3609,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
/*
- * We are assuming the worst case scenerio here, and that
+ * We are assuming the worst case scenario here, and that
* is that an SFP was inserted/removed after the reset
* but before SFP detection was enabled. As such the best
* solution is to just start searching as soon as we start
@@ -3824,7 +3831,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issuesassociated with "
+ "Please be aware there may be issues associated with "
"your hardware. If you are experiencing problems "
"please contact your Intel or hardware "
"representative who provided you with this "
@@ -4019,7 +4026,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Mark all the VFs as inactive */
for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
+ adapter->vfinfo[i].clear_to_send = false;
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
@@ -5788,9 +5795,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
* @adapter - pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
- * in order to make certain interrupts are occuring. Secondly it sets the
+ * in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
*/
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
@@ -7128,7 +7135,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to
- * match packet buffer alignment. Unfortunantly, the
+ * match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
@@ -7174,7 +7181,8 @@ void ixgbe_do_reset(struct net_device *netdev)
ixgbe_reset(adapter);
}
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7204,7 +7212,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
return data;
}
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
@@ -7286,6 +7295,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+ .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
@@ -7598,9 +7608,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->wol = 0;
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ /* Only these subdevices support WOL */
+ switch (pdev->subsystem_device) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0)
+ break;
+ case IXGBE_SUBDEV_ID_82599_SFP:
adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
@@ -7708,7 +7725,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
- e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+ e_dev_info("%s\n", ixgbe_default_device_descr);
cards_found++;
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 9a56fd74e67..7cf1e1f56c6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
*data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
s32 i;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
s32 status = 0;
s32 i;
u32 i2cctl;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
i2cctl |= IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
return status;
}
@@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
bool data;
if (*i2cctl & IXGBE_I2C_DATA_IN)
- data = 1;
+ data = true;
else
- data = 0;
+ data = false;
return data;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 00fcd39ad66..cf6812dd143 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(new_mac, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index df04f1a3857..e8badab0335 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6c5cca808bd..802bfa0f62c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -57,6 +57,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -65,6 +66,7 @@
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -1710,8 +1712,6 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info {
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
/* prefix for World Wide Node Name (WWNN) */
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e5101e91b6b..8cc5eccfd65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
/*
- * In order for the blink bit in the LED control register
- * to work, link and speed must be forced in the MAC. We
- * will reverse this when we stop the blinking.
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
*/
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f1a86..2eb89cb94a0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -35,7 +35,6 @@
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
#define IXGBE_VF_MAX_RX_QUEUES 1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e29ba4506b7..dc8e6511c64 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -27,6 +27,8 @@
/* ethtool support for ixgbevf */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
- strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbevf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = {
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
- printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
+ pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+ "0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4c8e19951d5..891162d1610 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -29,6 +29,9 @@
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(adapter->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
int count = 0;
if ((netdev_uc_count(netdev)) > 10) {
- printk(KERN_ERR "Too many unicast filters - No Space\n");
+ pr_err("Too many unicast filters - No Space\n");
return -ENOSPC;
}
@@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_queues(adapter);
if (err) {
- printk(KERN_ERR "Unable to allocate memory for queues\n");
+ pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
@@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
- printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ pr_err("init_shared_code failed: %d\n", err);
goto out;
}
}
@@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev)
* the vf can't start. */
if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX;
- printk(KERN_ERR "Unable to start - perhaps the PF"
- " Driver isn't up yet\n");
+ pr_err("Unable to start - perhaps the PF Driver isn't "
+ "up yet\n");
goto err_setup_reset;
}
}
@@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit())) {
- printk(KERN_WARNING
- "partial checksum but "
- "proto=%x!\n",
- skb->protocol);
+ pr_warn("partial checksum but "
+ "proto=%x!\n", skb->protocol);
}
break;
}
@@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- printk(KERN_ERR "invalid MAC address\n");
+ pr_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
}
@@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void)
{
int ret;
- printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
+ pr_info("%s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
- printk(KERN_INFO "%s\n", ixgbevf_copyright);
+ pr_info("%s\n", ixgbevf_copyright);
ret = pci_register_driver(&ixgbevf_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb03f3..9d38a94a348 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -47,8 +47,8 @@
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200eeca2..5e4d5e5cdf3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -39,29 +39,29 @@
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
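
The regs.h churn above only adds parentheses around each macro parameter; without them an expression argument binds incorrectly against the multiplication. A two-line illustration (the register offset is arbitrary):

#define BAD_REG(x)      (0x01000 + (0x40 * x))
#define GOOD_REG(x)     (0x01000 + (0x40 * (x)))

/*
 * BAD_REG(i + 1)  expands to (0x01000 + (0x40 * i + 1))   -- not intended
 * GOOD_REG(i + 1) expands to (0x01000 + (0x40 * (i + 1))) -- intended
 */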
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e8c47..21533e30036 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
- memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
@@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
- memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
return 0;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387..27d651a80f3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -1883,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
struct page *page,
u32 page_offset,
u32 len,
- u8 hidma)
+ bool hidma)
{
dma_addr_t dmaaddr;
@@ -1917,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc, *ctxdesc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
- u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
@@ -2292,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(jme->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -2620,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
jme->msg_enable = value;
}
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
{
if (netdev->mtu > 1900)
features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2629,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
}
static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb..4304072bd3c 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* Extern PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d8430f487b8..6ad094f176f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
.remove = korina_remove,
};
-static int __init korina_init_module(void)
-{
- return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
- return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 194a0311380..80aab4e5d69 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
- strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "platform", 32);
+ strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
@@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- u32 rx_csum = features & NETIF_F_RXCSUM;
+ bool rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
@@ -2509,7 +2511,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->base;
u32 win_enable;
@@ -2527,7 +2529,7 @@ mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
win_protect = 0;
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -2577,6 +2579,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
static int mv643xx_eth_version_printed;
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
struct mv643xx_eth_shared_private *msp;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
int ret;
@@ -2641,8 +2644,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- mv643xx_eth_conf_mbus_windows(msp, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv643xx_eth_conf_mbus_windows(msp, dram);
/*
* Detect hardware parameters.
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d0624c5e..5ec409e3da0 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1645,18 +1645,7 @@ static struct platform_driver pxa168_eth_driver = {
},
};
-static int __init pxa168_init_module(void)
-{
- return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
- platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c7b60839ac9..18a87a57fc0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(skge->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct skge_stat {
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
+
+ skge_set_multicast(dev);
+
return 0;
free_tx_ring:
@@ -4039,7 +4042,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -4101,7 +4104,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
#else
#define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void skge_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7803efa46eb..760c2b17dfd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1110,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
sky2->tx_prod = sky2->tx_cons = 0;
sky2->tx_tcpsum = 0;
sky2->tx_last_mss = 0;
+ netdev_reset_queue(sky2->netdev);
le = get_tx_le(sky2, &sky2->tx_prod);
le->addr = 0;
@@ -1284,7 +1285,7 @@ static const uint32_t rss_init_key[10] = {
};
/* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1402,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1971,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (tx_avail(sky2) <= MAX_SKB_TX_LE)
netif_stop_queue(dev);
+ netdev_sent_queue(dev, skb->len);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
return NETDEV_TX_OK;
@@ -2002,7 +2004,8 @@ mapping_error:
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
struct net_device *dev = sky2->netdev;
- unsigned idx;
+ u16 idx;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
BUG_ON(done >= sky2->tx_ring_size);
@@ -2017,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
netif_printk(sky2, tx_done, KERN_DEBUG, dev,
"tx done %u\n", idx);
- u64_stats_update_begin(&sky2->tx_stats.syncp);
- ++sky2->tx_stats.packets;
- sky2->tx_stats.bytes += skb->len;
- u64_stats_update_end(&sky2->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += skb->len;
re->skb = NULL;
dev_kfree_skb_any(skb);
@@ -2031,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ sky2->tx_stats.packets += pkts_compl;
+ sky2->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -3643,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct sky2_stat {
@@ -4311,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
const struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
@@ -4335,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_RXCSUM) {
- u32 on = features & NETIF_F_RXCSUM;
+ bool on = features & NETIF_F_RXCSUM;
sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a1585..4a40ab967ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c3ae2..915e947b422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero (default 1)");
+ "Reset device on internal errors if non-zero"
+ " (default 1, in SRIOV mode default is 0)");
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
+ /* If we are in SRIOV, the default of the module param must be 0 */
+ if (mlx4_is_mfunc(dev))
+ internal_err_reset = 0;
+
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a0b8c..978f593094c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include "mlx4.h"
+#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK 0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
int next;
u64 out_param;
u16 token;
+ u8 fw_status;
};
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr);
+
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
+static u8 mlx4_errno_to_status(int errno)
+{
+ switch (errno) {
+ case -EPERM:
+ return CMD_STAT_BAD_OP;
+ case -EINVAL:
+ return CMD_STAT_BAD_PARAM;
+ case -ENXIO:
+ return CMD_STAT_BAD_SYS_STATE;
+ case -EBUSY:
+ return CMD_STAT_RESOURCE_BUSY;
+ case -ENOMEM:
+ return CMD_STAT_EXCEED_LIM;
+ case -ENFILE:
+ return CMD_STAT_ICM_ERROR;
+ default:
+ return CMD_STAT_INTERNAL_ERR;
+ }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 status = readl(&priv->mfunc.comm->slave_read);
+
+ return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 val;
+
+ priv->cmd.comm_toggle ^= 1;
+ val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+ __raw_writel((__force u32) cpu_to_be32(val),
+ &priv->mfunc.comm->slave_write);
+ mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long end;
+ int err = 0;
+ int ret_from_pending = 0;
+
+ /* First, verify that the master reports correct status */
+ if (comm_pending(dev)) {
+ mlx4_warn(dev, "Communication channel is not idle."
+ "my toggle is %d (cmd:0x%x)\n",
+ priv->cmd.comm_toggle, cmd);
+ return -EAGAIN;
+ }
+
+ /* Write command */
+ down(&priv->cmd.poll_sem);
+ mlx4_comm_cmd_post(dev, cmd, param);
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ ret_from_pending = comm_pending(dev);
+ if (ret_from_pending) {
+ /* check if the slave is trying to boot in the middle of
+ * FLR process. The only non-zero result in the RESET command
+ * is MLX4_DELAY_RESET_SLAVE */
+ if ((MLX4_COMM_CMD_RESET == cmd)) {
+ mlx4_warn(dev, "Got slave FLRed from Communication"
+ " channel (ret:0x%x)\n", ret_from_pending);
+ err = MLX4_DELAY_RESET_SLAVE;
+ } else {
+ mlx4_warn(dev, "Communication channel timed out\n");
+ err = -ETIMEDOUT;
+ }
+ }
+
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+ u16 param, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_comm_cmd_post(dev, op, param);
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (time_after_eq(jiffies, end))
+ if (time_after_eq(jiffies, end)) {
+ mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
+ }
cond_resched();
}
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
- op), hcr + 6);
+ op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
return ret;
}
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+ int ret;
+
+ down(&priv->cmd.slave_sem);
+ vhcr->in_param = cpu_to_be64(in_param);
+ vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+ vhcr->in_modifier = cpu_to_be32(in_modifier);
+ vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+ vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+ vhcr->status = 0;
+ vhcr->flags = !!(priv->cmd.use_events) << 6;
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ }
+ } else {
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ MLX4_COMM_TIME + timeout);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ } else
+ mlx4_err(dev, "failed execution of VHCR_POST command"
+ "opcode 0x%x\n", op);
+ }
+ up(&priv->cmd.slave_sem);
+ return ret;
+}
+
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
+ u32 stat;
down(&priv->cmd.poll_sem);
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
- err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+ stat = be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+ err = mlx4_status_to_errno(stat);
+ if (err)
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, stat);
out:
up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
if (token != context->token)
return;
+ context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
- if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
- if (err)
+ if (err) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
goto out;
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -311,17 +521,1046 @@ out:
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout)
+ u16 op, unsigned long timeout, int native)
{
- if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
- else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ }
+ return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+ int slave, u64 slave_addr,
+ int size, int is_read)
+{
+ u64 in_param;
+ u64 out_param;
+
+ if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+ (slave & ~0x7f) | (size & 0xff)) {
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+ "master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
+ return -EINVAL;
+ }
+
+ if (is_read) {
+ in_param = (u64) slave | slave_addr;
+ out_param = (u64) dev->caps.function | master_addr;
+ } else {
+ in_param = (u64) dev->caps.function | master_addr;
+ out_param = (u64) slave | slave_addr;
+ }
+
+ return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+ MLX4_CMD_ACCESS_MEM,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+ if (cmd->encode_slave_id) {
+ in_param &= 0xffffffffffffff00ll;
+ in_param |= slave;
+ }
+
+ err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+ vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm)
+ vhcr->out_param = out_param;
+
+ return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+ {
+ .opcode = MLX4_CMD_QUERY_FW,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_HCA,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_DEV_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_ADAPTER,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_INIT_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_CLOSE_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CLOSE_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_PORT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_PORT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MAP_EQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAP_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_EQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_NOP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_ALLOC_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ALLOC_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_FREE_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FREE_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_MPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_MPT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_MPT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_READ_MTT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_WRITE_MTT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_WRITE_MTT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SYNC_TPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_CQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_CQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MODIFY_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MODIFY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_SRQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_SRQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_ARM_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ARM_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RST2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_RST2INIT_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2RTR_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT2RTR_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQERR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2ERR_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2SQD_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2SQD_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2RST_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_2RST_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_QP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_UNSUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_IF_STAT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_IF_STAT_wrapper
+ },
+ /* Native multicast commands are not available for guests */
+ {
+ .opcode = MLX4_CMD_QP_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_PROMISC,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_PROMISC_wrapper
+ },
+ /* Ethernet specific commands */
+ {
+ .opcode = MLX4_CMD_SET_VLAN_FLTR,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_MCAST_FLTR,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_DUMP_ETH_STATS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INFORM_FLR_DONE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+};
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_info *cmd = NULL;
+ struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+ struct mlx4_vhcr *vhcr;
+ struct mlx4_cmd_mailbox *inbox = NULL;
+ struct mlx4_cmd_mailbox *outbox = NULL;
+ u64 in_param;
+ u64 out_param;
+ int ret = 0;
+ int i;
+ int err = 0;
+
+ /* Create sw representation of Virtual HCR */
+ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+ if (!vhcr)
+ return -ENOMEM;
+
+ /* DMA in the vHCR */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr_cmd),
+ MLX4_ACCESS_MEM_ALIGN), 1);
+ if (ret) {
+ mlx4_err(dev, "%s:Failed reading vhcr"
+ "ret: 0x%x\n", __func__, ret);
+ kfree(vhcr);
+ return ret;
+ }
+ }
+
+ /* Fill SW VHCR fields */
+ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+ vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+ vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+ vhcr->token = be16_to_cpu(vhcr_cmd->token);
+ vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+ vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+ vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+ /* Lookup command */
+ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+ if (vhcr->op == cmd_info[i].opcode) {
+ cmd = &cmd_info[i];
+ break;
+ }
+ }
+ if (!cmd) {
+ mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+ vhcr->op, slave);
+ vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+ goto out_status;
+ }
+
+ /* Read inbox */
+ if (cmd->has_inbox) {
+ vhcr->in_param &= INBOX_MASK;
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ inbox = NULL;
+ goto out_status;
+ }
+
+ if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1)) {
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
+ vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+ goto out_status;
+ }
+ }
+
+ /* Apply permission and bound checks if applicable */
+ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+ "checks for resource_id:%d\n", vhcr->op, slave,
+ vhcr->in_modifier);
+ vhcr_cmd->status = CMD_STAT_BAD_OP;
+ goto out_status;
+ }
+
+ /* Allocate outbox */
+ if (cmd->has_outbox) {
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ outbox = NULL;
+ goto out_status;
+ }
+ }
+
+ /* Execute the command! */
+ if (cmd->wrapper) {
+ err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+ cmd);
+ if (cmd->out_is_imm)
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ } else {
+ in_param = cmd->has_inbox ? (u64) inbox->dma :
+ vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma :
+ vhcr->out_param;
+ err = __mlx4_cmd(dev, in_param, &out_param,
+ cmd->out_is_imm, vhcr->in_modifier,
+ vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm) {
+ vhcr->out_param = out_param;
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ }
+ }
+
+ if (err) {
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+ " error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ vhcr_cmd->status = mlx4_errno_to_status(err);
+ goto out_status;
+ }
+
+
+ /* Write outbox if command completed successfully */
+ if (cmd->has_outbox && !vhcr_cmd->status) {
+ ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+ vhcr->out_param,
+ MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+ if (ret) {
+ /* If we failed to write back the outbox after the
+ * command was successfully executed, we must fail this
+ * slave, as it is now in undefined state */
+ mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+ goto out;
+ }
+ }
+
+out_status:
+ /* DMA back vhcr result */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr),
+ MLX4_ACCESS_MEM_ALIGN),
+ MLX4_CMD_WRAPPED);
+ if (ret)
+ mlx4_err(dev, "%s:Failed writing vhcr result\n",
+ __func__);
+ else if (vhcr->e_bit &&
+ mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+ mlx4_warn(dev, "Failed to generate command completion "
+ "eqe for slave %d\n", slave);
+ }
+
+out:
+ kfree(vhcr);
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+ u16 param, u8 toggle)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ u32 reply;
+ u32 slave_status = 0;
+ u8 is_going_down = 0;
+
+ slave_state[slave].comm_toggle ^= 1;
+ reply = (u32) slave_state[slave].comm_toggle << 31;
+ if (toggle != slave_state[slave].comm_toggle) {
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
+ "STATE COMPROMISIED ***\n", toggle, slave);
+ goto reset_slave;
+ }
+ if (cmd == MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+ slave_state[slave].active = false;
+ /* check if we are in the middle of FLR process,
+ * if so return "retry" status to the slave */
+ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ slave_status = MLX4_DELAY_RESET_SLAVE;
+ goto inform_slave_state;
+ }
+
+ /* write the version in the event field */
+ reply |= mlx4_comm_get_version();
+
+ goto reset_slave;
+ }
+ /* command from slave in the middle of FLR */
+ if (cmd != MLX4_COMM_CMD_RESET &&
+ MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+ "in the middle of FLR\n", slave, cmd);
+ return;
+ }
+
+ switch (cmd) {
+ case MLX4_COMM_CMD_VHCR0:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma = ((u64) param) << 48;
+ priv->mfunc.master.slave_state[slave].cookie = 0;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ break;
+ case MLX4_COMM_CMD_VHCR1:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+ break;
+ case MLX4_COMM_CMD_VHCR2:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+ break;
+ case MLX4_COMM_CMD_VHCR_EN:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= param;
+ slave_state[slave].active = true;
+ break;
+ case MLX4_COMM_CMD_VHCR_POST:
+ if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ goto reset_slave;
+ down(&priv->cmd.slave_sem);
+ if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+ mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+ " reseting slave.\n", slave);
+ up(&priv->cmd.slave_sem);
+ goto reset_slave;
+ }
+ up(&priv->cmd.slave_sem);
+ break;
+ default:
+ mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+ goto reset_slave;
+ }
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = cmd;
+ else
+ is_going_down = 1;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ if (is_going_down) {
+ mlx4_warn(dev, "Slave is going down aborting command(%d)"
+ " executing from slave:%d\n",
+ cmd, slave);
+ return;
+ }
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ mmiowb();
+
+ return;
+
+reset_slave:
+ /* cleanup any slave resources */
+ mlx4_delete_all_resources_for_slave(dev, slave);
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /* with slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+ memset(&slave_state[slave].event_eq, 0,
+ sizeof(struct mlx4_slave_event_eq_info));
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work,
+ struct mlx4_mfunc_master_ctx,
+ comm_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ __be32 *bit_vec;
+ u32 comm_cmd;
+ u32 vec;
+ int i, j, slave;
+ int toggle;
+ int served = 0;
+ int reported = 0;
+ u32 slt;
+
+ bit_vec = master->comm_arm_bit_vector;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+ vec = be32_to_cpu(bit_vec[i]);
+ for (j = 0; j < 32; j++) {
+ if (!(vec & (1 << j)))
+ continue;
+ ++reported;
+ slave = (i * 32) + j;
+ comm_cmd = swab32(readl(
+ &mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read))
+ >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ printk(KERN_INFO "slave %d out of sync."
+ " read toggle %d, state toggle %d. "
+ "Resynching.\n", slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
+ }
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
+ }
+ }
+ }
+
+ if (reported && reported != served)
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+ " but %d were served\n",
+ reported, served);
+
+ if (mlx4_ARM_COMM_CHANNEL(dev))
+ mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int wr_toggle;
+ int rd_toggle;
+ unsigned long end;
+
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+ end = jiffies + msecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+ if (rd_toggle == wr_toggle) {
+ priv->cmd.comm_toggle = rd_toggle;
+ return 0;
+ }
+
+ cond_resched();
+ }
+
+ /*
+ * we could reach here if for example the previous VM using this
+ * function misbehaved and left the channel with unsynced state. We
+ * should fix this here and give this VM a chance to use a properly
+ * synced channel
+ */
+ mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+ priv->cmd.comm_toggle = 0;
+
+ return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i, err, port;
+
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate vhcr.\n");
+ return -ENOMEM;
+ }
+
+ if (mlx4_is_master(dev))
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+ else
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+ if (!priv->mfunc.comm) {
+ mlx4_err(dev, "Couldn't map communication vector.\n");
+ goto err_vhcr;
+ }
+
+ if (mlx4_is_master(dev)) {
+ priv->mfunc.master.slave_state =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_slave_state), GFP_KERNEL);
+ if (!priv->mfunc.master.slave_state)
+ goto err_comm;
+
+ for (i = 0; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_write);
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_read);
+ mmiowb();
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ s_state->vlan_filter[port] =
+ kzalloc(sizeof(struct mlx4_vlan_fltr),
+ GFP_KERNEL);
+ if (!s_state->vlan_filter[port]) {
+ if (--port)
+ kfree(s_state->vlan_filter[port]);
+ goto err_slaves;
+ }
+ INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ }
+ spin_lock_init(&s_state->lock);
+ }
+
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ INIT_WORK(&priv->mfunc.master.comm_work,
+ mlx4_master_comm_channel);
+ INIT_WORK(&priv->mfunc.master.slave_event_work,
+ mlx4_gen_slave_eqe);
+ INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+ mlx4_master_handle_slave_flr);
+ spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ priv->mfunc.master.comm_wq =
+ create_singlethread_workqueue("mlx4_comm");
+ if (!priv->mfunc.master.comm_wq)
+ goto err_slaves;
+
+ if (mlx4_init_resource_tracker(dev))
+ goto err_thread;
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+ mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_resource;
+ }
+
+ } else {
+ err = sync_toggles(dev);
+ if (err) {
+ mlx4_err(dev, "Couldn't sync toggles\n");
+ goto err_comm;
+ }
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ }
+ return 0;
+
+err_resource:
+ mlx4_free_resource_tracker(dev);
+err_thread:
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+ while (--i) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+err_comm:
+ iounmap(priv->mfunc.comm);
+err_vhcr:
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ return -ENOMEM;
+}
+
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
- MLX4_HCR_SIZE);
- if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.");
- return -ENOMEM;
+ priv->cmd.hcr = NULL;
+ priv->mfunc.vhcr = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+ MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool) {
- iounmap(priv->cmd.hcr);
- return -ENOMEM;
- }
+ if (!priv->cmd.pool)
+ goto err_hcr;
return 0;
+
+err_hcr:
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
+ return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, port;
+
+ if (mlx4_is_master(dev)) {
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+ for (i = 0; i < dev->num_slaves; i++) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ }
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
- iounmap(priv->cmd.hcr);
+
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
}
/*
@@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ int err = 0;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
; /* nothing */
--priv->cmd.token_mask;
- priv->cmd.use_events = 1;
-
down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
- return 0;
+ return err;
}
/*
@@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
@@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+ return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a5168892..475f9d6af95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
-#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_cq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- __be32 logsize_usrpage;
- __be16 cq_period;
- __be16 cq_max_count;
- u8 reserved2[3];
- u8 comp_eqn;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved4[2];
- __be64 db_rec_addr;
-};
-
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
- mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+ MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+ cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (*cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ if (err)
+ goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+ else {
+ *cqn = get_param_l(&out_param);
+ return 0;
+ }
+ }
+ return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+ mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+ mlx4_table_put(dev, &cq_table->table, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cqn);
+ err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+ } else
+ __mlx4_cq_free_icm(dev, cqn);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq->vector = vector;
- cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
- if (cq->cqn == -1)
- return -ENOMEM;
-
- err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
- if (err)
- goto err_out;
-
- err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err)
- goto err_put;
+ return err;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+ mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+ mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d775e..00b81272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
- cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
if (err)
return err;
@@ -147,6 +144,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a8a02..7dbc6a23077 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
- sprintf(drvinfo->fw_version, "%d.%d.%d",
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
int err = 0;
u64 config = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ if ((priv->port < 1) || (priv->port > 2)) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
+ u64 mask;
+
+ if ((priv->port < 1) || (priv->port > 2))
+ return -EOPNOTSUPP;
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776bc355..72fa807b69c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
#include "mlx4_en.h"
#include "en_port.h"
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+ return 0;
}
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
+
+ return 0;
}
u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac, 0);
+ priv->base_qpn, priv->mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
kfree(priv->mc_addrs);
+ priv->mc_addrs = NULL;
priv->mc_addrs_cnt = 0;
}
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ mlx4_en_clear_list(dev);
priv->mc_addrs = mc_addrs;
priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
goto out;
}
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
/*
 * Promiscuous mode: disable all filters
*/
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn, 0);
+ /* Set qp number */
+ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+ err = mlx4_get_eth_qp(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn);
if (err) {
- en_err(priv, "Failed setting port mac\n");
+ en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@ tx_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
- /* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
-
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
+ /* Unregister Mac address for the port */
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +989,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &=
+ cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+ return 0;
+
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -990,6 +1020,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
+ .ndo_set_features = mlx4_en_set_features,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1053,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
+ priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
priv->mac_index = -1;
@@ -1088,6 +1121,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK;
mdev->pndev[port] = dev;
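The new ndo_set_features handler toggles MLX4_WQE_CTRL_FORCE_LOOPBACK inside priv->ctrl_flags, which is kept in big-endian form so it can be written straight into the WQE control segment. Byte swapping commutes with bitwise AND/OR/NOT, so the bit can be set or cleared on the big-endian value directly. A small user-space sketch of that idiom, with htonl() standing in for cpu_to_be32() and an invented flag value:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define CTRL_FORCE_LOOPBACK (1u << 2)   /* illustrative value only */

int main(void)
{
        /* Flags stored big-endian, as they would appear in the descriptor. */
        uint32_t ctrl_flags_be = htonl(0);

        /* Enable loopback: OR in the byte-swapped flag. */
        ctrl_flags_be |= htonl(CTRL_FORCE_LOOPBACK);

        /* Disable it again: AND with the byte-swapped complement.
         * htonl(~x) == ~htonl(x), so this clears exactly the same bit. */
        ctrl_flags_be &= htonl(~CTRL_FORCE_LOOPBACK);

        printf("flags = 0x%08x\n", (unsigned)ntohl(ctrl_flags_be));
        return 0;
}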
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd78cd..331791467a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
#include "mlx4_en.h"
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
- u64 mac, u64 clear, u8 mode)
-{
- return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
- MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
- MLX4_CMD_TIME_CLASS_B);
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_general_context *context;
- int err;
- u32 in_mod;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->flags = SET_PORT_GEN_ALL_VALID;
- context->mtu = cpu_to_be16(mtu);
- context->pptx = (pptx * (!pfctx)) << 7;
- context->pfctx = pfctx;
- context->pprx = (pprx * (!pfcrx)) << 7;
- context->pfcrx = pfcrx;
-
- in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_rqp_calc_context *context;
- int err;
- u32 in_mod;
- u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
- MCAST_DIRECT : MCAST_DEFAULT;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
- return 0;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = dev->caps.log_num_macs;
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
- base_qpn);
- context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
- base_qpn);
- context->intra_no_vlan = 0;
- context->no_vlan = MLX4_NO_VLAN_IDX;
- context->intra_vlan_miss = 0;
- context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
- in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
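Every mlx4_cmd()/mlx4_cmd_box() call in this file now passes MLX4_CMD_WRAPPED (or MLX4_CMD_NATIVE elsewhere), selecting whether the command is intercepted by a PF-side wrapper on behalf of a virtual function or sent to the firmware directly. The sketch below is not the driver's dispatcher; it is a toy illustration of the wrapped-versus-native split, with every name invented.

#include <stdio.h>

enum cmd_mode { CMD_NATIVE, CMD_WRAPPED };      /* mirrors the new mode argument */

/* Hypothetical per-opcode wrapper, akin in spirit to the *_wrapper() helpers. */
typedef int (*cmd_wrapper_fn)(int slave, unsigned long in_param);

static int set_vlan_fltr_wrapper(int slave, unsigned long in_param)
{
        (void)in_param;
        printf("wrapper: validating and issuing the command for slave %d\n", slave);
        return 0;
}

static int fw_execute(int opcode, unsigned long in_param)
{
        (void)in_param;
        printf("native: opcode 0x%x goes straight to firmware\n", opcode);
        return 0;
}

/* Simplified dispatch: wrapped commands are routed through the PF-side
 * wrapper; native commands always hit the firmware directly. */
static int issue_cmd(int slave, int opcode, unsigned long in_param,
                     enum cmd_mode mode, cmd_wrapper_fn wrapper)
{
        if (mode == CMD_WRAPPED && wrapper)
                return wrapper(slave, in_param);
        return fw_execute(opcode, in_param);
}

int main(void)
{
        issue_cmd(1, 0x47, 0, CMD_WRAPPED, set_vlan_fltr_wrapper);
        issue_cmd(0, 0x47, 0, CMD_NATIVE, NULL);
        return 0;
}

The split lets the same call sites serve both the physical function and virtual functions: validation and resource accounting happen once, in the wrapper, before anything reaches the hardware.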
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244f516..6934fd7e66e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
-enum {
- MLX4_CMD_SET_VLAN_FLTR = 0x47,
- MLX4_CMD_SET_MCAST_FLTR = 0x48,
- MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
- MCAST_DIRECT_ONLY = 0,
- MCAST_DIRECT = 1,
- MCAST_DEFAULT = 2
-};
-
-struct mlx4_set_port_general_context {
- u8 reserved[3];
- u8 flags;
- u16 reserved2;
- __be16 mtu;
- u8 pptx;
- u8 pfctx;
- u16 reserved3;
- u8 pprx;
- u8 pfcrx;
- u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
- __be32 base_qpn;
- u8 rererved;
- u8 n_mac;
- u8 n_vlan;
- u8 n_prio;
- u8 reserved2[3];
- u8 mac_miss;
- u8 intra_no_vlan;
- u8 no_vlan;
- u8 intra_vlan_miss;
- u8 vlan_miss;
- u8 reserved3[3];
- u8 no_vlan_prio;
- __be32 promisc;
- __be32 mcast;
-};
-
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec8a9d..bcbc54c1694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
- context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c2df6c35860..e8d6ad2dce0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
+ struct ethhdr *ethh;
+ u64 s_mac;
if (!priv->port_up)
return 0;
@@ -577,6 +579,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ /* Get a pointer to the first fragment (we don't have an skb yet)
+ * and cast it to an ethhdr struct */
+ ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+ skb_frags[0].offset);
+ s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+ /* If the source MAC is our own, drop the packet unless the
+ * loopback feature is enabled and the loopback selftest is running */
+ if (s_mac == priv->mac &&
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
+ goto next;
+
/*
* Packet is OK - process it.
*/
@@ -837,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
- struct mlx4_en_rss_context *rss_context;
+ struct mlx4_rss_context *rss_context;
void *ptr;
- u8 rss_mask = 0x3f;
+ u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+ MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -877,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
- ptr = ((void *) &context) + 0x3c;
+ ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ if (priv->mdev->profile.udp_rss) {
+ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ }
rss_context->flags = rss_mask;
- rss_context->hash_fn = 1;
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = rsskey[i];
- if (priv->mdev->profile.udp_rss)
- rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
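In the RSS setup above, the hard-coded 0x3c offset into the QP context is replaced by offsetof(struct mlx4_qp_context, pri_path) plus a named constant, and the 0x3f mask becomes an OR of named MLX4_RSS_* bits. A self-contained sketch of the offsetof() idiom, using an invented context layout and offset value:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented layout, only to show the idiom. */
struct qp_context_sketch {
        uint32_t flags;
        uint32_t pd;
        uint8_t  pri_path[32];
        uint32_t params;
};

#define RSS_OFFSET_IN_PRI_PATH 16       /* illustrative, not the real value */

int main(void)
{
        struct qp_context_sketch ctx = {0};

        /* Instead of "((void *)&ctx) + 0x3c", derive the location from the
         * structure definition so it survives layout changes. */
        void *rss_ptr = (uint8_t *)&ctx +
                        offsetof(struct qp_context_sketch, pri_path) +
                        RSS_OFFSET_IN_PRI_PATH;

        printf("rss context lives at byte offset %zu\n",
               (size_t)((uint8_t *)rss_ptr - (uint8_t *)&ctx));
        return 0;
}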
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcecd499..bf2e5d3f177 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b426753..9ef9038d062 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- struct mlx4_cqe *cqe = cq->buf;
+ struct mlx4_cqe *cqe;
u16 index;
- u16 new_index;
+ u16 new_index, ring_index;
u32 txbbs_skipped = 0;
- u32 cq_last_sav;
-
- /* index always points to the first TXBB of the last polled descriptor */
- index = ring->cons & ring->size_mask;
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- if (index == new_index)
- return;
+ u32 cons_index = mcq->cons_index;
+ int size = cq->size;
+ u32 size_mask = ring->size_mask;
+ struct mlx4_cqe *buf = cq->buf;
if (!priv->port_up)
return;
- /*
- * We use a two-stage loop:
- * - the first samples the HW-updated CQE
- * - the second frees TXBBs until the last sample
- * This lets us amortize CQE cache misses, while still polling the CQ
- * until is quiescent.
- */
- cq_last_sav = mcq->cons_index;
- do {
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ ring_index = ring->cons & size_mask;
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cons_index & size)) {
+ /*
+ * make sure we read the CQE after we read the
+ * ownership bit
+ */
+ rmb();
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
do {
- /* Skip over last polled CQE */
- index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
-
- /* Poll next CQE */
+ ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ /* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, index,
- !!((ring->cons + txbbs_skipped) &
- ring->size));
- ++mcq->cons_index;
-
- } while (index != new_index);
+ priv, ring, ring_index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ } while (ring_index != new_index);
+
+ ++cons_index;
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ }
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- } while (index != new_index);
- AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
- (u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
+ mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ (!!vlan_tx_tag_present(skb));
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 /* Prepare ctrl segment apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ !!vlan_tx_tag_present(skb);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
ring->tx_csum++;
}
- if (unlikely(priv->validate_loopback)) {
- /* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
}
/* Handle LSO (TSO) packets */
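The rewritten TX completion loop walks every completed CQE by comparing the CQE ownership bit against the parity of the free-running consumer index (the XNOR test), instead of sampling wqe_index up front. A single-threaded user-space sketch of that owner-bit convention follows; the ring size, field names and the missing memory barriers are simplifications, so treat it as an illustration of the parity test only.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8                     /* must be a power of two */
#define OWNER_BIT 0x80

struct cqe_sketch {
        uint8_t owner;                  /* top bit encodes the pass parity */
        uint8_t data;
};

static struct cqe_sketch ring[RING_SIZE];
static uint32_t cons_index;             /* free-running consumer counter */

/* "Hardware" posts a completion; the owner bit matches the producer's pass. */
static void hw_post(uint32_t prod_index, uint8_t data)
{
        struct cqe_sketch *cqe = &ring[prod_index & (RING_SIZE - 1)];

        cqe->data = data;
        cqe->owner = (prod_index & RING_SIZE) ? OWNER_BIT : 0;
}

/* Software consumes while the CQE's owner parity matches its own pass parity,
 * the same XNOR(owner, cons_index & size) test used in the rewritten loop. */
static void poll_cq(void)
{
        for (;;) {
                struct cqe_sketch *cqe = &ring[cons_index & (RING_SIZE - 1)];
                int hw_parity = !!(cqe->owner & OWNER_BIT);
                int sw_parity = !!(cons_index & RING_SIZE);

                if (hw_parity != sw_parity)
                        break;          /* no more completed entries */
                printf("consumed cqe %u (data %u)\n",
                       (unsigned)cons_index, (unsigned)cqe->data);
                ++cons_index;
        }
}

int main(void)
{
        uint32_t i;

        for (i = 0; i < RING_SIZE; i++)
                ring[i].owner = OWNER_BIT;      /* initially owned by "HW" */

        for (i = 0; i < 6; i++)
                hw_post(i, (uint8_t)i);
        poll_cq();                              /* entries 0..5 */

        for (; i < 10; i++)
                hw_post(i, (uint8_t)i);
        poll_cq();                              /* entries 6..9, across the wrap */
        return 0;
}

The parity trick lets the producer publish a completion with a single byte write and lets the consumer recognize a stale entry after a wrap without a separate valid flag.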
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee9677599..1e9b55eb721 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- u8 log_eq_size;
- u8 reserved2[4];
- u8 eq_period;
- u8 reserved3;
- u8 eq_max_count;
- u8 reserved4[3];
- u8 intr;
- u8 log_page_size;
- u8 reserved5[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- u32 reserved6[2];
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved7[4];
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
- (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- } event;
- u8 reserved3[3];
- u8 owner;
-} __packed;
+ (1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+ (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+ struct mlx4_eqe *eqe =
+ &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+ return (!!(eqe->owner & 0x80) ^
+ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+ eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+ struct mlx4_eqe *eqe;
+ u8 slave;
+ int i;
+
+ for (eqe = next_slave_event_eqe(slave_eq); eqe;
+ eqe = next_slave_event_eqe(slave_eq)) {
+ slave = eqe->slave_id;
+
+ /* All active slaves need to receive the event */
+ if (slave == ALL_SLAVES) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i != dev->caps.function &&
+ master->slave_state[i].active)
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to "
+ " generate event "
+ "for slave %d\n", i);
+ }
+ } else {
+ if (mlx4_GEN_EQE(dev, slave, eqe))
+ mlx4_warn(dev, "Failed to generate event "
+ "for slave %d\n", slave);
+ }
+ ++slave_eq->cons;
+ }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+ struct mlx4_eqe *s_eqe =
+ &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+ if ((!!(s_eqe->owner & 0x80)) ^
+ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+ "No free EQE on slave events queue\n", slave);
+ return;
+ }
+
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ s_eqe->slave_id = slave;
+ /* ensure all information is written before setting the ownership bit */
+ wmb();
+ s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+ ++slave_eq->prod;
+
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+ struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave =
+ &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active) {
+ /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ return;
+ }
+
+ slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_flr_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int i;
+ int err;
+
+ mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+
+ if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+ "clean slave: %d\n", i);
+
+ mlx4_delete_all_resources_for_slave(dev, i);
+ /*return the slave to running mode*/
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+ slave_state[i].is_slave_going_down = 0;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /*notify the FW:*/
+ err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to notify FW on "
+ "FLR done (slave:%d)\n", i);
+ }
+ }
+}
+
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
+ int slave = 0;
+ int ret;
+ u32 flr_slave;
+ u8 update_slave_state;
+ int i;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
- eqe->type);
+ mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the QP */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_QP,
+ be32_to_cpu(eqe->event.qp.qpn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "QP event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+
+ }
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
- mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
- eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the SRQ */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_SRQ,
+ be32_to_cpu(eqe->event.srq.srqn)
+ & 0xffffff,
+ &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_warn(dev, "SRQ event %02x(%02x) "
+ "on EQ %d at index %u: could"
+ " not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+ " event: %02x(%02x)\n", __func__,
+ slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_warn(dev, "%s: sending event "
+ "%02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ if (mlx4_is_master(dev))
+ /* change the state of all slaves'
+ * ports to down: */
+ for (i = 0; i < dev->num_slaves; i++) {
+ mlx4_dbg(dev, "%s: Sending "
+ "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ " to slave: %d, port:%d\n",
+ __func__, i, port);
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+ if (mlx4_is_master(dev)) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
+ }
}
break;
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
- mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_CQ,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "CQ event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_cq_event(dev,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff,
eqe->type);
break;
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_COMM_CHANNEL:
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Received comm channel event "
+ "for non master device\n");
+ break;
+ }
+ memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+ eqe->event.comm_channel_arm.bit_vec,
+ sizeof eqe->event.comm_channel_arm.bit_vec);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.comm_work);
+ break;
+
+ case MLX4_EVENT_TYPE_FLR_EVENT:
+ flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Non-master function received"
+ "FLR event\n");
+ break;
+ }
+
+ mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+ if (flr_slave > dev->num_slaves) {
+ mlx4_warn(dev,
+ "Got FLR for unknown function: %d\n",
+ flr_slave);
+ update_slave_state = 0;
+ } else
+ update_slave_state = 1;
+
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (update_slave_state) {
+ priv->mfunc.master.slave_state[flr_slave].active = false;
+ priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+ priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+ }
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_flr_event_work);
+ break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
- eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+ "index %u. owner=%x, nent=0x%x, slave=%x, "
+ "ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ eqe->slave_id,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- }
+ };
++eq->cons_index;
eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq =
+ &priv->mfunc.master.slave_state[slave].event_eq;
+ u32 in_modifier = vhcr->in_modifier;
+ u32 eqn = in_modifier & 0x1FF;
+ u64 in_param = vhcr->in_param;
+ int err = 0;
+
+ if (slave == dev->caps.function)
+ err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ if (!err) {
+ if (in_modifier >> 31) {
+ /* unmap */
+ event_eq->event_type &= ~in_param;
+ } else {
+ event_eq->eqn = eqn;
+ event_eq->event_type = in_param;
+ }
+ }
+ return err;
+}
+
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+ MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+ 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
- err = mlx4_map_clr_int(dev);
- if (err)
- goto err_out_bitmap;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
- priv->eq_table.clr_mask =
- swab32(1 << (priv->eq_table.inta_pin & 31));
- priv->eq_table.clr_int = priv->clr_base +
- (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ }
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
err = mlx4_NOP(dev);
/* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X))
+ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
return err;
/* A loop over all completion vectors, for each vector we will check
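Much of the new eq.c code has the master resolve which function owns the QP/SRQ/CQ named in an asynchronous EQE and then either forward the event to that slave or handle it locally. The toy sketch below mirrors only that routing shape; the ownership table, function names and numbers are all invented and bear no relation to the real resource tracker.

#include <stdio.h>

#define NUM_RESOURCES 16
#define LOCAL_FUNCTION 0        /* the PF handles its own resources */

/* Hypothetical ownership table: which function (PF=0, VFs>0) owns each QP. */
static int qp_owner[NUM_RESOURCES] = {
        [3] = 2,                /* QP 3 belongs to slave 2 */
        [7] = 1,                /* QP 7 belongs to slave 1 */
};

static void forward_to_slave(int slave, int qpn)
{
        printf("forwarding QP %d event to slave %d\n", qpn, slave);
}

static void handle_locally(int qpn)
{
        printf("handling QP %d event on the PF\n", qpn);
}

/* Mirrors the shape of the new eq.c logic: resolve the owning function
 * first, then either forward the EQE or fall through to the local handler. */
static void qp_async_event(int qpn)
{
        int slave;

        if (qpn < 0 || qpn >= NUM_RESOURCES) {
                printf("QP %d: could not resolve owner, dropping\n", qpn);
                return;
        }
        slave = qp_owner[qpn];
        if (slave != LOCAL_FUNCTION) {
                forward_to_slave(slave, qpn);
                return;
        }
        handle_locally(qpn);
}

int main(void)
{
        qp_async_event(3);      /* forwarded to slave 2 */
        qp_async_event(5);      /* owned by the PF itself */
        return 0;
}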
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e4973..a424a19280c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static int enable_qos;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 field;
+ u32 size;
+ int err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+
+ if (vhcr->op_modifier == 1) {
+ field = vhcr->in_modifier;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+ field = 0; /* ensure fvl bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ } else if (vhcr->op_modifier == 0) {
+ field = 1 << 7; /* enable only ethernet interface */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+ field = slave;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+ field = dev->caps.num_ports;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+ size = 0; /* no PF behaviour is set for now */
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mgms + dev->caps.num_amgms;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 size;
+ int i;
+ int err = 0;
+
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (1 << 7))) {
+ mlx4_err(dev, "The host doesn't support eth interface\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+ func_cap->function = field;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ for (i = 1; i <= func_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+ MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & (1 << 7)) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ if (field & (1 << 6)) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port[i] = field;
+ }
+
+ /* All other resources are allocated by the master, but we still report
+ * 'num' and 'reserved' capabilities as follows:
+ * - num remains the maximum resource index
+ * - 'num - reserved' is the total number of available objects of a resource, but
+ * resource indices may be less than 'reserved'
+ * TODO: set per-resource quotas */
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
if (err)
goto out;
@@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
+ dev_cap->suggested_type[i] = (field >> 3) & 1;
+ dev_cap->default_sense[i] = (field >> 4) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,61 @@ out:
return err;
}
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 def_mac;
+ u8 port_type;
+ int err;
+
+#define MLX4_PORT_SUPPORT_IB (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
+ ~MLX4_PORT_SUGGEST_TYPE & \
+ ~MLX4_PORT_DEFAULT_SENSE)
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ if (!err && dev->caps.function != slave) {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+ /* get port type - currently only eth is enabled */
+ MLX4_GET(port_type, outbox->buf,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+ /* check eth is enabled for this port */
+ if (!(port_type & 2))
+ mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+ MLX4_PUT(outbox->buf, port_type,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ }
+
+ return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+ struct mlx4_cmd_mailbox *outbox = ptr;
+
+ return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +751,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
@@ -528,7 +761,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
}
if (nent)
- err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -557,13 +791,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +815,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
@@ -589,13 +826,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+#define QUERY_FW_COMM_BASE_OFFSET 0x40
+#define QUERY_FW_COMM_BAR_OFFSET 0x48
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -608,6 +848,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +892,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+ MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+ MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
+ fw->comm_bar = (fw->comm_bar >> 6) * 2;
+ mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+ fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
@@ -711,7 +959,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -743,6 +991,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1080,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* UAR attributes */
- MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1093,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return err;
}
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err)
+ goto out;
+
+ MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask |=
+ (1 << port);
+ }
+ ++priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1231,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+ (1 << port)))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ --priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+ MLX4_CMD_NATIVE);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
@@ -929,7 +1303,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1312,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
- MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
@@ -947,6 +1322,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
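QUERY_FUNC_CAP and QUERY_HCA above are parsed with MLX4_GET()/MLX4_PUT(), which copy fields in and out of a command mailbox at fixed byte offsets defined by the firmware interface. A user-space sketch of that access pattern, with invented offsets and 32-bit-only helpers standing in for the real macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Offsets are invented for the sketch; the real QUERY_FUNC_CAP layout is
 * dictated by the firmware. */
#define CAP_FLAGS_OFFSET     0x0
#define CAP_QP_QUOTA_OFFSET  0x10

/* Minimal stand-ins for MLX4_GET()/MLX4_PUT(): move a big-endian field
 * out of (or into) a mailbox buffer at a fixed offset. */
static uint32_t mbox_get32(const void *buf, size_t offset)
{
        uint32_t be;

        memcpy(&be, (const uint8_t *)buf + offset, sizeof(be));
        return ntohl(be);
}

static void mbox_put32(void *buf, size_t offset, uint32_t val)
{
        uint32_t be = htonl(val);

        memcpy((uint8_t *)buf + offset, &be, sizeof(be));
}

int main(void)
{
        uint8_t mailbox[0x100] = {0};

        /* "Firmware" (or the PF wrapper) fills the mailbox... */
        mbox_put32(mailbox, CAP_FLAGS_OFFSET, 1u << 7);         /* eth enabled */
        mbox_put32(mailbox, CAP_QP_QUOTA_OFFSET, 0x20000);

        /* ...and the querying function parses it field by field. */
        if (mbox_get32(mailbox, CAP_FLAGS_OFFSET) & (1u << 7))
                printf("qp quota: %u\n",
                       mbox_get32(mailbox, CAP_QP_QUOTA_OFFSET) & 0xFFFFFF);
        return 0;
}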
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec228652..119e0cc9fab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,30 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
};
+struct mlx4_func_cap {
+ u8 function;
+ u8 num_ports;
+ u8 flags;
+ u32 pf_context_behaviour;
+ int qp_quota;
+ int cq_quota;
+ int srq_quota;
+ int mpt_quota;
+ int mtt_quota;
+ int max_eq;
+ int reserved_eq;
+ int mcg_quota;
+ u8 physical_port[MLX4_MAX_PORTS + 1];
+ u8 port_flags[MLX4_MAX_PORTS + 1];
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -133,6 +152,7 @@ struct mlx4_init_hca_param {
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
+ u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
@@ -143,6 +163,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 uar_page_sz; /* log pg sz in 4k chunks */
};
struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
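fw.h gains struct mlx4_func_cap and the QUERY_FUNC_CAP prototypes; a related pattern back in fw.c is the init_port_ref counting in the INIT_PORT/CLOSE_PORT wrappers, where the physical port is only initialized for the first function that opens it and only closed when the last one leaves. A toy sketch of that reference-counting shape, with all names invented:

#include <stdio.h>

#define MAX_PORTS 3
#define MAX_SLAVES 8

static int init_port_ref[MAX_PORTS];            /* how many functions opened the port */
static unsigned int init_port_mask[MAX_SLAVES]; /* which ports each function opened   */

static int slave_init_port(int slave, int port)
{
        if (init_port_mask[slave] & (1u << port))
                return 0;                       /* this function already opened it */
        if (!init_port_ref[port])
                printf("issuing real INIT_PORT for port %d\n", port);
        init_port_mask[slave] |= 1u << port;
        ++init_port_ref[port];
        return 0;
}

static int slave_close_port(int slave, int port)
{
        if (!(init_port_mask[slave] & (1u << port)))
                return 0;                       /* nothing to undo for this function */
        if (init_port_ref[port] == 1)
                printf("issuing real CLOSE_PORT for port %d\n", port);
        init_port_mask[slave] &= ~(1u << port);
        --init_port_ref[port];
        return 0;
}

int main(void)
{
        slave_init_port(1, 1);   /* first opener: real INIT_PORT   */
        slave_init_port(2, 1);   /* refcount only                  */
        slave_close_port(1, 1);  /* refcount only                  */
        slave_close_port(2, 1);  /* last closer: real CLOSE_PORT   */
        return 0;
}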
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fdf44c..a9ade1c3cad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb55bd9..b4e9f6f5cc0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- mlx4_start_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- mlx4_stop_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85a532..6bb62c580e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/delay.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size that defines the number"
+ " of QPs per MCG, for example:"
+ " 10 gives 248. Range: 9 <="
+ " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK 0
+#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
- .num_qp = 1 << 17,
+ .num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
+ .num_mpt = 1 << 19,
.num_mtt = 1 << 20,
};
-static int log_num_mac = 2;
+static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
-static int use_prio;
+static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+ "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+ struct list_head list;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_eqs +
+ MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
- dev->caps.port_mask = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
- dev->caps.port_mask |= 1 << (i - 1);
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+ dev->caps.default_sense[i] = dev_cap->default_sense[i];
dev->caps.trans_type[i] = dev_cap->trans_type[i];
dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
dev->caps.wavelength[i] = dev_cap->wavelength[i];
dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
+ dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
- dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
- dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- dev->caps.mtts_per_seg);
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- dev->caps.reserved_uars = dev_cap->reserved_uars;
+
+ /* The first 128 UARs are used for EQ doorbells */
+ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
- dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
+
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ /* Port sensing is always allowed on supported ConnectX-1 and ConnectX-2 devices */
+ if (dev->pdev->device != 0x1003)
+ dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
- else
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- dev->caps.possible_type[i] = dev->caps.port_type[i];
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+ if (dev->caps.supported_type[i]) {
+ /* if only ETH is supported - assign ETH */
+ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ /* if only IB is supported,
+ * assign IB only if SRIOV is off*/
+ else if (dev->caps.supported_type[i] ==
+ MLX4_PORT_TYPE_IB) {
+ if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_NONE;
+ else
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_IB;
+ /* if IB and ETH are supported,
+ * first of all check if SRIOV is on */
+ } else if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ else {
+ /* In non-SRIOV mode, we set the port type
+ * according to user selection of port type,
+ * if user selected none, take the FW hint */
+ if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+ MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = port_type_array[i-1];
+ }
+ }
+ /*
+ * Link sensing is allowed on the port if 3 conditions are true:
+ * 1. Both protocols are supported on the port (AUTO).
+ * 2. The device supports different port types (DPDP).
+ * 3. FW declared that it supports link sensing.
+ */
mlx4_priv(dev)->sense.sense_allowed[i] =
- dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+ ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+ /*
+ * If "default_sense" bit is set, we move the port to "AUTO" mode
+ * and perform sense_port FW command to try and set the correct
+ * port type from beginning
+ */
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+ enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+ dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+ mlx4_SENSE_PORT(dev, i, &sensed_port);
+ if (sensed_port != MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = sensed_port;
+ } else {
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ }
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- mlx4_set_port_mask(dev);
-
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+/* The function checks for live VFs and returns how many there are */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i;
+ int ret = 0;
+
+ for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ if (s_state->active && s_state->last_cmd !=
+ MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "%s: slave: %d is still active\n",
+ __func__, i);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave;
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ s_slave = &priv->mfunc.master.slave_state[slave];
+ return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+ int err;
+ u32 page_size;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_func_cap func_cap;
+ struct mlx4_init_hca_param hca_param;
+ int i;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ return err;
+ }
+
+ /* fail if the HCA has an unknown capability */
+ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+ HCA_GLOBAL_CAP_MASK) {
+ mlx4_err(dev, "Unknown hca global capabilities\n");
+ return -ENOSYS;
+ }
+
+ mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ page_size = ~dev->caps.page_size_cap + 1;
+ mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+ if (page_size > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ /* slave gets uar page size from QUERY_HCA fw command */
+ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+ /* TODO: relax this assumption */
+ if (dev->caps.uar_page_size != PAGE_SIZE) {
+ mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+ dev->caps.uar_page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+ PF_CONTEXT_BEHAVIOUR_MASK) {
+ mlx4_err(dev, "Unknown pf context behaviour\n");
+ return -ENOSYS;
+ }
+
+ dev->caps.function = func_cap.function;
+ dev->caps.num_ports = func_cap.num_ports;
+ dev->caps.num_qps = func_cap.qp_quota;
+ dev->caps.num_srqs = func_cap.srq_quota;
+ dev->caps.num_cqs = func_cap.cq_quota;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.num_mpts = func_cap.mpt_quota;
+ dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->caps.num_pds = MLX4_NUM_PDS;
+ dev->caps.num_mgms = 0;
+ dev->caps.num_amgms = 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+ if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev->caps.uar_page_size * (dev->caps.num_uars -
+ dev->caps.reserved_uars) >
+ pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+#if 0
+ mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+ mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+ dev->caps.num_uars, dev->caps.reserved_uars,
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ pci_resource_len(dev->pdev, 2));
+ mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+ dev->caps.reserved_eqs);
+ mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+ dev->caps.num_pds, dev->caps.reserved_pds,
+ dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+ return 0;
+}
/*
* Change the port configuration of the device.
@@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev,
types[i] = mdev->caps.port_type[i+1];
}
- if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int num_eqs;
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
@@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
- cmpt_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
+ int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
+
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs,
- 0, 0);
+ num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
- dev->caps.num_mtt_segs,
+ dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
* and it's a lot easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
- init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ init_hca->mc_base,
+ mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
@@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ down(&priv->cmd.slave_sem);
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ mlx4_warn(dev, "Failed to close slave function.\n");
+ up(&priv->cmd.slave_sem);
+}
+
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev)
resource_size_t bf_len;
int err = 0;
- bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ bf_start = pci_resource_start(dev->pdev, 2) +
+ (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) -
+ (dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
@@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_bf_area(dev);
- mlx4_CLOSE_HCA(dev, 0);
- mlx4_free_icms(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else {
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 dma = (u64) priv->mfunc.vhcr_dma;
+ int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+ int ret_from_reset = 0;
+ u32 slave_read;
+ u32 cmd_channel_ver;
+
+ down(&priv->cmd.slave_sem);
+ priv->cmd.max_cmds = 1;
+ mlx4_warn(dev, "Sending reset\n");
+ ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+ MLX4_COMM_TIME);
+ /* if we are in the middle of FLR, the slave will try
+ * NUM_OF_RESET_RETRIES times before leaving. */
+ if (ret_from_reset) {
+ if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+ msleep(SLEEP_TIME_IN_RESET);
+ while (ret_from_reset && num_of_reset_retries) {
+ mlx4_warn(dev, "slave is currently in the"
+ "middle of FLR. retrying..."
+ "(try num:%d)\n",
+ (NUM_OF_RESET_RETRIES -
+ num_of_reset_retries + 1));
+ ret_from_reset =
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+ 0, MLX4_COMM_TIME);
+ num_of_reset_retries = num_of_reset_retries - 1;
+ }
+ } else
+ goto err;
+ }
+
+ /* check the driver version - the slave I/F revision
+ * must match the master's */
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ cmd_channel_ver = mlx4_comm_get_version();
+
+ if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+ MLX4_COMM_GET_IF_REV(slave_read)) {
+ mlx4_err(dev, "slave driver version is not supported"
+ " by the master\n");
+ goto err;
+ }
+
+ mlx4_warn(dev, "Sending vhcr0\n");
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ goto err;
+ up(&priv->cmd.slave_sem);
+ return 0;
+
+err:
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ up(&priv->cmd.slave_sem);
+ return -EIO;
}
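The four comm-channel commands above hand the 64-bit VHCR DMA address to the master 16 bits at a time, high word first. The master-side handler is not shown in this hunk; as a rough sketch under that assumption (function and parameter names are hypothetical), reassembly would look like:

/* Hypothetical sketch: rebuilding the VHCR DMA address from the 16-bit
 * parameters carried by VHCR0, VHCR1, VHCR2 and VHCR_EN, in the order
 * the slave posts them in mlx4_init_slave() above. */
static u64 example_rebuild_vhcr_dma(u16 vhcr0, u16 vhcr1, u16 vhcr2,
                                    u16 vhcr_en)
{
        return ((u64) vhcr0 << 48) | ((u64) vhcr1 << 32) |
               ((u64) vhcr2 << 16) | (u64) vhcr_en;
}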
static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
u64 icm_size;
int err;
- err = mlx4_QUERY_FW(dev);
- if (err) {
- if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
- else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
- return err;
- }
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ goto unmap_bf;
+ }
- err = mlx4_load_fw(dev);
- if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
- return err;
- }
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ goto unmap_bf;
+ }
- mlx4_cfg.log_pg_sz_m = 1;
- mlx4_cfg.log_pg_sz = 0;
- err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
- if (err)
- mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
- err = mlx4_dev_cap(dev, &dev_cap);
- if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
- goto err_stop_fw;
- }
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
- profile = default_profile;
+ profile = default_profile;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
- if ((long long) icm_size < 0) {
- err = icm_size;
- goto err_stop_fw;
- }
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+ &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
- if (map_bf_area(dev))
- mlx4_dbg(dev, "Failed to map blue flame area\n");
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
- if (err)
- goto err_stop_fw;
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+ } else {
+ err = mlx4_init_slave(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize slave\n");
+ goto unmap_bf;
+ }
- err = mlx4_INIT_HCA(dev, &init_hca);
- if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
+ err = mlx4_slave_cap(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to obtain slave caps\n");
+ goto err_close;
+ }
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+ /* Only the master sets the ports; the rest get them from it. */
+ if (!mlx4_is_slave(dev))
+ mlx4_set_port_mask(dev);
+
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_CLOSE_HCA(dev, 0);
+ mlx4_close_hca(dev);
err_free_icm:
- mlx4_free_icms(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_free_icms(dev);
err_stop_fw:
+ if (!mlx4_is_slave(dev)) {
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ }
+unmap_bf:
unmap_bf_area(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
return err;
}
@@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_counters_table_free;
+ goto err_mcg_table_free;
}
- for (port = 1; port <= dev->caps.num_ports; port++) {
- enum mlx4_port_type port_type = 0;
- mlx4_SENSE_PORT(dev, port, &port_type);
- if (port_type)
- dev->caps.port_type[port] = port_type;
- ib_port_default_caps = 0;
- err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
- if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing with "
- "caps = 0\n", port, err);
- dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
- err = mlx4_check_ext_port_caps(dev, port);
- if (err)
- mlx4_warn(dev, "failed to get port %d extended "
- "port capabilities support info (%d)."
- " Assuming not supported\n", port, err);
+ if (!mlx4_is_slave(dev)) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port,
+ &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing "
+ "with caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n",
+ port, err);
- err = mlx4_SET_PORT(dev, port);
- if (err) {
- mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
- goto err_mcg_table_free;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_counters_table_free;
+ }
}
}
- mlx4_set_port_mask(dev);
return 0;
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- nreq);
+ /* In multifunction mode each function gets 2 MSI-X vectors:
+ * one for data path completions and the other for async events
+ * or command completions */
+ if (mlx4_is_mfunc(dev)) {
+ nreq = 2;
+ } else {
+ nreq = min_t(int, dev->caps.num_eqs -
+ dev->caps.reserved_eqs, nreq);
+ }
+
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
- mlx4_init_mac_table(dev, &info->mac_table);
- mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ if (!mlx4_is_slave(dev)) {
+ INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn =
+ dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
+ }
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev))
+ info->port_attr.attr.mode = S_IRUGO;
+ else {
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.store = set_port_type;
+ }
info->port_attr.show = show_port_type;
- info->port_attr.store = set_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
kfree(priv->steer);
}
+static int extended_func_num(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
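Since PCI_SLOT() is devfn >> 3 and PCI_FUNC() is devfn & 7, the expression above simply reconstructs the raw devfn, which __mlx4_init_one() later compares against probe_vf. A minimal sketch (names are illustrative only):

/* Illustration only: slot * 8 + func reproduces the raw devfn value. */
static int example_extended_func_num(void)
{
        unsigned int devfn = PCI_DEVFN(1, 2);   /* slot 1, function 2 */

        /* 1 * 8 + 2 == 10 == devfn */
        return PCI_SLOT(devfn) * 8 + PCI_FUNC(devfn);
}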
+
+#define MLX4_OWNER_BASE 0x8069c
+#define MLX4_OWNER_SIZE 4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+ u32 ret;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return -ENOMEM;
+ }
+
+ ret = readl(owner);
+ iounmap(owner);
+ return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return;
+ }
+ writel(0, owner);
+ msleep(1000);
+ iounmap(owner);
+}
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
"aborting.\n");
return err;
}
-
+ if (num_vfs > MLX4_MAX_NUM_VF) {
+ printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF);
+ return -EINVAL;
+ }
/*
- * Check for BARs. We expect 0: 1MB
+ * Check for BARs.
*/
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+ "(id == 0X%p, id->driver_data: 0x%lx,"
+ " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+ id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+ if (id && id->driver_data & MLX4_VF) {
+ /* When acting as pf, we normally skip vfs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+ dev->flags |= MLX4_FLAG_SLAVE;
+ } else {
+ /* We reset the device and enable SRIOV only for physical
+ * devices. Try to claim ownership on the device;
+ * if already taken, skip -- do not allow multiple PFs */
+ err = mlx4_get_ownership(dev);
+ if (err) {
+ if (err < 0)
+ goto err_free_dev;
+ else {
+ mlx4_warn(dev, "Multiple PFs not yet supported."
+ " Skipping PF.\n");
+ err = -EINVAL;
+ goto err_free_dev;
+ }
+ }
- /*
- * Now reset the HCA before we touch the PCI capabilities or
- * attempt a firmware command, since a boot ROM may have left
- * the HCA in an undefined state.
- */
- err = mlx4_reset(dev);
- if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
- goto err_free_dev;
+ if (num_vfs) {
+ mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ mlx4_err(dev, "Failed to enable sriov,"
+ "continuing without sriov enabled"
+ " (err = %d).\n", err);
+ num_vfs = 0;
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = num_vfs;
+ }
+ }
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_rel_own;
+ }
}
+slave_start:
if (mlx4_cmd_init(dev)) {
mlx4_err(dev, "Failed to init command interface, aborting.\n");
- goto err_free_dev;
+ goto err_sriov;
+ }
+
+ /* In slave functions, the communication channel must be initialized
+ * before posting commands. Also, init num_slaves before calling
+ * mlx4_init_hca */
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_master(dev))
+ dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+ else {
+ dev->num_slaves = 0;
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init slave mfunc"
+ " interface, aborting.\n");
+ goto err_cmd;
+ }
+ }
}
err = mlx4_init_hca(dev);
- if (err)
- goto err_cmd;
+ if (err) {
+ if (err == -EACCES) {
+ /* Not primary Physical function
+ * Running in slave mode */
+ mlx4_cmd_cleanup(dev);
+ dev->flags |= MLX4_FLAG_SLAVE;
+ dev->flags &= ~MLX4_FLAG_MASTER;
+ goto slave_start;
+ } else
+ goto err_mfunc;
+ }
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init master mfunc"
+ "interface, aborting.\n");
+ goto err_close;
+ }
+ }
err = mlx4_alloc_eq_table(dev);
if (err)
- goto err_close;
+ goto err_master_mfunc;
priv->msix_ctl.pool_bm = 0;
spin_lock_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
-
- err = mlx4_init_steering(dev);
- if (err)
+ if ((mlx4_is_mfunc(dev)) &&
+ !(dev->flags & MLX4_FLAG_MSI_X)) {
+ mlx4_err(dev, "INTx is not supported in multi-function mode."
+ " aborting.\n");
goto err_free_eq;
+ }
+
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+ }
err = mlx4_setup_hca(dev);
- if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+ !mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@ err_port:
mlx4_cleanup_uar_table(dev);
err_steer:
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
err_free_eq:
mlx4_free_eq_table(dev);
+err_master_mfunc:
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
mlx4_close_hca(dev);
+err_mfunc:
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_cmd:
mlx4_cmd_cleanup(dev);
+err_sriov:
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+ pci_disable_sriov(pdev);
+
+err_rel_own:
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
err_free_dev:
kfree(priv);
@@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int p;
if (dev) {
+ /* in SRIOV it is not allowed to unload the PF's
+ * driver while there are live VFs */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev);
+
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Disabling sriov\n");
+ pci_disable_sriov(pdev);
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
{ 0, }
};
@@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ /* Check if the port type module parameters are a legal combination */
+ if (port_type_array[0] == false && port_type_array[1] == true) {
+ printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ port_type_array[0] = true;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c3104..0785d9b2a26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
static const u8 zero_gid[16]; /* automatically initialized to 0 */
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+ return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+ return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
- in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
- MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
- MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* If the given qpn is also a promisc qp,
* it should be inserted to duplicates list
*/
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
}
/* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
- if (qpn == dqp->qpn)
+ if (qpn == pqp->qpn)
return 0; /* qp is already duplicated */
}
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
/* if qp is not promisc, it cannot be duplicated */
- if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ if (!get_promisc_qp(dev, 0, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
u32 members_count;
bool ret = false;
int i;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
- if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
@@ -332,7 +344,7 @@ out:
return ret;
}
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool found;
int last_index;
int err;
- u8 pf_num;
struct mlx4_priv *priv = mlx4_priv(dev);
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ if (get_promisc_qp(dev, 0, steer, qpn)) {
err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
if (!found) {
/* Need to add the qpn to mgm */
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
@@ -439,7 +450,7 @@ out_mutex:
return err;
}
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool back_to_list = false;
int loc, i;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
goto out_list;
}
mgm = mailbox->buf;
+ memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
index += dev->caps.num_mgms;
+ new_entry = 1;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
@@ -696,9 +707,9 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ new_steering_entry(dev, port, steer, index, qp->qpn);
else
- existing_steering_entry(dev, 0, port, steer,
+ existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/* if this qp is also a promisc qp, it shouldn't be removed */
if (prot == MLX4_PROT_ETH &&
- check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm->qp[i - 1] = 0;
if (prot == MLX4_PROT_ETH)
- removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ removed_entry = can_remove_steering_entry(dev, port, steer,
+ index, qp->qpn);
if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
@@ -828,6 +840,34 @@ out:
return err;
}
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 attach, u8 block_loopback,
+ enum mlx4_protocol prot)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err = 0;
+ int qpn;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EBADF;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, gid, 16);
+ qpn = qp->qpn;
+ qpn |= (prot << 28);
+ if (attach && block_loopback)
+ qpn |= (1 << 31);
+
+ err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+ MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
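mlx4_QP_ATTACH() above packs the QP number into the low bits of the command's qpn/in_modifier word, the protocol into bits 28-30 and, when attaching with loopback blocking, sets bit 31. The wrapper that consumes this encoding is not part of this hunk; the decode below is a hypothetical sketch based only on the packing shown above, assuming 24-bit QPNs.

/* Hypothetical sketch: unpacking the in_modifier built by
 * mlx4_QP_ATTACH(); only the bit layout is taken from the code above. */
static void example_unpack_qp_attach(u32 in_modifier, u32 *qpn,
                                     enum mlx4_protocol *prot,
                                     int *block_loopback)
{
        *qpn = in_modifier & 0xffffff;          /* assuming 24-bit QPNs */
        *prot = (enum mlx4_protocol)((in_modifier >> 28) & 0x7);
        *block_loopback = !!(in_modifier & (1U << 31));
}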
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- steer);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- }
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+ struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+ u8 port = vhcr->in_param >> 62;
+ enum mlx4_steer_type steer = vhcr->in_modifier;
+
+ /* Promiscuous unicast is not allowed in mfunc */
+ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+ return 0;
+
+ if (vhcr->op_modifier)
+ return add_promisc_qp(dev, port, steer, qpn);
+ else
+ return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_steer_type steer, u8 add, u8 port)
+{
+ return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+ MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+}
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
- return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+ return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68ffc11..a80121a2b51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 14, 2011"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "Dec, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
- MLX4_CLR_INT_SIZE = 0x00008
+ MLX4_CLR_INT_SIZE = 0x00008,
+ MLX4_SLAVE_COMM_BASE = 0x0,
+ MLX4_COMM_PAGESIZE = 0x1000
};
enum {
- MLX4_MGM_ENTRY_SIZE = 0x100,
- MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8
+ MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+ MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -80,6 +84,94 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
+enum mlx4_mr_state {
+ MLX4_MR_DISABLED = 0,
+ MLX4_MR_EN_HW,
+ MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME 10000
+enum {
+ MLX4_COMM_CMD_RESET,
+ MLX4_COMM_CMD_VHCR0,
+ MLX4_COMM_CMD_VHCR1,
+ MLX4_COMM_CMD_VHCR2,
+ MLX4_COMM_CMD_VHCR_EN,
+ MLX4_COMM_CMD_VHCR_POST,
+ MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES 10
+#define SLEEP_TIME_IN_RESET (2 * 1000)
+enum mlx4_resource {
+ RES_QP,
+ RES_CQ,
+ RES_SRQ,
+ RES_XRCD,
+ RES_MPT,
+ RES_MTT,
+ RES_MAC,
+ RES_VLAN,
+ RES_EQ,
+ RES_COUNTER,
+ MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+ RES_OP_RESERVE,
+ RES_OP_RESERVE_AND_MAP,
+ RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to the FW through the communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+ u64 in_param;
+ u64 out_param;
+ u32 in_modifier;
+ u32 errno;
+ u16 op;
+ u16 token;
+ u8 op_modifier;
+ u8 e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+ __be64 in_param;
+ __be32 in_modifier;
+ __be64 out_param;
+ __be16 token;
+ u16 reserved;
+ u8 status;
+ u8 flags;
+ __be16 opcode;
+};
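The comment above describes how the two structures relate; the conversion below is a hypothetical sketch (the helper does not exist in this patch, and the e_bit/op_modifier packing, which this hunk does not show, is omitted). It illustrates that producing the wire form is essentially per-field byte swapping.

/* Hypothetical sketch: packing the host-endian mlx4_vhcr into the
 * big-endian wire format described above. Flag/op_modifier encoding is
 * intentionally left out because it is not visible in this hunk. */
static void example_vhcr_to_wire(const struct mlx4_vhcr *hcr,
                                 struct mlx4_vhcr_cmd *cmd)
{
        cmd->in_param = cpu_to_be64(hcr->in_param);
        cmd->out_param = cpu_to_be64(hcr->out_param);
        cmd->in_modifier = cpu_to_be32(hcr->in_modifier);
        cmd->token = cpu_to_be16(hcr->token);
        cmd->opcode = cpu_to_be16(hcr->op);
}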
+
+struct mlx4_cmd_info {
+ u16 opcode;
+ bool has_inbox;
+ bool has_outbox;
+ bool out_is_imm;
+ bool encode_slave_id;
+ int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox);
+ int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+};
+
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do { \
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ##arg)
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
struct mlx4_bitmap {
u32 last;
u32 top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+/*
+ * Must be packed because mtt_addr is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_addr;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved4;
+ __be16 wqe_counter;
+ u32 reserved5;
+ __be64 db_rec_addr;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ u8 port;
+ } __packed sw_event;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
struct mlx4_mtt mtt;
};
+struct mlx4_slave_eqe {
+ u8 type;
+ u8 port;
+ u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+ u32 eqn;
+ u16 token;
+ u64 event_type;
+};
+
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
+ u64 comm_base;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
+ u8 comm_bar;
};
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
+struct mlx4_comm {
+ u32 slave_write;
+ u32 slave_read;
+};
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+ struct list_head list;
+ u64 addr;
+};
struct mlx4_promisc_qp {
struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+ u8 comm_toggle;
+ u8 last_cmd;
+ u8 init_port_mask;
+ bool active;
+ u8 function;
+ dma_addr_t vhcr_dma;
+ u16 mtu[MLX4_MAX_PORTS + 1];
+ __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+ struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+ struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_event_eq_info event_eq;
+ u16 eq_pi;
+ u16 eq_ci;
+ spinlock_t lock;
+ /* Initialized via kzalloc */
+ u8 is_slave_going_down;
+ u32 cookie;
+};
+
+struct slave_list {
+ struct mutex mutex;
+ struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+ spinlock_t lock;
+ /* one radix tree per resource type */
+ struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ /* resource lists, one per slave */
+ struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE 128
+struct mlx4_slave_event_eq {
+ u32 eqn;
+ u32 cons;
+ u32 prod;
+ struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+ int proxy_qp0_active;
+ int qp0_active;
+ int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+ int init_port_ref[MLX4_MAX_PORTS + 1];
+ u16 max_mtu[MLX4_MAX_PORTS + 1];
+ int disable_mcast_ref[MLX4_MAX_PORTS + 1];
+ struct mlx4_resource_tracker res_tracker;
+ struct workqueue_struct *comm_wq;
+ struct work_struct comm_work;
+ struct work_struct slave_event_work;
+ struct work_struct slave_flr_event_work;
+ spinlock_t slave_state_lock;
+ __be32 comm_arm_bit_vector[4];
+ struct mlx4_eqe cmd_eqe;
+ struct mlx4_slave_event_eq slave_eq;
+ struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+ struct mlx4_comm __iomem *comm;
+ struct mlx4_vhcr_cmd *vhcr;
+ dma_addr_t vhcr_dma;
+
+ struct mlx4_mfunc_master_ctx master;
};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
u16 token_mask;
u8 use_events;
u8 toggle;
+ u8 comm_toggle;
};
struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
int max;
};
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 reserved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
struct mlx4_mac_entry {
u64 mac;
};
@@ -333,6 +716,7 @@ struct mlx4_priv {
struct mlx4_fw fw;
struct mlx4_cmd cmd;
+ struct mlx4_mfunc mfunc;
struct mlx4_bitmap pd_bitmap;
struct mlx4_bitmap xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ int reserved_mtts;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions*/
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource resource_type,
+ int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+ int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+ struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+ *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+ return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+ return (u32)(*arg >> 32);
+}
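/*
 * Usage sketch, not part of the patch: the helpers above pack two 32-bit
 * values into the 64-bit immediate carried by wrapped commands.  For
 * example, freeing an MTT range (see mlx4_free_mtt_range() in mr.c below)
 * sends the offset in the low dword and the order in the high dword:
 */
static inline void pack_mtt_free_example(u64 *in_param, u32 offset, int order)
{
	set_param_l(in_param, offset);	/* low dword: MTT offset */
	set_param_h(in_param, order);	/* high dword: allocation order */
}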
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+ return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 207b5add3ca..f2a8e65f5f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.2"
-#define DRV_RELDATE "October 2011"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Dec 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -366,16 +366,6 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
-struct mlx4_en_rss_context {
- __be32 base_qpn;
- __be32 default_qpn;
- u16 reserved;
- u8 hash_fn;
- u8 flags;
- __be32 rss_key[10];
- __be32 base_qpn_udp;
-};
-
struct mlx4_en_port_state {
int link_state;
int link_speed;
@@ -463,6 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
+ u32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -495,9 +486,9 @@ struct mlx4_en_priv {
enum mlx4_en_wol {
MLX4_EN_WOL_MAGIC = (1ULL << 61),
MLX4_EN_WOL_ENABLED = (1ULL << 62),
- MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77355e..01df5567e16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
- __be32 flags;
- __be32 qpn;
- __be32 key;
- __be32 pd_flags;
- __be64 start;
- __be64 length;
- __be32 lkey;
- __be32 win_cnt;
- u8 reserved1[3];
- u8 mtt_rep;
- __be64 mtt_seg;
- __be32 mtt_sz;
- __be32 entity_size;
- __be32 first_byte_offset;
-} __packed;
-
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->num_free);
}
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
+ int seg_order;
+ u32 offset;
- seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
- if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
- seg + (1 << order) - 1)) {
- mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ offset = seg * (1 << log_mtts_per_seg);
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
- return seg;
+ return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, order);
+ err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
- mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
- if (mtt->first_seg == -1)
+ mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
+ u32 first_seg;
+ int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+ first_seg = offset / (1 << log_mtts_per_seg);
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, offset);
+ set_param_h(&in_param, order);
+ err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to free mtt range at:"
+ "%d order:%d\n", offset, order);
+ return;
+ }
+ __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
if (mtt->order < 0)
return;
- mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
- return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+ return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
- return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
- MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
+ 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
- !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ !mailbox, MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
- int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ u32 *base_mridx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u32 index;
- int err;
+ u32 mridx;
- index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1)
+ mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+ if (mridx == -1)
return -ENOMEM;
+ *base_mridx = mridx;
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr)
+{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = 0;
- mr->key = hw_index_to_key(index);
+ mr->enabled = MLX4_MR_DISABLED;
+ mr->key = hw_index_to_key(mridx);
+
+ return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ int num_entries)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
- err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ u64 out_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to release mr index:%d\n",
+ index);
+ return;
+ }
+ __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, index);
+ return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+ index);
+ return;
+ }
+ return __mlx4_mr_free_icm(dev, index);
+}
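/*
 * Note, not part of the patch: the pattern above repeats throughout this
 * series.  The double-underscore variants (__mlx4_mr_reserve(),
 * __mlx4_mr_alloc_icm(), ...) touch the bitmaps and ICM tables directly
 * and run only on the function that owns the resource, while the plain
 * variants check mlx4_is_mfunc() and, on a virtual function, forward the
 * request to the master via MLX4_CMD_ALLOC_RES / MLX4_CMD_FREE_RES with
 * the RES_* type and RES_OP_* opcode encoded in the command parameters.
 */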
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ u32 index;
+ int err;
+
+ index = mlx4_mr_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+ access, npages, page_shift, mr);
if (err)
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_mr_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- if (mr->enabled) {
+ if (mr->enabled == MLX4_MR_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
- mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- }
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ mr->enabled = MLX4_MR_EN_SW;
+ }
mlx4_mtt_cleanup(dev, &mr->mtt);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ mlx4_mr_free_reserved(dev, mr);
+ if (mr->enabled)
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
- mpt_entry->mtt_seg = 0;
+ mpt_entry->mtt_addr = 0;
} else {
- mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+ &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
- mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- dev->caps.mtts_per_seg);
+ mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
-
- mr->enabled = 1;
+ mr->enabled = MLX4_MR_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -373,7 +546,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts;
dma_addr_t dma_handle;
int i;
- int s = start_index * sizeof (u64);
- /* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof (u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
- return -EINVAL;
-
- if (start_index & (dev->caps.mtts_per_seg - 1))
- return -EINVAL;
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+ start_index, &dma_handle);
- mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
- s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return 0;
}
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
{
+ int err = 0;
int chunk;
- int err;
+ int mtts_per_page;
+ int max_mtts_first_page;
- if (mtt->order < 0)
- return -EINVAL;
+ /* compute how many mtts fit in the first page */
+ mtts_per_page = PAGE_SIZE / sizeof(u64);
+ max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+ % mtts_per_page;
+
+ chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
-
npages -= chunk;
start_index += chunk;
page_list += chunk;
+
+ chunk = min_t(int, mtts_per_page, npages);
}
+ return err;
+}
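/*
 * Worked example, not part of the patch: with 4 KB pages,
 * mtts_per_page = 4096 / sizeof(u64) = 512.  If mtt->offset + start_index
 * is 1030, then 1030 % 512 = 6 and max_mtts_first_page = 512 - 6 = 506,
 * so the first chunk writes at most 506 entries to avoid crossing a page
 * of the MTT table; every following chunk may use the full 512.
 */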
- return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ __be64 *inbox = NULL;
+ int chunk;
+ int err = 0;
+ int i;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ if (mlx4_is_mfunc(dev)) {
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ while (npages > 0) {
+ chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+ npages);
+ inbox[0] = cpu_to_be64(mtt->offset + start_index);
+ inbox[1] = 0;
+ for (i = 0; i < chunk; ++i)
+ inbox[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+ err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+ if (err) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
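/*
 * Note, not part of the patch: in the multi-function path above each
 * WRITE_MTT mailbox carries a two-slot header followed by the entries,
 * which is why the chunk size is MLX4_MAILBOX_SIZE / sizeof(u64) - 2:
 *
 *	inbox[0]           starting MTT index (mtt->offset + start_index)
 *	inbox[1]           reserved, must be zero
 *	inbox[2..chunk+1]  page addresses, each tagged MLX4_MTT_FLAG_PRESENT
 */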
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ /* Nothing to do for slaves - all MR handling is forwarded
+ * to the master */
+ if (mlx4_is_slave(dev))
+ return 0;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtt_segs));
+ ilog2(dev->caps.num_mtts /
+ (1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
- if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ priv->reserved_mtts =
+ mlx4_alloc_mtt_range(dev,
+ fls(dev->caps.reserved_mtts - 1));
+ if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
+ if (mlx4_is_slave(dev))
+ return;
+ if (priv->reserved_mtts >= 0)
+ mlx4_free_mtt_range(dev, priv->reserved_mtts,
+ fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u64 mtt_seg;
+ u64 mtt_offset;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err)
return err;
- mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+ mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.first_seg,
+ fmr->mr.mtt.offset,
&fmr->dma_handle);
+
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
@@ -619,6 +852,46 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+ u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.offset,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
if (!fmr->maps)
return;
fmr->maps = 0;
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+ " failed (%d)\n", err);
+ return;
+ }
+
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(fmr->mr.key) &
+ (dev->caps.num_mpts - 1));
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err) {
+ printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+ err);
+ return;
+ }
+ fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed259ce9..5c9a54df17a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
-
+ if (mlx4_is_mfunc(dev))
+ *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds, 0);
+ (1 << NOT_MASKED_PD_BITS) - 1,
+ dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
+ int offset;
+
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ if (mlx4_is_slave(dev))
+ offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ dev->caps.uar_page_size);
+ else
+ offset = uar->index;
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->map = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars), 0);
+ dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea4927..88b52e54752 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
- __be64 *entries)
-{
- struct mlx4_cmd_mailbox *mailbox;
- u32 in_mod;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
- in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
int err;
- if (reserve) {
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- return err;
- }
- }
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- err = mlx4_qp_attach_common(dev, &qp, gid, 0,
- MLX4_PROT_ETH, MLX4_UC_STEER);
- if (err && reserve)
- mlx4_qp_release_range(dev, *qpn, 1);
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ if (err)
+ mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u8 free)
+ u64 mac, int qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
- if (free)
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if ((mac & MLX4_MAC_MASK) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* Mac not found */
+ return -EINVAL;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
- int i, err = 0;
- int free = -1;
+ int index = 0;
+ int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (err)
- return err;
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+ (unsigned long long) mac);
+ index = mlx4_register_mac(dev, port, mac);
+ if (index < 0) {
+ err = index;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ return err;
+ }
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ *qpn = info->base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ if (err)
+ goto steer_err;
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err)
+ goto insert_err;
+ return 0;
+
+insert_err:
+ kfree(entry);
+
+alloc_err:
+ mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, port, mac);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_entry *entry;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+ (unsigned long long) mac);
+ mlx4_unregister_mac(dev, port, mac);
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+ " qpn %d\n", port,
+ (unsigned long long) mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
}
}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
- mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+ (unsigned long long) mac, port);
mutex_lock(&table->mutex);
- for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
- if (free < 0 && !table->refs[i]) {
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (free < 0 && !table->entries[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase references count */
- ++table->refs[i];
+ /* MAC already registered; must not have duplicates */
+ err = -EEXIST;
goto out;
}
}
- if (free < 0) {
- err = -ENOMEM;
- goto out;
- }
-
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
/* Register new MAC */
- table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
- table->refs[free] = 0;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- *qpn = info->base_qpn + free;
+ err = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
-static int validate_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- int err = 0;
+ u64 out_param;
+ int err;
- if (index < 0 || index >= table->max || !table->entries[index]) {
- mlx4_warn(dev, "No valid Mac entry for the given index\n");
- err = -EINVAL;
- }
- return err;
-}
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
-static int find_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, u64 mac)
-{
- int i;
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
- return i;
+ return get_param_l(&out_param);
}
- /* Mac not found */
- return -EINVAL;
+ return __mlx4_register_mac(dev, port, mac);
}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
- struct mlx4_mac_entry *entry;
+ int index;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- index = find_index(dev, table, entry->mac);
- kfree(entry);
- }
- }
+ index = find_index(dev, table, mac);
mutex_lock(&table->mutex);
if (validate_index(dev, table, index))
goto out;
- /* Check whether this address has reference count */
- if (!(--table->refs[index])) {
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
- }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
out:
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ return;
+ }
+ __mlx4_unregister_mac(dev, port, mac);
+ return;
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- int err;
+ int index = qpn - info->base_qpn;
+ int err = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- index = find_index(dev, table, entry->mac);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
- if (err || index < 0)
- return err;
+ mlx4_register_mac(dev, port, new_mac);
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ return err;
}
+ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) new_mac);
table->entries[index] = 0;
}
out:
@@ -312,6 +387,7 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+ int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- /* Register new MAC */
+ /* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
@@ -405,9 +482,27 @@ out:
mutex_unlock(&table->mutex);
return err;
}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *index = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_register_vlan(dev, port, vlan, index);
+}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
out:
mutex_unlock(&table->mutex);
}
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, port);
+ err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+ index);
+
+ return;
+ }
+ __mlx4_unregister_vlan(dev, port, index);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
return err;
}
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+ u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_port_info *port_info;
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ struct mlx4_set_port_rqp_calc_context *qpn_context;
+ struct mlx4_set_port_general_context *gen_context;
+ int reset_qkey_viols;
+ int port;
+ int is_eth;
+ u32 in_modifier;
+ u32 promisc;
+ u16 mtu, prev_mtu;
+ int err;
+ int i;
+ __be32 agg_cap_mask;
+ __be32 slave_cap_mask;
+ __be32 new_cap_mask;
+
+ port = in_mod & 0xff;
+ in_modifier = in_mod >> 8;
+ is_eth = op_mod;
+ port_info = &priv->port[port];
+
+ /* Slaves cannot perform SET_PORT operations except changing MTU */
+ if (is_eth) {
+ if (slave != dev->caps.function &&
+ in_modifier != MLX4_SET_PORT_GENERAL) {
+ mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+ slave);
+ return -EINVAL;
+ }
+ switch (in_modifier) {
+ case MLX4_SET_PORT_RQP_CALC:
+ qpn_context = inbox->buf;
+ qpn_context->base_qpn =
+ cpu_to_be32(port_info->base_qpn);
+ qpn_context->n_mac = 0x7;
+ promisc = be32_to_cpu(qpn_context->promisc) >>
+ SET_PORT_PROMISC_SHIFT;
+ qpn_context->promisc = cpu_to_be32(
+ promisc << SET_PORT_PROMISC_SHIFT |
+ port_info->base_qpn);
+ promisc = be32_to_cpu(qpn_context->mcast) >>
+ SET_PORT_MC_PROMISC_SHIFT;
+ qpn_context->mcast = cpu_to_be32(
+ promisc << SET_PORT_MC_PROMISC_SHIFT |
+ port_info->base_qpn);
+ break;
+ case MLX4_SET_PORT_GENERAL:
+ gen_context = inbox->buf;
+ /* MTU is configured as the max MTU among all
+ * the functions on the port. */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu ==
+ master->max_mtu[port]) {
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++) {
+ master->max_mtu[port] =
+ max(master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ }
+
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ break;
+ }
+ return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ }
+
+ /* For IB, we only consider:
+ * - The capability mask, which is set to the aggregate of all
+ * slave function capabilities
+ * - The QKey violation counter - reset according to each request.
+ */
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+ new_cap_mask = ((__be32 *) inbox->buf)[2];
+ } else {
+ reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+ new_cap_mask = ((__be32 *) inbox->buf)[1];
+ }
+
+ agg_cap_mask = 0;
+ slave_cap_mask =
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+ for (i = 0; i < dev->num_slaves; i++)
+ agg_cap_mask |=
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+ /* only clear mailbox for guests. Master may be setting
+ * MTU or PKEY table size
+ */
+ if (slave != dev->caps.function)
+ memset(inbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+ } else {
+ ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+ slave_cap_mask;
+ return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+ vhcr->op_modifier, inbox);
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = dev->caps.log_num_macs;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+ u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+ return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_dump_eth_stats(dev, slave,
+ vhcr->in_modifier, outbox);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647d0c7..66f91ca7a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
- profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+ profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
- dev->caps.num_mtt_segs = profile[i].num;
+ dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
- init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_entry_sz =
+ ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870cb259..6b03ac8b900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
- mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
return;
}
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ return qp->qpn >= dev->caps.sqp_start &&
+ qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
};
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
- if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
- return mlx4_cmd(dev, 0, qp->qpn, 2,
- MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+ ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+ if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ is_qp0(dev, qp)) {
+ port = (qp->qpn & 1) + 1;
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ }
+ return ret;
+ }
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
+ port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ context->pri_path.sched_queue = (context->pri_path.sched_queue &
+ 0xc3);
+
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
- ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+ qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
- op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+ optpar, sqd_event, qp, 0);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- int qpn;
- qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
- if (qpn == -1)
+ *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (*base == -1)
return -ENOMEM;
- *base = qpn;
return 0;
}
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cnt);
+ set_param_h(&in_param, align);
+ err = mlx4_cmd_imm(dev, in_param, &out_param,
+ RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+
+ *base = get_param_l(&out_param);
+ return 0;
+ }
+ return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- if (base_qpn < dev->caps.sqp_start + 8)
- return;
+ if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+ return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+ err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err) {
+ mlx4_warn(dev, "Failed to release qp range"
+ " base:%d cnt:%d\n", base_qpn, cnt);
+ }
+ } else
+ __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (!qpn)
- return -EINVAL;
-
- qp->qpn = qpn;
-
- err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
- spin_lock_irq(&qp_table->lock);
- err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
- spin_unlock_irq(&qp_table->lock);
- if (err)
- goto err_put_cmpt;
-
- atomic_set(&qp->refcount, 1);
- init_completion(&qp->free);
-
return 0;
-err_put_cmpt:
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
err_put_rdmarc:
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
+
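+/* ICM for a QP context: slaves obtain it through the ALLOC_RES/RES_OP_MAP_ICM
+ * command so the master can track ownership; otherwise it is mapped directly
+ * via __mlx4_qp_alloc_icm(). */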
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, qpn);
+ return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, qpn);
+ if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+ } else
+ __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_qp_alloc_icm(dev, qpn);
+ if (err)
+ return err;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+ (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_icm;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_icm:
+ mlx4_qp_free_icm(dev, qpn);
+ return err;
+}
+
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
/*
* We reserve 2 extra QPs per port for the special QPs. The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
+
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
- MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 00000000000..ed20751a057
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3104 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0x7fffffffffffffffULL
+#define ETH_ALEN 6
+
+struct mac_res {
+ struct list_head list;
+ u64 mac;
+ u8 port;
+};
+
+struct res_common {
+ struct list_head list;
+ u32 res_id;
+ int owner;
+ int state;
+ int from_state;
+ int to_state;
+ int removing;
+};
+
+enum {
+ RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+ struct list_head list;
+ u8 gid[16];
+ enum mlx4_protocol prot;
+};
+
+enum res_qp_states {
+ RES_QP_BUSY = RES_ANY_BUSY,
+
+ /* QP number was allocated */
+ RES_QP_RESERVED,
+
+ /* ICM memory for QP context was mapped */
+ RES_QP_MAPPED,
+
+ /* QP is in hw ownership */
+ RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+ switch (state) {
+ case RES_QP_BUSY: return "RES_QP_BUSY";
+ case RES_QP_RESERVED: return "RES_QP_RESERVED";
+ case RES_QP_MAPPED: return "RES_QP_MAPPED";
+ case RES_QP_HW: return "RES_QP_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_qp {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ struct res_srq *srq;
+ struct list_head mcg_list;
+ spinlock_t mcg_spl;
+ int local_qpn;
+};
+
+enum res_mtt_states {
+ RES_MTT_BUSY = RES_ANY_BUSY,
+ RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+ switch (state) {
+ case RES_MTT_BUSY: return "RES_MTT_BUSY";
+ case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_mtt {
+ struct res_common com;
+ int order;
+ atomic_t ref_count;
+};
+
+enum res_mpt_states {
+ RES_MPT_BUSY = RES_ANY_BUSY,
+ RES_MPT_RESERVED,
+ RES_MPT_MAPPED,
+ RES_MPT_HW,
+};
+
+struct res_mpt {
+ struct res_common com;
+ struct res_mtt *mtt;
+ int key;
+};
+
+enum res_eq_states {
+ RES_EQ_BUSY = RES_ANY_BUSY,
+ RES_EQ_RESERVED,
+ RES_EQ_HW,
+};
+
+struct res_eq {
+ struct res_common com;
+ struct res_mtt *mtt;
+};
+
+enum res_cq_states {
+ RES_CQ_BUSY = RES_ANY_BUSY,
+ RES_CQ_ALLOCATED,
+ RES_CQ_HW,
+};
+
+struct res_cq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ atomic_t ref_count;
+};
+
+enum res_srq_states {
+ RES_SRQ_BUSY = RES_ANY_BUSY,
+ RES_SRQ_ALLOCATED,
+ RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+ switch (state) {
+ case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+ case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+ case RES_SRQ_HW: return "RES_SRQ_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_srq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *cq;
+ atomic_t ref_count;
+};
+
+enum res_counter_states {
+ RES_COUNTER_BUSY = RES_ANY_BUSY,
+ RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+ switch (state) {
+ case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+ case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_counter {
+ struct res_common com;
+ int port;
+};
+
+/* For debug use */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+ switch (rt) {
+ case RES_QP: return "RES_QP";
+ case RES_CQ: return "RES_CQ";
+ case RES_SRQ: return "RES_SRQ";
+ case RES_MPT: return "RES_MPT";
+ case RES_MTT: return "RES_MTT";
+ case RES_MAC: return "RES_MAC";
+ case RES_EQ: return "RES_EQ";
+ case RES_COUNTER: return "RES_COUNTER";
+ default: return "Unknown resource type !!!";
+ }
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int t;
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ GFP_KERNEL);
+ if (!priv->mfunc.master.res_tracker.slave_list)
+ return -ENOMEM;
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+ for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+ INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+ slave_list[i].res_list[t]);
+ mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
+
+ mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+ dev->num_slaves);
+ for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+ INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+ GFP_ATOMIC|__GFP_NOWARN);
+
+ spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+ return 0 ;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (priv->mfunc.master.res_tracker.slave_list) {
+ for (i = 0 ; i < dev->num_slaves; i++)
+ mlx4_delete_all_resources_for_slave(dev, i);
+
+ kfree(priv->mfunc.master.res_tracker.slave_list);
+ }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+ if (MLX4_QP_ST_UD == ts)
+ qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+ mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+ slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+ return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+ enum mlx4_resource type)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
+}
+
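+/* Look up a resource owned by @slave and mark it busy; its previous state is
+ * saved in from_state and restored later by put_res(). */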
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type,
+ void *res)
+{
+ struct res_common *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (!r) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ if (r->state == RES_ANY_BUSY) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto exit;
+ }
+
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ ResourceType(type), r->res_id);
+
+ if (res)
+ *((struct res_common **)res) = r;
+
+exit:
+ spin_unlock_irq(mlx4_tlock(dev));
+ return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource type,
+ int res_id, int *slave)
+{
+
+ struct res_common *r;
+ int err = -ENOENT;
+ int id = res_id;
+
+ if (type == RES_QP)
+ id &= 0x7fffff;
+ spin_lock(mlx4_tlock(dev));
+
+ r = find_res(dev, id, type);
+ if (r) {
+ *slave = r->owner;
+ err = 0;
+ }
+ spin_unlock(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type)
+{
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (r)
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+ struct res_qp *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_QP_RESERVED;
+ INIT_LIST_HEAD(&ret->mcg_list);
+ spin_lock_init(&ret->mcg_spl);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+ struct res_mtt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->order = order;
+ ret->com.state = RES_MTT_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+ struct res_mpt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_MPT_RESERVED;
+ ret->key = key;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+ struct res_eq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_EQ_RESERVED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+ struct res_cq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_CQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+ struct res_srq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_SRQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+ struct res_counter *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_COUNTER_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+ int extra)
+{
+ struct res_common *ret;
+
+ switch (type) {
+ case RES_QP:
+ ret = alloc_qp_tr(id);
+ break;
+ case RES_MPT:
+ ret = alloc_mpt_tr(id, extra);
+ break;
+ case RES_MTT:
+ ret = alloc_mtt_tr(id, extra);
+ break;
+ case RES_EQ:
+ ret = alloc_eq_tr(id);
+ break;
+ case RES_CQ:
+ ret = alloc_cq_tr(id);
+ break;
+ case RES_SRQ:
+ ret = alloc_srq_tr(id);
+ break;
+ case RES_MAC:
+ printk(KERN_ERR "implementation missing\n");
+ return NULL;
+ case RES_COUNTER:
+ ret = alloc_counter_tr(id);
+ break;
+
+ default:
+ return NULL;
+ }
+ if (ret)
+ ret->owner = slave;
+
+ return ret;
+}
+
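+/* Create tracker entries for the range [base, base + count) of @type, insert
+ * them into the radix tree and append them to the owning slave's list. */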
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct res_common **res_arr;
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct radix_tree_root *root = &tracker->res_tree[type];
+
+ res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ if (!res_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i) {
+ res_arr[i] = alloc_tr(base + i, type, slave, extra);
+ if (!res_arr[i]) {
+ for (--i; i >= 0; --i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = 0; i < count; ++i) {
+ if (find_res(dev, base + i, type)) {
+ err = -EEXIST;
+ goto undo;
+ }
+ err = radix_tree_insert(root, base + i, res_arr[i]);
+ if (err)
+ goto undo;
+ list_add_tail(&res_arr[i]->list,
+ &tracker->slave_list[slave].res_list[type]);
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(res_arr);
+
+ return 0;
+
+undo:
+ for (--i; i >= 0; --i)
+ radix_tree_delete(&tracker->res_tree[type], base + i);
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ for (i = 0; i < count; ++i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+
+ return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+ if (res->com.state == RES_QP_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_QP_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+ if (res->com.state == RES_MTT_BUSY ||
+ atomic_read(&res->ref_count)) {
+ printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
+ return -EBUSY;
+ } else if (res->com.state != RES_MTT_ALLOCATED)
+ return -EPERM;
+ else if (res->order != order)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+ if (res->com.state == RES_EQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_EQ_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+ if (res->com.state == RES_COUNTER_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_COUNTER_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+ if (res->com.state == RES_CQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_CQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+ if (res->com.state == RES_SRQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_SRQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+ switch (type) {
+ case RES_QP:
+ return remove_qp_ok((struct res_qp *)res);
+ case RES_CQ:
+ return remove_cq_ok((struct res_cq *)res);
+ case RES_SRQ:
+ return remove_srq_ok((struct res_srq *)res);
+ case RES_MPT:
+ return remove_mpt_ok((struct res_mpt *)res);
+ case RES_MTT:
+ return remove_mtt_ok((struct res_mtt *)res, extra);
+ case RES_MAC:
+ return -ENOSYS;
+ case RES_EQ:
+ return remove_eq_ok((struct res_eq *)res);
+ case RES_COUNTER:
+ return remove_counter_ok((struct res_counter *)res);
+ default:
+ return -EINVAL;
+ }
+}
+
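+/* Remove tracker entries for [base, base + count): every entry must exist, be
+ * owned by @slave and pass its type-specific remove_*_ok() check. */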
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ if (!r) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto out;
+ }
+ err = remove_ok(r, type, extra);
+ if (err)
+ goto out;
+ }
+
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ radix_tree_delete(&tracker->res_tree[type], i);
+ list_del(&r->list);
+ kfree(r);
+ }
+ err = 0;
+
+out:
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
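+/* Start a QP state transition in the tracker: validate the move, mark the
+ * entry busy and record from/to states; the caller finishes the transition
+ * with res_end_move() or rolls it back with res_abort_move(). */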
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+ enum res_qp_states state, struct res_qp **qp,
+ int alloc)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_qp *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_QP_BUSY:
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ __func__, r->com.res_id);
+ err = -EBUSY;
+ break;
+
+ case RES_QP_RESERVED:
+ if (r->com.state == RES_QP_MAPPED && !alloc)
+ break;
+
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ err = -EINVAL;
+ break;
+
+ case RES_QP_MAPPED:
+ if ((r->com.state == RES_QP_RESERVED && alloc) ||
+ r->com.state == RES_QP_HW)
+ break;
+ else {
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ r->com.res_id);
+ err = -EINVAL;
+ }
+
+ break;
+
+ case RES_QP_HW:
+ if (r->com.state != RES_QP_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_QP_BUSY;
+ if (qp)
+ *qp = (struct res_qp *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_mpt_states state, struct res_mpt **mpt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mpt *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_MPT_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_RESERVED:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_MAPPED:
+ if (r->com.state != RES_MPT_RESERVED &&
+ r->com.state != RES_MPT_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_HW:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_MPT_BUSY;
+ if (mpt)
+ *mpt = (struct res_mpt *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_eq_states state, struct res_eq **eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_eq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_EQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_RESERVED:
+ if (r->com.state != RES_EQ_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_HW:
+ if (r->com.state != RES_EQ_RESERVED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_EQ_BUSY;
+ if (eq)
+ *eq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+ enum res_cq_states state, struct res_cq **cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_cq *r;
+ int err;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_CQ_BUSY:
+ err = -EBUSY;
+ break;
+
+ case RES_CQ_ALLOCATED:
+ if (r->com.state != RES_CQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ break;
+
+ case RES_CQ_HW:
+ if (r->com.state != RES_CQ_ALLOCATED)
+ err = -EINVAL;
+ else
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_srq_states state, struct res_srq **srq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_srq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_SRQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_SRQ_ALLOCATED:
+ if (r->com.state != RES_SRQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ break;
+
+ case RES_SRQ_HW:
+ if (r->com.state != RES_SRQ_ALLOCATED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->to_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+ return mlx4_is_qp_reserved(dev, qpn);
+}
+
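+/* The *_alloc_res() handlers below implement ALLOC_RES on behalf of a slave:
+ * they perform the actual allocation and record ownership in the tracker. */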
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err;
+ int count;
+ int align;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ count = get_param_l(&in_param);
+ align = get_param_h(&in_param);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err) {
+ __mlx4_qp_release_range(dev, base, count);
+ return err;
+ }
+ set_param_l(out_param, base);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ if (valid_reserved(dev, slave, qpn)) {
+ err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ if (err)
+ return err;
+ }
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+ NULL, 1);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn)) {
+ err = __mlx4_qp_alloc_icm(dev, qpn);
+ if (err) {
+ res_abort_move(dev, slave, RES_QP, qpn);
+ return err;
+ }
+ }
+
+ res_end_move(dev, slave, RES_QP, qpn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ order = get_param_l(&in_param);
+ base = __mlx4_alloc_mtt_range(dev, order);
+ if (base == -1)
+ return -ENOMEM;
+
+ err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (err)
+ __mlx4_free_mtt_range(dev, base, order);
+ else
+ set_param_l(out_param, base);
+
+ return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = __mlx4_mr_reserve(dev);
+ if (index == -1)
+ break;
+ id = index & mpt_mask(dev);
+
+ err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+ if (err) {
+ __mlx4_mr_release(dev, index);
+ break;
+ }
+ set_param_l(out_param, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ if (err) {
+ res_abort_move(dev, slave, RES_MPT, id);
+ return err;
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ break;
+ }
+ return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err) {
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+ }
+
+ set_param_l(out_param, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err) {
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+ }
+
+ set_param_l(out_param, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct mac_res *res;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->mac = mac;
+ res->port = (u8) port;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_MAC]);
+ return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ list_del(&res->list);
+ kfree(res);
+ break;
+ }
+ }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ list_del(&res->list);
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ kfree(res);
+ }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int port;
+ u64 mac;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ port = get_param_l(out_param);
+ mac = in_param;
+
+ err = __mlx4_register_mac(dev, port, mac);
+ if (err >= 0) {
+ set_param_l(out_param, err);
+ err = 0;
+ }
+
+ if (!err) {
+ err = mac_add_to_slave(dev, slave, mac, port);
+ if (err)
+ __mlx4_unregister_mac(dev, port, mac);
+ }
+ return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
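+/* ALLOC_RES dispatcher: the resource type is carried in the in_modifier and
+ * the allocation opcode in the op_modifier of the virtual HCR. */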
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_CQ:
+ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err;
+ int count;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ base = get_param_l(&in_param) & 0x7fffff;
+ count = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err)
+ break;
+ __mlx4_qp_release_range(dev, base, count);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+ NULL, 0);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ if (valid_reserved(dev, slave, qpn))
+ err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ base = get_param_l(&in_param);
+ order = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (!err)
+ __mlx4_free_mtt_range(dev, base, order);
+ return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ break;
+ index = mpt->key;
+ put_res(dev, slave, id, RES_MPT);
+
+ err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+ if (err)
+ break;
+ __mlx4_mr_release(dev, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mr_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
+ return err;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ cqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err)
+ break;
+
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ srqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err)
+ break;
+
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int port;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ port = get_param_l(out_param);
+ mac_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_mac(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
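+/* FREE_RES dispatcher, the counterpart of mlx4_ALLOC_RES_wrapper() above. */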
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = -EINVAL;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_CQ:
+ err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ break;
+ }
+ return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+ return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+ return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
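+/* Number of pages (hence MTT entries) spanned by the QP's work queues, as
+ * encoded in the QP context, rounded up to a power of two. */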
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+ int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+ sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+static int pdn2slave(int pdn)
+{
+ return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
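+/* Verify that the window [start, start + size) lies entirely inside the
+ * tracked MTT reservation. */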
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
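+/* SW2HW_MPT on behalf of a slave: move the MPT to hardware ownership,
+ * validate its MTT range and PD ownership, then forward the command. */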
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+ err = -EPERM;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state != RES_MPT_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+ put_res(dev, slave, id, RES_MPT);
+ return err;
+}
+
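+/* Extract the receive CQ, send CQ and SRQ numbers from a QP context. */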
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
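+/*
+ * RST2INIT on behalf of a slave: verify that the MTT, PD, CQs and optional
+ * SRQ referenced by the QP context belong to the slave, take references on
+ * them, and pass the command on to the firmware.
+ */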
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_mtt *mtt;
+ struct res_qp *qp;
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+ int mtt_size = qp_get_mtt_size(qpc);
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ int rcqn = qp_get_rcqn(qpc);
+ int scqn = qp_get_scqn(qpc);
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+ int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+ if (err)
+ return err;
+ qp->local_qpn = local_qpn;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+ if (err)
+ goto ex_put_mtt;
+
+ if (scqn != rcqn) {
+ err = get_res(dev, slave, scqn, RES_CQ, &scq);
+ if (err)
+ goto ex_put_rcq;
+ } else {
+ scq = rcq;
+ }
+
+ if (use_srq) {
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ goto ex_put_scq;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_srq;
+ atomic_inc(&mtt->ref_count);
+ qp->mtt = mtt;
+ atomic_inc(&rcq->ref_count);
+ qp->rcq = rcq;
+ atomic_inc(&scq->ref_count);
+ qp->scq = scq;
+
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+
+ if (use_srq) {
+ atomic_inc(&srq->ref_count);
+ put_res(dev, slave, srqn, RES_SRQ);
+ qp->srq = srq;
+ }
+ put_res(dev, slave, rcqn, RES_CQ);
+ put_res(dev, slave, mtt_base, RES_MTT);
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ return 0;
+
+ex_put_srq:
+ if (use_srq)
+ put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+ put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+ put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
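+/* MTT base address and number of MTT entries described by an EQ or CQ context. */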
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+ return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+ int log_eq_size = eqc->log_eq_size & 0x1f;
+ int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+ if (log_eq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+ return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+ int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+ int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+ if (log_cq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_cq_size + 5 - page_shift);
+}
+
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int eqn = vhcr->in_modifier;
+ int res_id = (slave << 8) | eqn;
+ struct mlx4_eq_context *eqc = inbox->buf;
+ int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+ int mtt_size = eq_get_mtt_size(eqc);
+ struct res_eq *eq;
+ struct res_mtt *mtt;
+
+ err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ if (err)
+ return err;
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+ if (err)
+ goto out_add;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto out_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+
+ atomic_inc(&mtt->ref_count);
+ eq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ return err;
+}
+
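+/*
+ * Find an MTT resource owned by the slave that contains [start, start + len)
+ * and mark it busy; the caller releases it again with put_res().
+ */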
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+ int len, struct res_mtt **res)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mtt *mtt;
+ int err = -EINVAL;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+ com.list) {
+ if (!check_mtt_range(dev, slave, start, len, mtt)) {
+ *res = mtt;
+ mtt->com.from_state = mtt->com.state;
+ mtt->com.state = RES_MTT_BUSY;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_mtt mtt;
+ __be64 *page_list = inbox->buf;
+ u64 *pg_list = (u64 *)page_list;
+ int i;
+ struct res_mtt *rmtt = NULL;
+ int start = be64_to_cpu(page_list[0]);
+ int npages = vhcr->in_modifier;
+ int err;
+
+ err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+ if (err)
+ return err;
+
+ /* Call the SW implementation of write_mtt:
+ * - Prepare a dummy mtt struct
+ * - Translate inbox contents to simple addresses in host endianness */
+ mtt.offset = 0; /* TBD: offset handling is broken, but we leave it
+ since we don't really use it */
+ mtt.order = 0;
+ mtt.page_shift = 0;
+ for (i = 0; i < npages; ++i)
+ pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+ err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+ ((u64 *)page_list + 2));
+
+ if (rmtt)
+ put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+ if (err)
+ return err;
+
+ err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+ if (err)
+ goto ex_abort;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ atomic_dec(&eq->mtt->ref_count);
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+ return 0;
+
+ex_put:
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+
+ return err;
+}
+
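+/*
+ * Deliver an event to a slave: if the slave registered for this event type,
+ * post the EQE to its event EQ via the GEN_EQE firmware command.
+ */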
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_modifier = 0;
+ int err;
+ int res_id;
+ struct res_eq *req;
+
+ if (!priv->mfunc.master.slave_state)
+ return -EINVAL;
+
+ event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+ /* Create the event only if the slave is registered */
+ if ((event_eq->event_type & (1 << eqe->type)) == 0)
+ return 0;
+
+ mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ res_id = (slave << 8) | event_eq->eqn;
+ err = get_res(dev, slave, res_id, RES_EQ, &req);
+ if (err)
+ goto unlock;
+
+ if (req->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto put;
+ }
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto put;
+ }
+
+ if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+ ++event_eq->token;
+ eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+ }
+
+ memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+ err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ put_res(dev, slave, res_id, RES_EQ);
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+
+put:
+ put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = get_res(dev, slave, res_id, RES_EQ, &eq);
+ if (err)
+ return err;
+
+ if (eq->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+ put_res(dev, slave, res_id, RES_EQ);
+ return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+ struct res_cq *cq;
+ struct res_mtt *mtt;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto out_put;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_move;
+ atomic_dec(&cq->mtt->ref_count);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
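+/*
+ * CQ resize: validate both the currently attached MTT and the new one from
+ * the modified CQ context, then move the CQ's reference to the new MTT.
+ */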
+static int handle_resize(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd,
+ struct res_cq *cq)
+{
+ int err;
+ struct res_mtt *orig_mtt;
+ struct res_mtt *mtt;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+ err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+ if (err)
+ return err;
+
+ if (orig_mtt != cq->mtt) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_put;
+
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto ex_put1;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put1;
+ atomic_dec(&orig_mtt->ref_count);
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ return 0;
+
+ex_put1:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+ return err;
+
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ if (vhcr->op_modifier == 0) {
+ err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+ if (err)
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
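+/* Extract the PD number and MTT size (in entries) from an SRQ context. */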
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+ int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+ int log_rq_stride = srqc->logstride & 7;
+ int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+ if (log_srq_size + log_rq_stride + 4 < page_shift)
+ return 1;
+
+ return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_srq *srq;
+ struct mlx4_srq_context *srqc = inbox->buf;
+ int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+ if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+ return -EINVAL;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+ err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+ mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_mtt;
+
+ atomic_inc(&mtt->ref_count);
+ srq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+ return 0;
+
+ex_put_mtt:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = get_res(dev, slave, qpn, RES_QP, &qp);
+ if (err)
+ return err;
+ if (qp->com.from_state != RES_QP_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+ update_ud_gid(dev, qpc, (u8)slave);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ atomic_dec(&qp->mtt->ref_count);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ res_end_move(dev, slave, RES_QP, qpn);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
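+/*
+ * Multicast attachments are tracked per QP so that they can be detached
+ * again when the slave's resources are cleaned up.
+ */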
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+ struct res_qp *rqp, u8 *gid)
+{
+ struct res_gid *res;
+
+ list_for_each_entry(res, &rqp->mcg_list, list) {
+ if (!memcmp(res->gid, gid, 16))
+ return res;
+ }
+ return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ struct res_gid *res;
+ int err;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ if (find_gid(dev, slave, rqp, gid)) {
+ kfree(res);
+ err = -EEXIST;
+ } else {
+ memcpy(res->gid, gid, 16);
+ res->prot = prot;
+ list_add_tail(&res->list, &rqp->mcg_list);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ struct res_gid *res;
+ int err;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ res = find_gid(dev, slave, rqp, gid);
+ if (!res || res->prot != prot) {
+ err = -EINVAL;
+ } else {
+ list_del(&res->list);
+ kfree(res);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+ u8 *gid = inbox->buf;
+ enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+ int err, err1;
+ int qpn;
+ struct res_qp *rqp;
+ int attach = vhcr->op_modifier;
+ int block_loopback = vhcr->in_modifier >> 31;
+ u8 steer_type_mask = 2;
+ enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
+ qpn = vhcr->in_modifier & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ qp.qpn = qpn;
+ if (attach) {
+ err = add_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid,
+ block_loopback, prot, type);
+ if (err)
+ goto ex_rem;
+ } else {
+ err = rem_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+ err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+ }
+
+ put_res(dev, slave, qpn, RES_QP);
+ return 0;
+
+ex_rem:
+ /* ignore error return below, already in error */
+ err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+ put_res(dev, slave, qpn, RES_QP);
+
+ return err;
+}
+
+enum {
+ BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier & 0xffff;
+
+ err = get_res(dev, slave, index, RES_COUNTER, NULL);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ put_res(dev, slave, index, RES_COUNTER);
+ return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+ struct res_gid *rgid;
+ struct res_gid *tmp;
+ int err;
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+ qp.qpn = rqp->local_qpn;
+ err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+ MLX4_MC_STEER);
+ list_del(&rgid->list);
+ kfree(rgid);
+ }
+}
+
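+/*
+ * Flag every resource of @type owned by @slave as busy and about to be
+ * removed; returns the number of resources that were already busy and
+ * could not be claimed.
+ */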
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int print)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+ struct res_common *r;
+ struct res_common *tmp;
+ int busy;
+
+ busy = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(r, tmp, rlist, list) {
+ if (r->owner == slave) {
+ if (!r->removing) {
+ if (r->state == RES_ANY_BUSY) {
+ if (print)
+ mlx4_dbg(dev,
+ "%s id 0x%x is busy\n",
+ ResourceType(type),
+ r->res_id);
+ ++busy;
+ } else {
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ r->removing = 1;
+ }
+ }
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type)
+{
+ unsigned long begin;
+ int busy;
+
+ begin = jiffies;
+ do {
+ busy = _move_all_busy(dev, slave, type, 0);
+ if (time_after(jiffies, begin + 5 * HZ))
+ break;
+ if (busy)
+ cond_resched();
+ } while (busy);
+
+ if (busy)
+ busy = _move_all_busy(dev, slave, type, 1);
+
+ return busy;
+}
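+
+/*
+ * Tear down all QPs still owned by @slave: detach their multicast groups,
+ * walk each QP back to the reserved state and free the tracker entry.
+ */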
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *qp_list =
+ &tracker->slave_list[slave].res_list[RES_QP];
+ struct res_qp *qp;
+ struct res_qp *tmp;
+ int state;
+ u64 in_param;
+ int qpn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_QP);
+ if (err)
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
+ "for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (qp->com.owner == slave) {
+ qpn = qp->com.res_id;
+ detach_qp(dev, slave, qp);
+ state = qp->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_QP_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_QP],
+ qp->com.res_id);
+ list_del(&qp->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(qp);
+ state = 0;
+ break;
+ case RES_QP_MAPPED:
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+ state = RES_QP_RESERVED;
+ break;
+ case RES_QP_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param,
+ qp->local_qpn, 2,
+ MLX4_CMD_2RST_QP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_qps: failed"
+ " to move slave %d qpn %d to"
+ " reset\n", slave,
+ qp->local_qpn);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ atomic_dec(&qp->mtt->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ state = RES_QP_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *srq_list =
+ &tracker->slave_list[slave].res_list[RES_SRQ];
+ struct res_srq *srq;
+ struct res_srq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int srqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_SRQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (srq->com.owner == slave) {
+ srqn = srq->com.res_id;
+ state = srq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_SRQ_ALLOCATED:
+ __mlx4_srq_free_icm(dev, srqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_SRQ],
+ srqn);
+ list_del(&srq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(srq);
+ state = 0;
+ break;
+
+ case RES_SRQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, srqn, 1,
+ MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_srqs: failed"
+ " to move slave %d srq %d to"
+ " SW ownership\n",
+ slave, srqn);
+
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ state = RES_SRQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *cq_list =
+ &tracker->slave_list[slave].res_list[RES_CQ];
+ struct res_cq *cq;
+ struct res_cq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int cqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_CQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+ cqn = cq->com.res_id;
+ state = cq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_CQ_ALLOCATED:
+ __mlx4_cq_free_icm(dev, cqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_CQ],
+ cqn);
+ list_del(&cq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(cq);
+ state = 0;
+ break;
+
+ case RES_CQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, cqn, 1,
+ MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_cqs: failed"
+ " to move slave %d cq %d to"
+ " SW ownership\n",
+ slave, cqn);
+ atomic_dec(&cq->mtt->ref_count);
+ state = RES_CQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mpt_list =
+ &tracker->slave_list[slave].res_list[RES_MPT];
+ struct res_mpt *mpt;
+ struct res_mpt *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int mptn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MPT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mpt->com.owner == slave) {
+ mptn = mpt->com.res_id;
+ state = mpt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MPT_RESERVED:
+ __mlx4_mr_release(dev, mpt->key);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MPT],
+ mptn);
+ list_del(&mpt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mpt);
+ state = 0;
+ break;
+
+ case RES_MPT_MAPPED:
+ __mlx4_mr_free_icm(dev, mpt->key);
+ state = RES_MPT_RESERVED;
+ break;
+
+ case RES_MPT_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, mptn, 0,
+ MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_mrs: failed"
+ " to move slave %d mpt %d to"
+ " SW ownership\n",
+ slave, mptn);
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+ state = RES_MPT_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *mtt_list =
+ &tracker->slave_list[slave].res_list[RES_MTT];
+ struct res_mtt *mtt;
+ struct res_mtt *tmp;
+ int state;
+ LIST_HEAD(tlist);
+ int base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MTT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mtt->com.owner == slave) {
+ base = mtt->com.res_id;
+ state = mtt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MTT_ALLOCATED:
+ __mlx4_free_mtt_range(dev, base,
+ mtt->order);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MTT],
+ base);
+ list_del(&mtt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mtt);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *eq_list =
+ &tracker->slave_list[slave].res_list[RES_EQ];
+ struct res_eq *eq;
+ struct res_eq *tmp;
+ int err;
+ int state;
+ LIST_HEAD(tlist);
+ int eqn;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ err = move_all_busy(dev, slave, RES_EQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (eq->com.owner == slave) {
+ eqn = eq->com.res_id;
+ state = eq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_EQ_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_EQ],
+ eqn);
+ list_del(&eq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(eq);
+ state = 0;
+ break;
+
+ case RES_EQ_HW:
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ cond_resched();
+ continue;
+ }
+ err = mlx4_cmd_box(dev, slave, 0,
+ eqn & 0xff, 0,
+ MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ mlx4_dbg(dev, "rem_slave_eqs: failed"
+ " to move slave %d eqs %d to"
+ " SW ownership\n", slave, eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err) {
+ atomic_dec(&eq->mtt->ref_count);
+ state = RES_EQ_RESERVED;
+ }
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
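+/*
+ * Release every resource still held by a slave: QPs first, so that the
+ * references they hold on CQs, SRQs and MTTs can be dropped afterwards.
+ */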
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+ /* TODO: also clean up VLANs */
+ rem_slave_macs(dev, slave);
+ rem_slave_qps(dev, slave);
+ rem_slave_srqs(dev, slave);
+ rem_slave_cqs(dev, slave);
+ rem_slave_mrs(dev, slave);
+ rem_slave_eqs(dev, slave);
+ rem_slave_mtts(dev, slave);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7411d..80249829352 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
- MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fce014..2823fffc638 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -38,26 +40,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_srq_context {
- __be32 state_logsize_srqn;
- u8 logstride;
- u8 reserved1;
- __be16 xrcd;
- __be32 pg_offset_cqn;
- u32 reserved2;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 pd;
- __be16 limit_watermark;
- __be16 wqe_cnt;
- u16 reserved4;
- __be16 wqe_counter;
- u32 reserved5;
- __be64 db_rec_addr;
-};
-
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
- return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+ MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
- struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_srq_context *srq_context;
- u64 mtt_addr;
int err;
- srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
- if (srq->srqn == -1)
+
+ *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ return err;
+}
+
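+/*
+ * In multi-function mode the allocation goes through the ALLOC_RES command
+ * so that the resource tracker records the owner; otherwise allocate
+ * directly.
+ */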
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *srqn = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+ mlx4_table_put(dev, &srq_table->table, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
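+/* Counterpart of mlx4_srq_alloc_icm(): free via FREE_RES or directly. */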
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, srqn);
+ if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
+ return;
+ }
+ __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+ if (err)
+ return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+ mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
complete(&srq->free);
wait_for_completion(&srq->free);
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+ mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d10c2e15f4e..1ea811cf515 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,6 +42,8 @@ config KS8851
select NET_CORE
select MII
select CRC32
+ select MISC_DEVICES
+ select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae057e3b..75ec87a822b 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
.remove = ks8842_remove,
};
-static int __init ks8842_init(void)
-{
- return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
- platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a28fc..6b35e7da9a9 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
#include <linux/spi/spi.h>
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
struct spi_message spi_msg2;
struct spi_transfer spi_xfer1;
struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
};
static int msg_enable;
@@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
}
/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
* ks8851_write_mac_addr - write mac address to device registers
* @dev: The network device
*
@@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev)
mutex_lock(&ks->lock);
+ /*
+ * Wake up chip in case it was powered off when stopped; otherwise,
+ * the first write to the MAC address does not take effect.
+ */
+ ks8851_set_powermode(ks, PMECR_PM_NORMAL);
for (i = 0; i < ETH_ALEN; i++)
ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+ if (!netif_running(dev))
+ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
mutex_unlock(&ks->lock);
@@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev)
}
/**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int i;
+
+ mutex_lock(&ks->lock);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+ mutex_unlock(&ks->lock);
+}
+
+/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
* to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ /* first, try reading what we've got already */
+ if (ks->rc_ccr & CCR_EEPROM) {
+ ks8851_read_mac_addr(dev);
+ if (is_valid_ether_addr(dev->dev_addr))
+ return;
+
+ netdev_err(ks->netdev, "invalid mac address read %pM\n",
+ dev->dev_addr);
+ }
+
random_ether_addr(dev->dev_addr);
ks8851_write_mac_addr(dev);
}
@@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work)
}
/**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- pmecr = ks8851_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
* ks8851_net_open - open network device
* @dev: The network device being opened.
*
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
- EEPROM_CONTROL,
- EEPROM_ADDRESS,
- EEPROM_DATA,
- EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- * - on period start: set clock high and read value on bus
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int ctrl = EEPROM_OP_READ;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int data = 0;
- int dummy;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((ctrl >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- bit_count--;
- break;
- case EEPROM_DATA:
- /* Change to receive mode */
- eepcr &= ~EEPCR_EESRWA;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* waitread period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- mutex_lock(&ks->lock);
- eepcr |= EEPCR_EESCK;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* Manage read */
- switch (state) {
- case EEPROM_ADDRESS:
- if (bit_count < 0) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- }
- break;
- case EEPROM_DATA:
- mutex_lock(&ks->lock);
- dummy = ks8851_rdreg16(ks, KS_EEPCR);
- mutex_unlock(&ks->lock);
- data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- * - on period start: set clock high
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
- unsigned int addr, unsigned int data)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- switch (op) {
- case EEPROM_OP_EWEN:
- addr = 0x30;
- break;
- case EEPROM_OP_EWDS:
- addr = 0;
- break;
- }
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((op >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- if (op == EEPROM_OP_WRITE) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- } else {
- state = EEPROM_COMPLETE;
- }
- }
- break;
- case EEPROM_DATA:
- eepcr |= ((data >> bit_count) & 1) << 2;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- eepcr |= EEPCR_EESCK;
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
-}
-
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- return ks->eeprom_size;
-}
+/* EEPROM support */
-static int ks8851_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
{
- struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
+ struct ks8851_net *ks = ee->data;
+ unsigned val;
- if (eeprom->len > ks->eeprom_size)
- return -EINVAL;
+ val = ks8851_rdreg16(ks, KS_EEPCR);
- eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+ ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+ ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+ ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+ struct ks8851_net *ks = ee->data;
+ unsigned val = EEPCR_EESA; /* default - eeprom access on */
+
+ if (ee->drive_data)
+ val |= EEPCR_EESRWA;
+ if (ee->reg_data_in)
+ val |= EEPCR_EEDO;
+ if (ee->reg_data_clock)
+ val |= EEPCR_EESCK;
+ if (ee->reg_chip_select)
+ val |= EEPCR_EECS;
+
+ ks8851_wrreg16(ks, KS_EEPCR, val);
+}
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+ if (!(ks->rc_ccr & CCR_EEPROM))
+ return -ENOENT;
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+ mutex_lock(&ks->lock);
- /* Device's eeprom is little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ /* start with clock low, cs high */
+ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+ return 0;
+}
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+ unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
- return ret_val;
+ ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+ mutex_unlock(&ks->lock);
}
+#define KS_EEPROM_MAGIC (0x00008851)
+
static int ks8851_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *ee, u8 *data)
{
struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- void *ptr;
- int max_len;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->len > ks->eeprom_size)
+ int offset = ee->offset;
+ int len = ee->len;
+ u16 tmp;
+
+ /* currently only support byte writing */
+ if (len != 1)
return -EINVAL;
- if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
- return -EFAULT;
+ if (ee->magic != KS_EEPROM_MAGIC)
+ return -EINVAL;
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- max_len = (last_word - first_word + 1) * 2;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
- ptr = (void *)eeprom_buff;
+ eeprom_93cx6_wren(&ks->eeprom, true);
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
- ptr++;
+ /* ethtool currently only supports writing bytes, which means
+ * we have to read/modify/write our 16bit EEPROMs */
+
+ eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+ if (offset & 1) {
+ tmp &= 0xff;
+ tmp |= *data << 8;
+ } else {
+ tmp &= 0xff00;
+ tmp |= *data;
}
- if ((eeprom->offset + eeprom->len) & 1)
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word] =
- ks8851_eeprom_read(dev, last_word);
+ eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+ eeprom_93cx6_wren(&ks->eeprom, false);
- /* Device's eeprom is little-endian, word addressable */
- le16_to_cpus(&eeprom_buff[0]);
- le16_to_cpus(&eeprom_buff[last_word - first_word]);
+ ks8851_eeprom_release(ks);
- memcpy(ptr, bytes, eeprom->len);
+ return 0;
+}
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int offset = ee->offset;
+ int len = ee->len;
- ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+ /* must be 2 byte aligned */
+ if (len & 1 || offset & 1)
+ return -EINVAL;
- for (i = 0; i < last_word - first_word + 1; i++) {
- ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
- eeprom_buff[i]);
- mdelay(EEPROM_WRITE_TIME);
- }
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
+
+ ee->magic = KS_EEPROM_MAGIC;
- ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+ eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+ ks8851_eeprom_release(ks);
- kfree(eeprom_buff);
- return ret_val;
+ return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ /* currently, we assume a 93C46 is attached, so return 128 */
+ return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
}
static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+ /* setup EEPROM state */
+
+ ks->eeprom.data = ks;
+ ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ ks->eeprom.register_read = ks8851_eeprom_regread;
+ ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
/* setup mii state */
ks->mii.dev = ndev;
ks->mii.phy_id = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ ndev->dev_addr, ndev->irq,
+ ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
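
The converted ks8851_set_eeprom() above leans on the generic eeprom_93cx6 helper: ethtool hands the driver a byte offset, but the attached 93C46 part is word (16-bit) addressable, so a single-byte write becomes a read/modify/write of the containing word. A minimal sketch of that pattern, assuming the EEPROM interface has already been claimed (as ks8851_eeprom_claim() does above) and using only the helper calls visible in the hunk:

/* Sketch only: write one byte into a word-addressable 93Cx6 EEPROM.
 * eeprom_write_byte_sketch() is an illustrative name, not driver code.
 */
#include <linux/eeprom_93cx6.h>

static void eeprom_write_byte_sketch(struct eeprom_93cx6 *ee,
				     unsigned int byte_off, u8 val)
{
	u16 word;

	/* fetch the 16-bit word that contains the target byte */
	eeprom_93cx6_read(ee, byte_off / 2, &word);

	if (byte_off & 1) {
		word &= 0x00ff;		/* keep the low byte */
		word |= val << 8;	/* replace the high byte */
	} else {
		word &= 0xff00;		/* keep the high byte */
		word |= val;		/* replace the low byte */
	}

	eeprom_93cx6_wren(ee, true);	/* enable writes */
	eeprom_93cx6_write(ee, byte_off / 2, word);
	eeprom_93cx6_wren(ee, false);	/* write-protect again */
}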
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e593..b0fae86aaca 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -16,7 +16,7 @@
#define CCR_32PIN (1 << 0)
/* MAC address registers */
-#define KS_MAR(_m) 0x15 - (_m)
+#define KS_MAR(_m) (0x15 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -27,22 +27,11 @@
#define KS_EEPCR 0x22
#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB_OFFSET 3
-#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
-#define EEPROM_OP_LEN 3 /* bits:*/
-#define EEPROM_OP_READ 0x06
-#define EEPROM_OP_EWEN 0x04
-#define EEPROM_OP_WRITE 0x05
-#define EEPROM_OP_EWDS 0x14
-
-#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD 400 /* in us */
-
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849059d..e58e78e5c93 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->all_mcast = 0;
ks->mcast_lst_size = 0;
- ks->frame_head_info = (struct type_frame_head *) \
- kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
pr_err("Error: Fail to allocate frame memory\n");
return false;
@@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = {
.remove = __devexit_p(ks8851_remove),
};
-static int __init ks8851_init(void)
-{
- return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
- platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
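
Several drivers touched by this diff (ks8851_mll above, and jazzsonic, macsonic, xtsonic and w90p910_ether below) drop their hand-written init/exit pairs in favour of module_platform_driver(). For a driver named foo_driver (a placeholder), the macro is roughly equivalent to the boilerplate it replaces; the real definition lives in <linux/platform_device.h> and goes through module_driver():

/* Rough expansion of module_platform_driver(foo_driver); for illustration
 * only, foo_driver is a placeholder name.
 */
static int __init foo_driver_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_driver_init);

static void __exit foo_driver_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_driver_exit);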
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7ece990381c..6ed09a85f03 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -743,8 +743,7 @@
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
-#define MAC_ADDR_LEN 6
-#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
#define ETHERNET_HEADER_SIZE 14
@@ -1043,7 +1042,7 @@ enum {
* @valid: Valid setting indicating the entry is being used.
*/
struct ksz_mac_table {
- u8 mac_addr[MAC_ADDR_LEN];
+ u8 mac_addr[ETH_ALEN];
u16 vid;
u8 fid;
u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
u8 diffserv[DIFFSERV_ENTRIES];
u8 p_802_1p[PRIO_802_1P_ENTRIES];
- u8 br_addr[MAC_ADDR_LEN];
- u8 other_addr[MAC_ADDR_LEN];
+ u8 br_addr[ETH_ALEN];
+ u8 other_addr[ETH_ALEN];
u8 broad_per;
u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
int tx_int_mask;
int tx_size;
- u8 perm_addr[MAC_ADDR_LEN];
- u8 override_addr[MAC_ADDR_LEN];
- u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 override_addr[ETH_ALEN];
+ u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
u8 addr_list_size;
u8 mac_override;
u8 promiscuous;
u8 all_multi;
- u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
u8 multi_bits[HW_MULTICAST_SIZE];
u8 multi_list_size;
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
static const u8 mask[] = { 0x3F };
static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+ hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
- hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+ hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
hw->io + KS884X_ADDR_0_OFFSET + i);
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
KS884X_ADDR_0_OFFSET + i);
if (!hw->mac_override) {
- memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
if (empty_addr(hw->override_addr)) {
- memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ ETH_ALEN);
hw->override_addr[5] += hw->id;
hw_set_addr(hw);
}
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
}
if (j < ADDITIONAL_ENTRIES) {
- memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ memcpy(hw->address[j], mac_addr, ETH_ALEN);
hw_ena_add_addr(hw, j, hw->address[j]);
return 0;
}
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
- memset(hw->address[i], 0, MAC_ADDR_LEN);
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
*/
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
- desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
- GFP_KERNEL);
+ desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
if (!desc_info->ring)
return 1;
- memset((void *) desc_info->ring, 0,
- sizeof(struct ksz_desc) * desc_info->alloc);
hw_init_desc(desc_info, transmit);
return 0;
}
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
hw_del_addr(hw, dev->dev_addr);
else {
hw->mac_override = 1;
- memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(hw_priv->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ sizeof(info->bus_info));
}
/**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
@@ -6609,7 +6607,7 @@ static int netdev_set_features(struct net_device *dev, u32 features)
return 0;
}
-static struct ethtool_ops netdev_ethtool_ops = {
+static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
int num;
i = j = num = got_num = 0;
- while (j < MAC_ADDR_LEN) {
+ while (j < ETH_ALEN) {
if (macaddr[i]) {
int digit;
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
}
i++;
}
- if (MAC_ADDR_LEN == j) {
+ if (ETH_ALEN == j) {
if (MAIN_PORT == port)
hw_priv->hw.mac_override = 1;
}
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
/* Multiple device interfaces mode requires a second MAC address. */
if (hw->dev_count > 1) {
- memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
read_other_addr(hw);
if (mac1addr[0] != ':')
get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
dev->irq = pdev->irq;
if (MAIN_PORT == i)
memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- MAC_ADDR_LEN);
+ ETH_ALEN);
else {
- memcpy(dev->dev_addr, sw->other_addr,
- MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
if (!memcmp(sw->other_addr, hw->override_addr,
- MAC_ADDR_LEN))
+ ETH_ALEN))
dev->dev_addr[5] += port->first_port;
}
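
The netdev_get_drvinfo() hunk above is one instance of a pattern repeated across this diff (natsemi, ns83820, s2io, forcedeth, pch_gbe, qla3xxx, netxen, qlcnic, qlge): the ethtool_drvinfo fields are fixed-size character arrays, and strlcpy() bounds the copy by the destination and always NUL-terminates, which strcpy() or a strncpy() with a guessed length does not guarantee. A hedged sketch of the converted shape, with placeholder names:

/* Illustrative get_drvinfo using strlcpy(); "example" names and the priv
 * layout are placeholders, not any driver's real code.
 */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/string.h>

struct example_priv {
	struct pci_dev *pdev;
};

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct example_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, pci_name(priv->pdev),
		sizeof(info->bus_info));
}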
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 0778edcf7b9..20b72ecb020 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
* access to avoid theoretical race condition with functions that
* change NETIF_F_LRO flag at runtime.
*/
- bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+ bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
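
The signature changes here and throughout the rest of this diff (u32 becoming netdev_features_t in the fix_features/set_features hooks) follow the feature-flag API's move to a dedicated type wide enough for more than 32 flags. A sketch of the converted hook pair, with placeholder names; the RXCSUM/LRO dependency mirrors the myri10ge logic above:

/* Placeholder hooks showing the netdev_features_t signatures; "example"
 * names are illustrative only.
 */
#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					       netdev_features_t features)
{
	/* LRO depends on RX checksum offload */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (!(changed & NETIF_F_LRO))
		return 0;

	/* reprogram the hardware LRO state here */
	return 0;
}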
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150e4d6..b7fc26c4f73 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_DCA_OFFSET = 56,
/* offset of dca control for WDMAs */
- /* VMWare NetQueue commands */
+ /* VMware NetQueue commands */
MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
MXGEFW_CMD_NETQ_ADD_FILTER = 58,
/* data0 = filter_id << 16 | queue << 8 | type */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index fc7c6a932ad..5b89fd377ae 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
},
};
-static int __init jazz_sonic_init_module(void)
-{
- return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a2eacbfb425..f1b85561c65 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -142,8 +142,7 @@ static int macsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
@@ -154,8 +153,8 @@ static int macsonic_open(struct net_device* dev)
* rupt as well, which must prevent re-entrance of the sonic handler.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
- IRQ_FLG_FAST, "sonic", dev);
+ retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, IRQ_NUBUS_9);
@@ -643,15 +642,4 @@ static struct platform_driver mac_sonic_driver = {
},
};
-static int __init mac_sonic_init_module(void)
-{
- return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 6ca047aab79..ac7b16b6e7a 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
- strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2b8f64ddfb5..c24b46cbfe2 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strcpy(info->driver, "ns83820");
- strcpy(info->version, VERSION);
- strcpy(info->bus_info, pci_name(dev->pci_dev));
+ strlcpy(info->driver, "ns83820", sizeof(info->driver));
+ strlcpy(info->version, VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ccf61b9da8d..e01c0a07a93 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
},
};
-static int __init xtsonic_init(void)
-{
- return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
- platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c27fb3dda9f..97f63e12d86 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strncpy(info->version, s2io_driver_version, sizeof(info->version));
- strncpy(info->fw_version, "", sizeof(info->fw_version));
- strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
}
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
}
}
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
struct s2io_nic *sp = netdev_priv(dev);
- u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+ netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
int rc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a83197d757c..ef76725454d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
/* Enabling RTH requires some of the logic in vxge_device_register and a
* vpath reset. Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
{
struct vxgedev *vdev = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXHASH))
return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
*
* Add the vlan id to the devices vlan id table
*/
-static void
+static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
vxge_hw_vpath_vid_add(vpath->handle, vid);
}
set_bit(vid, vdev->active_vlans);
+ return 0;
}
/**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
*
* Remove the vlan id from the device's vlan id table
*/
-static void
+static int
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
clear_bit(vid, vdev->active_vlans);
+ return 0;
}
static const struct net_device_ops vxge_netdev_ops = {
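
The vxge hunks above (and the qlcnic ones below) convert the VLAN filter hooks from void to int so a failure to program the hardware filter can be reported back to the stack. A minimal sketch of the converted shape, with a placeholder bitmap standing in for the per-device VLAN table:

/* Illustrative int-returning VLAN hooks; the names and the static bitmap
 * are placeholders, a real driver keeps the table in its private struct.
 */
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>

static unsigned long example_active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

static int example_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	/* program the hardware filter here; return a negative errno on failure */
	set_bit(vid, example_active_vlans);
	return 0;
}

static int example_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	clear_bit(vid, example_active_vlans);
	return 0;
}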
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index f1bfb8f8fcf..b75a0497d58 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
},
};
-static int __init w90p910_ether_init(void)
-{
- return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
- platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1c61d36e657..4c4e7f45838 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -65,7 +65,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
-#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -736,6 +737,16 @@ struct nv_skb_map {
* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs netdev_priv(dev)->lock :-(
* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ * integer wraparound in the NIC stats registers, at low frequency
+ * (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
*/
/* in dev: base, irq */
@@ -745,9 +756,10 @@ struct fe_priv {
struct net_device *dev;
struct napi_struct napi;
- /* General data:
- * Locking: spin_lock(&np->lock); */
+ /* hardware stats are updated from syscall context and the stats timer */
+ spinlock_t hwstats_lock;
struct nv_ethtool_stats estats;
+
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
u32 nic_poll_irq;
int rx_ring_size;
+ /* RX software stats */
+ struct u64_stats_sync swstats_rx_syncp;
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+
/* media detection workaround.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
@@ -820,6 +839,12 @@ struct fe_priv {
struct nv_skb_map *tx_end_flip;
int tx_stop;
+ /* TX software stats */
+ struct u64_stats_sync swstats_tx_syncp;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@ enum {
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
* Crossover Detection
* Realtek 8201 phy + some OEM boards do not work properly.
*/
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
pci_push(base);
}
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ /* If it happens that this is run in top-half context, then
+ * replace the spin_lock of hwstats_lock with
+ * spin_lock_irqsave() in calling functions. */
+ WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+ assert_spin_locked(&np->hwstats_lock);
+
+ /* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,40 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
}
/*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
* Called with read_lock(&dev_base_lock) held for read -
* only synchronized against unregister_netdevice.
*/
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
+ unsigned int syncp_start;
+
+ /*
+ * Note: because HW stats are not always available and for
+ * consistency reasons, the following ifconfig stats are
+ * managed by software: rx_bytes, tx_bytes, rx_packets and
+ * tx_packets. The related hardware stats reported by ethtool
+ * should be equivalent to these ifconfig stats, with 4
+ * additional bytes per packet (Ethernet FCS CRC), except for
+ * tx_packets when TSO kicks in.
+ */
+
+ /* software stats */
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ storage->rx_packets = np->stat_rx_packets;
+ storage->rx_bytes = np->stat_rx_bytes;
+ storage->rx_dropped = np->stat_rx_dropped;
+ storage->rx_missed_errors = np->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ storage->tx_packets = np->stat_tx_packets;
+ storage->tx_bytes = np->stat_tx_bytes;
+ storage->tx_dropped = np->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
- nv_get_hw_stats(dev);
+ if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+ spin_lock_bh(&np->hwstats_lock);
- /*
- * Note: because HW stats are not always available and
- * for consistency reasons, the following ifconfig
- * stats are managed by software: rx_bytes, tx_bytes,
- * rx_packets and tx_packets. The related hardware
- * stats reported by ethtool should be equivalent to
- * these ifconfig stats, with 4 additional bytes per
- * packet (Ethernet FCS CRC).
- */
+ nv_update_stats(dev);
+
+ /* generic stats */
+ storage->rx_errors = np->estats.rx_errors_total;
+ storage->tx_errors = np->estats.tx_errors_total;
- /* copy to net_device stats */
- dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
- dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
- dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
- dev->stats.rx_over_errors = np->estats.rx_over_errors;
- dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
- dev->stats.rx_errors = np->estats.rx_errors_total;
- dev->stats.tx_errors = np->estats.tx_errors_total;
+ /* meaningful only when NIC supports stats v3 */
+ storage->multicast = np->estats.rx_multicast;
+
+ /* detailed rx_errors */
+ storage->rx_length_errors = np->estats.rx_length_error;
+ storage->rx_over_errors = np->estats.rx_over_errors;
+ storage->rx_crc_errors = np->estats.rx_crc_errors;
+ storage->rx_frame_errors = np->estats.rx_frame_align_error;
+ storage->rx_fifo_errors = np->estats.rx_drop_frame;
+
+ /* detailed tx_errors */
+ storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+ storage->tx_fifo_errors = np->estats.tx_fifo_errors;
+
+ spin_unlock_bh(&np->hwstats_lock);
}
- return &dev->stats;
+ return storage;
}
/*
@@ -1759,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1791,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1849,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+ netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
@@ -1927,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(np, &np->tx_skb[i]))
- dev->stats.tx_dropped++;
+ if (nv_release_txskb(np, &np->tx_skb[i])) {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ }
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
@@ -2194,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.orig = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.ex = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc *orig_get_tx = np->get_tx.orig;
+ unsigned int bytes_compl = 0;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2385,12 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (flags & NV_TX_ERROR) {
- if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+ if ((flags & NV_TX_RETRYERROR)
+ && !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2398,12 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
} else {
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2414,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2427,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+ unsigned long bytes_cleaned = 0;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2436,17 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
else
nv_legacybackoff_reseed(dev);
}
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_cleaned += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2454,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (np->tx_limit)
nv_tx_flip_ownership(dev);
}
+
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2477,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
- int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+ netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
- netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
- netdev_info(dev, "Dumping tx registers\n");
- for (i = 0; i <= np->register_size; i += 32) {
- netdev_info(dev,
- "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- netdev_info(dev, "Dumping tx ring\n");
- for (i = 0; i < np->tx_ring_size; i += 4) {
- if (!nv_optimized(np)) {
- netdev_info(dev,
- "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
+ if (unlikely(debug_tx_timeout)) {
+ int i;
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
netdev_info(dev,
- "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ "%3x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x "
+ "// %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ }
}
}
@@ -2649,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME)
- dev->stats.rx_missed_errors++;
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_missed_errors++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2693,8 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->first_rx.orig;
@@ -2777,8 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
__vlan_hwaccel_put_tag(skb, vid);
}
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
}
@@ -3021,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
}
}
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyreg, txreg;
+ int mii_status;
+
+ np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+ np->duplex = duplex;
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ phyreg = readl(base + NvRegSlotTime);
+ phyreg &= ~(0x3FF00);
+ if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_SLOTTIME_1000_FULL;
+ writel(phyreg, base + NvRegSlotTime);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ return;
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -3042,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
+ u32 bmcr;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
+ /* If device loopback is enabled, set carrier on and enable max link
+ * speed.
+ */
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (bmcr & BMCR_LOOPBACK) {
+ if (netif_running(dev)) {
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+ return 1;
+ }
+
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
*/
@@ -3729,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
+ netdev_info(dev, "MSI-X enabled\n");
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3750,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ netdev_info(dev, "MSI enabled\n");
}
}
if (ret != 0) {
@@ -3904,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
#endif
static void nv_do_stats_poll(unsigned long data)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
- nv_get_hw_stats(dev);
+ /* If the lock is currently taken, the stats are being refreshed
+ * and are hence fresh enough */
+ if (spin_trylock(&np->hwstats_lock)) {
+ nv_update_stats(dev);
+ spin_unlock(&np->hwstats_lock);
+ }
if (!np->in_shutdown)
mod_timer(&np->stats_poll,
@@ -3918,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4473,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
return 0;
}
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned long flags;
+ u32 miicontrol;
+ int err, retval = 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (features & NETIF_F_LOOPBACK) {
+ if (miicontrol & BMCR_LOOPBACK) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already enabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn on loopback mode */
+ miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+ if (err) {
+ retval = PHY_ERROR;
+ spin_unlock_irqrestore(&np->lock, flags);
+ phy_init(dev);
+ } else {
+ if (netif_running(dev)) {
+ /* Force 1000 Mbps full-duplex */
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+ 1);
+ /* Force link up */
+ netif_carrier_on(dev);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev,
+ "Internal PHY loopback mode enabled.\n");
+ }
+ } else {
+ if (!(miicontrol & BMCR_LOOPBACK)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already disabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn off loopback */
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+ phy_init(dev);
+ }
+ msleep(500);
+ spin_lock_irqsave(&np->lock, flags);
+ nv_enable_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4482,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
return features;
}
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -4503,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
spin_unlock_irq(&np->lock);
}
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
+ int retval;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+ retval = nv_set_loopback(dev, features);
+ if (retval != 0)
+ return retval;
+ }
if (changed & NETIF_F_RXCSUM) {
spin_lock_irq(&np->lock);
@@ -4553,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
}
}
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *buffer)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- /* update stats */
- nv_get_hw_stats(dev);
-
- memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_lock_bh(&np->hwstats_lock);
+ nv_update_stats(dev);
+ memcpy(buffer, &np->estats,
+ nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_unlock_bh(&np->hwstats_lock);
}
static int nv_link_test(struct net_device *dev)
@@ -5142,6 +5424,12 @@ static int nv_open(struct net_device *dev)
spin_unlock_irq(&np->lock);
+ /* If the loopback feature was set while the device was down, make sure
+ * that it's set correctly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ nv_set_loopback(dev, dev->features);
+
return 0;
out_drain:
nv_drain_rxtx(dev);
@@ -5198,7 +5486,7 @@ static int nv_close(struct net_device *dev)
static const struct net_device_ops nv_netdev_ops = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5215,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5254,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->dev = dev;
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
init_timer(&np->oom_kick);
@@ -5262,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = nv_do_nic_poll; /* timer handler */
- init_timer(&np->stats_poll);
+ init_timer_deferrable(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
np->stats_poll.function = nv_do_stats_poll; /* timer handler */
@@ -5346,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->features |= dev->hw_features;
+ /* Add loopback capability to the device. */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5621,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
- dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
+ dev->features & (NETIF_F_LOOPBACK) ?
+ "loopback " : "",
id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -6000,6 +6294,9 @@ module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+ "Dump tx related registers and ring when tx_timeout happens");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
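
The forcedeth rework above splits statistics into two domains: hardware counters read under hwstats_lock (nv_update_stats()), and software packet/byte counters published through u64_stats_sync so 64-bit values stay consistent on 32-bit machines without a lock in the hot path. The writer brackets its updates with u64_stats_update_begin()/end() while the reader loops on the fetch/retry pair, exactly as nv_get_stats64() does. A condensed sketch of that pairing, with placeholder names:

/* Condensed u64_stats_sync reader/writer sketch; the struct and function
 * names are placeholders, not forcedeth's own.
 */
#include <linux/u64_stats_sync.h>

struct example_sw_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* writer: a single updater per direction (NAPI poll or TX completion) */
static void example_account(struct example_sw_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader: retry until a snapshot untouched by the writer is observed */
static void example_snapshot(struct example_sw_stats *s,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}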
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 8c8027176be..ac4e72d529e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, pch_driver_version);
- strcpy(drvinfo->fw_version, "N/A");
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 48406ca382f..964e9c0948b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* Returns
* 0: HW state updated successfully
*/
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434baf..90497ffb1ac 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index e09ea83b8c4..8a371985319 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
- strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8cf3173ba48..7dd9a4b107e 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
adapter->set_multi(dev);
}
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM)) {
netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int hw_lro;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index a4bdff438a5..7931531c3a4 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
- strncpy(drvinfo->version, ql3xxx_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7ed53dbb864..60976fc4ccc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 8aa1c6e8667..cc228cf3d84 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
}
static int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index bcb81e47543..b528e52a8ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
}
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
}
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
if (!(changed & NETIF_F_LRO))
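
The qlcnic hunks above are part of a tree-wide switch of feature masks from u32 to netdev_features_t, a 64-bit type, so drivers keep working once the stack defines more than 32 NETIF_F_* bits. The standalone sketch below shows why the width matters in a fix_features-style hook; the typedef and both bit positions are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;              /* stand-in for netdev_features_t */

#define F_LRO        ((features_t)1 << 15)  /* illustrative bit numbers only */
#define F_HIGH_BIT   ((features_t)1 << 40)  /* a bit that cannot fit in u32 */

/* fix_features-style hook: clear what the (pretend) hardware cannot do. */
static features_t fix_features(features_t requested)
{
	return requested & ~F_HIGH_BIT;
}

int main(void)
{
	features_t wanted = F_LRO | F_HIGH_BIT;
	uint32_t truncated = (uint32_t)wanted;   /* what a u32-based API would see */

	printf("64-bit mask %#llx, after fix %#llx, truncated to u32 %#x\n",
	       (unsigned long long)wanted,
	       (unsigned long long)fix_features(wanted),
	       truncated);
	return 0;
}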
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bd163828e3..69b8e4ef14d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
/* PCI Device ID Table */
#define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
adapter->pvid = 0;
}
-static void
+static int
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
set_bit(vid, adapter->vlans);
+ return 0;
}
-static void
+static int
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
+ return 0;
}
static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- unsigned long features, vlan_features;
+ netdev_features_t features, vlan_features;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9ef..b8478aab050 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfea035..8e2c2a74f3a 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, qlge_driver_name, 32);
- strncpy(drvinfo->version, qlge_driver_version, 32);
- snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, qlge_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index c92afcd912e..b5489873728 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
}
}
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
return features;
}
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
{
- u32 changed = ndev->features ^ features;
+ netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
return 0;
}
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_add_vid(qdev, vid);
+ err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_kill_vid(qdev, vid);
+ err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
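
The qlge hunks above convert the VLAN add/kill callbacks from void to int so that a failed register write reaches the caller instead of being logged and dropped: the lock-acquisition status is returned early, and otherwise the helper's error code is returned after unlocking. The small userspace analogue below keeps only that control-flow shape; the stubbed lock and the fallible helper are invented for illustration.

#include <errno.h>
#include <stdio.h>

/* Stubs standing in for ql_sem_spinlock()/ql_sem_unlock() and the register
 * write; names and failure modes are made up for this sketch. */
static int  fake_lock(void)            { return 0; }
static void fake_unlock(void)          { }
static int  fake_hw_write(unsigned vid){ return (vid > 4094) ? -EINVAL : 0; }

static int vlan_add_vid(unsigned vid)
{
	int status = fake_lock();
	int err;

	if (status)
		return status;      /* propagate lock failure, don't swallow it */

	err = fake_hw_write(vid);
	fake_unlock();

	return err;                 /* propagate the helper's error code */
}

int main(void)
{
	printf("vid 10 -> %d, vid 5000 -> %d\n",
	       vlan_add_vid(10), vlan_add_vid(5000));
	return 0;
}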
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4bf68cfef39..cb0eca80785 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -52,12 +52,6 @@
#define DRV_VERSION "0.28"
#define DRV_RELDATE "07Oct2011"
-/* PHY CHIP Address */
-#define PHY1_ADDR 1 /* For MAC1 */
-#define PHY2_ADDR 3 /* For MAC2 */
-#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */
-#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */
-
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -69,8 +63,11 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_RCVEN 0x0002 /* Receive enable */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
+#define MCR0_XMTEN 0x1000 /* Transmission enable */
+#define MCR0_FD 0x8000 /* Full/Half duplex */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -129,6 +126,7 @@
#define PHY_CC 0x88 /* PHY status change configuration register */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
+#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -154,9 +152,6 @@
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
-/* PHY settings */
-#define ICPLUS_PHY_ID 0x0243
-
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
"Florian Fainelli <florian@openwrt.org>");
@@ -178,7 +173,7 @@ struct r6040_descriptor {
struct r6040_descriptor *vndescp; /* 14-17 */
struct sk_buff *skb_ptr; /* 18-1B */
u32 rev2; /* 1C-1F */
-} __attribute__((aligned(32)));
+} __aligned(32);
struct r6040_private {
spinlock_t lock; /* driver lock */
@@ -191,7 +186,7 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
- u16 tx_free_desc, phy_addr;
+ u16 tx_free_desc;
u16 mcr0, mcr1;
struct net_device *dev;
struct mii_bus *mii_bus;
@@ -206,8 +201,6 @@ static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
-static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
-
/* Read a word data from PHY Chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
@@ -379,11 +372,11 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
/* Reset internal state machine */
- iowrite16(2, ioaddr + MAC_SM);
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -409,7 +402,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
- iowrite16(lp->mcr0 | 0x0002, ioaddr);
+ iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
/* Let TX poll the descriptors
* we may get called by r6040_tx_timeout which has left
@@ -461,7 +454,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
@@ -742,9 +735,10 @@ static void r6040_mac_address(struct net_device *dev)
void __iomem *ioaddr = lp->base;
u16 *adrp;
- /* MAC operation register */
- iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
- iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+ /* Reset MAC */
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ /* Reset internal state machine */
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -1013,7 +1007,7 @@ static void r6040_adjust_link(struct net_device *dev)
/* reflect duplex change */
if (phydev->link && (lp->old_duplex != phydev->duplex)) {
- lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+ lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
iowrite16(lp->mcr0, ioaddr);
status_changed = 1;
@@ -1166,8 +1160,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->dev = dev;
/* Init RDC private data */
- lp->mcr0 = 0x1002;
- lp->phy_addr = phy_table[card_idx];
+ lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
@@ -1188,7 +1181,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->mii_bus->write = r6040_mdiobus_write;
lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ dev_name(&pdev->dev), card_idx);
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!lp->mii_bus->irq) {
dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ee5da9293ce..abc79076f86 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,6 +563,7 @@ rx_next:
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
+ napi_gro_flush(napi);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
@@ -859,7 +860,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
- u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -886,11 +886,9 @@ static void __cp_set_rx_mode (struct net_device *dev)
}
/* We can safely update without stopping the chip. */
- tmp = cp_rx_config | rx_mode;
- if (cp->rx_config != tmp) {
- cpw32_f (RxConfig, tmp);
- cp->rx_config = tmp;
- }
+ cp->rx_config = cp_rx_config | rx_mode;
+ cpw32_f(RxConfig, cp->rx_config);
+
cpw32_f (MAR0 + 0, mc_filter[0]);
cpw32_f (MAR0 + 4, mc_filter[1]);
}
@@ -1319,9 +1317,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(cp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1390,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
cp->msg_enable = value;
}
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -1589,7 +1587,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() readl(ee_addr)
+#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD (4)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 4d6b254fc6c..a8779bedb3d 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() (void)RTL_R32(Cfg9346)
+#define eeprom_delay() (void)RTL_R8(Cfg9346)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD (5)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
info->regdump_len = tp->regs_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f06aa10f0d..7a0c800b50a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -69,9 +69,6 @@
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
-/* MAC address length */
-#define MAC_ADDR_LEN 6
-
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
@@ -477,7 +474,6 @@ enum rtl_register_content {
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
@@ -485,6 +481,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -1183,11 +1180,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1404,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strcpy(info->driver, MODULENAME);
- strcpy(info->version, RTL8169_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
- rtl_fw->version);
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ strlcpy(info->fw_version, rtl_fw->version,
+ sizeof(info->fw_version));
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1553,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1567,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if (rtl_tbi_enabled(tp)) {
@@ -4099,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&tp->lock);
/* Get MAC address */
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 9b230740c6a..fc9bda9bc36 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1369,13 +1369,13 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
-static struct ethtool_ops sh_eth_ethtool_ops = {
+static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
- .nway_reset = sh_eth_nway_reset,
+ .nway_reset = sh_eth_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
- .get_link = ethtool_op_get_link,
+ .get_link = ethtool_op_get_link,
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
@@ -1957,18 +1957,7 @@ static struct platform_driver sh_eth_driver = {
},
};
-static int __init sh_eth_init(void)
-{
- return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
- platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c3673f151a4..f955a19eb22 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
}
};
-static int __init sgiseeq_module_init(void)
-{
- if (platform_driver_register(&sgiseeq_driver)) {
- printk(KERN_ERR "Driver registration failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
- platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d5731f1fe6d..e43702f33b6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] = i % efx->n_rx_channels;
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->n_rx_channels);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */
}
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4764793ed23..a3541ac6ea0 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,10 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID 0x0803
-#define SIENA_A_P_DEVID 0x0813
-
/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
@@ -65,13 +61,23 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
-extern int efx_filter_insert_filter(struct efx_nic *efx,
+extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
-extern int efx_filter_remove_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec);
+extern int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
+extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f3cd96dfa39..29b2ebfef19 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -818,9 +818,58 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct efx_filter_spec spec;
+ u16 vid;
+ u8 proto;
+ int rc;
+
+ rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == 0xfff)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ rc = efx_filter_get_eth_local(&spec, &vid,
+ rule->h_u.ether_spec.h_dest);
+ if (rc == 0) {
+ rule->flow_type = ETHER_FLOW;
+ memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = htons(vid);
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+ return 0;
+ }
+
+ rc = efx_filter_get_ipv4_local(&spec, &proto,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ if (rc != 0) {
+ rc = efx_filter_get_ipv4_full(
+ &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ EFX_WARN_ON_PARANOID(rc);
+ ip_mask->ip4src = ~0;
+ ip_mask->psrc = ~0;
+ }
+ rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
+ ip_mask->ip4dst = ~0;
+ ip_mask->pdst = ~0;
+ return rc;
+}
+
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *info, u32 *rules __always_unused)
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -862,42 +911,80 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
}
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ return efx_ethtool_get_class_rule(efx, &info->fs);
+
+ case ETHTOOL_GRXCLSRLALL: {
+ s32 rc;
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+ }
+
default:
return -EOPNOTSUPP;
}
}
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
- struct ethtool_rx_ntuple *ntuple)
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
- struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
- struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
- struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
- struct efx_filter_spec filter;
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
int rc;
- /* Range-check action */
- if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
- ntuple->fs.action >= (s32)efx->n_rx_channels)
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY &&
+ rule->location != RX_CLS_LOC_FIRST &&
+ rule->location != RX_CLS_LOC_LAST)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (~ntuple->fs.data_mask)
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+ rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
- (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
- 0xfff : ntuple->fs.action);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ (rule->location == RX_CLS_LOC_FIRST) ?
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ 0xfff : rule->ring_cookie);
- switch (ntuple->fs.flow_type) {
+ switch (rule->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
- u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ u8 proto = (rule->flow_type == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
- if (ip_mask->ip4dst | ip_mask->pdst)
+ if ((__force u32)~ip_mask->ip4dst |
+ (__force u16)~ip_mask->pdst)
return -EINVAL;
/* all or none of source, */
if ((ip_mask->ip4src | ip_mask->psrc) &&
@@ -905,17 +992,17 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
(__force u16)~ip_mask->psrc))
return -EINVAL;
/* and nothing else */
- if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ if (ip_mask->tos | rule->m_ext.vlan_tci)
return -EINVAL;
- if (!ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&filter, proto,
+ if (ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
- rc = efx_filter_set_ipv4_local(&filter, proto,
+ rc = efx_filter_set_ipv4_local(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
@@ -923,23 +1010,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
break;
}
- case ETHER_FLOW:
- /* Must match all of destination, */
- if (!is_zero_ether_addr(mac_mask->h_dest))
+ case ETHER_FLOW | FLOW_EXT:
+ /* Must match all or none of VID */
+ if (rule->m_ext.vlan_tci != htons(0xfff) &&
+ rule->m_ext.vlan_tci != 0)
return -EINVAL;
- /* all or none of VID, */
- if (ntuple->fs.vlan_tag_mask != 0xf000 &&
- ntuple->fs.vlan_tag_mask != 0xffff)
+ case ETHER_FLOW:
+ /* Must match all of destination */
+ if (!is_broadcast_ether_addr(mac_mask->h_dest))
return -EINVAL;
/* and nothing else */
- if (!is_broadcast_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto != htons(0xffff))
+ if (!is_zero_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto)
return -EINVAL;
rc = efx_filter_set_eth_local(
- &filter,
- (ntuple->fs.vlan_tag_mask == 0xf000) ?
- ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ &spec,
+ (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
+ ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
mac_entry->h_dest);
if (rc)
return rc;
@@ -949,47 +1037,57 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
return -EINVAL;
}
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
- return efx_filter_remove_filter(efx, &filter);
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
- rc = efx_filter_insert_filter(efx, &filter, true);
- return rc < 0 ? rc : 0;
+ rule->location = rc;
+ return 0;
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
- struct ethtool_rxfh_indir *indir)
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t copy_size =
- min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
- indir->size = ARRAY_SIZE(efx->rx_indir_table);
- memcpy(indir->ring_index, efx->rx_indir_table,
- copy_size * sizeof(indir->ring_index[0]));
- return 0;
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return efx_ethtool_set_class_rule(efx, &info->fs);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t i;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
+ return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
- /* Validate size and indices */
- if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- if (indir->ring_index[i] >= efx->n_rx_channels)
- return -EINVAL;
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
- memcpy(efx->rx_indir_table, indir->ring_index,
- sizeof(efx->rx_indir_table));
+ memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx_nic_push_rx_indir_table(efx);
return 0;
}
@@ -1019,7 +1117,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
- .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 97b606b92e8..8ae1ebd3539 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
if (!nic_data->stats_pending)
return;
- nic_data->stats_pending = 0;
+ nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2b9636f96e0..1fbbbee7b1a 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -155,6 +155,16 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
spec->data[2] = ntohl(host2);
}
+static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
+ __be32 *host1, __be16 *port1,
+ __be32 *host2, __be16 *port2)
+{
+ *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ *port1 = htons(spec->data[0]);
+ *host2 = htonl(spec->data[2]);
+ *port2 = htons(spec->data[1] >> 16);
+}
+
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
@@ -205,6 +215,26 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ switch (spec->type) {
+ case EFX_FILTER_TCP_WILD:
+ *proto = IPPROTO_TCP;
+ __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
+ return 0;
+ case EFX_FILTER_UDP_WILD:
+ *proto = IPPROTO_UDP;
+ __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
@@ -242,6 +272,25 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport)
+{
+ switch (spec->type) {
+ case EFX_FILTER_TCP_FULL:
+ *proto = IPPROTO_TCP;
+ break;
+ case EFX_FILTER_UDP_FULL:
+ *proto = IPPROTO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __efx_filter_get_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
/**
* efx_filter_set_eth_local - specify local Ethernet address and optional VID
* @spec: Specification to initialise
@@ -270,6 +319,29 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
return 0;
}
+int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr)
+{
+ switch (spec->type) {
+ case EFX_FILTER_MAC_WILD:
+ *vid = EFX_FILTER_VID_UNSPEC;
+ break;
+ case EFX_FILTER_MAC_FULL:
+ *vid = spec->data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ addr[0] = spec->data[2] >> 8;
+ addr[1] = spec->data[2];
+ addr[2] = spec->data[1] >> 24;
+ addr[3] = spec->data[1] >> 16;
+ addr[4] = spec->data[1] >> 8;
+ addr[5] = spec->data[1];
+ return 0;
+}
+
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
@@ -332,7 +404,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
- bool for_insert, int *depth_required)
+ bool for_insert, unsigned int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
@@ -366,12 +438,59 @@ static int efx_filter_search(struct efx_filter_table *table,
}
}
-/* Construct/deconstruct external filter IDs */
+/*
+ * Construct/deconstruct external filter IDs. These must be ordered
+ * by matching priority, for RX NFC semantics.
+ *
+ * Each RX MAC filter entry has a flag for whether it can override an
+ * RX IP filter that also matches. So we assign locations for MAC
+ * filters with overriding behaviour, then for IP filters, then for
+ * MAC filters without overriding behaviour.
+ */
+
+#define EFX_FILTER_INDEX_WIDTH 13
+#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
+ unsigned int index, u8 flags)
+{
+ return (table_id == EFX_FILTER_TABLE_RX_MAC &&
+ flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
+ index :
+ (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
+{
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_TABLE_RX_MAC :
+ (id >> EFX_FILTER_INDEX_WIDTH) - 1;
+}
+
+static inline unsigned int efx_filter_id_index(u32 id)
+{
+ return id & EFX_FILTER_INDEX_MASK;
+}
-static inline int
-efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+static inline u8 efx_filter_id_flags(u32 id)
{
- return table_id << 16 | index;
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
+ EFX_FILTER_FLAG_RX;
+}
+
+u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+
+ if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
+ return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_MAC].size;
+ else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
+ return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_IP].size;
+ else
+ return 0;
}
/**
@@ -384,14 +503,14 @@ efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
* On success, return the filter ID.
* On failure, return a negative error code.
*/
-int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- int filter_idx, depth;
+ unsigned int filter_idx, depth;
u32 key;
int rc;
@@ -439,7 +558,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(table->id, filter_idx);
+ rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
out:
spin_unlock_bh(&state->lock);
@@ -448,7 +567,7 @@ out:
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
- int filter_idx)
+ unsigned int filter_idx)
{
static efx_oword_t filter;
@@ -463,48 +582,101 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
}
/**
- * efx_filter_remove_filter - remove a filter by specification
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
- * @spec: Specification for the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
- * On success, return zero.
- * On failure, return a negative error code.
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
*/
-int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
- efx_oword_t filter;
- int filter_idx, depth;
- u32 key;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_filter_spec *spec;
+ u8 filter_flags;
int rc;
- if (!table)
- return -EINVAL;
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
- key = efx_filter_build(&filter, spec);
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
- spin_lock_bh(&state->lock);
+ filter_flags = efx_filter_id_flags(filter_id);
- rc = efx_filter_search(table, spec, key, false, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- saved_spec = &table->spec[filter_idx];
+ spin_lock_bh(&state->lock);
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
}
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
+ spin_unlock_bh(&state->lock);
+
+ return rc;
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ u8 filter_flags;
+ int rc;
+
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ filter_flags = efx_filter_id_flags(filter_id);
+
+ spin_lock_bh(&state->lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ *spec_buf = *spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
-out:
spin_unlock_bh(&state->lock);
+
return rc;
}
@@ -514,7 +686,7 @@ static void efx_filter_table_clear(struct efx_nic *efx,
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
@@ -538,6 +710,68 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
+u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
+s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_filter_make_id(
+ table_id, filter_idx,
+ table->spec[filter_idx].flags);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
@@ -545,7 +779,7 @@ void efx_restore_filters(struct efx_nic *efx)
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
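
The external-ID scheme added above packs a table selector and a 13-bit index so that IDs sort by matching priority: MAC filters carrying the IP-override flag occupy the low range, then each further table is shifted up by (table_id + 1) << 13. The self-contained encoder/decoder below mirrors the helpers in this hunk; the table enumerator values (RX_IP = 0, RX_MAC = 1) and the flag bit values are assumptions made for the demo, not taken from the driver headers.

#include <stdio.h>

#define INDEX_WIDTH 13
#define INDEX_MASK  ((1u << INDEX_WIDTH) - 1)

enum table_id { TABLE_RX_IP = 0, TABLE_RX_MAC = 1 };   /* assumed values */
#define FLAG_RX             0x1                         /* illustrative flags */
#define FLAG_RX_OVERRIDE_IP 0x2

static unsigned make_id(enum table_id table, unsigned index, unsigned flags)
{
	/* MAC filters that may override IP filters get the low, highest-priority range */
	if (table == TABLE_RX_MAC && (flags & FLAG_RX_OVERRIDE_IP))
		return index;
	return ((table + 1u) << INDEX_WIDTH) | index;
}

static enum table_id id_table(unsigned id)
{
	return (id <= INDEX_MASK) ? TABLE_RX_MAC
				  : (enum table_id)((id >> INDEX_WIDTH) - 1);
}

static unsigned id_index(unsigned id) { return id & INDEX_MASK; }

int main(void)
{
	unsigned a = make_id(TABLE_RX_MAC, 7, FLAG_RX | FLAG_RX_OVERRIDE_IP);
	unsigned b = make_id(TABLE_RX_IP, 7, FLAG_RX);
	unsigned c = make_id(TABLE_RX_MAC, 7, FLAG_RX);

	printf("override-MAC=%u  ip=%u  plain-MAC=%u\n", a, b, c);
	printf("decode plain-MAC: table=%d index=%u\n", id_table(c), id_index(c));
	return 0;
}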
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 872f2132a49..3d4108cd90c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -78,6 +78,11 @@ enum efx_filter_flags {
*
* Use the efx_filter_set_*() functions to initialise the @type and
* @data fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
+ * flag.
*/
struct efx_filter_spec {
u8 type:4;
@@ -100,11 +105,18 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
+extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport);
+extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
+extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr);
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b6304486f24..bc9dcd6b30d 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
int rc = 0;
if (part->mcdi.updating) {
- part->mcdi.updating = 0;
+ part->mcdi.updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a1ee4..c49502bab6a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -908,7 +908,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- u32 offload_features;
+ netdev_features_t offload_features;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 752d521c09b..aca34986176 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_frag_set_page(skb, 0, page);
- skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(efx, rx_buf);
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
- skb_shinfo(skb)->nr_frags = 1;
+ skb_fill_page_desc(skb, 0, page,
+ efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
@@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->ptr_mask);
/* Allocate RX buffers */
- rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2a6a7..52edd24fcde 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc(sizeof(state->skbs[0]) *
- state->packet_count, GFP_KERNEL);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
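
The sfc hunks here and in rx.c/tx.c replace kzalloc(n * size, ...) with kcalloc(n, size, ...), whose point is that the element-count multiplication is overflow-checked before allocating. The userspace demonstration below uses calloc() for the same contrast; what malloc() does with the wrapped size is allocator-dependent, so treat that branch as illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 1;   /* n * 4 wraps to 0 in size_t arithmetic */
	size_t size = 4;

	void *a = calloc(n, size);     /* overflow-checked: must return NULL */
	void *b = malloc(n * size);    /* silently requests the wrapped size */

	printf("calloc(n, 4)  -> %p (expected NULL)\n", a);
	printf("malloc(n * 4) -> %p (wrapped request of %zu bytes)\n",
	       b, n * size);

	free(a);
	free(b);
	return 0;
}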
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index cc2549cb707..4d5d619feaa 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = 0;
+ bool already_attached = false;
efx_oword_t reg;
int rc;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c5430f9..72f0fbc73b1 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->skb = skb;
buffer->continuation = false;
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
unwind:
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->len = 0;
}
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
buffer->continuation = true;
buffer->len = 0;
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
/* See if we need to restart the netif queue. This barrier
* separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
}
+ netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err;
}
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
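
The netdev_tx_sent_queue()/netdev_tx_completed_queue()/netdev_tx_reset_queue() calls added above wire the sfc TX path into byte queue limits (BQL). The general pattern, sketched with hypothetical driver structures (ring->txq stands for the struct netdev_queue the driver already holds):

    /* BQL sketch: account bytes when a packet is queued, report completed
     * packets/bytes from the completion path, reset on ring teardown. */
    static netdev_tx_t my_xmit(struct my_ring *ring, struct sk_buff *skb)
    {
            /* ... map buffers and post descriptors ... */
            netdev_tx_sent_queue(ring->txq, skb->len);
            return NETDEV_TX_OK;
    }

    static void my_tx_complete(struct my_ring *ring)
    {
            unsigned int pkts = 0, bytes = 0;

            /* ... walk completed descriptors, summing pkts and bytes ... */
            netdev_tx_completed_queue(ring->txq, pkts, bytes);
    }

    static void my_ring_flush(struct my_ring *ring)
    {
            /* ... free any packets still on the ring ... */
            netdev_tx_reset_queue(ring->txq);
    }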
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa5580..53efe7c7b1c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -28,6 +28,7 @@
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h> /* MII definitions */
+#include <linux/crc32.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
@@ -58,12 +59,19 @@ static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64-element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
+/*
* This structure is private to each device. It is used to pass
* packets in and out, so there is place for a packet
*/
struct meth_private {
/* in-memory copy of MAC Control register */
- unsigned long mac_ctrl;
+ u64 mac_ctrl;
+
/* in-memory copy of DMA Control register */
unsigned long dma_ctrl;
/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@ struct meth_private {
struct sk_buff *rx_skbs[RX_RING_ENTRIES];
unsigned long rx_write;
+ /* Multicast filter. */
+ u64 mcast_filter;
+
spinlock_t meth_lock;
};
@@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
+static void meth_set_rx_mode(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ priv->mac_ctrl &= ~METH_PROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ priv->mac_ctrl |= METH_PROMISC;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else {
+ struct netdev_hw_addr *ha;
+ priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+ (volatile unsigned long *)&priv->mcast_filter);
+ }
+
+ /* Write the changes to the chip registers. */
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ mace->eth.mcast_filter = priv->mcast_filter;
+
+ /* Done! */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ netif_wake_queue(dev);
+}
+
static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
@@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = meth_set_rx_mode,
};
/*
@@ -830,24 +876,7 @@ static struct platform_driver meth_driver = {
}
};
-static int __init meth_init_module(void)
-{
- int err;
-
- err = platform_driver_register(&meth_driver);
- if (err)
- printk(KERN_ERR "Driver registration failed\n");
-
- return err;
-}
-
-static void __exit meth_exit_module(void)
-{
- platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 1b4658c9939..5b118cd5bf9 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -47,8 +47,6 @@
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
-#define MAC_ADDR_LEN 6
-
#define NUM_TX_DESC 64 /* [8..1024] */
#define NUM_RX_DESC 64 /* [8..8192] */
#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
}
/* Get MAC address from EEPROM */
- for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+ for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
udelay(50);
pci_read_config_byte(isa_bridge, 0x48, &reg);
- for (i = 0; i < MAC_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
dev->dev_addr[i] = inb(0x79);
}
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
*/
SIS_W16(RxMacControl, ctl & ~0x0f00);
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ sizeof(info->bus_info));
}
static int sis190_get_regs_len(struct net_device *dev)
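
The strcpy() to strlcpy() conversions above, repeated in the drivers that follow, all have one shape: bound every copy by the size of the destination field so an over-long name cannot overrun struct ethtool_drvinfo. Generic sketch (DRV_NAME/DRV_VERSION are whatever the driver defines; the bus_info source is illustrative):

    static void my_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
    {
            /* strlcpy() always NUL-terminates and never writes past the
             * destination size, unlike the strcpy() it replaces. */
            strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
            strlcpy(info->version, DRV_VERSION, sizeof(info->version));
            strlcpy(info->bus_info, dev_name(dev->dev.parent),
                    sizeof(info->bus_info));
    }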
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index a184abc5ef1..c8efc708c79 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strcpy (info->driver, SIS900_MODULE_NAME);
- strcpy (info->version, SIS900_DRV_VERSION);
- strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+ strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 sis900_get_msglevel(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 150511a922e..1341f33e608 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -205,7 +205,7 @@ enum sis900_tx_buffer_status {
EXCCOLL = 0x00100000, COLCNT = 0x000F0000
};
-enum sis900_rx_bufer_status {
+enum sis900_rx_buffer_status {
OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0a5dfb81415..2c077ce0b6d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8f61fe9db1d..313ba3b32ab 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
},
};
-static int __init smc911x_init(void)
-{
- return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
- platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cbfa9818713..ada927aba7a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f47f81e2532..64ad3ed7449 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
},
};
-static int __init smc_init(void)
-{
- return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
- platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
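
module_platform_driver() generates the registration boilerplate these drivers used to carry by hand; roughly, the one-liner above stands in for:

    /* Approximate expansion of module_platform_driver(smc_driver). */
    static int __init smc_driver_init(void)
    {
            return platform_driver_register(&smc_driver);
    }
    module_init(smc_driver_init);

    static void __exit smc_driver_exit(void)
    {
            platform_driver_unregister(&smc_driver);
    }
    module_exit(smc_driver_exit);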
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8843071fe98..9d0b8ced023 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
unsigned int *buf, unsigned int wordcount);
};
+#define SMSC911X_NUM_SUPPLIES 2
+
struct smsc911x_data {
void __iomem *ioaddr;
@@ -138,6 +141,9 @@ struct smsc911x_data {
/* register access functions */
const struct smsc911x_ops *ops;
+
+ /* regulators */
+ struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
};
/* Easy access to information */
@@ -362,6 +368,76 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "failed to enable regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a, in designs where
+ * these are not always-on we need to request regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ /* Request regulators */
+ pdata->supplies[0].supply = "vdd33a";
+ pdata->supplies[1].supply = "vddvario";
+ ret = regulator_bulk_get(&pdev->dev,
+ ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "couldn't get regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* Free regulators */
+ regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+}
+
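
The four helpers added above wrap the regulator bulk API around the chip's two supplies. The underlying lifecycle they implement, sketched as a fragment with error handling trimmed:

    /* get -> enable -> (use device) -> disable -> free */
    struct regulator_bulk_data supplies[] = {
            { .supply = "vdd33a" },
            { .supply = "vddvario" },
    };
    int err;

    err = regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies), supplies);
    if (!err)
            err = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
    /* ... access the device registers ... */
    regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
    regulator_bulk_free(ARRAY_SIZE(supplies), supplies);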
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -1243,10 +1319,92 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /*
+ * If energy is detected the PHY is already awake, so it is not
+ * necessary to disable the energy-detect power-down mode.
+ */
+ if ((rc & MII_LAN83C185_EDPWRDOWN) &&
+ !(rc & MII_LAN83C185_ENERGYON)) {
+ /* Disable energy-detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc & (~MII_LAN83C185_EDPWRDOWN));
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /* Only enable if energy detect mode is already disabled */
+ if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
+ mdelay(100);
+ /* Enable energy-detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ mdelay(1);
+ }
+ return 0;
+}
+
static int smsc911x_soft_reset(struct smsc911x_data *pdata)
{
unsigned int timeout;
unsigned int temp;
+ int ret;
+
+ /*
+ * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that is
+ * initialized in Energy Detect Power-Down mode, which prevents the MAC
+ * chip from being soft-reset, so we have to wake the PHY up first.
+ */
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_disable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
/* Reset the LAN911x */
smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
@@ -1260,6 +1418,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed to complete reset");
return -EIO;
}
+
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_enable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -2092,6 +2260,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
iounmap(pdata->ioaddr);
+ (void)smsc911x_disable_resources(pdev);
+ smsc911x_free_resources(pdev);
+
free_netdev(dev);
return 0;
@@ -2218,10 +2389,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
+ platform_set_drvdata(pdev, dev);
+
+ retval = smsc911x_request_resources(pdev);
+ if (retval)
+ goto out_return_resources;
+
+ retval = smsc911x_enable_resources(pdev);
+ if (retval)
+ goto out_disable_resources;
+
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
- goto out_free_netdev_2;
+ goto out_disable_resources;
}
retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2233,7 +2414,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe, "Error smsc911x config not found");
- goto out_unmap_io_3;
+ goto out_disable_resources;
}
/* assume standard, non-shifted, access to HW registers */
@@ -2244,7 +2425,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_unmap_io_3;
+ goto out_disable_resources;
/* configure irq polarity and type before connecting isr */
if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2264,15 +2445,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
- goto out_unmap_io_3;
+ goto out_free_irq;
}
- platform_set_drvdata(pdev, dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_unset_drvdata_4;
+ goto out_free_irq;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2321,12 +2500,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
out_unregister_netdev_5:
unregister_netdev(dev);
-out_unset_drvdata_4:
- platform_set_drvdata(pdev, NULL);
+out_free_irq:
free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+ (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+ smsc911x_free_resources(pdev);
+ platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
-out_free_netdev_2:
free_netdev(dev);
out_release_io_1:
release_mem_region(res->start, resource_size(res));
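
The generation-4 changes to smsc911x_soft_reset() above bracket the reset with the two new PHY helpers, because a LAN9220-class PHY left in energy-detect power-down would otherwise block the soft reset. Condensed sketch of that sequence (register names come from <linux/smscphy.h>, now pulled in by smsc911x.h):

    /* Wake the PHY, reset the MAC, then restore energy-detect mode. */
    static int soft_reset_with_edpd(struct smsc911x_data *pdata)
    {
            int ret = smsc911x_phy_disable_energy_detect(pdata);

            if (ret)
                    return ret;
            smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
            /* ... poll HW_CFG until HW_CFG_SRST_ clears, -EIO on timeout ... */
            return smsc911x_phy_enable_energy_detect(pdata);
    }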
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d67aacf886..9ad5e5d39a0 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -401,4 +401,6 @@
#include <asm/smsc911x.h>
#endif
+#include <linux/smscphy.h>
+
#endif /* __SMSC911X_H__ */
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index edb24b0e337..a9efbdfe530 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(pd->pdev));
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 22745d7bf53..036428348fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,11 +12,36 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_PLATFORM
+ tristate "STMMAC platform bus support"
+ depends on STMMAC_ETH
+ default y
+ ---help---
+ This selects the platform specific bus support for
+ the stmmac device driver. This is the driver used
+ on many embedded STM platforms based on ARM and SuperH
+ processors.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config STMMAC_PCI
+ tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ ---help---
+ This is to select the Synopsys DWMAC available on PCI devices,
+ if you have a controller with this interface, say Y or M here.
+
+ This PCI support is tested on XILINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board.
+
+ If unsure, say N.
+
config STMMAC_DEBUG_FS
bool "Enable monitoring via sysFS "
default n
depends on STMMAC_ETH && DEBUG_FS
- -- help
+ ---help---
The stmmac entry in /sys reports DMA TX/RX rings
or (if supported) the HW cap register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d7c45164ea7..bc965ac9e02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2cc11929582..d0b814ef067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,7 +22,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e25093510b0..f20aa12931d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
+/* Enable or disable the MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ if (enable)
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
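
stmmac_set_mac() above folds the old per-direction enable/disable helpers from stmmac_main.c into one read-modify-write of MAC_CTRL_REG; callers now just pass a bool:

    stmmac_set_mac(priv->ioaddr, true);   /* open / resume: enable RX and TX */
    stmmac_set_mac(priv->ioaddr, false);  /* release / suspend / remove */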
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a140a8fbf05..120740020e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_2011"
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define DRV_MODULE_VERSION "Dec_2011"
#include <linux/stmmac.h>
#include <linux/phy.h>
#include "common.h"
@@ -82,8 +83,18 @@ struct stmmac_priv {
int hw_cap_support;
};
+extern int phyaddr;
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 0395f9eba80..9573303a706 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac)
- strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ sizeof(info->driver));
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
@@ -458,7 +459,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static struct ethtool_ops stmmac_ethtool_ops = {
+static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_settings = stmmac_ethtool_getsettings,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8ea770a89f2..3738b470054 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,12 +28,8 @@
https://bugzilla.stlinux.com/
*******************************************************************************/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
@@ -52,8 +48,6 @@
#endif
#include "stmmac.h"
-#define STMMAC_RESOURCE_NAME "stmmaceth"
-
#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
-static int phyaddr = -1;
+int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
@@ -345,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
/**
* display_ring
* @p: pointer to the ring.
@@ -781,10 +764,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+ /* Mask the MMC irq; counters are managed in SW and the registers
+ * are cleared on each read when needed. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -882,6 +870,53 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
}
/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this is to attach the GMAC or MAC 10/100
+ * main core structures that will be completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ struct mac_device_info *device;
+
+ if (priv->plat->has_gmac)
+ device = dwmac1000_setup(priv->ioaddr);
+ else
+ device = dwmac100_setup(priv->ioaddr);
+
+ if (!device)
+ return -ENOMEM;
+
+ priv->hw = device;
+ priv->hw->ring = &ring_mode_ops;
+
+ if (device_can_wakeup(priv->device)) {
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(priv->wol_irq);
+ }
+
+ return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ /* verify that the MAC address is valid; if it is not,
+ * generate a random MAC address */
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->dev->base_addr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ random_ether_addr(priv->dev->dev_addr);
+ }
+ pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -895,18 +930,28 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- /* Check that the MAC address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using the following linux command:
- * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
- pr_warning("%s: generated random MAC address %pM\n", dev->name,
- dev->dev_addr);
- }
+ /* MAC HW device setup */
+ ret = stmmac_mac_device_setup(dev);
+ if (ret < 0)
+ return ret;
+
+ stmmac_check_ether_addr(priv);
stmmac_verify_args();
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(dev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ return ret;
+ }
+
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -1003,7 +1048,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Enable the MAC Rx/Tx */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1012,9 +1057,13 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warning("\tFailed debugFS registration");
+#endif
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
priv->hw->dma->start_tx(priv->ioaddr);
@@ -1087,10 +1136,15 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+ stmmac_mdio_unregister(dev);
+
return 0;
}
@@ -1466,7 +1520,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1734,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = {
};
/**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
*/
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat)
{
int ret = 0;
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ return NULL;
+ }
- ether_setup(dev);
+ SET_NETDEV_DEV(ndev, device);
- dev->netdev_ops = &stmmac_netdev_ops;
- stmmac_set_ethtool_ops(dev);
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ ether_setup(ndev);
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(ndev);
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
- dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
- dev->features |= NETIF_F_HW_VLAN_RX;
+ ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -1763,248 +1831,60 @@ static int stmmac_probe(struct net_device *dev)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
priv->pause = pause;
- netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
- /* Get the MAC address */
- priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
- dev->dev_addr, 0);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- pr_warning("\tno valid MAC address;"
- "please, use ifconfig or nwhwconfig!\n");
+ priv->plat = plat_dat;
+ netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
- ret = register_netdev(dev);
+ ret = register_netdev(ndev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
__func__, ret);
- return -ENODEV;
+ goto error;
}
DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
- dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
+ ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+ (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
- return ret;
-}
+ return priv;
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- struct mac_device_info *device;
+error:
+ netif_napi_del(&priv->napi);
- if (priv->plat->has_gmac) {
- dev->priv_flags |= IFF_UNICAST_FLT;
- device = dwmac1000_setup(priv->ioaddr);
- } else {
- device = dwmac100_setup(priv->ioaddr);
- }
-
- if (!device)
- return -ENOMEM;
-
- priv->hw = device;
- priv->hw->ring = &ring_mode_ops;
-
- if (device_can_wakeup(priv->device)) {
- priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- enable_irq_wake(priv->wol_irq);
- }
-
- return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res;
- void __iomem *addr = NULL;
- struct net_device *ndev = NULL;
- struct stmmac_priv *priv = NULL;
- struct plat_stmmacenet_data *plat_dat;
-
- pr_info("STMMAC driver:\n\tplatform registration... ");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- pr_info("\tdone!\n");
-
- if (!request_mem_region(res->start, resource_size(res),
- pdev->name)) {
- pr_err("%s: ERROR: memory allocation failed"
- "cannot get the I/O addr 0x%x\n",
- __func__, (unsigned int)res->start);
- return -EBUSY;
- }
-
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- pr_err("%s: ERROR: memory mapping failed\n", __func__);
- ret = -ENOMEM;
- goto out_release_region;
- }
-
- ndev = alloc_etherdev(sizeof(struct stmmac_priv));
- if (!ndev) {
- pr_err("%s: ERROR: allocating the device\n", __func__);
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- /* Get the MAC information */
- ndev->irq = platform_get_irq_byname(pdev, "macirq");
- if (ndev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- ret = -ENXIO;
- goto out_free_ndev;
- }
-
- priv = netdev_priv(ndev);
- priv->device = &(pdev->dev);
- priv->dev = ndev;
- plat_dat = pdev->dev.platform_data;
-
- priv->plat = plat_dat;
-
- priv->ioaddr = addr;
-
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
- priv->wol_irq = ndev->irq;
-
- platform_set_drvdata(pdev, ndev);
-
- /* Set the I/O base addr */
- ndev->base_addr = (unsigned long)addr;
-
- /* Custom initialisation */
- if (priv->plat->init) {
- ret = priv->plat->init(pdev);
- if (unlikely(ret))
- goto out_free_ndev;
- }
-
- /* MAC HW device detection */
- ret = stmmac_mac_device_setup(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Network Device Registration */
- ret = stmmac_probe(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Override with kernel parameters if supplied XXX CRS XXX
- * this needs to have multiple instances */
- if ((phyaddr >= 0) && (phyaddr <= 31))
- priv->plat->phy_addr = phyaddr;
-
- pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, addr);
-
- /* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
- ret = stmmac_mdio_register(ndev);
- if (ret < 0)
- goto out_unregister;
- pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- pr_warning("\tFailed debugFS registration");
-#endif
-
- return 0;
-
-out_unregister:
unregister_netdev(ndev);
-out_plat_exit:
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-out_free_ndev:
free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
-out_unmap:
- iounmap(addr);
-out_release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return NULL;
}
/**
* stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
*/
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_disable_mac(priv->ioaddr);
-
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
-
- stmmac_mdio_unregister(ndev);
-
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-
- platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
-
- iounmap((void *)priv->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- stmmac_exit_fs();
-#endif
-
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
@@ -2038,15 +1918,14 @@ static int stmmac_suspend(struct device *dev)
if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev))
@@ -2065,7 +1944,7 @@ static int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
/* Enable the MAC and DMA */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
@@ -2085,68 +1964,23 @@ static int stmmac_resume(struct device *dev)
return 0;
}
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_release(ndev);
}
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_open(ndev);
}
-
-static const struct dev_pm_ops stmmac_pm_ops = {
- .suspend = stmmac_suspend,
- .resume = stmmac_resume,
- .freeze = stmmac_freeze,
- .thaw = stmmac_restore,
- .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
#endif /* CONFIG_PM */
-static struct platform_driver stmmac_driver = {
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
- .pm = &stmmac_pm_ops,
- },
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = platform_driver_register(&stmmac_driver);
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- platform_driver_unregister(&stmmac_driver);
-}
-
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2206,9 +2040,6 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
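
With stmmac_main.c stripped of platform_device knowledge, the exported stmmac_dvr_probe()/stmmac_dvr_remove() pair becomes the contract between the core and the new bus-glue files that follow. A minimal sketch of what a glue layer does once the core probe returns, distilled from the PCI and platform glue below (the function name is illustrative):

    static int my_glue_attach(struct device *dev,
                              struct plat_stmmacenet_data *plat_dat,
                              void __iomem *addr, int irq)
    {
            struct stmmac_priv *priv = stmmac_dvr_probe(dev, plat_dat);

            if (!priv)
                    return -ENODEV;

            /* Bus-specific bits the core no longer fills in itself. */
            priv->ioaddr = addr;
            priv->dev->base_addr = (unsigned long)addr;
            priv->dev->irq = irq;
            priv->wol_irq = irq;
            return 0;
    }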
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5c341..51f44123396 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
static int stmmac_mdio_reset(struct mii_bus *bus)
{
+#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read.
*/
writel(0, priv->ioaddr + mii_address);
-
+#endif
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 00000000000..54a819a3648
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+ plat_dat.pbl = 32;
+ plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat_dat.has_gmac = 1;
+ plat_dat.force_sf_dma_mode = 1;
+
+ mdio_data.bus_id = 1;
+ mdio_data.phy_reset = NULL;
+ mdio_data.phy_mask = 0;
+ plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probe function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. It is
+ * passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver
+ * chooses to take "ownership" of the device, or a negative error code
+ * otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ int i;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+ pci_name(pdev));
+ return ret;
+ }
+ if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+ pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+ ret = -ENODEV;
+ goto err_out_req_reg_failed;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ addr = pci_iomap(pdev, i, 0);
+ if (addr == NULL) {
+ pr_err("%s: ERROR: cannot map regiser memory, aborting",
+ __func__);
+ ret = -EIO;
+ goto err_out_map_failed;
+ }
+ break;
+ }
+ pci_set_master(pdev);
+
+ stmmac_default_data();
+
+ priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+ if (!priv) {
+ pr_err("%s: main drivr probe failed", __func__);
+ goto err_out;
+ }
+ priv->ioaddr = addr;
+ priv->dev->base_addr = (unsigned long)addr;
+ priv->dev->irq = pdev->irq;
+ priv->wol_irq = pdev->irq;
+
+ pci_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+err_out:
+ pci_clear_master(pdev);
+err_out_map_failed:
+ pci_release_regions(pdev);
+err_out_req_reg_failed:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources and then releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, priv->ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = stmmac_suspend(ndev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+ {
+ PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
+ }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = stmmac_pci_suspend,
+ .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&stmmac_driver);
+ if (ret < 0)
+ pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 00000000000..7b1594f4944
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,198 @@
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to init
+ * the net device, register the mdio bus etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed"
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ return -EBUSY;
+ }
+
+ addr = ioremap(res->start, resource_size(res));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed", __func__);
+ ret = -ENOMEM;
+ goto out_release_region;
+ }
+ plat_dat = pdev->dev.platform_data;
+ priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+ if (!priv) {
+ pr_err("%s: main drivr probe failed", __func__);
+ goto out_release_region;
+ }
+
+ priv->ioaddr = addr;
+ /* Set the I/O base addr */
+ priv->dev->base_addr = (unsigned long)addr;
+
+ /* Get the MAC information */
+ priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (priv->dev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
+ /*
+ * On some platforms, e.g. SPEAr, the wake-up irq differs from the mac
+ * irq. The external wake-up irq can be passed through the platform
+ * code, named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform code,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = priv->dev->irq;
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_unmap;
+ }
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+out_unmap:
+ iounmap(addr);
+ platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources, invokes the platform exit hook and releases the
+ * resources (e.g. the memory region).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+ int ret = stmmac_dvr_remove(ndev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap((void *)priv->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_resume(ndev);
+}
+
+static int stmmac_pltfr_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_freeze(ndev);
+}
+
+static int stmmac_pltfr_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ .suspend = stmmac_pltfr_suspend,
+ .resume = stmmac_pltfr_resume,
+ .freeze = stmmac_pltfr_freeze,
+ .thaw = stmmac_pltfr_restore,
+ .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pltfr_pm_ops,
+ },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fd40988c19a..f10665f594c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
- info->fw_version[0] = '\0';
- strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS;
info->n_stats = CAS_NUM_STAT_KEYS;
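[Editor's note] This hunk, and several that follow, replace strcpy()/strncpy()/sprintf() in get_drvinfo with strlcpy()/snprintf() sized by the destination field. A minimal sketch of why (not part of the patch; names are illustrative):

    #include <linux/kernel.h>
    #include <linux/string.h>

    /* strncpy() does not NUL-terminate when src fills the buffer and it
     * zero-pads the rest; strlcpy() always terminates and returns the
     * length it tried to copy, so truncation can be detected.  Sizing the
     * copy by sizeof(dst) also removes the ETHTOOL_BUSINFO_LEN mismatch
     * the old cassini code had.
     */
    static void drvinfo_copy_example(char *dst, size_t dst_len, const char *src)
    {
    	strncpy(dst, src, dst_len);		/* may leave dst unterminated */

    	if (strlcpy(dst, src, dst_len) >= dst_len)
    		pr_warn("drvinfo string truncated\n");	/* always terminated */
    }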
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c708107a3..cf433931304 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
supported |= SUPPORTED_1000baseT_Full;
lp->supported = supported;
- advertising = 0;
- if (advert & ADVERTISE_10HALF)
- advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- advertising |= ADVERTISED_100baseT_Full;
- if (ctrl1000 & ADVERTISE_1000HALF)
- advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- advertising |= ADVERTISED_1000baseT_Full;
+ advertising = mii_adv_to_ethtool_adv_t(advert);
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmcr & BMCR_ANENABLE) {
int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
+ unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- while (pkt_cnt--)
+ tx_bytes = 0;
+ tmp = pkt_cnt;
+ while (tmp--) {
+ tx_bytes += rp->tx_buffs[cons].skb->len;
cons = release_tx_packet(np, rp, cons);
+ }
rp->cons = cons;
smp_mb();
+ netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
+ netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
+ netdev_tx_sent_queue(txq, skb->len);
+
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- sprintf(info->fw_version, "%d.%d",
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
}
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
if (dev_id_1 < 0 || dev_id_2 < 0)
return 0;
if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+ /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+ * test covers the 8706 as well.
+ */
if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
return 0;
} else {
if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
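[Editor's note] The netdev_tx_sent_queue()/netdev_tx_completed_queue()/netdev_tx_reset_queue() calls added to niu wire the driver into byte queue limits (BQL). A generic sketch of where the three calls sit (the "niu_example_*" names are placeholders, not niu functions):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t niu_example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

    	/* ...post skb to the hardware ring... */
    	netdev_tx_sent_queue(txq, skb->len);	/* bytes queued to HW */
    	return NETDEV_TX_OK;
    }

    static void niu_example_tx_clean(struct net_device *dev, unsigned int pkts,
    				 unsigned int bytes)
    {
    	/* bytes/packets actually completed by the hardware */
    	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
    }

    static void niu_example_ring_free(struct net_device *dev)
    {
    	/* forget the accounting when the ring is torn down */
    	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
    }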
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d8cfd9ea05..220f724c337 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
.remove = __devexit_p(bigmac_sbus_remove),
};
-static int __init bigmac_init(void)
-{
- return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
- platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
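[Editor's note] This hunk, and several later ones, replace hand-written module_init()/module_exit() boilerplate with module_platform_driver(). Roughly (an approximation, not the literal macro text from <linux/platform_device.h>), the macro generates the same pair it replaces:

    /* Approximate expansion of module_platform_driver(bigmac_sbus_driver); */
    static int __init bigmac_sbus_driver_init(void)
    {
    	return platform_driver_register(&bigmac_sbus_driver);
    }
    module_init(bigmac_sbus_driver_init);

    static void __exit bigmac_sbus_driver_exit(void)
    {
    	platform_driver_unregister(&bigmac_sbus_driver);
    }
    module_exit(bigmac_sbus_driver_exit);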
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ceab215bb4a..31441a870b0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(gp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index cf14ab9db57..09c518655db 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strcpy(info->driver, "sunhme");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunhme", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strcpy(info->bus_info, pci_name(pdev));
+ strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d",
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "SBUS:%d",
regs->which_io);
}
#endif
@@ -2849,7 +2850,7 @@ err_out:
static int is_quattro_p(struct pci_dev *pdev)
{
struct pci_dev *busdev = pdev->bus->self;
- struct list_head *tmp;
+ struct pci_dev *this_pdev;
int n_hmes;
if (busdev == NULL ||
@@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
return 0;
n_hmes = 0;
- tmp = pdev->bus->devices.next;
- while (tmp != &pdev->bus->devices) {
- struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+ list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
n_hmes++;
-
- tmp = tmp->next;
}
if (n_hmes != 4)
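[Editor's note] The sunhme hunk above replaces an open-coded walk of pdev->bus->devices (via pci_dev_b()) with list_for_each_entry(). A minimal, self-contained sketch of the idiom (struct and names are illustrative):

    #include <linux/list.h>

    struct item {
    	struct list_head node;
    	int value;
    };

    /* list_for_each_entry() hides the container_of() arithmetic the
     * open-coded loop had to do by hand.
     */
    static int count_positive(struct list_head *head)
    {
    	struct item *it;
    	int n = 0;

    	list_for_each_entry(it, head, node) {
    		if (it->value > 0)
    			n++;
    	}
    	return n;
    }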
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 3a90af6d111..4b19e9b0606 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev network device
* @vid VLAN vid to add
*/
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
__bdx_vlan_rx_vid(ndev, vid, 1);
+ return 0;
}
/*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev network device
* @vid VLAN vid to kill
*/
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
__bdx_vlan_rx_vid(ndev, vid, 0);
+ return 0;
}
/**
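[Editor's note] The tehuti, via-rhine and via-velocity hunks all change the VLAN filter callbacks to return int so the core can propagate failures. A sketch of how the int-returning hooks plug into net_device_ops in this kernel generation ("my_*" names are placeholders):

    #include <linux/netdevice.h>

    static int my_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
    {
    	/* ...program the VLAN CAM/filter for vid... */
    	return 0;			/* or a negative errno on failure */
    }

    static int my_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
    {
    	/* ...remove vid from the VLAN CAM/filter... */
    	return 0;
    }

    static const struct net_device_ops my_netdev_ops = {
    	.ndo_vlan_rx_add_vid	= my_vlan_rx_add_vid,
    	.ndo_vlan_rx_kill_vid	= my_vlan_rx_kill_vid,
    };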
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cd..c97d2f59085 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
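[Editor's note] The cpdma_chan_stop hunk drops chan->lock around __cpdma_chan_process(), which takes that same lock internally. A generic sketch of the unlock/relock pattern, under the assumption that the callback grabs the lock itself (names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(chan_lock);	/* placeholder lock */

    /* process_one() is assumed to take chan_lock itself, so it must not be
     * called with the lock held; release the lock around the processing
     * loop and re-acquire it afterwards, as the cpdma hunk does.
     */
    static void drain_completed(int (*process_one)(void))
    {
    	unsigned long flags;

    	spin_lock_irqsave(&chan_lock, flags);
    	/* ...tear-down work that needs the lock... */

    	spin_unlock_irqrestore(&chan_lock, flags);
    	while (process_one() > 0)
    		;	/* completion handling runs unlocked; <= 0 means done */
    	spin_lock_irqsave(&chan_lock, flags);

    	/* ...clean up whatever is left, still under the lock... */
    	spin_unlock_irqrestore(&chan_lock, flags);
    }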
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 815c7970261..794ac30a577 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
+ atomic_t cur_tx;
const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
@@ -1044,6 +1046,9 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ atomic_dec(&priv->cur_tx);
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
@@ -1092,6 +1097,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
+ if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail_tx:
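[Editor's note] The davinci_emac hunk throttles the TX queue with a simple atomic count of in-flight descriptors. A minimal sketch of the stop/wake pairing (the limit and the "emac_example_*" names are illustrative):

    #include <linux/atomic.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define EXAMPLE_TX_NUM_DESC	128	/* example ring depth */

    static atomic_t example_cur_tx;	/* descriptors currently in flight */

    static netdev_tx_t emac_example_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
    	/* ...queue skb to the DMA channel... */
    	if (atomic_inc_return(&example_cur_tx) >= EXAMPLE_TX_NUM_DESC)
    		netif_stop_queue(ndev);	/* ring is full, stop feeding it */
    	return NETDEV_TX_OK;
    }

    static void emac_example_tx_done(struct net_device *ndev)
    {
    	atomic_dec(&example_cur_tx);
    	if (unlikely(netif_queue_stopped(ndev)))
    		netif_wake_queue(ndev);	/* room again, restart the queue */
    }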
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2..d9951afb926 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
panic("Failed to stop LIPP/LEPP!\n");
- priv->partly_opened = 0;
+ priv->partly_opened = false;
}
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
priv->network_cpus_count, priv->network_cpus_credits);
#endif
- priv->partly_opened = 1;
+ priv->partly_opened = true;
} else {
/* FIXME: Is this possible? */
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
@@ -2260,8 +2260,7 @@ static int tile_net_get_mac(struct net_device *dev)
return 0;
}
-
-static struct net_device_ops tile_net_ops = {
+static const struct net_device_ops tile_net_ops = {
.ndo_open = tile_net_open,
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 7bf1e201578..5ee82a77723 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -640,7 +640,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_rx_dma failed, %d\n", status);
@@ -658,7 +658,7 @@ static inline void gelic_card_disable_txdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_tx_dma failed, status=%d\n", status);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a8df7eca095..a9ce01bafd2 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1688,18 +1688,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_init(void)
-{
- int ret;
- ret = platform_driver_register (&tsi_eth_driver);
- if (ret < 0){
- printk("tsi108_ether_init: error initializing ethernet "
- "device\n");
- return ret;
- }
- return 0;
-}
-
static int tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1702,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
-static void tsi108_ether_exit(void)
-{
- platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f34dd99fe57..5c4983b2870 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -35,6 +35,7 @@
#define DRV_VERSION "1.5.0"
#define DRV_RELDATE "2010-10-09"
+#include <linux/types.h>
/* A few user-configurable values.
These may be modified when a driver module is loaded. */
@@ -55,7 +56,7 @@ static int rx_copybreak;
/* Work-around for broken BIOSes: they are unable to get the chip back out of
power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -488,8 +489,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1262,7 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1269,9 +1270,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
spin_unlock_irq(&rp->lock);
+ return 0;
}
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1279,6 +1281,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
spin_unlock_irq(&rp->lock);
+ return 0;
}
static void init_registers(struct net_device *dev)
@@ -2009,9 +2012,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct rhine_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2320,7 +2323,7 @@ static int __init rhine_init(void)
#endif
if (dmi_check_system(rhine_dmi_table)) {
/* these BIOSes fail at PXE boot if chip is in D3 */
- avoid_D3 = 1;
+ avoid_D3 = true;
pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
}
else if (avoid_D3)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4535d7cc848..4128d6b8cc2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
clear_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev,
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct velocity_info *vptr = netdev_priv(dev);
- strcpy(info->driver, VELOCITY_NAME);
- strcpy(info->version, VELOCITY_VERSION);
- strcpy(info->bus_info, pci_name(vptr->pdev));
+ strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2681b53820e..f21addb1db9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb) {
dev_err(&ndev->dev,
"can't allocate memory for DMA RX buffer\n");
@@ -920,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!lp->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1077,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
of_node_put(np); /* Finished with the DMA node; drop the reference */
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if (!lp->rx_irq || !lp->tx_irq) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto err_iounmap_2;
@@ -1167,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
},
};
-static int __init temac_init(void)
-{
- return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
- platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d045b..79013e5731a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
- bool tx_complete = 0;
+ bool tx_complete = false;
struct net_device *dev = dev_id;
struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
/* Get IRQ for the device */
rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
- if (rc == NO_IRQ) {
+ if (!rc) {
dev_err(dev, "no IRQ found\n");
return rc;
}
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
.remove = __devexit_p(xemaclite_of_remove),
};
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return: 0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
- /* No kernel boot options used, we just need to register the driver */
- return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
- platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bbe8b7dbf3f..33979c3ac94 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "xirc2ps_cs");
+ strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 00000000000..936968d2355
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
+config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ help
+ Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 00000000000..c8a66827100
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
+
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ac1ec840512..dec5836ae07 100644
--- a/drivers/staging/hv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -39,9 +39,6 @@ struct xferpage_packet {
u32 count;
};
-/* The number of pages which are enough to cover jumbo frame buffer. */
-#define NETVSC_PACKET_MAXPAGE 4
-
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
@@ -77,8 +74,9 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
+ void *data;
u32 page_buf_cnt;
- struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
+ struct hv_page_buffer page_buf[0];
};
struct netvsc_device_info {
@@ -87,6 +85,27 @@ struct netvsc_device_info {
int ring_size;
};
+enum rndis_device_state {
+ RNDIS_DEV_UNINITIALIZED = 0,
+ RNDIS_DEV_INITIALIZING,
+ RNDIS_DEV_INITIALIZED,
+ RNDIS_DEV_DATAINITIALIZED,
+};
+
+struct rndis_device {
+ struct netvsc_device *net_dev;
+
+ enum rndis_device_state state;
+ bool link_state;
+ atomic_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+
+ unsigned char hw_mac_adr[ETH_ALEN];
+};
+
+
/* Interface */
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
@@ -109,11 +128,13 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
+
+
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
-#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
-#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
+#define NVSP_PROTOCOL_VERSION_2 0x30002
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -138,11 +159,36 @@ enum {
NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
- /*
- * This should be set to the number of messages for the version with
- * the maximum number of messages.
- */
- NVSP_NUM_MSG_PER_VERSION = 9,
+ /* Version 2 messages */
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
+ NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
+
+ NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
+
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
+
+ NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
+
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
+
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
+
+ NVSP_MSG2_TYPE_ALLOC_RXBUF,
+ NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
+
+ NVSP_MSG2_TYPE_FREE_RXBUF,
+
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
+
+ NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
+
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
};
enum {
@@ -153,6 +199,7 @@ enum {
NVSP_STAT_PROTOCOL_TOO_OLD,
NVSP_STAT_INVALID_RNDIS_PKT,
NVSP_STAT_BUSY,
+ NVSP_STAT_PROTOCOL_UNSUPPORTED,
NVSP_STAT_MAX,
};
@@ -337,9 +384,69 @@ union nvsp_1_message_uber {
send_rndis_pkt_complete;
} __packed;
+
+/*
+ * Network VSP protocol version 2 messages:
+ */
+struct nvsp_2_vsc_capability {
+ union {
+ u64 data;
+ struct {
+ u64 vmq:1;
+ u64 chimney:1;
+ u64 sriov:1;
+ u64 ieee8021q:1;
+ u64 correlation_id:1;
+ };
+ };
+} __packed;
+
+struct nvsp_2_send_ndis_config {
+ u32 mtu;
+ u32 reserved;
+ struct nvsp_2_vsc_capability capability;
+} __packed;
+
+/* Allocate receive buffer */
+struct nvsp_2_alloc_rxbuf {
+ /* Allocation ID to match the allocation request and response */
+ u32 alloc_id;
+
+ /* Length of the VM shared memory receive buffer that needs to
+ * be allocated
+ */
+ u32 len;
+} __packed;
+
+/* Allocate receive buffer complete */
+struct nvsp_2_alloc_rxbuf_comp {
+ /* The NDIS_STATUS code for buffer allocation */
+ u32 status;
+
+ u32 alloc_id;
+
+ /* GPADL handle for the allocated receive buffer */
+ u32 gpadl_handle;
+
+ /* Receive buffer ID */
+ u64 recv_buf_id;
+} __packed;
+
+struct nvsp_2_free_rxbuf {
+ u64 recv_buf_id;
+} __packed;
+
+union nvsp_2_message_uber {
+ struct nvsp_2_send_ndis_config send_ndis_config;
+ struct nvsp_2_alloc_rxbuf alloc_rxbuf;
+ struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
+ struct nvsp_2_free_rxbuf free_rxbuf;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
+ union nvsp_2_message_uber v2_msg;
} __packed;
/* ALL Messages */
@@ -349,12 +456,9 @@ struct nvsp_message {
} __packed;
+#define NETVSC_MTU 65536
-
-/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
-/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
-
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -369,7 +473,10 @@ struct nvsp_message {
struct netvsc_device {
struct hv_device *dev;
+ u32 nvsp_version;
+
atomic_t num_outstanding_sends;
+ bool start_remove;
bool destroy;
/*
* List of free preallocated hv_netvsc_packet to represent receive
diff --git a/drivers/staging/hv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b902579c7fe..8965b45ce5a 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include "hyperv_net.h"
@@ -41,7 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
-
+ net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
net_device->ndev = ndev;
@@ -230,19 +231,16 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_section_cnt = init_packet->msg.
v1_msg.send_recv_buf_complete.num_sections;
- net_device->recv_section = kmalloc(net_device->recv_section_cnt
- * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
+ net_device->recv_section = kmemdup(
+ init_packet->msg.v1_msg.send_recv_buf_complete.sections,
+ net_device->recv_section_cnt *
+ sizeof(struct nvsp_1_receive_buffer_section),
+ GFP_KERNEL);
if (net_device->recv_section == NULL) {
ret = -EINVAL;
goto cleanup;
}
- memcpy(net_device->recv_section,
- init_packet->msg.v1_msg.
- send_recv_buf_complete.sections,
- net_device->recv_section_cnt *
- sizeof(struct nvsp_1_receive_buffer_section));
-
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
@@ -263,27 +261,18 @@ exit:
}
-static int netvsc_connect_vsp(struct hv_device *device)
+/* Negotiate NVSP protocol version */
+static int negotiate_nvsp_ver(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct nvsp_message *init_packet,
+ u32 nvsp_ver)
{
int ret, t;
- struct netvsc_device *net_device;
- struct nvsp_message *init_packet;
- int ndis_version;
- struct net_device *ndev;
-
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
- ndev = net_device->ndev;
-
- init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
- init_packet->msg.init_msg.init.min_protocol_ver =
- NVSP_MIN_PROTOCOL_VERSION;
- init_packet->msg.init_msg.init.max_protocol_ver =
- NVSP_MAX_PROTOCOL_VERSION;
+ init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
+ init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -293,26 +282,62 @@ static int netvsc_connect_vsp(struct hv_device *device)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return ret;
t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return -ETIMEDOUT;
if (init_packet->msg.init_msg.init_complete.status !=
- NVSP_STAT_SUCCESS) {
- ret = -EINVAL;
- goto cleanup;
- }
+ NVSP_STAT_SUCCESS)
+ return -EINVAL;
- if (init_packet->msg.init_msg.init_complete.
- negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
+ if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ return 0;
+
+ /* NVSPv2 only: Send NDIS config */
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND, 0);
+
+ return ret;
+}
+
+static int netvsc_connect_vsp(struct hv_device *device)
+{
+ int ret;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
+ int ndis_version;
+ struct net_device *ndev;
+
+ net_device = get_outbound_net_device(device);
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
+
+ init_packet = &net_device->channel_init_pkt;
+
+ /* Negotiate the latest NVSP protocol supported */
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_2) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
+ } else if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_1) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
+ } else {
ret = -EPROTO;
goto cleanup;
}
+
+ pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
+
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -438,6 +463,9 @@ static void netvsc_send_completion(struct hv_device *device,
nvsc_packet->completion.send.send_completion_ctx);
atomic_dec(&net_device->num_outstanding_sends);
+
+ if (netif_queue_stopped(ndev) && !net_device->start_remove)
+ netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -488,11 +516,16 @@ int netvsc_send(struct hv_device *device,
}
- if (ret != 0)
+ if (ret == 0) {
+ atomic_inc(&net_device->num_outstanding_sends);
+ } else if (ret == -EAGAIN) {
+ netif_stop_queue(ndev);
+ if (atomic_read(&net_device->num_outstanding_sends) < 1)
+ netif_wake_queue(ndev);
+ } else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
- else
- atomic_inc(&net_device->num_outstanding_sends);
+ }
return ret;
}
@@ -598,12 +631,10 @@ static void netvsc_receive(struct hv_device *device,
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
- unsigned long start;
- unsigned long end, end_virtual;
/* struct netvsc_driver *netvscDriver; */
struct xferpage_packet *xferpage_packet = NULL;
- int i, j;
- int count = 0, bytes_remain = 0;
+ int i;
+ int count = 0;
unsigned long flags;
struct net_device *ndev;
@@ -712,53 +743,10 @@ static void netvsc_receive(struct hv_device *device,
netvsc_packet->completion.recv.recv_completion_tid =
vmxferpage_packet->d.trans_id;
+ netvsc_packet->data = (void *)((unsigned long)net_device->
+ recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
vmxferpage_packet->ranges[i].byte_count;
- netvsc_packet->page_buf_cnt = 1;
-
- netvsc_packet->page_buf[0].len =
- vmxferpage_packet->ranges[i].byte_count;
-
- start = virt_to_phys((void *)((unsigned long)net_device->
- recv_buf + vmxferpage_packet->ranges[i].byte_offset));
-
- netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
- end_virtual = (unsigned long)net_device->recv_buf
- + vmxferpage_packet->ranges[i].byte_offset
- + vmxferpage_packet->ranges[i].byte_count - 1;
- end = virt_to_phys((void *)end_virtual);
-
- /* Calculate the page relative offset */
- netvsc_packet->page_buf[0].offset =
- vmxferpage_packet->ranges[i].byte_offset &
- (PAGE_SIZE - 1);
- if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
- /* Handle frame across multiple pages: */
- netvsc_packet->page_buf[0].len =
- (netvsc_packet->page_buf[0].pfn <<
- PAGE_SHIFT)
- + PAGE_SIZE - start;
- bytes_remain = netvsc_packet->total_data_buflen -
- netvsc_packet->page_buf[0].len;
- for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
- netvsc_packet->page_buf[j].offset = 0;
- if (bytes_remain <= PAGE_SIZE) {
- netvsc_packet->page_buf[j].len =
- bytes_remain;
- bytes_remain = 0;
- } else {
- netvsc_packet->page_buf[j].len =
- PAGE_SIZE;
- bytes_remain -= PAGE_SIZE;
- }
- netvsc_packet->page_buf[j].pfn =
- virt_to_phys((void *)(end_virtual -
- bytes_remain)) >> PAGE_SHIFT;
- netvsc_packet->page_buf_cnt++;
- if (bytes_remain == 0)
- break;
- }
- }
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
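[Editor's note] netvsc_connect_vsp() now tries the newest NVSP protocol first and quietly falls back to the older one. A generic sketch of that negotiation loop (the function and array names are hypothetical; negotiate() stands in for negotiate_nvsp_ver()):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Try protocol versions from newest to oldest; remember the first one
     * the host accepts.
     */
    static const u32 example_vers[] = { 0x30002 /* NVSP v2 */, 2 /* NVSP v1 */ };

    static int example_pick_version(int (*negotiate)(u32 ver), u32 *picked)
    {
    	int i;

    	for (i = 0; i < ARRAY_SIZE(example_vers); i++) {
    		if (negotiate(example_vers[i]) == 0) {
    			*picked = example_vers[i];
    			return 0;
    		}
    	}
    	return -EPROTO;		/* no common protocol version */
    }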
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 93b0e91cbf9..462d05f05e8 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,24 +43,59 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
- atomic_t avail;
struct delayed_work dwork;
};
-#define PACKET_PAGES_LOWATER 8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
-
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-/* no-op so the netdev core doesn't return -EINVAL when modifying the the
- * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
- * when it calls RndisFilterOnOpen() */
+struct set_multicast_work {
+ struct work_struct work;
+ struct net_device *net;
+};
+
+static void do_set_multicast(struct work_struct *w)
+{
+ struct set_multicast_work *swk =
+ container_of(w, struct set_multicast_work, work);
+ struct net_device *net = swk->net;
+
+ struct net_device_context *ndevctx = netdev_priv(net);
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+
+ nvdev = hv_get_drvdata(ndevctx->device_ctx);
+ if (nvdev == NULL)
+ return;
+
+ rdev = nvdev->extension;
+ if (rdev == NULL)
+ return;
+
+ if (net->flags & IFF_PROMISC)
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_PROMISCUOUS);
+ else
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+
+ kfree(w);
+}
+
static void netvsc_set_multicast_list(struct net_device *net)
{
+ struct set_multicast_work *swk =
+ kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
+ if (swk == NULL)
+ return;
+
+ swk->net = net;
+ INIT_WORK(&swk->work, do_set_multicast);
+ schedule_work(&swk->work);
}
static int netvsc_open(struct net_device *net)
@@ -104,18 +139,8 @@ static void netvsc_xmit_completion(void *context)
kfree(packet);
- if (skb) {
- struct net_device *net = skb->dev;
- struct net_device_context *net_device_ctx = netdev_priv(net);
- unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+ if (skb)
dev_kfree_skb_any(skb);
-
- atomic_add(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) >=
- PACKET_PAGES_HIWATER)
- netif_wake_queue(net);
- }
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -123,12 +148,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages;
+ unsigned int i, num_pages, npg_data;
- /* Add 1 for skb->data and additional one for RNDIS */
- num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
- if (num_pages > atomic_read(&net_device_ctx->avail))
- return NETDEV_TX_BUSY;
+ /* Account for the pages spanned by skb->data, plus one page for RNDIS */
+ npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
+ >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
+ num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -151,21 +176,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->page_buf_cnt = num_pages;
/* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
packet->page_buf[1].offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->page_buf[1].len = skb_headlen(skb);
+ if (npg_data == 1)
+ packet->page_buf[1].len = skb_headlen(skb);
+ else
+ packet->page_buf[1].len = PAGE_SIZE
+ - packet->page_buf[1].offset;
+
+ for (i = 2; i <= npg_data; i++) {
+ packet->page_buf[i].pfn = virt_to_phys(skb->data
+ + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
+ packet->page_buf[i].offset = 0;
+ packet->page_buf[i].len = PAGE_SIZE;
+ }
+ if (npg_data > 1)
+ packet->page_buf[npg_data].len = (((unsigned long)skb->data
+ + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+2].offset = f->page_offset;
- packet->page_buf[i+2].len = skb_frag_size(f);
+ packet->page_buf[i+npg_data+1].pfn =
+ page_to_pfn(skb_frag_page(f));
+ packet->page_buf[i+npg_data+1].offset = f->page_offset;
+ packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
}
/* Set the completion routine */
@@ -178,10 +218,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
-
- atomic_sub(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
- netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
@@ -232,9 +268,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct sk_buff *skb;
- void *data;
- int i;
- unsigned long flags;
struct netvsc_device *net_device;
net_device = hv_get_drvdata(device_obj);
@@ -253,27 +286,12 @@ int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
- /* for kmap_atomic */
- local_irq_save(flags);
-
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- for (i = 0; i < packet->page_buf_cnt; i++) {
- data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
- KM_IRQ1);
- data = (void *)(unsigned long)data +
- packet->page_buf[i].offset;
-
- memcpy(skb_put(skb, packet->page_buf[i].len), data,
- packet->page_buf[i].len);
-
- kunmap_atomic((void *)((unsigned long)data -
- packet->page_buf[i].offset), KM_IRQ1);
- }
-
- local_irq_restore(flags);
+ memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
@@ -299,6 +317,39 @@ static void netvsc_get_drvinfo(struct net_device *net,
strcpy(info->fw_version, "N/A");
}
+static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device_info device_info;
+ int limit = ETH_DATA_LEN;
+
+ if (nvdev == NULL || nvdev->destroy)
+ return -ENODEV;
+
+ if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ limit = NETVSC_MTU;
+
+ if (mtu < 68 || mtu > limit)
+ return -EINVAL;
+
+ nvdev->start_remove = true;
+ cancel_delayed_work_sync(&ndevctx->dwork);
+ netif_stop_queue(ndev);
+ rndis_filter_device_remove(hdev);
+
+ ndev->mtu = mtu;
+
+ ndevctx->device_ctx = hdev;
+ hv_set_drvdata(hdev, ndev);
+ device_info.ring_size = ring_size;
+ rndis_filter_device_add(hdev, &device_info);
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -309,7 +360,7 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -351,7 +402,6 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
- atomic_set(&net_device_ctx->avail, ring_size);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
@@ -403,6 +453,8 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+ net_device->start_remove = true;
+
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
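[Editor's note] The npg_data computation in netvsc_start_xmit counts how many pages the linear skb area crosses, since skb->data plus skb_headlen() may span a page boundary. A worked sketch of that arithmetic (the values in the comment are made up):

    #include <linux/mm.h>	/* PAGE_SHIFT */

    /* Pages touched by [data, data + len): last page index minus first page
     * index, plus one.  E.g. with 4 KiB pages, data = 0x1f80 and len = 0x200
     * spans pages 1 and 2, so the result is 2.
     */
    static unsigned int pages_spanned(unsigned long data, unsigned int len)
    {
    	return (unsigned int)(((data + len - 1) >> PAGE_SHIFT) -
    			      (data >> PAGE_SHIFT)) + 1;
    }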
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index bafccb36004..da181f9a49d 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -30,26 +30,6 @@
#include "hyperv_net.h"
-enum rndis_device_state {
- RNDIS_DEV_UNINITIALIZED = 0,
- RNDIS_DEV_INITIALIZING,
- RNDIS_DEV_INITIALIZED,
- RNDIS_DEV_DATAINITIALIZED,
-};
-
-struct rndis_device {
- struct netvsc_device *net_dev;
-
- enum rndis_device_state state;
- bool link_state;
- atomic_t new_req_id;
-
- spinlock_t request_lock;
- struct list_head req_list;
-
- unsigned char hw_mac_adr[ETH_ALEN];
-};
-
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -329,7 +309,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
- int i;
rndis_pkt = &msg->msg.pkt;
@@ -342,17 +321,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
- pkt->page_buf[0].offset += data_offset;
- pkt->page_buf[0].len -= data_offset;
-
- /* Drop the 0th page, if rndis data go beyond page boundary */
- if (pkt->page_buf[0].offset >= PAGE_SIZE) {
- pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
- pkt->page_buf[1].len -= pkt->page_buf[1].offset;
- pkt->page_buf_cnt--;
- for (i = 0; i < pkt->page_buf_cnt; i++)
- pkt->page_buf[i] = pkt->page_buf[i+1];
- }
+ pkt->data = (void *)((unsigned long)pkt->data + data_offset);
pkt->is_data_pkt = true;
@@ -387,11 +356,7 @@ int rndis_filter_receive(struct hv_device *dev,
return -ENODEV;
}
- rndis_hdr = (struct rndis_message *)kmap_atomic(
- pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
-
- rndis_hdr = (void *)((unsigned long)rndis_hdr +
- pkt->page_buf[0].offset);
+ rndis_hdr = pkt->data;
/* Make sure we got a valid rndis message */
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
@@ -407,8 +372,6 @@ int rndis_filter_receive(struct hv_device *dev,
sizeof(struct rndis_message) :
rndis_hdr->msg_len);
- kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
-
dump_rndis_message(dev, &rndis_msg);
switch (rndis_msg.ndis_msg_type) {
@@ -522,8 +485,7 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
return ret;
}
-static int rndis_filter_set_packet_filter(struct rndis_device *dev,
- u32 new_filter)
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 46b5f5fd686..e05b645bbc3 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 9d4ce1aba10..a561ae44a9a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
},
};
-static int __init bfin_sir_init(void)
-{
- return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
- platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b45b2cc4280..64f403da101 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
static int max_baud = 4000000;
#ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
#endif
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index d9267cb98a2..72f687b40d6 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1914,41 +1914,8 @@ static struct usb_driver irda_driver = {
#endif
};
-/************************* MODULE CALLBACKS *************************/
-/*
- * Deal with module insertion/removal
- * Mostly tell USB about our existence
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Module insertion
- */
-static int __init usb_irda_init(void)
-{
- int ret;
-
- ret = usb_register(&irda_driver);
- if (ret < 0)
- return ret;
-
- IRDA_MESSAGE("USB IrDA support registered\n");
- return 0;
-}
-module_init(usb_irda_init);
+module_usb_driver(irda_driver);
-/*------------------------------------------------------------------*/
-/*
- * Module removal
- */
-static void __exit usb_irda_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(usb_irda_cleanup);
-
-/*------------------------------------------------------------------*/
/*
* Module parameters
*/
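[Editor's note] The IrDA USB dongle drivers below are all converted to module_usb_driver(). Like module_platform_driver() earlier, it roughly expands to the register/deregister pair it replaces (approximate, not the literal macro text; it assumes the irda_driver usb_driver declared above):

    /* Approximate expansion of module_usb_driver(irda_driver); */
    static int __init irda_driver_init(void)
    {
    	return usb_register(&irda_driver);
    }
    module_init(irda_driver_init);

    static void __exit irda_driver_exit(void)
    {
    	usb_deregister(&irda_driver);
    }
    module_exit(irda_driver_exit);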
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index cb90d640007..79aebeee928 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -621,24 +621,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init kingsun_init(void)
-{
- return usb_register(&irda_driver);
-}
-module_init(kingsun_init);
-
-/*
- * Module removal
- */
-static void __exit kingsun_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(kingsun_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex VillacĂ­s Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 1046014dd6c..abe689dffc7 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -901,26 +901,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init ks959_init(void)
-{
- return usb_register(&irda_driver);
-}
-
-module_init(ks959_init);
-
-/*
- * Module removal
- */
-static void __exit ks959_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-
-module_exit(ks959_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex VillacĂ­s Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959");
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 9cc142fcc71..f8c01088eeb 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -796,26 +796,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init ksdazzle_init(void)
-{
- return usb_register(&irda_driver);
-}
-
-module_init(ksdazzle_init);
-
-/*
- * Module removal
- */
-static void __exit ksdazzle_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-
-module_exit(ksdazzle_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex VillacĂ­s Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index be52bfed66a..1a00b5990cb 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -968,25 +968,4 @@ static void mcs_disconnect(struct usb_interface *intf)
IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
}
-/* Module insertion */
-static int __init mcs_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&mcs_driver);
- if (result)
- IRDA_ERROR("usb_register failed. Error number %d\n", result);
-
- return result;
-}
-module_init(mcs_init);
-
-/* Module removal */
-static void __exit mcs_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&mcs_driver);
-}
-module_exit(mcs_exit);
-
+module_usb_driver(mcs_driver);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index b56636da6cc..2a4f2f15324 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1664,7 +1664,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
switch_bank(iobase, BANK0);
outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+ASCR) & ASCR_TXUR) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d0851dfa037..81d5275a15e 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
.resume = pxa_irda_resume,
};
-static int __init pxa_irda_init(void)
-{
- return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
- platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index d275e276e74..725d6b36782 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
},
};
-static int __init sh_irda_init(void)
-{
- return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
- platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index ed7d7d62bf6..e6661b5c1f8 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
},
};
-static int __init sh_sir_init(void)
-{
- return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
- platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8b1c3484d27..6c95d4087b2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
MODULE_LICENSE("GPL");
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
module_param_named(nopnp, smsc_nopnp, bool, 0);
MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 41c96b3d815..e6e59a078ef 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
- refrigerator();
+ try_to_freeze();
if (change_speed(stir, stir->speed))
break;
@@ -1133,21 +1133,4 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init stir_init(void)
-{
- return usb_register(&irda_driver);
-}
-module_init(stir_init);
-
-/*
- * Module removal
- */
-static void __exit stir_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(stir_cleanup);
+module_usb_driver(irda_driver);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6d6479049aa..2d456dd164f 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -942,14 +942,14 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
iobase = self->io.fir_base;
/* Disable DMA */
// DisableDmaChannel(self->io.dma);
- /* Check for underrrun! */
+ /* Check for underrun! */
/* Clear bit, by writing 1 into it */
Tx_status = GetTXStatus(iobase);
if (Tx_status & 0x08) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
hwreset(self);
-// how to clear underrrun ?
+ /* how to clear underrun? */
} else {
self->netdev->stats.tx_packets++;
ResetChip(iobase, 3);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index c4366601b06..7d43506c703 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -677,7 +677,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f2c06..b71998d0b5b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_ALL_TSO
| NETIF_F_UFO
- | NETIF_F_NO_CSUM
+ | NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 74134970b70..f2f820c4b40 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
return stats;
}
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_add_vid)
- ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+ return vlan_vid_add(lowerdev, vid);
}
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_kill_vid)
- ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+ vlan_vid_del(lowerdev, vid);
+ return 0;
}
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1b7082d08f3..58dc117a8d7 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
if (vlan) {
int index = get_slot(vlan, q);
- rcu_assign_pointer(vlan->taps[index], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[index], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
sock_put(&q->sk);
--vlan->numvtaps;
}
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- if (tap)
- goto out;
- }
-
/* Everything failed - find first available queue */
for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
- rcu_assign_pointer(vlan->taps[i], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[i], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
vlan->numvtaps--;
}
}
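The macvtap hunks above (and the pptp hunk later in this patch) replace rcu_assign_pointer(p, NULL) with RCU_INIT_POINTER(p, NULL). When the stored value is NULL there is nothing for concurrent readers to observe, so the memory barrier implied by rcu_assign_pointer() is unnecessary. A minimal sketch of the publish/retire pattern, with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {				/* hypothetical example structure */
	int val;
};
static struct item __rcu *slot;

static void publish_item(struct item *it)
{
	/* Publishing real data: the barrier orders initialization vs. pointer store. */
	rcu_assign_pointer(slot, it);
}

static void retire_item(void)
{
	struct item *old = rcu_dereference_protected(slot, 1);

	/* Storing NULL publishes nothing, so the cheaper initializer suffices. */
	RCU_INIT_POINTER(slot, NULL);
	synchronize_rcu();		/* wait for readers before freeing */
	kfree(old);
}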
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c62e7816d54..c70c2332d15 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -35,26 +35,11 @@
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
- u32 result = 0;
int advert;
advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
- if (advert & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
- if (advert & ADVERTISE_10HALF)
- result |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- result |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- result |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- result |= ADVERTISED_100baseT_Full;
- if (advert & ADVERTISE_PAUSE_CAP)
- result |= ADVERTISED_Pause;
- if (advert & ADVERTISE_PAUSE_ASYM)
- result |= ADVERTISED_Asym_Pause;
-
- return result;
+
+ return mii_lpa_to_ethtool_lpa_t(advert);
}
/**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (ctrl1000 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
- if (stat1000 & LPA_1000HALF)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Half;
- if (stat1000 & LPA_1000FULL)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Full;
+ ecmd->lp_advertising |=
+ mii_stat1000_to_ethtool_lpa_t(stat1000);
} else {
ecmd->lp_advertising = 0;
}
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
}
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- tmp |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- tmp |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- tmp |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- tmp |= ADVERTISE_100FULL;
- if (mii->supports_gmii) {
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- tmp2 |= ADVERTISE_1000HALF;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- tmp2 |= ADVERTISE_1000FULL;
- }
+ tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+ if (mii->supports_gmii)
+ tmp2 |=
+ ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
if (advert != tmp) {
mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
mii->advertising = tmp;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c..fbdcdf83cbf 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
@@ -131,3 +131,7 @@ config MDIO_OCTEON
If in doubt, say Y.
endif # PHYLIB
+
+config MICREL_KS8995MA
+ tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2333215bbb3..e15c83fecbe 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 65391891d8c..daec9b05d16 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
+static int mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ if (ctrl->reset)
+ ctrl->reset(bus);
+ return 0;
+}
+
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
+ bus->reset = mdiobb_reset;
bus->priv = ctrl;
return bus;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2843c90f712..89c5a3eccc1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
+ bitbang->ctrl.reset = pdata->reset;
bitbang->mdc = pdata->mdc;
bitbang->mdio = pdata->mdio;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5afec6..f320f466f03 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
if (adv < 0)
return adv;
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- if (advertise & ADVERTISED_Pause)
- adv |= ADVERTISE_PAUSE_CAP;
- if (advertise & ADVERTISED_Asym_Pause)
- adv |= ADVERTISE_PAUSE_ASYM;
+ adv |= ethtool_adv_to_mii_adv_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (advertise & SUPPORTED_1000baseT_Half)
- adv |= ADVERTISE_1000HALF;
- if (advertise & SUPPORTED_1000baseT_Full)
- adv |= ADVERTISE_1000FULL;
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_CTRL1000, adv);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 342505c976d..fc3e7e96c88 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -22,26 +22,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-
-#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
-#define MII_LAN83C185_IM 30 /* Interrupt Mask */
-#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
-
-#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
-#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
-#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
-#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
-#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
-#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
-#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
-
-#define MII_LAN83C185_ISF_INT_ALL (0x0e)
-
-#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
- MII_LAN83C185_ISF_INT7)
-
-#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#include <linux/smscphy.h>
static int smsc_phy_config_intr(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644
index 00000000000..116a2dd7c87
--- /dev/null
+++ b/drivers/net/phy/spi_ks8995.c
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
+#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE 0x80
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define CHIPID_M 0
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_RESET_DELAY 10 /* usec */
+
+struct ks8995_pdata {
+ /* not yet implemented */
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ks8995_pdata *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_READ;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_WRITE;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off > KS8995_REGS_SIZE))
+ return 0;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off >= KS8995_REGS_SIZE))
+ return -EFBIG;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+ .attr = {
+ .name = "registers",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = KS8995_REGS_SIZE,
+ .read = ks8995_registers_read,
+ .write = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ struct ks8995_pdata *pdata;
+ u8 ids[2];
+ int err;
+
+ /* Chip description */
+ pdata = spi->dev.platform_data;
+
+ ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks) {
+ dev_err(&spi->dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ks->lock);
+ ks->pdata = pdata;
+ ks->spi = spi_dev_get(spi);
+ dev_set_drvdata(&spi->dev, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ goto err_drvdata;
+ }
+
+ err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+ if (err < 0) {
+ dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ switch (ids[0]) {
+ case FAMILY_KS8995:
+ break;
+ default:
+ dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+ err = -ENODEV;
+ goto err_drvdata;
+ }
+
+ err = ks8995_reset(ks);
+ if (err)
+ goto err_drvdata;
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ if (err) {
+ dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+ "Revision:%01x\n", ids[0],
+ get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+ return 0;
+
+err_drvdata:
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks);
+ return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks8995;
+
+ ks8995 = dev_get_drvdata(&spi->dev);
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks8995);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8995_probe,
+ .remove = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+ printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+
+ return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+ spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
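The new spi_ks8995.c driver exposes the switch's 0x80-byte register window as a binary sysfs attribute named "registers". A minimal userspace sketch that reads the chip ID registers through it; the device path depends on the SPI bus and chip select and is hypothetical here:

/* Userspace sketch: dump the KS8995 ID registers via the sysfs bin file. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char regs[0x80];
	ssize_t n;
	int fd = open("/sys/bus/spi/devices/spi0.0/registers", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, regs, sizeof(regs));
	close(fd);
	if (n != (ssize_t)sizeof(regs))
		return 1;
	printf("ID0=0x%02x ID1=0x%02x\n", regs[0], regs[1]);
	return 0;
}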
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f72..c1c9293c2bb 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
{
spin_lock(&chan_lock);
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
synchronize_rcu();
}
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 00000000000..248a144033c
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+ tristate "Ethernet team driver support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This allows one to create virtual interfaces that team together
+ multiple Ethernet devices.
+
+ Team devices can be added using the "ip" command from the
+ iproute2 package:
+
+ "ip link add link [ address MAC ] [ NAME ] type team"
+
+ To compile this driver as a module, choose M here: the module
+ will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+ tristate "Round-robin mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where the port used for transmitting packets is selected
+ in round-robin fashion using a packet counter.
+
+ All added ports are set up to use the team device's MAC address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+ tristate "Active-backup mode support"
+ depends on NET_TEAM
+ ---help---
+ Only one port is active at a time; the rest of the ports are kept
+ as backups.
+
+ MAC addresses of the ports are not modified. Userspace is
+ responsible for doing that.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 00000000000..85f2028a87a
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 00000000000..ed2a862b835
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1684 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+ struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change the MAC address of an open port device is tested
+ * in team_port_add, this function can be called without checking the return value.
+ */
+static int __set_port_mac(struct net_device *port_dev,
+ const unsigned char *dev_addr)
+{
+ struct sockaddr addr;
+
+ memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+ addr.sa_family = ARPHRD_ETHER;
+ return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+ struct team_option *option;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ if (strcmp(option->name, opt_name) == 0)
+ return option;
+ }
+ return NULL;
+}
+
+int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+ struct team_option **dst_opts;
+ int err;
+
+ dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+ GFP_KERNEL);
+ if (!dst_opts)
+ return -ENOMEM;
+ for (i = 0; i < option_count; i++, option++) {
+ if (__team_find_option(team, option->name)) {
+ err = -EEXIST;
+ goto rollback;
+ }
+ dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+ if (!dst_opts[i]) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+ }
+
+ for (i = 0; i < option_count; i++)
+ list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+ kfree(dst_opts);
+ return 0;
+
+rollback:
+ for (i = 0; i < option_count; i++)
+ kfree(dst_opts[i]);
+
+ kfree(dst_opts);
+ return err;
+}
+
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+
+ for (i = 0; i < option_count; i++, option++) {
+ struct team_option *del_opt;
+
+ del_opt = __team_find_option(team, option->name);
+ if (del_opt) {
+ list_del(&del_opt->list);
+ kfree(del_opt);
+ }
+ }
+}
+
+void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ __team_options_unregister(team, option, option_count);
+ __team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+ void *arg)
+{
+ return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+ void *arg)
+{
+ int err;
+
+ err = option->setter(team, arg);
+ if (err)
+ return err;
+
+ __team_options_change_check(team, option);
+ return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+ struct team_mode *mode;
+
+ list_for_each_entry(mode, &mode_list, list) {
+ if (strcmp(mode->kind, kind) == 0)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+ while (*name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ name++;
+ }
+ return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+ int err = 0;
+
+ if (!is_good_mode_name(mode->kind) ||
+ mode->priv_size > TEAM_MODE_PRIV_SIZE)
+ return -EINVAL;
+ spin_lock(&mode_list_lock);
+ if (__find_mode(mode->kind)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+ list_add_tail(&mode->list, &mode_list);
+unlock:
+ spin_unlock(&mode_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+ spin_lock(&mode_list_lock);
+ list_del_init(&mode->list);
+ spin_unlock(&mode_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+ struct team_mode *mode;
+
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ if (!mode) {
+ spin_unlock(&mode_list_lock);
+ request_module("team-mode-%s", kind);
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ }
+ if (mode)
+ if (!try_module_get(mode->owner))
+ mode = NULL;
+
+ spin_unlock(&mode_list_lock);
+ return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+ module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
+{
+ return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+ /*
+ * To avoid checks in rx/tx skb paths, ensure here that non-null and
+ * correct ops are always set.
+ */
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->transmit)
+ team->ops.transmit = team_dummy_transmit;
+ else
+ team->ops.transmit = team->mode->ops->transmit;
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->receive)
+ team->ops.receive = team_dummy_receive;
+ else
+ team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that it is guaranteed that no port is present
+ * at the time of a mode change. Therefore no packets are in flight, so there
+ * is no need to set the mode operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+ const struct team_mode *new_mode)
+{
+ /* Check if mode was previously set and do cleanup if so */
+ if (team->mode) {
+ void (*exit_op)(struct team *team) = team->ops.exit;
+
+ /* Clear ops area so no callback is called any longer */
+ memset(&team->ops, 0, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ if (exit_op)
+ exit_op(team);
+ team_mode_put(team->mode);
+ team->mode = NULL;
+ /* zero private data area */
+ memset(&team->mode_priv, 0,
+ sizeof(struct team) - offsetof(struct team, mode_priv));
+ }
+
+ if (!new_mode)
+ return 0;
+
+ if (new_mode->ops->init) {
+ int err;
+
+ err = new_mode->ops->init(team);
+ if (err)
+ return err;
+ }
+
+ team->mode = new_mode;
+ memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+ struct team_mode *new_mode;
+ struct net_device *dev = team->dev;
+ int err;
+
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "No ports can be present during mode change\n");
+ return -EBUSY;
+ }
+
+ if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ netdev_err(dev, "Unable to change to the same mode the team is in\n");
+ return -EINVAL;
+ }
+
+ new_mode = team_mode_get(kind);
+ if (!new_mode) {
+ netdev_err(dev, "Mode \"%s\" not found\n", kind);
+ return -EINVAL;
+ }
+
+ err = __team_change_mode(team, new_mode);
+ if (err) {
+ netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+ team_mode_put(new_mode);
+ return err;
+ }
+
+ netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+ return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct team_port *port;
+ struct team *team;
+ rx_handler_result_t res;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ port = team_port_get_rcu(skb->dev);
+ team = port->team;
+
+ res = team->ops.receive(team, port, skb);
+ if (res == RX_HANDLER_ANOTHER) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ pcpu_stats->rx_multicast++;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->dev = team->dev;
+ } else {
+ this_cpu_inc(team->pcpu_stats->rx_dropped);
+ }
+
+ return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+ const struct team_port *port)
+{
+ struct team_port *cur;
+
+ list_for_each_entry(cur, &team->port_list, list)
+ if (cur == port)
+ return true;
+ return false;
+}
+
+/*
+ * Add/delete port to the team port list. Write guarded by rtnl_lock.
+ * Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+ struct team_port *port)
+{
+ port->index = team->port_count++;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+ int i;
+ struct team_port *port;
+
+ for (i = rm_index + 1; i < team->port_count; i++) {
+ port = team_get_port_by_index(team, i);
+ hlist_del_rcu(&port->hlist);
+ port->index--;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ }
+}
+
+static void team_port_list_del_port(struct team *team,
+ struct team_port *port)
+{
+ int rm_index = port->index;
+
+ hlist_del_rcu(&port->hlist);
+ list_del_rcu(&port->list);
+ __reconstruct_port_hlist(team, rm_index);
+ team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+ struct team_port *port;
+ u32 vlan_features = TEAM_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ vlan_features = netdev_increment_features(vlan_features,
+ port->dev->vlan_features,
+ TEAM_VLAN_FEATURES);
+
+ if (port->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = port->dev->hard_header_len;
+ }
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hard_header_len = max_hard_header_len;
+
+ netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+ mutex_lock(&team->lock);
+ __team_compute_features(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+ int err = 0;
+
+ dev_hold(team->dev);
+ port->dev->priv_flags |= IFF_TEAM_PORT;
+ if (team->ops.port_enter) {
+ err = team->ops.port_enter(team, port);
+ if (err) {
+ netdev_err(team->dev, "Device %s failed to enter team mode\n",
+ port->dev->name);
+ goto err_port_enter;
+ }
+ }
+
+ return 0;
+
+err_port_enter:
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+
+ return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+ if (team->ops.port_leave)
+ team->ops.port_leave(team, port);
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+ int err;
+
+ if (port_dev->flags & IFF_LOOPBACK ||
+ port_dev->type != ARPHRD_ETHER) {
+ netdev_err(dev, "Device %s is of an unsupported type\n",
+ portname);
+ return -EINVAL;
+ }
+
+ if (team_port_exists(port_dev)) {
+ netdev_err(dev, "Device %s is already a port "
+ "of a team device\n", portname);
+ return -EBUSY;
+ }
+
+ if (port_dev->flags & IFF_UP) {
+ netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+ portname);
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = port_dev;
+ port->team = team;
+
+ port->orig.mtu = port_dev->mtu;
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
+
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+ err = team_port_enter(team, port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to enter team mode\n",
+ portname);
+ goto err_port_enter;
+ }
+
+ err = dev_open(port_dev);
+ if (err) {
+ netdev_dbg(dev, "Device %s opening failed\n",
+ portname);
+ goto err_dev_open;
+ }
+
+ err = vlan_vids_add_by_dev(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Failed to add vlan ids to device %s\n",
+ portname);
+ goto err_vids_add;
+ }
+
+ err = netdev_set_master(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Device %s failed to set master\n", portname);
+ goto err_set_master;
+ }
+
+ err = netdev_rx_handler_register(port_dev, team_handle_frame,
+ port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to register rx_handler\n",
+ portname);
+ goto err_handler_register;
+ }
+
+ team_port_list_add_port(team, port);
+ team_adjust_ops(team);
+ __team_compute_features(team);
+ __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+ netdev_info(dev, "Port device %s added\n", portname);
+
+ return 0;
+
+err_handler_register:
+ netdev_set_master(port_dev, NULL);
+
+err_set_master:
+ vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+ dev_close(port_dev);
+
+err_dev_open:
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+
+err_port_enter:
+ dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+ kfree(port);
+
+ return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+
+ port = team_port_get_rtnl(port_dev);
+ if (!port || !team_port_find(team, port)) {
+ netdev_err(dev, "Device %s does not act as a port of this team\n",
+ portname);
+ return -ENOENT;
+ }
+
+ __team_port_change_check(port, false);
+ team_port_list_del_port(team, port);
+ team_adjust_ops(team);
+ netdev_rx_handler_unregister(port_dev);
+ netdev_set_master(port_dev, NULL);
+ vlan_vids_del_by_dev(port_dev, dev);
+ dev_close(port_dev);
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+ dev_set_mtu(port_dev, port->orig.mtu);
+ synchronize_rcu();
+ kfree(port);
+ netdev_info(dev, "Port device %s removed\n", portname);
+ __team_compute_features(team);
+
+ return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ *str = team->mode ? team->mode->kind : team_no_mode_kind;
+ return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+ {
+ .name = "mode",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = team_mode_option_get,
+ .setter = team_mode_option_set,
+ },
+};
+
+static int team_init(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ int i;
+ int err;
+
+ team->dev = dev;
+ mutex_init(&team->lock);
+
+ team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ if (!team->pcpu_stats)
+ return -ENOMEM;
+
+ for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&team->port_hlist[i]);
+ INIT_LIST_HEAD(&team->port_list);
+
+ team_adjust_ops(team);
+
+ INIT_LIST_HEAD(&team->option_list);
+ err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+ if (err)
+ goto err_options_register;
+ netif_carrier_off(dev);
+
+ return 0;
+
+err_options_register:
+ free_percpu(team->pcpu_stats);
+
+ return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct team_port *tmp;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry_safe(port, tmp, &team->port_list, list)
+ team_port_del(team, port->dev);
+
+ __team_change_mode(team, NULL); /* cleanup */
+ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ free_percpu(team->pcpu_stats);
+ free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ bool tx_success = false;
+ unsigned int len = skb->len;
+
+ tx_success = team->ops.transmit(team, skb);
+ if (tx_success) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(team->pcpu_stats->tx_dropped);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int inc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (change & IFF_PROMISC) {
+ inc = dev->flags & IFF_PROMISC ? 1 : -1;
+ dev_set_promiscuity(port->dev, inc);
+ }
+ if (change & IFF_ALLMULTI) {
+ inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+ dev_set_allmulti(port->dev, inc);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ dev_uc_sync(port->dev, dev);
+ dev_mc_sync(port->dev, dev);
+ }
+ rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ if (team->ops.port_change_mac)
+ team->ops.port_change_mac(team, port);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it is guarded by the team lock. It is not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = dev_set_mtu(port->dev, new_mtu);
+ if (err) {
+ netdev_err(dev, "Device %s failed to change mtu",
+ port->dev->name);
+ goto unwind;
+ }
+ }
+ mutex_unlock(&team->lock);
+
+ dev->mtu = new_mtu;
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ dev_set_mtu(port->dev, dev->mtu);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(team->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /*
+ * rx_dropped & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_dropped += p->rx_dropped;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it is guarded by the team lock. It is not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = vlan_vid_add(port->dev, vid);
+ if (err)
+ goto unwind;
+ }
+ mutex_unlock(&team->lock);
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_add(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_del(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct team_port *port;
+ struct team *team = netdev_priv(dev);
+ netdev_features_t mask;
+
+ mask = features;
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+ }
+ rcu_read_unlock();
+ return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+ .ndo_init = team_init,
+ .ndo_uninit = team_uninit,
+ .ndo_open = team_open,
+ .ndo_stop = team_close,
+ .ndo_start_xmit = team_xmit,
+ .ndo_change_rx_flags = team_change_rx_flags,
+ .ndo_set_rx_mode = team_set_rx_mode,
+ .ndo_set_mac_address = team_set_mac_address,
+ .ndo_change_mtu = team_change_mtu,
+ .ndo_get_stats64 = team_get_stats64,
+ .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+ .ndo_add_slave = team_add_slave,
+ .ndo_del_slave = team_del_slave,
+ .ndo_fix_features = team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &team_netdev_ops;
+ dev->destructor = team_destructor;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_MULTICAST;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+ /*
+ * Indicate we support unicast address filtering. That way the core won't
+ * switch us to promiscuous mode when a unicast address is added;
+ * leave that up to the underlying drivers.
+ */
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features = NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TEAM_GENL_NAME,
+ .version = TEAM_GENL_VERSION,
+ .maxattr = TEAM_ATTR_MAX,
+ .netnsok = true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+ [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
+ [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+ [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_OPTION_NAME] = {
+ .type = NLA_STRING,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
+ [TEAM_ATTR_OPTION_DATA] = {
+ .type = NLA_BINARY,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &team_nl_family, 0, TEAM_CMD_NOOP);
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto err_msg_put;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+ nlmsg_free(msg);
+
+ return err;
+}
+
+/*
+ * Netlink cmd functions should be locked by the following two functions.
+ * Since dev gets held here, that ensures dev won't disappear in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ int ifindex;
+ struct net_device *dev;
+ struct team *team;
+
+ if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+ return NULL;
+
+ ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev || dev->netdev_ops != &team_netdev_ops) {
+ if (dev)
+ dev_put(dev);
+ return NULL;
+ }
+
+ team = netdev_priv(dev);
+ mutex_lock(&team->lock);
+ return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+ mutex_unlock(&team->lock);
+ dev_put(team->dev);
+}
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+ int (*fill_func)(struct sk_buff *skb,
+ struct genl_info *info,
+ int flags, struct team *team))
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = fill_func(skb, info, NLM_F_ACK, team);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_option *changed_option)
+{
+ struct nlattr *option_list;
+ void *hdr;
+ struct team_option *option;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_OPTIONS_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ if (!option_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ struct nlattr *option_item;
+ long arg;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ goto nla_put_failure;
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+ if (option == changed_option)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+ team_option_get(team, option, &arg);
+ NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+ team_option_get(team, option, &arg);
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+ (char *) arg);
+ break;
+ default:
+ BUG();
+ }
+ nla_nest_end(skb, option_item);
+ }
+
+ nla_nest_end(skb, option_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_options_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err = 0;
+ int i;
+ struct nlattr *nl_option;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = -EINVAL;
+ if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+
+ nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+ struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+ enum team_option_type opt_type;
+ struct team_option *option;
+ char *opt_name;
+ bool opt_found = false;
+
+ if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+ nl_option, team_nl_option_policy);
+ if (err)
+ goto team_put;
+ if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+ !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+ !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+ case NLA_U32:
+ opt_type = TEAM_OPTION_TYPE_U32;
+ break;
+ case NLA_STRING:
+ opt_type = TEAM_OPTION_TYPE_STRING;
+ break;
+ default:
+ err = -EINVAL;
+ goto team_put;
+ }
+
+ opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+ list_for_each_entry(option, &team->option_list, list) {
+ long arg;
+ struct nlattr *opt_data_attr;
+
+ if (option->type != opt_type ||
+ strcmp(option->name, opt_name))
+ continue;
+ opt_found = true;
+ opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+ switch (opt_type) {
+ case TEAM_OPTION_TYPE_U32:
+ arg = nla_get_u32(opt_data_attr);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ arg = (long) nla_data(opt_data_attr);
+ break;
+ default:
+ BUG();
+ }
+ err = team_option_set(team, option, &arg);
+ if (err)
+ goto team_put;
+ }
+ if (!opt_found) {
+ err = -ENOENT;
+ goto team_put;
+ }
+ }
+
+team_put:
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_port *changed_port)
+{
+ struct nlattr *port_list;
+ void *hdr;
+ struct team_port *port;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_PORT_LIST_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ if (!port_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct nlattr *port_item;
+
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nla_put_failure;
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+ if (port == changed_port)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+ if (port->linkup)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+ NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+ nla_nest_end(skb, port_item);
+ }
+
+ nla_nest_end(skb, port_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .doit = team_nl_cmd_noop,
+ .policy = team_nl_policy,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .doit = team_nl_cmd_options_set,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .doit = team_nl_cmd_options_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .doit = team_nl_cmd_port_list_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team,
+ struct team_option *changed_option)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
+ changed_option);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team_port *port)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(port->team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
+ port->team, port);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_init(void)
+{
+ int err;
+
+ err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+ ARRAY_SIZE(team_nl_ops));
+ if (err)
+ return err;
+
+ err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+ if (err)
+ goto err_change_event_grp_reg;
+
+ return 0;
+
+err_change_event_grp_reg:
+ genl_unregister_family(&team_nl_family);
+
+ return err;
+}
+
+static void team_nl_fini(void)
+{
+ genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option)
+{
+ int err;
+
+ err = team_nl_send_event_options_get(team, changed_option);
+ if (err)
+ netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+ int err;
+
+ if (port->linkup == linkup)
+ return;
+
+ port->linkup = linkup;
+ if (linkup) {
+ struct ethtool_cmd ecmd;
+
+ err = __ethtool_get_settings(port->dev, &ecmd);
+ if (!err) {
+ port->speed = ethtool_cmd_speed(&ecmd);
+ port->duplex = ecmd.duplex;
+ goto send_event;
+ }
+ }
+ port->speed = 0;
+ port->duplex = 0;
+
+send_event:
+ err = team_nl_send_event_port_list_get(port);
+ if (err)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+ port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+ struct team *team = port->team;
+
+ mutex_lock(&team->lock);
+ __team_port_change_check(port, linkup);
+ mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct team_port *port;
+
+ port = team_port_get_rtnl(dev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (netif_carrier_ok(dev))
+ team_port_change_check(port, true);
+ case NETDEV_DOWN:
+ team_port_change_check(port, false);
+ case NETDEV_CHANGE:
+ if (netif_running(port->dev))
+ team_port_change_check(port,
+ !!netif_carrier_ok(port->dev));
+ break;
+ case NETDEV_UNREGISTER:
+ team_del_slave(port->team->dev, dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+ team_compute_features(port->team);
+ break;
+ case NETDEV_CHANGEMTU:
+ /* Forbid changing the MTU of the underlying device */
+ return NOTIFY_BAD;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid changing the type of the underlying device */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+ .notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&team_notifier_block);
+
+ err = rtnl_link_register(&team_link_ops);
+ if (err)
+ goto err_rtnl_reg;
+
+ err = team_nl_init();
+ if (err)
+ goto err_nl_init;
+
+ return 0;
+
+err_nl_init:
+ rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+ unregister_netdevice_notifier(&team_notifier_block);
+
+ return err;
+}
+
+static void __exit team_module_exit(void)
+{
+ team_nl_fini();
+ rtnl_link_unregister(&team_link_ops);
+ unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
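For illustration only, not part of the patch: the generic netlink interface set up
above (TEAM_CMD_OPTIONS_GET, TEAM_CMD_PORT_LIST_GET and the change-event multicast
group) is meant to be driven from userspace. Below is a minimal sketch of an options
query using libnl-3; the family name string "team", the genl version argument and the
omitted reply parsing are assumptions -- the authoritative constants come from the
include/linux/if_team.h header added by this patch.

    #include <errno.h>
    #include <netlink/netlink.h>
    #include <netlink/msg.h>
    #include <netlink/attr.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/if_team.h>

    /* Ask the kernel to report all options of one team device. */
    static int team_options_query(int ifindex)
    {
            struct nl_sock *sk;
            struct nl_msg *msg;
            int family, err;

            sk = nl_socket_alloc();
            if (!sk)
                    return -ENOMEM;
            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "team");  /* assumed family name */
            msg = nlmsg_alloc();
            if (family < 0 || !msg) {
                    nlmsg_free(msg);
                    nl_socket_free(sk);
                    return family < 0 ? family : -ENOMEM;
            }
            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        TEAM_CMD_OPTIONS_GET, 1 /* assumed version */);
            nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, ifindex);
            err = nl_send_auto(sk, msg);             /* reply parsing omitted */
            nlmsg_free(msg);
            nl_socket_free(sk);
            return err < 0 ? err : 0;
    }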
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644
index 00000000000..f4d960e82e2
--- /dev/null
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -0,0 +1,136 @@
+/*
+ * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+ struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+ return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (active_port != port)
+ return RX_HANDLER_EXACT;
+ return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (unlikely(!active_port))
+ goto drop;
+ skb->dev = active_port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+ if (ab_priv(team)->active_port == port)
+ RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+
+ *ifindex = 0;
+ if (ab_priv(team)->active_port)
+ *ifindex = ab_priv(team)->active_port->dev->ifindex;
+ return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+ struct team_port *port;
+
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (port->dev->ifindex == *ifindex) {
+ rcu_assign_pointer(ab_priv(team)->active_port, port);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+ {
+ .name = "activeport",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = ab_active_port_get,
+ .setter = ab_active_port_set,
+ },
+};
+
+static int ab_init(struct team *team)
+{
+ return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static void ab_exit(struct team *team)
+{
+ team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+ .init = ab_init,
+ .exit = ab_exit,
+ .receive = ab_receive,
+ .transmit = ab_transmit,
+ .port_leave = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+ .kind = "activebackup",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct ab_priv),
+ .ops = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+ return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+ team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644
index 00000000000..a0e8f806331
--- /dev/null
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -0,0 +1,107 @@
+/*
+ * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+ unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+ return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+
+ if (port->linkup)
+ return port;
+ cur = port;
+ list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+ if (cur->linkup)
+ return cur;
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (cur == port)
+ break;
+ if (cur->linkup)
+ return cur;
+ }
+ return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
+ port_index = rr_priv(team)->sent_packets++ % team->port_count;
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = __get_first_port_up(team, port);
+ if (unlikely(!port))
+ goto drop;
+ skb->dev = port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+ .transmit = rr_transmit,
+ .port_enter = rr_port_enter,
+ .port_change_mac = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+ .kind = "roundrobin",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct rr_priv),
+ .ops = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+ return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+ team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bea9c65119..93c5d72711b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,7 +123,7 @@ struct tun_struct {
gid_t group;
struct net_device *dev;
- u32 set_features;
+ netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
struct fasync_struct *fasync;
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
- u32 features = 0;
+ netdev_features_t features = 0;
if (arg & TUN_F_CSUM) {
features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
- strcpy(info->bus_info, "tun");
+ strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case TUN_TAP_DEV:
- strcpy(info->bus_info, "tap");
+ strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
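For illustration only, not part of the patch: tun is one of several drivers in this
diff (smsc75xx, smsc95xx and vmxnet3 below make the same change) whose feature
callbacks move from a plain u32 to the netdev_features_t typedef. A minimal sketch of
the two ndo callbacks involved, for a hypothetical "foo" driver, just to show the
updated signatures:

    #include <linux/netdevice.h>

    static netdev_features_t foo_fix_features(struct net_device *dev,
                                              netdev_features_t features)
    {
            /* Mask out offloads the hardware cannot honour right now. */
            return features & ~NETIF_F_TSO;
    }

    static int foo_set_features(struct net_device *dev,
                                netdev_features_t features)
    {
            /* Program the hardware for the accepted feature set. */
            return 0;
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_fix_features = foo_fix_features,
            .ndo_set_features = foo_set_features,
    };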
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e6fed4d4cb7..d0937c4634c 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "08-Nov-2011"
+#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -376,7 +376,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, (size + 1) & 0xfffe);
- if (skb->len == 0)
+ if (skb->len < sizeof(header))
break;
head = (u8 *) skb->data;
@@ -689,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
}
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
}
static int
@@ -1148,7 +1152,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return 0;
}
-static struct ethtool_ops ax88178_ethtool_ops = {
+static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -1655,6 +1659,10 @@ static const struct usb_device_id products [] = {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Asus USB Ethernet Adapter
+ USB_DEVICE(0x0b95, 0x7e2b),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
@@ -1670,17 +1678,7 @@ static struct usb_driver asix_driver = {
.supports_autosuspend = 1,
};
-static int __init asix_init(void)
-{
- return usb_register(&asix_driver);
-}
-module_init(asix_init);
-
-static void __exit asix_exit(void)
-{
- usb_deregister(&asix_driver);
-}
-module_exit(asix_exit);
+module_usb_driver(asix_driver);
MODULE_AUTHOR("David Hollis");
MODULE_VERSION(DRIVER_VERSION);
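For illustration only, not part of the patch: the asix hunk above, and most of the USB
driver hunks that follow (catc, cdc-phonet, cdc_eem, cdc_ether, ...), drop hand-rolled
init/exit boilerplate in favour of module_usb_driver(). Paraphrasing the helper macros
(the exact text lives in include/linux/usb.h and include/linux/device.h and may differ
slightly by kernel version), module_usb_driver(foo_driver) expands to the equivalent
of:

    static int __init foo_driver_init(void)
    {
            return usb_register(&foo_driver);
    }
    module_init(foo_driver_init);

    static void __exit foo_driver_exit(void)
    {
            usb_deregister(&foo_driver);
    }
    module_exit(foo_driver_exit);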
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index a68272c9338..182cfb4aeb1 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -949,19 +949,4 @@ static struct usb_driver catc_driver = {
.id_table = catc_id_table,
};
-static int __init catc_init(void)
-{
- int result = usb_register(&catc_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit catc_exit(void)
-{
- usb_deregister(&catc_driver);
-}
-
-module_init(catc_init);
-module_exit(catc_exit);
+module_usb_driver(catc_driver);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index a60d0069cc4..790cbdea739 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
err = usb_submit_urb(req, gfp_flags);
if (unlikely(err)) {
dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
- netdev_free_page(dev, page);
+ put_page(page);
}
return err;
}
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
dev->stats.rx_errors++;
resubmit:
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC);
+ rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
}
static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
usbpn_close(dev);
return -ENOMEM;
}
@@ -457,18 +457,7 @@ static struct usb_driver usbpn_driver = {
.id_table = usbpn_ids,
};
-static int __init usbpn_init(void)
-{
- return usb_register(&usbpn_driver);
-}
-
-static void __exit usbpn_exit(void)
-{
- usb_deregister(&usbpn_driver);
-}
-
-module_init(usbpn_init);
-module_exit(usbpn_exit);
+module_usb_driver(usbpn_driver);
MODULE_AUTHOR("Remi Denis-Courmont");
MODULE_DESCRIPTION("USB CDC Phonet host interface");
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 882f53f708d..439690be519 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -369,18 +369,7 @@ static struct usb_driver eem_driver = {
.resume = usbnet_resume,
};
-
-static int __init eem_init(void)
-{
- return usb_register(&eem_driver);
-}
-module_init(eem_init);
-
-static void __exit eem_exit(void)
-{
- usb_deregister(&eem_driver);
-}
-module_exit(eem_exit);
+module_usb_driver(eem_driver);
MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
MODULE_DESCRIPTION("USB CDC EEM");
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 99ed6eb4dfa..41a61efc331 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -425,6 +425,9 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
struct cdc_state *info = (void *) &dev->data;
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
+ < sizeof(struct cdc_state)));
+
status = usbnet_generic_cdc_bind(dev, intf);
if (status < 0)
return status;
@@ -615,21 +618,7 @@ static struct usb_driver cdc_driver = {
.supports_autosuspend = 1,
};
-
-static int __init cdc_init(void)
-{
- BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
- < sizeof(struct cdc_state)));
-
- return usb_register(&cdc_driver);
-}
-module_init(cdc_init);
-
-static void __exit cdc_exit(void)
-{
- usb_deregister(&cdc_driver);
-}
-module_exit(cdc_exit);
+module_usb_driver(cdc_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB CDC Ethernet devices");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f06fb78383a..3a539a9cac5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -138,7 +138,7 @@ struct cdc_ncm_ctx {
static void cdc_ncm_tx_timeout(unsigned long arg);
static const struct driver_info cdc_ncm_info;
static struct usb_driver cdc_ncm_driver;
-static struct ethtool_ops cdc_ncm_ethtool_ops;
+static const struct ethtool_ops cdc_ncm_ethtool_ops;
static const struct usb_device_id cdc_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int temp;
u8 iface_no;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return -ENODEV;
- memset(ctx, 0, sizeof(*ctx));
-
init_timer(&ctx->tx_timer);
spin_lock_init(&ctx->mtx);
ctx->netdev = dev->net;
@@ -1222,7 +1220,7 @@ static struct usb_driver cdc_ncm_driver = {
.supports_autosuspend = 1,
};
-static struct ethtool_ops cdc_ncm_ethtool_ops = {
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.get_drvinfo = cdc_ncm_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -1232,20 +1230,7 @@ static struct ethtool_ops cdc_ncm_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int __init cdc_ncm_init(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
- return usb_register(&cdc_ncm_driver);
-}
-
-module_init(cdc_ncm_init);
-
-static void __exit cdc_ncm_exit(void)
-{
- usb_deregister(&cdc_ncm_driver);
-}
-
-module_exit(cdc_ncm_exit);
+module_usb_driver(cdc_ncm_driver);
MODULE_AUTHOR("Hans Petter Selasky");
MODULE_DESCRIPTION("USB CDC NCM host driver");
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index fc5f13d47ad..b403d934e4e 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -338,17 +338,7 @@ static struct usb_driver cdc_subset_driver = {
.id_table = products,
};
-static int __init cdc_subset_init(void)
-{
- return usb_register(&cdc_subset_driver);
-}
-module_init(cdc_subset_init);
-
-static void __exit cdc_subset_exit(void)
-{
- usb_deregister(&cdc_subset_driver);
-}
-module_exit(cdc_subset_exit);
+module_usb_driver(cdc_subset_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Simple 'CDC Subset' USB networking links");
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 8969f124c18..0e0531356e6 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -329,17 +329,7 @@ static struct usb_driver cx82310_driver = {
.resume = usbnet_resume,
};
-static int __init cx82310_init(void)
-{
- return usb_register(&cx82310_driver);
-}
-module_init(cx82310_init);
-
-static void __exit cx82310_exit(void)
-{
- usb_deregister(&cx82310_driver);
-}
-module_exit(cx82310_exit);
+module_usb_driver(cx82310_driver);
MODULE_AUTHOR("Ondrej Zary");
MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index fbc0e4def76..b97226318ea 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -672,18 +672,7 @@ static struct usb_driver dm9601_driver = {
.resume = usbnet_resume,
};
-static int __init dm9601_init(void)
-{
- return usb_register(&dm9601_driver);
-}
-
-static void __exit dm9601_exit(void)
-{
- usb_deregister(&dm9601_driver);
-}
-
-module_init(dm9601_init);
-module_exit(dm9601_exit);
+module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index c4cfd1dea88..38266bdae26 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -227,17 +227,7 @@ static struct usb_driver gl620a_driver = {
.resume = usbnet_resume,
};
-static int __init usbnet_init(void)
-{
- return usb_register(&gl620a_driver);
-}
-module_init(usbnet_init);
-
-static void __exit usbnet_exit(void)
-{
- usb_deregister(&gl620a_driver);
-}
-module_exit(usbnet_exit);
+module_usb_driver(gl620a_driver);
MODULE_AUTHOR("Jiun-Jie Huang");
MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables");
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 131ac6c172f..12a22a453ff 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -238,17 +238,7 @@ static struct usb_driver int51x1_driver = {
.resume = usbnet_resume,
};
-static int __init int51x1_init(void)
-{
- return usb_register(&int51x1_driver);
-}
-module_init(int51x1_init);
-
-static void __exit int51x1_exit(void)
-{
- usb_deregister(&int51x1_driver);
-}
-module_exit(int51x1_exit);
+module_usb_driver(int51x1_driver);
MODULE_AUTHOR("Peter Holik");
MODULE_DESCRIPTION("Intellon usb powerline adapter");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 13c1f044b40..e84662db51c 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -420,7 +420,7 @@ static u32 ipheth_ethtool_op_get_link(struct net_device *net)
return netif_carrier_ok(dev->net);
}
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
.get_link = ipheth_ethtool_op_get_link
};
@@ -543,25 +543,7 @@ static struct usb_driver ipheth_driver = {
.id_table = ipheth_table,
};
-static int __init ipheth_init(void)
-{
- int retval;
-
- retval = usb_register(&ipheth_driver);
- if (retval) {
- err("usb_register failed: %d", retval);
- return retval;
- }
- return 0;
-}
-
-static void __exit ipheth_exit(void)
-{
- usb_deregister(&ipheth_driver);
-}
-
-module_init(ipheth_init);
-module_exit(ipheth_exit);
+module_usb_driver(ipheth_driver);
MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5a6d0f88f43..7562649b3d6 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -375,17 +375,7 @@ static struct usb_driver kalmia_driver = {
.resume = usbnet_resume
};
-static int __init kalmia_init(void)
-{
- return usb_register(&kalmia_driver);
-}
-module_init( kalmia_init);
-
-static void __exit kalmia_exit(void)
-{
- usb_deregister(&kalmia_driver);
-}
-module_exit( kalmia_exit);
+module_usb_driver(kalmia_driver);
MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 582ca2dfa5f..d034d9c4254 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1324,32 +1324,4 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
}
}
-
-/****************************************************************
- * kaweth_init
- ****************************************************************/
-static int __init kaweth_init(void)
-{
- dbg("Driver loading");
- return usb_register(&kaweth_driver);
-}
-
-/****************************************************************
- * kaweth_exit
- ****************************************************************/
-static void __exit kaweth_exit(void)
-{
- usb_deregister(&kaweth_driver);
-}
-
-module_init(kaweth_init);
-module_exit(kaweth_exit);
-
-
-
-
-
-
-
-
-
+module_usb_driver(kaweth_driver);
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 9c26c6390d6..45a981fde43 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -346,17 +346,7 @@ static struct usb_driver lg_vl600_driver = {
.resume = usbnet_resume,
};
-static int __init vl600_init(void)
-{
- return usb_register(&lg_vl600_driver);
-}
-module_init(vl600_init);
-
-static void __exit vl600_exit(void)
-{
- usb_deregister(&lg_vl600_driver);
-}
-module_exit(vl600_exit);
+module_usb_driver(lg_vl600_driver);
MODULE_AUTHOR("Anrzej Zaborowski");
MODULE_DESCRIPTION("LG-VL600 modem's ethernet link");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index db2cb74bf85..a29aa9cf9f6 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -692,17 +692,7 @@ static struct usb_driver mcs7830_driver = {
.reset_resume = mcs7830_reset_resume,
};
-static int __init mcs7830_init(void)
-{
- return usb_register(&mcs7830_driver);
-}
-module_init(mcs7830_init);
-
-static void __exit mcs7830_exit(void)
-{
- usb_deregister(&mcs7830_driver);
-}
-module_exit(mcs7830_exit);
+module_usb_driver(mcs7830_driver);
MODULE_DESCRIPTION("USB to network adapter MCS7830)");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 01db4602a39..83f965cb69e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -589,17 +589,7 @@ static struct usb_driver net1080_driver = {
.resume = usbnet_resume,
};
-static int __init net1080_init(void)
-{
- return usb_register(&net1080_driver);
-}
-module_init(net1080_init);
-
-static void __exit net1080_exit(void)
-{
- usb_deregister(&net1080_driver);
-}
-module_exit(net1080_exit);
+module_usb_driver(net1080_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links");
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 769f5090bda..5d99b8cacd7 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
if (~data & 0x08) {
- if (loopback & 1)
+ if (loopback)
break;
if (mii_mode && (pegasus->features & HAS_HOME_PNA))
set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
data[1] |= 0x10; /* set 100 Mbps */
if (mii_mode)
data[1] = 0;
- data[2] = (loopback & 1) ? 0x09 : 0x01;
+ data[2] = loopback ? 0x09 : 0x01;
memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 217aec8a768..b2b035e2997 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -154,17 +154,7 @@ static struct usb_driver plusb_driver = {
.resume = usbnet_resume,
};
-static int __init plusb_init(void)
-{
- return usb_register(&plusb_driver);
-}
-module_init(plusb_init);
-
-static void __exit plusb_exit(void)
-{
- usb_deregister(&plusb_driver);
-}
-module_exit(plusb_exit);
+module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 255d6a424a6..c8f1b5b3aff 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -635,17 +635,7 @@ static struct usb_driver rndis_driver = {
.resume = usbnet_resume,
};
-static int __init rndis_init(void)
-{
- return usb_register(&rndis_driver);
-}
-module_init(rndis_init);
-
-static void __exit rndis_exit(void)
-{
- usb_deregister(&rndis_driver);
-}
-module_exit(rndis_exit);
+module_usb_driver(rndis_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB Host side RNDIS driver");
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index bf8c84d0adf..0710b4ca925 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -978,20 +978,7 @@ static struct usb_driver rtl8150_driver = {
.resume = rtl8150_resume
};
-static int __init usb_rtl8150_init(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return usb_register(&rtl8150_driver);
-}
-
-static void __exit usb_rtl8150_exit(void)
-{
- usb_deregister(&rtl8150_driver);
-}
-
-module_init(usb_rtl8150_init);
-module_exit(usb_rtl8150_exit);
+module_usb_driver(rtl8150_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ed1b4321058..b59cf20c781 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -618,7 +618,7 @@ static u32 sierra_net_get_link(struct net_device *net)
return sierra_net_get_private(dev)->link_up && netif_running(net);
}
-static struct ethtool_ops sierra_net_ethtool_ops = {
+static const struct ethtool_ops sierra_net_ethtool_ops = {
.get_drvinfo = sierra_net_get_drvinfo,
.get_link = sierra_net_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -900,6 +900,9 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
u16 len;
bool need_tail;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ < sizeof(struct cdc_state));
+
dev_dbg(&dev->udev->dev, "%s", __func__);
if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
/* enough head room as is? */
@@ -981,21 +984,7 @@ static struct usb_driver sierra_net_driver = {
.no_dynamic_id = 1,
};
-static int __init sierra_net_init(void)
-{
- BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
- < sizeof(struct cdc_state));
-
- return usb_register(&sierra_net_driver);
-}
-
-static void __exit sierra_net_exit(void)
-{
- usb_deregister(&sierra_net_driver);
-}
-
-module_exit(sierra_net_exit);
-module_init(sierra_net_init);
+module_usb_driver(sierra_net_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index a5b9b12ef26..3b017bbd2a2 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -76,7 +76,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -728,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1237,17 +1238,7 @@ static struct usb_driver smsc75xx_driver = {
.disconnect = usbnet_disconnect,
};
-static int __init smsc75xx_init(void)
-{
- return usb_register(&smsc75xx_driver);
-}
-module_init(smsc75xx_init);
-
-static void __exit smsc75xx_exit(void)
-{
- usb_deregister(&smsc75xx_driver);
-}
-module_exit(smsc75xx_exit);
+module_usb_driver(smsc75xx_driver);
MODULE_AUTHOR("Nancy Lin");
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index eff67678c5a..d45520e6dd4 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -59,7 +59,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
}
/* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
u32 read_buf;
@@ -1297,17 +1298,7 @@ static struct usb_driver smsc95xx_driver = {
.disconnect = usbnet_disconnect,
};
-static int __init smsc95xx_init(void)
-{
- return usb_register(&smsc95xx_driver);
-}
-module_init(smsc95xx_init);
-
-static void __exit smsc95xx_exit(void)
-{
- usb_deregister(&smsc95xx_driver);
-}
-module_exit(smsc95xx_exit);
+module_usb_driver(smsc95xx_driver);
MODULE_AUTHOR("Nancy Lin");
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 1a2234c2051..f701d412708 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -362,17 +362,7 @@ static struct usb_driver zaurus_driver = {
.resume = usbnet_resume,
};
-static int __init zaurus_init(void)
-{
- return usb_register(&zaurus_driver);
-}
-module_init(zaurus_init);
-
-static void __exit zaurus_exit(void)
-{
- usb_deregister(&zaurus_driver);
-}
-module_exit(zaurus_exit);
+module_usb_driver(zaurus_driver);
MODULE_AUTHOR("Pavel Machek, David Brownell");
MODULE_DESCRIPTION("Sharp Zaurus PDA, and compatible products");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ef883e97cee..49f4667e1fa 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,8 +27,8 @@
struct veth_net_stats {
u64 rx_packets;
- u64 tx_packets;
u64 rx_bytes;
+ u64 tx_packets;
u64 tx_bytes;
u64 rx_dropped;
struct u64_stats_sync syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ee8410443c..76fe14efb2b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
struct u64_stats_sync syncp;
@@ -155,6 +156,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
*len -= size;
}
+/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int len)
{
@@ -357,7 +359,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
struct skb_vnet_hdr *hdr;
int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
@@ -439,7 +441,13 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
@@ -501,7 +509,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +528,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
@@ -699,6 +707,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
}
tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +728,10 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure we have some buffers: if OOM, fall back to the workqueue. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
virtnet_napi_enable(vi);
return 0;
}
@@ -772,6 +785,8 @@ static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
@@ -853,7 +868,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
kfree(buf);
}
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -863,9 +878,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+ return 0;
}
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -875,6 +891,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+ return 0;
}
static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +906,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
};
@@ -1082,7 +1113,6 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free_stats:
@@ -1121,9 +1151,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
-
unregister_netdev(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d96bfb1ac20..de7fc345148 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
}
-static void
+static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
@@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = i % adapter->num_rx_queues;
+ rssConf->indTable[i] = ethtool_rxfh_indir_default(
+ i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf);
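For illustration only, not part of the patch: ethtool_rxfh_indir_default(), used above
to seed the RSS indirection table, is believed to be a small helper in
<linux/ethtool.h> that spreads table slots across the RX queues round-robin. A sketch
of the assumed behaviour (not a copy of the header):

    /* e.g. a 4-entry table over 2 RX queues becomes { 0, 1, 0, 1 } */
    static inline u32 rxfh_indir_default_sketch(u32 index, u32 n_rx_rings)
    {
            return index % n_rx_rings;
    }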
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc8bfb..587a218b234 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
}
#ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ return rssConf->indTableSize;
+}
+
static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
- struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+ unsigned int n = rssConf->indTableSize;
- p->size = rssConf->indTableSize;
while (n--)
- p->ring_index[n] = rssConf->indTable[n];
+ p[n] = rssConf->indTable[n];
return 0;
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
- const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- if (p->size != rssConf->indTableSize)
- return -EINVAL;
- for (i = 0; i < rssConf->indTableSize; i++) {
- /*
- * Return with error code if any of the queue indices
- * is out of range
- */
- if (p->ring_index[i] < 0 ||
- p->ring_index[i] >= adapter->num_rx_queues)
- return -EINVAL;
- }
-
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = p->ring_index[i];
+ rssConf->indTable[i] = p[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -619,7 +608,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
}
#endif
-static struct ethtool_ops vmxnet3_ethtool_ops = {
+static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
@@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
+ .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh_indir = vmxnet3_get_rss_indir,
.set_rxfh_indir = vmxnet3_set_rss_indir,
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index b18eac1dcca..ed54797db19 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -401,7 +401,7 @@ void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 783168cce07..d43f4efd3e0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -155,7 +155,7 @@ static int emancipate( struct net_device * );
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
-static int skip_pci_probe __initdata = 0;
+static bool skip_pci_probe __initdata = false;
static int scandone __initdata = 0;
static int num __initdata = 0;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0b4fd05e150..4f774847898 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -362,7 +362,7 @@ static int io=0x238;
static int txdma=1;
static int rxdma=3;
static int irq=5;
-static int slow=0;
+static bool slow=false;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c421a614185..c806d455021 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -75,7 +75,7 @@
* device is up and running or shutdown (through ifconfig up /
* down). Bus-generic only.
*
- * - control ops: control.c - implements various commmands for
+ * - control ops: control.c - implements various commands for
* controlling the device. bus-generic only.
*
* - device model glue: driver.c - implements helpers for the
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b9ecb20dee..f20886ade1c 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
- bool try_head = 0;
+ bool try_head = false;
BUG_ON(i2400m->tx_msg != NULL);
/*
* In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
- try_head = 1;
+ try_head = true;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
- bool try_head = 0;
+ bool try_head = false;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -771,7 +771,7 @@ try_new:
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
- try_head = 1;
+ try_head = true;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index ac357acfb3e..99ef81b3d5a 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -177,7 +177,6 @@ retry:
static
int i2400mu_txd(void *_i2400mu)
{
- int result = 0;
struct i2400mu *i2400mu = _i2400mu;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
/* Yeah, we ignore errors ... not much we can do */
i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
- if (result < 0)
- break;
}
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
- return result;
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+ return 0;
}
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0a304b060b6..98db76196b5 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
@@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
-obj-$(CONFIG_BRCMUMAC) += brcm80211/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_BRCMFMAC) += brcm80211/
+obj-$(CONFIG_BRCMSMAC) += brcm80211/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1176a4f46..1c008c61b95 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+ emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
+ emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d1214696a35..d716b748e57 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -11,3 +11,4 @@ ath-objs := main.o \
key.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 0f9ee46cfc9..efc01110dc3 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@ struct ath_common {
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
@@ -214,6 +215,10 @@ do { \
* @ATH_DBG_HWTIMER: hardware timer handling
 * @ATH_DBG_BTCOEX: bluetooth coexistence
* @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -239,6 +244,8 @@ enum ATH_DEBUG {
ATH_DBG_BTCOEX = 0x00002000,
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
+ ATH_DBG_MCI = 0x00010000,
+ ATH_DBG_DFS = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
@@ -248,7 +255,7 @@ enum ATH_DEBUG {
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
- if ((common)->debug_mask & dbg_mask) \
+ if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
_ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)
@@ -258,10 +265,13 @@ do { \
#else
static inline __attribute__ ((format (printf, 3, 4)))
-void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
const char *fmt, ...)
{
}
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+ _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
int __ret_warn_once = !!(foo); \
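
Illustrative usage, not from the patch: with both hunks above (debug and non-debug builds), ath_dbg() callers pass the short mask name and the macro pastes on the ATH_DBG_ prefix. The call site and the `state` variable below are made up:

	/* Hypothetical caller, assuming a valid struct ath_common *common. */
	ath_dbg(common, MCI, "MCI handshake done, state %d\n", state);

	/* With CONFIG_ATH_DEBUG this expands (roughly) to:
	 *	if (common->debug_mask & ATH_DBG_MCI)
	 *		_ath_printk(KERN_DEBUG, common,
	 *			    "MCI handshake done, state %d\n", state);
	 * and to an empty _ath_dbg(common, ATH_DBG_MCI, ...) call otherwise.
	 */
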
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e70181..ee7ea572b06 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
- __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6be70..bf674161a21 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
* or reducing sensitivity as necessary.
*
* The parameters are:
+ *
* - "noise immunity"
+ *
* - "spur immunity"
+ *
* - "firstep level"
+ *
* - "OFDM weak signal detection"
+ *
* - "CCK weak signal detection"
*
 * Basically we look at the amount of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
+ *
* Newer chipsets have PHY error counters in hardware which will generate a MIB
 * interrupt when they overflow. Older hardware has to enable PHY error frames
 * by setting an RX flag and then count every single PHY error. When a specified
@@ -45,11 +51,13 @@
*/
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
- * on the chip revision).
+ * on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- * detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
on ? "on" : "off");
}
-
/**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
}
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- * the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
*/
}
-
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
}
}
-
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
return listen;
}
-
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
return 1;
}
-
/**
* ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
as->listen_time = 0;
}
-
/**
* ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
}
}
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
tasklet_schedule(&ah->ani_tasklet);
}
-
/**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
}
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
}
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
#ifdef CONFIG_ATH5K_DEBUG
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
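
Condensed sketch, not from the patch, of the decision ath5k_ani_calibration() implements in the hunks above. The thresholds ofdm_high/ofdm_low/cck_low appear in the patch context; cck_high and the exact raise-branch condition are assumed by symmetry rather than copied:

	if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
		/* Too many PHY errors: raise immunity. The extra OFDM knobs
		 * are tuned only when the OFDM counter is the one that tripped. */
		ath5k_ani_raise_immunity(ah, as, as->ofdm_errors > ofdm_high);
		ath5k_ani_period_restart(as);
	} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
		/* Quiet for several listen periods: safe to lower immunity. */
		if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
			ath5k_ani_lower_immunity(ah, as);
		ath5k_ani_period_restart(as);
	}
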
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c83c6..21aa355460b 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- * algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- * maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- * minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- * amount of OFDM and CCK frame errors (default).
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
/**
* struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9a425..c2b2518c2ec 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
-
#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -262,16 +261,34 @@
#define AR5K_AGC_SETTLING_TURBO 37
-/* GENERIC CHIPSET DEFINITIONS */
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
enum ath5k_version {
AR5K_AR5210 = 0,
AR5K_AR5211 = 1,
AR5K_AR5212 = 2,
};
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
enum ath5k_radio {
AR5K_RF5110 = 0,
AR5K_RF5111 = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
/* TODO add support to mac80211 for vendor-specific rates and modes */
-/*
+/**
+ * DOC: Atheros XR
+ *
* Some of this information is based on Documentation from:
*
* http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
*
 * Please note that you can either use XR or TURBO but you cannot use both,
* they are exclusive.
*
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
*/
-#define MODULATION_XR 0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput of 40Mbit/s - 60Mbit/s at a
+ * 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
* the mode is turned off.
+ *
* - Dynamic: is the intelligent version, the network decides itself if it
* is ok to use turbo. As soon as traffic is detected on adjacent channels
* (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
*
* http://www.pcworld.com/article/id,113428-page,1/article.html
*
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
*
* - Bursting: allows multiple frames to be sent at once, rather than pausing
* after each frame. Bursting is a standards-compliant feature that can be
* used with any Access Point.
+ *
* - Fast frames: increases the amount of information that can be sent per
* frame, also resulting in a reduction of transmission overhead. It is a
* proprietary feature that needs to be supported by the Access Point.
+ *
* - Compression: data frames are compressed in real time using a Lempel Ziv
* algorithm. This is done transparently. Once this feature is enabled,
* compression and decompression takes place inside the chipset, without
* putting additional load on the host CPU.
*
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
*/
-#define MODULATION_TURBO 0x00000080
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and they also map to EEPROM structures.
+ */
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 3
};
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control, check out phy.c
+ */
enum ath5k_ant_mode {
- AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
- AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
- AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
- AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
- AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
- AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
- AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
+ AR5K_ANTMODE_DEFAULT = 0,
+ AR5K_ANTMODE_FIXED_A = 1,
+ AR5K_ANTMODE_FIXED_B = 2,
+ AR5K_ANTMODE_SINGLE_AP = 3,
+ AR5K_ANTMODE_SECTOR_AP = 4,
+ AR5K_ANTMODE_SECTOR_STA = 5,
+ AR5K_ANTMODE_DEBUG = 6,
AR5K_ANTMODE_MAX,
};
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
enum ath5k_bw_mode {
- AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
- AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
- AR5K_BWMODE_10MHZ = 2, /* Half rate */
- AR5K_BWMODE_40MHZ = 3 /* Turbo */
+ AR5K_BWMODE_DEFAULT = 0,
+ AR5K_BWMODE_5MHZ = 1,
+ AR5K_BWMODE_10MHZ = 2,
+ AR5K_BWMODE_40MHZ = 3
};
+
+
/****************\
TX DEFINITIONS
\****************/
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
*/
struct ath5k_tx_status {
u16 ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
* enum ath5k_tx_queue - Queue types used to classify tx queues.
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
* @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
* @AR5K_TX_QUEUE_BEACON: The beacon queue
* @AR5K_TX_QUEUE_CAB: The after-beacon queue
* @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
enum ath5k_tx_queue {
AR5K_TX_QUEUE_INACTIVE = 0,
AR5K_TX_QUEUE_DATA,
- AR5K_TX_QUEUE_XR_DATA,
AR5K_TX_QUEUE_BEACON,
AR5K_TX_QUEUE_CAB,
AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
#define AR5K_NUM_TX_QUEUES 10
#define AR5K_NUM_TX_QUEUES_NOQCU 2
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
* These are the 4 Access Categories as defined in
* WME spec. 0 is the lowest priority and 4 is the
* highest. Normal data that hasn't been classified
* goes to the Best Effort AC.
*/
enum ath5k_tx_queue_subtype {
- AR5K_WME_AC_BK = 0, /*Background traffic*/
- AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
- AR5K_WME_AC_VI, /*Video traffic*/
- AR5K_WME_AC_VO, /*Voice traffic*/
+ AR5K_WME_AC_BK = 0,
+ AR5K_WME_AC_BE,
+ AR5K_WME_AC_VI,
+ AR5K_WME_AC_VO,
};
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
*/
enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
- AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
- AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
- AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
- AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
- AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
- AR5K_TX_QUEUE_ID_UAPSD = 8,
- AR5K_TX_QUEUE_ID_XR_DATA = 9,
+ AR5K_TX_QUEUE_ID_DATA_MIN = 0,
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3,
+ AR5K_TX_QUEUE_ID_UAPSD = 7,
+ AR5K_TX_QUEUE_ID_CAB = 8,
+ AR5K_TX_QUEUE_ID_BEACON = 9,
};
/*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue. Packets sent to us from above
- * are assigned to queues based on their priority. Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len:Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority. Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
*/
struct ath5k_txq {
- unsigned int qnum; /* hardware q number */
- u32 *link; /* link ptr in last TX desc */
- struct list_head q; /* transmit queue */
- spinlock_t lock; /* lock on q and link */
+ unsigned int qnum;
+ u32 *link;
+ struct list_head q;
+ spinlock_t lock;
bool setup;
- int txq_len; /* number of queued buffers */
- int txq_max; /* max allowed num of queued buffers */
+ int txq_len;
+ int txq_max;
bool txq_poll_mark;
- unsigned int txq_stuck; /* informational counter */
+ unsigned int txq_stuck;
};
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
*/
struct ath5k_txq_info {
enum ath5k_tx_queue tqi_type;
enum ath5k_tx_queue_subtype tqi_subtype;
- u16 tqi_flags; /* Tx queue flags (see above) */
- u8 tqi_aifs; /* Arbitrated Interframe Space */
- u16 tqi_cw_min; /* Minimum Contention Window */
- u16 tqi_cw_max; /* Maximum Contention Window */
- u32 tqi_cbr_period; /* Constant bit rate period */
+ u16 tqi_flags;
+ u8 tqi_aifs;
+ u16 tqi_cw_min;
+ u16 tqi_cw_max;
+ u32 tqi_cbr_period;
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Time queue waits after an event */
+ u32 tqi_ready_time;
};
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
- AR5K_DMASIZE_4B = 0,
- AR5K_DMASIZE_8B,
- AR5K_DMASIZE_16B,
- AR5K_DMASIZE_32B,
- AR5K_DMASIZE_64B,
- AR5K_DMASIZE_128B,
- AR5K_DMASIZE_256B,
- AR5K_DMASIZE_512B
-};
/****************\
RX DEFINITIONS
\****************/
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
*/
struct ath5k_rx_status {
u16 rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
+
/*******************************\
GAIN OPTIMIZATION DEFINITIONS
\*******************************/
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
enum ath5k_rfgain {
AR5K_RFGAIN_INACTIVE = 0,
AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
AR5K_RFGAIN_NEED_CHANGE,
};
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
struct ath5k_gain {
u8 g_step_idx;
u8 g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
u8 g_state;
};
+
+
/********************\
COMMON DEFINITIONS
\********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHz map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on the 2111 frequency converter
+ * that comes together with RF5111.
* TODO: Clean up
*/
struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
u16 a2_athchan;
};
+/**
+ * enum ath5k_dmasize - DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4 bytes;
+ * be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+ AR5K_DMASIZE_4B = 0,
+ AR5K_DMASIZE_8B,
+ AR5K_DMASIZE_16B,
+ AR5K_DMASIZE_32B,
+ AR5K_DMASIZE_64B,
+ AR5K_DMASIZE_128B,
+ AR5K_DMASIZE_256B,
+ AR5K_DMASIZE_512B
+};
+
+
/******************\
RATE DEFINITIONS
\******************/
/**
+ * DOC: Rate codes
+ *
* Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
* and settings.
*
- * This is the hardware rate map we are aware of:
- *
- * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
- * rate_kbps 3000 1000 ? ? ? 2000 500 48000
- *
- * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
- * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
+ * This is the hardware rate map we are aware of (html unfriendly):
*
- * rate_code 17 18 19 20 21 22 23 24
- * rate_kbps ? ? ? ? ? ? ? 11000
+ * Rate code Rate (Kbps)
+ * --------- -----------
+ * 0x01 3000 (XR)
+ * 0x02 1000 (XR)
+ * 0x03 250 (XR)
+ * 0x04 - 05 -Reserved-
+ * 0x06 2000 (XR)
+ * 0x07 500 (XR)
+ * 0x08 48000 (OFDM)
+ * 0x09 24000 (OFDM)
+ * 0x0A 12000 (OFDM)
+ * 0x0B 6000 (OFDM)
+ * 0x0C 54000 (OFDM)
+ * 0x0D 36000 (OFDM)
+ * 0x0E 18000 (OFDM)
+ * 0x0F 9000 (OFDM)
+ * 0x10 - 17 -Reserved-
+ * 0x18 11000L (CCK)
+ * 0x19 5500L (CCK)
+ * 0x1A 2000L (CCK)
+ * 0x1B 1000L (CCK)
+ * 0x1C 11000S (CCK)
+ * 0x1D 5500S (CCK)
+ * 0x1E 2000S (CCK)
+ * 0x1F -Reserved-
*
- * rate_code 25 26 27 28 29 30 31 32
- * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
*
* AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
* (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
* We handle this in ath5k_setup_bands().
*/
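
The ath5k_dmasize kernel-doc added earlier in this hunk documents the burst size as 2^(n+2) bytes. A tiny illustrative helper, not part of the patch, showing the arithmetic:

	/* Sketch: bytes for an ath5k_dmasize value, per the 2^(n+2) rule.
	 * AR5K_DMASIZE_4B (0) -> 4, AR5K_DMASIZE_64B (4) -> 64,
	 * AR5K_DMASIZE_512B (7) -> 512. */
	static inline unsigned int ath5k_dmasize_to_bytes(enum ath5k_dmasize sz)
	{
		return 4U << sz;
	}
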
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
#define ATH5K_RATE_CODE_36M 0x0D
#define ATH5K_RATE_CODE_48M 0x08
#define ATH5K_RATE_CODE_54M 0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K 0x07
-#define ATH5K_RATE_CODE_XR_1M 0x02
-#define ATH5K_RATE_CODE_XR_2M 0x06
-#define ATH5K_RATE_CODE_XR_3M 0x01
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
#define AR5K_SET_SHORT_PREAMBLE 0x04
/*
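
Per the rate table above, OR-ing AR5K_SET_SHORT_PREAMBLE into a CCK (B) rate code selects its short-preamble variant. Illustration only; the 2M code's value (0x1A) is read off the table, not off this hunk:

	/* Sketch: 2Mbit/s CCK with short preamble.
	 * 0x1A (2000L) | 0x04 = 0x1E, listed in the table as 2000S. */
	u8 rc_2m_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE;
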
@@ -747,7 +914,7 @@ struct ath5k_athchan_2ghz {
*/
#define AR5K_KEYCACHE_SIZE 8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
/***********************\
HW RELATED DEFINITIONS
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
/**
* enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ * not always fatal; on some chips we can continue operation
+ * without resetting the card, which is why %AR5K_INT_FATAL is not
+ * common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a
+ * descriptor's LinkPtr is NULL. For more details, refer to:
+ * "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such a case we should
+ * increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
*
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- * AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- * LinkPtr is NULL. For more details, refer to:
- * http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- * Note that Rx overrun is not always fatal, on some chips we can continue
- * operation without resetting the card, that's why int_fatal is not
- * common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- * AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- * We currently do increments on interrupt by
- * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
 * @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
- * one of the PHY error counters reached the maximum value and should be
- * read and cleared.
+ * one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- * beacon that must be handled in software. The alternative is if you
- * have VEOL support, in that case you let the hardware deal with things.
+ * beacon that must be handled in software. The alternative is if
+ * you have VEOL support, in that case you let the hardware deal
+ * with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- * beacons from the AP have associated with, we should probably try to
- * reassociate. When in IBSS mode this might mean we have not received
- * any beacons from any local stations. Note that every station in an
- * IBSS schedules to send beacons at the Target Beacon Transmission Time
- * (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- * until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- * errors. These types of errors we can enable seem to be of type
- * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ * beacons from the AP we have associated with; we should probably
+ * try to reassociate. When in IBSS mode this might mean we have
+ * not received any beacons from any local stations. Note that
+ * every station in an IBSS schedules to send beacons at the
+ * Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ * our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ * nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ * errors. Indicates we need to reset the card.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- * bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ * bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
AR5K_INT_GPIO = 0x01000000,
AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
- AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
- AR5K_INT_QCBRORN = 0x10000000, /* Non common */
- AR5K_INT_QCBRURN = 0x20000000, /* Non common */
- AR5K_INT_QTRIG = 0x40000000, /* Non common */
+ AR5K_INT_QCBRORN = 0x08000000, /* Non common */
+ AR5K_INT_QCBRURN = 0x10000000, /* Non common */
+ AR5K_INT_QTRIG = 0x20000000, /* Non common */
AR5K_INT_GLOBAL = 0x80000000,
AR5K_INT_TX_ALL = AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
| AR5K_INT_TXEOL
| AR5K_INT_TXURN,
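
The AR5K_INT_RX_ALL / AR5K_INT_TX_ALL aggregates documented above are plain bitmasks over the individual sources. A trivial illustration, not from the patch (the helpers are hypothetical):

	if (status & AR5K_INT_RX_ALL)
		handle_rx_sources(ah);	/* hypothetical helper */
	if (status & AR5K_INT_TX_ALL)
		handle_tx_sources(ah);	/* hypothetical helper */
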
@@ -891,15 +1074,32 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
- AR5K_CALIBRATION_ANI = 0x04,
+ AR5K_CALIBRATION_NF = 0x04,
+ AR5K_CALIBRATION_ANI = 0x08,
};
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow the card to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are also known to have problems on some cards. This is not a big
+ * problem though because we can have almost the same effect as
+ * FULL_SLEEP by putting card on warm reset (it's almost powered down).
*/
enum ath5k_power_mode {
AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
} cap_queues;
bool cap_has_phyerr_counters;
+ bool cap_has_mrr_support;
+ bool cap_needs_2GHz_ovr;
};
/* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
dma_addr_t desc_daddr; /* DMA (physical) address */
size_t desc_len; /* size of TX/RX descriptors */
- DECLARE_BITMAP(status, 6);
+ DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
-#define ATH_STAT_PROMISC 2
-#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
-#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
+#define ATH_STAT_PROMISC 1
+#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
+#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
struct ieee80211_channel *curchan; /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
led_on; /* pin setting for LED on */
struct work_struct reset_work; /* deferred chip reset */
+ struct work_struct calib_work; /* deferred phy calibration */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
struct ath5k_rfkill rf_kill;
- struct tasklet_struct calib; /* calibration tasklet */
-
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct list_head bcbuf; /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
- bool ah_calibration;
+ bool ah_iq_cal_needed;
bool ah_single_chip;
enum ath5k_version ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
u32 ah_txq_imr_cbrurn;
u32 ah_txq_imr_qtrig;
u32 ah_txq_imr_nofrm;
- u32 ah_txq_isr;
+
+ u32 ah_txq_isr_txok_all;
+ u32 ah_txq_isr_txurn;
+ u32 ah_txq_isr_qcborn;
+ u32 ah_txq_isr_qcburn;
+ u32 ah_txq_isr_qtrig;
+
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
size_t ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_short;
unsigned long ah_cal_next_ani;
- unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+ u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
/* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd2c26..d7114c75fe9 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
#include "debug.h"
/**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
}
- if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
- __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
- __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
- }
-
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
}
/**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d049200..d366dadcf86 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -68,18 +68,23 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_all_channels;
+static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+static bool ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
- /* XR missing */
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ret)
goto err_unmap;
- memset(mrr_rate, 0, sizeof(mrr_rate));
- memset(mrr_tries, 0, sizeof(mrr_tries));
- for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
- if (!rate)
- break;
+ /* Set up MRR descriptor */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+ for (i = 0; i < 3; i++) {
+ rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+ if (!rate)
+ break;
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
- }
+ mrr_rate[i] = rate->hw_value;
+ mrr_tries[i] = info->control.rates[i + 1].count;
+ }
- ath5k_hw_setup_mrr_tx_desc(ah, ds,
- mrr_rate[0], mrr_tries[0],
- mrr_rate[1], mrr_tries[1],
- mrr_rate[2], mrr_tries[2]);
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
+ }
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
struct ath5k_hw *ah = (void *)data;
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
- if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+ if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
- ath5k_hw_init_beacon(ah, nexttbtt, intval);
+ ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
- !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
- /* run ANI only when full calibration is not active */
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run ANI only when calibration is not active */
+
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
- } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
- ah->ah_cal_next_full = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
- tasklet_schedule(&ah->calib);
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run calibration only when another calibration
+ * is not running.
+ *
+ * Note: This is for both full/short calibration,
+ * if it's time for a full one, ath5k_calibrate_work will deal
+ * with it. */
+
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+ ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
enum ath5k_int status;
unsigned int counter = 1000;
+
+ /*
+ * If hw is not ready (or detached) and we get an
+ * interrupt, or if we have no interrupts pending
+ * (that means it's not for us) skip it.
+ *
+ * NOTE: Group 0/1 PCI interface registers are not
+ * supported on WiSOCs, so we can't check for pending
+ * interrupts (ISR belongs to another register group
+ * so we are ok).
+ */
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
- ((ath5k_get_bus_type(ah) != ATH_AHB) &&
- !ath5k_hw_is_intr_pending(ah))))
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
+ /** Main loop **/
do {
- ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+ ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
+
+ /*
+ * Fatal hw error -> Log and reset
+ *
+ * Fatal errors are unrecoverable so we have to
+ * reset the card. These errors include bus and
+ * dma errors.
+ */
if (unlikely(status & AR5K_INT_FATAL)) {
- /*
- * Fatal errors are unrecoverable.
- * Typically these are caused by DMA errors.
- */
+
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+ /*
+ * RX Overrun -> Count and reset if needed
+ *
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ */
} else if (unlikely(status & AR5K_INT_RXORN)) {
+
/*
- * Receive buffers are full. Either the bus is busy or
- * the CPU is not fast enough to process all received
- * frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
- * We don't know exactly which versions need a reset -
+ * We don't know exactly which versions need a reset
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
+
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
+
} else {
+
+ /* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
- if (status & AR5K_INT_RXEOL) {
- /*
- * NB: the hardware should re-read the link when
- * RXE bit is written, but it doesn't work at
- * least on older hardware revs.
- */
+ /*
+ * No more RX descriptors -> Just count
+ *
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work at
+ * least on older hardware revs.
+ */
+ if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
- }
- if (status & AR5K_INT_TXURN) {
- /* bump tx trigger level */
+
+
+ /* TX Underrun -> Bump tx trigger level */
+ if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
- }
+
+ /* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
- if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
- | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+ /* TX -> Schedule tx tasklet */
+ if (status & (AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
- if (status & AR5K_INT_BMISS) {
- /* TODO */
- }
+
+ /* Missed beacon -> TODO
+ if (status & AR5K_INT_BMISS)
+ */
+
+ /* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
+
+ /* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
+ /*
+ * Until we handle rx/tx interrupts mask them on IMR
+ *
+ * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+ * and unset after we've handled the interrupts.
+ */
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
+ /* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
* for temperature/environment changes.
*/
static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ calib_work);
+
+ /* Should we run a full calibration ? */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "running full calibration\n");
+
+ if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+ /*
+ * Rfgain is out of bounds, reset the chip
+ * to load new gain values.
+ */
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "got new rfgain, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+
+ /* TODO: On full calibration we should stop TX here,
+ * so that it doesn't interfere (mostly due to gain_f
+ * calibration that messes with tx packets -see phy.c).
+ *
+ * NOTE: Stopping the queues from above is not enough
+ * to stop TX but saves us from disconnecting (at least
+ * we don't lose packets). */
+ ieee80211_stop_queues(ah->hw);
+ } else
+ ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
- /* Only full calibration for now */
- ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
- if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
- /*
- * Rfgain is out of bounds, reset the chip
- * to load new gain values.
- */
- ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ieee80211_queue_work(ah->hw, &ah->reset_work);
- }
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
- /* Noise floor calibration interrupts rx/tx path while I/Q calibration
- * doesn't.
- * TODO: We should stop TX here, so that it doesn't interfere.
- * Note that stopping the queues is not enough to stop TX! */
- if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
- ah->ah_cal_next_nf = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
- ath5k_hw_update_noise_floor(ah);
- }
-
- ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ /* Clear calibration flags */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+ ieee80211_wake_queues(ah->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
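
For context, this work item is kicked from the interrupt path by ath5k_intr_calibration_poll() (called at the end of ath5k_intr() above). One plausible shape for such a poll, reconstructed as a hedged sketch rather than copied from this commit, using only deadlines and flags that appear in the patch:

static void example_intr_calibration_poll(struct ath5k_hw *ah)
{
	/* never interleave short/ANI work with a running full calibration */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);
	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
	/* the full-calibration deadline is checked inside calib_work itself */
}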
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_irq;
- /* set up multi-rate retry capabilities */
- if (ah->ah_version == AR5K_AR5212) {
+ /* Set up multi-rate retry capabilities */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.channel;
- ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
- AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+ ah->imask = AR5K_INT_RXOK
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXEOL
+ | AR5K_INT_FATAL
+ | AR5K_INT_GLOBAL
+ | AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
- ath5k_rfkill_hw_start(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
- tasklet_kill(&ah->calib);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ah->tx_complete_work);
- ath5k_rfkill_hw_stop(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_stop(ah);
}
/*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ath5k_ani_init(ah, ani_mode);
- ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
- ah->ah_cal_next_ani = jiffies;
- ah->ah_cal_next_nf = jiffies;
+ /*
+ * Set calibration intervals
+ *
+	 * Note: We don't need to run calibration immediately
+ * since some initial calibration is done on reset
+ * even for fast channel switching. Also on scanning
+ * this will get set again and again and it won't get
+ * executed unless we connect somewhere and spend some
+ * time on the channel (that's what calibration needs
+ * anyway to be accurate).
+ */
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
/*
- * Check if the MAC has multi-rate retry support.
- * We do this by trying to setup a fake extended
- * descriptor. MACs that don't have support will
- * return false w/o doing anything. MACs that do
- * support it will return true w/o doing anything.
- */
- ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
- if (ret < 0)
- goto err;
- if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, ah->status);
-
- /*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba96702..994169ad39c 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
- if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
- if (AR5K_EEPROM_HDR_11G(ee_header) &&
- ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G, caps->cap_mode);
+ /* Override 2GHz modes on SoCs that need it
+ * NOTE: cap_needs_2GHz_ovr gets set from
+ * ath_ahb_probe */
+ if (!caps->cap_needs_2GHz_ovr) {
+ if (AR5K_EEPROM_HDR_11B(ee_header))
+ __set_bit(AR5K_MODE_11B,
+ caps->cap_mode);
+
+ if (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)
+ __set_bit(AR5K_MODE_11G,
+ caps->cap_mode);
+ }
}
}
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
- /* newer hardware has PHY error counters */
+ /* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
+ /* MACs since AR5212 have MRR support */
+ if (ah->ah_version == AR5K_AR5212)
+ caps->cap_has_mrr_support = true;
+ else
+ caps->cap_has_mrr_support = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda8222..f8bfa3ac2af 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
#include "debug.h"
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors tell the hw how to send or
+ * receive a packet, where to read/write it from/to etc, and status descriptors
+ * contain information about how the packet was sent or received (errors
+ * included).
+ *
+ * Descriptor format is not exactly the same for each MAC chip version, so we
+ * have function pointers on &struct ath5k_hw that we initialize at runtime
+ * based on the chip used.
+ */
+
+
/************************\
* TX Control descriptors *
\************************/
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put in the duration field of the RTS/CTS header
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, int padsize,
- enum ath5k_pkt_type type,
- unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
- unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put in the duration field of the RTS/CTS header
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
- struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
- int padsize,
- enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
- unsigned int tx_tries0, unsigned int key_index,
- unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate,
- unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return 0;
}
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs; they are part of the normal 4-word tx control descriptor (see above)
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u_int tx_rate1, u_int tx_tries1,
+ u_int tx_rate2, u_int tx_tries2,
+ u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* TX Status descriptors *
\***********************/
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
* RX Descriptors *
\****************/
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
*/
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MACs.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc,
- struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Attach *
\********/
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, used
+ * by the layers above. This acts as an abstraction layer so the various chips
+ * are handled the same way.
*/
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
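
With the pointers initialized here, callers never branch on the MAC version; they just go through ah->ah_setup_tx_desc and friends. A hypothetical usage sketch, where only the parameter order is taken from the signatures in this file and the constant names (AR5K_PKT_TYPE_NORMAL, AR5K_TXKEYIX_INVALID), helper and placeholder values are assumptions:

static int example_fill_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *ds,
				struct sk_buff *skb)
{
	return ah->ah_setup_tx_desc(ah, ds,
			skb->len,				/* pkt_len */
			ieee80211_get_hdrlen_from_skb(skb),	/* hdr_len */
			0,					/* padsize */
			AR5K_PKT_TYPE_NORMAL,			/* type */
			40,					/* tx_power, 0.5dB steps */
			0, 1,					/* tx_rate0, tx_tries0 */
			AR5K_TXKEYIX_INVALID,			/* no encryption */
			0,					/* antenna_mode: auto */
			0,					/* flags */
			0, 0);					/* rts/cts rate, duration */
}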
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b548f..8d6c01a49ea 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
* RX/TX descriptor structures
*/
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
- u32 rx_control_0; /* RX control word 0 */
- u32 rx_control_1; /* RX control word 1 */
+ u32 rx_control_0;
+ u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
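
As a quick orientation for how these *_CTL1_* masks are meant to be used against the words of struct ath5k_hw_rx_ctl, fields are packed and extracted with plain mask-and-shift (the driver wraps this in AR5K_REG_SM/AR5K_REG_MS). Hypothetical helpers for illustration only, not part of the patch:

static inline u32 example_rx_buf_len(const struct ath5k_hw_rx_ctl *ctl)
{
	/* BUF_LEN occupies the low bits, so no shift is needed */
	return ctl->rx_control_1 & AR5K_DESC_RX_CTL1_BUF_LEN;
}

static inline void example_rx_request_intr(struct ath5k_hw_rx_ctl *ctl)
{
	ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
}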
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
- u32 rx_status_0; /* RX status word 0 */
- u32 rx_status_1; /* RX status word 1 */
+ u32 rx_status_0;
+ u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
/**
* enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
- AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
- AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
- AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
- AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
- AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
- AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
- /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0,
+ AR5K_RX_PHY_ERROR_TIMING = 1,
+ AR5K_RX_PHY_ERROR_PARITY = 2,
+ AR5K_RX_PHY_ERROR_RATE = 3,
+ AR5K_RX_PHY_ERROR_LENGTH = 4,
+ AR5K_RX_PHY_ERROR_RADAR = 5,
+ AR5K_RX_PHY_ERROR_SERVICE = 6,
+ AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_0;
+ u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
- u32 tx_control_2; /* TX control word 2 */
- u32 tx_control_3; /* TX control word 3 */
+ u32 tx_control_0;
+ u32 tx_control_1;
+ u32 tx_control_2;
+ u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
- u32 tx_status_0; /* TX status word 0 */
- u32 tx_status_1; /* TX status word 1 */
+ u32 tx_status_0;
+ u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
* This is read and written to by the hardware
*/
struct ath5k_desc {
- u32 ds_link; /* physical address of the next descriptor */
- u32 ds_data; /* physical address of data buffer (skb) */
+ u32 ds_link;
+ u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c7f4b..5cc9aa81469 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
* DMA and interrupt masking functions *
\*************************************/
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
*
* Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
* handle queue setup for 5210 chipset (rest are handled on qcu.c).
* Also we setup interrupt mask register (IMR) and read the various interrupt
* status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
*/
#include "ath5k.h"
@@ -42,22 +39,22 @@
\*********/
/**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
\**********/
/**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
- *
*/
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
*
* XXX: Is TXDP read and clear ?
*/
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
+ * @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
}
/**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
* buffer (aka FIFO threshold) that is used to indicate when PCU flushes
* the buffer and transmits its data. Lowering this results sending small
* frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64 bytes) and if we get a tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
*
* XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
*/
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
@@ -498,21 +494,20 @@ done:
\*******************/
/**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we haven't.
*/
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
* @ah: The @struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
*/
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
- u32 data;
+ u32 data = 0;
/*
- * Read interrupt status from the Interrupt Status register
- * on 5210
+ * Read interrupt status from Primary Interrupt
+ * Register.
+ *
+ * Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
- data = ath5k_hw_reg_read(ah, AR5K_ISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ u32 isr = 0;
+ isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+ if (unlikely(isr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = isr;
return -ENODEV;
}
- } else {
+
/*
- * Read interrupt status from Interrupt
- * Status Register shadow copy (Read And Clear)
- *
- * Note: PISR/SISR Not available on 5210
+ * Filter out the non-common bits from the interrupt
+ * status.
*/
- data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+		/* Handle INT_FATAL */
+ if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+ | AR5K_ISR_DPERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*
+ * XXX: BMISS interrupts may occur after association.
+ * I found this on 5210 code but it needs testing. If this is
+ * true we should disable them before assoc and re-enable them
+ * after a successful assoc + some jiffies.
+ interrupt_mask &= ~AR5K_INT_BMISS;
+ */
+
+ data = isr;
+ } else {
+ u32 pisr = 0;
+ u32 pisr_clear = 0;
+ u32 sisr0 = 0;
+ u32 sisr1 = 0;
+ u32 sisr2 = 0;
+ u32 sisr3 = 0;
+ u32 sisr4 = 0;
+
+ /* Read PISR and SISRs... */
+ pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+ if (unlikely(pisr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = pisr;
return -ENODEV;
}
- }
- /*
- * Get abstract interrupt mask (driver-compatible)
- */
- *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+ sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+ sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+ sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+ sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+ sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
- if (ah->ah_version != AR5K_AR5210) {
- u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+ /*
+ * PISR holds the logical OR of interrupt bits
+ * from SISR registers:
+ *
+ * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
+ * per-queue bits on SISR0
+ *
+ * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+ * per-queue bits on SISR1
+ *
+ * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+ *
+		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
+ *
+ * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+ * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+ * (and TSFOOR ?) bits on SISR2
+ *
+ * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+ * QCBRURN per-queue bits on SISR3
+ * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+ *
+		 * If we clear these bits on PISR we'll also clear all
+		 * related bits from SISRs, e.g. if we write the TXOK bit on
+		 * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+		 * interrupt got fired for another queue while we were reading
+		 * the interrupt registers and we write back the TXOK bit on
+		 * PISR we'll lose it. So make sure that we don't write back
+		 * on PISR any bits that come from SISRs. Clearing them from
+		 * SISRs will also clear PISR, so no need to worry here.
+ */
- /*HIU = Host Interface Unit (PCI etc)*/
- if (unlikely(data & (AR5K_ISR_HIUERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
- /*Beacon Not Ready*/
- if (unlikely(data & (AR5K_ISR_BNR)))
- *interrupt_mask |= AR5K_INT_BNR;
+ /*
+ * Write to clear them...
+ * Note: This means that each bit we write back
+ * to the registers will get cleared, leaving the
+ * rest unaffected. So this won't affect new interrupts
+		 * we didn't catch while reading/processing, we'll get
+ * them next time get_isr gets called.
+ */
+ ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+ ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+ ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+ ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+ ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+ ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+ /* Flush previous write */
+ ath5k_hw_reg_read(ah, AR5K_PISR);
- if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
- AR5K_SISR2_DPERR |
- AR5K_SISR2_MCABT)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+		/* We treat TXOK, TXDESC, TXERR and TXEOL
+ * the same way (schedule the tx tasklet)
+ * so we track them all together per queue */
+ if (pisr & AR5K_ISR_TXOK)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXOK);
- if (data & AR5K_ISR_TIM)
+ if (pisr & AR5K_ISR_TXDESC)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXDESC);
+
+ if (pisr & AR5K_ISR_TXERR)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXERR);
+
+ if (pisr & AR5K_ISR_TXEOL)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXEOL);
+
+		/* Currently this is not very useful since we treat
+		 * all queues the same way if we get a TXURN (update
+		 * tx trigger level) but we might need it later on */
+ if (pisr & AR5K_ISR_TXURN)
+ ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+ AR5K_SISR2_QCU_TXURN);
+
+ /* Misc Beacon related interrupts */
+
+ /* For AR5211 */
+ if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
- if (data & AR5K_ISR_BCNMISC) {
+ /* For AR5212+ */
+ if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
- if (data & AR5K_ISR_RXDOPPLER)
- *interrupt_mask |= AR5K_INT_RX_DOPPLER;
- if (data & AR5K_ISR_QCBRORN) {
+ /* Below interrupts are unlikely to happen */
+
+ /* HIU = Host Interface Unit (PCI etc)
+ * Can be one of MCABT, SSERR, DPERR from SISR2 */
+ if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*Beacon Not Ready*/
+ if (unlikely(pisr & (AR5K_ISR_BNR)))
+ *interrupt_mask |= AR5K_INT_BNR;
+
+ /* A queue got CBR overrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRORN);
+ ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRORN);
}
- if (data & AR5K_ISR_QCBRURN) {
+
+ /* A queue got CBR underrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRURN);
+ ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRURN);
}
- if (data & AR5K_ISR_QTRIG) {
+
+ /* A queue got triggered */
+ if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
- AR5K_SISR4_QTRIG);
+ ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+ AR5K_SISR4_QTRIG);
}
- if (data & AR5K_ISR_TXOK)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXOK);
-
- if (data & AR5K_ISR_TXDESC)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXDESC);
-
- if (data & AR5K_ISR_TXERR)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXERR);
-
- if (data & AR5K_ISR_TXEOL)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXEOL);
-
- if (data & AR5K_ISR_TXURN)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
- AR5K_SISR2_QCU_TXURN);
- } else {
- if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
- | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
-
- /*
- * XXX: BMISS interrupts may occur after association.
- * I found this on 5210 code but it needs testing. If this is
- * true we should disable them before assoc and re-enable them
- * after a successful assoc + some jiffies.
- interrupt_mask &= ~AR5K_INT_BMISS;
- */
+ data = pisr;
}
/*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
}
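
On the driver side, splitting the per-queue bitmaps lets the tx tasklet touch only the queues that actually raised an interrupt. A hedged sketch of that consumer; ah_txq_isr_txok_all, tx_pending and ath5k_set_current_imask() appear in this patch, while the txqs[] array and the queue-processing helper name are assumptions based on base.c:

static void example_tasklet_tx(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;
	int i;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	/* done: forget the per-queue bits and unmask TX interrupts */
	ah->ah_txq_isr_txok_all = 0;
	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}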
/**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* ath5k_int bits to hw-specific bits to remove abstraction and writing
* Interrupt Mask Register.
*/
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
+ /* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
- /*Beacon Not Ready*/
- if (new_mask & AR5K_INT_BNR)
- int_mask |= AR5K_INT_BNR;
-
+ /* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
- if (new_mask & AR5K_INT_RX_DOPPLER)
- int_mask |= AR5K_IMR_RXDOPPLER;
+ /*Beacon Not Ready*/
+ if (new_mask & AR5K_INT_BNR)
+ int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
+ /* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
+ /* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
\********************/
/**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* stuck frames on tx queues, only a reset
* can fix that.
*/
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 85929781191..73d3dd8a306 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
#include "reg.h"
#include "debug.h"
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin via the GPIO control
+ * register and then read or set its status from the GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw, LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED but many scenarios
+ * are available from hw. Vendors might also provide LEDs connected to the
+ * GPIO pins, we handle them through the LED subsystem on led.c
+ */
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LEDs connected to the LED_0 and LED_1 pins,
+ * not the GPIO-based ones.
*/
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
*/
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
*/
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
*/
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
0x1;
}
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
*/
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
return 0;
}
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in the EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
*/
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
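
These helpers are enough for the rfkill code to sample the switch once the pin and polarity are known. An illustration only, using the two functions declared in this file; in the driver, gpio and polarity come from the EEPROM rather than being passed in like this:

static bool example_is_rfkill_set(struct ath5k_hw *ah, u16 gpio, bool polhigh)
{
	/* make sure the pin is an input before sampling it */
	ath5k_hw_set_gpio_input(ah, gpio);

	return ath5k_hw_get_gpio(ah, gpio) == (polhigh ? 1 : 0);
}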
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0fd3e..a1ea78e05b4 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
#include "reg.h"
#include "debug.h"
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
*/
-
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
- AR5K_INI_READ = 1, /* Cleared on read */
+ AR5K_INI_READ = 1,
} ini_mode;
};
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
*/
-
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
/* Initial mode-specific settings for AR5211
* 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
- /* A/XR B G */
+ /* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ 0x00000010, 0x00000010, 0x00000010 } },
};
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ 0x00000000, 0x00000000, 0x00000108 } },
};
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
+/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
};
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
*/
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
}
}
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset to a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dfa48eb7d95..849fa060ebc 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
0xffff);
return true;
}
- udelay(15);
+ usleep_range(15, 20);
}
return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3edc2..cebfd6fd31d 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
#include "reg.h"
#include "debug.h"
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The protocol control unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from the
+ * baseband. To be more specific, the PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in sw above for debugging or research. For more info check out the PCU
+ * registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
\*******************/
/**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
+ * @shortpre: Indicate short preamble
*
* Calculate tx duration of a frame given it's rate and length
* It extends ieee80211_generic_frame_duration for non standard
* bwmodes.
*/
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
}
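
For the plain 20MHz case the math this function extends is the usual 802.11a/g airtime formula: 16us preamble, 4us PLCP SIGNAL, then 4us per OFDM data symbol with 22 service/tail bits added to the payload. A minimal reference sketch, not copied from the driver (ath5k additionally scales sifs/preamble/symbol time for the non-standard bwmodes mentioned above):

static int example_ofdm_duration_usec(int len, int rate_kbps)
{
	int bits_per_symbol = rate_kbps * 4 / 1000;		/* N_DBPS */
	int symbols = DIV_ROUND_UP(22 + 8 * len, bits_per_symbol);

	return 16 + 4 + 4 * symbols;	/* preamble + SIGNAL + data symbols */
}
/* e.g. 1500 bytes at 54000 kb/s -> 16 + 4 + 4 * 56 = 244 usec */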
/**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
\******************/
/**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* that include all OFDM and CCK rates.
*
*/
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
\*******************/
/**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
}
/**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Set the BSSID mask that filters which BSSIDs we listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
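
/* Editor's note: the masking rule described above boils down to a receive
 * match succeeding when (bssid & mask) == (our_mac & mask), so the mask must
 * keep only the bits on which all local MAC addresses agree. A minimal sketch
 * of deriving such a mask (illustrative helper, not the ath common code): */

#include <stdint.h>
#include <stddef.h>

/* Keep a mask bit set only where every local MAC address has the same value. */
static void bssid_mask_from_macs(const uint8_t macs[][6], size_t n, uint8_t mask[6])
{
	size_t i, b;

	for (b = 0; b < 6; b++)
		mask[b] = 0xff;

	for (i = 1; i < n; i++)
		for (b = 0; b < 6; b++)
			mask[b] &= ~(macs[0][b] ^ macs[i][b]);
}
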
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32 bits of multicast filter
+ * @filter1: Upper 32 bits of multicast filter
*/
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
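
/* Editor's note: for context, the two registers written here behave as the
 * lower and upper halves of a 64-bit multicast hash filter: a group address
 * is hashed to a bit index and the frame is accepted if that bit is set.
 * A sketch of building the two words from a list of hash indices (the hash
 * function itself is hardware specific and assumed here): */

#include <stdint.h>
#include <stddef.h>

/* Assemble filter0 (bits 0-31) and filter1 (bits 32-63) from hash bit indices. */
static void build_mcast_filter(const unsigned int *hash_bits, size_t n,
			       uint32_t *filter0, uint32_t *filter1)
{
	size_t i;

	*filter0 = 0;
	*filter1 = 0;

	for (i = 0; i < n; i++) {
		if (hash_bits[i] < 32)
			*filter0 |= 1u << hash_bits[i];
		else if (hash_bits[i] < 64)
			*filter1 |= 1u << (hash_bits[i] - 32);
	}
}
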
/**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
#define ATH5K_MAX_TSF_READ 10
/**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
+#undef ATH5K_MAX_TSF_READ
+
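
/* Editor's note: the read loop above is the usual way to read a 64-bit counter
 * exposed as two 32-bit registers: read the upper half, then the lower, then
 * the upper again, and retry if the upper half changed (meaning the lower
 * wrapped in between). A stripped-down version of the same pattern, with
 * hypothetical register readers standing in for the ath5k accessors: */

#include <stdint.h>

extern uint32_t read_tsf_upper(void);	/* hypothetical, for illustration */
extern uint32_t read_tsf_lower(void);	/* hypothetical, for illustration */

static uint64_t read_tsf64(void)
{
	uint32_t upper1, upper2, lower;
	int tries = 10;

	upper1 = read_tsf_upper();
	do {
		lower  = read_tsf_lower();
		upper2 = read_tsf_upper();
		if (upper1 == upper2)
			break;		/* consistent snapshot */
		upper1 = upper2;	/* lower half wrapped, retry */
	} while (--tries);

	return ((uint64_t)upper1 << 32) | lower;
}
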
/**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
*/
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
/**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
}
/**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
}
/**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
\***************************/
/**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * at5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
return 0;
}
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72de44c..e1f8613426a 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
/*
- * PHY functions
- *
* Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
*
*/
+/***********************\
+* PHY related functions *
+\***********************/
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -31,14 +33,53 @@
#include "../regd.h"
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also keep in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
*/
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
{
unsigned int i;
u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return 0;
}
- mdelay(2);
+ usleep_range(2000, 2500);
/* ...wait until PHY is ready and read the selected radio revision */
ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return ret;
}
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
*/
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
u16 freq = channel->center_freq;
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
return false;
}
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
return false;
}
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: True to write @val to the buffer, false to only read it back
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * infos.
*/
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
- const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
u32 val, u8 reg_id, bool set)
{
const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
* @ah: the &struct ath5k_hw
* @channel: the currently set channel upon reset
*
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
* mantissa and provide these values on hw.
*
* For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
*/
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
/* Get exponent and mantissa and set it */
u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
return 0;
}
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
/*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
delay = delay << 2;
/* XXX: /2 on turbo ? Let's be safe
* for now */
- udelay(100 + delay);
+ usleep_range(100 + delay, 100 + (2 * delay));
} else {
- mdelay(1);
+ usleep_range(1000, 1500);
}
}
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* RF Gain optimization *
\**********************/
-/*
+/**
+ * DOC: RF Gain optimization
+ *
* This code is used to optimize RF gain on different environments
* (temperature mostly) based on feedback from a power detector.
*
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* no gain optimization ladder-.
*
* For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
*
* This paper describes power drops as seen on the receiver due to
* probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
*
* And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
* with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
*/
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
return 0;
}
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
- * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
*/
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
{
/* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
}
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
return ah->ah_gain.g_f_corr;
}
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
* power detector windows. If we get a measurement outside range
* we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
ah->ah_gain.g_current <= level[3]);
}
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
{
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
return ret;
}
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
* Check for a new gain reading and schedule an adjustment
* if needed.
*
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
{
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
return ah->ah_gain.g_state;
}
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
}
-
/********************\
* RF Registers setup *
\********************/
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more infos on this, check out rfbuffer.h
*/
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
PHY/RF channel functions
\**************************/
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
*/
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
u32 athchan;
- /*
- * Convert IEEE channel/MHz to an internal channel value used
- * by the AR5210 chipset. This has not been verified with
- * newer chipsets like the AR5212A who have a completely
- * different RF/PHY part.
- */
athchan = (ath5k_hw_bitswap(
(ieee80211_frequency_to_channel(
channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
return athchan;
}
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
data = ath5k_hw_rf5110_chan2athchan(channel);
ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
- mdelay(1);
+ usleep_range(1000, 1500);
return 0;
}
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
*/
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
{
int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
return 0;
}
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value after a few modifications to the
+ * chip directly.
+ *
+ * NOTE: Make sure the channel frequency given is within our range or else
+ * we might damage the chip! Use ath5k_channel_ok before calling this one.
*/
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
+ /* My guess based on code:
+ * 2GHz RF has 2 synth modes, one with a Local Oscillator
+ * at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
+ * (3040/2). data0 is used to set the PLL divider and data1
+ * selects synth mode. */
if (c < 4800) {
+ /* Channel 14 and all frequencies with 5MHz spacing
+ * below/above (non-standard channels) */
if (!((c - 2224) % 5)) {
+ /* Same as (c - 2224) / 5 */
data0 = ((2 * (c - 704)) - 3040) / 10;
data1 = 1;
+ /* Channel 1 and all frequencies with 5MHz spacing
+ * below/above (standard channels without channel 14) */
} else if (!((c - 2192) % 5)) {
+ /* Same as (c - 2192) / 5 */
data0 = ((2 * (c - 672)) - 3040) / 10;
data1 = 0;
} else
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+ /* This is more complex, we have a single synthesizer with
+ * 4 reference clock settings (?) based on frequency spacing
+ * and set using data2. LO is at 4800MHz and data0 is again used
+ * to set some divider.
+ *
+ * NOTE: There is an old Atheros presentation at Stanford
+ * that mentions a method called dual direct conversion
+ * with 1GHz sliding IF for RF5110. Maybe that's what we
+ * have here, or an updated version. */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return 0;
}
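
/* Editor's note, a worked example of the 2GHz branch above (numbers follow the
 * formula only): channel 1 sits at c = 2412MHz, (2412 - 2192) % 5 == 0, so
 * data0 = ((2 * (2412 - 672)) - 3040) / 10 = (3480 - 3040) / 10 = 44 (the same
 * as (2412 - 2192) / 5) with data1 = 0; channel 14 sits at c = 2484MHz,
 * (2484 - 2224) % 5 == 0, so data0 = ((2 * (2484 - 704)) - 3040) / 10 = 52
 * with data1 = 1. In both cases data0 is then bit-swapped before being
 * written to the RF buffer. */
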
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF, so the code differs
+ * a little from RF5112.
*/
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
*/
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
return 0;
}
+
/*****************\
PHY calibration
\*****************/
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and does
+ * a calibration on the DC offsets of ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * part.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more infos on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
{
int i;
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
hist->nfval[hist->index] = noise_floor;
}
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
{
s16 sort[ATH5K_NF_CAL_HIST_MAX];
s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
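
/* Editor's note: a reduced model of the history-buffer-plus-median scheme used
 * above: a small ring buffer of recent readings, and the median taken from a
 * sorted copy (buffer depth is assumed for the sketch): */

#include <stdint.h>
#include <string.h>

#define NF_HIST_MAX 8	/* assumed history depth, for illustration */

struct nf_hist {
	int16_t val[NF_HIST_MAX];
	unsigned int index;
};

static void nf_hist_update(struct nf_hist *h, int16_t nf)
{
	h->index = (h->index + 1) % NF_HIST_MAX;
	h->val[h->index] = nf;
}

static int16_t nf_hist_median(const struct nf_hist *h)
{
	int16_t sort[NF_HIST_MAX];
	int16_t tmp;
	int i, j;

	memcpy(sort, h->val, sizeof(sort));

	/* insertion sort, the buffer is tiny */
	for (i = 1; i < NF_HIST_MAX; i++) {
		tmp = sort[i];
		for (j = i - 1; j >= 0 && sort[j] > tmp; j--)
			sort[j + 1] = sort[j];
		sort[j + 1] = tmp;
	}

	return sort[(NF_HIST_MAX - 1) / 2];
}
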
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
*
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration,
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
return;
}
+ ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
/* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_noise_floor = nf;
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
*/
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 phy_sig, phy_agc, phy_sat, beacon;
int ret;
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+ return 0;
+
/*
* Disable beacons and RX/TX queues, wait
*/
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
- mdelay(2);
+ usleep_range(2000, 2500);
/*
* Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
* Activate PHY and wait
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
return 0;
}
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
- if (!ah->ah_calibration ||
- ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
- return 0;
+ /* Skip if I/Q calibration is not needed or if it's still running */
+ if (!ah->ah_iq_cal_needed)
+ return -EINVAL;
+ else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "I/Q calibration still running");
+ return -EBUSY;
+ }
/* Calibration has finished, get the results and re-run */
- /* work around empty results which can apparently happen on 5212 */
+
+ /* Workaround for empty results which can apparently happen on 5212:
+ * read registers up to 10 times until we get both i_pwr and q_pwr */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
else
q_coffd = q_pwr >> 7;
- /* protect against divide by 0 and loss of sign bits */
+ /* In case i_coffd became zero, cancel calibration:
+ * not only is it too small, it'll also result in a divide
+ * by zero later on. */
if (i_coffd == 0 || q_coffd < 2)
- return 0;
+ return -ECANCELED;
+
+ /* Protect against loss of sign bits */
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
return 0;
}
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
*/
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ath5k_hw_rf5110_calibrate(ah, channel);
ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ if (ret) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "No I/Q correction performed (%uMHz)\n",
+ channel->center_freq);
+
+ /* Happens all the time if there is not much
+ * traffic, consider it normal behaviour. */
+ ret = 0;
+ }
+
+ /* On full calibration do an AGC calibration and
+ * request a PAPD probe for gainf calibration if
+ * needed */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
- (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL);
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+ 0, false);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ if ((ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112)
+ && (channel->hw_value != AR5K_MODE_11B))
+ ath5k_hw_request_rfgain_probe(ah);
+ }
+
+ /* Update noise floor
+ * XXX: Only do this after AGC calibration */
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+ ath5k_hw_update_noise_floor(ah);
return ret;
}
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
* Spur mitigation functions *
\***************************/
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects; for more information on this
+ * method check out patent US7643810.
+ */
static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
* Antenna control *
\*****************/
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode: A -> Rx, B -> Tx
+ *
+ * Also note that when the tx antenna on the tx descriptor is set to F,
+ * the card inverts the current tx antenna.
+ */
+
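
/* Editor's note: a rough illustration of how the fixed-antenna scenarios above
 * map onto the helpers in this file (a sketch only; the real
 * ath5k_hw_set_antenna_mode() also adjusts STA_ID1 bits and the switch table).
 * It assumes the driver's internal helpers ath5k_hw_set_def_antenna() and
 * ath5k_hw_set_fast_div() shown below. */

/* Sketch: pin RX to a single antenna and turn off fast diversity,
 * roughly what a FIXED_A / FIXED_B style mode needs. */
static void example_fix_antenna(struct ath5k_hw *ah, u8 ant, u8 ee_mode)
{
	ath5k_hw_set_def_antenna(ah, ant);	/* 1 = MAIN, 2 = AUX */
	ath5k_hw_set_fast_div(ah, ee_mode, false);
}
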
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
*/
static void
ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
}
}
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
void
ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
AR5K_PHY_ANT_SWITCH_TABLE_1);
}
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
*/
void
ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
* Helper functions
*/
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
*/
static s16
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
return result;
}
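
/* Editor's note: the helper documented above is plain linear interpolation; in
 * integer arithmetic the essential computation is just the following (the
 * degenerate-interval choice is arbitrary here and the driver may handle it
 * differently): */

#include <stdint.h>

/* Y value at 'target' on the line through (x_left, y_left) and (x_right, y_right). */
static int16_t interpolate_s16(int16_t target, int16_t x_left, int16_t x_right,
			       int16_t y_left, int16_t y_right)
{
	if (x_right == x_left)		/* degenerate interval */
		return y_left;

	return y_left + (int16_t)(((int32_t)(y_right - y_left) *
				   (target - x_left)) / (x_right - x_left));
}
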
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
*
* Since we have the top of the curve and we draw the line below
* until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
*/
static s16
ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
return max(min_pwrL, min_pwrR);
}
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* Interpolate (pwr,vpd) points to create a Power to PDADC or a
* Power to PCDAC curve.
*
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
}
}
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
* them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
*pcinfo_r = &pcinfo[idx_r];
}
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
* Get the surrounding per-rate power calibration data
* for a given frequency and interpolate between power
* values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
*/
static void
ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
rpinfo[idx_r].target_power_54);
}
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
* Power to PCDAC table functions
*/
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we get the calibrated points (piers) for it or
+ * -if we don't have calibration data for this specific channel- from the
+ * available surrounding channels we have calibration data for, after we do a
+ * linear interpolation between them. Then since we have our calibrated points
+ * for this channel, we do again a linear interpolation between them to get the
+ * whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
*
* No further processing is needed for RF5111, the only thing we have to
* do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
}
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
*
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
* RFX112 can have up to 2 curves (one for low txpower range and one for
* higher txpower range). We need to put them both on pcdac_out and place
* them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
}
}
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
* Power to PDADC table functions
*/
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5 dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
*
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
* We can have up to 4 pd curves, we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
}
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
* Common code for PCDAC/PDADC tables
*/
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
* This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
return 0;
}
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
static void
ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
{
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
ath5k_write_pcdac_table(ah);
}
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
*
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
* maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
*
* For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
*
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
* rates[0] - rates[7] -> OFDM rates
* rates[8] - rates[14] -> CCK rates
* rates[15] -> XR rates (they all have the same power)
*/
+
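
/* Editor's note: the index layout above can be written down directly; a sketch
 * of such a table, with the regions labelled (the exact rate-to-slot mapping
 * inside each region is assumed here): */

#include <stdint.h>

/* 16-entry rate power table; each entry is an index into the
 * PCDAC/PDADC table (0.5dB steps). */
struct rate_pwr_table {
	uint8_t idx[16];
	/* idx[0]  .. idx[7]  -> OFDM rates
	 * idx[8]  .. idx[14] -> CCK rates
	 * idx[15]            -> XR rates (single shared value) */
};
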
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
}
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
*/
static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case user or an application changes it while PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
}
+
/*************\
Init function
\*************/
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver, it assumes PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast)
{
struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
}
} else if (ah->ah_version == AR5K_AR5210) {
- mdelay(1);
+ usleep_range(1000, 1500);
/* Disable phy and wait */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
for (i = 0; i <= 20; i++) {
if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
break;
- udelay(200);
+ usleep_range(200, 250);
}
ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* At the same time start I/Q calibration for QAM constellation
* -no need for CCK- */
- ah->ah_calibration = false;
+ ah->ah_iq_cal_needed = false;
if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
+ ah->ah_iq_cal_needed = true;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 776654228ea..30b50f93417 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
*/
/********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we set up parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them, so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DCF parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs, allowing us to have different DCF settings for each queue.
+ *
+ * When a frame goes into a TX queue, the QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or, if it's a beacon queue, whether it's time to fire up the queue
+ * based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave the DCU, bypassing all
+ * other frames from other queues waiting to be transmitted). After a frame
+ * leaves the DCU it goes to the PCU for further processing and then to the
+ * PHY for the actual transmission.
+ */
/******************\
* Helper functions *
\******************/
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
return pending;
}
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
{
- u32 cw = 1;
cw_req = min(cw_req, (u16)1023);
- while (cw < cw_req)
- cw = (cw << 1) | 1;
+ /* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
- return cw;
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If none of the above is correct,
+ * round up to the next power of 2, minus 1 */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
}
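
The rewritten helper replaces the old shift loop with log2.h helpers. As a quick illustration of the resulting rounding rules, here is a minimal user-space sketch (plain C; is_pow2(), cw_validate_sketch() and the example values are illustrative, not part of the patch):

        #include <stdio.h>

        static int is_pow2(unsigned int x)
        {
                return x && !(x & (x - 1));
        }

        /* Mirror of the new validation: clamp to 1023, then make sure the
         * result is a power of two minus one. */
        static unsigned int cw_validate_sketch(unsigned int cw_req)
        {
                unsigned int cw = 1;

                if (cw_req > 1023)
                        cw_req = 1023;

                if (is_pow2(cw_req + 1))        /* already 2^n - 1, e.g. 15, 31 */
                        return cw_req;

                if (is_pow2(cw_req))            /* e.g. 16 -> 15 */
                        return cw_req - 1;

                while (cw < cw_req)             /* round up to the next power of two */
                        cw <<= 1;

                return cw - 1;                  /* ... minus one, e.g. 100 -> 127 */
        }

        int main(void)
        {
                printf("%u %u %u %u\n", cw_validate_sketch(15),
                       cw_validate_sketch(16), cw_validate_sketch(100),
                       cw_validate_sketch(5000));       /* 15 15 127 1023 */
                return 0;
        }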
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
*/
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
return 0;
}
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
*/
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
* Single QCU/DCU initialization *
\*******************************/
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
*/
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
\**************************/
/**
- * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
}
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000045d..0ea1608b47f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
* 5211/5212 we have one primary and 4 secondary registers.
* So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
* Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@@ -292,7 +296,10 @@
#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
-#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
+ * NOTE: We don't have per-queue info for this
+ * one, but we can enable it per-queue through
+ * TXNOFRM_QCU field on TXNOFRM register */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -302,21 +309,29 @@
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
-#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
+ * 'or' of MCABT, SSERR, DPERR from SISR2 */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
-#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
- CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
+ * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+ AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+ AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+ AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+ AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
/*
* Secondary status registers [5211+] (0 - 4)
*
@@ -347,7 +362,7 @@
#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
-#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
+#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac257b4b..250db40b751 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
*
*/
-/*****************************\
- Reset functions and helpers
-\*****************************/
+/****************************\
+ Reset function and helpers
+\****************************/
#include <asm/unaligned.h>
@@ -33,14 +33,36 @@
#include "debug.h"
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit anywhere else, such as clock, sleep and power control.
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns 0 on success or -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us
*/
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
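
For readers unfamiliar with this helper, the kerneldoc above describes a bounded poll loop. A stand-alone sketch of that pattern follows (the read_reg/delay_us callbacks and the POLL_ROUNDS bound are illustrative assumptions, not the driver's actual constants):

        #include <errno.h>
        #include <stdbool.h>
        #include <stdint.h>

        #define POLL_ROUNDS     20000   /* illustrative retry bound */
        #define POLL_DELAY_US   15      /* delay between reads, in microseconds */

        /* Poll @reg until @flag gets set (is_set == true), or until the bits
         * selected by @flag equal @val (field check / flag-cleared check).
         * Returns 0 on success or -EAGAIN on timeout. */
        int register_timeout_sketch(uint32_t (*read_reg)(uint32_t reg),
                                    void (*delay_us)(unsigned int us),
                                    uint32_t reg, uint32_t flag,
                                    uint32_t val, bool is_set)
        {
                int i;

                for (i = POLL_ROUNDS; i > 0; i--) {
                        uint32_t data = read_reg(reg);

                        if (is_set && (data & flag))
                                break;
                        else if ((data & flag) == val)
                                break;
                        delay_us(POLL_DELAY_US);
                }

                return i > 0 ? 0 : -EAGAIN;
        }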
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
\*************************/
/**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
*/
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
* @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
*/
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
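
Since the two conversions above are just a multiply/divide by the current clock rate, a short worked example (the 40 MHz rate is only an assumed illustration, not a value from the patch):

        #include <stdio.h>

        /* usec <-> hw clock units, mirroring htoclock/clocktoh for an
         * assumed clock rate of 40 clock units per microsecond (40 MHz). */
        int main(void)
        {
                unsigned int clockrate = 40;
                unsigned int slot_usec = 9;                     /* 9 us slot time */
                unsigned int slot_clk = slot_usec * clockrate;  /* 360 clock units */

                printf("%u us -> %u clocks -> %u us\n",
                       slot_usec, slot_clk, slot_clk / clockrate);
                return 0;
        }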
/**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
*
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
*/
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
}
}
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- * 123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
*/
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
* Reset/Sleep control *
\*********************/
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
- udelay(15);
+ usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
return ret;
}
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = __raw_readl(reg);
__raw_writel(regval | val, reg);
regval = __raw_readl(reg);
- udelay(100);
+ usleep_range(100, 150);
/* Bring BB/MAC out of reset */
__raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
return 0;
}
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register in reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
*/
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
- udelay(15);
+ usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
/* Wait a bit and retry */
- udelay(50);
+ usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
@@ -523,17 +589,20 @@ commit:
return 0;
}
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
*
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clean sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
* without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
*/
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return 0;
/* Make sure device is awake */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
/* ...wakeup again!*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return ret;
}
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
*/
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
/* ...wakeup again!...*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
- udelay(300);
+ usleep_range(300, 350);
}
/* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* Post-initvals register modifications *
\**************************************/
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by initvals, e.g. bwmode
+ * settings, some phy settings, workarounds etc that in general
+ * don't fit anywhere else or are too small to introduce a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
}
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use the settings stored in EEPROM to properly initialize the card
+ * based on various info and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
* Main reset function *
\*********************/
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1047,7 +1159,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
if (fast && (ah->ah_radio != AR5K_RF2413) &&
(ah->ah_radio != AR5K_RF5413))
- fast = 0;
+ fast = false;
/* Disable sleep clock operation
* to avoid register access delay on certain
@@ -1073,7 +1185,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret && fast) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
- fast = 0;
+ fast = false;
/* Non fatal, just continue with
* normal reset */
ret = 0;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Initialize PCU
*/
- ath5k_hw_pcu_init(ah, op_mode, mode);
+ ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23b429..aed34d9954c 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
*/
-/*
+/**
+ * DOC: RF Buffer registers
+ *
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
*/
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
* Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
- u8 rfb_bank; /* RF Bank number */
- u16 rfb_ctrl_register; /* RF Buffer control register */
- u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
+ u8 rfb_bank;
+ u16 rfb_ctrl_register;
+ u32 rfb_mode_data[3];
};
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
- u8 len; /* Field length */
- u16 pos; /* Offset on the raw packet */
- u8 col; /* Column -used for shifting */
+ u8 len;
+ u16 pos;
+ u8 col;
};
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index on ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an rf
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
*/
struct ath5k_rf_reg {
- u8 bank; /* RF Buffer Bank number */
- u8 index; /* Register's index on rf_regs_idx */
- struct ath5k_rfb_field field; /* RF Buffer field for this register */
+ u8 bank;
+ u8 index;
+ struct ath5k_rfb_field field;
};
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
* We do this to handle common bits and make our
* life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae052d89..4d21df0e597 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
*
*/
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
* Mode-specific RF Gain table (64bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
- u16 rfg_register; /* RF Gain register address */
+ u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
+
/*
+ * RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
};
/*
+ * RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 39f002ed4a8..00f01581934 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -3,7 +3,8 @@
#include <linux/tracepoint.h>
-#ifndef CONFIG_ATH5K_TRACER
+
+#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
@@ -93,7 +94,7 @@ TRACE_EVENT(ath5k_tx_complete,
#endif /* __TRACE_ATH5K_H */
-#ifdef CONFIG_ATH5K_TRACER
+#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8f7a0d1c290..70706930355 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -23,7 +23,7 @@
obj-$(CONFIG_ATH6KL) := ath6kl.o
ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
ath6kl-y += htc.o
ath6kl-y += bmi.o
ath6kl-y += cfg80211.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index c5d11cc536e..bce3575c310 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -19,165 +19,6 @@
#include "target.h"
#include "debug.h"
-static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
-{
- u32 addr;
- unsigned long timeout;
- int ret;
-
- ar->bmi.cmd_credits = 0;
-
- /* Read the counter register to get the command credits */
- addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
- /*
- * Hit the credit counter with a 4-byte access, the first byte
- * read will hit the counter and cause a decrement, while the
- * remaining 3 bytes has no effect. The rationale behind this
- * is to make all HIF accesses 4-byte aligned.
- */
- ret = hif_read_write_sync(ar, addr,
- (u8 *)&ar->bmi.cmd_credits, 4,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to decrement the command credit count register: %d\n",
- ret);
- return ret;
- }
-
- /* The counter is only 8 bits.
- * Ignore anything in the upper 3 bytes
- */
- ar->bmi.cmd_credits &= 0xFF;
- }
-
- if (!ar->bmi.cmd_credits) {
- ath6kl_err("bmi communication timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
-{
- unsigned long timeout;
- u32 rx_word = 0;
- int ret = 0;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !rx_word) {
- ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
- (u8 *)&rx_word, sizeof(rx_word),
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
- return ret;
- }
-
- /* all we really want is one bit */
- rx_word &= (1 << ENDPOINT1);
- }
-
- if (!rx_word) {
- ath6kl_err("bmi_recv_buf FIFO empty\n");
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- ret = ath6kl_get_bmi_cmd_credits(ar);
- if (ret)
- return ret;
-
- addr = ar->mbox_info.htc_addr;
-
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_WR_SYNC_BYTE_INC);
- if (ret)
- ath6kl_err("unable to send the bmi data to the device\n");
-
- return ret;
-}
-
-static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- /*
- * During normal bootup, small reads may be required.
- * Rather than issue an HIF Read and then wait as the Target
- * adds successive bytes to the FIFO, we wait here until
- * we know that response data is available.
- *
- * This allows us to cleanly timeout on an unexpected
- * Target failure rather than risk problems at the HIF level.
- * In particular, this avoids SDIO timeouts and possibly garbage
- * data on some host controllers. And on an interconnect
- * such as Compact Flash (as well as some SDIO masters) which
- * does not provide any indication on data timeout, it avoids
- * a potential hang or garbage response.
- *
- * Synchronization is more difficult for reads larger than the
- * size of the MBOX FIFO (128B), because the Target is unable
- * to push the 129th byte of data until AFTER the Host posts an
- * HIF Read and removes some FIFO data. So for large reads the
- * Host proceeds to post an HIF Read BEFORE all the data is
- * actually available to read. Fortunately, large BMI reads do
- * not occur in practice -- they're supported for debug/development.
- *
- * So Host/Target BMI synchronization is divided into these cases:
- * CASE 1: length < 4
- * Should not happen
- *
- * CASE 2: 4 <= length <= 128
- * Wait for first 4 bytes to be in FIFO
- * If CONSERVATIVE_BMI_READ is enabled, also wait for
- * a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
- *
- * CASE 3: length > 128
- * Wait for the first 4 bytes to be in FIFO
- *
- * For most uses, a small timeout should be sufficient and we will
- * usually see a response quickly; but there may be some unusual
- * (debug) cases of BMI_EXECUTE where we want an larger timeout.
- * For now, we use an unbounded busy loop while waiting for
- * BMI_EXECUTE.
- *
- * If BMI_EXECUTE ever needs to support longer-latency execution,
- * especially in production, this code needs to be enhanced to sleep
- * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
- * a function of Host processor speed.
- */
- if (len >= 4) { /* NB: Currently, always true */
- ret = ath6kl_bmi_get_rx_lkahd(ar);
- if (ret)
- return ret;
- }
-
- addr = ar->mbox_info.htc_addr;
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to read the bmi data from the device: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
int ath6kl_bmi_done(struct ath6kl *ar)
{
int ret;
@@ -190,14 +31,12 @@ int ath6kl_bmi_done(struct ath6kl *ar)
ar->bmi.done_sent = true;
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send bmi done: %d\n", ret);
return ret;
}
- ath6kl_bmi_cleanup(ar);
-
return 0;
}
@@ -212,13 +51,13 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
return -EACCES;
}
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send get target info: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
@@ -227,7 +66,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
/* Determine how many bytes are in the Target's targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
(u8 *)&targ_info->byte_count,
sizeof(targ_info->byte_count));
if (ret) {
@@ -246,7 +85,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
}
/* Read the remainder of the targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
((u8 *)targ_info) +
sizeof(targ_info->byte_count),
sizeof(*targ_info) -
@@ -278,8 +117,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -292,8 +131,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- rx_len = (len_remain < BMI_DATASZ_MAX) ?
- len_remain : BMI_DATASZ_MAX;
+ rx_len = (len_remain < ar->bmi.max_data_size) ?
+ len_remain : ar->bmi.max_data_size;
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
@@ -302,13 +141,13 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
offset += sizeof(len);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
if (ret) {
ath6kl_err("Unable to read from the device: %d\n",
ret);
@@ -328,7 +167,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
u32 offset;
u32 len_remain, tx_len;
const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
- u8 aligned_buf[BMI_DATASZ_MAX];
+ u8 aligned_buf[400];
u8 *src;
if (ar->bmi.done_sent) {
@@ -336,12 +175,15 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+ if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
- memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+ if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
+ return -E2BIG;
+
+ memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
@@ -350,7 +192,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
while (len_remain) {
src = &buf[len - len_remain];
- if (len_remain < (BMI_DATASZ_MAX - header)) {
+ if (len_remain < (ar->bmi.max_data_size - header)) {
if (len_remain & 3) {
/* align it with 4 bytes */
len_remain = len_remain +
@@ -360,7 +202,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
}
tx_len = len_remain;
} else {
- tx_len = (BMI_DATASZ_MAX - header);
+ tx_len = (ar->bmi.max_data_size - header);
}
offset = 0;
@@ -373,7 +215,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -398,7 +240,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -415,13 +257,13 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
offset += sizeof(*param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -445,7 +287,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -459,7 +301,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -481,7 +323,7 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -495,13 +337,13 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -524,7 +366,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -542,7 +384,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
offset += sizeof(param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -565,8 +407,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + header;
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + header;
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -577,8 +419,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
- len_remain : (BMI_DATASZ_MAX - header);
+ tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
+ len_remain : (ar->bmi.max_data_size - header);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
@@ -589,7 +431,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -615,7 +457,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -631,7 +473,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to start LZ stream to the device: %d\n",
ret);
@@ -672,10 +514,20 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return ret;
}
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+ ar->bmi.done_sent = false;
+}
+
int ath6kl_bmi_init(struct ath6kl *ar)
{
- ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+ if (WARN_ON(ar->bmi.max_data_size == 0))
+ return -EINVAL;
+
+ /* cmd + addr + len + data_size */
+ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
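
The switch from the fixed MAX_BMI_CMDBUF_SZ to a per-HIF size means the command buffer is now the HIF-reported data size plus the three-u32 header, as the comment in the hunk notes. A tiny sketch of the arithmetic (the 1536-byte data size is an assumed example, not a value from the patch):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Example: a HIF that reports a 1536-byte max BMI data size */
                uint32_t max_data_size = 1536;
                /* cmd id + address + length, one u32 each, precede the payload */
                uint32_t max_cmd_size = max_data_size + 3 * sizeof(uint32_t);

                printf("BMI command buffer: %u bytes\n",
                       (unsigned int)max_cmd_size);     /* 1548 */
                return 0;
        }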
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 96851d5df24..f1ca6812456 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -44,12 +44,6 @@
* BMI handles all required Target-side cache flushing.
*/
-#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
- (sizeof(u32) * 3 /* cmd + addr + len */))
-
-/* Maximum data size used for BMI transfers */
-#define BMI_DATASZ_MAX 256
-
/* BMI Commands */
#define BMI_NO_COMMAND 0
@@ -230,6 +224,8 @@ struct ath6kl_bmi_target_info {
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
int ath6kl_bmi_done(struct ath6kl *ar);
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
struct ath6kl_bmi_target_info *targ_info);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index f517eb8f7b4..6c59a217b1a 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -123,17 +123,50 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.bitrates = ath6kl_a_rates,
};
-static int ath6kl_set_wpa_version(struct ath6kl *ar,
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+/* returns true if scheduled scan was stopped */
+static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+ return false;
+
+ del_timer_sync(&vif->sched_scan_timer);
+
+ ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+
+ ar->state = ATH6KL_STATE_ON;
+
+ return true;
+}
+
+static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return;
+
+ cfg80211_sched_scan_stopped(ar->wiphy);
+}
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
enum nl80211_wpa_versions wpa_version)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
if (!wpa_version) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_2) {
- ar->auth_mode = WPA2_AUTH;
+ vif->auth_mode = WPA2_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_1) {
- ar->auth_mode = WPA_AUTH;
+ vif->auth_mode = WPA_AUTH;
} else {
ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
return -ENOTSUPP;
@@ -142,25 +175,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_auth_type(struct ath6kl *ar,
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
enum nl80211_auth_type auth_type)
{
-
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
switch (auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
- ar->dot11_auth_mode = OPEN_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
- ar->dot11_auth_mode = SHARED_AUTH;
+ vif->dot11_auth_mode = SHARED_AUTH;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
- ar->dot11_auth_mode = LEAP_AUTH;
+ vif->dot11_auth_mode = LEAP_AUTH;
break;
case NL80211_AUTHTYPE_AUTOMATIC:
- ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
break;
default:
@@ -171,11 +203,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
{
- u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
- u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
- &ar->grp_crypto_len;
+ u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+ &vif->grp_crypto_len;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
__func__, cipher, ucast);
@@ -202,6 +234,10 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
*ar_cipher = AES_CRYPT;
*ar_cipher_len = 0;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ *ar_cipher = WAPI_CRYPT;
+ *ar_cipher_len = 0;
+ break;
default:
ath6kl_err("cipher 0x%x not supported\n", cipher);
return -ENOTSUPP;
@@ -210,28 +246,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
return 0;
}
-static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
if (key_mgmt == WLAN_AKM_SUITE_PSK) {
- if (ar->auth_mode == WPA_AUTH)
- ar->auth_mode = WPA_PSK_AUTH;
- else if (ar->auth_mode == WPA2_AUTH)
- ar->auth_mode = WPA2_PSK_AUTH;
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_PSK_AUTH;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt == 0x00409600) {
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_AUTH_CCKM;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_AUTH_CCKM;
} else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
}
}
-static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
{
+ struct ath6kl *ar = vif->ar;
+
if (!test_bit(WMI_READY, &ar->flag)) {
ath6kl_err("wmi is not ready\n");
return false;
}
- if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ if (!test_bit(WLAN_ENABLED, &vif->flags)) {
ath6kl_err("wlan disabled\n");
return false;
}
@@ -239,15 +282,146 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
return true;
}
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 &&
+ pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_RSN;
+}
+
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+ return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+ pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+ size_t ies_len)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Clear previously set flag
+ */
+
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ /*
+ * Filter out RSN/WPA IE(s)
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+
+ if (ath6kl_is_wps_ie(pos))
+ ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_REQ, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ *nw_type = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ *nw_type = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ *nw_type = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+ u8 *if_idx, u8 *nw_type)
+{
+ int i;
+
+ if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+ return false;
+
+ if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+ ar->num_vif))
+ return false;
+
+ if (type == NL80211_IFTYPE_STATION ||
+ type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+ for (i = 0; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ if (type == NL80211_IFTYPE_P2P_CLIENT ||
+ type == NL80211_IFTYPE_P2P_GO) {
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
+ u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
- ar->sme_state = SME_CONNECTING;
+ ath6kl_cfg80211_sscan_disable(vif);
- if (!ath6kl_cfg80211_ready(ar))
+ vif->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +461,22 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (test_bit(CONNECTED, &ar->flag) &&
- ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ar->reconnect_flag = true;
- status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
- ar->ch_hint);
+ if (sme->ie && (sme->ie_len > 0)) {
+ status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+ } else
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ if (test_bit(CONNECTED, &vif->flags) &&
+ vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ vif->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->req_bssid,
+ vif->ch_hint);
up(&ar->sem);
if (status) {
@@ -300,42 +484,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EIO;
}
return 0;
- } else if (ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ath6kl_disconnect(ar);
+ } else if (vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ ath6kl_disconnect(vif);
}
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = sme->ssid_len;
- memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = sme->ssid_len;
+ memcpy(vif->ssid, sme->ssid, sme->ssid_len);
if (sme->channel)
- ar->ch_hint = sme->channel->center_freq;
+ vif->ch_hint = sme->channel->center_freq;
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
- memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+ ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
- status = ath6kl_set_auth_type(ar, sme->auth_type);
+ status = ath6kl_set_auth_type(vif, sme->auth_type);
if (status) {
up(&ar->sem);
return status;
}
if (sme->crypto.n_ciphers_pairwise)
- ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
else
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
- ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
if (sme->crypto.n_akm_suites)
- ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+ ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
if ((sme->key_len) &&
- (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ (vif->auth_mode == NONE_AUTH) &&
+ (vif->prwise_crypto == WEP_CRYPT)) {
struct ath6kl_key *key = NULL;
if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +531,60 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
}
- key = &ar->keys[sme->key_idx];
+ key = &vif->keys[sme->key_idx];
key->key_len = sme->key_len;
memcpy(key->key, sme->key, key->key_len);
- key->cipher = ar->prwise_crypto;
- ar->def_txkey_index = sme->key_idx;
+ key->cipher = vif->prwise_crypto;
+ vif->def_txkey_index = sme->key_idx;
- ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
- ar->prwise_crypto,
+ ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+ vif->prwise_crypto,
GROUP_USAGE | TX_USAGE,
key->key_len,
- NULL,
+ NULL, 0,
key->key, KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0) != 0) {
ath6kl_err("couldn't set bss filtering\n");
up(&ar->sem);
return -EIO;
}
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
+ nw_subtype = SUBTYPE_P2PCLIENT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- ar->reconnect_flag = 0;
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ vif->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, nw_subtype);
up(&ar->sem);
if (status == -EINVAL) {
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
ath6kl_err("invalid request\n");
return -ENOENT;
} else if (status) {
@@ -404,28 +593,40 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))) {
- mod_timer(&ar->disconnect_timer,
+ ((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&vif->disconnect_timer,
jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
}
ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
-static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+ enum network_type nw_type,
+ const u8 *bssid,
struct ieee80211_channel *chan,
const u8 *beacon_ie, size_t beacon_ie_len)
{
+ struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
+ u16 cap_mask, cap_val;
u8 *ie;
- bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
- ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ if (nw_type & ADHOC_NETWORK) {
+ cap_mask = WLAN_CAPABILITY_IBSS;
+ cap_val = WLAN_CAPABILITY_IBSS;
+ } else {
+ cap_mask = WLAN_CAPABILITY_ESS;
+ cap_val = WLAN_CAPABILITY_ESS;
+ }
+
+ bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+ vif->ssid, vif->ssid_len,
+ cap_mask, cap_val);
if (bss == NULL) {
/*
* Since cfg80211 may not yet know about the BSS,
@@ -435,21 +636,20 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
* Prepend SSID element since it is not included in the Beacon
* IEs from the target.
*/
- ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
if (ie == NULL)
return -ENOMEM;
ie[0] = WLAN_EID_SSID;
- ie[1] = ar->ssid_len;
- memcpy(ie + 2, ar->ssid, ar->ssid_len);
- memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
- bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
- bssid, 0, WLAN_CAPABILITY_ESS, 100,
- ie, 2 + ar->ssid_len + beacon_ie_len,
+ ie[1] = vif->ssid_len;
+ memcpy(ie + 2, vif->ssid, vif->ssid_len);
+ memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wiphy, chan,
+ bssid, 0, cap_val, 100,
+ ie, 2 + vif->ssid_len + beacon_ie_len,
0, GFP_KERNEL);
if (bss)
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
- "%pM prior to indicating connect/roamed "
- "event\n", bssid);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
+ "cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
@@ -463,7 +663,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
return 0;
}
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
@@ -471,6 +671,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
u8 assoc_resp_len, u8 *assoc_info)
{
struct ieee80211_channel *chan;
+ struct ath6kl *ar = vif->ar;
/* capinfo + listen interval */
u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +690,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
* Store Beacon interval here; DTIM period will be available only once
* a Beacon frame from the AP is seen.
*/
- ar->assoc_bss_beacon_int = beacon_intvl;
- clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
if (nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
@@ -501,39 +702,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
}
if (nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
}
}
- chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
-
+ chan = ieee80211_get_channel(ar->wiphy, (int) channel);
- if (nw_type & ADHOC_NETWORK) {
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry\n");
return;
}
- if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
- beacon_ie_len) < 0) {
- ath6kl_err("could not add cfg80211 bss entry for "
- "connect/roamed notification\n");
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->sme_state == SME_CONNECTING) {
+ if (vif->sme_state == SME_CONNECTING) {
/* inform connect result to cfg80211 */
- ar->sme_state = SME_CONNECTED;
- cfg80211_connect_result(ar->net_dev, bssid,
+ vif->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(vif->ndev, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
+ } else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
- cfg80211_roamed(ar->net_dev, chan, bssid,
+ cfg80211_roamed(vif->ndev, chan, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
}
@@ -542,12 +743,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
struct net_device *dev, u16 reason_code)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
reason_code);
- if (!ath6kl_cfg80211_ready(ar))
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +764,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
return -ERESTARTSYS;
}
- ar->reconnect_flag = 0;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ vif->reconnect_flag = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
if (!test_bit(SKIP_SCAN, &ar->flag))
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
up(&ar->sem);
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
return 0;
}
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason)
{
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ struct ath6kl *ar = vif->ar;
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
}
- if (ar->nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->nw_type & ADHOC_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
}
memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->nw_type & INFRA_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
@@ -614,42 +820,46 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
*/
if (reason != DISCONNECT_CMD) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
return;
}
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
- if (ar->sme_state == SME_CONNECTING) {
- cfg80211_connect_result(ar->net_dev,
+ if (vif->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(vif->ndev,
bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
- cfg80211_disconnected(ar->net_dev, reason,
+ } else if (vif->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(vif->ndev, reason,
NULL, 0, GFP_KERNEL);
}
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
}
static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
s8 n_channels = 0;
u16 *channels = NULL;
int ret = 0;
+ u32 force_fg_scan = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ ath6kl_cfg80211_sscan_disable(vif);
+
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
ret = ath6kl_wmi_bssfilter_cmd(
- ar->wmi,
- (test_bit(CONNECTED, &ar->flag) ?
+ ar->wmi, vif->fw_vif_idx,
+ (test_bit(CONNECTED, &vif->flags) ?
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
if (ret) {
ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +874,19 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
for (i = 0; i < request->n_ssids; i++)
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- SPECIFIC_SSID_FLAG,
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, SPECIFIC_SSID_FLAG,
request->ssids[i].ssid_len,
request->ssids[i].ssid);
}
+ /*
+ * FIXME: we should clear the IE in fw if it's not set, so just
+ * remove the check altogether
+ */
if (request->ie) {
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +917,63 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
channels[i] = request->channels[i]->center_freq;
}
- ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
- false, 0, 0, n_channels, channels);
+ if (test_bit(CONNECTED, &vif->flags))
+ force_fg_scan = 1;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using
+ * station interface, send additional information like
+ * supported rates to advertise and xmit rates for
+ * probe requests
+ */
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels, request->no_cck,
+ request->rates);
+ } else {
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels);
+ }
if (ret)
ath6kl_err("wmi_startscan_cmd failed\n");
else
- ar->scan_req = request;
+ vif->scan_req = request;
kfree(channels);
return ret;
}
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
{
+ struct ath6kl *ar = vif->ar;
int i;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+ aborted ? " aborted" : "");
- if (!ar->scan_req)
+ if (!vif->scan_req)
return;
- if ((status == -ECANCELED) || (status == -EBUSY)) {
- cfg80211_scan_done(ar->scan_req, true);
+ if (aborted)
goto out;
- }
- cfg80211_scan_done(ar->scan_req, false);
-
- if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
- for (i = 0; i < ar->scan_req->n_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- DISABLE_SSID_FLAG,
+ if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < vif->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, DISABLE_SSID_FLAG,
0, NULL);
}
}
out:
- ar->scan_req = NULL;
+ cfg80211_scan_done(vif->scan_req, aborted);
+ vif->scan_req = NULL;
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -747,15 +981,22 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac_addr,
struct key_params *params)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
u8 key_usage;
u8 key_type;
- int status = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+ if (params->key_len != WMI_KRK_LEN)
+ return -EINVAL;
+ return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+ params->key);
+ }
+
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
@@ -763,7 +1004,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(key, 0, sizeof(struct ath6kl_key));
if (pairwise)
@@ -772,13 +1013,19 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
key_usage = GROUP_USAGE;
if (params) {
+ int seq_len = params->seq_len;
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+ seq_len > ATH6KL_KEY_SEQ_LEN) {
+ /* Only first half of the WPI PN is configured */
+ seq_len = ATH6KL_KEY_SEQ_LEN;
+ }
if (params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > sizeof(key->seq))
+ seq_len > sizeof(key->seq))
return -EINVAL;
key->key_len = params->key_len;
memcpy(key->key, params->key, key->key_len);
- key->seq_len = params->seq_len;
+ key->seq_len = seq_len;
memcpy(key->seq, params->seq, key->seq_len);
key->cipher = params->cipher;
}
@@ -796,31 +1043,33 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
case WLAN_CIPHER_SUITE_CCMP:
key_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ key_type = WAPI_CRYPT;
+ break;
default:
return -ENOTSUPP;
}
- if (((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))
+ if (((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))
&& (key_usage & GROUP_USAGE))
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
__func__, key_index, key->key_len, key_type,
key_usage, key->seq_len);
- ar->def_txkey_index = key_index;
-
- if (ar->nw_type == AP_NETWORK && !pairwise &&
- (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ if (vif->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
+ key_type == WAPI_CRYPT) && params) {
ar->ap_mode_bkey.valid = true;
ar->ap_mode_bkey.key_index = key_index;
ar->ap_mode_bkey.key_type = key_type;
ar->ap_mode_bkey.key_len = key->key_len;
memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
"key configuration until AP mode has been "
"started\n");
@@ -832,8 +1081,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
- !test_bit(CONNECTED, &ar->flag)) {
+ if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &vif->flags)) {
/*
* Store the key locally so that it can be re-configured after
* the AP mode has properly started
@@ -841,31 +1090,29 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
*/
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
"until AP mode has been started\n");
- ar->wep_key_list[key_index].key_len = key->key_len;
- memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ vif->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(vif->wep_key_list[key_index].key, key->key,
+ key->key_len);
return 0;
}
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage, key->key_len,
- key->seq, key->key, KEY_OP_INIT_VAL,
- (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
-
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->seq_len, key->key,
+ KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1122,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d is empty\n", __func__, key_index);
return 0;
}
- ar->keys[key_index].key_len = 0;
+ vif->keys[key_index].key_len = 0;
- return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1139,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie,
struct key_params *))
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
struct key_params params;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1155,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(&params, 0, sizeof(params));
params.cipher = key->cipher;
params.key_len = key->key_len;
@@ -926,15 +1173,15 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
u8 key_index, bool unicast,
bool multicast)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
- int status = 0;
u8 key_usage;
enum crypto_type key_type = NONE_CRYPT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1191,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
__func__, key_index);
return -EINVAL;
}
- ar->def_txkey_index = key_index;
- key = &ar->keys[ar->def_txkey_index];
+ vif->def_txkey_index = key_index;
+ key = &vif->keys[vif->def_txkey_index];
key_usage = GROUP_USAGE;
- if (ar->prwise_crypto == WEP_CRYPT)
+ if (vif->prwise_crypto == WEP_CRYPT)
key_usage |= TX_USAGE;
if (unicast)
- key_type = ar->prwise_crypto;
+ key_type = vif->prwise_crypto;
if (multicast)
- key_type = ar->grp_crypto;
+ key_type = vif->grp_crypto;
- if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
return 0; /* Delay until AP mode has been started */
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage,
- key->key_len, key->seq, key->key,
- KEY_OP_INIT_VAL, NULL,
- SYNC_BOTH_WMIFLAG);
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->seq_len,
+ key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
}
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
- cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
(ismcast ? NL80211_KEYTYPE_GROUP :
NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
GFP_KERNEL);
@@ -989,12 +1234,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
int ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
changed);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1014,15 +1264,21 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
*/
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
u8 ath6kl_dbm;
+ int dbm = MBM_TO_DBM(mbm);
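/* e.g. a request of 2000 mBm from nl80211 becomes dbm = 20 here */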
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
type, dbm);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
switch (type) {
@@ -1037,7 +1293,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
return 0;
}
@@ -1045,14 +1301,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (test_bit(CONNECTED, &ar->flag)) {
+ if (test_bit(CONNECTED, &vif->flags)) {
ar->tx_pwr = 0;
- if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
@@ -1076,11 +1337,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct ath6kl *ar = ath6kl_priv(dev);
struct wmi_power_mode_cmd mode;
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
__func__, pmgmt, timeout);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (pmgmt) {
@@ -1091,7 +1353,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
mode.pwr_mode = MAX_PERF_POWER;
}
- if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+ mode.pwr_mode) != 0) {
ath6kl_err("wmi_powermode_cmd failed\n");
return -EIO;
}
@@ -1099,41 +1362,83 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return 0;
}
+static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct net_device *ndev;
+ u8 if_idx, nw_type;
+
+ if (ar->num_vif == ar->vif_max) {
+ ath6kl_err("Reached maximum number of supported vif\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+ ath6kl_err("Not a supported interface type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ ar->num_vif++;
+
+ return ndev;
+}
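/*
 * Illustrative usage: a new vif typically reaches
 * ath6kl_cfg80211_add_iface() from user space via nl80211, e.g.
 * "iw phy <phy> interface add wlan1 type managed"; ath6kl_interface_add()
 * then creates the net_device bound to firmware vif index if_idx.
 */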
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+
+ spin_lock_bh(&ar->list_lock);
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+
+ ath6kl_deinit_if_data(vif);
+
+ return 0;
+}
+
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct ath6kl *ar = ath6kl_priv(ndev);
- struct wireless_dev *wdev = ar->wdev;
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
- if (!ath6kl_cfg80211_ready(ar))
- return -EIO;
-
switch (type) {
case NL80211_IFTYPE_STATION:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
- ar->next_mode = ADHOC_NETWORK;
+ vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_P2P_GO:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
default:
ath6kl_err("invalid interface type %u\n", type);
return -EOPNOTSUPP;
}
- wdev->iftype = type;
+ vif->wdev.iftype = type;
return 0;
}
@@ -1143,16 +1448,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
struct cfg80211_ibss_params *ibss_param)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ar->ssid_len = ibss_param->ssid_len;
- memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+ vif->ssid_len = ibss_param->ssid_len;
+ memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
if (ibss_param->channel)
- ar->ch_hint = ibss_param->channel->center_freq;
+ vif->ch_hint = ibss_param->channel->center_freq;
if (ibss_param->channel_fixed) {
/*
@@ -1164,44 +1470,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
- memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, ibss_param->bssid,
+ sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, 0);
+ ath6kl_set_wpa_version(vif, 0);
- status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
if (status)
return status;
if (ibss_param->privacy) {
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
} else {
- ath6kl_set_cipher(ar, 0, true);
- ath6kl_set_cipher(ar, 0, false);
+ ath6kl_set_cipher(vif, 0, true);
+ ath6kl_set_cipher(vif, 0, false);
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
- set_bit(CONNECT_PEND, &ar->flag);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, SUBTYPE_NONE);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
@@ -1209,14 +1516,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
struct net_device *dev)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
return 0;
}
@@ -1226,6 +1533,8 @@ static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+ CCKM_KRK_CIPHER_SUITE,
+ WLAN_CIPHER_SUITE_SMS4,
};
static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1602,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
long left;
bool sgi;
s32 rate;
int ret;
u8 mcs;
- if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
return -ENOENT;
if (down_interruptible(&ar->sem))
return -EBUSY;
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
if (ret != 0) {
up(&ar->sem);
@@ -1316,7 +1626,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag),
+ &vif->flags),
WMI_TIMEOUT);
up(&ar->sem);
@@ -1326,24 +1636,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
else if (left < 0)
return left;
- if (ar->target_stats.rx_byte) {
- sinfo->rx_bytes = ar->target_stats.rx_byte;
+ if (vif->target_stats.rx_byte) {
+ sinfo->rx_bytes = vif->target_stats.rx_byte;
sinfo->filled |= STATION_INFO_RX_BYTES;
- sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
- if (ar->target_stats.tx_byte) {
- sinfo->tx_bytes = ar->target_stats.tx_byte;
+ if (vif->target_stats.tx_byte) {
+ sinfo->tx_bytes = vif->target_stats.tx_byte;
sinfo->filled |= STATION_INFO_TX_BYTES;
- sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
- sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->signal = vif->target_stats.cs_rssi;
sinfo->filled |= STATION_INFO_SIGNAL;
- rate = ar->target_stats.tx_ucast_rate;
+ rate = vif->target_stats.tx_ucast_rate;
if (is_rate_legacy(rate)) {
sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1685,13 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
sinfo->filled |= STATION_INFO_TX_BITRATE;
- if (test_bit(CONNECTED, &ar->flag) &&
- test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
- ar->nw_type == INFRA_NETWORK) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+ vif->nw_type == INFRA_NETWORK) {
sinfo->filled |= STATION_INFO_BSS_PARAM;
sinfo->bss_param.flags = 0;
- sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
- sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
}
return 0;
@@ -1391,7 +1701,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, true);
}
@@ -1399,25 +1711,302 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, false);
}
static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- if (test_bit(CONNECTED, &ar->flag))
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ if (test_bit(CONNECTED, &vif->flags))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bssid, NULL, false);
+ return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *vif;
+ int ret, pos, left;
+ u32 filter = 0;
+ u16 i;
+ u8 mask[WOW_MASK_SIZE];
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (!test_bit(CONNECTED, &vif->flags))
+ return -EINVAL;
+
+ /* Clear existing WOW patterns */
+ for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+ ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+ WOW_LIST_ID, i);
+ /* Configure new WOW patterns */
+ for (i = 0; i < wow->n_patterns; i++) {
+
+ /*
+ * Convert given nl80211 specific mask value to equivalent
+ * driver specific mask value and send it to the chip along
+ * with patterns. For example, if the mask value defined in
+ * struct cfg80211_wowlan is 0xA (binary 1010, where bit i of the
+ * mask selects pattern byte i), then the equivalent driver specific
+ * mask value is "0x00 0xFF 0x00 0xFF".
+ */
+ memset(&mask, 0, sizeof(mask));
+ for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+ if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+ mask[pos] = 0xFF;
+ }
+ /*
+ * Note: the pattern's offset is not passed as part of the wowlan
+ * parameters from the CFG layer, so it is always passed as zero to
+ * the firmware. This means the given WOW patterns are always matched
+ * from the first byte of the received packet in the firmware.
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ wow->patterns[i].pattern_len,
+ 0 /* pattern offset */,
+ wow->patterns[i].pattern, mask);
+ if (ret)
+ return ret;
+ }
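/*
 * Worked example (illustrative): a 4-byte pattern with nl80211 mask 0xA
 * is expanded by the loop above into the driver mask
 * {0x00, 0xFF, 0x00, 0xFF}, so only bytes 1 and 3 of a received packet
 * (counted from byte 0, since the offset is always zero) are compared
 * against the pattern in firmware.
 */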
+
+ if (wow->disconnect)
+ filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+ if (wow->magic_pkt)
+ filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+ if (wow->gtk_rekey_failure)
+ filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+ if (wow->eap_identity_req)
+ filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+ if (wow->four_way_handshake)
+ filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ filter,
+ WOW_HOST_REQ_DELAY);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret)
+ return ret;
+
+ if (ar->tx_pending[ar->ctrl_ep]) {
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("clear wmi ctrl data timeout\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+ ret = left;
+ }
+ }
+
+ return ret;
+}
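/*
 * For example (illustrative): a cfg80211_wowlan request with magic_pkt
 * and disconnect set results in filter == (WOW_FILTER_OPTION_MAGIC_PACKET |
 * WOW_FILTER_OPTION_NWK_DISASSOC) being passed to
 * ath6kl_wmi_set_wow_mode_cmd() above.
 */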
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ return ret;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow)
+{
+ int ret;
+
+ switch (mode) {
+ case ATH6KL_CFG_SUSPEND_WOW:
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+ /* Flush all non control pkts in TX path */
+ ath6kl_tx_data_cleanup(ar);
+
+ ret = ath6kl_wow_suspend(ar, wow);
+ if (ret) {
+ ath6kl_err("wow suspend failed: %d\n", ret);
+ return ret;
+ }
+ ar->state = ATH6KL_STATE_WOW;
+ break;
+
+ case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (ar->state == ATH6KL_STATE_OFF) {
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "suspend hw off, no action for cutpower\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+ ret = ath6kl_init_hw_stop(ar);
+ if (ret) {
+ ath6kl_warn("failed to stop hw during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_CUTPOWER;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
+ /*
+ * Nothing needed for scheduled scan, the firmware is already in
+ * wow mode and sleeping most of the time.
+ */
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+ int ret;
+
+ switch (ar->state) {
+ case ATH6KL_STATE_WOW:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+ ret = ath6kl_wow_resume(ar);
+ if (ret) {
+ ath6kl_warn("wow mode resume failed: %d\n", ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+ ar->wmi->saved_pwr_mode);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during resume: %d\n",
+ ret);
+ }
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+
+ break;
+
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+
+ default:
+ break;
+ }
+
return 0;
}
#ifdef CONFIG_PM
-static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- return ath6kl_hif_suspend(ar);
+ return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_resume(ar);
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wake up and keep power. The target pulls sdio data line to
+ * wake up the host when a WOW pattern matches. This causes the sdio irq
+ * handler to be called on the host side, which internally hits ath6kl's
+ * RX path.
+ *
+ * Since sdio interrupt is not disabled, RX path executes even before
+ * the host executes the actual resume operation from PM module.
+ *
+ * In the current scenario, WOW resume should happen before we start
+ * processing any data from the target, so WOW resume has to be performed
+ * in the RX path. Ideally we should perform WOW resume only in the actual
+ * platform resume path; this area needs a bit of rework to avoid doing
+ * WOW resume in the RX path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+ if (ar->state == ATH6KL_STATE_WOW)
+ ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
}
#endif
@@ -1425,14 +2014,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
__func__, chan->center_freq, chan->hw_value);
- ar->next_chan = chan->center_freq;
+ vif->next_chan = chan->center_freq;
return 0;
}
@@ -1444,9 +2033,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos)
pos[4] == 0x9a && pos[5] == 0x09;
}
-static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
- size_t ies_len)
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+ const u8 *ies, size_t ies_len)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *buf = NULL;
size_t len = 0;
@@ -1473,8 +2063,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
}
}
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
- buf, len);
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_RESP, buf, len);
kfree(buf);
return ret;
}
@@ -1483,36 +2073,39 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
struct beacon_parameters *info, bool add)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
struct ieee80211_mgmt *mgmt;
u8 *ies;
int ies_len;
struct wmi_connect_cmd p;
int res;
- int i;
+ int i, ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (ar->next_mode != AP_NETWORK)
+ if (vif->next_mode != AP_NETWORK)
return -EOPNOTSUPP;
if (info->beacon_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_BEACON,
info->beacon_ies,
info->beacon_ies_len);
if (res)
return res;
}
if (info->proberesp_ies) {
- res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
info->proberesp_ies_len);
if (res)
return res;
}
if (info->assocresp_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
if (res)
@@ -1539,12 +2132,14 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
if (info->ssid == NULL)
return -EINVAL;
- memcpy(ar->ssid, info->ssid, info->ssid_len);
- ar->ssid_len = info->ssid_len;
+ memcpy(vif->ssid, info->ssid, info->ssid_len);
+ vif->ssid_len = info->ssid_len;
if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
return -EOPNOTSUPP; /* TODO */
- ar->dot11_auth_mode = OPEN_AUTH;
+ ret = ath6kl_set_auth_type(vif, info->auth_type);
+ if (ret)
+ return ret;
memset(&p, 0, sizeof(p));
@@ -1566,7 +2161,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
}
if (p.auth_mode == 0)
p.auth_mode = NONE_AUTH;
- ar->auth_mode = p.auth_mode;
+ vif->auth_mode = p.auth_mode;
for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
switch (info->crypto.ciphers_pairwise[i]) {
@@ -1580,13 +2175,16 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.prwise_crypto_type |= AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.prwise_crypto_type |= WAPI_CRYPT;
+ break;
}
}
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
} else if (info->crypto.n_ciphers_pairwise == 1)
- ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1599,21 +2197,34 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.grp_crypto_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.grp_crypto_type = WAPI_CRYPT;
+ break;
default:
p.grp_crypto_type = NONE_CRYPT;
break;
}
- ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
p.nw_type = AP_NETWORK;
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
- p.ssid_len = ar->ssid_len;
- memcpy(p.ssid, ar->ssid, ar->ssid_len);
- p.dot11_auth_mode = ar->dot11_auth_mode;
- p.ch = cpu_to_le16(ar->next_chan);
+ p.ssid_len = vif->ssid_len;
+ memcpy(p.ssid, vif->ssid, vif->ssid_len);
+ p.dot11_auth_mode = vif->dot11_auth_mode;
+ p.ch = cpu_to_le16(vif->next_chan);
- res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ p.nw_subtype = SUBTYPE_P2PGO;
+ } else {
+ /*
+ * Due to a firmware limitation, it is not possible to
+ * do P2P mgmt operations in AP mode
+ */
+ p.nw_subtype = SUBTYPE_NONE;
+ }
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
@@ -1635,14 +2246,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
- if (!test_bit(CONNECTED, &ar->flag))
+ if (!test_bit(CONNECTED, &vif->flags))
return -ENOTCONN;
- ath6kl_wmi_disconnect_cmd(ar->wmi);
- clear_bit(CONNECTED, &ar->flag);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+ clear_bit(CONNECTED, &vif->flags);
return 0;
}
@@ -1651,8 +2263,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
/* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2273,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
- mac, 0);
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
- 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_AUTHORIZE, mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_UNAUTHORIZE, mac, 0);
}
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2287,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u32 id;
/* TODO: if already pending or ongoing remain-on-channel,
* return -EBUSY */
- *cookie = 1; /* only a single pending request is supported */
+ id = ++vif->last_roc_id;
+ if (id == 0) {
+ /* Do not use 0 as the cookie value */
+ id = ++vif->last_roc_id;
+ }
+ *cookie = id;
- return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
- duration);
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+ chan->center_freq, duration);
}
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2308,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (cookie != 1)
+ if (cookie != vif->last_roc_id)
return -ENOENT;
+ vif->last_cancel_roc_id = cookie;
- return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
}
-static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
- size_t len, unsigned int freq)
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+ const u8 *buf, size_t len,
+ unsigned int freq)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *p2p;
int p2p_len;
@@ -1724,8 +2348,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
pos += 2 + pos[1];
}
- ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
- p2p, p2p_len);
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+ mgmt->da, p2p, p2p_len);
kfree(p2p);
return ret;
}
@@ -1734,44 +2358,61 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ bool dont_wait_for_ack, u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 id;
const struct ieee80211_mgmt *mgmt;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
- ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control)) {
/*
* Send Probe Response frame in AP mode using a separate WMI
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
- return ath6kl_send_go_probe_resp(ar, buf, len,
+ return ath6kl_send_go_probe_resp(vif, buf, len,
chan->center_freq);
}
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
if (id == 0) {
/*
* 0 is a reserved value in the WMI command and shall not be
* used for the command.
*/
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
}
*cookie = id;
- return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
- buf, len);
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using
+ * station interface, send additional information like
+ * supported rates to advertise and xmit rates for
+ * probe requests
+ */
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len, no_cck);
+ } else {
+ return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len);
+ }
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
struct net_device *dev,
u16 frame_type, bool reg)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
__func__, frame_type, reg);
@@ -1781,8 +2422,92 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
* we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
* hardcode target to report Probe Request frames all the time.
*/
- ar->probe_req_report = reg;
+ vif->probe_req_report = reg;
+ }
+}
+
+static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u16 interval;
+ int ret;
+ u8 i;
+
+ if (ar->state != ATH6KL_STATE_ON)
+ return -EIO;
+
+ if (vif->sme_state != SME_DISCONNECTED)
+ return -EBUSY;
+
+ for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, DISABLE_SSID_FLAG,
+ 0, NULL);
}
+
+ /* fw uses seconds, also make sure that it's >0 */
+ interval = max_t(u16, 1, request->interval / 1000);
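/*
 * E.g. a cfg80211 interval of 10000 ms becomes 10 s here, and anything
 * below 1000 ms is clamped up to 1 s so the value passed to the firmware
 * stays non-zero.
 */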
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ interval, interval,
+ 10, 0, 0, 0, 3, 0, 0, 0);
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ for (i = 0; i < request->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+ }
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ WOW_FILTER_SSID,
+ WOW_HOST_REQ_DELAY);
+ if (ret) {
+ ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
+ return ret;
+ }
+
+ /* this also clears IE in fw if it's not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d",
+ ret);
+ return ret;
+ }
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret) {
+ ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
+ ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_SCHED_SCAN;
+
+ return ret;
+}
+
+static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return -EIO;
+
+ return 0;
}
static const struct ieee80211_txrx_stypes
@@ -1808,6 +2533,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
};
static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .add_virtual_intf = ath6kl_cfg80211_add_iface,
+ .del_virtual_intf = ath6kl_cfg80211_del_iface,
.change_virtual_intf = ath6kl_cfg80211_change_iface,
.scan = ath6kl_cfg80211_scan,
.connect = ath6kl_cfg80211_connect,
@@ -1828,7 +2555,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.flush_pmksa = ath6kl_flush_pmksa,
CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ar6k_cfg80211_suspend,
+ .suspend = __ath6kl_cfg80211_suspend,
+ .resume = __ath6kl_cfg80211_resume,
#endif
.set_channel = ath6kl_set_channel,
.add_beacon = ath6kl_add_beacon,
@@ -1839,78 +2567,277 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
.mgmt_tx = ath6kl_mgmt_tx,
.mgmt_frame_register = ath6kl_mgmt_frame_register,
+ .sched_scan_start = ath6kl_cfg80211_sscan_start,
+ .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
};
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
{
- int ret = 0;
- struct wireless_dev *wdev;
- struct ath6kl *ar;
+ ath6kl_cfg80211_sscan_disable(vif);
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- ath6kl_err("couldn't allocate wireless device\n");
- return NULL;
+ switch (vif->sme_state) {
+ case SME_DISCONNECTED:
+ break;
+ case SME_CONNECTING:
+ cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case SME_CONNECTED:
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ break;
+ }
+
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags))
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
+
+ vif->sme_state = SME_DISCONNECTED;
+ clear_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+
+ /* disable scanning */
+ if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
+ ath6kl_warn("failed to disable scan during stop\n");
+
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+}
+
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif) {
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+ ath6kl_warn("ath6kl_deep_sleep_enable: "
+ "wmi_powermode_cmd failed\n");
+ return;
}
+ /*
+ * FIXME: we should take ar->list_lock to protect changes in the
+ * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
+ * sleeps.
+ */
+ list_for_each_entry(vif, &ar->vif_list, list)
+ ath6kl_cfg80211_stop(vif);
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *dev)
+{
+ struct ath6kl *ar;
+ struct wiphy *wiphy;
+ u8 ctr;
+
/* create a new wiphy for use with cfg80211 */
- wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
- if (!wdev->wiphy) {
+ wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+ if (!wiphy) {
ath6kl_err("couldn't allocate wiphy device\n");
- kfree(wdev);
return NULL;
}
- ar = wiphy_priv(wdev->wiphy);
+ ar = wiphy_priv(wiphy);
ar->p2p = !!ath6kl_p2p;
+ ar->wiphy = wiphy;
+ ar->dev = dev;
- wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+ ar->vif_max = 1;
- wdev->wiphy->max_remain_on_channel_duration = 5000;
+ ar->max_norm_iface = 1;
+
+ spin_lock_init(&ar->lock);
+ spin_lock_init(&ar->mcastpsq_lock);
+ spin_lock_init(&ar->list_lock);
+
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ INIT_LIST_HEAD(&ar->vif_list);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ clear_bit(SKIP_SCAN, &ar->flag);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+ ar->listen_intvl_b = 0;
+ ar->tx_pwr = 0;
+
+ ar->intra_bss = 1;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+ return ar;
+}
+
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
+{
+ struct wiphy *wiphy = ar->wiphy;
+ int ret;
+
+ wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wiphy->max_remain_on_channel_duration = 5000;
/* set device pointer for wiphy */
- set_wiphy_dev(wdev->wiphy, dev);
+ set_wiphy_dev(wiphy, ar->dev);
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
if (ar->p2p) {
- wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT);
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- /* max num of ssids that can be probed during scanning */
- wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
- wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- ret = wiphy_register(wdev->wiphy);
+ /* max num of ssids that can be probed during scanning */
+ wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+ wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+ wiphy->wowlan.pattern_min_len = 1;
+ wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+ wiphy->max_sched_scan_ssids = 10;
+
+ ret = wiphy_register(wiphy);
if (ret < 0) {
ath6kl_err("couldn't register wiphy device\n");
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- return NULL;
+ return ret;
}
- return wdev;
+ return 0;
}
-void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+static int ath6kl_init_if_data(struct ath6kl_vif *vif)
{
- struct wireless_dev *wdev = ar->wdev;
-
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ vif->aggr_cntxt = aggr_init(vif->ndev);
+ if (!vif->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ return -ENOMEM;
}
- if (!wdev)
- return;
+ setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) vif->ndev);
+ setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
+ (unsigned long) vif);
+
+ set_bit(WMM_ENABLED, &vif->flags);
+ spin_lock_init(&vif->if_lock);
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
+ return 0;
+}
+
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ aggr_module_destroy(vif->aggr_cntxt);
+
+ ar->avail_idx_map |= BIT(vif->fw_vif_idx);
+
+ if (vif->nw_type == ADHOC_NETWORK)
+ ar->ibss_if_active = false;
+
+ unregister_netdevice(vif->ndev);
+
+ ar->num_vif--;
+}
+
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type, u8 fw_vif_idx,
+ u8 nw_type)
+{
+ struct net_device *ndev;
+ struct ath6kl_vif *vif;
+
+ ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+ if (!ndev)
+ return NULL;
+
+ vif = netdev_priv(ndev);
+ ndev->ieee80211_ptr = &vif->wdev;
+ vif->wdev.wiphy = ar->wiphy;
+ vif->ar = ar;
+ vif->ndev = ndev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+ vif->wdev.netdev = ndev;
+ vif->wdev.iftype = type;
+ vif->fw_vif_idx = fw_vif_idx;
+ vif->nw_type = vif->next_mode = nw_type;
+
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+ if (fw_vif_idx != 0)
+ ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+ 0x2;
+
+ init_netdev(ndev);
+
+ ath6kl_init_control_info(vif);
+
+ /* TODO: Pass interface specific pointer instead of ar */
+ if (ath6kl_init_if_data(vif))
+ goto err;
+
+ if (register_netdevice(ndev))
+ goto err;
+
+ ar->avail_idx_map &= ~BIT(fw_vif_idx);
+ vif->sme_state = SME_DISCONNECTED;
+ set_bit(WLAN_ENABLED, &vif->flags);
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ set_bit(NETDEV_REGISTERED, &vif->flags);
+
+ if (type == NL80211_IFTYPE_ADHOC)
+ ar->ibss_if_active = true;
+
+ spin_lock_bh(&ar->list_lock);
+ list_add_tail(&vif->list, &ar->vif_list);
+ spin_unlock_bh(&ar->list_lock);
+
+ return ndev;
+
+err:
+ aggr_module_destroy(vif->aggr_cntxt);
+ free_netdev(ndev);
+ return NULL;
+}
+
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+{
+ wiphy_unregister(ar->wiphy);
+ wiphy_free(ar->wiphy);
}
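
The cfg80211.c changes above replace the old single ath6kl_cfg80211_init()/ath6kl_cfg80211_deinit() pair with a split bring-up: ath6kl_core_alloc() allocates the wiphy plus struct ath6kl, ath6kl_register_ieee80211_hw() registers the wiphy, and ath6kl_interface_add() creates one netdev-backed vif per firmware index. A minimal sketch of how a bus probe path might chain them, assuming the driver headers are in scope; the function name, the error codes and the INFRA_NETWORK nw_type value are illustrative, and the real callers live in init.c/sdio.c outside this diff:

    static int ath6kl_probe_sketch(struct device *dev)
    {
            struct ath6kl *ar;
            struct net_device *ndev;
            int ret;

            ar = ath6kl_core_alloc(dev);            /* wiphy + struct ath6kl */
            if (!ar)
                    return -ENOMEM;

            ret = ath6kl_register_ieee80211_hw(ar); /* wiphy_register() etc. */
            if (ret)
                    goto err_core;

            rtnl_lock();                            /* register_netdevice() needs RTNL */
            ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION,
                                        0 /* fw_vif_idx */, INFRA_NETWORK);
            rtnl_unlock();
            if (!ndev) {
                    ret = -ENOMEM;
                    goto err_wiphy;
            }

            return 0;

    err_wiphy:
            ath6kl_deinit_ieee80211_hw(ar);
    err_core:
            ath6kl_core_free(ar);
            return ret;
    }

Additional interfaces (up to ar->vif_max) are created the same way through the new .add_virtual_intf handler, with ar->avail_idx_map tracking which firmware vif indices are still free.
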
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index a84adc249c6..81f20a57231 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -17,23 +17,43 @@
#ifndef ATH6KL_CFG80211_H
#define ATH6KL_CFG80211_H
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+ ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ ATH6KL_CFG_SUSPEND_CUTPOWER,
+ ATH6KL_CFG_SUSPEND_WOW,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+};
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason);
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast);
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+
#endif /* ATH6KL_CFG80211_H */
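
With ath6kl_cfg80211_suspend() now taking an explicit ath6kl_cfg_suspend_mode plus an optional cfg80211_wowlan configuration, the mode-selection policy moves out to the bus code. A hedged sketch of how a HIF suspend handler might pick a mode from the state visible in this patch (conf_flags, ar->state); the helper name and the exact ordering are assumptions, and the real policy lives in sdio.c, which is not part of this hunk:

    static int ath6kl_hif_suspend_sketch(struct ath6kl *ar,
                                         struct cfg80211_wowlan *wow)
    {
            /* the platform may insist on cutting power completely */
            if (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)
                    return ath6kl_cfg80211_suspend(ar,
                                                   ATH6KL_CFG_SUSPEND_CUTPOWER,
                                                   NULL);

            /* keep firmware scanning while the host sleeps */
            if (ar->state == ATH6KL_STATE_SCHED_SCAN)
                    return ath6kl_cfg80211_suspend(ar,
                                                   ATH6KL_CFG_SUSPEND_SCHED_SCAN,
                                                   NULL);

            /* Wake-on-WLAN needs a wowlan config from cfg80211 */
            if (wow)
                    return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW,
                                                   wow);

            /* otherwise fall back to firmware deep sleep */
            return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
                                           NULL);
    }
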
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index b92f0e5d233..bfd6597763d 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -23,8 +23,6 @@
extern int ath6kl_printk(const char *level, const char *fmt, ...);
-#define A_CACHE_LINE_PAD 128
-
/*
* Reflects the version of binary interface exposed by ATH6KL target
* firmware. Needs to be incremented by 1 for any change in the firmware
@@ -73,25 +71,16 @@ enum crypto_type {
WEP_CRYPT = 0x02,
TKIP_CRYPT = 0x04,
AES_CRYPT = 0x08,
+ WAPI_CRYPT = 0x10,
};
struct htc_endpoint_credit_dist;
struct ath6kl;
enum htc_credit_dist_reason;
-struct htc_credit_state_info;
+struct ath6kl_htc_credit_info;
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
- struct list_head *epdist_list,
- enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
- struct list_head *ep_list,
- int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
- struct htc_endpoint_credit_dist *ep_dist);
struct ath6kl *ath6kl_core_alloc(struct device *sdev);
int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6d8a4845baa..c863a28f2e0 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -70,10 +70,20 @@ enum ath6kl_fw_ie_type {
ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
ATH6KL_FW_IE_CAPABILITIES = 6,
ATH6KL_FW_IE_PATCH_ADDR = 7,
+ ATH6KL_FW_IE_BOARD_ADDR = 8,
+ ATH6KL_FW_IE_VIF_MAX = 9,
};
enum ath6kl_fw_capability {
ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
+
+ /*
+ * Firmware is capable of supporting P2P mgmt operations on a
+ * station interface. After group formation, the station
+ * interface will become a P2P client/GO interface, as the case may be.
+ */
+ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
@@ -88,37 +98,47 @@ struct ath6kl_fw_ie {
};
/* AR6003 1.0 definitions */
-#define AR6003_REV1_VERSION 0x300002ba
+#define AR6003_HW_1_0_VERSION 0x300002ba
/* AR6003 2.0 definitions */
-#define AR6003_REV2_VERSION 0x30000384
-#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
-#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
-#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+#define AR6003_HW_2_0_VERSION 0x30000384
+#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.0/bdata.SD31.bin"
/* AR6003 3.0 definitions */
-#define AR6003_REV3_VERSION 0x30000582
-#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
-#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+#define AR6003_HW_2_1_1_VERSION 0x30000582
+#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
+ "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
+#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
/* AR6004 1.0 definitions */
-#define AR6004_REV1_VERSION 0x30000623
-#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin"
-#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin"
-#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin"
-#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin"
-#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin"
+#define AR6004_HW_1_0_VERSION 0x30000623
+#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin"
+#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.0/bdata.DB132.bin"
+
+/* AR6004 1.1 definitions */
+#define AR6004_HW_1_1_VERSION 0x30000001
+#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin"
+#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.1/bdata.DB132.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
@@ -166,6 +186,7 @@ struct ath6kl_fw_ie {
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
#define ATH6KL_CONF_ENABLE_11N BIT(2)
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
@@ -271,6 +292,8 @@ struct ath6kl_bmi {
u32 cmd_credits;
bool done_sent;
u8 *cmd_buf;
+ u32 max_data_size;
+ u32 max_cmd_size;
};
struct target_stats {
@@ -380,40 +403,42 @@ struct ath6kl_req_key {
u8 key_len;
};
-/* Flag info */
-#define WMI_ENABLED 0
-#define WMI_READY 1
-#define CONNECTED 2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND 4
-#define WMM_ENABLED 5
-#define NETQ_STOPPED 6
-#define WMI_CTRL_EP_FULL 7
-#define DTIM_EXPIRED 8
-#define DESTROY_IN_PROGRESS 9
-#define NETDEV_REGISTERED 10
-#define SKIP_SCAN 11
-#define WLAN_ENABLED 12
-#define TESTMODE 13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL 15
+enum ath6kl_hif_type {
+ ATH6KL_HIF_TYPE_SDIO,
+ ATH6KL_HIF_TYPE_USB,
+};
-struct ath6kl {
- struct device *dev;
- struct net_device *net_dev;
- struct ath6kl_bmi bmi;
- const struct ath6kl_hif_ops *hif_ops;
- struct wmi *wmi;
- int tx_pending[ENDPOINT_MAX];
- int total_tx_data_pend;
- struct htc_target *htc_target;
- void *hif_priv;
- spinlock_t lock;
- struct semaphore sem;
+/*
+ * Driver's maximum limit. Note that some firmware versions support only
+ * one vif and that the runtime (current) limit must be checked from
+ * ar->vif_max.
+ */
+#define ATH6KL_VIF_MAX 3
+
+/* vif flags info */
+enum ath6kl_vif_state {
+ CONNECTED,
+ CONNECT_PEND,
+ WMM_ENABLED,
+ NETQ_STOPPED,
+ DTIM_EXPIRED,
+ NETDEV_REGISTERED,
+ CLEAR_BSSFILTER_ON_BEACON,
+ DTIM_PERIOD_AVAIL,
+ WLAN_ENABLED,
+ STATS_UPDATE_PEND,
+};
+
+struct ath6kl_vif {
+ struct list_head list;
+ struct wireless_dev wdev;
+ struct net_device *ndev;
+ struct ath6kl *ar;
+ /* Lock to protect vif specific net_stats and flags */
+ spinlock_t if_lock;
+ u8 fw_vif_idx;
+ unsigned long flags;
int ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 next_mode;
- u8 nw_type;
u8 dot11_auth_mode;
u8 auth_mode;
u8 prwise_crypto;
@@ -421,21 +446,91 @@ struct ath6kl {
u8 grp_crypto;
u8 grp_crypto_len;
u8 def_txkey_index;
- struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ u8 next_mode;
+ u8 nw_type;
u8 bssid[ETH_ALEN];
u8 req_bssid[ETH_ALEN];
u16 ch_hint;
u16 bss_ch;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ struct aggr_info *aggr_cntxt;
+
+ struct timer_list disconnect_timer;
+ struct timer_list sched_scan_timer;
+
+ struct cfg80211_scan_request *scan_req;
+ enum sme_state sme_state;
+ int reconnect_flag;
+ u32 last_roc_id;
+ u32 last_cancel_roc_id;
+ u32 send_action_id;
+ bool probe_req_report;
+ u16 next_chan;
+ u16 assoc_bss_beacon_int;
+ u8 assoc_bss_dtim_period;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID 0
+#define WOW_HOST_REQ_DELAY 500 /* ms */
+
+#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+ WMI_ENABLED,
+ WMI_READY,
+ WMI_CTRL_EP_FULL,
+ TESTMODE,
+ DESTROY_IN_PROGRESS,
+ SKIP_SCAN,
+ ROAM_TBL_PEND,
+ FIRST_BOOT,
+};
+
+enum ath6kl_state {
+ ATH6KL_STATE_OFF,
+ ATH6KL_STATE_ON,
+ ATH6KL_STATE_DEEPSLEEP,
+ ATH6KL_STATE_CUTPOWER,
+ ATH6KL_STATE_WOW,
+ ATH6KL_STATE_SCHED_SCAN,
+};
+
+struct ath6kl {
+ struct device *dev;
+ struct wiphy *wiphy;
+
+ enum ath6kl_state state;
+
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ enum ath6kl_hif_type hif_type;
+ void *hif_priv;
+ struct list_head vif_list;
+ /* Protects vif_list against concurrent add, delete and traversal */
+ spinlock_t list_lock;
+ u8 num_vif;
+ unsigned int vif_max;
+ u8 max_norm_iface;
+ u8 avail_idx_map;
+ spinlock_t lock;
+ struct semaphore sem;
u16 listen_intvl_b;
u16 listen_intvl_t;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
u8 tx_pwr;
- struct net_device_stats net_stats;
- struct target_stats target_stats;
struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
u8 ibss_ps_enable;
+ bool ibss_if_active;
u8 node_num;
u8 next_ep_id;
struct ath6kl_cookie *cookie_list;
@@ -446,7 +541,7 @@ struct ath6kl {
u8 hiac_stream_active_pri;
u8 ep2ac_map[ENDPOINT_MAX];
enum htc_endpoint_id ctrl_ep;
- struct htc_credit_state_info credit_state_info;
+ struct ath6kl_htc_credit_info credit_state_info;
u32 connect_ctrl_flags;
u32 user_key_ctrl;
u8 usr_bss_filter;
@@ -456,30 +551,37 @@ struct ath6kl {
struct sk_buff_head mcastpsq;
spinlock_t mcastpsq_lock;
u8 intra_bss;
- struct aggr_info *aggr_cntxt;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
struct list_head amsdu_rx_buffer_queue;
- struct timer_list disconnect_timer;
u8 rx_meta_ver;
- struct wireless_dev *wdev;
- struct cfg80211_scan_request *scan_req;
- struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
- enum sme_state sme_state;
enum wlan_low_pwr_state wlan_pwr_state;
- struct wmi_scan_params_cmd sc_params;
+ u8 mac_addr[ETH_ALEN];
#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
struct {
void *rx_report;
size_t rx_report_len;
} tm;
- struct {
+ struct ath6kl_hw {
+ u32 id;
+ const char *name;
u32 dataset_patch_addr;
u32 app_load_addr;
u32 app_start_override_addr;
u32 board_ext_data_addr;
u32 reserved_ram_size;
+ u32 board_addr;
+ u32 refclk_hz;
+ u32 uarttx_pin;
+
+ const char *fw_otp;
+ const char *fw;
+ const char *fw_tcmd;
+ const char *fw_patch;
+ const char *fw_api2;
+ const char *fw_board;
+ const char *fw_default_board;
} hw;
u16 conf_flags;
@@ -487,7 +589,6 @@ struct ath6kl {
struct ath6kl_mbox_info mbox_info;
struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
- int reconnect_flag;
unsigned long flag;
u8 *fw_board;
@@ -508,13 +609,7 @@ struct ath6kl {
struct dentry *debugfs_phy;
- u32 send_action_id;
- bool probe_req_report;
- u16 next_chan;
-
bool p2p;
- u16 assoc_bss_beacon_int;
- u8 assoc_bss_dtim_period;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
@@ -529,23 +624,19 @@ struct ath6kl {
struct {
unsigned int invalid_rate;
} war_stats;
+
+ u8 *roam_tbl;
+ unsigned int roam_tbl_len;
+
+ u8 keepalive;
+ u8 disc_timeout;
} debug;
#endif /* CONFIG_ATH6KL_DEBUG */
};
-static inline void *ath6kl_priv(struct net_device *dev)
+static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
{
- return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
- *cred_info,
- struct htc_endpoint_credit_dist
- *ep_dist, int credits)
-{
- ep_dist->credits += credits;
- ep_dist->cred_assngd += credits;
- cred_info->cur_free_credits -= credits;
+ return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
}
static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +652,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
return addr;
}
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +669,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs);
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +686,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info *aggr_info);
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_int,
u16 beacon_int, enum network_type net_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz);
void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
#endif /* CORE_H */
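
The core.h rework above is the heart of the multi-vif conversion: connection state, keys, scan requests and statistics move into the new struct ath6kl_vif, the old flag #defines are split into per-vif ath6kl_vif_state bits (vif->flags) and per-device ath6kl_dev_state bits (ar->flag), and ath6kl_priv() now returns the shared struct ath6kl that sits behind every vif. A minimal sketch of how a netdev callback reaches both objects after the split, assuming core.h is included; the callback name is hypothetical:

    static void ath6kl_example_ndo(struct net_device *dev)
    {
            struct ath6kl_vif *vif = netdev_priv(dev); /* per-interface state */
            struct ath6kl *ar = ath6kl_priv(dev);      /* vif->ar, shared state */

            /* connection state is now tracked per vif ... */
            if (!test_bit(CONNECTED, &vif->flags))
                    return;

            /* ... while WMI readiness is still a device-wide flag */
            if (!test_bit(WMI_READY, &ar->flag))
                    return;

            /* per-vif counters such as vif->net_stats would be updated here */
    }
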
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7879b531428..eb808b46f94 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
{
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"--- endpoint: %d svc_id: 0x%X ---\n",
ep_dist->endpoint, ep_dist->svc_id);
- ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
ep_dist->dist_flags);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
ep_dist->cred_norm);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
ep_dist->cred_min);
- ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
ep_dist->credits);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
ep_dist->cred_assngd);
- ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
ep_dist->seek_cred);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
ep_dist->cred_sz);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
ep_dist->cred_per_msg);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
ep_dist->cred_to_dist);
- ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_dist->htc_rsvd)->txq));
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
+ get_queue_depth(&ep_dist->htc_ep->txq));
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"----------------------------------\n");
}
+/* FIXME: move to htc.c */
void dump_cred_dist_stats(struct htc_target *target)
{
struct htc_endpoint_credit_dist *ep_list;
- if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
return;
list_for_each_entry(ep_list, &target->cred_dist_list, list)
dump_cred_dist(ep_list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
- target->cred_dist_cntxt, NULL);
- ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
- target->cred_dist_cntxt->total_avail_credits,
- target->cred_dist_cntxt->cur_free_credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit distribution total %d free %d\n",
+ target->credit_info->total_avail_credits,
+ target->credit_info->cur_free_credits);
}
static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- struct target_stats *tgt_stats = &ar->target_stats;
+ struct ath6kl_vif *vif;
+ struct target_stats *tgt_stats;
char *buf;
unsigned int len = 0, buf_len = 1500;
int i;
long left;
ssize_t ret_cnt;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ tgt_stats = &vif->target_stats;
+
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
return -EBUSY;
}
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
up(&ar->sem);
kfree(buf);
return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag), WMI_TIMEOUT);
+ &vif->flags), WMI_TIMEOUT);
up(&ar->sem);
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Total Avail Credits: ",
- target->cred_dist_cntxt->total_avail_credits);
+ target->credit_info->total_avail_credits);
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Free credits :",
- target->cred_dist_cntxt->cur_free_credits);
+ target->credit_info->cur_free_credits);
len += scnprintf(buf + len, buf_len - len,
" Epid Flags Cred_norm Cred_min Credits Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
print_credit_info("%9d", cred_per_msg);
print_credit_info("%14d", cred_to_dist);
len += scnprintf(buf + len, buf_len - len, "%12d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_list->htc_rsvd)->txq));
+ get_queue_depth(&ep_list->htc_ep->txq));
}
if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
.llseek = default_llseek,
};
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+ unsigned int buf_len, unsigned int len,
+ int offset, const char *name)
+{
+ int i;
+ struct htc_endpoint_stats *ep_st;
+ u32 *counter;
+
+ len += scnprintf(buf + len, buf_len - len, "%s:", name);
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ counter = ((u32 *) ep_st) + (offset / 4);
+ len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+ (25 + ENDPOINT_MAX * 11);
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+#define EPSTAT(name) \
+ len = print_endpoint_stat(target, buf, buf_len, len, \
+ offsetof(struct htc_endpoint_stats, name), \
+ #name)
+ EPSTAT(cred_low_indicate);
+ EPSTAT(tx_issued);
+ EPSTAT(tx_pkt_bundled);
+ EPSTAT(tx_bundles);
+ EPSTAT(tx_dropped);
+ EPSTAT(tx_cred_rpt);
+ EPSTAT(cred_rpt_from_rx);
+ EPSTAT(cred_rpt_from_other);
+ EPSTAT(cred_rpt_ep0);
+ EPSTAT(cred_from_rx);
+ EPSTAT(cred_from_other);
+ EPSTAT(cred_from_ep0);
+ EPSTAT(cred_cosumd);
+ EPSTAT(cred_retnd);
+ EPSTAT(rx_pkts);
+ EPSTAT(rx_lkahds);
+ EPSTAT(rx_bundl);
+ EPSTAT(rx_bundle_lkahd);
+ EPSTAT(rx_bundle_from_hdr);
+ EPSTAT(rx_alloc_thresh_hit);
+ EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ int ret, i;
+ u32 val;
+ struct htc_endpoint_stats *ep_st;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+ if (val == 0) {
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ memset(ep_st, 0, sizeof(*ep_st));
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_endpoint_stats_read,
+ .write = ath6kl_endpoint_stats_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static unsigned long ath6kl_get_num_reg(void)
{
int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
.llseek = default_llseek,
};
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len)
+{
+ const struct wmi_target_roam_tbl *tbl;
+ u16 num_entries;
+
+ if (len < sizeof(*tbl))
+ return -EINVAL;
+
+ tbl = (const struct wmi_target_roam_tbl *) buf;
+ num_entries = le16_to_cpu(tbl->num_entries);
+ if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+ len)
+ return -EINVAL;
+
+ if (ar->debug.roam_tbl == NULL ||
+ ar->debug.roam_tbl_len < (unsigned int) len) {
+ kfree(ar->debug.roam_tbl);
+ ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+ }
+
+ memcpy(ar->debug.roam_tbl, buf, len);
+ ar->debug.roam_tbl_len = len;
+
+ if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+ clear_bit(ROAM_TBL_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+
+ return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ long left;
+ struct wmi_target_roam_tbl *tbl;
+ u16 num_entries, i;
+ char *buf;
+ unsigned int len, buf_len;
+ ssize_t ret_cnt;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(ROAM_TBL_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+ if (ret) {
+ up(&ar->sem);
+ return ret;
+ }
+
+ left = wait_event_interruptible_timeout(
+ ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+ up(&ar->sem);
+
+ if (left <= 0)
+ return -ETIMEDOUT;
+
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+
+ tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+ num_entries = le16_to_cpu(tbl->num_entries);
+
+ buf_len = 100 + num_entries * 100;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "roam_mode=%u\n\n"
+ "# roam_util bssid rssi rssidt last_rssi util bias\n",
+ le16_to_cpu(tbl->roam_mode));
+
+ for (i = 0; i < num_entries; i++) {
+ struct wmi_bss_roam_info *info = &tbl->info[i];
+ len += scnprintf(buf + len, buf_len - len,
+ "%d %pM %d %d %d %d %d\n",
+ a_sle32_to_cpu(info->roam_util), info->bssid,
+ info->rssi, info->rssidt, info->last_rssi,
+ info->util, info->bias);
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+ .read = ath6kl_roam_table_read,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ u8 bssid[ETH_ALEN];
+ int i;
+ int addr[ETH_ALEN];
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN)
+ return -EINVAL;
+ for (i = 0; i < ETH_ALEN; i++)
+ bssid[i] = addr[i];
+
+ ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_force_roam = {
+ .write = ath6kl_force_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ enum wmi_roam_mode mode;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strcasecmp(buf, "default") == 0)
+ mode = WMI_DEFAULT_ROAM_MODE;
+ else if (strcasecmp(buf, "bssbias") == 0)
+ mode = WMI_HOST_BIAS_ROAM_MODE;
+ else if (strcasecmp(buf, "lock") == 0)
+ mode = WMI_LOCK_BSS_MODE;
+ else
+ return -EINVAL;
+
+ ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+ .write = ath6kl_roam_mode_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+ ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_keepalive = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_keepalive_read,
+ .write = ath6kl_keepalive_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+ ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_disconnect_timeout_read,
+ .write = ath6kl_disconnect_timeout_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[200];
+ ssize_t len;
+ char *sptr, *token;
+ struct wmi_create_pstream_cmd pstream;
+ u32 val32;
+ u16 val16;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.user_pri))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_direc))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.inactivity_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.suspension_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.service_start_time = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.tsid))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.nominal_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.max_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.mean_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.peak_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_burst_size = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.delay_bound = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_phy_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.sba = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.medium_time = cpu_to_le32(val32);
+
+ ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+ return count;
+}
+
+static const struct file_operations fops_create_qos = {
+ .write = ath6kl_create_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[100];
+ ssize_t len;
+ char *sptr, *token;
+ u8 traffic_class;
+ u8 tsid;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &tsid))
+ return -EINVAL;
+
+ ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+ traffic_class, tsid);
+
+ return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+ .write = ath6kl_delete_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 bgscan_int;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtou16(buf, 0, &bgscan_int))
+ return -EINVAL;
+
+ if (bgscan_int == 0)
+ bgscan_int = 0xffff;
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+ 0, 0, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+ .write = ath6kl_bgscan_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 listen_int_t, listen_int_b;
+ char buf[32];
+ char *sptr, *token;
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou16(token, 0, &listen_int_t))
+ return -EINVAL;
+
+ if (kstrtou16(sptr, 0, &listen_int_b))
+ return -EINVAL;
+
+ if ((listen_int_t < 15) || (listen_int_t > 5000))
+ return -EINVAL;
+
+ if ((listen_int_b < 1) || (listen_int_b > 50))
+ return -EINVAL;
+
+ ar->listen_intvl_t = listen_int_t;
+ ar->listen_intvl_b = listen_int_b;
+
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+ .read = ath6kl_listen_int_read,
+ .write = ath6kl_listen_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[100];
+ unsigned int len = 0;
+ char *sptr, *token;
+ u16 idle_period, ps_poll_num, dtim,
+ tx_wakeup, num_tx;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &idle_period))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &ps_poll_num))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &dtim))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &tx_wakeup))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &num_tx))
+ return -EINVAL;
+
+ ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+ dtim, tx_wakeup, num_tx, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_power_params = {
+ .write = ath6kl_power_params_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath6kl_debug_init(struct ath6kl *ar)
{
ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
ar->debug.fwlog_mask = 0;
ar->debugfs_phy = debugfs_create_dir("ath6kl",
- ar->wdev->wiphy->debugfsdir);
+ ar->wiphy->debugfsdir);
if (!ar->debugfs_phy) {
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_credit_dist_stats);
+ debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_endpoint_stats);
+
debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog);
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_war_stats);
+ debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_roam_table);
+
+ debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_force_roam);
+
+ debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_roam_mode);
+
+ debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_keepalive);
+
+ debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+ debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_create_qos);
+
+ debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_delete_qos);
+
+ debugfs_create_file("bgscan_interval", S_IWUSR,
+ ar->debugfs_phy, ar, &fops_bgscan_int);
+
+ debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_power_params);
+
return 0;
}
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
{
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
+ kfree(ar->debug.roam_tbl);
}
#endif
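
The new endpoint_stats debugfs file above leans on a small trick: since the counters in struct htc_endpoint_stats are plain u32s, print_endpoint_stat() can treat the struct as a flat counter array and index it with offsetof(), which is all the EPSTAT() macro needs to print one row per field. A standalone sketch of the same technique with a made-up two-field struct:

    #include <linux/types.h>
    #include <linux/stddef.h>       /* offsetof() */

    struct demo_stats {             /* stand-in for htc_endpoint_stats */
            u32 tx_issued;
            u32 rx_pkts;
    };

    static u32 demo_read_counter(const struct demo_stats *st, size_t offset)
    {
            /* only valid while every member is a u32, as in htc_endpoint_stats */
            const u32 *counter = (const u32 *)st + offset / sizeof(u32);

            return *counter;
    }

    /* e.g. demo_read_counter(st, offsetof(struct demo_stats, rx_pkts)) */
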
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 7b7675f70a1..9853c9c125c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -17,19 +17,19 @@
#ifndef DEBUG_H
#define DEBUG_H
-#include "htc_hif.h"
+#include "hif.h"
enum ATH6K_DEBUG_MASK {
- ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
- ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+ ATH6KL_DBG_CREDIT = BIT(0),
+ /* hole */
ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
- ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
- ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+ ATH6KL_DBG_HTC = BIT(5),
+ ATH6KL_DBG_HIF = BIT(6),
ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
- ATH6KL_DBG_PM = BIT(8), /* power management */
- ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+ /* hole */
+ /* hole */
ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_SDIO_DUMP = BIT(17),
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
ATH6KL_DBG_WMI_DUMP = BIT(19),
+ ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
void dump_cred_dist_stats(struct htc_target *target);
void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
int ath6kl_debug_init(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
{
}
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+ u8 timeout)
+{
+}
+
static inline int ath6kl_debug_init(struct ath6kl *ar)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index d6c898f3d0b..2fe1dadfc77 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -18,10 +18,16 @@
#define HIF_OPS_H
#include "hif.h"
+#include "debug.h"
static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+ (request & HIF_WRITE) ? "write" : "read",
+ addr, buf, len, request);
+
return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
}
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request,
struct htc_packet *packet)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+ address, buffer, length, request);
+
return ar->hif_ops->write_async(ar, address, buffer, length,
request, packet);
}
static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
return ar->hif_ops->irq_enable(ar);
}
static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
return ar->hif_ops->irq_disable(ar);
}
@@ -69,9 +83,70 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
return ar->hif_ops->cleanup_scatter(ar);
}
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+ struct cfg80211_wowlan *wow)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+ return ar->hif_ops->suspend(ar, wow);
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
+ u32 *value)
{
- return ar->hif_ops->suspend(ar);
+ return ar->hif_ops->diag_read32(ar, address, value);
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 value)
+{
+ return ar->hif_ops->diag_write32(ar, address, value);
+}
+
+static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_read(ar, buf, len);
+}
+
+static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_write(ar, buf, len);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+ return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+ return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+ return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+ ar->hif_ops->stop(ar);
}
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7409c..e57da35e59f 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -13,18 +13,19 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "hif.h"
#include "core.h"
#include "target.h"
#include "hif-ops.h"
-#include "htc_hif.h"
#include "debug.h"
#define MAILBOX_FOR_BLOCK_SIZE 1
#define ATH6KL_TIME_QUANTUM 10 /* in ms */
-static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+ bool from_dma)
{
u8 *buf;
int i;
@@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
return 0;
}
-int ath6kldev_rw_comp_handler(void *context, int status)
+int ath6kl_hif_rw_comp_handler(void *context, int status)
{
struct htc_packet *packet = context;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
packet, status);
packet->status = status;
@@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
return 0;
}
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
-static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
{
- u32 dummy;
- int status;
+ __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i, address, regdump_addr = 0;
+ int ret;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(ar->target_type, address);
+
+ /* read RAM location through diagnostic window */
+ ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+ if (ret || !regdump_addr) {
+ ath6kl_warn("failed to get ptr to register dump area: %d\n",
+ ret);
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+ regdump_addr);
+ regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+ /* fetch register dump data */
+ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ if (ret) {
+ ath6kl_warn("failed to get register dump: %d\n", ret);
+ return;
+ }
+
+ ath6kl_info("crash dump:\n");
+ ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+ ar->wiphy->fw_version);
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
- ath6kl_err("target debug interrupt\n");
+ for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
+ ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+ 4 * i,
+ le32_to_cpu(regdump_val[i]),
+ le32_to_cpu(regdump_val[i + 1]),
+ le32_to_cpu(regdump_val[i + 2]),
+ le32_to_cpu(regdump_val[i + 3]));
+ }
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int ret;
- ath6kl_target_failure(dev->ar);
+ ath6kl_warn("firmware crashed\n");
/*
* read counter to clear the interrupt, the debug error interrupt is
* counter 0.
*/
- status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
(u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
- if (status)
- WARN_ON(1);
+ if (ret)
+ ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
- return status;
+ ath6kl_hif_dump_fw_crash(dev->ar);
+
+ return ret;
}
/* mailbox recv message polling */
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
int timeout)
{
struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
/* delay a little */
mdelay(ATH6KL_TIME_QUANTUM);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
}
if (i == 0) {
@@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Target failure handler will be called in case of
* an assert.
*/
- ath6kldev_proc_dbg_intr(dev);
+ ath6kl_hif_proc_dbg_intr(dev);
}
return status;
@@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Disable packet reception (used in case the host runs out of buffers)
* using the interrupt enable registers through the host I/F
*/
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
struct ath6kl_irq_enable_reg regs;
int status = 0;
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+ enable_rx ? "enable" : "disable");
+
/* take the lock to protect interrupt enable shadows */
spin_lock_bh(&dev->lock);
@@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
return status;
}
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read)
{
int status = 0;
@@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
dev->ar->mbox_info.htc_addr;
}
- ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
- "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
scat_req->scat_entries, scat_req->len,
scat_req->addr, !read ? "async" : "sync",
(read) ? "rd" : "wr");
if (!read && scat_req->virt_scat) {
- status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+ status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
if (status) {
scat_req->status = status;
scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
scat_req->status = status;
if (!status && scat_req->virt_scat)
scat_req->status =
- ath6kldev_cp_scat_dma_buf(scat_req, true);
+ ath6kl_hif_cp_scat_dma_buf(scat_req, true);
}
return status;
}
-static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
{
u8 counter_int_status;
@@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
* the debug assertion counter interrupt.
*/
if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
- return ath6kldev_proc_dbg_intr(dev);
+ return ath6kl_hif_proc_dbg_intr(dev);
return 0;
}
-static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
{
int status;
u8 error_int_status;
@@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
return status;
}
-static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
{
int status;
u8 cpu_int_status;
@@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
* we rapidly pull packets.
*/
status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
- &lk_ahd, &fetched);
+ lk_ahd, &fetched);
if (status)
goto out;
@@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
/* CPU Interrupt */
- status = ath6kldev_proc_cpu_intr(dev);
+ status = ath6kl_hif_proc_cpu_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
/* Error Interrupt */
- status = ath6kldev_proc_err_intr(dev);
+ status = ath6kl_hif_proc_err_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
/* Counter Interrupt */
- status = ath6kldev_proc_counter_intr(dev);
+ status = ath6kl_hif_proc_counter_intr(dev);
out:
/*
@@ -479,9 +535,10 @@ out:
}
/* interrupt handler, kicks off all interrupt processing */
-int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
{
struct ath6kl_device *dev = ar->htc_target->dev;
+ unsigned long timeout;
int status = 0;
bool done = false;
@@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
* IRQ processing is synchronous, interrupt status registers can be
* re-read.
*/
- while (!done) {
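+ /* bound irq processing to ATH6KL_HIF_COMMUNICATION_TIMEOUT ms */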
+ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !done) {
status = proc_pending_irqs(dev, &done);
if (status)
break;
@@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
return status;
}
-static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
int status;
@@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
return status;
}
-int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
@@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
}
/* enable device interrupts */
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
{
int status = 0;
@@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
* target "soft" resets. The ATH6KL interrupt enables reset back to an
* "enabled" state when this happens.
*/
- ath6kldev_disable_intrs(dev);
+ ath6kl_hif_disable_intrs(dev);
/* unmask the host controller interrupts */
ath6kl_hif_irq_enable(dev->ar);
- status = ath6kldev_enable_intrs(dev);
+ status = ath6kl_hif_enable_intrs(dev);
return status;
}
/* disable all device interrupts */
-int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
{
/*
* Mask the interrupt at the HIF layer to avoid any stray interrupt
* taken while we zero out our shadow registers in
- * ath6kldev_disable_intrs().
+ * ath6kl_hif_disable_intrs().
*/
ath6kl_hif_irq_disable(dev->ar);
- return ath6kldev_disable_intrs(dev);
+ return ath6kl_hif_disable_intrs(dev);
}
-int ath6kldev_setup(struct ath6kl_device *dev)
+int ath6kl_hif_setup(struct ath6kl_device *dev)
{
int status = 0;
@@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
/* must be a power of 2 */
if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
WARN_ON(1);
+ status = -EINVAL;
goto fail_setup;
}
/* assemble mask, used for padding to a block */
dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
- ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "hif interrupt processing is sync only\n");
-
- status = ath6kldev_disable_intrs(dev);
+ status = ath6kl_hif_disable_intrs(dev);
fail_setup:
return status;
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 797e2d1d9bf..699a036f3a4 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -35,6 +35,7 @@
#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
#define MANUFACTURER_ID_AR6003_BASE 0x300
+#define MANUFACTURER_ID_AR6004_BASE 0x400
/* SDIO manufacturer ID and Codes */
#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
#define MANUFACTURER_CODE 0x271 /* Atheros */
@@ -59,6 +60,18 @@
/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+/* FIXME: do these duplicate the MAX_SCATTER_* values in hif.h? */
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
+
struct bus_request {
struct list_head list;
@@ -186,6 +199,34 @@ struct hif_scatter_req {
struct hif_scatter_item scat_list[1];
};
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+ spinlock_t lock;
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ struct htc_target *htc_cnxt;
+ struct ath6kl *ar;
+};
+
struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request);
@@ -202,7 +243,30 @@ struct ath6kl_hif_ops {
int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar);
- int (*suspend)(struct ath6kl *ar);
+ int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+ int (*resume)(struct ath6kl *ar);
+ int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+ int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
+ int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*power_on)(struct ath6kl *ar);
+ int (*power_off)(struct ath6kl *ar);
+ void (*stop)(struct ath6kl *ar);
};
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f88a7c9e414..f3b63ca25c7 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -15,13 +15,321 @@
*/
#include "core.h"
-#include "htc_hif.h"
+#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
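+/* pad a tx/rx length up to the next block boundary; block_mask is block_sz - 1 */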
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int credits)
+{
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+ ep_dist->endpoint, credits);
+
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+ if (tot_credits > 4) {
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_credit_deposit(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_credit_deposit(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+ * The control service is always marked active; it
+ * never goes inactive.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+ /* this is the lowest priority data endpoint */
+ /* FIXME: this looks fishy, check */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+ /*
+ * Streams have to be created (explicitly or implicitly) for all
+ * kinds of traffic. BE endpoints are also inactive in the
+ * beginning. When BE traffic starts it creates implicit
+ * streams that redistribute credits.
+ *
+ * Note: all other endpoints have minimums set but are
+ * initially given NO credits. Credits will be distributed
+ * as traffic activity demands.
+ */
+ }
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ else {
+ /*
+ * For the remaining data endpoints, we assume that
+ * cred_per_msg is the same for each. We use a simple
+ * calculation here: take the remaining credits,
+ * determine how many whole messages they can cover,
+ * and set each endpoint's normal value to 3/4 of
+ * that amount.
+ */
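+ /* e.g. 24 free credits, 6 per msg: 4 whole msgs = 24 usable, norm = 3/4 of that = 18 */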
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+ cur_ep_dist->endpoint,
+ cur_ep_dist->svc_id,
+ cur_ep_dist->credits,
+ cur_ep_dist->cred_per_msg,
+ cur_ep_dist->cred_norm,
+ cur_ep_dist->cred_min);
+ }
+}
+
+/* initialize and setup credit distribution */
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+ ep_dist->endpoint, limit);
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
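+/*
+ * Fold credits returned via cred_to_dist back into each endpoint and
+ * trim any endpoint holding more than its assigned/normal share; idle
+ * inactive endpoints are drained completely.
+ */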
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_dist_list;
+
+ list_for_each_entry(cur_dist_list, epdist_list, list) {
+ if (cur_dist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_dist_list->cred_to_dist > 0) {
+ cur_dist_list->credits +=
+ cur_dist_list->cred_to_dist;
+ cur_dist_list->cred_to_dist = 0;
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_assngd)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list,
+ cur_dist_list->cred_assngd);
+
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_norm)
+ ath6kl_credit_reduce(cred_info, cur_dist_list,
+ cur_dist_list->cred_norm);
+
+ if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_dist_list->txq_depth == 0)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits; ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+ if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+ /*
+ * We don't have enough in the free pool, so try taking away from
+ * lower priority services. The rules for taking away credits:
+ *
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never
+ * starve an endpoint completely)
+ * 3. Only take what you need.
+ */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
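+ /* credits still needed after emptying the free pool */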
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+ /*
+ * The current one has been allocated more than its
+ * minimum and has enough credits assigned above its
+ * minimum to fulfill our need; take away just enough
+ * to cover that need.
+ */
+ ath6kl_credit_reduce(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6kl_credit_reduce(info, curdist_list, 0);
+ else
+ ath6kl_credit_reduce(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked, so
+ * it must NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order, as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6kl_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6kl_credit_redistribute(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
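+ /* sanity check: the free pool must stay within [0, total_avail_credits] */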
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
u8 *align_addr;
@@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target,
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
spin_unlock_bh(&target->tx_lock);
}
@@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
if (list_empty(txq))
return;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send complete ep %d, (%d pkts)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx complete ep %d pkts %d\n",
endpoint->eid, get_queue_depth(txq));
ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target,
struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
struct list_head container;
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+ packet->info.tx.seqno);
+
htc_tx_comp_update(target, endpoint, packet);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
INIT_LIST_HEAD(&tx_compq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scat complete len %d entries %d\n",
scat_req->len, scat_req->scat_entries);
if (scat_req->status)
@@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
send_len = packet->act_len + HTC_HDR_LENGTH;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
- __func__, send_len, sync ? "sync" : "async");
-
padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
- padded_len,
- target->dev->ar->mbox_info.htc_addr,
- sync ? "sync" : "async");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+ send_len, packet->info.tx.seqno, padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
if (sync) {
status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target,
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
*req_cred, ep->cred_dist.credits);
if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target,
/* Seek more credits */
ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
ep->cred_dist.seek_cred = 0;
if (ep->cred_dist.credits < *req_cred) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "not enough credits for ep %d - leaving packet in queue\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit not found for ep %d\n",
eid);
return -EINVAL;
}
@@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target,
ep->cred_dist.seek_cred =
ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
/* see if we were successful in getting more */
if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
/* tell the target we need credits ASAP! */
*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit we need credits asap\n");
}
}
@@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
packet = list_first_entry(&endpoint->txq, struct htc_packet,
list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "got head pkt:0x%p , queue depth: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx got packet 0x%p queue depth %d\n",
packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->len += len;
scat_req->scat_entries++;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
- i, packet, len, rem_scat);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+ i, packet, packet->info.tx.seqno, len, rem_scat);
}
/* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
if (!scat_req) {
/* no scatter resources */
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "no more scatter resources\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx no more scatter resources\n");
break;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
scat_req->len = 0;
@@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
n_sent_bundle++;
tot_pkts_bundle += scat_req->scat_entries;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send scatter total bytes: %d , entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scatter bytes %d entries %d\n",
scat_req->len, scat_req->scat_entries);
- ath6kldev_submit_scat_req(target->dev, scat_req, false);
+ ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
if (status)
break;
@@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
*sent_bundle = n_sent_bundle;
*n_bundle_pkts = tot_pkts_bundle;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
- __func__, n_sent_bundle);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+ n_sent_bundle);
return;
}
@@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
if (endpoint->tx_proc_cnt > 1) {
endpoint->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
return;
}
@@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target,
overflow = true;
if (overflow)
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
- endpoint->eid, overflow, txq_depth,
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx overflow ep %d depth %d max %d\n",
+ endpoint->eid, txq_depth,
endpoint->max_txq_depth);
if (overflow && ep_cb.tx_full) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "indicating overflowed tx packet: 0x%p\n", tx_pkt);
-
if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
HTC_SEND_FULL_DROP) {
endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target)
* are not modifying any state.
*/
list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
- endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+ endpoint = cred_dist->htc_ep;
spin_lock_bh(&target->tx_lock);
if (!list_empty(&endpoint->txq)) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d has %d credits and %d packets in tx queue\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc creds ep %d credits %d pkts %d\n",
cred_dist->endpoint,
endpoint->cred_dist.credits,
get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target)
}
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_dist_cntxt,
+ struct ath6kl_htc_credit_info *credit_info,
u16 srvc_pri_order[], int list_len)
{
struct htc_endpoint *endpoint;
int i, ep;
- target->cred_dist_cntxt = cred_dist_cntxt;
+ target->credit_info = credit_info;
list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
&target->cred_dist_list);
@@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
struct htc_endpoint *endpoint;
struct list_head queue;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx ep id %d buf 0x%p len %d\n",
packet->endpoint, packet->buf, packet->act_len);
if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
packet->status = -ECANCELED;
list_del(&packet->list);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
packet, packet->act_len,
packet->endpoint, packet->info.tx.tag);
@@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target,
endpoint->cred_dist.txq_depth =
get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx activity ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
}
spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
if (padded_len > packet->buf_len) {
- ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
padded_len, rx_len, packet->buf_len);
return -ENOMEM;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
packet, packet->info.rx.exp_hdr,
- padded_len, dev->ar->mbox_info.htc_addr, "sync");
+ padded_len, dev->ar->mbox_info.htc_addr);
status = hif_read_write_sync(dev->ar,
dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
}
endpoint->ep_st.rx_bundle_from_hdr += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle pkts %d\n",
n_msg);
} else
/* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
packets->act_len + HTC_HDR_LENGTH);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "Unexpected ENDPOINT 0 Message", "",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx unexpected endpoint 0 message", "",
packets->buf - HTC_HDR_LENGTH,
packets->act_len + HTC_HDR_LENGTH);
}
@@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target,
int tot_credits = 0, i;
bool dist = false;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
-
spin_lock_bh(&target->tx_lock);
for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target,
endpoint = &target->endpoint[rpt->eid];
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
- rpt->eid, rpt->credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit report ep %d credits %d\n",
+ rpt->eid, rpt->credits);
endpoint->ep_st.tx_cred_rpt += 1;
endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target,
tot_credits += rpt->credits;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "report indicated %d credits to distribute\n",
- tot_credits);
-
if (dist) {
/*
* This was a credit return based on a completed send
* operation. Note: this is done with the lock held.
*/
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
-
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
}
spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target,
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
&& next_lk_ahds) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
/* look ahead bytes are valid, copy them over */
memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx next look ahead",
"", next_lk_ahds, 4);
*n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target,
bundle_lkahd_rpt =
(struct htc_bundle_lkahd_rpt *) record_buf;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
"", record_buf, record->len);
for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target,
u8 *record_buf;
u8 *orig_buf;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
-
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
- buf, len);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
orig_buf = buf;
orig_len = len;
@@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target,
}
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
"", orig_buf, orig_len);
return status;
@@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (n_lkahds != NULL)
*n_lkahds = 0;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
- packet->buf, packet->act_len);
-
/*
* NOTE: we cannot assume the alignment of buf, so we use the safe
* macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (lk_ahd != packet->info.rx.exp_hdr) {
ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
__func__, packet, packet->info.rx.rx_flags);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
"", &packet->info.rx.exp_hdr, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
"", (u8 *)&lk_ahd, sizeof(lk_ahd));
status = -ENOMEM;
goto fail_rx;
@@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
fail_rx:
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
- "", packet->buf,
- packet->act_len < 256 ? packet->act_len : 256);
- else {
- if (packet->act_len > 0)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "HTC - Application Msg", "",
- packet->buf, packet->act_len);
- }
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+ "", packet->buf, packet->act_len);
return status;
}
@@ -1534,8 +1815,8 @@ fail_rx:
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
struct htc_packet *packet)
{
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc calling ep %d recv callback on packet 0x%p\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx complete ep %d packet 0x%p\n",
endpoint->eid, packet);
endpoint->ep_cb.rx(endpoint->target, packet);
}
@@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
len = 0;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "%s(): (numpackets: %d , actual : %d)\n",
- __func__, get_queue_depth(rxq), n_scat_pkt);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle depth %d pkts %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
scat_req = hif_scatter_req_get(target->dev->ar);
@@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
scat_req->len = len;
scat_req->scat_entries = i;
- status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+ status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
if (!status)
*n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
int status = 0;
list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
- list_del(&packet->list);
ep = &target->endpoint[packet->endpoint];
/* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
if (status)
return status;
+ list_del(&packet->list);
+
if (list_empty(comp_pktq)) {
/*
* Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
int fetched_pkts;
bool part_bundle = false;
int status = 0;
+ struct list_head tmp_rxq;
+ struct htc_packet *packet, *tmp_pkt;
/* now go fetch the list of HTC packets */
while (!list_empty(rx_pktq)) {
fetched_pkts = 0;
+ INIT_LIST_HEAD(&tmp_rxq);
+
if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
/*
* There are enough packets to attempt a
@@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
* allowed.
*/
status = ath6kl_htc_rx_bundle(target, rx_pktq,
- comp_pktq,
+ &tmp_rxq,
&fetched_pkts,
part_bundle);
if (status)
- return status;
+ goto fail_rx;
if (!list_empty(rx_pktq))
part_bundle = true;
+
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
if (!fetched_pkts) {
- struct htc_packet *packet;
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
- list_del(&packet->list);
-
/* fully synchronous */
packet->completion = NULL;
- if (!list_empty(rx_pktq))
+ if (!list_is_singular(rx_pktq))
/*
* look_aheads in all packet
* except the last one in the
@@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
/* go fetch the packet */
status = ath6kl_htc_rx_packet(target, packet,
packet->act_len);
+
+ list_move_tail(&packet->list, &tmp_rxq);
+
if (status)
- return status;
+ goto fail_rx;
- list_add_tail(&packet->list, comp_pktq);
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
}
+ return 0;
+
+fail_rx:
+
+ /*
+ * Clean up any packets we allocated but didn't use to
+ * actually fetch any packets.
+ */
+
+ list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
return status;
}
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *num_pkts)
+ u32 msg_look_ahead, int *num_pkts)
{
struct htc_packet *packets, *tmp_pkt;
struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
* On first entry copy the look_aheads into our temp array for
* processing
*/
- memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+ look_aheads[0] = msg_look_ahead;
while (true) {
@@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (status) {
ath6kl_err("failed to get pending recv messages: %d\n",
status);
- /*
- * Cleanup any packets we allocated but didn't use to
- * actually fetch any packets.
- */
- list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
- list_del(&packets->list);
- htc_reclaim_rxbuf(target, packets,
- &target->endpoint[packets->endpoint]);
- }
/* cleanup any packets in sync completion queue */
list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (target->htc_flags & HTC_OP_STATE_STOPPING) {
ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
}
@@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
*/
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
*num_pkts = n_fetched;
@@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
struct htc_frame_hdr *htc_hdr;
u32 look_ahead;
- if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+ if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
@@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
depth = get_queue_depth(pkt_queue);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx add multiple ep id %d cnt %d len %d\n",
first_pkt->endpoint, depth, first_pkt->buf_len);
endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
/* check if we are blocked waiting for a new buffer */
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
if (target->ep_waiting == first_pkt->endpoint) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "receiver was blocked on ep:%d, unblocking.\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
/* TODO : implement a buffer threshold count? */
- ath6kldev_rx_control(target->dev, true);
+ ath6kl_hif_rx_control(target->dev, true);
return status;
}
@@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
&endpoint->rx_bufq, list) {
list_del(&packet->list);
spin_unlock_bh(&target->rx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target,
unsigned int max_msg_sz = 0;
int status = 0;
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc_conn_service, target:0x%p service id:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc connect service target 0x%p service id 0x%x\n",
target, conn_req->svc_id);
if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->len_max = max_msg_sz;
endpoint->ep_cb = conn_req->ep_cb;
endpoint->cred_dist.svc_id = conn_req->svc_id;
- endpoint->cred_dist.htc_rsvd = endpoint;
+ endpoint->cred_dist.htc_ep = endpoint;
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
@@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target)
}
/* reset distribution list */
+ /* FIXME: free existing entries */
INIT_LIST_HEAD(&target->cred_dist_list);
}
@@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->msg_per_bndl_max = min(target->max_scat_entries,
target->msg_per_bndl_max);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc bundling allowed. max msg per htc bundle: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc bundling allowed msg_per_bndl_max %d\n",
target->msg_per_bndl_max);
/* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
target->max_xfer_szper_scatreq);
- ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "target ready: credits: %d credit size: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc target ready credits %d size %d\n",
target->tgt_creds, target->tgt_cred_sz);
/* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->msg_per_bndl_max = 0;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
(target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
target->htc_tgt_ver);
@@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target)
status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
if (status)
+ /*
+ * FIXME: this call doesn't make sense; the caller should
+ * call ath6kl_htc_cleanup() when it wants to remove HTC.
+ */
ath6kl_hif_cleanup_scatter(target->dev->ar);
fail_wait_target:
@@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target)
struct htc_packet *packet;
int status;
+ memset(&target->dev->irq_proc_reg, 0,
+ sizeof(target->dev->irq_proc_reg));
+
/* Disable interrupts at the chip level */
- ath6kldev_disable_intrs(target->dev);
+ ath6kl_hif_disable_intrs(target->dev);
target->htc_flags = 0;
target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target)
}
/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
- ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
- target->tgt_creds);
+ ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+ target->tgt_creds);
dump_cred_dist_stats(target);
@@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
/* unmask interrupts */
- status = ath6kldev_unmask_intrs(target->dev);
+ status = ath6kl_hif_unmask_intrs(target->dev);
if (status)
ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
}
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+ u32 block_size, ctrl_bufsz;
+ struct htc_packet *packet;
+ int i;
+
+ reset_ep_state(target);
+
+ block_size = target->dev->ar->mbox_info.block_size;
+
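+ /* each control buffer holds an HTC header plus the larger of one block or one max-size control message */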
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+
+ return 0;
+}
+
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
@@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target)
* function returns all pending HIF I/O has completed, we can
* safely flush the queues.
*/
- ath6kldev_mask_intrs(target->dev);
+ ath6kl_hif_mask_intrs(target->dev);
ath6kl_htc_flush_txep_all(target);
ath6kl_htc_flush_rx_buf(target);
- reset_ep_state(target);
+ ath6kl_htc_reset(target);
}
void *ath6kl_htc_create(struct ath6kl *ar)
{
struct htc_target *target = NULL;
- struct htc_packet *packet;
- int status = 0, i = 0;
- u32 block_size, ctrl_bufsz;
+ int status = 0;
target = kzalloc(sizeof(*target), GFP_KERNEL);
if (!target) {
@@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar)
if (!target->dev) {
ath6kl_err("unable to allocate memory\n");
status = -ENOMEM;
- goto fail_create_htc;
+ goto err_htc_cleanup;
}
spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar)
target->dev->htc_cnxt = target;
target->ep_waiting = ENDPOINT_MAX;
- reset_ep_state(target);
-
- status = ath6kldev_setup(target->dev);
-
+ status = ath6kl_hif_setup(target->dev);
if (status)
- goto fail_create_htc;
-
- block_size = ar->mbox_info.block_size;
-
- ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
- (block_size + HTC_HDR_LENGTH) :
- (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
-
- for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
- packet = kzalloc(sizeof(*packet), GFP_KERNEL);
- if (!packet)
- break;
+ goto err_htc_cleanup;
- packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
- if (!packet->buf_start) {
- kfree(packet);
- break;
- }
+ status = ath6kl_htc_reset(target);
+ if (status)
+ goto err_htc_cleanup;
- packet->buf_len = ctrl_bufsz;
- if (i < NUM_CONTROL_RX_BUFFERS) {
- packet->act_len = 0;
- packet->buf = packet->buf_start;
- packet->endpoint = ENDPOINT_0;
- list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
- list_add_tail(&packet->list, &target->free_ctrl_txbuf);
- }
+ return target;
-fail_create_htc:
- if (i != NUM_CONTROL_BUFFERS || status) {
- if (target) {
- ath6kl_htc_cleanup(target);
- target = NULL;
- }
- }
+err_htc_cleanup:
+ ath6kl_htc_cleanup(target);
- return target;
+ return NULL;
}
/* cleanup the HTC instance */
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 8ce0c2c07de..57672e1ed1a 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
int cred_per_msg;
/* reserved for HTC use */
- void *htc_rsvd;
+ struct htc_endpoint *htc_ep;
/*
* current depth of TX queue , i.e. messages waiting for credits
@@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
HTC_CREDIT_DIST_SEEK_CREDITS,
};
-struct htc_credit_state_info {
+struct ath6kl_htc_credit_info {
int total_avail_credits;
int cur_free_credits;
+
+ /* list of lowest priority endpoints */
struct list_head lowestpri_ep_dist;
};
@@ -508,10 +510,13 @@ struct ath6kl_device;
/* our HTC target state */
struct htc_target {
struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+ /* contains struct htc_endpoint_credit_dist */
struct list_head cred_dist_list;
+
struct list_head free_ctrl_txbuf;
struct list_head free_ctrl_rxbuf;
- struct htc_credit_state_info *cred_dist_cntxt;
+ struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
spinlock_t htc_lock;
@@ -542,7 +547,7 @@ struct htc_target {
void *ath6kl_htc_create(struct ath6kl *ar);
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_info,
+ struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len);
int ath6kl_htc_wait_target(struct htc_target *target);
int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq);
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *n_pkts);
+ u32 msg_look_ahead, int *n_pkts);
+
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644
index 171ad63d89b..00000000000
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2007-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HTC_HIF_H
-#define HTC_HIF_H
-
-#include "htc.h"
-#include "hif.h"
-
-#define ATH6KL_MAILBOXES 4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX 0
-
-#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
-
-#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
- INT_STATUS_ENABLE_CPU_MASK | \
- INT_STATUS_ENABLE_COUNTER_MASK)
-
-#define ATH6KL_REG_IO_BUFFER_SIZE 32
-#define ATH6KL_MAX_REG_IO_BUFFERS 8
-#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
-#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
-#define ATH6KL_SCATTER_REQS 4
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD 128
-#endif
-#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
-#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
-
-struct ath6kl_irq_proc_registers {
- u8 host_int_status;
- u8 cpu_int_status;
- u8 error_int_status;
- u8 counter_int_status;
- u8 mbox_frame;
- u8 rx_lkahd_valid;
- u8 host_int_status2;
- u8 gmbox_rx_avail;
- __le32 rx_lkahd[2];
- __le32 rx_gmbox_lkahd_alias[2];
-} __packed;
-
-struct ath6kl_irq_enable_reg {
- u8 int_status_en;
- u8 cpu_int_status_en;
- u8 err_int_status_en;
- u8 cntr_int_status_en;
-} __packed;
-
-struct ath6kl_device {
- spinlock_t lock;
- u8 pad1[A_CACHE_LINE_PAD];
- struct ath6kl_irq_proc_registers irq_proc_reg;
- u8 pad2[A_CACHE_LINE_PAD];
- struct ath6kl_irq_enable_reg irq_en_reg;
- u8 pad3[A_CACHE_LINE_PAD];
- struct htc_target *htc_cnxt;
- struct ath6kl *ar;
-};
-
-int ath6kldev_setup(struct ath6kl_device *dev);
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
-int ath6kldev_mask_intrs(struct ath6kl_device *dev);
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
- u32 *lk_ahd, int timeout);
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
-int ath6kldev_disable_intrs(struct ath6kl_device *dev);
-
-int ath6kldev_rw_comp_handler(void *context, int status);
-int ath6kldev_intr_bh_handler(struct ath6kl *ar);
-
-/* Scatter Function and Definitions */
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
- struct hif_scatter_req *scat_req, bool read);
-
-#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c1d2366704b..7f55be3092d 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,6 +16,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/errno.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
#include "core.h"
@@ -26,9 +27,85 @@
unsigned int debug_mask;
static unsigned int testmode;
+static bool suspend_cutpower;
module_param(debug_mask, uint, 0644);
module_param(testmode, uint, 0644);
+module_param(suspend_cutpower, bool, 0444);
+
+static const struct ath6kl_hw hw_list[] = {
+ {
+ .id = AR6003_HW_2_0_VERSION,
+ .name = "ar6003 hw 2.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x543180,
+ .board_ext_data_addr = 0x57e500,
+ .reserved_ram_size = 6912,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ /* hw2.0 needs the override address hardcoded */
+ .app_start_override_addr = 0x944C00,
+
+ .fw_otp = AR6003_HW_2_0_OTP_FILE,
+ .fw = AR6003_HW_2_0_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_0_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6003_HW_2_1_1_VERSION,
+ .name = "ar6003 hw 2.1.1",
+ .dataset_patch_addr = 0x57ff74,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x542330,
+ .reserved_ram_size = 512,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ .fw_otp = AR6003_HW_2_1_1_OTP_FILE,
+ .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_1_1_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_0_VERSION,
+ .name = "ar6004 hw 1.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 19456,
+ .board_addr = 0x433900,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_0_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_1_VERSION,
+ .name = "ar6004 hw 1.1",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 11264,
+ .board_addr = 0x43d400,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_1_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+};
/*
* Include definitions here that can be used to tune the WLAN module
@@ -55,7 +132,6 @@ module_param(testmode, uint, 0644);
*/
#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
-#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
#define ATH6KL_DATA_OFFSET 64
struct sk_buff *ath6kl_buf_alloc(int size)
@@ -73,37 +149,21 @@ struct sk_buff *ath6kl_buf_alloc(int size)
return skb;
}
-void ath6kl_init_profile_info(struct ath6kl *ar)
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
{
- ar->ssid_len = 0;
- memset(ar->ssid, 0, sizeof(ar->ssid));
-
- ar->dot11_auth_mode = OPEN_AUTH;
- ar->auth_mode = NONE_AUTH;
- ar->prwise_crypto = NONE_CRYPT;
- ar->prwise_crypto_len = 0;
- ar->grp_crypto = NONE_CRYPT;
- ar->grp_crypto_len = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
- ar->nw_type = ar->next_mode = INFRA_NETWORK;
-}
-
-static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
-{
- switch (ar->nw_type) {
- case INFRA_NETWORK:
- return HI_OPTION_FW_MODE_BSS_STA;
- case ADHOC_NETWORK:
- return HI_OPTION_FW_MODE_IBSS;
- case AP_NETWORK:
- return HI_OPTION_FW_MODE_AP;
- default:
- ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
- return 0xff;
- }
+ vif->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+
+ vif->dot11_auth_mode = OPEN_AUTH;
+ vif->auth_mode = NONE_AUTH;
+ vif->prwise_crypto = NONE_CRYPT;
+ vif->prwise_crypto_len = 0;
+ vif->grp_crypto = NONE_CRYPT;
+ vif->grp_crypto_len = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
}
static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +180,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar)
return -EIO;
address = TARG_VTOP(ar->target_type, data);
- host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
sizeof(struct host_app_area)))
return -EIO;
@@ -258,40 +318,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
return 0;
}
-static void ath6kl_init_control_info(struct ath6kl *ar)
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
{
- u8 ctr;
-
- clear_bit(WMI_ENABLED, &ar->flag);
- ath6kl_init_profile_info(ar);
- ar->def_txkey_index = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- ar->ch_hint = 0;
- ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
- ar->listen_intvl_b = 0;
- ar->tx_pwr = 0;
- clear_bit(SKIP_SCAN, &ar->flag);
- set_bit(WMM_ENABLED, &ar->flag);
- ar->intra_bss = 1;
- memset(&ar->sc_params, 0, sizeof(ar->sc_params));
- ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
- ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
- ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
- memset((u8 *)ar->sta_list, 0,
- AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
- spin_lock_init(&ar->mcastpsq_lock);
-
- /* Init the PS queues */
- for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
- spin_lock_init(&ar->sta_list[ctr].psq_lock);
- skb_queue_head_init(&ar->sta_list[ctr].psq);
- }
-
- skb_queue_head_init(&ar->mcastpsq);
-
- memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+ ath6kl_init_profile_info(vif);
+ vif->def_txkey_index = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ vif->ch_hint = 0;
}
/*
@@ -341,62 +373,7 @@ out:
return status;
}
-#define REG_DUMP_COUNT_AR6003 60
-#define REGISTER_DUMP_LEN_MAX 60
-
-static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
-{
- u32 address;
- u32 regdump_loc = 0;
- int status;
- u32 regdump_val[REGISTER_DUMP_LEN_MAX];
- u32 i;
-
- if (ar->target_type != TARGET_TYPE_AR6003)
- return;
-
- /* the reg dump pointer is copied to the host interest area */
- address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
- address = TARG_VTOP(ar->target_type, address);
-
- /* read RAM location through diagnostic window */
- status = ath6kl_diag_read32(ar, address, &regdump_loc);
-
- if (status || !regdump_loc) {
- ath6kl_err("failed to get ptr to register dump area\n");
- return;
- }
-
- ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
- regdump_loc);
- regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
-
- /* fetch register dump data */
- status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
- REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
-
- if (status) {
- ath6kl_err("failed to get register dump\n");
- return;
- }
- ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
-
- for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
- ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
- i, regdump_val[i]);
-
-}
-
-void ath6kl_target_failure(struct ath6kl *ar)
-{
- ath6kl_err("target asserted\n");
-
- /* try dumping target assertion information (if any) */
- ath6kl_dump_target_assert_info(ar);
-
-}
-
-static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
int status = 0;
int ret;
@@ -406,46 +383,46 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
* default values. Required if checksum offload is needed. Set
* RxMetaVersion to 2.
*/
- if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
ar->rx_meta_ver, 0, 0)) {
ath6kl_err("unable to set the rx frame format\n");
status = -EIO;
}
if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
- if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+ if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
ath6kl_err("unable to set power save fail event policy\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
- if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+ if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
ath6kl_err("unable to set barker preamble policy\n");
status = -EIO;
}
- if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+ if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
ath6kl_err("unable to set keep alive interval\n");
status = -EIO;
}
- if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+ if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
ath6kl_err("unable to set disconnect timeout\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
- if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+ if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
ath6kl_err("unable to set txop bursting\n");
status = -EIO;
}
- if (ar->p2p) {
- ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
+ ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
P2P_FLAG_CAPABILITIES_REQ |
P2P_FLAG_MACADDR_REQ |
P2P_FLAG_HMODEL_REQ);
@@ -453,13 +430,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
"capabilities (%d) - assuming P2P not "
"supported\n", ret);
- ar->p2p = 0;
+ ar->p2p = false;
}
}
- if (ar->p2p) {
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
/* Enable Probe Request reporting for P2P */
- ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+ ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
if (ret) {
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
"Request reporting (%d)\n", ret);
@@ -472,13 +449,40 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
int ath6kl_configure_target(struct ath6kl *ar)
{
u32 param, ram_reserved_size;
- u8 fw_iftype;
+ u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+ int i, status;
- fw_iftype = ath6kl_get_fw_iftype(ar);
- if (fw_iftype == 0xff)
- return -EINVAL;
+ /*
+ * Note: Even though the firmware interface type is
+ * chosen as BSS_STA for all three interfaces, each can
+ * be configured as IBSS/AP as long as the fw submode
+ * remains normal mode (0 - AP, STA and IBSS). But
+ * due to a target assert in firmware, only one interface is
+ * configured for now.
+ */
+ fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+ for (i = 0; i < ar->vif_max; i++)
+ fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+ /*
+ * By default, the submodes are:
+ * vif[0] - AP/STA/IBSS
+ * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+ * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+ */
+
+ for (i = 0; i < ar->max_norm_iface; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ if (ar->p2p && ar->vif_max == 1)
+ fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
- /* Tell target which HTC version it is used*/
param = HTC_PROTOCOL_VERSION;
if (ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
@@ -499,12 +503,10 @@ int ath6kl_configure_target(struct ath6kl *ar)
return -EIO;
}
- param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
- param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
- if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
- param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
- HI_OPTION_FW_SUBMODE_SHIFT;
- }
+ param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
+ param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+ param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
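A minimal standalone sketch of the per-vif mode/submode packing used in the hunk above is shown here for illustration only; it is not part of the patch, and the HI_OPTION_* widths, shifts and values are assumed placeholders, not the real ath6kl/firmware header definitions.

/* Illustrative sketch only -- not part of the patch. The widths,
 * shifts and values below are assumed placeholders; the real ones
 * live in the ath6kl firmware headers. */
#include <stdio.h>

#define HI_OPTION_FW_MODE_BITS      2   /* assumed width per vif */
#define HI_OPTION_FW_SUBMODE_BITS   2   /* assumed width per vif */
#define HI_OPTION_NUM_DEV_SHIFT     9   /* assumed shift */
#define HI_OPTION_FW_MODE_SHIFT     12  /* assumed shift */
#define HI_OPTION_FW_SUBMODE_SHIFT  20  /* assumed shift */

#define HI_OPTION_FW_MODE_BSS_STA   0x1 /* assumed value */
#define HI_OPTION_FW_SUBMODE_NONE   0x0 /* assumed value */
#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* assumed value */

int main(void)
{
	unsigned int vif_max = 3, max_norm_iface = 1;
	unsigned int fw_mode = 0, fw_submode = 0, param = 0;
	unsigned int i;

	/* every vif gets the same interface type, packed side by side */
	for (i = 0; i < vif_max; i++)
		fw_mode |= HI_OPTION_FW_MODE_BSS_STA <<
			   (i * HI_OPTION_FW_MODE_BITS);

	/* the first max_norm_iface vifs are "normal", the rest P2P dev */
	for (i = 0; i < max_norm_iface; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
			      (i * HI_OPTION_FW_SUBMODE_BITS);
	for (i = max_norm_iface; i < vif_max; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
			      (i * HI_OPTION_FW_SUBMODE_BITS);

	/* pack device count, modes and submodes into one option word */
	param |= vif_max << HI_OPTION_NUM_DEV_SHIFT;
	param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
	param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;

	printf("hi_option_flag = 0x%08x\n", param);
	return 0;
}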
@@ -550,71 +552,55 @@ int ath6kl_configure_target(struct ath6kl *ar)
/* use default number of control buffers */
return -EIO;
+ /* Configure GPIO AR600x UART */
+ param = ar->hw.uarttx_pin;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbg_uart_txpin)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
+ /* Configure target refclk_hz */
+ param = ar->hw.refclk_hz;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_refclk_hz)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
return 0;
}
-struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+void ath6kl_core_free(struct ath6kl *ar)
{
- struct net_device *dev;
- struct ath6kl *ar;
- struct wireless_dev *wdev;
-
- wdev = ath6kl_cfg80211_init(sdev);
- if (!wdev) {
- ath6kl_err("ath6kl_cfg80211_init failed\n");
- return NULL;
- }
-
- ar = wdev_priv(wdev);
- ar->dev = sdev;
- ar->wdev = wdev;
- wdev->iftype = NL80211_IFTYPE_STATION;
-
- if (ath6kl_debug_init(ar)) {
- ath6kl_err("Failed to initialize debugfs\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev = alloc_netdev(0, "wlan%d", ether_setup);
- if (!dev) {
- ath6kl_err("no memory for network device instance\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
- wdev->netdev = dev;
- ar->sme_state = SME_DISCONNECTED;
-
- init_netdev(dev);
+ wiphy_free(ar->wiphy);
+}
- ar->net_dev = dev;
- set_bit(WLAN_ENABLED, &ar->flag);
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+ ath6kl_hif_power_off(ar);
- ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ destroy_workqueue(ar->ath6kl_wq);
- spin_lock_init(&ar->lock);
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
- ath6kl_init_control_info(ar);
- init_waitqueue_head(&ar->event_wq);
- sema_init(&ar->sem, 1);
- clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ ath6kl_cookie_cleanup(ar);
- INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
- setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
- (unsigned long) dev);
+ ath6kl_bmi_cleanup(ar);
- return ar;
-}
+ ath6kl_debug_cleanup(ar);
-int ath6kl_unavail_ev(struct ath6kl *ar)
-{
- ath6kl_destroy(ar->net_dev, 1);
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
- return 0;
+ ath6kl_deinit_ieee80211_hw(ar);
}
/* firmware upload */
@@ -643,11 +629,11 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
static const char *get_target_ver_dir(const struct ath6kl *ar)
{
switch (ar->version.target_ver) {
- case AR6003_REV1_VERSION:
+ case AR6003_HW_1_0_VERSION:
return "ath6k/AR6003/hw1.0";
- case AR6003_REV2_VERSION:
+ case AR6003_HW_2_0_VERSION:
return "ath6k/AR6003/hw2.0";
- case AR6003_REV3_VERSION:
+ case AR6003_HW_2_1_1_VERSION:
return "ath6k/AR6003/hw2.1.1";
}
ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
@@ -705,17 +691,10 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
if (ar->fw_board != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_BOARD_DATA_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw_board == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -733,17 +712,7 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
filename, ret);
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
- break;
- }
+ filename = ar->hw.fw_default_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -767,19 +736,14 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar)
if (ar->fw_otp != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_OTP_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n");
+ if (ar->hw.fw_otp == NULL) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "no OTP file configured for this hw\n");
return 0;
- break;
- default:
- filename = AR6003_REV3_OTP_FILE;
- break;
}
+ filename = ar->hw.fw_otp;
+
ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
&ar->fw_otp_len);
if (ret) {
@@ -800,38 +764,22 @@ static int ath6kl_fetch_fw_file(struct ath6kl *ar)
return 0;
if (testmode) {
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_warn("testmode not supported with ar6004\n");
+ if (ar->hw.fw_tcmd == NULL) {
+ ath6kl_warn("testmode not supported\n");
return -EOPNOTSUPP;
- default:
- ath6kl_warn("unknown target version: 0x%x\n",
- ar->version.target_ver);
- return -EINVAL;
}
+ filename = ar->hw.fw_tcmd;
+
set_bit(TESTMODE, &ar->flag);
goto get_fw;
}
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_FILE;
- break;
- default:
- filename = AR6003_REV3_FIRMWARE_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw;
get_fw:
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
@@ -849,27 +797,20 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar)
const char *filename;
int ret;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_PATCH_FILE;
- break;
- case AR6004_REV1_VERSION:
- /* FIXME: implement for AR6004 */
+ if (ar->fw_patch != NULL)
return 0;
- break;
- default:
- filename = AR6003_REV3_PATCH_FILE;
- break;
- }
- if (ar->fw_patch == NULL) {
- ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
- &ar->fw_patch_len);
- if (ret) {
- ath6kl_err("Failed to get patch file %s: %d\n",
- filename, ret);
- return ret;
- }
+ if (ar->hw.fw_patch == NULL)
+ return 0;
+
+ filename = ar->hw.fw_patch;
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
}
return 0;
@@ -904,19 +845,10 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
int ret, ie_id, i, index, bit;
__le32 *val;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_2_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_FIRMWARE_2_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_2_FILE;
- break;
- default:
+ if (ar->hw.fw_api2 == NULL)
return -EOPNOTSUPP;
- }
+
+ filename = ar->hw.fw_api2;
ret = request_firmware(&fw, filename, ar->dev);
if (ret)
@@ -1006,12 +938,15 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:
+ if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
+ break;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found firmware capabilities ie (%zd B)\n",
ie_len);
for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
- index = ALIGN(i, 8) / 8;
+ index = i / 8;
bit = i % 8;
if (data[index] & (1 << bit))
@@ -1030,9 +965,34 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.dataset_patch_addr = le32_to_cpup(val);
ath6kl_dbg(ATH6KL_DBG_BOOT,
- "found patch address ie 0x%d\n",
+ "found patch address ie 0x%x\n",
ar->hw.dataset_patch_addr);
break;
+ case ATH6KL_FW_IE_BOARD_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.board_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found board address ie 0x%x\n",
+ ar->hw.board_addr);
+ break;
+ case ATH6KL_FW_IE_VIF_MAX:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
+ ATH6KL_VIF_MAX);
+
+ if (ar->vif_max > 1 && !ar->p2p)
+ ar->max_norm_iface = 2;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found vif max ie %d\n", ar->vif_max);
+ break;
default:
ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
le32_to_cpup(&hdr->id));
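The ATH6KL_FW_IE_CAPABILITIES handling above reads one capability per bit (byte index i/8, bit i%8) after checking that the IE carries at least DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8) bytes. A self-contained sketch of that decode follows; the capability count and the test data are assumptions for the example, not the driver's actual enum.

/* Illustrative sketch of the bit-per-capability decode used above.
 * CAP_MAX and the sample IE are assumptions for the example only. */
#include <stdio.h>
#include <stddef.h>

#define CAP_MAX 9                       /* placeholder capability count */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void decode_capabilities(const unsigned char *data, size_t ie_len)
{
	size_t need = DIV_ROUND_UP(CAP_MAX, 8);
	unsigned int i, index, bit;

	if (ie_len < need) {
		printf("capability IE too short (%zu < %zu)\n", ie_len, need);
		return;
	}

	for (i = 0; i < CAP_MAX; i++) {
		index = i / 8;          /* which byte holds this capability */
		bit = i % 8;            /* which bit inside that byte */
		if (data[index] & (1u << bit))
			printf("capability %u supported\n", i);
	}
}

int main(void)
{
	/* bits 0, 2 and 8 set: capabilities 0, 2 and 8 advertised */
	unsigned char ie[] = { 0x05, 0x01 };

	decode_capabilities(ie, sizeof(ie));
	return 0;
}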
@@ -1087,8 +1047,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
* For AR6004, host determine Target RAM address for
* writing board data.
*/
- if (ar->target_type == TARGET_TYPE_AR6004) {
- board_address = AR6004_REV1_BOARD_DATA_ADDRESS;
+ if (ar->hw.board_addr != 0) {
+ board_address = ar->hw.board_addr;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data)),
@@ -1106,7 +1066,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
HI_ITEM(hi_board_ext_data)),
(u8 *) &board_ext_address, 4);
- if (board_ext_address == 0) {
+ if (ar->target_type == TARGET_TYPE_AR6003 &&
+ board_ext_address == 0) {
ath6kl_err("Failed to get board file target address.\n");
return -EINVAL;
}
@@ -1126,8 +1087,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
break;
}
- if (ar->fw_board_len == (board_data_size +
- board_ext_data_size)) {
+ if (board_ext_address &&
+ ar->fw_board_len == (board_data_size + board_ext_data_size)) {
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
@@ -1182,10 +1143,11 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
static int ath6kl_upload_otp(struct ath6kl *ar)
{
u32 address, param;
+ bool from_hw = false;
int ret;
- if (WARN_ON(ar->fw_otp == NULL))
- return -ENOENT;
+ if (ar->fw_otp == NULL)
+ return 0;
address = ar->hw.app_load_addr;
@@ -1210,15 +1172,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
return ret;
}
- ar->hw.app_start_override_addr = address;
+ if (ar->hw.app_start_override_addr == 0) {
+ ar->hw.app_start_override_addr = address;
+ from_hw = true;
+ }
- ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+ from_hw ? " (from hw)" : "",
ar->hw.app_start_override_addr);
/* execute the OTP code */
- ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+ ar->hw.app_start_override_addr);
param = 0;
- ath6kl_bmi_execute(ar, address, &param);
+ ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
return ret;
}
@@ -1229,7 +1196,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar)
int ret;
if (WARN_ON(ar->fw == NULL))
- return -ENOENT;
+ return 0;
address = ar->hw.app_load_addr;
@@ -1259,8 +1226,8 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
u32 address, param;
int ret;
- if (WARN_ON(ar->fw_patch == NULL))
- return -ENOENT;
+ if (ar->fw_patch == NULL)
+ return 0;
address = ar->hw.dataset_patch_addr;
@@ -1345,7 +1312,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
- if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x20;
@@ -1402,43 +1369,29 @@ static int ath6kl_init_upload(struct ath6kl *ar)
if (status)
return status;
- /* Configure GPIO AR6003 UART */
- param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_dbg_uart_txpin)),
- (u8 *)&param, 4);
-
return status;
}
static int ath6kl_init_hw_params(struct ath6kl *ar)
{
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
- break;
- case AR6003_REV3_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = 0x1234;
- ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE;
- break;
- case AR6004_REV1_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE;
- break;
- default:
+ const struct ath6kl_hw *hw;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = &hw_list[i];
+
+ if (hw->id == ar->version.target_ver)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hw_list)) {
ath6kl_err("Unsupported hardware version: 0x%x\n",
ar->version.target_ver);
return -EINVAL;
}
+ ar->hw = *hw;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
ar->version.target_ver, ar->target_type,
@@ -1447,75 +1400,75 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
"app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
ar->hw.reserved_ram_size);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "refclk_hz %d uarttx_pin %d",
+ ar->hw.refclk_hz, ar->hw.uarttx_pin);
return 0;
}
-static int ath6kl_init(struct net_device *dev)
+static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- int status = 0;
- s32 timeleft;
+ switch (type) {
+ case ATH6KL_HIF_TYPE_SDIO:
+ return "sdio";
+ case ATH6KL_HIF_TYPE_USB:
+ return "usb";
+ }
- if (!ar)
- return -EIO;
+ return NULL;
+}
+
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ long timeleft;
+ int ret, i;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_power_off;
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_power_off;
/* Do we need to finish the BMI phase */
+ /* FIXME: return error from ath6kl_bmi_done() */
if (ath6kl_bmi_done(ar)) {
- status = -EIO;
- goto ath6kl_init_done;
- }
-
- /* Indicate that WMI is enabled (although not ready yet) */
- set_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = ath6kl_wmi_init(ar);
- if (!ar->wmi) {
- ath6kl_err("failed to initialize wmi\n");
- status = -EIO;
- goto ath6kl_init_done;
+ ret = -EIO;
+ goto err_power_off;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
/*
* The reason we have to wait for the target here is that the
* driver layer has to init BMI in order to set the host block
* size.
*/
if (ath6kl_htc_wait_target(ar->htc_target)) {
- status = -EIO;
- goto err_node_cleanup;
+ ret = -EIO;
+ goto err_power_off;
}
if (ath6kl_init_service_ep(ar)) {
- status = -EIO;
+ ret = -EIO;
goto err_cleanup_scatter;
}
- /* setup access class priority mappings */
- ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
- ar->ac_stream_pri_map[WMM_AC_BE] = 1;
- ar->ac_stream_pri_map[WMM_AC_VI] = 2;
- ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
-
- /* give our connected endpoints some buffers */
- ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
- ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
-
- /* allocate some buffers that handle larger AMSDU frames */
- ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
-
/* setup credit distribution */
- ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
-
- ath6kl_cookie_init(ar);
+ ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
/* start HTC */
- status = ath6kl_htc_start(ar->htc_target);
-
- if (status) {
+ ret = ath6kl_htc_start(ar->htc_target);
+ if (ret) {
+ /* FIXME: call this */
ath6kl_cookie_cleanup(ar);
- goto err_rxbuf_cleanup;
+ goto err_cleanup_scatter;
}
/* Wait for Wmi event to be ready */
@@ -1526,54 +1479,81 @@ static int ath6kl_init(struct net_device *dev)
ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+ if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+ ath6kl_info("%s %s fw %s%s\n",
+ ar->hw.name,
+ ath6kl_init_get_hif_name(ar->hif_type),
+ ar->wiphy->fw_version,
+ test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+ }
+
if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
ATH6KL_ABI_VERSION, ar->version.abi_ver);
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
if (!timeleft || signal_pending(current)) {
ath6kl_err("wmi is not ready or wait was interrupted\n");
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
/* communicate the wmi protocol verision to the target */
+ /* FIXME: return error */
if ((ath6kl_set_host_app_area(ar)) != 0)
ath6kl_err("unable to set the host app area\n");
- ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
- ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+ for (i = 0; i < ar->vif_max; i++) {
+ ret = ath6kl_target_config_wlan_params(ar, i);
+ if (ret)
+ goto err_htc_stop;
+ }
- ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ ar->state = ATH6KL_STATE_ON;
- status = ath6kl_target_config_wlan_params(ar);
- if (!status)
- goto ath6kl_init_done;
+ return 0;
err_htc_stop:
ath6kl_htc_stop(ar->htc_target);
-err_rxbuf_cleanup:
- ath6kl_htc_flush_rx_buf(ar->htc_target);
- ath6kl_cleanup_amsdu_rxbufs(ar);
err_cleanup_scatter:
ath6kl_hif_cleanup_scatter(ar);
-err_node_cleanup:
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
+err_power_off:
+ ath6kl_hif_power_off(ar);
-ath6kl_init_done:
- return status;
+ return ret;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+ ath6kl_htc_stop(ar->htc_target);
+
+ ath6kl_hif_stop(ar);
+
+ ath6kl_bmi_reset(ar);
+
+ ret = ath6kl_hif_power_off(ar);
+ if (ret)
+ ath6kl_warn("failed to power off hif: %d\n", ret);
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ return 0;
}
int ath6kl_core_init(struct ath6kl *ar)
{
- int ret = 0;
struct ath6kl_bmi_target_info targ_info;
+ struct net_device *ndev;
+ int ret = 0, i;
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
if (!ar->ath6kl_wq)
@@ -1583,145 +1563,236 @@ int ath6kl_core_init(struct ath6kl *ar)
if (ret)
goto err_wq;
- ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ /*
+ * Turn on power to get the hardware (target) version and leave power
+ * on deliberately, as we will boot the hardware anyway within a few
+ * seconds.
+ */
+ ret = ath6kl_hif_power_on(ar);
if (ret)
goto err_bmi_cleanup;
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_power_off;
+
ar->version.target_ver = le32_to_cpu(targ_info.version);
ar->target_type = le32_to_cpu(targ_info.type);
- ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+ ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
ret = ath6kl_init_hw_params(ar);
if (ret)
- goto err_bmi_cleanup;
-
- ret = ath6kl_configure_target(ar);
- if (ret)
- goto err_bmi_cleanup;
+ goto err_power_off;
ar->htc_target = ath6kl_htc_create(ar);
if (!ar->htc_target) {
ret = -ENOMEM;
- goto err_bmi_cleanup;
- }
-
- ar->aggr_cntxt = aggr_init(ar->net_dev);
- if (!ar->aggr_cntxt) {
- ath6kl_err("failed to initialize aggr\n");
- ret = -ENOMEM;
- goto err_htc_cleanup;
+ goto err_power_off;
}
ret = ath6kl_fetch_firmwares(ar);
if (ret)
goto err_htc_cleanup;
- ret = ath6kl_init_upload(ar);
- if (ret)
+ /* FIXME: we should free all firmwares in the error cases below */
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ ret = -EIO;
goto err_htc_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
- ret = ath6kl_init(ar->net_dev);
+ ret = ath6kl_register_ieee80211_hw(ar);
if (ret)
- goto err_htc_cleanup;
+ goto err_node_cleanup;
- /* This runs the init function if registered */
- ret = register_netdev(ar->net_dev);
+ ret = ath6kl_debug_init(ar);
if (ret) {
- ath6kl_err("register_netdev failed\n");
- ath6kl_destroy(ar->net_dev, 0);
- return ret;
+ wiphy_unregister(ar->wiphy);
+ goto err_node_cleanup;
+ }
+
+ for (i = 0; i < ar->vif_max; i++)
+ ar->avail_idx_map |= BIT(i);
+
+ rtnl_lock();
+
+ /* Add an initial station interface */
+ ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ INFRA_NETWORK);
+
+ rtnl_unlock();
+
+ if (!ndev) {
+ ath6kl_err("Failed to instantiate a network device\n");
+ ret = -ENOMEM;
+ wiphy_unregister(ar->wiphy);
+ goto err_debug_init;
}
- set_bit(NETDEV_REGISTERED, &ar->flag);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ar->net_dev->name, ar->net_dev, ar);
+ __func__, ndev->name, ndev, ar);
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ ath6kl_cookie_init(ar);
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ if (suspend_cutpower)
+ ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
+
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+ WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+ ar->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+
+ set_bit(FIRST_BOOT, &ar->flag);
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_err("Failed to start hardware: %d\n", ret);
+ goto err_rxbuf_cleanup;
+ }
+
+ /*
+ * Set the MAC address received in the ready event
+ * FIXME: Move to ath6kl_interface_add()
+ */
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
return ret;
+err_rxbuf_cleanup:
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+ rtnl_lock();
+ ath6kl_deinit_if_data(netdev_priv(ndev));
+ rtnl_unlock();
+ wiphy_unregister(ar->wiphy);
+err_debug_init:
+ ath6kl_debug_cleanup(ar);
+err_node_cleanup:
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
err_htc_cleanup:
ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+ ath6kl_hif_power_off(ar);
err_bmi_cleanup:
ath6kl_bmi_cleanup(ar);
err_wq:
destroy_workqueue(ar->ath6kl_wq);
+
return ret;
}
-void ath6kl_stop_txrx(struct ath6kl *ar)
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
{
- struct net_device *ndev = ar->net_dev;
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
- if (!ndev)
- return;
+ netif_stop_queue(vif->ndev);
- set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ clear_bit(WLAN_ENABLED, &vif->flags);
- if (down_interruptible(&ar->sem)) {
- ath6kl_err("down_interruptible failed\n");
- return;
- }
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
- if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
- ath6kl_stop_endpoint(ndev, false, true);
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
- clear_bit(WLAN_ENABLED, &ar->flag);
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
}
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- *
- * - In case of surprise removal, the hcd already frees up the pending
- * for the device and hence there is no need to unregister the function
- * driver inorder to get these requests. For planned removal, the function
- * driver has to explicitly unregister itself to have the hcd return all the
- * pending requests before the data structures for the devices are freed up.
- * Note that as per the current implementation, the function driver will
- * end up releasing all the devices since there is no API to selectively
- * release a particular device.
- *
- * - Certain commands issued to the target can be skipped for surprise
- * removal since they will anyway not go through.
- */
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+void ath6kl_stop_txrx(struct ath6kl *ar)
{
- struct ath6kl *ar;
+ struct ath6kl_vif *vif, *tmp_vif;
- if (!dev || !ath6kl_priv(dev)) {
- ath6kl_err("failed to get device structure\n");
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
return;
}
- ar = ath6kl_priv(dev);
-
- destroy_workqueue(ar->ath6kl_wq);
-
- if (ar->htc_target)
- ath6kl_htc_cleanup(ar->htc_target);
-
- aggr_module_destroy(ar->aggr_cntxt);
-
- ath6kl_cookie_cleanup(ar);
-
- ath6kl_cleanup_amsdu_rxbufs(ar);
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ rtnl_lock();
+ ath6kl_deinit_if_data(vif);
+ rtnl_unlock();
+ spin_lock_bh(&ar->list_lock);
+ }
+ spin_unlock_bh(&ar->list_lock);
- ath6kl_bmi_cleanup(ar);
+ clear_bit(WMI_READY, &ar->flag);
- ath6kl_debug_cleanup(ar);
+ /*
+ * After wmi_shutdown all WMI events will be dropped. We
+ * need to clean up the buffers allocated in AP mode and
+ * give a disconnect notification to the stack, which usually
+ * happens in the disconnect_event. Simulate the disconnect
+ * event by calling the function directly. Sometimes
+ * disconnect_event will be received when the debug logs
+ * are collected.
+ */
+ ath6kl_wmi_shutdown(ar->wmi);
- if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
- unregister_netdev(dev);
- clear_bit(NETDEV_REGISTERED, &ar->flag);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ ath6kl_htc_stop(ar->htc_target);
}
- free_netdev(dev);
-
- kfree(ar->fw_board);
- kfree(ar->fw_otp);
- kfree(ar->fw);
- kfree(ar->fw_patch);
+ /*
+ * Try to reset the device if we can. The driver may have been
+ * configured NOT to reset the target during a debug session.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "attempting to reset target on instance destroy\n");
+ ath6kl_reset_device(ar, ar->target_type, true, true);
- ath6kl_cfg80211_deinit(ar);
+ clear_bit(WLAN_ENABLED, &ar->flag);
}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 30b5a53db9e..eea3c747653 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -20,12 +20,13 @@
#include "target.h"
#include "debug.h"
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
- max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+ max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {
if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -174,64 +175,6 @@ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
ar->cookie_count++;
}
-/* set the window address register (using 4-byte register access ). */
-static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
-{
- int status;
- s32 i;
- __le32 addr_val;
-
- /*
- * Write bytes 1,2,3 of the register to set the upper address bytes,
- * the LSB is written last to initiate the access cycle
- */
-
- for (i = 1; i <= 3; i++) {
- /*
- * Fill the buffer with the address byte value we want to
- * hit 4 times. No need to worry about endianness as the
- * same byte is copied to all four bytes of addr_val at
- * any time.
- */
- memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
-
- /*
- * Hit each byte of the register address with a 4-byte
- * write operation to the same address, this is a harmless
- * operation.
- */
- status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
- 4, HIF_WR_SYNC_BYTE_FIX);
- if (status)
- break;
- }
-
- if (status) {
- ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- /*
- * Write the address register again, this time write the whole
- * 4-byte value. The effect here is that the LSB write causes the
- * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
- * effect since we are writing the same values again
- */
- addr_val = cpu_to_le32(addr);
- status = hif_read_write_sync(ar, reg_addr,
- (u8 *)&(addr_val),
- 4, HIF_WR_SYNC_BYTE_INC);
-
- if (status) {
- ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- return 0;
-}
-
/*
* Read from the hardware through its diagnostic window. No cooperation
* from the firmware is required for this.
@@ -240,14 +183,7 @@ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
{
int ret;
- /* set window register to start read cycle */
- ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
- if (ret)
- return ret;
-
- /* read the data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
- sizeof(*value), HIF_RD_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_read32(ar, address, value);
if (ret) {
ath6kl_warn("failed to read32 through diagnose window: %d\n",
ret);
@@ -265,18 +201,15 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
{
int ret;
- /* set write data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
- sizeof(value), HIF_WR_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_write32(ar, address, value);
+
if (ret) {
ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
address, value);
return ret;
}
- /* set window register, which starts the write cycle */
- return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
- address);
+ return 0;
}
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
@@ -393,8 +326,8 @@ out:
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
-static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
- bool wait_fot_compltn, bool cold_reset)
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset)
{
int status = 0;
u32 address;
@@ -425,102 +358,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
ath6kl_err("failed to reset target\n");
}
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs)
-{
- struct ath6kl *ar = ath6kl_priv(dev);
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(dev);
-
- /* disable the target and the interrupts associated with it */
- if (test_bit(WMI_READY, &ar->flag)) {
- discon_issued = (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag));
- ath6kl_disconnect(ar);
- if (!keep_profile)
- ath6kl_init_profile_info(ar);
-
- del_timer(&ar->disconnect_timer);
-
- clear_bit(WMI_READY, &ar->flag);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
-
- /*
- * After wmi_shudown all WMI events will be dropped. We
- * need to cleanup the buffers allocated in AP mode and
- * give disconnect notification to stack, which usually
- * happens in the disconnect_event. Simulate the disconnect
- * event by calling the function directly. Sometimes
- * disconnect_event will be received when the debug logs
- * are collected.
- */
- if (discon_issued)
- ath6kl_disconnect_event(ar, DISCONNECT_CMD,
- (ar->nw_type & AP_NETWORK) ?
- bcast_mac : ar->bssid,
- 0, NULL, 0);
-
- ar->user_key_ctrl = 0;
-
- } else {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: wmi is not ready 0x%p 0x%p\n",
- __func__, ar, ar->wmi);
-
- /* Shut down WMI if we have started it */
- if (test_bit(WMI_ENABLED, &ar->flag)) {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: shut down wmi\n", __func__);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
- }
- }
-
- if (ar->htc_target) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
- ath6kl_htc_stop(ar->htc_target);
- }
-
- /*
- * Try to reset the device if we can. The driver may have been
- * configure NOT to reset the target during a debug session.
- */
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "attempting to reset target on instance destroy\n");
- ath6kl_reset_device(ar, ar->target_type, true, true);
-}
-
-static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
u8 index;
u8 keyusage;
for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
- if (ar->wep_key_list[index].key_len) {
+ if (vif->wep_key_list[index].key_len) {
keyusage = GROUP_USAGE;
- if (index == ar->def_txkey_index)
+ if (index == vif->def_txkey_index)
keyusage |= TX_USAGE;
- ath6kl_wmi_addkey_cmd(ar->wmi,
+ ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
index,
WEP_CRYPT,
keyusage,
- ar->wep_key_list[index].key_len,
- NULL,
- ar->wep_key_list[index].key,
+ vif->wep_key_list[index].key_len,
+ NULL, 0,
+ vif->wep_key_list[index].key,
KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
}
}
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_req_key *ik;
int res;
u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,11 +393,13 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
- switch (ar->auth_mode) {
+ switch (vif->auth_mode) {
case NONE_AUTH:
- if (ar->prwise_crypto == WEP_CRYPT)
- ath6kl_install_static_wep_keys(ar);
- break;
+ if (vif->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(vif);
+ if (!ik->valid || ik->key_type != WAPI_CRYPT)
+ break;
+ /* for WAPI, we need to set the delayed group key, continue: */
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -544,8 +410,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
"the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd(
- ar->wmi, ik->key_index, ik->key_type,
- GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+ ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+ GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+ ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +421,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
break;
}
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
- set_bit(CONNECTED, &ar->flag);
- netif_carrier_on(ar->net_dev);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+ set_bit(CONNECTED, &vif->flags);
+ netif_carrier_on(vif->ndev);
}
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info)
{
+ struct ath6kl *ar = vif->ar;
u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0;
struct station_info sinfo;
@@ -600,6 +468,18 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
wpa_ie = pos; /* WPS IE */
break; /* overrides WPA/RSN IE */
}
+ } else if (pos[0] == 0x44 && wpa_ie == NULL) {
+ /*
+ * Note: the WAPI Parameter Set IE re-uses an Element ID that
+ * was officially allocated for BSS AC Access Delay. As
+ * such, we need to be a bit more careful when
+ * parsing the frame. However, the BSS AC Access Delay
+ * element is not supposed to be included in
+ * (Re)Association Request frames, so this should not
+ * cause problems.
+ */
+ wpa_ie = pos; /* WAPI IE */
+ break;
}
pos += 2 + pos[1];
}
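The hunk above walks the (Re)Association Request's tagged elements as id/length/value triples and treats Element ID 0x44 as WAPI only when no WPA/RSN IE was already found. A hedged sketch of that walk is below; the sample buffer and the printout are made up for the example, and only the 0x44 handling mirrors the patch (0x00 here is just a leading SSID-style element).

/* Illustrative walk over 802.11 information elements (id, len, value).
 * The sample buffer is an assumption for the example; the driver keeps
 * a pointer to the WPA/RSN/WPS/WAPI IE it finds in the request. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* a dummy element (id 0x00, len 4) followed by a WAPI IE (id 0x44, len 3) */
	unsigned char ies[] = { 0x00, 0x04, 'a', 'b', 'c', 'd',
				0x44, 0x03, 0x01, 0x02, 0x03 };
	const unsigned char *pos = ies;
	const unsigned char *end = ies + sizeof(ies);
	const unsigned char *wpa_ie = NULL;

	while (pos + 1 < end && pos + 2 + pos[1] <= end) {
		if (pos[0] == 0x44 && wpa_ie == NULL) {
			wpa_ie = pos;          /* WAPI IE (re-used element ID) */
			break;
		}
		pos += 2 + pos[1];             /* skip id, len and payload */
	}

	if (wpa_ie)
		printf("security IE id 0x%02x, len %u\n", wpa_ie[0], wpa_ie[1]);
	return 0;
}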
@@ -617,380 +497,49 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
sinfo.assoc_req_ies_len = ies_len;
sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
- cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
-
- netif_wake_queue(ar->net_dev);
-}
-
-/* Functions for Tx credit handling */
-void ath6k_credit_init(struct htc_credit_state_info *cred_info,
- struct list_head *ep_list,
- int tot_credits)
-{
- struct htc_endpoint_credit_dist *cur_ep_dist;
- int count;
-
- cred_info->cur_free_credits = tot_credits;
- cred_info->total_avail_credits = tot_credits;
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
-
- if (tot_credits > 4)
- if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
- (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
- ath6kl_deposit_credit_to_ep(cred_info,
- cur_ep_dist,
- cur_ep_dist->cred_min);
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- }
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
- ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
- cur_ep_dist->cred_min);
- /*
- * Control service is always marked active, it
- * never goes inactive EVER.
- */
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
- /* this is the lowest priority data endpoint */
- cred_info->lowestpri_ep_dist = cur_ep_dist->list;
-
- /*
- * Streams have to be created (explicit | implicit) for all
- * kinds of traffic. BE endpoints are also inactive in the
- * beginning. When BE traffic starts it creates implicit
- * streams that redistributes credits.
- *
- * Note: all other endpoints have minimums set but are
- * initially given NO credits. credits will be distributed
- * as traffic activity demands
- */
- }
-
- WARN_ON(cred_info->cur_free_credits <= 0);
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
- cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
- /*
- * For the remaining data endpoints, we assume that
- * each cred_per_msg are the same. We use a simple
- * calculation here, we take the remaining credits
- * and determine how many max messages this can
- * cover and then set each endpoint's normal value
- * equal to 3/4 this amount.
- */
- count = (cred_info->cur_free_credits /
- cur_ep_dist->cred_per_msg)
- * cur_ep_dist->cred_per_msg;
- count = (count * 3) >> 2;
- count = max(count, cur_ep_dist->cred_per_msg);
- cur_ep_dist->cred_norm = count;
-
- }
- }
-}
-
-/* initialize and setup credit distribution */
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info)
-{
- u16 servicepriority[5];
-
- memset(cred_info, 0, sizeof(struct htc_credit_state_info));
-
- servicepriority[0] = WMI_CONTROL_SVC; /* highest */
- servicepriority[1] = WMI_DATA_VO_SVC;
- servicepriority[2] = WMI_DATA_VI_SVC;
- servicepriority[3] = WMI_DATA_BE_SVC;
- servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
- /* set priority list */
- ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
-
- return 0;
-}
-
-/* reduce an ep's credits back to a set limit */
-static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist,
- int limit)
-{
- int credits;
-
- ep_dist->cred_assngd = limit;
-
- if (ep_dist->credits <= limit)
- return;
+ cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
- credits = ep_dist->credits - limit;
- ep_dist->credits -= credits;
- cred_info->cur_free_credits += credits;
-}
-
-static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
- struct list_head *epdist_list)
-{
- struct htc_endpoint_credit_dist *cur_dist_list;
-
- list_for_each_entry(cur_dist_list, epdist_list, list) {
- if (cur_dist_list->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_dist_list->cred_to_dist > 0) {
- cur_dist_list->credits +=
- cur_dist_list->cred_to_dist;
- cur_dist_list->cred_to_dist = 0;
- if (cur_dist_list->credits >
- cur_dist_list->cred_assngd)
- ath6k_reduce_credits(cred_info,
- cur_dist_list,
- cur_dist_list->cred_assngd);
-
- if (cur_dist_list->credits >
- cur_dist_list->cred_norm)
- ath6k_reduce_credits(cred_info, cur_dist_list,
- cur_dist_list->cred_norm);
-
- if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (cur_dist_list->txq_depth == 0)
- ath6k_reduce_credits(cred_info,
- cur_dist_list, 0);
- }
- }
- }
-}
-
-/*
- * HTC has an endpoint that needs credits, ep_dist is the endpoint in
- * question.
- */
-void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist)
-{
- struct htc_endpoint_credit_dist *curdist_list;
- int credits = 0;
- int need;
-
- if (ep_dist->svc_id == WMI_CONTROL_SVC)
- goto out;
-
- if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
- (ep_dist->svc_id == WMI_DATA_VO_SVC))
- if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
- goto out;
-
- /*
- * For all other services, we follow a simple algorithm of:
- *
- * 1. checking the free pool for credits
- * 2. checking lower priority endpoints for credits to take
- */
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
- if (credits >= ep_dist->seek_cred)
- goto out;
-
- /*
- * We don't have enough in the free pool, try taking away from
- * lower priority services The rule for taking away credits:
- *
- * 1. Only take from lower priority endpoints
- * 2. Only take what is allocated above the minimum (never
- * starve an endpoint completely)
- * 3. Only take what you need.
- */
-
- list_for_each_entry_reverse(curdist_list,
- &cred_info->lowestpri_ep_dist,
- list) {
- if (curdist_list == ep_dist)
- break;
-
- need = ep_dist->seek_cred - cred_info->cur_free_credits;
-
- if ((curdist_list->cred_assngd - need) >=
- curdist_list->cred_min) {
- /*
- * The current one has been allocated more than
- * it's minimum and it has enough credits assigned
- * above it's minimum to fulfill our need try to
- * take away just enough to fulfill our need.
- */
- ath6k_reduce_credits(cred_info, curdist_list,
- curdist_list->cred_assngd - need);
-
- if (cred_info->cur_free_credits >=
- ep_dist->seek_cred)
- break;
- }
-
- if (curdist_list->endpoint == ENDPOINT_0)
- break;
- }
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-out:
- /* did we find some credits? */
- if (credits)
- ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
-
- ep_dist->seek_cred = 0;
-}
-
-/* redistribute credits based on activity change */
-static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
- struct list_head *ep_dist_list)
-{
- struct htc_endpoint_credit_dist *curdist_list;
-
- list_for_each_entry(curdist_list, ep_dist_list, list) {
- if (curdist_list->endpoint == ENDPOINT_0)
- continue;
-
- if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
- (curdist_list->svc_id == WMI_DATA_BE_SVC))
- curdist_list->dist_flags |= HTC_EP_ACTIVE;
-
- if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
- !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (curdist_list->txq_depth == 0)
- ath6k_reduce_credits(info,
- curdist_list, 0);
- else
- ath6k_reduce_credits(info,
- curdist_list,
- curdist_list->cred_min);
- }
- }
-}
-
-/*
- *
- * This function is invoked whenever endpoints require credit
- * distributions. A lock is held while this function is invoked, this
- * function shall NOT block. The ep_dist_list is a list of distribution
- * structures in prioritized order as defined by the call to the
- * htc_set_credit_dist() api.
- */
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
- struct list_head *ep_dist_list,
- enum htc_credit_dist_reason reason)
-{
- switch (reason) {
- case HTC_CREDIT_DIST_SEND_COMPLETE:
- ath6k_credit_update(cred_info, ep_dist_list);
- break;
- case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
- ath6k_redistribute_credits(cred_info, ep_dist_list);
- break;
- default:
- break;
- }
-
- WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
- WARN_ON(cred_info->cur_free_credits < 0);
+ netif_wake_queue(vif->ndev);
}
void disconnect_timer_handler(unsigned long ptr)
{
struct net_device *dev = (struct net_device *)ptr;
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- ath6kl_init_profile_info(ar);
- ath6kl_disconnect(ar);
+ ath6kl_init_profile_info(vif);
+ ath6kl_disconnect(vif);
}
-void ath6kl_disconnect(struct ath6kl *ar)
+void ath6kl_disconnect(struct ath6kl_vif *vif)
{
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag)) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)) {
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
/*
* Disconnect command is issued, clear the connect pending
* flag. The connected flag will be cleared in
* disconnect event notification.
*/
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
}
}
-void ath6kl_deep_sleep_enable(struct ath6kl *ar)
-{
- switch (ar->sme_state) {
- case SME_CONNECTING:
- cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- break;
- case SME_CONNECTED:
- default:
- /*
- * FIXME: oddly enough smeState is in DISCONNECTED during
- * suspend, why? Need to send disconnected event in that
- * state.
- */
- cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
- break;
- }
-
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag))
- ath6kl_wmi_disconnect_cmd(ar->wmi);
-
- ar->sme_state = SME_DISCONNECTED;
-
- /* disable scanning */
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
- 0, 0) != 0)
- printk(KERN_WARNING "ath6kl: failed to disable scan "
- "during suspend\n");
-
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
-}
-
/* WMI Event handlers */
-static const char *get_hw_id_string(u32 id)
-{
- switch (id) {
- case AR6003_REV1_VERSION:
- return "1.0";
- case AR6003_REV2_VERSION:
- return "2.0";
- case AR6003_REV3_VERSION:
- return "2.1.1";
- default:
- return "unknown";
- }
-}
-
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{
struct ath6kl *ar = devt;
- struct net_device *dev = ar->net_dev;
- memcpy(dev->dev_addr, datap, ETH_ALEN);
+ memcpy(ar->mac_addr, datap, ETH_ALEN);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
- __func__, dev->dev_addr);
+ __func__, ar->mac_addr);
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
- snprintf(ar->wdev->wiphy->fw_version,
- sizeof(ar->wdev->wiphy->fw_version),
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
"%u.%u.%u.%u",
(ar->version.wlan_ver & 0xf0000000) >> 28,
(ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1000,79 +549,85 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
/* indicate to the waiting thread that the ready event was received */
set_bit(WMI_READY, &ar->flag);
wake_up(&ar->event_wq);
-
- ath6kl_info("hw %s fw %s%s\n",
- get_hw_id_string(ar->wdev->wiphy->hw_version),
- ar->wdev->wiphy->fw_version,
- test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
}
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{
- ath6kl_cfg80211_scan_complete_event(ar, status);
+ struct ath6kl *ar = vif->ar;
+ bool aborted = false;
+
+ if (status != WMI_SCAN_STATUS_SUCCESS)
+ aborted = true;
+
+ ath6kl_cfg80211_scan_complete_event(vif, aborted);
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
u8 assoc_req_len, u8 assoc_resp_len,
u8 *assoc_info)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- ath6kl_cfg80211_connect_event(ar, channel, bssid,
+ ath6kl_cfg80211_connect_event(vif, channel, bssid,
listen_int, beacon_int,
net_type, beacon_ie_len,
assoc_req_len, assoc_resp_len,
assoc_info);
- memcpy(ar->bssid, bssid, sizeof(ar->bssid));
- ar->bss_ch = channel;
+ memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+ vif->bss_ch = channel;
- if ((ar->nw_type == INFRA_NETWORK))
- ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+ if ((vif->nw_type == INFRA_NETWORK))
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ ar->listen_intvl_t,
ar->listen_intvl_b);
- netif_wake_queue(ar->net_dev);
+ netif_wake_queue(vif->ndev);
/* Update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- set_bit(CONNECTED, &ar->flag);
- clear_bit(CONNECT_PEND, &ar->flag);
- netif_carrier_on(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ set_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+ netif_carrier_on(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- aggr_reset_state(ar->aggr_cntxt);
- ar->reconnect_flag = 0;
+ aggr_reset_state(vif->aggr_cntxt);
+ vif->reconnect_flag = 0;
- if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
memset(ar->node_map, 0, sizeof(ar->node_map));
ar->node_num = 0;
ar->next_ep_id = ENDPOINT_2;
}
if (!ar->usr_bss_filter) {
- set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+ set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ CURRENT_BSS_FILTER, 0);
}
}
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{
struct ath6kl_sta *sta;
+ struct ath6kl *ar = vif->ar;
u8 tsc[6];
+
/*
* For AP case, keyid will have aid of STA which sent pkt with
* MIC error. Use this aid to get MAC & send it to hostapd.
*/
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
if (!sta)
return;
@@ -1081,19 +636,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
"ap tkip mic error received from aid=%d\n", keyid);
memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
- cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+ cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
} else
- ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+ ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
-static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_target_stats *tgt_stats =
(struct wmi_target_stats *) ptr;
- struct target_stats *stats = &ar->target_stats;
+ struct ath6kl *ar = vif->ar;
+ struct target_stats *stats = &vif->target_stats;
struct tkip_ccmp_stats *ccmp_stats;
u8 ac;
@@ -1189,8 +745,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
stats->wow_evt_discarded +=
le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
- if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
- clear_bit(STATS_UPDATE_PEND, &ar->flag);
+ if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+ clear_bit(STATS_UPDATE_PEND, &vif->flags);
wake_up(&ar->event_wq);
}
}
@@ -1200,14 +756,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct ath6kl *ar = vif->ar;
struct wmi_ap_mode_stat *ap = &ar->ap_stats;
struct wmi_per_sta_stat *st_ap, *st_p;
u8 ac;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (len < sizeof(*p))
return;
@@ -1226,7 +783,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
}
} else {
- ath6kl_update_target_stats(ar, ptr, len);
+ ath6kl_update_target_stats(vif, ptr, len);
}
}
@@ -1245,11 +802,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
wake_up(&ar->event_wq);
}
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{
struct ath6kl_sta *conn;
struct sk_buff *skb;
bool psq_empty = false;
+ struct ath6kl *ar = vif->ar;
conn = ath6kl_find_sta_by_aid(ar, aid);
@@ -1272,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +838,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
}
-void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{
bool mcastq_empty = false;
struct sk_buff *skb;
+ struct ath6kl *ar = vif->ar;
/*
* If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +867,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
return;
/* set the STA flag to dtim_expired for the frame to go out */
- set_bit(DTIM_EXPIRED, &ar->flag);
+ set_bit(DTIM_EXPIRED, &vif->flags);
spin_lock_bh(&ar->mcastpsq_lock);
while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
spin_unlock_bh(&ar->mcastpsq_lock);
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
spin_lock_bh(&ar->mcastpsq_lock);
}
spin_unlock_bh(&ar->mcastpsq_lock);
- clear_bit(DTIM_EXPIRED, &ar->flag);
+ clear_bit(DTIM_EXPIRED, &vif->flags);
/* clear the LSB of the BitMapCtl field of the TIM IE */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
}
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
u8 assoc_resp_len, u8 *assoc_info,
u16 prot_reason_status)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1344,31 +903,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
/* clear the LSB of the TIM IE's BitMapCtl field */
if (test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ MCAST_AID, 0);
}
if (!is_broadcast_ether_addr(bssid)) {
/* send event to application */
- cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
}
- if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- clear_bit(CONNECTED, &ar->flag);
+ if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ clear_bit(CONNECTED, &vif->flags);
}
return;
}
- ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+ ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
assoc_resp_len, assoc_info,
prot_reason_status);
- aggr_reset_state(ar->aggr_cntxt);
+ aggr_reset_state(vif->aggr_cntxt);
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
- ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
- "disconnect reason is %d\n", reason);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
/*
* If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +936,88 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
*/
if (reason == DISCONNECT_CMD) {
if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
} else {
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
- && (ar->reconnect_flag == 1))) {
- set_bit(CONNECTED, &ar->flag);
+ && (vif->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &vif->flags);
return;
}
}
/* update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- clear_bit(CONNECTED, &ar->flag);
- netif_carrier_off(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ clear_bit(CONNECTED, &vif->flags);
+ netif_carrier_off(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
- ar->reconnect_flag = 0;
+ if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+ vif->reconnect_flag = 0;
if (reason != CSERV_DISCONNECT)
ar->user_key_ctrl = 0;
- netif_stop_queue(ar->net_dev);
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
+ netif_stop_queue(vif->ndev);
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
ath6kl_tx_data_cleanup(ar);
}
-static int ath6kl_open(struct net_device *dev)
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- unsigned long flags;
+ struct ath6kl_vif *vif;
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->list_lock);
+ if (list_empty(&ar->vif_list)) {
+ spin_unlock_bh(&ar->list_lock);
+ return NULL;
+ }
- set_bit(WLAN_ENABLED, &ar->flag);
+ vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
- if (test_bit(CONNECTED, &ar->flag)) {
+ spin_unlock_bh(&ar->list_lock);
+
+ return vif;
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ set_bit(WLAN_ENABLED, &vif->flags);
+
+ if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
} else
netif_carrier_off(dev);
- spin_unlock_irqrestore(&ar->lock, flags);
-
return 0;
}
static int ath6kl_close(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
netif_stop_queue(dev);
- ath6kl_disconnect(ar);
-
- if (test_bit(WMI_READY, &ar->flag)) {
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
- 0, 0, 0))
- return -EIO;
-
- clear_bit(WLAN_ENABLED, &ar->flag);
- }
+ ath6kl_cfg80211_stop(vif);
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+ clear_bit(WLAN_ENABLED, &vif->flags);
return 0;
}
static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- return &ar->net_stats;
+ return &vif->net_stats;
}
static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1030,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
void init_netdev(struct net_device *dev)
{
dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->destructor = free_netdev;
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN;
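A minimal usage sketch for the ath6kl_vif_first() helper added above: legacy single-vif call sites that used to reach state through struct ath6kl can fetch the first registered vif and bail out while the list is still empty. The function below is an illustration under that assumption, using only fields visible in this diff (flags, ndev); it is not code from the patch.

static void example_single_vif_path(struct ath6kl *ar)
{
	struct ath6kl_vif *vif = ath6kl_vif_first(ar);

	if (!vif)
		return;	/* no interface registered yet */

	if (test_bit(CONNECTED, &vif->flags))
		netif_wake_queue(vif->ndev);
}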
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 066d4f88807..9475e2d0d0b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -22,7 +22,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
-#include "htc_hif.h"
+#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
@@ -40,12 +40,18 @@ struct ath6kl_sdio {
struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
struct ath6kl *ar;
+
u8 *dma_buffer;
+ /* protects access to dma_buffer */
+ struct mutex dma_buffer_mutex;
+
/* scatter request list head */
struct list_head scat_req;
spinlock_t scat_lock;
+ bool scatter_enabled;
+
bool is_disabled;
atomic_t irq_handling;
const struct sdio_device_id *id;
@@ -135,6 +141,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
{
int ret = 0;
+ sdio_claim_host(func);
+
if (request & HIF_WRITE) {
/* FIXME: looks like ugly workaround for something */
if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +164,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
ret = sdio_memcpy_fromio(func, buf, addr, len);
}
+ sdio_release_host(func);
+
ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
request & HIF_WRITE ? "wr" : "rd", addr,
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@@ -167,12 +177,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
struct bus_request *bus_req;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
if (list_empty(&ar_sdio->bus_req_freeq)) {
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
return NULL;
}
@@ -180,7 +189,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
struct bus_request, list);
list_del(&bus_req->list);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
@@ -190,14 +199,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
struct bus_request *bus_req)
{
- unsigned long flag;
-
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
}
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +298,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
mmc_req.cmd = &cmd;
mmc_req.data = &data;
+ sdio_claim_host(ar_sdio->func);
+
mmc_set_data_timeout(&data, ar_sdio->func->card);
/* synchronous call to process request */
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+ sdio_release_host(ar_sdio->func);
+
status = cmd.error ? cmd.error : data.error;
scat_complete:
@@ -389,17 +400,19 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
if (buf_needs_bounce(buf)) {
if (!ar_sdio->dma_buffer)
return -ENOMEM;
+ mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
memcpy(tbuf, buf, len);
bounced = true;
} else
tbuf = buf;
- sdio_claim_host(ar_sdio->func);
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
memcpy(buf, tbuf, len);
- sdio_release_host(ar_sdio->func);
+
+ if (bounced)
+ mutex_unlock(&ar_sdio->dma_buffer_mutex);
return ret;
}
@@ -418,29 +431,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
req->request);
context = req->packet;
ath6kl_sdio_free_bus_req(ar_sdio, req);
- ath6kldev_rw_comp_handler(context, status);
+ ath6kl_hif_rw_comp_handler(context, status);
}
}
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
struct ath6kl_sdio *ar_sdio;
- unsigned long flags;
struct bus_request *req, *tmp_req;
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
- sdio_claim_host(ar_sdio->func);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
__ath6kl_sdio_write_async(ar_sdio, req);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
}
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-
- sdio_release_host(ar_sdio->func);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
}
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +468,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
*/
sdio_release_host(ar_sdio->func);
- status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
atomic_set(&ar_sdio->irq_handling, 0);
WARN_ON(status && status != -ECANCELED);
}
-static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
int ret = 0;
if (!ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
@@ -495,13 +507,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
return ret;
}
-static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
int ret;
if (ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
/* Disable the card */
sdio_claim_host(ar_sdio->func);
ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +536,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *bus_req;
- unsigned long flags;
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
@@ -534,9 +548,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req->request = request;
bus_req->packet = packet;
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
return 0;
@@ -582,9 +596,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *node = NULL;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
if (!list_empty(&ar_sdio->scat_req)) {
node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +605,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
list_del(&node->list);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
return node;
}
@@ -601,13 +614,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_add_tail(&s_req->list, &ar_sdio->scat_req);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
@@ -618,7 +630,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
u32 request = scat_req->req;
int status = 0;
- unsigned long flags;
if (!scat_req->len)
return -EINVAL;
@@ -627,14 +638,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS) {
- sdio_claim_host(ar_sdio->func);
+ if (request & HIF_SYNCHRONOUS)
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- sdio_release_host(ar_sdio->func);
- } else {
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ else {
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
}
@@ -646,23 +655,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *s_req, *tmp_req;
- unsigned long flag;
/* empty the free list */
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
+ /*
+ * FIXME: should we also call completion handler with
+ * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
+ * that the packet is properly freed?
+ */
if (s_req->busrequest)
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries);
kfree(s_req);
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
/* setup of HIF scatter resources */
@@ -673,6 +686,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
int ret;
bool virt_scat = false;
+ if (ar_sdio->scatter_enabled)
+ return 0;
+
+ ar_sdio->scatter_enabled = true;
+
/* check if host supports scatter and it meets our requirements */
if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +705,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
MAX_SCATTER_REQUESTS, virt_scat);
if (!ret) {
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "hif-scatter enabled requests %d entries %d\n",
MAX_SCATTER_REQUESTS,
MAX_SCATTER_ENTRIES_PER_REQ);
@@ -712,8 +730,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return ret;
}
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "virtual scatter enabled requests %d entries %d\n",
ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +742,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return 0;
}
-static int ath6kl_sdio_suspend(struct ath6kl *ar)
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
@@ -733,12 +791,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
flags = sdio_get_host_pm_caps(func);
- if (!(flags & MMC_PM_KEEP_POWER))
- /* as host doesn't support keep power we need to bail out */
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "func %d doesn't support MMC_PM_KEEP_POWER\n",
- func->num);
- return -EINVAL;
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+ if (!(flags & MMC_PM_KEEP_POWER) ||
+ (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
+ /* as host doesn't support keep power we need to cut power */
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
+ NULL);
+ }
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
@@ -747,11 +807,367 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
return ret;
}
- ath6kl_deep_sleep_enable(ar);
+ if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
+ goto deepsleep;
+
+ /* sdio irq wakes up host */
+
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
+ ret = ath6kl_cfg80211_suspend(ar,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+ NULL);
+ if (ret) {
+ ath6kl_warn("Schedule scan suspend failed: %d", ret);
+ return ret;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+ if (wow) {
+ /*
+ * The host sdio controller is capable of keep power and
+ * sdio irq wake up at this point. It's fine to continue
+ * wow suspend operation.
+ */
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+ if (ret)
+ return ret;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+deepsleep:
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+}
+
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+ switch (ar->state) {
+ case ATH6KL_STATE_OFF:
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath6kl_sdio_config(ar);
+ break;
+
+ case ATH6KL_STATE_ON:
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ break;
+
+ case ATH6KL_STATE_WOW:
+ break;
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+ }
+
+ ath6kl_cfg80211_resume(ar);
+
+ return 0;
+}
+
+/* set the window address register (using 4-byte register access ). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ u8 addr_val[4];
+ s32 i;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes,
+ * the LSB is written last to initiate the access cycle
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times.
+ */
+ memset(addr_val, ((u8 *)&addr)[i], 4);
+
+ /*
+ * Hit each byte of the register address with a 4-byte
+ * write operation to the same address, this is a harmless
+ * operation.
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("%s: failed to write initial bytes of 0x%x "
+ "to window reg: 0x%X\n", __func__,
+ addr, reg_addr);
+ return status;
+ }
+
+ /*
+ * Write the address register again, this time write the whole
+ * 4-byte value. The effect here is that the LSB write causes the
+ * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
+ * effect since we are writing the same values again
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ int status;
+
+ /* set window register to start read cycle */
+ status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+ address);
+
+ if (status)
+ return status;
+
+ /* read the data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to read from window data addr\n",
+ __func__);
+ return status;
+ }
+
+ return status;
+}
+
+static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 data)
+{
+ int status;
+ u32 val = (__force u32) data;
+
+ /* set write data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window data addr\n",
+ __func__, data);
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ address);
+}
+
+static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+ /*
+ * Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath6kl_sdio_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit "
+ "count register: %d\n", ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath6kl_sdio_read_write_sync(ar,
+ RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_err("unable to send the bmi data to the device\n");
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
return 0;
}
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *req, *tmp_req;
+ void *context;
+
+ /* FIXME: make sure that wq is not queued again */
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+
+ if (req->scat_req) {
+ /* this is a scatter gather request */
+ req->scat_req->status = -ECANCELED;
+ req->scat_req->complete(ar_sdio->ar->htc_target,
+ req->scat_req);
+ } else {
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+ }
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
+
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.read_write_sync = ath6kl_sdio_read_write_sync,
.write_async = ath6kl_sdio_write_async,
@@ -763,8 +1179,47 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.scat_req_rw = ath6kl_sdio_async_rw_scatter,
.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
.suspend = ath6kl_sdio_suspend,
+ .resume = ath6kl_sdio_resume,
+ .diag_read32 = ath6kl_sdio_diag_read32,
+ .diag_write32 = ath6kl_sdio_diag_write32,
+ .bmi_read = ath6kl_sdio_bmi_read,
+ .bmi_write = ath6kl_sdio_bmi_write,
+ .power_on = ath6kl_sdio_power_on,
+ .power_off = ath6kl_sdio_power_off,
+ .stop = ath6kl_sdio_stop,
};
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+ return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+ ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
static int ath6kl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -773,8 +1228,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
struct ath6kl *ar;
int count;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
func->max_blksize, func->cur_blksize);
@@ -797,6 +1252,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
spin_lock_init(&ar_sdio->lock);
spin_lock_init(&ar_sdio->scat_lock);
spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->dma_buffer_mutex);
INIT_LIST_HEAD(&ar_sdio->scat_req);
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -815,62 +1271,29 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
}
ar_sdio->ar = ar;
+ ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
ar->hif_priv = ar_sdio;
ar->hif_ops = &ath6kl_sdio_ops;
+ ar->bmi.max_data_size = 256;
ath6kl_sdio_set_mbox_info(ar);
- sdio_claim_host(func);
-
- if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
- MANUFACTURER_ID_AR6003_BASE) {
- /* enable 4-bit ASYNC interrupt on AR6003 or later */
- ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
- CCCR_SDIO_IRQ_MODE_REG,
- SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
- if (ret) {
- ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
- ret);
- sdio_release_host(func);
- goto err_cfg80211;
- }
-
- ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
- }
-
- /* give us some time to enable, in ms */
- func->enable_timeout = 100;
-
- sdio_release_host(func);
-
- ret = ath6kl_sdio_power_on(ar_sdio);
- if (ret)
- goto err_cfg80211;
-
- sdio_claim_host(func);
-
- ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ ret = ath6kl_sdio_config(ar);
if (ret) {
- ath6kl_err("Set sdio block size %d failed: %d)\n",
- HIF_MBOX_BLOCK_SIZE, ret);
- sdio_release_host(func);
- goto err_off;
+ ath6kl_err("Failed to config sdio: %d\n", ret);
+ goto err_core_alloc;
}
- sdio_release_host(func);
-
ret = ath6kl_core_init(ar);
if (ret) {
ath6kl_err("Failed to init ath6kl core\n");
- goto err_off;
+ goto err_core_alloc;
}
return ret;
-err_off:
- ath6kl_sdio_power_off(ar_sdio);
-err_cfg80211:
- ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_core_alloc:
+ ath6kl_core_free(ar_sdio->ar);
err_dma:
kfree(ar_sdio->dma_buffer);
err_hif:
@@ -883,8 +1306,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
{
struct ath6kl_sdio *ar_sdio;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "removed func %d vendor 0x%x device 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
func->num, func->vendor, func->device);
ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1315,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
ath6kl_stop_txrx(ar_sdio->ar);
cancel_work_sync(&ar_sdio->wr_async_work);
- ath6kl_unavail_ev(ar_sdio->ar);
-
- ath6kl_sdio_power_off(ar_sdio);
+ ath6kl_core_cleanup(ar_sdio->ar);
kfree(ar_sdio->dma_buffer);
kfree(ar_sdio);
@@ -903,16 +1324,19 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{},
};
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = {
- .name = "ath6kl_sdio",
+ .name = "ath6kl",
.id_table = ath6kl_sdio_devices,
.probe = ath6kl_sdio_probe,
.remove = ath6kl_sdio_remove,
+ .drv.pm = ATH6KL_SDIO_PM_OPS,
};
static int __init ath6kl_sdio_init(void)
@@ -938,13 +1362,19 @@ MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
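The reworked ath6kl_sdio_suspend() above effectively selects one of four cfg80211 suspend modes from the host controller's PM capabilities. Below is a compact sketch of that decision, using only the flags and mode constants that appear in the diff; the helper itself (its name, the int return, and the sched_scan parameter standing in for ar->state == ATH6KL_STATE_SCHED_SCAN) is illustrative and not part of the patch.

static int ath6kl_pick_suspend_mode_sketch(u32 pm_caps, u32 conf_flags,
					   bool sched_scan, bool wow)
{
	if (!(pm_caps & MMC_PM_KEEP_POWER) ||
	    (conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER))
		return ATH6KL_CFG_SUSPEND_CUTPOWER;	/* host cuts power */

	if (!(pm_caps & MMC_PM_WAKE_SDIO_IRQ))
		return ATH6KL_CFG_SUSPEND_DEEPSLEEP;	/* keep power, no irq wake */

	if (sched_scan)
		return ATH6KL_CFG_SUSPEND_SCHED_SCAN;	/* sdio irq wakes the host */

	if (wow)
		return ATH6KL_CFG_SUSPEND_WOW;		/* wow triggers armed */

	return ATH6KL_CFG_SUSPEND_DEEPSLEEP;
}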
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index c9a76051f04..108a723a108 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -20,7 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
-#define AR6004_BOARD_DATA_SZ 7168
+#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
#define RESET_CONTROL_ADDRESS 0x00000000
@@ -320,7 +320,10 @@ struct host_interest {
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
|------------------------------------------------------------------------------|
*/
+#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
/* Convert a Target virtual address into a Target physical address */
@@ -331,20 +334,6 @@ struct host_interest {
(((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
(((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
-#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
-#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
-#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
-#define AR6003_REV2_RAM_RESERVE_SIZE 6912
-
-#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
-#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
-#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
-#define AR6003_REV3_RAM_RESERVE_SIZE 512
-
-#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400
-#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000
-#define AR6004_REV1_RAM_RESERVE_SIZE 11264
-
#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
struct ath6kl_dbglog_buf {
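The new HI_OPTION_FW_MODE_BITS and HI_OPTION_FW_SUBMODE_BITS macros give the width of the per-interface 2-bit fields shown in the layout comment above. One plausible way a caller would pack a per-vif firmware mode into the option word using those widths and shifts is sketched below; this is an assumption about intended use, not code from the patch.

static inline u32 hi_option_set_fw_mode(u32 option, u8 vif_idx, u8 fw_mode)
{
	u32 width = HI_OPTION_FW_MODE_BITS;
	u32 shift = HI_OPTION_FW_MODE_SHIFT + vif_idx * width;
	u32 mask = ((1u << width) - 1) << shift;

	/* clear the old 2-bit field for this vif, then insert the new mode */
	return (option & ~mask) | (((u32) fw_mode << shift) & mask);
}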
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a7117074f81..506a3031a88 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
return ar->node_map[ep_map].ep_id;
}
-static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
bool *more_data)
{
struct ethhdr *datap = (struct ethhdr *) skb->data;
struct ath6kl_sta *conn = NULL;
bool ps_queued = false, is_psq_empty = false;
+ struct ath6kl *ar = vif->ar;
if (is_multicast_ether_addr(datap->h_dest)) {
u8 ctr = 0;
@@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
* If this transmit is not because of a Dtim Expiry
* q it.
*/
- if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+ if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
bool is_mcastq_empty = false;
spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_mcastq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
MCAST_AID, 1);
ps_queued = true;
@@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
}
}
} else {
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (!conn) {
dev_kfree_skb(skb);
@@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
conn->aid, 1);
ps_queued = true;
@@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_cookie *cookie = NULL;
enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len);
/* If target is not associated */
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
dev_kfree_skb(skb);
return 0;
}
@@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
goto fail_tx;
/* AP mode Power saving processing */
- if (ar->nw_type == AP_NETWORK) {
- if (ath6kl_powersave_ap(ar, skb, &more_data))
+ if (vif->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(vif, skb, &more_data))
return 0;
}
if (test_bit(WMI_ENABLED, &ar->flag)) {
if (skb_headroom(skb) < dev->needed_headroom) {
- WARN_ON(1);
- goto fail_tx;
+ struct sk_buff *tmp_skb = skb;
+
+ skb = skb_realloc_headroom(skb, dev->needed_headroom);
+ kfree_skb(tmp_skb);
+ if (skb == NULL) {
+ vif->net_stats.tx_dropped++;
+ return 0;
+ }
}
if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
}
if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
- more_data, 0, 0, NULL)) {
+ more_data, 0, 0, NULL,
+ vif->fw_vif_idx)) {
ath6kl_err("wmi_data_hdr_add failed\n");
goto fail_tx;
}
- if ((ar->nw_type == ADHOC_NETWORK) &&
- ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+ if ((vif->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true;
else {
/* get the stream mapping */
- ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
- 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+ vif->fw_vif_idx, skb,
+ 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
if (ret)
goto fail_tx;
}
@@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
fail_tx:
dev_kfree_skb(skb);
- ar->net_stats.tx_dropped++;
- ar->net_stats.tx_aborted_errors++;
+ vif->net_stats.tx_dropped++;
+ vif->net_stats.tx_aborted_errors++;
return 0;
}
@@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet)
{
struct ath6kl *ar = target->dev->ar;
+ struct ath6kl_vif *vif;
enum htc_endpoint_id endpoint = packet->endpoint;
+ enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
if (endpoint == ar->ctrl_ep) {
/*
@@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
spin_unlock_bh(&ar->lock);
ath6kl_err("wmi ctrl ep is full\n");
- return HTC_SEND_FULL_KEEP;
+ return action;
}
if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
- return HTC_SEND_FULL_KEEP;
-
- if (ar->nw_type == ADHOC_NETWORK)
- /*
- * In adhoc mode, we cannot differentiate traffic
- * priorities so there is no need to continue, however we
- * should stop the network.
- */
- goto stop_net_queues;
+ return action;
/*
* The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -464,24 +470,36 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
*/
- return HTC_SEND_FULL_DROP;
+ action = HTC_SEND_FULL_DROP;
-stop_net_queues:
- spin_lock_bh(&ar->lock);
- set_bit(NETQ_STOPPED, &ar->flag);
- spin_unlock_bh(&ar->lock);
- netif_stop_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->nw_type == ADHOC_NETWORK ||
+ action != HTC_SEND_FULL_DROP) {
+ spin_unlock_bh(&ar->list_lock);
+
+ spin_lock_bh(&vif->if_lock);
+ set_bit(NETQ_STOPPED, &vif->flags);
+ spin_unlock_bh(&vif->if_lock);
+ netif_stop_queue(vif->ndev);
- return HTC_SEND_FULL_KEEP;
+ return action;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return action;
}
/* TODO this needs to be looked at */
-static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
enum htc_endpoint_id eid, u32 map_no)
{
+ struct ath6kl *ar = vif->ar;
u32 i;
- if (ar->nw_type != ADHOC_NETWORK)
+ if (vif->nw_type != ADHOC_NETWORK)
return;
if (!ar->ibss_ps_enable)
@@ -523,7 +541,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
int status;
enum htc_endpoint_id eid;
bool wake_event = false;
- bool flushing = false;
+ bool flushing[ATH6KL_VIF_MAX] = {false};
+ u8 if_idx;
+ struct ath6kl_vif *vif;
skb_queue_head_init(&skb_queue);
@@ -549,8 +569,6 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
if (!skb || !skb->data)
goto fatal;
- packet->buf = skb->data;
-
__skb_queue_tail(&skb_queue, skb);
if (!status && (packet->act_len != skb->len))
@@ -569,15 +587,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
wake_event = true;
}
+ if (eid == ar->ctrl_ep) {
+ if_idx = wmi_cmd_hdr_get_if_idx(
+ (struct wmi_cmd_hdr *) packet->buf);
+ } else {
+ if_idx = wmi_data_hdr_get_if_idx(
+ (struct wmi_data_hdr *) packet->buf);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
if (status) {
if (status == -ECANCELED)
/* a packet was flushed */
- flushing = true;
+ flushing[if_idx] = true;
+
+ vif->net_stats.tx_errors++;
- ar->net_stats.tx_errors++;
+ if (status != -ENOSPC && status != -ECANCELED)
+ ath6kl_warn("tx complete error: %d\n", status);
- if (status != -ENOSPC)
- ath6kl_err("tx error, status: 0x%x\n", status);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
"%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
__func__, skb, packet->buf, packet->act_len,
@@ -588,27 +621,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
__func__, skb, packet->buf, packet->act_len,
eid, "OK");
- flushing = false;
- ar->net_stats.tx_packets++;
- ar->net_stats.tx_bytes += skb->len;
+ flushing[if_idx] = false;
+ vif->net_stats.tx_packets++;
+ vif->net_stats.tx_bytes += skb->len;
}
- ath6kl_tx_clear_node_map(ar, eid, map_no);
+ ath6kl_tx_clear_node_map(vif, eid, map_no);
ath6kl_free_cookie(ar, ath6kl_cookie);
- if (test_bit(NETQ_STOPPED, &ar->flag))
- clear_bit(NETQ_STOPPED, &ar->flag);
+ if (test_bit(NETQ_STOPPED, &vif->flags))
+ clear_bit(NETQ_STOPPED, &vif->flags);
}
spin_unlock_bh(&ar->lock);
__skb_queue_purge(&skb_queue);
- if (test_bit(CONNECTED, &ar->flag)) {
- if (!flushing)
- netif_wake_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ !flushing[vif->fw_vif_idx]) {
+ spin_unlock_bh(&ar->list_lock);
+ netif_wake_queue(vif->ndev);
+ spin_lock_bh(&ar->list_lock);
+ }
}
+ spin_unlock_bh(&ar->list_lock);
if (wake_event)
wake_up(&ar->event_wq);
@@ -1041,8 +1081,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
struct ath6kl_sta *conn = NULL;
struct sk_buff *skb1 = NULL;
struct ethhdr *datap = NULL;
+ struct ath6kl_vif *vif;
u16 seq_no, offset;
- u8 tid;
+ u8 tid, if_idx;
ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
"%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1091,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
packet->act_len, status);
if (status || !(skb->data + HTC_HDR_LENGTH)) {
- ar->net_stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ if (ept == ar->ctrl_ep) {
+ if_idx =
+ wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+ } else {
+ if_idx =
+ wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
dev_kfree_skb(skb);
return;
}
@@ -1059,28 +1116,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* Take lock to protect buffer counts and adaptive power throughput
* state.
*/
- spin_lock_bh(&ar->lock);
+ spin_lock_bh(&vif->if_lock);
- ar->net_stats.rx_packets++;
- ar->net_stats.rx_bytes += packet->act_len;
+ vif->net_stats.rx_packets++;
+ vif->net_stats.rx_bytes += packet->act_len;
- spin_unlock_bh(&ar->lock);
+ spin_unlock_bh(&vif->if_lock);
- skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
- skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
- skb->dev = ar->net_dev;
+ skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
if (EPPING_ALIGNMENT_PAD > 0)
skb_pull(skb, EPPING_ALIGNMENT_PAD);
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
return;
}
+ ath6kl_check_wow_status(ar);
+
if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
@@ -1096,18 +1153,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* that do not have LLC hdr. They are 16 bytes in size.
* Allow these frames in the AP mode.
*/
- if (ar->nw_type != AP_NETWORK &&
+ if (vif->nw_type != AP_NETWORK &&
((packet->act_len < min_hdr_len) ||
(packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
ath6kl_info("frame len is too short or too long\n");
- ar->net_stats.rx_errors++;
- ar->net_stats.rx_length_errors++;
+ vif->net_stats.rx_errors++;
+ vif->net_stats.rx_length_errors++;
dev_kfree_skb(skb);
return;
}
/* Get the Power save state of the STA */
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
meta_type = wmi_data_hdr_get_meta(dhdr);
ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1186,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
datap = (struct ethhdr *) (skb->data + offset);
- conn = ath6kl_find_sta(ar, datap->h_source);
+ conn = ath6kl_find_sta(vif, datap->h_source);
if (!conn) {
dev_kfree_skb(skb);
@@ -1160,12 +1217,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
while ((skbuff = skb_dequeue(&conn->psq))
!= NULL) {
spin_unlock_bh(&conn->psq_lock);
- ath6kl_data_tx(skbuff, ar->net_dev);
+ ath6kl_data_tx(skbuff, vif->ndev);
spin_lock_bh(&conn->psq_lock);
}
spin_unlock_bh(&conn->psq_lock);
/* Clear the PVB for this STA */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 0);
}
}
@@ -1215,12 +1273,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return;
}
- if (!(ar->net_dev->flags & IFF_UP)) {
+ if (!(vif->ndev->flags & IFF_UP)) {
dev_kfree_skb(skb);
return;
}
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
datap = (struct ethhdr *) skb->data;
if (is_multicast_ether_addr(datap->h_dest))
/*
@@ -1235,8 +1293,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* frame to it on the air else send the
* frame up the stack.
*/
- struct ath6kl_sta *conn = NULL;
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (conn && ar->intra_bss) {
skb1 = skb;
@@ -1247,18 +1304,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
}
if (skb1)
- ath6kl_data_tx(skb1, ar->net_dev);
+ ath6kl_data_tx(skb1, vif->ndev);
+
+ if (skb == NULL) {
+ /* nothing to deliver up the stack */
+ return;
+ }
}
datap = (struct ethhdr *) skb->data;
if (is_unicast_ether_addr(datap->h_dest) &&
- aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+ aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
is_amsdu, skb))
/* aggregation code will handle the skb */
return;
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1398,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
memset(stats, 0, sizeof(struct rxtid_stats));
}
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+ u8 win_sz)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
struct rxtid_stats *stats;
u16 hold_q_size;
@@ -1405,9 +1468,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
return p_aggr;
}
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
if (!p_aggr)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a7de23cbd2c..f6f2aa27fc2 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -21,7 +21,7 @@
#include "../regd.h"
#include "../regd_common.h"
-static int ath6kl_wmi_sync_point(struct wmi *wmi);
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
static const s32 wmi_rate_tbl[][2] = {
/* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
return wmi->ep_id;
}
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+ struct ath6kl_vif *vif, *found = NULL;
+
+ if (WARN_ON(if_idx > (ar->vif_max - 1)))
+ return NULL;
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->fw_vif_idx == if_idx) {
+ found = vif;
+ break;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return found;
+}
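
The per-interface routing introduced here hinges on a small index carried in the WMI headers: the transmit paths stash if_idx in the header, and the receive/completion paths read it back and map it to a vif. A rough, hypothetical user-space model of that round trip is sketched below; the mask value, struct layout, and names are illustrative and not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define IF_IDX_MASK 0xF			/* illustrative mask, not WMI_CMD_HDR_IF_ID_MASK */

struct vif { uint8_t fw_vif_idx; const char *name; };

/* encode: keep the interface index in the low bits of a 16-bit header field */
static uint16_t hdr_encode(uint8_t if_idx) { return if_idx & IF_IDX_MASK; }

/* decode: recover the index and look it up in a small table of interfaces */
static struct vif *lookup(struct vif *tbl, int n, uint16_t info)
{
	uint8_t idx = info & IF_IDX_MASK;

	for (int i = 0; i < n; i++)
		if (tbl[i].fw_vif_idx == idx)
			return &tbl[i];
	return NULL;			/* unknown index: caller drops the frame */
}

int main(void)
{
	struct vif vifs[] = { { 0, "wlan0" }, { 1, "p2p0" } };
	struct vif *v = lookup(vifs, 2, hdr_encode(1));

	printf("%s\n", v ? v->name : "none");
	return 0;
}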
+
/* Performs DIX to 802.3 encapsulation for transmit packets.
 * Assumes the entire DIX header is contiguous and that there is
 * enough room in the buffer for an 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info)
+ u8 meta_ver, void *tx_meta_info, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
return -EINVAL;
if (tx_meta_info) {
@@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
return 0;
}
@@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
return ip_pri;
}
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb,
u32 layer2_priority, bool wmm_enabled,
u8 *ac)
{
@@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
usr_pri = layer2_priority & 0x7;
}
- /* workaround for WMM S5 */
+ /*
+ * workaround for WMM S5
+ *
+ * FIXME: wmi->traffic_class is always 100 so this test doesn't
+ * make sense
+ */
if ((wmi->traffic_class == WMM_AC_VI) &&
((usr_pri == 5) || (usr_pri == 4)))
usr_pri = 1;
@@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
/* Implicit streams are created with TSID 0xFF */
cmd.tsid = WMI_IMPLICIT_PSTREAM;
- ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+ ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
}
*ac = traffic_class;
@@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
}
static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
freq, dur);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
"(freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+ id = vif->last_roc_id;
+ cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
dur, GFP_ATOMIC);
return 0;
}
static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
- u8 *datap, int len)
+ u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cancel_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
"status=%u\n", freq, dur, ev->status);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
"channel (freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+ if (vif->last_cancel_roc_id &&
+ vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+ id = vif->last_cancel_roc_id; /* event for cancel command */
+ else
+ id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+ vif->last_cancel_roc_id = 0;
+ cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
NL80211_CHAN_NO_HT, GFP_ATOMIC);
return 0;
}
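
The cancel-remain-on-channel handler has to decide which cookie to report: the id of an explicit cancel request if one immediately preceded the current r-o-c, otherwise the id of the r-o-c that simply timed out. A compact sketch of that selection is shown here; the field names mirror the diff, but the surrounding type and values are invented.

#include <stdint.h>
#include <stdio.h>

struct roc_state { uint32_t last_roc_id, last_cancel_roc_id; };

static uint32_t pick_roc_cookie(struct roc_state *s)
{
	uint32_t id;

	if (s->last_cancel_roc_id &&
	    s->last_cancel_roc_id + 1 == s->last_roc_id)
		id = s->last_cancel_roc_id;	/* event answers a cancel command */
	else
		id = s->last_roc_id;		/* timeout on an uncanceled r-o-c */
	s->last_cancel_roc_id = 0;
	return id;
}

int main(void)
{
	struct roc_state s = { .last_roc_id = 5, .last_cancel_roc_id = 4 };

	printf("cookie %u\n", pick_roc_cookie(&s));	/* prints 4 */
	return 0;
}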
-static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tx_status_event *ev;
u32 id;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
id, ev->ack_status);
if (wmi->last_mgmt_tx_frame) {
- cfg80211_mgmt_tx_status(ar->net_dev, id,
+ cfg80211_mgmt_tx_status(vif->ndev, id,
wmi->last_mgmt_tx_frame,
wmi->last_mgmt_tx_frame_len,
!!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_p2p_rx_probe_req_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
"probe_req_report=%d\n",
- dlen, freq, ar->probe_req_report);
+ dlen, freq, vif->probe_req_report);
- if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_rx_action_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
}
/* Send a "simple" wmi command -- one with no arguments */
-static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_cmd_id cmd_id)
{
struct sk_buff *skb;
int ret;
@@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
if (!skb)
return -ENOMEM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
return ret;
}
@@ -641,7 +678,6 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
if (len < sizeof(struct wmi_ready_event_2))
return -EINVAL;
- wmi->ready = true;
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
- ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+ ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
return 0;
}
-static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+ cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->info.roam_mode = mode;
+ cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_connect_event *ev;
u8 *pie, *peie;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(struct wmi_connect_event))
return -EINVAL;
ev = (struct wmi_connect_event *) datap;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
/* AP mode start/STA connected event */
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
"(AP started)\n",
__func__, le16_to_cpu(ev->u.ap_bss.ch),
ev->u.ap_bss.bssid);
ath6kl_connect_ap_mode_bss(
- ar, le16_to_cpu(ev->u.ap_bss.ch));
+ vif, le16_to_cpu(ev->u.ap_bss.ch));
} else {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
"auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.apsd_info);
ath6kl_connect_ap_mode_sta(
- ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+ vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
ev->u.ap_sta.keymgmt,
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
pie += pie[1] + 2;
}
- ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+ ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
ev->u.sta.bssid,
le16_to_cpu(ev->u.sta.listen_intvl),
le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
alpha2[0] = country->isoName[0];
alpha2[1] = country->isoName[1];
- regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+ regulatory_hint(wmi->parent_dev->wiphy, alpha2);
ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
alpha2[0], alpha2[1]);
}
}
-static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_disconnect_event *ev;
wmi->traffic_class = 100;
@@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->disconn_reason, ev->assoc_resp_len);
wmi->is_wmm_enabled = false;
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+ ath6kl_disconnect_event(vif, ev->disconn_reason,
ev->bssid, ev->assoc_resp_len, ev->assoc_info,
le16_to_cpu(ev->proto_reason_status));
@@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tkip_micerr_event *ev;
@@ -895,12 +972,20 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_tkip_micerr_event *) datap;
- ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+ ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
return 0;
}
-static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+void ath6kl_wmi_sscan_timer(unsigned long ptr)
+{
+ struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+
+ cfg80211_sched_scan_results(vif->ar->wiphy);
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_bss_info_hdr2 *bih;
u8 *buf;
@@ -927,26 +1012,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0; /* Only update BSS table for now */
if (bih->frame_type == BEACON_FTYPE &&
- test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+ channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
if (channel == NULL)
return -EINVAL;
if (len < 8 + 2 + 2)
return -EINVAL;
- if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
- memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+ if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
+ && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
const u8 *tim;
tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
len - 8 - 2 - 2);
if (tim && tim[1] >= 2) {
- ar->assoc_bss_dtim_period = tim[3];
- set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_dtim_period = tim[3];
+ set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
}
}
@@ -966,7 +1052,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
} else {
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1065,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
memcpy(&mgmt->u.beacon, buf, len);
- bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+ bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
24 + len, (bih->snr - 95) * 100,
GFP_ATOMIC);
kfree(mgmt);
@@ -987,6 +1073,21 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return -ENOMEM;
cfg80211_put_bss(bss);
+ /*
+ * Firmware doesn't return any event when a scheduled scan has
+ * finished, so we need to use a timer to find out when there are
+ * no more results.
+ *
+ * The timer is started from the first bss info received; otherwise
+ * the timer would never fire if the scan interval is short
+ * enough.
+ */
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+ !timer_pending(&vif->sched_scan_timer)) {
+ mod_timer(&vif->sched_scan_timer, jiffies +
+ msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
+ }
+
return 0;
}
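
The scheduled-scan completion heuristic above (arm a timer on the first BSS info and report results when it fires) can be modeled outside the kernel with a pending-deadline flag. The delay constant and helper names below are made up for illustration; the real driver uses mod_timer()/timer_pending() and ATH6KL_SCHED_SCAN_RESULT_DELAY.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RESULT_DELAY_SEC 2		/* stand-in for the driver's delay constant */

static time_t deadline;
static bool pending;

static void on_bss_info(void)
{
	/* only the first result arms the deadline, mirroring !timer_pending() */
	if (!pending) {
		deadline = time(NULL) + RESULT_DELAY_SEC;
		pending = true;
	}
}

static void poll_timer(void)
{
	if (pending && time(NULL) >= deadline) {
		pending = false;
		printf("sched scan results ready\n");	/* cfg80211_sched_scan_results() */
	}
}

int main(void)
{
	on_bss_info();
	on_bss_info();			/* a second result does not push the deadline out */
	while (pending)
		poll_timer();
	return 0;
}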
@@ -1094,20 +1195,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_scan_complete_event *ev;
ev = (struct wmi_scan_complete_event *) datap;
- ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+ ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
wmi->is_probe_ssid = false;
return 0;
}
static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_neighbor_report_event *ev;
u8 i;
@@ -1125,7 +1227,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
ev->neighbor[i].bss_flags);
- cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+ cfg80211_pmksa_candidate_notify(vif->ndev, i,
ev->neighbor[i].bssid,
!!(ev->neighbor[i].bss_flags &
WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1268,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+ ath6kl_tgt_stats_event(vif, datap, len);
return 0;
}
@@ -1222,7 +1325,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1322,7 +1425,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cac_event *reply;
struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1447,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
IEEE80211_WMM_IE_TSPEC_TID_MASK;
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, tsid);
} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
/*
* Following assumes that there is only one outstanding
@@ -1358,7 +1463,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
break;
}
if (index < (sizeof(active_tsids) * 8))
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, index);
}
/*
@@ -1403,7 +1509,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1528,14 +1634,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
{
struct wmi_cmd_hdr *cmd_hdr;
enum htc_endpoint_id ep_id = wmi->ep_id;
int ret;
+ u16 info1;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
return -EINVAL;
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1661,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all data currently queued is transmitted before
* the cmd execution. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
skb_push(skb, sizeof(struct wmi_cmd_hdr));
cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
- cmd_hdr->info1 = 0; /* added for virtual interface */
+ info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+ cmd_hdr->info1 = cpu_to_le16(info1);
/* Only for OPT_TX_CMD, use BE endpoint. */
if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
- false, false, 0, NULL);
+ false, false, 0, NULL, if_idx);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -1582,20 +1690,22 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all new data queued waits for the command to
* execute. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
return 0;
}
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags)
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cc;
@@ -1635,19 +1745,19 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
cc->grp_crypto_len = group_crypto_len;
cc->ch = cpu_to_le16(channel);
cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+ cc->nw_subtype = nw_subtype;
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- wmi->pair_crypto_type = pairwise_crypto;
- wmi->grp_crypto_type = group_crypto;
-
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel)
{
struct sk_buff *skb;
struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1778,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
{
int ret;
@@ -1683,12 +1793,79 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
wmi->traffic_class = 100;
/* Disconnect command does not need to do a SYNC before. */
- ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+ ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
+{
+ struct sk_buff *skb;
+ struct wmi_begin_scan_cmd *sc;
+ s8 size;
+ int i, band, ret;
+ struct ath6kl *ar = wmi->parent_dev;
+ int num_rates;
+
+ size = sizeof(struct wmi_begin_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_begin_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->no_cck = cpu_to_le32(no_cck);
+ sc->num_ch = num_chan;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ ar->wiphy->bands[band];
+ u32 ratemask = rates[band];
+ u8 *supp_rates = sc->supp_rates[band].rates;
+ num_rates = 0;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & ratemask) == 0)
+ continue; /* skip rate */
+ supp_rates[num_rates++] =
+ (u8) (sband->bitrates[i].bitrate / 5);
+ }
+ sc->supp_rates[band].nrates = num_rates;
+ }
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
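
The rate-list construction in the new begin-scan command walks each band's bitrate table, keeps only the rates whose bit is set in the caller's mask, and stores them in 500 kbps units (bitrate / 5). A hedged, self-contained sketch of that conversion follows; the rate table is a made-up subset, not the wiphy's real band data.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bitrates in 100 kbps units, as cfg80211 stores them (illustrative subset) */
	const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };
	const int n = sizeof(bitrates) / sizeof(bitrates[0]);
	uint32_t ratemask = 0x0F;	/* pretend only the four CCK rates are allowed */
	uint8_t supp_rates[8];
	int num_rates = 0;

	for (int i = 0; i < n; i++) {
		if (!(ratemask & (1u << i)))
			continue;	/* rate filtered out, skip it */
		supp_rates[num_rates++] = (uint8_t)(bitrates[i] / 5);
	}

	for (int i = 0; i < num_rates; i++)
		printf("%u ", supp_rates[i]);	/* prints 2 4 11 22 -> 1/2/5.5/11 Mbps */
	printf("\n");
	return 0;
}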
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
+ * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list)
@@ -1724,13 +1901,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
for (i = 0; i < num_chan; i++)
sc->ch_list[i] = cpu_to_le16(ch_list[i]);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+ u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1935,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
{
struct sk_buff *skb;
struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1957,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
cmd->bss_filter = filter;
cmd->ie_mask = cpu_to_le32(ie_mask);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid)
{
struct sk_buff *skb;
@@ -1816,12 +1994,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
cmd->ssid_len = ssid_len;
memcpy(cmd->ssid, ssid, ssid_len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons)
{
struct sk_buff *skb;
@@ -1836,12 +2015,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
cmd->listen_intvl = cpu_to_le16(listen_interval);
cmd->num_beacons = cpu_to_le16(listen_beacons);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
{
struct sk_buff *skb;
struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +2034,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
cmd->pwr_mode = pwr_mode;
wmi->pwr_mode = pwr_mode;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy)
@@ -1881,12 +2060,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
{
struct sk_buff *skb;
struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +2078,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
cmd = (struct wmi_disc_timeout_cmd *) skb->data;
cmd->discon_timeout = timeout;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
return ret;
}
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag)
{
@@ -1920,7 +2104,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
key_index, key_type, key_usage, key_len, key_op_ctrl);
if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
- (key_material == NULL))
+ (key_material == NULL) || key_rsc_len > 8)
return -EINVAL;
if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2122,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
memcpy(cmd->key, key_material, key_len);
if (key_rsc != NULL)
- memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+ memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
cmd->key_op_ctrl = key_op_ctrl;
if (mac_addr)
memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
sync_flag);
return ret;
}
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
{
struct sk_buff *skb;
struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2148,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
cmd = (struct wmi_add_krk_cmd *) skb->data;
memcpy(cmd->krk, krk, WMI_KRK_LEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
{
struct sk_buff *skb;
struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2170,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
cmd->key_index = key_index;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set)
{
struct sk_buff *skb;
@@ -2018,14 +2203,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
cmd->enable = PMKID_DISABLE;
}
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
- enum htc_endpoint_id ep_id)
+ enum htc_endpoint_id ep_id, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
@@ -2037,14 +2222,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
data_hdr = (struct wmi_data_hdr *) skb->data;
data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
return ret;
}
-static int ath6kl_wmi_sync_point(struct wmi *wmi)
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
{
struct sk_buff *skb;
struct wmi_sync_cmd *cmd;
@@ -2100,7 +2285,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
* Send sync cmd followed by sync data messages on all
* endpoints being used
*/
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
NO_SYNC_WMIFLAG);
if (ret)
@@ -2119,7 +2304,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
traffic_class);
ret =
ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
- ep_id);
+ ep_id, if_idx);
if (ret)
break;
@@ -2142,7 +2327,7 @@ free_skb:
return ret;
}
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *params)
{
struct sk_buff *skb;
@@ -2231,12 +2416,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
ath6kl_indicate_tx_activity(wmi->parent_dev,
params->traffic_class, true);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid)
{
struct sk_buff *skb;
struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2458,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
"sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
traffic_class, tsid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
SYNC_BEFORE_WMIFLAG);
spin_lock_bh(&wmi->lock);
@@ -2311,17 +2497,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
cmd = (struct wmi_set_ip_cmd *) skb->data;
memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
- int len)
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
{
- if (len < sizeof(struct wmi_get_wow_list_reply))
+ u16 active_tsids;
+ u8 stream_exist;
+ int i;
+
+ /*
+ * Relinquish credits from all implicitly created pstreams
+ * when we go to sleep. If user-created explicit
+ * thin streams exist within a fatpipe, leave them intact
+ * for the user to delete.
+ */
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (stream_exist & (1 << i)) {
+
+ /*
+ * FIXME: Is this lock & unlock inside the
+ * for loop correct? May need rework.
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[i];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * If there are no user-created thin streams,
+ * delete the fatpipe.
+ */
+ if (!active_tsids) {
+ stream_exist &= ~(1 << i);
+ /*
+ * Indicate inactivity to the driver layer for
+ * this fatpipe (pstream).
+ */
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ i, false);
+ }
+ }
+ }
+
+ /* FIXME: Can we do this assignment without locking? */
+ spin_lock_bh(&wmi->lock);
+ wmi->fat_pipe_exist = stream_exist;
+ spin_unlock_bh(&wmi->lock);
+}
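
The credit-relinquish helper above boils down to bit bookkeeping: for each access class whose fatpipe bit is set, drop the bit (and signal inactivity) only if no user-created thin-stream TSIDs remain. A minimal stand-alone sketch of that bit logic is given below; the AC count and the sample values are assumed for the example, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define NUM_AC 4

int main(void)
{
	uint8_t fat_pipe_exist = 0x0B;			/* ACs 0, 1 and 3 have fatpipes */
	uint16_t tsids_for_ac[NUM_AC] = { 0, 0x4, 0, 0 };	/* only AC1 has user TSIDs */

	for (int i = 0; i < NUM_AC; i++) {
		if (!(fat_pipe_exist & (1 << i)))
			continue;
		if (!tsids_for_ac[i]) {
			fat_pipe_exist &= ~(1 << i);	/* no thin streams: delete fatpipe */
			printf("AC %d: indicate inactivity\n", i);
		}
	}
	printf("remaining fatpipes: 0x%02x\n", fat_pipe_exist);	/* 0x02 */
	return 0;
}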
+
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_set_host_sleep_mode_cmd *cmd;
+ int ret;
+
+ if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+ (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+ ath6kl_err("invalid host sleep mode: %d\n", host_mode);
return -EINVAL;
+ }
- return 0;
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+ if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+ ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+ cmd->asleep = cpu_to_le32(1);
+ } else
+ cmd->awake = cpu_to_le32(1);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wow_mode_cmd *cmd;
+ int ret;
+
+ if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+ wow_mode != ATH6KL_WOW_MODE_DISABLE) {
+ ath6kl_err("invalid wow mode: %d\n", wow_mode);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+ cmd->enable_wow = cpu_to_le32(wow_mode);
+ cmd->filter = cpu_to_le32(filter);
+ cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask)
+{
+ struct sk_buff *skb;
+ struct wmi_add_wow_pattern_cmd *cmd;
+ u16 size;
+ u8 *filter_mask;
+ int ret;
+
+ /*
+ * Allocate additional memory in the buffer to hold the
+ * filter and mask values, i.e. twice filter_size.
+ */
+ size = sizeof(*cmd) + (2 * filter_size);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = list_id;
+ cmd->filter_size = filter_size;
+ cmd->filter_offset = filter_offset;
+
+ memcpy(cmd->filter, filter, filter_size);
+
+ filter_mask = (u8 *) (cmd->filter + filter_size);
+ memcpy(filter_mask, mask, filter_size);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
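
The WoW pattern command places the filter bytes and then the mask bytes back-to-back after the fixed header, which is why the buffer is sized sizeof(*cmd) + 2 * filter_size. A small sketch of that layout with a hypothetical header struct and sample bytes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wow_cmd_hdr {			/* illustrative header, not the WMI struct */
	uint8_t list_id, size, offset;
	uint8_t data[];			/* filter bytes, then mask bytes */
};

int main(void)
{
	uint8_t filter[] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t mask[]   = { 0xff, 0xff, 0x00, 0xff };
	size_t fsize = sizeof(filter);
	struct wow_cmd_hdr *cmd = calloc(1, sizeof(*cmd) + 2 * fsize);

	if (!cmd)
		return 1;
	cmd->size = (uint8_t)fsize;
	memcpy(cmd->data, filter, fsize);		/* filter first ... */
	memcpy(cmd->data + fsize, mask, fsize);		/* ... mask right after it */

	printf("total payload: %zu bytes\n", sizeof(*cmd) + 2 * fsize);
	free(cmd);
	return 0;
}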
+
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id)
+{
+ struct sk_buff *skb;
+ struct wmi_del_wow_pattern_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = cpu_to_le16(list_id);
+ cmd->filter_id = cpu_to_le16(filter_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
}
static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2678,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
return ret;
}
@@ -2379,12 +2721,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
return ret;
}
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
}
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
{
struct sk_buff *skb;
struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2739,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
cmd->dbM = dbM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
+{
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
+}
+
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
}
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+ u8 preamble_policy)
{
struct sk_buff *skb;
struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2770,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
cmd->status = status;
cmd->preamble_policy = preamble_policy;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
@@ -2440,11 +2788,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
cmd = (struct wmi_set_rts_cmd *) skb->data;
cmd->threshold = cpu_to_le16(threshold);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
{
struct sk_buff *skb;
struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2809,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
cmd->txop_enable = cfg;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl)
{
struct sk_buff *skb;
struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2827,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
cmd = (struct wmi_set_keepalive_cmd *) skb->data;
cmd->keep_alive_intvl = keep_alive_intvl;
- wmi->keep_alive_intvl = keep_alive_intvl;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
return ret;
}
@@ -2495,7 +2848,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
memcpy(skb->data, buf, len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
return ret;
}
@@ -2528,28 +2881,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
- aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+ aggr_recv_addba_req_evt(vif, cmd->tid,
le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
return 0;
}
-static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
- aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+ aggr_recv_delba_req_evt(vif, cmd->tid);
return 0;
}
/* AP mode functions */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cm;
@@ -2562,7 +2918,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
cm = (struct wmi_connect_cmd *) skb->data;
memcpy(cm, p, sizeof(*cm));
- res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+ res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
NO_SYNC_WMIFLAG);
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
"ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2927,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
return res;
}
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+ u16 reason)
{
struct sk_buff *skb;
struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2942,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
cm->reason = cpu_to_le16(reason);
cm->cmd = cmd;
- return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+ return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
NO_SYNC_WMIFLAG);
}
-static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_pspoll_event *ev;
@@ -2598,19 +2956,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_pspoll_event *) datap;
- ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+ ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
return 0;
}
-static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_dtimexpiry_event(wmi->parent_dev);
+ ath6kl_dtimexpiry_event(vif);
return 0;
}
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+ bool flag)
{
struct sk_buff *skb;
struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2985,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
cmd->rsvd = cpu_to_le16(0);
cmd->flag = cpu_to_le32(flag);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
return 0;
}
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_ver,
bool rx_dot11_hdr, bool defrag_on_host)
{
struct sk_buff *skb;
@@ -2648,14 +3009,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
cmd->meta_ver = rx_meta_ver;
/* Delete the local aggr state, on host */
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len)
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len)
{
struct sk_buff *skb;
struct wmi_set_appie_cmd *p;
@@ -2669,8 +3030,11 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
p = (struct wmi_set_appie_cmd *) skb->data;
p->mgmt_frm_type = mgmt_frm_type;
p->ie_len = ie_len;
- memcpy(p->ie_info, ie, ie_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+
+ if (ie != NULL && ie_len > 0)
+ memcpy(p->ie_info, ie, ie_len);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -2688,11 +3052,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
cmd->disable = disable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
{
struct sk_buff *skb;
struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +3070,16 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
p = (struct wmi_remain_on_chnl_cmd *) skb->data;
p->freq = cpu_to_le32(freq);
p->duration = cpu_to_le32(dur);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len)
+/* ath6kl_wmi_send_action_cmd is to be deprecated. Use
+ * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len)
{
struct sk_buff *skb;
struct wmi_send_action_cmd *p;
@@ -2731,6 +3099,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
}
kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -2742,18 +3111,61 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
p->wait = cpu_to_le32(wait);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
NO_SYNC_WMIFLAG);
}
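
Both the action and mgmt TX paths keep a private copy of the last transmitted frame so that the later TX-status event can hand the original bytes back to cfg80211; the memcpy() added above fills a copy buffer that was previously allocated but left empty. A minimal illustration of that bookkeeping pattern, with hypothetical names and user-space allocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *last_frame;
static size_t last_frame_len;

static int remember_frame(const unsigned char *data, size_t len)
{
	unsigned char *buf = malloc(len);

	if (!buf)
		return -1;
	free(last_frame);		/* drop the previous copy, as the driver does */
	memcpy(buf, data, len);		/* the step the patch adds */
	last_frame = buf;
	last_frame_len = len;
	return 0;
}

int main(void)
{
	unsigned char frame[] = { 0xd0, 0x00, 0x3c, 0x00 };

	remember_frame(frame, sizeof(frame));
	printf("saved %zu bytes, first byte 0x%02x\n", last_frame_len, last_frame[0]);
	free(last_frame);
	return 0;
}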
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len)
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck)
{
struct sk_buff *skb;
- struct wmi_p2p_probe_response_cmd *p;
+ struct wmi_send_mgmt_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
+ "len=%u\n", id, freq, wait, data_len);
+ p = (struct wmi_send_mgmt_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->no_cck = cpu_to_le32(no_cck);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_p2p_probe_response_cmd *p;
+ size_t cmd_len = sizeof(*p) + data_len;
+
+ if (data_len == 0)
+ cmd_len++; /* work around target minimum length requirement */
+
+ skb = ath6kl_wmi_get_new_buf(cmd_len);
if (!skb)
return -ENOMEM;
@@ -2764,11 +3176,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
memcpy(p->destination_addr, dst, ETH_ALEN);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
{
struct sk_buff *skb;
struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3194,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
enable);
p = (struct wmi_probe_req_report_cmd *) skb->data;
p->enable = enable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
{
struct sk_buff *skb;
struct wmi_get_p2p_info *p;
@@ -2798,14 +3211,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
info_req_flags);
p = (struct wmi_get_p2p_info *) skb->data;
p->info_req_flags = cpu_to_le32(info_req_flags);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
{
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
- return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
}
static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3232,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmix_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
@@ -2840,7 +3253,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_warn("unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -2848,12 +3260,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
/* Control Path */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd;
+ struct ath6kl_vif *vif;
u32 len;
u16 id;
+ u8 if_idx;
u8 *datap;
int ret = 0;
@@ -2863,12 +3282,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmi_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
dev_kfree_skb(skb);
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
+ if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -2879,6 +3298,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
+ vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+ if (!vif) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Wmi event for unavailable vif, vif_index:%d\n",
+ if_idx);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
switch (id) {
case WMI_GET_BITRATE_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3326,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
- ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
break;
case WMI_DISCONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
- ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
break;
case WMI_PEER_NODE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3338,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_TKIP_MICERR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
- ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
break;
case WMI_BSSINFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
- ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3354,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_NEIGHBOR_REPORT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
- ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+ vif);
break;
case WMI_SCAN_COMPLETE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
- ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+ ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
break;
case WMI_CMDERROR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3367,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_STATISTICS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
- ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
break;
case WMI_RSSI_THRESHOLD_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3382,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_ROAM_TBL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
break;
case WMI_EXTENSION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3390,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CAC_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
- ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
break;
case WMI_CHANNEL_CHANGE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3426,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_GET_WOW_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
- ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
break;
case WMI_GET_PMKID_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3433,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_PSPOLL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
- ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
break;
case WMI_DTIMEXPIRY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
- ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
break;
case WMI_SET_PARAMS_REPLY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
break;
case WMI_ADDBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_ADDBA_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
break;
case WMI_DELBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3467,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
- ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
break;
case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
- len);
+ len, vif);
break;
case WMI_TX_STATUS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
- ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
break;
case WMI_RX_PROBE_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
- ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_CAPABILITIES_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3489,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
- ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_INFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3497,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -3078,11 +3506,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
-static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+void ath6kl_wmi_reset(struct wmi *wmi)
{
- if (!wmi)
- return;
-
spin_lock_bh(&wmi->lock);
wmi->fat_pipe_exist = 0;
@@ -3103,16 +3528,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev)
wmi->parent_dev = dev;
- ath6kl_wmi_qos_state_init(wmi);
-
wmi->pwr_mode = REC_POWER;
- wmi->phy_mode = WMI_11G_MODE;
-
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- wmi->ht_allowed[A_BAND_24GHZ] = 1;
- wmi->ht_allowed[A_BAND_5GHZ] = 1;
+ ath6kl_wmi_reset(wmi);
return wmi;
}
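
The wmi.c changes above converge on one idea: every inbound WMI event now identifies its interface, and ath6kl_wmi_control_rx() resolves that index to a vif before invoking the per-event handlers, dropping events whose vif is unavailable. A minimal standalone sketch of that lookup step follows; the struct layouts, the linear search, and dispatch_event() are simplified stand-ins for illustration (the driver also runs info1 through le16_to_cpu(), omitted here), not the actual implementation.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define WMI_CMD_HDR_IF_ID_MASK 0xF         /* low 4 bits of info1, as in wmi.h */

/* Simplified stand-ins for the driver structures (illustration only). */
struct wmi_cmd_hdr {
        uint16_t cmd_id;
        uint16_t info1;                    /* b3:b0 carry the interface index */
        uint16_t reserved;
};

struct vif {
        uint8_t idx;
        int up;
};

/* Mirror of the new lookup step in ath6kl_wmi_control_rx(): extract the
 * interface index and find the matching vif; a NULL return means the
 * event is dropped with -EINVAL. */
static struct vif *dispatch_event(const struct wmi_cmd_hdr *hdr,
                                  struct vif *vifs, size_t num_vifs)
{
        uint8_t if_idx = hdr->info1 & WMI_CMD_HDR_IF_ID_MASK;
        size_t i;

        for (i = 0; i < num_vifs; i++)
                if (vifs[i].up && vifs[i].idx == if_idx)
                        return &vifs[i];

        return NULL;                       /* unavailable vif */
}

int main(void)
{
        struct vif vifs[] = { { .idx = 0, .up = 1 }, { .idx = 1, .up = 0 } };
        struct wmi_cmd_hdr hdr = { .cmd_id = 0x1004, .info1 = 0x0001 };

        /* vif 1 exists but is down, so this event would be discarded. */
        printf("deliver event: %s\n",
               dispatch_event(&hdr, vifs, 2) ? "yes" : "no");
        return 0;
}
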
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index f8e644d54aa..42ac311eda4 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -93,11 +93,6 @@ struct sq_threshold_params {
u8 last_rssi_poll_event;
};
-struct wmi_stats {
- u32 cmd_len_err;
- u32 cmd_id_err;
-};
-
struct wmi_data_sync_bufs {
u8 traffic_class;
struct sk_buff *skb;
@@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VO 3 /* voice */
struct wmi {
- bool ready;
u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
- struct wmi_stats stat;
u8 pwr_mode;
- u8 phy_mode;
- u8 keep_alive_intvl;
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
- enum crypto_type pair_crypto_type;
- enum crypto_type grp_crypto_type;
bool is_wmm_enabled;
- u8 ht_allowed[A_NUM_BANDS];
u8 traffic_class;
bool is_probe_ssid;
u8 *last_mgmt_tx_frame;
size_t last_mgmt_tx_frame_len;
+ u8 saved_pwr_mode;
};
struct host_app_area {
- u32 wmi_protocol_ver;
-};
+ __le32 wmi_protocol_ver;
+} __packed;
enum wmi_msg_type {
DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
#define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13
+#define WMI_DATA_HDR_IF_IDX_MASK 0xF
+
struct wmi_data_hdr {
s8 rssi;
@@ -208,6 +199,12 @@ struct wmi_data_hdr {
* b15:b13 - META_DATA_VERSION 0 - 7
*/
__le16 info2;
+
+ /*
+ * usage of info3, 16-bit:
+ * b3:b0 - Interface index
+ * b15:b4 - Reserved
+ */
__le16 info3;
} __packed;
@@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
WMI_DATA_HDR_META_MASK;
}
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+ return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
+
/* Tx meta version definitions */
#define WMI_MAX_TX_META_SZ 12
#define WMI_META_VERSION_1 0x01
@@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
u8 csum_flags;
} __packed;
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
/* Control Path */
struct wmi_cmd_hdr {
__le16 cmd_id;
@@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
__le16 reserved;
} __packed;
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+ return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
+
/* List of WMI commands */
enum wmi_cmd_id {
WMI_CONNECT_CMDID = 0x0001,
@@ -320,6 +329,10 @@ enum wmi_cmd_id {
WMI_SYNCHRONIZE_CMDID,
WMI_CREATE_PSTREAM_CMDID,
WMI_DELETE_PSTREAM_CMDID,
+ /* WMI_START_SCAN_CMDID is to be deprecated. Use
+ * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
WMI_START_SCAN_CMDID,
WMI_SET_SCAN_PARAMS_CMDID,
WMI_SET_BSS_FILTER_CMDID,
@@ -533,12 +546,61 @@ enum wmi_cmd_id {
WMI_GTK_OFFLOAD_OP_CMDID,
WMI_REMAIN_ON_CHNL_CMDID,
WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+ /* WMI_SEND_ACTION_CMDID is to be deprecated. Use
+ * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
WMI_SEND_ACTION_CMDID,
WMI_PROBE_REQ_REPORT_CMDID,
WMI_DISABLE_11B_RATES_CMDID,
WMI_SEND_PROBE_RESPONSE_CMDID,
WMI_GET_P2P_INFO_CMDID,
WMI_AP_JOIN_BSS_CMDID,
+
+ WMI_SMPS_ENABLE_CMDID,
+ WMI_SMPS_CONFIG_CMDID,
+ WMI_SET_RATECTRL_PARM_CMDID,
+ /* LPL specific commands*/
+ WMI_LPL_FORCE_ENABLE_CMDID,
+ WMI_LPL_SET_POLICY_CMDID,
+ WMI_LPL_GET_POLICY_CMDID,
+ WMI_LPL_GET_HWSTATE_CMDID,
+ WMI_LPL_SET_PARAMS_CMDID,
+ WMI_LPL_GET_PARAMS_CMDID,
+
+ WMI_SET_BUNDLE_PARAM_CMDID,
+
+ /*GreenTx specific commands*/
+
+ WMI_GREENTX_PARAMS_CMDID,
+
+ WMI_RTT_MEASREQ_CMDID,
+ WMI_RTT_CAPREQ_CMDID,
+ WMI_RTT_STATUSREQ_CMDID,
+
+ /* WPS Commands */
+ WMI_WPS_START_CMDID,
+ WMI_GET_WPS_STATUS_CMDID,
+
+ /* More P2P commands */
+ WMI_SET_NOA_CMDID,
+ WMI_GET_NOA_CMDID,
+ WMI_SET_OPPPS_CMDID,
+ WMI_GET_OPPPS_CMDID,
+ WMI_ADD_PORT_CMDID,
+ WMI_DEL_PORT_CMDID,
+
+ /* 802.11w cmd */
+ WMI_SET_RSN_CAP_CMDID,
+ WMI_GET_RSN_CAP_CMDID,
+ WMI_SET_IGTK_CMDID,
+
+ WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID,
+ WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID,
+
+ WMI_SEND_MGMT_CMDID,
+ WMI_BEGIN_SCAN_CMDID,
+
};
enum wmi_mgmt_frame_type {
@@ -558,6 +620,14 @@ enum network_type {
AP_NETWORK = 0x10,
};
+enum network_subtype {
+ SUBTYPE_NONE,
+ SUBTYPE_BT,
+ SUBTYPE_P2PDEV,
+ SUBTYPE_P2PCLIENT,
+ SUBTYPE_P2PGO,
+};
+
enum dot11_auth_mode {
OPEN_AUTH = 0x01,
SHARED_AUTH = 0x02,
@@ -576,9 +646,6 @@ enum auth_mode {
WPA2_AUTH_CCKM = 0x40,
};
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
#define WMI_MIN_KEY_INDEX 0
#define WMI_MAX_KEY_INDEX 3
@@ -617,6 +684,7 @@ enum wmi_connect_ctrl_flags_bits {
CONNECT_CSA_FOLLOW_BSS = 0x0020,
CONNECT_DO_WPA_OFFLOAD = 0x0040,
CONNECT_DO_NOT_DEAUTH = 0x0080,
+ CONNECT_WPS_FLAG = 0x0100,
};
struct wmi_connect_cmd {
@@ -632,6 +700,7 @@ struct wmi_connect_cmd {
__le16 ch;
u8 bssid[ETH_ALEN];
__le32 ctrl_flags;
+ u8 nw_subtype;
} __packed;
/* WMI_RECONNECT_CMDID */
@@ -719,7 +788,12 @@ enum wmi_scan_type {
WMI_SHORT_SCAN = 1,
};
-struct wmi_start_scan_cmd {
+struct wmi_supp_rates {
+ u8 nrates;
+ u8 rates[ATH6KL_RATE_MAXSIZE];
+};
+
+struct wmi_begin_scan_cmd {
__le32 force_fg_scan;
/* for legacy cisco AP compatibility */
@@ -731,9 +805,15 @@ struct wmi_start_scan_cmd {
/* time interval between scans (msec) */
__le32 force_scan_intvl;
+ /* no CCK rates */
+ __le32 no_cck;
+
/* enum wmi_scan_type */
u8 scan_type;
+ /* Supported rates to advertise in the probe request frames */
+ struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+
/* how many channels follow */
u8 num_ch;
@@ -741,8 +821,31 @@ struct wmi_start_scan_cmd {
__le16 ch_list[1];
} __packed;
-/* WMI_SET_SCAN_PARAMS_CMDID */
-#define WMI_SHORTSCANRATIO_DEFAULT 3
+/* wmi_start_scan_cmd is to be deprecated. Use
+ * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel(msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in Mhz */
+ __le16 ch_list[1];
+} __packed;
/*
* Warning: scan control flag value of 0xFF is used to disable
@@ -776,13 +879,6 @@ enum wmi_scan_ctrl_flags_bits {
ENABLE_SCAN_ABORT_EVENT = 0x40
};
-#define DEFAULT_SCAN_CTRL_FLAGS \
- (CONNECT_SCAN_CTRL_FLAGS | \
- SCAN_CONNECTED_CTRL_FLAGS | \
- ACTIVE_SCAN_CTRL_FLAGS | \
- ROAM_SCAN_CTRL_FLAGS | \
- ENABLE_AUTO_CTRL_FLAGS)
-
struct wmi_scan_params_cmd {
/* sec */
__le16 fg_start_period;
@@ -1365,14 +1461,20 @@ enum wmi_roam_ctrl {
WMI_SET_LRSSI_SCAN_PARAMS,
};
+enum wmi_roam_mode {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+ WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
struct bss_bias {
u8 bssid[ETH_ALEN];
- u8 bias;
+ s8 bias;
} __packed;
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[1];
+ struct bss_bias bss_bias[0];
} __packed;
struct low_rssi_scan_params {
@@ -1385,10 +1487,11 @@ struct low_rssi_scan_params {
struct roam_ctrl_cmd {
union {
- u8 bssid[ETH_ALEN];
- u8 roam_mode;
- struct bss_bias_info bss;
- struct low_rssi_scan_params params;
+ u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+ u8 roam_mode; /* WMI_SET_ROAM_MODE */
+ struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+ struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+ */
} __packed info;
u8 roam_ctrl;
} __packed;
@@ -1455,6 +1558,10 @@ struct wmi_tkip_micerr_event {
u8 is_mcast;
} __packed;
+enum wmi_scan_status {
+ WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
/* WMI_SCAN_COMPLETE_EVENTID */
struct wmi_scan_complete_event {
a_sle32 status;
@@ -1635,6 +1742,12 @@ struct wmi_bss_roam_info {
u8 reserved;
} __packed;
+struct wmi_target_roam_tbl {
+ __le16 roam_mode;
+ __le16 num_entries;
+ struct wmi_bss_roam_info info[];
+} __packed;
+
/* WMI_CAC_EVENTID */
enum cac_indication {
CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1884,6 @@ struct wmi_set_appie_cmd {
#define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0
-#define WOW_MAX_FILTER_LISTS 1
#define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64
#define WOW_MASK_SIZE 64
@@ -1794,17 +1906,52 @@ struct wmi_set_ip_cmd {
__le32 ips[MAX_IP_ADDRS];
} __packed;
-/* WMI_GET_WOW_LIST_CMD reply */
-struct wmi_get_wow_list_reply {
- /* number of patterns in reply */
- u8 num_filters;
+enum ath6kl_wow_filters {
+ WOW_FILTER_SSID = BIT(1),
+ WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
+ WOW_FILTER_OPTION_EAP_REQ = BIT(3),
+ WOW_FILTER_OPTION_PATTERNS = BIT(4),
+ WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
+ WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
+ WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
+ WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
+ WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
+ WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
+ WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
+ WOW_FILTER_OPTION_TEST_MODE = BIT(15),
+};
+
+enum ath6kl_host_mode {
+ ATH6KL_HOST_MODE_AWAKE,
+ ATH6KL_HOST_MODE_ASLEEP,
+};
+
+struct wmi_set_host_sleep_mode_cmd {
+ __le32 awake;
+ __le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+ ATH6KL_WOW_MODE_DISABLE,
+ ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+ __le32 enable_wow;
+ __le32 filter;
+ __le16 host_req_delay;
+} __packed;
- /* this is filter # x of total num_filters */
- u8 this_filter_num;
+struct wmi_add_wow_pattern_cmd {
+ u8 filter_list_id;
+ u8 filter_size;
+ u8 filter_offset;
+ u8 filter[0];
+} __packed;
- u8 wow_mode;
- u8 host_mode;
- struct wow_filter wow_filters[1];
+struct wmi_del_wow_pattern_cmd {
+ __le16 filter_list_id;
+ __le16 filter_id;
} __packed;
/* WMI_SET_AKMP_PARAMS_CMD */
@@ -1905,7 +2052,7 @@ struct wmi_tx_complete_event {
* !!! Warning !!!
* -Changing the following values needs compilation of both driver and firmware
*/
-#define AP_MAX_NUM_STA 8
+#define AP_MAX_NUM_STA 10
/* Spl. AID used to set DTIM flag in the beacons */
#define MCAST_AID 0xFF
@@ -1988,6 +2135,10 @@ struct wmi_remain_on_chnl_cmd {
__le32 duration;
} __packed;
+/* wmi_send_action_cmd is to be deprecated. Use
+ * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
struct wmi_send_action_cmd {
__le32 id;
__le32 freq;
@@ -1996,6 +2147,15 @@ struct wmi_send_action_cmd {
u8 data[0];
} __packed;
+struct wmi_send_mgmt_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le32 no_cck;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
struct wmi_tx_status_event {
__le32 id;
u8 ack_status;
@@ -2163,120 +2323,162 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info);
+ u8 meta_ver, void *tx_meta_info, u8 if_idx);
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
- u32 layer2_priority, bool wmm_enabled,
- u8 *ac);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb, u32 layer2_priority,
+ bool wmm_enabled, u8 *ac);
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags);
-
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list);
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck,
+ u32 *rates);
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
u8 scan_ctrl_flag, u32 max_dfsch_act_time,
u16 maxact_scan_per_ssid);
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+ u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid);
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons);
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy);
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *pstream);
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
u8 preamble_policy);
int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set);
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index);
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
/* AP mode */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p);
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+ const u8 *mac, u16 reason);
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_version,
bool rx_dot11_hdr, bool defrag_on_host);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
/* P2P */
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ u32 dur);
+
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len);
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck);
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len);
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+void ath6kl_wmi_sscan_timer(unsigned long ptr);
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
void *ath6kl_wmi_init(struct ath6kl *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
#endif /* WMI_H */
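
The header side of the same rework carves the low four bits out of the data header's info3 and the command header's info1 for the interface index, leaving the upper bits reserved. Only the getters appear in the hunks above; the sketch below adds an illustrative setter to show the intended packing (host byte order, plain masks, no le16 conversion) and is not the driver's code.

#include <stdint.h>
#include <assert.h>

#define WMI_DATA_HDR_IF_IDX_MASK 0xF       /* b3:b0 of info3, as in wmi.h */

/* Illustrative setter: place an interface index in info3 without
 * disturbing the reserved upper bits. */
static uint16_t wmi_info3_set_if_idx(uint16_t info3, uint8_t if_idx)
{
        info3 &= ~WMI_DATA_HDR_IF_IDX_MASK;
        return info3 | (if_idx & WMI_DATA_HDR_IF_IDX_MASK);
}

/* Host-order counterpart of wmi_data_hdr_get_if_idx() above. */
static uint8_t wmi_info3_get_if_idx(uint16_t info3)
{
        return info3 & WMI_DATA_HDR_IF_IDX_MASK;
}

int main(void)
{
        uint16_t info3 = 0xA0F0;           /* arbitrary reserved-bit pattern */

        info3 = wmi_info3_set_if_idx(info3, 2);
        assert(wmi_info3_get_if_idx(info3) == 2);
        assert((info3 & 0xFFF0) == 0xA0F0); /* reserved bits untouched */
        return 0;
}
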
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9c08c619a3..dc6be4afe8e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+config ATH9K_DFS_DEBUGFS
+ def_bool y
+ depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
config ATH9K
tristate "Atheros 802.11n wireless cards support"
@@ -25,6 +28,7 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
+ default y
depends on ATH9K && PCI
---help---
This option enables the PCI bus support in ath9k.
@@ -50,6 +54,25 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH9K && EXPERT
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath9k. There is no way to dynamically detect if a card was DFS
+ certified and as such this is left as a build time option. This
+ option should only be enabled by system integrators that can
+ guarantee that all the platforms that their kernel will run on
+ have obtained appropriate regulatory body certification for a
+ respective Atheros card by using ath9k on the target shipping
+ platforms.
+
+ This is currently only a placeholder for future DFS support,
+ as DFS support requires more components that still need to be
+ developed. At this point enabling this option won't do anything
+ except increase code size.
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
@@ -58,6 +81,14 @@ config ATH9K_RATE_CONTROL
Say Y, if you want to use the ath9k specific rate control
module instead of minstrel_ht.
+config ATH9K_BTCOEX_SUPPORT
+ bool "Atheros ath9k bluetooth coexistence support"
+ depends on ATH9K
+ default y
+ ---help---
+ Say Y, if you want to use the ath9k radios together with
+ Bluetooth modules in the same system.
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
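
CONFIG_ATH9K_DFS_DEBUGFS above is a hidden def_bool: it has no prompt and switches on only when both ATH9K_DEBUGFS and ATH9K_DFS_CERTIFIED are enabled, which is why the Makefile hunk that follows can add dfs_debug.o conditionally. The usual way C code consumes such a symbol is a guarded definition plus an inline stub, so call sites need no #ifdefs of their own; the sketch below shows only that generic pattern with placeholder names, not the dfs_debug.h this series actually adds.

#include <stdio.h>

struct dfs_ctx { int dummy; };             /* placeholder for the real softc */

#ifdef CONFIG_ATH9K_DFS_DEBUGFS
static void dfs_init_debug(struct dfs_ctx *ctx)
{
        printf("registering DFS debugfs entries (ctx=%p)\n", (void *)ctx);
}
#else
static inline void dfs_init_debug(struct dfs_ctx *ctx)
{
        (void)ctx;                         /* compiled out when the option is off */
}
#endif

int main(void)
{
        struct dfs_ctx ctx = { 0 };

        dfs_init_debug(&ctx);              /* identical call site either way */
        return 0;
}
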
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 36ed3c46fec..da02242499a 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,11 +4,14 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
+ mci.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -33,7 +36,8 @@ ath9k_hw-y:= \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
- ar9003_paprd.o
+ ar9003_paprd.o \
+ ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index a639b94f764..bc56f57b393 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -136,8 +136,8 @@ static void ath9k_ani_restart(struct ath_hw *ah)
cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
}
- ath_dbg(common, ATH_DBG_ANI,
- "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
+ ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
+ ofdm_base, cck_base);
ENABLE_REGWRITE_BUFFER(ah);
@@ -268,8 +268,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
immunityLevel, aniState->noiseFloor,
aniState->rssiThrLow, aniState->rssiThrHigh);
@@ -336,8 +335,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
const struct ani_cck_level_entry *entry_cck;
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
aniState->noiseFloor, aniState->rssiThrLow,
aniState->rssiThrHigh);
@@ -481,8 +479,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
if (ah->opmode != NL80211_IFTYPE_STATION
&& ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_dbg(common, ATH_DBG_ANI,
- "Reset ANI state opmode %u\n", ah->opmode);
+ ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
ah->stats.ast_ani_reset++;
if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -582,7 +579,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ATH9K_ANI_OFDM_DEF_LEVEL ||
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -599,7 +596,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/*
* restore historical levels for this channel
*/
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -662,7 +659,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
if (phyCnt1 < ofdm_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt1 0x%x, resetting counter value to 0x%x\n",
phyCnt1, ofdm_base);
REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
@@ -670,7 +667,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
AR_PHY_ERR_OFDM_TIMING);
}
if (phyCnt2 < cck_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt2 0x%x, resetting counter value to 0x%x\n",
phyCnt2, cck_base);
REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
@@ -713,7 +710,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
aniState->listenTime;
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
aniState->listenTime,
aniState->ofdmNoiseImmunityLevel,
@@ -748,7 +745,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
+ ath_dbg(common, ANI, "Enable MIB counters\n");
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -770,7 +767,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
+ ath_dbg(common, ANI, "Disable MIB counters\n");
REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -845,7 +842,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int i;
- ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
+ ath_dbg(common, ANI, "Initialize ANI\n");
if (use_new_ani(ah)) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
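
From ani.c onward the patch makes one mechanical substitution: the ATH_DBG_ prefix disappears from the second ath_dbg() argument (ATH_DBG_ANI becomes ANI, ATH_DBG_CALIBRATE becomes CALIBRATE, and so on). That only works if the macro reattaches the prefix itself, presumably via token pasting in a companion ath.h change that is not part of this excerpt. A compilable toy version of that trick, with placeholder mask values and the common argument dropped for brevity:

#include <stdio.h>

/* Placeholder mask values; the driver's real ones live in ath.h. */
#define ATH_DBG_ANI        0x0001
#define ATH_DBG_CALIBRATE  0x0002

/* Shortened call sites such as ath_dbg(ANI, ...) still expand to the
 * full ATH_DBG_* mask through ## concatenation; ##__VA_ARGS__ is the
 * same GNU extension the kernel relies on. */
#define ath_dbg(mask_name, fmt, ...) \
        printf("[mask=0x%04x] " fmt, ATH_DBG_##mask_name, ##__VA_ARGS__)

int main(void)
{
        ath_dbg(ANI, "Initialize ANI\n");
        ath_dbg(CALIBRATE, "starting %s Calibration\n", "IQ Mismatch");
        return 0;
}
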
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f199e9e2514..f901a17f76b 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -158,7 +158,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
- ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
@@ -1053,8 +1053,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
@@ -1157,8 +1156,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep));
return false;
}
@@ -1177,8 +1175,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1));
return false;
}
@@ -1195,23 +1192,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI, "ANI parameters:\n");
+ ath_dbg(common, ANI,
"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
aniState->noiseImmunityLevel,
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
aniState->cckWeakSigThreshold,
aniState->firstepLevel,
aniState->listenTime);
- ath_dbg(common, ATH_DBG_ANI,
- "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1295,7 +1291,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -1313,7 +1309,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -1350,7 +1346,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1358,7 +1354,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1378,7 +1374,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -1414,7 +1410,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1422,7 +1418,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1448,11 +1444,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1506,7 +1502,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e325dc..c55e5bbafc4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -61,18 +61,16 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
switch (currCal->calData->calType) {
case IQ_MISMATCH_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
break;
case ADC_GAIN_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n");
break;
case ADC_DC_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n");
break;
}
@@ -129,7 +127,7 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -151,7 +149,7 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
ah->totalAdcQEvenPhase[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcIOddPhase[i],
@@ -175,7 +173,7 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
ah->totalAdcDcOffsetQEvenPhase[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcDcOffsetIOddPhase[i],
@@ -198,12 +196,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting IQ Cal and Correction for Chain %d\n",
i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -213,12 +211,11 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
qCoffDenom = powerMeasQ / 64;
@@ -227,13 +224,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
(qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
iCoff = iCoff & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"New: Chn %d iCoff = 0x%08x\n", i, iCoff);
if (iqCorrNeg == 0x0)
iCoff = 0x40 - iCoff;
@@ -243,7 +240,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
else if (qCoff <= -16)
qCoff = -16;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
@@ -253,7 +250,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n",
i);
}
@@ -275,21 +272,17 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcQOddPhase[i];
qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC Gain Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n",
+ i, qEvenMeasOffset);
if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
iGainMismatch =
@@ -299,19 +292,19 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
((qOddMeasOffset * 32) /
qEvenMeasOffset) & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n",
+ i, iGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n",
+ i, qGainMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xfffff000;
val |= (qGainMismatch) | (iGainMismatch << 6);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC Gain Cal done for Chain %d\n", i);
}
}
@@ -337,40 +330,36 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC DC Offset Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n",
+ i, qEvenMeasOffset);
iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
numSamples) & 0x1ff;
qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
numSamples) & 0x1ff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n",
+ i, iDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n",
+ i, qDcMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xc0000fff;
val |= (qDcMismatch << 12) | (iDcMismatch << 21);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC DC Offset Cal done for Chain %d\n", i);
}
@@ -560,7 +549,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{ 0x7838, 0 },
};
- ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+ ath_dbg(common, CALIBRATE, "Running PA Calibration\n");
/* PA CAL is not needed for high power solution */
if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -741,7 +730,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -755,7 +744,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -851,7 +840,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -886,22 +875,21 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
INIT_CAL(&ah->adcgain_caldata);
INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC Gain Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
INIT_CAL(&ah->adcdc_caldata);
INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC DC Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index b5920168606..7b6417b5212 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -107,7 +107,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (isr & AR_ISR_RXORN) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"receive FIFO overrun interrupt\n");
}
@@ -143,24 +143,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (fatal_int) {
if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI FATAL interrupt\n");
}
if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI PERR interrupt\n");
}
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
REG_WRITE(ah, AR_RC, 0);
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730dcb50..8e70f0bc073 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
+#include "ar9003_mci.h"
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
@@ -51,7 +52,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
/* Kick-off cal */
@@ -63,7 +64,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
AR_PHY_65NM_CH0_THERM_START, 1);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting Temperature Compensation Calibration\n");
break;
}
@@ -193,7 +194,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -220,12 +221,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
+ ath_dbg(common, CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -235,12 +235,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
qCoffDenom = powerMeasQ / 64;
@@ -248,10 +247,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
/* Force bounds on iCoff */
if (iCoff >= 63)
@@ -272,10 +271,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iCoff = iCoff & 0x7f;
qCoff = qCoff & 0x7f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) before update = 0x%x\n",
offset_array[i],
REG_READ(ah, offset_array[i]));
@@ -286,25 +285,25 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n", i);
}
}
REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
(unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
@@ -348,7 +347,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
f2 = (f1 * f1 + f3 * f3) / result_shift;
if (!f2) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ ath_dbg(common, CALIBRATE, "Divide by 0\n");
return false;
}
@@ -469,7 +468,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
(i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0:\n"
"a0_d0=%d\n"
"a0_d1=%d\n"
@@ -509,8 +508,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
if ((mag1 == 0) || (mag2 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag1=%d, mag2=%d\n",
+ ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n",
mag1, mag2);
return false;
}
@@ -528,8 +526,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_a0_d0, phs_a0_d0,
mag_a1_d0,
phs_a1_d0, solved_eq)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ ath_dbg(common, CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed\n");
return false;
}
@@ -538,12 +536,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_rx = solved_eq[2];
phs_rx = solved_eq[3];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"chain %d: mag mismatch=%d phase mismatch=%d\n",
chain_idx, mag_tx/res_scale, phs_tx/res_scale);
if (res_scale == mag_tx) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_tx=%d, res_scale=%d\n",
mag_tx, res_scale);
return false;
@@ -556,8 +554,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_tx * 128 / res_scale);
q_i_coff = (phs_corr_tx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -571,12 +568,11 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
if (-mag_rx == res_scale) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_rx=%d, res_scale=%d\n",
mag_rx, res_scale);
return false;
@@ -589,8 +585,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_rx * 128 / res_scale);
q_i_coff = (phs_corr_rx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -604,8 +599,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
return true;
@@ -752,8 +746,7 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
AR_PHY_TX_IQCAL_START_DO_CAL, 0,
AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal is not completed.\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
return false;
}
return true;
@@ -791,13 +784,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
nmeasurement = MAX_MEASUREMENT;
for (im = 0; im < nmeasurement; im++) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Doing Tx IQ Cal for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Doing Tx IQ Cal for chain %d\n", i);
if (REG_READ(ah, txiqcal_status[i]) &
AR_PHY_TX_IQCAL_STATUS_FAILED) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal failed for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Tx IQ Cal failed for chain %d\n", i);
goto tx_iqcal_fail;
}
@@ -823,18 +816,16 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
iq_res[idx + 1] = 0xffff & REG_READ(ah,
chan_info_tab[i] + offset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "IQ RES[%d]=0x%x"
- "IQ_RES[%d]=0x%x\n",
+ ath_dbg(common, CALIBRATE,
+ "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
idx, iq_res[idx], idx + 1,
iq_res[idx + 1]);
}
if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
coeff.iqc_coeff)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Failed in calculation of \
- IQ correction.\n");
+ ath_dbg(common, CALIBRATE,
+ "Failed in calculation of IQ correction\n");
goto tx_iqcal_fail;
}
@@ -854,7 +845,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
return;
tx_iqcal_fail:
- ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
return;
}
@@ -934,10 +925,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -950,7 +943,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
- ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n",
+ ath_dbg(common, CALIBRATE, "RTT restore %s\n",
run_rtt_cal ? "failed" : "succeed");
}
run_agc_cal = run_rtt_cal;
@@ -1005,6 +998,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ /* send CAL_REQ only when BT is AWAKE. */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+ mci_hw->wlan_cal_seq);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ /* Wait for BT_CAL_GRANT for 50ms */
+ ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n");
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+ ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n");
+ else {
+ is_reusable = false;
+ ath_dbg(common, MCI, "\nMCI BT is not responding\n");
+ }
+ }
+
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
@@ -1022,6 +1040,21 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
}
+
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+ mci_hw->wlan_cal_done);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+ }
+
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
@@ -1031,9 +1064,8 @@ skip_tx_iqcal:
if (run_rtt_cal)
ar9003_hw_rtt_disable(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to complete in 1ms;"
- "noisy environment?\n");
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -1092,15 +1124,14 @@ skip_tx_iqcal:
if (ah->supp_cals & IQ_MISMATCH_CAL) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
if (ah->supp_cals & TEMP_COMP_CAL) {
INIT_CAL(&ah->tempCompCalData);
INSERT_CAL(ah, &ah->tempCompCalData);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling Temperature Compensation Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling Temperature Compensation Calibration\n");
}
/* Initialize current pointer to first element in list */
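The calibration changes above do two things: the ath_dbg() calls move to the shortened debug mask names (CALIBRATE, MCI, EEPROM, ...), and ar9003_hw_init_cal() gains an MCI handshake around the AGC calibration on 2 GHz channels while BT is awake: a WLAN_CAL_REQ GPM is sent, the driver waits up to 50 ms for BT_CAL_GRANT (marking the result non-reusable if it never arrives), and a WLAN_CAL_DONE GPM is sent once the calibration finishes. Below is a minimal stand-alone sketch of that ordering only; send_gpm() and bt_grant_pending() are hypothetical stand-ins for ar9003_mci_send_message() and ar9003_mci_wait_for_gpm(), not driver APIs.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool send_gpm(const char *msg)
{
    printf("GPM -> %s\n", msg);
    return true;
}

/* pretend BT always answers; a real poll would check hardware state */
static bool bt_grant_pending(void)
{
    return true;
}

static bool run_agc_cal_with_mci(bool is_2ghz, bool bt_awake)
{
    bool reusable = true;
    int waited_us = 0;

    if (is_2ghz && bt_awake) {
        send_gpm("WLAN_CAL_REQ");

        /* wait up to 50 ms for BT_CAL_GRANT, polling every 10 us */
        while (waited_us < 50000 && !bt_grant_pending()) {
            usleep(10);
            waited_us += 10;
        }
        if (!bt_grant_pending())
            reusable = false;    /* BT did not respond */
    }

    /* ... the AGC calibration itself would run here ... */

    if (is_2ghz && bt_awake)
        send_gpm("WLAN_CAL_DONE");

    return reusable;
}

int main(void)
{
    return run_agc_cal_with_mci(true, true) ? 0 : 1;
}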
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 3b262ba6b17..9fbcbddea16 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
.spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0xf,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .papdRateMaskHt20 = LE32(0x80c080),
- .papdRateMaskHt40 = LE32(0x80c080),
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.calTarget_freqbin_Cck = {
FREQ2FBIN(2412, 1),
- FREQ2FBIN(2484, 1),
+ FREQ2FBIN(2472, 1),
},
.calTarget_freqbin_2G = {
FREQ2FBIN(2412, 1),
@@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
FREQ2FBIN(5500, 0),
FREQ2FBIN(5600, 0),
FREQ2FBIN(5700, 0),
- FREQ2FBIN(5825, 0)
+ FREQ2FBIN(5785, 0)
},
.calPierData5G = {
{
@@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshch check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.xatten1MarginHigh = {0, 0, 0}
},
.calFreqPier5G = {
- FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5160, 0),
FREQ2FBIN(5220, 0),
FREQ2FBIN(5320, 0),
FREQ2FBIN(5400, 0),
@@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
+ case EEP_QUICK_DROP:
+ return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@@ -3061,8 +3043,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
int i;
if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "eeprom address not in range\n");
+ ath_dbg(common, EEPROM, "eeprom address not in range\n");
return false;
}
@@ -3093,8 +3074,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
return true;
error:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unable to read eeprom region at offset %d\n", address);
+ ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n",
+ address);
return false;
}
@@ -3178,13 +3159,13 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length &= 0xff;
if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
memcpy(&mptr[spot], &block[it+2], length);
spot += length;
} else if (length > 0) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Bad restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
return false;
@@ -3206,13 +3187,13 @@ static int ar9300_compress_decision(struct ath_hw *ah,
switch (code) {
case _CompressNone:
if (length != mdata_size) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM structure size mismatch memory=%d eeprom=%d\n",
mdata_size, length);
return -1;
}
memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
break;
@@ -3221,22 +3202,21 @@ static int ar9300_compress_decision(struct ath_hw *ah,
} else {
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"can't find reference eeprom struct %d\n",
reference);
return -1;
}
memcpy(mptr, eep, mdata_size);
}
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
(u8 *) (word + COMP_HDR_LEN), length);
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unknown compression code %d\n", code);
+ ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
return -1;
}
return 0;
@@ -3312,34 +3292,32 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
cptr = AR9300_BASE_ADDR_512;
else
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
read = ar9300_read_otp;
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
goto fail;
found:
- ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
+ ath_dbg(common, EEPROM, "Found valid EEPROM data\n");
for (it = 0; it < MSTATE; it++) {
if (!read(ah, cptr, word, COMP_HDR_LEN))
@@ -3350,13 +3328,12 @@ found:
ar9300_comp_hdr_unpack(word, &code, &reference,
&length, &major, &minor);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
cptr, code, reference, length, major, minor);
if ((!AR_SREV_9485(ah) && length >= 1024) ||
(AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Skipping bad header\n");
+ ath_dbg(common, EEPROM, "Skipping bad header\n");
cptr -= COMP_HDR_LEN;
continue;
}
@@ -3365,13 +3342,13 @@ found:
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
- ath_dbg(common, ATH_DBG_EEPROM,
- "checksum %x %x\n", checksum, mchecksum);
+ ath_dbg(common, EEPROM, "checksum %x %x\n",
+ checksum, mchecksum);
if (checksum == mchecksum) {
ar9300_compress_decision(ah, it, code, reference, mptr,
word, length, mdata_size);
} else {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"skipping block with bad checksum\n");
}
cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
@@ -3428,25 +3405,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("Quick Drop", modal_hdr->quick_drop);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
PR_EEP("txClip", modal_hdr->txClip);
PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
- PR_EEP("Chain0 ob", modal_hdr->ob[0]);
- PR_EEP("Chain1 ob", modal_hdr->ob[1]);
- PR_EEP("Chain2 ob", modal_hdr->ob[2]);
-
- PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
- PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
- PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
- PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
- PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
- PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
- PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
- PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
- PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
return len;
}
@@ -3503,6 +3469,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+ PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@@ -3571,13 +3538,13 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
+ __le16 val;
if (is_2ghz)
val = eep->modalHeader2G.switchcomspdt;
else
val = eep->modalHeader5G.switchcomspdt;
- return le32_to_cpu(val);
+ return le16_to_cpu(val);
}
@@ -3965,6 +3932,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
}
}
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+ s32 t[3], f[3] = {5180, 5500, 5785};
+
+ if (!quick_drop)
+ return;
+
+ if (freq < 4000)
+ quick_drop = eep->modalHeader2G.quick_drop;
+ else {
+ t[0] = eep->base_ext1.quick_drop_low;
+ t[1] = eep->modalHeader5G.quick_drop;
+ t[2] = eep->base_ext1.quick_drop_high;
+ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u32 value;
+
+ value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
+ eep->modalHeader5G.txEndToXpaOff;
+
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -3972,10 +3973,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
+ ar9003_hw_quick_drop_apply(ah, chan->channel);
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4416,8 +4419,8 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
is2GHz) + ht40PowerIncForPdadc;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
}
@@ -4436,7 +4439,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
if (ichain >= AR9300_MAX_CHAINS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid chain index, must be less than %d\n",
AR9300_MAX_CHAINS);
return -1;
@@ -4444,7 +4447,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
if (mode) { /* 5GHz */
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 5GHz cal pier index, must be less than %d\n",
AR9300_NUM_5G_CAL_PIERS);
return -1;
@@ -4454,7 +4457,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
is2GHz = 0;
} else {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n",
AR9300_NUM_2G_CAL_PIERS);
return -1;
@@ -4616,8 +4619,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "ch=%d f=%d low=%d %d h=%d %d\n",
+ ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
ichain, frequency, lfrequency[ichain],
lcorrection[ichain], hfrequency[ichain],
hcorrection[ichain]);
@@ -4672,7 +4674,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ar9003_hw_power_control_override(ah, frequency, correction, voltage,
temperature);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"for frequency=%d, calibration correction = %d %d %d\n",
frequency, correction[0], correction[1], correction[2]);
@@ -4771,7 +4773,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
@@ -4858,7 +4860,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
ctlMode, numCtlModes, isHt40CtlMode,
(pCtlMode[ctlMode] & EXT_ADDITIVE));
@@ -4872,8 +4874,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
ctlNum = AR9300_NUM_CTLS_5G;
}
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
chan->channel);
@@ -4915,7 +4918,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
scaledPower, minCtlPower);
@@ -5039,7 +5042,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
target_power_val_t2_eep[i]) >
paprd_scale_factor)) {
ah->paprd_ratemask &= ~(1 << i);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"paprd disabled for mcs %d\n", i);
}
}
@@ -5051,12 +5054,14 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
regulatory->max_power_level = targetPowerValT2[i];
}
+ ath9k_hw_update_regulatory_maxpower(ah);
+
if (test)
return;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
ah->txpower_limit = regulatory->max_power_level;
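The EEPROM changes above retire the unused ob/db_stage* fields in the default templates in favour of reserved bytes plus a signed quick_drop value, and ar9003_hw_quick_drop_apply() interpolates that value across the 5180/5500/5785 MHz anchors on 5 GHz (using the 2 GHz modal value below 4000 MHz). The sketch below is only an assumption about what a piecewise-linear ar9003_hw_power_interpolate() would produce for those anchors; the real helper is not shown in this diff, and the quick_drop numbers are made up.

#include <stdio.h>

/* piecewise-linear interpolation over three (frequency, value) anchors */
static int interp_quick_drop(int freq, const int f[3], const int t[3])
{
    if (freq <= f[0])
        return t[0];
    if (freq >= f[2])
        return t[2];
    if (freq <= f[1])
        return t[0] + (t[1] - t[0]) * (freq - f[0]) / (f[1] - f[0]);
    return t[1] + (t[2] - t[1]) * (freq - f[1]) / (f[2] - f[1]);
}

int main(void)
{
    const int f[3] = { 5180, 5500, 5785 };   /* anchors from the patch */
    const int t[3] = { -4, -8, -12 };        /* made-up quick_drop values */

    printf("quick_drop(5320 MHz) = %d\n", interp_quick_drop(5320, f, t));
    return 0;
}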
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 6335a867527..bb223fe8281 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -216,10 +216,8 @@ struct ar9300_modal_eep_header {
u8 spurChans[AR_EEPROM_MODAL_SPURS];
/* 3 Check if the register is per chain */
int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
- u8 ob[AR9300_MAX_CHAINS];
- u8 db_stage2[AR9300_MAX_CHAINS];
- u8 db_stage3[AR9300_MAX_CHAINS];
- u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 reserved[11];
+ int8_t quick_drop;
u8 xpaBiasLvl;
u8 txFrameToDataStart;
u8 txFrameToPaOn;
@@ -269,7 +267,9 @@ struct cal_ctl_data_5g {
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[13];
+ u8 future[11];
+ int8_t quick_drop_low;
+ int8_t quick_drop_high;
} __packed;
struct ar9300_BaseExtension_2 {
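The header rework above keeps the packed EEPROM layout the same size: ob[3] + db_stage2[3] + db_stage3[3] + db_stage4[3] (12 bytes) becomes reserved[11] + quick_drop (12 bytes), and future[13] becomes future[11] + quick_drop_low + quick_drop_high (13 bytes). The check below is trimmed to just those members for illustration, not the full structures from the header.

#include <stdint.h>

#define AR9300_MAX_CHAINS 3

struct modal_old {
    uint8_t ob[AR9300_MAX_CHAINS];
    uint8_t db_stage2[AR9300_MAX_CHAINS];
    uint8_t db_stage3[AR9300_MAX_CHAINS];
    uint8_t db_stage4[AR9300_MAX_CHAINS];
} __attribute__((packed));

struct modal_new {
    uint8_t reserved[11];
    int8_t quick_drop;
} __attribute__((packed));

struct ext1_old {
    uint8_t future[13];
} __attribute__((packed));

struct ext1_new {
    uint8_t future[11];
    int8_t quick_drop_low;
    int8_t quick_drop_high;
} __attribute__((packed));

/* both reworked regions must stay the same size on the EEPROM */
_Static_assert(sizeof(struct modal_old) == sizeof(struct modal_new),
               "modal header region changed size");
_Static_assert(sizeof(struct ext1_old) == sizeof(struct ext1_new),
               "base extension 1 region changed size");

int main(void)
{
    return 0;
}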
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ccde784a842..88c81c5706b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 isr = 0;
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 sync_cause = 0, async_cause;
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
+
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
- if (!isr && !sync_cause)
+ if (!isr && !sync_cause && !async_cause)
return false;
if (isr) {
@@ -294,6 +298,33 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+ u32 raw_intr, rx_msg_intr;
+
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+ ath_dbg(common, MCI,
+ "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+ raw_intr, rx_msg_intr, mci->raw_intr,
+ mci->rx_msg_intr);
+ else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status =
+ REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n");
+
+ }
+ }
+
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -302,7 +333,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -333,7 +364,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"Tx Descriptor error %x\n", ads->ds_info);
memset(ads, 0, sizeof(*ads));
return -EIO;
@@ -541,7 +572,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
memset((void *) ah->ts_ring, 0,
ah->ts_size * sizeof(struct ar9003_txs));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
ah->ts_paddr_start, ah->ts_paddr_end,
ah->ts_ring, ah->ts_size);
@@ -552,7 +583,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size)
+ u16 size)
{
ah->ts_paddr_start = ts_paddr_start;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index c50449387bf..e203b51e968 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -118,5 +118,5 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size);
+ u16 size);
#endif
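In the MAC changes above, ar9003_hw_get_isr() now latches AR_INTR_ASYNC_CAUSE once, handles AR_INTR_ASYNC_MASK_MCI by reading the raw MCI interrupt registers, treats a 0xdeadbeef read as the device having gone away, accumulates the bits into the driver's software copies, and acknowledges them by writing the same values back; the TX status ring size parameter also widens from u8 to u16. Below is a stand-alone sketch of that read/accumulate/ack pattern only; reg_read()/reg_write() are fake write-1-to-clear register accessors, not driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* fake write-1-to-clear registers: [0] = RX_MSG_RAW, [1] = RAW */
static uint32_t fake_regs[2] = { 0x00000005, 0x00000002 };

static uint32_t reg_read(int r)
{
    return fake_regs[r];
}

static void reg_write(int r, uint32_t v)
{
    fake_regs[r] &= ~v;
}

struct mci_soft_state {
    uint32_t raw_intr;
    uint32_t rx_msg_intr;
};

static bool latch_mci_interrupts(struct mci_soft_state *mci)
{
    uint32_t rx_msg = reg_read(0);
    uint32_t raw = reg_read(1);

    /* 0xdeadbeef means the device is gone or asleep; don't trust it */
    if (raw == 0xdeadbeef || rx_msg == 0xdeadbeef)
        return false;

    mci->rx_msg_intr |= rx_msg;     /* accumulate into software copies */
    mci->raw_intr |= raw;

    reg_write(0, rx_msg);           /* ack exactly the bits we saw */
    reg_write(1, raw);
    return true;
}

int main(void)
{
    struct mci_soft_state mci = { 0, 0 };

    if (latch_mci_interrupts(&mci))
        printf("raw=0x%08x rx_msg=0x%08x\n", mci.raw_intr, mci.rx_msg_intr);
    return 0;
}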
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 00000000000..709520c6835
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+ if (!AR_SREV_9462_20(ah))
+ return;
+
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+ udelay(1);
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+ u32 bit_position, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ while (time_out) {
+
+ if (REG_READ(ah, address) & bit_position) {
+
+ REG_WRITE(ah, address, bit_position);
+
+ if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+ if (bit_position &
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position &
+ (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_RX_MSG);
+ }
+ break;
+ }
+
+ udelay(10);
+ time_out -= 10;
+
+ if (time_out < 0)
+ break;
+ }
+
+ if (time_out <= 0) {
+ ath_dbg(common, MCI,
+ "MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
+ address, bit_position);
+ ath_dbg(common, MCI,
+ "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ time_out = 0;
+ }
+
+ return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+ wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x00000000;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x70000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+ MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!mci->bt_version_known &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ ath_dbg(common, MCI, "MCI Send Coex version query\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send Coex version response\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_RESPONSE);
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+ mci->wlan_ver_major;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+ mci->wlan_ver_minor;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload = &mci->wlan_channels[0];
+
+ if ((mci->wlan_channels_update == true) &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+ }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+ bool wait_done, u8 query_type)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+ bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n",
+ query_type);
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
+ if (query_btinfo) {
+ mci->need_flush_btinfo = true;
+
+ ath_dbg(common, MCI,
+ "MCI send bt_status_query fail, set flush flag again\n");
+ }
+ }
+
+ if (query_btinfo)
+ mci->query_bt = false;
+ }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n",
+ (halt) ? "halt" : "unhalt");
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+ if (halt) {
+ mci->query_bt = true;
+ /* Send the next unhalt regardless of whether the halt was sent */
+ mci->unhalt_bt_gpm = true;
+ mci->need_flush_btinfo = true;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_HALT;
+ } else
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_UNHALT;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 saved_mci_int_en;
+ u32 mci_timeout = 150;
+
+ mci->bt_state = MCI_BT_SLEEP;
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+ /* Remote Reset */
+ ath_dbg(common, MCI, "MCI Reset sequence start\n");
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+
+ /*
+ * This delay is required to cover the worst-case reset delay value
+ * (255) in the MCI_COMMAND2 register.
+ */
+
+ if (AR_SREV_9462_10(ah))
+ udelay(252);
+
+ ath_dbg(common, MCI, "MCI Send REQ_WAKE to remoter(BT)\n");
+ ar9003_mci_send_req_wake(ah, true);
+
+ if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+ ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n");
+ mci->bt_state = MCI_BT_AWAKE;
+
+ if (AR_SREV_9462_10(ah))
+ udelay(10);
+ /*
+ * We don't need to send another remote_reset at this point.
+ * If BT received the first remote_reset, its HW has been cleaned
+ * up, it can receive req_wake, and its HW will respond with
+ * sys_waking; in this case WLAN receives BT's HW sys_waking.
+ * Otherwise, if BT SW missed the initial remote_reset, that
+ * remote_reset still cleans up BT MCI RX, the req_wake wakes BT
+ * up, and BT SW responds to the req_wake with a remote_reset and
+ * sys_waking; in this case WLAN receives BT's SW sys_waking.
+ * Either way BT's RX is cleaned up, so we don't need to reply to
+ * BT's remote_reset now, if any. Similarly, whenever WLAN can
+ * receive BT's sys_waking, WLAN's RX is also fine.
+ */
+
+ /* Send SYS_WAKING to BT */
+
+ ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n");
+
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
+
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+ /*
+ * A contention reset will be received after send out
+ * sys_waking. Also BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_BT_PRI);
+
+ if (AR_SREV_9462_10(ah) || mci->is_2g) {
+ /* Send LNA_TRANS */
+ ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
+
+ if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+ !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
+ }
+
+ if (AR_SREV_9462_10(ah)) {
+ /* Send another remote_reset to deassert BT clk_req. */
+ ath_dbg(common, MCI,
+ "MCI another remote_reset to deassert clk_req\n");
+ ar9003_mci_remote_reset(ah, true);
+ udelay(252);
+ }
+ }
+
+ /* Clear the extra redundant SYS_WAKING from BT */
+ if ((mci->bt_state == MCI_BT_AWAKE) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ }
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+ u32 intr;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return false;
+
+ intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ *raw_intr = mci->raw_intr;
+ *rx_msg_intr = mci->rx_msg_intr;
+
+ /* Clear the interrupt bits after the values are read. */
+ mci->raw_intr = 0;
+ mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ if (!mci->update_2g5g &&
+ (mci->is_2g != is_2g))
+ mci->update_2g5g = true;
+
+ mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload;
+ u32 recv_type, offset;
+
+ if (msg_index == MCI_GPM_INVALID)
+ return false;
+
+ offset = msg_index << 4;
+
+ payload = (u32 *)(mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(payload);
+
+ if (recv_type == MCI_GPM_RSVD_PATTERN) {
+ ath_dbg(common, MCI, "MCI Skip RSVD GPM\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+ ath9k_hw_cfg_output(ah, 3,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else
+ return;
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+ ATH_MCI_CONFIG_MCI_OBS_GPIO);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+ REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+ u8 opcode, u32 bt_flags)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 pld[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(pld,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+ *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+ opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" :
+ opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR",
+ bt_flags);
+
+ return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+ wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval, thresh;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n",
+ is_full_sleep, is_2g);
+
+ /*
+ * GPM buffer and scheduling message buffer are not allocated
+ */
+
+ if (!mci->gpm_addr && !mci->sched_addr) {
+ ath_dbg(common, MCI,
+ "MCI GPM and schedule buffers are not allocated\n");
+ return;
+ }
+
+ if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+ ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n");
+ return;
+ }
+
+ /* Program MCI DMA related registers */
+ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+ REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+ REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+ /*
+ * To avoid the MCI state machine being affected by incoming remote
+ * MCI messages, MCI mode will be enabled later, right before
+ * resetting the MCI TX and RX paths.
+ */
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ if (is_2g && (AR_SREV_9462_20(ah)) &&
+ !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+ regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ ath_dbg(common, MCI, "MCI sched one step look ahead\n");
+
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+ thresh = MS(mci->config,
+ ATH_MCI_CONFIG_AGGR_THRESH);
+ thresh &= 7;
+ regval |= SM(1,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+ regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ } else
+ ath_dbg(common, MCI, "MCI sched aggr thresh: off\n");
+ } else
+ ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n");
+
+ if (AR_SREV_9462_10(ah))
+ regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+ /* Resetting the Rx and Tx paths of MCI */
+ regval = REG_READ(ah, AR_MCI_COMMAND2);
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ udelay(1);
+
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ if (is_full_sleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(100);
+ }
+
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+ udelay(1);
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+ (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+ SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_mci_observation_set_up(ah);
+
+ mci->ready = true;
+ ar9003_mci_prep_interface(ah);
+
+ if (en_int)
+ ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ /* wait for pending HW messages to flush out */
+ udelay(10);
+
+ /*
+ * Send LNA_TAKE and SYS_SLEEPING when
+ * 1. the reset is not after resuming from full sleep, and
+ * 2. before resetting MCI RX, to quiet BT and avoid MCI RX
+ * misalignment.
+ */
+
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+
+ udelay(5);
+
+ ath_dbg(common, MCI, "MCI Send sys sleeping\n");
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+ if (mci->bt_state != cur_bt_state) {
+ ath_dbg(common, MCI,
+ "MCI BT state mismatches. old: %d, new: %d\n",
+ mci->bt_state, cur_bt_state);
+ mci->bt_state = cur_bt_state;
+ }
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true) {
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+ }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 new_flags, to_set, to_clear;
+
+ if (AR_SREV_9462_20(ah) &&
+ mci->update_2g5g &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+ mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+ }
+
+ if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+ mci->update_2g5g = false;
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+ u32 *payload, bool queue)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 type, opcode;
+
+ if (queue) {
+
+ if (payload)
+ ath_dbg(common, MCI,
+ "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+ header,
+ *(((u8 *)payload) + 4),
+ *(((u8 *)payload) + 5),
+ *(((u8 *)payload) + 6));
+ else
+ ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n",
+ header);
+ }
+
+ /* check if the message is to be queued */
+ if (header != MCI_GPM)
+ return;
+
+ type = MCI_GPM_TYPE(payload);
+ opcode = MCI_GPM_OPCODE(payload);
+
+ if (type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (opcode) {
+ case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+ if (AR_SREV_9462_10(ah))
+ break;
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+ MCI_GPM_COEX_BT_FLAGS_READ)
+ break;
+
+ mci->update_2g5g = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n",
+ mci->is_2g ? "2G" : "5G");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n",
+ mci->is_2g ? "2G" : "5G");
+
+ break;
+
+ case MCI_GPM_COEX_WLAN_CHANNELS:
+
+ mci->wlan_channels_update = queue;
+ if (queue)
+ ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n");
+ else
+ ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n");
+ break;
+
+ case MCI_GPM_COEX_HALT_BT_GPM:
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+ mci->unhalt_bt_gpm = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <queued>\n");
+ else {
+ mci->halted_bt_gpm = false;
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+ }
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_HALT) {
+
+ mci->halted_bt_gpm = !queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI HALT BT GPM <not sent>\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ if (mci->update_2g5g) {
+ if (mci->is_2g) {
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ ath_dbg(common, MCI, "MCI Send LNA trans\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_OSLA)) {
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+ }
+ } else {
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ }
+ }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ bool msg_sent = false;
+ u32 regval;
+ u32 saved_mci_int_en;
+ int i;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return false;
+
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+ regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+ ath_dbg(common, MCI,
+ "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
+ header,
+ (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+
+ } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+ ath_dbg(common, MCI,
+ "MCI Don't send message 0x%x. BT is in sleep state\n",
+ header);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+ /* Need to clear SW_MSG_DONE raw bit before wait */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ (AR_MCI_INTERRUPT_SW_MSG_DONE |
+ AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+ if (payload) {
+ for (i = 0; (i * 4) < len; i++)
+ REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+ *(payload + i));
+ }
+
+ REG_WRITE(ah, AR_MCI_COMMAND0,
+ (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+ AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+ SM(len, AR_MCI_COMMAND0_LEN) |
+ SM(header, AR_MCI_COMMAND0_HEADER)));
+
+ if (wait_done &&
+ !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ else {
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+ msg_sent = true;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+ return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
+ mci->sched_buf = sched_buf;
+
+ ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n");
+ ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 *p_data = (u8 *) p_gpm;
+
+ if (gpm_type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+ *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+ if (recv_type == gpm_type) {
+
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ ath_dbg(common, MCI,
+ "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n");
+ continue;
+ }
+
+ break;
+ }
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
+ break;
+
+ /* not expected message */
+
+ /*
+ * Check whether it's cal_grant.
+ *
+ * While we're waiting for cal_grant in the reset routine,
+ * it's possible that BT sends out cal_request at the same
+ * time. Since BT's calibration doesn't happen that often,
+ * we let BT complete its calibration and then continue to
+ * wait for cal_grant from BT.
+ * Original: Wait for BT_CAL_GRANT.
+ * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait for
+ * BT_CAL_DONE -> wait for BT_CAL_GRANT.
+ */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ ath_dbg(common, MCI,
+ "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+
+ ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n");
+
+ continue;
+ } else {
+ ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+ *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0) {
+ time_out = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM received timeout, mismatch = %d\n", mismatch);
+ } else
+ ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n",
+ gpm_type, gpm_opcode);
+
+ while (more_data == MCI_GPM_MORE) {
+
+ ath_dbg(common, MCI, "MCI discard remaining GPM\n");
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 value = 0, more_gpm = 0, gpm_ptr;
+ u8 query_type;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ switch (state_type) {
+ case MCI_STATE_ENABLE:
+ if (mci->ready) {
+
+ value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((value == 0xdeadbeef) || (value == 0xffffffff))
+ value = 0;
+ }
+ value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+ break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value);
+ mci->gpm_idx = value;
+ break;
+ case MCI_STATE_NEXT_GPM_OFFSET:
+ case MCI_STATE_LAST_GPM_OFFSET:
+ /*
+ * Clearing the GPM RX interrupt bit here avoids a new GPM
+ * message interrupt, which could otherwise lead to a spurious
+ * interrupt after power sleep, or to multiple entries into
+ * ath_mci_intr().
+ * An empty-GPM check that returns MCI_GPM_INVALID could also
+ * alleviate this, but clearing the GPM RX interrupt bit is
+ * safe: whether this is called from hw or driver code, an
+ * interrupt bit must have been set/triggered initially.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ value = gpm_ptr;
+
+ if (value == 0)
+ value = mci->gpm_len - 1;
+ else if (value >= mci->gpm_len) {
+ if (value != 0xFFFF) {
+ value = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM offset out of range\n");
+ }
+ } else
+ value--;
+
+ if (value == 0xFFFF) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ ath_dbg(common, MCI,
+ "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+ if (gpm_ptr == mci->gpm_idx) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else {
+ for (;;) {
+
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (value != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >=
+ mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message got ptr=%d, @offset=%d, more=%d\n",
+ gpm_ptr, temp_index,
+ (more_gpm == MCI_GPM_MORE));
+
+ if (ar9003_mci_is_gpm_valid(ah,
+ temp_index)) {
+ value = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ value = MCI_GPM_INVALID;
+ break;
+ }
+ }
+ }
+ if (p_data)
+ *p_data = more_gpm;
+ }
+
+ if (value != MCI_GPM_INVALID)
+ value <<= 4;
+
+ break;
+ case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+ /* Convert the index to a byte offset */
+ value <<= 4;
+ break;
+
+ case MCI_STATE_REMOTE_SLEEP:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_REMOTE_SLEEP) ?
+ MCI_BT_SLEEP : MCI_BT_AWAKE;
+ break;
+
+ case MCI_STATE_CONT_RSSI_POWER:
+ value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+ break;
+
+ case MCI_STATE_CONT_PRIORITY:
+ value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+ break;
+
+ case MCI_STATE_CONT_TXRX:
+ value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+ break;
+
+ case MCI_STATE_BT:
+ value = mci->bt_state;
+ break;
+
+ case MCI_STATE_SET_BT_SLEEP:
+ mci->bt_state = MCI_BT_SLEEP;
+ break;
+
+ case MCI_STATE_SET_BT_AWAKE:
+ mci->bt_state = MCI_BT_AWAKE;
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm) {
+
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_CAL_START:
+ mci->bt_state = MCI_BT_CAL_START;
+ break;
+
+ case MCI_STATE_SET_BT_CAL:
+ mci->bt_state = MCI_BT_CAL;
+ break;
+
+ case MCI_STATE_RESET_REQ_WAKE:
+ ar9003_mci_reset_req_wakeup(ah);
+ mci->update_2g5g = true;
+
+ if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+ (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+ /* Check if we still have control of the GPIOs */
+ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+ ath_dbg(common, MCI,
+ "MCI reconfigure observation\n");
+ ar9003_mci_observation_set_up(ah);
+ }
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_COEX_VERSION:
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_COEX_VERSION:
+
+ if (!p_data)
+ ath_dbg(common, MCI,
+ "MCI Set BT Coex version with NULL data!!\n");
+ else {
+ mci->bt_ver_major = (*p_data >> 8) & 0xff;
+ mci->bt_ver_minor = (*p_data) & 0xff;
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_CHANNELS:
+ if (p_data) {
+ if (((mci->wlan_channels[1] & 0xffff0000) ==
+ (*(p_data + 1) & 0xffff0000)) &&
+ (mci->wlan_channels[2] == *(p_data + 2)) &&
+ (mci->wlan_channels[3] == *(p_data + 3)))
+ break;
+
+ mci->wlan_channels[0] = *p_data++;
+ mci->wlan_channels[1] = *p_data++;
+ mci->wlan_channels[2] = *p_data++;
+ mci->wlan_channels[3] = *p_data++;
+ }
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+
+ case MCI_STATE_SEND_VERSION_QUERY:
+ ar9003_mci_send_coex_version_query(ah, true);
+ break;
+
+ case MCI_STATE_SEND_STATUS_QUERY:
+ query_type = (AR_SREV_9462_10(ah)) ?
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+ ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+ break;
+
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ /*
+ * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+ * message needs to be sent; it is set whenever there is a
+ * request to send a HALT message.
+ * halted_bt_gpm indicates whether a HALT message was sent
+ * out successfully.
+ *
+ * Checking (unhalt_bt_gpm == false) instead of
+ * (halted_bt_gpm == false) makes sure we are currently in
+ * UNHALT-ed mode, so that BT can respond to the status
+ * query.
+ */
+ value = (!mci->unhalt_bt_gpm &&
+ mci->need_flush_btinfo) ? 1 : 0;
+ if (p_data)
+ mci->need_flush_btinfo =
+ (*p_data != 0) ? true : false;
+ break;
+
+ case MCI_STATE_RECOVER_RX:
+
+ ath_dbg(common, MCI, "MCI hw RECOVER_RX\n");
+ ar9003_mci_prep_interface(ah);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_NEED_FTP_STOMP:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+ break;
+
+ case MCI_STATE_NEED_TUNING:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+ break;
+
+ default:
+ break;
+
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
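The MCI_STATE_NEXT_GPM_OFFSET case above walks a ring of 16-byte GPM entries: the hardware write pointer read from AR_MCI_GPM_1 is compared against the driver's software read index (mci->gpm_idx), and a usable entry index is finally turned into a byte offset with "value <<= 4". The standalone sketch below restates only that ring arithmetic; the struct and function names are hypothetical, and the reserved-entry check (ar9003_mci_is_gpm_valid) and the register access are left out.

#include <stdbool.h>
#include <stdint.h>

#define GPM_INVALID 0xffffffffu

struct gpm_ring {
	uint32_t read_idx;	/* software read index, in entries */
	uint32_t len;		/* ring length, in entries */
};

/*
 * Return the byte offset of the next unread 16-byte entry, or
 * GPM_INVALID when the hardware write pointer has not moved past
 * the software read index.  *more is set while entries remain.
 */
static uint32_t gpm_ring_next(struct gpm_ring *r, uint32_t hw_write_ptr,
			      bool *more)
{
	uint32_t idx;

	if (hw_write_ptr == r->read_idx) {
		*more = false;
		return GPM_INVALID;
	}

	idx = r->read_idx;
	r->read_idx = (r->read_idx + 1) % r->len;
	*more = (r->read_idx != hw_write_ptr);

	return idx << 4;	/* entries are 16 bytes each */
}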
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 00000000000..798da116a44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
+
+enum mci_gpm_coex_query_type {
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
+ MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+ MCI_GPM_COEX_BT_GPM_UNHALT,
+ MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+ MCI_GPM_COEX_BT_FLAGS_READ,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS 79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+ MCI_BT_MCI_FLAGS_UPDATE_HDR | \
+ MCI_BT_MCI_FLAGS_UPDATE_PLD | \
+ MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
+#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK 0x00000000
+#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
+ ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S 12
+#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
+ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+ ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
+
+#endif
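Each ATH_MCI_CONFIG_* field above is a mask with a matching _S shift constant, so fields are packed and unpacked with the driver's SM()/MS() helpers. The sketch below re-declares those two macros locally so it stands alone, and uses the 0x00002201 default noted in the header comment as sample input; the rest is purely illustrative.

#include <stdio.h>
#include <stdint.h>

#define ATH_MCI_CONFIG_AGGR_THRESH	0x00000700
#define ATH_MCI_CONFIG_AGGR_THRESH_S	8
#define ATH_MCI_CONFIG_CLK_DIV		0x00003000
#define ATH_MCI_CONFIG_CLK_DIV_S	12

/* local copies mirroring the driver's field helpers */
#define SM(_v, _f)	(((_v) << _f##_S) & _f)
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)

int main(void)
{
	uint32_t config = 0x00002201;	/* default noted above */

	printf("clk_div     = %u\n", MS(config, ATH_MCI_CONFIG_CLK_DIV));
	printf("aggr_thresh = %u\n", MS(config, ATH_MCI_CONFIG_AGGR_THRESH));

	/* rewrite the clock-divider field to 3 */
	config = (config & ~ATH_MCI_CONFIG_CLK_DIV) |
		 SM(3u, ATH_MCI_CONFIG_CLK_DIV);
	printf("new config  = 0x%08x\n", config);
	return 0;
}

With the default value this prints clk_div = 2, aggr_thresh = 2 and new config = 0x00003201.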
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index a4450cba065..59647a3ceb7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -119,8 +119,8 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
break;
default:
delta = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Invalid tx-chainmask: %u\n", ah->txchainmask);
+ ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n",
+ ah->txchainmask);
}
power += delta;
@@ -148,13 +148,12 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
else
training_power = ar9003_get_training_power_5g(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Training power: %d, Target power: %d\n",
+ ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n",
training_power, ah->paprd_target_power);
if (training_power < 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "PAPRD target power delta out of range");
+ ath_dbg(common, CALIBRATE,
+ "PAPRD target power delta out of range\n");
return -ERANGE;
}
ah->paprd_training_power = training_power;
@@ -311,8 +310,8 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
reg_cl_gain = AR_PHY_CL_TAB_2;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "Invalid chainmask: %d\n", chain);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Invalid chainmask: %d\n", chain);
break;
}
@@ -850,7 +849,7 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"AGC2_PWR = 0x%x training done = 0x%x\n",
agc2_pwr, paprd_done);
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2330e7ede19..2589b38b689 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -199,12 +199,14 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
synth_freq = chan->channel;
}
} else {
- range = 10;
+ range = AR_SREV_9462(ah) ? 5 : 10;
max_spur_cnts = 4;
synth_freq = chan->channel;
}
for (i = 0; i < max_spur_cnts; i++) {
+ if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+ continue;
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
@@ -880,7 +882,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -898,7 +900,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -935,7 +937,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -943,7 +945,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -963,7 +965,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -999,7 +1001,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_EXT_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1007,7 +1009,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1034,8 +1036,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
if (!is_on != aniState->mrcCCKOff) {
- ath_dbg(common, ATH_DBG_ANI,
- "** ch %d: MRC CCK: %s=>%s\n",
+ ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
!aniState->mrcCCKOff ? "on" : "off",
is_on ? "on" : "off");
@@ -1050,11 +1051,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1123,8 +1124,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->curchan->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI,
- "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
@@ -1386,7 +1386,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_ENABLE));
- ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+ ath_dbg(common, RESET, "Disabled BB Watchdog\n");
return;
}
@@ -1422,8 +1422,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
AR_PHY_WATCHDOG_IDLE_MASK |
(AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
- ath_dbg(common, ATH_DBG_RESET,
- "Enabled BB Watchdog timeout (%u ms)\n",
+ ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
idle_tmo_ms);
}
@@ -1452,9 +1451,9 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
return;
status = ah->bb_watchdog_last_status;
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"\n==== BB update: BB status=0x%08x ====\n", status);
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
MS(status, AR_PHY_WATCHDOG_INFO),
MS(status, AR_PHY_WATCHDOG_DET_HANG),
@@ -1466,22 +1465,19 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
MS(status, AR_PHY_WATCHDOG_AGC_SM),
MS(status, AR_PHY_WATCHDOG_SRCH_SM));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB mode: BB_gen_controls=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
REG_READ(ah, AR_PHY_GEN_CTRL));
#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
if (common->cc_survey.cycles)
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
- ath_dbg(common, ATH_DBG_RESET,
- "==== BB update: done ====\n\n");
+ ath_dbg(common, RESET, "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
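The PCT() macro in the busy-time debug hunk above simply scales each cycle counter against the total number of sampled cycles. A tiny standalone restatement (the struct, the two-argument macro form and the counter values are made up for illustration):

#include <stdio.h>

struct cycle_survey {
	unsigned long cycles, rx_busy, rx_frame, tx_frame;
};

#define PCT(_s, _field) ((_s)._field * 100 / (_s).cycles)

int main(void)
{
	/* example: 1,000,000 cycles sampled in the survey window */
	struct cycle_survey s = { 1000000, 250000, 150000, 100000 };

	if (s.cycles)	/* guard against dividing by zero, as the driver does */
		printf("rx_clear=%lu%% rx_frame=%lu%% tx_frame=%lu%%\n",
		       PCT(s, rx_busy), PCT(s, rx_frame), PCT(s, tx_frame));
	return 0;
}

which prints rx_clear=25% rx_frame=15% tx_frame=10%.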
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 4114fe752c6..ed64114571f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -389,6 +389,8 @@
#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP 0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S 22
#define AR_PHY_AGC_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_COARSE_LOW_S 7
#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
@@ -488,6 +490,8 @@
#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@@ -999,6 +1003,7 @@
/* GLB Registers */
#define AR_GLB_BASE 0x20000
+#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 48803ee9c0d..458bedf0b0a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -16,6 +16,7 @@
#include "hw.h"
#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
#define RTT_RESTORE_TIMEOUT 1000
#define RTT_ACCESS_TIMEOUT 100
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 9c51b395b4f..dc2054f0378 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -688,8 +697,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
@@ -717,8 +726,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
@@ -1059,7 +1068,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
static const u32 ar9462_2p0_soc_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+ {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
};
static const u32 ar9462_2p0_baseband_core[][2] = {
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x15262820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c355c7},
- {0x00009e58, 0xfd897735},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
{0x00009e5c, 0xe9198724},
{0x00009fc0, 0x803e4788},
{0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739ce},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
{0x0000a43c, 0x00100000},
@@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
@@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
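The postamble and gain tables above all follow the same layout: column 0 is a register address, and columns 1-4 hold the value for the current channel mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20). A loader only has to pick one column and write address/value pairs, roughly as in the sketch below; the function and the reg_write() hook are hypothetical, since the driver uses its own INI helpers for this.

#include <stdint.h>

/* hypothetical register-write hook standing in for REG_WRITE() */
void reg_write(uint32_t addr, uint32_t val);

/* apply one mode column (1..4) of a 5-column INI table */
static void load_ini_column(const uint32_t (*tbl)[5], int rows, int column)
{
	int i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][column]);
}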
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1c269f50822..b30e9fc6433 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -25,6 +25,7 @@
#include "debug.h"
#include "common.h"
+#include "mci.h"
/*
* Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -96,7 +97,7 @@ enum buffer_type {
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define ATH_TXSTATUS_RING_SIZE 64
+#define ATH_TXSTATUS_RING_SIZE 512
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
@@ -158,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* return block-ack bitmap index given sequence and starting sequence */
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+/* return the seqno for _start + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
+
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -192,6 +196,7 @@ struct ath_txq {
u8 txq_headidx;
u8 txq_tailidx;
int pending_frames;
+ struct sk_buff_head complete_q;
};
struct ath_atx_ac {
@@ -237,6 +242,7 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+ int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
@@ -251,8 +257,9 @@ struct ath_atx_tid {
struct ath_node {
#ifdef CONFIG_ATH9K_DEBUGFS
struct list_head list; /* for sc->nodes */
- struct ieee80211_sta *sta; /* station struct we're part of */
#endif
+ struct ieee80211_sta *sta; /* station struct we're part of */
+ struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
int ps_key;
@@ -274,7 +281,6 @@ struct ath_tx_control {
};
#define ATH_TX_ERROR 0x01
-#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -443,7 +449,9 @@ struct ath_btcoex {
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
u32 btscan_no_stomp; /* in usec */
+ u32 duty_cycle;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
+ struct ath_mci_profile mci;
};
int ath_init_btcoex_timer(struct ath_softc *sc);
@@ -458,7 +466,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
-#define ATH_LED_PIN_9462 0
+#define ATH_LED_PIN_9462 4
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@@ -538,7 +546,7 @@ struct ath_ant_comb {
#define DEFAULT_CACHELINE 32
#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 10
+#define ATH_MAX_SW_RETRIES 30
#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
@@ -643,6 +651,7 @@ struct ath_softc {
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
+ struct ath_mci_coex mci_coex;
struct ath_descdma txsdma;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a13cabb9543..b8967e482e6 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -117,11 +117,10 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->beacon.cabq;
- ath_dbg(common, ATH_DBG_XMIT,
- "transmitting CABQ packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
+ ath_dbg(common, XMIT, "CABQ TX failed\n");
dev_kfree_skb_any(skb);
}
}
@@ -204,7 +203,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
if (skb && cabq_depth) {
if (sc->nvifs > 1) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Flushing previous cabq traffic\n");
ath_draintxq(sc, cabq, false);
}
@@ -297,7 +296,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
avp->tsf_adjust = cpu_to_le64(tsfadjust);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
avp->av_bslot, intval, (unsigned long long)tsfadjust);
@@ -357,6 +356,7 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
u32 bfaddr, bc = 0;
@@ -371,15 +371,14 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.bmisscnt++;
if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"missed %u consecutive beacons\n",
sc->beacon.bmisscnt);
ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
if (sc->beacon.bmisscnt > 3)
ath9k_hw_bstuck_nfcal(ah);
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "beacon is officially stuck\n");
+ ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -406,7 +405,7 @@ void ath_beacon_tasklet(unsigned long data)
slot = (tsftu % (intval * ATH_BCBUF)) / intval;
vif = sc->beacon.bslot[slot];
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
slot, tsf, tsftu / ATH_BCBUF, intval, vif);
} else {
@@ -424,7 +423,7 @@ void ath_beacon_tasklet(unsigned long data)
}
if (sc->beacon.bmisscnt != 0) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"resume beacon xmit after %u misses\n",
sc->beacon.bmisscnt);
sc->beacon.bmisscnt = 0;
@@ -458,10 +457,12 @@ void ath_beacon_tasklet(unsigned long data)
if (bfaddr != 0) {
/* NB: cabq traffic should already be queued and primed */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
- ath9k_hw_txstart(ah, sc->beacon.beaconq);
+
+ if (!edma)
+ ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (edma) {
spin_lock_bh(&sc->sc_pcu_lock);
ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -541,7 +542,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* No need to configure beacon if we are not associated */
if (!common->curaid) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return;
}
@@ -631,8 +632,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_dbg(common, BEACON,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -660,8 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
nexttbtt = tsf + intval;
- ath_dbg(common, ATH_DBG_BEACON,
- "IBSS nexttbtt %u intval %u (%u)\n",
+ ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
nexttbtt, intval, conf->beacon_interval);
/*
@@ -699,9 +699,8 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(sc->nbcnvifs > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Changing beacon interval of multiple \
- AP interfaces !\n");
+ ath_dbg(common, CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
return false;
}
/*
@@ -710,7 +709,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
*/
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"STA vif's beacon not allowed on AP mode\n");
return false;
}
@@ -722,7 +721,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(vif->type == NL80211_IFTYPE_STATION) &&
(sc->sc_flags & SC_OP_BEACONS) &&
!avp->primary_sta_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -802,8 +801,7 @@ void ath_set_beacon(struct ath_softc *sc)
ath_beacon_config_sta(sc, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 012263968d6..a6712a95d76 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -21,7 +21,7 @@ enum ath_bt_mode {
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
- ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
+ ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
};
struct ath_btcoex_config {
@@ -36,6 +36,20 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear;
};
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+ { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
+
+static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+ { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+ { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+ { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+};
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{
@@ -54,6 +68,9 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
@@ -85,6 +102,9 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* connect bt_active to baseband */
REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
@@ -107,6 +127,9 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* btcoex 3-wire */
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
@@ -133,6 +156,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* Configure the desired GPIO port for TX_FRAME output */
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
@@ -144,6 +170,9 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
}
@@ -152,27 +181,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
{
- struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
u32 val;
+ int i;
/*
* Program coex mode and weight registers to
* enable coex 3-wire
*/
- REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
- REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
+ REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
-
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+ btcoex->bt_weight[i]);
} else
- REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+ REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
@@ -185,23 +213,39 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+ ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex->wlan_weight[i]);
+
+ REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ btcoex->enabled = true;
+}
+
void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- switch (btcoex_hw->scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(ah)) {
case ATH_BTCOEX_CFG_NONE:
- break;
+ return;
case ATH_BTCOEX_CFG_2WIRE:
ath9k_hw_btcoex_enable_2wire(ah);
break;
case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_enable_3wire(ah);
break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath9k_hw_btcoex_enable_mci(ah);
+ return;
}
REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +259,18 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
void ath9k_hw_btcoex_disable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+ btcoex_hw->enabled = false;
+ if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex_hw->wlan_weight[i]);
+ }
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +283,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
}
-
- ah->btcoex_hw.enabled = false;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
- ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
-
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
- break;
-
- default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
+ ar9003_wlan_weights[stomp_type];
+ int i;
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex->wlan_weight[i] = weight[i];
}
-
- ath9k_hw_btcoex_enable(ah);
}
/*
@@ -279,6 +312,9 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_btcoex_bt_stomp(ah, stomp_type);
return;
@@ -298,11 +334,8 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
AR_STOMP_NONE_WLAN_WGHT);
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
break;
}
-
- ath9k_hw_btcoex_enable(ah);
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
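Since the ath9k_hw_btcoex_enable() call is dropped from the stomp path above, picking a stomp level and programming the weight registers are now two separate steps for the caller. A minimal caller-side sketch of that order, assuming the usual ath9k driver context ("hw.h") and using a hypothetical wrapper name:

#include "hw.h"	/* ath9k driver context assumed */

/* hypothetical helper, e.g. run whenever the coex policy changes */
static void example_apply_stomp(struct ath_hw *ah)
{
	/* fill btcoex_hw.wlan_weight[]/bt_weight[] for this policy ... */
	ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);

	/* ... then push the weights to the coex registers */
	ath9k_hw_btcoex_enable(ah);
}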
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 234f77689b1..278361c867c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,22 +36,57 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
+#define AR9300_NUM_BT_WEIGHTS 4
+#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
- ATH_BTCOEX_NO_STOMP,
ATH_BTCOEX_STOMP_ALL,
ATH_BTCOEX_STOMP_LOW,
- ATH_BTCOEX_STOMP_NONE
+ ATH_BTCOEX_STOMP_NONE,
+ ATH_BTCOEX_STOMP_LOW_FTP,
+ ATH_BTCOEX_STOMP_MAX
};
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE,
+ ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+ u32 raw_intr;
+ u32 rx_msg_intr;
+ u32 cont_status;
+ u32 gpm_addr;
+ u32 gpm_len;
+ u32 gpm_idx;
+ u32 sched_addr;
+ u32 wlan_channels[4];
+ u32 wlan_cal_seq;
+ u32 wlan_cal_done;
+ u32 config;
+ u8 *gpm_buf;
+ u8 *sched_buf;
+ bool ready;
+ bool update_2g5g;
+ bool is_2g;
+ bool query_bt;
+ bool unhalt_bt_gpm; /* need send UNHALT */
+ bool halted_bt_gpm; /* HALT sent */
+ bool need_flush_btinfo;
+ bool bt_version_known;
+ bool wlan_channels_update;
+ u8 wlan_ver_major;
+ u8 wlan_ver_minor;
+ u8 bt_ver_major;
+ u8 bt_ver_minor;
+ u8 bt_state;
};
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
+ struct ath9k_hw_mci mci;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;
@@ -59,6 +94,8 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+ u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
};
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 99538810a31..172e33db7f4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -116,7 +116,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
if (h[i].privNF > limit->max) {
high_nf_mid = true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
(cal->nfcal_interference ?
@@ -199,8 +199,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
if (currCal->calState != CAL_DONE) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Calibration state incorrect, %d\n",
+ ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
currCal->calState);
return true;
}
@@ -208,8 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
if (!(ah->supp_cals & currCal->calData->calType))
return true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Resetting Cal %d state for channel %u\n",
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType, conf->channel->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
@@ -302,7 +300,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* noisefloor until the next calibration timer.
*/
if (j == 10000) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
return;
@@ -344,17 +342,17 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
if (!nf[i])
continue;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF calibrated [%s] [chain %d] is %d\n",
(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
if (nf[i] > ATH9K_NF_TOO_HIGH) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
i, nf[i], ATH9K_NF_TOO_HIGH);
nf[i] = limit->max;
} else if (nf[i] < limit->min) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
i, nf[i], limit->min);
nf[i] = limit->nominal;
@@ -373,7 +371,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
chan->channelFlags &= (~CHANNEL_CW_INT);
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
return false;
}
@@ -383,7 +381,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
nf = nfarray[0];
if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"noise floor failed detected; detected %d, threshold %d\n",
nf, nfThresh);
chan->channelFlags |= CHANNEL_CW_INT;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2741203e803..68d972bf232 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -709,24 +709,29 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
len += snprintf(buf + len, size - len,
"Stations:\n"
- " tid: addr sched paused buf_q-empty an ac\n"
+ " tid: addr sched paused buf_q-empty an ac baw\n"
" ac: addr sched tid_q-empty txq\n");
spin_lock(&sc->nodes_lock);
list_for_each_entry(an, &sc->nodes, list) {
+ unsigned short ma = an->maxampdu;
+ if (ma == 0)
+ ma = 65535; /* see ath_lookup_rate */
len += snprintf(buf + len, size - len,
- "%pM\n", an->sta->addr);
+ "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
+ an->vif->addr, an->sta->addr, ma,
+ (unsigned int)(an->mpdudensity));
if (len >= size)
goto done;
for (q = 0; q < WME_NUM_TID; q++) {
struct ath_atx_tid *tid = &(an->tid[q]);
len += snprintf(buf + len, size - len,
- " tid: %p %s %s %i %p %p\n",
+ " tid: %p %s %s %i %p %p %hu\n",
tid, tid->sched ? "sched" : "idle",
tid->paused ? "paused" : "running",
skb_queue_empty(&tid->buf_q),
- tid->an, tid->ac);
+ tid->an, tid->ac, tid->baw_size);
if (len >= size)
goto done;
}
@@ -851,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
- if (flags & ATH_TX_BAR)
+ if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
@@ -1625,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_debug);
#endif
+
+ ath9k_dfs_init_debug(sc);
+
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352ac2d6..776a24ada60 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
#include "hw.h"
#include "rc.h"
+#include "dfs_debug.h"
struct ath_txq;
struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
+ struct ath_dfs_stats dfs_stats;
u32 reset[__RESET_TYPE_MAX];
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 00000000000..f4f56aff1e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ * as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+ u16 freq;
+ u64 ts;
+ u32 width;
+ u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+ u8 pulse_bw_info;
+ u8 rssi;
+ u8 ext_rssi;
+ u8 pulse_length_ext;
+ u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+ const u32 AR93X_NSECS_PER_DUR = 800;
+ const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+ u32 nsecs;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+ nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+ else
+ nsecs = dur * AR93X_NSECS_PER_DUR;
+
+ return (nsecs + 500) / 1000;
+}
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+ struct ath_radar_data *are,
+ struct dfs_radar_pulse *drp)
+{
+ u8 rssi;
+ u16 dur;
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
+ "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+ are->pulse_bw_info,
+ are->pulse_length_pri, are->rssi,
+ are->pulse_length_ext, are->ext_rssi);
+
+ /*
+ * Only the last 2 bits of the BW info are relevant, they indicate
+ * which channel the radar was detected in.
+ */
+ are->pulse_bw_info &= 0x03;
+
+ switch (are->pulse_bw_info) {
+ case PRI_CH_RADAR_FOUND:
+ /* radar in ctrl channel */
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, pri_phy_errors);
+ /*
+ * cannot use ctrl channel RSSI
+ * if extension channel is stronger
+ */
+ rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+ break;
+ case EXT_CH_RADAR_FOUND:
+ /* radar in extension channel */
+ dur = are->pulse_length_ext;
+ DFS_STAT_INC(sc, ext_phy_errors);
+ /*
+ * cannot use extension channel RSSI
+ * if control channel is stronger
+ */
+ rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+ break;
+ case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+ /*
+ * Conducted testing, when pulse is on DC, both pri and ext
+ * durations are reported to be same
+ *
+ * Radiated testing, when pulse is on DC, different pri and
+ * ext durations are reported, so take the larger of the two
+ */
+ if (are->pulse_length_ext >= are->pulse_length_pri)
+ dur = are->pulse_length_ext;
+ else
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, dc_phy_errors);
+
+ /* when both are present use stronger one */
+ rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+ break;
+ default:
+ /*
+ * Bogus bandwidth info was received in descriptor,
+ * so ignore this PHY error
+ */
+ DFS_STAT_INC(sc, bwinfo_discards);
+ return false;
+ }
+
+ if (rssi == 0) {
+ DFS_STAT_INC(sc, rssi_discards);
+ return false;
+ }
+
+ /*
+ * TODO: check chirping pulses
+ * checks for chirping are dependent on the DFS regulatory domain
+ * used, which is yet TBD
+ */
+
+ /* convert duration to usecs */
+ drp->width = dur_to_usecs(sc->sc_ah, dur);
+ drp->rssi = rssi;
+
+ DFS_STAT_INC(sc, pulses_detected);
+ return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime)
+{
+ struct ath_radar_data ard;
+ u16 datalen;
+ char *vdata_end;
+ struct dfs_radar_pulse drp;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+ (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+ ath_dbg(common, DFS,
+ "Error: rs_phyerr=0x%x not a radar error\n",
+ rs->rs_phyerr);
+ return;
+ }
+
+ datalen = rs->rs_datalen;
+ if (datalen == 0) {
+ DFS_STAT_INC(sc, datalen_discards);
+ return;
+ }
+
+ ard.rssi = rs->rs_rssi_ctl0;
+ ard.ext_rssi = rs->rs_rssi_ext0;
+
+ /*
+ * hardware stores this as 8 bit signed value.
+ * we will cap it at 0 if it is a negative number
+ */
+ if (ard.rssi & 0x80)
+ ard.rssi = 0;
+ if (ard.ext_rssi & 0x80)
+ ard.ext_rssi = 0;
+
+ vdata_end = (char *)data + datalen;
+ ard.pulse_bw_info = vdata_end[-1];
+ ard.pulse_length_ext = vdata_end[-2];
+ ard.pulse_length_pri = vdata_end[-3];
+
+ ath_dbg(common, DFS,
+ "bw_info=%d, length_pri=%d, length_ext=%d, "
+ "rssi_pri=%d, rssi_ext=%d\n",
+ ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+ ard.rssi, ard.ext_rssi);
+
+ drp.freq = ah->curchan->channel;
+ drp.ts = mactime;
+ if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+ static u64 last_ts;
+ ath_dbg(common, DFS,
+ "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+ last_ts = drp.ts;
+ /*
+ * TODO: forward pulse to pattern detector
+ *
+ * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+ * drp.width, drp.rssi);
+ */
+ }
+}
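The new dfs.c above converts raw hardware pulse durations into microseconds in dur_to_usecs(): each duration unit is 800 ns on the normal clock and 8000/11 ns on the fast 5 GHz clock, and the +500 before the divide rounds to the nearest microsecond. A standalone sketch of just that conversion; the fast_clock flag stands in for IS_CHAN_A_FAST_CLOCK().

/* Standalone version of the dur_to_usecs() conversion from dfs.c above. */
#include <stdio.h>

static unsigned int dur_to_usecs(unsigned int dur, int fast_clock)
{
	const unsigned int NSECS_PER_DUR = 800;
	const unsigned int NSECS_PER_DUR_FAST = 8000 / 11;
	unsigned int nsecs = dur * (fast_clock ? NSECS_PER_DUR_FAST : NSECS_PER_DUR);

	return (nsecs + 500) / 1000;	/* round to nearest microsecond */
}

int main(void)
{
	/* e.g. a reported duration of 25 hardware units */
	printf("normal clock: %u us\n", dur_to_usecs(25, 0));	/* 20 us */
	printf("fast clock:   %u us\n", dur_to_usecs(25, 1));	/* 18 us */
	return 0;
}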
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 00000000000..c2412857f12
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime);
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 00000000000..106d031d834
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+ len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ "enabled" : "disabled");
+ ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
+ ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
+ ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
+ ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
+ ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
+ ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+ ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = read_file_dfs,
+ .open = ath9k_dfs_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
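read_file_dfs() above builds its whole report in a kernel buffer with snprintf() and then hands it to simple_read_from_buffer(); the ATH9K_DFS_STAT() macro just appends one "%28s : %10u" line per counter. A userspace analog of that formatting loop, with invented counter values.

/* Userspace analog of the "%28s : %10u" stat formatting in read_file_dfs().
 * Counter values are invented for the example. */
#include <stdio.h>
#include <stdlib.h>

struct dfs_stats {
	unsigned int pulses_detected;
	unsigned int rssi_discards;
};

int main(void)
{
	struct dfs_stats st = { .pulses_detected = 42, .rssi_discards = 3 };
	size_t size = 8000, len = 0;
	char *buf = calloc(1, size);

	if (!buf)
		return 1;

	/* same pattern as the ATH9K_DFS_STAT() macro: append one line per counter */
	len += snprintf(buf + len, size - len, "%28s : %10u\n",
			"DFS pulses detected", st.pulses_detected);
	len += snprintf(buf + len, size - len, "%28s : %10u\n",
			"RSSI discards", st.rssi_discards);

	if (len > size)
		len = size;	/* snprintf may report more than fits in the buffer */

	fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}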
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 00000000000..4911724cb44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef ATH9K_DFS_DEBUG_H
+#define ATH9K_DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected: No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards: No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards: No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors: No. of pulses reported for primary channel
+ * @ext_phy_errors: No. of pulses reported for extension channel
+ * @dc_phy_errors: No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+ u32 pulses_detected;
+ u32 datalen_discards;
+ u32 rssi_discards;
+ u32 bwinfo_discards;
+ u32 pri_phy_errors;
+ u32 ext_phy_errors;
+ u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* ATH9K_DFS_DEBUG_H */
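dfs.h and dfs_debug.h above compile the DFS hooks away when the corresponding config option is off: the function falls back to an empty static inline and the stat macro to a do { } while (0) no-op, so call sites need no #ifdefs. A minimal sketch of the same idiom, using a made-up MYFEATURE flag (build with -DMYFEATURE to enable).

/* Sketch of the compile-away idiom used in dfs.h/dfs_debug.h above. */
#include <stdio.h>

#ifdef MYFEATURE
#define FEATURE_STAT_INC(s, c)	((s)->c++)
static void feature_report(unsigned int hits)
{
	printf("feature hits: %u\n", hits);
}
#else
/* do { } while (0) keeps the no-op safe inside an if without braces */
#define FEATURE_STAT_INC(s, c)	do { } while (0)
static inline void feature_report(unsigned int hits) { }
#endif

struct stats { unsigned int hits; };

int main(void)
{
	struct stats s = { 0 };

	if (s.hits < 100)
		FEATURE_STAT_INC(&s, hits);	/* expands to nothing when disabled */

	feature_report(s.hits);
	return 0;
}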
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e46f751ab50..c4352323331 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -305,8 +305,7 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
+ ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
break;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 49abd34be74..5ff7ab96512 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -249,7 +249,8 @@ enum eeprom_param {
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
- EEP_ANTENNA_GAIN_5G
+ EEP_ANTENNA_GAIN_5G,
+ EEP_QUICK_DROP
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f987f..4322ac80c20 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -38,7 +38,7 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -62,8 +62,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -204,8 +203,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -227,7 +225,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -249,7 +247,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
u32 integer;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing\n");
word = swab16(eep->baseEepHeader.length);
@@ -435,11 +433,11 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC: Chain %d | "
"PDADC %3d Value %3d | "
"PDADC %3d Value %3d | "
@@ -473,7 +471,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
u16 twiceMinEdgePower;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
@@ -542,9 +540,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
@@ -1081,8 +1077,7 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1090,8 +1085,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a87ce..f272236d805 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -41,7 +41,7 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -66,8 +66,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -197,8 +196,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -220,7 +218,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -569,7 +567,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +667,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
+ twiceMaxEdgePower = MAX_RATE_POWER;
/* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
struct cal_ctl_edges *pRdEdgesPower;
@@ -1040,8 +1039,7 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1049,8 +1047,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e62967167..619b95d764f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -121,8 +121,7 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -279,8 +278,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -303,7 +301,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -325,7 +323,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
u32 integer, j;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing.\n");
word = swab16(eep->baseEepHeader.length);
@@ -385,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
((eep->baseEepHeader.version & 0xff) > 0x0a) &&
(eep->baseEepHeader.pwdclkind == 0))
- ah->need_an_top2_fixup = 1;
+ ah->need_an_top2_fixup = true;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
(AR_SREV_9280(ah)))
@@ -965,15 +963,12 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | PDADC %3d "
- "Value %3d | PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | PDADC %3d "
- "Value %3d |\n",
+ ath_dbg(common, EEPROM,
+ "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n",
i, 4 * j, pdadcValues[4 * j],
4 * j + 1, pdadcValues[4 * j + 1],
4 * j + 2, pdadcValues[4 * j + 2],
@@ -1000,7 +995,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1116,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1280,7 +1273,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+ ath_dbg(ath9k_hw_common(ah), EEPROM,
"Invalid chainmask configuration\n");
break;
}
@@ -1398,8 +1391,7 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1407,8 +1399,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 655576c8fda..597c84e31ad 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -130,12 +130,12 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT scan detected\n");
sc->sc_flags |= (SC_OP_BT_SCAN |
SC_OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT priority traffic detected\n");
sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
}
@@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data)
bool is_btscan;
ath9k_ps_wakeup(sc);
- ath_detect_bt_priority(sc);
-
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_detect_bt_priority(sc);
is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
spin_lock_bh(&btcoex->btcoex_lock);
@@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -212,8 +213,9 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
+ timer_period = btcoex->btcoex_period / 1000;
mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+ msecs_to_jiffies(timer_period));
}
/*
@@ -228,8 +230,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "no stomp timer running\n");
+ ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
@@ -239,6 +240,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
ath9k_ps_restore(sc);
}
@@ -247,6 +249,9 @@ int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE)
+ return 0;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -277,8 +282,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
@@ -300,6 +307,9 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
del_timer_sync(&btcoex->period_timer);
if (btcoex->hw_timer_enabled)
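The btcoex timers in gpio.c above store the period in microseconds (ATH_BTCOEX_DEF_BT_PERIOD * 1000), derive the "no stomp" window as (100 - duty_cycle) percent of that period, and divide by 1000 again when re-arming the timer in milliseconds. A worked example of that arithmetic; the 45 ms period and 55% duty cycle are assumed stand-ins for ATH_BTCOEX_DEF_BT_PERIOD and ATH_BTCOEX_DEF_DUTY_CYCLE, whose values are not shown in this diff.

/* Worked example of the btcoex duty-cycle split computed in gpio.c above.
 * The 45 ms period and 55 % duty cycle are assumed defaults for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int def_bt_period = 45;	/* ms, assumed */
	unsigned int def_duty_cycle = 55;	/* percent of the period spent stomping, assumed */

	/* gpio.c keeps the period in microseconds ... */
	unsigned int btcoex_period = def_bt_period * 1000;
	unsigned int btcoex_no_stomp = (100 - def_duty_cycle) * btcoex_period / 100;

	/* ... and converts back to ms when re-arming the period timer */
	unsigned int timer_period_ms = btcoex_period / 1000;

	printf("period       : %u us\n", btcoex_period);	/* 45000 us */
	printf("no-stomp slot: %u us\n", btcoex_no_stomp);	/* 20250 us */
	printf("timer period : %u ms\n", timer_period_ms);	/* 45 ms */
	return 0;
}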
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 57fe22b2424..2eadffb7971 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -167,9 +167,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+ ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -224,9 +224,8 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d "
- "imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -273,9 +272,8 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "IBSS Beacon config, intval: %d, nexttbtt: %u, "
- "resp_time: %d, imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -323,7 +321,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
tx_slot = ath9k_htc_tx_get_slot(priv);
if (tx_slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n");
+ ath_dbg(common, XMIT, "No free CAB slot\n");
dev_kfree_skb_any(skb);
goto next;
}
@@ -333,8 +331,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
ath9k_htc_tx_clear_slot(priv, tx_slot);
dev_kfree_skb_any(skb);
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to send CAB frame\n");
+ ath_dbg(common, XMIT, "Failed to send CAB frame\n");
} else {
spin_lock_bh(&priv->tx.tx_lock);
priv->tx.queued_cnt++;
@@ -409,7 +406,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
ret = htc_send(priv->htc, beacon);
if (ret != 0) {
if (ret == -ENOMEM) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Failed to send beacon, no free TX buffer\n");
}
dev_kfree_skb_any(beacon);
@@ -434,7 +431,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
slot, tsf, tsftu, intval);
@@ -450,8 +447,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
if (swba->beacon_pending != 0) {
priv->cur_beacon_conf.bmiss_cnt++;
if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "Beacon stuck, HW reset\n");
+ ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
}
@@ -459,7 +455,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
}
if (priv->cur_beacon_conf.bmiss_cnt) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
priv->cur_beacon_conf.bmiss_cnt);
priv->cur_beacon_conf.bmiss_cnt = 0;
@@ -495,8 +491,8 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Added interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->bslot);
}
void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
@@ -509,8 +505,8 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Removed interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
+ avp->bslot);
}
/*
@@ -536,8 +532,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
- ath_dbg(common, ATH_DBG_CONFIG,
- "tsfadjust is: %llu for bslot: %d\n",
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->bslot);
}
@@ -568,7 +563,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
(priv->num_ap_vif > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Changing beacon interval of multiple AP interfaces !\n");
return false;
}
@@ -579,7 +574,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
*/
if (priv->num_ap_vif &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"HW in AP mode, cannot set STA beacon parameters\n");
return false;
}
@@ -597,7 +592,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
&beacon_configured);
if (beacon_configured) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -637,8 +632,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
@@ -659,8 +653,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index e3a02eb8e0c..6506e1fd503 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -36,12 +36,12 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT scan detected\n");
priv->op_flags |= (OP_BT_SCAN |
OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT priority traffic detected\n");
priv->op_flags |= OP_BT_PRIORITY_DETECTED;
}
@@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(priv->ah);
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -101,19 +102,22 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = priv->op_flags & OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "time slice work for bt and wlan\n");
+ ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(priv->ah);
}
void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
{
struct ath_btcoex *btcoex = &priv->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -132,7 +136,10 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_hw *ah = priv->ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -146,6 +153,9 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
*/
void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
{
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
cancel_delayed_work_sync(&priv->coex_period_work);
cancel_delayed_work_sync(&priv->duty_cycle_work);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 966661c9e58..9be10a2da1c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -299,8 +299,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER READ FAILED: (0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
reg_offset, r);
return -EIO;
}
@@ -327,7 +326,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
(u8 *)tmpval, sizeof(u32) * count,
100);
if (unlikely(ret)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"Multiple REGISTER READ FAILED (count: %d)\n", count);
}
@@ -352,8 +351,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n",
reg_offset, r);
}
}
@@ -384,7 +382,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -434,7 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -512,8 +510,7 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -610,7 +607,7 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
{
int qnum;
- switch (priv->ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_3WIRE:
@@ -704,7 +701,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
- ath9k_init_btcoex(priv);
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
+ ath9k_init_btcoex(priv);
}
return 0;
@@ -876,9 +874,8 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
goto err_world;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
- "BE:%d, BK:%d, VI:%d, VO:%d\n",
+ ath_dbg(common, CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n",
priv->wmi_cmd_ep,
priv->beacon_ep,
priv->cab_ep,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8a495..ef4c6066129 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -266,7 +266,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_wmi_event_drain(priv);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
priv->ah->curchan->channel,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
@@ -415,7 +415,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
priv->ah->is_monitoring = true;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Attached a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -427,7 +427,7 @@ err_sta:
*/
__ath9k_htc_remove_monitor_interface(priv);
err_vif:
- ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+ ath_dbg(common, FATAL, "Unable to attach a monitor interface\n");
return ret;
}
@@ -452,7 +452,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
priv->nstations--;
priv->ah->is_monitoring = false;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -512,11 +512,11 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for: %pM (idx: %d)\n",
sta->addr, tsta.sta_index);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for VIF %d (idx: %d)\n",
avp->index, tsta.sta_index);
}
@@ -556,11 +556,11 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for: %pM (idx: %d)\n",
sta->addr, sta_idx);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for VIF %d (idx: %d)\n",
avp->index, sta_idx);
}
@@ -665,7 +665,7 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
ath9k_htc_setup_rate(priv, sta, &trate);
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
sta->addr, be32_to_cpu(trate.capflags));
}
@@ -692,7 +692,7 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
bss_conf->bssid, be32_to_cpu(trate.capflags));
}
@@ -721,11 +721,11 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Unable to %s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"%s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "Starting" : "Stopping",
sta->addr, tid);
@@ -784,7 +784,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -793,8 +793,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
if ((timestamp - common->ani.shortcal_timer) >=
short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -808,7 +807,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if (ah->config.enable_ani &&
+ (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -838,7 +838,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (priv->ah->config.enable_ani)
+ if (ah->config.enable_ani)
cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
@@ -865,7 +865,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize) {
- ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
+ ath_dbg(common, XMIT, "No room for padding\n");
goto fail_tx;
}
skb_push(skb, padsize);
@@ -874,13 +874,13 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
slot = ath9k_htc_tx_get_slot(priv);
if (slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n");
+ ath_dbg(common, XMIT, "No free TX slot\n");
goto fail_tx;
}
ret = ath9k_htc_tx_start(priv, skb, slot, false);
if (ret != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
+ ath_dbg(common, XMIT, "Tx failed\n");
goto clear_slot;
}
@@ -908,7 +908,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -942,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ret = ath9k_htc_update_cap_target(priv, 0);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
priv->op_flags &= ~OP_INVALID;
@@ -957,7 +957,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
@@ -979,7 +979,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
}
@@ -1009,7 +1009,8 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_htc_cancel_btcoex_work(priv);
@@ -1026,7 +1027,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
priv->op_flags |= OP_INVALID;
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
}
@@ -1119,8 +1120,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_start_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n",
+ vif->type, avp->index);
out:
ath9k_htc_ps_restore(priv);
@@ -1176,7 +1177,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_stop_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+ ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1201,8 +1202,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&priv->htc_pm_lock);
if (enable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
+ ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
ath9k_htc_radio_enable(hw);
}
@@ -1224,7 +1224,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
- ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
@@ -1264,8 +1264,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
mutex_unlock(&priv->htc_pm_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "idle: disabling radio\n");
+ ath_dbg(common, CONFIG, "idle: disabling radio\n");
ath9k_htc_radio_disable(hw);
}
@@ -1297,7 +1296,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
if (priv->op_flags & OP_INVALID) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
return;
@@ -1308,8 +1307,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1376,7 +1375,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
qnum = get_hw_qnum(queue, priv->hwq_map);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1411,7 +1410,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
return -ENOSPC;
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
switch (cmd) {
@@ -1447,8 +1446,7 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_write_associd(priv->ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -1486,7 +1484,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath9k_htc_ps_wakeup(priv);
if (changed & BSS_CHANGED_ASSOC) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
bss_conf->assoc);
bss_conf->assoc ?
@@ -1511,8 +1509,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
+ ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
+ bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif);
@@ -1524,7 +1522,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
* AP/IBSS interfaces.
*/
if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
priv->op_flags &= ~OP_ENABLE_BEACON;
@@ -1542,7 +1540,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
(vif->type == NL80211_IFTYPE_AP)) {
priv->op_flags |= OP_TSF_RESET;
}
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon interval changed for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_beacon_config(priv, vif);
@@ -1732,8 +1730,7 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set bitrate masks: 0x%x, 0x%x\n",
+ ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
mask->control[IEEE80211_BAND_2GHZ].legacy,
mask->control[IEEE80211_BAND_5GHZ].legacy);
out:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2d81c700e20..3e40a646151 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -355,7 +355,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
vif_idx = avp->index;
} else {
if (!priv->ah->is_monitoring) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(priv->ah), XMIT,
"VIF is null, but no monitor interface !\n");
return -EINVAL;
}
@@ -620,8 +620,7 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
}
spin_unlock_irqrestore(&epid_queue->lock, flags);
- ath_dbg(common, ATH_DBG_XMIT,
- "No matching packet for cookie: %d, epid: %d\n",
+ ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n",
txs->cookie, epid);
return NULL;
@@ -705,8 +704,7 @@ static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb
if (time_after(jiffies,
tx_ctl->timestamp +
msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Dropping a packet due to TX timeout\n");
+ ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n");
return true;
}
@@ -753,7 +751,7 @@ void ath9k_htc_tx_cleanup_timer(unsigned long data)
skb = ath9k_htc_tx_get_packet(priv, &event->txs);
if (skb) {
- ath_dbg(common, ATH_DBG_XMIT,
+ ath_dbg(common, XMIT,
"Found packet for cookie: %d, epid: %d\n",
event->txs.cookie,
MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
@@ -1167,8 +1165,7 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
spin_unlock(&priv->rx.rxbuflock);
if (rxbuf == NULL) {
- ath_dbg(common, ATH_DBG_ANY,
- "No free RX buffer\n");
+ ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233757a..c4ad0b06bdb 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
ini_reloaded);
}
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->set_radar_params)
+ return;
+
+ ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2f91acccb7d..ee775957505 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -133,7 +133,7 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
udelay(AH_TIME_QUANTUM);
}
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(ah), ANY,
"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
timeout, reg, REG_READ(ah, reg), mask, val);
@@ -491,8 +491,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
if (ecode != 0)
return ecode;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
- "Eeprom VER: %d, REV: %d\n",
+ ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
@@ -504,7 +503,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return ecode;
}
- if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+ if (ah->config.enable_ani) {
ath9k_hw_ani_setup(ah);
ath9k_hw_ani_init(ah);
}
@@ -567,7 +566,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
ah->config.serialize_regmode);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -610,6 +609,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+ /* disable ANI for 9340 */
+ if (AR_SREV_9340(ah))
+ ah->config.enable_ani = false;
+
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -954,8 +957,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
if (tu > 0xFFFF) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "bad global tx timeout %u\n", tu);
+ ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
+ tu);
ah->globaltxtimeout = (u32) -1;
return false;
} else {
@@ -976,7 +979,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
int rx_lat = 0, tx_lat = 0, eifs = 0;
u32 reg;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+ ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (!chan)
@@ -1271,7 +1274,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(npend || type == ATH9K_RESET_COLD)) {
int reset_err = 0;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"reset MAC via external reset\n");
reset_err = ah->external_reset();
@@ -1294,8 +1297,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC stuck in MAC reset\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
@@ -1340,8 +1342,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
AR_RTC_STATUS_M,
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC not waking up\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
return false;
}
@@ -1350,6 +1351,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+ bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1363,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
- return ath9k_hw_set_reset_power_on(ah);
+ ret = ath9k_hw_set_reset_power_on(ah);
+ break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
- return ath9k_hw_set_reset(ah, type);
+ ret = ath9k_hw_set_reset(ah, type);
+ break;
default:
- return false;
+ break;
}
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1406,7 +1415,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Transmit frames pending on queue %d\n", qnum);
return false;
}
@@ -1506,6 +1515,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool bChannelChange)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 saveLedState;
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
@@ -1513,6 +1523,52 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool allow_fbs = false;
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+ bool save_fullsleep = ah->chip_fullsleep;
+
+ if (mci) {
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state == MCI_BT_CAL_START) {
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI stop rx for BT CAL\n");
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+ * MCI FIX: disable the MCI interrupt here to avoid the
+ * SW_MSG_DONE or RX_MSG bits triggering MCI_INT and
+ * leading to mci_intr reentry.
+ */
+
+ ar9003_mci_disable_interrupt(ah);
+
+ ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+ ath_dbg(common, MCI, "\nMCI BT is calibrating\n");
+
+ /* Wait up to 25ms for BT calibration to complete */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, MCI,
+ "MCI got BT_CAL_DONE\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI ### BT cal takes to long, force bt_state to be bt_awake\n");
+ mci_hw->bt_state = MCI_BT_AWAKE;
+ /* MCI FIX: enable mci interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+ return true;
+ }
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1550,12 +1606,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready)
+ ar9003_mci_2g5g_switch(ah, true);
+
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
}
}
+ if (mci) {
+ ar9003_mci_disable_interrupt(ah);
+
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+ }
+
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
+ }
+
+
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
@@ -1611,6 +1684,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ if (mci)
+ ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
@@ -1728,6 +1804,54 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready) {
+
+ if (IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+ if (ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+ ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+ /*
+ * BT is sleeping. Check if BT wakes up during
+ * WLAN calibration; if so, all message
+ * exchanges need to be redone and a
+ * recalibration is required.
+ */
+
+ ath_dbg(common, MCI,
+ "MCI BT wakes up during WLAN calibration\n");
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ ath_dbg(common, MCI, "MCI re-cal\n");
+
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ }
+ }
+ ar9003_mci_enable_interrupt(ah);
+ }
+
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
@@ -1742,14 +1866,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 mask;
mask = REG_READ(ah, AR_CFG);
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
- ath_dbg(common, ATH_DBG_RESET,
- "CFG Byte Swap Set 0x%x\n", mask);
+ ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+ mask);
} else {
mask =
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
REG_WRITE(ah, AR_CFG, mask);
- ath_dbg(common, ATH_DBG_RESET,
- "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
+ ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+ REG_READ(ah, AR_CFG));
}
} else {
if (common->bus_ops->ath_bus_type == ATH_USB) {
@@ -1767,9 +1891,25 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
#endif
}
- if (ah->btcoex_hw.enabled)
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
ath9k_hw_btcoex_enable(ah);
+ if (mci && mci_hw->ready) {
+ /*
+ * Check the BT state again to make
+ * sure it hasn't changed.
+ */
+
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -1827,7 +1967,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -1933,6 +2074,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@@ -1944,18 +2086,41 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
if (ah->power_mode == mode)
return status;
- ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
+ ath_dbg(common, RESET, "%s -> %s\n",
modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
break;
case ATH9K_PM_FULL_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ath_dbg(common, MCI,
+ "MCI halt BT GPM (full_sleep)\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah,
+ true, true);
+ }
+
+ mci->ready = false;
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+ }
+
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
@@ -2005,9 +2170,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
- "%s: unsupported opmode: %d\n",
- __func__, ah->opmode);
+ ath_dbg(ath9k_hw_common(ah), BEACON,
+ "%s: unsupported opmode: %d\n", __func__, ah->opmode);
return;
break;
}
@@ -2058,10 +2222,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
else
nextTbtt = bs->bs_nexttbtt;
- ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
- ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
- ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
- ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+ ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
+ ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
+ ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
ENABLE_REGWRITE_BUFFER(ah);
@@ -2108,6 +2272,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
return chip_chainmask;
}
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream on chipsets which have passed a series
+ * of tests. The testing requirements will be documented; the desired
+ * test requirements are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset gets properly tested an individual commit can be used
+ * to document the testing for DFS for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+ switch (ah->hw_version.macVersion) {
+ /* AR9580 will likely be our first target to get testing on */
+ case AR_SREV_VERSION_9580:
+ default:
+ return false;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2129,8 +2317,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
regulatory->current_rd += 5;
else if (regulatory->current_rd == 0x41)
regulatory->current_rd = 0x43;
- ath_dbg(common, ATH_DBG_REGULATORY,
- "regdomain mapped to 0x%x\n", regulatory->current_rd);
+ ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
+ regulatory->current_rd);
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -2148,6 +2336,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
+ else if (AR_SREV_9462(ah))
+ chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2204,12 +2394,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
- if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_CST;
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
- } else {
+ else
pCap->rts_aggr_limit = (8 * 1024);
- }
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2233,7 +2421,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (common->btcoex_enabled) {
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9462(ah))
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2317,6 +2507,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->pcie_lcr_offset = 0x80;
}
+ if (ath9k_hw_dfs_tested(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
@@ -2331,11 +2524,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
- if (!AR_SREV_9330(ah))
+ if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
return 0;
}
@@ -2583,7 +2776,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
- reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+ reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
if (test)
channel->max_power = MAX_RATE_POWER / 2;
@@ -2650,7 +2843,7 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
AH_TSF_WRITE_TIMEOUT))
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
@@ -2775,7 +2968,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
timer_next = tsf + trig_timeout;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+ ath_dbg(ath9k_hw_common(ah), HWTIMER,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
@@ -2864,8 +3057,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
- "TSF overflow for Gen timer %d\n", index);
+ ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+ index);
timer->overflow(timer->arg);
}
@@ -2873,7 +3066,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
+ ath_dbg(common, HWTIMER,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index f389b3c93cf..6a29004a71b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -59,9 +59,6 @@
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
-#define AR9300_NUM_BT_WEIGHTS 4
-#define AR9300_NUM_WLAN_WEIGHTS 4
-
#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define ATH_DEFAULT_NOISE_FLOOR -95
@@ -129,6 +126,16 @@
#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@@ -189,20 +196,25 @@ enum ath_ini_subsys {
enum ath9k_hw_caps {
ATH9K_HW_CAP_HT = BIT(0),
ATH9K_HW_CAP_RFSILENT = BIT(1),
- ATH9K_HW_CAP_CST = BIT(2),
- ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
- ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
- ATH9K_HW_CAP_EDMA = BIT(6),
- ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
- ATH9K_HW_CAP_LDPC = BIT(8),
- ATH9K_HW_CAP_FASTCLOCK = BIT(9),
- ATH9K_HW_CAP_SGI_20 = BIT(10),
- ATH9K_HW_CAP_PAPRD = BIT(11),
- ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
- ATH9K_HW_CAP_2GHZ = BIT(13),
- ATH9K_HW_CAP_5GHZ = BIT(14),
- ATH9K_HW_CAP_APM = BIT(15),
- ATH9K_HW_CAP_RTT = BIT(16),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
+ ATH9K_HW_CAP_EDMA = BIT(4),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
+ ATH9K_HW_CAP_LDPC = BIT(6),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(7),
+ ATH9K_HW_CAP_SGI_20 = BIT(8),
+ ATH9K_HW_CAP_PAPRD = BIT(9),
+ ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
+ ATH9K_HW_CAP_2GHZ = BIT(11),
+ ATH9K_HW_CAP_5GHZ = BIT(12),
+ ATH9K_HW_CAP_APM = BIT(13),
+ ATH9K_HW_CAP_RTT = BIT(14),
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ATH9K_HW_CAP_MCI = BIT(15),
+#else
+ ATH9K_HW_CAP_MCI = 0,
+#endif
+ ATH9K_HW_CAP_DFS = BIT(16),
};
struct ath9k_hw_capabilities {
@@ -268,6 +280,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_MCI = 0x00000200,
ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
@@ -419,6 +432,161 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+ MCI_GPM_COEX_PROFILE_UNKNOWN,
+ MCI_GPM_COEX_PROFILE_RFCOMM,
+ MCI_GPM_COEX_PROFILE_A2DP,
+ MCI_GPM_COEX_PROFILE_HID,
+ MCI_GPM_COEX_PROFILE_BNEP,
+ MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_RSSI_POWER,
+ MCI_STATE_CONT_PRIORITY,
+ MCI_STATE_CONT_TXRX,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_DEBUG,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -791,8 +959,6 @@ struct ath_hw {
/* Bluetooth coexistance */
struct ath_btcoex_hw btcoex_hw;
- u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
- u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u32 intr_txqs;
u8 txchainmask;
@@ -850,7 +1016,7 @@ struct ath_hw {
u32 ts_paddr_start;
u32 ts_paddr_end;
u16 ts_tail;
- u8 ts_size;
+ u16 ts_size;
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
@@ -948,7 +1114,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
void ath9k_hw_write_associd(struct ath_hw *ah);
u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1041,6 +1206,42 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.scheme;
+}
+#else
+#define ath9k_hw_get_btcoex_scheme(...) ATH_BTCOEX_CFG_NONE
+#endif
+
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d4c909f8e47..abf943557de 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
else
@@ -274,8 +276,7 @@ static void setup_ht_cap(struct ath_softc *sc,
tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -295,9 +296,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ int ret;
+
+ ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+ /* Set tx power */
+ if (ah->curchan) {
+ sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+ ath9k_ps_restore(sc);
+ }
- return ath_reg_notifier_apply(wiphy, request, reg);
+ return ret;
}
/*
@@ -314,7 +328,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_buf *bf;
int i, bsize, error, desc_len;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
@@ -360,7 +374,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
goto fail;
}
ds = (u8 *) dd->dd_desc;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -408,9 +422,10 @@ fail:
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
int r;
- switch (sc->sc_ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_2WIRE:
@@ -425,6 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
+ case ATH_BTCOEX_CFG_MCI:
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = 0x2201;
+ }
+ break;
default:
WARN_ON(1);
break;
@@ -695,6 +741,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->queues = 4;
hw->max_rates = 4;
@@ -833,9 +880,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
if ((sc->btcoex.no_stomp_timer) &&
- sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+ ath_mci_cleanup(sc);
+
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index ecdb6fd2907..fd3f19c2e55 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -21,7 +21,7 @@
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
@@ -57,8 +57,7 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
- "Enable TXE on queue: %u\n", q);
+ ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -202,12 +201,12 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Set TXQ properties, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+ ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
qi->tqi_ver = qinfo->tqi_ver;
qi->tqi_subtype = qinfo->tqi_subtype;
@@ -266,7 +265,7 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Get TXQ properties, inactive queue: %u\n", q);
return false;
}
@@ -325,7 +324,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
return -1;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
qi = &ah->txq[q];
if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
@@ -348,12 +347,11 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Release TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
ah->txok_interrupt_mask &= ~(1 << q);
@@ -376,12 +374,11 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Reset TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
return true;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
if (chan && IS_CHAN_B(chan))
@@ -760,7 +757,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
return true;
host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+ if (((host_isr & AR_INTR_MAC_IRQ) ||
+ (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+ (host_isr != AR_INTR_SPURIOUS))
return true;
host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -781,7 +781,7 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
else
atomic_dec(&ah->intr_ref_cnt);
- ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ ath_dbg(common, INTERRUPT, "disable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
if (!AR_SREV_9100(ah)) {
@@ -798,13 +798,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
- "Do not enable IER ref count %d\n",
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
@@ -812,18 +812,21 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
- ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ async_mask = AR_INTR_MAC_IRQ;
+
+ if (ah->imask & ATH9K_INT_MCI)
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+ ath_dbg(common, INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
@@ -838,7 +841,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
- ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);
+ ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
@@ -901,7 +904,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask2 |= AR_IMR_S2_CST;
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f4089..e267c92dbfb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_idle)
+ if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
@@ -332,14 +332,14 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
hchan = ah->curchan;
}
- if (fastcc && !ath9k_hw_check_alive(ah))
+ if (fastcc && (ah->chip_fullsleep ||
+ !ath9k_hw_check_alive(ah)))
fastcc = false;
if (!ath_prepare_reset(sc, retry_tx, flush))
fastcc = false;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Reset to %u MHz, HT40: %d fastcc: %d\n",
+ ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
CHANNEL_HT40PLUS)),
fastcc);
@@ -428,7 +428,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
txctl.paprd = BIT(chain);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
dev_kfree_skb_any(skb);
return false;
}
@@ -437,7 +437,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
if (!time_left)
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Timeout waiting for paprd training on TX chain %d\n",
chain);
@@ -486,27 +486,27 @@ void ath_paprd_calibrate(struct work_struct *work)
chain_ok = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Sending PAPRD frame for thermal measurement "
- "on chain %d\n", chain);
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD frame for thermal measurement on chain %d\n",
+ chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
ar9003_paprd_setup_gain_table(ah, chain);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Sending PAPRD training frame on chain %d\n", chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
if (!ar9003_paprd_is_done(ah)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD not yet done on chain %d\n", chain);
break;
}
if (ar9003_paprd_create_curve(ah, caldata, chain)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD create curve failed on chain %d\n",
chain);
break;
@@ -561,7 +561,6 @@ void ath_ani_calibrate(unsigned long data)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -569,8 +568,6 @@ void ath_ani_calibrate(unsigned long data)
if (!common->ani.caldone) {
if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -584,8 +581,9 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
+ if (sc->sc_ah->config.enable_ani
+ && (timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -605,6 +603,12 @@ void ath_ani_calibrate(unsigned long data)
ah->rxchainmask, longcal);
}
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
ath9k_ps_restore(sc);
set_timer:
@@ -630,7 +634,8 @@ set_timer:
}
}
-static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
{
struct ath_node *an;
an = (struct ath_node *)sta->drv_priv;
@@ -639,8 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
spin_lock(&sc->nodes_lock);
list_add(&an->list, &sc->nodes);
spin_unlock(&sc->nodes_lock);
- an->sta = sta;
#endif
+ an->sta = sta;
+ an->vif = vif;
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -709,8 +715,7 @@ void ath9k_tasklet(unsigned long data)
* TSF sync does not look correct; remain awake to sync with
* the next Beacon.
*/
- ath_dbg(common, ATH_DBG_PS,
- "TSFOOR - Sync with next Beacon\n");
+ ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
@@ -736,10 +741,13 @@ void ath9k_tasklet(unsigned long data)
ath_tx_tasklet(sc);
}
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
+ if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI)
+ ath_mci_intr(sc);
+
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@@ -762,7 +770,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
ATH9K_INT_TSFOOR | \
- ATH9K_INT_GENTIMER)
+ ATH9K_INT_GENTIMER | \
+ ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
@@ -880,82 +889,6 @@ chip_reset:
#undef SCHED_INTR
}
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- atomic_set(&ah->intr_ref_cnt, -1);
-
- ath9k_hw_configpcipowersave(ah, false);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(common,
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath_complete_reset(sc, true);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
-
- ath_cancel_work(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- /*
- * Keep the LED on when the radio is disabled
- * during idle unassociated state.
- */
- if (!sc->ps_idle) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
-
- ath_prepare_reset(sc, false, true);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath9k_hw_phy_disable(ah);
-
- ath9k_hw_configpcipowersave(ah, true);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_ps_restore(sc);
-}
-
static int ath_reset(struct ath_softc *sc, bool retry_tx)
{
int r;
@@ -1002,8 +935,8 @@ void ath_hw_check(struct work_struct *work)
busy = ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
- ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
- "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+ busy, sc->hw_busy_count + 1);
if (busy >= 99) {
if (++sc->hw_busy_count >= 3) {
RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
@@ -1026,8 +959,7 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
count++;
if (count == 3) {
/* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, ATH_DBG_RESET,
- "Possible RX hang, resetting");
+ ath_dbg(common, RESET, "Possible RX hang, resetting\n");
RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
count = 0;
@@ -1067,7 +999,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
struct ath9k_channel *init_channel;
int r;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -1091,6 +1023,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
spin_lock_bh(&sc->sc_pcu_lock);
+
+ atomic_set(&ah->intr_ref_cnt, -1);
+
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
ath_err(common,
@@ -1117,6 +1052,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ah->imask |= ATH9K_INT_MCI;
+
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;
@@ -1129,15 +1067,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
goto mutex_unlock;
}
+ if (ah->led_pin >= 0) {
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
- if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
!ah->btcoex_hw.enabled) {
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_resume(sc);
}
@@ -1167,12 +1118,19 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control) &&
!ieee80211_has_pm(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Add PM=1 for a TX frame while in PS mode\n");
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
}
+ /*
+ * Cannot tx while the hardware is in full sleep; it first needs a full
+ * chip reset to recover from that state.
+ */
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+ goto exit;
+
if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
/*
* We are using PS-Poll and mac80211 can request TX while in
@@ -1183,12 +1141,11 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Sending PS-Poll to pick a buffered frame\n");
sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
- ath_dbg(common, ATH_DBG_PS,
- "Wake up to complete TX\n");
+ ath_dbg(common, PS, "Wake up to complete TX\n");
sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
@@ -1202,10 +1159,10 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
- ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
+ ath_dbg(common, XMIT, "TX failed\n");
goto exit;
}
@@ -1219,13 +1176,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool prev_idle;
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
@@ -1233,10 +1191,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
+ ath_mci_flush_profile(&sc->btcoex.mci);
}
spin_lock_bh(&sc->sc_pcu_lock);
@@ -1248,39 +1208,49 @@ static void ath9k_stop(struct ieee80211_hw *hw)
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
- if (!(sc->sc_flags & SC_OP_INVALID)) {
- ath_drain_all_txq(sc, false);
- ath_stoprecv(sc);
- ath9k_hw_phy_disable(ah);
- } else
- sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /* we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and are not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ prev_idle = sc->ps_idle;
+ sc->ps_idle = true;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ ath_prepare_reset(sc, false, true);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
sc->rx.frag = NULL;
}
- /* disable HAL and put h/w to sleep */
- ath9k_hw_disable(ah);
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ ath9k_hw_phy_disable(ah);
- /* we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
+ ath9k_hw_configpcipowersave(ah, true);
- ath9k_ps_restore(sc);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
+ ath9k_ps_restore(sc);
sc->sc_flags |= SC_OP_INVALID;
+ sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
}
bool ath9k_uses_beacons(int type)
@@ -1495,8 +1465,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", vif->type);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -1516,7 +1485,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
- ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
+ ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -1559,7 +1528,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath_dbg(common, CONFIG, "Detach Interface\n");
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
@@ -1616,8 +1585,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio = false;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
/*
@@ -1628,13 +1597,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (!sc->ps_idle) {
- ath_radio_enable(sc, hw);
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
- } else {
- disable_radio = true;
- }
+ if (sc->ps_idle)
+ ath_cancel_work(sc);
}
/*
@@ -1655,12 +1619,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is enabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
sc->sc_ah->is_monitoring = true;
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is disabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
sc->sc_ah->is_monitoring = false;
}
}
@@ -1680,8 +1642,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set channel: %d MHz type: %d\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
/* update survey stats for the old channel before switching */
@@ -1738,21 +1699,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set power: %d\n", conf->power_level);
+ ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
- ath9k_ps_wakeup(sc);
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- ath9k_ps_restore(sc);
- }
-
- if (disable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- ath_radio_disable(sc, hw);
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return 0;
}
@@ -1785,8 +1739,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
ath9k_ps_restore(sc);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
}
static int ath9k_sta_add(struct ieee80211_hw *hw,
@@ -1798,7 +1752,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { };
- ath_node_attach(sc, sta);
+ ath_node_attach(sc, sta, vif);
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN)
@@ -1843,6 +1797,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ if (!(sc->sc_flags & SC_OP_TXAGGR))
+ return;
+
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
@@ -1880,7 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
qi.tqi_cwmax = params->cw_max;
qi.tqi_burstTime = params->txop;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1912,7 +1869,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
- if (vif->type == NL80211_IFTYPE_ADHOC &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -1928,7 +1886,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
switch (cmd) {
case SET_KEY:
@@ -1980,9 +1938,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
+ ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
ath_beacon_config(sc, vif);
/*
* Request a re-configuration of Beacon related timers
@@ -2013,8 +1970,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
/* Reconfigure bss info */
if (avp->primary_sta_vif && !bss_conf->assoc) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info DISASSOC %d, bssid %pM\n",
+ ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
common->curaid, common->curbssid);
sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
avp->primary_sta_vif = false;
@@ -2056,7 +2012,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ath9k_config_bss(sc, vif);
- ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -2134,7 +2090,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n",
bss_conf->use_short_preamble);
if (bss_conf->use_short_preamble)
sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
@@ -2143,7 +2099,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n",
bss_conf->use_cts_prot);
if (bss_conf->use_cts_prot &&
hw->conf.channel->band != IEEE80211_BAND_5GHZ)
@@ -2309,20 +2265,17 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
cancel_delayed_work_sync(&sc->tx_complete_work);
if (ah->ah_flags & AH_UNPLUGGED) {
- ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ ath_dbg(common, ANY, "Device has been unplugged!\n");
mutex_unlock(&sc->mutex);
return;
}
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (drop)
- timeout = 1;
-
for (j = 0; j < timeout; j++) {
bool npend = false;
@@ -2340,21 +2293,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
}
if (!npend)
- goto out;
+ break;
}
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ if (drop) {
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ drain_txq = ath_drain_all_txq(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- if (!drain_txq)
- ath_reset(sc, false);
+ if (!drain_txq)
+ ath_reset(sc, false);
- ath9k_ps_restore(sc);
- ieee80211_wake_queues(hw);
+ ath9k_ps_restore(sc);
+ ieee80211_wake_queues(hw);
+ }
-out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644
index 00000000000..05c23ea4c63
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
+static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info*
+ath_mci_find_profile(struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ list_for_each_entry(entry, &mci->info, list) {
+ if (entry->conn_handle == info->conn_handle)
+ break;
+ }
+ return entry;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+ (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many SCO profile, failed to add new profile\n");
+ return false;
+ }
+
+ if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+ (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many ACL profile, failed to add new profile\n");
+ return false;
+ }
+
+ entry = ath_mci_find_profile(mci, info);
+
+ if (entry)
+ memcpy(entry, info, 10);
+ else {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return false;
+
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&info->list, &mci->info);
+ }
+ return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ entry = ath_mci_find_profile(mci, info);
+
+ if (!entry) {
+ ath_dbg(common, MCI, "Profile to be deleted not found\n");
+ return;
+ }
+ DEC_PROF(mci, entry);
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+ struct ath_mci_profile_info *info, *tinfo;
+
+ list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+ list_del(&info->list);
+ DEC_PROF(mci, info);
+ kfree(info);
+ }
+ mci->aggr_limit = 0;
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u32 wlan_airtime = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ /*
+ * Scale: wlan_airtime is in ms, aggr_limit is in units of 0.25 ms.
+ * When wlan_airtime is less than 4 ms, the aggregation limit has to be
+ * adjusted to half of wlan_airtime so that the aggregate still fits
+ * without colliding with BT traffic (see the worked example after this
+ * function).
+ */
+ if ((wlan_airtime <= 4) &&
+ (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+ mci->aggr_limit = 2 * wlan_airtime;
+}
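The comment in ath_mci_adjust_aggr_limit() above packs a unit conversion into one sentence. Below is a standalone sketch of the same arithmetic, using the default 40 ms period from ATH_MCI_DEF_BT_PERIOD and a 90% BT duty cycle purely as example inputs, not values taken from this patch.

#include <stdio.h>

/* Mirrors the driver arithmetic: airtime in ms, aggr_limit in 0.25 ms units. */
int main(void)
{
	unsigned int btcoex_period = 40;	/* ms, ATH_MCI_DEF_BT_PERIOD */
	unsigned int duty_cycle = 90;		/* percent of the period granted to BT */
	unsigned int wlan_airtime = btcoex_period * (100 - duty_cycle) / 100;
	unsigned int aggr_limit = 0;

	if (wlan_airtime <= 4 && (!aggr_limit || aggr_limit > 2 * wlan_airtime))
		aggr_limit = 2 * wlan_airtime;

	/* 40 ms * 10% = 4 ms of WLAN airtime -> aggr_limit 8, i.e. 2 ms */
	printf("wlan_airtime=%u ms, aggr_limit=%u (%.2f ms)\n",
	       wlan_airtime, aggr_limit, aggr_limit * 0.25);
	return 0;
}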
+
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *info;
+ u32 num_profile = NUM_PROF(mci);
+
+ if (num_profile == 1) {
+ info = list_first_entry(&mci->info,
+ struct ath_mci_profile_info,
+ list);
+ if (mci->num_sco && info->T == 12) {
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Single SCO, aggregation limit 2 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
+ !info->master) {
+ btcoex->btcoex_period = 60;
+ ath_dbg(common, MCI,
+ "Single slave PAN/FTP, bt period 60 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
+ (info->T > 0 && info->T < 50) &&
+ (info->A > 1 || info->W > 1)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Multiple attempt/timeout single HID "
+ "aggregation limit 2 ms dutycycle 30%%\n");
+ }
+ } else if ((num_profile == 2) && (mci->num_hid == 2)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
+ } else if (num_profile > 3) {
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1.5 ms\n");
+ }
+
+ if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+ if (IS_CHAN_HT(sc->sc_ah->curchan))
+ ath_mci_adjust_aggr_limit(btcoex);
+ else
+ btcoex->btcoex_period >>= 1;
+ }
+
+ ath9k_hw_btcoex_disable(sc->sc_ah);
+ ath9k_btcoex_timer_pause(sc);
+
+ if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+ return;
+
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+ if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+ btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+ btcoex->btcoex_period *= 1000;
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ ath9k_hw_btcoex_enable(sc->sc_ah);
+ ath9k_btcoex_timer_resume(sc);
+}
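For the 2 GHz path at the end of ath_mci_update_scheme(), the no-stomp window is derived from the period and duty cycle after the period is scaled by 1000. Reading that scaling as a ms-to-microseconds conversion is an interpretation, not something the patch states; a small standalone sketch under that assumption:

#include <stdio.h>

int main(void)
{
	unsigned int btcoex_period = 40;	/* ms, ATH_MCI_DEF_BT_PERIOD */
	unsigned int duty_cycle = 60;		/* e.g. ath_mci_duty_cycle[] entry for two profiles */
	unsigned int btcoex_no_stomp;

	btcoex_period *= 1000;			/* assumed to be microseconds from here on */
	btcoex_no_stomp = btcoex_period * (100 - duty_cycle) / 100;

	printf("period=%u us, no-stomp window=%u us\n",
	       btcoex_period, btcoex_no_stomp);
	return 0;
}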
+
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 payload[4] = {0, 0, 0, 0};
+
+ switch (opcode) {
+ case MCI_GPM_BT_CAL_REQ:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ } else
+ ath_dbg(common, MCI, "MCI State mismatches: %d\n",
+ ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+ break;
+
+ case MCI_GPM_BT_CAL_DONE:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+ ath_dbg(common, MCI, "MCI error illegal!\n");
+ else
+ ath_dbg(common, MCI, "MCI BT not in CAL state\n");
+
+ break;
+
+ case MCI_GPM_BT_CAL_GRANT:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n");
+
+ /* Send WLAN_CAL_DONE for now */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+ ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+ 16, false, true);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n");
+ break;
+ }
+}
+
+static void ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+
+ if (info->start) {
+ if (!ath_mci_add_profile(common, mci, info))
+ return;
+ } else
+ ath_mci_del_profile(common, mci, info);
+
+ btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+ mci->aggr_limit = mci->num_sco ? 6 : 0;
+ if (NUM_PROF(mci)) {
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+ } else {
+ btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+ ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ }
+
+ ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info info;
+ int i = 0, old_num_mgmt = mci->num_mgmt;
+
+ /* Link-type status updates are not handled */
+ if (status->is_link) {
+ ath_dbg(common, MCI, "Skip link type status update\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(struct ath_mci_profile_info));
+
+ info.conn_handle = status->conn_handle;
+ if (ath_mci_find_profile(mci, &info)) {
+ ath_dbg(common, MCI,
+ "Skip non link state update for existing profile %d\n",
+ status->conn_handle);
+ return;
+ }
+ if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
+ ath_dbg(common, MCI, "Ignore too many non-link update\n");
+ return;
+ }
+ if (status->is_critical)
+ __set_bit(status->conn_handle, mci->status);
+ else
+ __clear_bit(status->conn_handle, mci->status);
+
+ mci->num_mgmt = 0;
+ do {
+ if (test_bit(i, mci->status))
+ mci->num_mgmt++;
+ } while (++i < ATH_MCI_MAX_PROFILE);
+
+ if (old_num_mgmt != mci->num_mgmt)
+ ath_mci_update_scheme(sc);
+}
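The recount loop in ath_mci_process_status() is just a population count over the per-handle status bitmap. A standalone illustration of the same idea follows, with a compiler builtin standing in for the kernel's bitmap_weight() helper, which would be the in-tree way to express it; that substitution is mine, not the patch's.

#include <stdio.h>

int main(void)
{
	/* Stand-in for DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE): with 8
	 * possible profiles the bitmap fits in one unsigned long. Handles 1
	 * and 5 are flagged critical here as sample data. */
	unsigned long status = (1UL << 1) | (1UL << 5);
	unsigned int num_mgmt = (unsigned int)__builtin_popcountl(status);

	printf("num_mgmt = %u\n", num_mgmt);	/* prints 2 */
	return 0;
}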
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_profile_info profile_info;
+ struct ath_mci_profile_status profile_status;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 version;
+ u8 major;
+ u8 minor;
+ u32 seq_num;
+
+ switch (opcode) {
+
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+ break;
+
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+ minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ major, minor);
+ version = (major << 8) + minor;
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_COEX_VERSION, &version);
+ break;
+
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02x\n",
+ *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+ ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ break;
+
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n");
+ memcpy(&profile_info,
+ (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+ || (profile_info.type >=
+ MCI_GPM_COEX_PROFILE_MAX)) {
+
+ ath_dbg(common, MCI,
+ "illegal profile type = %d, state = %d\n",
+ profile_info.type,
+ profile_info.start);
+ break;
+ }
+
+ ath_mci_process_profile(sc, &profile_info);
+ break;
+
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ profile_status.is_link = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_TYPE);
+ profile_status.conn_handle = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_LINKID);
+ profile_status.is_critical = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_STATE);
+
+ seq_num = *((u32 *)(rx_payload + 12));
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ profile_status.is_link, profile_status.conn_handle,
+ profile_status.is_critical, seq_num);
+
+ ath_mci_process_status(sc, &profile_status);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n",
+ opcode);
+ break;
+ }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ int error = 0;
+
+ buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+ &buf->bf_paddr, GFP_KERNEL);
+
+ if (buf->bf_addr == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ memset(buf, 0, sizeof(*buf));
+ return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ if (buf->bf_addr) {
+ dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+ buf->bf_paddr);
+ memset(buf, 0, sizeof(*buf));
+ }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ int error = 0;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+ if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+ ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+ memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+ mci->sched_buf.bf_len);
+
+ mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+ mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+ /* initialize the buffer */
+ memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+ ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+fail:
+ return error;
+}
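ath_mci_setup() above carves one coherent DMA allocation into two back-to-back regions: the scheduler table first, then the GPM ring, with gpm_buf simply pointing ATH_MCI_SCHED_BUF_SIZE bytes into the same block. A small standalone sketch of that layout arithmetic:

#include <stdio.h>

#define ATH_MCI_SCHED_BUF_SIZE	(16 * 16)
#define ATH_MCI_GPM_MAX_ENTRY	16
#define ATH_MCI_GPM_BUF_SIZE	(ATH_MCI_GPM_MAX_ENTRY * 16)

int main(void)
{
	unsigned int total = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
	unsigned int gpm_offset = ATH_MCI_SCHED_BUF_SIZE;

	/* One allocation: [ sched | gpm ], which is why the cleanup path
	 * only frees sched_buf. */
	printf("total=%u bytes, sched at 0, gpm at %u (%u bytes, %u entries)\n",
	       total, gpm_offset, ATH_MCI_GPM_BUF_SIZE,
	       ATH_MCI_GPM_BUF_SIZE / 16);
	return 0;
}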
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_coex *mci = &sc->mci_coex;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /*
+ * Both the scheduler and GPM buffers are released here, since they
+ * share a single coherent allocation.
+ */
+ ath_mci_buf_free(sc, &mci->sched_buf);
+ ar9003_mci_cleanup(ah);
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 mci_int, mci_int_rxmsg;
+ u32 offset, subtype, opcode;
+ u32 *pgpm;
+ u32 more_data = MCI_GPM_MORE;
+ bool skip_gpm = false;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+ ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n");
+
+ ath_dbg(common, MCI,
+ "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+ mci_int, mci_int_rxmsg);
+ return;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+ u32 payload[4] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffff00};
+
+ /*
+ * The following REMOTE_RESET and SYS_WAKING used to be sent
+ * only when BT woke up. Now they are always sent, as a
+ * recovery method to reset the BT MCI's RX alignment.
+ */
+ ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n");
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+ payload, 16, true, false);
+ ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n");
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+ NULL, 0, true, false);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+ /*
+ * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
+ */
+ ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+
+ /* Processing SYS_WAKING/SYS_SLEEPING */
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_SLEEP)
+ ath_dbg(common, MCI,
+ "MCI BT stays in sleep mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_AWAKE)
+ ath_dbg(common, MCI,
+ "MCI BT stays in AWAKE mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI SetBT state to SLEEP\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+ NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n");
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+ ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n");
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ skip_gpm = true;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ NULL);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+ while (more_data == MCI_GPM_MORE) {
+
+ pgpm = mci->gpm_buf.bf_addr;
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ pgpm += (offset >> 2);
+
+ /*
+ * The first dword is the timer; the real data starts
+ * from the second dword.
+ */
+
+ subtype = MCI_GPM_TYPE(pgpm);
+ opcode = MCI_GPM_OPCODE(pgpm);
+
+ if (!skip_gpm) {
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype))
+ ath_mci_cal_msg(sc, subtype,
+ (u8 *) pgpm);
+ else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode,
+ (u8 *) pgpm);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ MCI_GPM_RECYCLE(pgpm);
+ }
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+ ath_dbg(common, MCI, "MCI LNA_INFO\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+ int value_dbm = ar9003_mci_state(ah,
+ MCI_STATE_CONT_RSSI_POWER, NULL);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+ if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ else
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+ ath_dbg(common, MCI, "MCI CONT_NACK\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+ ath_dbg(common, MCI, "MCI CONT_RST\n");
+ }
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+ mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+ if (mci_int_rxmsg & 0xfffffffe)
+ ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n",
+ mci_int_rxmsg);
+}
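ath_mci_intr() is written to be called from the driver's interrupt processing path once the MCI async interrupt fires. A hedged sketch of a caller; the ATH9K_INT_MCI flag name and the location of the actual hook are assumptions here, not shown in this hunk.

/* Illustrative dispatch only; the real hook belongs to the ISR/tasklet path. */
static void example_dispatch_mci(struct ath_softc *sc, u32 status)
{
	if (status & ATH9K_INT_MCI)	/* assumed interrupt bit name */
		ath_mci_intr(sc);
}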
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644
index 00000000000..29e3e51d078
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY 16
+#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD 40
+#define ATH_MCI_BDR_DUTY_CYCLE 20
+#define ATH_MCI_MAX_DUTY_CYCLE 90
+
+#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in units of 0.25 ms */
+#define ATH_MCI_MAX_ACL_PROFILE 7
+#define ATH_MCI_MAX_SCO_PROFILE 1
+#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
+ ATH_MCI_MAX_SCO_PROFILE)
+
+#define INC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp++; \
+ if (!_info->edr) \
+ _mci->num_bdr++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp--; \
+ if (!_info->edr) \
+ _mci->num_bdr--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \
+ _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
+struct ath_mci_profile_info {
+ u8 type;
+ u8 conn_handle;
+ bool start;
+ bool master;
+ bool edr;
+ u8 voice_type;
+ u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */
+ u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */
+ u8 A; /* HID: Sniff attempt, in slots */
+ struct list_head list;
+};
+
+struct ath_mci_profile_status {
+ bool is_critical;
+ bool is_link;
+ u8 conn_handle;
+};
+
+struct ath_mci_profile {
+ struct list_head info;
+ DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+ u16 aggr_limit;
+ u8 num_mgmt;
+ u8 num_sco;
+ u8 num_a2dp;
+ u8 num_hid;
+ u8 num_pan;
+ u8 num_other_acl;
+ u8 num_bdr;
+};
+
+
+struct ath_mci_buf {
+ void *bf_addr; /* virtual addr of buffer */
+ dma_addr_t bf_paddr; /* physical addr of buffer */
+ u32 bf_len; /* len of data */
+};
+
+struct ath_mci_coex {
+ atomic_t mci_cal_flag;
+ struct ath_mci_buf sched_buf;
+ struct ath_mci_buf gpm_buf;
+ u32 bt_cal_start;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+#endif
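A hedged sketch of how the bookkeeping macros above fit together, mirroring the add path in mci.c; the field values are invented for illustration and the surrounding mci pointer is assumed to be a struct ath_mci_profile *.

	/* Illustration only: account for a newly reported A2DP profile. */
	struct ath_mci_profile_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (info) {
		info->type = MCI_GPM_COEX_PROFILE_A2DP;
		info->edr = true;			/* EDR link, so num_bdr stays put */
		INC_PROF(mci, info);			/* bumps mci->num_a2dp */
		list_add_tail(&info->list, &mci->info);
		/* NUM_PROF(mci) now includes this entry */
	}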
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 2dcdf63cb39..77dc327def8 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -121,7 +121,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
if (!parent)
return;
- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
/* Bluetooth coexistence requires disabling ASPM. */
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
@@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moves to full sleep
* when no interface is up.
*/
+ ath9k_hw_disable(sc->sc_ah);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
@@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ath9k_ps_wakeup(sc);
- /* Enable LED */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
- /*
- * Reset key cache to sane defaults (all entries cleared) instead of
- * semi-random values after suspend/resume.
- */
- ath9k_cmn_init_crypto(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2be3a..b3c3798fe51 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1199,7 +1199,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
return &ar5416_11na_ratetable;
return &ar5416_11a_ratetable;
default:
- ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
+ ath_dbg(common, CONFIG, "Invalid band\n");
return NULL;
}
}
@@ -1271,11 +1271,12 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+ ath_rc_priv->rate_max_phy = (k > 4) ?
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
- ath_dbg(common, ATH_DBG_CONFIG,
- "RC Initialized with capabilities: 0x%x\n",
+ ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
ath_rc_priv->ht_cap);
}
@@ -1472,7 +1473,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
oper_cw40, oper_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
"Operating HT Bandwidth changed to: %d\n",
sc->hw->conf.channel_type);
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862cdae6..0e666fbe084 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -172,7 +172,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
u32 nbuf = 0;
if (list_empty(&sc->rx.rxbuf)) {
- ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ ath_dbg(common, QUEUE, "No free rx buf available\n");
return;
}
@@ -337,7 +337,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
common->cachelsz, common->rx_bufsize);
/* Initialize rx descriptors */
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
return rfilt;
-#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
@@ -592,7 +591,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
ath_set_beacon(sc);
}
@@ -605,7 +604,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* a backup trigger for returning into NETWORK SLEEP state,
* so we are waiting for it as well.
*/
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
@@ -618,8 +617,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* been delivered.
*/
sc->ps_flags &= ~PS_WAIT_FOR_CAB;
- ath_dbg(common, ATH_DBG_PS,
- "PS wait for CAB frames timed out\n");
+ ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
}
}
@@ -644,13 +642,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
* point.
*/
sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"All PS CAB frames received, back to sleep\n");
} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received PS-Poll data (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -933,7 +931,7 @@ static int ath9k_process_rate(struct ath_common *common,
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
@@ -1824,6 +1822,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
!compare_ether_addr(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
@@ -1838,11 +1837,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
- retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
- rxs, &decrypt_error);
- if (retval)
- goto requeue_drop_frag;
-
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1846,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+ rxs, &decrypt_error);
+ if (retval)
+ goto requeue_drop_frag;
+
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1922,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
- /*
- * change the default rx antenna if rx diversity chooses the
- * other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+ /*
+ * change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs.rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs.rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
}
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fcb7e9e839..6e2f18861f5 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@ enum {
#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
+#define AR_INTR_ASYNC_MASK_MCI 0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S 7
#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
+#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
+ AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S 7
+
#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
#define AR_RTC_INTR_MASK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_KEEP_AWAKE 0x7034
+
/* RTC_DERIVED_* - only for AR9100 */
#define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
#define AR_DIAG_FRAME_NV0 0x00020000
#define AR_DIAG_OBS_PT_SEL1 0x000C0000
#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S 27
#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@@ -1752,19 +1766,10 @@ enum {
#define AR_BT_COEX_WL_WEIGHTS0 0x8174
#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
-#define AR_BT_COEX_BT_WEIGHTS0 0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
-
-#define AR9300_BT_WGHT 0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
+#define AR9300_BT_WGHT 0xcccc4444
#define AR_BT_COEX_MODE2 0x817c
#define AR_BT_BCN_MISS_THRESH 0x000000ff
@@ -1938,37 +1943,277 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+ AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_RRIORITY 0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
#endif
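The new MCI/BTCOEX register fields above follow the driver's usual mask/shift pairing, so they can be decoded with the existing MS() helper. A short hedged sketch, illustrative only and not code from the patch:

	/* Illustration: decode the continuous-status register added above. */
	u32 cont = REG_READ(ah, AR_MCI_CONT_STATUS);
	u32 rssi_power = MS(cont, AR_MCI_CONT_RSSI_POWER);
	bool is_tx = !!(cont & AR_MCI_CONT_TXRX);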
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 35422fc1f2c..65c8894c5f8 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -187,7 +187,7 @@ void ath9k_fatal_work(struct work_struct *work)
fatal_work);
struct ath_common *common = ath9k_hw_common(priv->ah);
- ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
+ ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
ath9k_htc_reset(priv);
}
@@ -330,8 +330,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
if (!time_left) {
- ath_dbg(common, ATH_DBG_WMI,
- "Timeout waiting for WMI command: %s\n",
+ ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
@@ -342,8 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
return 0;
out:
- ath_dbg(common, ATH_DBG_WMI,
- "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 03b0a651a59..3182408ffe3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar);
+ struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -104,6 +104,32 @@ static int ath_max_4ms_framelen[4][32] = {
/* Aggregation logic */
/*********************/
+static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+ __acquires(&txq->axq_lock)
+{
+ spin_lock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&q);
+ skb_queue_splice_init(&txq->complete_q, &q);
+ spin_unlock_bh(&txq->axq_lock);
+
+ while ((skb = __skb_dequeue(&q)))
+ ieee80211_tx_status(sc->hw, skb);
+}
+
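The three helpers above put all axq_lock handling in one place; the _complete variant exists so that frames parked on txq->complete_q while the lock is held can be handed to mac80211 only after the lock is dropped. A hedged sketch of the intended calling pattern, with the body left as a placeholder:

	/* Illustrative pattern only. */
	ath_txq_lock(sc, txq);
	/* ... reap tx descriptors, __skb_queue_tail(&txq->complete_q, skb) ... */
	ath_txq_unlock_complete(sc, txq);	/* drops the lock, then reports tx status */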
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
@@ -130,7 +156,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
WARN_ON(!tid->paused);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
tid->paused = false;
if (skb_queue_empty(&tid->buf_q))
@@ -139,7 +165,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
unlock:
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -150,6 +176,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+ ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+ seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +190,36 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
+ bool sendbar = false;
INIT_LIST_HEAD(&bf_head);
memset(&ts, 0, sizeof(ts));
- spin_lock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&tid->buf_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
- spin_unlock_bh(&txq->axq_lock);
if (bf && fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ sendbar = true;
} else {
ath_tx_send_normal(sc, txq, NULL, skb);
}
- spin_lock_bh(&txq->axq_lock);
}
- spin_unlock_bh(&txq->axq_lock);
+ if (tid->baw_head == tid->baw_tail) {
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+ }
+
+ if (sendbar) {
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, tid->seq_start);
+ ath_txq_lock(sc, txq);
+ }
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +235,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ if (tid->bar_index >= 0)
+ tid->bar_index--;
}
}
@@ -238,9 +280,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- spin_unlock(&txq->axq_lock);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
- spin_lock(&txq->axq_lock);
continue;
}
@@ -249,24 +289,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
if (fi->retries)
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
+ tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+ struct sk_buff *skb, int count)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
+ int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
- if (fi->retries++ > 0)
+ fi->retries += count;
+
+ if (prev > 0)
return;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +407,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
- u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
@@ -374,6 +416,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int nframes;
u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ int i, retries;
+ int bar_index = -1;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +426,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memcpy(rates, tx_info->control.rates, sizeof(rates));
+ retries = ts->ts_longretry + 1;
+ for (i = 0; i < ts->ts_rateindex; i++)
+ retries += rates[i].count;
+
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +443,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
bf = bf_next;
}
@@ -406,6 +453,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
an = (struct ath_node *)sta->drv_priv;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
+ seq_first = tid->seq_start;
/*
* The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +503,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
+ } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ /*
+ * cleanup in progress, just fail
+ * the un-acked sub-frames
+ */
+ txfail = 1;
+ } else if (flush) {
+ txpending = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (txok || !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+ retries);
+
+ txpending = 1;
} else {
- if ((tid->state & AGGR_CLEANUP) || !retry) {
- /*
- * cleanup in progress, just fail
- * the un-acked sub-frames
- */
- txfail = 1;
- } else if (flush) {
- txpending = 1;
- } else if (fi->retries < ATH_MAX_SW_RETRIES) {
- if (txok || !an->sleeping)
- ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-
- txpending = 1;
- } else {
- txfail = 1;
- sendbar = 1;
- txfail_cnt++;
- }
+ txfail = 1;
+ txfail_cnt++;
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
}
/*
@@ -490,9 +538,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +547,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- !txfail, sendbar);
+ !txfail);
} else {
/* retry the un-acked ones */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the
- * frame with failed status if we
- * run out of tx buf.
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head,
- ts, 0,
- !flush);
- break;
- }
-
- fi->bf = tbf;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ ath_tx_update_baw(sc, tid, seqno);
+
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head, ts, 0);
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ break;
}
+
+ fi->bf = tbf;
}
/*
@@ -545,7 +588,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- spin_lock_bh(&txq->axq_lock);
skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -553,18 +595,22 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ts->ts_status & ATH9K_TXERR_FILT)
tid->ac->clear_ps_filter = true;
}
- spin_unlock_bh(&txq->axq_lock);
}
- if (tid->state & AGGR_CLEANUP) {
- ath_tx_flush_tid(sc, tid);
+ if (bar_index >= 0) {
+ u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
+ if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+ tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+ ath_txq_lock(sc, txq);
}
+ if (tid->state & AGGR_CLEANUP)
+ ath_tx_flush_tid(sc, tid);
+
rcu_read_unlock();
if (needreset) {
@@ -601,6 +647,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
+ struct ath_mci_profile *mci = &sc->btcoex.mci;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, legacy = 0;
int i;
@@ -617,24 +664,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
for (i = 0; i < 4; i++) {
- if (rates[i].count) {
- int modeidx;
- if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
- legacy = 1;
- break;
- }
-
- if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- modeidx = MCS_HT40;
- else
- modeidx = MCS_HT20;
+ int modeidx;
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx++;
+ if (!rates[i].count)
+ continue;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
- max_4ms_framelen = min(max_4ms_framelen, frmlen);
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+ legacy = 1;
+ break;
}
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ modeidx = MCS_HT40;
+ else
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
+
+ frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
/*
@@ -645,7 +694,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
return 0;
- if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+ aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+ else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
aggr_limit = min((max_4ms_framelen * 3) / 8,
(u32)ATH_AMPDU_LIMIT_MAX);
else
@@ -768,8 +819,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
- if (!bf_first)
- bf_first = bf;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -777,6 +826,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+ struct ath_tx_status ts = {};
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+ __skb_unlink(skb, &tid->buf_q);
+ ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ continue;
+ }
+
+ if (!bf_first)
+ bf_first = bf;
+
if (!rl) {
aggr_limit = ath_lookup_rate(sc, bf, tid);
rl = 1;
@@ -1119,6 +1183,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
+ txtid->bar_index = -1;
memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
txtid->baw_head = txtid->baw_tail = 0;
@@ -1140,7 +1205,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return;
}
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
txtid->paused = true;
/*
@@ -1153,9 +1218,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state |= AGGR_CLEANUP;
else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- spin_unlock_bh(&txq->axq_lock);
ath_tx_flush_tid(sc, txtid);
+ ath_txq_unlock_complete(sc, txq);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1176,7 +1241,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
buffered = !skb_queue_empty(&tid->buf_q);
@@ -1188,7 +1253,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
list_del(&ac->list);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
@@ -1207,7 +1272,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
@@ -1215,7 +1280,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_schedule(sc, txq);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -1315,6 +1380,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_qnum = axq_qnum;
txq->mac80211_qnum = -1;
txq->axq_link = NULL;
+ __skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
@@ -1397,8 +1463,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1425,13 +1489,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock_bh(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
}
@@ -1443,7 +1505,8 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
@@ -1464,7 +1527,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1558,11 +1621,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break;
}
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
- }
+ if (!list_empty(&ac->tid_q) && !ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
}
if (ac == last_ac ||
@@ -1600,8 +1661,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
bf_last = list_entry(head->prev, struct ath_buf, list);
- ath_dbg(common, ATH_DBG_QUEUE,
- "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+ ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
+ txq->axq_qnum, txq->axq_depth);
if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
@@ -1612,8 +1673,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (txq->axq_link) {
ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT,
- "link[%u] (%p)=%llx (%p)\n",
+ ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
txq->axq_qnum, txq->axq_link,
ito64(bf->bf_daddr), bf->bf_desc);
} else if (!edma)
@@ -1625,7 +1685,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (puttxbuf) {
TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
@@ -1705,10 +1765,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
- /* update starting sequence number for subsequent ADDBA request */
- if (tid)
- INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1771,7 +1827,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
bf = ath_tx_get_buffer(sc);
if (!bf) {
- ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
+ ath_dbg(common, XMIT, "TX buffers are full\n");
goto error;
}
@@ -1816,7 +1872,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- spin_lock_bh(&txctl->txq->axq_lock);
if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1835,7 +1890,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
} else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf)
- goto out;
+ return;
bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1844,9 +1899,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
-
-out:
- spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
@@ -1907,15 +1959,18 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
q = skb_get_queue_mapping(skb);
- spin_lock_bh(&txq->axq_lock);
+
+ ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
- txq->stopped = 1;
+ txq->stopped = true;
}
- spin_unlock_bh(&txq->axq_lock);
ath_tx_start_dma(sc, skb, txctl);
+
+ ath_txq_unlock(sc, txq);
+
return 0;
}
@@ -1926,16 +1981,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq)
{
- struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
- ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
-
- if (tx_flags & ATH_TX_BAR)
- tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -1952,9 +2003,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received TX status (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -1964,32 +2015,27 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
ieee80211_wake_queue(sc->hw, q);
- txq->stopped = 0;
+ txq->stopped = false;
}
- spin_unlock_bh(&txq->axq_lock);
}
- ieee80211_tx_status(hw, skb);
+ __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar)
+ struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags;
int tx_flags = 0;
- if (sendbar)
- tx_flags = ATH_TX_BAR;
-
if (!txok)
tx_flags |= ATH_TX_ERROR;
@@ -2081,8 +2127,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
int txok;
@@ -2092,16 +2136,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
-
if (!bf_isampdu(bf)) {
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
- spin_lock_bh(&txq->axq_lock);
-
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
}
@@ -2116,11 +2156,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int status;
- ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+ ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
for (;;) {
if (work_pending(&sc->hw_reset_work))
break;
@@ -2179,7 +2219,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static void ath_tx_complete_poll_work(struct work_struct *work)
@@ -2196,21 +2236,21 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
txq = &sc->tx.txq[i];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (txq->axq_depth) {
if (txq->axq_tx_inprogress) {
needreset = true;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
break;
} else {
txq->axq_tx_inprogress = true;
}
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"tx hung, resetting the chip\n");
RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
@@ -2253,8 +2293,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (status == -EINPROGRESS)
break;
if (status == -EIO) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Error processing tx status\n");
+ ath_dbg(common, XMIT, "Error processing tx status\n");
break;
}
@@ -2264,10 +2303,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txq = &sc->tx.txq[ts.qid];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
return;
}
@@ -2293,7 +2332,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -2431,7 +2470,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (tid->sched) {
list_del(&tid->list);
@@ -2447,6 +2486,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
}
}
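
The xmit.c hunks above introduce ath_txq_lock()/ath_txq_unlock()/ath_txq_unlock_complete() so that ieee80211_tx_status() is never called with axq_lock held: frames finished under the lock are parked on the new per-queue complete_q list and only handed to mac80211 after the lock is dropped. The sketch below restates that pattern in isolation; the my_txq/my_txq_* names are placeholders, not ath9k symbols.

/* Sketch only: mirrors the deferred-completion pattern added above.
 * struct my_txq and the my_* helpers are hypothetical, not driver API. */
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

struct my_txq {
	spinlock_t lock;                /* protects the queue state */
	struct sk_buff_head complete_q; /* frames completed under the lock */
};

static void my_txq_lock(struct my_txq *txq)
	__acquires(&txq->lock)
{
	spin_lock_bh(&txq->lock);
}

/* Used instead of a plain unlock on paths that completed frames:
 * splice the completions out first, then report them without the lock. */
static void my_txq_unlock_complete(struct ieee80211_hw *hw, struct my_txq *txq)
	__releases(&txq->lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(hw, skb); /* may re-enter TX paths safely */
}

Completion paths then append finished frames with __skb_queue_tail() onto complete_q while still holding the lock, which is what the rewritten ath_tx_complete() above does.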
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index de57f90e1d5..3c164226687 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -56,7 +56,7 @@ static int carl9170_debugfs_open(struct inode *inode, struct file *file)
struct carl9170_debugfs_fops {
unsigned int read_bufsize;
- mode_t attr;
+ umode_t attr;
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cba9d0435dc..3de61adacd3 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -146,13 +146,15 @@ static bool valid_cpu_addr(const u32 address)
return false;
}
-static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
+ size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
- const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
- const struct carl9170fw_txsq_desc *txsq_desc;
- u16 if_comb_types;
+ const struct carl9170fw_chk_desc *chk_desc;
+ unsigned long fin, diff;
+ unsigned int dsc_len;
+ u32 crc32;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -170,36 +172,68 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
- if (chk_desc) {
- unsigned long fin, diff;
- unsigned int dsc_len;
- u32 crc32;
+ if (!chk_desc) {
+ dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ return 0;
+ }
- dsc_len = min_t(unsigned int, len,
+ dsc_len = min_t(unsigned int, len,
(unsigned long)chk_desc - (unsigned long)otus_desc);
- fin = (unsigned long) last_desc + sizeof(*last_desc);
- diff = fin - (unsigned long) otus_desc;
+ fin = (unsigned long) last_desc + sizeof(*last_desc);
+ diff = fin - (unsigned long) otus_desc;
- if (diff < len)
- len -= diff;
+ if (diff < len)
+ len -= diff;
- if (len < 256)
- return -EIO;
+ if (len < 256)
+ return -EIO;
- crc32 = crc32_le(~0, data, len);
- if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
- dev_err(&ar->udev->dev, "fw checksum test failed.\n");
- return -ENOEXEC;
- }
+ crc32 = crc32_le(~0, data, len);
+ if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
+ dev_err(&ar->udev->dev, "fw checksum test failed.\n");
+ return -ENOEXEC;
+ }
+
+ crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
+ if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
+ dev_err(&ar->udev->dev, "descriptor check failed.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
- crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
- if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
- dev_err(&ar->udev->dev, "descriptor check failed.\n");
+static int carl9170_fw_tx_sequence(struct ar9170 *ar)
+{
+ const struct carl9170fw_txsq_desc *txsq_desc;
+
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
+ CARL9170FW_TXSQ_DESC_CUR_VER);
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
return -EINVAL;
- }
} else {
- dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ ar->fw.tx_seq_table = 0;
+ }
+
+ return 0;
+}
+
+static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+{
+ const struct carl9170fw_otus_desc *otus_desc;
+ int err;
+ u16 if_comb_types;
+
+ err = carl9170_fw_checksum(ar, data, len);
+ if (err)
+ return err;
+
+ otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+ sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+ if (!otus_desc) {
+ return -ENODATA;
}
#define SUPP(feat) \
@@ -321,19 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= if_comb_types;
- txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
- sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
-
- if (txsq_desc) {
- ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
- if (!valid_cpu_addr(ar->fw.tx_seq_table))
- return -EINVAL;
- } else {
- ar->fw.tx_seq_table = 0;
- }
-
#undef SUPPORTED
- return 0;
+ return carl9170_fw_tx_sequence(ar);
}
static struct carl9170fw_desc_head *
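
The new carl9170_fw_checksum() above verifies the image by running one CRC over the firmware payload and then continuing the same CRC over the descriptor block, comparing each against the little-endian values stored in the CHK descriptor. A condensed sketch of that chained check; the helper name and parameters here are hypothetical, only the crc32_le()/cpu_to_le32() idiom and the error codes mirror the hunk.

/* Sketch of the chained CRC verification used above. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crc32.h>

static int check_image(const u8 *payload, size_t payload_len,
		       const u8 *hdr, size_t hdr_len,
		       __le32 want_payload, __le32 want_hdr)
{
	u32 crc;

	/* CRC of the firmware payload first ... */
	crc = crc32_le(~0, payload, payload_len);
	if (cpu_to_le32(crc) != want_payload)
		return -ENOEXEC;

	/* ... then continue the same CRC over the descriptor block. */
	crc = crc32_le(crc, hdr, hdr_len);
	if (cpu_to_le32(crc) != want_hdr)
		return -EINVAL;

	return 0;
}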
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f06e0695d41..db774212161 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -48,7 +48,7 @@
#include "carl9170.h"
#include "cmd.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
- rcu_assign_pointer(ar->beacon_iter, NULL);
+ RCU_INIT_POINTER(ar->beacon_iter, NULL);
carl9170_led_set_state(ar, 0);
@@ -678,7 +678,7 @@ unlock:
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
- rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
list_del_rcu(&vif_priv->list);
mutex_unlock(&ar->mutex);
synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
WARN_ON(vif_priv->enable_beacon);
vif_priv->enable_beacon = false;
list_del_rcu(&vif_priv->list);
- rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
if (vif == main_vif) {
rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
}
for (i = 0; i < CARL9170_NUM_TID; i++)
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
if (!tid_info)
continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->tx_ampdu_list_lock);
}
- rcu_assign_pointer(sta_info->agg[tid], NULL);
+ RCU_INIT_POINTER(sta_info->agg[tid], NULL);
rcu_read_unlock();
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
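
Several carl9170 hunks above replace rcu_assign_pointer(..., NULL) with RCU_INIT_POINTER(...): clearing an RCU-protected pointer publishes no new data, so the memory barrier implied by rcu_assign_pointer() is not needed. A small illustration of the two cases; struct foo and gp are made-up names used only for this sketch.

/* Illustrative only: publish vs. retract an RCU-protected pointer. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *gp;

static void publish_foo(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return;
	p->val = 42;
	/* Barrier ensures readers never see a half-initialised *p. */
	rcu_assign_pointer(gp, p);
}

static void retract_foo(void)
{
	/* NULL carries no payload, so the cheaper initialiser suffices.
	 * Any old object must still wait out a grace period before kfree(). */
	RCU_INIT_POINTER(gp, NULL);
}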
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1605c..d19a9ee9d05 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
* feedback either [CTL_REQ_TX_STATUS not set]
*/
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
return;
} else {
/*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
err_free:
ar->tx_dropped++;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 333b69ef2ae..89821e4835c 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1161,15 +1161,4 @@ static struct usb_driver carl9170_driver = {
#endif /* CONFIG_PM */
};
-static int __init carl9170_usb_init(void)
-{
- return usb_register(&carl9170_driver);
-}
-
-static void __exit carl9170_usb_exit(void)
-{
- usb_deregister(&carl9170_driver);
-}
-
-module_init(carl9170_usb_init);
-module_exit(carl9170_usb_exit);
+module_usb_driver(carl9170_driver);
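
The usb.c hunk above drops the hand-rolled init/exit pair in favour of module_usb_driver(). Roughly, the macro generates the same registration boilerplate via module_driver(); the lines below are paraphrased from include/linux/usb.h, not quoted exactly, and carl9170_driver is the usb_driver defined earlier in the file.

/* Approximate definition and expansion, for reference only. */
#define module_usb_driver(__usb_driver) \
	module_driver(__usb_driver, usb_register, usb_deregister)

/* which expands to roughly: */
static int __init carl9170_driver_init(void)
{
	return usb_register(&carl9170_driver);
}
module_init(carl9170_driver_init);

static void __exit carl9170_driver_exit(void)
{
	usb_deregister(&carl9170_driver);
}
module_exit(carl9170_driver_exit);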
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 4cf7c5eb481..0e81904956c 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -143,7 +143,7 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
break;
case ATH_CIPHER_AES_CCM:
if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"AES-CCM not supported by this mac rev\n");
return false;
}
@@ -152,15 +152,15 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
case ATH_CIPHER_TKIP:
keyType = AR_KEYTABLE_TYPE_TKIP;
if (entry + 64 >= common->keymax) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH_CIPHER_WEP:
if (k->kv_len < WLAN_KEY_LEN_WEP40) {
- ath_dbg(common, ATH_DBG_ANY,
- "WEP key length %u too small\n", k->kv_len);
+ ath_dbg(common, ANY, "WEP key length %u too small\n",
+ k->kv_len);
return false;
}
if (k->kv_len <= WLAN_KEY_LEN_WEP40)
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 65ecb5bab25..10dea37431b 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -21,6 +21,8 @@
#include "regd.h"
#include "regd_common.h"
+static int __ath_regd_init(struct ath_regulatory *reg);
+
/*
* This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate
@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
}
}
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (!memcmp(allCountries[i].isoName, alpha2, 2))
+ return allCountries[i].countryCode;
+ }
+
+ return -1;
+}
+
int ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ u16 country_code;
+
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
return 0;
switch (request->initiator) {
- case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
+ /*
+ * If common->reg_world_copy is world roaming it means we *were*
+ * world roaming... so we now have to restore that data.
+ */
+ if (!ath_is_world_regd(&common->reg_world_copy))
+ break;
+
+ memcpy(reg, &common->reg_world_copy,
+ sizeof(struct ath_regulatory));
+ break;
+ case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_USER:
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (ath_is_world_regd(reg))
- ath_reg_apply_world_flags(wiphy, request->initiator,
- reg);
+ if (!ath_is_world_regd(reg))
+ break;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ break;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+ reg->current_rd);
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
break;
}
@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
reg->current_rd = 0x64;
}
-int
-ath_regd_init(struct ath_regulatory *reg,
- struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->regDmnEnum);
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ int r;
+
+ r = __ath_regd_init(reg);
+ if (r)
+ return r;
+
+ if (ath_is_world_regd(reg))
+ memcpy(&common->reg_world_copy, reg,
+ sizeof(struct ath_regulatory));
+
ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
return 0;
}
EXPORT_SYMBOL(ath_regd_init);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 37110dfd2c9..16e8f805815 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -191,6 +191,9 @@
#define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */
#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
* with bluetooth */
+#define B43_BFH_NOCBUCK 0x0080
+#define B43_BFH_PALDO 0x0200
+#define B43_BFH_EXTLNA_5GHZ 0x1000 /* has an external LNA (5GHz mode) */
/* SPROM boardflags2_lo values */
#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
@@ -204,6 +207,14 @@
#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
#define B43_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
+#define B43_BFL2_SINGLEANT_CCK 0x1000
+#define B43_BFL2_2G_SPUR_WAR 0x2000
+
+/* SPROM boardflags2_hi values */
+#define B43_BFH2_GPLL_WAR2 0x0001
+#define B43_BFH2_IPALVLSHIFT_3P3 0x0002
+#define B43_BFH2_INTERNDET_TXIQCAL 0x0004
+#define B43_BFH2_XTALBUFOUTEN 0x0008
/* GPIO register offset, in both ChipCommon and PCI core. */
#define B43_GPIO_CONTROL 0x6c
@@ -667,6 +678,7 @@ struct b43_key {
};
/* SHM offsets to the QOS data structures for the 4 different queues. */
+#define B43_QOS_QUEUE_NUM 4
#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \
(B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0)
@@ -904,7 +916,7 @@ struct b43_wl {
struct work_struct beacon_update_trigger;
/* The current QOS parameters for the 4 queues. */
- struct b43_qos_params qos_params[4];
+ struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
/* Work for adjustment of the transmission power.
* This is scheduled when we determine that the actual TX output
@@ -913,8 +925,12 @@ struct b43_wl {
/* Packet transmit work */
struct work_struct tx_work;
+
/* Queue of packets to be transmitted. */
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM];
+
+ /* Flags that implement the stopping of the queues. */
+ bool tx_queue_stopped[B43_QOS_QUEUE_NUM];
/* The device LEDs. */
struct b43_leds leds;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 5e45604f0f5..b5f1b91002b 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
else
ring->ops = &dma32_ops;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1465,8 +1465,10 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
@@ -1584,12 +1586,21 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running wake the corresponding
+ * mac80211 queue. */
ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
- ring->stopped = 0;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
}
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index a38c1c6446a..d79ab2a227e 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
if (radio_enabled)
turn_on = atomic_read(&led->state) != LED_OFF;
else
- turn_on = 0;
+ turn_on = false;
if (turn_on == led->hw_state)
return;
led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- *activelow = 0;
+ *activelow = false;
switch (led_index) {
case 0:
*behaviour = B43_LED_ACTIVITY;
- *activelow = 1;
+ *activelow = true;
if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
- led->hw_state = 1;
+ led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
}
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 4c82d582a52..916123a3d74 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
const struct b43_rfatt *rfatt;
const struct b43_bbatt *bbatt;
u64 power_vector;
- bool table_changed = 0;
+ bool table_changed = false;
BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
| (val & 0x00FF);
}
- table_changed = 1;
+ table_changed = true;
}
if (table_changed) {
/* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
unsigned long now;
unsigned long expire;
struct b43_lo_calib *cal, *tmp;
- bool current_item_expired = 0;
+ bool current_item_expired = false;
bool hwpctl;
if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
B43_WARN_ON(current_item_expired);
- current_item_expired = 1;
+ current_item_expired = true;
}
if (b43_debug(dev, B43_DBG_LO)) {
b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5634d9a9965..1c6f19393ef 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
if (ps_flags & B43_PS_ENABLED) {
- hwps = 1;
+ hwps = true;
} else if (ps_flags & B43_PS_DISABLED) {
- hwps = 0;
+ hwps = false;
} else {
//TODO: If powersave is not off and FIXME is not set and we are not in adhoc
// and thus is not an AP and we are associated, set bit 25
}
if (ps_flags & B43_PS_AWAKE) {
- awake = 1;
+ awake = true;
} else if (ps_flags & B43_PS_ASLEEP) {
- awake = 0;
+ awake = false;
} else {
//TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
// or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
/* FIXME: For now we force awake-on and hwps-off */
- hwps = 0;
- awake = 1;
+ hwps = false;
+ awake = true;
macctl = b43_read32(dev, B43_MMIO_MACCTL);
if (hwps)
@@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
return;
if (dev->noisecalc.calculation_running)
return;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
average -= 48;
dev->stats.link_noise = average;
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
b43_write32(dev, B43_MMIO_MACCMD,
b43_read32(dev, B43_MMIO_MACCMD)
| B43_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
if (wl->beacon0_uploaded)
return;
b43_write_beacon_template(dev, 0x68, 0x18);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43_upload_beacon0(dev);
b43_upload_beacon1(dev);
cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
/* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = 1;
+ dev->use_pio = true;
b43_controller_restart(dev, "DMA error");
return;
}
@@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
filename = NULL;
else
goto err_no_pcm;
- fw->pcm_request_failed = 0;
+ fw->pcm_request_failed = false;
err = b43_do_request_fw(ctx, filename, &fw->pcm);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
- fw->pcm_request_failed = 1;
+ fw->pcm_request_failed = true;
} else if (err)
goto err_load;
@@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
dev->qos_enabled = !!modparam_qos;
/* Default to firmware/hardware crypto acceleration. */
- dev->hwcrypto_enabled = 1;
+ dev->hwcrypto_enabled = true;
if (dev->fw.opensource) {
u16 fwcapa;
@@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
/* Disable hardware crypto and fall back to software crypto. */
- dev->hwcrypto_enabled = 0;
+ dev->hwcrypto_enabled = false;
}
if (!(fwcapa & B43_FWCAPA_QOS)) {
b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
* ieee80211_unregister to make sure the networking core can
* properly free possible resources. */
dev->wl->hw->queues = 1;
- dev->qos_enabled = 0;
+ dev->qos_enabled = false;
}
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl)
wl->rng.name = wl->rng_name;
wl->rng.data_read = b43_rng_read;
wl->rng.priv = (unsigned long)wl;
- wl->rng_initialized = 1;
+ wl->rng_initialized = true;
err = hwrng_register(&wl->rng);
if (err) {
- wl->rng_initialized = 0;
+ wl->rng_initialized = false;
b43err(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
@@ -3378,6 +3378,7 @@ static void b43_tx_work(struct work_struct *work)
struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
struct b43_wldev *dev;
struct sk_buff *skb;
+ int queue_num;
int err = 0;
mutex_lock(&wl->mutex);
@@ -3387,15 +3388,26 @@ static void b43_tx_work(struct work_struct *work)
return;
}
- while (skb_queue_len(&wl->tx_queue)) {
- skb = skb_dequeue(&wl->tx_queue);
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43_using_pio_transfers(dev))
+ err = b43_pio_tx(dev, skb);
+ else
+ err = b43_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
- if (b43_using_pio_transfers(dev))
- err = b43_pio_tx(dev, skb);
- else
- err = b43_dma_tx(dev, skb);
- if (unlikely(err))
- dev_kfree_skb(skb); /* Drop it */
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
}
#if B43_DEBUG
@@ -3416,8 +3428,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue, skb);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ } else {
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+ }
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -3702,13 +3718,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
case IEEE80211_BAND_5GHZ:
if (d->phy.supports_5ghz) {
up_dev = d;
- gmode = 0;
+ gmode = false;
}
break;
case IEEE80211_BAND_2GHZ:
if (d->phy.supports_2ghz) {
up_dev = d;
- gmode = 1;
+ gmode = true;
}
break;
default:
@@ -4147,6 +4163,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
struct b43_wl *wl;
struct b43_wldev *orig_dev;
u32 mask;
+ int queue_num;
if (!dev)
return NULL;
@@ -4199,9 +4216,11 @@ redo:
mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
B43_WARN_ON(mask != 0xFFFFFFFF && mask);
- /* Drain the TX queue */
- while (skb_queue_len(&wl->tx_queue))
- dev_kfree_skb(skb_dequeue(&wl->tx_queue));
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
b43_mac_suspend(dev);
b43_leds_exit(dev);
@@ -4425,18 +4444,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
#if B43_DEBUG
- phy->phy_locked = 0;
- phy->radio_locked = 0;
+ phy->phy_locked = false;
+ phy->radio_locked = false;
#endif
}
static void setup_struct_wldev_for_init(struct b43_wldev *dev)
{
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4670,16 +4689,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (b43_bus_host_is_pcmcia(dev->dev) ||
b43_bus_host_is_sdio(dev->dev)) {
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else if (dev->use_pio) {
b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
"This should not be needed and will result in lower "
"performance.\n");
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else {
- dev->__using_pio_transfers = 0;
+ dev->__using_pio_transfers = false;
err = b43_dma_init(dev);
}
if (err)
@@ -4733,7 +4752,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4767,7 +4786,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
b43_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4789,12 +4808,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->radiotap_enabled = 0;
+ wl->radiotap_enabled = false;
b43_qos_clear(wl);
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -4840,7 +4859,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
goto out_unlock;
}
b43_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
out_unlock:
mutex_unlock(&wl->mutex);
@@ -5028,7 +5047,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
struct pci_dev *pdev = NULL;
int err;
u32 tmp;
- bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+ bool have_2ghz_phy = false, have_5ghz_phy = false;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
@@ -5071,7 +5090,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@@ -5082,11 +5101,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
(pdev->device != 0x4312 &&
pdev->device != 0x4319 && pdev->device != 0x4324)) {
/* No multiband support. */
- have_2ghz_phy = 0;
- have_5ghz_phy = 0;
+ have_2ghz_phy = false;
+ have_5ghz_phy = false;
switch (dev->phy.type) {
case B43_PHYTYPE_A:
- have_5ghz_phy = 1;
+ have_5ghz_phy = true;
break;
case B43_PHYTYPE_LP: //FIXME not always!
#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5096,7 +5115,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
case B43_PHYTYPE_N:
case B43_PHYTYPE_HT:
case B43_PHYTYPE_LCN:
- have_2ghz_phy = 1;
+ have_2ghz_phy = true;
break;
default:
B43_WARN_ON(1);
@@ -5112,8 +5131,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
if (dev->phy.type != B43_PHYTYPE_N &&
dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = 1;
- have_5ghz_phy = 0;
+ have_2ghz_phy = true;
+ have_5ghz_phy = false;
}
}
@@ -5245,6 +5264,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
struct ieee80211_hw *hw;
struct b43_wl *wl;
char chip_name[6];
+ int queue_num;
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
if (!hw) {
@@ -5264,7 +5284,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->queues = modparam_qos ? 4 : 1;
+ hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
wl->mac80211_initially_registered_queues = hw->queues;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
@@ -5281,7 +5301,12 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
- skb_queue_head_init(&wl->tx_queue);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
snprintf(chip_name, ARRAY_SIZE(chip_name),
(dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
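
The b43 changes above split the single TX queue into one sk_buff queue per QoS priority and add tx_queue_stopped[], so a full DMA ring only throttles its own mac80211 queue; the worker requeues the frame that hit -ENOSPC and the completion path restarts the worker. A minimal sketch of that backpressure loop, with my_* placeholder names and a my_hw_tx() stub standing in for the real PIO/DMA submit path.

/* Sketch of the per-queue backpressure loop introduced above. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#define MY_NUM_QUEUES 4

struct my_wl {
	struct ieee80211_hw *hw;
	struct work_struct tx_work;
	struct sk_buff_head tx_queue[MY_NUM_QUEUES];
	bool tx_queue_stopped[MY_NUM_QUEUES];
};

static int my_hw_tx(struct my_wl *wl, struct sk_buff *skb)
{
	/* Stand-in for the hardware submit path; the real one returns
	 * -ENOSPC when the ring has no free slots. */
	return 0;
}

static void my_tx_work(struct work_struct *work)
{
	struct my_wl *wl = container_of(work, struct my_wl, tx_work);
	struct sk_buff *skb;
	int q, err = 0;

	for (q = 0; q < MY_NUM_QUEUES; q++) {
		while (skb_queue_len(&wl->tx_queue[q])) {
			skb = skb_dequeue(&wl->tx_queue[q]);
			err = my_hw_tx(wl, skb);
			if (err == -ENOSPC) {
				/* Ring full: stop only this mac80211 queue and
				 * put the frame back so nothing is lost. */
				wl->tx_queue_stopped[q] = true;
				ieee80211_stop_queue(wl->hw, q);
				skb_queue_head(&wl->tx_queue[q], skb);
				break;
			}
			if (unlikely(err))
				dev_kfree_skb(skb); /* drop on hard error */
			err = 0;
		}
		if (!err)
			wl->tx_queue_stopped[q] = false;
	}
}

The TX-status handler in the dma.c hunk does the matching wake-up: it clears the per-ring stop flag, wakes the mac80211 queue if the driver queue is running, and requeues the worker so buffered frames drain promptly.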
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3ea44bb0368..3f8883b14d9 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(dev->phy.radio_locked);
- dev->phy.radio_locked = 1;
+ dev->phy.radio_locked = true;
#endif
macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(!dev->phy.radio_locked);
- dev->phy.radio_locked = 0;
+ dev->phy.radio_locked = false;
#endif
/* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(dev->phy.phy_locked);
- dev->phy.phy_locked = 1;
+ dev->phy.phy_locked = true;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(!dev->phy.phy_locked);
- dev->phy.phy_locked = 0;
+ dev->phy.phy_locked = false;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 8e157bc213f..12f467b8d56 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
if (b43_phy_read(dev, 0x0033) & 0x0800)
break;
- gphy->aci_enable = 1;
+ gphy->aci_enable = true;
phy_stacksave(B43_PHY_RADIO_BITFIELD);
phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
if (!(b43_phy_read(dev, 0x0033) & 0x0800))
break;
- gphy->aci_enable = 0;
+ gphy->aci_enable = false;
phy_stackrestore(B43_PHY_RADIO_BITFIELD);
phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
bbatt.att = 11;
if (phy->radio_rev == 8) {
rfatt.att = 15;
- rfatt.with_padmix = 1;
+ rfatt.with_padmix = true;
} else {
rfatt.att = 9;
- rfatt.with_padmix = 0;
+ rfatt.with_padmix = false;
}
b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
}
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
struct b43_bus_dev *bdev = dev->dev;
struct b43_phy *phy = &dev->phy;
- rf->with_padmix = 0;
+ rf->with_padmix = false;
if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 8:
rf->att = 0xA;
- rf->with_padmix = 1;
+ rf->with_padmix = true;
return;
case 9:
default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
(phy->radio_ver != 0x2050)); /* Not supported anymore */
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
pab1, pab2);
if (!gphy->tssi2dbm)
return -ENOMEM;
- gphy->dyn_tssi_tbl = 1;
+ gphy->dyn_tssi_tbl = true;
} else {
/* pabX values not set in SPROM. */
gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
if (gphy->dyn_tssi_tbl)
kfree(gphy->tssi2dbm);
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
gphy->tssi2dbm = NULL;
kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
if (phy->rev == 1) {
/* Workaround: Temporarily disable gmode through the early init
* phase, as the gmode stuff is not needed for phy rev 1 */
- phy->gmode = 0;
+ phy->gmode = false;
b43_wireless_core_reset(dev, 0);
b43_phy_initg(dev);
- phy->gmode = 1;
+ phy->gmode = true;
b43_wireless_core_reset(dev, 1);
}
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
gphy->radio_off_context.rfover);
b43_phy_write(dev, B43_PHY_RFOVERVAL,
gphy->radio_off_context.rfoverval);
- gphy->radio_off_context.valid = 0;
+ gphy->radio_off_context.valid = false;
}
channel = phy->channel;
b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
gphy->radio_off_context.rfover = rfover;
gphy->radio_off_context.rfoverval = rfoverval;
- gphy->radio_off_context.valid = 1;
+ gphy->radio_off_context.valid = true;
b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
}
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
if ((phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- gphy->aci_wlan_automatic = 0;
+ gphy->aci_wlan_automatic = false;
switch (mode) {
case B43_INTERFMODE_AUTOWLAN:
- gphy->aci_wlan_automatic = 1;
+ gphy->aci_wlan_automatic = true;
if (gphy->aci_enable)
mode = B43_INTERFMODE_MANUALWLAN;
else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
b43_radio_interference_mitigation_disable(dev, currentmode);
if (mode == B43_INTERFMODE_NONE) {
- gphy->aci_enable = 0;
- gphy->aci_hw_rssi = 0;
+ gphy->aci_enable = false;
+ gphy->aci_hw_rssi = false;
} else
b43_radio_interference_mitigation_enable(dev, mode);
gphy->interfmode = mode;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index f93d66b1817..3ae28561f7a 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 1;
+ lpphy->crs_usr_disable = true;
else
- lpphy->crs_sys_disable = 1;
+ lpphy->crs_sys_disable = true;
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 0;
+ lpphy->crs_usr_disable = false;
else
- lpphy->crs_sys_disable = 0;
+ lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
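
The two flags set in lpphy_set_deaf() act as independent holds on carrier sense; lpphy_clear_deaf() re-enables it only when both are released. A sketch of that rule with an illustrative helper name:

static bool example_crs_may_be_enabled(const struct b43_phy_lp *lpphy)
{
	/* Carrier sense is restored only when neither hold is active. */
	return !lpphy->crs_usr_disable && !lpphy->crs_sys_disable;
}
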
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6c33a..bf5a4385535 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -78,19 +78,6 @@ enum b43_nphy_rssi_type {
B43_NPHY_RSSI_TBD,
};
-/* TODO: reorder functions */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
- bool enable);
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length);
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq);
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off);
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core);
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev);
-
static inline bool b43_nphy_ipa(struct b43_wldev *dev)
{
enum ieee80211_band band = b43_current_band(dev->wl);
@@ -98,57 +85,394 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
}
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
-{//TODO
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ if (dev->dev->chip_id == 47162)
+ return txpwrctrl_tx_gain_ipa_rev5;
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
}
-static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
-{//TODO
+/**************************************************
+ * RF (just without b43_nphy_rf_control_intc_override)
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq)
+{
+ static const u16 trigger[] = {
+ [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
+ [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
+ [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
+ [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
+ [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
+ [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
+ };
+ int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
+ b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
+ for (i = 0; i < 200; i++) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
+ goto ok;
+ msleep(1);
+ }
+ b43err(dev->wl, "RF sequence status timeout\n");
+ok:
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
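
b43_nphy_force_rf_sequence() above uses the usual trigger-then-poll idiom: set the trigger bit, then poll the status register until it clears or roughly 200 ms pass. A generic sketch of that idiom with illustrative names:

static int example_poll_until_clear(struct b43_wldev *dev, u16 reg, u16 bit,
				    unsigned int timeout_ms)
{
	unsigned int i;

	for (i = 0; i < timeout_ms; i++) {
		if (!(b43_phy_read(dev, reg) & bit))
			return 0;		/* hardware finished */
		msleep(1);
	}
	return -ETIMEDOUT;			/* caller reports the timeout */
}
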
-static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
- bool ignore_tssi)
-{//TODO
- return B43_TXPWR_RES_DONE;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << i) & core)) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((1 << i) & core)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
}
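
The index math at the top of b43_nphy_rf_control_override() relies on fls() returning the 1-based position of the highest set bit, so a single-bit field maps directly onto the override tables:

/*
 * Example: field = 0x0004
 *   fls(0x0004) = 3            -> index = 3
 *   1 << (index - 1) = 0x0004  -> B43_WARN_ON() fires only if bits other
 *                                 than that single bit were passed in
 *   table entry used           -> tbl_rf_control_override_rev3[index - 1]
 *                                 (the rev2 table is indexed by index - 2)
 */
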
-static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry_rev2 *e)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
{
- b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ u8 i, j;
+ u16 reg, tmp, val;
- b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
- b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
- b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
- b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
+}
- b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+ const u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
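
b43_nphy_classifier() above follows the driver's usual mask/value convention: bits selected by "mask" are taken from "val" and every other classifier bit keeps its current value. For a 16-bit register that is equivalent to this plain read-modify-write (illustrative helper):

static u16 example_apply_mask_val(u16 current_bits, u16 mask, u16 val)
{
	/* Bits selected by "mask" come from "val", the rest are kept. */
	return (current_bits & ~mask) | (val & mask);
}
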
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_phy_force_clock(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_force_clock(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (enable) {
+ static const u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
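
The deaf_count counter above makes the carrier-search helper safe to nest: classifier and clip-detection state is saved only on the outermost enable and restored only on the matching outermost disable. A minimal sketch of that contract (illustrative names):

static void example_deaf_get(struct b43_phy_n *nphy)
{
	if (nphy->deaf_count++ == 0) {
		/* Outermost enable: save classifier/clip state here. */
	}
}

static void example_deaf_put(struct b43_phy_n *nphy)
{
	if (--nphy->deaf_count == 0) {
		/* Outermost disable: restore the saved state here. */
	}
}
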
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * dev->phy.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * dev->phy.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
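
The 5 GHz gain computation above is a divide-by-8192 with round-to-nearest: the 13-bit shift gives the truncated quotient and the extra bit adds one when the discarded remainder is at least half of 8192. Worked through for channel 36, using the constants in the code above:

/*
 *   tmp                = 40370 - 315 * 36 = 29030
 *   29030 >> 13        = 3     (29030 / 8192, truncated)
 *   (29030 >> 12) & 1  = 1     (remainder 4454 >= 4096, so round up)
 *   gain[0]            = 4
 */
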
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/**************************************************
+ * Radio 0x2056
+ **************************************************/
+
static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
@@ -228,10 +552,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ u16 offset;
+ u8 i;
+ u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
B43_WARN_ON(dev->phy.rev < 3);
b43_chantab_radio_2056_upload(dev, e);
- /* TODO */
+ b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+ if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ if (dev->dev->chip_id == 0x4716) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+ } else {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+ }
+ }
+ if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+ }
+
+ if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < 2; i++) {
+ offset = i ? B2056_TX1 : B2056_TX0;
+ if (dev->phy.rev >= 5) {
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_IDAC, 0xcc);
+
+ if (dev->dev->chip_id == 0x4716) {
+ bias = 0x40;
+ cbias = 0x45;
+ pag_boost = 0x5;
+ pgag_boost = 0x33;
+ mixg_boost = 0x55;
+ } else {
+ bias = 0x25;
+ cbias = 0x20;
+ pag_boost = 0x4;
+ pgag_boost = 0x03;
+ mixg_boost = 0x65;
+ }
+ padg_boost = 0x77;
+
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ cbias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_BOOST_TUNE,
+ pag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PGAG_BOOST_TUNE,
+ pgag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_BOOST_TUNE,
+ padg_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_MIXG_BOOST_TUNE,
+ mixg_boost);
+ } else {
+ bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ 0x30);
+ }
+ b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+ }
+ } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ /* TODO */
+ }
+
udelay(50);
/* VCO calibration */
b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -242,285 +654,84 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
udelay(300);
}
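
b43_radio_2056_setup() above applies the PLL loop-filter workarounds only when the matching SPROM board flag is set and the current band agrees (GPLL_WAR on 2.4 GHz, APLL_WAR on 5 GHz). The 2.4 GHz condition reduces to something like this (illustrative helper):

static bool example_needs_gpll_war(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	return (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR) &&
	       b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
}
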
-static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_phy_n_sfo_cfg *e)
-{
- b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
- b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
- b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
- b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
- b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
- b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
-static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+static void b43_radio_init2056_pre(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u16 bmask, val, tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- nphy->txpwrctrl = enable;
- if (!enable) {
- if (dev->phy.rev >= 3 &&
- (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
- (B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN |
- B43_NPHY_TXPCTL_CMD_PCTLEN))) {
- /* We disable enabled TX pwr ctl, save it's state */
- nphy->tx_pwr_idx[0] = b43_phy_read(dev,
- B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
- nphy->tx_pwr_idx[1] = b43_phy_read(dev,
- B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
- }
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3)
- tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
-
- if (dev->phy.rev >= 3) {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
- } else {
- b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
- nphy->adj_pwr_tbl);
- b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
- nphy->adj_pwr_tbl);
-
- bmask = B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- /* wl does useless check for "enable" param here */
- val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3) {
- bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- if (val)
- val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- }
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
-
- if (band == IEEE80211_BAND_5GHZ) {
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
- ~B43_NPHY_TXPCTL_INIT_PIDXI1,
- 0x64);
- }
-
- if (dev->phy.rev >= 3) {
- if (nphy->tx_pwr_idx[0] != 128 &&
- nphy->tx_pwr_idx[1] != 128) {
- /* Recover TX pwr ctl state */
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT,
- nphy->tx_pwr_idx[0]);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev,
- B43_NPHY_TXPCTL_INIT,
- ~0xff, nphy->tx_pwr_idx[1]);
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
-
- if (b43_nphy_ipa(dev)) {
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
- }
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_CHIP0PU);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
-static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+static void b43_radio_init2056_post(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- u8 txpi[2], bbmult, i;
- u16 tmp, radio_gain, dac_gain;
- u16 freq = dev->phy.channel_freq;
- u32 txgain;
- /* u32 gaintbl; rev3+ */
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (dev->phy.rev >= 3) {
- txpi[0] = 40;
- txpi[1] = 40;
- } else if (sprom->revision < 4) {
- txpi[0] = 72;
- txpi[1] = 72;
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- txpi[0] = sprom->txpid2g[0];
- txpi[1] = sprom->txpid2g[1];
- } else if (freq >= 4900 && freq < 5100) {
- txpi[0] = sprom->txpid5gl[0];
- txpi[1] = sprom->txpid5gl[1];
- } else if (freq >= 5100 && freq < 5500) {
- txpi[0] = sprom->txpid5g[0];
- txpi[1] = sprom->txpid5g[1];
- } else if (freq >= 5500) {
- txpi[0] = sprom->txpid5gh[0];
- txpi[1] = sprom->txpid5gh[1];
- } else {
- txpi[0] = 91;
- txpi[1] = 91;
- }
- }
-
+ b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
+ b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
+ b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
+ msleep(1);
+ b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
+ b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
+ b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
/*
- for (i = 0; i < 2; i++) {
- nphy->txpwrindex[i].index_internal = txpi[i];
- nphy->txpwrindex[i].index_internal_save = txpi[i];
- }
+ if (nphy->init_por)
+ Call Radio 2056 Recalibrate
*/
+}
- for (i = 0; i < 2; i++) {
- if (dev->phy.rev >= 3) {
- /* FIXME: support 5GHz */
- txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFFF;
- } else {
- txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFF;
- }
-
- dac_gain = (txgain >> 8) & 0x3F;
- bbmult = txgain & 0xFF;
-
- if (dev->phy.rev >= 3) {
- if (i == 0)
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- else
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (i == 0)
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
- else
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
-
- b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
-
- tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
- if (i == 0)
- tmp = (tmp & 0x00FF) | (bbmult << 8);
- else
- tmp = (tmp & 0xFF00) | bbmult;
- b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
-
- if (b43_nphy_ipa(dev)) {
- u32 tmp32;
- u16 reg = (i == 0) ?
- B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
- tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
- b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
- b43_phy_set(dev, reg, 0x4);
- }
- }
-
- b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
+{
+ b43_radio_init2056_pre(dev);
+ b2056_upload_inittabs(dev, 0, 0);
+ b43_radio_init2056_post(dev);
}
-static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+/**************************************************
+ * Radio 0x2055
+ **************************************************/
+
+static void b43_chantab_radio_upload(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
{
- struct b43_phy *phy = &dev->phy;
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- const u32 *table = NULL;
-#if 0
- TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
- u32 rfpwr_offset;
- u8 pga_gain;
- int i;
-#endif
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- table = b43_nphy_get_ipa_gain_table(dev);
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- if (phy->rev == 3)
- table = b43_ntab_tx_gain_rev3_5ghz;
- if (phy->rev == 4)
- table = b43_ntab_tx_gain_rev4_5ghz;
- else
- table = b43_ntab_tx_gain_rev5plus_5ghz;
- } else {
- table = b43_ntab_tx_gain_rev3plus_2ghz;
- }
- }
- } else {
- table = b43_ntab_tx_gain_rev0_1_2;
- }
- b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
- b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
-#if 0
- nphy->gmval = (table[0] >> 16) & 0x7000;
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- for (i = 0; i < 128; i++) {
- pga_gain = (table[i] >> 24) & 0xF;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
- else
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
- b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
- rfpwr_offset);
- b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
- rfpwr_offset);
- }
-#endif
- }
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
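
The repeated b43_read32(dev, B43_MMIO_MACCTL) calls in the relocated b43_chantab_radio_upload() above are dummy reads used as posted-write flushes: reading any core register forces the preceding batch of radio writes out on the bus before the next batch is queued. The pattern in isolation (illustrative helper name):

static inline void example_flush_posted_writes(struct b43_wldev *dev)
{
	/* Reading any core register pushes out the preceding MMIO writes. */
	(void)b43_read32(dev, B43_MMIO_MACCTL);
}
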
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
@@ -622,1089 +833,9 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-static void b43_radio_init2056_pre(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_CHIP0PU);
- /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_CHIP0PU);
-}
-
-static void b43_radio_init2056_post(struct b43_wldev *dev)
-{
- b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
- b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
- b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
- msleep(1);
- b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
- b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
- b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- /*
- if (nphy->init_por)
- Call Radio 2056 Recalibrate
- */
-}
-
-/*
- * Initialize a Broadcom 2056 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
- */
-static void b43_radio_init2056(struct b43_wldev *dev)
-{
- b43_radio_init2056_pre(dev);
- b2056_upload_inittabs(dev, 0, 0);
- b43_radio_init2056_post(dev);
-}
-
-/*
- * Upload the N-PHY tables.
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
- */
-static void b43_nphy_tables_init(struct b43_wldev *dev)
-{
- if (dev->phy.rev < 3)
- b43_nphy_rev0_1_2_tables_init(dev);
- else
- b43_nphy_rev3plus_tables_init(dev);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
-static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- enum ieee80211_band band;
- u16 tmp;
-
- if (!enable) {
- nphy->rfctrl_intc1_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC1);
- nphy->rfctrl_intc2_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC2);
- band = b43_current_band(dev->wl);
- if (dev->phy.rev >= 3) {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x600;
- else
- tmp = 0x480;
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x180;
- else
- tmp = 0x120;
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
- } else {
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
- nphy->rfctrl_intc1_save);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
- nphy->rfctrl_intc2_save);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
-static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
-{
- u16 tmp;
-
- if (dev->phy.rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- tmp = 4;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-
- tmp = 1;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
-{
- u16 bbcfg;
-
- b43_phy_force_clock(dev, 1);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
- udelay(1);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- b43_phy_force_clock(dev, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
-static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
-{
- u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
-
- mimocfg |= B43_NPHY_MIMOCFG_AUTO;
- if (preamble == 1)
- mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
- else
- mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
-
- b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
-static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- bool override = false;
- u16 chain = 0x33;
-
- if (nphy->txrx_chain == 0) {
- chain = 0x11;
- override = true;
- } else if (nphy->txrx_chain == 1) {
- chain = 0x22;
- override = true;
- }
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
- chain);
-
- if (override)
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER);
- else
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~B43_NPHY_RFSEQMODE_CAOVER);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
-static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
- u16 samps, u8 time, bool wait)
-{
- int i;
- u16 tmp;
-
- b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
- b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
- if (wait)
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
- else
- b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
-
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
-
- for (i = 1000; i; i--) {
- tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
- if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
- est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
- est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
- est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
-
- est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
- est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
- est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
- return;
- }
- udelay(10);
- }
- memset(est, 0, sizeof(*est));
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
-static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
- struct b43_phy_n_iq_comp *pcomp)
-{
- if (write) {
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
- } else {
- pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
- pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
- pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
- pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
- }
-}
-
-#if 0
-/* Ready but not used anywhere */
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
-static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
-{
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
- if (core == 0) {
- b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
- } else {
- b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
-static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
-{
- u8 rxval, txval;
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
- if (core == 0) {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
- } else {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
- }
- regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
- regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
- regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
- regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
- regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
- regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
- regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
-
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
- ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
- ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
- (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
- (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
-
- if (core == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
- }
-
- b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
- b43_nphy_rf_control_override(dev, 8, 0, 3, false);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
-
- if (core == 0) {
- rxval = 1;
- txval = 8;
- } else {
- rxval = 4;
- txval = 2;
- }
- b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
- b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
-}
-#endif
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
-static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
-{
- int i;
- s32 iq;
- u32 ii;
- u32 qq;
- int iq_nbits, qq_nbits;
- int arsh, brsh;
- u16 tmp, a, b;
-
- struct nphy_iq_est est;
- struct b43_phy_n_iq_comp old;
- struct b43_phy_n_iq_comp new = { };
- bool error = false;
-
- if (mask == 0)
- return;
-
- b43_nphy_rx_iq_coeffs(dev, false, &old);
- b43_nphy_rx_iq_coeffs(dev, true, &new);
- b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
- new = old;
-
- for (i = 0; i < 2; i++) {
- if (i == 0 && (mask & 1)) {
- iq = est.iq0_prod;
- ii = est.i0_pwr;
- qq = est.q0_pwr;
- } else if (i == 1 && (mask & 2)) {
- iq = est.iq1_prod;
- ii = est.i1_pwr;
- qq = est.q1_pwr;
- } else {
- continue;
- }
-
- if (ii + qq < 2) {
- error = true;
- break;
- }
-
- iq_nbits = fls(abs(iq));
- qq_nbits = fls(qq);
-
- arsh = iq_nbits - 20;
- if (arsh >= 0) {
- a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
- tmp = ii >> arsh;
- } else {
- a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
- tmp = ii << -arsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- a /= tmp;
-
- brsh = qq_nbits - 11;
- if (brsh >= 0) {
- b = (qq << (31 - qq_nbits));
- tmp = ii >> brsh;
- } else {
- b = (qq << (31 - qq_nbits));
- tmp = ii << -brsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- b = int_sqrt(b / tmp - a * a) - (1 << 10);
-
- if (i == 0 && (mask & 0x1)) {
- if (dev->phy.rev >= 3) {
- new.a0 = a & 0x3FF;
- new.b0 = b & 0x3FF;
- } else {
- new.a0 = b & 0x3FF;
- new.b0 = a & 0x3FF;
- }
- } else if (i == 1 && (mask & 0x2)) {
- if (dev->phy.rev >= 3) {
- new.a1 = a & 0x3FF;
- new.b1 = b & 0x3FF;
- } else {
- new.a1 = b & 0x3FF;
- new.b1 = a & 0x3FF;
- }
- }
- }
-
- if (error)
- new = old;
-
- b43_nphy_rx_iq_coeffs(dev, true, &new);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
-static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
-{
- u16 array[4];
- b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
-
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
- const u16 *clip_st)
-{
- b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
-{
- clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
- clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
-static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
-{
- if (dev->phy.rev >= 3) {
- if (!init)
- return;
- if (0 /* FIXME */) {
- b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
- b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
- b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
- b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
- }
- } else {
- b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
- b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
- 0xFC00, 0xFC00);
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
- 0xFC00, 0xFC00);
- break;
-#endif
- }
-
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL) &
- ~B43_MACCTL_GPOUTSMSK);
- b43_write16(dev, B43_MMIO_GPIO_MASK,
- b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
- b43_write16(dev, B43_MMIO_GPIO_CONTROL,
- b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
-
- if (init) {
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
-static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
-{
- u16 tmp;
-
- if (dev->dev->core_rev == 16)
- b43_mac_suspend(dev);
-
- tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
- tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
- B43_NPHY_CLASSCTL_WAITEDEN);
- tmp &= ~mask;
- tmp |= (val & mask);
- b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
-
- if (dev->dev->core_rev == 16)
- b43_mac_enable(dev);
-
- return tmp;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (enable) {
- static const u16 clip[] = { 0xFFFF, 0xFFFF };
- if (nphy->deaf_count++ == 0) {
- nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
- b43_nphy_read_clip_detection(dev, nphy->clip_state);
- b43_nphy_write_clip_detection(dev, clip);
- }
- b43_nphy_reset_cca(dev);
- } else {
- if (--nphy->deaf_count == 0) {
- b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
- b43_nphy_write_clip_detection(dev, nphy->clip_state);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
-static void b43_nphy_stop_playback(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u16 tmp;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
- if (tmp & 0x1)
- b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
- else if (tmp & 0x2)
- b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
-
- b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
-
- if (nphy->bb_mult_save & 0x80000000) {
- tmp = nphy->bb_mult_save & 0xFFFF;
- b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- nphy->bb_mult_save = 0;
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
-static void b43_nphy_spur_workaround(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 channel = dev->phy.channel;
- int tone[2] = { 57, 58 };
- u32 noise[2] = { 0x3FF, 0x3FF };
-
- B43_WARN_ON(dev->phy.rev < 3);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gband_spurwar_en) {
- /* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && dev->phy.is_40mhz)
- ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- /* TODO: N PHY Adjust CRS Min Power (0x1E) */
- }
-
- if (nphy->aband_spurwar_en) {
- if (channel == 54) {
- tone[0] = 0x20;
- noise[0] = 0x25F;
- } else if (channel == 38 || channel == 102 || channel == 118) {
- if (0 /* FIXME */) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
- } else if (channel == 134) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else if (channel == 151) {
- tone[0] = 0x10;
- noise[0] = 0x23F;
- } else if (channel == 153 || channel == 161) {
- tone[0] = 0x30;
- noise[0] = 0x23F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
-
- if (!tone[0] && !noise[0])
- ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
-static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 i;
- s16 tmp;
- u16 data[4];
- s16 gain[2];
- u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- gain[0] = 6;
- gain[1] = 6;
- } else {
- tmp = 40370 - 315 * dev->phy.channel;
- gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
- tmp = 23242 - 224 * dev->phy.channel;
- gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
- }
- } else {
- gain[0] = 0;
- gain[1] = 0;
- }
-
- for (i = 0; i < 2; i++) {
- if (nphy->elna_gain_config) {
- data[0] = 19 + gain[i];
- data[1] = 25 + gain[i];
- data[2] = 25 + gain[i];
- data[3] = 25 + gain[i];
- } else {
- data[0] = lna_gain[0] + gain[i];
- data[1] = lna_gain[1] + gain[i];
- data[2] = lna_gain[2] + gain[i];
- data[3] = lna_gain[3] + gain[i];
- }
- b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
-
- minmax[i] = 23 + gain[i];
- }
-
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
- minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
- minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
-static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* PHY rev 0, 1, 2 */
- u8 i, j;
- u8 code;
- u16 tmp;
- u8 rfseq_events[3] = { 6, 8, 7 };
- u8 rfseq_delays[3] = { 10, 30, 1 };
-
- /* PHY rev >= 3 */
- bool ghz5;
- bool ext_lna;
- u16 rssi_gain;
- struct nphy_gain_ctl_workaround_entry *e;
- u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
- u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
-
- if (dev->phy.rev >= 3) {
- /* Prepare values */
- ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
- & B43_NPHY_BANDCTL_5GHZ;
- ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
- e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
- if (ghz5 && dev->phy.rev >= 5)
- rssi_gain = 0x90;
- else
- rssi_gain = 0x50;
-
- b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
-
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
- b43_phy_write(dev, 0x2A7, e->init_gain);
- b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
- e->rfseq_init);
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
-
- /* TODO: check defines. Do not match variables names */
- b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
- b43_phy_write(dev, 0x2A9, e->cliphi_gain);
- b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
- b43_phy_write(dev, 0x2AB, e->clipmd_gain);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
- b43_phy_write(dev, 0x2AD, e->cliplo_gain);
-
- b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
- b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
- b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
- } else {
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
-
- if (!dev->phy.is_40mhz) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
- }
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21);
-
- if (!dev->phy.is_40mhz) {
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
- ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
- ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
- }
-
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
- dev->phy.is_40mhz)
- code = 4;
- else
- code = 5;
- } else {
- code = dev->phy.is_40mhz ? 6 : 7;
- }
-
- /* Set HPVGA2 index */
- b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
- ~B43_NPHY_C1_INITGAIN_HPVGA2,
- code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
- ~B43_NPHY_C2_INITGAIN_HPVGA2,
- code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x7C));
-
- b43_nphy_adjust_lna_gain_table(dev);
-
- if (nphy->elna_gain_config) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x74));
- }
-
- if (dev->phy.rev == 2) {
- for (i = 0; i < 4; i++) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (0x0400 * i) + 0x0020);
- for (j = 0; j < 21; j++) {
- tmp = j * (i < 2 ? 3 : 1);
- b43_phy_write(dev,
- B43_NPHY_TABLE_DATALO, tmp);
- }
- }
- }
-
- b43_nphy_set_rf_sequence(dev, 5,
- rfseq_events, rfseq_delays, 3);
- b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
- ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
- 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- b43_phy_maskset(dev, B43_PHY_N(0xC5D),
- 0xFF80, 4);
- }
-}
-
-static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* TX to RX */
- u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
- /* RX to TX */
- u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
- 0x1F };
- u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
- u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
-
- u16 tmp16;
- u32 tmp32;
-
- tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
- tmp32 &= 0xffffff;
- b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
-
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
-
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
- b43_phy_write(dev, 0x2AE, 0x000C);
-
- /* TX to RX */
- b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
-
- /* RX to TX */
- if (b43_nphy_ipa(dev))
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
- rx2tx_delays_ipa, 9);
- if (nphy->hw_phyrxchain != 3 &&
- nphy->hw_phyrxchain != nphy->hw_phytxchain) {
- if (b43_nphy_ipa(dev)) {
- rx2tx_delays[5] = 59;
- rx2tx_delays[6] = 1;
- rx2tx_events[7] = 0x1F;
- }
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
- }
-
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
- 0x2 : 0x9C40;
- b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
-
- b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
-
- b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
- b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
- b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
-
- /* TODO */
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-
- /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
-
- if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
- tmp32 = 0x00088888;
- else
- tmp32 = 0x88888888;
- b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
-
- if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
- 0x70);
- b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
- 0x70);
- }
-
- b43_phy_write(dev, 0x224, 0x039C);
- b43_phy_write(dev, 0x225, 0x0357);
- b43_phy_write(dev, 0x226, 0x0317);
- b43_phy_write(dev, 0x227, 0x02D7);
- b43_phy_write(dev, 0x228, 0x039C);
- b43_phy_write(dev, 0x229, 0x0357);
- b43_phy_write(dev, 0x22A, 0x0317);
- b43_phy_write(dev, 0x22B, 0x02D7);
- b43_phy_write(dev, 0x22C, 0x039C);
- b43_phy_write(dev, 0x22D, 0x0357);
- b43_phy_write(dev, 0x22E, 0x0317);
- b43_phy_write(dev, 0x22F, 0x02D7);
-}
-
-static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
-{
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
- u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
-
- u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
- u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
- nphy->band5g_pwrgain) {
- b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
- b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
- } else {
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
- }
-
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
-
- if (dev->phy.rev < 2) {
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
- }
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
- dev->dev->board_type == 0x8B) {
- delays1[0] = 0x1;
- delays1[5] = 0x14;
- }
- b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
- b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- if (dev->phy.rev < 2) {
- if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_MLADVW);
- } else if (dev->phy.rev == 2) {
- b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
- b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
- }
-
- if (dev->phy.rev < 2)
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
-
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
-
- b43_phy_mask(dev, B43_NPHY_PIL_DW1,
- ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
-
- if (dev->phy.rev == 2)
- b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
- B43_NPHY_FINERX2_CGC_DECGC);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
-static void b43_nphy_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
- b43_nphy_classifier(dev, 1, 0);
- else
- b43_nphy_classifier(dev, 1, 1);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
-
- if (dev->phy.rev >= 3)
- b43_nphy_workarounds_rev3plus(dev);
- else
- b43_nphy_workarounds_rev1_2(dev);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
+/**************************************************
+ * Samples
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int b43_nphy_load_samples(struct b43_wldev *dev,
@@ -1825,7 +956,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
}
for (i = 0; i < 100; i++) {
- if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) {
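+ /*
+ * The RF sequencer went idle: clearing i presumably keeps the timeout
+ * error reported right after this loop from being raised.
+ */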
i = 0;
break;
}
@@ -1837,333 +968,9 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
-/*
- * Transmits a known value for LO calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
- */
-static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
- bool iqmode, bool dac_test)
-{
- u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
- if (samp == 0)
- return -1;
- b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
- return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
-static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- int i, j;
- u32 tmp;
- u32 cur_real, cur_imag, real_part, imag_part;
-
- u16 buffer[7];
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
-
- for (i = 0; i < 2; i++) {
- tmp = ((buffer[i * 2] & 0x3FF) << 10) |
- (buffer[i * 2 + 1] & 0x3FF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 320));
- for (j = 0; j < 128; j++) {
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- for (i = 0; i < 2; i++) {
- tmp = buffer[5 + i];
- real_part = (tmp >> 8) & 0xFF;
- imag_part = (tmp & 0xFF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 448));
-
- if (dev->phy.rev >= 3) {
- cur_real = real_part;
- cur_imag = imag_part;
- tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
- }
-
- for (j = 0; j < 128; j++) {
- if (dev->phy.rev < 3) {
- cur_real = (real_part * loscale[j] + 128) >> 8;
- cur_imag = (imag_part * loscale[j] + 128) >> 8;
- tmp = ((cur_real & 0xFF) << 8) |
- (cur_imag & 0xFF);
- }
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
- u16 offset1 = cmd << 4;
- u16 offset2 = offset1 + 0x80;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
-
- for (i = length; i < 16; i++) {
- b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
- b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq)
-{
- static const u16 trigger[] = {
- [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
- [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
- [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
- [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
- [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
- [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
- };
- int i;
- u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
-
- B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
-
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
- b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
- for (i = 0; i < 200; i++) {
- if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
- goto ok;
- msleep(1);
- }
- b43err(dev->wl, "RF sequence status timeout\n");
-ok:
- b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off)
-{
- int i;
- u8 index = fls(field);
- u8 addr, en_addr, val_addr;
- /* we expect only one bit set */
- B43_WARN_ON(field & (~(1 << (index - 1))));
-
- if (dev->phy.rev >= 3) {
- const struct nphy_rf_control_override_rev3 *rf_ctrl;
- for (i = 0; i < 2; i++) {
- if (index == 0 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
- en_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
- val_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
-
- if (off) {
- b43_phy_mask(dev, en_addr, ~(field));
- b43_phy_mask(dev, val_addr,
- ~(rf_ctrl->val_mask));
- } else {
- if (core == 0 || ((1 << core) & i) != 0) {
- b43_phy_set(dev, en_addr, field);
- b43_phy_maskset(dev, val_addr,
- ~(rf_ctrl->val_mask),
- (value << rf_ctrl->val_shift));
- }
- }
- }
- } else {
- const struct nphy_rf_control_override_rev2 *rf_ctrl;
- if (off) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
- value = 0;
- } else {
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
- }
-
- for (i = 0; i < 2; i++) {
- if (index <= 1 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- if (index == 2 || index == 10 ||
- (index >= 13 && index <= 15)) {
- core = 1;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
- addr = B43_PHY_N((i == 0) ?
- rf_ctrl->addr0 : rf_ctrl->addr1);
-
- if ((core & (1 << i)) != 0)
- b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
- (value << rf_ctrl->shift));
-
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(1);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core)
-{
- u8 i, j;
- u16 reg, tmp, val;
-
- B43_WARN_ON(dev->phy.rev < 3);
- B43_WARN_ON(field > 4);
-
- for (i = 0; i < 2; i++) {
- if ((core == 1 && i == 1) || (core == 2 && !i))
- continue;
-
- reg = (i == 0) ?
- B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
- b43_phy_mask(dev, reg, 0xFBFF);
-
- switch (field) {
- case 0:
- b43_phy_write(dev, reg, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
- break;
- case 1:
- if (!i) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE);
- } else {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_RXTX);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE);
- }
- break;
- case 2:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0020;
- val = value << 5;
- } else {
- tmp = 0x0010;
- val = value << 4;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 3:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0001;
- val = value;
- } else {
- tmp = 0x0004;
- val = value << 2;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 4:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0002;
- val = value << 1;
- } else {
- tmp = 0x0008;
- val = value << 3;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
-static void b43_nphy_bphy_init(struct b43_wldev *dev)
-{
- unsigned int i;
- u16 val;
-
- val = 0x1E1F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
- val -= 0x202;
- }
- val = 0x3E3F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
- val -= 0x202;
- }
- b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
-}
+/**************************************************
+ * RSSI
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
@@ -2233,67 +1040,6 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
}
-static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
-{
- u16 val;
-
- if (type < 3)
- val = 0;
- else if (type == 6)
- val = 1;
- else if (type == 3)
- val = 2;
- else
- val = 3;
-
- val = (val << 12) | (val << 14);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
-
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
- (type + 1) << 4);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
- (type + 1) << 4);
- }
-
- if (code == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
- if (type < 3) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL));
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- ~(0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL),
- (B43_NPHY_RFCTL_CMD_RXEN |
- code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
- (0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- }
-}
-
static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
u8 i;
@@ -2377,6 +1123,67 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
}
}
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
+ if (type < 3) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ ~(0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL),
+ (B43_NPHY_RFCTL_CMD_RXEN |
+ code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
+ (0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
@@ -2686,6 +1493,1413 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
}
}
+/**************************************************
+ * Workarounds
+ **************************************************/
+
+static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ bool ghz5;
+ bool ext_lna;
+ u16 rssi_gain;
+ struct nphy_gain_ctl_workaround_entry *e;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+
+ /* Prepare values */
+ ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+ & B43_NPHY_BANDCTL_5GHZ;
+ ext_lna = ghz5 ? sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ :
+ sprom->boardflags_lo & B43_BFL_EXTLNA;
+ e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+ if (ghz5 && dev->phy.rev >= 5)
+ rssi_gain = 0x90;
+ else
+ rssi_gain = 0x50;
+
+ b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+ b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+ b43_phy_write(dev, 0x2A7, e->init_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+ e->rfseq_init);
+
+ /* TODO: check the defines; they do not match the variable names */
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+ b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+ b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+ b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+ b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+ b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+ b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+ b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+}
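+
+/*
+ * A rough sketch of the workaround entry used above, reconstructed only from
+ * the way its fields are consumed in this function (types and array sizes
+ * are assumptions; the real definition presumably lives alongside the N-PHY
+ * tables code):
+ *
+ *	struct nphy_gain_ctl_workaround_entry {
+ *		s8 lna1_gain[4], lna2_gain[4];
+ *		u8 gain_db[10], gain_bits[10];
+ *		u16 init_gain, rfseq_init[2];
+ *		u16 cliphi_gain, clipmd_gain, cliplo_gain;
+ *		u16 crsmin, crsminl, crsminu;
+ *		u16 nbclip, wlclip;
+ *	};
+ *
+ * b43_nphy_get_gain_ctl_workaround_ent() picks one such entry per
+ * (band, external LNA) combination; the code above only spreads its values
+ * over the PHY gain tables and registers.
+ */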
+
+static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i, j;
+ u8 code;
+ u16 tmp;
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
+
+ /* Set HPVGA2 index */
+ b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2,
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2,
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));
+
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
+
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++) {
+ tmp = j * (i < 2 ? 3 : 1);
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, tmp);
+ }
+ }
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_gain_ctl_workarounds_rev3plus(dev);
+ else
+ b43_nphy_gain_ctl_workarounds_rev1_2(dev);
+}
+
+static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ /* TX to RX */
+ u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ /* RX to TX */
+ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+ 0x1F };
+ u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
+ u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+
+ u16 tmp16;
+ u32 tmp32;
+
+ b43_phy_write(dev, 0x23f, 0x1f8);
+ b43_phy_write(dev, 0x240, 0x1f8);
+
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+ b43_phy_write(dev, 0x2AE, 0x000C);
+
+ /* TX to RX */
+ b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+ ARRAY_SIZE(tx2rx_events));
+
+ /* RX to TX */
+ if (b43_nphy_ipa(dev))
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+ rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+ if (nphy->hw_phyrxchain != 3 &&
+ nphy->hw_phyrxchain != nphy->hw_phytxchain) {
+ if (b43_nphy_ipa(dev)) {
+ rx2tx_delays[5] = 59;
+ rx2tx_delays[6] = 1;
+ rx2tx_events[7] = 0x1F;
+ }
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+ ARRAY_SIZE(rx2tx_events));
+ }
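+
+ /*
+ * With an internal PA and asymmetric RX/TX chains the delays above are
+ * retuned and rx2tx_events[7] is replaced by 0x1F, which appears to be
+ * the sequence end/padding marker (b43_nphy_set_rf_sequence() fills
+ * unused slots with 0x1F on rev 3+), so the 0x4 event is effectively
+ * dropped from the RX-to-TX sequence.
+ */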
+
+ tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ 0x2 : 0x9C40;
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+ b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
+
+ /* TODO */
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+ /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ tmp32 = 0x00088888;
+ else
+ tmp32 = 0x88888888;
+ b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+ if (dev->phy.rev == 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ }
+
+ b43_phy_write(dev, 0x224, 0x03eb);
+ b43_phy_write(dev, 0x225, 0x03eb);
+ b43_phy_write(dev, 0x226, 0x0341);
+ b43_phy_write(dev, 0x227, 0x0341);
+ b43_phy_write(dev, 0x228, 0x042b);
+ b43_phy_write(dev, 0x229, 0x042b);
+ b43_phy_write(dev, 0x22a, 0x0381);
+ b43_phy_write(dev, 0x22b, 0x0381);
+ b43_phy_write(dev, 0x22c, 0x042b);
+ b43_phy_write(dev, 0x22d, 0x042b);
+ b43_phy_write(dev, 0x22e, 0x0381);
+ b43_phy_write(dev, 0x22f, 0x0381);
+}
+
+static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
+ dev->dev->board_type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ b43_hf_write(dev, b43_hf_read(dev) |
+ B43_HF_MLADVW);
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3)
+ b43_nphy_workarounds_rev3plus(dev);
+ else
+ b43_nphy_workarounds_rev1_2(dev);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/**************************************************
+ * Tx/Rx common
+ **************************************************/
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
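+
+/*
+ * The idle-TSSI measurement below uses this as
+ * b43_nphy_tx_tone(dev, 0xFA0, 0, false, false), i.e. a 4000 (presumably
+ * kHz) tone with iqmode and dac_test disabled; a non-zero return value
+ * means sample generation failed and nothing was transmitted.
+ */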
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
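+
+/*
+ * The chain value lands in the TXEN/RXEN fields of RFSEQCA, so 0x33 appears
+ * to enable both cores for TX and RX while 0x11/0x22 restrict the RF
+ * sequencer to core 0 or core 1; setting CAOVER then makes the sequencer
+ * honour this override instead of its automatic core selection.
+ */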
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
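+
+/*
+ * For rev < 3 the target gains are packed into a lookup key
+ * (pad | pga << 4 | txgm << 8) and matched against tbl_iqcal_gainparams[]
+ * for the current band, with i = min(i, 8) falling back to the last table
+ * row when there is no exact match; for rev 3+ the target gains are used
+ * directly and packed into cal_gain as txgm<<12 | pga<<8 | pad<<4 | ipa.
+ */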
+
+/**************************************************
+ * Tx and Rx
+ **************************************************/
+
+void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
+{//TODO
+}
+
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
+{//TODO
+}
+
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+ bool ignore_tssi)
+{//TODO
+ return B43_TXPWR_RES_DONE;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 bmask, val, tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ nphy->txpwrctrl = enable;
+ if (!enable) {
+ if (dev->phy.rev >= 3 &&
+ (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
+ (B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN |
+ B43_NPHY_TXPCTL_CMD_PCTLEN))) {
+ /* We disable enabled TX pwr ctl, save its state */
+ nphy->tx_pwr_idx[0] = b43_phy_read(dev,
+ B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
+ nphy->tx_pwr_idx[1] = b43_phy_read(dev,
+ B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
+ }
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3)
+ tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
+ nphy->adj_pwr_tbl);
+ b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
+ nphy->adj_pwr_tbl);
+
+ bmask = B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ /* wl does a useless check of the "enable" param here */
+ val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3) {
+ bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ if (val)
+ val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ }
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1,
+ 0x64);
+ }
+
+ if (dev->phy.rev >= 3) {
+ if (nphy->tx_pwr_idx[0] != 128 &&
+ nphy->tx_pwr_idx[1] != 128) {
+ /* Recover TX pwr ctl state */
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT,
+ nphy->tx_pwr_idx[0]);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev,
+ B43_NPHY_TXPCTL_INIT,
+ ~0xff, nphy->tx_pwr_idx[1]);
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
+
+ if (b43_nphy_ipa(dev)) {
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
+ }
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
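+
+/*
+ * Note on the save/restore above: the saved per-core power indexes are
+ * masked with 0x7f, so they can never equal 128; 128 therefore presumably
+ * acts as the "nothing saved" sentinel, which is why the restore path only
+ * runs when both tx_pwr_idx[] entries differ from 128.
+ */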
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
+static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ u8 txpi[2], bbmult, i;
+ u16 tmp, radio_gain, dac_gain;
+ u16 freq = dev->phy.channel_freq;
+ u32 txgain;
+ /* u32 gaintbl; rev3+ */
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev >= 7) {
+ txpi[0] = txpi[1] = 30;
+ } else if (dev->phy.rev >= 3) {
+ txpi[0] = 40;
+ txpi[1] = 40;
+ } else if (sprom->revision < 4) {
+ txpi[0] = 72;
+ txpi[1] = 72;
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txpi[0] = sprom->txpid2g[0];
+ txpi[1] = sprom->txpid2g[1];
+ } else if (freq >= 4900 && freq < 5100) {
+ txpi[0] = sprom->txpid5gl[0];
+ txpi[1] = sprom->txpid5gl[1];
+ } else if (freq >= 5100 && freq < 5500) {
+ txpi[0] = sprom->txpid5g[0];
+ txpi[1] = sprom->txpid5g[1];
+ } else if (freq >= 5500) {
+ txpi[0] = sprom->txpid5gh[0];
+ txpi[1] = sprom->txpid5gh[1];
+ } else {
+ txpi[0] = 91;
+ txpi[1] = 91;
+ }
+ }
+ if (dev->phy.rev < 7 &&
+ (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+ txpi[0] = txpi[1] = 91;
+
+ /*
+ for (i = 0; i < 2; i++) {
+ nphy->txpwrindex[i].index_internal = txpi[i];
+ nphy->txpwrindex[i].index_internal_save = txpi[i];
+ }
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+ txpi[i]);
+ } else if (b43_current_band(dev->wl) ==
+ IEEE80211_BAND_5GHZ) {
+ /* FIXME: use 5GHz tables */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ } else {
+ if (dev->phy.rev >= 5 &&
+ sprom->fem.ghz5.extpa_gain == 3)
+ ; /* FIXME: 5GHz_txgain_HiPwrEPA */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ }
+ radio_gain = (txgain >> 16) & 0x1FFFF;
+ } else {
+ txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+ radio_gain = (txgain >> 16) & 0x1FFF;
+ }
+
+ if (dev->phy.rev >= 7)
+ dac_gain = (txgain >> 8) & 0x7;
+ else
+ dac_gain = (txgain >> 8) & 0x3F;
+ bbmult = txgain & 0xFF;
+
+ if (dev->phy.rev >= 3) {
+ if (i == 0)
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ else
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (i == 0)
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
+ else
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
+
+ b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
+ if (i == 0)
+ tmp = (tmp & 0x00FF) | (bbmult << 8);
+ else
+ tmp = (tmp & 0xFF00) | bbmult;
+ b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
+
+ if (b43_nphy_ipa(dev)) {
+ u32 tmp32;
+ u16 reg = (i == 0) ?
+ B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+ 576 + txpi[i]));
+ b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
+ b43_phy_set(dev, reg, 0x4);
+ }
+ }
+
+ b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
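+
+/*
+ * Layout of a TX gain table entry as unpacked above (derived from the
+ * shifts and masks in the loop, not from a spec):
+ *
+ *	bits 31:16  radio gain (masked with 0x1FFFF on rev 3+, 0x1FFF earlier)
+ *	bits 13:8   DAC gain   (only bits 10:8 on rev 7+)
+ *	bits  7:0   baseband multiplier (bbmult)
+ *
+ * The bbmult byte is then merged into the upper or lower half of table
+ * entry (0xF, 0x57) depending on the core.
+ */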
+
+static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ u8 core;
+ u16 r; /* routing */
+
+ if (phy->rev >= 7) {
+ for (core = 0; core < 2; core++) {
+ r = core ? 0x190 : 0x170;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r + 0x5, 0x5);
+ b43_radio_write(dev, r + 0x9, 0xE);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 0);
+ if (phy->rev != 7)
+ b43_radio_write(dev, r + 0xB, 1);
+ else
+ b43_radio_write(dev, r + 0xB, 0x31);
+ } else {
+ b43_radio_write(dev, r + 0x5, 0x9);
+ b43_radio_write(dev, r + 0x9, 0xC);
+ b43_radio_write(dev, r + 0xB, 0x0);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 1);
+ else
+ b43_radio_write(dev, r + 0xA, 0x31);
+ }
+ b43_radio_write(dev, r + 0x6, 0);
+ b43_radio_write(dev, r + 0x7, 0);
+ b43_radio_write(dev, r + 0x8, 3);
+ b43_radio_write(dev, r + 0xC, 0);
+ }
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
+ else
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
+ b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);
+
+ for (core = 0; core < 2; core++) {
+ r = core ? B2056_TX1 : B2056_TX0;
+
+ b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
+ b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
+ b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x5);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIA,
+ 0x00);
+ if (phy->rev >= 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x31);
+ else
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x11);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xE);
+ } else {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x9);
+ b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
+ b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xC);
+ }
+ }
+ }
+}
+
+/*
+ * Stop radio and transmit known signal. Then check received signal strength to
+ * get TSSI (Transmit Signal Strength Indicator).
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
+ */
+static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u32 tmp;
+ s32 rssi[4] = { };
+
+ /* TODO: check if we can transmit */
+
+ if (b43_nphy_ipa(dev))
+ b43_nphy_ipa_internal_tssi_setup(dev);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
+
+ b43_nphy_stop_playback(dev);
+ b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
+ udelay(20);
+ tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
+ b43_nphy_stop_playback(dev);
+ b43_nphy_rssi_select(dev, 0, 0);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
+
+ if (phy->rev >= 3) {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
+ } else {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
+ }
+ nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
+}
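+
+/*
+ * b43_nphy_poll_rssi() appears to pack its four results one byte each into
+ * the returned word; the unpacking above takes the per-core idle TSSI from
+ * bytes 3 and 1 (bytes 2 and 0 for the 5 GHz values on rev < 3).
+ */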
+
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const u32 *table = NULL;
+#if 0
+ TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
+ u32 rfpwr_offset;
+ u8 pga_gain;
+ int i;
+#endif
+
+ if (phy->rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (phy->rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ else if (phy->rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+
+ if (phy->rev >= 3) {
+#if 0
+ nphy->gmval = (table[0] >> 16) & 0x7000;
+
+ for (i = 0; i < 128; i++) {
+ pga_gain = (table[i] >> 24) & 0xF;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+ else
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+ b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
+ rfpwr_offset);
+ b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
+ rfpwr_offset);
+ }
+#endif
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ u16 tmp;
+
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
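+
+/*
+ * The nested shift/or expression above simply replicates a 3-bit value into
+ * four adjacent 3-bit fields: tmp = 1 gives 0x249 (001 001 001 001b) and
+ * tmp = 4 gives 0x924 (100 100 100 100b).
+ */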
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
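+
+/*
+ * Each accumulator is read back as a HI:LO register pair and assembled into
+ * 32 bits. The poll allows up to 1000 * 10us (10 ms) for the estimate to
+ * finish; on timeout the whole struct is zeroed, which
+ * b43_nphy_calc_rx_iq_comp() below detects as ii + qq < 2.
+ */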
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+#if 0
+/* Ready but not used anywhere */
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+#endif
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
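+ /* The fixed-point math below evaluates, in (roughly) Q10 format,
+ * a ~= -1024 * iq / ii (rounded)
+ * b ~= 1024 * (sqrt(qq / ii - (iq / ii)^2) - 1)
+ * the fls()-based shifts only normalize the operands so that the
+ * divide and int_sqrt() stay within 32 bits. */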
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 channel = dev->phy.channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
/*
* Restore RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
@@ -2729,24 +2943,6 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
-{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (dev->phy.rev >= 6) {
- if (dev->dev->chip_id == 47162)
- return txpwrctrl_tx_gain_ipa_rev5;
- return txpwrctrl_tx_gain_ipa_rev6;
- } else if (dev->phy.rev >= 5) {
- return txpwrctrl_tx_gain_ipa_rev5;
- } else {
- return txpwrctrl_tx_gain_ipa;
- }
- } else {
- return txpwrctrl_tx_gain_ipa_5g;
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
{
@@ -2841,44 +3037,6 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
}
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
-static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
- struct nphy_txgains target,
- struct nphy_iqcal_params *params)
-{
- int i, j, indx;
- u16 gain;
-
- if (dev->phy.rev >= 3) {
- params->txgm = target.txgm[core];
- params->pga = target.pga[core];
- params->pad = target.pad[core];
- params->ipa = target.ipa[core];
- params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
- (params->pad << 4) | (params->ipa);
- for (j = 0; j < 5; j++)
- params->ncorr[j] = 0x79;
- } else {
- gain = (target.pad[core]) | (target.pga[core] << 4) |
- (target.txgm[core] << 8);
-
- indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
- 1 : 0;
- for (i = 0; i < 9; i++)
- if (tbl_iqcal_gainparams[indx][i][0] == gain)
- break;
- i = min(i, 8);
-
- params->txgm = tbl_iqcal_gainparams[indx][i][1];
- params->pga = tbl_iqcal_gainparams[indx][i][2];
- params->pad = tbl_iqcal_gainparams[indx][i][3];
- params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
- (params->pad << 2);
- for (j = 0; j < 4; j++)
- params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
{
@@ -3258,7 +3416,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (dev->phy.rev >= 4) {
avoid = nphy->hang_avoid;
- nphy->hang_avoid = 0;
+ nphy->hang_avoid = false;
}
b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3368,7 +3526,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (phy6or5x && updated[core] == 0) {
b43_nphy_update_tx_cal_ladder(dev, core);
- updated[core] = 1;
+ updated[core] = true;
}
tmp = (params[core].ncorr[type] << 8) | 0x66;
@@ -3730,10 +3888,104 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
b43_mac_enable(dev);
}
+/**************************************************
+ * N-PHY init
+ **************************************************/
+
/*
- * Init N-PHY
- * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
*/
+static void b43_nphy_tables_init(struct b43_wldev *dev)
+{
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
+static void b43_nphy_bphy_init(struct b43_wldev *dev)
+{
+ unsigned int i;
+ u16 val;
+
+ val = 0x1E1F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
+ val -= 0x202;
+ }
+ val = 0x3E3F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
+ val -= 0x202;
+ }
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+ }
+
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
int b43_phy_initn(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -3857,7 +4109,7 @@ int b43_phy_initn(struct b43_wldev *dev)
tx_pwr_state = nphy->txpwrctrl;
b43_nphy_tx_power_ctrl(dev, false);
b43_nphy_tx_power_fix(dev);
- /* TODO N PHY TX Power Control Idle TSSI */
+ b43_nphy_tx_power_ctl_idle_tssi(dev);
/* TODO N PHY TX Power Control Setup */
b43_nphy_tx_gain_table_upload(dev);
@@ -3928,6 +4180,91 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+static void b43_chantab_phy_upload(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e)
+{
+ b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
+ b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
+ b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
+ b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
+ b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
+ b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+ struct bcma_drv_cc __maybe_unused *cc;
+ u32 __maybe_unused pmu_ctl;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ cc = &dev->dev->bdev->bus->drv_cc;
+ if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else if (dev->dev->chip_id == 0x4716) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+ BCMA_CC_PMU_CTL_NOILPONW;
+ } else if (dev->dev->chip_id == 0x4322 ||
+ dev->dev->chip_id == 0x4340 ||
+ dev->dev->chip_id == 0x4341) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+ bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+ if (avoid)
+ bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+ else
+ bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else {
+ return;
+ }
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* FIXME */
+ break;
+#endif
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4272,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = dev->phy.n;
+ int ch = new_channel->hw_value;
u16 old_band_5ghz;
u32 tmp32;
@@ -3974,8 +4312,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_tx_lp_fbw(dev);
- if (dev->phy.rev >= 3 && 0) {
- /* TODO */
+ if (dev->phy.rev >= 3 &&
+ dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+ bool avoid = false;
+ if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+ avoid = true;
+ } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+ if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+ avoid = true;
+ } else { /* 40MHz */
+ if (nphy->aband_spurwar_en &&
+ (ch == 38 || ch == 102 || ch == 118))
+ avoid = dev->dev->chip_id == 0x4716;
+ }
+
+ b43_nphy_pmu_spur_avoid(dev, avoid);
+
+ if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+ dev->dev->chip_id == 43225) {
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+ avoid ? 0x5341 : 0x8889);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ }
+
+ if (dev->phy.rev == 3 || dev->phy.rev == 4)
+ ; /* TODO: reset PLL */
+
+ if (avoid)
+ b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+ else
+ b43_phy_mask(dev, B43_NPHY_BBCFG,
+ ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+ b43_nphy_reset_cca(dev);
+
+ /* wl sets useless phy_isspuravoid here */
}
b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4039,6 +4410,10 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
return 0;
}
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -4055,10 +4430,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+ nphy->spur_avoid = (phy->rev >= 3) ?
+ B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4445,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
* 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128;
+
+ /* Hardware TX power control and 5GHz power gain */
+ nphy->txpwrctrl = false;
+ nphy->pwg_gain_5ghz = false;
+ if (dev->phy.rev >= 3 ||
+ (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+ (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+ nphy->txpwrctrl = true;
+ nphy->pwg_gain_5ghz = true;
+ } else if (sprom->revision >= 4) {
+ if (dev->phy.rev >= 2 &&
+ (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+ nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+ struct pci_dev *pdev =
+ dev->dev->sdev->bus->host_pci;
+ if (pdev->device == 0x4328 ||
+ pdev->device == 0x432a)
+ nphy->pwg_gain_5ghz = true;
+ }
+#endif
+ } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+ nphy->pwg_gain_5ghz = true;
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+ nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+ }
}
static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf520285bd..5de8f74cc02 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
struct b43_wldev;
+enum b43_nphy_spur_avoid {
+ B43_SPUR_AVOID_DISABLE,
+ B43_SPUR_AVOID_AUTO,
+ B43_SPUR_AVOID_FORCE,
+};
+
struct b43_chanspec {
u16 center_freq;
enum nl80211_channel_type channel_type;
@@ -759,6 +765,11 @@ struct b43_phy_n_txpwrindex {
u16 locomp;
};
+struct b43_phy_n_pwr_ctl_info {
+ u8 idle_tssi_2g;
+ u8 idle_tssi_5g;
+};
+
struct b43_phy_n {
u8 antsel_type;
u8 cal_orig_pwr_idx[2];
@@ -785,12 +796,14 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl;
+ bool pwg_gain_5ghz;
u8 tx_pwr_idx[2];
u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2];
struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
@@ -803,6 +816,7 @@ struct b43_phy_n {
u16 classifier_state;
u16 clip_state[2];
+ enum b43_nphy_spur_avoid spur_avoid;
bool aband_spurwar_en;
bool gband_spurwar_en;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcff923b3c1..3533ab86bd3 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Not enough memory on the queue. */
err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
goto out;
}
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
(q->free_packet_slots == 0)) {
/* The queue is full. */
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
}
out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
if (q->stopped) {
ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
- q->stopped = 0;
+ q->stopped = false;
}
}
@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
const char *err_msg = NULL;
struct b43_rxhdr_fw4 *rxhdr =
(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+ size_t rxhdr_size = sizeof(*rxhdr);
BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
- memset(rxhdr, 0, sizeof(*rxhdr));
+ switch (dev->fw.hdr_format) {
+ case B43_FW_HDR_410:
+ case B43_FW_HDR_351:
+ rxhdr_size -= sizeof(rxhdr->format_598) -
+ sizeof(rxhdr->format_351);
+ break;
+ case B43_FW_HDR_598:
+ break;
+ }
+ memset(rxhdr, 0, rxhdr_size);
/* Check if we have data and wait for it to get ready. */
if (q->rev >= 8) {
@@ -657,11 +667,11 @@ data_ready:
/* Get the preamble (RX header) */
if (q->rev >= 8) {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
} else {
- b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+ b43_block_read(dev, rxhdr, rxhdr_size,
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
}
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776ca4d..ce037fb6789 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
- [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
- [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
[B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
B2056_RX1, pts->rx, pts->rx_length);
}
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+ struct b2056_inittabs_pts *pts;
+ const struct b2056_inittab_entry *e;
+
+ if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ B43_WARN_ON(1);
+ return;
+ }
+ pts = &b2056_inittabs[dev->phy.rev];
+ e = &pts->syn[B2056_SYN_PLL_CP2];
+
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8578b..5b86673459f 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2efdc..f7def13524d 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
0x0000, 0x0000,
};
+/* volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+ {
+ 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+ 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+ 0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+ 0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+ 0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+ 0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+ 0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+ 0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+ 0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+ 0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+ 0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x03cc,
+ }
+};
+
/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
const s16 tbl_tx_filter_coef_rev4[7][15] = {
{ -377, 137, -407, 208, -1527,
956, 93, 186, 93, 230,
- -44, 230, 20, -191, 201 },
+ -44, 230, 201, -191, 201 },
{ -77, 20, -98, 49, -93,
60, 56, 111, 56, 26,
-5, 26, 34, -32, 34 },
@@ -2710,7 +2752,18 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
-struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
+ { 10, 14, 19, 27 },
+ { -5, 6, 10, 15 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x427E,
+ { 0x413F, 0x413F, 0x413F, 0x413F },
+ 0x007E, 0x0066, 0x1074,
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x5,
+};
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = {
{ /* 2GHz */
{ /* PHY rev 3 */
{ 7, 11, 16, 23 },
@@ -2734,15 +2787,26 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x18, 0x18, 0x18,
0x01A1, 0x5,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 9, 13, 18, 26 },
{ -3, 7, 11, 16 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x427E, /* invalid for external LNA! */
{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
- 0x1076, 0x0066, 0x106A,
- 0xC, 0xC, 0xC,
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x9,
+ },
+ { /* PHY rev 6+ */
+ { 8, 13, 18, 25 },
+ { -5, 6, 10, 14 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x527E, /* invalid for external LNA! */
+ { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
0x01D0, 0x5,
},
},
@@ -2769,7 +2833,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x0107, 25,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 6, 10, 16, 21 },
{ -7, 0, 4, 8 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
@@ -2780,6 +2844,17 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x00A9, 25,
},
+ { /* PHY rev 6+ */
+ { 6, 10, 16, 21 },
+ { -7, 0, 4, 8 },
+ { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+ 0x729E,
+ { 0x714F, 0x714F, 0x714F, 0x714F },
+ 0x029E, 0x2084, 0x2086,
+ 0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */
+ 0x00F0, 25,
+ },
},
};
@@ -2838,9 +2913,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
break;
case B43_NTAB_32BIT:
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
- value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- value <<= 16;
- value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
break;
default:
B43_WARN_ON(1);
@@ -2864,6 +2938,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2954,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
data += 2;
break;
case B43_NTAB_32BIT:
- *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- *((u32 *)data) <<= 16;
- *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) =
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) |=
+ b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
data += 4;
break;
default:
@@ -2932,6 +3013,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+ dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
value = *data;
@@ -2999,6 +3087,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
} while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
/* Static tables */
ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3119,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
- /* TODO */
+ if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+ ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+ b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+ else
+ B43_WARN_ON(1);
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
@@ -3037,26 +3131,67 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
+ u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
+ dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
+ if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
+ return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
B43_WARN_ON(dev->phy.rev < 3);
- if (dev->phy.rev >= 5)
+ if (dev->phy.rev >= 6)
+ phy_idx = 3;
+ else if (dev->phy.rev == 5)
phy_idx = 2;
else if (dev->phy.rev == 4)
phy_idx = 1;
else
phy_idx = 0;
-
e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
- /* Only one entry differs for external LNA, so instead making whole
- * table 2 times bigger, hack is here
- */
- if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
- e->rfseq_init[0] &= 0x0FFF;
- e->rfseq_init[1] &= 0x0FFF;
- e->rfseq_init[2] &= 0x0FFF;
- e->rfseq_init[3] &= 0x0FFF;
- e->init_gain &= 0x0FFF;
+ /* Some workarounds to the workarounds... */
+ if (ghz5 && dev->phy.rev >= 6) {
+ if (dev->phy.radio_rev == 11 &&
+ !b43_channel_type_is_40mhz(dev->phy.channel_type))
+ e->cliplo_gain = 0x2d;
+ } else if (!ghz5 && dev->phy.rev >= 5) {
+ if (ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ }
+ switch (tr_iso) {
+ case 0:
+ e->cliplo_gain = 0x0062;
+ break;
+ case 1:
+ e->cliplo_gain = 0x0064;
+ break;
+ case 2:
+ e->cliplo_gain = 0x006a;
+ break;
+ case 3:
+ e->cliplo_gain = 0x106a;
+ break;
+ case 4:
+ e->cliplo_gain = 0x106c;
+ break;
+ case 5:
+ e->cliplo_gain = 0x1074;
+ break;
+ case 6:
+ e->cliplo_gain = 0x107c;
+ break;
+ case 7:
+ e->cliplo_gain = 0x207c;
+ break;
+ default:
+ e->cliplo_gain = 0x106a;
+ break;
+ }
+ } else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ e->rfseq_init[0] |= 0x1000;
+ e->rfseq_init[1] |= 0x1000;
+ e->rfseq_init[2] |= 0x1000;
+ e->rfseq_init[3] |= 0x1000;
+ e->init_gain |= 0x1000;
}
return e;
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696bff0e..97038c48193 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
+
/* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
-#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
-#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
-#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
-#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
-#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
-#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
-#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
-#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
-#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
-#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
-#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
-#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 5f77cbe0b6a..2c5367884b3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
struct ieee80211_tx_info *report,
const struct b43_txstatus *status)
{
- bool frame_success = 1;
+ bool frame_success = true;
int retry_limit;
/* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
/* The frame was not ACKed... */
if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ...but we expected an ACK. */
- frame_success = 0;
+ frame_success = false;
}
}
if (status->frame_count == 0) {
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 1d4fc9db7f5..98e3d44400c 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -560,8 +560,16 @@ struct b43legacy_key {
u8 algorithm;
};
+#define B43legacy_QOS_QUEUE_NUM 4
+
struct b43legacy_wldev;
+/* QOS parameters for a queue. */
+struct b43legacy_qos_params {
+ /* The QOS parameters */
+ struct ieee80211_tx_queue_params p;
+};
+
/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */
struct b43legacy_wl {
/* Pointer to the active wireless device on this chip */
@@ -611,6 +619,18 @@ struct b43legacy_wl {
bool beacon1_uploaded;
bool beacon_templates_virgin; /* Never wrote the templates? */
struct work_struct beacon_update_trigger;
+ /* The current QOS parameters for the 4 queues. */
+ struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM];
+
+ /* Packet transmit work */
+ struct work_struct tx_work;
+
+ /* Queue of packets to be transmitted. */
+ struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM];
+
+ /* Flags that implement the stopping of the queues. */
+ bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM];
+
};
/* Pointers to the firmware data and meta information about it. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c5535adf699..f1f8bd09bd8 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -727,7 +727,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
} else
B43legacy_WARN_ON(1);
}
- spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
ring->last_injected_overflow = jiffies;
#endif
@@ -806,7 +805,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +819,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -858,7 +857,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
"Falling back to PIO\n");
- dev->__using_pio = 1;
+ dev->__using_pio = true;
return -EAGAIN;
#else
b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1067,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
@@ -1144,10 +1143,8 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
{
struct b43legacy_dmaring *ring;
int err = 0;
- unsigned long flags;
ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
if (unlikely(ring->stopped)) {
@@ -1157,16 +1154,14 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
* For now, just refuse the transmit. */
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacyerr(dev->wl, "Packet after queue stopped\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
/* If we get here, we have a real error with the queue
* full, but queues not stopped. */
b43legacyerr(dev->wl, "DMA queue overflow\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
@@ -1176,25 +1171,23 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
- err = 0;
- goto out_unlock;
+ return 0;
}
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
- goto out_unlock;
+ return err;
}
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Stopped TX ring %d\n",
ring->index);
}
-out_unlock:
- spin_unlock_irqrestore(&ring->lock, flags);
-
return err;
}
@@ -1205,14 +1198,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
int slot;
+ int firstused;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
- B43legacy_WARN_ON(!irqs_disabled());
- spin_lock(&ring->lock);
-
B43legacy_WARN_ON(!ring->tx);
+
+ /* Sanity check: TX packets are processed in-order on one ring.
+ * Check if the slot deduced from the cookie really is the first
+ * used slot. */
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
+ if (unlikely(slot != firstused)) {
+ /* This is possibly a firmware bug and will result in
+ * malfunction, memory leaks and/or stall of DMA functionality.
+ */
+ b43legacydbg(dev->wl, "Out of order TX status report on DMA "
+ "ring %d. Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
+ }
+
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
op32_idx2desc(ring, slot, &meta);
@@ -1285,14 +1293,21 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
dev->stats.last_tx = jiffies;
if (ring->stopped) {
B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
- ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 0;
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running wake the corresponding
+ * mac80211 queue. */
+ ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
- ring->index);
+ ring->index);
}
-
- spin_unlock(&ring->lock);
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43legacy_dmaring *ring,
@@ -1415,22 +1430,14 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_suspend(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_resume(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 504a58767e9..c3282f906bc 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -150,8 +150,9 @@ struct b43legacy_dmaring {
enum b43legacy_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
- /* Lock, only used for TX. */
- spinlock_t lock;
+ /* The QOS priority assigned to this ring. Only used for TX rings.
+ * This is the mac80211 "queue" value. */
+ u8 queue_prio;
struct b43legacy_wldev *dev;
#ifdef CONFIG_B43LEGACY_DEBUG
/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 2f1bfdc44f9..fd4565389c7 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
if (sprom[i] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- activelow = 0;
+ activelow = false;
switch (i) {
case 0:
behaviour = B43legacy_LED_ACTIVITY;
- activelow = 1;
+ activelow = true;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
behaviour = B43legacy_LED_RADIO_ALL;
break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 20f02437af8..75e70bce40f 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
macctl &= ~B43legacy_MACCTL_GMODE;
if (flags & B43legacy_TMSLOW_GMODE) {
macctl |= B43legacy_MACCTL_GMODE;
- dev->phy.gmode = 1;
+ dev->phy.gmode = true;
} else
- dev->phy.gmode = 0;
+ dev->phy.gmode = false;
macctl |= B43legacy_MACCTL_IHR_ENABLED;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
}
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
dev->stats.link_noise = average;
drop_calculation:
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43legacy_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
* but we don't use that feature anyway. */
b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
&__b43legacy_ratetable[3]);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43legacy_upload_beacon0(dev);
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -2440,30 +2440,64 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
return err;
}
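+/*
+ * Deferred TX path: b43legacy_op_tx() only queues the frame on the
+ * per-queue sk_buff_head and schedules this worker. The worker feeds
+ * the frames to PIO/DMA; on -ENOSPC it puts the frame back, marks the
+ * software queue as stopped and stops the mac80211 queue. The TX status
+ * handler clears the flag (or wakes the mac80211 queue) and re-schedules
+ * the worker.
+ */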
+static void b43legacy_tx_work(struct work_struct *work)
+{
+ struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
+ tx_work);
+ struct b43legacy_wldev *dev;
+ struct sk_buff *skb;
+ int queue_num;
+ int err = 0;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43legacy_using_pio(dev))
+ err = b43legacy_pio_tx(dev, skb);
+ else
+ err = b43legacy_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
+
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
+
+ mutex_unlock(&wl->mutex);
+}
+
static void b43legacy_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- int err = -ENODEV;
- unsigned long flags;
- if (unlikely(!dev))
- goto out;
- if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
- goto out;
- /* DMA-TX is done without a global lock. */
- if (b43legacy_using_pio(dev)) {
- spin_lock_irqsave(&wl->irq_lock, flags);
- err = b43legacy_pio_tx(dev, skb);
- spin_unlock_irqrestore(&wl->irq_lock, flags);
- } else
- err = b43legacy_dma_tx(dev, skb);
-out:
- if (unlikely(err)) {
- /* Drop the packet. */
+ if (unlikely(skb->len < 2 + 2 + 6)) {
+ /* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
+ return;
}
+ B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
+
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping])
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ else
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
@@ -2510,7 +2544,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
if (d->phy.possible_phymodes & phymode) {
/* Ok, this device supports the PHY-mode.
* Set the gmode bit. */
- *gmode = 1;
+ *gmode = true;
*dev = d;
return 0;
@@ -2546,7 +2580,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
- bool gmode = 0;
+ bool gmode = false;
int prev_status;
err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -2879,6 +2913,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
unsigned long flags;
+ int queue_num;
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
return;
@@ -2898,11 +2933,16 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
/* Must unlock as it would otherwise deadlock. No races here.
* Cancel the possibly running self-rearming periodic work. */
cancel_delayed_work_sync(&dev->periodic_work);
+ cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
- ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
- b43legacy_mac_suspend(dev);
+ b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
@@ -3044,12 +3084,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
phy->savedpctlreg = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
lo = phy->_lo_pairs;
if (lo)
@@ -3081,7 +3121,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
{
/* Flags */
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3227,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
phy->antenna_diversity = 0xFFFF;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3395,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3429,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3413,10 +3453,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -3455,7 +3495,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
mutex_unlock(&wl->mutex);
}
@@ -3614,7 +3654,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
@@ -3705,7 +3745,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
(void (*)(unsigned long))b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
- wldev->__using_pio = 1;
+ wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
err = b43legacy_wireless_core_attach(wldev);
@@ -3748,6 +3788,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
struct ieee80211_hw *hw;
struct b43legacy_wl *wl;
int err = -ENOMEM;
+ int queue_num;
b43legacy_sprom_fixup(dev->bus);
@@ -3782,6 +3823,13 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
mutex_init(&wl->mutex);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
+ INIT_WORK(&wl->tx_work, b43legacy_tx_work);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
ssb_set_devtypedata(dev, wl);
b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
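Editorial note: the hunk above wires up the new deferred-transmit plumbing (tx_work plus one sk_buff queue per QoS queue). The body of b43legacy_tx_work is outside this excerpt, so the sketch below of the consumer side is an assumption for illustration only; locking and the transmit call are placeholders, and the real handler lives in the corresponding main.c change.

	/* Hypothetical sketch only: drain the per-QoS-queue skb queues that the
	 * hunk above initializes. Assumes the driver's own headers; the real
	 * b43legacy_tx_work transmits the frame instead of freeing it. */
	static void example_tx_work(struct work_struct *work)
	{
		struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
						       tx_work);
		struct sk_buff *skb;
		int queue_num;

		mutex_lock(&wl->mutex);
		for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
			while ((skb = skb_dequeue(&wl->tx_queue[queue_num])))
				dev_kfree_skb(skb); /* placeholder for the TX path */
		}
		mutex_unlock(&wl->mutex);
	}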
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 475eb14e665..fcbafcd603c 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
break;
- phy->aci_enable = 1;
+ phy->aci_enable = true;
phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
break;
- phy->aci_enable = 0;
+ phy->aci_enable = false;
phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
(phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- phy->aci_wlan_automatic = 0;
+ phy->aci_wlan_automatic = false;
switch (mode) {
case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
- phy->aci_wlan_automatic = 1;
+ phy->aci_wlan_automatic = true;
if (phy->aci_enable)
mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
currentmode);
if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
- phy->aci_enable = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_hw_rssi = false;
} else
b43legacy_radio_interference_mitigation_enable(dev, mode);
phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
phy->radio_off_context.rfover);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
phy->radio_off_context.rfoverval);
- phy->radio_off_context.valid = 0;
+ phy->radio_off_context.valid = false;
}
channel = phy->channel;
err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
default:
B43legacy_BUG_ON(1);
}
- phy->radio_on = 1;
+ phy->radio_on = true;
}
void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
if (!force) {
phy->radio_off_context.rfover = rfover;
phy->radio_off_context.rfoverval = rfoverval;
- phy->radio_off_context.valid = 1;
+ phy->radio_off_context.valid = true;
}
b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
rfoverval & 0xFF73);
} else
b43legacy_phy_write(dev, 0x0015, 0xAA00);
- phy->radio_on = 0;
+ phy->radio_on = false;
b43legacydbg(dev->wl, "Radio initialized\n");
}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8f7ad..cd6375de2a6 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
- depends on PCI
depends on MAC80211
- depends on BCMA=n
+ depends on BCMA
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
@@ -18,16 +17,26 @@ config BRCMSMAC
config BRCMFMAC
tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
- depends on MMC
depends on CFG80211
select BRCMUTIL
- select FW_LOADER
---help---
This module adds support for embedded wireless adapters based on
- Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's
- wireless extensions subsystem. If you choose to build a module,
+ Broadcom IEEE802.11n FullMAC chipsets. At least one of the bus
+ interface options below must be enabled. If you choose to build a module,
it'll be called brcmfmac.ko.
+config BRCMFMAC_SDIO
+ bool "SDIO bus interface support for FullMAC"
+ depends on MMC
+ depends on BRCMFMAC
+ select FW_LOADER
+ default y
+ ---help---
+ This option enables SDIO bus interface support for the Broadcom
+ FullMAC WLAN driver.
+ Say Y if you want to use brcmfmac with a compatible SDIO-attached
+ wireless card.
+
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index b44e3094588..9ca9ea1135e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -19,15 +19,16 @@ ccflags-y += \
-Idrivers/net/wireless/brcm80211/brcmfmac \
-Idrivers/net/wireless/brcm80211/include
-DHDOFILES = \
- wl_cfg80211.o \
- dhd_cdc.o \
- dhd_common.o \
- dhd_sdio.o \
- dhd_linux.o \
- bcmsdh.o \
- bcmsdh_sdmmc.o
-
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
-brcmfmac-objs += $(DHDOFILES)
+brcmfmac-objs += \
+ wl_cfg80211.o \
+ dhd_cdc.o \
+ dhd_common.o \
+ dhd_linux.o
+brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+ dhd_sdio.o \
+ bcmsdh.o \
+ bcmsdh_sdmmc.o \
+ sdio_chip.o
+
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index d7d3afd5a10..00000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-/* firmware name */
-#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
-
-#endif /* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 89ff94da556..4bc8d251acf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -31,7 +31,6 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
-#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
@@ -51,12 +50,18 @@ static void brcmf_sdioh_irqhandler(struct sdio_func *func)
sdio_claim_host(func);
}
+/* dummy handler for SDIO function 2 interrupt */
+static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func)
+{
+}
+
int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler);
sdio_release_host(sdiodev->func[1]);
return 0;
@@ -67,6 +72,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
+ sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
@@ -222,19 +228,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
return sdiodev->regfail;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+ uint flags, uint width, u32 *addr)
{
- int status;
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
@@ -247,29 +246,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
sdiodev->sbwad = bar0;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ if (width == 4)
+ *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ if (!err)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, pkt);
+
+ return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pktq->qlen);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
- status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
- fn, addr, width, nbytes, buf, pkt);
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+ pktq);
- return status;
+ return err;
}
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ memcpy(mypkt->data, buf, nbytes);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +375,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, width, nbytes, buf, pkt);
+ addr, pkt);
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
u8 *buf, uint nbytes)
{
+ struct sk_buff *mypkt;
+ bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+ int err;
+
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
- (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buf, nbytes);
+
+ err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+ SDIO_FUNC_1, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!err && !write)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
}
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +438,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->sbwad = SI_ENUM_BASE;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
brcmf_dbg(ERROR, "device attach failed\n");
ret = -ENODEV;
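Editorial note: the refactor above splits the old receive path: brcmf_sdcard_recv_buf() now bounce-buffers through an skb and delegates to brcmf_sdcard_recv_pkt(), while brcmf_sdcard_recv_chain() accepts a whole queue of pre-aligned packets. A hedged usage sketch of the chain variant follows; the function number, request flags and packet sizes are illustrative assumptions, not taken from this patch.

	/* Illustrative caller of brcmf_sdcard_recv_chain(); sizes and the
	 * function/flag choices are assumptions for the sketch only. */
	static int example_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr)
	{
		struct sk_buff_head pktq;
		struct sk_buff *pkt;
		int i, err = -ENOMEM;

		skb_queue_head_init(&pktq);
		for (i = 0; i < 2; i++) {
			pkt = brcmu_pkt_buf_get_skb(512);	/* arbitrary size */
			if (!pkt)
				goto out;
			skb_queue_tail(&pktq, pkt);
		}

		err = brcmf_sdcard_recv_chain(sdiodev, addr, SDIO_FUNC_2,
					      SDIO_REQ_4BYTE, &pktq);
		/* a real caller would now consume the filled packets */
	out:
		skb_queue_purge(&pktq);
		return err;
	}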
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d5c93..9b8c0ed833d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -31,15 +31,15 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
-#include "dhd.h"
#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "dhd_bus.h"
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -47,6 +47,7 @@
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +205,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
+/* precondition: host controller is claimed */
static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+ uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+ int err_ret = 0;
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pkt->data)), addr, pktlen);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pkt->data)),
+ addr, pktlen);
+ }
+
+ return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
- struct sk_buff *pnext;
+ struct sk_buff *pkt;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
- for (pnext = pkt; pnext; pnext = pnext->next) {
- uint pkt_len = pnext->len;
+
+ skb_queue_walk(pktq, pkt) {
+ uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (write) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (fifo) {
- err_ret = sdio_readsb(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- } else {
- err_ret = sdio_memcpy_fromio(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- }
-
+ err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
if (err_ret) {
brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
-
if (!fifo)
addr += pkt_len;
- SGCount++;
+ SGCount++;
}
/* Release host controller */
@@ -270,91 +284,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
}
/*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case, it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
- uint reg_width, uint buflen_u, u8 *buffer,
struct sk_buff *pkt)
{
- int Status;
- struct sk_buff *mypkt = NULL;
+ int status;
+ uint pkt_len = pkt->len;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
+ if (pkt == NULL)
+ return -EINVAL;
+
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Case 1: we don't have a packet. */
- if (pkt == NULL) {
- brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
- write ? "TX" : "RX", buflen_u);
- mypkt = brcmu_pkt_buf_get_skb(buflen_u);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- buflen_u);
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, buffer, buflen_u);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(buffer, mypkt->data, buflen_u);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /*
- * Case 2: We have a packet, but it is unaligned.
- * In this case, we cannot have a chain (pkt->next == NULL)
- */
- brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
- write ? "TX" : "RX", pkt->len);
- mypkt = brcmu_pkt_buf_get_skb(pkt->len);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- pkt->len);
- return -EIO;
- }
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, pkt->data, pkt->len);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(pkt->data, mypkt->data, mypkt->len);
+ pkt_len += 3;
+ pkt_len &= (uint)~3;
- brcmu_pkt_buf_free_skb(mypkt);
- } else { /* case 3: We have a packet and
- it is aligned. */
- brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
- write ? "Tx" : "Rx");
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, pkt);
+ status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
+ if (status) {
+ brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len, status);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len);
}
- return Status;
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ return status;
}
/* Read client card reg */
@@ -494,6 +462,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
{
int ret = 0;
struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "func->class=%x\n", func->class);
brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +474,26 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(ERROR, "card private drvdata occupied\n");
return -ENXIO;
}
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev)
+ if (!sdiodev) {
+ kfree(bus_if);
return -ENOMEM;
+ }
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv = sdiodev;
+ bus_if->type = SDIO_BUS;
+ bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->card->dev, sdiodev);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
}
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
return -ENODEV;
sdiodev->func[2] = func;
+ bus_if = sdiodev->bus_if;
+ sdiodev->dev = &func->dev;
+ dev_set_drvdata(&func->dev, bus_if);
+
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
ret = brcmf_sdio_probe(sdiodev);
}
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
+ struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) {
- sdiodev = dev_get_drvdata(&func->card->dev);
+ bus_if = dev_get_drvdata(&func->dev);
+ sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
+ dev_set_drvdata(&func->dev, NULL);
+ kfree(bus_if);
kfree(sdiodev);
}
}
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
static int brcmf_sdio_suspend(struct device *dev)
{
mmc_pm_flag_t sdio_flags;
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
int ret = 0;
brcmf_dbg(TRACE, "\n");
- sdiodev = dev_get_drvdata(&func->card->dev);
-
atomic_set(&sdiodev->suspend, true);
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev)
{
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
- sdiodev = dev_get_drvdata(&func->card->dev);
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
- return sdio_register_driver(&brcmf_sdmmc_driver);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
{
+ int ret;
+
brcmf_dbg(TRACE, "Enter\n");
- sdio_unregister_driver(&brcmf_sdmmc_driver);
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+ if (ret)
+ brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+ return ret;
}
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
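Editorial note: the request-chain rework in this file drops the hand-rolled pnext walk in favour of skb_queue_walk(). As a standalone reminder of that idiom (everything below is illustrative, not driver code): the iterator leaves packets on the list, so it pairs with skb_unlink() or skb_dequeue() only when removal is actually wanted.

	/* Minimal sketch of the skb_queue_walk() idiom adopted above; the 4-byte
	 * rounding mirrors the pkt_len handling in brcmf_sdioh_request_chain(). */
	static unsigned int example_queued_bytes(struct sk_buff_head *pktq)
	{
		struct sk_buff *pkt;
		unsigned int total = 0;

		skb_queue_walk(pktq, pkt)
			total += (pkt->len + 3) & ~3u;	/* round up to 4 bytes */

		return total;
	}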
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 4645766b407..e58ea40a75b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -122,8 +122,6 @@
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define BRCMF_DEL_IF -0xe
-#define BRCMF_BAD_IF -0xf
#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
@@ -158,18 +156,6 @@ struct brcmf_event {
struct brcmf_event_msg msg;
} __packed;
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
/* event codes sent by the dongle to this driver */
#define BRCMF_E_SET_SSID 0
#define BRCMF_E_JOIN 1
@@ -319,13 +305,6 @@ struct dngl_stats {
#define BRCMF_E_LINK_ASSOC_REC 3
#define BRCMF_E_LINK_BSSCFG_DIS 4
-/* The level of bus communication with the dongle */
-enum brcmf_bus_state {
- BRCMF_BUS_DOWN, /* Not ready for frame transfers */
- BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
- BRCMF_BUS_DATA /* Ready for frame transfers */
-};
-
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -365,7 +344,7 @@ struct brcmf_pkt_filter_enable_le {
* Applications MUST CHECK ie_offset field and length field to access IEs and
* next bss_info structure in a vector (in struct brcmf_scan_results)
*/
-struct brcmf_bss_info {
+struct brcmf_bss_info_le {
__le32 version; /* version field */
__le32 length; /* byte length of data in this record,
* starting at version and including IEs
@@ -466,14 +445,13 @@ struct brcmf_scan_results {
u32 buflen;
u32 version;
u32 count;
- struct brcmf_bss_info bss_info[1];
+ struct brcmf_bss_info_le bss_info_le[];
};
struct brcmf_scan_results_le {
__le32 buflen;
__le32 version;
__le32 count;
- struct brcmf_bss_info bss_info[1];
};
/* used for association with a specific BSSID and chanspec list */
@@ -493,10 +471,6 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
- (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
/* incremental scan results struct */
struct brcmf_iscan_results {
union {
@@ -511,7 +485,7 @@ struct brcmf_iscan_results {
/* size of brcmf_iscan_results not including variable length array */
#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
- (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+ (sizeof(struct brcmf_scan_results) + \
offsetof(struct brcmf_iscan_results, results))
struct brcmf_wsec_key {
@@ -579,25 +553,19 @@ struct brcmf_dcmd {
};
/* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus; /* device bus info */
struct brcmf_proto; /* device communication protocol info */
-struct brcmf_info; /* device driver info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage ponters */
- struct brcmf_bus *bus;
+ struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
- struct brcmf_info *info;
struct brcmf_cfg80211_dev *config;
+ struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
- bool up; /* Driver up/down (to OS) */
- bool txoff; /* Transmit flow-controlled */
- enum brcmf_bus_state busstate;
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
u8 wme_dp; /* wme discard priority */
@@ -605,48 +573,21 @@ struct brcmf_pub {
bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- struct dngl_stats dstats; /* Stats for dongle-based data */
/* Additional stats for the bus level */
- /* Data packets sent to dongle */
- unsigned long tx_packets;
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
- /* Errors in sending data to dongle */
- unsigned long tx_errors;
- /* Control packets sent to dongle */
- unsigned long tx_ctlpkts;
- /* Errors sending control frames to dongle */
- unsigned long tx_ctlerrs;
- /* Packets sent up the network interface */
- unsigned long rx_packets;
- /* Multicast packets sent up the network interface */
- unsigned long rx_multicast;
- /* Errors processing rx data packets */
- unsigned long rx_errors;
- /* Control frames processed from dongle */
- unsigned long rx_ctlpkts;
-
- /* Errors in processing rx control frames */
- unsigned long rx_ctlerrs;
- /* Packets dropped locally (no memory) */
- unsigned long rx_dropped;
/* Packets flushed due to unscheduled sendup thread */
unsigned long rx_flushed;
/* Number of times dpc scheduled by watchdog timer */
unsigned long wd_dpc_sched;
- /* Number of packets where header read-ahead was used. */
- unsigned long rx_readahead_cnt;
- /* Number of tx packets we had to realloc for headroom */
- unsigned long tx_realloc;
/* Number of flow control pkts recvd */
unsigned long fc_packets;
/* Last error return */
int bcmerror;
- uint tickcnt;
/* Last error from dongle */
int dongle_error;
@@ -664,6 +605,14 @@ struct brcmf_pub {
u8 country_code[BRCM_CNTRY_BUF_SZ];
char eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+ struct mutex proto_block;
+
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ u8 macvalue[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
};
struct brcmf_if_event {
@@ -683,67 +632,33 @@ extern const struct bcmevent_name bcmevent_names[];
extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
char *buf, uint len);
-/* Indication from bus module regarding presence/insertion of dongle.
- * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
- * Returned structure should have bus and prot pointers filled in.
- * bus_hdrlen specifies required headroom for bus module header.
- */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
- uint bus_hdrlen);
extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct brcmf_pub *drvr);
-
-/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-
-extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
- struct sk_buff *pkt, int prec);
-
-/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *rxp, int numpkt);
-
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-/* Notify tx completion */
-extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
-
/* Query dongle */
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
-/* OS independent layer functions */
-extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
-extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
#ifdef BCMDBG
extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
#endif /* BCMDBG */
-extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
-extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
+extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
-extern void brcmf_c_init(void);
-
-extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
- struct net_device *ndev, char *name, u8 *mac_addr,
- u32 flags, u8 bssidx);
-extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
+extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
/* Send packet to dongle via data channel */
extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
struct sk_buff *pkt);
-extern int brcmf_bus_start(struct brcmf_pub *drvr);
-
extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
int enable, int master_mode);
@@ -752,25 +667,4 @@ extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
-/* message levels */
-#define BRCMF_ERROR_VAL 0x0001
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0400
-#define BRCMF_EVENT_VAL 0x0800
-#define BRCMF_BTA_VAL 0x1000
-#define BRCMF_ISCAN_VAL 0x2000
-
-/* Enter idle immediately (no timeout) */
-#define BRCMF_IDLE_IMMEDIATE (-1)
-#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
- when idle */
-#define BRCMF_IDLE_INTERVAL 1
-
#endif /* _BRCMF_H_ */
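Editorial note: one detail worth calling out in the dhd.h hunk above is that bss_info[1] becomes the flexible array member bss_info_le[], which is why BRCMF_SCAN_RESULTS_FIXED_SIZE disappears and BRCMF_ISCAN_RESULTS_FIXED_SIZE can use plain sizeof(). The records that follow the fixed header are variable length, so a consumer walks them via each record's own length field, roughly as in this illustrative sketch.

	/* Illustrative walk over the variable-length BSS records following the
	 * fixed scan-results header; each record's length field gives the stride,
	 * per the "check ie_offset and length" comment in dhd.h. */
	static void example_dump_scan_results(struct brcmf_scan_results *results)
	{
		struct brcmf_bss_info_le *bss = results->bss_info_le;
		u32 i;

		for (i = 0; i < results->count; i++) {
			pr_debug("bss %u: record length %u\n",
				 i, le32_to_cpu(bss->length));
			bss = (struct brcmf_bss_info_le *)((u8 *)bss +
							   le32_to_cpu(bss->length));
		}
	}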
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407c9a1..ad9be2410b5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,41 +17,89 @@
#ifndef _BRCMF_BUS_H_
#define _BRCMF_BUS_H_
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#define BRCMF_SDALIGN (1 << 6)
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+ BRCMF_BUS_DOWN, /* Not ready for frame transfers */
+ BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
+ BRCMF_BUS_DATA /* Ready for frame transfers */
+};
-/* watchdog polling interval in ms */
-#define BRCMF_WD_POLL_MS 10
+struct dngl_stats {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+};
+
+/* interface structure between common and bus layer */
+struct brcmf_bus {
+ u8 type; /* bus type */
+ void *bus_priv; /* pointer to bus private structure */
+ void *drvr; /* pointer to driver pub structure brcmf_pub */
+ enum brcmf_bus_state state;
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ bool drvr_up; /* Status flag of driver up/down */
+ unsigned long tx_realloc; /* Tx packets realloced for headroom */
+ struct dngl_stats dstats; /* Stats for dongle-based data */
+ u8 align; /* bus alignment requirement */
+
+ /* interface functions pointers */
+ /* Stop bus module: clear pending frames, disable data flow */
+ void (*brcmf_bus_stop)(struct device *);
+ /* Initialize bus module: prepare for communication w/dongle */
+ int (*brcmf_bus_init)(struct device *);
+ /* Send a data frame to the dongle. Callee disposes of txp. */
+ int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
+ /* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+ int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
+ int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
+};
/*
- * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
+ * interface functions from common layer
*/
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+ struct sk_buff *rxp);
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
+extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+ struct sk_buff *pkt, int prec);
-/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct device *dev, int ifidx,
+ struct sk_buff *pkt)
+{
+ struct sk_buff_head q;
-/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+ skb_queue_head_init(&q);
+ skb_queue_tail(&q, pkt);
+ brcmf_rx_frame(dev, ifidx, &q);
+}
-/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+/* Indication from bus module regarding presence/insertion of dongle. */
+extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+/* Indication from bus module regarding removal/absence of dongle */
+extern void brcmf_detach(struct device *dev);
-/* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
-extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Indication from bus module to change flow-control state */
+extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
-extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Notify tx completion */
+extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
+ bool success);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern int brcmf_bus_start(struct device *dev);
+extern int brcmf_add_if(struct device *dev, int ifidx,
+ char *name, u8 *mac_addr);
#endif /* _BRCMF_BUS_H_ */
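Editorial note: the new struct brcmf_bus above replaces the exported brcmf_sdbrcm_* entry points with per-bus callbacks reached through the device's driver data, which brcmf_ops_sdio_probe sets earlier in this patch. A hedged sketch of how common-layer code is expected to reach a callback follows; the function name and the error value chosen are assumptions.

	/* Illustrative only: resolve the bus interface from a struct device and
	 * invoke one of the callbacks declared above. */
	static int example_bus_txctl(struct device *dev, unsigned char *msg, uint len)
	{
		struct brcmf_bus *bus_if = dev_get_drvdata(dev);

		if (!bus_if || bus_if->state == BRCMF_BUS_DOWN)
			return -ENODEV;

		return bus_if->brcmf_bus_txctl(dev, msg, len);
	}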
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3d1d5..ac8d1f43788 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
* Used on data packets to convey priority across USB.
*/
#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
@@ -77,18 +77,19 @@ struct brcmf_proto_bdc_header {
u8 flags;
u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
u8 flags2;
- u8 rssi;
+ u8 data_offset;
};
#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE
+#define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE
* (amount of header tha might be added)
* plus any space that might be needed
- * for alignment padding.
+ * for bus alignment padding.
*/
-#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for
* round off at the end of buffer
+ * Currently this is SDIO.
*/
struct brcmf_proto {
@@ -116,8 +117,9 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
- len);
+ return drvr->bus_if->brcmf_bus_txctl(drvr->dev,
+ (unsigned char *)&prot->msg,
+ len);
}
static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -128,7 +130,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
brcmf_dbg(TRACE, "Enter\n");
do {
- ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev,
(unsigned char *)&prot->msg,
len + sizeof(struct brcmf_proto_cdc_dcmd));
if (ret < 0)
@@ -280,11 +282,11 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
struct brcmf_proto *prot = drvr->prot;
int ret = -1;
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
return ret;
}
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
brcmf_dbg(TRACE, "Enter\n");
@@ -338,7 +340,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
prot->pending = false;
done:
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
@@ -372,14 +374,16 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
- h->rssi = 0;
+ h->data_offset = 0;
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
@@ -435,7 +439,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
drvr->prot = cdc;
drvr->hdrlen += BDC_HEADER_LEN;
- drvr->maxctl = BRCMF_DCMD_MAXLEN +
+ drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
return 0;
@@ -451,18 +455,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
drvr->prot = NULL;
}
-void brcmf_proto_dstats(struct brcmf_pub *drvr)
-{
- /* No stats from dongle added yet, copy bus stats */
- drvr->dstats.tx_packets = drvr->tx_packets;
- drvr->dstats.tx_errors = drvr->tx_errors;
- drvr->dstats.rx_packets = drvr->rx_packets;
- drvr->dstats.rx_errors = drvr->rx_errors;
- drvr->dstats.rx_dropped = drvr->rx_dropped;
- drvr->dstats.multicast = drvr->rx_multicast;
- return;
-}
-
int brcmf_proto_init(struct brcmf_pub *drvr)
{
int ret = 0;
@@ -470,19 +462,19 @@ int brcmf_proto_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Get the device MAC address */
strcpy(buf, "cur_etheraddr");
ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
buf, sizeof(buf));
if (ret < 0) {
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
memcpy(drvr->mac, buf, ETH_ALEN);
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
ret = brcmf_c_preinit_dcmds(drvr);
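Editorial note: throughout dhd_cdc.c (and dhd_common.c below) the removed brcmf_os_proto_block()/unblock() wrappers are replaced by the proto_block mutex now embedded in struct brcmf_pub. The pattern is uniform, roughly as in this sketch; the command choice and buffer handling are illustrative, not from the patch.

	/* Sketch of the serialization the patch adopts: take drvr->proto_block
	 * around a control transaction instead of the removed
	 * brcmf_os_proto_block() helpers. */
	static int example_proto_query(struct brcmf_pub *drvr, char *buf, uint len)
	{
		int ret;

		mutex_lock(&drvr->proto_block);
		ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, len);
		mutex_unlock(&drvr->proto_block);

		return ret;
	}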
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 891826197f9..a51d8f5d36f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
#define PKTFILTER_BUF_SIZE 2048
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
-int brcmf_msg_level;
-
#define MSGTRACE_VERSION 1
#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,25 +83,14 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-void brcmf_c_init(void)
-{
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behaviour since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
-bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
{
struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
bool discard_oldest;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
@@ -446,7 +433,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
#endif /* BCMDBG */
int
-brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
struct brcmf_event_msg *event, void **data_ptr)
{
/* check whether packet is a BRCM event pkt */
@@ -488,19 +475,18 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+ brcmf_add_if(drvr->dev, ifevent->ifidx,
event->ifname,
- pvt_data->eth.h_dest,
- ifevent->flags, ifevent->bssidx);
+ pvt_data->eth.h_dest);
else
- brcmf_del_if(drvr_priv, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->ifidx);
} else {
brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
ifevent->ifidx, event->ifname);
}
/* send up the if event: btamp user needs it */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
break;
/* These are what external supplicant/authenticator wants */
@@ -512,7 +498,7 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
default:
/* Fall through: this should get _everything_ */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
type, flags, status);
@@ -812,7 +798,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
"event_msgs" + '\0' + bitvec */
uint up = 0;
char buf[128], *ptr;
- u32 dongle_align = BRCMF_SDALIGN;
+ u32 dongle_align = drvr->bus_if->align;
u32 glom = 0;
u32 roaming = 1;
uint bcn_timeout = 3;
@@ -820,7 +806,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
int scan_unassoc_time = 40;
int i;
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Set Country code */
if (drvr->country_code[0] != 0) {
@@ -889,7 +875,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
0, true);
}
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 7467922f053..bb26ee36bc6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -17,6 +17,21 @@
#ifndef _BRCMF_DBG_H_
#define _BRCMF_DBG_H_
+/* message levels */
+#define BRCMF_ERROR_VAL 0x0001
+#define BRCMF_TRACE_VAL 0x0002
+#define BRCMF_INFO_VAL 0x0004
+#define BRCMF_DATA_VAL 0x0008
+#define BRCMF_CTL_VAL 0x0010
+#define BRCMF_TIMER_VAL 0x0020
+#define BRCMF_HDRS_VAL 0x0040
+#define BRCMF_BYTES_VAL 0x0080
+#define BRCMF_INTR_VAL 0x0100
+#define BRCMF_GLOM_VAL 0x0400
+#define BRCMF_EVENT_VAL 0x0800
+#define BRCMF_BTA_VAL 0x1000
+#define BRCMF_ISCAN_VAL 0x2000
+
#if defined(BCMDBG)
#define brcmf_dbg(level, fmt, ...) \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 4acbac5a74c..eb9eb766ac2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
-#include "bcmchip.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -53,48 +52,19 @@ MODULE_LICENSE("Dual BSD/GPL");
/* Interface control information */
struct brcmf_if {
- struct brcmf_info *info; /* back pointer to brcmf_info */
+ struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
/* OS/stack specifics */
struct net_device *ndev;
struct net_device_stats stats;
int idx; /* iface idx in dongle */
- int state; /* interface state */
u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
};
-/* Local private structure (extension of pub) */
-struct brcmf_info {
- struct brcmf_pub pub;
-
- /* OS/stack specifics */
- struct brcmf_if *iflist[BRCMF_MAX_IFS];
-
- struct mutex proto_block;
-
- struct work_struct setmacaddr_work;
- struct work_struct multicast_work;
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
-};
-
/* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
-{
- int i = 0;
-
- while (i < BRCMF_MAX_IFS) {
- if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
- return i;
- i++;
- }
-
- return BRCMF_BAD_IF;
-}
-
-int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
{
int i = BRCMF_MAX_IFS;
struct brcmf_if *ifp;
@@ -103,7 +73,7 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
return 0;
while (--i > 0) {
- ifp = drvr_priv->iflist[i];
+ ifp = drvr->iflist[i];
if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
break;
}
@@ -115,20 +85,18 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
return "<if_bad>";
}
- if (drvr_priv->iflist[ifidx] == NULL) {
+ if (drvr->iflist[ifidx] == NULL) {
brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
return "<if_null>";
}
- if (drvr_priv->iflist[ifidx]->ndev)
- return drvr_priv->iflist[ifidx]->ndev->name;
+ if (drvr->iflist[ifidx]->ndev)
+ return drvr->iflist[ifidx]->ndev->name;
return "<if_none>";
}
@@ -146,10 +114,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
uint buflen;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
multicast_work);
- ndev = drvr_priv->iflist[0]->ndev;
+ ndev = drvr->iflist[0]->ndev;
cnt = netdev_mc_count(ndev);
/* Determine initial value of allmulti flag */
@@ -183,10 +151,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
- brcmf_ifname(&drvr_priv->pub, 0), cnt);
+ brcmf_ifname(drvr, 0), cnt);
dcmd_value = cnt ? true : dcmd_value;
}
@@ -208,7 +176,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
("allmulti", (void *)&dcmd_le_value,
sizeof(dcmd_le_value), buf, buflen)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
(int)sizeof(dcmd_value), buflen);
kfree(buf);
return;
@@ -220,10 +188,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
@@ -241,10 +209,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = sizeof(dcmd_le_value);
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
}
@@ -256,14 +224,14 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_dcmd dcmd;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
setmacaddr_work);
brcmf_dbg(TRACE, "enter\n");
- if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+ if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
ETH_ALEN, buf, 32)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
return;
}
memset(&dcmd, 0, sizeof(dcmd));
@@ -272,52 +240,40 @@ _brcmf_set_mac_address(struct work_struct *work)
dcmd.len = 32;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0)
brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
else
- memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
- drvr_priv->macvalue, ETH_ALEN);
+ memcpy(drvr->iflist[0]->ndev->dev_addr,
+ drvr->macvalue, ETH_ALEN);
return;
}
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct sockaddr *sa = (struct sockaddr *)addr;
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return -1;
- memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
- schedule_work(&drvr_priv->setmacaddr_work);
+ memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
+ schedule_work(&drvr->setmacaddr_work);
return 0;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- schedule_work(&drvr_priv->multicast_work);
+ schedule_work(&drvr->multicast_work);
}
int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
/* Reject if down */
- if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@@ -328,122 +284,118 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
if (is_multicast_ether_addr(eh->h_dest))
drvr->tx_multicast++;
if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr_priv->pend_8021x_cnt);
+ atomic_inc(&drvr->pend_8021x_cnt);
}
/* If the protocol uses a data header, apply it */
brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
- return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+ return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int ret;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
/* Reject if down */
- if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
- brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
- drvr_priv->pub.up, drvr_priv->pub.busstate);
+ if (!drvr->bus_if->drvr_up ||
+ (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
+ drvr->bus_if->drvr_up,
+ drvr->bus_if->state);
netif_stop_queue(ndev);
return -ENODEV;
}
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF) {
- brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+ if (!drvr->iflist[ifp->idx]) {
+ brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
netif_stop_queue(ndev);
return -ENODEV;
}
/* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+ if (skb_headroom(skb) < drvr->hdrlen) {
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
- drvr_priv->pub.tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+ brcmf_ifname(drvr, ifp->idx));
+ drvr->bus_if->tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifp->idx));
ret = -ENOMEM;
goto done;
}
}
- ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+ ret = brcmf_sendpkt(drvr, ifp->idx, skb);
done:
if (ret)
- drvr_priv->pub.dstats.tx_dropped++;
+ drvr->bus_if->dstats.tx_dropped++;
else
- drvr_priv->pub.tx_packets++;
+ drvr->bus_if->dstats.tx_packets++;
/* Return ok: we always eat the packet */
return 0;
}
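
The transmit path keeps the existing headroom check: if the bus header will not fit in front of the frame, the skb is reallocated with more headroom and the original is freed. The generic pattern, as a sketch with an assumed helper name:

    /* Sketch: make room for a bus/protocol header in front of an skb. */
    #include <linux/skbuff.h>

    static struct sk_buff *ensure_headroom(struct sk_buff *skb, unsigned int hdrlen)
    {
            struct sk_buff *nskb;

            if (skb_headroom(skb) >= hdrlen)
                    return skb;                 /* already enough space */

            nskb = skb_realloc_headroom(skb, hdrlen);
            dev_kfree_skb(skb);                 /* old buffer is consumed either way */
            return nskb;                        /* NULL on allocation failure */
    }
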
-void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
{
struct net_device *ndev;
- struct brcmf_info *drvr_priv = drvr->info;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- drvr->txoff = state;
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
if (state == ON)
netif_stop_queue(ndev);
else
netif_wake_queue(ndev);
}
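
Several entry points (flow control, rx, tx-complete) now take only a struct device and recover the driver state from the device's drvdata, which the bus probe code is assumed to have published earlier. Roughly, with hypothetical names:

    /* Sketch: bus probe publishes its state via drvdata, common code reads it back. */
    #include <linux/device.h>

    struct my_bus_if { void *drvr; };

    static void my_probe_publish(struct device *dev, struct my_bus_if *bus_if)
    {
            dev_set_drvdata(dev, bus_if);       /* done once at probe time */
    }

    static void *my_get_drvr(struct device *dev)
    {
            struct my_bus_if *bus_if = dev_get_drvdata(dev);

            return bus_if->drvr;                /* common-layer handle */
    }
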
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
void *pktdata, struct brcmf_event_msg *event,
void **data)
{
int bcmerror = 0;
- bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+ bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
if (bcmerror != 0)
return bcmerror;
- if (drvr_priv->iflist[*ifidx]->ndev)
- brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+ if (drvr->iflist[*ifidx]->ndev)
+ brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
event, *data);
return bcmerror;
}
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
- int numpkt)
+void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *skb_list)
{
- struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
- struct sk_buff *pnext, *save_pktbuf;
- int i;
+ struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_event_msg event;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- save_pktbuf = skb;
-
- for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
- pnext = skb->next;
- skb->next = NULL;
+ skb_queue_walk_safe(skb_list, skb, pnext) {
+ skb_unlink(skb, skb_list);
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@@ -460,15 +412,21 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
eth = skb->data;
len = skb->len;
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (ifp == NULL)
- ifp = drvr_priv->iflist[0];
+ ifp = drvr->iflist[0];
+
+ if (!ifp || !ifp->ndev ||
+ ifp->ndev->reg_state != NETREG_REGISTERED) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- drvr_priv->pub.rx_multicast++;
+ bus_if->dstats.multicast++;
skb->data = eth;
skb->len = len;
@@ -478,19 +436,17 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
/* Process special event packets and then discard them */
if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
- brcmf_host_event(drvr_priv, &ifidx,
+ brcmf_host_event(drvr, &ifidx,
skb_mac_header(skb),
&event, &data);
- if (drvr_priv->iflist[ifidx] &&
- !drvr_priv->iflist[ifidx]->state)
- ifp = drvr_priv->iflist[ifidx];
-
- if (ifp->ndev)
+ if (drvr->iflist[ifidx]) {
+ ifp = drvr->iflist[ifidx];
ifp->ndev->last_rx = jiffies;
+ }
- drvr->dstats.rx_bytes += skb->len;
- drvr->rx_packets++; /* Local count */
+ bus_if->dstats.rx_bytes += skb->len;
+ bus_if->dstats.rx_packets++; /* Local count */
if (in_interrupt())
netif_rx(skb);
@@ -505,59 +461,48 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
}
}
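
brcmf_rx_frame now takes a whole sk_buff_head instead of a hand-linked chain plus a packet count. Draining such a list while iterating requires the _safe walker so the current entry can be unlinked; a minimal sketch (netif_rx stands in for the fuller delivery logic above):

    /* Sketch: drain an skb list while iterating; the _safe variant allows
     * unlinking the current entry without breaking the walk. */
    #include <linux/skbuff.h>
    #include <linux/netdevice.h>

    static void deliver_all(struct sk_buff_head *list)
    {
            struct sk_buff *skb, *tmp;

            skb_queue_walk_safe(list, skb, tmp) {
                    skb_unlink(skb, list);      /* take it off the list */
                    netif_rx(skb);              /* hand it to the network stack */
            }
    }
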
-void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
uint ifidx;
- struct brcmf_info *drvr_priv = drvr->info;
struct ethhdr *eh;
u16 type;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_proto_hdrpull(drvr, &ifidx, txp);
+ brcmf_proto_hdrpull(dev, &ifidx, txp);
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE)
- atomic_dec(&drvr_priv->pend_8021x_cnt);
+ atomic_dec(&drvr->pend_8021x_cnt);
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- struct brcmf_if *ifp;
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_bus *bus_if = ifp->drvr->bus_if;
brcmf_dbg(TRACE, "Enter\n");
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return NULL;
-
- ifp = drvr_priv->iflist[ifidx];
-
- if (drvr_priv->pub.up)
- /* Use the protocol to get dongle stats */
- brcmf_proto_dstats(&drvr_priv->pub);
-
/* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
- ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
- ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
- ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+ ifp->stats.rx_packets = bus_if->dstats.rx_packets;
+ ifp->stats.tx_packets = bus_if->dstats.tx_packets;
+ ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
+ ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
+ ifp->stats.rx_errors = bus_if->dstats.rx_errors;
+ ifp->stats.tx_errors = bus_if->dstats.tx_errors;
+ ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
+ ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
+ ifp->stats.multicast = bus_if->dstats.multicast;
return &ifp->stats;
}
/* Retrieve current toe component enables, which are kept
as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
{
struct brcmf_dcmd dcmd;
__le32 toe_le;
@@ -572,17 +517,17 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
dcmd.set = false;
strcpy(buf, "toe_ol");
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
/* Check for older dongle image that doesn't support toe_ol */
if (ret == -EIO) {
brcmf_dbg(ERROR, "%s: toe not supported by device\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifidx));
return -EOPNOTSUPP;
}
brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -593,7 +538,7 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
/* Set current toe component enables in toe_ol iovar,
and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
{
struct brcmf_dcmd dcmd;
char buf[32];
@@ -611,10 +556,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe_ol");
memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -624,10 +569,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe");
memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
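
The toe_ol/toe writes above follow the usual iovar layout: the NUL-terminated name followed by a little-endian 32-bit value, packed into the command buffer. A standalone sketch of just the packing step, with an assumed helper name and no firmware call:

    /* Sketch: pack "<name>\0<le32 value>" into a command buffer. */
    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    #include <asm/byteorder.h>

    static int pack_iovar_u32(char *buf, size_t buflen, const char *name, u32 val)
    {
            size_t namelen = strlen(name) + 1;  /* keep the terminating NUL */
            __le32 le = cpu_to_le32(val);

            if (namelen + sizeof(le) > buflen)
                    return -EINVAL;
            memcpy(buf, name, namelen);
            memcpy(buf + namelen, &le, sizeof(le));
            return namelen + sizeof(le);        /* bytes used */
    }
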
@@ -637,21 +582,19 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
- sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
- sprintf(info->bus_info, "%s",
- dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+ sprintf(info->version, "%lu", drvr->drv_version);
+ sprintf(info->bus_info, "%s", dev_name(drvr->dev));
}
static struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo
};
-static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
{
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
@@ -685,18 +628,18 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
}
/* otherwise, require dongle to be up */
- else if (!drvr_priv->pub.up) {
+ else if (!drvr->bus_if->drvr_up) {
brcmf_dbg(ERROR, "dongle is not up\n");
return -ENODEV;
}
/* finally, report dongle driver type */
- else if (drvr_priv->pub.iswl)
+ else if (drvr->iswl)
sprintf(info.driver, "wl");
else
sprintf(info.driver, "xx");
- sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
brcmf_dbg(CTL, "given %*s, returning %s\n",
@@ -706,7 +649,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -727,7 +670,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
return -EFAULT;
/* Read the current settings, update and write back */
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -739,17 +682,17 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
else
toe_cmpnt &= ~csum_dir;
- ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+ ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
if (ret < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
- drvr_priv->iflist[0]->ndev->features |=
+ drvr->iflist[0]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[0]->ndev->features &=
+ drvr->iflist[0]->ndev->features &=
~NETIF_F_IP_CSUM;
}
@@ -765,18 +708,16 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
int cmd)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+ brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
- if (ifidx == BRCMF_BAD_IF)
+ if (!drvr->iflist[ifp->idx])
return -1;
if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+ return brcmf_ethtool(drvr, ifr->ifr_data);
return -EOPNOTSUPP;
}
@@ -788,28 +729,25 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
s32 err = 0;
int buflen = 0;
bool is_set_key_cmd;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
memset(&dcmd, 0, sizeof(dcmd));
dcmd.cmd = cmd;
dcmd.buf = arg;
dcmd.len = len;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
-
if (dcmd.buf != NULL)
buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
/* send to dongle (must be up, and wl) */
- if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
brcmf_dbg(ERROR, "DONGLE_DOWN\n");
err = -EIO;
goto done;
}
- if (!drvr_priv->pub.iswl) {
+ if (!drvr->iswl) {
err = -EIO;
goto done;
}
@@ -826,7 +764,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
if (is_set_key_cmd)
brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+ err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
done:
if (err > 0)
@@ -837,15 +775,16 @@ done:
static int brcmf_netdev_stop(struct net_device *ndev)
{
- struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
brcmf_cfg80211_down(drvr->config);
- if (drvr->up == 0)
+ if (drvr->bus_if->drvr_up == 0)
return 0;
/* Set state and stop OS transmissions */
- drvr->up = 0;
+ drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -853,39 +792,37 @@ static int brcmf_netdev_stop(struct net_device *ndev)
static int brcmf_netdev_open(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
u32 toe_ol;
- int ifidx = brcmf_net2idx(drvr_priv, ndev);
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
-
- if (ifidx == 0) { /* do it only for primary eth0 */
+ brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ if (ifp->idx == 0) { /* do it only for primary eth0 */
/* try to bring up bus */
- ret = brcmf_bus_start(&drvr_priv->pub);
+ ret = brcmf_bus_start(drvr->dev);
if (ret != 0) {
brcmf_dbg(ERROR, "failed with code %d\n", ret);
return -1;
}
- atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+ atomic_set(&drvr->pend_8021x_cnt, 0);
- memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
/* Get current TOE mode from dongle */
- if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+ if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr_priv->iflist[ifidx]->ndev->features |=
+ drvr->iflist[ifp->idx]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[ifidx]->ndev->features &=
+ drvr->iflist[ifp->idx]->ndev->features &=
~NETIF_F_IP_CSUM;
}
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr_priv->pub.up = 1;
- if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+ drvr->bus_if->drvr_up = true;
+ if (brcmf_cfg80211_up(drvr->config)) {
brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
return -1;
}
@@ -893,193 +830,155 @@ static int brcmf_netdev_open(struct net_device *ndev)
return ret;
}
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
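
The ops table is moved ahead of its first user and the new copy is const, so one read-only table can be shared by every interface. Wiring a netdev to such a table looks roughly like this (hypothetical my_* names, minimal ops):

    /* Sketch: const net_device_ops shared by all interfaces of a driver. */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static int my_open(struct net_device *ndev) { netif_start_queue(ndev); return 0; }
    static int my_stop(struct net_device *ndev) { netif_stop_queue(ndev); return 0; }

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            dev_kfree_skb(skb);                 /* placeholder: drop the frame */
            return NETDEV_TX_OK;
    }

    static const struct net_device_ops my_netdev_ops = {
            .ndo_open       = my_open,
            .ndo_stop       = my_stop,
            .ndo_start_xmit = my_xmit,
    };

    static void my_setup(struct net_device *ndev)
    {
            ether_setup(ndev);
            ndev->netdev_ops = &my_netdev_ops;  /* read-only table */
    }
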
int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
- char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
- int ret = 0, err = 0;
+ struct net_device *ndev;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+ brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
- if (!ifp) {
- ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
- if (!ifp)
- return -ENOMEM;
+ ifp = drvr->iflist[ifidx];
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp) {
+ brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
}
- memset(ifp, 0, sizeof(struct brcmf_if));
- ifp->info = drvr_priv;
- drvr_priv->iflist[ifidx] = ifp;
+ /* Allocate netdev, including space for private structure */
+ ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
+ if (!ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ return -ENOMEM;
+ }
+
+ ifp = netdev_priv(ndev);
+ ifp->ndev = ndev;
+ ifp->drvr = drvr;
+ drvr->iflist[ifidx] = ifp;
+ ifp->idx = ifidx;
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
- if (ndev == NULL) {
- ifp->state = BRCMF_E_IF_ADD;
- ifp->idx = ifidx;
- /*
- * Delete the existing interface before overwriting it
- * in case we missed the BRCMF_E_IF_DEL event.
- */
- if (ifp->ndev != NULL) {
- brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
- ifp->ndev->name);
- netif_stop_queue(ifp->ndev);
- unregister_netdev(ifp->ndev);
- free_netdev(ifp->ndev);
- }
-
- /* Allocate netdev, including space for private structure */
- ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
- ether_setup);
- if (!ifp->ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- ret = -ENOMEM;
- }
-
- if (ret == 0) {
- memcpy(netdev_priv(ifp->ndev), &drvr_priv,
- sizeof(drvr_priv));
- err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
- if (err != 0) {
- brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
- err);
- ret = -EOPNOTSUPP;
- } else {
- brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
- current->pid, ifp->ndev->name);
- ifp->state = 0;
- }
- }
-
- if (ret < 0) {
- if (ifp->ndev)
- free_netdev(ifp->ndev);
+ if (brcmf_net_attach(drvr, ifp->idx)) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed");
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ return -EOPNOTSUPP;
+ }
- drvr_priv->iflist[ifp->idx] = NULL;
- kfree(ifp);
- }
- } else
- ifp->ndev = ndev;
+ brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+ current->pid, ifp->ndev->name);
return 0;
}
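
The rewritten brcmf_add_if allocates the netdev with the private struct embedded, fills it in, and unwinds with free_netdev() if attach fails. A condensed sketch of that lifecycle, assuming the three-argument alloc_netdev() of this kernel generation and hypothetical my_* names:

    /* Sketch: allocate, fill, register; undo in reverse order on failure. */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    struct my_if { struct net_device *ndev; int idx; };

    static struct net_device *my_add_if(int idx, const char *name)
    {
            struct net_device *ndev;
            struct my_if *ifp;

            ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
            if (!ndev)
                    return NULL;

            ifp = netdev_priv(ndev);
            ifp->ndev = ndev;
            ifp->idx = idx;

            if (register_netdev(ndev)) {
                    free_netdev(ndev);          /* never plain kfree() a net_device */
                    return NULL;
            }
            return ndev;
    }
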
-void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
{
struct brcmf_if *ifp;
brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (!ifp) {
brcmf_dbg(ERROR, "Null interface\n");
return;
}
+ if (ifp->ndev) {
+ if (ifidx == 0) {
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ rtnl_lock();
+ brcmf_netdev_stop(ifp->ndev);
+ rtnl_unlock();
+ }
+ } else {
+ netif_stop_queue(ifp->ndev);
+ }
- ifp->state = BRCMF_E_IF_DEL;
- ifp->idx = ifidx;
- if (ifp->ndev != NULL) {
- netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ if (ifidx == 0)
+ brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
- drvr_priv->iflist[ifidx] = NULL;
- kfree(ifp);
}
}
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+int brcmf_attach(uint bus_hdrlen, struct device *dev)
{
- struct brcmf_info *drvr_priv = NULL;
- struct net_device *ndev;
+ struct brcmf_pub *drvr = NULL;
+ int ret = 0;
brcmf_dbg(TRACE, "Enter\n");
- /* Allocate netdev, including space for private structure */
- ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
- if (!ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- goto fail;
- }
-
/* Allocate primary brcmf_info */
- drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
- if (!drvr_priv)
- goto fail;
-
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
- BRCMF_BAD_IF)
- goto fail;
-
- ndev->netdev_ops = NULL;
- mutex_init(&drvr_priv->proto_block);
+ drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
+ if (!drvr)
+ return -ENOMEM;
- /* Link to info module */
- drvr_priv->pub.info = drvr_priv;
+ mutex_init(&drvr->proto_block);
/* Link to bus module */
- drvr_priv->pub.bus = bus;
- drvr_priv->pub.hdrlen = bus_hdrlen;
+ drvr->hdrlen = bus_hdrlen;
+ drvr->bus_if = dev_get_drvdata(dev);
+ drvr->bus_if->drvr = drvr;
+ drvr->dev = dev;
/* Attach and link in the protocol */
- if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ ret = brcmf_proto_attach(drvr);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
goto fail;
}
- /* Attach and link in the cfg80211 */
- drvr_priv->pub.config =
- brcmf_cfg80211_attach(ndev,
- brcmf_bus_get_device(bus),
- &drvr_priv->pub);
- if (drvr_priv->pub.config == NULL) {
- brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
- goto fail;
- }
-
- INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
+ INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- return &drvr_priv->pub;
+ return ret;
fail:
- if (ndev)
- free_netdev(ndev);
- if (drvr_priv)
- brcmf_detach(&drvr_priv->pub);
+ brcmf_detach(dev);
- return NULL;
+ return ret;
}
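
brcmf_attach now receives only a struct device; it looks up the bus interface from drvdata and cross-links it with the newly allocated driver state. A reduced sketch with assumed names (GFP_KERNEL here; the patch itself allocates with GFP_ATOMIC):

    /* Sketch: cross-link bus interface (from drvdata) and fresh driver state. */
    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_pub;
    struct my_bus_if { struct my_pub *drvr; };
    struct my_pub { struct my_bus_if *bus_if; struct device *dev; };

    static int my_attach(struct device *dev)
    {
            struct my_bus_if *bus_if = dev_get_drvdata(dev);
            struct my_pub *drvr;

            drvr = kzalloc(sizeof(*drvr), GFP_KERNEL);
            if (!drvr)
                    return -ENOMEM;

            drvr->bus_if = bus_if;              /* common layer -> bus */
            drvr->dev = dev;
            bus_if->drvr = drvr;                /* bus -> common layer */
            return 0;
    }
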
-int brcmf_bus_start(struct brcmf_pub *drvr)
+int brcmf_bus_start(struct device *dev)
{
int ret = -1;
- struct brcmf_info *drvr_priv = drvr->info;
/* Room for "event_msgs" + '\0' + bitvec */
char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
- ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ ret = bus_if->brcmf_bus_init(dev);
if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
/* If bus is not ready, can't come up */
- if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ if (bus_if->state != BRCMF_BUS_DATA) {
brcmf_dbg(ERROR, "failed bus is not ready\n");
return -ENODEV;
}
@@ -1116,33 +1015,22 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
/* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(&drvr_priv->pub);
+ ret = brcmf_proto_init(drvr);
if (ret < 0)
return ret;
return 0;
}
-static struct net_device_ops brcmf_netdev_ops_pri = {
- .ndo_open = brcmf_netdev_open,
- .ndo_stop = brcmf_netdev_stop,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
struct net_device *ndev;
u8 temp_addr[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
ndev->netdev_ops = &brcmf_netdev_ops_pri;
/*
@@ -1150,7 +1038,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(temp_addr, drvr->mac, ETH_ALEN);
}
@@ -1161,14 +1049,23 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
- Locally Administered address */
}
- ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
- drvr_priv->pub.hdrlen;
+ drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+ drvr->hdrlen;
memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+ /* attach to cfg80211 for primary interface */
+ if (!ifidx) {
+ drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+ if (drvr->config == NULL) {
+ brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+ goto fail;
+ }
+ }
+
if (register_netdev(ndev) != 0) {
brcmf_dbg(ERROR, "couldn't register the net device\n");
goto fail;
@@ -1185,127 +1082,57 @@ fail:
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
- struct brcmf_info *drvr_priv;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- /* Stop the protocol module */
- brcmf_proto_stop(&drvr_priv->pub);
-
- /* Stop the bus module */
- brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
- }
- }
-}
-
-void brcmf_detach(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv;
-
brcmf_dbg(TRACE, "Enter\n");
if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- struct brcmf_if *ifp;
- int i;
-
- for (i = 1; i < BRCMF_MAX_IFS; i++)
- if (drvr_priv->iflist[i])
- brcmf_del_if(drvr_priv, i);
-
- ifp = drvr_priv->iflist[0];
- if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- rtnl_lock();
- brcmf_netdev_stop(ifp->ndev);
- rtnl_unlock();
- unregister_netdev(ifp->ndev);
- }
-
- cancel_work_sync(&drvr_priv->setmacaddr_work);
- cancel_work_sync(&drvr_priv->multicast_work);
-
- brcmf_bus_detach(drvr);
-
- if (drvr->prot)
- brcmf_proto_detach(drvr);
-
- brcmf_cfg80211_detach(drvr->config);
+ /* Stop the protocol module */
+ brcmf_proto_stop(drvr);
- free_netdev(ifp->ndev);
- kfree(ifp);
- kfree(drvr_priv);
- }
+ /* Stop the bus module */
+ drvr->bus_if->brcmf_bus_stop(drvr->dev);
}
}
-static void __exit brcmf_module_cleanup(void)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
+void brcmf_detach(struct device *dev)
{
- int error;
+ int i;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- error = brcmf_bus_register();
-
- if (error) {
- brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
- goto failed;
- }
- return 0;
-
-failed:
- return -EINVAL;
-}
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
+ /* make sure the primary interface is removed last */
+ for (i = BRCMF_MAX_IFS-1; i > -1; i--)
+ if (drvr->iflist[i])
+ brcmf_del_if(drvr, i);
-int brcmf_os_proto_block(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (drvr_priv) {
- mutex_lock(&drvr_priv->proto_block);
- return 1;
- }
- return 0;
-}
+ cancel_work_sync(&drvr->setmacaddr_work);
+ cancel_work_sync(&drvr->multicast_work);
-int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
+ brcmf_bus_detach(drvr);
- if (drvr_priv) {
- mutex_unlock(&drvr_priv->proto_block);
- return 1;
- }
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
- return 0;
+ bus_if->drvr = NULL;
+ kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
{
- return atomic_read(&drvr_priv->pend_8021x_cnt);
+ return atomic_read(&drvr->pend_8021x_cnt);
}
#define MAX_WAIT_FOR_8021X_TX 10
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
int timeout = 10 * HZ / 1000;
int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ int pend = brcmf_get_pend_8021x_cnt(drvr);
while (ntimes && pend) {
if (pend) {
@@ -1314,7 +1141,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
set_current_state(TASK_RUNNING);
ntimes--;
}
- pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ pend = brcmf_get_pend_8021x_cnt(drvr);
}
return pend;
}
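
The 802.1X accounting stays an atomic counter: bumped when an EAPOL frame is queued, dropped in brcmf_txcomplete, and polled by the wait helper above before key operations. A self-contained sketch of that pattern (names and the 10 ms step are illustrative):

    /* Sketch: atomic in-flight counter with a bounded polling waiter. */
    #include <linux/atomic.h>
    #include <linux/delay.h>

    static atomic_t pend_cnt = ATOMIC_INIT(0);

    static void submit_one(void)   { atomic_inc(&pend_cnt); }
    static void complete_one(void) { atomic_dec(&pend_cnt); }

    static int wait_for_drain(int max_tries)
    {
            while (max_tries-- && atomic_read(&pend_cnt))
                    msleep(10);                 /* roughly the 10 ms step above */
            return atomic_read(&pend_cnt);      /* 0 means fully drained */
    }
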
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 4ee1ea846f6..6bc4425a8b0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -41,17 +41,10 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
struct sk_buff *txp);
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
- struct sk_buff *rxp);
-
/* Use protocol to issue command to dongle */
extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
struct brcmf_dcmd *dcmd, int len);
-/* Update local copy of dongle statistics */
-extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-
extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 313b8bf592d..5a002a21f10 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -28,6 +28,7 @@
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -35,6 +36,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -85,11 +87,8 @@ struct rte_console {
#endif /* BCMDBG */
#include <chipcommon.h>
-#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
-#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -134,33 +133,6 @@ struct rte_console {
/* Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
/* direct(mapped) cis space */
/* MAPPED common CIS address */
@@ -335,49 +307,16 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-/* sbimstate */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-
-/* reset */
-#define SBTML_RESET 0x0001
-/* reject field */
-#define SBTML_REJ_MASK 0x0006
-/* reject */
-#define SBTML_REJ 0x0002
-/* temporary reject, for error recovery */
-#define SBTML_TMPREJ 0x0004
-
-/* Shift to locate the SI control flags in sbtml */
-#define SBTML_SICF_SHIFT 16
-
-/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
-
-/* Shift to locate the SI status flags in sbtmh */
-#define SBTMH_SISF_SHIFT 16
-
-/* sbidlow */
-#define SBIDL_INIT 0x80 /* initiator */
-
-/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
-#define SBIDH_RCE_SHIFT 8
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
- ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
-#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
-#define SBIDH_VC_SHIFT 16
+#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+
+#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
+#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ * when idle
+ */
+#define BRCMF_IDLE_INTERVAL 1
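
The SDIO code now names its firmware and NVRAM images with MODULE_FIRMWARE so packaging tools can pick them up; at runtime the blobs are still fetched through the standard firmware loader, roughly as sketched below (assumed helper name, device-specific download elided):

    /* Sketch: fetch a named blob via the firmware loader and release it
     * once its contents have been consumed. */
    #include <linux/firmware.h>
    #include <linux/device.h>

    static int load_blob(struct device *dev, const char *name)
    {
            const struct firmware *fw;
            int err;

            err = request_firmware(&fw, name, dev);
            if (err)
                    return err;                 /* e.g. file missing in /lib/firmware */

            /* ... copy fw->data (fw->size bytes) to the dongle here ... */

            release_firmware(fw);
            return 0;
    }
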
/*
* Conversion of 802.1D priority to precedence level
@@ -388,17 +327,6 @@ static uint prio2prec(u32 prio)
(prio^2) : prio;
}
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
/* core registers */
struct sdpcmd_regs {
u32 corecontrol; /* 0x00, rev8 */
@@ -524,25 +452,8 @@ struct sdpcm_shared_le {
/* misc chip info needed by some of the routines */
-struct chip_info {
- u32 chip;
- u32 chiprev;
- u32 cccorebase;
- u32 ccrev;
- u32 cccaps;
- u32 buscorebase; /* 32 bits backplane bus address */
- u32 buscorerev;
- u32 buscoretype;
- u32 ramcorebase;
- u32 armcorebase;
- u32 pmurev;
- u32 ramsize;
-};
-
/* Private data for SDIO bus interaction */
-struct brcmf_bus {
- struct brcmf_pub *drvr;
-
+struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct chip_info *ci; /* Chip info struct */
char *vars; /* Variables (from CIS and/or other) */
@@ -574,7 +485,7 @@ struct brcmf_bus {
uint txminmax;
struct sk_buff *glomd; /* Packet containing glomming descriptor */
- struct sk_buff *glom; /* Packet chain for glommed superframe */
+ struct sk_buff_head glom; /* Packet list for glommed superframe */
uint glomerr; /* Glom packet read errors */
u8 *rxbuf; /* Buffer for receiving control packets */
@@ -637,6 +548,13 @@ struct brcmf_bus {
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
+ uint tickcnt; /* Number of times the watchdog has been scheduled */

+ unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
+ unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
+ unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
+ unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
+ unsigned long rx_readahead_cnt; /* Number of packets where header
+ * read-ahead was used. */
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
@@ -657,50 +575,10 @@ struct brcmf_bus {
struct semaphore sdsem;
- const char *fw_name;
const struct firmware *firmware;
- const char *nv_name;
u32 fw_ptr;
-};
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
+ bool txoff; /* Transmit flow-controlled */
};
/* clkstate */
@@ -737,7 +615,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
}
/* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
{
return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
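
data_ok() depends on unsigned 8-bit wrap-around: tx_max - tx_seq is evaluated modulo 256, and the 0x80 test rejects a window that has wrapped past the limit. A tiny sketch with illustrative values in the comment:

    /* Sketch: credit-window check with modulo-256 sequence numbers. */
    #include <linux/types.h>

    static bool window_open(u8 tx_max, u8 tx_seq)
    {
            u8 room = tx_max - tx_seq;          /* wraps naturally, e.g. 2 - 250 = 8 */

            return room != 0 && !(room & 0x80); /* top bit set: seq ran past max */
    }
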
@@ -748,12 +626,14 @@ static bool data_ok(struct brcmf_bus *bus)
* adresses on the 32 bit backplane bus.
*/
static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
*regvar = brcmf_sdcard_reg_read(bus->sdiodev,
- bus->ci->buscorebase + reg_offset, sizeof(u32));
+ bus->ci->c_inf[idx].base + reg_offset,
+ sizeof(u32));
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
if (*retryvar) {
@@ -766,12 +646,13 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
}
static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
brcmf_sdcard_reg_write(bus->sdiodev,
- bus->ci->buscorebase + reg_offset,
+ bus->ci->c_inf[idx].base + reg_offset,
sizeof(u32), regval);
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
@@ -790,14 +671,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
if (bus->usebufpool)
brcmu_pkt_buf_free_skb(pkt);
}
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -812,10 +693,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- if ((bus->ci->chip == BCM4329_CHIP_ID)
- && (bus->ci->chiprev == 0))
- clkreq |= SBSDIO_FORCE_ALP;
-
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
@@ -823,14 +700,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
return -EBADE;
}
- if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev == 9))) {
- u32 dummy, retries;
- r_sdreg32(bus, &dummy,
- offsetof(struct sdpcmd_regs, clockctlstatus),
- &retries);
- }
-
/* Check current status */
clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
@@ -930,7 +799,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -943,7 +812,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef BCMDBG
uint oldstate = bus->clkstate;
@@ -999,7 +868,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
return 0;
}
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
uint retries = 0;
@@ -1034,11 +903,9 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
- if (bus->ci->chip != BCM4329_CHIP_ID) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
- }
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
/* Change state */
bus->sleeping = true;
@@ -1049,13 +916,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
- /* Force pad isolation off if possible
- (in case power never toggled) */
- if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev >= 10))
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, 0, NULL);
-
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -1080,13 +940,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
return 0;
}
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
{
if (bus->sleeping)
brcmf_sdbrcm_bussleep(bus, false);
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1162,7 +1022,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1219,16 +1079,61 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
/* If we can't reach the device, signal failure */
if (err || brcmf_sdcard_regfail(bus->sdiodev))
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+}
+
+/* copy a buffer into a pkt buffer chain */
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
+{
+ uint n, ret = 0;
+ struct sk_buff *p;
+ u8 *buf;
+
+ buf = bus->dataptr;
+
+ /* copy the data */
+ skb_queue_walk(&bus->glom, p) {
+ n = min_t(uint, p->len, len);
+ memcpy(p->data, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ if (!len)
+ break;
+ }
+
+ return ret;
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+{
+ struct sk_buff *p;
+ uint total;
+
+ total = 0;
+ skb_queue_walk(&bus->glom, p)
+ total += p->len;
+ return total;
}
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+{
+ struct sk_buff *cur, *next;
+
+ skb_queue_walk_safe(&bus->glom, cur, next) {
+ skb_unlink(cur, &bus->glom);
+ brcmu_pkt_buf_free_skb(cur);
+ }
+}
+
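
With the glom chain converted to a sk_buff_head, the list has to be initialised once before use (the patch does that in the bus setup code outside this excerpt) and torn down as in the helper above; the patch frees entries through brcmu_pkt_buf_free_skb(), while the sketch below uses the stock skb_queue_purge():

    /* Sketch: lifecycle of a list-managed packet chain like the glom queue. */
    #include <linux/skbuff.h>

    static void glom_lifecycle_demo(void)
    {
            struct sk_buff_head glom;
            struct sk_buff *skb;

            skb_queue_head_init(&glom);         /* must run before first use */

            skb = dev_alloc_skb(128);
            if (skb)
                    skb_queue_tail(&glom, skb); /* append a subframe */

            skb_queue_purge(&glom);             /* frees anything still queued */
    }
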
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u16 sublen, check;
- struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+ struct sk_buff *pfirst, *pnext;
int errcode;
u8 chan, seq, doff, sfdoff;
@@ -1240,11 +1145,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
- brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+ brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
+ bus->glomd, skb_peek(&bus->glom));
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
- pfirst = plast = pnext = NULL;
+ pfirst = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
@@ -1287,12 +1193,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, sublen);
break;
}
- if (!pfirst) {
- pfirst = plast = pnext;
- } else {
- plast->next = pnext;
- plast = pnext;
- }
+ skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
pkt_align(pnext, sublen, BRCMF_SDALIGN);
@@ -1308,12 +1209,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
bus->nextlen, totlen, rxseq);
}
- bus->glom = pfirst;
pfirst = pnext = NULL;
} else {
- if (pfirst)
- brcmu_pkt_buf_free_skb(pfirst);
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
num = 0;
}
@@ -1325,37 +1223,33 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Ok -- either we just generated a packet chain,
or had one from before */
- if (bus->glom) {
+ if (!skb_queue_empty(&bus->glom)) {
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
- for (pnext = bus->glom; pnext; pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
pnext, (u8 *) (pnext->data),
pnext->len, pnext->len);
}
}
- pfirst = bus->glom;
- dlen = (u16) brcmu_pkttotlen(pfirst);
+ pfirst = skb_peek(&bus->glom);
+ dlen = (u16) brcmf_sdbrcm_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
if (usechain) {
- errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, (u8 *) pfirst->data, dlen,
- pfirst);
+ SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, bus->dataptr, dlen,
- NULL);
- sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
- bus->dataptr);
+ SDIO_FUNC_2, F2SYNC,
+ bus->dataptr, dlen);
+ sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
dlen, sublen);
@@ -1373,16 +1267,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
if (errcode < 0) {
brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
if (bus->glomerr++ < 3) {
brcmf_sdbrcm_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
return 0;
}
@@ -1455,10 +1348,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, doff);
sfdoff = doff;
+ num = 0;
/* Validate all the subframe headers */
- for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
+ /* leave when invalid subframe is found */
+ if (errcode)
+ break;
+
dptr = (u8 *) (pnext->data);
dlen = (u16) (pnext->len);
sublen = get_unaligned_le16(dptr);
@@ -1491,6 +1388,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, doff, sublen, SDPCM_HDRLEN);
errcode = -1;
}
+ /* increase the subframe count */
+ num++;
}
if (errcode) {
@@ -1503,23 +1402,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
return 0;
}
/* Basic SD framing looks ok - process each packet (header) */
- save_pfirst = pfirst;
- bus->glom = NULL;
- plast = NULL;
-
- for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = pfirst->next;
- pfirst->next = NULL;
+ skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1539,6 +1431,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->rx_badseq++;
rxseq = seq;
}
+ rxseq++;
+
#ifdef BCMDBG
if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1551,36 +1445,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
- pfirst) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
+ &ifidx, pfirst) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
}
- /* this packet will go up, link back into
- chain and count it */
- pfirst->next = pnext;
- plast = pfirst;
- num++;
-
#ifdef BCMDBG
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
- num, pfirst, pfirst->data,
+ bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1589,19 +1469,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
#endif /* BCMDBG */
}
- if (num) {
+ /* send any remaining packets up */
+ if (bus->glom.qlen) {
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
down(&bus->sdsem);
}
bus->rxglomframes++;
- bus->rxglompkts += num;
+ bus->rxglompkts += bus->glom.qlen;
}
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
@@ -1623,7 +1504,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1631,7 +1512,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
@@ -1657,7 +1538,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((len + pad) < bus->drvr->maxctl))
+ ((len + pad) < bus->sdiodev->bus_if->maxctl))
rdlen += pad;
} else if (rdlen % BRCMF_SDALIGN) {
rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
@@ -1668,18 +1549,18 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
rdlen = roundup(rdlen, ALIGNMENT);
/* Drop if the read is too big or it exceeds our maximum */
- if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+ if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
- rdlen, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ rdlen, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
- if ((len - doff) > bus->drvr->maxctl) {
+ if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
- len, len - doff, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ len, len - doff, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1689,8 +1570,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
- NULL);
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
/* Control frame failures need retransmission */
@@ -1721,7 +1601,7 @@ done:
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1734,7 +1614,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
}
static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
struct sk_buff **pkt, u8 **rxbuf)
{
int sdret; /* Return code from calls */
@@ -1746,16 +1626,15 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
*rxbuf = (u8 *) ((*pkt)->data);
/* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- *rxbuf, rdlen, *pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
rdlen, sdret);
brcmu_pkt_buf_free_skb(*pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
/* Force retry w/normal header read.
* Don't attempt NAK for
* gSPI
@@ -1767,7 +1646,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
/* Checks the header */
static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
u8 rxseq, u16 nextlen, u16 *len)
{
u16 check;
@@ -1823,7 +1702,7 @@ fail:
/* Return true if there may be more frames to read */
static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
{
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@@ -1846,14 +1725,15 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft &&
+ bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
- if (bus->glom || bus->glomd) {
+ if (bus->glomd || !skb_queue_empty(&bus->glom)) {
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
- bus->glomd, bus->glom);
+ bus->glomd, skb_peek(&bus->glom));
cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rxseq += cnt - 1;
@@ -1905,7 +1785,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
- bus->drvr->rx_readahead_cnt++;
+ bus->rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -1976,7 +1856,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Read frame header (hardware and software) */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
- BRCMF_FIRSTREAD, NULL);
+ BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
if (sdret < 0) {
@@ -2103,7 +1983,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Too long -- skip this frame */
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
@@ -2115,7 +1995,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Give up on data, request rtx of events */
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
rdlen, chan);
- bus->drvr->rx_dropped++;
+ bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
continue;
}
@@ -2125,9 +2005,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
pkt_align(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
- rdlen, pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
if (sdret < 0) {
@@ -2136,7 +2015,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
: ((chan == SDPCM_DATA_CHANNEL) ? "data"
: "test")), sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
continue;
}
@@ -2185,16 +2064,17 @@ deliver:
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
+ pkt) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
continue;
}
/* Unlock during rx call */
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@@ -2214,16 +2094,8 @@ deliver:
return rxcount;
}
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
- return brcmf_sdcard_send_buf
- (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2233,7 +2105,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@@ -2242,7 +2114,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan, bool free_pkt)
{
int ret;
@@ -2262,7 +2134,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (skb_headroom(pkt) < pad) {
brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
skb_headroom(pkt), pad);
- bus->drvr->tx_realloc++;
+ bus->sdiodev->bus_if->tx_realloc++;
new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
if (!new) {
brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
@@ -2331,9 +2203,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame,
- len, pkt);
+ ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
if (ret < 0) {
@@ -2371,7 +2242,7 @@ done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
up(&bus->sdsem);
- brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
down(&bus->sdsem);
if (free_pkt)
@@ -2380,7 +2251,7 @@ done:
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@@ -2390,8 +2261,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
uint datalen;
u8 tx_prec_map;
- struct brcmf_pub *drvr = bus->drvr;
-
brcmf_dbg(TRACE, "Enter\n");
tx_prec_map = ~bus->flowcontrol;
@@ -2409,9 +2278,9 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
if (ret)
- bus->drvr->tx_errors++;
+ bus->sdiodev->bus_if->dstats.tx_errors++;
else
- bus->drvr->dstats.tx_bytes += datalen;
+ bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2428,14 +2297,98 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
- drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
- brcmf_txflowcontrol(drvr, 0, OFF);
+ if (bus->sdiodev->bus_if->drvr_up &&
+ (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
+ bus->txoff = OFF;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+ }
return cnt;
}
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_bus_stop(struct device *dev)
+{
+ u32 local_hostintmask;
+ u8 saveclk;
+ uint retries;
+ int err;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk && bus->dpc_tsk != current) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ }
+
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Change our idea of bus state */
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err)
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ /* Turn off the backplane clock (only) */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Clear the data packet queues */
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ brcmu_pkt_buf_free_skb(bus->glomd);
+ brcmf_sdbrcm_free_glom(bus);
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = false;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ up(&bus->sdsem);
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
uint retries = 0;
@@ -2463,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* BCMDBG */
@@ -2473,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2486,7 +2439,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2494,7 +2447,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@@ -2596,9 +2549,9 @@ clkwait:
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL);
+ (u32) bus->ctrl_frame_len);
if (ret < 0) {
/* On failure, abort the command and
@@ -2650,11 +2603,11 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
brcmf_sdcard_regfail(bus->sdiodev)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
brcmf_sdcard_regfail(bus->sdiodev));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2681,7 +2634,7 @@ clkwait:
static int brcmf_sdbrcm_dpc_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -2691,12 +2644,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
/* Call bus dpc unless it indicated down
(then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
if (brcmf_sdbrcm_dpc(bus))
complete(&bus->dpc_wait);
} else {
/* after stopping the bus, exit thread */
- brcmf_sdbrcm_bus_stop(bus);
+ brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
break;
}
@@ -2706,10 +2659,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
return 0;
}
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2728,9 +2684,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
- if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+ if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) ==
+ false) {
skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
brcmu_pkt_buf_free_skb(pkt);
brcmf_dbg(ERROR, "out of bus->txq !!!\n");
ret = -ENOSR;
@@ -2739,8 +2696,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
spin_unlock_bh(&bus->txqlock);
- if (pktq_len(&bus->txq) >= TXHI)
- brcmf_txflowcontrol(bus->drvr, 0, ON);
+ if (pktq_len(&bus->txq) >= TXHI) {
+ bus->txoff = ON;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+ }
#ifdef BCMDBG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2757,7 +2716,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@@ -2818,7 +2777,7 @@ xfer_done:
#ifdef BCMDBG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2895,14 +2854,14 @@ break2:
}
#endif /* BCMDBG */
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2937,8 +2896,8 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
return ret;
}
-int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
@@ -2946,6 +2905,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
uint retries = 0;
u8 doff = 0;
int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3045,19 +3007,22 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
- bus->drvr->tx_ctlerrs++;
+ bus->tx_ctlerrs++;
else
- bus->drvr->tx_ctlpkts++;
+ bus->tx_ctlpkts++;
return ret ? -EIO : 0;
}
-int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3083,21 +3048,21 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
}
if (rxlen)
- bus->drvr->rx_ctlpkts++;
+ bus->rx_ctlpkts++;
else
- bus->drvr->rx_ctlerrs++;
+ bus->rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
{
int bcmerror = 0;
brcmf_dbg(TRACE, "Enter\n");
/* Basic sanity checks */
- if (bus->drvr->up) {
+ if (bus->sdiodev->bus_if->drvr_up) {
bcmerror = -EISCONN;
goto err;
}
@@ -3123,7 +3088,7 @@ err:
return bcmerror;
}
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
u32 varsize;
@@ -3210,135 +3175,11 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
return bcmerror;
}
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if (regdata & SBTML_RESET)
- return;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
- 4, regdata | SBTML_REJ);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4) &
- SBTMH_BUSY), 100000);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_BUSY)
- brcmf_dbg(ERROR, "ARM core still busy\n");
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) |
- SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- SBIM_BY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4,
- (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_REJ | SBTML_RESET));
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- ~SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SBTML_REJ | SBTML_RESET));
- udelay(1);
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_RESET);
- udelay(1);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_SERR)
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4, 0);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- if (regdata & (SBIM_IBE | SBIM_TO))
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
- regdata & ~(SBIM_IBE | SBIM_TO));
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_FGC << SBTML_SICF_SHIFT) |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
uint retries;
- u32 regdata;
int bcmerror = 0;
+ struct chip_info *ci = bus->ci;
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
@@ -3346,10 +3187,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
if (enter) {
bus->alp_only = true;
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
- bus->ci->armcorebase);
+ ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
/* Clear the top bit of memory */
if (bus->ramsize) {
@@ -3358,11 +3198,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
(u8 *)&zeros, 4);
}
} else {
- regdata = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
- regdata &= (SBTML_RESET | SBTML_REJ_MASK |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+ if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
bcmerror = -EBADE;
goto fail;
@@ -3377,18 +3213,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
w_sdreg32(bus, 0xFFFFFFFF,
offsetof(struct sdpcmd_regs, intstatus), &retries);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
- bus->drvr->busstate = BRCMF_BUS_LOAD;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
@@ -3398,10 +3234,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
return len;
}
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
int offset = 0;
uint len;
@@ -3410,8 +3243,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
brcmf_dbg(INFO, "Enter\n");
- bus->fw_name = BCM4329_FW_NAME;
- ret = request_firmware(&bus->firmware, bus->fw_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3501,15 +3333,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
uint len;
char *memblock = NULL;
char *bufp;
int ret;
- bus->nv_name = BCM4329_NV_NAME;
- ret = request_firmware(&bus->firmware, bus->nv_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3549,7 +3380,7 @@ err:
return ret;
}
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
int bcmerror = -1;
@@ -3582,7 +3413,7 @@ err:
}
static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
@@ -3596,91 +3427,11 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
return ret;
}
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_bus_init(struct device *dev)
{
- u32 local_hostintmask;
- u8 saveclk;
- uint retries;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->watchdog_tsk) {
- send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
- bus->watchdog_tsk = NULL;
- }
-
- if (bus->dpc_tsk && bus->dpc_tsk != current) {
- send_sig(SIGTERM, bus->dpc_tsk, 1);
- kthread_stop(bus->dpc_tsk);
- bus->dpc_tsk = NULL;
- }
-
- down(&bus->sdsem);
-
- bus_wake(bus);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
-
- /* Change our idea of bus state */
- bus->drvr->busstate = BRCMF_BUS_DOWN;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err)
- brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
-
- /* Turn off the bus (F2), free any pending packets */
- brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Clear the data packet queues */
- brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
-
- /* Clear any held glomming stuff */
- if (bus->glomd)
- brcmu_pkt_buf_free_skb(bus->glomd);
-
- if (bus->glom)
- brcmu_pkt_buf_free_skb(bus->glom);
-
- bus->glom = bus->glomd = NULL;
-
- /* Clear rx control and wake any waiters */
- bus->rxlen = 0;
- brcmf_sdbrcm_dcmd_resp_wake(bus);
-
- /* Reset some F2 state stuff */
- bus->rxskip = false;
- bus->tx_seq = bus->rx_seq = 0;
-
- up(&bus->sdsem);
-}
-
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
-{
- struct brcmf_bus *bus = drvr->bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
u8 ready, enable;
@@ -3690,16 +3441,16 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
/* try to download image and nvram to the dongle */
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus_if->state == BRCMF_BUS_DOWN) {
if (!(brcmf_sdbrcm_download_firmware(bus)))
return -1;
}
- if (!bus->drvr)
+ if (!bus->sdiodev->bus_if->drvr)
return 0;
/* Start the watchdog timer */
- bus->drvr->tickcnt = 0;
+ bus->tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@@ -3756,7 +3507,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_WATERMARK, 8, &err);
/* Set bus state according to enable result */
- drvr->busstate = BRCMF_BUS_DATA;
+ bus_if->state = BRCMF_BUS_DATA;
}
else {
@@ -3771,7 +3522,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
- if (drvr->busstate != BRCMF_BUS_DATA)
+ if (bus_if->state != BRCMF_BUS_DATA)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@@ -3782,7 +3533,7 @@ exit:
void brcmf_sdbrcm_isr(void *arg)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
brcmf_dbg(TRACE, "Enter\n");
@@ -3791,7 +3542,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
return;
}
@@ -3814,14 +3565,14 @@ void brcmf_sdbrcm_isr(void *arg)
complete(&bus->dpc_wait);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
- struct brcmf_bus *bus;
+#ifdef BCMDBG
+ struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
brcmf_dbg(TIMER, "Enter\n");
- bus = drvr->bus;
-
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
@@ -3865,7 +3616,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
}
#ifdef BCMDBG
/* Poll for console output periodically */
- if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ if (bus_if->state == BRCMF_BUS_DATA &&
+ bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
@@ -3900,10 +3652,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM4329_CHIP_ID)
return true;
+ if (chipid == BCM4330_CHIP_ID)
+ return true;
return false;
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3915,13 +3669,13 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
bus->databuf = NULL;
}
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
- if (bus->drvr->maxctl) {
+ if (bus->sdiodev->bus_if->maxctl) {
bus->rxblen =
- roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf))
@@ -3950,276 +3704,14 @@ fail:
return false;
}
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
- {
- 4, 0x2}, {
- 2, 0x3}, {
- 1, 0x0}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
- {
- 12, 0x7}, {
- 10, 0x6}, {
- 8, 0x5}, {
- 6, 0x4}, {
- 4, 0x2}, {
- 2, 0x1}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
- {
- 32, 0x7}, {
- 26, 0x6}, {
- 22, 0x5}, {
- 16, 0x4}, {
- 12, 0x3}, {
- 8, 0x2}, {
- 4, 0x1}, {
- 0, 0x0}
- };
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-
-static char *brcmf_chipname(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
- u32 drivestrength) {
- struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask = 0;
- u32 str_shift = 0;
- char chn[8];
-
- if (!(bus->ci->cccaps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
- str_mask = 0x30000000;
- str_shift = 28;
- break;
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_chipname(bus->ci->chip, chn, 8),
- bus->ci->chiprev, bus->ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- int i;
-
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, cc_data_temp);
-
- brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
- drivestrength, cc_data_temp);
- }
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
-{
- u32 regdata;
-
- /*
- * Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- ci->cccorebase = regs;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, chipid), 4);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
- brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM4329_CHIP_ID:
- ci->buscorebase = BCM4329_CORE_BUS_BASE;
- ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
- ci->armcorebase = BCM4329_CORE_ARM_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- break;
- default:
- brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
- return -ENODEV;
- }
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->cccorebase, sbidhigh), 4);
- ci->ccrev = SBCOREREV(regdata);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
- ci->pmurev = regdata & PCAP_REV_MASK;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->buscorebase, sbidhigh), 4);
- ci->buscorerev = SBCOREREV(regdata);
- ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
-
- /* get chipcommon capabilites */
- ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
- return 0;
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
- struct chip_info *ci;
- int err;
- u8 clkval, clkset;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* alloc chip_info_t */
- ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
- if (NULL == ci)
- return -ENOMEM;
-
- /* bus/core/clk setup for register access */
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_dbg(ERROR, "error writing for HT off\n");
- goto fail;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
- if ((clkval & ~SBSDIO_AVBITS) == clkset) {
- SPINWAIT(((clkval =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- err = -EBUSY;
- goto fail;
- }
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
- SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- clkset, &err);
- udelay(65);
- } else {
- brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- err = -EACCES;
- goto fail;
- }
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
- if (err)
- goto fail;
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* WAR: cmd52 backplane read so core HW will drop ALPReq */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- 0, NULL);
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- bus->ci = ci;
- return 0;
-fail:
- bus->ci = NULL;
- kfree(ci);
- return err;
-}
-
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
u8 clkctl = 0;
int err = 0;
int reg_addr;
u32 reg_val;
+ u8 idx;
bus->alp_only = true;
@@ -4234,7 +3726,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
#endif /* BCMDBG */
/*
- * Force PLL off until brcmf_sdbrcm_chip_attach()
+ * Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
@@ -4252,8 +3744,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
goto fail;
}
@@ -4262,11 +3754,10 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+ brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
+ SDIO_DRIVE_STRENGTH);
- /* Get info on the ARM and SOCRAM cores... */
- brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+ /* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
if (!(bus->ramsize)) {
brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
@@ -4274,7 +3765,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
}
/* Set core control so an SDIO reset does a backplane reset */
- reg_addr = bus->ci->buscorebase +
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ reg_addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, corecontrol);
reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
@@ -4298,7 +3790,7 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4306,7 +3798,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
@@ -4333,7 +3825,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -4341,9 +3833,9 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
- bus->drvr->tickcnt++;
+ bus->tickcnt++;
} else
break;
}
@@ -4353,7 +3845,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -4364,23 +3856,14 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(bus->ci);
- bus->ci = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
if (bus->ci) {
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- brcmf_sdbrcm_chip_detach(bus);
+ brcmf_sdio_chip_detach(&bus->ci);
if (bus->vars && bus->varsz)
kfree(bus->vars);
bus->vars = NULL;
@@ -4390,7 +3873,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
}
/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4398,10 +3881,9 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
/* De-register interrupt handler */
brcmf_sdcard_intr_dereg(bus->sdiodev);
- if (bus->drvr) {
- brcmf_detach(bus->drvr);
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
brcmf_sdbrcm_release_dongle(bus);
- bus->drvr = NULL;
}
brcmf_sdbrcm_release_malloc(bus);
@@ -4412,21 +3894,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
- struct brcmf_bus *bus;
-
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behavior since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_c_init();
+ struct brcmf_sdio *bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -4434,12 +3905,13 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
* regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
goto fail;
bus->sdiodev = sdiodev;
sdiodev->bus = bus;
+ skb_queue_head_init(&bus->glom);
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
@@ -4484,9 +3956,15 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
bus->dpc_tsk = NULL;
}
+ /* Assign bus interface call back */
+ bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
+ bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init;
+ bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata;
+ bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
+ bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
/* Attach to the brcmf/OS/network interface */
- bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
- if (!bus->drvr) {
+ ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_attach failed\n");
goto fail;
}
@@ -4514,16 +3992,17 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
brcmf_dbg(INFO, "completed!!\n");
/* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->drvr);
+ ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
if (ret == -ENOLINK) {
brcmf_dbg(ERROR, "dongle is not responding\n");
goto fail;
}
}
- /* Ok, have the per-port tell the stack we're open for business */
- if (brcmf_net_attach(bus->drvr, 0) != 0) {
- brcmf_dbg(ERROR, "Net attach failed!!\n");
+
+ /* add interface and open for business */
+ if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
+ brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
goto fail;
}
@@ -4536,7 +4015,7 @@ fail:
void brcmf_sdbrcm_disconnect(void *ptr)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
brcmf_dbg(TRACE, "Enter\n");
@@ -4546,18 +4025,9 @@ void brcmf_sdbrcm_disconnect(void *ptr)
brcmf_dbg(TRACE, "Disconnected\n");
}
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
- return &bus->sdiodev->func[2]->dev;
-}
-
void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
- /* don't start the wd until fw is loaded */
- if (bus->drvr->busstate == BRCMF_BUS_DOWN)
- return;
-
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid == true) {
del_timer_sync(&bus->timer);
@@ -4566,6 +4036,10 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
return;
}
+ /* don't start the wd until fw is loaded */
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+ return;
+
if (wdtick) {
if (bus->save_ms != BRCMF_WD_POLL_MS) {
if (bus->wd_timer_valid == true)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
new file mode 100644
index 00000000000..11b2d7c97ba
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ***** SDIO interface chip backplane handle functions ***** */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/card.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+
+#include <chipcommon.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <soc.h>
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+#include "sdio_chip.h"
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
+u8
+brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+ if (coreid == ci->c_inf[idx].id)
+ return idx;
+
+ return BRCMF_MAX_CORENUM;
+}
+
+static u32
+brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+ return SBCOREREV(regdata);
+}
+
+static u32
+brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+static bool
+brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return (SSB_TMSLOW_CLOCK == regdata);
+}
+
+static bool
+brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+ bool ret;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void
+brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if (regdata & SSB_TMSLOW_RESET)
+ return;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, regdata | SSB_TMSLOW_REJECT);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+ SSB_TMSHIGH_BUSY), 100000);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_BUSY)
+ brcmf_dbg(ERROR, "core state still busy\n");
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
+ SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ ~SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void
+brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* if core is already in reset, just return */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, 0);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(10);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, BCMA_RESET_CTL_RESET);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* clear any serror */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_SERR)
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+
+ /* clear reset and allow it to propagate throughout the core */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* leave clock enabled */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
+
+ /* now do initialization sequence */
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, 0);
+ udelay(1);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(1);
+}
+
+static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 regs)
+{
+ u32 regdata;
+
+ /*
+ * Get CC core rev
+ * Chipid is assumed to be at offset 0 from regs arg
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = regs;
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+ ci->chip = regdata & CID_ID_MASK;
+ ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM4329_CHIP_ID:
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+ ci->ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->ramsize = 0x48000;
+ break;
+ default:
+ brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ return -ENODEV;
+ }
+
+ switch (ci->socitype) {
+ case SOCI_SB:
+ ci->iscoreup = brcmf_sdio_sb_iscoreup;
+ ci->corerev = brcmf_sdio_sb_corerev;
+ ci->coredisable = brcmf_sdio_sb_coredisable;
+ ci->resetcore = brcmf_sdio_sb_resetcore;
+ break;
+ case SOCI_AI:
+ ci->iscoreup = brcmf_sdio_ai_iscoreup;
+ ci->corerev = brcmf_sdio_ai_corerev;
+ ci->coredisable = brcmf_sdio_ai_coredisable;
+ ci->resetcore = brcmf_sdio_ai_resetcore;
+ break;
+ default:
+ brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+{
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
+
+static void
+brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci)
+{
+ /* get chipcommon rev */
+ ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+
+ /* get chipcommon capabilities */
+ ci->c_inf[0].caps =
+ brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+
+ /* get pmu caps & rev */
+ if (ci->c_inf[0].caps & CC_CAP_PMU) {
+ ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+ ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+ }
+
+ ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ ci->c_inf[0].rev, ci->pmurev,
+ ci->c_inf[1].rev, ci->c_inf[1].id);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong or
+ * downloaded code was already running).
+ */
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
+}
+
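+/*
+ * Allocate the chip_info structure, prepare the bus core clock, recognize
+ * the chip and its cores, and clear the chipcommon GPIO pull-up/pull-down
+ * registers.
+ */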
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs)
+{
+ int ret;
+ struct chip_info *ci;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* alloc chip_info */
+ ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ if (!ci)
+ return -ENOMEM;
+
+ ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+ if (ret != 0)
+ goto err;
+
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ if (ret != 0)
+ goto err;
+
+ brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+
+ *ci_ptr = ci;
+ return 0;
+
+err:
+ kfree(ci);
+ return ret;
+}
+
+void
+brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(*ci_ptr);
+ *ci_ptr = NULL;
+}
+
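+/* chip ids in the 0x4000-0xa000 range print as hex, others as decimal */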
+static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
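+/*
+ * Program the SDIO drive strength through the PMU chip control registers.
+ * Only chips with a PMU and a known chip/pmurev combination are handled.
+ */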
+void
+brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength)
+{
+ struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask = 0;
+ u32 str_shift = 0;
+ char chn[8];
+
+ if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_sdio_chip_name(ci->chip, chn, 8),
+ ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, 1);
+ cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, cc_data_temp);
+
+ brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp);
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
new file mode 100644
index 00000000000..ce974d76bd9
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMFMAC_SDIO_CHIP_H_
+#define _BRCMFMAC_SDIO_CHIP_H_
+
+/*
+ * Core reg address translation.
+ * Each macro returns a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+#define BRCMF_MAX_CORENUM 6
+
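+/* id, revision and register/wrapper base addresses of a backplane core */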
+struct chip_core_info {
+ u16 id;
+ u16 rev;
+ u32 base;
+ u32 wrapbase;
+ u32 caps;
+ u32 cib;
+};
+
+struct chip_info {
+ u32 chip;
+ u32 chiprev;
+ u32 socitype;
+ /* core info */
+ /* always put chipcommon core at 0, bus core at 1 */
+ struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+ u32 pmurev;
+ u32 pmucaps;
+ u32 ramsize;
+
+ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+ void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+};
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci,
+ u32 drivestrength);
+extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+
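+/*
+ * Typical call sequence (sketch; sdiodev, regsva and drivestrength are
+ * assumed to be supplied by the caller):
+ *
+ *	struct chip_info *ci;
+ *
+ *	if (brcmf_sdio_chip_attach(sdiodev, &ci, regsva))
+ *		return -ENODEV;
+ *	brcmf_sdio_chip_drivestrengthinit(sdiodev, ci, drivestrength);
+ *	...
+ *	brcmf_sdio_chip_detach(&ci);
+ */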
+
+#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa898111..0281d207d99 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -116,12 +116,20 @@
#define SUCCESS 0
#define ERROR 1
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN (1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS 10
+
struct brcmf_sdreg {
int func;
int offset;
int value;
};
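+/* forward declaration; needed for the brcmf_sdbrcm_wd_timer() prototype below */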
+struct brcmf_sdio;
+
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
@@ -132,9 +140,10 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
- wait_queue_head_t request_packet_wait;
+ wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
-
+ struct device *dev;
+ struct brcmf_bus *bus_if;
};
/* Register/deregister device interrupt handler. */
@@ -182,11 +191,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
* NOTE: Async operation is not currently supported.
*/
extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
/* Flags bits */
@@ -237,16 +256,20 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer, struct sk_buff *pkt);
+ uint fix_inc, uint rw, uint fnc_num, u32 addr,
+ struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
+
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 5eddabe5228..f23b0c3e4ea 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, s32 dbm)
+ enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
+ s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
break;
case NL80211_TX_POWER_LIMITED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
WL_ERR("TX_POWER_FIXED - dbm is negative\n");
@@ -1997,7 +1992,7 @@ done:
}
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
- struct brcmf_bss_info *bi)
+ struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
@@ -2049,18 +2044,27 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
notify_timestamp, notify_capability, notify_interval, notify_ie,
notify_ielen, notify_signal, GFP_KERNEL);
- if (!bss) {
- WL_ERR("cfg80211_inform_bss_frame error\n");
- return -EINVAL;
- }
+ if (!bss)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
return err;
}
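+/* step through the little-endian BSS records in a scan result list */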
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+ if (bss == NULL)
+ return list->bss_info_le;
+ return (struct brcmf_bss_info_le *)((unsigned long)bss +
+ le32_to_cpu(bss->length));
+}
+
static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
{
struct brcmf_scan_results *bss_list;
- struct brcmf_bss_info *bi = NULL; /* must be initialized */
+ struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
@@ -2072,7 +2076,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
}
WL_SCAN("scanned AP count (%d)\n", bss_list->count);
for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
- bi = next_bss(bss_list, bi);
+ bi = next_bss_le(bss_list, bi);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
break;
@@ -2085,8 +2089,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
- struct brcmf_bss_info *bi = NULL;
+ struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
+ struct cfg80211_bss *bss;
u8 *buf = NULL;
s32 err = 0;
u16 channel;
@@ -2114,7 +2119,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
goto CleanUp;
}
- bi = (struct brcmf_bss_info *)(buf + 4);
+ bi = (struct brcmf_bss_info_le *)(buf + 4);
channel = bi->ctl_ch ? bi->ctl_ch :
CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2140,10 +2145,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("signal: %d\n", notify_signal);
WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
- cfg80211_inform_bss(wiphy, notify_channel, bssid,
+ bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
notify_timestamp, notify_capability, notify_interval,
notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
+ if (!bss) {
+ err = -ENOMEM;
+ goto CleanUp;
+ }
+
+ cfg80211_put_bss(bss);
+
CleanUp:
kfree(buf);
@@ -2188,7 +2200,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct brcmf_bss_info *bi;
+ struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
u16 beacon_interval;
@@ -2211,7 +2223,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
goto update_bss_info_out;
}
- bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+ bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
goto update_bss_info_out;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 62dc46144ed..a613b49cb13 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -352,15 +352,6 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
return &cfg->conn_info;
}
-static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
- struct brcmf_bss_info *bss)
-{
- return bss = bss ?
- (struct brcmf_bss_info *)((unsigned long)bss +
- le32_to_cpu(bss->length)) :
- list->bss_info;
-}
-
extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
struct device *busdev,
void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 025fa0eb6f4..ab9bb11abfb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -16,6 +16,8 @@
* File contents: support functions for PCI/PCIe
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/pci.h>
@@ -316,51 +318,24 @@
#define BADIDX (SI_MAXCORES + 1)
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
- (((si)->pub.buscoretype == PCI_CORE_ID) && \
- (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
- PCI_16KB0_CCREGS_OFFSET))
-
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
+#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
-
-#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
#ifdef BCMDBG
-#define SI_MSG(args) printk args
+#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
-#define SI_MSG(args)
+#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
#define GOODCOREADDR(x, b) \
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
IS_ALIGNED((x), SI_CORE_SIZE))
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
- PCI_16KB0_PCIREGS_OFFSET)
-
struct aidmp {
u32 oobselina30; /* 0x000 */
u32 oobselina74; /* 0x004 */
@@ -479,406 +454,13 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
- u32 ent;
- uint inv = 0, nom = 0;
-
- while (true) {
- ent = R_REG(*eromptr);
- (*eromptr)++;
-
- if (mask == 0)
- break;
-
- if ((ent & ER_VALID) == 0) {
- inv++;
- continue;
- }
-
- if (ent == (ER_END | ER_VALID))
- break;
-
- if ((ent & mask) == match)
- break;
-
- nom++;
- }
-
- return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
- u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
- u32 asd, sz, szd;
-
- asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
- if (((asd & ER_TAG1) != ER_ADD) ||
- (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
- ((asd & AD_ST_MASK) != st)) {
- /* This is not what we want, "push" it back */
- (*eromptr)--;
- return 0;
- }
- *addrl = asd & AD_ADDR_MASK;
- if (asd & AD_AG32)
- *addrh = get_erom_ent(sih, eromptr, 0, 0);
- else
- *addrh = 0;
- *sizeh = 0;
- sz = asd & AD_SZ_MASK;
- if (sz == AD_SZ_SZD) {
- szd = get_erom_ent(sih, eromptr, 0, 0);
- *sizel = szd & SD_SZ_MASK;
- if (szd & SD_SG32)
- *sizeh = get_erom_ent(sih, eromptr, 0, 0);
- } else
- *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
- return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- u32 erombase;
- u32 __iomem *eromptr, *eromlim;
- void __iomem *regs = cc;
-
- erombase = R_REG(&cc->eromptr);
-
- /* Set wrappers address */
- sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
- /* Now point the window at the erom */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
- eromptr = regs;
- eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
- while (eromptr < eromlim) {
- u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
- u32 mpd, asd, addrl, addrh, sizel, sizeh;
- u32 __iomem *base;
- uint i, j, idx;
- bool br;
-
- br = false;
-
- /* Grok a component */
- cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
- if (cia == (ER_END | ER_VALID)) {
- /* Found END of erom */
- ai_hwfixup(sii);
- return;
- }
- base = eromptr - 1;
- cib = get_erom_ent(sih, &eromptr, 0, 0);
-
- if ((cib & ER_TAG) != ER_CI) {
- /* CIA not followed by CIB */
- goto error;
- }
-
- cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
- mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
- crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
- nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
- nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
- nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
- nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
- if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
- continue;
- if ((nmw + nsw == 0)) {
- /* A component which is not a core */
- if (cid == OOB_ROUTER_CORE_ID) {
- asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd != 0)
- sii->oob_router = addrl;
- }
- continue;
- }
-
- idx = sii->numcores;
-/* sii->eromptr[idx] = base; */
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = cid;
-
- for (i = 0; i < nmp; i++) {
- mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
- if ((mpd & ER_TAG) != ER_MP) {
- /* Not enough MP entries for component */
- goto error;
- }
- }
-
- /* First Slave Address Descriptor should be port 0:
- * the main register space for the core
- */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
- &sizel, &sizeh);
- if (asd == 0) {
- /* Try again to see if it is a bridge */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd != 0)
- br = true;
- else if ((addrh != 0) || (sizeh != 0)
- || (sizel != SI_CORE_SIZE)) {
- /* First Slave ASD for core malformed */
- goto error;
- }
- }
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
- /* Get any more ASDs in port 0 */
- j = 1;
- do {
- asd =
- get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
- &addrh, &sizel, &sizeh);
- if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
- }
- j++;
- } while (asd != 0);
-
- /* Go through the ASDs for other slave ports */
- for (i = 1; i < nsp; i++) {
- j = 0;
- do {
- asd =
- get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- } while (asd != 0);
- if (j == 0) {
- /* SP has no address descriptors */
- goto error;
- }
- }
-
- /* Now get master wrappers */
- for (i = 0; i < nmw; i++) {
- asd =
- get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for MW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Master wrapper %d is not 4KB */
- goto error;
- }
- if (i == 0)
- sii->wrapba[idx] = addrl;
- }
-
- /* And finally slave wrappers */
- for (i = 0; i < nsw; i++) {
- uint fwp = (nsp == 1) ? 0 : 1;
- asd =
- get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for SW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Slave wrapper is not 4KB */
- goto error;
- }
- if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
- }
-
- /* Don't record bridges */
- if (br)
- continue;
-
- /* Done with core */
- sii->numcores++;
- }
-
- error:
- /* Reached end of erom without finding END */
- sii->numcores = 0;
- return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 addr = sii->coresba[coreidx];
- u32 wrap = sii->wrapba[coreidx];
-
- if (coreidx >= sii->numcores)
- return NULL;
-
- /* point bar0 window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
- /* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
- sii->curidx = coreidx;
-
- return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
- return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba[cidx];
- else if (asidx == 1)
- return sii->coresba2[cidx];
- else {
- /* Need to parse the erom again to find addr space */
- return 0;
- }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba_size[cidx];
- else if (asidx == 1)
- return sii->coresba2_size[cidx];
- else {
- /* Need to parse the erom again to find addr */
- return 0;
- }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cia;
-
- sii = (struct si_info *)sih;
- cia = sii->cia[sii->curidx];
- return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cib;
-
- sii = (struct si_info *)sih;
- cib = sii->cib[sii->curidx];
- return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
- SICF_CLOCK_EN)
- && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-
- return R_REG(&ai->ioctrl);
-}
-
/* return true if PCIE capability exists in the pci config space */
static bool ai_ispcie(struct si_info *sii)
{
u8 cap_ptr;
cap_ptr =
- pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+ pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
NULL);
if (!cap_ptr)
return false;
@@ -894,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
return true;
}
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->iostatus) & ~mask) | val);
- W_REG(&ai->iostatus, w);
- }
-
- return R_REG(&ai->iostatus);
-}
-
static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- bool pci, pcie;
- uint i;
- uint pciidx, pcieidx, pcirev, pcierev;
- struct chipcregs __iomem *cc;
+ struct bcma_device *pci = NULL;
+ struct bcma_device *pcie = NULL;
+ struct bcma_device *core;
- cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ /* no cores found, bail out */
+ if (cc->bus->nr_cores == 0)
+ return false;
/* get chipcommon rev */
- sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+ sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (sii->pub.ccrev >= 11)
- sii->pub.chipst = R_REG(&cc->chipstatus);
+ if (ai_get_ccrev(&sii->pub) >= 11)
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
/* get chipcommon capabilities */
- sii->pub.cccaps = R_REG(&cc->capabilities);
- /* get chipcommon extended capabilities */
-
- if (sii->pub.ccrev >= 35)
- sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+ sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
- if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+ sii->pub.pmucaps = bcma_read32(cc,
+ CHIPCREGOFFS(pmucapabilities));
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out bus/orignal core idx */
- sii->pub.buscoretype = NODEV_CORE_ID;
- sii->pub.buscorerev = NOREV;
- sii->pub.buscoreidx = BADIDX;
-
- pci = pcie = false;
- pcirev = pcierev = NOREV;
- pciidx = pcieidx = BADIDX;
-
- for (i = 0; i < sii->numcores; i++) {
+ /* figure out buscore */
+ list_for_each_entry(core, &cc->bus->cores, list) {
uint cid, crev;
- ai_setcoreidx(&sii->pub, i);
- cid = ai_coreid(&sii->pub);
- crev = ai_corerev(&sii->pub);
+ cid = core->id.id;
+ crev = core->id.rev;
if (cid == PCI_CORE_ID) {
- pciidx = i;
- pcirev = crev;
- pci = true;
+ pci = core;
} else if (cid == PCIE_CORE_ID) {
- pcieidx = i;
- pcierev = crev;
- pcie = true;
+ pcie = core;
}
-
- /* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (cc == sii->regs[i]))
- *origidx = i;
}
if (pci && pcie) {
if (ai_ispcie(sii))
- pci = false;
+ pci = NULL;
else
- pcie = false;
+ pcie = NULL;
}
if (pci) {
- sii->pub.buscoretype = PCI_CORE_ID;
- sii->pub.buscorerev = pcirev;
- sii->pub.buscoreidx = pciidx;
+ sii->buscore = pci;
} else if (pcie) {
- sii->pub.buscoretype = PCIE_CORE_ID;
- sii->pub.buscorerev = pcierev;
- sii->pub.buscoreidx = pcieidx;
+ sii->buscore = pcie;
}
/* fixup necessary chip/core configurations */
- if (SI_FAST(sii)) {
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->pbus,
- (__iomem void *)PCIEREGS(sii));
- if (sii->pch == NULL)
- return false;
- }
+ if (!sii->pch) {
+ sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+ if (sii->pch == NULL)
+ return false;
}
- if (ai_pci_fixcfg(&sii->pub)) {
- /* si_doattach: si_pci_fixcfg failed */
+ if (ai_pci_fixcfg(&sii->pub))
return false;
- }
-
- /* return to the original core */
- ai_setcoreidx(&sii->pub, *origidx);
return true;
}
@@ -1017,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
uint w = 0;
/* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
sii->pub.boardvendor = w & 0xffff;
sii->pub.boardtype = (w >> 16) & 0xffff;
- sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
}
static struct si_info *ai_doattach(struct si_info *sii,
- void __iomem *regs, struct pci_dev *pbus)
+ struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint socitype;
- uint origidx;
-
- memset((unsigned char *) sii, 0, sizeof(struct si_info));
savewin = 0;
- sih->buscoreidx = BADIDX;
-
- sii->curmap = regs;
- sii->pbus = pbus;
-
- /* find Chipcommon address */
- pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
- savewin = SI_ENUM_BASE;
+ sii->icbus = pbus;
+ sii->pcibus = pbus->host_pci;
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
- SI_ENUM_BASE);
- cc = (struct chipcregs __iomem *) regs;
+ /* switch to Chipcommon core */
+ cc = pbus->drv_cc.core;
/* bus/core/clk setup for register access */
if (!ai_buscore_prep(sii))
@@ -1062,94 +584,74 @@ static struct si_info *ai_doattach(struct si_info *sii,
* hosts w/o chipcommon), some way of recognizing them needs to
* be added here.
*/
- w = R_REG(&cc->chipid);
+ w = bcma_read32(cc, CHIPCREGOFFS(chipid));
socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
/* Might as well fill in chip id, rev & pkg */
sih->chip = w & CID_ID_MASK;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
- sih->issim = false;
-
/* scan for cores */
- if (socitype == SOCI_AI) {
- SI_MSG(("Found chip type AI (0x%08x)\n", w));
- /* pass chipc address instead of original core base */
- ai_scan(&sii->pub, cc);
- } else {
- /* Found chip of unknown type */
- return NULL;
- }
- /* no cores found, bail out */
- if (sii->numcores == 0)
+ if (socitype != SOCI_AI)
return NULL;
- /* bus/core/clk setup */
- origidx = SI_CC_IDX;
- if (!ai_buscore_setup(sii, savewin, &origidx))
+ SI_MSG("Found chip type AI (0x%08x)\n", w);
+ if (!ai_buscore_setup(sii, cc))
goto exit;
/* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub, cc))
+ if (srom_var_init(&sii->pub))
goto exit;
ai_nvram_process(sii);
/* === NVRAM, clock is ready === */
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
- W_REG(&cc->gpiopullup, 0);
- W_REG(&cc->gpiopulldown, 0);
- ai_setcoreidx(sih, origidx);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
/* PMU specific initializations */
- if (sih->cccaps & CC_CAP_PMU) {
- u32 xtalfreq;
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
si_pmu_init(sih);
- si_pmu_chip_init(sih);
-
- xtalfreq = si_pmu_measure_alpclk(sih);
- si_pmu_pll_init(sih, xtalfreq);
+ (void)si_pmu_measure_alpclk(sih);
si_pmu_res_init(sih);
- si_pmu_swreg_init(sih);
}
/* setup the GPIO based LED powersave register */
w = getintvar(sih, BRCMS_SROM_LEDDC);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
+ ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+ ~0, w);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_attach(sii->pch, SI_DOATTACH);
- if (sih->chip == BCM43224_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
* enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
*/
- if (sih->chiprev == 0) {
- SI_MSG(("Applying 43224A0 WARs\n"));
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
+ if (ai_get_chiprev(sih) == 0) {
+ SI_MSG("Applying 43224A0 WARs\n");
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
- if (sih->chiprev >= 1) {
- SI_MSG(("Applying 43224B0+ WARs\n"));
+ if (ai_get_chiprev(sih) >= 1) {
+ SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
- if (sih->chip == BCM4313_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
/*
* enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
*/
- SI_MSG(("Applying 4313 WARs\n"));
+ SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}
@@ -1165,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
*/
struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
{
struct si_info *sii;
/* alloc struct si_info */
- sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
if (sii == NULL)
return NULL;
- if (ai_doattach(sii, regs, sdh) == NULL) {
+ if (ai_doattach(sii, pbus) == NULL) {
kfree(sii);
return NULL;
}
@@ -1209,292 +708,66 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intr_arg = intr_arg;
- sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
- sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
- sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
- /* save current core id. when this function called, the current core
- * must be the core which provides driver functions(il, et, wl, etc.)
- */
- sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
- return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
/* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
{
+ struct bcma_device *core;
struct si_info *sii;
uint found;
- uint i;
sii = (struct si_info *)sih;
found = 0;
- for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
+ list_for_each_entry(core, &sii->icbus->cores, list)
+ if (core->id.id == coreid) {
if (found == coreunit)
- return i;
+ return core;
found++;
}
- return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
- uint idx;
-
- idx = ai_findcoreidx(sih, coreid, coreunit);
- if (idx >= SI_MAXCORES)
- return NULL;
-
- return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val)
-{
- void __iomem *cc;
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- if (SI_FAST(sii)) {
- /* Overloading the origidx variable to remember the coreid,
- * this works because the core ids cannot be confused with
- * core indices.
- */
- *origidx = coreid;
- if (coreid == CC_CORE_ID)
- return CCREGS_FAST(sii);
- else if (coreid == sih->buscoretype)
- return PCIEREGS(sii);
- }
- INTR_OFF(sii, *intr_val);
- *origidx = sii->curidx;
- cc = ai_setcore(sih, coreid, 0);
- return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- if (SI_FAST(sii)
- && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
- return;
-
- ai_setcoreidx(sih, coreid);
- INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 *w = (u32 *) sii->curwrap;
- W_REG(w + (offset / 4), val);
- return;
+ return NULL;
}
/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
*/
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
{
- uint origidx = 0;
- u32 __iomem *r = NULL;
- uint w;
- uint intr_val = 0;
- bool fast = false;
+ struct bcma_device *cc;
+ u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- /*
- * If pci/pcie, we can get at pci/pcie regs
- * and on newer cores to chipc
- */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
- fast = true;
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /*
- * pci registers are at either in the last 2KB of
- * an 8KB window or, in pcie and pci rev 13 at 8KB
- */
- fast = true;
- if (SI_FAST(sii))
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- else
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET :
- PCI_BAR0_PCIREGS_OFFSET) + regoff);
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = ai_coreidx(&sii->pub);
-
- /* switch core */
- r = (u32 __iomem *) ((unsigned char __iomem *)
- ai_setcoreidx(&sii->pub, coreidx) + regoff);
- }
+ cc = sii->icbus->drv_cc.core;
/* mask and set */
if (mask || val) {
- w = (R_REG(r) & ~mask) | val;
- W_REG(r, w);
+ bcma_maskset32(cc, regoff, ~mask, val);
}
/* readback */
- w = R_REG(r);
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
+ w = bcma_read32(cc, regoff);
return w;
}
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
- struct si_info *sii;
- u32 dummy;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- /* if core is already in reset, just return */
- if (R_REG(&ai->resetctrl) & AIRC_RESET)
- return;
-
- W_REG(&ai->ioctrl, bits);
- dummy = R_REG(&ai->ioctrl);
- udelay(10);
-
- W_REG(&ai->resetctrl, AIRC_RESET);
- udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 dummy;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- /*
- * Must do the disable sequence first to work
- * for arbitrary current core state.
- */
- ai_core_disable(sih, (bits | resetbits));
-
- /*
- * Now do the initialization sequence.
- */
- W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- W_REG(&ai->resetctrl, 0);
- udelay(1);
-
- W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- udelay(1);
-}
-
/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct chipcregs __iomem *cc;
+ struct si_info *sii;
u32 val;
- if (sii->pub.ccrev < 6) {
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+ sii = (struct si_info *)sih;
+ if (ai_get_ccrev(&sii->pub) < 6) {
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
&val);
if (val & PCI_CFG_GPIO_SCS)
return SCC_SS_PCI;
return SCC_SS_XTAL;
- } else if (sii->pub.ccrev < 10) {
- cc = (struct chipcregs __iomem *)
- ai_setcoreidx(&sii->pub, sii->curidx);
- return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+ } else if (ai_get_ccrev(&sii->pub) < 10) {
+ return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_SS_MASK;
} else /* Insta-clock */
return SCC_SS_XTAL;
}
@@ -1503,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
* return the ILP (slowclock) min or max frequency
* precondition: we've established the chip has dynamic clk control
*/
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
- struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+ struct bcma_device *cc)
{
u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sii);
- if (sii->pub.ccrev < 6) {
+ slowclk = ai_slowclk_src(sih, cc);
+ if (ai_get_ccrev(sih) < 6) {
if (slowclk == SCC_SS_PCI)
return max_freq ? (PCIMAXFREQ / 64)
: (PCIMINFREQ / 64);
else
return max_freq ? (XTALMAXFREQ / 32)
: (XTALMINFREQ / 32);
- } else if (sii->pub.ccrev < 10) {
+ } else if (ai_get_ccrev(sih) < 10) {
div = 4 *
- (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
- SCC_CD_SHIFT) + 1);
+ (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
if (slowclk == SCC_SS_LPO)
return max_freq ? LPOMAXFREQ : LPOMINFREQ;
else if (slowclk == SCC_SS_XTAL)
@@ -1531,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
: (PCIMINFREQ / div);
} else {
/* Chipc rev 10 is InstaClock */
- div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
- div = 4 * (div + 1);
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
return 0;
}
static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
{
uint slowmaxfreq, pll_delay, slowclk;
uint pll_on_delay, fref_sel_delay;
@@ -1552,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
* powered down by dynamic clk control logic.
*/
- slowclk = ai_slowclk_src(sii);
+ slowclk = ai_slowclk_src(sih, cc);
if (slowclk != SCC_SS_XTAL)
pll_delay += XTAL_ON_DELAY;
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih,
+ (ai_get_ccrev(sih) >= 10) ? false : true, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
- W_REG(&cc->pll_on_delay, pll_on_delay);
- W_REG(&cc->fref_sel_delay, fref_sel_delay);
+ bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+ bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
}
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
- struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
- bool fast;
+ struct bcma_device *cc;
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- sii = (struct si_info *)sih;
- fast = SI_FAST(sii);
- if (!fast) {
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- return;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- return;
- }
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ if (cc == NULL)
+ return;
/* set all Instaclk chip ILP to 1 MHz */
- if (sih->ccrev >= 10)
- SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ if (ai_get_ccrev(sih) >= 10)
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
- ai_clkctl_setdelay(sii, cc);
-
- if (!fast)
- ai_setcoreidx(sih, origidx);
+ ai_clkctl_setdelay(sih, cc);
}
/*
@@ -1610,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
{
struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint slowminfreq;
u16 fpdelay;
- uint intr_val = 0;
- bool fast;
sii = (struct si_info *)sih;
- if (sih->cccaps & CC_CAP_PMU) {
- INTR_OFF(sii, intr_val);
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
- INTR_RESTORE(sii, intr_val);
return fpdelay;
}
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return 0;
- fast = SI_FAST(sii);
fpdelay = 0;
- if (!fast) {
- origidx = sii->curidx;
- INTR_OFF(sii, intr_val);
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- goto done;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- slowminfreq = ai_slowclk_freq(sii, false, cc);
- fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
- (slowminfreq - 1)) / slowminfreq;
-
- done:
- if (!fast) {
- ai_setcoreidx(sih, origidx);
- INTR_RESTORE(sii, intr_val);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
+ if (cc) {
+ slowminfreq = ai_slowclk_freq(sih, false, cc);
+ fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+ * 1000000) + (slowminfreq - 1)) / slowminfreq;
}
return fpdelay;
}
@@ -1664,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
sii = (struct si_info *)sih;
/* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sii))
+ if (PCIE(sih))
return -1;
- pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
@@ -1690,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
@@ -1700,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
mdelay(2);
}
@@ -1709,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
}
@@ -1721,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
u32 scc;
- uint intr_val = 0;
- bool fast = SI_FAST(sii);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sii->pub.ccrev < 6)
+ if (ai_get_ccrev(&sii->pub) < 6)
return false;
- if (!fast) {
- INTR_OFF(sii, intr_val);
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(&sii->pub, CC_CORE_ID, 0);
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
- goto done;
+ if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+ (ai_get_ccrev(&sii->pub) < 20))
+ return mode == CLK_FAST;
switch (mode) {
case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (sii->pub.ccrev < 10) {
+ if (ai_get_ccrev(&sii->pub) < 10) {
/*
* don't forget to force xtal back
* on before we clear SCC_DYN_XTAL..
*/
ai_clkctl_xtal(&sii->pub, XTAL, ON);
- SET_REG(&cc->slow_clk_ctl,
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (sii->pub.ccrev < 20) {
- OR_REG(&cc->system_clk_ctl, SYCC_HR);
+ bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
+ bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
} else {
- OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+ bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
}
/* wait for the PLL */
- if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
- == 0), PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+ htavail) == 0), PMU_MAX_TRANSITION_DLY);
} else {
udelay(PLL_DELAY);
}
break;
case CLK_DYNAMIC: /* enable dynamic clock control */
- if (sii->pub.ccrev < 10) {
- scc = R_REG(&cc->slow_clk_ctl);
+ if (ai_get_ccrev(&sii->pub) < 10) {
+ scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
scc &= ~(SCC_FS | SCC_IP | SCC_XC);
if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
scc |= SCC_XC;
- W_REG(&cc->slow_clk_ctl, scc);
+ bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
/*
* for dynamic control, we have to
@@ -1785,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
*/
if (scc & SCC_XC)
ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (sii->pub.ccrev < 20) {
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
/* Instaclock */
- AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+ bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
} else {
- AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+ bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
}
break;
@@ -1797,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
break;
}
- done:
- if (!fast) {
- ai_setcoreidx(&sii->pub, origidx);
- INTR_RESTORE(sii, intr_val);
- }
return mode == CLK_FAST;
}
@@ -1820,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
sii = (struct si_info *)sih;
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sih->ccrev < 6)
+ if (ai_get_ccrev(sih) < 6)
return false;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
return mode == CLK_FAST;
return _ai_clkctl_cc(sii, mode);
}
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
- int slen;
-
- if (!path || size <= 0)
- return -1;
-
- slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- ((struct si_info *)sih)->pbus->bus->number,
- PCI_SLOT(((struct pci_dev *)
- (((struct si_info *)(sih))->pbus))->devfn));
-
- if (slen < 0 || slen >= size) {
- path[0] = '\0';
- return -1;
- }
-
- return 0;
-}
-
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_FAST);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_up(sii->pch, SI_PCIUP);
}
@@ -1882,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_DYNAMIC);
pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1895,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
void ai_pci_setup(struct si_pub *sih, uint coremask)
{
struct si_info *sii;
- struct sbpciregs __iomem *regs = NULL;
- u32 siflag = 0, w;
- uint idx = 0;
+ u32 w;
sii = (struct si_info *)sih;
- if (PCI(sii)) {
- /* get current core index */
- idx = sii->curidx;
-
- /* we interrupt on this backplane flag number */
- siflag = ai_flag(sih);
-
- /* switch over to pci core */
- regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
- }
-
/*
* Enable sb->pci interrupts. Assume
* PCI rev 2.3 support was added in pci core rev 6 and things changed..
*/
- if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+ pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
- } else {
- /* set sbintvec bit for our flag number */
- ai_setint(sih, siflag);
+ pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
}
- if (PCI(sii)) {
- pcicore_pci_setup(sii->pch, regs);
-
- /* switch back to previous core */
- ai_setcoreidx(sih, idx);
+ if (PCI(sih)) {
+ pcicore_pci_setup(sii->pch);
}
}
@@ -1940,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
*/
int ai_pci_fixcfg(struct si_pub *sih)
{
- uint origidx;
- void __iomem *regs = NULL;
struct si_info *sii = (struct si_info *)sih;
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* save the current index */
- origidx = ai_coreidx(&sii->pub);
-
/* check 'pi' is correct and fix it if not */
- regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
- if (sii->pub.buscoretype == PCIE_CORE_ID)
- pcicore_fixcfg_pcie(sii->pch,
- (struct sbpcieregs __iomem *)regs);
- else if (sii->pub.buscoretype == PCI_CORE_ID)
- pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
- /* restore the original index */
- ai_setcoreidx(&sii->pub, origidx);
-
+ pcicore_fixcfg(sii->pch);
pcicore_hwup(sii->pch);
return 0;
}
@@ -1969,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
uint regoff;
regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+ return ai_cc_reg(sih, regoff, mask, val);
}
void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
u32 val;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
- val = R_REG(&cc->chipcontrol);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
if (on) {
- if (sih->chippkg == 9 || sih->chippkg == 0xb)
+ if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
/* Ext PA Controls for 4331 12x9 Package */
- W_REG(&cc->chipcontrol, val |
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5);
else
/* Ext PA Controls for 4331 12x12 Package */
- W_REG(&cc->chipcontrol,
- val | CCTRL4331_EXTPA_EN);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN);
} else {
val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- W_REG(&cc->chipcontrol, val);
+ bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+ ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
}
-
- ai_setcoreidx(sih, origidx);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = ai_setcore(sih, CC_CORE_ID, 0);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
/* EPA Fix */
- W_REG(&cc->gpiocontrol,
- R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
- ai_setcoreidx(sih, origidx);
+ bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
}
/* check if the device is removed */
@@ -2031,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
- pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -2040,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
bool ai_is_sprom_available(struct si_pub *sih)
{
- if (sih->ccrev >= 31) {
- struct si_info *sii;
- uint origidx;
- struct chipcregs __iomem *cc;
+ struct si_info *sii = (struct si_info *)sih;
+
+ if (ai_get_ccrev(sih) >= 31) {
+ struct bcma_device *cc;
u32 sromctrl;
- if ((sih->cccaps & CC_CAP_SROM) == 0)
+ if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
return false;
- sii = (struct si_info *)sih;
- origidx = sii->curidx;
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- sromctrl = R_REG(&cc->sromcontrol);
- ai_setcoreidx(sih, origidx);
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
return sromctrl & SRC_PRESENT;
}
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
default:
return true;
}
@@ -2067,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
bool ai_is_otp_disabled(struct si_pub *sih)
{
- switch (sih->chip) {
+ struct si_info *sii = (struct si_info *)sih;
+
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ return (sii->chipst & CST4313_OTP_PRESENT) == 0;
/* These chips always have their OTP on */
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
@@ -2077,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
return false;
}
}
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.rev;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 106a7424a7c..f84c6f78169 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
#ifndef _BRCM_AIUTILS_H_
#define _BRCM_AIUTILS_H_
+#include <linux/bcma/bcma.h>
+
#include "types.h"
/*
@@ -38,88 +40,12 @@
/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
#define SI_PCIE_DMA_H32 0x80000000
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
/* chipcommon being the first core: */
#define SI_CC_IDX 0
/* SOC Interconnect types (aka chip types) */
#define SOCI_AI 1
-/* Common core control flags */
-#define SICF_BIST_EN 0x8000
-#define SICF_PME_EN 0x4000
-#define SICF_CORE_BITS 0x3ffc
-#define SICF_FGC 0x0002
-#define SICF_CLOCK_EN 0x0001
-
-/* Common core status flags */
-#define SISF_BIST_DONE 0x8000
-#define SISF_BIST_ERROR 0x4000
-#define SISF_GATED_CLK 0x2000
-#define SISF_DMA64 0x1000
-#define SISF_CORE_BITS 0x0fff
-
/* A register that is common to all cores to
* communicate w/PMU regarding clock control.
*/
@@ -220,26 +146,15 @@
* public (read-only) portion of aiutils handle returned by si_attach()
*/
struct si_pub {
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
int ccrev; /* chip common core rev */
u32 cccaps; /* chip common capabilities */
- u32 cccaps_ext; /* chip common capabilities extension */
int pmurev; /* pmu core rev */
u32 pmucaps; /* pmu capabilities */
uint boardtype; /* board type */
uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
uint chip; /* chip number */
uint chiprev; /* chip revision */
uint chippkg; /* chip package option */
- u32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
- bool pci_pr32414;
-
};
struct pci_dev;
@@ -255,38 +170,13 @@ struct gpioh_item {
/* misc si info needed by some of the routines */
struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
- struct pci_dev *pbus; /* handle to pci bus */
- uint dev_coreid; /* the core provides driver functions */
- void *intr_arg; /* interrupt callback function arg */
- u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
- /* restore chip interrupts */
- void (*intrsrestore_fn) (void *intr_arg, u32 arg);
- /* check if interrupts are enabled */
- bool (*intrsenabled_fn) (void *intr_arg);
-
+ struct bcma_bus *icbus; /* handle to soc interconnect bus */
+ struct pci_dev *pcibus; /* handle to pci bus */
struct pcicore_info *pch; /* PCI/E core handle */
-
+ struct bcma_device *buscore;
struct list_head var_list; /* list of srom variables */
- void __iomem *curmap; /* current regs va */
- void __iomem *regs[SI_MAXCORES]; /* other regs va */
-
- uint curidx; /* current core index */
- uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- u32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
- u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
- u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
- void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
-
- u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
- u32 oob_router; /* oob router registers for axi */
+ u32 chipst; /* chip status */
};
/*
@@ -299,52 +189,15 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+ u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
- uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -359,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */
extern bool ai_is_sprom_available(struct si_pub *sih);
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
@@ -375,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+ return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+ return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+ return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+ return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+ return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+ return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+ return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+ return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+ return sih->chippkg;
+}
+
#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 7f27dbdb6b6..90911eec0cf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
len = roundup(len, 4);
ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
- dma_len += (u16) brcmu_pkttotlen(p);
+ dma_len += (u16) p->len;
BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
" seg_cnt %d null delim %d\n",
@@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
-
- plen = brcmu_pkttotlen(p) +
- AMPDU_MAX_MPDU_OVERHEAD;
+ plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
plen = max(scb_ampdu->min_len, plen);
if ((plen + ampdu_len) > max_ampdu_bytes) {
@@ -1120,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */
- while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+ while ((s1 & TXS_V) == 0) {
udelay(1);
status_delay++;
if (status_delay > 10)
return; /* error condition */
+ s1 = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(frmtxstatus));
}
- s2 = R_REG(&wlc->regs->frmtxstatus2);
+ s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
}
if (scb) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 89ad1b7dab8..55e9f45fce2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
&txpwr);
}
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
- int i;
- char buf[80];
- char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
-
- sprintf(buf, "CCK ");
- for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- printk(KERN_DEBUG "MCS32 %2d%s\n",
- txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif /* POWER_DBG */
-
void
brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
@@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
}
-#ifdef POWER_DBG
- wlc_phy_txpower_limits_dump(txpwr);
-#endif
return;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616abc8..1948cb2771e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@ struct d11regs {
u16 PAD[0x380]; /* 0x800 - 0xEFE */
};
+/* d11 register field offset */
+#define D11REGOFFS(field) offsetof(struct d11regs, field)
+
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
/* biststatus */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 6ebec8f4284..2e90a9a16ed 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -13,8 +13,10 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
-#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -22,6 +24,14 @@
#include <aiutils.h>
#include "types.h"
#include "dma.h"
+#include "soc.h"
+
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
/*
* DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
@@ -168,26 +178,25 @@
/* debug/trace */
#ifdef BCMDBG
-#define DMA_ERROR(args) \
- do { \
- if (!(*di->msg_level & 1)) \
- ; \
- else \
- printk args; \
- } while (0)
-#define DMA_TRACE(args) \
- do { \
- if (!(*di->msg_level & 2)) \
- ; \
- else \
- printk args; \
- } while (0)
+#define DMA_ERROR(fmt, ...) \
+do { \
+ if (*di->msg_level & 1) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#define DMA_TRACE(fmt, ...) \
+do { \
+ if (*di->msg_level & 2) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
#else
-#define DMA_ERROR(args)
-#define DMA_TRACE(args)
+#define DMA_ERROR(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#define DMA_TRACE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
-#define DMA_NONE(args)
+#define DMA_NONE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#define MAXNAMEL 8 /* 8 char names */
@@ -218,15 +227,16 @@ struct dma_info {
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
- struct pci_dev *pbus; /* bus handle */
+ struct bcma_device *core;
+ struct device *dmadev;
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
/* 64-bit dma tx engine registers */
- struct dma64regs __iomem *d64txregs;
+ uint d64txregbase;
/* 64-bit dma rx engine registers */
- struct dma64regs __iomem *d64rxregs;
+ uint d64rxregbase;
/* pointer to dma64 tx descriptor ring */
struct dma64desc *txd64;
/* pointer to dma64 rx descriptor ring */
@@ -361,7 +371,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
uint dmactrlflags;
if (di == NULL) {
- DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+ DMA_ERROR("NULL dma handle\n");
return 0;
}
@@ -373,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
if (dmactrlflags & DMA_CTRL_PEN) {
u32 control;
- control = R_REG(&di->d64txregs->control);
- W_REG(&di->d64txregs->control,
+ control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control | D64_XC_PD);
- if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+ if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+ D64_XC_PD)
/* We *can* disable it so it is supported,
* restore control register
*/
- W_REG(&di->d64txregs->control,
- control);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+ control);
else
/* Not supported, don't allow it to be enabled */
dmactrlflags &= ~DMA_CTRL_PEN;
@@ -392,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
return dmactrlflags;
}
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
u32 w;
- OR_REG(&dma64regs->control, D64_XC_AE);
- w = R_REG(&dma64regs->control);
- AND_REG(&dma64regs->control, ~D64_XC_AE);
+ bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+ w = bcma_read32(di->core, ctrl_offset);
+ bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
return (w & D64_XC_AE) == D64_XC_AE;
}
@@ -410,15 +421,15 @@ static bool _dma_isaddrext(struct dma_info *di)
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
/* not all tx or rx channel are available */
- if (di->d64txregs != NULL) {
- if (!_dma64_addrext(di->d64txregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
- "AE set\n", di->name));
+ if (di->d64txregbase != 0) {
+ if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+ di->name);
return true;
- } else if (di->d64rxregs != NULL) {
- if (!_dma64_addrext(di->d64rxregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
- "AE set\n", di->name));
+ } else if (di->d64rxregbase != 0) {
+ if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+ di->name);
return true;
}
@@ -430,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
u32 addrl;
/* Check to see if the descriptors need to be aligned on 4K/8K or not */
- if (di->d64txregs != NULL) {
- W_REG(&di->d64txregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64txregs->addrlow);
+ if (di->d64txregbase != 0) {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
- } else if (di->d64rxregs != NULL) {
- W_REG(&di->d64rxregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64rxregs->addrlow);
+ } else if (di->d64rxregbase != 0) {
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
}
@@ -448,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
* Descriptor table must start at the DMA hardware dictated alignment, so
* allocated memory must be large enough to support this requirement.
*/
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
u16 align_bits, uint *alloced,
dma_addr_t *pap)
{
@@ -458,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
size += align;
*alloced = size;
}
- return pci_alloc_consistent(pdev, size, pap);
+ return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}
static
@@ -484,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits;
- va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+ va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
if (NULL == va)
return NULL;
@@ -493,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) {
*alignbits = dma_align_sizetobits(size);
- pci_free_consistent(di->pbus, size, va, *descpa);
- va = dma_alloc_consistent(di->pbus, size, *alignbits,
+ dma_free_coherent(di->dmadev, size, va, *descpa);
+ va = dma_alloc_consistent(di, size, *alignbits,
alloced, descpa);
}
return va;
@@ -519,8 +530,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -533,8 +544,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -554,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ struct bcma_device *core,
+ uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
{
struct dma_info *di;
+ u8 rev = core->id.rev;
uint size;
/* allocate private info structure */
@@ -570,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->msg_level = msg_level ? msg_level : &dma_msg_level;
- di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+ di->dma64 =
+ ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
- /* init dma reg pointer */
- di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
- di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+ /* init dma reg info */
+ di->core = core;
+ di->d64txregbase = txregbase;
+ di->d64rxregbase = rxregbase;
/*
* Default flags (which can be changed by the driver calling
@@ -583,17 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+ DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
di->name[MAXNAMEL - 1] = '\0';
- di->pbus = ((struct si_info *)sih)->pbus;
+ di->dmadev = core->dma_dev;
/* save tunables */
di->ntxd = (u16) ntxd;
@@ -625,12 +639,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((ai_coreid(sih) == SDIOD_CORE_ID)
- && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
- di->addrext = 0;
- else if ((ai_coreid(sih) == I2S_CORE_ID) &&
- ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
- di->addrext = 0;
+ if ((core->id.id == SDIOD_CORE_ID)
+ && ((rev > 0) && (rev <= 2)))
+ di->addrext = false;
+ else if ((core->id.id == I2S_CORE_ID) &&
+ ((rev == 0) || (rev == 1)))
+ di->addrext = false;
else
di->addrext = _dma_isaddrext(di);
@@ -645,8 +659,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
- DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align));
+ DMA_NONE("DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@@ -684,21 +698,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->txdpa));
+ DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->rxdpa));
+ DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->rxdpa);
goto fail;
}
}
- DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
- "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
- di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
- di->addrext));
+ DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+ di->ddoffsetlow, di->ddoffsethigh,
+ di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext);
return (struct dma_pub *) di;
@@ -744,17 +758,17 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_detach\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
- pci_free_consistent(di->pbus, di->txdalloc,
- ((s8 *)di->txd64 - di->txdalign),
- (di->txdpaorig));
+ dma_free_coherent(di->dmadev, di->txdalloc,
+ ((s8 *)di->txd64 - di->txdalign),
+ (di->txdpaorig));
if (di->rxd64)
- pci_free_consistent(di->pbus, di->rxdalloc,
- ((s8 *)di->rxd64 - di->rxdalign),
- (di->rxdpaorig));
+ dma_free_coherent(di->dmadev, di->rxdalloc,
+ ((s8 *)di->rxd64 - di->rxdalign),
+ (di->rxdpaorig));
/* free packet pointer vectors */
kfree(di->txp);
@@ -779,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
if ((di->ddoffsetlow == 0)
|| !(pa & PCI32ADDR_HIGH)) {
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
}
} else {
/* DMA64 32bits address extension */
@@ -794,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
pa &= ~PCI32ADDR_HIGH;
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64txregs->control,
- D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64rxregs->control,
- D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
}
}
}
@@ -812,11 +834,11 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
- DMA_TRACE(("%s: dma_rxenable\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
- control =
- (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
- D64_RC_RE;
+ control = D64_RC_RE | (bcma_read32(di->core,
+ DMA64RXREGOFFS(di, control)) &
+ D64_RC_AE);
if ((dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_RC_PD;
@@ -824,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
if (dmactrlflags & DMA_CTRL_ROC)
control |= D64_RC_OC;
- W_REG(&di->d64rxregs->control,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control),
((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
@@ -832,7 +854,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_rxinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return;
@@ -867,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
return NULL;
curr =
- B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */
@@ -881,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
/* clear this packet from the descriptor ring */
- pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -901,7 +924,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
/*
* !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
* if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
* supported with pkts chain
* otherwise, it's treated as giant pkt and will be tossed.
@@ -909,74 +932,83 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
* buffer data. After it reaches the max size of buffer, the data continues
* in next DMA descriptor buffer WITHOUT DMA header
*/
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *head, *tail;
+ struct sk_buff_head dma_frames;
+ struct sk_buff *p, *next;
uint len;
uint pkt_len;
int resid = 0;
+ int pktcnt = 1;
+ skb_queue_head_init(&dma_frames);
next_frame:
- head = _dma_getnextrxp(di, false);
- if (head == NULL)
- return NULL;
+ p = _dma_getnextrxp(di, false);
+ if (p == NULL)
+ return 0;
- len = le16_to_cpu(*(__le16 *) (head->data));
- DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
- dma_spin_for_len(len, head);
+ len = le16_to_cpu(*(__le16 *) (p->data));
+ DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+ dma_spin_for_len(len, p);
/* set actual length */
pkt_len = min((di->rxoffset + len), di->rxbufsize);
- __skb_trim(head, pkt_len);
+ __skb_trim(p, pkt_len);
+ skb_queue_tail(&dma_frames, p);
resid = len - (di->rxbufsize - di->rxoffset);
/* check for single or multi-buffer rx */
if (resid > 0) {
- tail = head;
while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
- tail->next = p;
pkt_len = min_t(uint, resid, di->rxbufsize);
__skb_trim(p, pkt_len);
-
- tail = p;
+ skb_queue_tail(&dma_frames, p);
resid -= di->rxbufsize;
+ pktcnt++;
}
#ifdef BCMDBG
if (resid > 0) {
uint cur;
cur =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
- DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur));
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_CD_MASK) - di->rcvptrbase) &
+ D64_RS0_CD_MASK, struct dma64desc);
+ DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur);
}
#endif /* BCMDBG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
- di->name, len));
- brcmu_pkt_buf_free_skb(head);
+ DMA_ERROR("%s: bad frame length (%d)\n",
+ di->name, len);
+ skb_queue_walk_safe(&dma_frames, p, next) {
+ skb_unlink(p, &dma_frames);
+ brcmu_pkt_buf_free_skb(p);
+ }
di->dma.rxgiants++;
+ pktcnt = 1;
goto next_frame;
}
}
- return head;
+ skb_queue_splice_tail(&dma_frames, skb_list);
+ return pktcnt;
}
static bool dma64_rxidle(struct dma_info *di)
{
- DMA_TRACE(("%s: dma_rxidle\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return true;
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
- (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+ return ((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+ D64_RS0_CD_MASK));
}
/*
@@ -1010,7 +1042,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
- DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+ DMA_TRACE("%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@@ -1023,11 +1055,9 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
- DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
- di->name));
+ DMA_ERROR("%s: out of rxbufs\n", di->name);
if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR(("%s: rxfill64: ring is empty !\n",
- di->name));
+ DMA_ERROR("%s: ring is empty !\n", di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@@ -1042,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
*/
*(u32 *) (p->data) = 0;
- pa = pci_map_single(di->pbus, p->data,
- di->rxbufsize, PCI_DMA_FROMDEVICE);
+ pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+ DMA_FROM_DEVICE);
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1061,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
di->rxout = rxout;
/* update the chip lastdscr pointer */
- W_REG(&di->d64rxregs->ptr,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty;
@@ -1072,7 +1102,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@@ -1103,7 +1133,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
- DMA_TRACE(("%s: dma_txinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1122,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD;
- OR_REG(&di->d64txregs->control, control);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
/* DMA engine with alignment requirement requires table to be inited
* before enabling the engine
@@ -1135,24 +1165,24 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- OR_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}
void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txresume\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+ bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}
bool dma_txsuspended(struct dma_pub *pub)
@@ -1160,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
return (di->ntxd == 0) ||
- ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
- D64_XC_SE);
+ ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+ D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1169,11 +1200,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->txin == di->txout)
return;
@@ -1194,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
return true;
/* suspend tx DMA first */
- W_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
- && (status != D64_XS0_XS_STOPPED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+ (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+ 10000);
- W_REG(&di->d64txregs->control, 0);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
/* wait for the last transaction to complete */
udelay(300);
@@ -1219,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
if (di->nrxd == 0)
return true;
- W_REG(&di->d64rxregs->control, 0);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
- != D64_RS0_RS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
return status == D64_RS0_RS_DISABLED;
}
@@ -1233,72 +1265,58 @@ bool dma_rxreset(struct dma_pub *pub)
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
u32 flags = 0;
dma_addr_t pa;
- DMA_TRACE(("%s: dma_txfast\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
txout = di->txout;
/*
- * Walk the chain of packet buffers
- * allocating and initializing transmit descriptor entries.
+ * Obtain and initialize a transmit descriptor entry.
*/
- for (p = p0; p; p = next) {
- data = p->data;
- len = p->len;
- next = p->next;
+ data = p->data;
+ len = p->len;
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
+ /* no use to transmit a zero length packet */
+ if (len == 0)
+ return 0;
- if (len == 0)
- continue;
+ /* return nonzero if out of tx descriptors */
+ if (nexttxd(di, txout) == di->txin)
+ goto outoftxd;
- /* get physical address of buffer start */
- pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+ /* get physical address of buffer start */
+ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
- flags = 0;
- if (p == p0)
- flags |= D64_CTRL1_SOF;
-
- /* With a DMA segment list, Descriptor table is filled
- * using the segment list instead of looping over
- * buffers in multi-chain DMA. Therefore, EOF for SGLIST
- * is when end of segment list is reached.
- */
- if (next == NULL)
- flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
-
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+ * is when end of segment list is reached.
+ */
+ flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
- txout = nexttxd(di, txout);
- }
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
- /* if last txd eof not set, fix it */
- if (!(flags & D64_CTRL1_EOF))
- di->txd64[prevtxd(di, txout)].ctrl1 =
- cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+ txout = nexttxd(di, txout);
/* save the packet */
- di->txp[prevtxd(di, txout)] = p0;
+ di->txp[prevtxd(di, txout)] = p;
/* bump the tx descriptor index */
di->txout = txout;
/* kick the chip */
if (commit)
- W_REG(&di->d64txregs->ptr,
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */
@@ -1307,8 +1325,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
return 0;
outoftxd:
- DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- brcmu_pkt_buf_free_skb(p0);
+ DMA_ERROR("%s: out of txds !!!\n", di->name);
+ brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
return -1;
@@ -1331,11 +1349,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
- DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->ntxd == 0)
return NULL;
@@ -1346,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
if (range == DMA_RANGE_ALL)
end = di->txout;
else {
- struct dma64regs __iomem *dregs = di->d64txregs;
-
- end = (u16) (B2I(((R_REG(&dregs->status0) &
- D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc));
+ end = (u16) (B2I(((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) &
+ D64_XS0_CD_MASK, struct dma64desc));
if (range == DMA_RANGE_TRANSFERED) {
active_desc =
- (u16) (R_REG(&dregs->status1) &
+ (u16)(bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status1)) &
D64_XS1_AD_MASK);
active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1384,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
txp = di->txp[i];
di->txp[i] = NULL;
- pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+ dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
}
di->txin = i;
@@ -1395,8 +1412,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
return txp;
bogus:
- DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
- "force %d\n", start, end, di->txout, forceall));
+ DMA_NONE("bogus curr: start %d end %d txout %d\n",
+ start, end, di->txout);
return NULL;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index ebc5bc546f3..cc269ee5c49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -18,6 +18,7 @@
#define _BRCM_DMA_H_
#include <linux/delay.h>
+#include <linux/skbuff.h>
#include "types.h" /* forward structure declarations */
/* map/unmap direction */
@@ -74,13 +75,14 @@ struct dma_pub {
};
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
+ struct bcma_device *d11core,
+ uint txregbase, uint rxregbase,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level);
void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 0d8a9cdf897..d106576ce33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,11 +17,11 @@
#define __UNDEF_NO_VERSION__
#include <linux/etherdevice.h>
-#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
#include "nicpci.h"
@@ -40,10 +40,10 @@
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
- FIF_PLCPFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC)
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
.band = IEEE80211_BAND_2GHZ, \
@@ -87,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
- {0}
-};
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
@@ -216,8 +214,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
.ht_cap = {
/* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+ IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -238,8 +235,7 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
BRCMS_LEGACY_5G_RATE_OFFSET,
.ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+ IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -287,6 +283,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
{
struct brcms_info *wl = hw->priv;
bool blocked;
+ int err;
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
@@ -295,57 +292,69 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- return 0;
+ spin_lock_bh(&wl->lock);
+ /* avoid acknowledging frames before a non-monitor device is added */
+ wl->mute_tx = true;
+
+ if (!wl->pub->up)
+ err = brcms_up(wl);
+ else
+ err = -ENODEV;
+ spin_unlock_bh(&wl->lock);
+
+ if (err != 0)
+ wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
+ err);
+ return err;
}
static void brcms_ops_stop(struct ieee80211_hw *hw)
{
+ struct brcms_info *wl = hw->priv;
+ int status;
+
ieee80211_stop_queues(hw);
+
+ if (wl->wlc == NULL)
+ return;
+
+ spin_lock_bh(&wl->lock);
+ status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
+ wl->wlc->hw->deviceid);
+ spin_unlock_bh(&wl->lock);
+ if (!status) {
+ wiphy_err(wl->wiphy,
+ "wl: brcms_ops_stop: chipmatch failed\n");
+ return;
+ }
+
+ /* put driver in down state */
+ spin_lock_bh(&wl->lock);
+ brcms_down(wl);
+ spin_unlock_bh(&wl->lock);
}
static int
brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
- int err;
+ struct brcms_info *wl = hw->priv;
/* Just STA for now */
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
- vif->type != NL80211_IFTYPE_ADHOC) {
+ if (vif->type != NL80211_IFTYPE_STATION) {
wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
" STA for now\n", __func__, vif->type);
return -EOPNOTSUPP;
}
- wl = hw->priv;
- spin_lock_bh(&wl->lock);
- if (!wl->pub->up)
- err = brcms_up(wl);
- else
- err = -ENODEV;
- spin_unlock_bh(&wl->lock);
+ wl->mute_tx = false;
+ brcms_c_mute(wl->wlc, false);
- if (err != 0)
- wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
- err);
-
- return err;
+ return 0;
}
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
-
- wl = hw->priv;
-
- /* put driver in down state */
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -362,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+ wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
__func__, conf->flags & IEEE80211_CONF_MONITOR ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -539,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
+
if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+ wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- wiphy_err(wiphy, "FIF_ALLMULTI\n");
+ wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- wiphy_err(wiphy, "FIF_FCSFAIL\n");
- if (changed_flags & FIF_PLCPFAIL)
- wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+ wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
- wiphy_err(wiphy, "FIF_CONTROL\n");
+ wiphy_dbg(wiphy, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- wiphy_err(wiphy, "FIF_OTHER_BSS\n");
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- spin_lock_bh(&wl->lock);
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
- wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
- } else {
- brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
- wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
- }
- spin_unlock_bh(&wl->lock);
- }
+ wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+ if (changed_flags & FIF_PSPOLL)
+ wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+ wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_mac_promisc(wl->wlc, *total_flags);
+ spin_unlock_bh(&wl->lock);
return;
}
@@ -609,13 +614,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_ampdu->scb = scb;
wl->pub->global_ampdu->max_pdu = 16;
- sta->ht_cap.ht_supported = true;
- sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
- sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
/*
* minstrel_ht initiates addBA on our behalf by calling
* ieee80211_start_tx_ba_session()
@@ -724,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
};
/*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
@@ -864,56 +862,26 @@ static void brcms_free(struct brcms_info *wl)
#endif
kfree(t);
}
-
- /*
- * unregister_netdev() calls get_stats() which may read chip
- * registers so we cannot unmap the chip registers until
- * after calling unregister_netdev() .
- */
- if (wl->regsva)
- iounmap(wl->regsva);
-
- wl->regsva = NULL;
}
/*
-* called from both kernel as from this kernel module.
+* called both from the kernel and from this kernel module (error flow on attach)
* precondition: perimeter lock is not acquired.
*/
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int status;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
- return;
- }
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+ struct brcms_info *wl = hw->priv;
- spin_lock_bh(&wl->lock);
- status = brcms_c_chipmatch(pdev->vendor, pdev->device);
- spin_unlock_bh(&wl->lock);
- if (!status) {
- wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
- "failed\n");
- return;
- }
if (wl->wlc) {
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
- pci_disable_device(pdev);
brcms_free(wl);
- pci_set_drvdata(pdev, NULL);
+ bcma_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
@@ -1021,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* it as static.
*
*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
- resource_size_t regs,
- struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
{
struct brcms_info *wl = NULL;
int unit, err;
@@ -1039,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
return NULL;
/* allocate private info */
- hw = pci_get_drvdata(btparam); /* btparam == pdev */
+ hw = bcma_get_drvdata(pdev);
if (hw != NULL)
wl = hw->priv;
if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1051,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
- wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
- if (wl->regsva == NULL) {
- wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
- goto fail;
- }
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, btparam) < 0) {
+ if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
- brcms_remove(btparam);
+ brcms_remove(pdev);
return NULL;
}
/* common load-time initialization */
- wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
- wl->regsva, btparam, &err);
+ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1081,15 +1041,13 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
wl->pub->ieee_hw = hw;
- /* disable mpc */
- brcms_c_set_radio_mpc(wl->wlc, false);
-
/* register our interrupt handler */
- if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = irq;
+ wl->irq = pdev->bus->host_pci->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1136,38 +1094,19 @@ fail:
*
* Perimeter lock is initialized in the course of this function.
*/
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
{
- int rc;
struct brcms_info *wl;
struct ieee80211_hw *hw;
- u32 val;
- dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq);
+ dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+ pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+ pdev->bus->host_pci->irq);
- if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- ((pdev->device != 0x0576) &&
- ((pdev->device & 0xff00) != 0x4300) &&
- ((pdev->device & 0xff00) != 0x4700) &&
- ((pdev->device < 43000) || (pdev->device > 43999))))
+ if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+ (pdev->id.id != BCMA_CORE_80211))
return -ENODEV;
- rc = pci_enable_device(pdev);
- if (rc) {
- pr_err("%s: Cannot enable device %d-%d_%d\n",
- __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- return -ENODEV;
- }
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
if (!hw) {
pr_err("%s: ieee80211_alloc_hw failed\n", __func__);
@@ -1176,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
+ bcma_set_drvdata(pdev, hw);
memset(hw->priv, 0, sizeof(*wl));
- wl = brcms_attach(pdev->vendor, pdev->device,
- pci_resource_start(pdev, 0), pdev,
- pdev->irq);
-
+ wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
__func__);
@@ -1192,16 +1128,23 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_pci_suspend(struct pci_dev *pdev)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ return pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
- hw = pci_get_drvdata(pdev);
+ hw = bcma_get_drvdata(pdev);
wl = hw->priv;
if (!wl) {
wiphy_err(wl->wiphy,
- "brcms_suspend: pci_get_drvdata failed\n");
+ "brcms_suspend: bcma_get_drvdata failed\n");
return -ENODEV;
}
@@ -1210,25 +1153,14 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
+ /* temporarily do suspend ourselves */
+ return brcms_pci_suspend(pdev->bus->host_pci);
}
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_pci_resume(struct pci_dev *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
int err = 0;
- u32 val;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- wiphy_err(wl->wiphy,
- "wl: brcms_resume: pci_get_drvdata failed\n");
- return -ENODEV;
- }
+ uint val;
err = pci_set_power_state(pdev, PCI_D0);
if (err)
@@ -1246,24 +1178,28 @@ static int brcms_resume(struct pci_dev *pdev)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ return 0;
+}
+
+static int brcms_resume(struct bcma_device *pdev)
+{
/*
- * done. driver will be put in up state
- * in brcms_ops_add_interface() call.
+ * just do pci resume for now until bcma supports it.
*/
- return err;
+ return brcms_pci_resume(pdev->bus->host_pci);
}
-static struct pci_driver brcms_pci_driver = {
+static struct bcma_driver brcms_bcma_driver = {
.name = KBUILD_MODNAME,
- .probe = brcms_pci_probe,
+ .probe = brcms_bcma_probe,
.suspend = brcms_suspend,
.resume = brcms_resume,
.remove = __devexit_p(brcms_remove),
- .id_table = brcms_pci_id_table,
+ .id_table = brcms_coreid_table,
};
/**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
*
* This function determines if a device pointed to by pdev is a WL device,
* and if so, performs a brcms_attach() on it.
@@ -1278,26 +1214,24 @@ static int __init brcms_module_init(void)
brcm_msg_level = msglevel;
#endif /* BCMDBG */
- error = pci_register_driver(&brcms_pci_driver);
+ error = bcma_driver_register(&brcms_bcma_driver);
+ printk(KERN_ERR "%s: register returned %d\n", __func__, error);
if (!error)
return 0;
-
-
return error;
}
/**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
*
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
* system.
*
*/
static void __exit brcms_module_exit(void)
{
- pci_unregister_driver(&brcms_pci_driver);
-
+ bcma_driver_unregister(&brcms_bcma_driver);
}
module_init(brcms_module_init);
@@ -1319,8 +1253,7 @@ void brcms_init(struct brcms_info *wl)
{
BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
brcms_reset(wl);
-
- brcms_c_init(wl->wlc);
+ brcms_c_init(wl->wlc, wl->mute_tx);
}
/*
@@ -1332,11 +1265,19 @@ uint brcms_reset(struct brcms_info *wl)
brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
- wl->resched = 0;
+ wl->resched = false;
return 0;
}
+void brcms_fatal_error(struct brcms_info *wl)
+{
+ wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+ wl->wlc->pub->unit);
+ brcms_reset(wl);
+ ieee80211_restart_hw(wl->pub->ieee_hw);
+}
+
/*
* These are interrupt on/off entry points. Disable interrupts
* during interrupt state transition.
@@ -1561,11 +1502,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kmalloc(len, GFP_ATOMIC);
+ *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
if (*pbuf == NULL)
goto fail;
- memcpy(*pbuf, pdata, len);
return 0;
}
}
@@ -1578,7 +1518,7 @@ fail:
}
/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1618,7 +1558,7 @@ void brcms_ucode_free_buf(void *p)
/*
* checks validity of all firmware images loaded from user space
*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_check_firmwares(struct brcms_info *wl)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 177f0e44e4b..8f60419c37b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
- /* regsva for unmap in brcms_free() */
- void __iomem *regsva; /* opaque chip registers virtual address */
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -80,6 +78,7 @@ struct brcms_info {
struct brcms_firmware fw;
struct wiphy *wiphy;
struct brcms_ucode ucode;
+ bool mute_tx;
};
/* misc callbacks */
@@ -104,5 +103,6 @@ extern bool brcms_del_timer(struct brcms_timer *timer);
extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
+extern void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 510e9bb5228..f7ed34034f8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -30,44 +30,21 @@
#include "mac80211_if.h"
#include "ucode_loader.h"
#include "main.h"
+#include "soc.h"
/*
* Indication for txflowcontrol that all priority bits in
* TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
*/
-#define ALLPRIO -1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+#define ALLPRIO -1
/* watchdog timer, in unit of ms */
-#define TIMER_INTERVAL_WATCHDOG 1000
+#define TIMER_INTERVAL_WATCHDOG 1000
/* radio monitor timer, in unit of ms */
-#define TIMER_INTERVAL_RADIOCHK 800
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define BRCMS_MPC_MAX_DELAYCNT 10
-#endif
-
-/* Min MPC timeout, in unit of watchdog */
-#define BRCMS_MPC_MIN_DELAYCNT 1
-#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
+#define TIMER_INTERVAL_RADIOCHK 800
/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEFAULT 100
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEFAULT 3
-
-/* Scale down delays to accommodate QT slow speed */
-/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEF_QT 20
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEF_QT 1
-
-#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
+#define BEACON_INTERVAL_DEFAULT 100
/* n-mode support capability */
/* 2x2 includes both 1x1 & 2x2 devices
@@ -78,113 +55,71 @@
#define WL_11N_3x3 3
#define WL_11N_4x4 4
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_SHIFT 4
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_AIFSN_MAX 15
-#define EDCF_ECWMAX_MASK 0xf0
-
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_TXOP_STA 0x002f
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-#define EDCF_TXOP2USEC(txop) ((txop) << 5)
-#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-
-#define APHY_SYMBOL_TIME 4
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SIFS_TIME 16
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define BPHY_SIFS_TIME 10
-#define BPHY_PLCP_SHORT_TIME 96
-
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_TXOP_STA 0x002f
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME 4
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SIFS_TIME 16
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define BPHY_SIFS_TIME 10
+#define BPHY_PLCP_SHORT_TIME 96
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
#define DOT11_MAC_HDR_LEN 24
-#define DOT11_ACK_LEN 10
-#define DOT11_BA_LEN 4
+#define DOT11_ACK_LEN 10
+#define DOT11_BA_LEN 4
#define DOT11_OFDM_SIGNAL_EXTENSION 6
#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_RTS_LEN 16
-#define DOT11_CTS_LEN 10
+#define DOT11_RTS_LEN 16
+#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
#define DOT11_MIN_BEACON_PERIOD 1
#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-#define DOT11_MAXNUMFRAGS 16
+#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
-#define BPHY_PLCP_TIME 192
-#define RIFS_11N_TIME 2
-
-#define WME_VER 1
-#define WME_SUBTYPE_PARAM_IE 1
-#define WME_TYPE 2
-#define WME_OUI "\x00\x50\xf2"
+#define BPHY_PLCP_TIME 192
+#define RIFS_11N_TIME 2
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-
-#define BCN_TMPL_LEN 512 /* length of the BCN template area */
+/* length of the BCN template area */
+#define BCN_TMPL_LEN 512
/* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-
-/* Flags used in brcms_c_txq_info.stopped */
-/* per prio flow control bits */
-#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF
-/* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100
-/* stop txq enqueue for ampdu flow control */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200
-
-#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
+#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-/* Find basic rate for a given rate */
-static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
-{
- if (is_mcs_rate(rspec))
- return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
- .leg_ofdm];
- return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
-}
-
-static u16 frametype(u32 rspec, u8 mimoframe)
-{
- if (is_mcs_rate(rspec))
- return mimoframe;
- return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
-}
+/* chip rx buffer offset */
+#define BRCMS_HWRXOFF 38
/* rfdisable delay timer 500 ms, runs off the ALP clock */
-#define RFDISABLE_DEFAULT 10000000
+#define RFDISABLE_DEFAULT 10000000
#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
@@ -194,87 +129,83 @@ static u16 frametype(u32 rspec, u8 mimoframe)
* These constants are used ONLY by wlc_prio2prec_map. Do not use them
* elsewhere.
*/
-#define _BRCMS_PREC_NONE 0 /* None = - */
-#define _BRCMS_PREC_BK 2 /* BK - Background */
-#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
-#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
-#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
-#define _BRCMS_PREC_VI 10 /* Vi - Video */
-#define _BRCMS_PREC_VO 12 /* Vo - Voice */
-#define _BRCMS_PREC_NC 14 /* NC - Network Control */
-
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN 0x20
-
-#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */
-#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */
-#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */
-#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */
-
-#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
-
-#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
+#define _BRCMS_PREC_NONE 0 /* None = - */
+#define _BRCMS_PREC_BK 2 /* BK - Background */
+#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
+#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
+#define _BRCMS_PREC_VI 10 /* Vi - Video */
+#define _BRCMS_PREC_VO 12 /* Vo - Voice */
+#define _BRCMS_PREC_NC 14 /* NC - Network Control */
+
+/* synthpu_dly times in us */
+#define SYNTHPU_DLY_APHY_US 3700
+#define SYNTHPU_DLY_BPHY_US 1050
+#define SYNTHPU_DLY_NPHY_US 2048
+#define SYNTHPU_DLY_LPPHY_US 300
+
+#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */
/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S 0
-#define EDCF_SFB_S 4
-#define EDCF_LONG_S 8
-#define EDCF_LFB_S 12
-#define EDCF_SHORT_M BITFIELD_MASK(4)
-#define EDCF_SFB_M BITFIELD_MASK(4)
-#define EDCF_LONG_M BITFIELD_MASK(4)
-#define EDCF_LFB_M BITFIELD_MASK(4)
+#define EDCF_SHORT_S 0
+#define EDCF_SFB_S 4
+#define EDCF_LONG_S 8
+#define EDCF_LFB_S 12
+#define EDCF_SHORT_M BITFIELD_MASK(4)
+#define EDCF_SFB_M BITFIELD_MASK(4)
+#define EDCF_LONG_M BITFIELD_MASK(4)
+#define EDCF_LFB_M BITFIELD_MASK(4)
-#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
-#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
-#define RETRY_LONG_DEF 4 /* Default Long retry count */
-#define RETRY_SHORT_FB 3 /* Short count for fallback rate */
-#define RETRY_LONG_FB 2 /* Long count for fallback rate */
+#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
+#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
+#define RETRY_LONG_DEF 4 /* Default Long retry count */
+#define RETRY_SHORT_FB 3 /* Short count for fb rate */
+#define RETRY_LONG_FB 2 /* Long count for fb rate */
-#define APHY_CWMIN 15
-#define PHY_CWMAX 1023
+#define APHY_CWMIN 15
+#define PHY_CWMAX 1023
-#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MIN 1
-#define FRAGNUM_MASK 0xF
+#define FRAGNUM_MASK 0xF
-#define APHY_SLOT_TIME 9
-#define BPHY_SLOT_TIME 20
+#define APHY_SLOT_TIME 9
+#define BPHY_SLOT_TIME 20
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
/* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS 0xffffffff
+#define BRCMS_USE_COREFLAGS 0xffffffff
/* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO -1
-#define BRCMS_PLCP_SHORT 0
-#define BRCMS_PLCP_LONG 1
+#define BRCMS_PLCP_AUTO -1
+#define BRCMS_PLCP_SHORT 0
+#define BRCMS_PLCP_LONG 1
/* values for g_protection_override and n_protection_override */
#define BRCMS_PROTECTION_AUTO -1
#define BRCMS_PROTECTION_OFF 0
#define BRCMS_PROTECTION_ON 1
#define BRCMS_PROTECTION_MMHDR_ONLY 2
-#define BRCMS_PROTECTION_CTS_ONLY 3
+#define BRCMS_PROTECTION_CTS_ONLY 3
/* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF 0
+#define BRCMS_PROTECTION_CTL_OFF 0
#define BRCMS_PROTECTION_CTL_LOCAL 1
#define BRCMS_PROTECTION_CTL_OVERLAP 2
/* values for n_protection */
#define BRCMS_N_PROTECTION_OFF 0
#define BRCMS_N_PROTECTION_OPTIONAL 1
-#define BRCMS_N_PROTECTION_20IN40 2
+#define BRCMS_N_PROTECTION_20IN40 2
#define BRCMS_N_PROTECTION_MIXEDMODE 3
/* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL 0
-#define BRCMS_N_BW_40ALL 1
-#define BRCMS_N_BW_20IN2G_40IN5G 2
+#define BRCMS_N_BW_20ALL 0
+#define BRCMS_N_BW_40ALL 1
+#define BRCMS_N_BW_20IN2G_40IN5G 2
/* bitflags for SGI support (sgi_rx iovar) */
#define BRCMS_N_SGI_20 0x01
@@ -282,48 +213,42 @@ static u16 frametype(u32 rspec, u8 mimoframe)
/* defines used by the nrate iovar */
/* MCS in use, indicates b0-6 holds an mcs */
-#define NRATE_MCS_INUSE 0x00000080
+#define NRATE_MCS_INUSE 0x00000080
/* rate/mcs value */
-#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_RATE_MASK 0x0000007f
/* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_MASK 0x0000ff00
+#define NRATE_STF_MASK 0x0000ff00
/* stf mode shift */
-#define NRATE_STF_SHIFT 8
-/* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE 0x80000000
+#define NRATE_STF_SHIFT 8
/* bit indicates to override mcs only */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS 4
/* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD 256
+#define NTXD 256
/* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXD 256
+#define NRXD 256
/* try to keep this # rbufs posted to the chip */
-#define NRXBUFPOST 32
+#define NRXBUFPOST 32
/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT 50
+#define BRCMS_DATAHIWAT 50
-/* bounded rx loops */
-#define RXBND 8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+/* max # frames to process in brcms_c_recv() */
+#define RXBND 8
+/* max # tx status to process in wlc_txstatus() */
+#define TXSBND 8
/* brcmu_format_flags() bit description structure */
struct brcms_c_bit_desc {
@@ -375,10 +300,22 @@ uint brcm_msg_level =
#endif /* BCMDBG */
/* TX FIFO number to WME/802.1E Access Category */
-static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+static const u8 wme_fifo2ac[] = {
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BE
+};
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+/* ieee80211 Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = {
+ TX_AC_VO_FIFO,
+ TX_AC_VI_FIFO,
+ TX_AC_BE_FIFO,
+ TX_AC_BK_FIFO
+};
/* 802.1D Priority to precedence queue mapping */
const u8 wlc_prio2prec_map[] = {
@@ -405,13 +342,6 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5},
};
-static const u8 acbitmap2maxprio[] = {
- PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
- PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
#ifdef BCMDBG
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
@@ -424,6 +354,22 @@ static const char fifo_names[6][0];
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+ if (is_mcs_rate(rspec))
+ return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+ .leg_ofdm];
+ return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+ if (is_mcs_rate(rspec))
+ return mimoframe;
+ return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
/* currently the best mechanism for determining SIFS is the band in use */
static u16 get_sifs(struct brcms_band *band)
{
@@ -442,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
*/
static bool brcms_deviceremoved(struct brcms_c_info *wlc)
{
+ u32 macctrl;
+
if (!wlc->hw->clk)
return ai_deviceremoved(wlc->hw->sih);
- return (R_REG(&wlc->hw->regs->maccontrol) &
- (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+ macctrl = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(maccontrol));
+ return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
}
/* sum the individual fifo tx pending packet counts */
@@ -470,20 +419,6 @@ static int brcms_chspec_bw(u16 chanspec)
return BRCMS_10_MHZ;
}
-/*
- * return true if Minimum Power Consumption should
- * be entered, false otherwise
- */
-static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
- return false;
-}
-
-static bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
- return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
{
if (cfg == NULL)
@@ -650,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
- struct d11regs __iomem *regs;
-
- regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
if (shortslot) {
/* 11g short slot: 11a timing */
- W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
- W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
@@ -669,9 +602,8 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
* calculate frame duration of a given rate and length, return
* time in usec unit
*/
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
- u8 preamble_type, uint mac_len)
+static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+ u8 preamble_type, uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
@@ -741,24 +673,22 @@ brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
+ struct bcma_device *core = wlc_hw->d11core;
int i;
- u8 __iomem *base;
- u8 __iomem *addr;
+ uint offset;
u16 size;
u32 value;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- base = (u8 __iomem *)wlc_hw->regs;
-
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
- addr = base + le16_to_cpu(inits[i].addr);
+ offset = le16_to_cpu(inits[i].addr);
value = le32_to_cpu(inits[i].value);
if (size == 2)
- W_REG((u16 __iomem *)addr, value);
+ bcma_write16(core, offset, value);
else if (size == 4)
- W_REG((u32 __iomem *)addr, value);
+ bcma_write32(core, offset, value);
else
break;
}
@@ -808,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
}
}
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+ struct bcma_device *core = wlc_hw->d11core;
+ u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+ bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -816,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
if (OFF == clk) { /* clear gmode bit, put phy into reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
- (SICF_PRST | SICF_FGC));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+ (SICF_PRST | SICF_FGC));
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
udelay(1);
} else { /* take phy out of reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
udelay(1);
}
@@ -847,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
/* set gmode core flag */
- if (wlc_hw->sbclk && !wlc_hw->noreset)
- ai_core_cflags(wlc_hw->sih, SICF_GMODE,
- ((bandunit == 0) ? SICF_GMODE : 0));
+ if (wlc_hw->sbclk && !wlc_hw->noreset) {
+ u32 gmode = 0;
+
+ if (bandunit == 0)
+ gmode = SICF_GMODE;
+
+ brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+ }
}
/* switch to new band but leave it inactive */
@@ -857,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
+ u32 macctrl;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ macctrl = bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol));
+ WARN_ON((macctrl & MCTL_EN_MAC) != 0);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -969,7 +914,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
lfbl, /* Long Frame Rate Fallback Limit */
fbl;
- if (queue < AC_COUNT) {
+ if (queue < IEEE80211_NUM_ACS) {
sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_SFB);
lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@@ -1018,14 +963,12 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = brcmu_pkttotlen(p);
+ totlen = p->len;
free_pdu = true;
brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
- p->next = NULL;
- p->prev = NULL;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@@ -1053,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct brcms_c_info *wlc = wlc_hw->wlc;
- struct d11regs __iomem *regs;
+ struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
@@ -1066,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
txs = &txstatus;
- regs = wlc_hw->regs;
+ core = wlc_hw->d11core;
*fatal = false;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
- && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+ && (s1 & TXS_V)) {
if (s1 == 0xffffffff) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
wlc_hw->unit, __func__);
return morepending;
}
-
- s2 = R_REG(&regs->frmtxstatus2);
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1090,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
/* !give others some time to run! */
if (++n >= max_tx_num)
break;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
}
if (*fatal)
@@ -1134,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
}
}
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+static uint
+dmareg(uint direction, uint fifonum)
{
if (direction == DMA_TX)
- return &(hw->regs->fifo64regs[fifonum].dmaxmt);
- return &(hw->regs->fifo64regs[fifonum].dmarcv);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
}
static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1165,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
- (wme ? dmareg(wlc_hw, DMA_TX, 0) :
- NULL), dmareg(wlc_hw, DMA_RX, 0),
+ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ (wme ? dmareg(DMA_TX, 0) : 0),
+ dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1179,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 1), NULL,
+ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 1), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1190,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 2), NULL,
+ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 2), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1200,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 3),
- NULL, NTXD, 0, 0, -1,
+ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 3),
+ 0, NTXD, 0, 0, -1,
0, 0, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
@@ -1276,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
/* control chip clock to save power, enable dynamic clock or force fast clock */
static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
{
- if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
* on backplane, but mac core will still run on ALP(not HT) when
* it enters powersave mode, which means the FCA bit may not be
@@ -1285,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
if (wlc_hw->clk) {
if (mode == CLK_FAST) {
- OR_REG(&wlc_hw->regs->clk_ctl_st,
- CCS_FORCEHT);
+ bcma_set32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
+ CCS_FORCEHT);
udelay(64);
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL) == 0),
- PMU_MAX_TRANSITION_DLY);
- WARN_ON(!(R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL));
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ WARN_ON(!(bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL));
} else {
- if ((wlc_hw->sih->pmurev == 0) &&
- (R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL)
- == 0),
- PMU_MAX_TRANSITION_DLY);
- AND_REG(&wlc_hw->regs->clk_ctl_st,
+ if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+ (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ (CCS_FORCEHT | CCS_HTAREQ)))
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ offsetof(struct d11regs,
+ clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ bcma_mask32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
~CCS_FORCEHT);
}
}
@@ -1322,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
/* check fast clock is available (if core is not in reset) */
if (wlc_hw->forcefastclk && wlc_hw->clk)
- WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+ WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
SISF_FCLKA));
/*
@@ -1439,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
maccontrol |= MCTL_INFRA;
}
- W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+ maccontrol);
}
/* set or clear maccontrol bits */
@@ -1533,7 +1482,7 @@ static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 mac_l;
u16 mac_m;
u16 mac_h;
@@ -1541,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
wlc_hw->unit);
- regs = wlc_hw->regs;
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
mac_h = addr[4] | (addr[5] << 8);
/* enter the MAC addr into the RXE match registers */
- W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
- W_REG(&regs->rcm_mat_data, mac_l);
- W_REG(&regs->rcm_mat_data, mac_m);
- W_REG(&regs->rcm_mat_data, mac_h);
-
+ bcma_write16(core, D11REGOFFS(rcm_ctl),
+ RCM_INC_DATA | match_reg_offset);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 word;
__le32 word_le;
__be32 word_be;
bool be_bit;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
- W_REG(&regs->tplatewrptr, offset);
+ bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
/* if MCTL_BIGEND bit set in mac control register,
* the chip swaps data in fifo, as well as data in
* template ram
*/
- be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+ be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
while (len > 0) {
memcpy(&word, buf, sizeof(u32));
@@ -1585,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
word = *(u32 *)&word_le;
}
- W_REG(&regs->tplatewrdata, word);
+ bcma_write32(core, D11REGOFFS(tplatewrdata), word);
buf = (u8 *) buf + sizeof(u32);
len -= sizeof(u32);
@@ -1596,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmin);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmax);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1773,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 4);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
}
@@ -1797,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
return;
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
else
- ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
else
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}
void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1828,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
- ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+ brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
udelay(1);
@@ -1836,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
- (SICF_PRST | SICF_PCLKE));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+ (SICF_PRST | SICF_PCLKE));
phy_in_reset = true;
} else {
- ai_core_cflags(wlc_hw->sih,
- (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
- (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+ brcms_b_core_ioctl(wlc_hw,
+ (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+ (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
}
udelay(2);
@@ -1859,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
u32 macintmask;
/* Enable the d11 core before accessing it */
- if (!ai_iscoreup(wlc_hw->sih)) {
- ai_core_reset(wlc_hw->sih, 0, 0);
+ if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+ bcma_core_enable(wlc_hw->d11core, 0);
brcms_c_mctrl_reset(wlc_hw);
}
@@ -1886,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0);
}
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1914,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
uint b2 = boardrev & 0xf;
/* boards from other vendors are always considered valid */
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
return true;
/* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1986,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
- u32 resetbits = 0, flags = 0;
+ u32 flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
@@ -2003,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
flags |= SICF_PCLKE;
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+ bcma_core_enable(wlc_hw->d11core, flags);
brcms_c_mctrl_reset(wlc_hw);
}
- v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+ v = ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
/* put core back into reset */
if (!clk)
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
if (!xtal)
brcms_b_xtal(wlc_hw, OFF);
@@ -2042,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
*/
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
- struct d11regs __iomem *regs;
uint i;
bool fastclk;
- u32 resetbits = 0;
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* reset the dma engines except first time thru */
- if (ai_iscoreup(wlc_hw->sih)) {
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2098,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* they may touch chipcommon as well.
*/
wlc_hw->clk = false;
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+ bcma_core_enable(wlc_hw->d11core, flags);
wlc_hw->clk = true;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
brcms_c_mctrl_reset(wlc_hw);
- if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2126,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
*/
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
@@ -2147,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
txfifo_cmd =
TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
- W_REG(&regs->xmtfifodef, txfifo_def);
- W_REG(&regs->xmtfifodef1, txfifo_def1);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+ bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
}
@@ -2186,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x2082);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x5341);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else { /* 120Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x8889);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
}
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
} else { /* 80Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
}
}
}
@@ -2215,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
u32 gc, gm;
- regs = wlc_hw->regs;
-
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
@@ -2250,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
* The board itself is powered by these GPIOs
* (when not sending pattern) so set them high
*/
- OR_REG(&regs->psm_gpio_oe,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
- OR_REG(&regs->psm_gpio_out,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2280,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
const __le32 ucode[], const size_t nbytes)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
uint i;
uint count;
@@ -2288,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
count = (nbytes / sizeof(u32));
- W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
for (i = 0; i < count; i++)
- W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+ bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}
@@ -2352,19 +2296,12 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
-static void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
- wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
- wlc->pub->unit);
- brcms_init(wlc->wl);
-}
-
static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
{
bool fatal = false;
uint unit;
uint intstatus, idx;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
unit = wlc_hw->unit;
@@ -2372,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
for (idx = 0; idx < NFIFO; idx++) {
/* read intstatus register and ignore any non-error bits */
intstatus =
- R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+ bcma_read32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus)) &
+ I_ERRORS;
if (!intstatus)
continue;
@@ -2414,11 +2353,12 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
}
if (fatal) {
- brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
+ brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
break;
} else
- W_REG(&regs->intctrlregs[idx].intstatus,
- intstatus);
+ bcma_write32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus),
+ intstatus);
}
}
@@ -2426,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
- if (!wlc->hw->up)
- return 0;
-
- return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- if (!wlc->hw->up)
- return;
-
- brcms_intrsrestore(wlc->wl, macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2460,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
macintmask = wlc->macintmask; /* isr can still happen */
- W_REG(&wlc_hw->regs->macintmask, 0);
- (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
udelay(1); /* ensure int line is no longer driven */
wlc->macintmask = 0;
@@ -2476,9 +2395,10 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
return;
wlc->macintmask = macintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
+/* assumes that the d11 MAC is enabled */
static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
uint tx_fifo)
{
@@ -2535,11 +2455,12 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
}
}
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+/* precondition: requires the mac core to be enabled */
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- if (on) {
+ if (mute_tx) {
/* suspend tx fifos */
brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
@@ -2561,14 +2482,20 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
wlc_hw->etheraddr);
}
- wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+ wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
- if (on)
+ if (mute_tx)
brcms_c_ucode_mute_override_set(wlc_hw);
else
brcms_c_ucode_mute_override_clear(wlc_hw);
}
+void
+brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
+{
+ brcms_b_mute(wlc->hw, mute_tx);
+}
+
/*
* Read and clear macintmask and macintstatus and intstatus registers.
* This routine should be called with interrupts off
@@ -2580,11 +2507,11 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 macintstatus;
/* macintstatus includes a DMA interrupt summary bit */
- macintstatus = R_REG(&regs->macintstatus);
+ macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
macintstatus);
@@ -2611,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* consequences
*/
/* turn off the interrupts */
- W_REG(&regs->macintmask, 0);
- (void)R_REG(&regs->macintmask); /* sync readback */
+ bcma_write32(core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(core, D11REGOFFS(macintmask));
wlc->macintmask = 0;
/* clear device interrupts */
- W_REG(&regs->macintstatus, macintstatus);
+ bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
/* MI_DMAINT is indication of non-zero intstatus */
if (macintstatus & MI_DMAINT)
@@ -2625,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* RX_FIFO. If MI_DMAINT is set, assume it
* is set and clear the interrupt.
*/
- W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
- DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+ DEF_RXINTMASK);
return macintstatus;
}
@@ -2689,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
struct wiphy *wiphy = wlc->wiphy;
@@ -2706,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
/* force the core awake */
brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2718,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(!(mc & MCTL_EN_MAC));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2729,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
- SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+ SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
BRCMS_MAX_MAC_SUSPEND);
- if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+ if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
- R_REG(&regs->psmdebug),
- R_REG(&regs->phydebug),
- R_REG(&regs->psm_brc));
+ bcma_read32(core, D11REGOFFS(psmdebug)),
+ bcma_read32(core, D11REGOFFS(phydebug)),
+ bcma_read16(core, D11REGOFFS(psm_brc)));
}
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2758,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2771,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
if (wlc_hw->mac_suspend_depth > 0)
return;
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
- W_REG(&regs->macintstatus, MI_MACSSPNDD);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_EN_MAC));
WARN_ON(!(mc & MCTL_PSM_RUN));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2801,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* Validate dchip register access */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- w = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ w = bcma_read32(core, D11REGOFFS(objdata));
/* Can we write and read back a 32bit register? */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0xaa5555aa);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0xaa5555aa) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0xaa5555aa\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0x55aaaa55);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0x55aaaa55) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0x55aaaa55\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, w);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), w);
/* clear CFPStart */
- W_REG(&regs->tsf_cfpstart, 0);
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
- w = R_REG(&regs->maccontrol);
+ w = bcma_read32(core, D11REGOFFS(maccontrol));
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2866,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
tmp = 0;
- regs = wlc_hw->regs;
if (on) {
- if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
- CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ CCS_ERSRC_REQ_HT |
+ CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+ CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
- (CCS_ERSRC_AVAIL_HT))
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+ if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
" PLL failed\n", __func__);
} else {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ tmp | CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL)) !=
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp &
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
@@ -2911,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
* be requesting it; so we'll deassert the request but
* not wait for status to comply.
*/
- AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
- tmp = R_REG(&regs->clk_ctl_st);
+ bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+ ~CCS_ERSRC_REQ_PHYPLL);
+ (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
}
}
@@ -2940,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_ctl(wlc_hw, false);
wlc_hw->clk = false;
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
@@ -2964,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
static u16
brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
- u16 v;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- v = R_REG(objdata_hi);
- else
- v = R_REG(objdata_lo);
+ objoff += 2;
- return v;
+ return bcma_read16(core, objoff);
}
static void
brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- W_REG(objdata_hi, v);
- else
- W_REG(objdata_lo, v);
+ objoff += 2;
+
+ bcma_write16(core, objoff, v);
}
/*
@@ -3078,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
/* write retry limit to SCR, shouldn't need to suspend */
if (wlc_hw->up) {
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
}
}
@@ -3132,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
return false;
/* disallow PS when one of these meets when not scanning */
- if (wlc->monitor)
+ if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
return false;
if (cfg->associated) {
@@ -3267,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 sflags;
- uint bcnint_us;
+ u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
int err = 0;
@@ -3277,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
struct wiphy *wiphy = wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- regs = wlc_hw->regs;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset PSM */
@@ -3291,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
fifosz_fixup = true;
/* let the PSM run to the suspended state, set mode to BSS STA */
- W_REG(&regs->macintstatus, -1);
+ bcma_write32(core, D11REGOFFS(macintstatus), -1);
brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
- SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
- 1000 * 1000);
- if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+ SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+ MI_MACSSPNDD) == 0), 1000 * 1000);
+ if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
- sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+ sflags = bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3368,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
wlc_hw->xmtfifo_sz[i], i);
/* make sure we can still talk to the mac */
- WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+ WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
/* band-specific inits done by wlc_bsinit() */
@@ -3377,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
- W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+ bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
brcms_b_mctrl(wlc_hw,
@@ -3386,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
/* set up Beacon interval */
bcnint_us = 0x8000 << 10;
- W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
- W_REG(&regs->tsf_cfpstart, bcnint_us);
- W_REG(&regs->macintstatus, MI_GP1);
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ (bcnint_us << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
/* write interrupt mask */
- W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+ DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
- W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+ bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
/* tell the ucode the corerev */
brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3411,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
machwcap >> 16) & 0xffff));
/* write retry limits to SCR, this done after PSM init */
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->SRL);
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->LRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
/* write rate fallback retry limits */
brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
- AND_REG(&regs->ifs_ctl, 0x0FFF);
- W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+ bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+ bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
/* init the tx dma engines */
for (i = 0; i < NFIFO; i++) {
@@ -3437,8 +3361,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
}
void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute) {
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
u32 macintmask;
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
@@ -3463,10 +3386,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
/* core-specific initialization */
brcms_b_coreinit(wlc);
- /* suspend the tx fifos and mute the phy for preism cac time */
- if (mute)
- brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
/* band-specific inits */
brcms_b_bsinit(wlc, chanspec);
@@ -3656,42 +3575,32 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
brcms_c_set_phy_chanspec(wlc, chanspec);
}
-static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
- if (wlc->bcnmisc_monitor)
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
- else
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
- wlc->bcnmisc_monitor = promisc;
- brcms_c_mac_bcn_promisc(wlc);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+/*
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
+ */
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
- /*
- * promiscuous mode just sets MCTL_PROMISC
- * Note: APs get all BSS traffic without the need to set
- * the MCTL_PROMISC bit since all BSS data traffic is
- * directed at the AP
- */
- if (wlc->pub->promisc)
+ wlc->filter_flags = filter_flags;
+
+ if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
promisc_bits |= MCTL_PROMISC;
- /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
- * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
- * handled in brcms_c_mac_bcn_promisc()
- */
- if (wlc->monitor)
- promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+ if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
+ promisc_bits |= MCTL_BCNS_PROMISC;
+
+ if (filter_flags & FIF_FCSFAIL)
+ promisc_bits |= MCTL_KEEPBADFCS;
- brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+ if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+ promisc_bits |= MCTL_KEEPCONTROL;
+
+ brcms_b_mctrl(wlc->hw,
+ MCTL_PROMISC | MCTL_BCNS_PROMISC |
+ MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+ promisc_bits);
}
/*
@@ -3721,10 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
} else {
/* disable an active IBSS if we are not on the home channel */
}
-
- /* update the various promisc bits */
- brcms_c_mac_bcn_promisc(wlc);
- brcms_c_mac_promisc(wlc);
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3899,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
- v1 = R_REG(&wlc->regs->maccontrol);
+ v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
if (hps)
v2 |= MCTL_HPS;
@@ -3979,7 +3884,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
void
brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute, struct txpwr_limits *txpwr)
+ bool mute_tx, struct txpwr_limits *txpwr)
{
uint bandunit;
@@ -4005,7 +3910,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
}
}
- wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+ wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
if (!wlc_hw->up) {
if (wlc_hw->clk)
@@ -4017,7 +3922,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
/* Update muting of the channel */
- brcms_b_mute(wlc_hw, mute, 0);
+ brcms_b_mute(wlc_hw, mute_tx);
}
}
@@ -4205,7 +4110,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
EDCF_TXOP2USEC(acp_shm.txop);
acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
- if (aci == AC_VI && acp_shm.txop == 0
+ if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
&& acp_shm.aifs < EDCF_AIFSN_MAX)
acp_shm.aifs++;
@@ -4218,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
acp_shm.cwmax = params->cw_max;
acp_shm.cwcur = acp_shm.cwmin;
acp_shm.bslots =
- R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+ bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+ acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4242,7 +4148,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
}
}
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
u16 aci;
int i_ac;
@@ -4255,7 +4161,7 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}; /* ucode needs these parameters during its initialization */
const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
- for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+ for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
@@ -4277,17 +4183,6 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}
}
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
- /*
- * maintain LEDs while in down state, turn on sbclk if
- * not available yet. Turn on sbclk if necessary
- */
- brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
- brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
-}
-
static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
/* Don't start the timer if HWRADIO feature is disabled */
@@ -4299,28 +4194,6 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
}
-static void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
- if (!wlc->pub->up) {
- brcms_c_down_led_upd(wlc);
- return;
- }
-
- brcms_c_radio_monitor_start(wlc);
- brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
- if (wlc->pub->up)
- return;
-
- if (brcms_deviceremoved(wlc))
- return;
-
- brcms_up(wlc->wl);
-}
-
static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
if (!wlc->radio_monitor)
@@ -4343,18 +4216,6 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
}
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
- if (wlc->pub->radio_disabled)
- brcms_c_radio_disable(wlc);
- else
- brcms_c_radio_enable(wlc);
-}
-
/* update hwradio status and return it */
bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
@@ -4376,12 +4237,7 @@ static void brcms_c_radio_timer(void *arg)
return;
}
- /* cap mpc off count */
- if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
- wlc->mpc_offcnt++;
-
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
}
/* common low-level watchdog code */
@@ -4407,60 +4263,6 @@ static void brcms_b_watchdog(void *arg)
wlc_phy_watchdog(wlc_hw->band->pi);
}
-static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
- bool mpc_radio, radio_state;
-
- /*
- * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
- * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
- * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
- * the radio is going down.
- */
- if (!wlc->mpc) {
- if (!wlc->pub->radio_disabled)
- return;
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (!wlc->pub->radio_disabled)
- brcms_c_radio_monitor_stop(wlc);
- return;
- }
-
- /*
- * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
- * wlc->pub->radio_disabled to go ON, always call radio_upd
- * synchronously to go OFF, postpone radio_upd to later when
- * context is safe(e.g. watchdog)
- */
- radio_state =
- (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
- ON);
- mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
- if (radio_state == ON && mpc_radio == OFF)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
- else if (radio_state == OFF && mpc_radio == ON) {
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
- wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
- else
- wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
- }
- /*
- * Below logic is meant to capture the transition from mpc off
- * to mpc on for reasons other than wlc->mpc_delay_off keeping
- * the mpc off. In that case reset wlc->mpc_delay_off to
- * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
- */
- if ((wlc->prev_non_delay_mpc == false) &&
- (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
-
- wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
/* common watchdog code */
static void brcms_c_watchdog(void *arg)
{
@@ -4481,21 +4283,7 @@ static void brcms_c_watchdog(void *arg)
/* increment second count */
wlc->pub->now++;
- /* delay radio disable */
- if (wlc->mpc_delay_off) {
- if (--wlc->mpc_delay_off == 0) {
- mboolset(wlc->pub->radio_disabled,
- WL_RADIO_MPC_DISABLE);
- if (wlc->mpc && brcms_c_ismpc(wlc))
- wlc->mpc_offcnt = 0;
- }
- }
-
- /* mpc sync */
- brcms_c_radio_mpc_upd(wlc);
- /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
/* if radio is disable, driver may be down, quit here */
if (wlc->pub->radio_disabled)
return;
@@ -4599,9 +4387,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
/* WME QoS mode is Auto by default */
wlc->pub->_ampdu = AMPDU_AGG_HOST;
wlc->pub->bcmerror = 0;
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
}
static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -4647,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void __iomem *regsva,
- struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+ uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- struct d11regs __iomem *regs;
char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
+ struct pci_dev *pcidev = core->bus->host_pci;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
- device);
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
wme = true;
@@ -4678,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
* Do the hardware portion of the attach. Also initialize software
* state that depends on the particular hardware we are running.
*/
- wlc_hw->sih = ai_attach(regsva, btparam);
+ wlc_hw->sih = ai_attach(core->bus);
if (wlc_hw->sih == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
@@ -4687,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(vendor, device)) {
+ if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
- unit, vendor, device);
+ unit, pcidev->vendor, pcidev->device);
err = 12;
goto fail;
}
- wlc_hw->vendorid = vendor;
- wlc_hw->deviceid = device;
-
- /* set bar0 window to point at D11 core */
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- wlc_hw->corerev = ai_corerev(wlc_hw->sih);
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
- regs = wlc_hw->regs;
-
- wlc->regs = wlc_hw->regs;
+ wlc_hw->d11core = core;
+ wlc_hw->corerev = core->id.rev;
/* validate chip, chiprev and corerev */
if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4740,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->boardrev = (u16) j;
if (!brcms_c_validboardtype(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
- "board type (0x%x)" " or revision level (0x%x)\n",
- unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+ "board type (0x%x)" " or revision level (0x%x)\n",
+ unit, ai_get_boardtype(wlc_hw->sih),
+ wlc_hw->boardrev);
err = 15;
goto fail;
}
@@ -4762,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
else
wlc_hw->_nbands = 1;
- if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4794,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
sha_params.corerev = wlc_hw->corerev;
sha_params.vid = wlc_hw->vendorid;
sha_params.did = wlc_hw->deviceid;
- sha_params.chip = wlc_hw->sih->chip;
- sha_params.chiprev = wlc_hw->sih->chiprev;
- sha_params.chippkg = wlc_hw->sih->chippkg;
+ sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+ sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+ sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
sha_params.sromrev = wlc_hw->sromrev;
- sha_params.boardtype = wlc_hw->sih->boardtype;
+ sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
sha_params.boardrev = wlc_hw->boardrev;
- sha_params.boardvendor = wlc_hw->sih->boardvendor;
sha_params.boardflags = wlc_hw->boardflags;
sha_params.boardflags2 = wlc_hw->boardflags2;
- sha_params.buscorerev = wlc_hw->sih->buscorerev;
/* alloc and save pointer to shared phy state area */
wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4825,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+ wlc->core->coreidx = core->core_index;
- wlc_hw->machwcap = R_REG(&regs->machwcap);
+ wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
@@ -4836,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Get a phy for this band */
wlc_hw->band->pi =
- wlc_phy_attach(wlc_hw->phy_sh, regs,
+ wlc_phy_attach(wlc_hw->phy_sh, core,
wlc_hw->band->bandtype,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
@@ -4910,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
- /* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
- (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -4944,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
goto fail;
}
- BCMMSG(wlc->wiphy,
- "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+ macaddr);
return err;
@@ -5185,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
* and per-port interrupt object may has been freed. this must
* be done before sb core switch
*/
- ai_deregister_intr_callback(wlc_hw->sih);
ai_pci_sleep(wlc_hw->sih);
}
@@ -5259,9 +5031,6 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
/* STA-BSS; short capable */
wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
- /* fixup mpc */
- wlc->mpc = true;
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
@@ -5283,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
ai_pci_fixcfg(wlc_hw->sih);
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
/*
* Inform phy that a POR reset has occurred so
@@ -5301,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5376,7 +5143,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
if (!wlc->clk)
return;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
wlc->wme_retries[ac]);
}
@@ -5396,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5533,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
} else {
/* Reset and disable the core */
- if (ai_iscoreup(wlc_hw->sih)) {
- if (R_REG(&wlc_hw->regs->maccontrol) &
- MCTL_EN_MAC)
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
+ if (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
callbacks += brcms_reset(wlc_hw->wlc->wl);
brcms_c_coredisable(wlc_hw);
@@ -5575,7 +5342,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
if (!wlc->pub->up)
return callbacks;
- /* in between, mpc could try to bring down again.. */
wlc->going_down = true;
callbacks += brcms_b_bmac_down_prep(wlc->hw);
@@ -5852,7 +5618,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
- for (ac = 0; ac < AC_COUNT; ac++) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_SHORT, wlc->SRL);
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
@@ -6103,7 +5869,6 @@ void brcms_c_print_txdesc(struct d11txh *txh)
u8 *rtsph = txh->RTSPhyHeader;
struct ieee80211_rts rts = txh->rts_frame;
- char hexbuf[256];
/* add plcp header along with txh descriptor */
printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
@@ -6124,17 +5889,16 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
printk(KERN_DEBUG "\n");
- brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
- printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
- brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
- printk(KERN_DEBUG "RA: %s\n", hexbuf);
+ print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
+ print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
+ ra, sizeof(txh->TxFrameRA));
printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
- brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtspfb, sizeof(txh->RTSPLCPFallback));
printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
- brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
- printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
+ fragpfb, sizeof(txh->FragPLCPFallback));
printk(KERN_DEBUG "DUR: %04x", fragdfb);
printk(KERN_DEBUG "\n");
@@ -6149,18 +5913,18 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
- brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
- printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+ print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtsph, sizeof(txh->RTSPhyHeader));
+ print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
+ (u8 *)&rts, sizeof(txh->rts_frame));
printk(KERN_DEBUG "\n");
}
#endif /* defined(BCMDBG) */
#if defined(BCMDBG)
-int
+static int
brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
- int len)
+ int len)
{
int i;
char *p = buf;
@@ -6916,7 +6680,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
- len = brcmu_pkttotlen(p);
+ len = p->len;
phylen = len + FCS_LEN;
/* Get tx_info */
@@ -7695,11 +7459,11 @@ static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
/* read the tsf timer low, then high to get an atomic read */
- *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
- *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+ *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+ *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}
/*
@@ -8253,12 +8017,6 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc)
return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
}
-void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
-{
- wlc->mpc = mpc;
- brcms_c_radio_mpc_upd(wlc);
-}
-
/* Process received frames */
/*
* Return true if more frames need to be processed. false otherwise.
@@ -8293,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
- if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
- " tossing\n");
+ if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
- } else {
- wiphy_err(wlc->wiphy, "RCSERR!!!\n");
- goto toss;
- }
}
/* check received pkt has at least frame control field */
@@ -8328,21 +8080,17 @@ static bool
brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
{
struct sk_buff *p;
- struct sk_buff *head = NULL;
- struct sk_buff *tail = NULL;
+ struct sk_buff *next = NULL;
+ struct sk_buff_head recv_frames;
+
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- /* gather received frames */
- while ((p = dma_rx(wlc_hw->di[fifo]))) {
+ skb_queue_head_init(&recv_frames);
- if (!tail)
- head = tail = p;
- else {
- tail->prev = p;
- tail = p;
- }
+ /* gather received frames */
+ while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
/* !give others some time to run! */
if (++n >= bound_limit)
@@ -8353,12 +8101,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
dma_rxfill(wlc_hw->di[fifo]);
/* process each frame */
- while ((p = head) != NULL) {
+ skb_queue_walk_safe(&recv_frames, p, next) {
struct d11rxhdr_le *rxh_le;
struct d11rxhdr *rxh;
- head = head->prev;
- p->prev = NULL;
+ skb_unlink(p, &recv_frames);
rxh_le = (struct d11rxhdr_le *)p->data;
rxh = (struct d11rxhdr *)p->data;
@@ -8389,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc->wiphy;
if (brcms_deviceremoved(wlc)) {
@@ -8425,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
BCMMSG(wlc->wiphy, "end of ATIM window\n");
- OR_REG(&regs->maccommand, wlc->qvalid);
+ bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
@@ -8443,18 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
if (macintstatus & MI_GP0) {
wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
- "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+ "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, wlc_hw->sih->chip,
- wlc_hw->sih->chiprev);
- /* big hammer */
- brcms_init(wlc->wl);
+ __func__, ai_get_chip_id(wlc_hw->sih),
+ ai_get_chiprev(wlc_hw->sih));
+ brcms_fatal_error(wlc_hw->wlc->wl);
}
/* gptimer timeout */
if (macintstatus & MI_TO)
- W_REG(&regs->gptimer, 0);
+ bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8470,20 +8216,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
return wlc->macintstatus != 0;
fatal:
- brcms_init(wlc->wl);
+ brcms_fatal_error(wlc_hw->wlc->wl);
return wlc->macintstatus != 0;
}
-void brcms_c_init(struct brcms_c_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc->hw->d11core;
u16 chanspec;
- bool mute = false;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- regs = wlc->regs;
-
/*
* This will happen if a big-hammer was executed. In
* that case, we want to go back to the channel that
@@ -8494,7 +8237,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
else
chanspec = brcms_c_init_chanspec(wlc);
- brcms_b_init(wlc->hw, chanspec, mute);
+ brcms_b_init(wlc->hw, chanspec);
/* update beacon listen interval */
brcms_c_bcn_li_upd(wlc);
@@ -8513,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* update since init path would reset
* to default value
*/
- W_REG(&regs->tsf_cfprep,
- (bi << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ bi << CFPREP_CBI_SHIFT);
/* Update maccontrol PM related bits */
brcms_c_set_ps_ctrl(wlc);
@@ -8544,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
- OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+ bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
/* Init precedence maps for empty FIFOs */
@@ -8560,14 +8303,15 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* ..now really unleash hell (allow the MAC out of suspend) */
brcms_c_enable_mac(wlc);
+ /* suspend the tx fifos and mute the phy for preism cac time */
+ if (mute_tx)
+ brcms_b_mute(wlc->hw, true);
+
/* clear tx flow control */
brcms_c_txflowcontrol_reset(wlc);
/* enable the RF Disable Delay timer */
- W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+ bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
/*
* Initialize WME parameters; if they haven't been set by some other
@@ -8577,7 +8321,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* Uninitialized; read from HW */
int ac;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
wlc->wme_retries[ac] =
brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
}
@@ -8587,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* The common driver entry routine. Error codes should be unique
*/
struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr)
{
struct brcms_c_info *wlc;
uint err = 0;
@@ -8597,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
@@ -8624,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
* low level attach steps(all hw accesses go
* inside, no more in rest of the attach)
*/
- err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
- btparam);
+ err = brcms_b_attach(wlc, core, unit, piomode);
if (err)
goto fail;
@@ -8754,8 +8496,6 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
brcms_c_ht_update_sgi_rx(wlc, 0);
}
- /* initialize radio_mpc_disable according to wlc->mpc */
- brcms_c_radio_mpc_upd(wlc);
brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
if (perr)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index c0e0fcfdfaf..adb136ec1f0 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -44,8 +44,6 @@
/* transmit buffer max headroom for protocol headers */
#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
-#define AC_COUNT 4
-
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
* #define <NAME>_M BITFIELD_MASK(3)
@@ -336,7 +334,7 @@ struct brcms_hardware {
u32 machwcap_backup; /* backup of machwcap */
struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- struct d11regs __iomem *regs; /* pointer to device registers */
+ struct bcma_device *d11core; /* pointer to 802.11 core */
struct phy_shim_info *physhim; /* phy shim layer handler */
struct shared_phy *phy_sh; /* pointer to shared phy state */
struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -402,7 +400,6 @@ struct brcms_txq_info {
*
* pub: pointer to driver public state.
* wl: pointer to specific private state.
- * regs: pointer to device registers.
* hw: HW related state.
* clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
* fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -427,11 +424,6 @@ struct brcms_txq_info {
* bandinit_pending: track band init in auto band.
* radio_monitor: radio timer is running.
* going_down: down path intermediate variable.
- * mpc: enable minimum power consumption.
- * mpc_dlycnt: # of watchdog cnt before turn disable radio.
- * mpc_offcnt: # of watchdog cnt that radio is disabled.
- * mpc_delay_off: delay radio disable by # of watchdog cnt.
- * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
* wdtimer: timer for watchdog routine.
* radio_timer: timer for hw radio button monitor routine.
* monitor: monitor (MPDU sniffing) mode.
@@ -441,7 +433,7 @@ struct brcms_txq_info {
* bcn_li_dtim: beacon listen interval in # dtims.
* WDarmed: watchdog timer is armed.
* WDlast: last time wlc_watchdog() was called.
- * edcf_txop[AC_COUNT]: current txop for each ac.
+ * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits.
* tx_prec_map: Precedence map based on HW FIFO space.
* fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@@ -484,7 +476,6 @@ struct brcms_txq_info {
struct brcms_c_info {
struct brcms_pub *pub;
struct brcms_info *wl;
- struct d11regs __iomem *regs;
struct brcms_hardware *hw;
/* clock */
@@ -522,18 +513,11 @@ struct brcms_c_info {
bool radio_monitor;
bool going_down;
- bool mpc;
- u8 mpc_dlycnt;
- u8 mpc_offcnt;
- u8 mpc_delay_off;
- u8 prev_non_delay_mpc;
-
struct brcms_timer *wdtimer;
struct brcms_timer *radio_timer;
/* promiscuous */
- bool monitor;
- bool bcnmisc_monitor;
+ uint filter_flags;
/* driver feature */
bool _rifs;
@@ -546,9 +530,9 @@ struct brcms_c_info {
u32 WDlast;
/* WME */
- u16 edcf_txop[AC_COUNT];
+ u16 edcf_txop[IEEE80211_NUM_ACS];
- u16 wme_retries[AC_COUNT];
+ u16 wme_retries[IEEE80211_NUM_ACS];
u16 tx_prec_map;
u16 fifo2prec_map[NFIFO];
@@ -671,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);
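
nicpci.c below gets the same treatment; the only wrinkle is that the PCIe config and indirect register spaces are reached through an address/data register pair, so each access is a write of the target offset, a readback to flush the posted write, then the data access. A short sketch of that sequence, assuming the PCIEREGOFFS() macro and struct sbpcieregs from the nicpci.c hunks are in scope (the wrapper name here is illustrative only):

	#include <linux/bcma/bcma.h>
	#include <linux/stddef.h>

	#define PCIEREGOFFS(field)	offsetof(struct sbpcieregs, field)

	/* read one PCIe config-space register through the address/data pair */
	static u32 pcie_cfg_read_sketch(struct bcma_device *core, uint offset)
	{
		/* select the register ... */
		bcma_write32(core, PCIEREGOFFS(configaddr), offset);
		/* ... read back to flush the posted write ... */
		(void)bcma_read32(core, PCIEREGOFFS(configaddr));
		/* ... then fetch the data register */
		return bcma_read32(core, PCIEREGOFFS(configdata));
	}
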
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb2679204..7fad6dc1925 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
+
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
u32 control; /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
};
struct pcicore_info {
- union {
- struct sbpcieregs __iomem *pcieregs;
- struct sbpciregs __iomem *pciregs;
- } regs; /* Memory mapped register to the core */
-
+ struct bcma_device *core;
struct si_pub *sih; /* System interconnect handle */
struct pci_dev *dev;
u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
};
#define PCIE_ASPM(sih) \
- (((sih)->buscoretype == PCIE_CORE_ID) && \
- (((sih)->buscorerev >= 3) && \
- ((sih)->buscorerev <= 5)))
+ ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
+ ((ai_get_buscorerev(sih) >= 3) && \
+ (ai_get_buscorerev(sih) <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
/* Initialize the PCI core.
* It's caller's responsibility to make sure that this is done only once
*/
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
- void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
{
struct pcicore_info *pi;
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
return NULL;
pi->sih = sih;
- pi->dev = pdev;
+ pi->dev = core->bus->host_pci;
+ pi->core = core;
- if (sih->buscoretype == PCIE_CORE_ID) {
+ if (core->id.id == PCIE_CORE_ID) {
u8 cap_ptr;
- pi->regs.pcieregs = regs;
cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
NULL, NULL);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- } else
- pi->regs.pciregs = regs;
-
+ }
return pi;
}
@@ -334,37 +330,37 @@ end:
/* ***** Register Access API */
static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG(&pcieregs->configaddr, offset);
- (void)R_REG((&pcieregs->configaddr));
- retval = R_REG(&pcieregs->configdata);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(configaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(configdata));
break;
case PCIE_PCIEREGS:
- W_REG(&pcieregs->pcieindaddr, offset);
- (void)R_REG(&pcieregs->pcieindaddr);
- retval = R_REG(&pcieregs->pcieinddata);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
break;
}
return retval;
}
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG((&pcieregs->configaddr), offset);
- W_REG((&pcieregs->configdata), val);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(configdata), val);
break;
case PCIE_PCIEREGS:
- W_REG((&pcieregs->pcieindaddr), offset);
- W_REG((&pcieregs->pcieinddata), val);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
break;
default:
break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
(MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
(MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
(blk << 4));
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE)
break;
udelay(1000);
@@ -404,15 +400,15 @@ static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
/* enable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+ MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
- if (pi->sih->buscorerev >= 10) {
+ if (ai_get_buscorerev(pi->sih) >= 10) {
/* new serdes is slower in rw,
* using two layers of reg address mapping
*/
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
*val);
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
if (!write) {
pr28829_delay();
- *val = (R_REG(&pcieregs->mdiodata) &
+ *val = (bcma_read32(pi->core,
+ PCIEREGOFFS(mdiodata)) &
MDIODATA_MASK);
}
/* Disable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 0;
}
udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
}
/* Timed out. Disable mdio access to SERDES. */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 1;
}
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+ if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+ ai_get_buscorerev(sih) < 7)
return;
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
if (extend)
w |= PCIE_ASPMTIMER_EXTEND;
else
w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}
/* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
pcie_clkreq(pi, 1, 0);
break;
case SI_PCIDOWN:
- if (sih->buscorerev == 6) { /* turn on serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
+ /* turn on serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0);
} else if (pi->pcie_pr42767) {
pcie_clkreq(pi, 1, 1);
}
break;
case SI_PCIUP:
- if (sih->buscorerev == 6) { /* turn off serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
+ /* turn off serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0x40);
} else if (PCIE_ASPM(sih)) { /* disable clkreq */
pcie_clkreq(pi, 1, 0);
}
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
if (pi->pcie_polarity != 0)
return;
- w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
/* Detect the current polarity at attach and force that polarity and
* disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
*/
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
struct si_pub *sih = pi->sih;
u16 val16;
- u16 __iomem *reg16;
u32 w;
if (!PCIE_ASPM(sih))
return;
/* bypass this on QT or VSIM */
- reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
val16 &= ~SRSH_ASPM_ENB;
if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
val16 |= SRSH_ASPM_L0s_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
- reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
} else
val16 &= ~SRSH_CLKREQ_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+ val16);
}
/* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u16 val16;
- u16 __iomem *reg16;
- reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
val16 |= SRSH_L23READY_EXIT_NOPERST;
- W_REG(reg16, val16);
+ bcma_write16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
}
}
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- u16 __iomem *reg16;
-
/* turn off serdes PLL down */
- ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+ ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+ CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
/* clear srom shadow backdoor */
- reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
- W_REG(reg16, 0);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u32 w;
- if (sih->buscorerev == 0 || sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG);
w |= 0x8;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG, w);
}
- if (sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+ if (ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
w |= 0x40;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
}
- if (sih->buscorerev == 0) {
+ if (ai_get_buscorerev(sih) == 0) {
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
} else if (PCIE_ASPM(sih)) {
/* Change the L1 threshold for better performance */
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG);
w &= ~PCIE_L1THRESHOLDTIME_MASK;
w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG, w);
pcie_war_serdes(pi);
pcie_war_aspm_clkreq(pi);
- } else if (pi->sih->buscorerev == 7)
+ } else if (ai_get_buscorerev(pi->sih) == 7)
pcie_war_noplldown(pi);
/* Note that the fix is actually in the SROM,
* that's why this is open-ended
*/
- if (pi->sih->buscorerev >= 6)
+ if (ai_get_buscorerev(pi->sih) >= 6)
pcie_misc_config_fixup(pi);
}
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
void pcicore_hwup(struct pcicore_info *pi)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
void pcicore_up(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
/* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
void pcicore_down(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
pcie_extendL1timer(pi, false);
}
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
{
- struct si_info *sii = (struct si_info *)(pi->sih);
+ struct bcma_device *core = pi->core;
u16 val16;
- uint pciidx;
+ uint regoff;
- pciidx = ai_coreidx(&sii->pub);
- val16 = R_REG(reg16);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
- val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- W_REG(reg16, val16);
- }
-}
+ switch (pi->core->id.id) {
+ case BCMA_CORE_PCI:
+ regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
- pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
+ case BCMA_CORE_PCIE:
+ regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
- struct sbpcieregs __iomem *pcieregs)
-{
- pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+ default:
+ return;
+ }
+
+ val16 = bcma_read16(pi->core, regoff);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+ (u16)core->core_index) {
+ val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+ (val16 & ~SRSH_PI_MASK);
+ bcma_write16(pi->core, regoff, val16);
+ }
}
/* precondition: current core is pci core */
void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
{
- u32 w;
-
- OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
- OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
- w = R_REG(&pciregs->clkrun);
- W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
- w = R_REG(&pciregs->clkrun);
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_PREF | SBTOPCI_BURST);
+
+ if (pi->core->id.rev >= 11) {
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_RC_READMULTI);
+ bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+ (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
}
}
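For readers following the conversion, the old pointer-based register macros map onto the bcma accessors as in the sketch below. It assumes PCIREGOFFS(field) expands to offsetof(struct sbpciregs, field) — consistent with how pcicore_pci_setup() indexes sbtopci2 and clkrun above — and that the usual driver and bcma headers are in scope; the helper is illustrative only, not code from the patch.

/* Illustrative sketch: old __iomem macro idioms vs. bcma accessors.
 * Assumes the brcmsmac/bcma headers are included and that
 * PCIREGOFFS(field) == offsetof(struct sbpciregs, field).
 */
#include <linux/bcma/bcma.h>
#include <linux/stddef.h>

static void example_sbtopci_setup(struct bcma_device *core)
{
	/* old: OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
	 * bcma_set32() performs the read-modify-write internally.
	 */
	bcma_set32(core, PCIREGOFFS(sbtopci2), SBTOPCI_PREF | SBTOPCI_BURST);

	/* old: w = R_REG(&pciregs->clkrun);
	 *      W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
	 */
	bcma_set32(core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);

	/* old: (void)R_REG(&pciregs->clkrun);  -- flush the posted write */
	(void)bcma_read32(core, PCIREGOFFS(clkrun));
}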
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80dc332..9fc3ead540a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@ struct sbpciregs;
struct sbpcieregs;
extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct pci_dev *pdev,
- void __iomem *regs);
+ struct bcma_device *core);
extern void pcicore_deinit(struct pcicore_info *pch);
extern void pcicore_attach(struct pcicore_info *pch, int state);
extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
extern void pcicore_down(struct pcicore_info *pch, int state);
extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
- struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf551561fd..f1ca1262586 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@ struct otp_fn_s {
};
struct otpinfo {
- uint ccrev; /* chipc revision */
+ struct bcma_device *core; /* chipc core */
const struct otp_fn_s *fn; /* OTP functions */
struct si_pub *sih; /* Saved sb handle */
@@ -133,9 +133,10 @@ struct otpinfo {
#define OTP_SZ_FU_144 (144/8) /* 144 bits */
static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
{
- return R_REG(&cc->sromotp[wn]);
+ return bcma_read16(oi->core,
+ CHIPCREGOFFS(sromotp[wn]));
}
/*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
{
int ret = 0;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
return ret;
}
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
{
uint k;
u32 otpp, st;
+ int ccrev = ai_get_ccrev(oi->sih);
+
/*
* record word offset of General Use Region
* for various chipcommon revs
*/
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
+ if (ccrev == 21 || ccrev == 24
+ || ccrev == 27) {
oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
+ } else if (ccrev == 36) {
/*
* OTP size greater than equal to 2KB (128 words),
* otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->otpgu_base = REVB8_OTPGU_BASE;
else
oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ } else if (ccrev == 23 || ccrev >= 25) {
oi->otpgu_base = REVB8_OTPGU_BASE;
}
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
otpp =
OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
+ bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+ for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
if (k >= OTPP_TRIES)
return;
/* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
+ oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+ || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
+ p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK) >> OTPGU_P_SHIFT;
oi->status |= (p_bits << OTPS_GUP_SHIFT);
}
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->hwlim = oi->wsize;
if (oi->status & OTPS_GUP_HW) {
oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
oi->swbase = oi->hwlim;
} else
oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
if (oi->status & OTPS_GUP_SW) {
oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
oi->fbase = oi->swlim;
} else
oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
{
- uint idx;
- struct chipcregs __iomem *cc;
-
/* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
+ if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
return -EBADE;
/* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
return -EBADE;
/* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
case 0:
/* Nothing there */
return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
}
/* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
+ _ipxotp_init(oi);
return 0;
}
static int
ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
{
- uint idx;
- struct chipcregs __iomem *cc;
uint base, i, sz;
/* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
return -EINVAL;
}
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
/* Read the data */
for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, cc, base + i);
+ data[i] = ipxotp_otpr(oi, base + i);
- ai_setcoreidx(oi->sih, idx);
*wlen = sz;
return 0;
}
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
static int otp_init(struct si_pub *sih, struct otpinfo *oi)
{
-
int ret;
memset(oi, 0, sizeof(struct otpinfo));
- oi->ccrev = sih->ccrev;
+ oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (OTPTYPE_IPX(oi->ccrev))
+ if (OTPTYPE_IPX(ai_get_ccrev(sih)))
oi->fn = &ipxotp_fn;
if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
oi->sih = sih;
- ret = (oi->fn->init) (sih, oi);
+ ret = (oi->fn->init)(sih, oi);
return ret;
}
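The _ipxotp_init() change above replaces an inline busy-wait on otpprog with explicit bcma reads; the bounded kick-and-poll idiom it uses looks like the sketch below. CHIPCREGOFFS(field) is assumed to expand to offsetof(struct chipcregs, field), and the helper is illustrative, not code from the patch.

/* Sketch of the kick-and-poll idiom used when issuing the OTP init command.
 * Assumes CHIPCREGOFFS(field) == offsetof(struct chipcregs, field) and that
 * OTPP_START_BUSY / OTPP_TRIES come from the driver's OTP definitions.
 */
static bool example_otp_cmd_done(struct bcma_device *cc, u32 otpp)
{
	u32 st;
	uint k;

	/* kick the command; OTPP_START_BUSY is set by the caller in otpp */
	bcma_write32(cc, CHIPCREGOFFS(otpprog), otpp);

	/* poll until the controller clears the busy bit, with a retry cap */
	for (k = 0; k < OTPP_TRIES; k++) {
		st = bcma_read32(cc, CHIPCREGOFFS(otpprog));
		if (!(st & OTPP_START_BUSY))
			return true;
	}
	return false;	/* timed out */
}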
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index a3149254cbc..264f8c4c703 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = {
{204, 5020},
{208, 5040},
{212, 5060},
- {216, 50800}
+ {216, 5080}
};
-const u8 ofdm_rate_lookup[] = {
+static const u8 ofdm_rate_lookup[] = {
BRCM_RATE_48M,
BRCM_RATE_24M,
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 dummy;
- dummy = R_REG(&pi->regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->phy_wreg = 0;
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
}
@@ -186,19 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- data = R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-
-#ifdef __ARM_ARCH_4T__
- __asm__(" .align 4 ");
- __asm__(" nop ");
- data = R_REG(&pi->regs->phy4wdatalo);
-#else
- data = R_REG(&pi->regs->phy4wdatalo);
-#endif
-
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
}
pi->phy_wreg = 0;
@@ -211,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- W_REG(&pi->regs->radioregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
- W_REG(&pi->regs->phy4wdatalo, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
}
@@ -231,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
- W_REG_FLUSH(&pi->regs->radioregaddr, 0);
- b0 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 1);
- b1 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 2);
- b2 = (u32) R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+ b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+ b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+ b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
& 0xf);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
- id = (u32) R_REG(&pi->regs->phy4wdatalo);
- id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+ id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+ id |= (u32) bcma_read16(pi->d11core,
+ D11REGOFFS(phy4wdatahi)) << 16;
}
pi->phy_wreg = 0;
return id;
@@ -283,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
void write_phy_channel_reg(struct brcms_phy *pi, uint val)
{
- W_REG(&pi->regs->phychannel, val);
+ bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
}
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
pi->phy_wreg = 0;
- return R_REG(&regs->phyregdata);
+ return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
}
void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
#ifdef CONFIG_BCM47XX
- W_REG_FLUSH(&regs->phyregaddr, addr);
- W_REG(&regs->phyregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)R_REG(&regs->phyregdata);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
#else
- W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+ bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
pi->phy_wreg = 0;
- (void)R_REG(&regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
#endif
}
void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata,
- ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+ val &= mask;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
pi->phy_wreg = 0;
}
@@ -412,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
sh->sromrev = shp->sromrev;
sh->boardtype = shp->boardtype;
sh->boardrev = shp->boardrev;
- sh->boardvendor = shp->boardvendor;
sh->boardflags = shp->boardflags;
sh->boardflags2 = shp->boardflags2;
- sh->buscorerev = shp->buscorerev;
sh->fast_timer = PHY_SW_TIMER_FAST;
sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -458,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
}
struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy)
{
struct brcms_phy *pi;
@@ -470,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (D11REV_IS(sh->corerev, 4))
sflags = SISF_2G_PHY | SISF_5G_PHY;
else
- sflags = ai_core_sflags(sh->sih, 0, 0);
+ sflags = bcma_aread32(d11core, BCMA_IOST);
if (bandtype == BRCM_BAND_5G) {
if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -488,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
- pi->regs = regs;
+ pi->d11core = d11core;
pi->sh = sh;
pi->phy_init_por = true;
pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -503,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.coreflags = SICF_GMODE;
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
- phyversion = R_REG(&pi->regs->phyversion);
+ phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->pubpi.phy_type = PHY_TYPE(phyversion);
pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -515,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
- if (!pi->pubpi.phy_type == PHY_TYPE_N &&
- !pi->pubpi.phy_type == PHY_TYPE_LCN)
+ if (pi->pubpi.phy_type != PHY_TYPE_N &&
+ pi->pubpi.phy_type != PHY_TYPE_LCN)
goto err;
if (bandtype == BRCM_BAND_5G) {
@@ -787,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
pi->radio_chanspec = chanspec;
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
return;
if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+ if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
"HW error SISF_FCLKA\n"))
return;
@@ -833,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
struct brcms_phy *pi = (struct brcms_phy *) pih;
void (*cal_init)(struct brcms_phy *) = NULL;
- if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
- "HW error: MAC enabled during phy cal\n"))
+ if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
return;
if (!pi->initialized) {
@@ -1025,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
#define DUMMY_PKT_LEN 20
- struct d11regs __iomem *regs = pi->regs;
+ struct bcma_device *core = pi->d11core;
int i, count;
u8 ofdmpkt[DUMMY_PKT_LEN] = {
0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1041,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
dummypkt);
- W_REG(&regs->xmtsel, 0);
+ bcma_write16(core, D11REGOFFS(xmtsel), 0);
if (D11REV_GE(pi->sh->corerev, 11))
- W_REG(&regs->wepctl, 0x100);
+ bcma_write16(core, D11REGOFFS(wepctl), 0x100);
else
- W_REG(&regs->wepctl, 0);
+ bcma_write16(core, D11REGOFFS(wepctl), 0);
- W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+ bcma_write16(core, D11REGOFFS(txe_phyctl),
+ (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_phyctl1, 0x1A02);
+ bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
- W_REG(&regs->txe_wm_0, 0);
- W_REG(&regs->txe_wm_1, 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
- W_REG(&regs->xmttplatetxptr, 0);
- W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+ bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+ bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
- W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+ bcma_write16(core, D11REGOFFS(xmtsel),
+ ((8 << 8) | (1 << 5) | (1 << 2) | 2));
- W_REG(&regs->txe_ctl, 0);
+ bcma_write16(core, D11REGOFFS(txe_ctl), 0);
if (!pa_on) {
if (ISNPHY(pi))
@@ -1068,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
}
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_aux, 0xD0);
+ bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
else
- W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+ bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
- (void)R_REG(&regs->txe_aux);
+ (void)bcma_read16(core, D11REGOFFS(txe_aux));
i = 0;
count = ofdm ? 30 : 250;
while ((i++ < count)
- && (R_REG(&regs->txe_status) & (1 << 7)))
+ && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
udelay(10);
i = 0;
- while ((i++ < 10)
- && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
udelay(10);
i = 0;
- while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
udelay(10);
if (!pa_on) {
@@ -1145,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (ISNPHY(pi)) {
wlc_phy_switch_radio_nphy(pi, on);
@@ -1385,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
&txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
- if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+ if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
mac_enabled = true;
if (mac_enabled)
@@ -1415,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
if (!SCAN_INPROG_PHY(pi)) {
bool suspend;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -1868,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- rxc = R_REG(&pi->regs->phyregdata);
- W_REG(&pi->regs->phyregdata,
- (0x1 << 15) | rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+ 0x1 << 15);
}
} else {
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- W_REG(&pi->regs->phyregdata, rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
}
wlc_phy_por_inform(ppi);
@@ -1999,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
pi->txpwrctrl = hwpwrctrl;
if (ISNPHY(pi)) {
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2201,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2419,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2438,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
struct phy_iq_est est[PHY_CORE_MAX];
u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2932,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
}
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x40);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x40);
} else {
mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x00);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x00);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x40);
}
}
}
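The and_phy_reg()/or_phy_reg()/mod_phy_reg() rewrites above rely on the bcma helpers doing the read-modify-write internally; the equivalence is spelled out in the sketch below. D11REGOFFS(field) is assumed to expand to offsetof(struct d11regs, field); the function is illustrative, not the patch's code.

/* Sketch: bcma_maskset16(core, off, ~mask, val & mask) is equivalent to the
 * removed W_REG(reg, (R_REG(reg) & ~mask) | (val & mask)) sequence, because
 * bcma_maskset16() writes (read & mask_arg) | set_arg back to the register.
 */
static void example_mod_phy_reg(struct brcms_phy *pi, u16 addr,
				u16 mask, u16 val)
{
	/* latch the PHY register address; the flush read orders the write */
	bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
	/* clear the field, then set the new bits within the mask */
	bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val & mask);
}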
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e15163222..e34a71e7d24 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@ struct shared_phy_params {
struct phy_shim_info *physhim;
uint unit;
uint corerev;
- uint buscorerev;
u16 vid;
u16 did;
uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
};
@@ -183,7 +181,7 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct d11regs __iomem *regs,
+ struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index bea85241a24..af00e2c2b26 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@ struct shared_phy {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
- uint buscorerev;
uint fast_timer;
uint slow_timer;
uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
} u;
bool user_txpwr_at_rfport;
- struct d11regs __iomem *regs;
+ struct bcma_device *d11core;
struct brcms_phy *next;
struct brcms_phy_pub pubpi;
@@ -774,11 +772,6 @@ struct brcms_phy {
s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ];
u8 nphy_noise_index;
- u8 nphy_txpid2g[PHY_CORE_NUM_2];
- u8 nphy_txpid5g[PHY_CORE_NUM_2];
- u8 nphy_txpid5gl[PHY_CORE_NUM_2];
- u8 nphy_txpid5gh[PHY_CORE_NUM_2];
-
bool nphy_gain_boost;
bool nphy_elna_gain_config;
u16 old_bphy_test;
@@ -1095,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
#define BRCMS_PHY_WAR_PR51571(pi) \
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
- (void)R_REG(&(pi)->regs->maccontrol)
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99d981..ce8562aa5db 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
si_pmu_pllupd(pi->sh->sih);
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
s8 index, delta_brd, delta_temp, new_index, tempcorrx;
s16 manp, meas_temp, temp_diff;
- bool neg = 0;
+ bool neg = false;
u16 temp;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
temp_diff = manp - meas_temp;
if (temp_diff < 0) {
- neg = 1;
+ neg = true;
temp_diff = -temp_diff;
}
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
idleTssi = read_phy_reg(pi, 0x4ab);
- suspend =
- (0 ==
- (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
- MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
for (i = 0; i < 14; i++)
values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
bool suspend;
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
timer = 0;
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- curval1 = R_REG(&pi->regs->psm_corectlsts);
+ curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
ptr[130] = 0;
- W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+ ((1 << 6) | curval1));
- W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
- W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
udelay(20);
- curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+ curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+ curval2 | 0x30);
write_phy_reg(pi, 0x555, 0x0);
write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
- stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
do {
udelay(10);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
timer++;
} while ((curptr != stpptr) && (timer < 500));
- W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
strptr = 0x7E00;
- W_REG(&pi->regs->tplatewrptr, strptr);
+ bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
while (strptr < 0x8000) {
- val = R_REG(&pi->regs->tplatewrdata);
+ val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
imag = ((val >> 16) & 0x3ff);
real = ((val) & 0x3ff);
if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
}
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2);
- W_REG(&pi->regs->psm_corectlsts, curval1);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
}
static void
@@ -3681,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
udelay(20);
for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
- phy_c23 = 1;
- phy_c22 = 0;
+ phy_c23 = true;
+ phy_c22 = false;
switch (cal_type) {
case 0:
phy_c10 = 511;
@@ -3700,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c9 = read_phy_reg(pi, 0x93d);
phy_c9 = 2 * phy_c9;
- phy_c24 = 0;
+ phy_c24 = false;
phy_c5 = 7;
- phy_c25 = 1;
+ phy_c25 = true;
while (1) {
write_radio_reg(pi, RADIO_2064_REG026,
(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
udelay(50);
- phy_c22 = 0;
+ phy_c22 = false;
ptr[130] = 0;
wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
if (ptr[130] == 1)
- phy_c22 = 1;
+ phy_c22 = true;
if (phy_c22)
phy_c5 -= 1;
if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3721,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
if (phy_c5 <= 0 || phy_c5 >= 7)
break;
phy_c24 = phy_c22;
- phy_c25 = 0;
+ phy_c25 = false;
}
if (phy_c5 < 0)
@@ -3772,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c13 = phy_c11;
phy_c14 = phy_c12;
}
- phy_c23 = 0;
+ phy_c23 = false;
}
}
- phy_c23 = 1;
+ phy_c23 = true;
phy_c15 = phy_c13;
phy_c16 = phy_c14;
phy_c7 = phy_c7 >> 1;
@@ -3965,12 +3966,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s16 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4007,14 +4008,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4075,12 +4076,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
u16 vbatsenseval;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
s8 index;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend) {
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
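The same "is the MAC already suspended" test is open-coded at every converted call site in this file; a hypothetical helper such as the one below captures the pattern (the patch itself does not add it).

/* Hypothetical helper, not part of the patch: the LCN PHY paths repeatedly
 * check maccontrol before suspending the MAC around calibration work.
 */
static bool example_mac_suspended(struct brcms_phy *pi)
{
	u32 mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));

	return (mc & MCTL_EN_MAC) == 0;
}

/* typical converted call site:
 *
 *	suspend = example_mac_suspended(pi);
 *	if (!suspend)
 *		wlapi_suspend_mac_and_wait(pi->sh->physhim);
 */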
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index cd19c2f7a34..a16f1ab292f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -29,6 +29,7 @@
#include "phy_radio.h"
#include "phyreg_n.h"
#include "phytbl_n.h"
+#include "soc.h"
#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
@@ -14417,12 +14418,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
switch (band_num) {
case 0:
- pi->nphy_txpid2g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA0);
- pi->nphy_txpid2g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP2GA0);
@@ -14486,12 +14481,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 1:
- pi->nphy_txpid5g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA0);
- pi->nphy_txpid5g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
(s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
@@ -14551,12 +14540,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 2:
- pi->nphy_txpid5gl[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA0);
- pi->nphy_txpid5gl[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gl =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GLA0);
@@ -14615,12 +14598,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 3:
- pi->nphy_txpid5gh[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA0);
- pi->nphy_txpid5gh[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gh =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GHA0);
@@ -17825,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -17976,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -19470,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
u8 tx_pwr_ctrl_state;
bool do_nphy_cal = false;
uint core;
- uint origidx, intr_val;
- struct d11regs __iomem *regs;
u32 d11_clk_ctl_st;
bool do_rssi_cal = false;
@@ -19485,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
(pi->sh->chippkg == BCM4718_PKG_ID))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, chipcontrol),
+ 0x40, 0x40);
}
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
- regs = (struct d11regs __iomem *)
- ai_switch_core(pi->sh->sih,
- D11_CORE_ID, &origidx,
- &intr_val);
- d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
- AND_REG(&regs->clk_ctl_st,
- ~(CCS_FORCEHT | CCS_HTAREQ));
+ d11_clk_ctl_st = bcma_read32(pi->d11core,
+ D11REGOFFS(clk_ctl_st));
+ bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ ~(CCS_FORCEHT | CCS_HTAREQ));
- W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
- ai_restore_core(pi->sh->sih, origidx, intr_val);
+ bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ d11_clk_ctl_st);
}
pi->use_int_tx_iqlo_cal_nphy =
@@ -19908,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -21286,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(BBCFG_RESETCCA | BBCFG_RESETRX));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
} else if (!CHSPEC_IS5G(chanspec) && val) {
and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21365,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
spuravoid = 1;
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
+ si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
-
if (spuravoid == 1) {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x5341);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x5341);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
} else {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x8889);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x8889);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
}
}
@@ -21522,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
- W_REG(&pi->regs->maccontrol, mc);
+ bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
- OR_REG(&pi->regs->psm_gpio_oe, mask);
+ bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- AND_REG(&pi->regs->psm_gpio_out, ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21545,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
bool suspended = false;
if (D11REV_IS(pi->sh->corerev, 16)) {
- suspended =
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
- false : true;
+ suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) ? false : true;
if (!suspended)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
@@ -25406,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
if (pi->nphy_papd_skip == 1)
return;
- phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!phy_b3)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -27994,20 +27965,11 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
switch (chan_freq_range) {
case WL_CHAN_FREQ_RANGE_2G:
- txpi[0] = pi->nphy_txpid2g[0];
- txpi[1] = pi->nphy_txpid2g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GL:
- txpi[0] = pi->nphy_txpid5gl[0];
- txpi[1] = pi->nphy_txpid5gl[1];
- break;
case WL_CHAN_FREQ_RANGE_5GM:
- txpi[0] = pi->nphy_txpid5g[0];
- txpi[1] = pi->nphy_txpid5g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GH:
- txpi[0] = pi->nphy_txpid5gh[0];
- txpi[1] = pi->nphy_txpid5gh[1];
+ txpi[0] = 0;
+ txpi[1] = 0;
break;
default:
txpi[0] = txpi[1] = 91;
@@ -28389,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 3b36e3acfd7..4931d29d077 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -23,6 +23,7 @@
#include "pub.h"
#include "aiutils.h"
#include "pmu.h"
+#include "soc.h"
/*
* external LPO crystal frequency
@@ -114,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
uint rsrcs;
/* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
/* ??? */
@@ -138,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
*pmax = max_mask;
}
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
- u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
{
u32 tmp = 0;
+ struct bcma_device *core;
- switch (sih->chip) {
+ /* switch to chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E920);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11500010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000C0C06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x0F600a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x2001E920);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
} else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11100010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000c0c06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x03000a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x200005c0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
}
tmp = 1 << 10;
break;
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100008);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0c000c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888855);
-
- tmp = 1 << 10;
- break;
-
default:
/* bail out */
return;
}
- tmp |= R_REG(&cc->pmucontrol);
- W_REG(&cc->pmucontrol, tmp);
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
}
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -219,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
return (u16) delay;
}
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
/* Read/write a chipcontrol reg */
u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+ mask, val);
}
/* Read/write a regcontrol reg */
u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, regcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+ mask, val);
}
/* Read/write a pllcontrol reg */
u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, pllcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+ mask, val);
}
/* PMU PLL update */
void si_pmu_pllupd(struct si_pub *sih)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
- PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
}
/* query alp/xtal clock frequency */
@@ -275,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
u32 clock = ALP_CLOCK;
/* bail out with default */
- if (!(sih->cccaps & CC_CAP_PMU))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
return clock;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -292,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
- struct chipcregs __iomem *cc;
- uint origidx, intr_val;
-
- /* Remember original core before switch to chipc */
- cc = (struct chipcregs __iomem *)
- ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
- /* update the pll changes */
- si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
- /* Return to original core */
- ai_restore_core(sih, origidx, intr_val);
-}
-
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (sih->pmurev == 1)
- AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
- else if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+ /* select chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
- uint origidx;
-
- /* Gate off SPROM clock and chip select signals */
- si_pmu_sprom_enable(sih, false);
-
- /* Remember original core */
- origidx = ai_coreidx(sih);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
+ if (ai_get_pmurev(sih) == 1)
+ bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+ ~PCTL_NOILP_ON_WAIT);
+ else if (ai_get_pmurev(sih) >= 2)
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
}
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 min_mask = 0, max_mask = 0;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ /* select to chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
/* Determine min/max rsrc masks */
si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -390,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
/* Program max resource mask */
if (max_mask)
- W_REG(&cc->max_res_mask, max_mask);
+ bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
/* Program min resource mask */
if (min_mask)
- W_REG(&cc->min_res_mask, min_mask);
+ bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
}
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 alp_khz;
- if (sih->pmurev < 10)
+ if (ai_get_pmurev(sih) < 10)
return 0;
/* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+ if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
/*
* Enable the reg to measure the freq,
* in case it was disabled before
*/
- W_REG(&cc->pmu_xtalfreq,
- 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
/* Delay for well over 4 ILP clocks */
udelay(1000);
/* Read the latched number of ALP ticks per 4 ILP ticks */
- ilp_ctr =
- R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+ ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+ PMU_XTALFREQ_REG_ILPCTR_MASK;
/*
* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
* bit to save power
*/
- W_REG(&cc->pmu_xtalfreq, 0);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
/* Calculate ALP frequency */
alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -451,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
} else
alp_khz = 0;
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
return alp_khz;
}
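
Two patterns in this file stand out. First, si_pmu_spuravoid_pllupdate() programs every PLL control word through chipcommon's pllcontrol_addr/pllcontrol_data pair, so each value now costs two bcma_write32() calls; the hypothetical helper below (not part of the patch) makes the pattern explicit. Second, si_pmu_measure_alpclk() converts the latched count of ALP ticks per 4 ILP ticks with alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4; assuming the usual 32.768 kHz external LPO, a count of about 2440 works out to roughly 20 MHz.

#include <linux/bcma/bcma.h>
#include "chipcommon.h"		/* struct chipcregs and CHIPCREGOFFS() */

/* Hypothetical helper, not in the patch: write one PLL control word
 * through the chipcommon address/data register pair.
 */
static void pmu_pll_write(struct bcma_device *cc, u32 pll_reg, u32 value)
{
	bcma_write32(cc, CHIPCREGOFFS(pllcontrol_addr), pll_reg);
	bcma_write32(cc, CHIPCREGOFFS(pllcontrol_data), value);
}

With it, the first pair of writes in the spuravoid == 1 branch above would read pmu_pll_write(cc, PMU1_PLL0_PLLCTL0, 0x11500010);.
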
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c620640..3e39c5e0f9f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 37bb2dcc113..f0038ad7d7b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
#ifndef _BRCM_PUB_H_
#define _BRCM_PUB_H_
+#include <linux/bcma/bcma.h>
#include <brcmu_wifi.h>
#include "types.h"
#include "defs.h"
@@ -170,22 +171,6 @@ enum brcms_srom_id {
BRCMS_SROM_TSSIPOS2G,
BRCMS_SROM_TSSIPOS5G,
BRCMS_SROM_TXCHAIN,
- BRCMS_SROM_TXPID2GA0,
- BRCMS_SROM_TXPID2GA1,
- BRCMS_SROM_TXPID2GA2,
- BRCMS_SROM_TXPID2GA3,
- BRCMS_SROM_TXPID5GA0,
- BRCMS_SROM_TXPID5GA1,
- BRCMS_SROM_TXPID5GA2,
- BRCMS_SROM_TXPID5GA3,
- BRCMS_SROM_TXPID5GHA0,
- BRCMS_SROM_TXPID5GHA1,
- BRCMS_SROM_TXPID5GHA2,
- BRCMS_SROM_TXPID5GHA3,
- BRCMS_SROM_TXPID5GLA0,
- BRCMS_SROM_TXPID5GLA1,
- BRCMS_SROM_TXPID5GLA2,
- BRCMS_SROM_TXPID5GLA3,
/*
* per-path identifiers (see srom.c)
*/
@@ -225,10 +210,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA2GW2A1,
BRCMS_SROM_PA2GW2A2,
BRCMS_SROM_PA2GW2A3,
- BRCMS_SROM_PA2GW3A0,
- BRCMS_SROM_PA2GW3A1,
- BRCMS_SROM_PA2GW3A2,
- BRCMS_SROM_PA2GW3A3,
BRCMS_SROM_PA5GHW0A0,
BRCMS_SROM_PA5GHW0A1,
BRCMS_SROM_PA5GHW0A2,
@@ -241,10 +222,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GHW2A1,
BRCMS_SROM_PA5GHW2A2,
BRCMS_SROM_PA5GHW2A3,
- BRCMS_SROM_PA5GHW3A0,
- BRCMS_SROM_PA5GHW3A1,
- BRCMS_SROM_PA5GHW3A2,
- BRCMS_SROM_PA5GHW3A3,
BRCMS_SROM_PA5GLW0A0,
BRCMS_SROM_PA5GLW0A1,
BRCMS_SROM_PA5GLW0A2,
@@ -257,10 +234,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GLW2A1,
BRCMS_SROM_PA5GLW2A2,
BRCMS_SROM_PA5GLW2A3,
- BRCMS_SROM_PA5GLW3A0,
- BRCMS_SROM_PA5GLW3A1,
- BRCMS_SROM_PA5GLW3A2,
- BRCMS_SROM_PA5GLW3A3,
BRCMS_SROM_PA5GW0A0,
BRCMS_SROM_PA5GW0A1,
BRCMS_SROM_PA5GW0A2,
@@ -273,14 +246,9 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GW2A1,
BRCMS_SROM_PA5GW2A2,
BRCMS_SROM_PA5GW2A3,
- BRCMS_SROM_PA5GW3A0,
- BRCMS_SROM_PA5GW3A1,
- BRCMS_SROM_PA5GW3A2,
- BRCMS_SROM_PA5GW3A3,
};
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
-#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
/* phy types */
#define PHY_TYPE_A 0 /* Phy type A */
@@ -414,7 +382,6 @@ struct brcms_pub {
uint _nbands; /* # bands supported */
uint now; /* # elapsed seconds */
- bool promisc; /* promiscuous destination address */
bool delayed_down; /* down delayed */
bool associated; /* true:part of [I]BSS, false: not */
/* (union of stas_associated, aps_associated) */
@@ -564,15 +531,14 @@ struct brcms_antselcfg {
/* common functions for every port */
extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);
extern void brcms_c_intrson(struct brcms_c_info *wlc);
@@ -628,7 +594,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
u8 interval);
extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index e7b9dc2f273..980d578825c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -19,6 +19,7 @@
#include "types.h"
#include "d11.h"
+#include "phy_hal.h"
extern const u8 rate_info[];
extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
@@ -198,11 +199,9 @@ static inline u8 cck_rspec(u8 cck)
/* Convert encoded rate value in plcp header to numerical rates in 500 KHz
* increments */
-extern const u8 ofdm_rate_lookup[];
-
static inline u8 ofdm_phy2mac_rate(u8 rlpt)
{
- return ofdm_rate_lookup[rlpt & 0x7];
+ return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7];
}
static inline u8 cck_phy2mac_rate(u8 signal)
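
The rate.h hunk stops exporting the raw ofdm_rate_lookup[] array and fetches it through a phy accessor instead, keeping the table private to the phy code. A plausible shape for that accessor is sketched below; the array contents shown are the conventional 802.11a/g PLCP-signal-to-rate mapping in 500 kb/s units, given for illustration rather than copied from the driver.

#include <linux/types.h>

/* Illustrative table and accessor; the real definitions live in the
 * phy code, which is outside this hunk.
 */
static const u8 phy_ofdm_rate_lookup[8] = {
	96,	/* PLCP rate code 0x8 -> 48 Mb/s */
	48,	/* 0x9 -> 24 Mb/s */
	24,	/* 0xa -> 12 Mb/s */
	12,	/* 0xb ->  6 Mb/s */
	108,	/* 0xc -> 54 Mb/s */
	72,	/* 0xd -> 36 Mb/s */
	36,	/* 0xe -> 18 Mb/s */
	18,	/* 0xf ->  9 Mb/s */
};

const u8 *wlc_phy_get_ofdm_rate_lookup(void)
{
	return phy_ofdm_rate_lookup;
}
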
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index 99f791048e8..61092156755 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -28,6 +28,7 @@
#include "aiutils.h"
#include "otp.h"
#include "srom.h"
+#include "soc.h"
/*
* SROM CRC8 polynomial value:
@@ -62,9 +63,6 @@
#define SROM_MACHI_ET1 42
#define SROM_MACMID_ET1 43
#define SROM_MACLO_ET1 44
-#define SROM3_MACHI 37
-#define SROM3_MACMID 38
-#define SROM3_MACLO 39
#define SROM_BXARSSI2G 40
#define SROM_BXARSSI5G 41
@@ -101,7 +99,6 @@
#define SROM_BFL 57
#define SROM_BFL2 28
-#define SROM3_BFL2 61
#define SROM_AG10 58
@@ -109,99 +106,16 @@
#define SROM_OPO 60
-#define SROM3_LEDDC 62
-
#define SROM_CRCREV 63
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
#define SROM4_WORDS 220
-#define SROM4_SIGN 32
-#define SROM4_SIGNATURE 0x5372
-
-#define SROM4_BREV 33
-
-#define SROM4_BFL0 34
-#define SROM4_BFL1 35
-#define SROM4_BFL2 36
-#define SROM4_BFL3 37
-#define SROM5_BFL0 37
-#define SROM5_BFL1 38
-#define SROM5_BFL2 39
-#define SROM5_BFL3 40
-
-#define SROM4_MACHI 38
-#define SROM4_MACMID 39
-#define SROM4_MACLO 40
-#define SROM5_MACHI 41
-#define SROM5_MACMID 42
-#define SROM5_MACLO 43
-
-#define SROM4_CCODE 41
-#define SROM4_REGREV 42
-#define SROM5_CCODE 34
-#define SROM5_REGREV 35
-
-#define SROM4_LEDBH10 43
-#define SROM4_LEDBH32 44
-#define SROM5_LEDBH10 59
-#define SROM5_LEDBH32 60
-
-#define SROM4_LEDDC 45
-#define SROM5_LEDDC 45
-
-#define SROM4_AA 46
-
-#define SROM4_AG10 47
-#define SROM4_AG32 48
-
-#define SROM4_TXPID2G 49
-#define SROM4_TXPID5G 51
-#define SROM4_TXPID5GL 53
-#define SROM4_TXPID5GH 55
-
-#define SROM4_TXRXC 61
#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_TXCHAIN_SHIFT 0
#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_RXCHAIN_SHIFT 4
#define SROM4_SWITCH_MASK 0xff00
-#define SROM4_SWITCH_SHIFT 8
/* Per-path fields */
#define MAX_PATH_SROM 4
-#define SROM4_PATH0 64
-#define SROM4_PATH1 87
-#define SROM4_PATH2 110
-#define SROM4_PATH3 133
-
-#define SROM4_2G_ITT_MAXP 0
-#define SROM4_2G_PA 1
-#define SROM4_5G_ITT_MAXP 5
-#define SROM4_5GLH_MAXP 6
-#define SROM4_5G_PA 7
-#define SROM4_5GL_PA 11
-#define SROM4_5GH_PA 15
-
-/* All the miriad power offsets */
-#define SROM4_2G_CCKPO 156
-#define SROM4_2G_OFDMPO 157
-#define SROM4_5G_OFDMPO 159
-#define SROM4_5GL_OFDMPO 161
-#define SROM4_5GH_OFDMPO 163
-#define SROM4_2G_MCSPO 165
-#define SROM4_5G_MCSPO 173
-#define SROM4_5GL_MCSPO 181
-#define SROM4_5GH_MCSPO 189
-#define SROM4_CDDPO 197
-#define SROM4_STBCPO 198
-#define SROM4_BW40PO 199
-#define SROM4_BWDUPPO 200
#define SROM4_CRCREV 219
@@ -424,103 +338,32 @@ struct brcms_varbuf {
static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
0xffff},
- {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
- SROM_BR_MASK},
- {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
{BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
{BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
{BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
{BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
{BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
- {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
- {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
- {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
{BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
{BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
{BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
{BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
{BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
- {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
- {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
- {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
- {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
{BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
{BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
{BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
{BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
{BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
{BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
- {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
{BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
- {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
- {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
{BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
- {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
- {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
- {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
- {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
{BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
{BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
{BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
{BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
- {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
- {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
- {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
- {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
- {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
- {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
- {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
- {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
- {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
- {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
- {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
- {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
- {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
{BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
{BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
{BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
@@ -534,40 +377,20 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
{BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
{BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
- {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
- {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
- {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
{BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
{BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
{BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
{BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
- {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
- {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
- {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
{BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
{BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
{BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
{BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
- {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
- {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
- {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
{BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
{BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
{BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
{BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
- {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
{BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
{BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_TXCHAIN_MASK},
- {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_RXCHAIN_MASK},
- {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_SWITCH_MASK},
{BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
SROM4_TXCHAIN_MASK},
{BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
@@ -594,43 +417,11 @@ static const struct brcms_sromvar pci_sromvars[] = {
SROM8_FEM_ANTSWLUT_MASK},
{BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
{BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
- {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
- {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
- {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
- {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
- {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
- {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
- {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
- {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
- {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
- {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
- {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
-
- {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+
{BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
{BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
- {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
- 0xffff},
- {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
- 0xffff},
{BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
0xffff},
- {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
- 0xffff},
{BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
0x01ff},
{BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
@@ -650,16 +441,7 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
0x00ff},
- {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
{BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
@@ -668,38 +450,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
{BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
{BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
{BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
@@ -732,10 +482,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
{BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
{BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
- {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
- {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
- {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
{BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
{BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
{BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
@@ -811,34 +557,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
};
static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
- {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
- {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
- {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
- {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
- {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
- {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
- {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
- {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
- {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
- {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
- 0xffff},
- {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
- {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
- 0xffff},
{BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
{BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
{BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
@@ -868,24 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
* shared between devices. */
static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-static u16 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
- if (sih->ccrev < 32)
- return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
- if (sih->cccaps & CC_CAP_SROM)
- return (u16 __iomem *)
- (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
-
- return NULL;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
static uint mask_shift(u16 mask)
{
uint i;
@@ -906,18 +606,16 @@ static uint mask_width(u16 mask)
return 0;
}
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
+static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+ while (nwords--)
+ *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
}
-static inline void htol16_buf(u16 *buf, unsigned int size)
+static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+ while (nwords--)
+ *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
}
/*
@@ -929,11 +627,14 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
struct brcms_srom_list_head *entry;
enum brcms_srom_id id;
u16 w;
- u32 val;
+ u32 val = 0;
const struct brcms_sromvar *srv;
uint width;
uint flags;
u32 sr = (1 << sromrev);
+ uint p;
+ uint pb = SROM8_PATH0;
+ const uint psz = SROM8_PATH1 - SROM8_PATH0;
/* first store the srom revision */
entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
@@ -1031,47 +732,34 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
list_add(&entry->var_list, var_list);
}
- if (sromrev >= 4) {
- /* Do per-path variables */
- uint p, pb, psz;
-
- if (sromrev >= 8) {
- pb = SROM8_PATH0;
- psz = SROM8_PATH1 - SROM8_PATH0;
- } else {
- pb = SROM4_PATH0;
- psz = SROM4_PATH1 - SROM4_PATH0;
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars;
- srv->varid != BRCMS_SROM_NULL; srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars;
+ srv->varid != BRCMS_SROM_NULL; srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
- if (srv->flags & SRFL_NOVAR)
- continue;
+ if (srv->flags & SRFL_NOVAR)
+ continue;
- w = srom[pb + srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
+ w = srom[pb + srv->off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
- /* Cheating: no per-path var is more than
- * 1 word */
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
+ /* Cheating: no per-path var is more than
+ * 1 word */
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
- entry =
- kzalloc(sizeof(struct brcms_srom_list_head),
- GFP_KERNEL);
- entry->varid = srv->varid+p;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = val;
- list_add(&entry->var_list, var_list);
- }
- pb += psz;
+ entry =
+ kzalloc(sizeof(struct brcms_srom_list_head),
+ GFP_KERNEL);
+ entry->varid = srv->varid+p;
+ entry->var_type = BRCMS_SROM_UNUMBER;
+ entry->uval = val;
+ list_add(&entry->var_list, var_list);
}
+ pb += psz;
}
}
@@ -1080,41 +768,48 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
* Return 0 on success, nonzero on error.
*/
static int
-sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
{
int err = 0;
uint i;
+ u8 *bbuf = (u8 *)buf; /* byte buffer */
+ uint nbytes = nwords << 1;
+ struct bcma_device *core;
+ uint sprom_offset;
+
+ /* determine core to read */
+ if (ai_get_ccrev(sih) < 32) {
+ core = ai_findcore(sih, BCMA_CORE_80211, 0);
+ sprom_offset = PCI_BAR0_SPROM_OFFSET;
+ } else {
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sprom_offset = CHIPCREGOFFS(sromotp);
+ }
- /* read the sprom */
- for (i = 0; i < nwords; i++)
- buf[i] = R_REG(&sprom[wordoff + i]);
-
- if (check_crc) {
-
- if (buf[0] == 0xffff)
- /*
- * The hardware thinks that an srom that starts with
- * 0xffff is blank, regardless of the rest of the
- * content, so declare it bad.
- */
- return -ENODATA;
+ /* read the sprom in bytes */
+ for (i = 0; i < nbytes; i++)
+ bbuf[i] = bcma_read8(core, sprom_offset+i);
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, nwords * 2);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
- CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE(brcms_srom_crc8_table))
- /* DBG only pci always read srom4 first, then srom8/9 */
- err = -EIO;
+ if (buf[0] == 0xffff)
+ /*
+ * The hardware thinks that an srom that starts with
+ * 0xffff is blank, regardless of the rest of the
+ * content, so declare it bad.
+ */
+ return -ENODATA;
+ if (check_crc &&
+ crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+ err = -EIO;
+ else
/* now correct the endianness of the byte array */
- ltoh16_buf(buf, nwords * 2);
- }
+ le16_to_cpu_buf(buf, nwords);
+
return err;
}
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
{
u8 *otp;
uint sz = OTP_SZ_MAX / 2; /* size in words */
@@ -1126,7 +821,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
- memcpy(buf, otp, bufsz);
+ sz = min_t(uint, sz, nwords);
+ memcpy(buf, otp, sz * 2);
kfree(otp);
@@ -1139,13 +835,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
return -ENODATA;
/* fixup the endianness so crc8 will pass */
- htol16_buf(buf, bufsz);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+ cpu_to_le16_buf(buf, sz);
+ if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
err = -EIO;
-
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, bufsz);
+ else
+ /* now correct the endianness of the byte array */
+ le16_to_cpu_buf(buf, sz);
return err;
}
@@ -1154,10 +850,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
* Initialize nonvolatile variable table from sprom.
* Return 0 on success, nonzero on error.
*/
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
{
u16 *srom;
- u16 __iomem *sromwindow;
u8 sromrev = 0;
u32 sr;
int err = 0;
@@ -1169,33 +864,17 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
if (!srom)
return -ENOMEM;
- sromwindow = srom_window_address(sih, curmap);
-
crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
- true);
-
- if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
- (((sih->buscoretype == PCIE_CORE_ID)
- && (sih->buscorerev >= 6))
- || ((sih->buscoretype == PCI_CORE_ID)
- && (sih->buscorerev >= 0xe)))) {
- /* sromrev >= 4, read more */
- err = sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else if (err == 0) {
- /* srom is good and is rev < 4 */
+ err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
+
+ if (err == 0)
+ /* srom read and passed crc */
/* top word of sprom contains version and crc8 */
- sromrev = srom[SROM_CRCREV] & 0xff;
- /* bcm4401 sroms misprogrammed */
- if (sromrev == 0x10)
- sromrev = 1;
- }
+ sromrev = srom[SROM4_CRCREV] & 0xff;
} else {
/* Use OTP if SPROM not available */
- err = otp_read_pci(sih, srom, SROM_MAX);
+ err = otp_read_pci(sih, srom, SROM4_WORDS);
if (err == 0)
/* OTP only contain SROM rev8/rev9 for now */
sromrev = srom[SROM4_CRCREV] & 0xff;
@@ -1208,10 +887,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
sr = 1 << sromrev;
/*
- * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
- * 9
+ * srom version check: Current valid versions: 8, 9
*/
- if ((sr & 0x33e) == 0) {
+ if ((sr & 0x300) == 0) {
err = -EINVAL;
goto errout;
}
@@ -1238,21 +916,6 @@ void srom_free_vars(struct si_pub *sih)
kfree(entry);
}
}
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
- uint len;
-
- len = 0;
-
- if (curmap != NULL)
- return initvars_srom_pci(sih, curmap);
-
- return -EINVAL;
-}
/*
* Search the name=value vars for a specific one and return its value.
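
The reworked sprom_read_pci() above reads the SPROM as raw bytes through bcma_read8(), checks CRC-8 over those little-endian bytes, and only then swaps the buffer to host order; the renamed helpers take a word count instead of a byte size. A condensed, self-contained sketch of that validate-then-swap sequence follows; the helper name is invented, everything else mirrors calls visible in the diff.

#include <linux/crc8.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch: validate an SROM image read as raw little-endian bytes, then
 * convert it to host byte order in place. 'table' must have been filled
 * with crc8_populate_lsb(table, SROM_CRC8_POLY) beforehand, as the
 * driver does in srom_var_init().
 */
static int srom_check_and_fixup(const u8 *table, u16 *buf, uint nwords)
{
	uint i;

	if (buf[0] == 0xffff)	/* an SROM starting with 0xffff is blank */
		return -ENODATA;

	if (crc8(table, (u8 *)buf, nwords * 2, CRC8_INIT_VALUE) !=
	    CRC8_GOOD_VALUE(table))
		return -EIO;

	for (i = 0; i < nwords; i++)	/* le16_to_cpu_buf() equivalent */
		buf[i] = le16_to_cpu(*(__le16 *)&buf[i]);

	return 0;
}
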
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index 708c43ff51c..f2a58f262c9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,15 +20,10 @@
#include "types.h"
/* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
extern void srom_free_vars(struct si_pub *sih);
extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
-/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
- * and extract from it into name=value pairs
- */
-extern int srom_parsecis(u8 **pcis, uint ciscnt,
- char **vars, uint *count);
#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b0746..e11ae83111e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@ do { \
wiphy_err(dev, "%s: " fmt, __func__, ##args); \
} while (0)
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((u32 __iomem *)(r)); \
- break; \
- } \
- __osl_v; \
- })
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- writel((u32)(v), (u32 __iomem *)(r)); \
- break; \
- } \
- } while (0)
-
#ifdef CONFIG_BCM47XX
/*
* bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
* transactions. As a fix, a read after write is performed on certain places
* in the code. Older chips and the newer 5357 family don't require this fix.
*/
-#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+ ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
#else
-#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
#endif /* CONFIG_BCM47XX */
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
/* multi-bool data type: set of bools, mbool is true if any is set */
/* set one bool */
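
With R_REG()/W_REG() gone, the access width is no longer deduced from the type of a struct field behind an ioremap'ed pointer; callers name it explicitly through bcma_read8/16/32() plus a byte offset, and the BCM47XX posted-write workaround keeps its old shape as bcma_wflush16(). A small sketch of a 16-bit update using the macro defined in the hunk above; the function itself is illustrative, not driver code:

#include <linux/bcma/bcma.h>
/* bcma_wflush16() comes from the driver's types.h shown above; on
 * BCM47XX it appends a read-back so the write is not left posted.
 */

static u16 reg16_write_and_verify(struct bcma_device *core, u16 offset,
				  u16 val)
{
	bcma_wflush16(core, offset, val);
	return bcma_read16(core, offset);
}
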
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index f27c4891082..b7537f70a79 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/module.h>
+
#include <brcmu_utils.h>
MODULE_AUTHOR("Broadcom Corporation");
@@ -40,74 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
- struct sk_buff *nskb;
- int nest = 0;
-
- /* perversion: we use skb->next to chain multi-skb packets */
- while (skb) {
- nskb = skb->next;
- skb->next = NULL;
-
- if (skb->destructor)
- /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
- * destructor exists
- */
- dev_kfree_skb_any(skb);
- else
- /* can free immediately (even in_irq()) if destructor
- * does not exist
- */
- dev_kfree_skb(skb);
-
- nest++;
- skb = nskb;
- }
+ WARN_ON(skb->next);
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
- unsigned char *buf)
-{
- uint n, ret = 0;
-
- /* skip 'offset' bytes */
- for (; p && offset; p = p->next) {
- if (offset < (uint) (p->len))
- break;
- offset -= p->len;
- }
-
- if (!p)
- return 0;
-
- /* copy the data */
- for (; p && len; p = p->next) {
- n = min((uint) (p->len) - offset, (uint) len);
- memcpy(p->data + offset, buf, n);
- buf += n;
- len -= n;
- ret += n;
- offset = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
- uint total;
-
- total = 0;
- for (; p; p = p->next)
- total += p->len;
- return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
@@ -115,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen);
struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head)
- q->tail->prev = p;
- else
- q->head = p;
-
- q->tail = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_tail(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -142,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq);
struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head == NULL)
- q->tail = p;
-
- p->prev = q->head;
- q->head = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_head(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -167,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head);
struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
-
pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq);
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev;
-
- q = &pq->q[prec];
+ struct sk_buff_head *q;
+ struct sk_buff *p;
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue_tail(q);
if (p == NULL)
return NULL;
- for (prev = NULL; p != q->tail; p = p->prev)
- prev = p;
-
- if (prev)
- prev->prev = NULL;
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
pq->len--;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@@ -222,31 +131,17 @@ void
brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev = NULL;
+ struct sk_buff_head *q;
+ struct sk_buff *p, *next;
- q = &pq->q[prec];
- p = q->head;
- while (p) {
+ q = &pq->q[prec].skblist;
+ skb_queue_walk_safe(q, p, next) {
if (fn == NULL || (*fn) (p, arg)) {
- bool head = (p == q->head);
- if (head)
- q->head = p->prev;
- else
- prev->prev = p->prev;
- p->prev = NULL;
+ skb_unlink(p, q);
brcmu_pkt_buf_free_skb(p);
- q->len--;
pq->len--;
- p = (head ? q->head : prev->prev);
- } else {
- prev = p;
- p = p->prev;
}
}
-
- if (q->head == NULL)
- q->tail = NULL;
}
EXPORT_SYMBOL(brcmu_pktq_pflush);
@@ -271,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
pq->max = (u16) max_len;
- for (prec = 0; prec < num_prec; prec++)
+ for (prec = 0; prec < num_prec; prec++) {
pq->q[prec].max = pq->max;
+ skb_queue_head_init(&pq->q[prec].skblist);
+ }
}
EXPORT_SYMBOL(brcmu_pktq_init);
@@ -284,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
+ if (!skb_queue_empty(&pq->q[prec].skblist))
break;
if (prec_out)
*prec_out = prec;
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
EXPORT_SYMBOL(brcmu_pktq_peek_tail);
@@ -303,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
- len += pq->q[prec].len;
+ len += pq->q[prec].skblist.qlen;
return len;
}
@@ -313,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen);
struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
int *prec_out)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
int prec;
if (pq->len == 0)
return NULL;
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ while ((prec = pq->hi_prec) > 0 &&
+ skb_queue_empty(&pq->q[prec].skblist))
pq->hi_prec--;
- while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ while ((prec_bmp & (1 << prec)) == 0 ||
+ skb_queue_empty(&pq->q[prec].skblist))
if (prec-- == 0)
return NULL;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
+ pq->len--;
if (prec_out)
*prec_out = prec;
- pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
@@ -364,23 +254,3 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0)
}
EXPORT_SYMBOL(brcmu_prpkt);
#endif /* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-/*
- * print bytes formatted as hex to a string. return the resulting
- * string length
- */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
- int i;
- char *p = str;
- const u8 *src = (const u8 *)bytes;
-
- for (i = 0; i < len; i++) {
- p += snprintf(p, 3, "%02X", *src);
- src++;
- }
- return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif /* defined(BCMDBG) */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 7d0f46e0eb9..ad249a0b473 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -65,9 +65,7 @@
#define ETHER_ADDR_STR_LEN 18
struct pktq_prec {
- struct sk_buff *head; /* first packet to dequeue */
- struct sk_buff *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
+ struct sk_buff_head skblist;
u16 max; /* maximum number of queued packets */
};
@@ -88,32 +86,32 @@ struct pktq {
static inline int pktq_plen(struct pktq *pq, int prec)
{
- return pq->q[prec].len;
+ return pq->q[prec].skblist.qlen;
}
static inline int pktq_pavail(struct pktq *pq, int prec)
{
- return pq->q[prec].max - pq->q[prec].len;
+ return pq->q[prec].max - pq->q[prec].skblist.qlen;
}
static inline bool pktq_pfull(struct pktq *pq, int prec)
{
- return pq->q[prec].len >= pq->q[prec].max;
+ return pq->q[prec].skblist.qlen >= pq->q[prec].max;
}
static inline bool pktq_pempty(struct pktq *pq, int prec)
{
- return pq->q[prec].len == 0;
+ return skb_queue_empty(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
{
- return pq->q[prec].head;
+ return skb_peek(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
{
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@@ -172,24 +170,16 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
- uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
/* ip address */
struct ipv4_addr;
+
+/* externs */
+/* format/print */
#ifdef BCMDBG
extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* BCMDBG */
-/* externs */
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc39e64..f96834a7c05 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
#include "defs.h" /* for PAD macro */
+#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field)
+
struct chipcregs {
u32 chipid; /* 0x0 */
u32 capabilities;
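
CHIPCREGOFFS() keeps struct chipcregs as the single description of the chipcommon register layout while the actual accesses go through bcma: any field name becomes a byte offset and the caller chooses the width. A one-function example mirroring calls already visible in the pmu.c hunks:

#include <linux/bcma/bcma.h>
#include "chipcommon.h"		/* struct chipcregs, CHIPCREGOFFS() */

/* Example only: read a chipcommon register by field name. */
static u32 chipc_read_pmustatus(struct bcma_device *cc)
{
	return bcma_read32(cc, CHIPCREGOFFS(pmustatus));
}
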
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index 1e5f310af1e..f0d8c04a9c8 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -62,7 +62,6 @@
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
-#define WL_RADIO_MPC_DISABLE (1<<2)
/* some countries don't support any channel */
#define WL_RADIO_COUNTRY_DISABLE (1<<3)
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4fcb956ad9e..4e9b7e4827e 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -77,8 +77,9 @@
#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-/* Default component, in ai chips it maps all unused address ranges */
-#define DEF_AI_COMP 0xfff
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
+ * maps all unused address ranges
+ */
/* Common core control flags */
#define SICF_BIST_EN 0x8000
@@ -87,4 +88,11 @@
#define SICF_FGC 0x0002
#define SICF_CLOCK_EN 0x0001
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
#endif /* _BRCM_SOC_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad19511..89e9d3a78c3 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
+ "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+ 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+ PCMCIA_DEVICE_PROD_ID123(
"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 045a93645a3..18054d9c668 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
- snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+ strlcpy(info->driver, "hostap", sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff);
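The hunk above (like the ipw2100/ipw2200 hunks further down) replaces strcpy()/strncpy() with strlcpy(), which always NUL-terminates and never writes more than the destination size. A small user-space sketch of that behaviour; my_strlcpy() is only a stand-in here, since strlcpy() is a kernel/BSD API and not part of glibc:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';		/* always terminated, unlike strncpy() */
	}
	return len;			/* length the caller wanted to copy */
}

int main(void)
{
	char driver[8];

	my_strlcpy(driver, "hostap_pci", sizeof(driver));
	printf("'%s' (truncated but NUL-terminated)\n", driver);
	return 0;
}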
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 127e9c63bea..a0e5c21d365 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strcpy(info->bus_info, pci_name(priv->pci_dev));
+ strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 ipw2100_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 99a710dfe77..018a8deb88a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = {
#define ipw2200_bg_rates (ipw2200_rates + 0)
#define ipw2200_num_bg_rates 12
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it '30'),
+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+ (((x) <= 14) ? \
+ (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+ ((x) + 1000) * 5)
+
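A quick stand-alone check of the macro added above: 2.4 GHz channels map to 2407 + 5 * ch, channel 14 is the 2484 MHz special case, and anything above 14 is treated as a 5 GHz channel number:

#include <stdio.h>

#define ieee80211chan2mhz(x) \
	(((x) <= 14) ? \
	 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
	 ((x) + 1000) * 5)

int main(void)
{
	printf("ch 1  -> %d MHz\n", ieee80211chan2mhz(1));	/* 2412 */
	printf("ch 14 -> %d MHz\n", ieee80211chan2mhz(14));	/* 2484 */
	printf("ch 36 -> %d MHz\n", ieee80211chan2mhz(36));	/* 5180 */
	return 0;
}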
#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
@@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strcpy(info->bus_info, pci_name(p->pci_dev));
+ strlcpy(info->bus_info, pci_name(p->pci_dev),
+ sizeof(info->bus_info));
info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
}
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 70f5586d96b..8874588fb92 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -66,16 +66,8 @@ extern u32 libipw_debug_level;
do { if (libipw_debug_level & (level)) \
printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return (libipw_debug_level & level) && net_ratelimit();
-}
#else
#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return false;
-}
#endif /* CONFIG_LIBIPW_DEBUG */
/*
@@ -813,9 +805,6 @@ struct libipw_device {
/* WEP and other encryption related settings at the device level */
int open_wep; /* Set to 1 to allow unencrypted frames */
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes */
-
/* If the host performs {en,de}cryption, then set to 1 */
int host_encrypt;
int host_encrypt_msdu;
@@ -868,7 +857,6 @@ struct libipw_device {
struct libipw_security * sec);
netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb,
struct net_device * dev, int pri);
- int (*reset_port) (struct net_device * dev);
int (*is_queue_full) (struct net_device * dev, int pri);
int (*handle_management) (struct net_device * dev,
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 6623e505225..1571505b1a3 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -474,17 +474,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(dev, &sec);
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
return 0;
}
@@ -688,20 +677,6 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- /*
- * Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack.
- */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
-
return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 00000000000..5e1a19fd354
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(il->_3945.stats.flag));
+ if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+ return p;
+}
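Every read handler in this new debugfs file follows the pattern seen in il3945_stats_flag() above: accumulate pos += scnprintf(buf + pos, bufsz - pos, ...) and hand the result to simple_read_from_buffer(). scnprintf() returns the number of characters actually written (unlike snprintf(), which reports the length it would have needed), so pos can never run past bufsz. A user-space sketch of the accumulation, with a stand-in for the kernel's scnprintf():

#include <stdarg.h>
#include <stdio.h>

/* user-space stand-in: clamp the return value the way the kernel scnprintf() does */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	if ((size_t)n >= size)
		n = size ? (int)size - 1 : 0;
	return n;
}

int main(void)
{
	char buf[32];
	int pos = 0, bufsz = sizeof(buf);

	pos += my_scnprintf(buf + pos, bufsz - pos, "Statistics Flag(0x%X):\n", 3);
	pos += my_scnprintf(buf + pos, bufsz - pos,
			    "\tOperational Frequency: %s\n", "2.4 GHz");
	printf("used %d of %d bytes:\n%s", pos, bufsz, buf);
	return 0;
}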
+
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct iwl39_stats_rx_phy) * 40 +
+ sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_stats_rx_non_phy *general, *accum_general;
+ struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ ofdm = &il->_3945.stats.rx.ofdm;
+ cck = &il->_3945.stats.rx.cck;
+ general = &il->_3945.stats.rx.general;
+ accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+ accum_cck = &il->_3945.accum_stats.rx.cck;
+ accum_general = &il->_3945.accum_stats.rx.general;
+ delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+ delta_cck = &il->_3945.delta_stats.rx.cck;
+ delta_general = &il->_3945.delta_stats.rx.general;
+ max_ofdm = &il->_3945.max_delta.rx.ofdm;
+ max_cck = &il->_3945.max_delta.rx.cck;
+ max_general = &il->_3945.max_delta.rx.general;
+
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+		      "accumulative     delta         max\n",
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+	/* we do not map meta data ... so we can safely access the address to
+	 * provide to the unmap command */
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+		      "accumulative     delta         max\n",
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+		      "accumulative     delta         max\n",
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ tx = &il->_3945.stats.tx;
+ accum_tx = &il->_3945.accum_stats.tx;
+ delta_tx = &il->_3945.delta_stats.tx;
+ max_tx = &il->_3945.max_delta.tx;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+		      "accumulative     delta         max\n",
+ "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_stats_general *general, *accum_general;
+ struct iwl39_stats_general *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+	 * The statistic information displayed here is based on
+	 * the last stats notification from uCode and
+	 * might not reflect the current uCode activity.
+ */
+ general = &il->_3945.stats.general;
+ dbg = &il->_3945.stats.general.dbg;
+ div = &il->_3945.stats.general.div;
+ accum_general = &il->_3945.accum_stats.general;
+ delta_general = &il->_3945.delta_stats.general;
+ max_general = &il->_3945.max_delta.general;
+ accum_dbg = &il->_3945.accum_stats.general.dbg;
+ delta_dbg = &il->_3945.delta_stats.general.dbg;
+ max_dbg = &il->_3945.max_delta.general.dbg;
+ accum_div = &il->_3945.accum_stats.general.div;
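A toy model (not driver code) of the read/write idx convention described in the comment above; QUEUE_SIZE is a stand-in for the driver's RX_QUEUE_SIZE, and the two predicates simply mirror the stated conditions, empty when WRITE == READ - 1 and full when WRITE == READ:

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_SIZE 32			/* stand-in for RX_QUEUE_SIZE */
#define QUEUE_MASK (QUEUE_SIZE - 1)

struct toy_rxq {
	unsigned int read;	/* advanced by the firmware as it fills buffers */
	unsigned int write;	/* advanced by the driver as it restocks buffers */
};

static bool rxq_empty(const struct toy_rxq *q)
{
	return q->write == ((q->read - 1) & QUEUE_MASK);
}

static bool rxq_full(const struct toy_rxq *q)
{
	return q->write == q->read;
}

int main(void)
{
	/* as above: at init WRITE = READ - 1 (wrapped), i.e. no data yet */
	struct toy_rxq q = { .read = 0, .write = QUEUE_SIZE - 1 };

	printf("after init:  empty=%d full=%d\n", rxq_empty(&q), rxq_full(&q));

	q.write = q.read;	/* WRITE caught up with READ: no free slots left */
	printf("write==read: empty=%d full=%d\n", rxq_empty(&q), rxq_full(&q));
	return 0;
}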
+ delta_div = &il->_3945.delta_stats.general.div;
+ max_div = &il->_3945.max_delta.general.div;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+		      "accumulative     delta         max\n",
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 00000000000..54b2d391e91
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3976 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+ .sw_crypto = 1,
+ .restart_fw = 1,
+ .disable_hw_scan = 1,
+ /* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN - Force MAIN antenna
+ * IL_ANTENNA_AUX - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ switch (il3945_mod_params.antenna) {
+ case IL_ANTENNA_DIVERSITY:
+ return 0;
+
+ case IL_ANTENNA_MAIN:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+ case IL_ANTENNA_AUX:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ }
+
+ /* bad antenna selector value */
+ IL_ERR("Bad antenna selector value (0x%x)\n",
+ il3945_mod_params.antenna);
+
+ return 0; /* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ int ret;
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+ if (sta_id == il->ctx.bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = keyconf->keyidx;
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ D_INFO("hwcrypto: modify ucode station key info\n");
+
+ ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ D_INFO("hwcrypto: clear ucode station key info\n");
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ int ret = 0;
+
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+ int ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return -EOPNOTSUPP;
+
+ IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+ return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il3945_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+
+ if (!il_is_associated(il) || !il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ unsigned int frame_size;
+ int rc;
+ u8 rate;
+
+ frame = il3945_get_free_frame(il);
+
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ rate = il_get_lowest_plcp(il, &il->ctx);
+
+ frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il3945_free_frame(il, frame);
+
+ return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+ if (il->_3945.shared_virt)
+ dma_free_coherent(&il->pci_dev->dev,
+ sizeof(struct il3945_shared),
+ il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_device_cmd *cmd,
+ struct sk_buff *skb_frag, int sta_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+ tx_cmd->sec_ctl = 0;
+
+ switch (keyinfo->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ TX_CMD_SEC_WEP | (info->control.hw_key->
+ hw_key_idx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT;
+
+ memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ info->control.hw_key->hw_key_idx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+ break;
+ }
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ __le32 tx_flags = tx_cmd->tx_flags;
+ __le16 fc = hdr->frame_control;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il3945_tx_cmd *tx_cmd;
+ struct il_tx_queue *txq = NULL;
+ struct il_queue *q = NULL;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ int txq_id = skb_get_queue_mapping(skb);
+ u16 len, idx, hdr_len;
+ u8 id;
+ u8 unicast;
+ u8 sta_id;
+ u8 tid = 0;
+ __le16 fc;
+ u8 wait_write_ptr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+ IL_INVALID_RATE) {
+ IL_ERR("ERROR: No TX rate available.\n");
+ goto drop_unlock;
+ }
+
+ unicast = !is_multicast_ether_addr(hdr->addr1);
+ id = 0;
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop;
+ }
+
+ D_RATE("station Id %d\n", sta_id);
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop;
+ }
+
+ /* Descriptor for chosen Tx queue */
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if ((il_queue_space(q) < q->high_mark))
+ goto drop;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = &il->ctx;
+
+ /* Init first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+ tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (info->control.hw_key)
+ il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+ il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ il_dbg_log_tx_data_frame(il, len, hdr);
+ il_update_stats(il, true, fc, len);
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len =
+ sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+ hdr_len;
+ len = (len + 3) & ~3;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+ /* we do not map meta data ... so we can safely access address to
+ * provide to unmap command*/
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+ 0);
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ len = skb->len - hdr_len;
+ if (len) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ len, 0, U32_PAD(len));
+ }
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ il_stop_queue(il, txq);
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+drop:
+ return -1;
+}
+
+static int
+il3945_get_measurement(struct il_priv *il,
+ struct ieee80211_measurement_params *params, u8 type)
+{
+ struct il_spectrum_cmd spectrum;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SPECTRUM_MEASUREMENT,
+ .data = (void *)&spectrum,
+ .flags = CMD_WANT_SKB,
+ };
+ u32 add_time = le64_to_cpu(params->start_time);
+ int rc;
+ int spectrum_resp_status;
+ int duration = le16_to_cpu(params->duration);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (il_is_associated(il))
+ add_time =
+ il_usecs_to_beacons(il,
+ le64_to_cpu(params->start_time) -
+ il->_3945.last_tsf,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+
+ memset(&spectrum, 0, sizeof(spectrum));
+
+ spectrum.channel_count = cpu_to_le16(1);
+ spectrum.flags =
+ RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+ spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+ cmd.len = sizeof(spectrum);
+ spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+ if (il_is_associated(il))
+ spectrum.start_time =
+ il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+ else
+ spectrum.start_time = 0;
+
+ spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+ spectrum.channels[0].channel = params->channel;
+ spectrum.channels[0].type = type;
+ if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+ spectrum.flags |=
+ RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+ switch (spectrum_resp_status) {
+ case 0: /* Command will be handled */
+ if (pkt->u.spectrum.id != 0xff) {
+ D_INFO("Replaced existing measurement: %d\n",
+ pkt->u.spectrum.id);
+ il->measurement_status &= ~MEASUREMENT_READY;
+ }
+ il->measurement_status |= MEASUREMENT_ACTIVE;
+ rc = 0;
+ break;
+
+ case 1: /* Command will not be handled */
+ rc = -EAGAIN;
+ break;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ il3945_disable_events(il);
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+ D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = beacon->beacon_notify_hdr.rate;
+
+ D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ IL_WARN("Card state received: HW:%s SW:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il3945_hdl_alive;
+ il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * stats request from the host as well as for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il3945_hdl_c_stats;
+ il->handlers[N_STATS] = il3945_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+ il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+ /* Set up hardware specific Rx handlers */
+ il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il3945_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+ int write;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ write = rxq->write & ~0x7;
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7) ||
+ abs(rxq->write - rxq->read) > 7) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il3945_rx_replenish - Move all used packet from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("Failed to allocate SKB buffer.\n");
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to allocate SKB buffer with %0x."
+ "Only %u free buffers remaining.\n",
+ priority, rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ break;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ rxb->page = page;
+ /* Get physical address of RB/SKB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+ struct il_priv *il = data;
+ unsigned long flags;
+
+ il3945_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il3945_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+ il3945_rx_allocate(il, GFP_ATOMIC);
+
+ il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the pool entries and, for every non-NULL page,
+ * unmaps and frees it.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/* 0 1 2 3 4 5 6 7 8 9 */
+ 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+ 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+ 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+ 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+ 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+ 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+ 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+ 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+ 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+ 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ * (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
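+/* Worked example: sig_ratio = 250 gives 20 + ratio2dB[25] = 20 + 28 = 48 dB,
+ * which matches 20*log10(250) ~= 48 dB. */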
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
+ return 60;
+
+ /* 100:1 or higher, divide by 10 and use table,
+ * add 20 dB to make up for divide by 10 */
+ if (sig_ratio >= 100)
+ return 20 + (int)ratio2dB[sig_ratio / 10];
+
+ /* We shouldn't see this */
+ if (sig_ratio < 1)
+ return 0;
+
+ /* Use table for ratios 1:1 - 99:1 */
+ return (int)ratio2dB[sig_ratio];
+}
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty = 0;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
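+ /* With more than half the queue empty, restock periodically (about
+ * every 8 handled buffers) inside the loop below rather than waiting
+ * until all entries have been processed. */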
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il3945_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.
+ rx_page_order, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il3945_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Record where the driver will resume processing on the next pass */
+ rxq->read = i;
+ if (fill_rx)
+ il3945_rx_replenish_now(il);
+ else
+ il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+ switch (i) {
+ case 1:
+ return "FAIL";
+ case 2:
+ return "BAD_PARAM";
+ case 3:
+ return "BAD_CHECKSUM";
+ case 4:
+ return "NMI_INTERRUPT";
+ case 5:
+ return "SYSASSERT";
+ case 6:
+ return "FATAL_ERROR";
+ }
+
+ return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
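+/* Each error-log entry is seven 32-bit words:
+ * desc, time, blink1, blink2, ilink1, ilink2, data1 */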
+
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+ u32 i;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X\n", base);
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ IL_ERR("Desc Time asrtPC blink2 "
+ "ilink1 nmiPC Line\n");
+ for (i = ERROR_START_OFFSET;
+ i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+ i += ERROR_ELEM_SIZE) {
+ desc = il_read_targ_mem(il, base + i);
+ time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+ IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+ il3945_desc_lookup(desc), desc, time, blink1, blink2,
+ ilink1, ilink2, data1);
+ }
+}
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR39_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR39_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ il_txq_update_write_ptr(il, &il->txq[0]);
+ il_txq_update_write_ptr(il, &il->txq[1]);
+ il_txq_update_write_ptr(il, &il->txq[2]);
+ il_txq_update_write_ptr(il, &il->txq[3]);
+ il_txq_update_write_ptr(il, &il->txq[4]);
+ il_txq_update_write_ptr(il, &il->txq[5]);
+
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il3945_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("Tx interrupt\n");
+ il->isr_stats.tx++;
+
+ _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+ il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+ handled |= CSR_INT_BIT_FH_TX;
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~il->inta_mask) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with inta_fh = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts, but only if they were disabled
+ * by the irq handler */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct il3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ scan_ch->channel = chan->hw_value;
+
+ ch_info = il_get_channel_info(il, band, scan_ch->channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
+ continue;
+ }
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* If passive, set up for auto-switch
+ * and use the long active_dwell time.
+ */
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ scan_ch->type = 0; /* passive */
+ if (IL_UCODE_API(il->ucode_ver) == 1)
+ scan_ch->active_dwell =
+ cpu_to_le16(passive_dwell - 1);
+ } else {
+ scan_ch->type = 1; /* active */
+ }
+
+ /* Set direct probe bits. These may be used both for active
+ * scan channels (probes get sent right away)
+ * and for passive channels (probes get sent only after
+ * hearing a clear Rx packet). */
+ if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ if (n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ } else {
+ /* uCode v1 does not allow setting direct probe bits on
+ * passive channel. */
+ if ((scan_ch->type & 1) && n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ }
+
+ /* Set txpower levels to defaults */
+ scan_ch->tpc.dsp_atten = 110;
+ /* scan_pwr_info->tpc.dsp_atten; */
+
+ /*scan_pwr_info->tpc.tx_gain; */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else {
+ scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ }
+
+ D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+ (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il3945_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il3945_rates[i].plcp ==
+ 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int rc = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int rc = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+#if 0 /* Enable this if you want to see details */
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+ *image);
+#endif
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int rc = 0;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_full(il, image, len);
+
+ return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item) \
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->v1.item); \
+}
+
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
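+ /* v1 header: ver plus five size words = 6 * sizeof(u32) = 24 bytes */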
+ return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+ return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
+
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+ const struct il_ucode_header *ucode;
+ int ret = -EINVAL, idx;
+ const struct firmware *ucode_raw;
+ /* firmware file name contains uCode/driver compatibility version */
+ const char *name_pre = il->cfg->fw_name_pre;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ char buf[25];
+ u8 *src;
+ size_t len;
+ u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+ /* Ask kernel firmware_class module to get the boot firmware off disk.
+ * request_firmware() is synchronous, file is in memory on return. */
+ for (idx = api_max; idx >= api_min; idx--) {
+ sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+ ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+ if (ret < 0) {
+ IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+ if (ret == -ENOENT)
+ continue;
+ else
+ goto error;
+ } else {
+ if (idx < api_max)
+ IL_ERR("Loaded firmware %s, "
+ "which is deprecated. "
+ " Please use API v%u instead.\n", buf,
+ api_max);
+ D_INFO("Got firmware '%s' file "
+ "(%zd bytes) from disk\n", buf, ucode_raw->size);
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Make sure that we got at least our header! */
+ if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+ IL_ERR("File size way too small!\n");
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+ inst_size = il3945_ucode_get_inst_size(ucode);
+ data_size = il3945_ucode_get_data_size(ucode);
+ init_size = il3945_ucode_get_init_size(ucode);
+ init_data_size = il3945_ucode_get_init_data_size(ucode);
+ boot_size = il3945_ucode_get_boot_size(ucode);
+ src = il3945_ucode_get_data(ucode);
+
+ /* api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward */
+
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ il->ucode_ver = 0;
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected %u, "
+ "got %u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+ D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+ D_INFO("f/w package hdr init inst size = %u\n", init_size);
+ D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+ D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+ init_size + init_data_size + boot_size) {
+
+ D_INFO("uCode file size %zd does not match expected size\n",
+ ucode_raw->size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (inst_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode instr len %d too large to fit in\n", inst_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ if (data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode data len %d too large to fit in\n", data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode init instr len %d too large to fit in\n",
+ init_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode init data len %d too large to fit in\n",
+ init_data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (boot_size > IL39_MAX_BSM_SIZE) {
+ D_INFO("uCode boot instr len %d too large to fit in\n",
+ boot_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (init_size && init_data_size) {
+ il->ucode_init.len = init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (boot_size) {
+ il->ucode_boot.len = boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ len = inst_size;
+ D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+ memcpy(il->ucode_code.v_addr, src, len);
+ src += len;
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /* Runtime data (2nd block)
+ * NOTE: Copy into backup buffer will be done in il3945_up() */
+ len = data_size;
+ D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+ memcpy(il->ucode_data.v_addr, src, len);
+ memcpy(il->ucode_data_backup.v_addr, src, len);
+ src += len;
+
+ /* Initialization instructions (3rd block) */
+ if (init_size) {
+ len = init_size;
+ D_INFO("Copying (but not loading) init instr len %zd\n", len);
+ memcpy(il->ucode_init.v_addr, src, len);
+ src += len;
+ }
+
+ /* Initialization data (4th block) */
+ if (init_data_size) {
+ len = init_data_size;
+ D_INFO("Copying (but not loading) init data len %zd\n", len);
+ memcpy(il->ucode_init_data.v_addr, src, len);
+ src += len;
+ }
+
+ /* Bootstrap instructions (5th block) */
+ len = boot_size;
+ D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+ memcpy(il->ucode_boot.v_addr, src, len);
+
+ /* We have our copies now, allow the OS to release its copy */
+ release_firmware(ucode_raw);
+ return 0;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ ret = -ENOMEM;
+ il3945_dealloc_ucode_pci(il);
+
+err_release:
+ release_firmware(ucode_raw);
+
+error:
+ return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+
+ /* bits 31:0 for 3945 */
+ pinst = il->ucode_code.p_addr;
+ pdata = il->ucode_data_backup.p_addr;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after the N_ALIVE notification is
+ * received from the "initialize" uCode.
+ *
+ * Tell the "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+ /* Check alive response for "valid" sign from uCode */
+ if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il3945_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+ int thermal_spin = 0;
+ u32 rfkill;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+ D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+ if (rfkill & 0x1) {
+ clear_bit(S_RF_KILL_HW, &il->status);
+ /* if RFKILL is not on, then wait for thermal
+ * sensor in adapter to kick in */
+ while (il3945_hw_get_temperature(il) == 0) {
+ thermal_spin++;
+ udelay(10);
+ }
+
+ if (thermal_spin)
+ D_INFO("Thermal calibration took %dus\n",
+ thermal_spin * 10);
+ } else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ /* After the ALIVE response, we can send commands to 3945 uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK_3945;
+
+ il_power_update_mode(il, true);
+
+ if (il_is_associated(il)) {
+ struct il3945_rxon_cmd *active_rxon =
+ (struct il3945_rxon_cmd *)(&ctx->active);
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, ctx);
+ }
+
+ /* Configure Bluetooth device coexistence support */
+ il_send_bt_config(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il3945_commit_rxon(il, ctx);
+
+ il3945_reg_txpower_periodic(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop TX queue watchdog. The S_EXIT_PENDING bit must be set
+ * to prevent the timer from being rearmed. */
+ del_timer_sync(&il->watchdog);
+
+ /* Station information will now be cleared in device */
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il3945_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il3945_init() then
+ * clear all bits but the RF Kill bits and return */
+ if (!il_is_init(il)) {
+ il->status =
+ test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+ test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il3945_hw_txq_ctx_stop(il);
+ il3945_hw_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il3945_down(il);
+ mutex_unlock(&il->mutex);
+
+ il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+ int rc, i;
+
+ rc = il3945_alloc_bcast_station(il);
+ if (rc)
+ return rc;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bring up\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else {
+ set_bit(S_RF_KILL_HW, &il->status);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return -ENODEV;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ rc = il3945_hw_nic_init(il);
+ if (rc) {
+ IL_ERR("Unable to int nic\n");
+ return rc;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ /* We return success when we resume from suspend and rf_kill is on. */
+ if (test_bit(S_RF_KILL_HW, &il->status))
+ return 0;
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ rc = il->cfg->ops->lib->load_ucode(il);
+
+ if (rc) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il3945_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il3945_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* Tried to restart and configure the device as many times as
+ * allowed; give up. */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+/*
+ * The 3945 cannot interrupt the driver when the hardware RF-kill switch
+ * toggles; the driver must poll the CSR_GP_CNTRL register for changes.
+ * This register *is* readable even when the device has been SW_RESET into
+ * low power mode (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, _3945.rfkill_poll.work);
+ bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+ bool new_rfkill =
+ !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (new_rfkill != old_rfkill) {
+ if (new_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+ D_RF_KILL("RF_KILL bit toggled to %s.\n",
+ new_rfkill ? "disable radio" : "enable radio");
+ }
+
+ /* Keep this poll running even if the radio is now enabled. It is
+ * cancelled in mac_start() when the system decides to start again. */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il3945_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il3945_scan_cmd *scan;
+ u8 n_probes = 0;
+ enum ieee80211_band band;
+ bool is_active = false;
+ int ret;
+ u16 len;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("Fail to allocate scan memory\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+ /*
+ * suspend time format:
+ * 0-19: beacon interval in usec (time before exec.)
+ * 20-23: 0
+ * 24-31: number of beacons (suspend between channels)
+ */
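+ /* Worked example (assuming a beacon interval of 100): extra = 1 << 24,
+ * the remainder is 0, and scan_suspend_time = 0x01000000. */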
+
+ extra = (suspend_time / interval) << 24;
+ scan_suspend_time =
+ 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Kicking off passive scan.\n");
+
+ /* We don't build a direct scan probe request; the uCode will do
+ * that based on the direct_mask added to each channel entry */
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ /* flags + rate selection */
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ scan->tx_cmd.rate = RATE_1M_PLCP;
+ band = IEEE80211_BAND_2GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ scan->tx_cmd.rate = RATE_6M_PLCP;
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is marked
+ * passive, we can do active scanning if we detect transmissions. For
+ * passive-only scanning, disable switching to active on any channel.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(len);
+
+ /* select Rx antennas */
+ scan->flags |= il3945_get_antenna_flags(il);
+
+ scan->channel_count =
+ il3945_get_channels_for_scan(il, band, is_active, n_probes,
+ (void *)&scan->data[len], vif);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il3945_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+ return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+ mutex_unlock(&il->mutex);
+ il3945_down(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il3945_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il3945_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_rx_replenish(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+ int rc = 0;
+ struct ieee80211_conf *conf = NULL;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (!ctx->vif || !il->is_open)
+ return;
+
+ D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+ ctx->vif->bss_conf.beacon_int);
+
+ if (ctx->vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (ctx->vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il3945_commit_rxon(il, ctx);
+
+ switch (ctx->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ il3945_rate_scale_init(il->hw, IL_AP_ID);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il3945_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ ctx->vif->type);
+ break;
+ }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+
+ /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+ * ucode filename and max sizes are card-specific. */
+
+ if (!il->ucode_code.len) {
+ ret = il3945_read_ucode(il);
+ if (ret) {
+ IL_ERR("Could not read microcode: %d\n", ret);
+ mutex_unlock(&il->mutex);
+ goto out_release_irq;
+ }
+ }
+
+ ret = __il3945_up(il);
+
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ goto out_release_irq;
+
+ D_INFO("Start UP work.\n");
+
+ /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ ret = -ETIMEDOUT;
+ goto out_release_irq;
+ }
+ }
+
+ /* ucode is running and will send rfkill notifications,
+ * no need to poll the killswitch state anymore */
+ cancel_delayed_work(&il->_3945.rfkill_poll);
+
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+
+out_release_irq:
+ il->is_open = 0;
+ D_MAC80211("leave - failed\n");
+ return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open) {
+ D_MAC80211("leave - skip\n");
+ return;
+ }
+
+ il->is_open = 0;
+
+ il3945_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* start polling the killswitch state again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+ D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il3945_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int rc = 0;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!(il_is_associated(il))) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - "
+ "Attempting to continue.\n");
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+ }
+ il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ int ret = 0;
+ u8 sta_id = IL_INVALID_STATION;
+ u8 static_key;
+
+ D_MAC80211("enter\n");
+
+ if (il3945_mod_params.sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !il_is_associated(il);
+
+ if (!static_key) {
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+ }
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ switch (cmd) {
+ case SET_KEY:
+ if (static_key)
+ ret = il3945_set_static_key(il, key);
+ else
+ ret = il3945_set_dynamic_key(il, key, sta_id);
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (static_key)
+ ret = il3945_remove_static_key(il);
+ else
+ ret = il3945_clear_sta_key_info(il, sta_id);
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ ret =
+ il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il3945_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
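+ /* CHK() collects bits to set in filter_or and bits to clear in
+ * filter_nand; both are applied to staging.filter_flags below. */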
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because the hardware may be in the middle
+ * of a scan; and even when the hardware is ready, committing here
+ * breaks for some reason. The filter flags change will be committed
+ * eventually anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed through sysfs below is a per-device debug
+ * level that is used instead of the global debug level whenever the
+ * per-device level is set.
+ */
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_INFO("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+ il3945_store_debug_level);
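+
+/*
+ * Illustrative usage (example path and mask only; the exact path depends
+ * on the PCI device):
+ *   echo 0x43fff > /sys/bus/pci/devices/0000:03:00.0/debug_level
+ * The value is a bitmask; as noted above, a per-device value overrides
+ * the global "debug" module parameter.
+ */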
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+ u32 val;
+
+ val = simple_strtoul(p, &p, 10);
+ if (p == buf)
+ IL_INFO(": %s is not in decimal form.\n", buf);
+ else
+ il3945_hw_reg_set_txpower(il, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+ il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ u32 flags = simple_strtoul(buf, NULL, 0);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.flags) != flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+ ctx->staging.flags = cpu_to_le32(flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+ il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
+ filter_flags);
+ ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+ il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_spectrum_notification measure_report;
+ u32 size = sizeof(measure_report), len = 0, ofs = 0;
+ u8 *data = (u8 *) &measure_report;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (!(il->measurement_status & MEASUREMENT_READY)) {
+ spin_unlock_irqrestore(&il->lock, flags);
+ return 0;
+ }
+ memcpy(&measure_report, &il->measure_report, size);
+ il->measurement_status = 0;
+ spin_unlock_irqrestore(&il->lock, flags);
+
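+	/*
+	 * Dump the snapshotted report as hex, 16 bytes per line, stopping
+	 * when either the report or the PAGE_SIZE sysfs buffer runs out.
+	 */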
+ while (size && PAGE_SIZE - len) {
+ hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+ PAGE_SIZE - len, 1);
+ len = strlen(buf);
+ if (PAGE_SIZE - len)
+ buf[len++] = '\n';
+
+ ofs += 16;
+ size -= min(size, 16U);
+ }
+
+ return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_measurement_params params = {
+ .channel = le16_to_cpu(ctx->active.channel),
+ .start_time = cpu_to_le64(il->_3945.last_tsf),
+ .duration = cpu_to_le16(1),
+ };
+ u8 type = IL_MEASURE_BASIC;
+ u8 buffer[32];
+ u8 channel;
+
+ if (count) {
+ char *p = buffer;
+		strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
+		buffer[min(sizeof(buffer) - 1, count)] = '\0';
+ channel = simple_strtoul(p, NULL, 0);
+ if (channel)
+ params.channel = channel;
+
+ p = buffer;
+ while (*p && *p != ' ')
+ p++;
+ if (*p)
+ type = simple_strtoul(p + 1, NULL, 0);
+ }
+
+ D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
+ type, params.channel, buf);
+ il3945_get_measurement(il, &params, type);
+
+ return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+ il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ il->retry_rate = simple_strtoul(buf, NULL, 0);
+ if (il->retry_rate <= 0)
+ il->retry_rate = 1;
+
+ return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+ il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+	/* none of this belongs in sysfs anyway */
+ return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+ int ant;
+
+ if (count == 0)
+ return 0;
+
+ if (sscanf(buf, "%1i", &ant) != 1) {
+ D_INFO("not in hex or decimal form.\n");
+ return count;
+ }
+
+ if (ant >= 0 && ant <= 2) {
+ D_INFO("Setting antenna select to %d.\n", ant);
+ il3945_mod_params.antenna = (enum il3945_antenna)ant;
+ } else
+ D_INFO("Bad antenna select value %d.\n", ant);
+
+ return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+ il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ if (!il_is_alive(il))
+ return -EAGAIN;
+ return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+
+ if (p[0] == '1')
+ il3945_dump_nic_error_log(il);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il3945_bg_restart);
+ INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+ INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+ INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+ il_setup_scan_deferred_work(il);
+
+ il3945_hw_setup_deferred_work(il);
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il3945_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+ il3945_hw_cancel_deferred_work(il);
+
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+
+ il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+ &dev_attr_antenna.attr,
+ &dev_attr_channels.attr,
+ &dev_attr_dump_errors.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_filter_flags.attr,
+ &dev_attr_measurement.attr,
+ &dev_attr_retry_rate.attr,
+ &dev_attr_status.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+ .tx = il3945_mac_tx,
+ .start = il3945_mac_start,
+ .stop = il3945_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il3945_configure_filter,
+ .set_key = il3945_mac_set_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il3945_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+ int ret;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ il->retry_rate = 1;
+ il->beacon_skb = NULL;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+ IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+ eeprom->version);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ /* Set up txpower settings in driver for all channels */
+ if (il3945_txpower_set_from_eeprom(il)) {
+ ret = -EIO;
+ goto err_free_channel_map;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il3945_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST 200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-3945-rs";
+ hw->sta_data_size = sizeof(struct il3945_sta_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+ hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
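+	/* the 24 above is the 802.11 management header, the 2 is the
+	 * (id, len) header of the zero-length SSID element */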
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ struct il3945_eeprom *eeprom;
+ unsigned long flags;
+
+ /***********************
+ * 1. Allocating HW data
+ * ********************/
+
+	/* mac80211 allocates memory for this device instance, including
+	 * space for this driver's private structure */
+ hw = il_alloc_all(cfg);
+ if (hw == NULL) {
+ pr_err("Can not allocate network device\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ /*
+ * Disabling hardware scan means that mac80211 will perform scans
+ * "the hard way", rather than using device's scan.
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ D_INFO("Disabling hw_scan\n");
+ il3945_hw_ops.hw_scan = NULL;
+ }
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /***************************
+ * 2. Initializing PCI bus
+ * *************************/
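+	/* keep the device's link out of PCIe ASPM L0s/L1 and clock PM */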
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+
+ pci_set_drvdata(pdev, il);
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ /***********************
+ * 3. Read REV Register
+ * ********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, 0x41, 0x00);
+
+	/* these spin locks are used in apm_ops.init and EEPROM access,
+	 * so initialize them now
+	 */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /***********************
+ * 4. Read EEPROM
+ * ********************/
+
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ /* MAC Address location in EEPROM same for 3945/4965 */
+ eeprom = (struct il3945_eeprom *)il->eeprom;
+ D_INFO("MAC address: %pM\n", eeprom->mac_address);
+ SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+ /***********************
+ * 5. Setup HW Constants
+ * ********************/
+ /* Device-specific setup */
+ if (il3945_hw_set_hw_params(il)) {
+ IL_ERR("failed to set hw settings\n");
+ goto out_eeprom_free;
+ }
+
+ /***********************
+ * 6. Setup il
+ * ********************/
+
+ err = il3945_init_drv(il);
+ if (err) {
+ IL_ERR("initializing driver failed\n");
+ goto out_unset_hw_params;
+ }
+
+ IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+ /***********************
+ * 7. Setup Services
+ * ********************/
+
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_release_irq;
+ }
+
+ il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+ &il->ctx);
+ il3945_setup_deferred_work(il);
+ il3945_setup_handlers(il);
+ il_power_initialize(il);
+
+ /*********************************
+ * 8. Setup and Register mac80211
+ * *******************************/
+
+ il_enable_interrupts(il);
+
+ err = il3945_setup_mac(il);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ /* Start monitoring the killswitch */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+ return 0;
+
+out_remove_sysfs:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il_free_geos(il);
+ il_free_channel_map(il);
+out_unset_hw_params:
+ il3945_unset_hw_params(il);
+out_eeprom_free:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il3945_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il_down(), but there are paths to
+ * run il_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_synchronize_irq(il);
+
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+ cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+ il3945_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il3945_rx_queue_free(il, &il->rxq);
+ il3945_hw_txq_ctx_free(il);
+
+ il3945_unset_hw_params(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(pdev->irq, il);
+ pci_disable_msi(pdev);
+
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il_free_channel_map(il);
+ il_free_geos(il);
+ kfree(il->scan_cmd);
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+ .name = DRV_NAME,
+ .id_table = il3945_hw_card_ids,
+ .probe = il3945_pci_probe,
+ .remove = __devexit_p(il3945_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il3945_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il3945_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il3945_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+ pci_unregister_driver(&il3945_driver);
+ il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 00000000000..d7a83f22919
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,986 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+ 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+ s8 min_rssi;
+ u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-72, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-87, RATE_9M_IDX},
+ {-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-68, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-86, RATE_11M_IDX},
+ {-88, RATE_5M_IDX},
+ {-90, RATE_2M_IDX},
+ {-92, RATE_1M_IDX}
+};
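+
+/*
+ * The tables above map a minimum RSSI to the rate used as the starting
+ * point right after association: il3945_get_rate_idx_by_rssi() picks the
+ * first entry whose min_rssi the measured RSSI still meets.  For example,
+ * an RSSI of -70 dBm on 2.4 GHz falls through -60/-64/-68 and selects
+ * RATE_24M_IDX.
+ */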
+
+#define RATE_MAX_WINDOW 62
+#define RATE_FLUSH (3*HZ)
+#define RATE_WIN_FLUSH (HZ/2)
+#define IL39_RATE_HIGH_TH 11520
+#define IL_SUCCESS_UP_TH 8960
+#define IL_SUCCESS_DOWN_TH 10880
+#define RATE_MIN_FAILURE_TH 6
+#define RATE_MIN_SUCCESS_TH 8
+#define RATE_DECREASE_TH 1920
+#define RATE_RETRY_TH 15
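+
+/*
+ * The thresholds above appear to be percentages in <<7 fixed point, the
+ * same scale as success_ratio below (e.g. IL39_RATE_HIGH_TH 11520 == 90 * 128,
+ * RATE_DECREASE_TH 1920 == 15 * 128).
+ */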
+
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+ u32 idx = 0;
+ u32 table_size = 0;
+ struct il3945_tpt_entry *tpt_table = NULL;
+
+ if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+ rssi = IL_MIN_RSSI_VAL;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ tpt_table = il3945_tpt_table_g;
+ table_size = ARRAY_SIZE(il3945_tpt_table_g);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ tpt_table = il3945_tpt_table_a;
+ table_size = ARRAY_SIZE(il3945_tpt_table_a);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+ idx++;
+
+ idx = min(idx, table_size - 1);
+
+ return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = -1;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed. If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+ int unflushed = 0;
+ int i;
+ unsigned long flags;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /*
+ * For each rate, if we have collected data on that rate
+ * and it has been more than RATE_WIN_FLUSH
+ * since we flushed, clear out the gathered stats
+ */
+ for (i = 0; i < RATE_COUNT_3945; i++) {
+ if (!rs_sta->win[i].counter)
+ continue;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+ if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+ D_RATE("flushing %d samples of rate " "idx %d\n",
+ rs_sta->win[i].counter, i);
+ il3945_clear_win(&rs_sta->win[i]);
+ } else
+ unflushed++;
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+ }
+
+ return unflushed;
+}
+
+#define RATE_FLUSH_MAX 5000 /* msec */
+#define RATE_FLUSH_MIN 50 /* msec */
+#define IL_AVERAGE_PACKETS 1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+ struct il3945_rs_sta *rs_sta = (void *)data;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+ int unflushed = 0;
+ unsigned long flags;
+ u32 packet_count, duration, pps;
+
+ D_RATE("enter\n");
+
+ unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* Number of packets Tx'd since last time this timer ran */
+ packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+ rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+ if (unflushed) {
+ duration =
+ jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+ D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+ /* Determine packets per second */
+ if (duration)
+ pps = (packet_count * 1000) / duration;
+ else
+ pps = 0;
+
+ if (pps) {
+ duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+ if (duration < RATE_FLUSH_MIN)
+ duration = RATE_FLUSH_MIN;
+ else if (duration > RATE_FLUSH_MAX)
+ duration = RATE_FLUSH_MAX;
+ } else
+ duration = RATE_FLUSH_MAX;
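+		/*
+		 * e.g. at 500 packets/s the next flush is scheduled
+		 * (1500 * 1000) / 500 = 3000 ms out, clamped to
+		 * [RATE_FLUSH_MIN, RATE_FLUSH_MAX].
+		 */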
+
+ rs_sta->flush_time = msecs_to_jiffies(duration);
+
+ D_RATE("new flush period: %d msec ave %d\n", duration,
+ packet_count);
+
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+
+ rs_sta->last_partial_flush = jiffies;
+ } else {
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->flush_pending = 0;
+ }
+ /* If there weren't any unflushed entries, we don't schedule the timer
+ * to run again */
+
+ rs_sta->last_flush = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last RATE_MAX_WINDOW (62) packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+ struct il3945_rate_scale_data *win, int success,
+ int retries, int idx)
+{
+ unsigned long flags;
+ s32 fail_count;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ if (!retries) {
+ D_RATE("leave: retries == 0 -- should be at least 1\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+	 */
+ while (retries > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+ win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame (throw away oldest history),
+ * OR in "1", and increment "success" if this
+ * frame was successful. */
+ win->data <<= 1;
+ if (success > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ success--;
+ }
+
+ retries--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
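+	/*
+	 * e.g. 32 successes out of 62 attempts gives
+	 * 128 * (100 * 32) / 62 ~= 6606, roughly 51% in <<7 fixed point,
+	 * the same scale used by the thresholds it is compared against
+	 * (RATE_DECREASE_TH == 15 * 128, etc.).
+	 */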
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt =
+ ((win->success_ratio * rs_sta->expected_tpt[idx] +
+ 64) / 128);
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct il3945_sta_priv *psta;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ D_INFO("enter\n");
+ if (sta_id == il->ctx.bcast_sta_id)
+ goto out;
+
+ psta = (struct il3945_sta_priv *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ rs_sta->il = il;
+
+ rs_sta->start_rate = RATE_INVALID;
+
+ /* default to just 802.11b */
+ rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->last_flush = jiffies;
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->last_tx_packets = 0;
+
+ rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+ rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+ for (i = 0; i < RATE_COUNT_3945; i++)
+ il3945_clear_win(&rs_sta->win[i]);
+
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ for (i = sband->n_bitrates - 1; i >= 0; i--) {
+ if (sta->supp_rates[sband->band] & (1 << i)) {
+ rs_sta->last_txrate_idx = i;
+ break;
+ }
+ }
+
+ il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+	/* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
+ }
+
+out:
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+ D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il3945_rs_sta *rs_sta;
+ struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+ struct il_priv *il __maybe_unused = il_priv;
+
+ D_RATE("enter\n");
+
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
+ D_RATE("leave\n");
+
+ return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il3945_rs_sta *rs_sta = il_sta;
+
+ /*
+ * Be careful not to use any members of il3945_rs_sta (like trying
+ * to use il_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
+ del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ s8 retries = 0, current_count;
+ int scale_rate_idx, first_idx, last_idx;
+ unsigned long flags;
+ struct il_priv *il = (struct il_priv *)il_rate;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ retries = info->status.rates[0].count;
+ /* Sanity Check for retries */
+ if (retries > RATE_RETRY_TH)
+ retries = RATE_RETRY_TH;
+
+ first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+ if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+ D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+ return;
+ }
+
+ if (!il_sta) {
+ D_RATE("leave: No STA il data to update!\n");
+ return;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!rs_sta->il) {
+ D_RATE("leave: STA il data uninitialized!\n");
+ return;
+ }
+
+ rs_sta->tx_packets++;
+
+ scale_rate_idx = first_idx;
+ last_idx = first_idx;
+
+ /*
+ * Update the win for each rate. We determine which rates
+ * were Tx'd based on the total number of retries vs. the number
+ * of retries configured for each rate -- currently set to the
+ * il value 'retry_rate' vs. rate specific
+ *
+ * On exit from this while loop last_idx indicates the rate
+ * at which the frame was finally transmitted (or failed if no
+ * ACK)
+ */
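+	/*
+	 * For example, with il->retry_rate == 2 and five attempts in total,
+	 * the starting rate and the next lower rate are each charged two
+	 * failed tries here, and the final attempt is accounted for with
+	 * the ACK status after the loop.
+	 */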
+ while (retries > 1) {
+ if ((retries - 1) < il->retry_rate) {
+ current_count = (retries - 1);
+ last_idx = scale_rate_idx;
+ } else {
+ current_count = il->retry_rate;
+ last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+ }
+
+ /* Update this rate accounting for as many retries
+ * as was used for it (per current_count) */
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+ current_count, scale_rate_idx);
+ D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+ current_count);
+
+ retries -= current_count;
+
+ scale_rate_idx = last_idx;
+ }
+
+ /* Update the last idx win with success/failure based on ACK */
+ D_RATE("Update rate %d with %s.\n", last_idx,
+ (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+ info->flags & IEEE80211_TX_STAT_ACK, 1,
+ last_idx);
+
+ /* We updated the rate scale win -- if its been more than
+ * flush_time since the last run, schedule the flush
+ * again */
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ if (!rs_sta->flush_pending &&
+ time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->flush_pending = 1;
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
+
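+
+/*
+ * Find the closest usable lower and higher rates (per rate_mask) around
+ * idx and return them packed as (high << 8) | low; RATE_INVALID marks a
+ * missing neighbour.  Callers unpack with "& 0xff" and ">> 8".
+ */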
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+ enum ieee80211_band band)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /* 802.11A walks to the next literal adjacent rate in
+ * the rate table */
+ if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ if (rs_sta->tgg)
+ low = il3945_rates[low].prev_rs_tgg;
+ else
+ low = il3945_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ if (rs_sta->tgg)
+ high = il3945_rates[high].next_rs_tgg;
+ else
+ high = il3945_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct sk_buff *skb = txrc->skb;
+ u8 low = RATE_INVALID;
+ u8 high = RATE_INVALID;
+ u16 high_low;
+ int idx;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct il3945_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ unsigned long flags;
+ u16 rate_mask;
+ s8 max_rate_idx = -1;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (rs_sta && !rs_sta->il) {
+ D_RATE("Rate scaling information not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ rate_mask = sta->supp_rates[sband->band];
+
+ /* get user max rate if set */
+ max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+ max_rate_idx = -1;
+
+ idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* for a recent association, choose the best starting rate based
+	 * on the RSSI value
+	 */
+ if (rs_sta->start_rate != RATE_INVALID) {
+ if (rs_sta->start_rate < idx &&
+ (rate_mask & (1 << rs_sta->start_rate)))
+ idx = rs_sta->start_rate;
+ rs_sta->start_rate = RATE_INVALID;
+ }
+
+ /* force user max rate if set by user */
+ if (max_rate_idx != -1 && max_rate_idx < idx) {
+ if (rate_mask & (1 << max_rate_idx))
+ idx = max_rate_idx;
+ }
+
+ win = &(rs_sta->win[idx]);
+
+ fail_count = win->counter - win->success_counter;
+
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("Invalid average_tpt on rate %d: "
+ "counter: %d, success_counter: %d, "
+ "expected_tpt is %sNULL\n", idx, win->counter,
+ win->success_counter,
+ rs_sta->expected_tpt ? "not " : "");
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+ goto out;
+
+ }
+
+ current_tpt = win->average_tpt;
+
+ high_low =
+ il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow anything higher than that constraint */
+ if (max_rate_idx != -1 && max_rate_idx < high)
+ high = RATE_INVALID;
+
+ /* Collect Measured throughputs of adjacent rates */
+ if (low != RATE_INVALID)
+ low_tpt = rs_sta->win[low].average_tpt;
+
+ if (high != RATE_INVALID)
+ high_tpt = rs_sta->win[high].average_tpt;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ scale_action = 0;
+
+	/* Low success ratio, need to drop the rate */
+ if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates,
+ * try increase */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+
+ /* Both adjacent throughputs are measured, but neither one has
+ * better throughput; we're using the best rate, don't change
+ * it! */
+ } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+ && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+ D_RATE("No action -- low [%d] & high [%d] < "
+ "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+ scale_action = 0;
+
+ /* At least one of the rates has better throughput */
+ } else {
+ if (high_tpt != IL_INVALID_VALUE) {
+
+ /* High rate has better throughput, Increase
+ * rate */
+ if (high_tpt > current_tpt &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else {
+ D_RATE("decrease rate because of high tpt\n");
+ scale_action = 0;
+ }
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (win->success_ratio >= RATE_INCREASE_TH) {
+				/* Lower rate is not better and the success
+				 * ratio is good enough, so try a higher rate */
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (win->success_ratio > RATE_HIGH_TH ||
+ current_tpt > 100 * rs_sta->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+		/* Decrease rate */
+ if (low != RATE_INVALID)
+ idx = low;
+ break;
+ case 1:
+ /* Increase rate */
+ if (high != RATE_INVALID)
+ idx = high;
+
+ break;
+ case 0:
+ default:
+ /* No change */
+ break;
+ }
+
+ D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+ low, high);
+
+out:
+
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+ idx = IL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
+
+ D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int j;
+ ssize_t ret;
+ struct il3945_rs_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc +=
+ sprintf(buff + desc,
+ "tx packets=%d last rate idx=%d\n"
+ "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+ lq_sta->last_txrate_idx, lq_sta->start_rate,
+ jiffies_to_msecs(lq_sta->flush_time));
+ for (j = 0; j < RATE_COUNT_3945; j++) {
+ desc +=
+ sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+ lq_sta->win[j].counter,
+ lq_sta->win[j].success_counter,
+ lq_sta->win[j].success_ratio);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il3945_sta_dbgfs_stats_table_read,
+ .open = il3945_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = il3945_rs_tx_status,
+ .get_rate = il3945_rs_get_rate,
+ .rate_init = il3945_rs_rate_init_stub,
+ .alloc = il3945_rs_alloc,
+ .free = il3945_rs_free,
+ .alloc_sta = il3945_rs_alloc_sta,
+ .free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il3945_add_debugfs,
+ .remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+ struct il_priv *il = hw->priv;
+ s32 rssi = 0;
+ unsigned long flags;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_sta *sta;
+ struct il3945_sta_priv *psta;
+
+ D_RATE("enter\n");
+
+ rcu_read_lock();
+
+ sta =
+ ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+ if (!sta) {
+ D_RATE("Unable to find station to initialize rate scaling.\n");
+ rcu_read_unlock();
+ return;
+ }
+
+ psta = (void *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ rs_sta->tgg = 0;
+ switch (il->band) {
+ case IEEE80211_BAND_2GHZ:
+ /* TODO: this always does G, not a regression */
+ if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+ rs_sta->tgg = 1;
+ rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+ } else
+ rs_sta->expected_tpt = il3945_expected_tpt_g;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rs_sta->expected_tpt = il3945_expected_tpt_a;
+ break;
+ case IEEE80211_NUM_BANDS:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ rssi = il->_3945.last_rx_rssi;
+ if (rssi == 0)
+ rssi = IL_MIN_RSSI_VAL;
+
+ D_RATE("Network RSSI: %d\n", rssi);
+
+ rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+ D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+ rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+ rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 00000000000..1489b1573a6
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2743 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+
+ return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+ .cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX, \
+ RATE_##r##M_IDX_TBL, \
+ RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ * rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+ IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
+};
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+ u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+ if (rate == RATE_INVALID)
+ rate = rate_idx;
+ return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ * bitmap in SRAM. Bit position corresponds to Event # (id/type).
+ * Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  As-is this function is just a
+ * placeholder; you'll need to provide the specific bits to disable ...
+ * ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+ int i;
+ u32 base; /* SRAM address of event log header */
+ u32 disable_ptr; /* SRAM address of event-disable bitmap array */
+ u32 array_size; /* # of u32 entries in array */
+ static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+ 0x00000000, /* 31 - 0 Event id numbers */
+ 0x00000000, /* 63 - 32 */
+ 0x00000000, /* 95 - 64 */
+ 0x00000000, /* 127 - 96 */
+ 0x00000000, /* 159 - 128 */
+ 0x00000000, /* 191 - 160 */
+ 0x00000000, /* 223 - 192 */
+ 0x00000000, /* 255 - 224 */
+ 0x00000000, /* 287 - 256 */
+ 0x00000000, /* 319 - 288 */
+ 0x00000000, /* 351 - 320 */
+ 0x00000000, /* 383 - 352 */
+ 0x00000000, /* 415 - 384 */
+ 0x00000000, /* 447 - 416 */
+ 0x00000000, /* 479 - 448 */
+ 0x00000000, /* 511 - 480 */
+ 0x00000000, /* 543 - 512 */
+ 0x00000000, /* 575 - 544 */
+ 0x00000000, /* 607 - 576 */
+ 0x00000000, /* 639 - 608 */
+ 0x00000000, /* 671 - 640 */
+ 0x00000000, /* 703 - 672 */
+ 0x00000000, /* 735 - 704 */
+ 0x00000000, /* 767 - 736 */
+ 0x00000000, /* 799 - 768 */
+ 0x00000000, /* 831 - 800 */
+ 0x00000000, /* 863 - 832 */
+ 0x00000000, /* 895 - 864 */
+ 0x00000000, /* 927 - 896 */
+ 0x00000000, /* 959 - 928 */
+ 0x00000000, /* 991 - 960 */
+ 0x00000000, /* 1023 - 992 */
+ 0x00000000, /* 1055 - 1024 */
+ 0x00000000, /* 1087 - 1056 */
+ 0x00000000, /* 1119 - 1088 */
+ 0x00000000, /* 1151 - 1120 */
+ 0x00000000, /* 1183 - 1152 */
+ 0x00000000, /* 1215 - 1184 */
+ 0x00000000, /* 1247 - 1216 */
+ 0x00000000, /* 1279 - 1248 */
+ 0x00000000, /* 1311 - 1280 */
+ 0x00000000, /* 1343 - 1312 */
+ 0x00000000, /* 1375 - 1344 */
+ 0x00000000, /* 1407 - 1376 */
+ 0x00000000, /* 1439 - 1408 */
+ 0x00000000, /* 1471 - 1440 */
+ 0x00000000, /* 1503 - 1472 */
+ };
+
+ base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Invalid event log pointer 0x%08X\n", base);
+ return;
+ }
+
+ disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+ array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+ if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+ D_INFO("Disabling selected uCode log events at 0x%x\n",
+ disable_ptr);
+ for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+ il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+ evt_disable[i]);
+
+ } else {
+ D_INFO("Selected uCode log events may be disabled\n");
+ D_INFO(" by writing \"1\"s into disable bitmap\n");
+ D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+ array_size);
+ }
+
+}
+
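+/*
+ * Map a PLCP rate value reported by the hardware back to a driver rate
+ * idx in il3945_rates[]; returns -1 if the PLCP value is unknown.
+ */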
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+ int idx;
+
+ for (idx = 0; idx < RATE_COUNT_3945; idx++)
+ if (il3945_rates[idx].plcp == plcp)
+ return idx;
+ return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ switch (status & TX_STATUS_MSK) {
+ case TX_3945_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_ENTRY(SHORT_LIMIT);
+ TX_STATUS_ENTRY(LONG_LIMIT);
+ TX_STATUS_ENTRY(FIFO_UNDERRUN);
+ TX_STATUS_ENTRY(MGMNT_ABORT);
+ TX_STATUS_ENTRY(NEXT_FRAG);
+ TX_STATUS_ENTRY(LIFE_EXPIRE);
+ TX_STATUS_ENTRY(DEST_PS);
+ TX_STATUS_ENTRY(ABORTED);
+ TX_STATUS_ENTRY(BT_RETRY);
+ TX_STATUS_ENTRY(STA_INVALID);
+ TX_STATUS_ENTRY(FRAG_DROPPED);
+ TX_STATUS_ENTRY(TID_DISABLE);
+ TX_STATUS_ENTRY(FRAME_FLUSHED);
+ TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+ TX_STATUS_ENTRY(TX_LOCKED);
+ TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/*
+ * Get the previous IEEE rate from the rate scale table.
+ * For A and B mode we need to override the previous
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+ int next_rate = il3945_get_prev_ieee_rate(rate);
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ if (rate == RATE_12M_IDX)
+ next_rate = RATE_9M_IDX;
+ else if (rate == RATE_6M_IDX)
+ next_rate = RATE_6M_IDX;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+ if (rate == RATE_11M_IDX)
+ next_rate = RATE_5M_IDX;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return next_rate;
+}
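+
+/*
+ * Illustrative example of the overrides above (not from the original patch):
+ * on 5 GHz the 12M rate falls back to 9M and 6M "falls back" to itself, so
+ * the chain never drops into the CCK rates, which do not exist in that band.
+ * On 2.4 GHz, when the associated peer advertises no OFDM rates, 11M falls
+ * back to 5.5M, presumably because the default previous-IEEE rate would be
+ * an OFDM rate the peer cannot receive.
+ */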
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space becomes available. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+
+ BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+ tx_info->skb = NULL;
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+
+ if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+ txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+ il_wake_queue(il, txq);
+}
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->status);
+ int rate_idx;
+ int fail;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ ieee80211_tx_info_clear_status(info);
+
+ /* Fill the MRR chain with some info about on-chip retransmissions */
+ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+
+ fail = tx_resp->failure_frame;
+
+ info->status.rates[0].idx = rate_idx;
+ info->status.rates[0].count = fail + 1; /* add final attempt */
+
+ /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+ info->flags |=
+ ((status & TX_STATUS_MSK) ==
+ TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+ D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+ il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+ tx_resp->failure_frame);
+
+ D_TX_REPLY("Tx queue reclaim %d\n", idx);
+ il3945_tx_queue_reclaim(il, txq_id, idx);
+
+ if (status & TX_ABORT_REQUIRED_MSK)
+ IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ * RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *) &il->_3945.stats;
+ accum_stats = (u32 *) &il->_3945.accum_stats;
+ delta = (u32 *) &il->_3945.delta_stats;
+ max_delta = (u32 *) &il->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+ i += sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ il->_3945.accum_stats.general.temperature =
+ il->_3945.stats.general.temperature;
+ il->_3945.accum_stats.general.ttl_timestamp =
+ il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il3945_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+ memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *) &pkt->u.raw;
+
+ if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_3945.accum_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.delta_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.max_delta, 0,
+ sizeof(struct il3945_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+ /* Filter incoming packets to determine if they are targeted toward
+ * this network, discarding packets coming from ourselves */
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
+ /* packets to our IBSS update information */
+ return !compare_ether_addr(header->addr3, il->bssid);
+ case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
+ /* packets from our AP (addr2 == BSSID) update information */
+ return !compare_ether_addr(header->addr2, il->bssid);
+ default:
+ return 1;
+ }
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 len = le16_to_cpu(rx_hdr->len);
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* Sanity check: the frame must fit within the RX buffer */
+ if (unlikely(len + IL39_RX_FRAME_SIZE >
+ PAGE_SIZE << il->hw_params.rx_page_order)) {
+ D_DROP("Corruption detected!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ if (!il3945_mod_params.sw_crypto)
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ le32_to_cpu(rx_end->status), stats);
+
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused =
+ le16_to_cpu(rx_stats->noise_diff);
+ u8 network_packet;
+
+ rx_status.flag = 0;
+ rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+ rx_status.band =
+ (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
+
+ rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+ if (rx_status.band == IEEE80211_BAND_5GHZ)
+ rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+ rx_status.antenna =
+ (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ 4;
+
+ /* set the preamble flag if appropriate */
+ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if ((unlikely(rx_stats->phy_count > 20))) {
+ D_DROP("dsp size out of range [0,20]: %d/n",
+ rx_stats->phy_count);
+ return;
+ }
+
+ if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+ return;
+ }
+
+ /* Convert 3945's rssi indicator to dBm */
+ rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+ D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+ rx_stats_sig_avg, rx_stats_noise_diff);
+
+ header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+ network_packet = il3945_is_network_packet(il, header);
+
+ D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+ network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+ rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+ il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+ if (network_packet) {
+ il->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ il->_3945.last_rx_rssi = rx_status.signal;
+ }
+
+ il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ int count;
+ struct il_queue *q;
+ struct il3945_tfd *tfd, *tfd_tmp;
+
+ q = &txq->q;
+ tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+ if (count >= NUM_TFD_CHUNKS || count < 0) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ NUM_TFD_CHUNKS);
+ return -EINVAL;
+ }
+
+ tfd->tbs[count].addr = cpu_to_le32(addr);
+ tfd->tbs[count].len = cpu_to_le32(len);
+
+ count++;
+
+ tfd->control_flags =
+ cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any indexes.
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ int idx = txq->q.read_ptr;
+ struct il3945_tfd *tfd = &tfd_tmp[idx];
+ struct pci_dev *dev = il->pci_dev;
+ int i;
+ int counter;
+
+ /* sanity check */
+ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+ if (counter > NUM_TFD_CHUNKS) {
+ IL_ERR("Too many chunks: %i\n", counter);
+ /* @todo issue fatal error, it is quite serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (counter)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_TODEVICE);
+
+ /* unmap chunks if any */
+
+ for (i = 1; i < counter; i++)
+ pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+ le32_to_cpu(tfd->tbs[i].len),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id)
+{
+ u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+ u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+ u16 rate_mask;
+ int rate;
+ const u8 rts_retry_limit = 7;
+ u8 data_retry_limit;
+ __le32 tx_flags;
+ __le16 fc = hdr->frame_control;
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+ rate = il3945_rates[rate_idx].plcp;
+ tx_flags = tx_cmd->tx_flags;
+
+ /* We need to figure out how to get the sta->supp_rates while
+ * in this running context */
+ rate_mask = RATES_MASK_3945;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ tx_cmd->rate = rate;
+ tx_cmd->tx_flags = tx_flags;
+
+ /* OFDM */
+ tx_cmd->supp_rates[0] =
+ ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ /* CCK */
+ tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+ D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+ "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+ le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+ tx_cmd->supp_rates[0]);
+}
+
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+ unsigned long flags_spin;
+ struct il_station_entry *station;
+
+ if (sta_id == IL_INVALID_STATION)
+ return IL_INVALID_STATION;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ station = &il->stations[sta_id];
+
+ station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+ station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+ station->sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &station->sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+ return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN,
+ CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+ }
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+ il_wr(il, FH39_RCSR_WPTR(0), 0);
+ il_wr(il, FH39_RCSR_CONFIG(0),
+ FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+ (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+ (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+ /* fake read to flush all prev I/O */
+ il_rd(il, FH39_RSSR_CTRL);
+
+ return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+ /* bypass mode */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+ /* RA 0 is active */
+ il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+ /* all 6 fifo are active */
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+ il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+ il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+ il_wr(il, FH39_TSSR_MSG_CONFIG,
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+ return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again.
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+ int rc;
+ int txq_id, slots_num;
+
+ il3945_hw_txq_ctx_free(il);
+
+ /* allocate tx queue structure */
+ rc = il_alloc_txq_mem(il);
+ if (rc)
+ return rc;
+
+ /* Tx CMD queue */
+ rc = il3945_tx_reset(il);
+ if (rc)
+ goto error;
+
+ /* Tx queue(s) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IL39_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (rc) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ il3945_hw_txq_ctx_free(il);
+ return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+ int ret = il_apm_init(il);
+
+ /* Clear APMG (NIC's internal power management) interrupts */
+ il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+ il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+ /* Reset radio chip */
+ il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+ return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ unsigned long flags;
+ u8 rev_id = il->pci_dev->revision;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Determine HW type */
+ D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+ if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+ D_INFO("RTP type\n");
+ else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+ D_INFO("3945 RADIO-MB type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+ } else {
+ D_INFO("3945 RADIO-MM type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+ }
+
+ if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+ D_INFO("SKU OP mode is mrc\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+ } else
+ D_INFO("SKU OP mode is basic\n");
+
+ if ((eeprom->board_revision & 0xF0) == 0xD0) {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ } else {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ }
+
+ if (eeprom->almgor_m_version <= 1) {
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+ D_INFO("Card M type A version is 0x%X\n",
+ eeprom->almgor_m_version);
+ } else {
+ D_INFO("Card M type B version is 0x%X\n",
+ eeprom->almgor_m_version);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+ D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+ D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+ int rc;
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ rc = il_rx_queue_alloc(il);
+ if (rc) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il3945_rx_queue_reset(il, rxq);
+
+ il3945_rx_replenish(il);
+
+ il3945_rx_init(il, rxq);
+
+ /* Look at using this instead:
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+ */
+
+ il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+ rc = il3945_txq_ctx_reset(il);
+ if (rc)
+ return rc;
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq)
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+ int txq_id;
+
+ /* stop SCD */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+ /* reset TFD queues */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+ il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ 1000);
+ }
+
+ il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp
+ * return idx delta into power gain settings table
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+ return (new_reading - old_reading) * (-11) / 100;
+}
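+
+/*
+ * Worked example for the formula above (illustrative, not from the original
+ * patch): temperature readings are raw sensor units and each entry in
+ * power_gain_table below is one half-dB step.  If the NIC now reads 100
+ * units above the factory reference, the delta is
+ *
+ *	(100) * (-11) / 100 = -11
+ *
+ * i.e. 11 steps toward the low-idx (higher output power) end of the gain
+ * table, compensating for the power lost as the radio warms up.
+ */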
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+ return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+ return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature
+ * get the current temperature by reading from NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int temperature;
+
+ temperature = il3945_hw_get_temperature(il);
+
+ /* driver's okay range is -260 to +25.
+ * human readable okay range is 0 to +285 */
+ D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+ /* handle insane temp reading */
+ if (il3945_hw_reg_temp_out_of_range(temperature)) {
+ IL_ERR("Error bad temperature value %d\n", temperature);
+
+ /* if really really hot(?),
+ * substitute the 3rd band/group's temp measured at factory */
+ if (il->last_temperature > 100)
+ temperature = eeprom->groups[2].temperature;
+ else /* else use most recent "sane" value from driver */
+ temperature = il->last_temperature;
+ }
+
+ return temperature; /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * Both are lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER 6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * records new temperature in il->temperature.
+ * replaces il->last_temperature *only* if calib needed
+ * (assumes caller will actually do the calibration!).
+ */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d,\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Same temp,\n");
+ else
+ D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+ /* if we don't need calibration, *don't* update last_temperature */
+ if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+ D_POWER("Timed thermal calib not needed\n");
+ return 0;
+ }
+
+ D_POWER("Timed thermal calib needed\n");
+
+ /* assume that caller will actually do calib ...
+ * update the "last temperature" value */
+ il->last_temperature = il->temperature;
+ return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+ {
+ {251, 127}, /* 2.4 GHz, highest power */
+ {251, 127},
+ {251, 127},
+ {251, 127},
+ {251, 125},
+ {251, 110},
+ {251, 105},
+ {251, 98},
+ {187, 125},
+ {187, 115},
+ {187, 108},
+ {187, 99},
+ {243, 119},
+ {243, 111},
+ {243, 105},
+ {243, 97},
+ {243, 92},
+ {211, 106},
+ {211, 100},
+ {179, 120},
+ {179, 113},
+ {179, 107},
+ {147, 125},
+ {147, 119},
+ {147, 112},
+ {147, 106},
+ {147, 101},
+ {147, 97},
+ {147, 91},
+ {115, 107},
+ {235, 121},
+ {235, 115},
+ {235, 109},
+ {203, 127},
+ {203, 121},
+ {203, 115},
+ {203, 108},
+ {203, 102},
+ {203, 96},
+ {203, 92},
+ {171, 110},
+ {171, 104},
+ {171, 98},
+ {139, 116},
+ {227, 125},
+ {227, 119},
+ {227, 113},
+ {227, 107},
+ {227, 101},
+ {227, 96},
+ {195, 113},
+ {195, 106},
+ {195, 102},
+ {195, 95},
+ {163, 113},
+ {163, 106},
+ {163, 102},
+ {163, 95},
+ {131, 113},
+ {131, 106},
+ {131, 102},
+ {131, 95},
+ {99, 113},
+ {99, 106},
+ {99, 102},
+ {99, 95},
+ {67, 113},
+ {67, 106},
+ {67, 102},
+ {67, 95},
+ {35, 113},
+ {35, 106},
+ {35, 102},
+ {35, 95},
+ {3, 113},
+ {3, 106},
+ {3, 102},
+ {3, 95} /* 2.4 GHz, lowest power */
+ },
+ {
+ {251, 127}, /* 5.x GHz, highest power */
+ {251, 120},
+ {251, 114},
+ {219, 119},
+ {219, 101},
+ {187, 113},
+ {187, 102},
+ {155, 114},
+ {155, 103},
+ {123, 117},
+ {123, 107},
+ {123, 99},
+ {123, 92},
+ {91, 108},
+ {59, 125},
+ {59, 118},
+ {59, 109},
+ {59, 102},
+ {59, 96},
+ {59, 90},
+ {27, 104},
+ {27, 98},
+ {27, 92},
+ {115, 118},
+ {115, 111},
+ {115, 104},
+ {83, 126},
+ {83, 121},
+ {83, 113},
+ {83, 105},
+ {83, 99},
+ {51, 118},
+ {51, 111},
+ {51, 104},
+ {51, 98},
+ {19, 116},
+ {19, 109},
+ {19, 102},
+ {19, 98},
+ {19, 93},
+ {171, 113},
+ {171, 107},
+ {171, 99},
+ {139, 120},
+ {139, 113},
+ {139, 107},
+ {139, 99},
+ {107, 120},
+ {107, 113},
+ {107, 107},
+ {107, 99},
+ {75, 120},
+ {75, 113},
+ {75, 107},
+ {75, 99},
+ {43, 120},
+ {43, 113},
+ {43, 107},
+ {43, 99},
+ {11, 120},
+ {11, 113},
+ {11, 107},
+ {11, 99},
+ {131, 107},
+ {131, 99},
+ {99, 120},
+ {99, 113},
+ {99, 107},
+ {99, 99},
+ {67, 120},
+ {67, 113},
+ {67, 107},
+ {67, 99},
+ {35, 120},
+ {35, 113},
+ {35, 107},
+ {35, 99},
+ {3, 120} /* 5.x GHz, lowest power */
+ }
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+ if (idx < 0)
+ return 0;
+ if (idx >= IL_MAX_GAIN_ENTRIES)
+ return IL_MAX_GAIN_ENTRIES - 1;
+ return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+ const s8 *clip_pwrs,
+ struct il_channel_info *ch_info, int band_idx)
+{
+ struct il3945_scan_power_info *scan_power_info;
+ s8 power;
+ u8 power_idx;
+
+ scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+ /* use this channel group's 6Mbit clipping/saturation pwr,
+ * but cap at regulatory scan power restriction (set during init
+ * based on eeprom channel data) for this channel. */
+ power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+ power = min(power, il->tx_power_user_lmt);
+ scan_power_info->requested_power = power;
+
+ /* find difference between new scan *power* and current "normal"
+ * Tx *power* for 6Mb. Use this difference (x2) to adjust the
+ * current "normal" temperature-compensated Tx power *idx* for
+ * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+ * *idx*. */
+ power_idx =
+ ch_info->power_info[rate_idx].power_table_idx -
+ (power - ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+ /* store reference idx that we use when adjusting *all* scan
+ * powers. So we can accommodate user (all channel) or spectrum
+ * management (single channel) power changes "between" temperature
+ * feedback compensation procedures.
+ * don't force fit this reference idx into gain table; it may be a
+ * negative number. This will help avoid errors when we're at
+ * the lower bounds (highest gains, for warmest temperatures)
+ * of the table. */
+
+ /* don't exceed table bounds for "real" setting */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ scan_power_info->power_table_idx = power_idx;
+ scan_power_info->tpc.tx_gain =
+ power_gain_table[band_idx][power_idx].tx_gain;
+ scan_power_info->tpc.dsp_atten =
+ power_gain_table[band_idx][power_idx].dsp_atten;
+}
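+
+/*
+ * Worked example for the idx math above (illustrative, not from the original
+ * patch): requested powers are in dBm while power_table_idx steps are
+ * half-dB, hence the factor of 2.  Writing base_idx for
+ * power_info[rate_idx].power_table_idx, a scan power that ends up 3 dB below
+ * the normal 6 Mbit requested power gives
+ *
+ *	power_idx = base_idx - (-3) * 2 = base_idx + 6
+ *
+ * i.e. 6 steps toward the lower-power end of the gain table.
+ */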
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from channel info struct, and send to NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+ int rate_idx, i;
+ const struct il_channel_info *ch_info = NULL;
+ struct il3945_txpowertable_cmd txpower = {
+ .channel = il->ctx.active.channel,
+ };
+ u16 chan;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ chan = le16_to_cpu(il->ctx.active.channel);
+
+ txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ ch_info = il_get_channel_info(il, il->band, chan);
+ if (!ch_info) {
+ IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+ il->band);
+ return -EINVAL;
+ }
+
+ if (!il_is_channel_valid(ch_info)) {
+ D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
+ return 0;
+ }
+
+ /* fill cmd with power settings for all rates for current channel */
+ /* Fill OFDM rate */
+ for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+ rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+ /* Fill CCK rates */
+ for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+ rate_idx++, i++) {
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+
+ return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+ sizeof(struct il3945_txpowertable_cmd),
+ &txpower);
+
+}
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update. Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
+ * properly fill out the scan powers, and actual h/w gain settings,
+ * and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+ struct il3945_channel_power_info *power_info;
+ int power_changed = 0;
+ int i;
+ const s8 *clip_pwrs;
+ int power;
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* Get this channel's rate-to-current-power settings table */
+ power_info = ch_info->power_info;
+
+ /* update OFDM Txpower settings */
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+ int delta_idx;
+
+ /* limit new power to be no more than h/w capability */
+ power = min(ch_info->curr_txpow, clip_pwrs[i]);
+ if (power == power_info->requested_power)
+ continue;
+
+ /* find difference between old and new requested powers,
+ * update base (non-temp-compensated) power idx */
+ delta_idx = (power - power_info->requested_power) * 2;
+ power_info->base_power_idx -= delta_idx;
+
+ /* save new requested power value */
+ power_info->requested_power = power;
+
+ power_changed = 1;
+ }
+
+ /* update CCK Txpower settings, based on OFDM 12M setting ...
+ * ... all CCK power settings for a given channel are the *same*. */
+ if (power_changed) {
+ power =
+ ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+ IL_CCK_FROM_OFDM_POWER_DIFF;
+
+ /* do all CCK rates' il3945_channel_power_info structures */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+ power_info->requested_power = power;
+ power_info->base_power_idx =
+ ch_info->power_info[RATE_12M_IDX_TBL].base_power_idx +
+ IL_CCK_FROM_OFDM_IDX_DIFF;
+ ++power_info;
+ }
+ }
+
+ return 0;
+}
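+
+/*
+ * Illustrative note on the constants used above (not from the original
+ * patch): requested powers are in dBm and gain-table idxes are half-dB
+ * steps, so the 5 dBm CCK backoff (IL_CCK_FROM_OFDM_POWER_DIFF = -5)
+ * corresponds to IL_CCK_FROM_OFDM_IDX_DIFF = 10 idx steps toward lower
+ * power.  E.g. if the OFDM 12M setting is 14 dBm at base_power_idx 36, every
+ * CCK rate on that channel ends up at 9 dBm with base_power_idx 46.
+ */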
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ * based strictly on regulatory (eeprom and spectrum mgt) limitations
+ * (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+ s8 max_power;
+
+#if 0
+ /* if we're using TGd limits, use lower of TGd or EEPROM */
+ if (ch_info->tgd_data.max_power != 0)
+ max_power =
+ min(ch_info->tgd_data.max_power,
+ ch_info->eeprom.max_power_avg);
+
+ /* else just use EEPROM limits */
+ else
+#endif
+ max_power = ch_info->eeprom.max_power_avg;
+
+ return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ * and the factory calibration temperatures, and bases the new settings
+ * on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
+ u8 a_band;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ u8 i;
+ int ref_temp;
+ int temperature = il->temperature;
+
+ if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
+ /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* Get this chnlgrp's factory calibration temperature */
+ ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+ /* get power idx adjustment based on current and factory
+ * temps */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+ /* set tx power value for all rates, OFDM and CCK */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+ int power_idx =
+ ch_info->power_info[rate_idx].base_power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within table range */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+ ch_info->power_info[rate_idx].power_table_idx =
+ (u8) power_idx;
+ ch_info->power_info[rate_idx].tpc =
+ power_gain_table[a_band][power_idx];
+ }
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ /* send Txpower command for current channel to ucode */
+ return il->cfg->ops->lib->send_tx_power(il);
+}
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+ struct il_channel_info *ch_info;
+ s8 max_power;
+ u8 a_band;
+ u8 i;
+
+ if (il->tx_power_user_lmt == power) {
+ D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
+ power);
+ return 0;
+ }
+
+ D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+ il->tx_power_user_lmt = power;
+
+ /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* find minimum power of all user and regulatory constraints
+ * (does not consider h/w clipping limitations) */
+ max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+ max_power = min(power, max_power);
+ if (max_power != ch_info->curr_txpow) {
+ ch_info->curr_txpow = max_power;
+
+ /* this considers the h/w clipping limitations */
+ il3945_hw_reg_set_new_power(il, ch_info);
+ }
+ }
+
+ /* update txpower settings for all channels,
+ * send to NIC if associated. */
+ il3945_is_temp_calib_needed(il);
+ il3945_hw_reg_comp_txpower_temp(il);
+
+ return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int rc = 0;
+ struct il_rx_pkt *pkt;
+ struct il3945_rxon_assoc_cmd rxon_assoc;
+ struct il_host_cmd cmd = {
+ .id = C_RXON_ASSOC,
+ .len = sizeof(rxon_assoc),
+ .flags = CMD_WANT_SKB,
+ .data = &rxon_assoc,
+ };
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_RXON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+ int rc = 0;
+ bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ if (!il_is_alive(il))
+ return -1;
+
+ /* always get timestamp with Rx frame */
+ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+ /* select antenna */
+ staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+ staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+ rc = il_check_rxon_cmd(il, ctx);
+ if (rc) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il3945_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, &il->ctx)) {
+ rc = il_send_rxon_assoc(il, &il->ctx);
+ if (rc) {
+ IL_ERR("Error setting RXON_ASSOC "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated from the active configuration
+ * before we apply the new config */
+ if (il_is_associated(il) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ active_rxon->reserved4 = 0;
+ active_rxon->reserved5 = 0;
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ &il->ctx.active);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (rc) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK on current "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ staging_rxon->reserved4 = 0;
+ staging_rxon->reserved5 = 0;
+
+ il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+ /* Apply the new configuration */
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ staging_rxon);
+ if (rc) {
+ IL_ERR("Error setting new configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ if (!new_assoc) {
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ rc = il_set_tx_power(il, il->tx_power_next, true);
+ if (rc) {
+ IL_ERR("Error setting Tx power (%d).\n", rc);
+ return rc;
+ }
+
+ /* Init the hardware's rate fallback order based on the band */
+ rc = il3945_init_hw_rate_table(il);
+ if (rc) {
+ IL_ERR("Error setting HW rate table: %02X\n", rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when time to check our temperature.
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ * -- correct coeffs for temp (can reset temp timer)
+ * -- save this temp as "last",
+ * -- send new set of gain settings to NIC
+ * NOTE: This should continue working, even when we're not associated,
+ * so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+ /* This will kick in the "brute force"
+ * il3945_hw_reg_comp_txpower_temp() below */
+ if (!il3945_is_temp_calib_needed(il))
+ goto reschedule;
+
+ /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+ * This is based *only* on current temperature,
+ * ignoring any previous power measurements */
+ il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+ queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+ REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ _3945.thermal_periodic.work);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il3945_reg_txpower_periodic(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ * These channel groups are based on factory-tested channels;
+ * on A-band, EEPROM's "group frequency" entries represent the top
+ * channel in each group 1-4. All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+ const struct il_channel_info *ch_info)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+ u8 group;
+ u16 group_idx = 0; /* based on factory calib frequencies */
+ u8 grp_channel;
+
+ /* Find the group idx for the channel ... don't use idx 1(?) */
+ if (il_is_channel_a_band(ch_info)) {
+ for (group = 1; group < 5; group++) {
+ grp_channel = ch_grp[group].group_channel;
+ if (ch_info->channel <= grp_channel) {
+ group_idx = group;
+ break;
+ }
+ }
+ /* group 4 has a few channels *above* its factory cal freq */
+ if (group == 5)
+ group_idx = 4;
+ } else
+ group_idx = 0; /* 2.4 GHz, group 0 */
+
+ D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+ return group_idx;
+}
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ * into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+ s32 setting_idx, s32 *new_idx)
+{
+ const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ s32 idx0, idx1;
+ s32 power = 2 * requested_power;
+ s32 i;
+ const struct il3945_eeprom_txpower_sample *samples;
+ s32 gains0, gains1;
+ s32 res;
+ s32 denominator;
+
+ chnl_grp = &eeprom->groups[setting_idx];
+ samples = chnl_grp->samples;
+ for (i = 0; i < 5; i++) {
+ if (power == samples[i].power) {
+ *new_idx = samples[i].gain_idx;
+ return 0;
+ }
+ }
+
+ if (power > samples[1].power) {
+ idx0 = 0;
+ idx1 = 1;
+ } else if (power > samples[2].power) {
+ idx0 = 1;
+ idx1 = 2;
+ } else if (power > samples[3].power) {
+ idx0 = 2;
+ idx1 = 3;
+ } else {
+ idx0 = 3;
+ idx1 = 4;
+ }
+
+ denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+ if (denominator == 0)
+ return -EINVAL;
+ gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+ gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+ res =
+ gains0 + (gains1 - gains0) * ((s32) power -
+ (s32) samples[idx0].power) /
+ denominator + (1 << 18);
+ *new_idx = res >> 19;
+ return 0;
+}
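+
+/*
+ * Worked example of the interpolation above, using hypothetical EEPROM
+ * sample values (illustrative, not from any real module): the arithmetic is
+ * done in 19-bit fixed point and the (1 << 18) term rounds to the nearest
+ * idx.  Suppose samples[0] = { .power = 30, .gain_idx = 20 },
+ * samples[1] = { .power = 20, .gain_idx = 40 }, and the doubled requested
+ * power is 26:
+ *
+ *	gains0 = 20 << 19, gains1 = 40 << 19
+ *	res = gains0 + (gains1 - gains0) * (26 - 30) / (20 - 30) + (1 << 18)
+ *	    = 28.5 * (1 << 19)
+ *	*new_idx = res >> 19 = 28
+ */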
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+ u32 i;
+ s32 rate_idx;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ const struct il3945_eeprom_txpower_group *group;
+
+ D_POWER("Initializing factory calib info from EEPROM\n");
+
+ for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+ s8 *clip_pwrs; /* table of power levels for each rate */
+ s8 satur_pwr; /* saturation power for each chnl group */
+ group = &eeprom->groups[i];
+
+ /* sanity check on factory saturation power value */
+ if (group->saturation_power < 40) {
+ IL_WARN("Error: saturation power is %d, "
+ "less than minimum expected 40\n",
+ group->saturation_power);
+ return;
+ }
+
+ /*
+ * Derive requested power levels for each rate, based on
+ * hardware capabilities (saturation power for band).
+ * Basic value is 3dB down from saturation, with further
+ * power reductions for highest 3 data rates. These
+ * backoffs provide headroom for high rate modulation
+ * power peaks, without too much distortion (clipping).
+ */
+ /* we'll fill in this array with h/w max power levels */
+ clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+ /* divide factory saturation power by 2 to find -3dB level */
+ satur_pwr = (s8) (group->saturation_power >> 1);
+
+ /* fill in channel group's nominal powers for each rate */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+ rate_idx++, clip_pwrs++) {
+ switch (rate_idx) {
+ case RATE_36M_IDX_TBL:
+ if (i == 0) /* B/G */
+ *clip_pwrs = satur_pwr;
+ else /* A */
+ *clip_pwrs = satur_pwr - 5;
+ break;
+ case RATE_48M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 7;
+ else
+ *clip_pwrs = satur_pwr - 10;
+ break;
+ case RATE_54M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 9;
+ else
+ *clip_pwrs = satur_pwr - 12;
+ break;
+ default:
+ *clip_pwrs = satur_pwr;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform active (i.e. using Tx) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_channel_power_info *pwr_info;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ const s8 *clip_pwrs; /* array of power levels for each rate */
+ u8 gain, dsp_atten;
+ s8 power;
+ u8 pwr_idx, base_pwr_idx, a_band;
+ u8 i;
+ int temperature;
+
+ /* save temperature reference,
+ * so we can determine next time to calibrate */
+ temperature = il3945_hw_reg_txpower_get_temperature(il);
+ il->last_temperature = temperature;
+
+ il3945_hw_reg_init_channel_groups(il);
+
+ /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+ i++, ch_info++) {
+ a_band = il_is_channel_a_band(ch_info);
+ if (!il_is_channel_valid(ch_info))
+ continue;
+
+ /* find this channel's channel group (*not* "band") idx */
+ ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+ /* Get this chnlgrp's rate->max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* calculate power idx *adjustment* value according to
+ * diff between current temperature and factory temperature */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature,
+ eeprom->groups[ch_info->group_idx].temperature);
+
+ D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+ delta_idx, temperature + IL_TEMP_CONVERT);
+
+ /* set tx power value for all OFDM rates */
+ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+ s32 uninitialized_var(power_idx);
+ int rc;
+
+ /* use channel group's clip-power table,
+ * but don't exceed channel's max power */
+ s8 pwr = min(ch_info->max_power_avg,
+ clip_pwrs[rate_idx]);
+
+ pwr_info = &ch_info->power_info[rate_idx];
+
+ /* get base (i.e. at factory-measured temperature)
+ * power table idx for this rate's power */
+ rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+ ch_info->group_idx, &power_idx);
+ if (rc) {
+ IL_ERR("Invalid power idx\n");
+ return rc;
+ }
+ pwr_info->base_power_idx = (u8) power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within range of gain table */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ /* fill 1 OFDM rate's il3945_channel_power_info struct */
+ pwr_info->requested_power = pwr;
+ pwr_info->power_table_idx = (u8) power_idx;
+ pwr_info->tpc.tx_gain =
+ power_gain_table[a_band][power_idx].tx_gain;
+ pwr_info->tpc.dsp_atten =
+ power_gain_table[a_band][power_idx].dsp_atten;
+ }
+
+ /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+ pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+ power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+ pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ base_pwr_idx =
+ pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+ /* stay within table range */
+ pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+ gain = power_gain_table[a_band][pwr_idx].tx_gain;
+ dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+ /* fill each CCK rate's il3945_channel_power_info structure
+ * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
+ * NOTE: CCK rates start at end of OFDM rates! */
+ for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+ pwr_info =
+ &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+ pwr_info->requested_power = power;
+ pwr_info->power_table_idx = pwr_idx;
+ pwr_info->base_power_idx = base_pwr_idx;
+ pwr_info->tpc.tx_gain = gain;
+ pwr_info->tpc.dsp_atten = dsp_atten;
+ }
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+ int rc;
+
+ il_wr(il, FH39_RCSR_CONFIG(0), 0);
+ rc = il_poll_bit(il, FH39_RSSR_STATUS,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ if (rc < 0)
+ IL_ERR("Can't stop Rx DMA.\n");
+
+ return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+ shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+ il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+ il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+ il_wr(il, FH39_TCSR_CONFIG(txq_id),
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+ FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+ /* fake read to flush all prev. writes */
+ _il_rd(il, FH39_TSSR_CBB_BASE);
+
+ return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return sizeof(struct il3945_rxon_cmd);
+ case C_POWER_TBL:
+ return sizeof(struct il3945_powertable_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cpu_to_le16(0);
+ addsta->rate_n_flags = cmd->rate_n_flags;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+ return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int ret;
+ u8 sta_id;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret =
+ il3945_add_bssid_station(il, vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+ (il->band ==
+ IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+ RATE_1M_PLCP);
+ il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+ int rc, i, idx, prev_idx;
+ struct il3945_rate_scaling_cmd rate_cmd = {
+ .reserved = {0, 0, 0},
+ };
+ struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+ for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+ idx = il3945_rates[i].table_rs_idx;
+
+ table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
+ table[idx].try_cnt = il->retry_rate;
+ prev_idx = il3945_get_prev_ieee_rate(i);
+ table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+ }
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ D_RATE("Select A mode rate scale\n");
+ /* If one of the following CCK rates is used,
+ * have it fall back to the 6M OFDM rate */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+ /* Don't fall back to CCK rates */
+ table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+ /* Don't drop out of OFDM rates */
+ table[RATE_6M_IDX_TBL].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+ break;
+
+ case IEEE80211_BAND_2GHZ:
+ D_RATE("Select B/G mode rate scale\n");
+ /* If an OFDM rate is used, have it fall back to the
+ * 1M CCK rates */
+
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+
+ idx = IL_FIRST_CCK_RATE;
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[idx].table_rs_idx;
+
+ idx = RATE_11M_IDX_TBL;
+ /* CCK shouldn't fall back to OFDM... */
+ table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Update the rate scaling for control frame Tx */
+ rate_cmd.table_id = 0;
+ rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+ if (rc)
+ return rc;
+
+ /* Update the rate scaling for data frame Tx */
+ rate_cmd.table_id = 1;
+ return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+ memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+ il->_3945.shared_virt =
+ dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+ &il->_3945.shared_phys, GFP_KERNEL);
+ if (!il->_3945.shared_virt) {
+ IL_ERR("failed to allocate pci memory\n");
+ return -ENOMEM;
+ }
+
+ /* Assign number of Usable TX queues */
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+ il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ il->hw_params.max_stations = IL3945_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+ il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+ il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+ u8 rate)
+{
+ struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+ unsigned int frame_size;
+
+ tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ frame_size =
+ il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+ BUG_ON(frame_size > MAX_MPDU_SIZE);
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+ tx_beacon_cmd->tx.rate = rate;
+ tx_beacon_cmd->tx.tx_flags =
+ (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+ /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+ tx_beacon_cmd->tx.supp_rates[0] =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+ return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+ il->handlers[C_TX] = il3945_hdl_tx;
+ il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+ INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+ il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism. Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+ return;
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int rc;
+ int i;
+ u32 done;
+ u32 reg_offset;
+
+ D_INFO("Begin load bsm\n");
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL39_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+ * NOTE: il3945_initialize_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache. */
+ pinst = il->ucode_init.p_addr;
+ pdata = il->ucode_init_data.p_addr;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ rc = il3945_verify_bsm(il);
+ if (rc)
+ return rc;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
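One detail worth calling out about the poll loop above: with at most 100 iterations of udelay(10), the driver waits up to 100 * 10 us = 1 ms for BSM_WR_CTRL_REG_BIT_START to clear before reporting that the bootstrap write did not complete.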
+
+static struct il_hcmd_ops il3945_hcmd = {
+ .rxon_assoc = il3945_send_rxon_assoc,
+ .commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+ .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il3945_hw_txq_free_tfd,
+ .txq_init = il3945_hw_tx_queue_init,
+ .load_ucode = il3945_load_bsm,
+ .dump_nic_error_log = il3945_dump_nic_error_log,
+ .apm_ops = {
+ .init = il3945_apm_init,
+ .config = il3945_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+ .release_semaphore = il3945_eeprom_release_semaphore,
+ },
+ .send_tx_power = il3945_send_tx_power,
+ .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il3945_ucode_rx_stats_read,
+ .tx_stats_read = il3945_ucode_tx_stats_read,
+ .general_stats_read = il3945_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+ .post_associate = il3945_post_associate,
+ .config_ap = il3945_config_ap,
+ .manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+ .get_hcmd_size = il3945_get_hcmd_size,
+ .build_addsta_hcmd = il3945_build_addsta_hcmd,
+ .request_scan = il3945_request_scan,
+ .post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+ .lib = &il3945_lib,
+ .hcmd = &il3945_hcmd,
+ .utils = &il3945_hcmd_utils,
+ .led = &il3945_led_ops,
+ .legacy = &il3945_legacy_ops,
+ .ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+ .name = "3945BG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+ .name = "3945ABG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+ {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 00000000000..9f42f79f877
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE "iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within the negative range of an s8 (as used in some apps). */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
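A quick numeric check of the reasoning above (the -85 dBm figure is only illustrative): averaging a real measurement with the "not available" default stays firmly negative and within s8 range, whereas a default of 0 would pull the average toward something that looks like a valid reading.

    (-85 + (-127)) / 2 = -106   /* still clearly "poor/unknown", still fits in an s8 */
    (-85 +    0  ) / 2 =  -42   /* would masquerade as a plausible noise measurement */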
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+ u64 data;
+ s32 success_counter;
+ s32 success_ratio;
+ s32 counter;
+ s32 average_tpt;
+ unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+ spinlock_t lock;
+ struct il_priv *il;
+ s32 *expected_tpt;
+ unsigned long last_partial_flush;
+ unsigned long last_flush;
+ u32 flush_time;
+ u32 last_tx_packets;
+ u32 tx_packets;
+ u8 tgg;
+ u8 flush_pending;
+ u8 start_rate;
+ struct timer_list rate_scale_flush;
+ struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+ struct il_station_priv_common common;
+ struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+ IL_ANTENNA_DIVERSITY,
+ IL_ANTENNA_MAIN,
+ IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
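A minimal sketch of the RTS rule spelled out in the comment above; the helper name is hypothetical and not part of this patch:

    /* Hypothetical helper illustrating the RTS-threshold semantics above. */
    static bool il3945_example_needs_rts(u16 mpdu_len, u32 rts_threshold)
    {
    	if (rts_threshold == MIN_RTS_THRESHOLD)
    		return true;			/* RTS for all data/mgmt frames */
    	if (rts_threshold > MAX_MSDU_SIZE)
    		return false;			/* effectively "no RTS" */
    	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
    }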
+
+#define IL_TX_FIFO_AC0 0
+#define IL_TX_FIFO_AC1 1
+#define IL_TX_FIFO_AC2 2
+#define IL_TX_FIFO_AC3 3
+#define IL_TX_FIFO_HCCA_1 5
+#define IL_TX_FIFO_HCCA_2 6
+#define IL_TX_FIFO_NONE 7
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il3945_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
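Worked example of the conversions above, using IEEE80211_SCTL_SEQ == 0xFFF0 (the sequence-number mask of the 16-bit sequence-control field):

    SEQ_TO_SN(0x1234) = (0x1234 & 0xFFF0) >> 4 = 0x123
    SN_TO_SEQ(0x123)  = (0x123 << 4) & 0xFFF0  = 0x1230   /* fragment bits cleared */
    MAX_SN            = 0xFFF0 >> 4            = 0xFFF    /* i.e. 4095 */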
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+#define STA_PS_STATUS_WAKE 0
+#define STA_PS_STATUS_SLEEP 1
+
+struct il3945_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+ x->u.rx_frame.stats.payload + \
+ x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+ IL_RX_HDR(x)->payload + \
+ le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
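The macros above encode the variable-length 3945 Rx layout: the stats block is followed by phy_count bytes of PHY data, then the frame header, then len bytes of payload, then the end-of-frame record. A minimal usage sketch; the struct il_rx_pkt parameter type is assumed from elsewhere in this series and the helper itself is illustrative:

    /* Illustrative only; names outside the macros above are assumptions. */
    static u16 il3945_example_payload_len(struct il_rx_pkt *pkt)
    {
    	struct il3945_rx_frame_hdr *hdr = IL_RX_HDR(pkt);

    	/* IL_RX_DATA(pkt) == hdr->payload; IL_RX_END(pkt) sits right after it */
    	return le16_to_cpu(hdr->len);
    }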
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr,
+ int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+ char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE: The implementations of these functions are hardware specific,
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_ <-- It's part of iwlwifi
+ * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_ <-- Called from work queue context
+ * il3945_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+ struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset,
+ u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame,
+ u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE: This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET 95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ * to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ * (PA) output voltage detector. This same detector is used during Tx of
+ * long packets in normal operation to provide feedback as to proper output
+ * level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+ u8 gain_idx; /* idx into power (gain) setup table ... */
+ s8 power; /* ... for this pwr level for this chnl group */
+ u16 v_det; /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ * to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+ struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
+ s32 a, b, c, d, e; /* coefficients for voltage->power
+ * formula (signed) */
+ s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+ * frequency (signed) */
+ s8 saturation_power; /* highest power possible by h/w in this
+ * band */
+ u8 group_channel; /* "representative" channel # in this band */
+ s16 temperature; /* h/w temperature at factory calib this band
+ * (signed) */
+} __packed;
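A hedged sketch of the interpolation described above, assuming samples[] is ordered by ascending power; the helper is illustrative and not the driver's actual txpower setup routine:

    /* Illustrative: linearly interpolate a gain idx between two factory samples. */
    static u8 il3945_example_gain_for_power(const struct il3945_eeprom_txpower_group *grp,
    					    s8 power)
    {
    	int i;

    	for (i = 0; i < 4; i++) {
    		const struct il3945_eeprom_txpower_sample *lo = &grp->samples[i];
    		const struct il3945_eeprom_txpower_sample *hi = &grp->samples[i + 1];

    		if (power < lo->power || power > hi->power)
    			continue;
    		if (hi->power == lo->power)
    			return lo->gain_idx;
    		return lo->gain_idx +
    		       (hi->gain_idx - lo->gain_idx) * (power - lo->power) /
    		       (hi->power - lo->power);
    	}
    	return grp->samples[4].gain_idx;	/* clamp to the highest sample */
    }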
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify the a/b/c/d/e coeffs based on the
+ * difference between the current temperature and the factory calibration
+ * temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+ u32 Ta;
+ u32 Tb;
+ u32 Tc;
+ u32 Td;
+ u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+ u8 reserved0[16];
+ u16 device_id; /* abs.ofs: 16 */
+ u8 reserved1[2];
+ u16 pmc; /* abs.ofs: 20 */
+ u8 reserved2[20];
+ u8 mac_address[6]; /* abs.ofs: 42 */
+ u8 reserved3[58];
+ u16 board_revision; /* abs.ofs: 106 */
+ u8 reserved4[11];
+ u8 board_pba_number[9]; /* abs.ofs: 119 */
+ u8 reserved5[8];
+ u16 version; /* abs.ofs: 136 */
+ u8 sku_cap; /* abs.ofs: 138 */
+ u8 leds_mode; /* abs.ofs: 139 */
+ u16 oem_mode;
+ u16 wowlan_mode; /* abs.ofs: 142 */
+ u16 leds_time_interval; /* abs.ofs: 144 */
+ u8 leds_off_time; /* abs.ofs: 146 */
+ u8 leds_on_time; /* abs.ofs: 147 */
+ u8 almgor_m_version; /* abs.ofs: 148 */
+ u8 antenna_switch_type; /* abs.ofs: 149 */
+ u8 reserved6[42];
+ u8 sku_id[4]; /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+ u16 band_1_count; /* abs.ofs: 196 */
+ struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+ u16 band_2_count; /* abs.ofs: 226 */
+ struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+ u16 band_3_count; /* abs.ofs: 254 */
+ struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+ u16 band_4_count; /* abs.ofs: 280 */
+ struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+ u16 band_5_count; /* abs.ofs: 304 */
+ struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
+
+ u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+ struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+ struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
+ u8 reserved16[172]; /* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
+#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES 5
+#define IL39_CMD_QUEUE_NUM 4
+
+#define IL_DEFAULT_TX_RETRY 15
+
+/*********************************************/
+
+#define RFD_SIZE 4
+#define NUM_TFD_CHUNKS 4
+
+#define TFD_CTL_COUNT_SET(n) (n << 24)
+#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n) (n << 28)
+#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
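A short usage sketch for the control-word helpers above (the values are illustrative): the chunk count occupies bits 24-26 and the pad length bits 28-31 of a TFD's control word.

    /* Illustrative encode/decode of a TFD control word using the macros above. */
    u32 ctl = TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(2);

    int chunks = TFD_CTL_COUNT_GET(ctl);	/* 2 valid entries in tbs[]  */
    int pad    = TFD_CTL_PAD_GET(ctl);		/* 2 bytes of header padding */

In the driver proper the value ends up in control_flags, which struct il3945_tfd below declares as a little-endian __le32, so it would be stored with cpu_to_le32() before the TFD is handed to the device.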
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND (0x000000)
+#define IL39_RTC_INST_UPPER_BOUND (0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+ IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+ IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+ addr < IL39_RTC_DATA_UPPER_BOUND);
+}
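For example, with the bounds above, il3945_hw_valid_rtc_data_addr(0x800000) and (0x807FFF) return true, while (0x7FFFFF) and (0x808000) return false; the upper bound is exclusive.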
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+ __le32 tx_base_ptr[8];
+} __packed;
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly readable/writable by the driver (e.g. Linux uses
+ * writel()). Addresses are offsets from the device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND (0x0800)
+#define FH39_MEM_UPPER_BOUND (0x1000)
+
+#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
+ ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
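As a worked example of how these offsets compose, FH39_RCSR_CONFIG(0), which il3945_hw_rxq_stop() writes earlier in this patch, expands to:

    FH39_RCSR_CONFIG(0) = FH39_RCSR_TBL + 0 * 0x40 + 0x00
                        = (FH39_MEM_LOWER_BOUND + 0x400)
                        = 0x0800 + 0x400 = 0x0C00

i.e. offset 0x0C00 from the device's mapped PCI base address.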
+
+/* RSSR */
+#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+ (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+ FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+struct il3945_tfd_tb {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct il3945_tfd {
+ __le32 control_flags;
+ struct il3945_tfd_tb tbs[4];
+ u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e686..d3248e3ef23 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
+#include "common.h"
+#include "4965.h"
/*****************************************************************************
* INIT calibrations framework
*****************************************************************************/
-struct statistics_general_data {
+struct stats_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-void iwl4965_calib_free_results(struct iwl_priv *priv)
+void
+il4965_calib_free_results(struct il_priv *il)
{
int i;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ for (i = 0; i < IL_CALIB_MAX; i++) {
+ kfree(il->calib_results[i].buf);
+ il->calib_results[i].buf = NULL;
+ il->calib_results[i].buf_len = 0;
}
}
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
* enough to receive all of our own network traffic, but not so
* high that our DSP gets too busy trying to lock onto non-network
* activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time,
- struct statistics_general_data *rx_info)
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+ struct stats_general_data *rx_info)
{
u32 max_nrg_cck = 0;
int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
data->nrg_auto_corr_silence_diff = 0;
/* Find max silence rssi among all 3 receivers.
* This is background noise, which may include transmissions from other
* networks, measured during silence before our network's beacon */
- silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
- ALL_BAND_FILTER) >> 8);
+ silence_rssi_a =
+ (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+ silence_rssi_b =
+ (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+ silence_rssi_c =
+ (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
val = max(silence_rssi_b, silence_rssi_c);
max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
val = data->nrg_silence_rssi[i];
silence_ref = max(silence_ref, val);
}
- IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
- silence_rssi_a, silence_rssi_b, silence_rssi_c,
- silence_ref);
+ D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+ silence_rssi_b, silence_rssi_c, silence_ref);
/* Find max rx energy (min value!) among all 3 receivers,
* measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
max_nrg_cck += 6;
- IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
- rx_info->beacon_energy_a, rx_info->beacon_energy_b,
- rx_info->beacon_energy_c, max_nrg_cck - 6);
+ D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
/* Count number of consecutive beacons with fewer-than-desired
* false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
data->num_in_cck_no_fa++;
else
data->num_in_cck_no_fa = 0;
- IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
- data->num_in_cck_no_fa);
+ D_CALIB("consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
/* If we got too many false alarms this time, reduce sensitivity */
- if ((false_alarms > max_false_alarms) &&
- (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
- false_alarms, max_false_alarms);
- IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
- data->nrg_curr_state = IWL_FA_TOO_MANY;
+ if (false_alarms > max_false_alarms &&
+ data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+ D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+ max_false_alarms);
+ D_CALIB("... reducing sensitivity\n");
+ data->nrg_curr_state = IL_FA_TOO_MANY;
/* Store for "fewer than desired" on later beacon */
data->nrg_silence_ref = silence_ref;
/* increase energy threshold (reduce nrg value)
* to decrease sensitivity */
data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
- /* Else if we got fewer than desired, increase sensitivity */
+ /* Else if we got fewer than desired, increase sensitivity */
} else if (false_alarms < min_false_alarms) {
- data->nrg_curr_state = IWL_FA_TOO_FEW;
+ data->nrg_curr_state = IL_FA_TOO_FEW;
/* Compare silence level with silence level for most recent
* healthy number or too many false alarms */
- data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
- (s32)silence_ref;
+ data->nrg_auto_corr_silence_diff =
+ (s32) data->nrg_silence_ref - (s32) silence_ref;
- IWL_DEBUG_CALIB(priv,
- "norm FA %u < min FA %u, silence diff %d\n",
- false_alarms, min_false_alarms,
- data->nrg_auto_corr_silence_diff);
+ D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
/* Increase value to increase sensitivity, but only if:
* 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* from a previous beacon with too many, or healthy # FAs
* OR 2) We've seen a lot of beacons (100) with too few
* false alarms */
- if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
- IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ D_CALIB("... increasing sensitivity\n");
/* Increase nrg value to increase sensitivity */
val = data->nrg_th_cck + NRG_STEP_CCK;
- data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
} else {
- IWL_DEBUG_CALIB(priv,
- "... but not changing sensitivity\n");
+ D_CALIB("... but not changing sensitivity\n");
}
- /* Else we got a healthy number of false alarms, keep status quo */
+ /* Else we got a healthy number of false alarms, keep status quo */
} else {
- IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
- data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+ D_CALIB(" FA in safe zone\n");
+ data->nrg_curr_state = IL_FA_GOOD_RANGE;
/* Store for use in "fewer than desired" with later beacon */
data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
/* If previous beacon had too many false alarms,
* give it some extra margin by reducing sensitivity again
* (but don't go below measured energy of desired Rx) */
- if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
- IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+ D_CALIB("... increasing margin\n");
if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
data->nrg_th_cck -= NRG_MARGIN;
else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* Lower value is higher energy, so we use max()!
*/
data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
- IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+ D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
data->nrg_prev_state = data->nrg_curr_state;
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
else {
val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
data->auto_corr_cck =
- min((u32)ranges->auto_corr_max_cck, val);
+ min((u32) ranges->auto_corr_max_cck, val);
}
val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- min((u32)ranges->auto_corr_max_cck_mrc, val);
- } else if ((false_alarms < min_false_alarms) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ min((u32) ranges->auto_corr_max_cck_mrc, val);
+ } else if (false_alarms < min_false_alarms &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
/* Decrease auto_corr values to increase sensitivity */
val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
- data->auto_corr_cck =
- max((u32)ranges->auto_corr_min_cck, val);
+ data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- max((u32)ranges->auto_corr_min_cck_mrc, val);
+ max((u32) ranges->auto_corr_min_cck_mrc, val);
}
return 0;
}
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time)
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
{
u32 val;
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
/* If we got too many false alarms this time, reduce sensitivity */
if (false_alarms > max_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
- false_alarms, max_false_alarms);
+ D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+ max_false_alarms);
val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- min((u32)ranges->auto_corr_max_ofdm, val);
+ min((u32) ranges->auto_corr_max_ofdm, val);
val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- min((u32)ranges->auto_corr_max_ofdm_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
}
/* Else if we got fewer than desired, increase sensitivity */
else if (false_alarms < min_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
- false_alarms, min_false_alarms);
+ D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+ min_false_alarms);
val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- max((u32)ranges->auto_corr_min_ofdm, val);
+ max((u32) ranges->auto_corr_min_ofdm, val);
val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- max((u32)ranges->auto_corr_min_ofdm_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
} else {
- IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
- min_false_alarms, false_alarms, max_false_alarms);
+ D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
}
return 0;
}
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
- struct iwl_sensitivity_data *data,
- __le16 *tbl)
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+ struct il_sensitivity_data *data,
+ __le16 *tbl)
{
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm);
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_x1);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck);
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
- tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_cck);
- tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_ofdm);
-
- tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
- cpu_to_le16(data->barker_corr_th_min);
- tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16(data->barker_corr_th_min_mrc);
- tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
- cpu_to_le16(data->nrg_th_cca);
-
- IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
- data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
- data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
- data->nrg_th_ofdm);
-
- IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
- data->auto_corr_cck, data->auto_corr_cck_mrc,
- data->nrg_th_cck);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+ D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+ data->auto_corr_cck_mrc, data->nrg_th_cck);
}
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
{
- struct iwl_sensitivity_cmd cmd;
- struct iwl_sensitivity_data *data = NULL;
- struct iwl_host_cmd cmd_out = {
- .id = SENSITIVITY_CMD,
- .len = sizeof(struct iwl_sensitivity_cmd),
+ struct il_sensitivity_cmd cmd;
+ struct il_sensitivity_data *data = NULL;
+ struct il_host_cmd cmd_out = {
+ .id = C_SENSITIVITY,
+ .len = sizeof(struct il_sensitivity_cmd),
.flags = CMD_ASYNC,
.data = &cmd,
};
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
memset(&cmd, 0, sizeof(cmd));
- iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+ il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
/* Update uCode's "work" table, and copy it to DSP */
- cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+ cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
/* Don't send command to uCode if nothing has changed */
- if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
- sizeof(u16)*HD_TABLE_SIZE)) {
- IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ if (!memcmp
+ (&cmd.table[0], &(il->sensitivity_tbl[0]),
+ sizeof(u16) * HD_TBL_SIZE)) {
+ D_CALIB("No change in C_SENSITIVITY\n");
return 0;
}
/* Copy table for comparison next time */
- memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
- sizeof(u16)*HD_TABLE_SIZE);
+ memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16) * HD_TBL_SIZE);
- return iwl_legacy_send_cmd(priv, &cmd_out);
+ return il_send_cmd(il, &cmd_out);
}
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
+void
+il4965_init_sensitivity(struct il_priv *il)
{
int ret = 0;
int i;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+ D_CALIB("Start il4965_init_sensitivity\n");
/* Clear driver's sensitivity algo data */
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
if (ranges == NULL)
return;
- memset(data, 0, sizeof(struct iwl_sensitivity_data));
+ memset(data, 0, sizeof(struct il_sensitivity_data));
data->num_in_cck_no_fa = 0;
- data->nrg_curr_state = IWL_FA_TOO_MANY;
- data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_curr_state = IL_FA_TOO_MANY;
+ data->nrg_prev_state = IL_FA_TOO_MANY;
data->nrg_silence_ref = 0;
data->nrg_silence_idx = 0;
data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
data->nrg_silence_rssi[i] = 0;
- data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
- data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- ret |= iwl4965_sensitivity_write(priv);
- IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+ ret |= il4965_sensitivity_write(il);
+ D_CALIB("<<return 0x%X\n", ret);
}
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
{
u32 rx_enable_time;
u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
u32 bad_plcp_ofdm;
u32 norm_fa_ofdm;
u32 norm_fa_cck;
- struct iwl_sensitivity_data *data = NULL;
- struct statistics_rx_non_phy *rx_info;
- struct statistics_rx_phy *ofdm, *cck;
+ struct il_sensitivity_data *data = NULL;
+ struct stats_rx_non_phy *rx_info;
+ struct stats_rx_phy *ofdm, *cck;
unsigned long flags;
- struct statistics_general_data statis;
+ struct stats_general_data statis;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
- if (!iwl_legacy_is_any_associated(priv)) {
- IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ if (!il_is_any_associated(il)) {
+ D_CALIB("<< - not associated\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
- ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
- cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+ rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+ ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+ cck = &(((struct il_notif_stats *)resp)->rx.cck);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB("<< invalid data.\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
statis.beacon_silence_rssi_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
statis.beacon_silence_rssi_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
statis.beacon_silence_rssi_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c);
- statis.beacon_energy_a =
- le32_to_cpu(rx_info->beacon_energy_a);
- statis.beacon_energy_b =
- le32_to_cpu(rx_info->beacon_energy_b);
- statis.beacon_energy_c =
- le32_to_cpu(rx_info->beacon_energy_c);
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
- IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+ D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ D_CALIB("<< RX Enable Time == 0!\n");
return;
}
- /* These statistics increase monotonically, and do not reset
+ /* These stats increase monotonically, and do not reset
* at each beacon. Calculate difference from last value, or just
- * use the new statistics value if it has reset or wrapped around. */
+ * use the new stats value if it has reset or wrapped around. */
if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
data->last_bad_plcp_cnt_cck = bad_plcp_cck;
else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
norm_fa_cck = fa_cck + bad_plcp_cck;
- IWL_DEBUG_CALIB(priv,
- "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
- bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+ D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
- iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
- iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+ il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+ il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
- iwl4965_sensitivity_write(priv);
+ il4965_sensitivity_write(il);
}
-static inline u8 iwl4965_find_first_chain(u8 mask)
+static inline u8
+il4965_find_first_chain(u8 mask)
{
if (mask & ANT_A)
return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
* disconnected.
*/
static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
- struct iwl_chain_noise_data *data)
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+ struct il_chain_noise_data *data)
{
u32 active_chains = 0;
u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] =
+ data->chain_signal_a /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] =
+ data->chain_signal_b /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] =
+ data->chain_signal_c /
+ il->cfg->base_params->chain_noise_num_beacons;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains = (1 << max_average_sig_antenna_i);
}
- IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
- average_sig[0], average_sig[1], average_sig[2]);
- IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
- max_average_sig, max_average_sig_antenna_i);
+ D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+ average_sig[2]);
+ D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+ max_average_sig_antenna_i);
/* Compare signal strengths for all 3 receivers. */
for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
data->disconn_array[i] = 1;
else
active_chains |= (1 << i);
- IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
- "disconn_array[i] = %d\n",
- i, rssi_delta, data->disconn_array[i]);
+ D_CALIB("i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n", i, rssi_delta,
+ data->disconn_array[i]);
}
}
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= il->hw_params.valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
- * priv->hw_setting.valid_tx_ant */
+ * il->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(il->hw_params.valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == il->hw_params.tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
* connect the first valid tx chain
*/
first_chain =
- iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ il4965_find_first_chain(il->cfg->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
- IWL_DEBUG_CALIB(priv,
- "All Tx chains are disconnected W/A - declare %d as connected\n",
- first_chain);
+ D_CALIB("All Tx chains are disconnected"
+ "- declare %d as connected\n", first_chain);
break;
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
- active_chains != priv->chain_noise_data.active_chains)
- IWL_DEBUG_CALIB(priv,
- "Detected that not all antennas are connected! "
- "Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ if (active_chains != il->hw_params.valid_rx_ant &&
+ active_chains != il->chain_noise_data.active_chains)
+ D_CALIB("Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n", active_chains,
+ il->hw_params.valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
- IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
- active_chains);
+ D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+ u16 min_average_noise_antenna_i, u32 min_average_noise,
+ u8 default_chain)
{
int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ struct il_chain_noise_data *data = &il->chain_noise_data;
data->delta_gain_code[min_average_noise_antenna_i] = 0;
for (i = default_chain; i < NUM_RX_CHAINS; i++) {
s32 delta_g = 0;
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ if (!data->disconn_array[i] &&
+ data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
+ min(data->delta_gain_code[i],
(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
+ (data->delta_gain_code[i] | (1 << 2));
} else {
data->delta_gain_code[i] = 0;
}
}
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
+ D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+ data->delta_gain_code[1], data->delta_gain_code[2]);
/* Differential gain gets sent to uCode only once */
if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
+ struct il_calib_diff_gain_cmd cmd;
data->radio_write = 1;
memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = data->delta_gain_code[0];
cmd.diff_gain_b = data->delta_gain_code[1];
cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
+ ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
+ D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
/* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ data->state = IL_CHAIN_NOISE_CALIBRATED;
}
}
-
-
/*
- * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise stats for each of
* 3 receivers/antennas/rx-chains, then figure out:
* 1) Which antennas are connected.
* 2) Differential rx gain settings to balance the 3 receivers.
*/
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
{
- struct iwl_chain_noise_data *data = NULL;
+ struct il_chain_noise_data *data = NULL;
u32 chain_noise_a;
u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u32 chain_sig_a;
u32 chain_sig_b;
u32 chain_sig_c;
- u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
- u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+ u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u8 rxon_band24;
u8 stat_band24;
unsigned long flags;
- struct statistics_rx_non_phy *rx_info;
+ struct stats_rx_non_phy *rx_info;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct il_rxon_context *ctx = &il->ctx;
- if (priv->disable_chain_noise_cal)
+ if (il->disable_chain_noise_cal)
return;
- data = &(priv->chain_noise_data);
+ data = &(il->chain_noise_data);
/*
* Accumulate just the first "chain_noise_num_beacons" after
* the first association, then we're done forever.
*/
- if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
- if (data->state == IWL_CHAIN_NOISE_ALIVE)
- IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IL_CHAIN_NOISE_ALIVE)
+ D_CALIB("Wait for noise calib reset\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
- rx.general);
+ rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB(" << Interference data unavailable\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- stat_band24 = !!(((struct iwl_notif_statistics *)
- stat_resp)->flag &
- STATISTICS_REPLY_FLG_BAND_24G_MSK);
- stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
- stat_resp)->flag) >> 16;
+ stat_band24 =
+ !!(((struct il_notif_stats *)stat_resp)->
+ flag & STATS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum =
+ le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
/* Make sure we accumulate data for just the associated channel
* (even if scanning). */
- if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
- IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
- rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+ D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+ rxon_band24);
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
/*
- * Accumulate beacon statistics values across
+ * Accumulate beacon stats values across
* "chain_noise_num_beacons"
*/
- chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
- IN_BAND_FILTER;
- chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
- IN_BAND_FILTER;
- chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
- IN_BAND_FILTER;
+ chain_noise_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ chain_noise_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ chain_noise_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
data->beacon_count++;
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
- IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
- rxon_chnum, rxon_band24, data->beacon_count);
- IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
- chain_sig_a, chain_sig_b, chain_sig_c);
- IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
- chain_noise_a, chain_noise_b, chain_noise_c);
+ D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+ data->beacon_count);
+ D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+ chain_sig_c);
+ D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+ chain_noise_c);
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
return;
/* Analyze signal for disconnected antenna */
- iwl4965_find_disconn_antenna(priv, average_sig, data);
+ il4965_find_disconn_antenna(il, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] =
+ data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] =
+ data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] =
+ data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
for (i = 0; i < NUM_RX_CHAINS; i++) {
- if (!(data->disconn_array[i]) &&
- (average_noise[i] <= min_average_noise)) {
+ if (!data->disconn_array[i] &&
+ average_noise[i] <= min_average_noise) {
/* This means that chain i is active and has
* lower noise values so far: */
min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
}
- IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
- average_noise[0], average_noise[1],
- average_noise[2]);
+ D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+ average_noise[1], average_noise[2]);
- IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
- min_average_noise, min_average_noise_antenna_i);
+ D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+ min_average_noise_antenna_i);
- iwl4965_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+ il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+ min_average_noise,
+ il4965_find_first_chain(il->cfg->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ if (il->cfg->ops->lib->update_chain_flags)
+ il->cfg->ops->lib->update_chain_flags(il);
- data->state = IWL_CHAIN_NOISE_DONE;
- iwl_legacy_power_update_mode(priv, false);
+ data->state = IL_CHAIN_NOISE_DONE;
+ il_power_update_mode(il, false);
}
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+void
+il4965_reset_run_time_calib(struct il_priv *il)
{
int i;
- memset(&(priv->sensitivity_data), 0,
- sizeof(struct iwl_sensitivity_data));
- memset(&(priv->chain_noise_data), 0,
- sizeof(struct iwl_chain_noise_data));
+ memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+ memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
for (i = 0; i < NUM_RX_CHAINS; i++)
- priv->chain_noise_data.delta_gain_code[i] =
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+ il->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
- /* Ask for statistics now, the uCode will send notification
+ /* Ask for stats now, the uCode will send notification
* periodically after association */
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+ il_send_stats_request(il, CMD_ASYNC, true);
}
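
The per-chain differential gain in il4965_gain_computation() above comes down to a small piece of integer arithmetic: each connected chain's average noise excess over the quietest chain is scaled by 10/15, clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE, and tagged with bit 2, while the quietest chain keeps code 0 as the reference. The standalone sketch below reproduces only that arithmetic for illustration; it is not part of the patch, the value assumed here for CHAIN_NOISE_MAX_DELTA_GAIN_CODE is a placeholder (the real constant lives in the driver headers), and disconnected chains are ignored.

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only; the real constants live in the iwlegacy headers. */
#define NUM_RX_CHAINS			3
#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE	3	/* assumed for this example */

/* Mirrors the per-chain arithmetic in il4965_gain_computation() above:
 * scale the noise excess by 10/15, clamp it, and set bit 2. */
static uint8_t delta_gain_code(uint32_t chain_noise, uint32_t min_noise)
{
	int32_t delta_g = (int32_t)(chain_noise - min_noise);
	uint8_t code = (uint8_t)((delta_g * 10) / 15);

	if (code > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
		code = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
	return code | (1 << 2);
}

int main(void)
{
	uint32_t average_noise[NUM_RX_CHAINS] = { 34, 30, 41 };	/* made up */
	uint32_t min_noise = 30;
	int quietest = 1;	/* index of the quietest chain */
	int i;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		/* The quietest chain is the reference and keeps code 0. */
		uint8_t code = (i == quietest) ? 0 :
		    delta_gain_code(average_noise[i], min_noise);
		printf("chain %c: delta_gain_code 0x%x\n", 'a' + i, code);
	}
	return 0;
}

With the sample noise figures this prints 0x6, 0x0 and 0x7 for chains a, b and c, showing the clamping that happens before the codes are packed into il_calib_diff_gain_cmd and sent with C_PHY_CALIBRATION.
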
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 00000000000..98ec39f56ba
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(il->_4965.stats.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+ "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+ "disabled");
+
+ return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct stats_rx_phy) * 40 +
+ sizeof(struct stats_rx_non_phy) * 40 +
+ sizeof(struct stats_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct stats_rx_non_phy *general, *accum_general;
+ struct stats_rx_non_phy *delta_general, *max_general;
+ struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * the statistic information displayed here is based on
+ * the last stats notification from uCode and
+ * might not reflect the current uCode activity
+ */
+ ofdm = &il->_4965.stats.rx.ofdm;
+ cck = &il->_4965.stats.rx.cck;
+ general = &il->_4965.stats.rx.general;
+ ht = &il->_4965.stats.rx.ofdm_ht;
+ accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+ accum_cck = &il->_4965.accum_stats.rx.cck;
+ accum_general = &il->_4965.accum_stats.rx.general;
+ accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+ delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+ delta_cck = &il->_4965.delta_stats.rx.cck;
+ delta_general = &il->_4965.delta_stats.rx.general;
+ delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+ max_ofdm = &il->_4965.max_delta.rx.ofdm;
+ max_cck = &il->_4965.max_delta.rx.cck;
+ max_general = &il->_4965.max_delta.rx.general;
+ max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load, delta_general->channel_load,
+ max_general->channel_load);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistic information displayed here is based on
+ * the last stats notification from uCode and
+ * might not reflect the current uCode activity
+ */
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta_tx = &il->_4965.delta_stats.tx;
+ max_tx = &il->_4965.max_delta.tx;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct stats_general) * 10 + 300;
+ ssize_t ret;
+ struct stats_general_common *general, *accum_general;
+ struct stats_general_common *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistic information displayed here is based on
+ * the last stats notification from uCode and
+ * might not reflect the current uCode activity
+ */
+ general = &il->_4965.stats.general.common;
+ dbg = &il->_4965.stats.general.common.dbg;
+ div = &il->_4965.stats.general.common.div;
+ accum_general = &il->_4965.accum_stats.general.common;
+ accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+ accum_div = &il->_4965.accum_stats.general.common.div;
+ delta_general = &il->_4965.delta_stats.general.common;
+ max_general = &il->_4965.max_delta.general.common;
+ delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+ max_dbg = &il->_4965.max_delta.general.common.dbg;
+ delta_div = &il->_4965.delta_stats.general.common.div;
+ max_div = &il->_4965.max_delta.general.common.div;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
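
The debugfs readers added above all build their output the same way: a single kzalloc'd buffer, a running position advanced by scnprintf(buf + pos, bufsz - pos, ...), and a final simple_read_from_buffer() to copy the result to user space. The short user-space sketch below imitates only that accumulation pattern; it is not part of the patch, scnprintf_like() is a stand-in written here for the kernel's scnprintf() (which returns the number of bytes actually written, never more than the space left), and the counter values are invented.

#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>

/* Stand-in for the kernel's scnprintf(): return what was actually written,
 * capped at size - 1, so "pos" can keep advancing safely as space runs out. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	static const char *fmt_table = "  %-30s %10u %10u %10u %10u\n";
	static const char *fmt_header =
	    "%-32s    current  cumulative       delta         max\n";
	int bufsz = 512, pos = 0;
	char *buf = calloc(1, bufsz);

	if (!buf)
		return 1;
	pos += scnprintf_like(buf + pos, bufsz - pos, fmt_header,
			      "Statistics_Rx - OFDM:");
	pos += scnprintf_like(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
			      7u, 123u, 3u, 9u);	/* invented numbers */
	pos += scnprintf_like(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
			      2u, 57u, 1u, 4u);	/* invented numbers */
	fwrite(buf, 1, pos, stdout);	/* the driver uses simple_read_from_buffer() */
	free(buf);
	return 0;
}

Passing bufsz - pos as the size argument on every call is what lets the real readers above chain dozens of scnprintf() calls without tracking overflow by hand.
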
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 00000000000..1667232af64
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6515 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IL_ERR("Tx flush command to flush out all frames\n");
+ if (!test_bit(S_EXIT_PENDING, &il->status))
+ queue_work(il->workqueue, &il->tx_flush);
+ }
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write idx */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size |
+ (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = il_rx_queue_alloc(il);
+ if (ret) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il4965_rx_queue_reset(il, rxq);
+
+ il4965_rx_replenish(il);
+
+ il4965_rx_init(il, rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!il->txq) {
+ ret = il4965_txq_ctx_alloc(il);
+ if (ret)
+ return ret;
+ } else
+ il4965_txq_ctx_reset(il);
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) (dma_addr >> 8));
+}
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization)
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("alloc_pages failed, " "order: %d\n",
+ il->hw_params.rx_page_order);
+
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority ==
+ GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+ unsigned long flags;
+
+ il4965_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il4965_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+ il4965_rx_allocate(il, GFP_ATOMIC);
+
+ il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non-NULL it is unmapped and freed.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+ /* stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
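+/*
+ * Example (following the logic above; values illustrative): an HT frame
+ * simply reports the MCS number taken from the low byte of rate_n_flags,
+ * while a legacy 5 GHz frame whose PLCP value matches
+ * il_rates[IL_FIRST_OFDM_RATE + 2] is reported as index 2, because 5 GHz
+ * indices are returned relative to the first OFDM rate.
+ */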
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct il4965_rx_non_cfg_phy *ncphy =
+ (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc =
+ (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+ IL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
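+/*
+ * Worked example of the formula above (numbers purely illustrative, only
+ * the arithmetic matters): with max_rssi = 60 dB reported by the DSP,
+ * agc = 70 dB and an offset of 44, the reported signal is
+ * 60 - 70 - 44 = -54 dBm. A higher AGC value (more radio gain needed)
+ * therefore yields a lower dBm figure.
+ */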
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |=
+ (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!il->cfg->mod_params->sw_crypto &&
+ il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct il_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /*
+ * N_RX and N_RX_MPDU are handled differently.
+ * N_RX: physical layer info is in this buffer
+ * N_RX_MPDU: physical layer info was sent in separate
+ * command and cached in il->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == N_RX) {
+ phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+ header =
+ (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status =
+ *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!il->_4965.last_phy_res_valid) {
+ IL_ERR("MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &il->_4965.last_phy_res;
+ amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status =
+ il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ D_DROP("dsp size out of range [0,20]: %d/n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band =
+ (phy_res->
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* The TSF isn't reliable. To keep the user experience smooth, this
+ * workaround does not propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+ il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+ il_dbg_log_rx_data_frame(il, len, header);
+ D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. Radiotap leaves this undefined:
+ * it wants an actual antenna number, but I always get "7"
+ * for most legacy frames I receive, indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+ &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ il->_4965.last_phy_res_valid = true;
+ memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+ enum ieee80211_band band, u8 is_active,
+ u8 n_probes, struct il_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+ le32_to_cpu(scan_ch->type),
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
+ passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
+{
+ int i;
+ u8 ind = *ant;
+
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind)) {
+ *ant = ind;
+ return;
+ }
+ }
+}
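+/*
+ * Illustrative use of il4965_toggle_tx_ant() (mask values assumed for the
+ * example): with valid = BIT(0) | BIT(2) (antennas A and C usable) and
+ * *ant currently 0, the loop advances 0 -> 1 (invalid) -> 2 (valid), so
+ * *ant becomes 2. If no other antenna in the mask is valid, *ant is left
+ * unchanged.
+ */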
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il_scan_cmd *scan;
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = il->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx = il_rxon_ctx_from_vif(vif);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_any_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time =
+ (extra | ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
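+ /*
+ * Worked example of the suspend_time encoding above (values
+ * illustrative): with suspend_time = 100 and a beacon interval of
+ * 102 TU, the quotient is 0 and the remainder 100 is scaled by 1024,
+ * giving scan_suspend_time = 0x19000; with an interval of 100 TU the
+ * quotient is 1 and the remainder 0, so
+ * scan_suspend_time = (1 << 22) | 0 = 0x400000.
+ */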
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod =
+ le32_to_cpu(il->ctx.active.
+ flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = RATE_6M_PLCP;
+ } else {
+ rate = RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = RATE_6M_PLCP;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+ * here instead of IL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ band = il->scan_band;
+
+ if (il->cfg->scan_rx_antennas[band])
+ rx_ant = il->cfg->scan_rx_antennas[band];
+
+ il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
+ rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
+ scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains =
+ rx_ant & ((u8) (il->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ D_SCAN("chain_noise_data.active_chains: %u\n",
+ il->chain_noise_data.active_chains);
+
+ rx_ant = il4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+
+ cmd_len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |=
+ (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+ scan->channel_count =
+ il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+
+ return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return il4965_add_bssid_station(il, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&il->sta_lock);
+
+ if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ D_TX("free more than tfds_in_queue (%u:%d)\n",
+ il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+ il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IL_TX_QUEUE_MSK 0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+ return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE 3
+#define IL_NUM_RX_CHAINS_SINGLE 2
+#define IL_NUM_IDLE_CHAINS_DUAL 2
+#define IL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (il4965_is_single_rx_stream(il))
+ return IL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (il->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
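+/*
+ * This is functionally equivalent to hweight8(chain_bitmap & 0xf), i.e. a
+ * popcount of the low four bits: e.g. chain_bitmap = 0x5 (chains A and C
+ * connected) yields 2.
+ */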
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ bool is_single = il4965_is_single_rx_stream(il);
+ bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, il4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (il->chain_noise_data.active_chains)
+ active_chains = il->chain_noise_data.active_chains;
+ else
+ active_chains = il->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = il4965_get_active_rx_chain_count(il);
+ idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+ /* correct the rx chain count according to hw settings
+ * and chain noise calibration
+ */
+ valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+ if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
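+/*
+ * Illustrative outcome of the logic above (values assumed): with all
+ * three chains reported connected (active_chains = 0x7), a MIMO-capable
+ * peer (not single stream, so active_rx_cnt = 3) and dynamic SMPS
+ * (idle_rx_cnt = 1), the staging rx_chain carries valid = 0x7, a MIMO
+ * count of 3 and an idle count of 1, and RXON_RX_CHAIN_MIMO_FORCE_MSK is
+ * set only while not in power-save (is_cam).
+ */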
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+ IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+ IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IL_CMD(FH49_TSSR_TX_STATUS_REG);
+ IL_CMD(FH49_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH49_RSCSR_CHNL0_WPTR,
+ FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_MEM_RSSR_SHARED_CTRL_REG,
+ FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IL_ERR("FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ il->missed_beacon_threshold) {
+ D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(S_SCANNING, &il->status))
+ il4965_init_sensitivity(il);
+ }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before the arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+ struct stats_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ rx_info = &(il->_4965.stats.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+ D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+ bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are DWORDs.
+ * FIXME: this function is for debugging only and does not deal
+ * with counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct stats_general_common *general, *accum_general;
+ struct stats_tx *tx, *accum_tx;
+
+ prev_stats = (__le32 *) &il->_4965.stats;
+ accum_stats = (u32 *) &il->_4965.accum_stats;
+ size = sizeof(struct il_notif_stats);
+ general = &il->_4965.stats.general.common;
+ accum_general = &il->_4965.accum_stats.general.common;
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta = (u32 *) &il->_4965.delta_stats;
+ max_delta = (u32 *) &il->_4965.max_delta;
+
+ for (i = sizeof(__le32); i < size;
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ accum_general->temperature = general->temperature;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ int change;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+ change =
+ ((il->_4965.stats.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+ /* TODO: reading some of stats is unneeded */
+ memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+ set_bit(S_STATS, &il->status);
+
+ /* Reschedule the stats timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&il->stats_periodic,
+ jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ (pkt->hdr.cmd == N_STATS)) {
+ il4965_rx_calc_noise(il);
+ queue_work(il->workqueue, &il->run_time_calib_work);
+ }
+ if (il->cfg->ops->lib->temp_ops.temperature && change)
+ il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_4965.accum_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.delta_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
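+/*
+ * Example (per the tid_to_ac table above): TID 6 maps to IEEE80211_AC_VO,
+ * so il4965_get_fifo_from_tid() returns ctx->ac_to_fifo[IEEE80211_AC_VO],
+ * while TIDs 8-15 are rejected with -EINVAL.
+ */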
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+ struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ const u8 rts_retry_limit = 60;
+ u32 rate_flags;
+ int rate_idx;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_idx = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /*
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * idx is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
+ || rate_idx > RATE_COUNT_LEGACY)
+ rate_idx =
+ rate_lowest_index(&il->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = il_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ D_TX("tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ keyconf->keyidx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct il_station_priv *sta_priv = NULL;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ struct il_tx_cmd *tx_cmd;
+ struct il_rxon_context *ctx = &il->ctx;
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* For management frames use the broadcast id, so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop_unlock;
+ }
+ }
+
+ D_TX("station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking il->lock */
+ spin_lock(&il->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = il->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl =
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+ txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
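+ /*
+ * The QoS sequence bookkeeping above follows the 802.11
+ * sequence-control encoding: bits 0-3 are the fragment number
+ * (IEEE80211_SCTL_FRAG) and bits 4-15 the sequence number
+ * (IEEE80211_SCTL_SEQ), hence the 0x10 increment. As an illustrative
+ * example, a stored seq_number of 0x0150 stamps sequence number 21
+ * into the header and the per-TID counter advances to 0x0160.
+ */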
+
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(il_queue_space(q) < q->high_mark)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ il->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ il->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&il->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+ il_dbg_log_tx_data_frame(il, len, hdr);
+
+ il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+ il_update_stats(il, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
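+ /*
+ * Worked example of the padding above (sizes illustrative): if the
+ * Tx command plus command header plus MAC header came to len = 86
+ * bytes, firstlen = (86 + 3) & ~3 = 88, so two pad bytes follow the
+ * MAC header and TX_CMD_FLG_MH_PAD_MSK tells the device to skip them.
+ */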
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+ 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ secondlen, 0, 0);
+ }
+
+ scratch_phys =
+ txcmd_phys + sizeof(struct il_cmd_header) +
+ offsetof(struct il_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+ le16_to_cpu(tx_cmd->
+ len));
+
+ pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ } else {
+ il_stop_queue(il, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+ ptr->addr =
+ dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+ }
+ il4965_free_dma_ptr(il, &il->kw);
+
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ il4965_hw_txq_ctx_free(il);
+
+ ret =
+ il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+ il->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IL_ERR("Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+ if (ret) {
+ IL_ERR("Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = il_alloc_txq_mem(il);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ (txq_id ==
+ il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (ret) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+error:
+ il4965_hw_txq_ctx_free(il);
+ il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+ return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&il->lock, flags);
+
+ il4965_txq_set_sched(il, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (il_poll_bit
+ (il, FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
+ IL_ERR("Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ il_rd(il, FH49_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_unmap(il);
+ else
+ il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr =
+ il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
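+/*
+ * Illustration of the packing above (values made up): each scheduler
+ * translate-table dword holds two queues' RA/TID entries; for an odd
+ * txq_id such as 11, with the dword currently 0x1234abcd and
+ * scd_q2ratid = 0x0042, the new value is 0x0042abcd, i.e. only the upper
+ * 16 bits are rewritten.
+ */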
+
+/**
+ * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be greater than or equal to IL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+ int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at idx corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ /* Set up Tx win size and frame limit for this queue */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+ & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct il_tid_data *tid_data;
+
+ tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+ IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = il4965_txq_ctx_activate_free(il);
+ if (txq_id == -1) {
+ IL_ERR("No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue is empty\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * txq_id must be greater than or equal to IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ il_txq_ctx_deactivate(il, txq_id);
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct il_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = il_sta_id(sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ tid_data = &il->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ D_HT("AGG stop before setup done\n");
+ goto turn_off;
+ case IL_AGG_ON:
+ break;
+ default:
+ IL_WARN("Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = il->txq[txq_id].q.write_ptr;
+ read_ptr = il->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ D_HT("Stopping a non empty AGG HW QUEUE\n");
+ il->stations[sta_id].tid[tid].agg.state =
+ IL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ D_HT("HW queue is empty\n");
+turn_off:
+ il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&il->sta_lock);
+ spin_lock(&il->lock);
+
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+ struct il_queue *q = &il->txq[txq_id].q;
+ u8 *addr = il->stations[sta_id].sta.sta.addr;
+ struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+ struct il_rxon_context *ctx;
+
+ ctx = &il->ctx;
+
+ lockdep_assert_held(&il->sta_lock);
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the
+ * aggregated HW queue */
+ if (txq_id == tid_data->agg.txq_id &&
+ q->read_ptr == q->write_ptr) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+ D_HT("HW queue empty: continue DELBA flow\n");
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct il_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(il->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+ if (!is_agg)
+ il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
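+/**
+ * il4965_tx_queue_reclaim - Reclaim processed TFDs up to and including idx
+ *
+ * Advances the queue's read_ptr past idx, freeing the skb and TFD for each
+ * reclaimed entry, and returns the number of QoS data frames freed.
+ */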
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+
+ if (WARN_ON_ONCE(tx_info->skb == NULL))
+ continue;
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+
+ il4965_tx_status(il, tx_info,
+ txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+ tx_info->skb = NULL;
+
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+ struct il_compressed_ba_resp *ba_resp)
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IL_ERR("Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx win bits */
+ sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+ if (sh < 0) /* something is wrong with the indices */
+ sh += 0x100;
+
+ if (agg->frame_count > (64 - sh)) {
+ D_TX_REPLY("more frames than bitmap size");
+ return -1;
+ }
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+ i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+ return 0;
+}
+
+/**
+ * il4965_hwrate_to_tx_control - translate ucode response to mac80211
+ * tx status control values
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct il_tx_queue *txq = NULL;
+ struct il_ht_agg *agg;
+ int idx;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx win, corresponds to idx
+ * (in Tx queue's circular buffer) of first TFD/frame in win */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= il->hw_params.max_txq_num) {
+ IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &il->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &il->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often, and in order not to fill
+ * the syslog, don't enable the logging by default.
+ */
+ D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find idx just before block-ack win */
+ idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+ agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+ "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow, ba_resp->scd_ssn);
+ D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in win */
+ il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack win (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+ if (il_queue_space(&txq->q) > txq->q.low_mark &&
+ il->mac80211_registered &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+
+ il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
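+/*
+ * il4965_sta_alloc_lq - allocate a default link quality (rate scaling)
+ * command for a station, with every retry entry set to the lowest rate
+ * of the current band (6M on 5 GHz, 1M otherwise) on the first antenna.
+ */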
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+ int i, r;
+ struct il_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IL_ERR("Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at the selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (il->band == IEEE80211_BAND_5GHZ)
+ r = RATE_6M_IDX;
+ else
+ r = RATE_1M_IDX;
+
+ if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |=
+ il4965_first_antenna(il->hw_params.valid_tx_ant) << RATE_MCS_ANT_POS;
+ rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant &
+ ~il4965_first_antenna(il->hw_params.valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IL_ERR("Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
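+/*
+ * il4965_static_wepkey_cmd - upload all default (static) WEP keys to uCode
+ *
+ * Builds one WEP key command covering every default key slot; unused slots
+ * get WEP_INVALID_OFFSET. The command is sent synchronously, and only if at
+ * least one key is configured unless send_if_empty is set.
+ */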
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct il_wep_cmd) +
+ sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+ struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct il_wep_cmd);
+ struct il_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0,
+ cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX; i++) {
+ wep_cmd->key[i].key_idx = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return il_send_cmd(il, &cmd);
+ else
+ return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ lockdep_assert_held(&il->mutex);
+
+ return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (il_is_rfkill(il)) {
+ D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+ ret = il4965_static_wepkey_cmd(il, ctx, true);
+ D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = il4965_static_wepkey_cmd(il, ctx, false);
+ D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+ keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.key_flags &
+ STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.key_flags &
+ STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((il->stations[sta_id].sta.key.key_flags &
+ STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+
+ /* This copy is actually not needed: we get the key with each TX */
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (il_scan_cancel(il)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+ for (i = 0; i < 5; i++)
+ il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+ /* We need to remove a key with an idx different from the one
+ * in the uCode. This means that the key we need to remove has
+ * been replaced by another one with a different idx.
+ * Don't do anything and return success.
+ */
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+ key_flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(il->stations[sta_id].sta.key.key_offset,
+ &il->ucode_key_table))
+ IL_ERR("idx %d not used in uCode key table.\n",
+ il->stations[sta_id].sta.key.key_offset);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (il_is_rfkill(il)) {
+ D_WEP("Not sending C_ADD_STA command because RFKILL is enabled.\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret =
+ il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret =
+ il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct il_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (il->stations[sta_id].lq)
+ kfree(il->stations[sta_id].lq);
+ else
+ D_INFO("Bcast sta rate scaling has not been initialized.\n");
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+ return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+ u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+ if (il->cfg->ops->hcmd->set_rxon_chain) {
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+ il_commit_rxon(il, &il->ctx);
+ }
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+ struct il_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+ struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+ u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The idx is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+ } else
+ IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+ struct il_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size =
+ il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+ tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags =
+ TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+ TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = il_get_lowest_plcp(il, il->beacon_ctx);
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+ if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+ struct il_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = il4965_get_free_frame(il);
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = il4965_hw_get_beacon_cmd(il, frame);
+ if (!frame_size) {
+ IL_ERR("Error configuring the beacon command\n");
+ il4965_free_frame(il, frame);
+ return -EINVAL;
+ }
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il4965_free_frame(il, frame);
+
+ return rc;
+}
+
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+ 16;
+
+ return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
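+/*
+ * Each TFD transfer buffer entry packs a 36-bit DMA address and a 12-bit
+ * length: tb->lo holds address bits 0-31, the low nibble of tb->hi_n_len
+ * holds address bits 32-35, and its upper 12 bits hold the length (hence
+ * the << 4 / >> 4 shifts above and below).
+ */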
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write idxes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+ struct il_tfd *tfd;
+ struct pci_dev *dev = il->pci_dev;
+ int idx = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[idx];
+
+ /* Sanity check on number of chunks */
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+ il4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
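+/*
+ * il4965_hw_txq_attach_buf_to_tfd - append one DMA buffer (addr/len) to the
+ * TFD at the queue's write_ptr, zeroing the TFD first when reset is set.
+ */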
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ struct il_queue *q;
+ struct il_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct il_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ IL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IL_TX_DMA_MASK))
+ IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+ il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell the NIC where to find the circular buffer of Tx Frame Descriptors
+ * for the given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
+ il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_init_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received. We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* don't send host command if rf-kill is on */
+ if (!il_is_ready_rf(il))
+ return;
+
+ il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il4965_beacon_notif *beacon =
+ (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+ unsigned long flags;
+
+ D_POWER("Stop all queues\n");
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&il->reg_lock, flags);
+ if (!_il_grab_nic_access(il))
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ il_wr(il, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ il4965_perform_ct_kill_task(il);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il4965_hdl_alive;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+ /*
+ * The same handler is used for both the reply to a discrete
+ * stats request from the host and the periodic stats
+ * notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il4965_hdl_c_stats;
+ il->handlers[N_STATS] = il4965_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+
+ /* status change handler */
+ il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+ il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+ /* Rx handlers */
+ il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+ il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+ /* block ack */
+ il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have an Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+ (pkt->hdr.cmd != N_RX_MPDU) &&
+ (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il4965_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il4965_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il4965_rx_replenish_now(il);
+ else
+ il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
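+/*
+ * il4965_irq_tasklet - bottom half: acks and services the pending uCode
+ * (CSR_INT) and flow-handler (CSR_FH_INT_STATUS) interrupt bits, then
+ * re-enables interrupts if they are still marked as enabled.
+ */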
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished transmitting "
+ "the frame(s).\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(_il_rd(il, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IL_WARN("RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ il->isr_stats.rfkill++;
+
+ /* The driver only loads the ucode once, when setting the
+ * interface up. Loading is allowed even if the radio is
+ * killed, hence update the killswitch state here. The
+ * rfkill handler will take care of restarting if needed.
+ */
+ if (!test_bit(S_ALIVE, &il->status)) {
+ if (hw_rf_kill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IL_ERR("Microcode CT kill error detected.\n");
+ il->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ for (i = 0; i < il->hw_params.max_txq_num; i++)
+ il_txq_update_write_ptr(il, &il->txq[i]);
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il4965_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("uCode load interrupt\n");
+ il->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ il->ucode_write_complete = 1;
+ wake_up(&il->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(il->inta_mask)) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level
+ * that is used instead of the global debug level whenever it is set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_ERR("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+ il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_ready_rf(il))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IL_INFO("%s is not in decimal form.\n", buf);
+ else {
+ ret = il_set_tx_power(il, val, false);
+ if (ret)
+ IL_ERR("failed setting tx power (%d).\n", ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+ il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+ const char *name_pre = il->cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+ il->fw_idx = il->cfg->ucode_api_max;
+ sprintf(tag, "%d", il->fw_idx);
+ } else {
+ il->fw_idx--;
+ sprintf(tag, "%d", il->fw_idx);
+ }
+
+ if (il->fw_idx < il->cfg->ucode_api_min) {
+ IL_ERR("no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+ &il->pci_dev->dev, GFP_KERNEL, il,
+ il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+ struct il4965_firmware_pieces *pieces)
+{
+ struct il_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IL_ERR("File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ hdr_size + pieces->inst_size + pieces->data_size +
+ pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+ IL_ERR("uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
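+ /*
+ * The images are laid out back to back after the header, in the
+ * order inst | data | init | init_data | boot; walk 'src' across
+ * them to record each piece's start address.
+ */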
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct il_priv *il = context;
+ struct il_ucode_header *ucode;
+ int err;
+ struct il4965_firmware_pieces pieces;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (il->fw_idx <= il->cfg->ucode_api_max)
+ IL_ERR("request for firmware file '%s' failed.\n",
+ il->firmware_name);
+ goto try_again;
+ }
+
+ D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+ ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IL_ERR("File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+ D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+ D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+ D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+ D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > il->hw_params.max_bsm_size) {
+ IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = pieces.inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ il->ucode_init.len = pieces.init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = pieces.init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ il->ucode_boot.len = pieces.boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in il_up()
+ */
+ D_INFO("Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ D_INFO("Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ D_INFO("Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ D_INFO("Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * figure out the offsets of the chain noise reset and gain commands
+ * based on the size of the standard phy calibration command table
+ */
+ il->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ il->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = il4965_mac_setup_register(il, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow OS release its copies */
+ release_firmware(ucode_raw);
+ complete(&il->_4965.firmware_loading_complete);
+ return;
+
+try_again:
+ /* try next, if any */
+ if (il4965_request_firmware(il, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ il4965_dealloc_ucode_pci(il);
+out_unbind:
+ complete(&il->_4965.firmware_loading_complete);
+ device_release_driver(&il->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STBL",
+ "FH49_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct {
+ char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *
+il4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
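+ /*
+ * Not a plain table idx; search advanced_lookup by error number.
+ * The loop stops one entry short of the end so an unknown value
+ * falls back to the final "ADVANCED_SYSASSERT" entry.
+ */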
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
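+/*
+ * The uCode error log lives in device SRAM at 'base': a count word
+ * followed by fixed-offset fields (desc, pc, blink/ilink registers,
+ * data words, line, time, host command) that are read individually
+ * below.
+ */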
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (il->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+ il->isr_stats.err_code = desc;
+ pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+ data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+ line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+ time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+ hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+ IL_ERR("Desc Time "
+ "data1 data2 line\n");
+ IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ il4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+ blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+ struct il_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&il->lock, flags);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+ ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+ if (ret)
+ IL_ERR("C_CT_KILL_CONFIG failed\n");
+ else
+ D_INFO("C_CT_KILL_CONFIG " "succeeded, "
+ "critical temperature is %d\n",
+ il->hw_params.ct_kill_threshold);
+}
+
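+/*
+ * Default Tx queue to FIFO mapping: queues 0-3 carry the VO/VI/BE/BK
+ * access categories, queue 4 is the host command FIFO, and queues 5-6
+ * are left unused.
+ */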
+static const s8 default_queue_to_tx_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+ IL49_CMD_FIFO_NUM,
+ IL_TX_FIFO_UNUSED,
+ IL_TX_FIFO_UNUSED,
+};
+
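+/*
+ * Build a mask with bits lo..hi (inclusive) set; for example
+ * IL_MASK(0, 6) == 0x7f, covering all seven Tx DMA/FIFO channels.
+ */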
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler data base */
+ il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+ a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr +
+ IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+ a += 4)
+ il_write_targ_mem(il, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+ il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+ reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write idxes */
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+ il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+ (1 << il->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+ il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&il->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all queues first */
+ il->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ il_txq_ctx_activate(il, i);
+
+ if (ac == IL_TX_FIFO_UNUSED)
+ continue;
+
+ il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+ int ret = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* The initialize uCode has loaded the runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive notification if the code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = il4965_alive_notify(il);
+ if (ret) {
+ IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK;
+
+ if (il_is_associated_ctx(ctx)) {
+ struct il_rxon_cmd *active_rxon =
+ (struct il_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, &il->ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ il_send_bt_config(il);
+
+ il4965_reset_run_time_calib(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il_commit_rxon(il, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ il4965_rf_kill_ct_config(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop the TX queues watchdog. The S_EXIT_PENDING bit must be set
+ * to prevent the timer from being rearmed */
+ del_timer_sync(&il->watchdog);
+
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il4965_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il_init() then
+ * clear all bits but the RF Kill bit and return */
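+ /*
+ * test_bit() evaluates to 0 or 1, so shifting the result back by the
+ * bit's own position rebuilds a status word that keeps only the
+ * selected bits and clears everything else.
+ */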
+ if (!il_is_init(il)) {
+ il->status =
+ test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+ test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+ test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+ test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il4965_txq_ctx_stop(il);
+ il4965_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il4965_down(il);
+ mutex_unlock(&il->mutex);
+
+ il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+ int ret = 0;
+
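+ /*
+ * Set NIC_READY, then poll the same bit for up to HW_READY_TIMEOUT
+ * to see whether the hardware reports itself ready.
+ */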
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+ il->hw_ready = (ret != -ETIMEDOUT);
+
+ D_INFO("hardware %s\n", il->hw_ready ? "ready" : "not ready");
+ return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+ int ret = 0;
+
+ D_INFO("il4965_prepare_card_hw enter\n");
+
+ ret = il4965_set_hw_ready(il);
+ if (il->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ il4965_set_hw_ready(il);
+
+ return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+ int i;
+ int ret;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ ret = il4965_alloc_bcast_station(il, &il->ctx);
+ if (ret) {
+ il_dealloc_bcast_stations(il);
+ return ret;
+ }
+
+ il4965_prepare_card_hw(il);
+
+ if (!il->hw_ready) {
+ IL_WARN("Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ if (il_is_rfkill(il)) {
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+ il_enable_interrupts(il);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before il_hw_nic_init */
+ il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = il4965_hw_nic_init(il);
+ if (ret) {
+ IL_ERR("Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = il->cfg->ops->lib->load_ucode(il);
+
+ if (ret) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il4965_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il4965_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* We tried to restart and configure the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il->cfg->ops->lib->init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il4965_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ run_time_calib_work);
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (il->start_calib) {
+ il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+ il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+ }
+
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+
+ __il4965_down(il);
+
+ mutex_unlock(&il->mutex);
+ il4965_cancel_deferred_work(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il4965_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il4965_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il4965_rx_replenish(il);
+ mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags =
+ IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (il->cfg->sku & IL_SKU_N)
+ hw->flags |=
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct il_station_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ hw->wiphy->interface_modes |= il->ctx.interface_modes;
+ hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+ ret = __il4965_up(il);
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ return ret;
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ D_INFO("Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ il4965_led_enable(il);
+
+out:
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open)
+ return;
+
+ il->is_open = 0;
+
+ il4965_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* User space software may expect to receive rfkill changes
+ * even if the interface is down */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_rfkill_int(il);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MACDUMP("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il4965_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ D_MAC80211("enter\n");
+
+ il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+ phase1key);
+
+ D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct il_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ D_MAC80211("enter\n");
+
+ if (il->cfg->mod_params->sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ /*
+ * If we are getting a WEP group key and we did not receive any key
+ * mapping keys so far, we are in legacy WEP mode (group key only);
+ * otherwise we are in 1X mode.
+ * In legacy WEP mode, we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret =
+ il4965_set_default_wep_key(il, vif_priv->ctx, key);
+ else
+ ret =
+ il4965_set_dynamic_key(il, vif_priv->ctx, key,
+ sta_id);
+
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = il4965_remove_default_wep_key(il, ctx, key);
+ else
+ ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size)
+{
+ struct il_priv *il = hw->priv;
+ int ret = -EINVAL;
+
+ D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+ if (!(il->cfg->sku & IL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&il->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ D_HT("start Rx\n");
+ ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ D_HT("stop Rx\n");
+ ret = il4965_sta_rx_agg_stop(il, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ D_HT("start Tx\n");
+ ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ D_HT("stop Tx\n");
+ ret = il4965_tx_agg_stop(il, vif, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret =
+ il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+ &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il4965_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 ch;
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status) ||
+ test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ goto out;
+
+ if (!il_is_associated_ctx(ctx))
+ goto out;
+
+ if (!il->cfg->ops->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&il->lock);
+
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&il->lock);
+
+ il_set_rate(il);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = cpu_to_le16(ch);
+ if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+ clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
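+ /*
+ * CHK() sorts each mac80211 filter flag into one of two RXON masks:
+ * filter_or collects bits to set, filter_nand bits to clear; both
+ * are applied to the staging filter_flags under the mutex below.
+ */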
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ il->ctx.staging.filter_flags &= ~filter_nand;
+ il->ctx.staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ txpower_work);
+
+ mutex_lock(&il->mutex);
+
+ /* If a scan happened to start before we got here
+ * then just return; the stats notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status))
+ goto out;
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power since frames can be sent on non-radar channels while
+ * not associated */
+ il->cfg->ops->lib->send_tx_power(il);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ il->last_temperature = il->temperature;
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il4965_bg_restart);
+ INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+ INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+ il_setup_scan_deferred_work(il);
+
+ INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+ init_timer(&il->stats_periodic);
+ il->stats_periodic.data = (unsigned long)il;
+ il->stats_periodic.function = il4965_bg_stats_periodic;
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il4965_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->txpower_work);
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+ cancel_work_sync(&il->run_time_calib_work);
+
+ il_cancel_scan_deferred_work(il);
+
+ del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il_rates[i].plcp ==
+ RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/*
+ * Acquire il->lock before calling this function!
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+ il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+ int ret;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+ il_init_scan_params(il);
+
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il4965_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+ il4965_calib_free_results(il);
+ il_free_geos(il);
+ il_free_channel_map(il);
+ kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+ il->hw_rev = _il_rd(il, CSR_HW_REV);
+ il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+ il->rev_id = il->pci_dev->revision;
+ D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (il->cfg->mod_params->amsdu_size_8K)
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+ else
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+ il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (il->cfg->mod_params->disable_11n)
+ il->cfg->sku &= ~IL_SKU_N;
+
+ /* Device-specific setup */
+ return il->cfg->ops->lib->set_hw_params(il);
+}
+
+static const u8 il4965_bss_ac_to_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = il_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ /* At this point both hw and il are allocated. */
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.always_active = true;
+ il->ctx.is_active = true;
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+ il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+ il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+ il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err =
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, il);
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* these spin locks will be used in apm_ops.init and EEPROM access,
+ * so we should init them now
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ il4965_hw_detect(il);
+ IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il4965_prepare_card_hw(il);
+ if (!il->hw_ready) {
+ IL_WARN("Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = il4965_eeprom_check_version(il);
+ if (err)
+ goto out_free_eeprom;
+
+ /* extract MAC Address */
+ il4965_eeprom_get_mac(il, il->addresses[0].addr);
+ D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+ il->hw->wiphy->addresses = il->addresses;
+ il->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (il4965_set_hw_params(il)) {
+ IL_ERR("failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup il
+ *******************/
+
+ err = il4965_init_drv(il);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and il are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ il4965_setup_deferred_work(il);
+ il4965_setup_handlers(il);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ il_enable_rfkill_int(il);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+
+ il_power_initialize(il);
+
+ init_completion(&il->_4965.firmware_loading_complete);
+
+ err = il4965_request_firmware(il, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+out_destroy_workqueue:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il4965_uninit_drv(il);
+out_free_eeprom:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ wait_for_completion(&il->_4965.firmware_loading_complete);
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+ sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+ /* The ieee80211_unregister_hw() call will cause il_mac_stop() and
+ * il4965_down() to be called; since we are removing the device
+ * we need to set the S_EXIT_PENDING bit.
+ */
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il4965_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il4965_down(), but there are paths to
+ * run il4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_synchronize_irq(il);
+
+ il4965_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il4965_rx_queue_free(il, &il->rxq);
+ il4965_hw_txq_ctx_free(il);
+
+ il_eeprom_free(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(il->pci_dev->irq, il);
+ pci_disable_msi(il->pci_dev);
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il4965_uninit_drv(il);
+
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * Must be called under il->lock and with MAC access held.
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+ il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+ .name = DRV_NAME,
+ .id_table = il4965_hw_card_ids,
+ .probe = il4965_pci_probe,
+ .remove = __devexit_p(il4965_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+ pci_unregister_driver(&il4965_driver);
+ il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+ S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 00000000000..467d0cb14ec
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY 1
+#define IL_HT_NUMBER_TRY 3
+
+#define RATE_MAX_WINDOW 62 /* # tx in history win */
+#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX 15
+/* max time to accumulate history: 3 seconds */
+#define RATE_SCALE_FLUSH_INTVL (3*HZ)
+
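+/*
+ * Fallback map used when leaving an HT mode for legacy rates (see
+ * il4965_rs_get_lower_rate()): each rate idx maps one step down the
+ * OFDM ladder; the CCK rates and 6 Mbps all map to 6 Mbps.
+ */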
+static u8 rs_ht_to_legacy[] = {
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX,
+ RATE_6M_IDX, RATE_9M_IDX,
+ RATE_12M_IDX, RATE_18M_IDX,
+ RATE_24M_IDX, RATE_36M_IDX,
+ RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
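+/*
+ * il4965_rs_toggle_antenna() below walks this table until it finds an
+ * antenna combination contained in valid_ant, e.g. A -> B -> C -> A for
+ * single antennas and AB -> BC -> AC -> AB for antenna pairs.
+ */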
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_SISO_##s##M_PLCP, \
+ RATE_MIMO2_##s##M_PLCP,\
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev ieee rate, next ieee rate,
+ * prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used, which
+ * maps to RATE_INVALID.
+ */
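+/*
+ * For example, the 12 Mbps entry IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11,
+ * 18, 11, 18) expands to
+ * [RATE_12M_IDX] = { RATE_12M_PLCP, RATE_SISO_12M_PLCP, RATE_MIMO2_12M_PLCP,
+ * RATE_12M_IEEE, RATE_11M_IDX, RATE_18M_IDX, RATE_11M_IDX, RATE_18M_IDX,
+ * RATE_11M_IDX, RATE_18M_IDX }
+ */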
+const struct il_rate_info il_rates[RATE_COUNT] = {
+ IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= RATE_MIMO2_6M_PLCP)
+ idx = idx - RATE_MIMO2_6M_PLCP;
+
+ idx += IL_FIRST_OFDM_RATE;
+ /* skip 9M not supported in ht */
+ if (idx >= RATE_9M_IDX)
+ idx += 1;
+ if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+ struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+ u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 *rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
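+/*
+ * Each HT table below has four rows, idxed [0] Norm, [1] SGI, [2] AGG and
+ * [3] AGG+SGI; il4965_rs_set_expected_tpt_table() picks the row from the
+ * table's is_SGI flag and the station's is_agg state.
+ */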
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+ {"1", "BPSK DSSS"},
+ {"2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ {"11", "QPSK CCK"},
+ {"6", "BPSK 1/2"},
+ {"9", "BPSK 1/2"},
+ {"12", "QPSK 1/2"},
+ {"18", "QPSK 3/4"},
+ {"24", "16QAM 1/2"},
+ {"36", "16QAM 3/4"},
+ {"48", "64QAM 2/3"},
+ {"54", "64QAM 3/4"},
+ {"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM (8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = IL_INVALID_VALUE;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Remove the old data from the stats. All data that is older than
+ * TID_MAX_TIME_DIFF will be deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
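+	/*
+	 * Walk the circular buffer from the head, dropping one
+	 * TID_QUEUE_CELL_SPACING-wide cell at a time until the remaining
+	 * history fits within TID_MAX_TIME_DIFF of curr_time.
+	 */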
+ while (tl->queue_count && tl->time_stamp < oldest_time) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for this tid, and also remove
+ * any old values that have aged beyond the allowed time period.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
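+	/*
+	 * Round down to a TID_ROUND_VALUE boundary so that packets sent
+	 * within the same interval land in the same history cell.
+	 */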
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[idx] = tl->packet_count[idx] + 1;
+ tl->total = tl->total + 1;
+
+ if ((idx + 1) > tl->queue_count)
+ tl->queue_count = idx + 1;
+
+ return tid;
+}
+
+/*
+ * Get the traffic load value for the given tid.
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+ u8 tid, struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = il4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IL_AGG_LOAD_THRESHOLD) {
+ D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+			/*
+			 * The driver and mac80211 are out of sync; this might
+			 * be caused by reloading the firmware.
+			 * Stop the tx ba session here.
+			 */
+ IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else
+ D_HT("Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+
+ return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+ else
+ IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+ TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Static helper to get the expected throughput for a rate from an
+ * il_scale_tbl_info; wraps a NULL pointer check on the expected_tpt table.
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_idx];
+ return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+ int attempts, int successes)
+{
+ struct il_rate_scale_data *win = NULL;
+ static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+ return -EINVAL;
+
+ /* Select win for current tx bit rate */
+ win = &(tbl->win[scale_idx]);
+
+ /* Get expected throughput */
+ tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & mask) {
+ win->data &= ~mask;
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ win->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
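+	/*
+	 * success_ratio above is a percentage scaled by 128, e.g. 31
+	 * successes out of 62 attempts gives 128 * 50 = 6400; the "+ 64"
+	 * below rounds when scaling back down by 128.
+	 */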
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+ int idx, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = il_rates[idx].plcp;
+ if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (idx > IL_LAST_OFDM_RATE) {
+ IL_ERR("Invalid HT rate idx %d\n", idx);
+ idx = IL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= il_rates[idx].plcp_siso;
+ else
+ rate_n_flags |= il_rates[idx].plcp_mimo2;
+ } else {
+ IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |=
+ ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IL_ERR("GF was set with SGI:SISO\n");
+ }
+ }
+ }
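+	/*
+	 * rate_n_flags now holds the PLCP rate in its low byte plus the
+	 * CCK/HT, antenna, HT40/DUP, SGI and GF flag bits set above.
+	 */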
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 il4965_num_of_ant =
+ il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+ *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (il4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = il4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= RATE_SISO_60M_PLCP) {
+ if (il4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE */
+ /* MIMO2 */
+ } else {
+ if (il4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct il_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while (new_ant_type != tbl->ant_type &&
+ !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * If this is a management or broadcast frame, only return the
+ * basic available rates.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum il_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+
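+	/*
+	 * The result is packed as (high << 8) | low; callers unpack it with
+	 * "& 0xff" and ">> 8". RATE_INVALID in either byte means there is no
+	 * valid adjacent rate in that direction.
+	 */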
+ /* 802.11A or ht walks to the next literal adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ low = il_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ high = il_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, u8 scale_idx,
+ u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct il_priv *il = lq_sta->drv;
+
+	/* Check if we need to switch from HT to legacy rates.
+	 * The assumption is that the mandatory rates (1 Mbps or 6 Mbps)
+	 * are always supported, as the spec demands. */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+ switch_to_legacy = 1;
+ scale_idx = rs_ht_to_legacy[scale_idx];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (il4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ }
+
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+ low = scale_idx;
+ goto out;
+ }
+
+ high_low =
+ il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == RATE_INVALID)
+ low = scale_idx;
+
+out:
+ return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+ struct il_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+ a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_idx, mac_idx, i;
+ struct il_lq_sta *lq_sta = il_sta;
+ struct il_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct il_priv *il = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct il_scale_tbl_info tbl_type;
+ struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("get frame ack response, update rate scale win\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ D_RATE("Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
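+	/*
+	 * The 5 GHz band has no CCK rates, so shift rs_idx down to a 0-based
+	 * OFDM idx to match mac80211's per-band rate idx.
+	 */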
+ if (il->band == IEEE80211_BAND_5GHZ)
+ rs_idx -= IL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_idx = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+ mac_idx++;
+ /*
+ * mac80211 HT idx is always zero-idxed; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (il->band == IEEE80211_BAND_2GHZ)
+ mac_idx += IL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if (mac_idx < 0 ||
+ tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+ tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+ tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+ tbl_type.ant_type != info->antenna_sel_tx ||
+ !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+ || !!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+ D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+ rs_idx, tx_rate);
+		/*
+		 * Since the rates mis-match, the last LQ command may have
+		 * failed. After IL_MISSED_RATE_MAX mis-matches, resync the
+		 * uCode with the driver.
+		 */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (il4965_table_type_matches
+ (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else
+ if (il4965_table_type_matches
+ (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ D_RATE("Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+ tbl_type.ant_type, tbl_type.is_SGI);
+		/*
+		 * No matching table found; bypass the data collection and
+		 * continue to perform rate scaling to find the right table.
+		 */
+ il4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first idx into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+ &rs_idx);
+ il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed +=
+ (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+		 * For legacy, update the frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+ &tbl_type, &rs_idx);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (il4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (il4965_table_type_matches
+ (&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
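+			/*
+			 * Intermediate retries are logged as one attempt with
+			 * no success; only the final attempt (i == retries)
+			 * can carry the ACK result.
+			 */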
+ il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+ i <
+ retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[sband->band])
+ il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+ struct il_lq_sta *lq_sta)
+{
+ D_RATE("we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32(*ht_tbl_pointer)[RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 idx)
+{
+ /* "active" values */
+ struct il_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[idx].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[idx];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = idx;
+
+ new_rate = high = low = start_hi = RATE_INVALID;
+
+ for (;;) {
+ high_low =
+ il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+ (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+ && tpt_tbl[rate] <= active_tpt)) ||
+ (active_sr >= RATE_SCALE_SWITCH &&
+ tpt_tbl[rate] > active_tpt)) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+ WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (il->hw_params.tx_chains_num < 2)
+ return -1;
+
+ D_RATE("LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ D_RATE("LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("can not switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
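+	/*
+	 * sz covers the table setup only; the per-rate win[] history is
+	 * excluded, so the memcpy()s below copy the mode setup into the
+	 * search table without the active table's per-rate stats.
+	 */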
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_LEGACY_SWITCH_ANTENNA1:
+ case IL_LEGACY_SWITCH_ANTENNA2:
+ D_RATE("LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ il4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IL_LEGACY_SWITCH_SISO:
+ D_RATE("LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IL_LEGACY_SWITCH_MIMO2_AB:
+ case IL_LEGACY_SWITCH_MIMO2_AC:
+ case IL_LEGACY_SWITCH_MIMO2_BC:
+ D_RATE("LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ u8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_SISO_SWITCH_ANTENNA1:
+ case IL_SISO_SWITCH_ANTENNA2:
+ D_RATE("LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_SISO_SWITCH_MIMO2_AB:
+ case IL_SISO_SWITCH_MIMO2_AC:
+ case IL_SISO_SWITCH_MIMO2_BC:
+ D_RATE("LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+ break;
+ case IL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IL_ERR("SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ s8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_MIMO2_SWITCH_ANTENNA1:
+ case IL_MIMO2_SWITCH_ANTENNA2:
+ D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_MIMO2_SWITCH_SISO_A:
+ case IL_MIMO2_SWITCH_SISO_B:
+ case IL_MIMO2_SWITCH_SISO_C:
+ D_RATE("LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+ struct il_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct il_priv *il;
+
+ il = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ lq_sta->total_failed > lq_sta->max_failure_limit ||
+ lq_sta->total_success > lq_sta->max_success_limit ||
+ (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+ flush_interval_passed)) {
+ D_RATE("LQ: stay is expired %d %d %d\n:",
+ lq_sta->total_failed, lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else if we've used this modulation mode enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >= lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ D_RATE("LQ: stay in table clear win\n");
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&
+ (tbl->
+ win
+ [i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ il4965_rs_fill_link_cmd(il, lq_sta, rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = RATE_INVALID;
+ int high = RATE_INVALID;
+ int idx;
+ int i;
+ struct il_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct il_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_idx_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct il_tid_data *tid_data;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved.. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+ if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ idx = lq_sta->last_txrate_idx;
+
+ D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ D_RATE("mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_idx_msk =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_scale_idx_msk =
+ (u16) (rate_mask & lq_sta->supp_rates);
+
+ } else
+ rate_scale_idx_msk = rate_mask;
+
+ if (!rate_scale_idx_msk)
+ rate_scale_idx_msk = rate_mask;
+
+ if (!((1 << idx) & rate_scale_idx_msk)) {
+ IL_ERR("Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history win for current rate */
+ if (!tbl->expected_tpt) {
+ IL_ERR("tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+ idx = lq_sta->max_rate_idx;
+ update_lq = 1;
+ win = &(tbl->win[idx]);
+ goto lq_update;
+ }
+
+ win = &(tbl->win[idx]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = win->counter - win->success_counter;
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
+ win->success_counter, win->counter, idx);
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (win->average_tpt !=
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+ IL_ERR("expected_tpt should have been calculated by now\n");
+ win->average_tpt =
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (win->average_tpt > lq_sta->last_tpt) {
+
+ D_RATE("LQ: SWITCHING TO NEW TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = win->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ D_RATE("LQ: GOING BACK TO THE OLD TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low =
+ il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates higher than that constraint */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+ high = RATE_INVALID;
+
+ sr = win->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = win->average_tpt;
+ if (low != RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+ low_tpt < current_tpt && high_tpt < current_tpt)
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else
+ scale_action = 0;
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != RATE_INVALID) {
+ update_lq = 1;
+ idx = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != RATE_INVALID) {
+ update_lq = 1;
+ idx = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
+ idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for a new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ * 4) Have some Tx history in the current rate's win
+ */
+ if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+ /* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+ else if (is_siso(tbl->lq_type))
+ il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+ idx);
+ else /* (is_mimo2(tbl->lq_type)) */
+ il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+ idx);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ D_RATE("Switch current mcs: %X idx: %d\n",
+ tbl->current_rate, idx);
+ il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ D_RATE("LQ: STAY in legacy table\n");
+ il4965_rs_set_stay_in_table(il, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ lq_sta->action_counter >= tbl1->max_search) {
+ if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ tid != MAX_TID_COUNT) {
+ tid_data =
+ &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF) {
+ D_RATE("try to aggregate tid %d\n",
+ tid);
+ il4965_rs_tl_turn_on_agg(il, tid,
+ lq_sta, sta);
+ }
+ }
+ il4965_rs_set_stay_in_table(il, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ i = idx;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ * calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+ struct il_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = il4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct il_station_priv *sta_priv;
+ struct il_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if (i < 0 || i >= RATE_COUNT)
+ i = 0;
+
+ rate = il_rates[i].plcp;
+ tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+ if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
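+/**
+ * il4965_rs_get_rate - mac80211 get_rate callback
+ *
+ * Reports the rate most recently chosen by the scaling algorithm
+ * (last_txrate_idx / last_rate_n_flags) back to mac80211 by filling
+ * info->control.rates[0]. Management frames, NO_ACK data and stations
+ * whose rate scaling is not yet initialized are handled by
+ * rate_control_send_low() instead.
+ */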
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il_lq_sta *lq_sta = il_sta;
+ int rate_idx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ &&
+ lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 ||
+ lq_sta->max_rate_idx >= RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ if (!lq_sta)
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ /* 6M and 9M share the same MCS idx */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
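+ /* MIMO2 rates map into the second block of MCS idxes,
+ * MCS_IDX_PER_STREAM entries per spatial stream */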
+ if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+ (sband->band == IEEE80211_BAND_5GHZ &&
+ rate_idx < IL_FIRST_OFDM_RATE))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust idx */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il_station_priv *sta_priv =
+ (struct il_station_priv *)sta->drv_priv;
+ struct il_priv *il;
+
+ il = (struct il_priv *)il_rate;
+ D_RATE("create station rate scale win\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct il_station_priv *sta_priv;
+ struct il_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct il_station_priv *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+ win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+ D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+ sta_id);
+ /* TODO: what is a good starting rate for a STA? About the middle? Maybe
+ * not the lowest or the highest rate. Could consider using RSSI from
+ * previous packets. Note that IEEE 802.1X auth needs to succeed
+ * immediately after assoc. */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+ lq_sta->band = il->band;
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16) 0x2);
+ lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+ lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+ lq_sta->drv = il;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+ u32 new_rate)
+{
+ struct il_scale_tbl_info tbl_type;
+ int idx = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (idx 0) if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Interpret new_rate (rate_n_flags) */
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
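+ /* The LQ command holds LINK_QUAL_MAX_RETRY_NUM fallback entries;
+ * each rate is repeated repeat_rate times before stepping down to
+ * a lower rate, toggling antennas along the way for legacy rates. */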
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (idx 0) */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+ }
+ /* otherwise we don't modify the existing value */
+ idx++;
+ repeat_rate--;
+ if (il)
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate,
+ &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ idx++;
+ }
+
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = idx;
+
+ /* Get next rate */
+ new_rate =
+ il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ idx++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* The rate-scaling framework requires a free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+ return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il_priv *il __maybe_unused = il_r;
+
+ D_RATE("enter\n");
+ D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+ struct il_priv *il;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ il = lq_sta->drv;
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->
+ dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ D_RATE("Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IL_ERR
+ ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ D_RATE("Fixed rate OFF\n");
+ }
+ } else {
+ D_RATE("Fixed rate OFF\n");
+ }
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+ struct il_station_priv *sta_priv =
+ container_of(lq_sta, struct il_station_priv, lq_sta);
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ il = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+ lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+
+ return count;
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int idx = 0;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ il = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+ desc +=
+ sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc +=
+ sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+ desc +=
+ sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+ (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc +=
+ sprintf(buff + desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc +=
+ sprintf(buff + desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc +=
+ sprintf(buff + desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc +=
+ sprintf(buff + desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc +=
+ sprintf(buff + desc,
+ "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc +=
+ sprintf(buff + desc,
+ "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc +=
+ sprintf(buff + desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_idx[0],
+ lq_sta->lq.general_params.start_rate_idx[1],
+ lq_sta->lq.general_params.start_rate_idx[2],
+ lq_sta->lq.general_params.start_rate_idx[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ idx =
+ il4965_hwrate_to_plcp_idx(le32_to_cpu
+ (lq_sta->lq.rs_table[i].
+ rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps);
+ } else {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps,
+ il_rate_mcs[idx].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = il4965_rs_sta_dbgfs_scale_table_write,
+ .read = il4965_rs_sta_dbgfs_scale_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc +=
+ sprintf(buff + desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < RATE_COUNT; j++) {
+ desc +=
+ sprintf(buff + desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il4965_rs_sta_dbgfs_stats_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ if (is_Ht(tbl->lq_type))
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IL4965_RS_NAME,
+ .tx_status = il4965_rs_tx_status,
+ .get_rate = il4965_rs_get_rate,
+ .rate_init = il4965_rs_rate_init_stub,
+ .alloc = il4965_rs_alloc,
+ .free = il4965_rs_free,
+ .alloc_sta = il4965_rs_alloc_sta,
+ .free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il4965_rs_add_debugfs,
+ .remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 00000000000..cacbc03880b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2402 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_full(il, image, len);
+
+ return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < il->cfg->eeprom_ver ||
+ calib_ver < il->cfg->eeprom_calib_ver)
+ goto err;
+
+ IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+ calib_ver, il->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+ const u8 *addr = il_eeprom_query_addr(il,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
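+ /* If any bits outside the BSM control mask are set, clear them
+ * before sending the LED command */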
+ reg = _il_rd(il, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+ _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+ .cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int i;
+ u32 done;
+ u32 reg_offset;
+ int ret;
+
+ D_INFO("Begin load bsm\n");
+
+ il->ucode_type = UCODE_RT;
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL49_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+ * NOTE: il_init_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache.
+ */
+ pinst = il->ucode_init.p_addr >> 4;
+ pdata = il->ucode_init_data.p_addr >> 4;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ ret = il4965_verify_bsm(il);
+ if (ret)
+ return ret;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ int ret = 0;
+
+ /* bits 35:4 for 4965 */
+ pinst = il->ucode_code.p_addr >> 4;
+ pdata = il->ucode_data_backup.p_addr >> 4;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ * Voltage, temperature, and MIMO tx gain correction, now stored in il
+ * (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+*/
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Calculate temperature */
+ il->temperature = il4965_hw_get_temperature(il);
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il4965_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+ int chan_mod =
+ le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ return (chan_mod == CHANNEL_MODE_PURE_40 ||
+ chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+ unsigned long flags;
+ u16 radio_cfg;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ il->calib_info =
+ (struct il_eeprom_calib_info *)
+ il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ * ... once chain noise is calibrated the first time, it's good forever. */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+ struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+ if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+ struct il_calib_diff_gain_cmd cmd;
+
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = 0;
+ cmd.diff_gain_b = 0;
+ cmd.diff_gain_c = 0;
+ if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+ IL_ERR("Could not send C_PHY_CALIBRATION\n");
+ data->state = IL_CHAIN_NOISE_ACCUMULATE;
+ D_CALIB("Run chain_noise_calibrate\n");
+ }
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+
+ .auto_corr_min_ofdm = 85,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 140,
+ .auto_corr_max_ofdm_mrc_x1 = 270,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 200,
+ .auto_corr_max_cck_mrc = 400,
+
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+ /* want Kelvin */
+ il->hw_params.ct_kill_threshold =
+ CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
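+ /* Honor the num_of_queues module parameter when it falls within
+ * the range supported by this hardware */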
+ if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+ il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+ il->cfg->base_params->num_of_queues =
+ il->cfg->mod_params->num_of_queues;
+
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+ il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+ il->hw_params.scd_bc_tbls_size =
+ il->cfg->base_params->num_of_queues *
+ sizeof(struct il4965_scd_bc_tbl);
+ il->hw_params.tfd_size = sizeof(struct il_tfd);
+ il->hw_params.max_stations = IL4965_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+ il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+ il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+ il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+ il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+ il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+ il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+ il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+ il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+ il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+ il4965_set_ct_threshold(il);
+
+ il->hw_params.sens = &il4965_sensitivity;
+ il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
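+/**
+ * il4965_math_div_round - signed division rounded to the nearest integer
+ *
+ * Stores round(num / denom) in *res, halves rounding away from zero,
+ * e.g. 7/2 -> 4 and -3/2 -> -2. Always returns 1.
+ */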
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+ s32 sign = 1;
+
+ if (num < 0) {
+ sign = -sign;
+ num = -num;
+ }
+ if (denom < 0) {
+ sign = -sign;
+ denom = -denom;
+ }
+ *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+ return 1;
+}
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+ s32 comp = 0;
+
+ if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+ TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+ return 0;
+
+ il4965_math_div_round(current_voltage - eeprom_voltage,
+ TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
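+ /* A higher reading means a lower supply voltage; when the current
+ * reading exceeds the factory one the supply is weaker than at
+ * calibration, so double the correction (i.e. more gain). */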
+ if (current_voltage > eeprom_voltage)
+ comp *= 2;
+ if ((comp < -2) || (comp > 2))
+ comp = 0;
+
+ return comp;
+}
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+ if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+ return CALIB_CH_GROUP_5;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+ return CALIB_CH_GROUP_1;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+ return CALIB_CH_GROUP_2;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+ return CALIB_CH_GROUP_3;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+ return CALIB_CH_GROUP_4;
+
+ return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+ s32 b = -1;
+
+ for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+ if (il->calib_info->band_info[b].ch_from == 0)
+ continue;
+
+ if (channel >= il->calib_info->band_info[b].ch_from &&
+ channel <= il->calib_info->band_info[b].ch_to)
+ break;
+ }
+
+ return b;
+}
+
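+/**
+ * il4965_interpolate_value - linear interpolation between two sample points
+ *
+ * Returns y at x on the line through (x1, y1) and (x2, y2), i.e.
+ * y2 + (x2 - x) * (y1 - y2) / (x2 - x1), rounded to the nearest integer.
+ * If x1 == x2, y1 is returned.
+ */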
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ s32 val;
+
+ if (x2 == x1)
+ return y1;
+ else {
+ il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+ return val + y2;
+ }
+}
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest. Interpolation is proportional to
+ * differences in channel frequencies, which are proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+ struct il_eeprom_calib_ch_info *chan_info)
+{
+ s32 s = -1;
+ u32 c;
+ u32 m;
+ const struct il_eeprom_calib_measure *m1;
+ const struct il_eeprom_calib_measure *m2;
+ struct il_eeprom_calib_measure *omeas;
+ u32 ch_i1;
+ u32 ch_i2;
+
+ s = il4965_get_sub_band(il, channel);
+ if (s >= EEPROM_TX_POWER_BANDS) {
+ IL_ERR("Tx Power can not find channel %d\n", channel);
+ return -1;
+ }
+
+ ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+ ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+ chan_info->ch_num = (u8) channel;
+
+ D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+ ch_i1, ch_i2);
+
+ for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+ for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+ m1 = &(il->calib_info->band_info[s].ch1.
+ measurements[c][m]);
+ m2 = &(il->calib_info->band_info[s].ch2.
+ measurements[c][m]);
+ omeas = &(chan_info->measurements[c][m]);
+
+ omeas->actual_pow =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->actual_pow, ch_i2,
+ m2->actual_pow);
+ omeas->gain_idx =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->gain_idx, ch_i2,
+ m2->gain_idx);
+ omeas->temperature =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->temperature,
+ ch_i2,
+ m2->temperature);
+ omeas->pa_det =
+ (s8) il4965_interpolate_value(channel, ch_i1,
+ m1->pa_det, ch_i2,
+ m2->pa_det);
+
+ D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+ m, m1->actual_pow, m2->actual_pow,
+ omeas->actual_pow);
+ D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+ m, m1->gain_idx, m2->gain_idx,
+ omeas->gain_idx);
+ D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+ m, m1->pa_det, m2->pa_det, omeas->pa_det);
+ D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
+ m, m1->temperature, m2->temperature,
+ omeas->temperature);
+ }
+ }
+
+ return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+ 10 /* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+ s32 degrees_per_05db_a;
+ s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+ {9, 2}, /* group 0 5.2 GHz, ch 34-43 */
+ {4, 1}, /* group 1 5.2 GHz, ch 44-70 */
+ {4, 1}, /* group 2 5.2 GHz, ch 71-124 */
+ {4, 1}, /* group 3 5.2 GHz, ch 125-200 */
+ {3, 1} /* group 4 2.4 GHz, all ch */
+};
+
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+ if (!band) {
+ if ((rate_power_idx & 7) <= 4)
+ return MIN_TX_GAIN_IDX_52GHZ_EXT;
+ }
+ return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+ u8 dsp;
+ u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+ /* 5.2GHz power gain idx table */
+ {
+ {123, 0x3F}, /* highest txpower */
+ {117, 0x3F},
+ {110, 0x3F},
+ {104, 0x3F},
+ {98, 0x3F},
+ {110, 0x3E},
+ {104, 0x3E},
+ {98, 0x3E},
+ {110, 0x3D},
+ {104, 0x3D},
+ {98, 0x3D},
+ {110, 0x3C},
+ {104, 0x3C},
+ {98, 0x3C},
+ {110, 0x3B},
+ {104, 0x3B},
+ {98, 0x3B},
+ {110, 0x3A},
+ {104, 0x3A},
+ {98, 0x3A},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x25},
+ {104, 0x25},
+ {98, 0x25},
+ {110, 0x24},
+ {104, 0x24},
+ {98, 0x24},
+ {110, 0x23},
+ {104, 0x23},
+ {98, 0x23},
+ {110, 0x22},
+ {104, 0x18},
+ {98, 0x18},
+ {110, 0x17},
+ {104, 0x17},
+ {98, 0x17},
+ {110, 0x16},
+ {104, 0x16},
+ {98, 0x16},
+ {110, 0x15},
+ {104, 0x15},
+ {98, 0x15},
+ {110, 0x14},
+ {104, 0x14},
+ {98, 0x14},
+ {110, 0x13},
+ {104, 0x13},
+ {98, 0x13},
+ {110, 0x12},
+ {104, 0x08},
+ {98, 0x08},
+ {110, 0x07},
+ {104, 0x07},
+ {98, 0x07},
+ {110, 0x06},
+ {104, 0x06},
+ {98, 0x06},
+ {110, 0x05},
+ {104, 0x05},
+ {98, 0x05},
+ {110, 0x04},
+ {104, 0x04},
+ {98, 0x04},
+ {110, 0x03},
+ {104, 0x03},
+ {98, 0x03},
+ {110, 0x02},
+ {104, 0x02},
+ {98, 0x02},
+ {110, 0x01},
+ {104, 0x01},
+ {98, 0x01},
+ {110, 0x00},
+ {104, 0x00},
+ {98, 0x00},
+ {93, 0x00},
+ {88, 0x00},
+ {83, 0x00},
+ {78, 0x00},
+ },
+ /* 2.4GHz power gain idx table */
+ {
+ {110, 0x3f}, /* highest txpower */
+ {104, 0x3f},
+ {98, 0x3f},
+ {110, 0x3e},
+ {104, 0x3e},
+ {98, 0x3e},
+ {110, 0x3d},
+ {104, 0x3d},
+ {98, 0x3d},
+ {110, 0x3c},
+ {104, 0x3c},
+ {98, 0x3c},
+ {110, 0x3b},
+ {104, 0x3b},
+ {98, 0x3b},
+ {110, 0x3a},
+ {104, 0x3a},
+ {98, 0x3a},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x6},
+ {104, 0x6},
+ {98, 0x6},
+ {110, 0x5},
+ {104, 0x5},
+ {98, 0x5},
+ {110, 0x4},
+ {104, 0x4},
+ {98, 0x4},
+ {110, 0x3},
+ {104, 0x3},
+ {98, 0x3},
+ {110, 0x2},
+ {104, 0x2},
+ {98, 0x2},
+ {110, 0x1},
+ {104, 0x1},
+ {98, 0x1},
+ {110, 0x0},
+ {104, 0x0},
+ {98, 0x0},
+ {97, 0},
+ {96, 0},
+ {95, 0},
+ {94, 0},
+ {93, 0},
+ {92, 0},
+ {91, 0},
+ {90, 0},
+ {89, 0},
+ {88, 0},
+ {87, 0},
+ {86, 0},
+ {85, 0},
+ {84, 0},
+ {83, 0},
+ {82, 0},
+ {81, 0},
+ {80, 0},
+ {79, 0},
+ {78, 0},
+ {77, 0},
+ {76, 0},
+ {75, 0},
+ {74, 0},
+ {73, 0},
+ {72, 0},
+ {71, 0},
+ {70, 0},
+ {69, 0},
+ {68, 0},
+ {67, 0},
+ {66, 0},
+ {65, 0},
+ {64, 0},
+ {63, 0},
+ {62, 0},
+ {61, 0},
+ {60, 0},
+ {59, 0},
+ }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+ u8 ctrl_chan_high,
+ struct il4965_tx_power_db *tx_power_tbl)
+{
+ u8 saturation_power;
+ s32 target_power;
+ s32 user_target_power;
+ s32 power_limit;
+ s32 current_temp;
+ s32 reg_limit;
+ s32 current_regulatory;
+ s32 txatten_grp = CALIB_CH_GROUP_MAX;
+ int i;
+ int c;
+ const struct il_channel_info *ch_info = NULL;
+ struct il_eeprom_calib_ch_info ch_eeprom_info;
+ const struct il_eeprom_calib_measure *measurement;
+ s16 voltage;
+ s32 init_voltage;
+ s32 voltage_compensation;
+ s32 degrees_per_05db_num;
+ s32 degrees_per_05db_denom;
+ s32 factory_temp;
+ s32 temperature_comp[2];
+ s32 factory_gain_idx[2];
+ s32 factory_actual_pwr[2];
+ s32 power_idx;
+
+ /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+ * are used for idxing into txpower table) */
+ user_target_power = 2 * il->tx_power_user_lmt;
+
+ /* Get current (RXON) channel, band, width */
+ D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+ ch_info = il_get_channel_info(il, il->band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -EINVAL;
+
+ /* get txatten group, used to select 1) thermal txpower adjustment
+ * and 2) mimo txpower balance between Tx chains. */
+ txatten_grp = il4965_get_tx_atten_grp(channel);
+ if (txatten_grp < 0) {
+ IL_ERR("Can't find txatten group for channel %d.\n", channel);
+ return txatten_grp;
+ }
+
+ D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+ txatten_grp);
+
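+ /* For HT40, reference the calibration to the center of the 40 MHz
+ * channel, 2 channel numbers (roughly 10 MHz) away from the control
+ * channel, toward the extension channel */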
+ if (is_ht40) {
+ if (ctrl_chan_high)
+ channel -= 2;
+ else
+ channel += 2;
+ }
+
+ /* hardware txpower limits ...
+ * saturation (clipping distortion) txpowers are in half-dBm */
+ if (band)
+ saturation_power = il->calib_info->saturation_power24;
+ else
+ saturation_power = il->calib_info->saturation_power52;
+
+ if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+ saturation_power > IL_TX_POWER_SATURATION_MAX) {
+ if (band)
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+ else
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+ }
+
+ /* regulatory txpower limits ... reg_limit values are in half-dBm,
+ * max_power_avg values are in dBm, convert * 2 */
+ if (is_ht40)
+ reg_limit = ch_info->ht40_max_power_avg * 2;
+ else
+ reg_limit = ch_info->max_power_avg * 2;
+
+ if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+ (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+ if (band)
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+ else
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+ }
+
+ /* Interpolate txpower calibration values for this channel,
+ * based on factory calibration tests on spaced channels. */
+ il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+ /* calculate tx gain adjustment based on power supply voltage */
+ voltage = le16_to_cpu(il->calib_info->voltage);
+ init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+ voltage_compensation =
+ il4965_get_voltage_compensation(voltage, init_voltage);
+
+ D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+ voltage, voltage_compensation);
+
+ /* get current temperature (Celsius) */
+ current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+ current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+ current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+ /* select thermal txpower adjustment params, based on channel group
+ * (same frequency group used for mimo txatten adjustment) */
+ degrees_per_05db_num =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+ degrees_per_05db_denom =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+ /* get per-chain txpower values from factory measurements */
+ for (c = 0; c < 2; c++) {
+ measurement = &ch_eeprom_info.measurements[c][1];
+
+ /* txgain adjustment (in half-dB steps) based on difference
+ * between factory and current temperature */
+ factory_temp = measurement->temperature;
+ il4965_math_div_round((current_temp -
+ factory_temp) * degrees_per_05db_denom,
+ degrees_per_05db_num,
+ &temperature_comp[c]);
+
+ factory_gain_idx[c] = measurement->gain_idx;
+ factory_actual_pwr[c] = measurement->actual_pow;
+
+ D_TXPOWER("chain = %d\n", c);
+ D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+ factory_temp, current_temp, temperature_comp[c]);
+
+ D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+ factory_actual_pwr[c]);
+ }
+
+ /* for each of 33 bit-rates (including 1 for CCK) */
+ for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+ u8 is_mimo_rate;
+ union il4965_tx_power_dual_stream tx_power;
+
+ /* for mimo, reduce each chain's txpower by half
+ * (3dB, 6 steps), so total output power is regulatory
+ * compliant. */
+ if (i & 0x8) {
+ current_regulatory =
+ reg_limit -
+ IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+ is_mimo_rate = 1;
+ } else {
+ current_regulatory = reg_limit;
+ is_mimo_rate = 0;
+ }
+
+ /* find txpower limit, either hardware or regulatory */
+ power_limit = saturation_power - back_off_table[i];
+ if (power_limit > current_regulatory)
+ power_limit = current_regulatory;
+
+ /* reduce user's txpower request if necessary
+ * for this rate on this channel */
+ target_power = user_target_power;
+ if (target_power > power_limit)
+ target_power = power_limit;
+
+ D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+ saturation_power - back_off_table[i],
+ current_regulatory, user_target_power, target_power);
+
+ /* for each of 2 Tx chains (radio transmitters) */
+ for (c = 0; c < 2; c++) {
+ s32 atten_value;
+
+ if (is_mimo_rate)
+ atten_value =
+ (s32) le32_to_cpu(il->card_alive_init.
+ tx_atten[txatten_grp][c]);
+ else
+ atten_value = 0;
+
+ /* calculate idx; higher idx means lower txpower */
+ power_idx =
+ factory_gain_idx[c] -
+ (target_power - factory_actual_pwr[c]) -
+ temperature_comp[c] - voltage_compensation +
+ atten_value;
+
+/* D_TXPOWER("calculated txpower idx %d\n",
+ power_idx); */
+
+ if (power_idx < get_min_power_idx(i, band))
+ power_idx = get_min_power_idx(i, band);
+
+ /* adjust 5 GHz idx to support negative idxes */
+ if (!band)
+ power_idx += 9;
+
+ /* CCK, rate 32, reduce txpower for CCK */
+ if (i == POWER_TBL_CCK_ENTRY)
+ power_idx +=
+ IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+ /* stay within the table! */
+ if (power_idx > 107) {
+ IL_WARN("txpower idx %d > 107\n", power_idx);
+ power_idx = 107;
+ }
+ if (power_idx < 0) {
+ IL_WARN("txpower idx %d < 0\n", power_idx);
+ power_idx = 0;
+ }
+
+ /* fill txpower command for this rate/chain */
+ tx_power.s.radio_tx_gain[c] =
+ gain_table[band][power_idx].radio;
+ tx_power.s.dsp_predis_atten[c] =
+ gain_table[band][power_idx].dsp;
+
+ D_TXPOWER("chain %d mimo %d idx %d "
+ "gain 0x%02x dsp %d\n", c, atten_value,
+ power_idx, tx_power.s.radio_tx_gain[c],
+ tx_power.s.dsp_predis_atten[c]);
+ } /* for each chain */
+
+ tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+ } /* for each rate */
+
+ return 0;
+}
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+ struct il4965_txpowertable_cmd cmd = { 0 };
+ int ret;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+ if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.channel = ctx->active.channel;
+
+ ret =
+ il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+ is_ht40, ctrl_chan_high, &cmd.tx_power);
+ if (ret)
+ goto out;
+
+ ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+ return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int ret = 0;
+ struct il4965_rxon_assoc_cmd rxon_assoc;
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates &&
+ rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates &&
+ rxon1->rx_chain == rxon2->rx_chain &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ ctx->staging.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ ctx->staging.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+ ret =
+ il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+ &rxon_assoc, NULL);
+
+ return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+ int ret;
+ bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!il_is_alive(il))
+ return -EBUSY;
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ ret = il_check_rxon_cmd(il, ctx);
+ if (ret) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * receive commit_rxon request
+ * abort any previous channel switch if still in process
+ */
+ if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+ il->switch_channel != ctx->staging.channel) {
+ D_11H("abort channel switch on %d\n",
+ le16_to_cpu(il->switch_channel));
+ il_chswitch_done(il, false);
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, ctx)) {
+ ret = il_send_rxon_assoc(il, ctx);
+ if (ret) {
+ IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_print_rx_config_cmd(il, ctx);
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config (which requires
+ * a full RXON) wants the associated mask enabled, we must first clear
+ * the association from the active configuration before we apply the
+ * new config. */
+ if (il_is_associated_ctx(ctx) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (ret) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+ return ret;
+ }
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+ il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+ /* Apply the new configuration
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
+ */
+ if (!new_assoc) {
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ D_INFO("Return from !new_assoc RXON.\n");
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+ if (new_assoc) {
+ il->start_calib = 0;
+ /* Apply the new configuration
+ * RXON assoc doesn't clear the station table in uCode,
+ */
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ }
+ il_print_rx_config_cmd(il, ctx);
+
+ il4965_init_sensitivity(il);
+
+ /* If we issue a new RXON command which requires a tune, then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames. */
+ ret = il_set_tx_power(il, il->tx_power_next, true);
+ if (ret) {
+ IL_ERR("Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int rc;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il4965_channel_switch_cmd cmd;
+ const struct il_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+ if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.expect_beacon = 0;
+ ch = ch_switch->channel->hw_value;
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the uCode channel switch time,
+ * using the TSF as one of the factors for deciding when to switch
+ */
+ if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+ if (switch_count >
+ ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+ switch_count -=
+ (il->ucode_beacon_time - tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time =
+ il_usecs_to_beacons(il, switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time =
+ il_add_beacon_time(il, il->ucode_beacon_time,
+ ucode_switch_time, beacon_interval);
+ }
+ D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+ ch_info = il_get_channel_info(il, il->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = il_is_channel_radar(ch_info);
+ else {
+ IL_ERR("invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+ rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+ &cmd.tx_power);
+ if (rc) {
+ D_11H("error:%d fill txpower_tbl\n", rc);
+ return rc;
+ }
+
+ return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int write_ptr = txq->q.write_ptr;
+ int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ bc_ent = cpu_to_le16(len & 0xFFF);
+ /* Set up byte count within first 256 entries */
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ /* If within first 64 entries, duplicate at end */
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @stats: Provides the temperature reading from the uCode
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+ s32 temperature;
+ s32 vt;
+ s32 R1, R2, R3;
+ u32 R4;
+
+ if (test_bit(S_TEMPERATURE, &il->status) &&
+ (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+ D_TEMP("Running HT40 temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+ } else {
+ D_TEMP("Running temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+ }
+
+ /*
+ * Temperature is only 23 bits, so sign extend out to 32.
+ *
+ * NOTE If we haven't received a stats notification yet
+ * with an updated temperature, use R4 provided to us in the
+ * "initialize" ALIVE response.
+ */
+ if (!test_bit(S_TEMPERATURE, &il->status))
+ vt = sign_extend32(R4, 23);
+ else
+ vt = sign_extend32(le32_to_cpu
+ (il->_4965.stats.general.common.temperature),
+ 23);
+
+ D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+ if (R3 == R1) {
+ IL_ERR("Calibration conflict R1 == R3\n");
+ return -1;
+ }
+
+ /* Calculate temperature in degrees Kelvin, adjust by 97%.
+ * Add offset to center the adjustment around 0 degrees Centigrade. */
+ temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+ temperature /= (R3 - R1);
+ temperature =
+ (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+ D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+ KELVIN_TO_CELSIUS(temperature));
+
+ return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD 3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, a recalibration is needed.
+ *
+ * Assumes the caller will replace il->last_temperature once the
+ * calibration has been executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ if (!test_bit(S_STATS, &il->status)) {
+ D_TEMP("Temperature not updated -- no stats.\n");
+ return 0;
+ }
+
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Temperature unchanged\n");
+ else
+ D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+ if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+ D_POWER(" => thermal txpower calib not needed\n");
+ return 0;
+ }
+
+ D_POWER(" => thermal txpower calib needed\n");
+
+ return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+ s32 temp;
+
+ temp = il4965_hw_get_temperature(il);
+ if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+ return;
+
+ if (il->temperature != temp) {
+ if (il->temperature)
+ D_TEMP("Temperature changed " "from %dC to %dC\n",
+ KELVIN_TO_CELSIUS(il->temperature),
+ KELVIN_TO_CELSIUS(temp));
+ else
+ D_TEMP("Temperature " "initialized to %dC\n",
+ KELVIN_TO_CELSIUS(temp));
+ }
+
+ il->temperature = temp;
+ set_bit(S_TEMPERATURE, &il->status);
+
+ if (!il->disable_tx_power_cal &&
+ unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ il4965_is_temp_calib_needed(il))
+ queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return (u16) sizeof(struct il4965_rxon_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cmd->tid_disable_tx;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+ addsta->sleep_tx_count = cmd->sleep_tx_count;
+ addsta->reserved1 = cpu_to_le16(0);
+ addsta->reserved2 = cpu_to_le16(0);
+
+ return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+ return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+ struct il4965_tx_resp *tx_resp, int txq_id,
+ u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+ if (agg->wait_for_ba)
+ D_TX_REPLY("got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* num frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+ D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+ tx_resp->failure_frame);
+ D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx win */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_IDX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status &
+ (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (!hdr) {
+ IL_ERR("BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IL_ERR("BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n", idx,
+ SEQ_TO_SN(sc), hdr->seq_ctrl);
+ return -1;
+ }
+
+ D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+ SEQ_TO_SN(sc));
+
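+ /* The block-ack bitmap is kept relative to 'start' (the first
+ * frame of the aggregation win); the adjustments below rebase
+ * 'start' and shift the bitmap when the queue idx wraps. */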
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+ (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+ int i;
+ int start = 0;
+ int ret = IL_INVALID_STATION;
+ unsigned long flags;
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC)
+ start = IL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return il->ctx.bcast_sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = start; i < il->hw_params.max_stations; i++)
+ if (il->stations[i].used &&
+ (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+ ret = i;
+ goto out;
+ }
+
+ D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+ /*
+ * It is possible that more commands interacting with stations
+ * arrive before we have completed processing the addition of a
+ * station.
+ */
+ if (ret != IL_INVALID_STATION &&
+ (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+ ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ IL_ERR("Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+ if (il->iw_mode == NL80211_IFTYPE_STATION) {
+ return IL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return il4965_find_station(il, da);
+ }
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info;
+ struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->u.status);
+ int uninitialized_var(tid);
+ int sta_id;
+ int freed;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ memset(&info->status, 0, sizeof(info->status));
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ sta_id = il4965_get_ra_sta_id(il, hdr);
+ if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+ IL_ERR("Station not known\n");
+ return;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (txq->sched_retry) {
+ const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+ struct il_ht_agg *agg = NULL;
+ WARN_ON(!qc);
+
+ agg = &il->stations[sta_id].tid[tid].agg;
+
+ il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) &&
+ !il4965_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+ "%d idx %d\n", scd_ssn, idx);
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc)
+ il4965_free_tfds_in_queue(il, sta_id, tid,
+ freed);
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+ }
+ } else {
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ D_TX_REPLY("TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n", txq_id,
+ il4965_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+ else if (sta_id == IL_INVALID_STATION)
+ D_TX_REPLY("Station not known\n");
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark)
+ il_wake_queue(il, txq);
+ }
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+ il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+ /* Legacy Rx frames */
+ il->handlers[N_RX] = il4965_hdl_rx;
+ /* Tx response */
+ il->handlers[C_TX] = il4965_hdl_tx;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+ .rxon_assoc = il4965_send_rxon_assoc,
+ .commit_rxon = il4965_commit_rxon,
+ .set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+
+ if (!vif || !il->is_open)
+ return;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ il_set_rxon_ht(il, &il->current_ht_config);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ vif->bss_conf.beacon_int);
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il_commit_rxon(il, ctx);
+
+ D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il4965_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ vif->type);
+ break;
+ }
+
+ /* The chain noise calibration will enable PM upon completion.
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+ if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+ il_power_update_mode(il, false);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ il4965_chain_noise_reset(il);
+ il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!il_is_associated_ctx(ctx)) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing failed - "
+ "Attempting to continue.\n");
+
+ /* AP has all antennas */
+ il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+ il_set_rxon_ht(il, &il->current_ht_config);
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* need to send beacon cmd before committing assoc RXON! */
+ il4965_send_beacon_cmd(il);
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+ }
+ il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+ .get_hcmd_size = il4965_get_hcmd_size,
+ .build_addsta_hcmd = il4965_build_addsta_hcmd,
+ .request_scan = il4965_request_scan,
+ .post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+ .set_hw_params = il4965_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+ .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il4965_hw_txq_free_tfd,
+ .txq_init = il4965_hw_tx_queue_init,
+ .handler_setup = il4965_handler_setup,
+ .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+ .init_alive_start = il4965_init_alive_start,
+ .load_ucode = il4965_load_bsm,
+ .dump_nic_error_log = il4965_dump_nic_error_log,
+ .dump_fh = il4965_dump_fh,
+ .set_channel_switch = il4965_hw_channel_switch,
+ .apm_ops = {
+ .init = il_apm_init,
+ .config = il4965_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+ .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+ .release_semaphore = il4965_eeprom_release_semaphore,
+ },
+ .send_tx_power = il4965_send_tx_power,
+ .update_chain_flags = il4965_update_chain_flags,
+ .temp_ops = {
+ .temperature = il4965_temperature_calib,
+ },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il4965_ucode_rx_stats_read,
+ .tx_stats_read = il4965_ucode_tx_stats_read,
+ .general_stats_read = il4965_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+ .post_associate = il4965_post_associate,
+ .config_ap = il4965_config_ap,
+ .manage_ibss_station = il4965_manage_ibss_station,
+ .update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+ .tx = il4965_mac_tx,
+ .start = il4965_mac_start,
+ .stop = il4965_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il4965_configure_filter,
+ .set_key = il4965_mac_set_key,
+ .update_tkip_key = il4965_mac_update_tkip_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .ampdu_action = il4965_mac_ampdu_action,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il4965_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .channel_switch = il4965_mac_channel_switch,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+ .lib = &il4965_lib,
+ .hcmd = &il4965_hcmd,
+ .utils = &il4965_hcmd_utils,
+ .led = &il4965_led_ops,
+ .legacy = &il4965_legacy_ops,
+ .ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+ .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+ .num_of_queues = IL49_NUM_QUEUES,
+ .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = true,
+ .led_compensation = 61,
+ .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .temperature_kelvin = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
+ .fw_name_pre = IL4965_FW_PRE,
+ .ucode_api_max = IL4965_UCODE_API_MAX,
+ .ucode_api_min = IL4965_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_ABC,
+ .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+ .ops = &il4965_ops,
+ .mod_params = &il4965_mod_params,
+ .base_params = &il4965_base_params,
+ .led_mode = IL_LED_BLINK,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 00000000000..f280e0161b1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1301 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+ int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+ struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE 1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE 7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND (0x000000)
+#define IL49_RTC_INST_UPPER_BOUND (0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
+
+#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
+ IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
+ IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+ addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent). The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp). After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
+ * must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
+ * centering the 97/100 correction around 0 degrees K.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
+ * temperature with factory-measured temperatures when calculating txpower
+ * settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
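+/*
+ * Worked example with made-up calibration values: R1 = 100, R2 = 200,
+ * R3 = 1100 and a sign-extended R4 of 1400.  Using the driver's integer
+ * arithmetic, 259 * (1400 - 200) / (1100 - 100) = 310, and
+ * 310 * 97 / 100 + 8 = 308 degrees Kelvin, i.e. about 35 degrees Celsius
+ * (see il4965_hw_get_temperature() in 4965.c).
+ */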
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN (263)
+#define IL_TX_POWER_TEMPERATURE_MAX (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+ ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+ (t) > IL_TX_POWER_TEMPERATURE_MAX)
+
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ * 1) EEPROM
+ * 2) "initialize" alive notification
+ * 3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1) Regulatory information (max txpower and channel usage flags) is provided
+ * separately for each channel that can possibly be supported by the 4965.
+ * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ * (legacy) channels.
+ *
+ * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ * for locations in EEPROM.
+ *
+ * 2) Factory txpower calibration information is provided separately for
+ * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
+ * but 5 GHz has several sub-bands.
+ *
+ * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ * See struct il4965_eeprom_calib_info (and the tree of structures
+ * contained within it) for format, and struct il4965_eeprom for
+ * locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1) Temperature calculation parameters.
+ *
+ * 2) Power supply voltage measurement.
+ *
+ * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1) Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ * Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * 2 transmitters will be used simultaneously; driver must reduce the
+ * regulatory limit by 3 dB (half-power) for each transmitter, so the
+ * combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
+ * reduce target txpower if necessary.
+ *
+ * Backoff values below are in 1/2 dB units (equivalent to steps in
+ * txpower gain tables):
+ *
+ * OFDM 6 - 36 MBit: 10 steps (5 dB)
+ * OFDM 48 MBit: 15 steps (7.5 dB)
+ * OFDM 54 MBit: 17 steps (8.5 dB)
+ * OFDM 60 MBit: 20 steps (10 dB)
+ * CCK all rates: 10 steps (5 dB)
+ *
+ * Backoff values apply to saturation txpower on a per-transmitter basis;
+ * when using MIMO (2 transmitters), each transmitter uses the same
+ * saturation level provided in EEPROM, and the same backoff values;
+ * no reduction (such as with regulatory txpower limits) is required.
+ *
+ * Saturation and backoff values apply equally to 20 MHz (legacy) channel
+ * widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ * factory measurement for HT40 channels.
+ *
+ * The result of this step is the final target txpower. The rest of
+ * the steps figure out the proper settings for the device to achieve
+ * that target txpower.
+ *
+ *
+ * 3) Determine (EEPROM) calibration sub band for the target channel, by
+ * comparing against first and last channels in each sub band
+ * (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
+ * referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ * Interpolation is based on difference between target channel's frequency
+ * and the sample channels' frequencies. Since channel numbers are based
+ * on frequency (5 MHz between each channel number), this is equivalent
+ * to interpolating based on channel number differences.
+ *
+ * Note that the sample channels may or may not be the channels at the
+ * edges of the sub band. The target channel may be "outside" of the
+ * span of the sampled channels.
+ *
+ * Driver may choose the pair (for 2 Tx chains) of measurements (see
+ * struct il4965_eeprom_calib_ch_info) for which the actual measured
+ * txpower comes closest to the desired txpower. Usually, though,
+ * the middle set of measurements is closest to the regulatory limits,
+ * and is therefore a good choice for all txpower calculations (this
+ * assumes that high accuracy is needed for maximizing legal txpower,
+ * while lower txpower configurations do not need as much accuracy).
+ *
+ * Driver should interpolate both members of the chosen measurement pair,
+ * i.e. for both Tx chains (radio transmitters), unless the driver knows
+ * that only one of the chains will be used (e.g. only one tx antenna
+ * connected, but this should be unusual). The rate scaling algorithm
+ * switches antennas to find best performance, so both Tx chains will
+ * be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ * Driver should interpolate factory values for temperature, gain table
+ * idx, and actual power. The power amplifier detector values are
+ * not used by the driver.
+ *
+ * Sanity check: If the target channel happens to be one of the sample
+ * channels, the results should agree with the sample channel's
+ * measurements!
+ *
+ *
+ * 5) Find difference between desired txpower and (interpolated)
+ * factory-measured txpower. Using (interpolated) factory gain table idx
+ * (shown elsewhere) as a starting point, adjust this idx lower to
+ * increase txpower, or higher to decrease txpower, until the target
+ * txpower is reached. Each step in the gain table is 1/2 dB.
+ *
+ * For example, if factory measured txpower is 16 dBm, and target txpower
+ * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ * by 3 dB.
+ *
+ *
+ * 6) Find difference between current device temperature and (interpolated)
+ * factory-measured temperature for sub-band. Factory values are in
+ * degrees Celsius. To calculate current temperature, see comments for
+ * "4965 temperature calculation".
+ *
+ * If current temperature is higher than factory temperature, driver must
+ * increase gain (lower gain table idx), and vice versa.
+ *
+ * Temperature affects gain differently for different channels:
+ *
+ * 2.4 GHz all channels: 3.5 degrees per half-dB step
+ * 5 GHz channels 34-43: 4.5 degrees per half-dB step
+ * 5 GHz channels >= 44: 4.0 degrees per half-dB step
+ *
+ * NOTE: Temperature can increase rapidly when transmitting, especially
+ * with heavy traffic at high txpowers. Driver should update
+ * temperature calculations often under these conditions to
+ * maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7) Find difference between current power supply voltage indicator
+ * (from "initialize alive") and factory-measured power supply voltage
+ * indicator (EEPROM).
+ *
+ * If the current voltage is higher (indicator is lower) than factory
+ * voltage, gain should be reduced (gain table idx increased) by:
+ *
+ * (eeprom - current) / 7
+ *
+ * If the current voltage is lower (indicator is higher) than factory
+ * voltage, gain should be increased (gain table idx decreased) by:
+ *
+ * 2 * (current - eeprom) / 7
+ *
+ * If number of idx steps in either direction turns out to be > 2,
+ * something is wrong ... just use 0.
+ *
+ * NOTE: Voltage compensation is independent of band/channel.
+ *
+ * NOTE: "Initialize" uCode measures current voltage, which is assumed
+ * to be constant after this initial measurement. Voltage
+ * compensation for txpower (number of steps in gain table)
+ * may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * adjust txpower for each transmitter chain, so txpower is balanced
+ * between the two chains. There are 5 pairs of tx_atten[group][chain]
+ * values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ * Group 0: 5 GHz channel 34-43
+ * Group 1: 5 GHz channel 44-70
+ * Group 2: 5 GHz channel 71-124
+ * Group 3: 5 GHz channel 125-200
+ * Group 4: 2.4 GHz all channels
+ *
+ * Add the tx_atten[group][chain] value to the idx for the target chain.
+ * The values are signed, but are in pairs of 0 and a non-negative number,
+ * so as to reduce gain (if necessary) of the "hotter" channel. This
+ * avoids any need to double-check for regulatory compliance after
+ * this step.
+ *
+ *
+ * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ * value to the idx:
+ *
+ * Hardware rev B: 9 steps (4.5 dB)
+ * Hardware rev C: 5 steps (2.5 dB)
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ *
+ * NOTE: This compensation is in addition to any saturation backoff that
+ * might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ * Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ * location(s) in command (struct il4965_txpowertable_cmd).
+ */
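+
+/*
+ * Illustrative only: a minimal sketch (hypothetical helper, not part of the
+ * driver) that ties steps 5-10 above together for one Tx chain.  The target
+ * power itself comes from steps 1-2, i.e. the minimum of the user request,
+ * the regulatory limit, and (saturation - backoff[rate]), all in half-dBm.
+ * The real computation, including per-rate backoff and the CCK adjustment,
+ * is il4965_fill_txpower_tbl() in 4965.c.  All arguments are in half-dB
+ * steps (or half-dBm for the two power values).
+ */
+static inline s32
+il4965_example_gain_idx(s32 factory_gain_idx, s32 target_pwr, s32 factory_pwr,
+ s32 temp_comp, s32 volt_comp, s32 mimo_atten, bool is_5ghz)
+{
+ s32 idx;
+
+ /* step 5: move away from the factory idx by the power difference;
+ * steps 6-8: apply temperature, voltage and MIMO-balance terms */
+ idx = factory_gain_idx - (target_pwr - factory_pwr) -
+ temp_comp - volt_comp + mimo_atten;
+
+ /* step 10: the 5 GHz table is shifted by 9 to allow negative idxes */
+ if (is_5ghz)
+ idx += 9;
+
+ /* stay within the 0..107 gain table */
+ if (idx < 0)
+ idx = 0;
+ if (idx > 107)
+ idx = 107;
+
+ return idx;
+}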
+
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device. That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
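+
+/*
+ * For example, a regulatory limit of 17 dBm (34 half-dBm) becomes
+ * 34 - 6 = 28 half-dBm (14 dBm) per transmitter when a MIMO rate is used.
+ */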
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
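+
+/*
+ * il4965_fill_txpower_tbl() in 4965.c adds the rev-C value (5 steps, 2.5 dB)
+ * to the gain idx of the CCK entry (rate idx 32) on top of all other
+ * adjustments.
+ */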
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
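+
+/*
+ * Illustrative only: a sketch (hypothetical helper, not the driver's own
+ * il4965_get_voltage_compensation()) of the rule from step 7 above.  The
+ * return value is the voltage term that the gain-idx formula subtracts;
+ * plain integer division is used here, while the driver rounds.
+ */
+static inline s32
+il4965_example_voltage_comp(s32 eeprom_ind, s32 current_ind)
+{
+ s32 comp = (current_ind - eeprom_ind) / TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+
+ /* current voltage lower than factory (indicator higher): double it */
+ if (current_ind > eeprom_ind)
+ comp *= 2;
+
+ /* more than 2 steps in either direction means bad data -- use 0 */
+ if (comp < -2 || comp > 2)
+ comp = 0;
+
+ return comp;
+}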
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB. Note that these
+ * are *relative* steps, not indications of absolute output power. Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower. This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration). A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
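+
+/*
+ * For example, a 5 GHz EEPROM idx of 40 maps to driver table entry
+ * 40 + 9 = 49 (see the "power_idx += 9" adjustment in
+ * il4965_fill_txpower_tbl()), and MIN_TX_GAIN_IDX_52GHZ_EXT (-9) maps to
+ * entry 0.
+ */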
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * 0 110 0x3f (highest gain)
+ * 1 104 0x3f
+ * 2 98 0x3f
+ * 3 110 0x3e
+ * 4 104 0x3e
+ * 5 98 0x3e
+ * 6 110 0x3d
+ * 7 104 0x3d
+ * 8 98 0x3d
+ * 9 110 0x3c
+ * 10 104 0x3c
+ * 11 98 0x3c
+ * 12 110 0x3b
+ * 13 104 0x3b
+ * 14 98 0x3b
+ * 15 110 0x3a
+ * 16 104 0x3a
+ * 17 98 0x3a
+ * 18 110 0x39
+ * 19 104 0x39
+ * 20 98 0x39
+ * 21 110 0x38
+ * 22 104 0x38
+ * 23 98 0x38
+ * 24 110 0x37
+ * 25 104 0x37
+ * 26 98 0x37
+ * 27 110 0x36
+ * 28 104 0x36
+ * 29 98 0x36
+ * 30 110 0x35
+ * 31 104 0x35
+ * 32 98 0x35
+ * 33 110 0x34
+ * 34 104 0x34
+ * 35 98 0x34
+ * 36 110 0x33
+ * 37 104 0x33
+ * 38 98 0x33
+ * 39 110 0x32
+ * 40 104 0x32
+ * 41 98 0x32
+ * 42 110 0x31
+ * 43 104 0x31
+ * 44 98 0x31
+ * 45 110 0x30
+ * 46 104 0x30
+ * 47 98 0x30
+ * 48 110 0x6
+ * 49 104 0x6
+ * 50 98 0x6
+ * 51 110 0x5
+ * 52 104 0x5
+ * 53 98 0x5
+ * 54 110 0x4
+ * 55 104 0x4
+ * 56 98 0x4
+ * 57 110 0x3
+ * 58 104 0x3
+ * 59 98 0x3
+ * 60 110 0x2
+ * 61 104 0x2
+ * 62 98 0x2
+ * 63 110 0x1
+ * 64 104 0x1
+ * 65 98 0x1
+ * 66 110 0x0
+ * 67 104 0x0
+ * 68 98 0x0
+ * 69 97 0
+ * 70 96 0
+ * 71 95 0
+ * 72 94 0
+ * 73 93 0
+ * 74 92 0
+ * 75 91 0
+ * 76 90 0
+ * 77 89 0
+ * 78 88 0
+ * 79 87 0
+ * 80 86 0
+ * 81 85 0
+ * 82 84 0
+ * 83 83 0
+ * 84 82 0
+ * 85 81 0
+ * 86 80 0
+ * 87 79 0
+ * 88 78 0
+ * 89 77 0
+ * 90 76 0
+ * 91 75 0
+ * 92 74 0
+ * 93 73 0
+ * 94 72 0
+ * 95 71 0
+ * 96 70 0
+ * 97 69 0
+ * 98 68 0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * -9 123 0x3F (highest gain)
+ * -8 117 0x3F
+ * -7 110 0x3F
+ * -6 104 0x3F
+ * -5 98 0x3F
+ * -4 110 0x3E
+ * -3 104 0x3E
+ * -2 98 0x3E
+ * -1 110 0x3D
+ * 0 104 0x3D
+ * 1 98 0x3D
+ * 2 110 0x3C
+ * 3 104 0x3C
+ * 4 98 0x3C
+ * 5 110 0x3B
+ * 6 104 0x3B
+ * 7 98 0x3B
+ * 8 110 0x3A
+ * 9 104 0x3A
+ * 10 98 0x3A
+ * 11 110 0x39
+ * 12 104 0x39
+ * 13 98 0x39
+ * 14 110 0x38
+ * 15 104 0x38
+ * 16 98 0x38
+ * 17 110 0x37
+ * 18 104 0x37
+ * 19 98 0x37
+ * 20 110 0x36
+ * 21 104 0x36
+ * 22 98 0x36
+ * 23 110 0x35
+ * 24 104 0x35
+ * 25 98 0x35
+ * 26 110 0x34
+ * 27 104 0x34
+ * 28 98 0x34
+ * 29 110 0x33
+ * 30 104 0x33
+ * 31 98 0x33
+ * 32 110 0x32
+ * 33 104 0x32
+ * 34 98 0x32
+ * 35 110 0x31
+ * 36 104 0x31
+ * 37 98 0x31
+ * 38 110 0x30
+ * 39 104 0x30
+ * 40 98 0x30
+ * 41 110 0x25
+ * 42 104 0x25
+ * 43 98 0x25
+ * 44 110 0x24
+ * 45 104 0x24
+ * 46 98 0x24
+ * 47 110 0x23
+ * 48 104 0x23
+ * 49 98 0x23
+ * 50 110 0x22
+ * 51 104 0x18
+ * 52 98 0x18
+ * 53 110 0x17
+ * 54 104 0x17
+ * 55 98 0x17
+ * 56 110 0x16
+ * 57 104 0x16
+ * 58 98 0x16
+ * 59 110 0x15
+ * 60 104 0x15
+ * 61 98 0x15
+ * 62 110 0x14
+ * 63 104 0x14
+ * 64 98 0x14
+ * 65 110 0x13
+ * 66 104 0x13
+ * 67 98 0x13
+ * 68 110 0x12
+ * 69 104 0x08
+ * 70 98 0x08
+ * 71 110 0x07
+ * 72 104 0x07
+ * 73 98 0x07
+ * 74 110 0x06
+ * 75 104 0x06
+ * 76 98 0x06
+ * 77 110 0x05
+ * 78 104 0x05
+ * 79 98 0x05
+ * 80 110 0x04
+ * 81 104 0x04
+ * 82 98 0x04
+ * 83 110 0x03
+ * 84 104 0x03
+ * 85 98 0x03
+ * 86 110 0x02
+ * 87 104 0x02
+ * 88 98 0x02
+ * 89 110 0x01
+ * 90 104 0x01
+ * 91 98 0x01
+ * 92 110 0x00
+ * 93 104 0x00
+ * 94 98 0x00
+ * 95 93 0x00
+ * 96 88 0x00
+ * 97 83 0x00
+ * 98 78 0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated. These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (e.g. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
+#define IL_TX_POWER_REGULATORY_MIN (0)
+#define IL_TX_POWER_REGULATORY_MAX (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion. This is a "peak" power level.
+ * Different types of modulation (i.e. the various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it violates neither the saturation limit nor
+ * the regulatory limit when calculating Tx power settings for the various
+ * rates.
+ *
+ * Units are in half-dBm (e.g. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
+#define IL_TX_POWER_SATURATION_MIN (20)
+#define IL_TX_POWER_SATURATION_MAX (50)
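+
+/*
+ * Illustrative sketch only (not part of the driver): the sanity check
+ * described above -- fall back to the default when an EEPROM value is out
+ * of range. Shown for the 2.4 GHz saturation value; the regulatory limits
+ * use the same pattern. Helper name is made up.
+ */
+static inline int il4965_sane_saturation_24(int eeprom_val)
+{
+	if (eeprom_val < IL_TX_POWER_SATURATION_MIN ||
+	    eeprom_val > IL_TX_POWER_SATURATION_MAX)
+		return IL_TX_POWER_DEFAULT_SATURATION_24;
+	return eeprom_val;
+}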
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for the current device
+ * temperature; higher temperature requires higher gain. Driver must calculate
+ * the current temperature (see "4965 temperature calculation"), then compare it
+ * against the factory calibration temperature in EEPROM; if the current
+ * temperature is higher than the factory temperature, driver must *increase*
+ * gain by the proportions shown in the table below. If the current temperature
+ * is lower than the factory value, driver must *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+ CALIB_CH_GROUP_1 = 0,
+ CALIB_CH_GROUP_2 = 1,
+ CALIB_CH_GROUP_3 = 2,
+ CALIB_CH_GROUP_4 = 3,
+ CALIB_CH_GROUP_5 = 4,
+ CALIB_CH_GROUP_MAX
+};
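+
+/*
+ * Illustrative sketch only (not part of the driver): mapping a channel
+ * number to one of the calibration groups above. The is_52ghz flag and the
+ * helper name are made up; channels outside the listed 5 GHz ranges fall
+ * back to the last group here purely for simplicity.
+ */
+static inline int il4965_calib_group(bool is_52ghz, int ch)
+{
+	if (!is_52ghz)
+		return CALIB_CH_GROUP_5;
+	if (ch >= CALIB_IL_TX_ATTEN_GR1_FCH && ch <= CALIB_IL_TX_ATTEN_GR1_LCH)
+		return CALIB_CH_GROUP_1;
+	if (ch >= CALIB_IL_TX_ATTEN_GR2_FCH && ch <= CALIB_IL_TX_ATTEN_GR2_LCH)
+		return CALIB_CH_GROUP_2;
+	if (ch >= CALIB_IL_TX_ATTEN_GR3_FCH && ch <= CALIB_IL_TX_ATTEN_GR3_LCH)
+		return CALIB_CH_GROUP_3;
+	return CALIB_CH_GROUP_4;
+}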
+
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues). All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device. When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS 7
+#define IL49_CMD_FIFO_NUM 4
+#define IL49_NUM_QUEUES 16
+#define IL49_NUM_AMPDU_QUEUES 8
+
+/**
+ * struct il4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
+ * must also duplicate the byte count entry at the corresponding idx 256-319.
+ *
+ * Padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+ u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
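+
+/*
+ * Illustrative sketch only (not part of the driver): writing one byte
+ * count, duplicating entries 0-63 at 256-319 as required above. The helper
+ * name is made up; the caller is assumed to own the table.
+ */
+static inline void il4965_set_bc(struct il4965_scd_bc_tbl *bc_tbl,
+				 int tfd_idx, u16 byte_cnt)
+{
+	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
+	if (tfd_idx < 64)
+		bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
+}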
+
+#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IL4965_DEFAULT_TX_RETRY 15
+
+/* EEPROM */
+#define IL4965_FIRST_AMPDU_QUEUE 10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND (0x1000)
+#define FH49_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent the host
+ * from going into a power-savings mode that would cause higher DRAM latency
+ * and possible data over/under-runs before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
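+
+/*
+ * Illustrative sketch only (not part of the driver): programming the
+ * keep-warm buffer address as described above (physical address bits 35:4
+ * of a 4 KB, 4K-aligned buffer). il_wr() is the driver's register write
+ * helper; kw_dma is assumed to be the buffer's DMA address.
+ */
+static inline void il4965_kw_init(struct il_priv *il, dma_addr_t kw_dma)
+{
+	il_wr(il, FH49_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));
+}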
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
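+
+/*
+ * Illustrative sketch only (not part of the driver): pointing queue
+ * 'txq_id' at its TFD circular buffer, whose 256-byte-aligned DMA address
+ * is tfd_dma (bits [35:8] are written, as described above). Helper name is
+ * made up; il_wr() is the driver's register write helper.
+ */
+static inline void il4965_set_tfd_cb(struct il_priv *il, int txq_id,
+				     dma_addr_t tfd_dma)
+{
+	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), (u32)(tfd_dma >> 8));
+}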
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the idx of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct il4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM (item 2 above)
+ * to tell the driver the idx of the latest filled RBD. The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx. For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
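+
+/*
+ * Illustrative sketch only (not part of the driver): updating the Rx write
+ * pointer. Per the note above, the value handed to the device is rounded
+ * down to a multiple of 8; keeping at most 254 RBs queued is left to the
+ * caller. Helper name is made up; il_wr() is the driver's write helper.
+ */
+static inline void il4965_rx_update_wptr(struct il_priv *il, u32 write)
+{
+	il_wr(il, FH49_RSCSR_CHNL0_WPTR, write & ~0x7);
+}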
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
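+
+/*
+ * Illustrative sketch only (not part of the driver): one plausible way to
+ * compose the channel-0 Rx config value described above -- DMA enabled,
+ * 4 KB RBs, 256 RBDs (2^8), interrupts to the host, and the usual RB
+ * timeout. Helper name is made up.
+ */
+static inline u32 il4965_rx_config_val(void)
+{
+	return FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+	       FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+	       FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+	       (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+	       (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
+}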
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
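+
+/*
+ * Illustrative sketch only (not part of the driver): the stop-and-poll
+ * step described above -- clear the channel-0 Rx config, then wait for the
+ * idle bit. il_poll_bit() is the driver's polling helper (timeout in usec).
+ */
+static inline int il4965_rx_dma_stop(struct il_priv *il)
+{
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}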
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
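+
+/*
+ * Illustrative sketch only (not part of the driver): enabling one Tx DMA
+ * channel with the two bits called out in the TCSR description above (all
+ * other bits 0). Helper name is made up.
+ */
+static inline void il4965_tx_chnl_enable(struct il_priv *il, int chnl)
+{
+	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
+	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+}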
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that the host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with the previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
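+
+/*
+ * Illustrative sketch only (not part of the driver): the poll described
+ * above -- after clearing a channel's TCSR config, wait until the channel
+ * reports idle in FH49_TSSR_TX_STATUS_REG. Helper name is made up;
+ * il_poll_bit() is the driver's polling helper (timeout in usec).
+ */
+static inline int il4965_tx_chnl_stop(struct il_priv *il, int chnl)
+{
+	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
+	return il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+			   FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
+			   FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+}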
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL (9)
+#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the TX-FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd4766..05bd375cb84 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
tristate
select FW_LOADER
select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
select MAC80211_LEDS
menu "Debugging Options"
- depends on IWLWIFI_LEGACY
+ depends on IWLEGACY
-config IWLWIFI_LEGACY_DEBUG
- bool "Enable full debugging output in 4965 and 3945 drivers"
- depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+ bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+ depends on IWLEGACY
---help---
- This option will enable debug tracing output for the iwlwifilegacy
+ This option will enable debug tracing output for the iwlegacy
drivers.
This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
% echo 0x43fff > /sys/class/net/wlan0/device/debug_level
You can find the list of debug mask values in:
- drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+ drivers/net/wireless/iwlegacy/common.h
If this is your first time using this driver, you should say Y here
as the debug information can assist others in helping you resolve
any problems you may encounter.
-config IWLWIFI_LEGACY_DEBUGFS
- bool "4965 and 3945 debugfs support"
- depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+ bool "iwlegacy (iwl 3945/4965) debugfs support"
+ depends on IWLEGACY && MAC80211_DEBUGFS
---help---
- Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ Enable creation of debugfs files for the iwlegacy drivers. This
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_LEGACY_DEVICE_TRACING
- bool "iwlwifilegacy legacy device access tracing"
- depends on IWLWIFI_LEGACY
- depends on EVENT_TRACING
- help
- Say Y here to trace all commands, including TX frames and IO
- accesses, sent to the device. If you say yes, iwlwifilegacy will
- register with the ftrace framework for event tracing and dump
- all this information to the ringbuffer, you may need to
- increase the ringbuffer size. See the ftrace documentation
- for more information.
-
- When tracing is not enabled, this option still has some
- (though rather small) overhead.
-
- If unsure, say Y so we can help you better when problems
- occur.
endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
This option enables support for
@@ -93,7 +76,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c21..c985a01a073 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
-iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
+iwlegacy-objs := common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
# 4965
obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473..25dd7d28d02 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
-struct iwl_priv;
+#include <linux/ieee80211.h>
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+struct il_priv;
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES 4
+#define IL_OFDM_RATES 8
+#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
enum {
- REPLY_ALIVE = 0x1,
- REPLY_ERROR = 0x2,
+ N_ALIVE = 0x1,
+ N_ERROR = 0x2,
/* RXON and QOS commands */
- REPLY_RXON = 0x10,
- REPLY_RXON_ASSOC = 0x11,
- REPLY_QOS_PARAM = 0x13,
- REPLY_RXON_TIMING = 0x14,
+ C_RXON = 0x10,
+ C_RXON_ASSOC = 0x11,
+ C_QOS_PARAM = 0x13,
+ C_RXON_TIMING = 0x14,
/* Multi-Station support */
- REPLY_ADD_STA = 0x18,
- REPLY_REMOVE_STA = 0x19,
+ C_ADD_STA = 0x18,
+ C_REM_STA = 0x19,
/* Security */
- REPLY_WEPKEY = 0x20,
+ C_WEPKEY = 0x20,
/* RX, TX, LEDs */
- REPLY_3945_RX = 0x1b, /* 3945 only */
- REPLY_TX = 0x1c,
- REPLY_RATE_SCALE = 0x47, /* 3945 only */
- REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ N_3945_RX = 0x1b, /* 3945 only */
+ C_TX = 0x1c,
+ C_RATE_SCALE = 0x47, /* 3945 only */
+ C_LEDS = 0x48,
+ C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
/* 802.11h related */
- REPLY_CHANNEL_SWITCH = 0x72,
- CHANNEL_SWITCH_NOTIFICATION = 0x73,
- REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
- SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+ C_CHANNEL_SWITCH = 0x72,
+ N_CHANNEL_SWITCH = 0x73,
+ C_SPECTRUM_MEASUREMENT = 0x74,
+ N_SPECTRUM_MEASUREMENT = 0x75,
/* Power Management */
- POWER_TABLE_CMD = 0x77,
- PM_SLEEP_NOTIFICATION = 0x7A,
- PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+ C_POWER_TBL = 0x77,
+ N_PM_SLEEP = 0x7A,
+ N_PM_DEBUG_STATS = 0x7B,
/* Scan commands and notifications */
- REPLY_SCAN_CMD = 0x80,
- REPLY_SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ C_SCAN = 0x80,
+ C_SCAN_ABORT = 0x81,
+ N_SCAN_START = 0x82,
+ N_SCAN_RESULTS = 0x83,
+ N_SCAN_COMPLETE = 0x84,
/* IBSS/AP commands */
- BEACON_NOTIFICATION = 0x90,
- REPLY_TX_BEACON = 0x91,
+ N_BEACON = 0x90,
+ C_TX_BEACON = 0x91,
/* Miscellaneous commands */
- REPLY_TX_PWR_TABLE_CMD = 0x97,
+ C_TX_PWR_TBL = 0x97,
/* Bluetooth device coexistence config command */
- REPLY_BT_CONFIG = 0x9b,
+ C_BT_CONFIG = 0x9b,
/* Statistics */
- REPLY_STATISTICS_CMD = 0x9c,
- STATISTICS_NOTIFICATION = 0x9d,
+ C_STATS = 0x9c,
+ N_STATS = 0x9d,
/* RF-KILL commands and notifications */
- CARD_STATE_NOTIFICATION = 0xa1,
+ N_CARD_STATE = 0xa1,
/* Missed beacons notification */
- MISSED_BEACONS_NOTIFICATION = 0xa2,
+ N_MISSED_BEACONS = 0xa2,
- REPLY_CT_KILL_CONFIG_CMD = 0xa4,
- SENSITIVITY_CMD = 0xa8,
- REPLY_PHY_CALIBRATION_CMD = 0xb0,
- REPLY_RX_PHY_CMD = 0xc0,
- REPLY_RX_MPDU_CMD = 0xc1,
- REPLY_RX = 0xc3,
- REPLY_COMPRESSED_BA = 0xc5,
+ C_CT_KILL_CONFIG = 0xa4,
+ C_SENSITIVITY = 0xa8,
+ C_PHY_CALIBRATION = 0xb0,
+ N_RX_PHY = 0xc0,
+ N_RX_MPDU = 0xc1,
+ N_RX = 0xc3,
+ N_COMPRESSED_BA = 0xc5,
- REPLY_MAX = 0xff
+ IL_CN_MAX = 0xff
};
/******************************************************************************
@@ -163,25 +159,25 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_TO_IDX(s) ((s) & 0xff)
+#define IDX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
/**
- * struct iwl_cmd_header
+ * struct il_cmd_header
*
* This header format appears in the beginning of each command sent from the
* driver, and each response/notification received from uCode.
*/
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+ u8 cmd; /* Command ID: C_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
* There is one exception: uCode sets bit 15 when it originates
* the response/notification, i.e. when the response/notification
* is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * example, uCode issues N_3945_RX when it sends a received frame
* to the driver; it is not a direct response to any driver command.
*
* The Linux driver uses the following format:
*
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13 reserved
- * 14 huge - driver sets this to indicate command is in the
- * 'huge' storage at the end of the command buffers
- * 15 unsolicited RX or uCode-originated notification
- */
+ * 0:7 tfd idx - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
__le16 sequence;
/* command or response/notification data follows immediately */
u8 data[0];
} __packed;
-
/**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
*
* Each entry contains two values:
* 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
* 2) Radio gain. This sets the analog gain of the radio Tx path.
* It is a coarser setting, and behaves in a logarithmic (dB) fashion.
*
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
*/
-struct iwl3945_tx_power {
+struct il3945_tx_power {
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
} __packed;
/**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
u8 rate; /* plcp */
- struct iwl3945_tx_power tpc;
+ struct il3945_tx_power tpc;
u8 reserved;
} __packed;
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
* iwl4965 rate_n_flags bit fields
*
* rate_n_flags format is used in following iwl4965 commands:
- * REPLY_RX (response only)
- * REPLY_RX_MPDU (response only)
- * REPLY_TX (both command and response)
- * REPLY_TX_LINK_QUALITY_CMD
+ * N_RX (response only)
+ * N_RX_MPDU (response only)
+ * C_TX (both command and response)
+ * C_TX_LINK_QUALITY_CMD
*
* High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
* 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3
-#define POWER_TABLE_NUM_ENTRIES 33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
-#define POWER_TABLE_CCK_ENTRY 32
+#define POWER_TBL_NUM_ENTRIES 33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TBL_CCK_ENTRY 32
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
-#define IWL_PWR_CCK_ENTRIES 2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IL_PWR_CCK_ENTRIES 2
/**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
*
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
* Use __le32 version (struct tx_power_dual_stream) when building command.
*
* Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
* For MIMO rates, one value may be different from the other,
* in order to balance the Tx output between the two transmitters.
*
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
*/
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
struct {
u8 radio_tx_gain[2];
u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
/**
* struct tx_power_dual_stream
*
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
*/
struct tx_power_dual_stream {
__le32 dw;
} __packed;
/**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
*
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl4965_tx_power_db {
- struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
} __packed;
/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
#define INITIALIZE_SUBTYPE (9)
/*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "initialize alive" notification once the initialization
* uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
* 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
* for each of 5 frequency ranges.
*/
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
* 2 Tx chains */
} __packed;
-
/**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "alive" notification once the runtime image is ready
* to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
* __le32 log_size; log capacity (in number of entries)
* __le32 type; (1) timestamp with each entry, (0) no timestamp
* __le32 wraps; # times uCode has wrapped to top of circular buffer
- * __le32 write_index; next circular buffer entry that uCode would fill
+ * __le32 write_idx; next circular buffer entry that uCode would fill
*
* The header is followed by the circular buffer of log entries. Entries
* with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
*/
-struct iwl_alive_resp {
+struct il_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
u8 sw_rev[8];
u8 ver_type;
- u8 ver_subtype; /* not "9" for runtime alive */
+ u8 ver_subtype; /* not "9" for runtime alive */
__le16 reserved2;
__le32 log_event_table_ptr; /* SRAM address for event log */
__le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
} __packed;
/*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
*/
-struct iwl_error_resp {
+struct il_error_resp {
__le32 error_type;
u8 cmd_id;
u8 reserved1;
@@ -554,7 +548,6 @@ enum {
RXON_DEV_TYPE_SNIFFER = 6,
};
-
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
* (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
-
/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
/**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
*
* RXON tunes the radio tuner to a service channel, and sets up a number
* of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
* channel.
*
* NOTE: All RXONs wipe clean the internal txpower table. Driver must
- * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
* regardless of whether RXON_FILTER_ASSOC_MSK is set.
*/
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
__le16 reserved5;
} __packed;
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
/* Create a common rxon cmd which will be typecast into the 3945 or 4965
* specific rxon cmd, depending on where it is called from.
*/
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
u8 reserved5;
} __packed;
-
/*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
*/
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL 10
+#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
*/
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
__le64 timestamp;
__le16 beacon_interval;
- __le16 atim_window;
+ __le16 atim_win;
__le32 beacon_init_val;
__le16 listen_interval;
u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
} __packed;
/*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
*/
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
/*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
*/
-struct iwl_csa_notification {
+struct il_csa_notification {
__le16 band;
__le16 channel;
__le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
*****************************************************************************/
/**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
*
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x3f.
* @aifsn: Number of slots in Arbitration Interframe Space (before
* performing random backoff timing prior to Tx). Device default 1.
* @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
*
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
* transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
* value, to cap the CW value.
*/
-struct iwl_ac_qos {
+struct il_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
#define AC_NUM 4
/*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
*
* This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
* 0: Background, 1: Best Effort, 2: Video, 3: Voice.
*/
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
__le32 qos_flags;
- struct iwl_ac_qos ac[AC_NUM];
+ struct il_ac_qos ac[AC_NUM];
} __packed;
/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
*/
/* Special, dedicated locations within device's station table */
-#define IWL_AP_ID 0
-#define IWL_STA_ID 2
-#define IWL3945_BROADCAST_ID 24
-#define IWL3945_STATION_COUNT 25
-#define IWL4965_BROADCAST_ID 31
-#define IWL4965_STATION_COUNT 32
+#define IL_AP_ID 0
+#define IL_STA_ID 2
+#define IL3945_BROADCAST_ID 24
+#define IL3945_STATION_COUNT 25
+#define IL4965_BROADCAST_ID 31
+#define IL4965_STATION_COUNT 32
-#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
-#define IWL_INVALID_STATION 255
+#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
+#define IL_INVALID_STATION 255
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
#define STA_MODIFY_DELBA_TID_MSK 0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
/**
* struct sta_id_modify
* @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
* @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
*
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
*
* modify_mask flags select which parameters to modify vs. leave alone.
*/
@@ -936,15 +927,15 @@ struct sta_id_modify {
} __packed;
/*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
*
* The device contains an internal table of per-station information,
* with info on security keys, aggregation parameters, and Tx rates for
* initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
*
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
* a new entry, or modifying a pre-existing one.
*
* NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
* their own txpower/rate setup data).
*
* When getting started on a new channel, driver must set up the
- * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * IL_BROADCAST_ID entry (last entry in the table). For a client
* station in a BSS, once an AP is selected, driver sets up the AP STA
- * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
* are all that are needed for a BSS client station. If the device is
* used as AP, or in an IBSS network, driver must set up station table
- * entries for all STAs in network, starting with index IWL_STA_ID.
+ * entries for all STAs in network, starting with idx IL_STA_ID.
*/
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
__le16 add_immediate_ba_ssn;
} __packed;
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 reserved1;
+ __le16 reserved1;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
} __packed;
/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 rate_n_flags; /* 3945 only */
+ __le16 rate_n_flags; /* 3945 only */
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
__le16 reserved2;
} __packed;
-
#define ADD_STA_SUCCESS_MSK 0x1
-#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_ROOM_IN_TBL 0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
/*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
*/
-struct iwl_add_sta_resp {
- u8 status; /* ADD_STA_* */
+struct il_add_sta_resp {
+ u8 status; /* ADD_STA_* */
} __packed;
#define REM_STA_SUCCESS_MSK 0x1
/*
- * REPLY_REM_STA = 0x19 (response)
+ * C_REM_STA = 0x19 (response)
*/
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
u8 status;
} __packed;
/*
- * REPLY_REM_STA = 0x19 (command)
+ * C_REM_STA = 0x19 (command)
*/
-struct iwl_rem_sta_cmd {
- u8 num_sta; /* number of removed stations */
+struct il_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
u8 reserved[3];
- u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
u8 reserved2[2];
} __packed;
-#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_SELECTED 1
-#define IWL_DROP_ALL 2
+#define IL_DROP_SINGLE 0
+#define IL_DROP_SELECTED 1
+#define IL_DROP_ALL 2
/*
* REPLY_WEP_KEY = 0x20
*/
-struct iwl_wep_key {
- u8 key_index;
+struct il_wep_key {
+ u8 key_idx;
u8 key_offset;
u8 reserved1[2];
u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
u8 key[16];
} __packed;
-struct iwl_wep_cmd {
+struct il_wep_cmd {
u8 num_keys;
u8 global_key_type;
u8 flags;
u8 reserved;
- struct iwl_wep_key key[0];
+ struct il_wep_key key[0];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
u8 phy_count;
u8 id;
u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
__le16 channel;
__le16 phy_flags;
u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
__le32 status;
__le64 timestamp;
__le32 beacon_timestamp;
} __packed;
/*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
*
* NOTE: DO NOT dereference from casts to this structure
* It is provided only for calculating minimum data set size.
* The actual offsets of the hdr and end are dynamic based on
* stats.phy_count
*/
-struct iwl3945_rx_frame {
- struct iwl3945_rx_frame_stats stats;
- struct iwl3945_rx_frame_hdr hdr;
- struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+ struct il3945_rx_frame_stats stats;
+ struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_end end;
} __packed;
-#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
/* Fixed (non-configurable) rx data from phy */
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
-#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
-#define IWL49_AGC_DB_POS (7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IL49_AGC_DB_POS (7)
+struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
} __packed;
-
/*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
* Used only for legacy (non 11n) frames.
*/
-struct iwl_rx_phy_res {
- u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
u8 stat_id; /* configurable DSP phy data set ID */
u8 reserved1;
__le64 timestamp; /* TSF at on air rise */
- __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
__le16 phy_flags; /* general phy flags: band, modulation, ... */
__le16 channel; /* channel number */
- u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 frame_time; /* frame's time on the air */
} __packed;
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
-
/******************************************************************************
* (5)
* Tx Commands & Responses:
*
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
* queues in host DRAM, shared between driver and device (see comments for
* SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
* are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
* uCode handles all timing and protocol related to control frames
* (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
* handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
*
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
* This command must be executed after every RXON command, before Tx can occur.
*****************************************************************************/
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
/*
* 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
/* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- * Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_idx indicates first rate to try;
* uCode walks through table for additional Tx attempts.
* 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
* This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
* Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
-
/*
* TX command security control
*/
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
#define TKIP_ICV_LEN 4
/*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
*/
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
} __packed;
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*/
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
u8 failure_rts;
u8 failure_frame;
u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
__le32 status; /* TX status */
} __packed;
-
/*
* 4965 uCode updates these Tx attempt count values in host DRAM.
* Used for managing Tx retries when expecting block-acks.
* Driver should set these fields to 0.
*/
-struct iwl_dram_scratch {
+struct il_dram_scratch {
u8 try_cnt; /* Tx attempts */
u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
__le16 reserved;
} __packed;
-struct iwl_tx_cmd {
+struct il_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
+ struct il_dram_scratch scratch;
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
u8 sec_ctl; /* TX_CMD_SEC_* */
/*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
- u8 initial_rate_index;
+ u8 initial_rate_idx;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
};
enum {
- TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
TX_STATUS_DELAY_MSK = 0x00000040,
TX_STATUS_ABORT_MSK = 0x00000080,
TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
- TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
@@ -1671,7 +1656,7 @@ enum {
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*
* This response may be in one of two slightly different formats, indicated
* by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
__le16 sequence;
} __packed;
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
u8 frame_count; /* 1 no aggregation, >1 aggregation */
u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
} u;
} __packed;
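A small sketch of how the two response formats described above are told apart by frame_count (function name is illustrative; the fields elided in the middle of the struct are not touched).

/* Illustrative only: frame_count == 1 means u.status holds one __le32
 * status word; frame_count > 1 means u.agg_status[] holds one 16-bit
 * status per aggregated frame. */
static u32 il4965_example_frame_status(struct il4965_tx_resp *resp, int idx)
{
	if (resp->frame_count == 1)
		return le32_to_cpu(resp->u.status);
	return le16_to_cpu(resp->u.agg_status[idx].status);
}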
/*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
*
* Reports Block-Acknowledge from recipient station
*/
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
__le32 sta_addr_lo32;
__le16 sta_addr_hi16;
__le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
} __packed;
/*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
*
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
*/
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
-
/**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
*
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
*
* NOTE: The table of rates passed to the uCode via the
* RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
*
* For example, if you set 9MB (PLCP 0x0f) as the first
* rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
* command would be bit 0 (1 << 0)
*/
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
__le16 rate_n_flags;
u8 try_cnt;
- u8 next_rate_index;
+ u8 next_rate_idx;
} __packed;
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
u8 table_id;
u8 reserved[3];
- struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+ struct il3945_rate_scaling_info table[IL_MAX_RATES];
} __packed;
-
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
#define LINK_QUAL_ANT_B_MSK (1 << 1)
#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
-
/**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
u8 flags;
- /* No entries at or above this (driver chosen) index contain MIMO */
+ /* No entries at or above this (driver chosen) idx contain MIMO */
u8 mimo_delimiter;
/* Best single antenna to use for single stream (legacy, SISO). */
u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
/* Best antennas to use for MIMO (unused for 4965, assumes both). */
- u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
/*
* If driver needs to use different initial rates for different
* EDCA QOS access categories (as implemented by tx fifos 0-3),
- * this table will set that up, by indicating the indexes in the
+ * this table will set that up, by indicating the idxes in the
* rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
* Otherwise, driver should set all entries to 0.
*
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
* 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
* TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
*/
- u8 start_rate_index[LINK_QUAL_AC_NUM];
+ u8 start_rate_idx[LINK_QUAL_AC_NUM];
} __packed;
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
/**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
/*
 * Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
} __packed;
/*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
*
* Each station in the 4965 device's internal station table has its own table
* of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
* one station.
*
* NOTE: Station must already be in 4965 device's station table.
- * Use REPLY_ADD_STA.
+ * Use C_ADD_STA.
*
* The rate scaling procedures described below work well. Of course, other
* procedures are possible, and may work better for particular environments.
*
*
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
*
* Given a particular initial rate and mode, as determined by the rate
* scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
* speculative mode as the new current active mode.
*
* Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate. The data
+ * sliding win of the 62 most recent tx attempts at that rate. The data
* includes a shifting bitmap of success(1)/failure(0), and sums of successful
* and attempted frames, from which the driver can additionally calculate a
* success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
* The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
*
* When the 4965 device makes multiple tx attempts for a given frame, each
* attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
*
* When using block-ack (aggregation), all frames are transmitted at the same
* rate, since there is no per-attempt acknowledgment from the destination
- * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station. The Tx response struct il_tx_resp indicates the Tx rate in
* rate_n_flags field. After receiving a block-ack, the driver can update
* history for the entire block all at once.
*
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
* good performance; higher rate is sure to have poorer success.
*
* 6) Re-evaluate the rate after each tx frame. If working with block-
- * acknowledge, history and statistics may be calculated for the entire
- * block (including prior history that fits within the history windows),
+ * acknowledge, history and stats may be calculated for the entire
+ * block (including prior history that fits within the history wins),
* before re-evaluation.
*
* FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
* legacy), and then repeat the search process.
*
*/
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
/* Index of destination/recipient station in uCode's station table */
u8 sta_id;
u8 reserved1;
__le16 control; /* not used */
- struct iwl_link_qual_general_params general_params;
- struct iwl_link_qual_agg_params agg_params;
+ struct il_link_qual_general_params general_params;
+ struct il_link_qual_agg_params agg_params;
/*
- * Rate info; when using rate-scaling, Tx command's initial_rate_index
- * specifies 1st Tx rate attempted, via index into this table.
+ * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+ * specifies 1st Tx rate attempted, via idx into this table.
 * The 4965 device works its way through the table when retrying Tx.
*/
struct {
- __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
} rs_table[LINK_QUAL_MAX_RETRY_NUM];
__le32 reserved2;
} __packed;
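The 62-attempt sliding window described in the comment above reduces to a bitmap plus two running sums; here is a minimal sketch under that reading (the type and helper are illustrative, not the driver's actual rate-scaling state).

/* Illustrative only: per-rate history window -- a 62-bit success bitmap
 * plus sums of successes and attempts, as described in the text above. */
#define EXAMPLE_RATE_WIN_SIZE 62

struct example_rate_win {
	u64 success_map;	/* bit i set: attempt i succeeded */
	u32 success_cnt;
	u32 attempt_cnt;
};

static void example_rate_win_add(struct example_rate_win *w, bool success)
{
	/* drop the oldest attempt once the window is full */
	if (w->attempt_cnt == EXAMPLE_RATE_WIN_SIZE) {
		if (w->success_map & (1ULL << (EXAMPLE_RATE_WIN_SIZE - 1)))
			w->success_cnt--;
		w->attempt_cnt--;
	}
	w->success_map = (w->success_map << 1) &
			 ((1ULL << EXAMPLE_RATE_WIN_SIZE) - 1);
	if (success) {
		w->success_map |= 1ULL;
		w->success_cnt++;
	}
	w->attempt_cnt++;
	/* success ratio = success_cnt / attempt_cnt,
	 * failures      = attempt_cnt - success_cnt (see text above) */
}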
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
#define BT_MAX_KILL_DEF (0x5)
/*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
*
* 3945 and 4965 devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
-struct iwl_bt_cmd {
+struct il_bt_cmd {
u8 flags;
u8 lead_time;
u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
__le32 kill_cts_mask;
} __packed;
-
/******************************************************************************
* (6)
* Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
RXON_FILTER_ASSOC_MSK | \
RXON_FILTER_BCON_AWARE_MSK)
-struct iwl_measure_channel {
+struct il_measure_channel {
__le32 duration; /* measurement duration in extended beacon
* format */
u8 channel; /* channel to measure */
- u8 type; /* see enum iwl_measure_type */
+ u8 type; /* see enum il_measure_type */
__le16 reserved;
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
*/
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
__le16 len; /* number of bytes starting from token */
u8 token; /* token id */
u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
__le32 filter_flags; /* rxon filter flags */
__le16 channel_count; /* minimum 1, maximum 10 */
__le16 reserved3;
- struct iwl_measure_channel channels[10];
+ struct il_measure_channel channels[10];
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
*/
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
u8 token;
u8 id; /* id of the prior command replaced, or 0xff */
__le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
* measurement) */
} __packed;
-enum iwl_measurement_state {
- IWL_MEASUREMENT_START = 0,
- IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+ IL_MEASUREMENT_START = 0,
+ IL_MEASUREMENT_STOP = 1,
};
-enum iwl_measurement_status {
- IWL_MEASUREMENT_OK = 0,
- IWL_MEASUREMENT_CONCURRENT = 1,
- IWL_MEASUREMENT_CSA_CONFLICT = 2,
- IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+ IL_MEASUREMENT_OK = 0,
+ IL_MEASUREMENT_CONCURRENT = 1,
+ IL_MEASUREMENT_CSA_CONFLICT = 2,
+ IL_MEASUREMENT_TGH_CONFLICT = 3,
/* 4-5 reserved */
- IWL_MEASUREMENT_STOPPED = 6,
- IWL_MEASUREMENT_TIMEOUT = 7,
- IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+ IL_MEASUREMENT_STOPPED = 6,
+ IL_MEASUREMENT_TIMEOUT = 7,
+ IL_MEASUREMENT_PERIODIC_FAILED = 8,
};
#define NUM_ELEMENTS_IN_HISTOGRAM 8
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
} __packed;
/* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
__le32 ofdm;
__le32 cck;
} __packed;
-enum iwl_measure_type {
- IWL_MEASURE_BASIC = (1 << 0),
- IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
- IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
- IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
- IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+ IL_MEASURE_BASIC = (1 << 0),
+ IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IL_MEASURE_FRAME = (1 << 4),
/* bits 5:6 are reserved */
- IWL_MEASURE_IDLE = (1 << 7),
+ IL_MEASURE_IDLE = (1 << 7),
};
/*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
*/
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
u8 id; /* measurement id -- 0 or 1 */
u8 token;
- u8 channel_index; /* index in measurement channel list */
+ u8 channel_idx; /* idx in measurement channel list */
u8 state; /* 0 - start, 1 - stop */
__le32 start_time; /* lower 32-bits of TSF */
u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
u8 channel;
- u8 type; /* see enum iwl_measurement_type */
+ u8 type; /* see enum il_measurement_type */
u8 reserved1;
 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
* valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
* unidentified */
u8 reserved2[3];
- struct iwl_measurement_histogram histogram;
+ struct il_measurement_histogram histogram;
__le32 stop_time; /* lower 32-bits of TSF */
- __le32 status; /* see iwl_measurement_status */
+ __le32 status; /* see il_measurement_status */
} __packed;
/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
*****************************************************************************/
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
* @flags: See below:
*
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
*
* PM allow:
* bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
* '10' force xtal sleep
* '11' Illegal set
*
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
 * uCode assumes sleep over DTIM is allowed and we don't need to wake up
* for every DTIM.
*/
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
__le16 flags;
u8 reserved[2];
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
} __packed;
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
__le32 keep_alive_beacons;
} __packed;
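A hedged sketch of populating the command above; the helper and the example timeouts are illustrative, and the comment restates the DTIM note from earlier in this block.

/* Illustrative only: fill an il_powertable_cmd allowing driver-managed
 * sleep. If sleep_interval[IL_POWER_VEC_SIZE - 1] exceeds the DTIM period,
 * uCode assumes sleep over DTIM is allowed (see NOTE above). */
static void il_example_fill_powertable(struct il_powertable_cmd *cmd,
				       const u32 intervals[IL_POWER_VEC_SIZE])
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;	/* PM allow, bit 0 */
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);	/* example value */
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);	/* example value */
	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(intervals[i]);
}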
/*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
* all devices identical.
*/
-struct iwl_sleep_notification {
+struct il_sleep_notification {
u8 pm_sleep_mode;
u8 pm_wakeup_src;
__le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
/* Sleep states. all devices identical. */
enum {
- IWL_PM_NO_SLEEP = 0,
- IWL_PM_SLP_MAC = 1,
- IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
- IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
- IWL_PM_SLP_PHY = 4,
- IWL_PM_SLP_REPENT = 5,
- IWL_PM_WAKEUP_BY_TIMER = 6,
- IWL_PM_WAKEUP_BY_DRIVER = 7,
- IWL_PM_WAKEUP_BY_RFKILL = 8,
+ IL_PM_NO_SLEEP = 0,
+ IL_PM_SLP_MAC = 1,
+ IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IL_PM_SLP_PHY = 4,
+ IL_PM_SLP_REPENT = 5,
+ IL_PM_WAKEUP_BY_TIMER = 6,
+ IL_PM_WAKEUP_BY_DRIVER = 7,
+ IL_PM_WAKEUP_BY_RFKILL = 8,
/* 3 reserved */
- IWL_PM_NUM_OF_MODES = 12,
+ IL_PM_NUM_OF_MODES = 12,
};
/*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
*/
-struct iwl_card_state_notif {
+struct il_card_state_notif {
__le32 flags;
} __packed;
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
-struct iwl_ct_kill_config {
- __le32 reserved;
- __le32 critical_temperature_M;
- __le32 critical_temperature_R;
-} __packed;
+struct il_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
/******************************************************************************
* (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
*
* One for each channel in the scan list.
* Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
* quiet_plcp_th, good_CRC_th)
*
* To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
* 1) If using passive_dwell (i.e. passive_dwell != 0):
* active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
* 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
* passive_dwell < max_out_time
* active_dwell < max_out_time
*/
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
* 5:7 reserved
*/
u8 type;
- u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
- struct iwl3945_tx_power tpc;
+ u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
+ struct il3945_tx_power tpc;
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
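The timing rules listed above lend themselves to a small sanity check; an illustrative sketch follows (quiet_time and max_out_time live in the scan command, whose fields are elided by this hunk, so they are passed in as parameters).

/* Illustrative only, per the rules above:
 *  1) active_dwell <= passive_dwell when passive_dwell is used
 *  2) quiet_time   <= active_dwell
 *  3) both dwells  <  max_out_time when off-channel time is restricted */
static bool il_example_scan_dwell_ok(u16 active_dwell, u16 passive_dwell,
				     u16 quiet_time, u32 max_out_time)
{
	if (passive_dwell && active_dwell > passive_dwell)
		return false;
	if (quiet_time > active_dwell)
		return false;
	if (max_out_time &&
	    (active_dwell >= max_out_time || passive_dwell >= max_out_time))
		return false;
	return true;
}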
/* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
-struct iwl_scan_channel {
+struct il_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
* 21:31 reserved
*/
__le32 type;
- __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ __le16 channel; /* band is selected by il_scan_cmd "flags" field */
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
} __packed;
/* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
/**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
* each channel may select different ssids from among the 20 (4) entries.
* SSID IEs get transmitted in reverse order of entry.
*/
-struct iwl_ssid_ie {
+struct il_ssid_ie {
u8 id;
u8 len;
u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED 0
-#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED 0
+#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
/*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
*
* The hardware scan command is very powerful; the driver can set it up to
* maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
* Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
*
* To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
*/
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
/*
* Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl3945_scan_channel channels[0];
+ * struct il3945_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
} __packed;
-struct iwl_scan_cmd {
+struct il_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct il_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
/*
* Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl_scan_channel channels[0];
+ * struct il_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
#define ABORT_STATUS 0x2
/*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
*/
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
__le32 status; /* 1: okay, 2: cannot fulfill request */
} __packed;
/*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
*/
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
__le32 tsf_low;
__le32 tsf_high;
__le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
-#define IWL_PROBE_STATUS_OK 0
-#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+#define IL_PROBE_STATUS_OK 0
+#define IL_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT BIT(2)
-#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
/*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
*/
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
u8 channel;
u8 band;
u8 probe_status;
- u8 num_probe_not_sent; /* not enough time to send */
+ u8 num_probe_not_sent; /* not enough time to send */
__le32 tsf_low;
__le32 tsf_high;
- __le32 statistics[NUMBER_OF_STATISTICS];
+ __le32 stats[NUMBER_OF_STATS];
} __packed;
/*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
*/
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
u8 scanned_channels;
u8 status;
u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
__le32 tsf_high;
} __packed;
-
/******************************************************************************
* (9)
* IBSS/AP Commands and Notifications:
*
*****************************************************************************/
-enum iwl_ibss_manager {
- IWL_NOT_IBSS_MANAGER = 0,
- IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+ IL_NOT_IBSS_MANAGER = 0,
+ IL_IBSS_MANAGER = 1,
};
/*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
*/
-struct iwl3945_beacon_notif {
- struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+ struct il3945_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
-struct iwl4965_beacon_notif {
- struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+ struct il4965_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
/*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
*/
-struct iwl3945_tx_beacon_cmd {
- struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+ struct il3945_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
} __packed;
-struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+ struct il_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
*
*****************************************************************************/
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
} failed;
} __packed;
-/* statistics command response */
+/* stats command response */
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
__le32 sent_cts_cnt;
} __packed;
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
* our serving channel */
} __packed;
-struct iwl39_statistics_rx {
- struct iwl39_statistics_rx_phy ofdm;
- struct iwl39_statistics_rx_phy cck;
- struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+ struct iwl39_stats_rx_phy ofdm;
+ struct iwl39_stats_rx_phy cck;
+ struct iwl39_stats_rx_non_phy general;
} __packed;
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
__le32 actual_ack_cnt;
} __packed;
-struct statistics_dbg {
+struct stats_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed;
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
} __packed;
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
__le32 temperature;
- struct statistics_dbg dbg;
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct iwl39_statistics_div div;
+ struct iwl39_stats_div div;
} __packed;
-struct statistics_rx_phy {
+struct stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
__le32 reserved3;
} __packed;
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
- __le32 ina_detection_search_time;/* total time (in 0.8us) searched
- * for INA */
+ __le32 ina_detection_search_time; /* total time (in 0.8us) searched
+ * for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
- __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
__le32 beacon_energy_c;
} __packed;
-struct statistics_rx {
- struct statistics_rx_phy ofdm;
- struct statistics_rx_phy cck;
- struct statistics_rx_non_phy general;
- struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+ struct stats_rx_phy ofdm;
+ struct stats_rx_phy cck;
+ struct stats_rx_non_phy general;
+ struct stats_rx_ht_phy ofdm_ht;
} __packed;
/**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
*
* @ant_a: current tx power on chain a in 1/2 dB step
* @ant_b: current tx power on chain b in 1/2 dB step
* @ant_c: current tx power on chain c in 1/2 dB step
*/
-struct statistics_tx_power {
+struct stats_tx_power {
u8 ant_a;
u8 ant_b;
u8 ant_c;
u8 reserved;
} __packed;
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
__le32 rx_ba_rsp_cnt;
} __packed;
-struct statistics_tx {
+struct stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
- struct statistics_tx_non_phy_agg agg;
+ struct stats_tx_non_phy_agg agg;
__le32 reserved1;
} __packed;
-
-struct statistics_div {
+struct stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
__le32 reserved2;
} __packed;
-struct statistics_general_common {
- __le32 temperature; /* radio temperature */
- struct statistics_dbg dbg;
+struct stats_general_common {
+ __le32 temperature; /* radio temperature */
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct statistics_div div;
+ struct stats_div div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
__le32 num_of_sos_states;
} __packed;
-struct statistics_general {
- struct statistics_general_common common;
+struct stats_general {
+ struct stats_general_common common;
__le32 reserved2;
__le32 reserved3;
} __packed;
-#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
/*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
* all devices identical.
*
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
*
* If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
*
* If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below). This flag
+ * does not affect the response to the C_STATS 0x9c itself.
*/
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
- __le32 configuration_flags; /* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct il_stats_cmd {
+ __le32 configuration_flags; /* IL_STATS_CONF_* */
} __packed;
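For completeness, the two configuration flags described above in use; a trivial illustrative helper (sending the command is not shown).

/* Illustrative only: request a stats snapshot and clear uCode's internal
 * counters afterwards; OR in IL_STATS_CONF_DISABLE_NOTIF to also suppress
 * the per-beacon N_STATS notifications (see comment above). */
static void il_example_fill_stats_cmd(struct il_stats_cmd *cmd)
{
	cmd->configuration_flags = IL_STATS_CONF_CLEAR_STATS;
}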
/*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
* 0x9c with CLEAR_STATS bit set (see above).
*
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans. uCode clears stats
+ * appropriately so that each notification contains stats for only the
* one channel that has just been scanned.
*/
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
__le32 flag;
- struct iwl39_statistics_rx rx;
- struct iwl39_statistics_tx tx;
- struct iwl39_statistics_general general;
+ struct iwl39_stats_rx rx;
+ struct iwl39_stats_tx tx;
+ struct iwl39_stats_general general;
} __packed;
-struct iwl_notif_statistics {
+struct il_notif_stats {
__le32 flag;
- struct statistics_rx rx;
- struct statistics_tx tx;
- struct statistics_general general;
+ struct stats_rx rx;
+ struct stats_tx tx;
+ struct stats_general general;
} __packed;
/*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
*
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver when it detects missed beacons,
 * regardless of how many were missed, which means that when the driver
 * receives the notification it can find inside the command all the beacon
 * information, including the number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
*
*/
-#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
__le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
} __packed;
-
/******************************************************************************
* (11)
* Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
*****************************************************************************/
/**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
* is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
* time listening, not transmitting). Driver must adjust sensitivity so that
* the ratio of actual false alarms to actual Rx time falls within this range.
*
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
* received beacon. These provide information to the driver to analyze the
- * sensitivity. Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source. Pertinent statistics include:
+ * sensitivity. Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source. Pertinent stats include:
*
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
*
* (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
* Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
* uSecs of actual Rx time during beacon period (varies according to
* how much time was spent transmitting).
*
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
*
* false_alarm_cnt
* Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
*
* Total number of false alarms = false_alarms + plcp_errs
*
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
* (notice that the start points for OFDM are at or close to settings for
* maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
*
* If actual rate of OFDM false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
* Reset this to 0 at the first beacon period that falls within the
* "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
*
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
* (notice that the start points for CCK are at maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
- * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
*
* If actual rate of CCK false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), method for reducing
* sensitivity is:
*
- * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* up to max 400.
*
- * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
* sensitivity has been reduced a significant amount; bring it up to
* a moderate 161. Otherwise, *add* 3, up to max 200.
*
- * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
* sensitivity has been reduced only a moderate or small amount;
- * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
* down to min 0. Otherwise (if gain has been significantly reduced),
- * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
*
* b) Save a snapshot of the "silence reference".
*
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
*
* Method for increasing sensitivity:
*
- * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
* down to min 125.
*
- * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* down to min 200.
*
- * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
*
* If actual rate of CCK false alarms (+ plcp_errors) is within good range
* (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
*
* 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
* give some extra margin to energy threshold by *subtracting* 8
- * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ * from value in HD_MIN_ENERGY_CCK_DET_IDX.
*
* For all cases (too few, too many, good range), make sure that the CCK
* detection threshold (energy) is below the energy level for robust
* detection over the past 10 beacon periods, the "Max cck energy".
* Lower values mean higher energy; this means making sure that the value
- * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
*
*/
/*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
- */
-#define HD_TABLE_SIZE (11) /* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
-
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
/**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
* @control: (1) updates working table, (0) updates default table
- * @table: energy threshold values, use HD_* as index into table
+ * @table: energy threshold values, use HD_* as idx into table
*
* Always use "1" in "control" to update uCode's working table and DSP.
*/
-struct iwl_sensitivity_cmd {
- __le16 control; /* always use "1" */
- __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+struct il_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
} __packed;
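A hedged sketch of the "too many CCK false alarms" branch of the algorithm described above, operating on the HD_* table entries; the numeric bounds come straight from the text and the function name is illustrative.

/* Illustrative only: desensitize CCK detection per steps 1-3a above when
 * false alarms (+ plcp_errs) exceed ~50 per 204.8 ms of listening. */
static void il_example_cck_desense(__le16 table[HD_TBL_SIZE])
{
	u16 mrc = le16_to_cpu(table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX]);
	u16 corr = le16_to_cpu(table[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX]);
	u16 energy = le16_to_cpu(table[HD_MIN_ENERGY_CCK_DET_IDX]);

	/* 1) *add* 3 to the MRC correlator threshold, up to max 400 */
	mrc = min_t(u16, mrc + 3, 400);

	/* 2) if the correlator had been reduced a lot, bring it up to a
	 *    moderate 161; otherwise *add* 3, up to max 200 */
	if (corr < 160)
		corr = 161;
	else
		corr = min_t(u16, corr + 3, 200);

	/* 3a) if gain was reduced only moderately, *subtract* 2 from the
	 *     energy threshold, down to min 0 */
	if (corr > 160)
		energy = energy >= 2 ? energy - 2 : 0;

	table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = cpu_to_le16(mrc);
	table[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = cpu_to_le16(corr);
	table[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16(energy);
}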
-
/**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
*
 * This command sets the relative gains of the 4965 device's 3 radio receiver chains.
*
* After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
* in from scanning, or any other non-network source).
*
* DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
* Driver should determine which antennas are actually connected, by comparing
* average beacon signal levels for the 3 Rx chains. Accumulate (add) the
* following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
*
* beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
* to antennas, see above) for gain, by comparing the average signal levels
* detected during the silence after each beacon (background noise).
* Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
*
* beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
* (accum_noise[i] - accum_noise[reference]) / 30
*
* The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
* driver should limit the difference results to a range of 0-3 (0-4.5 dB),
* and set bit 2 to indicate "reduce gain". The value for the reference
* (weakest) chain should be "0".
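The gain arithmetic above reduces to a few lines; an illustrative sketch follows, where "ref" is the weakest (lowest-noise) chain and the accumulators are the 20-beacon noise sums described earlier.

/* Illustrative only: convert 20-beacon noise accumulators into the 0-3
 * (1.5 dB step) reductions described above; the reference chain keeps 0
 * and bit 2 flags "reduce gain" for the others. */
static void il_example_diff_gain(const u32 accum_noise[3], int ref,
				 s8 diff_gain[3])
{
	int i;

	for (i = 0; i < 3; i++) {
		u32 delta = (accum_noise[i] - accum_noise[ref]) / 30;

		if (i == ref || !delta)
			diff_gain[i] = 0;
		else
			diff_gain[i] = min_t(u32, delta, 3) | (1 << 2);
	}
}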
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
/* Phy calibration command for series */
/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
- IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+ IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-struct iwl_calib_hdr {
+struct il_calib_hdr {
u8 op_code;
u8 first_group;
u8 groups_num;
u8 data_valid;
} __packed;
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
- struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+ struct il_calib_hdr hdr;
s8 diff_gain_a; /* see above */
s8 diff_gain_b;
s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
/*
* LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
*
* For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
* this command turns it on or off, or sets up a periodic blinking cycle.
*/
-struct iwl_led_cmd {
+struct il_led_cmd {
__le32 interval; /* "interval" in uSec */
u8 id; /* 1: Activity, 2: Link, 3: Tech */
u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
u8 reserved;
} __packed;
-
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
*
*****************************************************************************/
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK 0x00003fff
+
+struct il_rx_pkt {
/*
* The first 4 bytes of the RX frame header contain both the RX frame
* size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
* 13-00: RX frame size
*/
__le32 len_n_flags;
- struct iwl_cmd_header hdr;
+ struct il_cmd_header hdr;
union {
- struct iwl3945_rx_frame rx_frame;
- struct iwl3945_tx_resp tx_resp;
- struct iwl3945_beacon_notif beacon_status;
-
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
+ struct il3945_rx_frame rx_frame;
+ struct il3945_tx_resp tx_resp;
+ struct il3945_beacon_notif beacon_status;
+
+ struct il_alive_resp alive_frame;
+ struct il_spectrum_notification spectrum_notif;
+ struct il_csa_notification csa_notif;
+ struct il_error_resp err_resp;
+ struct il_card_state_notif card_state_notif;
+ struct il_add_sta_resp add_sta;
+ struct il_rem_sta_resp rem_sta;
+ struct il_sleep_notification sleep_notif;
+ struct il_spectrum_resp spectrum;
+ struct il_notif_stats stats;
+ struct il_compressed_ba_resp compressed_ba;
+ struct il_missed_beacon_notif missed_beacon;
__le32 status;
u8 raw[0];
} u;
} __packed;
-#endif /* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 00000000000..36454d0bbee
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5867 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((_il_rd(il, addr) & mask) == (bits & mask))
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(_il_poll_bit);
+
+void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_set_bit);
+
+void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_clear_bit);
+
+int
+_il_grab_nic_access(struct il_priv *il)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _il_rd(il, CSR_GP_CNTRL);
+ IL_ERR("MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(_il_grab_nic_access);
+
+int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((il_rd(il, addr) & mask) == mask)
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(il_poll_bit);
+
+u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return val;
+}
+EXPORT_SYMBOL(il_rd_prph);
+
+void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr_prph(il, addr, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_wr_prph);
+
+u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+
+ _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+EXPORT_SYMBOL(il_read_targ_mem);
+
+void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _il_wr(il, HBUS_TARG_MEM_WDAT, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_write_targ_mem);
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IL_CMD(N_ALIVE);
+ IL_CMD(N_ERROR);
+ IL_CMD(C_RXON);
+ IL_CMD(C_RXON_ASSOC);
+ IL_CMD(C_QOS_PARAM);
+ IL_CMD(C_RXON_TIMING);
+ IL_CMD(C_ADD_STA);
+ IL_CMD(C_REM_STA);
+ IL_CMD(C_WEPKEY);
+ IL_CMD(N_3945_RX);
+ IL_CMD(C_TX);
+ IL_CMD(C_RATE_SCALE);
+ IL_CMD(C_LEDS);
+ IL_CMD(C_TX_LINK_QUALITY_CMD);
+ IL_CMD(C_CHANNEL_SWITCH);
+ IL_CMD(N_CHANNEL_SWITCH);
+ IL_CMD(C_SPECTRUM_MEASUREMENT);
+ IL_CMD(N_SPECTRUM_MEASUREMENT);
+ IL_CMD(C_POWER_TBL);
+ IL_CMD(N_PM_SLEEP);
+ IL_CMD(N_PM_DEBUG_STATS);
+ IL_CMD(C_SCAN);
+ IL_CMD(C_SCAN_ABORT);
+ IL_CMD(N_SCAN_START);
+ IL_CMD(N_SCAN_RESULTS);
+ IL_CMD(N_SCAN_COMPLETE);
+ IL_CMD(N_BEACON);
+ IL_CMD(C_TX_BEACON);
+ IL_CMD(C_TX_PWR_TBL);
+ IL_CMD(C_BT_CONFIG);
+ IL_CMD(C_STATS);
+ IL_CMD(N_STATS);
+ IL_CMD(N_CARD_STATE);
+ IL_CMD(N_MISSED_BEACONS);
+ IL_CMD(C_CT_KILL_CONFIG);
+ IL_CMD(C_SENSITIVITY);
+ IL_CMD(C_PHY_CALIBRATION);
+ IL_CMD(N_RX_PHY);
+ IL_CMD(N_RX_MPDU);
+ IL_CMD(N_RX);
+ IL_CMD(N_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(il_get_cmd_string);
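
IL_CMD() itself is not part of this hunk (it presumably lives in common.h); a stringifying "case" macro of roughly the following shape would reproduce the behaviour relied on above, but the exact definition should be checked against the header.

/* Assumed shape of the IL_CMD() helper used by il_get_cmd_string():
 * expand an opcode into a case label that returns the opcode's name. */
#define IL_CMD(x) case x: return #x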
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("back from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+ pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = il_generic_cmd_callback;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EBUSY;
+
+ ret = il_enqueue_hcmd(il, cmd);
+ if (ret < 0) {
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ D_INFO("Attempting to send sync command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ set_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Setting HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ cmd_idx = il_enqueue_hcmd(il, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(il->wait_command_queue,
+ !test_bit(S_HCMD_ACTIVE, &il->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+ IL_ERR("Error sending %s: time out after %dms.\n",
+ il_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(S_RF_KILL_HW, &il->status)) {
+ IL_ERR("Command %s aborted: RF KILL Switch\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(S_FW_ERROR, &il->status)) {
+ IL_ERR("Command %s failed: FW Error\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IL_ERR("Error: Response NULL in '%s'\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ il_free_pages(il, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return il_send_cmd_async(il, cmd);
+
+ return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt))
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+ "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput OFF time(ms) ON time (ms)
+ * >300 25 25
+ * >200 to 300 40 40
+ * >100 to 200 55 55
+ * >70 to 100 65 65
+ * >50 to 70 75 75
+ * >20 to 50 85 85
+ * >10 to 20 95 95
+ * >5 to 10 110 110
+ * >1 to 5 130 130
+ * >0 to 1 167 167
+ * <=0 SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+ {.throughput = 0, .blink_time = 334},
+ {.throughput = 1 * 1024 - 1, .blink_time = 260},
+ {.throughput = 5 * 1024 - 1, .blink_time = 220},
+ {.throughput = 10 * 1024 - 1, .blink_time = 190},
+ {.throughput = 20 * 1024 - 1, .blink_time = 170},
+ {.throughput = 50 * 1024 - 1, .blink_time = 150},
+ {.throughput = 70 * 1024 - 1, .blink_time = 130},
+ {.throughput = 100 * 1024 - 1, .blink_time = 110},
+ {.throughput = 200 * 1024 - 1, .blink_time = 80},
+ {.throughput = 300 * 1024 - 1, .blink_time = 50},
+};
+
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock difference on
+ * each HW. LED blink-rate analysis showed an average deviation of 0% on
+ * 3945 and 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to this deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IL_ERR("undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8) ((time * compensation) >> 6);
+}
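
As a worked example of the formula above (numbers assumed, and il taken to be in scope): with the ~5% deviation quoted for 4965-class HW, compensation = (100 - 5) * 64 / 100 = 60, so a nominal 80 ms blink interval becomes (80 * 60) >> 6 = 75 ms; with the 0% deviation of 3945 HW the factor is 64 and the time is returned unchanged.

/* Hypothetical use of il_blink_compensation(), 4965-class deviation */
u16 compensation = (100 - 5) * 64 / 100;	/* = 60 */
u8 on_time = il_blink_compensation(il, 80, compensation);
	/* (80 * 60) >> 6 = 75 ms */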
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+ struct il_led_cmd led_cmd = {
+ .id = IL_LED_LINK,
+ .interval = IL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(S_READY, &il->status))
+ return -EBUSY;
+
+ if (il->blink_on == on && il->blink_off == off)
+ return 0;
+
+ if (off == 0) {
+ /* led is SOLID_ON */
+ on = IL_LED_SOLID;
+ }
+
+ D_LED("Led blink time compensation=%u\n",
+ il->cfg->base_params->led_compensation);
+ led_cmd.on =
+ il_blink_compensation(il, on,
+ il->cfg->base_params->led_compensation);
+ led_cmd.off =
+ il_blink_compensation(il, off,
+ il->cfg->base_params->led_compensation);
+
+ ret = il->cfg->ops->led->cmd(il, &led_cmd);
+ if (!ret) {
+ il->blink_on = on;
+ il->blink_off = off;
+ }
+ return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IL_LED_SOLID;
+
+ il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+ return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IL_LED_DEFAULT)
+ mode = il->cfg->led_mode;
+
+ il->led.name =
+ kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+ il->led.brightness_set = il_led_brightness_set;
+ il->led.blink_set = il_led_blink_set;
+ il->led.max_brightness = 1;
+
+ switch (mode) {
+ case IL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IL_LED_BLINK:
+ il->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(il->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ il_blink,
+ ARRAY_SIZE(il_blink));
+ break;
+ case IL_LED_RF_STATE:
+ il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+ if (ret) {
+ kfree(il->led.name);
+ return;
+ }
+
+ il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+ if (!il->led_registered)
+ return;
+
+ led_classdev_unregister(&il->led);
+ kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel. We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
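
As a purely illustrative sketch of the channel_map convention described above (the lookup helper and the map argument are assumptions, not code from this patch; only the 0xff/0xfe markers come from the comment), resolving a 2.4 GHz channel through such a map might look like:

/* Illustrative only: translate a channel number via a hypothetical
 * channel_map_24[] into il->channel_info, honouring the 0xff
 * ("not supported by HW") and 0xfe ("Rx only, no Tx info") markers. */
static const struct il_channel_info *
example_channel_lookup(struct il_priv *il, const u8 *channel_map_24,
		       u8 channel)
{
	u8 idx = channel_map_24[channel];

	if (idx == 0xff || idx == 0xfe)
		return NULL;	/* no usable Tx channel info */

	return &il->channel_info[idx];
}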
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+ u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ D_EEPROM("EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+ IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+ BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+ return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+ if (!il->eeprom)
+ return 0;
+ return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+ __le16 *e;
+ u32 gp = _il_rd(il, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = il->cfg->base_params->eeprom_size;
+ D_EEPROM("NVM size = %d\n", sz);
+ il->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!il->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *) il->eeprom;
+
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ ret = il_eeprom_verify_signature(il);
+ if (ret < 0) {
+ IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+ if (ret < 0) {
+ IL_ERR("Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _il_wr(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret =
+ _il_poll_bit(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IL_ERR("Time out reading EEPROM[%d]\n", addr);
+ goto done;
+ }
+ r = _il_rd(il, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+ il_eeprom_query16(il, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+ if (ret)
+ il_eeprom_free(il);
+ /* Reset chip to save power until we load uCode during "up". */
+ il_apm_stop(il);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+ kfree(il->eeprom);
+ il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+ int *eeprom_ch_count,
+ const struct il_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_idx)
+{
+ u32 offset =
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+ const struct il_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct il_channel_info *ch_info;
+
+ ch_info =
+ (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -1;
+
+ D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_idx = NULL;
+ const struct il_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct il_channel_info *ch_info;
+
+ if (il->channel_count) {
+ D_EEPROM("Channel map already initialized.\n");
+ return 0;
+ }
+
+ D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+ il->channel_count =
+ ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+ ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+ ARRAY_SIZE(il_eeprom_band_5);
+
+ D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+ il->channel_info =
+ kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+ GFP_KERNEL);
+ if (!il->channel_info) {
+ IL_ERR("Could not allocate channel_info\n");
+ il->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = il->channel_info;
+
+ /* Loop through the 5 EEPROM bands adding them in order to the
+ * channel map we maintain (which contains more information than
+ * just what is in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_idx[ch];
+ ch_info->band =
+ (band ==
+ 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(il_is_channel_valid(ch_info))) {
+ D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n", ch_info->channel,
+ ch_info->flags,
+ il_is_channel_a_band(ch_info) ? "5.2" :
+ "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR)) ? "" :
+ "not ");
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ il_mod_ht40_chan_info(il, ieeeband,
+ eeprom_ch_idx[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+ kfree(il->channel_info);
+ il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+ u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ for (i = 14; i < il->channel_count; i++) {
+ if (il->channel_info[i].channel == channel)
+ return &il->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &il->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+ struct il_powertable_cmd cmd;
+ u8 no_dtim; /* number of skip dtim */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (il->power_data.pci_pm)
+ cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+ D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ D_POWER("Sending power/sleep command\n");
+ D_POWER("Flags value = 0x%08X\n", cmd->flags);
+ D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+ D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+ D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return il_send_cmd_pdu(il, C_POWER_TBL,
+ sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+ il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* scan complete use sleep_power_next, need to be updated */
+ memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(S_SCANNING, &il->status) && !force) {
+ D_INFO("Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(S_POWER_PMI, &il->status);
+
+ ret = il_set_power(il, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(S_POWER_PMI, &il->status);
+
+ if (il->cfg->ops->lib->update_chain_flags && update_chains)
+ il->cfg->ops->lib->update_chain_flags(il);
+ else if (il->cfg->ops->lib->update_chain_flags)
+ D_POWER("Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ il->chain_noise_data.state);
+
+ memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IL_ERR("set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+ struct il_powertable_cmd cmd;
+
+ il_power_sleep_cam_cmd(il, &cmd);
+ return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+ u16 lctl = il_pcie_link_ctl(il);
+
+ il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ il->power_data.debug_sleep_level_override = -1;
+
+ memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52 (10)
+#define IL_PASSIVE_DWELL_BASE (100)
+#define IL_CHANNEL_TUNE_TIME 5
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+ int ret;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SCAN_ABORT,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with error when device is not ready
+ * to receive the scan abort command or is not currently
+ * performing a hardware scan */
+ if (!test_bit(S_READY, &il->status) ||
+ !test_bit(S_GEO_CONFIGURED, &il->status) ||
+ !test_bit(S_SCAN_HW, &il->status) ||
+ test_bit(S_FW_ERROR, &il->status) ||
+ test_bit(S_EXIT_PENDING, &il->status))
+ return -EIO;
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before
+ * the microcode has notified us that a scan is
+ * completed. */
+ D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+ return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (il->scan_request) {
+ D_SCAN("Complete scan in mac80211\n");
+ ieee80211_scan_completed(il->hw, aborted);
+ }
+
+ il->scan_vif = NULL;
+ il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Forcing scan end while not scanning\n");
+ return;
+ }
+
+ D_SCAN("Forcing scan end\n");
+ clear_bit(S_SCANNING, &il->status);
+ clear_bit(S_SCAN_HW, &il->status);
+ clear_bit(S_SCAN_ABORTING, &il->status);
+ il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan abort in progress\n");
+ return;
+ }
+
+ ret = il_send_scan_abort(il);
+ if (ret) {
+ D_SCAN("Send scan abort failed %d\n", ret);
+ il_force_scan_end(il);
+ } else
+ D_SCAN("Successfully send scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+ D_SCAN("Queuing abort scan\n");
+ queue_work(il->workqueue, &il->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&il->mutex);
+
+ D_SCAN("Scan cancel timeout\n");
+
+ il_do_scan_abort(il);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(S_SCAN_HW, &il->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanreq_notification *notif =
+ (struct il_scanreq_notification *)pkt->u.raw;
+
+ D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanstart_notification *notif =
+ (struct il_scanstart_notification *)pkt->u.raw;
+ il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ D_SCAN("Scan start: " "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+ notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanresults_notification *notif =
+ (struct il_scanresults_notification *)pkt->u.raw;
+
+ D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->stats[0]),
+ le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels, scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(S_SCAN_HW, &il->status);
+
+ D_SCAN("Scan on %sGHz took %dms\n",
+ (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - il->scan_start));
+
+ queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+ /* scan handlers */
+ il->handlers[C_SCAN] = il_hdl_scan;
+ il->handlers[N_SCAN_START] = il_hdl_scan_start;
+ il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+ il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IL_ACTIVE_DWELL_TIME_52 +
+ IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IL_ACTIVE_DWELL_TIME_24 +
+ IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 value;
+
+ u16 passive =
+ (band ==
+ IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_52;
+
+ if (il_is_any_associated(il)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if (value > IL_PASSIVE_DWELL_BASE || !value)
+ value = IL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
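
To illustrate the two dwell-time helpers above with assumed numbers (a 2.4 GHz scan carrying two probe requests while associated with a beacon interval of 100; il and vif are taken to be valid driver pointers), a sketch:

/* Illustrative only: expected dwell times for the scenario above */
static void example_dwell_times(struct il_priv *il, struct ieee80211_vif *vif)
{
	u16 active = il_get_active_dwell_time(il, IEEE80211_BAND_2GHZ, 2);
	/* 30 + 3 * (2 + 1) = 39 ms */

	u16 passive = il_get_passive_dwell_time(il, IEEE80211_BAND_2GHZ, vif);
	/* min(100 * 98 / 100 - 2 * 5, 100 + 20) = 88 ms */

	(void)active;
	(void)passive;
}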
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+ u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+ if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (WARN_ON(!il->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&il->scan_check);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(S_SCAN_HW, &il->status)) {
+ D_SCAN("Multiple concurrent scan requests in parallel.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ D_SCAN("Starting scan...\n");
+
+ set_bit(S_SCANNING, &il->status);
+ il->scan_start = jiffies;
+
+ ret = il->cfg->ops->utils->request_scan(il, vif);
+ if (ret) {
+ clear_bit(S_SCANNING, &il->status);
+ return ret;
+ }
+
+ queue_delayed_work(il->workqueue, &il->scan_check,
+ IL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ il->scan_request = req;
+ il->scan_vif = vif;
+ il->scan_band = req->channels[0]->band;
+
+ ret = il_scan_initiate(il, vif);
+
+ D_MAC80211("leave\n");
+
+out_unlock:
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, scan_check.work);
+
+ D_SCAN("Scan check work\n");
+
+ /* If we get here, the firmware has not finished the scan and is
+ * most likely in bad shape, so don't bother sending an abort
+ * command; just force scan complete to mac80211 */
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+ D_SCAN("Abort scan work\n");
+
+ /* Keep the scan_check work queued in case the firmware never
+ * reports back the scan completed notification */
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 200);
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+ bool aborted;
+
+ D_SCAN("Completed scan.\n");
+
+ cancel_delayed_work(&il->scan_check);
+
+ mutex_lock(&il->mutex);
+
+ aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+ if (aborted)
+ D_SCAN("Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already completed.\n");
+ goto out_settings;
+ }
+
+ il_complete_scan(il, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!il_is_ready_rf(il))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+ il_set_tx_power(il, il->tx_power_next, false);
+
+ il->cfg->ops->utils->post_scan(il);
+
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+ INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+ INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+ INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->abort_scan);
+ cancel_work_sync(&il->scan_completed);
+
+ if (cancel_delayed_work_sync(&il->scan_check)) {
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+ IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, il->stations[sta_id].sta.sta.addr);
+
+ if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+ D_ASSOC("STA id %u addr %pM already present"
+ " in uCode (according to driver)\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ } else {
+ il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+ D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+ struct il_rx_pkt *pkt, bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+ return ret;
+ }
+
+ D_INFO("Processing response for adding station %u\n", sta_id);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ D_INFO("C_ADD_STA PASSED\n");
+ il_sta_ucode_activate(il, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TBL:
+ IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IL_ERR("Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IL_ERR("Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+ break;
+ }
+
+ D_INFO("%s station id %u addr %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ D_INFO("%s station according to cmd buffer %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+ il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+ struct il_rx_pkt *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct il_host_cmd cmd = {
+ .id = C_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+ flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = il_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ ret = il_process_add_sta_resp(il, sta, pkt, true);
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+ struct il_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ D_ASSOC("spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ "disabled");
+
+ sta_flags = il->stations[idx].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ il->stations[idx].sta.station_flags = sta_flags;
+done:
+ return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct il_station_entry *station;
+ int i;
+ u8 sta_id = IL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+ if (!compare_ether_addr
+ (il->stations[i].sta.sta.addr, addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!il->stations[i].used &&
+ sta_id == IL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &il->stations[sta_id];
+ station->used = IL_STA_DRIVER_ACTIVE;
+ D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+ il->num_stations++;
+
+ /* Set up the C_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct il_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ il_set_ht_add_station(il, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common -
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+ u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare station %pM for addition\n", addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+ /* Ucode must be active and driver must be non active */
+ if ((il->stations[sta_id].
+ used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+ IL_STA_UCODE_ACTIVE)
+ IL_ERR("removed non active STA %u\n", sta_id);
+
+ il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+ memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+ D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+ bool temporary)
+{
+ struct il_rx_pkt *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct il_rem_sta_cmd rm_sta_cmd;
+
+ struct il_host_cmd cmd = {
+ .id = C_REM_STA,
+ .len = sizeof(struct il_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il_sta_ucode_deactivate(il, sta_id);
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ D_ASSOC("C_REM_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IL_ERR("C_REM_STA failed\n");
+ break;
+ }
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+ unsigned long flags;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
+
+ if (WARN_ON(sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ D_INFO("Removing %pM but non DRIVER active\n", addr);
+ goto out_err;
+ }
+
+ if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_INFO("Removing %pM but non UCODE active\n", addr);
+ goto out_err;
+ }
+
+ if (il->stations[sta_id].used & IL_STA_LOCAL) {
+ kfree(il->stations[sta_id].lq);
+ il->stations[sta_id].lq = NULL;
+ }
+
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+ il->num_stations--;
+
+ BUG_ON(il->num_stations < 0);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ D_INFO("Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != il->stations[i].ctxid)
+ continue;
+
+ if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+ D_INFO("Clearing ucode active for station %d\n", i);
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ if (!cleared)
+ D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in uCode,
+ * are restored.
+ *
+ * Function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_addsta_cmd sta_cmd;
+ struct il_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ D_ASSOC("Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx->ctxid != il->stations[i].ctxid)
+ continue;
+ if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+ !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("Restoring sta %pM\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].sta.mode = 0;
+ il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &il->stations[i].sta,
+ sizeof(struct il_addsta_cmd));
+ send_lq = false;
+ if (il->stations[i].lq) {
+ memcpy(&lq, il->stations[i].lq,
+ sizeof(struct il_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[i].used &=
+ ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ if (!found)
+ D_INFO("Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+ int i;
+
+ for (i = 0; i < il->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &il->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (!(il->stations[i].used & IL_STA_BCAST))
+ continue;
+
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ il->num_stations--;
+ BUG_ON(il->num_stations < 0);
+ kfree(il->stations[i].lq);
+ il->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+ int i;
+ D_RATE("lq station id 0x%x\n", lq->sta_id);
+ D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that an HT rate has been in use and we
+ * lose connectivity with the AP; mac80211 will then first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between the
+ * time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ D_INFO("idx %d of LQ expects HT channel\n", i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set; we then call a callback
+ * to clear the state indicating that station creation is in
+ * progress.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct il_host_cmd cmd = {
+ .id = C_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct il_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ il_dump_lq_cmd(il, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (il_is_lq_table_valid(il, ctx, lq))
+ ret = il_send_cmd(il, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ D_INFO("init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ D_INFO("received request to remove station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to remove station %pM\n", sta->addr);
+ ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+ if (ret)
+ IL_ERR("Error removing station %pM\n", sta->addr);
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc() Allocates rx_free
+ * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
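+
+/*
+ * Worked example of the idx convention above: with RX_QUEUE_SIZE = 256 the
+ * host starts with READ = 0 and WRITE = 255 (READ - 1, wrapped), i.e. an
+ * empty queue; WRITE == READ would mean a completely full one.
+ */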
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
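+
+/*
+ * Worked example: with RX_QUEUE_SIZE = 256, read = 10 and write = 250,
+ * s = 10 - 250 = -240, then s += 256 gives 16 and s -= 2 gives 14 free
+ * slots; the two reserved slots keep a completely full queue
+ * distinguishable from an empty one.
+ */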
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+ reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+ }
+
+ q->need_update = 0;
+
+exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd =
+ dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts =
+ dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ D_11H("Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&il->measure_report, report, sizeof(*report));
+ il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ D_RX("decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode got a bad phase 1 key (TTAK), so it pushes the packet up;
+ * decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
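+ /* fall through */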
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is done in place, drop it */
+ D_RX("Packet destroyed\n");
+ return -1;
+ }
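+ /* fall through */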
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ D_RX("hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+ txq_id, reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_tx_queue_unmap(il, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ i = q->n_win;
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_cmd_queue_unmap(il);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after queuing
+ * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
+ * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
+ * the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
+
+int
+il_queue_space(const struct il_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_win;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_queue_space);
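+
+/*
+ * Worked example: with n_win = 64 and read_ptr == write_ptr (nothing
+ * queued), s = 0 + 64 - 2 = 62 usable entries; as with the Rx queue, two
+ * entries are reserved so that a full queue is not mistaken for an empty
+ * one.
+ */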
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+ u32 id)
+{
+ q->n_bd = count;
+ q->n_win = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise il_queue_inc_wrap
+ * and il_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * il_get_cmd_idx is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_win / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_win / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
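+
+/*
+ * Worked example for the watermarks above: with slots_num (n_win) = 32,
+ * low_mark = 32 / 4 = 8 and high_mark = 32 / 8 = 4; with a small n_win of 8
+ * the clamps apply and low_mark = 4, high_mark = 2.
+ */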
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+ struct device *dev = &il->pci_dev->dev;
+ size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (id != il->cmd_queue) {
+ txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+ GFP_KERNEL);
+ if (!txq->txb) {
+ IL_ERR("kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds =
+ dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+ * For the command queue (#4/#9), allocate command space + one big
+ * command for scan, since the scan command is very large; the system will
+ * not run two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ txq->meta =
+ kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+ txq->cmd =
+ kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct il_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = il_tx_queue_alloc(il, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ il_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the idx (> 0) of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+ /* If any of the command structures end up being larger than
+ * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
+ * we will need to increase the size of the TFD entries.
+ * Also check that the command buffer does not exceed the size
+ * of device_cmd and max_cmd_size. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+ if (il_is_rfkill(il) || il_is_ctkill(il)) {
+ IL_WARN("Not sending command - %s KILL\n",
+ il_is_rfkill(il) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+ IL_ERR("Restarting adapter due to command queue full\n");
+ queue_work(il->workqueue, &il->restart);
+ return -ENOSPC;
+ }
+
+ idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return -ENOSPC;
+ }
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags | CMD_MAPPED;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct il_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, il->cmd_queue);
+ break;
+ default:
+ D_HC("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+ idx, il->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+ phys_addr =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+ 1, U32_PAD(cmd->len));
+
+ /* Increment and update queue's write idx */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return idx;
+}
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When the FW advances the 'R' idx, all entries between the old and new 'R'
+ * idx need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(il->workqueue, &il->restart);
+ }
+
+ }
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ int cmd_idx;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct il_device_cmd *cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ unsigned long flags;
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue, then a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN
+ (txq_id != il->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+ il->txq[il->cmd_queue].q.write_ptr)) {
+ il_print_hex_error(il, pkt, 32);
+ return;
+ }
+
+ cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+ cmd = txq->cmd[cmd_idx];
+ meta = &txq->meta[cmd_idx];
+
+ txq->time_stamp = jiffies;
+
+ pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(il, cmd, pkt);
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->hdr.cmd));
+ wake_up(&il->wait_command_queue);
+ }
+
+ /* Mark as unmapped */
+ meta->flags = 0;
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, the uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, the uCode will ignore the BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to WiFi/BT
+ * co-existence problems. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false"
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
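+
+/*
+ * Since the parameter is declared with S_IRUGO, its current value can be
+ * read back through sysfs under /sys/module/<module name>/parameters/, and
+ * it can be overridden at load time (e.g. bt_coex_active=0 on the modprobe
+ * command line); the exact module name depends on how the legacy driver is
+ * built.
+ */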
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+ struct il_priv *il;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n", cfg->name);
+ goto out;
+ }
+
+ il = hw->priv;
+ il->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = il->hw_params.rx_chains_num;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (il->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |=
+ ((tx_chains_num -
+ 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
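+
+/*
+ * Worked example: on a band with HT40 support and two RX chains,
+ * max_bit_rate = 150 Mbps * 2 = 300 Mbps is advertised as mcs.rx_highest;
+ * with 20 MHz only it would be 72 Mbps * 2 = 144 Mbps.
+ */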
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+ struct il_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+ s8 max_tx_power = 0;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ D_INFO("Geography modes already initialized.\n");
+ set_bit(S_GEO_CONFIGURED, &il->status);
+ return 0;
+ }
+
+ channels =
+ kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates =
+ kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+ sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = RATE_COUNT_LEGACY;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ il->ieee_channels = channels;
+ il->ieee_rates = rates;
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch = &il->channel_info[i];
+
+ if (!il_is_channel_valid(ch))
+ continue;
+
+ sband = &il->bands[ch->band];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (il_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+ geo_ch->center_freq,
+ il_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->
+ flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ il->tx_power_device_lmt = max_tx_power;
+ il->tx_power_user_lmt = max_tx_power;
+ il->tx_power_next = max_tx_power;
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ (il->cfg->sku & IL_SKU_A)) {
+ IL_INFO("Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ il->pci_dev->device, il->pci_dev->subsystem_device);
+ il->cfg->sku &= ~IL_SKU_A;
+ }
+
+ IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ il->bands[IEEE80211_BAND_2GHZ].n_channels,
+ il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(S_GEO_CONFIGURED, &il->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+ kfree(il->ieee_channels);
+ kfree(il->ieee_rates);
+ clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct il_channel_info *ch_info;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+ /*
+ * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+ * the bit will not be set if it is a pure 40MHz case
+ */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ if (il->disable_ht40)
+ return false;
+#endif
+
+ return il_is_channel_extension(il, il->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
+ *
+ * This could badly influence beacon detection stats.
+ */
+
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
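+
+/*
+ * Worked example: with max_beacon_val = 4096 and a peer beacon interval of
+ * 10000, beacon_factor = (10000 + 4096) / 4096 = 3, so 10000 / 3 = 3333 is
+ * programmed; an interval already below the limit passes through unchanged
+ * since the factor computes to 1.
+ */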
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = &il->hw->conf;
+
+ lockdep_assert_held(&il->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_win from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_win = 0;
+
+ beacon_int =
+ il_adjust_beacon_interval(beacon_int,
+ il->hw_params.max_beacon_itrvl *
+ TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+ tsf = il->timestamp; /* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+ D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_win));
+
+ return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+ &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IL_WARN("check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IL_WARN("check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IL_WARN("check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IL_WARN("mac/bssid mcast!\n");
+ error = true;
+ }
+
+ /* make sure basic rates 6Mbps and 1Mbps are supported */
+ if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+ IL_WARN("neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IL_WARN("aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+ IL_WARN("CCK and auto detect");
+ error = true;
+ }
+
+ if ((rxon->
+ flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IL_WARN("TGg but no auto-detect\n");
+ error = true;
+ }
+
+ if (error)
+ IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+
+ if (error) {
+ IL_ERR("Invalid RXON\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_rxon_cmd *staging = &ctx->staging;
+ const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ D_INFO("need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ D_INFO("need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!il_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr
+ (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are not switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return RATE_1M_PLCP;
+ else
+ return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+ struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+ | RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |=
+ cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+ /* clear the HT channel mode before setting the mode */
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IL_ERR("invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+ ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+ _il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return a valid, unused channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+ const struct il_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = il->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ channel = il->channel_info[i].channel;
+ if (channel == le16_to_cpu(il->ctx.staging.channel))
+ continue;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (il_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ il->band = band;
+
+ D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from il_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IL_ERR("Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(il->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info =
+ il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &il->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ il->band = ch_info->band;
+
+ il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ int i;
+
+ hw = il_get_hw_mode(il, il->band);
+ if (!hw) {
+ IL_ERR("Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ il->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < RATE_COUNT_LEGACY)
+ il->active_rate |= (1 << rate->hw_value);
+ }
+
+ D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+ il->ctx.staging.cck_basic_rates =
+ (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ il->ctx.staging.ofdm_basic_rates =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct il_rxon_context *ctx = &il->ctx;
+ struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+ il_chswitch_done(il, true);
+ } else {
+ IL_ERR("CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ il_chswitch_done(il, false);
+ }
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ D_RADIO("RX CONFIG:\n");
+ il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+ D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+ D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+ D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+ D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+ D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+ D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+
+ IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+ il->cfg->ops->lib->dump_nic_error_log(il);
+ if (il->cfg->ops->lib->dump_fh)
+ il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+ il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+ wake_up(&il->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit */
+ clear_bit(S_READY, &il->status);
+
+ if (!test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_DBG(IL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (il->cfg->mod_params->restart_fw)
+ queue_work(il->workqueue, &il->restart);
+ }
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret =
+ _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+ D_INFO("stop master\n");
+
+ return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+ D_INFO("Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ il_apm_stop_master(il);
+
+ /* Reset the entire device */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+ int ret = 0;
+ u16 lctl;
+
+ D_INFO("Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ * NOTE: This is no-op for 3945 (non-existent bit)
+ */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * this costs a negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (il->cfg->base_params->set_l0s) {
+ lctl = il_pcie_link_ctl(il);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ il_set_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ il_clear_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (il->cfg->base_params->pll_cfg_val)
+ il_set_bit(il, CSR_ANA_PLL_CFG,
+ il->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. il_wr_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ D_INFO("Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+ * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (il->cfg->base_params->use_bsm)
+ il_wr_prph(il, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!il->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+	/* 0 dBm means 1 milliwatt */
+ if (tx_power < 0) {
+ IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+ return -EINVAL;
+ }
+
+ if (tx_power > il->tx_power_device_lmt) {
+ IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, il->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value;
+	 * it always needs to be updated with the newest request */
+ il->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(S_SCANNING, &il->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ D_INFO("Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = il->tx_power_user_lmt;
+ il->tx_power_user_lmt = tx_power;
+
+ ret = il->cfg->ops->lib->send_tx_power(il);
+
+ /* if fail to set tx_power, restore the orig. tx power */
+ if (ret) {
+ il->tx_power_user_lmt = prev_tx_power;
+ il->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+ struct il_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ D_INFO("BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+ IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+ struct il_stats_cmd stats_cmd = {
+ .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd, NULL);
+ else
+ return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ D_RX("sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+ il_get_cmd_string(pkt->hdr.cmd));
+ il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ il_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+ memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ int q;
+
+ D_MAC80211("enter\n");
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ D_MAC80211("leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ il_connection_init_rx_config(il, ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&il->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ il->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = il_set_mode(il, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int err;
+ u32 modes;
+
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* check if busy context is exclusive */
+ if (il->ctx.vif &&
+ (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+ if (!(modes & BIT(vif->type))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = &il->ctx;
+ il->ctx.vif = vif;
+
+ err = il_setup_interface(il, &il->ctx);
+ if (err) {
+ il->ctx.vif = NULL;
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+ return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->scan_vif == vif) {
+ il_scan_cancel_timeout(il, 200);
+ il_force_scan_end(il);
+ }
+
+ if (!mode_change) {
+ il_set_mode(il, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ il_teardown_interface(il, vif, false);
+
+ memset(il->bssid, 0, ETH_ALEN);
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+ if (!il->txq)
+ il->txq =
+ kzalloc(sizeof(struct il_tx_queue) *
+ il->cfg->base_params->num_of_queues, GFP_KERNEL);
+ if (!il->txq) {
+ IL_ERR("Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_free_txq_mem(struct il_priv *il)
+{
+ kfree(il->txq);
+ il->txq = NULL;
+}
+EXPORT_SYMBOL(il_free_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+ il->tx_traffic_idx = 0;
+ il->rx_traffic_idx = 0;
+ if (il->tx_traffic)
+ memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+ if (il->rx_traffic)
+ memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+ if (il_debug_level & IL_DL_TX) {
+ if (!il->tx_traffic) {
+ il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (il_debug_level & IL_DL_RX) {
+ if (!il->rx_traffic) {
+ il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ il_reset_traffic_log(il);
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+ kfree(il->tx_traffic);
+ il->tx_traffic = NULL;
+
+ kfree(il->rx_traffic);
+ il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_TX)))
+ return;
+
+ if (!il->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->tx_traffic +
+ (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->tx_traffic_idx =
+ (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_RX)))
+ return;
+
+ if (!il->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->rx_traffic +
+ (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->rx_traffic_idx =
+ (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(MANAGEMENT_ASSOC_REQ);
+ IL_CMD(MANAGEMENT_ASSOC_RESP);
+ IL_CMD(MANAGEMENT_REASSOC_REQ);
+ IL_CMD(MANAGEMENT_REASSOC_RESP);
+ IL_CMD(MANAGEMENT_PROBE_REQ);
+ IL_CMD(MANAGEMENT_PROBE_RESP);
+ IL_CMD(MANAGEMENT_BEACON);
+ IL_CMD(MANAGEMENT_ATIM);
+ IL_CMD(MANAGEMENT_DISASSOC);
+ IL_CMD(MANAGEMENT_AUTH);
+ IL_CMD(MANAGEMENT_DEAUTH);
+ IL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(CONTROL_BACK_REQ);
+ IL_CMD(CONTROL_BACK);
+ IL_CMD(CONTROL_PSPOLL);
+ IL_CMD(CONTROL_RTS);
+ IL_CMD(CONTROL_CTS);
+ IL_CMD(CONTROL_ACK);
+ IL_CMD(CONTROL_CFEND);
+ IL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+ memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats() records all
+ * MGMT, CTRL and DATA packets for both the TX and RX paths; use debugfs
+ * to display the tx/rx stats.
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT or CTRL information
+ * is recorded, but DATA packets are still counted, because il_led.c needs
+ * to control LED blinking based on the number of tx and rx data frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &il->tx_stats;
+ else
+ stats = &il->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+ struct il_force_reset *force_reset;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ force_reset = &il->force_reset;
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ D_INFO("force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+
+	/*
+	 * If the request is external (e.g. from debugfs), always perform
+	 * it, regardless of the module parameter setting.
+	 * If the request is internal (uCode error or a failure detected
+	 * by the driver), the fw_restart module parameter needs to be
+	 * checked before reloading the firmware.
+	 */
+
+ if (!external && !il->cfg->mod_params->restart_fw) {
+ D_INFO("Cancel firmware reload based on "
+ "module parameter setting\n");
+ return 0;
+ }
+
+ IL_ERR("On demand firmware reload\n");
+
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+ wake_up(&il->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(S_READY, &il->status);
+ queue_work(il->workqueue, &il->restart);
+
+ return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ u32 modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&il->mutex);
+
+ if (!ctx->vif || !il_is_ready_rf(il)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+ if (!(modes & BIT(newtype))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+ (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* success */
+ il_teardown_interface(il, vif, true);
+ vif->type = newtype;
+ vif->p2p = newp2p;
+ err = il_setup_interface(il, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+out:
+ mutex_unlock(&il->mutex);
+ return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+ struct il_tx_queue *txq = &il->txq[cnt];
+ struct il_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout =
+ txq->time_stamp +
+ msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+ il->cfg->base_params->wd_timeout);
+ ret = il_force_reset(il, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between timeout and 1.25*timeout.
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
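+
+/*
+ * Worked example (illustrative numbers only): with wd_timeout == 2000 ms
+ * the watchdog fires every IL_WD_TICK(2000) == 500 ms, so a queue whose
+ * time stamp stopped changing just after a tick is declared stuck
+ * somewhere between 2000 ms and 2500 ms later, i.e. between timeout and
+ * 1.25*timeout as noted above.
+ */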
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition
+ * and, if a queue is hung, reset the firmware. If everything is fine,
+ * just rearm the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ timeout = il->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (il_check_stuck_queue(il, il->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (il_is_any_associated(il)) {
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+ unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+ else
+ del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot =
+ (usec /
+ interval) & (il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits) >> il->
+ hw_params.beacon_time_tsf_bits);
+ rem =
+ (usec % interval) & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+
+ return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
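+
+/*
+ * Worked example (illustrative, assuming TIME_UNIT == 1024 usec and
+ * hw_params.beacon_time_tsf_bits == 22): with a 100 TU beacon interval
+ * (102400 usec), usec == 250000 packs as quot == 2 beacons in the
+ * extended (high) part and rem == 45200 usec in the internal (low) part,
+ * i.e. (2 << 22) + 45200.
+ */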
+
+/* base is usually what we get from the uCode with each received frame,
+ * i.e. the HW timer counter counting down
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval)
+{
+ u32 base_low = base & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 addon_low = addon & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits)) +
+ (addon & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+
+	/*
+	 * This function is called when the system goes into suspend state:
+	 * mac80211 will call il_mac_stop() from the mac80211 suspend
+	 * function first, but since il_mac_stop() has no knowledge of who
+	 * the caller is, it will not call apm_ops.stop() to stop the DMA
+	 * operation. Call apm_ops.stop() here to make sure we stop the DMA.
+	 */
+ il_apm_stop(il);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il_enable_interrupts(il);
+
+ if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+ .suspend = il_pci_suspend,
+ .resume = il_pci_resume,
+ .freeze = il_pci_suspend,
+ .thaw = il_pci_resume,
+ .poweroff = il_pci_suspend,
+ .restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+ il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed = false;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&il->mutex);
+
+ D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+ changed);
+
+ if (unlikely(test_bit(S_SCANNING, &il->status))) {
+ scan_active = 1;
+ D_MAC80211("scan active\n");
+ }
+
+ if (changed &
+ (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ }
+
+	/* During scanning, mac80211 delays the channel setting until the
+	 * scan finishes, with changed = 0
+	 */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !il_is_channel_ibss(ch_info)) {
+ D_MAC80211("leave - not IBSS channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in il_ht_conf
+ */
+ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+		/* if we are switching from HT to 2.4 GHz, clear flags
+		 * from any HT-related info since 2.4 GHz does not
+		 * support HT */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il->cfg->ops->legacy->update_bcast_stations)
+ ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ il_set_rate(il);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = il_power_update_mode(il, false);
+ if (ret)
+ D_MAC80211("Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+ conf->power_level);
+
+ il_set_tx_power(il, conf->power_level, false);
+ }
+
+ if (!il_is_ready(il)) {
+ D_MAC80211("leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+ else
+ D_INFO("Not re-sending same RXON configuration.\n");
+ if (ht_changed)
+ il_update_qos(il, ctx);
+
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ spin_lock_irqsave(&il->lock, flags);
+ memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* new association get rid of ibss beacon skb */
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = NULL;
+
+ il->timestamp = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il_scan_cancel_timeout(il, 100);
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - not ready\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* we are restarting association process
+ * clear RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ il_set_rate(il);
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_ASSOC("enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->
+ ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams =
+ (ht_cap->mcs.
+ tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0)
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+ * If at all, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection, in that case mac80211
+ * will soon tell us about that.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ D_MAC80211("enter\n");
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ il->timestamp = le64_to_cpu(timestamp);
+
+ D_MAC80211("leave\n");
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return;
+ }
+
+ il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ D_MAC80211("changes = 0x%X\n", changes);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_alive(il)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ il_update_qos(il, ctx);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ il->beacon_ctx = ctx;
+ else
+ il->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (il_scan_cancel_timeout(il, 100)) {
+ IL_WARN("Aborted scan still in progress after 100ms\n");
+ D_MAC80211("leaving - scan abort failed.\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+ il_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from il_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ il_ht_conf(il, vif);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ il->timestamp = bss_conf->timestamp;
+
+ if (!il_is_rfkill(il))
+ il->cfg->ops->legacy->post_associate(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+ D_MAC80211("Changes (%#x) while associated\n", changes);
+ ret = il_send_rxon_assoc(il, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active, &ctx->staging,
+ sizeof(struct il_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ il->cfg->ops->legacy->config_ap(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret =
+ il->cfg->ops->legacy->manage_ibss_station(il, vif,
+ bss_conf->
+ ibss_joined);
+ if (ret)
+ IL_ERR("failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+ struct il_priv *il = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+ if (!il)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = _il_rd(il, CSR_INT);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+ inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* il_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ * il_tx_cmd_protection: Set rts/cts. Only 3945 and 4965 share this
+ * function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].
+ flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 00000000000..abfa388588b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n) ((4-(n))&0x3)
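+/*
+ * U32_PAD(n) above gives the number of pad bytes needed to round n up to
+ * a 4-byte boundary, e.g. U32_PAD(5) == 3 and U32_PAD(8) == 0.
+ */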
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
+
+struct il_rx_buf {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct il_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at common.c for instance
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands, if it were and its result passed
+ * through it would be simpler...)
+ */
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (idx) host_w */
+ int read_ptr; /* last used entry (idx) host_r */
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_win; /* safe queue win */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+ struct sk_buff *skb;
+ struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+ struct il_queue q;
+ void *tfds;
+ struct il_device_cmd **cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
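+
+/*
+ * Illustrative sketch of the access sequence described above (not the
+ * actual implementation; see il_eeprom_init() in common.c):
+ *
+ *	_il_wr(il, CSR_EEPROM_REG, addr << 1);
+ *	ret = _il_poll_bit(il, CSR_EEPROM_REG,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   IL_EEPROM_ACCESS_TIMEOUT);
+ *	val = (u16)(_il_rd(il, CSR_EEPROM_REG) >> 16);
+ */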
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table idx used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct il_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct il_eeprom_calib_ch_info ch1;
+ struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct il_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
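+
+/*
+ * Note on the offsets above (and throughout this file): the EEPROM is
+ * organized as 16-bit words, so "2*N" is the byte offset of word N and a
+ * trailing "+1" selects the second (odd) byte of that word.
+ */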
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
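+
+/*
+ * Example decode (hypothetical value): EEPROM_RADIO_CONFIG == 0x2145
+ * gives type 1, step 1, dash 0, pnum 1, tx_ant 0x1 and rx_ant 0x2.
+ */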
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
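+
+/*
+ * Illustrative sketch only: reading the band 1 regulatory entries, assuming
+ * each 2-byte entry is a struct il_eeprom_channel carrying the flags (LSB)
+ * and max regulatory txpower (MSB) described above:
+ *
+ *	const struct il_eeprom_channel *ch = (const void *)
+ *	    il_eeprom_query_addr(il, EEPROM_REGULATORY_BAND_1_CHANNELS);
+ *	... ch[0] describes channel 1, ch[13] describes channel 14 ...
+ */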
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct il_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct il_priv *il);
+ void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+ enum ieee80211_band band,
+ u16 channel);
+
+#define IL_NUM_SCAN_RATES (2)
+
+struct il4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 base_power_idx; /* gain idx for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct il_channel_info {
+ struct il4965_channel_tgd_info tgd;
+ struct il4965_channel_tgh_info tgh;
+ struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (idxes). */
+ struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK 0 /* shared */
+#define IL_TX_FIFO_BE 1
+#define IL_TX_FIFO_VI 2 /* shared */
+#define IL_TX_FIFO_VO 3
+#define IL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES 10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
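+
+/*
+ * Illustrative only: with mac80211's IEEE80211_SCTL_SEQ mask (0xFFF0),
+ * SEQ_TO_SN() strips the 4-bit fragment number from a sequence control
+ * field and SN_TO_SEQ() puts a sequence number back in place, e.g.
+ * SEQ_TO_SN(0x0150) == 0x15 (sequence number 21), SN_TO_SEQ(0x15) == 0x0150,
+ * and MAX_SN == 0xFFF (a 12-bit sequence space).
+ */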
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+ CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+ struct il_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct il_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct il_rx_buf *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct il_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: 64-bit bitmap, one bit for each frame pending ACK in Tx win
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+struct il_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union il_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct il_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+ int qos_active;
+ struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+ struct il_addsta_cmd sta;
+ struct il_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct il_hw_key keyinfo;
+ struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+ struct il_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+ struct il_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct il4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations: maximum number of stations supported
+ * @ht40_channel: bands in which 40 MHz (HT40) channels are possible:
+ * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values (struct il_sensitivity_ranges)
+ */
+struct il_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE: The implementations of these functions are not hardware specific,
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * il_ <-- Is part of iwlwifi
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il4965_bg_ <-- Called from work queue context
+ * il4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+     (i >= q->read_ptr && i < q->write_ptr) :
+     !(i < q->read_ptr && i >= q->write_ptr);
+}
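+
+/*
+ * Example (illustrative only): with a 256-entry queue, read_ptr == 250 and
+ * write_ptr == 5, the used entries are 250..255 and 0..4, so
+ * il_queue_used(q, 2) is true while il_queue_used(q, 100) is false.
+ */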
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+ /*
+ * This is for the init calibration result and the scan command, which
+ * require a buffer larger than TFD_MAX_PAYLOAD_SIZE; they use the big
+ * buffer at the end of the command array.
+ */
+ if (is_huge)
+ return q->n_win; /* must be power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return idx & (q->n_win - 1);
+}
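+
+/*
+ * Example (illustrative only): with q->n_win == 64, il_get_cmd_idx(q, 70, 0)
+ * returns 6 (70 & 63), while il_get_cmd_idx(q, 70, 1) returns 64, the slot
+ * reserved for the oversized ("huge") buffer at the end of the command array.
+ */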
+
+struct il_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO 0
+#define IL_OPERATION_MODE_HT_ONLY 1
+#define IL_OPERATION_MODE_MIXED 2
+#define IL_OPERATION_MODE_20MHZ 3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IL4965_CAL_NUM_BEACONS 20
+#define IL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum il4965_false_alarm_state {
+ IL_FA_TOO_MANY = 0,
+ IL_FA_TOO_FEW = 1,
+ IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+ IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IL_CHAIN_NOISE_ACCUMULATE,
+ IL_CHAIN_NOISE_CALIBRATED,
+ IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+ IL_CALIB_DISABLED = 0, /* must be 0 */
+ IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+ IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES (256)
+#define IL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 handlers[IL_CN_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * Host interrupt timeout value, used when setting the interrupt coalescing
+ * timer. CSR_INT_COALESCING is an 8-bit register in 32-usec units.
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT (2000)
+#define IL_LONG_WD_TIMEOUT (10000)
+#define IL_MAX_WD_TIMEOUT (120000)
+
+struct il_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS 22
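+
+/*
+ * Illustrative only: under the 4965 layout above, an extended beacon time
+ * value could be composed as
+ *
+ *	(extended << IL4965_EXT_BEACON_TIME_POS) |
+ *	    (interval & ((1 << IL4965_EXT_BEACON_TIME_POS) - 1))
+ *
+ * i.e. bits 31:22 carry the extended part and bits 21:0 the interval;
+ * see also il_usecs_to_beacons() and il_add_beacon_time() declared later
+ * in this header.
+ */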
+
+struct il_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+ /*
+ * We could use the vif to indicate whether the context is active, but
+ * we also need the context to stay active while disabling, after the
+ * vif has already been removed for type setting.
+ */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ int ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct il_rxon_cmd active;
+ struct il_rxon_cmd staging;
+
+ struct il_rxon_time_cmd timing;
+
+ struct il_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct il_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct il_power_mgr {
+ struct il_powertable_cmd sleep_cmd;
+ struct il_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+struct il_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct il_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+ struct il_rx_buf *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct il_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* force reset */
+ struct il_force_reset force_reset;
+
+ /* we allocate array of il_channel_info for NIC's valid channels.
+ * Access via channel # using indirect idx array */
+ struct il_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct il_calib_result calib_results[IL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+ /* uCode images, save to reload in case of failure */
+ int fw_idx; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ il_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct il_rxon_context ctx;
+
+ __le16 switch_channel;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct il_init_alive_resp card_alive_init;
+ struct il_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct il_sensitivity_data sensitivity_data;
+ struct il_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TBL_SIZE];
+
+ struct il_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct il_rx_queue rxq;
+ struct il_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct il_dma_ptr kw; /* keep warm address */
+ struct il_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_stats isr_stats;
+
+ struct il_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct il_station_entry stations[IL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct il_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il3945_notif_stats accum_stats;
+ struct il3945_notif_stats delta_stats;
+ struct il3945_notif_stats max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet stats */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct il3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+ struct il_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+ /*
+ * The chain noise reset and gain commands are the
+ * two extra calibration commands that follow the standard
+ * phy calibration commands.
+ */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_notif_stats accum_stats;
+ struct il_notif_stats delta_stats;
+ struct il_notif_stats max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct il_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct il_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ il_debug_level if set */
+#endif /* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list stats_periodic;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+ set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+ clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+ if (il->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->data;
+ return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+ for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+ return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+ return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+ return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+ __free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+ free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
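+
+/*
+ * Illustrative sketch only -- the device/subdevice IDs and cfg name below
+ * are placeholders, not real entries: IL_PCI_DEVICE() is meant to be used
+ * inside a struct pci_device_id table, e.g.
+ *
+ *	static const struct pci_device_id hypothetical_hw_card_ids[] = {
+ *		{IL_PCI_DEVICE(0x1234, PCI_ANY_ID, some_hypothetical_cfg)},
+ *		{0}
+ *	};
+ */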
+
+#define TIME_UNIT 1024
+
+#define IL_SKU_G 0x1
+#define IL_SKU_A 0x2
+#define IL_SKU_N 0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+ int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+ int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+ void (*set_rxon_chain) (struct il_priv *il,
+ struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+ u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+ u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+ int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+ void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+ int (*init) (struct il_priv *il);
+ void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+ ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*general_stats_read) (struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+ void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params) (struct il_priv *il);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+ struct il_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+ struct il_tx_queue *txq, dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+ int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+ /* setup Rx handler */
+ void (*handler_setup) (struct il_priv *il);
+ /* alive notification after init uCode load */
+ void (*init_alive_start) (struct il_priv *il);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr) (u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode) (struct il_priv *il);
+
+ void (*dump_nic_error_log) (struct il_priv *il);
+ int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+ int (*set_channel_switch) (struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct il_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct il_priv *il);
+ void (*update_chain_flags) (struct il_priv *il);
+
+ /* eeprom operations */
+ struct il_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+ int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+ void (*post_associate) (struct il_priv *il);
+ void (*config_ap) (struct il_priv *il);
+ /* station management */
+ int (*update_bcast_stations) (struct il_priv *il);
+ int (*manage_ibss_station) (struct il_priv *il,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+ const struct il_lib_ops *lib;
+ const struct il_hcmd_ops *hcmd;
+ const struct il_hcmd_utils_ops *utils;
+ const struct il_led_ops *led;
+ const struct il_nic_ops *nic;
+ const struct il_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct il_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ * to the deviation, to achieve the desired LED frequency.
+ * The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode is in Kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ */
+struct il_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues; /* def: HW dependent */
+ /* for il_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY (0<<1)
+#define IL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IL_LED_DEFAULT: use device default
+ * IL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+ IL_LED_DEFAULT,
+ IL_LED_RF_STATE,
+ IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible with the uCode API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct il_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct il_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct il_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct il_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum il_led_mode led_mode;
+};
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+ struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+ const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_pcie_cap(il->pci_dev);
+ pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS (&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define S_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED 2
+#define S_RF_KILL_HW 3
+#define S_CT_KILL 4
+#define S_INIT 5
+#define S_ALIVE 6
+#define S_READY 7
+#define S_TEMPERATURE 8
+#define S_GEO_CONFIGURED 9
+#define S_EXIT_PENDING 10
+#define S_STATS 12
+#define S_SCANNING 13
+#define S_SCAN_ABORTING 14
+#define S_SCAN_HW 15
+#define S_POWER_PMI 16
+#define S_FW_ERROR 17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(S_READY, &il->status) &&
+ test_bit(S_GEO_CONFIGURED, &il->status) &&
+ !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+ return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+ return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+ return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+ return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+ return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+ if (il_is_rfkill(il))
+ return 0;
+
+ return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+ return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
+extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+extern int _il_grab_nic_access(struct il_priv *il);
+extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+extern u32 il_rd_prph(struct il_priv *il, u32 reg);
+extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+ iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+ iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+ return ioread32(il->hw_base + ofs);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ value = _il_rd(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, reg, value);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+ _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_wr_prph(il, reg, (val & ~mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: iwl il struct
+ *
+ * This is called during il_down() to make sure that, when we get
+ * there from a hardware restart, mac80211 will be able to
+ * reconfigure stations -- in the normal down flow the stations
+ * will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(il->stations, 0, sizeof(il->stations));
+ il->num_stations = 0;
+
+ il->ucode_key_table = 0;
+
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IL_INVALID_STATION;
+
+ return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: pointer to il_priv data structure
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = il_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IL_INVALID_STATION);
+
+ return sta_id;
+}
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+ return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+ return --idx & (n_bd - 1);
+}
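+
+/*
+ * For example (illustrative values only): with n_bd = 256 the mask is 0xff,
+ * so il_queue_inc_wrap(255, 256) == 0 and il_queue_dec_wrap(0, 256) == 255;
+ * this is why n_bd must be a power of 2.
+ */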
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+ desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr =
+ dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+ GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
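+
+/*
+ * For example (illustrative values only): for AC 2 on HW queue 5,
+ * swq_id = (5 << 2) | 2 = 0x16; il_wake_queue()/il_stop_queue() below
+ * recover ac = swq_id & 3 and hwq = (swq_id >> 2) & 0x1f.
+ */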
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, il->queue_stopped))
+ if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, il->queue_stopped))
+ if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+ clear_bit(S_INT_ENABLED, &il->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ _il_wr(il, CSR_INT, 0xffffffff);
+ _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+ _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+ set_bit(S_INT_ENABLED, &il->status);
+ _il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
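+
+/*
+ * For example (illustrative values only): with tsf_bits = 22,
+ * il_beacon_time_mask_low() returns 0x003fffff (bits 21:0) and
+ * il_beacon_time_mask_high() returns 0xffc00000 (bits 31:22), so the two
+ * masks together cover the full 32-bit beacon time word.
+ */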
+
+/**
+ * struct il_rb_status - receive buffer status host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ * in which the last frame was written
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ * which was transferred
+ */
+struct il_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS 20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
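+
+/*
+ * For example (illustrative values only, assuming dma_addr_t is wider than
+ * 32 bits): for a 36-bit DMA address 0x987654321, il_get_dma_hi_addr()
+ * returns 0x9, i.e. bits [35:32], which is what fits in the 4-bit "hi"
+ * part of struct il_tfd_tb below.
+ */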
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of a transmit buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer; every even
+ * entry is unaligned on a 16-bit boundary
+ * @hi_n_len: bits 0-3: [35:32] portion of the dma address
+ * bits 4-15: length of the tx buffer
+ */
+struct il_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
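+
+/*
+ * Size check (illustrative only): 3 reserved bytes + 1 byte num_tbs +
+ * 20 TBs * 6 bytes + 4 pad bytes = 128 bytes, matching the
+ * 128 bytes-per-TFD figure quoted above.
+ */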
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+struct il_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_idx; /* idx in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+ RATE_1M_IDX = 0,
+ RATE_2M_IDX,
+ RATE_5M_IDX,
+ RATE_11M_IDX,
+ RATE_6M_IDX,
+ RATE_9M_IDX,
+ RATE_12M_IDX,
+ RATE_18M_IDX,
+ RATE_24M_IDX,
+ RATE_36M_IDX,
+ RATE_48M_IDX,
+ RATE_54M_IDX,
+ RATE_60M_IDX,
+ RATE_COUNT,
+ RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */
+ RATE_COUNT_3945 = RATE_COUNT - 1,
+ RATE_INVM_IDX = RATE_COUNT,
+ RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+ RATE_6M_IDX_TBL = 0,
+ RATE_9M_IDX_TBL,
+ RATE_12M_IDX_TBL,
+ RATE_18M_IDX_TBL,
+ RATE_24M_IDX_TBL,
+ RATE_36M_IDX_TBL,
+ RATE_48M_IDX_TBL,
+ RATE_54M_IDX_TBL,
+ RATE_1M_IDX_TBL,
+ RATE_2M_IDX_TBL,
+ RATE_5M_IDX_TBL,
+ RATE_11M_IDX_TBL,
+ RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+ IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+ IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+ IL_LAST_OFDM_RATE = RATE_60M_IDX,
+ IL_FIRST_CCK_RATE = RATE_1M_IDX,
+ IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define RATE_6M_MASK (1 << RATE_6M_IDX)
+#define RATE_9M_MASK (1 << RATE_9M_IDX)
+#define RATE_12M_MASK (1 << RATE_12M_IDX)
+#define RATE_18M_MASK (1 << RATE_18M_IDX)
+#define RATE_24M_MASK (1 << RATE_24M_IDX)
+#define RATE_36M_MASK (1 << RATE_36M_IDX)
+#define RATE_48M_MASK (1 << RATE_48M_IDX)
+#define RATE_54M_MASK (1 << RATE_54M_IDX)
+#define RATE_60M_MASK (1 << RATE_60M_IDX)
+#define RATE_1M_MASK (1 << RATE_1M_IDX)
+#define RATE_2M_MASK (1 << RATE_2M_IDX)
+#define RATE_5M_MASK (1 << RATE_5M_IDX)
+#define RATE_11M_MASK (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ RATE_6M_PLCP = 13,
+ RATE_9M_PLCP = 15,
+ RATE_12M_PLCP = 5,
+ RATE_18M_PLCP = 7,
+ RATE_24M_PLCP = 9,
+ RATE_36M_PLCP = 11,
+ RATE_48M_PLCP = 1,
+ RATE_54M_PLCP = 3,
+ RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
+ RATE_1M_PLCP = 10,
+ RATE_2M_PLCP = 20,
+ RATE_5M_PLCP = 55,
+ RATE_11M_PLCP = 110,
+ /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ RATE_SISO_6M_PLCP = 0,
+ RATE_SISO_12M_PLCP = 1,
+ RATE_SISO_18M_PLCP = 2,
+ RATE_SISO_24M_PLCP = 3,
+ RATE_SISO_36M_PLCP = 4,
+ RATE_SISO_48M_PLCP = 5,
+ RATE_SISO_54M_PLCP = 6,
+ RATE_SISO_60M_PLCP = 7,
+ RATE_MIMO2_6M_PLCP = 0x8,
+ RATE_MIMO2_12M_PLCP = 0x9,
+ RATE_MIMO2_18M_PLCP = 0xa,
+ RATE_MIMO2_24M_PLCP = 0xb,
+ RATE_MIMO2_36M_PLCP = 0xc,
+ RATE_MIMO2_48M_PLCP = 0xd,
+ RATE_MIMO2_54M_PLCP = 0xe,
+ RATE_MIMO2_60M_PLCP = 0xf,
+ RATE_SISO_INVM_PLCP,
+ RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ RATE_6M_IEEE = 12,
+ RATE_9M_IEEE = 18,
+ RATE_12M_IEEE = 24,
+ RATE_18M_IEEE = 36,
+ RATE_24M_IEEE = 48,
+ RATE_36M_IEEE = 72,
+ RATE_48M_IEEE = 96,
+ RATE_54M_IEEE = 108,
+ RATE_60M_IEEE = 120,
+ RATE_1M_IEEE = 2,
+ RATE_2M_IEEE = 4,
+ RATE_5M_IEEE = 11,
+ RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK \
+ (RATE_1M_MASK | \
+ RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK \
+ (IL_CCK_BASIC_RATES_MASK | \
+ RATE_5M_MASK | \
+ RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK \
+ (RATE_6M_MASK | \
+ RATE_12M_MASK | \
+ RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ RATE_9M_MASK | \
+ RATE_18M_MASK | \
+ RATE_36M_MASK | \
+ RATE_48M_MASK | \
+ RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE -1
+
+#define IL_MIN_RSSI_VAL -100
+#define IL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT 160
+#define IL_LEGACY_SUCCESS_LIMIT 480
+#define IL_LEGACY_TBL_COUNT 160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IL_NONE_LEGACY_TBL_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO 12800 /* 100% */
+#define RATE_SCALE_SWITCH 10880 /* 85% */
+#define RATE_HIGH_TH 10880 /* 85% */
+#define RATE_INCREASE_TH 6400 /* 50% */
+#define RATE_DECREASE_TH 1920 /* 15% */
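+
+/*
+ * For example (illustrative values only): success_ratio is percent * 128,
+ * so 100 attempted frames with 85 ACKed gives 85 * 128 = 10880, which
+ * equals RATE_HIGH_TH and RATE_SCALE_SWITCH; 50 ACKed gives 6400, the
+ * RATE_INCREASE_TH value, and 100 ACKed gives IL_RS_GOOD_RATIO (12800).
+ */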
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1 0
+#define IL_LEGACY_SWITCH_ANTENNA2 1
+#define IL_LEGACY_SWITCH_SISO 2
+#define IL_LEGACY_SWITCH_MIMO2_AB 3
+#define IL_LEGACY_SWITCH_MIMO2_AC 4
+#define IL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1 0
+#define IL_SISO_SWITCH_ANTENNA2 1
+#define IL_SISO_SWITCH_MIMO2_AB 2
+#define IL_SISO_SWITCH_MIMO2_AC 3
+#define IL_SISO_SWITCH_MIMO2_BC 4
+#define IL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1 0
+#define IL_MIMO2_SWITCH_ANTENNA2 1
+#define IL_MIMO2_SWITCH_SISO_A 2
+#define IL_MIMO2_SWITCH_SISO_B 3
+#define IL_MIMO2_SWITCH_SISO_C 4
+#define IL_MIMO2_SWITCH_GI 5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD 0
+#define IL_AGG_LOAD_THRESHOLD 10
+#define IL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /* ms */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* ms */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
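+
+/*
+ * For example (illustrative only, shown with an 8-bit counter instead of
+ * unsigned long): TIME_WRAP_AROUND() returns the elapsed time from x to y
+ * even if the counter wrapped, e.g. x = 250 and y = 10 gives
+ * (0 - 250) + 10 = 16 ticks.
+ */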
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE 12
+
+struct il_rate_mcs_info {
+ char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+ enum il_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
+};
+
+struct il_traffic_load {
+ unsigned long time_stamp; /* age of the oldest stats */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+ u8 active_tbl; /* idx of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct il_link_quality_cmd lq;
+ struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct il_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+ struct il_station_priv_common common;
+ struct il_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+ return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module. The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. This debug
+ * level will be used if set; otherwise the global debug level, which can be
+ * set via the module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ if (il->debug_level)
+ return il->debug_level;
+ else
+ return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
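+
+/*
+ * Hypothetical example (illustration only; IL_DL_FOO is not a real
+ * classification): adding
+ *
+ * #define IL_DL_FOO (1 << 27)
+ * #define D_FOO(f, a...) IL_DBG(IL_DL_FOO, f, ## a)
+ *
+ * would make D_FOO("bar %d\n", baz) print only when bit 27 is set in the
+ * active debug level returned by il_get_debug_level().
+ */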
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO (1 << 0)
+#define IL_DL_MAC80211 (1 << 1)
+#define IL_DL_HCMD (1 << 2)
+#define IL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP (1 << 4)
+#define IL_DL_HCMD_DUMP (1 << 5)
+#define IL_DL_EEPROM (1 << 6)
+#define IL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER (1 << 8)
+#define IL_DL_TEMP (1 << 9)
+#define IL_DL_NOTIF (1 << 10)
+#define IL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC (1 << 12)
+#define IL_DL_DROP (1 << 13)
+#define IL_DL_TXPOWER (1 << 14)
+#define IL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW (1 << 16)
+#define IL_DL_RF_KILL (1 << 17)
+#define IL_DL_FW_ERRORS (1 << 18)
+#define IL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE (1 << 20)
+#define IL_DL_CALIB (1 << 21)
+#define IL_DL_WEP (1 << 22)
+#define IL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX (1 << 24)
+#define IL_DL_ISR (1 << 25)
+#define IL_DL_HT (1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H (1 << 28)
+#define IL_DL_STATS (1 << 29)
+#define IL_DL_TX_REPLY (1 << 30)
+#define IL_DL_QOS (1 << 31)
+
+#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c26..9138e15004f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
/*
* CSR (control and status registers)
*
@@ -70,9 +70,9 @@
* low power states due to driver-invoked device resets
* (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
*
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
* these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
* no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
@@ -82,16 +82,16 @@
*/
#define CSR_BASE (0x000)
-#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
#define CSR_GP_CNTRL (CSR_BASE+0x024)
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/*
@@ -166,26 +166,26 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
CSR_INT_BIT_ALIVE)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
CSR39_FH_INT_BIT_RX_CHNL2 | \
CSR_FH_INT_BIT_RX_CHNL1 | \
CSR_FH_INT_BIT_RX_CHNL0)
-
#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
CSR_FH_INT_BIT_TX_CHNL1 | \
CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
-
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
/* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
-
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@@ -357,7 +354,7 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
@@ -368,13 +365,13 @@
* to indirectly access device's internal memory or registers that
* may be powered-down.
*
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
* for these registers;
* host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
* to make sure the MAC (uCode processor, etc.) is powered up for accessing
* internal resources.
*
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
* these provide only simple PCI bus access, without waking up the MAC.
*/
#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
/*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
* Bit usage:
- * 0-7: queue write index
+ * 0-7: queue write idx
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 00000000000..b1b8926a9c7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t il_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t il_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
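+
+/*
+ * For example, DEBUGFS_READ_FILE_OPS(nvm) below declares il_dbgfs_nvm_read()
+ * and defines il_dbgfs_nvm_ops with .read pointing at it; the
+ * DEBUGFS_ADD_FILE() macro above can then register an "nvm" debugfs file
+ * backed by those ops.
+ */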
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->tx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ il_clear_traffic_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->rx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff;
+#define BYTE2_MASK 0x0000ffff;
+#define BYTE3_MASK 0x00ffffff;
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct il_priv *il = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+ il->dbgfs_sram_offset = 0x800000;
+ if (il->ucode_type == UCODE_INIT)
+ il->dbgfs_sram_len = il->ucode_init_data.len;
+ else
+ il->dbgfs_sram_len = il->ucode_data.len;
+ }
+ bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ il->dbgfs_sram_len);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ il->dbgfs_sram_offset);
+ for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+ val =
+ il_read_targ_mem(il,
+ il->dbgfs_sram_offset +
+ il->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ il->dbgfs_sram_offset = offset;
+ il->dbgfs_sram_len = len;
+ } else {
+ il->dbgfs_sram_offset = 0;
+ il->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_station_entry *station;
+ int max_sta = il->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ il->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &il->stations[i];
+ if (!station->used)
+ continue;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n", i,
+ station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = il->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IL_ERR("NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = il->eeprom;
+ if (!ptr) {
+ IL_ERR("Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ pos +=
+ scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+ eeprom_ver);
+ for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(S_GEO_CONFIGURED, &il->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 2.4GHz band 802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+ test_bit(S_HCMD_ACTIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+ test_bit(S_INT_ENABLED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+ test_bit(S_RF_KILL_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+ test_bit(S_CT_KILL, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+ test_bit(S_INIT, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+ test_bit(S_ALIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+ test_bit(S_READY, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+ test_bit(S_TEMPERATURE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+ test_bit(S_GEO_CONFIGURED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+ test_bit(S_EXIT_PENDING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+ test_bit(S_STATS, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+ test_bit(S_SCANNING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+ test_bit(S_SCAN_ABORTING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+ test_bit(S_SCAN_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+ test_bit(S_POWER_PMI, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+ test_bit(S_FW_ERROR, &il->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ il->isr_stats.hw);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ il->isr_stats.sw);
+ if (il->isr_stats.sw || il->isr_stats.hw) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ il->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ il->isr_stats.sch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ il->isr_stats.alive);
+#endif
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ il->isr_stats.rfkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ il->isr_stats.ctkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ il->isr_stats.wakeup);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+ il->isr_stats.rx);
+ for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+ if (il->isr_stats.handlers[cnt] > 0)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ il_get_cmd_string(cnt),
+ il->isr_stats.handlers[cnt]);
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ il->isr_stats.tx);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ il->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ il_clear_isr_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rxon_context *ctx = &il->ctx;
+ int pos = 0, i;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!il_is_any_associated(il))
+ il->disable_ht40 = ht40 ? true : false;
+ else {
+ IL_ERR("Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+ il->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_rx_queue *rxq = &il->rxq;
+ char *buf;
+ int bufsz =
+ ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+ q->read_ptr, q->write_ptr);
+ }
+ if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+ ptr = il->tx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+ il->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+ ptr = il->rx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+ il->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+		return -EINVAL;
+ if (traffic_log == 0)
+ il_reset_traffic_log(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz =
+ sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+ q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, il->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&il->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rx_queue *rxq = &il->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->
+ closed_rb_num) & 0x0FFF);
+ } else {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct il_sensitivity_data *data;
+
+ data = &il->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IL_ERR("Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct il_chain_noise_data *data;
+
+ data = &il->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IL_ERR("Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
+ pwrsave_status =
+ _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+		return -EINVAL;
+
+	/* any write requests stats from uCode with the clear flag set */
+ mutex_lock(&il->mutex);
+ il_send_stats_request(il, CMD_SYNC, true);
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len =
+ sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (il->cfg->ops->lib->dump_fh) {
+ ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+ if (buf) {
+ ret =
+ simple_read_from_buffer(user_buf, count, ppos, buf,
+ pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%d\n",
+ il->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ il->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct il_force_reset *force_reset;
+
+ force_reset = &il->force_reset;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+ force_reset->reset_duration);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ struct il_priv *il = file->private_data;
+
+ ret = il_force_reset(il, true);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+ timeout = IL_DEF_WD_TIMEOUT;
+
+ il->cfg->base_params->wd_timeout = timeout;
+ il_setup_watchdog(il);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ struct dentry *phyd = il->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ il->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &il->disable_sens_cal);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &il->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+ return 0;
+
+err:
+ IL_ERR("Can't create the debugfs directory\n");
+ il_dbgfs_unregister(il);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+ if (!il->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(il->debugfs_dir);
+ il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->_3945.statistics.flag));
- if (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
- sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
- ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
- *max_ofdm;
- struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct iwl39_statistics_rx_non_phy *general, *accum_general;
- struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->_3945.statistics.rx.ofdm;
- cck = &priv->_3945.statistics.rx.cck;
- general = &priv->_3945.statistics.rx.general;
- accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
- accum_cck = &priv->_3945.accum_statistics.rx.cck;
- accum_general = &priv->_3945.accum_statistics.rx.general;
- delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
- delta_cck = &priv->_3945.delta_statistics.rx.cck;
- delta_general = &priv->_3945.delta_statistics.rx.general;
- max_ofdm = &priv->_3945.max_delta.rx.ofdm;
- max_cck = &priv->_3945.max_delta.rx.cck;
- max_general = &priv->_3945.max_delta.rx.general;
-
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:", le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
- ssize_t ret;
- struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->_3945.statistics.tx;
- accum_tx = &priv->_3945.accum_statistics.tx;
- delta_tx = &priv->_3945.delta_statistics.tx;
- max_tx = &priv->_3945.max_delta.tx;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
- ssize_t ret;
- struct iwl39_statistics_general *general, *accum_general;
- struct iwl39_statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->_3945.statistics.general;
- dbg = &priv->_3945.statistics.general.dbg;
- div = &priv->_3945.statistics.general.div;
- accum_general = &priv->_3945.accum_statistics.general;
- delta_general = &priv->_3945.delta_statistics.general;
- max_general = &priv->_3945.max_delta.general;
- accum_dbg = &priv->_3945.accum_statistics.general.dbg;
- delta_dbg = &priv->_3945.delta_statistics.general.dbg;
- max_dbg = &priv->_3945.max_delta.general.dbg;
- accum_div = &priv->_3945.accum_statistics.general.div;
- delta_div = &priv->_3945.delta_statistics.general.div;
- max_div = &priv->_3945.max_delta.general.div;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b44..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND (0x0800)
-#define FH39_MEM_UPPER_BOUND (0x1000)
-
-#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
- ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
- (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
- FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-struct iwl3945_tfd_tb {
- __le32 addr;
- __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
- __le32 control_flags;
- struct iwl3945_tfd_tb tbs[4];
- u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af1..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET 95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- * to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- * (PA) output voltage detector. This same detector is used during Tx of
- * long packets in normal operation to provide feedback as to proper output
- * level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
- u8 gain_index; /* index into power (gain) setup table ... */
- s8 power; /* ... for this pwr level for this chnl group */
- u16 v_det; /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- * to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
- struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
- s32 a, b, c, d, e; /* coefficients for voltage->power
- * formula (signed) */
- s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
- * frequency (signed) */
- s8 saturation_power; /* highest power possible by h/w in this
- * band */
- u8 group_channel; /* "representative" channel # in this band */
- s16 temperature; /* h/w temperature at factory calib this band
- * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- * difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
- u32 Ta;
- u32 Tb;
- u32 Tc;
- u32 Td;
- u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
- u8 reserved0[16];
- u16 device_id; /* abs.ofs: 16 */
- u8 reserved1[2];
- u16 pmc; /* abs.ofs: 20 */
- u8 reserved2[20];
- u8 mac_address[6]; /* abs.ofs: 42 */
- u8 reserved3[58];
- u16 board_revision; /* abs.ofs: 106 */
- u8 reserved4[11];
- u8 board_pba_number[9]; /* abs.ofs: 119 */
- u8 reserved5[8];
- u16 version; /* abs.ofs: 136 */
- u8 sku_cap; /* abs.ofs: 138 */
- u8 leds_mode; /* abs.ofs: 139 */
- u16 oem_mode;
- u16 wowlan_mode; /* abs.ofs: 142 */
- u16 leds_time_interval; /* abs.ofs: 144 */
- u8 leds_off_time; /* abs.ofs: 146 */
- u8 leds_on_time; /* abs.ofs: 147 */
- u8 almgor_m_version; /* abs.ofs: 148 */
- u8 antenna_switch_type; /* abs.ofs: 149 */
- u8 reserved6[42];
- u8 sku_id[4]; /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
- u16 band_1_count; /* abs.ofs: 196 */
- struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
- u16 band_2_count; /* abs.ofs: 226 */
- struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
- u16 band_3_count; /* abs.ofs: 254 */
- struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
- u16 band_4_count; /* abs.ofs: 280 */
- struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
- u16 band_5_count; /* abs.ofs: 304 */
- struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
-
- u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
- struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
- struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
- u8 reserved16[172]; /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
-#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES 5
-#define IWL39_CMD_QUEUE_NUM 4
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-/*********************************************/
-
-#define RFD_SIZE 4
-#define NUM_TFD_CHUNKS 4
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-#define U32_PAD(n) ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n) (n << 24)
-#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n) (n << 28)
-#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
- IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
- IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
- __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
- return cpu_to_le16((u16)rate|flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
- struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
- .cmd = iwl3945_send_led_cmd,
-};
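This ops table is the only thing the file exports: the shared iwlegacy LED code (not part of this hunk) presumably calls through .cmd whenever it needs to change the LED state, which on 3945 queues REPLY_LEDS_CMD asynchronously with no completion callback.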
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2ddde..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
- 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
- s8 min_rssi;
- u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-72, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-87, IWL_RATE_9M_INDEX},
- {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-68, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-86, IWL_RATE_11M_INDEX},
- {-88, IWL_RATE_5M_INDEX},
- {-90, IWL_RATE_2M_INDEX},
- {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
-#define IWL_RATE_WIN_FLUSH (HZ/2)
-#define IWL39_RATE_HIGH_TH 11520
-#define IWL_SUCCESS_UP_TH 8960
-#define IWL_SUCCESS_DOWN_TH 10880
-#define IWL_RATE_MIN_FAILURE_TH 6
-#define IWL_RATE_MIN_SUCCESS_TH 8
-#define IWL_RATE_DECREASE_TH 1920
-#define IWL_RATE_RETRY_TH 15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
- u32 index = 0;
- u32 table_size = 0;
- struct iwl3945_tpt_entry *tpt_table = NULL;
-
- if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
- rssi = IWL_MIN_RSSI_VAL;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- tpt_table = iwl3945_tpt_table_g;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
- break;
-
- case IEEE80211_BAND_5GHZ:
- tpt_table = iwl3945_tpt_table_a;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
- break;
-
- default:
- BUG();
- break;
- }
-
- while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
- index++;
-
- index = min(index, (table_size - 1));
-
- return tpt_table[index].index;
-}
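For example, on the 2.4 GHz table an RSSI of -70 dBm skips the -60, -64 and -68 dBm entries (the RSSI is below each threshold) and stops at {-80, IWL_RATE_24M_INDEX}, so rate scaling starts out at 24 Mbps; anything at or below -92 dBm falls through to the last entry and starts at 1 Mbps.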
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = -1;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed. If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
- int unflushed = 0;
- int i;
- unsigned long flags;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /*
- * For each rate, if we have collected data on that rate
- * and it has been more than IWL_RATE_WIN_FLUSH
- * since we flushed, clear out the gathered statistics
- */
- for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
- if (!rs_sta->win[i].counter)
- continue;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
- if (time_after(jiffies, rs_sta->win[i].stamp +
- IWL_RATE_WIN_FLUSH)) {
- IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
- "index %d\n",
- rs_sta->win[i].counter, i);
- iwl3945_clear_window(&rs_sta->win[i]);
- } else
- unflushed++;
- spin_unlock_irqrestore(&rs_sta->lock, flags);
- }
-
- return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX 5000 /* msec */
-#define IWL_RATE_FLUSH_MIN 50 /* msec */
-#define IWL_AVERAGE_PACKETS 1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
- struct iwl3945_rs_sta *rs_sta = (void *)data;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
- int unflushed = 0;
- unsigned long flags;
- u32 packet_count, duration, pps;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /* Number of packets Tx'd since last time this timer ran */
- packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
- rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
- if (unflushed) {
- duration =
- jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
- IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
- packet_count, duration);
-
- /* Determine packets per second */
- if (duration)
- pps = (packet_count * 1000) / duration;
- else
- pps = 0;
-
- if (pps) {
- duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
- if (duration < IWL_RATE_FLUSH_MIN)
- duration = IWL_RATE_FLUSH_MIN;
- else if (duration > IWL_RATE_FLUSH_MAX)
- duration = IWL_RATE_FLUSH_MAX;
- } else
- duration = IWL_RATE_FLUSH_MAX;
-
- rs_sta->flush_time = msecs_to_jiffies(duration);
-
- IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
- duration, packet_count);
-
- mod_timer(&rs_sta->rate_scale_flush, jiffies +
- rs_sta->flush_time);
-
- rs_sta->last_partial_flush = jiffies;
- } else {
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->flush_pending = 0;
- }
- /* If there weren't any unflushed entries, we don't schedule the timer
- * to run again */
-
- rs_sta->last_flush = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
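To make the arithmetic concrete: if 300 packets went out in the 200 ms since the last partial flush, pps = 300 * 1000 / 200 = 1500 and the next flush is scheduled IWL_AVERAGE_PACKETS * 1000 / pps = 1000 ms out, with the result always clamped to the [IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX] = [50, 5000] ms range.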
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 64 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
- struct iwl3945_rate_scale_data *window,
- int success, int retries, int index)
-{
- unsigned long flags;
- s32 fail_count;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- if (!retries) {
- IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
- return;
- }
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (retries > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
- window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame (throw away oldest history),
- * OR in "1", and increment "success" if this
- * frame was successful. */
- window->data <<= 1;
- if (success > 0) {
- window->success_counter++;
- window->data |= 0x1;
- success--;
- }
-
- retries--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = ((window->success_ratio *
- rs_sta->expected_tpt[index] + 64) / 128);
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
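A worked example of the fixed-point math above: with a full window (counter = 62) and 50 successes, success_ratio = 128 * (100 * 50) / 62 = 10322, i.e. roughly 80% expressed in 128ths of a percent. With expected_tpt[index] = 130 (the 24 Mbps entry of iwl3945_expected_tpt_g), average_tpt = (10322 * 130 + 64) / 128 = 10483, which is in the same "100 x expected throughput" units that the rate-selection code later compares against.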
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct iwl3945_sta_priv *psta;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_supported_band *sband;
- int i;
-
- IWL_DEBUG_INFO(priv, "enter\n");
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- goto out;
-
- psta = (struct iwl3945_sta_priv *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
- rs_sta->priv = priv;
-
- rs_sta->start_rate = IWL_RATE_INVALID;
-
- /* default to just 802.11b */
- rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->last_flush = jiffies;
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->last_tx_packets = 0;
-
- rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
- rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
- for (i = 0; i < IWL_RATE_COUNT_3945; i++)
- iwl3945_clear_window(&rs_sta->win[i]);
-
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
- rs_sta->last_txrate_idx = i;
- break;
- }
- }
-
- priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
- /* For the 5 GHz band, rates start at IWL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
- rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
- IWL_FIRST_OFDM_RATE;
- }
-
-out:
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
- IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
- return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct iwl3945_rs_sta *rs_sta;
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl_priv *priv __maybe_unused = iwl_priv;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rs_sta = &psta->rs_sta;
-
- spin_lock_init(&rs_sta->lock);
- init_timer(&rs_sta->rate_scale_flush);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-
- return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
-
- /*
- * Be careful not to use any members of iwl3945_rs_sta (like trying
- * to use iwl_priv to print out debugging) since it may not be fully
- * initialized at this point.
- */
- del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- s8 retries = 0, current_count;
- int scale_rate_index, first_index, last_index;
- unsigned long flags;
- struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- retries = info->status.rates[0].count;
- /* Sanity Check for retries */
- if (retries > IWL_RATE_RETRY_TH)
- retries = IWL_RATE_RETRY_TH;
-
- first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
- if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
- IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
- return;
- }
-
- if (!priv_sta) {
- IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
- return;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
- return;
- }
-
-
- rs_sta->tx_packets++;
-
- scale_rate_index = first_index;
- last_index = first_index;
-
- /*
- * Update the window for each rate. We determine which rates
- * were Tx'd based on the total number of retries vs. the number
- * of retries configured for each rate -- currently the driver-wide
- * 'retry_rate' value rather than a rate-specific count.
- *
- * On exit from this while loop last_index indicates the rate
- * at which the frame was finally transmitted (or failed if no
- * ACK)
- */
- while (retries > 1) {
- if ((retries - 1) < priv->retry_rate) {
- current_count = (retries - 1);
- last_index = scale_rate_index;
- } else {
- current_count = priv->retry_rate;
- last_index = iwl3945_rs_next_rate(priv,
- scale_rate_index);
- }
-
- /* Update this rate accounting for as many retries
- * as was used for it (per current_count) */
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[scale_rate_index],
- 0, current_count, scale_rate_index);
- IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
- scale_rate_index, current_count);
-
- retries -= current_count;
-
- scale_rate_index = last_index;
- }
-
-
- /* Update the last index window with success/failure based on ACK */
- IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
- last_index,
- (info->flags & IEEE80211_TX_STAT_ACK) ?
- "success" : "failure");
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[last_index],
- info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
- /* We updated the rate scale window -- if it's been more than
- * flush_time since the last run, schedule the flush
- * again */
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- if (!rs_sta->flush_pending &&
- time_after(jiffies, rs_sta->last_flush +
- rs_sta->flush_time)) {
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->flush_pending = 1;
- mod_timer(&rs_sta->rate_scale_flush,
- jiffies + rs_sta->flush_time);
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
- u8 index, u16 rate_mask, enum ieee80211_band band)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /* 802.11A walks to the next literal adjacent rate in
- * the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
- i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- low = iwl3945_rates[low].prev_rs_tgg;
- else
- low = iwl3945_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- high = iwl3945_rates[high].next_rs_tgg;
- else
- high = iwl3945_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
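The two neighbours come back packed into one u16, which is exactly how the caller in iwl3945_rs_get_rate() unpacks them; a minimal sketch of that contract (rs_sta, index, rate_mask and band as in the caller):

u16 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, band);
u8 low = high_low & 0xff;         /* next lower usable rate, or IWL_RATE_INVALID */
u8 high = (high_low >> 8) & 0xff; /* next higher usable rate, or IWL_RATE_INVALID */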
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Fills in info->control.rates[0] with the rate index the driver selected.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
- struct ieee80211_supported_band *sband = txrc->sband;
- struct sk_buff *skb = txrc->skb;
- u8 low = IWL_RATE_INVALID;
- u8 high = IWL_RATE_INVALID;
- u16 high_low;
- int index;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl3945_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- unsigned long flags;
- u16 rate_mask;
- s8 max_rate_idx = -1;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (rs_sta && !rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
- priv_sta = NULL;
- }
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- rate_mask = sta->supp_rates[sband->band];
-
- /* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
- max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
- max_rate_idx = -1;
-
- index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
- if (sband->band == IEEE80211_BAND_5GHZ)
- rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /* For a recent association, choose the best starting rate
- * based on the RSSI value
- */
- if (rs_sta->start_rate != IWL_RATE_INVALID) {
- if (rs_sta->start_rate < index &&
- (rate_mask & (1 << rs_sta->start_rate)))
- index = rs_sta->start_rate;
- rs_sta->start_rate = IWL_RATE_INVALID;
- }
-
- /* force user max rate if set by user */
- if ((max_rate_idx != -1) && (max_rate_idx < index)) {
- if (rate_mask & (1 << max_rate_idx))
- index = max_rate_idx;
- }
-
- window = &(rs_sta->win[index]);
-
- fail_count = window->counter - window->success_counter;
-
- if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
- "counter: %d, success_counter: %d, "
- "expected_tpt is %sNULL\n",
- index,
- window->counter,
- window->success_counter,
- rs_sta->expected_tpt ? "not " : "");
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
- goto out;
-
- }
-
- current_tpt = window->average_tpt;
-
- high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
- sband->band);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /* If the user set a max rate, don't allow anything higher than that constraint */
- if ((max_rate_idx != -1) && (max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- /* Collect Measured throughputs of adjacent rates */
- if (low != IWL_RATE_INVALID)
- low_tpt = rs_sta->win[low].average_tpt;
-
- if (high != IWL_RATE_INVALID)
- high_tpt = rs_sta->win[high].average_tpt;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- scale_action = 0;
-
- /* Low success ratio, need to drop the rate */
- if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
- IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates,
- * try increasing */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
-
- /* Both adjacent throughputs are measured, but neither one has
- * better throughput; we're using the best rate, don't change
- * it! */
- } else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
- IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
- "current_tpt [%d]\n",
- low_tpt, high_tpt, current_tpt);
- scale_action = 0;
-
- /* At least one of the rates has better throughput */
- } else {
- if (high_tpt != IWL_INVALID_VALUE) {
-
- /* High rate has better throughput, Increase
- * rate */
- if (high_tpt > current_tpt &&
- window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of high tpt\n");
- scale_action = 0;
- }
- } else if (low_tpt != IWL_INVALID_VALUE) {
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
- /* Lower rate is not better and our
- * success ratio is good; try a higher rate */
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((window->success_ratio > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * rs_sta->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
-
- /* Decrease rate */
- if (low != IWL_RATE_INVALID)
- index = low;
- break;
-
- case 1:
- /* Increase rate */
- if (high != IWL_RATE_INVALID)
- index = high;
-
- break;
-
- case 0:
- default:
- /* No change */
- break;
- }
-
- IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
- index, scale_action, low, high);
-
- out:
-
- if (sband->band == IEEE80211_BAND_5GHZ) {
- if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
- index = IWL_FIRST_OFDM_RATE;
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
- } else {
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = rs_sta->last_txrate_idx;
- }
-
- IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int j;
- ssize_t ret;
- struct iwl3945_rs_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
- "rate=0x%X flush time %d\n",
- lq_sta->tx_packets,
- lq_sta->last_txrate_idx,
- lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
- for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->win[j].counter,
- lq_sta->win[j].success_counter,
- lq_sta->win[j].success_ratio);
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl3945_sta_dbgfs_stats_table_read,
- .open = iwl3945_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
-
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
- .module = NULL,
- .name = RS_NAME,
- .tx_status = iwl3945_rs_tx_status,
- .get_rate = iwl3945_rs_get_rate,
- .rate_init = iwl3945_rs_rate_init_stub,
- .alloc = iwl3945_rs_alloc,
- .free = iwl3945_rs_free,
- .alloc_sta = iwl3945_rs_alloc_sta,
- .free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl3945_add_debugfs,
- .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
- struct iwl_priv *priv = hw->priv;
- s32 rssi = 0;
- unsigned long flags;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_sta *sta;
- struct iwl3945_sta_priv *psta;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rcu_read_lock();
-
- sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
- priv->stations[sta_id].sta.sta.addr);
- if (!sta) {
- IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
- rcu_read_unlock();
- return;
- }
-
- psta = (void *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- rs_sta->tgg = 0;
- switch (priv->band) {
- case IEEE80211_BAND_2GHZ:
- /* TODO: this always does G, not a regression */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_TGG_PROTECT_MSK) {
- rs_sta->tgg = 1;
- rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
- } else
- rs_sta->expected_tpt = iwl3945_expected_tpt_g;
- break;
-
- case IEEE80211_BAND_5GHZ:
- rs_sta->expected_tpt = iwl3945_expected_tpt_a;
- break;
- case IEEE80211_NUM_BANDS:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- rssi = priv->_3945.last_rx_rssi;
- if (rssi == 0)
- rssi = IWL_MIN_RSSI_VAL;
-
- IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
- rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
- IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
- "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
- iwl3945_rates[rs_sta->start_rate].plcp);
- rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a743847..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX, \
- IWL_RATE_##r##M_INDEX_TABLE, \
- IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- * rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate,
- * prev tgg rs rate, next tgg rs rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
- IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
- IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
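Expanding one row by hand shows what the macro builds; for the 6 Mbps entry, IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11) becomes the initializer below (field meanings inferred from how the table is used elsewhere in this file):

[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,        /* plcp */
			IWL_RATE_6M_IEEE,        /* ieee */
			IWL_RATE_5M_INDEX,       /* prev ieee */
			IWL_RATE_9M_INDEX,       /* next ieee */
			IWL_RATE_5M_INDEX,       /* prev rs */
			IWL_RATE_11M_INDEX,      /* next rs */
			IWL_RATE_5M_INDEX,       /* prev rs tgg */
			IWL_RATE_11M_INDEX,      /* next rs tgg */
			IWL_RATE_6M_INDEX_TABLE, /* rate table index */
			IWL_RATE_5M_INDEX_TABLE },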
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
- u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
- if (rate == IWL_RATE_INVALID)
- rate = rate_index;
- return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- * bitmap in SRAM. Bit position corresponds to Event # (id/type).
- * Default values of 0 enable uCode events to be logged.
- * Use for only special debugging. This function is just a placeholder as-is,
- * you'll need to provide the special bits! ...
- * ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
- int i;
- u32 base; /* SRAM address of event log header */
- u32 disable_ptr; /* SRAM address of event-disable bitmap array */
- u32 array_size; /* # of u32 entries in array */
- static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
- 0x00000000, /* 31 - 0 Event id numbers */
- 0x00000000, /* 63 - 32 */
- 0x00000000, /* 95 - 64 */
- 0x00000000, /* 127 - 96 */
- 0x00000000, /* 159 - 128 */
- 0x00000000, /* 191 - 160 */
- 0x00000000, /* 223 - 192 */
- 0x00000000, /* 255 - 224 */
- 0x00000000, /* 287 - 256 */
- 0x00000000, /* 319 - 288 */
- 0x00000000, /* 351 - 320 */
- 0x00000000, /* 383 - 352 */
- 0x00000000, /* 415 - 384 */
- 0x00000000, /* 447 - 416 */
- 0x00000000, /* 479 - 448 */
- 0x00000000, /* 511 - 480 */
- 0x00000000, /* 543 - 512 */
- 0x00000000, /* 575 - 544 */
- 0x00000000, /* 607 - 576 */
- 0x00000000, /* 639 - 608 */
- 0x00000000, /* 671 - 640 */
- 0x00000000, /* 703 - 672 */
- 0x00000000, /* 735 - 704 */
- 0x00000000, /* 767 - 736 */
- 0x00000000, /* 799 - 768 */
- 0x00000000, /* 831 - 800 */
- 0x00000000, /* 863 - 832 */
- 0x00000000, /* 895 - 864 */
- 0x00000000, /* 927 - 896 */
- 0x00000000, /* 959 - 928 */
- 0x00000000, /* 991 - 960 */
- 0x00000000, /* 1023 - 992 */
- 0x00000000, /* 1055 - 1024 */
- 0x00000000, /* 1087 - 1056 */
- 0x00000000, /* 1119 - 1088 */
- 0x00000000, /* 1151 - 1120 */
- 0x00000000, /* 1183 - 1152 */
- 0x00000000, /* 1215 - 1184 */
- 0x00000000, /* 1247 - 1216 */
- 0x00000000, /* 1279 - 1248 */
- 0x00000000, /* 1311 - 1280 */
- 0x00000000, /* 1343 - 1312 */
- 0x00000000, /* 1375 - 1344 */
- 0x00000000, /* 1407 - 1376 */
- 0x00000000, /* 1439 - 1408 */
- 0x00000000, /* 1471 - 1440 */
- 0x00000000, /* 1503 - 1472 */
- };
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
- }
-
- disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
- if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
- IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
- disable_ptr);
- for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_legacy_write_targ_mem(priv,
- disable_ptr + (i * sizeof(u32)),
- evt_disable[i]);
-
- } else {
- IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
- IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
- IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
- disable_ptr, array_size);
- }
-
-}
-
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
- int idx;
-
- for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
- if (iwl3945_rates[idx].plcp == plcp)
- return idx;
- return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- switch (status & TX_STATUS_MSK) {
- case TX_3945_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- return "";
-}
-#endif
-
-/*
- * Get the previous ieee rate from the rate scale table.
- * For A and B mode we need to override the prev
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
- int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- if (rate == IWL_RATE_12M_INDEX)
- next_rate = IWL_RATE_9M_INDEX;
- else if (rate == IWL_RATE_6M_INDEX)
- next_rate = IWL_RATE_6M_INDEX;
- break;
- case IEEE80211_BAND_2GHZ:
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- if (rate == IWL_RATE_11M_INDEX)
- next_rate = IWL_RATE_5M_INDEX;
- }
- break;
-
- default:
- break;
- }
-
- return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
- int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
-
- BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
- tx_info->skb = NULL;
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
-
- if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
- (txq_id != IWL39_CMD_QUEUE_NUM) &&
- priv->mac80211_registered)
- iwl_legacy_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->status);
- int rate_idx;
- int fail;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- ieee80211_tx_info_clear_status(info);
-
- /* Fill the MRR chain with some info about on-chip retransmissions */
- rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
-
- fail = tx_resp->failure_frame;
-
- info->status.rates[0].idx = rate_idx;
- info->status.rates[0].count = fail + 1; /* add final attempt */
-
- /* tx_status->rts_retry_count = tx_resp->failure_rts; */
- info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
- IEEE80211_TX_STAT_ACK : 0;
-
- IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
- txq_id, iwl3945_get_tx_fail_reason(status), status,
- tx_resp->rate, tx_resp->failure_frame);
-
- IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
- iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
- if (status & TX_ABORT_REQUIRED_MSK)
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- * RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
-
- prev_stats = (__le32 *)&priv->_3945.statistics;
- accum_stats = (u32 *)&priv->_3945.accum_statistics;
- delta = (u32 *)&priv->_3945.delta_statistics;
- max_delta = (u32 *)&priv->_3945.max_delta;
-
- for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- priv->_3945.accum_statistics.general.temperature =
- priv->_3945.statistics.general.temperature;
- priv->_3945.accum_statistics.general.ttl_timestamp =
- priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl3945_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
- memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- __le32 *flag = (__le32 *)&pkt->u.raw;
-
- if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_3945.accum_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.delta_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.max_delta, 0,
- sizeof(struct iwl3945_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets from our AP (BSSID) update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We received data from the HW, so stop the watchdog */
- if (unlikely(len + IWL39_RX_FRAME_SIZE >
- PAGE_SIZE << priv->hw_params.rx_page_order)) {
- IWL_DEBUG_DROP(priv, "Corruption detected!\n");
- return;
- }
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- if (!iwl3945_mod_params.sw_crypto)
- iwl_legacy_set_decrypted_flag(priv,
- (struct ieee80211_hdr *)rxb_addr(rxb),
- le32_to_cpu(rx_end->status), stats);
-
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
- u8 network_packet;
-
- rx_status.flag = 0;
- rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
- rx_status.band);
-
- rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
- rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
- rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
- /* set the preamble flag if appropriate */
- if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- if ((unlikely(rx_stats->phy_count > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- rx_stats->phy_count);
- return;
- }
-
- if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
- || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
- }
-
-
-
- /* Convert 3945's rssi indicator to dBm */
- rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
- IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_stats_sig_avg,
- rx_stats_noise_diff);
-
- header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
- network_packet = iwl3945_is_network_packet(priv, header);
-
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
- network_packet ? '*' : ' ',
- le16_to_cpu(rx_hdr->channel),
- rx_status.signal, rx_status.signal,
- rx_status.rate_idx);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
- header);
-
- if (network_packet) {
- priv->_3945.last_beacon_time =
- le32_to_cpu(rx_end->beacon_timestamp);
- priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->_3945.last_rx_rssi = rx_status.signal;
- }
-
- iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
- int count;
- struct iwl_queue *q;
- struct iwl3945_tfd *tfd, *tfd_tmp;
-
- q = &txq->q;
- tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
- if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- NUM_TFD_CHUNKS);
- return -EINVAL;
- }
-
- tfd->tbs[count].addr = cpu_to_le32(addr);
- tfd->tbs[count].len = cpu_to_le32(len);
-
- count++;
-
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
- TFD_CTL_PAD_SET(pad));
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- int index = txq->q.read_ptr;
- struct iwl3945_tfd *tfd = &tfd_tmp[index];
- struct pci_dev *dev = priv->pci_dev;
- int i;
- int counter;
-
- /* sanity check */
- counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
- if (counter > NUM_TFD_CHUNKS) {
- IWL_ERR(priv, "Too many chunks: %i\n", counter);
- /* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (counter)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_TODEVICE);
-
- /* unmap chunks if any */
-
- for (i = 1; i < counter; i++)
- pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
- le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- *
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id)
-{
- u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
- u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
- u16 rate_mask;
- int rate;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- __le32 tx_flags;
- __le16 fc = hdr->frame_control;
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
- rate = iwl3945_rates[rate_index].plcp;
- tx_flags = tx_cmd->tx_flags;
-
- /* We need to figure out how to get the sta->supp_rates while
- * in this running context */
- rate_mask = IWL_RATES_MASK_3945;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- if (tx_id >= IWL39_CMD_QUEUE_NUM)
- rts_retry_limit = 3;
- else
- rts_retry_limit = 7;
-
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- tx_cmd->rate = rate;
- tx_cmd->tx_flags = tx_flags;
-
- /* OFDM */
- tx_cmd->supp_rates[0] =
- ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- /* CCK */
- tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
- IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
- "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
- tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
- tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
- unsigned long flags_spin;
- struct iwl_station_entry *station;
-
- if (sta_id == IWL_INVALID_STATION)
- return IWL_INVALID_STATION;
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- station = &priv->stations[sta_id];
-
- station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
- station->sta.rate_n_flags = cpu_to_le16(tx_rate);
- station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
- sta_id, tx_rate);
- return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN,
- CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000);
- }
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
- rxq->rb_stts_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
- FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
- FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
- (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
- FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
- (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
- FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
- /* fake read to flush all prev I/O */
- iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
- return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
- /* bypass mode */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
- /* RA 0 is active */
- iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
- /* all 6 fifo are active */
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->_3945.shared_phys);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
- return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initialize them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
- int rc;
- int txq_id, slots_num;
-
- iwl3945_hw_txq_ctx_free(priv);
-
- /* allocate tx queue structure */
- rc = iwl_legacy_alloc_txq_mem(priv);
- if (rc)
- return rc;
-
- /* Tx CMD queue */
- rc = iwl3945_tx_reset(priv);
- if (rc)
- goto error;
-
- /* Tx queue(s) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- if (rc) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return rc;
-
- error:
- iwl3945_hw_txq_ctx_free(priv);
- return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
- int ret = iwl_legacy_apm_init(priv);
-
- /* Clear APMG (NIC's internal power management) interrupts */
- iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
- /* Reset radio chip */
- iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- unsigned long flags;
- u8 rev_id = priv->pci_dev->revision;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Determine HW type */
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
- if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type\n");
- else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
- } else {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
- }
-
- if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
- IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
- } else
- IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
- if ((eeprom->board_revision & 0xF0) == 0xD0) {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- } else {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- }
-
- if (eeprom->almgor_m_version <= 1) {
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
- IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
- eeprom->almgor_m_version);
- } else {
- IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
- eeprom->almgor_m_version);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
- int rc;
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- rc = iwl_legacy_rx_queue_alloc(priv);
- if (rc) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl3945_rx_queue_reset(priv, rxq);
-
- iwl3945_rx_replenish(priv);
-
- iwl3945_rx_init(priv, rxq);
-
-
- /* Look at using this instead:
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- */
-
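- /* hand the ring's initial write pointer to the flow handler,
- * rounded down to a multiple of 8 by the "& ~7" below */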
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
- rc = iwl3945_txq_ctx_reset(priv);
- if (rc)
- return rc;
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq)
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
- if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* stop SCD */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
- /* reset TFD queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
- iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
- FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
- 1000);
- }
-
- iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
- */
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
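- /* roughly one table step per 9 units of temperature rise: a warmer
- * reading yields a negative delta, i.e. a lower index and therefore
- * a higher-power entry in the gain table */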
- return (new_reading - old_reading) * (-11) / 100;
-}
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
- return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
- return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
- */
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int temperature;
-
- temperature = iwl3945_hw_get_temperature(priv);
-
- /* driver's okay range is -260 to +25.
- * human readable okay range is 0 to +285 */
- IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
- /* handle insane temp reading */
- if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
- IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
-
- /* if really really hot(?),
- * substitute the 3rd band/group's temp measured at factory */
- if (priv->last_temperature > 100)
- temperature = eeprom->groups[2].temperature;
- else /* else use most recent "sane" value from driver */
- temperature = priv->last_temperature;
- }
-
- return temperature; /* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if the temperature variance is greater than threshold.
- *
- * This threshold is lower than the 9 degrees used by older versions. */
-#define IWL_TEMPERATURE_LIMIT_TIMER 6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * records new temperature in tx_mgr->temperature.
- * replaces tx_mgr->last_temperature *only* if calib needed
- * (assumes caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp,\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
- /* if we don't need calibration, *don't* update last_temperature */
- if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
- IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
- /* assume that caller will actually do calib ...
- * update the "last temperature" value */
- priv->last_temperature = priv->temperature;
- return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
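-/* (each gain-table step is 1/2 dB, so the -5 dB CCK offset from the OFDM
- * 12M setting corresponds to +10 index steps) */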
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
- {
- {251, 127}, /* 2.4 GHz, highest power */
- {251, 127},
- {251, 127},
- {251, 127},
- {251, 125},
- {251, 110},
- {251, 105},
- {251, 98},
- {187, 125},
- {187, 115},
- {187, 108},
- {187, 99},
- {243, 119},
- {243, 111},
- {243, 105},
- {243, 97},
- {243, 92},
- {211, 106},
- {211, 100},
- {179, 120},
- {179, 113},
- {179, 107},
- {147, 125},
- {147, 119},
- {147, 112},
- {147, 106},
- {147, 101},
- {147, 97},
- {147, 91},
- {115, 107},
- {235, 121},
- {235, 115},
- {235, 109},
- {203, 127},
- {203, 121},
- {203, 115},
- {203, 108},
- {203, 102},
- {203, 96},
- {203, 92},
- {171, 110},
- {171, 104},
- {171, 98},
- {139, 116},
- {227, 125},
- {227, 119},
- {227, 113},
- {227, 107},
- {227, 101},
- {227, 96},
- {195, 113},
- {195, 106},
- {195, 102},
- {195, 95},
- {163, 113},
- {163, 106},
- {163, 102},
- {163, 95},
- {131, 113},
- {131, 106},
- {131, 102},
- {131, 95},
- {99, 113},
- {99, 106},
- {99, 102},
- {99, 95},
- {67, 113},
- {67, 106},
- {67, 102},
- {67, 95},
- {35, 113},
- {35, 106},
- {35, 102},
- {35, 95},
- {3, 113},
- {3, 106},
- {3, 102},
- {3, 95} }, /* 2.4 GHz, lowest power */
- {
- {251, 127}, /* 5.x GHz, highest power */
- {251, 120},
- {251, 114},
- {219, 119},
- {219, 101},
- {187, 113},
- {187, 102},
- {155, 114},
- {155, 103},
- {123, 117},
- {123, 107},
- {123, 99},
- {123, 92},
- {91, 108},
- {59, 125},
- {59, 118},
- {59, 109},
- {59, 102},
- {59, 96},
- {59, 90},
- {27, 104},
- {27, 98},
- {27, 92},
- {115, 118},
- {115, 111},
- {115, 104},
- {83, 126},
- {83, 121},
- {83, 113},
- {83, 105},
- {83, 99},
- {51, 118},
- {51, 111},
- {51, 104},
- {51, 98},
- {19, 116},
- {19, 109},
- {19, 102},
- {19, 98},
- {19, 93},
- {171, 113},
- {171, 107},
- {171, 99},
- {139, 120},
- {139, 113},
- {139, 107},
- {139, 99},
- {107, 120},
- {107, 113},
- {107, 107},
- {107, 99},
- {75, 120},
- {75, 113},
- {75, 107},
- {75, 99},
- {43, 120},
- {43, 113},
- {43, 107},
- {43, 99},
- {11, 120},
- {11, 113},
- {11, 107},
- {11, 99},
- {131, 107},
- {131, 99},
- {99, 120},
- {99, 113},
- {99, 107},
- {99, 99},
- {67, 120},
- {67, 113},
- {67, 107},
- {67, 99},
- {35, 120},
- {35, 113},
- {35, 107},
- {35, 99},
- {3, 120} } /* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
- if (index < 0)
- return 0;
- if (index >= IWL_MAX_GAIN_ENTRIES)
- return IWL_MAX_GAIN_ENTRIES - 1;
- return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
- s32 rate_index, const s8 *clip_pwrs,
- struct iwl_channel_info *ch_info,
- int band_index)
-{
- struct iwl3945_scan_power_info *scan_power_info;
- s8 power;
- u8 power_index;
-
- scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
- /* use this channel group's 6Mbit clipping/saturation pwr,
- * but cap at regulatory scan power restriction (set during init
- * based on eeprom channel data) for this channel. */
- power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
- power = min(power, priv->tx_power_user_lmt);
- scan_power_info->requested_power = power;
-
- /* find difference between new scan *power* and current "normal"
- * Tx *power* for 6Mb. Use this difference (x2) to adjust the
- * current "normal" temperature-compensated Tx power *index* for
- * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
- * *index*. */
- power_index = ch_info->power_info[rate_index].power_table_index
- - (power - ch_info->power_info
- [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
- /* store reference index that we use when adjusting *all* scan
- * powers. So we can accommodate user (all channel) or spectrum
- * management (single channel) power changes "between" temperature
- * feedback compensation procedures.
- * don't force fit this reference index into gain table; it may be a
- * negative number. This will help avoid errors when we're at
- * the lower bounds (highest gains, for warmest temperatures)
- * of the table. */
-
- /* don't exceed table bounds for "real" setting */
- power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
- scan_power_info->power_table_index = power_index;
- scan_power_info->tpc.tx_gain =
- power_gain_table[band_index][power_index].tx_gain;
- scan_power_info->tpc.dsp_atten =
- power_gain_table[band_index][power_index].dsp_atten;
-}
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from the channel info struct, and sends them to the NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
- int rate_idx, i;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_txpowertable_cmd txpower = {
- .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
- };
- u16 chan;
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
- txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
- if (!ch_info) {
- IWL_ERR(priv,
- "Failed to get channel info for channel %d [%d]\n",
- chan, priv->band);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
- "non-Tx channel.\n");
- return 0;
- }
-
- /* fill cmd with power settings for all rates for current channel */
- /* Fill OFDM rate */
- for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
- rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
- /* Fill CCK rates */
- for (rate_idx = IWL_FIRST_CCK_RATE;
- rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
-
- return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
- sizeof(struct iwl3945_txpowertable_cmd),
- &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update. Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
- * properly fill out the scan powers and actual h/w gain settings,
- * and send the changes to the NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
- struct iwl_channel_info *ch_info)
-{
- struct iwl3945_channel_power_info *power_info;
- int power_changed = 0;
- int i;
- const s8 *clip_pwrs;
- int power;
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* Get this channel's rate-to-current-power settings table */
- power_info = ch_info->power_info;
-
- /* update OFDM Txpower settings */
- for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
- i++, ++power_info) {
- int delta_idx;
-
- /* limit new power to be no more than h/w capability */
- power = min(ch_info->curr_txpow, clip_pwrs[i]);
- if (power == power_info->requested_power)
- continue;
-
- /* find difference between old and new requested powers,
- * update base (non-temp-compensated) power index */
- delta_idx = (power - power_info->requested_power) * 2;
- power_info->base_power_index -= delta_idx;
-
- /* save new requested power value */
- power_info->requested_power = power;
-
- power_changed = 1;
- }
-
- /* update CCK Txpower settings, based on OFDM 12M setting ...
- * ... all CCK power settings for a given channel are the *same*. */
- if (power_changed) {
- power =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
- /* do all CCK rates' iwl3945_channel_power_info structures */
- for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
- power_info->requested_power = power;
- power_info->base_power_index =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
- ++power_info;
- }
- }
-
- return 0;
-}
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- * based strictly on regulatory (eeprom and spectrum mgt) limitations
- * (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
- s8 max_power;
-
-#if 0
- /* if we're using TGd limits, use lower of TGd or EEPROM */
- if (ch_info->tgd_data.max_power != 0)
- max_power = min(ch_info->tgd_data.max_power,
- ch_info->eeprom.max_power_avg);
-
- /* else just use EEPROM limits */
- else
-#endif
- max_power = ch_info->eeprom.max_power_avg;
-
- return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- * and the factory calibration temperatures, and bases the new settings
- * on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
- u8 a_band;
- u8 rate_index;
- u8 scan_tbl_index;
- u8 i;
- int ref_temp;
- int temperature = priv->temperature;
-
- if (priv->disable_tx_power_cal ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- /* do not perform tx power calibration */
- return 0;
- }
- /* set up new Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* Get this chnlgrp's factory calibration temperature */
- ref_temp = (s16)eeprom->groups[ch_info->group_index].
- temperature;
-
- /* get power index adjustment based on current and factory
- * temps */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- ref_temp);
-
- /* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
- rate_index++) {
- int power_idx =
- ch_info->power_info[rate_index].base_power_index;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within table range */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
- ch_info->power_info[rate_index].
- power_table_index = (u8) power_idx;
- ch_info->power_info[rate_index].tpc =
- power_gain_table[a_band][power_idx];
- }
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs,
- ch_info, a_band);
- }
- }
-
- /* send Txpower command for current channel to ucode */
- return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
- struct iwl_channel_info *ch_info;
- s8 max_power;
- u8 a_band;
- u8 i;
-
- if (priv->tx_power_user_lmt == power) {
- IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
- "limit: %ddBm.\n", power);
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
- priv->tx_power_user_lmt = power;
-
- /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* find minimum power of all user and regulatory constraints
- * (does not consider h/w clipping limitations) */
- max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
- max_power = min(power, max_power);
- if (max_power != ch_info->curr_txpow) {
- ch_info->curr_txpow = max_power;
-
- /* this considers the h/w clipping limitations */
- iwl3945_hw_reg_set_new_power(priv, ch_info);
- }
- }
-
- /* update txpower settings for all channels,
- * send to NIC if associated. */
- iwl3945_is_temp_calib_needed(priv);
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int rc = 0;
- struct iwl_rx_packet *pkt;
- struct iwl3945_rxon_assoc_cmd rxon_assoc;
- struct iwl_host_cmd cmd = {
- .id = REPLY_RXON_ASSOC,
- .len = sizeof(rxon_assoc),
- .flags = CMD_WANT_SKB,
- .data = &rxon_assoc,
- };
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
- rc = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
- struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
- int rc = 0;
- bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- if (!iwl_legacy_is_alive(priv))
- return -1;
-
- /* always get timestamp with Rx frame */
- staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
- /* select antenna */
- staging_rxon->flags &=
- ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
- staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
- rc = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (rc) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv,
- &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_legacy_send_rxon_assoc(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- if (rc) {
- IWL_ERR(priv, "Error setting RXON_ASSOC "
- "configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- active_rxon->reserved4 = 0;
- active_rxon->reserved5 = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- &priv->contexts[IWL_RXON_CTX_BSS].active);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (rc) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
- "configuration (%d).\n", rc);
- return rc;
- }
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(staging_rxon->channel),
- staging_rxon->bssid_addr);
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- staging_rxon->reserved4 = 0;
- staging_rxon->reserved5 = 0;
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
- /* Apply the new configuration */
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- staging_rxon);
- if (rc) {
- IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- if (!new_assoc) {
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (rc) {
- IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
- return rc;
- }
-
- /* Init the hardware's rate fallback order based on the band */
- rc = iwl3945_init_hw_rate_table(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- * -- correct coeffs for temp (can reset temp timer)
- * -- save this temp as "last",
- * -- send new set of gain settings to NIC
- * NOTE: This should continue working, even when we're not associated,
- * so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
- /* This will kick in the "brute force"
- * iwl3945_hw_reg_comp_txpower_temp() below */
- if (!iwl3945_is_temp_calib_needed(priv))
- goto reschedule;
-
- /* Set up a new set of temp-adjusted TxPowers, send to NIC.
- * This is based *only* on current temperature,
- * ignoring any previous power measurements */
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
- queue_delayed_work(priv->workqueue,
- &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- _3945.thermal_periodic.work);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl3945_reg_txpower_periodic(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- * for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- * These channel groups are based on factory-tested channels;
- * on A-band, EEPROM's "group frequency" entries represent the top
- * channel in each of groups 1-4. All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
- const struct iwl_channel_info *ch_info)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
- u8 group;
- u16 group_index = 0; /* based on factory calib frequencies */
- u8 grp_channel;
-
- /* Find the group index for the channel ... don't use index 1(?) */
- if (iwl_legacy_is_channel_a_band(ch_info)) {
- for (group = 1; group < 5; group++) {
- grp_channel = ch_grp[group].group_channel;
- if (ch_info->channel <= grp_channel) {
- group_index = group;
- break;
- }
- }
- /* group 4 has a few channels *above* its factory cal freq */
- if (group == 5)
- group_index = 4;
- } else
- group_index = 0; /* 2.4 GHz, group 0 */
-
- IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
- group_index);
- return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- * into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
- s8 requested_power,
- s32 setting_index, s32 *new_index)
-{
- const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- s32 index0, index1;
- s32 power = 2 * requested_power;
- s32 i;
- const struct iwl3945_eeprom_txpower_sample *samples;
- s32 gains0, gains1;
- s32 res;
- s32 denominator;
-
- chnl_grp = &eeprom->groups[setting_index];
- samples = chnl_grp->samples;
- for (i = 0; i < 5; i++) {
- if (power == samples[i].power) {
- *new_index = samples[i].gain_index;
- return 0;
- }
- }
-
- if (power > samples[1].power) {
- index0 = 0;
- index1 = 1;
- } else if (power > samples[2].power) {
- index0 = 1;
- index1 = 2;
- } else if (power > samples[3].power) {
- index0 = 2;
- index1 = 3;
- } else {
- index0 = 3;
- index1 = 4;
- }
-
- denominator = (s32) samples[index1].power - (s32) samples[index0].power;
- if (denominator == 0)
- return -EINVAL;
- gains0 = (s32) samples[index0].gain_index * (1 << 19);
- gains1 = (s32) samples[index1].gain_index * (1 << 19);
- res = gains0 + (gains1 - gains0) *
- ((s32) power - (s32) samples[index0].power) / denominator +
- (1 << 18);
- *new_index = res >> 19;
- return 0;
-}
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
- u32 i;
- s32 rate_index;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- const struct iwl3945_eeprom_txpower_group *group;
-
- IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
- for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
- s8 *clip_pwrs; /* table of power levels for each rate */
- s8 satur_pwr; /* saturation power for each chnl group */
- group = &eeprom->groups[i];
-
- /* sanity check on factory saturation power value */
- if (group->saturation_power < 40) {
- IWL_WARN(priv, "Error: saturation power is %d, "
- "less than minimum expected 40\n",
- group->saturation_power);
- return;
- }
-
- /*
- * Derive requested power levels for each rate, based on
- * hardware capabilities (saturation power for band).
- * Basic value is 3dB down from saturation, with further
- * power reductions for highest 3 data rates. These
- * backoffs provide headroom for high rate modulation
- * power peaks, without too much distortion (clipping).
- */
- /* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
- /* divide factory saturation power by 2 to find -3dB level */
- satur_pwr = (s8) (group->saturation_power >> 1);
-
- /* fill in channel group's nominal powers for each rate */
- for (rate_index = 0;
- rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
- switch (rate_index) {
- case IWL_RATE_36M_INDEX_TABLE:
- if (i == 0) /* B/G */
- *clip_pwrs = satur_pwr;
- else /* A */
- *clip_pwrs = satur_pwr - 5;
- break;
- case IWL_RATE_48M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 7;
- else
- *clip_pwrs = satur_pwr - 10;
- break;
- case IWL_RATE_54M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 9;
- else
- *clip_pwrs = satur_pwr - 12;
- break;
- default:
- *clip_pwrs = satur_pwr;
- break;
- }
- }
- }
-}
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to perform active (i.e. Tx-enabled) scans.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_channel_power_info *pwr_info;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- u8 rate_index;
- u8 scan_tbl_index;
- const s8 *clip_pwrs; /* array of power levels for each rate */
- u8 gain, dsp_atten;
- s8 power;
- u8 pwr_index, base_pwr_index, a_band;
- u8 i;
- int temperature;
-
- /* save temperature reference,
- * so we can determine next time to calibrate */
- temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- priv->last_temperature = temperature;
-
- iwl3945_hw_reg_init_channel_groups(priv);
-
- /* initialize Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
- i++, ch_info++) {
- a_band = iwl_legacy_is_channel_a_band(ch_info);
- if (!iwl_legacy_is_channel_valid(ch_info))
- continue;
-
- /* find this channel's channel group (*not* "band") index */
- ch_info->group_index =
- iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
- /* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* calculate power index *adjustment* value according to
- * diff between current temperature and factory temperature */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- eeprom->groups[ch_info->group_index].
- temperature);
-
- IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
- ch_info->channel, delta_index, temperature +
- IWL_TEMP_CONVERT);
-
- /* set tx power value for all OFDM rates */
- for (rate_index = 0; rate_index < IWL_OFDM_RATES;
- rate_index++) {
- s32 uninitialized_var(power_idx);
- int rc;
-
- /* use channel group's clip-power table,
- * but don't exceed channel's max power */
- s8 pwr = min(ch_info->max_power_avg,
- clip_pwrs[rate_index]);
-
- pwr_info = &ch_info->power_info[rate_index];
-
- /* get base (i.e. at factory-measured temperature)
- * power table index for this rate's power */
- rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
- ch_info->group_index,
- &power_idx);
- if (rc) {
- IWL_ERR(priv, "Invalid power index\n");
- return rc;
- }
- pwr_info->base_power_index = (u8) power_idx;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within range of gain table */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
- /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
- pwr_info->requested_power = pwr;
- pwr_info->power_table_index = (u8) power_idx;
- pwr_info->tpc.tx_gain =
- power_gain_table[a_band][power_idx].tx_gain;
- pwr_info->tpc.dsp_atten =
- power_gain_table[a_band][power_idx].dsp_atten;
- }
-
- /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
- pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
- power = pwr_info->requested_power +
- IWL_CCK_FROM_OFDM_POWER_DIFF;
- pwr_index = pwr_info->power_table_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
- base_pwr_index = pwr_info->base_power_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
- /* stay within table range */
- pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
- gain = power_gain_table[a_band][pwr_index].tx_gain;
- dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
- /* fill each CCK rate's iwl3945_channel_power_info structure
- * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
- * NOTE: CCK rates start at end of OFDM rates! */
- for (rate_index = 0;
- rate_index < IWL_CCK_RATES; rate_index++) {
- pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
- pwr_info->requested_power = power;
- pwr_info->power_table_index = pwr_index;
- pwr_info->base_power_index = base_pwr_index;
- pwr_info->tpc.tx_gain = gain;
- pwr_info->tpc.dsp_atten = dsp_atten;
- }
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs, ch_info, a_band);
- }
- }
-
- return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
- int rc;
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
- rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
- FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- if (rc < 0)
- IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
- return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
- shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
- iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
- FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
- /* fake read to flush all prev. writes */
- iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
- return 0;
-}
-
-/*
- * HCMD utils
- */
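-/* A couple of host commands use 3945-specific layouts; report their
- * 3945 sizes here and fall back to the caller-supplied length for
- * everything else. */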
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return sizeof(struct iwl3945_rxon_cmd);
- case POWER_TABLE_CMD:
- return sizeof(struct iwl3945_powertable_cmd);
- default:
- return len;
- }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cpu_to_le16(0);
- addsta->rate_n_flags = cmd->rate_n_flags;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
- return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
- const u8 *addr, u8 *sta_id_r)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int ret;
- u8 sta_id;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- int ret;
-
- if (add) {
- ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- if (ret)
- return ret;
-
- iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
- iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
- return 0;
- }
-
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
- int rc, i, index, prev_index;
- struct iwl3945_rate_scaling_cmd rate_cmd = {
- .reserved = {0, 0, 0},
- };
- struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
- for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
- index = iwl3945_rates[i].table_rs_index;
-
- table[index].rate_n_flags =
- iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
- table[index].try_cnt = priv->retry_rate;
- prev_index = iwl3945_get_prev_ieee_rate(i);
- table[index].next_rate_index =
- iwl3945_rates[prev_index].table_rs_index;
- }
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
- /* If one of the following CCK rates is used,
- * have it fall back to the 6M OFDM rate */
- for (i = IWL_RATE_1M_INDEX_TABLE;
- i <= IWL_RATE_11M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
- /* Don't fall back to CCK rates */
- table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
- IWL_RATE_9M_INDEX_TABLE;
-
- /* Don't drop out of OFDM rates */
- table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
- break;
-
- case IEEE80211_BAND_2GHZ:
- IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
- /* If an OFDM rate is used, have it fall back to the
- * 1M CCK rates */
-
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
- index = IWL_FIRST_CCK_RATE;
- for (i = IWL_RATE_6M_INDEX_TABLE;
- i <= IWL_RATE_54M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[index].table_rs_index;
-
- index = IWL_RATE_11M_INDEX_TABLE;
- /* CCK shouldn't fall back to OFDM... */
- table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
- }
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
- /* Update the rate scaling for control frame Tx */
- rate_cmd.table_id = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
- if (rc)
- return rc;
-
- /* Update the rate scaling for data frame Tx */
- rate_cmd.table_id = 1;
- return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
-}
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
- memset((void *)&priv->hw_params, 0,
- sizeof(struct iwl_hw_params));
-
- priv->_3945.shared_virt =
- dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->_3945.shared_phys, GFP_KERNEL);
- if (!priv->_3945.shared_virt) {
- IWL_ERR(priv, "failed to allocate pci memory\n");
- return -ENOMEM;
- }
-
- /* Assign number of Usable TX queues */
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
- priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- priv->hw_params.max_stations = IWL3945_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
- priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
- priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate)
-{
- struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
- unsigned int frame_size;
-
- tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- tx_beacon_cmd->tx.sta_id =
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- frame_size = iwl3945_fill_beacon_frame(priv,
- tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
- BUG_ON(frame_size > MAX_MPDU_SIZE);
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
- tx_beacon_cmd->tx.rate = rate;
- tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK);
-
- /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
- tx_beacon_cmd->tx.supp_rates[0] =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- tx_beacon_cmd->tx.supp_rates[1] =
- (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
- return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
- priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
- iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism. Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
- return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- return;
-}
-
-/**
- * iwl3945_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int rc;
- int i;
- u32 done;
- u32 reg_offset;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL39_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
- * NOTE: iwl3945_initialize_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache. */
- pinst = priv->ucode_init.p_addr;
- pdata = priv->ucode_init_data.p_addr;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset,
- le32_to_cpu(*image));
-
- rc = iwl3945_verify_bsm(priv);
- if (rc)
- return rc;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
- IWL39_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START_EN);
-
- return 0;
-}
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
- .rxon_assoc = iwl3945_send_rxon_assoc,
- .commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
- .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl3945_hw_txq_free_tfd,
- .txq_init = iwl3945_hw_tx_queue_init,
- .load_ucode = iwl3945_load_bsm,
- .dump_nic_error_log = iwl3945_dump_nic_error_log,
- .apm_ops = {
- .init = iwl3945_apm_init,
- .config = iwl3945_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
- .release_semaphore = iwl3945_eeprom_release_semaphore,
- },
- .send_tx_power = iwl3945_send_tx_power,
- .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
- .debugfs_ops = {
- .rx_stats_read = iwl3945_ucode_rx_stats_read,
- .tx_stats_read = iwl3945_ucode_tx_stats_read,
- .general_stats_read = iwl3945_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
- .post_associate = iwl3945_post_associate,
- .config_ap = iwl3945_config_ap,
- .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
- .get_hcmd_size = iwl3945_get_hcmd_size,
- .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .request_scan = iwl3945_request_scan,
- .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
- .lib = &iwl3945_lib,
- .hcmd = &iwl3945_hcmd,
- .utils = &iwl3945_hcmd_utils,
- .led = &iwl3945_led_ops,
- .legacy = &iwl3945_legacy_ops,
- .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
- .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
- .num_of_queues = IWL39_NUM_QUEUES,
- .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
- .set_l0s = false,
- .use_bsm = true,
- .led_compensation = 64,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
- .name = "3945BG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
- .name = "3945ABG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
- {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
- {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
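
The arithmetic behind this choice can be checked with a short standalone sketch (userspace C, not driver code; the -90 dBm sample value is illustrative):

#include <stdio.h>

int main(void)
{
	/* One frame with measured noise, one frame without noise info. */
	signed char measured = -90;                              /* OFDM frame, noise known */
	signed char avg_with_zero = (measured + 0) / 2;          /* -45: misleadingly high  */
	signed char avg_with_default = (measured + (-127)) / 2;  /* -108: near a real floor */

	/* Both results fit in an s8, but averaging in 0 skews the result upward,
	 * while the -127 default keeps the average in the plausible negative range. */
	printf("avg with 0: %d, avg with -127: %d\n", avg_with_zero, avg_with_default);
	return 0;
}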
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
- u64 data;
- s32 success_counter;
- s32 success_ratio;
- s32 counter;
- s32 average_tpt;
- unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
- spinlock_t lock;
- struct iwl_priv *priv;
- s32 *expected_tpt;
- unsigned long last_partial_flush;
- unsigned long last_flush;
- u32 flush_time;
- u32 last_tx_packets;
- u32 tx_packets;
- u8 tgg;
- u8 flush_pending;
- u8 start_rate;
- struct timer_list rate_scale_flush;
- struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
- /* used to be in sta_info */
- int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
- struct iwl_station_priv_common common;
- struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
- IWL_ANTENNA_DIVERSITY,
- IWL_ANTENNA_MAIN,
- IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
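
The RTS rule spelled out in the comment above can be sketched as a small standalone helper (the name frame_needs_rts and the hard-coded 2304-byte MSDU limit are illustrative, not part of the driver):

#include <stdbool.h>
#include <stdio.h>

/* RTS decision rule as described in the comment above. */
static bool frame_needs_rts(unsigned int rts_threshold, unsigned int mpdu_len)
{
	if (rts_threshold == 0)
		return true;                    /* RTS on all data/management packets */
	if (rts_threshold > 2304U)              /* above MAX_MSDU_SIZE: RTS disabled  */
		return false;
	return mpdu_len > rts_threshold;        /* RTS only for MPDUs above threshold */
}

int main(void)
{
	/* With the 2347 default above, RTS is effectively off even for a full-size MPDU. */
	printf("default (2347): %d, threshold 500 with 1500-byte MPDU: %d\n",
	       frame_needs_rts(2347U, 2346U), frame_needs_rts(500U, 1500U));
	return 0;
}

Note how this lines up with the defines above: the default threshold of 2347 sits above the maximum MSDU size, so RTS stays off unless the threshold is explicitly lowered.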
-
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl3945_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-#define STA_PS_STATUS_WAKE 0
-#define STA_PS_STATUS_SLEEP 1
-
-struct iwl3945_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
- IWL_RX_HDR(x)->payload + \
- le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE: The implementation of these functions is hardware specific,
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_ <-- It's part of iwlwifi (the generic driver prefix)
- * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_ <-- Called from work queue context
- * iwl3945_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of the station to find
- *
- * NOTE: This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
- const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-* Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = " %-30s %10u\n";
-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
-static const char *fmt_header =
- "%-32s current cumulative delta max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
- u32 flag;
-
- flag = le32_to_cpu(priv->_4965.statistics.flag);
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
- if (flag & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (flag & UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
-
- return p;
-}
-
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/*
-	 * The statistics information displayed here is based on the last
-	 * statistics notification from uCode and might not reflect the
-	 * current uCode activity.
-	 */
- ofdm = &priv->_4965.statistics.rx.ofdm;
- cck = &priv->_4965.statistics.rx.cck;
- general = &priv->_4965.statistics.rx.general;
- ht = &priv->_4965.statistics.rx.ofdm_ht;
- accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
- accum_cck = &priv->_4965.accum_statistics.rx.cck;
- accum_general = &priv->_4965.accum_statistics.rx.general;
- accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
- delta_cck = &priv->_4965.delta_statistics.rx.cck;
- delta_general = &priv->_4965.delta_statistics.rx.general;
- delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->_4965.max_delta.rx.ofdm;
- max_cck = &priv->_4965.max_delta.rx.cck;
- max_general = &priv->_4965.max_delta.rx.general;
- max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
- max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err, delta_cck->overrun_err,
- max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good, max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout, delta_cck->sfd_timeout,
- max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout, delta_cck->fina_timeout,
- max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts, delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err, delta_cck->mh_format_err,
- max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts, delta_general->bogus_cts,
- max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack, delta_general->bogus_ack,
- max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/* The statistics information displayed here is based on the last
-	 * statistics notification from uCode and might not reflect the
-	 * current uCode activity.
-	 */
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta_tx = &priv->_4965.delta_statistics.tx;
- max_tx = &priv->_4965.max_delta.tx;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general_common *general, *accum_general;
- struct statistics_general_common *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/* The statistics information displayed here is based on the last
-	 * statistics notification from uCode and might not reflect the
-	 * current uCode activity.
-	 */
- general = &priv->_4965.statistics.general.common;
- dbg = &priv->_4965.statistics.general.common.dbg;
- div = &priv->_4965.statistics.general.common.div;
- accum_general = &priv->_4965.accum_statistics.general.common;
- accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
- accum_div = &priv->_4965.accum_statistics.general.common.div;
- delta_general = &priv->_4965.delta_statistics.general.common;
- max_general = &priv->_4965.max_delta.general.common;
- delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
- max_dbg = &priv->_4965.max_delta.general.common.dbg;
- delta_div = &priv->_4965.delta_statistics.general.common.div;
- max_div = &priv->_4965.max_delta.general.common.div;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "ttl_timestamp:",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "wait_for_silence_timeout_count:",
- le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
- accum_dbg->wait_for_silence_timeout_cnt,
- delta_dbg->wait_for_silence_timeout_cnt,
- max_dbg->wait_for_silence_timeout_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_IO(priv,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_legacy_eeprom_query16(priv,
- EEPROM_4965_CALIB_VERSION_OFFSET);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE 1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE 7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
-
-#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
- IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
- IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent). The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
- * must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
- * current temperature with factory-measured temperatures when calculating
- * txpower settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
- (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
- ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
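
The formula and the sign-extension note above can be exercised with a short standalone sketch (userspace C; the calibration numbers are illustrative, not factory data):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the 23-bit R4 reading to 32 bits, as the note above requires
 * (relies on two's-complement arithmetic shift, as the kernel does). */
static int32_t sign_extend_23(uint32_t raw)
{
	return (int32_t)(raw << 9) >> 9;
}

/* degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
static int32_t temp_kelvin(int32_t r1, int32_t r2, int32_t r3, int32_t r4)
{
	if (r3 == r1)           /* guard against bogus calibration data */
		return -1;
	return (97 * 259 * (r4 - r2) / (r3 - r1)) / 100 + 8;
}

int main(void)
{
	/* Illustrative calibration values only: R1=0, R2=100, R3=1000, R4=0x4C6 (1222). */
	int32_t t = temp_kelvin(0, 100, 1000, sign_extend_23(0x0004C6));

	/* Prints 289 K for these made-up inputs, inside the 263..410 K sanity range. */
	printf("temperature: %d K, out of range: %d\n", t, t < 263 || t > 410);
	return 0;
}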
-
-/********************* END TEMPERATURE ***************************************/
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- * 1) EEPROM
- * 2) "initialize" alive notification
- * 3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1) Regulatory information (max txpower and channel usage flags) is provided
- * separately for each channel that can possibly be supported by 4965.
- * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- * (legacy) channels.
- *
- * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- * for locations in EEPROM.
- *
- * 2) Factory txpower calibration information is provided separately for
- * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
- * but 5 GHz has several sub-bands.
- *
- * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- * See struct iwl4965_eeprom_calib_info (and the tree of structures
- * contained within it) for format, and struct iwl4965_eeprom for
- * locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1) Temperature calculation parameters.
- *
- * 2) Power supply voltage measurement.
- *
- * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1) Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- * Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * 2 transmitters will be used simultaneously; driver must reduce the
- * regulatory limit by 3 dB (half-power) for each transmitter, so the
- * combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
- * reduce target txpower if necessary.
- *
- * Backoff values below are in 1/2 dB units (equivalent to steps in
- * txpower gain tables):
- *
- * OFDM 6 - 36 MBit: 10 steps (5 dB)
- * OFDM 48 MBit: 15 steps (7.5 dB)
- * OFDM 54 MBit: 17 steps (8.5 dB)
- * OFDM 60 MBit: 20 steps (10 dB)
- * CCK all rates: 10 steps (5 dB)
- *
- * Backoff values apply to saturation txpower on a per-transmitter basis;
- * when using MIMO (2 transmitters), each transmitter uses the same
- * saturation level provided in EEPROM, and the same backoff values;
- * no reduction (such as with regulatory txpower limits) is required.
- *
- * Saturation and backoff values apply equally to 20 MHz (legacy) channel
- * widths and 40 MHz (.11n HT40) channel widths; there is no separate
- * factory measurement for HT40 channels.
- *
- * The result of this step is the final target txpower. The rest of
- * the steps figure out the proper settings for the device to achieve
- * that target txpower.
- *
- *
- * 3) Determine (EEPROM) calibration sub band for the target channel, by
- * comparing against first and last channels in each sub band
- * (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
- * referencing the 2 factory-measured (sample) channels within the sub band.
- *
- * Interpolation is based on difference between target channel's frequency
- * and the sample channels' frequencies. Since channel numbers are based
- * on frequency (5 MHz between each channel number), this is equivalent
- * to interpolating based on channel number differences.
- *
- * Note that the sample channels may or may not be the channels at the
- * edges of the sub band. The target channel may be "outside" of the
- * span of the sampled channels.
- *
- * Driver may choose the pair (for 2 Tx chains) of measurements (see
- * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- * txpower comes closest to the desired txpower. Usually, though,
- * the middle set of measurements is closest to the regulatory limits,
- * and is therefore a good choice for all txpower calculations (this
- * assumes that high accuracy is needed for maximizing legal txpower,
- * while lower txpower configurations do not need as much accuracy).
- *
- * Driver should interpolate both members of the chosen measurement pair,
- * i.e. for both Tx chains (radio transmitters), unless the driver knows
- * that only one of the chains will be used (e.g. only one tx antenna
- * connected, but this should be unusual). The rate scaling algorithm
- * switches antennas to find best performance, so both Tx chains will
- * be used (although only one at a time) even for non-MIMO transmissions.
- *
- * Driver should interpolate factory values for temperature, gain table
- * index, and actual power. The power amplifier detector values are
- * not used by the driver.
- *
- * Sanity check: If the target channel happens to be one of the sample
- * channels, the results should agree with the sample channel's
- * measurements!
- *
- *
- * 5) Find difference between desired txpower and (interpolated)
- * factory-measured txpower. Using (interpolated) factory gain table index
- * (shown elsewhere) as a starting point, adjust this index lower to
- * increase txpower, or higher to decrease txpower, until the target
- * txpower is reached. Each step in the gain table is 1/2 dB.
- *
- * For example, if factory measured txpower is 16 dBm, and target txpower
- * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- * by 3 dB.
- *
- *
- * 6) Find difference between current device temperature and (interpolated)
- * factory-measured temperature for sub-band. Factory values are in
- * degrees Celsius. To calculate current temperature, see comments for
- * "4965 temperature calculation".
- *
- * If current temperature is higher than factory temperature, driver must
- * increase gain (lower gain table index), and vice versa; see the sketch
- * following this comment block.
- *
- * Temperature affects gain differently for different channels:
- *
- * 2.4 GHz all channels: 3.5 degrees per half-dB step
- * 5 GHz channels 34-43: 4.5 degrees per half-dB step
- * 5 GHz channels >= 44: 4.0 degrees per half-dB step
- *
- * NOTE: Temperature can increase rapidly when transmitting, especially
- * with heavy traffic at high txpowers. Driver should update
- * temperature calculations often under these conditions to
- * maintain strong txpower in the face of rising temperature.
- *
- *
- * 7) Find difference between current power supply voltage indicator
- * (from "initialize alive") and factory-measured power supply voltage
- * indicator (EEPROM).
- *
- * If the current voltage is higher (indicator is lower) than factory
- * voltage, gain should be reduced (gain table index increased) by:
- *
- * (eeprom - current) / 7
- *
- * If the current voltage is lower (indicator is higher) than factory
- * voltage, gain should be increased (gain table index decreased) by:
- *
- * 2 * (current - eeprom) / 7
- *
- * If number of index steps in either direction turns out to be > 2,
- * something is wrong ... just use 0.
- *
- * NOTE: Voltage compensation is independent of band/channel.
- *
- * NOTE: "Initialize" uCode measures current voltage, which is assumed
- * to be constant after this initial measurement. Voltage
- * compensation for txpower (number of steps in gain table)
- * may be calculated once and used until the next uCode bootload.
- *
- *
- * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * adjust txpower for each transmitter chain, so txpower is balanced
- * between the two chains. There are 5 pairs of tx_atten[group][chain]
- * values in "initialize alive", one pair for each of 5 channel ranges:
- *
- * Group 0: 5 GHz channel 34-43
- * Group 1: 5 GHz channel 44-70
- * Group 2: 5 GHz channel 71-124
- * Group 3: 5 GHz channel 125-200
- * Group 4: 2.4 GHz all channels
- *
- * Add the tx_atten[group][chain] value to the index for the target chain.
- * The values are signed, but are in pairs of 0 and a non-negative number,
- * so as to reduce gain (if necessary) of the "hotter" chain. This
- * avoids any need to double-check for regulatory compliance after
- * this step.
- *
- *
- * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
- * value to the index:
- *
- * Hardware rev B: 9 steps (4.5 dB)
- * Hardware rev C: 5 steps (2.5 dB)
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- *
- * NOTE: This compensation is in addition to any saturation backoff that
- * might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- * Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- * location(s) in command (struct iwl4965_txpowertable_cmd).
- */
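/*
 * Illustrative sketch only -- not part of the original driver.  It condenses
 * steps 4-7 above into standalone C so the arithmetic is easy to follow.
 * All function and parameter names here are hypothetical; the real driver
 * operates on struct iwl4965_eeprom_calib_ch_info and the gain tables below.
 */
#include <stdio.h>

/* Step 4: linear interpolation between the two factory sample channels. */
static int interpolate(int ch, int ch1, int val1, int ch2, int val2)
{
	if (ch2 == ch1)
		return val1;
	return val1 + ((ch - ch1) * (val2 - val1)) / (ch2 - ch1);
}

/*
 * Steps 5-7: start from the interpolated factory gain index and adjust for
 * txpower delta, temperature and voltage.  Powers are in half-dBm, so one
 * unit equals one gain-table step; degrees_per_step is in tenths of a
 * degree (35, 40 or 45, per the table above).
 */
static int example_gain_index(int factory_index, int factory_power,
			      int target_power, int factory_temp,
			      int current_temp, int degrees_per_step,
			      int eeprom_voltage, int current_voltage)
{
	int idx = factory_index;
	int volt_steps;

	/* Step 5: lower index = more power, so back off by the delta. */
	idx += factory_power - target_power;

	/* Step 6: hotter than factory -> more gain -> lower index. */
	idx -= (10 * (current_temp - factory_temp)) / degrees_per_step;

	/* Step 7: voltage compensation; >2 steps means something is wrong. */
	if (current_voltage <= eeprom_voltage)	/* lower indicator = higher V */
		volt_steps = (eeprom_voltage - current_voltage) / 7;
	else
		volt_steps = -2 * (current_voltage - eeprom_voltage) / 7;
	if (volt_steps > 2 || volt_steps < -2)
		volt_steps = 0;

	return idx + volt_steps;
}

int main(void)
{
	/* Step 4: target channel 38 between samples at channels 36 and 44. */
	printf("interpolated value: %d\n", interpolate(38, 36, 100, 44, 116));
	/* Factory 16 dBm (32 half-dBm) at index 20, target 13 dBm (26). */
	printf("adjusted index: %d\n",
	       example_gain_index(20, 32, 26, 25, 60, 35, 70, 70));
	return 0;
}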
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device. That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
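/*
 * Illustrative sketch only -- not part of the original driver.  It shows how
 * the compensation above might be applied to the per-channel regulatory
 * limit (in half-dBm, i.e. 1/2 dB units) before any other backoff.  The
 * function and parameter names are hypothetical.
 */
static int example_mimo_regulatory_limit(int regulatory_limit, int is_mimo_rate)
{
	/* 6 steps of 1/2 dB = 3 dB less per transmitter when both are used. */
	if (is_mimo_rate)
		return regulatory_limit -
		       IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
	return regulatory_limit;
}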
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
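/*
 * Illustrative sketch only -- not part of the original driver.  It decodes
 * bits [3:2] of a value assumed to have been read from CSR_HW_REV_WA_REG
 * and picks the matching CCK backoff defined above; the function name and
 * the fallback to the rev B value are hypothetical choices.
 */
static int example_cck_backoff(unsigned int hw_rev_wa_reg)
{
	unsigned int rev = (hw_rev_wa_reg >> 2) & 0x3;	/* 1 = B, 2 = C */

	if (rev == 2)
		return IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
	return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;
}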
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
-
-/**
- * Gain tables.
- *
- * The following tables contain pairs of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB. Note that these
- * are *relative* steps, not indications of absolute output power. Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
- * linear value that multiplies the output of the digital signal processor,
- * before being sent to the analog radio.
- * 2) Radio gain. This sets the analog gain of the radio Tx path.
- * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower. This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration). A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
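/*
 * Illustrative sketch only -- not part of the original driver.  It shows the
 * 5 GHz index translation described above: with the table extended down to
 * MIN_TX_GAIN_INDEX_52GHZ_EXT (-9), an EEPROM index of 40 lands at offset
 * 49 in the driver's table.  The function name is hypothetical.
 */
static int example_5ghz_table_offset(int eeprom_index)
{
	return eeprom_index - MIN_TX_GAIN_INDEX_52GHZ_EXT;	/* 40 -> 49 */
}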
-
-/**
- * 2.4 GHz gain table
- *
- * Index Dsp gain Radio gain
- * 0 110 0x3f (highest gain)
- * 1 104 0x3f
- * 2 98 0x3f
- * 3 110 0x3e
- * 4 104 0x3e
- * 5 98 0x3e
- * 6 110 0x3d
- * 7 104 0x3d
- * 8 98 0x3d
- * 9 110 0x3c
- * 10 104 0x3c
- * 11 98 0x3c
- * 12 110 0x3b
- * 13 104 0x3b
- * 14 98 0x3b
- * 15 110 0x3a
- * 16 104 0x3a
- * 17 98 0x3a
- * 18 110 0x39
- * 19 104 0x39
- * 20 98 0x39
- * 21 110 0x38
- * 22 104 0x38
- * 23 98 0x38
- * 24 110 0x37
- * 25 104 0x37
- * 26 98 0x37
- * 27 110 0x36
- * 28 104 0x36
- * 29 98 0x36
- * 30 110 0x35
- * 31 104 0x35
- * 32 98 0x35
- * 33 110 0x34
- * 34 104 0x34
- * 35 98 0x34
- * 36 110 0x33
- * 37 104 0x33
- * 38 98 0x33
- * 39 110 0x32
- * 40 104 0x32
- * 41 98 0x32
- * 42 110 0x31
- * 43 104 0x31
- * 44 98 0x31
- * 45 110 0x30
- * 46 104 0x30
- * 47 98 0x30
- * 48 110 0x6
- * 49 104 0x6
- * 50 98 0x6
- * 51 110 0x5
- * 52 104 0x5
- * 53 98 0x5
- * 54 110 0x4
- * 55 104 0x4
- * 56 98 0x4
- * 57 110 0x3
- * 58 104 0x3
- * 59 98 0x3
- * 60 110 0x2
- * 61 104 0x2
- * 62 98 0x2
- * 63 110 0x1
- * 64 104 0x1
- * 65 98 0x1
- * 66 110 0x0
- * 67 104 0x0
- * 68 98 0x0
- * 69 97 0
- * 70 96 0
- * 71 95 0
- * 72 94 0
- * 73 93 0
- * 74 92 0
- * 75 91 0
- * 76 90 0
- * 77 89 0
- * 78 88 0
- * 79 87 0
- * 80 86 0
- * 81 85 0
- * 82 84 0
- * 83 83 0
- * 84 82 0
- * 85 81 0
- * 86 80 0
- * 87 79 0
- * 88 78 0
- * 89 77 0
- * 90 76 0
- * 91 75 0
- * 92 74 0
- * 93 73 0
- * 94 72 0
- * 95 71 0
- * 96 70 0
- * 97 69 0
- * 98 68 0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index Dsp gain Radio gain
- * -9 123 0x3F (highest gain)
- * -8 117 0x3F
- * -7 110 0x3F
- * -6 104 0x3F
- * -5 98 0x3F
- * -4 110 0x3E
- * -3 104 0x3E
- * -2 98 0x3E
- * -1 110 0x3D
- * 0 104 0x3D
- * 1 98 0x3D
- * 2 110 0x3C
- * 3 104 0x3C
- * 4 98 0x3C
- * 5 110 0x3B
- * 6 104 0x3B
- * 7 98 0x3B
- * 8 110 0x3A
- * 9 104 0x3A
- * 10 98 0x3A
- * 11 110 0x39
- * 12 104 0x39
- * 13 98 0x39
- * 14 110 0x38
- * 15 104 0x38
- * 16 98 0x38
- * 17 110 0x37
- * 18 104 0x37
- * 19 98 0x37
- * 20 110 0x36
- * 21 104 0x36
- * 22 98 0x36
- * 23 110 0x35
- * 24 104 0x35
- * 25 98 0x35
- * 26 110 0x34
- * 27 104 0x34
- * 28 98 0x34
- * 29 110 0x33
- * 30 104 0x33
- * 31 98 0x33
- * 32 110 0x32
- * 33 104 0x32
- * 34 98 0x32
- * 35 110 0x31
- * 36 104 0x31
- * 37 98 0x31
- * 38 110 0x30
- * 39 104 0x30
- * 40 98 0x30
- * 41 110 0x25
- * 42 104 0x25
- * 43 98 0x25
- * 44 110 0x24
- * 45 104 0x24
- * 46 98 0x24
- * 47 110 0x23
- * 48 104 0x23
- * 49 98 0x23
- * 50 110 0x22
- * 51 104 0x18
- * 52 98 0x18
- * 53 110 0x17
- * 54 104 0x17
- * 55 98 0x17
- * 56 110 0x16
- * 57 104 0x16
- * 58 98 0x16
- * 59 110 0x15
- * 60 104 0x15
- * 61 98 0x15
- * 62 110 0x14
- * 63 104 0x14
- * 64 98 0x14
- * 65 110 0x13
- * 66 104 0x13
- * 67 98 0x13
- * 68 110 0x12
- * 69 104 0x08
- * 70 98 0x08
- * 71 110 0x07
- * 72 104 0x07
- * 73 98 0x07
- * 74 110 0x06
- * 75 104 0x06
- * 76 98 0x06
- * 77 110 0x05
- * 78 104 0x05
- * 79 98 0x05
- * 80 110 0x04
- * 81 104 0x04
- * 82 98 0x04
- * 83 110 0x03
- * 84 104 0x03
- * 85 98 0x03
- * 86 110 0x02
- * 87 104 0x02
- * 88 98 0x02
- * 89 110 0x01
- * 90 104 0x01
- * 91 98 0x01
- * 92 110 0x00
- * 93 104 0x00
- * 94 98 0x00
- * 95 93 0x00
- * 96 88 0x00
- * 97 83 0x00
- * 98 78 0x00
- */
-
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated. These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
-#define IWL_TX_POWER_REGULATORY_MIN (0)
-#define IWL_TX_POWER_REGULATORY_MAX (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion. This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
-#define IWL_TX_POWER_SATURATION_MIN (20)
-#define IWL_TX_POWER_SATURATION_MAX (50)
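/*
 * Illustrative sketch only -- not part of the original driver.  It shows the
 * range check implied by the two comment blocks above: out-of-range EEPROM
 * values fall back to the default.  Units are half-dBm; the function name
 * is hypothetical.
 */
static int example_sanitize(int eeprom_val, int min, int max, int def)
{
	return (eeprom_val < min || eeprom_val > max) ? def : eeprom_val;
}

/*
 * For example, for the 2.4 GHz saturation level:
 *	sat = example_sanitize(eeprom_sat_24, IWL_TX_POWER_SATURATION_MIN,
 *			       IWL_TX_POWER_SATURATION_MAX,
 *			       IWL_TX_POWER_DEFAULT_SATURATION_24);
 */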
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain. Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below. If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
- CALIB_CH_GROUP_1 = 0,
- CALIB_CH_GROUP_2 = 1,
- CALIB_CH_GROUP_3 = 2,
- CALIB_CH_GROUP_4 = 3,
- CALIB_CH_GROUP_5 = 4,
- CALIB_CH_GROUP_MAX
-};
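/*
 * Illustrative sketch only -- not part of the original driver.  It maps a
 * channel onto one of the calibration groups defined above; the is_2ghz
 * flag is a hypothetical parameter used to select the 2.4 GHz group
 * directly, and the function name is hypothetical as well.
 */
static int example_calib_group(int channel, int is_2ghz)
{
	if (is_2ghz)
		return CALIB_CH_GROUP_5;
	if (channel < CALIB_IWL_TX_ATTEN_GR1_FCH)
		return -1;		/* below the first 5 GHz group */
	if (channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;
	return -1;			/* above the last 5 GHz group */
}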
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues). All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device. When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS 7
-#define IWL49_CMD_FIFO_NUM 4
-#define IWL49_NUM_QUEUES 16
-#define IWL49_NUM_AMPDU_QUEUES 8
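/*
 * Illustrative sketch only -- not part of the original driver.  One way to
 * obtain the cache-coherent, device-visible memory described above is the
 * kernel's dma_alloc_coherent() API; the real driver does this inside its
 * queue allocation helpers, and the matching dma_free_coherent() calls can
 * be seen in iwl4965_rx_queue_free() further below.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_shared(struct device *dev, size_t size,
				  dma_addr_t *dma)
{
	/* CPU writes to this buffer are immediately visible to the device. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}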
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
- u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
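/*
 * Illustrative sketch only -- not part of the original driver.  It shows the
 * duplication rule described above: entries 0-63 are mirrored into entries
 * 256-319.  TFD_QUEUE_SIZE_MAX is assumed to be the driver's 256-TFD queue
 * size from its flow-handler header; the function name is hypothetical.
 */
static void example_set_byte_count(struct iwl4965_scd_bc_tbl *bc_tbl,
				   int tfd_index, u16 byte_count)
{
	bc_tbl->tfd_offset[tfd_index] = cpu_to_le16(byte_count);

	/* Mirror the first Tx-window's worth of entries (0-63). */
	if (tfd_index < TFD_QUEUE_BC_SIZE - TFD_QUEUE_SIZE_MAX)
		bc_tbl->tfd_offset[tfd_index + TFD_QUEUE_SIZE_MAX] =
			cpu_to_le16(byte_count);
}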
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET 44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL4965_DEFAULT_TX_RETRY 15
-
-/* EEPROM */
-#define IWL4965_FIRST_AMPDU_QUEUE 10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee6..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
- u32 reg;
-
- reg = iwl_read32(priv, CSR_LED_REG);
- if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register off */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
- .cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc33..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b01..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_legacy_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl4965_rx_queue_reset(priv, rxq);
-
- iwl4965_rx_replenish(priv);
-
- iwl4965_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl4965_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl4965_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl4965_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv,
- "Failed to alloc_pages with %s. "
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl4965_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl4965_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
- iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if SKB is set to
- * non-NULL, it is unmapped and freed.
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->_4965.last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->_4965.last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl4965_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
- rx_status.band);
- rx_status.rate_idx =
- iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
-	/* TSF isn't reliable. In order to allow a smooth user experience,
-	 * this W/A doesn't propagate it to mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_4965.last_phy_res_valid = true;
- memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_any_associated(priv)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
- priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = iwl4965_first_antenna(active_chains);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
-
- cmd_len = iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- if (add)
- return iwl4965_add_bssid_station(priv, vif_priv->ctx,
- vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
- return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
- priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE 3
-#define IWL_NUM_RX_CHAINS_SINGLE 2
-#define IWL_NUM_IDLE_CHAINS_DUAL 2
-#define IWL_NUM_IDLE_CHAINS_SINGLE 1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More provides better reception via diversity. Fewer saves power
- * at the expense of throughput, but only when not in powersave to
- * start with.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
- /* # of Rx chains to use when expecting MIMO. */
- if (iwl4965_is_single_rx_stream(priv))
- return IWL_NUM_RX_CHAINS_SINGLE;
- else
- return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless the device supports spatial
- * multiplexing power save, use the active count for rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
- /* # Rx chains when idling, depending on SMPS mode */
- switch (priv->current_ht_config.smps) {
- case IEEE80211_SMPS_STATIC:
- case IEEE80211_SMPS_DYNAMIC:
- return IWL_NUM_IDLE_CHAINS_SINGLE;
- case IEEE80211_SMPS_OFF:
- return active_cnt;
- default:
- WARN(1, "invalid SMPS mode %d",
- priv->current_ht_config.smps);
- return active_cnt;
- }
-}
-
-/* up to 4 chains */
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
- u8 res;
- res = (chain_bitmap & BIT(0)) >> 0;
- res += (chain_bitmap & BIT(1)) >> 1;
- res += (chain_bitmap & BIT(2)) >> 2;
- res += (chain_bitmap & BIT(3)) >> 3;
- return res;
-}
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for scan command ... it puts data in wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- bool is_single = iwl4965_is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
- u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
- u32 active_chains;
- u16 rx_chain;
-
- /* Tell uCode which antennas are actually connected.
- * Before first association, we assume all antennas are connected.
- * Just after first association, iwl4965_chain_noise_calibration()
- * checks which antennas actually *are* connected. */
- if (priv->chain_noise_data.active_chains)
- active_chains = priv->chain_noise_data.active_chains;
- else
- active_chains = priv->hw_params.valid_rx_ant;
-
- rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
- /* How many receivers should we use? */
- active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
- idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
-	/* correct rx chain count according to hw settings
- * and chain noise calibration
- */
- valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
- if (valid_rx_cnt < active_rx_cnt)
- active_rx_cnt = valid_rx_cnt;
-
- if (valid_rx_cnt < idle_rx_cnt)
- idle_rx_cnt = valid_rx_cnt;
-
- rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
- rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
-
- ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
- if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
- ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
- else
- ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
- IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
- ctx->staging.rx_chain,
- active_rx_cnt, idle_rx_cnt);
-
- WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
- active_rx_cnt < idle_rx_cnt);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
- int i;
- u8 ind = ant;
-
- for (i = 0; i < RATE_ANT_NUM - 1; i++) {
- ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
- if (valid & BIT(ind))
- return ind;
- }
- return ant;
-}
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
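iwl4965_get_fh_string() above leans on the driver's IWL_CMD() helper, defined elsewhere in iwlegacy; the usual shape of that pattern is a case label that stringifies its argument. A hedged standalone sketch of the same idea with made-up register names (the DEMO_* identifiers are hypothetical):

#include <stdio.h>

#define DEMO_REG_A 0x10
#define DEMO_REG_B 0x14

/* DEMO_NAME is a hypothetical stand-in for the driver's IWL_CMD */
#define DEMO_NAME(x) case x: return #x

static const char *demo_reg_string(int reg)
{
	switch (reg) {
	DEMO_NAME(DEMO_REG_A);
	DEMO_NAME(DEMO_REG_B);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	printf("%s\n", demo_reg_string(0x14));	/* prints DEMO_REG_B */
	return 0;
}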
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
-
-#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
-
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX 15
-/* max time to accumulate history: 3 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
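Each IWL_DECLARE_RATE_INFO() entry above records the indices of its neighbouring rates (previous/next, plus toggle variants), so the scaler can step up or down a rate chain instead of scanning the whole table. A simplified standalone sketch of that linkage with a three-entry chain and a single prev/next pair (hypothetical names, not the driver's table):

#include <stdio.h>

#define DEMO_RATE_INVALID 0xff

struct demo_rate {
	unsigned char prev;	/* index of next-lower rate, or INVALID */
	unsigned char next;	/* index of next-higher rate, or INVALID */
};

/* three-rate chain: 0 <-> 1 <-> 2 */
static const struct demo_rate demo_rates[3] = {
	{ DEMO_RATE_INVALID, 1 },
	{ 0, 2 },
	{ 1, DEMO_RATE_INVALID },
};

/* step down the chain until a rate allowed by 'mask' is found */
static unsigned char demo_step_down(unsigned char idx, unsigned mask)
{
	while (idx != DEMO_RATE_INVALID) {
		idx = demo_rates[idx].prev;
		if (idx == DEMO_RATE_INVALID || (mask & (1u << idx)))
			break;
	}
	return idx;
}

int main(void)
{
	/* only rate 0 allowed: stepping down from rate 2 lands on 0 */
	printf("%u\n", demo_step_down(2, 0x1));
	return 0;
}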
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
- int idx = 0;
-
- /* HT rate format */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
-
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
- idx += IWL_FIRST_OFDM_RATE;
-		/* skip 9M, not supported in HT */
- if (idx >= IWL_RATE_9M_INDEX)
- idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
- return idx;
-
- /* legacy rate format, search for match in table */
- } else {
- for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx;
- }
-
- return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
- bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in the legacy table and will only be used in the
- * G (2.4 GHz) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
- { "1", "BPSK DSSS"},
- { "2", "QPSK DSSS"},
- {"5.5", "BPSK CCK"},
- { "11", "QPSK CCK"},
- { "6", "BPSK 1/2"},
- { "9", "BPSK 1/2"},
- { "12", "QPSK 1/2"},
- { "18", "QPSK 3/4"},
- { "24", "16QAM 1/2"},
- { "36", "16QAM 3/4"},
- { "48", "64QAM 2/3"},
- { "54", "64QAM 3/4"},
- { "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM (8)
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
- return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = IWL_INVALID_VALUE;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
- return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- *	Remove old data from the statistics. All data older than
- *	TID_MAX_TIME_DIFF is deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * Increment the traffic load value for this tid and also remove
- * any old values that have aged past the history window
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else
- return MAX_TID_COUNT;
-
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
-
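The two helpers above keep per-TID traffic load in a small ring of fixed-width time cells and expire cells that fall out of the history window. A standalone sketch of that bookkeeping under simplified assumptions (8 cells of 1000 ms each; every name below is hypothetical):

#include <stdio.h>

#define DEMO_CELLS	8	/* ring size (driver: TID_QUEUE_MAX_SIZE) */
#define DEMO_CELL_MS	1000	/* ms per cell (driver: TID_QUEUE_CELL_SPACING) */

struct demo_load {
	unsigned long start_ms;		/* timestamp of the oldest cell */
	unsigned count[DEMO_CELLS];	/* packets per cell */
	unsigned head;			/* index of the oldest cell */
	unsigned used;			/* cells currently in use */
	unsigned total;			/* packets over the whole window */
};

static void demo_add_packet(struct demo_load *tl, unsigned long now_ms)
{
	unsigned long idx;

	now_ms -= now_ms % DEMO_CELL_MS;	/* round down to a cell edge */

	if (!tl->used) {			/* first packet: (re)start */
		tl->start_ms = now_ms;
		tl->head = 0;
		tl->used = 1;
		tl->count[0] = 1;
		tl->total = 1;
		return;
	}

	/* expire cells that have aged out of the history window */
	while (tl->used &&
	       now_ms - tl->start_ms >= (unsigned long)DEMO_CELLS * DEMO_CELL_MS) {
		tl->total -= tl->count[tl->head];
		tl->count[tl->head] = 0;
		tl->head = (tl->head + 1) % DEMO_CELLS;
		tl->start_ms += DEMO_CELL_MS;
		tl->used--;
	}
	if (!tl->used) {			/* everything expired: restart */
		demo_add_packet(tl, now_ms);
		return;
	}

	idx = (now_ms - tl->start_ms) / DEMO_CELL_MS;
	tl->count[(tl->head + idx) % DEMO_CELLS]++;
	tl->total++;
	if (idx + 1 > tl->used)
		tl->used = idx + 1;
}

int main(void)
{
	struct demo_load tl = { 0 };

	demo_add_packet(&tl, 0);
	demo_add_packet(&tl, 500);
	demo_add_packet(&tl, 9500);	/* far ahead: the old cell expires */
	printf("total=%u used=%u\n", tl.total, tl.used);	/* total=1 used=1 */
	return 0;
}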
-/*
- * Get the traffic load value for tid
- */
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= TID_MAX_LOAD_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_data, u8 tid,
- struct ieee80211_sta *sta)
-{
- int ret = -EAGAIN;
- u32 load;
-
- load = iwl4965_rs_tl_get_load(lq_data, tid);
-
- if (load > IWL_AGG_LOAD_THRESHOLD) {
- IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
-			/*
-			 * driver and mac80211 are out of sync;
-			 * this might be caused by reloading firmware,
-			 * so stop the tx ba session here
-			 */
- IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_ERR(priv, "Aggregation not enabled for tid %d "
- "because load = %u\n", tid, load);
- }
- return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
- struct iwl_lq_sta *lq_data,
- struct ieee80211_sta *sta)
-{
- if (tid < TID_MAX_LOAD_COUNT)
- iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
- return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
-
-/*
- * Static function that gets the expected throughput from an iwl_scale_tbl_info
- * and wraps a NULL pointer check
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
-{
- struct iwl_rate_scale_data *window = NULL;
- static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count, tpt;
-
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
- /* Get expected throughput */
- tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (attempts > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & mask) {
- window->data &= ~mask;
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame to throw away oldest history */
- window->data <<= 1;
-
- /* Mark the most recent #successes attempts as successful */
- if (successes > 0) {
- window->success_counter++;
- window->data |= 0x1;
- successes--;
- }
-
- attempts--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = (window->success_ratio * tpt + 64) / 128;
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- return 0;
-}
-
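The sliding window above can be exercised in isolation: the sketch below mirrors the 62-frame bitmap, the drop-oldest step and the 128-scaled success ratio, but the struct and function names are hypothetical and the throughput bookkeeping is omitted:

#include <stdio.h>
#include <stdint.h>

#define DEMO_WINDOW	62	/* driver: IWL_RATE_MAX_WINDOW */

struct demo_window {
	uint64_t data;		/* low bits = most recent frames; set bit = ACKed */
	int counter;		/* frames recorded (caps at DEMO_WINDOW) */
	int success_counter;	/* ACKed frames currently in the window */
	int success_ratio;	/* 128 * success percentage */
};

static void demo_collect(struct demo_window *w, int success)
{
	const uint64_t oldest = (uint64_t)1 << (DEMO_WINDOW - 1);

	if (w->counter >= DEMO_WINDOW) {	/* window full: drop the oldest */
		w->counter = DEMO_WINDOW - 1;
		if (w->data & oldest) {
			w->data &= ~oldest;
			w->success_counter--;
		}
	}
	w->counter++;
	w->data <<= 1;				/* shift history by one frame */
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
	w->success_ratio = 128 * (100 * w->success_counter) / w->counter;
}

int main(void)
{
	struct demo_window w = { 0, 0, 0, 0 };
	int i;

	for (i = 0; i < 100; i++)
		demo_collect(&w, i % 4 != 0);	/* roughly 75% of frames ACKed */
	printf("success_ratio=%d (12800 == 100%%)\n", w.success_ratio);
	return 0;
}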
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
-{
- u32 rate_n_flags = 0;
-
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwlegacy_rates[index].plcp;
- if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
-
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
- IWL_ERR(priv, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
- }
- rate_n_flags = RATE_MCS_HT_MSK;
-
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwlegacy_rates[index].plcp_siso;
- else
- rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
- } else {
- IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
- }
-
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40) {
- if (tbl->is_dup)
- rate_n_flags |= RATE_MCS_DUP_MSK;
- else
- rate_n_flags |= RATE_MCS_HT40_MSK;
- }
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(priv, "GF was set with SGI:SISO\n");
- }
- }
- }
- return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
-{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
-
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
- *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
- return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
- tbl->is_dup = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
-
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
- if (iwl4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
- }
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
- (rate_n_flags & RATE_MCS_DUP_MSK))
- tbl->is_ht40 = 1;
-
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- tbl->is_dup = 1;
-
- mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (iwl4965_num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else {
- if (iwl4965_num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
- }
- }
- return 0;
-}
-
-/*
- * Switch to another antenna/antennas and return 1,
- * or return 0 if no other valid antenna is found.
- */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
-{
- u8 new_ant_type;
-
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
- return 0;
-
- if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
- return 0;
-
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
- while ((new_ant_type != tbl->ant_type) &&
- !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
- new_ant_type = ant_toggle_lookup[new_ant_type];
-
- if (new_ant_type == tbl->ant_type)
- return 0;
-
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
- return 1;
-}
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For management or broadcast frames, only return the
- * basic available rates.
- *
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
-{
- if (is_legacy(rate_type)) {
- return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else
- return lq_sta->active_mimo2_rate;
- }
-}
-
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
- int rate_type)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
-
-	/* 802.11A or HT walks to the next literal adjacent rate in
- * the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- low = iwlegacy_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- high = iwlegacy_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
-
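For the non-legacy branch above, the adjacent-rate lookup is just a scan for the nearest enabled bit below and above the current index, with the pair packed as (high << 8) | low. A standalone sketch (hypothetical names):

#include <stdio.h>

#define DEMO_RATE_COUNT	13
#define DEMO_INVALID	0xff

static unsigned demo_adjacent(unsigned index, unsigned rate_mask)
{
	unsigned low = DEMO_INVALID, high = DEMO_INVALID;
	int i;

	for (i = (int)index - 1; i >= 0; i--)		/* nearest lower rate */
		if (rate_mask & (1u << i)) {
			low = i;
			break;
		}

	for (i = index + 1; i < DEMO_RATE_COUNT; i++)	/* nearest higher rate */
		if (rate_mask & (1u << i)) {
			high = i;
			break;
		}

	return (high << 8) | low;
}

int main(void)
{
	/* rates 4, 7 and 10 enabled: adjacent to index 7 are 4 and 10 */
	unsigned hl = demo_adjacent(7, (1u << 4) | (1u << 7) | (1u << 10));

	printf("low=%u high=%u\n", hl & 0xff, (hl >> 8) & 0xff);
	return 0;
}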
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
-{
- s32 low;
- u16 rate_mask;
- u16 high_low;
- u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
- struct iwl_priv *priv = lq_sta->drv;
-
- /* check if we need to switch from HT to legacy rates.
- * assumption is that mandatory rates (1Mbps or 6Mbps)
-	 * are always supported (as the spec requires) */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
-
- if (iwl4965_num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- tbl->is_ht40 = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
-
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
-
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
- }
-
- high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
- scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
-
- if (low == IWL_RATE_INVALID)
- low = scale_index;
-
-out:
- return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
-{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- int legacy_success;
- int retries;
- int rs_index, mac_index, i;
- struct iwl_lq_sta *lq_sta = priv_sta;
- struct iwl_link_quality_cmd *table;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE_LIMIT(priv,
- "get frame ack response, update rate scale window\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- return;
- }
-
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- /*
- * Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
- priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
- mac_flags = info->status.rates[0].flags;
- mac_index = info->status.rates[0].idx;
- /* For HT packets, map MCS to PLCP */
- if (mac_flags & IEEE80211_TX_RC_MCS) {
- mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
- /*
- * mac80211 HT index is always zero-indexed; we need to move
- * HT OFDM rates after CCK rates in 2.4 GHz band
- */
- if (priv->band == IEEE80211_BAND_2GHZ)
- mac_index += IWL_FIRST_OFDM_RATE;
- }
- /* Here we actually compare this rate to the latest LQ command */
- if ((mac_index < 0) ||
- (tbl_type.is_SGI !=
- !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 !=
- !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
- (tbl_type.is_dup !=
- !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
- (tbl_type.ant_type != info->antenna_sel_tx) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
- IWL_DEBUG_RATE(priv,
- "initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
- /*
- * Since rates mis-match, the last LQ command may have failed.
- * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
- * ... driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
- CMD_ASYNC, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- /* Figure out if rate scale algorithm is in active or search table */
- if (iwl4965_table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (iwl4965_table_type_matches(&tbl_type,
- &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
- IWL_DEBUG_RATE(priv,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
- /*
-		 * no matching table found; bypass the data collection
-		 * and continue rate scaling to find the right rate table
- */
- iwl4965_rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
- &rs_index);
- iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /*
-		 * For legacy, update frame history for each Tx retry.
- */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
- &tbl_type, &rs_index);
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
- tmp_tbl = curr_tbl;
- else if (iwl4965_table_type_matches(&tbl_type,
- other_tbl))
- tmp_tbl = other_tbl;
- else
- continue;
- iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
- iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
- struct iwl_lq_sta *lq_sta)
-{
- IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
- if (is_legacy) {
- lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
- } else {
- lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
- }
- lq_sta->table_count = 0;
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
-{
- /* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
- * status */
- if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
- else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
-}
-
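The row selection at the end of iwl4965_rs_set_expected_tpt_table() maps the SGI and aggregation flags onto the four rows of each HT table (Norm, SGI, AGG, AGG+SGI). A minimal standalone sketch of that mapping, reusing a few values excerpted from expected_tpt_siso20MHz above (the 6/12/18 Mbps columns; all names are hypothetical):

#include <stdio.h>

/* excerpt of expected_tpt_siso20MHz: 6, 12 and 18 Mbps columns */
static const int demo_tpt_siso20[4][3] = {
	{ 42,  76, 102 },	/* Norm    */
	{ 46,  82, 110 },	/* SGI     */
	{ 48,  93, 135 },	/* AGG     */
	{ 53, 102, 149 },	/* AGG+SGI */
};

static const int *demo_pick_row(int is_sgi, int is_agg)
{
	return demo_tpt_siso20[(is_agg ? 2 : 0) + (is_sgi ? 1 : 0)];
}

int main(void)
{
	const int *row = demo_pick_row(1, 1);	/* AGG+SGI row */

	printf("expected tpt at 6 Mbps MCS: %d\n", row[0]);	/* 53 */
	return 0;
}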
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
-{
- /* "active" values */
- struct iwl_scale_tbl_info *active_tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
-
- /* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
- u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- for (; ;) {
- high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
- tbl->lq_type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
-
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
-
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
-
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
-
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
-
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
- }
-
- return new_rate;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = LQ_MIMO2;
- tbl->is_dup = lq_sta->is_dup;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
-
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- u8 is_green = lq_sta->is_green;
- s32 rate;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
- tbl->is_dup = lq_sta->is_dup;
- tbl->lq_type = LQ_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- int ret = 0;
- u8 update_search_tbl_counter = 0;
-
- tbl->action = IWL_LEGACY_SWITCH_SISO;
-
- start_action = tbl->action;
- for (; ;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- iwl4965_rs_set_expected_tpt_table(lq_sta,
- search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- }
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
-
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- u8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
-
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(priv,
- "SGI was set in GF+SISO\n");
- }
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
-
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-
-}
-
-/*
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
- struct iwl_scale_tbl_info *tbl;
- int i;
- int active_tbl;
- int flush_interval_passed = 0;
- struct iwl_priv *priv;
-
- priv = lq_sta->drv;
- active_tbl = lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
-
- /* Elapsed time using current modulation mode */
- if (lq_sta->flush_timer)
- flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
-
- /*
- * Check if we should allow search for new modulation mode.
- * If many frames have failed or succeeded, or we've used
- * this same modulation for a long time, allow search, and
- * reset history stats that keep track of whether we should
- * allow a new search. Also (below) reset all bitmaps and
- * stats in active history.
- */
- if (force_search ||
- (lq_sta->total_failed > lq_sta->max_failure_limit) ||
- (lq_sta->total_success > lq_sta->max_success_limit) ||
- ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
- && (flush_interval_passed))) {
-			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
- lq_sta->total_failed,
- lq_sta->total_success,
- flush_interval_passed);
-
- /* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = 0;
-
- /*
- * Else if we've used this modulation mode enough repetitions
- * (regardless of elapsed time or success/failure), reset
- * history bitmaps and rate-specific stats for all rates in
- * active table.
- */
- } else {
- lq_sta->table_count++;
- if (lq_sta->table_count >=
- lq_sta->table_count_limit) {
- lq_sta->table_count = 0;
-
- IWL_DEBUG_RATE(priv,
- "LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-
- /* If transitioning to allow "search", reset all history
- * bitmaps and stats in active table (this will become the new
- * "search" table). */
- if (!lq_sta->stay_in_tbl) {
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-}
-
-/*
- * Set up the rate table in uCode and
- * return rate_n_flags as used in the table.
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
-{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
- iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
- return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int low = IWL_RATE_INVALID;
- int high = IWL_RATE_INVALID;
- int index;
- int i;
- struct iwl_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- u16 rate_mask;
- u8 update_lq = 0;
- struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
- u32 rate;
- u8 is_green = 0;
- u8 active_tbl = 0;
- u8 done_search = 0;
- u16 high_low;
- s32 sr;
- u8 tid = MAX_TID_COUNT;
- struct iwl_tid_data *tid_data;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
- /* Send management frames and NO_ACK data using lowest rate. */
- /* TODO: this could probably be improved.. */
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- if (!sta || !lq_sta)
- return;
-
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
- tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF)
- lq_sta->is_agg = 0;
- else
- lq_sta->is_agg = 1;
- } else
- lq_sta->is_agg = 0;
-
- /*
- * Select rate-scale / modulation-mode table to work with in
- * the rest of this function: "search" if searching for better
- * modulation mode, or "active" if doing rate scaling within a mode.
- */
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- is_green = lq_sta->is_green;
-
- /* current tx rate */
- index = lq_sta->last_txrate_idx;
-
- IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
- /* rates available for this association, and for modulation mode */
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
- IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else
- rate_scale_index_msk = rate_mask;
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
- IWL_ERR(priv, "Current Rate is not valid\n");
- if (lq_sta->search_better_tbl) {
- /* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
- lq_sta->search_better_tbl = 0;
- tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
- }
- return;
- }
-
- /* Get expected throughput table and history window for current rate */
- if (!tbl->expected_tpt) {
- IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
- return;
- }
-
- /* force user max rate if set by user */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < index)) {
- index = lq_sta->max_rate_idx;
- update_lq = 1;
- window = &(tbl->win[index]);
- goto lq_update;
- }
-
- window = &(tbl->win[index]);
-
- /*
- * If there is not enough history to calculate actual average
- * throughput, keep analyzing results of more tx frames, without
- * changing rate or mode (bypass most of the rest of this function).
- * Set up new rate table in uCode only if old rate is not supported
- * in current association (use new rate found above).
- */
- fail_count = window->counter - window->success_counter;
- if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
- IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
- "for index %d\n",
- window->success_counter, window->counter, index);
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(priv,
- "expected_tpt should have been calculated by now\n");
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
- }
-
- /* If we are searching for better modulation mode, check success. */
- if (lq_sta->search_better_tbl) {
- /* If good success, continue using the "search" mode;
- * no need to send new link quality command, since we're
- * continuing to use the setup that we've been trying. */
- if (window->average_tpt > lq_sta->last_tpt) {
-
- IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
- /* Swap tables; "search" becomes "active" */
- lq_sta->active_tbl = active_tbl;
- current_tpt = window->average_tpt;
-
- /* Else poor success; go back to mode in "active" table */
- } else {
-
- IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- /* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
-
- /* Revert to "active" table */
- active_tbl = lq_sta->active_tbl;
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* Revert to "active" rate and throughput info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- current_tpt = lq_sta->last_tpt;
-
- /* Need to set up a new rate table in uCode */
- update_lq = 1;
- }
-
- /* Either way, we've made a decision; modulation mode
- * search is done, allow rate adjustment next time. */
- lq_sta->search_better_tbl = 0;
- done_search = 1; /* Don't switch modes below! */
- goto lq_update;
- }
-
- /* (Else) not in search of better modulation mode, try for better
- * starting rate, while staying in this mode. */
- high_low = iwl4965_rs_get_adjacent_rate(priv, index,
- rate_scale_index_msk,
- tbl->lq_type);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow a rate higher than the user constraint */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- sr = window->success_ratio;
-
- /* Collect measured throughputs for current and adjacent rates */
- current_tpt = window->average_tpt;
- if (low != IWL_RATE_INVALID)
- low_tpt = tbl->win[low].average_tpt;
- if (high != IWL_RATE_INVALID)
- high_tpt = tbl->win[high].average_tpt;
-
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
-
-	/* No throughput measured yet for adjacent rates; try increasing. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
- /* Decrease starting rate, update uCode's rate table */
- if (low != IWL_RATE_INVALID) {
- update_lq = 1;
- index = low;
- }
-
- break;
- case 1:
- /* Increase starting rate, update uCode's rate table */
- if (high != IWL_RATE_INVALID) {
- update_lq = 1;
- index = high;
- }
-
- break;
- case 0:
- /* No change */
- default:
- break;
- }
-
- IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
- "high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
- /* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- /*
- * Search for new modulation mode if we're:
- * 1) Not changing rates right now
- * 2) Not just finishing up a search
- * 3) Allowing a new search
- */
- if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
- /* Save current throughput to compare with "search" throughput*/
- lq_sta->last_tpt = current_tpt;
-
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- iwl4965_rs_move_legacy_other(priv, lq_sta,
- conf, sta, index);
- else if (is_siso(tbl->lq_type))
- iwl4965_rs_move_siso_to_other(priv, lq_sta,
- conf, sta, index);
- else /* (is_mimo2(tbl->lq_type)) */
- iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
- conf, sta, index);
-
- /* If new "search" mode was selected, set up in uCode table */
- if (lq_sta->search_better_tbl) {
- /* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
-
- /* Use new "search" start rate */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
- IWL_DEBUG_RATE(priv,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- iwl4965_rs_fill_link_cmd(priv, lq_sta,
- tbl->current_rate);
- iwl_legacy_send_lq_cmd(priv, ctx,
- &lq_sta->lq, CMD_ASYNC, false);
- } else
- done_search = 1;
- }
-
- if (done_search && !lq_sta->stay_in_tbl) {
- /* If the "active" (non-search) mode was legacy,
- * and we've tried switching antennas,
- * but we haven't been able to try HT modes (not available),
- * stay with best antenna legacy modulation for a while
- * before next round of mode comparisons. */
- tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
- lq_sta->action_counter > tbl1->max_search) {
- IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
- iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
- }
-
- /* If we're in an HT mode, and all 3 mode switch actions
- * have been tried and compared, stay in this best modulation
- * mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
- if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
- (lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
- tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF) {
- IWL_DEBUG_RATE(priv,
- "try to aggregate tid %d\n",
- tid);
- iwl4965_rs_tl_turn_on_agg(priv, tid,
- lq_sta, sta);
- }
- }
- iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
- }
- }
-
-out:
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
- index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
-}
-
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
- *
- * NOTE: Run the REPLY_ADD_STA command to set up the station table entry before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD and
- * requires the station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct iwl_scale_tbl_info *tbl;
- int rate_idx;
- int i;
- u32 rate;
- u8 use_green = iwl4965_rs_use_green(sta);
- u8 active_tbl = 0;
- u8 valid_tx_ant;
- struct iwl_station_priv *sta_priv;
- struct iwl_rxon_context *ctx;
-
- if (!sta || !lq_sta)
- return;
-
- sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
-
- i = lq_sta->last_txrate_idx;
-
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- if ((i < 0) || (i >= IWL_RATE_COUNT))
- i = 0;
-
- rate = iwlegacy_rates[i].plcp;
- tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
-
- iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
- tbl->current_rate = rate;
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
- priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
-
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = priv_sta;
- int rate_idx;
-
- IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
- /* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
-
-	/* Treat uninitialized rate scaling data the same as non-existent. */
- if (lq_sta && !lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
- }
-
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- if (!lq_sta)
- return;
-
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
- /* 6M and 9M shared same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_DUP_DATA;
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
-
-}
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
-{
- struct iwl_lq_sta *lq_sta;
- struct iwl_station_priv *sta_priv =
- (struct iwl_station_priv *) sta->drv_priv;
- struct iwl_priv *priv;
-
- priv = (struct iwl_priv *)priv_rate;
- IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
- lq_sta = &sta_priv->lq_sta;
-
- return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- u8 sta_id)
-{
- int i, j;
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_station_priv *sta_priv;
- struct iwl_lq_sta *lq_sta;
- struct ieee80211_supported_band *sband;
-
- sta_priv = (struct iwl_station_priv *) sta->drv_priv;
- lq_sta = &sta_priv->lq_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
-
- lq_sta->lq.sta_id = sta_id;
-
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- IWL_DEBUG_RATE(priv, "LQ:"
- "*** rate scale station global init for station %d ***\n",
- sta_id);
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- lq_sta->is_dup = 0;
- lq_sta->max_rate_idx = -1;
- lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->band = priv->band;
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- /* as default allow aggregation for all tids */
- lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
- lq_sta->drv = priv;
-
- /* Set last_txrate_idx to lowest rate */
- lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
- lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- lq_sta->dbg_fixed_rate = 0;
-#endif
-
- iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
- u8 valid_tx_ant = 0;
- struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
- /* Override starting rate (index 0) if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Interpret new_rate (rate_n_flags) */
- iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
-
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- lq_cmd->general_params.mimo_delimiter =
- is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
- lq_cmd->general_params.single_stream_ant_msk =
- tbl_type.ant_type;
- } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
- lq_cmd->general_params.dual_stream_ant_msk =
- tbl_type.ant_type;
- } /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (priv)
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
-
- iwl4965_rs_get_tbl_info_from_mcs(new_rate,
- lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->general_params.mimo_delimiter = index;
-
- /* Get next rate */
- new_rate = iwl4965_rs_get_lower_rate(lq_sta,
- &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- /* Don't allow HT rates after next pass.
- * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
- use_ht_possible = 0;
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- index++;
- repeat_rate--;
- }
-
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
- lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
-
-static void
-*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-/* rate scaling requires a free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
- return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl_priv *priv __maybe_unused = priv_r;
-
- IWL_DEBUG_RATE(priv, "enter\n");
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{
- struct iwl_priv *priv;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
-
- priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(priv,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
- } else {
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- char buf[64];
- size_t buf_size;
- u32 parsed_rate;
- struct iwl_station_priv *sta_priv =
- container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- priv = lq_sta->drv;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x", &parsed_rate) == 1)
- lq_sta->dbg_fixed_rate = parsed_rate;
- else
- lq_sta->dbg_fixed_rate = 0;
-
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
- false);
- }
-
- return count;
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i = 0;
- int index = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
- priv = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
- desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
- desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
- }
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
- lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc, "general:"
- "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
- lq_sta->lq.general_params.flags,
- lq_sta->lq.general_params.mimo_delimiter,
- lq_sta->lq.general_params.single_stream_ant_msk,
- lq_sta->lq.general_params.dual_stream_ant_msk);
-
- desc += sprintf(buff+desc, "agg:"
- "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
- le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
- lq_sta->lq.agg_params.agg_dis_start_th,
- lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
- desc += sprintf(buff+desc,
- "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
- lq_sta->lq.general_params.start_rate_index[0],
- lq_sta->lq.general_params.start_rate_index[1],
- lq_sta->lq.general_params.start_rate_index[2],
- lq_sta->lq.general_params.start_rate_index[3]);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl4965_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
- .write = iwl4965_rs_sta_dbgfs_scale_table_write,
- .read = iwl4965_rs_sta_dbgfs_scale_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i, j;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- for (i = 0; i < LQ_SIZE; i++) {
- desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
- "rate=0x%X\n",
- lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->lq_info[i].is_dup,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
- for (j = 0; j < IWL_RATE_COUNT; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
- }
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl4965_rs_sta_dbgfs_stats_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char buff[120];
- int desc = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
- priv = lq_sta->drv;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
- &lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by the driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added, we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
- .name = IWL4965_RS_NAME,
- .tx_status = iwl4965_rs_tx_status,
- .get_rate = iwl4965_rs_get_rate,
- .rate_init = iwl4965_rs_rate_init_stub,
- .alloc = iwl4965_rs_alloc,
- .free = iwl4965_rs_free,
- .alloc_sta = iwl4965_rs_alloc_sta,
- .free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl4965_rs_add_debugfs,
- .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_4965_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv,
- "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl4965_init_sensitivity(priv);
- }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- * before the arriving beacon. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info;
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a, bcn_silence_b, bcn_silence_c;
- int last_rx_noise;
-
- rx_info = &(priv->_4965.statistics.rx.general);
- bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- * Based on the assumption that all statistics counters are DWORDs.
- * FIXME: This function is for debugging only and does not handle
- * counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i, size;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
- struct statistics_general_common *general, *accum_general;
- struct statistics_tx *tx, *accum_tx;
-
- prev_stats = (__le32 *)&priv->_4965.statistics;
- accum_stats = (u32 *)&priv->_4965.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_4965.statistics.general.common;
- accum_general = &priv->_4965.accum_statistics.general.common;
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta = (u32 *)&priv->_4965.delta_statistics;
- max_delta = (u32 *)&priv->_4965.max_delta;
-
- for (i = sizeof(__le32); i < size;
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- accum_general->temperature = general->temperature;
- accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_4965.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
- /* TODO: reading some of statistics is unneeded */
- memcpy(&priv->_4965.statistics, &pkt->u.stats,
- sizeof(priv->_4965.statistics));
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl4965_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_4965.accum_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.delta_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.max_delta, 0,
- sizeof(struct iwl_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
- int i, r;
- struct iwl_link_quality_cmd *link_cmd;
- u32 rate_flags = 0;
- __le32 rate_n_flags;
-
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
-	/* Set up the rate scaling to start at the selected rate, fall back
-	 * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
- rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
- rate_flags);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
- link_cmd->general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!link_cmd->general_params.dual_stream_ant_msk) {
- link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- link_cmd->sta_id = sta_id;
-
- return link_cmd;
-}
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r)
-{
- int ret;
- u8 sta_id;
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- /* Set up default rate scaling table in device's station table */
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for station %pM.\n",
- addr);
- return -ENOMEM;
- }
-
- ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
- if (ret)
- IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- bool send_if_empty)
-{
- int i, not_empty = 0;
- u8 buff[sizeof(struct iwl_wep_cmd) +
- sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
- struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
- size_t cmd_size = sizeof(struct iwl_wep_cmd);
- struct iwl_host_cmd cmd = {
- .id = ctx->wep_key_cmd,
- .data = wep_cmd,
- .flags = CMD_SYNC,
- };
-
- might_sleep();
-
- memset(wep_cmd, 0, cmd_size +
- (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- wep_cmd->key[i].key_index = i;
- if (ctx->wep_keys[i].key_size) {
- wep_cmd->key[i].key_offset = i;
- not_empty = 1;
- } else {
- wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
- }
-
- wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
- memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
- ctx->wep_keys[i].key_size);
- }
-
- wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
- wep_cmd->num_keys = WEP_KEYS_MAX;
-
- cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
- cmd.len = cmd_size;
-
- if (not_empty || send_if_empty)
- return iwl_legacy_send_cmd(priv, &cmd);
- else
- return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- lockdep_assert_held(&priv->mutex);
-
- return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
- keyconf->keyidx);
-
- memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- /* but keys in device are clear anyway so return success */
- return 0;
- }
- ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
- IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
- keyconf->keyidx, ret);
-
- return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (keyconf->keylen != WEP_KEY_LEN_128 &&
- keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
- return -EINVAL;
- }
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = HW_KEY_DEFAULT;
- priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
- ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
- memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
- keyconf->keylen);
-
- ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
- IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
- keyconf->keylen, keyconf->keyidx, ret);
-
- return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
- key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
- memcpy(priv->stations[sta_id].keyinfo.key,
- keyconf->key, keyconf->keylen);
-
- memcpy(&priv->stations[sta_id].sta.key.key[3],
- keyconf->key, keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- int ret = 0;
- __le16 key_flags = 0;
-
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = 16;
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-	/* This copy is actually not needed: we get the key with each TX */
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
- u8 sta_id;
- unsigned long flags;
- int i;
-
- if (iwl_legacy_scan_cancel(priv)) {
- /* cancel scan failed, just live w/ bad key and rely
- briefly on SW decryption */
- return;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
- for (i = 0; i < 5; i++)
- priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
- cpu_to_le16(phase1key[i]);
-
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- u16 key_flags;
- u8 keyidx;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys--;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
- keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
- IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
- keyconf->keyidx, sta_id);
-
- if (keyconf->keyidx != keyidx) {
-		/* We need to remove a key with an index different from the one
-		 * in the uCode. This means that the key we need to remove has
-		 * been replaced by another one with a different index.
-		 * Don't do anything and return OK.
- */
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
- IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
- keyconf->keyidx, key_flags);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
- &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- priv->stations[sta_id].sta.key.key_offset);
- memset(&priv->stations[sta_id].keyinfo, 0,
- sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags =
- STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys++;
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv,
- "Unknown alg: %s cipher = %x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv,
- "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
- false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- struct iwl_link_quality_cmd *link_cmd;
- u8 sta_id = ctx->bcast_sta_id;
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (priv->stations[sta_id].lq)
- kfree(priv->stations[sta_id].lq);
- else
- IWL_DEBUG_INFO(priv,
- "Bcast station rate scaling has not been initialized yet.\n");
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret = 0;
-
- for_each_context(priv, ctx) {
- ret = iwl4965_update_bcast_station(priv, ctx);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION)
- return -ENXIO;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
- priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
- priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638ba..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
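
Editorial aside, not part of the deleted file: the header comment above lays out a TID -> AC -> FIFO lookup. The standalone sketch below walks a QoS TID through that mapping; the TID->AC table is copied from tid_to_ac[] above, while the AC->FIFO numbers (ac2fifo) are invented placeholders for illustration.

#include <stdio.h>

/* mac80211 AC numbering quoted in the comment above: VO=0 VI=1 BE=2 BK=3 */
enum { AC_VO, AC_VI, AC_BE, AC_BK };

/* TID -> AC table, copied from tid_to_ac[] above */
static const int tid2ac[8] = { AC_BE, AC_BK, AC_BK, AC_BE,
			       AC_VI, AC_VI, AC_VO, AC_VO };

/* AC -> FIFO numbers are hypothetical placeholders, not hardware values */
static const int ac2fifo[4] = { 3, 2, 1, 0 };

static int fifo_from_tid(unsigned int tid)
{
	if (tid >= 8)
		return -1;	/* TIDs 8-15 unsupported, as in the driver */
	return ac2fifo[tid2ac[tid]];
}

int main(void)
{
	/* TID 6 is voice (AC_VO), so it lands in FIFO ac2fifo[AC_VO] */
	printf("TID 6 -> AC %d -> FIFO %d\n", tid2ac[6], fifo_from_tid(6));
	return 0;
}
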
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /**
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwlegacy_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
-
- rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
- bool is_agg = false;
-
- if (info->control.vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- hdr_len = ieee80211_hdrlen(fc);
-
-	/* For management frames use broadcast id so as not to break aggregation */
- if (!ieee80211_is_data(fc))
- sta_id = ctx->bcast_sta_id;
- else {
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
-
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
-
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
- iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
- iwl_legacy_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, firstlen,
- PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- secondlen, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, secondlen,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_legacy_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually.
- */
-
- /*
- * Avoid atomic ops if it isn't an associated client.
- * Also, if this is a packet for aggregation, don't
- * increase the counter because the ucode will stop
- * aggregation queues when their respective station
- * goes to sleep.
- */
- if (sta_priv && sta_priv->client && !is_agg)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark) &&
- priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_legacy_stop_queue(priv, txq);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
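
Editorial aside, not part of the deleted file: a minimal sketch of the sequence-field packing iwl4965_tx_skb describes above (queue id and TFD index stashed in out_cmd->hdr.sequence so the Tx response can locate the frame). The bit layout assumed here -- index in the low 8 bits, queue id in bits 8..12 -- mirrors the usual QUEUE_TO_SEQ/INDEX_TO_SEQ macros but is restated locally as an assumption.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Assumed layout: TFD index in bits 0..7, Tx queue id in bits 8..12 */
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(10) | INDEX_TO_SEQ(0x3c);

	/* The Tx response echoes this value back, so the driver can
	 * locate the original frame in its queue for post-Tx work. */
	assert(SEQ_TO_QUEUE(seq) == 10);
	assert(SEQ_TO_INDEX(seq) == 0x3c);
	printf("seq=0x%04x -> queue %d, index %d\n",
	       seq, SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
	return 0;
}
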
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
- }
- iwl4965_free_dma_ptr(priv, &priv->kw);
-
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- *
- * Allocate all Tx DMA structures and initialize them.
- *
- * Returns 0 on success, or a negative error code on failure.
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl4965_hw_txq_ctx_free(priv);
-
- ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_legacy_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_legacy_tx_queue_init(priv,
- &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl4965_hw_txq_ctx_free(priv);
- iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == priv->cmd_queue ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- }
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl4965_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_legacy_read_direct32(priv,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq)
- return;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_unmap(priv);
- else
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_legacy_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_legacy_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, sta->addr, tid);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl4965_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- iwl_legacy_set_swq_id(&priv->txq[txq_id],
- iwl4965_get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv,
- "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-/**
- * iwl4965_txq_agg_disable - deactivate an aggregation Tx queue
- *
- * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE.
- * priv->lock must be held by the caller.
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	/* assumes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
-	 * The only reason this call can fail is a queue number out of range,
-	 * which can happen if the uCode is reloaded and all the station
-	 * information is lost. If it is outside the range, there is no need
-	 * to deactivate the uCode queue; just return "success" to allow
-	 * mac80211 to clean up its own data.
- */
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
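
Editorial aside, not part of the deleted file: the ssn computation above works because the 802.11 sequence-control field keeps the fragment number in its low 4 bits and the sequence number in the upper 12, which is why the driver steps its per-TID counter by 0x10 and shifts right by 4 to recover the SSN. A tiny arithmetic sketch (local SCTL_* masks stand in for the mac80211 constants):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG	0x000f	/* fragment number, bits 0..3  */
#define SCTL_SEQ	0xfff0	/* sequence number, bits 4..15 */

int main(void)
{
	uint16_t seq_counter = 5 << 4;	/* driver counter, steps of 0x10 */
	uint16_t seq_ctrl = (seq_counter & SCTL_SEQ) | 0x2;	/* fragment 2 */

	/* SSN handed to the BA session: drop the fragment bits, shift down */
	uint16_t ssn = (seq_ctrl & SCTL_SEQ) >> 4;

	printf("seq_ctrl=0x%04x -> ssn=%d, frag=%d\n",
	       seq_ctrl, ssn, seq_ctrl & SCTL_FRAG);	/* ssn=5, frag=2 */
	return 0;
}
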
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue DELBA flow\n");
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
-}
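
Editorial aside, not part of the deleted file: a compact sketch of the aggregation state machine driven by iwl4965_tx_agg_start/stop and iwl4965_txq_check_empty above. The state names mirror the driver's; the transition helper is a simplified illustration only.

#include <stdio.h>

enum agg_state {
	AGG_OFF,			/* no session                            */
	EMPTYING_HW_QUEUE_ADDBA,	/* ADDBA requested, draining old frames  */
	AGG_ON,				/* aggregation running                   */
	EMPTYING_HW_QUEUE_DELBA,	/* DELBA requested, draining agg queue   */
};

/* Simplified: advance the state when the relevant HW queue drains. */
static enum agg_state on_queue_empty(enum agg_state s)
{
	switch (s) {
	case EMPTYING_HW_QUEUE_ADDBA:	return AGG_ON;	/* continue ADDBA flow */
	case EMPTYING_HW_QUEUE_DELBA:	return AGG_OFF;	/* continue DELBA flow */
	default:			return s;	/* nothing pending     */
	}
}

int main(void)
{
	enum agg_state s = EMPTYING_HW_QUEUE_ADDBA;

	s = on_queue_empty(s);
	printf("after drain: %s\n", s == AGG_ON ? "AGG_ON" : "other");
	return 0;
}
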
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr1)
-{
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(ctx->vif, addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
- rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
- if (!is_agg)
- iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
-
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
-
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
-
- iwl4965_tx_status(priv, tx_info,
- txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- int successes = 0;
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
- ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-	if (sh < 0) /* something is wrong with the indices */
- sh += 0x100;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- sent_bitmap = bitmap & agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- i = 0;
- while (sent_bitmap) {
- ack = sent_bitmap & 1ULL;
- successes += ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i,
- (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- sent_bitmap >>= 1;
- ++i;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
- (unsigned long long)bitmap);
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = successes;
- info->status.ampdu_len = agg->frame_count;
- iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- return 0;
-}
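
Editorial aside, not part of the deleted file: the block-ack accounting above boils down to shifting the reported bitmap so it lines up with the driver's Tx window, masking it with the frames actually sent, and counting the surviving bits as ACKs. A toy example with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ba_bitmap   = 0xf5;	/* ACK bits as reported, frame 0 = bit 0 */
	uint64_t sent_bitmap = 0xff;	/* frames the driver actually queued     */
	int sh = 0;			/* start_idx already aligned in this toy */
	int successes = 0;
	uint64_t acked = (ba_bitmap >> sh) & sent_bitmap;

	/* Walk the bits exactly like the loop above: one bit per frame. */
	for (int i = 0; acked >> i; i++)
		if ((acked >> i) & 1ULL)
			successes++;

	printf("%d of 8 frames ACKed\n", successes);	/* prints: 6 of 8 */
	return 0;
}
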
-
-/**
- * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
- unsigned long flags;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
-		 * FIXME: this is a uCode bug which needs to be addressed;
-		 * log the information and return for now.
-		 * Since it can happen very often, and in order not to
-		 * fill the syslog, don't enable the logging by default.
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
- return;
- }
-
- /* Find index just before block-ack window */
- index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
- "scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
-
- iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb9..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int ret = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL4965_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- ret = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
- u32 len)
-{
- u32 val;
- u32 save_len = len;
- int ret = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL4965_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- ret = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int ret;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_full(priv, image, len);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int i;
- u32 done;
- u32 reg_offset;
- int ret;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- priv->ucode_type = UCODE_RT;
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL49_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
- * NOTE: iwl_init_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache.
- */
- pinst = priv->ucode_init.p_addr >> 4;
- pdata = priv->ucode_init_data.p_addr >> 4;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
- ret = iwl4965_verify_bsm(priv);
- if (ret)
- return ret;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv,
- BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv,
- BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
- return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
- int ret = 0;
-
- /* bits 35:4 for 4965 */
- pinst = priv->ucode_code.p_addr >> 4;
- pdata = priv->ucode_data_backup.p_addr >> 4;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- * Voltage, temperature, and MIMO tx gain correction, now stored in priv
- * (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
-*/
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Calculate temperature */
- priv->temperature = iwl4965_hw_get_temperature(priv);
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl4965_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
-restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
- int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- return ((chan_mod == CHANNEL_MODE_PURE_40) ||
- (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
- unsigned long flags;
- u16 radio_cfg;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
- priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_legacy_eeprom_query_addr(priv,
- EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- * ... once chain noise is calibrated the first time, it's good forever. */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_legacy_is_any_associated(priv)) {
- struct iwl_calib_diff_gain_cmd cmd;
-
- /* clear data for chain noise calibration algorithm */
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = 0;
- cmd.diff_gain_b = 0;
- cmd.diff_gain_c = 0;
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd))
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
- .min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
-
- .auto_corr_min_ofdm = 85,
- .auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 105,
- .auto_corr_min_ofdm_mrc_x1 = 220,
-
- .auto_corr_max_ofdm = 120,
- .auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 140,
- .auto_corr_max_ofdm_mrc_x1 = 270,
-
- .auto_corr_min_cck = 125,
- .auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 200,
- .auto_corr_max_cck_mrc = 400,
-
- .nrg_th_cck = 100,
- .nrg_th_ofdm = 100,
-
- .barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
- .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
- /* want Kelvin */
- priv->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
- if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
- priv->cfg->mod_params->num_of_queues;
-
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwl4965_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWL4965_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
- priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
- priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- iwl4965_set_ct_threshold(priv);
-
- priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
- s32 sign = 1;
-
- if (num < 0) {
- sign = -sign;
- num = -num;
- }
- if (denom < 0) {
- sign = -sign;
- denom = -denom;
- }
- *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
- return 1;
-}
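-
-/* The helper above rounds num/denom to the nearest integer, handling the sign
- * separately: ((num * 2 + denom) / (denom * 2)) rounds the magnitude, with
- * halves rounding away from zero.  Worked examples (hypothetical inputs):
- * 7/2 -> (14 + 2) / 4 = 4;  5/2 -> (10 + 2) / 4 = 3;  -5/2 -> -3. */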
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
- s32 current_voltage)
-{
- s32 comp = 0;
-
- if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
- (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
- return 0;
-
- iwl4965_math_div_round(current_voltage - eeprom_voltage,
- TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
- if (current_voltage > eeprom_voltage)
- comp *= 2;
- if ((comp < -2) || (comp > 2))
- comp = 0;
-
- return comp;
-}
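-
-/* Worked example for the compensation above (hypothetical readings, and
- * assuming TX_POWER_IWL_VOLTAGE_CODES_PER_03V == 7): eeprom_voltage = 380 and
- * current_voltage = 350 give round(-30 / 7) = -4, which is clamped to 0
- * because it falls outside the accepted [-2, 2] range; a difference of -10
- * would instead give -1 and be used as-is. */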
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
- if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
- return CALIB_CH_GROUP_5;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
- return CALIB_CH_GROUP_1;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
- return CALIB_CH_GROUP_2;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
- return CALIB_CH_GROUP_3;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
- return CALIB_CH_GROUP_4;
-
- return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
- s32 b = -1;
-
- for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
- if (priv->calib_info->band_info[b].ch_from == 0)
- continue;
-
- if ((channel >= priv->calib_info->band_info[b].ch_from)
- && (channel <= priv->calib_info->band_info[b].ch_to))
- break;
- }
-
- return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
- s32 val;
-
- if (x2 == x1)
- return y1;
- else {
- iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
- return val + y2;
- }
-}
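-
-/* Worked example of the interpolation above (hypothetical points): with
- * (x1, y1) = (36, 10) and (x2, y2) = (48, 20), x = 40 gives
- * round((48 - 40) * (10 - 20) / (48 - 36)) + 20 = -7 + 20 = 13,
- * i.e. one third of the way from y1 toward y2, as expected. */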
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest. Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
- struct iwl_eeprom_calib_ch_info *chan_info)
-{
- s32 s = -1;
- u32 c;
- u32 m;
- const struct iwl_eeprom_calib_measure *m1;
- const struct iwl_eeprom_calib_measure *m2;
- struct iwl_eeprom_calib_measure *omeas;
- u32 ch_i1;
- u32 ch_i2;
-
- s = iwl4965_get_sub_band(priv, channel);
- if (s >= EEPROM_TX_POWER_BANDS) {
- IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
- return -1;
- }
-
- ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
- ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
- chan_info->ch_num = (u8) channel;
-
- IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
- channel, s, ch_i1, ch_i2);
-
- for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
- for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
- m1 = &(priv->calib_info->band_info[s].ch1.
- measurements[c][m]);
- m2 = &(priv->calib_info->band_info[s].ch2.
- measurements[c][m]);
- omeas = &(chan_info->measurements[c][m]);
-
- omeas->actual_pow =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->actual_pow,
- ch_i2,
- m2->actual_pow);
- omeas->gain_idx =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->gain_idx, ch_i2,
- m2->gain_idx);
- omeas->temperature =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->temperature,
- ch_i2,
- m2->temperature);
- omeas->pa_det =
- (s8) iwl4965_interpolate_value(channel, ch_i1,
- m1->pa_det, ch_i2,
- m2->pa_det);
-
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
- m1->actual_pow, m2->actual_pow, omeas->actual_pow);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
- m1->gain_idx, m2->gain_idx, omeas->gain_idx);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
- m1->pa_det, m2->pa_det, omeas->pa_det);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
- m1->temperature, m2->temperature,
- omeas->temperature);
- }
- }
-
- return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
- 10 /* CCK */
-};
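-
-/* The index into this table mirrors the txpower table built below: within
- * each row of eight OFDM entries, (i & 7) selects the bit rate and (i & 0x8)
- * selects MIMO vs. SISO, while entry 32 (POWER_TABLE_CCK_ENTRY) covers all
- * CCK rates. */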
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
- s32 degrees_per_05db_a;
- s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
- {9, 2}, /* group 0 5.2, ch 34-43 */
- {4, 1}, /* group 1 5.2, ch 44-70 */
- {4, 1}, /* group 2 5.2, ch 71-124 */
- {4, 1}, /* group 3 5.2, ch 125-200 */
- {3, 1} /* group 4 2.4, ch all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
- if (!band) {
- if ((rate_power_index & 7) <= 4)
- return MIN_TX_GAIN_INDEX_52GHZ_EXT;
- }
- return MIN_TX_GAIN_INDEX;
-}
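-
-/* (rate_power_index & 7) <= 4 above picks the lower OFDM rates (6-36 Mbit)
- * within each group of eight entries; only those rates on the 5.2 GHz band
- * (band == 0) may use the extended minimum gain index. */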
-
-struct gain_entry {
- u8 dsp;
- u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
- /* 5.2GHz power gain index table */
- {
- {123, 0x3F}, /* highest txpower */
- {117, 0x3F},
- {110, 0x3F},
- {104, 0x3F},
- {98, 0x3F},
- {110, 0x3E},
- {104, 0x3E},
- {98, 0x3E},
- {110, 0x3D},
- {104, 0x3D},
- {98, 0x3D},
- {110, 0x3C},
- {104, 0x3C},
- {98, 0x3C},
- {110, 0x3B},
- {104, 0x3B},
- {98, 0x3B},
- {110, 0x3A},
- {104, 0x3A},
- {98, 0x3A},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x25},
- {104, 0x25},
- {98, 0x25},
- {110, 0x24},
- {104, 0x24},
- {98, 0x24},
- {110, 0x23},
- {104, 0x23},
- {98, 0x23},
- {110, 0x22},
- {104, 0x18},
- {98, 0x18},
- {110, 0x17},
- {104, 0x17},
- {98, 0x17},
- {110, 0x16},
- {104, 0x16},
- {98, 0x16},
- {110, 0x15},
- {104, 0x15},
- {98, 0x15},
- {110, 0x14},
- {104, 0x14},
- {98, 0x14},
- {110, 0x13},
- {104, 0x13},
- {98, 0x13},
- {110, 0x12},
- {104, 0x08},
- {98, 0x08},
- {110, 0x07},
- {104, 0x07},
- {98, 0x07},
- {110, 0x06},
- {104, 0x06},
- {98, 0x06},
- {110, 0x05},
- {104, 0x05},
- {98, 0x05},
- {110, 0x04},
- {104, 0x04},
- {98, 0x04},
- {110, 0x03},
- {104, 0x03},
- {98, 0x03},
- {110, 0x02},
- {104, 0x02},
- {98, 0x02},
- {110, 0x01},
- {104, 0x01},
- {98, 0x01},
- {110, 0x00},
- {104, 0x00},
- {98, 0x00},
- {93, 0x00},
- {88, 0x00},
- {83, 0x00},
- {78, 0x00},
- },
- /* 2.4GHz power gain index table */
- {
- {110, 0x3f}, /* highest txpower */
- {104, 0x3f},
- {98, 0x3f},
- {110, 0x3e},
- {104, 0x3e},
- {98, 0x3e},
- {110, 0x3d},
- {104, 0x3d},
- {98, 0x3d},
- {110, 0x3c},
- {104, 0x3c},
- {98, 0x3c},
- {110, 0x3b},
- {104, 0x3b},
- {98, 0x3b},
- {110, 0x3a},
- {104, 0x3a},
- {98, 0x3a},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x6},
- {104, 0x6},
- {98, 0x6},
- {110, 0x5},
- {104, 0x5},
- {98, 0x5},
- {110, 0x4},
- {104, 0x4},
- {98, 0x4},
- {110, 0x3},
- {104, 0x3},
- {98, 0x3},
- {110, 0x2},
- {104, 0x2},
- {98, 0x2},
- {110, 0x1},
- {104, 0x1},
- {98, 0x1},
- {110, 0x0},
- {104, 0x0},
- {98, 0x0},
- {97, 0},
- {96, 0},
- {95, 0},
- {94, 0},
- {93, 0},
- {92, 0},
- {91, 0},
- {90, 0},
- {89, 0},
- {88, 0},
- {87, 0},
- {86, 0},
- {85, 0},
- {84, 0},
- {83, 0},
- {82, 0},
- {81, 0},
- {80, 0},
- {79, 0},
- {78, 0},
- {77, 0},
- {76, 0},
- {75, 0},
- {74, 0},
- {73, 0},
- {72, 0},
- {71, 0},
- {70, 0},
- {69, 0},
- {68, 0},
- {67, 0},
- {66, 0},
- {65, 0},
- {64, 0},
- {63, 0},
- {62, 0},
- {61, 0},
- {60, 0},
- {59, 0},
- }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
- u8 is_ht40, u8 ctrl_chan_high,
- struct iwl4965_tx_power_db *tx_power_tbl)
-{
- u8 saturation_power;
- s32 target_power;
- s32 user_target_power;
- s32 power_limit;
- s32 current_temp;
- s32 reg_limit;
- s32 current_regulatory;
- s32 txatten_grp = CALIB_CH_GROUP_MAX;
- int i;
- int c;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl_eeprom_calib_ch_info ch_eeprom_info;
- const struct iwl_eeprom_calib_measure *measurement;
- s16 voltage;
- s32 init_voltage;
- s32 voltage_compensation;
- s32 degrees_per_05db_num;
- s32 degrees_per_05db_denom;
- s32 factory_temp;
- s32 temperature_comp[2];
- s32 factory_gain_index[2];
- s32 factory_actual_pwr[2];
- s32 power_index;
-
- /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
- * are used for indexing into txpower table) */
- user_target_power = 2 * priv->tx_power_user_lmt;
-
- /* Get current (RXON) channel, band, width */
- IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
- is_ht40);
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -EINVAL;
-
- /* get txatten group, used to select 1) thermal txpower adjustment
- * and 2) mimo txpower balance between Tx chains. */
- txatten_grp = iwl4965_get_tx_atten_grp(channel);
- if (txatten_grp < 0) {
- IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
- channel);
- return txatten_grp;
- }
-
- IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
- channel, txatten_grp);
-
- if (is_ht40) {
- if (ctrl_chan_high)
- channel -= 2;
- else
- channel += 2;
- }
-
- /* hardware txpower limits ...
- * saturation (clipping distortion) txpowers are in half-dBm */
- if (band)
- saturation_power = priv->calib_info->saturation_power24;
- else
- saturation_power = priv->calib_info->saturation_power52;
-
- if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
- saturation_power > IWL_TX_POWER_SATURATION_MAX) {
- if (band)
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
- else
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
- }
-
- /* regulatory txpower limits ... reg_limit values are in half-dBm,
- * max_power_avg values are in dBm, convert * 2 */
- if (is_ht40)
- reg_limit = ch_info->ht40_max_power_avg * 2;
- else
- reg_limit = ch_info->max_power_avg * 2;
-
- if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
- (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
- if (band)
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
- else
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
- }
-
- /* Interpolate txpower calibration values for this channel,
- * based on factory calibration tests on spaced channels. */
- iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
- /* calculate tx gain adjustment based on power supply voltage */
- voltage = le16_to_cpu(priv->calib_info->voltage);
- init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
- voltage_compensation =
- iwl4965_get_voltage_compensation(voltage, init_voltage);
-
- IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
- init_voltage,
- voltage, voltage_compensation);
-
- /* get current temperature (Celsius) */
- current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
- current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
-
- /* select thermal txpower adjustment params, based on channel group
- * (same frequency group used for mimo txatten adjustment) */
- degrees_per_05db_num =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
- degrees_per_05db_denom =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
- /* get per-chain txpower values from factory measurements */
- for (c = 0; c < 2; c++) {
- measurement = &ch_eeprom_info.measurements[c][1];
-
- /* txgain adjustment (in half-dB steps) based on difference
- * between factory and current temperature */
- factory_temp = measurement->temperature;
- iwl4965_math_div_round((current_temp - factory_temp) *
- degrees_per_05db_denom,
- degrees_per_05db_num,
- &temperature_comp[c]);
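-
- /* For illustration (hypothetical values): in the 2.4 GHz group
- * ({3, 1} above), a die 9 degrees warmer than at factory
- * calibration yields round(9 * 1 / 3) = 3 half-dB steps of
- * compensation. */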
-
- factory_gain_index[c] = measurement->gain_idx;
- factory_actual_pwr[c] = measurement->actual_pow;
-
- IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
- IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
- "curr tmp %d, comp %d steps\n",
- factory_temp, current_temp,
- temperature_comp[c]);
-
- IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
- factory_gain_index[c],
- factory_actual_pwr[c]);
- }
-
- /* for each of 33 bit-rates (including 1 for CCK) */
- for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
- u8 is_mimo_rate;
- union iwl4965_tx_power_dual_stream tx_power;
-
- /* for mimo, reduce each chain's txpower by half
- * (3dB, 6 steps), so total output power is regulatory
- * compliant. */
- if (i & 0x8) {
- current_regulatory = reg_limit -
- IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
- is_mimo_rate = 1;
- } else {
- current_regulatory = reg_limit;
- is_mimo_rate = 0;
- }
-
- /* find txpower limit, either hardware or regulatory */
- power_limit = saturation_power - back_off_table[i];
- if (power_limit > current_regulatory)
- power_limit = current_regulatory;
-
- /* reduce user's txpower request if necessary
- * for this rate on this channel */
- target_power = user_target_power;
- if (target_power > power_limit)
- target_power = power_limit;
-
- IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
- i, saturation_power - back_off_table[i],
- current_regulatory, user_target_power,
- target_power);
-
- /* for each of 2 Tx chains (radio transmitters) */
- for (c = 0; c < 2; c++) {
- s32 atten_value;
-
- if (is_mimo_rate)
- atten_value =
- (s32)le32_to_cpu(priv->card_alive_init.
- tx_atten[txatten_grp][c]);
- else
- atten_value = 0;
-
- /* calculate index; higher index means lower txpower */
- power_index = (u8) (factory_gain_index[c] -
- (target_power -
- factory_actual_pwr[c]) -
- temperature_comp[c] -
- voltage_compensation +
- atten_value);
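-
- /* For illustration (hypothetical values): a factory gain
- * index of 100, factory power 30 half-dBm, target power
- * 24 half-dBm, 2 steps of temperature compensation and no
- * voltage or mimo attenuation give
- * 100 - (24 - 30) - 2 - 0 + 0 = 104. */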
-
-/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
- power_index); */
-
- if (power_index < get_min_power_index(i, band))
- power_index = get_min_power_index(i, band);
-
- /* adjust 5 GHz index to support negative indexes */
- if (!band)
- power_index += 9;
-
- /* CCK, rate 32, reduce txpower for CCK */
- if (i == POWER_TABLE_CCK_ENTRY)
- power_index +=
- IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
- /* stay within the table! */
- if (power_index > 107) {
- IWL_WARN(priv, "txpower index %d > 107\n",
- power_index);
- power_index = 107;
- }
- if (power_index < 0) {
- IWL_WARN(priv, "txpower index %d < 0\n",
- power_index);
- power_index = 0;
- }
-
- /* fill txpower command for this rate/chain */
- tx_power.s.radio_tx_gain[c] =
- gain_table[band][power_index].radio;
- tx_power.s.dsp_predis_atten[c] =
- gain_table[band][power_index].dsp;
-
- IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
- "gain 0x%02x dsp %d\n",
- c, atten_value, power_index,
- tx_power.s.radio_tx_gain[c],
- tx_power.s.dsp_predis_atten[c]);
- } /* for each chain */
-
- tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
- } /* for each rate */
-
- return 0;
-}
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl4965_txpowertable_cmd cmd = { 0 };
- int ret;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
- if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.channel = ctx->active.channel;
-
- ret = iwl4965_fill_txpower_tbl(priv, band,
- le16_to_cpu(ctx->active.channel),
- is_ht40, ctrl_chan_high, &cmd.tx_power);
- if (ret)
- goto out;
-
- ret = iwl_legacy_send_cmd_pdu(priv,
- REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
- return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int ret = 0;
- struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- ctx->staging.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- ctx->staging.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
- ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
-
- return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
- int ret;
- bool new_assoc =
- !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_legacy_is_alive(priv))
- return -EBUSY;
-
- if (!ctx->is_active)
- return 0;
-
- /* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
- ret = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /*
- * A commit_rxon request was received; abort any previous channel
- * switch that is still in progress.
- */
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
- (priv->switch_channel != ctx->staging.channel)) {
- IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-
- /* If we don't need to send a full RXON, we can use the RXON_ASSOC
- * command, which is used to reconfigure filter and other flags
- * for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv, ctx)) {
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
- return ret;
- }
-
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_print_rx_config_cmd(priv, ctx);
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config also wants the
- * associated mask enabled, we must clear the associated bit from the
- * active configuration before we apply the new config */
- if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd),
- active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (ret) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
- return ret;
- }
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel),
- ctx->staging.bssid_addr);
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx,
- !priv->cfg->mod_params->sw_crypto);
-
- /* Apply the new configuration.
- * RXON unassoc clears the station table in uCode, so stations must be
- * restored after the RXON command completes.
- */
- if (!new_assoc) {
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
- if (new_assoc) {
- priv->start_calib = 0;
- /* Apply the new configuration.
- * RXON assoc doesn't clear the station table in uCode, so there
- * is no need to restore stations here.
- */
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- }
- iwl_legacy_print_rx_config_cmd(priv, ctx);
-
- iwl4965_init_sensitivity(priv);
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (ret) {
- IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int rc;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl4965_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
- if (is_ht40 &&
- (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.expect_beacon = 0;
- ch = ch_switch->channel->hw_value;
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the uCode channel switch time,
- * adding TSF as one of the factors for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_legacy_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
-
- rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
- ctrl_chan_high, &cmd.tx_power);
- if (rc) {
- IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
- return rc;
- }
-
- return iwl_legacy_send_cmd_pdu(priv,
- REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int write_ptr = txq->q.write_ptr;
- int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- bc_ent = cpu_to_le16(len & 0xFFF);
- /* Set up byte count within first 256 entries */
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- /* If within first 64 entries, duplicate at end */
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
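-
-/* The duplicate entries past TFD_QUEUE_SIZE_MAX presumably let the scheduler
- * read a contiguous window of byte counts even when its read window wraps
- * past the end of the 256-entry circular buffer. */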
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver private data; the temperature reading comes from uCode statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
- s32 temperature;
- s32 vt;
- s32 R1, R2, R3;
- u32 R4;
-
- if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
- IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
- } else {
- IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
- }
-
- /*
- * Temperature is only 23 bits, so sign extend out to 32.
- *
- * NOTE If we haven't received a statistics notification yet
- * with an updated temperature, use R4 provided to us in the
- * "initialize" ALIVE response.
- */
- if (!test_bit(STATUS_TEMPERATURE, &priv->status))
- vt = sign_extend32(R4, 23);
- else
- vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
- general.common.temperature), 23);
-
- IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
- if (R3 == R1) {
- IWL_ERR(priv, "Calibration conflict R1 == R3\n");
- return -1;
- }
-
- /* Calculate temperature in degrees Kelvin, adjust by 97%.
- * Add offset to center the adjustment around 0 degrees Centigrade. */
- temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
- temperature /= (R3 - R1);
- temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
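- /* Effectively: T(Kelvin) ~ 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2) /
- * (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET */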
-
- IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
- temperature, KELVIN_TO_CELSIUS(temperature));
-
- return temperature;
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD 3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- if (!test_bit(STATUS_STATISTICS, &priv->status)) {
- IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
- return 0;
- }
-
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
- if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
- return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
- s32 temp;
-
- temp = iwl4965_hw_get_temperature(priv);
- if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
- return;
-
- if (priv->temperature != temp) {
- if (priv->temperature)
- IWL_DEBUG_TEMP(priv, "Temperature changed "
- "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(priv->temperature),
- KELVIN_TO_CELSIUS(temp));
- else
- IWL_DEBUG_TEMP(priv, "Temperature "
- "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
- }
-
- priv->temperature = temp;
- set_bit(STATUS_TEMPERATURE, &priv->status);
-
- if (!priv->disable_tx_power_cal &&
- unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- iwl4965_is_temp_calib_needed(priv))
- queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return (u16) sizeof(struct iwl4965_rxon_cmd);
- default:
- return len;
- }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cmd->tid_disable_tx;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
- addsta->sleep_tx_count = cmd->sleep_tx_count;
- addsta->reserved1 = cpu_to_le16(0);
- addsta->reserved2 = cpu_to_le16(0);
-
- return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl4965_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = tx_resp->u.agg_status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* num frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
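-
- /* Worked example (hypothetical sequence): with start at 5 and frames
- * at indexes 5, 6 and 8 inside the Tx window, the shifts above set
- * bits 0, 1 and 3, giving bitmap = 0xb. */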
-
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- /*
- * It is possible that more commands interacting with stations
- * arrive before we have completed processing the addition of a
- * station.
- */
- if (ret != IWL_INVALID_STATION &&
- (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
- ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
- (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
- IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
- ret);
- ret = IWL_INVALID_STATION;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl4965_find_station(priv, da);
- }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info;
- struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
- int sta_id;
- int freed;
- u8 *qc = NULL;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- memset(&info->status, 0, sizeof(info->status));
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- }
-
- sta_id = iwl4965_get_ra_sta_id(priv, hdr);
- if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
- IWL_ERR(priv, "Station not known\n");
- return;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
- WARN_ON(!qc);
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
- txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
- "%d index %d\n", scd_ssn , index);
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc)
- iwl4965_free_tfds_in_queue(priv, sta_id,
- tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
- && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
- }
- } else {
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
- "rate_n_flags 0x%x retries %d\n",
- txq_id,
- iwl4965_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
- else if (sta_id == IWL_INVALID_STATION)
- IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
- iwl_legacy_wake_queue(priv, txq);
- }
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
- u8 rate __maybe_unused =
- iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
- "tsf:0x%.8x%.8x rate:%d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
- /* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
- /* Tx response */
- priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
- .rxon_assoc = iwl4965_send_rxon_assoc,
- .commit_rxon = iwl4965_commit_rxon,
- .set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
-
- if (!vif || !priv->is_open)
- return;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl_legacy_commit_rxon(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl4965_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
- break;
- }
-
- /* The chain noise calibration will enable PM upon completion.
- * If chain noise has already been run, then we need to enable
- * power management here. */
- if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_legacy_power_update_mode(priv, false);
-
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl4965_chain_noise_reset(priv);
- priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int ret = 0;
-
- lockdep_assert_held(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!iwl_legacy_is_associated_ctx(ctx)) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing failed - "
- "Attempting to continue.\n");
-
- /* AP has all antennas */
- priv->chain_noise_data.active_chains =
- priv->hw_params.valid_rx_ant;
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* need to send beacon cmd before committing assoc RXON! */
- iwl4965_send_beacon_cmd(priv);
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
- }
- iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
- .get_hcmd_size = iwl4965_get_hcmd_size,
- .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .request_scan = iwl4965_request_scan,
- .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
- .set_hw_params = iwl4965_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl4965_hw_txq_free_tfd,
- .txq_init = iwl4965_hw_tx_queue_init,
- .rx_handler_setup = iwl4965_rx_handler_setup,
- .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .init_alive_start = iwl4965_init_alive_start,
- .load_ucode = iwl4965_load_bsm,
- .dump_nic_error_log = iwl4965_dump_nic_error_log,
- .dump_fh = iwl4965_dump_fh,
- .set_channel_switch = iwl4965_hw_channel_switch,
- .apm_ops = {
- .init = iwl_legacy_apm_init,
- .config = iwl4965_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
- },
- .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
- .release_semaphore = iwl4965_eeprom_release_semaphore,
- },
- .send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl4965_update_chain_flags,
- .temp_ops = {
- .temperature = iwl4965_temperature_calib,
- },
- .debugfs_ops = {
- .rx_stats_read = iwl4965_ucode_rx_stats_read,
- .tx_stats_read = iwl4965_ucode_tx_stats_read,
- .general_stats_read = iwl4965_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
- .post_associate = iwl4965_post_associate,
- .config_ap = iwl4965_config_ap,
- .manage_ibss_station = iwl4965_manage_ibss_station,
- .update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
- .tx = iwl4965_mac_tx,
- .start = iwl4965_mac_start,
- .stop = iwl4965_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl4965_configure_filter,
- .set_key = iwl4965_mac_set_key,
- .update_tkip_key = iwl4965_mac_update_tkip_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .ampdu_action = iwl4965_mac_ampdu_action,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl4965_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .channel_switch = iwl4965_mac_channel_switch,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
- .lib = &iwl4965_lib,
- .hcmd = &iwl4965_hcmd,
- .utils = &iwl4965_hcmd_utils,
- .led = &iwl4965_led_ops,
- .legacy = &iwl4965_legacy_ops,
- .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
- .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
- .num_of_queues = IWL49_NUM_QUEUES,
- .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .pll_cfg_val = 0,
- .set_l0s = true,
- .use_bsm = true,
- .led_compensation = 61,
- .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .temperature_kelvin = true,
- .ucode_tracing = true,
- .sensitivity_calib_by_driver = true,
- .chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
- .name = "Intel(R) Wireless WiFi Link 4965AGN",
- .fw_name_pre = IWL4965_FW_PRE,
- .ucode_api_max = IWL4965_UCODE_API_MAX,
- .ucode_api_min = IWL4965_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_ABC,
- .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
- .ops = &iwl4965_ops,
- .mod_params = &iwl4965_mod_params,
- .base_params = &iwl4965_base_params,
- .led_mode = IWL_LED_BLINK,
- /*
- * Force use of chains B and C for scan RX on 5 GHz band
- * because the device has off-channel reception on chain A.
- */
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf1..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function !
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
- int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
- int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
- return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
- return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
- return cpu_to_le32(flags|(u32)rate);
-}
-
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * When bt_coex_active is set to true, the uCode will kill/defer
- * every time the priority line is asserted (BT is sending signals on the
- * priority line in the PCIx).
- * When bt_coex_active is set to false, the uCode will ignore BT activity
- * and perform normal operation.
- *
- * Users might experience transmit issues on some platforms due to a WiFi/BT
- * coexistence problem. The possible behaviors are:
- * Able to scan and find all the available APs
- * Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by setting the
- * "bt_coex_active" module parameter to "false"
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
-
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
- cfg->ops->ieee80211_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
-
- priv = hw->priv;
- priv->hw = hw;
-
-out:
- return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on eeprom
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = 0;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- if (!iwl_legacy_is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (iwl_legacy_is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & IWL_SKU_A) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
- priv->pci_dev->device,
- priv->pci_dev->subsystem_device);
- priv->cfg->sku &= ~IWL_SKU_A;
- }
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
-{
- if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
- return false;
-
- /*
- * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- * the bit will not be set in the pure 40 MHz case.
- */
- if (ht_cap && !ht_cap->ht_supported)
- return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- if (priv->disable_ht40)
- return false;
-#endif
-
- return iwl_legacy_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
- u16 new_val;
- u16 beacon_factor;
-
- /*
- * If mac80211 hasn't given us a beacon interval, program
- * the default into the device.
- */
- if (!beacon_val)
- return DEFAULT_BEACON_INTERVAL;
-
- /*
- * If the beacon interval we obtained from the peer
- * is too large, we'll have to wake up more often
- * (and in IBSS case, we'll beacon too much)
- *
- * For example, if max_beacon_val is 4096, and the
- * requested beacon interval is 7000, we'll have to
- * use 3500 to be able to wake up on the beacons.
- *
- * This could badly influence beacon detection stats.
- */
-
- beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
- new_val = beacon_val / beacon_factor;
-
- if (!new_val)
- new_val = max_beacon_val;
-
- return new_val;
-}
-
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- u64 tsf;
- s32 interval_tm, rem;
- struct ieee80211_conf *conf = NULL;
- u16 beacon_int;
- struct ieee80211_vif *vif = ctx->vif;
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- lockdep_assert_held(&priv->mutex);
-
- memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
- ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
- ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
- beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
- /*
- * TODO: For IBSS we need to get atim_window from mac80211,
- * for now just always use 0
- */
- ctx->timing.atim_window = 0;
-
- beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
- ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
- tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
- interval_tm = beacon_int * TIME_UNIT;
- rem = do_div(tsf, interval_tm);
- ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
- ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
- IWL_DEBUG_ASSOC(priv,
- "beacon interval %d beacon timer %d beacon tim %d\n",
- le16_to_cpu(ctx->timing.beacon_interval),
- le32_to_cpu(ctx->timing.beacon_init_val),
- le16_to_cpu(ctx->timing.atim_window));
-
- return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (hw_decrypt)
- rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
- else
- rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* validate RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
- bool error = false;
-
- if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
- if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong narrow\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong radar\n");
- error = true;
- }
- } else {
- if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "check 5.2G: not short slot!\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_CCK_MSK) {
- IWL_WARN(priv, "check 5.2G: CCK!\n");
- error = true;
- }
- }
- if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
- IWL_WARN(priv, "mac/bssid mcast!\n");
- error = true;
- }
-
- /* make sure basic rates 6Mbps and 1Mbps are supported */
- if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
- (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
- IWL_WARN(priv, "neither 1 nor 6 are basic\n");
- error = true;
- }
-
- if (le16_to_cpu(rxon->assoc_id) > 2007) {
- IWL_WARN(priv, "aid > 2007\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "CCK and short slot\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
- RXON_FLG_TGG_PROTECT_MSK)) ==
- RXON_FLG_TGG_PROTECT_MSK) {
- IWL_WARN(priv, "TGg but no auto-detect\n");
- error = true;
- }
-
- if (error)
- IWL_WARN(priv, "Tuning to channel %d\n",
- le16_to_cpu(rxon->channel));
-
- if (error) {
- IWL_ERR(priv, "Invalid RXON\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond) \
- if ((cond)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
- return 1; \
- }
-
-#define CHK_NEQ(c1, c2) \
- if ((c1) != (c2)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " \
- #c1 " != " #c2 " - %d != %d\n", \
- (c1), (c2)); \
- return 1; \
- }
-
- /* These items are only settable from the full RXON command */
- CHK(!iwl_legacy_is_associated_ctx(ctx));
- CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
- CHK(compare_ether_addr(staging->node_addr, active->node_addr));
- CHK(compare_ether_addr(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
- CHK_NEQ(staging->dev_type, active->dev_type);
- CHK_NEQ(staging->channel, active->channel);
- CHK_NEQ(staging->air_propagation, active->air_propagation);
- CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
- active->ofdm_ht_single_stream_basic_rates);
- CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
- active->ofdm_ht_dual_stream_basic_rates);
- CHK_NEQ(staging->assoc_id, active->assoc_id);
-
- /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
- * be updated with the RXON_ASSOC command -- however only some
- * flag transitions are allowed using RXON_ASSOC */
-
- /* Check if we are not switching bands */
- CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
- active->flags & RXON_FLG_BAND_24G_MSK);
-
- /* Check if we are switching association toggle */
- CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
- active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- /*
- * Assign the lowest rate -- should really get this from
- * the beacon skb from mac80211.
- */
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
- return IWL_RATE_1M_PLCP;
- else
- return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (!ctx->ht.enabled) {
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
- RXON_FLG_HT40_PROT_MSK |
- RXON_FLG_HT_PROT_MSK);
- return;
- }
-
- rxon->flags |= cpu_to_le32(ctx->ht.protection <<
- RXON_FLG_HT_OPERATING_MODE_POS);
-
- /* Set up channel bandwidth:
- * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
- /* clear the HT channel mode before setting the mode */
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
- /* pure ht40 */
- if (ctx->ht.protection ==
- IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- }
- } else {
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- default:
- /* channel location only valid if in Mixed mode */
- IWL_ERR(priv,
- "invalid extension channel offset\n");
- break;
- }
- }
- } else {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
- }
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
- "extension channel offset 0x%x\n",
- le32_to_cpu(rxon->flags), ctx->ht.protection,
- ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
- struct iwl_rxon_context *ctx;
-
- for_each_context(priv, ctx)
- _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
-{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
- struct iwl_rxon_context *ctx;
-
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- bool busy = false;
-
- for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
- le16_to_cpu(ctx->staging.channel);
- if (busy)
- break;
- }
-
- if (busy)
- continue;
-
- channel = priv->channel_info[i].channel;
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (iwl_legacy_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE: Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx)
-{
- enum ieee80211_band band = ch->band;
- u16 channel = ch->hw_value;
-
- if ((le16_to_cpu(ctx->staging.channel) == channel) &&
- (priv->band == band))
- return 0;
-
- ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
- ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
- else
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
- priv->band = band;
-
- IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- if (band == IEEE80211_BAND_5GHZ) {
- ctx->staging.flags &=
- ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
- | RXON_FLG_CCK_MSK);
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- } else {
- /* Copied from iwl_post_associate() */
- if (vif && vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
- ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
- ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_channel_info *ch_info;
-
- memset(&ctx->staging, 0, sizeof(ctx->staging));
-
- if (!ctx->vif) {
- ctx->staging.dev_type = ctx->unused_devtype;
- } else
- switch (ctx->vif->type) {
-
- case NL80211_IFTYPE_STATION:
- ctx->staging.dev_type = ctx->station_devtype;
- ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- case NL80211_IFTYPE_ADHOC:
- ctx->staging.dev_type = ctx->ibss_devtype;
- ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
- ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
- RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- default:
- IWL_ERR(priv, "Unsupported interface type %d\n",
- ctx->vif->type);
- break;
- }
-
-#if 0
- /* TODO: Figure out when short_preamble would be set and cache from
- * that */
- if (!hw_to_local(priv->hw)->short_preamble)
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
-
- iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- ctx->staging.cck_basic_rates =
- (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- /* clear both MIX and PURE40 mode flag */
- ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
- RXON_FLG_CHANNEL_MODE_PURE_40);
- if (ctx->vif)
- memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
- ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
- ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
- const struct ieee80211_supported_band *hw = NULL;
- struct ieee80211_rate *rate;
- struct iwl_rxon_context *ctx;
- int i;
-
- hw = iwl_get_hw_mode(priv, priv->band);
- if (!hw) {
- IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
- return;
- }
-
- priv->active_rate = 0;
-
- for (i = 0; i < hw->n_bitrates; i++) {
- rate = &(hw->bitrates[i]);
- if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
- priv->active_rate |= (1 << rate->hw_value);
- }
-
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
- for_each_context(priv, ctx) {
- ctx->staging.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
-
- if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
- iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
- IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
- le16_to_cpu(rxon->channel));
- IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
- IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
- le32_to_cpu(rxon->filter_flags));
- IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
- IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
- rxon->ofdm_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
- rxon->cck_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
- IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
- IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
- le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- /* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- priv->cfg->ops->lib->dump_nic_error_log(priv);
- if (priv->cfg->ops->lib->dump_fh)
- priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_legacy_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- wake_up(&priv->wait_command_queue);
-
- /* Keep the restart process from trying to send host
- * commands by clearing the INIT status bit */
- clear_bit(STATUS_READY, &priv->status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
- "Restarting adapter due to uCode error.\n");
-
- if (priv->cfg->mod_params->restart_fw)
- queue_work(priv->workqueue, &priv->restart);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* stop device's busmaster DMA activity */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
- IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(priv, "stop master\n");
-
- return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
- /* Stop device's DMA activity */
- iwl_legacy_apm_stop_master(priv);
-
- /* Reset the entire device */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
- int ret = 0;
- u16 lctl;
-
- IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
- CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- * NOTE: This is a no-op for 3945 (non-existent bit)
- */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
- /*
- * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- if (priv->cfg->base_params->set_l0s) {
- lctl = iwl_legacy_pcie_link_ctl(priv);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_legacy_set_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_legacy_clear_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
- }
- }
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
- iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_legacy_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(priv, "Failed to init the card\n");
- goto out;
- }
-
- /*
- * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
- * BSM (Bootstrap State Machine) is only in 3945 and 4965.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
- */
- if (priv->cfg->base_params->use_bsm)
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
- else
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
- int ret;
- s8 prev_tx_power;
- bool defer;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->tx_power_user_lmt == tx_power && !force)
- return 0;
-
- if (!priv->cfg->ops->lib->send_tx_power)
- return -EOPNOTSUPP;
-
- /* 0 dBm means 1 milliwatt */
- if (tx_power < 0) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d below 1 mW.\n",
- tx_power);
- return -EINVAL;
- }
-
- if (tx_power > priv->tx_power_device_lmt) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
- /* scan complete and commit_rxon use the tx_power_next value,
- * so it always needs to be updated with the newest request */
- priv->tx_power_next = tx_power;
-
- /* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
- memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
- if (defer && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
- return 0;
- }
-
- prev_tx_power = priv->tx_power_user_lmt;
- priv->tx_power_user_lmt = tx_power;
-
- ret = priv->cfg->ops->lib->send_tx_power(priv);
-
- /* if setting tx_power fails, restore the original tx power */
- if (ret) {
- priv->tx_power_user_lmt = prev_tx_power;
- priv->tx_power_next = prev_tx_power;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
- struct iwl_bt_cmd bt_cmd = {
- .lead_time = BT_LEAD_TIME_DEF,
- .max_kill = BT_MAX_KILL_DEF,
- .kill_ack_mask = 0,
- .kill_cts_mask = 0,
- };
-
- if (!bt_coex_active)
- bt_cmd.flags = BT_COEX_DISABLE;
- else
- bt_cmd.flags = BT_COEX_ENABLE;
-
- IWL_DEBUG_INFO(priv, "BT coex %s\n",
- (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd))
- IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
- struct iwl_statistics_cmd statistics_cmd = {
- .configuration_flags =
- clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
- };
-
- if (flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd, NULL);
- else
- return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_legacy_connection_init_rx_config(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_legacy_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(vif->type)))
- continue;
-
- /* found a possibly usable context without an interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_legacy_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_legacy_scan_cancel_timeout(priv, 200);
- iwl_legacy_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_legacy_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
-
- iwl_legacy_teardown_interface(priv, vif, false);
-
- memset(priv->bssid, 0, ETH_ALEN);
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
- if (!priv->txq)
- priv->txq = kzalloc(
- sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues,
- GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
- kfree(priv->txq);
- priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
- priv->tx_traffic_idx = 0;
- priv->rx_traffic_idx = 0;
- if (priv->tx_traffic)
- memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
- if (priv->rx_traffic)
- memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
- if (iwlegacy_debug_level & IWL_DL_TX) {
- if (!priv->tx_traffic) {
- priv->tx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->tx_traffic)
- return -ENOMEM;
- }
- }
- if (iwlegacy_debug_level & IWL_DL_RX) {
- if (!priv->rx_traffic) {
- priv->rx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->rx_traffic)
- return -ENOMEM;
- }
- }
- iwl_legacy_reset_traffic_log(priv);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
- kfree(priv->tx_traffic);
- priv->tx_traffic = NULL;
-
- kfree(priv->rx_traffic);
- priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
- return;
-
- if (!priv->tx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->tx_traffic +
- (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->tx_traffic_idx =
- (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
- return;
-
- if (!priv->rx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->rx_traffic +
- (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->rx_traffic_idx =
- (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(MANAGEMENT_ASSOC_REQ);
- IWL_CMD(MANAGEMENT_ASSOC_RESP);
- IWL_CMD(MANAGEMENT_REASSOC_REQ);
- IWL_CMD(MANAGEMENT_REASSOC_RESP);
- IWL_CMD(MANAGEMENT_PROBE_REQ);
- IWL_CMD(MANAGEMENT_PROBE_RESP);
- IWL_CMD(MANAGEMENT_BEACON);
- IWL_CMD(MANAGEMENT_ATIM);
- IWL_CMD(MANAGEMENT_DISASSOC);
- IWL_CMD(MANAGEMENT_AUTH);
- IWL_CMD(MANAGEMENT_DEAUTH);
- IWL_CMD(MANAGEMENT_ACTION);
- default:
- return "UNKNOWN";
-
- }
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CONTROL_BACK_REQ);
- IWL_CMD(CONTROL_BACK);
- IWL_CMD(CONTROL_PSPOLL);
- IWL_CMD(CONTROL_RTS);
- IWL_CMD(CONTROL_CTS);
- IWL_CMD(CONTROL_ACK);
- IWL_CMD(CONTROL_CFEND);
- IWL_CMD(CONTROL_CFENDACK);
- default:
- return "UNKNOWN";
-
- }
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
- memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
- memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, the
- * iwl_legacy_update_stats function will record all MGMT, CTRL and
- * DATA packets for both the TX and RX paths; use debugfs to display
- * the tx/rx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
- * information will be recorded, but DATA packets will still be counted,
- * because iwl_led.c needs to control the LED blinking based on the
- * number of tx and rx data frames.
- *
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
- stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- stats->mgmt[MANAGEMENT_PROBE_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
- stats->mgmt[MANAGEMENT_PROBE_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BEACON):
- stats->mgmt[MANAGEMENT_BEACON]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ATIM):
- stats->mgmt[MANAGEMENT_ATIM]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
- stats->mgmt[MANAGEMENT_DISASSOC]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- stats->mgmt[MANAGEMENT_AUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- stats->mgmt[MANAGEMENT_DEAUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACTION):
- stats->mgmt[MANAGEMENT_ACTION]++;
- break;
- }
- } else if (ieee80211_is_ctl(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
- stats->ctrl[CONTROL_BACK_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BACK):
- stats->ctrl[CONTROL_BACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
- stats->ctrl[CONTROL_PSPOLL]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_RTS):
- stats->ctrl[CONTROL_RTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CTS):
- stats->ctrl[CONTROL_CTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACK):
- stats->ctrl[CONTROL_ACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFEND):
- stats->ctrl[CONTROL_CFEND]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
- stats->ctrl[CONTROL_CFENDACK]++;
- break;
- }
- } else {
- /* data */
- stats->data_cnt++;
- stats->data_bytes += len;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
- struct iwl_force_reset *force_reset;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- force_reset = &priv->force_reset;
- force_reset->reset_request_count++;
- if (!external) {
- if (force_reset->last_force_reset_jiffies &&
- time_after(force_reset->last_force_reset_jiffies +
- force_reset->reset_duration, jiffies)) {
- IWL_DEBUG_INFO(priv, "force reset rejected\n");
- force_reset->reset_reject_count++;
- return -EAGAIN;
- }
- }
- force_reset->reset_success_count++;
- force_reset->last_force_reset_jiffies = jiffies;
-
- /*
- * If the request is external (e.g. from debugfs), always perform
- * it, regardless of the module parameter setting.
- * If the request is internal (uCode error or driver-detected
- * failure), the fw_restart module parameter must be checked
- * before performing the firmware reload.
- */
-
- if (!external && !priv->cfg->mod_params->restart_fw) {
- IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
- "module parameter setting\n");
- return 0;
- }
-
- IWL_ERR(priv, "On demand firmware reload\n");
-
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up(&priv->wait_command_queue);
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the INIT status bit
- */
- clear_bit(STATUS_READY, &priv->status);
- queue_work(priv->workqueue, &priv->restart);
-
- return 0;
-}
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *tmp;
- u32 interface_modes;
- int err;
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->mutex);
-
- if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_legacy_teardown_interface(priv, vif, true);
- vif->type = newtype;
- vif->p2p = newp2p;
- err = iwl_legacy_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->mutex);
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the queue's latest time stamp. If it
- * has not changed during the timeout period and the queue is not empty,
- * we reset the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
- int ret;
-
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
- ret = iwl_legacy_force_reset(priv, false);
- return (ret == -EAGAIN) ? 0 : 1;
- }
-
- return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we will
- * discover a hung queue between timeout and 1.25*timeout
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
-
-/*
- * Watchdog timer callback: we check each TX queue for a stall; if one is
- * hung, we reset the firmware. If everything is fine, just rearm the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
- int cnt;
- unsigned long timeout;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- timeout = priv->cfg->base_params->wd_timeout;
- if (timeout == 0)
- return;
-
- /* monitor and check for stuck cmd queue */
- if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_legacy_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
- continue;
- if (iwl_legacy_check_stuck_queue(priv, cnt))
- return;
- }
- }
-
- mod_timer(&priv->watchdog, jiffies +
- msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * the time in usec is converted into a 32-bit value in extended:internal
- * format, where the extended part is the beacon count and the internal
- * part is the time in usec within one beacon interval.
- */
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * TIME_UNIT;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) &
- (iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
- rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
-
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from the uCode with each received frame,
- * i.e. the HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
- (addon & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
- } else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
- return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
- /*
- * This function is called when the system goes into suspend state.
- * mac80211 will have called iwl_mac_stop() from its suspend handler
- * first, but since iwl_mac_stop() has no knowledge of who the caller
- * is, it will not call apm_ops.stop() to stop the DMA operation.
- * Call apm_ops.stop() here to make sure the DMA is stopped.
- */
- iwl_legacy_apm_stop(priv);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- bool hw_rfkill = false;
-
- /*
- * We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl_legacy_enable_interrupts(priv);
-
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
-
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
- .suspend = iwl_legacy_pci_suspend,
- .resume = iwl_legacy_pci_resume,
- .freeze = iwl_legacy_pci_suspend,
- .thaw = iwl_legacy_pci_resume,
- .poweroff = iwl_legacy_pci_suspend,
- .restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "scan active\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- /* mac80211 uses static for non-HT which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* During scanning, mac80211 delays the channel setting until the
- * scan finishes (with changed = 0).
- */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !iwl_legacy_is_channel_ibss(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
- ht_changed[ctx->ctxid] = true;
- }
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection =
- IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- /* If we are switching from HT to 2.4 GHz, clear flags from any
- * HT-related info since 2.4 GHz does not support HT. */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
-
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->legacy->update_bcast_stations)
- ret =
- priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_legacy_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_legacy_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_legacy_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- if (ht_changed[ctx->ctxid])
- iwl_legacy_update_qos(priv, ctx);
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* new association: get rid of the IBSS beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_legacy_scan_cancel_timeout(priv, 100);
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* We are restarting the association process;
- * clear the RXON_FILTER_ASSOC_MSK bit.
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- iwl_legacy_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_ASSOC(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
- /*
- * If at all, this can only happen through a race
- * when the AP disconnects us while we're still
- * setting up the connection, in that case mac80211
- * will soon tell us about that.
- */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- int ret;
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_alive(priv)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_legacy_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv,
- "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv,
- "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
- /*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwl_legacy_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv,
- "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_legacy_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_legacy_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- if (!iwl_legacy_is_rfkill(priv))
- priv->cfg->ops->legacy->post_associate(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_legacy_rxon_cmd));
- }
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- priv->cfg->ops->legacy->config_ap(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
- }
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv,
- "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
-
- inta &= ~CSR_INT_BIT_SCD;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only the 3945 and 4965
- * share this function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags &
- IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
- .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
-
-#define TIME_UNIT 1024
-
-#define IWL_SKU_G 0x1
-#define IWL_SKU_A 0x2
-#define IWL_SKU_N 0x8
-
-#define IWL_CMD(x) case x: return #x
-
-struct iwl_hcmd_ops {
- int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- void (*set_rxon_chain)(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
- u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
- u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data);
- int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
- int (*init)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
- ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
- void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
- /* set hw dependent parameters */
- int (*set_hw_params)(struct iwl_priv *priv);
- /* Handling TX */
- void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
- int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr,
- u16 len, u8 reset, u8 pad);
- void (*txq_free_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- int (*txq_init)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- /* setup Rx handler */
- void (*rx_handler_setup)(struct iwl_priv *priv);
- /* alive notification after init uCode load */
- void (*init_alive_start)(struct iwl_priv *priv);
- /* check validity of rtc data address */
- int (*is_valid_rtc_data_addr)(u32 addr);
- /* 1st ucode load */
- int (*load_ucode)(struct iwl_priv *priv);
-
- void (*dump_nic_error_log)(struct iwl_priv *priv);
- int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
- int (*set_channel_switch)(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch);
- /* power management */
- struct iwl_apm_ops apm_ops;
-
- /* power */
- int (*send_tx_power) (struct iwl_priv *priv);
- void (*update_chain_flags)(struct iwl_priv *priv);
-
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
- /* temperature */
- struct iwl_temp_ops temp_ops;
-
- struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
- int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
- void (*post_associate)(struct iwl_priv *priv);
- void (*config_ap)(struct iwl_priv *priv);
- /* station management */
- int (*update_bcast_stations)(struct iwl_priv *priv);
- int (*manage_ibss_station)(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
- const struct iwl_lib_ops *lib;
- const struct iwl_hcmd_ops *hcmd;
- const struct iwl_hcmd_utils_ops *utils;
- const struct iwl_led_ops *led;
- const struct iwl_nic_ops *nic;
- const struct iwl_legacy_ops *legacy;
- const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int disable_hw_scan; /* def: 0 = use h/w scan */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the LED on/off time per HW according
- * to the deviation, to achieve the desired LED frequency.
- * The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature reported by uCode in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- * sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- * chain noise calibration operation
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_legacy_apm_init() */
- u32 pll_cfg_val;
- bool set_l0s;
- bool use_bsm;
-
- u16 led_compensation;
- int chain_noise_num_beacons;
- unsigned int wd_timeout;
- bool temperature_kelvin;
- const bool ucode_tracing;
- const bool sensitivity_calib_by_driver;
- const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as their API
- * versions.
- *
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- unsigned int sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_ops *ops;
- /* module based parameters which can be set from modprobe cmd */
- const struct iwl_mod_params *mod_params;
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
-};
-
-/***************************
- * L i b *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
- struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
- u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
- __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
- struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, the scan will move to the next channel if fewer
- * than PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending a probe request. This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable the "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
-#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
-
-/*****************************************************
- * S e n d i n g H o s t C o m m a n d s *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
- struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
- u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
- const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
- int pos;
- u16 pci_lnk_ctl;
- pos = pci_pcie_cap(priv->pci_dev);
- pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-* GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_legacy_is_rfkill(priv))
- return 0;
-
- return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
- u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
-{
- return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701b..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len) \
-do { \
- print_hex_dump(KERN_ERR, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
-do { \
- if (iwl_legacy_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
-do { \
- if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len) \
-do { \
- if (iwl_legacy_get_debug_level(priv) & level) \
- print_hex_dump(KERN_DEBUG, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
- * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- * /sys/module/iwl4965/parameters/debug{50}
- * /sys/module/iwl3945/parameters/debug
- * /sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
-
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-#define IWL_DL_NOTIF (1 << 10)
-#define IWL_DL_SCAN (1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-#define IWL_DL_TXPOWER (1 << 14)
-#define IWL_DL_AP (1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
-#define IWL_DL_IO (1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
-
-#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...) \
- IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 1407dca70de..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
- &iwl_legacy_dbgfs_##name##_ops)) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
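To make the token pasting above easier to follow, DEBUGFS_READ_FILE_OPS(status) expands to approximately the following (a hand expansion for illustration; the file itself relies on the macro):

static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos);
static const struct file_operations iwl_legacy_dbgfs_status_ops = {
	.read = iwl_legacy_dbgfs_status_read,
	.open = iwl_legacy_dbgfs_open_file_generic,
	.llseek = generic_file_llseek,
};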
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
-
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->tx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->tx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->tx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->tx_stats.data_bytes);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 clear_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &clear_flag) != 1)
- return -EFAULT;
- iwl_legacy_clear_traffic_stats(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->rx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->rx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->rx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->rx_stats.data_bytes);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-#define BYTE1_MASK 0x000000ff
-#define BYTE2_MASK 0x0000ffff
-#define BYTE3_MASK 0x00ffffff
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- u32 val;
- char *buf;
- ssize_t ret;
- int i;
- int pos = 0;
- struct iwl_priv *priv = file->private_data;
- size_t bufsz;
-
- /* default is to dump the entire data segment */
- if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init_data.len;
- else
- priv->dbgfs_sram_len = priv->ucode_data.len;
- }
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
- }
- if (!(i % 16))
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[64];
- int buf_size;
- u32 offset, len;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs_sram_offset = offset;
- priv->dbgfs_sram_len = len;
- } else {
- priv->dbgfs_sram_offset = 0;
- priv->dbgfs_sram_len = 0;
- }
-
- return count;
-}
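The sram read/write pair above is driven from userspace by writing an "offset,len" hex pair and then reading the file back. A sketch of that interaction, assuming debugfs is mounted at /sys/kernel/debug and that the directory name passed to iwl_legacy_dbgfs_register() was "iwl4965" (both paths are assumptions):

/* Sketch: select an SRAM window, then dump it through the debugfs file. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/iwl4965/data/sram";
	char line[256];
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "800000,40\n");	/* matches the "%x,%x" parse above */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* hex dump produced by sram_read */
	fclose(f);
	return 0;
}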
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
- char *buf;
- int i, j, pos = 0;
- ssize_t ret;
- /* Add 30 for initial string */
- const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
- priv->num_stations);
-
- for (i = 0; i < max_sta; i++) {
- station = &priv->stations[i];
- if (!station->used)
- continue;
- pos += scnprintf(buf + pos, bufsz - pos,
- "station %d - addr: %pM, flags: %#x\n",
- i, station->sta.sta.addr,
- station->sta.station_flags_msk);
- pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
-
- for (j = 0; j < MAX_TID_COUNT; j++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
- pos += scnprintf(buf + pos, bufsz - pos,
- " - waitforba");
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- ssize_t ret;
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0, buf_size = 0;
- const u8 *ptr;
- char *buf;
- u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
- buf_size = 4 * eeprom_len + 256;
-
- if (eeprom_len % 16) {
- IWL_ERR(priv, "NVM size is not multiple of 16.\n");
- return -ENODATA;
- }
-
- ptr = priv->eeprom;
- if (!ptr) {
- IWL_ERR(priv, "Invalid EEPROM memory\n");
- return -ENOMEM;
- }
-
- /* 4 characters for byte 0xYY */
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
- "version: 0x%x\n", eeprom_ver);
- for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct ieee80211_channel *channels = NULL;
- const struct ieee80211_supported_band *supp_band = NULL;
- int pos = 0, i, bufsz = PAGE_SIZE;
- char *buf;
- ssize_t ret;
-
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 2.4GHz band 802.11bg):\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 5.2GHz band (802.11a)\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = 24 * 64; /* 24 items * 64 char per item */
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
- for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tRx handler[%36s]:\t\t %u\n",
- iwl_legacy_get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &reset_flag) != 1)
- return -EFAULT;
- if (reset_flag == 0)
- iwl_legacy_clear_isr_stats(priv);
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_rxon_context *ctx;
- int pos = 0, i;
- char buf[256 * NUM_IWL_RXON_CTX];
- const size_t bufsz = sizeof(buf);
-
- for_each_context(priv, ctx) {
- pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
- ctx->ctxid);
- for (i = 0; i < AC_NUM; i++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tcw_min\tcw_max\taifsn\ttxop\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "AC[%d]\t%u\t%u\t%u\t%u\n", i,
- ctx->qos_data.def_qos_parm.ac[i].cw_min,
- ctx->qos_data.def_qos_parm.ac[i].cw_max,
- ctx->qos_data.def_qos_parm.ac[i].aifsn,
- ctx->qos_data.def_qos_parm.ac[i].edca_txop);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int ht40;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &ht40) != 1)
- return -EFAULT;
- if (!iwl_legacy_is_any_associated(priv))
- priv->disable_ht40 = ht40 ? true : false;
- else {
- IWL_ERR(priv, "Sta associated with AP - "
- "Change to 40MHz channel support is not allowed\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[100];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "11n 40MHz Mode: %s\n",
- priv->disable_ht40 ? "Disabled" : "Enabled");
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0;
- int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char *buf;
- int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
- const u8 *ptr;
- ssize_t ret;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate buffer\n");
- return -ENOMEM;
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
- ptr = priv->tx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
- ptr = priv->rx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &traffic_log) != 1)
- return -EFAULT;
- if (traffic_log == 0)
- iwl_legacy_reset_traffic_log(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
- ssize_t ret;
- struct iwl_sensitivity_data *data;
-
- data = &priv->sensitivity_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
- data->auto_corr_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc:\t\t %u\n",
- data->auto_corr_ofdm_mrc);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
- data->auto_corr_ofdm_x1);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc_x1:\t\t %u\n",
- data->auto_corr_ofdm_mrc_x1);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
- data->auto_corr_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
- data->auto_corr_cck_mrc);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_ofdm:\t\t %u\n",
- data->last_bad_plcp_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
- data->last_fa_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_cck:\t\t %u\n",
- data->last_bad_plcp_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
- data->last_fa_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
- data->nrg_curr_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
- data->nrg_prev_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
- for (cnt = 0; cnt < 10; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_value[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
- for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_silence_rssi[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
- data->nrg_silence_ref);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
- data->nrg_energy_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
- data->nrg_silence_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
- data->nrg_th_cck);
- pos += scnprintf(buf + pos, bufsz - pos,
- "nrg_auto_corr_silence_diff:\t %u\n",
- data->nrg_auto_corr_silence_diff);
- pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
- data->num_in_cck_no_fa);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
- data->nrg_th_ofdm);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
- ssize_t ret;
- struct iwl_chain_noise_data *data;
-
- data = &priv->chain_noise_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
- data->active_chains);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
- data->chain_noise_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
- data->chain_noise_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
- data->chain_noise_c);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
- data->chain_signal_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
- data->chain_signal_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
- data->chain_signal_c);
- pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
- data->beacon_count);
-
- pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->disconn_array[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->delta_gain_code[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
- data->radio_write);
- pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
- data->state);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[60];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- u32 pwrsave_status;
-
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
- (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
- (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
- "error");
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int clear;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &clear) != 1)
- return -EFAULT;
-
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- if (priv->cfg->ops->lib->dump_fh) {
- ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
- }
-
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[12];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
- priv->missed_beacon_threshold);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int missed;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &missed) != 1)
- return -EINVAL;
-
- if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
- missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
- priv->missed_beacon_threshold =
- IWL_MISSED_BEACON_THRESHOLD_DEF;
- else
- priv->missed_beacon_threshold = missed;
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[300];
- const size_t bufsz = sizeof(buf);
- struct iwl_force_reset *force_reset;
-
- force_reset = &priv->force_reset;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request: %d\n",
- force_reset->reset_request_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request success: %d\n",
- force_reset->reset_success_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request reject: %d\n",
- force_reset->reset_reject_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\treset duration: %lu\n",
- force_reset->reset_duration);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- int ret;
- struct iwl_priv *priv = file->private_data;
-
- ret = iwl_legacy_force_reset(priv, true);
-
- return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int timeout;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &timeout) != 1)
- return -EINVAL;
- if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
- timeout = IWL_DEF_WD_TIMEOUT;
-
- priv->cfg->base_params->wd_timeout = timeout;
- iwl_legacy_setup_watchdog(priv);
- return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- *
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
- dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
- priv->debugfs_dir = dir_drv;
-
- dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
- dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
- dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
-
- DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
- &priv->disable_sens_cal);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
- &priv->disable_tx_power_cal);
- return 0;
-
-err:
- IWL_ERR(priv, "Can't create the debugfs directory\n");
- iwl_legacy_dbgfs_unregister(priv);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
- if (!priv->debugfs_dir)
- return;
-
- debugfs_remove_recursive(priv->debugfs_dir);
- priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56f..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
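The RTS rule described in the comment above reduces to a small predicate; a sketch that mirrors the comment's wording (illustrative only, not part of the driver):

#include <stdbool.h>

/* 0 means RTS for every data/management frame, a threshold above the
 * maximum MSDU size disables RTS, otherwise RTS is used when the MPDU
 * is larger than the threshold. */
static bool use_rts(unsigned int mpdu_len, unsigned int rts_threshold)
{
	if (rts_threshold == 0)
		return true;
	if (rts_threshold > 2304U)	/* MAX_MSDU_SIZE */
		return false;
	return mpdu_len > rts_threshold;
}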
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands, if it were and its result passed
- * through it would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- /* The CMD_SIZE_HUGE flag bit indicates that the command
- * structure is stored at the end of the shared queue memory. */
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* first empty entry (index) host_w */
- int read_ptr; /* last used entry (index) host_r */
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- void *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES (2)
-
-struct iwl4965_channel_tgd_info {
- u8 type;
- s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
- s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
- /* maximum power level to prevent clipping for each rate, derived by
- * us from this band's saturation power in EEPROM */
- const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compensated) index into gain table */
- s8 base_power_index; /* gain index for power at factory temp. */
- s8 requested_power; /* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compensated) index into gain table */
- s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl4965_channel_tgd_info tgd;
- struct iwl4965_channel_tgh_info tgh;
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
- /* Radio/DSP gain settings for each "normal" data Tx rate.
- * These include, in addition to RF and DSP gain, a few fields for
- * remembering/modifying gain settings (indexes). */
- struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
- /* Radio/DSP gain settings for each scan rate, for directed scans. */
- struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
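A quick worked example of the helpers above (IEEE80211_SCTL_SEQ is 0xFFF0, so they extract and re-insert the 12-bit sequence number):

/*   SEQ_TO_SN(0x1234) == (0x1234 & 0xFFF0) >> 4 == 0x123
 *   SN_TO_SEQ(0x123)  == (0x123 << 4) & 0xFFF0  == 0x1230
 *   MAX_SN            == 0xFFF0 >> 4            == 0xFFF
 */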
-
-enum {
- CMD_SYNC = 0,
- CMD_SIZE_NORMAL = 0,
- CMD_NO_SKB = 0,
- CMD_SIZE_HUGE = (1 << 0),
- CMD_ASYNC = (1 << 1),
- CMD_WANT_SKB = (1 << 2),
- CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
- const void *data;
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len;
- u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap: bitmap, one bit for each frame pending ACK in Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* 4965 only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
- u32 cipher;
- int keylen;
- u8 keyidx;
- u8 key[32];
-};
-
-union iwl_ht_rate_supp {
- u16 rates;
- struct {
- u8 siso_rate;
- u8 mimo_rate;
- };
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
-
-struct iwl_ht_config {
- bool single_chain_sufficient;
- enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
- int qos_active;
- struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * This structure should be accessed with sta_lock held. Only while station
- * addition is in progress (IWL_STA_UCODE_INPROGRESS) may the commands
- * (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) be accessed without
- * sta_lock held.
- */
-struct iwl_station_entry {
- struct iwl_legacy_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
- u8 used, ctxid;
- struct iwl_hw_key keyinfo;
- struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
- struct iwl_station_priv_common common;
- struct iwl_lq_sta lq_sta;
- atomic_t pending_frames;
- bool client;
- bool asleep;
-};
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
- struct iwl_rxon_context *ctx;
- u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
-};
-
-struct iwl4965_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
- u16 min_nrg_cck;
- u16 max_nrg_cck;
-
- u16 nrg_th_cck;
- u16 nrg_th_ofdm;
-
- u16 auto_corr_min_ofdm;
- u16 auto_corr_min_ofdm_mrc;
- u16 auto_corr_min_ofdm_x1;
- u16 auto_corr_min_ofdm_mrc_x1;
-
- u16 auto_corr_max_ofdm;
- u16 auto_corr_max_ofdm_mrc;
- u16 auto_corr_max_ofdm_x1;
- u16 auto_corr_max_ofdm_mrc_x1;
-
- u16 auto_corr_max_cck;
- u16 auto_corr_max_cck_mrc;
- u16 auto_corr_min_cck;
- u16 auto_corr_min_cck_mrc;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations: maximum number of stations supported
- * @ht40_channel: bands in which 40 MHz (HT40) width is possible:
- *	BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: maximum sizes of the uCode inst/data/bsm images
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @sens: pointer to struct iwl_sensitivity_ranges, the range of sensitivity values
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u8 dma_chnl_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u32 rx_wrt_ptr_reg;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 max_bsm_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u16 beacon_time_tsf_bits;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-[4-5].c
- *
- * NOTE: The implementation of these functions is not hardware specific,
- * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_ <-- Is part of iwlwifi
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_ <-- Called from work queue context
- * iwl4965_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
- int is_huge)
-{
- /*
- * This is for the init calibration result and the scan command, which
- * require a buffer larger than TFD_MAX_PAYLOAD_SIZE:
- * use the big buffer at the end of the command array
- */
- if (is_huge)
- return q->n_window; /* must be power of 2 */
-
- /* Otherwise, use normal size buffers */
- return index & (q->n_window - 1);
-}
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO 0
-#define IWL_OPERATION_MODE_HT_ONLY 1
-#define IWL_OPERATION_MODE_MIXED 2
-#define IWL_OPERATION_MODE_20MHZ 3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE 0xFFFF
-#define IWL4965_CAL_NUM_BEACONS 20
-#define IWL_CAL_NUM_BEACONS 16
-#define MAXIMUM_ALLOWED_PATHLOSS 15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM 50
-#define MIN_FA_OFDM 5
-#define MAX_FA_CCK 50
-#define MIN_FA_CCK 5
-
-#define AUTO_CORR_STEP_OFDM 1
-
-#define AUTO_CORR_STEP_CCK 3
-#define AUTO_CORR_MAX_TH_CCK 160
-
-#define NRG_DIFF 2
-#define NRG_STEP_CCK 2
-#define NRG_MARGIN 8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
-
-#define CHAIN_A 0
-#define CHAIN_B 1
-#define CHAIN_C 2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER 0xFF00
-#define IN_BAND_FILTER 0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L 20
-#define NUM_RX_CHAINS 3
-
-enum iwl4965_false_alarm_state {
- IWL_FA_TOO_MANY = 0,
- IWL_FA_TOO_FEW = 1,
- IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
- IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
- IWL_CHAIN_NOISE_ACCUMULATE,
- IWL_CHAIN_NOISE_CALIBRATED,
- IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
- IWL_CALIB_DISABLED = 0, /* must be 0 */
- IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
-enum ucode_type {
- UCODE_NONE = 0,
- UCODE_INIT,
- UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
- u32 auto_corr_ofdm;
- u32 auto_corr_ofdm_mrc;
- u32 auto_corr_ofdm_x1;
- u32 auto_corr_ofdm_mrc_x1;
- u32 auto_corr_cck;
- u32 auto_corr_cck_mrc;
-
- u32 last_bad_plcp_cnt_ofdm;
- u32 last_fa_cnt_ofdm;
- u32 last_bad_plcp_cnt_cck;
- u32 last_fa_cnt_cck;
-
- u32 nrg_curr_state;
- u32 nrg_prev_state;
- u32 nrg_value[10];
- u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
- u32 nrg_silence_ref;
- u32 nrg_energy_idx;
- u32 nrg_silence_idx;
- u32 nrg_th_cck;
- s32 nrg_auto_corr_silence_diff;
- u32 num_in_cck_no_fa;
- u32 nrg_th_ofdm;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
- u32 active_chains;
- u32 chain_noise_a;
- u32 chain_noise_b;
- u32 chain_noise_c;
- u32 chain_signal_a;
- u32 chain_signal_b;
- u32 chain_signal_c;
- u16 beacon_count;
- u8 disconn_array[NUM_RX_CHAINS];
- u8 delta_gain_code[NUM_RX_CHAINS];
- u8 radio_write;
- u8 state;
-};
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
-enum {
- MEASUREMENT_READY = (1 << 0),
- MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
- MANAGEMENT_ASSOC_REQ = 0,
- MANAGEMENT_ASSOC_RESP,
- MANAGEMENT_REASSOC_REQ,
- MANAGEMENT_REASSOC_RESP,
- MANAGEMENT_PROBE_REQ,
- MANAGEMENT_PROBE_RESP,
- MANAGEMENT_BEACON,
- MANAGEMENT_ATIM,
- MANAGEMENT_DISASSOC,
- MANAGEMENT_AUTH,
- MANAGEMENT_DEAUTH,
- MANAGEMENT_ACTION,
- MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
- CONTROL_BACK_REQ = 0,
- CONTROL_BACK,
- CONTROL_PSPOLL,
- CONTROL_RTS,
- CONTROL_CTS,
- CONTROL_ACK,
- CONTROL_CFEND,
- CONTROL_CFENDACK,
- CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- u32 mgmt[MANAGEMENT_MAX];
- u32 ctrl[CONTROL_MAX];
- u32 data_cnt;
- u64 data_bytes;
-#endif
-};
-
-/*
- * Host interrupt timeout values, used to program the interrupt coalescing
- * timer; CSR_INT_COALESCING is an 8-bit register counting in 32-usec units.
- *
- * Default interrupt coalescing timer: 64 x 32 = 2048 usecs
- * Default interrupt coalescing calibration timer: 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
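A minimal sketch of the 32-usec granularity described above, assuming the kernel's u8/u32 types from <linux/types.h>; the helper name is hypothetical and not part of this file.

/* Illustrative sketch only: convert an 8-bit CSR_INT_COALESCING value
 * (32-usec units) to microseconds, e.g. 0x40 -> 64 * 32 = 2048 usecs. */
static inline u32 example_coalescing_to_usec(u8 reg_val)
{
	return (u32)reg_val * 32;
}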
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT (2000)
-#define IWL_LONG_WD_TIMEOUT (10000)
-#define IWL_MAX_WD_TIMEOUT (120000)
-
-struct iwl_force_reset {
- int reset_request_count;
- int reset_success_count;
- int reset_reject_count;
- unsigned long reset_duration;
- unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0 - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS 24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0 - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS 22
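A minimal sketch of the bit layout above (helper name hypothetical, assuming kernel u32 from <linux/types.h>); it packs the extended and interval fields given the device-specific position, IWL3945_EXT_BEACON_TIME_POS or IWL4965_EXT_BEACON_TIME_POS.

/* Illustrative sketch only: compose an extended beacon time word from its
 * "extended" (high bits) and "interval" (low bits) fields, where pos is
 * 24 for _3945 devices and 22 for _4965 devices. */
static inline u32 example_ext_beacon_time(u32 extended, u32 interval, int pos)
{
	u32 interval_mask = (1U << pos) - 1;

	return (extended << pos) | (interval & interval_mask);
}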
-
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
-
- NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
- struct ieee80211_vif *vif;
-
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
- /*
- * We could use the vif to indicate active, but we
- * also need it to be active during disabling when
- * we already removed the vif for type setting.
- */
- bool always_active, is_active;
-
- bool ht_need_multiple_chains;
-
- enum iwl_rxon_context_id ctxid;
-
- u32 interface_modes, exclusive_interface_modes;
- u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
- /*
- * We declare this const so it can only be
- * changed via explicit cast within the
- * routines that actually update the physical
- * hardware.
- */
- const struct iwl_legacy_rxon_cmd active;
- struct iwl_legacy_rxon_cmd staging;
-
- struct iwl_rxon_time_cmd timing;
-
- struct iwl_qos_info qos_data;
-
- u8 bcast_sta_id, ap_sta_id;
-
- u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
- u8 qos_cmd;
- u8 wep_key_cmd;
-
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 key_mapping_keys;
-
- __le32 station_flags;
-
- struct {
- bool non_gf_sta_present;
- u8 protection;
- bool enabled, is_40mhz;
- u8 extension_chan_offset;
- } ht;
-};
-
-struct iwl_priv {
-
- /* ieee device used by generic ieee processing code */
- struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
- struct iwl_cfg *cfg;
-
- /* temporary frame storage list */
- struct list_head free_frames;
- int frames_count;
-
- enum ieee80211_band band;
- int alloc_rxb_page;
-
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- /* spectrum measurement report caching */
- struct iwl_spectrum_notification measure_report;
- u8 measurement_status;
-
- /* ucode beacon time */
- u32 ucode_beacon_time;
- int missed_beacon_threshold;
-
- /* track IBSS manager (last beacon) status */
- u32 ibss_manager;
-
- /* force reset */
- struct iwl_force_reset force_reset;
-
- /* we allocate array of iwl_channel_info for NIC's valid channels.
- * Access via channel # using indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
- /* thermal calibration */
- s32 temperature; /* degrees Kelvin */
- s32 last_temperature;
-
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
- /* Scan related variables */
- unsigned long scan_start;
- unsigned long scan_start_tsf;
- void *scan_cmd;
- enum ieee80211_band scan_band;
- struct cfg80211_scan_request *scan_request;
- struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
- u8 mgmt_tx_ant;
-
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
- u32 hw_rev;
- u32 hw_wa_rev;
- u8 rev_id;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
- /* max number of station keys */
- u8 sta_key_max_num;
-
- /* EEPROM MAC addresses */
- struct mac_address addresses[1];
-
- /* uCode images, save to reload in case of failure */
- int fw_index; /* firmware we're trying to load */
- u32 ucode_ver; /* version of ucode, copy of
- iwl_ucode.ver */
- struct fw_desc ucode_code; /* runtime inst */
- struct fw_desc ucode_data; /* runtime data original */
- struct fw_desc ucode_data_backup; /* runtime data save/restore */
- struct fw_desc ucode_init; /* initialization inst */
- struct fw_desc ucode_init_data; /* initialization data */
- struct fw_desc ucode_boot; /* bootstrap inst */
- enum ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
- char firmware_name[25];
-
- struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
- __le16 switch_channel;
-
- /* 1st responses from initialize and runtime uCode images.
- * _4965's initialize alive response contains some calibration data. */
- struct iwl_init_alive_resp card_alive_init;
- struct iwl_alive_resp card_alive;
-
- u16 active_rate;
-
- u8 start_calib;
- struct iwl_sensitivity_data sensitivity_data;
- struct iwl_chain_noise_data chain_noise_data;
- __le16 sensitivity_tbl[HD_TABLE_SIZE];
-
- struct iwl_ht_config current_ht_config;
-
- /* Rate scaling data */
- u8 retry_rate;
-
- wait_queue_head_t wait_command_queue;
-
- int activity_timer_active;
-
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
- /* counts mgmt, ctl, and data packets */
- struct traffic_stats tx_stats;
- struct traffic_stats rx_stats;
-
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
- struct iwl_power_mgr power_data;
-
- /* context information */
- u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
- /* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
- int num_stations;
- struct iwl_station_entry stations[IWL_STATION_COUNT];
- unsigned long ucode_key_table;
-
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
-
- /* Indication if ieee80211_ops->open has been called */
- u8 is_open;
-
- u8 mac80211_registered;
-
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- struct iwl_eeprom_calib_info *calib_info;
-
- enum nl80211_iftype iw_mode;
-
- /* Last Rx'd beacon timestamp */
- u64 timestamp;
-
- union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
- struct {
- void *shared_virt;
- dma_addr_t shared_phys;
-
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
- struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl3945_notif_statistics accum_statistics;
- struct iwl3945_notif_statistics delta_statistics;
- struct iwl3945_notif_statistics max_delta;
-#endif
-
- u32 sta_supp_rates;
- int last_rx_rssi; /* From Rx packet statistics */
-
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
- /*
- * each calibration channel group in the
- * EEPROM has a derived clip setting for
- * each rate.
- */
- const struct iwl3945_clip_group clip_groups[5];
-
- } _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
- struct {
- struct iwl_rx_phy_res last_phy_res;
- bool last_phy_res_valid;
-
- struct completion firmware_loading_complete;
-
- /*
- * chain noise reset and gain commands are the
- * two extra calibration commands that follow the
- * standard phy calibration commands
- */
- u8 phy_calib_chain_noise_reset_cmd;
- u8 phy_calib_chain_noise_gain_cmd;
-
- struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl_notif_statistics accum_statistics;
- struct iwl_notif_statistics delta_statistics;
- struct iwl_notif_statistics max_delta;
-#endif
-
- } _4965;
-#endif
- };
-
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
- struct work_struct restart;
- struct work_struct scan_completed;
- struct work_struct rx_replenish;
- struct work_struct abort_scan;
-
- struct iwl_rxon_context *beacon_ctx;
- struct sk_buff *beacon_skb;
-
- struct work_struct tx_flush;
-
- struct tasklet_struct irq_tasklet;
-
- struct delayed_work init_alive_start;
- struct delayed_work alive_start;
- struct delayed_work scan_check;
-
- /* TX Power */
- s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- /* debugfs */
- u16 tx_traffic_idx;
- u16 rx_traffic_idx;
- u8 *tx_traffic;
- u8 *rx_traffic;
- struct dentry *debugfs_dir;
- u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
- struct work_struct txpower_work;
- u32 disable_sens_cal;
- u32 disable_chain_noise_cal;
- u32 disable_tx_power_cal;
- struct work_struct run_time_calib_work;
- struct timer_list statistics_periodic;
- struct timer_list watchdog;
- bool hw_ready;
-
- struct led_classdev led;
- unsigned long blink_on, blink_off;
- bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set a per-device debug level. This debug
- * level is used if set; otherwise the global debug level, which can be
- * set via a module parameter, is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx) \
- for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
- ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
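A minimal usage sketch of the for_each_context() iterator above (the function and its purpose are hypothetical); it visits every RXON context whose bit is set in priv->valid_contexts.

/* Illustrative sketch only: count the contexts that are currently active. */
static void example_count_active_ctx(struct iwl_priv *priv, int *n_active)
{
	struct iwl_rxon_context *ctx;

	*n_active = 0;
	for_each_context(priv, ctx)
		if (ctx->is_active)
			(*n_active)++;
}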
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid)
-{
- return (priv->contexts[ctxid].active.filter_flags &
- RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
- return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6b..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN (__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u8, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
- TP_ARGS(priv, hcmd, len, flags),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, hcmd, len)
- __field(u32, flags)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(hcmd), hcmd, len);
- __entry->flags = flags;
- ),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
- __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, rxbuf, len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
- ),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
- void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(size_t, framelen)
- __dynamic_array(u8, tfd, tfdlen)
-
- /*
- * Do not insert between or below these items,
- * we want to keep the frame together (except
- * for the possible padding).
- */
- __dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
- memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
- memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
- ),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2),
- TP_ARGS(priv, desc, time, data1, data2, line,
- blink1, blink2, ilink1, ilink2),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, desc)
- __field(u32, time)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink1)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->desc = desc;
- __entry->time = time;
- __entry->data1 = data1;
- __entry->data2 = data2;
- __entry->line = line;
- __entry->blink1 = blink1;
- __entry->blink2 = blink2;
- __entry->ilink1 = ilink1;
- __entry->ilink2 = ilink2;
- ),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
- __entry->priv, __entry->desc, __entry->time, __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
- __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- break;
- default:
- IWL_ERR(priv, "bad EEPROM signature,"
- "EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
- BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
-
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- ret = iwl_legacy_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- _iwl_legacy_write32(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
- addr);
- goto done;
- }
- r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- "EEPROM",
- iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
- if (ret)
- iwl_legacy_eeprom_free(priv);
- /* Reset chip to save power until we load uCode during "up". */
- iwl_legacy_apm_stop(priv);
-alloc_err:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->cfg->ops->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_7;
- break;
- default:
- BUG();
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_legacy_get_channel_info(priv, band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &=
- ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwlegacy_eeprom_band_1) +
- ARRAY_SIZE(iwlegacy_eeprom_band_2) +
- ARRAY_SIZE(iwlegacy_eeprom_band_3) +
- ARRAY_SIZE(iwlegacy_eeprom_band_4) +
- ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands, adding them in order to the
- * channel map we maintain (which contains more information than
- * what is in the EEPROM alone) */
- for (band = 1; band <= 5; band++) {
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First mark HT40 as not enabled, then enable it
- * one channel at a time */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(iwl_legacy_is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c8100202..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION (0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION (5)
-#define EEPROM_4965_EEPROM_VERSION (0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
-#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna). EEPROM contains:
- *
- * 1) Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2) Gain table index used to achieve the target measurement power.
- * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4) RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
- u8 temperature; /* Device temperature (Celsius) */
- u8 gain_idx; /* Index into gain table */
- u8 actual_pow; /* Measured RF output power, half-dBm */
- s8 pa_det; /* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel. EEPROM contains:
- *
- * 1) Channel number measured
- *
- * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
- * (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
- u8 ch_num;
- struct iwl_eeprom_calib_measure
- measurements[EEPROM_TX_POWER_TX_CHAINS]
- [EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1) First and last channels within range of the subband. "0" values
- * indicate that this sample set is not being used.
- *
- * 2) Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
- u8 ch_from; /* channel number of lowest channel in subband */
- u8 ch_to; /* channel number of highest channel in subband */
- struct iwl_eeprom_calib_ch_info ch1;
- struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info. EEPROM contains:
- *
- * 1) Factory-measured saturation power levels (maximum levels at which
- * tx power amplifier can output a signal without too much distortion).
- * There is one level for 2.4 GHz band and one for 5 GHz band. These
- * values apply to all channels within each of the bands.
- *
- * 2) Factory-measured power supply voltage level. This is assumed to be
- * constant (i.e. same value applies to all channels/bands) while the
- * factory measurements are being made.
- *
- * 3) Up to 8 sets of factory-measured txpower calibration values.
- * These are for different frequency ranges, since txpower gain
- * characteristics of the analog radio circuitry vary with frequency.
- *
- * Not all sets need to be filled with data;
- * struct iwl_eeprom_calib_subband_info contains range of channels
- * (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
- u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
- u8 saturation_power52; /* half-dBm */
- __le16 voltage; /* signed */
- struct iwl_eeprom_calib_subband_info
- band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
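A minimal sketch of the subband lookup implied by the ch_from/ch_to convention above (illustrative helper, not part of this driver):

static const struct iwl_eeprom_calib_subband_info *
example_find_subband(const struct iwl_eeprom_calib_info *calib, u8 channel)
{
	int i;

	for (i = 0; i < EEPROM_TX_POWER_BANDS; i++) {
		const struct iwl_eeprom_calib_subband_info *sb =
						&calib->band_info[i];

		if (sb->ch_from == 0)		/* unused sample set */
			continue;
		if (channel >= sb->ch_from && channel <= sb->ch_to)
			return sb;
	}
	return NULL;
}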
-
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
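Sketch of decoding the radio configuration word with the masks above, using the EEPROM accessor declared later in this header (the function name is illustrative):

static void example_decode_radio_config(struct iwl_priv *priv)
{
	u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	int type   = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);		/* bits 0-1 */
	int step   = EEPROM_RF_CFG_STEP_MSK(radio_cfg);		/* bits 2-3 */
	int dash   = EEPROM_RF_CFG_DASH_MSK(radio_cfg);		/* bits 4-5 */
	int tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);	/* bits 8-11 */
	int rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);	/* bits 12-15 */

	IWL_DEBUG_INFO(priv,
		       "radio cfg: type %d step %d dash %d tx 0x%x rx 0x%x\n",
		       type, step, dash, tx_ant, rx_ant);
}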
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
-#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE: The RXON command uses 20 MHz channel numbers to specify the
- * control channel to which to tune. RXON also specifies whether the
- * control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- int (*acquire_semaphore) (struct iwl_priv *priv);
- void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
-
-#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e3..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND (0x1000)
-#define FH_MEM_UPPER_BOUND (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned. Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
- * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- * entries (although any power of 2, up to 4096, is selectable by driver).
- * Each entry (1 dword) points to a receive buffer (RB) of consistent size
- * (typically 4K, although 8K or 16K are also selectable by driver).
- * Driver sets up RB size and number of RBDs in the CB via Rx config
- * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- * Bit fields within one RBD:
- * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- * Driver sets physical address [35:8] of base of RBD circular buffer
- * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- * (RBs) have been filled, via a "write pointer", actually the index of
- * the RB's corresponding RBD within the circular buffer. Driver sets
- * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- * Bit fields in lower dword of Rx status buffer (upper dword not used
- * by driver; see struct iwl4965_shared, val0):
- * 31-12: Not used by driver
- * 11- 0: Index of last filled Rx buffer descriptor
- * (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer. This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1! See below).
- * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
- * to tell the driver the index of the latest filled RBD. The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process. When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index. For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0. Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256. To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)

-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
- * NOTE: For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
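Sketch of the write-index update described above, rounding down to a multiple of 8 as the device expects (uses the direct-write helper from iwl-io.h; the function name is illustrative):

static void example_rx_queue_update_write_ptr(struct iwl_priv *priv, u32 write)
{
	u32 write_actual = write & ~0x7;	/* device accepts multiples of 8 only */

	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, write_actual);
}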
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- * min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- * '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- * typical value 0x10 (about 1/2 msec)
- * 3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
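Sketch of composing the channel-0 Rx config value from the bit fields documented above, assuming the 256-RBD / 4K-RB defaults named in this header (the direct-write helper is from iwl-io.h):

static void example_rx_config(struct iwl_priv *priv)
{
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |			/* bits 30-31: run */
		FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |		/* bit 12: IRQ to host */
		FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |			/* bits 16-17: 4K RBs */
		(RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) | /* 2^8 = 256 RBDs */
		(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));	/* close/IRQ timeout */
}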
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- * 24: 1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
- (FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
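Sketch of the stop sequence described above: clear the channel-0 Rx config register, then poll for idle. iwl_poll_direct_bit comes from iwl-io.h; the 1000 us timeout is an assumption.

static int example_rx_dma_stop(struct iwl_priv *priv)
{
	/* Turn off Rx DMA, then wait until the channel reports idle */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}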
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
-
-/* TFDB Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29- 4: Reserved, set to "0"
- * 3: Enable internal DMA requests (1, normal operation), disable (0)
- * 2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM (7)
-#define FH50_TCSR_CHNL_NUM (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
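Sketch of bringing up one Tx DMA channel exactly as the TCSR comment above prescribes: DMA enable plus credit enable, all other bits zero (illustrative function name):

static void example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
{
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}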
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24: 1 = Channel buffers empty (channel 7:0)
- * 23-16: 1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR (Tx Shared Status & Control) error status register:
- * 31: Indicates an address error when accessing internal memory
- * uCode/driver must write "1" in order to clear this flag
- * 30: Indicates that Host did not send the expected number of dwords to FH
- * uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- * command was received from the scheduler while the TRB was already full
- * with previous command
- * uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- * bit is set, it indicates that the FH has received a full indication
- * from the RTC TxFIFO and the current value of the TxCredit counter was
- * not equal to zero. This means that the credit mechanism was not
- * synchronized to the TxFIFO status
- * uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
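Sketch of the drain sequence described above for one Tx channel (the timeout value is an assumption):

static int example_tx_chnl_stop(struct iwl_priv *priv, int chnl)
{
	/* Stop the channel, then wait for "buffers empty | no pending requests" */
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
				   1000);
}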
-
-/* Tx service channels */
-#define FH_SRVC_CHNL (9)
-#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
- (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - receive buffer status
- * host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- * in which the last frame was written
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transferred
- */
-struct iwl_rb_status {
- __le16 closed_rb_num;
- __le16 closed_fr_num;
- __le16 finished_rb_num;
- __le16 finished_fr_nam;
- __le32 __unused; /* 3945 only */
-} __packed;
-
-
-#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_BC_DUP (64)
-#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS 20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
- *
- * This structure contains the dma address and length of one Tx buffer
- *
- * @lo: low [31:0] portion of the dma address of the TX buffer;
- * every even-numbered entry is unaligned on a 16 bit boundary
- * @hi_n_len: 0-3 [35:32] portion of the dma address
- * 4-15 length of the tx buffer
- */
-struct iwl_tfd_tb {
- __le32 lo;
- __le16 hi_n_len;
-} __packed;
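Sketch of filling one TB per the layout above; requires asm/unaligned.h for the unaligned store and assumes len < 4096 (the function name is illustrative):

static void example_set_tb(struct iwl_tfd_tb *tb, dma_addr_t addr, u16 len)
{
	u16 hi_n_len = len << 4;			/* length occupies bits 4-15 */

	hi_n_len |= iwl_legacy_get_dma_hi_addr(addr);	/* bits [35:32] in bits 0-3 */

	/* @lo may only be 16-bit aligned, hence the unaligned 32-bit store */
	put_unaligned_le32(addr, &tb->lo);
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}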
-
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM. These buffers collectively contain the (one) frame described
- * by the TFD. Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM. Each buffer has max size
- * of (4K - 4). The device concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
- u8 __reserved1[3];
- u8 num_tbs;
- struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
- __le32 __pad;
-} __packed;
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000 /* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_3945_RX);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_RATE_SCALE);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- default:
- return "UNKNOWN";
-
- }
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- BUG_ON(!(cmd->flags & CMD_ASYNC));
-
- /* An asynchronous command can not expect an SKB to be set. */
- BUG_ON(cmd->flags & CMD_WANT_SKB);
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_legacy_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- BUG_ON(cmd->flags & CMD_ASYNC);
-
- /* A synchronous command can not have a callback set. */
- BUG_ON(cmd->callback);
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- goto out;
- }
-
- ret = wait_event_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- iwl_legacy_get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv,
- "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- ret = 0;
- goto out;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_legacy_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_async(priv, cmd);
-
- return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
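Minimal usage sketch for iwl_legacy_send_cmd_pdu(); the payload struct and function below are hypothetical, not a real firmware command layout:

struct example_cmd {
	u8 flags;
	u8 reserved[3];
} __packed;

static int example_send_cmd(struct iwl_priv *priv, u8 id)
{
	struct example_cmd cmd = { .flags = 1 };

	/* Synchronous path; caller must hold priv->mutex */
	lockdep_assert_held(&priv->mutex);

	return iwl_legacy_send_cmd_pdu(priv, id, sizeof(cmd), &cmd);
}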
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
- u8 id, u16 len, const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt))
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- cmd.flags |= CMD_ASYNC;
- cmd.callback = callback;
-
- return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbb..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
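Usage sketch for the wrap helpers above (n_bd must be a power of two, e.g. TFD_QUEUE_SIZE_MAX from iwl-fh.h):

static int example_count_used(int read_idx, int write_idx, int n_bd)
{
	int count = 0;

	/* Walk from read to write, wrapping at the end of the queue */
	while (read_idx != write_idx) {
		read_idx = iwl_legacy_queue_inc_wrap(read_idx, n_bd);
		count++;
	}
	return count;
}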
-
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(&pci_dev->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (!desc->len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
- &desc->p_addr, GFP_KERNEL);
- return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
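Sketch of splitting a 32-bit beacon time into its high and low parts with the two masks above (tsf_bits is passed in here as an assumption; the helper names are illustrative):

static inline u32 example_beacon_time_high(struct iwl_priv *priv, u32 time,
					   u16 tsf_bits)
{
	return (time & iwl_legacy_beacon_time_mask_high(priv, tsf_bits))
							>> tsf_bits;
}

static inline u32 example_beacon_time_low(struct iwl_priv *priv, u32 time,
					  u16 tsf_bits)
{
	return time & iwl_legacy_beacon_time_mask_low(priv, tsf_bits);
}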
-
-#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O. In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
- trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
- iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u8 val)
-{
- IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
- __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
- trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
- iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u32 val)
-{
- IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
- __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
- u32 val = ioread32(priv->hw_base + ofs);
- trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
- return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
- IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
- return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
- IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
- addr, bits, mask,
- unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
- return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
- __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
- bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) | mask;
- IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
- mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_set_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
- IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_clear_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
- int ret;
- u32 val;
-
- /* this bit wakes up the NIC */
- _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- */
- ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
- _iwl_legacy_write32(priv, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
- IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
- return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
- __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
- _iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
-
- IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
- _iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
- __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
- _iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg)
-{
- u32 value = _iwl_legacy_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv,
- "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
- f, l);
- return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = _iwl_legacy_read_direct32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
- u32 reg, u32 value)
-{
- _iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, reg, value);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
- u32 reg, u32 len, u32 *values)
-{
- u32 count = sizeof(u32);
-
- if ((priv != NULL) && (values != NULL)) {
- for (; 0 < len; len -= count, reg += count, values++)
- iwl_legacy_write_direct32(priv, reg, *values);
- }
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
- u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
- struct iwl_priv *priv,
- u32 addr, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
- if (unlikely(ret == -ETIMEDOUT))
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
- "timedout - %s %d\n", addr, mask, f, l);
- else
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
- "- %s %d\n", addr, mask, ret, f, l);
- return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
- return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
- u32 addr, u32 val)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_prph(priv, reg, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg, \
- ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
- u32 bits, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
- *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- _iwl_legacy_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
- unsigned long reg_flags;
- u32 value;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
- rmb();
- value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
- u32 len, u32 *values)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- for (; 0 < len; len -= sizeof(u32), values++)
- _iwl_legacy_write_direct32(priv,
- HBUS_TARG_MEM_WDAT, *values);
-
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
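The removed _iwl_legacy_poll_direct_bit() above is a classic poll-with-timeout loop. The following stand-alone C sketch reproduces that pattern for reference only; the interval constant, the fake register read and main() are illustrative placeholders, not driver code.

#include <stdint.h>
#include <stdio.h>

#define POLL_INTERVAL_US 10     /* assumed stand-in for IWL_POLL_INTERVAL */
#define POLL_TIMED_OUT  (-110)  /* mimics returning -ETIMEDOUT */

static uint32_t fake_read_reg(int elapsed_us)
{
	/* Pretend the hardware sets bit 0 after 50 microseconds. */
	return (elapsed_us >= 50) ? 0x1u : 0x0u;
}

/* Returns the elapsed time once all bits in @mask are set, or a negative
 * value on timeout -- the same contract as the removed helper. */
static int poll_bit(uint32_t mask, int timeout_us)
{
	int t = 0;

	do {
		if ((fake_read_reg(t) & mask) == mask)
			return t;
		/* a real driver would udelay(POLL_INTERVAL_US) here */
		t += POLL_INTERVAL_US;
	} while (t < timeout_us);

	return POLL_TIMED_OUT;
}

int main(void)
{
	printf("bit set after %d us\n", poll_bit(0x1u, 1000));
	return 0;
}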
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput OFF time(ms) ON time (ms)
- * >300 25 25
- * >200 to 300 40 40
- * >100 to 200 55 55
- * >70 to 100 65 65
- * >50 to 70 75 75
- * >20 to 50 85 85
- * >10 to 20 95 95
- * >5 to 10 110 110
- * >1 to 5 130 130
- * >0 to 1 167 167
- * <=0 SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
- { .throughput = 0, .blink_time = 334 },
- { .throughput = 1 * 1024 - 1, .blink_time = 260 },
- { .throughput = 5 * 1024 - 1, .blink_time = 220 },
- { .throughput = 10 * 1024 - 1, .blink_time = 190 },
- { .throughput = 20 * 1024 - 1, .blink_time = 170 },
- { .throughput = 50 * 1024 - 1, .blink_time = 150 },
- { .throughput = 70 * 1024 - 1, .blink_time = 130 },
- { .throughput = 100 * 1024 - 1, .blink_time = 110 },
- { .throughput = 200 * 1024 - 1, .blink_time = 80 },
- { .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
-
-/*
- * Adjust led blink rate to compensate for a MAC Clock difference on every HW
- * Led blink rate analysis showed an average deviation of 0% on 3945,
- * 5% on 4965 HW.
- * Need to compensate for the led on/off time per HW according to the deviation
- * to achieve the desired led frequency
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- * compensation = (100 - averageDeviation) * 64 / 100
- * NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
- u8 time, u16 compensation)
-{
- if (!compensation) {
- IWL_ERR(priv, "undefined blink compensation: "
- "use pre-defined blinking time\n");
- return time;
- }
-
- return (u8)((time * compensation) >> 6);
-}
-
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
- unsigned long on,
- unsigned long off)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .interval = IWL_DEF_LED_INTRVL
- };
- int ret;
-
- if (!test_bit(STATUS_READY, &priv->status))
- return -EBUSY;
-
- if (priv->blink_on == on && priv->blink_off == off)
- return 0;
-
- if (off == 0) {
- /* led is SOLID_ON */
- on = IWL_LED_SOLID;
- }
-
- IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
- led_cmd.on = iwl_legacy_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
- led_cmd.off = iwl_legacy_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
-
- ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
- if (!ret) {
- priv->blink_on = on;
- priv->blink_off = off;
- }
- return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- unsigned long on = 0;
-
- if (brightness > 0)
- on = IWL_LED_SOLID;
-
- iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
- return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
- int mode = led_mode;
- int ret;
-
- if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
-
- priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
- wiphy_name(priv->hw->wiphy));
- priv->led.brightness_set = iwl_legacy_led_brightness_set;
- priv->led.blink_set = iwl_legacy_led_blink_set;
- priv->led.max_brightness = 1;
-
- switch (mode) {
- case IWL_LED_DEFAULT:
- WARN_ON(1);
- break;
- case IWL_LED_BLINK:
- priv->led.default_trigger =
- ieee80211_create_tpt_led_trigger(priv->hw,
- IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
- iwl_blink, ARRAY_SIZE(iwl_blink));
- break;
- case IWL_LED_RF_STATE:
- priv->led.default_trigger =
- ieee80211_get_radio_led_name(priv->hw);
- break;
- }
-
- ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
- if (ret) {
- kfree(priv->led.name);
- return;
- }
-
- priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
- if (!priv->led_registered)
- return;
-
- led_classdev_unregister(&priv->led);
- kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
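The blink compensation removed above boils down to a fixed-point multiply and a shift by 6 (a divide by 64). A minimal stand-alone sketch, assuming the 5% deviation mentioned in the file's own comment; the helper and value names here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

static uint8_t blink_compensate(uint8_t time, uint16_t compensation)
{
	if (!compensation)      /* fall back to the pre-defined table value */
		return time;
	return (uint8_t)((time * compensation) >> 6);  /* (time * comp) / 64 */
}

int main(void)
{
	/* compensation = (100 - averageDeviation) * 64 / 100, e.g. 5% on 4965 */
	uint16_t comp = (100 - 5) * 64 / 100;          /* = 60 */

	printf("80 ms blink -> %u ms after compensation\n",
	       blink_compensate(80, comp));            /* 80 * 60 / 64 = 75 */
	return 0;
}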
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY (0<<1)
-#define IWL_LED_LINK (1<<1)
-
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
- u8 table_rs_index; /* index in rate scale table cmd */
- u8 prev_table_rs; /* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
- IWL_RATE_1M_INDEX = 0,
- IWL_RATE_2M_INDEX,
- IWL_RATE_5M_INDEX,
- IWL_RATE_11M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX,
- IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX,
- IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX,
- IWL_RATE_54M_INDEX,
- IWL_RATE_60M_INDEX,
- IWL_RATE_COUNT,
- IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
- IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
- IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
- IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
- IWL_RATE_6M_INDEX_TABLE = 0,
- IWL_RATE_9M_INDEX_TABLE,
- IWL_RATE_12M_INDEX_TABLE,
- IWL_RATE_18M_INDEX_TABLE,
- IWL_RATE_24M_INDEX_TABLE,
- IWL_RATE_36M_INDEX_TABLE,
- IWL_RATE_48M_INDEX_TABLE,
- IWL_RATE_54M_INDEX_TABLE,
- IWL_RATE_1M_INDEX_TABLE,
- IWL_RATE_2M_INDEX_TABLE,
- IWL_RATE_5M_INDEX_TABLE,
- IWL_RATE_11M_INDEX_TABLE,
- IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
- IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
- IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
- IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
- IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
-#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
-#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
-#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
-#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
-#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
-#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
-#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
-#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
-#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
-#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
-#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
- IWL_RATE_6M_PLCP = 13,
- IWL_RATE_9M_PLCP = 15,
- IWL_RATE_12M_PLCP = 5,
- IWL_RATE_18M_PLCP = 7,
- IWL_RATE_24M_PLCP = 9,
- IWL_RATE_36M_PLCP = 11,
- IWL_RATE_48M_PLCP = 1,
- IWL_RATE_54M_PLCP = 3,
- IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
- IWL_RATE_1M_PLCP = 10,
- IWL_RATE_2M_PLCP = 20,
- IWL_RATE_5M_PLCP = 55,
- IWL_RATE_11M_PLCP = 110,
- /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
-#define IWL_CCK_BASIC_RATES_MASK \
- (IWL_RATE_1M_MASK | \
- IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK \
- (IWL_CCK_BASIC_RATES_MASK | \
- IWL_RATE_5M_MASK | \
- IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK \
- (IWL_RATE_6M_MASK | \
- IWL_RATE_12M_MASK | \
- IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_RATE_9M_MASK | \
- IWL_RATE_18M_MASK | \
- IWL_RATE_36M_MASK | \
- IWL_RATE_48M_MASK | \
- IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
-#define IWL_INVALID_VALUE -1
-
-#define IWL_MIN_RSSI_VAL -100
-#define IWL_MAX_RSSI_VAL 0
-
-/* These values specify how many Tx frame attempts before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT 160
-#define IWL_LEGACY_SUCCESS_LIMIT 480
-#define IWL_LEGACY_TABLE_COUNT 160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
-#define IWL_NONE_LEGACY_TABLE_COUNT 1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO 12800 /* 100% */
-#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
-#define IWL_RATE_HIGH_TH 10880 /* 85% */
-#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
-
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
-
-#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD 0
-#define IWL_AGG_LOAD_THRESHOLD 10
-#define IWL_AGG_ALL_TID 0xff
-#define TID_QUEUE_CELL_SPACING 50 /*mS */
-#define TID_QUEUE_MAX_SIZE 20
-#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
-
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
- LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
- LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define ANT_NONE 0x0
-#define ANT_A BIT(0)
-#define ANT_B BIT(1)
-#define ANT_AB (ANT_A | ANT_B)
-#define ANT_C BIT(2)
-#define ANT_AC (ANT_A | ANT_C)
-#define ANT_BC (ANT_B | ANT_C)
-#define ANT_ABC (ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE 12
-
-struct iwl_rate_mcs_info {
- char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
- char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
- u64 data; /* bitmap of successful frames */
- s32 success_counter; /* number of frames successful */
- s32 success_ratio; /* per-cent * 128 */
- s32 counter; /* number of frames attempted */
- s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
- u8 is_dup; /* 1 = duplicated data streams */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-	u8 max_search;	/* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
- struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
-	u8 queue_count;	/* number of queues that have
- * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
- u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
- u8 search_better_tbl; /* 1: currently trying alternate mode */
- s32 last_tpt;
-
- /* The following determine when to search for a new mode */
- u32 table_count_limit;
- u32 max_failure_limit; /* # failed frames before new search */
- u32 max_success_limit; /* # successful frames before new search */
- u32 table_count;
- u32 total_failed; /* total failed frames, any/all rates */
- u32 total_success; /* total successful frames, any/all rates */
- u64 flush_timer; /* time staying in mode before new search */
-
- u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
- u8 is_dup;
- enum ieee80211_band band;
-
- /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
- u16 active_legacy_rate;
- u16 active_siso_rate;
- u16 active_mimo2_rate;
- s8 max_rate_idx; /* Max rate set by user */
- u8 missed_rate_counter;
-
- struct iwl_link_quality_cmd lq;
- struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
- u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
- u32 dbg_fixed_rate;
-#endif
- struct iwl_priv *drv;
-
- /* used to be in sta_info */
- int last_txrate_idx;
- /* last tx rate_n_flags */
- u32 last_rate_n_flags;
- /* packets destined for this STA are aggregated */
- u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
- if (mask & ANT_A)
- return ANT_A;
- if (mask & ANT_B)
- return ANT_B;
- return ANT_C;
-}
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * the station is associated with, including A, B, G, and G w/ TGG protection
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware specific, there is no need
- * or reason to place it as a stand alone module. The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem. This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs_h__ */
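The success-ratio thresholds above use a fixed-point scale in which 12800 means 100% ("per-cent * 128"). A small sketch of how a ratio in that scale could be computed and compared; only the constants come from the removed header, the helper itself is a hypothetical illustration.

#include <stdio.h>

#define RS_GOOD_RATIO      12800   /* 100% */
#define RATE_SCALE_SWITCH  10880   /*  85% */
#define RATE_DECREASE_TH    1920   /*  15% */

/* success ratio in the driver's fixed-point scale: percent * 128 */
static int success_ratio(int acked, int attempted)
{
	if (!attempted)
		return 0;
	return (128 * 100 * acked) / attempted;
}

int main(void)
{
	int sr = success_ratio(90, 100);          /* 90% -> 11520 */

	printf("ratio=%d, above switch threshold: %s\n",
	       sr, sr >= RATE_SCALE_SWITCH ? "yes" : "no");
	return 0;
}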
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6c..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
- struct iwl_powertable_cmd cmd;
- u8 no_dtim; /* number of skip dtim */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
- struct iwl_powertable_cmd *cmd)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- if (priv->power_data.pci_pm)
- cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
- IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
- IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
- IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
- le32_to_cpu(cmd->tx_data_timeout));
- IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(priv,
- "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
- le32_to_cpu(cmd->sleep_interval[0]),
- le32_to_cpu(cmd->sleep_interval[1]),
- le32_to_cpu(cmd->sleep_interval[2]),
- le32_to_cpu(cmd->sleep_interval[3]),
- le32_to_cpu(cmd->sleep_interval[4]));
-
- return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
- sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force)
-{
- int ret;
- bool update_chains;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Don't update the RX chain when chain noise calibration is running */
- update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
- priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
- if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
- return 0;
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
- /* scan complete use sleep_power_next, need to be updated */
- memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
- return 0;
- }
-
- if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
-
- ret = iwl_legacy_set_power(priv, cmd);
- if (!ret) {
- if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
-
- if (priv->cfg->ops->lib->update_chain_flags && update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
- IWL_DEBUG_POWER(priv,
- "Cannot update the power, chain noise "
- "calibration running: %d\n",
- priv->chain_noise_data.state);
-
- memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
- } else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
-
- return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
- struct iwl_powertable_cmd cmd;
-
- iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
- return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
- u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
- priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
- priv->power_data.debug_sleep_level_override = -1;
-
- memset(&priv->power_data.sleep_cmd, 0,
- sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
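iwl_legacy_power_set_mode() above sends a new power table only when the command differs from the cached copy or when the caller forces it. A simplified stand-alone sketch of that "skip when unchanged, unless forced" check, with placeholder types and no firmware interaction.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct powertable_cmd { unsigned flags; unsigned rx_timeout; };

static struct powertable_cmd current_cmd;  /* last command sent to the device */

static int set_power_mode(const struct powertable_cmd *cmd, bool force)
{
	if (!memcmp(&current_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;                      /* nothing changed, nothing to do */
	/* a real driver would send the command to the firmware here */
	memcpy(&current_cmd, cmd, sizeof(*cmd));
	printf("power command sent (flags=0x%x)\n", cmd->flags);
	return 0;
}

int main(void)
{
	struct powertable_cmd cmd = { .flags = 0x1, .rx_timeout = 100 };

	set_power_mode(&cmd, false);   /* sent: differs from the cached copy */
	set_power_mode(&cmd, false);   /* skipped: identical and not forced */
	set_power_mode(&cmd, true);    /* sent again because force is set */
	return 0;
}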
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
- IWL_POWER_INDEX_1,
- IWL_POWER_INDEX_2,
- IWL_POWER_INDEX_3,
- IWL_POWER_INDEX_4,
- IWL_POWER_INDEX_5,
- IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
- struct iwl_powertable_cmd sleep_cmd;
- struct iwl_powertable_cmd sleep_cmd_next;
- int debug_sleep_level_override;
- bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index f4d21ec2249..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
- }
-
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
-
-err_rb:
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
-err_bd:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
-{
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /*
- * All contexts have the same setting here due to it being
- * a module parameter, so OK to check any context.
- */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
- RXON_FILTER_DIS_DECRYPT_MSK)
- return 0;
-
- if (!(fc & IEEE80211_FCTL_PROTECTED))
- return 0;
-
- IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
- switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
- case RX_RES_STATUS_SEC_TYPE_TKIP:
-		/* The uCode got a bad phase 1 key, so it pushes the packet up;
-		 * decryption will be done in SW. */
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_KEY_TTAK)
- break;
-
- case RX_RES_STATUS_SEC_TYPE_WEP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_ICV_MIC) {
- /* bad ICV, the packet is destroyed since the
- * decryption is inplace, drop it */
- IWL_DEBUG_RX(priv, "Packet destroyed\n");
- return -1;
- }
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_DECRYPT_OK) {
- IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
- stats->flag |= RX_FLAG_DECRYPTED;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
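The ring-space computation in the removed iwl_legacy_rx_queue_space() is short but easy to get wrong: the read/write indexes wrap, and two slots are held back so a full queue is never confused with an empty one. A stand-alone version for reference, using an arbitrary example queue size rather than the driver's.

#include <stdio.h>

#define RX_QUEUE_SIZE 256

static int rx_queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;     /* indexes wrapped around the ring */
	s -= 2;                         /* keep a gap so full != empty */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("%d\n", rx_queue_space(10, 250));  /* wrapped case:  14 */
	printf("%d\n", rx_queue_space(100, 40));  /* plain case:    58 */
	return 0;
}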
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index 521b73b527d..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req. This should be set long enough to hear probe responses
- * from more than one AP. */
-#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52 (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52 (10)
-#define IWL_PASSIVE_DWELL_BASE (100)
-#define IWL_CHANNEL_TUNE_TIME 5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
- int ret;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_WANT_SKB,
- };
-
- /* Exit instantly with error when device is not ready
-	 * to receive the scan abort command or is not
-	 * currently performing a hardware scan */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EIO;
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
- /* The scan abort will return 1 for success or
- * 2 for "failure". A failure condition can be
- * due to simply not being in an active scan which
-		 * can occur if we send the scan abort before
-		 * the microcode has notified us that a scan is
- * completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
- ret = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
- return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
- /* check if scan was requested from mac80211 */
- if (priv->scan_request) {
- IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
- ieee80211_scan_completed(priv->hw, aborted);
- }
-
- priv->scan_vif = NULL;
- priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
- return;
- }
-
- IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
- return;
- }
-
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
- return;
- }
-
- ret = iwl_legacy_send_scan_abort(priv);
- if (ret) {
- IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
- iwl_legacy_force_scan_end(priv);
- } else
- IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
-}
-
-/**
- * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
- IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
- iwl_legacy_do_scan_abort(priv);
-
- while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
- msleep(20);
- }
-
- return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
- priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
- IWL_DEBUG_SCAN(priv, "Scan start: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan ch.res: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
- IWL_DEBUG_SCAN(priv,
- "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
- scan_notif->scanned_channels,
- scan_notif->tsf_low,
- scan_notif->tsf_high, scan_notif->status);
-
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
- jiffies_to_msecs(jiffies - priv->scan_start));
-
- queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
- /* scan handlers */
- priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
- priv->rx_handlers[SCAN_START_NOTIFICATION] =
- iwl_legacy_rx_scan_start_notif;
- priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
- iwl_legacy_rx_scan_results_notif;
- priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
- iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
-{
- if (band == IEEE80211_BAND_5GHZ)
- return IWL_ACTIVE_DWELL_TIME_52 +
- IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
- else
- return IWL_ACTIVE_DWELL_TIME_24 +
- IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx;
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
- if (iwl_legacy_is_any_associated(priv)) {
- /*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
- */
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
- }
- }
-
- return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (WARN_ON(!priv->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Request scan called when driver not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_SCAN(priv,
- "Multiple concurrent scan requests in parallel.\n");
- return -EBUSY;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
- return -EBUSY;
- }
-
- IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
- set_bit(STATUS_SCANNING, &priv->status);
- priv->scan_start = jiffies;
-
- ret = priv->cfg->ops->utils->request_scan(priv, vif);
- if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
- return ret;
- }
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
- priv->scan_band = req->channels[0]->band;
-
- ret = iwl_legacy_scan_initiate(priv, vif);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, scan_check.work);
-
- IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
-	/* If we get here, the firmware did not finish the scan and is
-	 * most likely in bad shape, so we don't bother to send an abort
-	 * command; just force scan complete towards mac80211 */
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our indirect SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
- IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
-	/* We keep the scan_check work queued in case the firmware does not
-	 * report back a scan completed notification */
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_legacy_is_ready_rf(priv))
- goto out;
-
- /*
- * We do not commit power settings while scan is pending,
- * do it now if the settings changed.
- */
- iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
- priv->cfg->ops->utils->post_scan(priv);
-
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
- INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
- INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->abort_scan);
- cancel_work_sync(&priv->scan_completed);
-
- if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a472310..85fe48e520f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
*
*****************************************************************************/
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
enum { /* ieee80211_basic_report.map */
IEEE80211_BASIC_MAP_BSS = (1 << 0),
IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
deleted file mode 100644
index f10df3e2813..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-#include <linux/export.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv,
- "ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
- "STA id %u addr %pM already present"
- " in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- } else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- }
-}
-
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
-{
- u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
- int ret = -EIO;
-
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_legacy_sta_ucode_activate(priv, sta_id);
- ret = 0;
- break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
- break;
- case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv,
- "Adding station %d failed, no block ack resource.\n",
- sta_id);
- break;
- case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
- break;
- default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
- break;
- }
-
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_legacy_addsta_cmd *addsta =
- (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
-
- iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags)
-{
- struct iwl_rx_packet *pkt = NULL;
- int ret = 0;
- u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
- .flags = flags,
- .data = data,
- };
- u8 sta_id __maybe_unused = sta->sta.sta_id;
-
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
- sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
-
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_legacy_add_sta_callback;
- else {
- cmd.flags |= CMD_WANT_SKB;
- might_sleep();
- }
-
- cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret || (flags & CMD_ASYNC))
- return ret;
-
- if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
-
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
- u8 mimo_ps_mode;
-
- if (!sta || !sta_ht_inf->ht_supported)
- goto done;
-
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
- "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
- "dynamic" : "disabled");
-
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
- break;
- }
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
-}
-
-/**
- * iwl_legacy_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
- struct iwl_station_entry *station;
- int i;
- u8 sta_id = IWL_INVALID_STATION;
- u16 rate;
-
- if (is_ap)
- sta_id = ctx->ap_sta_id;
- else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
-
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
- sta_id = i;
- }
-
- /*
- * These two conditions have the same outcome, but keep them
- * separate
- */
- if (unlikely(sta_id == IWL_INVALID_STATION))
- return sta_id;
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- return sta_id;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- return sta_id;
- }
-
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
- sta_id, addr);
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct iwl_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
-
- /*
- * OK to call unconditionally, since local stations (IBSS BSSID
- * STA and broadcast STA) pass in a NULL sta, and mac80211
- * doesn't allow HT IBSS.
- */
- iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
-
- /* 3945 only */
- rate = (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
- /* Turn on both antennas for the station... */
- station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
-
- return sta_id;
-
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_legacy_add_station_common -
- */
-int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r)
-{
- unsigned long flags_spin;
- int ret = 0;
- u8 sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- *sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
- addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- *sta_id_r = sta_id;
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
-
-/**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
-	/* uCode must be active and the driver must be inactive */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
- IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id,
- bool temporary)
-{
- struct iwl_rx_packet *pkt;
- int ret;
-
- unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = sizeof(struct iwl_rem_sta_cmd),
- .flags = CMD_SYNC,
- .data = &rm_sta_cmd,
- };
-
- memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
- rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
- cmd.flags |= CMD_WANT_SKB;
-
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
- if (!ret) {
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_legacy_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-/**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
- */
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr)
-{
- unsigned long flags;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Unable to remove station %pM, device not ready.\n",
- addr);
- /*
- * It is typical for stations to be removed when we are
- * going down. Return success since device will be down
- * soon anyway
- */
- return 0;
- }
-
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
- addr);
- goto out_err;
- }
-
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
- addr);
- goto out_err;
- }
-
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
- }
-
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
- priv->num_stations--;
-
- BUG_ON(priv->num_stations < 0);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
-out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
-
-/**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int i;
- unsigned long flags_spin;
- bool cleared = false;
-
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
- continue;
-
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv,
- "Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- cleared = true;
- }
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- if (!cleared)
- IWL_DEBUG_INFO(priv,
- "No active stations found to be cleared\n");
-}
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
-
-/**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by the driver, but not present in the ucode, are
- * restored.
- *
- * Function sleeps.
- */
-void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
- int i;
- bool found = false;
- int ret;
- bool send_lq;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Not ready yet, not restoring any stations.\n");
- return;
- }
-
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
- continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
- found = true;
- }
- }
-
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &=
- ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &=
- ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- /*
- * Rate scaling has already been initialized, send
- * current LQ command
- */
- if (send_lq)
- iwl_legacy_send_lq_cmd(priv, ctx, &lq,
- CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- }
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... no stations to be restored.\n");
- else
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... complete.\n");
-}
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
-
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
- return i;
-
- return WEP_INVALID_OFFSET;
-}
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
-
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
- continue;
-
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- BUG_ON(priv->num_stations < 0);
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
- lq->general_params.single_stream_ant_msk,
- lq->general_params.dual_stream_ant_msk);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
- i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens that when an HT rate has been in use and we
- * lose connectivity with the AP, mac80211 will first tell us that the
- * current channel is not HT anymore before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent the driver from sending an LQ command between the
- * time the RXON flags are updated and when the LQ command is updated.
- */
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
-
- if (ctx->ht.enabled)
- return true;
-
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
- ctx->active.channel);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
- RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
- i);
- return false;
- }
- }
- return true;
-}
-
-/**
- * iwl_legacy_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * In the special case in which init is set, we call a callback to clear
- * the state indicating that station creation is in progress.
- */
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
- int ret = 0;
- unsigned long flags_spin;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- iwl_legacy_dump_lq_cmd(priv, lq);
- BUG_ON(init && (cmd.flags & CMD_ASYNC));
-
- if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
- ret = iwl_legacy_send_cmd(priv, &cmd);
- else
- ret = -EINVAL;
-
- if (cmd.flags & CMD_ASYNC)
- return ret;
-
- if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete,"
- " clearing sta addition status for sta %d\n",
- lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
-
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
- const u8 sta_id,
- const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq,
- u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that if we are
- * coming from a hardware restart, mac80211 will be able to
- * reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index c0dfb1a4e96..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- u32 reg = 0;
- int txq_id = txq->q.id;
-
- if (txq->need_update == 0)
- return;
-
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (q->n_bd == 0)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- int i;
-
- if (q->n_bd == 0)
- return;
-
- while (q->read_ptr != q->write_ptr) {
- i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-
- i = q->n_window;
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_cmd_queue_unmap(priv);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
- txq->tfds, txq->q.dma_addr);
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For a Tx queue, there are low mark and high mark limits. If, after queuing
- * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
- * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
- * > high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
-{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
-
- /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
- * and iwl_legacy_queue_dec_wrap are broken. */
- BUG_ON(!is_power_of_2(count));
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_legacy_get_cmd_index is broken. */
- BUG_ON(!is_power_of_2(slots_num));
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = q->read_ptr = 0;
-
- return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
- struct iwl_tx_queue *txq, u32 id)
-{
- struct device *dev = &priv->pci_dev->dev;
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = id;
-
- return 0;
-
- error:
- kfree(txq->txb);
- txq->txb = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int i, len;
- int ret;
- int actual_slots = slots_num;
-
- /*
- * Alloc buffer array for commands (Tx or other types of commands).
- * For the command queue (#4/#9), allocate command space + one big
-	 * command for scan, since the scan command is very large; the system will
- * not have two scans at the same time, so only one is needed.
- * For normal Tx queues (all other queues), no super-size command
- * space is needed.
- */
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto out_free_arrays;
-
- len = sizeof(struct iwl_device_cmd);
- for (i = 0; i < actual_slots; i++) {
- /* only happens for cmd queue */
- if (i == slots_num)
- len = IWL_MAX_CMD_SIZE;
-
- txq->cmd[i] = kmalloc(len, GFP_KERNEL);
- if (!txq->cmd[i])
- goto err;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
- if (ret)
- goto err;
-
- txq->need_update = 0;
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-
- return 0;
-err:
- for (i = 0; i < actual_slots; i++)
- kfree(txq->cmd[i]);
-out_free_arrays:
- kfree(txq->meta);
- kfree(txq->cmd);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
- txq->need_update = 0;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data point
- * @cmd: a point to the ucode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the index (> 0) of the command in the
- * command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- unsigned long flags;
- int len;
- u32 idx;
- u16 fix_size;
-
- cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
- fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
-	/* If any of the command structures end up being larger than
-	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
-	 * we will need to increase the size of the TFD entries.
-	 * Also, check that the command buffer does not exceed the size
-	 * of device_cmd and max_cmd_size. */
- BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->flags & CMD_SIZE_HUGE));
- BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
- if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
- IWL_ERR(priv, "Restarting adapter due to command queue full\n");
- queue_work(priv->workqueue, &priv->restart);
- return -ENOSPC;
- }
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
-
- if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return -ENOSPC;
- }
-
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
- out_meta->flags = cmd->flags | CMD_MAPPED;
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
-
- out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
- /* At this point, the out_cmd now has all of the incoming cmd
- * information */
-
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
- if (cmd->flags & CMD_SIZE_HUGE)
- out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (out_cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv,
- "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- break;
- default:
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- }
-#endif
- txq->need_update = 1;
-
- if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
- phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- fix_size, PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, fix_size);
-
- trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
- fix_size, cmd->flags);
-
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, fix_size, 1,
- U32_PAD(cmd->len));
-
- /* Increment and update queue's write index */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return idx;
-}
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
- int idx, int cmd_idx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- idx, q->n_bd, q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
- queue_work(priv->workqueue, &priv->restart);
- }
-
- }
-}
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it, the callback
- * will be executed. The attached skb (if present) will only be freed
- * if the callback returns 1.
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- unsigned long flags;
-
- /* If a Tx command is being handled and it isn't in the actual
-	 * command queue then a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
- return;
- }
-
- cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
- cmd = txq->cmd[cmd_index];
- meta = &txq->meta[cmd_index];
-
- txq->time_stamp = jiffies;
-
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up(&priv->wait_command_queue);
- }
-
- /* Mark as unmapped */
- meta->flags = 0;
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a54..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
- .sw_crypto = 1,
- .restart_fw = 1,
- .disable_hw_scan = 1,
- /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN - Force MAIN antenna
- * IWL_ANTENNA_AUX - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- switch (iwl3945_mod_params.antenna) {
- case IWL_ANTENNA_DIVERSITY:
- return 0;
-
- case IWL_ANTENNA_MAIN:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
- case IWL_ANTENNA_AUX:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- }
-
- /* bad antenna selector value */
- IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
- iwl3945_mod_params.antenna);
-
- return 0; /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- int ret;
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = keyconf->keyidx;
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
- ret = iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret = 0;
-
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
- int ret = -EOPNOTSUPP;
-
- return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *key)
-{
- if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104)
- return -EOPNOTSUPP;
-
- IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
- return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl3945_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
-
- if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- unsigned int frame_size;
- int rc;
- u8 rate;
-
- frame = iwl3945_get_free_frame(priv);
-
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- rate = iwl_legacy_get_lowest_plcp(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl3945_free_frame(priv, frame);
-
- return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
- if (priv->_3945.shared_virt)
- dma_free_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- priv->_3945.shared_virt,
- priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_device_cmd *cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
- tx_cmd->sec_ctl = 0;
-
- switch (keyinfo->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
- (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
- memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", info->control.hw_key->hw_key_idx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
- break;
- }
-}
-
-/*
- * Build the basic (non-rate, non-crypto) fields of the REPLY_TX command.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr, u8 std_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- __le32 tx_flags = tx_cmd->tx_flags;
- __le16 fc = hdr->frame_control;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl3945_tx_cmd *tx_cmd;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_queue *q = NULL;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, hdr_len;
- u8 id;
- u8 unicast;
- u8 sta_id;
- u8 tid = 0;
- __le16 fc;
- u8 wait_write_ptr = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS],
- info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop;
- }
-
- IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop;
- }
-
- /* Descriptor for chosen Tx queue */
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if ((iwl_legacy_queue_space(q) < q->high_mark))
- goto drop;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /* Init first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
- tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(*tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so the driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- if (info->control.hw_key)
- iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_legacy_update_stats(priv, true, fc, len);
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl3945_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- len = (len + 3) & ~3;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- len, PCI_DMA_TODEVICE);
- /* we do not map meta data ... so we can safely access the address to
- * provide to the unmap command */
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, U32_PAD(len));
- }
-
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark)
- && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- iwl_legacy_stop_queue(priv, txq);
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
-drop:
- return -1;
-}
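The comment in the function above notes that the chosen Tx queue and TFD write index are packed into the command header's sequence field so the Tx response can later be routed back to the right slot. Below is a minimal standalone sketch of that packing; the 5-bit queue / 8-bit index field widths and the DEMO_* macro names are assumptions made for illustration only, not the driver's actual QUEUE_TO_SEQ()/INDEX_TO_SEQ() definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: queue id in bits 8..12, TFD index in bits 0..7. */
#define DEMO_QUEUE_TO_SEQ(q)  (((q) & 0x1f) << 8)
#define DEMO_INDEX_TO_SEQ(i)  ((i) & 0xff)
#define DEMO_SEQ_TO_QUEUE(s)  (((s) >> 8) & 0x1f)
#define DEMO_SEQ_TO_INDEX(s)  ((s) & 0xff)

int main(void)
{
	uint16_t seq = DEMO_QUEUE_TO_SEQ(3) | DEMO_INDEX_TO_SEQ(42);

	/* The Tx response echoes the sequence back; unpacking it recovers
	 * the queue and write pointer the frame was placed at. */
	printf("seq=0x%04x queue=%u index=%u\n", seq,
	       (unsigned)DEMO_SEQ_TO_QUEUE(seq), (unsigned)DEMO_SEQ_TO_INDEX(seq));
	assert(DEMO_SEQ_TO_QUEUE(seq) == 3 && DEMO_SEQ_TO_INDEX(seq) == 42);
	return 0;
}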
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl_spectrum_cmd spectrum;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_legacy_usecs_to_beacons(priv,
- le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
- le16_to_cpu(ctx->timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- spectrum.start_time =
- iwl_legacy_add_beacon_time(priv,
- priv->_3945.last_beacon_time, add_time,
- le16_to_cpu(ctx->timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (pkt->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
- pkt->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- iwl3945_disable_events(priv);
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
- IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = beacon->beacon_notify_hdr.rate;
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
- priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
- /*
- * The same handler is used both for the REPLY to a discrete
- * statistics request from the host and for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
- priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
- /* Set up hardware specific Rx handlers */
- iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl3945_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
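As a quick illustration of the READ/WRITE index convention spelled out in the comment above, here is a self-contained toy model of the empty and full tests. The 256-entry queue size and the demo_* helper names are assumptions made only for this sketch, not values or helpers taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the shared READ/WRITE index convention described above.
 * The queue size (256) is an assumption for illustration only. */
#define DEMO_RX_QUEUE_SIZE 256u

static bool demo_rxq_empty(unsigned read, unsigned write)
{
	/* "empty (no good data) if WRITE = READ - 1" */
	return write == (read + DEMO_RX_QUEUE_SIZE - 1) % DEMO_RX_QUEUE_SIZE;
}

static bool demo_rxq_full(unsigned read, unsigned write)
{
	/* "full if WRITE = READ" */
	return write == read;
}

int main(void)
{
	/* After init: READ at 0, WRITE at READ - 1 (wrapped) -> empty. */
	printf("init: empty=%d full=%d\n",
	       demo_rxq_empty(0, DEMO_RX_QUEUE_SIZE - 1),
	       demo_rxq_full(0, DEMO_RX_QUEUE_SIZE - 1));

	/* Both indexes equal -> full: the firmware has nowhere left to write. */
	printf("equal: full=%d\n", demo_rxq_full(7, 7));
	return 0;
}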
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if ((rxq->write_actual != (rxq->write & ~0x7))
- || (abs(rxq->write - rxq->read) > 7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl3945_rx_allocate - Allocate pages and move buffers from rx_used to rx_free
- *
- * A fresh receive page is allocated for each slot moved to rx_free.
- *
- * The queue itself is then restocked via iwl3945_rx_queue_restock() by the
- * callers, iwl3945_rx_replenish() (scheduled work, except during
- * initialization) and iwl3945_rx_replenish_now().
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- break;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Mark the queue as having processed and used all buffers, but not
- * yet restocked with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
- struct iwl_priv *priv = data;
- unsigned long flags;
-
- iwl3945_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl3945_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
- iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the page field of the buffers in 'pool' is kept accurate.
- * If a page has been detached, the pool entry must have its page set to NULL.
- * This free routine walks the list of pool entries and, if the page is
- * non-NULL, it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio/10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
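A short standalone check of the voltage-ratio model behind ratio2dB[]: the table approximates 20*log10(ratio), which is why a 2:1 ratio maps to about 6 dB, 10:1 to 20 dB, 100:1 uses the "+20 dB" shortcut to reach about 40 dB, and anything from 1000:1 up is capped at 60 dB. The program below is purely illustrative and not part of the driver.

#include <math.h>
#include <stdio.h>

int main(void)
{
	int ratios[] = { 2, 10, 100, 1000 };

	/* Compare the exact 20*log10() values with the table entries above:
	 * ratio2dB[2] = 6, ratio2dB[10] = 20, 100:1 -> ~40 dB, 1000:1 -> 60 dB cap. */
	for (unsigned i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("ratio %4d:1 -> %.1f dB\n",
		       ratios[i], 20.0 * log10((double)ratios[i]));
	return 0;
}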
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty = 0;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* calculate total frames that need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl3945_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt), because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl3945_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl3945_rx_replenish_now(priv);
- else
- iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush the pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
- switch (i) {
- case 1:
- return "FAIL";
- case 2:
- return "BAD_PARAM";
- case 3:
- return "BAD_CHECKSUM";
- case 4:
- return "NMI_INTERRUPT";
- case 5:
- return "SYSASSERT";
- case 6:
- return "FATAL_ERROR";
- }
-
- return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 i;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
-
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
- return;
- }
-
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- IWL_ERR(priv, "Desc Time asrtPC blink2 "
- "ilink1 nmiPC Line\n");
- for (i = ERROR_START_OFFSET;
- i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
- i += ERROR_ELEM_SIZE) {
- desc = iwl_legacy_read_targ_mem(priv, base + i);
- time =
- iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
- blink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
- blink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
- ilink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
- ilink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
- data1 =
- iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
- IWL_ERR(priv,
- "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
- ilink1, ilink2, data1);
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
- 0, blink1, blink2, ilink1, ilink2);
- }
-}
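For reference, the loop above reads the error log as one u32 entry count followed by seven-u32 records starting at ERROR_START_OFFSET. The sketch below restates that layout as a host-side struct; the struct and its field names are inferred from the read offsets used in iwl3945_dump_nic_error_log() and are an illustration, not an authoritative firmware definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative view of one error-log record as walked above. */
struct demo_error_elem {
	uint32_t desc;
	uint32_t time;
	uint32_t blink1;
	uint32_t blink2;
	uint32_t ilink1;
	uint32_t ilink2;
	uint32_t data1;
};

int main(void)
{
	/* ERROR_START_OFFSET = 1 * sizeof(u32), ERROR_ELEM_SIZE = 7 * sizeof(u32) */
	size_t start = 1 * sizeof(uint32_t);
	size_t elem = sizeof(struct demo_error_elem);

	printf("record size = %zu bytes (expect 28)\n", elem);
	printf("record 0 at offset 0x%zx, record 1 at 0x%zx\n", start, start + elem);
	return 0;
}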
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR39_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR39_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
- "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- "Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl3945_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "Tx interrupt\n");
- priv->isr_stats.tx++;
-
- iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
- (FH39_SRVC_CHNL), 0x0);
- handled |= CSR_INT_BIT_FH_TX;
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~priv->inta_mask) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch,
- struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- scan_ch->channel = chan->hw_value;
-
- ch_info = iwl_legacy_get_channel_info(priv, band,
- scan_ch->channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
- continue;
- }
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* If passive, set up for auto-switch
- * and use long active_dwell time.
- */
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
- scan_ch->type = 0; /* passive */
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
- } else {
- scan_ch->type = 1; /* active */
- }
-
- /* Set direct probe bits. These may be used both for active
- * scan channels (probes get sent right away),
- * and for passive channels (probes get sent only after
- * hearing a clear Rx packet). */
- if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- if (n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- } else {
- /* uCode v1 does not allow setting direct probe bits on
- * a passive channel. */
- if ((scan_ch->type & 1) && n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- }
-
- /* Set txpower levels to defaults */
- scan_ch->tpc.dsp_atten = 110;
- /* scan_pwr_info->tpc.dsp_atten; */
-
- /*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else {
- scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- }
-
- IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
- scan_ch->channel,
- (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
- (scan_ch->type & 1) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwl3945_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- u32 save_len = len;
- int rc = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL39_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- rc = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int rc = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- i, val, *image);
-#endif
- rc = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int rc = 0;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_full(priv, image, len);
-
- return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{ \
- return le32_to_cpu(ucode->v1.item); \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
- return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
- return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
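The fixed 24-byte header size returned by iwl3945_ucode_get_header_size() corresponds to the six u32 fields exposed by the accessors above. The struct below is a sketch of that v1 header as implied by those getters, with the image payloads following it back to back; treat the layout as an assumption for illustration rather than the canonical iwl_ucode_header definition.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the v1 uCode file header implied by the accessors above:
 * six little-endian u32 fields, hence the fixed 24-byte header size. */
struct demo_ucode_header_v1 {
	uint32_t ver;
	uint32_t inst_size;
	uint32_t data_size;
	uint32_t init_size;
	uint32_t init_data_size;
	uint32_t boot_size;
	/* runtime inst, runtime data, init inst, init data and boot
	 * images follow immediately after these fields */
};

int main(void)
{
	printf("header size = %zu bytes (expect 24)\n",
	       sizeof(struct demo_ucode_header_v1));
	return 0;
}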
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
- const struct iwl_ucode_header *ucode;
- int ret = -EINVAL, index;
- const struct firmware *ucode_raw;
- /* firmware file name contains uCode/driver compatibility version */
- const char *name_pre = priv->cfg->fw_name_pre;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- char buf[25];
- u8 *src;
- size_t len;
- u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
- /* Ask kernel firmware_class module to get the boot firmware off disk.
- * request_firmware() is synchronous, file is in memory on return. */
- for (index = api_max; index >= api_min; index--) {
- sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
- ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
- if (ret < 0) {
- IWL_ERR(priv, "%s firmware file req failed: %d\n",
- buf, ret);
- if (ret == -ENOENT)
- continue;
- else
- goto error;
- } else {
- if (index < api_max)
- IWL_ERR(priv, "Loaded firmware %s, "
- "which is deprecated. "
- " Please use API v%u instead.\n",
- buf, api_max);
- IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
- "(%zd bytes) from disk\n",
- buf, ucode_raw->size);
- break;
- }
- }
-
- if (ret < 0)
- goto error;
-
- /* Make sure that we got at least our header! */
- if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
- IWL_ERR(priv, "File size way too small!\n");
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = iwl3945_ucode_get_inst_size(ucode);
- data_size = iwl3945_ucode_get_data_size(ucode);
- init_size = iwl3945_ucode_get_init_size(ucode);
- init_data_size = iwl3945_ucode_get_init_data_size(ucode);
- boot_size = iwl3945_ucode_get_boot_size(ucode);
- src = iwl3945_ucode_get_data(ucode);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
-
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv, "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- priv->ucode_ver = 0;
- ret = -EINVAL;
- goto err_release;
- }
- if (api_ver != api_max)
- IWL_ERR(priv, "Firmware has old API version. Expected %u, "
- "got %u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
-
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %zd does not match expected size\n",
- ucode_raw->size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Verify that uCode images will fit in card's SRAM */
- if (inst_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
- inst_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- if (data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
- data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode init instr len %d too large to fit in\n",
- init_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode init data len %d too large to fit in\n",
- init_data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (boot_size > IWL39_MAX_BSM_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode boot instr len %d too large to fit in\n",
- boot_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode instr len %zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl3945_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode data len %zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %zd\n", len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
- }
-
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %zd\n", len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
- }
-
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) boot instr len %zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
-
-	/* We have our copies now, allow the OS to release its copy */
- release_firmware(ucode_raw);
- return 0;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- ret = -ENOMEM;
- iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
- release_firmware(ucode_raw);
-
- error:
- return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
-
- /* bits 31:0 for 3945 */
- pinst = priv->ucode_code.p_addr;
- pdata = priv->ucode_data_backup.p_addr;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
-	/* Inst byte count must be set up last; bit 31 signals uCode
-	 * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl3945_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
- int thermal_spin = 0;
- u32 rfkill;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
- /* Initialize uCode has loaded Runtime uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
- IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
- if (rfkill & 0x1) {
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-		/* if RFKILL is not on, wait for the thermal
-		 * sensor in the adapter to kick in */
- while (iwl3945_hw_get_temperature(priv) == 0) {
- thermal_spin++;
- udelay(10);
- }
-
- if (thermal_spin)
- IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
- thermal_spin * 10);
- } else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- /* After the ALIVE response, we can send commands to 3945 uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK_3945;
-
- iwl_legacy_power_update_mode(priv, true);
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- struct iwl3945_rxon_cmd *active_rxon =
- (struct iwl3945_rxon_cmd *)(&ctx->active);
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- /* Initialize our rx_config data */
- iwl_legacy_connection_init_rx_config(priv, ctx);
- }
-
- /* Configure Bluetooth device coexistence support */
- iwl_legacy_send_bt_config(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl3945_commit_rxon(priv, ctx);
-
- iwl3945_reg_txpower_periodic(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be set
-	 * to prevent the timer from being re-armed */
- del_timer_sync(&priv->watchdog);
-
- /* Station information will now be cleared in device */
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl3945_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
-	/* If we have not previously called iwl3945_init() then
-	 * clear all bits but RF Kill, GEO_CONFIGURED and EXIT_PENDING, and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
-	/* ...otherwise clear out all the status bits except RF Kill, GEO_CONFIGURED,
-	 * FW_ERROR and EXIT_PENDING, and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
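-	/* Each test_bit() above evaluates to 0 or 1; shifting it back to its
-	 * own bit position and OR-ing the results rebuilds a mask containing
-	 * only the bits we want to preserve, so the &= clears every other
-	 * status bit. */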
-
- iwl3945_hw_txq_ctx_stop(priv);
- iwl3945_hw_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl3945_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx,
- iwlegacy_bcast_addr, false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
- int rc, i;
-
- rc = iwl3945_alloc_bcast_station(priv);
- if (rc)
- return rc;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bring up\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else {
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- rc = iwl3945_hw_nic_init(priv);
- if (rc) {
-		IWL_ERR(priv, "Unable to init nic\n");
- return rc;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- /* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(STATUS_RF_KILL_HW, &priv->status))
- return 0;
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- rc = priv->cfg->ops->lib->load_ucode(priv);
-
- if (rc) {
- IWL_ERR(priv,
- "Unable to set up bootstrap uCode: %d\n", rc);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl3945_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl3945_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* We tried to restart and configure the device for as long as our
-	 * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * The 3945 cannot interrupt the driver when the hardware RF kill switch
- * toggles; the driver must poll the CSR_GP_CNTRL register for changes.
- * This register *is* readable even when the device has been SW_RESET into
- * low power mode (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
- bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
- bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
- & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
- if (new_rfkill != old_rfkill) {
- if (new_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
- IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
- new_rfkill ? "disable radio" : "enable radio");
- }
-
-	/* Keep this running, even if the radio is now enabled. It will be
-	 * cancelled in mac_start() if the system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl3945_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl3945_scan_cmd *scan;
- u8 n_probes = 0;
- enum ieee80211_band band;
- bool is_active = false;
- int ret;
- u16 len;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
- /*
- * suspend time format:
- * 0-19: beacon interval in usec (time before exec.)
- * 20-23: 0
- * 24-31: number of beacons (suspend between channels)
- */
-
- extra = (suspend_time / interval) << 24;
- scan_suspend_time = 0xFF0FFFFF &
- (extra | ((suspend_time % interval) * 1024));
-
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
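-	/* Worked example of the suspend_time encoding above: with
-	 * suspend_time = 100 and a beacon interval of 100,
-	 * extra = (100 / 100) << 24 = 0x01000000 (pause for one beacon between
-	 * channels) and the usec remainder is (100 % 100) * 1024 = 0, giving
-	 * scan_suspend_time = 0x01000000. */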
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
- /* We don't build a direct scan probe request; the uCode will do
- * that based on the direct_mask added to each channel entry */
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- /* flags + rate selection */
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
- break;
- case IEEE80211_BAND_5GHZ:
- scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
-	 * If active scanning is requested but a certain channel
- * is marked passive, we can do active scanning if we
- * detect transmissions.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
-
- len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
- vif->addr, priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(len);
-
- /* select Rx antennas */
- scan->flags |= iwl3945_get_antenna_flags(priv);
-
-	scan->channel_count = iwl3945_get_channels_for_scan(priv, band,
-						is_active, n_probes,
-						(void *)&scan->data[len], vif);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl3945_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
- return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
- mutex_unlock(&priv->mutex);
- iwl3945_down(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl3945_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_rx_replenish(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
- int rc = 0;
- struct ieee80211_conf *conf = NULL;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (!ctx->vif || !priv->is_open)
- return;
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
- if (ctx->vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (ctx->vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl3945_commit_rxon(priv, ctx);
-
- switch (ctx->vif->type) {
- case NL80211_IFTYPE_STATION:
- iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl3945_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, ctx->vif->type);
- break;
- }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
-
- /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
- * ucode filename and max sizes are card-specific. */
-
- if (!priv->ucode_code.len) {
- ret = iwl3945_read_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Could not read microcode: %d\n", ret);
- mutex_unlock(&priv->mutex);
- goto out_release_irq;
- }
- }
-
- ret = __iwl3945_up(priv);
-
- mutex_unlock(&priv->mutex);
-
- if (ret)
- goto out_release_irq;
-
- IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
- /* Wait for START_ALIVE from ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv,
- "Wait for START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- ret = -ETIMEDOUT;
- goto out_release_irq;
- }
- }
-
- /* ucode is running and will send rfkill notifications,
- * no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->_3945.rfkill_poll);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-
-out_release_irq:
- priv->is_open = 0;
- IWL_DEBUG_MAC80211(priv, "leave - failed\n");
- return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open) {
- IWL_DEBUG_MAC80211(priv, "leave - skip\n");
- return;
- }
-
- priv->is_open = 0;
-
- iwl3945_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl3945_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int rc = 0;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
- }
- iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = 0;
- u8 sta_id = IWL_INVALID_STATION;
- u8 static_key;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwl3945_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
-	 * To support IBSS RSN, don't program group keys in IBSS; the
- * hardware will then not attempt to decrypt the frames.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
- if (!static_key) {
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
- }
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- switch (cmd) {
- case SET_KEY:
- if (static_key)
- ret = iwl3945_set_static_key(priv, key);
- else
- ret = iwl3945_set_dynamic_key(priv, key, sta_id);
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (static_key)
- ret = iwl3945_remove_static_key(priv);
- else
- ret = iwl3945_clear_sta_key_info(priv, sta_id);
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
- ret = iwl_legacy_add_station_common(priv,
- &priv->contexts[IWL_RXON_CTX_BSS],
- sta->addr, is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl3945_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
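-	/* For example, if mac80211 passes FIF_CONTROL in *total_flags, CHK()
-	 * queues RXON_FILTER_CTL2HOST_MSK in filter_or; otherwise the same bit
-	 * is queued in filter_nand and cleared from the staging RXON below. */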
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
-	 * Not committing directly because the hardware may be performing a scan;
-	 * but even if the hw is ready, committing here breaks for some reason.
-	 * We'll eventually commit the filter flags change anyway.
- */
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
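-/*
- * Illustrative usage (the PCI address is a placeholder; the attribute group
- * is registered on the PCI device's kobject, see sysfs_create_group() below):
- *
- *   echo 0x1 > /sys/bus/pci/devices/0000:03:00.0/debug_level
- *   cat /sys/bus/pci/devices/0000:03:00.0/debug_level
- *
- * The mask value follows the level definitions referred to above.
- */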
-static ssize_t iwl3945_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
- u32 val;
-
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
-		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else
- iwl3945_hw_reg_set_txpower(priv, val);
-
- return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
-		   iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 flags = simple_strtoul(buf, NULL, 0);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
- flags);
- ctx->staging.flags = cpu_to_le32(flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO,
-		   iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- ctx->staging.filter_flags =
- cpu_to_le32(filter_flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
- iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_spectrum_notification measure_report;
- u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *)&measure_report;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!(priv->measurement_status & MEASUREMENT_READY)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- memcpy(&measure_report, &priv->measure_report, size);
- priv->measurement_status = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_measurement_params params = {
- .channel = le16_to_cpu(ctx->active.channel),
- .start_time = cpu_to_le64(priv->_3945.last_tsf),
- .duration = cpu_to_le16(1),
- };
- u8 type = IWL_MEASURE_BASIC;
- u8 buffer[32];
- u8 channel;
-
- if (count) {
- char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
- channel = simple_strtoul(p, NULL, 0);
- if (channel)
- params.channel = channel;
-
- p = buffer;
- while (*p && *p != ' ')
- p++;
- if (*p)
- type = simple_strtoul(p + 1, NULL, 0);
- }
-
- IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
- "channel %d (for '%s')\n", type, params.channel, buf);
- iwl3945_get_measurement(priv, &params, type);
-
- return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- priv->retry_rate = simple_strtoul(buf, NULL, 0);
- if (priv->retry_rate <= 0)
- priv->retry_rate = 1;
-
- return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
- iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
- struct device_attribute *attr, char *buf)
-{
-	/* none of this really belongs in sysfs anyway */
- return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
- int ant;
-
- if (count == 0)
- return 0;
-
- if (sscanf(buf, "%1i", &ant) != 1) {
- IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
- return count;
- }
-
- if ((ant >= 0) && (ant <= 2)) {
- IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
- iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
- } else
- IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
- return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO,
-		   iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
- return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
-
- if (p[0] == '1')
- iwl3945_dump_nic_error_log(priv);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl3945_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- iwl3945_hw_setup_deferred_work(priv);
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
- iwl3945_hw_cancel_deferred_work(priv);
-
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
- &dev_attr_antenna.attr,
- &dev_attr_channels.attr,
- &dev_attr_dump_errors.attr,
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_measurement.attr,
- &dev_attr_retry_rate.attr,
- &dev_attr_status.attr,
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
- .tx = iwl3945_mac_tx,
- .start = iwl3945_mac_start,
- .stop = iwl3945_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl3945_configure_filter,
- .set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
- int ret;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- priv->retry_rate = 1;
- priv->beacon_skb = NULL;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
- IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
- eeprom->version);
- ret = -EINVAL;
- goto err;
- }
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- /* Set up txpower settings in driver for all channels */
- if (iwl3945_txpower_set_from_eeprom(priv)) {
- ret = -EIO;
- goto err_free_channel_map;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST 200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
-
- hw->rate_control_algorithm = "iwl-3945-rs";
- hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- hw->wiphy->interface_modes =
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
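-	/* i.e. 200 - 24 (802.11 management header) - 2 (empty SSID element)
-	 * = 174 bytes left in the probe request for user-supplied IEs */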
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
-
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- struct iwl3945_eeprom *eeprom;
- unsigned long flags;
-
- /***********************
- * 1. Allocating HW data
- * ********************/
-
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- hw = iwl_legacy_alloc_all(cfg);
- if (hw == NULL) {
- pr_err("Can not allocate network device\n");
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
- /* 3945 has only one valid context */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- /*
- * Disabling hardware scan means that mac80211 will perform scans
-	 * "the hard way", rather than using the device's scan.
- */
- if (iwl3945_mod_params.disable_hw_scan) {
- IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
- iwl3945_hw_ops.hw_scan = NULL;
- }
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /***************************
- * 2. Initializing PCI bus
- * *************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
-
- pci_set_drvdata(pdev, priv);
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- /***********************
- * 3. Read REV Register
- * ********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, 0x41, 0x00);
-
-	/* these spin locks will be used in apm_ops.init and EEPROM access,
-	 * so we should init them now
- */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /***********************
- * 4. Read EEPROM
- * ********************/
-
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- /* MAC Address location in EEPROM same for 3945/4965 */
- eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
- SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
- /***********************
- * 5. Setup HW Constants
- * ********************/
- /* Device-specific setup */
- if (iwl3945_hw_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw settings\n");
- goto out_eeprom_free;
- }
-
- /***********************
- * 6. Setup priv
- * ********************/
-
- err = iwl3945_init_drv(priv);
- if (err) {
- IWL_ERR(priv, "initializing driver failed\n");
- goto out_unset_hw_params;
- }
-
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
- priv->cfg->name);
-
- /***********************
- * 7. Setup Services
- * ********************/
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_release_irq;
- }
-
- iwl_legacy_set_rxon_channel(priv,
- &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl3945_setup_deferred_work(priv);
- iwl3945_setup_rx_handlers(priv);
- iwl_legacy_power_initialize(priv);
-
- /*********************************
- * 8. Setup and Register mac80211
- * *******************************/
-
- iwl_legacy_enable_interrupts(priv);
-
- err = iwl3945_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
- /* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- 2 * HZ);
-
- return 0;
-
- out_remove_sysfs:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
- iwl3945_unset_hw_params(priv);
- out_eeprom_free:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl3945_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl_down(), but there are paths to
- * run iwl_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_synchronize_irq(priv);
-
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
- cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
- iwl3945_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl3945_rx_queue_free(priv, &priv->rxq);
- iwl3945_hw_txq_ctx_free(priv);
-
- iwl3945_unset_hw_params(priv);
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(pdev->irq, priv);
- pci_disable_msi(pdev);
-
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl_legacy_free_channel_map(priv);
- iwl_legacy_free_geos(priv);
- kfree(priv->scan_cmd);
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
- .name = DRV_NAME,
- .id_table = iwl3945_hw_card_ids,
- .probe = iwl3945_pci_probe,
- .remove = __devexit_p(iwl3945_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl3945_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl3945_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl3945_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
- pci_unregister_driver(&iwl3945_driver);
- iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-		"use software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae15..00000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boiler plate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
-
- if (priv->cfg->ops->hcmd->set_rxon_chain) {
- for_each_context(priv, ctx) {
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- if (ctx->active.rx_chain != ctx->staging.rx_chain)
- iwl_legacy_commit_rxon(priv, ctx);
- }
- }
-}
-
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
- struct iwl_tx_beacon_cmd *tx_beacon_cmd,
- u8 *beacon, u32 frame_size)
-{
- u16 tim_idx;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
- /*
- * The index is relative to frame start but we start looking at the
- * variable-length part of the beacon.
- */
- tim_idx = mgmt->u.beacon.variable - beacon;
-
- /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
- while ((tim_idx < (frame_size - 2)) &&
- (beacon[tim_idx] != WLAN_EID_TIM))
- tim_idx += beacon[tim_idx+1] + 2;
-
- /* If TIM field was found, set variables */
- if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
- tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
- tx_beacon_cmd->tim_size = beacon[tim_idx+1];
- } else
- IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
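Editor's note: the TIM search above is a plain information-element (id/length/value) walk over the variable part of the beacon. As an illustration only (find_ie() below is a hypothetical standalone helper, not part of the driver), the same walk looks like this:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return 1 and set *offset if element 'eid' is found in the IE buffer. */
static int find_ie(const uint8_t *ies, size_t len, uint8_t eid, size_t *offset)
{
	size_t i = 0;

	/* Each element is: 1 byte ID, 1 byte length, then 'length' bytes. */
	while (i + 2 <= len) {
		uint8_t id = ies[i];
		uint8_t elen = ies[i + 1];

		if (i + 2 + elen > len)
			return 0;	/* truncated element, stop */
		if (id == eid) {
			*offset = i;	/* element starts here (ID byte) */
			return 1;
		}
		i += 2 + elen;
	}
	return 0;
}

int main(void)
{
	/* toy IE buffer: SSID (ID 0, len 3) followed by TIM (ID 5, len 4) */
	const uint8_t ies[] = { 0, 3, 'f', 'o', 'o', 5, 4, 0, 1, 0, 0 };
	size_t off;

	if (find_ie(ies, sizeof(ies), 5, &off))
		printf("TIM element at offset %zu, length %u\n",
		       off, (unsigned)ies[off + 1]);
	return 0;
}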
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl_frame *frame)
-{
- struct iwl_tx_beacon_cmd *tx_beacon_cmd;
- u32 frame_size;
- u32 rate_flags;
- u32 rate;
- /*
- * We have to set up the TX command, the TX Beacon command, and the
- * beacon contents.
- */
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
- return 0;
- }
-
- /* Initialize memory */
- tx_beacon_cmd = &frame->u.beacon;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- /* Set up TX beacon contents */
- frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
- if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
- return 0;
- if (!frame_size)
- return 0;
-
- /* Set up TX command fields */
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
- tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
- /* Set up TX beacon command fields */
- iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
- frame_size);
-
- /* Set up packet rate and flags */
- rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
- rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
- if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
- tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
- rate_flags);
-
- return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- unsigned int frame_size;
- int rc;
-
- frame = iwl4965_get_free_frame(priv);
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
- if (!frame_size) {
- IWL_ERR(priv, "Error configuring the beacon command\n");
- iwl4965_free_frame(priv, frame);
- return -EINVAL;
- }
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl4965_free_frame(priv, frame);
-
- return rc;
-}
-
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
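Editor's note: the helpers above pack a 36-bit DMA address and a 12-bit length into each transfer-buffer entry: the low 32 address bits go into lo, the top 4 address bits into the low nibble of hi_n_len, and the length into its upper 12 bits. A minimal standalone sketch of that layout (hypothetical names, and ignoring the little-endian conversion the driver performs):

#include <stdint.h>
#include <stdio.h>

struct tb_sketch {
	uint32_t lo;		/* address bits [31:0] */
	uint16_t hi_n_len;	/* bits [3:0] = address [35:32], [15:4] = length */
};

static void tb_pack(struct tb_sketch *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));
}

static uint64_t tb_addr(const struct tb_sketch *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb_sketch *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_sketch tb;

	tb_pack(&tb, 0xF12345678ULL, 1500);	/* 36-bit address, 12-bit length */
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_addr(&tb), tb_len(&tb));
	return 0;
}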
-
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv: driver private data
- * @txq: tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++)
- pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
- iwl4965_tfd_tb_get_len(tfd, i),
- PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad)
-{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
- u32 num_tbs;
-
- q = &txq->q;
- tfd_tmp = (struct iwl_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
- /* Each TFD can point to a maximum of 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
-/*
- * Tell the NIC where to find the circular buffer of Tx Frame Descriptors
- * for a given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- /* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init,
- &pkt->u.alive_frame,
- sizeof(struct iwl_init_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received. We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* don't send host commands if rf-kill is on */
- if (!iwl_legacy_is_ready_rf(priv))
- return;
-
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- }
-
- if (flags & CT_CARD_DISABLED)
- iwl4965_perform_ct_kill_task(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
- /*
- * The same handler is used both for the REPLY to a discrete
- * statistics request from the host and for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
-
- /* status change handler */
- priv->rx_handlers[CARD_STATE_NOTIFICATION] =
- iwl4965_rx_card_state_notif;
-
- priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
- iwl4965_rx_missed_beacon_notif;
- /* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
- /* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- /* calculate how many frames need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl4965_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt), because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so the uCode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl4965_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl4965_rx_replenish_now(priv);
- else
- iwl4965_rx_queue_restock(priv);
-}
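Editor's note: the index handling in iwl4965_rx_handle() relies on the Rx queue size being a power of two, so the read index wraps with a mask and the number of empty slots is computed modulo the queue size. A small self-contained sketch of that arithmetic (QUEUE_SIZE here is a stand-in, not the driver's constant):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 256u			/* must be a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

static uint32_t next_index(uint32_t i)
{
	return (i + 1) & QUEUE_MASK;	/* wrap around the circular buffer */
}

static uint32_t slots_to_refill(uint32_t hw_read, uint32_t drv_write)
{
	int32_t empty = (int32_t)hw_read - (int32_t)drv_write;

	if (empty < 0)			/* hw index already wrapped */
		empty += QUEUE_SIZE;
	return (uint32_t)empty;
}

int main(void)
{
	printf("next after 255 -> %u, refill(10, 250) -> %u\n",
	       next_index(255), slots_to_refill(10, 250));
	return 0;
}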
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush any pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR49_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR49_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
- "the frame(s).\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
- /* The driver only loads the uCode once the interface is set up,
- * and it allows loading the uCode even if the radio
- * is killed. Hence update the killswitch state here. The
- * rfkill handler will take care of restarting if needed.
- */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /*
- * uCode wakes up after power-down sleep.
- * Tell device about any new tx or host commands enqueued,
- * and about any Rx buffers made available while asleep.
- */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here. */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl4965_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv,
- "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level managed through sysfs below is a per-device debug
- * level that is used instead of the global debug level whenever the
- * per-device level is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl4965_show_debug_level, iwl4965_store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_legacy_set_tx_power(priv, val, false);
- if (ret)
- IWL_ERR(priv, "failed setting tx power (0x%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
- iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
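Editor's note: from userspace these attributes appear as plain files and can be read like any other sysfs entry. A minimal example follows; the wlan0 path echoes the driver's own comment for the debug_level attribute, and the actual interface name and device path will differ per system:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/net/wlan0/device/temperature", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("temperature: %s", buf);	/* sysfs value ends with '\n' */
	fclose(f);
	return 0;
}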
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
- void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length);
-
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
- const char *name_pre = priv->cfg->fw_name_pre;
- char tag[8];
-
- if (first) {
- priv->fw_index = priv->cfg->ucode_api_max;
- sprintf(tag, "%d", priv->fw_index);
- } else {
- priv->fw_index--;
- sprintf(tag, "%d", priv->fw_index);
- }
-
- if (priv->fw_index < priv->cfg->ucode_api_min) {
- IWL_ERR(priv, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
- sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
- priv->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- &priv->pci_dev->dev, GFP_KERNEL, priv,
- iwl4965_ucode_callback);
-}
-
-struct iwl4965_firmware_pieces {
- const void *inst, *data, *init, *init_data, *boot;
- size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwl4965_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size;
- const u8 *src;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- switch (api_ver) {
- default:
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->v1.init_size);
- pieces->init_data_size =
- le32_to_cpu(ucode->v1.init_data_size);
- pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
- src = ucode->v1.data;
- break;
- }
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size + pieces->boot_size) {
-
- IWL_ERR(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
- pieces->boot = src;
- src += pieces->boot_size;
-
- return 0;
-}
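Editor's note: the size check above guards against a truncated or padded firmware file; the blob must be exactly the header plus the five image sizes the header advertises. A standalone sketch of that validation (hypothetical struct layout, not the real iwl_ucode_header):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_hdr_sketch {
	uint32_t inst_size;
	uint32_t data_size;
	uint32_t init_size;
	uint32_t init_data_size;
	uint32_t boot_size;
};

static int fw_sizes_ok(const struct fw_hdr_sketch *hdr, size_t blob_size)
{
	uint64_t want = sizeof(*hdr);

	want += hdr->inst_size;
	want += hdr->data_size;
	want += hdr->init_size;
	want += hdr->init_data_size;
	want += hdr->boot_size;

	/* 64-bit sum so overflowing 32-bit sizes cannot defeat the check */
	return want == blob_size;
}

int main(void)
{
	struct fw_hdr_sketch hdr = { 100, 200, 50, 25, 10 };

	printf("sizes ok: %d\n",
	       fw_sizes_ok(&hdr, sizeof(hdr) + 100 + 200 + 50 + 25 + 10));
	return 0;
}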
-
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_priv *priv = context;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwl4965_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- u32 api_ver;
-
- u32 max_probe_length = 200;
- u32 standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
- IWL_ERR(priv,
- "request for firmware file '%s' failed.\n",
- priv->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
- priv->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(priv, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- /*
- * api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from the firmware header from here on
- */
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
- pieces.boot_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- if (pieces.boot_size > priv->hw_params.max_bsm_size) {
- IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
- pieces.boot_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = pieces.inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- priv->ucode_init.len = pieces.init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = pieces.init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (pieces.boot_size) {
- priv->ucode_boot.len = pieces.boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
- pieces.inst_size);
- memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /*
- * Runtime data
- * NOTE: Copy into backup buffer will be done in iwl_up()
- */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
- pieces.data_size);
- memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
- memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
- /* Initialization instructions */
- if (pieces.init_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %Zd\n",
- pieces.init_size);
- memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
- }
-
- /* Initialization data */
- if (pieces.init_data_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %Zd\n",
- pieces.init_data_size);
- memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
- pieces.init_data_size);
- }
-
- /* Bootstrap instructions */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
- pieces.boot_size);
- memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
- /*
- * figure out the offset of chain noise reset and gain commands
- * based on the size of the standard PHY calibration commands table
- */
- priv->_4965.phy_calib_chain_noise_reset_cmd =
- standard_phy_calibration_size;
- priv->_4965.phy_calib_chain_noise_gain_cmd =
- standard_phy_calibration_size + 1;
-
- /**************************************************
- * This is still part of probe() in a sense...
- *
- * 9. Setup and register with mac80211 and debugfs
- **************************************************/
- err = iwl4965_mac_setup_register(priv, max_probe_length);
- if (err)
- goto out_unbind;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv,
- "failed to create debugfs files. Ignoring error: %d\n", err);
-
- err = sysfs_create_group(&priv->pci_dev->dev.kobj,
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
- /* We have our copies now, allow the OS to release its copy */
- release_firmware(ucode_raw);
- complete(&priv->_4965.firmware_loading_complete);
- return;
-
- try_again:
- /* try next, if any */
- if (iwl4965_request_firmware(priv, false))
- goto out_unbind;
- release_firmware(ucode_raw);
- return;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
- complete(&priv->_4965.firmware_loading_complete);
- device_release_driver(&priv->pci_dev->dev);
- release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *iwl4965_desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
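Editor's note: iwl4965_desc_lookup() combines a direct index into the dense table with a linear scan of the sparse table, whose last entry ("ADVANCED_SYSASSERT") serves as the default when nothing matches. The same pattern in a self-contained sketch with toy tables, not the driver's:

#include <stdio.h>

static const char * const direct[] = { "OK", "FAIL", "BAD_PARAM" };

static const struct { const char *name; unsigned int num; } sparse[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UNKNOWN", 0 },		/* catch-all, must stay last */
};

static const char *desc_lookup(unsigned int num)
{
	unsigned int i;
	unsigned int last = sizeof(sparse) / sizeof(sparse[0]) - 1;

	if (num < sizeof(direct) / sizeof(direct[0]))
		return direct[num];	/* dense part: direct index */

	for (i = 0; i < last; i++)	/* sparse part: linear scan */
		if (sparse[i].num == num)
			return sparse[i].name;
	return sparse[last].name;	/* fall back to the last entry */
}

int main(void)
{
	printf("%s %s %s\n", desc_lookup(1), desc_lookup(0x35), desc_lookup(0xff));
	return 0;
}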
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 data2, line;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
- u32 pc, hcmd;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
- }
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
- }
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
- priv->isr_stats.err_code = desc;
- pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
- blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
- blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
- ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
- ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
- data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
- data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
- line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
- time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
- hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
- time, data1, data2, line,
- blink1, blink2, ilink1, ilink2);
-
- IWL_ERR(priv, "Desc Time "
- "data1 data2 line\n");
- IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
- iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
- pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
- struct iwl_ct_kill_config cmd;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
- else
- IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear 4965's internal Tx Scheduler database */
- priv->scd_base_addr = iwl_legacy_read_prph(priv,
- IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
-
- /* Tell 4965 where to find Tx byte count tables */
- iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
- /* make sure all queues are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queues first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
- /* The initialize uCode has loaded the runtime uCode ... verify the inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- ret = iwl4965_alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition [ntf]: %d\n", ret);
- goto restart;
- }
-
-
- /* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK;
-
- if (iwl_legacy_is_associated_ctx(ctx)) {
- struct iwl_legacy_rxon_cmd *active_rxon =
- (struct iwl_legacy_rxon_cmd *)&ctx->active;
- /* apply any changes in staging */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- struct iwl_rxon_context *tmp;
- /* Initialize our rx_config data */
- for_each_context(priv, tmp)
- iwl_legacy_connection_init_rx_config(priv, tmp);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* Configure bluetooth coexistence if enabled */
- iwl_legacy_send_bt_config(priv);
-
- iwl4965_reset_run_time_calib(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* At this point, the NIC is initialized and operational */
- iwl4965_rf_kill_ct_config(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- iwl_legacy_power_update_mode(priv, true);
- IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* Stop the TX queue watchdog. The STATUS_EXIT_PENDING bit must be set
- * to prevent the timer from being rearmed */
- del_timer_sync(&priv->watchdog);
-
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl4965_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl_init() then
- * clear all bits but the RF Kill bit and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl4965_txq_ctx_stop(priv);
- iwl4965_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl4965_clear_free_frames(priv);
-}
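Editor's note: the status handling in __iwl4965_down() keeps only a few bits (RF kill, geo-configured, exit-pending, plus FW error on the second path) across the shutdown by re-deriving each surviving bit and OR-ing it back into place. A plain-C sketch of that idiom, with stand-in bit numbers and no atomic bitops:

#include <stdio.h>

#define BIT_RF_KILL_HW		0
#define BIT_GEO_CONFIGURED	1
#define BIT_EXIT_PENDING	5

static unsigned long keep_bits(unsigned long status)
{
	/* each surviving bit is tested and shifted back to its position */
	return (((status >> BIT_RF_KILL_HW) & 1UL) << BIT_RF_KILL_HW) |
	       (((status >> BIT_GEO_CONFIGURED) & 1UL) << BIT_GEO_CONFIGURED) |
	       (((status >> BIT_EXIT_PENDING) & 1UL) << BIT_EXIT_PENDING);
}

int main(void)
{
	unsigned long status = 0xff;	/* pretend many bits are set */

	printf("0x%lx -> 0x%lx\n", status, keep_bits(status));
	return 0;
}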
-
-static void iwl4965_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl4965_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
- int ret = 0;
-
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
- if (ret != -ETIMEDOUT)
- priv->hw_ready = true;
- else
- priv->hw_ready = false;
-
- IWL_DEBUG_INFO(priv, "hardware %s\n",
- (priv->hw_ready == 1) ? "ready" : "not ready");
- return ret;
-}
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret = 0;
-
- IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
- ret = iwl4965_set_hw_ready(priv);
- if (priv->hw_ready)
- return ret;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- /* HW should be ready by now, check again. */
- if (ret != -ETIMEDOUT)
- iwl4965_set_hw_ready(priv);
-
- return ret;
-}
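Editor's note: both iwl4965_set_hw_ready() and iwl4965_prepare_card_hw() are built on the same poll-a-bit-until-timeout pattern. A self-contained sketch of that pattern follows; a fake in-memory register stands in for the hardware, whereas the real helper reads a CSR and sleeps between polls:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg;	/* stands in for a device register */

static int poll_bit(volatile uint32_t *reg, uint32_t mask, uint32_t value,
		    unsigned int timeout_iters)
{
	unsigned int i;

	for (i = 0; i < timeout_iters; i++) {
		if ((*reg & mask) == value)
			return 0;	/* the bits reached the wanted state */
		/* a real driver would delay or schedule here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	fake_reg = 0x1;	/* pretend the "ready" bit is already set */
	printf("poll result: %d\n", poll_bit(&fake_reg, 0x1, 0x1, 1000));
	return 0;
}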
-
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int i;
- int ret;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bringup\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwl4965_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_legacy_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- iwl4965_prepare_card_hw(priv);
-
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv,
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_legacy_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
- iwl_legacy_enable_interrupts(priv);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return 0;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- /* must be initialised before iwl_hw_nic_init */
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- ret = iwl4965_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- ret = priv->cfg->ops->lib->load_ucode(priv);
-
- if (ret) {
- IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
- ret);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl4965_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl4965_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* tried to restart and config the device for as long as our
- * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- priv->cfg->ops->lib->init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl4965_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- run_time_calib_work);
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (priv->start_calib) {
- iwl4965_chain_noise_calibration(priv,
- (void *)&priv->_4965.statistics);
- iwl4965_sensitivity_calibration(priv,
- (void *)&priv->_4965.statistics);
- }
-
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
-
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
-
- __iwl4965_down(priv);
-
- mutex_unlock(&priv->mutex);
- iwl4965_cancel_deferred_work(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl4965_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl4965_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-4965-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- if (priv->cfg->sku & IWL_SKU_N)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
- /*
- * For now, disable PS by default because it affects
- * RX performance significantly.
- */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 24-byte 802.11 header and a 2-byte zero-length SSID element */
- hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
- ret = __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
-
- if (ret)
- return ret;
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- return -ETIMEDOUT;
- }
- }
-
- iwl4965_led_enable(priv);
-
-out:
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl4965_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* User space software may expect to get rfkill change notifications
- * even if the interface is down */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl4965_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
- iv32, phase1key);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (priv->cfg->mod_params->sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- /*
- * If we are installing a WEP group key and we have not received any
- * key-mapping keys so far, we are in legacy WEP mode (group key only);
- * otherwise we are in 802.1X mode.
- * In legacy WEP mode, we use a different host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- (key->hw_key_idx == HW_KEY_DEFAULT);
- }
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key)
- ret = iwl4965_set_default_wep_key(priv,
- vif_priv->ctx, key);
- else
- ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl4965_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl4965_remove_dynamic_key(priv, ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & IWL_SKU_N))
- return -EACCES;
-
- mutex_lock(&priv->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- ret = 0;
- break;
- }
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
-
- ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl4965_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- goto out;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->ops->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->lock);
-
- iwl_legacy_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- mutex_lock(&priv->mutex);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- goto out;
-
- /* Regardless of whether we are associated, we must reconfigure the
- * TX power since frames can be sent on non-radar channels while
- * not associated */
- priv->cfg->ops->lib->send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl4965_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
- INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
- init_timer(&priv->statistics_periodic);
- priv->statistics_periodic.data = (unsigned long)priv;
- priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->run_time_calib_work);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-
- del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
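
The "* 5" conversion above is easy to misread; for reference, a small worked example using standard IEEE/mac80211 unit conventions (nothing here is specific to this driver):

/* Sketch: iwlegacy_rates[i].ieee is the IEEE supported-rates value in
 * 500 kb/s units, while mac80211's ieee80211_rate.bitrate is in
 * 100 kb/s units, hence the multiplication by 5.  For 11 Mb/s CCK:
 *
 *     iwlegacy_rates[i].ieee == 22   ->   rates[i].bitrate == 110
 */
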
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
- int ret;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- /* Choose which receivers/antennas to use */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- iwl_legacy_init_scan_params(priv);
-
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
- iwl4965_calib_free_results(priv);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
- priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
- priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
- priv->rev_id = priv->pci_dev->revision;
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- if (priv->cfg->mod_params->amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
- else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
- if (priv->cfg->mod_params->disable_11n)
- priv->cfg->sku &= ~IWL_SKU_N;
-
- /* Device-specific setup */
- return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- unsigned long flags;
- u16 pci_cmd;
-
- /************************
- * 1. Allocating HW data
- ************************/
-
- hw = iwl_legacy_alloc_all(cfg);
- if (!hw) {
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- /* At this point both hw and priv are allocated. */
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /**************************
- * 2. Initializing PCI bus
- **************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- pci_set_drvdata(pdev, priv);
-
-
- /***********************
- * 3. Read REV register
- ***********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* these spin locks will be used in apm_ops.init and EEPROM access,
- * so we should init them now
- */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- iwl4965_hw_detect(priv);
- IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->hw_rev);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl4965_prepare_card_hw(priv);
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_iounmap;
- }
-
- /*****************
- * 4. Read EEPROM
- *****************/
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- err = iwl4965_eeprom_check_version(priv);
- if (err)
- goto out_free_eeprom;
-
- /* extract MAC Address */
- iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
- priv->hw->wiphy->addresses = priv->addresses;
- priv->hw->wiphy->n_addresses = 1;
-
- /************************
- * 5. Setup HW constants
- ************************/
- if (iwl4965_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw parameters\n");
- goto out_free_eeprom;
- }
-
- /*******************
- * 6. Setup priv
- *******************/
-
- err = iwl4965_init_drv(priv);
- if (err)
- goto out_free_eeprom;
- /* At this point both hw and priv are initialized. */
-
- /********************
- * 7. Setup services
- ********************/
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- iwl4965_setup_deferred_work(priv);
- iwl4965_setup_rx_handlers(priv);
-
- /*********************************************
- * 8. Enable interrupts and read RFKILL state
- *********************************************/
-
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
- }
-
- iwl_legacy_enable_rfkill_int(priv);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
-
- iwl_legacy_power_initialize(priv);
-
- init_completion(&priv->_4965.firmware_loading_complete);
-
- err = iwl4965_request_firmware(priv, true);
- if (err)
- goto out_destroy_workqueue;
-
- return 0;
-
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl4965_uninit_drv(priv);
- out_free_eeprom:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- wait_for_completion(&priv->_4965.firmware_loading_complete);
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
- sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
- /* The ieee80211_unregister_hw() call will cause iwl_mac_stop() and
- * iwl4965_down() to be called; since we are removing the device we
- * need to set the STATUS_EXIT_PENDING bit.
- */
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl4965_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl4965_down(), but there are paths to
- * run iwl4965_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl4965_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_synchronize_irq(priv);
-
- iwl4965_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl4965_rx_queue_free(priv, &priv->rxq);
- iwl4965_hw_txq_ctx_free(priv);
-
- iwl_legacy_eeprom_free(priv);
-
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(priv->pci_dev->irq, priv);
- pci_disable_msi(priv->pci_dev);
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl4965_uninit_drv(priv);
-
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
- * Must be called with priv->lock held and MAC access requested.
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
- {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
- .name = DRV_NAME,
- .id_table = iwl4965_hw_card_ids,
- .probe = iwl4965_pci_probe,
- .remove = __devexit_p(iwl4965_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl4965_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl4965_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl4965_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
- pci_unregister_driver(&iwl4965_driver);
- iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
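
For reference, a hedged usage sketch of the module parameters declared above; the parameter names come from the MODULE_PARM_DESC entries, the values are purely illustrative:

/* Illustrative only: load the driver with software crypto enabled and
 * automatic firmware restart disabled, e.g.:
 *
 *     modprobe iwl4965 swcrypto=1 fw_restart=0
 *
 * queues_num, 11n_disable and amsdu_size_8K follow the same pattern.
 */
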
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab..ffec4b4a248 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef __il_prph_h__
+#define __il_prph_h__
/*
* Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
*
* 1) Initialization -- performs hardware calibration and sets up some
* internal data, then notifies host via "initialize alive" notification
- * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * (struct il_init_alive_resp) that it has completed all of its work.
* After signal from host, it then loads and starts the runtime program.
* The initialization program must be used when initially setting up the
* NIC after loading the driver.
*
* 2) Runtime/Protocol -- performs all normal runtime operations. This
- * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * notifies host via "alive" notification (struct il_alive_resp) that it
* is ready to be used.
*
* When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
* procedure.
*
* This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL). Platform suspend/resume and
* RFKILL should use complete restarts (with total re-initialization) of uCode,
* allowing total shutdown (including BSM memory).
*
@@ -202,19 +202,19 @@
*/
/* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
/* BSM addresses */
#define BSM_BASE (PRPH_BASE + 0x3400)
#define BSM_END (PRPH_BASE + 0x3800)
-#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
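
The register group above is enough to sketch how a bootstrap copy is kicked off. This is an illustration only, assuming il_wr_prph()/il_rd_prph() peripheral-register helpers, a struct il_priv handle, and a dword count for BSM_WR_DWCOUNT_REG (all placeholders, not taken from this diff); the real uCode loader does considerably more.

/* Sketch: ask the BSM to copy an image already written into BSM SRAM
 * into the on-board processor's memory, then wait for completion by
 * polling the start bit until it clears.
 */
static int bsm_copy_sketch(struct il_priv *il, u32 dst, u32 len_dwords)
{
	int t;

	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);	/* source: start of BSM SRAM */
	il_wr_prph(il, BSM_WR_MEM_DST_REG, dst);	/* destination in device SRAM */
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len_dwords);	/* assumed: length in dwords */

	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	for (t = 0; t < 100; t++) {
		if (!(il_rd_prph(il, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			return 0;	/* copy finished */
		udelay(10);
	}
	return -ETIMEDOUT;
}
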
/*
* Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
* Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
*/
#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE (1024) /* bytes */
-
+#define BSM_SRAM_SIZE (1024) /* bytes */
/* 3945 Tx scheduler registers */
#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
* but one DMA channel may take input from several queues.
*
* Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
* The driver sets up each queue to work in one of two modes:
*
* 1) Scheduler-Ack, in which the scheduler automatically supports a
- * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
* contains TFDs for a unique combination of Recipient Address (RA)
* and Traffic Identifier (TID), that is, traffic of a given
* Quality-Of-Service (QOS) priority, destined for a single station.
*
* In scheduler-ack mode, the scheduler keeps track of the Tx status of
- * each frame within the BA window, including whether it's been transmitted,
+ * each frame within the BA win, including whether it's been transmitted,
* and whether it's been acknowledged by the receiving station. The device
* automatically processes block-acks received from the receiving STA,
* and reschedules un-acked frames to be retransmitted (successful
* Tx completion may end up being out-of-order).
*
* The driver must maintain the queue's Byte Count table in host DRAM
- * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
* This mode does not support fragmentation.
*
* 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
*/
/**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
* Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
*/
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64
/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET 0xa02c00
+#define IL49_SCD_START_OFFSET 0xa02c00
/*
* 4965 tells driver SRAM address for internal scheduler structs via this reg.
* Value is valid only after "Alive" response from uCode.
*/
-#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
/*
* Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
* scheduler is not tracking what's happening).
* Bit fields:
* 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
* 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
* NOTE: This register is not used by Linux driver.
*/
-#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
/*
* Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
* This register points to BC CB for queue 0, must be on 1024-byte boundary.
* Others are spaced by 1024 bytes.
* Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
* Bit fields:
* 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
*/
-#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
/*
* Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
* Bit fields:
* 7- 0: Enable (1), disable (0), one bit for each channel 0-7
*/
-#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
/*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
* Initialized and updated by driver as new TFDs are added to queue.
- * NOTE: If using Block Ack, index must correspond to frame's
- * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: If using Block Ack, idx must correspond to frame's
+ * Start Sequence Number; idx = (SSN & 0xff)
* NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
*/
-#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
/*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
* Initialized by driver, updated by scheduler.
*/
-#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
/*
* Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
 * NOTE: If driver sets up a queue for chain mode, it should also set up
 * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
*/
-#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
/*
* Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
* Bit fields:
* 31-16: Reserved
* 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
* NOTE: This functionality is apparently a no-op; driver relies on interrupts
* from Rx queue to read Tx command responses and update Tx queues.
*/
-#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
/*
* Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
* Driver should init to "1" for aggregation mode, or "0" otherwise.
* 7-6: Driver should init to "0"
* 5: Window Size Left; indicates whether scheduler can request
- * another TFD, based on window size, etc. Driver should init
+ * another TFD, based on win size, etc. Driver should init
* this bit to "1" for aggregation mode, or "0" for non-agg.
* 4-1: Tx FIFO to use (range 0-7).
* 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
* NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
* via SCD_QUEUECHAIN_SEL.
*/
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
- (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
/* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
/* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
/**
* 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
* each queue's entry as follows:
*
* LS Dword bit fields:
- * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
*
* MS Dword bit fields:
* 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
* Init must be done after driver receives "Alive" response from 4965 uCode,
* and when setting up queue for aggregation.
*/
-#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
- (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
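
Taken together with IL49_SCD_CONTEXT_QUEUE_OFFSET(x) above, per-queue context setup reduces to two SRAM writes. A minimal sketch, assuming a hypothetical il_write_targ_mem() SRAM accessor and that scd_base_addr holds the base address reported in the "Alive" response:

/* Sketch: program queue txq_id's scheduler context for aggregation.
 * il_write_targ_mem() is a placeholder for the driver's SRAM write
 * helper; scd_base_addr is the SRAM base from the "Alive" response.
 */
static void scd_context_init_sketch(struct il_priv *il, u32 scd_base_addr,
				    int txq_id)
{
	u32 ctx = scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	/* LS dword: max Tx window size for Scheduler-ACK (bits 0-6) */
	il_write_targ_mem(il, ctx,
			  (SCD_WIN_SIZE <<
			   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
			  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: frame limit (bits 16-22) */
	il_write_targ_mem(il, ctx + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}
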
/*
* Tx Status Bitmap
@@ -486,7 +485,7 @@
* "Alive" notification from uCode. Area is used only by device itself;
* no other support (besides clearing) is required from driver.
*/
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
/*
* RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
* for only one combination of receiver address (RA) and traffic ID (TID), i.e.
* one QOS priority level destined for one station (for this wireless link,
- * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
* mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
* mode, the device ignores the mapping value.
*
@@ -508,16 +507,16 @@
* must read a dword-aligned value from device SRAM, replace the 16-bit map
* value of interest, and write the dword value back into device SRAM.
*/
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
/* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
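
The read-modify-write described above can be sketched directly from these macros. As before, il_read_targ_mem()/il_write_targ_mem() and scd_base_addr are assumed placeholders, not definitions taken from this diff:

/* Sketch: map RA/TID value ra_tid onto Tx queue txq_id.  Queue x's
 * 16-bit entry lives at byte offset x*2, so the dword-aligned access
 * below updates the upper half-word for odd queues and the lower
 * half-word for even ones.
 */
static void scd_set_q2ratid_sketch(struct il_priv *il, u32 scd_base_addr,
				   u16 ra_tid, int txq_id)
{
	u32 addr = scd_base_addr +
		   IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u16 map = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
	u32 dw = il_read_targ_mem(il, addr);

	if (txq_id & 0x1)
		dw = (map << 16) | (dw & 0x0000FFFF);
	else
		dw = map | (dw & 0xFFFF0000);

	il_write_targ_mem(il, addr, dw);
}
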
-#define IWL_SCD_TXFIFO_POS_TID (0)
-#define IWL_SCD_TXFIFO_POS_RA (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+#define IL_SCD_TXFIFO_POS_TID (0)
+#define IL_SCD_TXFIFO_POS_RA (4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
/*********************** END TX SCHEDULER *************************************/
-#endif /* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5209d..ae08498dfca 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLWIFI_DEVICE_SVTOOL
- bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+ def_bool y
depends on IWLWIFI
- select NL80211_TESTMODE
+ depends on NL80211_TESTMODE
help
- This option enables the svtool support for iwlwifi device through
- NL80211_TESTMODE. svtool is a software validation tool that runs in
- the user space and interacts with the device in the kernel space
- through the generic netlink message via NL80211_TESTMODE channel.
+ This option enables testmode support for iwlwifi devices through
+ NL80211_TESTMODE. It allows user-space validation applications to
+ interact with the device via generic netlink messages over the
+ NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+ bool "iwlwifi experimental P2P support"
+ depends on IWLWIFI
+ help
+ This option enables experimental P2P support for some devices,
+ based on microcode support. Since P2P support is still under
+ development, this option may enable it now on devices that later
+ turn out not to support it, due to microcode restrictions.
+
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
+
+ Say Y only if you want to experiment with P2P.
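
The new symbols above can be exercised with an ordinary kernel configuration; a minimal .config sketch (illustrative, assuming iwlwifi is built as a module):

CONFIG_IWLWIFI=m
CONFIG_NL80211_TESTMODE=y
# IWLWIFI_DEVICE_TESTMODE is def_bool y, so it is enabled automatically
# once the two options above are set.
CONFIG_IWLWIFI_P2P=y
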
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index c73e5ed8db5..9dc84a7354d 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
-iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
-iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
+iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
@@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff..1ef7bfc2ab2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -124,10 +124,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -135,28 +135,19 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl1000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -191,6 +182,7 @@ static struct iwl_base_params iwl1000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 79431977a96..094693328db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -86,7 +86,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
{
iwl_rf_config(priv);
- if (priv->cfg->iq_invert)
+ if (cfg(priv)->iq_invert)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -120,10 +120,10 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -131,29 +131,19 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl2000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -258,25 +248,19 @@ static struct iwl_bt_params iwl2030_bt_params = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2000_2bg_cfg = {
- .name = "2000 Series 2x2 BG",
- IWL_DEVICE_2000,
-};
-
struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "2000D Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
@@ -291,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -299,16 +282,11 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.iq_invert = true \
struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
IWL_DEVICE_2030,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2030_2bg_cfg = {
- .name = "2000 Series 2x2 BG/BT",
- IWL_DEVICE_2030,
-};
-
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
@@ -318,7 +296,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -326,19 +303,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl105_bg_cfg = {
- .name = "105 Series 1x1 BG",
- IWL_DEVICE_105,
-};
-
struct iwl_cfg iwl105_bgn_cfg = {
- .name = "105 Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "105D Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
@@ -353,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -361,13 +332,8 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl135_bg_cfg = {
- .name = "135 Series 1x1 BG/BT",
- IWL_DEVICE_135,
-};
-
struct iwl_cfg iwl135_bgn_cfg = {
- .name = "135 Series 1x1 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a..b3a365fea7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
EEPROM_KELVIN_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
- iwl_temp_calib_to_offset(priv);
+ iwl_temp_calib_to_offset(priv->shrd);
hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
@@ -166,10 +166,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -178,22 +178,15 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -202,10 +195,10 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -214,22 +207,15 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5150_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
- s32 offset = iwl_temp_calib_to_offset(priv);
+ s32 offset = iwl_temp_calib_to_offset(priv->shrd);
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -364,6 +350,7 @@ static struct iwl_base_params iwl5000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
@@ -433,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.lib = &iwl5150_lib, \
.base_params = &iwl5000_base_params, \
- .need_dc_calib = true, \
+ .no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c840c78278d..54b753399e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -101,14 +102,14 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
/* no locking required for register write */
- if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
+ if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
- if (priv->cfg->additional_nic_config)
- priv->cfg->additional_nic_config(priv);
+ if (cfg(priv)->additional_nic_config)
+ cfg(priv)->additional_nic_config(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -140,10 +141,10 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -152,29 +153,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl6000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
.base_params = &iwl6000_g2_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
.lib = &iwl6030_lib, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -434,21 +423,11 @@ struct iwl_cfg iwl6030_2bg_cfg = {
};
struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "6035 Series 2x2 AGN/BT",
+ .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6035_2abg_cfg = {
- .name = "6035 Series 2x2 ABG/BT",
- IWL_DEVICE_6030,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
- .name = "6035 Series 2x2 BG/BT",
- IWL_DEVICE_6030,
-};
-
struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
@@ -479,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
@@ -516,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -540,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -559,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
+ .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.lib = &iwl6000_lib,
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
.led_mode = IWL_LED_BLINK,
};
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48558b..50ff849c9f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
{
- int ret = 0;
- int i = 0;
-
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
.flags = CMD_SYNC,
};
-
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
- priv->calib_results[i].buf) {
- hcmd.len[0] = priv->calib_results[i].buf_len;
- hcmd.data[0] = priv->calib_results[i].buf;
- hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans(priv), &hcmd);
- if (ret) {
- IWL_ERR(priv, "Error %d iteration %d\n",
- ret, i);
- break;
- }
+ struct iwl_calib_result *res;
+
+ list_for_each_entry(res, &trans->calib_results, list) {
+ int ret;
+
+ hcmd.len[0] = res->cmd_len;
+ hcmd.data[0] = &res->hdr;
+ hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret) {
+ IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ ret, res->hdr.op_code);
+ return ret;
}
}
- return ret;
+ return 0;
}
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len)
{
- if (res->buf_len != len) {
- kfree(res->buf);
- res->buf = kzalloc(len, GFP_ATOMIC);
- }
- if (unlikely(res->buf == NULL))
+ struct iwl_calib_result *res, *tmp;
+
+ res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+ GFP_ATOMIC);
+ if (!res)
return -ENOMEM;
+ memcpy(&res->hdr, cmd, len);
+ res->cmd_len = len;
+
+ list_for_each_entry(tmp, &trans->calib_results, list) {
+ if (tmp->hdr.op_code == res->hdr.op_code) {
+ list_replace(&tmp->list, &res->list);
+ kfree(tmp);
+ return 0;
+ }
+ }
+
+	/* not already in the list, append it */
+ list_add_tail(&res->list, &trans->calib_results);
- res->buf_len = len;
- memcpy(res->buf, buf, len);
return 0;
}
-void iwl_calib_free_results(struct iwl_priv *priv)
+void iwl_calib_free_results(struct iwl_trans *trans)
{
- int i;
+ struct iwl_calib_result *res, *tmp;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
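/*
 * Illustrative sketch, not part of the patch: the calibration results are
 * now kept on a list owned by the transport and keyed by the firmware
 * op_code, instead of a fixed array indexed up to IWL_CALIB_MAX.  The
 * replace-or-append pattern used by iwl_calib_set() above reduces to the
 * following; "struct demo_result" and demo_store() are made-up names used
 * only for illustration.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_result {
	struct list_head list;
	u8 op_code;
	/* variable-length payload follows in the real structure */
};

static int demo_store(struct list_head *results, struct demo_result *res)
{
	struct demo_result *tmp;

	list_for_each_entry(tmp, results, list) {
		if (tmp->op_code == res->op_code) {
			/* a result of this type already exists: swap it out */
			list_replace(&tmp->list, &res->list);
			kfree(tmp);
			return 0;
		}
	}
	/* first result of this type: append it */
	list_add_tail(&res->list, results);
	return 0;
}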
@@ -505,7 +513,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- if (priv->cfg->base_params->hd_v2) {
+ if (cfg(priv)->base_params->hd_v2) {
cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -839,7 +847,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->cfg->valid_tx_ant);
+ find_first_chain(cfg(priv)->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -882,7 +890,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
continue;
}
- delta_g = (priv->cfg->base_params->chain_noise_scale *
+ delta_g = (cfg(priv)->base_params->chain_noise_scale *
((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
@@ -1039,8 +1047,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
/* Analyze signal for disconnected antenna */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
data->active_chains = hw_params(priv).valid_rx_ant;
@@ -1074,7 +1082,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwlagn_gain_computation(priv, average_noise,
min_average_noise_antenna_i, min_average_noise,
- find_first_chain(priv->cfg->valid_rx_ant));
+ find_first_chain(cfg(priv)->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9205d..10275ce92bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
void iwl_init_sensitivity(struct iwl_priv *priv);
void iwl_reset_run_time_calib(struct iwl_priv *priv);
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 1a52ed29f2d..64cf439035c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -92,11 +93,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
{
struct iwl_eeprom_calib_hdr *hdr;
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
EEPROM_CALIB_ALL);
return hdr->version;
@@ -105,7 +106,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
/*
* EEPROM
*/
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
{
u16 offset = 0;
@@ -114,31 +115,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
break;
case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
break;
case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
break;
case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
break;
default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
address & INDIRECT_TYPE_MSK);
break;
}
@@ -147,11 +148,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
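/*
 * Illustrative note, not part of the patch: the EEPROM link registers
 * appear to hold 16-bit word offsets, hence the "offset << 1" when the
 * byte address is formed.  For example, an offset of 0x0120 read from
 * EEPROM_LINK_CALIBRATION combined with an address field of 0x0004 would
 * yield (0x0004 & ADDRESS_MSK) + (0x0120 << 1) = 0x0244.
 */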
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
+ u32 address = eeprom_indirect_address(shrd, offset);
+ BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
+ return &shrd->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
@@ -232,7 +233,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -374,15 +375,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
sizeof(basic.bt3_lookup_table));
- if (priv->cfg->bt_params) {
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params) {
+ if (cfg(priv)->bt_params->bt_session_2) {
bt_cmd_2000.prio_boost = cpu_to_le32(
- priv->cfg->bt_params->bt_prio_boost);
+ cfg(priv)->bt_params->bt_prio_boost);
bt_cmd_2000.tx_prio_boost = 0;
bt_cmd_2000.rx_prio_boost = 0;
} else {
bt_cmd_6000.prio_boost =
- priv->cfg->bt_params->bt_prio_boost;
+ cfg(priv)->bt_params->bt_prio_boost;
bt_cmd_6000.tx_prio_boost = 0;
bt_cmd_6000.rx_prio_boost = 0;
}
@@ -430,7 +431,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
@@ -799,8 +800,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
*/
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -827,6 +828,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
case IEEE80211_SMPS_STATIC:
case IEEE80211_SMPS_DYNAMIC:
return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
return active_cnt;
default:
@@ -870,8 +872,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
active_chains = hw_params(priv).valid_rx_ant;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -933,53 +935,359 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&priv->notif_wait_lock);
- list_add(&wait_entry->list, &priv->notif_waits);
- spin_unlock_bh(&priv->notif_wait_lock);
+ int i;
+
+ for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
}
-int iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
+struct wowlan_key_data {
+ struct iwl_rxon_context *ctx;
+ struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwlagn_wowlan_tkip_params_cmd *tkip;
+ const u8 *bssid;
+ bool error, use_rsc_tsc, use_tkip;
+};
+
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
{
- int ret;
+ struct iwl_priv *priv = hw->priv;
+ struct wowlan_key_data *data = _data;
+ struct iwl_rxon_context *ctx = data->ctx;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwlagn_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWLAGN_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&priv->shrd->mutex);
- ret = wait_event_timeout(priv->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta && !ctx->key_mapping_keys)
+ ret = iwl_set_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_set_dynamic_key(priv, ctx, key, sta);
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ if (ret) {
+ IWL_ERR(priv, "Error setting key during suspend!\n");
+ data->error = true;
+ }
- if (wait_entry->aborted)
- return -EIO;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+	 * mac80211 use TID 0 (as they must, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, data->bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+}
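/*
 * Illustrative sketch, not part of the patch: mac80211 hands back the CCMP
 * packet number as six bytes with pn[0] the most significant, while the
 * WoWLAN commands above carry it as a little-endian 64-bit counter, so
 * pn[5] lands in the low byte.  demo_pn_to_le64() is a hypothetical helper
 * mirroring the packing done inline in iwlagn_wowlan_program_keys().
 */
static __le64 demo_pn_to_le64(const u8 pn[6])
{
	return cpu_to_le64((u64)pn[5] |
			   ((u64)pn[4] << 8)  |
			   ((u64)pn[3] << 16) |
			   ((u64)pn[2] << 24) |
			   ((u64)pn[1] << 32) |
			   ((u64)pn[0] << 40));
}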
+
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_trans_send_cmd(trans(priv), &cmd);
+ kfree(pattern_cmd);
+ return err;
}
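/*
 * Illustrative note, not part of the patch: REPLY_WOWLAN_PATTERNS is a
 * variable-length host command, a fixed header followed by one
 * struct iwlagn_wowlan_pattern per configured pattern.  Because it is sent
 * with IWL_HCMD_DFL_NOCOPY, the buffer may only be freed after the
 * synchronous iwl_trans_send_cmd() returns, exactly as done above.  A
 * hypothetical helper for the length computation:
 */
static size_t demo_patterns_cmd_len(u32 n_patterns)
{
	return sizeof(struct iwlagn_wowlan_patterns_cmd) +
	       n_patterns * sizeof(struct iwlagn_wowlan_pattern);
}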
-void iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry)
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+ struct iwl_rxon_cmd rxon;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+ struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
+ struct wowlan_key_data key_data = {
+ .ctx = ctx,
+ .bssid = ctx->active.bssid_addr,
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+ /*
+	 * We know the last used sequence number; the uCode expects that
+	 * one and will increment it before TX.
+ */
+ seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+ wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+ * For QoS counters, we store the one to use next, so subtract 0x10
+ * since the uCode will add 0x10 before using the value.
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ seq = priv->tid_data[IWL_AP_ID][i].seq_number;
+ seq -= 0x10;
+ wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ d3_cfg_cmd.wakeup_flags |=
+ cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+ iwl_scan_cancel_timeout(priv, 200);
+
+ memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+ iwl_trans_stop_device(trans(priv));
+
+ priv->shrd->wowlan = true;
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+ if (ret)
+ goto out;
+
+ /* now configure WoWLAN ucode */
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto out;
+
+ memcpy(&ctx->staging, &rxon, sizeof(rxon));
+ ret = iwlagn_commit_rxon(priv, ctx);
+ if (ret)
+ goto out;
+
+ ret = iwl_power_update_mode(priv, true);
+ if (ret)
+ goto out;
+
+ if (!iwlagn_mod_params.sw_crypto) {
+ /* mark all keys clear */
+ priv->ucode_key_table = 0;
+ ctx->key_mapping_keys = 0;
+
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&priv->shrd->mutex);
+ ieee80211_iter_keys(priv->hw, ctx->vif,
+ iwlagn_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&priv->shrd->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(key_data.rsc_tsc),
+ };
+
+ ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip) {
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_TKIP_PARAMS,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (priv->have_rekey_data) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC, sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+ CMD_SYNC, sizeof(wakeup_filter_cmd),
+ &wakeup_filter_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+ kfree(key_data.rsc_tsc);
+ return ret;
}
+#endif
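/*
 * Illustrative note, not part of the patch: in the 802.11 sequence control
 * field the fragment number occupies bits 0-3 and the sequence number bits
 * 4-15, so one sequence-number step is 0x10.  That is why iwlagn_suspend()
 * masks the last non-QoS sequence with IEEE80211_SCTL_SEQ and subtracts
 * 0x10 from the driver's "next to use" QoS counters, since the uCode adds
 * 0x10 before using each value.
 */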
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 66118cea2af..334b5ae8fdd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
} else
return IWL_MAX_TID_COUNT;
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
	/* testmode has higher priority to overwrite the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
s32 index;
struct iwl_traffic_load *tl = NULL;
- if (tid >= TID_MAX_LOAD_COUNT)
+ if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if (tid < TID_MAX_LOAD_COUNT)
+ if (tid < IWL_MAX_TID_COUNT)
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
+ IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,12 +1081,12 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
@@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -2277,7 +2273,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
+ tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
@@ -2649,8 +2645,7 @@ lq_update:
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
u8 sta_id = lq_sta->lq.sta_id;
- tid_data =
- &priv->shrd->tid_data[sta_id][tid];
+ tid_data = &priv->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
@@ -2908,7 +2903,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -3059,11 +3054,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
* overwrite if needed, pass aggregation time limit
* to uCode in uSec
*/
- if (priv && priv->cfg->bt_params &&
- priv->cfg->bt_params->agg_time_limit &&
+ if (priv && cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->agg_time_limit &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+ cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb829a..6675b3c816d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@ enum {
#define TID_QUEUE_CELL_SPACING 50 /*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
struct iwl_link_quality_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 5af9e6258a1..b22b2976f89 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ IWL_CMD(REPLY_D3_CONFIG);
default:
return "UNKNOWN";
@@ -317,7 +318,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
unsigned int msecs)
{
int delta;
- int threshold = priv->cfg->base_params->plcp_delta_threshold;
+ int threshold = cfg(priv)->base_params->plcp_delta_threshold;
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -582,8 +583,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
iwlagn_rx_calc_noise(priv);
queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
}
- if (priv->cfg->lib->temperature && change)
- priv->cfg->lib->temperature(priv);
+ if (cfg(priv)->lib->temperature && change)
+ cfg(priv)->lib->temperature(priv);
return 0;
}
@@ -800,7 +801,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
ctx->active.bssid_addr))
continue;
ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+ iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
+ "channel got active");
}
}
@@ -1032,6 +1034,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
return 0;
}
+static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_wipan_noa_data *new_data, *old_data;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+
+ /* no condition -- we're in softirq */
+ old_data = rcu_dereference_protected(priv->noa_data, true);
+
+ if (noa_notif->noa_active) {
+ u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+ u32 copylen = len;
+
+ /* EID, len, OUI, subtype */
+ len += 1 + 1 + 3 + 1;
+ /* P2P id, P2P length */
+ len += 1 + 2;
+ copylen += 1 + 2;
+
+ new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ if (new_data) {
+ new_data->length = len;
+ new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ new_data->data[1] = len - 2; /* not counting EID, len */
+ new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+ memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+ copylen);
+ }
+ } else
+ new_data = NULL;
+
+ rcu_assign_pointer(priv->noa_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+
+ return 0;
+}
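/*
 * Illustrative note, not part of the patch: the buffer built above wraps
 * the uCode's NOA attribute in a vendor-specific P2P information element,
 * which mac80211 can later append to probe responses:
 *
 *   data[0]      WLAN_EID_VENDOR_SPECIFIC (0xdd)
 *   data[1]      IE length (bytes following this one)
 *   data[2..4]   WFA OUI, 0x50 0x6f 0x9a
 *   data[5]      WLAN_OUI_TYPE_WFA_P2P
 *   data[6..]    NOA attribute as received from the uCode
 *                (P2P attribute id, 2-byte length, body)
 */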
+
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
@@ -1055,6 +1101,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
+ handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
+
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
@@ -1083,13 +1131,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->notif_wait_lock);
- INIT_LIST_HEAD(&priv->notif_waits);
- init_waitqueue_head(&priv->notif_waitq);
+ spin_lock_init(&priv->shrd->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->shrd->notif_waits);
+ init_waitqueue_head(&priv->shrd->notif_waitq);
/* Set up BT Rx handlers */
- if (priv->cfg->lib->bt_rx_handler_setup)
- priv->cfg->lib->bt_rx_handler_setup(priv);
+ if (cfg(priv)->lib->bt_rx_handler_setup)
+ cfg(priv)->lib->bt_rx_handler_setup(priv);
}
@@ -1104,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->notif_waits)) {
+ if (!list_empty(&priv->shrd->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&priv->notif_wait_lock);
- list_for_each_entry(w, &priv->notif_waits, list) {
+ spin_lock(&priv->shrd->notif_wait_lock);
+ list_for_each_entry(w, &priv->shrd->notif_waits, list) {
if (w->cmd != pkt->hdr.cmd)
continue;
IWL_DEBUG_RX(priv,
@@ -1117,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
pkt->hdr.cmd);
w->triggered = true;
if (w->fn)
- w->fn(priv, pkt, w->fn_data);
+ w->fn(trans(priv), pkt, w->fn_data);
}
- spin_unlock(&priv->notif_wait_lock);
+ spin_unlock(&priv->shrd->notif_wait_lock);
- wake_up_all(&priv->notif_waitq);
+ wake_up_all(&priv->shrd->notif_waitq);
}
if (priv->pre_rx_handler)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c8..1c665941662 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags = old_filter;
if (ret)
- IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+ IWL_DEBUG_QUIET_RFKILL(priv,
+ "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
return ret;
}
@@ -59,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
u8 old_dev_type = send->dev_type;
int ret;
- iwlagn_init_notification_wait(priv, &disable_wait,
+ iwl_init_notification_wait(priv->shrd, &disable_wait,
REPLY_WIPAN_DEACTIVATION_COMPLETE,
NULL, NULL);
@@ -73,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwlagn_remove_notification(priv, &disable_wait);
+ iwl_remove_notification(priv->shrd, &disable_wait);
} else {
- ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
if (ctx->ht.enabled)
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
@@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
- IWL_ERR(priv, "Failed to update QoS\n");
+ IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
static int iwlagn_update_beacon(struct iwl_priv *priv,
@@ -295,9 +296,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
}
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
- priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
+ cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
- priv->cfg->ht_params->smps_mode);
+ cfg(priv)->ht_params->smps_mode);
return 0;
}
@@ -444,8 +445,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
	 * force CTS-to-self frame protection if RTS-CTS is not the preferred
	 * aggregation protection method
*/
- if (!(priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation))
+ if (!(cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation))
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -528,6 +529,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
@@ -541,6 +560,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ goto out;
+
if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
@@ -586,19 +608,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+			/* if HT40 is used, it should not change
+			 * after association except on a channel switch */
+ if (!ctx->ht.is_40mhz ||
+ !iwl_is_associated_ctx(ctx))
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
@@ -840,7 +854,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
iwl_trans_wake_any_queue(trans(priv),
- ctx->ctxid);
+ ctx->ctxid,
+ "Disassoc: flush queue");
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed628362393..7353826095f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -130,25 +130,15 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
return iwl_process_add_sta_resp(priv, addsta, pkt);
}
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags)
{
int ret = 0;
- u8 data[sizeof(*sta)];
struct iwl_host_cmd cmd = {
.id = REPLY_ADD_STA,
.flags = flags,
- .data = { data, },
+ .data = { sta, },
+ .len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
@@ -160,7 +150,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
might_sleep();
}
- cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret || (flags & CMD_ASYNC))
@@ -463,6 +452,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
unsigned long flags;
+ u8 tid;
if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_INFO(priv,
@@ -501,6 +491,10 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
priv->stations[sta_id].lq = NULL;
}
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->num_stations--;
@@ -647,7 +641,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
int ret;
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
- bool active;
+ bool active, have_lq = false;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@@ -657,7 +651,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
sta_cmd.mode = 0;
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ if (priv->stations[sta_id].lq) {
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ have_lq = true;
+ }
active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -679,7 +676,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (ret)
IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
priv->stations[sta_id].sta.sta.addr, ret);
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+ if (have_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@@ -825,28 +823,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
- "station %pM\n", sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@@ -1268,9 +1244,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
@@ -1283,8 +1256,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
@@ -1464,20 +1435,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
-{
- unsigned long flags;
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask = 0;
- priv->stations[sta_id].sta.sleep_tx_count = 0;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
-}
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
@@ -1494,36 +1452,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
-
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int sta_id;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- WARN_ON(!sta_priv->client);
- sta_priv->asleep = true;
- if (atomic_read(&sta_priv->pending_frames) > 0)
- ieee80211_sta_block_awake(hw, sta, true);
- break;
- case STA_NOTIFY_AWAKE:
- WARN_ON(!sta_priv->client);
- if (!sta_priv->asleep)
- break;
- sta_priv->asleep = false;
- sta_id = iwl_sta_id(sta);
- if (sta_id != IWL_INVALID_STATION)
- iwl_sta_modify_ps_wake(priv, sta_id);
- break;
- default:
- break;
- }
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index c27180a7335..b0dff7a753a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -633,7 +633,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
- if (priv->cfg->base_params->adv_thermal_throttle) {
+ if (cfg(priv)->base_params->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
tt->restriction = kcalloc(IWL_TI_STATE_MAX,
sizeof(struct iwl_tt_restriction),
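The priv->cfg to cfg(priv) conversion seen in this hunk (and repeated throughout the rest of the patch) swaps direct member access for an accessor, so the device configuration can later be reached through shared data rather than struct iwl_priv itself. The short standalone sketch below only illustrates that accessor pattern; the types and the macro body are assumptions for the sketch, not the driver's actual definitions.

/* Hypothetical, self-contained illustration of the accessor pattern used
 * throughout this patch: cfg(priv) hides where the config actually lives,
 * so callers no longer depend on priv->cfg directly. Types and the macro
 * body are assumptions, not the driver's real layout. */
#include <stdio.h>

struct iwl_base_params { int adv_thermal_throttle; };
struct iwl_cfg { struct iwl_base_params *base_params; };

struct iwl_shared { struct iwl_cfg *cfg; };	/* config held in shared data */
struct iwl_priv { struct iwl_shared *shrd; };

#define cfg(priv) ((priv)->shrd->cfg)		/* single point of indirection */

int main(void)
{
	struct iwl_base_params bp = { .adv_thermal_throttle = 1 };
	struct iwl_cfg c = { .base_params = &bp };
	struct iwl_shared shrd = { .cfg = &c };
	struct iwl_priv priv = { .shrd = &shrd };

	/* callers write cfg(&priv)->... instead of priv.cfg->... */
	if (cfg(&priv)->base_params->adv_thermal_throttle)
		printf("Advanced Thermal Throttling\n");
	return 0;
}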
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71f358..c664c272655 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -74,8 +74,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
}
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
if (priv->tm_fixed_rate) {
/*
* rate overwrite by testmode
@@ -161,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
#endif
return;
- }
+ } else if (ieee80211_is_back_req(fc))
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
/**
* If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -187,8 +191,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -258,8 +262,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
__le16 fc;
u8 hdr_len;
- u16 len;
- u8 sta_id;
+ u16 len, seq_number = 0;
+ u8 sta_id, tid = IWL_MAX_TID_COUNT;
unsigned long flags;
bool is_agg = false;
@@ -283,6 +287,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
+ if (unlikely(ieee80211_is_probe_resp(fc))) {
+ struct iwl_wipan_noa_data *noa_data =
+ rcu_dereference(priv->noa_data);
+
+ if (noa_data &&
+ pskb_expand_head(skb, 0, noa_data->length,
+ GFP_ATOMIC) == 0) {
+ memcpy(skb_put(skb, noa_data->length),
+ noa_data->data, noa_data->length);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ }
+ }
+
hdr_len = ieee80211_hdrlen(fc);
/* For management frames use broadcast id to do not break aggregation */
@@ -351,9 +368,51 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
- if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ struct iwl_tid_data *tid_data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ tid_data->agg.state != IWL_AGG_ON) {
+ IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
+ " Tx flags = 0x%08x, agg.state = %d",
+ info->flags, tid_data->agg.state);
+ IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+ sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
+ goto drop_unlock_sta;
+ }
+
+ /* We can receive packets from the stack in IWL_AGG_{ON,OFF}
+ * only. Check this here.
+ */
+ if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+ tid_data->agg.state != IWL_AGG_OFF,
+ "Tx while agg.state = %d", tid_data->agg.state))
+ goto drop_unlock_sta;
+
+ seq_number = tid_data->seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
goto drop_unlock_sta;
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
+ !ieee80211_has_morefrags(fc))
+ priv->tid_data[sta_id][tid].seq_number = seq_number;
+
spin_unlock(&priv->shrd->sta_lock);
spin_unlock_irqrestore(&priv->shrd->lock, flags);
@@ -378,10 +437,81 @@ drop_unlock_priv:
return -1;
}
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ priv->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_recl = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ priv->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+turn_off:
+ priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_lock(&priv->shrd->lock);
+
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
int sta_id;
int ret;
@@ -396,7 +526,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
+ if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
@@ -405,27 +535,136 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (ret)
return ret;
- ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
- tid, ssn);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ *ssn = tid_data->agg.ssn;
+
+ ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
+ if (ret) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return ret;
+ }
+
+ if (*ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_reclaimed = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return ret;
}
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
- int sta_id;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ unsigned long flags;
+ u16 ssn;
- sta_id = iwl_sta_id(sta);
+ buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+ buf_size, ssn);
+
+ /*
+ * If the limit is 0, then it wasn't initialised yet,
+ * use the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+ * switch to RTS/CTS if it is the prefer protection
+ * method for HT traffic
+ */
+
+ sta_priv->lq_sta.lq.general_params.flags |=
+ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
}
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
+
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
- return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
- sta_id, tid);
+ return iwl_send_lq_cmd(priv, ctx,
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+}
+
+static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
+{
+ struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
+ enum iwl_rxon_context_id ctx;
+ struct ieee80211_vif *vif;
+ u8 *addr;
+
+ lockdep_assert_held(&priv->shrd->sta_lock);
+
+ addr = priv->stations[sta_id].sta.sta.addr;
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue DELBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue ADDBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ default:
+ break;
+ }
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -565,7 +804,7 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
IWLAGN_TX_RES_TID_POS;
int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
u32 status = le16_to_cpu(tx_resp->status.status);
int i;
@@ -581,8 +820,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
* notification again.
*/
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
}
@@ -755,7 +994,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
struct ieee80211_hdr *hdr;
u32 status = le16_to_cpu(tx_resp->status.status);
- u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+ u16 ssn = iwlagn_get_scd_ssn(tx_resp);
int tid;
int sta_id;
int freed;
@@ -777,10 +1016,34 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_rx_reply_tx_agg(priv, tx_resp);
if (tx_resp->frame_count == 1) {
+ u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
+ next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
+
+ if (is_agg) {
+ /* If this is an aggregation queue, we can rely on the
+ * ssn since the wifi sequence number corresponds to
+ * the index in the TFD ring (%256).
+ * The seq_ctl is the sequence control of the packet
+ * to which this Tx response relates. But if there is a
+ * hole in the bitmap of the BA we received, this Tx
+ * response may allow to reclaim the hole and all the
+ * subsequent packets that were already acked.
+ * In that case, seq_ctl != ssn, and the next packet
+ * to be reclaimed will be ssn and not seq_ctl.
+ */
+ next_reclaimed = ssn;
+ }
+
__skb_queue_head_init(&skbs);
+ priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;
+
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+ next_reclaimed);
+
/*we can free until ssn % q.n_bd not inclusive */
- iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs);
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs));
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
@@ -800,7 +1063,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
ctx->last_tx_rejected = true;
- iwl_trans_stop_queue(trans(priv), txq_id);
+ iwl_trans_stop_queue(trans(priv), txq_id,
+ "Tx on passive channel");
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "
@@ -874,27 +1138,24 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
- agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ agg = &priv->tid_data[sta_id][tid].agg;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
- * FIXME: this is a uCode bug which need to be addressed,
- * log the information and return for now!
- * since it is possible happen very often and in order
- * not to fill the syslog, don't enable the logging by default
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+ ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -906,11 +1167,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
"scd_flow = %d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
+ ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
+ scd_flow, ba_resp_scd_ssn);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
@@ -929,13 +1188,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
ba_resp->txed, ba_resp->txed_2_done);
- __skb_queue_head_init(&reclaimed_skbs);
+ priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
- 0, &reclaimed_skbs);
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&reclaimed_skbs)) {
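The QoS bookkeeping added above keeps priv->tid_data[sta_id][tid].seq_number in the driver and advances it by 0x10 per frame, because the 802.11 sequence number occupies bits 4..15 of the sequence-control field; on Tx status, seq_ctl + 0x10 converted back to a sequence number yields the next packet to reclaim. Below is a minimal standalone sketch of that arithmetic using the standard mac80211 masks; the seq_to_sn helper is written out here as an assumption mirroring the driver's SEQ_TO_SN macro.

/* Standalone sketch of the sequence-control arithmetic used above.
 * IEEE80211_SCTL_SEQ/IEEE80211_SCTL_FRAG match include/linux/ieee80211.h;
 * seq_to_sn is an assumption mirroring the driver's SEQ_TO_SN macro. */
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_FRAG	0x000F	/* fragment number: bits 0..3  */
#define IEEE80211_SCTL_SEQ	0xFFF0	/* sequence number: bits 4..15 */

static uint16_t seq_to_sn(uint16_t seq)
{
	return (seq & IEEE80211_SCTL_SEQ) >> 4;
}

int main(void)
{
	uint16_t seq_number = 0x0150;	/* driver-side counter, already shifted */
	uint16_t seq_ctrl = 0x0003;	/* frame header: keep only fragment bits */

	/* Tx path: stamp the driver's sequence number into the header,
	 * then advance the counter by one sequence number (0x10). */
	seq_ctrl &= IEEE80211_SCTL_FRAG;
	seq_ctrl |= seq_number & IEEE80211_SCTL_SEQ;
	seq_number += 0x10;

	/* Tx-status path: seq_ctl of the reported frame plus 0x10 gives the
	 * next sequence number the driver expects to reclaim. */
	uint16_t next_reclaimed = seq_to_sn(seq_ctrl + 0x10);

	printf("stamped sn=%u, next driver sn=%u, next_reclaimed=%u\n",
	       seq_to_sn(seq_ctrl), seq_to_sn(seq_number), next_reclaimed);
	return 0;
}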
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a..b5c7c5f0a75 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
@@ -44,6 +43,7 @@
#include <asm/div64.h>
#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -367,7 +367,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
- base = priv->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
capacity = iwl_read_targ_mem(bus(priv), base);
num_wraps = iwl_read_targ_mem(bus(priv),
@@ -452,52 +452,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(bus(priv)->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
-{
- iwl_free_fw_desc(priv, &img->code);
- iwl_free_fw_desc(priv, &img->data);
-}
-
-static void iwl_dealloc_ucode(struct iwl_priv *priv)
-{
- iwl_free_fw_img(priv, &priv->ucode_rt);
- iwl_free_fw_img(priv, &priv->ucode_init);
- iwl_free_fw_img(priv, &priv->ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
- const void *data, size_t len)
-{
- if (!len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
- return -ENOMEM;
-
- desc->len = len;
- memcpy(desc->v_addr, data, len);
- return 0;
-}
-
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -555,23 +509,14 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-
-struct iwlagn_ucode_capabilities {
- u32 max_probe_length;
- u32 standard_phy_calibration_size;
- u32 flags;
-};
-
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa);
#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
- const char *name_pre = priv->cfg->fw_name_pre;
+ const char *name_pre = cfg(priv)->fw_name_pre;
char tag[8];
if (first) {
@@ -580,14 +525,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
- priv->fw_index = priv->cfg->ucode_api_max;
+ priv->fw_index = cfg(priv)->ucode_api_max;
sprintf(tag, "%d", priv->fw_index);
} else {
priv->fw_index--;
sprintf(tag, "%d", priv->fw_index);
}
- if (priv->fw_index < priv->cfg->ucode_api_min) {
+ if (priv->fw_index < cfg(priv)->ucode_api_min) {
IWL_ERR(priv, "no suitable firmware found!\n");
return -ENOENT;
}
@@ -892,9 +837,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
int err;
struct iwlagn_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- unsigned int api_ok = priv->cfg->ucode_api_ok;
- const unsigned int api_min = priv->cfg->ucode_api_min;
+ const unsigned int api_max = cfg(priv)->ucode_api_max;
+ unsigned int api_ok = cfg(priv)->ucode_api_ok;
+ const unsigned int api_min = cfg(priv)->ucode_api_min;
u32 api_ver;
char buildstr[25];
u32 build;
@@ -1040,30 +985,32 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
pieces.inst, pieces.inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
pieces.data, pieces.data_size))
goto err_pci_alloc;
/* Initialization instructions and data */
if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
pieces.init, pieces.init_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
pieces.init_data, pieces.init_data_size))
goto err_pci_alloc;
}
/* WoWLAN instructions and data */
if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.code,
pieces.wowlan_inst,
pieces.wowlan_inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.data,
pieces.wowlan_data,
pieces.wowlan_data_size))
goto err_pci_alloc;
@@ -1081,20 +1028,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
else
priv->init_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->init_errlog_ptr = pieces.init_errlog_ptr;
priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
if (pieces.inst_evtlog_size)
priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
else
priv->inst_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1111,7 +1061,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->sta_key_max_num = STA_KEY_MAX_NUM;
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
-
/*
* figure out the offset of chain noise reset and gain commands
* base on the size of standard phy calibration commands table size
@@ -1156,7 +1105,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
err_pci_alloc:
IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
out_unbind:
complete(&priv->firmware_loading_complete);
device_release_driver(bus(priv)->dev);
@@ -1176,7 +1125,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->shrd->lock, flags);
priv->thermal_throttle.ct_kill_toggle = false;
- if (priv->cfg->base_params->support_ct_kill_exit) {
+ if (cfg(priv)->base_params->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
cpu_to_le32(hw_params(priv).ct_kill_threshold);
adv_cmd.critical_temperature_exit =
@@ -1271,10 +1220,10 @@ int iwl_alive_start(struct iwl_priv *priv)
return -ERFKILL;
/* download priority table before any calibration request */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
- if (priv->cfg->bt_params->bt_sco_disable)
+ if (cfg(priv)->bt_params->bt_sco_disable)
priv->bt_enable_pspoll = false;
else
priv->bt_enable_pspoll = true;
@@ -1286,14 +1235,14 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwlagn_send_prio_tbl(priv);
+ iwl_send_prio_tbl(trans(priv));
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -1304,16 +1253,17 @@ int iwl_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
}
- if (hw_params(priv).calib_rt_cfg)
- iwlagn_send_calib_cfg_rt(priv,
- hw_params(priv).calib_rt_cfg);
+ /*
+ * Perform runtime calibrations, including DC calibration.
+ */
+ iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
ieee80211_wake_queues(priv->hw);
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -1352,7 +1302,7 @@ int iwl_alive_start(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv);
-static void __iwl_down(struct iwl_priv *priv)
+void __iwl_down(struct iwl_priv *priv)
{
int exit_pending;
@@ -1382,9 +1332,9 @@ static void __iwl_down(struct iwl_priv *priv)
priv->bt_status = 0;
priv->cur_rssi_ctx = NULL;
priv->bt_is_sco = 0;
- if (priv->cfg->bt_params)
+ if (cfg(priv)->bt_params)
priv->bt_traffic_load =
- priv->cfg->bt_params->bt_init_traffic_load;
+ cfg(priv)->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
priv->bt_full_concurrent = false;
@@ -1415,7 +1365,7 @@ static void __iwl_down(struct iwl_priv *priv)
priv->beacon_skb = NULL;
}
-static void iwl_down(struct iwl_priv *priv)
+void iwl_down(struct iwl_priv *priv)
{
mutex_lock(&priv->shrd->mutex);
__iwl_down(priv);
@@ -1424,57 +1374,6 @@ static void iwl_down(struct iwl_priv *priv)
iwl_cancel_deferred_work(priv);
}
-#define MAX_HW_RESTARTS 5
-
-static int __iwl_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwlagn_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- ret = iwlagn_run_init_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
- if (ret) {
- IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
- return 0;
-
- error:
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
- __iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
- IWL_ERR(priv, "Unable to initialize device.\n");
- return ret;
-}
-
-
/*****************************************************************************
*
* Workqueue callbacks
@@ -1502,7 +1401,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static void iwlagn_prepare_restart(struct iwl_priv *priv)
+void iwlagn_prepare_restart(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
bool bt_full_concurrent;
@@ -1559,1173 +1458,8 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_dualmode[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_sta_ap_limits,
- .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_p2p_sta_go_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_p2p_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
- },
-};
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-agn-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- /*
- * Including the following line will crash some AP's. This
- * workaround removes the stimulus which causes the crash until
- * the AP software can be fixed.
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- */
-
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
- if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_p2p);
- } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
- }
-
- hw->wiphy->max_remain_on_channel_duration = 1000;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
- if (!iwlagn_mod_params.sw_crypto)
- hw->wiphy->wowlan.flags |=
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE;
-
- hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
- hw->wiphy->wowlan.pattern_min_len =
- IWLAGN_WOWLAN_MIN_PATTERN_LEN;
- hw->wiphy->wowlan.pattern_max_len =
- IWLAGN_WOWLAN_MAX_PATTERN_LEN;
- }
-
- if (iwlagn_mod_params.power_save)
- hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
- else
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-static int iwlagn_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->shrd->mutex);
- ret = __iwl_up(priv);
- mutex_unlock(&priv->shrd->mutex);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
- ret = -EIO;
-
- iwlagn_led_enable(priv);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl_down(priv);
-
- flush_workqueue(priv->shrd->workqueue);
-
- /* User space software may expect getting rfkill changes
- * even if interface is down */
- iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
- iwl_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int iwlagn_send_patterns(struct iwl_priv *priv,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_WOWLAN_PATTERNS,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
- };
- int i, err;
-
- if (!wowlan->n_patterns)
- return 0;
-
- cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
-
- pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
- if (!pattern_cmd)
- return -ENOMEM;
-
- pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
-
- for (i = 0; i < wowlan->n_patterns; i++) {
- int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
-
- memcpy(&pattern_cmd->patterns[i].mask,
- wowlan->patterns[i].mask, mask_len);
- memcpy(&pattern_cmd->patterns[i].pattern,
- wowlan->patterns[i].pattern,
- wowlan->patterns[i].pattern_len);
- pattern_cmd->patterns[i].mask_size = mask_len;
- pattern_cmd->patterns[i].pattern_size =
- wowlan->patterns[i].pattern_len;
- }
-
- cmd.data[0] = pattern_cmd;
- err = iwl_trans_send_cmd(trans(priv), &cmd);
- kfree(pattern_cmd);
- return err;
-}
-#endif
-
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (iwlagn_mod_params.sw_crypto)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
- goto out;
-
- memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
- memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
- priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
- priv->have_rekey_data = true;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-struct wowlan_key_data {
- struct iwl_rxon_context *ctx;
- struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
- struct iwlagn_wowlan_tkip_params_cmd *tkip;
- const u8 *bssid;
- bool error, use_rsc_tsc, use_tkip;
-};
-
-#ifdef CONFIG_PM_SLEEP
-static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
-{
- int i;
-
- for (i = 0; i < IWLAGN_P1K_SIZE; i++)
- out[i] = cpu_to_le16(p1k[i]);
-}
-
-static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_priv *priv = hw->priv;
- struct wowlan_key_data *data = _data;
- struct iwl_rxon_context *ctx = data->ctx;
- struct aes_sc *aes_sc, *aes_tx_sc = NULL;
- struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
- struct iwlagn_p1k_cache *rx_p1ks;
- u8 *rx_mic_key;
- struct ieee80211_key_seq seq;
- u32 cur_rx_iv32 = 0;
- u16 p1k[IWLAGN_P1K_SIZE];
- int ret, i;
-
- mutex_lock(&priv->shrd->mutex);
-
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta && !ctx->key_mapping_keys)
- ret = iwl_set_default_wep_key(priv, ctx, key);
- else
- ret = iwl_set_dynamic_key(priv, ctx, key, sta);
-
- if (ret) {
- IWL_ERR(priv, "Error setting key during suspend!\n");
- data->error = true;
- }
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- if (sta) {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
- tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
-
- rx_p1ks = data->tkip->rx_uni;
-
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
-
- ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
- iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
-
- memcpy(data->tkip->mic_keys.tx,
- &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- rx_mic_key = data->tkip->mic_keys.rx_unicast;
- } else {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
- rx_p1ks = data->tkip->rx_multi;
- rx_mic_key = data->tkip->mic_keys.rx_mcast;
- }
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 (as they need to to avoid replay attacks)
- * for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- ieee80211_get_key_rx_seq(key, i, &seq);
- tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
- /* wrapping isn't allowed, AP must rekey */
- if (seq.tkip.iv32 > cur_rx_iv32)
- cur_rx_iv32 = seq.tkip.iv32;
- }
-
- ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
- ieee80211_get_tkip_rx_p1k(key, data->bssid,
- cur_rx_iv32 + 1, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
-
- memcpy(rx_mic_key,
- &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- data->use_tkip = true;
- data->use_rsc_tsc = true;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- if (sta) {
- u8 *pn = seq.ccmp.pn;
-
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
- aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
-
- ieee80211_get_key_tx_seq(key, &seq);
- aes_tx_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- } else
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- u8 *pn = seq.ccmp.pn;
-
- ieee80211_get_key_rx_seq(key, i, &seq);
- aes_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- }
- data->use_rsc_tsc = true;
- break;
- }
-
- mutex_unlock(&priv->shrd->mutex);
-}
-
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
- struct iwl_rxon_cmd rxon;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
- struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
- struct wowlan_key_data key_data = {
- .ctx = ctx,
- .bssid = ctx->active.bssid_addr,
- .use_rsc_tsc = false,
- .tkip = &tkip_cmd,
- .use_tkip = false,
- };
- int ret, i;
- u16 seq;
-
- if (WARN_ON(!wowlan))
- return -EINVAL;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- /* Don't attempt WoWLAN when not associated, tear down instead. */
- if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
- !iwl_is_associated_ctx(ctx)) {
- ret = 1;
- goto out;
- }
-
- key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
- if (!key_data.rsc_tsc) {
- ret = -ENOMEM;
- goto out;
- }
-
- memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
-
- /*
- * We know the last used seqno, and the uCode expects to know that
- * one, it will increment before TX.
- */
- seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
- wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
-
- /*
- * For QoS counters, we store the one to use next, so subtract 0x10
- * since the uCode will add 0x10 before using the value.
- */
- for (i = 0; i < 8; i++) {
- seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
- seq -= 0x10;
- wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
- }
-
- if (wowlan->disconnect)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
- IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
- if (wowlan->magic_pkt)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
- if (wowlan->gtk_rekey_failure)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
- if (wowlan->eap_identity_req)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
- if (wowlan->four_way_handshake)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
- if (wowlan->rfkill_release)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
- if (wowlan->n_patterns)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
-
- iwl_scan_cancel_timeout(priv, 200);
-
- memcpy(&rxon, &ctx->active, sizeof(rxon));
-
- iwl_trans_stop_device(trans(priv));
-
- priv->shrd->wowlan = true;
-
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
- IWL_UCODE_WOWLAN);
- if (ret)
- goto error;
-
- /* now configure WoWLAN ucode */
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
-
- memcpy(&ctx->staging, &rxon, sizeof(rxon));
- ret = iwlagn_commit_rxon(priv, ctx);
- if (ret)
- goto error;
-
- ret = iwl_power_update_mode(priv, true);
- if (ret)
- goto error;
-
- if (!iwlagn_mod_params.sw_crypto) {
- /* mark all keys clear */
- priv->ucode_key_table = 0;
- ctx->key_mapping_keys = 0;
-
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&priv->shrd->mutex);
- ieee80211_iter_keys(priv->hw, ctx->vif,
- iwlagn_wowlan_program_keys,
- &key_data);
- mutex_lock(&priv->shrd->mutex);
- if (key_data.error) {
- ret = -EIO;
- goto error;
- }
-
- if (key_data.use_rsc_tsc) {
- struct iwl_host_cmd rsc_tsc_cmd = {
- .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
- .flags = CMD_SYNC,
- .data[0] = key_data.rsc_tsc,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .len[0] = sizeof(*key_data.rsc_tsc),
- };
-
- ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
- if (ret)
- goto error;
- }
-
- if (key_data.use_tkip) {
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_TKIP_PARAMS,
- CMD_SYNC, sizeof(tkip_cmd),
- &tkip_cmd);
- if (ret)
- goto error;
- }
-
- if (priv->have_rekey_data) {
- memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
- memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
- kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
- memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
- kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
- kek_kck_cmd.replay_ctr = priv->replay_ctr;
-
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC, sizeof(kek_kck_cmd),
- &kek_kck_cmd);
- if (ret)
- goto error;
- }
- }
-
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
- CMD_SYNC, sizeof(wakeup_filter_cmd),
- &wakeup_filter_cmd);
- if (ret)
- goto error;
-
- ret = iwlagn_send_patterns(priv, wowlan);
- if (ret)
- goto error;
-
- device_set_wakeup_enable(bus(priv)->dev, true);
-
- /* Now let the ucode operate on its own */
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- goto out;
-
- error:
- priv->shrd->wowlan = false;
- iwlagn_prepare_restart(priv);
- ieee80211_restart_hw(priv->hw);
- out:
- mutex_unlock(&priv->shrd->mutex);
- kfree(key_data.rsc_tsc);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_resume(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&bus(priv)->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(bus(priv));
- if (ret == 0) {
- iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(bus(priv));
- }
- spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- if (!priv->wowlan_sram)
- priv->wowlan_sram =
- kzalloc(priv->ucode_wowlan.data.len,
- GFP_KERNEL);
-
- if (priv->wowlan_sram)
- _iwl_read_targ_mem_words(
- bus(priv), 0x800000, priv->wowlan_sram,
- priv->ucode_wowlan.data.len / 4);
- }
-#endif
- }
-
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
-
- priv->shrd->wowlan = false;
-
- device_set_wakeup_enable(bus(priv)->dev, false);
-
- iwlagn_prepare_restart(priv);
-
- memset((void *)&ctx->active, 0, sizeof(ctx->active));
- iwl_connection_init_rx_config(priv, ctx);
- iwlagn_set_rxon_chain(priv, ctx);
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- ieee80211_resume_disconnect(vif);
-
- return 1;
-}
-#endif
-
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwlagn_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
-
- iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
-}
-
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwlagn_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
- * We could program these keys into the hardware as well, but we
- * don't expect much multicast traffic in IBSS and having keys
- * for more stations is probably more useful.
- *
- * Mark key TX-only and return 0.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- key->hw_key_idx = WEP_INVALID_OFFSET;
- return 0;
- }
-
- /* If they key was TX-only, accept deletion */
- if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
- return 0;
-
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, 100);
-
- BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
-
- /*
- * If we are getting WEP group key and we didn't receive any key mapping
- * so far, we are in legacy wep mode (group key only), otherwise we are
- * in 1X mode.
- * In legacy wep mode, we use another host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
- }
-
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key) {
- ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
- break;
- }
- ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
- if (ret) {
- /*
- * can't add key for RX, but we don't need it
- * in the device for TX so still return 0
- */
- ret = 0;
- key->hw_key_idx = WEP_INVALID_OFFSET;
- }
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
- struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
- return -EACCES;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
- if ((ret == 0) && (priv->agg_tids_count > 0)) {
- priv->agg_tids_count--;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
- }
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- if (!priv->agg_tids_count && priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch off RTS/CTS if it was previously enabled
- */
- sta_priv->lq_sta.lq.general_params.flags &=
- ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
- }
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
-
- iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
- tid, buf_size);
-
- /*
- * If the limit is 0, then it wasn't initialised yet,
- * use the default. We can do that since we take the
- * minimum below, and we don't want to go above our
- * default due to hardware restrictions.
- */
- if (sta_priv->max_agg_bufsize == 0)
- sta_priv->max_agg_bufsize =
- LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-
- /*
- * Even though in theory the peer could have different
- * aggregation reorder buffer sizes for different sessions,
- * our ucode doesn't allow for that and has a global limit
- * for each station. Therefore, use the minimum of all the
- * aggregation sessions and our default value.
- */
- sta_priv->max_agg_bufsize =
- min(sta_priv->max_agg_bufsize, buf_size);
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch to RTS/CTS if it is the prefer protection
- * method for HT traffic
- */
-
- sta_priv->lq_sta.lq.general_params.flags |=
- LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- }
- priv->agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
-
- sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
- sta_priv->max_agg_bufsize;
-
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
-
- IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
- sta->addr, tid);
- ret = 0;
- break;
- }
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return ret;
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret = 0;
- u8 sta_id;
-
- IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
-
- ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- goto out;
- }
-
- sta_priv->sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl_rs_rate_init(priv, sta, sta_id);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /*
- * MULTI-FIXME
- * When we add support for multiple interfaces, we need to
- * revisit this. The channel switch command in the device
- * only affects the BSS context, but what does that really
- * mean? And what if we get a CSA on the second interface?
- * This needs a lot of work.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_rfkill(priv->shrd))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
- goto out;
-
- if (!iwl_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->shrd->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->shrd->lock);
-
- iwl_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
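/*
 * Standalone sketch of the HT40 bookkeeping done during the channel switch
 * above (the same logic is declared as iwlagn_config_ht40() in iwl-agn.h
 * later in this patch): the secondary-channel offset and the 40 MHz flag
 * are derived from the current HT configuration.  The enum values here are
 * simplified stand-ins, not mac80211 definitions.
 */
#include <stdio.h>
#include <stdbool.h>

enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };
enum ht_mode { HT_OFF, HT20, HT40_MINUS, HT40_PLUS };

static void config_ht40(enum ht_mode mode, enum sec_offset *offset, bool *is_40mhz)
{
	switch (mode) {
	case HT40_MINUS:
		*offset = SEC_BELOW;
		*is_40mhz = true;
		break;
	case HT40_PLUS:
		*offset = SEC_ABOVE;
		*is_40mhz = true;
		break;
	default:
		*offset = SEC_NONE;
		*is_40mhz = false;
		break;
	}
}

int main(void)
{
	enum sec_offset off;
	bool wide;

	config_ht40(HT40_PLUS, &off, &wide);
	printf("offset=%d is_40mhz=%d\n", off, wide);
	return 0;
}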
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->shrd->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->shrd->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
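/*
 * Standalone sketch of the CHK() pattern used by iwlagn_configure_filter()
 * above: the requested mac80211 filter flags are folded into one "set" mask
 * and one "clear" mask, which are then applied to each context's staging
 * filter word.  All flag values below are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define FIF_PROMISC_EX	0x1
#define FIF_CONTROL_EX	0x2
#define RXON_PROMISC_EX	0x10
#define RXON_CTL2HOST_EX	0x20

int main(void)
{
	unsigned int total_flags = FIF_CONTROL_EX;	/* what mac80211 asked for */
	uint32_t filter_or = 0, filter_nand = 0;
	uint32_t staging = RXON_PROMISC_EX;		/* previous filter state */

#define CHK(test, flag) do {			\
	if (total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
} while (0)

	CHK(FIF_PROMISC_EX, RXON_PROMISC_EX);
	CHK(FIF_CONTROL_EX, RXON_CTL2HOST_EX | RXON_PROMISC_EX);
#undef CHK

	staging &= ~filter_nand;
	staging |= filter_or;
	printf("staging filter flags: 0x%02x\n", (unsigned int)staging);
	return 0;
}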
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
-{
- struct iwl_priv *priv = hw->priv;
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
- goto done;
- }
- if (iwl_is_rfkill(priv->shrd)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
- goto done;
- }
-
- /*
- * mac80211 will not push any more frames for transmit
- * until the flush is completed
- */
- if (drop) {
- IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
- IWL_ERR(priv, "flush request fail\n");
- goto done;
- }
- }
- IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(trans(priv));
-done:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
void iwlagn_disable_roc(struct iwl_priv *priv)
{
@@ -2759,160 +1493,6 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type,
- int duration)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- int err = 0;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- err = -EBUSY;
- goto out;
- }
-
- priv->hw_roc_channel = channel;
- priv->hw_roc_chantype = channel_type;
- priv->hw_roc_duration = duration;
- priv->hw_roc_start_notified = false;
- cancel_delayed_work(&priv->hw_roc_disable_work);
-
- if (!ctx->is_active) {
- ctx->is_active = true;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- memcpy(ctx->staging.node_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- memcpy(ctx->staging.bssid_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- err = iwlagn_commit_rxon(priv, ctx);
- if (err)
- goto out;
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
-
- err = iwlagn_commit_rxon(priv, ctx);
- if (err) {
- iwlagn_disable_roc(priv);
- goto out;
- }
- priv->hw_roc_setup = true;
- }
-
- err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
- if (err)
- iwlagn_disable_roc(priv);
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx)) {
- ret = 0;
- goto out;
- }
-
- if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
- if (ret)
- goto out;
-
- if (WARN_ON(sta_id != ctx->ap_sta_id)) {
- ret = -EIO;
- goto out_remove_sta;
- }
-
- memcpy(ctx->bssid, bssid, ETH_ALEN);
- ctx->preauth_bssid = true;
-
- ret = iwlagn_commit_rxon(priv, ctx);
-
- if (ret == 0)
- goto out;
-
- out_remove_sta:
- iwl_remove_station(priv, sta_id, bssid);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx))
- goto out;
-
- iwl_remove_station(priv, ctx->ap_sta_id, bssid);
- ctx->preauth_bssid = false;
- /* no need to commit */
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
/*****************************************************************************
*
* driver setup and teardown
@@ -2936,8 +1516,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
iwl_setup_scan_deferred_work(priv);
- if (priv->cfg->lib->bt_setup_deferred_work)
- priv->cfg->lib->bt_setup_deferred_work(priv);
+ if (cfg(priv)->lib->bt_setup_deferred_work)
+ cfg(priv)->lib->bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -2954,8 +1534,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (priv->cfg->lib->cancel_deferred_work)
- priv->cfg->lib->cancel_deferred_work(priv);
+ if (cfg(priv)->lib->cancel_deferred_work)
+ cfg(priv)->lib->cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -2999,6 +1579,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->shrd->mutex);
+ INIT_LIST_HEAD(&trans(priv)->calib_results);
+
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3022,8 +1604,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
/* init bt coex */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -3055,88 +1637,19 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_calib_free_results(priv);
iwl_free_geos(priv);
iwl_free_channel_map(priv);
if (priv->tx_cmd_pool)
kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
+ kfree(rcu_dereference_raw(priv->noa_data));
#ifdef CONFIG_IWLWIFI_DEBUGFS
kfree(priv->wowlan_sram);
#endif
}
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event)
-{
- struct iwl_priv *priv = hw->priv;
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- if (rssi_event == RSSI_EVENT_LOW)
- priv->bt_enable_pspoll = true;
- else if (rssi_event == RSSI_EVENT_HIGH)
- priv->bt_enable_pspoll = false;
-
- iwlagn_send_advance_bt_config(priv);
- } else {
- IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
- "ignoring RSSI callback\n");
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set)
-{
- struct iwl_priv *priv = hw->priv;
-
- queue_work(priv->shrd->workqueue, &priv->beacon_update);
-
- return 0;
-}
-
-struct ieee80211_ops iwlagn_hw_ops = {
- .tx = iwlagn_mac_tx,
- .start = iwlagn_mac_start,
- .stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwlagn_mac_suspend,
- .resume = iwlagn_mac_resume,
-#endif
- .add_interface = iwlagn_mac_add_interface,
- .remove_interface = iwlagn_mac_remove_interface,
- .change_interface = iwlagn_mac_change_interface,
- .config = iwlagn_mac_config,
- .configure_filter = iwlagn_configure_filter,
- .set_key = iwlagn_mac_set_key,
- .update_tkip_key = iwlagn_mac_update_tkip_key,
- .set_rekey_data = iwlagn_mac_set_rekey_data,
- .conf_tx = iwlagn_mac_conf_tx,
- .bss_info_changed = iwlagn_bss_info_changed,
- .ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwlagn_mac_hw_scan,
- .sta_notify = iwlagn_mac_sta_notify,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwlagn_mac_sta_remove,
- .channel_switch = iwlagn_mac_channel_switch,
- .flush = iwlagn_mac_flush,
- .tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .remain_on_channel = iwlagn_mac_remain_on_channel,
- .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
- .rssi_callback = iwlagn_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
- .tx_sync = iwlagn_mac_tx_sync,
- .finish_tx_sync = iwlagn_mac_finish_tx_sync,
- .set_tim = iwlagn_mac_set_tim,
-};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
@@ -3156,40 +1669,55 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_4K);
- if (iwlagn_mod_params.disable_11n)
- priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
hw_params(priv).num_ampdu_queues =
- priv->cfg->base_params->num_of_ampdu_queues;
+ cfg(priv)->base_params->num_of_ampdu_queues;
hw_params(priv).shadow_reg_enable =
- priv->cfg->base_params->shadow_reg_enable;
- hw_params(priv).sku = priv->cfg->sku;
- hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+ cfg(priv)->base_params->shadow_reg_enable;
+ hw_params(priv).sku = cfg(priv)->sku;
+ hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
/* Device-specific setup */
- return priv->cfg->lib->set_hw_params(priv);
+ return cfg(priv)->lib->set_hw_params(priv);
}
-/* This function both allocates and initializes hw and priv. */
-static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
- priv = hw->priv;
- priv->hw = hw;
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
-out:
- return hw;
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
}
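/*
 * Minimal standalone illustration of the pattern iwl_debug_config() uses
 * above: the argument list is split so the preprocessor picks the
 * "enabled"/"disabled" tail of the string at compile time and adjacent
 * string literals are concatenated.  The config symbol is invented for
 * the example.
 */
#include <stdio.h>

/* #define EXAMPLE_FEATURE */	/* uncomment to report "enabled" */

int main(void)
{
	printf("EXAMPLE_FEATURE "
#ifdef EXAMPLE_FEATURE
	       "enabled\n");
#else
	       "disabled\n");
#endif
	return 0;
}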
int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
@@ -3204,8 +1732,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
/************************
* 1. Allocating HW data
************************/
- hw = iwl_alloc_all(cfg);
+ hw = iwl_alloc_all();
if (!hw) {
+ pr_err("%s: Cannot allocate network device\n", cfg->name);
err = -ENOMEM;
goto out;
}
@@ -3226,8 +1755,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
SET_IEEE80211_DEV(hw, bus(priv)->dev);
+ /* what debugging capabilities we have */
+ * @wd_disable: disable watchdog timer
+
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
+ cfg(priv) = cfg;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
@@ -3261,7 +1793,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
***********************/
hw_rev = iwl_hw_detect(priv);
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, hw_rev);
+ cfg(priv)->name, hw_rev);
err = iwl_trans_request_irq(trans(priv));
if (err)
@@ -3291,11 +1823,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -3360,7 +1892,7 @@ out_destroy_workqueue:
priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
out_free_trans:
iwl_trans_free(trans(priv));
out_free_traffic_mem:
@@ -3385,21 +1917,16 @@ void __devexit iwl_remove(struct iwl_priv * priv)
set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
iwl_testmode_cleanup(priv);
- iwl_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- }
+ iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
/*This will stop the queues, move the device to low power state */
iwl_trans_stop_device(trans(priv));
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
flush_workqueue(priv->shrd->workqueue);
@@ -3469,8 +1996,9 @@ module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
+MODULE_PARM_DESC(11n_disable,
+ "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
@@ -3499,9 +2027,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+ "Disable stuck queue watchdog timer 0=system default, "
+ "1=disable, 2=enable (default: 0)");
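/*
 * Standalone sketch of how the reworked 11n_disable parameter above can be
 * interpreted now that it is a bitmap rather than a plain on/off switch.
 * The bit names are assumptions invented for the example; only the numeric
 * values (1/2/4) come from the parameter description.
 */
#include <stdio.h>

#define DISABLE_HT_ALL		0x1	/* "1: full" */
#define DISABLE_HT_TXAGG	0x2	/* "2: agg TX" */
#define DISABLE_HT_RXAGG	0x4	/* "4: agg RX" */

int main(void)
{
	unsigned int disable_11n = DISABLE_HT_TXAGG | DISABLE_HT_RXAGG;

	printf("HT completely off:  %s\n",
	       (disable_11n & DISABLE_HT_ALL) ? "yes" : "no");
	printf("TX aggregation off: %s\n",
	       (disable_11n & DISABLE_HT_TXAGG) ? "yes" : "no");
	printf("RX aggregation off: %s\n",
	       (disable_11n & DISABLE_HT_RXAGG) ? "yes" : "no");
	return 0;
}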
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a54..f84fb3c5356 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,12 @@
#include "iwl-dev.h"
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+ u32 standard_phy_calibration_size;
+ u32 flags;
+};
+
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_trans *trans);
@@ -77,6 +83,16 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
+void __iwl_down(struct iwl_priv *priv);
+void iwl_down(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
+void iwlagn_mac_unregister(struct iwl_priv *priv);
+
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -86,25 +102,27 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
-int iwlagn_run_init_ucode(struct iwl_priv *priv);
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+#endif
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@@ -115,6 +133,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
@@ -196,9 +216,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@@ -316,10 +333,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_update_bcast_stations(struct iwl_priv *priv);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
/* rate */
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -339,28 +352,11 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
/* eeprom */
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry);
-extern int iwlagn_init_alive_start(struct iwl_priv *priv);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
+
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index 08b97594e30..940d5038b39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -122,7 +122,8 @@ struct iwl_bus;
* struct iwl_bus_ops - bus specific operations
 * @get_pm_support: must return true if the bus can go to sleep
* @apm_config: will be called during the config of the APM
- * @get_hw_id: prints the hw_id in the provided buffer
+ * @get_hw_id_string: prints the hw_id in the provided buffer
+ * @get_hw_id: get hw_id in u32
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
 * @read32: read a dword at register at offset ofs
@@ -130,7 +131,8 @@ struct iwl_bus;
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
- void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
+ void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
+ u32 (*get_hw_id)(struct iwl_bus *bus);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
@@ -172,9 +174,15 @@ static inline void bus_apm_config(struct iwl_bus *bus)
bus->ops->apm_config(bus);
}
-static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
+static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
+ int buf_len)
{
- bus->ops->get_hw_id(bus, buf, buf_len);
+ bus->ops->get_hw_id_string(bus, buf, buf_len);
+}
+
+static inline u32 bus_get_hw_id(struct iwl_bus *bus)
+{
+ return bus->ops->get_hw_id(bus);
}
static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
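/*
 * Standalone sketch of the iwl_bus_ops change in this hunk: the single
 * get_hw_id hook is split into a string form and a raw u32 form, with thin
 * wrappers dispatching through the ops table.  The struct and function
 * names below are simplified stand-ins, not the driver's real types.
 */
#include <stdio.h>
#include <stdint.h>

struct example_bus;

struct example_bus_ops {
	void (*get_hw_id_string)(struct example_bus *bus, char buf[], int buf_len);
	uint32_t (*get_hw_id)(struct example_bus *bus);
};

struct example_bus {
	const struct example_bus_ops *ops;
	uint32_t hw_id;
};

static void pci_get_hw_id_string(struct example_bus *bus, char buf[], int buf_len)
{
	snprintf(buf, buf_len, "HW 0x%08x", bus->hw_id);
}

static uint32_t pci_get_hw_id(struct example_bus *bus)
{
	return bus->hw_id;
}

static const struct example_bus_ops pci_ops = {
	.get_hw_id_string = pci_get_hw_id_string,
	.get_hw_id = pci_get_hw_id,
};

int main(void)
{
	struct example_bus bus = { .ops = &pci_ops, .hw_id = 0x00000110 };
	char buf[32];

	bus.ops->get_hw_id_string(&bus, buf, sizeof(buf));
	printf("%s (raw %u)\n", buf, (unsigned int)bus.ops->get_hw_id(&bus));
	return 0;
}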
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 2a2dc4597ba..e1d78257e4a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_d_cfg;
extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
extern struct iwl_cfg iwl105_bgn_cfg;
extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 69d5f85d11e..265de39d394 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@ enum {
/* RX, TX, LEDs */
REPLY_TX = 0x1c,
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e,
/* WiMAX coexistence */
- COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
+ COEX_PRIORITY_TABLE_CMD = 0x5a,
COEX_MEDIUM_NOTIFICATION = 0x5b,
COEX_EVENT_CMD = 0x5c,
@@ -198,6 +198,7 @@ enum {
REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
REPLY_WOWLAN_GET_STATUS = 0xe5,
+ REPLY_D3_CONFIG = 0xd3,
REPLY_MAX = 0xff
};
@@ -465,23 +466,27 @@ struct iwl_error_event_table {
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
-#if 0
- /* no need to read the remainder, we don't use the values */
- u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the compilation */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicate when the date and time of the
+ * compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-#endif
} __packed;
struct iwl_alive_resp {
@@ -809,7 +814,7 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 9
+#define IWL_MAX_TID_COUNT 8
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -930,8 +935,7 @@ struct iwl_addsta_cmd {
* corresponding to bit (e.g. bit 5 controls TID 5).
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
-
- __le16 rate_n_flags; /* 3945 only */
+ __le16 legacy_reserved;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1161,8 +1165,7 @@ struct iwl_rx_mpdu_res_start {
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
*
* Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
* This command must be executed after every RXON command, before Tx can occur.
@@ -1174,25 +1177,9 @@ struct iwl_rx_mpdu_res_start {
* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
* before this frame. if CTS-to-self required check
* RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
*/
#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
/* 1: Expect ACK from receiving station
* 0: Don't expect ACK (MAC header's duration field s/b 0)
* Set this for unicast frames, but not broadcast/multicast. */
@@ -1210,18 +1197,8 @@ struct iwl_rx_mpdu_res_start {
* Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1567,7 +1544,6 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
- /* following only for 5000 series and up */
u8 txed; /* number of frames sent */
u8 txed_2_done; /* number of frames acked */
} __packed;
@@ -1669,7 +1645,7 @@ struct iwl_link_qual_agg_params {
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices
*
* Each station in the agn device's internal station table has its own table
* of 16
@@ -1918,7 +1894,7 @@ struct iwl_link_quality_cmd {
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
*
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
@@ -2202,8 +2178,8 @@ struct iwl_spectrum_notification {
struct iwl_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds;
+ u8 debug_flags;
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2324,9 +2300,9 @@ struct iwl_scan_channel {
/**
* struct iwl_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
* SSID IEs get transmitted in reverse order of entry.
*/
struct iwl_ssid_ie {
@@ -2335,7 +2311,6 @@ struct iwl_ssid_ie {
u8 ssid[32];
} __packed;
-#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED 0
@@ -2416,8 +2391,6 @@ struct iwl_scan_cmd {
* channel */
__le32 suspend_time; /* pause scan this long (in "extended beacon
* format") when returning to service chnl:
- * 3945; 31:24 # beacons, 19:0 additional usec,
- * 4965; 31:22 # beacons, 21:0 additional usec.
*/
__le32 flags; /* RXON_FLG_* */
__le32 filter_flags; /* RXON_FILTER_* */
@@ -2733,7 +2706,7 @@ struct statistics_div {
struct statistics_general_common {
__le32 temperature; /* radio temperature */
- __le32 temperature_m; /* for 5000 and up, this is radio voltage */
+ __le32 temperature_m; /* radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3801,6 +3774,19 @@ struct iwl_bt_coex_prot_env_cmd {
} __attribute__((packed));
/*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+ IWLAGN_D3_WAKEUP_RFKILL = BIT(0),
+ IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+} __packed;
+
+/*
* REPLY_WOWLAN_PATTERNS
*/
#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
@@ -3830,19 +3816,16 @@ enum iwlagn_wowlan_wakeup_filters {
IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
- IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
- IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
- IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
- IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
- IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
- IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
+ IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7),
+ IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8),
};
struct iwlagn_wowlan_wakeup_filter_cmd {
__le32 enabled;
__le16 non_qos_seq;
- u8 min_sleep_seconds;
- u8 reserved;
+ __le16 reserved;
__le16 qos_seq[8];
};
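/*
 * Standalone sketch of filling the REPLY_D3_CONFIG payload added earlier in
 * this file: both fields are little-endian on the wire, so host values are
 * converted before the command would be queued.  htole32() from <endian.h>
 * stands in for the kernel's cpu_to_le32(), and the sleep time value is an
 * arbitrary example, not a documented default.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <endian.h>

#define D3_WAKEUP_RFKILL	(1u << 0)
#define D3_WAKEUP_SYSASSERT	(1u << 1)

struct d3_config_cmd {
	uint32_t min_sleep_time;	/* __le32 in the real command */
	uint32_t wakeup_flags;		/* __le32 in the real command */
} __attribute__((packed));

int main(void)
{
	struct d3_config_cmd cmd = {
		.min_sleep_time = htole32(200),	/* example value only */
		.wakeup_flags = htole32(D3_WAKEUP_RFKILL | D3_WAKEUP_SYSASSERT),
	};

	printf("cmd is %zu bytes, flags on wire: 0x%08" PRIx32 "\n",
	       sizeof(cmd), cmd.wakeup_flags);
	return 0;
}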
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140ab..7bcfa781e0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -60,8 +60,8 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->ht_supported = true;
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->ht_greenfield_support)
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
@@ -76,11 +76,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
- ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
- ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
ht_info->mcs.rx_mask[0] = 0xFF;
if (rx_chains_num >= 2)
@@ -141,7 +137,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -151,7 +147,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -206,12 +202,12 @@ int iwl_init_geos(struct iwl_priv *priv)
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+ cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
- bus_get_hw_id(bus(priv), buf, sizeof(buf));
+ bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
- priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
@@ -836,19 +832,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&priv->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &priv->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
- wake_up_all(&priv->notif_waitq);
-}
-
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
unsigned int reload_msec;
@@ -860,7 +843,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwlagn_abort_notification_waits(priv);
+ iwl_abort_notification_waits(priv->shrd);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
@@ -979,9 +962,9 @@ int iwl_apm_init(struct iwl_priv *priv)
bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
+ if (cfg(priv)->base_params->pll_cfg_val)
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
+ cfg(priv)->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
@@ -1120,229 +1103,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
- /*
- * MULTI-FIXME
- * This may need to be done per interface in nl80211/cfg80211/mac80211.
- */
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-
-static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_connection_init_rx_config(priv, ctx);
-
- iwlagn_set_rxon_chain(priv, ctx);
-
- return iwlagn_commit_rxon(priv, ctx);
-}
-
-static int iwl_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
- vif->type == NL80211_IFTYPE_ADHOC) {
- /*
- * pretend to have high BT traffic as long as we
- * are operating in IBSS mode, as this will cause
- * the rate scaling etc. to behave as intended.
- */
- priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
- }
-
- return 0;
-}
-
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
- enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- viftype, vif->addr);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
- mutex_lock(&priv->shrd->mutex);
-
- iwlagn_disable_roc(priv);
-
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(viftype)))
- continue;
-
- /* have maybe usable context w/o interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-
-static void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_scan_cancel_timeout(priv, 200);
- iwl_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-
- /*
- * When removing the IBSS interface, overwrite the
- * BT traffic load with the stored one from the last
- * notification, if any. If this is a device that
- * doesn't implement this, this has no effect since
- * both values are the same and zero.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->bt_traffic_load = priv->last_bt_traffic_load;
-}
-
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (WARN_ON(ctx->vif != vif)) {
- struct iwl_rxon_context *tmp;
- IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
- for_each_context(priv, tmp)
- IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
- tmp->ctxid, tmp, tmp->vif);
- }
- ctx->vif = NULL;
-
- iwl_teardown_interface(priv, vif, false);
-
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1649,97 +1411,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_context *tmp;
- enum nl80211_iftype newviftype = newtype;
- u32 interface_modes;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->shrd->mutex);
-
- if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- /*
- * Refuse a change that should be done by moving from the PAN
- * context to the BSS context instead, if the BSS context is
- * available and can support the new interface type.
- */
- if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
- (bss_ctx->interface_modes & BIT(newtype) ||
- bss_ctx->exclusive_interface_modes & BIT(newtype))) {
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_teardown_interface(priv, vif, true);
- vif->type = newviftype;
- vif->p2p = newp2p;
- err = iwl_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
int iwl_cmd_echo_test(struct iwl_priv *priv)
{
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
+ .len = { 0 },
.flags = CMD_SYNC,
};
@@ -1783,7 +1461,7 @@ void iwl_bg_watchdog(unsigned long data)
if (iwl_is_rfkill(priv->shrd))
return;
- timeout = priv->cfg->base_params->wd_timeout;
+ timeout = cfg(priv)->base_params->wd_timeout;
if (timeout == 0)
return;
@@ -1808,13 +1486,25 @@ void iwl_bg_watchdog(unsigned long data)
void iwl_setup_watchdog(struct iwl_priv *priv)
{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ unsigned int timeout = cfg(priv)->base_params->wd_timeout;
+
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !cfg(priv)->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+		/* module parameter overrides the default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
/**
@@ -1890,34 +1580,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
{
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
@@ -1925,8 +1587,7 @@ void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
void iwl_nic_config(struct iwl_priv *priv)
{
- priv->cfg->lib->nic_config(priv);
-
+ cfg(priv)->lib->nic_config(priv);
}
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da338070..7bf76ab94dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
 * @shadow_reg_enable: HW shadow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
@@ -134,14 +135,13 @@ struct iwl_base_params {
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
* @bt_init_traffic_load: specify initial bt traffic load
* @bt_prio_boost: default bt priority boost value
* @agg_time_limit: maximum number of uSec in aggregation
- * @ampdu_factor: Maximum A-MPDU length factor
- * @ampdu_density: Minimum A-MPDU spacing
 * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
*/
struct iwl_bt_params {
@@ -149,8 +149,6 @@ struct iwl_bt_params {
u8 bt_init_traffic_load;
u8 bt_prio_boost;
u16 agg_time_limit;
- u8 ampdu_factor;
- u8 ampdu_density;
bool bt_sco_disable;
bool bt_session_2;
};
@@ -163,84 +161,10 @@ struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
};
-/**
- * struct iwl_cfg
- * @name: Official name of the device
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- * without a warning, for use in transitions
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @valid_tx_ant: valid transmit antenna
- * @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
- * @lib: pointer to the lib ops
- * @additional_nic_config: additional nic configuration
- * @base_params: pointer to basic parameters
- * @ht_params: pointer to ht parameters
- * @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
- * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
- * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
- * @temp_offset_v2: support v2 of temperature offset calibration
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_ok;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_lib_ops *lib;
- void (*additional_nic_config)(struct iwl_priv *priv);
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- struct iwl_ht_params *ht_params;
- struct iwl_bt_params *bt_params;
- enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
- const bool need_dc_calib; /* if used set to true */
- const bool need_temp_offset_calib; /* if used set to true */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
- const bool adv_pm;
- const bool rx_with_siso_diversity;
- const bool internal_wimax_coex;
- const bool iq_invert;
- const bool temp_offset_v2;
-};
-
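/*
 * Standalone sketch of the firmware API window described in the iwl_cfg
 * documentation removed above: a ucode image is accepted only when its API
 * version falls between ucode_api_min and ucode_api_max, and versions below
 * ucode_api_ok are accepted with a warning during transitions.  The numbers
 * and the result enum are invented for the example.
 */
#include <stdio.h>

enum api_check { API_REJECT, API_OK_WITH_WARNING, API_OK };

static enum api_check check_ucode_api(unsigned int api, unsigned int min,
				      unsigned int ok, unsigned int max)
{
	if (api < min || api > max)
		return API_REJECT;
	if (api < ok)
		return API_OK_WITH_WARNING;	/* usable, but warn */
	return API_OK;
}

int main(void)
{
	/* example window: min 4, ok 5, max 6 */
	printf("api 3 -> %d\n", check_ucode_api(3, 4, 5, 6));	/* API_REJECT */
	printf("api 4 -> %d\n", check_ucode_api(4, 4, 5, 6));	/* warning */
	printf("api 6 -> %d\n", check_ucode_api(6, 4, 5, 6));	/* API_OK */
	return 0;
}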
/***************************
* L i b *
***************************/
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -260,13 +184,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@@ -323,9 +240,6 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -379,8 +293,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
{
- return priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist;
+ return cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist;
}
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b9f3267e720..fbc3095c7b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -284,8 +284,8 @@
#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
-#define CSR_HW_REV_TYPE_200 (0x0000110)
-#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_105 (0x0000110)
+#define CSR_HW_REV_TYPE_135 (0x0000120)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 69a77e24d22..f8fc2393dd4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, ...) \
do { \
if (iwl_get_debug_level((m)->shrd) & (level)) \
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
do { \
- if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ if (iwl_get_debug_level((m)->shrd) & (level) && \
+ net_ratelimit()) \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,10 +71,29 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+ else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "(RFKILL) ", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+} while (0)
+
#else
#define IWL_DEBUG(m, level, fmt, args...)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ IWL_ERR(p, fmt, ##args); \
+} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
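The hunk above converts the debug macros from the GNU "fmt, args..." form to C99-style "__VA_ARGS__" (keeping the GNU "##" so the trailing comma disappears when no arguments are given) and adds IWL_DEBUG_QUIET_RFKILL, which stays silent while rfkill is asserted unless the IWL_DL_RADIO level is enabled. A self-contained demonstration of the two variadic spellings, using example macro names rather than the driver's:

#include <stdio.h>

#define dbg_old(fmt, args...)	printf("old: " fmt, ## args)		/* GNU extension */
#define dbg_new(fmt, ...)	printf("new: " fmt, ##__VA_ARGS__)	/* C99 + GNU ## */

int main(void)
{
	dbg_old("no extra arguments\n");	/* "##" drops the trailing comma */
	dbg_new("one argument: %d\n", 42);
	return 0;
}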
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -114,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*/
/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
+#define IWL_DL_INFO 0x00000001
+#define IWL_DL_MAC80211 0x00000002
+#define IWL_DL_HCMD 0x00000004
+#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
+#define IWL_DL_EEPROM 0x00000040
+#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN (1 << 11)
+#define IWL_DL_POWER 0x00000100
+#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX (1 << 15)
+#define IWL_DL_ASSOC 0x00001000
+#define IWL_DL_DROP 0x00002000
+#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
+#define IWL_DL_FW 0x00010000
+#define IWL_DL_RF_KILL 0x00020000
+#define IWL_DL_FW_ERRORS 0x00040000
+#define IWL_DL_LED 0x00080000
/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
+#define IWL_DL_RATE 0x00100000
+#define IWL_DL_CALIB 0x00200000
+#define IWL_DL_WEP 0x00400000
+#define IWL_DL_TX 0x00800000
/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
+#define IWL_DL_RX 0x01000000
+#define IWL_DL_ISR 0x02000000
+#define IWL_DL_HT 0x04000000
/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
+#define IWL_DL_11H 0x10000000
+#define IWL_DL_STATS 0x20000000
+#define IWL_DL_TX_REPLY 0x40000000
+#define IWL_DL_TX_QUEUES 0x80000000
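The hex values above are still single-bit flags, grouped by nibble as the comments indicate, so levels are OR-ed into the runtime debug mask exactly as with the old (1 << n) spelling. For instance (mask value and check are illustrative; the flags are the ones defined above):

/* 0x00000001 | 0x00000800 | 0x00800000 == 0x00800801 */
static const u32 example_debug_mask = IWL_DL_INFO | IWL_DL_SCAN | IWL_DL_TX;

static inline bool example_level_enabled(u32 level)
{
	/* mirrors the iwl_get_debug_level() test done by IWL_DEBUG() */
	return (example_debug_mask & level) != 0;
}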
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -164,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -186,9 +200,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a1670e3f8bf..04a3343f461 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ struct iwl_trans *trans = trans(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init.data.len;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+ priv->dbgfs_sram_len = trans->ucode_init.data.len;
else
- priv->dbgfs_sram_len = priv->ucode_rt.data.len;
+ priv->dbgfs_sram_len = trans->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@@ -341,7 +342,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
- priv->ucode_wowlan.data.len);
+ trans(priv)->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -371,15 +372,13 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
+ "TID\tseq_num\trate_n_flags\n");
for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
- tid_data = &priv->shrd->tid_data[i][j];
+ tid_data = &priv->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%#x",
+ "%d:\t%#x\t%#x",
j, tid_data->seq_number,
- tid_data->agg.txq_id,
- tid_data->tfds_in_queue,
tid_data->agg.rate_n_flags);
if (tid_data->agg.wait_for_ba)
@@ -407,7 +406,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ size_t eeprom_len = cfg(priv)->base_params->eeprom_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16) {
@@ -415,7 +414,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return -ENODATA;
}
- ptr = priv->eeprom;
+ ptr = priv->shrd->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
@@ -427,10 +426,10 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
"version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -1541,15 +1540,15 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
tx->tx_power.ant_c);
@@ -2220,7 +2219,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- priv->cfg->base_params->plcp_delta_threshold);
+ cfg(priv)->base_params->plcp_delta_threshold);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -2242,10 +2241,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return -EINVAL;
if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- priv->cfg->base_params->plcp_delta_threshold =
+ cfg(priv)->base_params->plcp_delta_threshold =
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
- priv->cfg->base_params->plcp_delta_threshold = plcp;
+ cfg(priv)->base_params->plcp_delta_threshold = plcp;
return count;
}
@@ -2347,7 +2346,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
timeout = IWL_DEF_WD_TIMEOUT;
- priv->cfg->base_params->wd_timeout = timeout;
+ cfg(priv)->base_params->wd_timeout = timeout;
iwl_setup_watchdog(priv);
return count;
}
@@ -2407,10 +2406,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
char buf[40];
const size_t bufsz = sizeof(buf);
- if (priv->cfg->ht_params)
+ if (cfg(priv)->ht_params)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
- (priv->cfg->ht_params->use_rts_for_aggregation) ?
+ (cfg(priv)->ht_params->use_rts_for_aggregation) ?
"rts/cts" : "cts-to-self");
else
pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2427,7 +2426,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
int buf_size;
int rts;
- if (!priv->cfg->ht_params)
+ if (!cfg(priv)->ht_params)
return -EINVAL;
memset(buf, 0, sizeof(buf));
@@ -2437,9 +2436,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
if (sscanf(buf, "%d", &rts) != 1)
return -EINVAL;
if (rts)
- priv->cfg->ht_params->use_rts_for_aggregation = true;
+ cfg(priv)->ht_params->use_rts_for_aggregation = true;
else
- priv->cfg->ht_params->use_rts_for_aggregation = false;
+ cfg(priv)->ht_params->use_rts_for_aggregation = false;
return count;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c00a447963..e54a4d11e58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 1) Not associated (no beacon statistics being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
@@ -190,6 +189,69 @@ struct iwl_qos_info {
struct iwl_qosparam_cmd def_qos_parm;
};
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ * HW queue to be emptied of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ * HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the state for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (REPLY_TX), and the block ack notification
+ * (REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ * Needed by the upper layer for debugfs only.
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+ u32 rate_n_flags;
+ enum iwl_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the state for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ struct iwl_ht_agg agg;
+};
+
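As a rough illustration of the @ssn/@next_reclaimed relationship documented above, a check of roughly this shape (the function name is an example, not a driver symbol) is what lets the Tx AGG start/stop flow finish:

static bool example_agg_flow_complete(const struct iwl_tid_data *tid_data)
{
	const struct iwl_ht_agg *agg = &tid_data->agg;

	/* Only the transitional states wait for the HW queue to drain. */
	if (agg->state != IWL_EMPTYING_HW_QUEUE_ADDBA &&
	    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
		return false;

	/* Every frame up to ssn has been reclaimed, so mac80211 can be
	 * told that the aggregation start/stop flow is done. */
	return tid_data->next_reclaimed == agg->ssn;
}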
/*
* Structure should be accessed with sta_lock held. When station addition
* is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
@@ -230,17 +292,6 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-struct fw_img {
- struct fw_desc code, data;
-};
-
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
@@ -452,29 +503,6 @@ enum iwlagn_chain_noise_state {
IWL_CHAIN_NOISE_DONE,
};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_XTAL,
- IWL_CALIB_DC,
- IWL_CALIB_LO,
- IWL_CALIB_TX_IQ,
- IWL_CALIB_TX_IQ_PERD,
- IWL_CALIB_BASE_BAND,
- IWL_CALIB_TEMP_OFFSET,
- IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
/* Sensitivity calib data */
struct iwl_sensitivity_data {
u32 auto_corr_ofdm;
@@ -547,16 +575,6 @@ enum iwl_access_mode {
IWL_OTP_ACCESS_RELATIVE,
};
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM: based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
- IWL_PA_SYSTEM = 0,
- IWL_PA_INTERNAL = 1,
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -714,35 +732,6 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver; then, to wait for that, call
- * iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
@@ -805,14 +794,7 @@ enum iwl_scan_type {
IWL_SCAN_ROC,
};
-enum iwlagn_ucode_type {
- IWL_UCODE_NONE,
- IWL_UCODE_REGULAR,
- IWL_UCODE_INIT,
- IWL_UCODE_WOWLAN,
-};
-
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace {
u32 buff_size;
u32 total_size;
@@ -822,8 +804,20 @@ struct iwl_testmode_trace {
dma_addr_t dma_addr;
bool trace_enabled;
};
+struct iwl_testmode_sram {
+ u32 buff_size;
+ u32 num_chunks;
+ u8 *buff_addr;
+ bool sram_readed;
+};
#endif
+struct iwl_wipan_noa_data {
+ struct rcu_head rcu_head;
+ u32 length;
+ u8 data[];
+};
+
struct iwl_priv {
/*data shared among all the driver's layers */
@@ -835,7 +829,6 @@ struct iwl_priv {
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
struct kmem_cache *tx_cmd_pool;
- struct iwl_cfg *cfg;
enum ieee80211_band band;
@@ -880,8 +873,7 @@ struct iwl_priv {
s32 temperature; /* Celsius */
s32 last_temperature;
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+ struct iwl_wipan_noa_data __rcu *noa_data;
/* Scan related variables */
unsigned long scan_start;
@@ -907,23 +899,12 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- struct fw_img ucode_rt;
- struct fw_img ucode_init;
- struct fw_img ucode_wowlan;
-
- enum iwlagn_ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
__le16 switch_channel;
- struct {
- u32 error_event_table;
- u32 log_event_table;
- } device_pointers;
-
u16 active_rate;
u8 start_calib;
@@ -951,17 +932,13 @@ struct iwl_priv {
int num_stations;
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
+ struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
u8 mac80211_registered;
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- int nvm_device_type;
- struct iwl_eeprom_calib_info *calib_info;
-
enum nl80211_iftype iw_mode;
/* Last Rx'd beacon timestamp */
@@ -1017,10 +994,6 @@ struct iwl_priv {
/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
@@ -1098,8 +1071,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
+ struct iwl_testmode_sram testmode_sram;
u32 tm_fixed_rate;
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index a635a7e7544..2a2c8de64a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -28,7 +28,7 @@
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
-#include "iwl-dev.h"
+#include "iwl-trans.h"
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 8a51c5ccda1..9b212a8f30b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,7 +29,6 @@
#include <linux/tracepoint.h>
-struct iwl_priv;
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
@@ -37,14 +36,14 @@ struct iwl_priv;
static inline void trace_ ## name(proto) {}
#endif
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
+#define PRIV_ENTRY __field(void *, priv)
#define PRIV_ASSIGN __entry->priv = priv
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_io
TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -60,7 +59,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32,
);
TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+ TP_PROTO(void *priv, u32 offs, u8 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -76,7 +75,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8,
);
TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -91,11 +90,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_irq,
+ TP_PROTO(void *priv),
+ TP_ARGS(priv),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ ),
+ /* TP_printk("") doesn't compile */
+ TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+ TP_PROTO(void *priv, u32 index, u32 value),
+ TP_ARGS(priv, index, value),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, index)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->index = index;
+ __entry->value = value;
+ ),
+ TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+);
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_ucode
TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -115,7 +143,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
);
TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
TP_ARGS(priv, wraps, n_entry, p_entry),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -139,7 +167,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, u32 flags,
+ TP_PROTO(void *priv, u32 flags,
const void *hcmd0, size_t len0,
const void *hcmd1, size_t len1,
const void *hcmd2, size_t len2),
@@ -164,7 +192,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+ TP_PROTO(void *priv, void *rxbuf, size_t len),
TP_ARGS(priv, rxbuf, len),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -179,7 +207,7 @@ TRACE_EVENT(iwlwifi_dev_rx,
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+ TP_PROTO(void *priv, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
@@ -211,7 +239,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
+ TP_PROTO(void *priv, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
@@ -271,7 +299,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
);
TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a4e43bd4a54..c1eda9724f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
-static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
+static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
- IWL_DEBUG_EEPROM(priv,
+ IWL_DEBUG_EEPROM(bus,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
@@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
return ret;
}
-static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
+static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
{
- iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
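The comment at the top of this hunk explains why every EEPROM/OTP access has to be bracketed by the hardware semaphore. A sketch of that discipline with the newly bus-based helpers (read_one_word() is a stand-in, not a real function):

static int example_arbitrated_read(struct iwl_bus *bus, u16 addr, __le16 *out)
{
	int ret = iwl_eeprom_acquire_semaphore(bus);

	if (ret < 0)
		return ret;	/* uCode kept ownership of the EEPROM */

	ret = read_one_word(bus, addr, out);	/* assumed helper */

	iwl_eeprom_release_semaphore(bus);
	return ret;
}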
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
- u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+ IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
- IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
- IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
default:
- IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+ IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
"EEPROM_GP=0x%08x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", gp);
ret = -ENOENT;
break;
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
return ret;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
{
- if (!priv->eeprom)
+ if (!shrd->eeprom)
return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+ return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
}
int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,11 +227,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
u16 eeprom_ver;
u16 calib_ver;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+ calib_ver = iwl_eeprom_calib_version(priv->shrd);
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
+ if (eeprom_ver < cfg(priv)->eeprom_ver ||
+ calib_ver < cfg(priv)->eeprom_calib_ver)
goto err;
IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -241,45 +241,46 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
err:
IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
"CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
+ eeprom_ver, cfg(priv)->eeprom_ver,
+ calib_ver, cfg(priv)->eeprom_calib_ver);
return -EINVAL;
}
int iwl_eeprom_check_sku(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
/* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
+ cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !cfg(priv)->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
}
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+ IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
- if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
/* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
- priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
- priv->cfg->valid_tx_ant,
- priv->cfg->valid_rx_ant);
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+ cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+ cfg(priv)->valid_tx_ant,
+ cfg(priv)->valid_rx_ant);
return -EINVAL;
}
- IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
- priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
}
/*
* for some special cases,
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
return 0;
}
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
{
- const u8 *addr = iwl_eeprom_query_addr(priv,
+ const u8 *addr = iwl_eeprom_query_addr(shrd,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}
@@ -302,19 +303,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
*
******************************************************************************/
-static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
{
- iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ iwl_read32(bus, CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_clear_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -322,7 +323,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
/* OTP only valid for CP/PP and after */
switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(priv, "Unknown hardware type\n");
+ IWL_ERR(bus, "Unknown hardware type\n");
return -ENOENT;
case CSR_HW_REV_TYPE_5300:
case CSR_HW_REV_TYPE_5350:
@@ -331,7 +332,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -341,73 +342,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
return nvm_type;
}
-static int iwl_init_otp_access(struct iwl_priv *priv)
+static int iwl_init_otp_access(struct iwl_bus *bus)
{
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(bus(priv), CSR_GP_CNTRL,
- iwl_read32(bus(priv), CSR_GP_CNTRL) |
+ iwl_write32(bus, CSR_GP_CNTRL,
+ iwl_read32(bus, CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
- IWL_ERR(priv, "Time out access OTP\n");
+ IWL_ERR(bus, "Time out access OTP\n");
else {
- iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (priv->cfg->base_params->shadow_ram_support)
- iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
+ if (cfg(bus)->base_params->shadow_ram_support)
+ iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
u32 otpgp;
- iwl_write32(bus(priv), CSR_EEPROM_REG,
+ iwl_write32(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
- IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+ IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+ r = iwl_read32(bus, CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
+ IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
@@ -416,20 +417,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
/*
* iwl_is_otp_empty: check for empty OTP
*/
-static bool iwl_is_otp_empty(struct iwl_priv *priv)
+static bool iwl_is_otp_empty(struct iwl_bus *bus)
{
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
+ if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
if (!link_value) {
- IWL_ERR(priv, "OTP is empty\n");
+ IWL_ERR(bus, "OTP is empty\n");
is_empty = true;
}
} else {
- IWL_ERR(priv, "Unable to read first block of OTP list.\n");
+ IWL_ERR(bus, "Unable to read first block of OTP list.\n");
is_empty = true;
}
@@ -446,7 +447,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
* we should read and use to configure the device.
* only perform this operation if shadow RAM is disabled
*/
-static int iwl_find_otp_image(struct iwl_priv *priv,
+static int iwl_find_otp_image(struct iwl_bus *bus,
u16 *validblockaddr)
{
u16 next_link_addr = 0, valid_addr;
@@ -454,10 +455,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
+ iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
/* checking for empty OTP or error */
- if (iwl_is_otp_empty(priv))
+ if (iwl_is_otp_empty(bus))
return -EINVAL;
/*
@@ -471,9 +472,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
+ IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
- if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+ if (iwl_read_otp_word(bus, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
@@ -488,10 +489,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= priv->cfg->base_params->max_ll_items);
+ } while (usedblocks <= cfg(bus)->base_params->max_ll_items);
/* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
+ IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
return -EINVAL;
}
@@ -504,28 +505,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
s8 max_txpower_avg = 0; /* (dBm) */
/* Take the highest tx power from any valid chains */
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
+ if ((cfg->valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
+ if ((cfg->valid_tx_ant & ANT_B) &&
(enhanced_txpower[element].chain_b_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
+ if ((cfg->valid_tx_ant & ANT_C) &&
(enhanced_txpower[element].chain_c_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->cfg->valid_tx_ant == ANT_AB) |
- (priv->cfg->valid_tx_ant == ANT_BC) |
- (priv->cfg->valid_tx_ant == ANT_AC)) &&
+ if (((cfg->valid_tx_ant == ANT_AB) |
+ (cfg->valid_tx_ant == ANT_BC) |
+ (cfg->valid_tx_ant == ANT_AC)) &&
(enhanced_txpower[element].mimo2_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+ if ((cfg->valid_tx_ant == ANT_ABC) &&
(enhanced_txpower[element].mimo3_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo3_max;
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
__le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -627,7 +629,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx,
&max_txp_avg_halfdbm);
/*
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
/**
* iwl_eeprom_init - read EEPROM contents
*
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
*
* NOTE: This routine uses the non-debug IO access functions.
*/
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
+ struct iwl_shared *shrd = priv->shrd;
__le16 *e;
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
@@ -660,22 +663,22 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
u16 validblockaddr = 0;
u16 cache_addr = 0;
- priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
- if (priv->nvm_device_type == -ENOENT)
+ trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
+ if (trans(priv)->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
+ sz = cfg(priv)->base_params->eeprom_size;
IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
+ shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!shrd->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
- e = (__le16 *)priv->eeprom;
+ e = (__le16 *)shrd->eeprom;
iwl_apm_init(priv);
- ret = iwl_eeprom_verify_signature(priv);
+ ret = iwl_eeprom_verify_signature(trans(priv));
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
@@ -683,16 +686,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(priv);
+ ret = iwl_eeprom_acquire_semaphore(bus(priv));
if (ret < 0) {
IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+ if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- ret = iwl_init_otp_access(priv);
+ ret = iwl_init_otp_access(bus(priv));
if (ret) {
IWL_ERR(priv, "Failed to initialize OTP access.\n");
ret = -ENOENT;
@@ -706,8 +709,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!priv->cfg->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(priv, &validblockaddr)) {
+ if (!cfg(priv)->base_params->shadow_ram_support) {
+ if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
ret = -ENOENT;
goto done;
}
@@ -716,7 +719,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
addr += sizeof(u16)) {
__le16 eeprom_data;
- ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+ ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
if (ret)
goto done;
e[cache_addr / 2] = eeprom_data;
@@ -744,27 +747,27 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
+ iwl_eeprom_query16(shrd, EEPROM_VERSION));
ret = 0;
done:
- iwl_eeprom_release_semaphore(priv);
+ iwl_eeprom_release_semaphore(bus(priv));
err:
if (ret)
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/* Reset chip to save power until we load uCode during "up". */
iwl_apm_stop(priv);
alloc_err:
return ret;
}
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
+ kfree(shrd->eeprom);
+ shrd->eeprom = NULL;
}
static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
- u32 offset = priv->cfg->lib->
+ struct iwl_shared *shrd = priv->shrd;
+ u32 offset = cfg(priv)->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_7;
break;
default:
@@ -979,9 +983,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
}
/* Check if we do have HT40 channels */
- if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
+ if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
+ cfg(priv)->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
@@ -1017,8 +1021,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver needs to process additional information
* to determine the max channel tx power limits
*/
- if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
- priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
+ cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
return 0;
}
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
{
u16 radio_cfg;
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e7299..9fa937ec35e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
#include <net/mac80211.h>
struct iwl_priv;
+struct iwl_shared;
/*
* EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e62b85..d57ea6484bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
spin_lock_irqsave(&bus->reg_lock, flags);
iwl_grab_nic_access(bus);
- value = iwl_read32(bus(bus), reg);
+ value = iwl_read32(bus, reg);
iwl_release_nic_access(bus);
spin_unlock_irqrestore(&bus->reg_lock, flags);
@@ -283,16 +283,29 @@ u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
return value;
}
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words)
{
unsigned long flags;
+ int offs, result = 0;
+ u32 *vals = buf;
spin_lock_irqsave(&bus->reg_lock, flags);
if (!iwl_grab_nic_access(bus)) {
iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
wmb();
- iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+
+ for (offs = 0; offs < words; offs++)
+ iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(bus);
- }
+ } else
+ result = -EBUSY;
spin_unlock_irqrestore(&bus->reg_lock, flags);
+
+ return result;
+}
+
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+{
+ return _iwl_write_targ_mem_words(bus, addr, &val, 1);
}
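A short usage sketch for the new multi-word writer (destination address and buffer contents are arbitrary here):

static int example_copy_to_targ_mem(struct iwl_bus *bus, u32 dst_addr)
{
	u32 scratch[4] = { 0x1, 0x2, 0x3, 0x4 };

	/* Returns 0 on success, or -EBUSY if NIC access could not be
	 * grabbed under the register lock. */
	return _iwl_write_targ_mem_words(bus, dst_addr, scratch,
					 ARRAY_SIZE(scratch));
}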
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index ced2cbeb6ea..aae2eeb331a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -85,6 +85,9 @@ void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
(bufsize) / sizeof(u32));\
} while (0)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words);
+
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index eb541735296..14dcbfcdc0f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -137,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
}
IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.on = iwl_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.off = iwl_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
ret = iwl_send_led_cmd(priv, &led_cmd);
if (!ret) {
@@ -178,7 +178,7 @@ void iwl_leds_init(struct iwl_priv *priv)
int ret;
if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
+ mode = cfg(priv)->led_mode;
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1c93dfef693..2550b3c7dcb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -36,20 +36,6 @@ struct iwl_priv;
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
new file mode 100644
index 00000000000..f980e574e1f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -0,0 +1,1601 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_sta_ap_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_p2p_sta_go_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_p2p_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+ },
+};
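The combination arrays above are what the driver later advertises to mac80211 (see iwlagn_mac_setup_register() below): at most two concurrent interfaces on a single channel, drawn from the station/AP/P2P types listed, with matching beacon intervals required for the infrastructure combinations. As a rough, self-contained sketch of how such a limit table constrains a requested set of interfaces -- an illustration only, not mac80211's actual validation code, and all helper names here are invented:

#include <stdbool.h>

/* Illustration only: check a requested multiset of interface types against
 * one combination entry shaped like the tables above. */
struct iface_limit {
	int max;
	unsigned int types;		/* bitmask of interface-type bits */
};

static bool combo_allows(const struct iface_limit *limits, int n_limits,
			 int max_interfaces,
			 const unsigned int *iftypes, int n_ifaces)
{
	int used[8] = { 0 };		/* per-limit counters; assumes n_limits <= 8 */
	int i, j;

	if (n_ifaces > max_interfaces)
		return false;

	for (i = 0; i < n_ifaces; i++) {
		for (j = 0; j < n_limits; j++)
			if (limits[j].types & (1u << iftypes[i]))
				break;
		if (j == n_limits || ++used[j] > limits[j].max)
			return false;	/* type not covered, or its limit exceeded */
	}
	return true;
}

Checked against the shape of iwlagn_sta_ap_limits, this would accept one station plus one AP but reject two APs, matching the .max = 1 entries above.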
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ int ret;
+ struct ieee80211_hw *hw = priv->hw;
+ struct iwl_rxon_context *ctx;
+
+ hw->rate_control_algorithm = "iwl-agn-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ /*
+ * Including the following line will crash some APs. This
+ * workaround removes the stimulus which causes the crash until
+ * the AP software can be fixed.
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ */
+
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+ for_each_context(priv, ctx) {
+ hw->wiphy->interface_modes |= ctx->interface_modes;
+ hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+ }
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+ hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+ } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ hw->wiphy->iface_combinations =
+ iwlagn_iface_combinations_dualmode;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+ }
+
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ if (trans(priv)->ucode_wowlan.code.len &&
+ device_can_wakeup(bus(priv)->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlagn_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+ hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len =
+ IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len =
+ IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ }
+
+ if (iwlagn_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->bands[IEEE80211_BAND_2GHZ];
+ if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->bands[IEEE80211_BAND_5GHZ];
+
+ hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
+
+ iwl_leds_init(priv);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+
+ return 0;
+}
+
+void iwlagn_mac_unregister(struct iwl_priv *priv)
+{
+ if (!priv->mac80211_registered)
+ return;
+ iwl_leds_exit(priv);
+ ieee80211_unregister_hw(priv->hw);
+ priv->mac80211_registered = 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ for_each_context(priv, ctx) {
+ ret = iwlagn_alloc_bcast_station(priv, ctx);
+ if (ret) {
+ iwl_dealloc_bcast_stations(priv);
+ return ret;
+ }
+ }
+
+ ret = iwl_run_init_ucode(trans(priv));
+ if (ret) {
+ IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto error;
+ return 0;
+
+ error:
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ __iwl_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+
+ IWL_ERR(priv, "Unable to initialize device.\n");
+ return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&priv->shrd->mutex);
+ ret = __iwl_up(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ if (ret)
+ return ret;
+
+ IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+ /* Now we should be done, and the READY bit should be set. */
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+ ret = -EIO;
+
+ iwlagn_led_enable(priv);
+
+ priv->is_open = 1;
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!priv->is_open)
+ return;
+
+ priv->is_open = 0;
+
+ iwl_down(priv);
+
+ flush_workqueue(priv->shrd->workqueue);
+
+ /* User space software may expect getting rfkill changes
+ * even if interface is down */
+ iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
+ iwl_enable_rfkill_int(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (iwlagn_mod_params.sw_crypto)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+ goto out;
+
+ memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
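+	/* data->replay_ctr arrives as a big-endian byte stream; store it little-endian for the uCode */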
+ priv->replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ priv->have_rekey_data = true;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ret;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ /* Don't attempt WoWLAN when not associated, tear down instead. */
+ if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+ !iwl_is_associated_ctx(ctx)) {
+ ret = 1;
+ goto out;
+ }
+
+ ret = iwlagn_suspend(priv, hw, wowlan);
+ if (ret)
+ goto error;
+
+ device_set_wakeup_enable(bus(priv)->dev, true);
+
+ /* Now let the ucode operate on its own */
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ goto out;
+
+ error:
+ priv->shrd->wowlan = false;
+ iwlagn_prepare_restart(priv);
+ ieee80211_restart_hw(priv->hw);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif;
+ unsigned long flags;
+ u32 base, status = 0xffffffff;
+ int ret = -EIO;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ base = priv->shrd->device_pointers.error_event_table;
+ if (iwlagn_hw_valid_rtc_data_addr(base)) {
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(bus(priv));
+ if (ret == 0) {
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(bus(priv));
+ }
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (ret == 0) {
+ struct iwl_trans *trans = trans(priv);
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(trans->ucode_wowlan.data.len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ _iwl_read_targ_mem_words(
+ bus(priv), 0x800000, priv->wowlan_sram,
+ trans->ucode_wowlan.data.len / 4);
+ }
+#endif
+ }
+
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ priv->shrd->wowlan = false;
+
+ device_set_wakeup_enable(bus(priv)->dev, false);
+
+ iwlagn_prepare_restart(priv);
+
+ memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ iwl_connection_init_rx_config(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ ieee80211_resume_disconnect(vif);
+
+ return 1;
+}
+
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (iwlagn_tx_skb(priv, skb))
+ dev_kfree_skb_any(skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ bool is_default_wep_key = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwlagn_mod_params.sw_crypto) {
+ IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We could program these keys into the hardware as well, but we
+ * don't expect much multicast traffic in IBSS and having keys
+ * for more stations is probably more useful.
+ *
+ * Mark key TX-only and return 0.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ return 0;
+ }
+
+	/* If the key was TX-only, accept deletion */
+ if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+ return 0;
+
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, 100);
+
+ BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+ /*
+	 * If we are getting a WEP group key and we have not received any key
+	 * mapping so far, we are in legacy WEP mode (group key only); otherwise
+	 * we are in 802.1X mode.
+	 * In legacy WEP mode, we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+ }
+
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key) {
+ ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+ break;
+ }
+ ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+ if (ret) {
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ }
+
+ IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = iwl_remove_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+ IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+
+ IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+ sta->addr, tid);
+
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ return -EACCES;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT(priv, "stop Rx\n");
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->agg_tids_count > 0)) {
+ priv->agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
+ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ if (!priv->agg_tids_count && cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+ * switch off RTS/CTS if it was previously enabled
+ */
+ sta_priv->lq_sta.lq.general_params.flags &=
+ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+ }
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
+ break;
+ }
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret = 0;
+ u8 sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->sta_id = IWL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+ is_ap, sta, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ goto out;
+ }
+
+ sta_priv->sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ /*
+ * MULTI-FIXME
+ * When we add support for multiple interfaces, we need to
+ * revisit this. The channel switch command in the device
+ * only affects the BSS context, but what does that really
+ * mean? And what if we get a CSA on the second interface?
+ * This needs a lot of work.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u16 ch;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_rfkill(priv->shrd))
+ goto out;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ goto out;
+
+ if (!iwl_is_associated_ctx(ctx))
+ goto out;
+
+ if (!cfg(priv)->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = iwl_get_channel_info(priv, channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->shrd->lock);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_set_rxon_channel(priv, channel, ctx);
+ iwl_set_rxon_ht(priv, ht_conf);
+ iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&priv->shrd->lock);
+
+ iwl_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->shrd->mutex);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+ goto done;
+ }
+ if (iwl_is_rfkill(priv->shrd)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+ goto done;
+ }
+
+ /*
+ * mac80211 will not push any more frames for transmit
+ * until the flush is completed
+ */
+ if (drop) {
+ IWL_DEBUG_MAC80211(priv, "send flush command\n");
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ IWL_ERR(priv, "flush request fail\n");
+ goto done;
+ }
+ }
+ IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+ iwl_trans_wait_tx_queue_empty(trans(priv));
+done:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ int err = 0;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->hw_roc_channel = channel;
+ priv->hw_roc_chantype = channel_type;
+ /* convert from ms to TU */
+ priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
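+	/* e.g. a 50 ms request becomes DIV_ROUND_UP(50000, 1024) = 49 TU, one TU being 1024 usec */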
+ priv->hw_roc_start_notified = false;
+ cancel_delayed_work(&priv->hw_roc_disable_work);
+
+ if (!ctx->is_active) {
+ static const struct iwl_qos_info default_qos_data = {
+ .def_qos_parm = {
+ .ac[0] = {
+ .cw_min = cpu_to_le16(3),
+ .cw_max = cpu_to_le16(7),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(1504),
+ },
+ .ac[1] = {
+ .cw_min = cpu_to_le16(7),
+ .cw_max = cpu_to_le16(15),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(3008),
+ },
+ .ac[2] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 3,
+ },
+ .ac[3] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 7,
+ },
+ },
+ };
+
+ ctx->is_active = true;
+ ctx->qos_data = default_qos_data;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ memcpy(ctx->staging.node_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ memcpy(ctx->staging.bssid_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err)
+ goto out;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err) {
+ iwlagn_disable_roc(priv);
+ goto out;
+ }
+ priv->hw_roc_setup = true;
+ }
+
+ err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+ if (err)
+ iwlagn_disable_roc(priv);
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return 0;
+}
+
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
+ &priv->shrd->status)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+ if (ret)
+ goto out;
+
+ if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+ ret = -EIO;
+ goto out_remove_sta;
+ }
+
+ memcpy(ctx->bssid, bssid, ETH_ALEN);
+ ctx->preauth_bssid = true;
+
+ ret = iwlagn_commit_rxon(priv, ctx);
+
+ if (ret == 0)
+ goto out;
+
+ out_remove_sta:
+ iwl_remove_station(priv, sta_id, bssid);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx))
+ goto out;
+
+ iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+ ctx->preauth_bssid = false;
+ /* no need to commit */
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
+ if (rssi_event == RSSI_EVENT_LOW)
+ priv->bt_enable_pspoll = true;
+ else if (rssi_event == RSSI_EVENT_HIGH)
+ priv->bt_enable_pspoll = false;
+
+ iwlagn_send_advance_bt_config(priv);
+ } else {
+		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
+				"ignoring RSSI callback\n");
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+ return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ unsigned long flags;
+ int q;
+
+ if (WARN_ON(!ctx))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+
+ ctx->qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ ctx->qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ iwl_connection_init_rx_config(priv, ctx);
+
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ priv->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = iwl_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist &&
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * pretend to have high BT traffic as long as we
+ * are operating in IBSS mode, as this will cause
+ * the rate scaling etc. to behave as intended.
+ */
+ priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+ }
+
+ return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ viftype, vif->addr);
+
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ iwlagn_disable_roc(priv);
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_WARN(priv, "Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_context(priv, tmp) {
+ u32 possible_modes =
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+
+ if (!(possible_modes & BIT(viftype)))
+ continue;
+
+ /* have maybe usable context w/o interface */
+ ctx = tmp;
+ break;
+ }
+
+ if (!ctx) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = ctx;
+ ctx->vif = vif;
+
+ err = iwl_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
+
+ ctx->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (priv->scan_vif == vif) {
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_force_scan_end(priv);
+ }
+
+ if (!mode_change) {
+ iwl_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+
+ /*
+ * When removing the IBSS interface, overwrite the
+ * BT traffic load with the stored one from the last
+ * notification, if any. If this is a device that
+ * doesn't implement this, this has no effect since
+ * both values are the same and zero.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (WARN_ON(ctx->vif != vif)) {
+ struct iwl_rxon_context *tmp;
+ IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+ for_each_context(priv, tmp)
+ IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+ tmp->ctxid, tmp, tmp->vif);
+ }
+ ctx->vif = NULL;
+
+ iwl_teardown_interface(priv, vif, false);
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
+ u32 interface_modes;
+ int err;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Refuse a change that should be done by moving from the PAN
+ * context to the BSS context instead, if the BSS context is
+ * available and can support the new interface type.
+ */
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
+ (bss_ctx->interface_modes & BIT(newtype) ||
+ bss_ctx->exclusive_interface_modes & BIT(newtype))) {
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_teardown_interface(priv, vif, true);
+ vif->type = newviftype;
+ vif->p2p = newp2p;
+ err = iwl_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->shrd->mutex);
+
+ /*
+ * If an internal scan is in progress, just set
+ * up the scan_request as per above.
+ */
+ if (priv->scan_type != IWL_SCAN_NORMAL) {
+ IWL_DEBUG_SCAN(priv,
+ "SCAN request during internal scan - defer\n");
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ ret = 0;
+ } else {
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ /*
+ * mac80211 will only ask for one band at a time
+ * so using channels[0] here is ok
+ */
+ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+ req->channels[0]->band);
+ if (ret) {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
+ }
+ }
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ return ret;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+ "station %pM\n", sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.sta.modify_mask = 0;
+ priv->stations[sta_id].sta.sleep_tx_count = 0;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ WARN_ON(!sta_priv->client);
+ sta_priv->asleep = true;
+ if (atomic_read(&sta_priv->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
+ sta_priv->asleep = false;
+ sta_id = iwl_sta_id(sta);
+ if (sta_id != IWL_INVALID_STATION)
+ iwl_sta_modify_ps_wake(priv, sta_id);
+ break;
+ default:
+ break;
+ }
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+struct ieee80211_ops iwlagn_hw_ops = {
+ .tx = iwlagn_mac_tx,
+ .start = iwlagn_mac_start,
+ .stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwlagn_mac_suspend,
+ .resume = iwlagn_mac_resume,
+#endif
+ .add_interface = iwlagn_mac_add_interface,
+ .remove_interface = iwlagn_mac_remove_interface,
+ .change_interface = iwlagn_mac_change_interface,
+ .config = iwlagn_mac_config,
+ .configure_filter = iwlagn_configure_filter,
+ .set_key = iwlagn_mac_set_key,
+ .update_tkip_key = iwlagn_mac_update_tkip_key,
+ .set_rekey_data = iwlagn_mac_set_rekey_data,
+ .conf_tx = iwlagn_mac_conf_tx,
+ .bss_info_changed = iwlagn_bss_info_changed,
+ .ampdu_action = iwlagn_mac_ampdu_action,
+ .hw_scan = iwlagn_mac_hw_scan,
+ .sta_notify = iwlagn_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwlagn_mac_sta_remove,
+ .channel_switch = iwlagn_mac_channel_switch,
+ .flush = iwlagn_mac_flush,
+ .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+ .remain_on_channel = iwlagn_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+ .rssi_callback = iwlagn_mac_rssi_callback,
+ CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+ .tx_sync = iwlagn_mac_tx_sync,
+ .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+ .set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+ struct iwl_priv *priv;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+ if (!hw)
+ goto out;
+
+ priv = hw->priv;
+ priv->hw = hw;
+
+out:
+ return hw;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 1800029911a..fb30ea7ca96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -135,7 +135,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
}
}
-static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
+static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
@@ -144,6 +144,13 @@ static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
pci_dev->subsystem_device);
}
+static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
+{
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ return (pci_dev->device << 16) + pci_dev->subsystem_device;
+}
+
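The new get_hw_id bus op packs the PCI device ID into the upper 16 bits and the subsystem device ID into the lower 16 bits of a single u32; iwlagn_mac_setup_register() above stores that value in wiphy->hw_version via bus_get_hw_id(). A small standalone sketch (illustration only, plain user-space C) of how the two IDs can be recovered from such a packed value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example IDs taken from the 6005 entries added below (0x0082, 0x1304). */
	uint32_t hw_id = (0x0082u << 16) + 0x1304u;

	printf("device=0x%04x subsystem=0x%04x\n",
	       (unsigned)(hw_id >> 16), (unsigned)(hw_id & 0xffff));
	return 0;
}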
static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
@@ -163,6 +170,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
+ .get_hw_id_string = iwl_pci_get_hw_id_string,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@@ -256,6 +264,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -325,46 +335,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
{0}
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 4eaab204322..2b188a6025b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -167,7 +167,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
u8 skip;
u32 slp_itrvl;
- if (priv->cfg->adv_pm) {
+ if (cfg(priv)->adv_pm) {
table = apm_range_2;
if (period <= IWL_DTIM_RANGE_1_MAX)
table = apm_range_1;
@@ -221,7 +221,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -307,7 +307,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -350,7 +350,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->shrd->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (!priv->cfg->base_params->no_idle_support &&
+ else if (!cfg(priv)->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index e5d727f537d..084aa2c4ccf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
if (!iwl_is_associated_ctx(ctx))
continue;
+ if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+ continue;
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
@@ -678,7 +680,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
+ if ((priv->scan_request && priv->scan_request->no_cck) ||
+ chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
} else {
rate = IWL_RATE_1M_PLCP;
@@ -688,8 +691,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* Internal scans are passive, so we can indiscriminately set
* the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
case IEEE80211_BAND_5GHZ:
@@ -730,12 +733,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
+ if (cfg(priv)->scan_rx_antennas[band])
+ rx_ant = cfg(priv)->scan_rx_antennas[band];
if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
scan_tx_antennas = first_antenna(scan_tx_antennas);
}
@@ -759,8 +762,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_ant = first_antenna(active_chains);
}
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
rx_ant = first_antenna(rx_ant);
@@ -938,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
return 0;
}
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->shrd->mutex);
-
- /*
- * If an internal scan is in progress, just set
- * up the scan_request as per above.
- */
- if (priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv,
- "SCAN request during internal scan - defer\n");
- priv->scan_request = req;
- priv->scan_vif = vif;
- ret = 0;
- } else {
- priv->scan_request = req;
- priv->scan_vif = vif;
- /*
- * mac80211 will only ask for one band at a time
- * so using channels[0] here is ok
- */
- ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
- req->channels[0]->band);
- if (ret) {
- priv->scan_request = NULL;
- priv->scan_vif = NULL;
- }
- }
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- mutex_unlock(&priv->shrd->mutex);
-
- return ret;
-}
/*
 * internal short scan, this function should only be called while associated.
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c4..dc55cc4a810 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -94,9 +94,9 @@
* This implementation is iwl-pci.c
*/
-struct iwl_cfg;
struct iwl_bus;
struct iwl_priv;
+struct iwl_trans;
struct iwl_sensitivity_ranges;
struct iwl_trans_ops;
@@ -107,6 +107,10 @@ struct iwl_trans_ops;
extern struct iwl_mod_params iwlagn_mod_params;
+#define IWL_DISABLE_HT_ALL BIT(0)
+#define IWL_DISABLE_HT_TXAGG BIT(1)
+#define IWL_DISABLE_HT_RXAGG BIT(2)
+
/**
* struct iwl_mod_params
*
@@ -114,13 +118,14 @@ extern struct iwl_mod_params iwlagn_mod_params;
*
* @sw_crypto: using hardware encryption, default = 0
* @num_of_queues: number of tx queue, HW dependent
- * @disable_11n: 11n capabilities enabled, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+ * use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 1
* @antenna: both antennas (use diversity), default = 0
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
@@ -135,13 +140,13 @@ extern struct iwl_mod_params iwlagn_mod_params;
struct iwl_mod_params {
int sw_crypto;
int num_of_queues;
- int disable_11n;
+ unsigned int disable_11n;
int amsdu_size_8K;
int antenna;
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
@@ -174,8 +179,6 @@ struct iwl_mod_params {
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
 * @sens: range of sensitivity values
*/
struct iwl_hw_params {
@@ -195,67 +198,148 @@ struct iwl_hw_params {
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
- u32 calib_init_cfg;
- u32 calib_rt_cfg;
const struct iwl_sensitivity_ranges *sens;
};
/**
- * enum iwl_agg_state
+ * enum iwl_ucode_type
*
- * The state machine of the BA agreement establishment / tear down.
- * These states relate to a specific RA / TID.
+ * The type of ucode currently loaded on the hardware.
*
- * @IWL_AGG_OFF: aggregation is not used
- * @IWL_AGG_ON: aggregation session is up
- * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
*/
-enum iwl_agg_state {
- IWL_AGG_OFF = 0,
- IWL_AGG_ON,
- IWL_EMPTYING_HW_QUEUE_ADDBA,
- IWL_EMPTYING_HW_QUEUE_DELBA,
+enum iwl_ucode_type {
+ IWL_UCODE_NONE,
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
};
/**
- * struct iwl_ht_agg - aggregation state machine
-
- * This structs holds the states for the BA agreement establishment and tear
- * down. It also holds the state during the BA session itself. This struct is
- * duplicated for each RA / TID.
-
- * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
- * Tx response (REPLY_TX), and the block ack notification
- * (REPLY_COMPRESSED_BA).
- * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- * Needed by the upper layer for debugfs only.
- * @wait_for_ba: Expect block-ack before next Tx reply
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the uCode
+ * to notify the driver, and wait for that notification
+ * by calling iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
*/
-struct iwl_ht_agg {
- u32 rate_n_flags;
- enum iwl_agg_state state;
- u16 txq_id;
- bool wait_for_ba;
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+ void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
};
/**
- * struct iwl_tid_data - one for each RA / TID
+ * enum iwl_pa_type - Power Amplifier type
+ * @IWL_PA_SYSTEM: based on uCode configuration
+ * @IWL_PA_INTERNAL: use Internal only
+ */
+enum iwl_pa_type {
+ IWL_PA_SYSTEM = 0,
+ IWL_PA_INTERNAL = 1,
+};
- * This structs holds the states for each RA / TID.
+/*
+ * LED mode
+ * IWL_LED_DEFAULT: use device default
+ * IWL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+ IWL_LED_DEFAULT,
+ IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
+};
- * @seq_number: the next WiFi sequence number to use
- * @tfds_in_queue: number of packets sent to the HW queues.
- * Exported for debugfs only
- * @agg: aggregation state machine
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ * without a warning, for use in transitions
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @sku: sku information from EEPROM
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @additional_nic_config: additional nic configuration
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to ht parameters
+ * @bt_params: pointer to bt parameters
+ * @pa_type: used by 6000 series only to identify the type of Power Amplifier
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ * don't send it to those
+ * @scan_rx_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version.
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision.
*/
-struct iwl_tid_data {
- u16 seq_number;
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
+struct iwl_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_ok;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct iwl_lib_ops *lib;
+ void (*additional_nic_config)(struct iwl_priv *priv);
+ /* params not likely to change within a device family */
+ struct iwl_base_params *base_params;
+ /* params likely to change within a device family */
+ struct iwl_ht_params *ht_params;
+ struct iwl_bt_params *bt_params;
+ enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
+ const bool need_temp_offset_calib; /* if used set to true */
+ const bool no_xtal_calib;
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+ const bool adv_pm;
+ const bool rx_with_siso_diversity;
+ const bool internal_wimax_coex;
+ const bool iq_invert;
+ const bool temp_offset_v2;
};
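Per the comment block above, @ucode_api_min and @ucode_api_max bound the firmware API versions the driver will accept, and @ucode_api_ok marks the oldest version that loads without a warning. A minimal sketch of that constraint, assuming nothing beyond the fields declared above (this is not the driver's actual firmware-loading code, and the function names are invented):

/* Sketch only: the API window described by the struct iwl_cfg comment above. */
static bool iwl_example_api_supported(const struct iwl_cfg *cfg, unsigned int api_ver)
{
	return api_ver >= cfg->ucode_api_min && api_ver <= cfg->ucode_api_max;
}

static bool iwl_example_api_warn(const struct iwl_cfg *cfg, unsigned int api_ver)
{
	/* supported, but older than the "OK without warning" threshold */
	return iwl_example_api_supported(cfg, api_ver) && api_ver < cfg->ucode_api_ok;
}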
/**
@@ -266,15 +350,25 @@ struct iwl_tid_data {
* @ucode_owner: IWL_OWNERSHIP_*
* @cmd_queue: command queue number
* @status: STATUS_*
+ * @wowlan: true if the WoWLAN uCode is running
* @valid_contexts: microcode/device supports multiple contexts
* @bus: pointer to the bus layer data
+ * @cfg: see struct iwl_cfg
* @priv: pointer to the upper layer data
+ * @trans: pointer to the transport layer data
* @hw_params: see struct iwl_hw_params
* @workqueue: the workqueue used by all the layers of the driver
* @lock: protect general shared data
* @sta_lock: protects the station table.
* If lock and sta_lock are needed, lock must be acquired first.
* @mutex:
+ * @wait_command_queue: the wait_queue for SYNC host commands and uCode load
+ * @eeprom: pointer to the eeprom/OTP image
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: list of waiters for uCode notifications
+ * @notif_wait_lock: lock protecting the notification wait list
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -290,6 +384,7 @@ struct iwl_shared {
u8 valid_contexts;
struct iwl_bus *bus;
+ struct iwl_cfg *cfg;
struct iwl_priv *priv;
struct iwl_trans *trans;
struct iwl_hw_params hw_params;
@@ -299,13 +394,29 @@ struct iwl_shared {
spinlock_t sta_lock;
struct mutex mutex;
- struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
wait_queue_head_t wait_command_queue;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+
+ /* ucode related variables */
+ enum iwl_ucode_type ucode_type;
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ struct {
+ u32 error_event_table;
+ u32 log_event_table;
+ } device_pointers;
+
};
/* Whatever _m is (iwl_trans, iwl_priv, iwl_bus), these macros will work */
#define priv(_m) ((_m)->shrd->priv)
+#define cfg(_m) ((_m)->shrd->cfg)
#define bus(_m) ((_m)->shrd->bus)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)
@@ -427,12 +538,6 @@ int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
struct iwl_device_cmd *cmd);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
void iwl_nic_config(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -445,6 +550,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry);
+
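For reference, the intended calling pattern of the notification-wait helpers declared above (also visible in iwl_testmode_cfg_init_calib() further down in this patch) is roughly the following sketch; shrd is assumed to be a valid struct iwl_shared pointer, trigger_operation() is a placeholder, and error handling is trimmed.

	struct iwl_notification_wait wait_entry;
	int ret;

	/* register interest in the notification before triggering it */
	iwl_init_notification_wait(shrd, &wait_entry,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   NULL, NULL);

	ret = trigger_operation();	/* placeholder for the real trigger */
	if (ret) {
		/* on failure the wait entry must be removed explicitly */
		iwl_remove_notification(shrd, &wait_entry);
		return ret;
	}

	/* block until the notification arrives or the timeout expires;
	 * iwl_wait_notification() releases the wait entry in both cases
	 */
	ret = iwl_wait_notification(shrd, &wait_entry, 2 * HZ);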
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_reset_traffic_log(struct iwl_priv *priv);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 5e50d88f302..4a5cddd2d56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -70,6 +70,7 @@
#include <net/mac80211.h>
#include <net/netlink.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -77,6 +78,7 @@
#include "iwl-agn.h"
#include "iwl-testmode.h"
#include "iwl-trans.h"
+#include "iwl-bus.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +108,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
};
/*
@@ -177,6 +186,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
{
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
priv->testmode_trace.trace_enabled = false;
+ priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+ if (priv->testmode_sram.sram_readed) {
+ kfree(priv->testmode_sram.buff_addr);
+ priv->testmode_sram.buff_addr = NULL;
+ priv->testmode_sram.buff_size = 0;
+ priv->testmode_sram.num_chunks = 0;
+ priv->testmode_sram.sram_readed = false;
+ }
}
static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +222,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
+ iwl_sram_cleanup(priv);
}
/*
@@ -276,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
@@ -291,7 +313,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_DEBUG_INFO(priv,
"Error sending msg : %d\n", status);
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_DEBUG_INFO(priv,
"Error finding value to write\n");
@@ -302,7 +324,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write32(bus(priv), ofs, val32);
}
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_DEBUG_INFO(priv, "Error finding value to write\n");
return -ENOMSG;
@@ -312,6 +334,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write8(bus(priv), ofs, val8);
}
break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ val32 = iwl_read_prph(bus(priv), ofs);
+ IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_DEBUG_INFO(priv,
+ "Error finding value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+ iwl_write_prph(bus(priv), ofs, val32);
+ }
+ break;
default:
IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
@@ -330,24 +378,24 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(priv->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans(priv));
if (ret) {
IWL_DEBUG_INFO(priv,
"Error configuring init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
- ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
if (ret)
IWL_DEBUG_INFO(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(priv->shrd, &calib_wait);
return ret;
}
@@ -370,14 +418,16 @@ cfg_init_calib_error:
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = hw->priv;
+ struct iwl_trans *trans = trans(priv);
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
+ u32 devid;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
+ rsp_data_ptr = (unsigned char *)cfg(priv)->name;
+ rsp_data_len = strlen(cfg(priv)->name);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
rsp_data_len + 20);
if (!skb) {
@@ -396,8 +446,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (status)
IWL_DEBUG_INFO(priv,
"Error loading init ucode: %d\n", status);
@@ -405,13 +454,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
if (status) {
IWL_DEBUG_INFO(priv,
"Error loading runtime ucode: %d\n", status);
@@ -423,10 +470,25 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
"Error starting the device: %d\n", status);
break;
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_trans_stop_device(trans);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_DEBUG_INFO(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_DEBUG_INFO(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
+ if (priv->shrd->eeprom) {
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->cfg->base_params->eeprom_size + 20);
+ cfg(priv)->base_params->eeprom_size + 20);
if (!skb) {
IWL_DEBUG_INFO(priv,
"Error allocating memory\n");
@@ -435,8 +497,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_EEPROM_RSP);
NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
- priv->cfg->base_params->eeprom_size,
- priv->eeprom);
+ cfg(priv)->base_params->eeprom_size,
+ priv->shrd->eeprom);
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_DEBUG_INFO(priv,
@@ -455,6 +517,37 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ devid = bus_get_hw_id(bus(priv));
+ IWL_INFO(priv, "hw version: 0x%x\n", devid);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
@@ -535,7 +628,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
- TRACE_CHUNK_SIZE);
+ DUMP_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -567,15 +660,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
idx = cb->args[4];
if (idx >= priv->testmode_trace.num_chunks)
return -ENOENT;
- length = TRACE_CHUNK_SIZE;
+ length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+ (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_trace.buff_size %
- TRACE_CHUNK_SIZE;
+ DUMP_CHUNK_SIZE;
NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
priv->testmode_trace.trace_addr +
- (TRACE_CHUNK_SIZE * idx));
+ (DUMP_CHUNK_SIZE * idx));
idx++;
cb->args[4] = idx;
return 0;
@@ -621,6 +714,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
return 0;
}
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to determine the memory area to read from SRAM.
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = hw->priv;
+ u32 base, ofs, size, maxsize;
+
+ if (priv->testmode_sram.sram_readed)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+ IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+ return -ENOMSG;
+ }
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+ if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+ IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+ switch (priv->shrd->ucode_type) {
+ case IWL_UCODE_REGULAR:
+ maxsize = trans(priv)->ucode_rt.data.len;
+ break;
+ case IWL_UCODE_INIT:
+ maxsize = trans(priv)->ucode_init.data.len;
+ break;
+ case IWL_UCODE_WOWLAN:
+ maxsize = trans(priv)->ucode_wowlan.data.len;
+ break;
+ case IWL_UCODE_NONE:
+ IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
+ return -ENOSYS;
+ default:
+ IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+ return -ENOSYS;
+ }
+ if ((ofs + size) > maxsize) {
+ IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+ return -EINVAL;
+ }
+ priv->testmode_sram.buff_size = (size / 4) * 4;
+ priv->testmode_sram.buff_addr =
+ kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+ if (priv->testmode_sram.buff_addr == NULL) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ base = 0x800000;
+ _iwl_read_targ_mem_words(bus(priv), base + ofs,
+ priv->testmode_sram.buff_addr,
+ priv->testmode_sram.buff_size / 4);
+ priv->testmode_sram.num_chunks =
+ DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+ priv->testmode_sram.sram_readed = true;
+ return 0;
+}
+
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwl_priv *priv = hw->priv;
+ int idx, length;
+
+ if (priv->testmode_sram.sram_readed) {
+ idx = cb->args[4];
+ if (idx >= priv->testmode_sram.num_chunks) {
+ iwl_sram_cleanup(priv);
+ return -ENOENT;
+ }
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+ (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+ length = priv->testmode_sram.buff_size %
+ DUMP_CHUNK_SIZE;
+
+ NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+ priv->testmode_sram.buff_addr +
+ (DUMP_CHUNK_SIZE * idx));
+ idx++;
+ cb->args[4] = idx;
+ return 0;
+ } else
+ return -EFAULT;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -668,9 +865,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
result = iwl_testmode_ucode(hw, tb);
break;
- case IWL_TM_CMD_APP2DEV_REG_READ32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
break;
@@ -680,6 +879,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
@@ -696,6 +898,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_ownership(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_READ_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+ result = iwl_testmode_sram(hw, tb);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
result = -ENOSYS;
@@ -744,6 +951,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
result = iwl_testmode_trace_dump(hw, tb, skb, cb);
break;
+ case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+ result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+ break;
default:
result = -EINVAL;
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda4b0f..26138f11034 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
* the actual uCode host command ID is carried with
* IWL_TM_ATTR_UCODE_CMD_ID
*
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
* commands from user application to access registers
*
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
+ *
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
* commands from user application to change the ownership of the uCode;
* if the application has the ownership, only host commands from
* testmode will be delivered to the uCode. The default owner is the driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ * commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ * commands from user application to read data from SRAM
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load the Wake on Wireless LAN (WoWLAN) uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve the uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve the device ID information
+ *
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
- IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
- IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
- IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- IWL_TM_CMD_MAX = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+ IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
+ IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
+ IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
+ IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
+ IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
+ IWL_TM_CMD_MAX = 25,
};
/*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_SRAM_ADDR for the address in SRAM
+ * IWL_TM_ATTR_SRAM_SIZE for the size of the data to read
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ * IWL_TM_ATTR_SRAM_DUMP for the data read from SRAM
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ * IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ * IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_MAX = 15,
+ IWL_TM_ATTR_SRAM_ADDR = 15,
+ IWL_TM_ATTR_SRAM_SIZE = 16,
+ IWL_TM_ATTR_SRAM_DUMP = 17,
+ IWL_TM_ATTR_FW_VERSION = 18,
+ IWL_TM_ATTR_DEVICE_ID = 19,
+ IWL_TM_ATTR_MAX = 20,
};
/* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
#define TRACE_BUFF_SIZE_MIN 0x20000
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
-#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
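As an illustration of how the dump handlers in iwl-testmode.c use this constant, the length of dump chunk idx is derived as in this small sketch, mirroring iwl_testmode_trace_dump() and iwl_testmode_sram_dump(); buff_size and idx are assumed to come from the caller.

	int num_chunks = DIV_ROUND_UP(buff_size, DUMP_CHUNK_SIZE);
	int length = DUMP_CHUNK_SIZE;

	/* the last chunk only carries the remainder, if any */
	if ((idx + 1) == num_chunks && (buff_size % DUMP_CHUNK_SIZE))
		length = buff_size % DUMP_CHUNK_SIZE;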
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 2b6756e8b8f..f6debf91d7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -219,9 +219,7 @@ struct iwl_trans_pcie {
/* INT ICT Table */
__le32 *ict_tbl;
- void *ict_tbl_vir;
dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
int ict_index;
u32 inta;
bool use_ict;
@@ -236,6 +234,7 @@ struct iwl_trans_pcie {
const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
u8 mcast_queue[NUM_IWL_RXON_CTX];
+ u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
@@ -280,20 +279,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
- int sta_id, int tid, int frame_limit);
+ int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
@@ -354,8 +349,13 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
txq->swq_id = (hwq << 2) | ac;
}
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+ return txq->swq_id & 0x3;
+}
+
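The software queue id packs the HW queue number and the AC into a single byte, so iwl_set_swq_id() and iwl_get_queue_ac() above are exact inverses; a quick worked example, assuming txq points to a valid struct iwl_tx_queue:

	/* encode: hwq = 10, ac = 2  ->  swq_id = (10 << 2) | 2 = 42 */
	iwl_set_swq_id(txq, 2, 10);

	/* decode: ac = 42 & 0x3 = 2, hwq = 42 >> 2 = 10 */
	u8 ac  = iwl_get_queue_ac(txq);
	u8 hwq = txq->swq_id >> 2;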
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -363,13 +363,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+ if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
iwl_wake_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
+ hwq, ac, msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ }
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -377,9 +386,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+ if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
iwl_stop_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
+ hwq, msg);
+ }
}
#ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 374c68cc1d7..752493f0040 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.error_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_errlog_ptr;
} else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+ IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
}
/**
@@ -657,7 +672,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_priv *priv = priv(trans);
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (priv->cfg->internal_wimax_coex &&
+ if (cfg(priv)->internal_wimax_coex &&
(!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
if (num_events == 0)
return pos;
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_evtlog_ptr;
} else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
size_t bufsz = 0;
struct iwl_priv *priv = priv(trans);
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
logsize = priv->init_evtlog_size;
if (!base)
base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
IWL_ERR(trans,
"Invalid event log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return -EINVAL;
}
@@ -1108,7 +1123,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
- priv(trans)->ucode_write_complete = 1;
+ trans->ucode_write_complete = 1;
wake_up(&trans->shrd->wait_command_queue);
}
@@ -1136,7 +1151,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* ICT functions
*
******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
@@ -1144,21 +1163,19 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans_pcie->ict_tbl_vir) {
- dma_free_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- trans_pcie->ict_tbl_vir,
+ if (trans_pcie->ict_tbl) {
+ dma_free_coherent(bus(trans)->dev, ICT_SIZE,
+ trans_pcie->ict_tbl,
trans_pcie->ict_tbl_dma);
- trans_pcie->ict_tbl_vir = NULL;
- memset(&trans_pcie->ict_tbl_dma, 0,
- sizeof(trans_pcie->ict_tbl_dma));
- memset(&trans_pcie->aligned_ict_tbl_dma, 0,
- sizeof(trans_pcie->aligned_ict_tbl_dma));
+ trans_pcie->ict_tbl = NULL;
+ trans_pcie->ict_tbl_dma = 0;
}
}
-/* allocate dram shared table it is a PAGE_SIZE aligned
+/*
+ * Allocate the DRAM shared table, an aligned memory
+ * block of ICT_SIZE bytes;
* also reset all data related to ICT table interrupt.
*/
int iwl_alloc_isr_ict(struct iwl_trans *trans)
@@ -1166,36 +1183,26 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- /* allocate shrared data table */
- trans_pcie->ict_tbl_vir =
- dma_alloc_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &trans_pcie->ict_tbl_dma, GFP_KERNEL);
- if (!trans_pcie->ict_tbl_vir)
+ trans_pcie->ict_tbl =
+ dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->ict_tbl)
return -ENOMEM;
- /* align table to PAGE_SIZE boundary */
- trans_pcie->aligned_ict_tbl_dma =
- ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)trans_pcie->ict_tbl_dma,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ /* just an API sanity check ... it is guaranteed to be aligned */
+ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+ iwl_free_isr_ict(trans);
+ return -EINVAL;
+ }
- trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
- (trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma);
- IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
- trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
/* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl_vir, 0,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
trans_pcie->ict_index = 0;
/* add periodic RX interrupt */
@@ -1213,23 +1220,20 @@ int iwl_reset_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans_pcie->ict_tbl_vir)
+ if (!trans_pcie->ict_tbl)
return 0;
spin_lock_irqsave(&trans->shrd->lock, flags);
iwl_disable_interrupts(trans);
- memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
val |= CSR_DRAM_INT_TBL_ENABLE;
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
- IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
trans_pcie->use_ict = true;
@@ -1266,6 +1270,8 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (!trans)
return IRQ_NONE;
+ trace_iwlwifi_dev_irq(priv(trans));
+
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_irqsave(&trans->shrd->lock, flags);
@@ -1340,6 +1346,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
struct iwl_trans_pcie *trans_pcie;
u32 inta, inta_mask;
u32 val = 0;
+ u32 read;
unsigned long flags;
if (!trans)
@@ -1353,6 +1360,8 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (!trans_pcie->use_ict)
return iwl_isr(irq, data);
+ trace_iwlwifi_dev_irq(priv(trans));
+
spin_lock_irqsave(&trans->shrd->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
@@ -1367,24 +1376,29 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
- if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+ if (!read) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
goto none;
}
- /* read all entries that not 0 start with ict_index */
- while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
-
- val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note that the entry at ict_index was already read above.
+ */
+ do {
+ val |= read;
IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index,
- le32_to_cpu(
- trans_pcie->ict_tbl[trans_pcie->ict_index]));
+ trans_pcie->ict_index, read);
trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
trans_pcie->ict_index =
iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
- }
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+ read);
+ } while (read);
/* We should not get this value, just ignore it. */
if (val == 0xffffffff)
@@ -1411,7 +1425,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta) {
+ !trans_pcie->inta) {
/* Allow interrupts if they were disabled by this handler and
* no tasklet was scheduled. We should not enable the interrupt;
* the tasklet will enable it.
@@ -1427,7 +1441,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* only Re-enable if disabled by irq.
*/
if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 4a0c95302a7..bd29568177e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -408,6 +408,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
@@ -430,7 +431,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
- IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+ IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
@@ -446,14 +447,21 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
return -EINVAL;
}
+static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
+{
+ if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
+ return false;
+ return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues);
+}
+
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id,
- int tid, int frame_limit)
+ int tid, int frame_limit, u16 ssn)
{
- int tx_fifo, txq_id, ssn_idx;
+ int tx_fifo, txq_id;
u16 ra_tid;
unsigned long flags;
- struct iwl_tid_data *tid_data;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -469,11 +477,15 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
return;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- ssn_idx = SEQ_TO_SN(tid_data->seq_number);
- txq_id = tid_data->agg.txq_id;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+ IWL_ERR(trans,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues - 1);
+ return;
+ }
ra_tid = BUILD_RAxTID(sta_id, tid);
@@ -493,9 +505,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
+ trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
+ trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+ iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
@@ -539,12 +551,9 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn)
+ int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tid_data *tid_data;
- unsigned long flags;
int txq_id;
txq_id = iwlagn_txq_ctx_activate_free(trans);
@@ -553,117 +562,39 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
return -ENXIO;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
+ trans_pcie->agg_txq[sta_id][tid] = txq_id;
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
- } else {
- IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
- "queue\n", tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-
return 0;
}
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
-
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
-
- iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(trans_pcie, txq_id);
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- int read_ptr, write_ptr;
- struct iwl_tid_data *tid_data;
- int txq_id;
-
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- txq_id = tid_data->agg.txq_id;
-
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(trans).num_ampdu_queues <= txq_id)) {
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
IWLAGN_FIRST_AMPDU_QUEUE +
hw_params(trans).num_ampdu_queues - 1);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return -EINVAL;
}
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(trans, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- trans->shrd->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
- read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
- trans->shrd->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
-turn_off:
- trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&trans->shrd->sta_lock);
- spin_lock(&trans->shrd->lock);
-
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
- iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ trans_pcie->agg_txq[sta_id][tid] = 0;
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
}
@@ -982,7 +913,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1000,6 +932,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return -EBUSY;
+
+
+ if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+ get_cmd_string(cmd->id));
+ return -ECANCELED;
+ }
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s failed: FW Error\n",
+ get_cmd_string(cmd->id));
+ return -EIO;
+ }
set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@@ -1008,7 +954,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1022,12 +969,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
&trans_pcie->txq[trans->shrd->cmd_queue];
struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@@ -1039,18 +986,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
}
- if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s failed: FW Error\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
@@ -1071,7 +1006,7 @@ cancel:
trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
-fail:
+
if (cmd->reply_page) {
iwl_free_pages(trans->shrd, cmd->reply_page);
cmd->reply_page = 0;
@@ -1115,9 +1050,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
return 0;
}
- IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
- q->read_ptr, index);
-
if (WARN_ON(!skb_queue_empty(skbs)))
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce918980e97..67d6e324e26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -88,18 +88,16 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
return -EINVAL;
/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
/*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb_stts;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
return 0;
@@ -1044,7 +1042,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1058,13 +1056,12 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
- u16 seq_number = 0;
u8 wait_write_ptr = 0;
u8 txq_id;
- u8 tid = 0;
bool is_agg = false;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
/*
* Send this frame after DTIM -- there's a special queue
@@ -1085,36 +1082,28 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq_id =
trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- u8 *qc = NULL;
- struct iwl_tid_data *tid_data;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
- return -1;
-
- seq_number = tid_data->seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
- txq_id = tid_data->agg.txq_id;
- is_agg = true;
- }
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ WARN_ON(tid >= IWL_MAX_TID_COUNT);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ is_agg = true;
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirement to help the SCD parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
/* Set up driver data for this TFD */
txq->skbs[q->write_ptr] = skb;
txq->cmd[q->write_ptr] = dev_cmd;
@@ -1197,9 +1186,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- if (is_agg)
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
- le16_to_cpu(tx_cmd->len));
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
@@ -1214,13 +1201,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_txq_update_write_ptr(trans, txq);
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- trans->shrd->tid_data[sta_id][tid].seq_number =
- seq_number;
- }
-
/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually,
@@ -1232,7 +1212,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq);
+ iwl_stop_queue(trans, txq, "Queue is full");
}
}
return 0;
@@ -1269,101 +1249,48 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
return 0;
}
-static int iwlagn_txq_check_empty(struct iwl_trans *trans,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
- struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue DELBA flow\n");
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
- tid_data->agg.state = IWL_AGG_OFF;
- iwl_stop_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
- freed);
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
- }
-}
-
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- enum iwl_agg_state agg_state;
/* n_bd is usually 256 => n_bd - 1 = 0xff */
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
- bool cond;
txq->time_stamp = jiffies;
- if (txq->sched_retry) {
- agg_state =
- trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
- cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
- } else {
- cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+ if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+ txq_id != trans_pcie->agg_txq[sta_id][tid])) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often, and in order
+ * not to flood the syslog, don't use IWL_ERR or IWL_WARN.
+ */
+ IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
+ "agg_txq[sta_id[tid] %d", txq_id,
+ trans_pcie->agg_txq[sta_id][tid]);
+ return 1;
}
if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- ssn , tfd_num, txq_id, txq->swq_id);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+ txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+ tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
- iwl_wake_queue(trans, txq);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ (!txq->sched_retry ||
+ status != TX_STATUS_FAIL_PASSIVE_NO_RX))
+ iwl_wake_queue(trans, txq, "Packets reclaimed");
}
-
- iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
- iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+ return 0;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
+ iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans);
iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans);
@@ -1419,7 +1346,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#endif /* CONFIG_PM_SLEEP */
static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
@@ -1427,11 +1355,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
for (ac = 0; ac < AC_NUM; ac++) {
txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+ IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
ac,
(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
}
}
@@ -1454,11 +1382,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
return iwl_trans;
}
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
+ const char *msg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
}
#define IWL_FLUSH_WAIT_MS 2000
@@ -1513,8 +1442,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
if (time_after(jiffies, timeout)) {
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
hw_params(trans).wd_timeout);
- IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+ iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ & (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
return 1;
}
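The reclaim path above now reports a bad queue mapping to its caller instead of silently reclaiming TFDs from the wrong queue. A minimal caller-side sketch (hypothetical, not part of this patch; the real handler lives in the TX-reply path) that honours the new return value could look like this:

static void example_handle_tx_reply(struct iwl_trans *trans, int sta_id,
				    int tid, int txq_id, int ssn, u32 status)
{
	struct sk_buff_head skbs;

	__skb_queue_head_init(&skbs);

	/* non-zero means the queue mapping was bogus; nothing was freed */
	if (iwl_trans_reclaim(trans, sta_id, tid, txq_id, ssn, status, &skbs))
		return;

	/* reclaimed frames would be handed back to mac80211 here */
}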
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c5923125c3f..e6bf3f55477 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -171,32 +171,31 @@ struct iwl_trans_ops {
void (*tx_start)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx);
+ enum iwl_rxon_context_id ctx,
+ const char *msg);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id);
- void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ u8 sta_id, u8 tid);
+ int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
int (*tx_agg_alloc)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id, int tid,
- u16 *ssn);
+ int sta_id, int tid);
void (*tx_agg_setup)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id, int tid,
- int frame_limit);
+ int frame_limit, u16 ssn);
void (*kick_nic)(struct iwl_trans *trans);
void (*free)(struct iwl_trans *trans);
- void (*stop_queue)(struct iwl_trans *trans, int q);
+ void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@@ -207,17 +206,54 @@ struct iwl_trans_ops {
#endif
};
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ dma_addr_t p_addr; /* hardware address */
+ void *v_addr; /* software address */
+ u32 len; /* size in bytes */
+};
+
+struct fw_img {
+ struct fw_desc code; /* firmware code image */
+ struct fw_desc data; /* firmware data image */
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
+};
+
/**
* struct iwl_trans - transport common data
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_rt: run time ucode image
+ * @ucode_init: init ucode image
+ * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;
+ u8 ucode_write_complete; /* the image write is complete */
+ struct fw_img ucode_rt;
+ struct fw_img ucode_init;
+ struct fw_img ucode_wowlan;
+
+ /* eeprom related variables */
+ int nvm_device_type;
+
+ /* init calibration results */
+ struct list_head calib_results;
+
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to the size of a pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -249,9 +285,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans)
}
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
- trans->ops->wake_any_queue(trans, ctx);
+ trans->ops->wake_any_queue(trans, ctx, msg);
}
@@ -266,39 +303,38 @@ int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
- return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
+ return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
int tid, int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
- trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
+ status, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
int sta_id, int tid)
{
- return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+ return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- int sta_id, int tid, u16 *ssn)
+ int sta_id, int tid)
{
- return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
+ return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
int sta_id, int tid,
- int frame_limit)
+ int frame_limit, u16 ssn)
{
- trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
@@ -311,9 +347,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
trans->ops->free(trans);
}
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
+ const char *msg)
{
- trans->ops->stop_queue(trans, q);
+ trans->ops->stop_queue(trans, q, msg);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -348,4 +385,13 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len);
+void iwl_dealloc_ucode(struct iwl_trans *trans);
+
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
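With the firmware images now owned by struct iwl_trans, iwl_alloc_fw_desc() copies each section into a DMA-coherent buffer and iwl_dealloc_ucode() releases all of them. A hedged sketch of how one image might be filled; example_copy_fw_img() and its parameters are illustrative, not from the patch:

static int example_copy_fw_img(struct iwl_trans *trans, struct fw_img *img,
			       const void *inst, size_t inst_len,
			       const void *data, size_t data_len)
{
	int ret;

	ret = iwl_alloc_fw_desc(bus(trans), &img->code, inst, inst_len);
	if (ret)
		return ret;

	ret = iwl_alloc_fw_desc(bus(trans), &img->data, data, data_len);
	if (ret)
		/* release every image the transport currently owns */
		iwl_dealloc_ucode(trans);

	return ret;
}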
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 8ba0dd54e37..36a1b5b2585 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -72,51 +74,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(bus->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+ iwl_free_fw_desc(bus, &img->code);
+ iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+ iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+ iwl_free_fw_img(bus(trans), &trans->ucode_init);
+ iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len)
+{
+ if (!len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(bus->dev, len,
+ &desc->p_addr, GFP_KERNEL);
+ if (!desc->v_addr)
+ return -ENOMEM;
+
+ desc->len = len;
+ memcpy(desc->v_addr, data, len);
+ return 0;
+}
+
/*
* ucode
*/
-static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
struct fw_desc *image, u32 dst_addr)
{
+ struct iwl_bus *bus = bus(trans);
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
- priv->ucode_write_complete = 0;
+ trans->ucode_write_complete = 0;
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_timeout(priv->shrd->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
+ IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+ ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ trans->ucode_write_complete, 5 * HZ);
if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
+ IWL_ERR(trans, "Could not load the %s uCode section\n",
name);
return -ETIMEDOUT;
}
@@ -124,41 +173,65 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
return 0;
}
-static int iwlagn_load_given_ucode(struct iwl_priv *priv,
- struct fw_img *image)
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
+{
+ switch (ucode_type) {
+ case IWL_UCODE_INIT:
+ return &trans->ucode_init;
+ case IWL_UCODE_WOWLAN:
+ return &trans->ucode_wowlan;
+ case IWL_UCODE_REGULAR:
+ return &trans->ucode_rt;
+ case IWL_UCODE_NONE:
+ break;
+ }
+ return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
int ret = 0;
+ struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
- ret = iwlagn_load_section(priv, "INST", &image->code,
+
+ if (!image) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+ ucode_type);
+ return -EINVAL;
+ }
+
+ ret = iwl_load_section(trans, "INST", &image->code,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
- return iwlagn_load_section(priv, "DATA", &image->data,
+ return iwl_load_section(trans, "DATA", &image->data,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
/*
* Calibration
*/
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+static int iwl_set_Xtal_calib(struct iwl_trans *trans)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -166,49 +239,48 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
sizeof(*offset_calib_low));
if (!(cmd.radio_sensor_offset_low)) {
- IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+ IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
memcpy(&cmd.burntVoltageRef, &hdr->voltage,
sizeof(hdr->voltage));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_low));
- IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+ IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -224,7 +296,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_trans_send_cmd(trans, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -234,60 +306,37 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
/* reduce the size of the length field itself */
len -= 4;
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to
- * their index. We sort them here
- */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return -1;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ if (iwl_calib_set(trans(priv), hdr, len))
+ IWL_ERR(priv, "Failed to record calibration data %d\n",
+ hdr->op_code);
+
return 0;
}
-int iwlagn_init_alive_start(struct iwl_priv *priv)
+int iwl_init_alive_start(struct iwl_trans *trans)
{
int ret;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(trans)->bt_params &&
+ cfg(trans)->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
* no need to close the envlope since we are going
* to load the runtime uCode later.
*/
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwlagn_send_calib_cfg(priv);
+ ret = iwl_send_calib_cfg(trans);
if (ret)
return ret;
@@ -295,21 +344,21 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib) {
- if (priv->cfg->temp_offset_v2)
- return iwlagn_set_temperature_offset_calib_v2(priv);
+ if (cfg(trans)->need_temp_offset_calib) {
+ if (cfg(trans)->temp_offset_v2)
+ return iwl_set_temperature_offset_calib_v2(trans);
else
- return iwlagn_set_temperature_offset_calib(priv);
+ return iwl_set_temperature_offset_calib(trans);
}
return 0;
}
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_trans *trans)
{
struct iwl_wimax_coex_cmd coex_cmd;
- if (priv->cfg->base_params->support_wimax_coexist) {
+ if (cfg(trans)->base_params->support_wimax_coexist) {
/* UnMask wake up src at associated sleep */
coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
@@ -328,12 +377,12 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_trans_send_cmd_pdu(trans(priv),
+ return iwl_trans_send_cmd_pdu(trans,
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -355,61 +404,64 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
0, 0, 0, 0, 0, 0, 0
};
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+void iwl_send_prio_tbl(struct iwl_trans *trans)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
- memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
- sizeof(iwlagn_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans(priv),
+ memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+ sizeof(iwl_bt_prio_tbl));
+ if (iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(priv, "failed to send BT prio tbl command\n");
+ IWL_ERR(trans, "failed to send BT prio tbl command\n");
}
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(priv, "failed to send BT env command\n");
+ IWL_ERR(trans, "failed to send BT env command\n");
return ret;
}
-static int iwlagn_alive_notify(struct iwl_priv *priv)
+static int iwl_alive_notify(struct iwl_trans *trans)
{
+ struct iwl_priv *priv = priv(trans);
struct iwl_rxon_context *ctx;
int ret;
if (!priv->tx_cmd_pool)
priv->tx_cmd_pool =
- kmem_cache_create("iwlagn_dev_cmd",
+ kmem_cache_create("iwl_dev_cmd",
sizeof(struct iwl_device_cmd),
sizeof(void *), 0, NULL);
if (!priv->tx_cmd_pool)
return -ENOMEM;
- iwl_trans_tx_start(trans(priv));
+ iwl_trans_tx_start(trans);
for_each_context(priv, ctx)
ctx->last_tx_rejected = false;
- ret = iwlagn_send_wimax_coex(priv);
+ ret = iwl_send_wimax_coex(trans);
if (ret)
return ret;
- ret = iwlagn_set_Xtal_calib(priv);
- if (ret)
- return ret;
+ if (!cfg(priv)->no_xtal_calib) {
+ ret = iwl_set_Xtal_calib(trans);
+ if (ret)
+ return ret;
+ }
- return iwl_send_calib_results(priv);
+ return iwl_send_calib_results(trans);
}
@@ -418,7 +470,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwl_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -426,15 +478,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
u32 val;
u32 i;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -442,7 +494,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
return 0;
}
-static void iwl_print_mismatch_inst(struct iwl_priv *priv,
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -451,18 +503,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
u32 offs;
int errors = 0;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section at "
+ IWL_ERR(bus, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@@ -474,91 +526,163 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
-static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
+static int iwl_verify_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
- if (!iwl_verify_inst_sparse(priv, &img->code)) {
- IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
+ struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+ if (!img) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+ return -EINVAL;
+ }
+
+ if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+ IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
return 0;
}
- IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+ IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
- iwl_print_mismatch_inst(priv, &img->code);
+ iwl_print_mismatch_inst(bus(trans), &img->code);
return -EIO;
}
-struct iwlagn_alive_data {
+struct iwl_alive_data {
bool valid;
u8 subtype;
};
-static void iwlagn_alive_fn(struct iwl_priv *priv,
+static void iwl_alive_fn(struct iwl_trans *trans,
struct iwl_rx_packet *pkt,
void *data)
{
- struct iwlagn_alive_data *alive_data = data;
+ struct iwl_alive_data *alive_data = data;
struct iwl_alive_resp *palive;
palive = &pkt->u.alive_frame;
- IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- priv->device_pointers.error_event_table =
+ trans->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- priv->device_pointers.log_event_table =
+ trans->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_add(&wait_entry->list, &shrd->notif_waits);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(shrd->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+ wake_up_all(&shrd->notif_waitq);
+}
+
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type)
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
- struct iwlagn_alive_data alive_data;
+ struct iwl_alive_data alive_data;
int ret;
- enum iwlagn_ucode_type old_type;
+ enum iwl_ucode_type old_type;
- ret = iwl_trans_start_device(trans(priv));
+ ret = iwl_trans_start_device(trans);
if (ret)
return ret;
- iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
- iwlagn_alive_fn, &alive_data);
+ iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
- old_type = priv->ucode_type;
- priv->ucode_type = ucode_type;
+ old_type = trans->shrd->ucode_type;
+ trans->shrd->ucode_type = ucode_type;
- ret = iwlagn_load_given_ucode(priv, image);
+ ret = iwl_load_given_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
- iwlagn_remove_notification(priv, &alive_wait);
+ trans->shrd->ucode_type = old_type;
+ iwl_remove_notification(trans->shrd, &alive_wait);
return ret;
}
- iwl_trans_kick_nic(trans(priv));
+ iwl_trans_kick_nic(trans);
/*
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ UCODE_ALIVE_TIMEOUT);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
- IWL_ERR(priv, "Loaded ucode is not valid!\n");
- priv->ucode_type = old_type;
+ IWL_ERR(trans, "Loaded ucode is not valid!\n");
+ trans->shrd->ucode_type = old_type;
return -EIO;
}
@@ -568,9 +692,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(priv, image);
+ ret = iwl_verify_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
@@ -578,42 +702,41 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
msleep(5);
}
- ret = iwlagn_alive_notify(priv);
+ ret = iwl_alive_notify(trans);
if (ret) {
- IWL_WARN(priv,
+ IWL_WARN(trans,
"Could not complete ALIVE transition: %d\n", ret);
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
return 0;
}
-int iwlagn_run_init_ucode(struct iwl_priv *priv)
+int iwl_run_init_ucode(struct iwl_trans *trans)
{
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&trans->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!priv->ucode_init.code.len)
+ if (!trans->ucode_init.code.len)
return 0;
- if (priv->ucode_type != IWL_UCODE_NONE)
+ if (trans->shrd->ucode_type != IWL_UCODE_NONE)
return 0;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(trans->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
/* Will also start the device */
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (ret)
goto error;
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans);
if (ret)
goto error;
@@ -621,14 +744,15 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &calib_wait,
+ UCODE_CALIB_TIMEOUT);
goto out;
error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(trans->shrd, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
return ret;
}
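The notification-wait helpers added above follow a register / trigger / wait pattern: the waiter must be registered before the triggering command is sent, and removed by hand only on the error path (iwl_wait_notification() removes it itself). A sketch under those assumptions; example_send_trigger() is a stand-in for whatever command starts the work:

static int example_wait_for_notif(struct iwl_trans *trans, u8 cmd,
				  unsigned long timeout)
{
	struct iwl_notification_wait wait;
	int ret;

	/* register first so the notification cannot race past us */
	iwl_init_notification_wait(trans->shrd, &wait, cmd, NULL, NULL);

	ret = example_send_trigger(trans, cmd);	/* hypothetical helper */
	if (ret) {
		iwl_remove_notification(trans->shrd, &wait);
		return ret;
	}

	/* -ETIMEDOUT on timeout, -EIO if the waits were aborted */
	return iwl_wait_notification(trans->shrd, &wait, timeout);
}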
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h
index f46c80e6e00..18501101a53 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-wifi.h
@@ -59,17 +59,16 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
+#ifndef __iwl_wifi_h__
+#define __iwl_wifi_h__
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
+#include "iwl-shared.h"
-#endif /* __iwl_4965_calib_h__ */
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
+int iwl_init_alive_start(struct iwl_trans *trans);
+int iwl_run_init_ucode(struct iwl_trans *trans);
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type);
+#endif /* __iwl_wifi_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c42be81e979..48e8218fd23 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -165,11 +165,15 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params *params)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
int ret;
IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
memset(key, 0, sizeof(struct iwm_key));
ret = iwm_key_init(key, key_index, mac_addr, params);
if (ret < 0) {
@@ -214,8 +218,12 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
if (!iwm->keys[key_index].key_len) {
IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
return 0;
@@ -236,6 +244,9 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
if (!iwm->keys[key_index].key_len) {
IWM_ERR(iwm, "Key %d not used\n", key_index);
return -EINVAL;
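The three hunks above apply the same fix: validate the cfg80211-supplied key_index before using it to index iwm->keys[]. The pattern, as a small hypothetical helper (not in the patch):

static struct iwm_key *example_get_key(struct iwm_priv *iwm, u8 key_index)
{
	/* reject out-of-range indexes before touching the array */
	if (key_index >= IWM_NUM_KEYS)
		return NULL;

	return &iwm->keys[key_index];
}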
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 98a179f98ea..1f868b166d1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
.mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
};
-static int modparam_reset;
+static bool modparam_reset;
module_param_named(reset, modparam_reset, bool, 0644);
MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
iwm_invalidate_mlme_profile(iwm);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a414768f40f..7d708f4395f 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
umac_sta->mac_addr,
umac_sta->flags & UMAC_STA_FLAG_QOS);
- sta->valid = 1;
+ sta->valid = true;
sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = 0;
+ sta->valid = false;
break;
case UMAC_OPCODE_CLEAR_ALL:
for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = 0;
+ iwm->sta_table[i].valid = false;
break;
default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
switch (hdr->oid) {
case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = 1;
+ iwm->umac_profile_active = true;
break;
default:
break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
*/
list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
- cmd->resp_received = 1;
+ cmd->resp_received = true;
cmd->buf.len = buf_size;
memcpy(cmd->buf.hdr, buf, buf_size);
wake_up_interruptible(&iwm->nonwifi_queue);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7f1ab28940..a7cd311cb1b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
struct cmd_header *resp)
{
+ struct cfg80211_bss *bss;
struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
int bsssize;
const u8 *pos;
@@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
LBS_SCAN_RSSI_TO_MBM(rssi)/100);
if (channel &&
- !(channel->flags & IEEE80211_CHAN_DISABLED))
- cfg80211_inform_bss(wiphy, channel,
+ !(channel->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(wiphy, channel,
bssid, get_unaligned_le64(tsfdesc),
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+ }
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -728,9 +731,11 @@ static void lbs_scan_worker(struct work_struct *work)
le16_to_cpu(scan_cmd->hdr.size),
lbs_ret_scan, 0);
- if (priv->scan_channel >= priv->scan_req->n_channels)
+ if (priv->scan_channel >= priv->scan_req->n_channels) {
/* Mark scan done */
+ cancel_delayed_work(&priv->scan_work);
lbs_scan_done(priv);
+ }
/* Restart network */
if (carrier)
@@ -759,12 +764,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal,
request->n_ssids, request->n_channels, request->ie_len);
priv->scan_channel = 0;
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(50));
-
priv->scan_req = request;
priv->internal_scan = internal;
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(50));
+
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1720,6 +1725,7 @@ static void lbs_join_post(struct lbs_private *priv,
2 + 2 + /* atim */
2 + 8]; /* extended rates */
u8 *fake = fake_ie;
+ struct cfg80211_bss *bss;
lbs_deb_enter(LBS_DEB_CFG80211);
@@ -1763,14 +1769,15 @@ static void lbs_join_post(struct lbs_private *priv,
*fake++ = 0x6c;
lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
- cfg80211_inform_bss(priv->wdev->wiphy,
- params->channel,
- bssid,
- 0,
- capability,
- params->beacon_interval,
- fake_ie, fake - fake_ie,
- 0, GFP_KERNEL);
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ params->channel,
+ bssid,
+ 0,
+ capability,
+ params->beacon_interval,
+ fake_ie, fake - fake_ie,
+ 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
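Both hunks above fix the same leak: cfg80211_inform_bss() returns a referenced struct cfg80211_bss, and a caller that does not keep the pointer must drop it with cfg80211_put_bss(). A minimal sketch of the pattern (argument values are illustrative):

static void example_report_bss(struct wiphy *wiphy,
			       struct ieee80211_channel *chan, const u8 *bssid)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss(wiphy, chan, bssid, 0 /* tsf */,
				  WLAN_CAPABILITY_ESS, 100 /* beacon int */,
				  NULL, 0 /* no IEs */, 0 /* signal */,
				  GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(bss);	/* we keep no reference of our own */
}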
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index d8d8f0d0899..c192671610f 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -704,7 +704,7 @@ out_unlock:
struct lbs_debugfs_files {
const char *name;
- int perm;
+ umode_t perm;
struct file_operations fops;
};
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 885ddc1c4fe..f955b2d66ed 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
{
struct lbs_private *priv = dev->ml_priv;
- snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.p%u",
priv->fwrelease >> 24 & 0xff,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strcpy(info->driver, "libertas");
- strcpy(info->version, lbs_driver_version);
+ strlcpy(info->driver, "libertas", sizeof(info->driver));
+ strlcpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index e2693517986..3f7bf4d912b 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* Most of the libertas cards can do unaligned register access, but some
* weird ones cannot. That's especially true for the CF8305 card.
*/
- card->align_regs = 0;
+ card->align_regs = false;
card->model = get_model(p_dev->manf_id, p_dev->card_id);
if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Check if we have a current silicon */
prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
if (card->model == MODEL_8305) {
- card->align_regs = 1;
+ card->align_regs = true;
if (prod_id < IF_CS_CF8305_B1_REV) {
pr_err("8305 rev B0 and older are not supported\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 728baa44525..50b1ee7721e 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = __devexit_p(libertas_spi_remove),
.driver = {
.name = "libertas_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index db879c364eb..b5fbbc7947d 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -1184,29 +1184,7 @@ static struct usb_driver if_usb_driver = {
.reset_resume = if_usb_resume,
};
-static int __init if_usb_init_module(void)
-{
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_MAIN);
-
- ret = usb_register(&if_usb_driver);
-
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
- lbs_deb_enter(LBS_DEB_MAIN);
-
- usb_deregister(&if_usb_driver);
-
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
MODULE_DESCRIPTION("8388 USB WLAN Driver");
MODULE_AUTHOR("Marvell International Ltd. and Red Hat, Inc.");
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 68202e63873..aff8b5743af 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -924,27 +924,7 @@ static struct usb_driver if_usb_driver = {
.resume = if_usb_resume,
};
-static int __init if_usb_init_module(void)
-{
- int ret = 0;
-
- lbtf_deb_enter(LBTF_DEB_MAIN);
-
- ret = usb_register(&if_usb_driver);
-
- lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
- lbtf_deb_enter(LBTF_DEB_MAIN);
- usb_deregister(&if_usb_driver);
- lbtf_deb_leave(LBTF_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
MODULE_AUTHOR("Cozybit Inc.");
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ceb51b6e670..a03457292c8 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
return;
if (skb_queue_empty(&priv->bc_ps_buf)) {
- bool tx_buff_bc = 0;
+ bool tx_buff_bc = false;
while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb_queue_tail(&priv->bc_ps_buf, skb);
- tx_buff_bc = 1;
+ tx_buff_bc = true;
}
if (tx_buff_bc) {
ieee80211_stop_queues(priv->hw);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 523ad55a288..4b9e730d2c8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -37,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -665,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
- int _pid;
+ u32 _pid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -676,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -707,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
wiphy_debug(hw->wiphy, "%s\n", __func__);
- data->started = 1;
+ data->started = true;
return 0;
}
@@ -715,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
- data->started = 0;
+ data->started = false;
del_timer(&data->beacon_timer);
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
@@ -764,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- int _pid;
+ u32 _pid;
hwsim_check_magic(vif);
@@ -781,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1254,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1275,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->ta, mac, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1292,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1314,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1634,8 +1635,6 @@ static int hwsim_init_netlink(void)
int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- wmediumd_pid = 0;
-
rc = genl_register_family_with_ops(&hwsim_genl_family,
hwsim_ops, ARRAY_SIZE(hwsim_ops));
if (rc)
@@ -1748,6 +1747,8 @@ static int __init init_mac80211_hwsim(void)
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_AMPDU_AGGREGATION;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
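The hwsim hunks above widen wmediumd_pid to u32 and read it once through ACCESS_ONCE() so the test and the use see the same snapshot even if user space detaches concurrently. The pattern, as a hypothetical helper in the same file:

static bool example_deliver_via_wmediumd(struct ieee80211_hw *hw,
					 struct sk_buff *skb)
{
	u32 pid = ACCESS_ONCE(wmediumd_pid);	/* single snapshot */

	if (!pid)
		return false;	/* no user-space medium attached */

	mac80211_hwsim_tx_frame_nl(hw, skb, pid);
	return true;
}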
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 7aa9aa0ac95..681d3f2a4c2 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@
* Since the buffer is linear, the function uses rotation to simulate
* a circular buffer.
*/
-static int
+static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl
*rx_reor_tbl_ptr, int start_win)
@@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = start_win;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
- return 0;
}
/*
@@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
-static int
+static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
{
@@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
&(MAX_TID_VALUE - 1);
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- return 0;
}
/*
@@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u8 *ta, u8 pkt_type, void *payload)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
- int start_win, end_win, win_size, ret;
+ int start_win, end_win, win_size;
u16 pkt_index;
rx_reor_tbl_ptr =
@@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
+ mwifiex_11n_dispatch_pkt_until_start_win(priv,
rx_reor_tbl_ptr, start_win);
-
- if (ret)
- return ret;
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* Dispatch all packets sequentially from start_win until a
* hole is found and adjust the start_win appropriately
*/
- ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+ mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8f2797aa0c6..2a078cea830 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8787"
+ tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
depends on MWIFIEX && MMC
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8787 chipset with SDIO interface.
+ 8787/8797 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 462c71067bf..c3b6c4652cd 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -24,50 +24,26 @@
* This function maps the nl802.11 channel type into driver channel type.
*
* The mapping is as follows -
- * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE
- * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
- * Others -> NO_SEC_CHANNEL
+ * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE
+ * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
+ * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
-static int
-mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
- channel_type)
+static u8
+mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
+ channel_type)
{
switch (channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
- return NO_SEC_CHANNEL;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
case NL80211_CHAN_HT40PLUS:
- return SEC_CHANNEL_ABOVE;
+ return IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
case NL80211_CHAN_HT40MINUS:
- return SEC_CHANNEL_BELOW;
- default:
- return NO_SEC_CHANNEL;
- }
-}
-
-/*
- * This function maps the driver channel type into nl802.11 channel type.
- *
- * The mapping is as follows -
- * NO_SEC_CHANNEL -> NL80211_CHAN_HT20
- * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS
- * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS
- * Others -> NL80211_CHAN_HT20
- */
-static enum nl80211_channel_type
-mwifiex_channels_to_cfg80211_channel_type(int channel_type)
-{
- switch (channel_type) {
- case NO_SEC_CHANNEL:
- return NL80211_CHAN_HT20;
- case SEC_CHANNEL_ABOVE:
- return NL80211_CHAN_HT40PLUS;
- case SEC_CHANNEL_BELOW:
- return NL80211_CHAN_HT40MINUS;
+ return IEEE80211_HT_PARAM_CHA_SEC_BELOW;
default:
- return NL80211_CHAN_HT20;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
}
}
@@ -120,10 +96,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_power_cfg power_cfg;
+ int dbm = MBM_TO_DBM(mbm);
if (type == NL80211_TX_POWER_FIXED) {
power_cfg.is_power_auto = 0;
@@ -330,37 +307,51 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
enum nl80211_channel_type channel_type)
{
struct mwifiex_chan_freq_power cfp;
- struct mwifiex_ds_band_cfg band_cfg;
u32 config_bands = 0;
struct wiphy *wiphy = priv->wdev->wiphy;
+ struct mwifiex_adapter *adapter = priv->adapter;
if (chan) {
- memset(&band_cfg, 0, sizeof(band_cfg));
/* Set appropriate bands */
- if (chan->band == IEEE80211_BAND_2GHZ)
- config_bands = BAND_B | BAND_G | BAND_GN;
- else
- config_bands = BAND_AN | BAND_A;
- if (priv->bss_mode == NL80211_IFTYPE_STATION
- || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) {
- band_cfg.config_bands = config_bands;
- } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- band_cfg.config_bands = config_bands;
- band_cfg.adhoc_start_band = config_bands;
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ if (priv->adapter->config_bands == BAND_B ||
+ priv->adapter->config_bands == BAND_G)
+ config_bands =
+ priv->adapter->config_bands;
+ else
+ config_bands = BAND_B | BAND_G;
+ else
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ } else {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
}
- band_cfg.sec_chan_offset =
- mwifiex_cfg80211_channel_type_to_mwifiex_channels
+ if (!((config_bands | adapter->fw_bands) &
+ ~adapter->fw_bands)) {
+ adapter->config_bands = config_bands;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = config_bands;
+ if ((config_bands & BAND_GN) ||
+ (config_bands & BAND_AN))
+ adapter->adhoc_11n_enabled = true;
+ else
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset =
+ mwifiex_cfg80211_channel_type_to_sec_chan_offset
(channel_type);
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ adapter->channel_type = channel_type;
mwifiex_send_domain_info_cmd_fw(wiphy);
}
wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
- "mode %d\n", config_bands, band_cfg.sec_chan_offset,
+ "mode %d\n", config_bands, adapter->sec_chan_offset,
priv->bss_mode);
if (!chan)
return 0;
@@ -696,9 +687,9 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask)
{
- struct mwifiex_ds_band_cfg band_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int index = 0, mode = 0, i;
+ struct mwifiex_adapter *adapter = priv->adapter;
/* Currently only 2.4GHz is supported */
for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
@@ -720,16 +711,15 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
mode |= BAND_B;
}
- memset(&band_cfg, 0, sizeof(band_cfg));
- band_cfg.config_bands = mode;
-
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
- band_cfg.adhoc_start_band = mode;
-
- band_cfg.sec_chan_offset = NO_SEC_CHANNEL;
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
+ adapter->config_bands = mode;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = mode;
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ adapter->channel_type = NL80211_CHAN_NO_HT;
wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
(mode & BAND_B) ? "b" : "",
@@ -750,17 +740,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
" reason code %d\n", priv->cfg_bssid, reason_code);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -780,6 +766,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
{
struct ieee80211_channel *chan;
struct mwifiex_bss_info bss_info;
+ struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
enum ieee80211_band band;
@@ -800,9 +787,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
- cfg80211_inform_bss(priv->wdev->wiphy, chan,
+ bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
@@ -851,8 +839,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
if (channel)
ret = mwifiex_set_rf_channel(priv, channel,
- mwifiex_channels_to_cfg80211_channel_type
- (priv->adapter->chan_offset));
+ priv->adapter->channel_type);
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
@@ -978,27 +965,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret = 0;
- if (priv->assoc_request)
- return -EBUSY;
-
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "received infra assoc request "
"when station is in ibss mode\n");
goto done;
}
- priv->assoc_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
-
- priv->assoc_request = 1;
done:
- priv->assoc_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+ NULL, 0, WLAN_STATUS_SUCCESS,
+ GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+ }
+
return ret;
}
@@ -1015,28 +1007,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
int ret = 0;
- if (priv->ibss_join_request)
- return -EBUSY;
-
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "request to join ibss received "
"when station is not in ibss mode\n");
goto done;
}
- priv->ibss_join_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
-
- priv->ibss_join_request = 1;
done:
- priv->ibss_join_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: failed creating/joining adhoc network\n");
+ }
+
return ret;
}
@@ -1051,17 +1044,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
-
wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -1078,15 +1066,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int i;
+ struct ieee80211_channel *chan;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
priv->scan_request = request;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+ GFP_KERNEL);
+ if (!priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < request->n_ssids; i++) {
+ memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+ request->ssids[i].ssid, request->ssids[i].ssid_len);
+ priv->user_scan_cfg->ssid_list[i].max_len =
+ request->ssids[i].ssid_len;
+ }
+ for (i = 0; i < request->n_channels; i++) {
+ chan = request->channels[i];
+ priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+ priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_PASSIVE;
+ else
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+
+ priv->user_scan_cfg->chan_list[i].scan_time = 0;
+ }
+ if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ return -EFAULT;
+
return 0;
}
@@ -1292,10 +1307,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
priv->media_connected = false;
- cancel_work_sync(&priv->cfg_workqueue);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
-
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
return 0;
@@ -1373,9 +1384,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- /* We are using custom domains */
- wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
/* Reserve space for bss band information */
wdev->wiphy->bss_priv_size = sizeof(u8);
@@ -1404,100 +1412,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
return ret;
}
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- * - Scan request completion
- * - Association request completion
- * - IBSS join request completion
- * - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
- struct mwifiex_private *priv =
- container_of(work, struct mwifiex_private, cfg_workqueue);
- struct mwifiex_user_scan_cfg *scan_req;
- int ret = 0, i;
- struct ieee80211_channel *chan;
-
- if (priv->scan_request) {
- scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
- if (!scan_req) {
- dev_err(priv->adapter->dev, "failed to alloc "
- "scan_req\n");
- return;
- }
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- memcpy(scan_req->ssid_list[i].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- scan_req->ssid_list[i].max_len =
- priv->scan_request->ssids[i].ssid_len;
- }
- for (i = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
- scan_req->chan_list[i].chan_number = chan->hw_value;
- scan_req->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_PASSIVE;
- else
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_ACTIVE;
- scan_req->chan_list[i].scan_time = 0;
- }
- if (mwifiex_set_user_scan_ioctl(priv, scan_req))
- ret = -EFAULT;
- priv->scan_result_status = ret;
- dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
- __func__);
- cfg80211_scan_done(priv->scan_request,
- (priv->scan_result_status < 0));
- priv->scan_request = NULL;
- kfree(scan_req);
- }
-
- if (priv->assoc_request == 1) {
- if (!priv->assoc_result) {
- cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- }
- priv->assoc_request = 0;
- priv->assoc_result = 0;
- }
-
- if (priv->ibss_join_request == 1) {
- if (!priv->ibss_join_result) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
- }
- priv->ibss_join_request = 0;
- priv->ibss_join_result = 0;
- }
-
- if (priv->disconnect) {
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- priv->disconnect = 0;
- }
-}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2500c..76c76c60438 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
int mwifiex_register_cfg80211(struct mwifiex_private *);
-void mwifiex_cfg80211_results(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f2e6de03805..1782a77f15d 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
* This function maps an index in supported rates table into
* the corresponding data rate.
*/
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info)
{
- u16 mcs_rate[4][8] = {
- {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
- , /* LG 40M */
- {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
- , /* SG 40M */
- {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
- , /* LG 20M */
- {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
- }; /* SG 20M */
-
+ /*
+ * For every mcs_rate row, the first 8 entries apply to a 1x1 stream
+ * and all 16 entries apply to a 2x2 stream.
+ */
+ u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+ };
+ u32 mcs_num_supp =
+ (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
rate = 0x0D; /* MCS 32 SGI rate */
else
rate = 0x0C; /* MCS 32 LGI rate */
- } else if (index < 8) {
+ } else if (index < mcs_num_supp) {
if (ht_info & BIT(1)) {
if (ht_info & BIT(2))
/* SGI, 40M */
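
For readability, the row selection implied by the branches above can be summarized as follows; the bit meanings are inferred from the surrounding code (BIT(0): HT rate, BIT(1): 40 MHz bandwidth, BIT(2): short guard interval), and the helper is only an illustrative sketch, not part of the patch:

    /* Map the ht_info bandwidth/GI bits to a row of mcs_rate[][] above. */
    static u32 mcs_rate_row(u8 ht_info)
    {
    	if (ht_info & BIT(1))				/* 40 MHz */
    		return (ht_info & BIT(2)) ? 1 : 0;	/* SGI 40M : LGI 40M */
    	return (ht_info & BIT(2)) ? 3 : 2;		/* SGI 20M : LGI 20M */
    }
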
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d39..6e0a3eaecf7 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/*
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0cc5d73cb0c..51c5417c569 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
@@ -375,8 +376,6 @@ enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
};
-#define SECOND_CHANNEL_BELOW 0x30
-#define SECOND_CHANNEL_ABOVE 0x10
struct mwifiex_chan_scan_param_set {
u8 radio_type;
u8 chan_number;
@@ -673,7 +672,7 @@ struct host_cmd_ds_802_11_ad_hoc_start {
union ieee_types_phy_param_set phy_param_set;
u16 reserved1;
__le16 cap_info_bitmap;
- u8 DataRate[HOSTCMD_SUPPORTED_RATES];
+ u8 data_rate[HOSTCMD_SUPPORTED_RATES];
} __packed;
struct host_cmd_ds_802_11_ad_hoc_result {
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d792b3fb7c1..e05b417a3fa 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
- sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
- (adapter->sleep_cfm->data);
adapter->cmd_sent = false;
@@ -248,12 +246,14 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(adapter->event_body, 0, sizeof(adapter->event_body));
adapter->hw_dot_11n_dev_cap = 0;
adapter->hw_dev_mcs_support = 0;
- adapter->chan_offset = 0;
+ adapter->sec_chan_offset = 0;
adapter->adhoc_11n_enabled = false;
mwifiex_wmm_init(adapter);
if (adapter->sleep_cfm) {
+ sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+ adapter->sleep_cfm->data;
memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
sleep_cfm_buf->command =
cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
@@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+ dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_wake_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_stop_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
* This function releases the lock variables and frees the
* associated locks.
*/
@@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->int_lock);
spin_lock_init(&adapter->main_proc_lock);
spin_lock_init(&adapter->mwifiex_cmd_lock);
+ spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e0b68e7c8ca..d5d81f1fe41 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,17 +62,6 @@ enum {
BAND_AN = 16,
};
-#define NO_SEC_CHANNEL 0
-#define SEC_CHANNEL_ABOVE 1
-#define SEC_CHANNEL_BELOW 3
-
-struct mwifiex_ds_band_cfg {
- u32 config_bands;
- u32 adhoc_start_band;
- u32 adhoc_channel;
- u32 sec_chan_offset;
-};
-
enum {
ADHOC_IDLE,
ADHOC_STARTED,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 62b4c293860..0b0eb5efba9 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
u32 cmd_append_size = 0;
u32 i;
u16 tmp_cap;
- uint16_t ht_cap_info;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+ u8 radio_type;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_htinfo *ht_info;
@@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
- memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
- mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
+ memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
+ mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
}
/* Find the last non zero */
- for (i = 0; i < sizeof(adhoc_start->DataRate) &&
- adhoc_start->DataRate[i];
- i++)
- ;
+ for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
+ if (!adhoc_start->data_rate[i])
+ break;
priv->curr_bss_params.num_of_rates = i;
/* Copy the ad-hoc creating rates into Current BSS rate structure */
memcpy(&priv->curr_bss_params.data_rates,
- &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
+ &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
- adhoc_start->DataRate[0], adhoc_start->DataRate[1],
- adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
+ adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+ adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@@ -886,12 +885,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
if (adapter->adhoc_start_band & BAND_GN
|| adapter->adhoc_start_band & BAND_AN) {
- if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
+ if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_ABOVE;
- else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
+ (IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
+ else if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_BELOW;
+ (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
chan_tlv->chan_scan_param[0].radio_type);
@@ -914,55 +915,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (adapter->adhoc_11n_enabled) {
- {
- ht_cap = (struct mwifiex_ie_types_htcap *) pos;
- memset(ht_cap, 0,
- sizeof(struct mwifiex_ie_types_htcap));
- ht_cap->header.type =
- cpu_to_le16(WLAN_EID_HT_CAPABILITY);
- ht_cap->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_cap));
- ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
-
- ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
- if (adapter->chan_offset) {
- ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
- ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
- ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
- }
+ /* Fill HT CAPABILITY */
+ ht_cap = (struct mwifiex_ie_types_htcap *) pos;
+ memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
+ ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ radio_type = mwifiex_band_to_radio_type(
+ priv->adapter->config_bands);
+ mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+
+ pos += sizeof(struct mwifiex_ie_types_htcap);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htcap);
- ht_cap->ht_cap.ampdu_params_info
- = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htcap);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htcap);
- }
- {
- ht_info = (struct mwifiex_ie_types_htinfo *) pos;
- memset(ht_info, 0,
- sizeof(struct mwifiex_ie_types_htinfo));
- ht_info->header.type =
- cpu_to_le16(WLAN_EID_HT_INFORMATION);
- ht_info->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_info));
- ht_info->ht_info.control_chan =
- (u8) priv->curr_bss_params.bss_descriptor.
- channel;
- if (adapter->chan_offset) {
- ht_info->ht_info.ht_param =
- adapter->chan_offset;
- ht_info->ht_info.ht_param |=
+ /* Fill HT INFORMATION */
+ ht_info = (struct mwifiex_ie_types_htinfo *) pos;
+ memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
+ ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+ ht_info->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_info));
+
+ ht_info->ht_info.control_chan =
+ (u8) priv->curr_bss_params.bss_descriptor.channel;
+ if (adapter->sec_chan_offset) {
+ ht_info->ht_info.ht_param = adapter->sec_chan_offset;
+ ht_info->ht_info.ht_param |=
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
- }
- ht_info->ht_info.operation_mode =
- cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- ht_info->ht_info.basic_set[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htinfo);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htinfo);
}
+ ht_info->ht_info.operation_mode =
+ cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ ht_info->ht_info.basic_set[0] = 0xff;
+ pos += sizeof(struct mwifiex_ie_types_htinfo);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htinfo);
}
cmd->size = cpu_to_le16((u16)
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7d672..84be196188c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
static int
mwifiex_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_inc(&priv->adapter->tx_pending);
if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- netif_stop_queue(priv->netdev);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
}
queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
jiffies, priv->bss_index);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
}
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
priv->num_tx_timeout = 0;
- priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
- INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
priv = adapter->priv[i];
if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev,
+ adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 30f138b6fa4..3186aa437f4 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@ struct mwifiex_private {
u8 scan_pending_on_block;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
- int scan_result_status;
- int assoc_request;
- u16 assoc_result;
- int ibss_join_request;
- u16 ibss_join_result;
- bool disconnect;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- struct workqueue_struct *workqueue;
- struct work_struct cfg_workqueue;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
@@ -647,7 +640,8 @@ struct mwifiex_adapter {
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
u8 adhoc_11n_enabled;
- u8 chan_offset;
+ u8 sec_chan_offset;
+ enum nl80211_channel_type channel_type;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
@@ -655,10 +649,19 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
struct cmd_ctrl_node *cmd_queued;
+ spinlock_t queue_lock; /* lock for tx queues */
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -775,7 +778,8 @@ struct mwifiex_chan_freq_power *
struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
struct mwifiex_private *priv,
u8 band, u32 freq);
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u8 **buffer);
@@ -951,8 +955,6 @@ int mwifiex_main_process(struct mwifiex_adapter *);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
- struct mwifiex_ds_band_cfg *);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index d34acf082d3..405350940a4 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,8 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
if (!card->txbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
- kfree(card->txbd_ring_vbase);
- return -1;
+ return -ENOMEM;
}
card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
@@ -477,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
if (!card->rxbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for "
"rxbd_ring.\n");
- return -1;
+ return -ENOMEM;
}
card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
@@ -570,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
if (!card->evtbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer. "
"Terminating download\n");
- return -1;
+ return -ENOMEM;
}
card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
@@ -1229,15 +1228,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (!skb)
return 0;
- if (rdptr >= MWIFIEX_MAX_EVT_BD)
+ if (rdptr >= MWIFIEX_MAX_EVT_BD) {
dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
rdptr);
+ return -EINVAL;
+ }
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
if (!card->evt_buf_list[rdptr]) {
@@ -1266,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
-done:
- /* Free the buffer for failure case */
- if (ret && skb)
- dev_kfree_skb_any(skb);
-
dev_dbg(adapter->dev, "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
@@ -1672,9 +1666,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
- if (!adapter || !skb) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
- __func__, adapter, skb);
+ if (!skb) {
+ dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8d3ab378662..6396d3318ea 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -500,7 +500,6 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- u8 scan_type;
for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
@@ -514,19 +513,20 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
scan_chan_list[chan_idx].radio_type = band;
- scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+
if (user_scan_in &&
user_scan_in->chan_list[0].scan_time)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
- else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+ else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
- if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
@@ -1391,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
{
int status;
- priv->adapter->scan_wait_q_woken = false;
-
status = mwifiex_scan_networks(priv, scan_req);
- if (!status)
- status = mwifiex_wait_queue_complete(priv->adapter);
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
return status;
}
@@ -1537,11 +1534,6 @@ done:
return 0;
}
-static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
-{
- kfree(bss->priv);
-}
-
/*
* This function handles the command response of scan.
*
@@ -1767,7 +1759,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL);
*(u8 *)bss->priv = band;
- bss->free_priv = mwifiex_free_bss_priv;
+ cfg80211_put_bss(bss);
if (priv->media_connected && !memcmp(bssid,
priv->curr_bss_params.bss_descriptor
@@ -1801,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
up(&priv->async_sem);
}
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+ "results\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 283171bbced..d39d8457f25 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
/* Device ID for SD8787 */
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
+/* Device ID for SD8797 */
+#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
{},
};
@@ -1084,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(adapter->ioport | 0x1000 |
(card->mpa_rx.ports << 4)) +
card->mpa_rx.start_port, 1))
- return -1;
+ goto error;
curr_ptr = card->mpa_rx.buf;
@@ -1127,12 +1130,29 @@ rx_curr_single:
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
- return -1;
+ goto error;
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
return 0;
+
+error:
+ if (MP_RX_AGGR_IN_PROGRESS(card)) {
+ /* Multiport-aggregation transfer failed - cleanup */
+ for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+ /* copy pkt to deaggr buf */
+ skb_deaggr = card->mpa_rx.skb_arr[pind];
+ dev_kfree_skb_any(skb_deaggr);
+ }
+ MP_RX_AGGR_BUF_RESET(card);
+ }
+
+ if (f_do_rx_cur)
+ /* Single transfer pending. Free curr buff also */
+ dev_kfree_skb_any(skb);
+
+ return -1;
}
/*
@@ -1268,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev,
"info: CFG reg val =%x\n", cr);
- dev_kfree_skb_any(skb);
return -1;
}
}
@@ -1573,7 +1592,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
- strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+
+ switch (func->device) {
+ case SDIO_DEVICE_ID_MARVELL_8797:
+ strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
+ break;
+ case SDIO_DEVICE_ID_MARVELL_8787:
+ default:
+ strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+ break;
+ }
return 0;
@@ -1630,14 +1658,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_tx.pkt_cnt = 0;
card->mpa_tx.start_port = 0;
- card->mpa_tx.enabled = 0;
+ card->mpa_tx.enabled = 1;
card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
card->mpa_rx.buf_len = 0;
card->mpa_rx.pkt_cnt = 0;
card->mpa_rx.start_port = 0;
- card->mpa_rx.enabled = 0;
+ card->mpa_rx.enabled = 1;
card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
/* Allocate buffers for SDIO MP-A */
@@ -1774,4 +1802,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 3f711801e58..a3fb322205b 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -29,6 +29,7 @@
#include "main.h"
#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index ea6518d1c9e..6e443ffa046 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -748,7 +748,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
rf_type = le16_to_cpu(rf_chan->rf_type);
- SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
+ SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
rf_chan->current_channel = cpu_to_le16(*channel);
}
rf_chan->action = cpu_to_le16(cmd_action);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 7a16b0c417a..e812db8b695 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
priv->tx_htinfo = resp->params.tx_rate.ht_info;
if (!priv->is_data_rate_auto)
priv->data_rate =
- mwifiex_index_to_data_rate(priv->tx_rate,
+ mwifiex_index_to_data_rate(priv, priv->tx_rate,
priv->tx_htinfo);
return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810e833..d7aa21da84d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (adapter->num_cmd_timeout && adapter->curr_cmd)
return;
priv->media_connected = false;
- if (!priv->disconnect) {
- priv->disconnect = 1;
- dev_dbg(adapter->dev, "info: successfully disconnected from"
- " %pM: reason code %d\n", priv->cfg_bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- cfg80211_disconnected(priv->netdev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- GFP_KERNEL);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ dev_dbg(adapter->dev, "info: successfully disconnected from"
+ " %pM: reason code %d\n", priv->cfg_bssid,
+ WLAN_REASON_DEAUTH_LEAVING);
+ if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, GFP_KERNEL);
}
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index ea4a29b7e33..470ca75ec25 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
int status = adapter->cmd_wait_q.status;
- struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+ struct cmd_ctrl_node *cmd_queued;
+ if (!adapter->cmd_queued)
+ return 0;
+
+ cmd_queued = adapter->cmd_queued;
adapter->cmd_queued = NULL;
+
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
@@ -234,7 +239,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
"associating...\n");
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
/* Clear any past association response stored for
* application retrieval */
@@ -265,7 +270,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
@@ -472,67 +477,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
}
/*
- * The function sets band configurations.
- *
- * it performs extra checks to make sure the Ad-Hoc
- * band and channel are compatible. Otherwise it returns an error.
- *
- */
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
- struct mwifiex_ds_band_cfg *radio_cfg)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u8 infra_band, adhoc_band;
- u32 adhoc_channel;
-
- infra_band = (u8) radio_cfg->config_bands;
- adhoc_band = (u8) radio_cfg->adhoc_start_band;
- adhoc_channel = radio_cfg->adhoc_channel;
-
- /* SET Infra band */
- if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- adapter->config_bands = infra_band;
-
- /* SET Ad-hoc Band */
- if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- if (adhoc_band)
- adapter->adhoc_start_band = adhoc_band;
- adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
- /*
- * If no adhoc_channel is supplied verify if the existing adhoc
- * channel complies with the new adhoc_band
- */
- if (!adhoc_channel) {
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band,
- priv->adhoc_channel)) {
- /* Pass back the default channel */
- radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
- if ((adapter->adhoc_start_band & BAND_A)
- || (adapter->adhoc_start_band & BAND_AN))
- radio_cfg->adhoc_channel =
- DEFAULT_AD_HOC_CHANNEL_A;
- }
- } else { /* Return error if adhoc_band and
- adhoc_channel combination is invalid */
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
- return -1;
- priv->adhoc_channel = (u8) adhoc_channel;
- }
- if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
- adapter->adhoc_11n_enabled = true;
- else
- adapter->adhoc_11n_enabled = false;
-
- return 0;
-}
-
-/*
* The function disables auto deep sleep mode.
*/
int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
@@ -832,8 +776,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
if (!ret) {
if (rate->is_rate_auto)
- rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
- priv->tx_htinfo);
+ rate->rate = mwifiex_index_to_data_rate(priv,
+ priv->tx_rate, priv->tx_htinfo);
else
rate->rate = priv->data_rate;
} else {
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 27430512f7c..5e1ef7e5da4 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
u16 rx_pkt_type;
struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
+ if (!priv)
+ return -1;
+
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = local_rx_pd->rx_pkt_type;
@@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
(u8) local_rx_pd->rx_pkt_type,
skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (priv && (ret == -1))
- priv->stats.rx_dropped++;
-
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
dev_kfree_skb_any(skb);
- }
+
+ if (ret)
+ priv->stats.rx_dropped++;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f412875..d9274a1b77a 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if (!priv)
goto done;
- priv->netdev->trans_start = jiffies;
+ mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
&& (tpriv->media_connected)) {
if (netif_queue_stopped(tpriv->netdev))
- netif_wake_queue(tpriv->netdev);
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+ adapter);
}
}
done:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 995695c28d5..7becea3dec6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -28,10 +28,10 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.12"
+#define MWL8K_VERSION "0.13"
/* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
module_param(ap_mode_default, bool, 0);
MODULE_PARM_DESC(ap_mode_default,
"Set to 1 to make ap mode the default instead of sta mode");
@@ -198,6 +198,7 @@ struct mwl8k_priv {
/* firmware access */
struct mutex fw_mutex;
struct task_struct *fw_mutex_owner;
+ struct task_struct *hw_restart_owner;
int fw_mutex_depth;
struct completion *hostcmd_wait;
@@ -262,6 +263,10 @@ struct mwl8k_priv {
*/
struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
+ /* To perform the task of reloading the firmware */
+ struct work_struct fw_reload;
+ bool hw_restart_in_progress;
+
/* async firmware loading state */
unsigned fw_state;
char *fw_pref;
@@ -738,10 +743,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
if (ready_code == MWL8K_FWAP_READY) {
- priv->ap_fw = 1;
+ priv->ap_fw = true;
break;
} else if (ready_code == MWL8K_FWSTA_READY) {
- priv->ap_fw = 0;
+ priv->ap_fw = false;
break;
}
@@ -1498,6 +1503,18 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
might_sleep();
+ /* Since a firmware restart is in progress, allow only the
+ * commands issued by the restart code and block all other
+ * commands, as they would fail anyway because the firmware
+ * has crashed
+ */
+ if (priv->hw_restart_in_progress) {
+ if (priv->hw_restart_owner == current)
+ return 0;
+ else
+ return -EBUSY;
+ }
+
/*
* The TX queues are stopped at this point, so this test
* doesn't need to take ->tx_lock.
@@ -1541,6 +1558,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
MWL8K_TX_WAIT_TIMEOUT_MS);
mwl8k_dump_tx_rings(hw);
+ priv->hw_restart_in_progress = true;
+ ieee80211_queue_work(hw, &priv->fw_reload);
rc = -ETIMEDOUT;
}
@@ -2058,7 +2077,9 @@ static int mwl8k_fw_lock(struct ieee80211_hw *hw)
rc = mwl8k_tx_wait_empty(hw);
if (rc) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
mutex_unlock(&priv->fw_mutex);
return rc;
@@ -2077,7 +2098,9 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
if (!--priv->fw_mutex_depth) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
priv->fw_mutex_owner = NULL;
mutex_unlock(&priv->fw_mutex);
}
@@ -4398,7 +4421,8 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_radio_disable(hw);
+ if (!priv->hw_restart_in_progress)
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -4499,6 +4523,16 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return 0;
}
+static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif)
+{
+ /* Has ieee80211_restart_hw re-added the removed interfaces? */
+ if (!priv->macids_used)
+ return;
+
+ priv->macids_used &= ~(1 << vif->macid);
+ list_del(&vif->list);
+}
+
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -4510,8 +4544,54 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->macids_used &= ~(1 << mwl8k_vif->macid);
- list_del(&mwl8k_vif->list);
+ mwl8k_remove_vif(priv, mwl8k_vif);
+}
+
+static void mwl8k_hw_restart_work(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, fw_reload);
+ struct ieee80211_hw *hw = priv->hw;
+ struct mwl8k_device_info *di;
+ int rc;
+
+ /* If some command is waiting for a response, clear it */
+ if (priv->hostcmd_wait != NULL) {
+ complete(priv->hostcmd_wait);
+ priv->hostcmd_wait = NULL;
+ }
+
+ priv->hw_restart_owner = current;
+ di = priv->device_info;
+ mwl8k_fw_lock(hw);
+
+ if (priv->ap_fw)
+ rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
+ else
+ rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
+
+ if (rc)
+ goto fail;
+
+ priv->hw_restart_owner = NULL;
+ priv->hw_restart_in_progress = false;
+
+ /*
+ * This unlock wakes up the queues and also opens the
+ * command path for other commands
+ */
+ mwl8k_fw_unlock(hw);
+
+ ieee80211_restart_hw(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restarted successfully\n");
+
+ return;
+fail:
+ mwl8k_fw_unlock(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restart failed\n");
}
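
Taken together, the hunks above implement the following recovery sequence; this is only a condensed restatement of the code in this patch, not additional driver logic:

    /*
     * mwl8k_tx_wait_empty()              detects stuck TX rings
     *   -> priv->hw_restart_in_progress = true
     *   -> ieee80211_queue_work(hw, &priv->fw_reload)
     * mwl8k_hw_restart_work()
     *   -> mwl8k_fw_lock(hw)
     *   -> mwl8k_reload_firmware(hw, fw_image)   (interfaces removed first)
     *   -> mwl8k_fw_unlock(hw)                   (re-opens the command path)
     *   -> ieee80211_restart_hw(hw)              (mac80211 re-adds interfaces)
     */
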
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -5024,7 +5104,11 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
rc = mwl8k_check_ba(hw, stream);
- if (!rc)
+ /* If a HW restart is in progress, mwl8k_post_cmd will
+ * return -EBUSY; avoid retrying mwl8k_check_ba in
+ * that case
+ */
+ if (!rc || rc == -EBUSY)
break;
/*
* HW queues take time to be flushed, give them
@@ -5044,14 +5128,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP:
- if (stream == NULL)
- break;
- if (stream->state == AMPDU_STREAM_ACTIVE) {
- spin_unlock(&priv->stream_lock);
- mwl8k_destroy_ba(hw, stream);
- spin_lock(&priv->stream_lock);
+ if (stream) {
+ if (stream->state == AMPDU_STREAM_ACTIVE) {
+ spin_unlock(&priv->stream_lock);
+ mwl8k_destroy_ba(hw, stream);
+ spin_lock(&priv->stream_lock);
+ }
+ mwl8k_remove_stream(hw, stream);
}
- mwl8k_remove_stream(hw, stream);
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -5263,12 +5347,15 @@ fail:
mwl8k_release_firmware(priv);
}
+#define MAX_RESTART_ATTEMPTS 1
static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
bool nowait)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
+ int count = MAX_RESTART_ATTEMPTS;
+retry:
/* Reset firmware and hardware */
mwl8k_hw_reset(priv);
@@ -5290,6 +5377,16 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
/* Reclaim memory once firmware is successfully loaded */
mwl8k_release_firmware(priv);
+ if (rc && count) {
+ /* FW did not start successfully;
+ * let's try one more time
+ */
+ count--;
+ wiphy_err(hw->wiphy, "Trying to reload the firmware again\n");
+ msleep(20);
+ goto retry;
+ }
+
return rc;
}
@@ -5365,7 +5462,14 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
goto err_free_queues;
}
- memset(priv->ampdu, 0, sizeof(priv->ampdu));
+ /*
+ * When a hw restart is requested, mac80211 will take care of
+ * clearing the ampdu streams, so do not clear the ampdu state here
+ */
+ if (!priv->hw_restart_in_progress)
+ memset(priv->ampdu, 0, sizeof(priv->ampdu));
/*
* Temporarily enable interrupts. Initial firmware host
@@ -5439,10 +5543,20 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
{
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *vif, *tmp_vif;
mwl8k_stop(hw);
mwl8k_rxq_deinit(hw, 0);
+ /*
+ * All the existing interfaces are re-added by ieee80211_reconfig,
+ * which means the driver should remove the existing interfaces
+ * before calling ieee80211_restart_hw
+ */
+ if (priv->hw_restart_in_progress)
+ list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list)
+ mwl8k_remove_vif(priv, vif);
+
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
@@ -5454,6 +5568,9 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
if (rc)
goto fail;
+ if (priv->hw_restart_in_progress)
+ return rc;
+
rc = mwl8k_start(hw);
if (rc)
goto fail;
@@ -5517,13 +5634,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
- priv->radio_on = 0;
- priv->radio_short_preamble = 0;
+ priv->radio_on = false;
+ priv->radio_short_preamble = false;
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
/* Handle watchdog ba events */
INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
+ /* To reload the firmware if it crashes */
+ INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
/* TX reclaim and RX tasklets. */
tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -5667,6 +5786,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
if (rc)
goto err_stop_firmware;
+
+ priv->hw_restart_in_progress = false;
+
return rc;
err_stop_firmware:
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index b52acc4b408..9fb77d0319f 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
MODULE_PARM_DESC(orinoco_debug, "Debug level");
#endif
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
module_param(suppress_linkstatus, bool, 0644);
MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 0793e4265b4..ae8ce56670b 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1759,32 +1759,7 @@ static struct usb_driver orinoco_driver = {
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" (Manuel Estrada Sainz)";
-static int __init ezusb_module_init(void)
-{
- int err;
-
- printk(KERN_DEBUG "%s\n", version);
-
- /* register this driver with the USB subsystem */
- err = usb_register(&orinoco_driver);
- if (err < 0) {
- printk(KERN_ERR PFX "usb_register failed, error %d\n",
- err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ezusb_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&orinoco_driver);
-}
-
-
-module_init(ezusb_module_init);
-module_exit(ezusb_module_exit);
+module_usb_driver(orinoco_driver);
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e99ca1c1e0d..96e39edfec7 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -76,6 +76,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
u8 *ie;
u8 ie_buf[46];
u64 timestamp;
@@ -121,9 +122,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
- cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
- capability, beacon_interval, ie_buf, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
+ capability, beacon_interval, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -132,6 +134,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
const u8 *ie;
u64 timestamp;
s32 signal;
@@ -152,9 +155,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie = bss->data;
signal = SIGNAL_TO_MBM(bss->level);
- cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
- capability, beacon_interval, ie, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
+ capability, beacon_interval, ie, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_hostscan_results(struct orinoco_private *priv,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index db4d9a02f26..af2ca1a9c7d 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -27,7 +27,7 @@
#include "p54.h"
#include "lmac.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb9..7faed62c637 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -581,15 +581,9 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
struct p54s_priv *priv = dev->priv;
unsigned long flags;
- if (mutex_lock_interruptible(&priv->mutex)) {
- /* FIXME: how to handle this error? */
- return;
- }
-
+ mutex_lock(&priv->mutex);
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +591,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +652,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
@@ -703,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
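The reordering above waits for the work item only after the mutex is released, which avoids a deadlock if the work callback also takes the same mutex; a rough sketch of the stop-path ordering, with hypothetical names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_priv {
	struct mutex mutex;
	struct work_struct work;
	bool running;
};

/* Hedged sketch: tear down state under the lock, then wait for the
 * worker outside it, since the work callback may take priv->mutex. */
static void example_stop(struct example_priv *priv)
{
	mutex_lock(&priv->mutex);
	priv->running = false;
	mutex_unlock(&priv->mutex);

	cancel_work_sync(&priv->work);
}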
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b609686642..f4d28c39aac 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -1083,15 +1083,4 @@ static struct usb_driver p54u_driver = {
.soft_unbind = 1,
};
-static int __init p54u_init(void)
-{
- return usb_register(&p54u_driver);
-}
-
-static void __exit p54u_exit(void)
-{
- usb_deregister(&p54u_driver);
-}
-
-module_init(p54u_init);
-module_exit(p54u_exit);
+module_usb_driver(p54u_driver);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6ed9c323e3c..42b97bc3847 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
skb_unlink(skb, &priv->tx_queue);
p54_tx_qos_accounting_free(priv, skb);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
@@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
return;
}
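Dropped TX frames that came from mac80211 are now returned through ieee80211_free_txskb(), which lets the stack do TX status and socket accounting; a minimal sketch:

#include <net/mac80211.h>

/* Hedged sketch: free a mac80211-owned TX skb via the mac80211 helper
 * rather than dev_kfree_skb_any(). */
static void example_drop_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	ieee80211_free_txskb(hw, skb);
}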
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582..4e44b1af119 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
- PRISM2_SET_ENCRYPTION = 6,
- PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- PRISM2_HOSTAPD_MLME = 13,
- PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(dev);
- int rvalue = 0, force = 0;
- int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
- union oid_res_t r;
-
- /* with the new API, it's impossible to get a NULL pointer.
- * New version of iwconfig set the IW_ENCODE_NOKEY flag
- * when no key is given, but older versions don't. */
-
- if (param->u.crypt.key_len > 0) {
- /* we have a key to set */
- int index = param->u.crypt.idx;
- int current_index;
- struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
- /* get the current key index */
- rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
- current_index = r.u;
- /* Verify that the key is not marked as invalid */
- if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
- key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
- sizeof (param->u.crypt.key) : param->u.crypt.key_len;
- memcpy(key.key, param->u.crypt.key, key.length);
- if (key.length == 32)
- /* we want WPA-PSK */
- key.type = DOT11_PRIV_TKIP;
- if ((index < 0) || (index > 3))
- /* no index provided use the current one */
- index = current_index;
-
- /* now send the key to the card */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
- &key);
- }
- /*
- * If a valid key is set, encryption should be enabled
- * (user may turn it off later).
- * This is also how "iwconfig ethX key on" works
- */
- if ((index == current_index) && (key.length > 0))
- force = 1;
- } else {
- int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
- if ((index >= 0) && (index <= 3)) {
- /* we want to set the key index */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
- &index);
- } else {
- if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
- /* we cannot do anything. Complain. */
- return -EINVAL;
- }
- }
- }
- /* now read the flags */
- if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
- * authen = DOT11_AUTH_OS;
- * invoke = 0;
- * exunencrypt = 0; */
- }
- if (param->u.crypt.flags & IW_ENCODE_OPEN)
- /* Encode but accept non-encoded packets. No auth */
- invoke = 1;
- if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
- /* Refuse non-encoded packets. Auth */
- authen = DOT11_AUTH_BOTH;
- invoke = 1;
- exunencrypt = 1;
- }
- /* do the change if requested */
- if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
- rvalue |=
- mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
- &exunencrypt);
- }
- return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(ndev);
- int max_len, len, alen, ret=0;
- struct obj_attachment *attach;
-
- len = param->u.generic_elem.len;
- max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
- if (max_len < 0 || max_len < len)
- return -EINVAL;
-
- alen = sizeof(*attach) + len;
- attach = kzalloc(alen, GFP_KERNEL);
- if (attach == NULL)
- return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
- /* Note: endianness is covered by mgt_set_varlen */
-
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_ASSOC_REQ << 4);
- attach->id = -1;
- attach->size = len;
- memcpy(attach->data, param->u.generic_elem.data, len);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0) {
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0)
- printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
- ndev->name);
- }
-
- kfree(attach);
- return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
- return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
- struct prism2_hostapd_param *param)
-{
- islpci_private *priv = netdev_priv(ndev);
- struct iw_request_info info;
- int i, rvalue;
- struct obj_bsslist *bsslist;
- u32 noise = 0;
- char *extra = "";
- char *current_ev = "foo";
- union oid_res_t r;
-
- if (islpci_get_state(priv) < PRV_STATE_INIT) {
- /* device is not ready, fail gently */
- return 0;
- }
-
- /* first get the noise value. We will use it to report the link quality */
- rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
- noise = r.u;
-
- /* Ask the device for a list of known bss. We can report at most
- * IW_MAX_AP=64 to the range struct. But the device won't repport anything
- * if you change the value of IWMAX_BSS=24.
- */
- rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
- bsslist = r.ptr;
-
- info.cmd = PRISM54_HOSTAPD;
- info.flags = 0;
-
- /* ok now, scan the list and translate its info */
- for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
- current_ev = prism54_translate_bss(ndev, &info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &(bsslist->bsslist[i]),
- noise);
- kfree(bsslist);
-
- return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
- struct prism2_hostapd_param *param;
- int ret = 0;
- u32 uwrq;
-
- printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
- if (p->length < sizeof(struct prism2_hostapd_param) ||
- p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
- if (IS_ERR(param))
- return PTR_ERR(param);
-
- switch (param->cmd) {
- case PRISM2_SET_ENCRYPTION:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
- ndev->name);
- ret = prism2_ioctl_set_encryption(ndev, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
- ndev->name);
- ret = prism2_ioctl_set_generic_element(ndev, param,
- p->length);
- break;
- case PRISM2_HOSTAPD_MLME:
- printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
- ndev->name);
- ret = prism2_ioctl_mlme(ndev, param);
- break;
- case PRISM2_HOSTAPD_SCAN_REQ:
- printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
- ndev->name);
- ret = prism2_ioctl_scan_req(ndev, param);
- break;
- case PRISM54_SET_WPA:
- printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
- ndev->name);
- uwrq = 1;
- ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
- break;
- case PRISM54_DROP_UNENCRYPTED:
- printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
- ndev->name);
-#if 0
- uwrq = 0x01;
- mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
- down_write(&priv->mib_sem);
- mgt_commit(priv);
- up_write(&priv->mib_sem);
-#endif
- /* Not necessary, as set_wpa does it, should we just do it here though? */
- ret = 0;
- break;
- default:
- printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
- ndev->name);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- return ret;
-}
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *) rq;
- int ret = -1;
- switch (cmd) {
- case PRISM54_HOSTAPD:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- ret = prism54_hostapd(ndev, &wrq->u.data);
- return ret;
- }
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9281d..a34bceb6e3c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
int prism54_set_mac_address(struct net_device *, void *);
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
extern const struct iw_handler_def prism54_handler_def;
#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5d0f61508a2..5970ff6f40c 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
static void islpci_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops islpci_ethtool_ops = {
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
static const struct net_device_ops islpci_netdev_ops = {
.ndo_open = islpci_open,
.ndo_stop = islpci_close,
- .ndo_do_ioctl = prism54_ioctl,
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,
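strlcpy() bounds the copy to the fixed-size ethtool_drvinfo fields and guarantees NUL termination; a minimal sketch with hypothetical name and version macros:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_DRV_NAME	"example"
#define EXAMPLE_DRV_VERSION	"1.0"

/* Hedged sketch: never copy more than the destination field can hold. */
static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, EXAMPLE_DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, EXAMPLE_DRV_VERSION, sizeof(info->version));
}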
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0021e494851..04fec1fa6e0 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
del_timer(&local->timer);
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
/* UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
*/
pr_debug("Deauthentication frame received\n");
local->authentication_state = UNAUTHENTICATED;
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index d7646f299bd..3c3b98b152c 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -566,9 +566,9 @@ struct phy_header {
UCHAR hdr_3;
UCHAR hdr_4;
};
-struct rx_msg {
+struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[1];
+ UCHAR var[0];
};
struct tx_msg {
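Shrinking var[1] to var[0] turns the trailing member into a zero-length array, so sizeof() covers only the fixed header and no phantom payload byte; a small sketch under that assumption:

/* Hedged sketch: a zero-length trailing array marks where variable-
 * length payload follows; sizeof(struct example_msg) == 4. */
struct example_msg {
	unsigned char hdr[4];
	unsigned char var[0];	/* payload, length tracked elsewhere */
};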
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 0c13840a7de..a330c69583d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
NDIS_80211_POWER_MODE_FAST_PSP,
};
+enum ndis_80211_pmkid_cand_list_flag_bits {
+ NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
struct ndis_80211_auth_request {
__le32 length;
u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
struct ndis_80211_bssid_info {
u8 bssid[6];
u8 pmkid[16];
-};
+} __packed;
struct ndis_80211_pmkid {
__le32 length;
__le32 bssid_info_count;
struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
/*
* private data
*/
-#define NET_TYPE_11FB 0
-
#define CAP_MODE_80211A 1
#define CAP_MODE_80211B 2
#define CAP_MODE_80211G 4
@@ -414,6 +416,7 @@ struct ndis_80211_pmkid {
#define RNDIS_WLAN_ALG_TKIP (1<<1)
#define RNDIS_WLAN_ALG_CCMP (1<<2)
+#define RNDIS_WLAN_NUM_KEYS 4
#define RNDIS_WLAN_KEY_MGMT_NONE 0
#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0)
#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1)
@@ -516,7 +519,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
- struct rndis_wlan_encr_key encr_keys[4];
+ struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
int wpa_version;
u8 command_buffer[COMMAND_BUFFER_SIZE];
@@ -1346,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
return ret;
}
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+ u16 *beacon_interval)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ struct ndis_80211_conf config;
+ int len, ret;
+
+ /* Get channel and beacon interval */
+ len = sizeof(config);
+ ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+ __func__, ret);
+ if (ret < 0)
+ return NULL;
+
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+ if (!channel)
+ return NULL;
+
+ if (beacon_interval)
+ *beacon_interval = le16_to_cpu(config.beacon_period);
+ return channel;
+}
+
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
int index)
@@ -1535,6 +1564,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
bool is_wpa;
int ret;
+ if (index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
if (priv->encr_keys[index].len == 0)
return 0;
@@ -1972,11 +2004,12 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
- struct ndis_80211_bssid_ex *bssid)
+static bool rndis_bss_info_update(struct usbnet *usbdev,
+ struct ndis_80211_bssid_ex *bssid)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
@@ -2015,9 +2048,12 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
capability = le16_to_cpu(fixed->capabilities);
beacon_interval = le16_to_cpu(fixed->beacon_interval);
- return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
timestamp, capability, beacon_interval, ie, ie_len, signal,
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+
+ return (bss != NULL);
}
static struct ndis_80211_bssid_ex *next_bssid_list_item(
@@ -2451,6 +2487,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
+ if (key_index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
priv->encr_tx_key_index = key_index;
if (is_wpa_key(priv, key_index))
@@ -2639,12 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
- struct ndis_80211_conf config;
struct ndis_80211_ssid ssid;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval;
+ u16 beacon_interval = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2669,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
}
/* Get channel and beacon interval */
- len = sizeof(config);
- ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
- netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
- __func__, ret);
- if (ret >= 0) {
- beacon_interval = le16_to_cpu(config.beacon_period);
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
- if (!channel) {
- netdev_warn(usbdev->net, "%s(): could not get channel."
- "\n", __func__);
- return;
- }
- } else {
- netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
- __func__);
+ channel = get_current_channel(usbdev, &beacon_interval);
+ if (!channel) {
+ netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+ __func__);
return;
}
@@ -2714,9 +2741,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
bssid, (u32)timestamp, capability, beacon_interval, ie_len,
ssid.essid, signal);
- cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
timestamp, capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
+ cfg80211_put_bss(bss);
}
/*
@@ -2828,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
req_ie_len, resp_ie,
resp_ie_len, 0, GFP_KERNEL);
else
- cfg80211_roamed(usbdev->net, NULL, bssid,
- req_ie, req_ie_len,
+ cfg80211_roamed(usbdev->net,
+ get_current_channel(usbdev, NULL),
+ bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -2995,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
+ bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
- netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
- i, le32_to_cpu(cand->flags), cand->bssid);
-
-#if 0
- struct iw_pmkid_cand pcand;
- union iwreq_data wrqu;
-
- memset(&pcand, 0, sizeof(pcand));
- if (le32_to_cpu(cand->flags) & 0x01)
- pcand.flags |= IW_PMKID_CAND_PREAUTH;
- pcand.index = i;
- memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), preauth, cand->bssid);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(pcand);
- wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
- (u8 *)&pcand);
-#endif
+ cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+ preauth, GFP_ATOMIC);
}
}
@@ -3754,17 +3771,7 @@ static struct usb_driver rndis_wlan_driver = {
.resume = usbnet_resume,
};
-static int __init rndis_wlan_init(void)
-{
- return usb_register(&rndis_wlan_driver);
-}
-module_init(rndis_wlan_init);
-
-static void __exit rndis_wlan_exit(void)
-{
- usb_deregister(&rndis_wlan_driver);
-}
-module_exit(rndis_wlan_exit);
+module_usb_driver(rndis_wlan_driver);
MODULE_AUTHOR("Bjorge Dijkstra");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 53c5f878f61..1de9c752c88 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -1982,15 +1982,4 @@ static struct usb_driver rt2500usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt2500usb_init(void)
-{
- return usb_register(&rt2500usb_driver);
-}
-
-static void __exit rt2500usb_exit(void)
-{
- usb_deregister(&rt2500usb_driver);
-}
-
-module_init(rt2500usb_init);
-module_exit(rt2500usb_exit);
+module_usb_driver(rt2500usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4778620347c..2571a2fa3d0 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -50,7 +50,7 @@
* RF2853 2.4G/5G 3T3R
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
@@ -66,7 +66,7 @@
#define RF2853 0x000a
#define RF3320 0x000b
#define RF3322 0x000c
-#define RF3853 0x000d
+#define RF3053 0x000d
#define RF5370 0x5370
#define RF5390 0x5390
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186..22a1a8fc6e0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+ !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -1942,19 +1944,24 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
}
- if (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3320))
+ switch (rt2x00dev->chip.rf) {
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3320:
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF3052))
+ break;
+ case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5390))
+ break;
+ case RF5370:
+ case RF5390:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
- else
+ break;
+ default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
+ }
/*
* Change BBP settings
@@ -3771,7 +3778,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
@@ -3930,15 +3937,18 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_rt(rt2x00dev, RT2860) &&
- !rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT3070) &&
- !rt2x00_rt(rt2x00dev, RT3071) &&
- !rt2x00_rt(rt2x00dev, RT3090) &&
- !rt2x00_rt(rt2x00dev, RT3390) &&
- !rt2x00_rt(rt2x00dev, RT3572) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ case RT3390:
+ case RT3572:
+ case RT5390:
+ break;
+ default:
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -4552,6 +4562,9 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel_time_ext_busy = busy_ext / 1000;
}
+ if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ survey->filled |= SURVEY_INFO_IN_USE;
+
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index da48c8ac27b..4941a1a2321 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -50,7 +50,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 377876315b8..262ee9eefb6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -400,10 +400,10 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
/*
* The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
* TXWI + 802.11 header + L2 pad + payload + pad,
- * so need to decrease size of TXINFO and USB end pad.
+ * so need to decrease size of TXINFO.
*/
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- entry->skb->len - TXINFO_DESC_SIZE - 4);
+ roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -421,37 +421,20 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
-static void rt2800usb_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc)
+/*
+ * TX data initialization
+ */
+static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
{
- unsigned int len;
- int err;
-
- rt2800_write_tx_data(entry, txdesc);
-
/*
- * pad(1~3 bytes) is added after each 802.11 payload.
- * USB end pad(4 bytes) is added at each USB bulk out packet end.
+ * pad(1~3 bytes) is needed after each 802.11 payload.
+ * USB end pad(4 bytes) is needed at each USB bulk out packet end.
* TX frame format is :
* | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
* |<------------- tx_pkt_len ------------->|
*/
- len = roundup(entry->skb->len, 4) + 4;
- err = skb_padto(entry->skb, len);
- if (unlikely(err)) {
- WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
- return;
- }
-
- entry->skb->len = len;
-}
-/*
- * TX data initialization
- */
-static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
-{
- return entry->skb->len;
+ return roundup(entry->skb->len, 4) + 4;
}
/*
@@ -807,7 +790,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.flush_queue = rt2x00usb_flush_queue,
.tx_dma_done = rt2800usb_tx_dma_done,
.write_tx_desc = rt2800usb_write_tx_desc,
- .write_tx_data = rt2800usb_write_tx_data,
+ .write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
@@ -914,12 +897,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x8053) },
{ USB_DEVICE(0x050d, 0x805c) },
{ USB_DEVICE(0x050d, 0x815c) },
+ { USB_DEVICE(0x050d, 0x825a) },
{ USB_DEVICE(0x050d, 0x825b) },
{ USB_DEVICE(0x050d, 0x935a) },
{ USB_DEVICE(0x050d, 0x935b) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8) },
{ USB_DEVICE(0x0411, 0x0158) },
+ { USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
/* Corega */
@@ -934,6 +919,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0e) },
{ USB_DEVICE(0x07d1, 0x3c0f) },
{ USB_DEVICE(0x07d1, 0x3c11) },
+ { USB_DEVICE(0x07d1, 0x3c13) },
+ { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
@@ -943,6 +930,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7711) },
{ USB_DEVICE(0x7392, 0x7717) },
{ USB_DEVICE(0x7392, 0x7718) },
+ { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480) },
{ USB_DEVICE(0x203d, 0x14a9) },
@@ -976,6 +964,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13b1, 0x0031) },
{ USB_DEVICE(0x1737, 0x0070) },
{ USB_DEVICE(0x1737, 0x0071) },
+ { USB_DEVICE(0x1737, 0x0077) },
+ { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162) },
{ USB_DEVICE(0x0789, 0x0163) },
@@ -999,9 +989,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0db0, 0x871b) },
{ USB_DEVICE(0x0db0, 0x871c) },
{ USB_DEVICE(0x0db0, 0x899a) },
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
/* Para */
{ USB_DEVICE(0x20b8, 0x8888) },
/* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x000c) },
{ USB_DEVICE(0x1d4d, 0x000e) },
{ USB_DEVICE(0x1d4d, 0x0011) },
@@ -1054,7 +1048,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sparklan */
{ USB_DEVICE(0x15a9, 0x0006) },
/* Sweex */
+ { USB_DEVICE(0x177f, 0x0153) },
{ USB_DEVICE(0x177f, 0x0302) },
+ { USB_DEVICE(0x177f, 0x0313) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
@@ -1138,27 +1134,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13d3, 0x3322) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x1003) },
- { USB_DEVICE(0x050d, 0x825a) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x012e) },
{ USB_DEVICE(0x0411, 0x0148) },
{ USB_DEVICE(0x0411, 0x0150) },
- { USB_DEVICE(0x0411, 0x015d) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x0041) },
{ USB_DEVICE(0x07aa, 0x0042) },
{ USB_DEVICE(0x18c5, 0x0008) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
- { USB_DEVICE(0x07d1, 0x3c13) },
- { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3c17) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x4085) },
- { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
+ /* Fujitsu Stylistic 550 */
+ { USB_DEVICE(0x1690, 0x0761) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010) },
/* Gigabyte */
@@ -1170,20 +1163,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605) },
{ USB_DEVICE(0x1740, 0x0615) },
- /* Linksys */
- { USB_DEVICE(0x1737, 0x0077) },
- { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0168) },
{ USB_DEVICE(0x0789, 0x0169) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032) },
- /* Ovislink */
- { USB_DEVICE(0x1b75, 0x3071) },
- { USB_DEVICE(0x1b75, 0x3072) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101) },
- { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0x5201) },
@@ -1202,9 +1188,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x083a, 0xc522) },
{ USB_DEVICE(0x083a, 0xd522) },
{ USB_DEVICE(0x083a, 0xf511) },
- /* Sweex */
- { USB_DEVICE(0x177f, 0x0153) },
- { USB_DEVICE(0x177f, 0x0313) },
/* Zyxel */
{ USB_DEVICE(0x0586, 0x341a) },
#endif
@@ -1234,15 +1217,4 @@ static struct usb_driver rt2800usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt2800usb_init(void)
-{
- return usb_register(&rt2800usb_driver);
-}
-
-static void __exit rt2800usb_exit(void)
-{
- usb_deregister(&rt2800usb_driver);
-}
-
-module_init(rt2800usb_init);
-module_exit(rt2800usb_exit);
+module_usb_driver(rt2800usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 99ff12d0c29..b03b22c47b1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,9 +189,9 @@ struct rt2x00_chip {
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
-#define RT3593 0x3593 /* PCIe */
+#define RT3593 0x3593
#define RT3883 0x3883 /* WSOC */
-#define RT5390 0x5390 /* 2.4GHz */
+#define RT5390 0x5390 /* 2.4GHz */
u16 rf;
u16 rev;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index edd317fa7c0..c3e1aa7c1a8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
if (spec->supported_rates & SUPPORT_RATE_OFDM)
num_rates += 8;
- channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+ channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+ rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
if (!rates)
goto exit_free_channels;
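kcalloc() checks the element-count multiplication for overflow before handing back zeroed memory, which is why it replaces kzalloc(n * size) here; a minimal sketch:

#include <linux/slab.h>
#include <net/cfg80211.h>

/* Hedged sketch: overflow-checked, zeroed array allocation. */
static struct ieee80211_channel *example_alloc_channels(unsigned int n)
{
	return kcalloc(n, sizeof(struct ieee80211_channel), GFP_KERNEL);
}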
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff0780..ede3c58e678 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
exit_free_skb:
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1e31050dafc..2eea3866504 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -298,12 +298,22 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
return false;
/*
- * USB devices cannot blindly pass the skb->len as the
- * length of the data to usb_fill_bulk_urb. Pass the skb
- * to the driver to determine what the length should be.
+ * USB devices require certain padding at the end of each frame
+ * and urb. Those paddings are not included in skbs. Pass entry
+ * to the driver to determine what the overall length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
+ status = skb_padto(entry->skb, length);
+ if (unlikely(status)) {
+ /* TODO: report something more appropriate than IO_FAILED. */
+ WARNING(rt2x00dev, "TX SKB padding error, out of memory\n");
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ rt2x00lib_dmadone(entry);
+
+ return false;
+ }
+
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
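Padding is now done in the shared USB kick path: the driver reports the padded length and skb_padto() zero-extends the buffer before the URB is filled; a rough sketch, noting that skb_padto() does not update skb->len, so the padded length is passed to usb_fill_bulk_urb() explicitly:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hedged sketch: 802.11 payload padded to a 4-byte boundary plus a
 * 4-byte USB end pad; returns the length to hand to the bulk URB. */
static int example_usb_tx_len(struct sk_buff *skb)
{
	unsigned int len = roundup(skb->len, 4) + 4;

	if (skb_padto(skb, len))
		return -ENOMEM;

	return len;
}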
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index bf55b4a311e..e0c6d117429 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -41,7 +41,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index cfb19dbb0a6..e477a964081 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -2528,15 +2528,4 @@ static struct usb_driver rt73usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt73usb_init(void)
-{
- return usb_register(&rt73usb_driver);
-}
-
-static void __exit rt73usb_exit(void)
-{
- usb_deregister(&rt73usb_driver);
-}
-
-module_init(rt73usb_init);
-module_exit(rt73usb_exit);
+module_usb_driver(rt73usb_driver);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4a78f9e39df..638fbef693e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1592,15 +1592,4 @@ static struct usb_driver rtl8187_driver = {
.disconnect = __devexit_p(rtl8187_disconnect),
};
-static int __init rtl8187_init(void)
-{
- return usb_register(&rtl8187_driver);
-}
-
-static void __exit rtl8187_exit(void)
-{
- usb_deregister(&rtl8187_driver);
-}
-
-module_init(rtl8187_init);
-module_exit(rtl8187_exit);
+module_usb_driver(rtl8187_driver);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index b4ce93436d2..8d6eb0f56c0 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
if (is_valid_ether_addr(rtlefuse->dev_addr)) {
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
- u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
- get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
- SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
}
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
u8 valid = 0;
/*set init state to on */
- rtlpriv->rfkill.rfkill_state = 1;
+ rtlpriv->rfkill.rfkill_state = true;
wiphy_rfkill_set_hw_state(hw->wiphy, 0);
radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
@@ -448,12 +448,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
+ mutex_init(&rtlpriv->locks.ps_mutex);
spin_lock_init(&rtlpriv->locks.ips_lock);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
spin_lock_init(&rtlpriv->locks.rf_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae905983d0..f66b5757f6b 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PS_POLL_AID(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+ (*(u16 *)((u8 *)(_hdr) + 2) = _val)
#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index eb61061821e..39e0907a3c4 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
u8 init_aspm;
ppsc->reg_rfps_level = 0;
- ppsc->support_aspm = 0;
+ ppsc->support_aspm = false;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
rtlpriv->mac80211.offchan_delay = true;
- rtlpriv->psc.state_inap = 1;
+ rtlpriv->psc.state_inap = true;
} else {
- rtlpriv->psc.state_inap = 0;
+ rtlpriv->psc.state_inap = false;
}
}
@@ -610,7 +610,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
}
@@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
dev_kfree_skb_any(skb);
@@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
+ irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/*Shared IRQ or HW disappared */
- if (!inta || inta == 0xffff)
+ if (!inta || inta == 0xffff) {
+ ret = IRQ_NONE;
goto done;
+ }
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -890,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (rtlpriv->rtlhal.earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
-
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
+ return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -903,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -945,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_leave_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_lps_leave(hw);
+}
+
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1006,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
- tasklet_init(&rtlpriv->works.ips_leave_tasklet,
- (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
- (unsigned long)hw);
+ INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1478,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1553,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
set_hal_stop(rtlhal);
rtlpriv->cfg->ops->disable_interrupt(hw);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
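rtl_lps_leave() can now sleep, so it is deferred to a work item (process context) instead of a tasklet (softirq context); a minimal sketch of the pattern, with illustrative names:

#include <linux/workqueue.h>

struct example_works {
	struct work_struct lps_leave_work;
};

/* Hedged sketch: work callbacks run in process context and may sleep;
 * container_of() recovers the enclosing structure. */
static void example_lps_leave_work(struct work_struct *work)
{
	struct example_works *w =
		container_of(work, struct example_works, lps_leave_work);

	/* sleeping power-save exit would go here */
	(void)w;
}

/* setup:    INIT_WORK(&w->lps_leave_work, example_lps_leave_work);
 * trigger:  schedule_work(&w->lps_leave_work);
 * teardown: cancel_work_sync(&w->lps_leave_work); */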
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db526284454..130fdd99d57 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -237,11 +237,12 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
+ unsigned long flags;
if (mac->opmode != NL80211_IFTYPE_STATION)
return;
- spin_lock(&rtlpriv->locks.ips_lock);
+ spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -257,7 +258,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
/*for FW LPS*/
@@ -395,7 +396,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +408,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/*Leave the leisure power save mode.*/
@@ -417,7 +418,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
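The power-save sections above may sleep, so the plain lps spinlock becomes a mutex, while the short IPS critical section keeps a spinlock but now saves and restores interrupt flags; a brief sketch of the two idioms, with hypothetical fields:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct example_locks {
	struct mutex ps_mutex;	/* sections that may sleep */
	spinlock_t ips_lock;	/* short, atomic sections */
};

static void example_locking(struct example_locks *l)
{
	unsigned long flags;

	mutex_lock(&l->ps_mutex);	/* sleeping allowed here */
	mutex_unlock(&l->ps_mutex);

	spin_lock_irqsave(&l->ips_lock, flags);	/* no sleeping here */
	spin_unlock_irqrestore(&l->ips_lock, flags);
}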
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 950c65a15b8..931d97979b0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+ u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ blockSize);
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ remainSize);
+ }
+}
+
static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
const u8 *buffer, u32 size)
{
@@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
u8 *bufferPtr = (u8 *) buffer;
u32 *pu4BytePtr = (u32 *) buffer;
u32 i, offset, blockCount, remainSize;
+ u32 data;
+ if (rtlpriv->io.writeN_sync) {
+ rtl_block_fw_writeN(hw, buffer, size);
+ return;
+ }
blockCount = size / blockSize;
remainSize = size % blockSize;
+ if (remainSize) {
+ /* the last word is < 4 bytes - pad it with zeros */
+ for (i = 0; i < 4 - remainSize; i++)
+ *(bufferPtr + size + i) = 0;
+ blockCount++;
+ }
for (i = 0; i < blockCount; i++) {
offset = i * blockSize;
+ /* for big-endian platforms, the firmware data need to be byte
+ * swapped as it was read as a byte string and will be written
+ * as 32-bit dwords and byte swapped when written
+ */
+ data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
+ data);
}
}
@@ -227,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
if (!rtlhal->pfirmware)
return 1;
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
("Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (uint)sizeof(struct rtl92c_firmware_header)));
+ le16_to_cpu(pfwheader->version),
+ le16_to_cpu(pfwheader->signature),
+ (uint)sizeof(struct rtl92c_firmware_header)));
pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c1262..cec5a3a1cc5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
#define FW_8192C_SIZE 0x3000
#define FW_8192C_START_ADDRESS 0x1000
-#define FW_8192C_END_ADDRESS 0x3FFF
+#define FW_8192C_END_ADDRESS 0x1FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
- (_pfwhdr->signature&0xFFF0) == 0x88C0)
+ ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+ (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
struct rtl92c_firmware_header {
- u16 signature;
+ __le16 signature;
u8 category;
u8 function;
- u16 version;
+ __le16 version;
u8 subversion;
u8 rsvd1;
u8 month;
u8 date;
u8 hour;
u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
#endif
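Declaring the on-wire firmware-header fields as __le16/__le32 lets sparse flag missing conversions and makes the reads correct on big-endian hosts via le16_to_cpu(); a minimal sketch under those assumptions:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hedged sketch: little-endian wire fields are annotated and converted
 * at the point of use. */
struct example_fw_header {
	__le16 signature;
	__le16 version;
};

static bool example_header_valid(const struct example_fw_header *hdr)
{
	return (le16_to_cpu(hdr->signature) & 0xFFF0) == 0x92C0;
}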
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac592..3b585aadabf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index f2aa33dc4d7..89ef6982ce5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtl8192ce_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05df51e..124cf633861 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
}
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
hwinfo, HWSET_MAX_SIZE);
- eeprom_id = *((u16 *)&hwinfo[0]);
+ eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+ rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
(" VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->eeprom_version =
+ le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -2435,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
"%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
}
if (actuallyset) {
- ppsc->hwradiooff = 1;
+ ppsc->hwradiooff = true;
if (e_rfpowerstate_toset == ERFON) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f4a88..9e0c8fcdf90 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
}
rtlhal->version = (enum version_8192c)chip_version;
+ pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df8..e49cf2244c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c244f2f1b83..6d2ca773bbc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
const struct firmware *firmware;
int err;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
@@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
+ /* RTL8188CTV */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
@@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
- /* 8191cu 1*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
@@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
- {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+ /*SW-WF02-AD15 -Abocom*/
+ {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
@@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
+ /****** 8188 RU ********/
+ /* Netcore */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+
+ /****** 8188CUS Slim Solo********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+
+ /****** 8188CUS Slim Combo ********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
+
/****** 8192CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
+ {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
@@ -356,15 +384,4 @@ static struct usb_driver rtl8192cu_driver = {
#endif
};
-static int __init rtl8192cu_init(void)
-{
- return usb_register(&rtl8192cu_driver);
-}
-
-static void __exit rtl8192cu_exit(void)
-{
- usb_deregister(&rtl8192cu_driver);
-}
-
-module_init(rtl8192cu_init);
-module_exit(rtl8192cu_exit);
+module_usb_driver(rtl8192cu_driver);
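
module_usb_driver() hides exactly the init/exit boilerplate removed above. Roughly, paraphrased from include/linux/usb.h (the exact expansion goes through module_driver() and may differ between kernel versions), it generates something equivalent to:

/* Approximate expansion of module_usb_driver(rtl8192cu_driver) */
static int __init rtl8192cu_driver_init(void)
{
        return usb_register(&rtl8192cu_driver);
}
module_init(rtl8192cu_driver_init);

static void __exit rtl8192cu_driver_exit(void)
{
        usb_deregister(&rtl8192cu_driver);
}
module_exit(rtl8192cu_driver_exit);
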
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b147f44..b3cc7b94999 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
checksum = checksum ^ (*(ptr + index));
- SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
}
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
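
The trx.c hunk stores the computed checksum back into the descriptor as little-endian via cpu_to_le16(). The checksum itself is an XOR over the first 16 16-bit words of the descriptor, taken with the checksum field cleared first. A minimal sketch over a raw byte buffer, assuming little-endian words (the helper name is illustrative, not a driver macro):

#include <stdint.h>
#include <stddef.h>

/* XOR of the first 16 little-endian 16-bit words of the descriptor,
 * computed after the checksum field has been zeroed. */
static uint16_t tx_desc_checksum(const uint8_t *desc)
{
        uint16_t checksum = 0;
        size_t i;

        for (i = 0; i < 16; i++)
                checksum ^= (uint16_t)desc[2 * i] |
                            ((uint16_t)desc[2 * i + 1] << 8);
        return checksum;
}
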
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f5bd3a3cd34..9d89d7ccdaf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -466,8 +466,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
bool int_migration = *(bool *) (val);
if (int_migration) {
- /* Set interrrupt migration timer and
- * corresponging Tx/Rx counter.
+ /* Set interrupt migration timer and
+ * corresponding Tx/Rx counter.
* timer 25ns*0xfa0=100us for 0xf packets.
* 0x306:Rx, 0x307:Tx */
rtl_write_dword(rtlpriv, REG_INT_MIG, 0xfe000fa0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c550..0883349e1c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 149493f4c25..7911c9c8708 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->dm.useramask = 1;
+ rtlpriv->dm.useramask = true;
/* dual mac */
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979..f10ac1ad908 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 92f49d522c5..78723cf5949 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
int err = 0;
u16 earlyrxthreshold = 7;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dm.useramask = true;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 54cb8a60514..e956fa71d04 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -34,13 +34,14 @@
#include "usb.h"
#include "base.h"
#include "ps.h"
+#include "rtl8192c/fw_common.h"
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
#define REALTEK_USB_VENQT_CMD_REQ 0x05
#define REALTEK_USB_VENQT_CMD_IDX 0x00
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
static void usbctrl_async_callback(struct urb *urb)
{
@@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(len);
+ /* data are already in little-endian order */
memcpy(buf, pdata, len);
usb_fill_control_urb(urb, udev, pipe,
(unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
unsigned int pipe;
int status;
u8 reqtype;
+ int vendorreq_times = 0;
+ static int count;
pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
reqtype = REALTEK_USB_VENQT_READ;
- status = usb_control_msg(udev, pipe, request, reqtype, value, index,
- pdata, len, 0); /* max. timeout */
+ do {
+ status = usb_control_msg(udev, pipe, request, reqtype, value,
+ index, pdata, len, 0); /*max. timeout*/
+ if (status < 0) {
+ /* firmware download is checksummed, don't retry */
+ if ((value >= FW_8192C_START_ADDRESS &&
+ value <= FW_8192C_END_ADDRESS))
+ break;
+ } else {
+ break;
+ }
+ } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
- if (status < 0)
+ if (status < 0 && count++ < 4)
pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
+ value, status, le32_to_cpu(*(u32 *)pdata));
return status;
}
@@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- ret = *data;
+ ret = le32_to_cpu(*data);
kfree(data);
return ret;
}
@@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
u8 request;
u16 wvalue;
u16 index;
- u32 data;
+ __le32 data;
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)(addr&0x0000ffff);
- data = val;
+ data = cpu_to_le32(val);
_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
len);
}
@@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+ u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ u8 request = REALTEK_USB_VENQT_CMD_REQ;
+ u8 reqtype = REALTEK_USB_VENQT_WRITE;
+ u16 wvalue;
+ u16 index = REALTEK_USB_VENQT_CMD_IDX;
+ int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ u8 *buffer;
+ dma_addr_t dma_addr;
+
+ wvalue = (u16)(addr&0x0000ffff);
+ buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+ if (!buffer)
+ return;
+ memcpy(buffer, data, len);
+ usb_control_msg(udev, pipe, request, reqtype, wvalue,
+ index, buffer, len, 50);
+
+ usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
static void _rtl_usb_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
@@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.writeN_sync = _usb_writeN_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
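
The sync-read hunk above wraps usb_control_msg() in a bounded retry loop, skipping retries for firmware-download addresses because those transfers are checksummed end to end. A compact sketch of that pattern, with hypothetical callbacks standing in for the driver's calls:

#define MAX_VENDORREQ_TRIES 10

/* Retry a transfer up to MAX_VENDORREQ_TRIES times; do_xfer() returns a
 * negative value on failure, and must_not_retry() marks requests (such as
 * checksummed firmware blocks) that have to fail fast. Both callbacks are
 * illustrative stand-ins, not driver functions. */
static int xfer_with_retry(int (*do_xfer)(void *ctx),
                           int (*must_not_retry)(void *ctx), void *ctx)
{
        int tries = 0;
        int status;

        do {
                status = do_xfer(ctx);
                if (status >= 0 || must_not_retry(ctx))
                        break;
        } while (++tries < MAX_VENDORREQ_TRIES);

        return status;
}
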
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7ddba8e..cdaf1429fa0 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
#define AC_MAX 4
#define QOS_QUEUE_NUM 4
#define RTL_MAC80211_NUM_QUEUE 5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
unsigned long pci_base_addr; /*device I/O address */
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+ u16 len);
u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1485,7 +1488,7 @@ struct rtl_intf_ops {
struct rtl_mod_params {
/* default: 0 = using hardware encryption */
- int sw_crypto;
+ bool sw_crypto;
/* default: 0 = DBG_EMERG (0)*/
int debug;
@@ -1541,6 +1544,7 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
+ struct mutex ps_mutex;
/*spin lock */
spinlock_t ips_lock;
@@ -1548,7 +1552,6 @@ struct rtl_locks {
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
/*Dual mac*/
@@ -1573,7 +1576,8 @@ struct rtl_works {
/* For SW LPS */
struct delayed_work ps_work;
struct delayed_work ps_rfon_wq;
- struct tasklet_struct ips_leave_tasklet;
+
+ struct work_struct lps_leave_work;
};
struct rtl_debug {
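
The wifi.h hunks swap the lps_lock spinlock and ips_leave_tasklet for a ps_mutex and a lps_leave_work item, which means power-save transitions move into process context where sleeping is allowed. A trimmed, hypothetical sketch of that arrangement (struct and function names are illustrative, not the driver's):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Leaving low power state runs from a work item in process context, so it
 * can take a mutex and sleep, which the old tasklet + spinlock pair could
 * not. */
struct ps_state {
        struct mutex ps_mutex;
        struct work_struct lps_leave_work;
};

static void lps_leave_worker(struct work_struct *work)
{
        struct ps_state *ps = container_of(work, struct ps_state,
                                           lps_leave_work);

        mutex_lock(&ps->ps_mutex);
        /* ... tell the firmware to leave low power state ... */
        mutex_unlock(&ps->ps_mutex);
}

static void ps_state_init(struct ps_state *ps)
{
        mutex_init(&ps->ps_mutex);
        INIT_WORK(&ps->lps_leave_work, lps_leave_worker);
}

/* callers in atomic context just queue the work */
static void ps_request_lps_leave(struct ps_state *ps)
{
        schedule_work(&ps->lps_leave_work);
}
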
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f955620..6248c354fc5 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b87c2..af08c8609c6 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@ config WL12XX_SDIO
If you choose to build a module, it'll be called wl12xx_sdio.
Say N if unsure.
-config WL12XX_SDIO_TEST
- tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC && WL12XX_SDIO
- default n
- ---help---
- This module adds support for the SDIO bus testing with the
- TI wl12xx chipsets. You probably don't want this unless you are
- testing a new hardware platform. Select this if you want to test the
- SDIO bus which is connected to the wl12xx chip.
-
config WL12XX_PLATFORM_DATA
bool
depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b3483ca2..fe67262ba19 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
wl12xx_spi-objs = spi.o
wl12xx_sdio-objs = sdio.o
-wl12xx_sdio_test-objs = sdio_test.o
wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WL12XX) += wl12xx.o
obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
-obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
-
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a74319..7537c401a44 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "ps.h"
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_wake_up_condition *wake_up;
int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
goto out;
}
- wake_up->role_id = wl->role_id;
+ wake_up->role_id = wlvif->role_id;
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
wake_up->listen_interval = wl->conf.conn.listen_interval;
@@ -84,7 +85,8 @@ out:
return ret;
}
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power)
{
struct acx_current_tx_power *acx;
int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
return ret;
}
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_feature_config *feature;
int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->role_id = wl->role_id;
+ feature->role_id = wlvif->role_id;
feature->data_flow_options = 0;
feature->options = 0;
@@ -184,33 +186,8 @@ out:
return ret;
}
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
- struct acx_packet_detection *pd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
- ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
- if (ret < 0) {
- wl1271_warning("failed to set pd threshold: %d", ret);
- goto out;
- }
-
-out:
- kfree(pd);
- return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
goto out;
}
- slot->role_id = wl->role_id;
+ slot->role_id = wlvif->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -238,8 +215,8 @@ out:
return ret;
}
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
}
/* MAC filtering */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
return ret;
}
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_rx_timeout *rx_timeout;
int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
wl1271_debug(DEBUG_ACX, "acx service period timeout");
- rx_timeout->role_id = wl->role_id;
+ rx_timeout->role_id = wlvif->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
@@ -300,7 +278,8 @@ out:
return ret;
}
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
goto out;
}
- rts->role_id = wl->role_id;
+ rts->role_id = wlvif->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
goto out;
}
- beacon_filter->role_id = wl->role_id;
+ beacon_filter->role_id = wlvif->role_id;
beacon_filter->enable = enable_filter;
/*
@@ -401,7 +381,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_beacon_filter_ie_table *ie_table;
int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
}
/* configure default beacon pass-through rules */
- ie_table->role_id = wl->role_id;
+ ie_table->role_id = wlvif->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct acx_conn_monit_params *acx;
u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
timeout = wl->conf.conn.bss_lose_timeout;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
@@ -582,7 +564,7 @@ out:
return ret;
}
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_beacon_broadcast *bb;
int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
goto out;
}
- bb->role_id = wl->role_id;
+ bb->role_id = wlvif->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
return ret;
}
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
goto out;
}
- acx_aid->role_id = wl->role_id;
+ acx_aid->role_id = wlvif->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
return ret;
}
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
return ret;
}
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
- wl->basic_rate, wl->rate_set);
+ wlvif->basic_rate, wlvif->rate_set);
/* configure one basic rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
/* configure one AP supported rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
* (p2p packets should always go out with OFDM rates, even
* if we are currently connected to 11b AP)
*/
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
acx->rate_policy.enabled_rates =
cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1)
{
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
return ret;
}
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
@@ -1129,7 +1114,8 @@ out:
return ret;
}
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
@@ -1189,7 +1175,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_keep_alive_mode *acx = NULL;
int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid)
{
struct wl1271_acx_keep_alive_config *acx = NULL;
int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst)
{
struct wl1271_acx_rssi_snr_trigger *acx = NULL;
int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
goto out;
}
- wl->last_rssi_event = -1;
+ wlvif->last_rssi_event = -1;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
}
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode)
{
struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
}
/* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ba_initiator_policy *acx;
int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
}
/* set for the current role */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
acx->win_size = wl->conf.ht.tx_ba_win_size;
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
return ret;
}
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_ps_rx_streaming *rx_streaming;
u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
if (!(conf_queues & BIT(i)))
continue;
- rx_streaming->role_id = wl->role_id;
+ rx_streaming->role_id = wlvif->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
return ret;
}
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
if (!acx)
return -ENOMEM;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
return ret;
}
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_config_ps *config_ps;
int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
- config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+ config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
sizeof(*config_ps));
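
The common thread in the acx.c hunks is mechanical: every helper gains a struct wl12xx_vif *wlvif argument and reads role_id, rate sets and RSSI state from it instead of from the global struct wl1271, so one device can drive several virtual interfaces. A hypothetical miniature of the pattern (types and names are illustrative, not the driver's):

#include <stdint.h>

struct device_ctx {                     /* plays the role of struct wl1271 */
        int hw;
};

struct vif_ctx {                        /* plays the role of struct wl12xx_vif */
        uint8_t role_id;
        uint32_t basic_rate;
};

static int send_role_cmd(struct device_ctx *dev, uint8_t role_id, int arg)
{
        (void)dev;
        return (int)role_id + arg;      /* stand-in for a firmware command */
}

/* Before the refactor this took only dev and used a device-global role_id;
 * now the owning interface is explicit in every call. */
static int configure_tx_power(struct device_ctx *dev, struct vif_ctx *vif,
                              int power)
{
        return send_role_cmd(dev, vif->role_id, power);
}
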
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4b342..69892b40c2d 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
__le32 lifetime;
} __packed;
-struct acx_packet_detection {
- struct acx_header header;
-
- __le32 threshold;
-} __packed;
-
-
enum acx_slot_type {
SLOT_TIME_LONG = 0,
SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
u8 reserved;
};
-#define ACX_TX_BASIC_RATE 0
-#define ACX_TX_AP_FULL_RATE 1
-#define ACX_TX_BASIC_RATE_P2P 2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
struct acx_rate_policy {
struct acx_header header;
@@ -1234,39 +1222,48 @@ enum {
};
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_mem_map(struct wl1271 *wl,
struct acx_header *mem_map, size_t len);
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl12xx_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address);
int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+ bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation, u8 hlid);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 68133791497..8f9cf5a816e 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -25,6 +25,7 @@
#include <linux/wl12xx.h>
#include <linux/export.h>
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "boot.h"
@@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
+ if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
@@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 4;
dest_addr += 4;
}
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
}
/*
@@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
nvs_ptr = (u8 *)wl->nvs +
ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
@@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
kfree(nvs_aligned);
return 0;
+
+out_badnvs:
+ wl1271_error("nvs data is malformed");
+ return -EILSEQ;
}
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
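
The boot.c hunks add bounds checks while walking the NVS burst data so a truncated or corrupt NVS file fails with -EILSEQ instead of being read past its end. A minimal userspace sketch of the same guard: never consume a 4-byte word unless all of it lies inside the buffer (parse_word() is a hypothetical consumer, not a driver function):

#include <stdint.h>
#include <stddef.h>

static int parse_bursts(const uint8_t *nvs, size_t nvs_len,
                        void (*parse_word)(uint32_t val))
{
        const uint8_t *p = nvs;
        const uint8_t *end = nvs + nvs_len;

        while (p + 4 <= end) {          /* same condition as nvs_ptr + 3 < end */
                uint32_t val = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
                parse_word(val);
                p += 4;
        }

        /* leftover bytes mean the data is malformed, like the -EILSEQ path */
        return (p == end) ? 0 : -1;
}
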
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e548f..25990bd38be 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from INI out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from ini out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id)
{
struct wl12xx_cmd_role_enable *cmd;
int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
goto out_free;
}
- memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->mac_address, addr, ETH_ALEN);
cmd->role_type = role_type;
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
return ret;
}
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
__set_bit(link, wl->links_map);
+ __set_bit(link, wlvif->links_map);
*hlid = link;
return 0;
}
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
__clear_bit(*hlid, wl->links_map);
+ __clear_bit(*hlid, wlvif->links_map);
*hlid = WL12XX_INVALID_LINK_ID;
}
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
+ if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+ wlvif->session_counter = 0;
- wl->session_counter++;
+ wlvif->session_counter++;
- return wl->session_counter;
+ return wlvif->session_counter;
}
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
- cmd->role_id = wl->dev_role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->dev_role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
- if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+ if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
if (ret)
goto out_free;
}
- cmd->device.hlid = wl->dev_hlid;
- cmd->device.session = wl->session_counter;
+ cmd->device.hlid = wlvif->dev_hlid;
+ cmd->device.session = wlvif->session_counter;
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
err_hlid:
/* clear links on error */
- __clear_bit(wl->dev_hlid, wl->links_map);
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -513,12 +539,13 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
- cmd->role_id = wl->dev_role_id;
+ cmd->role_id = wlvif->dev_role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->dev_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -554,8 +581,9 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->sta.ssid_len = wl->ssid_len;
- memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->sta.hlid = wl->sta_hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl);
- cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.hlid = wlvif->sta.hlid;
+ cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -613,12 +641,12 @@ out:
}
/* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -648,16 +676,17 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
/* trying to use hidden SSID with an old hostapd version */
- if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
wl1271_error("got a null SSID from beacon/bss");
ret = -EINVAL;
goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out;
}
- ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
if (ret < 0)
goto out_free;
- ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
if (ret < 0)
goto out_free_global;
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
- cmd->ap.global_hlid = wl->ap_global_hlid;
- cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
- cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ap.global_hlid = wlvif->ap.global_hlid;
+ cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
- cmd->ap.ssid_len = wl->ssid_len;
- memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+ cmd->ap.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->ap.local_rates = cpu_to_le32(0xffffffff);
- switch (wl->band) {
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+ wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
cmd->band = RADIO_BAND_2_4GHZ;
break;
}
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out_free;
out_free_bcast:
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -735,7 +764,7 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -766,10 +795,11 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ibss.dtim_interval = bss_conf->dtim_period;
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->ibss.ssid_len = wl->ssid_len;
- memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->ibss.hlid = wl->sta_hlid;
- cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.hlid = wlvif->sta.hlid;
+ cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
- wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+ wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+ vif->bss_conf.bssid);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -962,7 +993,8 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
goto out;
}
- ps_params->role_id = wl->role_id;
+ ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
return ret;
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
int ret = -ENOMEM;
- if (wl->bss_type == BSS_TYPE_IBSS) {
+ if (wlvif->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw,
+ wl12xx_wlvif_to_vif(wlvif));
if (!skb)
goto out;
size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
}
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
skb->data, skb->len,
CMD_TEMPL_KLV_IDX_NULL_DATA,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
}
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret = 0;
- skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ skb = ieee80211_pspoll_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
- skb->len, 0, wl->basic_rate_set);
+ skb->len, 0, wlvif->basic_rate_set);
out:
dev_kfree_skb(skb);
return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
if (!skb) {
ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
}
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
int ret;
u32 rate;
if (!skb)
- skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
goto out;
wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
- if (wl->band == IEEE80211_BAND_2GHZ)
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
@@ -1159,9 +1199,11 @@ out:
return skb;
}
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr)
{
int ret;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_arp_rsp_template tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_DATA |
IEEE80211_FCTL_TODS);
- memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
- memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+ memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
memset(hdr->addr3, 0xff, ETH_ALEN);
/* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
- memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+ memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
tmpl.sender_ip = ip_addr;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
&tmpl, sizeof(tmpl), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
return ret;
}
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
- memcpy(template.addr1, wl->bssid, ETH_ALEN);
- memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
- memcpy(template.addr3, wl->bssid, ETH_ALEN);
+ memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(template.addr2, vif->addr, ETH_ALEN);
+ memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
}
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
return ret;
}
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
int ret = 0;
/* hlid might have already been deleted */
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
goto out;
}
- cmd->hlid = wl->sta_hlid;
+ cmd->hlid = wlvif->sta.hlid;
if (key_type == KEY_WEP)
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
* TODO: merge with sta/ibss into 1 set_key function.
* note there are slight diffs
*/
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (!cmd)
return -ENOMEM;
- if (hlid == wl->ap_bcast_hlid) {
+ if (hlid == wlvif->ap.bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
@@ -1411,7 +1456,8 @@ out:
return ret;
}
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid)
{
struct wl12xx_cmd_add_peer *cmd;
int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
else
cmd->psd_type[i] = WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wl->band];
+ sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
- wl->band));
+ wlvif->band));
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
return ret;
}
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
}
cmd->role_id = role_id;
- cmd->channel = wl->channel;
- switch (wl->band) {
+ cmd->channel = wlvif->channel;
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_error("roc - unknown band: %d", (int)wl->band);
+ wl1271_error("roc - unknown band: %d", (int)wlvif->band);
ret = -EINVAL;
goto out_free;
}
@@ -1657,14 +1704,14 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- ret = wl12xx_cmd_roc(wl, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
@@ -1753,3 +1800,53 @@ out_free:
out:
return ret;
}
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out_stop;
+
+ return 0;
+
+out_stop:
+ wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+ return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ /* flush all pending packets */
+ wl1271_tx_work_locked(wl);
+
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd42769aa..3f7d0b93c24 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct ieee80211_channel_switch *ch_switch);
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fbf93f..1bcfb017058 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_11MBPS)
+
#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 00000000000..b85fd8c41e8
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+ DEBUG_NONE = 0,
+ DEBUG_IRQ = BIT(0),
+ DEBUG_SPI = BIT(1),
+ DEBUG_BOOT = BIT(2),
+ DEBUG_MAILBOX = BIT(3),
+ DEBUG_TESTMODE = BIT(4),
+ DEBUG_EVENT = BIT(5),
+ DEBUG_TX = BIT(6),
+ DEBUG_RX = BIT(7),
+ DEBUG_SCAN = BIT(8),
+ DEBUG_CRYPT = BIT(9),
+ DEBUG_PSM = BIT(10),
+ DEBUG_MAC80211 = BIT(11),
+ DEBUG_CMD = BIT(12),
+ DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
+ DEBUG_ALL = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+ pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+ pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ 0); \
+ } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ true); \
+ } while (0)
+
+#endif /* __DEBUG_H__ */
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd52830..15eb3a9c30c 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "ps.h"
#include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
{
struct wl1271 *wl = file->private_data;
int res = 0;
- char buf[1024];
+ ssize_t ret;
+ char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+ buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
mutex_lock(&wl->mutex);
#define DRIVER_STATE_PRINT(x, fmt) \
- (res += scnprintf(buf + res, sizeof(buf) - res,\
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
- DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
- DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(bss_type);
DRIVER_STATE_PRINT_INT(channel);
- DRIVER_STATE_PRINT_HEX(rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate);
DRIVER_STATE_PRINT_INT(band);
- DRIVER_STATE_PRINT_INT(beacon_int);
- DRIVER_STATE_PRINT_INT(psm_entry_retry);
- DRIVER_STATE_PRINT_INT(ps_poll_failures);
DRIVER_STATE_PRINT_INT(power_level);
- DRIVER_STATE_PRINT_INT(rssi_thold);
- DRIVER_STATE_PRINT_INT(last_rssi_event);
DRIVER_STATE_PRINT_INT(sg_enabled);
DRIVER_STATE_PRINT_INT(enable_11a);
DRIVER_STATE_PRINT_INT(noise);
- DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
- DRIVER_STATE_PRINT_INT(last_tx_hlid);
- DRIVER_STATE_PRINT_INT(ba_support);
- DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#undef DRIVER_STATE_PRINT_LHEX
#undef DRIVER_STATE_PRINT_STR
#undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
mutex_unlock(&wl->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
}
static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
.llseek = default_llseek,
};
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ int ret, res = 0;
+ const int buf_size = 4096;
+ char *buf;
+ char tmp_buf[64];
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt) \
+ (res += scnprintf(buf + res, buf_size - res, \
+ #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len) \
+ do { \
+ memset(tmp_buf, 0, sizeof(tmp_buf)); \
+ memcpy(tmp_buf, wlvif->x, \
+ min_t(u8, len, sizeof(tmp_buf) - 1)); \
+ res += scnprintf(buf + res, buf_size - res, \
+ #x " = %s\n", tmp_buf); \
+ } while (0)
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ VIF_STATE_PRINT_INT(role_id);
+ VIF_STATE_PRINT_INT(bss_type);
+ VIF_STATE_PRINT_LHEX(flags);
+ VIF_STATE_PRINT_INT(p2p);
+ VIF_STATE_PRINT_INT(dev_role_id);
+ VIF_STATE_PRINT_INT(dev_hlid);
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ VIF_STATE_PRINT_INT(sta.hlid);
+ VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+ VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+ VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+ VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+ } else {
+ VIF_STATE_PRINT_INT(ap.global_hlid);
+ VIF_STATE_PRINT_INT(ap.bcast_hlid);
+ VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+ VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+ VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+ }
+ VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_LHEX(links_map[0]);
+ VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+ VIF_STATE_PRINT_INT(band);
+ VIF_STATE_PRINT_INT(channel);
+ VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+ VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+ VIF_STATE_PRINT_HEX(basic_rate_set);
+ VIF_STATE_PRINT_HEX(basic_rate);
+ VIF_STATE_PRINT_HEX(rate_set);
+ VIF_STATE_PRINT_INT(beacon_int);
+ VIF_STATE_PRINT_INT(default_key);
+ VIF_STATE_PRINT_INT(aid);
+ VIF_STATE_PRINT_INT(session_counter);
+ VIF_STATE_PRINT_INT(ps_poll_failures);
+ VIF_STATE_PRINT_INT(psm_entry_retry);
+ VIF_STATE_PRINT_INT(power_level);
+ VIF_STATE_PRINT_INT(rssi_thold);
+ VIF_STATE_PRINT_INT(last_rssi_event);
+ VIF_STATE_PRINT_INT(ba_support);
+ VIF_STATE_PRINT_INT(ba_allowed);
+ VIF_STATE_PRINT_LLHEX(tx_security_seq);
+ VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+ }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+ .read = vifs_state_read,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
char buf[10];
size_t len;
unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
if (ret < 0)
goto out;
- ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(gpio_power, rootdir);
DEBUGFS_ADD(start_recovery, rootdir);
DEBUGFS_ADD(driver_state, rootdir);
+ DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a9e40..d3280df68f5 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
*/
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "event.h"
@@ -31,12 +32,16 @@
void wl1271_pspoll_work(struct work_struct *work)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct delayed_work *dwork;
struct wl1271 *wl;
int ret;
dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, pspoll_work);
+ wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+ vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+ wl = wlvif->wl;
wl1271_debug(DEBUG_EVENT, "pspoll work");
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+ if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+ wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
};
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int delay = wl->conf.conn.ps_poll_recovery_period;
int ret;
- wl->ps_poll_failures++;
- if (wl->ps_poll_failures == 1)
+ wlvif->ps_poll_failures++;
+ if (wlvif->ps_poll_failures == 1)
wl1271_info("AP with dysfunctional ps-poll, "
"trying to work around it.");
/* force active mode receive data from the AP */
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
return;
- set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
- ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+ set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+ ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
msecs_to_jiffies(delay));
}
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
}
static int wl1271_event_ps_report(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox,
bool *beacon_loss)
{
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
case EVENT_ENTER_POWER_SAVE_FAIL:
wl1271_debug(DEBUG_PSM, "PSM entry failed");
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
/* remain in active mode */
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
break;
}
- if (wl->psm_entry_retry < total_retries) {
- wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ if (wlvif->psm_entry_retry < total_retries) {
+ wlvif->psm_entry_retry++;
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
} else {
wl1271_info("No ack to nullfunc from AP.");
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
*beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
- wl->psm_entry_retry = 0;
-
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
- if (ret < 0)
- break;
+ wlvif->psm_entry_retry = 0;
/*
* BET has only a minor effect in 5GHz and masks
* channel switch IEs, so we only enable BET on 2.4GHz
*/
- if (wl->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
/* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
+ ret = wl1271_acx_bet_enable(wl, wlvif, true);
- if (wl->ps_compl) {
- complete(wl->ps_compl);
- wl->ps_compl = NULL;
+ if (wlvif->ps_compl) {
+ complete(wlvif->ps_compl);
+ wlvif->ps_compl = NULL;
}
break;
default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
}
static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = mbox->rssi_snr_trigger_metric[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wl->rssi_thold)
+ if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- if (event != wl->last_rssi_event)
- ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
- wl->last_rssi_event = event;
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
}
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (!wl->ba_rx_bitmap)
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (!wlvif->sta.ba_rx_bitmap)
return;
- ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
- wl->bssid);
+ ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ vif->bss_conf.bssid);
} else {
- int i;
+ u8 hlid;
struct wl1271_link *lnk;
- for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
- lnk = &wl->links[i];
- if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+ WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ if (!lnk->ba_bitmap)
continue;
- ieee80211_stop_rx_ba_session(wl->vif,
+ ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
u8 enable)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
if (enable) {
/* disable dynamic PS when requested by the firmware */
- ieee80211_disable_dyn_ps(wl->vif);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_disable_dyn_ps(vif);
+ }
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
- ieee80211_enable_dyn_ps(wl->vif);
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_enable_dyn_ps(vif);
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
}
}
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
u32 vector;
bool beacon_loss = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, wl->scan_vif);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -248,13 +267,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
"(status 0x%0x)", mbox->scheduled_scan_status);
if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl);
ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_scanning = false;
}
}
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
- wl->bss_type == BSS_TYPE_STA_BSS)
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
wl12xx_event_soft_gemini_sense(wl,
mbox->soft_gemini_sense_info);
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+ if (vector & BSS_LOSE_EVENT_ID) {
+ /* TODO: check for multi-role */
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+ if (vector & PS_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
- ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
- if (ret < 0)
- return ret;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl1271_event_ps_report(wl, wlvif,
+ mbox, &beacon_loss);
+ if (ret < 0)
+ return ret;
+ }
}
- if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
- wl1271_event_pspoll_delivery_fail(wl);
+ if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_pspoll_delivery_fail(wl, wlvif);
+ }
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ /* TODO: check actual multi-role support */
wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- if (wl->vif)
- wl1271_event_rssi_trigger(wl, mbox);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_rssi_trigger(wl, wlvif, mbox);
+ }
}
- if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+ u8 role_id = mbox->role_id;
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+ "ba_allowed = 0x%x, role_id=%d",
+ mbox->rx_ba_allowed, role_id);
- wl->ba_allowed = !!mbox->rx_ba_allowed;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (role_id != 0xff && role_id != wlvif->role_id)
+ continue;
- if (wl->vif && !wl->ba_allowed)
- wl1271_stop_ba_event(wl);
+ wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
+ }
}
- if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
"status = 0x%x",
mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* 1) channel switch complete with status=0
* 2) channel switch failed status=1
*/
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
- (wl->vif))
- ieee80211_chswitch_done(wl->vif,
- mbox->channel_switch_status ? false : true);
+
+ /* TODO: configure only the relevant vif */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ bool success;
+
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+ &wl->flags))
+ continue;
+
+ success = mbox->channel_switch_status ? false : true;
+ ieee80211_chswitch_done(vif, success);
+ }
}
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- if (wl->vif)
- wl1271_tx_dummy_packet(wl);
+ wl1271_tx_dummy_packet(wl);
}
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
- if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+ if (vector & MAX_TX_RETRY_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
disconnect_sta = true;
}
- if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ if (vector & INACTIVE_STA_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
disconnect_sta = true;
}
- if (is_ap && disconnect_sta) {
+ if (disconnect_sta) {
u32 num_packets = wl->conf.tx.max_tx_retries;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
- for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
- h < AP_MAX_LINKS;
- h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
- if (!wl1271_is_active_sta(wl, h))
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
+ }
+ if (!found)
continue;
+ vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, addr);
+ sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- if (wl->vif && beacon_loss)
- ieee80211_connection_loss(wl->vif);
+ if (beacon_loss)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0ede5b..1d878ba47bf 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c94e9..ca7ee59e450 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include "debug.h"
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
@@ -33,7 +34,7 @@
#include "tx.h"
#include "io.h"
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template),
+ (struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
- WL1271_CMD_TEMPL_DFLT_SIZE, i,
- WL1271_RATE_AUTOMATIC);
+ sizeof(struct ieee80211_qos_hdr),
+ i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_disconn_template *tmpl;
int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
@@ -123,8 +148,10 @@ out:
return ret;
}
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr_3addr *nullfunc;
int ret;
u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
/* nullfunc->addr1 is filled by FW */
- memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
@@ -153,8 +180,10 @@ out:
return ret;
}
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr *qosnull;
int ret;
u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
/* qosnull->addr1 is filled by FW */
- memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+ memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
@@ -183,49 +212,6 @@ out:
return ret;
}
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
- int ret;
-
- /*
- * Put very large empty placeholders for all templates. These
- * reserve memory for later.
- */
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
- sizeof
- (struct wl12xx_disconn_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
- sizeof
- (struct wl12xx_qos_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
return 0;
}
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_pd_threshold(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+ ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
- ret = wl1271_acx_service_period_timeout(wl);
+ ret = wl1271_acx_service_period_timeout(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+ ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- /* disable beacon filtering at this stage */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_table(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_beacon_filter_table(wl);
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_bcn_dtim_options(wl);
+ ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
if (ret < 0)
return ret;
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
return 0;
}
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
- }
-
/* PS config */
- ret = wl1271_acx_config_ps(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- return ret;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
+ ret = wl12xx_acx_config_ps(wl, wlvif);
if (ret < 0)
return ret;
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- /* Beacons and broadcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
-
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_sta_rate_policies(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* Configure the FW logger */
- ret = wl12xx_init_fwlog(wl);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret, i;
/* disable all keep-alive templates */
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
return ret;
}
/* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_ap_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
-
- ret = wl1271_init_ap_rates(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_ap_max_tx_retry(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* initialize Tx power */
- ret = wl1271_acx_tx_power(wl, wl->power_level);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
- ret = wl1271_ap_init_deauth_template(wl);
+ ret = wl1271_ap_init_deauth_template(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_null_template(wl);
+ ret = wl1271_ap_init_null_template(wl, vif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_qos_null_template(wl);
+ ret = wl1271_ap_init_qos_null_template(wl, vif);
if (ret < 0)
return ret;
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
- return wl1271_ap_init_templates(wl);
+ return wl1271_ap_init_templates(wl, vif);
}
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret;
struct conf_tx_rate_class rc;
u32 supported_rates;
- wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+ wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+ wlvif->basic_rate_set);
- if (wl->basic_rate_set == 0)
+ if (wlvif->basic_rate_set == 0)
return -EINVAL;
- rc.enabled_rates = wl->basic_rate_set;
+ rc.enabled_rates = wlvif->basic_rate_set;
rc.long_retry_limit = 10;
rc.short_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
if (ret < 0)
return ret;
/* use the min basic rate for AP broadcast/multicast */
- rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
if (ret < 0)
return ret;
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+ if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc,
+ wlvif->ap.ucast_rate_idx[i]);
if (ret < 0)
return ret;
}
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return 0;
}
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
/* Reset the BA RX indicators */
- wl->ba_rx_bitmap = 0;
- wl->ba_allowed = true;
+ wlvif->ba_allowed = true;
wl->ba_rx_session_count = 0;
/* BA is supported in STA/AP modes */
- if (wl->bss_type != BSS_TYPE_AP_BSS &&
- wl->bss_type != BSS_TYPE_STA_BSS) {
- wl->ba_support = false;
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+ wlvif->bss_type != BSS_TYPE_STA_BSS) {
+ wlvif->ba_support = false;
return 0;
}
- wl->ba_support = true;
+ wlvif->ba_support = true;
/* 802.11n initiator BA session setting */
- return wl12xx_acx_set_ba_initiator_policy(wl);
+ return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
}
int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
if (wl->chip.id == CHIP_ID_1283_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
/* Enable SDIO padding */
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
@@ -575,39 +497,186 @@ out:
return ret;
}
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
-int wl1271_hw_init(struct wl1271 *wl)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ int ret;
+
+ ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* initialize Tx power */
+ ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_general_parms(wl);
- else
- ret = wl1271_cmd_general_parms(wl);
+ /*
+ * consider all existing roles before configuring psm.
+ * TODO: reconfigure on interface removal.
+ */
+ if (!wl->ap_count) {
+ if (is_ap) {
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ } else if (!wl->sta_count) {
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Mode specific init */
+ if (is_ap) {
+ ret = wl1271_ap_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_ap_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_sta_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ wl12xx_init_phy_vif_config(wl, wlvif);
+
+ /* Default TID/AC configuration */
+ BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+ conf_ac->cw_min, conf_ac->cw_max,
+ conf_ac->aifsn, conf_ac->tx_op_limit);
+ if (ret < 0)
+ return ret;
+
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, wlvif,
+ conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Configure HW encryption */
+ ret = wl1271_acx_feature_cfg(wl, wlvif);
if (ret < 0)
return ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_radio_parms(wl);
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl, vif);
else
- ret = wl1271_cmd_radio_parms(wl);
+ ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+ if (ret < 0)
+ return ret;
+
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl, wlvif);
if (ret < 0)
return ret;
+ return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl128x_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl128x_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
+
/* Chip-specific init */
ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- /* Mode specific init */
- if (is_ap)
- ret = wl1271_ap_hw_init(wl);
- else
- ret = wl1271_sta_hw_init(wl);
+ /* Init templates */
+ ret = wl1271_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+ /* Configure the FW logger */
+ ret = wl12xx_init_fwlog(wl);
if (ret < 0)
return ret;
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl1271_acx_dco_itrim_params(wl);
if (ret < 0)
goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure HW encryption */
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
- /* Mode specific init - post mem init */
- if (is_ap)
- ret = wl1271_ap_hw_init_post_mem(wl);
- else
- ret = wl1271_sta_hw_init_post_mem(wl);
-
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_set_rate_mgmt_params(wl);
if (ret < 0)
goto out_free_memmap;
- /* Configure initiator BA sessions policies */
- ret = wl1271_set_ba_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure hangover */
ret = wl12xx_acx_config_hangover(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230fd29..2da0f404ef6 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_chip_specific_init(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f4504..079ad380e8f 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
@@ -46,7 +48,7 @@
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
- wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+ wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
return true;
}
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
void wl1271_disable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->disable_irq(wl);
+ disable_irq(wl->irq);
}
void wl1271_enable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->enable_irq(wl);
+ enable_irq(wl->irq);
}
/* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
if (wl->if_ops->reset)
- wl->if_ops->reset(wl);
+ wl->if_ops->reset(wl->dev);
}
void wl1271_io_init(struct wl1271 *wl)
{
if (wl->if_ops->init)
- wl->if_ops->init(wl);
+ wl->if_ops->init(wl->dev);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341dfaf..d398cbcea98 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
- return wl->if_ops->dev(wl);
-}
-
-
/* Raw target IO, address is not translated */
static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->write(wl, addr, buf, len, fixed);
+ wl->if_ops->write(wl->dev, addr, buf, len, fixed);
}
static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->read(wl, addr, buf, len, fixed);
+ wl->if_ops->read(wl->dev, addr, buf, len, fixed);
}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl, false);
+ wl->if_ops->power(wl->dev, false);
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl, true);
+ int ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
int wl1271_set_partition(struct wl1271 *wl,
struct wl1271_partition_set *p);
+bool wl1271_set_block_size(struct wl1271 *wl);
+
/* Functions from wl1271_main.c */
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b6321..d5f55a149de 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
static bool bug_on_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ unsigned char operstate)
{
int ret;
+
if (operstate != IF_OPER_UP)
return 0;
- if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+ if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wl->role_id);
+ wl12xx_croc(wl, wlvif->role_id);
wl1271_info("Association completed.");
return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
struct ieee80211_hw *hw;
struct wl1271 *wl;
struct wl1271 *wl_temp;
+ struct wl12xx_vif *wlvif;
int ret = 0;
/* Check that this notification is for us. */
@@ -459,17 +450,28 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl->state == WL1271_STATE_OFF)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (dev->operstate != IF_OPER_UP)
goto out;
+ /*
+ * The correct behavior should be just getting the appropriate wlvif
+ * from the given dev, but currently we don't have a mac80211
+ * interface for it.
+ */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
- wl1271_check_operstate(wl, dev->operstate);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
- wl1271_ps_elp_sleep(wl);
+ wl1271_check_operstate(wl, wlvif,
+ ieee80211_get_operstate(vif));
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -498,19 +500,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
return 0;
}
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
int ret = 0;
/* we should hold wl->mutex */
- ret = wl1271_acx_ps_rx_streaming(wl, enable);
+ ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
if (ret < 0)
goto out;
if (enable)
- set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
else
- clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
return ret;
}
@@ -519,25 +522,25 @@ out:
* this function is being called when the rx_streaming interval
* has been changed or rx_streaming should be disabled
*/
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
/* reconfigure/disable according to new streaming_period */
if (period &&
- test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
else {
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
- del_timer_sync(&wl->rx_streaming_timer);
+ del_timer_sync(&wlvif->rx_streaming_timer);
}
out:
return ret;
@@ -546,13 +549,14 @@ out:
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_enable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_enable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
- !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
goto out;
@@ -564,12 +568,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
goto out_sleep;
/* stop it after some time of inactivity */
- mod_timer(&wl->rx_streaming_timer,
+ mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
@@ -581,19 +585,20 @@ out:
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_disable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
@@ -605,8 +610,9 @@ out:
static void wl1271_rx_streaming_timer(unsigned long data)
{
- struct wl1271 *wl = (struct wl1271 *)data;
- ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl1271 *wl = wlvif->wl;
+ ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +651,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
static int wl1271_plt_init(struct wl1271 *wl)
{
- struct conf_tx_ac_category *conf_ac;
- struct conf_tx_tid *conf_tid;
- int ret, i;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +680,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- ret = wl1271_acx_dco_itrim_params(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* FM WLAN coexistence */
- ret = wl1271_acx_fm_coex(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Energy detection */
- ret = wl1271_init_energy_detection(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
- /* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
@@ -768,14 +712,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid, u8 tx_pkts)
{
bool fw_ps, single_sta;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
single_sta = (wl->active_sta_count == 1);
@@ -784,7 +726,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* packets in FW or if the STA is awake.
*/
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_end(wl, hlid);
+ wl12xx_ps_link_end(wl, wlvif, hlid);
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +734,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* case FW-memory congestion is not a problem.
*/
else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
- int id;
-
- /* global/broadcast "stations" are always active */
- if (hlid < WL1271_AP_STA_HLID_START)
- return true;
-
- id = hlid - WL1271_AP_STA_HLID_START;
- return test_bit(id, wl->ap_hlid_map);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct wl12xx_fw_status *status)
{
+ struct wl1271_link *lnk;
u32 cur_fw_ps_map;
u8 hlid, cnt;
@@ -825,25 +757,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
- if (!wl1271_is_active_sta(wl, hlid))
- continue;
-
- cnt = status->tx_lnk_free_pkts[hlid] -
- wl->links[hlid].prev_freed_pkts;
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
- wl->links[hlid].prev_freed_pkts =
- status->tx_lnk_free_pkts[hlid];
- wl->links[hlid].allocated_pkts -= cnt;
+ lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+ lnk->allocated_pkts -= cnt;
- wl12xx_irq_ps_regulate_link(wl, hlid,
- wl->links[hlid].allocated_pkts);
+ wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+ lnk->allocated_pkts);
}
}
static void wl12xx_fw_status(struct wl1271 *wl,
struct wl12xx_fw_status *status)
{
+ struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -898,8 +827,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl12xx_irq_update_links_status(wl, status);
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ wl12xx_irq_update_links_status(wl, wlvif, status);
+ }
/* update the host-chipset time offset */
getnstimeofday(&ts);
@@ -932,7 +862,7 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
@@ -1054,7 +984,6 @@ out:
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@@ -1069,10 +998,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
- ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, fw_name, wl->dev);
if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
+ wl1271_error("could not get firmware %s: %d", fw_name, ret);
return ret;
}
@@ -1107,10 +1036,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
+ wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+ ret);
return ret;
}
@@ -1217,11 +1147,13 @@ static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, recovery_work);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
mutex_lock(&wl->mutex);
if (wl->state != WL1271_STATE_ON)
- goto out;
+ goto out_unlock;
/* Avoid a recursive recovery */
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1170,12 @@ static void wl1271_recovery_work(struct work_struct *work)
* in the firmware during recovery. This doesn't hurt if the network is
* not encrypted.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
- wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+ wlvif->tx_security_seq +=
+ WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ }
/* Prevent spurious TX during FW restart */
ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1186,14 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* reboot the chipset */
- __wl1271_op_remove_interface(wl, false);
+ while (!list_empty(&wl->wlvif_list)) {
+ wlvif = list_first_entry(&wl->wlvif_list,
+ struct wl12xx_vif, list);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ __wl1271_op_remove_interface(wl, vif, false);
+ }
+ mutex_unlock(&wl->mutex);
+ wl1271_op_stop(wl->hw);
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1262,8 +1204,8 @@ static void wl1271_recovery_work(struct work_struct *work)
* to restart the HW.
*/
ieee80211_wake_queues(wl->hw);
-
-out:
+ return;
+out_unlock:
mutex_unlock(&wl->mutex);
}
@@ -1318,7 +1260,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* 0. read chip id from CHIP_ID */
wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
- /* 1. check if chip id is valid */
+ /*
+ * For wl127x based devices we could use the default block
+ * size (512 bytes), but due to a bug in the sdio driver, we
+ * need to set it explicitly after the chip is powered on. To
+ * simplify the code and since the performance impact is
+ * negligible, we use the same block size for all different
+ * chip types.
+ */
+ if (!wl1271_set_block_size(wl))
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
switch (wl->chip.id) {
case CHIP_ID_1271_PG10:
@@ -1328,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
@@ -1336,7 +1289,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1283_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
@@ -1344,9 +1299,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
-
- if (wl1271_set_block_size(wl))
- wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
case CHIP_ID_1283_PG10:
default:
@@ -1389,8 +1341,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->bss_type = BSS_TYPE_STA_BSS;
-
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1432,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
int q, mapping;
- u8 hlid = 0;
+ u8 hlid;
+
+ if (vif)
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue the packet */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- if (!wl1271_is_active_sta(wl, hlid)) {
- wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
- hlid, q);
- dev_kfree_skb(skb);
- goto out;
- }
-
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (hlid == WL12XX_INVALID_LINK_ID ||
+ (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+ ieee80211_free_txskb(hw, skb);
+ goto out;
}
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
wl->tx_queue_count[q]++;
/*
@@ -1609,13 +1560,14 @@ static struct notifier_block wl1271_dev_notifier = {
};
#ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1575,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
goto out_unlock;
/* enter psm if needed*/
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
DECLARE_COMPLETION_ONSTACK(compl);
- wl->ps_compl = &compl;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ wlvif->ps_compl = &compl;
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
goto out_sleep;
@@ -1638,42 +1590,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+ mutex_lock(&wl->mutex);
if (ret <= 0) {
wl1271_warning("couldn't enter ps mode!");
ret = -EBUSY;
- goto out;
+ goto out_cleanup;
}
- /* take mutex again, and wakeup */
- mutex_lock(&wl->mutex);
-
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
- goto out_unlock;
+ goto out_cleanup;
}
out_sleep:
wl1271_ps_elp_sleep(wl);
+out_cleanup:
+ wlvif->ps_compl = NULL;
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
wl1271_ps_elp_sleep(wl);
out_unlock:
@@ -1682,20 +1635,22 @@ out_unlock:
}
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl1271_configure_suspend_ap(wl);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ return wl1271_configure_suspend_sta(wl, wlvif);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
}
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
- bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
if (!is_sta && !is_ap)
return;
@@ -1707,11 +1662,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
if (is_sta) {
/* exit psm if it wasn't configured */
- if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
- wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
} else if (is_ap) {
- wl1271_acx_beacon_filter_opt(wl, false);
+ wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1678,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
wl->wow_enabled = true;
- ret = wl1271_configure_suspend(wl);
- if (ret < 0) {
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_configure_suspend(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1709,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->pspoll_work);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ flush_delayed_work(&wlvif->pspoll_work);
+ }
flush_delayed_work(&wl->elp_work);
return 0;
@@ -1760,6 +1720,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
unsigned long flags;
bool run_irq_work = false;
@@ -1783,7 +1744,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
wl1271_irq(0, wl);
wl1271_enable_interrupts(wl);
}
- wl1271_configure_resume(wl);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ wl1271_configure_resume(wl, wlvif);
+ }
wl->wow_enabled = false;
return 0;
@@ -1810,20 +1773,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
+ struct wl1271 *wl = hw->priv;
+ int i;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ /*
+ * this must be before the cancel_work calls below, so that the work
+ * functions don't perform further work.
+ */
+ wl->state = WL1271_STATE_OFF;
+ mutex_unlock(&wl->mutex);
+
+ mutex_lock(&wl_list_mutex);
+ list_del(&wl->list);
+ mutex_unlock(&wl_list_mutex);
+
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_delayed_work_sync(&wl->scan_complete_work);
+ cancel_work_sync(&wl->netstack_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_delayed_work_sync(&wl->elp_work);
+
+ /* let's notify MAC80211 about the remaining pending TX frames */
+ wl12xx_tx_reset(wl, true);
+ mutex_lock(&wl->mutex);
+
+ wl1271_power_off(wl);
+
+ wl->band = IEEE80211_BAND_2GHZ;
+
+ wl->rx_counter = 0;
+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
+ wl->tx_results_count = 0;
+ wl->tx_packets_count = 0;
+ wl->time_offset = 0;
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
+ wl->sched_scanning = false;
+ memset(wl->roles_map, 0, sizeof(wl->roles_map));
+ memset(wl->links_map, 0, sizeof(wl->links_map));
+ memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ wl->active_sta_count = 0;
+
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+ /*
+ * this is performed after the cancel_work calls and the associated
+ * mutex_lock, so that wl1271_op_add_interface does not accidentally
+ * get executed before all these vars have been reset.
+ */
+ wl->flags = 0;
+
+ wl->tx_blocks_freed = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->tx_pkts_freed[i] = 0;
+ wl->tx_allocated_pkts[i] = 0;
+ }
+
+ wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ mutex_unlock(&wl->mutex);
}
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
- switch (wl->bss_type) {
+ u8 policy = find_first_zero_bit(wl->rate_policies_map,
+ WL12XX_MAX_RATE_POLICIES);
+ if (policy >= WL12XX_MAX_RATE_POLICIES)
+ return -EBUSY;
+
+ __set_bit(policy, wl->rate_policies_map);
+ *idx = policy;
+ return 0;
+}
+
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+ if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+ return;
+
+ __clear_bit(*idx, wl->rate_policies_map);
+ *idx = WL12XX_MAX_RATE_POLICIES;
+}
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
else
return WL1271_ROLE_AP;
case BSS_TYPE_STA_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_CL;
else
return WL1271_ROLE_STA;
@@ -1832,78 +1894,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
return WL1271_ROLE_IBSS;
default:
- wl1271_error("invalid bss_type: %d", wl->bss_type);
+ wl1271_error("invalid bss_type: %d", wlvif->bss_type);
}
return WL12XX_INVALID_ROLE_TYPE;
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
- struct wl1271 *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
- int retries = WL1271_BOOT_RETRIES;
- int ret = 0;
- u8 role_type;
- bool booted = false;
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- ieee80211_vif_type_p2p(vif), vif->addr);
-
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- wl1271_debug(DEBUG_MAC80211,
- "multiple vifs are not supported yet");
- ret = -EBUSY;
- goto out;
- }
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i;
- /*
- * in some very corner case HW recovery scenarios its possible to
- * get here before __wl1271_op_remove_interface is complete, so
- * opt out if that is the case.
- */
- if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
- ret = -EBUSY;
- goto out;
- }
+ /* clear everything but the persistent data */
+ memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_IBSS;
break;
case NL80211_IFTYPE_P2P_GO:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_AP:
- wl->bss_type = BSS_TYPE_AP_BSS;
+ wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
- ret = -EOPNOTSUPP;
- goto out;
+ wlvif->bss_type = MAX_BSS_TYPE;
+ return -EOPNOTSUPP;
}
- role_type = wl12xx_get_role_type(wl);
- if (role_type == WL12XX_INVALID_ROLE_TYPE) {
- ret = -EINVAL;
- goto out;
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /* init sta/ibss data */
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ /* init ap data */
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_allocate_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
}
- memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
- if (wl->state != WL1271_STATE_OFF) {
- wl1271_error("cannot start because not in off state: %d",
- wl->state);
- ret = -EBUSY;
- goto out;
- }
+ wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+ /*
+ * mac80211 configures some values globally, while we treat them
+ * per-interface. thus, on init, we have to copy them from wl
+ */
+ wlvif->band = wl->band;
+ wlvif->channel = wl->channel;
+ wlvif->power_level = wl->power_level;
+
+ INIT_WORK(&wlvif->rx_streaming_enable_work,
+ wl1271_rx_streaming_enable_work);
+ INIT_WORK(&wlvif->rx_streaming_disable_work,
+ wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+ INIT_LIST_HEAD(&wlvif->list);
+
+ setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+ (unsigned long) wlvif);
+ return 0;
+}
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+ int retries = WL1271_BOOT_RETRIES;
+ bool booted = false;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ int ret;
while (retries) {
retries--;
@@ -1915,25 +1994,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto power_off;
- if (wl->bss_type == BSS_TYPE_STA_BSS ||
- wl->bss_type == BSS_TYPE_IBSS) {
- /*
- * The device role is a special role used for
- * rx and tx frames prior to association (as
- * the STA role can get packets only from
- * its associated bssid)
- */
- ret = wl12xx_cmd_role_enable(wl,
- WL1271_ROLE_DEVICE,
- &wl->dev_role_id);
- if (ret < 0)
- goto irq_disable;
- }
-
- ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
- if (ret < 0)
- goto irq_disable;
-
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
@@ -1964,9 +2024,6 @@ power_off:
goto out;
}
- wl->vif = vif;
- wl->state = WL1271_STATE_ON;
- set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1984,7 +2041,115 @@ power_off:
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
+ wl->state = WL1271_STATE_ON;
+out:
+ return booted;
+}
+
+static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
+{
+ return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret = 0;
+ u8 role_type;
+ bool booted = false;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ ieee80211_vif_type_p2p(vif), vif->addr);
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (wl->vif) {
+ wl1271_debug(DEBUG_MAC80211,
+ "multiple vifs are not supported yet");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * in some very corner case HW recovery scenarios it's possible to
+ * get here before __wl1271_op_remove_interface is complete, so
+ * opt out if that is the case.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl12xx_init_vif_data(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wlvif->wl = wl;
+ role_type = wl12xx_get_role_type(wl, wlvif);
+ if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO: after the nvs issue is solved, move this block
+ * to start(), and make sure here the driver is ON.
+ */
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * we still need this in order to configure the fw
+ * while uploading the nvs
+ */
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+ booted = wl12xx_init_fw(wl);
+ if (!booted) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /*
+ * The device role is a special role used for
+ * rx and tx frames prior to association (as
+ * the STA role can get packets only from
+ * its associated bssid)
+ */
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wl->vif = vif;
+ list_add(&wlvif->list, &wl->wlvif_list);
+ set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count++;
+ else
+ wl->sta_count++;
out:
+ wl1271_ps_elp_sleep(wl);
+out_unlock:
mutex_unlock(&wl->mutex);
mutex_lock(&wl_list_mutex);
@@ -1996,29 +2161,34 @@ out:
}
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues)
{
- int ret, i;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i, ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+ if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return;
+
+ wl->vif = NULL;
+
/* because of hardware recovery, we may get here twice */
if (wl->state != WL1271_STATE_ON)
return;
wl1271_info("down");
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
/* enable dyn ps just in case (if left on due to fw crash etc) */
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- ieee80211_enable_dyn_ps(wl->vif);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ ieee80211_enable_dyn_ps(vif);
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+ wl->scan_vif == vif) {
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
@@ -2029,13 +2199,17 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (ret < 0)
goto deinit;
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0)
goto deinit;
}
- ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
if (ret < 0)
goto deinit;
@@ -2043,120 +2217,93 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
}
deinit:
/* clear all hlids (except system_hlid) */
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_free_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
+ }
- /*
- * this must be before the cancel_work calls below, so that the work
- * functions don't perform further work.
- */
- wl->state = WL1271_STATE_OFF;
+ wl12xx_tx_reset_wlvif(wl, wlvif);
+ wl1271_free_ap_keys(wl, wlvif);
+ if (wl->last_wlvif == wlvif)
+ wl->last_wlvif = NULL;
+ list_del(&wlvif->list);
+ memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count--;
+ else
+ wl->sta_count--;
mutex_unlock(&wl->mutex);
-
- wl1271_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->netstack_work);
- cancel_work_sync(&wl->tx_work);
- del_timer_sync(&wl->rx_streaming_timer);
- cancel_work_sync(&wl->rx_streaming_enable_work);
- cancel_work_sync(&wl->rx_streaming_disable_work);
- cancel_delayed_work_sync(&wl->pspoll_work);
- cancel_delayed_work_sync(&wl->elp_work);
+ del_timer_sync(&wlvif->rx_streaming_timer);
+ cancel_work_sync(&wlvif->rx_streaming_enable_work);
+ cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->pspoll_work);
mutex_lock(&wl->mutex);
-
- /* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_reset(wl, reset_tx_queues);
- wl1271_power_off(wl);
-
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
- wl->ssid_len = 0;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->p2p = 0;
- wl->band = IEEE80211_BAND_2GHZ;
-
- wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
- wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->tx_blocks_available = 0;
- wl->tx_allocated_blocks = 0;
- wl->tx_results_count = 0;
- wl->tx_packets_count = 0;
- wl->time_offset = 0;
- wl->session_counter = 0;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wl->vif = NULL;
- wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl1271_free_ap_keys(wl);
- memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
- wl->ap_fw_ps_map = 0;
- wl->ap_ps_map = 0;
- wl->sched_scanning = false;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- memset(wl->roles_map, 0, sizeof(wl->roles_map));
- memset(wl->links_map, 0, sizeof(wl->links_map));
- memset(wl->roc_map, 0, sizeof(wl->roc_map));
- wl->active_sta_count = 0;
-
- /* The system link is always allocated */
- __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
- /*
- * this is performed after the cancel_work calls and the associated
- * mutex_lock, so that wl1271_op_add_interface does not accidentally
- * get executed before all these vars have been reset.
- */
- wl->flags = 0;
-
- wl->tx_blocks_freed = 0;
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- wl->tx_pkts_freed[i] = 0;
- wl->tx_allocated_pkts[i] = 0;
- }
-
- wl1271_debugfs_reset(wl);
-
- kfree(wl->fw_status);
- wl->fw_status = NULL;
- kfree(wl->tx_res_if);
- wl->tx_res_if = NULL;
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl12xx_vif *iter;
mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF ||
+ !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ goto out;
+
/*
* wl->vif can be null here if someone shuts down the interface
* just when hardware recovery has been started.
*/
- if (wl->vif) {
- WARN_ON(wl->vif != vif);
- __wl1271_op_remove_interface(wl, true);
- }
+ wl12xx_for_each_wlvif(wl, iter) {
+ if (iter != wlvif)
+ continue;
+ __wl1271_op_remove_interface(wl, vif, true);
+ break;
+ }
+ WARN_ON(iter != wlvif);
+out:
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->recovery_work);
}
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p)
+{
+ wl1271_op_remove_interface(hw, vif);
+
+ vif->type = ieee80211_iftype_p2p(new_type, p2p);
+ vif->p2p = p2p;
+ return wl1271_op_add_interface(hw, vif);
+}
+
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool set_assoc)
{
int ret;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
/*
* One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2314,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* Keep the below message for now, unless it starts bothering
* users who really like to roam a lot :)
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
if (set_assoc)
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
if (is_ibss)
- ret = wl12xx_cmd_role_start_ibss(wl);
+ ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
else
- ret = wl12xx_cmd_role_start_sta(wl);
+ ret = wl12xx_cmd_role_start_sta(wl, wlvif);
if (ret < 0)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -2189,19 +2336,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* the join. The acx_aid starts the keep-alive process, and the order
* of the commands below is relevant.
*/
- ret = wl1271_acx_keep_alive_mode(wl, true);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
goto out;
- ret = wl1271_acx_aid(wl, wl->aid);
+ ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
- ret = wl1271_cmd_build_klv_null_data(wl);
+ ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
goto out;
@@ -2210,72 +2358,63 @@ out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+ if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
wl12xx_cmd_stop_channel_switch(wl);
- ieee80211_chswitch_done(wl->vif, false);
+ ieee80211_chswitch_done(vif, false);
}
/* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl);
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
goto out;
- memset(wl->bssid, 0, ETH_ALEN);
-
/* reset TX security counters on a clean disconnect */
- wl->tx_security_last_seq_lsb = 0;
- wl->tx_security_seq = 0;
+ wlvif->tx_security_last_seq_lsb = 0;
+ wlvif->tx_security_seq = 0;
out:
return ret;
}
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- wl->basic_rate_set = wl->bitrate_masks[wl->band];
- wl->rate_set = wl->basic_rate_set;
+ wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+ wlvif->rate_set = wlvif->basic_rate_set;
}
-static bool wl12xx_is_roc(struct wl1271 *wl)
-{
- u8 role_id;
-
- role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
- if (role_id >= WL12XX_MAX_ROLES)
- return false;
-
- return true;
-}
-
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
{
int ret;
+ bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return 0;
if (idle) {
/* no need to croc if we weren't busy (e.g. during boot) */
- if (wl12xx_is_roc(wl)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
- wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
@@ -2283,75 +2422,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
ieee80211_sched_scan_stopped(wl->hw);
}
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+ set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
}
out:
return ret;
}
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_conf *conf, u32 changed)
{
- struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
- bool is_ap;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+ int channel, ret;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
- " changed 0x%x",
- channel,
- conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
- changed);
-
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
- wl1271_tx_flush(wl);
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- /* we support configuring the channel and band while off */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_POWER))
- wl->power_level = conf->power_level;
-
- goto out;
- }
-
- is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wl->band != conf->channel->band) ||
- (wl->channel != channel))) {
+ ((wlvif->band != conf->channel->band) ||
+ (wlvif->channel != channel))) {
/* send all pending packets */
wl1271_tx_work_locked(wl);
- wl->band = conf->channel->band;
- wl->channel = channel;
+ wlvif->band = conf->channel->band;
+ wlvif->channel = channel;
if (!is_ap) {
/*
@@ -2360,24 +2456,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* possible rate for the band as a fixed rate for
* association frames and other control messages.
*/
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ wl1271_set_band_rate(wl, wlvif);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- if (wl12xx_is_roc(wl)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags)) {
+ if (wl12xx_dev_role_started(wlvif)) {
/* roaming */
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
- goto out_sleep;
+ return ret;
}
- ret = wl1271_join(wl, false);
+ ret = wl1271_join(wl, wlvif, false);
if (ret < 0)
wl1271_warning("cmd join on channel "
"failed %d", ret);
@@ -2387,66 +2486,114 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* not idle. otherwise, CROC will be called
* anyway.
*/
- if (wl12xx_is_roc(wl) &&
+ if (wl12xx_dev_role_started(wlvif) &&
!(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- goto out_sleep;
+ return ret;
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("roc failed %d",
- ret);
+ return ret;
}
}
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
- ret = wl1271_sta_handle_idle(wl,
- conf->flags & IEEE80211_CONF_IDLE);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
/*
* if mac80211 changes the PSM mode, make sure the mode is not
* incorrectly changed after the pspoll failure active window.
*/
if (changed & IEEE80211_CONF_CHANGE_PS)
- clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
if (conf->flags & IEEE80211_CONF_PS &&
- !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+ set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm enabled");
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm disabled");
- clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
- if (test_bit(WL1271_FLAG_PSM, &wl->flags))
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
}
- if (conf->power_level != wl->power_level) {
- ret = wl1271_acx_tx_power(wl, conf->power_level);
+ if (conf->power_level != wlvif->power_level) {
+ ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
- goto out_sleep;
+ return ret;
+
+ wlvif->power_level = conf->power_level;
+ }
+ return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_conf *conf = &hw->conf;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
+ channel,
+ conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
+
+ /*
+ * mac80211 will go to idle nearly immediately after transmitting some
+ * frames, such as the deauth. To make sure those frames reach the air,
+ * wait here until the TX queue is fully flushed.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_tx_flush(wl);
+
+ mutex_lock(&wl->mutex);
+
+ /* we support configuring the channel and band even while off */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* configure each interface */
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+ if (ret < 0)
+ goto out_sleep;
}
out_sleep:
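The hunks above are the core of the refactor: single-interface fields on struct wl1271 (wl->flags, wl->band, wl->basic_rate, ...) move into per-interface state on struct wl12xx_vif, and wl1271_op_config() now walks every registered interface and calls a per-vif helper. As a minimal, self-contained sketch of that pattern (the struct members, flag names and the for_each macro below are illustrative placeholders, not the driver's actual definitions):

#include <linux/list.h>
#include <linux/bitops.h>

enum my_vif_flags {
	MYVIF_FLAG_IN_USE,
	MYVIF_FLAG_STA_ASSOCIATED,
};

struct my_vif {
	struct list_head list;		/* linked on the device's vif list */
	unsigned long flags;		/* per-interface flag bits */
	int channel;
};

struct my_dev {
	struct list_head vif_list;	/* all interfaces on this device */
};

/* walk every interface, in the spirit of wl12xx_for_each_wlvif() */
#define my_for_each_vif(dev, vif) \
	list_for_each_entry(vif, &(dev)->vif_list, list)

static int my_config_all(struct my_dev *dev, int channel)
{
	struct my_vif *vif;

	my_for_each_vif(dev, vif) {
		if (!test_bit(MYVIF_FLAG_IN_USE, &vif->flags))
			continue;
		vif->channel = channel;	/* per-vif, not per-device, state */
	}
	return 0;
}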
@@ -2509,6 +2656,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
{
struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2675,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ false,
+ NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
/*
@@ -2551,9 +2705,10 @@ out:
kfree(fp);
}
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 id, u8 key_type, u8 key_size,
+ const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -2568,10 +2723,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
* an existing key.
*/
for (i = 0; i < MAX_NUM_KEYS; i++) {
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- if (wl->recorded_ap_keys[i]->id == id) {
+ if (wlvif->ap.recorded_keys[i]->id == id) {
wl1271_warning("trying to record key replacement");
return -EINVAL;
}
@@ -2592,21 +2747,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
- wl->recorded_ap_keys[i] = ap_key;
+ wlvif->ap.recorded_keys[i] = ap_key;
return 0;
}
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
for (i = 0; i < MAX_NUM_KEYS; i++) {
- kfree(wl->recorded_ap_keys[i]);
- wl->recorded_ap_keys[i] = NULL;
+ kfree(wlvif->ap.recorded_keys[i]);
+ wlvif->ap.recorded_keys[i] = NULL;
}
}
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret = 0;
struct wl1271_ap_key *key;
@@ -2614,15 +2769,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
for (i = 0; i < MAX_NUM_KEYS; i++) {
u8 hlid;
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- key = wl->recorded_ap_keys[i];
+ key = wlvif->ap.recorded_keys[i];
hlid = key->hlid;
if (hlid == WL12XX_INVALID_LINK_ID)
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
- ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
@@ -2635,23 +2790,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
}
if (wep_key_added) {
- ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
- wl->ap_bcast_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+ wlvif->ap.bcast_hlid);
if (ret < 0)
goto out;
}
out:
- wl1271_free_ap_keys(wl);
+ wl1271_free_ap_keys(wl, wlvif);
return ret;
}
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
u16 tx_seq_16, struct ieee80211_sta *sta)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap) {
struct wl1271_station *wl_sta;
@@ -2661,10 +2817,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
}
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* We do not support removing keys after AP shutdown.
* Pretend we do to make mac80211 happy.
@@ -2672,12 +2828,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (action != KEY_ADD_OR_REPLACE)
return 0;
- ret = wl1271_record_ap_key(wl, id,
+ ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
} else {
- ret = wl1271_cmd_set_ap_key(wl, action,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
@@ -2718,10 +2874,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* don't remove key if hlid was already deleted */
if (action == KEY_REMOVE &&
- wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
- ret = wl1271_cmd_set_sta_key(wl, action,
+ ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
id, key_type, key_size,
key, addr, tx_seq_32,
tx_seq_16);
@@ -2731,8 +2887,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* the default WEP key needs to be configured at least once */
if (key_type == KEY_WEP) {
ret = wl12xx_cmd_set_default_wep_key(wl,
- wl->default_key,
- wl->sta_hlid);
+ wlvif->default_key,
+ wlvif->sta.hlid);
if (ret < 0)
return ret;
}
@@ -2747,6 +2903,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
@@ -2782,20 +2939,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = KEY_AES;
- key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2963,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2974,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
- ret = wl1271_set_key(wl, KEY_REMOVE,
+ ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
0, 0, sta);
@@ -2847,6 +3004,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -2874,18 +3033,18 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- /* cancel ROC before scanning */
- if (wl12xx_is_roc(wl)) {
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- /* don't allow scanning right now */
- ret = -EBUSY;
- goto out_sleep;
- }
- wl12xx_croc(wl, wl->dev_role_id);
- wl12xx_cmd_role_stop_dev(wl);
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ test_bit(wlvif->role_id, wl->roc_map)) {
+ /* don't allow scanning right now */
+ ret = -EBUSY;
+ goto out_sleep;
}
- ret = wl1271_scan(hw->priv, ssid, len, req);
+ /* cancel ROC before scanning */
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -2921,6 +3080,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
}
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -2938,6 +3098,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_sched_scan_ies *ies)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3109,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, req, ies);
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl);
+ ret = wl1271_scan_sched_scan_start(wl, wlvif);
if (ret < 0)
goto out_sleep;
@@ -3017,6 +3178,7 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret = 0;
mutex_lock(&wl->mutex);
@@ -3030,10 +3192,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
if (ret < 0)
goto out;
- ret = wl1271_acx_rts_threshold(wl, value);
- if (ret < 0)
- wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+ if (ret < 0)
+ wl1271_warning("set rts threshold failed: %d", ret);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -3042,9 +3205,10 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
int offset)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ssid_len;
const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb->len - offset);
@@ -3060,8 +3224,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
return -EINVAL;
}
- wl->ssid_len = ssid_len;
- memcpy(wl->ssid, ptr+2, ssid_len);
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
return 0;
}
@@ -3096,18 +3260,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
skb_trim(skb, skb->len - len);
}
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
- u8 *probe_rsp_data,
- size_t probe_rsp_len,
- u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ieee80211_proberesp_get(wl->hw, vif);
+ if (!skb)
+ return -EOPNOTSUPP;
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ skb->data,
+ skb->len, 0,
+ rates);
+
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ u8 *probe_rsp_data,
+ size_t probe_rsp_len,
+ u32 rates)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
/* no need to change probe response if the SSID is set correctly */
- if (wl->ssid_len > 0)
+ if (wlvif->ssid_len > 0)
return wl1271_cmd_template_set(wl,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
@@ -3153,16 +3339,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
if (ret < 0) {
wl1271_warning("Set slot time failed %d", ret);
goto out;
@@ -3171,16 +3359,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_ENABLE);
else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_DISABLE);
if (ret < 0) {
wl1271_warning("Set ctsprotect failed %d", ret);
goto out;
@@ -3196,14 +3386,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
if ((changed & BSS_CHANGED_BEACON_INT)) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
- wl->beacon_int = bss_conf->beacon_int;
+ wlvif->beacon_int = bss_conf->beacon_int;
+ }
+
+ if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+ u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+ }
}
if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3413,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
u16 tmpl_id;
- if (!beacon)
+ if (!beacon) {
+ ret = -EINVAL;
goto out;
+ }
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
- min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3437,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
goto out;
}
+ /*
+ * In case we already have a probe-resp beacon set explicitly
+ * by usermode, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
/* remove TIM ie from probe response */
wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
@@ -3254,7 +3462,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
beacon->data,
beacon->len,
min_rate);
@@ -3264,12 +3472,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
beacon->data,
beacon->len, 0,
min_rate);
+end_bcn:
dev_kfree_skb(beacon);
if (ret < 0)
goto out;
}
out:
+ if (ret != 0)
+ wl1271_error("beacon info change failed: %d", ret);
return ret;
}
@@ -3279,23 +3490,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if ((changed & BSS_CHANGED_BASIC_RATES)) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate = wl1271_tx_min_rate_get(wl,
- wl->basic_rate_set);
+ wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
- ret = wl1271_init_ap_rates(wl);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0) {
wl1271_error("AP rate policy change failed %d", ret);
goto out;
}
- ret = wl1271_ap_init_templates(wl);
+ ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
}
@@ -3306,38 +3518,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
if (bss_conf->enable_beacon) {
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_start_ap(wl);
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_start_ap(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_ap_init_hwenc(wl);
+ ret = wl1271_ap_init_hwenc(wl, wlvif);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
wl1271_debug(DEBUG_AP, "started AP");
}
} else {
- if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_stop_ap(wl);
+ if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+ clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+ &wlvif->flags);
wl1271_debug(DEBUG_AP, "stopped AP");
}
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3569,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool do_join = false, set_assoc = false;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
@@ -3373,14 +3588,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
if (bss_conf->ibss_joined) {
- set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+ set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
- &wl->flags)) {
- wl1271_unjoin(wl);
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+ &wlvif->flags)) {
+ wl1271_unjoin(wl, wlvif);
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3396,46 +3610,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
- if (bss_conf->enable_beacon)
- wl->set_bss_type = BSS_TYPE_IBSS;
- else
- wl->set_bss_type = BSS_TYPE_STA_BSS;
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE) {
+ ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ if (ret < 0)
+ wl1271_warning("idle mode change failed %d", ret);
+ }
+
if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
- ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
bss_conf->cqm_rssi_thold,
bss_conf->cqm_rssi_hyst);
if (ret < 0)
goto out;
- wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if ((changed & BSS_CHANGED_BSSID) &&
- /*
- * Now we know the correct bssid, so we send a new join command
- * and enable the BSSID filter
- */
- memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wl->bssid)) {
- ret = wl1271_cmd_build_null_data(wl);
+ if (changed & BSS_CHANGED_BSSID)
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_build_qos_null_data(wl);
+ ret = wl1271_build_qos_null_data(wl, vif);
if (ret < 0)
goto out;
/* Need to update the BSSID (for filtering etc) */
do_join = true;
}
- }
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
rcu_read_lock();
@@ -3459,26 +3667,28 @@ sta_not_found:
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
- wl->aid = bss_conf->aid;
+ wlvif->aid = bss_conf->aid;
set_assoc = true;
- wl->ps_poll_failures = 0;
+ wlvif->ps_poll_failures = 0;
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
*/
rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
if (sta_rate_set)
- wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
- wl->band);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
@@ -3488,53 +3698,56 @@ sta_not_found:
* updates it by itself when the first beacon is
* received after a join.
*/
- ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
/*
* Get a template for hardware connection maintenance
*/
- dev_kfree_skb(wl->probereq);
- wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
- wl1271_ssid_set(wl, wl->probereq, ieoffset);
+ wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
/* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, true);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
if (ret < 0)
goto out;
} else {
/* use defaults when not associated */
bool was_assoc =
- !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
- &wl->flags);
+ !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags);
bool was_ifup =
- !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
- &wl->flags);
- wl->aid = 0;
+ !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+ &wlvif->flags);
+ wlvif->aid = 0;
/* free probe-request template */
- dev_kfree_skb(wl->probereq);
- wl->probereq = NULL;
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
/* re-enable dynamic ps - just in case */
- ieee80211_enable_dyn_ps(wl->vif);
+ ieee80211_enable_dyn_ps(vif);
/* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
/* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, false);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
/* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
goto out;
@@ -3546,7 +3759,7 @@ sta_not_found:
* no IF_OPER_UP notification.
*/
if (!was_ifup) {
- ret = wl12xx_croc(wl, wl->role_id);
+ ret = wl12xx_croc(wl, wlvif->role_id);
if (ret < 0)
goto out;
}
@@ -3555,17 +3768,16 @@ sta_not_found:
* roaming on the same channel. until we will
* have a better flow...)
*/
- if (test_bit(wl->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
goto out;
}
- wl1271_unjoin(wl);
- if (!(conf_flags & IEEE80211_CONF_IDLE)) {
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
- }
+ wl1271_unjoin(wl, wlvif);
+ if (!(conf_flags & IEEE80211_CONF_IDLE))
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3576,27 +3788,28 @@ sta_not_found:
if (bss_conf->ibss_joined) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
/* by default, use 11b + OFDM rates */
- wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
if (bss_conf->arp_addr_cnt == 1 &&
bss_conf->arp_filter_enabled) {
@@ -3606,24 +3819,24 @@ sta_not_found:
* isn't being set (when sending), so we have to
* reconfigure the template upon every ip change.
*/
- ret = wl1271_cmd_build_arp_rsp(wl, addr);
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
goto out;
}
- ret = wl1271_acx_arp_ip_filter(wl,
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif,
ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
- ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
if (ret < 0)
goto out;
}
if (do_join) {
- ret = wl1271_join(wl, set_assoc);
+ ret = wl1271_join(wl, wlvif, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
@@ -3631,35 +3844,31 @@ sta_not_found:
/* ROC until connected (after EAPOL exchange) */
if (!is_ibss) {
- ret = wl12xx_roc(wl, wl->role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
if (ret < 0)
goto out;
- wl1271_check_operstate(wl,
+ wl1271_check_operstate(wl, wlvif,
ieee80211_get_operstate(vif));
}
/*
* stop device role if started (we might already be in
- * STA role). TODO: make it better.
+ * STA/IBSS role).
*/
- if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
/* If we want to go in PSM but we're not there yet */
- if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
- !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
enum wl1271_cmd_ps_mode mode;
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode,
- wl->basic_rate,
+ ret = wl1271_ps_set_mode(wl, wlvif, mode,
+ wlvif->basic_rate,
true);
if (ret < 0)
goto out;
@@ -3673,7 +3882,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
true,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap true failed %d",
ret);
@@ -3685,7 +3894,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
false,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap false failed %d",
ret);
@@ -3697,7 +3906,7 @@ sta_not_found:
/* Handle HT information change. Done after join. */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3924,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct wl1271 *wl = hw->priv;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3936,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3746,6 +3959,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ps_scheme;
int ret = 0;
@@ -3758,31 +3972,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- if (wl->state == WL1271_STATE_OFF) {
- /*
- * If the state is off, the parameters will be recorded and
- * configured on init. This happens in AP-mode.
- */
- struct conf_tx_ac_category *conf_ac =
- &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
- struct conf_tx_tid *conf_tid =
- &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
-
- conf_ac->ac = wl1271_tx_get_queue(queue);
- conf_ac->cw_min = (u8)params->cw_min;
- conf_ac->cw_max = params->cw_max;
- conf_ac->aifsn = params->aifs;
- conf_ac->tx_op_limit = params->txop << 5;
-
- conf_tid->queue_id = wl1271_tx_get_queue(queue);
- conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
- conf_tid->tsid = wl1271_tx_get_queue(queue);
- conf_tid->ps_scheme = ps_scheme;
- conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
- conf_tid->apsd_conf[0] = 0;
- conf_tid->apsd_conf[1] = 0;
+ if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- }
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
@@ -3792,13 +3983,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
* the txop is confed in units of 32us by the mac80211,
* we need us
*/
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4052,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static int wl1271_allocate_sta(struct wl1271 *wl,
- struct ieee80211_sta *sta,
- u8 *hlid)
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
- int id;
+ int ret;
- id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
- if (id >= AP_MAX_STATIONS) {
+
+ if (wl->active_sta_count >= AP_MAX_STATIONS) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
- set_bit(id, wl->ap_hlid_map);
- wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
- *hlid = wl_sta->hlid;
+ ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+ if (ret < 0) {
+ wl1271_warning("could not allocate HLID - too many links");
+ return -EBUSY;
+ }
+
+ set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
wl->active_sta_count++;
return 0;
}
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
- int id = hlid - WL1271_AP_STA_HLID_START;
-
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
return;
- if (!test_bit(id, wl->ap_hlid_map))
- return;
-
- clear_bit(id, wl->ap_hlid_map);
+ clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
}
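The station bookkeeping above stops indexing a fixed AP hlid map and instead allocates links through wl12xx_allocate_link()/wl12xx_free_link() against a per-vif bitmap. A rough sketch of the underlying bitmap-allocator pattern (struct and function names here are placeholders, not the actual wl12xx helpers):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_LINKS	12

struct link_table {
	unsigned long map[BITS_TO_LONGS(MAX_LINKS)];
};

/* hand out the first free link id and mark it busy */
static int link_alloc(struct link_table *t, u8 *hlid)
{
	unsigned long id = find_first_zero_bit(t->map, MAX_LINKS);

	if (id >= MAX_LINKS)
		return -EBUSY;

	__set_bit(id, t->map);
	*hlid = id;
	return 0;
}

/* release a link id so it can be reused */
static void link_free(struct link_table *t, u8 hlid)
{
	__clear_bit(hlid, t->map);
}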
@@ -3906,6 +4097,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
@@ -3914,20 +4107,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
- ret = wl1271_allocate_sta(wl, sta, &hlid);
+ ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
goto out;
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
- ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+ ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
goto out_sleep;
@@ -3944,7 +4140,7 @@ out_sleep:
out_free_sta:
if (ret < 0)
- wl1271_free_sta(wl, hlid);
+ wl1271_free_sta(wl, wlvif, hlid);
out:
mutex_unlock(&wl->mutex);
@@ -3956,6 +4152,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0, id;
@@ -3964,14 +4161,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
- id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
- if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ id = wl_sta->hlid;
+ if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4179,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- wl1271_free_sta(wl, wl_sta->hlid);
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4196,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
u8 buf_size)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
@@ -4016,10 +4214,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
goto out;
}
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- hlid = wl->sta_hlid;
- ba_bitmap = &wl->ba_rx_bitmap;
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ hlid = wlvif->sta.hlid;
+ ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+ } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4237,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!wl->ba_support || !wl->ba_allowed) {
+ if (!wlvif->ba_support || !wlvif->ba_allowed) {
ret = -ENOTSUPP;
break;
}
@@ -4108,8 +4306,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
- int i;
+ int i, ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4317,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
- wl->bitrate_masks[i] =
+ wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
i);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+ }
+out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4357,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
- mutex_unlock(&wl->mutex);
- ieee80211_chswitch_done(wl->vif, false);
- return;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+ }
+ goto out;
}
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+ /* TODO: change mac80211 to pass vif as param */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl12xx_cmd_channel_switch(wl, ch_switch);
- if (!ret)
- set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+ if (!ret)
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ }
wl1271_ps_elp_sleep(wl);
@@ -4170,10 +4394,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
/* packets are considered pending if in the TX queue or the FW */
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
- /* the above is appropriate for STA mode for PS purposes */
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
out:
mutex_unlock(&wl->mutex);
@@ -4410,6 +4630,7 @@ static const struct ieee80211_ops wl1271_ops = {
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
+ .change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
.suspend = wl1271_op_suspend,
.resume = wl1271_op_resume,
@@ -4604,7 +4825,7 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -4645,9 +4866,8 @@ int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
{
if (wl->state == WL1271_STATE_PLT)
__wl1271_plt_stop(wl);
@@ -4657,9 +4877,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
wl->mac80211_registered = false;
}
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4955,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
- SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ /* the FW answers probe-requests in AP-mode */
+ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wl->hw->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
+ wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
wl->hw->max_rx_aggregation_subframes = 8;
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
- struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+ BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
@@ -4765,41 +4990,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_hw_alloc;
}
- plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
- if (!plat_dev) {
- wl1271_error("could not allocate platform_device");
- ret = -ENOMEM;
- goto err_plat_alloc;
- }
-
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
+ INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
- wl->plat_dev = plat_dev;
-
- for (i = 0; i < NUM_TX_QUEUES; i++)
- skb_queue_head_init(&wl->tx_queue[i]);
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < AP_MAX_LINKS; j++)
+ for (j = 0; j < WL12XX_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
- INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
- INIT_WORK(&wl->rx_streaming_enable_work,
- wl1271_rx_streaming_enable_work);
- INIT_WORK(&wl->rx_streaming_disable_work,
- wl1271_rx_streaming_disable_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4808,41 +5018,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
}
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
- wl->default_key = 0;
wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
- wl->tx_security_seq = 0;
- wl->tx_security_last_seq_lsb = 0;
wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
wl->system_hlid = WL12XX_SYSTEM_HLID;
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->session_counter = 0;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
wl->active_sta_count = 0;
- setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wl);
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -4860,8 +5050,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5071,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_dummy_packet;
}
- /* Register platform device */
- ret = platform_device_register(wl->plat_dev);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto err_fwlog;
- }
- dev_set_drvdata(&wl->plat_dev->dev, wl);
-
- /* Create sysfs file to control bt coex state */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file bt_coex_state");
- goto err_platform;
- }
-
- /* Create sysfs file to get HW PG version */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file hw_pg_ver");
- goto err_bt_coex_state;
- }
-
- /* Create sysfs file for the FW log */
- ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file fwlog");
- goto err_hw_pg_ver;
- }
-
return hw;
-err_hw_pg_ver:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
- platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
- free_page((unsigned long)wl->fwlog);
-
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
@@ -4937,18 +5084,14 @@ err_wq:
err_hw:
wl1271_debugfs_exit(wl);
- kfree(plat_dev);
-
-err_plat_alloc:
ieee80211_free_hw(hw);
err_hw_alloc:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
{
/* Unblock any fwlog readers */
mutex_lock(&wl->mutex);
@@ -4956,17 +5099,15 @@ int wl1271_free_hw(struct wl1271 *wl)
wake_up_interruptible_all(&wl->fwlog_waitq);
mutex_unlock(&wl->mutex);
- device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+ device_remove_bin_file(wl->dev, &fwlog_attr);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- platform_device_unregister(wl->plat_dev);
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
- kfree(wl->plat_dev);
wl1271_debugfs_exit(wl);
@@ -4983,7 +5124,174 @@ int wl1271_free_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+ /* don't enqueue a work right now. mark it as pending */
+ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+ wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+ disable_irq_nosync(wl->irq);
+ pm_wakeup_event(wl->dev, 0);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ unsigned long irqflags;
+ int ret = -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->irq = platform_get_irq(pdev, 0);
+ wl->ref_clock = pdata->board_ref_clock;
+ wl->tcxo_clock = pdata->board_tcxo_clock;
+ wl->platform_quirks = pdata->platform_quirks;
+ wl->set_power = pdata->set_power;
+ wl->dev = &pdev->dev;
+ wl->if_ops = pdata->ops;
+
+ platform_set_drvdata(pdev, wl);
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ irqflags,
+ pdev->name, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free_hw;
+ }
+
+ ret = enable_irq_wake(wl->irq);
+ if (!ret) {
+ wl->irq_wake_enabled = true;
+ device_init_wakeup(wl->dev, 1);
+ if (pdata->pwr_in_suspend)
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+ }
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto out_irq;
+ }
+
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto out_bt_coex_state;
+ }
+
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(wl->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto out_hw_pg_ver;
+ }
+
+ return 0;
+
+out_hw_pg_ver:
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+ free_irq(wl->irq, wl);
+
+out_free_hw:
+ wl1271_free_hw(wl);
+
+out:
+ return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+
+ if (wl->irq_wake_enabled) {
+ device_init_wakeup(wl->dev, 0);
+ disable_irq_wake(wl->irq);
+ }
+ wl1271_unregister_hw(wl);
+ free_irq(wl->irq, wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+ { "wl12xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+ .probe = wl12xx_probe,
+ .remove = __devexit_p(wl12xx_remove),
+ .id_table = wl12xx_id_table,
+ .driver = {
+ .name = "wl12xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl12xx_init(void)
+{
+ return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+ platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
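The tail of main.c turns the code from an EXPORT_SYMBOL-based library into a proper platform driver: wl12xx_probe() takes the IRQ from the platform device, splits interrupt handling with request_threaded_irq() (the hard handler only completes the ELP wakeup and defers the rest to the thread), and the sysfs files move from the old private platform device to wl->dev. A stripped-down sketch of that probe/remove shape, using only the generic platform and IRQ APIs (the mydrv names are placeholders, not wl12xx code):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct mydrv {
	int irq;
};

static irqreturn_t mydrv_hardirq(int irq, void *cookie)
{
	/* minimal work in hard-IRQ context, defer the rest to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t mydrv_thread(int irq, void *cookie)
{
	/* heavy lifting runs here, with interrupts enabled */
	return IRQ_HANDLED;
}

static int mydrv_probe(struct platform_device *pdev)
{
	struct mydrv *drv;
	int ret;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->irq = platform_get_irq(pdev, 0);
	if (drv->irq < 0)
		return drv->irq;

	platform_set_drvdata(pdev, drv);

	ret = request_threaded_irq(drv->irq, mydrv_hardirq, mydrv_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   pdev->name, drv);
	if (ret)
		return ret;

	return 0;
}

static int mydrv_remove(struct platform_device *pdev)
{
	struct mydrv *drv = platform_get_drvdata(pdev);

	free_irq(drv->irq, drv);
	return 0;
}

static struct platform_driver mydrv_driver = {
	.probe	= mydrv_probe,
	.remove	= mydrv_remove,
	.driver	= {
		.name = "mydrv",
	},
};
module_platform_driver(mydrv_driver);
MODULE_LICENSE("GPL");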
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2efd4..a2bdacdd7e1 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "debug.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,18 @@ void wl1271_elp_work(struct work_struct *work)
if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
goto out;
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
goto out;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ goto out;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ goto out;
+ }
+
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +74,20 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
+
/* we shouldn't get consecutive sleep requests */
if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
return;
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags))
- return;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return;
+ }
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +159,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
{
int ret;
@@ -152,39 +168,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- ret = wl1271_acx_wake_up_conditions(wl);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
- set_bit(WL1271_FLAG_PSM, &wl->flags);
+ set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wl->band == IEEE80211_BAND_2GHZ) {
- ret = wl1271_acx_bet_enable(wl, false);
+ if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
}
- /* disable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
- clear_bit(WL1271_FLAG_PSM, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
}
@@ -223,9 +234,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
wl1271_handle_tx_low_watermark(wl);
}
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (test_bit(hlid, &wl->ap_ps_map))
return;
@@ -235,7 +248,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
clean_queues);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for starting ps",
wl->links[hlid].addr);
@@ -253,9 +266,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
__set_bit(hlid, &wl->ap_ps_map);
}
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (!test_bit(hlid, &wl->ap_ps_map))
return;
@@ -265,7 +279,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, &wl->ap_ps_map);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for ending ps",
wl->links[hlid].addr);
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc9b62..a12052f0202 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
#include "wl12xx.h"
#include "acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#define WL1271_PS_COMPLETE_TIMEOUT 500
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f39758..df34d5977b9 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
/* Firmware image load chunk size */
-#define CHUNK_SIZE 512
+#define CHUNK_SIZE 16384
/* Firmware image header size */
#define FW_HDR_SIZE 8
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe9ccc..4fbd2a722ff 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
#include <linux/sched.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "rx.h"
+#include "tx.h"
#include "io.h"
static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
- bool unaligned)
+ bool unaligned, u8 *hlid)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
* payload aligned to 4 bytes.
*/
memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+ *hlid = desc->hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb->len - desc->pad_len,
beacon ? "beacon" : "",
- seq_num);
+ seq_num, *hlid);
skb_trim(skb, skb->len - desc->pad_len);
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
u32 mem_block;
u32 pkt_length;
u32 pkt_offset;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- bool had_data = false;
+ u8 hlid;
bool unaligned = false;
while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
- pkt_length, unaligned) == 1)
- had_data = true;
+ pkt_length, unaligned,
+ &hlid) == 1) {
+ if (hlid < WL12XX_MAX_LINKS)
+ __set_bit(hlid, active_hlids);
+ else
+ WARN(1,
+ "hlid exceeded WL12XX_MAX_LINKS "
+ "(%d)\n", hlid);
+ }
wl->rx_counter++;
drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* restart rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c671cf3..e24111ececc 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
#include <linux/ieee80211.h>
#include "wl12xx.h"
+#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
bool is_sta, is_ibss;
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
+ vif = wl->scan_vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
+ wl->scan_vif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
/* return to ROC if needed */
- is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
- is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
- if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
- (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
- !test_bit(wl->dev_role_id, wl->roc_map)) {
+ is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+ is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+ if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+ (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+ !test_bit(wlvif->dev_role_id, wl->roc_map)) {
/* restore remain on channel */
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ wl12xx_start_dev(wl, wlvif);
}
wl1271_ps_elp_sleep(wl);
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
- cmd->params.role_id = wl->role_id;
+ cmd->params.role_id = wlvif->role_id;
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.tid_trigger = 0;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
- memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
- ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie, wl->scan.req->ie_len,
- band);
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+ wl->scan.ssid_len, wl->scan.req->ie,
+ wl->scan.req->ie_len, band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -241,11 +248,12 @@ out:
return ret;
}
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
enum ieee80211_band band;
- u32 rate;
+ u32 rate, mask;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
}
}
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
/*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.ssid_len = 0;
}
+ wl->scan_vif = vif;
wl->scan.req = req;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
return 0;
}
@@ -415,18 +437,19 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_dfs);
- }
- else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ } else {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
- } else {
- channels[j].min_duration =
- cpu_to_le16(c->min_dwell_time_active);
- channels[j].max_duration =
- cpu_to_le16(c->max_dwell_time_active);
}
+
+ channels[j].min_duration =
+ cpu_to_le16(c->min_dwell_time_active);
+ channels[j].max_duration =
+ cpu_to_le16(c->max_dwell_time_active);
+
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
@@ -550,6 +573,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
* so they're used in probe requests.
*/
for (i = 0; i < req->n_ssids; i++) {
+ if (!req->ssids[i].ssid_len)
+ continue;
+
for (j = 0; j < cmd->n_ssids; j++)
if (!memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
@@ -585,6 +611,7 @@ out:
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
@@ -631,7 +658,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[0]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +670,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ],
@@ -667,17 +694,17 @@ out:
return ret;
}
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
- if (wl->bss_type != BSS_TYPE_STA_BSS)
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
- if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
@@ -727,7 +754,6 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
- wl->sched_scanning = false;
out_free:
kfree(stop);
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 92115156522..a7ed43dc08c 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
#include "wl12xx.h"
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a8980723..468a50553fa 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+struct wl12xx_sdio_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
+
static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
- sdio_claim_host(wl->if_priv);
- sdio_set_block_size(wl->if_priv, blksz);
- sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+ unsigned int blksz)
{
- struct wl1271 *wl = cookie;
- unsigned long flags;
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
- /* don't enqueue a work right now. mark it as pending */
- set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
- wl1271_debug(DEBUG_IRQ, "should not enqueue work");
- disable_irq_nosync(wl->irq);
- pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
+ sdio_claim_host(func);
+ sdio_set_block_size(func, blksz);
+ sdio_release_host(func);
}
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
if (fixed)
ret = sdio_readsb(func, buf, addr, len);
else
ret = sdio_memcpy_fromio(func, buf, addr, len);
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+ addr, len);
}
if (ret)
- wl1271_error("sdio read failed (%d)", ret);
+ dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+ addr, len);
if (fixed)
ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
}
if (ret)
- wl1271_error("sdio write failed (%d)", ret);
+ dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
/* If enabled, tell runtime PM not to power off the card */
if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
return ret;
}
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_disable_func(func);
sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
return ret;
}
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
if (enable)
- return wl1271_sdio_power_on(wl);
+ return wl12xx_sdio_power_on(glue);
else
- return wl1271_sdio_power_off(wl);
+ return wl12xx_sdio_power_off(glue);
}
static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
+ .read = wl12xx_sdio_raw_read,
+ .write = wl12xx_sdio_raw_write,
+ .power = wl12xx_sdio_set_power,
.set_block_size = wl1271_sdio_set_block_size,
};
static int __devinit wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct ieee80211_hw *hw;
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- unsigned long irqflags;
+ struct wl12xx_platform_data *wlan_data;
+ struct wl12xx_sdio_glue *glue;
+ struct resource res[1];
mmc_pm_flag_t mmcflags;
- int ret;
+ int ret = -ENOMEM;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
return -ENODEV;
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&func->dev, "can't allocate glue\n");
+ goto out;
+ }
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
+ glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
- wl1271_error("missing wlan platform data: %d", ret);
- goto out_free;
+ dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+ goto out_free_glue;
}
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
- wl->platform_quirks = wlan_data->platform_quirks;
+ /* if sdio can keep power while host is suspended, enable wow */
+ mmcflags = sdio_get_host_pm_caps(func);
+ dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
+ if (mmcflags & MMC_PM_KEEP_POWER)
+ wlan_data->pwr_in_suspend = true;
+
+ wlan_data->ops = &sdio_ops;
- ret = enable_irq_wake(wl->irq);
- if (!ret) {
- wl->irq_wake_enabled = true;
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+ sdio_set_drvdata(func, glue);
- /* if sdio can keep power while host is suspended, enable wow */
- mmcflags = sdio_get_host_pm_caps(func);
- wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
- if (mmcflags & MMC_PM_KEEP_POWER)
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- disable_irq(wl->irq);
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ glue->core->dev.parent = &func->dev;
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ memset(res, 0x00, sizeof(res));
- sdio_set_drvdata(func, wl);
+ res[0].start = wlan_data->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- /* Tell PM core that we don't need the card to be powered now */
- pm_runtime_put_noidle(&func->dev);
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
+ }
+ ret = platform_device_add_data(glue->core, wlan_data,
+ sizeof(*wlan_data));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
+
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't add platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
+out_dev_put:
+ platform_device_put(glue->core);
- out_free:
- wl1271_free_hw(wl);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static void __devexit wl1271_remove(struct sdio_func *func)
{
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- wl1271_unregister_hw(wl);
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
- disable_irq_wake(wl->irq);
- }
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
}
#ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
/* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely */
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
mmc_pm_flag_t sdio_flags;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
- wl->wow_enabled);
+ dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+ wl->wow_enabled);
/* check whether sdio should keep power */
if (wl->wow_enabled) {
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- wl1271_error("can't keep power while host "
- "is suspended");
+ dev_err(dev, "can't keep power while host "
+ "is suspended\n");
ret = -EINVAL;
goto out;
}
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
/* keep power while host suspended */
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
- wl1271_error("error while trying to keep power");
+ dev_err(dev, "error while trying to keep power\n");
goto out;
}
@@ -367,9 +325,10 @@ out:
static int wl1271_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
- wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+ dev_dbg(dev, "wl1271 resume\n");
if (wl->wow_enabled) {
/* claim back host */
sdio_claim_host(func);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9212e..00000000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
- "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
- "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
- struct wl1271 wl;
- struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
- {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- if (fixed)
- ret = sdio_readsb(func, buf, addr, len);
- else
- ret = sdio_memcpy_fromio(func, buf, addr, len);
-
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
- }
-
- if (ret)
- wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
- if (fixed)
- ret = sdio_writesb(func, addr, buf, len);
- else
- ret = sdio_memcpy_toio(func, addr, buf, len);
- }
- if (ret)
- wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
- struct sdio_func *func = wl_to_func(wl);
- int ret;
-
- /* Let the SDIO stack handle wlan_enable control, so we
- * keep host claimed while wlan is in use to keep wl1271
- * alive.
- */
- if (enable) {
- /* Power up the card */
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
-
- /* Runtime PM might be disabled, power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
- goto out;
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- } else {
- sdio_disable_func(func);
- sdio_release_host(func);
-
- /* Runtime PM might be disabled, power off the card manually */
- ret = mmc_power_save_host(func->card->host);
- if (ret < 0)
- goto out;
-
- /* Power down the card */
- ret = pm_runtime_put_sync(&func->dev);
- }
-
-out:
- return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
- u32 elp_reg;
-
- elp_reg = ELPCTRL_WAKE_UP;
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = request_firmware(&fw, WL128X_FW_NAME,
- wl1271_wl_to_dev(wl));
- else
- ret = request_firmware(&fw, WL127X_FW_NAME,
- wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
- return ret;
- }
-
- if (fw->size % 4) {
- wl1271_error("firmware size is not multiple of 32 bits: %zu",
- fw->size);
- ret = -EILSEQ;
- goto out;
- }
-
- wl->fw_len = fw->size;
- wl->fw = vmalloc(wl->fw_len);
-
- if (!wl->fw) {
- wl1271_error("could not allocate memory for the firmware");
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(wl->fw, fw->data, wl->fw_len);
-
- ret = 0;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
- return ret;
- }
-
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
-
- wl->nvs_len = fw->size;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
- struct wl1271_partition_set partition;
- int ret;
-
- msleep(WL1271_PRE_POWER_ON_SLEEP);
- ret = wl1271_power_on(wl);
- if (ret)
- return ret;
-
- msleep(WL1271_POWER_ON_SLEEP);
-
- /* We don't need a real memory partition here, because we only want
- * to use the registers at this point. */
- memset(&partition, 0, sizeof(partition));
- partition.reg.start = REGISTERS_BASE;
- partition.reg.size = REGISTERS_DOWN_SIZE;
- wl1271_set_partition(wl, &partition);
-
- /* ELP module wake up */
- wl1271_fw_wakeup(wl);
-
- /* whal_FwCtrl_BootSm() */
-
- /* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
- /* 1. check if chip id is valid */
-
- switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
- wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
- wl->chip.id);
- break;
- case CHIP_ID_1271_PG20:
- wl1271_notice("chip id 0x%x (1271 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG20:
- wl1271_notice("chip id 0x%x (1283 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG10:
- default:
- wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
- return -ENODEV;
- }
-
- return ret;
-}
-
-static struct wl1271_partition_set part_down = {
- .mem = {
- .start = 0x00000000,
- .size = 0x000177c0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x00008800
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
-};
-
-static int tester(void *data)
-{
- struct wl1271 *wl = data;
- struct sdio_func *func = wl_to_func(wl);
- struct device *pdev = &func->dev;
- int ret = 0;
- bool rx_started = 0;
- bool tx_started = 0;
- uint8_t *tx_buf, *rx_buf;
- int test_size = PAGE_SIZE;
- u32 addr = 0;
- struct wl1271_partition_set partition;
-
- /* We assume chip is powered up and firmware fetched */
-
- memcpy(&partition, &part_down, sizeof(partition));
- partition.mem.start = addr;
- wl1271_set_partition(wl, &partition);
-
- tx_buf = kmalloc(test_size, GFP_KERNEL);
- rx_buf = kmalloc(test_size, GFP_KERNEL);
- if (!tx_buf || !rx_buf) {
- dev_err(pdev,
- "Could not allocate memory. Test will not run.\n");
- ret = -ENOMEM;
- goto free;
- }
-
- memset(tx_buf, 0x5a, test_size);
-
- /* write something in data area so we can read it back */
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- while (!kthread_should_stop()) {
- if (rx && !rx_started) {
- dev_info(pdev, "starting rx test\n");
- rx_started = 1;
- } else if (!rx && rx_started) {
- dev_info(pdev, "stopping rx test\n");
- rx_started = 0;
- }
-
- if (tx && !tx_started) {
- dev_info(pdev, "starting tx test\n");
- tx_started = 1;
- } else if (!tx && tx_started) {
- dev_info(pdev, "stopping tx test\n");
- tx_started = 0;
- }
-
- if (rx_started)
- wl1271_read(wl, addr, rx_buf, test_size, false);
-
- if (tx_started)
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- if (!rx_started && !tx_started)
- msleep(100);
- }
-
-free:
- kfree(tx_buf);
- kfree(rx_buf);
- return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- struct wl1271_test *wl_test;
- int ret = 0;
-
- /* wl1271 has 2 sdio functions we handle just the wlan part */
- if (func->num != 0x02)
- return -ENODEV;
-
- wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
- if (!wl_test) {
- dev_err(&func->dev, "Could not allocate memory\n");
- return -ENOMEM;
- }
-
- wl = &wl_test->wl;
-
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
-
- /* Grab access to FN0 for ELP reg. */
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
- /* Use block mode for transferring over one block size of data */
- func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
- dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
- goto out_free;
- }
-
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
- sdio_set_drvdata(func, wl_test);
-
- /* power up the device */
- ret = wl1271_chip_wakeup(wl);
- if (ret) {
- dev_err(&func->dev, "could not wake up chip\n");
- goto out_free;
- }
-
- if (wl->fw == NULL) {
- ret = wl1271_fetch_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware fetch error\n");
- goto out_off;
- }
- }
-
- /* fetch NVS */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0) {
- dev_err(&func->dev, "NVS fetch error\n");
- goto out_off;
- }
- }
-
- ret = wl1271_load_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware load error: %d\n", ret);
- goto out_free;
- }
-
- dev_info(&func->dev, "initialized\n");
-
- /* I/O testing will be done in the tester thread */
-
- wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
- if (IS_ERR(wl_test->test_task)) {
- dev_err(&func->dev, "unable to create kernel thread\n");
- ret = PTR_ERR(wl_test->test_task);
- goto out_free;
- }
-
- return 0;
-
-out_off:
- /* power off the chip */
- wl1271_power_off(wl);
-
-out_free:
- kfree(wl_test);
- return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
- struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
- /* stop the I/O test thread */
- kthread_stop(wl_test->test_task);
-
- /* power off the chip */
- wl1271_power_off(&wl_test->wl);
-
- vfree(wl_test->wl.fw);
- wl_test->wl.fw = NULL;
- kfree(wl_test->wl.nvs);
- wl_test->wl.nvs = NULL;
-
- kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
- .name = "wl12xx_sdio_test",
- .id_table = wl1271_devices,
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&wl1271_sdio_driver);
- if (ret < 0)
- pr_err("failed to register sdio driver: %d\n", ret);
-
- return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
- sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f971867786..92caa7ce605 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl12xx.h"
@@ -69,35 +70,22 @@
#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi reset");
+ dev_err(child->parent,
+ "could not allocate cmd for spi reset\n");
return;
}
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
- wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi init");
+ dev_err(child->parent,
+ "could not allocate cmd for spi init\n");
return;
}
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
- wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+ spi_sync(to_spi_device(glue->dev), &m);
kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
t[0].len = sizeof(u32);
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (*busy_buf & 0x1)
return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- wl1271_error("SPI read busy-word timeout!\n");
+ dev_err(child->parent, "SPI read busy-word timeout!\n");
return -ETIMEDOUT;
}
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[2];
struct spi_message m;
u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
- wl1271_spi_read_busy(wl)) {
+ wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
return;
}
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
-
- wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!fixed)
addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
}
}
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+ size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[i].len = chunk_len;
spi_message_add_tail(&t[i++], &m);
- wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
if (!fixed)
addr += chunk_len;
buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
cmd++;
}
- spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
- struct wl1271 *wl = cookie;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
- if (wl->set_power)
- wl->set_power(enable);
-
- return 0;
+ spi_sync(to_spi_device(glue->dev), &m);
}
static struct wl1271_if_operations spi_ops = {
- .read = wl1271_spi_raw_read,
- .write = wl1271_spi_raw_write,
- .reset = wl1271_spi_reset,
- .init = wl1271_spi_init,
- .power = wl1271_spi_set_power,
- .dev = wl1271_spi_wl_to_dev,
- .enable_irq = wl1271_spi_enable_interrupts,
- .disable_irq = wl1271_spi_disable_interrupts,
+ .read = wl12xx_spi_raw_read,
+ .write = wl12xx_spi_raw_write,
+ .reset = wl12xx_spi_reset,
+ .init = wl12xx_spi_init,
.set_block_size = NULL,
};
static int __devinit wl1271_probe(struct spi_device *spi)
{
+ struct wl12xx_spi_glue *glue;
struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- unsigned long irqflags;
- int ret;
+ struct resource res[1];
+ int ret = -ENOMEM;
pdata = spi->dev.platform_data;
if (!pdata) {
- wl1271_error("no platform data");
+ dev_err(&spi->dev, "no platform data\n");
return -ENODEV;
}
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ pdata->ops = &spi_ops;
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&spi->dev, "can't allocate glue\n");
+ goto out;
+ }
- dev_set_drvdata(&spi->dev, wl);
- wl->if_priv = spi;
+ glue->dev = &spi->dev;
- wl->if_ops = &spi_ops;
+ spi_set_drvdata(spi, glue);
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
+ dev_err(glue->dev, "spi_setup failed\n");
+ goto out_free_glue;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device\n");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
- wl->platform_quirks = pdata->platform_quirks;
+ glue->core->dev.parent = &spi->dev;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ memset(res, 0x00, sizeof(res));
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
+ res[0].start = spi->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
}
- disable_irq(wl->irq);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't register platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
- wl1271_free_hw(wl);
+out_dev_put:
+ platform_device_put(glue->core);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static int __devexit wl1271_remove(struct spi_device *spi)
{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+ struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- wl1271_unregister_hw(wl);
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
return 0;
}
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8effaee2..25093c0cb0e 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
#include <net/genetlink.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
+#include "ps.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
return -EMSGSIZE;
mutex_lock(&wl->mutex);
- ret = wl1271_cmd_test(wl, buf, buf_len, answer);
- mutex_unlock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
wl1271_warning("testmode cmd test failed: %d", ret);
- return ret;
+ goto out_sleep;
}
if (answer) {
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
- return ret;
+ goto out_sleep;
}
- return 0;
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
- mutex_lock(&wl->mutex);
ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
- mutex_unlock(&wl->mutex);
-
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
- kfree(cmd);
- return ret;
+ goto out_free;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
if (!skb) {
- kfree(cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free;
}
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ goto out_free;
+
+out_free:
+ kfree(cmd);
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29d49b..4508ccd7832 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
#include <linux/etherdevice.h>
#include "wl12xx.h"
+#include "debug.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 id)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
ret = wl12xx_cmd_set_default_wep_key(wl, id,
- wl->ap_bcast_hlid);
+ wlvif->ap.bcast_hlid);
else
- ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
if (ret < 0)
return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
static int wl1271_tx_update_filters(struct wl1271 *wl,
- struct sk_buff *skb)
+ struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
if (!ieee80211_is_auth(hdr->frame_control))
return 0;
- if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+ if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
goto out;
wl1271_debug(DEBUG_CMD, "starting device role for roaming");
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid)
{
bool fw_ps, single_sta;
u8 tx_pkts;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
return;
- if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
* case FW-memory congestion is not a problem.
*/
if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
return wl->dummy_packet == skb;
}
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr;
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_mgmt(hdr->frame_control))
- return wl->ap_global_hlid;
+ return wlvif->ap.global_hlid;
else
- return wl->ap_bcast_hlid;
+ return wlvif->ap.bcast_hlid;
}
}
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
return wl->system_hlid;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl12xx_tx_get_hlid_ap(wl, skb);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
- wl1271_tx_update_filters(wl, skb);
+ wl1271_tx_update_filters(wl, wlvif, skb);
- if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+ if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
!ieee80211_is_auth(hdr->frame_control) &&
!ieee80211_is_assoc_req(hdr->frame_control))
- return wl->sta_hlid;
+ return wlvif->sta.hlid;
else
- return wl->dev_hlid;
+ return wlvif->dev_hlid;
}
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra, u32 buf_offset,
+ u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_blocks;
int id, ret = -EBUSY, ac;
u32 spare_blocks = wl->tx_spare_blocks;
+ bool is_dummy = false;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
len = wl12xx_calc_packet_alignment(wl, total_len);
/* in case of a dummy packet, use default amount of spare mem blocks */
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+ if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+ is_dummy = true;
spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ }
total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (wl->bss_type == BSS_TYPE_AP_BSS &&
- hlid >= WL1271_AP_STA_HLID_START)
+ if (!is_dummy && wlvif &&
+ wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control,
- u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra,
+ struct ieee80211_tx_info *control, u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int aligned_len, ac, rate_idx;
s64 hosttime;
- u16 tx_attr;
+ u16 tx_attr = 0;
+ bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+ if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
else
desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = skb->priority;
- if (wl12xx_is_dummy_packet(wl, skb)) {
+ if (is_dummy) {
/*
* FW expects the dummy packet to have an invalid session id -
* any session id that is different than the one set in the join
*/
- tx_attr = ((~wl->session_counter) <<
+ tx_attr = (SESSION_COUNTER_INVALID <<
TX_HW_ATTR_OFST_SESSION_COUNTER) &
TX_HW_ATTR_SESSION_COUNTER;
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
- } else {
+ } else if (wlvif) {
/* configure the tx attributes */
- tx_attr =
- wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = wlvif->session_counter <<
+ TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
-
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (is_dummy || !wlvif)
+ rate_idx = 0;
+ else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* if the packets are destined for AP (have a STA entry)
send them with AP rate policies, otherwise use default
basic rates */
- if (control->control.sta)
- rate_idx = ACX_TX_AP_FULL_RATE;
+ if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ rate_idx = wlvif->sta.p2p_rate_idx;
+ else if (control->control.sta)
+ rate_idx = wlvif->sta.ap_rate_idx;
else
- rate_idx = ACX_TX_BASIC_RATE;
+ rate_idx = wlvif->sta.basic_rate_idx;
} else {
- if (hlid == wl->ap_global_hlid)
- rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
- else if (hlid == wl->ap_bcast_hlid)
- rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ if (hlid == wlvif->ap.global_hlid)
+ rate_idx = wlvif->ap.mgmt_rate_idx;
+ else if (hlid == wlvif->ap.bcast_hlid)
+ rate_idx = wlvif->ap.bcast_rate_idx;
else
- rate_idx = ac;
+ rate_idx = wlvif->ap.ucast_rate_idx[ac];
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
- u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 buf_offset)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
u8 hlid;
+ bool is_dummy;
if (!skb)
return -EINVAL;
info = IEEE80211_SKB_CB(skb);
+ /* TODO: handle dummy packets on multi-vifs */
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (unlikely(is_wep && wl->default_key != idx)) {
- ret = wl1271_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wlvif->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
- wl->default_key = idx;
+ wlvif->default_key = idx;
}
}
-
- hlid = wl1271_tx_get_hlid(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+ wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
- wl1271_tx_regulate_link(wl, hlid);
+ wl1271_tx_regulate_link(wl, wlvif, hlid);
}
/*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
/* Revert side effects in the dummy packet skb, so it can be reused */
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (is_dummy)
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
return &queues[q];
}
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
unsigned long flags;
struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, wl->tx_queue);
+ queue = wl1271_select_queue(wl, lnk->tx_queue);
if (!queue)
- goto out;
+ return NULL;
skb = skb_dequeue(queue);
-
-out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
return skb;
}
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
- unsigned long flags;
int i, h, start_hlid;
- struct sk_buff_head *queue;
/* start from the link after the last one */
- start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < AP_MAX_LINKS; i++) {
- h = (start_hlid + i) % AP_MAX_LINKS;
+ for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+ h = (start_hlid + i) % WL12XX_MAX_LINKS;
/* only consider connected stations */
- if (h >= WL1271_AP_STA_HLID_START &&
- !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+ if (!test_bit(h, wlvif->links_map))
continue;
- queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
- if (!queue)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ if (!skb)
continue;
- skb = skb_dequeue(queue);
- if (skb)
- break;
+ wlvif->last_tx_hlid = h;
+ break;
}
- if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wl->last_tx_hlid = h;
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count[q]--;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- } else {
- wl->last_tx_hlid = 0;
- }
+ if (!skb)
+ wlvif->last_tx_hlid = 0;
return skb;
}
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
unsigned long flags;
+ struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- skb = wl1271_ap_skb_dequeue(wl);
- else
- skb = wl1271_sta_skb_dequeue(wl);
+ if (wlvif) {
+ wl12xx_for_each_wlvif_continue(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ /* do another pass */
+ if (!skb) {
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
return skb;
}
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(wl, skb);
+ } else {
+ u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
- } else {
- skb_queue_head(&wl->tx_queue[q], skb);
+ wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+ WL12XX_MAX_LINKS;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
return ieee80211_is_data_present(hdr->frame_control);
}
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+ struct wl12xx_vif *wlvif;
+ u32 timeout;
+ u8 hlid;
+
+ if (!wl->conf.rx_streaming.interval)
+ return;
+
+ if (!wl->conf.rx_streaming.always &&
+ !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+ return;
+
+ timeout = wl->conf.rx_streaming.duration;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ bool found = false;
+ for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ if (test_bit(hlid, wlvif->links_map)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue;
+
+ /* enable rx streaming */
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+ ieee80211_queue_work(wl->hw,
+ &wlvif->rx_streaming_enable_work);
+
+ mod_timer(&wlvif->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
void wl1271_tx_work_locked(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
+ struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0;
bool sent_packets = false;
- bool had_data = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
int ret;
if (unlikely(wl->state == WL1271_STATE_OFF))
return;
while ((skb = wl1271_skb_dequeue(wl))) {
- if (wl1271_tx_is_data_present(skb))
- had_data = true;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool has_data = false;
- ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+ wlvif = NULL;
+ if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ wlvif = wl12xx_vif_to_data(info->control.vif);
+
+ has_data = wlvif && wl1271_tx_is_data_present(skb);
+ ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
} else if (ret < 0) {
- dev_kfree_skb(skb);
+ if (wl12xx_is_dummy_packet(wl, skb))
+ /*
+ * fw still expects dummy packet,
+ * so re-enqueue it
+ */
+ wl1271_skb_queue_head(wl, wlvif, skb);
+ else
+ ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
buf_offset += ret;
wl->tx_packets_count++;
+ if (has_data) {
+ desc = (struct wl1271_tx_hw_descr *) skb->data;
+ __set_bit(desc->hlid, active_hlids);
+ }
}
out_ack:
@@ -701,19 +775,7 @@ out_ack:
wl1271_handle_tx_low_watermark(wl);
}
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* enable rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
struct ieee80211_tx_info *info;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
int id = result->id;
int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
return;
}
+ /* info->control is valid as long as we don't update info->status */
+ vif = info->control.vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+ rate = wl1271_rate_to_idx(result->rate_class_index,
+ wlvif->band);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
u8 fw_lsb = result->tx_security_sequence_number_lsb;
- u8 cur_lsb = wl->tx_security_last_seq_lsb;
+ u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
/*
* update security sequence number, taking care of potential
* wrap-around
*/
- wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
- wl->tx_security_last_seq_lsb = fw_lsb;
+ wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+ wlvif->tx_security_last_seq_lsb = fw_lsb;
}
/* remove private header from packet */
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- for (i = 0; i < AP_MAX_LINKS; i++) {
- wl1271_free_sta(wl, i);
- wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
- }
-
- wl->last_tx_hlid = 0;
- } else {
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
- skb);
-
- if (!wl12xx_is_dummy_packet(wl, skb)) {
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- }
- }
+ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_free_sta(wl, wlvif, i);
+ else
+ wlvif->sta.ba_rx_bitmap = 0;
- wl->ba_rx_bitmap = 0;
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
+ wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09adf08..2dbb24e6d54 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
/* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc7505..b2b09cd0202 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
-
-enum {
- DEBUG_NONE = 0,
- DEBUG_IRQ = BIT(0),
- DEBUG_SPI = BIT(1),
- DEBUG_BOOT = BIT(2),
- DEBUG_MAILBOX = BIT(3),
- DEBUG_TESTMODE = BIT(4),
- DEBUG_EVENT = BIT(5),
- DEBUG_TX = BIT(6),
- DEBUG_RX = BIT(7),
- DEBUG_SCAN = BIT(8),
- DEBUG_CRYPT = BIT(9),
- DEBUG_PSM = BIT(10),
- DEBUG_MAC80211 = BIT(11),
- DEBUG_CMD = BIT(12),
- DEBUG_ACX = BIT(13),
- DEBUG_SDIO = BIT(14),
- DEBUG_FILTERS = BIT(15),
- DEBUG_ADHOC = BIT(16),
- DEBUG_AP = BIT(17),
- DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
- DEBUG_ALL = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
- pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
- pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
- do { \
- if (level & wl12xx_debug_level) \
- pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
- } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- 0); \
- } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- true); \
- } while (0)
-
#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
@@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level;
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+#define WL12XX_MAX_RATE_POLICIES 16
+
/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0
/*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START 3
-
-/*
* When in AP-mode, we allow (at least) this number of packets
* to be transmitted to FW for a STA in PS-mode. Only when packets are
* present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@ struct wl1271_stats {
#define AP_MAX_STATIONS 8
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
/* FW status registers */
struct wl12xx_fw_status {
__le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*read)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*write)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*reset)(struct wl1271 *wl);
- void (*init)(struct wl1271 *wl);
- int (*power)(struct wl1271 *wl, bool enable);
- struct device* (*dev)(struct wl1271 *wl);
- void (*enable_irq)(struct wl1271 *wl);
- void (*disable_irq)(struct wl1271 *wl);
- void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+ void (*reset)(struct device *child);
+ void (*init)(struct device *child);
+ int (*power)(struct device *child, bool enable);
+ void (*set_block_size) (struct device *child, unsigned int blksz);
};
#define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
};
enum wl12xx_flags {
- WL1271_FLAG_STA_ASSOCIATED,
- WL1271_FLAG_IBSS_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
WL1271_FLAG_ELP_REQUESTED,
- WL1271_FLAG_PSM,
- WL1271_FLAG_PSM_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
- WL1271_FLAG_IDLE,
- WL1271_FLAG_PSPOLL_FAILURE,
- WL1271_FLAG_STA_STATE_SENT,
WL1271_FLAG_FW_TX_BUSY,
- WL1271_FLAG_AP_STARTED,
- WL1271_FLAG_IF_INITIALIZED,
WL1271_FLAG_DUMMY_PACKET_PENDING,
WL1271_FLAG_SUSPENDED,
WL1271_FLAG_PENDING_WORK,
WL1271_FLAG_SOFT_GEMINI,
- WL1271_FLAG_RX_STREAMING_STARTED,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
- WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+ WLVIF_FLAG_INITIALIZED,
+ WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_IBSS_JOINED,
+ WLVIF_FLAG_AP_STARTED,
+ WLVIF_FLAG_PSM,
+ WLVIF_FLAG_PSM_REQUESTED,
+ WLVIF_FLAG_STA_STATE_SENT,
+ WLVIF_FLAG_RX_STREAMING_STARTED,
+ WLVIF_FLAG_PSPOLL_FAILURE,
+ WLVIF_FLAG_CS_PROGRESS,
+ WLVIF_FLAG_AP_PROBE_RESP_SET,
+ WLVIF_FLAG_IN_USE,
};
struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
};
struct wl1271 {
- struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
+ struct device *dev;
+
void *if_priv;
struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
s8 hw_pg_ver;
- u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
- u8 bss_type;
- u8 set_bss_type;
- u8 p2p; /* we are using p2p role */
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
int channel;
- u8 role_id;
- u8 dev_role_id;
u8 system_hlid;
- u8 sta_hlid;
- u8 dev_hlid;
- u8 ap_global_hlid;
- u8 ap_bcast_hlid;
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+ unsigned long rate_policies_map[
+ BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+ struct list_head wlvif_list;
+
+ u8 sta_count;
+ u8 ap_count;
struct wl1271_acx_mem_map *target_mem_map;
@@ -440,11 +349,7 @@ struct wl1271 {
/* Time-offset between host and chipset clocks */
s64 time_offset;
- /* Session counter for the chipset */
- int session_counter;
-
/* Frames scheduled for transmission, not handled yet */
- struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count[NUM_TX_QUEUES];
long stopped_queues_map;
@@ -462,17 +367,6 @@ struct wl1271 {
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
- /*
- * Security sequence number
- * bits 0-15: lower 16 bits part of sequence number
- * bits 16-47: higher 32 bits part of sequence number
- * bits 48-63: not in use
- */
- u64 tx_security_seq;
-
- /* 8 bits of the last sequence number in use */
- u8 tx_security_last_seq_lsb;
-
/* FW Rx counter */
u32 rx_counter;
@@ -507,59 +401,21 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
+ struct ieee80211_vif *scan_vif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
bool sched_scanning;
- /* probe-req template for the current AP */
- struct sk_buff *probereq;
-
- /* Our association ID */
- u16 aid;
-
- /*
- * currently configured rate set:
- * bits 0-15 - 802.11abg rates
- * bits 16-23 - 802.11n MCS index mask
- * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
- */
- u32 basic_rate_set;
- u32 basic_rate;
- u32 rate_set;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
/* The current band */
enum ieee80211_band band;
- /* Beaconing interval (needed for ad-hoc) */
- u32 beacon_int;
-
- /* Default key (for WEP) */
- u32 default_key;
-
- /* Rx Streaming */
- struct work_struct rx_streaming_enable_work;
- struct work_struct rx_streaming_disable_work;
- struct timer_list rx_streaming_timer;
-
struct completion *elp_compl;
- struct completion *ps_compl;
struct delayed_work elp_work;
- struct delayed_work pspoll_work;
-
- /* counter for ps-poll delivery failures */
- int ps_poll_failures;
-
- /* retry counter for PSM entries */
- u8 psm_entry_retry;
/* in dBm */
int power_level;
- int rssi_thold;
- int last_rssi_event;
-
struct wl1271_stats stats;
__le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
- /* map for HLIDs of associated stations - when operating in AP mode */
- unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
- /* recoreded keys for AP-mode - set here before AP startup */
- struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
/* bands supported by this instance of wl12xx */
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- /* RX BA constraint value */
- bool ba_support;
- u8 ba_rx_bitmap;
- bool ba_allowed;
-
int tcxo_clock;
/*
@@ -610,10 +455,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[AP_MAX_LINKS];
-
- /* the hlid of the link where the last transmitted skb came from */
- int last_tx_hlid;
+ struct wl1271_link links[WL12XX_MAX_LINKS];
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+
+ /* last wlvif we transmitted from */
+ struct wl12xx_vif *last_wlvif;
};
struct wl1271_station {
u8 hlid;
};
+struct wl12xx_vif {
+ struct wl1271 *wl;
+ struct list_head list;
+ unsigned long flags;
+ u8 bss_type;
+ u8 p2p; /* we are using p2p role */
+ u8 role_id;
+
+ /* sta/ibss specific */
+ u8 dev_role_id;
+ u8 dev_hlid;
+
+ union {
+ struct {
+ u8 hlid;
+ u8 ba_rx_bitmap;
+
+ u8 basic_rate_idx;
+ u8 ap_rate_idx;
+ u8 p2p_rate_idx;
+ } sta;
+ struct {
+ u8 global_hlid;
+ u8 bcast_hlid;
+
+ /* HLIDs bitmap of associated stations */
+ unsigned long sta_hlid_map[BITS_TO_LONGS(
+ WL12XX_MAX_LINKS)];
+
+ /* recorded keys - set here before AP startup */
+ struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+ u8 mgmt_rate_idx;
+ u8 bcast_rate_idx;
+ u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+ } ap;
+ };
+
+ /* the hlid of the last transmitted skb */
+ int last_tx_hlid;
+
+ unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+ u8 ssid_len;
+
+ /* The current band */
+ enum ieee80211_band band;
+ int channel;
+
+ u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 basic_rate_set;
+
+ /*
+ * currently configured rate set:
+ * bits 0-15 - 802.11abg rates
+ * bits 16-23 - 802.11n MCS index mask
+ * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+ */
+ u32 basic_rate;
+ u32 rate_set;
+
+ /* probe-req template for the current AP */
+ struct sk_buff *probereq;
+
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
+ /* Default key (for WEP) */
+ u32 default_key;
+
+ /* Our association ID */
+ u16 aid;
+
+ /* Session counter for the chipset */
+ int session_counter;
+
+ struct completion *ps_compl;
+ struct delayed_work pspoll_work;
+
+ /* counter for ps-poll delivery failures */
+ int ps_poll_failures;
+
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
+ /* in dBm */
+ int power_level;
+
+ int rssi_thold;
+ int last_rssi_event;
+
+ /* RX BA constraint value */
+ bool ba_support;
+ bool ba_allowed;
+
+ /* Rx Streaming */
+ struct work_struct rx_streaming_enable_work;
+ struct work_struct rx_streaming_disable_work;
+ struct timer_list rx_streaming_timer;
+
+ /*
+ * This struct must be last!
+ * data that has to be saved across reconfigs (e.g. recovery)
+ * should be declared in this struct.
+ */
+ struct {
+ u8 persistent[0];
+ /*
+ * Security sequence number
+ * bits 0-15: lower 16 bits part of sequence number
+ * bits 16-47: higher 32 bits part of sequence number
+ * bits 48-63: not in use
+ */
+ u64 tx_security_seq;
+
+ /* 8 bits of the last sequence number in use */
+ u8 tx_security_last_seq_lsb;
+ };
+};
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+ return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+ return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+ list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+ list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
+ wl12xx_for_each_wlvif(wl, wlvif) \
+ if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
int wl1271_plt_start(struct wl1271 *wl);
int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
#define WL1271_DEFAULT_POWER_LEVEL 0
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
/* Older firmwares did not implement the FW logger over bus feature */
#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3b089..8f0ffaf6230 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
u8 ta[ETH_ALEN];
} __packed;
-struct wl12xx_qos_null_data_template {
- struct ieee80211_header header;
- __le16 qos_ctl;
-} __packed;
-
struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b11060a8..998e95895f9 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -1,8 +1,29 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/wl12xx.h>
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
@@ -18,7 +39,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
return 0;
}
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
if (!platform_data)
return ERR_PTR(-ENODEV);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 8efa2f2d957..a66b93b7ff9 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1907,15 +1907,4 @@ static struct usb_driver zd1201_usb = {
.resume = zd1201_resume,
};
-static int __init zd1201_init(void)
-{
- return usb_register(&zd1201_usb);
-}
-
-static void __exit zd1201_cleanup(void)
-{
- usb_deregister(&zd1201_usb);
-}
-
-module_init(zd1201_init);
-module_exit(zd1201_cleanup);
+module_usb_driver(zd1201_usb);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182562952c7..b7d41f8c338 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
@@ -222,7 +223,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
}
-static struct ethtool_ops xenvif_ethtool_ops = {
+static const struct ethtool_ops xenvif_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
@@ -230,7 +231,7 @@ static struct ethtool_ops xenvif_ethtool_ops = {
.get_strings = xenvif_get_strings,
};
-static struct net_device_ops xenvif_netdev_ops = {
+static const struct net_device_ops xenvif_netdev_ops = {
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c8609..59effac15f3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
- * These variables a used iff get_page_ext returns true,
+ * These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
if (!page)
return NULL;
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
@@ -1021,7 +1019,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
continue;
}
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
@@ -1638,7 +1634,7 @@ static int __init netback_init(void)
int rc = 0;
int group;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
xen_netbk_group_nr = num_online_cpus();
@@ -1668,7 +1664,7 @@ static int __init netback_init(void)
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1ce729d6af7..410018c4c52 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -474,17 +474,14 @@ static const struct xenbus_device_id netback_ids[] = {
};
-static struct xenbus_driver netback = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netback_ids,
+static DEFINE_XENBUS_DRIVER(netback, ,
.probe = netback_probe,
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
-};
+);
int xenvif_xenbus_init(void)
{
- return xenbus_register_backend(&netback);
+ return xenbus_register_backend(&netback_driver);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 226faab2360..fa679057630 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
{
return dev->features & NETIF_F_SG;
}
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
gnttab_free_grant_references(np->gref_rx_head);
}
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateConnected:
netif_notify_peers(netdev);
break;
@@ -1910,7 +1914,7 @@ static void xennet_sysfs_delif(struct net_device *netdev)
#endif /* CONFIG_SYSFS */
-static struct xenbus_device_id netfront_ids[] = {
+static const struct xenbus_device_id netfront_ids[] = {
{ "vif" },
{ "" }
};
@@ -1937,15 +1941,12 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
return 0;
}
-static struct xenbus_driver netfront_driver = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netfront_ids,
+static DEFINE_XENBUS_DRIVER(netfront, ,
.probe = netfront_probe,
.remove = __devexit_p(xennet_remove),
.resume = netfront_resume,
.otherend_changed = netback_changed,
-};
+);
static int __init netif_init(void)
{
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 7bcb1febef0..1a1500bc845 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -231,6 +232,26 @@ struct pn533_cmd_activate_response {
u8 gt[];
} __packed;
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+ u8 active;
+ u8 baud;
+ u8 next;
+ u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+ u8 status;
+ u8 tg;
+ u8 nfcid3t[10];
+ u8 didt;
+ u8 bst;
+ u8 brt;
+ u8 to;
+ u8 ppt;
+ /* optional */
+ u8 gt[];
+} __packed;
struct pn533 {
struct usb_device *udev;
@@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
{
struct pn533_cmd_activate_param param;
struct pn533_cmd_activate_response *resp;
+ u16 gt_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
if (rc != PN533_CMD_RET_SUCCESS)
return -EIO;
- return 0;
+ /* ATR_RES general bytes are located at offset 16 */
+ gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+ return rc;
}
static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
return;
}
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_jump_dep *cmd;
+ struct pn533_cmd_jump_dep_response *resp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (params_len == -ENOENT) {
+ nfc_dev_dbg(&dev->interface->dev, "");
+ return 0;
+ }
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when bringing DEP link up",
+ params_len);
+ return 0;
+ }
+
+ if (dev->tgt_available_prots &&
+ !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+ nfc_dev_err(&dev->interface->dev,
+ "The target does not support DEP");
+ return -EINVAL;
+ }
+
+ resp = (struct pn533_cmd_jump_dep_response *) params;
+ cmd = (struct pn533_cmd_jump_dep *) arg;
+ rc = resp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev,
+ "Bringing DEP link up failed %d", rc);
+ return 0;
+ }
+
+ if (!dev->tgt_available_prots) {
+ nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ return 0;
+
+ dev->tgt_available_prots = 0;
+ }
+
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ resp->gt, target_gt_len);
+ if (rc == 0)
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ !cmd->active, NFC_RF_INITIATOR);
+
+ return 0;
+}
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_cmd_jump_dep *cmd;
+ u8 cmd_len, local_gt_len, *local_gt;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (rf_mode == NFC_RF_TARGET) {
+ nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+ return -EOPNOTSUPP;
+ }
+
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "There is already an active target");
+ return -EBUSY;
+ }
+
+ local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+ if (local_gt_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+ cmd->active = !comm_mode;
+ cmd->baud = 0;
+ if (local_gt != NULL) {
+ cmd->next = 4; /* We have some Gi */
+ memcpy(cmd->gt, local_gt, local_gt_len);
+ } else {
+ cmd->next = 0;
+ }
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+ dev->out_frame->datalen += cmd_len;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_in_dep_link_up_complete,
+ cmd, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+
+out:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ pn533_deactivate_target(nfc_dev, 0);
+
+ return 0;
+}
+
#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
@@ -1339,7 +1501,7 @@ error:
return 0;
}
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
struct sk_buff *skb,
data_exchange_cb_t cb,
void *cb_context)
@@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
PN533_CMD_DATAEXCH_DATA_MAXLEN +
PN533_FRAME_TAIL_SIZE;
- skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
if (!skb_resp) {
rc = -ENOMEM;
goto error;
@@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
+ .dep_link_up = pn533_dep_link_up,
+ .dep_link_down = pn533_dep_link_down,
.start_poll = pn533_start_poll,
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
@@ -1597,24 +1761,7 @@ static struct usb_driver pn533_driver = {
.id_table = pn533_table,
};
-static int __init pn533_init(void)
-{
- int rc;
-
- rc = usb_register(&pn533_driver);
- if (rc)
- err("usb_register failed. Error number %d", rc);
-
- return rc;
-}
-
-static void __exit pn533_exit(void)
-{
- usb_deregister(&pn533_driver);
-}
-
-module_init(pn533_init);
-module_exit(pn533_exit);
+module_usb_driver(pn533_driver);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>,"
" Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index cac63c9f49a..268163dd71c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -15,6 +15,15 @@ config PROC_DEVICETREE
an image of the device tree that the kernel copies from Open
Firmware or other boot firmware. If unsure, say Y here.
+config OF_SELFTEST
+ bool "Device Tree Runtime self tests"
+ help
+ This option builds in test cases for the device tree infrastructure
+ that are executed once at boot time, and the results are dumped to the
+ console.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_FLATTREE
bool
select DTC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index dccb1176be5..a73f5a51ff4 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
+obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9b6588ef067..133908a6fd8 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -752,7 +752,7 @@ int of_property_read_string_index(struct device_node *np, const char *propname,
for (i = 0; total < prop->length; total += l, p += l) {
l = strlen(p) + 1;
- if ((*p != 0) && (i++ == index)) {
+ if (i++ == index) {
*output = p;
return 0;
}
@@ -790,11 +790,9 @@ int of_property_count_strings(struct device_node *np, const char *propname)
p = prop->value;
- for (i = 0; total < prop->length; total += l, p += l) {
+ for (i = 0; total < prop->length; total += l, p += l, i++)
l = strlen(p) + 1;
- if (*p != 0)
- i++;
- }
+
return i;
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
@@ -824,17 +822,19 @@ of_parse_phandle(struct device_node *np, const char *phandle_name, int index)
EXPORT_SYMBOL(of_parse_phandle);
/**
- * of_parse_phandles_with_args - Find a node pointed by phandle in a list
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
* @index: index of a phandle to parse out
- * @out_node: optional pointer to device_node struct pointer (will be filled)
- * @out_args: optional pointer to arguments pointer (will be filled)
+ * @out_args: optional pointer to output arguments structure (will be filled)
*
* This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_node and out_args, on error returns
- * appropriate errno value.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->np
+ * pointer.
*
* Example:
*
@@ -851,94 +851,96 @@ EXPORT_SYMBOL(of_parse_phandle);
* }
*
* To get a device_node of the `node2' node you may call this:
- * of_parse_phandles_with_args(node3, "list", "#list-cells", 2, &node2, &args);
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
-int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
+int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
const char *cells_name, int index,
- struct device_node **out_node,
- const void **out_args)
+ struct of_phandle_args *out_args)
{
- int ret = -EINVAL;
- const __be32 *list;
- const __be32 *list_end;
- int size;
- int cur_index = 0;
+ const __be32 *list, *list_end;
+ int size, cur_index = 0;
+ uint32_t count = 0;
struct device_node *node = NULL;
- const void *args = NULL;
+ phandle phandle;
+ /* Retrieve the phandle list property */
list = of_get_property(np, list_name, &size);
- if (!list) {
- ret = -ENOENT;
- goto err0;
- }
+ if (!list)
+ return -EINVAL;
list_end = list + size / sizeof(*list);
+ /* Loop over the phandles until the requested entry is found */
while (list < list_end) {
- const __be32 *cells;
- phandle phandle;
+ count = 0;
+ /*
+ * If phandle is 0, then it is an empty entry with no
+ * arguments. Skip forward to the next entry.
+ */
phandle = be32_to_cpup(list++);
- args = list;
-
- /* one cell hole in the list = <>; */
- if (!phandle)
- goto next;
-
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_debug("%s: could not find phandle\n",
- np->full_name);
- goto err0;
- }
+ if (phandle) {
+ /*
+ * Find the provider node and parse the #*-cells
+ * property to determine the argument length
+ */
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ break;
+ }
+ if (of_property_read_u32(node, cells_name, &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ break;
+ }
- cells = of_get_property(node, cells_name, &size);
- if (!cells || size != sizeof(*cells)) {
- pr_debug("%s: could not get %s for %s\n",
- np->full_name, cells_name, node->full_name);
- goto err1;
+ /*
+ * Make sure that the arguments actually fit in the
+ * remaining property data length
+ */
+ if (list + count > list_end) {
+ pr_err("%s: arguments longer than property\n",
+ np->full_name);
+ break;
+ }
}
- list += be32_to_cpup(cells);
- if (list > list_end) {
- pr_debug("%s: insufficient arguments length\n",
- np->full_name);
- goto err1;
+ /*
+ * All of the error cases above bail out of the loop, so at
+ * this point, the parsing is successful. If the requested
+ * index matches, then fill the out_args structure and return,
+ * or return -ENOENT for an empty entry.
+ */
+ if (cur_index == index) {
+ if (!phandle)
+ return -ENOENT;
+
+ if (out_args) {
+ int i;
+ if (WARN_ON(count > MAX_PHANDLE_ARGS))
+ count = MAX_PHANDLE_ARGS;
+ out_args->np = node;
+ out_args->args_count = count;
+ for (i = 0; i < count; i++)
+ out_args->args[i] = be32_to_cpup(list++);
+ }
+ return 0;
}
-next:
- if (cur_index == index)
- break;
of_node_put(node);
node = NULL;
- args = NULL;
+ list += count;
cur_index++;
}
- if (!node) {
- /*
- * args w/o node indicates that the loop above has stopped at
- * the 'hole' cell. Report this differently.
- */
- if (args)
- ret = -EEXIST;
- else
- ret = -ENOENT;
- goto err0;
- }
-
- if (out_node)
- *out_node = node;
- if (out_args)
- *out_args = args;
-
- return 0;
-err1:
- of_node_put(node);
-err0:
- pr_debug("%s failed with status %d\n", __func__, ret);
- return ret;
+ /* Loop exited without finding a valid entry; return an error */
+ if (node)
+ of_node_put(node);
+ return -EINVAL;
}
-EXPORT_SYMBOL(of_parse_phandles_with_args);
+EXPORT_SYMBOL(of_parse_phandle_with_args);
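For reference, a minimal consumer sketch of the reworked call (not part of the patch; the "list"/"#list-cells" names mirror the kerneldoc example above, and the helper name is made up). The provider node and the argument cells now come back together in a single struct of_phandle_args, and the caller still drops the node reference:

/* Hypothetical consumer of the new single-structure API. */
static int example_parse_list_entry(struct device_node *np, int index)
{
	struct of_phandle_args spec;
	int i, ret;

	ret = of_parse_phandle_with_args(np, "list", "#list-cells",
					 index, &spec);
	if (ret)
		return ret;	/* -ENOENT for an empty entry, -EINVAL otherwise */

	for (i = 0; i < spec.args_count; i++)
		pr_debug("arg[%d] = %u\n", i, spec.args[i]);

	of_node_put(spec.np);	/* caller owns the provider node reference */
	return 0;
}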
/**
* prom_add_property - Add a property to a node
@@ -1159,7 +1161,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (!of_aliases)
return;
- for_each_property(pp, of_aliases->properties) {
+ for_each_property_of_node(of_aliases, pp) {
const char *start = pp->name;
const char *end = start + strlen(start);
struct device_node *np;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index fd85fa298e0..ea2bd1be264 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,10 +18,12 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
#include <asm/machdep.h>
#endif /* CONFIG_PPC */
+#include <asm/setup.h>
#include <asm/page.h>
char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
@@ -107,7 +109,7 @@ int of_fdt_is_compatible(struct boot_param_header *blob,
* of_fdt_match - Return true if node matches a list of compatible values
*/
int of_fdt_match(struct boot_param_header *blob, unsigned long node,
- const char **compat)
+ const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -541,7 +543,7 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
/**
* of_flat_dt_match - Return true if node matches a list of compatible values
*/
-int __init of_flat_dt_match(unsigned long node, const char **compat)
+int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
return of_fdt_match(initial_boot_params, node, compat);
}
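The const-qualification above only tightens the prototype; existing callers that pass a NULL-terminated array of string literals keep working unchanged. A small hedged sketch with made-up compatible strings:

/* Sketch: a compatible list as now expected by the const-qualified API. */
static const char *const example_compat[] = {
	"acme,board-a",		/* hypothetical compatible strings */
	"acme,board-b",
	NULL
};

static int __init example_match(unsigned long node)
{
	return of_flat_dt_match(node, example_compat);
}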
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index ef0105fa52b..7e62d15d60f 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -35,32 +35,27 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
int ret;
- struct device_node *gpio_np;
struct gpio_chip *gc;
- int size;
- const void *gpio_spec;
- const __be32 *gpio_cells;
+ struct of_phandle_args gpiospec;
- ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
- &gpio_np, &gpio_spec);
+ ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
+ &gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}
- gc = of_node_to_gpiochip(gpio_np);
+ gc = of_node_to_gpiochip(gpiospec.np);
if (!gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -ENODEV;
goto err1;
}
- gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
- if (!gpio_cells || size != sizeof(*gpio_cells) ||
- be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) {
+ if (gpiospec.args_count != gc->of_gpio_n_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -EINVAL;
goto err1;
}
@@ -69,13 +64,13 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
if (flags)
*flags = 0;
- ret = gc->of_xlate(gc, np, gpio_spec, flags);
+ ret = gc->of_xlate(gc, &gpiospec, flags);
if (ret < 0)
goto err1;
ret += gc->base;
err1:
- of_node_put(gpio_np);
+ of_node_put(gpiospec.np);
err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
@@ -105,8 +100,8 @@ unsigned int of_gpio_count(struct device_node *np)
do {
int ret;
- ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells",
- cnt, NULL, NULL);
+ ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells",
+ cnt, NULL);
/* A hole in the gpios = <> counts anyway. */
if (ret < 0 && ret != -EEXIST)
break;
@@ -127,12 +122,9 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
{
- const __be32 *gpio = gpio_spec;
- const u32 n = be32_to_cpup(gpio);
-
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
* write your own xlate function (that will have to retrieve the GPIO
@@ -144,13 +136,16 @@ int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return -EINVAL;
}
- if (n > gc->ngpio)
+ if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
+ return -EINVAL;
+
+ if (gpiospec->args[0] > gc->ngpio)
return -EINVAL;
if (flags)
- *flags = be32_to_cpu(gpio[1]);
+ *flags = gpiospec->args[1];
- return n;
+ return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);
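Controller drivers that install their own of_xlate hook now receive the pre-parsed of_phandle_args instead of a raw cell pointer. A hedged sketch of such a hook, assuming a hypothetical three-cell <bank pin flags> binding (not a real driver):

/* Hedged sketch of a driver-private xlate under the new prototype. */
static int example_gpio_xlate(struct gpio_chip *gc,
			      const struct of_phandle_args *gpiospec,
			      u32 *flags)
{
	u32 pin;

	if (gpiospec->args_count != 3)
		return -EINVAL;

	/* hypothetical layout: <bank pin flags>, 8 pins per bank */
	pin = gpiospec->args[0] * 8 + gpiospec->args[1];
	if (pin >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[2];

	return pin;
}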
@@ -198,8 +193,6 @@ int of_mm_gpiochip_add(struct device_node *np,
if (ret)
goto err2;
- pr_debug("%s: registered as generic GPIO chip, base is %d\n",
- np->full_name, gc->base);
return 0;
err2:
iounmap(mm_gc->regs);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 791270b8bd1..0f0cfa3bca3 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
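Related to the NO_IRQ removal above, callers of irq_of_parse_and_map() now simply test the mapped value against zero, the architecture-neutral "no interrupt" value. A hedged consumer sketch:

/* Consumer sketch: zero means "no mapping"; no NO_IRQ constant needed. */
static int example_probe_irq(struct device_node *dev)
{
	unsigned int irq = irq_of_parse_and_map(dev, 0);

	if (!irq)
		return -ENXIO;	/* 0 is the invalid-interrupt value */

	return irq;
}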
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index bc5b3990f6e..07cc1d678e4 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -229,7 +229,7 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
return ret;
}
-static void *kernel_tree_alloc(u64 size, u64 align)
+static void * __init kernel_tree_alloc(u64 size, u64 align)
{
return prom_early_alloc(size);
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cbd5d701c7e..63b3ec48c20 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
if (!lookup)
return NULL;
- for(; lookup->name != NULL; lookup++) {
+ for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
if (of_address_to_resource(np, 0, &res))
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
new file mode 100644
index 00000000000..9d2b4803a9d
--- /dev/null
+++ b/drivers/of/selftest.c
@@ -0,0 +1,139 @@
+/*
+ * Self tests for device tree subsystem
+ */
+
+#define pr_fmt(fmt) "### %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+static bool selftest_passed = true;
+#define selftest(result, fmt, ...) { \
+ selftest_passed &= (result); \
+ if (!(result)) \
+ pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
+}
+
+static void __init of_selftest_parse_phandle_with_args(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int rc, i;
+ bool passed_all = true;
+
+ pr_info("start\n");
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ bool passed = true;
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells", i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 0);
+ break;
+ case 2:
+ passed &= (rc == -ENOENT);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 4);
+ passed &= (args.args[2] == 3);
+ break;
+ case 4:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 100);
+ break;
+ case 5:
+ passed &= !rc;
+ passed &= (args.args_count == 0);
+ break;
+ case 6:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 7:
+ passed &= (rc == -EINVAL);
+ break;
+ default:
+ passed = false;
+ }
+
+ if (!passed) {
+ int j;
+ pr_err("index %i - data error on node %s rc=%i regs=[",
+ i, args.np->full_name, rc);
+ for (j = 0; j < args.args_count; j++)
+ printk(" %i", args.args[j]);
+ printk(" ]\n");
+
+ passed_all = false;
+ }
+ }
+
+ /* Check for missing list property */
+ rc = of_parse_phandle_with_args(np, "phandle-list-missing",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for missing cells property */
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells-missing", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for bad phandle in list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for incorrectly formed argument list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
+ "#phandle-cells", 1, &args);
+ passed_all &= (rc == -EINVAL);
+
+ pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+}
+
+static int __init of_selftest(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_info("No testcase data in device tree; not running tests\n");
+ return 0;
+ }
+ of_node_put(np);
+
+ pr_info("start of selftest - you will see error messages\n");
+ of_selftest_parse_phandle_with_args();
+ pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ return 0;
+}
+late_initcall(of_selftest);
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 00000000000..76f1c9357f3
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ event->hw.interrupts = 0; /* don't throttle interrupts */
+ oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (!event) {
+ event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+ nmi_timer_callback, NULL);
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+ per_cpu(nmi_timer_events, cpu) = event;
+ }
+
+ if (event && ctr_running)
+ perf_event_enable(event);
+
+ return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (event && ctr_running)
+ perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+ void *data)
+{
+ int cpu = (unsigned long)data;
+ switch (action) {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ nmi_timer_start_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ nmi_timer_stop_cpu(cpu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+ .notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ ctr_running = 1;
+ for_each_online_cpu(cpu)
+ nmi_timer_start_cpu(cpu);
+ put_online_cpus();
+
+ return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ nmi_timer_stop_cpu(cpu);
+ ctr_running = 0;
+ put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+ struct perf_event *event;
+ int cpu;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ for_each_possible_cpu(cpu) {
+ event = per_cpu(nmi_timer_events, cpu);
+ if (!event)
+ continue;
+ perf_event_disable(event);
+ per_cpu(nmi_timer_events, cpu) = NULL;
+ perf_event_release_kernel(event);
+ }
+
+ put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+ int cpu, err;
+ u64 period;
+
+ /* clock cycles per tick: */
+ period = (u64)cpu_khz * 1000;
+ do_div(period, HZ);
+ nmi_timer_attr.sample_period = period;
+
+ get_online_cpus();
+ err = register_cpu_notifier(&nmi_timer_cpu_nb);
+ if (err)
+ goto out;
+ /* can't attach events to offline cpus: */
+ for_each_online_cpu(cpu) {
+ err = nmi_timer_start_cpu(cpu);
+ if (err)
+ break;
+ }
+ if (err)
+ nmi_timer_shutdown();
+out:
+ put_online_cpus();
+ return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ int err = 0;
+
+ err = nmi_timer_setup();
+ if (err)
+ return err;
+ nmi_timer_shutdown(); /* only check, don't alloc */
+
+ ops->create_files = NULL;
+ ops->setup = nmi_timer_setup;
+ ops->shutdown = nmi_timer_shutdown;
+ ops->start = nmi_timer_start;
+ ops->stop = nmi_timer_stop;
+ ops->cpu_type = "timer";
+
+ printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+ return 0;
+}
+
+#endif
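The sample period set up in nmi_timer_setup() is simply the number of CPU clock cycles per kernel tick. A worked, standalone example with made-up values (2 GHz CPU, HZ=250):

/* Worked example of the sample-period computation; values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_khz = 2000000;	/* 2 GHz CPU */
	unsigned int hz = 250;		/* kernel tick rate */
	uint64_t period = cpu_khz * 1000 / hz;

	printf("%llu cycles per tick\n", (unsigned long long)period);
	return 0;	/* prints 8000000 */
}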
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095..ed2c3ec0702 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,39 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to setup backtrace support */
+ timer_mode = 0;
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ if (!err) {
+ if (!timer && !oprofilefs_register())
+ return 0;
+ oprofile_arch_exit();
+ }
+
+ /* setup timer mode: */
+ timer_mode = 1;
+ /* no nmi timer mode if oprofile.timer is set */
+ if (timer || op_nmi_timer_init(&oprofile_ops)) {
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
+
return oprofilefs_register();
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (!timer_mode)
+ oprofile_arch_exit();
}
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 177b73de5e5..d32ef816337 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -35,7 +35,15 @@ struct dentry;
void oprofile_create_files(struct super_block *sb, struct dentry *root);
int oprofile_timer_init(struct oprofile_operations *ops);
-void oprofile_timer_exit(void);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ return -ENODEV;
+}
+#endif
+
int oprofile_set_ulong(unsigned long *addr, unsigned long val);
int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f63456646..84a208dbed9 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ retval = 0;
if (val)
retval = oprofile_start();
else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc2d7a..2f0aa0f700e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
}
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
+ return count;
}
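Because oprofilefs_ulong_from_user() now returns the number of bytes consumed, the converted write handlers treat zero as a pass-through no-op and only act on a positive return. A hedged sketch of that caller pattern (the handler name is illustrative):

/* Caller pattern used by the converted write handlers (sketch). */
static ssize_t example_ulong_write(struct file *file, char const __user *buf,
				   size_t count, loff_t *offset)
{
	unsigned long val;
	int retval;

	if (*offset)
		return -EINVAL;

	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)
		return retval;	/* 0 when count == 0, negative on error */

	/* ... apply val here, e.g. via oprofile_set_ulong() ... */
	return count;
}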
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
return -EINVAL;
retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(file->private_data, value);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f51..93404f72dfa 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,23 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
{
- int rc;
-
- rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
- if (rc)
- return rc;
- ops->create_files = NULL;
- ops->setup = NULL;
- ops->shutdown = NULL;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- return 0;
+ return register_hotcpu_notifier(&oprofile_cpu_notifier);
}
-void oprofile_timer_exit(void)
+static void oprofile_hrtimer_shutdown(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+ ops->create_files = NULL;
+ ops->setup = oprofile_hrtimer_setup;
+ ops->shutdown = oprofile_hrtimer_shutdown;
+ ops->start = oprofile_hrtimer_start;
+ ops->stop = oprofile_hrtimer_stop;
+ ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ return 0;
+}
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 553a9905299..62026493634 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -108,13 +108,6 @@ config IOMMU_HELPER
depends on IOMMU_SBA || IOMMU_CCIO
default y
-#config PCI_EPIC
-# bool "EPIC/SAGA PCI support"
-# depends on PCI
-# default y
-# help
-# Say Y here for V-class PCI, DMA/IOMMU, IRQ subsystem support.
-
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 844f6137970..7c5d86696ee 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -420,18 +420,7 @@ static struct platform_driver axdrv = {
.resume = parport_ax88796_resume,
};
-static int __init parport_ax88796_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit parport_ax88796_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(parport_ax88796_init)
-module_exit(parport_ax88796_exit)
+module_platform_driver(axdrv);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 362db31d8ca..1c0c642b3e2 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -397,7 +397,7 @@ static void __exit parport_mfc3_exit(void)
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
-MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Paralllel Port");
+MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 910c5a26e34..9390a534a2b 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -391,21 +391,10 @@ static struct platform_driver bpp_sbus_driver = {
.remove = __devexit_p(bpp_remove),
};
-static int __init parport_sunbpp_init(void)
-{
- return platform_driver_register(&bpp_sbus_driver);
-}
-
-static void __exit parport_sunbpp_exit(void)
-{
- platform_driver_unregister(&bpp_sbus_driver);
-}
+module_platform_driver(bpp_sbus_driver);
MODULE_AUTHOR("Derrick J Brashear");
MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
-
-module_init(parport_sunbpp_init)
-module_exit(parport_sunbpp_exit)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f02b5235056..37856f7c778 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,11 +98,11 @@ config PCI_PASID
If unsure, say N.
config PCI_IOAPIC
- bool
+ tristate "PCI IO-APIC hotplug support" if X86
depends on PCI
depends on ACPI
depends on HOTPLUG
- default y
+ default !X86
config PCI_LABEL
def_bool y if (DMI || ACPI)
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd7..9dd90b30f91 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
@@ -174,21 +175,22 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
u32 max_requests;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
- if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED))
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ if ((control & PCI_PRI_CTRL_ENABLE) ||
+ !(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests);
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- control |= PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ control |= PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
@@ -205,13 +207,13 @@ void pci_disable_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- control &= ~PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control &= ~PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pri);
@@ -226,13 +228,13 @@ bool pci_pri_enabled(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- return (control & PCI_PRI_ENABLE) ? true : false;
+ return (control & PCI_PRI_CTRL_ENABLE) ? true : false;
}
EXPORT_SYMBOL_GPL(pci_pri_enabled);
@@ -248,17 +250,17 @@ int pci_reset_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- if (control & PCI_PRI_ENABLE)
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ if (control & PCI_PRI_CTRL_ENABLE)
return -EBUSY;
- control |= PCI_PRI_RESET;
+ control |= PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
@@ -281,14 +283,14 @@ bool pci_pri_stopped(struct pci_dev *pdev)
u16 control, status;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return true;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
return false;
return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
@@ -310,15 +312,15 @@ int pci_pri_status(struct pci_dev *pdev)
u16 status, control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
/* Stopped bit is undefined when enable == 1, so clear it */
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
status &= ~PCI_PRI_STATUS_STOPPED;
return status;
@@ -341,25 +343,25 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
u16 control, supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- if (!(supported & PCI_PASID_ENABLE))
+ if (control & PCI_PASID_CTRL_ENABLE)
return -EINVAL;
- supported &= PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
if ((supported & features) != features)
return -EINVAL;
- control = PCI_PASID_ENABLE | features;
+ control = PCI_PASID_CTRL_ENABLE | features;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
return 0;
}
@@ -375,11 +377,11 @@ void pci_disable_pasid(struct pci_dev *pdev)
u16 control = 0;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pasid);
@@ -390,22 +392,21 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
* Returns a negative value when no PASID capability is present.
* Otherwise it returns a bitmask with supported features. Current
* features reported are:
- * PCI_PASID_ENABLE - PASID capability can be enabled
- * PCI_PASID_EXEC - Execute permission supported
- * PCI_PASID_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
*/
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
@@ -425,11 +426,11 @@ int pci_max_pasids(struct pci_dev *pdev)
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
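A hedged sketch of how an IOMMU driver might use the renamed PRI/PASID helpers together (the function name, request count and feature selection are illustrative only):

/* Hypothetical IOMMU-side usage of the renamed PRI/PASID helpers. */
static int example_enable_ats_features(struct pci_dev *pdev)
{
	int feats = pci_pasid_features(pdev);

	if (feats < 0)
		return feats;		/* no PASID capability */

	if (pci_enable_pasid(pdev, feats & PCI_PASID_CAP_EXEC))
		return -ENODEV;

	/* 32 outstanding page requests is an arbitrary example value */
	if (pci_reset_pri(pdev) || pci_enable_pri(pdev, 32)) {
		pci_disable_pasid(pdev);
		return -ENODEV;
	}

	return 0;
}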
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54a0c8..9ddf69e3bbe 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- struct acpi_pci_root *root;
acpi_handle dummy_handle;
- /*
- * We shouldn't use this bridge if PCIe native hotplug control has been
- * granted by the BIOS for it.
- */
- root = acpi_pci_find_root(handle);
- if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return -ENODEV;
-
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- struct acpi_pci_root *root;
int *count = (int *)context;
if (!acpi_is_root_bridge(handle))
return AE_OK;
- root = acpi_pci_find_root(handle);
- if (!root)
- return AE_OK;
-
- if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- return AE_OK;
-
(*count)++;
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 838f571027b..9a33fdde2d1 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -45,7 +45,6 @@ extern int pciehp_poll_time;
extern int pciehp_debug;
extern int pciehp_force;
extern struct workqueue_struct *pciehp_wq;
-extern struct workqueue_struct *pciehp_ordered_wq;
#define dbg(format, arg...) \
do { \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7ac8358df8f..b8c99d35ac9 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -43,7 +43,6 @@ int pciehp_poll_mode;
int pciehp_poll_time;
int pciehp_force;
struct workqueue_struct *pciehp_wq;
-struct workqueue_struct *pciehp_ordered_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -345,18 +344,11 @@ static int __init pcied_init(void)
if (!pciehp_wq)
return -ENOMEM;
- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
- if (!pciehp_ordered_wq) {
- destroy_workqueue(pciehp_wq);
- return -ENOMEM;
- }
-
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval) {
- destroy_workqueue(pciehp_ordered_wq);
destroy_workqueue(pciehp_wq);
dbg("Failure to register service\n");
}
@@ -366,9 +358,8 @@ static int __init pcied_init(void)
static void __exit pcied_cleanup(void)
{
dbg("unload_pciehpd()\n");
- destroy_workqueue(pciehp_ordered_wq);
- destroy_workqueue(pciehp_wq);
pcie_port_service_unregister(&hpdriver_portdrv);
+ destroy_workqueue(pciehp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5fc16..27f44295a65 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
else
p_slot->state = POWERON_STATE;
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
}
static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7b1414810ae..bcdbb164362 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -806,7 +806,6 @@ static void pcie_cleanup_slot(struct controller *ctrl)
struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
flush_workqueue(pciehp_wq);
- flush_workqueue(pciehp_ordered_wq);
kfree(slot);
}
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 5775638ac01..205af8dc83c 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,7 +17,7 @@
*/
#include <linux/pci.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -27,7 +27,7 @@ struct ioapic {
u32 gsi_base;
};
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
acpi_handle handle;
acpi_status status;
@@ -88,7 +88,7 @@ exit_free:
return -ENODEV;
}
-static void ioapic_remove(struct pci_dev *dev)
+static void __devexit ioapic_remove(struct pci_dev *dev)
{
struct ioapic *ioapic = pci_get_drvdata(dev);
@@ -99,13 +99,12 @@ static void ioapic_remove(struct pci_dev *dev)
}
-static struct pci_device_id ioapic_devices[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
{ }
};
+MODULE_DEVICE_TABLE(pci, ioapic_devices);
static struct pci_driver ioapic_driver = {
.name = "ioapic",
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b3..1969a3ee305 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0e6d04d7ba4..337e16ab4a9 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -323,6 +323,8 @@ static void free_msi_irqs(struct pci_dev *dev)
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
list_del(&entry->list);
kfree(entry);
}
@@ -403,6 +405,98 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
+#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
+#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
+
+struct msi_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
+}
+
+static ssize_t msi_irq_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct msi_attribute *attribute = to_msi_attr(attr);
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(entry, attribute, buf);
+}
+
+static const struct sysfs_ops msi_irq_sysfs_ops = {
+ .show = msi_irq_attr_show,
+};
+
+static struct msi_attribute mode_attribute =
+ __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
+
+
+struct attribute *msi_irq_default_attrs[] = {
+ &mode_attribute.attr,
+ NULL
+};
+
+void msi_kobj_release(struct kobject *kobj)
+{
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ pci_dev_put(entry->dev);
+}
+
+static struct kobj_type msi_irq_ktype = {
+ .release = msi_kobj_release,
+ .sysfs_ops = &msi_irq_sysfs_ops,
+ .default_attrs = msi_irq_default_attrs,
+};
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+ struct msi_desc *entry;
+ struct kobject *kobj;
+ int ret;
+ int count = 0;
+
+ pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
+ if (!pdev->msi_kset)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ kobj = &entry->kobj;
+ kobj->kset = pdev->msi_kset;
+ pci_dev_get(pdev);
+ ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
+ "%u", entry->irq);
+ if (ret)
+ goto out_unroll;
+
+ count++;
+ }
+
+ return 0;
+
+out_unroll:
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (!count)
+ break;
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ count--;
+ }
+ return ret;
+}
+
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
@@ -454,6 +548,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return ret;
}
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 1);
@@ -574,6 +675,12 @@ static int msix_capability_init(struct pci_dev *dev,
msix_program_entries(dev, entries);
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ ret = 0;
+ goto error;
+ }
+
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
@@ -732,6 +839,8 @@ void pci_disable_msi(struct pci_dev *dev)
pci_msi_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -830,6 +939,8 @@ void pci_disable_msix(struct pci_dev *dev)
pci_msix_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -870,5 +981,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
+ int pos;
INIT_LIST_HEAD(&dev->msi_list);
+
+ /* Disable the msi hardware to avoid screaming interrupts
+ * during boot. This is the power on reset default so
+ * usually this should be a noop.
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos)
+ msi_set_enable(dev, pos, 0);
+ msix_set_enable(dev, 0);
}
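The new msi_irqs kset shows up under the device's sysfs directory with one subdirectory per allocated vector and a read-only "mode" attribute. An illustrative userspace read (the device address and IRQ number are made up):

/* Illustrative userspace read of the new per-vector attribute. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:19.0/msi_irqs/42/mode", "r");
	char mode[16];

	if (!f)
		return 1;
	if (fgets(mode, sizeof(mode), f))
		printf("vector mode: %s", mode);	/* prints "msi" or "msix" */
	fclose(f);
	return 0;
}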
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 4ecb6408b0d..060fd22a110 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -45,16 +45,20 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct pci_dev *pci_dev = context;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+ if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
+ return;
+
+ if (!pci_dev->pm_cap || !pci_dev->pme_support
+ || pci_check_pme_status(pci_dev)) {
if (pci_dev->pme_poll)
pci_dev->pme_poll = false;
pci_wakeup_event(pci_dev);
- pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
- if (pci_dev->subordinate)
- pci_pme_wakeup_bus(pci_dev->subordinate);
}
+
+ if (pci_dev->subordinate)
+ pci_pme_wakeup_bus(pci_dev->subordinate);
}
/**
@@ -395,7 +399,6 @@ static int __init acpi_pci_init(void)
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
- pcie_clear_aspm();
pcie_no_aspm();
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 81525ae5d86..edaed6f4da6 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -89,7 +89,7 @@ find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static mode_t
+static umode_t
smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
int n)
{
@@ -275,7 +275,7 @@ device_has_dsm(struct device *dev)
return FALSE;
}
-static mode_t
+static umode_t
acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9..6d4a5319148 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* skip only the SR-IOV resources; they are enabled in sriov_enable() */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index cbfbab18be9..1cfbf228fbb 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@ struct pcie_link_state {
struct aspm_latency acceptable[8];
};
-static int aspm_disabled, aspm_force, aspm_clear_state;
+static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
- if (aspm_clear_state)
- return -EINVAL;
-
/*
* Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
@@ -574,9 +571,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
- if (aspm_disabled && !aspm_clear_state)
- return;
-
/* VIA has a strange chipset, root port is under a bridge */
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
@@ -608,7 +602,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
- if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
+ if (aspm_policy != POLICY_POWERSAVE) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
@@ -649,8 +643,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
- !parent || !parent->link_state)
+ if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
return;
if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
(parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -734,13 +727,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
* pci_disable_link_state - disable pci device's link state, so the link will
* never enter specific states
*/
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+ bool force)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
- if (aspm_disabled || !pci_is_pcie(pdev))
+ if (aspm_disabled && !force)
+ return;
+
+ if (!pci_is_pcie(pdev))
return;
+
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
parent = pdev;
@@ -768,16 +766,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false);
+ __pci_disable_link_state(pdev, state, false, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true);
+ __pci_disable_link_state(pdev, state, true, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+ struct pci_dev *child;
+
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+ list_for_each_entry(child, &bus->devices, bus_list) {
+ __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM,
+ false, true);
+ }
+}
+
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
@@ -935,6 +948,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
@@ -947,16 +961,18 @@ static int __init pcie_aspm_disable(char *str)
__setup("pcie_aspm=", pcie_aspm_disable);
-void pcie_clear_aspm(void)
-{
- if (!aspm_force)
- aspm_clear_state = 1;
-}
-
void pcie_no_aspm(void)
{
- if (!aspm_force)
+ /*
+ * Disabling ASPM is intended to prevent the kernel from modifying
+ * existing hardware state, not to clear existing state. To that end:
+ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+ * (b) prevent userspace from changing policy
+ */
+ if (!aspm_force) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
+ }
}
/**
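pcie_clear_aspm() now takes a bus and forcibly disables L0s/L1/clock-PM on the devices below it, even when ASPM is otherwise disabled. A hedged, hypothetical caller sketch; the condition is illustrative and does not reproduce the actual ACPI root-bridge logic:

/* Hedged caller sketch for the reworked per-bus pcie_clear_aspm(). */
static void example_root_bridge_post_scan(struct pci_bus *bus,
					  bool clear_firmware_aspm)
{
	/* whether to clear is the host bridge driver's policy decision */
	if (clear_firmware_aspm)
		pcie_clear_aspm(bus);
}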
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 90832a95599..7cf3d2fcf56 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1126,14 +1126,11 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
-static struct xenbus_driver xenbus_pcifront_driver = {
- .name = "pcifront",
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
.probe = pcifront_xenbus_probe,
.remove = pcifront_xenbus_remove,
.otherend_changed = pcifront_backend_changed,
-};
+);
static int __init pcifront_init(void)
{
@@ -1142,12 +1139,12 @@ static int __init pcifront_init(void)
pci_frontend_registrar(1 /* enable */);
- return xenbus_register_frontend(&xenbus_pcifront_driver);
+ return xenbus_register_frontend(&xenpci_driver);
}
static void __exit pcifront_cleanup(void)
{
- xenbus_unregister_driver(&xenbus_pcifront_driver);
+ xenbus_unregister_driver(&xenpci_driver);
pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 0b4f946cf13..31ab6ddf52c 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -16,8 +16,6 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_SKTSEL (54)
@@ -27,15 +25,15 @@
#define GPIO_PCMCIA_S1_RDYINT (8)
#define GPIO_PCMCIA_RESET (9)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S1_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S1_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
-#define PCMCIA_S1_RDYINT IRQ_GPIO(GPIO_PCMCIA_S1_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S1_CD_VALID gpio_to_irq(GPIO_PCMCIA_S1_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S1_RDYINT gpio_to_irq(GPIO_PCMCIA_S1_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
- { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
+ { .sock = 1, .str = "PCMCIA1 CD" },
};
static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -46,6 +44,8 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
+ irqs[1].irq = PCMCIA_S1_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 923f315926e..3dc7621a076 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -16,20 +16,18 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_S0_CD_VALID (84)
#define GPIO_PCMCIA_S0_RDYINT (82)
#define GPIO_PCMCIA_RESET (53)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
};
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -40,6 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = PCMCIA_S0_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
index 8bfbd4dca13..17cd2ce7428 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -26,20 +26,23 @@
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD0),
.str = "CF card detect"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD1),
.str = "Wifi switch"
},
};
static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
- IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
+ if (skt->nr == 0)
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY0);
+ else
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY1);
+
+ cd_irqs[0].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD0);
+ cd_irqs[1].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD1);
return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
}
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index d589ad1dcd4..6a8e011a8c1 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -33,7 +33,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmld_pcmcia_gpios,
ARRAY_SIZE(palmld_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMLD_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 9c6a04b2f71..9e38de769ba 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -37,7 +37,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmtc_pcmcia_gpios,
ARRAY_SIZE(palmtc_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTC_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 939622251df..6c2366b74a3 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -34,7 +34,7 @@
#define SG2_S0_GPIO_READY 81
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
};
static struct gpio sg2_pcmcia_gpios[] = {
@@ -44,7 +44,9 @@ static struct gpio sg2_pcmcia_gpios[] = {
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ skt->socket.pci_irq = gpio_to_irq(SG2_S0_GPIO_READY);
+ irqs[0].irq = gpio_to_irq(SG2_S0_GPIO_DETECT);
+
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index 57ddb969d88..7c33f898135 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -30,7 +30,7 @@
extern void board_pcmcia_power(int power);
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(GPIO_PCD), "cs0_cd" }
+ { .sock = 0, .str = "cs0_cd" }
/* on other baseboards we can have more inputs */
};
@@ -53,7 +53,8 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_free(GPIO_PRDY);
return -EINVAL;
}
- skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_PRDY);
+ irqs[0].irq = gpio_to_irq(GPIO_PCD);
break;
default:
break;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 66ab92cf310..61b17d235db 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -38,12 +38,10 @@ static struct gpio vpac270_cf_gpios[] = {
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
.str = "PCMCIA CD"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
.str = "CF CD"
},
};
@@ -57,6 +55,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_pcmcia_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
+ cd_irqs[0].irq = gpio_to_irq(GPIO84_VPAC270_PCMCIA_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
@@ -65,6 +64,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_cf_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
+ cd_irqs[1].irq = gpio_to_irq(GPIO17_VPAC270_CF_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
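/*
 * Editor's sketch (not part of the patch): every PXA2xx PCMCIA hunk above
 * follows the same conversion -- the build-time IRQ_GPIO() macro goes away
 * and the IRQ numbers are resolved at runtime with gpio_to_irq() inside the
 * socket's hw_init() hook.  The example_* and GPIO_EXAMPLE_* names below
 * are hypothetical placeholders.
 */
static struct pcmcia_irqs example_cd_irqs[] = {
	{ .sock = 0, .str = "PCMCIA CD" },	/* .irq is filled in at runtime */
};

static int example_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	skt->socket.pci_irq = gpio_to_irq(GPIO_EXAMPLE_READY);
	example_cd_irqs[0].irq = gpio_to_irq(GPIO_EXAMPLE_CD);

	return soc_pcmcia_request_irqs(skt, example_cd_irqs,
				       ARRAY_SIZE(example_cd_irqs));
}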
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e17e2f8001d..afaf8855812 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -12,7 +12,10 @@ menu "Pin controllers"
depends on PINCTRL
config PINMUX
- bool "Support pinmux controllers"
+ bool "Support pin multiplexing controllers"
+
+config PINCONF
+ bool "Support pin configuration controllers"
config DEBUG_PINCTRL
bool "Debug PINCTRL calls"
@@ -20,16 +23,25 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
-config PINMUX_SIRF
- bool "CSR SiRFprimaII pinmux driver"
+config PINCTRL_SIRF
+ bool "CSR SiRFprimaII pin controller driver"
depends on ARCH_PRIMA2
select PINMUX
-config PINMUX_U300
- bool "U300 pinmux driver"
+config PINCTRL_U300
+ bool "U300 pin controller driver"
depends on ARCH_U300
select PINMUX
+config PINCTRL_COH901
+ bool "ST-Ericsson U300 COH 901 335/571 GPIO"
+ depends on GPIOLIB && ARCH_U300 && PINCTRL_U300
+ help
+ Say yes here to support GPIO interface on ST-Ericsson U300.
+ The names of the two IP block variants supported are
+ COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
+ ports of 8 GPIO pins each.
+
endmenu
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index bdc548a6b7e..827601cc68f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -1,8 +1,10 @@
# generic pinmux support
-ccflags-$(CONFIG_DEBUG_PINMUX) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-$(CONFIG_PINCTRL) += core.o
obj-$(CONFIG_PINMUX) += pinmux.o
-obj-$(CONFIG_PINMUX_SIRF) += pinmux-sirf.o
-obj-$(CONFIG_PINMUX_U300) += pinmux-u300.o
+obj-$(CONFIG_PINCONF) += pinconf.o
+obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
+obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index eadef9e191e..569bdb3ef10 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -28,17 +28,12 @@
#include <linux/pinctrl/machine.h>
#include "core.h"
#include "pinmux.h"
+#include "pinconf.h"
/* Global list of pin control devices */
static DEFINE_MUTEX(pinctrldev_list_mutex);
static LIST_HEAD(pinctrldev_list);
-static void pinctrl_dev_release(struct device *dev)
-{
- struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
- kfree(pctldev);
-}
-
const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
{
/* We're not allowed to register devices without name */
@@ -70,14 +65,14 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
- if (dev && &pctldev->dev == dev) {
+ if (dev && pctldev->dev == dev) {
/* Matched on device pointer */
found = true;
break;
}
if (devname &&
- !strcmp(dev_name(&pctldev->dev), devname)) {
+ !strcmp(dev_name(pctldev->dev), devname)) {
/* Matched on device name */
found = true;
break;
@@ -88,7 +83,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
return found ? pctldev : NULL;
}
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *pindesc;
unsigned long flags;
@@ -101,6 +96,31 @@ struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
}
/**
+ * pin_get_from_name() - look up a pin number from a name
+ * @pctldev: the pin control device to lookup the pin on
+ * @name: the name of the pin to look up
+ */
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
+{
+ unsigned i, pin;
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+ if (desc->name && !strcmp(name, desc->name))
+ return pin;
+ }
+
+ return -EINVAL;
+}
+
+/**
* pin_is_valid() - check if pin exists on controller
* @pctldev: the pin control device to check the pin on
* @pin: pin to check, use the local pin controller index number
@@ -139,6 +159,8 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
if (pindesc != NULL) {
radix_tree_delete(&pctldev->pin_desc_tree,
pins[i].number);
+ if (pindesc->dynamic_name)
+ kfree(pindesc->name);
}
kfree(pindesc);
}
@@ -160,19 +182,27 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
if (pindesc == NULL)
return -ENOMEM;
+
spin_lock_init(&pindesc->lock);
/* Set owner */
pindesc->pctldev = pctldev;
/* Copy basic pin info */
- pindesc->name = name;
+ if (name) {
+ pindesc->name = name;
+ } else {
+ pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
+ if (pindesc->name == NULL)
+ return -ENOMEM;
+ pindesc->dynamic_name = true;
+ }
spin_lock(&pctldev->pin_desc_tree_lock);
radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
spin_unlock(&pctldev->pin_desc_tree_lock);
pr_debug("registered pin %d (%s) on %s\n",
- number, name ? name : "(unnamed)", pctldev->desc->name);
+ number, pindesc->name, pctldev->desc->name);
return 0;
}
@@ -284,21 +314,52 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
mutex_unlock(&pctldev->gpio_ranges_lock);
}
+/**
+ * pinctrl_get_group_selector() - returns the group selector for a group
+ * @pctldev: the pin controller handling the group
+ * @pin_group: the pin group to look up
+ */
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ unsigned group_selector = 0;
+
+ while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev,
+ group_selector);
+ if (!strcmp(gname, pin_group)) {
+ dev_dbg(pctldev->dev,
+ "found group selector %u for %s\n",
+ group_selector,
+ pin_group);
+ return group_selector;
+ }
+
+ group_selector++;
+ }
+
+ dev_err(pctldev->dev, "does not have pin group %s\n",
+ pin_group);
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_DEBUG_FS
static int pinctrl_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- unsigned pin;
+ unsigned i, pin;
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
- seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
if (desc == NULL)
@@ -363,8 +424,11 @@ static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
/* Loop over the ranges */
mutex_lock(&pctldev->gpio_ranges_lock);
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
- seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
- range->base, (range->base + range->npins - 1));
+ seq_printf(s, "%u: %s GPIOS [%u - %u] PINS [%u - %u]\n",
+ range->id, range->name,
+ range->base, (range->base + range->npins - 1),
+ range->pin_base,
+ (range->pin_base + range->npins - 1));
}
mutex_unlock(&pctldev->gpio_ranges_lock);
@@ -375,11 +439,15 @@ static int pinctrl_devices_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev;
- seq_puts(s, "name [pinmux]\n");
+ seq_puts(s, "name [pinmux] [pinconf]\n");
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
seq_printf(s, "%s ", pctldev->desc->name);
if (pctldev->desc->pmxops)
+ seq_puts(s, "yes ");
+ else
+ seq_puts(s, "no ");
+ if (pctldev->desc->confops)
seq_puts(s, "yes");
else
seq_puts(s, "no");
@@ -444,11 +512,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
{
static struct dentry *device_root;
- device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ device_root = debugfs_create_dir(dev_name(pctldev->dev),
debugfs_root);
if (IS_ERR(device_root) || !device_root) {
pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&pctldev->dev));
+ dev_name(pctldev->dev));
return;
}
debugfs_create_file("pins", S_IFREG | S_IRUGO,
@@ -458,6 +526,7 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
pinmux_init_device_debugfs(device_root, pctldev);
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_init_debugfs(void)
@@ -495,7 +564,6 @@ static void pinctrl_init_debugfs(void)
struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data)
{
- static atomic_t pinmux_no = ATOMIC_INIT(0);
struct pinctrl_dev *pctldev;
int ret;
@@ -514,6 +582,16 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
}
}
+ /* If we're implementing pinconfig, check the ops for sanity */
+ if (pctldesc->confops) {
+ ret = pinconf_check_ops(pctldesc->confops);
+ if (ret) {
+ pr_err("%s pin config ops lacks necessary functions\n",
+ pctldesc->name);
+ return NULL;
+ }
+ }
+
pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
if (pctldev == NULL)
return NULL;
@@ -526,18 +604,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
spin_lock_init(&pctldev->pin_desc_tree_lock);
INIT_LIST_HEAD(&pctldev->gpio_ranges);
mutex_init(&pctldev->gpio_ranges_lock);
-
- /* Register device */
- pctldev->dev.parent = dev;
- dev_set_name(&pctldev->dev, "pinctrl.%d",
- atomic_inc_return(&pinmux_no) - 1);
- pctldev->dev.release = pinctrl_dev_release;
- ret = device_register(&pctldev->dev);
- if (ret != 0) {
- pr_err("error in device registration\n");
- goto out_reg_dev_err;
- }
- dev_set_drvdata(&pctldev->dev, pctldev);
+ pctldev->dev = dev;
/* Register all the pins */
pr_debug("try to register %d pins on %s...\n",
@@ -547,7 +614,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pr_err("error during pin registration\n");
pinctrl_free_pindescs(pctldev, pctldesc->pins,
pctldesc->npins);
- goto out_reg_pins_err;
+ goto out_err;
}
pinctrl_init_device_debugfs(pctldev);
@@ -557,10 +624,8 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pinmux_hog_maps(pctldev);
return pctldev;
-out_reg_pins_err:
- device_del(&pctldev->dev);
-out_reg_dev_err:
- put_device(&pctldev->dev);
+out_err:
+ kfree(pctldev);
return NULL;
}
EXPORT_SYMBOL_GPL(pinctrl_register);
@@ -584,7 +649,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
- device_unregister(&pctldev->dev);
+ kfree(pctldev);
}
EXPORT_SYMBOL_GPL(pinctrl_unregister);
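/*
 * Editor's sketch (not part of the patch): with the embedded struct device
 * gone, pinctrl_register() simply records the caller's device pointer, so a
 * controller driver's probe reduces to roughly the following.  The foo_*
 * names are hypothetical; foo_pinctrl_desc is assumed to be a populated
 * struct pinctrl_desc.
 */
static int __devinit foo_pinctrl_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctl;

	pctl = pinctrl_register(&foo_pinctrl_desc, &pdev->dev, NULL);
	if (!pctl)
		return -EINVAL;

	platform_set_drvdata(pdev, pctl);
	return 0;
}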
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 472fa1341cc..177a3310547 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -9,6 +9,10 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/pinctrl/pinconf.h>
+
+struct pinctrl_gpio_range;
+
/**
* struct pinctrl_dev - pin control class device
* @node: node to include this pin controller in the global pin controller list
@@ -34,7 +38,7 @@ struct pinctrl_dev {
spinlock_t pin_desc_tree_lock;
struct list_head gpio_ranges;
struct mutex gpio_ranges_lock;
- struct device dev;
+ struct device *dev;
struct module *owner;
void *driver_data;
#ifdef CONFIG_PINMUX
@@ -48,6 +52,7 @@ struct pinctrl_dev {
* @pctldev: corresponding pin control device
* @name: a name for the pin, e.g. the name of the pin/pad/finger on a
* datasheet or such
+ * @dynamic_name: if the name of this pin was dynamically allocated
* @lock: a lock to protect the descriptor structure
* @mux_requested: whether the pin is already requested by pinmux or not
* @mux_function: a named muxing function for the pin that will be passed to
@@ -56,6 +61,7 @@ struct pinctrl_dev {
struct pin_desc {
struct pinctrl_dev *pctldev;
const char *name;
+ bool dynamic_name;
spinlock_t lock;
/* These fields only added when supporting pinmux drivers */
#ifdef CONFIG_PINMUX
@@ -65,7 +71,10 @@ struct pin_desc {
struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
const char *dev_name);
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin);
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin);
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
int pinctrl_get_device_gpio_range(unsigned gpio,
struct pinctrl_dev **outdev,
struct pinctrl_gpio_range **outrange);
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group);
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
new file mode 100644
index 00000000000..1259872b0a1
--- /dev/null
+++ b/drivers/pinctrl/pinconf.c
@@ -0,0 +1,326 @@
+/*
+ * Core driver for the pin config portions of the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinconfig core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "core.h"
+#include "pinconf.h"
+
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_get) {
+ dev_err(pctldev->dev, "cannot get pin configuration, missing "
+ "pin_config_get() function in driver\n");
+ return -EINVAL;
+ }
+
+ return ops->pin_config_get(pctldev, pin, config);
+}
+
+/**
+ * pin_config_get() - get the configuration of a single pin parameter
+ * @dev_name: name of the pin controller device for this pin
+ * @name: name of the pin to get the config for
+ * @config: the config pointed to by this argument will be filled in with the
+ * current pin state, it can be used directly by drivers as a numeral, or
+ * it can be dereferenced to any struct.
+ */
+int pin_config_get(const char *dev_name, const char *name,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_get_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_get);
+
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ int ret;
+
+ if (!ops || !ops->pin_config_set) {
+ dev_err(pctldev->dev, "cannot configure pin, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ ret = ops->pin_config_set(pctldev, pin, config);
+ if (ret) {
+ dev_err(pctldev->dev,
+ "unable to set pin configuration on pin %d\n", pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pin_config_set() - set the configuration of a single pin parameter
+ * @dev_name: name of pin controller device for this pin
+ * @name: name of the pin to set the config for
+ * @config: the config in this argument will contain the desired pin state, it
+ * can be used directly by drivers as a numeral, or it can be dereferenced
+ * to any struct.
+ */
+int pin_config_set(const char *dev_name, const char *name,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_set_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_set);
+
+int pin_config_group_get(const char *dev_name, const char *pin_group,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ int selector;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_group_get) {
+ dev_err(pctldev->dev, "cannot get configuration for pin "
+ "group, missing group config get function in "
+ "driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ return ops->pin_config_group_get(pctldev, selector, config);
+}
+EXPORT_SYMBOL(pin_config_group_get);
+
+
+int pin_config_group_set(const char *dev_name, const char *pin_group,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ const struct pinctrl_ops *pctlops;
+ int selector;
+ const unsigned *pins;
+ unsigned num_pins;
+ int ret;
+ int i;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+ pctlops = pctldev->desc->pctlops;
+
+ if (!ops || (!ops->pin_config_group_set && !ops->pin_config_set)) {
+ dev_err(pctldev->dev, "cannot configure pin group, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ ret = pctlops->get_group_pins(pctldev, selector, &pins, &num_pins);
+ if (ret) {
+ dev_err(pctldev->dev, "cannot configure pin group, error "
+ "getting pins\n");
+ return ret;
+ }
+
+ /*
+ * If the pin controller supports handling entire groups we use that
+ * capability.
+ */
+ if (ops->pin_config_group_set) {
+ ret = ops->pin_config_group_set(pctldev, selector, config);
+ /*
+ * If the pin controller prefers that a certain group be handled
+ * pin-by-pin as well, it returns -EAGAIN.
+ */
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ /*
+ * If the controller cannot handle entire groups, we configure each pin
+ * individually.
+ */
+ if (!ops->pin_config_set)
+ return 0;
+
+ for (i = 0; i < num_pins; i++) {
+ ret = ops->pin_config_set(pctldev, pins[i], config);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pin_config_group_set);
+
+int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+ /* We must be able to read out pin status */
+ if (!ops->pin_config_get && !ops->pin_config_group_get)
+ return -EINVAL;
+ /* We have to be able to config the pins in SOME way */
+ if (!ops->pin_config_set && !ops->pin_config_group_set)
+ return -EINVAL;
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
+ struct seq_file *s, int pin)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_dbg_show)
+ ops->pin_config_dbg_show(pctldev, s, pin);
+}
+
+static int pinconf_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ unsigned i, pin;
+
+ seq_puts(s, "Pin config settings per pin\n");
+ seq_puts(s, "Format: pin (name): pinmux setting array\n");
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Skip if we cannot search the pin */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s):", pin,
+ desc->name ? desc->name : "unnamed");
+
+ pinconf_dump_pin(pctldev, s, pin);
+
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+
+static void pinconf_dump_group(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned selector,
+ const char *gname)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_group_dbg_show)
+ ops->pin_config_group_dbg_show(pctldev, s, selector);
+}
+
+static int pinconf_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ unsigned selector = 0;
+
+ if (!ops || !ops->pin_config_group_get)
+ return 0;
+
+ seq_puts(s, "Pin config settings per pin group\n");
+ seq_puts(s, "Format: group (name): pinmux setting array\n");
+
+ while (pctlops->list_groups(pctldev, selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev, selector);
+
+ seq_printf(s, "%u (%s):", selector, gname);
+ pinconf_dump_group(pctldev, s, selector, gname);
+ selector++;
+ }
+
+ return 0;
+}
+
+static int pinconf_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_pins_show, inode->i_private);
+}
+
+static int pinconf_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_groups_show, inode->i_private);
+}
+
+static const struct file_operations pinconf_pins_ops = {
+ .open = pinconf_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinconf_groups_ops = {
+ .open = pinconf_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+ debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_pins_ops);
+ debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_groups_ops);
+}
+
+#endif
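/*
 * Editor's sketch (not part of the patch): the new pinconf entry points are
 * addressed by pin controller device name plus pin or group name, with the
 * configuration packed into a driver-defined unsigned long.  The device,
 * pin and group names and FOO_PINCONF_PULL_UP below are hypothetical.
 */
static int foo_board_config_pins(void)
{
	unsigned long cfg = FOO_PINCONF_PULL_UP;
	int ret;

	/* configure a single pin by name ... */
	ret = pin_config_set("pinctrl-foo", "GPIO12", cfg);
	if (ret)
		return ret;

	/* ... or a whole group, which falls back to per-pin setting */
	return pin_config_group_set("pinctrl-foo", "i2c0_grp", cfg);
}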
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
new file mode 100644
index 00000000000..e7dc6165032
--- /dev/null
+++ b/drivers/pinctrl/pinconf.h
@@ -0,0 +1,36 @@
+/*
+ * Internal interface between the core pin control system and the
+ * pin config portions
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifdef CONFIG_PINCONF
+
+int pinconf_check_ops(const struct pinconf_ops *ops);
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev);
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config);
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config);
+
+#else
+
+static inline int pinconf_check_ops(const struct pinconf_ops *ops)
+{
+ return 0;
+}
+
+static inline void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+}
+
+#endif
diff --git a/drivers/gpio/gpio-u300.c b/drivers/pinctrl/pinctrl-coh901.c
index 4035778852b..69fb7072a23 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/pinctrl/pinmux.h>
#include <mach/gpio-u300.h>
/*
@@ -351,6 +352,24 @@ static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
return container_of(chip, struct u300_gpio, chip);
}
+static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ /*
+ * Map back to global GPIO space and request muxing, the direction
+ * parameter does not matter for this controller.
+ */
+ int gpio = chip->base + offset;
+
+ return pinmux_request_gpio(gpio);
+}
+
+static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinmux_free_gpio(gpio);
+}
+
static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct u300_gpio *gpio = to_u300_gpio(chip);
@@ -483,6 +502,8 @@ static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
static struct gpio_chip u300_gpio_chip = {
.label = "u300-gpio-chip",
.owner = THIS_MODULE,
+ .request = u300_gpio_request,
+ .free = u300_gpio_free,
.get = u300_gpio_get,
.set = u300_gpio_set,
.direction_input = u300_gpio_direction_input,
diff --git a/drivers/pinctrl/pinmux-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index d76cae62095..6b3534cc051 100644
--- a/drivers/pinctrl/pinmux-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -463,7 +463,7 @@ static const struct sirfsoc_padmux spi1_padmux = {
.funcval = BIT(8),
};
-static const unsigned spi1_pins[] = { 33, 34, 35, 36 };
+static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
{
@@ -1067,7 +1067,7 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
spmx = pinctrl_dev_get_drvdata(pmxdev);
muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
- muxval = muxval | (1 << offset);
+ muxval = muxval | (1 << (offset - range->pin_base));
writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
return 0;
@@ -1086,7 +1086,6 @@ static struct pinctrl_desc sirfsoc_pinmux_desc = {
.name = DRIVER_NAME,
.pins = sirfsoc_pads,
.npins = ARRAY_SIZE(sirfsoc_pads),
- .maxpin = SIRFSOC_NUM_PADS - 1,
.pctlops = &sirfsoc_pctrl_ops,
.pmxops = &sirfsoc_pinmux_ops,
.owner = THIS_MODULE,
@@ -1100,21 +1099,25 @@ static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
.name = "sirfsoc-gpio*",
.id = 0,
.base = 0,
+ .pin_base = 0,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 1,
.base = 32,
+ .pin_base = 32,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 2,
.base = 64,
+ .pin_base = 64,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 3,
.base = 96,
+ .pin_base = 96,
.npins = 19,
},
};
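/*
 * Editor's sketch (not part of the patch): the new .pin_base field lets a
 * GPIO range map a window of the gpiolib number space onto an arbitrary
 * offset in the controller's pin space; the core then converts with
 * pin = gpio - range->base + range->pin_base.  The values below are
 * hypothetical.
 */
static struct pinctrl_gpio_range example_range = {
	.name     = "example-gpio",
	.id       = 0,
	.base     = 32,		/* first GPIO number in gpiolib space */
	.pin_base = 64,		/* matching pin number on the controller */
	.npins    = 16,		/* so gpiolib GPIO 35 maps to pin 67 */
};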
diff --git a/drivers/pinctrl/pinmux-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 4858a64131f..c8d02f1c2b5 100644
--- a/drivers/pinctrl/pinmux-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -940,20 +940,23 @@ static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
{
u16 regval, val, mask;
int i;
+ const struct u300_pmx_mask *upmx_mask;
+ upmx_mask = u300_pmx_functions[selector].mask;
for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
if (enable)
- val = u300_pmx_functions[selector].mask->bits;
+ val = upmx_mask->bits;
else
val = 0;
- mask = u300_pmx_functions[selector].mask->mask;
+ mask = upmx_mask->mask;
if (mask != 0) {
regval = readw(upmx->virtbase + u300_pmx_registers[i]);
regval &= ~mask;
regval |= val;
writew(regval, upmx->virtbase + u300_pmx_registers[i]);
}
+ upmx_mask++;
}
}
@@ -1016,21 +1019,35 @@ static struct pinmux_ops u300_pmx_ops = {
};
/*
- * FIXME: this will be set to sane values as this driver engulfs
- * drivers/gpio/gpio-u300.c and we really know this stuff.
+ * GPIO ranges handled by the application-side COH901XXX GPIO controller
+ * Very many pins can be converted into GPIO pins, but we only list those
+ * that are useful in practice to cut down on tables.
*/
-static struct pinctrl_gpio_range u300_gpio_range = {
- .name = "COH901*",
- .id = 0,
- .base = 0,
- .npins = 64,
+#define U300_GPIO_RANGE(a, b, c) { .name = "COH901XXX", .id = a, .base = a, \
+ .pin_base = b, .npins = c }
+
+static struct pinctrl_gpio_range u300_gpio_ranges[] = {
+ U300_GPIO_RANGE(10, 426, 1),
+ U300_GPIO_RANGE(11, 180, 1),
+ U300_GPIO_RANGE(12, 165, 1), /* MS/MMC card insertion */
+ U300_GPIO_RANGE(13, 179, 1),
+ U300_GPIO_RANGE(14, 178, 1),
+ U300_GPIO_RANGE(16, 194, 1),
+ U300_GPIO_RANGE(17, 193, 1),
+ U300_GPIO_RANGE(18, 192, 1),
+ U300_GPIO_RANGE(19, 191, 1),
+ U300_GPIO_RANGE(20, 186, 1),
+ U300_GPIO_RANGE(21, 185, 1),
+ U300_GPIO_RANGE(22, 184, 1),
+ U300_GPIO_RANGE(23, 183, 1),
+ U300_GPIO_RANGE(24, 182, 1),
+ U300_GPIO_RANGE(25, 181, 1),
};
static struct pinctrl_desc u300_pmx_desc = {
.name = DRIVER_NAME,
.pins = u300_pads,
.npins = ARRAY_SIZE(u300_pads),
- .maxpin = U300_NUM_PADS-1,
.pctlops = &u300_pctrl_ops,
.pmxops = &u300_pmx_ops,
.owner = THIS_MODULE,
@@ -1038,9 +1055,10 @@ static struct pinctrl_desc u300_pmx_desc = {
static int __init u300_pmx_probe(struct platform_device *pdev)
{
- int ret;
struct u300_pmx *upmx;
struct resource *res;
+ int ret;
+ int i;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1077,7 +1095,8 @@ static int __init u300_pmx_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
platform_set_drvdata(pdev, upmx);
@@ -1099,8 +1118,10 @@ out_no_resource:
static int __exit u300_pmx_remove(struct platform_device *pdev)
{
struct u300_pmx *upmx = platform_get_drvdata(pdev);
+ int i;
- pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
pinctrl_unregister(upmx->pctl);
iounmap(upmx->virtbase);
release_mem_region(upmx->phybase, upmx->physize);
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index a5467f8709e..a76a348321b 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -32,12 +33,8 @@
static DEFINE_MUTEX(pinmux_list_mutex);
static LIST_HEAD(pinmux_list);
-/* List of pinmux hogs */
-static DEFINE_MUTEX(pinmux_hoglist_mutex);
-static LIST_HEAD(pinmux_hoglist);
-
-/* Global pinmux maps, we allow one set only */
-static struct pinmux_map const *pinmux_maps;
+/* Global pinmux maps */
+static struct pinmux_map *pinmux_maps;
static unsigned pinmux_maps_num;
/**
@@ -98,41 +95,35 @@ struct pinmux_hog {
* @function: a functional name to give to this pin, passed to the driver
* so it knows what function to mux in, e.g. the string "gpioNN"
* means that you want to mux in the pin for use as GPIO number NN
- * @gpio: if this request concerns a single GPIO pin
* @gpio_range: the range matching the GPIO pin if this is a request for a
* single GPIO pin
*/
static int pin_request(struct pinctrl_dev *pctldev,
- int pin, const char *function, bool gpio,
+ int pin, const char *function,
struct pinctrl_gpio_range *gpio_range)
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
int status = -EINVAL;
- dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
-
- if (!pin_is_valid(pctldev, pin)) {
- dev_err(&pctldev->dev, "pin is invalid\n");
- return -EINVAL;
- }
-
- if (!function) {
- dev_err(&pctldev->dev, "no function name given\n");
- return -EINVAL;
- }
+ dev_dbg(pctldev->dev, "request pin %d for %s\n", pin, function);
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be requested\n");
goto out;
}
+ if (!function) {
+ dev_err(pctldev->dev, "no function name given\n");
+ return -EINVAL;
+ }
+
spin_lock(&desc->lock);
if (desc->mux_function) {
spin_unlock(&desc->lock);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin already requested\n");
goto out;
}
@@ -141,7 +132,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
/* Let each pin increase references to this module */
if (!try_module_get(pctldev->owner)) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not increase module refcount for pin %d\n",
pin);
status = -EINVAL;
@@ -152,7 +143,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
* If there is no kind of request function for the pin we just assume
* we got it by default and proceed.
*/
- if (gpio && ops->gpio_request_enable)
+ if (gpio_range && ops->gpio_request_enable)
/* This requests and enables a single GPIO pin */
status = ops->gpio_request_enable(pctldev, gpio_range, pin);
else if (ops->request)
@@ -161,7 +152,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
status = 0;
if (status)
- dev_err(&pctldev->dev, "->request on device %s failed "
+ dev_err(pctldev->dev, "->request on device %s failed "
"for pin %d\n",
pctldev->desc->name, pin);
out_free_pin:
@@ -172,7 +163,7 @@ out_free_pin:
}
out:
if (status)
- dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
+ dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
pin, function ? : "?", status);
return status;
@@ -182,34 +173,52 @@ out:
* pin_free() - release a single muxed in pin so something else can be muxed
* @pctldev: pin controller device handling this pin
* @pin: the pin to free
- * @free_func: whether to free the pin's assigned function name string
+ * @gpio_range: the range matching the GPIO pin if this is a request for a
+ * single GPIO pin
+ *
+ * This function returns a pointer to the function name in use. This is used
+ * for callers that dynamically allocate a function name so it can be freed
+ * once the pin is free. This is done for GPIO request functions.
*/
-static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
+static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ struct pinctrl_gpio_range *gpio_range)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pin_desc *desc;
+ const char *func;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be freed\n");
- return;
+ return NULL;
}
- if (ops->free)
+ /*
+ * If there is no kind of request function for the pin we just assume
+ * we got it by default and proceed.
+ */
+ if (gpio_range && ops->gpio_disable_free)
+ ops->gpio_disable_free(pctldev, gpio_range, pin);
+ else if (ops->free)
ops->free(pctldev, pin);
spin_lock(&desc->lock);
- if (free_func)
- kfree(desc->mux_function);
+ func = desc->mux_function;
desc->mux_function = NULL;
spin_unlock(&desc->lock);
module_put(pctldev->owner);
+
+ return func;
}
/**
* pinmux_request_gpio() - request a single pin to be muxed in as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_request() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed in.
*/
int pinmux_request_gpio(unsigned gpio)
{
@@ -225,7 +234,7 @@ int pinmux_request_gpio(unsigned gpio)
return -EINVAL;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
/* Conjure some name stating what chip and pin this is taken by */
snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
@@ -234,7 +243,7 @@ int pinmux_request_gpio(unsigned gpio)
if (!function)
return -EINVAL;
- ret = pin_request(pctldev, pin, function, true, range);
+ ret = pin_request(pctldev, pin, function, range);
if (ret < 0)
kfree(function);
@@ -245,6 +254,10 @@ EXPORT_SYMBOL_GPL(pinmux_request_gpio);
/**
* pinmux_free_gpio() - free a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_free() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
*/
void pinmux_free_gpio(unsigned gpio)
{
@@ -252,53 +265,108 @@ void pinmux_free_gpio(unsigned gpio)
struct pinctrl_gpio_range *range;
int ret;
int pin;
+ const char *func;
ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
if (ret)
return;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
- pin_free(pctldev, pin, true);
+ func = pin_free(pctldev, pin, range);
+ kfree(func);
}
EXPORT_SYMBOL_GPL(pinmux_free_gpio);
+static int pinmux_gpio_direction(unsigned gpio, bool input)
+{
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range *range;
+ const struct pinmux_ops *ops;
+ int ret;
+ int pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return ret;
+
+ ops = pctldev->desc->pmxops;
+
+ /* Convert to the pin controllers number space */
+ pin = gpio - range->base + range->pin_base;
+
+ if (ops->gpio_set_direction)
+ ret = ops->gpio_set_direction(pctldev, range, pin, input);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * pinmux_gpio_direction_input() - request a GPIO pin to go into input mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_input() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_input(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, true);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_input);
+
+/**
+ * pinmux_gpio_direction_output() - request a GPIO pin to go into output mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_output() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_output(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, false);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_output);
+
/**
* pinmux_register_mappings() - register a set of pinmux mappings
- * @maps: the pinmux mappings table to register
+ * @maps: the pinmux mappings table to register, this should be marked with
+ * __initdata so it can be discarded after boot, this function will
+ * perform a shallow copy for the mapping entries.
* @num_maps: the number of maps in the mapping table
*
* Only call this once during initialization of your machine, the function is
* tagged as __init and won't be callable after init has completed. The map
* passed into this function will be owned by the pinmux core and cannot be
- * free:d.
+ * freed.
*/
int __init pinmux_register_mappings(struct pinmux_map const *maps,
unsigned num_maps)
{
+ void *tmp_maps;
int i;
- if (pinmux_maps != NULL) {
- pr_err("pinmux mappings already registered, you can only "
- "register one set of maps\n");
- return -EINVAL;
- }
-
pr_debug("add %d pinmux maps\n", num_maps);
+
+ /* First sanity check the new mapping */
for (i = 0; i < num_maps; i++) {
- /* Sanity check the mapping */
if (!maps[i].name) {
pr_err("failed to register map %d: "
"no map name given\n", i);
return -EINVAL;
}
+
if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
pr_err("failed to register map %s (%d): "
"no pin control device given\n",
maps[i].name, i);
return -EINVAL;
}
+
if (!maps[i].function) {
pr_err("failed to register map %s (%d): "
"no function ID given\n", maps[i].name, i);
@@ -315,9 +383,30 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
maps[i].function);
}
- pinmux_maps = maps;
- pinmux_maps_num = num_maps;
+ /*
+ * Make a copy of the map array - string pointers will end up in the
+ * kernel const section anyway so these do not need to be deep copied.
+ */
+ if (!pinmux_maps_num) {
+ /* On first call, just copy them */
+ tmp_maps = kmemdup(maps,
+ sizeof(struct pinmux_map) * num_maps,
+ GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ } else {
+ /* Subsequent calls, reallocate array to new size */
+ size_t oldsize = sizeof(struct pinmux_map) * pinmux_maps_num;
+ size_t newsize = sizeof(struct pinmux_map) * num_maps;
+
+ tmp_maps = krealloc(pinmux_maps, oldsize + newsize, GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ memcpy((tmp_maps + oldsize), maps, newsize);
+ }
+ pinmux_maps = tmp_maps;
+ pinmux_maps_num += num_maps;
return 0;
}
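/*
 * Editor's sketch (not part of the patch): since the core now copies and
 * grows the map array, board code can register several __initdata tables
 * instead of one global set.  The device and function names below are
 * hypothetical.
 */
static struct pinmux_map __initdata foo_pinmux_map[] = {
	{
		.name          = "i2c0",
		.ctrl_dev_name = "pinctrl-foo",
		.function      = "i2c0",
		.dev_name      = "foo-i2c.0",
	},
};

static int __init foo_board_pinmux_init(void)
{
	return pinmux_register_mappings(foo_pinmux_map,
					ARRAY_SIZE(foo_pinmux_map));
}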
@@ -345,14 +434,14 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
if (ret)
return ret;
- dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
+ dev_dbg(pctldev->dev, "requesting the %u pins from group %u\n",
num_pins, group_selector);
/* Try to allocate all pins in this group, one by one */
for (i = 0; i < num_pins; i++) {
- ret = pin_request(pctldev, pins[i], func, false, NULL);
+ ret = pin_request(pctldev, pins[i], func, NULL);
if (ret) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not get pin %d for function %s "
"on device %s - conflicting mux mappings?\n",
pins[i], func ? : "(undefined)",
@@ -360,7 +449,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
/* On error release all taken pins */
i--; /* this pin just failed */
for (; i >= 0; i--)
- pin_free(pctldev, pins[i], false);
+ pin_free(pctldev, pins[i], NULL);
return -ENODEV;
}
}
@@ -384,44 +473,13 @@ static void release_pins(struct pinctrl_dev *pctldev,
ret = pctlops->get_group_pins(pctldev, group_selector,
&pins, &num_pins);
if (ret) {
- dev_err(&pctldev->dev, "could not get pins to release for "
+ dev_err(pctldev->dev, "could not get pins to release for "
"group selector %d\n",
group_selector);
return;
}
for (i = 0; i < num_pins; i++)
- pin_free(pctldev, pins[i], false);
-}
-
-/**
- * pinmux_get_group_selector() - returns the group selector for a group
- * @pctldev: the pin controller handling the group
- * @pin_group: the pin group to look up
- */
-static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
- const char *pin_group)
-{
- const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- unsigned group_selector = 0;
-
- while (pctlops->list_groups(pctldev, group_selector) >= 0) {
- const char *gname = pctlops->get_group_name(pctldev,
- group_selector);
- if (!strcmp(gname, pin_group)) {
- dev_dbg(&pctldev->dev,
- "found group selector %u for %s\n",
- group_selector,
- pin_group);
- return group_selector;
- }
-
- group_selector++;
- }
-
- dev_err(&pctldev->dev, "does not have pin group %s\n",
- pin_group);
-
- return -EINVAL;
+ pin_free(pctldev, pins[i], NULL);
}
/**
@@ -465,9 +523,9 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
if (num_groups < 1)
return -EINVAL;
- ret = pinmux_get_group_selector(pctldev, groups[0]);
+ ret = pinctrl_get_group_selector(pctldev, groups[0]);
if (ret < 0) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"function %s wants group %s but the pin "
"controller does not seem to have that group\n",
pmxops->get_function_name(pctldev, func_selector),
@@ -476,7 +534,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
}
if (num_groups > 1)
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"function %s support more than one group, "
"default-selecting first group %s (%d)\n",
pmxops->get_function_name(pctldev, func_selector),
@@ -486,13 +544,13 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
}
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"check if we have pin group %s on controller %s\n",
pin_group, pinctrl_dev_get_name(pctldev));
- ret = pinmux_get_group_selector(pctldev, pin_group);
+ ret = pinctrl_get_group_selector(pctldev, pin_group);
if (ret < 0) {
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"%s does not support pin group %s with function %s\n",
pinctrl_dev_get_name(pctldev),
pin_group,
@@ -569,7 +627,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->pctldev && pmx->pctldev != pctldev) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"different pin control devices given for device %s, "
"function %s\n",
devname,
@@ -592,7 +650,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->func_selector != UINT_MAX &&
pmx->func_selector != func_selector) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"dual function defines in the map for device %s\n",
devname);
return -EINVAL;
@@ -698,7 +756,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
}
pr_debug("in map, found pctldev %s to handle function %s",
- dev_name(&pctldev->dev), map->function);
+ dev_name(pctldev->dev), map->function);
/*
@@ -874,7 +932,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
* without any problems, so then we can hog pinmuxes for
* all devices that just want a static pin mux at this point.
*/
- dev_err(&pctldev->dev, "map %s wants to hog a non-system "
+ dev_err(pctldev->dev, "map %s wants to hog a non-system "
"pinmux, this is not going to work\n", map->name);
return -EINVAL;
}
@@ -886,7 +944,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
pmx = pinmux_get(NULL, map->name);
if (IS_ERR(pmx)) {
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not get the %s pinmux mapping for hogging\n",
map->name);
return PTR_ERR(pmx);
@@ -896,7 +954,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
if (ret) {
pinmux_put(pmx);
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not enable the %s pinmux mapping for hogging\n",
map->name);
return ret;
@@ -905,7 +963,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
hog->map = map;
hog->pmx = pmx;
- dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
+ dev_info(pctldev->dev, "hogged map %s, function %s\n", map->name,
map->function);
mutex_lock(&pctldev->pinmux_hogs_lock);
list_add(&hog->node, &pctldev->pinmux_hogs);
@@ -924,7 +982,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
*/
int pinmux_hog_maps(struct pinctrl_dev *pctldev)
{
- struct device *dev = &pctldev->dev;
+ struct device *dev = pctldev->dev;
const char *devname = dev_name(dev);
int ret;
int i;
@@ -948,7 +1006,7 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
}
/**
- * pinmux_hog_maps() - unhog specific map entries on controller device
+ * pinmux_unhog_maps() - unhog specific map entries on controller device
* @pctldev: the pin control device to unhog entries on
*/
void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
@@ -1005,18 +1063,19 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
static int pinmux_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
- unsigned pin;
+ unsigned i, pin;
seq_puts(s, "Pinmux settings per pin\n");
seq_puts(s, "Format: pin (name): pinmuxfunction\n");
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
- /* Pin space may be sparse */
+ /* Skip if we cannot search the pin */
if (desc == NULL)
continue;
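/*
 * Editor's sketch (not part of the patch): a gpiolib driver is expected to
 * forward its request/free and direction hooks to the pinmux layer, as the
 * COH901 conversion above does for request/free.  The example_* names are
 * hypothetical; the GPIO block's own direction programming is elided.
 */
static int example_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	int gpio = chip->base + offset;
	int ret;

	ret = pinmux_gpio_direction_input(gpio);
	if (ret)
		return ret;

	/* ... then program the GPIO block's own direction register ... */
	return 0;
}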
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index edaccad9b5b..b7944f90388 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1477,7 +1477,7 @@ static struct attribute *asus_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index d1049ee3c9e..72d731c21d4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -992,7 +992,7 @@ static struct attribute *hwmon_attributes[] = {
NULL
};
-static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1357,7 +1357,7 @@ static struct attribute *platform_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index d9312b3073e..6f966d6c062 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1053,7 +1053,7 @@ static const struct file_operations disp_proc_fops = {
};
static int
-asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
+asus_proc_add(char *name, const struct file_operations *proc_fops, umode_t mode,
struct acpi_device *device)
{
struct proc_dir_entry *proc;
@@ -1072,7 +1072,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
- mode_t mode;
+ umode_t mode;
if ((asus_uid == 0) && (asus_gid == 0)) {
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 811d436cd67..42a7d603c87 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -28,7 +28,6 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/mutex.h>
@@ -165,22 +164,22 @@ static int ibm_rtl_write(u8 value)
return ret;
}
-static ssize_t rtl_show_version(struct sysdev_class * dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_version(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
}
-static ssize_t rtl_show_state(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_state(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
}
-static ssize_t rtl_set_state(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_set_state(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -205,27 +204,28 @@ static ssize_t rtl_set_state(struct sysdev_class *dev,
return ret;
}
-static struct sysdev_class class_rtl = {
+static struct bus_type rtl_subsys = {
.name = "ibm_rtl",
+ .dev_name = "ibm_rtl",
};
-static SYSDEV_CLASS_ATTR(version, S_IRUGO, rtl_show_version, NULL);
-static SYSDEV_CLASS_ATTR(state, 0600, rtl_show_state, rtl_set_state);
+static DEVICE_ATTR(version, S_IRUGO, rtl_show_version, NULL);
+static DEVICE_ATTR(state, 0600, rtl_show_state, rtl_set_state);
-static struct sysdev_class_attribute *rtl_attributes[] = {
- &attr_version,
- &attr_state,
+static struct device_attribute *rtl_attributes[] = {
+ &dev_attr_version,
+ &dev_attr_state,
NULL
};
static int rtl_setup_sysfs(void) {
int ret, i;
- ret = sysdev_class_register(&class_rtl);
+ ret = subsys_system_register(&rtl_subsys, NULL);
if (!ret) {
for (i = 0; rtl_attributes[i]; i ++)
- sysdev_class_create_file(&class_rtl, rtl_attributes[i]);
+ device_create_file(rtl_subsys.dev_root, rtl_attributes[i]);
}
return ret;
}
@@ -233,8 +233,8 @@ static int rtl_setup_sysfs(void) {
static void rtl_teardown_sysfs(void) {
int i;
for (i = 0; rtl_attributes[i]; i ++)
- sysdev_class_remove_file(&class_rtl, rtl_attributes[i]);
- sysdev_class_unregister(&class_rtl);
+ device_remove_file(rtl_subsys.dev_root, rtl_attributes[i]);
+ bus_unregister(&rtl_subsys);
}
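/*
 * Editor's sketch (not part of the patch): the sysdev class is replaced by
 * a system bus subsystem whose attributes are plain device attributes on
 * the subsystem's root device.  The example_* names are hypothetical.
 */
static struct bus_type example_subsys = {
	.name     = "example",
	.dev_name = "example",
};

static ssize_t example_show_version(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 1);
}

static DEVICE_ATTR(version, S_IRUGO, example_show_version, NULL);

static int __init example_sysfs_init(void)
{
	int ret;

	ret = subsys_system_register(&example_subsys, NULL);
	if (ret)
		return ret;

	return device_create_file(example_subsys.dev_root, &dev_attr_version);
}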
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a36addf106a..ac902f7a9ba 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -368,7 +368,7 @@ static struct attribute *ideapad_attributes[] = {
NULL
};
-static mode_t ideapad_is_visible(struct kobject *kobj,
+static umode_t ideapad_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index abddc83e9fd..3271ac85115 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -389,7 +389,7 @@ static ssize_t bios_enabled_show(struct device *dev,
return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
}
-static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+static int intel_menlow_add_one_attribute(char *name, umode_t mode, void *show,
void *store, struct device *dev,
acpi_handle handle)
{
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 48870e50423..f00d0d1e065 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b828680b21..62533c105da 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -297,7 +297,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
- mode_t base_procfs_mode;
+ umode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
- unsigned int change_detector, must_reset;
+ unsigned int change_detector;
unsigned int poll_freq;
+ bool was_frozen;
mutex_lock(&hotkey_thread_mutex);
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
t = 100; /* should never happen... */
}
t = msleep_interruptible(t);
- if (unlikely(kthread_should_stop()))
+ if (unlikely(kthread_freezable_should_stop(&was_frozen)))
break;
- must_reset = try_to_freeze();
- if (t > 0 && !must_reset)
+
+ if (t > 0 && !was_frozen)
continue;
mutex_lock(&hotkey_thread_data_mutex);
- if (must_reset || hotkey_config_change != change_detector) {
+ if (was_frozen || hotkey_config_change != change_detector) {
/* forget old state on thaw or config change */
si = so;
t = 0;
@@ -2528,10 +2529,6 @@ exit:
static void hotkey_poll_stop_sync(void)
{
if (tpacpi_hotkey_task) {
- if (frozen(tpacpi_hotkey_task) ||
- freezing(tpacpi_hotkey_task))
- thaw_process(tpacpi_hotkey_task);
-
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
mutex_lock(&hotkey_thread_mutex);
@@ -8542,7 +8539,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode = iibm->base_procfs_mode;
+ umode_t mode = iibm->base_procfs_mode;
if (!mode)
mode = S_IRUGO;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471..dcdc1f4a462 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b0..01fa671ec97 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
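
The mask change above (0xC6 to 0x86) drops PMIC_BATT_CHR_SDCLMT_MASK (bit 6) from the set of charger status bits treated as hard exceptions, which is why the SDCLMT case moves into the otherwise-healthy branch and is only logged. A stand-alone check of which named bits each mask covers (the two low bits of both masks belong to defines outside this hunk):

    #include <stdio.h>

    #define SBATDET (1 << 5)
    #define SDCLMT  (1 << 6)
    #define SUSBOVP (1 << 7)

    static void show(const char *name, unsigned int mask)
    {
        printf("%s = 0x%02x:%s%s%s\n", name, mask,
               mask & SBATDET ? " SBATDET" : "",
               mask & SDCLMT  ? " SDCLMT"  : "",
               mask & SUSBOVP ? " SUSBOVP" : "");
    }

    int main(void)
    {
        show("old EXCPT mask", 0xC6);    /* includes SDCLMT */
        show("new EXCPT mask", 0x86);    /* SDCLMT no longer an exception */
        return 0;
    }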
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index e15d4c9d398..e95cd657dac 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -176,13 +176,13 @@ static struct device_attribute power_supply_attrs[] = {
static struct attribute *
__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
-static mode_t power_supply_attr_is_visible(struct kobject *kobj,
+static umode_t power_supply_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int attrno)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct power_supply *psy = dev_get_drvdata(dev);
- mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
int i;
if (attrno == POWER_SUPPLY_PROP_TYPE)
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546..10451a15e82 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
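
The getres fix above follows the usual clock_getres() contract: the resolution is reported through the timespec argument and the return value is 0 on success; the old code returned 1 and never filled in *tp. The same contract as seen from user space on any POSIX system (CLOCK_MONOTONIC is used here only because it needs no PTP hardware):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec res;

        /* resolution comes back in 'res'; the call returns 0 on success */
        if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
            printf("resolution: %ld s + %ld ns\n",
                   (long)res.tv_sec, res.tv_nsec);
        return 0;
    }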
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10c..691b1ab1a3d 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
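
In the probe changes above, PCI_EXP_DEVCTL_READRQ covers bits 14:12 of the PCIe Device Control register and MAX_READ_REQUEST_SZ_SHIFT is 12, so writing encoding 2 into that field requests a 512-byte maximum read request; the field encodes 128 << value bytes. A quick stand-alone check of that encoding (the two macros are mirrored here rather than pulled from <linux/pci_regs.h>):

    #include <stdio.h>

    #define DEVCTL_READRQ_SHIFT 12
    #define DEVCTL_READRQ_MASK  (0x7u << DEVCTL_READRQ_SHIFT)

    int main(void)
    {
        unsigned int regval = 0x5936;   /* arbitrary starting register value */
        unsigned int enc;

        /* clear the MRRS field, then program encoding 2 */
        regval &= ~DEVCTL_READRQ_MASK;
        regval |= 2u << DEVCTL_READRQ_SHIFT;

        for (enc = 0; enc <= 5; enc++)
            printf("encoding %u -> %u-byte max read request\n", enc, 128u << enc);

        printf("programmed MRRS: %u bytes\n",
               128u << ((regval & DEVCTL_READRQ_MASK) >> DEVCTL_READRQ_SHIFT));
        return 0;
    }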
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb140..822e54c394d 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index ca0d608f824..df33530cec4 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -427,7 +427,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
/* replace driver_data with info */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata, info);
+ pdata, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9713b1b860c..7a61b17ddd0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -93,6 +93,7 @@ config REGULATOR_MAX1586
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
+ select REGMAP_I2C
help
This driver controls a Maxim 8649 voltage output regulator via
I2C bus.
@@ -177,6 +178,13 @@ config REGULATOR_DA903X
Say y here to support the BUCKs and LDOs regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
+config REGULATOR_DA9052
+ tristate "Dialog DA9052/DA9053 regulators"
+ depends on PMIC_DA9052
+ help
+ This driver supports the voltage regulators of DA9052-BC and
+ DA9053-AA/Bx PMIC.
+
config REGULATOR_PCF50633
tristate "PCF50633 regulator driver"
depends on MFD_PCF50633
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 93a6318f532..503bac87715 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_REGULATOR) += core.o dummy.o
+obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8..685ad43b074 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -63,7 +63,7 @@ static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
- (selector << ri->voltage_shift) & ri->voltage_mask);
+ selector << ri->voltage_shift);
}
static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
@@ -188,7 +188,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
ri->pdev = pdev;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 585e4946fe0..042271aace6 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,7 +634,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
rdev = regulator_register(&ab3100_regulator_desc[i],
&pdev->dev,
&plfdata->reg_constraints[i],
- reg);
+ reg, NULL);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 6e1ae69646b..e91b8ddc279 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -822,7 +822,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* register regulator with framework */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- &pdata->regulator[i], info);
+ &pdata->regulator[i], info, NULL);
if (IS_ERR(info->regulator)) {
err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index a4be41614ee..483c8093085 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -233,7 +233,7 @@ static int __devinit ad5398_probe(struct i2c_client *client,
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
chip->rdev = regulator_register(&ad5398_reg, &client->dev,
- init_data, chip);
+ init_data, chip, NULL);
if (IS_ERR(chip->rdev)) {
ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index e24d1b7d97a..9fab6d1bbe8 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -107,7 +107,7 @@ static int __init bq24022_probe(struct platform_device *pdev)
ret = gpio_direction_output(pdata->gpio_nce, 1);
bq24022 = regulator_register(&bq24022_desc, &pdev->dev,
- pdata->init_data, pdata);
+ pdata->init_data, pdata, NULL);
if (IS_ERR(bq24022)) {
dev_dbg(&pdev->dev, "couldn't register regulator\n");
ret = PTR_ERR(bq24022);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d0216022..ca86f39a0fd 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -25,6 +25,8 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -132,6 +134,33 @@ static struct regulator *get_device_regulator(struct device *dev)
return NULL;
}
+/**
+ * of_get_regulator - get a regulator device node based on supply name
+ * @dev: Device pointer for the consumer (of regulator) device
+ * @supply: regulator supply name
+ *
+ * Extract the regulator device node corresponding to the supply name.
+ * Returns the device node corresponding to the regulator if found, else
+ * returns NULL.
+ */
+static struct device_node *of_get_regulator(struct device *dev, const char *supply)
+{
+ struct device_node *regnode = NULL;
+ char prop_name[32]; /* 32 is max size of property name */
+
+ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
+
+ snprintf(prop_name, 32, "%s-supply", supply);
+ regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+ if (!regnode) {
+ dev_warn(dev, "%s property in node %s references invalid phandle\n",
+ prop_name, dev->of_node->full_name);
+ return NULL;
+ }
+ return regnode;
+}
+
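
of_get_regulator() above builds the device-tree property name by appending "-supply" to the consumer supply name and then resolves that property as a phandle. The naming step on its own, in plain C (of_parse_phandle() is kernel-only; the supply names below are just examples):

    #include <stdio.h>

    int main(void)
    {
        const char *supplies[] = { "vdd", "vcc", "vin" };
        char prop_name[32];     /* same 32-byte limit as the driver */
        unsigned int i;

        for (i = 0; i < sizeof(supplies) / sizeof(supplies[0]); i++) {
            /* "vdd" consumer supply -> "vdd-supply" DT property */
            snprintf(prop_name, sizeof(prop_name), "%s-supply", supplies[i]);
            printf("%s -> %s\n", supplies[i], prop_name);
        }
        return 0;
    }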
/* Platform voltage constraint check */
static int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
@@ -883,8 +912,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
int ret = 0;
struct regulator_ops *ops = rdev->desc->ops;
- rdev->constraints = kmemdup(constraints, sizeof(*constraints),
- GFP_KERNEL);
+ if (constraints)
+ rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+ GFP_KERNEL);
+ else
+ rdev->constraints = kzalloc(sizeof(*constraints),
+ GFP_KERNEL);
if (!rdev->constraints)
return -ENOMEM;
@@ -893,7 +926,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
goto out;
/* do we need to setup our suspend state */
- if (constraints->initial_state) {
+ if (rdev->constraints->initial_state) {
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
@@ -901,7 +934,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (constraints->initial_mode) {
+ if (rdev->constraints->initial_mode) {
if (!ops->set_mode) {
rdev_err(rdev, "no set_mode operation\n");
ret = -EINVAL;
@@ -952,9 +985,8 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
- if (IS_ERR(rdev->supply)) {
- err = PTR_ERR(rdev->supply);
- rdev->supply = NULL;
+ if (rdev->supply == NULL) {
+ err = -ENOMEM;
return err;
}
@@ -1148,6 +1180,30 @@ static int _regulator_get_enable_time(struct regulator_dev *rdev)
return rdev->desc->ops->enable_time(rdev);
}
+static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r;
+ struct device_node *node;
+
+ /* first do a dt based lookup */
+ if (dev && dev->of_node) {
+ node = of_get_regulator(dev, supply);
+ if (node)
+ list_for_each_entry(r, &regulator_list, list)
+ if (r->dev.parent &&
+ node == r->dev.of_node)
+ return r;
+ }
+
+ /* if not found, try doing it non-dt way */
+ list_for_each_entry(r, &regulator_list, list)
+ if (strcmp(rdev_get_name(r), supply) == 0)
+ return r;
+
+ return NULL;
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1168,6 +1224,10 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
mutex_lock(&regulator_list_mutex);
+ rdev = regulator_dev_lookup(dev, id);
+ if (rdev)
+ goto found;
+
list_for_each_entry(map, &regulator_map_list, list) {
/* If the mapping has a device set up it must match */
if (map->dev_name &&
@@ -1221,6 +1281,7 @@ found:
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
+ goto out;
}
rdev->open_count++;
@@ -1726,6 +1787,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
@@ -2429,6 +2491,43 @@ err:
EXPORT_SYMBOL_GPL(regulator_bulk_disable);
/**
+ * regulator_bulk_force_disable - force disable multiple regulator consumers
+ *
+ * @num_consumers: Number of consumers
+ * @consumers: Consumer data; clients are stored here.
+ * @return 0 on success, an errno on failure
+ *
+ * This convenience API allows consumers to forcibly disable multiple regulator
+ * clients in a single API call.
+ * NOTE: This should be used for situations when device damage will
+ * likely occur if the regulators are not disabled (e.g. over temp).
+ * Although regulator_force_disable() can fail for individual consumers, it
+ * is still called for every consumer and the first error seen is returned.
+ */
+int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].ret =
+ regulator_force_disable(consumers[i].consumer);
+
+ for (i = 0; i < num_consumers; i++) {
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
+
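
regulator_bulk_force_disable() intentionally calls regulator_force_disable() on every consumer before it looks at any result, then reports the first stored error. The same run-all-then-return-first-failure pattern in isolation (the consumer array and its fail flags are invented for the demo):

    #include <stdio.h>
    #include <errno.h>

    struct consumer {
        const char *name;
        int fails;   /* pretend this consumer's disable call errors out */
        int ret;
    };

    static int force_disable(struct consumer *c)
    {
        return c->fails ? -EIO : 0;
    }

    int main(void)
    {
        struct consumer consumers[] = {
            { "vdda", 0 }, { "vddio", 1 }, { "vcore", 0 },
        };
        int n = sizeof(consumers) / sizeof(consumers[0]);
        int i, ret = 0;

        for (i = 0; i < n; i++)         /* always touch every consumer */
            consumers[i].ret = force_disable(&consumers[i]);

        for (i = 0; i < n; i++) {       /* then report the first error */
            if (consumers[i].ret) {
                ret = consumers[i].ret;
                break;
            }
        }

        printf("bulk force-disable result: %d\n", ret);   /* -EIO here */
        return 0;
    }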
+/**
* regulator_bulk_free - free multiple regulator consumers
*
* @num_consumers: Number of consumers
@@ -2503,7 +2602,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
int status = 0;
/* some attributes need specific methods to be displayed */
- if (ops->get_voltage || ops->get_voltage_sel) {
+ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+ (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -2637,11 +2737,13 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
*/
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
struct device *dev, const struct regulator_init_data *init_data,
- void *driver_data)
+ void *driver_data, struct device_node *of_node)
{
+ const struct regulation_constraints *constraints = NULL;
static atomic_t regulator_no = ATOMIC_INIT(0);
struct regulator_dev *rdev;
int ret, i;
+ const char *supply = NULL;
if (regulator_desc == NULL)
return ERR_PTR(-EINVAL);
@@ -2653,9 +2755,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
regulator_desc->type != REGULATOR_CURRENT)
return ERR_PTR(-EINVAL);
- if (!init_data)
- return ERR_PTR(-EINVAL);
-
/* Only one of each should be implemented */
WARN_ON(regulator_desc->ops->get_voltage &&
regulator_desc->ops->get_voltage_sel);
@@ -2688,7 +2787,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
/* perform any regulator specific init */
- if (init_data->regulator_init) {
+ if (init_data && init_data->regulator_init) {
ret = init_data->regulator_init(rdev->reg_data);
if (ret < 0)
goto clean;
@@ -2696,6 +2795,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
/* register with sysfs */
rdev->dev.class = &regulator_class;
+ rdev->dev.of_node = of_node;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
@@ -2708,7 +2808,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
- ret = set_machine_constraints(rdev, &init_data->constraints);
+ if (init_data)
+ constraints = &init_data->constraints;
+
+ ret = set_machine_constraints(rdev, constraints);
if (ret < 0)
goto scrub;
@@ -2717,21 +2820,18 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- if (init_data->supply_regulator) {
+ if (init_data && init_data->supply_regulator)
+ supply = init_data->supply_regulator;
+ else if (regulator_desc->supply_name)
+ supply = regulator_desc->supply_name;
+
+ if (supply) {
struct regulator_dev *r;
- int found = 0;
- list_for_each_entry(r, &regulator_list, list) {
- if (strcmp(rdev_get_name(r),
- init_data->supply_regulator) == 0) {
- found = 1;
- break;
- }
- }
+ r = regulator_dev_lookup(dev, supply);
- if (!found) {
- dev_err(dev, "Failed to find supply %s\n",
- init_data->supply_regulator);
+ if (!r) {
+ dev_err(dev, "Failed to find supply %s\n", supply);
ret = -ENODEV;
goto scrub;
}
@@ -2739,18 +2839,28 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
ret = set_supply(rdev, r);
if (ret < 0)
goto scrub;
+
+ /* Enable supply if rail is enabled */
+ if (rdev->desc->ops->is_enabled &&
+ rdev->desc->ops->is_enabled(rdev)) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0)
+ goto scrub;
+ }
}
/* add consumers devices */
- for (i = 0; i < init_data->num_consumer_supplies; i++) {
- ret = set_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev,
- init_data->consumer_supplies[i].dev_name,
- init_data->consumer_supplies[i].supply);
- if (ret < 0) {
- dev_err(dev, "Failed to set supply %s\n",
+ if (init_data) {
+ for (i = 0; i < init_data->num_consumer_supplies; i++) {
+ ret = set_consumer_device_supply(rdev,
+ init_data->consumer_supplies[i].dev,
+ init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- goto unset_supplies;
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
+ goto unset_supplies;
+ }
}
}
@@ -2799,8 +2909,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index e23ddfa8b2c..8dbc54da7d7 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -537,7 +537,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
ri->desc.ops = &da9030_regulator_ldo1_15_ops;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
new file mode 100644
index 00000000000..3767364452f
--- /dev/null
+++ b/drivers/regulator/da9052-regulator.c
@@ -0,0 +1,606 @@
+/*
+* da9052-regulator.c: Regulator driver for DA9052
+*
+* Copyright(c) 2011 Dialog Semiconductor Ltd.
+*
+* Author: David Dajun Chen <dchen@diasemi.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+
+/* Buck step size */
+#define DA9052_BUCK_PERI_3uV_STEP 100000
+#define DA9052_BUCK_PERI_REG_MAP_UPTO_3uV 24
+#define DA9052_CONST_3uV 3000000
+
+#define DA9052_MIN_UA 0
+#define DA9052_MAX_UA 3
+#define DA9052_CURRENT_RANGE 4
+
+/* Bit masks */
+#define DA9052_BUCK_ILIM_MASK_EVEN 0x0c
+#define DA9052_BUCK_ILIM_MASK_ODD 0xc0
+
+static const u32 da9052_current_limits[3][4] = {
+ {700000, 800000, 1000000, 1200000}, /* DA9052-BC BUCKs */
+ {1600000, 2000000, 2400000, 3000000}, /* DA9053-AA/Bx BUCK-CORE */
+ {800000, 1000000, 1200000, 1500000}, /* DA9053-AA/Bx BUCK-PRO,
+ * BUCK-MEM and BUCK-PERI
+ */
+};
+
+struct da9052_regulator_info {
+ struct regulator_desc reg_desc;
+ int step_uV;
+ int min_uV;
+ int max_uV;
+ unsigned char volt_shift;
+ unsigned char en_bit;
+ unsigned char activate_bit;
+};
+
+struct da9052_regulator {
+ struct da9052 *da9052;
+ struct da9052_regulator_info *info;
+ struct regulator_dev *rdev;
+};
+
+static int verify_range(struct da9052_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (min_uV > info->max_uV || max_uV < info->min_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int da9052_regulator_enable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 1 << info->en_bit);
+}
+
+static int da9052_regulator_disable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 0);
+}
+
+static int da9052_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ return ret & (1 << info->en_bit);
+}
+
+static int da9052_dcdc_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int ret, row = 2;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKA_REG + offset/2);
+ if (ret < 0)
+ return ret;
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ ret = (ret & DA9052_BUCK_ILIM_MASK_EVEN) >> 2;
+ else
+ ret = (ret & DA9052_BUCK_ILIM_MASK_ODD) >> 6;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ return da9052_current_limits[row][ret];
+}
+
+static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int reg_val = 0;
+ int i, row = 2;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ if (min_uA > da9052_current_limits[row][DA9052_MAX_UA] ||
+ max_uA < da9052_current_limits[row][DA9052_MIN_UA])
+ return -EINVAL;
+
+ for (i = 0; i < DA9052_CURRENT_RANGE; i++) {
+ if (min_uA <= da9052_current_limits[row][i]) {
+ reg_val = i;
+ break;
+ }
+ }
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_EVEN,
+ reg_val << 2);
+ else
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_ODD,
+ reg_val << 6);
+}
+
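
The two current-limit helpers above pack a pair of bucks into one BUCKA/BUCKB register: even regulator IDs use bits 3:2 (mask 0x0c), odd IDs use bits 7:6 (mask 0xc0), and the chip ID plus the buck index select one of the three rows of da9052_current_limits. The indexing logic on its own, checked in user space with the table values copied from above:

    #include <stdio.h>

    static const unsigned int limits[3][4] = {
        {  700000,  800000, 1000000, 1200000 },  /* DA9052-BC bucks */
        { 1600000, 2000000, 2400000, 3000000 },  /* DA9053 BUCK-CORE */
        {  800000, 1000000, 1200000, 1500000 },  /* DA9053 other bucks */
    };

    /* decode the shared register byte for buck 'id'; chip 0 = DA9052, 1 = DA9053 */
    static unsigned int current_limit(int is_da9053, int id, unsigned char reg)
    {
        int field = (id % 2 == 0) ? (reg & 0x0c) >> 2 : (reg & 0xc0) >> 6;
        int row = !is_da9053 ? 0 : (id == 0 ? 1 : 2);

        return limits[row][field];
    }

    int main(void)
    {
        /* one register byte carries buck0 (bits 3:2) and buck1 (bits 7:6) */
        unsigned char reg = (3 << 6) | (1 << 2);

        printf("DA9053 buck0 limit: %u uA\n", current_limit(1, 0, reg)); /* 2000000 */
        printf("DA9053 buck1 limit: %u uA\n", current_limit(1, 1, reg)); /* 1500000 */
        return 0;
    }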
+static int da9052_list_buckperi_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
+ volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
+ + info->min_uV);
+ volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
+ * (DA9052_BUCK_PERI_3uV_STEP);
+ } else
+ volt_uV = (selector * info->step_uV) + info->min_uV;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
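
da9052_list_buckperi_voltage() is linear in the selector up to selector 24 and then switches to 100 mV steps, but only on the DA9052 variant; the DA9053 BUCK-PERI entry further down stays linear. The piecewise mapping checked in isolation, using the DA9052 BUCK-PERI parameters from the tables below (1.8 V minimum, 50 mV step, 3.6 V maximum):

    #include <stdio.h>

    #define MIN_UV      1800000
    #define MAX_UV      3600000
    #define STEP_UV       50000   /* 50 mV per selector step below 3.0 V */
    #define KNEE_SEL         24   /* 1800000 + 24 * 50000 = 3000000 */
    #define HI_STEP_UV   100000   /* 100 mV per step at and above 3.0 V */

    static int buckperi_list_voltage(unsigned int sel)
    {
        int uv;

        if (sel >= KNEE_SEL)
            uv = MIN_UV + KNEE_SEL * STEP_UV + (sel - KNEE_SEL) * HI_STEP_UV;
        else
            uv = MIN_UV + sel * STEP_UV;

        return uv > MAX_UV ? -1 : uv;   /* -1 stands in for -EINVAL */
    }

    int main(void)
    {
        unsigned int sel;

        for (sel = 22; sel <= 27; sel++)
            printf("selector %2u -> %d uV\n", sel, buckperi_list_voltage(sel));
        return 0;
    }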
+static int da9052_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ volt_uV = info->min_uV + info->step_uV * selector;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
+static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ *selector = (min_uV - info->min_uV) / info->step_uV;
+
+ ret = da9052_list_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_set_ldo_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ return da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+}
+
+static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some LDOs are DVC controlled which requires enabling of
+ * the LDO activate bit to implement the changes on the
+ * LDO output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+ info->activate_bit);
+}
+
+static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+ /* Some DCDCs are DVC controlled which requires enabling of
+ * the DCDC activate bit to implement the changes on the
+ * DCDC output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
+ info->activate_bit);
+}
+
+static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (min_uV >= DA9052_CONST_3uV))
+ *selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
+ ((min_uV - DA9052_CONST_3uV) /
+ (DA9052_BUCK_PERI_3uV_STEP));
+ else
+ *selector = (min_uV - info->min_uV) / info->step_uV;
+
+ ret = da9052_list_buckperi_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_get_buckperi_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static struct regulator_ops da9052_buckperi_ops = {
+ .list_voltage = da9052_list_buckperi_voltage,
+ .get_voltage_sel = da9052_get_buckperi_voltage_sel,
+ .set_voltage = da9052_set_buckperi_voltage,
+
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_dcdc_ops = {
+ .set_voltage = da9052_set_dcdc_voltage,
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo5_6_ops = {
+ .set_voltage = da9052_set_ldo5_6_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo_ops = {
+ .set_voltage = da9052_set_ldo_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+#define DA9052_LDO5_6(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo5_6_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_dcdc_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_BUCKPERI(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_buckperi_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+static struct da9052_regulator_info da9052_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 50, 1800, 3600, 5, 6, 0),
+ /* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static struct da9052_regulator_info da9053_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 25, 925, 2500, 6, 6, 0),
+ /* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
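
The DA9052_LDO/DA9052_DCDC table macros above take step/min/max in millivolts and scale them to microvolts, and the get/set helpers use volt_shift as the width in bits of the voltage field ((1 << volt_shift) - 1 is the mask). A quick arithmetic sanity check of one entry, DA9052_LDO(4, 50, 600, 1800, 5, 6, 0):

    #include <stdio.h>

    int main(void)
    {
        /* DA9052_LDO(4, 50, 600, 1800, 5, 6, 0), scaled as the macro does */
        int step_uv = 50 * 1000;
        int min_uv  = 600 * 1000;
        int max_uv  = 1800 * 1000;
        int volt_shift = 5;               /* voltage field width in bits */

        int used_selectors = (max_uv - min_uv) / step_uv + 1;
        int field_capacity = 1 << volt_shift;

        printf("selectors used: %d of %d available\n",
               used_selectors, field_capacity);   /* 25 of 32 */
        return 0;
    }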
+static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+ int id)
+{
+ struct da9052_regulator_info *info;
+ int i;
+
+ switch (chip_id) {
+ case DA9052:
+ for (i = 0; i < ARRAY_SIZE(da9052_regulator_info); i++) {
+ info = &da9052_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ for (i = 0; i < ARRAY_SIZE(da9053_regulator_info); i++) {
+ info = &da9053_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+static int __devinit da9052_regulator_probe(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator;
+ struct da9052 *da9052;
+ struct da9052_pdata *pdata;
+ int ret;
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9052_regulator),
+ GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ regulator->da9052 = da9052;
+
+ regulator->info = find_regulator_info(regulator->da9052->chip_id,
+ pdev->id);
+ if (regulator->info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ regulator->rdev = regulator_register(&regulator->info->reg_desc,
+ &pdev->dev,
+ pdata->regulators[pdev->id],
+ regulator, NULL);
+ if (IS_ERR(regulator->rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulator->info->reg_desc.name);
+ ret = PTR_ERR(regulator->rdev);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, regulator);
+
+ return 0;
+err:
+ devm_kfree(&pdev->dev, regulator);
+ return ret;
+}
+
+static int __devexit da9052_regulator_remove(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator = platform_get_drvdata(pdev);
+
+ regulator_unregister(regulator->rdev);
+ devm_kfree(&pdev->dev, regulator);
+
+ return 0;
+}
+
+static struct platform_driver da9052_regulator_driver = {
+ .probe = da9052_regulator_probe,
+ .remove = __devexit_p(da9052_regulator_remove),
+ .driver = {
+ .name = "da9052-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_regulator_init(void)
+{
+ return platform_driver_register(&da9052_regulator_driver);
+}
+subsys_initcall(da9052_regulator_init);
+
+static void __exit da9052_regulator_exit(void)
+{
+ platform_driver_unregister(&da9052_regulator_driver);
+}
+module_exit(da9052_regulator_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Power Regulator driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-regulator");
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 78329751af5..515443fcd26 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -486,7 +486,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
/* register with the regulator framework */
info->rdev = regulator_register(&info->desc, &pdev->dev,
- init_data, info);
+ init_data, info, NULL);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register %s: err %i\n",
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index b8f520513ce..0ee00de4be7 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -42,7 +42,7 @@ static int __devinit dummy_regulator_probe(struct platform_device *pdev)
int ret;
dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
+ &dummy_initdata, NULL, NULL);
if (IS_ERR(dummy_regulator_rdev)) {
ret = PTR_ERR(dummy_regulator_rdev);
pr_err("Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 21ecf212a52..e24e3a174c4 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -27,6 +27,10 @@
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -38,6 +42,58 @@ struct fixed_voltage_data {
bool is_enabled;
};
+
+/**
+ * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
+ * @dev: device requesting the fixed_voltage_config
+ *
+ * Populates the fixed_voltage_config structure by extracting data from the
+ * device tree node. Returns a pointer to the populated structure, or NULL
+ * if memory allocation fails.
+ */
+static struct fixed_voltage_config *
+of_get_fixed_voltage_config(struct device *dev)
+{
+ struct fixed_voltage_config *config;
+ struct device_node *np = dev->of_node;
+ const __be32 *delay;
+ struct regulator_init_data *init_data;
+
+ config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
+ GFP_KERNEL);
+ if (!config)
+ return NULL;
+
+ config->init_data = of_get_regulator_init_data(dev, dev->of_node);
+ if (!config->init_data)
+ return NULL;
+
+ init_data = config->init_data;
+ init_data->constraints.apply_uV = 0;
+
+ config->supply_name = init_data->constraints.name;
+ if (init_data->constraints.min_uV == init_data->constraints.max_uV) {
+ config->microvolts = init_data->constraints.min_uV;
+ } else {
+ dev_err(dev,
+ "Fixed regulator specified with variable voltages\n");
+ return NULL;
+ }
+
+ if (init_data->constraints.boot_on)
+ config->enabled_at_boot = true;
+
+ config->gpio = of_get_named_gpio(np, "gpio", 0);
+ delay = of_get_property(np, "startup-delay-us", NULL);
+ if (delay)
+ config->startup_delay = be32_to_cpu(*delay);
+
+ if (of_find_property(np, "enable-active-high", NULL))
+ config->enable_high = true;
+
+ return config;
+}
+
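
of_get_fixed_voltage_config() above only accepts a constraint range whose minimum and maximum are equal, because a fixed regulator has exactly one output voltage; boot-on and the optional startup delay are carried over as-is. That validation reduced to plain C (the struct is a cut-down stand-in, not the kernel's fixed_voltage_config):

    #include <stdio.h>

    struct fixed_cfg {
        int microvolts;
        int enabled_at_boot;
        unsigned long startup_delay_us;
    };

    static int parse_fixed(int min_uv, int max_uv, int boot_on,
                           unsigned long delay_us, struct fixed_cfg *cfg)
    {
        if (min_uv != max_uv)
            return -1;   /* "Fixed regulator specified with variable voltages" */

        cfg->microvolts = min_uv;
        cfg->enabled_at_boot = boot_on;
        cfg->startup_delay_us = delay_us;
        return 0;
    }

    int main(void)
    {
        struct fixed_cfg cfg;

        if (parse_fixed(3300000, 3300000, 1, 70000, &cfg) == 0)
            printf("fixed supply: %d uV, boot_on=%d, %lu us startup\n",
                   cfg.microvolts, cfg.enabled_at_boot, cfg.startup_delay_us);

        if (parse_fixed(1800000, 3300000, 0, 0, &cfg) != 0)
            printf("variable range rejected\n");
        return 0;
    }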
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -80,7 +136,10 @@ static int fixed_voltage_get_voltage(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
- return data->microvolts;
+ if (data->microvolts)
+ return data->microvolts;
+ else
+ return -EINVAL;
}
static int fixed_voltage_list_voltage(struct regulator_dev *dev,
@@ -105,10 +164,18 @@ static struct regulator_ops fixed_voltage_ops = {
static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
{
- struct fixed_voltage_config *config = pdev->dev.platform_data;
+ struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
int ret;
+ if (pdev->dev.of_node)
+ config = of_get_fixed_voltage_config(&pdev->dev);
+ else
+ config = pdev->dev.platform_data;
+
+ if (!config)
+ return -ENOMEM;
+
drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
if (drvdata == NULL) {
dev_err(&pdev->dev, "Failed to allocate device data\n");
@@ -180,7 +247,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata,
+ pdev->dev.of_node);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -217,12 +285,23 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id fixed_of_match[] __devinitconst = {
+ { .compatible = "regulator-fixed", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fixed_of_match);
+#else
+#define fixed_of_match NULL
+#endif
+
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
.remove = __devexit_p(reg_fixed_voltage_remove),
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
+ .of_match_table = fixed_of_match,
},
};
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index f0acf52498b..42e1cb1835e 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -284,7 +284,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
drvdata->state = state;
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata, NULL);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e4b3592e817..c1a456c4257 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -170,7 +170,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
for (i = 0; i < 3; i++) {
pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
- init_data, pmic);
+ init_data, pmic, NULL);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
err = PTR_ERR(pmic->rdev[i]);
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 72b16b5f3db..0cfabd318a5 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -451,7 +451,7 @@ static int __devinit setup_regulators(struct lp3971 *lp3971,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
lp3971->rdev[i] = regulator_register(&regulators[reg->id],
- lp3971->dev, reg->initdata, lp3971);
+ lp3971->dev, reg->initdata, lp3971, NULL);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index fbc5e3741be..49a15eefe5f 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -555,7 +555,7 @@ static int __devinit setup_regulators(struct lp3972 *lp3972,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
lp3972->rdev[i] = regulator_register(&regulators[reg->id],
- lp3972->dev, reg->initdata, lp3972);
+ lp3972->dev, reg->initdata, lp3972, NULL);
if (IS_ERR(lp3972->rdev[i])) {
err = PTR_ERR(lp3972->rdev[i]);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 3f49512c513..40e7a4db285 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -214,7 +214,7 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
}
rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max1586);
+ max1586, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1062cf9f02d..b06a2399587 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -16,6 +16,7 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max8649.h>
+#include <linux/regmap.h>
#define MAX8649_DCDC_VMIN 750000 /* uV */
#define MAX8649_DCDC_VMAX 1380000 /* uV */
@@ -49,9 +50,8 @@
struct max8649_regulator_info {
struct regulator_dev *regulator;
- struct i2c_client *i2c;
struct device *dev;
- struct mutex io_lock;
+ struct regmap *regmap;
int vol_reg;
unsigned mode:2; /* bit[1:0] = VID1, VID0 */
@@ -63,71 +63,6 @@ struct max8649_regulator_info {
/* I2C operations */
-static inline int max8649_read_device(struct i2c_client *i2c,
- int reg, int bytes, void *dest)
-{
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int max8649_write_device(struct i2c_client *i2c,
- int reg, int bytes, void *src)
-{
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int max8649_reg_read(struct i2c_client *i2c, int reg)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char data;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &data);
- mutex_unlock(&info->io_lock);
-
- if (ret < 0)
- return ret;
- return (int)data;
-}
-
-static int max8649_set_bits(struct i2c_client *i2c, int reg,
- unsigned char mask, unsigned char data)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char value;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = max8649_write_device(i2c, reg, 1, &value);
-out:
- mutex_unlock(&info->io_lock);
- return ret;
-}
-
static inline int check_range(int min_uV, int max_uV)
{
if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
@@ -144,13 +79,14 @@ static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
static int max8649_get_voltage(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
unsigned char data;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- data = (unsigned char)ret & MAX8649_VOL_MASK;
+ data = (unsigned char)val & MAX8649_VOL_MASK;
return max8649_list_voltage(rdev, data);
}
@@ -170,14 +106,14 @@ static int max8649_set_voltage(struct regulator_dev *rdev,
mask = MAX8649_VOL_MASK;
*selector = data & mask;
- return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+ return regmap_update_bits(info->regmap, info->vol_reg, mask, data);
}
/* EN_PD means pulldown on EN input */
static int max8649_enable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD, 0);
}
/*
@@ -187,38 +123,40 @@ static int max8649_enable(struct regulator_dev *rdev)
static int max8649_disable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD,
MAX8649_EN_PD);
}
static int max8649_is_enabled(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_CONTROL, &val);
+ if (ret != 0)
return ret;
- return !((unsigned char)ret & MAX8649_EN_PD);
+ return !((unsigned char)val & MAX8649_EN_PD);
}
static int max8649_enable_time(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
int voltage, rate, ret;
+ unsigned int val;
/* get voltage */
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- ret &= MAX8649_VOL_MASK;
+ val &= MAX8649_VOL_MASK;
voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
/* get rate */
- ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
+ if (ret != 0)
return ret;
- ret = (ret & MAX8649_RAMP_MASK) >> 5;
+ ret = (val & MAX8649_RAMP_MASK) >> 5;
rate = (32 * 1000) >> ret; /* uV/uS */
return DIV_ROUND_UP(voltage, rate);
@@ -230,12 +168,12 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
- MAX8649_FORCE_PWM);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_FORCE_PWM,
+ MAX8649_FORCE_PWM);
break;
case REGULATOR_MODE_NORMAL:
- max8649_set_bits(info->i2c, info->vol_reg,
- MAX8649_FORCE_PWM, 0);
+ regmap_update_bits(info->regmap, info->vol_reg,
+ MAX8649_FORCE_PWM, 0);
break;
default:
return -EINVAL;
@@ -246,10 +184,13 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned int max8649_get_mode(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret & MAX8649_FORCE_PWM)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
+ return ret;
+ if (val & MAX8649_FORCE_PWM)
return REGULATOR_MODE_FAST;
return REGULATOR_MODE_NORMAL;
}
@@ -275,11 +216,17 @@ static struct regulator_desc dcdc_desc = {
.owner = THIS_MODULE,
};
+static struct regmap_config max8649_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit max8649_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8649_platform_data *pdata = client->dev.platform_data;
struct max8649_regulator_info *info = NULL;
+ unsigned int val;
unsigned char data;
int ret;
@@ -289,9 +236,14 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
return -ENOMEM;
}
- info->i2c = client;
+ info->regmap = regmap_init_i2c(client, &max8649_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
+ goto fail;
+ }
+
info->dev = &client->dev;
- mutex_init(&info->io_lock);
i2c_set_clientdata(client, info);
info->mode = pdata->mode;
@@ -312,8 +264,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
break;
}
- ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
- if (ret < 0) {
+ ret = regmap_read(info->regmap, MAX8649_CHIP_ID1, &val);
+ if (ret != 0) {
dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
ret);
goto out;
@@ -321,33 +273,33 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
/* enable VID0 & VID1 */
- max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+ regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
/* enable/disable external clock synchronization */
info->extclk = pdata->extclk;
data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
if (info->extclk) {
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
- max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
- info->extclk_freq << 6);
+ regmap_update_bits(info->regmap, MAX8649_SYNC, MAX8649_EXT_MASK,
+ info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
info->ramp_timing = pdata->ramp_timing;
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
- info->ramp_timing << 5);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_MASK,
+ info->ramp_timing << 5);
}
info->ramp_down = pdata->ramp_down;
if (info->ramp_down) {
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
- MAX8649_RAMP_DOWN);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+ MAX8649_RAMP_DOWN);
}
info->regulator = regulator_register(&dcdc_desc, &client->dev,
- pdata->regulator, info);
+ pdata->regulator, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
@@ -358,6 +310,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Max8649 regulator device is detected.\n");
return 0;
out:
+ regmap_exit(info->regmap);
+fail:
kfree(info);
return ret;
}
@@ -369,6 +323,7 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client)
if (info) {
if (info->regulator)
regulator_unregister(info->regulator);
+ regmap_exit(info->regmap);
kfree(info);
}
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 33f5d9a492e..a838e664569 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -449,7 +449,7 @@ static int __devinit max8660_probe(struct i2c_client *client,
rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max8660);
+ max8660, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index cc9ec0e0327..cc290d37c46 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -24,9 +24,13 @@
#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
#define SD1_DVM_EN 6 /* SDV1 bit 6 */
-/* bit definitions in SD & LDO control registers */
-#define OUT_ENABLE 0x1f /* Power U/D sequence as I2C */
-#define OUT_DISABLE 0x1e /* Power U/D sequence as I2C */
+/* bit definitions in LDO control registers */
+#define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */
+#define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */
+#define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */
+#define LDO_I2C_EN 0x1 /* Enable by i2c */
+#define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */
+#define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */
struct max8925_regulator_info {
struct regulator_desc desc;
@@ -40,7 +44,6 @@ struct max8925_regulator_info {
int vol_reg;
int vol_shift;
int vol_nbits;
- int enable_bit;
int enable_reg;
};
@@ -98,8 +101,10 @@ static int max8925_enable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_ENABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT |
+ LDO_I2C_EN << LDO_I2C_EN_SHIFT);
}
static int max8925_disable(struct regulator_dev *rdev)
@@ -107,20 +112,24 @@ static int max8925_disable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_DISABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT);
}
static int max8925_is_enabled(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
+ int ldo_seq, ret;
ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
-
- return ret & (1 << info->enable_bit);
+ ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
+ if (ldo_seq != LDO_SEQ_I2C)
+ return 1;
+ else
+ return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
}
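
[Editor's note: the new LDO_SEQ_*/LDO_I2C_EN_* fields above determine the mask and value arguments passed to max8925_set_bits() in the enable/disable paths. As a quick illustration, a standalone sketch (not part of the patch; the constants are copied from the definitions above) of the resulting register masks:

	#include <stdio.h>

	/* Copied from the driver: sequence field in bits 2-4, I2C enable in bit 0 */
	#define LDO_SEQ_I2C		0x7
	#define LDO_SEQ_MASK		0x7
	#define LDO_SEQ_SHIFT		2
	#define LDO_I2C_EN		0x1
	#define LDO_I2C_EN_MASK		0x1
	#define LDO_I2C_EN_SHIFT	0

	int main(void)
	{
		unsigned int mask = LDO_SEQ_MASK << LDO_SEQ_SHIFT |
				    LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT;
		unsigned int on = LDO_SEQ_I2C << LDO_SEQ_SHIFT |
				  LDO_I2C_EN << LDO_I2C_EN_SHIFT;
		unsigned int off = LDO_SEQ_I2C << LDO_SEQ_SHIFT;

		/* Prints: mask=0x1d enable=0x1d disable=0x1c */
		printf("mask=0x%02x enable=0x%02x disable=0x%02x\n", mask, on, off);
		return 0;
	}

So enabling writes sequence "I2C" plus the enable bit, while disabling writes the same sequence with the enable bit cleared, matching the two max8925_set_bits() calls above.]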
static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
@@ -188,7 +197,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_SDCTL##_id, \
- .enable_bit = 0, \
}
#define MAX8925_LDO(_id, min, max, step) \
@@ -207,7 +215,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_LDOCTL##_id, \
- .enable_bit = 0, \
}
static struct max8925_regulator_info max8925_regulator_info[] = {
@@ -266,7 +273,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
ri->chip = chip;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdata->regulator[pdev->id], ri);
+ pdata->regulator[pdev->id], ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 3883d85c5b8..75d89400c12 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -208,7 +208,7 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
max8952->pdata = pdata;
max8952->rdev = regulator_register(&regulator, max8952->dev,
- &pdata->reg_data, max8952);
+ &pdata->reg_data, max8952, NULL);
if (IS_ERR(max8952->rdev)) {
ret = PTR_ERR(max8952->rdev);
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 6176129a27e..d26e8646277 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1146,7 +1146,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
regulators[id].n_voltages = 16;
rdev[i] = regulator_register(&regulators[id], max8997->dev,
- pdata->regulators[i].initdata, max8997);
+ pdata->regulators[i].initdata, max8997, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 41a1495eec2..2d38c2493a0 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -847,7 +847,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
regulators[index].n_voltages = count;
}
rdev[i] = regulator_register(&regulators[index], max8998->dev,
- pdata->regulators[i].initdata, max8998);
+ pdata->regulators[i].initdata, max8998, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8998->dev, "regulator init failed\n");
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 8479082e1ae..8e9b90ad88a 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -344,7 +344,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
- priv = kzalloc(sizeof(*priv) +
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
pdata->num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
@@ -357,7 +357,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
init_data = &pdata->regulators[i];
priv->regulators[i] = regulator_register(
&mc13783_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ &pdev->dev, init_data->init_data, priv, NULL);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -374,8 +374,6 @@ err:
while (--i >= 0)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
-
return ret;
}
@@ -391,7 +389,6 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 023d17d022c..e8cfc99dd8f 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -527,18 +527,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
- struct mc13xxx_regulator_init_data *init_data;
+ struct mc13xxx_regulator_init_data *mc13xxx_data;
int i, ret;
+ int num_regulators = 0;
u32 val;
- priv = kzalloc(sizeof(*priv) +
- pdata->num_regulators * sizeof(priv->regulators[0]),
+ num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+ if (num_regulators <= 0 && pdata)
+ num_regulators = pdata->num_regulators;
+ if (num_regulators <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
+ num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->num_regulators = num_regulators;
priv->mc13xxx_regulators = mc13892_regulators;
priv->mc13xxx = mc13892;
+ platform_set_drvdata(pdev, priv);
mc13xxx_lock(mc13892);
ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
@@ -569,11 +578,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
= mc13892_vcam_set_mode;
mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
= mc13892_vcam_get_mode;
- for (i = 0; i < pdata->num_regulators; i++) {
- init_data = &pdata->regulators[i];
+
+ mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+ ARRAY_SIZE(mc13892_regulators));
+ for (i = 0; i < num_regulators; i++) {
+ struct regulator_init_data *init_data;
+ struct regulator_desc *desc;
+ struct device_node *node = NULL;
+ int id;
+
+ if (mc13xxx_data) {
+ id = mc13xxx_data[i].id;
+ init_data = mc13xxx_data[i].init_data;
+ node = mc13xxx_data[i].node;
+ } else {
+ id = pdata->regulators[i].id;
+ init_data = pdata->regulators[i].init_data;
+ }
+ desc = &mc13892_regulators[id].desc;
+
priv->regulators[i] = regulator_register(
- &mc13892_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ desc, &pdev->dev, init_data, priv, node);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -583,8 +608,6 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, priv);
-
return 0;
err:
while (--i >= 0)
@@ -592,7 +615,6 @@ err:
err_free:
mc13xxx_unlock(mc13892);
- kfree(priv);
return ret;
}
@@ -600,16 +622,13 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
int i;
platform_set_drvdata(pdev, NULL);
- for (i = 0; i < pdata->num_regulators; i++)
+ for (i = 0; i < priv->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 6532853a6ef..80ecafef1bc 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -18,12 +18,14 @@
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
@@ -236,6 +238,61 @@ int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+#ifdef CONFIG_OF
+int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ struct device_node *parent, *child;
+ int num = 0;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return -ENODEV;
+
+ for_each_child_of_node(parent, child)
+ num++;
+
+ return num;
+}
+
+struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13xxx_regulator_init_data *data, *p;
+ struct device_node *parent, *child;
+ int i;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return NULL;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+ p = data;
+
+ for_each_child_of_node(parent, child) {
+ for (i = 0; i < num_regulators; i++) {
+ if (!of_node_cmp(child->name,
+ regulators[i].desc.name)) {
+ p->id = i;
+ p->init_data = of_get_regulator_init_data(
+ &pdev->dev, child);
+ p->node = child;
+ p++;
+ break;
+ }
+ }
+ }
+
+ return data;
+}
+#endif
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 27758267e12..b3961c658b0 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -29,6 +29,7 @@ struct mc13xxx_regulator_priv {
struct mc13xxx *mc13xxx;
u32 powermisc_pwgt_state;
struct mc13xxx_regulator *mc13xxx_regulators;
+ int num_regulators;
struct regulator_dev *regulators[];
};
@@ -42,13 +43,32 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector);
extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+#ifdef CONFIG_OF
+extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
+extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators);
+#else
+static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ return NULL;
+}
+#endif
+
extern struct regulator_ops mc13xxx_regulator_ops;
extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -66,7 +86,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -81,7 +101,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
new file mode 100644
index 00000000000..f1651eb6964
--- /dev/null
+++ b/drivers/regulator/of_regulator.c
@@ -0,0 +1,87 @@
+/*
+ * OF helpers for regulator framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regulator/machine.h>
+
+static void of_get_regulation_constraints(struct device_node *np,
+ struct regulator_init_data **init_data)
+{
+ const __be32 *min_uV, *max_uV, *uV_offset;
+ const __be32 *min_uA, *max_uA;
+ struct regulation_constraints *constraints = &(*init_data)->constraints;
+
+ constraints->name = of_get_property(np, "regulator-name", NULL);
+
+ min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
+ if (min_uV)
+ constraints->min_uV = be32_to_cpu(*min_uV);
+ max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
+ if (max_uV)
+ constraints->max_uV = be32_to_cpu(*max_uV);
+
+ /* Voltage change possible? */
+ if (constraints->min_uV != constraints->max_uV)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+ /* Only one voltage? Then make sure it's set. */
+ if (constraints->min_uV == constraints->max_uV)
+ constraints->apply_uV = true;
+
+ uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL);
+ if (uV_offset)
+ constraints->uV_offset = be32_to_cpu(*uV_offset);
+ min_uA = of_get_property(np, "regulator-min-microamp", NULL);
+ if (min_uA)
+ constraints->min_uA = be32_to_cpu(*min_uA);
+ max_uA = of_get_property(np, "regulator-max-microamp", NULL);
+ if (max_uA)
+ constraints->max_uA = be32_to_cpu(*max_uA);
+
+ /* Current change possible? */
+ if (constraints->min_uA != constraints->max_uA)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+
+ if (of_find_property(np, "regulator-boot-on", NULL))
+ constraints->boot_on = true;
+
+ if (of_find_property(np, "regulator-always-on", NULL))
+ constraints->always_on = true;
+ else /* status change should be possible if not always on. */
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+}
+
+/**
+ * of_get_regulator_init_data - extract regulator_init_data structure info
+ * @dev: device requesting regulator_init_data
+ * @node: regulator device node to parse
+ *
+ * Populates the regulator_init_data structure by extracting data from the
+ * device tree node; returns a pointer to the populated structure or NULL
+ * if memory allocation fails.
+ */
+struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node)
+{
+ struct regulator_init_data *init_data;
+
+ if (!node)
+ return NULL;
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (!init_data)
+ return NULL; /* Out of memory? */
+
+ of_get_regulation_constraints(node, &init_data);
+ return init_data;
+}
+EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
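
[Editor's note: for orientation, a minimal sketch of how a driver might combine this new helper with the extra of_node argument now taken by regulator_register(). This is a hypothetical fragment only; the example_reg_probe, desc and priv names are assumed and do not come from any driver in this patch:

	/* Assumes <linux/regulator/driver.h> and
	 * <linux/regulator/of_regulator.h> are included. */
	static int example_reg_probe(struct platform_device *pdev)
	{
		struct device_node *np = pdev->dev.of_node;
		struct regulator_init_data *init_data;
		struct regulator_dev *rdev;

		/* Build min/max voltage, boot-on, always-on, ... from the DT node */
		init_data = of_get_regulator_init_data(&pdev->dev, np);
		if (!init_data)
			return -ENOMEM;

		/* Hand the DT node to the regulator core along with the init data */
		rdev = regulator_register(&desc, &pdev->dev, init_data, priv, np);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);

		platform_set_drvdata(pdev, rdev);
		return 0;
	}

The mc13892 probe converted earlier in this diff follows the same shape: parse the per-regulator child nodes, then pass each node through the new fifth argument.]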
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 31f6e11a7f1..a5aab1b08bc 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -277,7 +277,7 @@ static int __devinit pcap_regulator_probe(struct platform_device *pdev)
void *pcap = dev_get_drvdata(pdev->dev.parent);
rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcap);
+ pdev->dev.platform_data, pcap, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 69a11d9dd87..1d1c3105629 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -320,7 +320,7 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
pcf = dev_to_pcf50633(pdev->dev.parent);
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcf);
+ pdev->dev.platform_data, pcf, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 1011873896d..d9278da18a9 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -151,7 +151,8 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
/* Register regulator with framework */
tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
&tps6105x->client->dev,
- pdata->regulator_data, tps6105x);
+ pdata->regulator_data, tps6105x,
+ NULL);
if (IS_ERR(tps6105x->regulator)) {
ret = PTR_ERR(tps6105x->regulator);
dev_err(&tps6105x->client->dev,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 9fb4c7b8175..18d61a0529a 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -152,48 +152,21 @@ struct tps_driver_data {
u8 core_regulator;
};
-static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, mask);
-}
-
-static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, 0);
-}
-
-static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(tps->regmap, reg, &val);
-
- if (ret != 0)
- return ret;
- else
- return val;
-}
-
-static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
-{
- return regmap_write(tps->regmap, reg, val);
-}
-
static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
+ int ret;
u8 shift;
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -202,16 +175,17 @@ static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
u8 shift;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -226,7 +200,7 @@ static int tps65023_dcdc_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_dcdc_disable(struct regulator_dev *dev)
@@ -239,7 +213,7 @@ static int tps65023_dcdc_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_ldo_enable(struct regulator_dev *dev)
@@ -252,7 +226,7 @@ static int tps65023_ldo_enable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_ldo_disable(struct regulator_dev *dev)
@@ -265,21 +239,22 @@ static int tps65023_ldo_disable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ret;
int data, dcdc = rdev_get_id(dev);
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
if (dcdc == tps->core_regulator) {
- data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_DEF_CORE, &data);
+ if (ret != 0)
+ return ret;
data &= (tps->info[dcdc]->table_len - 1);
return tps->info[dcdc]->table[data] * 1000;
} else
@@ -318,13 +293,13 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
goto failed;
- ret = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+ ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, vsel);
/* Tell the chip that we have changed the value in DEFCORE
* and its time to update the core voltage
*/
- tps_65023_set_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_GO);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
return ret;
@@ -336,13 +311,14 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
data &= (tps->info[ldo]->table_len - 1);
@@ -354,6 +330,7 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
@@ -377,13 +354,13 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
*selector = vsel;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
- return tps_65023_reg_write(tps, TPS65023_REG_LDO_CTRL, data);
+ return regmap_write(tps->regmap, TPS65023_REG_LDO_CTRL, data);
}
static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
@@ -496,7 +473,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
/* Register the regulators */
rdev = regulator_register(&tps->desc[i], &client->dev,
- init_data, tps);
+ init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
@@ -511,12 +488,12 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
return 0;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index bdef70365f5..0b63ef71a5f 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -599,7 +599,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&tps->desc[i],
- tps6507x_dev->dev, init_data, tps);
+ tps6507x_dev->dev, init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9166aa0a9df..70b7b1f4f00 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -651,7 +651,7 @@ static int __devinit pmic_probe(struct spi_device *spi)
hw->desc[i].n_voltages = 1;
hw->rdev[i] = regulator_register(&hw->desc[i], dev,
- init_data, hw);
+ init_data, hw, NULL);
if (IS_ERR(hw->rdev[i])) {
ret = PTR_ERR(hw->rdev[i]);
hw->rdev[i] = NULL;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 14b9389dd52..c75fb20faa5 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -396,7 +396,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
return err;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index b552aae55b4..5c15ba01e9c 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -25,30 +25,6 @@
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-#define TPS65910_REG_VRTC 0
-#define TPS65910_REG_VIO 1
-#define TPS65910_REG_VDD1 2
-#define TPS65910_REG_VDD2 3
-#define TPS65910_REG_VDD3 4
-#define TPS65910_REG_VDIG1 5
-#define TPS65910_REG_VDIG2 6
-#define TPS65910_REG_VPLL 7
-#define TPS65910_REG_VDAC 8
-#define TPS65910_REG_VAUX1 9
-#define TPS65910_REG_VAUX2 10
-#define TPS65910_REG_VAUX33 11
-#define TPS65910_REG_VMMC 12
-
-#define TPS65911_REG_VDDCTRL 4
-#define TPS65911_REG_LDO1 5
-#define TPS65911_REG_LDO2 6
-#define TPS65911_REG_LDO3 7
-#define TPS65911_REG_LDO4 8
-#define TPS65911_REG_LDO5 9
-#define TPS65911_REG_LDO6 10
-#define TPS65911_REG_LDO7 11
-#define TPS65911_REG_LDO8 12
-
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in millivolts */
@@ -885,8 +861,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
if (!pmic_plat_data)
return -EINVAL;
- reg_data = pmic_plat_data->tps65910_pmic_init_data;
-
pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -937,7 +911,16 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
goto err_free_info;
}
- for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
+ for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
+ i++, info++) {
+
+ reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
+
+ /* Regulator API handles empty constraints but not NULL
+ * constraints */
+ if (!reg_data)
+ continue;
+
/* Register the regulators */
pmic->info[i] = info;
@@ -965,7 +948,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&pmic->desc[i],
- tps65910->dev, reg_data, pmic);
+ tps65910->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65910->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 39d4a1749e7..da00d88f94b 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -727,7 +727,7 @@ static __devinit int tps65912_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
range = tps65912_get_range(pmic, i);
rdev = regulator_register(&pmic->desc[i],
- tps65912->dev, reg_data, pmic);
+ tps65912->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65912->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa0..181a2cfe180 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
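
[Editor's note: the selector arithmetic in twl4030smps_set_voltage()/get_voltage() implies a 600 mV base with 12.5 mV steps for VDD1/VDD2. A standalone sketch of that rounding, for illustration only (not driver code):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int min_uV = 1210000;	/* requested minimum, between two steps */
		int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
		int uV = vsel * 12500 + 600000;

		/* Prints: vsel=49 -> 1212500 uV */
		printf("vsel=%d -> %d uV (rounded up to the next step)\n", vsel, uV);
		return 0;
	}

DIV_ROUND_UP guarantees the programmed voltage is never below the requested minimum.]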
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
@@ -1070,7 +1112,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
break;
}
- rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
+ rdev = regulator_register(&info->desc, &pdev->dev, initdata, info, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
info->desc.name, PTR_ERR(rdev));
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index fc665514699..518667ef9a0 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -185,18 +185,7 @@ static struct platform_driver regulator_userspace_consumer_driver = {
},
};
-
-static int __init regulator_userspace_consumer_init(void)
-{
- return platform_driver_register(&regulator_userspace_consumer_driver);
-}
-module_init(regulator_userspace_consumer_init);
-
-static void __exit regulator_userspace_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_userspace_consumer_driver);
-}
-module_exit(regulator_userspace_consumer_exit);
+module_platform_driver(regulator_userspace_consumer_driver);
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 858c1f861ba..ee0b161c998 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -352,17 +352,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
},
};
-static int __init regulator_virtual_consumer_init(void)
-{
- return platform_driver_register(&regulator_virtual_consumer_driver);
-}
-module_init(regulator_virtual_consumer_init);
-
-static void __exit regulator_virtual_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_virtual_consumer_driver);
-}
-module_exit(regulator_virtual_consumer_exit);
+module_platform_driver(regulator_virtual_consumer_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Virtual regulator consumer");
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index bd3531d8b2a..4904a40b0d4 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -511,7 +511,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -553,7 +554,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -590,7 +591,6 @@ err_regulator:
err:
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return ret;
}
@@ -605,7 +605,6 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return 0;
}
@@ -722,7 +721,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -747,7 +747,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -771,7 +771,6 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(dcdc->regulator);
err:
- kfree(dcdc);
return ret;
}
@@ -783,7 +782,6 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
- kfree(dcdc);
return 0;
}
@@ -874,7 +872,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -973,7 +971,7 @@ static __devinit int wm831x_epe_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->epe[id], dcdc);
+ pdata->epe[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 01f27c7f423..634aac3f2d5 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -162,7 +162,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->isink[id] == NULL)
return -ENODEV;
- isink = kzalloc(sizeof(struct wm831x_isink), GFP_KERNEL);
+ isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
+ GFP_KERNEL);
if (isink == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -189,7 +190,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.owner = THIS_MODULE;
isink->regulator = regulator_register(&isink->desc, &pdev->dev,
- pdata->isink[id], isink);
+ pdata->isink[id], isink, NULL);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -213,7 +214,6 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(isink->regulator);
err:
- kfree(isink);
return ret;
}
@@ -226,7 +226,6 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
free_irq(platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
- kfree(isink);
return 0;
}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 6709710a059..f1e4ab0f9fd 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -326,7 +326,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -351,7 +351,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -376,7 +376,6 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -388,7 +387,6 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -596,7 +594,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -621,7 +619,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -645,7 +643,6 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -655,7 +652,6 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -793,7 +789,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -818,7 +814,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -831,7 +827,6 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
return 0;
err:
- kfree(ldo);
return ret;
}
@@ -840,7 +835,6 @@ static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bcb22c4409..6894009d815 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1428,7 +1428,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
/* register regulator */
rdev = regulator_register(&wm8350_reg[pdev->id], &pdev->dev,
pdev->dev.platform_data,
- dev_get_drvdata(&pdev->dev));
+ dev_get_drvdata(&pdev->dev), NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s\n",
wm8350_reg[pdev->id].name);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 71632ddc378..706f39563a7 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -326,7 +326,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, wm8400);
+ pdev->dev.platform_data, wm8400, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index b87bf5c841f..435e335d6e6 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -269,7 +269,7 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->is_enabled = true;
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
- pdata->ldo[id].init_data, ldo);
+ pdata->ldo[id].init_data, ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 53eb4e55b28..877cf6fdcf2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -774,7 +774,7 @@ config RTC_DRV_EP93XX
config RTC_DRV_SA1100
tristate "SA11x0/PXA2xx"
- depends on ARCH_SA1100 || ARCH_PXA
+ depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP
help
If you say Y here you will get access to the real time clock
built into your SA11x0 or PXA2xx CPU.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2..dc4c2748bbc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a00..8a1c031391d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = now.tm_hour;
/* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
+ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
alarm->time.tm_mday = now.tm_mday;
missing = day;
}
- if (alarm->time.tm_mon == -1) {
+ if ((unsigned)alarm->time.tm_mon >= 12) {
alarm->time.tm_mon = now.tm_mon;
if (missing == none)
missing = month;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 64b847b7f97..f04761e6622 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -410,17 +410,7 @@ static struct platform_driver pm860x_rtc_driver = {
.remove = __devexit_p(pm860x_rtc_remove),
};
-static int __init pm860x_rtc_init(void)
-{
- return platform_driver_register(&pm860x_rtc_driver);
-}
-module_init(pm860x_rtc_init);
-
-static void __exit pm860x_rtc_exit(void)
-{
- platform_driver_unregister(&pm860x_rtc_driver);
-}
-module_exit(pm860x_rtc_exit);
+module_platform_driver(pm860x_rtc_driver);
MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index e346705aae9..a0a9810adf0 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -90,7 +90,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* Early AB8500 chips will not clear the rtc read request bit */
if (abx500_get_chip_id(dev) == 0) {
- msleep(1);
+ usleep_range(1000, 1000);
} else {
/* Wait for some cycles after enabling the rtc read in ab8500 */
while (time_before(jiffies, timeout)) {
@@ -102,7 +102,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!(value & RTC_READ_REQUEST))
break;
- msleep(1);
+ usleep_range(1000, 5000);
}
}
@@ -258,6 +258,109 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return ab8500_rtc_irq_enable(dev, alarm->enabled);
}
+
+static int ab8500_rtc_set_calibration(struct device *dev, int calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ /*
+ * Check that the calibration value (which is in units of 0.5
+ * parts-per-million) is in the AB8500's range for RtcCalibration
+ * register. -128 (0x80) is not permitted because the AB8500 uses
+ * a sign-bit rather than two's complement, so 0x80 is just another
+ * representation of zero.
+ */
+ if ((calibration < -127) || (calibration > 127)) {
+ dev_err(dev, "RtcCalibration value outside permitted range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The AB8500 uses sign (in bit7) and magnitude (in bits 0-6),
+ * so we need to convert to this representation before writing
+ * into the RtcCalibration register...
+ */
+ if (calibration >= 0)
+ rtccal = 0x7F & calibration;
+ else
+ rtccal = ~(calibration - 1) | 0x80;
+
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, rtccal);
+
+ return retval;
+}
+
+static int ab8500_rtc_get_calibration(struct device *dev, int *calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, &rtccal);
+ if (retval >= 0) {
+ /*
+ * The AB8500 uses sign (in bit7) and magnitude (in bits 0-6),
+ * so we need to convert the value from the RtcCalibration register
+ * into a two's complement signed value...
+ */
+ if (rtccal & 0x80)
+ *calibration = 0 - (rtccal & 0x7F);
+ else
+ *calibration = 0x7F & rtccal;
+ }
+
+ return retval;
+}
+
+static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval;
+ int calibration = 0;
+
+ if (sscanf(buf, " %i ", &calibration) != 1) {
+ dev_err(dev, "Failed to store RTC calibration attribute\n");
+ return -EINVAL;
+ }
+
+ retval = ab8500_rtc_set_calibration(dev, calibration);
+
+ return retval ? retval : count;
+}
+
+static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval = 0;
+ int calibration = 0;
+
+ retval = ab8500_rtc_get_calibration(dev, &calibration);
+ if (retval < 0) {
+ dev_err(dev, "Failed to read RTC calibration attribute\n");
+ sprintf(buf, "0\n");
+ return retval;
+ }
+
+ return sprintf(buf, "%d\n", calibration);
+}
+
+static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR,
+ ab8500_sysfs_show_rtc_calibration,
+ ab8500_sysfs_store_rtc_calibration);
+
+static int ab8500_sysfs_rtc_register(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_rtc_calibration);
+}
+
+static void ab8500_sysfs_rtc_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_rtc_calibration);
+}
+
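
[Editor's note: the sign-and-magnitude conversion described in the comments above can be exercised outside the kernel. The following standalone sketch (plain C, no kernel dependencies, written for this note) mirrors the encode/decode logic of ab8500_rtc_set_calibration() and ab8500_rtc_get_calibration() and round-trips every permitted value:

	#include <stdio.h>

	/* Encode a calibration value in the -127..127 range into the AB8500's
	 * sign (bit 7) plus magnitude (bits 0-6) format, and decode it again. */
	static unsigned char encode_rtccal(int calibration)
	{
		if (calibration >= 0)
			return 0x7F & calibration;
		return (unsigned char)(~(calibration - 1) | 0x80);
	}

	static int decode_rtccal(unsigned char rtccal)
	{
		if (rtccal & 0x80)
			return 0 - (rtccal & 0x7F);
		return 0x7F & rtccal;
	}

	int main(void)
	{
		int v;

		for (v = -127; v <= 127; v++)
			if (decode_rtccal(encode_rtccal(v)) != v)
				printf("round-trip failure at %d\n", v);
		printf("checked -127..127\n");
		return 0;
	}

Every value in -127..127 survives the round trip, which is why the driver rejects -128 (0x80 would merely alias zero).]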
static irqreturn_t rtc_alarm_handler(int irq, void *data)
{
struct rtc_device *rtc = data;
@@ -295,7 +398,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
/* Wait for reset by the PorRtc */
- msleep(1);
+ usleep_range(1000, 5000);
err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
AB8500_RTC_STAT_REG, &rtc_ctrl);
@@ -308,6 +411,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -316,8 +421,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
}
- err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
- "ab8500-rtc", rtc);
+ err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
+ IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -325,6 +430,13 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+
+ err = ab8500_sysfs_rtc_register(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "sysfs RTC failed to register\n");
+ return err;
+ }
+
return 0;
}
@@ -333,6 +445,8 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ALARM");
+ ab8500_sysfs_rtc_unregister(&pdev->dev);
+
free_irq(irq, rtc);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -349,18 +463,8 @@ static struct platform_driver ab8500_rtc_driver = {
.remove = __devexit_p(ab8500_rtc_remove),
};
-static int __init ab8500_rtc_init(void)
-{
- return platform_driver_register(&ab8500_rtc_driver);
-}
-
-static void __exit ab8500_rtc_exit(void)
-{
- platform_driver_unregister(&ab8500_rtc_driver);
-}
+module_platform_driver(ab8500_rtc_driver);
-module_init(ab8500_rtc_init);
-module_exit(ab8500_rtc_exit);
MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
MODULE_DESCRIPTION("AB8500 RTC Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e39b77a4609..dc474bc6522 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -32,11 +32,17 @@
#include <mach/at91_rtc.h>
+#define at91_rtc_read(field) \
+ __raw_readl(at91_rtc_regs + field)
+#define at91_rtc_write(field, val) \
+ __raw_writel((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
+static void __iomem *at91_rtc_regs;
+static int irq;
/*
* Decode time/date into rtc_time structure
@@ -48,10 +54,10 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/* must read twice in case it changes */
do {
- time = at91_sys_read(timereg);
- date = at91_sys_read(calreg);
- } while ((time != at91_sys_read(timereg)) ||
- (date != at91_sys_read(calreg)));
+ time = at91_rtc_read(timereg);
+ date = at91_rtc_read(calreg);
+ } while ((time != at91_rtc_read(timereg)) ||
+ (date != at91_rtc_read(calreg)));
tm->tm_sec = bcd2bin((time & AT91_RTC_SEC) >> 0);
tm->tm_min = bcd2bin((time & AT91_RTC_MIN) >> 8);
@@ -98,19 +104,19 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* Stop Time/Calendar from counting */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
- at91_sys_write(AT91_RTC_TIMR,
+ at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
| bin2bcd(tm->tm_min) << 8
| bin2bcd(tm->tm_hour) << 16);
- at91_sys_write(AT91_RTC_CALR,
+ at91_rtc_write(AT91_RTC_CALR,
bin2bcd((tm->tm_year + 1900) / 100) /* century */
| bin2bcd(tm->tm_year % 100) << 8 /* year */
| bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
@@ -118,8 +124,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
| bin2bcd(tm->tm_mday) << 24);
/* Restart Time/Calendar */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
return 0;
}
@@ -135,7 +141,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -160,20 +166,20 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_TIMALR,
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
| bin2bcd(tm.tm_hour) << 16
| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
- at91_sys_write(AT91_RTC_CALALR,
+ at91_rtc_write(AT91_RTC_CALALR,
bin2bcd(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
| bin2bcd(tm.tm_mday) << 24
| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
if (alrm->enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -188,10 +194,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
pr_debug("%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
} else
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
return 0;
}
@@ -200,7 +206,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_sys_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
seq_printf(seq, "update_IRQ\t: %s\n",
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -220,7 +226,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_sys_read(AT91_RTC_SR) & at91_sys_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -229,7 +235,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
if (rtsr & AT91_RTC_ACKUPD)
complete(&at91_rtc_updated);
- at91_sys_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
+ at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
rtc_update_irq(rtc, 1, events);
@@ -256,22 +262,41 @@ static const struct rtc_class_ops at91_rtc_ops = {
static int __init at91_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret;
+ struct resource *regs;
+ int ret = 0;
- at91_sys_write(AT91_RTC_CR, 0);
- at91_sys_write(AT91_RTC_MR, 0); /* 24 hour mode */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENXIO;
+ }
+
+ at91_rtc_regs = ioremap(regs->start, resource_size(regs));
+ if (!at91_rtc_regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
+ at91_rtc_write(AT91_RTC_CR, 0);
+ at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
+ ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
- AT91_ID_SYS);
+ irq);
return ret;
}
@@ -284,7 +309,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
return PTR_ERR(rtc);
}
platform_set_drvdata(pdev, rtc);
@@ -301,10 +326,10 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -323,13 +348,13 @@ static int at91_rtc_suspend(struct device *dev)
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_sys_read(AT91_RTC_IMR)
+ at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- enable_irq_wake(AT91_ID_SYS);
+ enable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
}
return 0;
}
@@ -338,9 +363,9 @@ static int at91_rtc_resume(struct device *dev)
{
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- disable_irq_wake(AT91_ID_SYS);
+ disable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
}
return 0;
}
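
Note: the rtc-at91rm9200 hunks above switch the driver from the global at91_sys_read()/at91_sys_write() system-controller accessors to driver-local at91_rtc_read()/at91_rtc_write() helpers that operate on the newly ioremapped at91_rtc_regs base. The helper definitions live in a part of the patch not quoted here; the following is only a sketch of what such accessors typically look like (the readl/writel bodies and the _relaxed variants are assumptions, the names are taken from the hunks above):

	/* Sketch only -- the real definitions are in the non-quoted part of this patch. */
	static void __iomem *at91_rtc_regs;	/* set by ioremap() in at91_rtc_probe() */

	static inline unsigned int at91_rtc_read(unsigned int field)
	{
		return readl_relaxed(at91_rtc_regs + field);	/* assumed MMIO read  */
	}

	static inline void at91_rtc_write(unsigned int field, unsigned int val)
	{
		writel_relaxed(val, at91_rtc_regs + field);	/* assumed MMIO write */
	}
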
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 90d866272c8..abfc1a0c07d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -456,18 +456,7 @@ static struct platform_driver bfin_rtc_driver = {
.resume = bfin_rtc_resume,
};
-static int __init bfin_rtc_init(void)
-{
- return platform_driver_register(&bfin_rtc_driver);
-}
-
-static void __exit bfin_rtc_exit(void)
-{
- platform_driver_unregister(&bfin_rtc_driver);
-}
-
-module_init(bfin_rtc_init);
-module_exit(bfin_rtc_exit);
+module_platform_driver(bfin_rtc_driver);
MODULE_DESCRIPTION("Blackfin On-Chip Real Time Clock Driver");
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index 128270ce355..bf612ef2294 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -218,15 +218,4 @@ static struct platform_driver bq4802_driver = {
.remove = __devexit_p(bq4802_remove),
};
-static int __init bq4802_init(void)
-{
- return platform_driver_register(&bq4802_driver);
-}
-
-static void __exit bq4802_exit(void)
-{
- platform_driver_unregister(&bq4802_driver);
-}
-
-module_init(bq4802_init);
-module_exit(bq4802_exit);
+module_platform_driver(bq4802_driver);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 05beb6c1ca7..d7782aa0994 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -164,7 +164,7 @@ static inline unsigned char cmos_read_bank2(unsigned char addr)
static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
{
outb(addr, RTC_PORT(2));
- outb(val, RTC_PORT(2));
+ outb(val, RTC_PORT(3));
}
#else
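
Note: the one-character rtc-cmos fix above matters because extended bank 2 is accessed through an index/data port pair: the register index is written to RTC_PORT(2) and the data byte to RTC_PORT(3). The old code wrote the value back to the index port, so the write never reached the selected register. For comparison, the matching read helper presumably follows the same pattern (sketch, not quoted from this patch):

	static inline unsigned char cmos_read_bank2(unsigned char addr)
	{
		outb(addr, RTC_PORT(2));	/* select register (index port) */
		return inb(RTC_PORT(3));	/* read its value  (data port)  */
	}
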
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 2322c43af20..d4457afcba8 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -161,16 +161,6 @@ static struct platform_driver rtc_dm355evm_driver = {
},
};
-static int __init dm355evm_rtc_init(void)
-{
- return platform_driver_register(&rtc_dm355evm_driver);
-}
-module_init(dm355evm_rtc_init);
-
-static void __exit dm355evm_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_dm355evm_driver);
-}
-module_exit(dm355evm_rtc_exit);
+module_platform_driver(rtc_dm355evm_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 68e6caf2549..990c3ff489b 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -396,21 +396,10 @@ static struct platform_driver ds1286_platform_driver = {
.remove = __devexit_p(ds1286_remove),
};
-static int __init ds1286_init(void)
-{
- return platform_driver_register(&ds1286_platform_driver);
-}
-
-static void __exit ds1286_exit(void)
-{
- platform_driver_unregister(&ds1286_platform_driver);
-}
+module_platform_driver(ds1286_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1286");
-
-module_init(ds1286_init);
-module_exit(ds1286_exit);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 586c244a05d..761f36bc83a 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -580,20 +580,7 @@ static struct platform_driver ds1511_rtc_driver = {
},
};
- static int __init
-ds1511_rtc_init(void)
-{
- return platform_driver_register(&ds1511_rtc_driver);
-}
-
- static void __exit
-ds1511_rtc_exit(void)
-{
- platform_driver_unregister(&ds1511_rtc_driver);
-}
-
-module_init(ds1511_rtc_init);
-module_exit(ds1511_rtc_exit);
+module_platform_driver(ds1511_rtc_driver);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 1350029044e..6f0a1b530f2 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -361,18 +361,7 @@ static struct platform_driver ds1553_rtc_driver = {
},
};
-static __init int ds1553_init(void)
-{
- return platform_driver_register(&ds1553_rtc_driver);
-}
-
-static __exit void ds1553_exit(void)
-{
- platform_driver_unregister(&ds1553_rtc_driver);
-}
-
-module_init(ds1553_init);
-module_exit(ds1553_exit);
+module_platform_driver(ds1553_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index e3e0f92b60f..76112667c50 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -240,18 +240,7 @@ static struct platform_driver ds1742_rtc_driver = {
},
};
-static __init int ds1742_init(void)
-{
- return platform_driver_register(&ds1742_rtc_driver);
-}
-
-static __exit void ds1742_exit(void)
-{
- platform_driver_unregister(&ds1742_rtc_driver);
-}
-
-module_init(ds1742_init);
-module_exit(ds1742_exit);
+module_platform_driver(ds1742_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b6473631d18..05ab227eeff 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -345,7 +345,7 @@ static const struct dev_pm_ops jz4740_pm_ops = {
#define JZ4740_RTC_PM_OPS NULL
#endif /* CONFIG_PM */
-struct platform_driver jz4740_rtc_driver = {
+static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.remove = __devexit_p(jz4740_rtc_remove),
.driver = {
@@ -355,17 +355,7 @@ struct platform_driver jz4740_rtc_driver = {
},
};
-static int __init jz4740_rtc_init(void)
-{
- return platform_driver_register(&jz4740_rtc_driver);
-}
-module_init(jz4740_rtc_init);
-
-static void __exit jz4740_rtc_exit(void)
-{
- platform_driver_unregister(&jz4740_rtc_driver);
-}
-module_exit(jz4740_rtc_exit);
+module_platform_driver(jz4740_rtc_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ae16250c762..ecc1713b2b4 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -396,17 +396,7 @@ static struct platform_driver lpc32xx_rtc_driver = {
},
};
-static int __init lpc32xx_rtc_init(void)
-{
- return platform_driver_register(&lpc32xx_rtc_driver);
-}
-module_init(lpc32xx_rtc_init);
-
-static void __exit lpc32xx_rtc_exit(void)
-{
- platform_driver_unregister(&lpc32xx_rtc_driver);
-}
-module_exit(lpc32xx_rtc_exit);
+module_platform_driver(lpc32xx_rtc_driver);
MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128fc1d3..64aedd8cc09 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
+ /*
+ * XXX - m41t80 alarm functionality is reported broken.
+ * until it is fixed, don't register alarm functions.
+ *
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
+ */
.proc = m41t80_rtc_proc,
+ /*
+ * See above comment on broken alarm
+ *
.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+ */
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 7317d3b9a3d..ef71132ff20 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -200,7 +200,6 @@ static int __devexit m41t93_remove(struct spi_device *spi)
static struct spi_driver m41t93_driver = {
.driver = {
.name = "rtc-m41t93",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t93_probe,
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index e259ed76ae8..2a4721f6179 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -147,7 +147,6 @@ static int __devexit m41t94_remove(struct spi_device *spi)
static struct spi_driver m41t94_driver = {
.driver = {
.name = "rtc-m41t94",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t94_probe,
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 8e2a24e33ed..f9e3b358373 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -216,21 +216,10 @@ static struct platform_driver m48t35_platform_driver = {
.remove = __devexit_p(m48t35_remove),
};
-static int __init m48t35_init(void)
-{
- return platform_driver_register(&m48t35_platform_driver);
-}
-
-static void __exit m48t35_exit(void)
-{
- platform_driver_unregister(&m48t35_platform_driver);
-}
+module_platform_driver(m48t35_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("M48T35 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t35");
-
-module_init(m48t35_init);
-module_exit(m48t35_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 28365388fb6..30ebfec9fd2 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -530,18 +530,7 @@ static struct platform_driver m48t59_rtc_driver = {
.remove = __devexit_p(m48t59_rtc_remove),
};
-static int __init m48t59_rtc_init(void)
-{
- return platform_driver_register(&m48t59_rtc_driver);
-}
-
-static void __exit m48t59_rtc_exit(void)
-{
- platform_driver_unregister(&m48t59_rtc_driver);
-}
-
-module_init(m48t59_rtc_init);
-module_exit(m48t59_rtc_exit);
+module_platform_driver(m48t59_rtc_driver);
MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
MODULE_DESCRIPTION("M48T59/M48T02/M48T08 RTC driver");
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f981287d582..863fb3363aa 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -185,21 +185,10 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.remove = __devexit_p(m48t86_rtc_remove),
};
-static int __init m48t86_rtc_init(void)
-{
- return platform_driver_register(&m48t86_rtc_platform_driver);
-}
-
-static void __exit m48t86_rtc_exit(void)
-{
- platform_driver_unregister(&m48t86_rtc_platform_driver);
-}
+module_platform_driver(m48t86_rtc_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t86");
-
-module_init(m48t86_rtc_init);
-module_exit(m48t86_rtc_exit);
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 0ec3f588a25..1f6b3cc58e8 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -154,7 +154,6 @@ static int __devexit max6902_remove(struct spi_device *spi)
static struct spi_driver max6902_driver = {
.driver = {
.name = "rtc-max6902",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max6902_probe,
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 3bc046f427e..4a5529346b4 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -299,17 +299,7 @@ static struct platform_driver max8925_rtc_driver = {
.remove = __devexit_p(max8925_rtc_remove),
};
-static int __init max8925_rtc_init(void)
-{
- return platform_driver_register(&max8925_rtc_driver);
-}
-module_init(max8925_rtc_init);
-
-static void __exit max8925_rtc_exit(void)
-{
- platform_driver_unregister(&max8925_rtc_driver);
-}
-module_exit(max8925_rtc_exit);
+module_platform_driver(max8925_rtc_driver);
MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 2e48aa60427..7196f438c08 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -327,17 +327,7 @@ static struct platform_driver max8998_rtc_driver = {
.id_table = max8998_rtc_id,
};
-static int __init max8998_rtc_init(void)
-{
- return platform_driver_register(&max8998_rtc_driver);
-}
-module_init(max8998_rtc_init);
-
-static void __exit max8998_rtc_exit(void)
-{
- platform_driver_unregister(&max8998_rtc_driver);
-}
-module_exit(max8998_rtc_exit);
+module_platform_driver(max8998_rtc_driver);
MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 9d0c3b478d5..546f6850bff 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -399,7 +399,7 @@ static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
return 0;
}
-const struct platform_device_id mc13xxx_rtc_idtable[] = {
+static const struct platform_device_id mc13xxx_rtc_idtable[] = {
{
.name = "mc13783-rtc",
}, {
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index da60915818b..9d3caccfc25 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -418,17 +418,7 @@ static struct platform_driver mpc5121_rtc_driver = {
.remove = __devexit_p(mpc5121_rtc_remove),
};
-static int __init mpc5121_rtc_init(void)
-{
- return platform_driver_register(&mpc5121_rtc_driver);
-}
-module_init(mpc5121_rtc_init);
-
-static void __exit mpc5121_rtc_exit(void)
-{
- platform_driver_unregister(&mpc5121_rtc_driver);
-}
-module_exit(mpc5121_rtc_exit);
+module_platform_driver(mpc5121_rtc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bb21f443fb7..6cd6c723534 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -537,18 +537,7 @@ static struct platform_driver vrtc_mrst_platform_driver = {
}
};
-static int __init vrtc_mrst_init(void)
-{
- return platform_driver_register(&vrtc_mrst_platform_driver);
-}
-
-static void __exit vrtc_mrst_exit(void)
-{
- platform_driver_unregister(&vrtc_mrst_platform_driver);
-}
-
-module_init(vrtc_mrst_init);
-module_exit(vrtc_mrst_exit);
+module_platform_driver(vrtc_mrst_platform_driver);
MODULE_AUTHOR("Jacob Pan; Feng Tang");
MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 39e41fbdf08..5e1d64ee522 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -155,7 +155,6 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
{
struct rtc_time alarm_tm, now_tm;
unsigned long now, time;
- int ret;
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
@@ -168,21 +167,33 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
alarm_tm.tm_hour = alrm->tm_hour;
alarm_tm.tm_min = alrm->tm_min;
alarm_tm.tm_sec = alrm->tm_sec;
- rtc_tm_to_time(&now_tm, &now);
rtc_tm_to_time(&alarm_tm, &time);
- if (time < now) {
- time += 60 * 60 * 24;
- rtc_time_to_tm(time, &alarm_tm);
- }
-
- ret = rtc_tm_to_time(&alarm_tm, &time);
-
/* clear all the interrupt status bits */
writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
set_alarm_or_time(dev, MXC_RTC_ALARM, time);
- return ret;
+ return 0;
+}
+
+static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 reg;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+ reg = readw(ioaddr + RTC_RTCIENR);
+
+ if (enabled)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ writew(reg, ioaddr + RTC_RTCIENR);
+ spin_unlock_irq(&pdata->rtc->irq_lock);
}
/* This function is the RTC interrupt service routine. */
@@ -199,13 +210,12 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
- /* clear alarm interrupt if it has occurred */
- if (status & RTC_ALM_BIT)
- status &= ~RTC_ALM_BIT;
-
/* update irq data & counter */
- if (status & RTC_ALM_BIT)
+ if (status & RTC_ALM_BIT) {
events |= (RTC_AF | RTC_IRQF);
+ /* RTC alarm should be one-shot */
+ mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
+ }
if (status & RTC_1HZ_BIT)
events |= (RTC_UF | RTC_IRQF);
@@ -213,9 +223,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
if (status & PIT_ALL_ON)
events |= (RTC_PF | RTC_IRQF);
- if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm))
- rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
-
rtc_update_irq(pdata->rtc, 1, events);
spin_unlock_irq(&pdata->rtc->irq_lock);
@@ -242,26 +249,6 @@ static void mxc_rtc_release(struct device *dev)
spin_unlock_irq(&pdata->rtc->irq_lock);
}
-static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
- unsigned int enabled)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- void __iomem *ioaddr = pdata->ioaddr;
- u32 reg;
-
- spin_lock_irq(&pdata->rtc->irq_lock);
- reg = readw(ioaddr + RTC_RTCIENR);
-
- if (enabled)
- reg |= bit;
- else
- reg &= ~bit;
-
- writew(reg, ioaddr + RTC_RTCIENR);
- spin_unlock_irq(&pdata->rtc->irq_lock);
-}
-
static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
@@ -290,6 +277,17 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
{
+ /*
+ * TTC_DAYR register is 9-bit in MX1 SoC, save time and day of year only
+ */
+ if (cpu_is_mx1()) {
+ struct rtc_time tm;
+
+ rtc_time_to_tm(time, &tm);
+ tm.tm_year = 70;
+ rtc_tm_to_time(&tm, &time);
+ }
+
/* Avoid roll-over from reading the different registers */
do {
set_alarm_or_time(dev, MXC_RTC_TIME, time);
@@ -324,21 +322,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
int ret;
- if (rtc_valid_tm(&alrm->time)) {
- if (alrm->time.tm_sec > 59 ||
- alrm->time.tm_hour > 23 ||
- alrm->time.tm_min > 59)
- return -EINVAL;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- } else {
- ret = rtc_valid_tm(&alrm->time);
- if (ret)
- return ret;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- }
-
+ ret = rtc_update_alarm(dev, &alrm->time);
if (ret)
return ret;
@@ -424,6 +408,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
pdata->irq = -1;
}
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 1);
+
rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -459,9 +446,39 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int mxc_rtc_suspend(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static int mxc_rtc_resume(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static struct dev_pm_ops mxc_rtc_pm_ops = {
+ .suspend = mxc_rtc_suspend,
+ .resume = mxc_rtc_resume,
+};
+#endif
+
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
+#ifdef CONFIG_PM
+ .pm = &mxc_rtc_pm_ops,
+#endif
.owner = THIS_MODULE,
},
.remove = __exit_p(mxc_rtc_remove),
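
Note: the rtc-mxc.c changes above wrap the new suspend/resume callbacks in an open-coded #ifdef CONFIG_PM dev_pm_ops block. An equivalent, slightly more compact alternative (a sketch only, not what this patch does) is the SIMPLE_DEV_PM_OPS helper, which fills in the system-sleep ops only when sleep support is configured; the callbacks themselves would then typically be guarded by CONFIG_PM_SLEEP or marked __maybe_unused:

	/* Alternative sketch: same suspend/resume hooks via the helper macro. */
	static SIMPLE_DEV_PM_OPS(mxc_rtc_pm_ops, mxc_rtc_suspend, mxc_rtc_resume);

	static struct platform_driver mxc_rtc_driver = {
		.driver	= {
			.name	= "mxc_rtc",
			.pm	= &mxc_rtc_pm_ops,	/* struct always exists, no #ifdef */
			.owner	= THIS_MODULE,
		},
		.remove	= __exit_p(mxc_rtc_remove),
	};
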
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 2ee3bbf7e5e..b46c4004d8f 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -340,7 +340,6 @@ static int __devexit pcf2123_remove(struct spi_device *spi)
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = pcf2123_probe,
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 0c423892923..a20202f9ee5 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -294,17 +294,7 @@ static struct platform_driver pcf50633_rtc_driver = {
.remove = __devexit_p(pcf50633_rtc_remove),
};
-static int __init pcf50633_rtc_init(void)
-{
- return platform_driver_register(&pcf50633_rtc_driver);
-}
-module_init(pcf50633_rtc_init);
-
-static void __exit pcf50633_rtc_exit(void)
-{
- platform_driver_unregister(&pcf50633_rtc_driver);
-}
-module_exit(pcf50633_rtc_exit);
+module_platform_driver(pcf50633_rtc_driver);
MODULE_DESCRIPTION("PCF50633 RTC driver");
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 1d28d4451da..02111fee077 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -174,6 +174,8 @@ static struct amba_id pl030_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl030_ids);
+
static struct amba_driver pl030_driver = {
.drv = {
.name = "rtc-pl030",
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index ff1b84bd9bb..a952c8de1dd 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -420,6 +420,8 @@ static struct amba_id pl031_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl031_ids);
+
static struct amba_driver pl031_driver = {
.drv = {
.name = "rtc-pl031",
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index d420e9d877e..9f1d6bcbdf6 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -532,17 +532,7 @@ static struct platform_driver pm8xxx_rtc_driver = {
},
};
-static int __init pm8xxx_rtc_init(void)
-{
- return platform_driver_register(&pm8xxx_rtc_driver);
-}
-module_init(pm8xxx_rtc_init);
-
-static void __exit pm8xxx_rtc_exit(void)
-{
- platform_driver_unregister(&pm8xxx_rtc_driver);
-}
-module_exit(pm8xxx_rtc_exit);
+module_platform_driver(pm8xxx_rtc_driver);
MODULE_ALIAS("platform:rtc-pm8xxx");
MODULE_DESCRIPTION("PMIC8xxx RTC driver");
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index e4b6880aabd..ab0acaeb237 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -164,7 +164,7 @@ static int puv3_rtc_open(struct device *dev)
int ret;
ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
- IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+ 0, "pkunity-rtc alarm", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
@@ -172,7 +172,7 @@ static int puv3_rtc_open(struct device *dev)
}
ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
- IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+ 0, "pkunity-rtc tick", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
@@ -326,7 +326,7 @@ static int puv3_rtc_resume(struct platform_device *pdev)
#define puv3_rtc_resume NULL
#endif
-static struct platform_driver puv3_rtcdrv = {
+static struct platform_driver puv3_rtc_driver = {
.probe = puv3_rtc_probe,
.remove = __devexit_p(puv3_rtc_remove),
.suspend = puv3_rtc_suspend,
@@ -337,21 +337,7 @@ static struct platform_driver puv3_rtcdrv = {
}
};
-static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
-
-static int __init puv3_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&puv3_rtcdrv);
-}
-
-static void __exit puv3_rtc_exit(void)
-{
- platform_driver_unregister(&puv3_rtcdrv);
-}
-
-module_init(puv3_rtc_init);
-module_exit(puv3_rtc_exit);
+module_platform_driver(puv3_rtc_driver);
MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
MODULE_AUTHOR("Hu Dongliang");
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 971bc8e08da..ce2ca8523dd 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -229,7 +229,6 @@ static int __devexit rs5c348_remove(struct spi_device *spi)
static struct spi_driver rs5c348_driver = {
.driver = {
.name = "rtc-rs5c348",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = rs5c348_probe,
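
Note: the SPI RTC drivers touched above (m41t93, m41t94, max6902, pcf2123, rs5c348) all drop the explicit ".bus = &spi_bus_type" initializer. The assignment is redundant because the SPI core sets the bus itself during registration; in essence (sketch of the core behaviour, not part of this patch):

	/* drivers/spi/spi.c, in essence: */
	int spi_register_driver(struct spi_driver *sdrv)
	{
		sdrv->driver.bus = &spi_bus_type;	/* core fills this in */
		/* ... probe/remove/shutdown wrappers ... */
		return driver_register(&sdrv->driver);
	}
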
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f0..aef40bd2957 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <mach/hardware.h>
#include <asm/uaccess.h>
@@ -202,7 +203,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +214,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
@@ -507,7 +508,13 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node)
+ s3c_rtc_cpu_type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s3c6410-rtc") ? TYPE_S3C64XX : TYPE_S3C2410;
+ else
+#endif
+ s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
/* Check RTC Time */
@@ -629,6 +636,17 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id s3c_rtc_dt_match[] = {
+ { .compatible = "samsung,s3c2410-rtc" },
+ { .compatible = "samsung,s3c6410-rtc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
+#else
+#define s3c_rtc_dt_match NULL
+#endif
+
static struct platform_device_id s3c_rtc_driver_ids[] = {
{
.name = "s3c2410-rtc",
@@ -651,24 +669,11 @@ static struct platform_driver s3c_rtc_driver = {
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
+ .of_match_table = s3c_rtc_dt_match,
},
};
-static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";
-
-static int __init s3c_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&s3c_rtc_driver);
-}
-
-static void __exit s3c_rtc_exit(void)
-{
- platform_driver_unregister(&s3c_rtc_driver);
-}
-
-module_init(s3c_rtc_init);
-module_exit(s3c_rtc_exit);
+module_platform_driver(s3c_rtc_driver);
MODULE_DESCRIPTION("Samsung S3C RTC Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 0b40bb88a88..4595d3e645a 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -27,35 +27,42 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
-#include <linux/string.h>
#include <linux/pm.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#ifdef CONFIG_ARCH_PXA
-#include <mach/regs-rtc.h>
-#include <mach/regs-ost.h>
-#endif
-
#define RTC_DEF_DIVIDER (32768 - 1)
#define RTC_DEF_TRIM 0
-
-static const unsigned long RTC_FREQ = 1024;
-static struct rtc_time rtc_alarm;
-static DEFINE_SPINLOCK(sa1100_rtc_lock);
-
-static inline int rtc_periodic_alarm(struct rtc_time *tm)
-{
- return (tm->tm_year == -1) ||
- ((unsigned)tm->tm_mon >= 12) ||
- ((unsigned)(tm->tm_mday - 1) >= 31) ||
- ((unsigned)tm->tm_hour > 23) ||
- ((unsigned)tm->tm_min > 59) ||
- ((unsigned)tm->tm_sec > 59);
-}
-
+#define RTC_FREQ 1024
+
+#define RCNR 0x00 /* RTC Count Register */
+#define RTAR 0x04 /* RTC Alarm Register */
+#define RTSR 0x08 /* RTC Status Register */
+#define RTTR 0x0c /* RTC Timer Trim Register */
+
+#define RTSR_HZE (1 << 3) /* HZ interrupt enable */
+#define RTSR_ALE (1 << 2) /* RTC alarm interrupt enable */
+#define RTSR_HZ (1 << 1) /* HZ rising-edge detected */
+#define RTSR_AL (1 << 0) /* RTC alarm detected */
+
+#define rtc_readl(sa1100_rtc, reg) \
+ readl_relaxed((sa1100_rtc)->base + (reg))
+#define rtc_writel(sa1100_rtc, reg, value) \
+ writel_relaxed((value), (sa1100_rtc)->base + (reg))
+
+struct sa1100_rtc {
+ struct resource *ress;
+ void __iomem *base;
+ struct clk *clk;
+ int irq_1Hz;
+ int irq_Alrm;
+ struct rtc_device *rtc;
+ spinlock_t lock; /* Protects this structure */
+};
/*
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
@@ -83,46 +90,26 @@ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
}
}
-static int rtc_update_alarm(struct rtc_time *alrm)
-{
- struct rtc_time alarm_tm, now_tm;
- unsigned long now, time;
- int ret;
-
- do {
- now = RCNR;
- rtc_time_to_tm(now, &now_tm);
- rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
- ret = rtc_tm_to_time(&alarm_tm, &time);
- if (ret != 0)
- break;
-
- RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
- RTAR = time;
- } while (now != RCNR);
-
- return ret;
-}
-
static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = to_platform_device(dev_id);
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
unsigned int rtsr;
unsigned long events = 0;
- spin_lock(&sa1100_rtc_lock);
+ spin_lock(&sa1100_rtc->lock);
- rtsr = RTSR;
/* clear interrupt sources */
- RTSR = 0;
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
+ rtc_writel(sa1100_rtc, RTSR, 0);
+
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_probe(). */
if (rtsr & (RTSR_ALE | RTSR_HZE)) {
/* This is the original code, before there was the if test
* above. This code does not clear interrupts that were not
* enabled. */
- RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ) & (rtsr >> 2));
} else {
/* For some reason, it is possible to enter this routine
* without interruptions enabled, it has been tested with
@@ -131,13 +118,13 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
* This situation leads to an infinite "loop" of interrupt
* routine calling and as a result the processor seems to
* lock on its first call to open(). */
- RTSR = RTSR_AL | RTSR_HZ;
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
}
/* clear alarm interrupt if it has occurred */
if (rtsr & RTSR_AL)
rtsr &= ~RTSR_ALE;
- RTSR = rtsr & (RTSR_ALE | RTSR_HZE);
+ rtc_writel(sa1100_rtc, RTSR, rtsr & (RTSR_ALE | RTSR_HZE));
/* update irq data & counter */
if (rtsr & RTSR_AL)
@@ -145,91 +132,100 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
if (rtsr & RTSR_HZ)
events |= RTC_UF | RTC_IRQF;
- rtc_update_irq(rtc, 1, events);
+ rtc_update_irq(sa1100_rtc->rtc, 1, events);
- if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
- rtc_update_alarm(&rtc_alarm);
-
- spin_unlock(&sa1100_rtc_lock);
+ spin_unlock(&sa1100_rtc->lock);
return IRQ_HANDLED;
}
static int sa1100_rtc_open(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
int ret;
- struct platform_device *plat_dev = to_platform_device(dev);
- struct rtc_device *rtc = platform_get_drvdata(plat_dev);
- ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc 1Hz", dev);
+ ret = request_irq(sa1100_rtc->irq_1Hz, sa1100_rtc_interrupt,
+ IRQF_DISABLED, "rtc 1Hz", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
+ dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_1Hz);
goto fail_ui;
}
- ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED,
- "rtc Alrm", dev);
+ ret = request_irq(sa1100_rtc->irq_Alrm, sa1100_rtc_interrupt,
+ IRQF_DISABLED, "rtc Alrm", dev);
if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
+ dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_Alrm);
goto fail_ai;
}
- rtc->max_user_freq = RTC_FREQ;
- rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
+ sa1100_rtc->rtc->max_user_freq = RTC_FREQ;
+ rtc_irq_set_freq(sa1100_rtc->rtc, NULL, RTC_FREQ);
return 0;
fail_ai:
- free_irq(IRQ_RTC1Hz, dev);
+ free_irq(sa1100_rtc->irq_1Hz, dev);
fail_ui:
return ret;
}
static void sa1100_rtc_release(struct device *dev)
{
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR = 0;
- OIER &= ~OIER_E1;
- OSSR = OSSR_M1;
- spin_unlock_irq(&sa1100_rtc_lock);
-
- free_irq(IRQ_RTCAlrm, dev);
- free_irq(IRQ_RTC1Hz, dev);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&sa1100_rtc->lock);
+ rtc_writel(sa1100_rtc, RTSR, 0);
+ spin_unlock_irq(&sa1100_rtc->lock);
+
+ free_irq(sa1100_rtc->irq_Alrm, dev);
+ free_irq(sa1100_rtc->irq_1Hz, dev);
}
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- spin_lock_irq(&sa1100_rtc_lock);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ unsigned int rtsr;
+
+ spin_lock_irq(&sa1100_rtc->lock);
+
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
if (enabled)
- RTSR |= RTSR_ALE;
+ rtsr |= RTSR_ALE;
else
- RTSR &= ~RTSR_ALE;
- spin_unlock_irq(&sa1100_rtc_lock);
+ rtsr &= ~RTSR_ALE;
+ rtc_writel(sa1100_rtc, RTSR, rtsr);
+
+ spin_unlock_irq(&sa1100_rtc->lock);
return 0;
}
static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- rtc_time_to_tm(RCNR, tm);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc_readl(sa1100_rtc, RCNR), tm);
return 0;
}
static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
unsigned long time;
int ret;
ret = rtc_tm_to_time(tm, &time);
if (ret == 0)
- RCNR = time;
+ rtc_writel(sa1100_rtc, RCNR, time);
return ret;
}
static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- u32 rtsr;
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ unsigned long time;
+ unsigned int rtsr;
- memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
- rtsr = RTSR;
+ time = rtc_readl(sa1100_rtc, RCNR);
+ rtc_time_to_tm(time, &alrm->time);
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
return 0;
@@ -237,26 +233,39 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- int ret;
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ struct rtc_time now_tm, alarm_tm;
+ unsigned long time, alarm;
+ unsigned int rtsr;
- spin_lock_irq(&sa1100_rtc_lock);
- ret = rtc_update_alarm(&alrm->time);
- if (ret == 0) {
- if (alrm->enabled)
- RTSR |= RTSR_ALE;
- else
- RTSR &= ~RTSR_ALE;
- }
- spin_unlock_irq(&sa1100_rtc_lock);
+ spin_lock_irq(&sa1100_rtc->lock);
- return ret;
+ time = rtc_readl(sa1100_rtc, RCNR);
+ rtc_time_to_tm(time, &now_tm);
+ rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
+ rtc_tm_to_time(&alarm_tm, &alarm);
+ rtc_writel(sa1100_rtc, RTAR, alarm);
+
+ rtsr = rtc_readl(sa1100_rtc, RTSR);
+ if (alrm->enabled)
+ rtsr |= RTSR_ALE;
+ else
+ rtsr &= ~RTSR_ALE;
+ rtc_writel(sa1100_rtc, RTSR, rtsr);
+
+ spin_unlock_irq(&sa1100_rtc->lock);
+
+ return 0;
}
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
- seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
- seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+ seq_printf(seq, "trim/divider\t\t: 0x%08x\n",
+ rtc_readl(sa1100_rtc, RTTR));
+ seq_printf(seq, "RTSR\t\t\t: 0x%08x\n",
+ rtc_readl(sa1100_rtc, RTSR));
return 0;
}
@@ -273,7 +282,51 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
static int sa1100_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
+ struct sa1100_rtc *sa1100_rtc;
+ unsigned int rttr;
+ int ret;
+
+ sa1100_rtc = kzalloc(sizeof(struct sa1100_rtc), GFP_KERNEL);
+ if (!sa1100_rtc)
+ return -ENOMEM;
+
+ spin_lock_init(&sa1100_rtc->lock);
+ platform_set_drvdata(pdev, sa1100_rtc);
+
+ ret = -ENXIO;
+ sa1100_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!sa1100_rtc->ress) {
+ dev_err(&pdev->dev, "No I/O memory resource defined\n");
+ goto err_ress;
+ }
+
+ sa1100_rtc->irq_1Hz = platform_get_irq(pdev, 0);
+ if (sa1100_rtc->irq_1Hz < 0) {
+ dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
+ goto err_ress;
+ }
+ sa1100_rtc->irq_Alrm = platform_get_irq(pdev, 1);
+ if (sa1100_rtc->irq_Alrm < 0) {
+ dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
+ goto err_ress;
+ }
+
+ ret = -ENOMEM;
+ sa1100_rtc->base = ioremap(sa1100_rtc->ress->start,
+ resource_size(sa1100_rtc->ress));
+ if (!sa1100_rtc->base) {
+ dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n");
+ goto err_map;
+ }
+
+ sa1100_rtc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sa1100_rtc->clk)) {
+ dev_err(&pdev->dev, "failed to find rtc clock source\n");
+ ret = PTR_ERR(sa1100_rtc->clk);
+ goto err_clk;
+ }
+ clk_prepare(sa1100_rtc->clk);
+ clk_enable(sa1100_rtc->clk);
/*
* According to the manual we should be able to let RTTR be zero
@@ -282,24 +335,24 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
* If the clock divider is uninitialized then reset it to the
* default value to get the 1Hz clock.
*/
- if (RTTR == 0) {
- RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
- dev_warn(&pdev->dev, "warning: "
- "initializing default clock divider/trim value\n");
+ if (rtc_readl(sa1100_rtc, RTTR) == 0) {
+ rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
+ rtc_writel(sa1100_rtc, RTTR, rttr);
+ dev_warn(&pdev->dev, "warning: initializing default clock"
+ " divider/trim value\n");
/* The current RTC value probably doesn't make sense either */
- RCNR = 0;
+ rtc_writel(sa1100_rtc, RCNR, 0);
}
device_init_wakeup(&pdev->dev, 1);
- rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
- THIS_MODULE);
-
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- platform_set_drvdata(pdev, rtc);
-
+ sa1100_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &sa1100_rtc_ops, THIS_MODULE);
+ if (IS_ERR(sa1100_rtc->rtc)) {
+ ret = PTR_ERR(sa1100_rtc->rtc);
+ dev_err(&pdev->dev, "Failed to register RTC device -> %d\n",
+ ret);
+ goto err_rtc_reg;
+ }
/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
*
@@ -322,33 +375,46 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
*
* Notice that clearing bit 1 and 0 is accomplished by writing ONES to
* the corresponding bits in RTSR. */
- RTSR = RTSR_AL | RTSR_HZ;
+ rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
return 0;
+
+err_rtc_reg:
+err_clk:
+ iounmap(sa1100_rtc->base);
+err_ress:
+err_map:
+ kfree(sa1100_rtc);
+ return ret;
}
static int sa1100_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- if (rtc)
- rtc_device_unregister(rtc);
+ struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
+ rtc_device_unregister(sa1100_rtc->rtc);
+ clk_disable(sa1100_rtc->clk);
+ clk_unprepare(sa1100_rtc->clk);
+ iounmap(sa1100_rtc->base);
return 0;
}
#ifdef CONFIG_PM
static int sa1100_rtc_suspend(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
if (device_may_wakeup(dev))
- enable_irq_wake(IRQ_RTCAlrm);
+ enable_irq_wake(sa1100_rtc->irq_Alrm);
return 0;
}
static int sa1100_rtc_resume(struct device *dev)
{
+ struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+
if (device_may_wakeup(dev))
- disable_irq_wake(IRQ_RTCAlrm);
+ disable_irq_wake(sa1100_rtc->irq_Alrm);
return 0;
}
@@ -369,18 +435,7 @@ static struct platform_driver sa1100_rtc_driver = {
},
};
-static int __init sa1100_rtc_init(void)
-{
- return platform_driver_register(&sa1100_rtc_driver);
-}
-
-static void __exit sa1100_rtc_exit(void)
-{
- platform_driver_unregister(&sa1100_rtc_driver);
-}
-
-module_init(sa1100_rtc_init);
-module_exit(sa1100_rtc_exit);
+module_platform_driver(sa1100_rtc_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 893bac2bb21..19a28a671a8 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -516,17 +516,7 @@ static struct platform_driver spear_rtc_driver = {
},
};
-static int __init rtc_init(void)
-{
- return platform_driver_register(&spear_rtc_driver);
-}
-module_init(rtc_init);
-
-static void __exit rtc_exit(void)
-{
- platform_driver_unregister(&spear_rtc_driver);
-}
-module_exit(rtc_exit);
+module_platform_driver(spear_rtc_driver);
MODULE_ALIAS("platform:rtc-spear");
MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index ed3e9b59903..7621116bd20 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -370,18 +370,7 @@ static struct platform_driver stk17ta8_rtc_driver = {
},
};
-static __init int stk17ta8_init(void)
-{
- return platform_driver_register(&stk17ta8_rtc_driver);
-}
-
-static __exit void stk17ta8_exit(void)
-{
- platform_driver_unregister(&stk17ta8_rtc_driver);
-}
-
-module_init(stk17ta8_init);
-module_exit(stk17ta8_exit);
+module_platform_driver(stk17ta8_rtc_driver);
MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 7315068daa5..10287865e33 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -276,18 +276,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
},
};
-static int __init stmp3xxx_rtc_init(void)
-{
- return platform_driver_register(&stmp3xxx_rtcdrv);
-}
-
-static void __exit stmp3xxx_rtc_exit(void)
-{
- platform_driver_unregister(&stmp3xxx_rtcdrv);
-}
-
-module_init(stmp3xxx_rtc_init);
-module_exit(stmp3xxx_rtc_exit);
+module_platform_driver(stmp3xxx_rtcdrv);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 20687d55e7a..d43b4f6eb4e 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -550,6 +550,11 @@ static int twl_rtc_resume(struct platform_device *pdev)
#define twl_rtc_resume NULL
#endif
+static const struct of_device_id twl_rtc_of_match[] = {
+ {.compatible = "ti,twl4030-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
@@ -559,8 +564,9 @@ static struct platform_driver twl4030rtc_driver = {
.suspend = twl_rtc_suspend,
.resume = twl_rtc_resume,
.driver = {
- .owner = THIS_MODULE,
- .name = "twl_rtc",
+ .owner = THIS_MODULE,
+ .name = "twl_rtc",
+ .of_match_table = twl_rtc_of_match,
},
};
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index f71c3ce1803..bca5d677bc8 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -393,18 +393,7 @@ static struct platform_driver rtc_device_driver = {
},
};
-static __init int v3020_init(void)
-{
- return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void v3020_exit(void)
-{
- platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(v3020_init);
-module_exit(v3020_exit);
+module_platform_driver(rtc_device_driver);
MODULE_DESCRIPTION("V3020 RTC");
MODULE_AUTHOR("Raphael Assenat");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c5698cda366..fcbfdda2993 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -405,15 +405,4 @@ static struct platform_driver rtc_platform_driver = {
},
};
-static int __init vr41xx_rtc_init(void)
-{
- return platform_driver_register(&rtc_platform_driver);
-}
-
-static void __exit vr41xx_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_platform_driver);
-}
-
-module_init(vr41xx_rtc_init);
-module_exit(vr41xx_rtc_exit);
+module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index f93f412423c..9e94fb147c2 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -311,17 +311,7 @@ static struct platform_driver vt8500_rtc_driver = {
},
};
-static int __init vt8500_rtc_init(void)
-{
- return platform_driver_register(&vt8500_rtc_driver);
-}
-module_init(vt8500_rtc_init);
-
-static void __exit vt8500_rtc_exit(void)
-{
- platform_driver_unregister(&vt8500_rtc_driver);
-}
-module_exit(vt8500_rtc_exit);
+module_platform_driver(vt8500_rtc_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index bdc909bd56d..3b6e6a67e76 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -324,15 +324,6 @@ static irqreturn_t wm831x_alm_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t wm831x_per_irq(int irq, void *data)
-{
- struct wm831x_rtc *wm831x_rtc = data;
-
- rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_UF);
-
- return IRQ_HANDLED;
-}
-
static const struct rtc_class_ops wm831x_rtc_ops = {
.read_time = wm831x_rtc_readtime,
.set_mmss = wm831x_rtc_set_mmss,
@@ -405,11 +396,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
int ret = 0;
- wm831x_rtc = kzalloc(sizeof(*wm831x_rtc), GFP_KERNEL);
+ wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
if (wm831x_rtc == NULL)
return -ENOMEM;
@@ -433,14 +423,6 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
- IRQF_TRIGGER_RISING, "RTC period",
- wm831x_rtc);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
- per_irq, ret);
- }
-
ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
IRQF_TRIGGER_RISING, "RTC alarm",
wm831x_rtc);
@@ -452,20 +434,16 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
return 0;
err:
- kfree(wm831x_rtc);
return ret;
}
static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
{
struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
free_irq(alm_irq, wm831x_rtc);
- free_irq(per_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
- kfree(wm831x_rtc);
return 0;
}
@@ -490,17 +468,7 @@ static struct platform_driver wm831x_rtc_driver = {
},
};
-static int __init wm831x_rtc_init(void)
-{
- return platform_driver_register(&wm831x_rtc_driver);
-}
-module_init(wm831x_rtc_init);
-
-static void __exit wm831x_rtc_exit(void)
-{
- platform_driver_unregister(&wm831x_rtc_driver);
-}
-module_exit(wm831x_rtc_exit);
+module_platform_driver(wm831x_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 66421426e40..c2e52d15abb 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -486,17 +486,7 @@ static struct platform_driver wm8350_rtc_driver = {
},
};
-static int __init wm8350_rtc_init(void)
-{
- return platform_driver_register(&wm8350_rtc_driver);
-}
-module_init(wm8350_rtc_init);
-
-static void __exit wm8350_rtc_exit(void)
-{
- platform_driver_unregister(&wm8350_rtc_driver);
-}
-module_exit(wm8350_rtc_exit);
+module_platform_driver(wm8350_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 65894f05a80..eef27a197c0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -17,7 +17,6 @@
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
@@ -1073,7 +1072,7 @@ static const struct file_operations dasd_stats_global_fops = {
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
if (!base_dentry)
@@ -1112,7 +1111,7 @@ static void dasd_statistics_removeroot(void)
static void dasd_statistics_createroot(void)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
dasd_debugfs_root_entry = NULL;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 87a0cf160fe..0326571e7ff 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1718,7 +1718,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
erp->startdev = device;
erp->memdev = device;
erp->magic = default_erp->magic;
- erp->expires = 0;
+ erp->expires = default_erp->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
@@ -2363,7 +2363,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->memdev = device;
erp->block = cqr->block;
erp->magic = cqr->magic;
- erp->expires = 0;
+ erp->expires = cqr->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c388eda1e2b..553b3c5abb0 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -705,6 +705,16 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
+ if (unlikely(!(private->features.feature[8] & 0x01))) {
+ /*
+ * PAV enabled but prefix not, very unlikely
+ * seems to be a lost pathgroup
+ * use base device to do IO
+ */
+ DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+ "Prefix not enabled with PAV enabled\n");
+ return NULL;
+ }
spin_lock_irqsave(&lcu->lock, flags);
alias_device = group->next;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6ab29680586..bbcd5e9206e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -752,24 +752,13 @@ dasd_eckd_cdl_reclen(int recid)
return sizes_trk0[recid];
return LABEL_SIZE;
}
-
-/*
- * Generate device unique id that specifies the physical device.
- */
-static int dasd_eckd_generate_uid(struct dasd_device *device)
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
{
- struct dasd_eckd_private *private;
- struct dasd_uid *uid;
int count;
- unsigned long flags;
+ struct dasd_uid *uid;
- private = (struct dasd_eckd_private *) device->private;
- if (!private)
- return -ENODEV;
- if (!private->ned || !private->gneq)
- return -ENODEV;
uid = &private->uid;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -792,6 +781,23 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
private->vdsneq->uit[count]);
}
}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+ if (!private)
+ return -ENODEV;
+ if (!private->ned || !private->gneq)
+ return -ENODEV;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ create_uid(private);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -811,6 +817,21 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
return -EINVAL;
}
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+ struct dasd_eckd_private *private)
+{
+ struct dasd_uid device_uid;
+
+ create_uid(private);
+ dasd_eckd_get_uid(device, &device_uid);
+
+ return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
@@ -1005,59 +1026,120 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
int conf_len, conf_data_saved;
int rc;
__u8 lpm, opm;
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
+ struct dasd_uid *uid;
+ char print_path_uid[60], print_device_uid[60];
private = (struct dasd_eckd_private *) device->private;
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
- lpm = 0x80;
conf_data_saved = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
- if (lpm & opm) {
- rc = dasd_eckd_read_conf_lpm(device, &conf_data,
- &conf_len, lpm);
- if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read configuration data returned "
- "error %d", rc);
- return rc;
- }
- if (conf_data == NULL) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "No configuration data "
- "retrieved");
- /* no further analysis possible */
- path_data->opm |= lpm;
- continue; /* no error */
+ if (!(lpm & opm))
+ continue;
+ rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+ &conf_len, lpm);
+ if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data returned "
+ "error %d", rc);
+ return rc;
+ }
+ if (conf_data == NULL) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
+ /* no further analysis possible */
+ path_data->opm |= lpm;
+ continue; /* no error */
+ }
+ /* save first valid configuration data */
+ if (!conf_data_saved) {
+ kfree(private->conf_data);
+ private->conf_data = conf_data;
+ private->conf_len = conf_len;
+ if (dasd_eckd_identify_conf_parts(private)) {
+ private->conf_data = NULL;
+ private->conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- /* save first valid configuration data */
- if (!conf_data_saved) {
- kfree(private->conf_data);
- private->conf_data = conf_data;
- private->conf_len = conf_len;
- if (dasd_eckd_identify_conf_parts(private)) {
- private->conf_data = NULL;
- private->conf_len = 0;
- kfree(conf_data);
- continue;
- }
- conf_data_saved++;
+ /*
+ * build the device UID so that data from
+ * other paths can be compared to it
+ */
+ dasd_eckd_generate_uid(device);
+ conf_data_saved++;
+ } else {
+ path_private.conf_data = conf_data;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(
+ &path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- switch (dasd_eckd_path_access(conf_data, conf_len)) {
- case 0x02:
- path_data->npm |= lpm;
- break;
- case 0x03:
- path_data->ppm |= lpm;
- break;
+
+ if (dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ uid = &private->uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to "
+ "the same device, path %02X leads to "
+ "device %s instead of %s\n", lpm,
+ print_path_uid, print_device_uid);
+ return -EINVAL;
}
- path_data->opm |= lpm;
- if (conf_data != private->conf_data)
- kfree(conf_data);
+
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
}
+ switch (dasd_eckd_path_access(conf_data, conf_len)) {
+ case 0x02:
+ path_data->npm |= lpm;
+ break;
+ case 0x03:
+ path_data->ppm |= lpm;
+ break;
+ }
+ path_data->opm |= lpm;
+
+ if (conf_data != private->conf_data)
+ kfree(conf_data);
}
+
return 0;
}
@@ -1090,12 +1172,61 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
return 0;
}
+static int rebuild_device_uid(struct dasd_device *device,
+ struct path_verification_work_data *data)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_path *path_data;
+ __u8 lpm, opm;
+ int rc;
+
+ rc = -ENODEV;
+ private = (struct dasd_eckd_private *) device->private;
+ path_data = &device->path_data;
+ opm = device->path_data.opm;
+
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+ continue;
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data "
+ "returned error %d", rc);
+ break;
+ }
+ memcpy(private->conf_data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ if (dasd_eckd_identify_conf_parts(private)) {
+ rc = -ENODEV;
+ } else /* first valid path is enough */
+ break;
+ }
+
+ if (!rc)
+ rc = dasd_eckd_generate_uid(device);
+
+ return rc;
+}
+
static void do_path_verification_work(struct work_struct *work)
{
struct path_verification_work_data *data;
struct dasd_device *device;
+ struct dasd_eckd_private path_private;
+ struct dasd_uid *uid;
+ __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm;
unsigned long flags;
+ char print_uid[60];
int rc;
data = container_of(work, struct path_verification_work_data, worker);
@@ -1112,64 +1243,129 @@ static void do_path_verification_work(struct work_struct *work)
ppm = 0;
epm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
- if (lpm & data->tbvpm) {
- memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
- memset(&data->cqr, 0, sizeof(data->cqr));
- data->cqr.cpaddr = &data->ccw;
- rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
- data->rcd_buffer,
- lpm);
- if (!rc) {
- switch (dasd_eckd_path_access(data->rcd_buffer,
- DASD_ECKD_RCD_DATA_SIZE)) {
- case 0x02:
- npm |= lpm;
- break;
- case 0x03:
- ppm |= lpm;
- break;
- }
- opm |= lpm;
- } else if (rc == -EOPNOTSUPP) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "path verification: No configuration "
- "data retrieved");
- opm |= lpm;
- } else if (rc == -EAGAIN) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ if (!(lpm & data->tbvpm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+ if (!rc) {
+ switch (dasd_eckd_path_access(data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE)
+ ) {
+ case 0x02:
+ npm |= lpm;
+ break;
+ case 0x03:
+ ppm |= lpm;
+ break;
+ }
+ opm |= lpm;
+ } else if (rc == -EOPNOTSUPP) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "path verification: No configuration "
+ "data retrieved");
+ opm |= lpm;
+ } else if (rc == -EAGAIN) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: device is stopped,"
" try again later");
- epm |= lpm;
- } else {
- dev_warn(&device->cdev->dev,
- "Reading device feature codes failed "
- "(rc=%d) for new path %x\n", rc, lpm);
- continue;
- }
- if (verify_fcx_max_data(device, lpm)) {
+ epm |= lpm;
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading device feature codes failed "
+ "(rc=%d) for new path %x\n", rc, lpm);
+ continue;
+ }
+ if (verify_fcx_max_data(device, lpm)) {
+ opm &= ~lpm;
+ npm &= ~lpm;
+ ppm &= ~lpm;
+ continue;
+ }
+
+ /*
+ * save conf_data for a later comparison,
+ * because rebuild_device_uid may change
+ * the original data
+ */
+ memcpy(&path_rcd_buf, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ path_private.conf_data = (void *) &path_rcd_buf;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ continue;
+ }
+
+ /*
+ * compare the path UID with the device UID only if at
+ * least one valid path is left;
+ * otherwise the device UID may have changed and the
+ * first working path UID will be used as the device UID
+ */
+ if (device->path_data.opm &&
+ dasd_eckd_compare_path_uid(device, &path_private)) {
+ /*
+ * the comparison was not successful;
+ * rebuild the device UID from at least one
+ * known path in case a z/VM hyperswap command
+ * has changed the device
+ *
+ * after this, compare again
+ *
+ * if either the rebuild or the recompare fails,
+ * the path cannot be used
+ */
+ if (rebuild_device_uid(device, data) ||
+ dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "The newly added channel path %02X "
+ "will not be used because it leads "
+ "to a different device %s\n",
+ lpm, print_uid);
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
+ continue;
}
}
+
+ /*
+ * There is a small chance that a path is lost again between
+ * above path verification and the following modification of
+ * the device opm mask. We could avoid that race here by using
+ * yet another path mask, but we rather deal with this unlikely
+ * situation in dasd_start_IO.
+ */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (!device->path_data.opm && opm) {
+ device->path_data.opm = opm;
+ dasd_generic_path_operational(device);
+ } else
+ device->path_data.opm |= opm;
+ device->path_data.npm |= npm;
+ device->path_data.ppm |= ppm;
+ device->path_data.tbvpm |= epm;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
- /*
- * There is a small chance that a path is lost again between
- * above path verification and the following modification of
- * the device opm mask. We could avoid that race here by using
- * yet another path mask, but we rather deal with this unlikely
- * situation in dasd_start_IO.
- */
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (!device->path_data.opm && opm) {
- device->path_data.opm = opm;
- dasd_generic_path_operational(device);
- } else
- device->path_data.opm |= opm;
- device->path_data.npm |= npm;
- device->path_data.ppm |= ppm;
- device->path_data.tbvpm |= epm;
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_put_device(device);
if (data->isglobal)
@@ -1441,11 +1637,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->default_expires = value;
}
- /* Generate device unique id */
- rc = dasd_eckd_generate_uid(device);
- if (rc)
- goto out_err1;
-
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
@@ -2206,7 +2397,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
- last_trk, cmd, startdev) == -EAGAIN) {
+ last_trk, cmd, basedev) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
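Both the read_conf mismatch message and the path verification error path above format a DASD UID as vendor.serial.ssid.unit, appending the vduit token only when it is present. A minimal userspace sketch of that formatting rule; the struct below is a simplified stand-in for struct dasd_uid with assumed field sizes, not the kernel definition:

#include <stdio.h>
#include <string.h>

struct demo_uid {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
	unsigned char real_unit_addr;
	char vduit[33];
};

/* Format a UID the way the dasd_eckd error paths do: append ".vduit" only
 * when the virtual-disk token is present. */
static void format_uid(const struct demo_uid *uid, char *buf, size_t len)
{
	if (strlen(uid->vduit) > 0)
		snprintf(buf, len, "%s.%s.%04x.%02x.%s",
			 uid->vendor, uid->serial, uid->ssid,
			 uid->real_unit_addr, uid->vduit);
	else
		snprintf(buf, len, "%s.%s.%04x.%02x",
			 uid->vendor, uid->serial, uid->ssid,
			 uid->real_unit_addr);
}

int main(void)
{
	struct demo_uid uid = { "IBM", "000000012345", 0x1234, 0x5e, "" };
	char buf[60];

	format_uid(&uid, buf, sizeof(buf));
	printf("%s\n", buf);	/* e.g. IBM.000000012345.1234.5e */
	return 0;
}

The duplicated snprintf sequences in dasd_eckd_read_conf and do_path_verification_work could be factored into a helper of this shape.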
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 98f3e4ade92..690c3338a8a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -36,7 +36,7 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 95b909ac2b7..3c03c1060be 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
@@ -31,14 +31,14 @@ static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
- struct sys_device *sysdev;
+ struct device *dev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 9e32780c317..ba2092f741d 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
-#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/device.h>
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd4..a84631a7391 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path descriptor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e..4a1ff5c2eb8 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1..21908e67bf6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
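The new comment in cio.h and the reordered css.c code encode a simple rule: a scheduled todo is only replaced by one of higher priority, and an -EAGAIN from css_evaluate_known_subchannel() causes the same todo to be rescheduled. A minimal sketch of just the priority check, with the workqueue, locking and reference counting omitted:

#include <stdio.h>

enum sch_todo { SCH_TODO_NOTHING, SCH_TODO_EVAL, SCH_TODO_UNREG };

struct demo_sch { enum sch_todo todo; };

/* Mirror of the check in css_sched_sch_todo(): a pending request with equal
 * or higher priority wins, so a later lower-priority request is dropped. */
static int sched_todo(struct demo_sch *sch, enum sch_todo todo)
{
	if (sch->todo >= todo)
		return 0;	/* keep the already pending request */
	sch->todo = todo;
	return 1;		/* the real code queues todo_work here */
}

int main(void)
{
	struct demo_sch sch = { SCH_TODO_NOTHING };

	printf("%d\n", sched_todo(&sch, SCH_TODO_EVAL));	/* 1: queued   */
	printf("%d\n", sched_todo(&sch, SCH_TODO_UNREG));	/* 1: upgraded */
	printf("%d\n", sched_todo(&sch, SCH_TODO_EVAL));	/* 0: ignored  */
	return 0;
}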
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0eca..47269858ecb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b1..1b853513c89 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -520,12 +538,8 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735..ec7fb6d3b47 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c1..76253dfcc1b 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
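Widening fake_irb from one bit to two lets the VERIFY-state start paths record which kind of fake interrupt to deliver later: FAKE_CMD_IRB for ccw_device_start_key() and FAKE_TM_IRB for ccw_device_tm_start_key(), which create_fake_irb() then turns into a command- or transport-mode SCSW. A small sketch of the flag itself, with the surrounding state machine omitted:

#include <stdio.h>

#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB  2

struct demo_priv {
	unsigned int fake_irb:2;	/* a 1-bit flag could not tell the two apart */
};

int main(void)
{
	struct demo_priv p = { .fake_irb = 0 };

	p.fake_irb = FAKE_TM_IRB;	/* remembered while the device is in DEV_STATE_VERIFY */
	printf("deliver fake %s IRB\n",
	       p.fake_irb == FAKE_TM_IRB ? "transport-mode" : "command-mode");
	return 0;
}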
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 2acc01f90a6..452989a7ec1 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -22,12 +22,9 @@
static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
-struct qaob *qdio_allocate_aob()
+struct qaob *qdio_allocate_aob(void)
{
- struct qaob *aob;
-
- aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
- return aob;
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);
@@ -180,7 +177,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec94f049e99..96bbe9d12a7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index dd4737808e0..077b7d109fd 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -56,11 +56,6 @@
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
-#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
-#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
-
-#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
#define PCIXCC_CLEANUP_TIME (15*HZ)
@@ -265,7 +260,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or -EFAULT, -EINVAL.
*/
struct type86_fmt2_msg {
struct type86_hdr hdr;
@@ -295,19 +290,12 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
- return -EFAULT;
- replylen = CEIL4(xcRB->reply_control_blk_length) +
- CEIL4(xcRB->reply_data_length) +
- sizeof(struct type86_fmt2_msg);
- if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
- xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
- (sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_data_length));
- }
+ return -EINVAL;
+ replylen = sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+ return -EINVAL;
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
@@ -326,7 +314,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length)
- return -EFAULT;
+ return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
@@ -678,7 +666,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
break;
case PCIXCC_RESPONSE_TYPE_XCRB:
length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
+ length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
default:
@@ -1043,7 +1031,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
struct zcrypt_device *zdev;
int rc = 0;
- zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
+ zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
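The reworked length check above no longer truncates oversized replies; it computes the expected reply size and rejects anything larger than one XCRB message with -EINVAL. A small sketch of that computation, where CEIL4() stands in for the driver's round-up-to-4 helper and the sizes are made up for illustration:

#include <stdio.h>

#define CEIL4(x)	(((x) + 3) & ~3u)
#define MAX_MSG_SIZE	(12 * 1024)

int main(void)
{
	unsigned int hdr = 12;			/* stand-in for sizeof(struct type86_fmt2_msg) */
	unsigned int ctrl = 1021, data = 2048;	/* hypothetical user-supplied lengths */
	unsigned int replylen = hdr + CEIL4(ctrl) + data;

	printf("replylen=%u -> %s\n", replylen,
	       replylen > MAX_MSG_SIZE ? "-EINVAL" : "ok");
	return 0;
}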
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 94f49ffa70b..8af868bab20 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -263,6 +263,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/*
* The config ops structure as defined by virtio config
*/
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.reset = kvm_reset,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
+ .bus_name = kvm_bus_name,
};
/*
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b6a6356d09b..8160591913f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,6 +63,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
struct net_device *netdev;
struct connection_profile prof;
char userid[9];
+ char userdata[17];
};
/**
@@ -263,7 +265,7 @@ struct ll_header {
};
#define NETIUCV_HDRLEN (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX 32768
+#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
return test_and_set_bit(0, &priv->tbusy);
}
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
*
* @returns The printable string (static data!!)
*/
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
{
- static char tmp[9];
+ static char tmp[17];
char *p = tmp;
- memcpy(tmp, name, 8);
- tmp[8] = '\0';
- while (*p && (!isspace(*p)))
+ memcpy(tmp, name, len);
+ tmp[len] = '\0';
+ while (*p && ((p - tmp) < len) && (!isspace(*p)))
p++;
*p = '\0';
return tmp;
}
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+ static char tmp_uid[9];
+ static char tmp_udat[17];
+ static char buf[100];
+
+ if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+ tmp_uid[8] = '\0';
+ tmp_udat[16] = '\0';
+ memcpy(tmp_uid, conn->userid, 8);
+ memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_udat, conn->userdata, 16);
+ EBCASC(tmp_udat, 16);
+ memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+ sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+ return buf;
+ } else
+ return netiucv_printname(conn->userid, 8);
+}
+
/**
* States of the interface statemachine.
*/
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
+ static char tmp_user[9];
+ static char tmp_udat[17];
int rc;
- if (memcmp(iucvMagic, ipuser, 16))
- /* ipuser must match iucvMagic. */
- return -EINVAL;
rc = -EINVAL;
+ memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+ memcpy(tmp_udat, ipuser, 16);
+ EBCASC(tmp_udat, 16);
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(conn, &iucv_connection_list, list) {
- if (strncmp(ipvmid, conn->userid, 8))
+ if (strncmp(ipvmid, conn->userid, 8) ||
+ strncmp(ipuser, conn->userdata, 16))
continue;
/* Found a matching connection for this path. */
conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
rc = 0;
}
+ IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+ tmp_user, netiucv_printname(tmp_udat, 16));
read_unlock_bh(&iucv_connection_rwlock);
return rc;
}
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
path->flags = 0;
- rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+ rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
+ iucv_path_sever(conn->path, conn->userdata);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
- dev_info(privptr->dev, "The peer interface of the IUCV device"
- " has closed the connection\n");
+ iucv_path_sever(conn->path, conn->userdata);
+ dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+ "connection\n", netiucv_printuser(conn));
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
- netdev->name, conn->userid);
/*
* We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+ netdev->name, netiucv_printuser(conn));
+
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
- NULL, iucvMagic, conn);
+ NULL, conn->userdata, conn);
switch (rc) {
case 0:
netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
case 11:
dev_warn(privptr->dev,
"The IUCV device failed to connect to z/VM guest %s\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
dev_warn(privptr->dev,
"The IUCV device failed to connect to the peer on z/VM"
- " guest %s\n", netiucv_printname(conn->userid));
+ " guest %s\n", netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
dev_err(privptr->dev,
"z/VM guest %s has too many IUCV connections"
" to connect with the IUCV device\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(privptr->dev,
"The IUCV device has been connected"
- " successfully to %s\n", privptr->conn->userid);
+ " successfully to %s\n",
+ netiucv_printuser(privptr->conn));
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+ return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+ char *userdata)
{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->conn->netdev;
- char *p;
- char *tmp;
- char username[9];
- int i;
- struct iucv_connection *cp;
+ const char *p;
+ int i;
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (count > 9) {
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int) count);
+ p = strchr(buf, '.');
+ if ((p && ((count > 26) ||
+ ((p - buf) > 8) ||
+ (buf + count - p > 18))) ||
+ (!p && (count > 9))) {
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
- tmp = strsep((char **) &buf, "\n");
- for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$')) {
- username[i]= toupper(*p);
+ for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
continue;
}
- if (*p == '\n') {
+ if (*p == '\n')
/* trailing lf, grr */
break;
- }
IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n", *p);
+ "conn_write: invalid character %02x\n", *p);
return -EINVAL;
}
while (i < 8)
username[i++] = ' ';
username[8] = '\0';
+ if (*p == '.') {
+ p++;
+ for (i = 0; i < 16 && *p; i++, p++) {
+ if (*p == '\n')
+ break;
+ userdata[i] = toupper(*p);
+ }
+ while (i > 0 && i < 16)
+ userdata[i++] = ' ';
+ } else
+ memcpy(userdata, iucvMagic_ascii, 16);
+ userdata[16] = '\0';
+ ASCEBC(userdata, 16);
+
+ return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
if (memcmp(username, priv->conn->userid, 9) &&
(ndev->flags & (IFF_UP | IFF_RUNNING))) {
/* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
}
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
+ memcpy(priv->conn->userdata, userdata, 17);
return count;
}
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+ *e);
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
* Add it to the list of netiucv connections;
*/
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
- char *username)
+ char *username,
+ char *userdata)
{
struct iucv_connection *conn;
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+ if (userdata)
+ memcpy(conn->userdata, userdata, 17);
if (username) {
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
+
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
/**
* Allocate and initialize everything of a net device.
*/
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
struct netiucv_priv *privptr;
struct net_device *dev;
@@ -2004,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
if (!privptr->fsm)
goto out_netdev;
- privptr->conn = netiucv_new_connection(dev, username);
+ privptr->conn = netiucv_new_connection(dev, username, userdata);
if (!privptr->conn) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
goto out_fsm;
@@ -2022,47 +2090,31 @@ out_netdev:
static ssize_t conn_write(struct device_driver *drv,
const char *buf, size_t count)
{
- const char *p;
char username[9];
- int i, rc;
+ char userdata[17];
+ int rc;
struct net_device *dev;
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
- if (count>9) {
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
- return -EINVAL;
- }
-
- for (i = 0, p = buf; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || *p == '$') {
- username[i] = toupper(*p);
- continue;
- }
- if (*p == '\n')
- /* trailing lf, grr */
- break;
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
- return -EINVAL;
- }
- while (i < 8)
- username[i++] = ' ';
- username[8] = '\0';
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9)) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17)) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
- dev = netiucv_init_netdevice(username);
+ dev = netiucv_init_netdevice(username, userdata);
if (!dev) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
@@ -2083,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
if (rc)
goto out_unreg;
- dev_info(priv->dev, "The IUCV interface to %s has been"
- " established successfully\n", netiucv_printname(username));
+ dev_info(priv->dev, "The IUCV interface to %s has been established "
+ "successfully\n",
+ netiucv_printuser(priv->conn));
return count;
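The new netiucv_check_user() accepts either a bare z/VM user ID or "USERID.USERDATA": up to eight alphanumeric (or '$') characters, optionally followed by a dot and up to sixteen characters of user data, both upper-cased and blank-padded. A userspace sketch of that parsing; the overall length checks, the default "magic" user data, and the ASCEBC() conversion to EBCDIC are left out, and the sample input is hypothetical:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int parse_user(const char *buf, char username[9], char userdata[17])
{
	const char *p = buf;
	int i;

	for (i = 0; i < 8 && *p && *p != '.' && *p != '\n'; i++, p++) {
		if (!isalnum((unsigned char)*p) && *p != '$')
			return -1;	/* invalid character */
		username[i] = toupper((unsigned char)*p);
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	memset(userdata, ' ', 16);
	if (*p == '.')
		for (i = 0, p++; i < 16 && *p && *p != '\n'; i++, p++)
			userdata[i] = toupper((unsigned char)*p);
	userdata[16] = '\0';
	return 0;
}

int main(void)
{
	char uid[9], udat[17];

	if (!parse_user("lnxguest.routerA\n", uid, udat))
		printf("'%s' / '%s'\n", uid, udat);
	return 0;
}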
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fff57de7894..9c3f38da4c0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
int bidx, int forced_cleanup)
{
+ if (q->card->options.cq != QETH_CQ_ENABLED)
+ return;
+
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
}
+ if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED)) {
+ /* for recovery situations */
+ q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+ qeth_init_qdio_out_buf(q, bidx);
+ QETH_CARD_TEXT(q->card, 2, "clprecov");
+ }
}
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
notification = TX_NOTIFY_OK;
} else {
BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
}
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
buffer->aob = NULL;
qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+
/* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -1113,11 +1123,25 @@ out:
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
struct sk_buff *skb;
+ struct iucv_sock *iucv;
+ int notify_general_error = 0;
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ notify_general_error = 1;
+
+ /* release may never happen from within CQ tasklet scope */
+ BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
skb = skb_dequeue(&buf->skb_list);
while (skb) {
QETH_CARD_TEXT(buf->q->card, 5, "skbr");
QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+ if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ }
+ }
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
@@ -1160,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, free);
+ qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1207,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_cq(card);
cancel_delayed_work_sync(&card->buffer_reclaim_work);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+ dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
kfree(card->qdio.in_q);
card->qdio.in_q = NULL;
/* inbound buffer pool */
@@ -1329,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
static void qeth_start_kernel_thread(struct work_struct *work)
{
+ struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1336,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
return;
- if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
- kthread_run(card->discipline.recover, (void *) card,
+ if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+ ts = kthread_run(card->discipline.recover, (void *)card,
"qeth_recover");
+ if (IS_ERR(ts)) {
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card,
+ QETH_RECOVER_THREAD);
+ }
+ }
}
static int qeth_setup_card(struct qeth_card *card)
@@ -4521,7 +4552,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
init_data.output_handler = card->discipline.output_handler;
- init_data.queue_start_poll = queue_start_poll;
+ init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
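The recovery-thread fix above relies on the kernel convention that kthread_run() reports failure by returning an errno value encoded as a pointer, which IS_ERR() detects so the thread-start and thread-running bits can be cleared again. A userspace model of that convention; MAX_ERRNO matches the kernel's value, the rest is simplified:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline int demo_is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long demo_ptr_err(const void *ptr)
{
	return (long)ptr;
}

int main(void)
{
	void *ok = &(int){ 0 };
	void *bad = (void *)(long)-11;	/* -EAGAIN encoded as a pointer */

	printf("ok:  IS_ERR=%d\n", demo_is_err(ok));
	printf("bad: IS_ERR=%d err=%ld\n", demo_is_err(bad), demo_ptr_err(bad));
	return 0;
}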
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a21ae3d549d..c1296713311 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
spin_unlock_bh(&card->vlanlock);
}
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
- return;
+ return 0;
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "aidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "aidREC");
- return;
+ return 0;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
+ } else {
+ return -ENOMEM;
}
+ return 0;
}
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "kidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
+ return 0;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
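qeth_l2 (and qeth_l3 below) are adjusted to the changed ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid callbacks, which now return an int so failures such as the -ENOMEM case above can be reported instead of silently dropped. A simplified stand-in for that signature change, not the kernel's net_device_ops:

#include <stdio.h>
#include <errno.h>

struct demo_dev;

struct demo_vlan_ops {
	/* previously: void (*add_vid)(struct demo_dev *, unsigned short); */
	int (*add_vid)(struct demo_dev *dev, unsigned short vid);
	int (*kill_vid)(struct demo_dev *dev, unsigned short vid);
};

/* A callback in the new style: 0 on success, negative errno on failure. */
static int demo_add_vid(struct demo_dev *dev, unsigned short vid)
{
	(void)dev;
	if (!vid)
		return 0;		/* nothing to track, but not an error */
	return vid > 4094 ? -EINVAL : 0;
}

int main(void)
{
	struct demo_vlan_ops ops = { .add_vid = demo_add_vid };

	printf("add vid 100:  %d\n", ops.add_vid(NULL, 100));
	printf("add vid 5000: %d\n", ops.add_vid(NULL, 5000));
	return 0;
}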
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4d5307ddbe5..9648e4e6833 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
qeth_l3_free_vlan_addresses6(card, vid);
}
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
set_bit(vid, card->active_vlans);
- return;
+ return 0;
}
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
+ return 0;
}
static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n) {
cast_type = n->type;
rcu_read_unlock();
@@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (ipv == 4) {
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3209,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -3223,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
u32 changed = dev->features ^ features;
@@ -3489,14 +3492,13 @@ contin:
else
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
+ rtnl_lock();
if (recovery_mode)
__qeth_l3_open(card->dev);
- else {
- rtnl_lock();
+ else
dev_open(card->dev);
- rtnl_unlock();
- }
qeth_l3_set_multicast_list(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3542,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->info.hwtrap = 1;
}
qeth_l3_stop_card(card, recovery_mode);
+ if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+ rtnl_unlock();
+ }
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3596,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f88822..b79576b64f4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491..54266829290 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
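The sbus char drivers in this series drop their hand-written init/exit functions in favour of module_platform_driver(), which generates the same registration boilerplate. Roughly, for the bbc_i2c conversion above it stands in for code of this shape (an illustration of the pattern, not the verbatim macro from <linux/platform_device.h>):

static int __init bbc_i2c_driver_init(void)
{
	return platform_driver_register(&bbc_i2c_driver);
}
module_init(bbc_i2c_driver_init);

static void __exit bbc_i2c_driver_exit(void)
{
	platform_driver_unregister(&bbc_i2c_driver);
}
module_exit(bbc_i2c_driver_exit);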
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66..4b9939726c3 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154..339fd6f65ed 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaa..826157f3869 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26..0b31658ccde 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 8a0b3303317..0bd38da4ada 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -650,6 +650,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
+ kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index e4a77872030..2e3117aa382 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -1,5 +1,5 @@
/*
- * Aic7xxx SCSI host adapter firmware asssembler
+ * Aic7xxx SCSI host adapter firmware assembler
*
* Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
* Copyright (c) 2001, 2002 Adaptec Inc.
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8b002f6db6c..33c8f09c7ac 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -733,7 +733,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
-mode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t be2iscsi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 4a1f2e393f3..5c45be13450 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,7 +26,7 @@
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
-mode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t be2iscsi_attr_is_visible(int param_type, int param);
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 379c696dac1..375756fa95c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,9 +325,9 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
}
-static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
@@ -348,9 +348,9 @@ static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
return rc;
}
-static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
@@ -364,9 +364,9 @@ static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
}
-static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
struct be_status_bhs *sts_bhs =
(struct be_status_bhs *)io_task->cmd_bhs;
struct iscsi_conn *conn = beiscsi_conn->conn;
- unsigned int sense_len;
unsigned char *sense;
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
}
if (status == SAM_STAT_CHECK_CONDITION) {
+ u16 sense_len;
unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
sense = sts_bhs->sense_info + sizeof(unsigned short);
- sense_len = cpu_to_be16(*slen);
+ sense_len = be16_to_cpu(*slen);
memcpy(task->sc->sense_buffer, sense,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
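
The sense-length fix matters on little-endian hosts: the firmware stores the length in big-endian order, so the driver must convert from wire order with be16_to_cpu(); the old cpu_to_be16() only happened to give the same result on big-endian machines. Keeping the value in a u16 also matches the min_t(u16, ...) clamp. A hedged sketch of the conversion; the buffer layout and names are illustrative:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_SENSE_BUFFERSIZE 96

/* sense_info starts with a big-endian u16 length, followed by the data. */
static void example_copy_sense(unsigned char *dst,
                               const unsigned char *sense_info)
{
        __be16 wire_len;
        u16 sense_len;

        memcpy(&wire_len, sense_info, sizeof(wire_len));
        sense_len = be16_to_cpu(wire_len);      /* wire (BE) -> host order */

        memcpy(dst, sense_info + sizeof(wire_len),
               min_t(u16, sense_len, EXAMPLE_SENSE_BUFFERSIZE));
}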
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 7b3d235d20b..b5a1595cc0a 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -902,7 +902,7 @@ struct sfp_mem_s {
union sfp_xcvr_e10g_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 e10g_unall:1; /* 10G Ethernet compliance */
u8 e10g_lrm:1;
u8 e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
union sfp_xcvr_fc3_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 rsv4:1;
u8 mb800:1; /* 800 Mbytes/sec */
u8 mb1600:1; /* 1600 Mbytes/sec */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 863c6ba7d5e..78963be2c4f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,22 +34,22 @@
struct bfa_iocfc_intr_attr_s {
u8 coalesce; /* enable/disable coalescing */
u8 rsvd[3];
- __be16 latency; /* latency in microseconds */
- __be16 delay; /* delay in microseconds */
+ __be16 latency; /* latency in microseconds */
+ __be16 delay; /* delay in microseconds */
};
/*
 * IOC firmware configuration
*/
struct bfa_iocfc_fwcfg_s {
- u16 num_fabrics; /* number of fabrics */
- u16 num_lports; /* number of local lports */
- u16 num_rports; /* number of remote ports */
- u16 num_ioim_reqs; /* number of IO reqs */
- u16 num_tskim_reqs; /* task management requests */
- u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
- u16 num_fcxp_reqs; /* unassisted FC exchanges */
- u16 num_uf_bufs; /* unsolicited recv buffers */
+ u16 num_fabrics; /* number of fabrics */
+ u16 num_lports; /* number of local lports */
+ u16 num_rports; /* number of remote ports */
+ u16 num_ioim_reqs; /* number of IO reqs */
+ u16 num_tskim_reqs; /* task management requests */
+ u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
+ u16 num_fcxp_reqs; /* unassisted FC exchanges */
+ u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
u8 rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
#pragma pack()
struct bfa_iocfc_drvcfg_s {
- u16 num_reqq_elems; /* number of req queue elements */
- u16 num_rspq_elems; /* number of rsp queue elements */
- u16 num_sgpgs; /* number of total SG pages */
- u16 num_sboot_tgts; /* number of SAN boot targets */
- u16 num_sboot_luns; /* number of SAN boot luns */
- u16 ioc_recover; /* IOC recovery mode */
- u16 min_cfg; /* minimum configuration */
- u16 path_tov; /* device path timeout */
- u16 num_tio_reqs; /*!< number of TM IO reqs */
+ u16 num_reqq_elems; /* number of req queue elements */
+ u16 num_rspq_elems; /* number of rsp queue elements */
+ u16 num_sgpgs; /* number of total SG pages */
+ u16 num_sboot_tgts; /* number of SAN boot targets */
+ u16 num_sboot_luns; /* number of SAN boot luns */
+ u16 ioc_recover; /* IOC recovery mode */
+ u16 min_cfg; /* minimum configuration */
+ u16 path_tov; /* device path timeout */
+ u16 num_tio_reqs; /* number of TM IO reqs */
u8 port_mode;
u8 rsvd_a;
- bfa_boolean_t delay_comp; /* delay completion of
- failed inflight IOs */
+ bfa_boolean_t delay_comp; /* delay completion of failed
+ * inflight IOs */
u16 num_ttsk_reqs; /* TM task management requests */
u32 rsvd;
};
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
u32 fw_frm_drop; /* f/w drop the frame */
u32 rec_timeout; /* FW rec timed out */
- u32 error_rec; /* FW sending rec on
- * an error condition*/
+ u32 error_rec; /* FW sending rec on
+ * an error condition*/
u32 wait_for_si; /* FW wait for SI */
u32 rec_rsp_inval; /* REC rsp invalid */
u32 seqr_io_abort; /* target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
u32 unexp_fcp_rsp; /* fcp response in wrong state */
u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
- u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
+ u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
- u32 fcp_rsp_resid_inval; /* invalid residue */
+ u32 fcp_rsp_resid_inval; /* invalid residue */
u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
u32 ioh_hit_class2_event; /* IOH hit class2 */
u32 ioh_miss_other_event; /* IOH miss other */
u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
- u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
- * bytes xfered */
+ u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
+ * bytes xfered */
u32 ioh_seq_len_err_event; /* IOH seq len error */
u32 ioh_data_oor_event; /* Data out of range */
u32 ioh_ro_ooo_event; /* Relative offset out of range */
u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
u32 ioh_unexp_frame_event; /* unexpected frame received
- * count */
+ * count */
u32 ioh_err_int; /* IOH error int during data-phase
- * for scsi write
- */
+ * for scsi write */
};
struct bfa_fw_tio_stats_s {
- u32 tio_conf_proc; /* TIO CONF processed */
+ u32 tio_conf_proc; /* TIO CONF processed */
u32 tio_conf_drop; /* TIO CONF dropped */
u32 tio_cleanup_req; /* TIO cleanup requested */
u32 tio_cleanup_comp; /* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
u32 tio_abts_req; /* TIO ABTS requested */
u32 tio_abts_ack; /* TIO ABTS ack-ed */
- u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+ u32 tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
u32 tio_abts_tmo; /* TIO ABTS timeout */
u32 tio_snsdata_dma; /* TIO sense data DMA */
- u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
- u32 tio_rxwchan_avail; /* TIO RX wait channel available */
+ u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+ u32 tio_rxwchan_avail; /* TIO RX wait channel available */
u32 tio_hit_bls; /* TIO IOH BLS event */
u32 tio_uf_recv; /* TIO received UF */
- u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
- u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+ u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+ u32 tio_wr_invalid_sm; /* TIO write reqst in wrong state machine */
- u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
- u32 ds_rxwchan_avail; /* DS RX wait channel available */
+ u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
+ u32 ds_rxwchan_avail; /* DS RX wait channel available */
u32 ds_unaligned_rd; /* DS unaligned read */
- u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
- u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+ u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+ * machine */
+ u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+ * machine */
u32 ds_flush_req; /* DS flush requested */
u32 ds_flush_comp; /* DS flush completed */
u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
u32 ds_seq_cnt_err; /* DS seq cnt error */
u32 ds_seq_len_err; /* DS seq len error */
u32 ds_data_oor; /* DS data out of order */
- u32 ds_hit_bls; /* DS hit BLS */
+ u32 ds_hit_bls; /* DS hit BLS */
u32 ds_edtov_timer_exp; /* DS edtov expired */
u32 ds_cpu_owned; /* DS cpu owned */
u32 ds_hit_class2; /* DS hit class2 */
u32 ds_length_err; /* DS length error */
u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
- u32 ds_rectov_timer_exp; /* DS rectov expired */
+ u32 ds_rectov_timer_exp;/* DS rectov expired */
u32 ds_unexp_fr_err; /* DS unexp frame error */
};
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
*/
struct bfa_fw_port_fpg_stats_s {
- u32 intr_evt;
- u32 intr;
- u32 intr_excess;
- u32 intr_cause0;
- u32 intr_other;
- u32 intr_other_ign;
- u32 sig_lost;
- u32 sig_regained;
- u32 sync_lost;
- u32 sync_to;
- u32 sync_regained;
- u32 div2_overflow;
- u32 div2_underflow;
- u32 efifo_overflow;
- u32 efifo_underflow;
- u32 idle_rx;
- u32 lrr_rx;
- u32 lr_rx;
- u32 ols_rx;
- u32 nos_rx;
- u32 lip_rx;
- u32 arbf0_rx;
- u32 arb_rx;
- u32 mrk_rx;
- u32 const_mrk_rx;
- u32 prim_unknown;
+ u32 intr_evt;
+ u32 intr;
+ u32 intr_excess;
+ u32 intr_cause0;
+ u32 intr_other;
+ u32 intr_other_ign;
+ u32 sig_lost;
+ u32 sig_regained;
+ u32 sync_lost;
+ u32 sync_to;
+ u32 sync_regained;
+ u32 div2_overflow;
+ u32 div2_underflow;
+ u32 efifo_overflow;
+ u32 efifo_underflow;
+ u32 idle_rx;
+ u32 lrr_rx;
+ u32 lr_rx;
+ u32 ols_rx;
+ u32 nos_rx;
+ u32 lip_rx;
+ u32 arbf0_rx;
+ u32 arb_rx;
+ u32 mrk_rx;
+ u32 const_mrk_rx;
+ u32 prim_unknown;
};
struct bfa_fw_port_lksm_stats_s {
- u32 hwsm_success; /* hwsm state machine success */
- u32 hwsm_fails; /* hwsm fails */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_fails; /* swsm fails */
- u32 swsm_wdtov; /* swsm timed out */
- u32 busybufs; /* link init failed due to busybuf */
- u32 buf_waits; /* bufwait state entries */
- u32 link_fails; /* link failures */
- u32 psp_errors; /* primitive sequence protocol errors */
- u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
- u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
- u32 lr_tx; /* No. of times LR tx started */
- u32 lrr_tx; /* No. of times LRR tx started */
- u32 ols_tx; /* No. of times OLS tx started */
- u32 nos_tx; /* No. of times NOS tx started */
- u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
- u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
- u32 bbsc_lr; /* LKSM LR tx for credit recovery */
+ u32 hwsm_success; /* hwsm state machine success */
+ u32 hwsm_fails; /* hwsm fails */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_fails; /* swsm fails */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 busybufs; /* link init failed due to busybuf */
+ u32 buf_waits; /* bufwait state entries */
+ u32 link_fails; /* link failures */
+ u32 psp_errors; /* primitive sequence protocol errors */
+ u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
+ u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
+ u32 lr_tx; /* No. of times LR tx started */
+ u32 lrr_tx; /* No. of times LRR tx started */
+ u32 ols_tx; /* No. of times OLS tx started */
+ u32 nos_tx; /* No. of times NOS tx started */
+ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
+ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+ u32 bbsc_lr; /* LKSM LR tx for credit recovery */
};
struct bfa_fw_port_snsm_stats_s {
- u32 hwsm_success; /* Successful hwsm terminations */
- u32 hwsm_fails; /* hwsm fail count */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_wdtov; /* swsm timed out */
- u32 error_resets; /* error resets initiated by upsm */
- u32 sync_lost; /* Sync loss count */
- u32 sig_lost; /* Signal loss count */
- u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
+ u32 hwsm_success; /* Successful hwsm terminations */
+ u32 hwsm_fails; /* hwsm fail count */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 error_resets; /* error resets initiated by upsm */
+ u32 sync_lost; /* Sync loss count */
+ u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
};
struct bfa_fw_port_physm_stats_s {
- u32 module_inserts; /* Module insert count */
- u32 module_xtracts; /* Module extracts count */
- u32 module_invalids; /* Invalid module inserted count */
- u32 module_read_ign; /* Module validation status ignored */
- u32 laser_faults; /* Laser fault count */
- u32 rsvd;
+ u32 module_inserts; /* Module insert count */
+ u32 module_xtracts; /* Module extracts count */
+ u32 module_invalids; /* Invalid module inserted count */
+ u32 module_read_ign; /* Module validation status ignored */
+ u32 laser_faults; /* Laser fault count */
+ u32 rsvd;
};
struct bfa_fw_fip_stats_s {
- u32 vlan_req; /* vlan discovery requests */
- u32 vlan_notify; /* vlan notifications */
- u32 vlan_err; /* vlan response error */
- u32 vlan_timeouts; /* vlan disvoery timeouts */
- u32 vlan_invalids; /* invalid vlan in discovery advert. */
- u32 disc_req; /* Discovery solicit requests */
- u32 disc_rsp; /* Discovery solicit response */
- u32 disc_err; /* Discovery advt. parse errors */
- u32 disc_unsol; /* Discovery unsolicited */
- u32 disc_timeouts; /* Discovery timeouts */
- u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
- u32 linksvc_unsupp; /* Unsupported link service req */
- u32 linksvc_err; /* Parse error in link service req */
- u32 logo_req; /* FIP logos received */
- u32 clrvlink_req; /* Clear virtual link req */
- u32 op_unsupp; /* Unsupported FIP operation */
- u32 untagged; /* Untagged frames (ignored) */
- u32 invalid_version; /* Invalid FIP version */
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan discovery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
+ u32 disc_req; /* Discovery solicit requests */
+ u32 disc_rsp; /* Discovery solicit response */
+ u32 disc_err; /* Discovery advt. parse errors */
+ u32 disc_unsol; /* Discovery unsolicited */
+ u32 disc_timeouts; /* Discovery timeouts */
+ u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
+ u32 linksvc_unsupp; /* Unsupported link service req */
+ u32 linksvc_err; /* Parse error in link service req */
+ u32 logo_req; /* FIP logos received */
+ u32 clrvlink_req; /* Clear virtual link req */
+ u32 op_unsupp; /* Unsupported FIP operation */
+ u32 untagged; /* Untagged frames (ignored) */
+ u32 invalid_version; /* Invalid FIP version */
};
struct bfa_fw_lps_stats_s {
- u32 mac_invalids; /* Invalid mac assigned */
- u32 rsvd;
+ u32 mac_invalids; /* Invalid mac assigned */
+ u32 rsvd;
};
struct bfa_fw_fcoe_stats_s {
- u32 cee_linkups; /* CEE link up count */
- u32 cee_linkdns; /* CEE link down count */
- u32 fip_linkups; /* FIP link up count */
- u32 fip_linkdns; /* FIP link up count */
- u32 fip_fails; /* FIP fail count */
- u32 mac_invalids; /* Invalid mac assigned */
+ u32 cee_linkups; /* CEE link up count */
+ u32 cee_linkdns; /* CEE link down count */
+ u32 fip_linkups; /* FIP link up count */
+ u32 fip_linkdns; /* FIP link down count */
+ u32 fip_fails; /* FIP fail count */
+ u32 mac_invalids; /* Invalid mac assigned */
};
/*
* IOC firmware FCoE port stats
*/
struct bfa_fw_fcoe_port_stats_s {
- struct bfa_fw_fcoe_stats_s fcoe_stats;
- struct bfa_fw_fip_stats_s fip_stats;
+ struct bfa_fw_fcoe_stats_s fcoe_stats;
+ struct bfa_fw_fip_stats_s fip_stats;
};
/*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
* IOC firmware FC port stats
*/
union bfa_fw_fc_port_stats_s {
- struct bfa_fw_fc_uport_stats_s fc_stats;
- struct bfa_fw_fcoe_port_stats_s fcoe_stats;
+ struct bfa_fw_fc_uport_stats_s fc_stats;
+ struct bfa_fw_fcoe_port_stats_s fcoe_stats;
};
/*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
*/
struct bfa_fw_trunk_stats_s {
u32 emt_recvd; /* Trunk EMT received */
- u32 emt_accepted; /* Trunk EMT Accepted */
- u32 emt_rejected; /* Trunk EMT rejected */
+ u32 emt_accepted; /* Trunk EMT Accepted */
+ u32 emt_rejected; /* Trunk EMT rejected */
u32 etp_recvd; /* Trunk ETP received */
- u32 etp_accepted; /* Trunk ETP Accepted */
- u32 etp_rejected; /* Trunk ETP rejected */
+ u32 etp_accepted; /* Trunk ETP Accepted */
+ u32 etp_rejected; /* Trunk ETP rejected */
u32 lr_recvd; /* Trunk LR received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 rsvd; /* padding for 64 bit alignment */
};
struct bfa_fw_advsm_stats_s {
u32 flogi_sent; /* Flogi sent */
u32 flogi_acc_recvd; /* Flogi Acc received */
u32 flogi_rjt_recvd; /* Flogi rejects received */
- u32 flogi_retries; /* Flogi retries */
+ u32 flogi_retries; /* Flogi retries */
u32 elp_recvd; /* ELP received */
- u32 elp_accepted; /* ELP Accepted */
- u32 elp_rejected; /* ELP rejected */
- u32 elp_dropped; /* ELP dropped */
+ u32 elp_accepted; /* ELP Accepted */
+ u32 elp_rejected; /* ELP rejected */
+ u32 elp_dropped; /* ELP dropped */
};
/*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
u16 total_vc_count; /* Total VC Count */
u16 shared_credit;
u32 elp_opmode_flags;
- struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
+ struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
* total_vc_count */
};
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
struct bfa_qos_stats_s {
u32 flogi_sent; /* QoS Flogi sent */
u32 flogi_acc_recvd; /* QoS Flogi Acc received */
- u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
+ u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
u32 flogi_retries; /* QoS Flogi retries */
u32 elp_recvd; /* QoS ELP received */
u32 elp_accepted; /* QoS ELP Accepted */
- u32 elp_rejected; /* QoS ELP rejected */
- u32 elp_dropped; /* QoS ELP dropped */
+ u32 elp_rejected; /* QoS ELP rejected */
+ u32 elp_dropped; /* QoS ELP dropped */
- u32 qos_rscn_recvd; /* QoS RSCN received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 qos_rscn_recvd; /* QoS RSCN received */
+ u32 rsvd; /* padding for 64 bit alignment */
};
/*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
*/
struct bfa_fcoe_stats_s {
u64 secs_reset; /* Seconds since stats reset */
- u64 cee_linkups; /* CEE link up */
+ u64 cee_linkups; /* CEE link up */
u64 cee_linkdns; /* CEE link down */
- u64 fip_linkups; /* FIP link up */
+ u64 fip_linkups; /* FIP link up */
u64 fip_linkdns; /* FIP link down */
u64 fip_fails; /* FIP failures */
u64 mac_invalids; /* Invalid mac assignments */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
u64 vlan_timeouts; /* Vlan request timeouts */
u64 vlan_invalids; /* Vlan invalids */
u64 disc_req; /* Discovery requests */
- u64 disc_rsp; /* Discovery responses */
+ u64 disc_rsp; /* Discovery responses */
u64 disc_err; /* Discovery error frames */
u64 disc_unsol; /* Discovery unsolicited */
u64 disc_timeouts; /* Discovery timeouts */
u64 disc_fcf_unavail; /* Discovery FCF not avail */
- u64 linksvc_unsupp; /* FIP link service req unsupp. */
- u64 linksvc_err; /* FIP link service req errors */
+ u64 linksvc_unsupp; /* FIP link service req unsupp */
+ u64 linksvc_err; /* FIP link service req errors */
u64 logo_req; /* FIP logos received */
- u64 clrvlink_req; /* Clear virtual link requests */
+ u64 clrvlink_req; /* Clear virtual link requests */
u64 op_unsupp; /* FIP operation unsupp. */
- u64 untagged; /* FIP untagged frames */
+ u64 untagged; /* FIP untagged frames */
u64 txf_ucast; /* Tx FCoE unicast frames */
- u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
+ u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
u64 txf_ucast_octets; /* Tx FCoE unicast octets */
u64 txf_mcast; /* Tx FCoE multicast frames */
- u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
+ u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
u64 txf_mcast_octets; /* Tx FCoE multicast octets */
u64 txf_bcast; /* Tx FCoE broadcast frames */
- u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
+ u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
- u64 txf_timeout; /* Tx timeouts */
+ u64 txf_timeout; /* Tx timeouts */
u64 txf_parity_errors; /* Transmit parity err */
- u64 txf_fid_parity_errors; /* Transmit FID parity err */
+ u64 txf_fid_parity_errors; /* Transmit FID parity err */
u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
u64 rxf_ucast; /* Rx FCoE unicast frames */
- u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
+ u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
u64 rxf_mcast; /* Rx FCoE multicast frames */
- u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
+ u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
u64 rxf_bcast; /* Rx FCoE broadcast frames */
- u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
+ u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
};
/*
@@ -852,12 +853,12 @@ struct bfa_port_cfg_s {
u8 tx_bbcredit; /* transmit buffer credits */
u8 ratelimit; /* ratelimit enabled or not */
u8 trl_def_speed; /* ratelimit default speed */
- u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
- u8 bb_scn_state; /* Config state of BB_SCN */
- u8 faa_state; /* FAA enabled/disabled */
- u8 rsvd[1];
- u16 path_tov; /* device path timeout */
- u16 q_depth; /* SCSI Queue depth */
+ u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
+ u8 bb_scn_state; /* Config state of BB_SCN */
+ u8 faa_state; /* FAA enabled/disabled */
+ u8 rsvd[1];
+ u16 path_tov; /* device path timeout */
+ u16 q_depth; /* SCSI Queue depth */
};
#pragma pack()
@@ -868,20 +869,21 @@ struct bfa_port_attr_s {
/*
* Static fields
*/
- wwn_t nwwn; /* node wwn */
- wwn_t pwwn; /* port wwn */
- wwn_t factorynwwn; /* factory node wwn */
- wwn_t factorypwwn; /* factory port wwn */
- enum fc_cos cos_supported; /* supported class of services */
- u32 rsvd;
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ wwn_t factorynwwn; /* factory node wwn */
+ wwn_t factorypwwn; /* factory port wwn */
+ enum fc_cos cos_supported; /* supported class of
+ * services */
+ u32 rsvd;
struct fc_symname_s port_symname; /* port symbolic name */
- enum bfa_port_speed speed_supported; /* supported speeds */
- bfa_boolean_t pbind_enabled;
+ enum bfa_port_speed speed_supported; /* supported speeds */
+ bfa_boolean_t pbind_enabled;
/*
* Configured values
*/
- struct bfa_port_cfg_s pport_cfg; /* pport cfg */
+ struct bfa_port_cfg_s pport_cfg; /* pport cfg */
/*
* Dynamic field - info from BFA
@@ -890,19 +892,20 @@ struct bfa_port_attr_s {
enum bfa_port_speed speed; /* current speed */
enum bfa_port_topology topology; /* current topology */
bfa_boolean_t beacon; /* current beacon status */
- bfa_boolean_t link_e2e_beacon; /* link beacon is on */
- bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */
+ bfa_boolean_t link_e2e_beacon; /* link beacon is on */
+ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper
+ * state */
/*
* Dynamic field - info from FCS
*/
- u32 pid; /* port ID */
+ u32 pid; /* port ID */
enum bfa_port_type port_type; /* current topology */
- u32 loopback; /* external loopback */
- u32 authfail; /* auth fail state */
+ u32 loopback; /* external loopback */
+ u32 authfail; /* auth fail state */
/* FCoE specific */
- u16 fcoe_vlan;
+ u16 fcoe_vlan;
u8 rsvd1[2];
};
@@ -910,48 +913,48 @@ struct bfa_port_attr_s {
* Port FCP mappings.
*/
struct bfa_port_fcpmap_s {
- char osdevname[256];
+ char osdevname[256];
u32 bus;
u32 target;
u32 oslun;
u32 fcid;
- wwn_t nwwn;
- wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t pwwn;
u64 fcplun;
- char luid[256];
+ char luid[256];
};
/*
* Port RNID info.
*/
struct bfa_port_rnid_s {
- wwn_t wwn;
+ wwn_t wwn;
u32 unittype;
u32 portid;
u32 attached_nodes_num;
u16 ip_version;
u16 udp_port;
- u8 ipaddr[16];
+ u8 ipaddr[16];
u16 rsvd;
u16 topologydiscoveryflags;
};
#pragma pack(1)
struct bfa_fcport_fcf_s {
- wwn_t name; /* FCF name */
- wwn_t fabric_name; /* Fabric Name */
- u8 fipenabled; /* FIP enabled or not */
- u8 fipfailed; /* FIP failed or not */
- u8 resv[2];
- u8 pri; /* FCF priority */
- u8 version; /* FIP version used */
- u8 available; /* Available for login */
- u8 fka_disabled; /* FKA is disabled */
- u8 maxsz_verified; /* FCoE max size verified */
- u8 fc_map[3]; /* FC map */
- __be16 vlan; /* FCoE vlan tag/priority */
- u32 fka_adv_per; /* FIP ka advert. period */
- mac_t mac; /* FCF mac */
+ wwn_t name; /* FCF name */
+ wwn_t fabric_name; /* Fabric Name */
+ u8 fipenabled; /* FIP enabled or not */
+ u8 fipfailed; /* FIP failed or not */
+ u8 resv[2];
+ u8 pri; /* FCF priority */
+ u8 version; /* FIP version used */
+ u8 available; /* Available for login */
+ u8 fka_disabled; /* FKA is disabled */
+ u8 maxsz_verified; /* FCoE max size verified */
+ u8 fc_map[3]; /* FC map */
+ __be16 vlan; /* FCoE vlan tag/priority */
+ u32 fka_adv_per; /* FIP ka advert. period */
+ mac_t mac; /* FCF mac */
};
/*
@@ -981,7 +984,7 @@ struct bfa_port_link_s {
u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
u8 topology; /* P2P/LOOP bfa_port_topology */
u8 speed; /* Link speed (1/2/4/8 G) */
- u32 linkstate_opt; /* Linkstate optional data (debug) */
+ u32 linkstate_opt; /* Linkstate optional data (debug) */
u8 trunked; /* Trunked or not (1 or 0) */
u8 resvd[3];
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
@@ -1035,7 +1038,7 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwc_del; /* fw create: delete events */
u32 sm_fwc_off; /* fw create: offline events */
u32 sm_fwc_hwf; /* fw create: IOC down */
- u32 sm_fwc_unexp; /* fw create: exception events*/
+ u32 sm_fwc_unexp; /* fw create: exception events*/
u32 sm_on_off; /* online: offline events */
u32 sm_on_del; /* online: delete events */
u32 sm_on_hwf; /* online: IOC down events */
@@ -1043,25 +1046,25 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwd_rsp; /* fw delete: fw responses */
u32 sm_fwd_del; /* fw delete: delete events */
u32 sm_fwd_hwf; /* fw delete: IOC down events */
- u32 sm_fwd_unexp; /* fw delete: exception events*/
+ u32 sm_fwd_unexp; /* fw delete: exception events*/
u32 sm_off_del; /* offline: delete events */
u32 sm_off_on; /* offline: online events */
u32 sm_off_hwf; /* offline: IOC down events */
- u32 sm_off_unexp; /* offline: exception events */
- u32 sm_del_fwrsp; /* delete: fw responses */
+ u32 sm_off_unexp; /* offline: exception events */
+ u32 sm_del_fwrsp; /* delete: fw responses */
u32 sm_del_hwf; /* delete: IOC down events */
- u32 sm_del_unexp; /* delete: exception events */
- u32 sm_delp_fwrsp; /* delete pend: fw responses */
+ u32 sm_del_unexp; /* delete: exception events */
+ u32 sm_delp_fwrsp; /* delete pend: fw responses */
u32 sm_delp_hwf; /* delete pend: IOC downs */
- u32 sm_delp_unexp; /* delete pend: exceptions */
- u32 sm_offp_fwrsp; /* off-pending: fw responses */
+ u32 sm_delp_unexp; /* delete pend: exceptions */
+ u32 sm_offp_fwrsp; /* off-pending: fw responses */
u32 sm_offp_del; /* off-pending: deletes */
u32 sm_offp_hwf; /* off-pending: IOC downs */
- u32 sm_offp_unexp; /* off-pending: exceptions */
+ u32 sm_offp_unexp; /* off-pending: exceptions */
u32 sm_iocd_off; /* IOC down: offline events */
u32 sm_iocd_del; /* IOC down: delete events */
u32 sm_iocd_on; /* IOC down: online events */
- u32 sm_iocd_unexp; /* IOC down: exceptions */
+ u32 sm_iocd_unexp; /* IOC down: exceptions */
u32 rsvd;
};
#pragma pack(1)
@@ -1069,9 +1072,9 @@ struct bfa_rport_hal_stats_s {
* Rport's QoS attributes
*/
struct bfa_rport_qos_attr_s {
- u8 qos_priority; /* rport's QoS priority */
- u8 rsvd[3];
- u32 qos_flow_id; /* QoS flow Id */
+ u8 qos_priority; /* rport's QoS priority */
+ u8 rsvd[3];
+ u32 qos_flow_id; /* QoS flow Id */
};
#pragma pack()
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 1ac5aecf25a..eca7ab78085 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
(xmtr_tech & SFP_XMTR_TECH_SA))
*media = BFA_SFP_MEDIA_SW;
 /* Check 10G Ethernet Compliance code */
- else if (e10g.b & 0x10)
+ else if (e10g.r.e10g_sr)
*media = BFA_SFP_MEDIA_SW;
- else if (e10g.b & 0x60)
+ else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
*media = BFA_SFP_MEDIA_LW;
- else if (e10g.r.e10g_unall & 0x80)
+ else if (e10g.r.e10g_unall)
*media = BFA_SFP_MEDIA_UNKNOWN;
else
bfa_trc(sfp, 0);
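
With the guard spelled __BIG_ENDIAN (the symbol the kernel's byteorder headers actually define, unlike the old __BIGENDIAN), the bitfield layout is correct on both endiannesses and bfa_sfp_media_get() can test named compliance bits instead of raw masks. A condensed sketch of the byte/bitfield union idiom, keeping only the bits used above; the reserved bits are collapsed and the names abbreviated:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Condensed view of the SFP 10G compliance byte. */
union example_e10g_code {
        u8 b;                           /* raw compliance byte */
        struct {
#ifdef __BIG_ENDIAN
                u8 unall:1;             /* bit 7: unallocated */
                u8 lrm:1;
                u8 lr:1;
                u8 sr:1;                /* bit 4: same bit as the old 0x10 mask */
                u8 rsvd:4;
#else
                u8 rsvd:4;
                u8 sr:1;
                u8 lr:1;
                u8 lrm:1;
                u8 unall:1;
#endif
        } r;
};

static int example_is_short_wave(union example_e10g_code e10g)
{
        return e10g.r.sr;               /* replaces (e10g.b & 0x10) */
}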
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index dee1a094c2c..439c012be76 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -472,7 +472,7 @@ static const struct file_operations bfad_debugfs_op_regwr = {
struct bfad_debugfs_entry {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
};
@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
}
}
- /*
- * Remove the pci_dev debugfs directory for the port */
+ /* Remove the pci_dev debugfs directory for the port */
if (port->port_debugfs_root) {
debugfs_remove(port->port_debugfs_root);
port->port_debugfs_root = NULL;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1..1ad0b822556 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
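
The bnx2i fix is about lock scope: task->sc and sc->request were read after session->lock was dropped, and a task with no SCSI command attached slipped past the old NULL check. The reordered code validates both pointers and reads the CPU hint before unlocking. A generic hedged sketch of that pattern; the structure and field names are illustrative:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct example_task {
        void *sc;                       /* may be cleared concurrently */
        int cpu;
};

/* Read everything needed while the protecting lock is still held. */
static int example_peek(spinlock_t *lock, struct example_task *task, int *cpu)
{
        spin_lock(lock);
        if (!task || !task->sc) {       /* validate under the lock */
                spin_unlock(lock);
                return -EINVAL;
        }
        *cpu = task->cpu;               /* read while still protected */
        spin_unlock(lock);
        return 0;
}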
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index d1e69719097..1a44b45e7be 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,7 +2177,7 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
return 0;
}
-static mode_t bnx2i_attr_is_visible(int param_type, int param)
+static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 000294a9df8..36739da8bc1 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ac7a9b1e3e2..5a4a3bfc60c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct port_info *pi = netdev_priv(ndev);
struct sk_buff *skb = NULL;
+ struct neighbour *n;
unsigned int step;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+ n = dst_get_neighbour_noref(csk->dst);
+ if (!n) {
+ pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+ goto rel_resource;
+ }
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c10f74a566f..c5360ffb4be 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
struct net_device *ndev;
struct cxgbi_device *cdev;
struct rtable *rt = NULL;
+ struct neighbour *n;
struct flowi4 fl4;
struct cxgbi_sock *csk = NULL;
unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- ndev = dst_get_neighbour(dst)->dev;
+ n = dst_get_neighbour_noref(dst);
+ if (!n) {
+ err = -ENODEV;
+ goto rel_rt;
+ }
+ ndev = n->dev;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
- dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+ n->dev->name, ndev->name, mtu);
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
@@ -2569,7 +2575,7 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
-mode_t cxgbi_attr_is_visible(int param_type, int param)
+umode_t cxgbi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 20c88279c7a..80fa99b3d38 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,7 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *);
void cxgbi_cleanup_task(struct iscsi_task *task);
-mode_t cxgbi_attr_is_visible(int param_type, int param);
+umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
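
Both cxgb iSCSI drivers stop dereferencing dst_get_neighbour() blindly: cxgb3i lets t3_l2t_get() resolve the neighbour from the dst itself, while cxgb4i and libcxgbi switch to dst_get_neighbour_noref() and handle a NULL result. A hedged sketch of the NULL-checked lookup as it existed in this kernel series (the neighbour helpers were reworked again later); the function name and error handling are illustrative:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/dst.h>

/* Pick the egress device for a route, failing cleanly without a neighbour. */
static int example_pick_egress_dev(struct dst_entry *dst,
                                   struct net_device **ndev)
{
        struct neighbour *n;

        n = dst_get_neighbour_noref(dst);       /* may return NULL */
        if (!n)
                return -ENODEV;

        *ndev = n->dev;
        return 0;
}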
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 23149b9e297..48e46f5b77c 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -28,7 +28,6 @@
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
- struct scsi_device_handler *tmp, *found = NULL;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (tmp->idx == idx) {
- found = tmp;
- break;
- }
- }
- spin_unlock(&list_lock);
- return found;
-}
-
/*
* device_handler_match_function - Match a device handler to a device
* @sdev - SCSI device to be tested
@@ -84,23 +68,6 @@ device_handler_match_function(struct scsi_device *sdev)
}
/*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
- int idx;
-
- idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
- SCSI_DEVINFO_DH);
- return get_device_handler_by_idx(idx);
-}
-
-/*
* device_handler_match - Attach a device handler to a device
* @scsi_dh - The device handler to match against or NULL
* @sdev - SCSI device to be tested against @scsi_dh
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device_handler *found_dh;
found_dh = device_handler_match_function(sdev);
- if (!found_dh)
- found_dh = device_handler_match_devlist(sdev);
if (scsi_dh && found_dh != scsi_dh)
found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
- scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_add_keyed(0,
- scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- NULL,
- scsi_dh->idx,
- SCSI_DEVINFO_DH);
- }
-
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- SCSI_DEVINFO_DH);
- }
-
spin_lock(&list_lock);
list_del(&scsi_dh->list);
spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
{
int r;
- r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
- if (r)
- return r;
-
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
- scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 591186cf189..e1c8be06de9 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
{NULL, NULL},
};
+static bool clariion_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; clariion_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+ strlen(clariion_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, clariion_dev_list[i].model,
+ strlen(clariion_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int clariion_bus_attach(struct scsi_device *sdev);
static void clariion_bus_detach(struct scsi_device *sdev);
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
.activate = clariion_activate,
.prep_fn = clariion_prep_fn,
.set_params = clariion_set_params,
+ .match = clariion_match,
};
static int clariion_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0f86a18b157..084062bb8ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{NULL, NULL},
};
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+ strlen(hp_sw_dh_data_list[i].vendor)) &&
+ !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+ strlen(hp_sw_dh_data_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int hp_sw_bus_attach(struct scsi_device *sdev);
static void hp_sw_bus_detach(struct scsi_device *sdev);
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
.detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
.prep_fn = hp_sw_prep_fn,
+ .match = hp_sw_match,
};
static int hp_sw_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1d312792006..841ebf4a678 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -820,6 +820,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{NULL, NULL},
};
+static bool rdac_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; rdac_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+ strlen(rdac_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, rdac_dev_list[i].model,
+ strlen(rdac_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);
@@ -832,6 +850,7 @@ static struct scsi_device_handler rdac_dh = {
.attach = rdac_bus_attach,
.detach = rdac_bus_detach,
.activate = rdac_activate,
+ .match = rdac_match,
};
static int rdac_bus_attach(struct scsi_device *sdev)
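
The three .match callbacks added to scsi_dh_emc, scsi_dh_hp_sw and scsi_dh_rdac replace the keyed devlist lookup deleted from scsi_dh.c, and they all share one shape: refuse TPGS-capable devices (those belong to the ALUA handler) and otherwise compare vendor/model prefixes against the handler's static table. A hedged sketch of that common shape; the table entries and the flattened argument list are placeholders, each handler keeps its own real table and works on a struct scsi_device:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_devid {
        const char *vendor;
        const char *model;
};

static const struct example_devid example_dev_list[] = {
        {"VENDOR", "MODEL"},            /* placeholder entries */
        {NULL, NULL},
};

static bool example_match(const char *vendor, const char *model, bool tpgs)
{
        int i;

        if (tpgs)                       /* TPGS devices are handled by ALUA */
                return false;

        for (i = 0; example_dev_list[i].vendor; i++) {
                if (!strncmp(vendor, example_dev_list[i].vendor,
                             strlen(example_dev_list[i].vendor)) &&
                    !strncmp(model, example_dev_list[i].model,
                             strlen(example_dev_list[i].model)))
                        return true;
        }
        return false;
}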
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84..8d67467dd9c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The fcoe_interface whose FCoE and FIP frame priorities should be
+ *        set from the DCB APP table of the underlying net_device
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
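
Taken together, the fcoe.c changes make the driver DCB-aware: fcoe_dcb_create() reads the APP priority for the FCoE and FIP ethertypes (through the IEEE or CEE getters depending on the negotiated DCBX version), the notifier keeps it current, and fcoe_xmit() plus the FIP paths stamp skb->priority so frames land in the right traffic class. A hedged sketch of the priority lookup; the function name is a placeholder and a real build needs CONFIG_DCB and dcbnl_ops on the netdev:

#include <linux/bitops.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <net/dcbnl.h>

/*
 * Look up the user priority advertised for an ethertype in the DCB APP
 * table.  Both getters return a bitmask of priorities, so take the lowest
 * set bit and default to 0 when nothing is configured.
 */
static u8 example_app_priority(struct net_device *netdev, int dcbx, u16 proto)
{
        struct dcb_app app = {
                .priority = 0,
                .protocol = proto,
        };
        u8 up;

        if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
                app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
                up = dcb_ieee_getapp_mask(netdev, &app);
        } else {
                app.selector = DCB_APP_IDTYPE_ETHTYPE;
                up = dcb_getapp(netdev, &app);
        }

        return ffs(up) ? ffs(up) - 1 : 0;
}

In this sketch the caller would invoke it once with ETH_P_FCOE for the data path priority and once with ETH_P_FIP for the controller priority, mirroring what fcoe_dcb_create() does above.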
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71e..e7522dcc296 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index d969855ac64..d3e4d7c6f57 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -359,7 +359,7 @@ typedef struct {
u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
- u32 cmd_buff_size; /* size of each cmd bufer in bytes */
+ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
u32 reserved1;
u32 reserved2;
} __attribute__((packed)) gdth_perf_modes;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 865d452542b..5140f5d0fd6 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
+ 0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
+ 0x40800E11, /* Smart Array 5i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
if (h->msix_vector || h->msi_vector)
rc = request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h);
+ 0, h->devname, h);
else
rc = request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h);
+ IRQF_SHARED, h->devname, h);
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
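
The hpsa interrupt change is part of the tree-wide removal of the no-op IRQF_DISABLED flag: the MSI/MSI-X path now passes 0, while the legacy INTx path requests a shared line. A hedged sketch of the two request_irq() variants; the IRQ number, handlers and device cookie are placeholders:

#include <linux/interrupt.h>
#include <linux/types.h>

static irqreturn_t example_msix_handler(int irq, void *devid)
{
        return IRQ_HANDLED;
}

static irqreturn_t example_intx_handler(int irq, void *devid)
{
        /* a real shared-line handler first checks whether the IRQ is ours */
        return IRQ_HANDLED;
}

static int example_request_irq(unsigned int irq, bool msix, void *ctlr)
{
        if (msix)
                /* exclusive per-vector interrupt: no special flags needed */
                return request_irq(irq, example_msix_handler, 0,
                                   "example", ctlr);

        /* legacy INTx may be shared with other devices on the same pin */
        return request_irq(irq, example_intx_handler, IRQF_SHARED,
                           "example", ctlr);
}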
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 218f71a8726..d77891e5683 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -4494,7 +4494,7 @@ ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
/* */
/* Initialize a CCB to default values */
/* */
-/* ASSUMED to be callled from within a lock */
+/* ASSUMED to be called from within a lock */
/* */
/****************************************************************************/
static ips_scb_t *
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 89700cbca16..14c1c8f6a95 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -112,7 +112,7 @@ static struct attribute *target_attrs[] = {
NULL
};
-static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -193,7 +193,7 @@ static struct attribute *ethernet_attrs[] = {
NULL
};
-static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -265,7 +265,7 @@ static struct attribute *initiator_attrs[] = {
NULL
};
-static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -306,7 +306,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
struct attribute_group *attr_group,
const char *name, int index, void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
struct iscsi_boot_kobj *boot_kobj;
@@ -369,7 +369,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
@@ -394,7 +394,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
@@ -420,7 +420,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
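
The mode_t to umode_t conversions here, and in the iSCSI transport attr_is_visible hooks earlier in this series, track the split of the kernel's file-mode types during this cycle; sysfs visibility callbacks now return umode_t. A hedged sketch of a group-level .is_visible callback with the new type; all names are placeholders:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "ok\n");
}

static struct kobj_attribute example_attr =
        __ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *example_attrs[] = {
        &example_attr.attr,
        NULL,
};

/* Return 0 to hide an attribute, or the mode it should be exposed with. */
static umode_t example_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
{
        return attr->mode;
}

static struct attribute_group example_group = {
        .attrs = example_attrs,
        .is_visible = example_is_visible,
};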
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7c34d8e7cc7..db47158e0dd 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -873,7 +873,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 08e26d4e373..27cfb0cb186 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bb4c8e0584e..825f9307417 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -247,18 +247,6 @@ struct lpfc_stats {
uint32_t fcpLocalErr;
};
-enum sysfs_mbox_state {
- SMBOX_IDLE,
- SMBOX_WRITING,
- SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
- enum sysfs_mbox_state state;
- size_t offset;
- struct lpfcMboxq * mbox;
-};
-
struct lpfc_hba;
@@ -783,8 +771,6 @@ struct lpfc_hba {
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
- struct lpfc_sysfs_mbox sysfs_mbox;
-
/* fastpath list. */
spinlock_t scsi_buf_list_lock;
struct list_head lpfc_scsi_buf_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d0ebaeb7ef6..f6697cb0e21 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ uint32_t if_type;
+ uint8_t sli_family;
char fwrev[32];
+ int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+ if_type = phba->sli4_hba.pc_sli4_params.if_type;
+ sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+ fwrev, phba->sli_rev);
+ else
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+ fwrev, phba->sli_rev, if_type, sli_family);
+
+ return len;
}
/**
@@ -488,6 +501,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+ return snprintf(buf, PAGE_SIZE, "fcoe\n");
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
* the readiness after performing a firmware reset.
*
* Returns:
- * zero for success
+ * zero for success, -EPERM when the port does not have the privilege to
+ * perform the reset, -EIO when the port times out while recovering from the
+ * reset.
+ *
+ * Note:
+ * As the caller interprets the return code by value, be careful when making
+ * changes or additions to the return codes.
**/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
+ uint32_t before_fc_flag;
+ uint32_t sriov_nr_virtfn;
uint32_t reg_val;
- int status = 0;
- int rc;
+ int status = 0, rc = 0;
+ int job_posted = 1, sriov_err;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
+ /* Keep state if we need to restore back */
+ before_fc_flag = phba->pport->fc_flag;
+ sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
/* delay driver action following IF_TYPE_2 reset */
rc = lpfc_sli4_pdev_status_reg_wait(phba);
- if (rc)
+ if (rc == -EPERM) {
+ /* no privilege for reset, restore if needed */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
+ } else if (rc == -EIO) {
+ /* reset failed, there is nothing more we can do */
return rc;
+ }
+
+ /* keep the original port state */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
wait_for_completion(&online_compl);
- if (status != 0)
- return -EIO;
+out:
+ /* in any case, restore the virtual functions enabled as before */
+ if (sriov_nr_virtfn) {
+ sriov_err =
+ lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+ if (!sriov_err)
+ phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+ }
- return 0;
+ /* return proper error code */
+ if (!rc) {
+ if (!job_posted)
+ rc = -ENOMEM;
+ else if (status)
+ rc = -EIO;
+ }
+ return rc;
}
/**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int status=0;
+ char *board_mode_str = NULL;
+ int status = 0;
int rc;
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
+ if (!phba->cfg_enable_hba_reset) {
+ status = -EACCES;
+ goto board_mode_out;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3050 lpfc_board_mode set to %s\n", buf);
+ "3050 lpfc_board_mode set to %s\n", buf);
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ if (rc == 0) {
+ status = -ENOMEM;
+ goto board_mode_out;
+ }
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
- return -EINVAL;
+ status = -EINVAL;
+board_mode_out:
if (!status)
return strlen(buf);
- else
+ else {
+ board_mode_str = strchr(buf, '\n');
+ if (board_mode_str)
+ *board_mode_str = '\0';
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3097 Failed \"%s\", status(%d), "
+ "fc_flag(x%x)\n",
+ buf, status, phba->pport->fc_flag);
return status;
+ }
}
/**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (val >= 0 && val <= 6) {
prev_val = phba->cfg_topology;
phba->cfg_topology = val;
+ if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+ val == 4) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3113 Loop mode not supported at speed %d\n",
+ phba->cfg_link_speed);
+ phba->cfg_topology = prev_val;
+ return -EINVAL;
+ }
if (nolip)
return strlen(buf);
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
+ if (val == LPFC_USER_LINK_SPEED_16G &&
+ phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3112 lpfc_link_speed attribute cannot be set "
+ "to %d. Speed is not supported in loop mode.\n",
+ val);
+ return -EINVAL;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
+ if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3111 lpfc_link_speed of %d cannot "
+ "support loop mode, setting topology to default.\n",
+ val);
+ phba->cfg_topology = 0;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
phba->cfg_link_speed = val;
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
+ &dev_attr_protocol,
NULL,
};
@@ -3988,23 +4102,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
};
/**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
- phba->sysfs_mbox.state = SMBOX_IDLE;
- phba->sysfs_mbox.offset = 0;
-
- if (phba->sysfs_mbox.mbox) {
- mempool_free(phba->sysfs_mbox.mbox,
- phba->mbox_mem_pool);
- phba->sysfs_mbox.mbox = NULL;
- }
-}
-
-/**
* sysfs_mbox_write - Write method for writing information via mbox
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct lpfcMboxq *mbox = NULL;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (off == 0) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
- }
-
- spin_lock_irq(&phba->hbalock);
-
- if (off == 0) {
- if (phba->sysfs_mbox.mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- else
- phba->sysfs_mbox.mbox = mbox;
- phba->sysfs_mbox.state = SMBOX_WRITING;
- } else {
- if (phba->sysfs_mbox.state != SMBOX_WRITING ||
- phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.mbox == NULL) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
- }
-
- memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
- buf, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
/**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq;
- MAILBOX_t *pmb;
- uint32_t mbox_tmo;
- int rc;
-
- if (off > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- count = MAILBOX_CMD_SIZE - off;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (off && count == 0)
- return 0;
-
- spin_lock_irq(&phba->hbalock);
-
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
-
- if (off == 0 &&
- phba->sysfs_mbox.state == SMBOX_WRITING &&
- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
- mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
- pmb = &mboxq->u.mb;
- switch (pmb->mbxCommand) {
- /* Offline only */
- case MBX_INIT_LINK:
- case MBX_DOWN_LINK:
- case MBX_CONFIG_LINK:
- case MBX_CONFIG_RING:
- case MBX_RESET_RING:
- case MBX_UNREG_LOGIN:
- case MBX_CLEAR_LA:
- case MBX_DUMP_CONTEXT:
- case MBX_RUN_DIAGS:
- case MBX_RESTART:
- case MBX_SET_MASK:
- case MBX_SET_DEBUG:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is illegal in on-line state\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- case MBX_WRITE_NV:
- case MBX_WRITE_VPARMS:
- case MBX_LOAD_SM:
- case MBX_READ_NV:
- case MBX_READ_CONFIG:
- case MBX_READ_RCONFIG:
- case MBX_READ_STATUS:
- case MBX_READ_XRI:
- case MBX_READ_REV:
- case MBX_READ_LNK_STAT:
- case MBX_DUMP_MEMORY:
- case MBX_DOWN_LOAD:
- case MBX_UPDATE_CFG:
- case MBX_KILL_BOARD:
- case MBX_LOAD_AREA:
- case MBX_LOAD_EXP_ROM:
- case MBX_BEACON:
- case MBX_DEL_LD_ENTRY:
- case MBX_SET_VARIABLE:
- case MBX_WRITE_WWN:
- case MBX_PORT_CAPABILITIES:
- case MBX_PORT_IOV_CONTROL:
- break;
- case MBX_SECURITY_MGMT:
- case MBX_AUTH_PORT:
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is not permitted\n", pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- break;
- case MBX_READ_SPARM64:
- case MBX_READ_TOPOLOGY:
- case MBX_REG_LOGIN:
- case MBX_REG_LOGIN64:
- case MBX_CONFIG_PORT:
- case MBX_RUN_BIU_DIAG:
- printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- default:
- printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
-
- /* If HBA encountered an error attention, allow only DUMP
- * or RESTART mailbox commands until the HBA is restarted.
- */
- if (phba->pport->stopped &&
- pmb->mbxCommand != MBX_DUMP_MEMORY &&
- pmb->mbxCommand != MBX_RESTART &&
- pmb->mbxCommand != MBX_WRITE_VPARMS &&
- pmb->mbxCommand != MBX_WRITE_WWN)
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "1259 mbox: Issued mailbox cmd "
- "0x%x while in stopped state.\n",
- pmb->mbxCommand);
-
- phba->sysfs_mbox.mbox->vport = vport;
-
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
- spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli_issue_mbox (phba,
- phba->sysfs_mbox.mbox,
- MBX_POLL);
- spin_lock_irq(&phba->hbalock);
-
- } else {
- spin_unlock_irq(&phba->hbalock);
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- spin_lock_irq(&phba->hbalock);
- }
-
- if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT) {
- phba->sysfs_mbox.mbox = NULL;
- }
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
- }
- phba->sysfs_mbox.state = SMBOX_READING;
- }
- else if (phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.state != SMBOX_READING) {
- printk(KERN_WARNING "mbox_read: Bad State\n");
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- memcpy(buf, (uint8_t *) &pmb + off, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
- sysfs_mbox_idle(phba);
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
- /* Links up, beyond this port_type reports state */
- fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ /* Links up, reports port state accordingly */
+ if (vport->port_state < LPFC_VPORT_READY)
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_BYPASSED;
+ else
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_ONLINE;
break;
case LPFC_HBA_ERROR:
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
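
The lpfc_attr.c hunks above gut the legacy sysfs mailbox handlers: both sysfs_mbox_write() and sysfs_mbox_read() now return -EPERM unconditionally, and all mailbox traffic is expected to go through the bsg interface. A rough user-space illustration of the visible effect (the host number and error handling are assumptions, not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Any attempt to drive the old mbox node now fails with EPERM. */
int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/mbox", O_WRONLY);
	char zero = 0;

	if (fd < 0)
		return 1;
	if (write(fd, &zero, 1) < 0 && errno == EPERM)
		printf("mbox write rejected: use the SCSI bsg interface\n");
	close(fd);
	return 0;
}
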
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6760c69f525..56a86baece5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} else {
switch (cmd) {
case ELX_LOOPBACK_DATA:
- diag_cmd_data_free(phba,
- (struct lpfc_dmabufext *)
- dmabuf);
+ if (phba->sli_rev <
+ LPFC_SLI_REV4)
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext
+ *)dmabuf);
break;
case ELX_LOOPBACK_XRI_SETUP:
if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
- if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (evt_req_id == SLI_CT_ELX_LOOPBACK))
return 0;
return 1;
}
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
- LPFC_MBOXQ_t *pmboxq;
+ LPFC_MBOXQ_t *pmboxq = NULL;
int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
rc = -ETIMEDOUT;
goto loopback_mode_exit;
}
-
msleep(10);
}
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
rc = -ENODEV;
else {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
/* wait for the link attention interrupt */
msleep(100);
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
rc = -ENOMEM;
goto link_diag_state_set_out;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+ diag, phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
if (diag)
bf_set(lpfc_mbx_set_diag_state_diag,
&link_diag_state->u.req, 1);
@@ -1727,6 +1736,79 @@ link_diag_state_set_out:
}
/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing an sli4 mailbox command for setting
+ * up the internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ uint32_t req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ int mbxstatus = MBX_SUCCESS, rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3127 Failed setup loopback mode mailbox "
+ "command, rc:x%x, status:x%x\n", mbxstatus,
+ pmboxq->u.mb.mbxStatus);
+ rc = -ENODEV;
+ }
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up the SLI4 FC port registrations for a diagnostic run,
+ * which includes all the rpis, the vfi, and also the vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3136 Port still had vfi registered: "
+ "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+ phba->pport->fc_myDID, phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[phba->pport->vfi],
+ phba->vpi_ids[phba->pport->vpi]);
+ return -EINVAL;
+ }
+ rc = lpfc_issue_reg_vfi(phba->pport);
+ return rc;
+}
+
+/**
* lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
* @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
@@ -1738,10 +1820,8 @@ static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct diag_mode_set *loopback_mode;
- uint32_t link_flags, timeout, req_len, alloc_len;
- struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
- LPFC_MBOXQ_t *pmboxq = NULL;
- int mbxstatus = MBX_SUCCESS, i, rc = 0;
+ uint32_t link_flags, timeout;
+ int i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if (rc)
goto job_error;
+ /* indicate we are in loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* reset port to start from scratch */
+ rc = lpfc_selective_reset(phba);
+ if (rc)
+ goto job_error;
+
/* bring the link to diagnostic mode */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
- if (rc)
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3130 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
goto loopback_mode_exit;
+ }
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3131 Timeout waiting for link to "
+ "diagnostic mode, timeout:%d ms\n",
+ timeout * 10);
goto loopback_mode_exit;
}
msleep(10);
}
+
/* set up loopback mode */
- pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq) {
- rc = -ENOMEM;
- goto loopback_mode_exit;
- }
- req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
- req_len, LPFC_SLI4_MBX_EMBED);
- if (alloc_len != req_len) {
- rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3132 Set up loopback mode:x%x\n", link_flags);
+
+ if (link_flags == INTERNAL_LOOP_BACK)
+ rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+ else if (link_flags == EXTERNAL_LOOP_BACK)
+ rc = lpfc_hba_init_link_fc_topology(phba,
+ FLAGS_TOPOLOGY_MODE_PT_PT,
+ MBX_NOWAIT);
+ else {
+ rc = -EINVAL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3141 Loopback mode:x%x not supported\n",
+ link_flags);
goto loopback_mode_exit;
}
- link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
- bf_set(lpfc_mbx_set_diag_state_link_num,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
- bf_set(lpfc_mbx_set_diag_state_link_type,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
- if (link_flags == INTERNAL_LOOP_BACK)
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
- else
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
- mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
- if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
- rc = -ENODEV;
- else {
- phba->link_flag |= LS_LOOPBACK_MODE;
+ if (!rc) {
/* wait for the link attention interrupt */
msleep(100);
i = 0;
+ while (phba->link_state < LPFC_LINK_UP) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3137 Timeout waiting for link up "
+ "in loopback mode, timeout:%d ms\n",
+ timeout * 10);
+ break;
+ }
+ msleep(10);
+ }
+ }
+
+ /* port resource registration setup for loopback diagnostic */
+ if (!rc) {
+ /* set up a non-zero myDID for the loopback test */
+ phba->pport->fc_myDID = 1;
+ rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+ } else
+ goto loopback_mode_exit;
+
+ if (!rc) {
+ /* wait for the port ready */
+ msleep(100);
+ i = 0;
while (phba->link_state != LPFC_HBA_READY) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3133 Timeout waiting for port "
+ "loopback mode ready, timeout:%d ms\n",
+ timeout * 10);
break;
}
msleep(10);
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
}
loopback_mode_exit:
+ /* clear loopback diagnostic mode */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ }
lpfc_bsg_diag_mode_exit(phba);
- /*
- * Let SLI layer release mboxq if mbox command completed after timeout.
- */
- if (pmboxq && (mbxstatus != MBX_TIMEOUT))
- mempool_free(pmboxq, phba->mbox_mem_pool);
-
job_error:
/* make error code available to userspace */
job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
rc = -ENODEV;
return rc;
-
}
/**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- int rc;
+ struct diag_mode_set *loopback_mode_end_cmd;
+ uint32_t timeout;
+ int rc, i;
shost = job->shost;
if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
+ /* clear loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ loopback_mode_end_cmd = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = loopback_mode_end_cmd->timeout * 100;
+
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3139 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
+ goto loopback_mode_end_exit;
+ }
- if (!rc)
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3140 Timeout waiting for link to "
+ "diagnostic mode_end, timeout:%d ms\n",
+ timeout * 10);
+ /* there is nothing much we can do here */
+ break;
+ }
+ msleep(10);
+ }
+
+ /* reset port resource registrations */
+ rc = lpfc_selective_reset(phba);
+ phba->pport->fc_myDID = 0;
+loopback_mode_end_exit:
+ /* make return code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
return rc;
}
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
link_diag_test_cmd->test_id);
bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
if (!mbox)
return -ENOMEM;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ else {
*rpi = lpfc_sli4_alloc_rpi(phba);
- status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
- (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+ status = lpfc_reg_rpi(phba, phba->pport->vpi,
+ phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ }
+
if (status) {
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENODEV;
}
- *rpi = mbox->u.mb.un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ *rpi = mbox->u.mb.un.varWords[0];
lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
kfree(dmabuff);
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
if (mbox == NULL)
return -ENOMEM;
- lpfc_unreg_login(phba, 0, rpi, mbox);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_unreg_login(phba, 0, rpi, mbox);
+ else
+ lpfc_unreg_login(phba, phba->pport->vpi,
+ phba->sli4_hba.rpi_ids[rpi], mbox);
+
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t full_size;
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
- struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+ IOCB_t *cmd, *rsp = NULL;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
struct lpfc_dmabufext *txbuffer = NULL;
struct list_head head;
struct lpfc_dmabuf *curr;
- uint16_t txxri, rxxri;
+ uint16_t txxri = 0, rxxri;
uint32_t num_bde;
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = -EINVAL;
goto loopback_test_exit;
}
-
diag_mode = (struct diag_mode_test *)
job->request->rqst_data.h_vendor.vendor_cmd;
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
if (rc)
goto loopback_test_exit;
- rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
+ rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
}
-
evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
SLI_CT_ELX_LOOPBACK);
if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
cmdiocbq = lpfc_sli_get_iocbq(phba);
- rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rspiocbq = lpfc_sli_get_iocbq(phba);
txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
}
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
- !txbmp->virt) {
+ if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+ if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rsp = &rspiocbq->iocb;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
list_del(&head);
/* Build the XMIT_SEQUENCE iocb */
-
num_bde = (uint32_t)txbuffer->flag;
cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
- cmd->ulpContext = txxri;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ cmd->ulpContext = txxri;
+ } else {
+ cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+ cmdiocbq->context3 = txbmp;
+ cmdiocbq->sli4_xritag = NO_XRI;
+ cmd->unsli3.rcvsli3.ox_id = 0xffff;
+ }
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
-
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+ if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOCB_SUCCESS))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3126 Failed loopback test issue iocb: "
+ "iocb_stat:x%x\n", iocb_stat);
rc = -EIO;
goto err_loopback_test_exit;
}
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
evt->wq, !list_empty(&evt->events_to_see),
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
evt->waiting = 0;
- if (list_empty(&evt->events_to_see))
+ if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
- else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3125 Not receiving unsolicited event, "
+ "rc:x%x\n", rc);
+ } else {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_move(evt->events_to_see.prev, &evt->events_to_get);
evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
job->reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
- if (rc == 0)
+ if (rc == IOCB_SUCCESS)
job->job_done(job);
return rc;
}
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
&& (mb->un.varWords[1] == 1)) {
phba->wait_4_mlo_maint_flg = 1;
} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
}
break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint32_t size;
int rc = 0;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint8_t *pmbx;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ /* Copy the byte swapped response mailbox back to the user */
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+ /* if there is any non-embedded extended data copy that too */
+ dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ pmbx = (uint8_t *)dmabuf->virt;
+ /* byte swap the extended data following the mailbox command */
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+ }
job = dd_data->context_un.mbox.set_job;
if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+ /*
+ * Non-embedded mailbox subcommand data gets byte swapped here because
+ * the lower level driver code only does the first 64 mailbox words.
+ */
+ if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+ (nemb_tp == nemb_mse))
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[0].buf_len);
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2953 Handled SLI_CONFIG(mse) wr, "
+ "2953 Failed SLI_CONFIG(mse) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2954 Handled SLI_CONFIG(hbd) wr, "
+ "2954 Failed SLI_CONFIG(hbd) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"2956 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
+ goto job_error;
}
/* wait for additional external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t opcode;
int rc = SLI_CONFIG_NOT_HANDLED;
- /* state change */
+ /* state change on new multi-buffer pass-through mailbox command */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2959 Not handled SLI_CONFIG "
+ "2959 Reject SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
+ break;
+ }
+ } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3106 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3107 Reject SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = -EPERM;
break;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2977 Handled SLI_CONFIG "
+ "2977 Reject SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
}
} else {
subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2978 Handled SLI_CONFIG "
+ "2978 Not handled SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
rc = SLI_CONFIG_NOT_HANDLED;
}
}
+
+ /* state reset on not handled new multi-buffer mailbox command */
+ if (rc != SLI_CONFIG_HANDLED)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
return rc;
}
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- /* any data for the device? */
- if (mbox_req->inExtWLen) {
- from = pmbx;
- ext = from + sizeof(MAILBOX_t);
- }
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
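
Several lpfc_bsg.c hunks above add explicit byte swapping of the non-embedded SLI_CONFIG extension data, because the lower-level mailbox path only converts the first 64 mailbox words. The driver does this with its pcimem copy helper; the sketch below is only a generic stand-in showing the idea of swapping a buffer of 32-bit words in place, not the driver's exact implementation:

#include <linux/types.h>
#include <linux/swab.h>

/* Byte swap every 32-bit word of an extension buffer in place. */
static void swap_ext_buffer(u32 *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes / sizeof(u32); i++)
		buf[i] = swab32(buf[i]);
}
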
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index c8c2b47ea88..edfe61fc52b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -96,7 +96,7 @@ struct get_mgmt_rev {
};
#define MANAGEMENT_MAJOR_REV 1
-#define MANAGEMENT_MINOR_REV 0
+#define MANAGEMENT_MINOR_REV 1
/* the MgmtRevInfo structure */
struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
#define COMN_OPCODE_DELETE_OBJECT 0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 75e2e569ded..c88e556ea62 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
- __iowrite32_copy(dest, src, bytes);
+ /* convert bytes in argument list to word count for copy function */
+ __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}
static inline void
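
The lpfc_compat.h change above fixes the length passed to __iowrite32_copy(), whose third argument counts 32-bit words rather than bytes; with a raw byte count the old code copied four times too much data into SLIM. A minimal sketch of the corrected pattern (the wrapper and parameter names are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* __iowrite32_copy() copies 'count' 32-bit words, so scale a byte
 * length down before handing it over. */
static inline void copy_words_to_slim(void __iomem *dest, const void *src,
				      unsigned int bytes)
{
	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}
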
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 60f95347bab..26924b7a6cd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
int lpfc_sli4_queue_create(struct lpfc_hba *);
void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 28382596fb9..3587a3fe8fc 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get slow-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path EQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.sp_eq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.sp_eq->entry_size,
phba->sli4_hba.sp_eq->host_index,
phba->sli4_hba.sp_eq->hba_index);
+ }
/* Get fast-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path EQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ }
}
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.mbx_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_cq->entry_size,
phba->sli4_hba.mbx_cq->host_index,
phba->sli4_hba.mbx_cq->hba_index);
+ }
/* Get slow-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.els_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID [%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_cq->entry_size,
phba->sli4_hba.els_cq->host_index,
phba->sli4_hba.els_cq->hba_index);
+ }
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
fcp_qidx = 0;
- do {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_cq) {
+ do {
+ if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
/* Get mailbox queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX MQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.mbx_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_wq->entry_size,
phba->sli4_hba.mbx_wq->host_index,
phba->sli4_hba.mbx_wq->hba_index);
+ }
/* Get slow-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS WQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.els_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_wq->entry_size,
phba->sli4_hba.els_wq->host_index,
phba->sli4_hba.els_wq->hba_index);
+ }
/* Get fast-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP WQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+ continue;
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], WQE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get receive queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path RQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.hdr_rq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tHQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.hdr_rq->entry_size,
phba->sli4_hba.hdr_rq->host_index,
phba->sli4_hba.hdr_rq->hba_index);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tDQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.dat_rq->entry_size,
phba->sli4_hba.dat_rq->host_index,
phba->sli4_hba.dat_rq->hba_index);
-
+ }
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
/* Slow-path event queue */
- if (phba->sli4_hba.sp_eq->queue_id == queid) {
+ if (phba->sli4_hba.sp_eq &&
+ phba->sli4_hba.sp_eq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* Fast-path event queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
- if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fp_eq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ if (phba->sli4_hba.fp_eq[qidx] &&
+ phba->sli4_hba.fp_eq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fp_eq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fp_eq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_CQ:
/* MBX complete queue */
- if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_cq &&
+ phba->sli4_hba.mbx_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* ELS complete queue */
- if (phba->sli4_hba.els_cq->queue_id == queid) {
+ if (phba->sli4_hba.els_cq &&
+ phba->sli4_hba.els_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- qidx = 0;
- do {
- if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_cq) {
+ qidx = 0;
+ do {
+ if (phba->sli4_hba.fcp_cq[qidx] &&
+ phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_cq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
phba->sli4_hba.fcp_cq[qidx];
- goto pass_check;
- }
- } while (++qidx < phba->cfg_fcp_eq_count);
+ goto pass_check;
+ }
+ } while (++qidx < phba->cfg_fcp_eq_count);
+ }
goto error_out;
break;
case LPFC_IDIAG_MQ:
/* MBX work queue */
- if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_wq &&
+ phba->sli4_hba.mbx_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.mbx_wq;
goto pass_check;
}
+ goto error_out;
break;
case LPFC_IDIAG_WQ:
/* ELS work queue */
- if (phba->sli4_hba.els_wq->queue_id == queid) {
+ if (phba->sli4_hba.els_wq &&
+ phba->sli4_hba.els_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP work queue */
- for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
- if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_wq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+ if (!phba->sli4_hba.fcp_wq[qidx])
+ continue;
+ if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_wq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.fcp_wq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fcp_wq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_RQ:
/* HDR queue */
- if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+ if (phba->sli4_hba.hdr_rq &&
+ phba->sli4_hba.hdr_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* DAT queue */
- if (phba->sli4_hba.dat_rq->queue_id == queid) {
+ if (phba->sli4_hba.dat_rq &&
+ phba->sli4_hba.dat_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.dat_rq, index, count);
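
The lpfc_debugfs.c hunks above add NULL checks before every SLI4 queue dereference, since the queue-info dump and queue-access paths can run while some queues were never created (for example after a partial bring-up). A condensed sketch of the guard-then-format pattern, assuming the driver's lpfc_sli4.h/lpfc_debugfs.h definitions are in scope (the helper name and the fields printed are assumptions):

/* Append one queue's summary to the debugfs buffer, skipping queues
 * that were never allocated. */
static int lpfc_idiag_dump_one_queue(char *pbuffer, int len,
				     struct lpfc_queue *q, const char *label)
{
	if (!q)		/* queue not set up, leave the buffer untouched */
		return len;

	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
			"%s QID[%02d], QE-COUNT[%04d], HOST-INDEX[%04d]\n",
			label, q->queue_id, q->entry_count, q->host_index);
	return len;
}
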
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 445826a4c98..7afc757338d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -421,13 +421,13 @@ fail:
* @vport: pointer to a host virtual N_Port data structure.
*
* This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for FCoE only.
+ * the @vport. This mailbox command is necessary for SLI4 port only.
*
* Return code
* 0 - successfully issued REG_VFI for @vport
* A failure code otherwise.
**/
-static int
+int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
int rc = 0;
sp = &phba->fc_fabparam;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = -ENODEV;
- goto fail;
+ /* move forward in case of SLI4 FC port loopback test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = -ENODEV;
+ goto fail;
+ }
}
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -487,6 +491,54 @@ fail:
}
/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 ports only.
+ *
+ * Return code
+ * 0 - successfully issued UNREG_VFI for @vport
+ * A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost;
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2556 UNREG_VFI mbox allocation failed"
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
+ }
+
+ lpfc_unreg_vfi(mboxq, vport);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+
+/**
* lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
* @vport: pointer to a host virtual N_Port data structure.
* @sp: pointer to service parameter data structure.
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"1816 FLOGI NPIV supported, "
"response data 0x%x\n",
sp->cmn.response_multiple_NPort);
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
} else {
/* Because we asked f/w for NPIV it still expects us
to call reg_vnpid at least for the physical host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
LOG_ELS | LOG_VPORT,
"1817 Fabric does not support NPIV "
"- configuring single port mode.\n");
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
}
}
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_do_scr_ns_plogi(phba, vport);
} else if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_issue_init_vpi(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3135 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
return 0;
}
+
/**
* lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
* @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
* alpa map would take too long otherwise.
*/
- if (phba->alpa_map[0] == 0) {
+ if (phba->alpa_map[0] == 0)
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!(vport->fc_flag & FC_VFI_REGISTERED) ||
- (vport->fc_prevDID != vport->fc_myDID))) {
- if (vport->fc_flag & FC_VFI_REGISTERED)
- lpfc_sli4_unreg_all_rpis(vport);
- lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
- goto out;
- }
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (vport->fc_prevDID != vport->fc_myDID))) {
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_issue_reg_vfi(vport);
+ lpfc_nlp_put(ndlp);
+ goto out;
}
goto flogifail;
}
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
- icmd->un.elsreq64.bdl.ulpIoTag32) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
- if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
+ if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
!lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
retry = 1;
- /* retry forever */
+ /* retry FLOGI forever */
maxretry = 0;
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
+ } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ /* retry FDISCs every second up to devloss */
+ retry = 1;
+ maxretry = vport->cfg_devloss_tmo;
+ delay = 1000;
}
cmdiocb->retry++;
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/*
* The driver received a LOGO from the rport and has ACK'd it.
- * At this point, the driver is done so release the IOCB and
- * remove the ndlp reference.
+ * At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
+
+ /*
+ * Remove the ndlp reference if it's a fabric node that has
+ * sent us an unsolicited LOGO.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_nlp_put(ndlp);
+
return;
}
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
sizeof(struct lpfc_name));
if (!rc) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
-
- lpfc_linkdown(phba);
- lpfc_init_link(phba, mbox,
- phba->cfg_topology,
- phba->cfg_link_speed);
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- lpfc_set_loopback_flag(phba);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ } else {
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
+ */
+ lpfc_els_abort_flogi(phba);
+ return 0;
}
- return 1;
} else if (rc > 0) { /* greater than */
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = vport->fc_prevDID;
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3138 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
}
return 0;
@@ -6596,56 +6679,6 @@ dropit:
}
/**
- * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: host virtual N_Port identifier.
- *
- * This routine finds a vport on a HBA (referred by @phba) through a
- * @vpi. The function walks the HBA's vport list and returns the address
- * of the vport with the matching @vpi.
- *
- * Return code
- * NULL - No vport with the matching @vpi found
- * Otherwise - Address to the vport with the matching @vpi.
- **/
-struct lpfc_vport *
-lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
-{
- struct lpfc_vport *vport;
- unsigned long flags;
- int i = 0;
-
- /* The physical ports are always vpi 0 - translate is unnecessary. */
- if (vpi > 0) {
- /*
- * Translate the physical vpi to the logical vpi. The
- * vport stores the logical vpi.
- */
- for (i = 0; i < phba->max_vpi; i++) {
- if (vpi == phba->vpi_ids[i])
- break;
- }
-
- if (i >= phba->max_vpi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2936 Could not find Vport mapped "
- "to vpi %d\n", vpi);
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return vport;
- }
- }
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
-}
-
-/**
* lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a SLI ring.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 091f68e5cb7..678a4b11059 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
+ /* don't perform discovery for SLI4 loopback diagnostic test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))
+ return;
+
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
vport->fc_flag & FC_PUBLIC_LOOP &&
!(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- /* VFI not supported on interface type 0, just do the flogi */
- if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
+ /*
+ * VFI not supported on interface type 0, just do the flogi
+ * Also continue if the VFI is in use - just use the same one.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- goto fail_free_mem;
+ goto out_free_mem;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- goto fail_free_mem;
+ goto out_free_mem;
}
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
+ /* In case SLI4 FC loopback test, we are ready */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->link_flag & LS_LOOPBACK_MODE)) {
+ phba->link_state = LPFC_HBA_READY;
+ goto out_free_mem;
+ }
+
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* For private loop just start discovery and we are done. */
if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (phba->alpa_map[0] == 0) &&
!(vport->fc_flag & FC_PUBLIC_LOOP)) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
-fail_free_mem:
+out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+ struct Scsi_Host *shost;
int i;
struct lpfc_dmabuf *mp;
int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"1309 Link Up Event npiv not supported in loop "
"topology\n");
/* Get Loop Map information */
- if (bf_get(lpfc_mbx_read_top_il, la))
+ if (bf_get(lpfc_mbx_read_top_il, la)) {
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
+ }
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
} else {
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if (phba->max_vpi && phba->cfg_enable_npiv &&
- (phba->sli_rev == 3))
+ (phba->sli_rev >= LPFC_SLI_REV3))
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
LPFC_ATT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
- if (phba->link_flag & LS_LOOPBACK_MODE) {
+ if (phba->link_flag & LS_LOOPBACK_MODE)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
- }
- else {
+ else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_issue_link_down(phba);
}
if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* vport discovery */
if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_start_fdiscs(phba);
- else
+ else {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
return ndlp;
}
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer; otherwise
+ * it returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+ unsigned long flags;
+ int i = 0;
+
+ /* The physical ports are always vpi 0 - translate is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == i) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return vport;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+}
+
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -5599,7 +5693,7 @@ out:
*
* This function frees memory associated with the mailbox command.
*/
-static void
+void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
- LPFC_MBOXQ_t *mbox;
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
- /* Unregister VFI */
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2556 UNREG_VFI mbox allocation failed"
- "HBA state x%x\n", phba->pport->port_state);
- return -ENOMEM;
- }
-
- lpfc_unreg_vfi(mbox, phba->pport);
- mbox->vport = phba->pport;
- mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2557 UNREG_VFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
- mempool_free(mbox, phba->mbox_mem_pool);
- return -EIO;
- }
-
- shost = lpfc_shost_from_vport(phba->pport);
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
-
- return 0;
+ /* Unregister the physical port VFI */
+ rc = lpfc_issue_unreg_vfi(phba->pport);
+ return rc;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 046edc4ab35..7245bead375 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -349,6 +349,12 @@ struct csp {
* Word 1 Bit 31 in FLOGI response is clean address bit
*/
#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
#endif
-#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1 : 19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
+ uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2839,14 +2846,16 @@ typedef struct {
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t rsvd2 : 2; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3 : 19; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
+ uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2866,7 +2875,8 @@ typedef struct {
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gbg : 1; /* Grant BlockGuard */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t rsvd4 : 2; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 19; /* Reserved */
#endif
@@ -3465,6 +3475,7 @@ typedef struct {
} ASYNCSTAT_FIELDS;
#define ASYNC_TEMP_WARN 0x100
#define ASYNC_TEMP_SAFE 0x101
+#define ASYNC_STATUS_CN 0x102
/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98d21521f53..e5bfa7f334e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
struct {
uint32_t word0;
#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
-#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
#define lpfc_mbx_set_diag_lpbk_type_WORD word0
#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
#define lpfc_init_vfi_hop_count_MASK 0x000000FF
#define lpfc_init_vfi_hop_count_WORD word4
};
+#define MBX_VFI_IN_USE 0x9F02
+
struct lpfc_mbx_reg_vfi {
uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
#define lpfc_mbx_rd_conf_lnk_type_WORD word2
+#define LPFC_LNK_TYPE_GE 0
+#define LPFC_LNK_TYPE_FC 1
#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
#define wqe_la_SHIFT 3
#define wqe_la_MASK 0x000000001
#define wqe_la_WORD word5
+#define wqe_xo_SHIFT 6
+#define wqe_xo_MASK 0x000000001
+#define wqe_xo_WORD word5
#define wqe_ls_SHIFT 7
#define wqe_ls_MASK 0x000000001
#define wqe_ls_WORD word5
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 55bc4fc7376..dfea2dada02 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Get the default values for Model Name and Description */
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
- if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
- && !(phba->lmt & LMT_1Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
- && !(phba->lmt & LMT_2Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
- && !(phba->lmt & LMT_4Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
- && !(phba->lmt & LMT_8Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
- && !(phba->lmt & LMT_10Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
- && !(phba->lmt & LMT_16Gb))) {
- /* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board: "
- "Reset link speed to auto: x%x\n",
- phba->cfg_link_speed);
- phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
- }
-
phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- lpfc_init_link(phba, pmb, phba->cfg_topology,
- phba->cfg_link_speed);
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0454 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
-
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- /* Clear all pending interrupts */
- writel(0xffffffff, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- phba->link_state = LPFC_HBA_ERROR;
- if (rc != MBX_BUSY)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
- }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ return rc;
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -668,6 +628,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
+ return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+ uint32_t flag)
+{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
mb = &pmb->u.mb;
pmb->vport = vport;
- lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+ if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+ !(phba->lmt & LMT_1Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+ !(phba->lmt & LMT_2Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+ !(phba->lmt & LMT_4Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+ !(phba->lmt & LMT_8Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+ !(phba->lmt & LMT_10Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+ !(phba->lmt & LMT_16Gb))) {
+ /* Reset link speed to auto */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
+ phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+ }
+ lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t event_data;
struct Scsi_Host *shost;
uint32_t if_type;
- struct lpfc_register portstat_reg;
+ struct lpfc_register portstat_reg = {0};
+ uint32_t reg_err1, reg_err2;
+ uint32_t uerrlo_reg, uemasklo_reg;
+ uint32_t pci_rd_rc1, pci_rd_rc2;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return;
- /* Send an internal error event to mgmt application */
- lpfc_board_errevt_to_mgmt(phba);
-
- /* For now, the actual action for SLI4 device handling is not
- * specified yet, just treated it as adaptor hardware failure
- */
- event_data = FC_REG_DUMP_EVENT;
- shost = lpfc_shost_from_vport(vport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(event_data), (char *) &event_data,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerrlo_reg);
+ pci_rd_rc2 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+ &uemasklo_reg);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+ return;
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- portstat_reg.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO)
+ return;
+ reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+ reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
/* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
- "taking port\n");
+ "taking port offline\n");
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
- return;
+ break;
}
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3143 Port Down: Firmware Restarted\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3144 Port Down: Debug Dump\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3145 Port Down: Provisioning\n");
/*
* On error status condition, the driver needs to wait for port
* ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!rc) {
/* need reset: attempt for port recovery */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Port Error: Attempting "
- "Port Recovery\n");
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) {
lpfc_unblock_mgmt_io(phba);
- return;
+ /* don't report event on forced debug dump */
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ return;
+ else
+ break;
}
/* fall through for not able to recover */
}
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
default:
break;
}
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3123 Report dump event to upper layer\n");
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
@@ -2674,6 +2709,32 @@ lpfc_offline(struct lpfc_hba *phba)
}
/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the SCSI buffers in the system and updates the
+ * physical XRIs assigned to them, because these may change after any
+ * firmware reset.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *sb, *sb_next;
+
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->scsi_buf_list_lock);
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+ sb->cur_iocbq.sli4_xritag =
+ phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+ spin_unlock(&phba->scsi_buf_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
+
+/**
* lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
* @phba: pointer to lpfc hba data structure.
*
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
- /*
- * If the SLI4 port supports extents, posting the rpi header isn't
- * required. Set the expected maximum count and let the actual value
- * get set when extents are fully allocated.
- */
- if (!phba->sli4_hba.rpi_hdrs_in_use) {
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
return rc;
- }
if (phba->sli4_hba.extents_in_use)
return -EIO;
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
* -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
-static int
+int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe));
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
out_free_els_wq:
lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
out_free_els_cq:
lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
}
kfree(phba->sli4_hba.fp_eq);
+ phba->sli4_hba.fp_eq = NULL;
out_free_sp_eq:
lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.els_wq = NULL;
/* Release FCP work queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
kfree(phba->sli4_hba.fcp_wq);
phba->sli4_hba.fcp_wq = NULL;
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release FCP response complete queue */
fcp_qidx = 0;
- do
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
- while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq != NULL)
+ do
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
/* Release fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
kfree(phba->sli4_hba.fp_eq);
phba->sli4_hba.fp_eq = NULL;
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path event queue */
+ if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3147 Fast-path EQs not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_sp_eq;
+ }
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", fcp_eqidx);
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 Mailbox CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0530 ELS CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_cq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_cq;
+ }
fcp_cqidx = 0;
do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0538 Slow-path MQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0536 Slow-path ELS WQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id);
/* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_wq;
+ }
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0534 Fast-path FCP WQ (%d) not "
"allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
out_destroy_fcp_wq:
for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
- fcp_qidx = 0;
- do {
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ fcp_qidx = 0;
+ do {
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ }
/* Unset fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ }
/* Unset slow-path event queue */
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
@@ -7398,22 +7510,25 @@ out:
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
- struct pci_dev *pdev;
-
- /* Obtain PCI device reference */
- if (!phba->pcidev)
- return;
- else
- pdev = phba->pcidev;
-
- /* Free coherent DMA memory allocated */
-
- /* Unmap I/O memory space */
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
- iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ uint32_t if_type;
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
- return;
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_printk(KERN_ERR, &phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
}
/**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade */
- snprintf(file_name, 16, "%s.grp", phba->ModelName);
- error = request_firmware(&fw, file_name, &phba->pcidev->dev);
- if (!error) {
- lpfc_write_firmware(phba, fw);
- release_firmware(fw);
+ /* check for firmware upgrade or downgrade (if_type 2 only) */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
}
/* Check if there are static vports to be created. */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 2ebc7d2540c..20336f09fb3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
+ /* If this is an SLI3 port, configure async status notification. */
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varCfgPort.casabt = 1;
+
/* Now setup pcb */
phba->pcb->type = TYPE_NATIVE_SLI2;
phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+ "3134 Register VFI, mydid:x%x, fcfi:%d, "
+ " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+ vport->fc_myDID,
+ vport->phba->fcf.fcfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi],
+ vport->phba->vpi_ids[vport->vpi],
+ reg_vfi->wwn[0], reg_vfi->wwn[1]);
}
/**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
}
/**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
* @phba: pointer to the hba structure containing.
* @mbox: pointer to lpfc mbox command to initialize.
*
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
**/
int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
- struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
if (!mp || !mp->virt) {
kfree(mp);
- /* dump_fcoe_param failed to allocate memory */
+ /* dump config region 23 failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc_dump_fcoe_param: memory"
+ "2569 lpfc dump config region 23: memory"
" allocation failed\n");
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 10d5b5e4149..ade763d3930 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *hbqbp;
- hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!hbqbp)
return NULL;
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *dma_buf;
- dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ddd02f7c60..e8bb0055994 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -783,6 +783,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
lpfc_device_rm_unused_node, /* DEVICE_RM */
- lpfc_disc_illegal, /* DEVICE_RECOVERY */
+ lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e1e54e5c3a..c60f5d0b386 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp)
+ if (ndlp) {
lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
- memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+ memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+ memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
switch (tag[0]) {
case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = err;
goto out_fail_command;
}
+ /*
+ * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+ * without waiting a bit then indicate that the device is busy.
+ */
+ if (cmnd->retries &&
+ time_before(jiffies, (cmnd->jiffies_at_alloc +
+ msecs_to_jiffies(LPFC_RETRY_PAUSE *
+ cmnd->retries))))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
ndlp = rdata->pnode;
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index ce645b20a6a..9075a08cf78 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -21,6 +21,7 @@
#include <asm/byteorder.h>
struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
#define list_remove_head(list, entry, type, member) \
do { \
@@ -102,7 +103,7 @@ struct fcp_cmnd {
#define WRITE_DATA 0x01 /* Bit 0 */
#define READ_DATA 0x02 /* Bit 1 */
- uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
+ uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
uint32_t fcpDl; /* Total transfer length */
};
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
-
+#define LPFC_RETRY_PAUSE 300
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4d4104f38c9..23a27592388 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -89,15 +89,20 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
- union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+ union lpfc_wqe *temp_wqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_wqe = q->qe[q->host_index].wqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
/* set consumption flag every once in a while */
- if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+ if (!((q->host_index + 1) % q->entry_repost))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
uint32_t released = 0;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
if (q->hba_index == index)
return 0;
do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
- struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+ struct lpfc_mqe *temp_mqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_mqe = q->qe[q->host_index].mqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* Clear the mailbox pointer for completion */
q->phba->mbox = NULL;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
- struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+ struct lpfc_eqe *eqe;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+ eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
{
struct lpfc_cqe *cqe;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+
/* If the next CQE is not valid then we are done */
if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
- struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
- struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+ struct lpfc_rqe *temp_hrqe;
+ struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
int put_index = hq->host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return -ENOMEM;
+ temp_hrqe = hq->qe[hq->host_index].rqe;
+ temp_drqe = dq->qe[dq->host_index].rqe;
+
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return 0;
+
if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
return 0;
hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3575,8 +3619,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
* lpfc_reset_barrier - Make HBA ready for HBA reset
* @phba: Pointer to HBA context object.
*
- * This function is called before resetting an HBA. This
- * function requests HBA to quiesce DMAs before a reset.
+ * This function is called with hbalock held before resetting an HBA and
+ * requests the HBA to quiesce DMAs before the reset.
**/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- uint8_t qindx;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
phba->fcf.fcf_flag = 0;
- /* Clean up the child queue list for the CQs */
- list_del_init(&phba->sli4_hba.mbx_wq->list);
- list_del_init(&phba->sli4_hba.els_wq->list);
- list_del_init(&phba->sli4_hba.hdr_rq->list);
- list_del_init(&phba->sli4_hba.dat_rq->list);
- list_del_init(&phba->sli4_hba.mbx_cq->list);
- list_del_init(&phba->sli4_hba.els_cq->list);
- for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
- list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- qindx = 0;
- do
- list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
- while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
/* Perform FCoE PCI function reset */
+ lpfc_sli4_queue_destroy(phba);
lpfc_pci_function_reset(phba);
/* Restore PCI cmd register */
@@ -4339,6 +4370,11 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
done = 1;
+
+ if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+ (pmb->u.mb.un.varCfgPort.gasabt == 0))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3110 Port did not grant ASABT\n");
}
}
if (!done) {
@@ -4551,9 +4587,9 @@ lpfc_sli_hba_setup_error:
* data structure.
**/
static int
-lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
- LPFC_MBOXQ_t *mboxq)
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
+ LPFC_MBOXQ_t *mboxq;
struct lpfc_dmabuf *mp;
struct lpfc_mqe *mqe;
uint32_t data_length;
@@ -4565,10 +4601,16 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
- mqe = &mboxq->u.mqe;
- if (lpfc_dump_fcoe_param(phba, mboxq))
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
return -ENOMEM;
+ mqe = &mboxq->u.mqe;
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+
mp = (struct lpfc_dmabuf *) mboxq->context1;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -4596,19 +4638,25 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
if (rc) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return 0;
+ rc = 0;
+
+out_free_mboxq:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
}
/**
@@ -4706,7 +4754,6 @@ static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mboxq;
- struct lpfc_mbx_read_config *rd_config;
struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
struct lpfc_controller_attribute *cntl_attr;
struct lpfc_mbx_get_port_name *get_port_name;
@@ -4724,33 +4771,11 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
-
/* obtain link type and link number via READ_CONFIG */
- lpfc_read_config(phba, mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (rc == MBX_SUCCESS) {
- rd_config = &mboxq->u.mqe.un.rd_config;
- if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
- phba->sli4_hba.lnk_info.lnk_tp =
- bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
- phba->sli4_hba.lnk_info.lnk_no =
- bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3081 lnk_type:%d, lnk_numb:%d\n",
- phba->sli4_hba.lnk_info.lnk_tp,
- phba->sli4_hba.lnk_info.lnk_no);
- goto retrieve_ppname;
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3082 Mailbox (x%x) returned ldv:x0\n",
- bf_get(lpfc_mqe_command,
- &mboxq->u.mqe));
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3083 Mailbox (x%x) failed, status:x%x\n",
- bf_get(lpfc_mqe_command, &mboxq->u.mqe),
- bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ lpfc_sli4_read_config(phba);
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+ goto retrieve_ppname;
/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
@@ -4875,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
fcp_eqidx = 0;
- do
- lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
- LPFC_QUEUE_REARM);
- while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ do
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ }
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
- LPFC_QUEUE_REARM);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+ fcp_eqidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ }
}
/**
@@ -5457,6 +5487,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
uint16_t count, base;
unsigned long longs;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
if (phba->sli4_hba.extents_in_use) {
/*
* The port supports resource extents. The XRI, VPI, VFI, RPI
@@ -5538,9 +5570,10 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
* need any action - just exit.
*/
if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
- LPFC_IDX_RSRC_RDY)
- return 0;
-
+ LPFC_IDX_RSRC_RDY) {
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+ lpfc_sli4_remove_rpis(phba);
+ }
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
base = phba->sli4_hba.max_cfg_param.rpi_base;
@@ -5880,14 +5913,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (!mboxq)
return -ENOMEM;
- /*
- * Continue initialization with default values even if driver failed
- * to read FCoE param config regions
- */
- if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
- "2570 Failed to read FCoE parameters\n");
-
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
@@ -5925,6 +5950,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
+ * Continue initialization with default values even if the driver fails
+ * to read the FCoE param config regions; only read the parameters if
+ * the board is FCoE
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE &&
+ lpfc_sli4_read_fcoe_params(phba))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+ "2570 Failed to read FCoE parameters\n");
+
+ /*
* Retrieve sli4 device physical port name, failure of doing it
* is considered as non-fatal.
*/
@@ -6044,6 +6079,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"rc = x%x\n", rc);
goto out_free_mbox;
}
+ /* update physical xri mappings in the scsi buffers */
+ lpfc_scsi_buf_update(phba);
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6205,7 +6242,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
&mboxq->u.mqe.un.reg_fcfi);
+
+ /* Check if the port is configured to be disabled */
+ lpfc_sli_read_link_ste(phba);
}
+
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -6213,10 +6254,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
- if (rc)
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->hba_flag & LINK_DISABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3103 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3104 Adapter failed to issue "
+ "DOWN_LINK mbox cmd, rc:x%x\n", rc);
goto out_unset_queue;
+ }
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+ /* don't perform init_link on SLI4 FC port loopback test */
+ if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ goto out_unset_queue;
+ }
}
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
@@ -7487,6 +7543,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
struct ulp_bde64 *bpl = NULL;
struct ulp_bde64 bde;
struct sli4_sge *sgl = NULL;
+ struct lpfc_dmabuf *dmabuf;
IOCB_t *icmd;
int numBdes = 0;
int i = 0;
@@ -7505,9 +7562,12 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+ if (piocbq->context3)
+ dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+ else
+ return xritag;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
if (!bpl)
return xritag;
@@ -7616,6 +7676,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
int numBdes, i;
struct ulp_bde64 bde;
struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+ uint32_t if_type;
fip = phba->hba_flag & HBA_FIP_SUPPORT;
/* The fcp commands will set command type */
@@ -7669,6 +7731,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
+
wqe->els_req.payload_len = xmit_len;
/* Els_reguest64 has a TMO */
bf_set(wqe_tmo, &wqe->els_req.wqe_com,
@@ -7683,9 +7746,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP) {
+ if (command_type == ELS_COMMAND_FIP)
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ } else if (iocbq->context1) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
}
bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7704,6 +7786,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -7846,6 +7930,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ }
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -8037,6 +8131,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
return IOCB_ERROR;
@@ -8173,6 +8269,137 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ *
+ * The driver calls this routine in response to a XRI ABORT CQE
+ * event from the port. In this event, the driver is required to
+ * recover its login to the rport even though its login may be valid
+ * from the driver's perspective. The failed ABTS notice from the
+ * port indicates the rport is not responding.
+ */
+static void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+ unsigned long flags = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba = vport->phba;
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI, "3093 No rport recovery needed. "
+ "rport in state 0x%x\n",
+ ndlp->nlp_state);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+ shost->host_no, ndlp->nlp_DID,
+ vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ /*
+ * The rport is not responding. Don't attempt ADISC recovery.
+ * Remove the FCP-2 flag to force a PLOGI.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_start(vport);
+}
+
+/**
+ * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port. The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession. The abort could be originated by the
+ * driver or by the port. The ABTS could have been for an ELS
+ * or FCP IO. The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ uint16_t rpi = 0, vpi = 0;
+ struct lpfc_vport *vport = NULL;
+
+ /* The rpi in the ulpContext is vport-sensitive. */
+ vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+ rpi = iocbq->iocb.ulpContext;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3092 Port generated ABTS async event "
+ "on vpi %d rpi %d status 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus);
+
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport)
+ goto err_exit;
+ ndlp = lpfc_findnode_rpi(vport, rpi);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto err_exit;
+
+ if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+ return;
+
+ err_exit:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3095 Event Context not found, no "
+ "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.un.ulpWord[4]);
+}
+
+/**
+ * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port. The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply. The abort could be originated
+ * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ struct lpfc_vport *vport;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3115 Node Context not found, driver "
+ "ignoring abts err event\n");
+ return;
+ }
+ vport = ndlp->vport;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3116 Port generated FCP XRI ABORT event on "
+ "vpi %d rpi %d xri x%x status 0x%x\n",
+ ndlp->vport->vpi, ndlp->nlp_rpi,
+ bf_get(lpfc_wcqe_xa_xri, axri),
+ bf_get(lpfc_wcqe_xa_status, axri));
+
+ if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
/**
* lpfc_sli_async_event_handler - ASYNC iocb handler function
* @phba: Pointer to HBA context object.
@@ -8192,63 +8419,58 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
{
IOCB_t *icmd;
uint16_t evt_code;
- uint16_t temp;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
uint32_t *iocb_w;
icmd = &iocbq->iocb;
evt_code = icmd->un.asyncstat.evt_code;
- temp = icmd->ulpContext;
- if ((evt_code != ASYNC_TEMP_WARN) &&
- (evt_code != ASYNC_TEMP_SAFE)) {
+ switch (evt_code) {
+ case ASYNC_TEMP_WARN:
+ case ASYNC_TEMP_SAFE:
+ temp_event_data.data = (uint32_t) icmd->ulpContext;
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ if (evt_code == ASYNC_TEMP_WARN) {
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0347 Adapter is very hot, please take "
+ "corrective action. temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ } else {
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0340 Adapter temperature is OK now. "
+ "temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ }
+
+ /* Send temperature change event to applications */
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+ LPFC_NL_VENDOR_ID);
+ break;
+ case ASYNC_STATUS_CN:
+ lpfc_sli_abts_err_handler(phba, iocbq);
+ break;
+ default:
iocb_w = (uint32_t *) icmd;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
- pring->ringno,
- icmd->un.asyncstat.evt_code,
+ pring->ringno, icmd->un.asyncstat.evt_code,
iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
- return;
- }
- temp_event_data.data = (uint32_t)temp;
- temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
- if (evt_code == ASYNC_TEMP_WARN) {
- temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0347 Adapter is very hot, please take "
- "corrective action. temperature : %d Celsius\n",
- temp);
- }
- if (evt_code == ASYNC_TEMP_SAFE) {
- temp_event_data.event_code = LPFC_NORMAL_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0340 Adapter temperature is OK now. "
- "temperature : %d Celsius\n",
- temp);
+ break;
}
-
- /* Send temperature change event to applications */
- shost = lpfc_shost_from_vport(phba->pport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(temp_event_data), (char *) &temp_event_data,
- LPFC_NL_VENDOR_ID);
-
}
@@ -8823,12 +9045,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-
- abort_iocb = NULL;
+ struct lpfc_iocbq *abort_iocb = NULL;
if (irsp->ulpStatus) {
+
+ /*
+ * Assume that the port already completed and returned, or
+ * will return the iocb. Just Log the message.
+ */
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
@@ -8846,68 +9070,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
abort_iocb = phba->sli.iocbq_lookup[abort_context];
- /*
- * If the iocb is not found in Firmware queue the iocb
- * might have completed already. Do not free it again.
- */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
- spin_unlock_irq(&phba->hbalock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- return;
- }
- /* For SLI4 the ulpContext field for abort IOCB
- * holds the iotag of the IOCB being aborted so
- * the local abort_context needs to be reset to
- * match the aborted IOCBs ulpContext.
- */
- if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
- abort_context = abort_iocb->iocb.ulpContext;
- }
-
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
- /*
- * make sure we have the right iocbq before taking it
- * off the txcmplq and try to call completion routine.
- */
- if (!abort_iocb ||
- abort_iocb->iocb.ulpContext != abort_context ||
- (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
- spin_unlock_irq(&phba->hbalock);
- else if (phba->sli_rev < LPFC_SLI_REV4) {
- /*
- * leave the SLI4 aborted command on the txcmplq
- * list and the command complete WCQE's XB bit
- * will tell whether the SGL (XRI) can be released
- * immediately or to the aborted SGL list for the
- * following abort XRI from the HBA.
- */
- list_del_init(&abort_iocb->list);
- if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
- abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
- pring->txcmplq_cnt--;
- }
- /* Firmware could still be in progress of DMAing
- * payload, so don't free data buffer till after
- * a hbeat.
- */
- abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
- abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irq(&phba->hbalock);
-
- abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
- (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- } else
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
}
-
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9258,6 +9429,14 @@ void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3096 ABORT_XRI_CN completing on xri x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4]);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9771,7 +9950,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2885 Port Error Detected: "
+ "2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
@@ -10777,6 +10956,9 @@ static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_release *wcqe)
{
+ /* sanity check on queue memory */
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return;
/* Check for the slow-path ELS work queue */
if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10866,6 +11048,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
uint32_t status, rq_id;
unsigned long iflags;
+ /* sanity check on queue memory */
+ if (unlikely(!hrq) || unlikely(!drq))
+ return workposted;
+
if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
else
@@ -11000,6 +11186,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
+ /* sanity check on queue memory */
+ if (unlikely(!speq))
+ return;
list_for_each_entry(childq, &speq->child_list, list) {
if (childq->queue_id == cqid) {
cq = childq;
@@ -11241,12 +11430,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
return;
}
+ if (unlikely(!phba->sli4_hba.fcp_cq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3146 Fast-path completion queues "
+ "does not exist\n");
+ return;
+ }
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0367 Fast-path completion queue "
- "does not exist\n");
+ "(%d) does not exist\n", fcp_cqidx);
return;
}
@@ -11417,6 +11612,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11635,6 +11832,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint16_t dmult;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11751,6 +11951,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!cq || !eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11933,6 +12136,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!mq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12083,6 +12289,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12151,6 +12360,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
+ wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
@@ -12174,6 +12384,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
uint32_t cnt;
+ /* sanity check on queue memory */
+ if (!rq)
+ return;
cnt = lpfc_hbq_defs[qno]->entry_count;
/* Recalc repost for RQs based on buffers initially posted */
@@ -12219,6 +12432,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!hrq || !drq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12420,6 +12636,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!eq)
return -ENODEV;
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12475,6 +12692,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!cq)
return -ENODEV;
mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12528,6 +12746,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!mq)
return -ENODEV;
mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12581,6 +12800,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!wq)
return -ENODEV;
mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12634,6 +12854,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!hrq || !drq)
return -ENODEV;
mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -15252,45 +15473,42 @@ lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
* @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
*
- * This function read region 23 and parse TLV for port status to
- * decide if the user disaled the port. If the TLV indicates the
- * port is disabled, the hba_flag is set accordingly.
+ * This function gets the SLI3 port config region 23 data through a memory
+ * dump mailbox command. On success it returns the size of the data
+ * retrieved; otherwise it returns 0.
**/
-void
-lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
- uint8_t *rgn23_data = NULL;
- uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
int rc;
+ if (!rgn23_data)
+ return 0;
+
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2600 lpfc_sli_read_serdes_param failed to"
- " allocate mailbox memory\n");
- goto out;
+ "2600 failed to allocate mailbox memory\n");
+ return 0;
}
mb = &pmb->u.mb;
- /* Get adapter Region 23 data */
- rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
- if (!rgn23_data)
- goto out;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2601 lpfc_sli_read_link_ste failed to"
- " read config region 23 rc 0x%x Status 0x%x\n",
- rc, mb->mbxStatus);
+ "2601 failed to read config "
+ "region 23, rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
/*
@@ -15303,13 +15521,96 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- rgn23_data + offset,
- mb->un.varDmp.word_cnt);
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
- data_size = offset;
- offset = 0;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets the SLI4 port config region 23 data through a memory
+ * dump mailbox command. On success it returns the size of the data
+ * retrieved; otherwise it returns 0.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+ LPFC_MBOXQ_t *mboxq = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length = 0;
+ int rc;
+
+ if (!rgn23_data)
+ return 0;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3105 failed to allocate mailbox memory\n");
+ return 0;
+ }
+
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+ goto out;
+ mqe = &mboxq->u.mqe;
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc)
+ goto out;
+ data_length = mqe->un.mb_words[5];
+ if (data_length == 0)
+ goto out;
+ if (data_length > DMP_RGN23_SIZE) {
+ data_length = 0;
+ goto out;
+ }
+ lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ uint8_t *rgn23_data = NULL;
+ uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+ else {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+ goto out;
+ data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+ }
if (!data_size)
goto out;
@@ -15373,9 +15674,8 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
goto out;
}
}
+
out:
- if (pmb)
- mempool_free(pmb, phba->mbox_mem_pool);
kfree(rgn23_data);
return;
}
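
The recurring "sanity check on queue memory" guards added throughout lpfc_sli.c above all share one shape: return early when a queue pointer may not have been allocated yet, for example while the adapter is being reset before queue setup has completed. A minimal standalone sketch of that pattern follows; the struct and function names are illustrative only and are not lpfc definitions.

/* Illustrative guard pattern; not lpfc code. */
#include <stddef.h>

struct demo_queue {
	unsigned int host_index;   /* next entry the host will use */
	unsigned int hba_index;    /* last entry the hardware consumed */
	unsigned int entry_count;  /* ring size */
};

/*
 * Release all consumed entries. Returns the number released, or 0 if the
 * queue was never allocated (mirroring the "if (unlikely(!q)) return 0;"
 * guards added in the patch).
 */
static unsigned int demo_queue_release(struct demo_queue *q)
{
	unsigned int released = 0;

	if (q == NULL)
		return 0;

	while (q->hba_index != q->host_index) {
		q->hba_index = (q->hba_index + 1) % q->entry_count;
		released++;
	}
	return released;
}
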
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d5cffd8af34..3f266e2c54e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -291,7 +291,7 @@ struct lpfc_bmbx {
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
-#define LPFC_CQE_DEF_COUNT 256
+#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
void __iomem *STATUSregaddr;
void __iomem *CTRLregaddr;
void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART 0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
} if_type2;
} u;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b0630e37f1e..dd044d01a07 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.27"
+#define LPFC_DRIVER_VERSION "8.3.28"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cff6ca67415..0fe188e6600 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
return NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (!(port_iterator->load_flag & FC_UNLOADING))
- lpfc_printf_vlog(port_iterator, KERN_ERR,
- LOG_VPORT,
+ lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 590ce1ef201..4ceeace8045 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -25,6 +25,7 @@
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
+#include <asm/mac_via.h>
#include <scsi/scsi_host.h>
@@ -149,7 +150,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
do {
if (mep->pdma_regs == NULL) {
- if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+ if (via2_scsi_drq_pending())
return 0;
} else {
if (nubus_readl(mep->pdma_regs) & 0x200)
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index af3a6af97cc..ea2bde206f7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
- "ncr5380", instance)) {
+ if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8dc1b32918d..a01f0aa66f2 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.20
+ * mpi2.h Version: 02.00.22
*
* Version History
* ---------------
@@ -69,6 +69,8 @@
* 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
* 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -94,7 +96,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x14)
+#define MPI2_HEADER_VERSION_UNIT (0x16)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
/* IEEE Simple Element only */
-#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
/* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
/****************************************************************************
* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cfd95b4e300..3a023dad77a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.19
+ * mpi2_cnfg.h Version: 02.00.21
*
* Version History
* ---------------
@@ -140,6 +140,13 @@
* Added SASNotifyPrimitiveMasks field to
* MPI2_CONFIG_PAGE_IOC_7.
* 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up
+ * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ * SAS IO Unit Page 4.
+
* --------------------------------------------------------------------------
*/
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */
/* defines for IO Unit Page 7 IOCTemperatureUnits field */
#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
{
U8 MaxTargetSpinup; /* 0x00 */
U8 SpinupDelay; /* 0x01 */
- U16 Reserved1; /* 0x02 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
* one and check the value returned for NumPhys at runtime.
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
-
/* values for SAS Expander Page 1 DiscoveryInfo field */
#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
/****************************************************************************
* SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
#define MPI2_SASPHY0_PAGEVERSION (0x03)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
/* values for SAS PHY Page 0 Flags field */
#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
-
/* SAS PHY Page 1 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 93d9b6956d0..9a925c07a9e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.17
+ * mpi2_ioc.h Version: 02.00.19
*
* Version History
* ---------------
@@ -110,6 +110,13 @@
* Added Temperature Threshold Event.
* Added Host Message Event.
* Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
* --------------------------------------------------------------------------
*/
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
{
U16 TaskTag; /* 0x00 */
U8 ReasonCode; /* 0x02 */
- U8 Reserved1; /* 0x03 */
+ U8 PhysicalPort; /* 0x03 */
U8 ASC; /* 0x04 */
U8 ASCQ; /* 0x05 */
U16 DevHandle; /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
/* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MEGARAID)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX \
+ (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* defines for the Feature field */
#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
-#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
/* Parameter1 indicates desired PCIe link speed using these defines */
-#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
/* Parameter2 indicates desired PCIe link width using these defines */
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
/* Parameter3 and Parameter4 are reserved */
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index bd61a7b60a2..0601612b875 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.05
+ * mpi2_raid.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,10 @@
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
* 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+
* --------------------------------------------------------------------------
*/
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
-
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
/* RAID Volume Creation Structure */
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
Mpi2RaidOnlineCapacityExpansion_t,
MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /* 0x00 */
+ U16 CandidateDevHandle; /* 0x02 */
+ U32 Flags; /* 0x04 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
/* RAID Volume Indicator Structure */
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 GenericAttributes; /* 0x04 */
+ U32 OEMSpecificAttributes; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
/* RAID Action Reply ActionData union */
typedef union _MPI2_RAID_ACTION_REPLY_DATA
{
- U32 Word[5];
- MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
- U16 VolDevHandle;
- U8 VolumeState;
- U8 PhysDiskNum;
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 2a4bceda364..3cbe677c688 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.06
+ * mpi2_tool.h Version: 02.00.07
*
* Version History
* ---------------
@@ -25,6 +25,8 @@
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
* 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
* Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
* --------------------------------------------------------------------------
*/
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
U8 DevIndex; /* 0x14 */
U8 Action; /* 0x15 */
U8 SGLFlags; /* 0x16 */
- U8 Reserved7; /* 0x17 */
+ U8 Flags; /* 0x17 */
U16 TxDataLength; /* 0x18 */
U16 RxDataLength; /* 0x1A */
U32 Reserved8; /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
/* Toolbox ISTWI Read Write Tool reply message */
typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index beda04a8404..0b2c9558366 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/kthread.h>
#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
-/* diag_buffer_enable is bitwise
- * bit 0 set = TRACE
- * bit 1 set = SNAPSHOT
- * bit 2 set = EXTENDED
- *
- * Either bit can be set, or both
- */
-static int diag_buffer_enable;
-module_param(diag_buffer_enable, int, 0);
-MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
- "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
*
@@ -120,10 +110,34 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
return 0;
}
+
module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt2sas_fwfault_debug, 0644);
/**
+ * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if the controller is removed from the pci subsystem.
+ * Return -1 otherwise.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if (ioc == NULL)
+ return -1;
+
+ pdev = ioc->pdev;
+ if (pdev == NULL)
+ return -1;
+ pci_remove_bus_device(pdev);
+ return 0;
+}
+
+
+/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
* Context: sleep.
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
unsigned long flags;
u32 doorbell;
int rc;
+ struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+ ioc->name, __func__);
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+ * pending commands back to the OS. This call is required to avoid
+ * deadlock at the block layer. A dead IOC will fail to do diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /* Remove the dead host */
+ p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+ "mpt2sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p)) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ } else {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ }
+
+ return; /* don't rearm timer */
+ }
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
if (_base_check_enable_msix(ioc) != 0)
goto try_ioapic;
- ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
+ case MPT2SAS_INTEL_RAMSDALE_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RAMSDALE_BRANDING);
+ break;
default:
break;
}
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25JB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25JB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ break;
default:
break;
}
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
}
if (ioc->chain_dma_pool)
pci_pool_destroy(ioc->chain_dma_pool);
- }
- if (ioc->chain_lookup) {
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
ioc->chain_lookup = NULL;
}
@@ -2330,9 +2396,7 @@ static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
struct mpt2sas_facts *facts;
- u32 queue_size, queue_diff;
u16 max_sge_elements;
- u16 num_of_reply_frames;
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
max_request_credit = (max_queue_depth < facts->RequestCredit)
? max_queue_depth : facts->RequestCredit;
else
- max_request_credit = facts->RequestCredit;
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
ioc->hba_queue_depth = max_request_credit;
ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->chains_needed_per_io = chains_needed_per_io;
- /* reply free queue sizing - taking into account for events */
- num_of_reply_frames = ioc->hba_queue_depth + 32;
-
- /* number of replies frames can't be a multiple of 16 */
- /* decrease number of reply frames by 1 */
- if (!(num_of_reply_frames % 16))
- num_of_reply_frames--;
-
- /* calculate number of reply free queue entries
- * (must be multiple of 16)
- */
-
- /* (we know reply_free_queue_depth is not a multiple of 16) */
- queue_size = num_of_reply_frames;
- queue_size += 16 - (queue_size % 16);
- ioc->reply_free_queue_depth = queue_size;
-
- /* reply descriptor post queue sizing */
- /* this size should be the number of request frames + number of reply
- * frames
- */
-
- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
- /* round up to 16 byte boundary */
- if (queue_size % 16)
- queue_size += 16 - (queue_size % 16);
-
- /* check against IOC maximum reply post queue depth */
- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
- queue_diff = queue_size -
- facts->MaxReplyDescriptorPostQueueDepth;
+ /* reply free queue sizing - taking into account 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- /* round queue_diff up to multiple of 16 */
- if (queue_diff % 16)
- queue_diff += 16 - (queue_diff % 16);
-
- /* adjust hba_queue_depth, reply_free_queue_depth,
- * and queue_size
- */
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ /* align the reply post queue on the next 16 count boundary */
+ if (!(ioc->reply_free_queue_depth % 16))
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+ else
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+ 32 - (ioc->reply_free_queue_depth % 16);
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth = min_t(u16,
+ (facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
}
- ioc->reply_post_queue_depth = queue_size;
+
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
"sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
"depth(%d)\n", ioc->name, ioc->request,
ioc->scsiio_depth));
- /* loop till the allocation succeeds */
- do {
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
- if (ioc->chain_lookup == NULL)
- ioc->chain_depth -= 100;
- } while (ioc->chain_lookup == NULL);
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
request = mpt2sas_base_get_msg_frame(ioc, smid);
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
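
This hunk, like several later ones in mpt2sas_ctl.c and mpt2sas_transport.c, moves init_completion() ahead of mpt2sas_base_put_smid_default(): once the request is posted, the interrupt handler may complete it immediately, so the completion object must already be initialized or the wakeup can be lost. A rough userspace analogue of the same init-before-publish ordering, using a condition variable (names are illustrative only; compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    struct cmd {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             completed;
    };

    /* Stands in for the interrupt handler completing the command. */
    static void *irq_thread(void *arg)
    {
        struct cmd *c = arg;
        pthread_mutex_lock(&c->lock);
        c->completed = 1;
        pthread_cond_signal(&c->done);
        pthread_mutex_unlock(&c->lock);
        return NULL;
    }

    int main(void)
    {
        struct cmd c = { .completed = 0 };
        pthread_t tid;

        /* init_completion() analogue: set up the wait object first ... */
        pthread_mutex_init(&c.lock, NULL);
        pthread_cond_init(&c.done, NULL);

        /* ... and only then hand the command to the "hardware". */
        pthread_create(&tid, NULL, irq_thread, &c);

        pthread_mutex_lock(&c.lock);
        while (!c.completed)
            pthread_cond_wait(&c.done, &c.lock);
        pthread_mutex_unlock(&c.lock);

        pthread_join(tid, NULL);
        puts("command completed");
        return 0;
    }
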
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_free[i] = cpu_to_le32(reply_address);
/* initialize reply queues */
- _base_assign_reply_queues(ioc);
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (ioc->is_driver_loading) {
-
-
-
- ioc->wait_for_discovery_to_complete =
- _base_determine_wait_on_discovery(ioc);
- return r; /* scan_start and scan_finished support */
- }
-
-
- if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
- if (ioc->manu_pg10.OEMIdentifier == 0x80) {
+ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+ == 0x80) {
hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
MFG_PAGE10_HIDE_SSDS_MASK);
if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
ioc->mfg_pg10_hide_flag = hide_flag;
}
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
}
-
r = _base_send_port_enable(ioc, sleep_flag);
if (r)
return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
r = mpt2sas_base_map_resources(ioc);
if (r)
- return r;
+ goto out_free_resources;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] =
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 3c3babc7d26..c7459fdc06c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "10.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 10
+#define MPT2SAS_DRIVER_VERSION "12.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 12
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -157,20 +157,33 @@
/*
* Intel HBA branding
*/
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB040"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
"Intel Integrated RAID Module RMS2LL040"
#define MPT2SAS_INTEL_RS25GB008_BRANDING \
"Intel(R) RAID Controller RS25GB008"
-
+#define MPT2SAS_INTEL_RAMSDALE_BRANDING \
+ "Intel 720 Series SSD"
/*
* Intel HBA SSDIDs
*/
+#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
-
+#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700
/*
* HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
* @percent_complete: resync percent complete
* @direct_io_enabled: Whether direct io to PDs are allowed or not
* @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
* @max_lba: Maximum number of LBA in the volume
* @stripe_sz: Stripe Size of the volume
* @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
u8 percent_complete;
u8 direct_io_enabled;
u8 stripe_exponent;
+ u8 block_exponent;
u64 max_lba;
u32 stripe_sz;
u32 device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
TM_MUTEX_ON = 1,
};
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
/**
* struct MPT2SAS_ADAPTER - per adapter struct
* @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
* @msix_vector_count: number msix vectors
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
resource_size_t **reply_post_host_index;
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
+ MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
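
schedule_dead_ioc_flush_running_cmds is simply a function pointer that the SCSI layer installs at probe time (see the _scsih_probe hunk further down), letting the base layer flush outstanding commands without calling up into scsih directly. A stripped-down sketch of that registration pattern, with invented types and names:

    #include <stdio.h>

    struct adapter;
    typedef void (*flush_running_cmds_fn)(struct adapter *ioc);

    struct adapter {
        const char *name;
        flush_running_cmds_fn flush_running_cmds;   /* filled in by the upper layer */
    };

    static void scsih_flush_running_cmds(struct adapter *ioc)
    {
        printf("%s: returning all outstanding commands to the midlayer\n", ioc->name);
    }

    static void base_handle_dead_ioc(struct adapter *ioc)
    {
        if (ioc->flush_running_cmds)
            ioc->flush_running_cmds(ioc);           /* base layer calls back up */
    }

    int main(void)
    {
        struct adapter ioc = { "ioc0", scsih_flush_running_cmds };
        base_handle_dead_ioc(&ioc);
        return 0;
    }
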
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index aabcb911706..7fceb899029 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
#endif
+ init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
case MPI2_FUNCTION_SCSI_IO_REQUEST:
case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
else
timeout = karg.timeout;
- init_completion(&ioc->ctl_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
timeout*HZ);
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
mpi_request->ProductSpecific[i] =
cpu_to_le32(ioc->product_specific[buffer_type][i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808..193e33e28e4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
-MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
- }
+ } else if (!sas_device->starget) {
+ /* When async scanning is enabled, it's not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
/**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_chain_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
- ioc->name);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+ "available\n", ioc->name));
return NULL;
}
chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_target_priv_data->sas_address);
- if (sas_device)
+ if (sas_device && !sas_target_priv_data->num_luns)
sas_device->starget = NULL;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
u16 sz;
u8 num_pds, count;
- u64 mb = 1024 * 1024;
- u64 tb_2 = 2 * mb * mb;
- u64 capacity;
- u32 stripe_sz;
- u8 i, stripe_exp;
+ unsigned long stripe_sz, block_sz;
+ u8 stripe_exp, block_exp;
+ u64 dev_max_lba;
if (!ioc->is_warpdrive)
return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
+ /* Disable direct I/O if the member drive's max LBA exceeds 32 bits */
+ dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+ if (dev_max_lba >> 32) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+ "disabled for the drive with handle(0x%04x) member"
+ "handle (0x%04x) unsupported max lba 0x%016llx\n",
+ ioc->name, raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (unsigned long long)dev_max_lba);
+ goto out_error;
+ }
+
raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
}
/*
* Assumption for WD: Direct I/O is not supported if the volume is
- * not RAID0, if the stripe size is not 64KB, if the block size is
- * not 512 and if the volume size is >2TB
+ * not RAID0
*/
- if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
- le16_to_cpu(vol_pg0->BlockSize) != 512) {
+ if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
"for the drive with handle(0x%04x): type=%d, "
"s_sz=%uK, blk_size=%u\n", ioc->name,
raid_device->handle, raid_device->volume_type,
- le32_to_cpu(vol_pg0->StripeSize)/2,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
- (le64_to_cpu(vol_pg0->MaxLBA) + 1);
-
- if (capacity > tb_2) {
+ stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+ stripe_exp = find_first_bit(&stripe_sz, 32);
+ if (stripe_exp == 32) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) since drive sz > 2TB\n",
- ioc->name, raid_device->handle);
+ "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ ioc->name, raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
-
- stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
- stripe_exp = 0;
- for (i = 0; i < 32; i++) {
- if (stripe_sz & 1)
- break;
- stripe_exp++;
- stripe_sz >>= 1;
- }
- if (i == 32) {
+ raid_device->stripe_exponent = stripe_exp;
+ block_sz = le16_to_cpu(vol_pg0->BlockSize);
+ block_exp = find_first_bit(&block_sz, 16);
+ if (block_exp == 16) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ "for the drive with handle(0x%04x) invalid block sz %u\n",
ioc->name, raid_device->handle,
- le32_to_cpu(vol_pg0->StripeSize)/2);
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- raid_device->stripe_exponent = stripe_exp;
+ raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
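
The reworked WarpDrive validation uses find_first_bit() to turn the (power-of-two) stripe and block sizes into shift exponents, replacing the old shift-and-test loop. A standalone equivalent built on the compiler's count-trailing-zeros builtin; the explicit power-of-two guard is an extra safety net for this sketch, and the sizes are example values, not real volume geometry:

    #include <stdio.h>

    /* Return the log2 exponent if sz is a non-zero power of two, else -1. */
    static int pow2_exponent(unsigned long sz)
    {
        if (sz == 0 || (sz & (sz - 1)) != 0)
            return -1;
        return __builtin_ctzl(sz);   /* index of the lowest (only) set bit */
    }

    int main(void)
    {
        unsigned long stripe_sz = 128;   /* example: 128 blocks per stripe */
        unsigned long block_sz  = 512;   /* example: 512-byte blocks */

        printf("stripe_exponent=%d block_exponent=%d\n",
               pow2_exponent(stripe_sz), pow2_exponent(block_sz));
        return 0;
    }
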
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
{
u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
u32 stripe_sz, stripe_exp;
- u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
+ u8 num_pds, *cdb_ptr, i;
u8 cdb0 = scmd->cmnd[0];
+ u64 v_llba;
/*
* Try Direct I/O to RAID memeber disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
| cdb_ptr[5])) {
- io_size = scsi_bufflen(scmd) >> 9;
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ i = (cdb0 < READ_16) ? 2 : 6;
/* get virtual lba */
- lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
- &cdb_ptr[6];
- tmp_ptr = (u8 *)&v_lba + 3;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr = *lba_ptr1;
+ v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
if (((u64)v_lba + (u64)io_size - 1) <=
(u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request->DevHandle =
cpu_to_le16(raid_device->
pd_handle[column]);
- tmp_ptr = (u8 *)&p_lba + 3;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2 = *tmp_ptr;
+ (*(__be32 *)(&cdb_ptr[i])) =
+ cpu_to_be32(p_lba);
+ /*
+ * WD: To indicate this I/O is direct I/O
+ */
+ _scsih_scsi_direct_io_set(ioc, smid, 1);
+ }
+ }
+ } else {
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ /* get virtual lba */
+ v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+
+ if ((v_llba + (u64)io_size - 1) <=
+ raid_device->max_lba) {
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = (u32) (v_llba & (stripe_sz - 1));
+
+ /* Check whether IO falls within a stripe */
+ if ((stripe_off + io_size) <= stripe_sz) {
+ num_pds = raid_device->num_pds;
+ p_lba = (u32)(v_llba >> stripe_exp);
+ stripe_unit = p_lba / num_pds;
+ column = p_lba % num_pds;
+ p_lba = (stripe_unit << stripe_exp) +
+ stripe_off;
+ mpi_request->DevHandle =
+ cpu_to_le16(raid_device->
+ pd_handle[column]);
+ (*(__be64 *)(&cdb_ptr[2])) =
+ cpu_to_be64((u64)p_lba);
/*
* WD: To indicate this I/O is directI/O
*/
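
The direct-I/O path translates a virtual LBA on the RAID0 volume into a member-disk column and a physical LBA: the offset within the stripe is kept, the stripe number is split into a per-disk stripe unit and a column, and the two are recombined after the big-endian LBA has been pulled out of the CDB. A worked example of just the mapping arithmetic, with invented geometry:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Example RAID0 geometry: 4 member disks, 64-block stripes (2^6). */
        const uint32_t num_pds    = 4;
        const uint32_t stripe_exp = 6;
        const uint32_t stripe_sz  = 1u << stripe_exp;

        uint64_t v_lba = 1000;                          /* virtual LBA on the volume */
        uint32_t stripe_off  = (uint32_t)(v_lba & (stripe_sz - 1));
        uint32_t stripe_num  = (uint32_t)(v_lba >> stripe_exp);
        uint32_t stripe_unit = stripe_num / num_pds;    /* stripe index on that disk */
        uint32_t column      = stripe_num % num_pds;    /* which member disk */
        uint32_t p_lba       = (stripe_unit << stripe_exp) + stripe_off;

        printf("v_lba=%llu -> column=%u p_lba=%u\n",
               (unsigned long long)v_lba, (unsigned)column, (unsigned)p_lba);
        return 0;
    }
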
@@ -4335,7 +4368,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_NO_CONNECT << 16;
goto out;
}
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
/*
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (_scsih_scsi_direct_io_get(ioc, smid)) {
+ if (_scsih_scsi_direct_io_get(ioc, smid) &&
+ ((ioc_status & MPI2_IOCSTATUS_MASK)
+ != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->scsi_lookup[smid - 1].scmd = scmd;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_TRANSPORT_DISRUPTED << 16;
goto out;
}
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
} else
sas_target_priv_data = NULL;
raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
(unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
*/
_scsih_init_warpdrive_properties(ioc, raid_device);
if (raid_device->handle == handle)
- goto out;
+ return;
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
if (sas_target_priv_data)
sas_target_priv_data->handle = handle;
- goto out;
+ return;
}
}
- out:
+
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = MPT2SAS_SG_DEPTH,
- .max_sectors = 8192,
+ .max_sectors = 32767,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
"value of 64.\n", ioc->name, max_sectors);
- } else if (max_sectors > 8192) {
- shost->max_sectors = 8192;
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
- "default value of 8192.\n", ioc->name,
+ "default value of 32767.\n", ioc->name,
max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_attach_fail;
}
- scsi_scan_host(shost);
if (ioc->is_warpdrive) {
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
} else
ioc->hide_drives = 0;
+ scsi_scan_host(shost);
- _scsih_probe_devices(ioc);
return 0;
out_attach_fail:
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 230732241aa..831047466a5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
"send to sas_addr(0x%016llx)\n", ioc->name,
(unsigned long long)sas_address));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
"send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
"send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number,
phy_operation));
- mpt2sas_base_put_smid_default(ioc, smid);
+
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
"sending smp request\n", ioc->name, __func__));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 5163edb925c..ea8a0b47d66 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4105,7 +4105,7 @@ static long pmcraid_chr_ioctl(
hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
if (!hdr) {
- pmcraid_err("faile to allocate memory for ioctl header\n");
+ pmcraid_err("failed to allocate memory for ioctl header\n");
return -ENOMEM;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e93..a2f1b3043df 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
}
- return -EINVAL;
+ return count;
}
static struct bin_attribute sysfs_fw_dump_attr = {
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 3:
if (ha->optrom_state != QLA_SWRITING)
- return -ENOMEM;
+ return -EINVAL;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
rval, dev, adr, opt, len, buf[8]);
return -EIO;
}
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x7075,
- "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+ "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
rval, dev, adr, opt, len);
return -EIO;
}
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
@@ -1952,8 +1971,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Queue delete failed.\n");
}
- scsi_host_put(vha->host);
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+ scsi_host_put(vha->host);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8b641a8a0c7..b1d0f936bf2 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
done:
return sp;
}
@@ -102,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
- if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -389,6 +390,20 @@ done:
return rval;
}
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 2) {
+ iocbs += (dsds - 2) / 5;
+ if ((dsds - 2) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
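
qla24xx_calc_ct_iocbs() counts IOCB entries for a CT pass-through: the command IOCB holds two data segment descriptors and each continuation IOCB holds five more, i.e. 1 + ceil((dsds - 2) / 5). A quick standalone check of that formula:

    #include <stdio.h>

    /* 1 command IOCB (2 DSDs) plus continuation IOCBs of 5 DSDs each. */
    static unsigned calc_ct_iocbs(unsigned dsds)
    {
        unsigned iocbs = 1;
        if (dsds > 2)
            iocbs += (dsds - 2 + 4) / 5;   /* ceil((dsds - 2) / 5) */
        return iocbs;
    }

    int main(void)
    {
        for (unsigned dsds = 1; dsds <= 13; dsds++)
            printf("dsds=%2u -> iocbs=%u\n", dsds, calc_ct_iocbs(dsds));
        return 0;
    }
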
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
@@ -489,6 +504,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct = sp->ctx;
ct->type = SRB_CT_CMD;
ct->name = "bsg_ct";
+ ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
ct->u.bsg_job = bsg_job;
ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -1653,7 +1669,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
}
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c..7c54624b5b1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,18 +11,20 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
- * | Device Discovery | 0x2083 | |
- * | Queue Command and IO tracing | 0x302e | 0x3008 |
+ * | Module Init and Probe | 0x0116 | 0xfa |
+ * | Mailbox commands | 0x112b | |
+ * | Device Discovery | 0x2084 | |
+ * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
+ * | | | 0x302e |
* | DPC Thread | 0x401c | |
- * | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
- * | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Async Events | 0x5057 | 0x5052 |
+ * | Timer Routines | 0x6011 | 0x600e,0x600f |
+ * | User Space Interactions | 0x709e | |
+ * | Task Management | 0x803c | 0x8025-0x8026 |
+ * | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
@@ -368,7 +370,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
memcpy(iter_reg, ha->fce, ntohl(fcec->size));
- return iter_reg;
+ return (char *)iter_reg + ntohl(fcec->size);
}
static inline void *
@@ -1650,6 +1652,15 @@ qla81xx_fw_dump_failed:
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ return (level & ql2xextended_error_logging) == level;
+}
+
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
@@ -1664,34 +1675,31 @@ qla81xx_fw_dump_failed:
* msg: The message to be displayed.
*/
void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <pci-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset,
- vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id + ql_dbg_offset);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ pr_warn("%s [%s]-%04x:%ld: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no, &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
}
- va_end(ap);
+ va_end(va);
}
@@ -1710,31 +1718,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
if (pdev == NULL)
return;
+ if (!ql_mask_match(level))
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset);
+ va_start(va, fmt);
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
- }
+ vaf.fmt = fmt;
+ vaf.va = &va;
- va_end(ap);
+ /* <module-name> <dev-name>:<msg-id> Message */
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+ va_end(va);
}
/*
@@ -1751,47 +1755,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id, vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id);
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
+ if (level > ql_errlev)
+ return;
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
/*
@@ -1809,43 +1813,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
if (pdev == NULL)
return;
+ if (level > ql_errlev)
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ /* <module-name> <dev-name>:<msg-id> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
void
@@ -1858,20 +1863,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint16_t __iomem *mbx_reg;
- if ((level & ql2xextended_error_logging) == level) {
-
- if (IS_QLA82XX(ha))
- mbx_reg = &reg82->mailbox_in[0];
- else if (IS_FWI2_CAPABLE(ha))
- mbx_reg = &reg24->mailbox0;
- else
- mbx_reg = MAILBOX_REG(ha, reg, 0);
+ if (!ql_mask_match(level))
+ return;
- ql_dbg(level, vha, id, "Mailbox registers:\n");
- for (i = 0; i < 6; i++)
- ql_dbg(level, vha, id,
- "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
- }
+ if (IS_QLA82XX(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
@@ -1881,24 +1886,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
{
uint32_t cnt;
uint8_t c;
- if ((level & ql2xextended_error_logging) == level) {
-
- ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
- "9 Ah Bh Ch Dh Eh Fh\n");
- ql_dbg(level, vha, id, "----------------------------------"
- "----------------------------\n");
-
- ql_dbg(level, vha, id, "");
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x", (uint32_t) c);
- cnt++;
- if (!(cnt % 16))
- printk("\n");
- else
- printk(" ");
- }
- if (cnt % 16)
- ql_dbg(level, vha, id, "\n");
+
+ if (!ql_mask_match(level))
+ return;
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, " ");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
}
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
}
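
ql_dump_buffer() keeps its 16-bytes-per-line hex layout; the change only hoists the level test into ql_mask_match() and returns early. For reference, a plain userspace version of the same dump loop:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void dump_buffer(const uint8_t *b, size_t size)
    {
        for (size_t cnt = 0; cnt < size; cnt++)
            printf("%02x%c", b[cnt], ((cnt + 1) % 16) ? ' ' : '\n');
        if (size % 16)
            printf("\n");
    }

    int main(void)
    {
        uint8_t buf[40];

        for (size_t i = 0; i < sizeof(buf); i++)
            buf[i] = (uint8_t)i;
        dump_buffer(buf, sizeof(buf));
        return 0;
    }
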
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 98a377b9901..5f1b6d9c3dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
};
#define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK 0x1e400000
#define ql_log_fatal 0 /* display fatal errors */
#define ql_log_warn 1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
extern int ql_errlev;
-void
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
-void
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* not covered by upper categories
*/
-
-#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index fcf052c50bf..a6a4eebce4a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -271,6 +271,7 @@ struct srb_iocb {
struct srb_ctx {
uint16_t type;
char *name;
+ int iocbs;
union {
struct srb_iocb *iocb_cmd;
struct fc_bsg_job *bsg_job;
@@ -2244,6 +2245,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
+ int (*iospace_config)(struct qla_hw_data*);
};
/* MSI-X Support *************************************************************/
@@ -2978,10 +2980,6 @@ typedef struct scsi_qla_host {
atomic_dec(&__vha->vref_count); \
} while (0)
-
-#define qla_printk(level, ha, format, arg...) \
- dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
-
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9..408679be8fd 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -572,12 +572,13 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
size_t, char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 37937aa3c3b..4aea4ae2330 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
"GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
- ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
"GA_NXT failed, rejected request ga_nxt_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
sns_cmd->p.gan_data, 16);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f187..1fa067e053d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
ctx->u.iocb_cmd = iocb;
iocb->free = qla2x00_ctx_sp_free;
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - portid=%02x%02x%02x.\n",
- ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+ ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, fcport->login_retry);
+ "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
return rval;
done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
goto done_free_sp;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
- ql_log(ql_log_info, vha, 0x0040,
+ ql_dbg(ql_dbg_init, vha, 0x0040,
"Configuring PCI space...\n");
rval = ha->isp_ops->pci_config(vha);
if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
ha->isp_ops->get_flash_version(vha, req->ring);
- ql_log(ql_log_info, vha, 0x0061,
+ ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0078,
+ ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
ha->flags.fce_enabled = 0;
goto try_eft;
}
- ql_log(ql_log_info, vha, 0x00c0,
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
tc_dma);
goto cont_alloc;
}
- ql_log(ql_log_info, vha, 0x00c3,
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
}
return;
}
- ql_log(ql_log_info, vha, 0x00c5,
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
"Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
ha->fw_dump_len = dump_size;
@@ -1509,7 +1511,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
@@ -1928,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn,
- vha, 0x8026,
+ vha, 0x8007,
"Init chip failed.\n");
break;
}
@@ -1937,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- ql_dbg(ql_dbg_taskm, vha, 0x8025,
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
"Increasing wait time by %ld. "
"New time %ld.\n", cs84xx_time,
wtime);
@@ -1980,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Delay for a while */
msleep(500);
-
- ql_dbg(ql_dbg_taskm, vha, 0x8039,
- "fw_state=%x curr time=%lx.\n", state[0], jiffies);
} while (1);
ql_dbg(ql_dbg_taskm, vha, 0x803a,
"fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
state[1], state[2], state[3], state[4], jiffies);
- if (rval) {
+ if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
ql_log(ql_log_warn, vha, 0x803b,
"Firmware ready **** FAILED ****.\n");
}
@@ -2385,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
* internal driver logging.
*/
if (nv->host_p[0] & BIT_7)
- ql2xextended_error_logging = 0x7fffffff;
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
/* Always load RISC code on non ISP2[12]00 chips. */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4187,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+ __func__);
}
return(status);
@@ -4637,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
struct req_que *req = ha->req_q_map[0];
ql_dbg(ql_dbg_init, vha, 0x008b,
- "Loading firmware from flash (%x).\n", faddr);
+ "FW: Loading firmware from flash (%x).\n", faddr);
rval = QLA_SUCCESS;
@@ -4835,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0092,
- "Loading via request-firmware.\n");
+ ql_dbg(ql_dbg_init, vha, 0x0092,
+ "FW: Loading via request-firmware.\n");
rval = QLA_SUCCESS;
@@ -5424,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- ql_log(ql_log_info, vha, 0x803d,
+ ql_log(ql_log_info, vha, 0x8000,
"Configure loop done, status = 0x%x.\n", status);
}
@@ -5457,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803e,
+ ql_log(ql_log_warn, vha, 0x8001,
"Unable to reinitialize FCE (%d).\n",
rval);
ha->flags.fce_enabled = 0;
@@ -5469,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803f,
+ ql_log(ql_log_warn, vha, 0x8010,
"Unable to reinitialize EFT (%d).\n",
rval);
}
@@ -5477,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
}
if (!status) {
- ql_dbg(ql_dbg_taskm, vha, 0x8040,
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
"qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5495,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8041,
+ ql_log(ql_log_warn, vha, 0x8016,
"qla82xx_restart_isp **** FAILED ****.\n");
}
@@ -5642,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (priority < 0)
return QLA_FUNCTION_FAILED;
+ if (IS_QLA82XX(vha->hw)) {
+ fcport->fcp_prio = priority & 0xf;
+ return QLA_SUCCESS;
+ }
+
ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
- if (ret == QLA_SUCCESS)
- fcport->fcp_prio = priority;
- else
+ if (ret == QLA_SUCCESS) {
+ if (fcport->fcp_prio != priority)
+ ql_dbg(ql_dbg_user, vha, 0x709e,
+ "Updated FCP_CMND priority - value=%d loop_id=%d "
+ "port_id=%02x%02x%02x.\n", priority,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->fcp_prio = priority & 0xf;
+ } else
ql_dbg(ql_dbg_user, vha, 0x704f,
- "Unable to activate fcp priority, ret=0x%x.\n", ret);
-
+ "Unable to update FCP_CMND priority - ret=0x%x for "
+ "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0..55a96761b5a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,6 @@
#include <scsi/scsi_tcq.h>
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -120,11 +118,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +289,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -469,6 +466,42 @@ queuing_error:
}
/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(vha);
+ } else {
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
+ }
+}
+
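
qla2x00_start_iocbs() centralizes the request-ring kick: advance the producer index with wraparound, then write the new index to whichever register the chip generation uses. The ring-pointer arithmetic on its own looks like this (simplified, no hardware access):

    #include <stdio.h>

    #define RING_LEN 8

    struct req_que {
        unsigned ring_index;
        int ring[RING_LEN];
        int *ring_ptr;
    };

    static void advance_ring(struct req_que *req)
    {
        req->ring_index++;
        if (req->ring_index == RING_LEN) {      /* wrap back to the start */
            req->ring_index = 0;
            req->ring_ptr = req->ring;
        } else {
            req->ring_ptr++;
        }
    }

    int main(void)
    {
        struct req_que req = { .ring_index = 0 };
        req.ring_ptr = req.ring;

        for (int i = 0; i < 10; i++) {
            advance_ring(&req);
            printf("index=%u\n", req.ring_index);
        }
        return 0;
    }
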
+/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
* @ha: HA context
* @loop_id: loop ID
@@ -490,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
+ req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
@@ -516,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
wmb();
- qla2x00_isp_cmd(vha, req);
+ qla2x00_start_iocbs(vha, req);
return (QLA_SUCCESS);
}
@@ -537,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
/**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
*
- * Note: The caller must hold the hardware lock before calling this routine.
+ * Returns the number of IOCB entries needed to store @dsds.
*/
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
+inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
- struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ uint16_t iocbs;
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
- "IOCB data:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
- (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
+ iocbs = 1;
+ if (dsds > 1) {
+ iocbs += (dsds - 1) / 5;
+ if ((dsds - 1) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
- /* Set chip new ring index. */
- if (IS_QLA82XX(ha)) {
- uint32_t dbval = 0x04 | (ha->portnum << 5);
+ cmd = sp->cmd;
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD((unsigned long __iomem *)
- ha->nxdb_wr_ptr, dbval);
- wmb();
- }
- }
- } else if (ha->mqenable) {
- /* Set chip new ring index. */
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else {
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg = sg_next(cur_seg);
+ avail_dsds--;
}
}
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
}
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
+ * for Command Type 6.
*
* @dsds: number of data segment decriptors needed
*
- * Returns the number of IOCB entries needed to store @dsds.
+ * Returns the number of DSD lists needed to store @dsds.
*/
inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+qla24xx_calc_dsd_lists(uint16_t dsds)
{
- uint16_t iocbs;
+ uint16_t dsd_lists = 0;
- iocbs = 1;
- if (dsds > 1) {
- iocbs += (dsds - 1) / 5;
- if ((dsds - 1) % 5)
- iocbs++;
- }
- return iocbs;
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
}
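+/*
+ * Worked example (illustrative only, assuming QLA_DSDS_PER_IOCB == 37):
+ * dsds = 100 needs two full DSD lists plus one partial list, so the
+ * function returns 3, i.e. ceil(dsds / QLA_DSDS_PER_IOCB).
+ */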
+
/**
* qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
* IOCB types.
@@ -684,7 +769,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -946,6 +1031,7 @@ alloc_and_fill:
*cur_dsd++ = 0;
return 0;
}
+
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
@@ -1005,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1732,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
+ struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@@ -1760,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
+ /* Adjust entry-counts as needed. */
+ if (sp->ctx) {
+ ctx = sp->ctx;
+ req_cnt = ctx->iocbs;
+ }
+
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
@@ -1794,42 +1887,6 @@ queuing_error:
}
static void
-qla2x00_start_iocbs(srb_t *sp)
-{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct req_que *req = ha->req_q_map[0];
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
- if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else {
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
- } else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
- }
- }
-}
-
-static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_ctx *ctx = sp->ctx;
@@ -2070,7 +2127,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2154,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2200,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2158,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->entry_count = entry_count;
}
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ char tag[2];
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI command bigger than 16 bytes must be
+ * multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -2194,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_ct_iocb(sp, pkt) :
- qla2x00_ct_iocb(sp, pkt);
+ qla24xx_ct_iocb(sp, pkt) :
+ qla2x00_ct_iocb(sp, pkt);
break;
case SRB_ADISC_CMD:
IS_FWI2_CAPABLE(ha) ?
@@ -2210,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(sp);
+ qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aee..e804585cc59 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -242,32 +242,34 @@ static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
- if (cnt == 4 || cnt == 5)
+ if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
- else
+ else if (mboxes & BIT_0)
ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
wptr++;
- }
-
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5000,
- "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5001,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
}
}
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
return;
ql_dbg(ql_dbg_async, vha, 0x5022,
- "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
vha->host_no, event[aen & 0xff], timeout);
rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- ql_log(ql_log_info, vha, 0x5009,
+ ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
ha->link_data_rate = mb[1];
}
- ql_log(ql_log_info, vha, 0x500a,
+ ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n", link_speed);
vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
- ql_log(ql_log_info, vha, 0x500b,
+ ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
@@ -519,7 +521,7 @@ skip_rio:
break;
case MBA_LIP_RESET: /* LIP reset occurred */
- ql_log(ql_log_info, vha, 0x500c,
+ ql_dbg(ql_dbg_async, vha, 0x500c,
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- ql_log(ql_log_info, vha, 0x500f,
+ ql_dbg(ql_dbg_async, vha, 0x500f,
"Configuration change detected: value=%x.\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
ql_dbg(ql_dbg_async, vha, 0x5043,
- "Async-%s error entry - portid=%02x%02x%02x "
+ "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
"entry-status=%x status=%x state-flag=%x "
- "status-flags=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "status-flags=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mbx->entry_status,
le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
le16_to_cpu(mbx->status_flags));
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
(uint8_t *)mbx, sizeof(*mbx));
goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5045,
- "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_log(ql_log_warn, vha, 0x5046,
- "Async-%s failed - portid=%02x%02x%02x status=%x "
- "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
- type, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
- le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+ "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7));
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
ql_log(ql_log_info, vha, 0x503f,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
- type, comp_status, fw_status[1], fw_status[2],
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
ql_log(ql_log_info, vha, 0x5040,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
- type, comp_status,
+ type, sp->handle, comp_status,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, vha, 0x5034,
- "Async-%s error entry - "
+ "Async-%s error entry - hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, logio->entry_status);
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
(uint8_t *)logio, sizeof(*logio));
goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5036,
- "Async-%s complete - portid=%02x%02x%02x "
- "iop0=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa,
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, vha, 0x5037,
- "Async-%s failed - portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n",
- type, fcport->d_id.b.domain,
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sts->entry_status) {
ql_log(ql_log_warn, vha, 0x5038,
- "Async-%s error - entry-status(%x).\n",
- type, sts->entry_status);
+ "Async-%s error - hdl=%x entry-status(%x).\n",
+ type, sp->handle, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, vha, 0x5039,
- "Async-%s error - completion status(%x).\n",
- type, sts->comp_status);
+ "Async-%s error - hdl=%x completion status(%x).\n",
+ type, sp->handle, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
ql_log(ql_log_warn, vha, 0x503a,
- "Async-%s error - no response info(%x).\n",
- type, sts->scsi_status);
+ "Async-%s error - hdl=%x no response info(%x).\n",
+ type, sp->handle, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, vha, 0x503b,
- "Async-%s error - not enough response(%d).\n",
- type, sts->rsp_data_len);
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
} else if (sts->data[3]) {
ql_log(ql_log_warn, vha, 0x503c,
- "Async-%s error - response(%x).\n",
- type, sts->data[3]);
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
} else {
error = 0;
}
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
if (pkt->entry_status != 0) {
- ql_log(ql_log_warn, vha, 0x5035,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
static inline void
-
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
{
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sp->request_sense_length != 0)
rsp->status_srb = sp;
- ql_dbg(ql_dbg_io, vha, 0x301c,
- "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
- cp->device->lun, cp);
- if (sense_len)
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+ "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
cp->sense_buffer, sense_len);
+ }
}
struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
}
if (k != blocks_done) {
- qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+ ql_log(ql_log_warn, vha, 0x302f,
"unexpected tag values tag:lba=%x:%llx)\n",
e_ref_tag, (unsigned long long)lba_s);
return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- ql_log(ql_log_warn, vha, 0x3017,
+ ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- ql_log(ql_log_warn, vha, 0x3018,
+ ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- ql_log(ql_log_warn, vha, 0x3019,
+ ql_dbg(ql_dbg_io, vha, 0x3019,
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301a,
+ ql_dbg(ql_dbg_io, vha, 0x301a,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x301b,
+ ql_dbg(ql_dbg_io, vha, 0x301b,
"QUEUE FULL detected.\n");
break;
}
@@ -1735,19 +1733,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_log(ql_log_warn, vha, 0x301d,
+ ql_dbg(ql_dbg_io, vha, 0x301d,
"Dropped frame(s) detected "
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301e,
+ ql_dbg(ql_dbg_io, vha, 0x301e,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
}
} else {
- ql_log(ql_log_warn, vha, 0x301f,
+ ql_dbg(ql_dbg_io, vha, 0x301f,
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
@@ -1774,7 +1772,7 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x3020,
+ ql_dbg(ql_dbg_io, vha, 0x3020,
"QUEUE FULL detected.\n");
logit = 1;
break;
@@ -1838,10 +1836,15 @@ out:
if (logit)
ql_dbg(ql_dbg_io, vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) "
- "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+ "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
+ "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
- comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
- cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ comp_status, scsi_status, cp->result, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
+ cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
+ cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
}
}
+static int
+qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx;
+
+ if (!sp->ctx)
+ return 1;
+
+ ctx = sp->ctx;
+
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD ||
+ ctx->type == SRB_TM_CMD) {
+ ctx->u.iocb_cmd->done(sp);
+ return 0;
+ } else if (ctx->type == SRB_ADISC_CMD) {
+ ctx->u.iocb_cmd->free(sp);
+ return 0;
+ } else {
+ struct fc_bsg_job *bsg_job;
+
+ bsg_job = ctx->u.bsg_job;
+ if (ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD)
+ kfree(sp->fcport);
+
+ bsg_job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_OK;
+ bsg_job->reply->result = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ bsg_job->job_done(bsg_job);
+ return 0;
+ }
+ return 1;
+}
+
/**
* qla2x00_error_entry() - Process an error entry.
* @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
- uint32_t handle = LSW(pkt->handle);
+ const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
ql_dbg(ql_dbg_async, vha, 0x502f,
"UNKNOWN flag error.\n");
- /* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS)
- sp = req->outstanding_cmds[handle];
- else
- sp = NULL;
-
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[handle] = NULL;
-
- /* Bad payload or header */
- if (pkt->entry_status &
- (RF_INV_E_ORDER | RF_INV_E_COUNT |
- RF_INV_E_PARAM | RF_INV_E_TYPE)) {
- sp->cmd->result = DID_ERROR << 16;
- } else if (pkt->entry_status & RF_BUSY) {
- sp->cmd->result = DID_BUS_BUSY << 16;
- } else {
- sp->cmd->result = DID_ERROR << 16;
+ if (qla2x00_free_sp_ctx(vha, sp)) {
+ if (pkt->entry_status &
+ (RF_INV_E_ORDER | RF_INV_E_COUNT |
+ RF_INV_E_PARAM | RF_INV_E_TYPE)) {
+ sp->cmd->result = DID_ERROR << 16;
+ } else if (pkt->entry_status & RF_BUSY) {
+ sp->cmd->result = DID_BUS_BUSY << 16;
+ } else {
+ sp->cmd->result = DID_ERROR << 16;
+ }
+ qla2x00_sp_compl(ha, sp);
}
- qla2x00_sp_compl(ha, sp);
-
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
@@ -1977,26 +2011,30 @@ static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)&reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- wptr++;
- }
+ if (mboxes & BIT_0)
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x504d,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x504e,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
+ wptr++;
}
}
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- ql_dbg(ql_dbg_async, vha, 0x5029,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac..34344d3f865 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
@@ -2870,7 +2887,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) == 1) {
+ if (MSB(stat) != 0) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce..1cd46cd7ff9 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
"!= Read crbwin (0x%x), off=0x%lx.\n",
- ha->crb_win, win_read, *off);
+ __func__, ha->crb_win, win_read, *off);
}
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
}
/* strange address given */
ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%x: Warning: unm_nic_pci_set_crbwindow "
+ "%s: Warning: unm_nic_pci_set_crbwindow "
"called with an unknown address(%llx).\n",
QLA2XXX_DRIVER_NAME, off);
return off;
@@ -1055,7 +1055,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
"ROM lock failed.\n");
return -1;
}
- return 0;;
+ return 0;
}
static int
@@ -1711,12 +1711,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
return 0;
@@ -1744,7 +1744,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
ql_dbg(ql_dbg_init, vha, 0x0043,
- "Chip revision:%ld.\n",
+ "Chip revision:%d.\n",
ha->chip_revision);
return 0;
}
@@ -2023,13 +2023,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
wptr++;
}
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5052,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
+ if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5053,
"MBX pointer ERROR.\n");
- }
}
/*
@@ -2543,484 +2539,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
- uint16_t tot_dsds)
-{
- uint32_t *cur_dsd = NULL;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- struct scsi_cmnd *cmd;
- struct scatterlist *cur_seg;
- uint32_t *dsd_seg;
- void *next_dsd;
- uint8_t avail_dsds;
- uint8_t first_iocb = 1;
- uint32_t dsd_list_len;
- struct dsd_dma *dsd_ptr;
- struct ct6_dsd *ctx;
-
- cmd = sp->cmd;
-
- /* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_6);
-
- /* No data transfer */
- if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
- return 0;
- }
-
- vha = sp->fcport->vha;
- ha = vha->hw;
-
- /* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
- } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
- }
-
- cur_seg = scsi_sglist(cmd);
- ctx = sp->ctx;
-
- while (tot_dsds) {
- avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
- QLA_DSDS_PER_IOCB : tot_dsds;
- tot_dsds -= avail_dsds;
- dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
- next_dsd = dsd_ptr->dsd_addr;
- list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
- list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
- ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
-
- if (first_iocb) {
- first_iocb = 0;
- dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
- *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(dsd_list_len);
- }
- cur_dsd = (uint32_t *)next_dsd;
- while (avail_dsds) {
- dma_addr_t sle_dma;
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- cur_seg = sg_next(cur_seg);
- avail_dsds--;
- }
- }
-
- /* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
- return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
- uint16_t dsd_lists = 0;
-
- dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
- if (dsds % QLA_DSDS_PER_IOCB)
- dsd_lists++;
- return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
- int ret, nseg;
- unsigned long flags;
- struct scsi_cmnd *cmd;
- uint32_t *clr_ptr;
- uint32_t index;
- uint32_t handle;
- uint16_t cnt;
- uint16_t req_cnt;
- uint16_t tot_dsds;
- struct device_reg_82xx __iomem *reg;
- uint32_t dbval;
- uint32_t *fcp_dl;
- uint8_t additional_cdb_len;
- struct ct6_dsd *ctx;
- struct scsi_qla_host *vha = sp->fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = NULL;
- struct rsp_que *rsp = NULL;
- char tag[2];
-
- /* Setup device pointers. */
- ret = 0;
- reg = &ha->iobase->isp82;
- cmd = sp->cmd;
- req = vha->req;
- rsp = ha->rsp_q_map[0];
-
- /* So we know we haven't pci_map'ed anything yet */
- tot_dsds = 0;
-
- dbval = 0x04 | (ha->portnum << 5);
-
- /* Send marker if required */
- if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x300c,
- "qla2x00_marker failed for cmd=%p.\n", cmd);
- return QLA_FUNCTION_FAILED;
- }
- vha->marker_needed = 0;
- }
-
- /* Acquire ring specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
- handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == MAX_OUTSTANDING_COMMANDS)
- goto queuing_error;
-
- /* Map the sg table so we have an accurate count of sg entries needed */
- if (scsi_sg_count(cmd)) {
- nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), cmd->sc_data_direction);
- if (unlikely(!nseg))
- goto queuing_error;
- } else
- nseg = 0;
-
- tot_dsds = nseg;
-
- if (tot_dsds > ql2xshiftctondsd) {
- struct cmd_type_6 *cmd_pkt;
- uint16_t more_dsd_lists = 0;
- struct dsd_dma *dsd_ptr;
- uint16_t i;
-
- more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
- ql_dbg(ql_dbg_io, vha, 0x300d,
- "Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
- cmd);
- goto queuing_error;
- }
-
- if (more_dsd_lists <= ha->gbl_dsd_avail)
- goto sufficient_dsds;
- else
- more_dsd_lists -= ha->gbl_dsd_avail;
-
- for (i = 0; i < more_dsd_lists; i++) {
- dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr) {
- ql_log(ql_log_fatal, vha, 0x300e,
- "Failed to allocate memory for dsd_dma "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
-
- dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
- GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
- if (!dsd_ptr->dsd_addr) {
- kfree(dsd_ptr);
- ql_log(ql_log_fatal, vha, 0x300f,
- "Failed to allocate memory for dsd_addr "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
- }
-
-sufficient_dsds:
- req_cnt = 1;
-
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
-
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!sp->ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
- GFP_ATOMIC, &ctx->fcp_cmnd_dma);
- if (!ctx->fcp_cmnd) {
- ql_log(ql_log_fatal, vha, 0x3011,
- "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
- }
-
- /* Initialize the DSD list and dma handle */
- INIT_LIST_HEAD(&ctx->dsd_list);
- ctx->dsd_use_cnt = 0;
-
- if (cmd->cmd_len > 16) {
- additional_cdb_len = cmd->cmd_len - 16;
- if ((cmd->cmd_len % 4) != 0) {
- /* SCSI command bigger than 16 bytes must be
- * multiple of 4
- */
- ql_log(ql_log_warn, vha, 0x3012,
- "scsi cmd len %d not multiple of 4 "
- "for cmd=%p.\n", cmd->cmd_len, cmd);
- goto queuing_error_fcp_cmnd;
- }
- ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
- } else {
- additional_cdb_len = 0;
- ctx->fcp_cmnd_len = 12 + 16 + 4;
- }
-
- cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- /* Build IOCB segments */
- if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
- goto queuing_error_fcp_cmnd;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
- /* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
- ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 1;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 2;
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_ORDERED;
- break;
- }
- }
-
- memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
- additional_cdb_len);
- *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
- cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
- cmd_pkt->fcp_cmnd_dseg_address[0] =
- cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
- cmd_pkt->fcp_cmnd_dseg_address[1] =
- cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
- sp->flags |= SRB_FCP_CMND_DMA_VALID;
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
- } else {
- struct cmd_type_7 *cmd_pkt;
- req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
- sizeof(cmd_pkt->lun));
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- cmd_pkt->task = TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- cmd_pkt->task = TSK_ORDERED;
- break;
- }
- }
-
- /* Load SCSI command packet. */
- memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
- host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
- /* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen.
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
-
- }
- /* Build command packet. */
- req->current_outstanding_cmd = handle;
- req->outstanding_cmds[handle] = sp;
- sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- req->cnt -= req_cnt;
- wmb();
-
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- sp->flags |= SRB_DMA_VALID;
-
- /* Set chip new ring index. */
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- }
- }
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
- if (tot_dsds)
- scsi_dma_unmap(cmd);
-
- if (sp->ctx) {
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- return QLA_FUNCTION_FAILED;
-}
-
static uint32_t *
qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t length)
@@ -3272,9 +2790,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
}
void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
@@ -3659,11 +3177,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
qla82xx_md_prep(vha);
- } else
- ql_log(ql_log_info, vha, 0xb02e,
- "Firmware dump available to retrieve\n",
- vha->host_no);
- }
+ }
+ } else
+ ql_log(ql_log_info, vha, 0xb02e,
+ "Firmware dump available to retrieve\n");
}
return rval;
}
@@ -3758,7 +3275,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla82xx_check_md_needed(vha);
ha->flags.isp82xx_reset_owner = 0;
goto exit;
case QLA82XX_DEV_COLD:
@@ -3817,6 +3333,20 @@ exit:
return rval;
}
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3369,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3392,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3408,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4052,7 +3583,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
}
}
ql_dbg(ql_dbg_p3p, vha, 0xb027,
- "%s status=%d.\n", status);
+ "%s: status=%d.\n", __func__, status);
return status;
}
@@ -4073,10 +3604,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
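
For reference, the doorbell sequence removed above together with the duplicated qla82xx_start_scsi() path boils down to the following write/read-back/verify pattern. This is only a condensed sketch of the deleted hunk, reusing its accessors and fields, not a drop-in replacement:

/* Condensed sketch of the removed "write, read and verify" doorbell
 * logic: post the new request-ring index and spin until the chip
 * echoes the same value back through the read pointer. */
static void qla82xx_sketch_ring_doorbell(struct qla_hw_data *ha,
					 struct req_que *req, uint32_t dbval)
{
	dbval |= (req->id << 8) | (req->ring_index << 16);

	if (ql2xdbwr) {
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		return;
	}

	WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
	wmb();
	while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
	}
}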
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc..57a226be339 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
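
These two new bits are what the watchdog hunk above ORs together to disable pause-frame transmit on both XG ports before collecting the halt status; the call reduces to this fragment from that hunk:

/* Disable pause transmit on NIU ports 0 and 1, as done in
 * qla82xx_watchdog() before dumping the halt status registers. */
qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
    CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1);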
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc62..4ed1e4a96b9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\t0x1e400000 - Preferred value for capturing essential "
+ "debug information (equivalent to old "
+ "ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xshiftctondsd = 6;
@@ -199,14 +202,14 @@ int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
- "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+ "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +426,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +818,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -889,14 +850,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_taskm, vha, 0x8000,
- "Entered %s for cmd=%p.\n", __func__, cmd);
if (!CMD_SP(cmd))
return SUCCESS;
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8001,
- "Return value of fc_block_scsi_eh=%d.\n", ret);
if (ret != 0)
return ret;
ret = SUCCESS;
@@ -912,7 +869,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_dbg(ql_dbg_taskm, vha, 0x8002,
- "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+ "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+ vha->host_no, id, lun, sp, cmd);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
@@ -920,10 +878,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_dbg(ql_dbg_taskm, vha, 0x8003,
- "Abort command mbx failed for cmd=%p.\n", cmd);
+ "Abort command mbx failed cmd=%p.\n", cmd);
} else {
ql_dbg(ql_dbg_taskm, vha, 0x8004,
- "Abort command mbx success.\n");
+ "Abort command mbx success cmd=%p.\n", cmd);
wait = 1;
}
@@ -939,13 +897,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x8006,
- "Abort handler timed out for cmd=%p.\n", cmd);
+ "Abort handler timed out cmd=%p.\n", cmd);
ret = FAILED;
}
}
ql_log(ql_log_info, vha, 0x801c,
- "Abort command issued -- %d %x.\n", wait, ret);
+ "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+ vha->host_no, id, lun, wait, ret);
return ret;
}
@@ -1014,19 +973,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
int err;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8007,
- "fcport is NULL.\n");
return FAILED;
}
err = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8008,
- "fc_block_scsi_eh ret=%d.\n", err);
if (err != 0)
return err;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
cmd->device->id, cmd->device->lun, cmd);
err = 0;
@@ -1035,12 +990,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1057,15 +1006,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
- cmd->device->id, cmd->device->lun, cmd);
+ "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+ vha->host_no, cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
- reset_errors[err], cmd->device->id, cmd->device->lun);
+ "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
return FAILED;
}
@@ -1116,20 +1066,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8010,
- "fcport is NULL.\n");
return ret;
}
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8011,
- "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -1137,10 +1083,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1154,7 +1099,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
- "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+ "BUS RESET %s nexus=%ld:%d:%d.\n",
+	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
@@ -1188,33 +1134,20 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8016,
- "fcport is NULL.\n");
return ret;
}
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8017,
- "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
ql_log(ql_log_info, vha, 0x8018,
- "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+ "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1251,8 +1184,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ret = SUCCESS;
eh_host_reset_lock:
- qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
- (ret == FAILED) ? "failed" : "succeeded");
+ ql_log(ql_log_info, vha, 0x8017,
+ "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
@@ -1297,16 +1231,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -1405,10 +1336,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
return;
ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
- "Queue depth adjusted-down "
- "to %d for scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1430,10 +1359,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
ql_dbg(ql_dbg_io, vha, 0x302a,
- "Queue depth adjusted-up to %d for "
- "scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static int
@@ -1557,6 +1484,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+ resource_size_t pio;
+ uint16_t msix;
+ int cpus;
+
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (!(ha->bars & 1))
+ goto skip_pio;
+
+ /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+ pio = pci_resource_start(ha->pdev, 0);
+ if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ } else {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%llu.\n",
+ (unsigned long long)ha->pio_address);
+
+skip_pio:
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+ (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ goto mqiobase_exit;
+
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+ pci_resource_len(ha->pdev, 3));
+ if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return (0);
+
+iospace_error_exit:
+ return (-ENOMEM);
+}
+
+
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1591,6 +1630,7 @@ static struct isp_operations qla2100_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1627,6 +1667,7 @@ static struct isp_operations qla2300_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1663,6 +1704,7 @@ static struct isp_operations qla24xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1699,6 +1741,7 @@ static struct isp_operations qla25xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1735,6 +1778,7 @@ static struct isp_operations qla81xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla82xx_isp_ops = {
@@ -1771,6 +1815,7 @@ static struct isp_operations qla82xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
};
static inline void
@@ -1880,121 +1925,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
else
ha->flags.port0 = 0;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
- "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+ "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
ha->device_type, ha->flags.port0, ha->fw_srisc_address);
}
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
- resource_size_t pio;
- uint16_t msix;
- int cpus;
-
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
- if (pci_request_selected_regions(ha->pdev, ha->bars,
- QLA2XXX_DRIVER_NAME)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
- "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (!(ha->bars & 1))
- goto skip_pio;
-
- /* We only need PIO for Flash operations on ISP2312 v2 chips. */
- pio = pci_resource_start(ha->pdev, 0);
- if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
- if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
- "Invalid pci I/O region size (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- } else {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
- "Region #0 no a PIO resource (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- ha->pio_address = pio;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
- "PIO address=%p.\n",
- ha->pio_address);
-
-skip_pio:
- /* Use MMIO operations for all accesses. */
- if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
- "Region #1 not an MMIO resource (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
- "Invalid PCI mem region size (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
- if (!ha->iobase) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
- "Cannot remap MMIO (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- /* Determine queue resources */
- ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
- goto mqiobase_exit;
-
- ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
- pci_resource_len(ha->pdev, 3));
- if (ha->mqiobase) {
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
- "MQIO Base=%p.\n", ha->mqiobase);
- /* Read MSIX vector size of the board */
- pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
- ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
- } else
- ql_log_pci(ql_log_info, ha->pdev, 0x001b,
- "BAR 3 not enabled.\n");
-
-mqiobase_exit:
- ha->msix_count = ha->max_rsp_queues + 1;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
- "MSIX Count:%d.\n", ha->msix_count);
- return (0);
-
-iospace_error_exit:
- return (-ENOMEM);
-}
-
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
@@ -2093,14 +2027,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->needs_freset = 1;
}
- /* Configure PCI I/O space */
- ret = qla2x00_iospace_config(ha);
- if (ret)
- goto probe_hw_failed;
-
- ql_log_pci(ql_log_info, pdev, 0x001d,
- "Found an ISP%04X irq %d iobase 0x%p.\n",
- pdev->device, pdev->irq, ha->iobase);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2213,6 +2139,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
ha->nvram_conf_off, ha->nvram_data_off);
+
+ /* Configure PCI I/O space */
+ ret = ha->isp_ops->iospace_config(ha);
+ if (ret)
+ goto probe_hw_failed;
+
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -2288,7 +2223,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg(ql_dbg_init, base_vha, 0x0033,
"max_id=%d this_id=%d "
"cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
- "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+ "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
host->this_id, host->cmd_per_lun, host->unique_id,
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
@@ -2443,9 +2378,6 @@ skip_dpc:
qla2x00_dfs_setup(base_vha);
- ql_log(ql_log_info, base_vha, 0x00fa,
- "QLogic Fibre Channed HBA Driver: %s.\n",
- qla2x00_version_str);
ql_log(ql_log_info, base_vha, 0x00fb,
"QLogic %s - %s.\n",
ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2894,7 +2826,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->sns_cmd)
goto fail_dma_pool;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
- "sns_cmd.\n", ha->sns_cmd);
+ "sns_cmd: %p.\n", ha->sns_cmd);
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3521,27 +3453,21 @@ qla2x00_do_dpc(void *data)
schedule();
__set_current_state(TASK_RUNNING);
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
- /* Initialization not yet finished. Don't do anything yet. */
- if (!base_vha->flags.init_done)
- continue;
+ if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+ goto end_loop;
if (ha->flags.eeh_busy) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
"eeh_busy=%d.\n", ha->flags.eeh_busy);
- continue;
+ goto end_loop;
}
ha->dpc_active = 1;
- if (ha->flags.mbox_busy) {
- ha->dpc_active = 0;
- continue;
- }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+ "DPC handler waking up.\n");
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+ "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3683,6 +3609,7 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+end_loop:
set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
__set_current_state(TASK_RUNNING);
@@ -3766,16 +3693,6 @@ qla2x00_sp_free_dma(srb_t *sp)
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
-
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx = sp->ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3787,6 +3704,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
sp->ctx = NULL;
}
+ CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -4070,13 +3996,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
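
The qla_os.c changes above turn PCI I/O space setup into an isp_ops callback: every legacy ops table gains .iospace_config = qla2x00_iospace_config while qla82xx_isp_ops points at qla82xx_iospace_config, and the probe path defers to the table after the ops are selected. A condensed sketch of the new probe-time call, taken from the hunk above:

/* Sketch: BAR setup is now chosen per ISP family through the ops table
 * instead of an IS_QLA82XX() check inside qla2x00_iospace_config(). */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
	goto probe_hw_failed;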
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index eff13563c82..16bc72844a9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -904,8 +904,9 @@ no_flash_data:
}
done:
ql_dbg(ql_dbg_init, vha, 0x004d,
- "FDT[%x]: (0x%x/0x%x) erase=0x%x "
- "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pr=%x wrtd=0x%x blk=0x%x.\n",
+ loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_wrt_disable, ha->fdt_block_size);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa..23f33a6d52d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index af62c3cf875..8d58ae27482 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk("------------------------------------------------------------"
"--\n");
for (cnt = 0; cnt < size; c++) {
- printk(KERN_INFO "%02x", *c);
+ printk("%02x", *c);
if (!(++cnt % 16))
- printk(KERN_INFO "\n");
+ printk("\n");
else
- printk(KERN_INFO " ");
+ printk(" ");
}
printk(KERN_INFO "\n");
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254..22a3ff02e48 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
@@ -173,8 +173,11 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
+#define FW_ALIVE_WAIT_TOV 3
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -240,6 +243,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +453,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +646,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +666,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
};
struct ql4_task_data {
@@ -835,6 +883,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
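
The new ddb_entry fields above let each entry carry its own unblock and state-change behaviour. A sketch of the intended pairing, not a verbatim excerpt: a flash-discovered entry would be wired to the *_flash_* helpers declared in ql4_glbl.h, while user-created sessions keep the plain variants (see the qla4xxx_session_create hunk below).

/* Sketch: expected wiring of the per-DDB hooks for a flash DDB. */
ddb_entry->ddb_type     = FLASH_DDB;
ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
ddb_entry->ddb_change   = qla4xxx_flash_ddb_change;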
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd..7825c141bc1 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT	4 /* Total of 4 IP addresses supported per interface:
+			   * one IPv4, one IPv6 link-local and two IPv6
+			   */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
@@ -744,7 +752,7 @@ struct dev_db_entry {
uint8_t res4[0x36]; /* 8A-BF */
uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
* pointer to a string so we
- * don't have to reserve soooo
+ * don't have to reserve so
* much RAM */
uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
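
IP_STATE_MASK and IP_STATE_SHIFT describe where the per-address IP acquisition state sits in a firmware IP status word; extracting it is a plain mask-and-shift. The ip_sts name below is illustrative, not a field defined in this header:

/* Pull the IP state out of a firmware IP status word. */
uint32_t ip_state = (ip_sts & IP_STATE_MASK) >> IP_STATE_SHIFT;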
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea2..d0dd4b33020 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef55..1bdfa8120ac 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+	 * This triggers a relogin.  After the relogin_timer
+	 * expires, the relogin gets scheduled.  We must wait a
+	 * minimum amount of time after receiving a 0x8014 AEN
+	 * with a failed device_state, or a logout response,
+	 * before we can issue another relogin.
+	 *
+	 * The firmware pads this timeout: (time2wait + 1).
+	 * The driver's relogin retry interval must be longer than
+	 * the firmware's; otherwise the firmware will fail the
+	 * set_ddb() mailbox command with status 0x4005 while it is
+	 * still counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+	/* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target by issuing the
+ * set_ddb and conn_open mailbox commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
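
With the split above, the 0x8014 AEN path no longer open-codes the login and block transitions; after validating the index and looking up the entry it simply routes through whichever hook the DDB was created with. The body of qla4xxx_process_ddb_changed() therefore reduces to the single dispatch already visible in the hunk:

/* Defer the state transition to the per-entry hook
 * (qla4xxx_ddb_change or qla4xxx_flash_ddb_change). */
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);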
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 827e93078b9..95828862eea 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
if (!srb) {
- DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
- "handle 0x%x, sp=%p. This cmd may have already "
- "been completed.\n", ha->host_no, __func__,
- le32_to_cpu(sts_entry->handle), srb));
- ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
- " handle=0x%0x\n", __func__, sts_entry->handle);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+ "handle=0x%0x, srb=%p\n", __func__,
+ sts_entry->handle, srb);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
return;
}
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_DHCP_LEASE_EXPIRED:
DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
"Reset HA\n", ha->host_no, mbox_status));
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
break;
case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
(mbox_sts[2] == ACB_STATE_ACQUIRING)))
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID))
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ (mbox_sts[2] == ACB_STATE_VALID)) {
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
complete(&ha->disable_acb_comp);
break;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b8487039..c2593782fbb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index f484ff43819..8d6bc1b2ff1 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1792,8 +1792,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
int rval = QLA_SUCCESS;
unsigned long dev_init_timeout;
- if (!test_bit(AF_INIT_DONE, &ha->flags))
+ if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+ qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
+ qla4_8xxx_idc_unlock(ha);
+ }
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1805,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+ qla4_8xxx_idc_lock(ha);
while (1) {
- qla4_8xxx_idc_lock(ha);
if (time_after_eq(jiffies, dev_init_timeout)) {
ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1822,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_COLD:
rval = qla4_8xxx_device_bootstrap(ha);
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_INITIALIZING:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_NEED_RESET:
if (!ql4xdontresethba) {
@@ -1836,32 +1838,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
* reset handler */
dev_init_timeout = jiffies +
(ha->nx_dev_init_timeout * HZ);
+ } else {
+ qla4_8xxx_idc_unlock(ha);
+ msleep(1000);
+ qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_idc_unlock(ha);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
- qla4_8xxx_idc_unlock(ha);
/* idc locked/unlocked in handler */
qla4_8xxx_need_qsnt_handler(ha);
- qla4_8xxx_idc_lock(ha);
- /* fall thru needs idc_locked */
+ break;
case QLA82XX_DEV_QUIESCENT:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_FAILED:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
default:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
}
}
exit:
+ qla4_8xxx_idc_unlock(ha);
return rval;
}
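
The ql4_nx.c hunk above changes the IDC lock discipline in the device-state loop: the lock is now taken once before the loop, dropped only around sleeps (or inside handlers that manage it themselves), and released at the single exit label. A rough sketch of the resulting shape, with the timeout and failure states elided:

/* Sketch only: lock held across the wait loop, dropped for sleeps. */
qla4_8xxx_idc_lock(ha);
while ((dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE)) !=
       QLA82XX_DEV_READY) {
	qla4_8xxx_idc_unlock(ha);
	msleep(1000);
	qla4_8xxx_idc_lock(ha);
}
qla4_8xxx_idc_unlock(ha);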
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f3..ec393a00c03 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ "Set to disable exporting boot targets to sysfs\n"
+ " 0 - Export boot targets\n"
+ " 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ " Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -120,7 +128,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static mode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
@@ -189,7 +197,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
static struct scsi_transport_template *qla4xxx_scsi_transport;
-static mode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t ql4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -927,7 +935,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
goto exit_init_fw_cb;
}
- qla4xxx_disable_acb(ha);
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
qla4xxx_initcb_to_acb(init_fw_cb);
@@ -975,6 +992,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
+
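
qla4xxx_match_ipaddress() above only compares addresses after canonicalising both sides through in4_pton()/in6_pton() and %pI4/%pI6, so different textual spellings of the same address compare equal. A minimal stand-alone illustration of the IPv4 case; the helper name is made up for the example and is not part of the driver:

/* Illustrative helper: normalise a user-supplied IPv4 string via
 * in4_pton() and %pI4 before comparing it with the already-formatted
 * existing address. */
static bool sketch_ipv4_addr_equal(const char *user, const char *existing)
{
	u8 bin[4];
	char canon[DDB_IPADDR_LEN];

	if (!in4_pton(user, strlen(user), bin, '\0', NULL))
		return false;
	sprintf(canon, "%pI4", bin);
	return strcmp(existing, canon) == 0;
}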
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1145,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1160,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
- return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1177,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1216,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1251,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1262,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+	/* If a matching FW DDB already exists, do not log in to this
+	 * target again; doing so could cause the target to log out the
+	 * previous connection.
+	 */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1293,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1320,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1498,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1609,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1619,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1628,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1662,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1867,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1952,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1702,9 +1975,10 @@ mem_alloc_error_exit:
*
* Context: Interrupt
**/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
{
- uint32_t fw_heartbeat_counter, halt_status;
+ uint32_t fw_heartbeat_counter;
+ int status = QLA_SUCCESS;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1712,7 +1986,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
"state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
ha->host_no, __func__));
- return;
+ return status;
}
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1720,8 +1994,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
/* FW not alive after 2 seconds */
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
- halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
ql4_printk(KERN_INFO, ha,
"scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1729,7 +2001,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
" 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
" 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
" 0x%x,\n PEG_NET_4_PC: 0x%x\n",
- ha->host_no, __func__, halt_status,
+ ha->host_no, __func__,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1),
qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS2),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -1742,24 +2016,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
0x3c));
-
- /* Since we cannot change dev_state in interrupt
- * context, set appropriate DPC flag then wakeup
- * DPC */
- if (halt_status & HALT_STATUS_UNRECOVERABLE)
- set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
- else {
- printk("scsi%ld: %s: detect abort needed!\n",
- ha->host_no, __func__);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- }
- qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
+ status = QLA_ERROR;
}
} else
ha->seconds_since_last_heartbeat = 0;
ha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/**
@@ -1770,14 +2033,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
**/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
- uint32_t dev_state;
-
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ uint32_t dev_state, halt_status;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
@@ -1785,7 +2047,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
"NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
}
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -1795,7 +2056,78 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
} else {
/* Check firmware health */
- qla4_8xxx_check_fw_alive(ha);
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ halt_status = qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set appropriate DPC flag then wakeup
+ * DPC */
+ if (halt_status & HALT_STATUS_UNRECOVERABLE)
+ set_bit(DPC_HA_UNRECOVERABLE,
+ &ha->dpc_flags);
+ else {
+ ql4_printk(KERN_INFO, ha, "%s: detect "
+ "abort needed!\n", __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_wake_dpc(ha);
+ }
+ }
+ }
+}
+
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+	/* Wait for relogin to time out */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
}
}
}
@@ -1809,6 +2141,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2412,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2089,6 +2428,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
uint8_t reset_chip = 0;
+ uint32_t dev_state;
+ unsigned long wait;
/* Stall incoming I/O until we are done */
scsi_block_requests(ha->host);
@@ -2139,8 +2480,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
+ if (!is_qla8022(ha))
+ goto chip_reset;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only */
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
+ goto chip_reset;
+
+ wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+ while (time_before(jiffies, wait)) {
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ qla4xxx_mailbox_premature_completion(ha);
+ break;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+chip_reset:
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2163,7 +2525,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2176,6 +2538,25 @@ recover_ha_init_adapter:
* Since we don't want to block the DPC for too long
* with multiple resets in the same thread,
* utilize DPC to retry */
+ if (is_qla8022(ha)) {
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_INFO, ha, "%s: don't retry "
+ "recover adapter. H/W is in Failed "
+ "state\n", __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ status = QLA_ERROR;
+
+ goto exit_recover;
+ }
+ }
+
if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2214,6 +2595,7 @@ recover_ha_init_adapter:
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
}
+exit_recover:
ha->adapter_error_count++;
if (test_bit(AF_ONLINE, &ha->flags))
@@ -2245,17 +2627,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2829,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2847,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2380,6 +2863,7 @@ dpc_post_reset_ha:
**/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
/* Turn-off interrupts on the card. */
@@ -2613,7 +3097,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2647,7 +3131,7 @@ static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2734,7 +3218,7 @@ static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
-static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
{
int rc;
@@ -2867,6 +3351,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3521,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3556,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3606,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,9 +3672,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
}
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+ /* Free up the normaltargets list */
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ list_del_init(&nt_ddb_idx->list);
+ vfree(nt_ddb_idx);
+ }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+	/* TODO: need to destroy iscsi_endpoint on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id;
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32, tmo = 0;
+ uint32_t initial_cmdsn = 0;
+ struct list_head list_st, list_nt; /* List of sendtargets */
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ int fw_idx_size;
+ unsigned long wtime;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_ddb_list;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_st;
+
+ /* Check if ST, add to the list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+	/* Before issuing the conn open mbox, ensure all IP states are
+	 * configured; note that conn open fails if the IPs are not configured.
+	 */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+	/* Wait for all sendtargets to complete (minimum 12 sec) */
+ tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+ list) {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ st_ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+ }
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+		/* Check if NT, then add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n",
+ idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, &list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha,
+ fw_ddb_entry) == QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+			/* Create session object with INVALID_ENTRY;
+			 * the target_id gets set when we issue the login.
+			 */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+ ha->host, cmds_max,
+ sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess)
+ goto exit_ddb_list;
+
+			/*
+			 * iscsi_session_setup increments the driver reference
+			 * count, which would prevent the driver from being
+			 * unloaded, so call module_put to decrement the
+			 * reference count.
+			 **/
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess,
+ sizeof(struct qla_conn),
+ conn_id);
+ if (!cls_conn)
+ goto exit_ddb_list;
+
+ ddb_entry->conn = cls_conn;
+
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "Unable to get ep\n"));
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+ cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+				/* Use the relogin path to discover new devices
+				 * by short-circuiting the relogin timer logic -
+				 * instead set the flags to initiate login
+				 * right away.
+				 */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+ }
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+exit_ddb_list:
+ qla4xxx_free_nt_list(&list_nt);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -3298,7 +4294,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4315,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4382,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4449,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+			/*
+			 * We decremented the driver reference count when we
+			 * set up the session so that driver unload is seamless
+			 * without actually destroying the session.
+			 **/
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
/**
* qla4xxx_remove_adapter - calback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4497,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -3840,6 +4874,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
}
/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine finds that if reset host is called in EH
+ * scenario or from some application like sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+ if (shost->shost_state == SHOST_RECOVERY)
+ return 1;
+ return 0;
+}
+
+/**
* qla4xxx_eh_host_reset - kernel callback
* @cmd: Pointer to Linux's SCSI command structure
*
@@ -3856,6 +4904,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
+
+ /* Clear outstanding srb in queues */
+ if (qla4xxx_is_eh_active(cmd->device->host))
+ qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
return FAILED;
}
@@ -4115,7 +5168,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5204,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f53..26a3fa34a33 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dc6131e6a1b..5f84a148eb1 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1812,7 +1812,7 @@ int scsi_error_handler(void *data)
* what we need to do to get it up and online again (if we can).
* If we fail, we end up taking the thing offline.
*/
- if (scsi_autopm_get_host(shost) != 0) {
+ if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
SCSI_LOG_ERROR_RECOVERY(1,
printk(KERN_ERR "Error handler scsi_eh_%d "
"unable to autoresume\n",
@@ -1833,7 +1833,8 @@ int scsi_error_handler(void *data)
* which are still online.
*/
scsi_restart_operations(shost);
- scsi_autopm_put_host(shost);
+ if (!shost->eh_noresume)
+ scsi_autopm_put_host(shost);
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d329f8b12e2..bf8bf79e6a1 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -49,8 +49,22 @@ static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * sd is the only high-level SCSI driver to implement runtime
+ * PM, and sd treats runtime suspend, system suspend, and
+ * system hibernate identically (but not system freeze).
+ */
+ if (pm_runtime_suspended(dev)) {
+ if (msg.event == PM_EVENT_SUSPEND ||
+ msg.event == PM_EVENT_HIBERNATE)
+ return 0; /* already suspended */
+
+ /* wake up device so that FREEZE will succeed */
+ pm_runtime_resume(dev);
+ }
err = scsi_dev_type_suspend(dev, msg);
+ }
return err;
}
@@ -58,8 +72,17 @@ static int scsi_bus_resume_common(struct device *dev)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * Parent device may have runtime suspended as soon as
+ * it is woken up during the system resume.
+ *
+ * Resume it on behalf of child.
+ */
+ pm_runtime_get_sync(dev->parent);
err = scsi_dev_type_resume(dev);
+ pm_runtime_put_sync(dev->parent);
+ }
if (err == 0) {
pm_runtime_disable(dev);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 2a588955423..68eadd1c67f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
- SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 96029e6d027..cfd49143723 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -328,7 +328,7 @@ iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
-static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
return NULL;
session->transport = transport;
+ session->creator = -1;
session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
- uint16_t cmds_max, uint16_t queue_depth)
+ struct iscsi_uevent *ev, pid_t pid,
+ uint32_t initial_cmdsn, uint16_t cmds_max,
+ uint16_t queue_depth)
{
struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
if (!session)
return -ENOMEM;
+ session->creator = pid;
shost = iscsi_session_to_shost(session);
ev->r.c_session_ret.host_no = shost->host_no;
ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -2199,7 +2204,7 @@ static struct attribute *iscsi_conn_attrs[] = {
NULL,
};
-static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+ NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2367,10 +2381,11 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_targetalias.attr,
&dev_attr_priv_sess_recovery_tmo.attr,
&dev_attr_priv_sess_state.attr,
+ &dev_attr_priv_sess_creator.attr,
NULL,
};
-static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2424,6 +2439,8 @@ static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_creator.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
@@ -2468,7 +2485,7 @@ static struct attribute *iscsi_host_attrs[] = {
NULL,
};
-static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5fbeadd9681..a2715c31e75 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1434,7 +1434,7 @@ static int spi_host_configure(struct transport_container *tc,
(si->f->show_##name ? S_IRUGO : 0) | \
(si->f->set_##name ? S_IWUSR : 0)
-static mode_t target_attribute_is_visible(struct kobject *kobj,
+static umode_t target_attribute_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 6803b1e26ec..92d24d6dcb3 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -16,7 +16,6 @@
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <asm/unaligned.h>
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fa3a5918009..7b3f8075e2a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -50,6 +50,7 @@
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
@@ -2741,6 +2742,9 @@ static void sd_shutdown(struct device *dev)
if (!sdkp)
return; /* this can happen */
+ if (pm_runtime_suspended(dev))
+ goto exit;
+
if (sdkp->WCE) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp);
@@ -2751,6 +2755,7 @@ static void sd_shutdown(struct device *dev)
sd_start_stop_device(sdkp, 0);
}
+exit:
scsi_disk_put(sdkp);
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 441a1c5b897..02d99982a74 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2325,16 +2325,15 @@ static struct sg_proc_leaf sg_proc_leaf_arr[] = {
static int
sg_proc_init(void)
{
- int k, mask;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
- struct sg_proc_leaf * leaf;
+ int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
- leaf = &sg_proc_leaf_arr[k];
- mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+ struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 9acc2b2a360..cf51432f8e7 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -51,7 +51,7 @@
#include "53c700.h"
-MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_AUTHOR("Thomas BogendĂśrfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snirm_53c710");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index a18996d2446..7264116185d 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1144,7 +1144,7 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
*
* These are statically allocated. Trying to be clever was not worth it.
*
- * Dynamic allocation can fail, and we can't go deeep into the memory
+ * Dynamic allocation can fail, and we can't go deep into the memory
* allocator, since we're a SCSI driver, and trying too hard to allocate
* memory might generate disk I/O. We also don't want to fail disk I/O
* in that case because we can't get an allocation - the I/O could be
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8b7a141ff35..e53e449b4ec 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,7 +25,7 @@
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+ d->skip_suspend = desc->skip_syscore_suspend;
+
nr_intc_controllers++;
return 0;
@@ -386,6 +388,9 @@ static int intc_suspend(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
@@ -409,6 +414,9 @@ static void intc_resume(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
@@ -434,46 +442,47 @@ struct syscore_ops intc_syscore_ops = {
.resume = intc_resume,
};
-struct sysdev_class intc_sysdev_class = {
+struct bus_type intc_subsys = {
.name = "intc",
+ .dev_name = "intc",
};
static ssize_t
-show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+show_intc_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct intc_desc_int *d;
- d = container_of(dev, struct intc_desc_int, sysdev);
+ d = container_of(dev, struct intc_desc_int, dev);
return sprintf(buf, "%s\n", d->chip.name);
}
-static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL);
-static int __init register_intc_sysdevs(void)
+static int __init register_intc_devs(void)
{
struct intc_desc_int *d;
int error;
register_syscore_ops(&intc_syscore_ops);
- error = sysdev_class_register(&intc_sysdev_class);
+ error = subsys_system_register(&intc_subsys, NULL);
if (!error) {
list_for_each_entry(d, &intc_list, list) {
- d->sysdev.id = d->index;
- d->sysdev.cls = &intc_sysdev_class;
- error = sysdev_register(&d->sysdev);
+ d->dev.id = d->index;
+ d->dev.bus = &intc_subsys;
+ error = device_register(&d->dev);
if (error == 0)
- error = sysdev_create_file(&d->sysdev,
- &attr_name);
+ error = device_create_file(&d->dev,
+ &dev_attr_name);
if (error)
break;
}
}
if (error)
- pr_err("sysdev registration error\n");
+ pr_err("device registration error\n");
return error;
}
-device_initcall(register_intc_sysdevs);
+device_initcall(register_intc_devs);
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 5b934851efa..b0e9155ff73 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/radix-tree.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -51,7 +51,7 @@ struct intc_subgroup_entry {
struct intc_desc_int {
struct list_head list;
- struct sys_device sysdev;
+ struct device dev;
struct radix_tree_root tree;
raw_spinlock_t lock;
unsigned int index;
@@ -67,6 +67,7 @@ struct intc_desc_int {
struct intc_window *window;
unsigned int nr_windows;
struct irq_chip chip;
+ bool skip_suspend;
};
@@ -157,7 +158,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
extern unsigned int nr_intc_controllers;
-extern struct sysdev_class intc_sysdev_class;
+extern struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 56bf9336b92..e649ceaaa41 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "intc: " fmt
#include <linux/errno.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
@@ -20,15 +20,15 @@
static void __iomem *uimask;
static ssize_t
-show_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr, char *buf)
+show_intc_userimask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}
static ssize_t
-store_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr,
+store_intc_userimask(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long level;
@@ -55,8 +55,8 @@ store_intc_userimask(struct sysdev_class *cls,
return count;
}
-static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
- show_intc_userimask, store_intc_userimask);
+static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
static int __init userimask_sysdev_init(void)
@@ -64,7 +64,7 @@ static int __init userimask_sysdev_init(void)
if (unlikely(!uimask))
return -ENXIO;
- return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
+ return device_create_file(intc_subsys.dev_root, &dev_attr_userimask);
}
late_initcall(userimask_sysdev_init);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df541..56abf55e49d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -174,8 +174,7 @@ config SPI_LM70_LLP
config SPI_MPC52xx
tristate "Freescale MPC52xx SPI (non-PSC) controller support"
- depends on PPC_MPC52xx && SPI
- select SPI_MASTER_OF
+ depends on PPC_MPC52xx
help
This drivers supports the MPC52xx SPI controller in master SPI
mode.
@@ -199,7 +198,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +207,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -346,14 +345,14 @@ config SPI_TI_SSP
serial port.
config SPI_TOPCLIFF_PCH
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
- This driver also supports the ML7213, a companion chip for the
- Atom E6xx series and compatible with the Intel EG20T PCH.
+ This driver also supports the ML7213/ML7223/ML7831, a companion chip
+ for the Atom E6xx series and compatible with the Intel EG20T PCH.
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5c..acc88b4d286 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41b..0094c645ff0 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
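spi_gpio_alloc() and spi_gpio_request() are re-annotated from __init to __devinit because they are reached only from the probe path, and probing can happen after boot-time __init memory has been discarded. A small sketch of the convention, with hypothetical foo_* names:

#include <linux/init.h>
#include <linux/platform_device.h>

/* A helper called only from probe carries the probe routine's section
 * annotation (__devinit), not __init. */
static int __devinit foo_setup(struct platform_device *pdev)
{
	/* request GPIOs, map registers, ... */
	return 0;
}

static int __devinit foo_probe(struct platform_device *pdev)
{
	return foo_setup(pdev);
}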
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c..182e9c87382 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 322be7aea8b..0b0dfb71c64 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -121,6 +121,7 @@ struct omap2_mcspi {
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
+ struct workqueue_struct *wq;
};
struct omap2_mcspi_cs {
@@ -143,8 +144,6 @@ struct omap2_mcspi_regs {
static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
-static struct workqueue_struct *omap2_mcspi_wq;
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -1043,7 +1042,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
spin_lock_irqsave(&mcspi->lock, flags);
list_add_tail(&m->queue, &mcspi->msg_queue);
- queue_work(omap2_mcspi_wq, &mcspi->work);
+ queue_work(mcspi->wq, &mcspi->work);
spin_unlock_irqrestore(&mcspi->lock, flags);
return 0;
@@ -1088,6 +1087,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
+ char wq_name[20];
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1111,10 +1111,17 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
+ sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
+ mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
+ if (mcspi->wq == NULL) {
+ status = -ENOMEM;
+ goto free_master;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto err1;
+ goto free_master;
}
r->start += pdata->regs_offset;
@@ -1123,14 +1130,14 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto err1;
+ goto free_master;
}
mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err2;
+ goto release_region;
}
mcspi->dev = &pdev->dev;
@@ -1145,7 +1152,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err2;
+ goto unmap_io;
for (i = 0; i < master->num_chipselect; i++) {
char dma_ch_name[14];
@@ -1175,25 +1182,33 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
+ if (status < 0)
+ goto dma_chnl_free;
+
pm_runtime_enable(&pdev->dev);
if (status || omap2_mcspi_master_setup(mcspi) < 0)
- goto err3;
+ goto disable_pm;
status = spi_register_master(master);
if (status < 0)
- goto err4;
+ goto err_spi_register;
return status;
-err4:
+err_spi_register:
spi_master_put(master);
-err3:
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+dma_chnl_free:
kfree(mcspi->dma_channels);
-err2:
- release_mem_region(r->start, resource_size(r));
+unmap_io:
iounmap(mcspi->base);
-err1:
+release_region:
+ release_mem_region(r->start, resource_size(r));
+free_master:
+ kfree(master);
+ platform_set_drvdata(pdev, NULL);
return status;
}
@@ -1210,6 +1225,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
dma_channels = mcspi->dma_channels;
omap2_mcspi_disable_clocks(mcspi);
+ pm_runtime_disable(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
@@ -1217,6 +1233,8 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
spi_unregister_master(master);
iounmap(base);
kfree(dma_channels);
+ destroy_workqueue(mcspi->wq);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -1275,10 +1293,6 @@ static struct platform_driver omap2_mcspi_driver = {
static int __init omap2_mcspi_init(void)
{
- omap2_mcspi_wq = create_singlethread_workqueue(
- omap2_mcspi_driver.driver.name);
- if (omap2_mcspi_wq == NULL)
- return -1;
return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);
@@ -1287,7 +1301,6 @@ static void __exit omap2_mcspi_exit(void)
{
platform_driver_unregister(&omap2_mcspi_driver);
- destroy_workqueue(omap2_mcspi_wq);
}
module_exit(omap2_mcspi_exit);
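The omap2-mcspi changes drop the single driver-wide workqueue in favour of one queue per controller: it is created in probe with alloc_workqueue(..., WQ_MEM_RECLAIM, 1), used from the transfer path via mcspi->wq, and destroyed in remove, with the probe error labels renamed to match the new unwind order. A sketch of that per-instance lifecycle follows; all foo_* names are illustrative, not the driver's own code.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_ctrl {
	struct workqueue_struct	*wq;
	struct work_struct	work;
};

static void foo_work_fn(struct work_struct *work)
{
	/* pump this controller's message queue */
}

static int __devinit foo_probe(struct platform_device *pdev)
{
	struct foo_ctrl *ctrl;
	char wq_name[20];

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	/* reclaim-safe queue, max_active of 1, one per controller instance */
	snprintf(wq_name, sizeof(wq_name), "foo/%d", pdev->id);
	ctrl->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
	if (!ctrl->wq) {
		kfree(ctrl);
		return -ENOMEM;
	}

	INIT_WORK(&ctrl->work, foo_work_fn);
	platform_set_drvdata(pdev, ctrl);
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	struct foo_ctrl *ctrl = platform_get_drvdata(pdev);

	destroy_workqueue(ctrl->wq);	/* flushes pending work first */
	kfree(ctrl);
	platform_set_drvdata(pdev, NULL);
	return 0;
}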
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5559b229919..f1f5efbc340 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -340,6 +340,10 @@ struct vendor_data {
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
* @cur_chip: pointer to current clients chip(assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ * and it was found that it uses the same chip select as the previous
+ * message, so we left it active after the previous transfer, and it's
+ * active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
@@ -373,6 +377,7 @@ struct pl022 {
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
+ bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
@@ -445,23 +450,9 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
- void (*curr_cs_control) (u32 command);
+ pl022->next_msg_cs_active = false;
- /*
- * This local reference to the chip select function
- * is needed because we set curr_chip to NULL
- * as a step toward termininating the message.
- */
- curr_cs_control = pl022->cur_chip->cs_control;
- spin_lock_irqsave(&pl022->queue_lock, flags);
- msg = pl022->cur_msg;
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
- queue_work(pl022->workqueue, &pl022->pump_messages);
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- last_transfer = list_entry(msg->transfers.prev,
+ last_transfer = list_entry(pl022->cur_msg->transfers.prev,
struct spi_transfer,
transfer_list);
@@ -473,18 +464,13 @@ static void giveback(struct pl022 *pl022)
*/
udelay(last_transfer->delay_usecs);
- /*
- * Drop chip select UNLESS cs_change is true or we are returning
- * a message with an error, or next message is for another chip
- */
- if (!last_transfer->cs_change)
- curr_cs_control(SSP_CHIP_DESELECT);
- else {
+ if (!last_transfer->cs_change) {
struct spi_message *next_msg;
- /* Holding of cs was hinted, but we need to make sure
- * the next message is for the same chip. Don't waste
- * time with the following tests unless this was hinted.
+ /*
+ * cs_change was not set. We can keep the chip select
+ * enabled if there is a message in the queue and it is
+ * for the same spi device.
*
* We cannot postpone this until pump_messages, because
* after calling msg->complete (below) the driver that
@@ -501,19 +487,29 @@ static void giveback(struct pl022 *pl022)
struct spi_message, queue);
spin_unlock_irqrestore(&pl022->queue_lock, flags);
- /* see if the next and current messages point
- * to the same chip
+ /*
+ * see if the next and current messages point
+ * to the same spi device.
*/
- if (next_msg && next_msg->spi != msg->spi)
+ if (next_msg && next_msg->spi != pl022->cur_msg->spi)
next_msg = NULL;
- if (!next_msg || msg->state == STATE_ERROR)
- curr_cs_control(SSP_CHIP_DESELECT);
+ if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+ pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+ else
+ pl022->next_msg_cs_active = true;
}
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ msg = pl022->cur_msg;
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clocks & power */
- pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -1244,9 +1240,9 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
flag = 1;
- /* Disable Transmit interrupt */
- writew(readw(SSP_IMSC(pl022->virtbase)) &
- (~SSP_IMSC_MASK_TXIM),
+ /* Disable Transmit interrupt, enable receive interrupt */
+ writew((readw(SSP_IMSC(pl022->virtbase)) &
+ ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
SSP_IMSC(pl022->virtbase));
}
@@ -1352,7 +1348,7 @@ static void pump_transfers(unsigned long data)
*/
udelay(previous->delay_usecs);
- /* Drop chip select only if cs_change is requested */
+ /* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
} else {
@@ -1379,15 +1375,22 @@ static void pump_transfers(unsigned long data)
}
err_config_dma:
- writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ /* enable all interrupts except RX */
+ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
- u32 irqflags = ENABLE_ALL_INTERRUPTS;
+ /*
+ * Default is to enable all interrupts except RX -
+ * this will be enabled once TX is complete
+ */
+ u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;
+
+ /* Enable target chip, if not already active */
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
- /* Enable target chip */
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
/* Error path */
pl022->cur_msg->state = STATE_ERROR;
@@ -1442,7 +1445,8 @@ static void do_polling_transfer(struct pl022 *pl022)
} else {
/* STATE_START */
message->state = STATE_RUNNING;
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
}
/* Configuration Changing Per Transfer */
@@ -1504,14 +1508,28 @@ static void pump_messages(struct work_struct *work)
struct pl022 *pl022 =
container_of(work, struct pl022, pump_messages);
unsigned long flags;
+ bool was_busy = false;
/* Lock queue and check for queue work */
spin_lock_irqsave(&pl022->queue_lock, flags);
if (list_empty(&pl022->queue) || !pl022->running) {
+ if (pl022->busy) {
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ if (pl022->master_info->autosuspend_delay > 0) {
+ pm_runtime_mark_last_busy(&pl022->adev->dev);
+ pm_runtime_put_autosuspend(&pl022->adev->dev);
+ } else {
+ pm_runtime_put(&pl022->adev->dev);
+ }
+ }
pl022->busy = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
return;
}
+
/* Make sure we are not already running a message */
if (pl022->cur_msg) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1522,7 +1540,10 @@ static void pump_messages(struct work_struct *work)
list_entry(pl022->queue.next, struct spi_message, queue);
list_del_init(&pl022->cur_msg->queue);
- pl022->busy = true;
+ if (pl022->busy)
+ was_busy = true;
+ else
+ pl022->busy = true;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
/* Initial message state */
@@ -1532,12 +1553,14 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
- /*
- * We enable the core voltage and clocks here, then the clocks
- * and core will be disabled when giveback() is called in each method
- * (poll/interrupt/DMA)
- */
- pm_runtime_get_sync(&pl022->adev->dev);
+ if (!was_busy)
+ /*
+ * We enable the core voltage and clocks here, then the clocks
+ * and core will be disabled when this workqueue is run again
+ * and there is no more work to be done.
+ */
+ pm_runtime_get_sync(&pl022->adev->dev);
+
restore_state(pl022);
flush(pl022);
@@ -1582,6 +1605,7 @@ static int start_queue(struct pl022 *pl022)
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
+ pl022->next_msg_cs_active = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
queue_work(pl022->workqueue, &pl022->pump_messages);
@@ -1881,7 +1905,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq = {0, };
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
@@ -2231,7 +2255,17 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(dev, "probe succeeded\n");
/* let runtime pm put suspend */
- pm_runtime_put(dev);
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&adev->dev,
+ "will use autosuspend for runtime pm, delay %dms\n",
+ platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev,
+ platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
+ } else {
+ pm_runtime_put(dev);
+ }
return 0;
err_spi_register:
@@ -2305,11 +2339,6 @@ static int pl022_suspend(struct device *dev)
return status;
}
- amba_vcore_enable(pl022->adev);
- amba_pclk_enable(pl022->adev);
- load_ssp_default_config(pl022);
- amba_pclk_disable(pl022->adev);
- amba_vcore_disable(pl022->adev);
dev_dbg(dev, "suspended\n");
return 0;
}
@@ -2432,6 +2461,8 @@ static struct amba_id pl022_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
static struct amba_driver pl022_driver = {
.drv = {
.name = "ssp-pl022",
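pl022_probe() now enables autosuspend-style runtime PM whenever the board data supplies a positive autosuspend_delay, and pump_messages() only releases the device when the queue drains, using mark-last-busy plus put_autosuspend rather than a plain put after every message. A condensed sketch of the two halves of that pattern; foo_* and delay_ms are illustrative, while the pm_runtime_* calls are the ones used in the hunks above.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Probe time: opt in to autosuspend if the platform asked for it. */
static void foo_runtime_pm_init(struct device *dev, int delay_ms)
{
	if (delay_ms > 0) {
		pm_runtime_set_autosuspend_delay(dev, delay_ms);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
}

/* Queue empty: let the PM core power the block down after the delay. */
static void foo_runtime_pm_idle(struct device *dev, int delay_ms)
{
	if (delay_ms > 0) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		pm_runtime_put(dev);
	}
}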
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 019a7163572..dcf7e100642 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -971,6 +971,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
struct s3c64xx_spi_info *sci;
struct spi_master *master;
int ret;
+ char clk_name[16];
if (pdev->id < 0) {
dev_err(&pdev->dev,
@@ -984,11 +985,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
}
sci = pdev->dev.platform_data;
- if (!sci->src_clk_name) {
- dev_err(&pdev->dev,
- "Board init must call s3c64xx_spi_set_info()\n");
- return -EINVAL;
- }
/* Check for availability of necessary resource */
@@ -1073,17 +1069,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err4;
}
- sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
+ sdd->src_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
- "Unable to acquire clock '%s'\n", sci->src_clk_name);
+ "Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
goto err5;
}
if (clk_enable(sdd->src_clk)) {
- dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
- sci->src_clk_name);
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err6;
}
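Rather than requiring board code to pass a source-clock name through platform data, the s3c64xx driver now builds the name from the clock index ("spi_busclk%d") and looks it up with clk_get(). A small sketch of that lookup; the foo_ prefix is hypothetical, the name format is the one introduced above.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

static struct clk *foo_get_bus_clk(struct device *dev, int clk_nr)
{
	char clk_name[16];
	struct clk *clk;

	/* clk_nr replaces the old src_clk_name string from platform data */
	snprintf(clk_name, sizeof(clk_name), "spi_busclk%d", clk_nr);
	clk = clk_get(dev, clk_name);
	if (IS_ERR(clk))
		dev_err(dev, "unable to acquire clock '%s'\n", clk_name);
	return clk;
}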
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 6a80749391d..7086583b910 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1,7 +1,7 @@
/*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,16 +95,18 @@
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+#define PCI_DEVICE_ID_ML7831_SPI 0x8816
/*
* Set the number of SPI instance max
* Intel EG20T PCH : 1ch
- * OKI SEMICONDUCTOR ML7213 IOH : 2ch
- * OKI SEMICONDUCTOR ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7213 IOH : 2ch
+ * LAPIS Semiconductor ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_SPI_MAX_DEV 2
@@ -218,6 +220,7 @@ static struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
{ }
};
@@ -1753,4 +1756,4 @@ MODULE_PARM_DESC(use_dma,
"to use DMA for data transfers pass 1 else 0; default 1");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
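Supporting the ML7831 IOH only requires a new device ID plus one more entry in the pci_device_id table; the driver_data value (1 here) records how many SPI channels the part exposes, as the comment block above spells out. Sketch of the same table shape with a hypothetical foo_ prefix (the numeric IDs are copied from the hunk):

#include <linux/module.h>
#include <linux/pci.h>

#ifndef PCI_VENDOR_ID_ROHM
#define PCI_VENDOR_ID_ROHM	0x10DB
#endif
#define PCI_DEVICE_ID_ML7831_SPI	0x8816

static struct pci_device_id foo_pcidev_id[] = {
	/* driver_data = number of SPI channels on this IOH */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, foo_pcidev_id);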
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77eae99af11..b2ccdea30cb 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -319,7 +319,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
spi->master = master;
- spi->dev.parent = dev;
+ spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
device_initialize(&spi->dev);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a54..520e8286db2 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 34c3bab90b9..973223f5de8 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
sizeof(out->antenna_gain.ghz5));
+ /* Extract FEM info */
+ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 25cdff36a78..21e2f4b87f1 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -72,8 +72,6 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
-source "drivers/staging/spectra/Kconfig"
-
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -116,8 +114,6 @@ source "drivers/staging/bcm/Kconfig"
source "drivers/staging/ft1000/Kconfig"
-source "drivers/staging/intel_sst/Kconfig"
-
source "drivers/staging/speakup/Kconfig"
source "drivers/staging/cptm1217/Kconfig"
@@ -132,4 +128,8 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
+source "drivers/staging/omapdrm/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a25f3f26c7f..7c5808d7212 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_RTS5139) += rts5139/
-obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -50,10 +49,11 @@ obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
-obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_DRM_PSB) += gma500/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 00000000000..becf711117e
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,110 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ default N
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ default n
+
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
+config ANDROID_LOGGER
+ tristate "Android log driver"
+ default n
+
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ bool "Enable verbose console messages on Android RAM console"
+ default y
+ depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ bool "Android RAM Console Enable error correction"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+ int "Android RAM Console Data data size"
+ default 128
+ help
+ Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+ int "Android RAM Console ECC size"
+ default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+ int "Android RAM Console Symbol size"
+ default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+ hex "Android RAM Console Polynomial"
+ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+ bool "Start Android RAM console early"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+ hex "Android RAM console virtual address"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+ hex "Android RAM console buffer size"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_OUTPUT
+ bool "Timed output class driver"
+ default y
+
+config ANDROID_TIMED_GPIO
+ tristate "Android timed gpio driver"
+ depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ default n
+
+config ANDROID_LOW_MEMORY_KILLER
+ bool "Android Low Memory Killer"
+ default N
+ ---help---
+ Register processes to be killed when memory is low
+
+config ANDROID_PMEM
+ bool "Android pmem allocator"
+ depends on ARM
+
+source "drivers/staging/android/switch/Kconfig"
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 00000000000..eaed1ff64f0
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
+obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
+obj-$(CONFIG_ANDROID_PMEM) += pmem.o
+obj-$(CONFIG_ANDROID_SWITCH) += switch/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644
index 00000000000..e59c5be4be2
--- /dev/null
+++ b/drivers/staging/android/TODO
@@ -0,0 +1,10 @@
+TODO:
+ - checkpatch.pl cleanups
+ - sparse fixes
+ - rename files to be not so "generic"
+ - make sure things build as modules properly
+ - add proper arch dependencies as needed
+ - audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/android_pmem.h b/drivers/staging/android/android_pmem.h
new file mode 100644
index 00000000000..f633621f5be
--- /dev/null
+++ b/drivers/staging/android/android_pmem.h
@@ -0,0 +1,93 @@
+/* include/linux/android_pmem.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ANDROID_PMEM_H_
+#define _ANDROID_PMEM_H_
+
+#define PMEM_IOCTL_MAGIC 'p'
+#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
+#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
+#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
+#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
+/* This ioctl will allocate pmem space, backing the file, it will fail
+ * if the file already has an allocation, pass it the len as the argument
+ * to the ioctl */
+#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
+/* This will connect a one pmem file to another, pass the file that is already
+ * backed in memory as the argument to the ioctl
+ */
+#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
+/* Returns the total size of the pmem region it is sent to as a pmem_region
+ * struct (with offset set to 0).
+ */
+#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
+#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
+
+struct android_pmem_platform_data
+{
+ const char* name;
+ /* starting physical address of memory region */
+ unsigned long start;
+ /* size of memory region */
+ unsigned long size;
+ /* set to indicate the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* set to indicate maps of this region should be cached, if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ /* The MSM7k has bits to enable a write buffer in the bus controller*/
+ unsigned buffered;
+};
+
+struct pmem_region {
+ unsigned long offset;
+ unsigned long len;
+};
+
+#ifdef CONFIG_ANDROID_PMEM
+int is_pmem_file(struct file *file);
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *end, struct file **filp);
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end);
+void put_pmem_file(struct file* file);
+void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *));
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation);
+
+#else
+static inline int is_pmem_file(struct file *file) { return 0; }
+static inline int get_pmem_file(int fd, unsigned long *start,
+ unsigned long *vstart, unsigned long *end,
+ struct file **filp) { return -ENOSYS; }
+static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *end) { return -ENOSYS; }
+static inline void put_pmem_file(struct file* file) { return; }
+static inline void flush_pmem_file(struct file *file, unsigned long start,
+ unsigned long len) { return; }
+static inline int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
+
+static inline int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation) { return -ENOSYS; }
+#endif
+
+#endif //_ANDROID_PMEM_H_
+
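android_pmem.h exposes a small in-kernel API for turning a pmem file descriptor into a physical region, with -ENOSYS stubs when CONFIG_ANDROID_PMEM is disabled. A hedged sketch of a consumer, using only the functions declared above; foo_import_pmem() and the origin of the fd are assumptions, not part of this patch.

#include <linux/fs.h>
#include "android_pmem.h"

static int foo_import_pmem(int fd, unsigned long *phys, unsigned long *len)
{
	unsigned long start, vstart, end;
	struct file *filp;
	int ret;

	/* fd is assumed to have arrived via some driver-specific ioctl */
	ret = get_pmem_file(fd, &start, &vstart, &end, &filp);
	if (ret)
		return ret;	/* -ENOSYS when pmem is not configured */

	*phys = start;
	*len = end - start;

	/*
	 * A real consumer would hold this reference for as long as it uses
	 * the buffer; it is dropped immediately only to keep the sketch
	 * short.
	 */
	put_pmem_file(filp);
	return 0;
}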
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
new file mode 100644
index 00000000000..99052bfd3a2
--- /dev/null
+++ b/drivers/staging/android/ashmem.c
@@ -0,0 +1,752 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+ char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
+ struct list_head unpinned_list; /* list of all ashmem areas */
+ struct file *file; /* the shmem-based backing file */
+ size_t size; /* size of the mapping, in bytes */
+ unsigned long prot_mask; /* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ */
+struct ashmem_range {
+ struct list_head lru; /* entry in LRU list */
+ struct list_head unpinned; /* entry in its area's unpinned list */
+ struct ashmem_area *asma; /* associated area */
+ size_t pgstart; /* starting page, inclusive */
+ size_t pgend; /* ending page, inclusive */
+ unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+#define range_size(range) \
+ ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+ ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+ (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+ (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+ (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+ (page_in_range(range, start) || page_in_range(range, end) || \
+ page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+ ((range)->pgend < (page))
+
+#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+static inline void lru_add(struct ashmem_range *range)
+{
+ list_add_tail(&range->lru, &ashmem_lru_list);
+ lru_count += range_size(range);
+}
+
+static inline void lru_del(struct ashmem_range *range)
+{
+ list_del(&range->lru);
+ lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end)
+{
+ struct ashmem_range *range;
+
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (unlikely(!range))
+ return -ENOMEM;
+
+ range->asma = asma;
+ range->pgstart = start;
+ range->pgend = end;
+ range->purged = purged;
+
+ list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+ if (range_on_lru(range))
+ lru_add(range);
+
+ return 0;
+}
+
+static void range_del(struct ashmem_range *range)
+{
+ list_del(&range->unpinned);
+ if (range_on_lru(range))
+ lru_del(range);
+ kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ size_t pre = range_size(range);
+
+ range->pgstart = start;
+ range->pgend = end;
+
+ if (range_on_lru(range))
+ lru_count -= pre - range_size(range);
+}
+
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+ struct ashmem_area *asma;
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (unlikely(ret))
+ return ret;
+
+ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+ if (unlikely(!asma))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&asma->unpinned_list);
+ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+ asma->prot_mask = PROT_MASK;
+ file->private_data = asma;
+
+ return 0;
+}
+
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+ struct ashmem_range *range, *next;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+ range_del(range);
+ mutex_unlock(&ashmem_mutex);
+
+ if (asma->file)
+ fput(asma->file);
+ kmem_cache_free(ashmem_area_cachep, asma);
+
+ return 0;
+}
+
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* If size is not set, or set to 0, always return EOF. */
+ if (asma->size == 0)
+ goto out;
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret < 0)
+ goto out;
+
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->size == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->llseek(asma->file, offset, origin);
+ if (ret < 0)
+ goto out;
+
+ /** Copy f_pos from backing file, since f_ops->llseek() sets it */
+ file->f_pos = asma->file->f_pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static inline unsigned long calc_vm_may_flags(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* user needs to SET_SIZE before mapping */
+ if (unlikely(!asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested protection bits must match our allowed protection mask */
+ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+ calc_vm_prot_bits(PROT_MASK))) {
+ ret = -EPERM;
+ goto out;
+ }
+ vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+
+ /* ... and allocate the backing shmem file */
+ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+ if (unlikely(IS_ERR(vmfile))) {
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
+
+ /*
+ * XXX - Reworked to use shmem_zero_setup() instead of
+ * shmem_set_file while we're in staging. -jstultz
+ */
+ if (vma->vm_flags & VM_SHARED) {
+ ret = shmem_zero_setup(vma);
+ if (ret) {
+ fput(asma->file);
+ goto out;
+ }
+ }
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct ashmem_range *range, *next;
+
+ /* We might recurse into filesystem code, so bail out if necessary */
+ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ return -1;
+ if (!sc->nr_to_scan)
+ return lru_count;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ struct inode *inode = range->asma->file->f_dentry->d_inode;
+ loff_t start = range->pgstart * PAGE_SIZE;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+ vmtruncate_range(inode, start, end);
+ range->purged = ASHMEM_WAS_PURGED;
+ lru_del(range);
+
+ sc->nr_to_scan -= range_size(range);
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return lru_count;
+}
+
+static struct shrinker ashmem_shrinker = {
+ .shrink = ashmem_shrink,
+ .seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* the user can only remove, not add, protection bits */
+ if (unlikely((asma->prot_mask & prot) != prot)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* does the application expect PROT_READ to imply PROT_EXEC? */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ asma->prot_mask = prot;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* cannot change an existing mapping's name */
+ if (unlikely(asma->file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+ name, ASHMEM_NAME_LEN)))
+ ret = -EFAULT;
+ asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+ size_t len;
+
+ /*
+ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+ * prevents us from revealing one user's stack to another.
+ */
+ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+ if (unlikely(copy_to_user(name,
+ asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+ ret = -EFAULT;
+ } else {
+ if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+ sizeof(ASHMEM_NAME_DEF))))
+ ret = -EFAULT;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ int ret = ASHMEM_NOT_PURGED;
+
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* moved past last applicable page; we can short circuit */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to pin pages that span multiple ranges,
+ * or to pin pages that aren't even unpinned, so this is messy.
+ *
+ * Four cases:
+ * 1. The requested range subsumes an existing range, so we
+ * just remove the entire matching range.
+ * 2. The requested range overlaps the start of an existing
+ * range, so we just update that range.
+ * 3. The requested range overlaps the end of an existing
+ * range, so we just update that range.
+ * 4. The requested range punches a hole in an existing range,
+ * so we have to update one side of the range and then
+ * create a new range for the other side.
+ */
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret |= range->purged;
+
+ /* Case #1: Easy. Just nuke the whole thing. */
+ if (page_range_subsumes_range(range, pgstart, pgend)) {
+ range_del(range);
+ continue;
+ }
+
+ /* Case #2: We overlap from the start, so adjust it */
+ if (range->pgstart >= pgstart) {
+ range_shrink(range, pgend + 1, range->pgend);
+ continue;
+ }
+
+ /* Case #3: We overlap from the rear, so adjust it */
+ if (range->pgend <= pgend) {
+ range_shrink(range, range->pgstart, pgstart-1);
+ continue;
+ }
+
+ /*
+ * Case #4: We eat a chunk out of the middle. A bit
+ * more complicated, we allocate a new range for the
+ * second half and adjust the first chunk's endpoint.
+ */
+ range_alloc(asma, range, range->purged,
+ pgend + 1, range->pgend);
+ range_shrink(range, range->pgstart, pgstart - 1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* short circuit: this is our insertion point */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to unpin pages that are already entirely
+ * or partially pinned. We handle those two cases here.
+ */
+ if (page_range_subsumed_by_range(range, pgstart, pgend))
+ return 0;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ pgstart = min_t(size_t, range->pgstart, pgstart),
+ pgend = max_t(size_t, range->pgend, pgend);
+ purged |= range->purged;
+ range_del(range);
+ goto restart;
+ }
+ }
+
+ return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+ size_t pgend)
+{
+ struct ashmem_range *range;
+ int ret = ASHMEM_IS_PINNED;
+
+ list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+ if (range_before_page(range, pgstart))
+ break;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret = ASHMEM_IS_UNPINNED;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ void __user *p)
+{
+ struct ashmem_pin pin;
+ size_t pgstart, pgend;
+ int ret = -EINVAL;
+
+ if (unlikely(!asma->file))
+ return -EINVAL;
+
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ return -EFAULT;
+
+ /* per custom, you can pass zero for len to mean "everything onward" */
+ if (!pin.len)
+ pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+ if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (unlikely(((__u32) -1) - pin.offset < pin.len))
+ return -EINVAL;
+
+ if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+ return -EINVAL;
+
+ pgstart = pin.offset / PAGE_SIZE;
+ pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+ mutex_lock(&ashmem_mutex);
+
+ switch (cmd) {
+ case ASHMEM_PIN:
+ ret = ashmem_pin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_UNPIN:
+ ret = ashmem_unpin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_get_pin_status(asma, pgstart, pgend);
+ break;
+ }
+
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ashmem_area *asma = file->private_data;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ASHMEM_SET_NAME:
+ ret = set_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ if (!asma->file) {
+ ret = 0;
+ asma->size = (size_t) arg;
+ }
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = asma->size;
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(asma, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = asma->prot_mask;
+ break;
+ case ASHMEM_PIN:
+ case ASHMEM_UNPIN:
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = -EPERM;
+ if (capable(CAP_SYS_ADMIN)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = 0,
+ };
+ ret = ashmem_shrink(&ashmem_shrinker, &sc);
+ sc.nr_to_scan = ret;
+ ashmem_shrink(&ashmem_shrinker, &sc);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static struct file_operations ashmem_fops = {
+ .owner = THIS_MODULE,
+ .open = ashmem_open,
+ .release = ashmem_release,
+ .read = ashmem_read,
+ .llseek = ashmem_llseek,
+ .mmap = ashmem_mmap,
+ .unlocked_ioctl = ashmem_ioctl,
+ .compat_ioctl = ashmem_ioctl,
+};
+
+static struct miscdevice ashmem_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ashmem",
+ .fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+ int ret;
+
+ ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+ sizeof(struct ashmem_area),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_area_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+ sizeof(struct ashmem_range),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_range_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ret = misc_register(&ashmem_misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "ashmem: failed to register misc device!\n");
+ return ret;
+ }
+
+ register_shrinker(&ashmem_shrinker);
+
+ printk(KERN_INFO "ashmem: initialized\n");
+
+ return 0;
+}
+
+static void __exit ashmem_exit(void)
+{
+ int ret;
+
+ unregister_shrinker(&ashmem_shrinker);
+
+ ret = misc_deregister(&ashmem_misc);
+ if (unlikely(ret))
+ printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+ kmem_cache_destroy(ashmem_range_cachep);
+ kmem_cache_destroy(ashmem_area_cachep);
+
+ printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
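ashmem_shrink() implements the shrinker interface of this kernel generation: one callback that reports the reclaimable object count when nr_to_scan is zero, otherwise frees objects and returns what remains, and bails out with -1 when the allocation cannot re-enter filesystem code. A stripped-down sketch of that contract; the foo_* names are illustrative.

#include <linux/mm.h>
#include <linux/shrinker.h>

static unsigned long foo_cached;	/* objects currently reclaimable */

static int foo_shrink(struct shrinker *s, struct shrink_control *sc)
{
	/* must not recurse into filesystem code from this allocation */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	/* nr_to_scan == 0 is a query: report how much could be freed */
	if (!sc->nr_to_scan)
		return foo_cached;

	/* ...drop up to sc->nr_to_scan objects here, updating foo_cached... */
	return foo_cached;
}

static struct shrinker foo_shrinker = {
	.shrink	= foo_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&foo_shrinker) at init; unregister_shrinker() at exit. */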
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644
index 00000000000..1976b10ef93
--- /dev/null
+++ b/drivers/staging/android/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _LINUX_ASHMEM_H */
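The ioctls above are driven from userspace through the misc device registered as "ashmem" (normally visible as /dev/ashmem; the path is an assumption, not stated in the patch). A hedged userspace sketch of the usual create/size/map/pin sequence, using only constants from this header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/types.h>
#include "ashmem.h"

int main(void)
{
	char name[ASHMEM_NAME_LEN] = "example-region";
	struct ashmem_pin pin = { .offset = 0, .len = 0 };	/* len 0 = rest of region */
	size_t size = 4096;
	char *p;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0)
		return 1;

	ioctl(fd, ASHMEM_SET_NAME, name);	/* optional; shows up in /proc/pid/maps */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* must precede mmap() */

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	ioctl(fd, ASHMEM_UNPIN, &pin);		/* pages may now be purged under pressure */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("contents were purged while unpinned\n");

	munmap(p, size);
	close(fd);
	return 0;
}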
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644
index 00000000000..7491801a661
--- /dev/null
+++ b/drivers/staging/android/binder.c
@@ -0,0 +1,3600 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = binder_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+ struct kernel_param *kp)
+{
+ int ret;
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+ do { \
+ if (binder_debug_mask & mask) \
+ printk(KERN_INFO x); \
+ } while (0)
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ printk(KERN_INFO x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum binder_stat_types {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+ binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+ binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ void __user *ptr;
+ void __user *cookie;
+ unsigned has_strong_ref:1;
+ unsigned pending_strong_ref:1;
+ unsigned has_weak_ref:1;
+ unsigned pending_weak_ref:1;
+ unsigned has_async_transaction:1;
+ unsigned accept_fds:1;
+ unsigned min_priority:8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ void __user *cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned debug_id:29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+enum binder_deferred_state {
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
+};
+
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct files_struct *files;
+ struct hlist_node deferred_work_node;
+ int deferred_work;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+ struct dentry *debugfs_entry;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply:1;
+ /* unsigned is_dead:1; */ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ uid_t sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+ struct files_struct *files = proc->files;
+ int fd, error;
+ struct fdtable *fdt;
+ unsigned long rlim_cur;
+ unsigned long irqs;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ error = -EMFILE;
+ spin_lock(&files->file_lock);
+
+repeat:
+ fdt = files_fdtable(files);
+ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+ files->next_fd);
+
+ /*
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
+ rlim_cur = 0;
+ if (lock_task_sighand(proc->tsk, &irqs)) {
+ rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+ unlock_task_sighand(proc->tsk, &irqs);
+ }
+ if (fd >= rlim_cur)
+ goto out;
+
+ /* Do we need to expand the fd array or fd set? */
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
+
+ if (error) {
+ /*
+ * If we needed to expand the fs array we
+ * might have blocked - try again.
+ */
+ error = -EMFILE;
+ goto repeat;
+ }
+
+ FD_SET(fd, fdt->open_fds);
+ if (flags & O_CLOEXEC)
+ FD_SET(fd, fdt->close_on_exec);
+ else
+ FD_CLR(fd, fdt->close_on_exec);
+ files->next_fd = fd + 1;
+#if 1
+ /* Sanity check */
+ if (fdt->fd[fd] != NULL) {
+ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+ fdt->fd[fd] = NULL;
+ }
+#endif
+ error = fd;
+
+out:
+ spin_unlock(&files->file_lock);
+ return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+
+ if (files == NULL)
+ return;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ BUG_ON(fdt->fd[fd] != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = files_fdtable(files);
+ __FD_CLR(fd, fdt->open_fds);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+ struct file *filp;
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+ int retval;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ filp = fdt->fd[fd];
+ if (!filp)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ FD_CLR(fd, fdt->close_on_exec);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ retval = filp_close(filp, files);
+
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ return retval;
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "binder: %d: nice value %ld not allowed use "
+ "%ld instead\n", current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice < 20)
+ return;
+ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
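+/*
+ * Return the amount of data space in @buffer: the gap between buffer->data
+ * and the start of the next buffer in the proc's address-ordered list, or
+ * the end of the mmap'ed region if @buffer is the last entry.
+ */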
+static size_t binder_buffer_size(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ else
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
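+/*
+ * Insert a free buffer into proc->free_buffers, an rbtree keyed by buffer
+ * size so that binder_alloc_buf() can perform a best-fit lookup.
+ */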
+static void binder_insert_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: add free buffer, size %zd, "
+ "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+ void __user *user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
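+/*
+ * Allocate (@allocate != 0) or free the physical pages backing [start, end)
+ * of the binder buffer area, keeping the kernel mapping and the process's
+ * user-space mapping (via proc->vma) in sync.
+ */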
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: %s pages %p-%p\n", proc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+ "map pages in userspace, no vma\n", proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ struct page **page_array_ptr;
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (*page == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "for page at %p\n", proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+ page_array_ptr = page;
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
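+/*
+ * Best-fit allocation from proc->free_buffers. The chosen free buffer is
+ * moved to the allocated tree; any remainder large enough to hold another
+ * binder_buffer header is split off and reinserted as a new free buffer.
+ */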
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size,
+ size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("binder: %d: got transaction with invalid "
+ "size %zd-%zd\n", proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd"
+ "failed, no async space left\n", proc->pid, size);
+ return NULL;
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+ "no address space\n", proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got buff"
+ "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got "
+ "%p\n", proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_alloc_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
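+/*
+ * Remove a free buffer whose space is being merged into an adjacent free
+ * buffer, releasing any pages it does not share with its neighbours.
+ */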
+static void binder_delete_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p "
+ "share page with %p\n", proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer"
+ " %p share page with %p\n", proc->pid,
+ buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p do "
+ "not share page%s%s with with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
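+/*
+ * Return @buffer to the free list: release its fully-covered data pages,
+ * coalesce with free neighbours, then reinsert it into free_buffers.
+ */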
+static void binder_free_buf(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_free_buf %p size %zd buffer"
+ "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_free_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ void __user *ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ void __user *ptr,
+ void __user *cookie)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_NODE);
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->cookie = cookie;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p created\n",
+ proc->pid, current->pid, node->debug_id,
+ node->ptr, node->cookie);
+ return node;
+}
+
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ printk(KERN_ERR "binder: invalid inc strong "
+ "node for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && target_list) {
+ list_del_init(&node->work.entry);
+ list_add_tail(&node->work.entry, target_list);
+ }
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ printk(KERN_ERR "binder: invalid inc weak node "
+ "for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ list_del_init(&node->work.entry);
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: refless node %d deleted\n",
+ node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: dead node %d deleted\n",
+ node->debug_id);
+ }
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
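+/*
+ * Look up the ref that @proc holds on @node, creating one if needed. A new
+ * ref gets the lowest unused descriptor; desc 0 is reserved for refs to the
+ * context manager node.
+ */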
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_REF);
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "node %d\n", proc->pid, new_ref->debug_id,
+ new_ref->desc, node->debug_id);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "dead node\n", proc->pid, new_ref->debug_id,
+ new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d delete ref %d desc %d for "
+ "node %d\n", ref->proc->pid, ref->debug_id,
+ ref->desc, ref->node->debug_id);
+
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d delete ref %d desc %d "
+ "has death notification\n", ref->proc->pid,
+ ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ }
+ kfree(ref);
+ binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
+{
+ int ret;
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("binder: %d invalid dec strong, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("binder: %d invalid dec weak, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(ref);
+ return 0;
+}
+
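+/*
+ * Unwind @t from @target_thread's transaction stack (if any), detach it
+ * from its buffer and free it.
+ */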
+static void binder_pop_transaction(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ if (target_thread) {
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+ }
+ t->need_reply = 0;
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
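+/*
+ * Deliver @error_code for a failed transaction, walking back up the
+ * from_parent chain until a live caller thread is found to wake up.
+ */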
+static void binder_send_failed_reply(struct binder_transaction *t,
+ uint32_t error_code)
+{
+ struct binder_thread *target_thread;
+ BUG_ON(t->flags & TF_ONE_WAY);
+ while (1) {
+ target_thread = t->from;
+ if (target_thread) {
+ if (target_thread->return_error != BR_OK &&
+ target_thread->return_error2 == BR_OK) {
+ target_thread->return_error2 =
+ target_thread->return_error;
+ target_thread->return_error = BR_OK;
+ }
+ if (target_thread->return_error == BR_OK) {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply for "
+ "transaction %d to %d:%d\n",
+ t->debug_id, target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction(target_thread, t);
+ target_thread->return_error = error_code;
+ wake_up_interruptible(&target_thread->wait);
+ } else {
+ printk(KERN_ERR "binder: reply failed, target "
+ "thread, %d:%d, has error code %d "
+ "already\n", target_thread->proc->pid,
+ target_thread->pid,
+ target_thread->return_error);
+ }
+ return;
+ } else {
+ struct binder_transaction *next = t->from_parent;
+
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply "
+ "for transaction %d, target dead\n",
+ t->debug_id);
+
+ binder_pop_transaction(target_thread, t);
+ if (next == NULL) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed,"
+ " no target thread at root\n");
+ return;
+ }
+ t = next;
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed, no target "
+ "thread -- retry %d\n", t->debug_id);
+ }
+ }
+}
+
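+/*
+ * Drop the node and ref counts taken for the objects embedded in a
+ * transaction buffer. A non-NULL @failed_at limits the walk to the offsets
+ * processed before a failure; passed fds are only closed in that case.
+ */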
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ size_t *failed_at)
+{
+ size_t *offp, *off_end;
+ int debug_id = buffer->debug_id;
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+ proc->pid, buffer->debug_id,
+ buffer->data_size, buffer->offsets_size, failed_at);
+
+ if (buffer->target_node)
+ binder_dec_node(buffer->target_node, 1, 0);
+
+ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ if (failed_at)
+ off_end = failed_at;
+ else
+ off_end = (void *)offp + buffer->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > buffer->data_size - sizeof(*fp) ||
+ buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ printk(KERN_ERR "binder: transaction release %d bad"
+ "offset %zd, size %zd\n", debug_id,
+ *offp, buffer->data_size);
+ continue;
+ }
+ fp = (struct flat_binder_object *)(buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad node %p\n", debug_id, fp->binder);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p\n",
+ node->debug_id, node->ptr);
+ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad handle %ld\n", debug_id,
+ fp->handle);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, ref->node->debug_id);
+ binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ } break;
+
+ case BINDER_TYPE_FD:
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld\n", fp->handle);
+ if (failed_at)
+ task_close_fd(proc, fp->handle);
+ break;
+
+ default:
+ printk(KERN_ERR "binder: transaction release %d bad "
+ "object type %lx\n", debug_id, fp->type);
+ break;
+ }
+ }
+}
+
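+/*
+ * Core path for BC_TRANSACTION and BC_REPLY: resolve the target
+ * node/proc/thread, copy the user data and offsets into a freshly allocated
+ * binder buffer, translate each flat_binder_object (binder <-> handle,
+ * fd installation in the target), then queue the work and wake the target.
+ */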
+static void binder_transaction(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_transaction_data *tr, int reply)
+{
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
+ if (reply) {
+ in_reply_to = thread->transaction_stack;
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ if (tmp->to_thread != thread) {
+ binder_user_error("binder: %d:%d got new "
+ "transaction with bad transaction stack"
+ ", transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, tmp->debug_id,
+ tmp->to_proc ? tmp->to_proc->pid : 0,
+ tmp->to_thread ?
+ tmp->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_call_stack;
+ }
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_t_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (reply)
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->cred->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offsets size, %zd\n",
+ proc->pid, thread->pid, tr->offsets_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ t->buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %zd\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_new_node_failed;
+ }
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("binder: %d:%d sending u%p "
+ "node %d, cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ fp->binder, node->debug_id,
+ fp->cookie, node->cookie);
+ goto err_binder_get_ref_for_node_failed;
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id,
+ ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+ }
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+}
+
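+/*
+ * Consume BC_* commands from the userspace write buffer, advancing
+ * *consumed as each command is handled.
+ */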
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed)
+{
+ uint32_t cmd;
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ while (ptr < end && thread->return_error == BR_OK) {
+ if (get_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ binder_stats.bc[_IOC_NR(cmd)]++;
+ proc->stats.bc[_IOC_NR(cmd)]++;
+ thread->stats.bc[_IOC_NR(cmd)]++;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ case BC_ACQUIRE:
+ case BC_RELEASE:
+ case BC_DECREFS: {
+ uint32_t target;
+ struct binder_ref *ref;
+ const char *debug_string;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (target == 0 && binder_context_mgr_node &&
+ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+ ref = binder_get_ref_for_node(proc,
+ binder_context_mgr_node);
+ if (ref->desc != target) {
+ binder_user_error("binder: %d:"
+ "%d tried to acquire "
+ "reference to desc 0, "
+ "got %d instead\n",
+ proc->pid, thread->pid,
+ ref->desc);
+ }
+ } else
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d refcou"
+ "nt change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+ break;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ debug_string = "IncRefs";
+ binder_inc_ref(ref, 0, NULL);
+ break;
+ case BC_ACQUIRE:
+ debug_string = "Acquire";
+ binder_inc_ref(ref, 1, NULL);
+ break;
+ case BC_RELEASE:
+ debug_string = "Release";
+ binder_dec_ref(ref, 1);
+ break;
+ case BC_DECREFS:
+ default:
+ debug_string = "DecRefs";
+ binder_dec_ref(ref, 0);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid, debug_string, ref->debug_id,
+ ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ break;
+ }
+ case BC_INCREFS_DONE:
+ case BC_ACQUIRE_DONE: {
+ void __user *node_ptr;
+ void *cookie;
+ struct binder_node *node;
+
+ if (get_user(node_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (get_user(cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ node = binder_get_node(proc, node_ptr);
+ if (node == NULL) {
+ binder_user_error("binder: %d:%d "
+ "%s u%p no match\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" :
+ "BC_ACQUIRE_DONE",
+ node_ptr);
+ break;
+ }
+ if (cookie != node->cookie) {
+ binder_user_error("binder: %d:%d %s u%p node %d"
+ " cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node_ptr, node->debug_id,
+ cookie, node->cookie);
+ break;
+ }
+ if (cmd == BC_ACQUIRE_DONE) {
+ if (node->pending_strong_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_ACQUIRE_DONE node %d has "
+ "no pending acquire request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_strong_ref = 0;
+ } else {
+ if (node->pending_weak_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_INCREFS_DONE node %d has "
+ "no pending increfs request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_weak_ref = 0;
+ }
+ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s node %d ls %d lw %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ break;
+ }
+ case BC_ATTEMPT_ACQUIRE:
+ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+ return -EINVAL;
+ case BC_ACQUIRE_RESULT:
+ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+ return -EINVAL;
+
+ case BC_FREE_BUFFER: {
+ void __user *data_ptr;
+ struct binder_buffer *buffer;
+
+ if (get_user(data_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ buffer = binder_buffer_lookup(proc, data_ptr);
+ if (buffer == NULL) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p no match\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (!buffer->allow_user_free) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p matched "
+ "unreturned buffer\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_FREE_BUFFER,
+ "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ BUG_ON(!buffer->target_node->has_async_transaction);
+ if (list_empty(&buffer->target_node->async_todo))
+ buffer->target_node->has_async_transaction = 0;
+ else
+ list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ }
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_free_buf(proc, buffer);
+ break;
+ }
+
+ case BC_TRANSACTION:
+ case BC_REPLY: {
+ struct binder_transaction_data tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ break;
+ }
+
+ case BC_REGISTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "after BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ } else if (proc->requested_threads == 0) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "without request\n",
+ proc->pid, thread->pid);
+ } else {
+ proc->requested_threads--;
+ proc->requested_threads_started++;
+ }
+ thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ break;
+ case BC_ENTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_ENTER_LOOPER called after "
+ "BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ }
+ thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+ break;
+ case BC_EXIT_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_EXIT_LOOPER\n",
+ proc->pid, thread->pid);
+ thread->looper |= BINDER_LOOPER_STATE_EXITED;
+ break;
+
+ case BC_REQUEST_DEATH_NOTIFICATION:
+ case BC_CLEAR_DEATH_NOTIFICATION: {
+ uint32_t target;
+ void __user *cookie;
+ struct binder_ref *ref;
+ struct binder_ref_death *death;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d %s "
+ "invalid ref %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ target);
+ break;
+ }
+
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ cookie, ref->debug_id, ref->desc,
+ ref->strong, ref->weak, ref->node->debug_id);
+
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ if (ref->death) {
+ binder_user_error("binder: %d:%"
+ "d BC_REQUEST_DEATH_NOTI"
+ "FICATION death notific"
+ "ation already set\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ thread->return_error = BR_ERROR;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d "
+ "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ binder_stats_created(BINDER_STAT_DEATH);
+ INIT_LIST_HEAD(&death->work.entry);
+ death->cookie = cookie;
+ ref->death = death;
+ if (ref->node->proc == NULL) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&ref->death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&ref->death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } else {
+ if (ref->death == NULL) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion not active\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = ref->death;
+ if (death->cookie != cookie) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion cookie mismatch "
+ "%p != %p\n",
+ proc->pid, thread->pid,
+ death->cookie, cookie);
+ break;
+ }
+ ref->death = NULL;
+ if (list_empty(&death->work.entry)) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ } else {
+ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+ }
+ }
+ } break;
+ case BC_DEAD_BINDER_DONE: {
+ struct binder_work *w;
+ void __user *cookie;
+ struct binder_ref_death *death = NULL;
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(void *);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ if (tmp_death->cookie == cookie) {
+ death = tmp_death;
+ break;
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+ proc->pid, thread->pid, cookie, death);
+ if (death == NULL) {
+ binder_user_error("binder: %d:%d BC_DEAD"
+ "_BINDER_DONE %p not found\n",
+ proc->pid, thread->pid, cookie);
+ break;
+ }
+
+ list_del_init(&death->work.entry);
+ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } break;
+
+ default:
+ printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+ proc->pid, thread->pid, cmd);
+ return -EINVAL;
+ }
+ *consumed = ptr - buffer;
+ }
+ return 0;
+}
+
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+ uint32_t cmd)
+{
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ binder_stats.br[_IOC_NR(cmd)]++;
+ proc->stats.br[_IOC_NR(cmd)]++;
+ thread->stats.br[_IOC_NR(cmd)]++;
+ }
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ return !list_empty(&proc->todo) ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+ return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
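+/*
+ * Fill the userspace read buffer with BR_* work queued for this thread (or
+ * for the whole proc when the thread has nothing of its own), blocking for
+ * new work unless @non_block is set.
+ */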
+static int binder_thread_read(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user *buffer, int size,
+ signed long *consumed, int non_block)
+{
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ int ret = 0;
+ int wait_for_proc_work;
+
+ if (*consumed == 0) {
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ }
+
+retry:
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo);
+
+ if (thread->return_error != BR_OK && ptr < end) {
+ if (thread->return_error2 != BR_OK) {
+ if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (ptr == end)
+ goto done;
+ thread->return_error2 = BR_OK;
+ }
+ if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ thread->return_error = BR_OK;
+ goto done;
+ }
+
+
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ if (wait_for_proc_work)
+ proc->ready_threads++;
+ mutex_unlock(&binder_lock);
+ if (wait_for_proc_work) {
+ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))) {
+ binder_user_error("binder: %d:%d ERROR: Thread waiting "
+ "for process work before calling BC_REGISTER_"
+ "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+ proc->pid, thread->pid, thread->looper);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ }
+ binder_set_nice(proc->default_priority);
+ if (non_block) {
+ if (!binder_has_proc_work(proc, thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ } else {
+ if (non_block) {
+ if (!binder_has_thread_work(thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ }
+ mutex_lock(&binder_lock);
+ if (wait_for_proc_work)
+ proc->ready_threads--;
+ thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+ if (ret)
+ return ret;
+
+ while (1) {
+ uint32_t cmd;
+ struct binder_transaction_data tr;
+ struct binder_work *w;
+ struct binder_transaction *t = NULL;
+
+ if (!list_empty(&thread->todo))
+ w = list_first_entry(&thread->todo, struct binder_work, entry);
+ else if (!list_empty(&proc->todo) && wait_for_proc_work)
+ w = list_first_entry(&proc->todo, struct binder_work, entry);
+ else {
+ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+ goto retry;
+ break;
+ }
+
+ if (end - ptr < sizeof(tr) + 4)
+ break;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ t = container_of(w, struct binder_transaction, work);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ cmd = BR_TRANSACTION_COMPLETE;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+ "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+
+ list_del(&w->entry);
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+ uint32_t cmd = BR_NOOP;
+ const char *cmd_name;
+ int strong = node->internal_strong_refs || node->local_strong_refs;
+ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ if (weak && !node->has_weak_ref) {
+ cmd = BR_INCREFS;
+ cmd_name = "BR_INCREFS";
+ node->has_weak_ref = 1;
+ node->pending_weak_ref = 1;
+ node->local_weak_refs++;
+ } else if (strong && !node->has_strong_ref) {
+ cmd = BR_ACQUIRE;
+ cmd_name = "BR_ACQUIRE";
+ node->has_strong_ref = 1;
+ node->pending_strong_ref = 1;
+ node->local_strong_refs++;
+ } else if (!strong && node->has_strong_ref) {
+ cmd = BR_RELEASE;
+ cmd_name = "BR_RELEASE";
+ node->has_strong_ref = 0;
+ } else if (!weak && node->has_weak_ref) {
+ cmd = BR_DECREFS;
+ cmd_name = "BR_DECREFS";
+ node->has_weak_ref = 0;
+ }
+ if (cmd != BR_NOOP) {
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(node->ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (put_user(node->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s %d u%p c%p\n",
+ proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ } else {
+ list_del_init(&w->entry);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p deleted\n",
+ proc->pid, thread->pid, node->debug_id,
+ node->ptr, node->cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p state unchanged\n",
+ proc->pid, thread->pid, node->debug_id, node->ptr,
+ node->cookie);
+ }
+ }
+ } break;
+ case BINDER_WORK_DEAD_BINDER:
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death;
+ uint32_t cmd;
+
+ death = container_of(w, struct binder_ref_death, work);
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+ else
+ cmd = BR_DEAD_BINDER;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(death->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p\n",
+ proc->pid, thread->pid,
+ cmd == BR_DEAD_BINDER ?
+ "BR_DEAD_BINDER" :
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ death->cookie);
+
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+ list_del(&w->entry);
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } else
+ list_move(&w->entry, &proc->delivered_death);
+ if (cmd == BR_DEAD_BINDER)
+ goto done; /* DEAD_BINDER notifications can cause transactions */
+ } break;
+ }
+
+ if (!t)
+ continue;
+
+ BUG_ON(t->buffer == NULL);
+ if (t->buffer->target_node) {
+ struct binder_node *target_node = t->buffer->target_node;
+ tr.target.ptr = target_node->ptr;
+ tr.cookie = target_node->cookie;
+ t->saved_priority = task_nice(current);
+ if (t->priority < target_node->min_priority &&
+ !(t->flags & TF_ONE_WAY))
+ binder_set_nice(t->priority);
+ else if (!(t->flags & TF_ONE_WAY) ||
+ t->saved_priority > target_node->min_priority)
+ binder_set_nice(target_node->min_priority);
+ cmd = BR_TRANSACTION;
+ } else {
+ tr.target.ptr = NULL;
+ tr.cookie = NULL;
+ cmd = BR_REPLY;
+ }
+ tr.code = t->code;
+ tr.flags = t->flags;
+ tr.sender_euid = t->sender_euid;
+
+ if (t->from) {
+ struct task_struct *sender = t->from->proc->tsk;
+ tr.sender_pid = task_tgid_nr_ns(sender,
+ current->nsproxy->pid_ns);
+ } else {
+ tr.sender_pid = 0;
+ }
+
+ tr.data_size = t->buffer->data_size;
+ tr.offsets_size = t->buffer->offsets_size;
+ tr.data.ptr.buffer = (void *)t->buffer->data +
+ proc->user_buffer_offset;
+ tr.data.ptr.offsets = tr.data.ptr.buffer +
+ ALIGN(t->buffer->data_size,
+ sizeof(void *));
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &tr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d %s %d %d:%d, cmd %d"
+ "size %zd-%zd ptr %p-%p\n",
+ proc->pid, thread->pid,
+ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+ "BR_REPLY",
+ t->debug_id, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0, cmd,
+ t->buffer->data_size, t->buffer->offsets_size,
+ tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+ list_del(&t->work.entry);
+ t->buffer->allow_user_free = 1;
+ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ t->to_parent = thread->transaction_stack;
+ t->to_thread = thread;
+ thread->transaction_stack = t;
+ } else {
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+ break;
+ }
+
+done:
+
+ *consumed = ptr - buffer;
+ if (proc->requested_threads + proc->ready_threads == 0 &&
+ proc->requested_threads_started < proc->max_threads &&
+ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
+ /*spawn a new thread if we leave this out */) {
+ proc->requested_threads++;
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BR_SPAWN_LOOPER\n",
+ proc->pid, thread->pid);
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void binder_release_work(struct list_head *list)
+{
+ struct binder_work *w;
+ while (!list_empty(list)) {
+ w = list_first_entry(list, struct binder_work, entry);
+ list_del_init(&w->entry);
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t;
+
+ t = container_of(w, struct binder_transaction, work);
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ default:
+ break;
+ }
+ }
+
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &proc->threads.rb_node;
+
+ while (*p) {
+ parent = *p;
+ thread = rb_entry(parent, struct binder_thread, rb_node);
+
+ if (current->pid < thread->pid)
+ p = &(*p)->rb_left;
+ else if (current->pid > thread->pid)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+ if (*p == NULL) {
+ thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (thread == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->return_error = BR_OK;
+ thread->return_error2 = BR_OK;
+ }
+ return thread;
+}
+
+static int binder_free_thread(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct binder_transaction *t;
+ struct binder_transaction *send_reply = NULL;
+ int active_transactions = 0;
+
+ rb_erase(&thread->rb_node, &proc->threads);
+ t = thread->transaction_stack;
+ if (t && t->to_thread == thread)
+ send_reply = t;
+ while (t) {
+ active_transactions++;
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: release %d:%d transaction %d "
+ "%s, still active\n", proc->pid, thread->pid,
+ t->debug_id,
+ (t->to_thread == thread) ? "in" : "out");
+
+ if (t->to_thread == thread) {
+ t->to_proc = NULL;
+ t->to_thread = NULL;
+ if (t->buffer) {
+ t->buffer->transaction = NULL;
+ t->buffer = NULL;
+ }
+ t = t->to_parent;
+ } else if (t->from == thread) {
+ t->from = NULL;
+ t = t->from_parent;
+ } else
+ BUG();
+ }
+ if (send_reply)
+ binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+ binder_release_work(&thread->todo);
+ kfree(thread);
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread = NULL;
+ int wait_for_proc_work;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo) && thread->return_error == BR_OK;
+ mutex_unlock(&binder_lock);
+
+ if (wait_for_proc_work) {
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ poll_wait(filp, &proc->wait, wait);
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ } else {
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ poll_wait(filp, &thread->wait, wait);
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ }
+ return 0;
+}
+
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread;
+ unsigned int size = _IOC_SIZE(cmd);
+ void __user *ubuf = (void __user *)arg;
+
+ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret)
+ return ret;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+ if (thread == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ switch (cmd) {
+ case BINDER_WRITE_READ: {
+ struct binder_write_read bwr;
+ if (size != sizeof(struct binder_write_read)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+ bwr.read_size, bwr.read_buffer);
+
+ if (bwr.write_size > 0) {
+ ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ if (ret < 0) {
+ bwr.read_consumed = 0;
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (bwr.read_size > 0) {
+ ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ if (!list_empty(&proc->todo))
+ wake_up_interruptible(&proc->wait);
+ if (ret < 0) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+ bwr.read_consumed, bwr.read_size);
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_SET_MAX_THREADS:
+ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case BINDER_SET_CONTEXT_MGR:
+ if (binder_context_mgr_node != NULL) {
+ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ if (binder_context_mgr_uid != -1) {
+ if (binder_context_mgr_uid != current->cred->euid) {
+ printk(KERN_ERR "binder: BINDER_SET_"
+ "CONTEXT_MGR bad uid %d != %d\n",
+ current->cred->euid,
+ binder_context_mgr_uid);
+ ret = -EPERM;
+ goto err;
+ }
+ } else
+ binder_context_mgr_uid = current->cred->euid;
+ binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ if (binder_context_mgr_node == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ binder_context_mgr_node->local_weak_refs++;
+ binder_context_mgr_node->local_strong_refs++;
+ binder_context_mgr_node->has_strong_ref = 1;
+ binder_context_mgr_node->has_weak_ref = 1;
+ break;
+ case BINDER_THREAD_EXIT:
+ binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+ proc->pid, thread->pid);
+ binder_free_thread(proc, thread);
+ thread = NULL;
+ break;
+ case BINDER_VERSION:
+ if (size != sizeof(struct binder_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = 0;
+err:
+ if (thread)
+ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+ mutex_unlock(&binder_lock);
+ wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret && ret != -ERESTARTSYS)
+ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+ return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ dump_stack();
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ proc->vma = NULL;
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ struct binder_proc *proc = filp->private_data;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+ vma->vm_end = vma->vm_start + SZ_4M;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+
+ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+ ret = -EPERM;
+ failure_string = "bad vm_flags";
+ goto err_bad_arg;
+ }
+ vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+ if (proc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ proc->buffer = area->addr;
+ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+ if (proc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ proc->buffer_size = vma->vm_end - vma->vm_start;
+
+ vma->vm_ops = &binder_vm_ops;
+ vma->vm_private_data = proc;
+
+ if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = proc->buffer;
+ INIT_LIST_HEAD(&proc->buffers);
+ list_add(&buffer->entry, &proc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(proc, buffer);
+ proc->free_async_space = proc->buffer_size / 2;
+ barrier();
+ proc->files = get_files_struct(current);
+ proc->vma = vma;
+
+ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+ proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(proc->pages);
+ proc->pages = NULL;
+err_alloc_pages_failed:
+ vfree(proc->buffer);
+ proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+err_bad_arg:
+ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ current->group_leader->pid, current->pid);
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+ get_task_struct(current);
+ proc->tsk = current;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+ mutex_lock(&binder_lock);
+ binder_stats_created(BINDER_STAT_PROC);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ proc->pid = current->group_leader->pid;
+ INIT_LIST_HEAD(&proc->delivered_death);
+ filp->private_data = proc;
+ mutex_unlock(&binder_lock);
+
+ if (binder_debugfs_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ }
+
+ return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+ struct binder_proc *proc = filp->private_data;
+
+ binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+ return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ int wake_count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+ wake_up_interruptible(&thread->wait);
+ wake_count++;
+ }
+ }
+ wake_up_interruptible_all(&proc->wait);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_flush: %d woke %d threads\n", proc->pid,
+ wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc = filp->private_data;
+ debugfs_remove(proc->debugfs_entry);
+ binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+ return 0;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+ struct hlist_node *pos;
+ struct binder_transaction *t;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+ BUG_ON(proc->vma);
+ BUG_ON(proc->files);
+
+ hlist_del(&proc->proc_node);
+ if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder_release: %d context_mgr_node gone\n",
+ proc->pid);
+ binder_context_mgr_node = NULL;
+ }
+
+ threads = 0;
+ active_transactions = 0;
+ while ((n = rb_first(&proc->threads))) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ threads++;
+ active_transactions += binder_free_thread(proc, thread);
+ }
+ nodes = 0;
+ incoming_refs = 0;
+ while ((n = rb_first(&proc->nodes))) {
+ struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+ nodes++;
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: node %d now dead, "
+ "refs %d, death %d\n", node->debug_id,
+ incoming_refs, death);
+ }
+ }
+ outgoing_refs = 0;
+ while ((n = rb_first(&proc->refs_by_desc))) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ outgoing_refs++;
+ binder_delete_ref(ref);
+ }
+ binder_release_work(&proc->todo);
+ buffers = 0;
+
+ while ((n = rb_first(&proc->allocated_buffers))) {
+ struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+ rb_node);
+ t = buffer->transaction;
+ if (t) {
+ t->buffer = NULL;
+ buffer->transaction = NULL;
+ printk(KERN_ERR "binder: release proc %d, "
+ "transaction %d, not freed\n",
+ proc->pid, t->debug_id);
+ /*BUG();*/
+ }
+ binder_free_buf(proc, buffer);
+ buffers++;
+ }
+
+ binder_stats_deleted(BINDER_STAT_PROC);
+
+ page_count = 0;
+ if (proc->pages) {
+ int i;
+ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ if (proc->pages[i]) {
+ void *page_addr = proc->buffer + i * PAGE_SIZE;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder_release: %d: "
+ "page %d at %p not freed\n",
+ proc->pid, i,
+ page_addr);
+ unmap_kernel_range((unsigned long)page_addr,
+ PAGE_SIZE);
+ __free_page(proc->pages[i]);
+ page_count++;
+ }
+ }
+ kfree(proc->pages);
+ vfree(proc->buffer);
+ }
+
+ put_task_struct(proc->tsk);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_release: %d threads %d, nodes %d (ref %d), "
+ "refs %d, active transactions %d, buffers %d, "
+ "pages %d\n",
+ proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+ active_transactions, buffers, page_count);
+
+ kfree(proc);
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+ struct binder_proc *proc;
+ struct files_struct *files;
+
+ int defer;
+ do {
+ mutex_lock(&binder_lock);
+ mutex_lock(&binder_deferred_lock);
+ if (!hlist_empty(&binder_deferred_list)) {
+ proc = hlist_entry(binder_deferred_list.first,
+ struct binder_proc, deferred_work_node);
+ hlist_del_init(&proc->deferred_work_node);
+ defer = proc->deferred_work;
+ proc->deferred_work = 0;
+ } else {
+ proc = NULL;
+ defer = 0;
+ }
+ mutex_unlock(&binder_deferred_lock);
+
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ }
+
+ if (defer & BINDER_DEFERRED_FLUSH)
+ binder_deferred_flush(proc);
+
+ if (defer & BINDER_DEFERRED_RELEASE)
+ binder_deferred_release(proc); /* frees proc */
+
+ mutex_unlock(&binder_lock);
+ if (files)
+ put_files_struct(files);
+ } while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+ mutex_lock(&binder_deferred_lock);
+ proc->deferred_work |= defer;
+ if (hlist_unhashed(&proc->deferred_work_node)) {
+ hlist_add_head(&proc->deferred_work_node,
+ &binder_deferred_list);
+ queue_work(binder_deferred_workqueue, &binder_deferred_work);
+ }
+ mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+ struct binder_transaction *t)
+{
+ seq_printf(m,
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ prefix, t->debug_id, t,
+ t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0,
+ t->to_proc ? t->to_proc->pid : 0,
+ t->to_thread ? t->to_thread->pid : 0,
+ t->code, t->flags, t->priority, t->need_reply);
+ if (t->buffer == NULL) {
+ seq_puts(m, " buffer free\n");
+ return;
+ }
+ if (t->buffer->target_node)
+ seq_printf(m, " node %d",
+ t->buffer->target_node->debug_id);
+ seq_printf(m, " size %zd:%zd data %p\n",
+ t->buffer->data_size, t->buffer->offsets_size,
+ t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
+{
+ struct binder_node *node;
+ struct binder_transaction *t;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ t = container_of(w, struct binder_transaction, work);
+ print_binder_transaction(m, transaction_prefix, t);
+ break;
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ seq_printf(m, "%stransaction complete\n", prefix);
+ break;
+ case BINDER_WORK_NODE:
+ node = container_of(w, struct binder_node, work);
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id, node->ptr, node->cookie);
+ break;
+ case BINDER_WORK_DEAD_BINDER:
+ seq_printf(m, "%shas dead binder\n", prefix);
+ break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ seq_printf(m, "%shas cleared dead binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ seq_printf(m, "%shas cleared death notification\n", prefix);
+ break;
+ default:
+ seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+ break;
+ }
+}
+
+static void print_binder_thread(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
+{
+ struct binder_transaction *t;
+ struct binder_work *w;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ header_pos = m->count;
+ t = thread->transaction_stack;
+ while (t) {
+ if (t->from == thread) {
+ print_binder_transaction(m,
+ " outgoing transaction", t);
+ t = t->from_parent;
+ } else if (t->to_thread == thread) {
+ print_binder_transaction(m,
+ " incoming transaction", t);
+ t = t->to_parent;
+ } else {
+ print_binder_transaction(m, " bad transaction", t);
+ t = NULL;
+ }
+ }
+ list_for_each_entry(w, &thread->todo, entry) {
+ print_binder_work(m, " ", " pending transaction", w);
+ }
+ if (!print_always && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct hlist_node *pos;
+ struct binder_work *w;
+ int count;
+
+ count = 0;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ count++;
+
+ seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, node->ptr, node->cookie,
+ node->has_strong_ref, node->has_weak_ref,
+ node->local_strong_refs, node->local_weak_refs,
+ node->internal_strong_refs, count);
+ if (count) {
+ seq_puts(m, " proc");
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ seq_printf(m, " %d", ref->proc->pid);
+ }
+ seq_puts(m, "\n");
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work(m, " ",
+ " pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+ struct binder_proc *proc, int print_all)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ header_pos = m->count;
+
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ print_binder_thread(m, rb_entry(n, struct binder_thread,
+ rb_node), print_all);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (print_all || node->has_async_transaction)
+ print_binder_node(m, node);
+ }
+ if (print_all) {
+ for (n = rb_first(&proc->refs_by_desc);
+ n != NULL;
+ n = rb_next(n))
+ print_binder_ref(m, rb_entry(n, struct binder_ref,
+ rb_node_desc));
+ }
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ list_for_each_entry(w, &proc->todo, entry)
+ print_binder_work(m, " ", " pending transaction", w);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ seq_puts(m, " has delivered dead binder\n");
+ break;
+ }
+ if (!print_all && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static const char *binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+ "proc",
+ "thread",
+ "node",
+ "ref",
+ "death",
+ "transaction",
+ "transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+ struct binder_stats *stats)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+ ARRAY_SIZE(binder_command_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+ if (stats->bc[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_command_strings[i], stats->bc[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+ ARRAY_SIZE(binder_return_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+ if (stats->br[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_return_strings[i], stats->br[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(binder_objstat_strings));
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ if (stats->obj_created[i] || stats->obj_deleted[i])
+ seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ binder_objstat_strings[i],
+ stats->obj_created[i] - stats->obj_deleted[i],
+ stats->obj_created[i]);
+ }
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+ struct binder_proc *proc)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ int count, strong, weak;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " threads: %d\n", count);
+ seq_printf(m, " requested threads: %d+%d/%d\n"
+ " ready threads %d\n"
+ " free async space %zd\n", proc->requested_threads,
+ proc->requested_threads_started, proc->max_threads,
+ proc->ready_threads, proc->free_async_space);
+ count = 0;
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " nodes: %d\n", count);
+ count = 0;
+ strong = 0;
+ weak = 0;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ count++;
+ strong += ref->strong;
+ weak += ref->weak;
+ }
+ seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
+
+ count = 0;
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " buffers: %d\n", count);
+
+ count = 0;
+ list_for_each_entry(w, &proc->todo, entry) {
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ count++;
+ break;
+ default:
+ break;
+ }
+ }
+ seq_printf(m, " pending transactions: %d\n", count);
+
+ print_binder_stats(m, " ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ struct binder_node *node;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder state:\n");
+
+ if (!hlist_empty(&binder_dead_nodes))
+ seq_puts(m, "dead nodes:\n");
+ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ print_binder_node(m, node);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder stats:\n");
+
+ print_binder_stats(m, "", &binder_stats);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc_stats(m, proc);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder transactions:\n");
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 0);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc = m->private;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+ struct binder_transaction_log_entry *e)
+{
+ seq_printf(m,
+ "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ e->debug_id, (e->call_type == 2) ? "reply" :
+ ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+ e->from_thread, e->to_proc, e->to_thread, e->to_node,
+ e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+ struct binder_transaction_log *log = m->private;
+ int i;
+
+ if (log->full) {
+ for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ }
+ for (i = 0; i < log->next; i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ return 0;
+}
+
+static const struct file_operations binder_fops = {
+ .owner = THIS_MODULE,
+ .poll = binder_poll,
+ .unlocked_ioctl = binder_ioctl,
+ .mmap = binder_mmap,
+ .open = binder_open,
+ .flush = binder_flush,
+ .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "binder",
+ .fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+ int ret;
+
+ binder_deferred_workqueue = create_singlethread_workqueue("binder");
+ if (!binder_deferred_workqueue)
+ return -ENOMEM;
+
+ binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+ if (binder_debugfs_dir_entry_root)
+ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+ binder_debugfs_dir_entry_root);
+ ret = misc_register(&binder_miscdev);
+ if (binder_debugfs_dir_entry_root) {
+ debugfs_create_file("state",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_state_fops);
+ debugfs_create_file("stats",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_stats_fops);
+ debugfs_create_file("transactions",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_transactions_fops);
+ debugfs_create_file("transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log,
+ &binder_transaction_log_fops);
+ debugfs_create_file("failed_transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log_failed,
+ &binder_transaction_log_fops);
+ }
+ return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644
index 00000000000..25ab6f2759e
--- /dev/null
+++ b/drivers/staging/android/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
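+/*
+ * Example (editor's illustrative userspace sketch, not part of this header
+ * or of any particular userspace library): opening the driver, checking the
+ * protocol version, and retrying on the EINTR case described above. The
+ * include path for this header is an assumption.
+ *
+ *	#include <errno.h>
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include "binder.h"
+ *
+ *	static int binder_open_checked(void)
+ *	{
+ *		struct binder_version vers;
+ *		int fd, ret;
+ *
+ *		fd = open("/dev/binder", O_RDWR);
+ *		if (fd < 0)
+ *			return -1;
+ *		do {
+ *			ret = ioctl(fd, BINDER_VERSION, &vers);
+ *		} while (ret < 0 && errno == EINTR);
+ *		if (ret < 0 ||
+ *		    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
+ *			close(fd);
+ *			return -1;
+ *		}
+ *		return fd;
+ *	}
+ */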
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
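+/*
+ * Example (editor's illustrative sketch): filling in the structure above for
+ * a one-way call on a previously obtained descriptor. 'handle', 'MY_CODE',
+ * 'data', 'data_len', 'offsets' and 'num_objs' are placeholders; the filled
+ * structure is written to the driver after a BC_TRANSACTION command word
+ * (see the command protocol below).
+ *
+ *	struct binder_transaction_data tr;
+ *
+ *	memset(&tr, 0, sizeof(tr));
+ *	tr.target.handle = handle;            /- target descriptor
+ *	tr.code = MY_CODE;                    /- transaction command
+ *	tr.flags = TF_ONE_WAY;                /- async, no reply expected
+ *	tr.data_size = data_len;              /- bytes of flattened arguments
+ *	tr.offsets_size = num_objs * sizeof(size_t);
+ *	tr.data.ptr.buffer = data;            /- the flattened arguments
+ *	tr.data.ptr.offsets = offsets;        /- offsets of flat_binder_objects
+ */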
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
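+/*
+ * Example (editor's illustrative userspace sketch): draining the read buffer
+ * filled by BINDER_WRITE_READ. For the codes emitted by this driver, each
+ * 32-bit return command is followed by a payload of _IOC_SIZE(cmd) bytes,
+ * which is also how unrecognized codes can be skipped. 'read_buf', 'bwr' and
+ * 'handle_transaction' are placeholders.
+ *
+ *	uint32_t *ptr = (uint32_t *)read_buf;
+ *	uint32_t *end = (uint32_t *)(read_buf + bwr.read_consumed);
+ *
+ *	while (ptr < end) {
+ *		uint32_t cmd = *ptr++;
+ *
+ *		switch (cmd) {
+ *		case BR_NOOP:
+ *		case BR_TRANSACTION_COMPLETE:
+ *			break;
+ *		case BR_TRANSACTION:
+ *		case BR_REPLY:
+ *			handle_transaction(cmd,
+ *				(struct binder_transaction_data *)ptr);
+ *			break;
+ *		}
+ *		ptr = (uint32_t *)((char *)ptr + _IOC_SIZE(cmd));
+ *	}
+ */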
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
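+/*
+ * Example (editor's illustrative userspace sketch): registering the calling
+ * thread as a looper by pushing a single BC_ENTER_LOOPER command word through
+ * BINDER_WRITE_READ. 'fd' is an open /dev/binder file descriptor.
+ *
+ *	uint32_t cmd = BC_ENTER_LOOPER;
+ *	struct binder_write_read bwr;
+ *	int ret;
+ *
+ *	memset(&bwr, 0, sizeof(bwr));
+ *	bwr.write_buffer = (unsigned long)&cmd;
+ *	bwr.write_size = sizeof(cmd);
+ *	do {
+ *		ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
+ *	} while (ret < 0 && errno == EINTR);
+ */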
+
+#endif /* _LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 00000000000..ffc2d043dd8
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,616 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+ unsigned char *buffer;/* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+ struct logger_log *log; /* associated log */
+ struct list_head list; /* entry in logger_log's list */
+ size_t r_off; /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+#define logger_offset(n) ((n) & (log->size - 1))
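+/*
+ * Example (editor's note): for a 256 KiB log, log->size - 1 is 0x3ffff, so
+ * logger_offset(0x40038) == 0x38; an offset that runs past the end of the
+ * buffer simply wraps back to the start. This only works because the buffer
+ * size is a power of two.
+ */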
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O operation
+ * 2) Readers need to maintain state (logger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry starting
+ * from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+ __u16 val;
+
+ switch (log->size - off) {
+ case 1:
+ memcpy(&val, log->buffer + off, 1);
+ memcpy(((char *) &val) + 1, log->buffer, 1);
+ break;
+ default:
+ memcpy(&val, log->buffer + off, 2);
+ }
+
+ return sizeof(struct logger_entry) + val;
+}
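+/*
+ * Example (editor's note): the value read here is the 16-bit payload length
+ * stored at the start of each entry. If off == log->size - 1, only one byte
+ * of it fits before the end of the ring, so the first byte is read from
+ * buffer[off] and the second from buffer[0]; otherwise both bytes are
+ * contiguous and copied in a single memcpy().
+ */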
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+ struct logger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ size_t len;
+
+ /*
+ * We read from the log in two disjoint operations. First, we read from
+ * the current read head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - reader->r_off);
+ if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = logger_offset(reader->r_off + count);
+
+ return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&log->mutex);
+ ret = (log->w_off == reader->r_off);
+ mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&log->mutex);
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_entry_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = get_entry_len(log, off);
+ off = logger_offset(off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c?
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+ if (b < a) {
+ if (a < c || b >= c)
+ return 1;
+ } else {
+ if (a < c && b >= c)
+ return 1;
+ }
+
+ return 0;
+}
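+/*
+ * Example (editor's note): in an 8-byte ring, clock_interval(6, 2, 7) is 1
+ * (the write wrapped past offset 7) and clock_interval(6, 2, 3) is 0; in the
+ * non-wrapping case clock_interval(2, 6, 4) is 1 and clock_interval(2, 6, 7)
+ * is 0.
+ */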
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = logger_offset(old + len);
+ struct logger_reader *reader;
+
+ if (clock_interval(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (clock_interval(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ return -EFAULT;
+
+ log->w_off = logger_offset(log->w_off + count);
+
+ return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct logger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct logger_entry header;
+ struct timespec now;
+ ssize_t ret = 0;
+
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+ * entries that encroach on readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct logger_entry));
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
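+
+/*
+ * Ring layout, for illustration: each call above stores one struct
+ * logger_entry (20 bytes on typical ABIs) immediately followed by the
+ * gathered payload, capped at LOGGER_ENTRY_MAX_PAYLOAD.  E.g. a writev()
+ * of three segments of 1, 8 and 100 bytes becomes a single entry with
+ * header.len == 109; both header and payload may wrap around the end of
+ * the buffer.
+ */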
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+ struct logger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader;
+
+ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ INIT_LIST_HEAD(&reader->list);
+
+ mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ list_del(&reader->list);
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+ struct logger_reader *reader;
+ struct logger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ mutex_lock(&log->mutex);
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct logger_log *log = file_get_log(file);
+ struct logger_reader *reader;
+ long ret = -ENOTTY;
+
+ mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case LOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case LOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case LOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off != reader->r_off)
+ ret = get_entry_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case LOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static const struct file_operations logger_fops = {
+ .owner = THIS_MODULE,
+ .read = logger_read,
+ .aio_write = logger_aio_write,
+ .poll = logger_poll,
+ .unlocked_ioctl = logger_ioctl,
+ .compat_ioctl = logger_ioctl,
+ .open = logger_open,
+ .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+ .buffer = _buf_ ## VAR, \
+ .misc = { \
+ .minor = MISC_DYNAMIC_MINOR, \
+ .name = NAME, \
+ .fops = &logger_fops, \
+ .parent = NULL, \
+ }, \
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+ .readers = LIST_HEAD_INIT(VAR .readers), \
+ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+ .w_off = 0, \
+ .head = 0, \
+ .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+ if (log_main.misc.minor == minor)
+ return &log_main;
+ if (log_events.misc.minor == minor)
+ return &log_events;
+ if (log_radio.misc.minor == minor)
+ return &log_radio;
+ if (log_system.misc.minor == minor)
+ return &log_system;
+ return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "logger: failed to register misc "
+ "device for log '%s'!\n", log->misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "logger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->misc.name);
+
+ return 0;
+}
+
+static int __init logger_init(void)
+{
+ int ret;
+
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_events);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_radio);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_system);
+ if (unlikely(ret))
+ goto out;
+
+out:
+ return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 00000000000..2cb06e9d8f9
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,49 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
+#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
+#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
+#define LOGGER_LOG_MAIN "log_main" /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD \
+ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
+
+#define __LOGGERIO 0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
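+
+/*
+ * Hypothetical userspace sketch, assuming the misc devices show up as
+ * /dev/log_main etc. (the path and variable names below are illustrative,
+ * not part of this ABI):
+ *
+ *   int fd = open("/dev/log_main", O_RDONLY);
+ *   long bufsz = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
+ *   long next = ioctl(fd, LOGGER_GET_NEXT_ENTRY_LEN);
+ *
+ * A writer simply write()s or writev()s its payload; the driver prepends a
+ * struct logger_entry, and LOGGER_GET_NEXT_ENTRY_LEN reports the size of
+ * the next unread entry so a reader can size its buffer before read().
+ */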
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644
index 00000000000..2d8d2b79610
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -0,0 +1,219 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * where processes with a range of oom_adj values will get killed. Specify the
+ * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
+ * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
+ * files take a comma separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
+ * processes with an oom_adj value of 8 or higher when the free memory drops
+ * below 4096 pages and kill processes with an oom_adj value of 0 or higher
+ * when the free memory drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+ 0,
+ 1,
+ 6,
+ 12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+ 3 * 512, /* 6MB */
+ 2 * 1024, /* 8MB */
+ 4 * 1024, /* 16MB */
+ 16 * 1024, /* 64MB */
+};
+static int lowmem_minfree_size = 4;
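+
+/*
+ * With the defaults above (and 4K pages), the pairs work out as follows:
+ * tasks with oom_adj >= 12 become killable once free and file-backed page
+ * counts both drop below 16384 pages (64MB), >= 6 below 4096 (16MB),
+ * >= 1 below 2048 (8MB) and >= 0 below 1536 (6MB).
+ */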
+
+static struct task_struct *lowmem_deathpending;
+
+#define lowmem_print(level, x...) \
+ do { \
+ if (lowmem_debug_level >= (level)) \
+ printk(x); \
+ } while (0)
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data);
+
+static struct notifier_block task_nb = {
+ .notifier_call = task_notify_func,
+};
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct task_struct *task = data;
+ if (task == lowmem_deathpending) {
+ lowmem_deathpending = NULL;
+ task_handoff_unregister(&task_nb);
+ }
+ return NOTIFY_OK;
+}
+
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ int rem = 0;
+ int tasksize;
+ int i;
+ int min_adj = OOM_ADJUST_MAX + 1;
+ int selected_tasksize = 0;
+ int selected_oom_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+ int other_free = global_page_state(NR_FREE_PAGES);
+ int other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM);
+
+ /*
+ * If we already have a death outstanding, then
+ * bail out right away; indicating to vmscan
+ * that we have nothing further to offer on
+ * this pass.
+ *
+ * Note: Currently you need CONFIG_PROFILING
+ * for this to work correctly.
+ */
+ if (lowmem_deathpending)
+ return 0;
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+ for (i = 0; i < array_size; i++) {
+ if (other_free < lowmem_minfree[i] &&
+ other_file < lowmem_minfree[i]) {
+ min_adj = lowmem_adj[i];
+ break;
+ }
+ }
+ if (sc->nr_to_scan > 0)
+ lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_adj);
+ rem = global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+ if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ return rem;
+ }
+ selected_oom_adj = min_adj;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ struct mm_struct *mm;
+ struct signal_struct *sig;
+ int oom_adj;
+
+ task_lock(p);
+ mm = p->mm;
+ sig = p->signal;
+ if (!mm || !sig) {
+ task_unlock(p);
+ continue;
+ }
+ oom_adj = sig->oom_adj;
+ if (oom_adj < min_adj) {
+ task_unlock(p);
+ continue;
+ }
+ tasksize = get_mm_rss(mm);
+ task_unlock(p);
+ if (tasksize <= 0)
+ continue;
+ if (selected) {
+ if (oom_adj < selected_oom_adj)
+ continue;
+ if (oom_adj == selected_oom_adj &&
+ tasksize <= selected_tasksize)
+ continue;
+ }
+ selected = p;
+ selected_tasksize = tasksize;
+ selected_oom_adj = oom_adj;
+ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+ p->pid, p->comm, oom_adj, tasksize);
+ }
+ if (selected) {
+ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+ selected->pid, selected->comm,
+ selected_oom_adj, selected_tasksize);
+ /*
+ * If CONFIG_PROFILING is off, then task_handoff_register()
+ * is a nop. In that case we don't want to stall the killer
+ * by setting lowmem_deathpending.
+ */
+#ifdef CONFIG_PROFILING
+ lowmem_deathpending = selected;
+ task_handoff_register(&task_nb);
+#endif
+ force_sig(SIGKILL, selected);
+ rem -= selected_tasksize;
+ }
+ lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ read_unlock(&tasklist_lock);
+ return rem;
+}
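+
+/*
+ * Selection, illustrated: with the defaults, other_free = 1000 and
+ * other_file = 900 pages already satisfy the first threshold, so min_adj
+ * becomes 0 and every task with oom_adj >= 0 is a candidate; among the
+ * candidates the highest oom_adj wins, with ties broken by the largest
+ * RSS, and that single task is sent SIGKILL.
+ */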
+
+static struct shrinker lowmem_shrinker = {
+ .shrink = lowmem_shrink,
+ .seeks = DEFAULT_SEEKS * 16
+};
+
+static int __init lowmem_init(void)
+{
+ register_shrinker(&lowmem_shrinker);
+ return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+ unregister_shrinker(&lowmem_shrinker);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+ S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/pmem.c b/drivers/staging/android/pmem.c
new file mode 100644
index 00000000000..7d97032c650
--- /dev/null
+++ b/drivers/staging/android/pmem.c
@@ -0,0 +1,1345 @@
+/* pmem.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include "android_pmem.h"
+
+#define PMEM_MAX_DEVICES 10
+#define PMEM_MAX_ORDER 128
+#define PMEM_MIN_ALLOC PAGE_SIZE
+
+#define PMEM_DEBUG 1
+
+/* indicates that a reference to this file has been taken via get_pmem_file,
+ * the file should not be released until put_pmem_file is called */
+#define PMEM_FLAGS_BUSY 0x1
+/* indicates that this is a suballocation of a larger master range */
+#define PMEM_FLAGS_CONNECTED 0x1 << 1
+/* indicates this is a master and not a sub allocation and that it is mmaped */
+#define PMEM_FLAGS_MASTERMAP 0x1 << 2
+/* submap and unsubmap flags indicate:
+ * 00: subregion has never been mmaped
+ * 10: subregion has been mmaped, reference to the mm was taken
+ * 11: subregion has been released, reference to the mm still held
+ * 01: subregion has been released, reference to the mm has been released
+ */
+#define PMEM_FLAGS_SUBMAP 0x1 << 3
+#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
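+
+/* Rough sketch of the master/client model implemented below: the process
+ * that mmap()s a pmem fd directly becomes the master (MASTERMAP).  A client
+ * opens its own fd, attaches it to the master fd with the PMEM_CONNECT
+ * ioctl (CONNECTED), and then mmap()s; that mapping is marked SUBMAP and is
+ * initially backed entirely by the garbage page.  Real pages only appear
+ * where the master exposes subregions via PMEM_MAP / PMEM_UNMAP (see
+ * pmem_remap).  When the client's vma is torn down, UNSUBMAP is raised so
+ * the stale mm reference can later be dropped.
+ */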
+
+
+struct pmem_data {
+ /* in alloc mode: an index into the bitmap
+ * in no_alloc mode: the size of the allocation */
+ int index;
+ /* see flags above for descriptions */
+ unsigned int flags;
+ /* protects this data field; if the mm's mmap_sem will be held at the
+ * same time as this sem, the mm sem must be taken first (as this is
+ * the order for the vma_open and vma_close ops) */
+ struct rw_semaphore sem;
+ /* info about the mmaping process */
+ struct vm_area_struct *vma;
+ /* task struct of the mapping process */
+ struct task_struct *task;
+ /* process id of the mapping process */
+ pid_t pid;
+ /* file descriptor of the master */
+ int master_fd;
+ /* file struct of the master */
+ struct file *master_file;
+ /* a list of currently available regions if this is a suballocation */
+ struct list_head region_list;
+ /* a linked list of data so we can access them for debugging */
+ struct list_head list;
+#if PMEM_DEBUG
+ int ref;
+#endif
+};
+
+struct pmem_bits {
+ unsigned allocated:1; /* 1 if allocated, 0 if free */
+ unsigned order:7; /* size of the region in pmem space */
+};
+
+struct pmem_region_node {
+ struct pmem_region region;
+ struct list_head list;
+};
+
+#define PMEM_DEBUG_MSGS 0
+#if PMEM_DEBUG_MSGS
+#define DLOG(fmt,args...) \
+ do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+ ##args); } \
+ while (0)
+#else
+#define DLOG(x...) do {} while (0)
+#endif
+
+struct pmem_info {
+ struct miscdevice dev;
+ /* physical start address of the remapped pmem space */
+ unsigned long base;
+ /* virtual start address of the remapped pmem space */
+ unsigned char __iomem *vbase;
+ /* total size of the pmem space */
+ unsigned long size;
+ /* number of entries in the pmem space */
+ unsigned long num_entries;
+ /* pfn of the garbage page in memory */
+ unsigned long garbage_pfn;
+ /* index of the garbage page in the pmem space */
+ int garbage_index;
+ /* the bitmap for the region indicating which entries are allocated
+ * and which are free */
+ struct pmem_bits *bitmap;
+ /* indicates the region should not be managed with an allocator */
+ unsigned no_allocator;
+ /* indicates maps of this region should be cached, if a mix of
+ * cached and uncached is desired, set this and open the device with
+ * O_SYNC to get an uncached region */
+ unsigned cached;
+ unsigned buffered;
+ /* in no_allocator mode the first mapper gets the whole space and sets
+ * this flag */
+ unsigned allocated;
+ /* for debugging, creates a list of pmem file structs, the
+ * data_list_lock should be taken before pmem_data->sem if both are
+ * needed */
+ struct mutex data_list_lock;
+ struct list_head data_list;
+ /* pmem_sem protects the bitmap array
+ * a write lock should be held when modifying entries in bitmap
+ * a read lock should be held when reading data from bits or
+ * dereferencing a pointer into bitmap
+ *
+ * pmem_data->sem protects the pmem data of a particular file
+ * Many of the function that require the pmem_data->sem have a non-
+ * locking version for when the caller is already holding that sem.
+ *
+ * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
+ * down(pmem_data->sem) => down(bitmap_sem)
+ */
+ struct rw_semaphore bitmap_sem;
+
+ long (*ioctl)(struct file *, unsigned int, unsigned long);
+ int (*release)(struct inode *, struct file *);
+};
+
+static struct pmem_info pmem[PMEM_MAX_DEVICES];
+static int id_count;
+
+#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
+#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
+#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
+#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
+#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
+#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
+ PMEM_LEN(id, index))
+#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
+#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+ (!(data->flags & PMEM_FLAGS_UNSUBMAP)))
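+
+/*
+ * Worked example of the buddy arithmetic above, assuming PMEM_MIN_ALLOC is
+ * a 4K page: an entry at index 8 with order 2 covers indices 8..11, so
+ * PMEM_LEN is 16KB and PMEM_OFFSET(8) is 32KB into the region; its buddy
+ * is index 8 ^ (1 << 2) = 12, and PMEM_NEXT_INDEX skips to index 12.
+ */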
+
+static int pmem_release(struct inode *, struct file *);
+static int pmem_mmap(struct file *, struct vm_area_struct *);
+static int pmem_open(struct inode *, struct file *);
+static long pmem_ioctl(struct file *, unsigned int, unsigned long);
+
+struct file_operations pmem_fops = {
+ .release = pmem_release,
+ .mmap = pmem_mmap,
+ .open = pmem_open,
+ .unlocked_ioctl = pmem_ioctl,
+};
+
+static int get_id(struct file *file)
+{
+ return MINOR(file->f_dentry->d_inode->i_rdev);
+}
+
+int is_pmem_file(struct file *file)
+{
+ int id;
+
+ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
+ return 0;
+ id = get_id(file);
+ if (unlikely(id >= PMEM_MAX_DEVICES))
+ return 0;
+ if (unlikely(file->f_dentry->d_inode->i_rdev !=
+ MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
+ return 0;
+ return 1;
+}
+
+static int has_allocation(struct file *file)
+{
+ struct pmem_data *data;
+ /* check is_pmem_file first if not accessed via pmem_fops */
+
+ if (unlikely(!file->private_data))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (unlikely(data->index < 0))
+ return 0;
+ return 1;
+}
+
+static int is_master_owner(struct file *file)
+{
+ struct file *master_file;
+ struct pmem_data *data;
+ int put_needed, ret = 0;
+
+ if (!is_pmem_file(file) || !has_allocation(file))
+ return 0;
+ data = (struct pmem_data *)file->private_data;
+ if (PMEM_FLAGS_MASTERMAP & data->flags)
+ return 1;
+ master_file = fget_light(data->master_fd, &put_needed);
+ if (master_file && data->master_file == master_file)
+ ret = 1;
+ fput_light(master_file, put_needed);
+ return ret;
+}
+
+static int pmem_free(int id, int index)
+{
+ /* caller should hold the write lock on pmem_sem! */
+ int buddy, curr = index;
+ DLOG("index %d\n", index);
+
+ if (pmem[id].no_allocator) {
+ pmem[id].allocated = 0;
+ return 0;
+ }
+ /* clean up the bitmap, merging any buddies */
+ pmem[id].bitmap[curr].allocated = 0;
+ /* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
+ * if the buddy is also free merge them
+ * repeat until the buddy is not free or end of the bitmap is reached
+ */
+ do {
+ buddy = PMEM_BUDDY_INDEX(id, curr);
+ if (PMEM_IS_FREE(id, buddy) &&
+ PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
+ PMEM_ORDER(id, buddy)++;
+ PMEM_ORDER(id, curr)++;
+ curr = min(buddy, curr);
+ } else {
+ break;
+ }
+ } while (curr < pmem[id].num_entries);
+
+ return 0;
+}
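+
+/*
+ * Worked example of the merge loop above: freeing index 8 (order 2) while
+ * its buddy, index 8 ^ (1 << 2) = 12, is free and also order 2 promotes
+ * both to order 3; the walk then continues from index 8 with the new buddy
+ * 8 ^ (1 << 3) = 0, and stops as soon as a buddy is busy, differs in order,
+ * or the end of the bitmap is reached.
+ */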
+
+static void pmem_revoke(struct file *file, struct pmem_data *data);
+
+static int pmem_release(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ int id = get_id(file), ret = 0;
+
+
+ mutex_lock(&pmem[id].data_list_lock);
+ /* if this file is a master, revoke all the memory in the connected
+ * files */
+ if (PMEM_FLAGS_MASTERMAP & data->flags) {
+ struct pmem_data *sub_data;
+ list_for_each(elt, &pmem[id].data_list) {
+ sub_data = list_entry(elt, struct pmem_data, list);
+ down_read(&sub_data->sem);
+ if (PMEM_IS_SUBMAP(sub_data) &&
+ file == sub_data->master_file) {
+ up_read(&sub_data->sem);
+ pmem_revoke(file, sub_data);
+ } else
+ up_read(&sub_data->sem);
+ }
+ }
+ list_del(&data->list);
+ mutex_unlock(&pmem[id].data_list_lock);
+
+
+ down_write(&data->sem);
+
+ /* if it's not a connected file and it has an allocation, free it */
+ if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
+ down_write(&pmem[id].bitmap_sem);
+ ret = pmem_free(id, data->index);
+ up_write(&pmem[id].bitmap_sem);
+ }
+
+ /* if this file is a submap (mapped, connected file), downref the
+ * task struct */
+ if (PMEM_FLAGS_SUBMAP & data->flags)
+ if (data->task) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ }
+
+ file->private_data = NULL;
+
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ list_del(elt);
+ kfree(region_node);
+ }
+ BUG_ON(!list_empty(&data->region_list));
+
+ up_write(&data->sem);
+ kfree(data);
+ if (pmem[id].release)
+ ret = pmem[id].release(inode, file);
+
+ return ret;
+}
+
+static int pmem_open(struct inode *inode, struct file *file)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+ int ret = 0;
+
+ DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
+ /* set up file->private_data to indicate it is unmapped */
+ /* you can only open a pmem device one time */
+ if (file->private_data != NULL)
+ return -1;
+ data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
+ if (!data) {
+ printk("pmem: unable to allocate memory for pmem metadata.\n");
+ return -1;
+ }
+ data->flags = 0;
+ data->index = -1;
+ data->task = NULL;
+ data->vma = NULL;
+ data->pid = 0;
+ data->master_file = NULL;
+#if PMEM_DEBUG
+ data->ref = 0;
+#endif
+ INIT_LIST_HEAD(&data->region_list);
+ init_rwsem(&data->sem);
+
+ file->private_data = data;
+ INIT_LIST_HEAD(&data->list);
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_add(&data->list, &pmem[id].data_list);
+ mutex_unlock(&pmem[id].data_list_lock);
+ return ret;
+}
+
+static unsigned long pmem_order(unsigned long len)
+{
+ int i;
+
+ len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+ len--;
+ for (i = 0; i < sizeof(len)*8; i++)
+ if (len >> i == 0)
+ break;
+ return i;
+}
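+
+/*
+ * pmem_order() rounds a byte length up to whole PMEM_MIN_ALLOC units and
+ * returns the smallest order whose block covers it.  E.g. with 4K pages a
+ * 12KB request becomes 3 units, and the smallest power of two >= 3 is
+ * 2^2 = 4 units, so the order is 2 (a 16KB block).
+ */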
+
+static int pmem_allocate(int id, unsigned long len)
+{
+ /* caller should hold the write lock on pmem_sem! */
+ /* return the corresponding pdata[] entry */
+ int curr = 0;
+ int end = pmem[id].num_entries;
+ int best_fit = -1;
+ unsigned long order = pmem_order(len);
+
+ if (pmem[id].no_allocator) {
+ DLOG("no allocator");
+ if ((len > pmem[id].size) || pmem[id].allocated)
+ return -1;
+ pmem[id].allocated = 1;
+ return len;
+ }
+
+ if (order > PMEM_MAX_ORDER)
+ return -1;
+ DLOG("order %lx\n", order);
+
+ /* look through the bitmap:
+ * if you find a free slot of the correct order use it
+ * otherwise, use the best fit (smallest with size > order) slot
+ */
+ while (curr < end) {
+ if (PMEM_IS_FREE(id, curr)) {
+ if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+ /* set the not free bit and clear others */
+ best_fit = curr;
+ break;
+ }
+ if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+ (best_fit < 0 ||
+ PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+ best_fit = curr;
+ }
+ curr = PMEM_NEXT_INDEX(id, curr);
+ }
+
+ /* if best_fit < 0, there are no suitable slots,
+ * return an error
+ */
+ if (best_fit < 0) {
+ printk("pmem: no space left to allocate!\n");
+ return -1;
+ }
+
+ /* now partition the best fit:
+ * split the slot into 2 buddies of order - 1
+ * repeat until the slot is of the correct order
+ */
+ while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+ int buddy;
+ PMEM_ORDER(id, best_fit) -= 1;
+ buddy = PMEM_BUDDY_INDEX(id, best_fit);
+ PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+ }
+ pmem[id].bitmap[best_fit].allocated = 1;
+ return best_fit;
+}
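+
+/*
+ * Example of the best-fit split above: asking for order 1 when the best
+ * free slot is index 0 at order 3 first carves off a buddy at index 4
+ * (both now order 2), then one at index 2 (both order 1); index 0 is then
+ * marked allocated and returned.
+ */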
+
+static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+{
+ int id = get_id(file);
+#ifdef pgprot_noncached
+ if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
+ return pgprot_noncached(vma_prot);
+#endif
+#ifdef pgprot_ext_buffered
+ else if (pmem[id].buffered)
+ return pgprot_ext_buffered(vma_prot);
+#endif
+ return vma_prot;
+}
+
+static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return PMEM_START_ADDR(id, 0);
+ else
+ return PMEM_START_ADDR(id, data->index);
+
+}
+
+static void *pmem_start_vaddr(int id, struct pmem_data *data)
+{
+ return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+}
+
+static unsigned long pmem_len(int id, struct pmem_data *data)
+{
+ if (pmem[id].no_allocator)
+ return data->index;
+ else
+ return PMEM_LEN(id, data->index);
+}
+
+static int pmem_map_garbage(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int i, garbage_pages = len >> PAGE_SHIFT;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
+ for (i = 0; i < garbage_pages; i++) {
+ if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
+ pmem[id].garbage_pfn))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ int garbage_pages;
+ DLOG("unmap offset %lx len %lx\n", offset, len);
+
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+
+ garbage_pages = len >> PAGE_SHIFT;
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ pmem_map_garbage(id, vma, data, offset, len);
+ return 0;
+}
+
+static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ DLOG("map offset %lx len %lx\n", offset, len);
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+ BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
+
+ if (io_remap_pfn_range(vma, vma->vm_start + offset,
+ (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
+ len, vma->vm_page_prot)) {
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
+ struct pmem_data *data, unsigned long offset,
+ unsigned long len)
+{
+ /* hold the mm's mmap_sem for the vma you are modifying when you call this */
+ BUG_ON(!vma);
+ zap_page_range(vma, vma->vm_start + offset, len, NULL);
+ return pmem_map_pfn_range(id, vma, data, offset, len);
+}
+
+static void pmem_vma_open(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+ int id = get_id(file);
+ /* this should never be called as we don't support copying pmem
+ * ranges via fork */
+ BUG_ON(!has_allocation(file));
+ down_write(&data->sem);
+ /* remap the garbage pages, forkers don't get access to the data */
+ pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
+ up_write(&data->sem);
+}
+
+static void pmem_vma_close(struct vm_area_struct *vma)
+{
+ struct file *file = vma->vm_file;
+ struct pmem_data *data = file->private_data;
+
+ DLOG("current %u ppid %u file %p count %d\n", current->pid,
+ current->parent->pid, file, file_count(file));
+ if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
+ printk(KERN_WARNING "pmem: something is very wrong, you are "
+ "closing a vm backing an allocation that doesn't "
+ "exist!\n");
+ return;
+ }
+ down_write(&data->sem);
+ if (data->vma == vma) {
+ data->vma = NULL;
+ if ((data->flags & PMEM_FLAGS_CONNECTED) &&
+ (data->flags & PMEM_FLAGS_SUBMAP))
+ data->flags |= PMEM_FLAGS_UNSUBMAP;
+ }
+ /* the kernel is going to free this vma now anyway */
+ up_write(&data->sem);
+}
+
+static struct vm_operations_struct vm_ops = {
+ .open = pmem_vma_open,
+ .close = pmem_vma_close,
+};
+
+static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pmem_data *data;
+ int index;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ int ret = 0, id = get_id(file);
+
+ if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
+#if PMEM_DEBUG
+ printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+ " and a multiple of the page size.\n");
+#endif
+ return -EINVAL;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+ down_write(&data->sem);
+ /* check this file isn't already mmaped, for submaps check this file
+ * has never been mmaped */
+ if ((data->flags & PMEM_FLAGS_SUBMAP) ||
+ (data->flags & PMEM_FLAGS_UNSUBMAP)) {
+#if PMEM_DEBUG
+ printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+ "this file is already mmaped. %x\n", data->flags);
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+ /* if file->private_data is unallocated, allocate */
+ if (data && data->index == -1) {
+ down_write(&pmem[id].bitmap_sem);
+ index = pmem_allocate(id, vma->vm_end - vma->vm_start);
+ up_write(&pmem[id].bitmap_sem);
+ data->index = index;
+ }
+ /* either no space was available or an error occurred */
+ if (!has_allocation(file)) {
+ ret = -EINVAL;
+ printk("pmem: could not find allocation for map.\n");
+ goto error;
+ }
+
+ if (pmem_len(id, data) < vma_size) {
+#if PMEM_DEBUG
+ printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
+ " size of backing region [%lu].\n", vma_size,
+ pmem_len(id, data));
+#endif
+ ret = -EINVAL;
+ goto error;
+ }
+
+ vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
+ vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+
+ if (data->flags & PMEM_FLAGS_CONNECTED) {
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
+ printk("pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ DLOG("remapping file: %p %lx %lx\n", file,
+ region_node->region.offset,
+ region_node->region.len);
+ if (pmem_remap_pfn_range(id, vma, data,
+ region_node->region.offset,
+ region_node->region.len)) {
+ ret = -EAGAIN;
+ goto error;
+ }
+ }
+ data->flags |= PMEM_FLAGS_SUBMAP;
+ get_task_struct(current->group_leader);
+ data->task = current->group_leader;
+ data->vma = vma;
+#if PMEM_DEBUG
+ data->pid = current->pid;
+#endif
+ DLOG("submapped file %p vma %p pid %u\n", file, vma,
+ current->pid);
+ } else {
+ if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
+ printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+ ret = -EAGAIN;
+ goto error;
+ }
+ data->flags |= PMEM_FLAGS_MASTERMAP;
+ data->pid = current->pid;
+ }
+ vma->vm_ops = &vm_ops;
+error:
+ up_write(&data->sem);
+ return ret;
+}
+
+/* the following are the API for accessing pmem regions by other drivers
+ * from inside the kernel */
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+ unsigned long *len)
+{
+ struct pmem_data *data;
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: requested pmem data from invalid"
+ " file.\n");
+#endif
+ return -1;
+ }
+ data = (struct pmem_data *)file->private_data;
+ down_read(&data->sem);
+ if (data->vma) {
+ *start = data->vma->vm_start;
+ *len = data->vma->vm_end - data->vma->vm_start;
+ } else {
+ *start = 0;
+ *len = 0;
+ }
+ up_read(&data->sem);
+ return 0;
+}
+
+int get_pmem_addr(struct file *file, unsigned long *start,
+ unsigned long *vstart, unsigned long *len)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return -1;
+ }
+
+ data = (struct pmem_data *)file->private_data;
+ if (data->index == -1) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: requested pmem data from file with no "
+ "allocation.\n");
+#endif
+ return -1;
+ }
+ id = get_id(file);
+
+ down_read(&data->sem);
+ *start = pmem_start_addr(id, data);
+ *len = pmem_len(id, data);
+ *vstart = (unsigned long)pmem_start_vaddr(id, data);
+ up_read(&data->sem);
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ data->ref++;
+ up_write(&data->sem);
+#endif
+ return 0;
+}
+
+int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+ unsigned long *len, struct file **filp)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (unlikely(file == NULL)) {
+ printk(KERN_INFO "pmem: requested data from file descriptor "
+ "that doesn't exist.\n");
+ return -1;
+ }
+
+ if (get_pmem_addr(file, start, vstart, len))
+ goto end;
+
+ if (filp)
+ *filp = file;
+ return 0;
+end:
+ fput(file);
+ return -1;
+}
+
+void put_pmem_file(struct file *file)
+{
+ struct pmem_data *data;
+ int id;
+
+ if (!is_pmem_file(file))
+ return;
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+#if PMEM_DEBUG
+ down_write(&data->sem);
+ if (data->ref == 0) {
+ printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
+ pmem[id].dev.name, data->pid);
+ BUG();
+ }
+ data->ref--;
+ up_write(&data->sem);
+#endif
+ fput(file);
+}
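+
+/*
+ * Minimal sketch of how another driver might use the API above, given a
+ * pmem fd handed in from userspace (variable names are illustrative only):
+ *
+ *   unsigned long start, vstart, len;
+ *   struct file *pmem_file;
+ *
+ *   if (get_pmem_file(fd, &start, &vstart, &len, &pmem_file))
+ *       return -EINVAL;
+ *   ... use the physical range [start, start + len) or the kernel
+ *       mapping at vstart, then ...
+ *   put_pmem_file(pmem_file);
+ */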
+
+void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
+{
+ struct pmem_data *data;
+ int id;
+ void *vaddr;
+ struct pmem_region_node *region_node;
+ struct list_head *elt;
+ void *flush_start, *flush_end;
+
+ if (!is_pmem_file(file) || !has_allocation(file)) {
+ return;
+ }
+
+ id = get_id(file);
+ data = (struct pmem_data *)file->private_data;
+ if (!pmem[id].cached || file->f_flags & O_SYNC)
+ return;
+
+ down_read(&data->sem);
+ vaddr = pmem_start_vaddr(id, data);
+ /* if this isn't a submapped file, flush the whole thing */
+ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
+ dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+ goto end;
+ }
+ /* otherwise, flush the region of the file we are drawing */
+ list_for_each(elt, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node, list);
+ if ((offset >= region_node->region.offset) &&
+ ((offset + len) <= (region_node->region.offset +
+ region_node->region.len))) {
+ flush_start = vaddr + region_node->region.offset;
+ flush_end = flush_start + region_node->region.len;
+ dmac_flush_range(flush_start, flush_end);
+ break;
+ }
+ }
+end:
+ up_read(&data->sem);
+}
+
+static int pmem_connect(unsigned long connect, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *src_data;
+ struct file *src_file;
+ int ret = 0, put_needed;
+
+ down_write(&data->sem);
+ /* retrieve the src file and check it is a pmem file with an alloc */
+ src_file = fget_light(connect, &put_needed);
+ DLOG("connect %p to %p\n", file, src_file);
+ if (!src_file) {
+ printk("pmem: src file not found!\n");
+ ret = -EINVAL;
+ goto err_no_file;
+ }
+ if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
+ printk(KERN_INFO "pmem: src file is not a pmem file or has no "
+ "alloc!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ src_data = (struct pmem_data *)src_file->private_data;
+
+ if (has_allocation(file) && (data->index != src_data->index)) {
+ printk("pmem: file is already mapped but doesn't match this"
+ " src_file!\n");
+ ret = -EINVAL;
+ goto err_bad_file;
+ }
+ data->index = src_data->index;
+ data->flags |= PMEM_FLAGS_CONNECTED;
+ data->master_fd = connect;
+ data->master_file = src_file;
+
+err_bad_file:
+ fput_light(src_file, put_needed);
+err_no_file:
+ up_write(&data->sem);
+ return ret;
+}
+
+static void pmem_unlock_data_and_mm(struct pmem_data *data,
+ struct mm_struct *mm)
+{
+ up_write(&data->sem);
+ if (mm != NULL) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
+ struct mm_struct **locked_mm)
+{
+ int ret = 0;
+ struct mm_struct *mm = NULL;
+ *locked_mm = NULL;
+lock_mm:
+ down_read(&data->sem);
+ if (PMEM_IS_SUBMAP(data)) {
+ mm = get_task_mm(data->task);
+ if (!mm) {
+#if PMEM_DEBUG
+ printk("pmem: can't remap, task is gone!\n");
+#endif
+ up_read(&data->sem);
+ return -1;
+ }
+ }
+ up_read(&data->sem);
+
+ if (mm)
+ down_write(&mm->mmap_sem);
+
+ down_write(&data->sem);
+ /* check that the file didn't get mmaped before we could take the
+ * data sem; this should be safe b/c you can only submap each file
+ * once */
+ if (PMEM_IS_SUBMAP(data) && !mm) {
+ /* pmem_unlock_data_and_mm() already drops data->sem */
+ pmem_unlock_data_and_mm(data, mm);
+ goto lock_mm;
+ }
+ /* now check that vma.mm is still there, it could have been
+ * deleted by vma_close before we could get the data->sem */
+ if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
+ /* might as well release this */
+ if (data->flags & PMEM_FLAGS_SUBMAP) {
+ put_task_struct(data->task);
+ data->task = NULL;
+ /* lower the submap flag to show the mm is gone */
+ data->flags &= ~(PMEM_FLAGS_SUBMAP);
+ }
+ pmem_unlock_data_and_mm(data, mm);
+ return -1;
+ }
+ *locked_mm = mm;
+ return ret;
+}
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+ unsigned operation)
+{
+ int ret;
+ struct pmem_region_node *region_node;
+ struct mm_struct *mm = NULL;
+ struct list_head *elt, *elt2;
+ int id = get_id(file);
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+
+ /* pmem region must be aligned on a page boundary */
+ if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
+ !PMEM_IS_PAGE_ALIGNED(region->len))) {
+#if PMEM_DEBUG
+ printk("pmem: request for unaligned pmem suballocation "
+ "%lx %lx\n", region->offset, region->len);
+#endif
+ return -EINVAL;
+ }
+
+ /* if userspace requests a region of len 0, there's nothing to do */
+ if (region->len == 0)
+ return 0;
+
+ /* lock the mm and data */
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+ if (ret)
+ return 0;
+
+ /* only the owner of the master file can remap the client fds
+ * that are backed by it */
+ if (!is_master_owner(file)) {
+#if PMEM_DEBUG
+ printk("pmem: remap requested from non-master process\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* check that the requested range is within the src allocation */
+ if (unlikely((region->offset > pmem_len(id, data)) ||
+ (region->len > pmem_len(id, data)) ||
+ (region->offset + region->len > pmem_len(id, data)))) {
+#if PMEM_DEBUG
+ printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (operation == PMEM_MAP) {
+ region_node = kmalloc(sizeof(struct pmem_region_node),
+ GFP_KERNEL);
+ if (!region_node) {
+ ret = -ENOMEM;
+#if PMEM_DEBUG
+ printk(KERN_INFO "No space to allocate metadata!\n");
+#endif
+ goto err;
+ }
+ region_node->region = *region;
+ list_add(&region_node->list, &data->region_list);
+ } else if (operation == PMEM_UNMAP) {
+ int found = 0;
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ if (region->len == 0 ||
+ (region_node->region.offset == region->offset &&
+ region_node->region.len == region->len)) {
+ list_del(elt);
+ kfree(region_node);
+ found = 1;
+ }
+ }
+ if (!found) {
+#if PMEM_DEBUG
+ printk("pmem: Unmap region does not map any mapped "
+ "region!\n");
+#endif
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (data->vma && PMEM_IS_SUBMAP(data)) {
+ if (operation == PMEM_MAP)
+ ret = pmem_remap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ else if (operation == PMEM_UNMAP)
+ ret = pmem_unmap_pfn_range(id, data->vma, data,
+ region->offset, region->len);
+ }
+
+err:
+ pmem_unlock_data_and_mm(data, mm);
+ return ret;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data)
+{
+ struct pmem_region_node *region_node;
+ struct list_head *elt, *elt2;
+ struct mm_struct *mm = NULL;
+ int id = get_id(file);
+ int ret = 0;
+
+ data->master_file = NULL;
+ ret = pmem_lock_data_and_mm(file, data, &mm);
+ /* if lock_data_and_mm fails, either the task that mapped the fd or
+ * the vma that mapped it has already gone away; nothing more
+ * needs to be done */
+ if (ret)
+ return;
+ /* unmap everything */
+ /* delete the regions and region list; nothing is mapped any more */
+ if (data->vma)
+ list_for_each_safe(elt, elt2, &data->region_list) {
+ region_node = list_entry(elt, struct pmem_region_node,
+ list);
+ pmem_unmap_pfn_range(id, data->vma, data,
+ region_node->region.offset,
+ region_node->region.len);
+ list_del(elt);
+ kfree(region_node);
+ }
+ /* delete the master file */
+ pmem_unlock_data_and_mm(data, mm);
+}
+
+static void pmem_get_size(struct pmem_region *region, struct file *file)
+{
+ struct pmem_data *data = (struct pmem_data *)file->private_data;
+ int id = get_id(file);
+
+ if (!has_allocation(file)) {
+ region->offset = 0;
+ region->len = 0;
+ return;
+ } else {
+ region->offset = pmem_start_addr(id, data);
+ region->len = pmem_len(id, data);
+ }
+ DLOG("offset %lx len %lx\n", region->offset, region->len);
+}
+
+
+static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pmem_data *data;
+ int id = get_id(file);
+
+ switch (cmd) {
+ case PMEM_GET_PHYS:
+ {
+ struct pmem_region region;
+ DLOG("get_phys\n");
+ if (!has_allocation(file)) {
+ region.offset = 0;
+ region.len = 0;
+ } else {
+ data = (struct pmem_data *)file->private_data;
+ region.offset = pmem_start_addr(id, data);
+ region.len = pmem_len(id, data);
+ }
+ printk(KERN_INFO "pmem: request for physical address of pmem region "
+ "from process %d.\n", current->pid);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_MAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_MAP);
+ }
+ break;
+ case PMEM_UNMAP:
+ {
+ struct pmem_region region;
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ data = (struct pmem_data *)file->private_data;
+ return pmem_remap(&region, file, PMEM_UNMAP);
+ break;
+ }
+ case PMEM_GET_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get_size\n");
+ pmem_get_size(&region, file);
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_GET_TOTAL_SIZE:
+ {
+ struct pmem_region region;
+ DLOG("get total size\n");
+ region.offset = 0;
+ region.len = pmem[id].size;
+ if (copy_to_user((void __user *)arg, &region,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ break;
+ }
+ case PMEM_ALLOCATE:
+ {
+ if (has_allocation(file))
+ return -EINVAL;
+ data = (struct pmem_data *)file->private_data;
+ data->index = pmem_allocate(id, arg);
+ break;
+ }
+ case PMEM_CONNECT:
+ DLOG("connect\n");
+ return pmem_connect(arg, file);
+ break;
+ case PMEM_CACHE_FLUSH:
+ {
+ struct pmem_region region;
+ DLOG("flush\n");
+ if (copy_from_user(&region, (void __user *)arg,
+ sizeof(struct pmem_region)))
+ return -EFAULT;
+ flush_pmem_file(file, region.offset, region.len);
+ break;
+ }
+ default:
+ if (pmem[id].ioctl)
+ return pmem[id].ioctl(file, cmd, arg);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if PMEM_DEBUG
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct list_head *elt, *elt2;
+ struct pmem_data *data;
+ struct pmem_region_node *region_node;
+ int id = (int)file->private_data;
+ const int debug_bufmax = 4096;
+ static char buffer[4096];
+ int n = 0;
+
+ DLOG("debug open\n");
+ n = scnprintf(buffer, debug_bufmax,
+ "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+ mutex_lock(&pmem[id].data_list_lock);
+ list_for_each(elt, &pmem[id].data_list) {
+ data = list_entry(elt, struct pmem_data, list);
+ down_read(&data->sem);
+ n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
+ data->pid);
+ list_for_each(elt2, &data->region_list) {
+ region_node = list_entry(elt2, struct pmem_region_node,
+ list);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "(%lx,%lx) ",
+ region_node->region.offset,
+ region_node->region.len);
+ }
+ n += scnprintf(buffer + n, debug_bufmax - n, "\n");
+ up_read(&data->sem);
+ }
+ mutex_unlock(&pmem[id].data_list_lock);
+
+ n++;
+ buffer[n] = 0;
+ return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+static struct file_operations debug_fops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+#endif
+
+#if 0
+static struct miscdevice pmem_dev = {
+ .name = "pmem",
+ .fops = &pmem_fops,
+};
+#endif
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+ long (*ioctl)(struct file *, unsigned int, unsigned long),
+ int (*release)(struct inode *, struct file *))
+{
+ int err = 0;
+ int i, index = 0;
+ int id = id_count;
+ id_count++;
+
+ pmem[id].no_allocator = pdata->no_allocator;
+ pmem[id].cached = pdata->cached;
+ pmem[id].buffered = pdata->buffered;
+ pmem[id].base = pdata->start;
+ pmem[id].size = pdata->size;
+ pmem[id].ioctl = ioctl;
+ pmem[id].release = release;
+ init_rwsem(&pmem[id].bitmap_sem);
+ mutex_init(&pmem[id].data_list_lock);
+ INIT_LIST_HEAD(&pmem[id].data_list);
+ pmem[id].dev.name = pdata->name;
+ pmem[id].dev.minor = id;
+ pmem[id].dev.fops = &pmem_fops;
+ printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+
+ err = misc_register(&pmem[id].dev);
+ if (err) {
+ printk(KERN_ALERT "Unable to register pmem driver!\n");
+ goto err_cant_register_device;
+ }
+ pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
+
+ pmem[id].bitmap = kmalloc(pmem[id].num_entries *
+ sizeof(struct pmem_bits), GFP_KERNEL);
+ if (!pmem[id].bitmap)
+ goto err_no_mem_for_metadata;
+
+ memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
+ pmem[id].num_entries);
+
+ for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
+ if ((pmem[id].num_entries) & 1<<i) {
+ PMEM_ORDER(id, index) = i;
+ index = PMEM_NEXT_INDEX(id, index);
+ }
+ }
+
+ if (pmem[id].cached)
+ pmem[id].vbase = ioremap_cached(pmem[id].base,
+ pmem[id].size);
+#ifdef ioremap_ext_buffered
+ else if (pmem[id].buffered)
+ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+ pmem[id].size);
+#endif
+ else
+ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+
+ if (pmem[id].vbase == 0)
+ goto error_cant_remap;
+
+ pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
+ if (pmem[id].no_allocator)
+ pmem[id].allocated = 0;
+
+#if PMEM_DEBUG
+ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
+ &debug_fops);
+#endif
+ return 0;
+error_cant_remap:
+ kfree(pmem[id].bitmap);
+err_no_mem_for_metadata:
+ misc_deregister(&pmem[id].dev);
+err_cant_register_device:
+ return -1;
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+ struct android_pmem_platform_data *pdata;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ printk(KERN_ALERT "Unable to probe pmem!\n");
+ return -1;
+ }
+ pdata = pdev->dev.platform_data;
+ return pmem_setup(pdata, NULL, NULL);
+}
+
+
+static int pmem_remove(struct platform_device *pdev)
+{
+ int id = pdev->id;
+ __free_page(pfn_to_page(pmem[id].garbage_pfn));
+ misc_deregister(&pmem[id].dev);
+ return 0;
+}
+
+static struct platform_driver pmem_driver = {
+ .probe = pmem_probe,
+ .remove = pmem_remove,
+ .driver = { .name = "android_pmem" }
+};
+
+
+static int __init pmem_init(void)
+{
+ return platform_driver_register(&pmem_driver);
+}
+
+static void __exit pmem_exit(void)
+{
+ platform_driver_unregister(&pmem_driver);
+}
+
+module_init(pmem_init);
+module_exit(pmem_exit);
+
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644
index 00000000000..6d4d67924f2
--- /dev/null
+++ b/drivers/staging/android/ram_console.c
@@ -0,0 +1,443 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include "ram_console.h"
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+ uint32_t sig;
+ uint32_t start;
+ uint32_t size;
+ uint8_t data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+ for (i = 0; i < ECC_SIZE; i++)
+ ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ for (i = 0; i < ECC_SIZE; i++)
+ par[i] = ecc[i];
+ return decode_rs8(ram_console_rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+ struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int size = ECC_BLOCK_SIZE;
+#endif
+ memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+ par = ram_console_par_buffer +
+ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+ do {
+ if (block + ECC_BLOCK_SIZE > buffer_end)
+ size = buffer_end - block;
+ ram_console_encode_rs8(block, size, par);
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ } while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ struct ram_console_buffer *buffer = ram_console_buffer;
+ uint8_t *par;
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
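+
+/*
+ * Layout of the ECC area used above, when ERROR_CORRECTION is enabled:
+ * ram_console_par_buffer holds one ECC_SIZE parity chunk per
+ * ECC_BLOCK_SIZE bytes of log data, followed by one extra chunk that
+ * protects the struct ram_console_buffer header itself, i.e. the
+ * persistent region looks like
+ *
+ *   [header][log data][per-block parity][header parity]
+ *
+ * ram_console_init() reserves this space by shrinking the usable log size
+ * accordingly.
+ */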
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+ int rem;
+ struct ram_console_buffer *buffer = ram_console_buffer;
+
+ if (count > ram_console_buffer_size) {
+ s += count - ram_console_buffer_size;
+ count = ram_console_buffer_size;
+ }
+ rem = ram_console_buffer_size - buffer->start;
+ if (rem < count) {
+ ram_console_update(s, rem);
+ s += rem;
+ count -= rem;
+ buffer->start = 0;
+ buffer->size = ram_console_buffer_size;
+ }
+ ram_console_update(s, count);
+
+ buffer->start += count;
+ if (buffer->size < ram_console_buffer_size)
+ buffer->size += count;
+ ram_console_update_header();
+}
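+
+/*
+ * E.g. with a 100-byte log area and start == 90, a 25-byte write stores
+ * the first 10 bytes at offset 90, resets start to 0 (marking the buffer
+ * as full), stores the remaining 15 bytes at offset 0 and leaves
+ * start == 15; writes larger than the whole area keep only their last
+ * ram_console_buffer_size bytes.
+ */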
+
+static struct console ram_console = {
+ .name = "ram",
+ .write = ram_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+void ram_console_enable_console(int enabled)
+{
+ if (enabled)
+ ram_console.flags |= CON_ENABLED;
+ else
+ ram_console.flags &= ~CON_ENABLED;
+}
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
+ char *dest)
+{
+ size_t old_log_size = buffer->size;
+ size_t bootinfo_size = 0;
+ size_t total_size = old_log_size;
+ char *ptr;
+ const char *bootinfo_label = "Boot info:\n";
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *block;
+ uint8_t *par;
+ char strbuf[80];
+ int strbuf_len = 0;
+
+ block = buffer->data;
+ par = ram_console_par_buffer;
+ while (block < buffer->data + buffer->size) {
+ int numerr;
+ int size = ECC_BLOCK_SIZE;
+ if (block + size > buffer->data + ram_console_buffer_size)
+ size = buffer->data + ram_console_buffer_size - block;
+ numerr = ram_console_decode_rs8(block, size, par);
+ if (numerr > 0) {
+#if 0
+ printk(KERN_INFO "ram_console: error in block %p, %d\n",
+ block, numerr);
+#endif
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+#if 0
+ printk(KERN_INFO "ram_console: uncorrectable error in "
+ "block %p\n", block);
+#endif
+ ram_console_bad_blocks++;
+ }
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ }
+ if (ram_console_corrected_bytes || ram_console_bad_blocks)
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ ram_console_corrected_bytes, ram_console_bad_blocks);
+ else
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\nNo errors detected\n");
+ if (strbuf_len >= sizeof(strbuf))
+ strbuf_len = sizeof(strbuf) - 1;
+ total_size += strbuf_len;
+#endif
+
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
+ total_size += bootinfo_size;
+
+ if (dest == NULL) {
+ dest = kmalloc(total_size, GFP_KERNEL);
+ if (dest == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer\n");
+ return;
+ }
+ }
+
+ ram_console_old_log = dest;
+ ram_console_old_log_size = total_size;
+ memcpy(ram_console_old_log,
+ &buffer->data[buffer->start], buffer->size - buffer->start);
+ memcpy(ram_console_old_log + buffer->size - buffer->start,
+ &buffer->data[0], buffer->start);
+ ptr = ram_console_old_log + old_log_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ memcpy(ptr, strbuf, strbuf_len);
+ ptr += strbuf_len;
+#endif
+ if (bootinfo) {
+ memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
+ ptr += strlen(bootinfo_label);
+ memcpy(ptr, bootinfo, bootinfo_size);
+ ptr += bootinfo_size;
+ }
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+ size_t buffer_size, const char *bootinfo,
+ char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ int numerr;
+ uint8_t *par;
+#endif
+ ram_console_buffer = buffer;
+ ram_console_buffer_size =
+ buffer_size - sizeof(struct ram_console_buffer);
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "datasize %zu\n", buffer, buffer_size,
+ ram_console_buffer_size);
+ return 0;
+ }
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+ ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "non-ecc datasize %zu\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
+ ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+
+ /* first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+ if (ram_console_rs_decoder == NULL) {
+ printk(KERN_INFO "ram_console: init_rs failed\n");
+ return 0;
+ }
+
+ ram_console_corrected_bytes = 0;
+ ram_console_bad_blocks = 0;
+
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+ if (numerr > 0) {
+ printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ printk(KERN_INFO
+ "ram_console: uncorrectable error in header\n");
+ ram_console_bad_blocks++;
+ }
+#endif
+
+ if (buffer->sig == RAM_CONSOLE_SIG) {
+ if (buffer->size > ram_console_buffer_size
+ || buffer->start > buffer->size)
+ printk(KERN_INFO "ram_console: found existing invalid "
+ "buffer, size %d, start %d\n",
+ buffer->size, buffer->start);
+ else {
+ printk(KERN_INFO "ram_console: found existing buffer, "
+ "size %d, start %d\n",
+ buffer->size, buffer->start);
+ ram_console_save_old(buffer, bootinfo, old_buf);
+ }
+ } else {
+ printk(KERN_INFO "ram_console: no valid data in buffer "
+ "(sig = 0x%08x)\n", buffer->sig);
+ }
+
+ buffer->sig = RAM_CONSOLE_SIG;
+ buffer->start = 0;
+ buffer->size = 0;
+
+ register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ console_verbose();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+ return ram_console_init((struct ram_console_buffer *)
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+ NULL,
+ ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ size_t start;
+ size_t buffer_size;
+ void *buffer;
+ const char *bootinfo = NULL;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+
+ if (res == NULL || pdev->num_resources != 1 ||
+ !(res->flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+ "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+ return -ENXIO;
+ }
+ buffer_size = res->end - res->start + 1;
+ start = res->start;
+ printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
+ start, buffer_size);
+ buffer = ioremap(res->start, buffer_size);
+ if (buffer == NULL) {
+ printk(KERN_ERR "ram_console: failed to map memory\n");
+ return -ENOMEM;
+ }
+
+ if (pdata)
+ bootinfo = pdata->bootinfo;
+
+ return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+ .probe = ram_console_driver_probe,
+ .driver = {
+ .name = "ram_console",
+ },
+};
+
+static int __init ram_console_module_init(void)
+{
+ int err;
+ err = platform_driver_register(&ram_console_driver);
+ return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= ram_console_old_log_size)
+ return 0;
+
+ count = min(len, (size_t)(ram_console_old_log_size - pos));
+ if (copy_to_user(buf, ram_console_old_log + pos, count))
+ return -EFAULT;
+
+ *offset += count;
+ return count;
+}
+
+static const struct file_operations ram_console_file_ops = {
+ .owner = THIS_MODULE,
+ .read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (ram_console_old_log == NULL)
+ return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ if (ram_console_old_log == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer for old log\n");
+ ram_console_old_log_size = 0;
+ return 0;
+ }
+ memcpy(ram_console_old_log,
+ ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ printk(KERN_ERR "ram_console: failed to create proc entry\n");
+ kfree(ram_console_old_log);
+ ram_console_old_log = NULL;
+ return 0;
+ }
+
+ entry->proc_fops = &ram_console_file_ops;
+ entry->size = ram_console_old_log_size;
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+postcore_initcall(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/drivers/staging/android/ram_console.h
index fd6aa65b2dc..9f1125c1106 100644
--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ b/drivers/staging/android/ram_console.h
@@ -1,12 +1,6 @@
/*
- * arch/arm/mach-tegra/include/mach/vmalloc.h
- *
* Copyright (C) 2010 Google, Inc.
*
- * Author:
- * Colin Cross <ccross@google.com>
- * Erik Gilling <konkers@google.com>
- *
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -18,11 +12,11 @@
*
*/
-#ifndef __MACH_TEGRA_VMALLOC_H
-#define __MACH_TEGRA_VMALLOC_H
-
-#include <asm/sizes.h>
+#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
-#define VMALLOC_END 0xFE000000UL
+struct ram_console_platform_data {
+ const char *bootinfo;
+};
-#endif
+#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
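
For reference, a board file could consume the platform data above by reserving a memory region and registering a "ram_console" platform device. This is only an illustrative sketch, not part of the patch; the address, size and bootinfo string are made up.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include "ram_console.h"

static struct resource example_ram_console_resource = {
	.start = 0x20000000,			/* hypothetical reserved RAM */
	.end   = 0x20000000 + 0x20000 - 1,	/* 128 KiB */
	.flags = IORESOURCE_MEM,
};

static struct ram_console_platform_data example_ram_console_pdata = {
	.bootinfo = "example board, bootloader rev 1.0",
};

static struct platform_device example_ram_console_device = {
	.name		= "ram_console",
	.id		= -1,
	.num_resources	= 1,
	.resource	= &example_ram_console_resource,
	.dev		= {
		.platform_data = &example_ram_console_pdata,
	},
};

/* from board init: platform_device_register(&example_ram_console_device); */

After a reboot, the previous log (plus the optional boot info) becomes readable from /proc/last_kmsg via ram_console_late_init() above.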
diff --git a/drivers/staging/android/switch/Kconfig b/drivers/staging/android/switch/Kconfig
new file mode 100644
index 00000000000..36846f62f4b
--- /dev/null
+++ b/drivers/staging/android/switch/Kconfig
@@ -0,0 +1,11 @@
+menuconfig ANDROID_SWITCH
+ tristate "Android Switch class support"
+ help
+ Say Y here to enable Android switch class support. This allows
+ monitoring switches by userspace via sysfs and uevent.
+
+config ANDROID_SWITCH_GPIO
+ tristate "Android GPIO Switch support"
+ depends on GENERIC_GPIO && ANDROID_SWITCH
+ help
+ Say Y here to enable GPIO based switch support.
diff --git a/drivers/staging/android/switch/Makefile b/drivers/staging/android/switch/Makefile
new file mode 100644
index 00000000000..d76bfdcedfa
--- /dev/null
+++ b/drivers/staging/android/switch/Makefile
@@ -0,0 +1,4 @@
+# Android Switch Class Driver
+obj-$(CONFIG_ANDROID_SWITCH) += switch_class.o
+obj-$(CONFIG_ANDROID_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h
new file mode 100644
index 00000000000..4fcb3109875
--- /dev/null
+++ b/drivers/staging/android/switch/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
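
A minimal sketch of a driver using the switch API declared above; the device name "example-dock" and the module boilerplate are illustrative only.

#include <linux/module.h>
#include <linux/err.h>
#include "switch.h"

static struct switch_dev example_sdev = {
	.name = "example-dock",		/* appears as /sys/class/switch/example-dock */
};

static int __init example_switch_init(void)
{
	int ret = switch_dev_register(&example_sdev);

	if (ret < 0)
		return ret;
	/* report the new state; emits SWITCH_NAME/SWITCH_STATE uevents */
	switch_set_state(&example_sdev, 1);
	return 0;
}

static void __exit example_switch_exit(void)
{
	switch_dev_unregister(&example_sdev);
}

module_init(example_switch_init);
module_exit(example_switch_exit);
MODULE_LICENSE("GPL");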
diff --git a/drivers/staging/android/switch/switch_class.c b/drivers/staging/android/switch/switch_class.c
new file mode 100644
index 00000000000..74680446fc6
--- /dev/null
+++ b/drivers/staging/android/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include "switch.h"
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c
new file mode 100644
index 00000000000..38b2c2f6004
--- /dev/null
+++ b/drivers/staging/android/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include "switch.h"
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = __devexit_p(gpio_switch_remove),
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 00000000000..a64481c3e86
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,176 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+ struct timed_output_dev dev;
+ struct hrtimer timer;
+ spinlock_t lock;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+ struct timed_gpio_data *data =
+ container_of(timer, struct timed_gpio_data, timer);
+
+ gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+ return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+
+ if (hrtimer_active(&data->timer)) {
+ ktime_t r = hrtimer_get_remaining(&data->timer);
+ struct timeval t = ktime_to_timeval(r);
+ return t.tv_sec * 1000 + t.tv_usec / 1000;
+ } else
+ return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* cancel previous timer and set GPIO according to value */
+ hrtimer_cancel(&data->timer);
+ gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+ if (value > 0) {
+ if (value > data->max_timeout)
+ value = data->max_timeout;
+
+ hrtimer_start(&data->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio *cur_gpio;
+ struct timed_gpio_data *gpio_data, *gpio_dat;
+ int i, j, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ cur_gpio = &pdata->gpios[i];
+ gpio_dat = &gpio_data[i];
+
+ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ gpio_dat->timer.function = gpio_timer_func;
+ spin_lock_init(&gpio_dat->lock);
+
+ gpio_dat->dev.name = cur_gpio->name;
+ gpio_dat->dev.get_time = gpio_get_time;
+ gpio_dat->dev.enable = gpio_enable;
+ ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+ if (ret >= 0) {
+ ret = timed_output_dev_register(&gpio_dat->dev);
+ if (ret < 0)
+ gpio_free(cur_gpio->gpio);
+ }
+ if (ret < 0) {
+ for (j = 0; j < i; j++) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+ kfree(gpio_data);
+ return ret;
+ }
+
+ gpio_dat->gpio = cur_gpio->gpio;
+ gpio_dat->max_timeout = cur_gpio->max_timeout;
+ gpio_dat->active_low = cur_gpio->active_low;
+ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+ }
+
+ platform_set_drvdata(pdev, gpio_data);
+
+ return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+
+ kfree(gpio_data);
+
+ return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+ .probe = timed_gpio_probe,
+ .remove = timed_gpio_remove,
+ .driver = {
+ .name = TIMED_GPIO_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init timed_gpio_init(void)
+{
+ return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+ platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/drivers/staging/android/timed_gpio.h
index d138448eff1..a0e15f8be3f 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/drivers/staging/android/timed_gpio.h
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/vmalloc.h
+/* include/linux/timed_gpio.h
*
- * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -11,12 +11,23 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- */
+*/
-#ifndef __ASM_ARCH_MSM_VMALLOC_H
-#define __ASM_ARCH_MSM_VMALLOC_H
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
-#define VMALLOC_END 0xd0000000UL
+#define TIMED_GPIO_NAME "timed-gpio"
-#endif
+struct timed_gpio {
+ const char *name;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+ int num_gpios;
+ struct timed_gpio *gpios;
+};
+#endif
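
A minimal sketch of the platform data consumed by timed_gpio_probe() above, assuming a single vibrator-style output; the name and GPIO number are illustrative.

#include <linux/platform_device.h>
#include "timed_gpio.h"

static struct timed_gpio example_vibrator = {
	.name        = "vibrator",	/* name chosen by the platform */
	.gpio        = 57,		/* hypothetical GPIO */
	.max_timeout = 15000,		/* milliseconds */
	.active_low  = 0,
};

static struct timed_gpio_platform_data example_timed_gpio_pdata = {
	.num_gpios = 1,
	.gpios     = &example_vibrator,
};

static struct platform_device example_timed_gpio_device = {
	.name = TIMED_GPIO_NAME,	/* "timed-gpio" */
	.id   = -1,
	.dev  = {
		.platform_data = &example_timed_gpio_pdata,
	},
};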
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 00000000000..f373422308e
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,123 @@
+/* drivers/misc/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int remaining = tdev->get_time(tdev);
+
+ return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+
+ tdev->enable(tdev, value);
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
+static int create_timed_output_class(void)
+{
+ if (!timed_output_class) {
+ timed_output_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_output_class))
+ return PTR_ERR(timed_output_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+ int ret;
+
+ if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+ return -EINVAL;
+
+ ret = create_timed_output_class();
+ if (ret < 0)
+ return ret;
+
+ tdev->index = atomic_inc_return(&device_count);
+ tdev->dev = device_create(timed_output_class, NULL,
+ MKDEV(0, tdev->index), NULL, tdev->name);
+ if (IS_ERR(tdev->dev))
+ return PTR_ERR(tdev->dev);
+
+ ret = device_create_file(tdev->dev, &dev_attr_enable);
+ if (ret < 0)
+ goto err_create_file;
+
+ dev_set_drvdata(tdev->dev, tdev);
+ tdev->state = 0;
+ return 0;
+
+err_create_file:
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+ tdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+ device_remove_file(tdev->dev, &dev_attr_enable);
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+ return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+ class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 00000000000..ec907ab2ff5
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* include/linux/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+ const char *name;
+
+ /* enable the output and set the timer */
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
+
+ /* returns the current number of milliseconds remaining on the timer */
+ int (*get_time)(struct timed_output_dev *sdev);
+
+ /* private data */
+ struct device *dev;
+ int index;
+ int state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
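
From userspace, each registered timed_output device shows up as /sys/class/timed_output/<name>/enable: writing N starts the output for N milliseconds, reading returns the time remaining. A hypothetical example, assuming a device registered as "vibrator":

#include <stdio.h>

int main(void)
{
	int remaining = 0;
	FILE *f = fopen("/sys/class/timed_output/vibrator/enable", "w");

	if (!f)
		return 1;
	fprintf(f, "500\n");		/* drive the output for 500 ms */
	fclose(f);

	f = fopen("/sys/class/timed_output/vibrator/enable", "r");
	if (f && fscanf(f, "%d", &remaining) == 1)
		printf("%d ms remaining\n", remaining);
	if (f)
		fclose(f);
	return 0;
}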
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 7bb7da7959a..e77e4e0396c 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -201,7 +201,7 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
struct usb_interface *intf = to_usb_interface(dev);
struct asus_oled_dev *odev = usb_get_intfdata(intf);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
@@ -217,7 +217,7 @@ static ssize_t class_set_enabled(struct device *device,
(struct asus_oled_dev *) dev_get_drvdata(device);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 2fa658eb74d..179707b5e7c 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -161,6 +161,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
INT Status = STATUS_FAILURE;
int timeout = 0;
IOCTL_BUFFER IoBuffer;
+ int bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
@@ -230,11 +231,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (!temp_buff)
return -ENOMEM;
- Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, Bufflen);
- if (Status == STATUS_SUCCESS) {
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
}
kfree(temp_buff);
@@ -302,7 +308,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- /* FIXME: don't trust user supplied length */
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
+
temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
if (!temp_buff)
return STATUS_FAILURE;
@@ -318,11 +328,17 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
- if (Status == STATUS_SUCCESS)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
+ }
kfree(temp_buff);
break;
@@ -437,12 +453,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
}
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
-
- if (STATUS_SUCCESS != Status) {
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"GPIO_MODE_REGISTER read failed");
break;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Set the gpio mode register to output */
@@ -519,12 +537,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
uiBit = gpio_info.uiGpioNumber;
/* Set the gpio output register */
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
(PUINT)ucRead, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
}
break;
@@ -590,11 +611,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
@@ -605,7 +629,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -629,11 +653,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (STATUS_SUCCESS != Status) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Validating the request */
@@ -678,7 +705,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -706,9 +733,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
return -ENOMEM;
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- Status = -EFAULT;
kfree(pvBuffer);
- break;
+ return -EFAULT;
}
down(&Adapter->LowPowerModeSync);
@@ -733,8 +759,7 @@ cntrlEnd:
}
case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
return -EACCES;
@@ -743,157 +768,162 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Starting the firmware download PID =0x%x!!!!\n", current->pid);
- if (!down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = FALSE;
- Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = FALSE;
- Adapter->fw_download_done = FALSE;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if (Status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- }
- mdelay(10);
- } else {
- Status = -EBUSY;
+ if (down_trylock(&Adapter->fw_download_sema))
+ return -EBUSY;
+
+ Adapter->bBinDownloaded = FALSE;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = FALSE;
+ Adapter->fw_download_done = FALSE;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
}
+ mdelay(10);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD: {
FIRMWARE_INFO *psFwInfo = NULL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
- do {
- if (!down_trylock(&Adapter->fw_download_sema)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- Status = -EINVAL;
- break;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
- return -EINVAL;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if (!psFwInfo)
- return -ENOMEM;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0)) {
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo) {
+ up(&Adapter->fw_download_sema);
+ return -ENOMEM;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- Status = -EINVAL;
- break;
- }
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- if (Status != STATUS_SUCCESS) {
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
+ psFwInfo->u32FirmwareLength);
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- /* up(&Adapter->fw_download_sema); */
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = FALSE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- break;
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+
+ /* up(&Adapter->fw_download_sema); */
- } while (0);
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
+ }
+ }
if (Status != STATUS_SUCCESS)
up(&Adapter->fw_download_sema);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
kfree(psFwInfo);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (!down_trylock(&Adapter->fw_download_sema)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"FW download blocked as EEPROM Read/Write is in progress\n");
up(&Adapter->fw_download_sema);
return -EACCES;
}
- if (down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = TRUE;
- Adapter->bCfgDownloaded = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->downloadDDR = 0;
-
- /* setting the Mips to Run */
- Status = run_card_proc(Adapter);
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
- }
-
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = FALSE;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid = INVALID_PID;
- Adapter->fw_download_done = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- if (!timeout)
- Status = -ENODEV;
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
} else {
- Status = -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
+ }
+
+ mdelay(10);
+
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
+
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = FALSE;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ if (!timeout)
+ Status = -ENODEV;
+
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BE_BUCKET_SIZE:
@@ -969,11 +999,15 @@ cntrlEnd:
}
case IOCTL_BCM_GET_DRIVER_VERSION: {
+ ulong len;
+
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1);
+
+ if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
@@ -985,8 +1019,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.OutputLength != sizeof(link_state)) {
@@ -1001,8 +1034,7 @@ cntrlEnd:
if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
Status = STATUS_SUCCESS;
break;
@@ -1068,8 +1100,10 @@ cntrlEnd:
GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
if (Status != STATUS_FAILURE)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS))) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
kfree(temp_buff);
break;
@@ -1103,7 +1137,9 @@ cntrlEnd:
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- /* FIXME: restrict length */
+ if (IoBuffer.InputLength < sizeof(ULONG) * 2)
+ return -EINVAL;
+
pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
if (!pvBuffer)
return -ENOMEM;
@@ -1111,8 +1147,7 @@ cntrlEnd:
/* Get WrmBuffer structure */
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
kfree(pvBuffer);
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1242,8 +1277,7 @@ cntrlEnd:
memset(&tv1, 0, sizeof(struct timeval));
if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IsFlash2x(Adapter)) {
@@ -1252,7 +1286,7 @@ cntrlEnd:
(Adapter->eActiveDSD != DSD2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE ;
+ return STATUS_FAILURE;
}
}
@@ -1271,8 +1305,7 @@ cntrlEnd:
if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
- Status = STATUS_FAILURE;
- break;
+ return STATUS_FAILURE;
}
pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
@@ -1280,9 +1313,8 @@ cntrlEnd:
return -ENOMEM;
if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- Status = -EFAULT;
kfree(pReadData);
- break;
+ return -EFAULT;
}
do_gettimeofday(&tv0);
@@ -1309,7 +1341,7 @@ cntrlEnd:
if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
kfree(pReadData);
- Status = -EFAULT;
+ return -EFAULT;
}
} else {
down(&Adapter->NVMRdmWrmLock);
@@ -1377,9 +1409,8 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
kfree(pReadData);
- Status = STATUS_SUCCESS;
+ return STATUS_SUCCESS;
}
- break;
case IOCTL_BCM_FLASH2X_SECTION_READ: {
FLASH2X_READWRITE sFlash2xRead = {0};
@@ -1456,7 +1487,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1548,7 +1581,9 @@ cntrlEnd:
Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EFAULT;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
@@ -1608,8 +1643,10 @@ cntrlEnd:
BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP))) {
+ kfree(psFlash2xBitMap);
+ return -EFAULT;
+ }
kfree(psFlash2xBitMap);
}
@@ -1627,13 +1664,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
down(&Adapter->NVMRdmWrmLock);
@@ -1677,13 +1714,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
@@ -1744,7 +1781,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- break;
+ return -EFAULT;
}
if (Adapter->eNVMType != NVM_FLASH) {
@@ -1783,12 +1820,12 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
@@ -1830,8 +1867,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(NVM_READWRITE)))
@@ -1886,7 +1922,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1907,8 +1945,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.InputLength != sizeof(unsigned long)) {
@@ -1919,8 +1956,7 @@ cntrlEnd:
Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 2b1e9e17e11..b058e30b2ca 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -1,195 +1,208 @@
/**
-@file HandleControlPacket.c
-This file contains the routines to deal with
-sending and receiving of control packets.
-*/
+ * @file HandleControlPacket.c
+ * This file contains the routines to deal with
+ * sending and receiving of control packets.
+ */
#include "headers.h"
/**
-When a control packet is received, analyze the
-"status" and call appropriate response function.
-Enqueue the control packet for Application.
-@return None
-*/
+ * When a control packet is received, analyze the
+ * "status" and call appropriate response function.
+ * Enqueue the control packet for Application.
+ * @return None
+ */
static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, struct sk_buff *skb)
{
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
BOOLEAN HighPriorityMessage = FALSE;
- struct sk_buff * newPacket = NULL;
+ struct sk_buff *newPacket = NULL;
CHAR cntrl_msg_mask_bit = 0;
- BOOLEAN drop_pkt_flag = TRUE ;
+ BOOLEAN drop_pkt_flag = TRUE;
USHORT usStatus = *(PUSHORT)(skb->data);
if (netif_msg_pktdata(Adapter))
print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, skb->len, 0);
-
- switch(usStatus)
- {
- case CM_RESPONSES: // 0xA0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
- HighPriorityMessage = TRUE ;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- HighPriorityMessage = TRUE ;
- if(Adapter->LinkStatus==LINKUP_DONE)
- {
- CmControlResponseMessage(Adapter,(skb->data +sizeof(USHORT)));
- }
- break;
- case LINK_CONTROL_RESP: //0xA2
- case STATUS_RSP: //0xA1
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"LINK_CONTROL_RESP");
- HighPriorityMessage = TRUE ;
- LinkControlResponseMessage(Adapter,(skb->data + sizeof(USHORT)));
- break;
- case STATS_POINTER_RESP: //0xA6
- HighPriorityMessage = TRUE ;
- StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
- break;
- case IDLE_MODE_STATUS: //0xA3
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"IDLE_MODE_STATUS Type Message Got from F/W");
- InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
- sizeof(USHORT)));
- HighPriorityMessage = TRUE ;
- break;
-
- case AUTH_SS_HOST_MSG:
- HighPriorityMessage = TRUE ;
- break;
-
- default:
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"Got Default Response");
- /* Let the Application Deal with This Packet */
- break;
+ 16, 1, skb->data, skb->len, 0);
+
+ switch (usStatus) {
+ case CM_RESPONSES: /* 0xA0 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
+ HighPriorityMessage = TRUE;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ HighPriorityMessage = TRUE;
+ if (Adapter->LinkStatus == LINKUP_DONE)
+ CmControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case LINK_CONTROL_RESP: /* 0xA2 */
+ case STATUS_RSP: /* 0xA1 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "LINK_CONTROL_RESP");
+ HighPriorityMessage = TRUE;
+ LinkControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case STATS_POINTER_RESP: /* 0xA6 */
+ HighPriorityMessage = TRUE;
+ StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
+ break;
+ case IDLE_MODE_STATUS: /* 0xA3 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "IDLE_MODE_STATUS Type Message Got from F/W");
+ InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
+ sizeof(USHORT)));
+ HighPriorityMessage = TRUE;
+ break;
+
+ case AUTH_SS_HOST_MSG:
+ HighPriorityMessage = TRUE;
+ break;
+
+ default:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Got Default Response");
+ /* Let the Application Deal with This Packet */
+ break;
}
- //Queue The Control Packet to The Application Queues
+ /* Queue The Control Packet to The Application Queues */
down(&Adapter->RxAppControlQueuelock);
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- if(Adapter->device_removed)
- {
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ if (Adapter->device_removed)
break;
- }
- drop_pkt_flag = TRUE ;
+ drop_pkt_flag = TRUE;
/*
- There are cntrl msg from A0 to AC. It has been mapped to 0 to C bit in the cntrl mask.
- Also, by default AD to BF has been masked to the rest of the bits... which wil be ON by default.
- if mask bit is enable to particular pkt status, send it out to app else stop it.
- */
+ * There are cntrl msgs from A0 to AC. They have been mapped to
+ * bits 0 to C in the cntrl mask.
+ * Also, by default AD to BF have been mapped to the rest of the
+ * bits, which will be ON by default.
+ * If the mask bit is enabled for a particular pkt status, send it
+ * out to the app, else stop it.
+ */
cntrl_msg_mask_bit = (usStatus & 0x1F);
- //printk("\ninew msg mask bit which is disable in mask:%X", cntrl_msg_mask_bit);
- if(pTarang->RxCntrlMsgBitMask & (1<<cntrl_msg_mask_bit))
- drop_pkt_flag = FALSE;
-
- if ((drop_pkt_flag == TRUE) || (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) ||
- ((pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN/2) && (HighPriorityMessage == FALSE)))
- {
+ /*
+ * printk("\ninew msg mask bit which is disable in mask:%X",
+ * cntrl_msg_mask_bit);
+ */
+ if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
+ drop_pkt_flag = FALSE;
+
+ if ((drop_pkt_flag == TRUE) ||
+ (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
+ || ((pTarang->AppCtrlQueueLen >
+ MAX_APP_QUEUE_LEN / 2) &&
+ (HighPriorityMessage == FALSE))) {
/*
- Assumption:-
- 1. every tarang manages it own dropped pkt statitistics
- 2. Total packet dropped per tarang will be equal to the sum of all types of dropped
- pkt by that tarang only.
-
- */
- switch(*(PUSHORT)skb->data)
- {
- case CM_RESPONSES:
- pTarang->stDroppedAppCntrlMsgs.cm_responses++;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
- break;
- case LINK_CONTROL_RESP:
- pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
- break;
- case STATUS_RSP:
- pTarang->stDroppedAppCntrlMsgs.status_rsp++;
- break;
- case STATS_POINTER_RESP:
- pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
- break;
- case IDLE_MODE_STATUS:
- pTarang->stDroppedAppCntrlMsgs.idle_mode_status++ ;
- break;
- case AUTH_SS_HOST_MSG:
- pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++ ;
- break;
+ * Assumptions:
+ * 1. Every tarang manages its own dropped pkt
+ * statistics.
+ * 2. Total packets dropped per tarang will be equal to
+ * the sum of all types of dropped pkts by that
+ * tarang only.
+ */
+ switch (*(PUSHORT)skb->data) {
+ case CM_RESPONSES:
+ pTarang->stDroppedAppCntrlMsgs.cm_responses++;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
+ break;
+ case LINK_CONTROL_RESP:
+ pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
+ break;
+ case STATUS_RSP:
+ pTarang->stDroppedAppCntrlMsgs.status_rsp++;
+ break;
+ case STATS_POINTER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
+ break;
+ case IDLE_MODE_STATUS:
+ pTarang->stDroppedAppCntrlMsgs.idle_mode_status++;
+ break;
+ case AUTH_SS_HOST_MSG:
+ pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++;
+ break;
default:
- pTarang->stDroppedAppCntrlMsgs.low_priority_message++ ;
- break;
+ pTarang->stDroppedAppCntrlMsgs.low_priority_message++;
+ break;
}
continue;
}
- newPacket = skb_clone(skb, GFP_KERNEL);
- if (!newPacket)
- break;
- ENQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail,
- newPacket);
- pTarang->AppCtrlQueueLen++;
- }
+ newPacket = skb_clone(skb, GFP_KERNEL);
+ if (!newPacket)
+ break;
+ ENQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail, newPacket);
+ pTarang->AppCtrlQueueLen++;
+ }
up(&Adapter->RxAppControlQueuelock);
- wake_up(&Adapter->process_read_wait_queue);
- dev_kfree_skb(skb);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "After wake_up_interruptible");
+ wake_up(&Adapter->process_read_wait_queue);
+ dev_kfree_skb(skb);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "After wake_up_interruptible");
}
/**
-@ingroup ctrl_pkt_functions
-Thread to handle control pkt reception
-*/
-int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter object*/
- )
+ * @ingroup ctrl_pkt_functions
+ * Thread to handle control pkt reception
+ */
+int control_packet_handler(PMINI_ADAPTER Adapter /* pointer to adapter object */)
{
- struct sk_buff *ctrl_packet= NULL;
+ struct sk_buff *ctrl_packet = NULL;
unsigned long flags = 0;
- //struct timeval tv ;
- //int *puiBuffer = NULL ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Entering to make thread wait on control packet event!");
- while(1)
- {
+ /* struct timeval tv; */
+ /* int *puiBuffer = NULL; */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "Entering to make thread wait on control packet event!");
+ while (1) {
wait_event_interruptible(Adapter->process_rx_cntrlpkt,
- atomic_read(&Adapter->cntrlpktCnt) ||
- Adapter->bWakeUpDevice ||
- kthread_should_stop()
- );
+ atomic_read(&Adapter->cntrlpktCnt) ||
+ Adapter->bWakeUpDevice ||
+ kthread_should_stop());
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Exiting \n");
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Exiting\n");
return 0;
}
- if(TRUE == Adapter->bWakeUpDevice)
- {
+ if (TRUE == Adapter->bWakeUpDevice) {
Adapter->bWakeUpDevice = FALSE;
- if((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) &&
- ((TRUE == Adapter->IdleMode)|| (TRUE == Adapter->bShutStatus)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Calling InterfaceAbortIdlemode\n");
- // Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceIdleModeWakeup (Adapter);
+ if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode)
+ && ((TRUE == Adapter->IdleMode) ||
+ (TRUE == Adapter->bShutStatus))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ CP_CTRL_PKT, DBG_LVL_ALL,
+ "Calling InterfaceAbortIdlemode\n");
+ /*
+ * Adapter->bTriedToWakeUpFromlowPowerMode
+ * = TRUE;
+ */
+ InterfaceIdleModeWakeup(Adapter);
}
continue;
}
- while(atomic_read(&Adapter->cntrlpktCnt))
- {
+ while (atomic_read(&Adapter->cntrlpktCnt)) {
spin_lock_irqsave(&Adapter->control_queue_lock, flags);
ctrl_packet = Adapter->RxControlHead;
- if(ctrl_packet)
- {
- DEQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail);
-// Adapter->RxControlHead=ctrl_packet->next;
+ if (ctrl_packet) {
+ DEQUEUEPACKET(Adapter->RxControlHead,
+ Adapter->RxControlTail);
+ /* Adapter->RxControlHead=ctrl_packet->next; */
}
- spin_unlock_irqrestore (&Adapter->control_queue_lock, flags);
- handle_rx_control_packet(Adapter, ctrl_packet);
+ spin_unlock_irqrestore(&Adapter->control_queue_lock,
+ flags);
+ handle_rx_control_packet(Adapter, ctrl_packet);
atomic_dec(&Adapter->cntrlpktCnt);
}
@@ -201,22 +214,22 @@ int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter obje
INT flushAllAppQ(void)
{
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
struct sk_buff *PacketToDrop = NULL;
- for(pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- while(pTarang->RxAppControlHead != NULL)
- {
- PacketToDrop=pTarang->RxAppControlHead;
- DEQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail);
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ while (pTarang->RxAppControlHead != NULL) {
+ PacketToDrop = pTarang->RxAppControlHead;
+ DEQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail);
dev_kfree_skb(PacketToDrop);
}
pTarang->AppCtrlQueueLen = 0;
- //dropped contrl packet statistics also should be reset.
- memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0, sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+ /* Dropped control packet statistics should also be reset. */
+ memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0,
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index bcd86bbef2f..65c352f3568 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -62,6 +62,7 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
static int fw_down;
INT Status = STATUS_SUCCESS;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
+ int bytes;
buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
@@ -94,8 +95,9 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
break;
}
- Status = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
- if (Status) {
+ bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
@@ -302,6 +304,7 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
+ int bytes;
if (NULL == readbackbuff) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
@@ -310,9 +313,10 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
while (u32FirmwareLength && !retval) {
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = rdm(Adapter, u32StartingAddress, readbackbuff, len);
+ bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 96fa4ead793..faeb03e62c0 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -46,6 +46,7 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
+ int bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"SubType of Message :0x%X", ntohl(*puiBuffer));
@@ -77,16 +78,16 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
else if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
{
//clear on read Register
- status = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
//clear on read Register
- status = rdmalt (Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
@@ -117,9 +118,9 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
Adapter->chip_id== BCS220_3)
{
- status = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
@@ -266,6 +267,8 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
unsigned int uiRegVal = 0;
INT Status = 0;
+ int bytes;
+
if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
{
// clear idlemode interrupt.
@@ -282,16 +285,16 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
//clear Interrupt EP registers.
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index a09d35108f0..8e3c586a699 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -68,7 +68,7 @@ static void InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
{
unsigned long ulReg = 0;
- int ret;
+ int bytes;
/* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
@@ -94,8 +94,8 @@ static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
/* Program TX EP as interrupt(Alternate Setting) */
- ret = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
- if (ret) {
+ bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"reading of Tx EP failed\n");
return;
@@ -430,6 +430,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
int usedIntOutForBulkTransfer = 0 ;
BOOLEAN bBcm16 = FALSE;
UINT uiData = 0;
+ int bytes;
/* Store the usb dev into interface adapter */
psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
@@ -438,9 +439,10 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
- retval = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
+ bytes = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
(u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
return retval;
}
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index 61f878b4f56..2218faeaf8a 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -5,7 +5,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
PVOID buff,
INT len)
{
- int retval = 0;
+ int bytes;
USHORT usRetries = 0;
if (psIntfAdapter == NULL) {
@@ -30,7 +30,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
psIntfAdapter->psAdapter->DeviceAccess = TRUE;
do {
- retval = usb_control_msg(psIntfAdapter->udev,
+ bytes = usb_control_msg(psIntfAdapter->udev,
usb_rcvctrlpipe(psIntfAdapter->udev, 0),
0x02,
0xC2,
@@ -41,22 +41,20 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
5000);
usRetries++;
- if (-ENODEV == retval) {
+ if (-ENODEV == bytes) {
psIntfAdapter->psAdapter->device_removed = TRUE;
break;
}
- } while ((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
+ } while ((bytes < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
- if (retval < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", retval, usRetries);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return retval;
- } else {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return STATUS_SUCCESS;
- }
+ if (bytes < 0)
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", bytes, usRetries);
+ else
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", bytes);
+
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ return bytes;
}
INT InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index e9f29d59751..c7725e141fd 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -814,6 +814,7 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
unsigned int value = 0, uiResetValue = 0;
+ int bytes;
psIntfAdapter = ((PS_INTERFACE_ADAPTER)(ps_adapter->pvInterfaceAdapter));
ps_adapter->bDDRInitDone = FALSE;
@@ -848,8 +849,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
ps_adapter->chip_id == BCS250_BC ||
ps_adapter->chip_id == BCS220_3) {
- retval = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -862,8 +864,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
}
}
} else {
- retval = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -925,11 +928,16 @@ err_exit:
int run_card_proc(PMINI_ADAPTER ps_adapter)
{
+ int status = STATUS_SUCCESS;
+ int bytes;
+
unsigned int value = 0;
{
- if (rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
+ bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
- return STATUS_FAILURE;
+ return status;
}
if (ps_adapter->bFlashBoot)
@@ -942,7 +950,7 @@ int run_card_proc(PMINI_ADAPTER ps_adapter)
return STATUS_FAILURE;
}
}
- return STATUS_SUCCESS;
+ return status;
}
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
@@ -1215,6 +1223,7 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
int status = 0, i = 0;
unsigned int temp = 0;
unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
+ int bytes;
if (!pucmacaddr) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
@@ -1231,8 +1240,9 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
}
for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- status = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
- if (status != STATUS_SUCCESS) {
+ bytes = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
kfree(pucmacaddr);
pucmacaddr = NULL;
@@ -1574,11 +1584,13 @@ void update_per_sf_desc_cnts(PMINI_ADAPTER Adapter)
{
INT iIndex = 0;
u32 uibuff[MAX_TARGET_DSX_BUFFERS];
+ int bytes;
if (!atomic_read(&Adapter->uiMBupdate))
return;
- if (rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS) < 0) {
+ bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS);
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
return;
}
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index c13ea5c9a2a..101c4e31249 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -1,4 +1,3 @@
-
/*
* File Name: hostmibs.c
*
@@ -6,73 +5,72 @@
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver to the Host MIBS structure and giving the same to Application.
- *
*/
+
#include "headers.h"
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
+INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
- S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
- PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
+ S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
+ S_PHS_RULE *pstPhsRule = NULL;
+ S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
+ S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
+ PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
- UINT nClassifierIndex = 0, nPhsTableIndex = 0,nSfIndex = 0, uiIndex = 0;
+ UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;
- if(pDeviceExtension == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
+ if (pDeviceExtension == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
- //Copy the classifier Table
- for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
- nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
- (PVOID)&Adapter->astClassifierTable[nClassifierIndex],
- sizeof(S_MIBS_CLASSIFIER_RULE));
+ /* Copy the classifier Table */
+ for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
+ if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
+ memcpy((PVOID)&pstHostMibs->
+ astClassifierTable[nClassifierIndex],
+ (PVOID)&Adapter->
+ astClassifierTable[nClassifierIndex],
+ sizeof(S_MIBS_CLASSIFIER_RULE));
}
- //Copy the SF Table
- for(nSfIndex=0; nSfIndex < NO_OF_QUEUES ; nSfIndex++)
- {
- if(Adapter->PackInfo[nSfIndex].bValid)
- {
- memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
- }
- else
- {
- //if index in not valid, don't process this for the PHS table. Go For the next entry.
- continue ;
- }
+ /* Copy the SF Table */
+ for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
+ if (Adapter->PackInfo[nSfIndex].bValid) {
+ memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],
+ (PVOID)&Adapter->PackInfo[nSfIndex],
+ sizeof(S_MIBS_SERVICEFLOW_TABLE));
+ } else {
+ /* If the index is not valid,
+ * don't process this entry for the PHS table.
+ * Go for the next entry.
+ */
+ continue;
+ }
- //Retrieve the SFID Entry Index for requested Service Flow
- if(PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- Adapter->PackInfo[nSfIndex].usVCID_Value ,&pstServiceFlowEntry))
- {
+ /* Retrieve the SFID Entry Index for requested Service Flow */
+ if (PHS_INVALID_TABLE_INDEX ==
+ GetServiceFlowEntry(
+ pDeviceExtension->pstServiceFlowPhsRulesTable,
+ Adapter->PackInfo[nSfIndex].usVCID_Value,
+ &pstServiceFlowEntry))
- continue;
- }
+ continue;
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
-
- for(uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
- if(pstClassifierRule->bUsed)
- {
- pstPhsRule = pstClassifierRule->pstPhsRule;
+ if (pstClassifierRule->bUsed) {
+ pstPhsRule = pstClassifierRule->pstPhsRule;
- pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
+ pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID =
+ Adapter->PackInfo[nSfIndex].ulSFID;
- memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI,
- sizeof(S_PHS_RULE));
+ memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
+ &pstPhsRule->u8PHSI,
+ sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
@@ -81,65 +79,63 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMi
}
-
- //copy other Host Statistics parameters
+ /* Copy other Host Statistics parameters */
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
- pstHostMibs->stHostInfo.CurrNumFreeDesc =
- atomic_read(&Adapter->CurrNumFreeTxDesc);
+ pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
- memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist,Adapter->aTxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
- memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist,Adapter->aRxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
-
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
- &(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+ &(pTarang->stDroppedAppCntrlMsgs),
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
-
-VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
- CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
+VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter, CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfSfid = psfLocalSet->u32SFID;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid);
+ S_MIBS_EXTSERVICEFLOW_PARAMETERS *t = &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
+
+ t->wmanIfSfid = psfLocalSet->u32SFID;
+ t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
+ t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
+ t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
+ t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
+ t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
+ t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
+ t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
+ t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
+ t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType);
+ t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
+ t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
+ t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
+ t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
+ t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
+ t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
+ t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
+ t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
+ t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
+ t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
+ t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
+ t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
+ t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
+ t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
}
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 16e939fa15d..c7f48862972 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -5,65 +5,69 @@
static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
{
- B_UINT16 u16CheckSum=0;
- while(u32Size--) {
+ B_UINT16 u16CheckSum = 0;
+ while (u32Size--) {
u16CheckSum += (B_UINT8)~(*pu8Buffer);
- pu8Buffer++;
+ pu8Buffer++;
}
return u16CheckSum;
}
+
BOOLEAN IsReqGpioIsLedInNVM(PMINI_ADAPTER Adapter, UINT gpios)
{
- INT Status ;
- Status = (Adapter->gpioBitMap & gpios) ^ gpios ;
- if(Status)
+ INT Status;
+ Status = (Adapter->gpioBitMap & gpios) ^ gpios;
+ if (Status)
return FALSE;
else
return TRUE;
}
-static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
+static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex,
+ ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
{
int Status = STATUS_SUCCESS;
BOOLEAN bInfinite = FALSE;
- /*Check if num_of_time is -ve. If yes, blink led in infinite loop*/
- if(num_of_time < 0)
- {
+ /* Check if num_of_time is -ve. If yes, blink led in infinite loop */
+ if (num_of_time < 0) {
bInfinite = TRUE;
num_of_time = 1;
}
- while(num_of_time)
- {
-
- if(currdriverstate == Adapter->DriverState)
+ while (num_of_time) {
+ if (currdriverstate == Adapter->DriverState)
TURN_ON_LED(GPIO_Num, uiLedIndex);
- /*Wait for timeout after setting on the LED*/
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ /* Wait for the timeout after turning on the LED */
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
- if(Status)
- {
+ if (Status) {
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
- if(bInfinite == FALSE)
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+ if (bInfinite == FALSE)
num_of_time--;
}
return Status;
@@ -71,19 +75,19 @@ static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULO
static INT ScaleRateofTransfer(ULONG rate)
{
- if(rate <= 3)
+ if (rate <= 3)
return rate;
- else if((rate > 3) && (rate <= 100))
+ else if ((rate > 3) && (rate <= 100))
return 5;
- else if((rate > 100) && (rate <= 200))
+ else if ((rate > 100) && (rate <= 200))
return 6;
- else if((rate > 200) && (rate <= 300))
+ else if ((rate > 200) && (rate <= 300))
return 7;
- else if((rate > 300) && (rate <= 400))
+ else if ((rate > 300) && (rate <= 400))
return 8;
- else if((rate > 400) && (rate <= 500))
+ else if ((rate > 400) && (rate <= 500))
return 9;
- else if((rate > 500) && (rate <= 600))
+ else if ((rate > 500) && (rate <= 600))
return 10;
else
return MAX_NUM_OF_BLINKS;
@@ -92,215 +96,232 @@ static INT ScaleRateofTransfer(ULONG rate)
static INT LED_Proportional_Blink(PMINI_ADAPTER Adapter, UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex, LedEventInfo_t currdriverstate)
+ UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex,
+ LedEventInfo_t currdriverstate)
{
- /* Initial values of TX and RX packets*/
+ /* Initial values of TX and RX packets */
ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
- /*values of TX and RX packets after 1 sec*/
+ /* values of TX and RX packets after 1 sec */
ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
- /*Rate of transfer of Tx and Rx in 1 sec*/
+ /* Rate of transfer of Tx and Rx in 1 sec */
ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
int Status = STATUS_SUCCESS;
INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
UINT remDelay = 0;
BOOLEAN bBlinkBothLED = TRUE;
- //UINT GPIO_num = DISABLE_GPIO_NUM;
+ /* UINT GPIO_num = DISABLE_GPIO_NUM; */
ulong timeout = 0;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
- while((Adapter->device_removed == FALSE))
- {
+ while ((Adapter->device_removed == FALSE)) {
timeout = 50;
- /*Blink Tx and Rx LED when both Tx and Rx is in normal bandwidth*/
- if(bBlinkBothLED)
- {
- /*Assign minimum number of blinks of either Tx or Rx.*/
- if(num_of_time_tx > num_of_time_rx)
+ /*
+ * Blink the Tx and Rx LEDs when both Tx and Rx are
+ * in normal bandwidth
+ */
+ if (bBlinkBothLED) {
+ /*
+ * Assign minimum number of blinks of
+ * either Tx or Rx.
+ */
+ if (num_of_time_tx > num_of_time_rx)
num_of_time = num_of_time_rx;
else
num_of_time = num_of_time_tx;
- if(num_of_time > 0)
- {
- /*Blink both Tx and Rx LEDs*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ if (num_of_time > 0) {
+ /* Blink both Tx and Rx LEDs */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout, num_of_time,currdriverstate)
+
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
}
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, (1 << GPIO_Num_rx), uiRxLedIndex, timeout,
- num_of_time_rx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, (1 << GPIO_Num_rx),
+ uiRxLedIndex, timeout,
+ num_of_time_rx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_rx;
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout,
- num_of_time_tx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time_tx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_tx;
}
- }
- else
- {
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ } else {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout,
- num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
}
}
- /* If Tx/Rx rate is less than maximum blinks per second,
- * wait till delay completes to 1 second
- */
+
+ /*
+ * If Tx/Rx rate is less than maximum blinks per second,
+ * wait till delay completes to 1 second
+ */
remDelay = MAX_NUM_OF_BLINKS - num_of_time;
- if(remDelay > 0)
- {
- timeout= 100 * remDelay;
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState ||kthread_should_stop() ,
- msecs_to_jiffies (timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ if (remDelay > 0) {
+ timeout = 100 * remDelay;
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ LED_DUMP_INFO, DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
return EVENT_SIGNALED;
}
- if(Status)
+ if (Status)
return EVENT_SIGNALED;
}
- /*Turn off both Tx and Rx LEDs before next second*/
- TURN_OFF_LED(1<<GPIO_Num_tx, uiTxLedIndex);
- TURN_OFF_LED(1<<GPIO_Num_rx, uiTxLedIndex);
+ /* Turn off both Tx and Rx LEDs before next second */
+ TURN_OFF_LED(1 << GPIO_Num_tx, uiTxLedIndex);
+ TURN_OFF_LED(1 << GPIO_Num_rx, uiRxLedIndex);
/*
- * Read the Tx & Rx packets transmission after 1 second and
- * calculate rate of transfer
- */
+ * Read the Tx & Rx packet counts after 1 second and
+ * calculate the rate of transfer
+ */
Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx;
- rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx;
+ rate_of_transfer_tx = Final_num_of_packts_tx -
+ Initial_num_of_packts_tx;
+ rate_of_transfer_rx = Final_num_of_packts_rx -
+ Initial_num_of_packts_rx;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Final_num_of_packts_tx;
- Initial_num_of_packts_rx = Final_num_of_packts_rx ;
+ Initial_num_of_packts_rx = Final_num_of_packts_rx;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
}
return Status;
}
-
-//-----------------------------------------------------------------------------
-// Procedure: ValidateDSDParamsChecksum
-//
-// Description: Reads DSD Params and validates checkusm.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulParamOffset - Start offset of the DSD parameter to be read and validated.
-// usParamLen - Length of the DSD Parameter.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
-static INT ValidateDSDParamsChecksum(
- PMINI_ADAPTER Adapter,
- ULONG ulParamOffset,
- USHORT usParamLen )
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateDSDParamsChecksum
+ *
+ * Description: Reads DSD Params and validates checksum.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulParamOffset - Start offset of the DSD parameter to be read and
+ * validated.
+ * usParamLen - Length of the DSD Parameter.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
+static INT ValidateDSDParamsChecksum(PMINI_ADAPTER Adapter, ULONG ulParamOffset,
+ USHORT usParamLen)
{
INT Status = STATUS_SUCCESS;
- PUCHAR puBuffer = NULL;
- USHORT usChksmOrg = 0;
+ PUCHAR puBuffer = NULL;
+ USHORT usChksmOrg = 0;
USHORT usChecksumCalculated = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",ulParamOffset, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
+ ulParamOffset, usParamLen);
puBuffer = kmalloc(usParamLen, GFP_KERNEL);
- if(!puBuffer)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum Allocation failed");
+ if (!puBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum Allocation failed");
return -ENOMEM;
}
- //
- // Read the DSD data from the parameter offset.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)puBuffer,ulParamOffset,usParamLen))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Read the DSD data from the parameter offset. */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
+ ulParamOffset, usParamLen)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
- //
- // Calculate the checksum of the data read from the DSD parameter.
- //
- usChecksumCalculated = CFG_CalculateChecksum(puBuffer,usParamLen);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usCheckSumCalculated = 0x%x\n", usChecksumCalculated);
-
- //
- // End of the DSD parameter will have a TWO bytes checksum stored in it. Read it and compare with the calculated
- // Checksum.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)&usChksmOrg,ulParamOffset+usParamLen,2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Calculate the checksum of the data read from the DSD parameter. */
+ usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usCheckSumCalculated = 0x%x\n",
+ usChecksumCalculated);
+
+ /*
+ * The end of the DSD parameter has a two-byte checksum stored in it.
+ * Read it and compare it with the calculated checksum.
+ */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
+ ulParamOffset+usParamLen, 2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
usChksmOrg = ntohs(usChksmOrg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usChksmOrg = 0x%x", usChksmOrg);
-
- //
- // Compare the checksum calculated with the checksum read from DSD section
- //
- if(usChecksumCalculated ^ usChksmOrg)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usChksmOrg = 0x%x", usChksmOrg);
+
+ /*
+ * Compare the checksum calculated with the checksum read
+ * from DSD section
+ */
+ if (usChecksumCalculated ^ usChksmOrg) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
@@ -311,523 +332,526 @@ exit:
}
-//-----------------------------------------------------------------------------
-// Procedure: ValidateHWParmStructure
-//
-// Description: Validates HW Parameters.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulHwParamOffset - Start offset of the HW parameter Section to be read and validated.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateHWParmStructure
+ *
+ * Description: Validates HW Parameters.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulHwParamOffset - Start offset of the HW parameter Section to be read
+ * and validated.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
static INT ValidateHWParmStructure(PMINI_ADAPTER Adapter, ULONG ulHwParamOffset)
{
- INT Status = STATUS_SUCCESS ;
+ INT Status = STATUS_SUCCESS;
USHORT HwParamLen = 0;
- // Add DSD start offset to the hwParamOffset to get the actual address.
+ /*
+ * Add DSD start offset to the hwParamOffset to get
+ * the actual address.
+ */
ulHwParamOffset += DSD_START_OFFSET;
- /*Read the Length of HW_PARAM structure*/
- BeceemNVMRead(Adapter,(PUINT)&HwParamLen,ulHwParamOffset,2);
+ /* Read the Length of HW_PARAM structure */
+ BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
HwParamLen = ntohs(HwParamLen);
- if(0==HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
- {
+ if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread:HwParamLen = 0x%x", HwParamLen);
- Status =ValidateDSDParamsChecksum(Adapter,ulHwParamOffset,HwParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:HwParamLen = 0x%x", HwParamLen);
+ Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
+ HwParamLen);
return Status;
} /* ValidateHWParmStructure() */
-static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter, UCHAR GPIO_Array[])
+static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter,
+ UCHAR GPIO_Array[])
{
int Status = STATUS_SUCCESS;
- ULONG dwReadValue = 0;
- USHORT usHwParamData = 0;
- USHORT usEEPROMVersion = 0;
- UCHAR ucIndex = 0;
- UCHAR ucGPIOInfo[32] = {0};
+ ULONG dwReadValue = 0;
+ USHORT usHwParamData = 0;
+ USHORT usEEPROMVersion = 0;
+ UCHAR ucIndex = 0;
+ UCHAR ucGPIOInfo[32] = {0};
- BeceemNVMRead(Adapter,(PUINT)&usEEPROMVersion,EEPROM_VERSION_OFFSET,2);
+ BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
+ EEPROM_VERSION_OFFSET, 2);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"usEEPROMVersion: Minor:0x%X Major:0x%x",usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "usEEPROMVersion: Minor:0x%X Major:0x%x",
+ usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
- if(((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION)
- {
- BeceemNVMRead(Adapter,(PUINT)&usHwParamData,EEPROM_HW_PARAM_POINTER_ADDRESS,2);
+ if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
+ BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
+ EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
usHwParamData = ntohs(usHwParamData);
dwReadValue = usHwParamData;
- }
- else
- {
- //
- // Validate Compatibility section and then read HW param if compatibility section is valid.
- //
+ } else {
+ /*
+ * Validate Compatibility section and then read HW param
+ * if compatibility section is valid.
+ */
Status = ValidateDSDParamsChecksum(Adapter,
- DSD_START_OFFSET,
- COMPATIBILITY_SECTION_LENGTH_MAP5);
+ DSD_START_OFFSET,
+ COMPATIBILITY_SECTION_LENGTH_MAP5);
- if(Status != STATUS_SUCCESS)
- {
+ if (Status != STATUS_SUCCESS)
return Status;
- }
- BeceemNVMRead(Adapter,(PUINT)&dwReadValue,EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5,4);
+
+ BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
+ EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
dwReadValue = ntohl(dwReadValue);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Start address of HW_PARAM structure = 0x%lx",dwReadValue);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Start address of HW_PARAM structure = 0x%lx",
+ dwReadValue);
- //
- // Validate if the address read out is within the DSD.
- // Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
- // lower limit should be above DSD_START_OFFSET and
- // upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
- //
- if(dwReadValue < DSD_START_OFFSET ||
- dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
- {
+ /*
+ * Validate if the address read out is within the DSD.
+ * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
+ * lower limit should be above DSD_START_OFFSET and
+ * upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
+ */
+ if (dwReadValue < DSD_START_OFFSET ||
+ dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
Status = ValidateHWParmStructure(Adapter, dwReadValue);
- if(Status){
+ if (Status)
return Status;
- }
/*
- Add DSD_START_OFFSET to the offset read from the EEPROM.
- This will give the actual start HW Parameters start address.
- To read GPIO section, add GPIO offset further.
- */
-
- dwReadValue += DSD_START_OFFSET; // = start address of hw param section.
- dwReadValue += GPIO_SECTION_START_OFFSET; // = GPIO start offset within HW Param section.
-
- /* Read the GPIO values for 32 GPIOs from EEPROM and map the function
- * number to GPIO pin number to GPIO_Array
- */
- BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo,dwReadValue,32);
- for(ucIndex = 0; ucIndex < 32; ucIndex++)
- {
-
- switch(ucGPIOInfo[ucIndex])
- {
- case RED_LED:
- {
- GPIO_Array[RED_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case BLUE_LED:
- {
- GPIO_Array[BLUE_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case YELLOW_LED:
- {
- GPIO_Array[YELLOW_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case GREEN_LED:
- {
- GPIO_Array[GREEN_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- default:
- break;
- }
+ * Add DSD_START_OFFSET to the offset read from the EEPROM.
+ * This will give the actual HW Parameters start address.
+ * To read the GPIO section, further add the GPIO offset.
+ */
+
+ /* = start address of hw param section. */
+ dwReadValue += DSD_START_OFFSET;
+ /* = GPIO start offset within HW Param section. */
+ dwReadValue += GPIO_SECTION_START_OFFSET;
+ /*
+ * Read the GPIO values for 32 GPIOs from EEPROM and map each LED
+ * function number to its GPIO pin number in GPIO_Array
+ */
+ BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
+ for (ucIndex = 0; ucIndex < 32; ucIndex++) {
+
+ switch (ucGPIOInfo[ucIndex]) {
+ case RED_LED:
+ GPIO_Array[RED_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case BLUE_LED:
+ GPIO_Array[BLUE_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case YELLOW_LED:
+ GPIO_Array[YELLOW_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case GREEN_LED:
+ GPIO_Array[GREEN_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ default:
+ break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"GPIO's bit map correspond to LED :0x%X",Adapter->gpioBitMap);
- return Status;
+
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "GPIO's bit map correspond to LED :0x%X", Adapter->gpioBitMap);
+ return Status;
}
-static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread)
+static int ReadConfigFileStructure(PMINI_ADAPTER Adapter,
+ BOOLEAN *bEnableThread)
{
int Status = STATUS_SUCCESS;
- UCHAR GPIO_Array[NUM_OF_LEDS+1]; /*Array to store GPIO numbers from EEPROM*/
+ /* Array to store GPIO numbers from EEPROM */
+ UCHAR GPIO_Array[NUM_OF_LEDS+1];
UINT uiIndex = 0;
UINT uiNum_of_LED_Type = 0;
PUCHAR puCFGData = NULL;
UCHAR bData = 0;
memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
- if(!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams))
- {
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Target Params not Avail.\n");
+ if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Target Params not Avail.\n");
return -ENOENT;
}
- /*Populate GPIO_Array with GPIO numbers for LED functions*/
- /*Read the GPIO numbers from EEPROM*/
+ /* Populate GPIO_Array with GPIO numbers for LED functions */
+ /* Read the GPIO numbers from EEPROM */
Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
- if(Status == STATUS_IMAGE_CHECKSUM_MISMATCH)
- {
+ if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
*bEnableThread = FALSE;
return STATUS_SUCCESS;
- }
- else if(Status)
- {
+ } else if (Status) {
*bEnableThread = FALSE;
return Status;
}
- /*
- * CONFIG file read successfully. Deallocate the memory of
- * uiFileNameBufferSize
- */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Config file read successfully\n");
+
+ /*
+ * CONFIG file read successfully. Deallocate the memory of
+ * uiFileNameBufferSize
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Config file read successfully\n");
puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
/*
- * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
- * will have the information of LED type, LED on state for different
- * driver state and LED blink state.
- */
+ * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
+ * will have the information of LED type, LED on state for different
+ * driver state and LED blink state.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
bData = *puCFGData;
- /*Check Bit 8 for polarity. If it is set, polarity is reverse polarity*/
- if(bData & 0x80)
- {
+ /*
+ * Check Bit 8 for polarity. If it is set,
+		 * the polarity is reversed
+ */
+ if (bData & 0x80) {
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 0;
- /*unset the bit 8*/
+ /* unset the bit 8 */
bData = bData & 0x7f;
}
Adapter->LEDInfo.LEDState[uiIndex].LED_Type = bData;
- if(bData <= NUM_OF_LEDS)
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = GPIO_Array[bData];
+ if (bData <= NUM_OF_LEDS)
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ GPIO_Array[bData];
else
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = DISABLE_GPIO_NUM;
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ DISABLE_GPIO_NUM;
puCFGData++;
bData = *puCFGData;
Adapter->LEDInfo.LEDState[uiIndex].LED_On_State = bData;
puCFGData++;
bData = *puCFGData;
- Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State= bData;
+ Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State = bData;
puCFGData++;
}
- /*Check if all the LED settings are disabled. If it is disabled, dont launch the LED control thread.*/
- for(uiIndex = 0; uiIndex<NUM_OF_LEDS; uiIndex++)
- {
- if((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
- (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
+ /*
+	 * Check if all the LED settings are disabled. If so,
+	 * don't launch the LED control thread.
+ */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if ((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
+ (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
(Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0))
uiNum_of_LED_Type++;
}
- if(uiNum_of_LED_Type >= NUM_OF_LEDS)
+ if (uiNum_of_LED_Type >= NUM_OF_LEDS)
*bEnableThread = FALSE;
return Status;
}
-//--------------------------------------------------------------------------
-// Procedure: LedGpioInit
-//
-// Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode and make the
-// initial state to be OFF.
-//
-// Arguments:
-// Adapter - Pointer to MINI_ADAPTER structure.
-//
-// Returns: VOID
-//
-//-----------------------------------------------------------------------------
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: LedGpioInit
+ *
+ * Description: Initializes LED GPIOs. Sets the LED GPIOs to OUTPUT mode
+ *		and makes their initial state OFF.
+ *
+ * Arguments:
+ * Adapter - Pointer to MINI_ADAPTER structure.
+ *
+ * Returns: VOID
+ *
+ * -----------------------------------------------------------------------------
+ */
static VOID LedGpioInit(PMINI_ADAPTER Adapter)
{
UINT uiResetValue = 0;
UINT uiIndex = 0;
/* Set all LED GPIO Mode to output mode */
- if(rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) <0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: RDM Failed\n");
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: RDM Failed\n");
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
+ DISABLE_GPIO_NUM)
uiResetValue |= (1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
- TURN_OFF_LED(1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,uiIndex);
+ TURN_OFF_LED(1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,
+ uiIndex);
}
- if(wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: WRM Failed\n");
+ if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: WRM Failed\n");
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
}
-//-----------------------------------------------------------------------------
-static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx, UCHAR *GPIO_num_rx ,UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,LedEventInfo_t currdriverstate)
+static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx,
+ UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,
+ LedEventInfo_t currdriverstate)
{
UINT uiIndex = 0;
*GPIO_num_tx = DISABLE_GPIO_NUM;
*GPIO_num_rx = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if((currdriverstate == NORMAL_OPERATION)||
- (currdriverstate == IDLEMODE_EXIT)||
- (currdriverstate == FW_DOWNLOAD))
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
- if(*GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ if ((currdriverstate == NORMAL_OPERATION) ||
+ (currdriverstate == IDLEMODE_EXIT) ||
+ (currdriverstate == FW_DOWNLOAD)) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State &
+ currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
+ if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
- }
- else
- {
+ } else {
*GPIO_num_rx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedRxIndex = uiIndex;
}
}
}
- }
- else
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_On_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
+ } else {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_On_State
+ & currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
}
}
}
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
static VOID LEDControlThread(PMINI_ADAPTER Adapter)
{
UINT uiIndex = 0;
UCHAR GPIO_num = 0;
- UCHAR uiLedIndex = 0 ;
+ UCHAR uiLedIndex = 0;
UINT uiResetValue = 0;
LedEventInfo_t currdriverstate = 0;
ulong timeout = 0;
INT Status = 0;
- UCHAR dummyGPIONum = 0;
- UCHAR dummyIndex = 0;
+ UCHAR dummyGPIONum = 0;
+ UCHAR dummyIndex = 0;
- //currdriverstate = Adapter->DriverState;
+ /* currdriverstate = Adapter->DriverState; */
Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE;
- /*Wait till event is triggered*/
- //wait_event(Adapter->LEDInfo.notify_led_event,
- // currdriverstate!= Adapter->DriverState);
+ /*
+ * Wait till event is triggered
+ *
+ * wait_event(Adapter->LEDInfo.notify_led_event,
+ * currdriverstate!= Adapter->DriverState);
+ */
- GPIO_num = DISABLE_GPIO_NUM ;
+ GPIO_num = DISABLE_GPIO_NUM;
- while(TRUE)
- {
- /*Wait till event is triggered*/
- if( (GPIO_num == DISABLE_GPIO_NUM)
+ while (TRUE) {
+ /* Wait till event is triggered */
+ if ((GPIO_num == DISABLE_GPIO_NUM)
||
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != LOWPOWER_MODE_ENTER))
- ||
- (currdriverstate == LED_THREAD_INACTIVE) )
- {
- Status = wait_event_interruptible(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop());
- }
-
- if(kthread_should_stop() || Adapter->device_removed )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- return ;//STATUS_FAILURE;
+ ((currdriverstate != FW_DOWNLOAD) &&
+ (currdriverstate != NORMAL_OPERATION) &&
+ (currdriverstate != LOWPOWER_MODE_ENTER))
+ ||
+ (currdriverstate == LED_THREAD_INACTIVE))
+ Status = wait_event_interruptible(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop());
+
+ if (kthread_should_stop() || Adapter->device_removed) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
+ return; /* STATUS_FAILURE; */
}
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- }
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
- if(Adapter->LEDInfo.bLedInitDone == FALSE)
- {
+ if (Adapter->LEDInfo.bLedInitDone == FALSE) {
LedGpioInit(Adapter);
Adapter->LEDInfo.bLedInitDone = TRUE;
}
- switch(Adapter->DriverState)
- {
- case DRIVER_INIT:
- {
- currdriverstate = DRIVER_INIT;//Adapter->DriverState;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
+ switch (Adapter->DriverState) {
+ case DRIVER_INIT:
+ currdriverstate = DRIVER_INIT;
+ /* Adapter->DriverState; */
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
break;
- case FW_DOWNLOAD:
- {
- //BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FW_DN_DONE called\n");
- currdriverstate = FW_DOWNLOAD;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
-
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- timeout = 50;
- LED_Blink(Adapter, 1<<GPIO_num, uiLedIndex, timeout, -1,currdriverstate);
- }
+ case FW_DOWNLOAD:
+ /*
+ * BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ * LED_DUMP_INFO, DBG_LVL_ALL,
+ * "LED Thread: FW_DN_DONE called\n");
+ */
+ currdriverstate = FW_DOWNLOAD;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM) {
+ timeout = 50;
+ LED_Blink(Adapter, 1 << GPIO_num, uiLedIndex,
+ timeout, -1, currdriverstate);
}
break;
- case FW_DOWNLOAD_DONE:
- {
- currdriverstate = FW_DOWNLOAD_DONE;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case FW_DOWNLOAD_DONE:
+ currdriverstate = FW_DOWNLOAD_DONE;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case SHUTDOWN_EXIT:
- //no break, continue to NO_NETWORK_ENTRY state as well.
-
- case NO_NETWORK_ENTRY:
- {
- currdriverstate = NO_NETWORK_ENTRY;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex,&dummyGPIONum,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case SHUTDOWN_EXIT:
+ /*
+ * no break, continue to NO_NETWORK_ENTRY
+ * state as well.
+ */
+ case NO_NETWORK_ENTRY:
+ currdriverstate = NO_NETWORK_ENTRY;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyGPIONum, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case NORMAL_OPERATION:
+ case NORMAL_OPERATION:
{
UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
UCHAR uiLEDTx = 0;
UCHAR uiLEDRx = 0;
currdriverstate = NORMAL_OPERATION;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
-
- BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiLEDTx,&uiLEDRx,currdriverstate);
- if((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM))
- {
- GPIO_num = DISABLE_GPIO_NUM ;
- }
- else
- {
- /*If single LED is selected, use same for both Tx and Rx*/
- if(GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx,
+ &GPIO_num_rx, &uiLEDTx, &uiLEDRx,
+ currdriverstate);
+ if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
+ (GPIO_num_rx ==
+ DISABLE_GPIO_NUM)) {
+ GPIO_num = DISABLE_GPIO_NUM;
+ } else {
+ /*
+				 * If a single LED is selected, use the
+				 * same LED for both Tx and Rx
+ */
+ if (GPIO_num_tx == DISABLE_GPIO_NUM) {
GPIO_num_tx = GPIO_num_rx;
uiLEDTx = uiLEDRx;
- }
- else if(GPIO_num_rx == DISABLE_GPIO_NUM)
- {
+ } else if (GPIO_num_rx ==
+ DISABLE_GPIO_NUM) {
GPIO_num_rx = GPIO_num_tx;
uiLEDRx = uiLEDTx;
}
- /*Blink the LED in proportionate to Tx and Rx transmissions.*/
- LED_Proportional_Blink(Adapter, GPIO_num_tx, uiLEDTx, GPIO_num_rx, uiLEDRx,currdriverstate);
+ /*
+				 * Blink the LED in proportion
+ * to Tx and Rx transmissions.
+ */
+ LED_Proportional_Blink(Adapter,
+ GPIO_num_tx, uiLEDTx,
+ GPIO_num_rx, uiLEDRx,
+ currdriverstate);
}
}
break;
- case LOWPOWER_MODE_ENTER:
- {
- currdriverstate = LOWPOWER_MODE_ENTER;
- if( DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING == Adapter->ulPowerSaveMode)
- {
- /* Turn OFF all the LED */
- uiResetValue = 0;
- for(uiIndex =0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
-
+ case LOWPOWER_MODE_ENTER:
+ currdriverstate = LOWPOWER_MODE_ENTER;
+ if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
+ Adapter->ulPowerSaveMode) {
+				/* Turn OFF all the LEDs */
+ uiResetValue = 0;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
- /* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */
- Adapter->LEDInfo.bLedInitDone = FALSE;
- Adapter->LEDInfo.bIdle_led_off = TRUE;
- wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- }
- case IDLEMODE_CONTINUE:
- {
- currdriverstate = IDLEMODE_CONTINUE;
- GPIO_num = DISABLE_GPIO_NUM;
+
}
+		/* Turn off LED and WAKE-UP for sending IDLE mode ACK */
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = TRUE;
+ wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case IDLEMODE_EXIT:
- {
- }
+ case IDLEMODE_CONTINUE:
+ currdriverstate = IDLEMODE_CONTINUE;
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case DRIVER_HALT:
- {
- currdriverstate = DRIVER_HALT;
- GPIO_num = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
- //Adapter->DriverState = DRIVER_INIT;
+ case IDLEMODE_EXIT:
+ break;
+ case DRIVER_HALT:
+ currdriverstate = DRIVER_HALT;
+ GPIO_num = DISABLE_GPIO_NUM;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
+ /* Adapter->DriverState = DRIVER_INIT; */
break;
- case LED_THREAD_INACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"InActivating LED thread...");
- currdriverstate = LED_THREAD_INACTIVE;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_INACTIVELY ;
- Adapter->LEDInfo.bLedInitDone = FALSE ;
- //disable ALL LED
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
+ case LED_THREAD_INACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "InActivating LED thread...");
+ currdriverstate = LED_THREAD_INACTIVE;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_INACTIVELY;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+			/* disable ALL LEDs */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
break;
- case LED_THREAD_ACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"Activating LED thread again...");
- if(Adapter->LinkUpStatus == FALSE)
- Adapter->DriverState = NO_NETWORK_ENTRY;
- else
- Adapter->DriverState = NORMAL_OPERATION;
+ case LED_THREAD_ACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Activating LED thread again...");
+ if (Adapter->LinkUpStatus == FALSE)
+ Adapter->DriverState = NO_NETWORK_ENTRY;
+ else
+ Adapter->DriverState = NORMAL_OPERATION;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY ;
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ break;
+ /* return; */
+ default:
break;
- //return;
- default:
- break;
}
}
Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
@@ -839,49 +863,54 @@ int InitLedSettings(PMINI_ADAPTER Adapter)
BOOLEAN bEnableThread = TRUE;
UCHAR uiIndex = 0;
- /*Initially set BitPolarity to normal polarity. The bit 8 of LED type
- * is used to change the polarity of the LED.*/
+ /*
+	 * Initially set BitPolarity to normal polarity. Bit 8 of the LED type
+ * is used to change the polarity of the LED.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
- }
- /*Read the LED settings of CONFIG file and map it to GPIO numbers in EEPROM*/
+ /*
+ * Read the LED settings of CONFIG file and map it
+ * to GPIO numbers in EEPROM
+ */
Status = ReadConfigFileStructure(Adapter, &bEnableThread);
- if(STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FAILED in ReadConfigFileStructure\n");
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: FAILED in ReadConfigFileStructure\n");
return Status;
}
- if(Adapter->LEDInfo.led_thread_running)
- {
- if(bEnableThread)
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (bEnableThread) {
;
- else
- {
+ } else {
Adapter->DriverState = DRIVER_HALT;
wake_up(&Adapter->LEDInfo.notify_led_event);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
}
- }
-
- else if(bEnableThread)
- {
- /*Create secondary thread to handle the LEDs*/
+ } else if (bEnableThread) {
+ /* Create secondary thread to handle the LEDs */
init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
- Adapter->LEDInfo.led_cntrl_threadid = kthread_run((int (*)(void *))
- LEDControlThread, Adapter, "led_control_thread");
- if(IS_ERR(Adapter->LEDInfo.led_cntrl_threadid))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Not able to spawn Kernel Thread\n");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.led_cntrl_threadid =
+ kthread_run((int (*)(void *)) LEDControlThread,
+ Adapter, "led_control_thread");
+ if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Not able to spawn Kernel Thread\n");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
+ }
}
return Status;
}
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 3de0daf5edb..7d703cb3c5e 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -78,7 +78,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
{
value=0;
uiStatus = 0 ;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got removed hence exiting....");
@@ -93,7 +93,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
wrmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
uiData = (UCHAR)value;
break;
@@ -102,8 +102,8 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
dwRetries-- ;
if ( dwRetries == 0 )
{
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"0x3004 = %x 0x3008 = %x, retries = %d failed.\n",value,value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return uiData;
}
@@ -158,7 +158,7 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got Removed.hence exiting from loop...");
@@ -202,8 +202,8 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
value=0;
value1=0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n", dwNumWords, value, value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return STATUS_FAILURE;
}
@@ -217,22 +217,22 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
pvalue = (PUCHAR)(pdwData + dwIndex);
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[0] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[1] = value;
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[2] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[3] = value;
}
@@ -445,6 +445,7 @@ static INT BeceemFlashBulkRead(
UINT uiBytesToRead = uiNumBytes;
INT Status = 0;
UINT uiPartOffset = 0;
+ int bytes;
if(Adapter->device_removed )
{
@@ -469,9 +470,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MAX_RW_SIZE - (uiOffset%MAX_RW_SIZE);
uiBytesToRead = MIN(uiNumBytes,uiBytesToRead);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
Adapter->SelectedChip = RESET_CHIP_SELECT;
return Status;
}
@@ -488,9 +489,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MIN(uiNumBytes,MAX_RW_SIZE);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
break;
}
@@ -613,6 +614,7 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
UINT iIndex = 0, iRetries = 0;
UINT uiStatus = 0;
UINT value;
+ int bytes;
for(iIndex=0;iIndex<numOfSectors;iIndex++)
{
@@ -632,10 +634,11 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
return STATUS_FAILURE;
}
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries++;
//After every try lets make the CPU free for 10 ms. generally time taken by the
@@ -679,6 +682,7 @@ static INT flashByteWrite(
UINT value;
ULONG ulData = *(PUCHAR)pData;
+ int bytes;
//
// need not write 0xFF because write requires an erase and erase will
@@ -720,10 +724,11 @@ static INT flashByteWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
if( iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
@@ -771,6 +776,7 @@ static INT flashWrite(
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
// make whole sector 0xFFFFFFFF.
@@ -803,10 +809,11 @@ static INT flashWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -849,6 +856,7 @@ static INT flashByteWriteStatus(
INT iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; //3
ULONG ulData = *(PUCHAR)pData;
UINT value;
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -891,10 +899,11 @@ static INT flashByteWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -935,6 +944,7 @@ static INT flashWriteStatus(
//UINT uiReadBack = 0;
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -967,10 +977,11 @@ static INT flashWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
//this will ensure that in there will be no changes in the current path.
@@ -1841,7 +1852,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
* What we are checking if the previous write has completed, and this
* may take time. We should wait till the Empty bit is set. */
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
while ( ( uiStatus & EEPROM_WRITE_QUEUE_EMPTY ) == 0 )
{
uiRetries--;
@@ -1855,7 +1866,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
msleep(1);
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem got removed hence exiting from loop....");
@@ -2500,7 +2511,7 @@ static ULONG BcmReadFlashRDID(PMINI_ADAPTER Adapter)
// Read SPI READQ REG. The output will be WWXXYYZZ.
// The ID is 3Bytes long and is WWXXYY. ZZ needs to be Ignored.
//
- rdmalt(Adapter, FLASH_SPI_READQ_REG,(PUINT)&ulRDID, sizeof(ulRDID));
+ rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID));
return (ulRDID >>8);
@@ -4735,8 +4746,8 @@ static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
Adapter->SelectedChip = ChipNum ;
//bit[13..12] will select the appropriate chip
- rdmalt(Adapter,FLASH_CONFIG_REG, &FlashConfig, 4);
- rdmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
+ rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
+ rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
{
switch(ChipNum)
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
index 2d8b8a367b3..14876388b87 100644
--- a/drivers/staging/bcm/target_params.h
+++ b/drivers/staging/bcm/target_params.h
@@ -72,8 +72,8 @@ typedef struct _TARGET_PARAMS
// removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
//BAMC Related Parameters
- //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
- //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
+ //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
+ //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
B_UINT32 m_u32BandAMCEnable;
} stTargetParams,TARGET_PARAMS,*PTARGET_PARAMS, STARGETPARAMS, *PSTARGETPARAMS;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd..0d18d80bcd2 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
@@ -1885,11 +1924,17 @@ ok:
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
@@ -2429,18 +2479,18 @@ static ssize_t store_max_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2490,19 +2540,19 @@ static ssize_t store_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
- return -EINVAL;
- new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_size = new_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2556,18 +2606,18 @@ static ssize_t store_max_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
@@ -2617,19 +2667,19 @@ static ssize_t store_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
- return -EINVAL;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 6fb7594319c..ca5bd9b8704 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -145,78 +145,77 @@ void fpu_end(void)
static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = {
#ifdef CONFIG_APCI_3120
- {APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x818D)},
#endif
#ifdef CONFIG_APCI_1032
- {APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1032_BOARD_VENDOR_ID, 0x1003)},
#endif
#ifdef CONFIG_APCI_1516
- {APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1516_BOARD_VENDOR_ID, 0x1001)},
#endif
#ifdef CONFIG_APCI_2016
- {APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2016_BOARD_VENDOR_ID, 0x1002)},
#endif
#ifdef CONFIG_APCI_2032
- {APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2032_BOARD_VENDOR_ID, 0x1004)},
#endif
#ifdef CONFIG_APCI_2200
- {APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2200_BOARD_VENDOR_ID, 0x1005)},
#endif
#ifdef CONFIG_APCI_1564
- {APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1564_BOARD_VENDOR_ID, 0x1006)},
#endif
#ifdef CONFIG_APCI_1500
- {APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1500_BOARD_VENDOR_ID, 0x80fc)},
#endif
#ifdef CONFIG_APCI_3001
- {APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x828D)},
#endif
#ifdef CONFIG_APCI_3501
- {APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3501_BOARD_VENDOR_ID, 0x3001)},
#endif
#ifdef CONFIG_APCI_035
- {APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI035_BOARD_VENDOR_ID, 0x0300)},
#endif
#ifdef CONFIG_APCI_3200
- {APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3000)},
#endif
#ifdef CONFIG_APCI_3300
- {APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3007)},
#endif
#ifdef CONFIG_APCI_1710
- {APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID)},
#endif
#ifdef CONFIG_APCI_16XX
- {0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1009)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x100A)},
#endif
#ifdef CONFIG_APCI_3XXX
- {0x15B8, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3013)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3014)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3015)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3016)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3017)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3018)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3019)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301A)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3020)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3021)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3022)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3023)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3002)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3004)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3024)},
#endif
{0}
};
@@ -1019,7 +1018,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_16XX
{"apci1648",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x1009,
128,
0,
@@ -1075,7 +1074,7 @@ static const struct addi_board boardtypes[] = {
i_APCI16XX_InsnBitsWriteTTLIO},
{"apci1696",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x100A,
128,
0,
@@ -1132,7 +1131,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_3XXX
{"apci3000-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3010,
256,
256,
@@ -1188,7 +1187,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300F,
256,
256,
@@ -1244,7 +1243,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300E,
256,
256,
@@ -1300,7 +1299,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3013,
256,
256,
@@ -1356,7 +1355,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3014,
256,
256,
@@ -1412,7 +1411,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3015,
256,
256,
@@ -1468,7 +1467,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3016,
256,
256,
@@ -1524,7 +1523,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3017,
256,
256,
@@ -1580,7 +1579,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3018,
256,
256,
@@ -1636,7 +1635,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3019,
256,
256,
@@ -1692,7 +1691,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301A,
256,
256,
@@ -1748,7 +1747,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301B,
256,
256,
@@ -1804,7 +1803,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301C,
256,
256,
@@ -1860,7 +1859,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301D,
256,
256,
@@ -1916,7 +1915,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301E,
256,
256,
@@ -1972,7 +1971,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301F,
256,
256,
@@ -2028,7 +2027,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3020,
256,
256,
@@ -2084,7 +2083,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3021,
256,
256,
@@ -2140,7 +2139,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3022,
256,
256,
@@ -2196,7 +2195,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3023,
256,
256,
@@ -2252,7 +2251,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3003",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300B,
256,
256,
@@ -2307,7 +2306,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3002,
256,
256,
@@ -2362,7 +2361,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3003,
256,
256,
@@ -2417,7 +2416,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3004,
256,
256,
@@ -2472,7 +2471,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3500",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3024,
256,
256,
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index c75a1a1fd77..f9545b064ea 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3598,7 +3598,7 @@ int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
n = comedi_buf_write_alloc(s->async,
(7 + 12) * sizeof(unsigned int));
- /* If not enougth memory available, event is set to Comedi Buffer Errror */
+ /* If not enough memory available, event is set to Comedi Buffer Error */
if (n > ((7 + 12) * sizeof(unsigned int))) {
printk("\ncomedi_buf_write_alloc n = %i", n);
s->async->events |= COMEDI_CB_ERROR;
diff --git a/drivers/staging/comedi/drivers/adl_pci7230.c b/drivers/staging/comedi/drivers/adl_pci7230.c
index 72a7258b5b9..20d570554fa 100644
--- a/drivers/staging/comedi/drivers/adl_pci7230.c
+++ b/drivers/staging/comedi/drivers/adl_pci7230.c
@@ -44,15 +44,7 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7230 0x7230
static DEFINE_PCI_DEVICE_TABLE(adl_pci7230_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK,
- PCI_DEVICE_ID_PCI7230,
- PCI_ANY_ID,
- PCI_ANY_ID,
- 0,
- 0,
- 0
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7230) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/adl_pci7296.c b/drivers/staging/comedi/drivers/adl_pci7296.c
index f28fe6bec05..9a2320537bd 100644
--- a/drivers/staging/comedi/drivers/adl_pci7296.c
+++ b/drivers/staging/comedi/drivers/adl_pci7296.c
@@ -49,10 +49,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7296 0x7296
static DEFINE_PCI_DEVICE_TABLE(adl_pci7296_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7296_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci7432.c b/drivers/staging/comedi/drivers/adl_pci7432.c
index 262da7b29b2..86ee2197604 100644
--- a/drivers/staging/comedi/drivers/adl_pci7432.c
+++ b/drivers/staging/comedi/drivers/adl_pci7432.c
@@ -44,10 +44,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7432 0x7432
static DEFINE_PCI_DEVICE_TABLE(adl_pci7432_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7432_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 767a594935c..3b83d65bc1b 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -57,10 +57,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI8164 0x8164
static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index fc48bae42ab..2a9bd88a4ab 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -311,10 +311,8 @@ static const struct comedi_lrange pci9111_hr_ai_range = {
};
static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
- { PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- /* { PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- * 0, 0, 0 }, */
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
+ /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index da2b75b15d4..8318c82a555 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1382,16 +1382,14 @@ static int pci1710_attach(struct comedi_device *dev,
int i;
int board_index;
- printk("comedi%d: adv_pci1710: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: adv_pci1710:\n", dev->minor);
opt_bus = it->options[0];
opt_slot = it->options[1];
ret = alloc_private(dev, sizeof(struct pci1710_private));
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return -ENOMEM;
- }
/* Look for matching PCI device */
errstr = "not found!";
@@ -1436,10 +1434,10 @@ static int pci1710_attach(struct comedi_device *dev,
if (!pcidev) {
if (opt_bus || opt_slot) {
- printk(" - Card at b:s %d:%d %s\n",
- opt_bus, opt_slot, errstr);
+ dev_err(dev->hw_dev, "- Card at b:s %d:%d %s\n",
+ opt_bus, opt_slot, errstr);
} else {
- printk(" - Card %s\n", errstr);
+ dev_err(dev->hw_dev, "- Card %s\n", errstr);
}
return -EIO;
}
@@ -1450,8 +1448,8 @@ static int pci1710_attach(struct comedi_device *dev,
irq = pcidev->irq;
iobase = pci_resource_start(pcidev, 2);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx", pci_bus, pci_slot, pci_func,
- iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n", pci_bus, pci_slot,
+ pci_func, iobase);
dev->iobase = iobase;
@@ -1471,10 +1469,8 @@ static int pci1710_attach(struct comedi_device *dev,
n_subdevices++;
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return ret;
- }
pci1710_reset(dev);
@@ -1483,24 +1479,20 @@ static int pci1710_attach(struct comedi_device *dev,
if (request_irq(irq, interrupt_service_pci1710,
IRQF_SHARED, "Advantech PCI-1710",
dev)) {
- printk
- (", unable to allocate IRQ %d, DISABLING IT",
- irq);
+ dev_dbg(dev->hw_dev, "unable to allocate IRQ %d, DISABLING IT",
+ irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ dev_dbg(dev->hw_dev, "irq=%u", irq);
}
} else {
- printk(", IRQ disabled");
+ dev_dbg(dev->hw_dev, "IRQ disabled");
}
} else {
irq = 0;
}
dev->irq = irq;
-
- printk(".\n");
-
subdev = 0;
if (this_board->n_aichan) {
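The printk() conversions in adv_pci1710 and in the drivers that follow all switch to the device-aware logging helpers from <linux/device.h> (dev_err, dev_warn, dev_info, dev_dbg). These take a struct device * (dev->hw_dev here), prefix the message with the driver and device name, and carry an implicit log level; dev_dbg() in particular only produces output when DEBUG or dynamic debug is enabled, so the former always-on attach chatter becomes opt-in. A minimal sketch of the pattern, assuming a valid struct device *hwdev:

	/* illustrative sketch, not part of the patch */
	struct device *hwdev = dev->hw_dev;

	dev_info(hwdev, "board found\n");			/* normal runtime message */
	dev_err(hwdev, "unable to allocate IRQ %d\n", irq);	/* error condition */
	dev_dbg(hwdev, "io=0x%04lx\n", iobase);			/* only with DEBUG/dynamic debug */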
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 69334f6f64e..537e5853427 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1106,13 +1106,10 @@ static int pci_dio_attach(struct comedi_device *dev,
unsigned long iobase;
struct pci_dev *pcidev = NULL;
- printk("comedi%d: adv_pci_dio: ", dev->minor);
ret = alloc_private(dev, sizeof(struct pci_dio_private));
- if (ret < 0) {
- printk(", Error: Cann't allocate private memory!\n");
+ if (ret < 0)
return -ENOMEM;
- }
for_each_pci_dev(pcidev) {
/* loop through cards supported by this driver */
@@ -1140,19 +1137,18 @@ static int pci_dio_attach(struct comedi_device *dev,
}
if (!dev->board_ptr) {
- printk(", Error: Requested type of the card was not found!\n");
+ dev_err(dev->hw_dev, "Error: Requested type of the card was not found!\n");
return -EIO;
}
if (comedi_pci_enable(pcidev, driver_pci_dio.driver_name)) {
- printk
- (", Error: Can't enable PCI device and request regions!\n");
+ dev_err(dev->hw_dev, "Error: Can't enable PCI device and request regions!\n");
return -EIO;
}
iobase = pci_resource_start(pcidev, this_board->main_pci_region);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx",
- pcidev->bus->number, PCI_SLOT(pcidev->devfn),
- PCI_FUNC(pcidev->devfn), iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n",
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn),
+ PCI_FUNC(pcidev->devfn), iobase);
dev->iobase = iobase;
dev->board_name = this_board->name;
@@ -1177,15 +1173,10 @@ static int pci_dio_attach(struct comedi_device *dev,
}
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(", Error: Cann't allocate subdevice memory!\n");
+ if (ret < 0)
return ret;
- }
-
- printk(".\n");
subdev = 0;
-
for (i = 0; i < MAX_DI_SUBDEVS; i++)
if (this_board->sdi[i].chans) {
s = dev->subdevices + subdev;
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 93bbe4ec318..566cc441145 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -421,12 +421,9 @@ static const struct dio200_layout_struct dio200_layouts[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, dio200_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 48246cd50d4..7972cadd403 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -134,10 +134,8 @@ static const struct pc236_board pc236_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc236_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 8a338807909..191ac0d23ce 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -101,10 +101,8 @@ static const struct pc263_board pc263_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc263_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc263_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 1b5ba1c2725..b278917cec2 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -384,12 +384,9 @@ static const struct pci224_board pci224_boards[] = {
*/
static DEFINE_PCI_DEVICE_TABLE(pci224_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci224_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 7edeb1103dc..538979551c8 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -501,12 +501,9 @@ static const struct pci230_board pci230_boards[] = {
};
static DEFINE_PCI_DEVICE_TABLE(pci230_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci230_pci_table);
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index e171c56112d..49404f49f7b 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -99,7 +99,7 @@ static struct comedi_driver driver_das16cs = {
.detach = das16cs_detach,
};
-static struct pcmcia_device *cur_dev = NULL;
+static struct pcmcia_device *cur_dev;
static const struct comedi_lrange das16cs_ai_range = { 4, {
RANGE(-10, 10),
@@ -150,7 +150,7 @@ static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
return das16cs_boards + i;
}
- printk("unknown board!\n");
+ dev_dbg(dev->hw_dev, "unknown board!\n");
return NULL;
}
@@ -163,20 +163,19 @@ static int das16cs_attach(struct comedi_device *dev,
int ret;
int i;
- printk("comedi%d: cb_das16_cs: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: cb_das16_cs: attached\n", dev->minor);
link = cur_dev; /* XXX hack */
if (!link)
return -EIO;
dev->iobase = link->resource[0]->start;
- printk("I/O base=0x%04lx ", dev->iobase);
+ dev_dbg(dev->hw_dev, "I/O base=0x%04lx\n", dev->iobase);
- printk("fingerprint:\n");
+ dev_dbg(dev->hw_dev, "fingerprint:\n");
for (i = 0; i < 48; i += 2)
- printk("%04x ", inw(dev->iobase + i));
+ dev_dbg(dev->hw_dev, "%04x\n", inw(dev->iobase + i));
- printk("\n");
ret = request_irq(link->irq, das16cs_interrupt,
IRQF_SHARED, "cb_das16_cs", dev);
@@ -185,7 +184,7 @@ static int das16cs_attach(struct comedi_device *dev,
dev->irq = link->irq;
- printk("irq=%u ", dev->irq);
+ dev_dbg(dev->hw_dev, "irq=%u\n", dev->irq);
dev->board_ptr = das16cs_probe(dev, link);
if (!dev->board_ptr)
@@ -252,14 +251,13 @@ static int das16cs_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- printk("attached\n");
return 1;
}
static int das16cs_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16cs: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: das16cs: remove\n", dev->minor);
if (dev->irq)
free_irq(dev->irq, dev);
@@ -312,7 +310,7 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
break;
}
if (to == TIMEOUT) {
- printk("cb_das16_cs: ai timeout\n");
+ dev_dbg(dev->hw_dev, "cb_das16_cs: ai timeout\n");
return -ETIME;
}
data[i] = (unsigned short)inw(dev->iobase + 0);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 61968a505f2..7e4ffcfdac6 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -565,8 +565,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
int index;
int i;
- printk("comedi%d: cb_pcidas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -576,7 +574,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -600,20 +597,20 @@ static int cb_pcidas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcidas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcidas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/*
* Enable PCI device and reserve I/O ports.
*/
if (comedi_pci_enable(pcidev, "cb_pcidas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
/*
@@ -639,7 +636,8 @@ found:
/* get irq */
if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt,
IRQF_SHARED, "cb_pcidas", dev)) {
- printk(" unable to allocate irq %d\n", devpriv->pci_dev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %d\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
@@ -768,7 +766,6 @@ found:
*/
static int cb_pcidas_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
if (devpriv) {
if (devpriv->s5933_config) {
@@ -776,8 +773,8 @@ static int cb_pcidas_detach(struct comedi_device *dev)
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("detaching, incsr is 0x%x\n",
- inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
+ dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
#endif
}
}
@@ -858,7 +855,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
unsigned int source = data[1];
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_err(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -1279,7 +1277,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
outw(bits, devpriv->control_status + ADCMUX_CONT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to adcmux control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits);
#endif
/* load counters */
@@ -1306,7 +1304,8 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
devpriv->adc_fifo_bits |= INT_FHF; /* interrupt fifo half full */
}
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable (and clear) interrupts */
outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL,
@@ -1332,7 +1331,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
bits |= BURSTE;
outw(bits, devpriv->control_status + TRIG_CONTSTAT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to trig control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits);
#endif
return 0;
@@ -1549,7 +1548,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->adc_fifo_bits |= DAEMIE | DAHFIE;
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable and clear interrupts */
outw(devpriv->adc_fifo_bits | DAEMI | DAHFI,
@@ -1559,7 +1559,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY;
outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n",
+ devpriv->ao_control_bits);
#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -1587,8 +1588,9 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("intcsr 0x%x\n", s5933_status);
- printk("mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
+ dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status);
+ dev_dbg(dev->hw_dev, "mbef 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
#endif
if ((INTCSR_INTR_ASSERTED & s5933_status) == 0)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 1e324198996..c9e8c478576 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1739,8 +1739,6 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
uint32_t local_range, local_decode;
int retval;
- printk("comedi%d: cb_pcidas64\n", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -1781,12 +1779,11 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- printk("Found %s on bus %i, slot %i\n", board(dev)->name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", board(dev)->name,
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn));
if (comedi_pci_enable(pcidev, driver_cb_pcidas.driver_name)) {
- printk(KERN_WARNING
- " failed to enable PCI device and request regions\n");
+ dev_warn(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
pci_set_master(pcidev);
@@ -1814,7 +1811,7 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!priv(dev)->plx9080_iobase || !priv(dev)->main_iobase
|| !priv(dev)->dio_counter_iobase) {
- printk(" failed to remap io memory\n");
+ dev_warn(dev->hw_dev, "failed to remap io memory\n");
return -ENOMEM;
}
@@ -1850,17 +1847,19 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
priv(dev)->hw_revision =
hw_revision(dev, readw(priv(dev)->main_iobase + HW_STATUS_REG));
- printk(" stc hardware revision %i\n", priv(dev)->hw_revision);
+ dev_dbg(dev->hw_dev, "stc hardware revision %i\n",
+ priv(dev)->hw_revision);
init_plx9080(dev);
init_stc_registers(dev);
/* get irq */
if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
"cb_pcidas64", dev)) {
- printk(" unable to allocate irq %u\n", pcidev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ pcidev->irq);
return -EINVAL;
}
dev->irq = pcidev->irq;
- printk(" irq %u\n", dev->irq);
+ dev_dbg(dev->hw_dev, "irq %u\n", dev->irq);
retval = setup_subdevices(dev);
if (retval < 0)
@@ -1882,8 +1881,6 @@ static int detach(struct comedi_device *dev)
{
unsigned int i;
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
-
if (dev->irq)
free_irq(dev->irq, dev);
if (priv(dev)) {
@@ -2093,7 +2090,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
else
num_calibration_sources = 8;
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_dbg(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -2924,7 +2922,7 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
}
if (num_samples < 0) {
- printk(" cb_pcidas64: bug! num_samples < 0\n");
+ dev_err(dev->hw_dev, "cb_pcidas64: bug! num_samples < 0\n");
break;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 49102b3a6c4..abba220a767 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -282,7 +282,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
struct pci_dev *pcidev = NULL;
int index;
- printk("comedi%d: cb_pcidda: ", dev->minor);
/*
* Allocate the private structure area.
@@ -293,7 +292,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CB) {
@@ -312,22 +310,21 @@ static int cb_pcidda_attach(struct comedi_device *dev,
}
}
if (!pcidev) {
- printk
- ("Not a ComputerBoards/MeasurementComputing card on requested position\n");
+ dev_err(dev->hw_dev, "Not a ComputerBoards/MeasurementComputing card on requested position\n");
return -EIO;
}
found:
devpriv->pci_dev = pcidev;
dev->board_ptr = cb_pcidda_boards + index;
/* "thisboard" macro can be used from here. */
- printk("Found %s at requested position\n", thisboard->name);
+ dev_dbg(dev->hw_dev, "Found %s at requested position\n",
+ thisboard->name);
/*
* Enable PCI device and request regions.
*/
if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidda: failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "cb_pcidda: failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -377,12 +374,11 @@ found:
s = dev->subdevices + 2;
subdev_8255_init(dev, s, NULL, devpriv->digitalio + PORT2A);
- printk(" eeprom:");
+ dev_dbg(dev->hw_dev, "eeprom:\n");
for (index = 0; index < EEPROM_SIZE; index++) {
devpriv->eeprom_data[index] = cb_pcidda_read_eeprom(dev, index);
- printk(" %i:0x%x ", index, devpriv->eeprom_data[index]);
+ dev_dbg(dev->hw_dev, "%i:0x%x\n", index, devpriv->eeprom_data[index]);
}
- printk("\n");
/* set calibrations dacs */
for (index = 0; index < thisboard->ao_chans; index++)
@@ -417,8 +413,6 @@ static int cb_pcidda_detach(struct comedi_device *dev)
subdev_8255_cleanup(dev, dev->subdevices + 2);
}
- printk("comedi%d: cb_pcidda: remove\n", dev->minor);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 79477a595ef..8f3215239a1 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -184,8 +184,6 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int index;
int i;
- printk("comedi%d: cb_pcidio: \n", dev->minor);
-
/*
* Allocate the private structure area. alloc_private() is a
* convenient macro defined in comedidev.h.
@@ -223,8 +221,7 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
@@ -236,14 +233,12 @@ found:
dev->board_name = thisboard->name;
devpriv->pci_dev = pcidev;
- printk("Found %s on bus %i, slot %i\n", thisboard->name,
- devpriv->pci_dev->bus->number,
- PCI_SLOT(devpriv->pci_dev->devfn));
- if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidio: failed to enable PCI device and request regions\n");
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", thisboard->name,
+ devpriv->pci_dev->bus->number,
+ PCI_SLOT(devpriv->pci_dev->devfn));
+ if (comedi_pci_enable(pcidev, thisboard->name))
return -EIO;
- }
+
devpriv->dio_reg_base
=
pci_resource_start(devpriv->pci_dev,
@@ -259,11 +254,10 @@ found:
for (i = 0; i < thisboard->n_8255; i++) {
subdev_8255_init(dev, dev->subdevices + i,
NULL, devpriv->dio_reg_base + i * 4);
- printk(" subdev %d: base = 0x%lx\n", i,
- devpriv->dio_reg_base + i * 4);
+ dev_dbg(dev->hw_dev, "subdev %d: base = 0x%lx\n", i,
+ devpriv->dio_reg_base + i * 4);
}
- printk("attached\n");
return 1;
}
@@ -277,7 +271,6 @@ found:
*/
static int pcidio_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidio: remove\n", dev->minor);
if (devpriv) {
if (devpriv->pci_dev) {
if (devpriv->dio_reg_base)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index b1b832b65bc..8ba694263bd 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -212,8 +212,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
int index;
/* int i; */
- printk("comedi%d: cb_pcimdas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -223,7 +221,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -248,26 +245,26 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcimdas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcimdas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/* Warn about non-tested features */
switch (thisboard->device_id) {
case 0x56:
break;
default:
- printk("THIS CARD IS UNSUPPORTED.\n"
- "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
+ dev_dbg(dev->hw_dev, "THIS CARD IS UNSUPPORTED.\n"
+ "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
}
if (comedi_pci_enable(pcidev, "cb_pcimdas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -277,13 +274,11 @@ found:
devpriv->BADR3 = pci_resource_start(devpriv->pci_dev, 3);
devpriv->BADR4 = pci_resource_start(devpriv->pci_dev, 4);
-#ifdef CBPCIMDAS_DEBUG
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
-#endif
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
/* Dont support IRQ yet */
/* get irq */
@@ -333,8 +328,6 @@ found:
else
s->type = COMEDI_SUBD_UNUSED;
- printk("attached\n");
-
return 1;
}
@@ -348,16 +341,19 @@ found:
*/
static int cb_pcimdas_detach(struct comedi_device *dev)
{
-#ifdef CBPCIMDAS_DEBUG
if (devpriv) {
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n",
+ devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n",
+ devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n",
+ devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n",
+ devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n",
+ devpriv->BADR4);
}
-#endif
- printk("comedi%d: cb_pcimdas: remove\n", dev->minor);
+
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 8c981a89ab6..40bddfa2222 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -105,7 +105,8 @@ struct board_struct {
int ao_bits;
int dio_chans;
int dio_method;
- int dio_offset; /* how many bytes into the BADR are the DIO ports */
+ /* how many bytes into the BADR are the DIO ports */
+ int dio_offset;
int regs_badrindex; /* IO Region for the control, analog output,
and DIO registers */
int reg_sz; /* number of bytes of registers in io region */
@@ -144,17 +145,18 @@ static const struct board_struct boards[] = {
/* Please add your PCI vendor ID to comedidev.h, and it will be forwarded
* upstream. */
static DEFINE_PCI_DEVICE_TABLE(pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci_table);
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
+/*
+ * this structure is for data unique to this hardware driver. If
+ * several hardware drivers keep similar information in this structure,
+ * feel free to suggest moving the variable to the struct comedi_device
+ * struct.
+ */
struct board_private_struct {
unsigned long registers; /* set by probe */
unsigned long dio_registers;
@@ -335,7 +337,10 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (thisboard->dio_chans) {
switch (thisboard->dio_method) {
case DIO_8255:
- /* this is a straight 8255, so register us with the 8255 driver */
+ /*
+ * this is a straight 8255, so register us with
+ * the 8255 driver
+ */
subdev_8255_init(dev, s, NULL, devpriv->dio_registers);
devpriv->attached_to_8255 = 1;
break;
@@ -436,8 +441,11 @@ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
for (i = 0; i < insn->n; i++) {
inw(devpriv->registers + chan * 2);
- /* should I set data[i] to the result of the actual read on the register
- or the cached unsigned int in devpriv->ao_readback[]? */
+ /*
+ * should I set data[i] to the result of the actual read
+ * on the register or the cached unsigned int in
+ * devpriv->ao_readback[]?
+ */
data[i] = devpriv->ao_readback[chan];
}
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 871f109bcfa..e3659bd6e85 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -57,10 +57,9 @@ static const struct contec_board contec_boards[] = {
#define PCI_DEVICE_ID_PIO1616L 0x8172
static DEFINE_PCI_DEVICE_TABLE(contec_pci_table) = {
- {
- PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, PIO1616L}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
+ .driver_data = PIO1616L },
+ {0}
};
MODULE_DEVICE_TABLE(pci, contec_pci_table);
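This entry also shows how per-device data travels through the new-style table: PCI_DEVICE() covers the ID fields and the .driver_data designator is appended in the same initializer, replacing the bare trailing PIO1616L of the old open-coded form. In a driver with a PCI probe callback the value comes back through the matched id; a hypothetical sketch (example_probe is not part of this driver):

	/* illustrative sketch only */
	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		unsigned long board_kind = id->driver_data;	/* PIO1616L for the entry above */

		/* ... select contec_boards[board_kind], enable the device, etc. ... */
		return 0;
	}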
@@ -197,8 +196,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_do_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_do_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
@@ -206,8 +205,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
- printk(" out: %d on %lx\n", s->state,
- dev->iobase + thisboard->out_offs);
+ dev_dbg(dev->hw_dev, "out: %d on %lx\n", s->state,
+ dev->iobase + thisboard->out_offs);
outw(s->state, dev->iobase + thisboard->out_offs);
}
return 2;
@@ -218,8 +217,8 @@ static int contec_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_di_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_di_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 82be77daa7d..e61c6a8f285 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -325,9 +325,8 @@ static const struct daq200_boardtype boardtypes[] = {
#define this_board ((const struct daq200_boardtype *)dev->board_ptr)
static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
- {
- 0x1616, 0x0409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(0x1616, 0x0409) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table);
@@ -430,16 +429,14 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
/* Enable reading from the scanlist FIFO */
fpga->acqControl = DAQBOARD2000_SeqStartScanList;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull) {
+ if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull)
break;
- }
/* udelay(2); */
}
fpga->acqControl = DAQBOARD2000_AdcPacerEnable;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning) {
+ if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning)
break;
- }
/* udelay(2); */
}
for (timeout = 0; timeout < 20; timeout++) {
@@ -465,9 +462,8 @@ static int daqboard2000_ao_insn_read(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
@@ -490,9 +486,8 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
/* fpga->dacControl = (chan + 2) * 0x0010 | 0x0001; udelay(1000); */
fpga->dacSetting[chan] = data[i];
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0) {
+ if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0)
break;
- }
/* udelay(2); */
}
devpriv->ao_readback[chan] = data[i];
@@ -507,7 +502,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
static void daqboard2000_resetLocalBus(struct comedi_device *dev)
{
- printk("daqboard2000_resetLocalBus\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_resetLocalBus\n");
writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
@@ -516,7 +511,7 @@ static void daqboard2000_resetLocalBus(struct comedi_device *dev)
static void daqboard2000_reloadPLX(struct comedi_device *dev)
{
- printk("daqboard2000_reloadPLX\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_reloadPLX\n");
writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
@@ -527,7 +522,7 @@ static void daqboard2000_reloadPLX(struct comedi_device *dev)
static void daqboard2000_pulseProgPin(struct comedi_device *dev)
{
- printk("daqboard2000_pulseProgPin 1\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_pulseProgPin 1\n");
writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
@@ -579,14 +574,14 @@ static int initialize_daqboard2000(struct comedi_device *dev,
secr = readl(devpriv->plx + 0x6c);
if (!(secr & DAQBOARD2000_EEPROM_PRESENT)) {
#ifdef DEBUG_EEPROM
- printk("no serial eeprom\n");
+ dev_dbg(dev->hw_dev, "no serial eeprom\n");
#endif
return -EIO;
}
for (retry = 0; retry < 3; retry++) {
#ifdef DEBUG_EEPROM
- printk("Programming EEPROM try %x\n", retry);
+ dev_dbg(dev->hw_dev, "Programming EEPROM try %x\n", retry);
#endif
daqboard2000_resetLocalBus(dev);
@@ -597,7 +592,8 @@ static int initialize_daqboard2000(struct comedi_device *dev,
if (cpld_array[i] == 0xff
&& cpld_array[i + 1] == 0x20) {
#ifdef DEBUG_EEPROM
- printk("Preamble found at %d\n", i);
+ dev_dbg(dev->hw_dev, "Preamble found at %d\n",
+ i);
#endif
break;
}
@@ -605,13 +601,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
for (; i < len; i += 2) {
int data =
(cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data)) {
+ if (!daqboard2000_writeCPLD(dev, data))
break;
- }
}
if (i >= len) {
#ifdef DEBUG_EEPROM
- printk("Programmed\n");
+ dev_dbg(dev->hw_dev, "Programmed\n");
#endif
daqboard2000_resetLocalBus(dev);
daqboard2000_reloadPLX(dev);
@@ -658,9 +653,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the + reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_PosRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_PosRefDacSelect %d\n", timeout);*/
@@ -668,9 +662,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the - reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_NegRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_NegRefDacSelect %d\n", timeout);*/
@@ -737,15 +730,13 @@ static int daqboard2000_attach(struct comedi_device *dev,
unsigned int aux_len;
int bus, slot;
- printk("comedi%d: daqboard2000:", dev->minor);
-
bus = it->options[0];
slot = it->options[1];
result = alloc_private(dev, sizeof(struct daqboard2000_private));
- if (result < 0) {
+ if (result < 0)
return -ENOMEM;
- }
+
for (card = pci_get_device(0x1616, 0x0409, NULL);
card != NULL; card = pci_get_device(0x1616, 0x0409, card)) {
if (bus || slot) {
@@ -759,10 +750,10 @@ static int daqboard2000_attach(struct comedi_device *dev,
}
if (!card) {
if (bus || slot)
- printk(" no daqboard2000 found at bus/slot: %d/%d\n",
- bus, slot);
+ dev_err(dev->hw_dev, "no daqboard2000 found at bus/slot: %d/%d\n",
+ bus, slot);
else
- printk(" no daqboard2000 found\n");
+ dev_err(dev->hw_dev, "no daqboard2000 found\n");
return -EIO;
} else {
u32 id;
@@ -772,7 +763,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
subsystem_device << 16) | card->subsystem_vendor;
for (i = 0; i < n_boardtypes; i++) {
if (boardtypes[i].id == id) {
- printk(" %s", boardtypes[i].name);
+ dev_dbg(dev->hw_dev, "%s\n",
+ boardtypes[i].name);
dev->board_ptr = boardtypes + i;
}
}
@@ -786,7 +778,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = comedi_pci_enable(card, "daqboard2000");
if (result < 0) {
- printk(" failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
devpriv->got_regions = 1;
@@ -794,9 +786,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
ioremap(pci_resource_start(card, 0), DAQBOARD2000_PLX_SIZE);
devpriv->daq =
ioremap(pci_resource_start(card, 2), DAQBOARD2000_DAQ_SIZE);
- if (!devpriv->plx || !devpriv->daq) {
+ if (!devpriv->plx || !devpriv->daq)
return -ENOMEM;
- }
result = alloc_subdevices(dev, 3);
if (result < 0)
@@ -817,7 +808,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
if (aux_data && aux_len) {
result = initialize_daqboard2000(dev, aux_data, aux_len);
} else {
- printk("no FPGA initialization code, aborting\n");
+ dev_dbg(dev->hw_dev, "no FPGA initialization code, aborting\n");
result = -EIO;
}
if (result < 0)
@@ -857,30 +848,26 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
(unsigned long)(dev->iobase + 0x40));
- printk("\n");
out:
return result;
}
static int daqboard2000_detach(struct comedi_device *dev)
{
- printk("comedi%d: daqboard2000: remove\n", dev->minor);
-
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 2);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (devpriv) {
if (devpriv->daq)
iounmap(devpriv->daq);
if (devpriv->plx)
iounmap(devpriv->plx);
if (devpriv->pci_dev) {
- if (devpriv->got_regions) {
+ if (devpriv->got_regions)
comedi_pci_disable(devpriv->pci_dev);
- }
pci_dev_put(devpriv->pci_dev);
}
}
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 3141dc80fe7..c2dd0ed36a7 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -506,10 +506,8 @@ struct das08_board_struct das08_cs_boards[NUM_DAS08_CS_BOARDS] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, das08_pci_table);
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 6d91d302817..4ad398aad72 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -79,22 +79,20 @@ static int das08_cs_attach(struct comedi_device *dev,
if (ret < 0)
return ret;
- printk("comedi%d: das08_cs: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das08_cs:\n", dev->minor);
/* deal with a pci board */
if (thisboard->bustype == pcmcia) {
if (link == NULL) {
- printk(" no pcmcia cards found\n");
+ dev_err(dev->hw_dev, "no pcmcia cards found\n");
return -EIO;
}
iobase = link->resource[0]->start;
} else {
- printk(" bug! board does not have PCMCIA bustype\n");
+ dev_err(dev->hw_dev, "bug! board does not have PCMCIA bustype\n");
return -EINVAL;
}
- printk("\n");
-
return das08_common_attach(dev, iobase);
}
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index a5ce3b2abe4..5376e718e3d 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -384,20 +384,20 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
byte = 0;
/* if we are using external start trigger (also board dislikes having
* both start and conversion triggers external simultaneously) */
- if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT) {
+ if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
byte |= EXT_TRIG_BIT;
- }
+
outb(byte, dev->iobase + DAS16M1_CS);
/* clear interrupt bit */
outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
/* enable interrupts and internal pacer */
devpriv->control_state &= ~PACER_MASK;
- if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->convert_src == TRIG_TIMER)
devpriv->control_state |= INT_PACER;
- } else {
+ else
devpriv->control_state |= EXT_PACER;
- }
+
devpriv->control_state |= INTE;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
@@ -531,9 +531,8 @@ static void munge_sample_array(short *array, unsigned int num_elements)
{
unsigned int i;
- for (i = 0; i < num_elements; i++) {
+ for (i = 0; i < num_elements; i++)
array[i] = munge_sample(array[i]);
- }
}
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
@@ -668,25 +667,20 @@ static int das16m1_attach(struct comedi_device *dev,
iobase = it->options[0];
- printk("comedi%d: das16m1:", dev->minor);
-
ret = alloc_private(dev, sizeof(struct das16m1_private_struct));
if (ret < 0)
return ret;
dev->board_name = thisboard->name;
- printk(" io 0x%lx-0x%lx 0x%lx-0x%lx",
- iobase, iobase + DAS16M1_SIZE,
- iobase + DAS16M1_82C55, iobase + DAS16M1_82C55 + DAS16M1_SIZE2);
if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) {
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2,
driver_das16m1.driver_name)) {
release_region(iobase, DAS16M1_SIZE);
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -697,17 +691,17 @@ static int das16m1_attach(struct comedi_device *dev,
if (das16m1_irq_bits(irq) >= 0) {
ret = request_irq(irq, das16m1_interrupt, 0,
driver_das16m1.driver_name, dev);
- if (ret < 0) {
- printk(", irq unavailable\n");
+ if (ret < 0)
return ret;
- }
dev->irq = irq;
- printk(", irq %u\n", irq);
+ printk("irq %u\n", irq);
} else if (irq == 0) {
- printk(", no irq\n");
+ printk("no irq\n");
} else {
- printk(", invalid irq\n"
- " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
+ comedi_error(dev, "invalid irq\n"
+ " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
return -EINVAL;
}
@@ -771,7 +765,6 @@ static int das16m1_attach(struct comedi_device *dev,
static int das16m1_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16m1: remove\n", dev->minor);
/* das16m1_reset(dev); */
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index a6df30b7fd7..99ada5a53b9 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -573,22 +573,23 @@ static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
devpriv->dma_bits |= DMA_CH7_CH5;
break;
default:
- printk(" only supports dma channels 5 through 7\n"
- " Dual dma only allows the following combinations:\n"
- " dma 5,6 / 6,7 / or 7,5\n");
+ dev_err(dev->hw_dev, " only supports dma channels 5 through 7\n"
+ " Dual dma only allows the following combinations:\n"
+ " dma 5,6 / 6,7 / or 7,5\n");
return -EINVAL;
break;
}
if (request_dma(dma0, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n", dma0);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma0);
return -EINVAL;
}
devpriv->dma0 = dma0;
devpriv->dma_current = dma0;
if (dma1) {
if (request_dma(dma1, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n",
- dma1);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma1);
return -EINVAL;
}
devpriv->dma1 = dma1;
@@ -631,20 +632,20 @@ static int das1800_attach(struct comedi_device *dev,
if (alloc_private(dev, sizeof(struct das1800_private)) < 0)
return -ENOMEM;
- printk("comedi%d: %s: io 0x%lx", dev->minor, driver_das1800.driver_name,
- iobase);
+ printk(KERN_DEBUG "comedi%d: %s: io 0x%lx", dev->minor,
+ driver_das1800.driver_name, iobase);
if (irq) {
- printk(", irq %u", irq);
+ printk(KERN_CONT ", irq %u", irq);
if (dma0) {
- printk(", dma %u", dma0);
+ printk(KERN_CONT ", dma %u", dma0);
if (dma1)
- printk(" and %u", dma1);
+ printk(KERN_CONT " and %u", dma1);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
if (iobase == 0) {
- printk(" io base address required\n");
+ dev_err(dev->hw_dev, "io base address required\n");
return -EINVAL;
}
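Where one log line is assembled from several printk() calls, as in the das1800 attach banner above, only the first call carries an explicit level and the follow-up fragments are marked KERN_CONT so they continue the same line rather than starting new records. The idiom, reduced to a sketch:

	/* illustrative sketch only */
	printk(KERN_DEBUG "comedi%d: das1800: io 0x%lx", dev->minor, iobase);
	if (irq)
		printk(KERN_CONT ", irq %u", irq);
	printk(KERN_CONT "\n");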
@@ -659,7 +660,7 @@ static int das1800_attach(struct comedi_device *dev,
board = das1800_probe(dev);
if (board < 0) {
- printk(" unable to determine board type\n");
+ dev_err(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
@@ -683,7 +684,8 @@ static int das1800_attach(struct comedi_device *dev,
if (irq) {
if (request_irq(irq, das1800_interrupt, 0,
driver_das1800.driver_name, dev)) {
- printk(" unable to allocate irq %u\n", irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -712,7 +714,7 @@ static int das1800_attach(struct comedi_device *dev,
devpriv->irq_dma_bits |= 0x38;
break;
default:
- printk(" irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
break;
}
@@ -813,8 +815,8 @@ static int das1800_detach(struct comedi_device *dev)
kfree(devpriv->ai_buf1);
}
- printk("comedi%d: %s: remove\n", dev->minor,
- driver_das1800.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver_das1800.driver_name);
return 0;
};
@@ -833,8 +835,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x3:
if (board == das1801st_da || board == das1802st_da ||
board == das1701st_da || board == das1702st_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -843,8 +845,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x4:
if (board == das1802hr_da || board == das1702hr_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -854,8 +856,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x5:
if (board == das1801ao || board == das1802ao ||
board == das1701ao || board == das1702ao) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -864,18 +866,19 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x6:
if (board == das1802hr || board == das1702hr) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
- printk(" Board model (probed, not recommended): das-1802hr\n");
+ printk
+ (" Board model (probed, not recommended): das-1802hr\n");
return das1802hr;
break;
case 0x7:
if (board == das1801st || board == das1802st ||
board == das1701st || board == das1702st) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -884,8 +887,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x8:
if (board == das1801hc || board == das1802hc) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 6328f5280b6..f25684145e8 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -171,7 +171,7 @@ static irqreturn_t intr_handler(int irq, void *d)
struct comedi_subdevice *s = dev->subdevices;
if (!dev->attached || devpriv->das6402_ignoreirq) {
- printk("das6402: BUG: spurious interrupt\n");
+ dev_warn(dev->hw_dev, "BUG: spurious interrupt\n");
return IRQ_HANDLED;
}
#ifdef DEBUG
@@ -228,9 +228,7 @@ static int das6402_ai_cancel(struct comedi_device *dev,
*/
devpriv->das6402_ignoreirq = 1;
-#ifdef DEBUG
- printk("das6402: Stopping acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Stopping acquisition\n");
devpriv->das6402_ignoreirq = 1;
outb_p(0x02, dev->iobase + 10); /* disable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
@@ -246,10 +244,7 @@ static int das6402_ai_mode2(struct comedi_device *dev,
struct comedi_subdevice *s, comedi_trig * it)
{
devpriv->das6402_ignoreirq = 1;
-
-#ifdef DEBUG
- printk("das6402: Starting acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Starting acquisition\n");
outb_p(0x03, dev->iobase + 10); /* enable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);
@@ -329,10 +324,8 @@ static int das6402_attach(struct comedi_device *dev,
if (iobase == 0)
iobase = 0x300;
- printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase);
-
if (!request_region(iobase, DAS6402_SIZE, "das6402")) {
- printk(" I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -340,14 +333,12 @@ static int das6402_attach(struct comedi_device *dev,
/* should do a probe here */
irq = it->options[0];
- printk(" ( irq = %u )", irq);
+ dev_dbg(dev->hw_dev, "( irq = %u )\n", irq);
ret = request_irq(irq, intr_handler, 0, "das6402", dev);
- if (ret < 0) {
- printk("irq conflict\n");
+ if (ret < 0)
return ret;
- }
- dev->irq = irq;
+ dev->irq = irq;
ret = alloc_private(dev, sizeof(struct das6402_private));
if (ret < 0)
return ret;
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 96d41ad7695..6e347b40fe6 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -296,47 +296,47 @@ static int das800_probe(struct comedi_device *dev)
switch (id_bits) {
case 0x0:
if (board == das800) {
- printk(" Board model: DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-800\n");
return board;
}
if (board == ciodas800) {
- printk(" Board model: CIO-DAS800\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS800\n");
return board;
}
- printk(" Board model (probed): DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-800\n");
return das800;
break;
case 0x2:
if (board == das801) {
- printk(" Board model: DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-801\n");
return board;
}
if (board == ciodas801) {
- printk(" Board model: CIO-DAS801\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS801\n");
return board;
}
- printk(" Board model (probed): DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-801\n");
return das801;
break;
case 0x3:
if (board == das802) {
- printk(" Board model: DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-802\n");
return board;
}
if (board == ciodas802) {
- printk(" Board model: CIO-DAS802\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802\n");
return board;
}
if (board == ciodas80216) {
- printk(" Board model: CIO-DAS802/16\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802/16\n");
return board;
}
- printk(" Board model (probed): DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-802\n");
return das802;
break;
default:
- printk(" Board model: probe returned 0x%x (unknown)\n",
- id_bits);
+ dev_dbg(dev->hw_dev, "Board model: probe returned 0x%x (unknown)\n",
+ id_bits);
return board;
break;
}
@@ -466,42 +466,43 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long irq_flags;
int board;
- printk("comedi%d: das800: io 0x%lx", dev->minor, iobase);
+ dev_info(dev->hw_dev, "comedi%d: das800: io 0x%lx\n", dev->minor,
+ iobase);
if (irq)
- printk(", irq %u", irq);
- printk("\n");
+ dev_dbg(dev->hw_dev, "irq %u\n", irq);
/* allocate and initialize dev->private */
if (alloc_private(dev, sizeof(struct das800_private)) < 0)
return -ENOMEM;
if (iobase == 0) {
- printk("io base address required for das800\n");
+ dev_err(dev->hw_dev, "io base address required for das800\n");
return -EINVAL;
}
/* check if io addresses are available */
if (!request_region(iobase, DAS800_SIZE, "das800")) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
board = das800_probe(dev);
if (board < 0) {
- printk("unable to determine board type\n");
+ dev_dbg(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
dev->board_ptr = das800_boards + board;
/* grab our IRQ */
if (irq == 1 || irq > 7) {
- printk("irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
}
if (irq) {
if (request_irq(irq, das800_interrupt, 0, "das800", dev)) {
- printk("unable to allocate irq %u\n", irq);
+ dev_err(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -557,7 +558,7 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int das800_detach(struct comedi_device *dev)
{
- printk("comedi%d: das800: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das800: remove\n", dev->minor);
/* only free stuff if it has been allocated by _attach */
if (dev->iobase)
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 6170f7bac46..0a7979e5299 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -352,7 +352,8 @@ static int dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
if ((status & DT3000_COMPLETION_MASK) == DT3000_NOERROR)
return 0;
- printk("dt3k_send_cmd() timeout/error status=0x%04x\n", status);
+ dev_dbg(dev->hw_dev, "dt3k_send_cmd() timeout/error status=0x%04x\n",
+ status);
return -ETIME;
}
@@ -383,7 +384,7 @@ static void dt3k_writesingle(struct comedi_device *dev, unsigned int subsys,
dt3k_send_cmd(dev, CMD_WRITESINGLE);
}
-static int debug_n_ints = 0;
+static int debug_n_ints;
/* FIXME! Assumes shared interrupt is for this card. */
/* What's this debug_n_ints stuff? Obviously needs some work... */
@@ -429,12 +430,12 @@ static char *intr_flags[] = {
static void debug_intr_flags(unsigned int flags)
{
int i;
- printk("dt3k: intr_flags:");
+ printk(KERN_DEBUG "dt3k: intr_flags:");
for (i = 0; i < 8; i++) {
if (flags & (1 << i))
- printk(" %s", intr_flags[i]);
+ printk(KERN_CONT " %s", intr_flags[i]);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
#endif
@@ -452,7 +453,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
if (count < 0)
count += AI_FIFO_DEPTH;
- printk("reading %d samples\n", count);
+ dev_dbg(dev->hw_dev, "reading %d samples\n", count);
rear = devpriv->ai_rear;
@@ -640,7 +641,7 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int ret;
unsigned int mode;
- printk("dt3k_ai_cmd:\n");
+ dev_dbg(dev->hw_dev, "dt3k_ai_cmd:\n");
for (i = 0; i < cmd->chanlist_len; i++) {
chan = CR_CHAN(cmd->chanlist[i]);
range = CR_RANGE(cmd->chanlist[i]);
@@ -651,15 +652,15 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
aref = CR_AREF(cmd->chanlist[0]);
writew(cmd->scan_end_arg, devpriv->io_addr + DPR_Params(0));
- printk("param[0]=0x%04x\n", cmd->scan_end_arg);
+ dev_dbg(dev->hw_dev, "param[0]=0x%04x\n", cmd->scan_end_arg);
if (cmd->convert_src == TRIG_TIMER) {
divider = dt3k_ns_to_timer(50, &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((divider >> 16), devpriv->io_addr + DPR_Params(1));
- printk("param[1]=0x%04x\n", divider >> 16);
+ dev_dbg(dev->hw_dev, "param[1]=0x%04x\n", divider >> 16);
writew((divider & 0xffff), devpriv->io_addr + DPR_Params(2));
- printk("param[2]=0x%04x\n", divider & 0xffff);
+ dev_dbg(dev->hw_dev, "param[2]=0x%04x\n", divider & 0xffff);
} else {
/* not supported */
}
@@ -668,21 +669,21 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
tscandiv = dt3k_ns_to_timer(100, &cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((tscandiv >> 16), devpriv->io_addr + DPR_Params(3));
- printk("param[3]=0x%04x\n", tscandiv >> 16);
+ dev_dbg(dev->hw_dev, "param[3]=0x%04x\n", tscandiv >> 16);
writew((tscandiv & 0xffff), devpriv->io_addr + DPR_Params(4));
- printk("param[4]=0x%04x\n", tscandiv & 0xffff);
+ dev_dbg(dev->hw_dev, "param[4]=0x%04x\n", tscandiv & 0xffff);
} else {
/* not supported */
}
mode = DT3000_AD_RETRIG_INTERNAL | 0 | 0;
writew(mode, devpriv->io_addr + DPR_Params(5));
- printk("param[5]=0x%04x\n", mode);
+ dev_dbg(dev->hw_dev, "param[5]=0x%04x\n", mode);
writew(aref == AREF_DIFF, devpriv->io_addr + DPR_Params(6));
- printk("param[6]=0x%04x\n", aref == AREF_DIFF);
+ dev_dbg(dev->hw_dev, "param[6]=0x%04x\n", aref == AREF_DIFF);
writew(AI_FIFO_DEPTH / 2, devpriv->io_addr + DPR_Params(7));
- printk("param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
+ dev_dbg(dev->hw_dev, "param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
writew(SUBS_AI, devpriv->io_addr + DPR_SubSys);
ret = dt3k_send_cmd(dev, CMD_CONFIG);
@@ -848,7 +849,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int bus, slot;
int ret = 0;
- printk("dt3000:");
+ dev_dbg(dev->hw_dev, "dt3000:\n");
bus = it->options[0];
slot = it->options[1];
@@ -860,7 +861,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
if (ret == 0) {
- printk(" no DT board found\n");
+ dev_warn(dev->hw_dev, "no DT board found\n");
return -ENODEV;
}
@@ -868,7 +869,8 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (request_irq(devpriv->pci_dev->irq, dt3k_interrupt, IRQF_SHARED,
"dt3000", dev)) {
- printk(" unable to allocate IRQ %u\n", devpriv->pci_dev->irq);
+ dev_err(dev->hw_dev, "unable to allocate IRQ %u\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
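The dt3000 hunks above replace bare printk() calls with the dev_*() logging helpers. A minimal sketch of that pattern follows; the function name and message text are illustrative only, not taken from the driver. dev_dbg() compiles away unless DEBUG or dynamic debug is enabled, and both helpers prefix the message with the device name, which is why the hand-rolled "dt3k:" prefixes can be dropped.

#include <linux/device.h>

/* Sketch only: report IRQ setup through the device-aware log helpers. */
static void example_report_irq(struct device *dev, unsigned int irq, int err)
{
        if (err)
                dev_err(dev, "unable to allocate IRQ %u\n", irq);
        else
                dev_dbg(dev, "using IRQ %u\n", irq);
}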
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 8d98cf41270..6a79ba10630 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -71,18 +71,12 @@ static struct comedi_driver driver_jr3_pci = {
};
static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
- {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
@@ -378,14 +372,14 @@ static int jr3_pci_open(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("jr3_pci_open\n");
+ dev_dbg(dev->hw_dev, "jr3_pci_open\n");
for (i = 0; i < devpriv->n_channels; i++) {
struct jr3_pci_subdev_private *p;
p = dev->subdevices[i].private;
if (p) {
- printk("serial: %p %d (%d)\n", p, p->serial_no,
- p->channel_no);
+ dev_dbg(dev->hw_dev, "serial: %p %d (%d)\n", p,
+ p->serial_no, p->channel_no);
}
}
return 0;
@@ -463,8 +457,8 @@ static int jr3_download_firmware(struct comedi_device *dev, const u8 * data,
break;
more = more
&& read_idm_word(data, size, &pos, &addr);
- printk("Loading#%d %4.4x bytes at %4.4x\n", i,
- count, addr);
+ dev_dbg(dev->hw_dev, "Loading#%d %4.4x bytes at %4.4x\n",
+ i, count, addr);
while (more && count > 0) {
if (addr & 0x4000) {
/* 16 bit data, never seen in real life!! */
@@ -599,24 +593,24 @@ static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
min_full_scale =
get_min_full_scales(channel);
printk("Obtained Min. Full Scales:\n");
- printk("%i ", (min_full_scale).fx);
- printk("%i ", (min_full_scale).fy);
- printk("%i ", (min_full_scale).fz);
- printk("%i ", (min_full_scale).mx);
- printk("%i ", (min_full_scale).my);
- printk("%i ", (min_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (min_full_scale).fx);
+ printk(KERN_CONT "%i ", (min_full_scale).fy);
+ printk(KERN_CONT "%i ", (min_full_scale).fz);
+ printk(KERN_CONT "%i ", (min_full_scale).mx);
+ printk(KERN_CONT "%i ", (min_full_scale).my);
+ printk(KERN_CONT "%i ", (min_full_scale).mz);
+ printk(KERN_CONT "\n");
max_full_scale =
get_max_full_scales(channel);
printk("Obtained Max. Full Scales:\n");
- printk("%i ", (max_full_scale).fx);
- printk("%i ", (max_full_scale).fy);
- printk("%i ", (max_full_scale).fz);
- printk("%i ", (max_full_scale).mx);
- printk("%i ", (max_full_scale).my);
- printk("%i ", (max_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (max_full_scale).fx);
+ printk(KERN_CONT "%i ", (max_full_scale).fy);
+ printk(KERN_CONT "%i ", (max_full_scale).fz);
+ printk(KERN_CONT "%i ", (max_full_scale).mx);
+ printk(KERN_CONT "%i ", (max_full_scale).my);
+ printk(KERN_CONT "%i ", (max_full_scale).mz);
+ printk(KERN_CONT "\n");
set_full_scales(channel,
max_full_scale);
@@ -779,14 +773,12 @@ static int jr3_pci_attach(struct comedi_device *dev,
int opt_bus, opt_slot, i;
struct jr3_pci_dev_private *devpriv;
- printk("comedi%d: jr3_pci\n", dev->minor);
-
opt_bus = it->options[0];
opt_slot = it->options[1];
if (sizeof(struct jr3_channel) != 0xc00) {
- printk("sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
+ dev_err(dev->hw_dev, "sizeof(struct jr3_channel) = %x [expected %x]\n",
+ (unsigned)sizeof(struct jr3_channel), 0xc00);
return -EINVAL;
}
@@ -840,7 +832,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
}
}
if (!card) {
- printk(" no jr3_pci found\n");
+ dev_err(dev->hw_dev, "no jr3_pci found\n");
return -EIO;
} else {
devpriv->pci_dev = card;
@@ -875,10 +867,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
p = dev->subdevices[i].private;
p->channel = &devpriv->iobase->channel[i].data;
- printk("p->channel %p %p (%tx)\n",
- p->channel, devpriv->iobase,
- ((char *)(p->channel) -
- (char *)(devpriv->iobase)));
+ dev_dbg(dev->hw_dev, "p->channel %p %p (%tx)\n",
+ p->channel, devpriv->iobase,
+ ((char *)(p->channel) -
+ (char *)(devpriv->iobase)));
p->channel_no = i;
for (j = 0; j < 8; j++) {
int k;
@@ -916,7 +908,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
devpriv->iobase->channel[0].reset = 0;
result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
- printk("Firmare load %d\n", result);
+ dev_dbg(dev->hw_dev, "Firmare load %d\n", result);
if (result < 0)
goto out;
@@ -934,9 +926,9 @@ static int jr3_pci_attach(struct comedi_device *dev,
*/
msleep_interruptible(25);
for (i = 0; i < 0x18; i++) {
- printk("%c",
- get_u16(&devpriv->iobase->channel[0].
- data.copyright[i]) >> 8);
+ dev_dbg(dev->hw_dev, "%c\n",
+ get_u16(&devpriv->iobase->channel[0].
+ data.copyright[i]) >> 8);
}
/* Start card timer */
@@ -963,7 +955,6 @@ static int jr3_pci_detach(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("comedi%d: jr3_pci: remove\n", dev->minor);
if (devpriv) {
del_timer_sync(&devpriv->timer);
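The jr3_pci and dt3000 hunks also show how a single console line is built from several printk() calls: only the first fragment carries a real log level, and every following fragment uses KERN_CONT so it is appended to the same line. A small sketch with invented names:

#include <linux/kernel.h>

/* Sketch only: print the set flag names on one KERN_DEBUG line. */
static void example_print_flags(const char *const names[8], unsigned int flags)
{
        int i;

        printk(KERN_DEBUG "example: flags:");   /* level set once */
        for (i = 0; i < 8; i++)
                if (flags & (1 << i))
                        printk(KERN_CONT " %s", names[i]);
        printk(KERN_CONT "\n");                 /* terminate the line */
}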
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 286093bca3f..4e9e9a07865 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -52,10 +52,8 @@ static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int cnt_detach(struct comedi_device *dev);
static DEFINE_PCI_DEVICE_TABLE(cnt_pci_table) = {
- {
- PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, cnt_pci_table);
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index cda4b224b30..8b812e41c52 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -188,12 +188,9 @@ static const struct comedi_lrange me2600_ao_range = {
};
static DEFINE_PCI_DEVICE_TABLE(me_pci_table) = {
- {
- PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, me_pci_table);
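The device-table rewrites in jr3_pci, ke_counter and me_daq all use the same idiom: PCI_DEVICE() expands to the vendor/device pair with PCI_ANY_ID for the subsystem fields, replacing the open-coded initializers. A self-contained sketch with made-up IDs:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID       0x1234  /* hypothetical */
#define EXAMPLE_DEVICE_ID       0x5678  /* hypothetical */

static DEFINE_PCI_DEVICE_TABLE(example_pci_table) = {
        /* PCI_DEVICE() wildcards the subvendor/subdevice fields. */
        { PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);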
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 32e675e3f0b..c25e44c1905 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -731,9 +731,8 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outw(trigger_bits, dev->iobase + TRIGGER_REG);
/* start acquisition for soft trigger */
- if (cmd->start_src == TRIG_NOW) {
+ if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
- }
#ifdef A2150_DEBUG
ni_dump_regs(dev);
#endif
@@ -860,11 +859,10 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
case TRIG_ROUND_NEAREST:
default:
/* if least upper bound is better approximation */
- if (lub - *period < *period - glb) {
+ if (lub - *period < *period - glb)
*period = lub;
- } else {
+ else
*period = glb;
- }
break;
case TRIG_ROUND_UP:
*period = lub;
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 49b824c7bd2..c0423a8c3e3 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -52,7 +52,7 @@ the PCMCIA interface.
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev = NULL;
+static struct pcmcia_device *pcmcia_cur_dev;
#define DIO24_SIZE 4 /* size of io region used by board */
@@ -133,22 +133,19 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
- printk("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
- thisboard->name, iobase);
+ pr_debug("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
+ thisboard->name, iobase);
#ifdef incomplete
- if (irq) {
- printk(", irq %u", irq);
- }
+ if (irq)
+ pr_debug("irq %u\n", irq);
#endif
- printk("\n");
-
if (iobase == 0) {
- printk("io base address is zero!\n");
+ pr_err("io base address is zero!\n");
return -EINVAL;
}
@@ -173,7 +170,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dio24_detach(struct comedi_device *dev)
{
- printk("comedi%d: ni_daq_dio24: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_daq_dio24: remove\n", dev->minor);
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 0);
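ni_daq_dio24 switches from bare printk() to the pr_*() helpers, which suit code that has no struct device at hand; pr_err() and pr_debug() expand to printk(KERN_ERR ...) and friends and honour a driver-defined pr_fmt() prefix. A brief sketch, with an invented function and messages:

#define pr_fmt(fmt) "example: " fmt

#include <linux/kernel.h>

/* Sketch only: validate an I/O base and log through pr_*() helpers. */
static int example_check_iobase(unsigned long iobase)
{
        if (iobase == 0) {
                pr_err("io base address is zero!\n");
                return -EINVAL;
        }
        pr_debug("io 0x%lx looks sane\n", iobase);
        return 0;
}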
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 832a5178b63..ff3840544dd 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -145,7 +145,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = link->irq;
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 9148abdad07..0f0d995f137 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1470,7 +1470,7 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
/* FIXME: DIO_Output_Register (16 bit reg) is replaced by M_Offset_Static_Digital_Output (32 bit)
and M_Offset_SCXI_Serial_Data_Out (8 bit) */
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1505,7 +1505,7 @@ static uint16_t m_series_stc_readw(struct comedi_device *dev, int reg)
offset = M_Offset_G01_Status;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1547,7 +1547,7 @@ static void m_series_stc_writel(struct comedi_device *dev, uint32_t data,
offset = M_Offset_G1_Load_B;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1573,7 +1573,7 @@ static uint32_t m_series_stc_readl(struct comedi_device *dev, int reg)
offset = M_Offset_G1_Save;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1632,9 +1632,8 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
}
devpriv->serial_number = be32_to_cpu(devpriv->serial_number);
- for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i) {
+ for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
- }
writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
@@ -1665,9 +1664,9 @@ static void init_6143(struct comedi_device *dev)
static int pcimio_detach(struct comedi_device *dev)
{
mio_common_detach(dev);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (dev->private) {
mite_free_ring(devpriv->ai_mite_ring);
mite_free_ring(devpriv->ao_mite_ring);
@@ -1685,7 +1684,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
- printk("comedi%d: ni_pcimio:", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_pcimio:\n", dev->minor);
ret = ni_alloc_private(dev);
if (ret < 0)
@@ -1695,7 +1694,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
- printk(" %s", boardtype.name);
+ dev_dbg(dev->hw_dev, "%s\n", boardtype.name);
dev->board_name = boardtype.name;
if (boardtype.reg_type & ni_reg_m_series_mask) {
@@ -1712,7 +1711,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = mite_setup(devpriv->mite);
if (ret < 0) {
- printk(" error setting up mite\n");
+ pr_warn("error setting up mite\n");
return ret;
}
comedi_set_hw_dev(dev, &devpriv->mite->pcidev->dev);
@@ -1740,13 +1739,13 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = mite_irq(devpriv->mite);
if (dev->irq == 0) {
- printk(" unknown irq (bad)\n");
+ pr_warn("unknown irq (bad)\n");
} else {
- printk(" ( irq = %u )", dev->irq);
+ pr_debug("( irq = %u )\n", dev->irq);
ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
DRV_NAME, dev);
if (ret < 0) {
- printk(" irq not available\n");
+ pr_warn("irq not available\n");
dev->irq = 0;
}
}
@@ -1787,7 +1786,7 @@ static int pcimio_find_device(struct comedi_device *dev, int bus, int slot)
}
}
}
- printk("no device found\n");
+ pr_warn("no device found\n");
mite_list_devices();
return -EIO;
}
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 0b9bee36eb5..96cd7ec2ad5 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -155,8 +155,8 @@ static int pcl816_attach(struct comedi_device *dev,
static int pcl816_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
static struct comedi_driver driver_pcl816 = {
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index b45a9bd8b48..7344a53a81c 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -252,8 +252,8 @@ static int pcl818_attach(struct comedi_device *dev,
static int pcl818_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
struct pcl818_board {
@@ -463,9 +463,8 @@ static int pcl818_ao_insn_read(struct comedi_device *dev,
int n;
int chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++) {
+ for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_readback[chan];
- }
return n;
}
@@ -571,9 +570,9 @@ conv_finish:
return IRQ_HANDLED;
}
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
/* printk("E"); */
@@ -645,9 +644,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -805,11 +804,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
return IRQ_HANDLED;
}
- if (lo & 2) {
+ if (lo & 2)
len = 512;
- } else {
+ else
len = 0;
- }
for (i = 0; i < len; i++) {
lo = inb(dev->iobase + PCL818_FI_DATALO);
@@ -827,9 +825,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -1009,7 +1007,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
int divisor1 = 0, divisor2 = 0;
unsigned int seglen;
- printk("pcl818_ai_cmd_mode()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode()\n");
if ((!dev->irq) && (!devpriv->dma_rtc)) {
comedi_error(dev, "IRQ not defined!");
return -EINVAL;
@@ -1112,7 +1110,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
break;
}
#endif
- printk("pcl818_ai_cmd_mode() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode() end\n");
return 0;
}
@@ -1309,11 +1307,9 @@ static void setup_channel_list(struct comedi_device *dev,
*/
static int check_single_ended(unsigned int port)
{
- if (inb(port + PCL818_STATUS) & 0x20) {
+ if (inb(port + PCL818_STATUS) & 0x20)
return 1;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -1352,9 +1348,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
- if (err) {
+ if (err)
return 1;
- }
/* step 2: make sure trigger sources are unique and mutually compatible */
@@ -1377,9 +1372,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
err++;
- if (err) {
+ if (err)
return 2;
- }
/* step 3: make sure arguments are trivially compatible */
@@ -1421,9 +1415,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
}
}
- if (err) {
+ if (err)
return 3;
- }
/* step 4: fix up any arguments */
@@ -1438,9 +1431,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
err++;
}
- if (err) {
+ if (err)
return 4;
- }
/* step 5: complain about special chanlist considerations */
@@ -1461,7 +1453,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
int retval;
- printk("pcl818_ai_cmd()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd()\n");
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
@@ -1470,17 +1462,16 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
- if (cmd->stop_src == TRIG_COUNT) {
+ if (cmd->stop_src == TRIG_COUNT)
devpriv->ai_scans = cmd->stop_arg;
- } else {
+ else
devpriv->ai_scans = 0;
- }
if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
devpriv->ai_timer1 = cmd->convert_arg;
retval = pcl818_ai_cmd_mode(1, dev, s);
- printk("pcl818_ai_cmd() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd() end\n");
return retval;
}
if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
@@ -1499,7 +1490,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
if (devpriv->irq_blocked > 0) {
- printk("pcl818_ai_cancel()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel()\n");
devpriv->irq_was_now_closed = 1;
switch (devpriv->ai_mode) {
@@ -1549,7 +1540,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
}
end:
- printk("pcl818_ai_cancel() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel() end\n");
return 0;
}
@@ -1633,11 +1624,11 @@ static int set_rtc_irq_bit(unsigned char bit)
save_flags(flags);
cli();
val = CMOS_READ(RTC_CONTROL);
- if (bit) {
+ if (bit)
val |= RTC_PIE;
- } else {
+ else
val &= ~RTC_PIE;
- }
+
CMOS_WRITE(val, RTC_CONTROL);
CMOS_READ(RTC_INTR_FLAGS);
restore_flags(flags);
@@ -1754,22 +1745,23 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* claim our I/O space */
iobase = it->options[0];
- printk("comedi%d: pcl818: board=%s, ioport=0x%03lx",
- dev->minor, this_board->name, iobase);
+ printk
+ ("comedi%d: pcl818: board=%s, ioport=0x%03lx",
+ dev->minor, this_board->name, iobase);
devpriv->io_range = this_board->io_range;
if ((this_board->fifo) && (it->options[2] == -1)) { /* we've board with FIFO and we want to use FIFO */
devpriv->io_range = PCLx1xFIFO_RANGE;
devpriv->usefifo = 1;
}
if (!request_region(iobase, devpriv->io_range, "pcl818")) {
- printk("I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
if (pcl818_check(iobase)) {
- printk(", I can't detect board. FAIL!\n");
+ comedi_error(dev, "I can't detect board. FAIL!\n");
return -EIO;
}
@@ -1793,19 +1785,18 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ printk(KERN_DEBUG "irq=%u", irq);
}
}
}
}
dev->irq = irq;
- if (irq) {
- devpriv->irq_free = 1;
- } /* 1=we have allocated irq */
- else {
+ if (irq)
+ devpriv->irq_free = 1; /* 1=we have allocated irq */
+ else
devpriv->irq_free = 0;
- }
+
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->ai_mode = 0; /* mode of irq */
@@ -1825,7 +1816,7 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"pcl818 DMA (RTC)", dev)) {
devpriv->dma_rtc = 1;
devpriv->rtc_irq = RTC_IRQ;
- printk(", dma_irq=%u", devpriv->rtc_irq);
+ printk(KERN_DEBUG "dma_irq=%u", devpriv->rtc_irq);
} else {
RTC_lock--;
if (RTC_lock == 0) {
@@ -1850,34 +1841,26 @@ no_rtc:
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & this_board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ printk(KERN_ERR "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, "pcl818");
- if (ret) {
- printk(", unable to allocate DMA %u, FAIL!\n", dma);
+ if (ret)
return -EBUSY; /* DMA isn't free */
- }
devpriv->dma = dma;
- printk(", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[0])
/* maybe experiment with try_to_free_pages() will help .... */
return -EBUSY; /* no buffer :-( */
- }
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
/* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- printk
- (", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[1])
return -EBUSY;
- }
devpriv->dmapages[1] = pages;
devpriv->hwdmaptr[1] =
virt_to_bus((void *)devpriv->dmabuf[1]);
@@ -2017,11 +2000,10 @@ no_dma:
}
/* select 1/10MHz oscilator */
- if ((it->options[3] == 0) || (it->options[3] == 10)) {
+ if ((it->options[3] == 0) || (it->options[3] == 10))
devpriv->i8253_osc_base = 100;
- } else {
+ else
devpriv->i8253_osc_base = 1000;
- }
/* max sampling speed */
devpriv->ns_min = this_board->ns_min;
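The pcl816/pcl818 hunks also pick up two common checkpatch.pl cleanups: static variables are not explicitly initialised to 0 (they live in .bss anyway), and single-statement branches lose their braces. A tiny illustration with invented names:

static int example_lock;        /* implicitly zero; no "= 0" needed */

/* Sketch only: single-statement branches need no braces. */
static void example_set_bit(unsigned char *val, int on)
{
        if (on)
                *val |= 0x01;
        else
                *val &= ~0x01;
}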
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 3ad04aaa1e3..eddac00e4e2 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -371,15 +371,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = it->options[0];
irq[0] = it->options[1];
- printk(KERN_INFO "comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ printk(KERN_INFO "comedi%d: %s: io: %lx attaching...\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->total_iosize,
driver.driver_name)) {
- printk(KERN_ERR "I/O port conflict\n");
+ printk(KERN_ERR "comedi%d: I/O port conflict\n", dev->minor);
return -EIO;
}
@@ -394,7 +394,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmmio_private)) < 0) {
- printk(KERN_ERR "cannot allocate private data structure\n");
+ printk(KERN_ERR "comedi%d: cannot allocate private data structure\n",
+ dev->minor);
return -ENOMEM;
}
@@ -417,7 +418,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk(KERN_ERR "cannot allocate subdevice private data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n",
+ dev->minor);
return -ENOMEM;
}
/*
@@ -427,7 +429,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Allocate 1 AI + 1 AO + 2 DIO subdevs (24 lines per DIO)
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk(KERN_ERR "cannot allocate subdevice data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice data structures\n",
+ dev->minor);
return -ENOMEM;
}
@@ -557,14 +560,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
if (irq[0]) {
- printk(KERN_DEBUG "irq: %u ", irq[0]);
+ printk(KERN_DEBUG "comedi%d: irq: %u\n", dev->minor, irq[0]);
if (thisboard->dio_num_asics == 2 && irq[1])
- printk(KERN_DEBUG "second ASIC irq: %u ", irq[1]);
+ printk(KERN_DEBUG "comedi%d: second ASIC irq: %u\n",
+ dev->minor, irq[1]);
} else {
- printk(KERN_INFO "(IRQ mode disabled) ");
+ printk(KERN_INFO "comedi%d: (IRQ mode disabled)\n", dev->minor);
}
- printk(KERN_INFO "attached\n");
+ printk(KERN_INFO "comedi%d: attached\n", dev->minor);
return 1;
}
@@ -663,7 +667,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
+ printk("data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index b2c2c8971a3..661ba2e0389 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -295,15 +295,15 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq[0] = it->options[1];
irq[1] = it->options[2];
- printk("comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: io: %lx attached\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->num_asics * ASIC_IOSIZE,
driver.driver_name)) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
@@ -318,7 +318,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmuio_private)) < 0) {
- printk("cannot allocate private data structure\n");
+ dev_warn(dev->hw_dev, "cannot allocate private data structure\n");
return -ENOMEM;
}
@@ -337,7 +337,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmuio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk("cannot allocate subdevice private data structures\n");
+ dev_warn(dev->hw_dev, "cannot allocate subdevice private data structures\n");
return -ENOMEM;
}
/*
@@ -348,7 +348,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* 96-channel version of the board.
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk("cannot allocate subdevice data structures\n");
+ dev_dbg(dev->hw_dev, "cannot allocate subdevice data structures\n");
return -ENOMEM;
}
@@ -436,14 +436,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irqs.. */
if (irq[0]) {
- printk("irq: %u ", irq[0]);
+ dev_dbg(dev->hw_dev, "irq: %u\n", irq[0]);
if (irq[1] && thisboard->num_asics == 2)
- printk("second ASIC irq: %u ", irq[1]);
+ dev_dbg(dev->hw_dev, "second ASIC irq: %u\n", irq[1]);
} else {
- printk("(IRQ mode disabled) ");
+ dev_dbg(dev->hw_dev, "(IRQ mode disabled)\n");
}
- printk("attached\n");
return 1;
}
@@ -460,7 +459,8 @@ static int pcmuio_detach(struct comedi_device *dev)
{
int i;
- printk("comedi%d: %s: remove\n", dev->minor, driver.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver.driver_name);
if (dev->iobase)
release_region(dev->iobase, ASIC_IOSIZE * thisboard->num_asics);
@@ -501,7 +501,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("write mask: %08x data: %08x\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "write mask: %08x data: %08x\n", data[0],
+ data[1]);
#endif
s->state = 0;
@@ -537,7 +538,7 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("data_out_byte %02x\n", (unsigned)byte);
+ dev_dbg(dev->hw_dev, "data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
@@ -548,7 +549,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("s->state %08x data_out %08x\n", s->state, data[1]);
+ dev_dbg(dev->hw_dev, "s->state %08x data_out %08x\n", s->state,
+ data[1]);
#endif
return 2;
@@ -951,14 +953,13 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
spin_lock_irqsave(&subpriv->intr.spinlock, flags);
s->async->inttrig = 0;
- if (subpriv->intr.active) {
+ if (subpriv->intr.active)
event = pcmuio_start_intr(dev, s);
- }
+
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 1;
}
@@ -1000,9 +1001,8 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index ade2202b623..d880c2f6fbc 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -824,7 +824,7 @@ static int serial2002_attach(struct comedi_device *dev,
{
struct comedi_subdevice *s;
- printk("comedi%d: serial2002: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: attached\n", dev->minor);
dev->board_name = thisboard->name;
if (alloc_private(dev, sizeof(struct serial2002_private)) < 0)
return -ENOMEM;
@@ -832,7 +832,8 @@ static int serial2002_attach(struct comedi_device *dev,
dev->close = serial_2002_close;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
- printk("/dev/ttyS%d @ %d\n", devpriv->port, devpriv->speed);
+ dev_dbg(dev->hw_dev, "/dev/ttyS%d @ %d\n", devpriv->port,
+ devpriv->speed);
if (alloc_subdevices(dev, 5) < 0)
return -ENOMEM;
@@ -891,7 +892,7 @@ static int serial2002_detach(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
- printk("comedi%d: serial2002: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: remove\n", dev->minor);
for (i = 0; i < 5; i++) {
s = &dev->subdevices[i];
kfree(s->maxdata_list);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a9173..ca6bcf8b023 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
@@ -44,6 +44,7 @@ Status: testing
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
@@ -1523,15 +1524,17 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
return -EFAULT;
down(&this_usbduxsub->sem);
+
if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
if (trignum != 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: invalid trignum\n",
dev->minor);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!(this_usbduxsub->ao_cmd_running)) {
this_usbduxsub->ao_cmd_running = 1;
@@ -1541,8 +1544,7 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: usbdux_ao_inttrig: submitURB: "
"err=%d\n", dev->minor, ret);
this_usbduxsub->ao_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ goto out;
}
s->async->inttrig = NULL;
} else {
@@ -1550,8 +1552,10 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: ao_inttrig but acqu is already running.\n",
dev->minor);
}
+ ret = 1;
+out:
up(&this_usbduxsub->sem);
- return 1;
+ return ret;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
@@ -2688,6 +2692,7 @@ static int usbduxsigma_attach(struct comedi_device *dev,
if (ret < 0) {
dev_err(&udev->interface->dev,
"comedi%d: no space for subdev\n", dev->minor);
+ up(&udev->sem);
up(&start_stop_sem);
return ret;
}
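The usbduxsigma change to usbdux_ao_inttrig() converts its early returns into a single unlock path, so the semaphore taken with down() is released exactly once on every exit. A stand-alone sketch of that shape, using a hypothetical example_dev structure rather than the driver's private struct:

#include <linux/errno.h>
#include <linux/semaphore.h>

struct example_dev {
        struct semaphore sem;
        int probed;
};

/* Sketch only: all error paths funnel through "out" to release the lock. */
static int example_trigger(struct example_dev *d, unsigned int trignum)
{
        int ret;

        down(&d->sem);
        if (!d->probed) {
                ret = -ENODEV;
                goto out;
        }
        if (trignum != 0) {
                ret = -EINVAL;
                goto out;
        }
        ret = 1;
out:
        up(&d->sem);
        return ret;
}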
diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
index fde5b0627f6..8cd51a7aad8 100644
--- a/drivers/staging/crystalhd/bc_dts_defs.h
+++ b/drivers/staging/crystalhd/bc_dts_defs.h
@@ -296,43 +296,79 @@ enum {
vdecColourPrimariesSMPTE240M,
vdecColourPrimariesGenericFilm,
};
-
+/**
+ * @vdecRESOLUTION_CUSTOM: custom
+ * @vdecRESOLUTION_480i: 480i
+ * @vdecRESOLUTION_1080i: 1080i (1920x1080, 60i)
+ * @vdecRESOLUTION_NTSC: NTSC (720x483, 60i)
+ * @vdecRESOLUTION_480p: 480p (720x480, 60p)
+ * @vdecRESOLUTION_720p: 720p (1280x720, 60p)
+ * @vdecRESOLUTION_PAL1: PAL_1 (720x576, 50i)
+ * @vdecRESOLUTION_1080i25: 1080i25 (1920x1080, 50i)
+ * @vdecRESOLUTION_720p50: 720p50 (1280x720, 50p)
+ * @vdecRESOLUTION_576p: 576p (720x576, 50p)
+ * @vdecRESOLUTION_1080i29_97: 1080i (1920x1080, 59.94i)
+ * @vdecRESOLUTION_720p59_94: 720p (1280x720, 59.94p)
+ * @vdecRESOLUTION_SD_DVD: SD DVD (720x483, 60i)
+ * @vdecRESOLUTION_480p656: 480p (720x480, 60p),
+ * output bus width 8 bit, clock 74.25MHz
+ * @vdecRESOLUTION_1080p23_976: 1080p23_976 (1920x1080, 23.976p)
+ * @vdecRESOLUTION_720p23_976: 720p23_976 (1280x720p, 23.976p)
+ * @vdecRESOLUTION_240p29_97: 240p (1440x240, 29.97p)
+ * @vdecRESOLUTION_240p30: 240p (1440x240, 30p)
+ * @vdecRESOLUTION_288p25: 288p (1440x288p, 25p)
+ * @vdecRESOLUTION_1080p29_97: 1080p29_97 (1920x1080, 29.97p)
+ * @vdecRESOLUTION_1080p30: 1080p30 (1920x1080, 30p)
+ * @vdecRESOLUTION_1080p24: 1080p24 (1920x1080, 24p)
+ * @vdecRESOLUTION_1080p25: 1080p25 (1920x1080, 25p)
+ * @vdecRESOLUTION_720p24: 720p24 (1280x720, 25p)
+ * @vdecRESOLUTION_720p29_97: 720p29.97 (1280x720, 29.97p)
+ * @vdecRESOLUTION_480p23_976: 480p23.976 (720*480, 23.976)
+ * @vdecRESOLUTION_480p29_97: 480p29.976 (720*480, 29.97p)
+ * @vdecRESOLUTION_576p25: 576p25 (720*576, 25p)
+ * @vdecRESOLUTION_480p0: 480p (720x480, 0p)
+ * @vdecRESOLUTION_480i0: 480i (720x480, 0i)
+ * @vdecRESOLUTION_576p0: 576p (720x576, 0p)
+ * @vdecRESOLUTION_720p0: 720p (1280x720, 0p)
+ * @vdecRESOLUTION_1080p0: 1080p (1920x1080, 0p)
+ * @vdecRESOLUTION_1080i0: 1080i (1920x1080, 0i)
+ */
enum {
- vdecRESOLUTION_CUSTOM = 0x00000000, /* custom */
- vdecRESOLUTION_480i = 0x00000001, /* 480i */
- vdecRESOLUTION_1080i = 0x00000002, /* 1080i (1920x1080, 60i) */
- vdecRESOLUTION_NTSC = 0x00000003, /* NTSC (720x483, 60i) */
- vdecRESOLUTION_480p = 0x00000004, /* 480p (720x480, 60p) */
- vdecRESOLUTION_720p = 0x00000005, /* 720p (1280x720, 60p) */
- vdecRESOLUTION_PAL1 = 0x00000006, /* PAL_1 (720x576, 50i) */
- vdecRESOLUTION_1080i25 = 0x00000007, /* 1080i25 (1920x1080, 50i) */
- vdecRESOLUTION_720p50 = 0x00000008, /* 720p50 (1280x720, 50p) */
- vdecRESOLUTION_576p = 0x00000009, /* 576p (720x576, 50p) */
- vdecRESOLUTION_1080i29_97 = 0x0000000A, /* 1080i (1920x1080, 59.94i) */
- vdecRESOLUTION_720p59_94 = 0x0000000B, /* 720p (1280x720, 59.94p) */
- vdecRESOLUTION_SD_DVD = 0x0000000C, /* SD DVD (720x483, 60i) */
- vdecRESOLUTION_480p656 = 0x0000000D, /* 480p (720x480, 60p), output bus width 8 bit, clock 74.25MHz */
- vdecRESOLUTION_1080p23_976 = 0x0000000E, /* 1080p23_976 (1920x1080, 23.976p) */
- vdecRESOLUTION_720p23_976 = 0x0000000F, /* 720p23_976 (1280x720p, 23.976p) */
- vdecRESOLUTION_240p29_97 = 0x00000010, /* 240p (1440x240, 29.97p ) */
- vdecRESOLUTION_240p30 = 0x00000011, /* 240p (1440x240, 30p) */
- vdecRESOLUTION_288p25 = 0x00000012, /* 288p (1440x288p, 25p) */
- vdecRESOLUTION_1080p29_97 = 0x00000013, /* 1080p29_97 (1920x1080, 29.97p) */
- vdecRESOLUTION_1080p30 = 0x00000014, /* 1080p30 (1920x1080, 30p) */
- vdecRESOLUTION_1080p24 = 0x00000015, /* 1080p24 (1920x1080, 24p) */
- vdecRESOLUTION_1080p25 = 0x00000016, /* 1080p25 (1920x1080, 25p) */
- vdecRESOLUTION_720p24 = 0x00000017, /* 720p24 (1280x720, 25p) */
- vdecRESOLUTION_720p29_97 = 0x00000018, /* 720p29.97 (1280x720, 29.97p) */
- vdecRESOLUTION_480p23_976 = 0x00000019, /* 480p23.976 (720*480, 23.976) */
- vdecRESOLUTION_480p29_97 = 0x0000001A, /* 480p29.976 (720*480, 29.97p) */
- vdecRESOLUTION_576p25 = 0x0000001B, /* 576p25 (720*576, 25p) */
+ vdecRESOLUTION_CUSTOM = 0x00000000,
+ vdecRESOLUTION_480i = 0x00000001,
+ vdecRESOLUTION_1080i = 0x00000002,
+ vdecRESOLUTION_NTSC = 0x00000003,
+ vdecRESOLUTION_480p = 0x00000004,
+ vdecRESOLUTION_720p = 0x00000005,
+ vdecRESOLUTION_PAL1 = 0x00000006,
+ vdecRESOLUTION_1080i25 = 0x00000007,
+ vdecRESOLUTION_720p50 = 0x00000008,
+ vdecRESOLUTION_576p = 0x00000009,
+ vdecRESOLUTION_1080i29_97 = 0x0000000A,
+ vdecRESOLUTION_720p59_94 = 0x0000000B,
+ vdecRESOLUTION_SD_DVD = 0x0000000C,
+ vdecRESOLUTION_480p656 = 0x0000000D,
+ vdecRESOLUTION_1080p23_976 = 0x0000000E,
+ vdecRESOLUTION_720p23_976 = 0x0000000F,
+ vdecRESOLUTION_240p29_97 = 0x00000010,
+ vdecRESOLUTION_240p30 = 0x00000011,
+ vdecRESOLUTION_288p25 = 0x00000012,
+ vdecRESOLUTION_1080p29_97 = 0x00000013,
+ vdecRESOLUTION_1080p30 = 0x00000014,
+ vdecRESOLUTION_1080p24 = 0x00000015,
+ vdecRESOLUTION_1080p25 = 0x00000016,
+ vdecRESOLUTION_720p24 = 0x00000017,
+ vdecRESOLUTION_720p29_97 = 0x00000018,
+ vdecRESOLUTION_480p23_976 = 0x00000019,
+ vdecRESOLUTION_480p29_97 = 0x0000001A,
+ vdecRESOLUTION_576p25 = 0x0000001B,
/* For Zero Frame Rate */
- vdecRESOLUTION_480p0 = 0x0000001C, /* 480p (720x480, 0p) */
- vdecRESOLUTION_480i0 = 0x0000001D, /* 480i (720x480, 0i) */
- vdecRESOLUTION_576p0 = 0x0000001E, /* 576p (720x576, 0p) */
- vdecRESOLUTION_720p0 = 0x0000001F, /* 720p (1280x720, 0p) */
- vdecRESOLUTION_1080p0 = 0x00000020, /* 1080p (1920x1080, 0p) */
- vdecRESOLUTION_1080i0 = 0x00000021, /* 1080i (1920x1080, 0i) */
+ vdecRESOLUTION_480p0 = 0x0000001C,
+ vdecRESOLUTION_480i0 = 0x0000001D,
+ vdecRESOLUTION_576p0 = 0x0000001E,
+ vdecRESOLUTION_720p0 = 0x0000001F,
+ vdecRESOLUTION_1080p0 = 0x00000020,
+ vdecRESOLUTION_1080i0 = 0x00000021,
};
/* Bit definitions for 'flags' field */
@@ -359,14 +395,23 @@ enum _BC_OUTPUT_FORMAT {
MODE422_YUY2 = 0x1,
MODE422_UYVY = 0x2,
};
-
+/**
+ * struct BC_PIC_INFO_BLOCK
+ * @timeStamp: Timestamp
+ * @picture_number: Ordinal display number
+ * @width: pixels
+ * @height: pixels
+ * @chroma_format: 0x420, 0x422 or 0x444
+ * @n_drop: number of non-reference frames
+ * remaining to be dropped
+ */
struct BC_PIC_INFO_BLOCK {
/* Common fields. */
- uint64_t timeStamp; /* Timestamp */
- uint32_t picture_number; /* Ordinal display number */
- uint32_t width; /* pixels */
- uint32_t height; /* pixels */
- uint32_t chroma_format; /* 0x420, 0x422 or 0x444 */
+ uint64_t timeStamp;
+ uint32_t picture_number;
+ uint32_t width;
+ uint32_t height;
+ uint32_t chroma_format;
uint32_t pulldown;
uint32_t flags;
uint32_t frame_rate;
@@ -376,7 +421,8 @@ struct BC_PIC_INFO_BLOCK {
uint32_t sess_num;
uint32_t ycom;
uint32_t custom_aspect_ratio_width_height;
- uint32_t n_drop; /* number of non-reference frames remaining to be dropped */
+ uint32_t n_drop; /* number of non-reference frames
+ remaining to be dropped */
/* Protocol-specific extensions. */
union {
@@ -390,43 +436,71 @@ struct BC_PIC_INFO_BLOCK {
/*------------------------------------------------------*
* ProcOut Info *
*------------------------------------------------------*/
-/* Optional flags for ProcOut Interface.*/
+
+/**
+ * enum POUT_OPTIONAL_IN_FLAGS - Optional flags for ProcOut Interface.
+ * @BC_POUT_FLAGS_YV12: Copy Data in YV12 format
+ * @BC_POUT_FLAGS_STRIDE: Stride size is valid.
+ * @BC_POUT_FLAGS_SIZE: Take size information from Application
+ * @BC_POUT_FLAGS_INTERLACED: copy only half the bytes
+ * @BC_POUT_FLAGS_INTERLEAVED: interleaved frame
+ * @BC_POUT_FLAGS_FMT_CHANGE: Data is not VALID when this flag is set
+ * @BC_POUT_FLAGS_PIB_VALID: PIB Information valid
+ * @BC_POUT_FLAGS_ENCRYPTED: Data is encrypted.
+ * @BC_POUT_FLAGS_FLD_BOT: Bottom Field data
+ */
enum POUT_OPTIONAL_IN_FLAGS_ {
/* Flags from App to Device */
- BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
- BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
- BC_POUT_FLAGS_SIZE = 0x04, /* Take size information from Application */
- BC_POUT_FLAGS_INTERLACED = 0x08, /* copy only half the bytes */
- BC_POUT_FLAGS_INTERLEAVED = 0x10, /* interleaved frame */
+ BC_POUT_FLAGS_YV12 = 0x01,
+ BC_POUT_FLAGS_STRIDE = 0x02,
+ BC_POUT_FLAGS_SIZE = 0x04,
+ BC_POUT_FLAGS_INTERLACED = 0x08,
+ BC_POUT_FLAGS_INTERLEAVED = 0x10,
/* Flags from Device to APP */
- BC_POUT_FLAGS_FMT_CHANGE = 0x10000, /* Data is not VALID when this flag is set */
- BC_POUT_FLAGS_PIB_VALID = 0x20000, /* PIB Information valid */
- BC_POUT_FLAGS_ENCRYPTED = 0x40000, /* Data is encrypted. */
- BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */
+ BC_POUT_FLAGS_FMT_CHANGE = 0x10000,
+ BC_POUT_FLAGS_PIB_VALID = 0x20000,
+ BC_POUT_FLAGS_ENCRYPTED = 0x40000,
+ BC_POUT_FLAGS_FLD_BOT = 0x80000,
};
-typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
+typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width,
+ uint32_t height, uint32_t stride, void *pOut);
/* Line 21 Closed Caption */
/* User Data */
#define MAX_UD_SIZE 1792 /* 1920 - 128 */
+/**
+ * struct BC_DTS_PROC_OUT
+ * @Ybuff: Caller Supplied buffer for Y data
+ * @YbuffSz: Caller Supplied Y buffer size
+ * @YBuffDoneSz: Transferred Y datasize
+ * @UVbuff: Caller Supplied buffer for UV data
+ * @UVbuffSz: Caller Supplied UV buffer size
+ * @UVBuffDoneSz: Transferred UV data size
+ * @StrideSz: Caller supplied Stride Size
+ * @PoutFlags: Call IN Flags
+ * @discCnt: Picture discontinuity count
+ * @PicInfo: Picture Information Block Data
+ * @b422Mode: Picture output Mode
+ * @bPibEnc: PIB encrypted
+ */
struct BC_DTS_PROC_OUT {
- uint8_t *Ybuff; /* Caller Supplied buffer for Y data */
- uint32_t YbuffSz; /* Caller Supplied Y buffer size */
- uint32_t YBuffDoneSz; /* Transferred Y datasize */
+ uint8_t *Ybuff;
+ uint32_t YbuffSz;
+ uint32_t YBuffDoneSz;
- uint8_t *UVbuff; /* Caller Supplied buffer for UV data */
- uint32_t UVbuffSz; /* Caller Supplied UV buffer size */
- uint32_t UVBuffDoneSz; /* Transferred UV data size */
+ uint8_t *UVbuff;
+ uint32_t UVbuffSz;
+ uint32_t UVBuffDoneSz;
- uint32_t StrideSz; /* Caller supplied Stride Size */
- uint32_t PoutFlags; /* Call IN Flags */
+ uint32_t StrideSz;
+ uint32_t PoutFlags;
- uint32_t discCnt; /* Picture discontinuity count */
+ uint32_t discCnt;
- struct BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
+ struct BC_PIC_INFO_BLOCK PicInfo;
/* Line 21 Closed Caption */
/* User Data */
@@ -436,39 +510,47 @@ struct BC_DTS_PROC_OUT {
void *hnd;
dts_pout_callback AppCallBack;
uint8_t DropFrames;
- uint8_t b422Mode; /* Picture output Mode */
- uint8_t bPibEnc; /* PIB encrypted */
+ uint8_t b422Mode;
+ uint8_t bPibEnc;
uint8_t bRevertScramble;
};
-
+/**
+ * struct BC_DTS_STATUS
+ * @ReadyListCount: Number of frames in ready list (reported by driver)
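+ * @FreeListCount: Number of frame buffers free. (reported by driver)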
+ * @PowerStateChange: Number of active state power
+ * transitions (reported by driver)
+ * @FramesDropped: Number of frames dropped. (reported by DIL)
+ * @FramesCaptured: Number of frames captured. (reported by DIL)
+ * @FramesRepeated: Number of frames repeated. (reported by DIL)
+ * @InputCount: Times compressed video has been sent to the HW.
+ * i.e. Successful DtsProcInput() calls (reported by DIL)
+ * @InputTotalSize: Amount of compressed video that has been sent to the HW.
+ * (reported by DIL)
+ * @InputBusyCount: Times compressed video has attempted to be sent to the HW
+ * but the input FIFO was full. (reported by DIL)
+ * @PIBMissCount: Amount of times a PIB is invalid. (reported by DIL)
+ * @cpbEmptySize: supported only for H.264, specifically changed for
+ * Adobe. Report size of CPB buffer available. (reported by DIL)
+ * @NextTimeStamp: TimeStamp of the next picture that will be returned
+ * by a call to ProcOutput. Added for Adobe. Reported
+ * back from the driver
+ */
struct BC_DTS_STATUS {
- uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */
- uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */
- uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */
+ uint8_t ReadyListCount;
+ uint8_t FreeListCount;
+ uint8_t PowerStateChange;
uint8_t reserved_[1];
-
- uint32_t FramesDropped; /* Number of frames dropped. (reported by DIL) */
- uint32_t FramesCaptured; /* Number of frames captured. (reported by DIL) */
- uint32_t FramesRepeated; /* Number of frames repeated. (reported by DIL) */
-
- uint32_t InputCount; /* Times compressed video has been sent to the HW.
- * i.e. Successful DtsProcInput() calls (reported by DIL) */
- uint64_t InputTotalSize; /* Amount of compressed video that has been sent to the HW.
- * (reported by DIL) */
- uint32_t InputBusyCount; /* Times compressed video has attempted to be sent to the HW
- * but the input FIFO was full. (reported by DIL) */
-
- uint32_t PIBMissCount; /* Amount of times a PIB is invalid. (reported by DIL) */
-
- uint32_t cpbEmptySize; /* supported only for H.264, specifically changed for
- * Adobe. Report size of CPB buffer available.
- * Reported by DIL */
- uint64_t NextTimeStamp; /* TimeStamp of the next picture that will be returned
- * by a call to ProcOutput. Added for Adobe. Reported
- * back from the driver */
+ uint32_t FramesDropped;
+ uint32_t FramesCaptured;
+ uint32_t FramesRepeated;
+ uint32_t InputCount;
+ uint64_t InputTotalSize;
+ uint32_t InputBusyCount;
+ uint32_t PIBMissCount;
+ uint32_t cpbEmptySize;
+ uint64_t NextTimeStamp;
uint8_t reserved__[16];
-
};
#define BC_SWAP32(_v) \
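The bc_dts_defs.h hunks move trailing member comments into kernel-doc blocks, where each member is described on an @name: line that scripts/kernel-doc can extract. A minimal sketch with invented members:

/**
 * enum example_flags - sketch of the kernel-doc style used above
 * @EXAMPLE_FLAG_A: first flag
 * @EXAMPLE_FLAG_B: second flag
 *
 * Each member is documented in the leading block comment instead of a
 * trailing comment, so the description survives automated extraction.
 */
enum example_flags {
        EXAMPLE_FLAG_A = 0x01,
        EXAMPLE_FLAG_B = 0x02,
};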
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index 5cb3afda011..e06da4a6f6f 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet.h,v 1.3 2005/09/28 00:10:07 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_H_
#define _INC_COMET_H_
@@ -23,27 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:07 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet.h,v $
- * Revision 1.3 2005/09/28 00:10:07 rickd
- * Add RCS header. Switch to structure usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined(__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-
#define VINT32 volatile u_int32_t
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
index db1293c71a6..84931117da3 100644
--- a/drivers/staging/cxt1e1/comet_tables.c
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.c,v 1.2 2005/10/17 23:55:27 rickd PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* comet_tables.c - waveform tables for the PM4351 'COMET'
*
@@ -20,28 +16,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:27 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.c,v $
- * Revision 1.2 2005/10/17 23:55:27 rickd
- * Note that 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.1 2005/09/28 00:10:05 rickd
- * Cosmetic alignment of tables for readability.
- *
- * Revision 1.0 2005/05/10 22:47:53 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_comet_tblc[] =
- "@(#)comet_tables.c - $Revision: 1.2 $ (c) Copyright 2004-2005 SBE, Inc.";
-
-
#include <linux/types.h>
/*****************************************************************************
diff --git a/drivers/staging/cxt1e1/comet_tables.h b/drivers/staging/cxt1e1/comet_tables.h
index 80424a26a16..3e2e5badf78 100644
--- a/drivers/staging/cxt1e1/comet_tables.h
+++ b/drivers/staging/cxt1e1/comet_tables.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.h,v 1.5 2006/01/02 22:37:31 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_TBLS_H_
#define _INC_COMET_TBLS_H_
@@ -23,26 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.5 $
- * Last changed on $Date: 2006/01/02 22:37:31 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.h,v $
- * Revision 1.5 2006/01/02 22:37:31 rickd
- * Double indexed arrays need sizings to avoid CC errors under
- * gcc 4.0.0
- *
- * Revision 1.4 2005/10/17 23:55:28 rickd
- * The 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU License info. Structures moved to -C- file.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 5c99646cd10..4254c0426db 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -1,7 +1,3 @@
-/*
- * $Id: libsbew.h,v 2.1 2005/10/27 18:54:19 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_LIBSBEW_H_
#define _INC_LIBSBEW_H_
@@ -25,32 +21,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2005/10/27 18:54:19 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: libsbew.h,v $
- * Revision 2.1 2005/10/27 18:54:19 rickd
- * Add E1PLAIN support.
- *
- * Revision 2.0 2005/09/28 00:10:08 rickd
- * Customized for PMCC4 comet-per-port design.
- *
- * Revision 1.15 2005/03/29 00:51:31 rickd
- * File imported from C1T3 port, Revision 1.15
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/** set driver logging level **/
/********************************/
@@ -323,7 +295,7 @@ struct sbecom_port_param
#define CFG_CH_DINV_TX 0x02
-/* Posssible resettable chipsets/functions */
+/* Possible resettable chipsets/functions */
#define RESET_DEV_TEMUX 1
#define RESET_DEV_TECT3 RESET_DEV_TEMUX
#define RESET_DEV_PLL 2
@@ -574,8 +546,4 @@ struct sbecom_port_param
extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
#endif
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 5cc3423a664..90c0f1e30f6 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.c,v 2.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
@@ -24,53 +20,8 @@ unsigned int max_bh = 0;
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.c,v $
- * Revision 2.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 2.0 2007/08/15 22:13:20 rickd
- * Update to printf pointer %p usage and correct some UINT to ULONG for
- * 64bit comptibility.
- *
- * Revision 1.7 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 1.6 2005/10/27 18:54:19 rickd
- * Clean out old code. Default to HDLC_FCS16, not TRANS.
- *
- * Revision 1.5 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- *
- * Revision 1.4 2005/10/13 20:35:25 rickd
- * Cleanup warning for unused <flags> variable.
- *
- * Revision 1.3 2005/10/13 19:19:22 rickd
- * Disable redundant driver removal cleanup code.
- *
- * Revision 1.2 2005/10/11 18:36:16 rickd
- * Clean up warning messages caused by de-implemented some <flags> associated
- * with spin_lock() removals.
- *
- * Revision 1.1 2005/10/05 00:45:28 rickd
- * Re-enable xmit on flow-controlled and full channel to fix restart hang.
- * Add some temp spin-lock debug code (rld_spin_owner).
- *
- * Revision 1.0 2005/09/28 00:10:06 rickd
- * Initial release for C4T1E1 support. Lots of transparent
- * mode updates.
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_musyccc[] =
-"@(#)musycc.c - $Revision: 2.1 $ (c) Copyright 2004-2006 SBE, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
diff --git a/drivers/staging/cxt1e1/musycc.h b/drivers/staging/cxt1e1/musycc.h
index 68f3660f447..cf6b54e1568 100644
--- a/drivers/staging/cxt1e1/musycc.h
+++ b/drivers/staging/cxt1e1/musycc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.h,v 1.3 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_MUSYCC_H_
#define _INC_MUSYCC_H_
@@ -24,36 +20,13 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.h,v $
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Add PMCC4 PCI/DevIDs. Implement new
- * musycc reg&bits namings. Use PORTMAP_0 GCD grouping.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#define VINT8 volatile u_int8_t
#define VINT32 volatile u_int32_t
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
#include "pmcc4_defs.h"
@@ -448,10 +421,6 @@ extern "C"
/* This must be defined on an entire channel group (Port) basis */
#define SUERM_THRESHOLD 0x1f
-#ifdef __cplusplus
-}
-#endif
-
#undef VINT32
#undef VINT8
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
index a56029866c2..f17a902e95e 100644
--- a/drivers/staging/cxt1e1/ossiRelease.c
+++ b/drivers/staging/cxt1e1/ossiRelease.c
@@ -1,7 +1,3 @@
-/*
- * $Id: ossiRelease.c,v 1.2 2008/05/08 20:14:03 rdobbs PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* ossiRelease.c -
*
@@ -26,14 +22,8 @@
* One Stop Systems, Inc. Escondido, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2008/05/08 20:14:03 $
- * Changed by $Author: rdobbs $
- *-----------------------------------------------------------------------------
*/
-
char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.h b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
index c3ada87efd2..96c48cb8326 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.h
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmc93x6_eeprom.h,v 1.1 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMC93X6_EEPROM_H_
#define _INC_PMC93X6_EEPROM_H_
@@ -23,26 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- *-----------------------------------------------------------------------------
- * $Log: pmc93x6_eeprom.h,v $
- * Revision 1.1 2005/09/28 00:10:08 rickd
- * pmc_verify_cksum return value is char.
- *
- * Revision 1.0 2005/05/04 17:20:51 rickd
- * Initial revision
- *
- * Revision 1.0 2005/04/22 23:48:48 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#ifdef __KERNEL__
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index e046b87763a..b0ed4ad1301 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4.h,v 1.4 2005/11/01 19:24:48 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_H_
#define _INC_PMCC4_H_
@@ -23,49 +19,15 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.4 $
- * Last changed on $Date: 2005/11/01 19:24:48 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4.h,v $
- * Revision 1.4 2005/11/01 19:24:48 rickd
- * Remove de-implement function prototypes. Several <int> to
- * <status_t> changes for consistent usage of same.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Use config params from libsbew.h
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-
typedef int status_t;
#define SBE_DRVR_FAIL 0
#define SBE_DRVR_SUCCESS 1
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
/********************/
/* PMCC4 memory Map */
/********************/
@@ -105,10 +67,6 @@ extern "C"
#define sbeE1errSMF 0x02
#define sbeE1CRC 0x01
-#ifdef __cplusplus
-}
-#endif
-
#ifdef __KERNEL__
/*
diff --git a/drivers/staging/cxt1e1/pmcc4_cpld.h b/drivers/staging/cxt1e1/pmcc4_cpld.h
index 6d8f0337aa3..a51209bc527 100644
--- a/drivers/staging/cxt1e1/pmcc4_cpld.h
+++ b/drivers/staging/cxt1e1/pmcc4_cpld.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_cpld.h,v 1.0 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_CPLD_H_
#define _INC_PMCC4_CPLD_H_
@@ -23,34 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_cpld.h,v $
- * Revision 1.0 2005/09/28 00:10:08 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/* iSPLD control chip registers */
@@ -117,8 +88,4 @@ extern "C"
#define PMCC4_CPLD_INTR_CMT_3 0x04
#define PMCC4_CPLD_INTR_CMT_4 0x08
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _INC_PMCC4_CPLD_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_defs.h b/drivers/staging/cxt1e1/pmcc4_defs.h
index a39505f45c2..83ceae4324b 100644
--- a/drivers/staging/cxt1e1/pmcc4_defs.h
+++ b/drivers/staging/cxt1e1/pmcc4_defs.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_defs.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_DEFS_H_
#define _INC_PMCC4_DEFS_H_
@@ -25,16 +21,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_defs.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index e1f07fabd22..8a7b3a64645 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -1,8 +1,3 @@
-/*
- * $Id: pmcc4_drv.c,v 3.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
-
/*-----------------------------------------------------------------------------
* pmcc4_drv.c -
*
@@ -22,74 +17,10 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 3.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_drv.c,v $
- * Revision 3.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 3.0 2007/08/15 22:19:55 rickd
- * Correct sizeof() castings and pi->regram to support 64bit compatibility.
- *
- * Revision 2.10 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 2.9 2005/11/01 19:22:49 rickd
- * Add sanity checks against max_port for ioctl functions.
- *
- * Revision 2.8 2005/10/27 18:59:25 rickd
- * Code cleanup. Default channel config to HDLC_FCS16.
- *
- * Revision 2.7 2005/10/18 18:16:30 rickd
- * Further NCOMM code repairs - (1) interrupt matrix usage inconsistent
- * for indexing into nciInterrupt[][], code missing double parameters.
- * (2) check input of ncomm interrupt registration cardID for correct
- * boundary values.
- *
- * Revision 2.6 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- * Corrected NCOMMs wanpmcC4T1E1_getBaseAddress() to correctly handle
- * multiple boards.
- *
- * Revision 2.5 2005/10/13 23:01:28 rickd
- * Correct panic for illegal address reference w/in get_brdinfo on
- * first_if/last_if name acquistion under Linux 2.6
- *
- * Revision 2.4 2005/10/13 21:20:19 rickd
- * Correction of c4_cleanup() wherein next should be acquired before
- * ci_t structure is free'd.
- *
- * Revision 2.3 2005/10/13 19:20:10 rickd
- * Correct driver removal cleanup code for multiple boards.
- *
- * Revision 2.2 2005/10/11 18:34:04 rickd
- * New routine added to determine number of ports (comets) on board.
- *
- * Revision 2.1 2005/10/05 00:48:13 rickd
- * Add some RX activation trace code.
- *
- * Revision 2.0 2005/09/28 00:10:06 rickd
- * Implement 2.6 workqueue for TX/RX restart. Correction to
- * hardware register boundary checks allows expanded access of MUSYCC.
- * Implement new musycc reg&bits namings.
- *
- *-----------------------------------------------------------------------------
*/
-char OSSIid_pmcc4_drvc[] =
-"@(#)pmcc4_drv.c - $Revision: 3.1 $ (c) Copyright 2002-2007 One Stop Systems, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/errno.h>
-#else
#include <linux/types.h>
#include "pmcc4_sysdep.h"
#include <linux/errno.h>
@@ -98,7 +29,6 @@ char OSSIid_pmcc4_drvc[] =
#include <linux/timer.h> /* include for timer */
#include <linux/hdlc.h>
#include <asm/io.h>
-#endif
#include "sbecom_inline_linux.h"
#include "libsbew.h"
diff --git a/drivers/staging/cxt1e1/pmcc4_ioctls.h b/drivers/staging/cxt1e1/pmcc4_ioctls.h
index 6b8d65673c7..56a1ee39be1 100644
--- a/drivers/staging/cxt1e1/pmcc4_ioctls.h
+++ b/drivers/staging/cxt1e1/pmcc4_ioctls.h
@@ -1,6 +1,3 @@
-/* RCSid: $Header: /home/rickd/projects/pmcc4/include/pmcc4_ioctls.h,v 2.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_IOCTLS_H_
#define _INC_PMCC4_IOCTLS_H_
@@ -22,19 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_ioctls.h,v $
- * Revision 2.0 2005/09/28 00:10:09 rickd
- * Add GNU license info. Switch Ioctls to sbe_ioc.h usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
#include "sbew_ioc.h"
diff --git a/drivers/staging/cxt1e1/sbe_bid.h b/drivers/staging/cxt1e1/sbe_bid.h
index 1f49b4061fb..abc2e55f62f 100644
--- a/drivers/staging/cxt1e1/sbe_bid.h
+++ b/drivers/staging/cxt1e1/sbe_bid.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_bid.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEBID_H_
#define _INC_SBEBID_H_
@@ -24,16 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_bid.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
#define SBE_BID_REG 0x00000000 /* Board ID Register */
diff --git a/drivers/staging/cxt1e1/sbe_promformat.h b/drivers/staging/cxt1e1/sbe_promformat.h
index 746f81b15c7..aad411d185f 100644
--- a/drivers/staging/cxt1e1/sbe_promformat.h
+++ b/drivers/staging/cxt1e1/sbe_promformat.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_promformat.h,v 2.2 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBE_PROMFORMAT_H_
#define _INC_SBE_PROMFORMAT_H_
@@ -24,19 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.2 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_promformat.h,v $
- * Revision 2.2 2005/09/28 00:10:09 rickd
- * Add EEPROM sample from C4T1E1 board.
- *
- * Revision 2.1 2005/05/04 17:18:24 rickd
- * Initial CI.
- *
- *-----------------------------------------------------------------------------
*/
@@ -85,12 +68,6 @@
*
*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
#define STRUCT_OFFSET(type, symbol) ((long)&(((type *)0)->symbol))
/*------------------------------------------------------------------------
@@ -150,8 +127,4 @@ extern "C"
FLD_TYPE2 fldType2;
} PROMFORMAT;
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBE_PROMFORMAT_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 9ea2c0c2061..68ed445ab0c 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbecom_inline_linux.h,v 1.2 2007/08/15 22:51:35 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBECOM_INLNX_H_
#define _INC_SBECOM_INLNX_H_
@@ -24,22 +20,6 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2007/08/15 22:51:35 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbecom_inline_linux.h,v $
- * Revision 1.2 2007/08/15 22:51:35 rickd
- * Remove duplicate version.h entry.
- *
- * Revision 1.1 2007/08/15 22:50:29 rickd
- * Update linux/config for 2.6.18 and later.
- *
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
index 4aa53f44ec0..e82be6afd1e 100644
--- a/drivers/staging/cxt1e1/sbeproc.h
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbeproc.h,v 1.2 2005/10/17 23:55:28 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEPROC_H_
#define _INC_SBEPROC_H_
@@ -23,22 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:28 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbeproc.h,v $
- * Revision 1.2 2005/10/17 23:55:28 rickd
- * sbecom_proc_brd_init() is an declared an __init function.
- *
- * Revision 1.1 2005/09/28 00:10:09 rickd
- * Remove unneeded inclusion of c4_private.h.
- *
- * Revision 1.0 2005/05/10 22:21:46 rickd
- * Initial check-in.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbew_ioc.h b/drivers/staging/cxt1e1/sbew_ioc.h
index 14d371904d1..ce9b15c7189 100644
--- a/drivers/staging/cxt1e1/sbew_ioc.h
+++ b/drivers/staging/cxt1e1/sbew_ioc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbew_ioc.h,v 1.0 2005/09/28 00:10:10 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEWIOC_H_
#define _INC_SBEWIOC_H_
@@ -24,55 +20,9 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:10 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbew_ioc.h,v $
- * Revision 1.0 2005/09/28 00:10:10 rickd
- * Initial revision
- *
- * Revision 1.6 2005/01/11 18:41:01 rickd
- * Add BRDADDR_GET Ioctl.
- *
- * Revision 1.5 2004/09/16 18:55:59 rickd
- * Start setting up for generic framer configuration Ioctl by switch
- * from tect3_framer_param[] to sbecom_framer_param[].
- *
- * Revision 1.4 2004/06/28 17:58:15 rickd
- * Rename IOC_TSMAP_[GS] to IOC_TSIOC_[GS] to support need for
- * multiple formats of data when setting up TimeSlots.
- *
- * Revision 1.3 2004/06/22 21:18:13 rickd
- * read_vec now() ONLY handles a single common wrt_vec array.
- *
- * Revision 1.1 2004/06/10 18:11:34 rickd
- * Add IID_GET Ioctl reference.
- *
- * Revision 1.0 2004/06/08 22:59:38 rickd
- * Initial revision
- *
- * Revision 2.0 2004/06/07 17:49:47 rickd
- * Initial library release following merge of wanc1t3/wan256 into
- * common elements for lib.
- *
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-#ifdef SunOS
-#include <sys/ioccom.h>
-#else
#include <linux/ioctl.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
#define SBE_LOCKFILE "/tmp/.sbewan.LCK"
@@ -128,9 +78,4 @@ extern "C"
#define SBE_IOC_MAXVEC 1
-
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBEWIOC_H_ ***/
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 0c1c6ca8c37..2c4069fcd98 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -155,7 +155,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
/* Some offsets in PCI config space that are actually used. */
-#define ET1310_PCI_MAX_PYLD 0x4C
#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
@@ -178,9 +177,7 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
/* RX defines */
#define USE_FBR0 1
-
#define FBR_CHUNKS 32
-
#define MAX_DESC_PER_RING_RX 1024
/* number of RFDs - default and min */
@@ -195,7 +192,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#endif
#define NIC_MIN_NUM_RFD 64
-
#define NUM_PACKETS_HANDLED 256
#define ALCATEL_MULTICAST_PKT 0x01000000
@@ -303,8 +299,8 @@ struct fbr_lookup {
dma_addr_t ring_physaddr;
void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- uint64_t real_physaddr;
- uint64_t offset;
+ u64 real_physaddr;
+ u64 offset;
u32 local_full;
u32 num_entries;
u32 buffsize;
@@ -427,7 +423,6 @@ struct tx_ring {
int since_irq;
};
-/* ADAPTER defines */
/*
* Do not change these values: if changed, then change also in respective
* TXdma and Rxdma engines
@@ -576,8 +571,6 @@ struct et131x_adapter {
struct net_device_stats net_stats;
};
-/* EEPROM functions */
-
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
u32 reg;
@@ -795,7 +788,7 @@ static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
-int et131x_init_eeprom(struct et131x_adapter *adapter)
+static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u8 eestatus;
@@ -868,7 +861,7 @@ int et131x_init_eeprom(struct et131x_adapter *adapter)
* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
u32 csr = 0x2000; /* FBR1 enable */
@@ -906,7 +899,7 @@ void et131x_rx_dma_enable(struct et131x_adapter *adapter)
* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
u32 csr;
/* Setup the receive dma configuration register */
@@ -928,7 +921,7 @@ void et131x_rx_dma_disable(struct et131x_adapter *adapter)
*
* Mainly used after a return to the D0 (full-power) state from a lower state.
*/
-void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the transmit dma configuration register for normal
* operation
@@ -948,23 +941,10 @@ static inline void add_12bit(u32 *v, int n)
}
/**
- * nic_rx_pkts - Checks the hardware for available packets
- * @adapter: pointer to our adapter
- *
- * Returns rfd, a pointer to our MPRFD.
- *
- * Checks the hardware for available packets, using completion ring
- * If packets are available, it gets an RFD from the recv_list, attaches
- * the packet to it, puts the RFD in the RecvPendList, and also returns
- * the pointer to the RFD.
- */
-/* MAC functions */
-
-/**
* et1310_config_mac_regs1 - Initialize the first part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
struct mac_regs __iomem *macregs = &adapter->regs->mac;
u32 station1;
@@ -1024,7 +1004,7 @@ void et1310_config_mac_regs1(struct et131x_adapter *adapter)
* et1310_config_mac_regs2 - Initialize the second part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1105,7 +1085,7 @@ void et1310_config_mac_regs2(struct et131x_adapter *adapter)
*
* Returns 0 if the device is not in phy coma, 1 if it is in phy coma
*/
-int et1310_in_phy_coma(struct et131x_adapter *adapter)
+static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -1114,15 +1094,13 @@ int et1310_in_phy_coma(struct et131x_adapter *adapter)
return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
-void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- uint32_t nIndex;
- uint32_t result;
- uint32_t hash1 = 0;
- uint32_t hash2 = 0;
- uint32_t hash3 = 0;
- uint32_t hash4 = 0;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ u32 hash3 = 0;
+ u32 hash4 = 0;
u32 pm_csr;
/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
@@ -1131,10 +1109,13 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
* driver.
*/
if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
+ int i;
+
/* Loop through our multicast array and set up the device */
- for (nIndex = 0; nIndex < adapter->multicast_addr_count;
- nIndex++) {
- result = ether_crc(6, adapter->multicast_list[nIndex]);
+ for (i = 0; i < adapter->multicast_addr_count; i++) {
+ u32 result;
+
+ result = ether_crc(6, adapter->multicast_list[i]);
result = (result & 0x3F800000) >> 23;
@@ -1163,7 +1144,7 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
}
}
-void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
u32 uni_pf1;
@@ -1203,7 +1184,7 @@ void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
}
}
-void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
struct phy_device *phydev = adapter->phydev;
@@ -1334,7 +1315,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0x9, &rxmac->ctrl);
}
-void et1310_config_txmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
@@ -1348,7 +1329,7 @@ void et1310_config_txmac_regs(struct et131x_adapter *adapter)
writel(0x40, &txmac->cf_param);
}
-void et1310_config_macstat_regs(struct et131x_adapter *adapter)
+static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
struct macstat_regs __iomem *macstat =
&adapter->regs->macstat;
@@ -1422,7 +1403,7 @@ void et1310_config_macstat_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
+static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
u8 reg, u16 *value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1478,7 +1459,7 @@ int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
return status;
}
-int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
struct phy_device *phydev = adapter->phydev;
@@ -1498,7 +1479,7 @@ int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
*
* Return 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
struct phy_device *phydev = adapter->phydev;
@@ -1564,8 +1545,9 @@ int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
}
/* Still used from _mac for BIT_READ */
-void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
- u16 regnum, u16 bitnum, u8 *value)
+static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
+ u16 action, u16 regnum, u16 bitnum,
+ u8 *value)
{
u16 reg;
u16 mask = 0x0001 << bitnum;
@@ -1591,7 +1573,7 @@ void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
}
}
-void et1310_config_flow_control(struct et131x_adapter *adapter)
+static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -1632,7 +1614,7 @@ void et1310_config_flow_control(struct et131x_adapter *adapter)
* et1310_update_macstat_host_counters - Update the local copy of the statistics
* @adapter: pointer to the adapter structure
*/
-void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
+static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
struct ce_stats *stats = &adapter->stats;
struct macstat_regs __iomem *macstat =
@@ -1664,7 +1646,7 @@ void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
* the statistics held in the adapter structure, checking the "wrap"
* bit for each counter.
*/
-void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
+static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
u32 carry_reg1;
u32 carry_reg2;
@@ -1714,9 +1696,7 @@ void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}
-/* PHY functions */
-
-int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
+static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1731,7 +1711,7 @@ int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
return value;
}
-int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
+static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1739,7 +1719,7 @@ int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
return et131x_mii_write(adapter, reg, value);
}
-int et131x_mdio_reset(struct mii_bus *bus)
+static int et131x_mdio_reset(struct mii_bus *bus)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1759,7 +1739,7 @@ int et131x_mdio_reset(struct mii_bus *bus)
* Can't you see that this code processed
* Phy power, phy power..
*/
-void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
u16 data;
@@ -1775,7 +1755,7 @@ void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
* @adapter: pointer to our private adapter structure
*
*/
-void et131x_xcvr_init(struct et131x_adapter *adapter)
+static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 imr;
u16 isr;
@@ -1822,7 +1802,7 @@ void et131x_xcvr_init(struct et131x_adapter *adapter)
*
* Used to configure the global registers on the JAGCore
*/
-void et131x_configure_global_regs(struct et131x_adapter *adapter)
+static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
struct global_regs __iomem *regs = &adapter->regs->global;
@@ -1863,13 +1843,11 @@ void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/* PM functions */
-
/**
* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
* @adapter: pointer to our adapter structure
*/
-void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -1987,7 +1965,7 @@ void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
* Configure the transmit engine with the ring buffers we have created
* and prepare it for use.
*/
-void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
@@ -2017,7 +1995,7 @@ void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-void et131x_adapter_setup(struct et131x_adapter *adapter)
+static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
/* Configure the JAGCore */
et131x_configure_global_regs(adapter);
@@ -2044,7 +2022,7 @@ void et131x_adapter_setup(struct et131x_adapter *adapter)
* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_soft_reset(struct et131x_adapter *adapter)
+static void et131x_soft_reset(struct et131x_adapter *adapter)
{
/* Disable MAC Core */
writel(0xc00f0000, &adapter->regs->mac.cfg1);
@@ -2062,7 +2040,7 @@ void et131x_soft_reset(struct et131x_adapter *adapter)
* Enable the appropriate interrupts on the ET131x according to our
* configuration
*/
-void et131x_enable_interrupts(struct et131x_adapter *adapter)
+static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
u32 mask;
@@ -2082,7 +2060,7 @@ void et131x_enable_interrupts(struct et131x_adapter *adapter)
*
* Block all interrupts from the et131x device at the device itself
*/
-void et131x_disable_interrupts(struct et131x_adapter *adapter)
+static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
/* Disable all global interrupts */
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
@@ -2092,7 +2070,7 @@ void et131x_disable_interrupts(struct et131x_adapter *adapter)
* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_tx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
/* Setup the tramsmit dma configuration register */
writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
@@ -2103,7 +2081,7 @@ void et131x_tx_dma_disable(struct et131x_adapter *adapter)
* et131x_enable_txrx - Enable tx/rx queues
* @netdev: device to be enabled
*/
-void et131x_enable_txrx(struct net_device *netdev)
+static void et131x_enable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2123,7 +2101,7 @@ void et131x_enable_txrx(struct net_device *netdev)
* et131x_disable_txrx - Disable tx/rx queues
* @netdev: device to be disabled
*/
-void et131x_disable_txrx(struct net_device *netdev)
+static void et131x_disable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2142,7 +2120,7 @@ void et131x_disable_txrx(struct net_device *netdev)
* et131x_init_send - Initialize send data structures
* @adapter: pointer to our private adapter structure
*/
-void et131x_init_send(struct et131x_adapter *adapter)
+static void et131x_init_send(struct et131x_adapter *adapter)
{
struct tcb *tcb;
u32 ct;
@@ -2192,7 +2170,7 @@ void et131x_init_send(struct et131x_adapter *adapter)
* indicating linkup status, call the MPDisablePhyComa routine to
* restore JAGCore and gigE PHY
*/
-void et1310_enable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 pmcsr;
@@ -2231,7 +2209,7 @@ void et1310_enable_phy_coma(struct et131x_adapter *adapter)
* et1310_disable_phy_coma - Disable the Phy Coma Mode
* @adapter: pointer to our adapter structure
*/
-void et1310_disable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -2269,8 +2247,6 @@ void et1310_disable_phy_coma(struct et131x_adapter *adapter)
et131x_enable_txrx(adapter->netdev);
}
-/* RX functions */
-
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
u32 tmp_free_buff_ring = *free_buff_ring;
@@ -2296,16 +2272,14 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
* @offset: pointer to the offset variable
* @mask: correct mask
*/
-void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- uint64_t *phys_addr,
- uint64_t *offset, uint64_t mask)
+static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
+ u64 *phys_addr, u64 *offset,
+ u64 mask)
{
- uint64_t new_addr;
+ u64 new_addr = *phys_addr & ~mask;
*offset = 0;
- new_addr = *phys_addr & ~mask;
-
if (new_addr != *phys_addr) {
/* Move to next aligned block */
new_addr += mask + 1;
@@ -2325,7 +2299,7 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter,
* Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
* and the Packet Status Ring.
*/
-int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
u32 i, j;
u32 bufsize;
@@ -2454,8 +2428,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->fbr[1]->offset);
#endif
for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr1_offset;
u64 fbr1_tmp_physaddr;
+ u64 fbr1_offset;
u32 fbr1_align;
/* This code allocates an area of memory big enough for N
@@ -2520,8 +2494,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
#ifdef USE_FBR0
/* Same for FBR0 (if in use) */
for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr0_offset;
u64 fbr0_tmp_physaddr;
+ u64 fbr0_offset;
fbr_chunksize =
((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
@@ -2629,7 +2603,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
* et131x_rx_dma_memory_free - Free all memory allocated within this module.
* @adapter: pointer to our private adapter structure
*/
-void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
u32 index;
u32 bufsize;
@@ -2774,7 +2748,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h)
*/
-int et131x_init_recv(struct et131x_adapter *adapter)
+static int et131x_init_recv(struct et131x_adapter *adapter)
{
int status = -ENOMEM;
struct rfd *rfd = NULL;
@@ -2824,7 +2798,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
* @adapter: pointer to our adapter structure
*/
-void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
+static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -2918,6 +2892,17 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
+/**
+ * nic_rx_pkts - Checks the hardware for available packets
+ * @adapter: pointer to our adapter
+ *
+ * Returns rfd, a pointer to our MPRFD.
+ *
+ * Checks the hardware for available packets, using completion ring
+ * If packets are available, it gets an RFD from the recv_list, attaches
+ * the packet to it, puts the RFD in the RecvPendList, and also returns
+ * the pointer to the RFD.
+ */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -3139,7 +3124,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
*
* Assumption, Rcv spinlock has been acquired.
*/
-void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
{
struct rfd *rfd = NULL;
u32 count = 0;
@@ -3188,8 +3173,6 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->rx_ring.unfinished_receives = false;
}
-/* TX functions */
-
/**
* et131x_tx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
@@ -3202,7 +3185,7 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
* memory. The device will update the "status" in memory each time it xmits a
* packet.
*/
-int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
int desc_size = 0;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3256,7 +3239,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h).
*/
-void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
@@ -3578,7 +3561,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
*
* Return 0 in almost all cases; non-zero value in extreme hard failure only
*/
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
+static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -3696,7 +3679,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
+static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
struct tcb *tcb;
unsigned long flags;
@@ -3743,7 +3726,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 serviced;
@@ -3798,8 +3781,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/* ETHTOOL functions */
-
static int et131x_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
@@ -3965,18 +3946,16 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-void et131x_set_ethtool_ops(struct net_device *netdev)
+static void et131x_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}
-/* PCI functions */
-
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_hwaddr_init(struct et131x_adapter *adapter)
+static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
/* If have our default mac from init and no mac address from
* EEPROM then we need to generate the last octet and set it on the
@@ -4022,24 +4001,31 @@ void et131x_hwaddr_init(struct et131x_adapter *adapter)
static int et131x_pci_init(struct et131x_adapter *adapter,
struct pci_dev *pdev)
{
- int i;
- u8 max_payload;
- u8 read_size_reg;
+ int cap = pci_pcie_cap(pdev);
+ u16 max_payload;
+ u16 ctl;
+ int i, rc;
- if (et131x_init_eeprom(adapter) < 0)
- return -EIO;
+ rc = et131x_init_eeprom(adapter);
+ if (rc < 0)
+ goto out;
+ if (!cap) {
+ dev_err(&pdev->dev, "Missing PCIe capabilities\n");
+ goto err_out;
+ }
+
/* Let's set up the PORT LOGIC Register. First we need to know what
* the max_payload_size is
*/
- if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max Payload Size\n");
- return -EIO;
+ goto err_out;
}
/* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07; /* Only the lower 3 bits are valid */
+ max_payload &= 0x07;
if (max_payload < 2) {
static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -4049,13 +4035,13 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
acknak[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for ACK/NAK\n");
- return -EIO;
+ goto err_out;
}
if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
replay[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for Replay Timer\n");
- return -EIO;
+ goto err_out;
}
}
@@ -4065,23 +4051,22 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Latency Timers\n");
- return -EIO;
+ goto err_out;
}
/* Change the max read size to 2k */
- if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
- read_size_reg &= 0x8f;
- read_size_reg |= 0x40;
+	ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
- if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
+ if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
/* Get MAC address from config space if an eeprom exists, otherwise
@@ -4096,11 +4081,15 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
adapter->rom_addr + i)) {
dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
- return -EIO;
+ goto err_out;
}
}
memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
- return 0;
+out:
+ return rc;
+err_out:
+ rc = -EIO;
+ goto out;
}
/**
@@ -4110,7 +4099,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *) data;
struct phy_device *phydev = adapter->phydev;
@@ -4153,7 +4142,7 @@ void et131x_error_timer_handler(unsigned long data)
*
* Allocate all the memory blocks for send, receive and others.
*/
-int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
int status;
@@ -4188,7 +4177,7 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
* @adapter: pointer to our private adapter structure
*/
-void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
/* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
@@ -4364,10 +4353,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->pdev = pci_dev_get(pdev);
adapter->netdev = netdev;
- /* Do the same for the netdev struct */
- netdev->irq = pdev->irq;
- netdev->base_addr = pci_resource_start(pdev, 0);
-
/* Initialize spinlocks here */
spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->tcb_send_qlock);
@@ -4400,6 +4385,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
+ phy_disconnect(adapter->phydev);
mdiobus_unregister(adapter->mii_bus);
kfree(adapter->mii_bus->irq);
mdiobus_free(adapter->mii_bus);
@@ -4417,7 +4403,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
* et131x_up - Bring up a device for use.
* @netdev: device to be opened
*/
-void et131x_up(struct net_device *netdev)
+static void et131x_up(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4429,7 +4415,7 @@ void et131x_up(struct net_device *netdev)
* et131x_down - Bring down the device
* @netdev: device to be broght down
*/
-void et131x_down(struct net_device *netdev)
+static void et131x_down(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4475,8 +4461,6 @@ static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS NULL
#endif
-/* ISR functions */
-
/**
* et131x_isr - The Interrupt Service Routine for the driver.
* @irq: the IRQ on which the interrupt was received.
@@ -4573,7 +4557,7 @@ out:
* scheduled to run in a deferred context by the ISR. This is where the ISR's
* work actually gets done.
*/
-void et131x_isr_handler(struct work_struct *work)
+static void et131x_isr_handler(struct work_struct *work)
{
struct et131x_adapter *adapter =
container_of(work, struct et131x_adapter, task);
@@ -4774,8 +4758,6 @@ void et131x_isr_handler(struct work_struct *work)
et131x_enable_interrupts(adapter);
}
-/* NETDEV functions */
-
/**
* et131x_stats - Return the current device statistics.
* @netdev: device whose stats are being queried
@@ -4829,10 +4811,12 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_open(struct net_device *netdev)
+static int et131x_open(struct net_device *netdev)
{
- int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int irq = pdev->irq;
+ int result;
/* Start the timer to track NIC errors */
init_timer(&adapter->error_timer);
@@ -4841,12 +4825,9 @@ int et131x_open(struct net_device *netdev)
adapter->error_timer.data = (unsigned long)adapter;
add_timer(&adapter->error_timer);
- /* Register our IRQ */
- result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
- netdev->name, netdev);
+ result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
if (result) {
- dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
- netdev->irq);
+ dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
return result;
}
@@ -4863,14 +4844,14 @@ int et131x_open(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_close(struct net_device *netdev)
+static int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
et131x_down(netdev);
adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
- free_irq(netdev->irq, netdev);
+ free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
return del_timer_sync(&adapter->error_timer);
@@ -4905,8 +4886,8 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
*/
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
+ int filter = adapter->packet_filter;
int status = 0;
- uint32_t filter = adapter->packet_filter;
u32 ctrl;
u32 pf_ctrl;
@@ -4968,7 +4949,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- uint32_t packet_filter = 0;
+ int packet_filter;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
@@ -5249,40 +5230,6 @@ static const struct net_device_ops et131x_netdev_ops = {
};
/**
- * et131x_device_alloc
- *
- * Returns pointer to the allocated and initialized net_device struct for
- * this device.
- *
- * Create instances of net_device and wl_private for the new adapter and
- * register the device's entry points in the net_device structure.
- */
-struct net_device *et131x_device_alloc(void)
-{
- struct net_device *netdev;
-
- /* Alloc net_device and adapter structs */
- netdev = alloc_etherdev(sizeof(struct et131x_adapter));
-
- if (!netdev) {
- printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
- return NULL;
- }
-
- /*
- * Setup the function registration table (and other data) for a
- * net_device
- */
- netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
- netdev->netdev_ops = &et131x_netdev_ops;
-
- /* Poll? */
- /* netdev->poll = &et131x_poll; */
- /* netdev->poll_controller = &et131x_poll_controller; */
- return netdev;
-}
-
-/**
* et131x_pci_setup - Perform device initialization
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
@@ -5297,24 +5244,26 @@ struct net_device *et131x_device_alloc(void)
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int result;
struct net_device *netdev;
struct et131x_adapter *adapter;
+ int rc;
int ii;
- result = pci_enable_device(pdev);
- if (result) {
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "pci_enable_device() failed\n");
- goto err_out;
+ goto out;
}
/* Perform some basic PCI checks */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Can't find PCI device's base address\n");
+ rc = -ENODEV;
goto err_disable;
}
- if (pci_request_regions(pdev, DRIVER_NAME)) {
+ rc = pci_request_regions(pdev, DRIVER_NAME);
+ if (rc < 0) {
dev_err(&pdev->dev, "Can't get PCI resources\n");
goto err_disable;
}
@@ -5323,46 +5272,50 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
/* Check the DMA addressing support of this device */
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 32 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- result = -EIO;
+ rc = -EIO;
goto err_release_res;
}
/* Allocate netdev and private adapter structs */
- netdev = et131x_device_alloc();
+ netdev = alloc_etherdev(sizeof(struct et131x_adapter));
if (!netdev) {
dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_release_res;
}
+ netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
+ netdev->netdev_ops = &et131x_netdev_ops;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
et131x_set_ethtool_ops(netdev);
adapter = et131x_adapter_init(netdev, pdev);
- /* Initialise the PCI setup for the device */
- et131x_pci_init(adapter, pdev);
+ rc = et131x_pci_init(adapter, pdev);
+ if (rc < 0)
+ goto err_free_dev;
/* Map the bus-relative registers to system virtual memory */
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "Cannot map device registers\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_free_dev;
}
@@ -5376,8 +5329,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
et131x_disable_interrupts(adapter);
/* Allocate DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result) {
+ rc = et131x_adapter_memory_alloc(adapter);
+ if (rc < 0) {
dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
goto err_iounmap;
}
@@ -5395,6 +5348,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
adapter->boot_coma = 0;
et1310_disable_phy_coma(adapter);
+ rc = -ENOMEM;
+
/* Setup the mii_bus struct */
adapter->mii_bus = mdiobus_alloc();
if (!adapter->mii_bus) {
@@ -5418,13 +5373,14 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
for (ii = 0; ii < PHY_MAX_ADDR; ii++)
adapter->mii_bus->irq[ii] = PHY_POLL;
- if (mdiobus_register(adapter->mii_bus)) {
+ rc = mdiobus_register(adapter->mii_bus);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to register MII bus\n");
- mdiobus_free(adapter->mii_bus);
goto err_mdio_free_irq;
}
- if (et131x_mii_probe(netdev)) {
+ rc = et131x_mii_probe(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
goto err_mdio_unregister;
}
@@ -5440,10 +5396,10 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
*/
/* Register the net_device struct with the Linux network layer */
- result = register_netdev(netdev);
- if (result != 0) {
+ rc = register_netdev(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "register_netdev() failed\n");
- goto err_mdio_unregister;
+ goto err_phy_disconnect;
}
/* Register the net_device struct with the PCI subsystem. Save a copy
@@ -5451,10 +5407,11 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
* been initialized, just in case it needs to be quickly restored.
*/
pci_set_drvdata(pdev, netdev);
- pci_save_state(adapter->pdev);
-
- return result;
+out:
+ return rc;
+err_phy_disconnect:
+ phy_disconnect(adapter->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
@@ -5472,8 +5429,7 @@ err_release_res:
pci_release_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err_out:
- return result;
+ goto out;
}
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 2babb034a25..d8efed65744 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -867,30 +867,4 @@ static struct usb_driver usb_alphatrack_driver = {
.id_table = usb_alphatrack_table,
};
-/**
- * usb_alphatrack_init
- */
-static int __init usb_alphatrack_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&usb_alphatrack_driver);
- if (retval)
- err("usb_register failed for the " __FILE__
- " driver. Error number %d\n", retval);
-
- return retval;
-}
-
-/**
- * usb_alphatrack_exit
- */
-static void __exit usb_alphatrack_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_alphatrack_driver);
-}
-
-module_init(usb_alphatrack_init);
-module_exit(usb_alphatrack_exit);
+module_usb_driver(usb_alphatrack_driver);
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 8894ab14f16..cf47a5d191f 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -199,7 +199,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
unsigned long temp; \
- if (strict_strtoul(buf, 10, &temp)) \
+ if (kstrtoul(buf, 10, &temp)) \
return -EINVAL; \
t->value = temp; \
return count; \
@@ -971,29 +971,4 @@ static struct usb_driver usb_tranzport_driver = {
.id_table = usb_tranzport_table,
};
-/**
- * usb_tranzport_init
- */
-static int __init usb_tranzport_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&usb_tranzport_driver);
- if (retval)
- err("usb_register failed for the " __FILE__
- " driver. Error number %d\n", retval);
- return retval;
-}
-/**
- * usb_tranzport_exit
- */
-
-static void __exit usb_tranzport_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_tranzport_driver);
-}
-
-module_init(usb_tranzport_init);
-module_exit(usb_tranzport_exit);
+module_usb_driver(usb_tranzport_driver);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index b3d743a3d30..917bbb082a6 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -344,10 +344,10 @@ static void ft1000_reset_asic(struct net_device *dev)
}
mdelay(1);
if (info->AsicID == ELECTRABUZZ_ID) {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
} else {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
}
// clear interrupts
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index aaf44c35982..43b1d363107 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -601,7 +601,7 @@ static void ft1000_reset_asic(struct net_device *dev)
mdelay(1);
- /* set watermark to -1 in order to not generate an interrrupt */
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK);
/* clear interrupts */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 79482ac1c48..84c38d5c939 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -263,24 +263,4 @@ static struct usb_driver ft1000_usb_driver = {
.id_table = id_table,
};
-static int __init usb_ft1000_init(void)
-{
- int ret = 0;
-
- DEBUG("Initialize and register the driver\n");
-
- ret = usb_register(&ft1000_usb_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit usb_ft1000_exit(void)
-{
- DEBUG("Deregister the driver\n");
- usb_deregister(&ft1000_usb_driver);
-}
-
-module_init(usb_ft1000_init);
-module_exit(usb_ft1000_exit);
+module_usb_driver(ft1000_usb_driver);
diff --git a/drivers/staging/gma500/Kconfig b/drivers/staging/gma500/Kconfig
index bfe2166acda..c7a2b3bc0a1 100644
--- a/drivers/staging/gma500/Kconfig
+++ b/drivers/staging/gma500/Kconfig
@@ -1,6 +1,6 @@
config DRM_PSB
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86
+ depends on DRM && PCI && X86 && BROKEN
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
index 114b99a1ce1..b8f78ebbb14 100644
--- a/drivers/staging/gma500/accel_2d.c
+++ b/drivers/staging/gma500/accel_2d.c
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
return;
offset = psbfb->gtt->offset;
- stride = fb->pitch;
+ stride = fb->pitches[0];
switch (fb->depth) {
case 8:
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
index 7b97c600eff..c63a32776a9 100644
--- a/drivers/staging/gma500/cdv_intel_display.c
+++ b/drivers/staging/gma500/cdv_intel_display.c
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
if (ret < 0)
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
index 3f39a37456f..b00761cba14 100644
--- a/drivers/staging/gma500/framebuffer.c
+++ b/drivers/staging/gma500/framebuffer.c
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -273,14 +274,17 @@ static struct fb_ops psbfb_unaccel_ops = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct psb_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
+ u32 bpp, depth;
int ret;
- if (mode_cmd->pitch & 63)
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 8:
case 16:
case 24:
@@ -313,7 +317,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
struct psb_framebuffer *fb;
@@ -387,27 +391,28 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
int gtt_roll = 1;
+ u32 bpp, depth;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
+ bpp = sizes->surface_bpp;
/* No 24bit packed */
- if (mode_cmd.bpp == 24)
- mode_cmd.bpp = 32;
+ if (bpp == 24)
+ bpp = 32;
/* Acceleration via the GTT requires pitch to be 4096 byte aligned
(ie 1024 or 2048 pixels in normal use) */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096);
+ depth = sizes->surface_depth;
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
@@ -421,10 +426,10 @@ static int psbfb_create(struct psb_fbdev *fbdev,
gtt_roll = 0; /* Don't use GTT accelerated scrolling */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+ depth = sizes->surface_depth;
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page
@@ -443,6 +448,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
}
info->par = fbdev;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret)
goto out_unref;
@@ -504,7 +511,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
@@ -546,7 +553,7 @@ out_err1:
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
- struct drm_mode_fb_cmd *cmd)
+ struct drm_mode_fb_cmd2 *cmd)
{
struct gtt_range *r;
struct drm_gem_object *obj;
@@ -555,7 +562,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
* Find the GEM object and thus the gtt range object that is
* to back this space
*/
- obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
index 8eb827ecc3d..0b37b7b6b02 100644
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ b/drivers/staging/gma500/mdfld_intel_display.c
@@ -390,9 +390,9 @@ int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_f
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
index c9311a573c2..980837e37d8 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/staging/gma500/mrst_crtc.c
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
index 436fe9733b1..40825703833 100644
--- a/drivers/staging/gma500/power.c
+++ b/drivers/staging/gma500/power.c
@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
ret = gma_resume_pci(dev->pdev);
if (ret == 0) {
/* FIXME: we want to defer this for Medfield/Oaktrail */
- gma_resume_display(dev);
+ gma_resume_display(dev->pdev);
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
pm_runtime_get(&dev->pdev->dev);
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 986a04d16ba..95816808f86 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -1151,6 +1151,17 @@ static struct vm_operations_struct psb_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static const struct file_operations gma500_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
@@ -1179,17 +1190,7 @@ static struct drm_driver driver = {
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
.dumb_destroy = psb_gem_dumb_destroy,
-
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
- },
+ .fops = &gma500_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = PSB_DRM_DRIVER_DATE,
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
index caa9d86f26d..85659613ae6 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/staging/gma500/psb_intel_display.c
@@ -367,9 +367,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 072185ebe95..60ac479a290 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -3,15 +3,3 @@ config HYPERV_STORAGE
depends on HYPERV && SCSI
help
Select this option to enable the Hyper-V virtual storage driver.
-
-config HYPERV_NET
- tristate "Microsoft Hyper-V virtual network driver"
- depends on HYPERV && NET
- help
- Select this option to enable the Hyper-V virtual network driver.
-
-config HYPERV_MOUSE
- tristate "Microsoft Hyper-V mouse driver"
- depends on HYPERV && HID
- help
- Select this option to enable the Hyper-V mouse driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 0f55ceee919..af95a6b7e43 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,6 +1,3 @@
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
hv_storvsc-y := storvsc_drv.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index ed4d63619df..dea7d92dfdc 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,7 +1,5 @@
TODO:
- - audit the network driver
- audit the scsi driver
Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Hank Janssen <hjanssen@microsoft.com>, Haiyang Zhang <haiyangz@microsoft.com>,
-K. Y. Srinivasan <kys@microsoft.com>
+Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index ae8c33e7849..eb853f71089 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
+#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -42,6 +43,7 @@
#include <scsi/scsi_dbg.h>
+#define STORVSC_MIN_BUF_NR 64
#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
@@ -78,7 +80,7 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
/* V1 Beta 0.1 */
/* V1 RC < 2008/1/31 1.0 */
/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
+#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
@@ -104,7 +106,8 @@ enum vstor_packet_operation {
VSTOR_OPERATION_END_INITIALIZATION = 8,
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
VSTOR_OPERATION_QUERY_PROPERTIES = 10,
- VSTOR_OPERATION_MAXIMUM = 10
+ VSTOR_OPERATION_ENUMERATE_BUS = 11,
+ VSTOR_OPERATION_MAXIMUM = 11
};
/*
@@ -234,8 +237,6 @@ struct vstor_packet {
#define STORVSC_MAX_CHANNELS 1
#define STORVSC_MAX_CMD_LEN 16
-struct hv_storvsc_request;
-
/* Matches Windows-end */
enum storvsc_request_type {
WRITE_TYPE,
@@ -284,9 +285,13 @@ struct storvsc_device {
struct hv_storvsc_request reset_request;
};
+struct stor_mem_pools {
+ struct kmem_cache *request_pool;
+ mempool_t *request_mempool;
+};
+
struct hv_host_device {
struct hv_device *dev;
- struct kmem_cache *request_pool;
unsigned int port;
unsigned char path;
unsigned char target;
@@ -302,6 +307,51 @@ struct storvsc_cmd_request {
struct hv_storvsc_request request;
};
+struct storvsc_scan_work {
+ struct work_struct work;
+ struct Scsi_Host *host;
+ uint lun;
+};
+
+static void storvsc_bus_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ int id, order_id;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ for (id = 0; id < wrk->host->max_id; ++id) {
+ if (wrk->host->reverse_ordering)
+ order_id = wrk->host->max_id - id - 1;
+ else
+ order_id = id;
+
+ scsi_scan_target(&wrk->host->shost_gendev, 0,
+ order_id, SCAN_WILD_CARD, 1);
+ }
+ kfree(wrk);
+}
+
+static void storvsc_remove_lun(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ if (!scsi_host_get(wrk->host))
+ goto done;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ scsi_host_put(wrk->host);
+
+done:
+ kfree(wrk);
+}
+
static inline struct storvsc_device *get_out_stor_device(
struct hv_device *device)
{
@@ -549,11 +599,25 @@ static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
+ struct storvsc_scan_work *work;
+ struct storvsc_device *stor_device;
+
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(device, vstor_packet, request);
break;
+
case VSTOR_OPERATION_REMOVE_DEVICE:
+ case VSTOR_OPERATION_ENUMERATE_BUS:
+ stor_device = get_in_stor_device(device);
+ work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, storvsc_bus_scan);
+ work->host = stor_device->host;
+ schedule_work(&work->work);
+ break;
default:
break;
@@ -729,19 +793,48 @@ static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
- /*
- * This enables luns to be located sparsely. Otherwise, we may not
- * discovered them.
- */
- sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
+ struct stor_mem_pools *memp;
+ int number = STORVSC_MIN_BUF_NR;
+
+ memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
+ if (!memp)
+ return -ENOMEM;
+
+ memp->request_pool =
+ kmem_cache_create(dev_name(&sdevice->sdev_dev),
+ sizeof(struct storvsc_cmd_request), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (!memp->request_pool)
+ goto err0;
+
+ memp->request_mempool = mempool_create(number, mempool_alloc_slab,
+ mempool_free_slab,
+ memp->request_pool);
+
+ if (!memp->request_mempool)
+ goto err1;
+
+ sdevice->hostdata = memp;
+
return 0;
+
+err1:
+ kmem_cache_destroy(memp->request_pool);
+
+err0:
+ kfree(memp);
+ return -ENOMEM;
}
-static int storvsc_merge_bvec(struct request_queue *q,
- struct bvec_merge_data *bmd, struct bio_vec *bvec)
+static void storvsc_device_destroy(struct scsi_device *sdevice)
{
- /* checking done by caller. */
- return bvec->bv_len;
+ struct stor_mem_pools *memp = sdevice->hostdata;
+
+ mempool_destroy(memp->request_mempool);
+ kmem_cache_destroy(memp->request_pool);
+ kfree(memp);
+ sdevice->hostdata = NULL;
}
static int storvsc_device_configure(struct scsi_device *sdevice)
@@ -751,8 +844,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
- blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
-
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
return 0;
@@ -802,12 +893,14 @@ static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
unsigned int sg_count,
- unsigned int len)
+ unsigned int len,
+ int write)
{
int i;
int num_pages;
struct scatterlist *bounce_sgl;
struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
@@ -819,7 +912,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
}
return bounce_sgl;
@@ -833,7 +926,8 @@ cleanup:
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
{
int i;
int j = 0;
@@ -874,6 +968,24 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
kunmap_atomic((void *)bounce_addr, KM_IRQ0);
j++;
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ orig_sgl[i].offset),
+ KM_IRQ0);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
/* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1)
bounce_addr =
@@ -965,18 +1077,13 @@ static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)host->hostdata;
scsi_remove_host(host);
scsi_host_put(host);
storvsc_dev_remove(dev);
- if (host_dev->request_pool) {
- kmem_cache_destroy(host_dev->request_pool);
- host_dev->request_pool = NULL;
- }
+
return 0;
}
@@ -1014,7 +1121,7 @@ static int storvsc_host_reset(struct hv_device *device)
stor_device = get_out_stor_device(device);
if (!stor_device)
- return -ENODEV;
+ return FAILED;
request = &stor_device->reset_request;
vstor_packet = &request->vstor_packet;
@@ -1031,13 +1138,11 @@ static int storvsc_host_reset(struct hv_device *device)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return FAILED;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return TIMEOUT_ERROR;
/*
@@ -1045,8 +1150,7 @@ static int storvsc_host_reset(struct hv_device *device)
* should have been flushed out and return to us
*/
-cleanup:
- return ret;
+ return SUCCESS;
}
@@ -1055,16 +1159,10 @@ cleanup:
*/
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
- int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
struct hv_device *dev = host_dev->dev;
- ret = storvsc_host_reset(dev);
- if (ret != 0)
- return ret;
-
- return ret;
+ return storvsc_host_reset(dev);
}
@@ -1076,21 +1174,22 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
struct storvsc_cmd_request *cmd_request =
(struct storvsc_cmd_request *)request->context;
struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
vm_srb = &request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE) {
+ if (vm_srb->data_in == READ_TYPE)
copy_from_bounce_buffer(scsi_sglist(scmnd),
cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
- destroy_bounce_buffer(cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- }
}
/*
@@ -1103,6 +1202,29 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
else
scmnd->result = vm_srb->scsi_status;
+ /*
+ * If the LUN is invalid, remove the device.
+ */
+ if (vm_srb->srb_status == 0x20) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
@@ -1120,7 +1242,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
scsi_done_fn(scmnd);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
}
static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
@@ -1131,7 +1253,7 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
switch (scsi_op) {
/* smartd sends this command, which will offline the device */
case SET_WINDOW:
- scmnd->result = DID_ERROR << 16;
+ scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
break;
default:
@@ -1143,12 +1265,10 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
/*
* storvsc_queuecommand - Initiate command processing
*/
-static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
- void (*done)(struct scsi_cmnd *))
+static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct hv_storvsc_request *request;
struct storvsc_cmd_request *cmd_request;
@@ -1157,9 +1277,10 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
if (storvsc_check_scsi_cmd(scmnd) == false) {
- done(scmnd);
+ scmnd->scsi_done(scmnd);
return 0;
}
@@ -1172,16 +1293,14 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
goto retry_request;
}
- scmnd->scsi_done = done;
-
request_size = sizeof(struct storvsc_cmd_request);
- cmd_request = kmem_cache_zalloc(host_dev->request_pool,
+ cmd_request = mempool_alloc(memp->request_mempool,
GFP_ATOMIC);
- if (!cmd_request) {
- scmnd->scsi_done = NULL;
+ if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
- }
+
+ memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
/* Setup the cmd request */
cmd_request->bounce_sgl_count = 0;
@@ -1231,12 +1350,12 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd));
+ scsi_bufflen(scmnd),
+ vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
- kmem_cache_free(host_dev->request_pool,
- cmd_request);
+ mempool_free(cmd_request,
+ memp->request_mempool);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1278,9 +1397,8 @@ retry_request:
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
ret = SCSI_MLQUEUE_DEVICE_BUSY;
@@ -1289,9 +1407,6 @@ retry_request:
return ret;
}
-static DEF_SCSI_QCMD(storvsc_queuecommand)
-
-
/* Scsi driver */
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
@@ -1300,6 +1415,7 @@ static struct scsi_host_template scsi_driver = {
.queuecommand = storvsc_queuecommand,
.eh_host_reset_handler = storvsc_host_reset_handler,
.slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 1,
/* 64 max_queue * 1 target */
@@ -1308,14 +1424,7 @@ static struct scsi_host_template scsi_driver = {
/* no use setting to 0 since ll_blk_rw reset it to 1 */
/* currently 32 */
.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- /*
- * ENABLE_CLUSTERING allows mutiple physically contig bio_vecs to merge
- * into 1 sg element. If set, we must limit the max_segment_size to
- * PAGE_SIZE, otherwise we may get 1 sg element that represents
- * multiple
- */
- /* physically contig pfns (ie sg[x].length > PAGE_SIZE). */
- .use_clustering = ENABLE_CLUSTERING,
+ .use_clustering = DISABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
};
@@ -1360,27 +1469,17 @@ static int storvsc_probe(struct hv_device *device,
if (!host)
return -ENOMEM;
- host_dev = (struct hv_host_device *)host->hostdata;
+ host_dev = shost_priv(host);
memset(host_dev, 0, sizeof(struct hv_host_device));
host_dev->port = host->host_no;
host_dev->dev = device;
- host_dev->request_pool =
- kmem_cache_create(dev_name(&device->device),
- sizeof(struct storvsc_cmd_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!host_dev->request_pool) {
- scsi_host_put(host);
- return -ENOMEM;
- }
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
if (!stor_device) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
stor_device->destroy = false;
@@ -1391,12 +1490,8 @@ static int storvsc_probe(struct hv_device *device,
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
- if (ret) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- kfree(stor_device);
- return ret;
- }
+ if (ret)
+ goto err_out1;
if (dev_is_ide)
storvsc_get_ide_info(device, &target, &path);
@@ -1416,7 +1511,7 @@ static int storvsc_probe(struct hv_device *device,
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
- goto err_out;
+ goto err_out2;
if (!dev_is_ide) {
scsi_scan_host(host);
@@ -1425,21 +1520,32 @@ static int storvsc_probe(struct hv_device *device,
ret = scsi_add_device(host, 0, target, 0);
if (ret) {
scsi_remove_host(host);
- goto err_out;
+ goto err_out2;
}
return 0;
-err_out:
+err_out2:
+ /*
+ * Once we have connected with the host, we need to invoke
+ * storvsc_dev_remove() to roll back this state; that call also
+ * frees up the stor_device, hence the jump around the
+ * err_out1 label.
+ */
storvsc_dev_remove(device);
- kmem_cache_destroy(host_dev->request_pool);
+ goto err_out0;
+
+err_out1:
+ kfree(stor_device);
+
+err_out0:
scsi_host_put(host);
- return -ENODEV;
+ return ret;
}
/* The one and only one */
static struct hv_driver storvsc_drv = {
- .name = "storvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index d5809532149..69a05b9456d 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <string.h>
#include <poll.h>
+#include <endian.h>
#include "iio_utils.h"
/**
@@ -56,6 +57,13 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
void print2byte(int input, struct iio_channel_info *info)
{
+ /* First swap if incorrect endian */
+
+ if (info->be)
+ input = be16toh((uint16_t)input);
+ else
+ input = le16toh((uint16_t)input);
+
/* shift before conversion to avoid sign extension
of left aligned data */
input = input >> info->shift;
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 75938b2412f..6f3a392297e 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -13,6 +13,7 @@
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
+#include <dirent.h>
#define IIO_MAX_NAME_LENGTH 30
@@ -73,6 +74,7 @@ struct iio_channel_info {
unsigned bits_used;
unsigned shift;
uint64_t mask;
+ unsigned be;
unsigned is_signed;
unsigned enabled;
unsigned location;
@@ -83,6 +85,7 @@ struct iio_channel_info {
* @is_signed: output whether channel is signed
* @bytes: output how many bytes the channel storage occupies
* @mask: output a bit mask for the raw data
+ * @be: big endian
* @device_dir: the iio device directory
* @name: the channel name
* @generic_name: the channel type name
@@ -92,6 +95,7 @@ inline int iioutils_get_type(unsigned *is_signed,
unsigned *bits_used,
unsigned *shift,
uint64_t *mask,
+ unsigned *be,
const char *device_dir,
const char *name,
const char *generic_name)
@@ -100,7 +104,7 @@ inline int iioutils_get_type(unsigned *is_signed,
int ret;
DIR *dp;
char *scan_el_dir, *builtname, *builtname_generic, *filename = 0;
- char signchar;
+ char signchar, endianchar;
unsigned padint;
const struct dirent *ent;
@@ -144,9 +148,18 @@ inline int iioutils_get_type(unsigned *is_signed,
ret = -errno;
goto error_free_filename;
}
- fscanf(sysfsfp,
- "%c%u/%u>>%u", &signchar, bits_used,
- &padint, shift);
+
+ ret = fscanf(sysfsfp,
+ "%ce:%c%u/%u>>%u",
+ &endianchar,
+ &signchar,
+ bits_used,
+ &padint, shift);
+ if (ret < 0) {
+ printf("failed to parse scan type description\n");
+ return ret;
+ }
+ *be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
*mask = ~0;
@@ -156,6 +169,10 @@ inline int iioutils_get_type(unsigned *is_signed,
*is_signed = 1;
else
*is_signed = 0;
+ fclose(sysfsfp);
+ free(filename);
+
+ filename = 0;
}
error_free_filename:
if (filename)
@@ -386,6 +403,7 @@ inline int build_channel_array(const char *device_dir,
&current->bits_used,
&current->shift,
&current->mask,
+ &current->be,
device_dir,
current->name,
current->generic_name);
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
index 7e99ef2b7bc..e33807761cd 100644
--- a/drivers/staging/iio/Documentation/ring.txt
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -23,10 +23,6 @@ The function pointers within here are used to allow the core to handle
as much buffer functionality as possible. Note almost all of these
are optional.
-mark_in_use, unmark_in_use
- Basically indicate that not changes should be made to the buffer state that
- will effect the form of the data being captures (e.g. scan elements or length)
-
store_to
If possible, push data to the buffer.
@@ -39,8 +35,6 @@ rip_first_n
The primary buffer reading function. Note that it may well not return
as much data as requested.
-mark_param_changed
- Used to indicate that something has changed. Used in conjunction with
request_update
If parameters have changed that require reinitialization or configuration of
the buffer this will trigger it.
@@ -51,7 +45,3 @@ get_bytes_per_datum, set_bytes_per_datum
get_length / set_length
Get/set the number of complete scans that may be held by the buffer.
-is_enabled
- Query if ring buffer is in use
-enable
- Start the ring buffer.
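Editor's note, not part of the patch: ring.txt above documents the remaining buffer access callbacks in prose. The following is a purely hypothetical C sketch, not the kernel's real structure layout, that restates those callbacks (store_to, rip_first_n, request_update, get/set_bytes_per_datum, get/set_length) as an interface so the shape of the API is concrete.

/* Hypothetical illustration only -- names taken from ring.txt above. */
#include <stddef.h>

struct example_ring_access {
	int (*store_to)(void *ring, const unsigned char *datum);
	int (*rip_first_n)(void *ring, size_t count, char *dst);
	int (*request_update)(void *ring);
	int (*get_bytes_per_datum)(void *ring);
	int (*set_bytes_per_datum)(void *ring, size_t bpd);
	int (*get_length)(void *ring);
	int (*set_length)(void *ring, int length);
};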
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index 0d6823d19b6..46a995d6d26 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -276,6 +276,16 @@ Description:
If a discrete set of scale values are available, they
are listed in this attribute.
+What: /sys/.../in_accel_filter_low_pass_3db_frequency
+What: /sys/.../in_magn_filter_low_pass_3db_frequency
+What: /sys/.../in_anglvel_filter_low_pass_3db_frequency
+KernelVersion: 3.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ If a known or controllable low pass filter is applied
+ to the underlying data channel, then this parameter
+ gives the 3dB frequency of the filter in Hz.
+
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
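Editor's note, not part of the patch: the sysfs-bus-iio hunk above documents the new in_*_filter_low_pass_3db_frequency attributes. A minimal userspace C sketch that reads one of them follows; the "iio:device0" index and the accelerometer channel are hypothetical.

/* Standalone sketch: read the documented low-pass 3dB frequency. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/in_accel_filter_low_pass_3db_frequency";
	FILE *f = fopen(path, "r");
	double hz;

	if (!f)
		return 1;
	if (fscanf(f, "%lf", &hz) == 1)
		printf("low-pass 3dB frequency: %.1f Hz\n", hz);
	fclose(f);
	return 0;
}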
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 4ec9118955f..90162aa8b2d 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -76,7 +76,6 @@ config IIO_DUMMY_EVGEN
config IIO_SIMPLE_DUMMY
tristate "An example driver with no hardware requirements"
- select IIO_SIMPLE_DUMMY_EVGEN if IIO_SIMPLE_DUMMY_EVENTS
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 1c5dad53784..d439e45d07f 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16201.h"
@@ -322,8 +322,7 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -348,10 +347,10 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -388,7 +387,7 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -408,36 +407,36 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16201_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16201_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16201_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16201_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16201_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16201_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16201_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_y, ADIS16201_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(7)
@@ -549,19 +548,9 @@ static struct spi_driver adis16201_driver = {
.probe = adis16201_probe,
.remove = __devexit_p(adis16201_remove),
};
-
-static __init int adis16201_init(void)
-{
- return spi_register_driver(&adis16201_driver);
-}
-module_init(adis16201_init);
-
-static __exit void adis16201_exit(void)
-{
- spi_unregister_driver(&adis16201_driver);
-}
-module_exit(adis16201_exit);
+module_spi_driver(adis16201_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 0016ed378e3..26c610faee3 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16201_read_ring_data(indio_dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)
+ && adis16201_read_ring_data(indio_dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -116,11 +116,9 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16201_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16201_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index 8a3337442af..1a5140f9e3f 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16203.h"
@@ -329,8 +329,7 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -350,10 +349,10 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 14;
mutex_lock(&indio_dev->mlock);
addr = adis16203_addresses[chan->address][1];
@@ -374,26 +373,26 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16203_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16203_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16203_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16203_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
/* Fixme: Not what it appears to be - see data sheet */
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16203_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16203_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -504,19 +503,9 @@ static struct spi_driver adis16203_driver = {
.probe = adis16203_probe,
.remove = __devexit_p(adis16203_remove),
};
-
-static __init int adis16203_init(void)
-{
- return spi_register_driver(&adis16203_driver);
-}
-module_init(adis16203_init);
-
-static __exit void adis16203_exit(void)
-{
- spi_unregister_driver(&adis16203_driver);
-}
-module_exit(adis16203_exit);
+module_spi_driver(adis16203_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 1fdfe6f6ac6..064640d15e4 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -118,11 +118,9 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16203_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16203_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 644ac8e4d2a..fa89364b841 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16204.h"
@@ -366,7 +366,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -390,12 +390,12 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
- if (mask == (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE)) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_PEAK:
+ if (mask == IIO_CHAN_INFO_CALIBBIAS) {
bits = 12;
addrind = 1;
} else { /* PEAK_SEPARATE */
@@ -428,7 +428,7 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -445,28 +445,28 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16204_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 0, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16204_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16204_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16204_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_x, ADIS16204_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_y, ADIS16204_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -577,19 +577,9 @@ static struct spi_driver adis16204_driver = {
.probe = adis16204_probe,
.remove = __devexit_p(adis16204_remove),
};
-
-static __init int adis16204_init(void)
-{
- return spi_register_driver(&adis16204_driver);
-}
-module_init(adis16204_init);
-
-static __exit void adis16204_exit(void)
-{
- spi_unregister_driver(&adis16204_driver);
-}
-module_exit(adis16204_exit);
+module_spi_driver(adis16204_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 6fd3d8f51f2..4081179dfa5 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -71,11 +71,11 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -114,10 +114,8 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16204_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16204_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16204_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 0a8571b18b3..a98715f6bd6 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -18,7 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16209.h"
@@ -304,7 +304,7 @@ static int adis16209_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
case IIO_INCLI:
@@ -355,8 +355,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -381,10 +380,10 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 14;
@@ -410,34 +409,34 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16209_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16209_SCAN_SUPPLY,
IIO_ST('u', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16209_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16209_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16209_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16209_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_x, ADIS16209_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16209_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ROT, 0, 1, 0, NULL, 0, IIO_MOD_X,
@@ -553,19 +552,9 @@ static struct spi_driver adis16209_driver = {
.probe = adis16209_probe,
.remove = __devexit_p(adis16209_remove),
};
-
-static __init int adis16209_init(void)
-{
- return spi_register_driver(&adis16209_driver);
-}
-module_init(adis16209_init);
-
-static __exit void adis16209_exit(void)
-{
- spi_unregister_driver(&adis16209_driver);
-}
-module_exit(adis16209_exit);
+module_spi_driver(adis16209_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index d17e39d9545..2a6fd334f5f 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -72,9 +72,10 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -114,10 +115,8 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16209_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16209_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16209_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 6d4503de192..51a852d4548 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -167,9 +167,9 @@ static ssize_t adis16220_write_16bit(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = adis16220_spi_write_reg_16(indio_dev, this_attr->address, val);
@@ -510,17 +510,17 @@ static int adis16220_read_raw(struct iio_dev *indio_dev,
case 0:
addrind = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
*val = 25;
return IIO_VAL_INT;
}
addrind = 1;
break;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
addrind = 2;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
case IIO_TEMP:
@@ -575,27 +575,27 @@ static const struct iio_chan_spec adis16220_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
}, {
.type = IIO_ACCEL,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
.address = accel,
}, {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_1,
}, {
.type = IIO_VOLTAGE,
@@ -708,19 +708,9 @@ static struct spi_driver adis16220_driver = {
.probe = adis16220_probe,
.remove = __devexit_p(adis16220_remove),
};
-
-static __init int adis16220_init(void)
-{
- return spi_register_driver(&adis16220_driver);
-}
-module_init(adis16220_init);
-
-static __exit void adis16220_exit(void)
-{
- spi_unregister_driver(&adis16220_driver);
-}
-module_exit(adis16220_exit);
+module_spi_driver(adis16220_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index b8be2925d61..17f77fef7f2 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16240.h"
@@ -389,8 +389,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -411,14 +410,14 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_PEAK_SCALE_SHARED):
+ case IIO_CHAN_INFO_PEAK_SCALE:
*val = 6;
*val2 = 629295;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][1];
@@ -432,7 +431,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][2];
@@ -460,7 +459,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16240_addresses[chan->address][1];
return adis16240_spi_write_reg_16(indio_dev, addr, val16);
@@ -470,7 +469,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16240_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16240_SCAN_SUPPLY,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
@@ -478,22 +477,22 @@ static struct iio_chan_spec adis16240_channels[] = {
in_aux, ADIS16240_SCAN_AUX_ADC,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16240_SCAN_ACC_X,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16240_SCAN_ACC_Y,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_z, ADIS16240_SCAN_ACC_Z,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
temp, ADIS16240_SCAN_TEMP,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(6)
@@ -606,19 +605,9 @@ static struct spi_driver adis16240_driver = {
.probe = adis16240_probe,
.remove = __devexit_p(adis16240_remove),
};
-
-static __init int adis16240_init(void)
-{
- return spi_register_driver(&adis16240_driver);
-}
-module_init(adis16240_init);
-
-static __exit void adis16240_exit(void)
-{
- spi_unregister_driver(&adis16240_driver);
-}
-module_exit(adis16240_exit);
+module_spi_driver(adis16240_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index b907ca3f4fd..e23622d96f9 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -69,9 +69,10 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16240_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -111,10 +112,8 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16240_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16240_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16240_trigger_handler,
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index 5238503f680..d13d7215ff6 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -140,7 +140,7 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
{
int ret = -EINVAL;
- if (mask == (1 << IIO_CHAN_INFO_SCALE_SHARED)) {
+ if (mask == IIO_CHAN_INFO_SCALE) {
/* Check no integer component */
if (val)
return -EINVAL;
@@ -164,7 +164,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
goto error_ret;
*val = ret;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
goto error_ret;
@@ -181,7 +181,7 @@ error_ret:
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = 1 << IIO_CHAN_INFO_SCALE_SHARED, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = KXSD9_REG_##axis, \
}
@@ -268,8 +268,10 @@ static int __devexit kxsd9_remove(struct spi_device *spi)
}
static const struct spi_device_id kxsd9_id[] = {
- {"kxsd9", 0}
+ {"kxsd9", 0},
+ { },
};
+MODULE_DEVICE_TABLE(spi, kxsd9_id);
static struct spi_driver kxsd9_driver = {
.driver = {
@@ -280,18 +282,7 @@ static struct spi_driver kxsd9_driver = {
.remove = __devexit_p(kxsd9_remove),
.id_table = kxsd9_id,
};
-
-static __init int kxsd9_spi_init(void)
-{
- return spi_register_driver(&kxsd9_driver);
-}
-module_init(kxsd9_spi_init);
-
-static __exit void kxsd9_spi_exit(void)
-{
- spi_unregister_driver(&kxsd9_driver);
-}
-module_exit(kxsd9_spi_exit);
+module_spi_driver(kxsd9_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 7237a9ab61a..2db383fc274 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -181,11 +181,6 @@ int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val);
-
-
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
@@ -212,13 +207,6 @@ static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
}
-static inline ssize_t
-lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- return 0;
-}
static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 559545a4233..376da513796 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -25,7 +25,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "lis3l02dq.h"
@@ -226,14 +227,14 @@ static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
u8 uval;
s8 sval;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val > 255 || val < -256)
return -EINVAL;
sval = val;
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val & ~0xFF)
return -EINVAL;
uval = val;
@@ -259,23 +260,20 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
case 0:
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
- ret = lis3l02dq_read_accel_from_buffer(indio_dev->
- buffer,
- chan->scan_index,
- val);
- else {
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ } else {
reg = lis3l02dq_axis_map
[LIS3L02DQ_ACCEL][chan->address];
ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
}
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 9580;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
if (ret)
@@ -284,7 +282,7 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
*val = utemp;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
/* to match with what previous code does */
@@ -331,11 +329,11 @@ static ssize_t lis3l02dq_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- long val;
+ unsigned long val;
int ret;
u8 t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -515,9 +513,9 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
}
#define LIS3L02DQ_INFO_MASK \
- ((1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE))
+ (IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT)
#define LIS3L02DQ_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
@@ -534,7 +532,7 @@ static struct iio_chan_spec lis3l02dq_channels[] = {
};
-static ssize_t lis3l02dq_read_event_config(struct iio_dev *indio_dev,
+static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
u64 event_code)
{
@@ -804,19 +802,9 @@ static struct spi_driver lis3l02dq_driver = {
.probe = lis3l02dq_probe,
.remove = __devexit_p(lis3l02dq_remove),
};
-
-static __init int lis3l02dq_init(void)
-{
- return spi_register_driver(&lis3l02dq_driver);
-}
-module_init(lis3l02dq_init);
-
-static __exit void lis3l02dq_exit(void)
-{
- spi_unregister_driver(&lis3l02dq_driver);
-}
-module_exit(lis3l02dq_exit);
+module_spi_driver(lis3l02dq_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 89527af8f4c..98c5c92d345 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -38,38 +38,6 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
return IRQ_WAKE_THREAD;
}
-/**
- * lis3l02dq_read_accel_from_buffer() individual acceleration read from buffer
- **/
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- int ret;
- s16 *data;
-
- if (!iio_scan_mask_query(buffer, index))
- return -EINVAL;
-
- if (!buffer->access->read_last)
- return -EBUSY;
-
- data = kmalloc(buffer->access->get_bytes_per_datum(buffer),
- GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- ret = buffer->access->read_last(buffer, (u8 *)data);
- if (ret)
- goto error_free_data;
- *val = data[bitmap_weight(buffer->scan_mask, index)];
-error_free_data:
-
- kfree(data);
-
- return ret;
-}
-
static const u8 read_all_tx_array[] = {
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
@@ -87,21 +55,21 @@ static const u8 read_all_tx_array[] = {
**/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
- struct iio_buffer *buffer = indio_dev->buffer;
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer *xfers;
struct spi_message msg;
int ret, i, j = 0;
- xfers = kzalloc((buffer->scan_count) * 2
- * sizeof(*xfers), GFP_KERNEL);
+ xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2,
+ sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
- if (test_bit(i, buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
/* lower byte */
xfers[j].tx_buf = st->tx + 2*j;
st->tx[2*j] = read_all_tx_array[i*4];
@@ -129,7 +97,8 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
* values in alternate bytes
*/
spi_message_init(&msg);
- for (j = 0; j < buffer->scan_count * 2; j++)
+ for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -145,14 +114,16 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
int ret, i;
u8 *rx_array ;
s16 *data = (s16 *)buf;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- rx_array = kzalloc(4 * (indio_dev->buffer->scan_count), GFP_KERNEL);
+ rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
if (rx_array == NULL)
return -ENOMEM;
ret = lis3l02dq_read_all(indio_dev, rx_array);
if (ret < 0)
return ret;
- for (i = 0; i < indio_dev->buffer->scan_count; i++)
+ for (i = 0; i < scan_count; i++)
data[i] = combine_8_to_16(rx_array[i*4+1],
rx_array[i*4+3]);
kfree(rx_array);
@@ -175,7 +146,7 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (buffer->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
/* Guaranteed to be aligned with 8 byte boundary */
@@ -363,17 +334,17 @@ static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
if (ret)
goto error_ret;
- if (iio_scan_mask_query(indio_dev->buffer, 0)) {
+ if (test_bit(0, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 1)) {
+ if (test_bit(1, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 2)) {
+ if (test_bit(2, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
oneenabled = true;
} else
@@ -437,11 +408,9 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
indio_dev->buffer = buffer;
/* Effectively select the buffer implementation */
indio_dev->buffer->access = &lis3l02dq_access_funcs;
- buffer->bpe = 2;
buffer->scan_timestamp = true;
- buffer->setup_ops = &lis3l02dq_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
/* Functions are NULL as we set handler below */
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index a44a70589db..49764fb7181 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -20,7 +20,8 @@
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "sca3000.h"
@@ -381,13 +382,17 @@ sca3000_store_measurement_mode(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
int ret;
- int mask = 0x03;
- long val;
+ u8 mask = 0x03;
+ u8 val;
mutex_lock(&st->lock);
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
+ if (val > 3) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto error_ret;
@@ -424,7 +429,7 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
#define SCA3000_INFO_MASK \
- (1 << IIO_CHAN_INFO_SCALE_SHARED)
+ IIO_CHAN_INFO_SCALE_SHARED_BIT
#define SCA3000_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING))
@@ -474,7 +479,7 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
(sizeof(*val)*8 - 13);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
if (chan->type == IIO_ACCEL)
*val2 = st->info->scale;
@@ -1161,9 +1166,9 @@ static int __devinit sca3000_probe(struct spi_device *spi)
if (ret < 0)
goto error_unregister_dev;
if (indio_dev->buffer) {
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
- iio_scan_mask_set(indio_dev->buffer, 2);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 2);
}
if (spi->irq) {
@@ -1240,6 +1245,7 @@ static const struct spi_device_id sca3000_id[] = {
{"sca3000_e05", e05},
{}
};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
static struct spi_driver sca3000_driver = {
.driver = {
@@ -1250,18 +1256,7 @@ static struct spi_driver sca3000_driver = {
.remove = __devexit_p(sca3000_remove),
.id_table = sca3000_id,
};
-
-static __init int sca3000_init(void)
-{
- return spi_register_driver(&sca3000_driver);
-}
-module_init(sca3000_init);
-
-static __exit void sca3000_exit(void)
-{
- spi_unregister_driver(&sca3000_driver);
-}
-module_exit(sca3000_exit);
+module_spi_driver(sca3000_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 4a9a01dccd0..6b824a11f7f 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_hw.h"
#include "sca3000.h"
@@ -146,7 +146,6 @@ static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
}
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/**
@@ -158,8 +157,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, val;
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
@@ -180,8 +178,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
@@ -222,8 +219,7 @@ static ssize_t sca3000_show_buffer_scale(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
return sprintf(buf, "0.%06d\n", 4*st->info->scale);
@@ -243,7 +239,6 @@ static IIO_DEVICE_ATTR(in_accel_scale,
*/
static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
@@ -269,7 +264,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
buf->stufftoread = 0;
buf->attrs = &sca3000_ring_attr;
- iio_buffer_init(buf, indio_dev);
+ iio_buffer_init(buf);
return buf;
}
@@ -350,7 +345,7 @@ static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
- indio_dev->buffer->setup_ops = &sca3000_ring_setup_ops;
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
}
/**
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 31c376b9d5e..45f4504ed92 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -19,7 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -453,25 +453,6 @@ out:
return ret;
}
-static int ad7192_scan_from_ring(struct ad7192_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7192_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -479,12 +460,14 @@ static int ad7192_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -544,7 +527,7 @@ static irqreturn_t ad7192_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7192_read_reg(st, 1, 1, AD7192_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -592,7 +575,7 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7192_ring_setup_ops;
+ indio_dev->setup_ops = &ad7192_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -626,6 +609,10 @@ static irqreturn_t ad7192_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7192_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7192_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -638,7 +625,7 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
-
+ st->trig->ops = &ad7192_trigger_ops;
ret = request_irq(st->spi->irq,
ad7192_data_rdy_trig_poll,
IRQF_TRIGGER_LOW,
@@ -650,7 +637,6 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
@@ -795,7 +781,7 @@ static ssize_t ad7192_set(struct device *dev,
return -EBUSY;
}
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7192_REG_GPOCON:
if (val)
st->gpocon |= AD7192_GPOCON_BPDSW;
@@ -838,14 +824,14 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static mode_t ad7192_attr_is_visible(struct kobject *kobj,
+static umode_t ad7192_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if ((st->devid != ID_AD7195) &&
(attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
@@ -873,8 +859,7 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7192_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7192_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -901,18 +886,20 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- mutex_lock(&indio_dev->mlock);
- *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
- *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1000;
-
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&indio_dev->mlock);
+ *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
+ *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
@@ -935,7 +922,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -992,7 +979,7 @@ static const struct iio_info ad7192_info = {
.extend_name = _name, \
.channel = _chan, \
.channel2 = _chan2, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1001,7 +988,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1010,7 +997,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_TEMP, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1151,6 +1138,7 @@ static const struct spi_device_id ad7192_id[] = {
{"ad7195", ID_AD7195},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7192_id);
static struct spi_driver ad7192_driver = {
.driver = {
@@ -1161,18 +1149,7 @@ static struct spi_driver ad7192_driver = {
.remove = __devexit_p(ad7192_remove),
.id_table = ad7192_id,
};
-
-static int __init ad7192_init(void)
-{
- return spi_register_driver(&ad7192_driver);
-}
-module_init(ad7192_init);
-
-static void __exit ad7192_exit(void)
-{
- spi_unregister_driver(&ad7192_driver);
-}
-module_exit(ad7192_exit);
+module_spi_driver(ad7192_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 372d059042f..7dbd6812c24 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "ad7280a.h"
@@ -455,10 +456,10 @@ static ssize_t ad7280_store_balance_timer(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -487,8 +488,8 @@ static int ad7280_channel_init(struct ad7280_state *st)
{
int dev, ch, cnt;
- st->channels = kzalloc(sizeof(*st->channels) *
- ((st->slave_num + 1) * 12 + 2), GFP_KERNEL);
+ st->channels = kcalloc((st->slave_num + 1) * 12 + 2,
+ sizeof(*st->channels), GFP_KERNEL);
if (st->channels == NULL)
return -ENOMEM;
@@ -507,7 +508,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
}
st->channels[cnt].indexed = 1;
st->channels[cnt].info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].address =
AD7280A_DEVADDR(dev) << 8 | ch;
st->channels[cnt].scan_index = cnt;
@@ -523,7 +524,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
st->channels[cnt].channel2 = dev * 6;
st->channels[cnt].address = AD7280A_ALL_CELLS;
st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ st->channels[cnt].info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].scan_index = cnt;
st->channels[cnt].scan_type.sign = 'u';
st->channels[cnt].scan_type.realbits = 32;
@@ -600,7 +601,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned val;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
val = 1000 + (st->cell_threshhigh * 1568) / 100;
break;
@@ -636,7 +637,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
if (ret)
return ret;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
case AD7280A_CELL_UNDERVOLTAGE:
val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
@@ -652,7 +653,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
val = clamp(val, 0L, 0xFFL);
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
st->cell_threshhigh = val;
break;
@@ -682,13 +683,13 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
unsigned *channels;
int i, ret;
- channels = kzalloc(sizeof(*channels) * st->scan_cnt, GFP_KERNEL);
+ channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
if (channels == NULL)
return IRQ_HANDLED;
ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
if (ret < 0)
- return IRQ_HANDLED;
+ goto out;
for (i = 0; i < st->scan_cnt; i++) {
if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
@@ -731,6 +732,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
}
}
+out:
kfree(channels);
return IRQ_HANDLED;
@@ -801,7 +803,7 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
scale_uv = (4000 * 1000) >> AD7280A_BITS;
else
@@ -968,29 +970,18 @@ static const struct spi_device_id ad7280_id[] = {
{"ad7280a", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7280_id);
static struct spi_driver ad7280_driver = {
.driver = {
.name = "ad7280",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7280_probe,
.remove = __devexit_p(ad7280_remove),
.id_table = ad7280_id,
};
-
-static int __init ad7280_init(void)
-{
- return spi_register_driver(&ad7280_driver);
-}
-module_init(ad7280_init);
-
-static void __exit ad7280_exit(void)
-{
- spi_unregister_driver(&ad7280_driver);
-}
-module_exit(ad7280_exit);
+module_spi_driver(ad7280_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7280A");
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 10e79e8ee64..0a13616e3db 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -19,6 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* Simplified handling
@@ -500,7 +501,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
AD7291_T_AVERAGE);
if (ret < 0)
@@ -509,18 +510,24 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
AD7291_VALUE_MASK) << 4) >> 4;
*val = signval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /*
- * One LSB of the ADC corresponds to 0.25 deg C.
- * The temperature reading is in 12-bit twos complement format
- */
- *val = 250;
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /*
+ * One LSB of the ADC corresponds to 0.25 deg C.
+ * The temperature reading is in 12-bit twos
+ * complement format
+ */
+ *val = 250;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -529,7 +536,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
#define AD7291_VOLTAGE_CHAN(_chan) \
{ \
.type = IIO_VOLTAGE, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.indexed = 1, \
.channel = _chan, \
.event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|\
@@ -547,8 +554,8 @@ static const struct iio_chan_spec ad7291_channels[] = {
AD7291_VOLTAGE_CHAN(7),
{
.type = IIO_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.indexed = 1,
.channel = 0,
.event_mask =
@@ -700,20 +707,8 @@ static struct i2c_driver ad7291_driver = {
.remove = __devexit_p(ad7291_remove),
.id_table = ad7291_id,
};
-
-static __init int ad7291_init(void)
-{
- return i2c_add_driver(&ad7291_driver);
-}
-
-static __exit void ad7291_exit(void)
-{
- i2c_del_driver(&ad7291_driver);
-}
+module_i2c_driver(ad7291_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7291_init);
-module_exit(ad7291_exit);
diff --git a/drivers/staging/iio/adc/ad7298.h b/drivers/staging/iio/adc/ad7298.h
index 9ddf5cf0e51..a0e5dea415e 100644
--- a/drivers/staging/iio/adc/ad7298.h
+++ b/drivers/staging/iio/adc/ad7298.h
@@ -54,14 +54,9 @@ struct ad7298_state {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch);
int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7298_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- return 0;
-}
static inline int
ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/staging/iio/adc/ad7298_core.c
index c1de73a1ca9..8dd6aa9cf92 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/staging/iio/adc/ad7298_core.c
@@ -18,37 +18,37 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7298.h"
static struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
1, 1, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
2, 2, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
3, 3, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
4, 4, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
5, 5, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
6, 6, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
7, 7, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
@@ -69,27 +69,28 @@ static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
int tmp, ret;
+ __be16 buf;
- tmp = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
+ buf = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
AD7298_TAVG | st->ext_ref);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = 0;
+ buf = cpu_to_be16(0);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
usleep_range(101, 1000); /* sleep > 100us */
- ret = spi_read(st->spi, (u8 *)&tmp, 2);
+ ret = spi_read(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = be16_to_cpu(tmp) & RES_MASK(AD7298_BITS);
+ tmp = be16_to_cpu(buf) & RES_MASK(AD7298_BITS);
/*
* One LSB of the ADC corresponds to 0.25 deg C.
@@ -122,12 +123,8 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- if (chan->address == AD7298_CH_TEMP)
- ret = -ENODEV;
- else
- ret = ad7298_scan_from_ring(indio_dev,
- chan->address);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
} else {
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
@@ -143,15 +140,20 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
*val = ret & RES_MASK(AD7298_BITS);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1;
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
}
@@ -265,31 +267,19 @@ static const struct spi_device_id ad7298_id[] = {
{"ad7298", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7298_id);
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
.remove = __devexit_p(ad7298_remove),
.id_table = ad7298_id,
};
-
-static int __init ad7298_init(void)
-{
- return spi_register_driver(&ad7298_driver);
-}
-module_init(ad7298_init);
-
-static void __exit ad7298_exit(void)
-{
- spi_unregister_driver(&ad7298_driver);
-}
-module_exit(ad7298_exit);
+module_spi_driver(ad7298_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7298");
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index 47630d506a6..d1a12dd015e 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -12,41 +12,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7298.h"
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- if (!(test_bit(ch, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = be16_to_cpu(ring_data[ch]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7298_ring_preenable() setup the parameters of the ring before enabling
*
@@ -61,8 +32,9 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
int i, m;
unsigned short command;
-
- d_size = ring->scan_count * (AD7298_STORAGE_BITS / 8);
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ d_size = scan_count * (AD7298_STORAGE_BITS / 8);
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -79,7 +51,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
command = AD7298_WRITE | st->ext_ref;
for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
- if (test_bit(i, ring->scan_mask))
+ if (test_bit(i, indio_dev->active_scan_mask))
command |= m;
st->tx_buf[0] = cpu_to_be16(command);
@@ -96,7 +68,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
- for (i = 0; i < ring->scan_count; i++) {
+ for (i = 0; i < scan_count; i++) {
st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
st->ring_xfer[i + 2].len = 2;
st->ring_xfer[i + 2].cs_change = 1;
@@ -134,7 +106,8 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
&time_ns, sizeof(time_ns));
}
- for (i = 0; i < ring->scan_count; i++)
+ for (i = 0; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
buf[i] = be16_to_cpu(st->rx_buf[i]);
indio_dev->buffer->access->store_to(ring, (u8 *)buf, time_ns);
@@ -174,7 +147,7 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7298_ring_setup_ops;
+ indio_dev->setup_ops = &ad7298_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7476.h b/drivers/staging/iio/adc/ad7476.h
index 60142926565..27f696c75cc 100644
--- a/drivers/staging/iio/adc/ad7476.h
+++ b/drivers/staging/iio/adc/ad7476.h
@@ -50,14 +50,9 @@ enum ad7476_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7476_scan_from_ring(struct iio_dev *indio_dev);
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7476_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
static inline int
ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index fd79facc6ca..0c064d1c392 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7476.h"
@@ -46,7 +46,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7476_scan_from_ring(indio_dev);
+ ret = -EBUSY;
else
ret = ad7476_scan_direct(st);
mutex_unlock(&indio_dev->mlock);
@@ -56,7 +56,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -69,49 +69,49 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7466] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7467] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7468] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1 , 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7475] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7476] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7477] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7478] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7495] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.int_vref_mv = 2500,
@@ -237,31 +237,19 @@ static const struct spi_device_id ad7476_id[] = {
{"ad7495", ID_AD7495},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7476_id);
static struct spi_driver ad7476_driver = {
.driver = {
.name = "ad7476",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7476_probe,
.remove = __devexit_p(ad7476_remove),
.id_table = ad7476_id,
};
-
-static int __init ad7476_init(void)
-{
- return spi_register_driver(&ad7476_driver);
-}
-module_init(ad7476_init);
-
-static void __exit ad7476_exit(void)
-{
- spi_unregister_driver(&ad7476_driver);
-}
-module_exit(ad7476_exit);
+module_spi_driver(ad7476_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7475/6/7/8(A) AD7466/7/8 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7476");
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index e82c1a433f4..4e298b2a05b 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -14,36 +14,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7476.h"
-int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u8 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = (ring_data[0] << 8) | ring_data[1];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7476_ring_preenable() setup the parameters of the ring before enabling
*
@@ -56,7 +32,8 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev)
struct ad7476_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -137,7 +114,7 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7476_ring_setup_ops;
+ indio_dev->setup_ops = &ad7476_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 35018c3f518..10f59896597 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -99,7 +99,6 @@ enum ad7606_supported_device_ids {
ID_AD7606_4
};
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch);
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7606_ring_cleanup(struct iio_dev *indio_dev);
#endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 54423ab196f..ddb7ef92f5c 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7606.h"
@@ -91,7 +91,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7606_scan_from_ring(indio_dev, chan->address);
+ ret = -EBUSY;
else
ret = ad7606_scan_direct(indio_dev, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -100,7 +100,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return ret;
*val = (short) ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->range * 1000 * 2)
>> st->chip_info->channels[0].scan_type.realbits;
*val = scale_uv / 1000;
@@ -205,14 +205,14 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static mode_t ad7606_attr_is_visible(struct kobject *kobj,
+static umode_t ad7606_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!(gpio_is_valid(st->pdata->gpio_os0) &&
gpio_is_valid(st->pdata->gpio_os1) &&
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 688632edd3d..cff97568189 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -189,4 +189,3 @@ module_exit(ad7606_cleanup);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ad7606_par");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index 20927fd5372..e8f94a18a94 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -12,36 +12,12 @@
#include <linux/slab.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7606.h"
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = ring_data[ch];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
*
@@ -136,8 +112,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->bpe =
- st->chip_info->channels[0].scan_type.storagebits / 8;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
@@ -152,7 +126,7 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7606_ring_setup_ops;
+ indio_dev->setup_ops = &ad7606_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true ;
INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index aede1ba5e04..237f1c44d29 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -97,11 +97,11 @@ static const struct spi_device_id ad7606_id[] = {
{"ad7606-4", ID_AD7606_4},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7606_id);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = AD7606_SPI_PM_OPS,
},
@@ -109,20 +109,8 @@ static struct spi_driver ad7606_driver = {
.remove = __devexit_p(ad7606_spi_remove),
.id_table = ad7606_id,
};
-
-static int __init ad7606_spi_init(void)
-{
- return spi_register_driver(&ad7606_driver);
-}
-module_init(ad7606_spi_init);
-
-static void __exit ad7606_spi_exit(void)
-{
- spi_unregister_driver(&ad7606_driver);
-}
-module_exit(ad7606_spi_exit);
+module_spi_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7606_spi");
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 7a579a1fd69..a13e58c814e 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -114,7 +114,7 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
*val *= 128;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 100000)
>> (channel.scan_type.realbits - 1);
*val = scale_uv / 100000;
@@ -127,12 +127,12 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7780] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 24, 32, 8), 0),
},
[ID_AD7781] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 20, 32, 12), 0),
},
};
@@ -272,29 +272,18 @@ static const struct spi_device_id ad7780_id[] = {
{"ad7781", ID_AD7781},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7780_id);
static struct spi_driver ad7780_driver = {
.driver = {
.name = "ad7780",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7780_probe,
.remove = __devexit_p(ad7780_remove),
.id_table = ad7780_id,
};
-
-static int __init ad7780_init(void)
-{
- return spi_register_driver(&ad7780_driver);
-}
-module_init(ad7780_init);
-
-static void __exit ad7780_exit(void)
-{
- spi_unregister_driver(&ad7780_driver);
-}
-module_exit(ad7780_exit);
+module_spi_driver(ad7780_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7780/1 ADC");
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 999f8f746cf..6a058b19c49 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -316,25 +316,6 @@ out:
return ret;
}
-static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7793_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -342,14 +323,15 @@ static int ad7793_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask,
+ channel = find_first_bit(indio_dev->active_scan_mask,
indio_dev->masklength);
- d_size = ring->scan_count *
- indio_dev->channels[0].scan_type.storagebits / 8;
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
+ indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -411,7 +393,7 @@ static irqreturn_t ad7793_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7793_read_reg(st, 1, 1, AD7793_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -459,7 +441,7 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7793_ring_setup_ops;
+ indio_dev->setup_ops = &ad7793_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -493,6 +475,10 @@ static irqreturn_t ad7793_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7793_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7793_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -505,6 +491,7 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
+ st->trig->ops = &ad7793_trigger_ops;
ret = request_irq(st->spi->irq,
ad7793_data_rdy_trig_poll,
@@ -517,7 +504,6 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
@@ -649,8 +635,7 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7793_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7793_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -667,19 +652,21 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- *val = st->scale_avail[(st->conf >> 8) & 0x7][0];
- *val2 = st->scale_avail[(st->conf >> 8) & 0x7][1];
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- /* 1170mV / 2^23 * 6 */
- scale_uv = (1170ULL * 100000000ULL * 6ULL)
- >> (chan->scan_type.realbits -
- (unipolar ? 0 : 1));
+ if (chan->differential) {
+ *val = st->
+ scale_avail[(st->conf >> 8) & 0x7][0];
+ *val2 = st->
+ scale_avail[(st->conf >> 8) & 0x7][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ } else {
+ /* 1170mV / 2^23 * 6 */
+ scale_uv = (1170ULL * 100000000ULL * 6ULL)
+ >> (chan->scan_type.realbits -
+ (unipolar ? 0 : 1));
+ }
break;
case IIO_TEMP:
/* Always uses unity gain and internal ref */
@@ -716,7 +703,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -775,7 +762,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -786,7 +773,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -797,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -809,7 +796,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -818,7 +805,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -828,7 +815,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -842,7 +829,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -853,7 +840,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -864,7 +851,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -876,7 +863,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -885,7 +872,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -895,7 +882,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -1034,29 +1021,18 @@ static const struct spi_device_id ad7793_id[] = {
{"ad7793", ID_AD7793},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7793_id);
static struct spi_driver ad7793_driver = {
.driver = {
.name = "ad7793",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7793_probe,
.remove = __devexit_p(ad7793_remove),
.id_table = ad7793_id,
};
-
-static int __init ad7793_init(void)
-{
- return spi_register_driver(&ad7793_driver);
-}
-module_init(ad7793_init);
-
-static void __exit ad7793_exit(void)
-{
- spi_unregister_driver(&ad7793_driver);
-}
-module_exit(ad7793_exit);
+module_spi_driver(ad7793_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7792/3 ADC");
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index bdb90492b8a..52b720e2b03 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* AD7816 config masks
@@ -459,28 +460,15 @@ MODULE_DEVICE_TABLE(spi, ad7816_id);
static struct spi_driver ad7816_driver = {
.driver = {
.name = "ad7816",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7816_probe,
.remove = __devexit_p(ad7816_remove),
.id_table = ad7816_id,
};
-
-static __init int ad7816_init(void)
-{
- return spi_register_driver(&ad7816_driver);
-}
-
-static __exit void ad7816_exit(void)
-{
- spi_unregister_driver(&ad7816_driver);
-}
+module_spi_driver(ad7816_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7816_init);
-module_exit(ad7816_exit);
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
index 3452d181907..bc53b653212 100644
--- a/drivers/staging/iio/adc/ad7887.h
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -83,14 +83,9 @@ enum ad7887_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum);
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7887_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- return 0;
-}
static inline int
ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 609dcd5f2dd..e9bbc3eed15 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7887.h"
@@ -45,7 +45,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7887_scan_from_ring(st, 1 << chan->address);
+ ret = -EBUSY;
else
ret = ad7887_scan_direct(st, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -55,7 +55,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -75,7 +75,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 1,
.scan_index = 1,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -84,7 +84,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 0,
.scan_index = 0,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -246,31 +246,19 @@ static const struct spi_device_id ad7887_id[] = {
{"ad7887", ID_AD7887},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7887_id);
static struct spi_driver ad7887_driver = {
.driver = {
.name = "ad7887",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7887_probe,
.remove = __devexit_p(ad7887_remove),
.id_table = ad7887_id,
};
-
-static int __init ad7887_init(void)
-{
- return spi_register_driver(&ad7887_driver);
-}
-module_init(ad7887_init);
-
-static void __exit ad7887_exit(void)
-{
- spi_unregister_driver(&ad7887_driver);
-}
-module_exit(ad7887_exit);
+module_spi_driver(ad7887_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7887");
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index cb74cada561..85076cd962e 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -13,46 +13,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7887.h"
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- /* for single channel scan the result is stored with zero offset */
- if ((test_bit(1, ring->scan_mask) || test_bit(0, ring->scan_mask)) &&
- (channum == 1))
- count = 1;
-
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7887_ring_preenable() setup the parameters of the ring before enabling
*
@@ -65,7 +31,8 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
struct ad7887_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -80,7 +47,7 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
set_bytes_per_datum(indio_dev->buffer, st->d_size);
/* We know this is a single long so can 'cheat' */
- switch (*ring->scan_mask) {
+ switch (*indio_dev->active_scan_mask) {
case (1 << 0):
st->ring_msg = &st->msg[AD7887_CH0];
break;
@@ -121,7 +88,8 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
__u8 *buf;
int b_sent;
- unsigned int bytes = ring->scan_count *
+ unsigned int bytes = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
buf = kzalloc(st->d_size, GFP_KERNEL);
@@ -176,7 +144,7 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_deallocate_sw_rb;
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7887_ring_setup_ops;
+ indio_dev->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
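Buffer setup callbacks also move: the iio_buffer_setup_ops pointer now lives on the iio_dev itself rather than on the buffer. A minimal sketch of the registration side, using the core helpers that appear elsewhere in this patch:

static const struct iio_buffer_setup_ops foo_ring_setup_ops = {
	.preenable	= &iio_sw_buffer_preenable,
	.postenable	= &iio_triggered_buffer_postenable,
	.predisable	= &iio_triggered_buffer_predisable,
};

static int foo_register_ring(struct iio_dev *indio_dev)
{
	/* ... allocate indio_dev->buffer and its access functions ... */
	indio_dev->setup_ops = &foo_ring_setup_ops;	/* was indio_dev->buffer->setup_ops */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;
}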
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index eda01d5bab7..356f690a76f 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -124,15 +124,9 @@ struct ad799x_platform_data {
int ad7997_8_set_scan_mode(struct ad799x_state *st, unsigned mask);
#ifdef CONFIG_AD799X_RING_BUFFER
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum);
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_AD799X_RING_BUFFER */
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- return -EINVAL;
-}
-
static inline int
ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index ee6cd792bec..d5b581d8bc2 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -35,7 +35,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "ad799x.h"
@@ -150,8 +151,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad799x_single_channel_from_ring(indio_dev,
- chan->scan_index);
+ ret = -EBUSY;
else
ret = ad799x_scan_direct(st, chan->scan_index);
mutex_unlock(&indio_dev->mlock);
@@ -161,7 +161,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
*val = (ret >> chan->scan_type.shift) &
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000) >> chan->scan_type.realbits;
*val = scale_uv / 1000;
*val2 = (scale_uv % 1000) * 1000;
@@ -929,21 +929,8 @@ static struct i2c_driver ad799x_driver = {
.remove = __devexit_p(ad799x_remove),
.id_table = ad799x_id,
};
-
-static __init int ad799x_init(void)
-{
- return i2c_add_driver(&ad799x_driver);
-}
-
-static __exit void ad799x_exit(void)
-{
- i2c_del_driver(&ad799x_driver);
-}
+module_i2c_driver(ad799x_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD799x ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:ad799x");
-
-module_init(ad799x_init);
-module_exit(ad799x_exit);
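I2C drivers get exactly the same treatment through module_i2c_driver(), with the alias coming from MODULE_DEVICE_TABLE(i2c, ...) instead of an explicit MODULE_ALIAS("i2c:..."). Sketch with hypothetical names; foo_probe()/foo_remove() stand in for the driver's usual callbacks:

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	return 0;
}

static int __devexit foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);	/* provides the "i2c:foo" modalias */

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe		= foo_probe,
	.remove		= __devexit_p(foo_remove),
	.id_table	= foo_id,
};

/* replaces the i2c_add_driver()/i2c_del_driver() boilerplate */
module_i2c_driver(foo_driver);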
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index e3f4698d781..5dded9e7820 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -17,42 +17,12 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad799x.h"
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
- count = bitmap_weight(ring->scan_mask, channum);
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad799x_ring_preenable() setup the parameters of the ring before enabling
*
@@ -71,9 +41,10 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
*/
if (st->id == ad7997 || st->id == ad7998)
- ad7997_8_set_scan_mode(st, *ring->scan_mask);
+ ad7997_8_set_scan_mode(st, *indio_dev->active_scan_mask);
- st->d_size = ring->scan_count * 2;
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2;
if (ring->scan_timestamp) {
st->d_size += sizeof(s64);
@@ -115,12 +86,13 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
case ad7991:
case ad7995:
case ad7999:
- cmd = st->config | (*ring->scan_mask << AD799X_CHANNEL_SHIFT);
+ cmd = st->config |
+ (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT);
break;
case ad7992:
case ad7993:
case ad7994:
- cmd = (*ring->scan_mask << AD799X_CHANNEL_SHIFT) |
+ cmd = (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT) |
AD7998_CONV_RES_REG;
break;
case ad7997:
@@ -132,7 +104,8 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
}
b_sent = i2c_smbus_read_i2c_block_data(st->client,
- cmd, ring->scan_count * 2, rxbuf);
+ cmd, bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2, rxbuf);
if (b_sent < 0)
goto done;
@@ -183,7 +156,7 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad799x_buf_setup_ops;
+ indio_dev->setup_ops = &ad799x_buf_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index c9e0be3b1cf..eec2f325d54 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* ADT7310 registers definition
*/
@@ -877,28 +877,15 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7310_probe,
.remove = __devexit_p(adt7310_remove),
.id_table = adt7310_id,
};
-
-static __init int adt7310_init(void)
-{
- return spi_register_driver(&adt7310_driver);
-}
-
-static __exit void adt7310_exit(void)
-{
- spi_unregister_driver(&adt7310_driver);
-}
+module_spi_driver(adt7310_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7310_init);
-module_exit(adt7310_exit);
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index a289e429dc4..c62248ceb37 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -17,6 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* ADT7410 registers definition
@@ -844,21 +845,9 @@ static struct i2c_driver adt7410_driver = {
.remove = __devexit_p(adt7410_remove),
.id_table = adt7410_id,
};
-
-static __init int adt7410_init(void)
-{
- return i2c_add_driver(&adt7410_driver);
-}
-
-static __exit void adt7410_exit(void)
-{
- i2c_del_driver(&adt7410_driver);
-}
+module_i2c_driver(adt7410_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7410 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7410_init);
-module_exit(adt7410_exit);
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index cbcb08a2cb5..2cd0112067b 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -146,22 +146,22 @@ struct max1363_state {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci);
+*max1363_match_mode(const unsigned long *mask,
+ const struct max1363_chip_info *ci);
int max1363_set_scan_mode(struct max1363_state *st);
#ifdef CONFIG_MAX1363_RING_BUFFER
-
-int max1363_single_channel_from_ring(const long *mask,
- struct max1363_state *st);
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_MAX1363_RING_BUFFER */
-
-int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const long *scan_mask)
{
- return -EINVAL;
+ return 0;
}
static inline int
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index eb699ade34b..b92cb4af18c 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -34,7 +34,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "max1363.h"
@@ -147,7 +148,8 @@ static const struct max1363_mode max1363_mode_table[] = {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci)
+*max1363_match_mode(const unsigned long *mask,
+const struct max1363_chip_info *ci)
{
int i;
if (mask)
@@ -189,7 +191,6 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int ret = 0;
s32 data;
char rxbuf[2];
- const unsigned long *mask;
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -198,46 +199,38 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
* been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on) {
+ if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- /* If ring buffer capture is occurring, query the buffer */
- if (iio_buffer_enabled(indio_dev)) {
- mask = max1363_mode_table[chan->address].modemask;
- data = max1363_single_channel_from_ring(mask, st);
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ goto error_ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 2);
if (data < 0) {
ret = data;
goto error_ret;
}
+ data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
} else {
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- goto error_ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 2);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
- } else {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 1);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = rxbuf[0];
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 1);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
}
+ data = rxbuf[0];
}
*val = data;
error_ret:
@@ -260,7 +253,7 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((1 << (st->chip_info->bits + 1)) >
st->chip_info->int_vref_mv) {
*val = 0;
@@ -288,7 +281,7 @@ static const enum max1363_modes max1363_mode_list[] = {
#define MAX1363_EV_M \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) \
| IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
-#define MAX1363_INFO_MASK (1 << IIO_CHAN_INFO_SCALE_SHARED)
+#define MAX1363_INFO_MASK IIO_CHAN_INFO_SCALE_SHARED_BIT
#define MAX1363_CHAN_U(num, addr, si, bits, evmask) \
{ \
.type = IIO_VOLTAGE, \
@@ -296,7 +289,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel = num, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -311,7 +310,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel2 = num2, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num"-AIN"#num2, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -833,6 +838,7 @@ static const struct iio_info max1363_info = {
.read_event_config = &max1363_read_event_config,
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
+ .update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
.event_attrs = &max1363_event_attribute_group,
};
@@ -1410,20 +1416,8 @@ static struct i2c_driver max1363_driver = {
.remove = max1363_remove,
.id_table = max1363_id,
};
-
-static __init int max1363_init(void)
-{
- return i2c_add_driver(&max1363_driver);
-}
-
-static __exit void max1363_exit(void)
-{
- i2c_del_driver(&max1363_driver);
-}
+module_i2c_driver(max1363_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Maxim 1363 ADC");
MODULE_LICENSE("GPL v2");
-
-module_init(max1363_init);
-module_exit(max1363_exit);
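max1363 additionally adopts the new update_scan_mode() hook in struct iio_info: instead of matching the scan mask in a ring preenable callback, the core now tells the driver whenever the user changes the mask. A sketch of the wiring (hypothetical names; foo_match_mode() and foo_set_scan_mode() stand in for the driver's own helpers, and struct foo_state is assumed):

static int foo_update_scan_mode(struct iio_dev *indio_dev,
				const unsigned long *scan_mask)
{
	struct foo_state *st = iio_priv(indio_dev);

	st->current_mode = foo_match_mode(scan_mask, st->chip_info);	/* assumed lookup */
	if (!st->current_mode)
		return -EINVAL;

	return foo_set_scan_mode(st);					/* assumed helper */
}

static const struct iio_info foo_info = {
	.read_raw		= &foo_read_raw,
	.update_scan_mode	= &foo_update_scan_mode,
	.driver_module		= THIS_MODULE,
};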
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index df6893e058c..f730b3fb971 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -15,88 +15,25 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "max1363.h"
-int max1363_single_channel_from_ring(const long *mask, struct max1363_state *st)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret, index;
- u8 *ring_data;
- index = find_first_bit(mask, MAX1363_MAX_CHANNELS);
-
- if (!(test_bit(index, st->current_mode->modemask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
-
- count = bitmap_weight(mask, index - 1);
- if (st->chip_info->bits != 8)
- ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8)
- + (int)(ring_data[count*2 + 1]);
- else
- ret = ring_data[count];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
-
-/**
- * max1363_ring_preenable() - setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the nuber of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int max1363_ring_preenable(struct iio_dev *indio_dev)
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
{
struct max1363_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
- size_t d_size = 0;
- unsigned long numvals;
/*
* Need to figure out the current mode based upon the requested
* scan mask in iio_dev
*/
- st->current_mode = max1363_match_mode(ring->scan_mask,
- st->chip_info);
+ st->current_mode = max1363_match_mode(scan_mask, st->chip_info);
if (!st->current_mode)
return -EINVAL;
-
max1363_set_scan_mode(st);
-
- numvals = bitmap_weight(st->current_mode->modemask,
- indio_dev->masklength);
- if (ring->access->set_bytes_per_datum) {
- if (ring->scan_timestamp)
- d_size += sizeof(s64);
- if (st->chip_info->bits != 8)
- d_size += numvals*2;
- else
- d_size += numvals;
- if (ring->scan_timestamp && (d_size % 8))
- d_size += 8 - (d_size % 8);
- ring->access->set_bytes_per_datum(ring, d_size);
- }
-
return 0;
}
@@ -114,12 +51,14 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
/* Ensure the timestamp is 8 byte aligned */
if (st->chip_info->bits != 8)
- d_size = numvals*2 + sizeof(s64);
+ d_size = numvals*2;
else
- d_size = numvals + sizeof(s64);
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
-
+ d_size = numvals;
+ if (indio_dev->buffer->scan_timestamp) {
+ d_size += sizeof(s64);
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
/* Monitor mode prevents reading. Whilst not currently implemented
* might as well have this test in here in the meantime as it does
* no harm.
@@ -139,9 +78,10 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
time_ns = iio_get_time_ns();
- memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ if (indio_dev->buffer->scan_timestamp)
+ memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ iio_push_to_buffer(indio_dev->buffer, rxbuf, time_ns);
- indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
@@ -151,7 +91,7 @@ done:
static const struct iio_buffer_setup_ops max1363_ring_setup_ops = {
.postenable = &iio_triggered_buffer_postenable,
- .preenable = &max1363_ring_preenable,
+ .preenable = &iio_sw_buffer_preenable,
.predisable = &iio_triggered_buffer_predisable,
};
@@ -179,7 +119,7 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &max1363_ring_setup_ops;
+ indio_dev->setup_ops = &max1363_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
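The trigger-handler changes in the same hunk show the new data path: the timestamp is appended only when the buffer actually has scan_timestamp enabled, and the completed scan is handed to the core via iio_push_to_buffer() instead of calling the buffer's store_to() method directly. Reduced to a sketch:

static void foo_push_scan(struct iio_dev *indio_dev, u8 *buf,
			  size_t d_size, s64 time_ns)
{
	/* timestamp only if the user enabled it for this buffer */
	if (indio_dev->buffer->scan_timestamp)
		memcpy(buf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));

	/* core helper; was indio_dev->buffer->access->store_to(...) */
	iio_push_to_buffer(indio_dev->buffer, buf, time_ns);
}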
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 07d718e8fb4..2c03a39220e 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -151,21 +151,9 @@ static struct i2c_driver adt7316_driver = {
.resume = adt7316_i2c_resume,
.id_table = adt7316_i2c_id,
};
-
-static __init int adt7316_i2c_init(void)
-{
- return i2c_add_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_i2c_exit(void)
-{
- i2c_del_driver(&adt7316_driver);
-}
+module_i2c_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and"
"ADT7516/7/8 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_i2c_init);
-module_exit(adt7316_i2c_exit);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 369d4d01ed9..1ea3cd06299 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -151,7 +151,6 @@ static int adt7316_spi_resume(struct spi_device *spi_dev)
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
@@ -160,21 +159,9 @@ static struct spi_driver adt7316_driver = {
.resume = adt7316_spi_resume,
.id_table = adt7316_spi_id,
};
-
-static __init int adt7316_spi_init(void)
-{
- return spi_register_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_spi_exit(void)
-{
- spi_unregister_driver(&adt7316_driver);
-}
+module_spi_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and"
"ADT7516/7/9 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_spi_init);
-module_exit(adt7316_spi_exit);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 8df24704ff2..13c39292d3f 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include "../iio.h"
+#include "../events.h"
#include "../sysfs.h"
#include "adt7316.h"
diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer.h
index 9e8f0100997..6fb6e64181a 100644
--- a/drivers/staging/iio/buffer_generic.h
+++ b/drivers/staging/iio/buffer.h
@@ -11,7 +11,6 @@
#define _IIO_BUFFER_GENERIC_H_
#include <linux/sysfs.h>
#include "iio.h"
-#include "chrdev.h"
#ifdef CONFIG_IIO_BUFFER
@@ -19,22 +18,14 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using buffer
* @store_to: actually store stuff to the buffer
- * @read_last: get the last element stored
- * @read_first_n: try to get a specified number of elements (must exist)
- * @mark_param_change: notify buffer that some relevant parameter has changed
- * Often this means the underlying storage may need to
- * change.
+ * @read_first_n: try to get a specified number of bytes (must exist)
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @get_bytes_per_datum:get current bytes per datum
* @set_bytes_per_datum:set number of bytes per datum
* @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
- * @is_enabled: query if buffer is currently being used
- * @enable: enable the buffer
*
* The purpose of this structure is to make the buffer element
* modular as event for a given driver, different usecases may require
@@ -45,85 +36,60 @@ struct iio_buffer;
* any of them not existing.
**/
struct iio_buffer_access_funcs {
- void (*mark_in_use)(struct iio_buffer *buffer);
- void (*unmark_in_use)(struct iio_buffer *buffer);
-
int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
- int (*read_last)(struct iio_buffer *buffer, u8 *data);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
- int (*mark_param_change)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
int (*get_bytes_per_datum)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
-
- int (*is_enabled)(struct iio_buffer *buffer);
- int (*enable)(struct iio_buffer *buffer);
-};
-
-/**
- * struct iio_buffer_setup_ops - buffer setup related callbacks
- * @preenable: [DRIVER] function to run prior to marking buffer enabled
- * @postenable: [DRIVER] function to run after marking buffer enabled
- * @predisable: [DRIVER] function to run prior to marking buffer
- * disabled
- * @postdisable: [DRIVER] function to run after marking buffer disabled
- */
-struct iio_buffer_setup_ops {
- int (*preenable)(struct iio_dev *);
- int (*postenable)(struct iio_dev *);
- int (*predisable)(struct iio_dev *);
- int (*postdisable)(struct iio_dev *);
};
/**
* struct iio_buffer - general buffer structure
- * @indio_dev: industrial I/O device structure
- * @owner: module that owns the buffer (for ref counting)
* @length: [DEVICE] number of datums in buffer
* @bytes_per_datum: [DEVICE] size of individual datum including timestamp
- * @bpe: [DEVICE] size of individual channel value
* @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
* control method is used
- * @scan_count: [INTERN] the number of elements in the current scan mode
* @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @scan_timestamp: [INTERN] does the scan mode include a timestamp
* @access: [DRIVER] buffer access functions associated with the
* implementation.
- * @flags: [INTERN] file ops related flags including busy flag.
+ * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @scan_el_group: [DRIVER] attribute group for those attributes not
+ * created from the iio_chan_info array.
+ * @pollq: [INTERN] wait queue to allow for polling on the buffer.
+ * @stufftoread: [INTERN] flag to indicate new data.
+ * @demux_list: [INTERN] list of operations required to demux the scan.
+ * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
**/
struct iio_buffer {
- struct iio_dev *indio_dev;
- struct module *owner;
int length;
int bytes_per_datum;
- int bpe;
struct attribute_group *scan_el_attrs;
- int scan_count;
long *scan_mask;
bool scan_timestamp;
+ unsigned scan_index_timestamp;
const struct iio_buffer_access_funcs *access;
- const struct iio_buffer_setup_ops *setup_ops;
struct list_head scan_el_dev_attr_list;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
- unsigned long flags;
const struct attribute_group *attrs;
+ struct list_head demux_list;
+ unsigned char *demux_bounce;
};
/**
* iio_buffer_init() - Initialize the buffer structure
* @buffer: buffer to be initialized
- * @indio_dev: the iio device the buffer is assocated with
**/
-void iio_buffer_init(struct iio_buffer *buffer,
- struct iio_dev *indio_dev);
+void iio_buffer_init(struct iio_buffer *buffer);
void iio_buffer_deinit(struct iio_buffer *buffer);
@@ -140,17 +106,27 @@ static inline void __iio_update_buffer(struct iio_buffer *buffer,
buffer->length = length;
}
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
-#define to_iio_buffer(d) \
- container_of(d, struct iio_buffer, dev)
+/**
+ * iio_push_to_buffer() - push to a registered buffer.
+ * @buffer: IIO buffer structure for device
+ * @scan: Full scan.
+ * @timestamp:
+ */
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp);
+
+int iio_update_demux(struct iio_dev *indio_dev);
/**
* iio_buffer_register() - register the buffer with IIO core
@@ -180,12 +156,6 @@ ssize_t iio_buffer_write_length(struct device *dev,
const char *buf,
size_t len);
/**
- * iio_buffer_read_bytes_per_datum() - attr for number of bytes in whole datum
- **/
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
* iio_buffer_store_enable() - attr to turn the buffer on
**/
ssize_t iio_buffer_store_enable(struct device *dev,
@@ -201,9 +171,6 @@ ssize_t iio_buffer_show_enable(struct device *dev,
#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
iio_buffer_read_length, \
iio_buffer_write_length)
-#define IIO_BUFFER_BYTES_PER_DATUM_ATTR \
- DEVICE_ATTR(bytes_per_datum, S_IRUGO | S_IWUSR, \
- iio_buffer_read_bytes_per_datum, NULL)
#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
iio_buffer_show_enable, \
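For drivers outside this patch, the visible fallout of the buffer.h rework is mostly at call sites: iio_buffer_init() loses its iio_dev argument, while iio_scan_mask_set() and iio_scan_mask_query() gain one. A sketch of the adjusted calls in a hypothetical wrapper:

static int foo_enable_scan_bit(struct iio_dev *indio_dev, int bit)
{
	int ret;

	/* both helpers now take the iio_dev explicitly */
	ret = iio_scan_mask_set(indio_dev, indio_dev->buffer, bit);
	if (ret)
		return ret;

	return iio_scan_mask_query(indio_dev, indio_dev->buffer, bit);	/* 1 if set */
}
/* likewise: iio_buffer_init(buffer); -- the iio_dev argument is gone */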
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index a761ca94a17..b73007dcf4b 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -15,7 +15,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* AD7150 registers definition
*/
@@ -111,7 +111,7 @@ static int ad7150_read_raw(struct iio_dev *indio_dev,
return ret;
*val = swab16(ret);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
ad7150_addresses[chan->channel][1]);
if (ret < 0)
@@ -429,7 +429,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
@@ -441,7 +441,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
@@ -657,20 +657,8 @@ static struct i2c_driver ad7150_driver = {
.remove = __devexit_p(ad7150_remove),
.id_table = ad7150_id,
};
-
-static __init int ad7150_init(void)
-{
- return i2c_add_driver(&ad7150_driver);
-}
-
-static __exit void ad7150_exit(void)
-{
- i2c_del_driver(&ad7150_driver);
-}
+module_i2c_driver(ad7150_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7150_init);
-module_exit(ad7150_exit);
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 662584d72a7..fdb83c35e6d 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -259,7 +259,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -276,7 +276,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -289,7 +289,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
if (val != 0) {
ret = -EINVAL;
goto out;
@@ -372,7 +372,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_GAIN]);
@@ -384,7 +384,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_OFFS]);
if (ret < 0)
@@ -393,7 +393,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
ret = i2c_smbus_read_byte_data(chip->client,
ad7152_addresses[chan->channel][AD7152_SETUP]);
if (ret < 0)
@@ -416,7 +416,7 @@ static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
long mask)
{
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
default:
return IIO_VAL_INT_PLUS_MICRO;
@@ -436,34 +436,34 @@ static const struct iio_chan_spec ad7152_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}
};
/*
@@ -540,20 +540,8 @@ static struct i2c_driver ad7152_driver = {
.remove = __devexit_p(ad7152_remove),
.id_table = ad7152_id,
};
-
-static __init int ad7152_init(void)
-{
- return i2c_add_driver(&ad7152_driver);
-}
-
-static __exit void ad7152_exit(void)
-{
- i2c_del_driver(&ad7152_driver);
-}
+module_i2c_driver(ad7152_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7152_init);
-module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 2867943309c..40b8512cbc3 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -123,7 +123,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_EXT_VIN,
},
@@ -132,7 +132,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_VDD_MON,
},
@@ -156,10 +156,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8,
},
[CIN1_DIFF] = {
@@ -168,10 +168,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF
},
@@ -179,10 +179,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CIN2,
},
@@ -192,10 +192,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
}
@@ -477,7 +477,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -503,7 +503,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -515,7 +515,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if ((val < 0) | (val > 43008000)) { /* 21pF */
ret = -EINVAL;
goto out;
@@ -612,7 +612,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
reg = AD7746_REG_CAP_GAINH;
@@ -634,7 +634,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
AD7746_REG_CAP_OFFH);
if (ret < 0)
@@ -643,13 +643,13 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
[chan->differential]) * 338646;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
@@ -788,20 +788,8 @@ static struct i2c_driver ad7746_driver = {
.remove = __devexit_p(ad7746_remove),
.id_table = ad7746_id,
};
-
-static __init int ad7746_init(void)
-{
- return i2c_add_driver(&ad7746_driver);
-}
-
-static __exit void ad7746_exit(void)
-{
- i2c_del_driver(&ad7746_driver);
-}
+module_i2c_driver(ad7746_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7746_init);
-module_exit(ad7746_exit);
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
deleted file mode 100644
index d8e736f6052..00000000000
--- a/drivers/staging/iio/chrdev.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* The industrial I/O core - character device related
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#ifndef _IIO_CHRDEV_H_
-#define _IIO_CHRDEV_H_
-
-/**
- * struct iio_event_data - The actual event being pushed to userspace
- * @id: event identifier
- * @timestamp: best estimate of time of event occurrence (often from
- * the interrupt handler)
- */
-struct iio_event_data {
- u64 id;
- s64 timestamp;
-};
-
-#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
-#endif
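chrdev.h is deleted outright; the struct iio_event_data and IIO_GET_EVENT_FD_IOCTL definitions it carried are presumably what the new "../events.h" include supplies in the drivers touched above (events.h itself is not part of this excerpt). In driver code the only visible change is the include swap:

	#include "../iio.h"
	#include "../sysfs.h"
	#include "../events.h"	/* event definitions, replacing what chrdev.h used to declare */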
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index fac8549dc8e..13e27979df2 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -24,6 +24,29 @@ config AD5360
To compile this driver as module choose M here: the module will be called
ad5360.
+config AD5380
+ tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
+ depends on (SPI_MASTER || I2C)
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5380, AD5381,
+ AD5382, AD5383, AD5384, AD5390, AD5391, AD5392 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5380.
+
+config AD5421
+ tristate "Analog Devices AD5421 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5421 loop-powered
+ digital-to-analog convertors (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5421.
+
config AD5624R_SPI
tristate "Analog Devices AD5624/44/64R DAC spi driver"
depends on SPI
@@ -37,7 +60,7 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5444, AD5446,
AD5512A, AD5542A, AD5543, AD5553, AD5601, AD5611, AD5620, AD5621,
- AD5640, AD5660 DACs.
+ AD5640, AD5660, AD5662 DACs.
To compile this driver as a module, choose M here: the
module will be called ad5446.
@@ -52,12 +75,22 @@ config AD5504
To compile this driver as a module, choose M here: the
module will be called ad5504.
+config AD5764
+ tristate "Analog Devices AD5764/64R/44/44R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5764, AD5764R, AD5744,
+ AD5744R Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5764.
+
config AD5791
- tristate "Analog Devices AD5760/AD5780/AD5781/AD5791 DAC SPI driver"
+ tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD5760, AD5780,
- AD5781, AD5791 High Resolution Voltage Output Digital to
+ AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
Analog Converter.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
index 07b6f5ebdb5..8ab1d264aab 100644
--- a/drivers/staging/iio/dac/Makefile
+++ b/drivers/staging/iio/dac/Makefile
@@ -3,10 +3,13 @@
#
obj-$(CONFIG_AD5360) += ad5360.o
+obj-$(CONFIG_AD5380) += ad5380.o
+obj-$(CONFIG_AD5421) += ad5421.o
obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
+obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 24279f2ae41..049a855039c 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -91,7 +91,7 @@ enum ad5064_type {
.indexed = 1, \
.output = 1, \
.channel = (chan), \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = AD5064_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
}
@@ -280,14 +280,14 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int vref;
+ int scale_uv;
switch (m) {
case 0:
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
vref = st->chip_info->shared_vref ? 0 : chan->channel;
scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
if (scale_uv < 0)
@@ -445,18 +445,7 @@ static struct spi_driver ad5064_driver = {
.remove = __devexit_p(ad5064_remove),
.id_table = ad5064_id,
};
-
-static __init int ad5064_spi_init(void)
-{
- return spi_register_driver(&ad5064_driver);
-}
-module_init(ad5064_spi_init);
-
-static __exit void ad5064_spi_exit(void)
-{
- spi_unregister_driver(&ad5064_driver);
-}
-module_exit(ad5064_spi_exit);
+module_spi_driver(ad5064_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 72d0f3f0d6a..710b256affc 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -103,10 +103,10 @@ enum ad5360_type {
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
.scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
}
@@ -326,21 +326,21 @@ static int ad5360_write_raw(struct iio_dev *indio_dev,
return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (val <= -max_val || val > 0)
return -EINVAL;
@@ -371,8 +371,8 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int ofs_index;
+ int scale_uv;
int ret;
switch (m) {
@@ -383,7 +383,7 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret >> chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
/* vout = 4 * vref * dac_code */
scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
if (scale_uv < 0)
@@ -393,21 +393,21 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
*val = scale_uv / 100000;
*val2 = (scale_uv % 100000) * 10;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
AD5360_REG_SF_OFS(ofs_index));
@@ -563,18 +563,7 @@ static struct spi_driver ad5360_driver = {
.remove = __devexit_p(ad5360_remove),
.id_table = ad5360_ids,
};
-
-static __init int ad5360_spi_init(void)
-{
- return spi_register_driver(&ad5360_driver);
-}
-module_init(ad5360_spi_init);
-
-static __exit void ad5360_spi_exit(void)
-{
- spi_unregister_driver(&ad5360_driver);
-}
-module_exit(ad5360_spi_exit);
+module_spi_driver(ad5360_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
new file mode 100644
index 00000000000..eff97ae05c4
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -0,0 +1,676 @@
+/*
+ * Analog Devices AD5380, AD5381, AD5382, AD5383, AD5384, AD5390, AD5391,
+ * AD5392 multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+
+#define AD5380_REG_DATA(x) (((x) << 2) | 3)
+#define AD5380_REG_OFFSET(x) (((x) << 2) | 2)
+#define AD5380_REG_GAIN(x) (((x) << 2) | 1)
+#define AD5380_REG_SF_PWR_DOWN (8 << 2)
+#define AD5380_REG_SF_PWR_UP (9 << 2)
+#define AD5380_REG_SF_CTRL (12 << 2)
+
+#define AD5380_CTRL_PWR_DOWN_MODE_OFFSET 13
+#define AD5380_CTRL_INT_VREF_2V5 BIT(12)
+#define AD5380_CTRL_INT_VREF_EN BIT(10)
+
+/**
+ * struct ad5380_chip_info - chip specific information
+ * @channel_template: channel specification template
+ * @num_channels: number of channels
+ * @int_vref: internal vref in uV
+ */
+
+struct ad5380_chip_info {
+ struct iio_chan_spec channel_template;
+ unsigned int num_channels;
+ unsigned int int_vref;
+};
+
+/**
+ * struct ad5380_state - driver instance specific data
+ * @regmap: regmap instance used by the device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulator
+ * @vref: actual reference voltage used in uV
+ * @pwr_down: whether the chip is currently in power down mode
+ */
+
+struct ad5380_state {
+ struct regmap *regmap;
+ const struct ad5380_chip_info *chip_info;
+ struct regulator *vref_reg;
+ int vref;
+ bool pwr_down;
+};
+
+enum ad5380_type {
+ ID_AD5380_3,
+ ID_AD5380_5,
+ ID_AD5381_3,
+ ID_AD5381_5,
+ ID_AD5382_3,
+ ID_AD5382_5,
+ ID_AD5383_3,
+ ID_AD5383_5,
+ ID_AD5390_3,
+ ID_AD5390_5,
+ ID_AD5391_3,
+ ID_AD5391_5,
+ ID_AD5392_3,
+ ID_AD5392_5,
+};
+
+#define AD5380_CHANNEL(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)) \
+}
+
+static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
+ [ID_AD5380_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 1250000,
+ },
+ [ID_AD5380_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 2500000,
+ },
+ [ID_AD5381_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5381_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5382_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5382_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5383_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5383_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5390_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5390_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5391_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5391_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5392_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 1250000,
+ },
+ [ID_AD5392_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 2500000,
+ },
+};
+
+static ssize_t ad5380_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5380_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (pwr_down)
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
+ else
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_UP, 0);
+
+ st->pwr_down = pwr_down;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_dac_powerdown,
+ ad5380_write_dac_powerdown, 0);
+
+static const char ad5380_powerdown_modes[][15] = {
+ [0] = "100kohm_to_gnd",
+ [1] = "three_state",
+};
+
+static ssize_t ad5380_read_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int mode;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD5380_REG_SF_CTRL, &mode);
+ if (ret)
+ return ret;
+
+ mode = (mode >> AD5380_CTRL_PWR_DOWN_MODE_OFFSET) & 1;
+
+ return sprintf(buf, "%s\n", ad5380_powerdown_modes[mode]);
+}
+
+static ssize_t ad5380_write_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad5380_powerdown_modes); ++i) {
+ if (sysfs_streq(buf, ad5380_powerdown_modes[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ad5380_powerdown_modes))
+ return -EINVAL;
+
+ ret = regmap_update_bits(st->regmap, AD5380_REG_SF_CTRL,
+ 1 << AD5380_CTRL_PWR_DOWN_MODE_OFFSET,
+ i << AD5380_CTRL_PWR_DOWN_MODE_OFFSET);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_powerdown_mode,
+ ad5380_write_powerdown_mode, 0);
+
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
+ "100kohm_to_gnd three_state");
+
+static struct attribute *ad5380_attributes[] = {
+ &iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5380_attribute_group = {
+ .attrs = ad5380_attributes,
+};
+
+static unsigned int ad5380_info_to_reg(struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case 0:
+ return AD5380_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5380_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5380_REG_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const unsigned int max_val = (1 << chan->scan_type.realbits);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ ad5380_info_to_reg(chan, info),
+ val << chan->scan_type.shift);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val += (1 << chan->scan_type.realbits) / 2;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ AD5380_REG_OFFSET(chan->address),
+ val << chan->scan_type.shift);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int ad5380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ int ret;
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = regmap_read(st->regmap, ad5380_info_to_reg(chan, info),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(st->regmap, AD5380_REG_OFFSET(chan->address),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+		*val -= (1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = ((2 * st->vref) >> chan->scan_type.realbits) * 100;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5380_info = {
+ .read_raw = ad5380_read_raw,
+ .write_raw = ad5380_write_raw,
+ .attrs = &ad5380_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ struct iio_chan_spec *channels;
+ unsigned int i;
+
+	channels = kcalloc(st->chip_info->num_channels,
+			sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
+ channels[i] = st->chip_info->channel_template;
+ channels[i].channel = i;
+ channels[i].address = i;
+ }
+
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
+ enum ad5380_type type, const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct ad5380_state *st;
+ unsigned int ctrl = 0;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(dev, "Failed to allocate iio device\n");
+ ret = -ENOMEM;
+ goto error_regmap_exit;
+ }
+
+ st = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+
+ st->chip_info = &ad5380_chip_info_tbl[type];
+ st->regmap = regmap;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &ad5380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ ret = ad5380_alloc_channels(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
+ goto error_free;
+ }
+
+ if (st->chip_info->int_vref == 2500000)
+ ctrl |= AD5380_CTRL_INT_VREF_2V5;
+
+ st->vref_reg = regulator_get(dev, "vref");
+ if (!IS_ERR(st->vref_reg)) {
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+
+ st->vref = regulator_get_voltage(st->vref_reg);
+ } else {
+ st->vref = st->chip_info->int_vref;
+ ctrl |= AD5380_CTRL_INT_VREF_EN;
+ }
+
+ ret = regmap_write(st->regmap, AD5380_REG_SF_CTRL, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to write to device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_disable(st->vref_reg);
+error_free_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_put(st->vref_reg);
+
+ kfree(indio_dev->channels);
+error_free:
+ iio_free_device(indio_dev);
+error_regmap_exit:
+ regmap_exit(regmap);
+
+ return ret;
+}
+
+static int __devexit ad5380_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ kfree(indio_dev->channels);
+
+ if (!IS_ERR(st->vref_reg)) {
+ regulator_disable(st->vref_reg);
+ regulator_put(st->vref_reg);
+ }
+
+ regmap_exit(st->regmap);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static bool ad5380_reg_false(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static const struct regmap_config ad5380_regmap_config = {
+ .reg_bits = 10,
+ .val_bits = 14,
+
+ .max_register = AD5380_REG_DATA(40),
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = ad5380_reg_false,
+ .readable_reg = ad5380_reg_false,
+};
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int __devinit ad5380_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ regmap = regmap_init_spi(spi, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_spi_remove(struct spi_device *spi)
+{
+ return ad5380_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5380_spi_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5380_spi_ids);
+
+static struct spi_driver ad5380_spi_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_spi_probe,
+ .remove = __devexit_p(ad5380_spi_remove),
+ .id_table = ad5380_spi_ids,
+};
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return spi_register_driver(&ad5380_spi_driver);
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+ spi_unregister_driver(&ad5380_spi_driver);
+}
+
+#else
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = regmap_init_i2c(i2c, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5380_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5380_i2c_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5380_i2c_ids);
+
+static struct i2c_driver ad5380_i2c_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_i2c_probe,
+ .remove = __devexit_p(ad5380_i2c_remove),
+ .id_table = ad5380_i2c_ids,
+};
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return i2c_add_driver(&ad5380_i2c_driver);
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+ i2c_del_driver(&ad5380_i2c_driver);
+}
+
+#else
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init ad5380_init(void)
+{
+	int ret;
+
+	ret = ad5380_spi_register_driver();
+	if (ret)
+		return ret;
+
+	ret = ad5380_i2c_register_driver();
+	if (ret) {
+		ad5380_spi_unregister_driver();
+		return ret;
+	}
+
+	return 0;
+}
+module_init(ad5380_init);
+
+static void __exit ad5380_exit(void)
+{
+	ad5380_i2c_unregister_driver();
+	ad5380_spi_unregister_driver();
+}
+module_exit(ad5380_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5380/81/82/83/84/90/91/92 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
new file mode 100644
index 00000000000..71ee8682476
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -0,0 +1,555 @@
+/*
+ * AD5421 Digital to analog converter driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../events.h"
+#include "dac.h"
+#include "ad5421.h"
+
+
+#define AD5421_REG_DAC_DATA 0x1
+#define AD5421_REG_CTRL 0x2
+#define AD5421_REG_OFFSET 0x3
+#define AD5421_REG_GAIN 0x4
+/* The load DAC and fault registers share the same address. Writing to it
+ * issues a load DAC command, reading from it returns the fault status. */
+#define AD5421_REG_LOAD_DAC 0x5
+#define AD5421_REG_FAULT 0x5
+#define AD5421_REG_FORCE_ALARM_CURRENT 0x6
+#define AD5421_REG_RESET 0x7
+#define AD5421_REG_START_CONVERSION 0x8
+#define AD5421_REG_NOOP 0x9
+
+#define AD5421_CTRL_WATCHDOG_DISABLE BIT(12)
+#define AD5421_CTRL_AUTO_FAULT_READBACK BIT(11)
+#define AD5421_CTRL_MIN_CURRENT BIT(9)
+#define AD5421_CTRL_ADC_SOURCE_TEMP BIT(8)
+#define AD5421_CTRL_ADC_ENABLE BIT(7)
+#define AD5421_CTRL_PWR_DOWN_INT_VREF BIT(6)
+
+#define AD5421_FAULT_SPI BIT(15)
+#define AD5421_FAULT_PEC BIT(14)
+#define AD5421_FAULT_OVER_CURRENT BIT(13)
+#define AD5421_FAULT_UNDER_CURRENT BIT(12)
+#define AD5421_FAULT_TEMP_OVER_140 BIT(11)
+#define AD5421_FAULT_TEMP_OVER_100 BIT(10)
+#define AD5421_FAULT_UNDER_VOLTAGE_6V BIT(9)
+#define AD5421_FAULT_UNDER_VOLTAGE_12V BIT(8)
+
+/* These bits will cause the fault pin to go high */
+#define AD5421_FAULT_TRIGGER_IRQ \
+ (AD5421_FAULT_SPI | AD5421_FAULT_PEC | AD5421_FAULT_OVER_CURRENT | \
+ AD5421_FAULT_UNDER_CURRENT | AD5421_FAULT_TEMP_OVER_140)
+
+/**
+ * struct ad5421_state - driver instance specific data
+ * @spi: spi_device
+ * @ctrl: control register cache
+ * @current_range: current range which the device is configured for
+ * @data: spi transfer buffers
+ * @fault_mask: software masking of events
+ */
+struct ad5421_state {
+ struct spi_device *spi;
+ unsigned int ctrl;
+ enum ad5421_current_range current_range;
+ unsigned int fault_mask;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec ad5421_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
+ .scan_type = IIO_ST('u', 16, 16, 0),
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ },
+ {
+ .type = IIO_TEMP,
+ .channel = -1,
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
+ },
+};
+
+static int ad5421_write_unlocked(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int val)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5421_write_unlocked(indio_dev, reg, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ unsigned int clr)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+	int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->ctrl &= ~clr;
+ st->ctrl |= set;
+
+ ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static irqreturn_t ad5421_fault_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int fault;
+ unsigned int old_fault = 0;
+ unsigned int events;
+
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+ if (!fault)
+ return IRQ_NONE;
+
+ /* If we had a fault, this might mean that the DAC has lost its state
+ * and has been reset. Make sure that the control register actually
+ * contains what we expect it to contain. Otherwise the watchdog might
+ * be enabled and we get watchdog timeout faults, which will render the
+ * DAC unusable. */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+	/* The fault pin stays high as long as a fault condition is present and
+	 * it is not possible to mask fault conditions. For certain fault
+	 * conditions, such as over-temperature, it takes some time until the
+	 * fault condition disappears. If we exited the interrupt handler
+	 * immediately after handling the event it would be entered again
+	 * instantly. Thus we fall back to polling in case we detect that an
+	 * interrupt condition is still present.
+	 */
+ do {
+		/* 0xffff is an invalid value for the register and will only be
+		 * read if there has been a communication error */
+ if (fault == 0xffff)
+ fault = 0;
+
+ /* we are only interested in new events */
+ events = (old_fault ^ fault) & fault;
+ events &= st->fault_mask;
+
+ if (events & AD5421_FAULT_OVER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_UNDER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_TEMP_OVER_140) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+					IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ old_fault = fault;
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+
+ /* still active? go to sleep for some time */
+ if (fault & AD5421_FAULT_TRIGGER_IRQ)
+ msleep(1000);
+
+ } while (fault & AD5421_FAULT_TRIGGER_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+static void ad5421_get_current_min_max(struct ad5421_state *st,
+ unsigned int *min, unsigned int *max)
+{
+ /* The current range is configured using external pins, which are
+ * usually hard-wired and not run-time switchable. */
+ switch (st->current_range) {
+ case AD5421_CURRENT_RANGE_4mA_20mA:
+ *min = 4000;
+ *max = 20000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA8_21mA:
+ *min = 3800;
+ *max = 21000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA2_24mA:
+ *min = 3200;
+ *max = 24000;
+ break;
+ default:
+ *min = 0;
+ *max = 1;
+ break;
+ }
+}
+
+static inline unsigned int ad5421_get_offset(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return (min * (1 << 16)) / (max - min);
+}
+
+static inline unsigned int ad5421_get_scale(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return ((max - min) * 1000) / (1 << 16);
+}
+
+static int ad5421_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long m)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ switch (m) {
+ case 0:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = ad5421_get_scale(st);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5421_get_offset(st);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = ad5421_read(indio_dev, AD5421_REG_OFFSET);
+ if (ret < 0)
+ return ret;
+ *val = ret - 32768;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = ad5421_read(indio_dev, AD5421_REG_GAIN);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ const unsigned int max_val = 1 << 16;
+
+ switch (mask) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_DAC_DATA, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val += 32768;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_OFFSET, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_GAIN, val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ if (state)
+ st->fault_mask |= mask;
+ else
+ st->fault_mask &= ~mask;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static int ad5421_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (bool)(st->fault_mask & mask);
+}
+
+static int ad5421_read_event_value(struct iio_dev *indio_dev, u64 event_code,
+ int *val)
+{
+ int ret;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ case IIO_TEMP:
+ *val = 140000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad5421_info = {
+ .read_raw = ad5421_read_raw,
+ .write_raw = ad5421_write_raw,
+ .read_event_config = ad5421_read_event_config,
+ .write_event_config = ad5421_write_event_config,
+ .read_event_value = ad5421_read_event_value,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5421_probe(struct spi_device *spi)
+{
+ struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct iio_dev *indio_dev;
+ struct ad5421_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = "ad5421";
+ indio_dev->info = &ad5421_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5421_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+
+ st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
+ AD5421_CTRL_AUTO_FAULT_READBACK;
+
+ if (pdata) {
+ st->current_range = pdata->current_range;
+ if (pdata->external_vref)
+ st->ctrl |= AD5421_CTRL_PWR_DOWN_INT_VREF;
+ } else {
+ st->current_range = AD5421_CURRENT_RANGE_4mA_20mA;
+ }
+
+ /* write initial ctrl register value */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ ad5421_fault_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ad5421 fault",
+ indio_dev);
+ if (ret)
+ goto error_free;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5421_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver ad5421_driver = {
+ .driver = {
+ .name = "ad5421",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5421_probe,
+ .remove = __devexit_p(ad5421_remove),
+};
+
+static __init int ad5421_init(void)
+{
+ return spi_register_driver(&ad5421_driver);
+}
+module_init(ad5421_init);
+
+static __exit void ad5421_exit(void)
+{
+ spi_unregister_driver(&ad5421_driver);
+}
+module_exit(ad5421_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5421");
diff --git a/drivers/staging/iio/dac/ad5421.h b/drivers/staging/iio/dac/ad5421.h
new file mode 100644
index 00000000000..cd2bb84ff1b
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.h
@@ -0,0 +1,32 @@
+#ifndef __IIO_DAC_AD5421_H__
+#define __IIO_DAC_AD5421_H__
+
+/*
+ * TODO: This file needs to go into include/linux/iio
+ */
+
+/**
+ * enum ad5421_current_range - Current range the AD5421 is configured for.
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00)
+ * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1)
+ * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10)
+ */
+
+enum ad5421_current_range {
+ AD5421_CURRENT_RANGE_4mA_20mA,
+ AD5421_CURRENT_RANGE_3mA8_21mA,
+ AD5421_CURRENT_RANGE_3mA2_24mA,
+};
+
+/**
+ * struct ad5421_platform_data - AD5421 DAC driver platform data
+ * @external_vref: whether an external reference voltage is used or not
+ * @current_range: Current range the AD5421 is configured for
+ */
+
+struct ad5421_platform_data {
+ bool external_vref;
+ enum ad5421_current_range current_range;
+};
+
+#endif
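For reference, platform code would hand this structure to the driver through spi_board_info; a hypothetical example follows (the chip select, bus number and SPI clock rate are invented, and the range must match how the RANGE1/RANGE0 pins are strapped on the board):

	#include <linux/spi/spi.h>
	#include "ad5421.h"

	static struct ad5421_platform_data board_ad5421_pdata = {
		.external_vref	= false,
		.current_range	= AD5421_CURRENT_RANGE_4mA_20mA,
	};

	static struct spi_board_info board_spi_devs[] __initdata = {
		{
			.modalias	= "ad5421",
			.max_speed_hz	= 1000000,	/* example rate */
			.bus_num	= 0,
			.chip_select	= 1,
			.platform_data	= &board_ad5421_pdata,
		},
	};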
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e1c204d51d8..693e7482524 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -26,19 +26,17 @@
static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5446_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5446_LOAD | val);
}
static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(val << st->chip_info->left_shift);
+ st->data.d16 = cpu_to_be16(val);
}
static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5620_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5620_LOAD | val);
}
static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
@@ -63,50 +61,6 @@ static void ad5660_store_pwr_down(struct ad5446_state *st, unsigned mode)
st->data.d24[2] = val & 0xFF;
}
-static ssize_t ad5446_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- int ret;
- long val;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- if (val > RES_MASK(st->chip_info->bits)) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- mutex_lock(&indio_dev->mlock);
- st->cached_val = val;
- st->chip_info->store_sample(st, val);
- ret = spi_sync(st->spi, &st->msg);
- mutex_unlock(&indio_dev->mlock);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0);
-
-static ssize_t ad5446_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
-
static ssize_t ad5446_write_powerdown_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -189,22 +143,20 @@ static IIO_DEVICE_ATTR(out_voltage0_powerdown, S_IRUGO | S_IWUSR,
ad5446_write_dac_powerdown, 0);
static struct attribute *ad5446_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
-static mode_t ad5446_attr_is_visible(struct kobject *kobj,
+static umode_t ad5446_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5446_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!st->chip_info->store_pwr_down &&
(attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
@@ -223,121 +175,148 @@ static const struct attribute_group ad5446_attribute_group = {
.is_visible = ad5446_attr_is_visible,
};
+#define AD5446_CHANNEL(bits, storage, shift) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .scan_type = IIO_ST('u', (bits), (storage), (shift)) \
+}
+
static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
[ID_AD5444] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5446_store_sample,
},
[ID_AD5446] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5446_store_sample,
},
[ID_AD5541A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5542A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5543] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5512A] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(12, 16, 4),
.store_sample = ad5542_store_sample,
},
[ID_AD5553] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5601] = {
- .bits = 8,
- .storagebits = 16,
- .left_shift = 6,
+ .channel = AD5446_CHANNEL(8, 16, 6),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5611] = {
- .bits = 10,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(10, 16, 4),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5621] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_2500] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_1250] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_2500] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_1250] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5660_2500] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
[ID_AD5660_1250] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
};
+static int ad5446_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+	}
+ return -EINVAL;
+}
+
+static int ad5446_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ val <<= chan->scan_type.shift;
+ mutex_lock(&indio_dev->mlock);
+ st->cached_val = val;
+ st->chip_info->store_sample(st, val);
+ ret = spi_sync(st->spi, &st->msg);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static const struct iio_info ad5446_info = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
.attrs = &ad5446_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -376,11 +355,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5446_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &st->chip_info->channel;
+ indio_dev->num_channels = 1;
/* Setup default message */
st->xfer.tx_buf = &st->data;
- st->xfer.len = st->chip_info->storagebits / 8;
+ st->xfer.len = st->chip_info->channel.scan_type.storagebits / 8;
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
@@ -454,31 +435,19 @@ static const struct spi_device_id ad5446_id[] = {
{"ad5660-1250", ID_AD5660_1250},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5446_id);
static struct spi_driver ad5446_driver = {
.driver = {
.name = "ad5446",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad5446_probe,
.remove = __devexit_p(ad5446_remove),
.id_table = ad5446_id,
};
-
-static int __init ad5446_init(void)
-{
- return spi_register_driver(&ad5446_driver);
-}
-module_init(ad5446_init);
-
-static void __exit ad5446_exit(void)
-{
- spi_unregister_driver(&ad5446_driver);
-}
-module_exit(ad5446_exit);
+module_spi_driver(ad5446_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad5446");
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
index 7118d653ac3..4ea3476fb06 100644
--- a/drivers/staging/iio/dac/ad5446.h
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -25,8 +25,6 @@
#define AD5660_PWRDWN_100k (0x2 << 16) /* Power-down: 100kOhm to GND */
#define AD5660_PWRDWN_TRISTATE (0x3 << 16) /* Power-down: Three-state */
-#define RES_MASK(bits) ((1 << (bits)) - 1)
-
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -62,18 +60,14 @@ struct ad5446_state {
/**
* struct ad5446_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
- * @storagebits: number of bits written to the DAC
- * @left_shift: number of bits the datum must be shifted
+ * @channel: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
* @store_sample: chip specific helper function to store the datum
 * @store_pwr_down:	chip specific helper function to store the power-down cmd
*/
struct ad5446_chip_info {
- u8 bits;
- u8 storagebits;
- u8 left_shift;
+ struct iio_chan_spec channel;
u16 int_vref_mv;
void (*store_sample) (struct ad5446_state *st, unsigned val);
void (*store_pwr_down) (struct ad5446_state *st, unsigned mode);
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index 60dd6404d68..bc17205fe72 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -18,9 +18,27 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "dac.h"
#include "ad5504.h"
+#define AD5504_CHANNEL(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = AD5504_ADDR_DAC(_chan), \
+ .scan_type = IIO_ST('u', 12, 16, 0), \
+}
+
+static const struct iio_chan_spec ad5504_channels[] = {
+ AD5504_CHANNEL(0),
+ AD5504_CHANNEL(1),
+ AD5504_CHANNEL(2),
+ AD5504_CHANNEL(3),
+};
+
static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
{
u16 tmp = cpu_to_be16(AD5504_CMD_WRITE |
@@ -30,13 +48,14 @@ static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
return spi_write(spi, (u8 *)&tmp, 2);
}
-static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
+static int ad5504_spi_read(struct spi_device *spi, u8 addr)
{
u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ u16 val;
int ret;
struct spi_transfer t = {
.tx_buf = &tmp,
- .rx_buf = val,
+ .rx_buf = &val,
.len = 2,
};
struct spi_message m;
@@ -45,44 +64,61 @@ static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
- *val = be16_to_cpu(*val) & AD5504_RES_MASK;
+ if (ret < 0)
+ return ret;
- return ret;
+ return be16_to_cpu(val) & AD5504_RES_MASK;
}
-static ssize_t ad5504_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5504_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long readin;
+ unsigned long scale_uv;
int ret;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case 0:
+ ret = ad5504_spi_read(st->spi, chan->address);
+ if (ret < 0)
+ return ret;
- ret = ad5504_spi_write(st->spi, this_attr->address, readin);
- return ret ? ret : len;
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+	}
+ return -EINVAL;
}
-static ssize_t ad5504_read_dac(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad5504_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- u16 val;
- ret = ad5504_spi_read(st->spi, this_attr->address, &val);
- if (ret)
- return ret;
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5504_spi_write(st->spi, chan->address, val);
+ default:
+ ret = -EINVAL;
+ }
- return sprintf(buf, "%d\n", val);
+	return ret;
}
static ssize_t ad5504_read_powerdown_mode(struct device *dev,
@@ -157,32 +193,6 @@ static ssize_t ad5504_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5504_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> AD5505_BITS;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5504_show_scale, NULL, 0);
-
-#define IIO_DEV_ATTR_OUT_RW_RAW(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out_voltage##_num##_raw, \
- S_IRUGO | S_IWUSR, _show, _store, _addr)
-
-static IIO_DEV_ATTR_OUT_RW_RAW(0, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RW_RAW(1, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RW_RAW(2, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RW_RAW(3, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5504_read_powerdown_mode,
ad5504_write_powerdown_mode, 0);
@@ -203,17 +213,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5504_read_dac_powerdown,
ad5504_write_dac_powerdown, 3);
static struct attribute *ad5504_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -222,11 +227,9 @@ static const struct attribute_group ad5504_attribute_group = {
};
static struct attribute *ad5501_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -261,12 +264,16 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
}
static const struct iio_info ad5504_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5504_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad5501_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5501_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
@@ -307,10 +314,14 @@ static int __devinit ad5504_probe(struct spi_device *spi)
st->spi = spi;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(st->spi)->name;
- if (spi_get_device_id(st->spi)->driver_data == ID_AD5501)
+ if (spi_get_device_id(st->spi)->driver_data == ID_AD5501) {
indio_dev->info = &ad5501_info;
- else
+ indio_dev->num_channels = 1;
+ } else {
indio_dev->info = &ad5504_info;
+ indio_dev->num_channels = 4;
+ }
+ indio_dev->channels = ad5504_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
if (spi->irq) {
@@ -367,6 +378,7 @@ static const struct spi_device_id ad5504_id[] = {
{"ad5501", ID_AD5501},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5504_id);
static struct spi_driver ad5504_driver = {
.driver = {
@@ -377,18 +389,7 @@ static struct spi_driver ad5504_driver = {
.remove = __devexit_p(ad5504_remove),
.id_table = ad5504_id,
};
-
-static __init int ad5504_spi_init(void)
-{
- return spi_register_driver(&ad5504_driver);
-}
-module_init(ad5504_spi_init);
-
-static __exit void ad5504_spi_exit(void)
-{
- spi_unregister_driver(&ad5504_driver);
-}
-module_exit(ad5504_spi_exit);
+module_spi_driver(ad5504_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5501/AD5501 DAC");
diff --git a/drivers/staging/iio/dac/ad5504.h b/drivers/staging/iio/dac/ad5504.h
index 85beb1dd29b..afe09522f53 100644
--- a/drivers/staging/iio/dac/ad5504.h
+++ b/drivers/staging/iio/dac/ad5504.h
@@ -18,10 +18,7 @@
/* Registers */
#define AD5504_ADDR_NOOP 0
-#define AD5504_ADDR_DAC0 1
-#define AD5504_ADDR_DAC1 2
-#define AD5504_ADDR_DAC2 3
-#define AD5504_ADDR_DAC3 4
+#define AD5504_ADDR_DAC(x) ((x) + 1)
#define AD5504_ADDR_ALL_DAC 5
#define AD5504_ADDR_CTRL 7
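The parameterised AD5504_ADDR_DAC() macro reproduces the four defines it replaces; spelling the arithmetic out:

	/*
	 *   AD5504_ADDR_DAC(0) = 1   (was AD5504_ADDR_DAC0)
	 *   AD5504_ADDR_DAC(1) = 2   (was AD5504_ADDR_DAC1)
	 *   AD5504_ADDR_DAC(2) = 3   (was AD5504_ADDR_DAC2)
	 *   AD5504_ADDR_DAC(3) = 4   (was AD5504_ADDR_DAC3)
	 *
	 * which lets the AD5504_CHANNEL(_chan) macro in ad5504.c derive the
	 * register address directly from the channel index.
	 */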
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
index b71c6a03e78..5dca3028cdf 100644
--- a/drivers/staging/iio/dac/ad5624r.h
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -32,12 +32,12 @@
/**
* struct ad5624r_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
+ * @channels: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
*/
struct ad5624r_chip_info {
- u8 bits;
+ const struct iio_chan_spec *channels;
u16 int_vref_mv;
};
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index 284d8790036..10c7484366e 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -21,29 +21,51 @@
#include "dac.h"
#include "ad5624r.h"
+#define AD5624R_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = (_chan), \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+}
+
+#define DECLARE_AD5624R_CHANNELS(_name, _bits) \
+ const struct iio_chan_spec _name##_channels[] = { \
+ AD5624R_CHANNEL(0, _bits), \
+ AD5624R_CHANNEL(1, _bits), \
+ AD5624R_CHANNEL(2, _bits), \
+ AD5624R_CHANNEL(3, _bits), \
+}
+
+static DECLARE_AD5624R_CHANNELS(ad5624r, 12);
+static DECLARE_AD5624R_CHANNELS(ad5644r, 14);
+static DECLARE_AD5624R_CHANNELS(ad5664r, 16);
+
static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
[ID_AD5624R3] = {
- .bits = 12,
- .int_vref_mv = 1250,
- },
- [ID_AD5644R3] = {
- .bits = 14,
- .int_vref_mv = 1250,
- },
- [ID_AD5664R3] = {
- .bits = 16,
+ .channels = ad5624r_channels,
.int_vref_mv = 1250,
},
[ID_AD5624R5] = {
- .bits = 12,
+ .channels = ad5624r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5644R3] = {
+ .channels = ad5644r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5644R5] = {
- .bits = 14,
+ .channels = ad5644r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5664R3] = {
+ .channels = ad5664r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5664R5] = {
- .bits = 16,
+ .channels = ad5664r_channels,
.int_vref_mv = 2500,
},
};
@@ -70,24 +92,49 @@ static int ad5624r_spi_write(struct spi_device *spi,
return spi_write(spi, msg, 3);
}
-static ssize_t ad5624r_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5624r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- long readin;
- int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5624r_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long scale_uv;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
- ret = ad5624r_spi_write(st->us, AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
- this_attr->address, readin,
- st->chip_info->bits);
- return ret ? ret : len;
+ }
+ return -EINVAL;
+}
+
+static int ad5624r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5624r_spi_write(st->us,
+ AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val,
+ chan->scan_type.shift);
+ default:
+ ret = -EINVAL;
+ }
+
+ return -EINVAL;
}
static ssize_t ad5624r_read_powerdown_mode(struct device *dev,
@@ -161,24 +208,6 @@ static ssize_t ad5624r_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5624r_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5624r_show_scale, NULL, 0);
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5624r_read_powerdown_mode,
ad5624r_write_powerdown_mode, 0);
@@ -200,17 +229,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5624r_read_dac_powerdown,
ad5624r_write_dac_powerdown, 3);
static struct attribute *ad5624r_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -219,6 +243,8 @@ static const struct attribute_group ad5624r_attribute_group = {
};
static const struct iio_info ad5624r_info = {
+ .write_raw = ad5624r_write_raw,
+ .read_raw = ad5624r_read_raw,
.attrs = &ad5624r_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -259,6 +285,8 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5624r_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = AD5624R_DAC_CHANNELS;
ret = ad5624r_spi_write(spi, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
!!voltage_uv, 16);
@@ -307,6 +335,7 @@ static const struct spi_device_id ad5624r_id[] = {
{"ad5664r5", ID_AD5664R5},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5624r_id);
static struct spi_driver ad5624r_driver = {
.driver = {
@@ -317,18 +346,7 @@ static struct spi_driver ad5624r_driver = {
.remove = __devexit_p(ad5624r_remove),
.id_table = ad5624r_id,
};
-
-static __init int ad5624r_spi_init(void)
-{
- return spi_register_driver(&ad5624r_driver);
-}
-module_init(ad5624r_spi_init);
-
-static __exit void ad5624r_spi_exit(void)
-{
- spi_unregister_driver(&ad5624r_driver);
-}
-module_exit(ad5624r_spi_exit);
+module_spi_driver(ad5624r_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD5624/44/64R DAC spi driver");
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index 974c6f5b60c..ce2d6193dd8 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -99,7 +99,7 @@ enum ad5686_supported_device_ids {
.indexed = 1, \
.output = 1, \
.channel = chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = AD5686_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', bits, 16, shift) \
}
@@ -306,7 +306,7 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->vref_mv * 100000)
>> (chan->scan_type.realbits);
*val = scale_uv / 100000;
@@ -437,6 +437,7 @@ static const struct spi_device_id ad5686_id[] = {
{"ad5686", ID_AD5686},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5686_id);
static struct spi_driver ad5686_driver = {
.driver = {
@@ -447,18 +448,7 @@ static struct spi_driver ad5686_driver = {
.remove = __devexit_p(ad5686_remove),
.id_table = ad5686_id,
};
-
-static __init int ad5686_spi_init(void)
-{
- return spi_register_driver(&ad5686_driver);
-}
-module_init(ad5686_spi_init);
-
-static __exit void ad5686_spi_exit(void)
-{
- spi_unregister_driver(&ad5686_driver);
-}
-module_exit(ad5686_spi_exit);
+module_spi_driver(ad5686_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
new file mode 100644
index 00000000000..ff91480ae65
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -0,0 +1,393 @@
+/*
+ * Analog Devices AD5764, AD5764R, AD5744, AD5744R quad-channel
 * Analog Devices AD5764, AD5764R, AD5744, AD5744R quad-channel
+ * Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5764_REG_SF_NOP 0x0
+#define AD5764_REG_SF_CONFIG 0x1
+#define AD5764_REG_SF_CLEAR 0x4
+#define AD5764_REG_SF_LOAD 0x5
+#define AD5764_REG_DATA(x) ((2 << 3) | (x))
+#define AD5764_REG_COARSE_GAIN(x) ((3 << 3) | (x))
+#define AD5764_REG_FINE_GAIN(x) ((4 << 3) | (x))
+#define AD5764_REG_OFFSET(x) ((5 << 3) | (x))
+
+#define AD5764_NUM_CHANNELS 4
+
+/**
+ * struct ad5764_chip_info - chip specific information
+ * @int_vref: Value of the internal reference voltage in uV - 0 if external
+ * reference voltage is used
+ * @channels: channel specification
+ */
+
+struct ad5764_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec *channels;
+};
+
+/**
+ * struct ad5764_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip info
+ * @vref_reg: vref supply regulators
+ * @data: spi transfer buffers
+ */
+
+struct ad5764_state {
+ struct spi_device *spi;
+ const struct ad5764_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5764_type {
+ ID_AD5744,
+ ID_AD5744R,
+ ID_AD5764,
+ ID_AD5764R,
+};
+
+#define AD5764_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask = IIO_CHAN_INFO_OFFSET_SHARED_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \
+}
+
+#define DECLARE_AD5764_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD5764_CHANNEL(0, (_bits)), \
+ AD5764_CHANNEL(1, (_bits)), \
+ AD5764_CHANNEL(2, (_bits)), \
+ AD5764_CHANNEL(3, (_bits)), \
+};
+
+static DECLARE_AD5764_CHANNELS(ad5764, 16);
+static DECLARE_AD5764_CHANNELS(ad5744, 14);
+
+static const struct ad5764_chip_info ad5764_chip_infos[] = {
+ [ID_AD5744] = {
+ .int_vref = 0,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5744R] = {
+ .int_vref = 5000000,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5764] = {
+ .int_vref = 0,
+ .channels = ad5764_channels,
+ },
+ [ID_AD5764R] = {
+ .int_vref = 5000000,
+ .channels = ad5764_channels,
+ },
+};
+
+static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ ret = spi_write(st->spi, &st->data[0].d8[1], 3);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ *val = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5764_chan_info_to_reg(struct iio_chan_spec const *chan, long info)
+{
+ switch (info) {
+ case 0:
+ return AD5764_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5764_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5764_REG_FINE_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5764_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const int max_val = (1 << chan->scan_type.realbits);
+ unsigned int reg;
+
+ switch (info) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+ val <<= chan->scan_type.shift;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val >= 128 || val < -128)
+ return -EINVAL;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= 32 || val < -32)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = ad5764_chan_info_to_reg(chan, info);
+ return ad5764_write(indio_dev, reg, (u16)val);
+}
+
+static int ad5764_get_channel_vref(struct ad5764_state *st,
+ unsigned int channel)
+{
+ if (st->chip_info->int_vref)
+ return st->chip_info->int_vref;
+ else
+ return regulator_get_voltage(st->vref_reg[channel / 2].consumer);
+}
+
+static int ad5764_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ unsigned int reg;
+ int vref;
+ int ret;
+
+ switch (info) {
+ case 0:
+ reg = AD5764_REG_DATA(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ reg = AD5764_REG_OFFSET(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 7);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ reg = AD5764_REG_FINE_GAIN(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* vout = 4 * vref * ((dac_code / 2^bits) - 0.5) */
+ vref = ad5764_get_channel_vref(st, chan->channel);
+ if (vref < 0)
+ return vref;
+
+ scale_uv = (vref * 4 * 100) >> chan->scan_type.realbits;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -(1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5764_info = {
+ .read_raw = ad5764_read_raw,
+ .write_raw = ad5764_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5764_probe(struct spi_device *spi)
+{
+ enum ad5764_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5764_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+ st->chip_info = &ad5764_chip_infos[type];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5764_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = AD5764_NUM_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+
+ if (st->chip_info->int_vref == 0) {
+ st->vref_reg[0].supply = "vrefAB";
+ st->vref_reg[1].supply = "vrefCD";
+
+ ret = regulator_bulk_get(&st->spi->dev,
+ ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
+ ret);
+ goto error_free;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(st->vref_reg),
+ st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5764_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5764_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->chip_info->int_vref == 0) {
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ }
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5764_ids[] = {
+ { "ad5744", ID_AD5744 },
+ { "ad5744r", ID_AD5744R },
+ { "ad5764", ID_AD5764 },
+ { "ad5764r", ID_AD5764R },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5764_ids);
+
+static struct spi_driver ad5764_driver = {
+ .driver = {
+ .name = "ad5764",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5764_probe,
+ .remove = __devexit_p(ad5764_remove),
+ .id_table = ad5764_ids,
+};
+
+static int __init ad5764_spi_init(void)
+{
+ return spi_register_driver(&ad5764_driver);
+}
+module_init(ad5764_spi_init);
+
+static void __exit ad5764_spi_exit(void)
+{
+ spi_unregister_driver(&ad5764_driver);
+}
+module_exit(ad5764_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 6fbca8d9615..ac45636a8d7 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -1,5 +1,6 @@
/*
- * AD5760, AD5780, AD5781, AD5791 Voltage Output Digital to Analog Converter
+ * AD5760, AD5780, AD5781, AD5790, AD5791 Voltage Output Digital to Analog
+ * Converter
*
* Copyright 2011 Analog Devices Inc.
*
@@ -77,8 +78,8 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
.indexed = 1, \
.address = AD5791_ADDR_DAC0, \
.channel = 0, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_OFFSET_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
.scan_type = IIO_ST('u', bits, 24, shift) \
}
@@ -237,11 +238,11 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
*val &= AD5791_DAC_MASK;
*val >>= chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = (((u64)st->vref_mv) * 1000000ULL) >> chan->scan_type.realbits;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SHARED):
+ case IIO_CHAN_INFO_OFFSET:
val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
do_div(val64, st->vref_mv);
*val = -val64;
@@ -397,9 +398,11 @@ static const struct spi_device_id ad5791_id[] = {
{"ad5760", ID_AD5760},
{"ad5780", ID_AD5780},
{"ad5781", ID_AD5781},
+ {"ad5790", ID_AD5791},
{"ad5791", ID_AD5791},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5791_id);
static struct spi_driver ad5791_driver = {
.driver = {
@@ -410,19 +413,8 @@ static struct spi_driver ad5791_driver = {
.remove = __devexit_p(ad5791_remove),
.id_table = ad5791_id,
};
-
-static __init int ad5791_spi_init(void)
-{
- return spi_register_driver(&ad5791_driver);
-}
-module_init(ad5791_spi_init);
-
-static __exit void ad5791_spi_exit(void)
-{
- spi_unregister_driver(&ad5791_driver);
-}
-module_exit(ad5791_spi_exit);
+module_spi_driver(ad5791_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5791 DAC");
+MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index adfbd20f898..a4df6d7443c 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -280,20 +280,8 @@ static struct i2c_driver max517_driver = {
.resume = max517_resume,
.id_table = max517_id,
};
-
-static int __init max517_init(void)
-{
- return i2c_add_driver(&max517_driver);
-}
-
-static void __exit max517_exit(void)
-{
- i2c_del_driver(&max517_driver);
-}
+module_i2c_driver(max517_driver);
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC");
MODULE_LICENSE("GPL");
-
-module_init(max517_init);
-module_exit(max517_exit);
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index f5e368b5665..9c32d1beae2 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -143,19 +143,9 @@ static struct spi_driver ad5930_driver = {
.probe = ad5930_probe,
.remove = __devexit_p(ad5930_remove),
};
-
-static __init int ad5930_spi_init(void)
-{
- return spi_register_driver(&ad5930_driver);
-}
-module_init(ad5930_spi_init);
-
-static __exit void ad5930_spi_exit(void)
-{
- spi_unregister_driver(&ad5930_driver);
-}
-module_exit(ad5930_spi_exit);
+module_spi_driver(ad5930_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad5930 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index 9b4ff606535..2ccf25dd928 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -88,7 +88,7 @@ static ssize_t ad9832_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
ret = ad9832_write_frequency(st, this_attr->address, val);
@@ -344,31 +344,19 @@ static const struct spi_device_id ad9832_id[] = {
{"ad9835", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9832_probe,
.remove = __devexit_p(ad9832_remove),
.id_table = ad9832_id,
};
-
-static int __init ad9832_init(void)
-{
- return spi_register_driver(&ad9832_driver);
-}
-module_init(ad9832_init);
-
-static void __exit ad9832_exit(void)
-{
- spi_unregister_driver(&ad9832_driver);
-}
-module_exit(ad9832_exit);
+module_spi_driver(ad9832_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9832");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index c468f696fe2..5e67104fea1 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -77,7 +77,7 @@ static ssize_t ad9834_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
ret = ad9834_write_frequency(st, this_attr->address, val);
@@ -153,7 +153,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case 0:
if (sysfs_streq(buf, "sine")) {
st->control &= ~AD9834_MODE;
@@ -281,14 +281,14 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+static umode_t ad9834_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad9834_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
@@ -435,31 +435,19 @@ static const struct spi_device_id ad9834_id[] = {
{"ad9838", ID_AD9838},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9834_id);
static struct spi_driver ad9834_driver = {
.driver = {
.name = "ad9834",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9834_probe,
.remove = __devexit_p(ad9834_remove),
.id_table = ad9834_id,
};
-
-static int __init ad9834_init(void)
-{
- return spi_register_driver(&ad9834_driver);
-}
-module_init(ad9834_init);
-
-static void __exit ad9834_exit(void)
-{
- spi_unregister_driver(&ad9834_driver);
-}
-module_exit(ad9834_exit);
+module_spi_driver(ad9834_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9834");
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index a14771b24c5..f4f731bb219 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -129,19 +129,9 @@ static struct spi_driver ad9850_driver = {
.probe = ad9850_probe,
.remove = __devexit_p(ad9850_remove),
};
-
-static __init int ad9850_spi_init(void)
-{
- return spi_register_driver(&ad9850_driver);
-}
-module_init(ad9850_spi_init);
-
-static __exit void ad9850_spi_exit(void)
-{
- spi_unregister_driver(&ad9850_driver);
-}
-module_exit(ad9850_spi_exit);
+module_spi_driver(ad9850_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9850 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index cfceaa64a53..554266c615a 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -280,19 +280,9 @@ static struct spi_driver ad9852_driver = {
.probe = ad9852_probe,
.remove = __devexit_p(ad9852_remove),
};
-
-static __init int ad9852_spi_init(void)
-{
- return spi_register_driver(&ad9852_driver);
-}
-module_init(ad9852_spi_init);
-
-static __exit void ad9852_spi_exit(void)
-{
- spi_unregister_driver(&ad9852_driver);
-}
-module_exit(ad9852_spi_exit);
+module_spi_driver(ad9852_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9852 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index da83d2b88c4..3985766d6f8 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -413,19 +413,9 @@ static struct spi_driver ad9910_driver = {
.probe = ad9910_probe,
.remove = __devexit_p(ad9910_remove),
};
-
-static __init int ad9910_spi_init(void)
-{
- return spi_register_driver(&ad9910_driver);
-}
-module_init(ad9910_spi_init);
-
-static __exit void ad9910_spi_exit(void)
-{
- spi_unregister_driver(&ad9910_driver);
-}
-module_exit(ad9910_spi_exit);
+module_spi_driver(ad9910_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9910 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index 20c182576de..4d150048002 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -224,19 +224,9 @@ static struct spi_driver ad9951_driver = {
.probe = ad9951_probe,
.remove = __devexit_p(ad9951_remove),
};
-
-static __init int ad9951_spi_init(void)
-{
- return spi_register_driver(&ad9951_driver);
-}
-module_init(ad9951_spi_init);
-
-static __exit void ad9951_spi_exit(void)
-{
- spi_unregister_driver(&ad9951_driver);
-}
-module_exit(ad9951_spi_exit);
+module_spi_driver(ad9951_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9951 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
new file mode 100644
index 00000000000..bfb63400fa6
--- /dev/null
+++ b/drivers/staging/iio/events.h
@@ -0,0 +1,103 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_EVENTS_H_
+#define _IIO_EVENTS_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include "types.h"
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id: event identifier
+ * @timestamp: best estimate of time of event occurrence (often from
+ * the interrupt handler)
+ */
+struct iio_event_data {
+ __u64 id;
+ __s64 timestamp;
+};
+
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
+/**
+ * IIO_EVENT_CODE() - create event identifier
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @diff: Whether the event is for a differential channel or not.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @chan: Channel number for non-differential channels.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ */
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
+ ((u16)chan))
+
+
+#define IIO_EV_DIR_MAX 4
+#define IIO_EV_BIT(type, direction) \
+ (1 << (type*IIO_EV_DIR_MAX + direction))
+
+/**
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
+ type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+
+/**
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+
+/* Event code number extraction depends on which type of event we have.
+ * Perhaps review this in the future. */
+#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF))
+
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+
+#endif
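A stand-alone sketch (not part of the patch) of how IIO_UNMOD_EVENT_CODE() above packs its fields into a 64-bit identifier and how the EXTRACT macros undo it. The field values are illustrative; in the kernel they come from enum iio_chan_type, enum iio_event_type and enum iio_event_direction declared in this header.

/* sketch only: unmodified-channel event code packing/unpacking */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t chan_type = 0;		/* e.g. IIO_VOLTAGE, assumed 0 */
	uint64_t ev_type = 0;		/* e.g. IIO_EV_TYPE_THRESH */
	uint64_t dir = 1;		/* e.g. IIO_EV_DIR_RISING */
	uint64_t chan = 2;		/* channel number */

	uint64_t code = (ev_type << 56) | (dir << 48) |
			(chan_type << 32) | (uint16_t)chan;

	/* same shifts and masks as the EXTRACT macros above */
	printf("type=%llu dir=%llu chan=%d\n",
	       (unsigned long long)((code >> 56) & 0xFF),
	       (unsigned long long)((code >> 48) & 0xCF),
	       (int16_t)(code & 0xFFFF));
	return 0;
}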
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 22aea5b4e61..ea295b25308 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -37,11 +37,11 @@ config ADIS16260
will be called adis16260.
config ADXRS450
- tristate "Analog Devices ADXRS450 Digital Output Gyroscope SPI driver"
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
depends on SPI
help
- Say yes here to build support for Analog Devices ADXRS450 programmable
- digital output gyroscope.
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscopes.
This driver can also be built as a module. If so, the module
will be called adxrs450.
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index ff1b5a82b3d..c0ca7093e0e 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -98,11 +98,11 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
*val = tval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = -7;
*val2 = 461117;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 34000;
return IIO_VAL_INT_PLUS_MICRO;
@@ -136,8 +136,8 @@ static const struct iio_chan_spec adis16060_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = ADIS16060_TEMP_OUT,
}
};
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index 5d7a906fec7..1815490db8b 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -189,19 +189,9 @@ static struct spi_driver adis16080_driver = {
.probe = adis16080_probe,
.remove = __devexit_p(adis16080_remove),
};
-
-static __init int adis16080_init(void)
-{
- return spi_register_driver(&adis16080_driver);
-}
-module_init(adis16080_init);
-
-static __exit void adis16080_exit(void)
-{
- spi_unregister_driver(&adis16080_driver);
-}
-module_exit(adis16080_exit);
+module_spi_driver(adis16080_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16080");
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index 749240d0d8c..947eb86f05d 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -168,19 +168,9 @@ static struct spi_driver adis16130_driver = {
.probe = adis16130_probe,
.remove = __devexit_p(adis16130_remove),
};
-
-static __init int adis16130_init(void)
-{
- return spi_register_driver(&adis16130_driver);
-}
-module_init(adis16130_init);
-
-static __exit void adis16130_exit(void)
-{
- spi_unregister_driver(&adis16130_driver);
-}
-module_exit(adis16130_exit);
+module_spi_driver(adis16130_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16130");
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index aaa3967f8c0..8f6af47e955 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16260.h"
@@ -390,9 +390,9 @@ enum adis16260_channel {
#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
struct iio_chan_spec adis16260_channels_##axis[] = { \
IIO_CHAN(IIO_ANGL_VEL, 1, 0, 0, NULL, 0, mod, \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
gyro, ADIS16260_SCAN_GYRO, \
IIO_ST('s', 14, 16, 0), 0), \
IIO_CHAN(IIO_ANGL, 1, 0, 0, NULL, 0, mod, \
@@ -400,16 +400,16 @@ enum adis16260_channel {
angle, ADIS16260_SCAN_ANGL, \
IIO_ST('u', 14, 16, 0), 0), \
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0, \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
temp, ADIS16260_SCAN_TEMP, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_supply, ADIS16260_SCAN_SUPPLY, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_aux, ADIS16260_SCAN_AUX_ADC, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5) \
@@ -464,8 +464,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -489,10 +488,10 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -512,7 +511,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -544,11 +543,11 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][1];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][2];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
@@ -633,11 +632,16 @@ static int __devinit adis16260_probe(struct spi_device *spi)
}
if (indio_dev->buffer) {
/* Set default scan mode */
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_ANGL);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_SUPPLY);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_GYRO);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_AUX_ADC);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_TEMP);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_ANGL);
}
if (spi->irq) {
ret = adis16260_probe_trigger(indio_dev);
@@ -701,6 +705,7 @@ static const struct spi_device_id adis16260_id[] = {
{"adis16251", 1},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16260_id);
static struct spi_driver adis16260_driver = {
.driver = {
@@ -711,18 +716,7 @@ static struct spi_driver adis16260_driver = {
.remove = __devexit_p(adis16260_remove),
.id_table = adis16260_id,
};
-
-static __init int adis16260_init(void)
-{
- return spi_register_driver(&adis16260_driver);
-}
-module_init(adis16260_init);
-
-static __exit void adis16260_exit(void)
-{
- spi_unregister_driver(&adis16260_driver);
-}
-module_exit(adis16260_exit);
+module_spi_driver(adis16260_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 52a9e784e7c..699a6152c40 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -74,9 +74,10 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16260_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -116,10 +117,8 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16260_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16260_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16260_trigger_handler,
diff --git a/drivers/staging/iio/gyro/adxrs450.h b/drivers/staging/iio/gyro/adxrs450.h
index b6b68287640..af0c870100b 100644
--- a/drivers/staging/iio/gyro/adxrs450.h
+++ b/drivers/staging/iio/gyro/adxrs450.h
@@ -39,6 +39,11 @@
#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
/**
* struct adxrs450_state - device instance specific data
* @us: actual spi_device
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 3c3ef796d48..15e2496f70c 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -1,5 +1,5 @@
/*
- * ADXRS450 Digital Output Gyroscope Driver
+ * ADXRS450/ADXRS453 Digital Output Gyroscope Driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -243,7 +243,7 @@ static int adxrs450_write_raw(struct iio_dev *indio_dev,
{
int ret;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = adxrs450_spi_write_reg_16(indio_dev,
ADXRS450_DNC1,
val & 0x3FF);
@@ -263,7 +263,7 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
{
int ret;
s16 t;
- u16 ut;
+
switch (mask) {
case 0:
switch (chan->type) {
@@ -276,10 +276,10 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
case IIO_TEMP:
ret = adxrs450_spi_read_reg_16(indio_dev,
- ADXRS450_TEMP1, &ut);
+ ADXRS450_TEMP1, &t);
if (ret)
break;
- *val = ut;
+ *val = (t >> 6) + 225;
ret = IIO_VAL_INT;
break;
default:
@@ -287,13 +287,34 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = 218166;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 200;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW:
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
if (ret)
break;
*val = t;
ret = IIO_VAL_INT;
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
+ if (ret)
+ break;
+ *val = t;
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
break;
@@ -302,18 +323,36 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static const struct iio_chan_spec adxrs450_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE)
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- }
+static const struct iio_chan_spec adxrs450_channels[2][2] = {
+ [ID_ADXRS450] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
+ [ID_ADXRS453] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
};
static const struct iio_info adxrs450_info = {
@@ -343,7 +382,8 @@ static int __devinit adxrs450_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adxrs450_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adxrs450_channels;
+ indio_dev->channels =
+ adxrs450_channels[spi_get_device_id(spi)->driver_data];
indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
indio_dev->name = spi->dev.driver->name;
@@ -373,6 +413,13 @@ static int adxrs450_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id adxrs450_id[] = {
+ {"adxrs450", ID_ADXRS450},
+ {"adxrs453", ID_ADXRS453},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adxrs450_id);
+
static struct spi_driver adxrs450_driver = {
.driver = {
.name = "adxrs450",
@@ -380,20 +427,10 @@ static struct spi_driver adxrs450_driver = {
},
.probe = adxrs450_probe,
.remove = __devexit_p(adxrs450_remove),
+ .id_table = adxrs450_id,
};
-
-static __init int adxrs450_init(void)
-{
- return spi_register_driver(&adxrs450_driver);
-}
-module_init(adxrs450_init);
-
-static __exit void adxrs450_exit(void)
-{
- spi_unregister_driver(&adxrs450_driver);
-}
-module_exit(adxrs450_exit);
+module_spi_driver(adxrs450_driver);
MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>");
-MODULE_DESCRIPTION("Analog Devices ADXRS450 Gyroscope SPI driver");
+MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index f3d88cd7e8a..be6ced31f65 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -7,13 +7,12 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
#include <linux/device.h>
#include <linux/cdev.h>
-
+#include "types.h"
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -25,62 +24,64 @@ enum iio_data_type {
IIO_PROCESSED,
};
-enum iio_chan_type {
- /* real channel types */
- IIO_VOLTAGE,
- IIO_CURRENT,
- IIO_POWER,
- IIO_ACCEL,
- IIO_ANGL_VEL,
- IIO_MAGN,
- IIO_LIGHT,
- IIO_INTENSITY,
- IIO_PROXIMITY,
- IIO_TEMP,
- IIO_INCLI,
- IIO_ROT,
- IIO_ANGL,
- IIO_TIMESTAMP,
- IIO_CAPACITANCE,
-};
-
-enum iio_modifier {
- IIO_NO_MOD,
- IIO_MOD_X,
- IIO_MOD_Y,
- IIO_MOD_Z,
- IIO_MOD_X_AND_Y,
- IIO_MOD_X_ANX_Z,
- IIO_MOD_Y_AND_Z,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_MOD_X_OR_Y,
- IIO_MOD_X_OR_Z,
- IIO_MOD_Y_OR_Z,
- IIO_MOD_X_OR_Y_OR_Z,
- IIO_MOD_LIGHT_BOTH,
- IIO_MOD_LIGHT_IR,
-};
-
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
- IIO_CHAN_INFO_SCALE_SHARED,
- IIO_CHAN_INFO_SCALE_SEPARATE,
- IIO_CHAN_INFO_OFFSET_SHARED,
- IIO_CHAN_INFO_OFFSET_SEPARATE,
- IIO_CHAN_INFO_CALIBSCALE_SHARED,
- IIO_CHAN_INFO_CALIBSCALE_SEPARATE,
- IIO_CHAN_INFO_CALIBBIAS_SHARED,
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE,
- IIO_CHAN_INFO_PEAK_SHARED,
- IIO_CHAN_INFO_PEAK_SEPARATE,
- IIO_CHAN_INFO_PEAK_SCALE_SHARED,
- IIO_CHAN_INFO_PEAK_SCALE_SEPARATE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE,
- IIO_CHAN_INFO_AVERAGE_RAW_SHARED,
- IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE,
+ /* 0 is reserved for raw attributes */
+ IIO_CHAN_INFO_SCALE = 1,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
};
+#define IIO_CHAN_INFO_SHARED_BIT(type) BIT(type*2)
+#define IIO_CHAN_INFO_SEPARATE_BIT(type) BIT(type*2 + 1)
+
+#define IIO_CHAN_INFO_SCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_SCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_OFFSET_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_OFFSET_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBSCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_PEAK_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAK_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAKSCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAK_SCALE)
+#define IIO_CHAN_INFO_PEAKSCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAK_SCALE)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+
enum iio_endian {
IIO_CPU,
IIO_BE,
@@ -109,6 +110,10 @@ enum iio_endian {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
+ * @datasheet_name: A name used for in-kernel mapping of channels. It should
+ * correspond to the first name the channel is referred to
+ * by in the datasheet (e.g. IND), or the nearest possible
+ * compound name (e.g. IND-INC).
* @processed_val: Flag to specify the data access attribute should be
* *_input rather than *_raw.
* @modified: Does a modifier apply to this channel. What these are
@@ -137,6 +142,7 @@ struct iio_chan_spec {
long info_mask;
long event_mask;
char *extend_name;
+ const char *datasheet_name;
unsigned processed_val:1;
unsigned modified:1;
unsigned indexed:1;
@@ -261,7 +267,23 @@ struct iio_info {
int val);
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*update_scan_mode)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+};
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
};
/**
@@ -278,6 +300,7 @@ struct iio_info {
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
* channels
+ * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @trig: [INTERN] current device trigger (buffer modes)
* @pollfunc: [DRIVER] function run on trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -290,6 +313,7 @@ struct iio_info {
* @chrdev: [INTERN] associated character device
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
+ * @flags: [INTERN] file ops related flags including busy flag.
**/
struct iio_dev {
int id;
@@ -305,6 +329,7 @@ struct iio_dev {
unsigned long *available_scan_masks;
unsigned masklength;
+ unsigned long *active_scan_mask;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -315,13 +340,24 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
#define IIO_MAX_GROUPS 6
const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
int groupcounter;
+
+ unsigned long flags;
};
/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ */
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
+
+/**
* iio_device_register() - register a device with the IIO subsystem
* @indio_dev: Device structure filled by the device driver
**/
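A stand-alone sketch (not part of the patch) of the bit layout behind the IIO_CHAN_INFO_*_BIT macros added above: each info type owns a pair of bits in info_mask, BIT(2*type) when shared across channels and BIT(2*type + 1) when separate per channel, while read_raw()/write_raw() now switch on the enum value itself. The INFO_* names below are local stand-ins, not the kernel macros.

/* sketch only: shared/separate bit pairs derived from one enum value */
#include <stdio.h>

#define BIT(n)				(1UL << (n))
#define INFO_SHARED_BIT(type)		BIT((type) * 2)
#define INFO_SEPARATE_BIT(type)		BIT((type) * 2 + 1)

enum { INFO_SCALE = 1, INFO_OFFSET, INFO_CALIBSCALE, INFO_CALIBBIAS };

int main(void)
{
	unsigned long info_mask = INFO_SHARED_BIT(INFO_SCALE) |
				  INFO_SEPARATE_BIT(INFO_CALIBBIAS);

	printf("mask = 0x%lx\n", info_mask);	/* 0x204 */
	printf("scale shared?   %d\n",
	       !!(info_mask & INFO_SHARED_BIT(INFO_SCALE)));
	printf("offset anywhere? %d\n",
	       !!(info_mask & (INFO_SHARED_BIT(INFO_OFFSET) |
			       INFO_SEPARATE_BIT(INFO_OFFSET))));
	return 0;
}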
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 36159e0dbfc..107cfb1cbb0 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -33,9 +33,6 @@ int __iio_add_chan_devattr(const char *postfix,
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
-
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
@@ -47,14 +44,6 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#else
-static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- return -EINVAL;
-}
-
-static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{}
-
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
diff --git a/drivers/staging/iio/iio_core_trigger.h b/drivers/staging/iio/iio_core_trigger.h
index 523c288b776..6f7c56fcbe7 100644
--- a/drivers/staging/iio/iio_core_trigger.h
+++ b/drivers/staging/iio/iio_core_trigger.h
@@ -13,8 +13,7 @@
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
-
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index da657d10471..cdbf289bfe2 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -102,6 +102,10 @@ static int iio_dummy_evgen_create(void)
int iio_dummy_evgen_get_irq(void)
{
int i, ret = 0;
+
+ if (iio_evgen == NULL)
+ return -ENODEV;
+
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++)
if (iio_evgen->inuse[i] == false) {
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index af0c99236d4..e3a94572bb4 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -21,7 +21,8 @@
#include "iio.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "events.h"
+#include "buffer.h"
#include "iio_simple_dummy.h"
/*
@@ -76,13 +77,13 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Offset for userspace to apply prior to scale
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
/*
* in_voltage0_scale
* Multipler for userspace to apply post offset
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
/* The ordering of elements in the buffer via an enum */
.scan_index = voltage0,
.scan_type = { /* Description of storage in buffer */
@@ -117,7 +118,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Shared version of scale - shared by differential
* input channels of type IIO_VOLTAGE.
*/
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage1m2,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -134,7 +135,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
.channel = 3,
.channel2 = 4,
.info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage3m4,
.scan_type = {
.sign = 's',
@@ -159,7 +160,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* seeing the readings. Typically part of hardware
* calibration.
*/
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
.scan_index = accelx,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -228,29 +229,32 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /* only single ended adc -> 0.001333 */
- *val = 0;
- *val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- /* all differential adc channels -> 0.000001344 */
- *val = 0;
- *val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->differential) {
+ case 0:
+ /* only single ended adc -> 0.001333 */
+ *val = 0;
+ *val2 = 1333;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case 1:
+ /* all differential adc channels -> 0.000001344 */
+ *val = 0;
+ *val2 = 1344;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ }
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
ret = IIO_VAL_INT_PLUS_MICRO;
@@ -295,7 +299,7 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
st->dac_val = val;
mutex_unlock(&st->lock);
return 0;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
@@ -514,7 +518,8 @@ static __init int iio_dummy_init(void)
return -EINVAL;
}
/* Fake a bus */
- iio_dummy_devs = kzalloc(sizeof(*iio_dummy_devs)*instances, GFP_KERNEL);
+ iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
+ GFP_KERNEL);
/* Here we have no actual device so call probe */
for (i = 0; i < instances; i++) {
ret = iio_dummy_probe(i);
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index edad0e7b4f4..d6a1c0e82a5 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -57,7 +57,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
if (data == NULL)
return -ENOMEM;
- if (buffer->scan_count) {
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
/*
* Three common options here:
* hardware scans: certain combinations of channels make
@@ -75,7 +75,10 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* in the constant table fakedata.
*/
int i, j;
- for (i = 0, j = 0; i < buffer->scan_count; i++) {
+ for (i = 0, j = 0;
+ i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ i++) {
j = find_next_bit(buffer->scan_mask,
indio_dev->masklength, j + 1);
/* random access read form the 'device' */
@@ -142,8 +145,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
/* Tell the core how to access the buffer */
buffer->access = &kfifo_access_funcs;
- /* Number of bytes per element */
- buffer->bpe = 2;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
@@ -151,8 +152,7 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
* Tell the core what device type specific functions should
* be run on either side of buffer capture enable / disable.
*/
- buffer->setup_ops = &iio_simple_dummy_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;
/*
* Configure a polling function.
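The removed buffer->scan_count field is queried throughout the rest of this series with the generic bitmap helpers applied to indio_dev->active_scan_mask. A hedged sketch of the idiom, assuming the usual <linux/bitmap.h>/<linux/bitops.h> helpers and an indio_dev in scope; the loop body is illustrative only:

        int bit, scan_count;

        /* Bail out if no scan elements are enabled. */
        if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
                return -EINVAL;

        /* Number of enabled scan elements (the old buffer->scan_count). */
        scan_count = bitmap_weight(indio_dev->active_scan_mask,
                                   indio_dev->masklength);

        /* Visit each enabled element; 'bit' is the channel's scan_index. */
        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                /* read the channel whose scan_index is 'bit' */
        }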
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index 9f00cff7ddd..449c7a5ece8 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -14,6 +14,7 @@
#include "iio.h"
#include "sysfs.h"
+#include "events.h"
#include "iio_simple_dummy.h"
/* Evgen 'fakes' interrupt events for this example */
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 1086e0befc2..9a2ca55625f 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "ad5933.h"
@@ -113,10 +113,10 @@ static struct iio_chan_spec ad5933_channels[] = {
0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
/* Ring Channels */
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
};
@@ -329,7 +329,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%d\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -380,7 +380,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -537,14 +537,14 @@ static const struct iio_info ad5933_info = {
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
{
struct ad5933_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
int ret;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
ad5933_channels[1].scan_type.storagebits / 8;
if (indio_dev->buffer->access->set_bytes_per_datum)
@@ -611,7 +611,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad5933_ring_setup_ops;
+ indio_dev->setup_ops = &ad5933_ring_setup_ops;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
@@ -640,12 +640,14 @@ static void ad5933_work(struct work_struct *work)
ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
ad5933_i2c_read(st->client,
- test_bit(1, ring->scan_mask) ?
+ test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
- ring->scan_count * 2, (u8 *)buf);
+ scan_count * 2, (u8 *)buf);
- if (ring->scan_count == 2) {
+ if (scan_count == 2) {
buf[0] = be16_to_cpu(buf[0]);
buf[1] = be16_to_cpu(buf[1]);
} else {
@@ -734,8 +736,8 @@ static int __devinit ad5933_probe(struct i2c_client *client,
goto error_unreg_ring;
/* enable both REAL and IMAG channels by default */
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
ret = ad5933_setup(st);
if (ret)
@@ -796,18 +798,7 @@ static struct i2c_driver ad5933_driver = {
.remove = __devexit_p(ad5933_remove),
.id_table = ad5933_id,
};
-
-static __init int ad5933_init(void)
-{
- return i2c_add_driver(&ad5933_driver);
-}
-module_init(ad5933_init);
-
-static __exit void ad5933_exit(void)
-{
- i2c_del_driver(&ad5933_driver);
-}
-module_exit(ad5933_exit);
+module_i2c_driver(ad5933_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index f3546ee910a..83d133efaac 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -148,12 +148,14 @@ struct adis16400_chip_info {
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
+ * @filt_int: integer part of requested filter frequency
**/
struct adis16400_state {
struct spi_device *us;
struct iio_trigger *trig;
struct mutex buf_lock;
struct adis16400_chip_info *variant;
+ int filt_int;
u8 tx[ADIS16400_MAX_TX] ____cacheline_aligned;
u8 rx[ADIS16400_MAX_RX] ____cacheline_aligned;
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index d082a37c4fb..e73ad7818d8 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -28,7 +28,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16400.h"
enum adis16400_chip_variant {
@@ -161,25 +161,65 @@ error_ret:
return ret;
}
+static int adis16400_get_freq(struct iio_dev *indio_dev)
+{
+ u16 t;
+ int sps, ret;
+
+ ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+
+ return sps;
+}
+
static ssize_t adis16400_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
int ret, len = 0;
- u16 t;
- int sps;
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SMPL_PRD,
- &t);
- if (ret)
+ ret = adis16400_get_freq(indio_dev);
+ if (ret < 0)
return ret;
- sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
- sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
- len = sprintf(buf, "%d SPS\n", sps);
+ len = sprintf(buf, "%d SPS\n", ret);
return len;
}
+static const unsigned adis16400_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 5,
+ [2] = 10,
+ [3] = 50,
+ [4] = 200,
+};
+
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+ int i, ret;
+ u16 val16;
+ for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 0; i--)
+ if (sps/adis16400_3db_divisors[i] > val)
+ break;
+ if (i == -1)
+ ret = -EINVAL;
+ else {
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = adis16400_spi_write_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ (val16 & ~0x03) | i);
+ }
+error_ret:
+ return ret;
+}
+
static ssize_t adis16400_write_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -210,6 +250,7 @@ static ssize_t adis16400_write_frequency(struct device *dev,
ADIS16400_SMPL_PRD,
t);
+ /* Also update the filter */
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
@@ -455,22 +496,39 @@ static u8 adis16400_addresses[17][2] = {
[incli_y] = { ADIS16300_ROLL_OUT }
};
+
static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
- int ret;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret, sps;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_write_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
val);
mutex_unlock(&indio_dev->mlock);
return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ /* Need to cache values so we can update if the frequency
+ changes */
+ mutex_lock(&indio_dev->mlock);
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = adis16400_get_freq(indio_dev);
+ if (sps < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return sps;
+ }
+
+ ret = adis16400_set_filter(indio_dev, sps, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
default:
return -EINVAL;
}
@@ -504,8 +562,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -533,7 +590,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_read_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
@@ -544,11 +601,29 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
val16 = ((val16 & 0xFFF) << 4) >> 4;
*val = val16;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* currently only temperature */
*val = 198;
*val2 = 160000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&indio_dev->mlock);
+ /* Need both the number of taps and the sampling frequency */
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = adis16400_get_freq(indio_dev);
+ if (ret > 0)
+ *val = ret/adis16400_3db_divisors[val16 & 0x03];
+ *val2 = 0;
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -560,7 +635,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 14, 16, 0)
@@ -568,8 +643,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -577,8 +653,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -586,8 +663,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -595,8 +673,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -604,8 +683,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -613,8 +693,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -622,7 +703,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_x,
.scan_index = ADIS16400_SCAN_MAGN_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -630,7 +712,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_y,
.scan_index = ADIS16400_SCAN_MAGN_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -638,7 +721,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_z,
.scan_index = ADIS16400_SCAN_MAGN_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -646,8 +730,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -655,7 +739,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16400_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -669,7 +753,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -677,8 +761,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -686,8 +771,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -695,8 +781,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -704,8 +791,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -713,8 +801,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -722,8 +811,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -732,8 +822,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "x",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp0,
.scan_index = ADIS16350_SCAN_TEMP_X,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -742,8 +833,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "y",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp1,
.scan_index = ADIS16350_SCAN_TEMP_Y,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -752,8 +844,8 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "z",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp2,
.scan_index = ADIS16350_SCAN_TEMP_Z,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -761,7 +853,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -775,7 +867,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -783,8 +875,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -792,8 +885,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -801,8 +895,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -810,8 +905,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -819,8 +915,8 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -828,7 +924,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -836,7 +932,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_x,
.scan_index = ADIS16300_SCAN_INCLI_X,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -844,7 +940,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_y,
.scan_index = ADIS16300_SCAN_INCLI_Y,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -857,8 +953,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -866,8 +963,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -875,8 +973,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -884,8 +983,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -893,8 +993,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -902,8 +1003,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -911,8 +1013,8 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -1118,6 +1220,7 @@ static const struct spi_device_id adis16400_id[] = {
{"adis16405", ADIS16400},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
static struct spi_driver adis16400_driver = {
.driver = {
@@ -1128,18 +1231,7 @@ static struct spi_driver adis16400_driver = {
.probe = adis16400_probe,
.remove = __devexit_p(adis16400_remove),
};
-
-static __init int adis16400_init(void)
-{
- return spi_register_driver(&adis16400_driver);
-}
-module_init(adis16400_init);
-
-static __exit void adis16400_exit(void)
-{
- spi_unregister_driver(&adis16400_driver);
-}
-module_exit(adis16400_exit);
+module_spi_driver(adis16400_driver);
MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index fd886bf51a6..ac22de573f3 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -79,14 +79,16 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
struct spi_message msg;
int i, j = 0, ret;
struct spi_transfer *xfers;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- xfers = kzalloc(sizeof(*xfers)*indio_dev->buffer->scan_count + 1,
+ xfers = kzalloc(sizeof(*xfers)*(scan_count + 1),
GFP_KERNEL);
if (xfers == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
- if (test_bit(i, indio_dev->buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
xfers[j].tx_buf = &read_all_tx_array[i];
xfers[j].bits_per_word = 16;
xfers[j].len = 2;
@@ -97,7 +99,7 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
xfers[j].len = 2;
spi_message_init(&msg);
- for (j = 0; j < indio_dev->buffer->scan_count + 1; j++)
+ for (j = 0; j < scan_count + 1; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -119,26 +121,27 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
/* Assumption that long is enough for maximum channels */
- unsigned long mask = *ring->scan_mask;
-
+ unsigned long mask = *indio_dev->active_scan_mask;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
data = kmalloc(datasize , GFP_KERNEL);
if (data == NULL) {
dev_err(&st->us->dev, "memory alloc failed in ring bh");
return -ENOMEM;
}
- if (ring->scan_count) {
+ if (scan_count) {
if (st->variant->flags & ADIS16400_NO_BURST) {
ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < ring->scan_count; i++)
+ for (; i < scan_count; i++)
data[i] = *(s16 *)(st->rx + i*2);
} else {
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < indio_dev->buffer->scan_count; i++) {
+ for (; i < scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
data[i] = be16_to_cpup(
@@ -186,10 +189,8 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16400_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16400_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16400_trigger_handler,
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index 9df0ce81dad..d7b1e9e435a 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -24,7 +24,7 @@
#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "buffer.h"
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
@@ -43,7 +43,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!rb->access->read_first_n)
+ if (!rb || !rb->access->read_first_n)
return -EINVAL;
return rb->access->read_first_n(rb, n, buf);
}
@@ -64,28 +64,9 @@ unsigned int iio_buffer_poll(struct file *filp,
return 0;
}
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+void iio_buffer_init(struct iio_buffer *buffer)
{
- struct iio_buffer *rb = indio_dev->buffer;
- if (!rb)
- return -EINVAL;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
- return 0;
-}
-
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
-
- clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-}
-
-void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
-{
- buffer->indio_dev = indio_dev;
+ INIT_LIST_HEAD(&buffer->demux_list);
init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
@@ -126,17 +107,15 @@ static ssize_t iio_scan_el_show(struct device *dev,
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- ret = iio_scan_mask_query(indio_dev->buffer,
- to_iio_dev_attr(attr)->address);
- if (ret < 0)
- return ret;
+ ret = test_bit(to_iio_dev_attr(attr)->address,
+ indio_dev->buffer->scan_mask);
+
return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
- buffer->scan_count--;
return 0;
}
@@ -153,11 +132,11 @@ static ssize_t iio_scan_el_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- ret = iio_scan_mask_query(buffer, this_attr->address);
+ ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
@@ -165,7 +144,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret)
goto error_ret;
} else if (state && !ret) {
- ret = iio_scan_mask_set(buffer, this_attr->address);
+ ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
@@ -173,7 +152,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
error_ret:
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return ret < 0 ? ret : len;
}
@@ -196,7 +175,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
@@ -311,12 +290,14 @@ int iio_buffer_register(struct iio_dev *indio_dev,
if (ret < 0)
goto error_cleanup_dynamic;
attrcount += ret;
+ if (channels[i].type == IIO_TIMESTAMP)
+ buffer->scan_index_timestamp =
+ channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
- buffer->scan_mask
- = kzalloc(sizeof(*buffer->scan_mask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
+ buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*buffer->scan_mask),
+ GFP_KERNEL);
if (buffer->scan_mask == NULL) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
@@ -326,10 +307,9 @@ int iio_buffer_register(struct iio_dev *indio_dev,
buffer->scan_el_group.name = iio_scan_elements_group_name;
- buffer->scan_el_group.attrs
- = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+ sizeof(buffer->scan_el_group.attrs[0]),
+ GFP_KERNEL);
if (buffer->scan_el_group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_scan_mask;
@@ -395,31 +375,20 @@ ssize_t iio_buffer_write_length(struct device *dev,
if (val == buffer->access->get_length(buffer))
return len;
- if (buffer->access->set_length) {
- buffer->access->set_length(buffer, val);
- if (buffer->access->mark_param_change)
- buffer->access->mark_param_change(buffer);
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ } else {
+ if (buffer->access->set_length)
+ buffer->access->set_length(buffer, val);
+ ret = 0;
}
+ mutex_unlock(&indio_dev->mlock);
- return len;
+ return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
-
- if (buffer->access->get_bytes_per_datum)
- return sprintf(buf, "%d\n",
- buffer->access->get_bytes_per_datum(buffer));
-
- return 0;
-}
-EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
-
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -434,14 +403,14 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
previous_mode = indio_dev->currentmode;
requested_state = !(buf[0] == '0');
- current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ current_state = iio_buffer_enabled(indio_dev);
if (current_state == requested_state) {
printk(KERN_INFO "iio-buffer, current state requested again\n");
goto done;
}
if (requested_state) {
- if (buffer->setup_ops->preenable) {
- ret = buffer->setup_ops->preenable(indio_dev);
+ if (indio_dev->setup_ops->preenable) {
+ ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
printk(KERN_ERR
"Buffer not started:"
@@ -458,16 +427,12 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
}
- if (buffer->access->mark_in_use)
- buffer->access->mark_in_use(buffer);
/* Definitely possible for devices to support both of these.*/
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ret = -EINVAL;
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
goto error_ret;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
@@ -478,32 +443,28 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
- if (buffer->setup_ops->postenable) {
- ret = buffer->setup_ops->postenable(indio_dev);
+ if (indio_dev->setup_ops->postenable) {
+ ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
printk(KERN_INFO
"Buffer not started:"
"postenable failed\n");
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = previous_mode;
- if (buffer->setup_ops->postdisable)
- buffer->setup_ops->
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->
postdisable(indio_dev);
goto error_ret;
}
}
} else {
- if (buffer->setup_ops->predisable) {
- ret = buffer->setup_ops->predisable(indio_dev);
+ if (indio_dev->setup_ops->predisable) {
+ ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
goto error_ret;
}
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
- if (buffer->setup_ops->postdisable) {
- ret = buffer->setup_ops->postdisable(indio_dev);
+ if (indio_dev->setup_ops->postdisable) {
+ ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
goto error_ret;
}
@@ -523,37 +484,10 @@ ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", !!(indio_dev->currentmode
- & INDIO_ALL_BUFFER_MODES));
+ return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer = indio_dev->buffer;
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(buffer->scan_count || buffer->scan_timestamp))
- return -EINVAL;
- if (buffer->scan_timestamp)
- if (buffer->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((buffer->scan_count * buffer->bpe)
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = buffer->scan_count * buffer->bpe;
- buffer->access->set_bytes_per_datum(buffer, size);
-
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_buffer_preenable);
-
-
/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
unsigned int masklength,
@@ -569,14 +503,57 @@ static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
return NULL;
}
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ const struct iio_chan_spec *ch;
+ unsigned bytes = 0;
+ int length, i;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+ /* How much space will the demuxed element take? */
+ for_each_set_bit(i, buffer->scan_mask,
+ indio_dev->masklength) {
+ ch = iio_find_channel_from_si(indio_dev, i);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ if (buffer->scan_timestamp) {
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+
+ /* What scan mask do we actually have? */
+ if (indio_dev->available_scan_masks)
+ indio_dev->active_scan_mask =
+ iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ buffer->scan_mask);
+ else
+ indio_dev->active_scan_mask = buffer->scan_mask;
+ iio_update_demux(indio_dev);
+
+ if (indio_dev->info->update_scan_mode)
+ return indio_dev->info
+ ->update_scan_mode(indio_dev,
+ indio_dev->active_scan_mask);
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
+
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
unsigned long *mask;
unsigned long *trialmask;
@@ -604,7 +581,6 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
}
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
- buffer->scan_count++;
kfree(trialmask);
@@ -612,25 +588,147 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
};
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
- long *mask;
-
if (bit > indio_dev->masklength)
return -EINVAL;
if (!buffer->scan_mask)
return 0;
- if (indio_dev->available_scan_masks)
- mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- buffer->scan_mask);
- else
- mask = buffer->scan_mask;
- if (!mask)
- return 0;
- return test_bit(bit, mask);
+ return test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
+
+/**
+ * struct iio_demux_table - table describing demux memcpy ops
+ * @from: index to copy from
+ * @to: index to copy to
+ * @length: how many bytes to copy
+ * @l: list head used for management
+ */
+struct iio_demux_table {
+ unsigned from;
+ unsigned to;
+ unsigned length;
+ struct list_head l;
+};
+
+static unsigned char *iio_demux(struct iio_buffer *buffer,
+ unsigned char *datain)
+{
+ struct iio_demux_table *t;
+
+ if (list_empty(&buffer->demux_list))
+ return datain;
+ list_for_each_entry(t, &buffer->demux_list, l)
+ memcpy(buffer->demux_bounce + t->to,
+ datain + t->from, t->length);
+
+ return buffer->demux_bounce;
+}
+
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp)
+{
+ unsigned char *dataout = iio_demux(buffer, data);
+
+ return buffer->access->store_to(buffer, dataout, timestamp);
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffer);
+
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+ const struct iio_chan_spec *ch;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, in_ind = -1, out_ind, length;
+ unsigned in_loc = 0, out_loc = 0;
+ struct iio_demux_table *p, *q;
+
+ /* Clear out any old demux */
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ kfree(buffer->demux_bounce);
+ buffer->demux_bounce = NULL;
+
+ /* First work out which scan mode we will actually have */
+ if (bitmap_equal(indio_dev->active_scan_mask,
+ buffer->scan_mask,
+ indio_dev->masklength))
+ return 0;
+
+ /* Now we have the two masks, work from least sig and build up sizes */
+ for_each_set_bit(out_ind,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ while (in_ind != out_ind) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ /* Make sure we are aligned */
+ in_loc += length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ }
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ /* Relies on scan_timestamp being last */
+ if (buffer->scan_timestamp) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
+ if (buffer->demux_bounce == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ return 0;
+
+error_clear_mux_table:
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_demux);
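The demux table built by iio_update_demux() is consumed through iio_push_to_buffer(), which reorders the datum via the per-entry memcpy list before handing it to the buffer's store_to() callback. A hedged sketch of how a trigger handler might feed it; example_trigger_handler, the fixed-size data[] array and the use of pf->timestamp are assumptions for illustration rather than code from this patch:

        static irqreturn_t example_trigger_handler(int irq, void *p)
        {
                struct iio_poll_func *pf = p;
                struct iio_dev *indio_dev = pf->indio_dev;
                u8 data[32];    /* assumed large enough for the enabled scan */

                /* ... read the enabled channels into data[] here ... */

                iio_push_to_buffer(indio_dev->buffer, data, pf->timestamp);

                iio_trigger_notify_done(indio_dev->trig);
                return IRQ_HANDLED;
        }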
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 26564094e33..19f897f3c85 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -25,8 +25,8 @@
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
-#include "chrdev.h"
#include "sysfs.h"
+#include "events.h"
/* IDA to assign each registered device a unique id*/
static DEFINE_IDA(iio_ida);
@@ -77,17 +77,29 @@ static const char * const iio_modifier_names[] = {
/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
- [IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
- [IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
- [IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
- [IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
- [IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
- [IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
- [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
- = "quadrature_correction_raw",
- [IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
+ [IIO_CHAN_INFO_SCALE] = "scale",
+ [IIO_CHAN_INFO_OFFSET] = "offset",
+ [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
+ [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
+ [IIO_CHAN_INFO_PEAK] = "peak_raw",
+ [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
+ [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
+ [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
+ [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
+ = "filter_low_pass_3db_frequency",
};
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == si)
+ return &indio_dev->channels[i];
+ return NULL;
+}
+
/**
* struct iio_detected_event_list - list element for events that have occurred
* @list: linked list header
@@ -169,8 +181,11 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
{
struct iio_event_interface *ev_int = filep->private_data;
struct iio_detected_event_list *el;
+ size_t len = sizeof(el->ev);
int ret;
- size_t len;
+
+ if (count < len)
+ return -EINVAL;
mutex_lock(&ev_int->event_list_lock);
if (list_empty(&ev_int->det_events)) {
@@ -192,7 +207,6 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
el = list_first_entry(&ev_int->det_events,
struct iio_detected_event_list,
list);
- len = sizeof el->ev;
if (copy_to_user(buf, &(el->ev), len)) {
ret = -EFAULT;
goto error_mutex_unlock;
@@ -242,25 +256,24 @@ static const struct file_operations iio_event_chrdev_fileops = {
static int iio_event_getfd(struct iio_dev *indio_dev)
{
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
int fd;
- if (indio_dev->event_interface == NULL)
+ if (ev_int == NULL)
return -ENODEV;
- mutex_lock(&indio_dev->event_interface->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS,
- &indio_dev->event_interface->flags)) {
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ mutex_unlock(&ev_int->event_list_lock);
return -EBUSY;
}
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_unlock(&ev_int->event_list_lock);
fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops,
- indio_dev->event_interface, O_RDONLY);
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
if (fd < 0) {
- mutex_lock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_unlock(&ev_int->event_list_lock);
}
return fd;
}
@@ -416,7 +429,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
- if (chan->modified) {
+ if (chan->modified && !generic) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_modifier_names[chan
@@ -601,7 +614,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
chan,
&iio_read_channel_info,
&iio_write_channel_info,
- (1 << i),
+ i/2,
!(i%2),
&indio_dev->dev,
&indio_dev->channel_attr_list);
@@ -666,10 +679,9 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
- indio_dev->chan_attr_group.attrs
- = kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->chan_attr_group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->chan_attr_group.attrs == NULL) {
ret = -ENOMEM;
goto error_clear_attrs;
@@ -789,6 +801,9 @@ static ssize_t iio_ev_value_store(struct device *dev,
unsigned long val;
int ret;
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
ret = strict_strtoul(buf, 10, &val);
if (ret)
return ret;
@@ -959,10 +974,9 @@ static int iio_device_register_eventset(struct iio_dev *indio_dev)
}
indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs =
- kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
- *(attrcount + 1),
- GFP_KERNEL);
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->event_interface->group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_setup_event_lines;
@@ -1069,9 +1083,13 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
+ return -EBUSY;
+
filp->private_data = indio_dev;
- return iio_chrdev_buffer_open(indio_dev);
+ return 0;
}
/**
@@ -1079,8 +1097,9 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
**/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
- iio_chrdev_buffer_release(container_of(inode->i_cdev,
- struct iio_dev, chrdev));
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+ clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
return 0;
}
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 2c626e0cb29..47ecadd4818 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -159,13 +159,12 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count) {
+ if (!trig->use_count)
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
if (trig->subirqs[i].enabled) {
trig->use_count++;
handle_nested_irq(trig->subirq_base + i);
}
- }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
@@ -173,10 +172,9 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
{
trig->use_count--;
if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
- if (trig->ops->try_reenable(trig)) {
+ if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
- }
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -222,8 +220,16 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (trig->ops && trig->ops->set_trigger_state && notinuse)
+ if (ret < 0) {
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
+ }
+
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
+ if (ret < 0)
+ module_put(pf->indio_dev->info->driver_module);
+ }
return ret;
}
@@ -295,7 +301,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_currrent() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -336,6 +342,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
mutex_unlock(&indio_dev->mlock);
trig = iio_trigger_find_by_name(buf, len);
+ if (oldtrig == trig)
+ return len;
if (trig && indio_dev->info->validate_trigger) {
ret = indio_dev->info->validate_trigger(indio_dev, trig);
@@ -473,12 +481,10 @@ void iio_free_trigger(struct iio_trigger *trig)
}
EXPORT_SYMBOL(iio_free_trigger);
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
&iio_trigger_consumer_attr_group;
-
- return 0;
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
@@ -490,18 +496,14 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e8c234bb18f..e1e9c06cde4 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -11,9 +11,7 @@
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
- int use_count;
int update_needed;
- struct mutex use_lock;
};
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
@@ -33,54 +31,25 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
if (!buf->update_needed)
goto error_ret;
- if (buf->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
error_ret:
- mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count++;
- mutex_unlock(&buf->use_lock);
-}
-
-static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count--;
- mutex_unlock(&buf->use_lock);
-}
-
static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
-static inline void __iio_init_kfifo(struct iio_kfifo *kf)
-{
- mutex_init(&kf->use_lock);
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
static struct attribute *iio_kfifo_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -98,9 +67,8 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
if (!kf)
return NULL;
kf->update_needed = true;
- iio_buffer_init(&kf->buffer, indio_dev);
+ iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
- __iio_init_kfifo(kf);
return &kf->buffer;
}
@@ -111,20 +79,19 @@ static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
return r->bytes_per_datum;
}
-static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
- if (r->bytes_per_datum != bpd) {
- r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
- }
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ kf->update_needed = true;
return 0;
}
-static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
- struct iio_kfifo *kf = iio_to_kfifo(r);
- kf->update_needed = true;
+ if (r->bytes_per_datum != bpd) {
+ r->bytes_per_datum = bpd;
+ iio_mark_update_needed_kfifo(r);
+ }
return 0;
}
@@ -132,8 +99,7 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_kfifo(r);
}
return 0;
}
@@ -150,16 +116,9 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
{
int ret;
struct iio_kfifo *kf = iio_to_kfifo(r);
- u8 *datal = kmalloc(r->bytes_per_datum, GFP_KERNEL);
- memcpy(datal, data, r->bytes_per_datum - sizeof(timestamp));
- memcpy(datal + r->bytes_per_datum - sizeof(timestamp),
- &timestamp, sizeof(timestamp));
ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
- if (ret != r->bytes_per_datum) {
- kfree(datal);
+ if (ret != r->bytes_per_datum)
return -EBUSY;
- }
- kfree(datal);
return 0;
}
@@ -169,17 +128,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
- ret = kfifo_to_user(&kf->kf, buf, r->bytes_per_datum*n, &copied);
+ if (n < r->bytes_per_datum)
+ return -EINVAL;
+
+ n = rounddown(n, r->bytes_per_datum);
+ ret = kfifo_to_user(&kf->kf, buf, n, &copied);
return copied;
}
const struct iio_buffer_access_funcs kfifo_access_funcs = {
- .mark_in_use = &iio_mark_kfifo_in_use,
- .unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
- .mark_param_change = &iio_mark_update_needed_kfifo,
.request_update = &iio_request_update_kfifo,
.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index a15598bb9fd..cc2bd9a1ccf 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -1,7 +1,7 @@
#include <linux/kfifo.h>
#include "iio.h"
-#include "buffer_generic.h"
+#include "buffer.h"
extern const struct iio_buffer_access_funcs kfifo_access_funcs;
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 9dc9e635839..849d6a564af 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -362,8 +362,7 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
- if (mask == (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) &&
- chan->type == IIO_LIGHT) {
+ if (mask == IIO_CHAN_INFO_CALIBSCALE && chan->type == IIO_LIGHT) {
chip->lux_scale = val;
ret = 0;
}
@@ -402,7 +401,7 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
if (!ret)
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
*val = chip->lux_scale;
ret = IIO_VAL_INT;
@@ -421,7 +420,7 @@ static const struct iio_chan_spec isl29018_channels[] = {
.indexed = 1,
.channel = 0,
.processed_val = IIO_PROCESSED,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}, {
.type = IIO_INTENSITY,
.modified = 1,
@@ -603,19 +602,7 @@ static struct i2c_driver isl29018_driver = {
.remove = __devexit_p(isl29018_remove),
.id_table = isl29018_id,
};
-
-static int __init isl29018_init(void)
-{
- return i2c_add_driver(&isl29018_driver);
-}
-
-static void __exit isl29018_exit(void)
-{
- i2c_del_driver(&isl29018_driver);
-}
-
-module_init(isl29018_init);
-module_exit(isl29018_exit);
+module_i2c_driver(isl29018_driver);
MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 7e984bcf8d7..ffca85e81ef 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -37,6 +37,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "tsl2563.h"
/* Use this many bits for fraction part. */
@@ -226,6 +227,8 @@ static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
if (ret < 0)
return ret;
+ *id = ret;
+
return 0;
}
@@ -510,7 +513,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
}
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->channel == 0)
*val = calib_to_sysfs(chip->calib0);
else
@@ -536,7 +539,7 @@ static const struct iio_chan_spec tsl2563_channels[] = {
.type = IIO_INTENSITY,
.modified = 1,
.channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
.event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH,
@@ -544,8 +547,8 @@ static const struct iio_chan_spec tsl2563_channels[] = {
}, {
.type = IIO_INTENSITY,
.modified = 1,
- .channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}
};
@@ -866,20 +869,8 @@ static struct i2c_driver tsl2563_i2c_driver = {
.remove = __devexit_p(tsl2563_remove),
.id_table = tsl2563_id,
};
-
-static int __init tsl2563_init(void)
-{
- return i2c_add_driver(&tsl2563_i2c_driver);
-}
-
-static void __exit tsl2563_exit(void)
-{
- i2c_del_driver(&tsl2563_i2c_driver);
-}
+module_i2c_driver(tsl2563_i2c_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("tsl2563 light sensor driver");
MODULE_LICENSE("GPL");
-
-module_init(tsl2563_init);
-module_exit(tsl2563_exit);
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 80f77cf8e9c..5b6455a238d 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -194,6 +194,7 @@ static int taos_get_lux(struct iio_dev *indio_dev)
{
u16 ch0, ch1; /* separated ch0/ch1 data from device */
u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
u32 ratio;
u8 buf[5];
struct taos_lux *p;
@@ -297,9 +298,19 @@ static int taos_get_lux(struct iio_dev *indio_dev)
lux = (lux + (chip->als_time_scale >> 1)) /
chip->als_time_scale;
- /* adjust for active gain scale */
- lux >>= 13; /* tables have factor of 8192 builtin for accuracy */
- lux = (lux * chip->taos_settings.als_gain_trim + 500) / 1000;
+ /* Adjust for active gain scale.
+ * The taos_device_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->taos_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
return_max:
lux = TSL258X_LUX_CALC_OVER_FLOW;
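A worked, standalone sketch of the gain arithmetic described in the comment above (hypothetical input values, plain userspace C rather than the driver code), showing why the multiply is widened to 64 bits before the shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lux = 40000000;        /* hypothetical raw lux; tables carry a factor of 8192 */
        uint32_t gain_trim = 1250;      /* hypothetical user gain, in 1/1000ths */
        uint64_t lux64;

        /* 40000000 * 1250 = 5e10 does not fit in 32 bits, so widen first */
        lux64 = (uint64_t)lux * gain_trim;
        lux64 >>= 13;                   /* strip the 8192 table factor */
        lux = (uint32_t)lux64;          /* small enough again for 32-bit arithmetic */
        lux = (lux + 500) / 1000;       /* round the 1/1000ths gain back out */
        printf("lux = %u\n", lux);
        return 0;
}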
@@ -933,19 +944,7 @@ static struct i2c_driver taos_driver = {
.probe = taos_probe,
.remove = __devexit_p(taos_remove),
};
-
-static int __init taos_init(void)
-{
- return i2c_add_driver(&taos_driver);
-}
-
-static void __exit taos_exit(void)
-{
- i2c_del_driver(&taos_driver);
-}
-
-module_init(taos_init);
-module_exit(taos_exit);
+module_i2c_driver(taos_driver);
MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 8b017127fd4..3158f12cb05 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -431,7 +431,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case 0:
return ak8975_read_axis(indio_dev, chan->address, val);
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = data->raw_to_gauss[chan->address];
return IIO_VAL_INT;
}
@@ -443,7 +443,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = index, \
}
@@ -572,19 +572,7 @@ static struct i2c_driver ak8975_driver = {
.remove = __devexit_p(ak8975_remove),
.id_table = ak8975_id,
};
-
-static int __init ak8975_init(void)
-{
- return i2c_add_driver(&ak8975_driver);
-}
-
-static void __exit ak8975_exit(void)
-{
- i2c_del_driver(&ak8975_driver);
-}
-
-module_init(ak8975_init);
-module_exit(ak8975_exit);
+module_i2c_driver(ak8975_driver);
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_DESCRIPTION("AK8975 magnetometer driver");
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index fc9ee970888..f2e85a9cf19 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -463,7 +463,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
return hmc5843_read_measurement(indio_dev,
chan->address,
val);
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = hmc5843_regval_to_nanoscale[data->range];
return IIO_VAL_INT_PLUS_NANO;
@@ -476,7 +476,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = add \
}
@@ -605,6 +605,7 @@ static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
@@ -618,20 +619,8 @@ static struct i2c_driver hmc5843_driver = {
.suspend = hmc5843_suspend,
.resume = hmc5843_resume,
};
-
-static int __init hmc5843_init(void)
-{
- return i2c_add_driver(&hmc5843_driver);
-}
-
-static void __exit hmc5843_exit(void)
-{
- i2c_del_driver(&hmc5843_driver);
-}
+module_i2c_driver(hmc5843_driver);
MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com");
MODULE_DESCRIPTION("HMC5843 driver");
MODULE_LICENSE("GPL");
-
-module_init(hmc5843_init);
-module_exit(hmc5843_exit);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 940fef602b1..57baac6c0d4 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -577,19 +577,9 @@ static struct spi_driver ade7753_driver = {
.probe = ade7753_probe,
.remove = __devexit_p(ade7753_remove),
};
-
-static __init int ade7753_init(void)
-{
- return spi_register_driver(&ade7753_driver);
-}
-module_init(ade7753_init);
-
-static __exit void ade7753_exit(void)
-{
- spi_unregister_driver(&ade7753_driver);
-}
-module_exit(ade7753_exit);
+module_spi_driver(ade7753_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ade7753");
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 33f0d327c9f..8d81c92007e 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -600,19 +600,9 @@ static struct spi_driver ade7754_driver = {
.probe = ade7754_probe,
.remove = __devexit_p(ade7754_remove),
};
-
-static __init int ade7754_init(void)
-{
- return spi_register_driver(&ade7754_driver);
-}
-module_init(ade7754_init);
-
-static __exit void ade7754_exit(void)
-{
- spi_unregister_driver(&ade7754_driver);
-}
-module_exit(ade7754_exit);
+module_spi_driver(ade7754_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7754");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index c5dafbdf3bd..dcb20294dfe 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "meter.h"
#include "ade7758.h"
@@ -663,63 +663,63 @@ static const struct attribute_group ade7758_attribute_group = {
static struct iio_chan_spec ade7758_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
0, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
1, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
2, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
3, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
4, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
5, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
6, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
7, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
8, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
9, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
10, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
11, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
12, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
13, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
14, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(15),
@@ -746,12 +746,12 @@ static int __devinit ade7758_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
/* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7758_MAX_RX, GFP_KERNEL);
+ st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
if (st->rx == NULL) {
ret = -ENOMEM;
goto error_free_dev;
}
- st->tx = kzalloc(sizeof(*st->tx)*ADE7758_MAX_TX, GFP_KERNEL);
+ st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
if (st->tx == NULL) {
ret = -ENOMEM;
goto error_free_rx;
@@ -843,6 +843,7 @@ static const struct spi_device_id ade7758_id[] = {
{"ade7758", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ade7758_id);
static struct spi_driver ade7758_driver = {
.driver = {
@@ -853,18 +854,7 @@ static struct spi_driver ade7758_driver = {
.remove = __devexit_p(ade7758_remove),
.id_table = ade7758_id,
};
-
-static __init int ade7758_init(void)
-{
- return spi_register_driver(&ade7758_driver);
-}
-module_init(ade7758_init);
-
-static __exit void ade7758_exit(void)
-{
- spi_unregister_driver(&ade7758_driver);
-}
-module_exit(ade7758_exit);
+module_spi_driver(ade7758_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 00fa2ac5c45..f29f2b278fe 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -67,7 +67,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
s64 dat64[2];
u32 *dat32 = (u32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
if (ade7758_spi_read_burst(&indio_dev->dev) >= 0)
*dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
@@ -96,10 +96,11 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
d_size = st->ade7758_ring_channels[channel].scan_type.storagebits / 8;
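A small userspace stand-in for the active-scan-mask handling above (single-word mask with a hypothetical value, not the kernel bitmap API): refuse to proceed when no channel is enabled, otherwise pick the lowest enabled channel, as bitmap_empty()/find_first_bit() do.

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        unsigned int active_scan_mask = 0x28;   /* hypothetical: channels 3 and 5 enabled */
        int channel;

        if (active_scan_mask == 0) {            /* one-word equivalent of bitmap_empty() */
                fprintf(stderr, "no channels enabled\n");
                return 1;
        }
        channel = ffs(active_scan_mask) - 1;    /* lowest set bit, like find_first_bit() */
        printf("first enabled channel: %d\n", channel);
        return 0;
}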
@@ -145,8 +146,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->setup_ops = &ade7758_ring_setup_ops;
- indio_dev->buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ade7758_trigger_handler,
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index b691f10ce98..0beab478dcd 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -521,19 +521,9 @@ static struct spi_driver ade7759_driver = {
.probe = ade7759_probe,
.remove = __devexit_p(ade7759_remove),
};
-
-static __init int ade7759_init(void)
-{
- return spi_register_driver(&ade7759_driver);
-}
-module_init(ade7759_init);
-
-static __exit void ade7759_exit(void)
-{
- spi_unregister_driver(&ade7759_driver);
-}
-module_exit(ade7759_exit);
+module_spi_driver(ade7759_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7759");
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index cbca3fd8fcd..1e1faa0479d 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -253,19 +253,7 @@ static struct i2c_driver ade7854_i2c_driver = {
.remove = __devexit_p(ade7854_i2c_remove),
.id_table = ade7854_id,
};
-
-static __init int ade7854_i2c_init(void)
-{
- return i2c_add_driver(&ade7854_i2c_driver);
-}
-module_init(ade7854_i2c_init);
-
-static __exit void ade7854_i2c_exit(void)
-{
- i2c_del_driver(&ade7854_i2c_driver);
-}
-module_exit(ade7854_i2c_exit);
-
+module_i2c_driver(ade7854_i2c_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index cfa23ba30ef..81121862c1b 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -343,6 +343,7 @@ static const struct spi_device_id ade7854_id[] = {
{ "ade7878", 0 },
{ }
};
+MODULE_DEVICE_TABLE(spi, ade7854_id);
static struct spi_driver ade7854_driver = {
.driver = {
@@ -353,18 +354,7 @@ static struct spi_driver ade7854_driver = {
.remove = __devexit_p(ade7854_spi_remove),
.id_table = ade7854_id,
};
-
-static __init int ade7854_init(void)
-{
- return spi_register_driver(&ade7854_driver);
-}
-module_init(ade7854_init);
-
-static __exit void ade7854_exit(void)
-{
- spi_unregister_driver(&ade7854_driver);
-}
-module_exit(ade7854_exit);
+module_spi_driver(ade7854_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 SPI Driver");
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index d7ad46aed0f..d8ce854c189 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -160,6 +160,7 @@ static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1205" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
@@ -170,18 +171,7 @@ static struct spi_driver ad2s1200_driver = {
.remove = __devexit_p(ad2s1200_remove),
.id_table = ad2s1200_id,
};
-
-static __init int ad2s1200_spi_init(void)
-{
- return spi_register_driver(&ad2s1200_driver);
-}
-module_init(ad2s1200_spi_init);
-
-static __exit void ad2s1200_spi_exit(void)
-{
- spi_unregister_driver(&ad2s1200_driver);
-}
-module_exit(ad2s1200_spi_exit);
+module_spi_driver(ad2s1200_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6401a627362..c439fcf72be 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -749,6 +749,7 @@ static const struct spi_device_id ad2s1210_id[] = {
{ "ad2s1210" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1210_id);
static struct spi_driver ad2s1210_driver = {
.driver = {
@@ -759,18 +760,7 @@ static struct spi_driver ad2s1210_driver = {
.remove = __devexit_p(ad2s1210_remove),
.id_table = ad2s1210_id,
};
-
-static __init int ad2s1210_spi_init(void)
-{
- return spi_register_driver(&ad2s1210_driver);
-}
-module_init(ad2s1210_spi_init);
-
-static __exit void ad2s1210_spi_exit(void)
-{
- spi_unregister_driver(&ad2s1210_driver);
-}
-module_exit(ad2s1210_spi_exit);
+module_spi_driver(ad2s1210_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index a9200d949dc..2a86f582ddf 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -109,6 +109,7 @@ static const struct spi_device_id ad2s90_id[] = {
{ "ad2s90" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s90_id);
static struct spi_driver ad2s90_driver = {
.driver = {
@@ -119,18 +120,7 @@ static struct spi_driver ad2s90_driver = {
.remove = __devexit_p(ad2s90_remove),
.id_table = ad2s90_id,
};
-
-static __init int ad2s90_spi_init(void)
-{
- return spi_register_driver(&ad2s90_driver);
-}
-module_init(ad2s90_spi_init);
-
-static __exit void ad2s90_spi_exit(void)
-{
- spi_unregister_driver(&ad2s90_driver);
-}
-module_exit(ad2s90_spi_exit);
+module_spi_driver(ad2s90_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 66a34addb75..3e24ec45585 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -23,11 +23,8 @@
* @data: the ring buffer memory
* @read_p: read pointer (oldest available)
* @write_p: write pointer
- * @last_written_p: read pointer (newest available)
* @half_p: half buffer length behind write_p (event generation)
- * @use_count: reference count to prevent resizing when in use
 * @update_needed: flag to indicate a change in size requested
- * @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
* struct iio_buffer.
@@ -37,12 +34,9 @@ struct iio_sw_ring_buffer {
unsigned char *data;
unsigned char *read_p;
unsigned char *write_p;
- unsigned char *last_written_p;
/* used to act as a point at which to signal an event */
unsigned char *half_p;
- int use_count;
int update_needed;
- spinlock_t use_lock;
};
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
@@ -56,38 +50,15 @@ static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
ring->read_p = NULL;
ring->write_p = NULL;
- ring->last_written_p = NULL;
ring->half_p = NULL;
return ring->data ? 0 : -ENOMEM;
}
-static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- spin_lock_init(&ring->use_lock);
-}
-
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count++;
- spin_unlock(&ring->use_lock);
-}
-
-static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count--;
- spin_unlock(&ring->use_lock);
-}
-
-
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
* in the device driver */
@@ -115,7 +86,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
* Always valid as either points to latest or second latest value.
* Before this runs it is null and read attempts fail with -EAGAIN.
*/
- ring->last_written_p = ring->write_p;
barrier();
/* temp_ptr used to ensure we never have an invalid pointer
* it may be slightly lagging, but never invalid
@@ -174,6 +144,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
u8 *data;
int ret, max_copied, bytes_to_rip, dead_offset;
+ size_t data_available, buffer_size;
/* A userspace program has probably made an error if it tries to
* read something that is not a whole number of bpds.
@@ -186,9 +157,11 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
n, ring->buf.bytes_per_datum);
goto error_ret;
}
+
+ buffer_size = ring->buf.bytes_per_datum*ring->buf.length;
+
/* Limit size to whole of ring buffer */
- bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
- n);
+ bytes_to_rip = min_t(size_t, buffer_size, n);
data = kmalloc(bytes_to_rip, GFP_KERNEL);
if (data == NULL) {
@@ -217,38 +190,24 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
goto error_free_data_cpy;
}
- if (initial_write_p >= initial_read_p + bytes_to_rip) {
- /* write_p is greater than necessary, all is easy */
- max_copied = bytes_to_rip;
- memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_read_p + max_copied;
- } else if (initial_write_p > initial_read_p) {
- /*not enough data to cpy */
- max_copied = initial_write_p - initial_read_p;
+ if (initial_write_p >= initial_read_p)
+ data_available = initial_write_p - initial_read_p;
+ else
+ data_available = buffer_size - (initial_read_p - initial_write_p);
+
+ if (data_available < bytes_to_rip)
+ bytes_to_rip = data_available;
+
+ if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
+ max_copied = ring->data + buffer_size - initial_read_p;
memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_write_p;
+ memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
+ end_read_p = ring->data + bytes_to_rip - max_copied;
} else {
- /* going through 'end' of ring buffer */
- max_copied = ring->data
- + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
- memcpy(data, initial_read_p, max_copied);
- /* possible we are done if we align precisely with end */
- if (max_copied == bytes_to_rip)
- end_read_p = ring->data;
- else if (initial_write_p
- > ring->data + bytes_to_rip - max_copied) {
- /* enough data to finish */
- memcpy(data + max_copied, ring->data,
- bytes_to_rip - max_copied);
- max_copied = bytes_to_rip;
- end_read_p = ring->data + (bytes_to_rip - max_copied);
- } else { /* not enough data */
- memcpy(data + max_copied, ring->data,
- initial_write_p - ring->data);
- max_copied += initial_write_p - ring->data;
- end_read_p = initial_write_p;
- }
+ memcpy(data, initial_read_p, bytes_to_rip);
+ end_read_p = initial_read_p + bytes_to_rip;
}
+
/* Now to verify which section was cleanly copied - i.e. how far
* read pointer has been pushed */
current_read_p = ring->read_p;
@@ -256,15 +215,14 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
if (initial_read_p <= current_read_p)
dead_offset = current_read_p - initial_read_p;
else
- dead_offset = ring->buf.length*ring->buf.bytes_per_datum
- - (initial_read_p - current_read_p);
+ dead_offset = buffer_size - (initial_read_p - current_read_p);
/* possible issue if the initial write has been lapped or indeed
* the point we were reading to has been passed */
/* No valid data read.
* In this case the read pointer is already correct having been
* pushed further than we would look. */
- if (max_copied - dead_offset < 0) {
+ if (bytes_to_rip - dead_offset < 0) {
ret = 0;
goto error_free_data_cpy;
}
@@ -280,7 +238,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
while (ring->read_p != end_read_p)
ring->read_p = end_read_p;
- ret = max_copied - dead_offset;
+ ret = bytes_to_rip - dead_offset;
if (copy_to_user(buf, data + dead_offset, ret)) {
ret = -EFAULT;
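A compact userspace model of the simplified copy logic above (hypothetical names and sizes, not the driver code): compute how much data sits between the read and write positions, clamp the request to that, then copy with at most one wrap around the end of the buffer.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

static unsigned char ring[BUF_SIZE];    /* stands in for ring->data */

/* rd/wr are byte offsets standing in for read_p/write_p. */
static size_t ring_read(size_t rd, size_t wr, unsigned char *out, size_t want)
{
        size_t avail, first;

        if (wr >= rd)
                avail = wr - rd;
        else
                avail = BUF_SIZE - (rd - wr);
        if (want > avail)
                want = avail;

        if (rd + want > BUF_SIZE) {             /* read span wraps past the end */
                first = BUF_SIZE - rd;
                memcpy(out, ring + rd, first);
                memcpy(out + first, ring, want - first);
        } else {
                memcpy(out, ring + rd, want);
        }
        return want;
}

int main(void)
{
        unsigned char out[BUF_SIZE];
        size_t n;

        memcpy(ring, "ABCDEFGH", BUF_SIZE);
        n = ring_read(6, 3, out, 6);            /* clamps to 5 and copies "GH" then "ABC" */
        printf("copied %zu bytes: %.*s\n", n, (int)n, out);
        return 0;
}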
@@ -305,52 +263,18 @@ static int iio_store_to_sw_rb(struct iio_buffer *r,
return iio_store_to_sw_ring(ring, data, timestamp);
}
-static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data)
-{
- unsigned char *last_written_p_copy;
-
- iio_mark_sw_rb_in_use(&ring->buf);
-again:
- barrier();
- last_written_p_copy = ring->last_written_p;
- barrier(); /*unnessecary? */
- /* Check there is anything here */
- if (last_written_p_copy == NULL)
- return -EAGAIN;
- memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);
-
- if (unlikely(ring->last_written_p != last_written_p_copy))
- goto again;
-
- iio_unmark_sw_rb_in_use(&ring->buf);
- return 0;
-}
-
-static int iio_read_last_from_sw_rb(struct iio_buffer *r,
- unsigned char *data)
-{
- return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
-}
-
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
int ret = 0;
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
r->stufftoread = false;
- spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
- if (ring->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
__iio_free_sw_ring_buffer(ring);
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
ring->buf.length);
error_ret:
- spin_unlock(&ring->use_lock);
return ret;
}
@@ -360,12 +284,18 @@ static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
return ring->buf.bytes_per_datum;
}
+static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
+{
+ struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+ ring->update_needed = true;
+ return 0;
+}
+
static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
@@ -379,27 +309,17 @@ static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
-static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- ring->update_needed = true;
- return 0;
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -419,8 +339,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
return NULL;
ring->update_needed = true;
buf = &ring->buf;
- iio_buffer_init(buf, indio_dev);
- __iio_init_sw_ring_buffer(ring);
+ iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
return buf;
@@ -434,12 +353,8 @@ void iio_sw_rb_free(struct iio_buffer *r)
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .mark_in_use = &iio_mark_sw_rb_in_use,
- .unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
- .read_last = &iio_read_last_from_sw_rb,
.read_first_n = &iio_read_first_n_sw_rb,
- .mark_param_change = &iio_mark_update_needed_sw_rb,
.request_update = &iio_request_update_sw_rb,
.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index a3e15784c22..e6a6e2c4096 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -23,7 +23,7 @@
#ifndef _IIO_RING_SW_H_
#define _IIO_RING_SW_H_
-#include "buffer_generic.h"
+#include "buffer.h"
/**
* ring_sw_access_funcs - access functions for a software ring buffer
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index 868952b5ba6..bfedb73b850 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -114,47 +114,4 @@ struct iio_const_attr {
#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
IIO_CONST_ATTR(in_temp_scale, _string)
-enum iio_event_type {
- IIO_EV_TYPE_THRESH,
- IIO_EV_TYPE_MAG,
- IIO_EV_TYPE_ROC,
- IIO_EV_TYPE_THRESH_ADAPTIVE,
- IIO_EV_TYPE_MAG_ADAPTIVE,
-};
-
-enum iio_event_direction {
- IIO_EV_DIR_EITHER,
- IIO_EV_DIR_RISING,
- IIO_EV_DIR_FALLING,
-};
-
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
- (((u64)type << 56) | ((u64)diff << 55) | \
- ((u64)direction << 48) | ((u64)modifier << 40) | \
- ((u64)chan_type << 32) | (chan2 << 16) | chan1 | chan)
-
-#define IIO_EV_DIR_MAX 4
-#define IIO_EV_BIT(type, direction) \
- (1 << (type*IIO_EV_DIR_MAX + direction))
-
-#define IIO_MOD_EVENT_CODE(channelclass, number, modifier, \
- type, direction) \
- IIO_EVENT_CODE(channelclass, 0, modifier, direction, type, number, 0, 0)
-
-#define IIO_UNMOD_EVENT_CODE(channelclass, number, type, direction) \
- IIO_EVENT_CODE(channelclass, 0, 0, direction, type, number, 0, 0)
-
-#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
-
-#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
-
-/* Event code number extraction depends on which type of event we have.
- * Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) (mask & 0xFFFF)
-
-#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
-
#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index 5cc42a655c8..1cfca231db8 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -46,7 +46,6 @@ struct iio_trigger_ops {
* @private_data: [DRIVER] device specific data
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @owner: [DRIVER] used to monitor usage count of the trigger.
* @use_count: use count for the trigger
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
@@ -63,7 +62,6 @@ struct iio_trigger {
void *private_data;
struct list_head list;
struct list_head alloc_list;
- struct module *owner;
int use_count;
struct irq_chip subirq_chip;
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index d35d085da94..bd7416b2c56 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -125,7 +125,6 @@ static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
goto error_put_trigger_and_remove_from_list;
}
trig->private_data = trig_info;
- trig->owner = THIS_MODULE;
trig->ops = &iio_prtc_trigger_ops;
/* RTC access */
trig_info->rtc
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
new file mode 100644
index 00000000000..b7d26474ad0
--- /dev/null
+++ b/drivers/staging/iio/types.h
@@ -0,0 +1,49 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TYPES_H_
+#define _IIO_TYPES_H_
+
+enum iio_chan_type {
+ /* real channel types */
+ IIO_VOLTAGE,
+ IIO_CURRENT,
+ IIO_POWER,
+ IIO_ACCEL,
+ IIO_ANGL_VEL,
+ IIO_MAGN,
+ IIO_LIGHT,
+ IIO_INTENSITY,
+ IIO_PROXIMITY,
+ IIO_TEMP,
+ IIO_INCLI,
+ IIO_ROT,
+ IIO_ANGL,
+ IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
+};
+
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_AND_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+};
+
+#endif /* _IIO_TYPES_H_ */
diff --git a/drivers/staging/intel_sst/Kconfig b/drivers/staging/intel_sst/Kconfig
deleted file mode 100644
index 82391077b38..00000000000
--- a/drivers/staging/intel_sst/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-config SND_INTEL_SST
- tristate "Intel SST (LPE) Driver"
- depends on X86 && INTEL_SCU_IPC
- default n
- help
- Say Y here to include support for the Intel(R) MID SST DSP driver
- On other PC platforms if you are unsure answer 'N'
-
-config SND_INTELMID
- tristate "Intel MID sound card driver"
- depends on SOUND && SND
- select SND_PCM
- select SND_SEQUENCER
- select SND_JACK
- depends on SND_INTEL_SST
- default n
- help
- Say Y here to include support for the Intel(R) MID sound card driver
- On other PC platforms if you are unsure answer 'N'
diff --git a/drivers/staging/intel_sst/Makefile b/drivers/staging/intel_sst/Makefile
deleted file mode 100644
index 9eb7c158bb6..00000000000
--- a/drivers/staging/intel_sst/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Intel MID Audio drivers
-#
-snd-intel-sst-y := intel_sst.o intel_sst_ipc.o intel_sst_stream.o intel_sst_drv_interface.o intel_sst_dsp.o intel_sst_pvt.o intel_sst_stream_encoded.o intel_sst_app_interface.o
-snd-intelmid-y := intelmid.o intelmid_msic_control.o intelmid_ctrl.o intelmid_pvt.o intelmid_v0_control.o intelmid_v1_control.o intelmid_v2_control.o
-obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
-obj-$(CONFIG_SND_INTELMID) += snd-intelmid.o
diff --git a/drivers/staging/intel_sst/TODO b/drivers/staging/intel_sst/TODO
deleted file mode 100644
index c733d701109..00000000000
--- a/drivers/staging/intel_sst/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO
-----
-
-Get the memrar driver cleaned up and upstream (dependency blocking SST)
-Replace long/short press with two virtual buttons
-Review the printks and kill off any left over ST_ERR: messages
-Review the misc device ioctls for 32/64bit safety and sanity
-Review the misc device ioctls for size safety depending on config and decide
- if space/unused areas should be left
-What the sound folks turn up on full review
-Using the ALSA frameworks properly
-
-
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
deleted file mode 100644
index ff9aaec0557..00000000000
--- a/drivers/staging/intel_sst/intel_sst.c
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * intel_sst.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all init functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-#include <linux/miscdevice.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <asm/mrst.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(SST_DRIVER_VERSION);
-
-struct intel_sst_drv *sst_drv_ctx;
-static struct mutex drv_ctx_lock;
-struct class *sst_class;
-
-/* fops Routines */
-static const struct file_operations intel_sst_fops = {
- .owner = THIS_MODULE,
- .open = intel_sst_open,
- .release = intel_sst_release,
- .read = intel_sst_read,
- .write = intel_sst_write,
- .unlocked_ioctl = intel_sst_ioctl,
- .mmap = intel_sst_mmap,
- .aio_read = intel_sst_aio_read,
- .aio_write = intel_sst_aio_write,
-};
-static const struct file_operations intel_sst_fops_cntrl = {
- .owner = THIS_MODULE,
- .open = intel_sst_open_cntrl,
- .release = intel_sst_release_cntrl,
- .unlocked_ioctl = intel_sst_ioctl,
-};
-
-static struct miscdevice lpe_dev = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst",/* /dev/intel_sst */
- .fops = &intel_sst_fops
-};
-
-
-static struct miscdevice lpe_ctrl = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
- .fops = &intel_sst_fops_cntrl
-};
-
-/**
-* intel_sst_interrupt - Interrupt service routine for SST
-*
-* @irq: irq number of interrupt
-* @context: pointer to device structre
-*
-* This function is called by OS when SST device raises
-* an interrupt. This will be result of write in IPC register
-* Source can be busy or done interrupt
-*/
-static irqreturn_t intel_sst_interrupt(int irq, void *context)
-{
- union interrupt_reg isr;
- union ipc_header header;
- union interrupt_reg imr;
- struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
- unsigned int size = 0, str_id;
- struct stream_info *stream ;
-
- /* Do not handle interrupt in suspended state */
- if (drv->sst_state == SST_SUSPENDED)
- return IRQ_NONE;
- /* Interrupt arrived, check src */
- isr.full = sst_shim_read(drv->shim, SST_ISRX);
-
- if (isr.part.busy_interrupt) {
- header.full = sst_shim_read(drv->shim, SST_IPCD);
- if (header.part.msg_id == IPC_SST_PERIOD_ELAPSED) {
- sst_clear_interrupt();
- str_id = header.part.str_id;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->period_elapsed)
- stream->period_elapsed(stream->pcm_substream);
- return IRQ_HANDLED;
- }
- if (header.part.large)
- size = header.part.data;
- if (header.part.msg_id & REPLY_MSG) {
- sst_drv_ctx->ipc_process_msg.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_msg.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_msg_wq,
- &sst_drv_ctx->ipc_process_msg.wq);
- } else {
- sst_drv_ctx->ipc_process_reply.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_reply.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_reply_wq,
- &sst_drv_ctx->ipc_process_reply.wq);
- }
- /* mask busy inetrrupt */
- imr.full = sst_shim_read(drv->shim, SST_IMRX);
- imr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- return IRQ_HANDLED;
- } else if (isr.part.done_interrupt) {
- /* Clear done bit */
- header.full = sst_shim_read(drv->shim, SST_IPCX);
- header.part.done = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, header.full);
- /* write 1 to clear status register */;
- isr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- queue_work(sst_drv_ctx->post_msg_wq,
- &sst_drv_ctx->ipc_post_msg.wq);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-
-}
-
-
-/*
-* intel_sst_probe - PCI probe function
-*
-* @pci: PCI device structure
-* @pci_id: PCI device ID structure
-*
-* This function is called by OS when a device is found
-* This enables the device, interrupt etc
-*/
-static int __devinit intel_sst_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
-{
- int i, ret = 0;
-
- pr_debug("Probe for DID %x\n", pci->device);
- mutex_lock(&drv_ctx_lock);
- if (sst_drv_ctx) {
- pr_err("Only one sst handle is supported\n");
- mutex_unlock(&drv_ctx_lock);
- return -EBUSY;
- }
-
- sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
- if (!sst_drv_ctx) {
- pr_err("malloc fail\n");
- mutex_unlock(&drv_ctx_lock);
- return -ENOMEM;
- }
- mutex_unlock(&drv_ctx_lock);
-
- sst_drv_ctx->pci_id = pci->device;
-
- mutex_init(&sst_drv_ctx->stream_lock);
- mutex_init(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
-
- sst_drv_ctx->stream_cnt = 0;
- sst_drv_ctx->encoded_cnt = 0;
- sst_drv_ctx->am_cnt = 0;
- sst_drv_ctx->pb_streams = 0;
- sst_drv_ctx->cp_streams = 0;
- sst_drv_ctx->unique_id = 0;
- sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
-
- INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
- INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
- INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
- init_waitqueue_head(&sst_drv_ctx->wait_queue);
-
- sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
- if (!sst_drv_ctx->mad_wq)
- goto do_free_drv_ctx;
- sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
- if (!sst_drv_ctx->post_msg_wq)
- goto free_mad_wq;
- sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
- if (!sst_drv_ctx->process_msg_wq)
- goto free_post_msg_wq;
- sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
- if (!sst_drv_ctx->process_reply_wq)
- goto free_process_msg_wq;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- }
- spin_lock_init(&sst_drv_ctx->list_spin_lock);
-
- sst_drv_ctx->max_streams = pci_id->driver_data;
- pr_debug("Got drv data max stream %d\n",
- sst_drv_ctx->max_streams);
- for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
- struct stream_info *stream = &sst_drv_ctx->streams[i];
- INIT_LIST_HEAD(&stream->bufs);
- mutex_init(&stream->lock);
- spin_lock_init(&stream->pcm_lock);
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- sst_drv_ctx->mmap_mem = NULL;
- sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
- while (sst_drv_ctx->mmap_len > 0) {
- sst_drv_ctx->mmap_mem =
- kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
- if (sst_drv_ctx->mmap_mem) {
- pr_debug("Got memory %p size 0x%x\n",
- sst_drv_ctx->mmap_mem,
- sst_drv_ctx->mmap_len);
- break;
- }
- if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
- pr_err("mem alloc fail...abort!!\n");
- ret = -ENOMEM;
- goto free_process_reply_wq;
- }
- sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
- pr_debug("mem alloc failed...trying %d\n",
- sst_drv_ctx->mmap_len);
- }
- }
-
- /* Init the device */
- ret = pci_enable_device(pci);
- if (ret) {
- pr_err("device can't be enabled\n");
- goto do_free_mem;
- }
- sst_drv_ctx->pci = pci_dev_get(pci);
- ret = pci_request_regions(pci, SST_DRV_NAME);
- if (ret)
- goto do_disable_device;
- /* map registers */
- /* SST Shim */
- sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
- sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
- if (!sst_drv_ctx->shim)
- goto do_release_regions;
- pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
-
- /* Shared SRAM */
- sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
- if (!sst_drv_ctx->mailbox)
- goto do_unmap_shim;
- pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
-
- /* IRAM */
- sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
- if (!sst_drv_ctx->iram)
- goto do_unmap_sram;
- pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
-
- /* DRAM */
- sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
- if (!sst_drv_ctx->dram)
- goto do_unmap_iram;
- pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* Register the ISR */
- ret = request_irq(pci->irq, intel_sst_interrupt,
- IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
- if (ret)
- goto do_unmap_dram;
- pr_debug("Registered IRQ 0x%x\n", pci->irq);
-
- /*Register LPE Control as misc driver*/
- ret = misc_register(&lpe_ctrl);
- if (ret) {
- pr_err("couldn't register control device\n");
- goto do_free_irq;
- }
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- ret = misc_register(&lpe_dev);
- if (ret) {
- pr_err("couldn't register LPE device\n");
- goto do_free_misc;
- }
- } else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- u32 csr;
-
- /*allocate mem for fw context save during suspend*/
- sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
- if (!sst_drv_ctx->fw_cntx) {
- ret = -ENOMEM;
- goto do_free_misc;
- }
- /*setting zero as that is valid mem to restore*/
- sst_drv_ctx->fw_cntx_size = 0;
-
- /*set lpe start clock and ram size*/
- csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr |= 0x30060; /*remove the clock ratio after fw fix*/
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
- }
- sst_drv_ctx->lpe_stalled = 0;
- pci_set_drvdata(pci, sst_drv_ctx);
- pm_runtime_allow(&pci->dev);
- pm_runtime_put_noidle(&pci->dev);
- pr_debug("...successfully done!!!\n");
- return ret;
-
-do_free_misc:
- misc_deregister(&lpe_ctrl);
-do_free_irq:
- free_irq(pci->irq, sst_drv_ctx);
-do_unmap_dram:
- iounmap(sst_drv_ctx->dram);
-do_unmap_iram:
- iounmap(sst_drv_ctx->iram);
-do_unmap_sram:
- iounmap(sst_drv_ctx->mailbox);
-do_unmap_shim:
- iounmap(sst_drv_ctx->shim);
-do_release_regions:
- pci_release_regions(pci);
-do_disable_device:
- pci_disable_device(pci);
-do_free_mem:
- kfree(sst_drv_ctx->mmap_mem);
-free_process_reply_wq:
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
-free_process_msg_wq:
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
-free_post_msg_wq:
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
-free_mad_wq:
- destroy_workqueue(sst_drv_ctx->mad_wq);
-do_free_drv_ctx:
- kfree(sst_drv_ctx);
- sst_drv_ctx = NULL;
- pr_err("Probe failed with %d\n", ret);
- return ret;
-}
-
-/**
-* intel_sst_remove - PCI remove function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a device is unloaded
-* This frees the interrupt etc
-*/
-static void __devexit intel_sst_remove(struct pci_dev *pci)
-{
- pm_runtime_get_noresume(&pci->dev);
- pm_runtime_forbid(&pci->dev);
- pci_dev_put(sst_drv_ctx->pci);
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- misc_deregister(&lpe_ctrl);
- free_irq(pci->irq, sst_drv_ctx);
- iounmap(sst_drv_ctx->dram);
- iounmap(sst_drv_ctx->iram);
- iounmap(sst_drv_ctx->mailbox);
- iounmap(sst_drv_ctx->shim);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- misc_deregister(&lpe_dev);
- kfree(sst_drv_ctx->mmap_mem);
- } else
- kfree(sst_drv_ctx->fw_cntx);
- flush_scheduled_work();
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
- destroy_workqueue(sst_drv_ctx->mad_wq);
- kfree(pci_get_drvdata(pci));
- sst_drv_ctx = NULL;
- pci_release_regions(pci);
- pci_disable_device(pci);
- pci_set_drvdata(pci, NULL);
-}
-
-void sst_save_dsp_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- unsigned int pvt_id, i;
- struct ipc_post *msg = NULL;
-
- /*check cpu type*/
- if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
- return;
- /*not supported for rest*/
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING) {
- pr_debug("fw not running no context save ...\n");
- return;
- }
-
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_GET_FW_CTXT, 1, pvt_id);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = FW_CONTEXT_MEM;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- /*wait for reply*/
- if (sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]))
- pr_debug("err fw context save timeout ...\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_debug("fw context saved ...\n");
- return;
-}
-
-/* Power Management */
-/*
-* intel_sst_suspend - PCI suspend function
-*
-* @pci: PCI device structure
-* @state: PM message
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_suspend called\n");
-
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pci_set_drvdata(pci, sst_drv_ctx);
- pci_save_state(pci);
- pci_disable_device(pci);
- pci_set_power_state(pci, PCI_D3hot);
- return 0;
-}
-
-/**
-* intel_sst_resume - PCI resume function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_resume(struct pci_dev *pci)
-{
- int ret = 0;
-
- pr_debug("intel_sst_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
- sst_drv_ctx = pci_get_drvdata(pci);
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- ret = pci_enable_device(pci);
- if (ret)
- pr_err("device can't be enabled\n");
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-/* The runtime_suspend/resume is pretty much similar to the legacy
- * suspend/resume with the noted exception below:
- * The PCI core takes care of taking the system through D3hot and
- * restoring it back to D0 and so there is no need to duplicate
- * that here.
- */
-static int intel_sst_runtime_suspend(struct device *dev)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_runtime_suspend called\n");
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
-
- /* Only needed by Medfield */
- if (sst_drv_ctx->pci_id != SST_MRST_PCI_ID)
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_resume(struct device *dev)
-{
-
- pr_debug("intel_sst_runtime_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_idle(struct device *dev)
-{
- pr_debug("runtime_idle called\n");
- if (sst_drv_ctx->stream_cnt == 0 && sst_drv_ctx->am_cnt == 0)
- pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
- return -EBUSY;
-}
-
-static const struct dev_pm_ops intel_sst_pm = {
- .runtime_suspend = intel_sst_runtime_suspend,
- .runtime_resume = intel_sst_runtime_resume,
- .runtime_idle = intel_sst_runtime_idle,
-};
-
-/* PCI Routines */
-static struct pci_device_id intel_sst_ids[] = {
- { PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
- { PCI_VDEVICE(INTEL, SST_MFLD_PCI_ID), 6},
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, intel_sst_ids);
-
-static struct pci_driver driver = {
- .name = SST_DRV_NAME,
- .id_table = intel_sst_ids,
- .probe = intel_sst_probe,
- .remove = __devexit_p(intel_sst_remove),
-#ifdef CONFIG_PM
- .suspend = intel_sst_suspend,
- .resume = intel_sst_resume,
- .driver = {
- .pm = &intel_sst_pm,
- },
-#endif
-};
-
-/**
-* intel_sst_init - Module init function
-*
-* Registers with PCI
-* Registers with /dev
-* Init all data structures
-*/
-static int __init intel_sst_init(void)
-{
- /* Init all variables, data structure etc....*/
- int ret = 0;
- pr_debug("INFO: ******** SST DRIVER loading.. Ver: %s\n",
- SST_DRIVER_VERSION);
-
- mutex_init(&drv_ctx_lock);
- /* Register with PCI */
- ret = pci_register_driver(&driver);
- if (ret)
- pr_err("PCI register failed\n");
- return ret;
-}
-
-/**
-* intel_sst_exit - Module exit function
-*
-* Unregisters with PCI
-* Unregisters with /dev
-* Frees all data structures
-*/
-static void __exit intel_sst_exit(void)
-{
- pci_unregister_driver(&driver);
-
- pr_debug("driver unloaded\n");
- sst_drv_ctx = NULL;
- return;
-}
-
-module_init(intel_sst_init);
-module_exit(intel_sst_exit);
diff --git a/drivers/staging/intel_sst/intel_sst.h b/drivers/staging/intel_sst/intel_sst.h
deleted file mode 100644
index 4ad2829105a..00000000000
--- a/drivers/staging/intel_sst/intel_sst.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __INTEL_SST_H__
-#define __INTEL_SST_H__
-/*
- * intel_sst.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * This file is shared between the SST and MAD drivers
- */
-#include "intel_sst_ioctl.h"
-#include <sound/jack.h>
-
-#define SST_CARD_NAMES "intel_mid_card"
-
-#define MFLD_MAX_HW_CH 4
-/* control list Pmic & Lpe */
-/* Input controls */
-enum port_status {
- ACTIVATE = 1,
- DEACTIVATE,
-};
-
-/* Card states */
-enum sst_card_states {
- SND_CARD_UN_INIT = 0,
- SND_CARD_INIT_DONE,
-};
-
-enum sst_controls {
- SST_SND_ALLOC = 0x1000,
- SST_SND_PAUSE = 0x1001,
- SST_SND_RESUME = 0x1002,
- SST_SND_DROP = 0x1003,
- SST_SND_FREE = 0x1004,
- SST_SND_BUFFER_POINTER = 0x1005,
- SST_SND_STREAM_INIT = 0x1006,
- SST_SND_START = 0x1007,
- SST_SND_STREAM_PROCESS = 0x1008,
- SST_MAX_CONTROLS = 0x1008,
- SST_CONTROL_BASE = 0x1000,
- SST_ENABLE_RX_TIME_SLOT = 0x1009,
-};
-
-enum SND_CARDS {
- SND_FS = 0,
- SND_MX,
- SND_NC,
- SND_MSIC
-};
-
-struct pcm_stream_info {
- int str_id;
- void *mad_substream;
- void (*period_elapsed) (void *mad_substream);
- unsigned long long buffer_ptr;
- int sfreq;
-};
-
-struct snd_pmic_ops {
- int card_status;
- int master_mute;
- int num_channel;
- int input_dev_id;
- int mute_status;
- struct mutex lock;
- int pb_on, pbhs_on;
- int cap_on;
- int output_dev_id;
- int lineout_dev_id, line_out_names_cnt;
- int prev_lineout_dev_id;
- bool jack_interrupt_status;
- int (*set_input_dev) (u8 value);
- int (*set_output_dev) (u8 value);
- int (*set_lineout_dev) (u8 value);
- int (*set_mute) (int dev_id, u8 value);
- int (*get_mute) (int dev_id, u8 *value);
-
- int (*set_vol) (int dev_id, int value);
- int (*get_vol) (int dev_id, int *value);
-
- int (*init_card) (void);
- int (*set_pcm_audio_params)
- (int sfreq, int word_size , int num_channel);
- int (*set_pcm_voice_params) (void);
- int (*set_voice_port) (int status);
- int (*set_audio_port) (int status);
-
- int (*power_up_pmic_pb) (unsigned int port);
- int (*power_up_pmic_cp) (unsigned int port);
- int (*power_down_pmic_pb) (unsigned int device);
- int (*power_down_pmic_cp) (unsigned int device);
- int (*power_down_pmic) (void);
- void (*pmic_irq_cb) (void *cb_data, u8 value);
- void (*pmic_irq_enable)(void *data);
- int (*pmic_jack_enable) (void);
- int (*pmic_get_mic_bias)(void *intelmaddata);
- int (*pmic_set_headset_state)(int state);
-
- unsigned int hw_dmic_map[MFLD_MAX_HW_CH];
- unsigned int available_dmics;
- int (*set_hw_dmic_route) (u8 index);
-
- int gpio_amp;
-};
-
-extern void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent,
- int status);
-
-
-int intemad_set_headset_state(int state);
-int intelmad_get_mic_bias(void);
-
-struct intel_sst_pcm_control {
- int (*open) (struct snd_sst_params *str_param);
- int (*device_control) (int cmd, void *arg);
- int (*close) (unsigned int str_id);
-};
-struct intel_sst_card_ops {
- char *module_name;
- unsigned int vendor_id;
- struct intel_sst_pcm_control *pcm_control;
- struct snd_pmic_ops *scard_ops;
-};
-
-/* modified for generic access */
-struct sc_reg_access {
- u16 reg_addr;
- u8 value;
- u8 mask;
-};
-enum sc_reg_access_type {
- PMIC_READ = 0,
- PMIC_WRITE,
- PMIC_READ_MODIFY,
-};
-
-int register_sst_card(struct intel_sst_card_ops *card);
-void unregister_sst_card(struct intel_sst_card_ops *card);
-#endif /* __INTEL_SST_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
deleted file mode 100644
index 93b41a284d8..00000000000
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ /dev/null
@@ -1,1460 +0,0 @@
-/*
- * intel_sst_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * Jeeja KP <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/uio.h>
-#include <linux/aio.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/ioctl.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../../../drivers/staging/memrar/memrar.h"
-#endif
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-#define AM_MODULE 1
-#define STREAM_MODULE 0
-
-
-/**
-* intel_sst_check_device - checks SST device
-*
-* This utility function checks the state of SST device and downloads FW if
-* not done, or resumes the device if suspended
-*/
-
-static int intel_sst_check_device(void)
-{
- int retval = 0;
- if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
- pr_warn("Sound card not available\n");
- return -EIO;
- }
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_debug("Resume Failed= %#x,abort\n", retval);
- return retval;
- }
- }
-
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- retval = sst_download_fw();
- if (retval)
- return -ENODEV;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- retval = sst_drv_ctx->rx_time_slot_status;
- if (retval != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(retval);
- }
- }
- return 0;
-}
-
-/**
- * intel_sst_open - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle. Only one handle at a time
- * will be allowed
- */
-int intel_sst_open(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
- struct ioctl_pvt_data *data =
- kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
- if (!data) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return -ENOMEM;
- }
-
- sst_drv_ctx->encoded_cnt++;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- data->str_id = 0;
- file_ptr->private_data = (void *)data;
- pr_debug("pvt_id handle = %d!\n", data->pvt_id);
- } else {
- retval = -EUSERS;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- return retval;
-}
-
-/**
- * intel_sst_open_cntrl - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle to /dev/intel_sst_control.
- * Only one handle at a time will be allowed
- * This is for control operations only
- */
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- /* audio manager open */
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
- sst_drv_ctx->am_cnt++;
- pr_debug("AM handle opened...\n");
- file_ptr->private_data = NULL;
- } else {
- retval = -EACCES;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- }
-
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
-}
-
-/**
- * intel_sst_release - releases a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr: pointer to file
- *
- * This function is called by OS when a user space component
- * tries to release a driver handle.
- */
-int intel_sst_release(struct inode *i_node, struct file *file_ptr)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
-
- pr_debug("Release called, closing app handle\n");
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->encoded_cnt--;
- sst_drv_ctx->stream_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- free_stream_context(data->str_id);
- kfree(data);
- return 0;
-}
-
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- /* audio manager close */
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->am_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("AM handle closed\n");
- return 0;
-}
-
-/**
-* intel_sst_mmap - mmaps a kernel buffer to user space for copying data
-*
-* @vma: vm area structure instance
-* @file_ptr: pointer to file
-*
-* This function is called by OS when a user space component
-* tries to get mmap memory from driver
-*/
-int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
-{
- int retval, length;
- struct ioctl_pvt_data *data =
- (struct ioctl_pvt_data *)file_ptr->private_data;
- int str_id = data->str_id;
- void *mem_area;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- length = vma->vm_end - vma->vm_start;
- pr_debug("called for stream %d length 0x%x\n", str_id, length);
-
- if (length > sst_drv_ctx->mmap_len)
- return -ENOMEM;
- if (!sst_drv_ctx->mmap_mem)
- return -EIO;
-
- /* round it up to the page boundary */
- /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem)
- + PAGE_SIZE - 1) & PAGE_MASK);*/
- mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem);
-
- /* map the whole physically contiguous area in one piece */
- retval = remap_pfn_range(vma,
- vma->vm_start,
- virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
- length,
- vma->vm_page_prot);
- if (retval)
- sst_drv_ctx->streams[str_id].mmapped = false;
- else
- sst_drv_ctx->streams[str_id].mmapped = true;
-
- pr_debug("mmap ret 0x%x\n", retval);
- return retval;
-}
-
-/* sets mmap data buffers to play/capture*/
-static int intel_sst_mmap_play_capture(u32 str_id,
- struct snd_sst_mmap_buffs *mmap_buf)
-{
- struct sst_stream_bufs *bufs;
- int retval, i;
- struct stream_info *stream;
- struct snd_sst_mmap_buff_entry *buf_entry;
- struct snd_sst_mmap_buff_entry *tmp_buf;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped != true)
- return -EIO;
-
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
- if (!tmp_buf)
- return -ENOMEM;
- if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
- mmap_buf->entries * sizeof(*tmp_buf))) {
- retval = -EFAULT;
- goto out_free;
- }
-
- pr_debug("new buffers count %d status %d\n",
- mmap_buf->entries, stream->status);
- buf_entry = tmp_buf;
- for (i = 0; i < mmap_buf->entries; i++) {
- bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
- if (!bufs) {
- retval = -ENOMEM;
- goto out_free;
- }
- bufs->size = buf_entry->size;
- bufs->offset = buf_entry->offset;
- bufs->addr = sst_drv_ctx->mmap_mem;
- bufs->in_use = false;
- buf_entry++;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
-
- mutex_lock(&stream->lock);
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- if (stream->status == STREAM_INIT &&
- stream->prev != STREAM_UN_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frame fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- }
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
- if (!list_empty(&stream->bufs)) {
- stream->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx,
- &stream->data_blk);
- }
-
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec ioctl bytes = %d!!\n", retval);
-
-out_free:
- kfree(tmp_buf);
- return retval;
-}
-
-/*sets user data buffers to play/capture*/
-static int intel_sst_play_capture(struct stream_info *stream, int str_id)
-{
- int retval;
-
- stream->data_blk.ret_code = 0;
- stream->data_blk.on = true;
- stream->data_blk.condition = false;
-
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) {
- /* stream is started */
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- }
-
- if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
- /* stream is not started yet */
- pr_debug("Stream isn't in started state %d, prev %d\n",
- stream->status, stream->prev);
- } else if ((stream->status == STREAM_RUNNING ||
- stream->status == STREAM_PAUSED) &&
- stream->need_draining != true) {
- /* stream is started */
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- }
- } else {
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
-
- retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
- if (retval) {
- stream->status = STREAM_INIT;
- pr_debug("wait returned error...\n");
- }
- return retval;
-}
-
-/* fills kernel list with buffer addresses for SST DSP driver to process*/
-static int snd_sst_fill_kernel_list(struct stream_info *stream,
- const struct iovec *iovec, unsigned long nr_segs,
- struct list_head *copy_to_list)
-{
- struct sst_stream_bufs *stream_bufs;
- unsigned long index, mmap_len;
- unsigned char __user *bufp;
- unsigned long size, copied_size;
- int retval = 0, add_to_list = 0;
- static int sent_offset;
- static unsigned long sent_index;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- for (index = stream->sg_index; index < nr_segs; index++) {
- __u32 rar_handle;
- struct sst_stream_bufs *stream_bufs =
- kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
-
- stream->sg_index = index;
- if (!stream_bufs)
- return -ENOMEM;
- if (copy_from_user((void *) &rar_handle,
- iovec[index].iov_base,
- sizeof(__u32))) {
- kfree(stream_bufs);
- return -EFAULT;
- }
- stream_bufs->addr = (char *)rar_handle;
- stream_bufs->in_use = false;
- stream_bufs->size = iovec[0].iov_len;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
- stream->sg_index = index;
- return retval;
- }
-#endif
- stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
- if (!stream_bufs)
- return -ENOMEM;
- stream_bufs->addr = sst_drv_ctx->mmap_mem;
- mmap_len = sst_drv_ctx->mmap_len;
- stream_bufs->addr = sst_drv_ctx->mmap_mem;
- bufp = stream->cur_ptr;
-
- copied_size = 0;
-
- if (!stream->sg_index)
- sent_index = sent_offset = 0;
-
- for (index = stream->sg_index; index < nr_segs; index++) {
- stream->sg_index = index;
- if (!stream->cur_ptr)
- bufp = iovec[index].iov_base;
-
- size = ((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) - (unsigned long) bufp;
-
- if ((copied_size + size) > mmap_len)
- size = mmap_len - copied_size;
-
-
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (copy_from_user((void *)
- (stream_bufs->addr + copied_size),
- bufp, size)) {
- /* Clean up the list and return error code */
- retval = -EFAULT;
- break;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- struct snd_sst_user_cap_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry) {
- kfree(stream_bufs);
- return -ENOMEM;
- }
- entry->iov_index = index;
- entry->iov_offset = (unsigned long) bufp -
- (unsigned long)iovec[index].iov_base;
- entry->offset = copied_size;
- entry->size = size;
- list_add_tail(&entry->node, copy_to_list);
- }
-
- stream->cur_ptr = bufp + size;
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) <
- ((unsigned long)iovec[index].iov_base)) {
- pr_debug("Buffer overflows\n");
- kfree(stream_bufs);
- return -EINVAL;
- }
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) ==
- (unsigned long)stream->cur_ptr) {
- stream->cur_ptr = NULL;
- stream->sg_index++;
- }
-
- copied_size += size;
- pr_debug("copied_size - %lx\n", copied_size);
- if ((copied_size >= mmap_len) ||
- (stream->sg_index == nr_segs)) {
- add_to_list = 1;
- }
-
- if (add_to_list) {
- stream_bufs->in_use = false;
- stream_bufs->size = copied_size;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- break;
- }
- }
- return retval;
-}
-
-/* This function copies the captured data returned from SST DSP engine
- * to the user buffers*/
-static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
- const struct iovec *iovec,
- struct list_head *copy_to_list)
-{
- struct snd_sst_user_cap_list *entry, *_entry;
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- int retval = 0;
-
- /* copy sent buffers */
- pr_debug("capture stream copying to user now...\n");
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- /* copy to user */
- list_for_each_entry_safe(entry, _entry,
- copy_to_list, node) {
- if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
- kbufs->addr + entry->offset,
- entry->size)) {
- /* Clean up the list and return error */
- retval = -EFAULT;
- break;
- }
- list_del(&entry->node);
- kfree(entry);
- }
- }
- }
- pr_debug("end of cap copy\n");
- return retval;
-}
-
-/*
- * snd_sst_userbufs_play_cap - constructs the list from user buffers
- *
- * @iovec:pointer to iovec structure
- * @nr_segs:number entries in the iovec structure
- * @str_id:stream id
- * @stream:pointer to stream_info structure
- *
- * This function will traverse the user list and copy the data to the kernel
- * space buffers.
- */
-static int snd_sst_userbufs_play_cap(const struct iovec *iovec,
- unsigned long nr_segs, unsigned int str_id,
- struct stream_info *stream)
-{
- int retval;
- LIST_HEAD(copy_to_list);
-
-
- retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs,
- &copy_to_list);
-
- retval = intel_sst_play_capture(stream, str_id);
- if (retval < 0)
- return retval;
-
- if (stream->ops == STREAM_OPS_CAPTURE) {
- retval = snd_sst_copy_userbuf_capture(stream, iovec,
- &copy_to_list);
- }
- return retval;
-}
-
-/* This function is common function across read/write
- for user buffers called from system calls*/
-static int intel_sst_read_write(unsigned int str_id, char __user *buf,
- size_t count)
-{
- int retval;
- struct stream_info *stream;
- struct iovec iovec;
- unsigned long nr_segs;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true) {
- pr_warn("user write and stream is mapped\n");
- return -EIO;
- }
- if (!count)
- return -EINVAL;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- /* copy user buf details */
- pr_debug("new buffers %p, copy size %d, status %d\n" ,
- buf, (int) count, (int) stream->status);
-
- stream->buf_type = SST_BUF_USER_STATIC;
- iovec.iov_base = buf;
- iovec.iov_len = count;
- nr_segs = 1;
-
- do {
- retval = snd_sst_userbufs_play_cap(
- &iovec, nr_segs, str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/**
- * intel_sst_write - This function is called when user tries to play out data
- *
- * @file_ptr:pointer to file
- * @buf:user buffer to be played out
- * @count:size of the buffer
- * @offset:offset to start from
- *
- * writes the encoded data into DSP
- */
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- return intel_sst_read_write(str_id, (char __user *)buf, count);
-}
-
-/*
- * intel_sst_aio_write - write buffers
- *
- * @kiocb:pointer to a structure containing file pointer
- * @iov:list of user buffer to be played out
- * @nr_segs:number of entries
- * @offset:offset to start from
- *
- * This function is called when user tries to play out multiple data buffers
- */
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false)
- return -EINVAL;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/*
- * intel_sst_read - read the encoded data
- *
- * @file_ptr: pointer to file
- * @buf: user buffer to be filled with captured data
- * @count: size of the buffer
- * @offset: offset to start from
- *
- * This function is called when user tries to capture data
- */
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- return intel_sst_read_write(str_id, buf, count);
-}
-
-/*
- * intel_sst_aio_read - aio read
- *
- * @kiocb: pointer to a structure containing file pointer
- * @iov: list of user buffers to be filled with captured data
- * @nr_segs: number of entries
- * @offset: offset to start from
- *
- * This function is called when user tries to capture out multiple data buffers
- */
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false) {
- pr_debug("aio_read from user space is not allowed\n");
- return -EINVAL;
- }
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/* sst_print_stream_params - prints the stream parameters (debug fn)*/
-static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
-{
- pr_debug("codec params:result = %d\n",
- get_prm->codec_params.result);
- pr_debug("codec params:stream = %d\n",
- get_prm->codec_params.stream_id);
- pr_debug("codec params:codec = %d\n",
- get_prm->codec_params.codec);
- pr_debug("codec params:ops = %d\n",
- get_prm->codec_params.ops);
- pr_debug("codec params:stream_type = %d\n",
- get_prm->codec_params.stream_type);
- pr_debug("pcmparams:sfreq = %d\n",
- get_prm->pcm_params.sfreq);
- pr_debug("pcmparams:num_chan = %d\n",
- get_prm->pcm_params.num_chan);
- pr_debug("pcmparams:pcm_wd_sz = %d\n",
- get_prm->pcm_params.pcm_wd_sz);
- return;
-}
-
-/**
- * sst_create_algo_ipc - create ipc msg for algorithm parameters
- *
- * @algo_params: Algorithm parameters
- * @msg: post msg pointer
- *
- * This function is called to create ipc msg
- */
-int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
- struct ipc_post **msg)
-{
- if (sst_create_large_msg(msg))
- return -ENOMEM;
- sst_fill_header(&(*msg)->header,
- IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
- (*msg)->header.part.data = sizeof(u32) +
- sizeof(*algo_params) + algo_params->size;
- memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
- memcpy((*msg)->mailbox_data + sizeof(u32),
- algo_params, sizeof(*algo_params));
- return 0;
-}
-
-/**
- * sst_send_algo_ipc - send ipc msg for algorithm parameters
- *
- * @msg: post msg pointer
- *
- * This function is called to send ipc msg
- */
-int sst_send_algo_ipc(struct ipc_post **msg)
-{
- sst_drv_ctx->ppp_params_blk.condition = false;
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.on = true;
- sst_drv_ctx->ppp_params_blk.data = NULL;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT);
-}
-
-/**
- * intel_sst_ioctl_dsp - receives the device ioctl's
- *
- * @cmd:Ioctl cmd
- * @arg:data
- *
- * This function is called when a user space component
- * sends a DSP Ioctl to SST driver
- */
-long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct snd_ppp_params algo_params;
- struct snd_ppp_params *algo_params_copied;
- struct ipc_post *msg;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- if (algo_params.size > SST_MAILBOX_SIZE)
- return -EMSGSIZE;
-
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 0;
- if (copy_from_user(msg->mailbox_data + sizeof(algo_params),
- algo_params.params, algo_params.size))
- return -EFAULT;
-
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_set_algo = %d\n", retval);
- retval = -EIO;
- }
- break;
-
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 1;
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_get_algo = %d\n", retval);
- retval = -EIO;
- break;
- }
- algo_params_copied = (struct snd_ppp_params *)
- sst_drv_ctx->ppp_params_blk.data;
- if (algo_params_copied->size > algo_params.size) {
- pr_debug("mem insufficient to copy\n");
- retval = -EMSGSIZE;
- goto free_mem;
- } else {
- char __user *tmp;
-
- if (copy_to_user(algo_params.params,
- algo_params_copied->params,
- algo_params_copied->size)) {
- retval = -EFAULT;
- goto free_mem;
- }
- tmp = (char __user *)arg + offsetof(
- struct snd_ppp_params, size);
- if (copy_to_user(tmp, &algo_params_copied->size,
- sizeof(__u32))) {
- retval = -EFAULT;
- goto free_mem;
- }
-
- }
-free_mem:
- kfree(algo_params_copied->params);
- kfree(algo_params_copied);
- break;
- }
- return retval;
-}
-
-
-int sst_ioctl_tuning_params(unsigned long arg)
-{
- struct snd_sst_tuning_params params;
- struct ipc_post *msg;
-
- if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
- return -EFAULT;
- if (params.size > SST_MAILBOX_SIZE)
- return -ENOMEM;
- pr_debug("Parameter %d, Stream %d, Size %d\n", params.type,
- params.str_id, params.size);
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1, params.str_id);
- msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
- memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
- if (copy_from_user(msg->mailbox_data + sizeof(params),
- (void __user *)(unsigned long)params.addr,
- params.size)) {
- kfree(msg->mailbox_data);
- kfree(msg);
- return -EFAULT;
- }
- return sst_send_algo_ipc(&msg);
-}
-/**
- * intel_sst_ioctl - receives the device ioctl's
- * @file_ptr:pointer to file
- * @cmd:Ioctl cmd
- * @arg:data
- *
- * This function is called by OS when a user space component
- * sends an Ioctl to SST driver
- */
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct ioctl_pvt_data *data = NULL;
- int str_id = 0, minor = 0;
-
- data = file_ptr->private_data;
- if (data) {
- minor = 0;
- str_id = data->str_id;
- } else
- minor = 1;
-
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
- return -EBUSY;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
- pr_debug("IOCTL_PAUSE received for %d!\n", str_id);
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_pause_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_RESUME):
- pr_debug("SNDRV_SST_IOCTL_RESUME received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_resume_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
- struct snd_sst_params str_param;
-
- pr_debug("IOCTL_SET_PARAMS received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
-
- if (copy_from_user(&str_param, (void __user *)arg,
- sizeof(str_param))) {
- retval = -EFAULT;
- break;
- }
-
- if (!str_id) {
-
- retval = sst_get_stream(&str_param);
- if (retval > 0) {
- struct stream_info *str_info;
- char __user *dest;
-
- sst_drv_ctx->stream_cnt++;
- data->str_id = retval;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = SST_DRV;
- dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
- retval = copy_to_user(dest, &retval, sizeof(__u32));
- if (retval)
- retval = -EFAULT;
- } else {
- if (retval == -SST_ERR_INVALID_PARAMS)
- retval = -EINVAL;
- }
- } else {
- pr_debug("SET_STREAM_PARAMS received!\n");
- /* allocated set params only */
- retval = sst_set_stream_param(str_id, &str_param);
- /* Block the call for reply */
- if (!retval) {
- int sfreq = 0, word_size = 0, num_channel = 0;
- sfreq = str_param.sparams.uc.pcm_params.sfreq;
- word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
- num_channel = str_param.sparams.uc.pcm_params.num_chan;
- if (str_param.ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->scard_ops->\
- set_pcm_audio_params(sfreq,
- word_size, num_channel);
- }
- }
- }
- break;
- }
- case _IOC_NR(SNDRV_SST_SET_VOL): {
- struct snd_sst_vol set_vol;
-
- if (copy_from_user(&set_vol, (void __user *)arg,
- sizeof(set_vol))) {
- pr_debug("copy failed\n");
- retval = -EFAULT;
- break;
- }
- pr_debug("SET_VOLUME received for %d!\n",
- set_vol.stream_id);
- if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_set_vol(&set_vol);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_VOL): {
- struct snd_sst_vol get_vol;
-
- if (copy_from_user(&get_vol, (void __user *)arg,
- sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("IOCTL_GET_VOLUME received for stream = %d!\n",
- get_vol.stream_id);
- if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_get_vol(&get_vol);
- if (retval) {
- retval = -EIO;
- break;
- }
- pr_debug("id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
- get_vol.stream_id, get_vol.volume,
- get_vol.ramp_duration, get_vol.ramp_type);
- if (copy_to_user((struct snd_sst_vol __user *)arg,
- &get_vol, sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- /*sst_print_get_vol_info(str_id, &get_vol);*/
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MUTE): {
- struct snd_sst_mute set_mute;
-
- if (copy_from_user(&set_mute, (void __user *)arg,
- sizeof(set_mute))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("SNDRV_SST_SET_VOLUME received for %d!\n",
- set_mute.stream_id);
- if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
- retval = -EPERM;
- break;
- }
- retval = sst_set_mute(&set_mute);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
- struct snd_sst_get_stream_params get_params;
-
- pr_debug("IOCTL_GET_PARAMS received!\n");
- if (minor != 0) {
- retval = -EBADRQC;
- break;
- }
-
- retval = sst_get_stream_params(str_id, &get_params);
- if (retval) {
- retval = -EIO;
- break;
- }
- if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
- &get_params, sizeof(get_params))) {
- retval = -EFAULT;
- break;
- }
- sst_print_stream_params(&get_params);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MMAP_PLAY):
- case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
- struct snd_sst_mmap_buffs mmap_buf;
-
- pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&mmap_buf, (void __user *)arg,
- sizeof(mmap_buf))) {
- retval = -EFAULT;
- break;
- }
- retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_DROP):
- pr_debug("SNDRV_SST_IOCTL_DROP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drop_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
- struct snd_sst_tstamp tstamp = {0};
- unsigned long long time, freq, mod;
-
- pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- time = tstamp.samples_rendered;
- freq = (unsigned long long) tstamp.sampling_frequency;
- time = time * 1000; /* converting it to ms */
- mod = do_div(time, freq);
- if (copy_to_user((void __user *)arg, &time,
- sizeof(unsigned long long)))
- retval = -EFAULT;
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_START):{
- struct stream_info *stream;
-
- pr_debug("SNDRV_SST_STREAM_START received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- retval = sst_play_frame(str_id);
- } else if (stream->ops == STREAM_OPS_CAPTURE)
- retval = sst_capture_frame(str_id);
- else {
- retval = -EINVAL;
- mutex_unlock(&stream->lock);
- break;
- }
- if (retval < 0) {
- stream->status = STREAM_INIT;
- mutex_unlock(&stream->lock);
- break;
- }
- } else {
- retval = -EINVAL;
- }
- mutex_unlock(&stream->lock);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
- struct snd_sst_target_device target_device;
-
- pr_debug("SET_TARGET_DEVICE received!\n");
- if (copy_from_user(&target_device, (void __user *)arg,
- sizeof(target_device))) {
- retval = -EFAULT;
- break;
- }
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_target_device_select(&target_device);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
- struct snd_sst_driver_info info;
-
- pr_debug("SNDRV_SST_DRIVER_INFO received\n");
- info.version = SST_VERSION_NUM;
- /* hard coded, should be obtained properly later */
- info.active_pcm_streams = sst_drv_ctx->stream_cnt -
- sst_drv_ctx->encoded_cnt;
- info.active_enc_streams = sst_drv_ctx->encoded_cnt;
- info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
- info.max_enc_streams = MAX_ENC_STREAM;
- info.buf_per_stream = sst_drv_ctx->mmap_len;
- if (copy_to_user((void __user *)arg, &info,
- sizeof(info)))
- retval = -EFAULT;
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
- struct snd_sst_dbufs param;
- struct snd_sst_dbufs dbufs_local;
- struct snd_sst_buffs ibufs, obufs;
- struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
- char __user *dest;
-
- pr_debug("SNDRV_SST_STREAM_DECODE received\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&param, (void __user *)arg,
- sizeof(param))) {
- retval = -EFAULT;
- break;
- }
-
- dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
- dbufs_local.output_bytes_produced =
- param.output_bytes_produced;
-
- if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
- retval = -EFAULT;
- break;
- }
- if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
- retval = -EFAULT;
- break;
- }
-
- ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
- obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
- if (!ibuf_tmp || !obuf_tmp) {
- retval = -ENOMEM;
- goto free_iobufs;
- }
-
- if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
- ibufs.entries * sizeof(*ibuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- ibufs.buff_entry = ibuf_tmp;
- dbufs_local.ibufs = &ibufs;
-
- if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
- obufs.entries * sizeof(*obuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- obufs.buff_entry = obuf_tmp;
- dbufs_local.obufs = &obufs;
-
- retval = sst_decode(str_id, &dbufs_local);
- if (retval) {
- retval = -EAGAIN;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
- if (copy_to_user(dest,
- &dbufs_local.input_bytes_consumed,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
- if (copy_to_user(dest,
- &dbufs_local.output_bytes_produced,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-free_iobufs:
- kfree(ibuf_tmp);
- kfree(obuf_tmp);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
- pr_debug("SNDRV_SST_STREAM_DRAIN received\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drain_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
- unsigned long long __user *bytes = (unsigned long long __user *)arg;
- struct snd_sst_tstamp tstamp = {0};
-
- pr_debug("STREAM_BYTES_DECODED received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- if (copy_to_user(bytes, &tstamp.bytes_processed,
- sizeof(*bytes)))
- retval = -EFAULT;
- break;
- }
- case _IOC_NR(SNDRV_SST_FW_INFO): {
- struct snd_sst_fw_info *fw_info;
-
- pr_debug("SNDRV_SST_FW_INFO received\n");
-
- fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
- if (!fw_info) {
- retval = -ENOMEM;
- break;
- }
- retval = sst_get_fw_info(fw_info);
- if (retval) {
- retval = -EIO;
- kfree(fw_info);
- break;
- }
- if (copy_to_user((struct snd_sst_dbufs __user *)arg,
- fw_info, sizeof(*fw_info))) {
- kfree(fw_info);
- retval = -EFAULT;
- break;
- }
- /*sst_print_fw_info(fw_info);*/
- kfree(fw_info);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = intel_sst_ioctl_dsp(cmd, arg);
- break;
-
- case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_ioctl_tuning_params(arg);
- break;
-
- default:
- retval = -EINVAL;
- }
- pr_debug("intel_sst_ioctl:complete ret code = %d\n", retval);
- return retval;
-}
-
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
deleted file mode 100644
index 870981ba3c9..00000000000
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ /dev/null
@@ -1,623 +0,0 @@
-#ifndef __INTEL_SST_COMMON_H__
-#define __INTEL_SST_COMMON_H__
-/*
- * intel_sst_common.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private declarations for SST
- */
-
-#define SST_DRIVER_VERSION "1.2.17"
-#define SST_VERSION_NUM 0x1217
-
-/* driver names */
-#define SST_DRV_NAME "intel_sst_driver"
-#define SST_MRST_PCI_ID 0x080A
-#define SST_MFLD_PCI_ID 0x082F
-#define PCI_ID_LENGTH 4
-#define SST_SUSPEND_DELAY 2000
-#define FW_CONTEXT_MEM (64*1024)
-
-enum sst_states {
- SST_FW_LOADED = 1,
- SST_FW_RUNNING,
- SST_UN_INIT,
- SST_ERROR,
- SST_SUSPENDED
-};
-
-#define MAX_ACTIVE_STREAM 3
-#define MAX_ENC_STREAM 1
-#define MAX_AM_HANDLES 1
-#define ALLOC_TIMEOUT 5000
-/* SST numbers */
-#define SST_BLOCK_TIMEOUT 5000
-#define TARGET_DEV_BLOCK_TIMEOUT 5000
-
-#define BLOCK_UNINIT -1
-#define RX_TIMESLOT_UNINIT -1
-
-/* SST register map */
-#define SST_CSR 0x00
-#define SST_PISR 0x08
-#define SST_PIMR 0x10
-#define SST_ISRX 0x18
-#define SST_IMRX 0x28
-#define SST_IPCX 0x38 /* IPC IA-SST */
-#define SST_IPCD 0x40 /* IPC SST-IA */
-#define SST_ISRD 0x20 /* dummy register for shim workaround */
-#define SST_SHIM_SIZE 0X44
-
-#define SPI_MODE_ENABLE_BASE_ADDR 0xffae4000
-#define FW_SIGNATURE_SIZE 4
-
-/* PMIC and SST hardware states */
-enum sst_mad_states {
- SND_MAD_UN_INIT = 0,
- SND_MAD_INIT_DONE,
-};
-
-/* stream states */
-enum sst_stream_states {
- STREAM_UN_INIT = 0, /* Freed/Not used stream */
- STREAM_RUNNING = 1, /* Running */
- STREAM_PAUSED = 2, /* Paused stream */
- STREAM_DECODE = 3, /* stream is in decoding only state */
- STREAM_INIT = 4, /* stream init, waiting for data */
-};
-
-
-enum sst_ram_type {
- SST_IRAM = 1,
- SST_DRAM = 2,
-};
-/* SST shim registers to structure mapping */
-union config_status_reg {
- struct {
- u32 mfld_strb:1;
- u32 sst_reset:1;
- u32 hw_rsvd:3;
- u32 sst_clk:2;
- u32 bypass:3;
- u32 run_stall:1;
- u32 rsvd1:2;
- u32 strb_cntr_rst:1;
- u32 rsvd:18;
- } part;
- u32 full;
-};
-
-union interrupt_reg {
- struct {
- u32 done_interrupt:1;
- u32 busy_interrupt:1;
- u32 rsvd:30;
- } part;
- u32 full;
-};
-
-union sst_pisr_reg {
- struct {
- u32 pssp0:1;
- u32 pssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:26;
- } part;
- u32 full;
-};
-
-union sst_pimr_reg {
- struct {
- u32 ssp0:1;
- u32 ssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:10;
- u32 ssp0_sc:1;
- u32 ssp1_sc:1;
- u32 rsvd2:3;
- u32 dmac_sc:1;
- u32 rsvd3:10;
- } part;
- u32 full;
-};
-
-
-struct sst_stream_bufs {
- struct list_head node;
- u32 size;
- const char *addr;
- u32 data_copied;
- bool in_use;
- u32 offset;
-};
-
-struct snd_sst_user_cap_list {
- unsigned int iov_index; /* index of iov */
- unsigned long iov_offset; /* offset in iov */
- unsigned long offset; /* offset in kmem */
- unsigned long size; /* size copied */
- struct list_head node;
-};
-/*
-This structure is used to block a user/fw data call to another
-fw/user call
-*/
-struct sst_block {
- bool condition; /* condition for blocking check */
- int ret_code; /* ret code when block is released */
- void *data; /* data to be passed for the block, if any */
- bool on;
-};
-
-enum snd_sst_buf_type {
- SST_BUF_USER_STATIC = 1,
- SST_BUF_USER_DYNAMIC,
- SST_BUF_MMAP_STATIC,
- SST_BUF_MMAP_DYNAMIC,
-};
-
-enum snd_src {
- SST_DRV = 1,
- MAD_DRV = 2
-};
-
-/**
- * struct stream_info - structure that holds the stream information
- *
- * @status : stream current state
- * @prev : stream prev state
- * @codec : stream codec
- * @sst_id : stream id
- * @ops : stream operation pb/cp/drm...
- * @bufs: stream buffer list
- * @lock : stream mutex for protecting state
- * @pcm_lock : spinlock for pcm path only
- * @mmapped : is stream mmapped
- * @sg_index : current stream user buffer index
- * @cur_ptr : stream user buffer pointer
- * @buf_entry : current user buffer
- * @data_blk : stream block for data operations
- * @ctrl_blk : stream block for ctrl operations
- * @buf_type : stream user buffer type
- * @pcm_substream : PCM substream
- * @period_elapsed : PCM period elapsed callback
- * @sfreq : stream sampling freq
- * @decode_ibuf : Decoded i/p buffers pointer
- * @decode_obuf : Decoded o/p buffers pointer
- * @decode_isize : Decoded i/p buffers size
- * @decode_osize : Decoded o/p buffers size
- * @decode_ibuf_type : Decoded i/p buffer type
- * @decode_obuf_type : Decoded o/p buffer type
- * @idecode_alloc : Decode alloc index
- * @need_draining : stream set for drain
- * @str_type : stream type
- * @curr_bytes : current bytes decoded
- * @cumm_bytes : cumulative bytes decoded
- * @src : stream source
- * @device : output device type (medfield only)
- * @pcm_slot : pcm slot value
- */
-struct stream_info {
- unsigned int status;
- unsigned int prev;
- u8 codec;
- unsigned int sst_id;
- unsigned int ops;
- struct list_head bufs;
- struct mutex lock; /* mutex */
- spinlock_t pcm_lock;
- bool mmapped;
- unsigned int sg_index; /* current buf Index */
- unsigned char __user *cur_ptr; /* Current static bufs */
- struct snd_sst_buf_entry __user *buf_entry;
- struct sst_block data_blk; /* stream ops block */
- struct sst_block ctrl_blk; /* stream control cmd block */
- enum snd_sst_buf_type buf_type;
- void *pcm_substream;
- void (*period_elapsed) (void *pcm_substream);
- unsigned int sfreq;
- void *decode_ibuf, *decode_obuf;
- unsigned int decode_isize, decode_osize;
- u8 decode_ibuf_type, decode_obuf_type;
- unsigned int idecode_alloc;
- unsigned int need_draining;
- unsigned int str_type;
- u32 curr_bytes;
- u32 cumm_bytes;
- u32 src;
- enum snd_sst_audio_device_type device;
- u8 pcm_slot;
-};
-
-/*
- * struct stream_alloc_block - this structure is used for blocking the user's
- * alloc calls to fw's response to alloc calls
- *
- * @sst_id : session id of blocked stream
- * @ops_block : ops block structure
- */
-struct stream_alloc_block {
- int sst_id; /* session id of blocked stream */
- struct sst_block ops_block; /* ops block structure */
-};
-
-#define SST_FW_SIGN "$SST"
-#define SST_FW_LIB_SIGN "$LIB"
-
-/*
- * struct fw_header - FW file headers
- *
- * @signature : FW signature
- * @modules : # of modules
- * @file_format : version of header format
- * @reserved : reserved fields
- */
-struct fw_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
- u32 file_size; /* size of fw minus this header */
- u32 modules; /* # of modules */
- u32 file_format; /* version of header format */
- u32 reserved[4];
-};
-
-struct fw_module_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
- u32 mod_size; /* size of module */
- u32 blocks; /* # of blocks */
- u32 type; /* codec type, pp lib */
- u32 entry_point;
-};
-
-struct dma_block_info {
- enum sst_ram_type type; /* IRAM/DRAM */
- u32 size; /* Bytes */
- u32 ram_offset; /* Offset in I/DRAM */
- u32 rsvd; /* Reserved field */
-};
-
-struct ioctl_pvt_data {
- int str_id;
- int pvt_id;
-};
-
-struct sst_ipc_msg_wq {
- union ipc_header header;
- char mailbox[SST_MAILBOX_SIZE];
- struct work_struct wq;
-};
-
-struct mad_ops_wq {
- int stream_id;
- enum sst_controls control_op;
- struct work_struct wq;
-
-};
-
-#define SST_MMAP_PAGES (640*1024 / PAGE_SIZE)
-#define SST_MMAP_STEP (40*1024 / PAGE_SIZE)
-
-/**
- * struct intel_sst_drv - driver ops
- *
- * @pmic_state : pmic state
- * @pmic_vendor : pmic vendor detected
- * @sst_state : current sst device state
- * @pci_id : PCI device id loaded
- * @shim : SST shim pointer
- * @mailbox : SST mailbox pointer
- * @iram : SST IRAM pointer
- * @dram : SST DRAM pointer
- * @shim_phy_add : SST shim phy addr
- * @ipc_dispatch_list : ipc messages dispatched
- * @ipc_post_msg_wq : wq to post IPC messages context
- * @ipc_process_msg : wq to process msgs from FW context
- * @ipc_process_reply : wq to process reply from FW context
- * @ipc_post_msg : wq to post reply from FW context
- * @mad_ops : MAD driver operations registered
- * @mad_wq : MAD driver wq
- * @post_msg_wq : wq to post IPC messages
- * @process_msg_wq : wq to process msgs from FW
- * @process_reply_wq : wq to process reply from FW
- * @streams : sst stream contexts
- * @alloc_block : block structure for alloc
- * @tgt_dev_blk : block structure for target device
- * @fw_info_blk : block structure for fw info block
- * @vol_info_blk : block structure for vol info block
- * @mute_info_blk : block structure for mute info block
- * @hs_info_blk : block structure for hs info block
- * @list_lock : sst driver list lock (deprecated)
- * @list_spin_lock : sst driver spin lock block
- * @scard_ops : sst card ops
- * @pci : sst pci device structure
- * @active_streams : sst active streams
- * @sst_lock : sst device lock
- * @stream_lock : sst stream lock
- * @unique_id : sst unique id
- * @stream_cnt : total sst active stream count
- * @pb_streams : total active pb streams
- * @cp_streams : total active cp streams
- * @lpe_stalled : lpe stall status
- * @pmic_port_instance : active pmic port instance
- * @rx_time_slot_status : active rx slot
- * @lpaudio_start : lpaudio status
- * @audio_start : audio status
- * @devt_d : pointer to /dev/lpe node
- * @devt_c : pointer to /dev/lpe_ctrl node
- * @max_streams : max streams allowed
- */
-struct intel_sst_drv {
- bool pmic_state;
- int pmic_vendor;
- int sst_state;
- unsigned int pci_id;
- void __iomem *shim;
- void __iomem *mailbox;
- void __iomem *iram;
- void __iomem *dram;
- unsigned int shim_phy_add;
- struct list_head ipc_dispatch_list;
- struct work_struct ipc_post_msg_wq;
- struct sst_ipc_msg_wq ipc_process_msg;
- struct sst_ipc_msg_wq ipc_process_reply;
- struct sst_ipc_msg_wq ipc_post_msg;
- struct mad_ops_wq mad_ops;
- wait_queue_head_t wait_queue;
- struct workqueue_struct *mad_wq;
- struct workqueue_struct *post_msg_wq;
- struct workqueue_struct *process_msg_wq;
- struct workqueue_struct *process_reply_wq;
-
- struct stream_info streams[MAX_NUM_STREAMS];
- struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
- struct sst_block tgt_dev_blk, fw_info_blk, ppp_params_blk,
- vol_info_blk, mute_info_blk, hs_info_blk;
- struct mutex list_lock;/* mutex for IPC list locking */
-	spinlock_t list_spin_lock; /* spin lock for IPC list locking */
- struct snd_pmic_ops *scard_ops;
- struct pci_dev *pci;
- int active_streams[MAX_NUM_STREAMS];
- void *mmap_mem;
- struct mutex sst_lock;
- struct mutex stream_lock;
- unsigned int mmap_len;
- unsigned int unique_id;
- unsigned int stream_cnt; /* total streams */
-	unsigned int encoded_cnt; /* encoded streams only */
- unsigned int am_cnt;
- unsigned int pb_streams; /* pb streams active */
- unsigned int cp_streams; /* cp streams active */
- unsigned int lpe_stalled; /* LPE is stalled or not */
- unsigned int pmic_port_instance; /*pmic port instance*/
- int rx_time_slot_status;
- unsigned int lpaudio_start;
- /* 1 - LPA stream(MP3 pb) in progress*/
- unsigned int audio_start;
- dev_t devt_d, devt_c;
- unsigned int max_streams;
- unsigned int *fw_cntx;
- unsigned int fw_cntx_size;
-
- unsigned int fw_downloaded;
-};
-
-extern struct intel_sst_drv *sst_drv_ctx;
-
-#define CHIP_REV_REG 0xff108000
-#define CHIP_REV_ADDR 0x78
-
-/* misc definitions */
-#define FW_DWNL_ID 0xFF
-#define LOOP1 0x11111111
-#define LOOP2 0x22222222
-#define LOOP3 0x33333333
-#define LOOP4 0x44444444
-
-#define SST_DEFAULT_PMIC_PORT 1 /*audio port*/
-/* NOTE: status will have +ve for good cases and -ve for error ones */
-#define MAX_STREAM_FIELD 255
-
-int sst_alloc_stream(char *params, unsigned int stream_ops, u8 codec,
- unsigned int session_id);
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *response);
-int sst_stalled(void);
-int sst_pause_stream(int id);
-int sst_resume_stream(int id);
-int sst_enable_rx_timeslot(int status);
-int sst_drop_stream(int id);
-int sst_free_stream(int id);
-int sst_start_stream(int streamID);
-int sst_play_frame(int streamID);
-int sst_pcm_play_frame(int str_id, struct sst_stream_bufs *sst_buf);
-int sst_capture_frame(int streamID);
-int sst_set_stream_param(int streamID, struct snd_sst_params *str_param);
-int sst_target_device_select(struct snd_sst_target_device *target_device);
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs);
-int sst_get_decoded_bytes(int str_id, unsigned long long *bytes);
-int sst_get_fw_info(struct snd_sst_fw_info *info);
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params);
-int sst_get_stream(struct snd_sst_params *str_param);
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld);
-int sst_drain_stream(int str_id);
-int sst_get_vol(struct snd_sst_vol *set_vol);
-int sst_set_vol(struct snd_sst_vol *set_vol);
-int sst_set_mute(struct snd_sst_mute *set_mute);
-
-
-void sst_post_message(struct work_struct *work);
-void sst_process_message(struct work_struct *work);
-void sst_process_reply(struct work_struct *work);
-void sst_process_mad_ops(struct work_struct *work);
-void sst_process_mad_jack_detection(struct work_struct *work);
-
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
- unsigned long arg);
-int intel_sst_open(struct inode *i_node, struct file *file_ptr);
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_mmap(struct file *fp, struct vm_area_struct *vma);
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-
-int sst_load_fw(const struct firmware *fw, void *context);
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
-int sst_spi_mode_enable(void);
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
-
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block);
-int sst_wait_interruptible_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout);
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block);
-int sst_create_large_msg(struct ipc_post **arg);
-int sst_create_short_msg(struct ipc_post **arg);
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data);
-void sst_clear_interrupt(void);
-int intel_sst_resume(struct pci_dev *pci);
-int sst_download_fw(void);
-void free_stream_context(unsigned int str_id);
-void sst_clean_stream(struct stream_info *stream);
-
-/*
- * sst_fill_header - inline to fill sst header
- *
- * @header : ipc header
- * @msg : IPC message to be sent
- * @large : is ipc large msg
- * @str_id : stream id
- *
- * this function is an inline function that sets the headers before
- * sending a message
- */
-static inline void sst_fill_header(union ipc_header *header,
- int msg, int large, int str_id)
-{
- header->part.msg_id = msg;
- header->part.str_id = str_id;
- header->part.large = large;
- header->part.done = 0;
- header->part.busy = 1;
- header->part.data = 0;
-}
-
-/*
- * sst_assign_pvt_id - assign a pvt id for stream
- *
- * @sst_drv_ctx : driver context
- *
- * this inline function assigns a private id for calls that don't have a
- * stream context yet; it should be called with the lock held
- */
-static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
-{
- sst_drv_ctx->unique_id++;
- if (sst_drv_ctx->unique_id >= MAX_NUM_STREAMS)
- sst_drv_ctx->unique_id = 1;
- return sst_drv_ctx->unique_id;
-}
-
-/*
- * sst_init_stream - this function initializes stream context
- *
- * @stream : stream structure
- * @codec : codec for stream
- * @sst_id : stream id
- * @ops : stream operation
- * @slot : stream pcm slot
- * @device : device type
- *
- * this inline function initializes stream context for allocated stream
- */
-static inline void sst_init_stream(struct stream_info *stream,
- int codec, int sst_id, int ops, u8 slot,
- enum snd_sst_audio_device_type device)
-{
- stream->status = STREAM_INIT;
- stream->prev = STREAM_UN_INIT;
- stream->codec = codec;
- stream->sst_id = sst_id;
- stream->str_type = 0;
- stream->ops = ops;
- stream->data_blk.on = false;
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- stream->data_blk.data = NULL;
- stream->ctrl_blk.on = false;
- stream->ctrl_blk.condition = false;
- stream->ctrl_blk.ret_code = 0;
- stream->ctrl_blk.data = NULL;
- stream->need_draining = false;
- stream->decode_ibuf = NULL;
- stream->decode_isize = 0;
- stream->mmapped = false;
- stream->pcm_slot = slot;
- stream->device = device;
-}
-
-
-/*
- * sst_validate_strid - this function validates the stream id
- *
- * @str_id : stream id to be validated
- *
- * returns 0 if valid stream
- */
-static inline int sst_validate_strid(int str_id)
-{
- if (str_id <= 0 || str_id > sst_drv_ctx->max_streams) {
- pr_err("SST ERR: invalid stream id : %d MAX_STREAMS:%d\n",
- str_id, sst_drv_ctx->max_streams);
- return -EINVAL;
- } else
- return 0;
-}
-
-static inline int sst_shim_write(void __iomem *addr, int offset, int value)
-{
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- writel(value, addr + SST_ISRD); /*dummy*/
- writel(value, addr + offset);
- return 0;
-}
-
-static inline int sst_shim_read(void __iomem *addr, int offset)
-{
- return readl(addr + offset);
-}
-#endif /* __INTEL_SST_COMMON_H__ */
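The sst_fill_header() helper removed above only packs fields of the 32-bit IPC header whose bit layout is given by union ipc_header in intel_sst_fw_ipc.h. The following is a minimal user-space sketch of that packing, handy for decoding header values seen in IPC traces; it copies the bitfield layout from that header, assumes the little-endian bitfield ordering the driver itself relies on, and uses 0x30 (IPC_IA_START_STREAM) purely as an example message id.

#include <stdint.h>
#include <stdio.h>

/* Bit layout copied from union ipc_header in intel_sst_fw_ipc.h */
union ipc_header {
	struct {
		uint32_t msg_id:8;	/* message ID */
		uint32_t str_id:5;	/* stream ID */
		uint32_t large:1;	/* large message flag */
		uint32_t reserved:2;
		uint32_t data:14;	/* ack/info or mailbox payload size */
		uint32_t done:1;	/* bit 30 */
		uint32_t busy:1;	/* bit 31 */
	} part;
	uint32_t full;
};

/* Same field assignments as the sst_fill_header() inline above */
static void fill_header(union ipc_header *h, int msg, int large, int str_id)
{
	h->part.msg_id = msg;
	h->part.str_id = str_id;
	h->part.large = large;
	h->part.done = 0;
	h->part.busy = 1;
	h->part.data = 0;
}

int main(void)
{
	union ipc_header h = { .full = 0 };

	/* short (non-large) IPC_IA_START_STREAM message for stream 2 */
	fill_header(&h, 0x30, 0, 2);
	/* prints 0x80000230: busy bit set, str_id 2, msg_id 0x30 */
	printf("header = 0x%08x\n", (unsigned int)h.full);
	return 0;
}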
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
deleted file mode 100644
index 22bd29c0c43..00000000000
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * intel_sst_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- *          Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/export.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/*
- * sst_download_fw - download the audio firmware to DSP
- *
- * This function is called when the FW needs to be downloaded to SST DSP engine
- */
-int sst_download_fw(void)
-{
- int retval;
- const struct firmware *fw_sst;
- char name[20];
-
- if (sst_drv_ctx->sst_state != SST_UN_INIT)
- return -EPERM;
-
- /* Reload firmware is not needed for MRST */
-	if ((sst_drv_ctx->pci_id == SST_MRST_PCI_ID) &&
-	    sst_drv_ctx->fw_downloaded) {
- pr_debug("FW already downloaded, skip for MRST platform\n");
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- return 0;
- }
-
- snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
- sst_drv_ctx->pci_id, ".bin");
-
- pr_debug("Downloading %s FW now...\n", name);
- retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
- if (retval) {
- pr_err("request fw failed %d\n", retval);
- return retval;
- }
- sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
- sst_drv_ctx->alloc_block[0].ops_block.condition = false;
- retval = sst_load_fw(fw_sst, NULL);
- if (retval)
- goto end_restore;
-
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
- if (retval)
- pr_err("fw download failed %d\n" , retval);
- else
- sst_drv_ctx->fw_downloaded = 1;
-
-end_restore:
- release_firmware(fw_sst);
- sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
- return retval;
-}
-
-
-/*
- * sst_stalled - this function checks if the lpe is in stalled state
- */
-int sst_stalled(void)
-{
- int retry = 1000;
- int retval = -1;
-
- while (retry) {
- if (!sst_drv_ctx->lpe_stalled)
- return 0;
- /*wait for time and re-check*/
- msleep(1);
-
- retry--;
- }
- pr_debug("in Stalled State\n");
- return retval;
-}
-
-void free_stream_context(unsigned int str_id)
-{
- struct stream_info *stream;
-
- if (!sst_validate_strid(str_id)) {
-		/* str_id is valid, so stream is allocated */
- stream = &sst_drv_ctx->streams[str_id];
- if (sst_free_stream(str_id))
- sst_clean_stream(&sst_drv_ctx->streams[str_id]);
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- sst_drv_ctx->pb_streams--;
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- sst_drv_ctx->scard_ops->power_down_pmic_pb(
- stream->device);
- else {
- if (sst_drv_ctx->pb_streams == 0)
- sst_drv_ctx->scard_ops->
- power_down_pmic_pb(stream->device);
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->cp_streams--;
- if (sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic_cp(
- stream->device);
- }
- if (sst_drv_ctx->pb_streams == 0
- && sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
- }
-}
-
-/*
- * sst_get_stream_allocated - this function gets a stream allocated with
- * the given params
- *
- * @str_param : stream params
- * @lib_dnld : pointer to pointer of lib download struct
- *
- * This creates new stream id for a stream, in case lib is to be downloaded to
- * DSP, it downloads that
- */
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld)
-{
- int retval, str_id;
- struct stream_info *str_info;
-
- retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
- str_param->codec, str_param->device_type);
- if (retval < 0) {
- pr_err("sst_alloc_stream failed %d\n", retval);
- return retval;
- }
- pr_debug("Stream allocated %d\n", retval);
- str_id = retval;
- str_info = &sst_drv_ctx->streams[str_id];
- /* Block the call for reply */
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
- pr_debug("FW alloc failed retval %d, ret_code %d\n",
- retval, str_info->ctrl_blk.ret_code);
- str_id = -str_info->ctrl_blk.ret_code; /*return error*/
- *lib_dnld = str_info->ctrl_blk.data;
- sst_clean_stream(str_info);
- } else
- pr_debug("FW Stream allocated success\n");
- return str_id; /*will ret either error (in above if) or correct str id*/
-}
-
-/*
- * sst_get_sfreq - this function returns the frequency of the stream
- *
- * @str_param : stream params
- */
-static int sst_get_sfreq(struct snd_sst_params *str_param)
-{
- switch (str_param->codec) {
- case SST_CODEC_TYPE_PCM:
- return 48000; /*str_param->sparams.uc.pcm_params.sfreq;*/
- case SST_CODEC_TYPE_MP3:
- return str_param->sparams.uc.mp3_params.sfreq;
- case SST_CODEC_TYPE_AAC:
- return str_param->sparams.uc.aac_params.sfreq;
- case SST_CODEC_TYPE_WMA9:
- return str_param->sparams.uc.wma_params.sfreq;
- default:
- return 0;
- }
-}
-
-/*
- * sst_get_stream - this function prepares for stream allocation
- *
- * @str_param : stream param
- */
-int sst_get_stream(struct snd_sst_params *str_param)
-{
- int i, retval;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- /* stream is not allocated, we are allocating */
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
- /* codec download is required */
- struct snd_sst_alloc_response *response;
-
- pr_debug("Codec is required.... trying that\n");
- if (lib_dnld == NULL) {
- pr_err("lib download null!!! abort\n");
- return -EIO;
- }
-		i = sst_get_block_stream(sst_drv_ctx);
-		pr_debug("alloc block allocated = %d\n", i);
-		if (i < 0) {
-			kfree(lib_dnld);
-			return -ENOMEM;
-		}
-		response = sst_drv_ctx->alloc_block[i].ops_block.data;
- retval = sst_load_library(lib_dnld, str_param->ops);
- kfree(lib_dnld);
-
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- if (!retval) {
- pr_debug("codec was downloaded successfully\n");
-
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval <= 0)
- goto err;
-
- pr_debug("Alloc done stream id %d\n", retval);
- } else {
- pr_debug("codec download failed\n");
- retval = -EIO;
- goto err;
- }
- } else if (retval <= 0)
- goto err;
- /*else
- set_port_params(str_param, str_param->ops);*/
-
- /* store sampling freq */
- str_info = &sst_drv_ctx->streams[retval];
- str_info->sfreq = sst_get_sfreq(str_param);
-
- /* power on the analog, if reqd */
- if (str_param->ops == STREAM_OPS_PLAYBACK ||
- str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- sst_drv_ctx->pmic_port_instance);
- else
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- str_info->device);
- /*Only if the playback is MP3 - Send a message*/
- sst_drv_ctx->pb_streams++;
- } else if (str_param->ops == STREAM_OPS_CAPTURE) {
-
- sst_drv_ctx->scard_ops->power_up_pmic_cp(
- sst_drv_ctx->pmic_port_instance);
-		/* Send a message if not sent already */
- sst_drv_ctx->cp_streams++;
- }
-
-err:
- return retval;
-}
-
-void sst_process_mad_ops(struct work_struct *work)
-{
-
- struct mad_ops_wq *mad_ops =
- container_of(work, struct mad_ops_wq, wq);
- int retval = 0;
-
- switch (mad_ops->control_op) {
- case SST_SND_PAUSE:
- retval = sst_pause_stream(mad_ops->stream_id);
- break;
- case SST_SND_RESUME:
- retval = sst_resume_stream(mad_ops->stream_id);
- break;
- case SST_SND_DROP:
- retval = sst_drop_stream(mad_ops->stream_id);
- break;
- case SST_SND_START:
- pr_debug("SST Debug: start stream\n");
- retval = sst_start_stream(mad_ops->stream_id);
- break;
- case SST_SND_STREAM_PROCESS:
- pr_debug("play/capt frames...\n");
- break;
- default:
- pr_err(" wrong control_ops reported\n");
- }
- return;
-}
-
-void send_intial_rx_timeslot(void)
-{
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
- sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
-}
-
-/*
- * sst_open_pcm_stream - Open PCM interface
- *
- * @str_param: parameters of pcm stream
- *
- * This function is called by MID sound card driver to open
- * a new pcm interface
- */
-int sst_open_pcm_stream(struct snd_sst_params *str_param)
-{
- struct stream_info *str_info;
- int retval;
-
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
-
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- /* LPE is suspended, resume it before proceeding*/
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_err("Resume Failed = %#x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- }
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- pr_debug("DSP Downloading FW now...\n");
- retval = sst_download_fw();
- if (retval) {
- pr_err("FW download fail %x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- send_intial_rx_timeslot();
- }
-
- if (!str_param) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return -EINVAL;
- }
-
- retval = sst_get_stream(str_param);
- if (retval > 0) {
- sst_drv_ctx->stream_cnt++;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = MAD_DRV;
- } else
- pm_runtime_put(&sst_drv_ctx->pci->dev);
-
- return retval;
-}
-
-/*
- * sst_close_pcm_stream - Close PCM interface
- *
- * @str_id: stream id to be closed
- *
- * This function is called by MID sound card driver to close
- * an existing pcm interface
- */
-int sst_close_pcm_stream(unsigned int str_id)
-{
- struct stream_info *stream;
-
- pr_debug("sst: stream free called\n");
- if (sst_validate_strid(str_id))
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- free_stream_context(str_id);
- stream->pcm_substream = NULL;
- stream->status = STREAM_UN_INIT;
- stream->period_elapsed = NULL;
- sst_drv_ctx->stream_cnt--;
- pr_debug("sst: will call runtime put now\n");
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return 0;
-}
-
-/*
- * sst_device_control - Set Control params
- *
- * @cmd: control cmd to be set
- * @arg: command argument
- *
- * This function is called by MID sound card driver to set
- * SST/Sound card controls for an opened stream.
- * This is registered with MID driver
- */
-int sst_device_control(int cmd, void *arg)
-{
- int retval = 0, str_id = 0;
-
- switch (cmd) {
- case SST_SND_PAUSE:
- case SST_SND_RESUME:
- case SST_SND_DROP:
- case SST_SND_START:
- sst_drv_ctx->mad_ops.control_op = cmd;
- sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
- queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
- break;
-
- case SST_SND_STREAM_INIT: {
- struct pcm_stream_info *str_info;
- struct stream_info *stream;
-
- pr_debug("stream init called\n");
- str_info = (struct pcm_stream_info *)arg;
- str_id = str_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
-
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("setting the period ptrs\n");
- stream->pcm_substream = str_info->mad_substream;
- stream->period_elapsed = str_info->period_elapsed;
- stream->sfreq = str_info->sfreq;
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- break;
- }
-
- case SST_SND_BUFFER_POINTER: {
- struct pcm_stream_info *stream_info;
- struct snd_sst_tstamp fw_tstamp = {0,};
- struct stream_info *stream;
-
-
- stream_info = (struct pcm_stream_info *)arg;
- str_id = stream_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
-
- if (!stream->pcm_substream)
- break;
- memcpy_fromio(&fw_tstamp,
- ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
- +(str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
-
- pr_debug("Pointer Query on strid = %d ops %d\n",
- str_id, stream->ops);
-
- if (stream->ops == STREAM_OPS_PLAYBACK)
- stream_info->buffer_ptr = fw_tstamp.samples_rendered;
- else
- stream_info->buffer_ptr = fw_tstamp.samples_processed;
- pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
- fw_tstamp.samples_rendered, stream_info->buffer_ptr);
- break;
- }
- case SST_ENABLE_RX_TIME_SLOT: {
- int status = *(int *)arg;
- sst_drv_ctx->rx_time_slot_status = status ;
- sst_enable_rx_timeslot(status);
- break;
- }
- default:
- /* Illegal case */
- pr_warn("illegal req\n");
- return -EINVAL;
- }
-
- return retval;
-}
-
-
-struct intel_sst_pcm_control pcm_ops = {
- .open = sst_open_pcm_stream,
- .device_control = sst_device_control,
- .close = sst_close_pcm_stream,
-};
-
-struct intel_sst_card_ops sst_pmic_ops = {
- .pcm_control = &pcm_ops,
-};
-
-/*
- * register_sst_card - function for sound card to register
- *
- * @card: pointer to structure of operations
- *
- * This function is called card driver loads and is ready for registration
- */
-int register_sst_card(struct intel_sst_card_ops *card)
-{
- if (!sst_drv_ctx) {
- pr_err("No SST driver register card reject\n");
- return -ENODEV;
- }
-
- if (!card || !card->module_name) {
- pr_err("Null Pointer Passed\n");
- return -EINVAL;
- }
- if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
- /* register this driver */
- if ((strncmp(SST_CARD_NAMES, card->module_name,
- strlen(SST_CARD_NAMES))) == 0) {
- sst_drv_ctx->pmic_vendor = card->vendor_id;
- sst_drv_ctx->scard_ops = card->scard_ops;
- sst_pmic_ops.module_name = card->module_name;
- sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
- sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
- card->pcm_control = sst_pmic_ops.pcm_control;
- return 0;
- } else {
- pr_err("strcmp fail %s\n", card->module_name);
- return -EINVAL;
- }
-
- } else {
- /* already registered a driver */
- pr_err("Repeat for registration..denied\n");
- return -EBADRQC;
- }
- /* The ASoC code doesn't set scard_ops */
- if (sst_drv_ctx->scard_ops)
- sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_sst_card);
-
-/*
- * unregister_sst_card- function for sound card to un-register
- *
- * @card: pointer to structure of operations
- *
- * This function is called when card driver unloads
- */
-void unregister_sst_card(struct intel_sst_card_ops *card)
-{
- if (sst_pmic_ops.pcm_control == card->pcm_control) {
- /* unreg */
- sst_pmic_ops.module_name = "";
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- pr_debug("Unregistered %s\n", card->module_name);
- }
- return;
-}
-EXPORT_SYMBOL_GPL(unregister_sst_card);
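One small detail from sst_download_fw() above that is easy to miss: the firmware file name is derived entirely from the PCI device id. A trivial user-space sketch of the same snprintf follows (the 0x1234 id is a placeholder, not a real SST PCI id).

#include <stdio.h>

int main(void)
{
	char name[20];
	unsigned int pci_id = 0x1234;	/* placeholder device id */

	/* mirrors sst_download_fw(): "fw_sst_" + 4-digit hex PCI id + ".bin" */
	snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_", pci_id, ".bin");
	printf("%s\n", name);	/* prints fw_sst_1234.bin */
	return 0;
}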
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
deleted file mode 100644
index 426d2b92073..00000000000
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * intel_sst_dsp.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all dsp controlling functions like firmware download,
- * setting/resetting dsp cores, etc
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/**
- * intel_sst_reset_dsp_mrst - Resetting SST DSP
- *
- * This resets DSP in case of MRST platforms
- */
-static int intel_sst_reset_dsp_mrst(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in mrst\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.strb_cntr_rst = 0;
- csr.part.run_stall = 0x1;
- csr.part.bypass = 0x7;
- csr.part.sst_reset = 0x1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- return 0;
-}
-
-/**
- * intel_sst_reset_dsp_medfield - Resetting SST DSP
- *
- * This resets DSP in case of Medfield platforms
- */
-static int intel_sst_reset_dsp_medfield(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in medfield\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_mrst - Start the SST DSP processor
- *
- * This starts the DSP in MRST platforms
- */
-static int sst_start_mrst(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- csr.part.strb_cntr_rst = 1;
- pr_debug("Setting SST to execute_mrst 0x%x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_medfield - Start the SST DSP processor
- *
- * This starts the DSP in Medfield platforms
- */
-static int sst_start_medfield(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.mfld_strb = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- pr_debug("Starting the DSP_medfld %x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- pr_debug("Starting the DSP_medfld\n");
-
- return 0;
-}
-
-/**
- * sst_parse_module - Parse audio FW modules
- *
- * @module: FW module header
- *
- * Parses modules that need to be placed in SST IRAM and DRAM
- * returns error or 0 if module sizes are proper
- */
-static int sst_parse_module(struct fw_module_header *module)
-{
- struct dma_block_info *block;
- u32 count;
- void __iomem *ram;
-
- pr_debug("module sign %s size %x blocks %x type %x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
- pr_debug("module entrypoint 0x%x\n", module->entry_point);
-
- block = (void *)module + sizeof(*module);
-
- for (count = 0; count < module->blocks; count++) {
- if (block->size <= 0) {
- pr_err("block size invalid\n");
- return -EINVAL;
- }
- switch (block->type) {
- case SST_IRAM:
- ram = sst_drv_ctx->iram;
- break;
- case SST_DRAM:
- ram = sst_drv_ctx->dram;
- break;
- default:
- pr_err("wrong ram type0x%x in block0x%x\n",
- block->type, count);
- return -EINVAL;
- }
- memcpy_toio(ram + block->ram_offset,
- (void *)block + sizeof(*block), block->size);
- block = (void *)block + sizeof(*block) + block->size;
- }
- return 0;
-}
-
-/**
- * sst_parse_fw_image - parse and load FW
- *
- * @sst_fw: pointer to audio fw
- *
- * This function is called to parse and download the FW image
- */
-static int sst_parse_fw_image(const struct firmware *sst_fw)
-{
- struct fw_header *header;
- u32 count;
- int ret_val;
- struct fw_module_header *module;
-
- BUG_ON(!sst_fw);
-
- /* Read the header information from the data pointer */
- header = (struct fw_header *)sst_fw->data;
-
- /* verify FW */
- if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
- (sst_fw->size != header->file_size + sizeof(*header))) {
- /* Invalid FW signature */
- pr_err("Invalid FW sign/filesize mismatch\n");
- return -EINVAL;
- }
- pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%x\n",
- header->signature, header->file_size, header->modules,
- header->file_format, sizeof(*header));
- module = (void *)sst_fw->data + sizeof(*header);
- for (count = 0; count < header->modules; count++) {
- /* module */
- ret_val = sst_parse_module(module);
- if (ret_val)
- return ret_val;
-		module = (void *)module + sizeof(*module) + module->mod_size;
- }
-
- return 0;
-}
-
-/**
- * sst_load_fw - function to load FW into DSP
- *
- * @fw: Pointer to driver loaded FW
- * @context: driver context
- *
- * This function is called by OS when the FW is loaded into kernel
- */
-int sst_load_fw(const struct firmware *fw, void *context)
-{
-	int ret_val = -EINVAL; /* stays an error if the PCI id is unknown */
-
- pr_debug("load_fw called\n");
- BUG_ON(!fw);
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = intel_sst_reset_dsp_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = intel_sst_reset_dsp_medfield();
- if (ret_val)
- return ret_val;
-
- ret_val = sst_parse_fw_image(fw);
- if (ret_val)
- return ret_val;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* 7. ask scu to reset the bypass bits */
- /* 8.bring sst out of reset */
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = sst_start_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = sst_start_medfield();
- if (ret_val)
- return ret_val;
-
- pr_debug("fw loaded successful!!!\n");
- return ret_val;
-}
-
-/*
- * This function is called when any codec/post processing library
- * needs to be downloaded
- */
-static int sst_download_library(const struct firmware *fw_lib,
- struct snd_sst_lib_download_info *lib)
-{
- /* send IPC message and wait */
- int i;
- u8 pvt_id;
- struct ipc_post *msg = NULL;
- union config_status_reg csr;
- struct snd_sst_str_type str_type = {0};
- int retval = 0;
-
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
- if (i < 0) {
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
- msg->header.part.data = sizeof(u32) + sizeof(str_type);
- str_type.codec_type = lib->dload_lib.lib_info.lib_type;
- /*str_type.pvt_id = pvt_id;*/
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_err("Prep codec downloaded failed %d\n",
- retval);
- return -EIO;
- }
- pr_debug("FW responded, ready for download now...\n");
- /* downloading on success */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- csr.full = readl(sst_drv_ctx->shim + SST_CSR);
- csr.part.run_stall = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x7;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- sst_parse_fw_image(fw_lib);
-
- /* set the FW to running again */
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- /* send download complete and wait */
- if (sst_create_large_msg(&msg)) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- msg->header.part.data = sizeof(u32) + sizeof(*lib);
- lib->pvt_id = pvt_id;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("Waiting for FW response Download complete\n");
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -EIO;
- }
-
- pr_debug("FW success on Download complete\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-
-}
-
-/*
- * This function is called to validate the codec/postprocessing library
- * before it is set for download to the SST DSP
- */
-static int sst_validate_library(const struct firmware *fw_lib,
- struct lib_slot_info *slot,
- u32 *entry_point)
-{
- struct fw_header *header;
- struct fw_module_header *module;
- struct dma_block_info *block;
- unsigned int n_blk, isize = 0, dsize = 0;
- int err = 0;
-
- header = (struct fw_header *)fw_lib->data;
- if (header->modules != 1) {
- pr_err("Module no mismatch found\n");
- err = -EINVAL;
- goto exit;
- }
- module = (void *)fw_lib->data + sizeof(*header);
- *entry_point = module->entry_point;
- pr_debug("Module entry point 0x%x\n", *entry_point);
- pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
-
- block = (void *)module + sizeof(*module);
- for (n_blk = 0; n_blk < module->blocks; n_blk++) {
- switch (block->type) {
- case SST_IRAM:
- isize += block->size;
- break;
- case SST_DRAM:
- dsize += block->size;
- break;
- default:
- pr_err("Invalid block type for 0x%x\n", n_blk);
- err = -EINVAL;
- goto exit;
- }
- block = (void *)block + sizeof(*block) + block->size;
- }
- if (isize > slot->iram_size || dsize > slot->dram_size) {
- pr_err("library exceeds size allocated\n");
- err = -EINVAL;
- goto exit;
- } else
- pr_debug("Library is safe for download...\n");
-
- pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
- isize, dsize, slot->iram_size, slot->dram_size);
-exit:
- return err;
-
-}
-
-/*
- * This function is called when the FW requests a particular library
- * download; it prepares the library for download
- */
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
-{
- char buf[20];
- const char *type, *dir;
- int len = 0, error = 0;
- u32 entry_point;
- const struct firmware *fw_lib;
- struct snd_sst_lib_download_info dload_info = {{{0},},};
-
- memset(buf, 0, sizeof(buf));
-
- pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
- lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
- pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
- lib->lib_info.lib_version, lib->lib_info.lib_name,
- lib->lib_info.lib_caps, lib->lib_info.media_type);
-
- pr_debug("IRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.iram_size, lib->slot_info.iram_offset);
- pr_debug("DRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.dram_size, lib->slot_info.dram_offset);
-
- switch (lib->lib_info.lib_type) {
- case SST_CODEC_TYPE_MP3:
- type = "mp3_";
- break;
- case SST_CODEC_TYPE_AAC:
- type = "aac_";
- break;
- case SST_CODEC_TYPE_AACP:
- type = "aac_v1_";
- break;
- case SST_CODEC_TYPE_eAACP:
- type = "aac_v2_";
- break;
- case SST_CODEC_TYPE_WMA9:
- type = "wma9_";
- break;
- default:
- pr_err("Invalid codec type\n");
- error = -EINVAL;
- goto wake;
- }
-
- if (ops == STREAM_OPS_CAPTURE)
- dir = "enc_";
- else
- dir = "dec_";
- len = strlen(type) + strlen(dir);
- strncpy(buf, type, sizeof(buf)-1);
- strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
- len += snprintf(buf + len, sizeof(buf) - len, "%d",
- lib->slot_info.slot_num);
- len += snprintf(buf + len, sizeof(buf) - len, ".bin");
-
- pr_debug("Requesting %s\n", buf);
-
- error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
- if (error) {
- pr_err("library load failed %d\n", error);
- goto wake;
- }
- error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
- if (error)
- goto wake_free;
-
- lib->mod_entry_pt = entry_point;
- memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
- error = sst_download_library(fw_lib, &dload_info);
- if (error)
- goto wake_free;
-
- /* lib is downloaded and init send alloc again */
- pr_debug("Library is downloaded now...\n");
-wake_free:
- /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
- release_firmware(fw_lib);
-wake:
- return error;
-}
-
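To make the image layout walked by sst_parse_fw_image() and sst_parse_module() above easier to picture, here is a self-contained user-space sketch that builds and walks a one-module, one-block image using the fw_header/fw_module_header/dma_block_info layouts from intel_sst_common.h. The 4-byte signature size and the "$SST" magic are assumptions for the sketch; the real values come from that header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_SIGNATURE_SIZE 4		/* assumed; defined in intel_sst_common.h */

struct fw_header {			/* layouts copied from intel_sst_common.h */
	unsigned char signature[FW_SIGNATURE_SIZE];
	uint32_t file_size;		/* size of fw minus this header */
	uint32_t modules;		/* # of modules */
	uint32_t file_format;
	uint32_t reserved[4];
};

struct fw_module_header {
	unsigned char signature[FW_SIGNATURE_SIZE];
	uint32_t mod_size;		/* size of module payload */
	uint32_t blocks;		/* # of blocks */
	uint32_t type;
	uint32_t entry_point;
};

struct dma_block_info {
	uint32_t type;			/* IRAM/DRAM selector */
	uint32_t size;			/* bytes */
	uint32_t ram_offset;		/* offset in I/DRAM */
	uint32_t rsvd;
};

/* Walk modules and blocks the same way sst_parse_fw_image()/sst_parse_module() do */
static int walk_image(const uint8_t *data, size_t len)
{
	const struct fw_header *hdr = (const void *)data;
	const uint8_t *mod_p = data + sizeof(*hdr);
	uint32_t m, b;

	if (len < sizeof(*hdr) || hdr->file_size + sizeof(*hdr) != len)
		return -1;	/* size check mirrors the driver's verify step */
	for (m = 0; m < hdr->modules; m++) {
		const struct fw_module_header *mod = (const void *)mod_p;
		const uint8_t *blk_p = mod_p + sizeof(*mod);

		for (b = 0; b < mod->blocks; b++) {
			const struct dma_block_info *blk = (const void *)blk_p;

			printf("module %u block %u: %u bytes at ram offset 0x%x\n",
			       m, b, blk->size, blk->ram_offset);
			blk_p += sizeof(*blk) + blk->size; /* payload follows header */
		}
		mod_p += sizeof(*mod) + mod->mod_size;	/* next module header */
	}
	return 0;
}

int main(void)
{
	/* one module with one 16-byte block; headers start out zeroed */
	static union {
		uint8_t bytes[sizeof(struct fw_header) +
			      sizeof(struct fw_module_header) +
			      sizeof(struct dma_block_info) + 16];
		uint32_t align;
	} img;
	struct fw_header *hdr = (void *)img.bytes;
	struct fw_module_header *mod = (void *)(img.bytes + sizeof(*hdr));
	struct dma_block_info *blk = (void *)(img.bytes + sizeof(*hdr) + sizeof(*mod));

	memcpy(hdr->signature, "$SST", FW_SIGNATURE_SIZE); /* assumed SST_FW_SIGN */
	hdr->file_size = sizeof(img.bytes) - sizeof(*hdr);
	hdr->modules = 1;
	mod->mod_size = sizeof(*blk) + 16;
	mod->blocks = 1;
	blk->size = 16;
	blk->ram_offset = 0x100;

	return walk_image(img.bytes, sizeof(img.bytes));
}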
diff --git a/drivers/staging/intel_sst/intel_sst_fw_ipc.h b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
deleted file mode 100644
index 5d0cc56aaef..00000000000
--- a/drivers/staging/intel_sst/intel_sst_fw_ipc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-#ifndef __INTEL_SST_FW_IPC_H__
-#define __INTEL_SST_FW_IPC_H__
-/*
-* intel_sst_fw_ipc.h - Intel SST Driver for audio engine
-*
-* Copyright (C) 2008-10 Intel Corporation
-* Author: Vinod Koul <vinod.koul@intel.com>
-* Harsha Priya <priya.harsha@intel.com>
-* Dharageswari R <dharageswari.r@intel.com>
-* KP Jeeja <jeeja.kp@intel.com>
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; version 2 of the License.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License along
-* with this program; if not, write to the Free Software Foundation, Inc.,
-* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
-*
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This driver exposes the audio engine functionalities to the ALSA
-* and middleware.
-* This file has definitions shared between the firmware and driver
-*/
-
-#define MAX_NUM_STREAMS_MRST 3
-#define MAX_NUM_STREAMS_MFLD 6
-#define MAX_NUM_STREAMS 6
-#define MAX_DBG_RW_BYTES 80
-#define MAX_NUM_SCATTER_BUFFERS 8
-#define MAX_LOOP_BACK_DWORDS 8
-/* IPC base address and mailbox, timestamp offsets */
-#define SST_MAILBOX_SIZE 0x0400
-#define SST_MAILBOX_SEND 0x0000
-#define SST_MAILBOX_RCV 0x0804
-#define SST_TIME_STAMP 0x1800
-#define SST_RESERVED_OFFSET 0x1A00
-#define SST_CHEKPOINT_OFFSET 0x1C00
-#define REPLY_MSG 0x80
-
-/* Message ID's for IPC messages */
-/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
-
-/* I2L Firmware/Codec Download msgs */
-#define IPC_IA_PREP_LIB_DNLD 0x01
-#define IPC_IA_LIB_DNLD_CMPLT 0x02
-
-#define IPC_IA_SET_PMIC_TYPE 0x03
-#define IPC_IA_GET_FW_VERSION 0x04
-#define IPC_IA_GET_FW_BUILD_INF 0x05
-#define IPC_IA_GET_FW_INFO 0x06
-#define IPC_IA_GET_FW_CTXT 0x07
-#define IPC_IA_SET_FW_CTXT 0x08
-
-/* I2L Codec Config/control msgs */
-#define IPC_IA_SET_CODEC_PARAMS 0x10
-#define IPC_IA_GET_CODEC_PARAMS 0x11
-#define IPC_IA_SET_PPP_PARAMS 0x12
-#define IPC_IA_GET_PPP_PARAMS 0x13
-#define IPC_IA_PLAY_FRAMES 0x14
-#define IPC_IA_CAPT_FRAMES 0x15
-#define IPC_IA_PLAY_VOICE 0x16
-#define IPC_IA_CAPT_VOICE 0x17
-#define IPC_IA_DECODE_FRAMES 0x18
-
-#define IPC_IA_ALG_PARAMS 0x1A
-#define IPC_IA_TUNING_PARAMS 0x1B
-
-/* I2L Stream config/control msgs */
-#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
-#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
-#define IPC_IA_SET_STREAM_PARAMS 0x22
-#define IPC_IA_GET_STREAM_PARAMS 0x23
-#define IPC_IA_PAUSE_STREAM 0x24
-#define IPC_IA_RESUME_STREAM 0x25
-#define IPC_IA_DROP_STREAM 0x26
-#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
-#define IPC_IA_TARGET_DEV_SELECT 0x28
-#define IPC_IA_CONTROL_ROUTING 0x29
-
-#define IPC_IA_SET_STREAM_VOL 0x2A /*Vol for stream, pre mixer */
-#define IPC_IA_GET_STREAM_VOL 0x2B
-#define IPC_IA_SET_STREAM_MUTE 0x2C
-#define IPC_IA_GET_STREAM_MUTE 0x2D
-#define IPC_IA_ENABLE_RX_TIME_SLOT 0x2E /* Enable Rx time slot 0 or 1 */
-
-#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
-
-/* Debug msgs */
-#define IPC_IA_DBG_MEM_READ 0x40
-#define IPC_IA_DBG_MEM_WRITE 0x41
-#define IPC_IA_DBG_LOOP_BACK 0x42
-
-/* L2I Firmware/Codec Download msgs */
-#define IPC_IA_FW_INIT_CMPLT 0x81
-#define IPC_IA_LPE_GETTING_STALLED 0x82
-#define IPC_IA_LPE_UNSTALLED 0x83
-
-/* L2I Codec Config/control msgs */
-#define IPC_SST_GET_PLAY_FRAMES 0x90 /* Request IA more data */
-#define IPC_SST_GET_CAPT_FRAMES 0x91 /* Request IA more data */
-#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
-#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Under run and stopped */
-#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
-#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
-#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
-#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
-#define IPC_IA_TARGET_DEV_CHNGD 0x98 /* target device changed */
-
-#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
-/* L2S messages */
-#define IPC_SC_DDR_LINK_UP 0xC0
-#define IPC_SC_DDR_LINK_DOWN 0xC1
-#define IPC_SC_SET_LPECLK_REQ 0xC2
-#define IPC_SC_SSP_BIT_BANG 0xC3
-
-/* L2I Error reporting msgs */
-#define IPC_IA_MEM_ALLOC_FAIL 0xE0
-#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
- stream can be used by playback and
- capture modules */
-
-/* L2I Debug msgs */
-#define IPC_IA_PRINT_STRING 0xF0
-
-
-
-/* Command Response or Acknowledge message to any IPC message will have
- * same message ID and stream ID information which is sent.
- * There is no specific Ack message ID. The data field is used as response
- * meaning.
- */
-enum ackData {
- IPC_ACK_SUCCESS = 0,
- IPC_ACK_FAILURE
-};
-
-
-enum sst_error_codes {
- /* Error code,response to msgId: Description */
- /* Common error codes */
- SST_SUCCESS = 0, /* Success */
- SST_ERR_INVALID_STREAM_ID = 1,
- SST_ERR_INVALID_MSG_ID = 2,
- SST_ERR_INVALID_STREAM_OP = 3,
- SST_ERR_INVALID_PARAMS = 4,
- SST_ERR_INVALID_CODEC = 5,
- SST_ERR_INVALID_MEDIA_TYPE = 6,
- SST_ERR_STREAM_ERR = 7,
-
- /* IPC specific error codes */
- SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
- SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
- SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
- SST_IPC_ERR_GET_STREAM_FAILED = 11,
- SST_ERR_MOD_NOT_AVAIL = 12,
- SST_ERR_MOD_DNLD_RQD = 13,
- SST_ERR_STREAM_STOPPED = 14,
- SST_ERR_STREAM_IN_USE = 15,
-
- /* Capture specific error codes */
- SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
- SST_CAP_ERR_CAPTURE_FAIL = 17,
- SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
- SST_CAP_ERR_UNDER_RUN = 19,
- SST_CAP_ERR_OVERFLOW = 20,
-
- /* Playback specific error codes*/
- SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
- SST_PB_ERR_PLAY_FAIL = 22,
- SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
-
- /* Codec manager specific error codes */
- SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
- SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
-
- /* Library manager specific error codes */
- SST_SCC_ERR_PREP_DNLD_FAILED = 26,
- SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
- /* Scheduler specific error codes */
- SST_SCH_ERR_FAIL = 28,
-
- /* DMA specific error codes */
- SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
- SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
- SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
- SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
- SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
- SST_DMA_ERR_TRANSFER_FAILED = 34,
-
- SST_SSP_ERR_ALREADY_ENABLED = 35,
- SST_SSP_ERR_ALREADY_DISABLED = 36,
- SST_SSP_ERR_NOT_INITIALIZED = 37,
- SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
-
- /* Other error codes */
- SST_ERR_MOD_INIT_FAIL = 39,
-
- /* FW init error codes */
- SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
- SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
- SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
- SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
-
- /* Memory debug error codes */
- SST_ERR_DBG_MEM_READ_FAIL = 44,
- SST_ERR_DBG_MEM_WRITE_FAIL = 45,
- SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
- SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
-
- SST_ERR_BUFFER_NOT_AVAILABLE = 48,
- SST_ERR_BUFFER_NOT_ALLOCATED = 49,
- SST_ERR_INVALID_REGION_TYPE = 50,
- SST_ERR_NULL_PTR = 51,
- SST_ERR_INVALID_BUFFER_SIZE = 52,
- SST_ERR_INVALID_BUFFER_INDEX = 53,
-
- /*IIPC specific error codes */
- SST_IIPC_QUEUE_FULL = 54,
- SST_IIPC_ERR_MSG_SND_FAILED = 55,
- SST_PB_ERR_UNDERRUN_OCCURED = 56,
- SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
- SST_INVALID_TIME_SLOTS = 58,
-};
-
-enum dbg_mem_data_type {
- /* Data type of debug read/write */
- DATA_TYPE_U32,
- DATA_TYPE_U16,
- DATA_TYPE_U8,
-};
-
-/* CAUTION NOTE: All IPC message body must be multiple of 32 bits.*/
-
-/* IPC Header */
-union ipc_header {
- struct {
- u32 msg_id:8; /* Message ID - Max 256 Message Types */
- u32 str_id:5;
- u32 large:1; /* Large Message if large = 1 */
- u32 reserved:2; /* Reserved for future use */
- u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
- u32 done:1; /* bit 30 */
- u32 busy:1; /* bit 31 */
- } part;
- u32 full;
-} __attribute__ ((packed));
-
-/* Firmware build info */
-struct sst_fw_build_info {
- unsigned char date[16]; /* Firmware build date */
- unsigned char time[16]; /* Firmware build time */
-} __attribute__ ((packed));
-
-struct ipc_header_fw_init {
- struct snd_sst_fw_version fw_version;/* Firmware version details */
- struct sst_fw_build_info build_info;
- u16 result; /* Fw init result */
- u8 module_id; /* Module ID in case of error */
- u8 debug_info; /* Debug info from Module ID in case of fail */
-} __attribute__ ((packed));
-
-/* Address and size info of a frame buffer in DDR */
-struct sst_address_info {
- u32 addr; /* Address at IA */
- u32 size; /* Size of the buffer */
-} __attribute__ ((packed));
-
-/* Time stamp */
-struct snd_sst_tstamp {
- u64 samples_processed;/* capture - data in DDR */
- u64 samples_rendered;/* playback - data rendered */
- u64 bytes_processed;/* bytes decoded or encoded */
- u32 sampling_frequency;/* eg: 48000, 44100 */
- u32 dma_base_address;/* DMA base address */
- u16 dma_channel_no;/* DMA Channel used for the data transfer*/
- u16 reserved;/* 32 bit alignment */
-};
-
-/* Frame info to play or capture */
-struct sst_frame_info {
- u16 num_entries; /* number of entries to follow */
- u16 rsrvd;
- struct sst_address_info addr[MAX_NUM_SCATTER_BUFFERS];
-} __attribute__ ((packed));
-
-/* Frames info for decode */
-struct snd_sst_decode_info {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct sst_frame_info frames_in;
- struct sst_frame_info frames_out;
-} __attribute__ ((packed));
-
-/* SST to IA print debug message*/
-struct ipc_sst_ia_print_params {
- u32 string_size;/* Max value is 160 */
- u8 prt_string[160];/* Null terminated Char string */
-} __attribute__ ((packed));
-
-/* Voice data message */
-struct snd_sst_voice_data {
- u16 num_bytes;/* Number of valid voice data bytes */
-	u8 pcm_wd_size;/* 0=8 bit, 1=16 bit, 2=32 bit */
- u8 reserved;/* Reserved */
- u8 voice_data_buf[0];/* Voice data buffer in bytes, little endian */
-} __attribute__ ((packed));
-
-/* SST to IA memory read debug message */
-struct ipc_sst_ia_dbg_mem_rw {
- u16 num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
- u16 data_type;/* enum: dbg_mem_data_type */
- u32 address; /* Memory address of data memory of data_type */
- u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
-} __attribute__ ((packed));
-
-struct ipc_sst_ia_dbg_loop_back {
- u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
- u16 increment_val;/* Increments dwords by this value, 0- no increment */
- u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
-} __attribute__ ((packed));
-
-/* Stream type params structure for Alloc stream */
-struct snd_sst_str_type {
- u8 codec_type; /* Codec type */
- u8 str_type; /* 1 = voice 2 = music */
- u8 operation; /* Playback or Capture */
- u8 protected_str; /* 0=Non DRM, 1=DRM */
- u8 time_slots;
- u8 reserved; /* Reserved */
- u16 result; /* Result used for acknowledgment */
-} __attribute__ ((packed));
-
-/* Library info structure */
-struct module_info {
- u32 lib_version;
- u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
- u32 media_type;
- u8 lib_name[12];
- u32 lib_caps;
- unsigned char b_date[16]; /* Lib build date */
- unsigned char b_time[16]; /* Lib build time */
-} __attribute__ ((packed));
-
-/* Library slot info */
-struct lib_slot_info {
- u8 slot_num; /* 1 or 2 */
- u8 reserved1;
- u16 reserved2;
- u32 iram_size; /* slot size in IRAM */
- u32 dram_size; /* slot size in DRAM */
- u32 iram_offset; /* starting offset of slot in IRAM */
- u32 dram_offset; /* starting offset of slot in DRAM */
-} __attribute__ ((packed));
-
-struct snd_sst_lib_download {
- struct module_info lib_info; /* library info type, capabilities etc */
- struct lib_slot_info slot_info; /* slot info to be downloaded */
- u32 mod_entry_pt;
-};
-
-struct snd_sst_lib_download_info {
- struct snd_sst_lib_download dload_lib;
- u16 result; /* Result used for acknowledgment */
- u8 pvt_id; /* Private ID */
- u8 reserved; /* for alignment */
-};
-
-/* Alloc stream params structure */
-struct snd_sst_alloc_params {
- struct snd_sst_str_type str_type;
- struct snd_sst_stream_params stream_params;
-};
-
-struct snd_sst_fw_get_stream_params {
- struct snd_sst_stream_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-/* Alloc stream response message */
-struct snd_sst_alloc_response {
- struct snd_sst_str_type str_type; /* Stream type for allocation */
- struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
-};
-
-/* Drop response */
-struct snd_sst_drop_response {
- u32 result;
- u32 bytes;
-};
-
-/* CSV Voice call routing structure */
-struct snd_sst_control_routing {
- u8 control; /* 0=start, 1=Stop */
- u8 reserved[3]; /* Reserved- for 32 bit alignment */
-};
-
-
-struct ipc_post {
- struct list_head node;
- union ipc_header header; /* driver specific */
- char *mailbox_data;
-};
-
-struct snd_sst_ctxt_params {
- u32 address; /* Physical Address in DDR where the context is stored */
- u32 size; /* size of the context */
-};
-#endif /* __INTEL_SST_FW_IPC_H__ */
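A detail worth spelling out from the structures above: the firmware keeps one snd_sst_tstamp per stream starting at SST_TIME_STAMP in the mailbox, and the SST_SND_BUFFER_POINTER handler in intel_sst_drv_interface.c reads its stream's slot with memcpy_fromio at mailbox + SST_TIME_STAMP + str_id * sizeof(fw_tstamp). A small user-space sketch of just that offset arithmetic (the slot stride follows normal structure padding rules, exactly as in the driver):

#include <stdint.h>
#include <stdio.h>

#define SST_TIME_STAMP 0x1800	/* mailbox offset, as defined above */

/* layout copied from struct snd_sst_tstamp above */
struct snd_sst_tstamp {
	uint64_t samples_processed;	/* capture - data in DDR */
	uint64_t samples_rendered;	/* playback - data rendered */
	uint64_t bytes_processed;	/* bytes decoded or encoded */
	uint32_t sampling_frequency;
	uint32_t dma_base_address;
	uint16_t dma_channel_no;
	uint16_t reserved;
};

int main(void)
{
	unsigned int str_id;

	/* per-stream timestamp slots, as indexed by SST_SND_BUFFER_POINTER */
	for (str_id = 0; str_id < 3; str_id++)
		printf("stream %u tstamp at mailbox offset 0x%zx\n", str_id,
		       SST_TIME_STAMP + (size_t)str_id * sizeof(struct snd_sst_tstamp));
	return 0;
}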
diff --git a/drivers/staging/intel_sst/intel_sst_ioctl.h b/drivers/staging/intel_sst/intel_sst_ioctl.h
deleted file mode 100644
index 5da5ee092c6..00000000000
--- a/drivers/staging/intel_sst/intel_sst_ioctl.h
+++ /dev/null
@@ -1,440 +0,0 @@
-#ifndef __INTEL_SST_IOCTL_H__
-#define __INTEL_SST_IOCTL_H__
-/*
- * intel_sst_ioctl.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all sst ioctls
- */
-
-/* codec and post/pre processing related info */
-
-#include <linux/types.h>
-
-enum sst_codec_types {
-/* AUDIO/MUSIC CODEC Type Definitions */
- SST_CODEC_TYPE_UNKNOWN = 0,
- SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
- SST_CODEC_TYPE_MP3,
- SST_CODEC_TYPE_MP24,
- SST_CODEC_TYPE_AAC,
- SST_CODEC_TYPE_AACP,
- SST_CODEC_TYPE_eAACP,
- SST_CODEC_TYPE_WMA9,
- SST_CODEC_TYPE_WMA10,
- SST_CODEC_TYPE_WMA10P,
- SST_CODEC_TYPE_RA,
- SST_CODEC_TYPE_DDAC3,
- SST_CODEC_TYPE_STEREO_TRUE_HD,
- SST_CODEC_TYPE_STEREO_HD_PLUS,
-
- /* VOICE CODEC Type Definitions */
- SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
-};
-
-enum sst_algo_types {
- SST_CODEC_SRC = 0x64,
- SST_CODEC_MIXER = 0x65,
- SST_CODEC_DOWN_MIXER = 0x66,
- SST_CODEC_VOLUME_CONTROL = 0x67,
- SST_CODEC_OEM1 = 0xC8,
- SST_CODEC_OEM2 = 0xC9,
-};
-
-enum snd_sst_stream_ops {
- STREAM_OPS_PLAYBACK = 0, /* Decode */
- STREAM_OPS_CAPTURE, /* Encode */
- STREAM_OPS_PLAYBACK_DRM, /* Play Audio/Voice */
- STREAM_OPS_PLAYBACK_ALERT, /* Play Audio/Voice */
- STREAM_OPS_CAPTURE_VOICE_CALL, /* CSV Voice recording */
-};
-
-enum stream_mode {
- SST_STREAM_MODE_NONE = 0,
- SST_STREAM_MODE_DNR = 1,
- SST_STREAM_MODE_FNF = 2,
- SST_STREAM_MODE_CAPTURE = 3
-};
-
-enum stream_type {
- SST_STREAM_TYPE_NONE = 0,
- SST_STREAM_TYPE_MUSIC = 1,
- SST_STREAM_TYPE_NORMAL = 2,
- SST_STREAM_TYPE_LONG_PB = 3,
- SST_STREAM_TYPE_LOW_LATENCY = 4,
-};
-
-enum snd_sst_audio_device_type {
- SND_SST_DEVICE_HEADSET = 1,
- SND_SST_DEVICE_IHF,
- SND_SST_DEVICE_VIBRA,
- SND_SST_DEVICE_HAPTIC,
- SND_SST_DEVICE_CAPTURE,
-};
-
-/* Firmware Version info */
-struct snd_sst_fw_version {
- __u8 build; /* build number*/
- __u8 minor; /* minor number*/
- __u8 major; /* major number*/
- __u8 type; /* build type */
-};
-
-/* Port info structure */
-struct snd_sst_port_info {
- __u16 port_type;
- __u16 reserved;
-};
-
-/* Mixer info structure */
-struct snd_sst_mix_info {
- __u16 max_streams;
- __u16 reserved;
-};
-
-/* PCM Parameters */
-struct snd_pcm_params {
- __u16 codec; /* codec type */
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 reserved; /* Bitrate in bits per second */
- __u32 sfreq; /* Sampling rate in Hz */
- __u32 ring_buffer_size;
- __u32 period_count; /* period elapsed in samples*/
- __u32 ring_buffer_addr;
-};
-
-/* MP3 Music Parameters Message */
-struct snd_mp3_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
- __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
- __u8 crc_check; /* crc_check - disable (0) or enable (1) */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB*/
- __u16 reserved; /* Unused */
-};
-
-#define AAC_BIT_STREAM_ADTS 0
-#define AAC_BIT_STREAM_ADIF 1
-#define AAC_BIT_STREAM_RAW 2
-
-/* AAC Music Parameters Message */
-struct snd_aac_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo*/
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate;
-	__u32 sfreq; /* Sampling freq eg. 8000, 44100, 48000 */
- __u32 aac_srate; /* Plain AAC decoder operating sample rate */
- __u8 mpg_id; /* 0=MPEG-2, 1=MPEG-4 */
- __u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
- __u8 aac_profile; /* 0=Main Profile, 1=LC profile, 3=SSR profile */
- __u8 ext_chl; /* No.of external channels */
- __u8 aot; /* Audio object type. 1=Main , 2=LC , 3=SSR, 4=SBR*/
- __u8 op_align; /* output alignment 0=16 bit , 1=MSB, 2= LSB align */
- __u8 brate_type; /* 0=CBR, 1=VBR */
- __u8 crc_check; /* crc check 0= disable, 1=enable */
- __s8 bit_stream_format[8]; /* input bit stream format adts/adif/raw */
- __u8 jstereo; /* Joint stereo Flag */
- __u8 sbr_present; /* 1 = SBR Present, 0 = SBR absent, for RAW */
- __u8 downsample; /* 1 = Downsampling ON, 0 = Downsampling OFF */
- __u8 num_syntc_elems; /* 1- Mono/stereo, 0 - Dual Mono, 0 - for raw */
- __s8 syntc_id[2]; /* 0 for ID_SCE (Dual Mono), -1 for raw */
- __s8 syntc_tag[2]; /* raw - -1 and 0 -16 for rest of the streams */
- __u8 pce_present; /* Flag. 1- present 0 - not present, for RAW */
- __u8 sbr_type; /* sbr_type: 0-plain aac, 1-aac-v1, 2-aac-v2 */
- __u8 outchmode; /*0- mono, 1-stereo, 2-dual mono 3-Parametric stereo */
- __u8 ps_present;
-};
-
-/* WMA Music Parameters Message */
-struct snd_wma_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
- __u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u32 channel_mask; /* Channel Mask */
- __u16 format_tag; /* Format Tag */
- __u16 block_align; /* packet size */
- __u16 wma_encode_opt;/* Encoder option */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
- __u8 pcm_src; /* input pcm bit width */
-};
-
-/* Pre processing param structure */
-struct snd_prp_params {
- __u32 reserved; /* No pre-processing defined yet */
-};
-
-/* Pre and post processing params structure */
-struct snd_ppp_params {
- __u8 algo_id;/* Post/Pre processing algorithm ID */
- __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
- __u8 enable; /* 0= disable, 1= enable*/
- __u8 reserved;
- __u32 size; /*Size of parameters for all blocks*/
- void *params;
-} __attribute__ ((packed));
-
-struct snd_sst_postproc_info {
- __u32 src_min; /* Supported SRC Min sampling freq */
- __u32 src_max; /* Supported SRC Max sampling freq */
- __u8 src; /* 0=Not supported, 1=Supported */
- __u8 bass_boost; /* 0=Not Supported, 1=Supported */
- __u8 stereo_widening; /* 0=Not Supported, 1=Supported */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 mute_control; /* 0=No Mute, 1=Mute */
- __u8 reserved1;
- __u16 reserved2;
-};
-
-/* pre processing Capability info structure */
-struct snd_sst_prp_info {
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __u8 reserved1; /* for 32 bit alignment */
- __u16 reserved2; /* for 32 bit alignment */
-} __attribute__ ((packed));
-
-/*Pre / Post processing algorithms support*/
-struct snd_sst_ppp_info {
- __u32 src:1; /* 0=Not supported, 1=Supported */
- __u32 mixer:1; /* 0=Not supported, 1=Supported */
- __u32 volume_control:1; /* 0=Not Supported, 1=Supported */
- __u32 mute_control:1; /* 0=Not Supported, 1=Supported */
- __u32 anc:1; /* 0=Not Supported, 1=Supported */
- __u32 side_tone:1; /* 0=Not Supported, 1=Supported */
- __u32 dc_removal:1; /* 0=Not Supported, 1=Supported */
- __u32 equalizer:1; /* 0=Not Supported, 1=Supported */
- __u32 spkr_prot:1; /* 0=Not Supported, 1=Supported */
- __u32 bass_boost:1; /* 0=Not Supported, 1=Supported */
- __u32 stereo_widening:1;/* 0=Not Supported, 1=Supported */
- __u32 rsvd1:21;
- __u32 rsvd2;
-};
-
-/* Firmware capabilities info */
-struct snd_sst_fw_info {
- struct snd_sst_fw_version fw_version; /* Firmware version */
- __u8 audio_codecs_supported[8]; /* Codecs supported by FW */
- __u32 recommend_min_duration; /* Min duration for Lowpower Playback */
- __u8 max_pcm_streams_supported; /* Max num of PCM streams supported */
- __u8 max_enc_streams_supported; /* Max number of Encoded streams */
- __u16 reserved; /* 32 bit alignment*/
- struct snd_sst_ppp_info ppp_info; /* pre_processing mod cap info */
- struct snd_sst_postproc_info pop_info; /* Post processing cap info*/
- struct snd_sst_port_info port_info[3]; /* Port info */
- struct snd_sst_mix_info mix_info;/* Mixer info */
- __u32 min_input_buf; /* minimum input buffer for decode */
-};
-
-/* Codec params structure */
-union snd_sst_codec_params {
- struct snd_pcm_params pcm_params;
- struct snd_mp3_params mp3_params;
- struct snd_aac_params aac_params;
- struct snd_wma_params wma_params;
-};
-
-
-struct snd_sst_stream_params {
- union snd_sst_codec_params uc;
-} __attribute__ ((packed));
-
-struct snd_sst_params {
- __u32 result;
- __u32 stream_id;
- __u8 codec;
- __u8 ops;
- __u8 stream_type;
- __u8 device_type;
- struct snd_sst_stream_params sparams;
-};
-
-struct snd_sst_vol {
- __u32 stream_id;
- __s32 volume;
- __u32 ramp_duration;
- __u32 ramp_type; /* Ramp type, default=0 */
-};
-
-struct snd_sst_mute {
- __u32 stream_id;
- __u32 mute;
-};
-
-/* ioctl related stuff here */
-struct snd_sst_pmic_config {
- __u32 sfreq; /* Sampling rate in Hz */
- __u16 num_chan; /* Mono =1 or Stereo =2 */
- __u16 pcm_wd_sz; /* Number of bits per sample */
-} __attribute__ ((packed));
-
-struct snd_sst_get_stream_params {
- struct snd_sst_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-enum snd_sst_target_type {
- SND_SST_TARGET_PMIC = 1,
- SND_SST_TARGET_LPE,
- SND_SST_TARGET_MODEM,
- SND_SST_TARGET_BT,
- SND_SST_TARGET_FM,
- SND_SST_TARGET_NONE,
-};
-
-enum snd_sst_device_type {
- SND_SST_DEVICE_SSP = 1,
- SND_SST_DEVICE_PCM,
- SND_SST_DEVICE_OTHER,
-};
-
-enum snd_sst_device_mode {
-
- SND_SST_DEV_MODE_PCM_MODE1 = 1, /*(16-bit word, bit-length frame sync)*/
- SND_SST_DEV_MODE_PCM_MODE2,
- SND_SST_DEV_MODE_PCM_MODE3,
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_LEFT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_I2S, /*(I2S mode, 16-bit words)*/
- SND_SST_DEV_MODE_PCM_MODE5,
- SND_SST_DEV_MODE_PCM_MODE6,
-};
-
-enum snd_sst_port_action {
- SND_SST_PORT_PREPARE = 1,
- SND_SST_PORT_ACTIVATE,
-};
-
-/* Target selection per device structure */
-struct snd_sst_slot_info {
- __u8 mix_enable; /* Mixer enable or disable */
- __u8 device_type;
- __u8 device_instance; /* 0, 1, 2 */
- __u8 target_device;
- __u16 target_sink;
- __u8 slot[2];
- __u8 master;
- __u8 action;
- __u8 device_mode;
- __u8 reserved;
- struct snd_sst_pmic_config pcm_params;
-} __attribute__ ((packed));
-
-#define SST_MAX_TARGET_DEVICES 3
-/* Target device list structure */
-struct snd_sst_target_device {
- __u32 device_route;
- struct snd_sst_slot_info devices[SST_MAX_TARGET_DEVICES];
-} __attribute__ ((packed));
-
-struct snd_sst_driver_info {
- __u32 version; /* Version of the driver */
- __u32 active_pcm_streams;
- __u32 active_enc_streams;
- __u32 max_pcm_streams;
- __u32 max_enc_streams;
- __u32 buf_per_stream;
-};
-
-enum snd_sst_buff_type {
- SST_BUF_USER = 1,
- SST_BUF_MMAP,
- SST_BUF_RAR,
-};
-
-struct snd_sst_mmap_buff_entry {
- unsigned int offset;
- unsigned int size;
-};
-
-struct snd_sst_mmap_buffs {
- unsigned int entries;
- enum snd_sst_buff_type type;
- struct snd_sst_mmap_buff_entry *buff;
-};
-
-struct snd_sst_buff_entry {
- void *buffer;
- unsigned int size;
-};
-
-struct snd_sst_buffs {
- unsigned int entries;
- __u8 type;
- struct snd_sst_buff_entry *buff_entry;
-};
-
-struct snd_sst_dbufs {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buffs *obufs;
-};
-
-struct snd_sst_tuning_params {
- __u8 type;
- __u8 str_id;
- __u8 size;
- __u8 rsvd;
- __aligned_u64 addr;
-} __attribute__ ((packed));
-/* IOCTLs defined here */
-/* SST MMF ioctls only */
-#define SNDRV_SST_STREAM_SET_PARAMS _IOR('L', 0x00, \
- struct snd_sst_stream_params *)
-#define SNDRV_SST_STREAM_GET_PARAMS _IOWR('L', 0x01, \
- struct snd_sst_get_stream_params *)
-#define SNDRV_SST_STREAM_GET_TSTAMP _IOWR('L', 0x02, __u64 *)
-#define SNDRV_SST_STREAM_DECODE _IOWR('L', 0x03, struct snd_sst_dbufs *)
-#define SNDRV_SST_STREAM_BYTES_DECODED _IOWR('L', 0x04, __u64 *)
-#define SNDRV_SST_STREAM_START _IO('A', 0x42)
-#define SNDRV_SST_STREAM_DROP _IO('A', 0x43)
-#define SNDRV_SST_STREAM_DRAIN _IO('A', 0x44)
-#define SNDRV_SST_STREAM_PAUSE _IOW('A', 0x45, int)
-#define SNDRV_SST_STREAM_RESUME _IO('A', 0x47)
-#define SNDRV_SST_MMAP_PLAY _IOW('L', 0x05, struct snd_sst_mmap_buffs *)
-#define SNDRV_SST_MMAP_CAPTURE _IOW('L', 0x06, struct snd_sst_mmap_buffs *)
-/*SST common ioctls */
-#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info *)
-#define SNDRV_SST_SET_VOL _IOW('L', 0x11, struct snd_sst_vol *)
-#define SNDRV_SST_GET_VOL _IOW('L', 0x12, struct snd_sst_vol *)
-#define SNDRV_SST_MUTE _IOW('L', 0x13, struct snd_sst_mute *)
-/* AM ioctls only */
-#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
-#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
- struct snd_sst_target_device *)
-/*DSP Ioctls on /dev/intel_sst_ctrl only*/
-#define SNDRV_SST_SET_ALGO _IOW('L', 0x30, struct snd_ppp_params *)
-#define SNDRV_SST_GET_ALGO _IOWR('L', 0x31, struct snd_ppp_params *)
-#define SNDRV_SST_TUNING_PARAMS _IOW('L', 0x32, struct snd_sst_tuning_params *)
-
-#endif /* __INTEL_SST_IOCTL_H__ */
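The header removed above defines both the ioctl numbers and the argument structures they carry, so a brief user-space sketch may help show how the two fit together. This is purely illustrative and not part of the deleted driver: the device node path, the choice of ioctls and the error handling are assumptions.

/* Hypothetical user-space sketch; not taken from the deleted driver.
 * The device node name is an assumption for illustration only. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "intel_sst_ioctl.h"	/* the header removed above */

int main(void)
{
	struct snd_sst_driver_info info;
	struct snd_sst_vol vol = {
		.stream_id = 1,
		.volume = -10,		/* in dB, per struct snd_sst_vol */
		.ramp_duration = 0,
		.ramp_type = 0,		/* default ramp */
	};
	int fd = open("/dev/intel_sst_ctrl", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SNDRV_SST_DRIVER_INFO, &info) == 0)
		printf("driver version %u, max pcm streams %u\n",
		       info.version, info.max_pcm_streams);
	if (ioctl(fd, SNDRV_SST_SET_VOL, &vol) < 0)
		perror("SNDRV_SST_SET_VOL");
	close(fd);
	return 0;
}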
diff --git a/drivers/staging/intel_sst/intel_sst_ipc.c b/drivers/staging/intel_sst/intel_sst_ipc.c
deleted file mode 100644
index 5c3444f6ab4..00000000000
--- a/drivers/staging/intel_sst/intel_sst_ipc.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * intel_sst_ipc.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all ipc functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_send_sound_card_type - send sound card type
- *
- * this function sends the sound card type to sst dsp engine
- */
-static void sst_send_sound_card_type(void)
-{
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_PMIC_TYPE, 0, 0);
- msg->header.part.data = sst_drv_ctx->pmic_vendor;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-
-/**
-* sst_post_message - Posts message to SST
-*
-* @work: Pointer to work structure
-*
-* This function is called by any component in driver which
-* wants to send an IPC message. This will post message only if
-* busy bit is free
-*/
-void sst_post_message(struct work_struct *work)
-{
- struct ipc_post *msg;
- union ipc_header header;
- union interrupt_reg imr;
- int retval = 0;
- imr.full = 0;
-
- /*To check if LPE is in stalled state.*/
- retval = sst_stalled();
- if (retval < 0) {
- pr_err("in stalled state\n");
- return;
- }
- pr_debug("post message called\n");
- spin_lock(&sst_drv_ctx->list_spin_lock);
-
- /* check list */
- if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
- /* list is empty, mask imr */
- pr_debug("Empty msg queue... masking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
-
- /* check busy bit */
- header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
- if (header.part.busy) {
- /* busy, unmask */
- pr_debug("Busy not free... unmasking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 0;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
- /* copy msg from list */
- msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
- struct ipc_post, node);
- list_del(&msg->node);
- pr_debug("Post message: header = %x\n", msg->header.full);
- pr_debug("size: = %x\n", msg->header.part.data);
- if (msg->header.part.large)
- memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
- msg->mailbox_data, msg->header.part.data);
- /* dummy register for shim workaround */
-
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, msg->header.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
-
- kfree(msg->mailbox_data);
- kfree(msg);
- return;
-}
-
-/*
- * sst_clear_interrupt - clear the SST FW interrupt
- *
- * This function clears the interrupt register after the interrupt
- * bottom half is complete allowing next interrupt to arrive
- */
-void sst_clear_interrupt(void)
-{
- union interrupt_reg isr;
- union interrupt_reg imr;
- union ipc_header clear_ipc;
-
- imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
- isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
- /* write 1 to clear */;
- isr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- /* Set IA done bit */
- clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCD);
- clear_ipc.part.busy = 0;
- clear_ipc.part.done = 1;
- clear_ipc.part.data = IPC_ACK_SUCCESS;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
- /* un mask busy interrupt */
- imr.part.busy_interrupt = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
-}
-
-void sst_restore_fw_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- struct ipc_post *msg = NULL;
-
- pr_debug("restore_fw_context\n");
- /*check cpu type*/
- if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
- return;
- /*not supported for rest*/
- if (!sst_drv_ctx->fw_cntx_size)
- return;
- /*nothing to restore*/
- pr_debug("restoring context......\n");
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = sst_drv_ctx->fw_cntx_size;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-/*
- * process_fw_init - process the FW init msg
- *
- * @msg: IPC message from FW
- *
- * This function processes the FW init msg from FW
- * marks FW state and prints debug info of loaded FW
- */
-int process_fw_init(struct sst_ipc_msg_wq *msg)
-{
- struct ipc_header_fw_init *init =
- (struct ipc_header_fw_init *)msg->mailbox;
- int retval = 0;
-
- pr_debug("*** FW Init msg came***\n");
- if (init->result) {
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_ERROR;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Init failed, Error %x\n", init->result);
- pr_err("FW Init failed, Error %x\n", init->result);
- retval = -init->result;
- return retval;
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_send_sound_card_type();
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- sst_drv_ctx->lpe_stalled = 0;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Version %02x.%02x.%02x\n", init->fw_version.major,
- init->fw_version.minor, init->fw_version.build);
- pr_debug("Build Type %x\n", init->fw_version.type);
- pr_debug(" Build date %s Time %s\n",
- init->build_info.date, init->build_info.time);
- sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
- sst_restore_fw_context();
- return retval;
-}
-/**
-* sst_process_message - Processes message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by ISR
-* It takes a msg from the process_queue and does the action based on the msg
-*/
-void sst_process_message(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
- int str_id = msg->header.part.str_id;
-
- pr_debug("IPC process for %x\n", msg->header.full);
-
- /* based on msg in list call respective handler */
- switch (msg->header.part.msg_id) {
- case IPC_SST_BUF_UNDER_RUN:
- case IPC_SST_BUF_OVER_RUN:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("Buffer under/overrun for %d\n",
- msg->header.part.str_id);
- pr_err("Got Underrun & not to send data...ignore\n");
- break;
-
- case IPC_SST_GET_PLAY_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream ;
-
- if (sst_validate_strid(str_id)) {
- pr_err("strid %d invalid\n", str_id);
- break;
- }
- /* call sst_play_frame */
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_play_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&sst_drv_ctx->streams[str_id].lock);
- sst_play_frame(msg->header.part.str_id);
- mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
- break;
- } else
- pr_err("sst_play_frames for Penwell!!\n");
-
- case IPC_SST_GET_CAPT_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream;
- /* call sst_capture_frame */
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_capture_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&stream->lock);
- if (stream->mmapped == false &&
- stream->src == SST_DRV) {
- pr_debug("waking up block for copy.\n");
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- } else
- sst_capture_frame(msg->header.part.str_id);
- mutex_unlock(&stream->lock);
- } else
- pr_err("sst_play_frames for Penwell!!\n");
- break;
-
- case IPC_IA_PRINT_STRING:
- pr_debug("been asked to print something by fw\n");
- /* TBD */
- break;
-
- case IPC_IA_FW_INIT_CMPLT: {
- /* send next data to FW */
- process_fw_init(msg);
- break;
- }
-
- case IPC_SST_STREAM_PROCESS_FATAL_ERR:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("codec fatal error %x stream %d...\n",
- msg->header.full, msg->header.part.str_id);
- pr_err("Dropping the stream\n");
- sst_drop_stream(msg->header.part.str_id);
- break;
- case IPC_IA_LPE_GETTING_STALLED:
- sst_drv_ctx->lpe_stalled = 1;
- break;
- case IPC_IA_LPE_UNSTALLED:
- sst_drv_ctx->lpe_stalled = 0;
- break;
- default:
- /* Illegal case */
- pr_err("Unhandled msg %x header %x\n",
- msg->header.part.msg_id, msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
-
-/**
-* sst_process_reply - Processes reply message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by ISR
-* It takes a reply msg from the response_queue and
-* does the action based on the msg
-*/
-void sst_process_reply(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
-
- int str_id = msg->header.part.str_id;
- struct stream_info *str_info;
-
- switch (msg->header.part.msg_id) {
- case IPC_IA_TARGET_DEV_SELECT:
- if (!msg->header.part.data) {
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->tgt_dev_blk.ret_code =
- -msg->header.part.data;
- }
-
- if (sst_drv_ctx->tgt_dev_blk.on == true) {
- sst_drv_ctx->tgt_dev_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALG_PARAMS: {
- pr_debug("sst:IPC_ALG_PARAMS response %x\n", msg->header.full);
- pr_debug("sst: data value %x\n", msg->header.part.data);
- pr_debug("sst: large value %x\n", msg->header.part.large);
-
- if (!msg->header.part.large) {
- if (!msg->header.part.data) {
- pr_debug("sst: alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- } else {
- pr_debug("sst: alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- }
-
- } else if (msg->header.part.data) {
- struct snd_ppp_params *mailbox_params, *get_params;
- char *params;
-
- pr_debug("sst: alg get success\n");
- mailbox_params = (struct snd_ppp_params *)msg->mailbox;
- get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
- if (get_params == NULL) {
- pr_err("sst: out of memory for ALG PARAMS");
- break;
- }
- memcpy_fromio(get_params, mailbox_params,
- sizeof(*get_params));
- get_params->params = kzalloc(mailbox_params->size,
- GFP_KERNEL);
- if (get_params->params == NULL) {
- kfree(get_params);
- pr_err("sst: out of memory for ALG PARAMS block");
- break;
- }
- params = msg->mailbox;
- params = params + sizeof(*mailbox_params) - sizeof(u32);
- memcpy_fromio(get_params->params, params,
- get_params->size);
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.data = get_params;
- }
-
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
-
- case IPC_IA_TUNING_PARAMS: {
- pr_debug("sst:IPC_TUNING_PARAMS resp: %x\n", msg->header.full);
- pr_debug("data value %x\n", msg->header.part.data);
- if (msg->header.part.large) {
- pr_debug("alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- } else {
- pr_debug("alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- }
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- }
-
- case IPC_IA_GET_FW_INFO: {
- struct snd_sst_fw_info *fw_info =
- (struct snd_sst_fw_info *)msg->mailbox;
- if (msg->header.part.large) {
- int major = fw_info->fw_version.major;
- int minor = fw_info->fw_version.minor;
- int build = fw_info->fw_version.build;
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
- major, minor, build);
- memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
- ((struct snd_sst_fw_info *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_info));
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->fw_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->fw_info_blk.on == true) {
- pr_debug("Memcopy succeeded\n");
- sst_drv_ctx->fw_info_blk.on = false;
- sst_drv_ctx->fw_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
- case IPC_IA_SET_STREAM_MUTE:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->mute_info_blk.ret_code =
- -msg->header.part.data;
-
- }
- if (sst_drv_ctx->mute_info_blk.on == true) {
- sst_drv_ctx->mute_info_blk.on = false;
- sst_drv_ctx->mute_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_SET_STREAM_VOL:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
-
- }
-
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_GET_STREAM_VOL:
- if (msg->header.part.large) {
- pr_debug("Large Msg Received Successfully\n");
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
- (void *) msg->mailbox,
- sizeof(struct snd_sst_vol));
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_GET_STREAM_PARAMS:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Get stream large success\n");
- memcpy_fromio(str_info->ctrl_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_get_stream_params));
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DECODE_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(str_info->data_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_decode_info));
- str_info->data_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->data_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DRAIN_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
-
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
-
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_DROP_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- struct snd_sst_drop_response *drop_resp =
- (struct snd_sst_drop_response *)msg->mailbox;
-
- pr_debug("Drop ret bytes %x\n", drop_resp->bytes);
-
- str_info->curr_bytes = drop_resp->bytes;
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ENABLE_RX_TIME_SLOT:
- if (!msg->header.part.data) {
- pr_debug("RX_TIME_SLOT success\n");
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->hs_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->hs_info_blk.on == true) {
- sst_drv_ctx->hs_info_blk.on = false;
- sst_drv_ctx->hs_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_PAUSE_STREAM:
- case IPC_IA_RESUME_STREAM:
- case IPC_IA_SET_STREAM_PARAMS:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (sst_validate_strid(str_id)) {
- pr_err(" stream id %d invalid\n", str_id);
- break;
- }
-
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_FREE_STREAM:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Stream %d freed\n", str_id);
- } else {
- pr_err("Free for %d ret error %x\n",
- str_id, msg->header.part.data);
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALLOC_STREAM: {
- /* map to stream, call play */
- struct snd_sst_alloc_response *resp =
- (struct snd_sst_alloc_response *)msg->mailbox;
- if (resp->str_type.result)
- pr_err("error alloc stream = %x\n",
- resp->str_type.result);
- sst_alloc_stream_response(str_id, resp);
- break;
- }
-
- case IPC_IA_PLAY_FRAMES:
- case IPC_IA_CAPT_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_debug("Ack for play/capt frames received\n");
- break;
-
- case IPC_IA_PREP_LIB_DNLD: {
- struct snd_sst_str_type *str_type =
- (struct snd_sst_str_type *)msg->mailbox;
- pr_debug("Prep Lib download %x\n",
- msg->header.part.msg_id);
- if (str_type->result)
- pr_err("Prep lib download %x\n", str_type->result);
- else
- pr_debug("Can download codec now...\n");
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- str_type->result, NULL);
- break;
- }
-
- case IPC_IA_LIB_DNLD_CMPLT: {
- struct snd_sst_lib_download_info *resp =
- (struct snd_sst_lib_download_info *)msg->mailbox;
- int retval = resp->result;
-
- pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
- if (resp->result) {
- pr_err("err in lib dload %x\n", resp->result);
- } else {
- pr_debug("Codec download complete...\n");
- pr_debug("codec Type %d Ver %d Built %s: %s\n",
- resp->dload_lib.lib_info.lib_type,
- resp->dload_lib.lib_info.lib_version,
- resp->dload_lib.lib_info.b_date,
- resp->dload_lib.lib_info.b_time);
- }
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- retval, NULL);
- break;
- }
-
- case IPC_IA_GET_FW_VERSION: {
- struct ipc_header_fw_init *version =
- (struct ipc_header_fw_init *)msg->mailbox;
- int major = version->fw_version.major;
- int minor = version->fw_version.minor;
- int build = version->fw_version.build;
- dev_info(&sst_drv_ctx->pci->dev,
- "INFO: ***LOADED SST FW VERSION*** = %02d.%02d.%02d\n",
- major, minor, build);
- break;
- }
- case IPC_IA_GET_FW_BUILD_INF: {
- struct sst_fw_build_info *build =
- (struct sst_fw_build_info *)msg->mailbox;
- pr_debug("Build date:%sTime:%s", build->date, build->time);
- break;
- }
- case IPC_IA_SET_PMIC_TYPE:
- break;
- case IPC_IA_START_STREAM:
- pr_debug("reply for START STREAM %x\n", msg->header.full);
- break;
-
- case IPC_IA_GET_FW_CTXT:
- pr_debug("reply for get fw ctxt %x\n", msg->header.full);
- if (msg->header.part.data)
- sst_drv_ctx->fw_cntx_size = 0;
- else
- sst_drv_ctx->fw_cntx_size = *sst_drv_ctx->fw_cntx;
- pr_debug("fw copied data %x\n", sst_drv_ctx->fw_cntx_size);
- sst_wake_up_alloc_block(
- sst_drv_ctx, str_id, msg->header.part.data, NULL);
- break;
- default:
- /* Illegal case */
- pr_err("process reply:default = %x\n", msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
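Nearly every sender in the file removed above follows the same dispatch pattern: allocate an ipc_post, fill its header, queue it on ipc_dispatch_list under list_spin_lock, then kick sst_post_message() so the head of the list is written to SST_IPCX once the busy bit is clear. A condensed sketch of that pattern, using the helpers defined in the driver; the wrapper name sst_queue_and_post() is illustrative, not a real driver symbol.

/* Illustrative helper only: condenses the dispatch pattern used throughout
 * intel_sst_ipc.c above. Assumes the driver's own headers and globals. */
static int sst_queue_and_post(u32 msg_id, u32 data)
{
	struct ipc_post *msg = NULL;

	if (sst_create_short_msg(&msg))		/* defined in intel_sst_pvt.c */
		return -ENOMEM;

	sst_fill_header(&msg->header, msg_id, 0, 0);
	msg->header.part.data = data;

	spin_lock(&sst_drv_ctx->list_spin_lock);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock(&sst_drv_ctx->list_spin_lock);

	/* sst_post_message() writes the head of the list to SST_IPCX
	 * once the busy bit is free */
	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
	return 0;
}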
diff --git a/drivers/staging/intel_sst/intel_sst_pvt.c b/drivers/staging/intel_sst/intel_sst_pvt.c
deleted file mode 100644
index e034bea56f1..00000000000
--- a/drivers/staging/intel_sst/intel_sst_pvt.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * intel_sst_pvt.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all private functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_get_block_stream - get a new block stream
- *
- * @sst_drv_ctx: Driver context structure
- *
- * This function assigns a block for the calls that don't have a stream context
- * yet; the blocks are used for waiting on the firmware's response to any operation.
- * Should be called with the stream lock held.
- */
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
-{
- int i;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_drv_ctx->alloc_block[i].sst_id == BLOCK_UNINIT) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = 0;
- sst_drv_ctx->alloc_block[i].sst_id = 0;
- break;
- }
- }
- if (i == MAX_ACTIVE_STREAM) {
- pr_err("max alloc_stream reached\n");
- i = -EBUSY; /* active stream limit reached */
- }
- return i;
-}
-
-/*
- * sst_wait_interruptible - wait on event
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits without a timeout (and is interruptible) for a
- * given block event
- */
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block)
-{
- int retval = 0;
-
- if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
- block->condition)) {
- /* event wake */
- if (block->ret_code < 0) {
- pr_err("stream failed %d\n", block->ret_code);
- retval = -EBUSY;
- } else {
- pr_debug("event up\n");
- retval = 0;
- }
- } else {
- pr_err("signal interrupted\n");
- retval = -EINTR;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_interruptible_timeout - wait on event, interruptible
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- * @timeout: time for wait on
- *
- * This function waits with a timeout value (and is interruptible) on a
- * given block event
- */
-int sst_wait_interruptible_timeout(
- struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout)
-{
- int retval = 0;
-
- pr_debug("sst_wait_interruptible_timeout - waiting....\n");
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->condition,
- msecs_to_jiffies(timeout))) {
- if (block->ret_code < 0)
- pr_err("stream failed %d\n", block->ret_code);
- else
- pr_debug("event up\n");
- retval = block->ret_code;
- } else {
- block->on = false;
- pr_err("timeout occurred...\n");
- /* setting the firmware state as uninit so that the
- firmware will get re-downloaded on the next request;
- this is because the firmware not responding for 5 sec
- is equivalent to some unrecoverable error of the FW
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_timeout - wait on event for timeout
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits with a timeout value (and is not interruptible) on a
- * given block event
- */
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block)
-{
- int retval = 0;
-
- /* NOTE:
- Observed that FW processes the alloc msg and replies even
- before the alloc thread has finished execution */
- pr_debug("waiting for %x, condition %x\n",
- block->sst_id, block->ops_block.condition);
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->ops_block.condition,
- msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
- /* event wake */
- pr_debug("Event wake %x\n", block->ops_block.condition);
- pr_debug("message ret: %d\n", block->ops_block.ret_code);
- retval = block->ops_block.ret_code;
- } else {
- block->ops_block.on = false;
- pr_err("Wait timed-out %x\n", block->ops_block.condition);
- /* setting the firmware state as uninit so that the
- firmware will get re-downloaded on the next request;
- this is because the firmware not responding for 5 sec
- is equivalent to some unrecoverable error of the FW
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-/*
- * sst_create_large_msg - create a large IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a large message to the firmware
- */
-int sst_create_large_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
-
- msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
- if (!msg->mailbox_data) {
- kfree(msg);
- pr_err("kzalloc mailbox_data failed");
- return -ENOMEM;
- }
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_create_short_msg - create a short IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a short message to the firmware
- */
-int sst_create_short_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
- msg->mailbox_data = NULL;
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_clean_stream - clean the stream context
- *
- * @stream: stream structure
- *
- * this function resets the stream contexts
- * should be called in free
- */
-void sst_clean_stream(struct stream_info *stream)
-{
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- stream->status = STREAM_UN_INIT;
- stream->prev = STREAM_UN_INIT;
- mutex_lock(&stream->lock);
- list_for_each_entry_safe(bufs, _bufs, &stream->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&stream->lock);
-
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM)
- kfree(stream->decode_ibuf);
-}
-
-/*
- * sst_wake_up_alloc_block - wake up waiting block
- *
- * @sst_drv_ctx: Driver context
- * @sst_id: stream id
- * @status: status of wakeup
- * @data: data pointer of wakeup
- *
- * This function wakes up a sleeping block event based on the response
- */
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data)
-{
- int i;
-
- /* Unblock with retval code */
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_id == sst_drv_ctx->alloc_block[i].sst_id) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = true;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = status;
- sst_drv_ctx->alloc_block[i].ops_block.data = data;
- wake_up(&sst_drv_ctx->wait_queue);
- break;
- }
- }
-}
-
-/*
- * sst_enable_rx_timeslot - Send msg to enable the RX time slot
- * @status: rx timeslot to be enabled
- *
- * This function is called when the RX timeslot is required to be enabled
- */
-int sst_enable_rx_timeslot(int status)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- pr_debug("ipc message sending: ENABLE_RX_TIME_SLOT\n");
- sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
- msg->header.part.data = status;
- sst_drv_ctx->hs_info_blk.condition = false;
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- sst_drv_ctx->hs_info_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
- return retval;
-}
-
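The waiting helpers above pair with the reply handlers in intel_sst_ipc.c: a caller arms a block (condition = false, on = true), posts its message, and sleeps; the reply handler fills ret_code, sets condition and wakes wait_queue. A minimal sketch of that request/response pattern, assuming the driver symbols above; the wrapper name sst_blocking_request() is illustrative only.

/* Illustrative sketch of the blocking request pattern built on the helpers
 * above; sst_blocking_request() is not a symbol from the deleted driver. */
static int sst_blocking_request(struct sst_block *block, struct ipc_post *msg)
{
	/* arm the block before the message is posted */
	block->condition = false;
	block->ret_code = 0;
	block->on = true;

	spin_lock(&sst_drv_ctx->list_spin_lock);
	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
	spin_unlock(&sst_drv_ctx->list_spin_lock);
	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);

	/* the reply handler fills ret_code, sets condition and wakes
	 * sst_drv_ctx->wait_queue; give up after SST_BLOCK_TIMEOUT ms */
	return sst_wait_interruptible_timeout(sst_drv_ctx, block,
					      SST_BLOCK_TIMEOUT);
}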
diff --git a/drivers/staging/intel_sst/intel_sst_stream.c b/drivers/staging/intel_sst/intel_sst_stream.c
deleted file mode 100644
index be4565e74f8..00000000000
--- a/drivers/staging/intel_sst/intel_sst_stream.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * intel_sst_stream.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_check_device_type - Check the medfield device type
- *
- * @device: Device to be checked
- * @num_chan: Number of channels queried
- * @pcm_slot: slot to be enabled for this device
- *
- * This checks the device against the map and calculates the pcm_slot value
- */
-int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
-{
- if (device >= MAX_NUM_STREAMS_MFLD) {
- pr_debug("device type invalid %d\n", device);
- return -EINVAL;
- }
- if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
- if (device == SND_SST_DEVICE_VIBRA && num_chan == 1)
- *pcm_slot = 0x10;
- else if (device == SND_SST_DEVICE_HAPTIC && num_chan == 1)
- *pcm_slot = 0x20;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 1)
- *pcm_slot = 0x04;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 2)
- *pcm_slot = 0x0C;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 3)
- *pcm_slot = 0x07;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
- *pcm_slot = 0x0F;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan > 4)
- *pcm_slot = 0x1F;
- else {
- pr_debug("No condition satisfied.. ret err\n");
- return -EINVAL;
- }
- } else {
- pr_debug("this stream state is not uni-init, is %d\n",
- sst_drv_ctx->streams[device].status);
- return -EBADRQC;
- }
- pr_debug("returning slot %x\n", *pcm_slot);
- return 0;
-}
-/**
- * get_mrst_stream_id - gets a new stream id for use
- *
- * This function searches the current streams and allocates an empty stream;
- * stream_lock is required to be held before calling this
- */
-static unsigned int get_mrst_stream_id(void)
-{
- int i;
-
- for (i = 1; i <= MAX_NUM_STREAMS_MRST; i++) {
- if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
- return i;
- }
- pr_debug("Didn't find empty stream for mrst\n");
- return -EBUSY;
-}
-
-/**
- * sst_alloc_stream - Send msg for a new stream ID
- *
- * @params: stream params
- * @stream_ops: operation of stream PB/capture
- * @codec: codec for stream
- * @device: device stream to be allocated for
- *
- * This function is called by any function which wants to start
- * a new stream. This also checks if an idle stream exists and, if so,
- * initializes that idle stream id for this request
- */
-int sst_alloc_stream(char *params, unsigned int stream_ops,
- u8 codec, unsigned int device)
-{
- struct ipc_post *msg = NULL;
- struct snd_sst_alloc_params alloc_param;
- unsigned int pcm_slot = 0, num_ch;
- int str_id;
- struct snd_sst_stream_params *sparams;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:entering sst_alloc_stream\n");
- pr_debug("SST DBG:%d %d %d\n", stream_ops, codec, device);
-
- BUG_ON(!params);
- sparams = (struct snd_sst_stream_params *)params;
- num_ch = sparams->uc.pcm_params.num_chan;
- /*check the device type*/
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- if (sst_check_device_type(device, num_ch, &pcm_slot))
- return -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = device;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST_DBG: slot %x\n", pcm_slot);
- } else {
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = get_mrst_stream_id();
- mutex_unlock(&sst_drv_ctx->stream_lock);
- if (str_id <= 0)
- return -EBUSY;
- }
- /*allocate device type context*/
- sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
- str_id, stream_ops, pcm_slot, device);
- /* send msg to FW to allocate a stream */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
- msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
- alloc_param.str_type.codec_type = codec;
- alloc_param.str_type.str_type = SST_STREAM_TYPE_MUSIC;
- alloc_param.str_type.operation = stream_ops;
- alloc_param.str_type.protected_str = 0; /* non drm */
- alloc_param.str_type.time_slots = pcm_slot;
- alloc_param.str_type.result = alloc_param.str_type.reserved = 0;
- memcpy(&alloc_param.stream_params, params,
- sizeof(struct snd_sst_stream_params));
-
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
- sizeof(alloc_param));
- str_info = &sst_drv_ctx->streams[str_id];
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("SST DBG:alloc stream done\n");
- return str_id;
-}
-
-
-/*
- * sst_alloc_stream_response - process alloc reply
- *
- * @str_id: stream id for which the stream has been allocated
- * @resp: the stream response from firmware
- *
- * This function is called by firmware as a response to a stream allocation
- * request
- */
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *resp)
-{
- int retval = 0;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- pr_debug("SST DEBUG: stream number given = %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (resp->str_type.result == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
- lib_dnld = kzalloc(sizeof(*lib_dnld), GFP_KERNEL);
- memcpy(lib_dnld, &resp->lib_dnld, sizeof(*lib_dnld));
- } else
- lib_dnld = NULL;
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.data = lib_dnld;
- str_info->ctrl_blk.condition = true;
- str_info->ctrl_blk.ret_code = resp->str_type.result;
- pr_debug("SST DEBUG: sst_alloc_stream_response: waking up.\n");
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return retval;
-}
-
-
-/**
-* sst_get_fw_info - Send msg to query for firmware configurations
-* @info: out param that holds the firmware configurations
-*
-* This function is called when the firmware configurations are queried
-*/
-int sst_get_fw_info(struct snd_sst_fw_info *info)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("SST DBG:sst_get_fw_info called\n");
-
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_FW_INFO, 0, 0);
- sst_drv_ctx->fw_info_blk.condition = false;
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- sst_drv_ctx->fw_info_blk.on = true;
- sst_drv_ctx->fw_info_blk.data = info;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->fw_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("SST ERR: error in fw_info = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-
-/**
-* sst_start_stream - Send msg to start a stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to start
-* an already initialized stream.
-*/
-int sst_start_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("sst_start_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT)
- return -EBADRQC;
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_START_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return retval;
-}
-
-/*
- * sst_pause_stream - Send msg for a pausing stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to pause
- * an already running stream.
- */
-int sst_pause_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_PAUSED)
- return 0;
- if (str_info->status == STREAM_RUNNING ||
- str_info->status == STREAM_INIT) {
- if (str_info->prev == STREAM_UN_INIT)
- return -EBADRQC;
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path is in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval == 0) {
- str_info->prev = str_info->status;
- str_info->status = STREAM_PAUSED;
- } else if (retval == SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-/**
- * sst_resume_stream - Send msg for resuming stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to resume
- * an already paused stream.
- */
-int sst_resume_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_RUNNING)
- return 0;
- if (str_info->status == STREAM_PAUSED) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- if (str_info->prev == STREAM_RUNNING)
- str_info->status = STREAM_RUNNING;
- else
- str_info->status = STREAM_INIT;
- str_info->prev = STREAM_PAUSED;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-
-/**
- * sst_drop_stream - Send msg for stopping stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to stop
- * a stream.
- */
-int sst_drop_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT &&
- str_info->status != STREAM_DECODE) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DROP_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- pr_debug("SST DBG:drop success\n");
- str_info->prev = STREAM_UN_INIT;
- str_info->status = STREAM_INIT;
- if (str_info->src != MAD_DRV) {
- mutex_lock(&str_info->lock);
- list_for_each_entry_safe(bufs, _bufs,
- &str_info->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&str_info->lock);
- }
- str_info->cumm_bytes += str_info->curr_bytes;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = retval;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
- return retval;
-}
-
-/**
-* sst_drain_stream - Send msg for draining stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to drain
-* a stream.
-*/
-int sst_drain_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_RUNNING &&
- str_info->status != STREAM_INIT &&
- str_info->status != STREAM_PAUSED) {
-                pr_err("SST ERR: BADRQC for stream = %d\n",
- str_info->status);
- return -EBADRQC;
- }
-
- if (str_info->status == STREAM_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else
- str_info->need_draining = true;
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- str_info->need_draining = false;
- return retval;
-}
-
-/**
- * sst_free_stream - Frees a stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to free
- * a stream.
- */
-int sst_free_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- str_info->prev = str_info->status;
- str_info->status = STREAM_UN_INIT;
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = 0;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- str_info->data_blk.on = true;
- str_info->data_blk.condition = false;
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- pr_debug("wait for free returned %d\n", retval);
- msleep(100);
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST DBG:Stream freed\n");
- } else {
- retval = -EBADRQC;
-                pr_debug("SST DBG:BADRQC for stream\n");
- }
-
- return retval;
-}
-
-
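Editor's note: the stream-control paths removed above (pause, resume, drop, drain, free) all share one shape: fill an IPC header, mark the stream's control or data block as pending, queue the message on ipc_dispatch_list under the spinlock, kick sst_post_message(), and then sleep until the reply handler sets the condition or a timeout fires. Below is a rough user-space model of that post-and-wait hand-off, using a pthread condition variable in place of the driver's wait queue; every name in it is invented for illustration and is not the driver's API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct sst_block_model {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool condition;		/* set by the "reply" side */
	int ret_code;
};

/* Caller side: block until the reply arrives or timeout_ms expires. */
static int wait_for_block(struct sst_block_model *blk, int timeout_ms)
{
	struct timespec ts;
	int err = 0, ret;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&blk->lock);
	while (!blk->condition && err == 0)
		err = pthread_cond_timedwait(&blk->cond, &blk->lock, &ts);
	ret = err ? -err : blk->ret_code;
	pthread_mutex_unlock(&blk->lock);
	return ret;
}

/* Reply side: roughly what the IPC response handler would do. */
static void *fake_firmware_reply(void *arg)
{
	struct sst_block_model *blk = arg;

	usleep(10000);			/* pretend the DSP is working */
	pthread_mutex_lock(&blk->lock);
	blk->ret_code = 0;		/* success */
	blk->condition = true;
	pthread_cond_signal(&blk->cond);
	pthread_mutex_unlock(&blk->lock);
	return NULL;
}

int main(void)
{
	struct sst_block_model blk = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t reply;

	/* "post the message", then wait for the reply with a timeout */
	pthread_create(&reply, NULL, fake_firmware_reply, &blk);
	printf("wait returned %d\n", wait_for_block(&blk, 1000));
	pthread_join(reply, NULL);
	return 0;
}

The real driver additionally tracks the block's ->on flag so stale replies can be ignored, and falls back to sst_clean_stream() when the firmware reports an invalid stream ID.
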
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
deleted file mode 100644
index 2be58c5cba0..00000000000
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ /dev/null
@@ -1,1273 +0,0 @@
-/*
- * intel_sst_stream_encoded.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../memrar/memrar.h"
-#endif
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-/**
- * sst_get_stream_params - Send msg to query for stream parameters
- * @str_id: stream ID for which the parameters are queried
- * @get_params: out parameter to which the parameters are copied
- *
- * This function is called when the stream parameters are queried
- */
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
- struct snd_sst_fw_get_stream_params *fw_params;
-
- pr_debug("get_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_UN_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
- if (!fw_params) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
- 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- str_info->ctrl_blk.data = (void *) fw_params;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- get_params->codec_params.result = retval;
- kfree(fw_params);
- return -EIO;
- }
- memcpy(&get_params->pcm_params, &fw_params->pcm_params,
- sizeof(fw_params->pcm_params));
- memcpy(&get_params->codec_params.sparams,
- &fw_params->codec_params,
- sizeof(fw_params->codec_params));
- get_params->codec_params.result = 0;
- get_params->codec_params.stream_id = str_id;
- get_params->codec_params.codec = str_info->codec;
- get_params->codec_params.ops = str_info->ops;
- get_params->codec_params.stream_type = str_info->str_type;
- kfree(fw_params);
- } else {
- pr_debug("Stream is not in the init state\n");
- }
- return retval;
-}
-
-/**
- * sst_set_stream_param - Send msg for setting stream parameters
- *
- * @str_id: stream id
- * @str_param: stream params
- *
- * This function sets stream params during runtime
- */
-int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- BUG_ON(!str_param);
- if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
- pr_err("Invalid operation\n");
- return -EINVAL;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- pr_debug("set_stream for %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EAGAIN;
- }
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_SET_STREAM_PARAMS, 1, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- msg->header.part.data = sizeof(u32) +
- sizeof(str_param->sparams);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
- sizeof(str_param->sparams));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval < 0) {
- retval = -EIO;
- sst_clean_stream(str_info);
- }
- } else {
- retval = -EBADRQC;
-                pr_err("BADRQC for stream\n");
- }
- return retval;
-}
-
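Editor's note: sst_set_stream_param() above, and sst_set_vol()/sst_set_mute() below, all build a "large" IPC message the same way: the first u32 of the mailbox is the header word itself, the payload follows immediately after it, and header.part.data records the combined length. A small stand-alone sketch of that layout, with invented types in place of the driver's ipc_post and mailbox structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_params {		/* stand-in for the driver's stream params */
	uint32_t sample_rate;
	uint16_t channels;
	uint16_t word_size;
};

/* Pack a header word followed by the payload, mirroring the driver's
 * memcpy of the header u32 and then the payload into mailbox_data. */
static size_t pack_large_msg(uint8_t *mailbox, size_t mailbox_len,
			     uint32_t header, const void *payload,
			     size_t payload_len)
{
	size_t total = sizeof(uint32_t) + payload_len;

	if (total > mailbox_len)
		return 0;
	memcpy(mailbox, &header, sizeof(uint32_t));
	memcpy(mailbox + sizeof(uint32_t), payload, payload_len);
	return total;		/* what header.part.data would carry */
}

int main(void)
{
	struct fake_params params = { 48000, 2, 16 };
	uint8_t mailbox[64];
	size_t len = pack_large_msg(mailbox, sizeof(mailbox),
				    0x12345678u, &params, sizeof(params));

	printf("packed %zu bytes (4 header + %zu payload)\n",
	       len, sizeof(params));
	return 0;
}
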
-/**
- * sst_get_vol - Get the premix gain or the gain of a stream
- *
- * @get_vol: output param through which the volume
- * structure is passed back to the user
- *
- * This function is called when the premix gain or stream gain is queried
- */
-int sst_get_vol(struct snd_sst_vol *get_vol)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct snd_sst_vol *fw_get_vol;
- int str_id = get_vol->stream_id;
-
- pr_debug("get vol called\n");
-
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_GET_STREAM_VOL, 0, str_id);
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
- if (!fw_get_vol) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval)
- retval = -EIO;
- else {
- pr_debug("stream id %d\n", fw_get_vol->stream_id);
- pr_debug("volume %d\n", fw_get_vol->volume);
- pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
- pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
- memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
- }
- return retval;
-}
-
-/**
- * sst_set_vol - Set the premix gain or the gain of a stream
- *
- * @set_vol: the volume structure that needs to be set
- *
- * This function is called when the premix gain or stream gain is to be set
- */
-int sst_set_vol(struct snd_sst_vol *set_vol)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set vol called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
- set_vol->stream_id);
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- sst_drv_ctx->vol_info_blk.data = set_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_vol = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-/**
- * sst_set_mute - Set premix mute or soft mute of a stream
- *
- * @set_mute: the mute structure that needs to be set
- *
- * This function is called when premix mute or stream mute is to be set
- */
-int sst_set_mute(struct snd_sst_mute *set_mute)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set mute called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
- set_mute->stream_id);
- sst_drv_ctx->mute_info_blk.condition = false;
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- sst_drv_ctx->mute_info_blk.on = true;
- sst_drv_ctx->mute_info_blk.data = set_mute;
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_mute,
- sizeof(*set_mute));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_mute = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-int sst_prepare_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC
- && slot->device_instance == 1) {
- /*music mode*/
- if (sst_drv_ctx->pmic_port_instance == 0)
- sst_drv_ctx->scard_ops->set_voice_port(
- DEACTIVATE);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- if (sst_drv_ctx->pmic_port_instance == 1)
- sst_drv_ctx->scard_ops->set_audio_port(
- DEACTIVATE);
- }
- return 0;
-}
-
-int sst_activate_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC &&
- slot->device_instance == 1) {
- /*music mode*/
- sst_drv_ctx->pmic_port_instance = 1;
- sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
- sst_drv_ctx->scard_ops->set_pcm_audio_params(
- slot->pcm_params.sfreq,
- slot->pcm_params.pcm_wd_sz,
- slot->pcm_params.num_chan);
- if (sst_drv_ctx->pb_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
- if (sst_drv_ctx->cp_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- sst_drv_ctx->pmic_port_instance = 0;
- sst_drv_ctx->scard_ops->set_voice_port(
- ACTIVATE);
- sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
- /*sst_drv_ctx->scard_ops->power_up_pmic_cp(0);*/
- }
- return 0;
-}
-
-int sst_parse_target(struct snd_sst_slot_info *slot)
-{
- int retval = 0;
-
- if (slot->action == SND_SST_PORT_ACTIVATE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_activate_target(slot);
- if (retval)
- pr_err("SST_Activate_target_fail\n");
- else
- pr_err("SST_Activate_target_pass\n");
- } else if (slot->action == SND_SST_PORT_PREPARE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_prepare_target(slot);
- if (retval)
- pr_err("SST_prepare_target_fail\n");
- else
- pr_err("SST_prepare_target_pass\n");
- } else {
- pr_err("slot_action : %d, device_type: %d\n",
- slot->action, slot->device_type);
- }
- return retval;
-}
-
-int sst_send_target(struct snd_sst_target_device *target)
-{
- int retval;
- struct ipc_post *msg;
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
- sst_drv_ctx->tgt_dev_blk.condition = false;
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- sst_drv_ctx->tgt_dev_blk.on = true;
-
- msg->header.part.data = sizeof(u32) + sizeof(*target);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), target,
- sizeof(*target));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("message sent- waiting\n");
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
- if (retval)
- pr_err("target device ipc failed = 0x%x\n", retval);
- return retval;
-
-}
-
-int sst_target_device_validate(struct snd_sst_target_device *target)
-{
- int retval = 0;
- int i;
-
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].device_type == SND_SST_DEVICE_PCM) {
- /*pcm device, check params*/
- if (target->devices[i].device_instance == 1) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S) &&
- (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- } else if (target->devices[i].device_instance == 0) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE2)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- if (target->devices[i].pcm_params.sfreq != 8000
- || target->devices[i].pcm_params.num_chan != 1
- || target->devices[i].pcm_params.pcm_wd_sz !=
- 16)
- goto err;
- } else {
-err:
- pr_err("i/p params incorrect\n");
- return -EINVAL;
- }
- }
- }
- return retval;
-}
-
-/**
- * sst_target_device_select - This function sets the target device configurations
- *
- * @target: this parameter holds the configurations to be set
- *
- * This function is called when the user layer wants to change the target
- * device's configurations
- */
-
-int sst_target_device_select(struct snd_sst_target_device *target)
-{
- int retval, i, prepare_count = 0;
-
- pr_debug("Target Device Select\n");
-
- if (target->device_route < 0 || target->device_route > 2) {
- pr_err("device route is invalid\n");
- return -EINVAL;
- }
-
- if (target->device_route != 0) {
- pr_err("Unsupported config\n");
- return -EIO;
- }
- retval = sst_target_device_validate(target);
- if (retval)
- return retval;
-
- retval = sst_send_target(target);
- if (retval)
- return retval;
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
- pr_debug("activate called in %d\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval)
- return retval;
- } else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
- pr_debug("PREPARE in %d, Forwarding\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval) {
- pr_err("Parse Target fail %d\n", retval);
- return retval;
- }
- pr_debug("Parse Target successful %d\n", retval);
- if (target->devices[i].device_type ==
- SND_SST_DEVICE_PCM)
- prepare_count++;
- }
- }
- if (target->devices[0].action == SND_SST_PORT_PREPARE &&
- prepare_count == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
-
- return retval;
-}
-#ifdef CONFIG_MRST_RAR_HANDLER
-/*This function gets the physical address of the secure memory from the handle*/
-static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
-{
- int retval = 0, rar_status = 0;
-
- rar_status = rar_handle_to_bus(buffers, count);
-
- if (count != rar_status) {
-                pr_err("RAR call failed\n");
- retval = -EIO;
- }
- if (buffers->info.type != RAR_TYPE_AUDIO) {
- pr_err("Invalid RAR type\n");
- return -EINVAL;
- }
- return retval;
-}
-
-#endif
-
-/* This function creates the scatter gather list to be sent to firmware to
-capture/playback data*/
-static int sst_create_sg_list(struct stream_info *stream,
- struct sst_frame_info *sg_list)
-{
- struct sst_stream_bufs *kbufs = NULL;
-#ifdef CONFIG_MRST_RAR_HANDLER
- struct RAR_buffer rar_buffers;
- int retval = 0;
-#endif
- int i = 0;
- list_for_each_entry(kbufs, &stream->bufs, node) {
- if (kbufs->in_use == false) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- pr_debug("DRM playback handling\n");
- rar_buffers.info.handle = (__u32)kbufs->addr;
- rar_buffers.info.size = kbufs->size;
- pr_debug("rar handle 0x%x size=0x%x\n",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
-
- if (retval)
- return retval;
- sg_list->addr[i].addr = rar_buffers.bus_address;
- /* rar_buffers.info.size; */
- sg_list->addr[i].size = (__u32)kbufs->size;
- pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
- , i, sg_list->addr[i].addr,
- sg_list->addr[i].size);
- }
-#endif
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM) {
- sg_list->addr[i].addr =
- virt_to_phys((void *)
- kbufs->addr + kbufs->offset);
- sg_list->addr[i].size = kbufs->size;
- pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
- , i , sg_list->addr[i].addr, kbufs->size);
- }
- stream->curr_bytes += sg_list->addr[i].size;
- kbufs->in_use = true;
- i++;
- }
- if (i >= MAX_NUM_SCATTER_BUFFERS)
- break;
- }
-
- sg_list->num_entries = i;
- pr_debug("sg list entries = %d\n", sg_list->num_entries);
- return i;
-}
-
-
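Editor's note: sst_create_sg_list() above walks the stream's buffer list, turns each not-yet-queued entry into a physical address/size pair, marks it in use, and stops at MAX_NUM_SCATTER_BUFFERS entries. A simplified user-space model of that walk over an array instead of a kernel list, with virt_to_phys() replaced by the plain pointer value (all names here are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SG_ENTRIES 8	/* stands in for MAX_NUM_SCATTER_BUFFERS */

struct buf {
	void *addr;
	uint32_t size;
	bool in_use;
};

struct sg_entry {
	uintptr_t addr;
	uint32_t size;
};

struct sg_list {
	struct sg_entry entry[MAX_SG_ENTRIES];
	int num_entries;
};

/* Fill sg from the buffers that are not yet queued, mark them in use,
 * and return the number of entries, as the driver's helper does. */
static int build_sg_list(struct buf *bufs, int nbufs, struct sg_list *sg,
			 uint32_t *bytes_queued)
{
	int i = 0, b;

	for (b = 0; b < nbufs && i < MAX_SG_ENTRIES; b++) {
		if (bufs[b].in_use)
			continue;
		sg->entry[i].addr = (uintptr_t)bufs[b].addr;
		sg->entry[i].size = bufs[b].size;
		*bytes_queued += bufs[b].size;
		bufs[b].in_use = true;
		i++;
	}
	sg->num_entries = i;
	return i;
}

int main(void)
{
	static char a[4096], b[4096];
	struct buf bufs[] = {
		{ a, sizeof(a), false },
		{ b, sizeof(b), true },		/* already queued */
	};
	struct sg_list sg;
	uint32_t queued = 0;

	printf("entries = %d, bytes = %u\n",
	       build_sg_list(bufs, 2, &sg, &queued), (unsigned)queued);
	return 0;
}
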
-/**
- * sst_play_frame - Send msg for sending stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to send data to be played out
- * to the firmware
- */
-int sst_play_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
- pr_debug("play frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- spin_lock(&stream->pcm_lock);
- list_del(&kbufs->node);
- spin_unlock(&stream->pcm_lock);
- kfree(kbufs);
- }
- }
- /* update bytes sent */
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer stream status %d\n", stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n", stream->status);
- if (stream->need_draining == true) {
- pr_debug("draining stream\n");
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
- 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return 0;
- }
-
- /* create list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return 0;
-
-}
-
-/**
- * sst_capture_frame - Send msg for capturing stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to capture data from the firmware
- */
-int sst_capture_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
-
- pr_debug("capture frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- list_del(&kbufs->node);
- kfree(kbufs);
- pr_debug("del node\n");
- }
- }
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer!!!!stream status %d\n",
- stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n",
- stream->status);
- if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
-
- }
- return 0;
- }
- /* create new sg list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
-
-
-        /* update bytes received */
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
-
- pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
- return 0;
-}
-
-/*This function is used to calculate the minimum size of input buffers given*/
-static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
-{
- int i, min_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size < min_val)
- min_val = bufs->buff_entry[i].size;
- }
- pr_debug("min_val = %d\n", min_val);
- return min_val;
-}
-
-static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
-{
- int i, max_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size > max_val)
- max_val = bufs->buff_entry[i].size;
- }
- pr_debug("max_val = %d\n", max_val);
- return max_val;
-}
-
-/*This function is used to allocate input and output buffers to be sent to
-the firmware that will take encoded data and return decoded data*/
-static int sst_allocate_decode_buf(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- unsigned int cum_input_given,
- unsigned int cum_output_given)
-{
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
-
- if (dbufs->ibufs->type == SST_BUF_RAR &&
- dbufs->obufs->type == SST_BUF_RAR) {
- if (dbufs->ibufs->entries == dbufs->obufs->entries)
- return 0;
- else {
-                                pr_err("RAR entries don't match\n");
- return -EINVAL;
- }
- } else
- str_info->decode_osize = cum_output_given;
- return 0;
-
- }
-#endif
- if (!str_info->decode_ibuf) {
- pr_debug("no i/p buffers, trying full size\n");
- str_info->decode_isize = cum_input_given;
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try max size\n");
- str_info->decode_isize = calculate_max_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(
- str_info->decode_isize, GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try min size\n");
- str_info->decode_isize = calculate_min_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- if (!str_info->decode_ibuf) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- str_info->idecode_alloc = str_info->decode_isize;
- }
- str_info->decode_osize = cum_output_given;
- if (str_info->decode_osize > sst_drv_ctx->mmap_len)
- str_info->decode_osize = sst_drv_ctx->mmap_len;
- return 0;
-}
-
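Editor's note: sst_allocate_decode_buf() above tries progressively smaller input buffers: first the sum of all user entries, then the largest single entry, then the smallest, and only then gives up. A compact stand-alone sketch of that fallback ladder, with a capped allocator used to simulate allocation failure (the cap and the names are invented for the example):

#include <stdio.h>
#include <stdlib.h>

#define FAKE_ALLOC_CAP (64 * 1024)	/* pretend anything bigger fails */

static void *try_alloc(size_t size)
{
	return size > FAKE_ALLOC_CAP ? NULL : malloc(size);
}

/* Try the cumulative size, then the largest entry, then the smallest. */
static void *alloc_decode_ibuf(const size_t *entry, int n, size_t *chosen)
{
	size_t total = 0, min = entry[0], max = entry[0];
	size_t tries[3];
	void *buf;
	int i;

	for (i = 0; i < n; i++) {
		total += entry[i];
		if (entry[i] < min)
			min = entry[i];
		if (entry[i] > max)
			max = entry[i];
	}
	tries[0] = total;
	tries[1] = max;
	tries[2] = min;

	for (i = 0; i < 3; i++) {
		buf = try_alloc(tries[i]);
		if (buf) {
			*chosen = tries[i];
			return buf;
		}
	}
	return NULL;			/* the driver returns -ENOMEM here */
}

int main(void)
{
	size_t entries[] = { 48 * 1024, 40 * 1024, 8 * 1024 };
	size_t chosen = 0;
	void *buf = alloc_decode_ibuf(entries, 3, &chosen);

	printf("allocated %zu bytes\n", buf ? chosen : 0);
	free(buf);
	return 0;
}
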
-/*This function is used to send the message to firmware to decode the data*/
-static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
- struct snd_sst_decode_info *dec_info)
-{
- struct ipc_post *msg = NULL;
- int retval = 0;
-
-        pr_debug("SST DBG:sst_send_decode_mess called\n");
-
- if (str_info->decode_ibuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_in.addr[0].addr =
- (unsigned long)str_info->decode_ibuf;
- dec_info->frames_in.addr[0].size =
- str_info->decode_isize;
-#endif
-
- } else {
- dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
- str_info->decode_ibuf);
- dec_info->frames_in.addr[0].size = str_info->decode_isize;
- }
-
-
- if (str_info->decode_obuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_out.addr[0].addr =
- (unsigned long)str_info->decode_obuf;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
-#endif
-
- } else {
- dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
- str_info->decode_obuf) ;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
- }
-
- dec_info->frames_in.num_entries = 1;
- dec_info->frames_out.num_entries = 1;
- dec_info->frames_in.rsrvd = 0;
- dec_info->frames_out.rsrvd = 0;
- dec_info->input_bytes_consumed = 0;
- dec_info->output_bytes_produced = 0;
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), dec_info,
- sizeof(*dec_info));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- str_info->data_blk.data = dec_info;
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- return retval;
-}
-
-#ifdef CONFIG_MRST_RAR_HANDLER
-static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
-        int retval = 0, i = *input_index;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- struct RAR_buffer rar_buffers;
- __u32 info;
- retval = copy_from_user((void *) &info,
- dbufs->ibufs->buff_entry[i].buffer,
- sizeof(__u32));
- if (retval) {
- pr_err("cpy from user fail\n");
- return -EAGAIN;
- }
- rar_buffers.info.type = dbufs->ibufs->type;
- rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
- rar_buffers.info.handle = info;
- pr_debug("rar in DnR(input buffer function)=0x%x size=0x%x",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval) {
- pr_debug("SST ERR: RAR API failed\n");
- return retval;
- }
- str_info->decode_ibuf =
- (void *) ((unsigned long) rar_buffers.bus_address);
-                pr_debug("RAR buf addr in DnR (input buffer function) 0x%lx\n",
- (unsigned long) str_info->decode_ibuf);
-                pr_debug("rar in DnR decode function/output b_add rar = 0x%lx\n",
- (unsigned long) rar_buffers.bus_address);
- *input_index = i + 1;
- str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- *in_copied = str_info->decode_isize;
- }
- return retval;
-}
-#endif
-/*This function is used to prepare the kernel input buffers with contents
-before sending for decode*/
-static int sst_prepare_input_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
- int i, cpy_size, retval = 0;
-
- pr_debug("input_index = %d, input entries = %d\n",
- *input_index, dbufs->ibufs->entries);
- for (i = *input_index; i < dbufs->ibufs->entries; i++) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- retval = sst_prepare_input_buffers_rar(str_info,
- dbufs, input_index, in_copied,
- input_index_valid_size, new_entry_flag);
- if (retval) {
-                        pr_err("RAR input buffer preparation failed\n");
- return -EIO;
- }
-#endif
- *input_index = i;
- if (*input_index_valid_size == 0)
- *input_index_valid_size =
- dbufs->ibufs->buff_entry[i].size;
- pr_debug("inout addr = %p, size = %d\n",
- dbufs->ibufs->buff_entry[i].buffer,
- *input_index_valid_size);
- pr_debug("decode_isize = %d, in_copied %d\n",
- str_info->decode_isize, *in_copied);
- if (*input_index_valid_size <=
- (str_info->decode_isize - *in_copied))
- cpy_size = *input_index_valid_size;
- else
- cpy_size = str_info->decode_isize - *in_copied;
-
- pr_debug("cpy size = %d\n", cpy_size);
- if (!dbufs->ibufs->buff_entry[i].buffer) {
- pr_err("i/p buffer is null\n");
- return -EINVAL;
- }
- pr_debug("Try copy To %p, From %p, size %d\n",
- str_info->decode_ibuf + *in_copied,
- dbufs->ibufs->buff_entry[i].buffer, cpy_size);
-
- retval =
- copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
- (void *) dbufs->ibufs->buff_entry[i].buffer,
- cpy_size);
- if (retval) {
- pr_err("copy from user failed\n");
- return -EIO;
- }
- *in_copied += cpy_size;
- *input_index_valid_size -= cpy_size;
- pr_debug("in buff size = %d, in_copied = %d\n",
- *input_index_valid_size, *in_copied);
- if (*input_index_valid_size != 0) {
- pr_debug("more input buffers left\n");
- dbufs->ibufs->buff_entry[i].buffer += cpy_size;
- break;
- }
- if (*in_copied == str_info->decode_isize &&
- *input_index_valid_size == 0 &&
- (i+1) <= dbufs->ibufs->entries) {
- pr_debug("all input buffers copied\n");
- *new_entry_flag = true;
- *input_index = i + 1;
- break;
- }
- }
- return retval;
-}
-
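Editor's note: sst_prepare_input_buffers() above packs as many user buffer entries as will fit into the contiguous kernel decode buffer, remembering where it stopped (the entry index plus the unconsumed remainder of a partially copied entry) so the next pass of the decode loop can resume there. A simplified user-space model of that gather step, with memcpy standing in for copy_from_user and invented names throughout:

#include <stdio.h>
#include <string.h>

struct entry {
	const char *data;
	size_t size;
};

/*
 * Copy entries into dst until dst is full or the entries run out.
 * On return *index is the next entry to use and *entry_left is how
 * much of that entry is still pending (0 if it was fully consumed).
 */
static size_t gather(char *dst, size_t dst_size,
		     const struct entry *entries, size_t nentries,
		     size_t *index, size_t *entry_left)
{
	size_t copied = 0;

	while (*index < nentries && copied < dst_size) {
		const struct entry *e = &entries[*index];
		size_t avail = *entry_left ? *entry_left : e->size;
		size_t off = e->size - avail;
		size_t chunk = avail < dst_size - copied ?
			       avail : dst_size - copied;

		memcpy(dst + copied, e->data + off, chunk);
		copied += chunk;
		if (chunk == avail) {		/* entry fully consumed */
			*entry_left = 0;
			(*index)++;
		} else {			/* partially consumed */
			*entry_left = avail - chunk;
			break;
		}
	}
	return copied;
}

int main(void)
{
	struct entry in[] = { { "abcdef", 6 }, { "ghij", 4 } };
	char dst[4];
	size_t index = 0, left = 0, n;

	/* two passes, 4 bytes each: "abcd" then "efgh" */
	n = gather(dst, sizeof(dst), in, 2, &index, &left);
	printf("pass1: %zu bytes, index=%zu left=%zu\n", n, index, left);
	n = gather(dst, sizeof(dst), in, 2, &index, &left);
	printf("pass2: %zu bytes, index=%zu left=%zu\n", n, index, left);
	return 0;
}
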
-/* This function is used to copy the decoded data from kernel buffers to
-the user output buffers with contents after decode*/
-static int sst_prepare_output_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *output_index, int output_size,
- int *out_copied)
-
-{
- int i, cpy_size, retval = 0;
- pr_debug("output_index = %d, output entries = %d\n",
- *output_index,
- dbufs->obufs->entries);
- for (i = *output_index; i < dbufs->obufs->entries; i++) {
- *output_index = i;
- pr_debug("output addr = %p, size = %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- dbufs->obufs->buff_entry[i].size);
- pr_debug("output_size = %d, out_copied = %d\n",
- output_size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size <
- (output_size - *out_copied))
- cpy_size = dbufs->obufs->buff_entry[i].size;
- else
- cpy_size = output_size - *out_copied;
- pr_debug("cpy size = %d\n", cpy_size);
- pr_debug("Try copy To: %p, From %p, size %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- if (retval) {
- pr_err("copy to user failed\n");
- return -EIO;
- } else
- pr_debug("copy to user passed\n");
- *out_copied += cpy_size;
- dbufs->obufs->buff_entry[i].size -= cpy_size;
- pr_debug("o/p buff size %d, out_copied %d\n",
- dbufs->obufs->buff_entry[i].size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size != 0) {
- *output_index = i;
- dbufs->obufs->buff_entry[i].buffer += cpy_size;
- break;
- } else if (*out_copied == output_size) {
- *output_index = i + 1;
- break;
- }
- }
- return retval;
-}
-
-/**
- * sst_decode - Send msg for decoding frames
- *
- * @str_id: ID of stream
- * @dbufs: param that holds the user input and output buffers and size
- *
- * This function is called to decode data from the firmware
- */
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
-{
- int retval = 0, i;
-        unsigned long long total_input = 0, total_output = 0;
-        unsigned int cum_input_given = 0, cum_output_given = 0;
- int copy_in_done = false, copy_out_done = false;
- int input_index = 0, output_index = 0;
- int input_index_valid_size = 0;
- int in_copied, out_copied;
- int new_entry_flag;
- u64 output_size;
- struct stream_info *str_info;
- struct snd_sst_decode_info dec_info;
- unsigned long long input_bytes, output_bytes;
-
- sst_drv_ctx->scard_ops->power_down_pmic();
- pr_debug("Powering_down_PMIC...\n");
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT) {
- pr_err("invalid stream state = %d\n",
- str_info->status);
- return -EINVAL;
- }
-
- str_info->prev = str_info->status;
- str_info->status = STREAM_DECODE;
-
- for (i = 0; i < dbufs->ibufs->entries; i++)
- cum_input_given += dbufs->ibufs->buff_entry[i].size;
- for (i = 0; i < dbufs->obufs->entries; i++)
- cum_output_given += dbufs->obufs->buff_entry[i].size;
-
- /* input and output buffer allocation */
- retval = sst_allocate_decode_buf(str_info, dbufs,
- cum_input_given, cum_output_given);
- if (retval) {
- pr_err("mem allocation failed, abort!!!\n");
- retval = -ENOMEM;
- goto finish;
- }
-
- str_info->decode_isize = str_info->idecode_alloc;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- str_info->decode_obuf_type = dbufs->obufs->type;
-
- while ((copy_out_done == false) && (copy_in_done == false)) {
- in_copied = 0;
- new_entry_flag = false;
-                retval = sst_prepare_input_buffers(str_info,
- dbufs, &input_index, &in_copied,
- &input_index_valid_size, &new_entry_flag);
- if (retval) {
- pr_err("prepare in buffers failed\n");
- goto finish;
- }
-
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- else {
- if (dbufs->obufs->type == SST_BUF_RAR) {
- struct RAR_buffer rar_buffers;
- __u32 info;
-
- pr_debug("DRM");
- retval = copy_from_user((void *) &info,
- dbufs->obufs->
- buff_entry[output_index].buffer,
- sizeof(__u32));
-
- rar_buffers.info.size = dbufs->obufs->
- buff_entry[output_index].size;
- rar_buffers.info.handle = info;
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval)
- return retval;
-
- str_info->decode_obuf = (void *)((unsigned long)
- rar_buffers.bus_address);
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
- str_info->decode_obuf_type = dbufs->obufs->type;
- pr_debug("DRM handling\n");
-                                pr_debug("o/p_add=0x%lx Size=0x%x\n",
- (unsigned long) str_info->decode_obuf,
- str_info->decode_osize);
- } else {
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
-
- }
- }
-#endif
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (str_info->decode_isize > in_copied) {
- str_info->decode_isize = in_copied;
- pr_debug("i/p size = %d\n",
- str_info->decode_isize);
- }
- }
-
-
- retval = sst_send_decode_mess(str_id, str_info, &dec_info);
- if (retval || dec_info.input_bytes_consumed == 0) {
- pr_err("SST ERR: mess failed or no input consumed\n");
- goto finish;
- }
- input_bytes = dec_info.input_bytes_consumed;
- output_bytes = dec_info.output_bytes_produced;
-
- pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
- in_copied, input_bytes, output_bytes);
- if (dbufs->obufs->type == SST_BUF_RAR) {
- output_index += 1;
- if (output_index == dbufs->obufs->entries) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- total_output += output_bytes;
- } else {
- out_copied = 0;
- output_size = output_bytes;
- retval = sst_prepare_output_buffers(str_info, dbufs,
- &output_index, output_size, &out_copied);
- if (retval) {
- pr_err("prep out buff fail\n");
- goto finish;
- }
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (in_copied != input_bytes) {
- int bytes_left = in_copied -
- input_bytes;
- pr_debug("bytes %d\n",
- bytes_left);
- if (new_entry_flag == true)
- input_index--;
- while (bytes_left) {
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buff_entry
- *buff_entry;
- unsigned int size_sent;
-
- ibufs = dbufs->ibufs;
- buff_entry =
- &ibufs->buff_entry[input_index];
- size_sent = buff_entry->size -\
- input_index_valid_size;
- if (bytes_left == size_sent) {
- bytes_left = 0;
- } else if (bytes_left <
- size_sent) {
- buff_entry->buffer +=
- (size_sent -
- bytes_left);
- buff_entry->size -=
- (size_sent -
- bytes_left);
- bytes_left = 0;
- } else {
- bytes_left -= size_sent;
- input_index--;
- input_index_valid_size =
- 0;
- }
- }
-
- }
- }
-
- total_output += out_copied;
- if (str_info->decode_osize != out_copied) {
- str_info->decode_osize -= out_copied;
- pr_debug("output size modified = %d\n",
- str_info->decode_osize);
- }
- }
- total_input += input_bytes;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (total_input == cum_input_given)
- copy_in_done = true;
- copy_out_done = true;
-
- } else {
- if (total_output == cum_output_given) {
- copy_out_done = true;
- pr_debug("all o/p cpy done\n");
- }
-
- if (total_input == cum_input_given) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- }
-
- pr_debug("copy_out = %d, copy_in = %d\n",
- copy_out_done, copy_in_done);
- }
-
-finish:
- dbufs->input_bytes_consumed = total_input;
- dbufs->output_bytes_produced = total_output;
- str_info->status = str_info->prev;
- str_info->prev = STREAM_DECODE;
- kfree(str_info->decode_ibuf);
- str_info->decode_ibuf = NULL;
- return retval;
-}
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
deleted file mode 100644
index 492b660246b..00000000000
--- a/drivers/staging/intel_sst/intelmid.c
+++ /dev/null
@@ -1,1022 +0,0 @@
-/*
- * intelmid.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/input.h>
-#include <sound/control.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/jack.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <linux/gpio.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-#include "intelmid_snd_control.h"
-#include "intelmid_adc_control.h"
-#include "intelmid.h"
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel MAD Sound card driver");
-MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
-
-
-static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
-static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
-
-module_param(card_index, int, 0444);
-MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
-module_param(card_id, charp, 0444);
-MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
-
-int sst_card_vendor_id;
-int intelmid_audio_interrupt_enable;/*checkpatch fix*/
-struct snd_intelmad *intelmad_drv;
-
-#define INFO(_cpu_id, _irq_cache, _size) \
- ((kernel_ulong_t)&(struct snd_intelmad_probe_info) { \
- .cpu_id = (_cpu_id), \
- .irq_cache = (_irq_cache), \
- .size = (_size), \
- })
-/* Data path functionalities */
-static struct snd_pcm_hardware snd_intelmad_stream = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_DOUBLE |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME |
- SNDRV_PCM_INFO_MMAP|
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_SYNC_START),
- .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
- SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
- SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
- .rates = (SNDRV_PCM_RATE_8000|
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000),
- .rate_min = MIN_RATE,
-
- .rate_max = MAX_RATE,
- .channels_min = MIN_CHANNEL,
- .channels_max = MAX_CHANNEL_AMIC,
- .buffer_bytes_max = MAX_BUFFER,
- .period_bytes_min = MIN_PERIOD_BYTES,
- .period_bytes_max = MAX_PERIOD_BYTES,
- .periods_min = MIN_PERIODS,
- .periods_max = MAX_PERIODS,
- .fifo_size = FIFO_SIZE,
-};
-
-
-/**
- * snd_intelmad_pcm_trigger - stream activities are handled here
- *
- * @substream: substream for which the stream function is called
- * @cmd: the stream command requested from the upper layer
- *
- * This function is called whenever a stream activity is invoked
- */
-static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- int ret_val = 0, str_id;
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- struct intel_sst_pcm_control *sst_ops;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
- WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
- sst_ops = intelmaddata->sstdrv_ops->pcm_control;
- str_id = stream->stream_info.str_id;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pr_debug("Trigger Start\n");
- ret_val = sst_ops->device_control(SST_SND_START, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- stream->substream = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("in stop\n");
- ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = DROPPED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("in pause\n");
- ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = PAUSED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("in pause release\n");
- ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- break;
- default:
- return -EINVAL;
- }
- return ret_val;
-}
-
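Editor's note: snd_intelmad_pcm_trigger() above is a straight mapping from ALSA trigger commands to SST control operations plus a stream-status update. The bookkeeping reduces to a small state transition; a stand-alone sketch of that mapping (the enum values and helper are invented, only the command-to-state mapping mirrors the code above):

#include <stdio.h>

enum cmd { CMD_START, CMD_STOP, CMD_PAUSE_PUSH, CMD_PAUSE_RELEASE };
enum state { ST_INIT, ST_RUNNING, ST_DROPPED, ST_PAUSED };

/* Returns the new stream status, or -1 for an unsupported command. */
static int apply_trigger(enum cmd cmd)
{
	switch (cmd) {
	case CMD_START:			/* SST_SND_START  */
		return ST_RUNNING;
	case CMD_STOP:			/* SST_SND_DROP   */
		return ST_DROPPED;
	case CMD_PAUSE_PUSH:		/* SST_SND_PAUSE  */
		return ST_PAUSED;
	case CMD_PAUSE_RELEASE:		/* SST_SND_RESUME */
		return ST_RUNNING;
	default:
		return -1;		/* the driver returns -EINVAL */
	}
}

int main(void)
{
	printf("start -> %d, pause -> %d\n",
	       apply_trigger(CMD_START), apply_trigger(CMD_PAUSE_PUSH));
	return 0;
}
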
-/**
- * snd_intelmad_pcm_prepare - internal preparation before starting a stream
- *
- * @substream: substream for which the function is called
- *
- * This function is called when a stream is started for internal preparation.
- */
-static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream;
- int ret_val = 0;
- struct snd_intelmad *intelmaddata;
-
- pr_debug("pcm_prepare called\n");
-
- WARN_ON(!substream);
- stream = substream->runtime->private_data;
- intelmaddata = snd_pcm_substream_chip(substream);
-        pr_debug("pb cnt = %d cap cnt = %d\n",
- intelmaddata->playback_cnt,
- intelmaddata->capture_cnt);
-
- if (stream->stream_info.str_id) {
- pr_debug("Prepare called for already set stream\n");
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_DROP, &stream->stream_info.str_id);
- return ret_val;
- }
-
- ret_val = snd_intelmad_alloc_stream(substream);
- if (ret_val < 0)
- return ret_val;
- stream->dbg_cum_bytes = 0;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt++;
- else
- intelmaddata->capture_cnt++;
- /* return back the stream id */
- snprintf(substream->pcm->id, sizeof(substream->pcm->id),
- "%d", stream->stream_info.str_id);
- pr_debug("stream id to user = %s\n",
- substream->pcm->id);
-
- ret_val = snd_intelmad_init_stream(substream);
- if (ret_val)
- return ret_val;
- substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
- return ret_val;
-}
-
-static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret_val;
-
- pr_debug("snd_intelmad_hw_params called\n");
- ret_val = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- memset(substream->runtime->dma_area, 0,
- params_buffer_bytes(hw_params));
-
- return ret_val;
-}
-
-static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
-{
- pr_debug("snd_intelmad_hw_free called\n");
- return snd_pcm_lib_free_pages(substream);
-}
-
-/**
- * snd_intelmad_pcm_pointer - return the current buffer pointer processed by hw
- *
- * @substream: substream for which the function is called
- *
- * This function is called by the ALSA framework to get the current hw buffer ptr
- * when a period has elapsed
- */
-static snd_pcm_uframes_t snd_intelmad_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- /* struct snd_pcm_runtime *runtime = substream->runtime; */
- struct mad_stream_pvt *stream;
- struct snd_intelmad *intelmaddata;
- int ret_val;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
- if (stream->stream_status == INIT)
- return 0;
-
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_BUFFER_POINTER, &stream->stream_info);
- if (ret_val) {
- pr_err("error code = 0x%x\n", ret_val);
- return ret_val;
- }
- pr_debug("samples reported out 0x%llx\n",
- stream->stream_info.buffer_ptr);
- pr_debug("Frame bits:: %d period_count :: %d\n",
- (int)substream->runtime->frame_bits,
- (int)substream->runtime->period_size);
-
- return stream->stream_info.buffer_ptr;
-
-}
-
-/**
- * snd_intelmad_close - free parameters when the stream is stopped
- *
- * @substream: substream for which the function is called
- *
- * This function is called by the ALSA framework when the stream is stopped
- */
-static int snd_intelmad_close(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- int ret_val = 0, str_id;
-
- WARN_ON(!substream);
-
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
-
- pr_debug("sst: snd_intelmad_close called for %d\n", str_id);
- intelmaddata = snd_pcm_substream_chip(substream);
-
- pr_debug("str id = %d\n", stream->stream_info.str_id);
- if (stream->stream_info.str_id) {
- /* SST API to actually stop/free the stream */
- ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt--;
- else
- intelmaddata->capture_cnt--;
- }
- pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
- intelmaddata->playback_cnt, intelmaddata->capture_cnt);
- kfree(substream->runtime->private_data);
- return ret_val;
-}
-
-/**
- * snd_intelmad_open - set runtime parameters during stream start
- *
- * @substream: substream for which the function is called
- * @type: audio device type
- *
- * This function is called by the ALSA framework when a stream is started
- */
-static int snd_intelmad_open(struct snd_pcm_substream *substream,
- enum snd_sst_audio_device_type type)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pcm_runtime *runtime;
- struct mad_stream_pvt *stream;
-
- WARN_ON(!substream);
-
- pr_debug("snd_intelmad_open called\n");
-
- intelmaddata = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
- /* set the runtime hw parameter with local snd_pcm_hardware struct */
- runtime->hw = snd_intelmad_stream;
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- /*
- * MRST firmware currently denies stereo recording requests.
- */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S16 |
- SNDRV_PCM_FMTBIT_U16);
- runtime->hw.channels_max = 1;
- }
- }
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- runtime->hw = snd_intelmad_stream;
- runtime->hw.rates = SNDRV_PCM_RATE_48000;
- runtime->hw.rate_min = MAX_RATE;
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 |
- SNDRV_PCM_FMTBIT_U24);
- if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC)
- runtime->hw.channels_max = MAX_CHANNEL_AMIC;
- else
- runtime->hw.channels_max = MAX_CHANNEL_DMIC;
-
- }
-        /* set up the internal data structure stream pointers based on
-           whether this is a playback or capture stream */
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
- stream->stream_info.str_id = 0;
- stream->device = type;
- stream->stream_status = INIT;
- runtime->private_data = stream;
- return snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
-}
-
-static int snd_intelmad_headset_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET);
-}
-
-static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_IHF);
-}
-
-static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA);
-}
-
-static int snd_intelmad_haptic_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC);
-}
-
-static struct snd_pcm_ops snd_intelmad_headset_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_ihf_ops = {
- .open = snd_intelmad_ihf_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_vibra_ops = {
- .open = snd_intelmad_vibra_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_haptic_ops = {
- .open = snd_intelmad_haptic_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_capture_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-int intelmad_get_mic_bias(void)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_get_mic_bias)
- return pmic_ops->pmic_get_mic_bias(intelmad_drv);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_get_mic_bias);
-
-int intelmad_set_headset_state(int state)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_set_headset_state)
- return pmic_ops->pmic_set_headset_state(state);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_set_headset_state);
-
-void sst_process_mad_jack_detection(struct work_struct *work)
-{
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_detect =
- container_of(work, struct mad_jack_msg_wq, wq);
-
- struct snd_intelmad *intelmaddata =
- mad_jack_detect->intelmaddata;
-
- if (!intelmaddata)
- return;
-
- interrupt_status = mad_jack_detect->intsts;
- if (intelmaddata->sstdrv_ops && intelmaddata->sstdrv_ops->scard_ops
- && intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb) {
- intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb(
- (void *)intelmaddata, interrupt_status);
- intelmaddata->sstdrv_ops->scard_ops->pmic_jack_enable();
- }
- kfree(mad_jack_detect);
-}
-/**
- * snd_intelmad_intr_handler- interrupt handler
- *
- * @irq : irq number of the interrupt received
- * @dev: device context
- *
- * This function is called when an interrupt is raised at the sound card
- */
-static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
-{
- struct snd_intelmad *intelmaddata =
- (struct snd_intelmad *)dev;
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_msg;
- memcpy_fromio(&interrupt_status,
- ((void *)(intelmaddata->int_base)),
- sizeof(u8));
-
-        mad_jack_msg = kzalloc(sizeof(*mad_jack_msg), GFP_ATOMIC);
-        if (!mad_jack_msg)
-                return IRQ_HANDLED;
-        mad_jack_msg->intsts = interrupt_status;
- mad_jack_msg->intelmaddata = intelmaddata;
- INIT_WORK(&mad_jack_msg->wq, sst_process_mad_jack_detection);
- queue_work(intelmaddata->mad_jack_wq, &mad_jack_msg->wq);
-
- return IRQ_HANDLED;
-}
-
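Editor's note: snd_intelmad_intr_handler() above keeps the hard-IRQ path short: it only snapshots the interrupt status register, wraps it in a small message, and queues the real jack-detection work onto a workqueue. Below is a rough user-space model of that top-half/bottom-half split, with a pthread worker and a single-slot queue standing in for the kernel workqueue; everything in it is invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending;
static unsigned char pending_status;

/* "Top half": record the status and wake the worker; do nothing slow. */
static void fake_irq(unsigned char status)
{
	pthread_mutex_lock(&lock);
	pending_status = status;
	pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* "Bottom half": the deferred jack-detection processing. */
static void *worker(void *arg)
{
	unsigned char status;

	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&cond, &lock);
	status = pending_status;
	pending = false;
	pthread_mutex_unlock(&lock);
	printf("processing jack event, status=0x%02x\n", status);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	fake_irq(0x42);			/* pretend the sound card interrupted */
	pthread_join(t, NULL);
	return 0;
}
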
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent , int status)
-{
-
- if (!jack) {
- pr_debug("MAD error jack empty\n");
-
- } else {
- snd_jack_report(jack, status);
- /* button pressed and released */
- if (buttonpressevent)
- snd_jack_report(jack, 0);
- pr_debug("MAD sending jack report Done !!!\n");
- }
-}
-
-static int __devinit snd_intelmad_register_irq(
- struct snd_intelmad *intelmaddata, unsigned int regbase,
- unsigned int regsize)
-{
- int ret_val;
- char *drv_name;
-
- pr_debug("irq reg regbase 0x%x, regsize 0x%x\n",
- regbase, regsize);
- intelmaddata->int_base = ioremap_nocache(regbase, regsize);
- if (!intelmaddata->int_base) {
- pr_err("ioremap of interrupt registers failed\n");
- return -ENOMEM;
- }
- pr_debug("irq = 0x%x\n", intelmaddata->irq);
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- drv_name = DRIVER_NAME_MFLD;
- else
- drv_name = DRIVER_NAME_MRST;
- ret_val = request_irq(intelmaddata->irq,
- snd_intelmad_intr_handler,
- IRQF_SHARED, drv_name,
- intelmaddata);
- if (ret_val)
- pr_err("cannot register IRQ\n");
- return ret_val;
-}
-
-static int __devinit snd_intelmad_sst_register(
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
- struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
- &snd_pmic_ops_fs,
- &snd_pmic_ops_mx,
- &snd_pmic_ops_nc,
- &snd_msic_ops
- };
-
- struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00};
-
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1);
- if (ret_val)
- return ret_val;
- sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
- pr_debug("original n extrated vendor id = 0x%x %d\n",
- vendor_addr.value, sst_card_vendor_id);
- if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
- pr_err("vendor card not supported!!\n");
- return -EIO;
- }
- } else
- sst_card_vendor_id = 0x3;
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
- BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]);
- intelmaddata->sstdrv_ops->scard_ops =
- intelmad_vendor_ops[sst_card_vendor_id];
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- intelmaddata->sstdrv_ops->scard_ops->pb_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->cap_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC;
- intelmaddata->sstdrv_ops->scard_ops->output_dev_id =
- STEREO_HEADPHONE;
- intelmaddata->sstdrv_ops->scard_ops->lineout_dev_id = NONE;
- }
-
- /* registering with SST driver to get access to SST APIs to use */
- ret_val = register_sst_card(intelmaddata->sstdrv_ops);
- if (ret_val) {
- pr_err("sst card registration failed\n");
- return ret_val;
- }
- sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
- intelmaddata->pmic_status = PMIC_UNINIT;
- return ret_val;
-}
-
-static void snd_intelmad_page_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-/* Driver Init/exit functionalities */
-/**
- * snd_intelmad_pcm_new - to set up pcm for the card
- *
- * @card: pointer to the sound card structure
- * @intelmaddata: pointer to internal context
- * @pb: playback count for this card
- * @cap: capture count for this card
- * @index: device index
- *
- * This function is called from probe function to set up pcm params
- * and functions
- */
-static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
- struct snd_intelmad *intelmaddata,
- unsigned int pb, unsigned int cap, unsigned int index)
-{
- int ret_val = 0;
- struct snd_pcm *pcm;
- char name[32] = INTEL_MAD;
- struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
-
- pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
- ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
- if (ret_val)
- return ret_val;
- /* setup the ops for playback and capture streams */
- switch (index) {
- case 0:
- pb_ops = &snd_intelmad_headset_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 1:
- pb_ops = &snd_intelmad_ihf_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 2:
- pb_ops = &snd_intelmad_vibra_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 3:
- pb_ops = &snd_intelmad_haptic_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- }
- if (pb)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops);
- if (cap)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops);
- /* setup private data which can be retrieved when required */
- pcm->private_data = intelmaddata;
- pcm->private_free = snd_intelmad_page_free;
- pcm->info_flags = 0;
- strncpy(pcm->name, card->shortname, strlen(card->shortname));
- /* allocate dma pages for ALSA stream operations */
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- MIN_BUFFER, MAX_BUFFER);
- return ret_val;
-}
-
-static int __devinit snd_intelmad_pcm(struct snd_card *card,
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
-
- WARN_ON(!card);
- WARN_ON(!intelmaddata);
- pr_debug("snd_intelmad_pcm called\n");
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1);
- if (ret_val)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2);
- if (ret_val)
- return ret_val;
- return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3);
-}
-
-/**
- * snd_intelmad_jack - to set up jack settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called to set up the jack and send jack events
- */
-static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
-{
- struct snd_jack *jack;
- int retval;
-
- pr_debug("snd_intelmad_jack called\n");
- jack = &intelmaddata->jack[0].jack;
- snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PHONE);
- retval = snd_jack_new(intelmaddata->card, "Intel(R) MID Audio Jack",
- SND_JACK_HEADPHONE | SND_JACK_HEADSET |
- SW_JACK_PHYSICAL_INSERT | SND_JACK_BTN_0
- | SND_JACK_BTN_1, &jack);
- pr_debug("snd_intelmad_jack called\n");
- if (retval < 0)
- return retval;
- snd_jack_report(jack, 0);
-
- jack->private_data = jack;
- intelmaddata->jack[0].jack = *jack;
-
- return retval;
-}
-
-/**
- * snd_intelmad_mixer - to set up mixer settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called from probe function to set up mixer controls
- */
-static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
-{
- struct snd_card *card;
- unsigned int idx;
- int ret_val = 0, max_controls = 0;
- char *mixername = "IntelMAD Controls";
- struct snd_kcontrol_new *controls;
-
- WARN_ON(!intelmaddata);
-
- card = intelmaddata->card;
- strncpy(card->mixername, mixername, sizeof(card->mixername)-1);
- /* add all widget controls and expose the same */
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- max_controls = MAX_CTRL_MFLD;
- controls = snd_intelmad_controls_mfld;
- } else {
- max_controls = MAX_CTRL_MRST;
- controls = snd_intelmad_controls_mrst;
- }
- for (idx = 0; idx < max_controls; idx++) {
- ret_val = snd_ctl_add(card,
- snd_ctl_new1(&controls[idx],
- intelmaddata));
- pr_debug("mixer[idx]=%d added\n", idx);
- if (ret_val) {
- pr_err("in adding of control index = %d\n", idx);
- break;
- }
- }
- return ret_val;
-}
-
-static int snd_intelmad_dev_free(struct snd_device *device)
-{
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!device);
-
- intelmaddata = device->device_data;
-
- pr_debug("snd_intelmad_dev_free called\n");
- unregister_sst_card(intelmaddata->sstdrv_ops);
-
- /* free allocated memory for internal context */
- destroy_workqueue(intelmaddata->mad_jack_wq);
- device->device_data = NULL;
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
-
- return 0;
-}
-
-static int __devinit snd_intelmad_create(
- struct snd_intelmad *intelmaddata,
- struct snd_card *card)
-{
- int ret_val;
- static struct snd_device_ops ops = {
- .dev_free = snd_intelmad_dev_free,
- };
-
- WARN_ON(!intelmaddata);
- WARN_ON(!card);
- /* ALSA api to register for the device */
- ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
- return ret_val;
-}
-
-/**
-* snd_intelmad_probe - function registered for init
-* @pdev: pointer to the device structure
-* This function is called when the device is initialized
-*/
-int __devinit snd_intelmad_probe(struct platform_device *pdev)
-{
- struct snd_card *card;
- int ret_val;
- struct snd_intelmad *intelmaddata;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct snd_intelmad_probe_info *info = (void *)id->driver_data;
-
- pr_debug("probe for %s cpu_id %d\n", pdev->name, info->cpu_id);
- pr_debug("rq_chache %x of size %x\n", info->irq_cache, info->size);
- if (!strcmp(pdev->name, DRIVER_NAME_MRST))
- pr_debug("detected MRST\n");
- else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
- pr_debug("detected MFLD\n");
- else {
- pr_err("detected unknown device abort!!\n");
- return -EIO;
- }
- if ((info->cpu_id < CPU_CHIP_LINCROFT) ||
- (info->cpu_id > CPU_CHIP_PENWELL)) {
- pr_err("detected unknown cpu_id abort!!\n");
- return -EIO;
- }
- /* allocate memory for saving internal context and working */
- intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
- if (!intelmaddata) {
- pr_debug("mem alloctn fail\n");
- return -ENOMEM;
- }
- intelmad_drv = intelmaddata;
-
- /* allocate memory for LPE API set */
- intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
- GFP_KERNEL);
- if (!intelmaddata->sstdrv_ops) {
- pr_err("mem allocation for ops fail\n");
- kfree(intelmaddata);
- return -ENOMEM;
- }
-
- intelmaddata->cpu_id = info->cpu_id;
- /* create a card instance with ALSA framework */
- ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
- if (ret_val) {
- pr_err("snd_card_create fail\n");
- goto free_allocs;
- }
-
- intelmaddata->pdev = pdev;
- intelmaddata->irq = platform_get_irq(pdev, 0);
- platform_set_drvdata(pdev, intelmaddata);
- intelmaddata->card = card;
- intelmaddata->card_id = card_id;
- intelmaddata->card_index = card_index;
- intelmaddata->master_mute = UNMUTE;
- intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
- strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
- strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- /* registering with LPE driver to get access to SST APIs to use */
- ret_val = snd_intelmad_sst_register(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_sst_register failed\n");
- goto set_null_data;
- }
-
- intelmaddata->pmic_status = PMIC_INIT;
-
- ret_val = snd_intelmad_pcm(card, intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_pcm failed\n");
- goto free_sst;
- }
-
- ret_val = snd_intelmad_mixer(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_mixer failed\n");
- goto free_card;
- }
-
- ret_val = snd_intelmad_jack(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_jack failed\n");
- goto free_card;
- }
- intelmaddata->adc_address = mid_initialize_adc();
-
- /*create work queue for jack interrupt*/
- INIT_WORK(&intelmaddata->mad_jack_msg.wq,
- sst_process_mad_jack_detection);
-
- intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
- if (!intelmaddata->mad_jack_wq)
- goto free_card;
-
- ret_val = snd_intelmad_register_irq(intelmaddata,
- info->irq_cache, info->size);
- if (ret_val) {
- pr_err("snd_intelmad_register_irq fail\n");
- goto free_mad_jack_wq;
- }
-
- /* internal function call to register device with ALSA */
- ret_val = snd_intelmad_create(intelmaddata, card);
- if (ret_val) {
- pr_err("snd_intelmad_create failed\n");
- goto set_pvt_data;
- }
- card->private_data = intelmaddata;
- snd_card_set_dev(card, &pdev->dev);
- ret_val = snd_card_register(card);
- if (ret_val) {
- pr_err("snd_card_register failed\n");
- goto set_pvt_data;
- }
- if (pdev->dev.platform_data) {
- int gpio_amp = *(int *)pdev->dev.platform_data;
- if (gpio_request_one(gpio_amp, GPIOF_OUT_INIT_LOW, "amp power"))
- gpio_amp = 0;
- intelmaddata->sstdrv_ops->scard_ops->gpio_amp = gpio_amp;
- }
-
- pr_debug("snd_intelmad_probe complete\n");
- return ret_val;
-
-set_pvt_data:
- card->private_data = NULL;
-free_mad_jack_wq:
- destroy_workqueue(intelmaddata->mad_jack_wq);
-free_card:
- snd_card_free(intelmaddata->card);
-free_sst:
- unregister_sst_card(intelmaddata->sstdrv_ops);
-set_null_data:
- platform_set_drvdata(pdev, NULL);
-free_allocs:
- pr_err("probe failed\n");
- snd_card_free(card);
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
- return ret_val;
-}
-
-
-static int snd_intelmad_remove(struct platform_device *pdev)
-{
- struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev);
-
- if (intelmaddata) {
- if (intelmaddata->sstdrv_ops->scard_ops->gpio_amp)
- gpio_free(intelmaddata->sstdrv_ops->scard_ops->gpio_amp);
- free_irq(intelmaddata->irq, intelmaddata);
- snd_card_free(intelmaddata->card);
- }
- intelmad_drv = NULL;
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-/*********************************************************************
- * Driver initialization and exit
- *********************************************************************/
-static const struct platform_device_id snd_intelmad_ids[] = {
- {DRIVER_NAME_MRST, INFO(CPU_CHIP_LINCROFT, AUDINT_BASE, 1)},
- {DRIVER_NAME_MFLD, INFO(CPU_CHIP_PENWELL, 0xFFFF7FCD, 1)},
- {"", 0},
-
-};
-
-static struct platform_driver snd_intelmad_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "intel_mid_sound_card",
- },
- .id_table = snd_intelmad_ids,
- .probe = snd_intelmad_probe,
- .remove = __devexit_p(snd_intelmad_remove),
-};
-
-/*
- * alsa_card_intelmad_init - driver init function
- *
- * This function is called when the driver module is inserted
- */
-static int __init alsa_card_intelmad_init(void)
-{
- pr_debug("mad_init called\n");
- return platform_driver_register(&snd_intelmad_driver);
-}
-
-/**
- * alsa_card_intelmad_exit - driver exit function
- *
- * This function is called when the driver module is removed
- */
-static void __exit alsa_card_intelmad_exit(void)
-{
- pr_debug("mad_exit called\n");
- platform_driver_unregister(&snd_intelmad_driver);
-}
-
-module_init(alsa_card_intelmad_init)
-module_exit(alsa_card_intelmad_exit)
-
diff --git a/drivers/staging/intel_sst/intelmid.h b/drivers/staging/intel_sst/intelmid.h
deleted file mode 100644
index 14a7ba078b7..00000000000
--- a/drivers/staging/intel_sst/intelmid.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * intelmid.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver header for Intel MAD chipset
- */
-#ifndef __INTELMID_H
-#define __INTELMID_H
-
-#include <linux/time.h>
-#include <sound/jack.h>
-
-#define DRIVER_NAME_MFLD "msic_audio"
-#define DRIVER_NAME_MRST "pmic_audio"
-#define DRIVER_NAME "intelmid_audio"
-#define PMIC_SOUND_IRQ_TYPE_MASK (1 << 15)
-#define AUDINT_BASE (0xFFFFEFF8 + (6 * sizeof(u8)))
-#define REG_IRQ
-/* values #defined */
-/* will differ for different hw - to be taken from config */
-#define MAX_DEVICES 1
-#define MIN_RATE 8000
-#define MAX_RATE 48000
-#define MAX_BUFFER (800*1024) /* for PCM */
-#define MIN_BUFFER (800*1024)
-#define MAX_PERIODS (1024*2)
-#define MIN_PERIODS 2
-#define MAX_PERIOD_BYTES MAX_BUFFER
-#define MIN_PERIOD_BYTES 32
-/*#define MIN_PERIOD_BYTES 160*/
-#define MAX_MUTE 1
-#define MIN_MUTE 0
-#define MONO_CNTL 1
-#define STEREO_CNTL 2
-#define MIN_CHANNEL 1
-#define MAX_CHANNEL_AMIC 2
-#define MAX_CHANNEL_DMIC 5
-#define FIFO_SIZE 0 /* fifo not being used */
-#define INTEL_MAD "Intel MAD"
-#define MAX_CTRL_MRST 8
-#define MAX_CTRL_MFLD 7
-#define MAX_CTRL 8
-#define MAX_VENDORS 4
-/* TODO +6 db */
-#define MAX_VOL 64
-/* TODO -57 db */
-#define MIN_VOL 0
-#define PLAYBACK_COUNT 1
-#define CAPTURE_COUNT 1
-#define ADC_ONE_LSB_MULTIPLIER 2346
-
-#define MID_JACK_HS_LONG_PRESS SND_JACK_BTN_0
-#define MID_JACK_HS_SHORT_PRESS SND_JACK_BTN_1
-
-extern int sst_card_vendor_id;
-
-struct mad_jack {
- struct snd_jack jack;
- int jack_status;
- int jack_dev_state;
- struct timeval buttonpressed;
- struct timeval buttonreleased;
-};
-
-struct mad_jack_msg_wq {
- u8 intsts;
- struct snd_intelmad *intelmaddata;
- struct work_struct wq;
-
-};
-
-struct snd_intelmad_probe_info {
- unsigned int cpu_id;
- unsigned int irq_cache;
- unsigned int size;
-};
-
-/**
- * struct snd_intelmad - intelmad driver structure
- *
- * @card: ptr to the card details
- * @card_index: sound card index
- * @card_id: sound card id detected
- * @sstdrv_ops: ptr to sst driver ops
- * @pdev: ptr to platform device
- * @irq: interrupt number detected
- * @pmic_status: Device status of sound card
- * @int_base: ptr to MMIO interrupt region
- * @output_sel: device selected as o/p
- * @input_sel: device selected as i/p
- * @master_mute: master mute status
- * @jack: jack status
- * @playback_cnt: active pb streams
- * @capture_cnt: active cp streams
- * @mad_jack_msg: wq struct for jack interrupt processing
- * @mad_jack_wq: wq for jack interrupt processing
- * @jack_prev_state: Previous state of jack detected
- * @cpu_id: current cpu id loaded for
- */
-struct snd_intelmad {
- struct snd_card *card; /* ptr to the card details */
- int card_index;/* card index */
- char *card_id; /* card id */
- struct intel_sst_card_ops *sstdrv_ops;/* ptr to sst driver ops */
- struct platform_device *pdev;
- int irq;
- int pmic_status;
- void __iomem *int_base;
- int output_sel;
- int input_sel;
- int lineout_sel;
- int master_mute;
- struct mad_jack jack[4];
- int playback_cnt;
- int capture_cnt;
- u16 adc_address;
- struct mad_jack_msg_wq mad_jack_msg;
- struct workqueue_struct *mad_jack_wq;
- u8 jack_prev_state;
- unsigned int cpu_id;
-};
-
-struct snd_control_val {
- int playback_vol_max;
- int playback_vol_min;
- int capture_vol_max;
- int capture_vol_min;
- int master_vol_max;
- int master_vol_min;
-};
-
-struct mad_stream_pvt {
- int stream_status;
- int stream_ops;
- struct snd_pcm_substream *substream;
- struct pcm_stream_info stream_info;
- ssize_t dbg_cum_bytes;
- enum snd_sst_device_type device;
-};
-
-enum mad_drv_status {
- INIT = 1,
- STARTED,
- RUNNING,
- PAUSED,
- DROPPED,
-};
-
-enum mad_pmic_status {
- PMIC_UNINIT = 1,
- PMIC_INIT,
-};
-enum _widget_ctrl {
- OUTPUT_SEL = 1,
- INPUT_SEL,
- PLAYBACK_VOL,
- PLAYBACK_MUTE,
- CAPTURE_VOL,
- CAPTURE_MUTE,
- MASTER_VOL,
- MASTER_MUTE
-};
-enum _widget_ctrl_mfld {
- LINEOUT_SEL_MFLD = 3,
-};
-enum hw_chs {
- HW_CH0 = 0,
- HW_CH1,
- HW_CH2,
- HW_CH3
-};
-
-void period_elapsed(void *mad_substream);
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream);
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream);
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-#define CPU_CHIP_LINCROFT 1 /* System running lincroft */
-#define CPU_CHIP_PENWELL 2 /* System running penwell */
-
-extern struct snd_control_val intelmad_ctrl_val[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mrst[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
-extern struct snd_pmic_ops *intelmad_vendor_ops[];
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent, int status);
-
-#endif /* __INTELMID_H */
diff --git a/drivers/staging/intel_sst/intelmid_adc_control.h b/drivers/staging/intel_sst/intelmid_adc_control.h
deleted file mode 100644
index 65d5c398876..00000000000
--- a/drivers/staging/intel_sst/intelmid_adc_control.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef __INTELMID_ADC_CONTROL_H__
-#define __INTELMID_ADC_CONTROL_H__
-/*
- * intelmid_adc_control.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: R Durgadadoss <r.durgadoss@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private ADC declarations for SST
- */
-
-
-#define MSIC_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
-
-#define MSIC_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - 1)
-
-/* ADC channel code values */
-#define AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
-
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero disables the ADC, non-zero enables the ADC
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static inline int configure_adc(int val)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
-
-
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (val)
- /* Enable and start the ADC */
- sc_access.value |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- else
- /* Just stop the ADC */
- sc_access.value &= (~MSIC_ADC_START);
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static inline int reset_stopbit(uint16_t addr)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
- sc_access.reg_addr = addr;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- sc_access.reg_addr = addr;
- sc_access.value = (sc_access.value) & 0xEF;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. Returns the index
- * of the first free channel on success, or an error code.
- *
- * Context: can sleep
- *
- */
-static inline int find_free_channel(void)
-{
- int ret;
- int i;
-
- struct sc_reg_access sc_access = {0,};
-
- /* check whether ADC is enabled */
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if ((sc_access.value & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
-
- sc_access.reg_addr = ADC_CHNL_START_ADDR + i;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (sc_access.value & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initialize the ADC
- *
- * Initialize the ADC channel used for audio detection. Can sleep.
- */
-static inline int mid_initialize_adc(void)
-{
- int base_addr, chnl_addr;
- int ret;
- static int channel_index;
- struct sc_reg_access sc_access = {0,};
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- pr_err("No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the AUDIO_DETECT_CODE with 0x10 */
- sc_access.reg_addr = base_addr;
- sc_access.value = AUDIO_DETECT_CODE | 0x10;
- ret = sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
- if (ret) {
- pr_err("unable to enable ADC");
- return ret;
- }
-
- chnl_addr = ADC_DATA_START_ADDR + 2 * channel_index;
- pr_debug("mid_initialize : %x", chnl_addr);
- configure_adc(1);
- return chnl_addr;
-}
-#endif
-
diff --git a/drivers/staging/intel_sst/intelmid_ctrl.c b/drivers/staging/intel_sst/intelmid_ctrl.c
deleted file mode 100644
index 19ec474b362..00000000000
--- a/drivers/staging/intel_sst/intelmid_ctrl.c
+++ /dev/null
@@ -1,921 +0,0 @@
-/*
- * intelmid_ctrl.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver handling mixer controls for Intel MAD chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <sound/core.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define HW_CH_BASE 4
-
-
-#define HW_CH_0 "Hw1"
-#define HW_CH_1 "Hw2"
-#define HW_CH_2 "Hw3"
-#define HW_CH_3 "Hw4"
-
-static char *router_dmics[] = { "DMIC1",
- "DMIC2",
- "DMIC3",
- "DMIC4",
- "DMIC5",
- "DMIC6"
- };
-
-static char *out_names_mrst[] = {"Headphones",
- "Internal speakers"};
-static char *in_names_mrst[] = {"AMIC",
- "DMIC",
- "HS_MIC"};
-static char *line_out_names_mfld[] = {"Headset",
- "IHF ",
- "Vibra1 ",
- "Vibra2 ",
- "NONE "};
-static char *out_names_mfld[] = {"Headset ",
- "EarPiece "};
-static char *in_names_mfld[] = {"AMIC",
- "DMIC"};
-
-struct snd_control_val intelmad_ctrl_val[MAX_VENDORS] = {
- {
- .playback_vol_max = 63,
- .playback_vol_min = 0,
- .capture_vol_max = 63,
- .capture_vol_min = 0,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -20,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -31,
- .master_vol_max = 0,
- .master_vol_min = -126,
- },
-};
-
-/* control path functionalities */
-
-static inline int snd_intelmad_volume_info(struct snd_ctl_elem_info *uinfo,
- int control_type, int max, int min)
-{
- WARN_ON(!uinfo);
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = control_type;
- uinfo->value.integer.min = min;
- uinfo->value.integer.max = max;
- return 0;
-}
-
-/**
-* snd_intelmad_mute_info - provides information about the mute controls
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests the control's info
-*/
-static int snd_intelmad_mute_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- WARN_ON(!uinfo);
- WARN_ON(!kcontrol);
-
- /* set up the mute as a boolean mono control with min-max values */
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->count = MONO_CNTL;
- uinfo->value.integer.min = MIN_MUTE;
- uinfo->value.integer.max = MAX_MUTE;
- return 0;
-}
-
-/**
-* snd_intelmad_capture_volume_info - provides info about the volume control
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests the control's info
-*/
-static int snd_intelmad_capture_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, MONO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_min);
- return 0;
-}
-
-/**
-* snd_intelmad_playback_volume_info - provides info about the volume control
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests the control's info
-*/
-static int snd_intelmad_playback_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_min);
- return 0;
-}
-
-static int snd_intelmad_master_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_min);
- return 0;
-}
-
-/**
-* snd_intelmad_device_info_mrst - provides information about the devices available
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the device's info needs
-* to be filled
-*
-* This function is called when a mixer application requests the device's info
-*/
-static int snd_intelmad_device_info_mrst(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
- /* setup device select as drop down controls with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mrst);
- else
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mrst);
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- strncpy(uinfo->value.enumerated.name,
- in_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- return 0;
-}
-
-static int snd_intelmad_device_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_pmic_ops *scard_ops;
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- /* setup device select as drop down controls with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mfld);
- else if (kcontrol->id.numid == INPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mfld);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD) {
- uinfo->value.enumerated.items = ARRAY_SIZE(line_out_names_mfld);
- scard_ops->line_out_names_cnt = uinfo->value.enumerated.items;
- } else
- return -EINVAL;
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == INPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- in_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- strncpy(uinfo->value.enumerated.name,
- line_out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- return -EINVAL;
- return 0;
-}
-
-/**
-* snd_intelmad_volume_get - gets the current volume for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when the .get function of a control is invoked from an app
-*/
-static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int ret_val = 0, cntl_list[2] = {0,};
- int value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_volume_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_PB_VOL;
- cntl_list[1] = PMIC_SND_LEFT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_LEFT_MASTER_VOL;
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_vol(cntl_list[0], &value);
- uval->value.integer.value[0] = value;
-
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL) {
- ret_val = scard_ops->get_vol(cntl_list[1], &value);
- uval->value.integer.value[1] = value;
- }
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_get - gets the current mute status for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when the .get function of a control is invoked from an app
-*/
-static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int cntl_list = 0, ret_val = 0;
- u8 value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("Mute_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE)
- cntl_list = PMIC_SND_LEFT_HP_MUTE;
- else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE))
- cntl_list = PMIC_SND_LEFT_SPEAKER_MUTE;
- break;
-
- case CAPTURE_MUTE:
- if (intelmaddata->input_sel == DMIC)
- cntl_list = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- uval->value.integer.value[0] = intelmaddata->master_mute;
- return 0;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_mute(cntl_list, &value);
- uval->value.integer.value[0] = value;
- return ret_val;
-}
-
-/**
-* snd_intelmad_volume_set - sets the volume control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked from an app
-*/
-static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int ret_val, cntl_list[2] = {0,};
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("volume set called:%ld %ld\n",
- uval->value.integer.value[0],
- uval->value.integer.value[1]);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_LEFT_PB_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_LEFT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_MASTER_VOL;
- break;
-
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_vol(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL)
- ret_val = scard_ops->set_vol(cntl_list[1],
- uval->value.integer.value[1]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_set - sets the mute control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked from an app
-*/
-static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int cntl_list[2] = {0,}, ret_val;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_mute_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- kcontrol->private_value = uval->value.integer.value[0];
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE) {
- cntl_list[0] = PMIC_SND_LEFT_HP_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_HP_MUTE;
- } else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE)) {
- cntl_list[0] = PMIC_SND_LEFT_SPEAKER_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_SPEAKER_MUTE;
- }
- break;
-
- case CAPTURE_MUTE:/*based on sel device mute the i/p dev*/
- if (intelmaddata->input_sel == DMIC)
- cntl_list[0] = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list[0] = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list[0] = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- cntl_list[0] = PMIC_SND_MUTE_ALL;
- intelmaddata->master_mute = uval->value.integer.value[0];
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_mute(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_MUTE)
- ret_val = scard_ops->set_mute(cntl_list[1],
- uval->value.integer.value[0]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_device_get - get the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* to be filled
-*
-* This function is called when the .get function of a control is invoked from an app
-*/
-static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- pr_debug("device_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->output_dev_id;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- uval->value.enumerated.item[0] =
- scard_ops->lineout_dev_id;
- else
- return -EINVAL;
- } else if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- /* There is a mismatch here.
- * ALSA expects 1 for internal speaker.
- * But internally, we may give 2 for internal speaker.
- */
- if (scard_ops->output_dev_id == MONO_EARPIECE ||
- scard_ops->output_dev_id == INTERNAL_SPKR)
- uval->value.enumerated.item[0] = MONO_EARPIECE;
- else if (scard_ops->output_dev_id == STEREO_HEADPHONE)
- uval->value.enumerated.item[0] =
- STEREO_HEADPHONE;
- else
- return -EINVAL;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else
- return -EINVAL;
- } else
- uval->value.enumerated.item[0] = kcontrol->private_value;
- return 0;
-}
-
-/**
-* snd_intelmad_device_set - set the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when the .set function of a control is invoked from an app
-*/
-static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int ret_val = 0, vendor, status;
- struct intel_sst_pcm_control *pcm_control;
-
- pr_debug("snd_intelmad_device_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
- status = -1;
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- /* store value with driver */
- kcontrol->private_value = uval->value.enumerated.item[0];
-
- switch (kcontrol->id.numid) {
- case OUTPUT_SEL:
- ret_val = scard_ops->set_output_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->output_sel = uval->value.enumerated.item[0];
- break;
- case INPUT_SEL:
- vendor = intelmaddata->sstdrv_ops->vendor_id;
- if ((vendor == SND_MX) || (vendor == SND_FS)) {
- pcm_control = intelmaddata->sstdrv_ops->pcm_control;
- if (uval->value.enumerated.item[0] == HS_MIC)
- status = 1;
- else
- status = 0;
- pcm_control->device_control(
- SST_ENABLE_RX_TIME_SLOT, &status);
- }
- ret_val = scard_ops->set_input_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->input_sel = uval->value.enumerated.item[0];
- break;
- case LINEOUT_SEL_MFLD:
- ret_val = scard_ops->set_lineout_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->lineout_sel = uval->value.enumerated.item[0];
- break;
- default:
- return -EINVAL;
- }
- kcontrol->private_value = uval->value.enumerated.item[0];
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- uval->value.enumerated.item[0] = kcontrol->private_value;
- else
- pr_debug(" CPU id = 0x%xis invalid.\n",
- intelmaddata->cpu_id);
- return 0;
-}
-
-void msic_set_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics |= (1 << index);
-}
-
-void msic_clear_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics &= ~(1 << index);
-}
-
-int msic_is_set_bit(u8 index, unsigned int *available_dmics)
-{
- int ret_val;
-
- ret_val = (*available_dmics & (1 << index));
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int i, dmic_index;
- unsigned int available_dmics;
- int jump_count;
- int max_dmics = ARRAY_SIZE(router_dmics);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- available_dmics = scard_ops->available_dmics;
-
- if (kcontrol->private_value > uval->value.enumerated.item[0]) {
- pr_debug("jump count -1.\n");
- jump_count = -1;
- } else {
- pr_debug("jump count 1.\n");
- jump_count = 1;
- }
-
- dmic_index = uval->value.enumerated.item[0];
- pr_debug("set function. dmic_index = %d, avl_dmic = 0x%x\n",
- dmic_index, available_dmics);
- for (i = 0; i < max_dmics; i++) {
- pr_debug("set function. loop index = 0x%x. dmic_index = 0x%x\n",
- i, dmic_index);
- if (!msic_is_set_bit(dmic_index, &available_dmics)) {
- msic_clear_bit(kcontrol->private_value,
- &available_dmics);
- msic_set_bit(dmic_index, &available_dmics);
- kcontrol->private_value = dmic_index;
- scard_ops->available_dmics = available_dmics;
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
- scard_ops->set_hw_dmic_route
- (kcontrol->id.numid-HW_CH_BASE);
- return 0;
- }
-
- dmic_index += jump_count;
-
- if (dmic_index > (max_dmics - 1) && jump_count == 1) {
- pr_debug("Resettingthe dmic index to 0.\n");
- dmic_index = 0;
- } else if (dmic_index == -1 && jump_count == -1) {
- pr_debug("Resetting the dmic index to 5.\n");
- dmic_index = max_dmics - 1;
- }
- }
-
- return -EINVAL;
-}
-
-static int snd_intelmad_device_dmic_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = ARRAY_SIZE(router_dmics);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strncpy(uinfo->value.enumerated.name,
- router_dmics[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
-
-
- msic_set_bit(kcontrol->private_value, &scard_ops->available_dmics);
- pr_debug("info function. avl_dmic = 0x%x",
- scard_ops->available_dmics);
-
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
-
- return 0;
-}
-
-struct snd_kcontrol_new snd_intelmad_controls_mrst[MAX_CTRL] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_playback_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_capture_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_master_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-};
-
-struct snd_kcontrol_new
-snd_intelmad_controls_mfld[MAX_CTRL_MFLD] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Line out",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 0
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_1,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 1
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_2,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 2
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_3,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 3
-}
-};
-
diff --git a/drivers/staging/intel_sst/intelmid_msic_control.c b/drivers/staging/intel_sst/intelmid_msic_control.c
deleted file mode 100644
index 70cdb169781..00000000000
--- a/drivers/staging/intel_sst/intelmid_msic_control.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * intelmid_msic_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2010 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of msic vendors
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <linux/delay.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include <linux/input.h>
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define AUDIOMUX12 0x24c
-#define AUDIOMUX34 0x24d
-
-static int msic_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- /* dmic configuration */
- {0x241, 0x85, 0},
- {0x242, 0x02, 0},
- /* audio paths config */
- {0x24C, 0x10, 0},
- {0x24D, 0x32, 0},
- /* PCM2 interface slots */
- /* preconfigured slots for 0-5 both tx, rx */
- {0x272, 0x10, 0},
- {0x273, 0x32, 0},
- {0x274, 0xFF, 0},
- {0x275, 0x10, 0},
- {0x276, 0x32, 0},
- {0x277, 0x54, 0},
- /*Sinc5 decimator*/
- {0x24E, 0x28, 0},
- /*TI vibra w/a settings*/
- {0x384, 0x80, 0},
- {0x385, 0x80, 0},
- {0x267, 0x00, 0},
- {0x261, 0x00, 0},
- /* pcm port setting */
- {0x278, 0x00, 0},
- {0x27B, 0x01, 0},
- {0x27C, 0x0a, 0},
- /* Set vol HSLRVOLCTRL, IHFVOL */
- {0x259, 0x08, 0},
- {0x25A, 0x08, 0},
- {0x25B, 0x08, 0},
- {0x25C, 0x08, 0},
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- /* amic configuration */
- {0x249, 0x01, 0x0},
- {0x24A, 0x01, 0x0},
- /* unmask ocaudio/accdet interrupts */
- {0x1d, 0x00, 0x00},
- {0x1e, 0x00, 0x00},
- };
- snd_msic_ops.card_status = SND_CARD_INIT_DONE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 28);
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.cap_on = 0;
- snd_msic_ops.input_dev_id = DMIC; /*def dev*/
- snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
- snd_msic_ops.jack_interrupt_status = false;
- pr_debug("msic init complete!!\n");
- return 0;
-}
-static int msic_line_out_restore(u8 value)
-{
- struct sc_reg_access hs_drv_en[] = {
- {0x25d, 0x03, 0x03},
- };
- struct sc_reg_access ep_drv_en[] = {
- {0x25d, 0x40, 0x40},
- };
- struct sc_reg_access ihf_drv_en[] = {
- {0x25d, 0x0c, 0x0c},
- };
- struct sc_reg_access vib1_drv_en[] = {
- {0x25d, 0x10, 0x10},
- };
- struct sc_reg_access vib2_drv_en[] = {
- {0x25d, 0x20, 0x20},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_lineout_restore_lineout_dev:%d\n", value);
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET-restore\n");
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE)
- retval = sst_sc_reg_access(hs_drv_en,
- PMIC_READ_MODIFY, 1);
- else
- retval = sst_sc_reg_access(ep_drv_en,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF-restore\n");
- retval = sst_sc_reg_access(ihf_drv_en, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1-restore\n");
- retval = sst_sc_reg_access(vib1_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2-restore\n");
- retval = sst_sc_reg_access(vib2_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE-restore\n");
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-static int msic_get_lineout_prvstate(void)
-{
- struct sc_reg_access hs_ihf_drv[2] = {
- {0x257, 0x0, 0x0},
- {0x25d, 0x0, 0x0},
- };
- struct sc_reg_access vib1drv[2] = {
- {0x264, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- struct sc_reg_access vib2drv[2] = {
- {0x26A, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- int retval = 0, drv_en, dac_en, dev_id, mask;
- for (dev_id = 0; dev_id < snd_msic_ops.line_out_names_cnt; dev_id++) {
- switch (dev_id) {
- case HEADSET:
- pr_debug("msic_get_lineout_prvs_state: HEADSET\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK0|MASK1);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = ((MASK0|MASK1)|MASK6);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = HEADSET;
- return retval;
- }
- break;
- case IHF:
- pr_debug("msic_get_lineout_prvstate: IHF\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK2 | MASK3);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = (MASK2 | MASK3);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = IHF;
- return retval;
- }
- break;
- case VIBRA1:
- pr_debug("msic_get_lineout_prvstate: vibra1\n");
- sst_sc_reg_access(vib1drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib1drv[0].value) & mask;
-
- mask = MASK4;
- drv_en = (vib1drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA1;
- return retval;
- }
- break;
- case VIBRA2:
- pr_debug("msic_get_lineout_prvstate: vibra2\n");
- sst_sc_reg_access(vib2drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib2drv[0].value) & mask;
-
- mask = MASK5;
- drv_en = ((vib2drv[1].value) & mask);
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA2;
- return retval;
- }
- break;
- case NONE:
- pr_debug("msic_get_lineout_prvstate: NONE\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return retval;
- default:
- pr_debug("Invalid device id\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return -EINVAL;
- }
- }
- return retval;
-}
-static int msic_set_selected_lineout_dev(u8 value)
-{
- struct sc_reg_access lout_hs[] = {
- {0x25e, 0x33, 0xFF},
- {0x25d, 0x0, 0x43},
- };
- struct sc_reg_access lout_ihf[] = {
- {0x25e, 0x55, 0xff},
- {0x25d, 0x0, 0x0c},
- };
- struct sc_reg_access lout_vibra1[] = {
-
- {0x25e, 0x61, 0xff},
- {0x25d, 0x0, 0x10},
- };
- struct sc_reg_access lout_vibra2[] = {
-
- {0x25e, 0x16, 0xff},
- {0x25d, 0x0, 0x20},
- };
- struct sc_reg_access lout_def[] = {
- {0x25e, 0x66, 0x0},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_lineout_dev:%d\n", value);
- msic_get_lineout_prvstate();
- msic_line_out_restore(snd_msic_ops.prev_lineout_dev_id);
- snd_msic_ops.lineout_dev_id = value;
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_hs,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_ihf,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra1,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra2,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE\n");
- retval = sst_sc_reg_access(lout_def,
- PMIC_WRITE, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-
-
-static int msic_power_up_pb(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access vhs[] = {
- /* VHSP */
- {0x0DC, 0x3D, 0},
- /* VHSN */
- {0x0DD, 0x3F, 0},
- };
- struct sc_reg_access hsdac[] = {
- {0x382, 0x40, 0x40},
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access hs_filter[] = {
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- };
- struct sc_reg_access hs_enable[] = {
- /* enable driver */
- {0x25D, 0x3, 0x3},
- {0x26C, 0x0, 0x2},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vihf[] = {
- /* VIHF ON */
- {0x0C9, 0x27, 0x00},
- };
- struct sc_reg_access ihf_filter[] = {
- /* disable driver */
- {0x25D, 0x00, 0x0C},
- /* Filter DAC enable */
- {0x251, 0x03, 0x03},
- {0x257, 0x0C, 0x0C},
- };
- struct sc_reg_access ihf_en[] = {
- /*enable drv*/
- {0x25D, 0x0C, 0x0c},
- };
- struct sc_reg_access ihf_unmute[] = {
- /* unmute the IHF */
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access epdac[] = {
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access ep_enable[] = {
- /* enable driver */
- {0x25D, 0x40, 0x40},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vib1_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x10, 0x10},
- {0x264, 0x02, 0x82},
- };
- struct sc_reg_access vib2_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x20, 0x20},
- {0x26A, 0x02, 0x82},
- };
- struct sc_reg_access pcm2_en[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up pb.... Device %d\n", device);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pb_on = 1;
- snd_msic_ops.pbhs_on = 1;
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE) {
- sst_sc_reg_access(vhs, PMIC_WRITE, 2);
- sst_sc_reg_access(hsdac, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 4);
- } else {
- sst_sc_reg_access(epdac, PMIC_READ_MODIFY, 2);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(ep_enable, PMIC_READ_MODIFY, 3);
- }
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- msic_set_selected_lineout_dev(HEADSET);
- break;
- case SND_SST_DEVICE_IHF:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vihf, PMIC_WRITE, 1);
- sst_sc_reg_access(ihf_filter, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(ihf_en, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_unmute, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF)
- msic_set_selected_lineout_dev(IHF);
- break;
-
- case SND_SST_DEVICE_VIBRA:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib1_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- msic_set_selected_lineout_dev(VIBRA1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib2_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- msic_set_selected_lineout_dev(VIBRA2);
- break;
-
- default:
- pr_warn("Wrong Device %d, selected %d\n",
- device, snd_msic_ops.output_dev_id);
- }
- return sst_sc_reg_access(pcm2_en, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_up_cp(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access dmic_bias[] = {
- /* Turn on DMIC supply */
- {0x247, 0xA0, 0xA0},
- };
- struct sc_reg_access dmic[] = {
- /* mic demux enable */
- {0x245, 0x3F, 0x3F},
- {0x246, 0x07, 0x07},
-
- };
- struct sc_reg_access amic_bias[] = {
- /* Turn on AMIC supply */
- {0x247, 0xFC, 0xFC},
- };
- struct sc_reg_access amic[] = {
- /*MIC EN*/
- {0x249, 0x01, 0x01},
- {0x24A, 0x01, 0x01},
- /*ADC EN*/
- {0x248, 0x05, 0x0F},
-
- };
- struct sc_reg_access pcm2[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- struct sc_reg_access tx_on[] = {
- /* wait for mic to stabilize before turning on audio channels */
- {0x24F, 0x3C, 0x0},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- msleep(500); /* FIXME: need optimized value here */
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- snd_msic_ops.cap_on = 1;
- if (snd_msic_ops.input_dev_id == AMIC) {
- sst_sc_reg_access(amic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 3);
- } else {
- sst_sc_reg_access(dmic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 2);
- }
- msleep(1);
- sst_sc_reg_access(tx_on, PMIC_WRITE, 1);
- return sst_sc_reg_access(pcm2, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_down(void)
-{
- struct sc_reg_access power_dn[] = {
- /* VHSP */
- {0x0DC, 0xC4, 0},
- /* VHSN */
- {0x0DD, 0x04, 0},
- /* VIHF */
- {0x0C9, 0x24, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn off PLL*/
- {0x240, 0x00, 0x0},
- };
- struct sc_reg_access vaud[] = {
- /* turn off VAUD*/
- {0x0DB, 0x04, 0},
- };
-
- pr_debug("powering dn msic\n");
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(power_dn, PMIC_WRITE, 3);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- return 0;
-}
-
-static int msic_power_down_pb(unsigned int device)
-{
- struct sc_reg_access drv_enable[] = {
- {0x25D, 0x00, 0x00},
- };
- struct sc_reg_access hs_mute[] = {
- {0x259, 0x80, 0x80},
- {0x25A, 0x80, 0x80},
- {0x26C, 0x02, 0x02},
- };
- struct sc_reg_access hs_off[] = {
- {0x257, 0x00, 0x03},
- {0x250, 0x00, 0x30},
- {0x382, 0x00, 0x40},
- };
- struct sc_reg_access ihf_mute[] = {
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access ihf_off[] = {
- {0x257, 0x00, 0x0C},
- {0x251, 0x00, 0x03},
- };
- struct sc_reg_access vib1_off[] = {
- {0x264, 0x00, 0x82},
- };
- struct sc_reg_access vib2_off[] = {
- {0x26A, 0x00, 0x82},
- };
- struct sc_reg_access lout_off[] = {
- {0x25e, 0x66, 0x00},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
-
-
-
- pr_debug("powering dn pb for device %d\n", device);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pbhs_on = 0;
- sst_sc_reg_access(hs_mute, PMIC_READ_MODIFY, 3);
- drv_enable[0].mask = 0x43;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(hs_off, PMIC_READ_MODIFY, 3);
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_IHF:
- sst_sc_reg_access(ihf_mute, PMIC_READ_MODIFY, 2);
- drv_enable[0].mask = 0x0C;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_off, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF) {
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- sst_sc_reg_access(pmode_disable, PMIC_READ_MODIFY, 1);
- }
- break;
-
- case SND_SST_DEVICE_VIBRA:
- sst_sc_reg_access(vib1_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x10;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- sst_sc_reg_access(vib2_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x20;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
- }
- return 0;
-}
-
-static int msic_power_down_cp(unsigned int device)
-{
- struct sc_reg_access dmic[] = {
- {0x247, 0x00, 0xA0},
- {0x245, 0x00, 0x38},
- {0x246, 0x00, 0x07},
- };
- struct sc_reg_access amic[] = {
- {0x248, 0x00, 0x05},
- {0x249, 0x00, 0x01},
- {0x24A, 0x00, 0x01},
- {0x247, 0x00, 0xA3},
- };
- struct sc_reg_access tx_off[] = {
- {0x24F, 0x00, 0x3C},
- };
-
- pr_debug("powering dn cp....\n");
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(tx_off, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.input_dev_id == DMIC)
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 3);
- else
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 4);
- return 0;
-}
-
-static int msic_set_selected_output_dev(u8 value)
-{
- int retval = 0;
-
- pr_debug("msic set selected output:%d\n", value);
- snd_msic_ops.output_dev_id = value;
- if (snd_msic_ops.pbhs_on)
- msic_power_up_pb(SND_SST_DEVICE_HEADSET);
- return retval;
-}
-
-static int msic_set_selected_input_dev(u8 value)
-{
-
- struct sc_reg_access sc_access_dmic[] = {
- {0x24C, 0x10, 0x0},
- };
- struct sc_reg_access sc_access_amic[] = {
- {0x24C, 0x76, 0x0},
-
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_input_dev:%d\n", value);
- snd_msic_ops.input_dev_id = value;
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC1\n");
- retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
- break;
- case DMIC:
- pr_debug("Selecting DMIC1\n");
- retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
- break;
- default:
- return -EINVAL;
-
- }
- if (snd_msic_ops.cap_on)
- retval = msic_power_up_cp(SND_SST_DEVICE_CAPTURE);
- return retval;
-}
-
-static int msic_set_hw_dmic_route(u8 hw_ch_index)
-{
- struct sc_reg_access sc_access_router;
- int retval = -EINVAL;
-
- switch (hw_ch_index) {
- case HW_CH0:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[0];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch0. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH1:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[1]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("### hw_ch1. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH2:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[2];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch2. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH3:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[3]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("hw_ch3. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
- }
-
- return retval;
-}
-
-
-static int msic_set_pcm_voice_params(void)
-{
- return 0;
-}
-
-static int msic_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- return 0;
-}
-
-static int msic_set_audio_port(int status)
-{
- return 0;
-}
-
-static int msic_set_voice_port(int status)
-{
- return 0;
-}
-
-static int msic_set_mute(int dev_id, u8 value)
-{
- return 0;
-}
-
-static int msic_set_vol(int dev_id, int value)
-{
- return 0;
-}
-
-static int msic_get_mute(int dev_id, u8 *value)
-{
- return 0;
-}
-
-static int msic_get_vol(int dev_id, int *value)
-{
- return 0;
-}
-
-static int msic_set_headset_state(int state)
-{
- struct sc_reg_access hs_enable[] = {
- {0x25D, 0x03, 0x03},
- };
-
- if (state)
- /*enable*/
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- else {
- hs_enable[0].value = 0;
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- }
- return 0;
-}
-
-static int msic_enable_mic_bias(void)
-{
- struct sc_reg_access jack_interrupt_reg[] = {
- {0x0DB, 0x07, 0x00},
-
- };
- struct sc_reg_access jack_bias_reg[] = {
- {0x247, 0x0C, 0x0C},
- };
-
- sst_sc_reg_access(jack_interrupt_reg, PMIC_WRITE, 1);
- sst_sc_reg_access(jack_bias_reg, PMIC_READ_MODIFY, 1);
- return 0;
-}
-
-static int msic_disable_mic_bias(void)
-{
- if (snd_msic_ops.jack_interrupt_status == true)
- return 0;
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- return 0;
-}
-
-static int msic_disable_jack_btn(void)
-{
- struct sc_reg_access btn_disable[] = {
- {0x26C, 0x00, 0x01}
- };
-
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- snd_msic_ops.jack_interrupt_status = false;
- return sst_sc_reg_access(btn_disable, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_enable_jack_btn(void)
-{
- struct sc_reg_access btn_enable[] = {
- {0x26b, 0x77, 0x00},
- {0x26C, 0x01, 0x00},
- };
- return sst_sc_reg_access(btn_enable, PMIC_WRITE, 2);
-}
-static int msic_convert_adc_to_mvolt(unsigned int mic_bias)
-{
- return (ADC_ONE_LSB_MULTIPLIER * mic_bias) / 1000;
-}
-int msic_get_headset_state(int mic_bias)
-{
- struct sc_reg_access msic_hs_toggle[] = {
- {0x070, 0x00, 0x01},
- };
- if (mic_bias >= 0 && mic_bias < 400) {
-
- pr_debug("Detected Headphone!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias >= 400 && mic_bias < 650) {
-
- pr_debug("Detected American headset\n");
- msic_hs_toggle[0].value = 0x01;
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias >= 650 && mic_bias < 2000) {
-
- pr_debug("Detected Headset!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
- /*power on jack and btn*/
- snd_msic_ops.jack_interrupt_status = true;
- msic_enable_jack_btn();
- msic_enable_mic_bias();
- return SND_JACK_HEADSET;
-
- } else
- pr_debug("Detected Open Cable!!!\n");
-
- return SND_JACK_HEADPHONE;
-}
-
-static int msic_get_mic_bias(void *arg)
-{
- struct snd_intelmad *intelmad_drv = (struct snd_intelmad *)arg;
- u16 adc_adr = intelmad_drv->adc_address;
- u16 adc_val;
- int ret;
- struct sc_reg_access adc_ctrl3[2] = {
- {0x1C2, 0x05, 0x0},
- };
-
- struct sc_reg_access audio_adc_reg1 = {0,};
- struct sc_reg_access audio_adc_reg2 = {0,};
-
- msic_enable_mic_bias();
- /* Enable the msic for conversion before reading */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
- adc_ctrl3[0].value = 0x04;
- /* Re-toggle the RRDATARD bit */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
-
- audio_adc_reg1.reg_addr = adc_adr;
- /* Read the higher bits of data */
- msleep(1000);
- ret = sst_sc_reg_access(&audio_adc_reg1, PMIC_READ, 1);
- if (ret)
- return ret;
- pr_debug("adc read value %x", audio_adc_reg1.value);
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (audio_adc_reg1.value << 2);
- adc_adr++;
- audio_adc_reg2.reg_addr = adc_adr;
- ret = sst_sc_reg_access(&audio_adc_reg2, PMIC_READ, 1);
- if (ret)
- return ret;
- pr_debug("adc read value %x", audio_adc_reg2.value);
-
- /* Adding lower two bits to the higher bits */
- audio_adc_reg2.value &= 0x03;
- adc_val += audio_adc_reg2.value;
-
- pr_debug("ADC value 0x%x", adc_val);
- msic_disable_mic_bias();
- return adc_val;
-}
-
-static void msic_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- int retval = 0;
-
- pr_debug("value returned = 0x%x\n", intsts);
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return;
- }
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("MAD short_push detected\n");
- present = SND_JACK_BTN_0;
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = SND_JACK_BTN_0;
- mjack->jack.key[0] = BTN_0 ;
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD long_push detected\n");
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = present = SND_JACK_BTN_1;
- mjack->jack.key[1] = BTN_1;
- }
-
- if (intsts & 0x4) {
- unsigned int mic_bias;
- jack_event_flag = 1;
- buttonpressflag = 0;
- mic_bias = msic_get_mic_bias(intelmaddata);
- pr_debug("mic_bias = %d\n", mic_bias);
- mic_bias = msic_convert_adc_to_mvolt(mic_bias);
- pr_debug("mic_bias after conversion = %d mV\n", mic_bias);
- mjack->jack_dev_state = msic_get_headset_state(mic_bias);
- mjack->jack.type = present = mjack->jack_dev_state;
- }
-
- if (intsts & 0x8) {
- mjack->jack.type = mjack->jack_dev_state;
- present = 0;
- jack_event_flag = 1;
- buttonpressflag = 0;
- msic_disable_jack_btn();
- msic_disable_mic_bias();
- }
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-
-
-
-struct snd_pmic_ops snd_msic_ops = {
- .set_input_dev = msic_set_selected_input_dev,
- .set_output_dev = msic_set_selected_output_dev,
- .set_lineout_dev = msic_set_selected_lineout_dev,
- .set_hw_dmic_route = msic_set_hw_dmic_route,
- .set_mute = msic_set_mute,
- .get_mute = msic_get_mute,
- .set_vol = msic_set_vol,
- .get_vol = msic_get_vol,
- .init_card = msic_init_card,
- .set_pcm_audio_params = msic_set_pcm_audio_params,
- .set_pcm_voice_params = msic_set_pcm_voice_params,
- .set_voice_port = msic_set_voice_port,
- .set_audio_port = msic_set_audio_port,
- .power_up_pmic_pb = msic_power_up_pb,
- .power_up_pmic_cp = msic_power_up_cp,
- .power_down_pmic_pb = msic_power_down_pb,
- .power_down_pmic_cp = msic_power_down_cp,
- .power_down_pmic = msic_power_down,
- .pmic_irq_cb = msic_pmic_irq_cb,
- .pmic_jack_enable = msic_enable_mic_bias,
- .pmic_get_mic_bias = msic_get_mic_bias,
- .pmic_set_headset_state = msic_set_headset_state,
-};
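For orientation, the snd_msic_ops table above is the only way the generic intelmid core reaches this codec. A minimal sketch of that dispatch follows; it is illustrative only and assumes the snd_pmic_ops member prototypes (defined in a header not shown here) match the msic_* functions above.

/*
 * Illustrative sketch: how a caller holding the ops table would bring up
 * headset playback. The member names come from the initializer above.
 */
static int example_start_headset_playback(struct snd_pmic_ops *ops)
{
	int ret;

	/* route output to the stereo headphone path (msic_set_selected_output_dev) */
	ret = ops->set_output_dev(STEREO_HEADPHONE);
	if (ret)
		return ret;
	/* powers VAUD/PLL, configures the headset DAC path and enables PCM port 2 */
	return ops->power_up_pmic_pb(SND_SST_DEVICE_HEADSET);
}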
diff --git a/drivers/staging/intel_sst/intelmid_pvt.c b/drivers/staging/intel_sst/intelmid_pvt.c
deleted file mode 100644
index 90e0e64c0ab..00000000000
--- a/drivers/staging/intel_sst/intelmid_pvt.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * intelmid_pvt.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset - holding private functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/io.h>
-#include <asm/intel_scu_ipc.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/pcm.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-
-void period_elapsed(void *mad_substream)
-{
- struct snd_pcm_substream *substream = mad_substream;
- struct mad_stream_pvt *stream;
-
-
-
- if (!substream || !substream->runtime)
- return;
- stream = substream->runtime->private_data;
- if (!stream)
- return;
-
- if (stream->stream_status != RUNNING)
- return;
- pr_debug("calling period elapsed\n");
- snd_pcm_period_elapsed(substream);
- return;
-}
-
-
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_sst_stream_params param = {{{0,},},};
- struct snd_sst_params str_params = {0};
- int ret_val;
-
- /* set codec params and inform SST driver the same */
-
- param.uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
- param.uc.pcm_params.num_chan = (u8) substream->runtime->channels;
- param.uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
- param.uc.pcm_params.reserved = 0;
- param.uc.pcm_params.sfreq = substream->runtime->rate;
- param.uc.pcm_params.ring_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
- param.uc.pcm_params.period_count = substream->runtime->period_size;
- param.uc.pcm_params.ring_buffer_addr =
- virt_to_phys(substream->runtime->dma_area);
- pr_debug("period_cnt = %d\n", param.uc.pcm_params.period_count);
- pr_debug("sfreq= %d, wd_sz = %d\n",
- param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
-
- str_params.sparams = param;
- str_params.codec = SST_CODEC_TYPE_PCM;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- str_params.ops = STREAM_OPS_PLAYBACK;
- pr_debug("Playbck stream,Device %d\n", stream->device);
- } else {
- str_params.ops = STREAM_OPS_CAPTURE;
- stream->device = SND_SST_DEVICE_CAPTURE;
- pr_debug("Capture stream,Device %d\n", stream->device);
- }
- str_params.device_type = stream->device;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->open(&str_params);
- pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
- if (ret_val < 0)
- return ret_val;
-
- stream->stream_info.str_id = ret_val;
- stream->stream_status = INIT;
- stream->stream_info.buffer_ptr = 0;
- pr_debug("str id : %d\n", stream->stream_info.str_id);
-
- return ret_val;
-}
-
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- int ret_val;
-
- pr_debug("setting buffer ptr param\n");
- stream->stream_info.period_elapsed = period_elapsed;
- stream->stream_info.mad_substream = substream;
- stream->stream_info.buffer_ptr = 0;
- stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_STREAM_INIT, &stream->stream_info);
- if (ret_val)
- pr_err("control_set ret error %d\n", ret_val);
- return ret_val;
-
-}
-
-
-/**
- * sst_sc_reg_access - IPC read/write wrapper
- *
- * @sc_access: array of register addresses, values and masks
- * @type: operation type
- * @num_val: number of registers to operate on
- *
- * Reads/writes/read-modify operations on registers accessed through the SCU
- * (sound card and a few SST DSP registers that are not accessible to the IA)
- */
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val)
-{
- int i, retval = 0;
- if (type == PMIC_WRITE) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
- sc_access[i].value);
- if (retval)
- goto err;
- }
- } else if (type == PMIC_READ) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
- &(sc_access[i].value));
- if (retval)
- goto err;
- }
- } else {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_update_register(
- sc_access[i].reg_addr, sc_access[i].value,
- sc_access[i].mask);
- if (retval)
- goto err;
- }
- }
- return 0;
-err:
- pr_err("IPC failed for cmd %d, %d\n", retval, type);
- pr_err("reg:0x%2x addr:0x%2x\n",
- sc_access[i].reg_addr, sc_access[i].value);
- return retval;
-}
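As a usage note, every codec file in this series drives the SCU IPC through this wrapper by filling an array of sc_reg_access entries. Below is a minimal sketch of a read-modify-write call, assuming the {reg_addr, value, mask} initializer layout used throughout the code above; it is illustrative only, not part of the deleted sources.

/*
 * Illustrative sketch only: clear the earpiece driver-enable bit (MASK6)
 * of register 0x25D, leaving the other bits untouched.
 */
static int example_disable_ep_driver(void)
{
	struct sc_reg_access ep_off[] = {
		{0x25D, 0x00, MASK6},	/* reg_addr, value, mask */
	};

	/*
	 * PMIC_READ_MODIFY ends up in intel_scu_ipc_update_register(),
	 * which applies 'value' under 'mask'; PMIC_WRITE and PMIC_READ
	 * use the plain iowrite8/ioread8 paths shown above.
	 */
	return sst_sc_reg_access(ep_off, PMIC_READ_MODIFY, 1);
}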
diff --git a/drivers/staging/intel_sst/intelmid_snd_control.h b/drivers/staging/intel_sst/intelmid_snd_control.h
deleted file mode 100644
index 06ad3a10099..00000000000
--- a/drivers/staging/intel_sst/intelmid_snd_control.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef __INTELMID_SND_CTRL_H__
-#define __INTELMID_SND_CTRL_H__
-/*
- * intelmid_snd_control.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all snd control functions
- */
-
-/*
-Mask bits
-*/
-#define MASK0 0x01 /* 0000 0001 */
-#define MASK1 0x02 /* 0000 0010 */
-#define MASK2 0x04 /* 0000 0100 */
-#define MASK3 0x08 /* 0000 1000 */
-#define MASK4 0x10 /* 0001 0000 */
-#define MASK5 0x20 /* 0010 0000 */
-#define MASK6 0x40 /* 0100 0000 */
-#define MASK7 0x80 /* 1000 0000 */
-/*
-value bits
-*/
-#define VALUE0 0x01 /* 0000 0001 */
-#define VALUE1 0x02 /* 0000 0010 */
-#define VALUE2 0x04 /* 0000 0100 */
-#define VALUE3 0x08 /* 0000 1000 */
-#define VALUE4 0x10 /* 0001 0000 */
-#define VALUE5 0x20 /* 0010 0000 */
-#define VALUE6 0x40 /* 0100 0000 */
-#define VALUE7 0x80 /* 1000 0000 */
-
-#define MUTE 0 /* ALSA Passes 0 for mute */
-#define UNMUTE 1 /* ALSA Passes 1 for unmute */
-
-#define MAX_VOL_PMIC_VENDOR0 0x3f /* max vol in dB for stereo & voice DAC */
-#define MIN_VOL_PMIC_VENDOR0 0 /* min vol in dB for stereo & voice DAC */
-/* Head phone volume control */
-#define MAX_HP_VOL_PMIC_VENDOR1 6 /* max volume in dB for HP */
-#define MIN_HP_VOL_PMIC_VENDOR1 (-84) /* min volume in dB for HP */
-#define MAX_HP_VOL_INDX_PMIC_VENDOR1 40 /* Number of HP volume control values */
-
-/* Mono Earpiece Volume control */
-#define MAX_EP_VOL_PMIC_VENDOR1 0 /* max volume in dB for EP */
-#define MIN_EP_VOL_PMIC_VENDOR1 (-75) /* min volume in dB for EP */
-#define MAX_EP_VOL_INDX_PMIC_VENDOR1 32 /* Number of EP volume control values */
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-extern struct snd_pmic_ops snd_pmic_ops_fs;
-extern struct snd_pmic_ops snd_pmic_ops_mx;
-extern struct snd_pmic_ops snd_pmic_ops_nc;
-extern struct snd_pmic_ops snd_msic_ops;
-
-/* device */
-enum SND_INPUT_DEVICE {
- AMIC,
- DMIC,
- HS_MIC,
- IN_UNDEFINED
-};
-enum SND_LINE_OUT_DEVICE {
- HEADSET,
- IHF,
- VIBRA1,
- VIBRA2,
- NONE,
-};
-
-enum SND_OUTPUT_DEVICE {
- STEREO_HEADPHONE,
- MONO_EARPIECE,
-
- INTERNAL_SPKR,
- RECEIVER,
- OUT_UNDEFINED
-};
-
-enum pmic_controls {
- PMIC_SND_HP_MIC_MUTE = 0x0001,
- PMIC_SND_AMIC_MUTE = 0x0002,
- PMIC_SND_DMIC_MUTE = 0x0003,
- PMIC_SND_CAPTURE_VOL = 0x0004,
-/* Output controls */
- PMIC_SND_LEFT_PB_VOL = 0x0010,
- PMIC_SND_RIGHT_PB_VOL = 0x0011,
- PMIC_SND_LEFT_HP_MUTE = 0x0012,
- PMIC_SND_RIGHT_HP_MUTE = 0x0013,
- PMIC_SND_LEFT_SPEAKER_MUTE = 0x0014,
- PMIC_SND_RIGHT_SPEAKER_MUTE = 0x0015,
- PMIC_SND_RECEIVER_VOL = 0x0016,
- PMIC_SND_RECEIVER_MUTE = 0x0017,
- PMIC_SND_LEFT_MASTER_VOL = 0x0018,
- PMIC_SND_RIGHT_MASTER_VOL = 0x0019,
-/* Other controls */
- PMIC_SND_MUTE_ALL = 0x0020,
- PMIC_MAX_CONTROLS = 0x0020,
-};
-
-#endif /* __INTELMID_SND_CTRL_H__ */
-
-
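A short worked example of how the MASKn constants above compose into the multi-bit fields passed to sst_sc_reg_access(); this is an illustrative comment only, not from the deleted header.

/*
 * e.g. the two 3-bit DMIC mux fields in msic_set_hw_dmic_route():
 *
 *   MASK2 | MASK1 | MASK0  =  0x04 | 0x02 | 0x01  =  0x07   (bits 2:0)
 *   MASK6 | MASK5 | MASK4  =  0x40 | 0x20 | 0x10  =  0x70   (bits 6:4)
 *
 * so a PMIC_READ_MODIFY with that mask rewrites only the selected field.
 */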
diff --git a/drivers/staging/intel_sst/intelmid_v0_control.c b/drivers/staging/intel_sst/intelmid_v0_control.c
deleted file mode 100644
index b8dfdb9bc1a..00000000000
--- a/drivers/staging/intel_sst/intelmid_v0_control.c
+++ /dev/null
@@ -1,866 +0,0 @@
-/*
- * intelmid_v0_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 1
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum _reg_v1 {
- VOICEPORT1 = 0x180,
- VOICEPORT2 = 0x181,
- AUDIOPORT1 = 0x182,
- AUDIOPORT2 = 0x183,
- MISCVOICECTRL = 0x184,
- MISCAUDCTRL = 0x185,
- DMICCTRL1 = 0x186,
- AUDIOBIAS = 0x187,
- MICCTRL = 0x188,
- MICLICTRL1 = 0x189,
- MICLICTRL2 = 0x18A,
- MICLICTRL3 = 0x18B,
- VOICEDACCTRL1 = 0x18C,
- STEREOADCCTRL = 0x18D,
- AUD15 = 0x18E,
- AUD16 = 0x18F,
- AUD17 = 0x190,
- AUD18 = 0x191,
- RMIXOUTSEL = 0x192,
- ANALOGLBR = 0x193,
- ANALOGLBL = 0x194,
- POWERCTRL1 = 0x195,
- POWERCTRL2 = 0x196,
- HEADSETDETECTINT = 0x197,
- HEADSETDETECTINTMASK = 0x198,
- TRIMENABLE = 0x199,
-};
-
-int rev_id = 0x20;
-static bool jack_det_enabled;
-
-/**
- * fs_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int fs_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0x00, 0x0},
- {0x181, 0x00, 0x0},
- {0x182, 0xF8, 0x0},
- {0x183, 0x08, 0x0},
- {0x184, 0x00, 0x0},
- {0x185, 0x40, 0x0},
- {0x186, 0x06, 0x0},
- {0x187, 0x80, 0x0},
- {0x188, 0x40, 0x0},
- {0x189, 0x39, 0x0},
- {0x18a, 0x39, 0x0},
- {0x18b, 0x1F, 0x0},
- {0x18c, 0x00, 0x0},
- {0x18d, 0x00, 0x0},
- {0x18e, 0x39, 0x0},
- {0x18f, 0x39, 0x0},
- {0x190, 0x39, 0x0},
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- {0x193, 0x00, 0x0},
- {0x194, 0x00, 0x0},
- {0x195, 0x00, 0x0},
- {0x196, 0x7C, 0x0},
- {0x197, 0x00, 0x0},
- {0x198, 0x0B, 0x0},
- {0x199, 0x00, 0x0},
- {0x037, 0x3F, 0x0},
- };
-
- snd_pmic_ops_fs.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_fs.master_mute = UNMUTE;
- snd_pmic_ops_fs.mute_status = UNMUTE;
- snd_pmic_ops_fs.num_channel = 2;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
-}
-
-static int fs_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK7;
-
- if (snd_pmic_ops_fs.mute_status == MUTE)
- return 0;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x80;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
-}
-
-static int fs_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, 0x00, MASK7},
- {POWERCTRL1, 0xC6, 0xC6},
- {POWERCTRL2, 0x30, 0x30},
-
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- if (retval)
- return retval;
-
- pr_debug("in fs power up pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xC6},
- {POWERCTRL2, 0x00, 0x30},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- if (retval)
- return retval;
-
- pr_debug("in fsl power down pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL2, 0x32, 0x32}, /* NOTE: power up the A ADC only, */
- {AUDIOBIAS, 0x00, MASK7},
- /* as turning on the V ADC causes noise */
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int fs_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL2, 0x00, 0x03},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, MASK7, MASK7},
- };
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0xA0, 0},
- {0x181, 0x04, 0},
- {0x182, 0x0, 0},
- {0x183, 0x0, 0},
- {0x184, 0x18, 0},
- {0x185, 0x40, 0},
- {0x186, 0x06, 0},
- {0x187, 0x0, 0},
- {0x188, 0x10, 0},
- {0x189, 0x39, 0},
- {0x18a, 0x39, 0},
- {0x18b, 0x02, 0},
- {0x18c, 0x0, 0},
- {0x18d, 0x0, 0},
- {0x18e, 0x39, 0},
- {0x18f, 0x0, 0},
- {0x190, 0x0, 0},
- {0x191, 0x20, 0},
- {0x192, 0x20, 0},
- {0x193, 0x0, 0},
- {0x194, 0x0, 0},
- {0x195, 0x06, 0},
- {0x196, 0x25, 0},
- {0x197, 0x0, 0},
- {0x198, 0xF, 0},
- {0x199, 0x0, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
-}
-
-static int fs_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x30;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x03;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- u8 config1 = 0;
- struct sc_reg_access sc_access[4];
- int retval = 0, num_value = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- switch (sfreq) {
- case 8000:
- config1 = 0x00;
- break;
- case 11025:
- config1 = 0x01;
- break;
- case 12000:
- config1 = 0x02;
- break;
- case 16000:
- config1 = 0x03;
- break;
- case 22050:
- config1 = 0x04;
- break;
- case 24000:
- config1 = 0x05;
- break;
- case 26000:
- config1 = 0x06;
- break;
- case 32000:
- config1 = 0x07;
- break;
- case 44100:
- config1 = 0x08;
- break;
- case 48000:
- config1 = 0x09;
- break;
- }
- snd_pmic_ops_fs.num_channel = num_channel;
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x80;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- } else {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x00;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- }
- pr_debug("sfreq:%d,Register value = %x\n", sfreq, config1);
-
- if (word_size == 24) {
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
- sc_access[0].value = 0xFB;
-
-
- sc_access[1].reg_addr = AUDIOPORT2;
- sc_access[1].value = config1 | 0x10;
- sc_access[1].mask = MASK0 | MASK1 | MASK2 | MASK3
- | MASK4 | MASK5 | MASK6;
-
- sc_access[2].reg_addr = MISCAUDCTRL;
- sc_access[2].value = 0x02;
- sc_access[2].mask = 0x02;
-
- num_value = 3 ;
-
- } else {
-
- sc_access[0].reg_addr = AUDIOPORT2;
- sc_access[0].value = config1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
-
- sc_access[1].reg_addr = MISCAUDCTRL;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x02;
- num_value = 2;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_value);
-
-}
-
-static int fs_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access_dmic[] = {
- {MICCTRL, 0x81, 0xf7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_mic[] = {
- {MICCTRL, 0x40, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_hsmic[] = {
- {MICCTRL, 0x10, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
-
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting amic not supported in mono cfg\n");
- return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
- break;
-
- case HS_MIC:
- pr_debug("Selecting hsmic\n");
- return sst_sc_reg_access(sc_access_hsmic,
- PMIC_READ_MODIFY, 2);
- break;
-
- case DMIC:
- pr_debug("Selecting dmic\n");
- return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_hp[] = {
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- };
- struct sc_reg_access sc_access_is[] = {
- {0x191, 0x17, 0xFF},
- {0x192, 0x08, 0xFF},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case STEREO_HEADPHONE:
- pr_debug("SST DBG:Selecting headphone\n");
- return sst_sc_reg_access(sc_access_hp, PMIC_WRITE, 2);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- pr_debug("SST DBG:Selecting internal spkr\n");
- return sst_sc_reg_access(sc_access_is, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("dev_id:0x%x value:0x%x\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- sc_access[1].reg_addr = MICLICTRL1;
- sc_access[2].reg_addr = MICLICTRL2;
- sc_access[0].mask = MASK5;
- sc_access[1].mask = sc_access[2].mask = MASK6;
- if (value == MUTE) {
- sc_access[0].value = 0x20;
- sc_access[2].value = sc_access[1].value = 0x40;
- } else
- sc_access[0].value = sc_access[1].value
- = sc_access[2].value = 0x0;
- reg_num = 3;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x40;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
-
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- snd_pmic_ops_fs.mute_status = value;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- snd_pmic_ops_fs.mute_status = value;
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[0].value = sc_access[1].value = 0x80;
- reg_num = 2;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[3].reg_addr = MICCTRL;
- sc_access[4].reg_addr = MICLICTRL1;
- sc_access[5].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK7;
- sc_access[3].mask = MASK5;
- sc_access[4].mask = sc_access[5].mask = MASK6;
-
- if (value == MUTE) {
- sc_access[0].value =
- sc_access[1].value = sc_access[2].value = 0x80;
- sc_access[3].value = 0x20;
- sc_access[4].value = sc_access[5].value = 0x40;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = sc_access[3].value =
- sc_access[4].value = sc_access[5].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- reg_num = 6;
- snd_pmic_ops_fs.mute_status = value;
- snd_pmic_ops_fs.master_mute = value;
- break;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_acces, sc_access[4] = {{0},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 2;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].value = sc_access[1].value = 0x80;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- }
- reg_num = 2;
- break;
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:%d\n", value);
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[2].reg_addr = DMICCTRL1;
- sc_access[2].value = value;
- sc_access[0].value = sc_access[1].value = value;
- sc_acces.reg_addr = MICLICTRL3;
- sc_acces.value = value;
- sc_acces.mask = (MASK0|MASK1|MASK2|MASK3|MASK5|MASK6|MASK7);
- retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 3;
- break;
-
- default:
- return -EINVAL;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
-
- int retval = 0, temp_value = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
-
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- if (sc_access[0].value & mask)
- *value = MUTE;
- else
- *value = UNMUTE;
- break;
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- mask = MASK5;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = (sc_access[0].value & mask);
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD16;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- default:
- return -EINVAL;
- }
-
- return retval;
-}
-
-static int fs_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL\n");
- sc_access.reg_addr = MICLICTRL1;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL\n");
- sc_access.reg_addr = AUD16;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RT_PB_VOL\n");
- sc_access.reg_addr = AUD17;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- default:
- return -EINVAL;
- }
-
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = (int) (sc_access.value & mask);
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static void fs_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
- struct sc_reg_access sc_access[] = {
- {0x187, 0x00, MASK7},
- {0x188, 0x10, MASK4},
- {0x18b, 0x10, MASK4},
- };
-
- struct sc_reg_access sc_access_write[] = {
- {0x198, 0x00, 0x0},
- };
- pr_debug("Audio interrupt enable\n");
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
-
- intelmaddata->jack[0].jack_status = 0;
- /*intelmaddata->jack[1].jack_status = 0;*/
-
- jack_det_enabled = true;
- return;
-}
-
-static void fs_pmic_irq_cb(void *cb_data, u8 value)
-{
- struct mad_jack *mjack = NULL;
- struct snd_intelmad *intelmaddata = cb_data;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
-
- mjack = &intelmaddata->jack[0];
-
- if (value & 0x4) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
-
- /* send headphone detect */
- pr_debug(":MAD headphone %d\n", value & 0x4);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if (value & 0x2) {
- /* send short push */
- pr_debug(":MAD short push %d\n", value & 0x2);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (value & 0x1) {
- /* send long push */
- pr_debug(":MAD long push %d\n", value & 0x1);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (value & 0x8) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
- /* send headset detect */
- pr_debug(":MAD headset = %d\n", value & 0x8);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-
- return;
-}
-static int fs_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_fs = {
- .set_input_dev = fs_set_selected_input_dev,
- .set_output_dev = fs_set_selected_output_dev,
- .set_mute = fs_set_mute,
- .get_mute = fs_get_mute,
- .set_vol = fs_set_vol,
- .get_vol = fs_get_vol,
- .init_card = fs_init_card,
- .set_pcm_audio_params = fs_set_pcm_audio_params,
- .set_pcm_voice_params = fs_set_pcm_voice_params,
- .set_voice_port = fs_set_voice_port,
- .set_audio_port = fs_set_audio_port,
- .power_up_pmic_pb = fs_power_up_pb,
- .power_up_pmic_cp = fs_power_up_cp,
- .power_down_pmic_pb = fs_power_down_pb,
- .power_down_pmic_cp = fs_power_down_cp,
- .power_down_pmic = fs_power_down,
- .pmic_irq_cb = fs_pmic_irq_cb,
- /*
- * Jack detection enabling
- * need be delayed till first IRQ happen.
- */
- .pmic_irq_enable = NULL,
- .pmic_jack_enable = fs_jack_enable,
-};
diff --git a/drivers/staging/intel_sst/intelmid_v1_control.c b/drivers/staging/intel_sst/intelmid_v1_control.c
deleted file mode 100644
index 9d00728d8de..00000000000
--- a/drivers/staging/intel_sst/intelmid_v1_control.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/* intelmid_v1_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/control.h>
-#include <sound/initval.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid.h"
-#include "intelmid_snd_control.h"
-
-#include <linux/gpio.h>
-#define KOSKI_VOICE_CODEC_ENABLE 46
-
-enum _reg_v2 {
-
- MASTER_CLOCK_PRESCALAR = 0x205,
- SET_MASTER_AND_LR_CLK1 = 0x20b,
- SET_MASTER_AND_LR_CLK2 = 0x20c,
- MASTER_MODE_AND_DATA_DELAY = 0x20d,
- DIGITAL_INTERFACE_TO_DAI2 = 0x20e,
- CLK_AND_FS1 = 0x208,
- CLK_AND_FS2 = 0x209,
- DAI2_TO_DAC_HP = 0x210,
- HP_OP_SINGLE_ENDED = 0x224,
- ENABLE_OPDEV_CTRL = 0x226,
- ENABLE_DEV_AND_USE_XTAL = 0x227,
-
- /* Max audio subsystem (PQ49) MAX 8921 */
- AS_IP_MODE_CTL = 0xF9,
- AS_LEFT_SPKR_VOL_CTL = 0xFA, /* Mono Earpiece volume control */
- AS_RIGHT_SPKR_VOL_CTL = 0xFB,
- AS_LEFT_HP_VOL_CTL = 0xFC,
- AS_RIGHT_HP_VOL_CTL = 0xFD,
- AS_OP_MIX_CTL = 0xFE,
- AS_CONFIG = 0xFF,
-
- /* Headphone volume control & mute registers */
- VOL_CTRL_LT = 0x21c,
- VOL_CTRL_RT = 0x21d,
-
-};
-/**
- * mx_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int mx_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x02, 0x00},
- {0x205, 0x10, 0x00},
- {0x206, 0x60, 0x00},
- {0x207, 0x00, 0x00},
- {0x208, 0x90, 0x00},
- {0x209, 0x51, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x10, 0x00},
- {0x20c, 0x00, 0x00},
- {0x20d, 0x00, 0x00},
- {0x20e, 0x21, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0xB3, 0x00},
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x00, 0x00},
- {0x218, 0x03, 0x00},
- {0x219, 0x03, 0x00},
- {0x21a, 0x00, 0x00},
- {0x21b, 0x00, 0x00},
- {0x21c, 0x00, 0x00},
- {0x21d, 0x00, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x20, 0x00},
- {0x221, 0x20, 0x00},
- {0x222, 0x51, 0x00},
- {0x223, 0x20, 0x00},
- {0x224, 0x04, 0x00},
- {0x225, 0x80, 0x00},
- {0x226, 0x0F, 0x00},
- {0x227, 0x08, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x1f, 0x00},
- {0xfb, 0x1f, 0x00},
- {0xfc, 0x1f, 0x00},
- {0xfd, 0x1f, 0x00},
- {0xfe, 0x00, 0x00},
- {0xff, 0x0c, 0x00},
- };
- snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_mx.num_channel = 2;
- snd_pmic_ops_mx.master_mute = UNMUTE;
- snd_pmic_ops_mx.mute_status = UNMUTE;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
-}
-
-static int mx_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
- int mute_val1 = 0;
- int retval = 0;
-
- sc_access[0].reg_addr = AS_LEFT_HP_VOL_CTL;
- sc_access[1].reg_addr = AS_RIGHT_HP_VOL_CTL;
-
- if (value == UNMUTE) {
- mute_val = 0x1F;
- mute_val1 = 0x00;
- } else {
- mute_val = 0x00;
- mute_val1 = 0x40;
- }
- sc_access[0].mask = sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- sc_access[0].value = sc_access[1].value = (u8)mute_val;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- pr_debug("mute status = %d\n", snd_pmic_ops_mx.mute_status);
- if (snd_pmic_ops_mx.mute_status == MUTE ||
- snd_pmic_ops_mx.master_mute == MUTE)
- return retval;
-
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[1].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[0].value = sc_access[1].value = mute_val1;
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[1].value = 0x40;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_up_pb(unsigned int port)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- msleep(10);
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = 0xff;
- sc_access[0].value = 0x3C;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = 0x80;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_up_cp(unsigned int port)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {ENABLE_DEV_AND_USE_XTAL, 0x80, MASK7},
- {ENABLE_OPDEV_CTRL, 0x3, 0x3},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {ENABLE_OPDEV_CTRL, 0x00, MASK1|MASK0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int mx_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_set_pcm_voice_params(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x0e, 0x00},
- {0x205, 0x20, 0x00},
- {0x206, 0x8f, 0x00},
- {0x207, 0x21, 0x00},
- {0x208, 0x18, 0x00},
- {0x209, 0x32, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x5A, 0x00},
- {0x20c, 0xBE, 0x00},/* 0x00 -> 0xBE Koski */
- {0x20d, 0x00, 0x00}, /* DAI2 'off' */
- {0x20e, 0x40, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0x33, 0x00}, /* Voice filter */
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x20, 0x00},
- {0x218, 0x00, 0x00},
- {0x219, 0x00, 0x00},
- {0x21a, 0x40, 0x00},
- {0x21b, 0x40, 0x00},
- {0x21c, 0x09, 0x00},
- {0x21d, 0x09, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x00, 0x00}, /* Microphone configurations */
- {0x221, 0x00, 0x00}, /* Microphone configurations */
- {0x222, 0x50, 0x00}, /* Microphone configurations */
- {0x223, 0x21, 0x00}, /* Microphone configurations */
- {0x224, 0x00, 0x00},
- {0x225, 0x80, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x19, 0x00},
- {0xfb, 0x19, 0x00},
- {0xfc, 0x12, 0x00},
- {0xfd, 0x12, 0x00},
- {0xfe, 0x00, 0x00},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("SST DBG:mx_set_pcm_voice_params called\n");
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
-}
-
-static int mx_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int retval = 0;
-
- int config1 = 0, config2 = 0, filter = 0xB3;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- switch (sfreq) {
- case 8000:
- config1 = 0x10;
- config2 = 0x00;
- filter = 0x33;
- break;
- case 11025:
- config1 = 0x16;
- config2 = 0x0d;
- break;
- case 12000:
- config1 = 0x18;
- config2 = 0x00;
- break;
- case 16000:
- config1 = 0x20;
- config2 = 0x00;
- break;
- case 22050:
- config1 = 0x2c;
- config2 = 0x1a;
- break;
- case 24000:
- config1 = 0x30;
- config2 = 0x00;
- break;
- case 32000:
- config1 = 0x40;
- config2 = 0x00;
- break;
- case 44100:
- config1 = 0x58;
- config2 = 0x33;
- break;
- case 48000:
- config1 = 0x60;
- config2 = 0x00;
- break;
- }
- snd_pmic_ops_mx.num_channel = num_channel;
- /*mute the right channel if MONO*/
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x05;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- } else {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x04;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- }
- sc_access[0].reg_addr = 0x206;
- sc_access[0].value = config1;
- sc_access[1].reg_addr = 0x207;
- sc_access[1].value = config2;
-
- if (word_size == 16) {
- sc_access[2].value = 0x51;
- sc_access[3].value = 0x31;
- } else if (word_size == 24) {
- sc_access[2].value = 0x52;
- sc_access[3].value = 0x92;
- }
-
- sc_access[2].reg_addr = 0x209;
- sc_access[3].reg_addr = 0x20e;
-
- sc_access[4].reg_addr = 0x211;
- sc_access[4].value = filter;
-
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
-}
-
-static int mx_set_selected_output_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
- snd_pmic_ops_mx.output_dev_id = dev_id;
- switch (dev_id) {
- case STEREO_HEADPHONE:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x8C;
- sc_access[0].mask =
- MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0xb0;
- sc_access[0].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case RECEIVER:
- pr_debug("RECEIVER Koski selected\n");
-
- /* configuration - AS enable, receiver enable */
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x81;
- sc_access[0].mask = 0xff;
-
- num_reg = 1;
- break;
- default:
- pr_err("Not a valid output dev\n");
- return 0;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-
-static int mx_set_voice_port(int status)
-{
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- if (status == ACTIVATE)
- retval = mx_set_pcm_voice_params();
-
- return retval;
-}
-
-static int mx_set_audio_port(int status)
-{
- return 0;
-}
-
-static int mx_set_selected_input_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- snd_pmic_ops_mx.input_dev_id = dev_id;
- pr_debug("mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
-
- switch (dev_id) {
- case AMIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x50;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
-
- case HS_MIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x51;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
- case DMIC:
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- num_reg = 2;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-static int mx_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[5];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
-
- pr_debug("set_mute dev_id:0x%x , value:%d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[2].reg_addr = 0x223;
- if (value == MUTE) {
- sc_access[0].value = 0x00;
- sc_access[1].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x00;
- else
- sc_access[2].value = 0x20;
- } else {
- sc_access[0].value = 0x20;
- sc_access[1].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x20;
- else
- sc_access[2].value = 0x00;
- }
- sc_access[0].mask = MASK5|MASK6;
- sc_access[1].mask = MASK5|MASK6;
- sc_access[2].mask = MASK5|MASK6;
- num_reg = 3;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (snd_pmic_ops_mx.num_channel == 1)
- value = MUTE;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[1].reg_addr = VOL_CTRL_LT;
- sc_access[2].reg_addr = 0x220;
- sc_access[3].reg_addr = 0x221;
- sc_access[4].reg_addr = 0x223;
- snd_pmic_ops_mx.master_mute = value;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value = 0x40;
- sc_access[2].value = 0x00;
- sc_access[3].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x00;
- else
- sc_access[4].value = 0x20;
-
- } else {
- sc_access[0].value = sc_access[1].value = 0x00;
- sc_access[2].value = sc_access[3].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x20;
- else
- sc_access[4].value = 0x00;
- }
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[0].value = 0x40;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[2].mask = MASK5|MASK6;
- sc_access[3].mask = MASK5|MASK6|MASK2|MASK4;
- sc_access[4].mask = MASK5|MASK6|MASK4;
-
- num_reg = 5;
- break;
- case PMIC_SND_RECEIVER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- break;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[2] = {{0},};
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_RECEIVER_VOL:
- return 0;
- break;
- case PMIC_SND_CAPTURE_VOL:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[0].value = sc_access[1].value = -value;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4);
- num_reg = 2;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- }
- num_reg = 1;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[4] = {{0},};
- int retval = 0, num_reg = 0, mask = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- mask = MASK5|MASK6;
- num_reg = 1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = UNMUTE;
- else
- *value = MUTE;
- return retval;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- num_reg = 1;
- mask = MASK6;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- num_reg = 1;
- mask = MASK6;
- break;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = MUTE;
- else
- *value = UNMUTE;
- return retval;
-}
-
-static int mx_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0, num_reg = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- sc_access.reg_addr = 0x220;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- num_reg = 1;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_LT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_RT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = -(sc_access.value & mask);
- pr_debug("get volume value extracted %d\n", *value);
- return retval;
-}
-
-static u8 mx_get_jack_status(void)
-{
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x201;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- pr_debug("value returned = 0x%x\n", sc_access_read.value);
- return sc_access_read.value;
-}
-
-static void mx_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
-
- intelmaddata->jack_prev_state = 0xc0;
- return;
-}
-
-static void mx_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 jack_cur_status, jack_prev_state = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- time_t timediff;
- struct snd_intelmad *intelmaddata = cb_data;
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x2) {
- jack_cur_status = mx_get_jack_status();
- jack_prev_state = intelmaddata->jack_prev_state;
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x40)) {
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x00)) {
- /* headphone insert detected. */
- pr_debug("MAD headphone inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0xc0)) {
- /* headset remove detected. */
- pr_debug("MAD headset removed\n");
-
- present = 0;
- jack_event_flag = 1;
- mjack->jack_status = 0;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0xc0)) {
- /* headphone remove detected. */
- pr_debug("MAD headphone removed\n");
- present = 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0x00)) {
- /* button pressed */
- do_gettimeofday(&mjack->buttonpressed);
- pr_debug("MAD button press detected\n");
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0x40)) {
- if (mjack->jack_status) {
- /* button released */
- do_gettimeofday(&mjack->buttonreleased);
- pr_debug("MAD Button Released detected\n");
- timediff = mjack->buttonreleased.tv_sec -
- mjack->buttonpressed.tv_sec;
- buttonpressflag = 1;
-
- if (timediff > 1) {
- pr_debug("MAD long press dtd\n");
- /* report a long-press button event */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_LONG_PRESS;
- } else {
- pr_debug("MAD short press dtd\n");
- /* report a short-press button event */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_SHORT_PRESS;
- }
- } else {
- /* workaround for a Maxim hw issue: 0x00 to 0x40 is
- not a valid transition for headset insertion */
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
- }
- intelmaddata->jack_prev_state = jack_cur_status;
- pr_debug("mx_pmic_irq_cb prv_state= 0x%x\n",
- intelmaddata->jack_prev_state);
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int mx_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_mx = {
- .set_input_dev = mx_set_selected_input_dev,
- .set_output_dev = mx_set_selected_output_dev,
- .set_mute = mx_set_mute,
- .get_mute = mx_get_mute,
- .set_vol = mx_set_vol,
- .get_vol = mx_get_vol,
- .init_card = mx_init_card,
- .set_pcm_audio_params = mx_set_pcm_audio_params,
- .set_pcm_voice_params = mx_set_pcm_voice_params,
- .set_voice_port = mx_set_voice_port,
- .set_audio_port = mx_set_audio_port,
- .power_up_pmic_pb = mx_power_up_pb,
- .power_up_pmic_cp = mx_power_up_cp,
- .power_down_pmic_pb = mx_power_down_pb,
- .power_down_pmic_cp = mx_power_down_cp,
- .power_down_pmic = mx_power_down,
- .pmic_irq_cb = mx_pmic_irq_cb,
- .pmic_irq_enable = mx_pmic_irq_enable,
- .pmic_jack_enable = mx_jack_enable,
-};
-
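
Both vendor control files removed in this commit hang their callbacks off a struct snd_pmic_ops vtable (see the initializer just above), which is what keeps the intelmid core vendor-agnostic. Below is a minimal sketch of how a caller might dispatch through such an ops table; the sst_select_pmic()/sst_start_playback() helpers are illustrative assumptions, only the field names mirror the struct initialized above.

/* Hedged sketch: dispatching through a snd_pmic_ops-style vtable.
 * The two helpers are hypothetical; callback and field names follow
 * the snd_pmic_ops_mx initializer above.
 */
static struct snd_pmic_ops *cur_pmic_ops;

static int sst_select_pmic(struct snd_pmic_ops *ops)
{
	cur_pmic_ops = ops;			/* e.g. &snd_pmic_ops_mx */
	if (ops->card_status == SND_CARD_UN_INIT)
		return ops->init_card();	/* bring paths to known values */
	return 0;
}

static int sst_start_playback(unsigned int port, u8 out_dev)
{
	int ret = cur_pmic_ops->set_output_dev(out_dev);

	if (ret)
		return ret;
	return cur_pmic_ops->power_up_pmic_pb(port);
}
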
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
deleted file mode 100644
index 46ab55eb809..00000000000
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ /dev/null
@@ -1,1156 +0,0 @@
-/*
- * intelmid_v2_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 3
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/gpio.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum reg_v3 {
- VAUDIOCNT = 0x51,
- VOICEPORT1 = 0x100,
- VOICEPORT2 = 0x101,
- AUDIOPORT1 = 0x102,
- AUDIOPORT2 = 0x103,
- ADCSAMPLERATE = 0x104,
- DMICCTRL1 = 0x105,
- DMICCTRL2 = 0x106,
- MICCTRL = 0x107,
- MICSELVOL = 0x108,
- LILSEL = 0x109,
- LIRSEL = 0x10a,
- VOICEVOL = 0x10b,
- AUDIOLVOL = 0x10c,
- AUDIORVOL = 0x10d,
- LMUTE = 0x10e,
- RMUTE = 0x10f,
- POWERCTRL1 = 0x110,
- POWERCTRL2 = 0x111,
- DRVPOWERCTRL = 0x112,
- VREFPLL = 0x113,
- PCMBUFCTRL = 0x114,
- SOFTMUTE = 0x115,
- DTMFPATH = 0x116,
- DTMFVOL = 0x117,
- DTMFFREQ = 0x118,
- DTMFHFREQ = 0x119,
- DTMFLFREQ = 0x11a,
- DTMFCTRL = 0x11b,
- DTMFASON = 0x11c,
- DTMFASOFF = 0x11d,
- DTMFASINUM = 0x11e,
- CLASSDVOL = 0x11f,
- VOICEDACAVOL = 0x120,
- AUDDACAVOL = 0x121,
- LOMUTEVOL = 0x122,
- HPLVOL = 0x123,
- HPRVOL = 0x124,
- MONOVOL = 0x125,
- LINEOUTMIXVOL = 0x126,
- EPMIXVOL = 0x127,
- LINEOUTLSEL = 0x128,
- LINEOUTRSEL = 0x129,
- EPMIXOUTSEL = 0x12a,
- HPLMIXSEL = 0x12b,
- HPRMIXSEL = 0x12c,
- LOANTIPOP = 0x12d,
- AUXDBNC = 0x12f,
-};
-
-static void nc_set_amp_power(int power)
-{
- if (snd_pmic_ops_nc.gpio_amp)
- gpio_set_value(snd_pmic_ops_nc.gpio_amp, power);
-}
-
-/**
- * nc_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int nc_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {VAUDIOCNT, 0x25, 0},
- {VOICEPORT1, 0x00, 0},
- {VOICEPORT2, 0x00, 0},
- {AUDIOPORT1, 0x98, 0},
- {AUDIOPORT2, 0x09, 0},
- {AUDIOLVOL, 0x00, 0},
- {AUDIORVOL, 0x00, 0},
- {LMUTE, 0x03, 0},
- {RMUTE, 0x03, 0},
- {POWERCTRL1, 0x00, 0},
- {POWERCTRL2, 0x00, 0},
- {DRVPOWERCTRL, 0x00, 0},
- {VREFPLL, 0x10, 0},
- {HPLMIXSEL, 0xee, 0},
- {HPRMIXSEL, 0xf6, 0},
- {PCMBUFCTRL, 0x0, 0},
- {VOICEVOL, 0x0e, 0},
- {HPLVOL, 0x06, 0},
- {HPRVOL, 0x06, 0},
- {MICCTRL, 0x51, 0x00},
- {ADCSAMPLERATE, 0x8B, 0x00},
- {MICSELVOL, 0x5B, 0x00},
- {LILSEL, 0x06, 0},
- {LIRSEL, 0x46, 0},
- {LOANTIPOP, 0x00, 0},
- {DMICCTRL1, 0x40, 0},
- {AUXDBNC, 0xff, 0},
- };
- snd_pmic_ops_nc.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_nc.master_mute = UNMUTE;
- snd_pmic_ops_nc.mute_status = UNMUTE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
- mutex_init(&snd_pmic_ops_nc.lock);
- pr_debug("init complete!!\n");
- return 0;
-}
-
-static int nc_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
-
- if (snd_pmic_ops_nc.mute_status == MUTE)
- return 0;
-
- if (((snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE) ||
- (snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)) &&
- (value == UNMUTE))
- return 0;
- if (value == UNMUTE) {
- /* UNMUTE: clear bit 2 (MASK2) of LMUTE/RMUTE */
- mute_val = 0x00;
- } else {
- /* MUTE: set bit 2 (MASK2) of LMUTE/RMUTE */
- mute_val = 0x04;
-
- }
- sc_access[0].reg_addr = LMUTE;
- sc_access[1].reg_addr = RMUTE;
- sc_access[0].mask = sc_access[1].mask = MASK2;
- sc_access[0].value = sc_access[1].value = mute_val;
-
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x04;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[7];
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- if (port == 0xFF)
- return 0;
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
- msleep(30);
-
- pr_debug("powering up pb....\n");
-
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3A;
- sc_access[1].mask = 0x3A;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = 0x40;
- } else if (port == 1) {
- sc_access[0].value = 0x01;
- sc_access[0].mask = 0x01;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
-
- sc_access[2].reg_addr = DRVPOWERCTRL;
- sc_access[2].value = 0x86;
- sc_access[2].mask = 0x86;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 1;
-
- /*
- * There is a mismatch between Playback Sources and the enumerated
- * values of output sources. This mismatch makes the ALSA upper layer
- * send item 1 for Internal Speaker, while the expected enumeration is
- * 2. For now, treat MONO_EARPIECE and INTERNAL_SPKR identically and
- * power up the needed resources.
- */
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(1);
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[5];
- int retval = 0;
-
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("powering up cp....\n");
-
- if (port == 0xFF)
- return 0;
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3E;
- sc_access[1].mask = 0x3E;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0xB4;
- sc_access[0].mask = 0xB4;
- } else if (port == 1) {
- sc_access[0].value = 0xBF;
- sc_access[0].mask = 0xBF;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- if (port == 0) {
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
- } else if (port == 1) {
- sc_access[1].value = 0x02;
- sc_access[1].mask = 0x02;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- nc_enable_audiodac(MUTE);
-
-
- pr_debug("powering dn nc_power_down ....\n");
-
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(0);
-
- msleep(30);
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x00;
-
-
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- sc_access[0].reg_addr = VREFPLL;
- sc_access[0].value = 0x10;
- sc_access[0].mask = 0x10;
-
- sc_access[1].reg_addr = VAUDIOCNT;
- sc_access[1].value = 0x25;
- sc_access[1].mask = 0x25;
-
-
- retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- return nc_enable_audiodac(UNMUTE);
-}
-
-static int nc_power_down_pb(unsigned int device)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn pb....\n");
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
-
-
- msleep(30);
-
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- msleep(30);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x41;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x0C;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 0;
-
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xBE},
- {POWERCTRL2, 0x00, 0x02},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn cp....\n");
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int nc_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x100, 0xD5, 0},
- {0x101, 0x08, 0},
- {0x104, 0x03, 0},
- {0x107, 0x10, 0},
- {0x10B, 0x0E, 0},
- {0x10E, 0x03, 0},
- {0x10F, 0x03, 0},
- {0x114, 0x13, 0},
- {0x115, 0x00, 0},
- {0x128, 0xFE, 0},
- {0x129, 0xFE, 0},
- {0x12A, 0xFE, 0},
- {0x12B, 0xDE, 0},
- {0x12C, 0xDE, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
- pr_debug("Voice parameters set successfully!!\n");
- return 0;
-}
-
-
-static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int config2 = 0;
- struct sc_reg_access sc_access;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (sfreq) {
- case 8000:
- config2 = 0x00;
- break;
- case 11025:
- config2 = 0x01;
- break;
- case 12000:
- config2 = 0x02;
- break;
- case 16000:
- config2 = 0x03;
- break;
- case 22050:
- config2 = 0x04;
- break;
- case 24000:
- config2 = 0x05;
- break;
- case 32000:
- config2 = 0x07;
- break;
- case 44100:
- config2 = 0x08;
- break;
- case 48000:
- config2 = 0x09;
- break;
- }
-
- snd_pmic_ops_nc.num_channel = num_channel;
- if (snd_pmic_ops_nc.num_channel == 1) {
-
- sc_access.value = 0x07;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
- } else {
- sc_access.value = 0x00;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
- }
-
- pr_debug("word_size = %d\n", word_size);
-
- if (word_size == 24) {
- sc_access.reg_addr = AUDIOPORT2;
- sc_access.value = config2 | 0x10;
- sc_access.mask = 0x1F;
- } else {
- sc_access.value = config2;
- sc_access.mask = 0x1F;
- sc_access.reg_addr = AUDIOPORT2;
- }
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
- pr_debug("word_size = %d\n", word_size);
- sc_access.reg_addr = AUDIOPORT1;
- sc_access.mask = MASK5|MASK4|MASK1|MASK0;
- if (word_size == 16)
- sc_access.value = 0x98;
- else if (word_size == 24)
- sc_access.value = 0xAB;
-
- return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
-
-}
-
-static int nc_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_HP[] = {
- {LMUTE, 0x02, 0x06},
- {RMUTE, 0x02, 0x06},
- {DRVPOWERCTRL, 0x06, 0x06},
- };
- struct sc_reg_access sc_access_IS[] = {
- {LMUTE, 0x04, 0x06},
- {RMUTE, 0x04, 0x06},
- {DRVPOWERCTRL, 0x00, 0x06},
- };
- int retval = 0;
-
- snd_pmic_ops_nc.output_dev_id = value;
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- pr_debug("nc set selected output:%d\n", value);
- mutex_lock(&snd_pmic_ops_nc.lock);
- switch (value) {
- case STEREO_HEADPHONE:
- if (snd_pmic_ops_nc.pb_on)
- sst_sc_reg_access(sc_access_HP+2, PMIC_WRITE, 1);
- retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
- nc_set_amp_power(0);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 3);
- if (snd_pmic_ops_nc.pb_on)
- nc_set_amp_power(1);
- break;
- default:
- pr_err("rcvd illegal request: %d\n", value);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return -EINVAL;
- }
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return retval;
-}
-
-static int nc_audio_init(void)
-{
- struct sc_reg_access sc_acces, sc_access[] = {
- {0x100, 0x00, 0},
- {0x101, 0x00, 0},
- {0x104, 0x8B, 0},
- {0x107, 0x11, 0},
- {0x10B, 0x0E, 0},
- {0x114, 0x00, 0},
- {0x115, 0x00, 0},
- {0x128, 0x00, 0},
- {0x129, 0x00, 0},
- {0x12A, 0x00, 0},
- {0x12B, 0xee, 0},
- {0x12C, 0xf6, 0},
- };
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
- pr_debug("Audio Init successfully!!\n");
-
- /*set output device */
- nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
-
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_acces.value = 0x07;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- } else {
- sc_acces.value = 0x00;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- }
-
- return 0;
-}
-
-static int nc_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4|MASK5;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- nc_audio_init();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4|MASK5 ;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-
-}
-
-static int nc_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
- /* Deactivate voice port */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
- /* Activate voice port */
- nc_set_pcm_voice_params();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-}
-
-static int nc_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[3];
- u8 mute_val, cap_mute;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set device id::%d, value %d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_MUTE_ALL:
- pr_debug("PMIC_SND_MUTE_ALL value %d\n", value);
- snd_pmic_ops_nc.mute_status = value;
- snd_pmic_ops_nc.master_mute = value;
- if (value == UNMUTE) {
- /* unmute the system, set the 7th bit to zero */
- mute_val = cap_mute = 0x00;
- } else {
- /* MUTE: set bit 7 of the volume regs (capture paths use bit 6) */
- mute_val = 0x80;
- cap_mute = 0x40;
- }
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[1].reg_addr = AUDIORVOL;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = mute_val;
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x80;
- if (!sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2)) {
- sc_access[0].reg_addr = 0x109;
- sc_access[1].reg_addr = 0x10a;
- sc_access[2].reg_addr = 0x105;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK6;
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = cap_mute;
-
- if ((snd_pmic_ops_nc.input_dev_id == AMIC) ||
- (snd_pmic_ops_nc.input_dev_id == DMIC))
- sc_access[1].value = 0x40;
- if (snd_pmic_ops_nc.input_dev_id == HS_MIC)
- sc_access[0].value = 0x40;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 3);
- }
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_HPMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
- /* unmute: clear bit 6 of LIRSEL */
- sc_access[0].value = 0x00;
- } else {
- /* mute: set bit 6 of LIRSEL */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LIRSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_AMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
- /* unmute: clear bit 6 of LILSEL */
- sc_access[0].value = 0x00;
- } else {
- /* mute: set bit 6 of LILSEL */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LILSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
-
- case PMIC_SND_DMIC_MUTE:
- pr_debug("INPUT_MUTE_DMIC value%d\n", value);
- if (value == UNMUTE) {
- /* unmute: clear bit 6 of DMICCTRL1 and LILSEL */
- sc_access[1].value = 0x00;
- sc_access[0].value = 0x00;
- } else {
- /* mute: set bit 6 of DMICCTRL1 and LILSEL */
- sc_access[1].value = 0x40;
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = DMICCTRL1;
- sc_access[0].mask = MASK6;
- sc_access[1].reg_addr = LILSEL;
- sc_access[1].mask = MASK6;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 2);
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- snd_pmic_ops_nc.mute_status = value;
- if (value == UNMUTE)
- sc_access[0].value = 0x0;
- else
- sc_access[0].value = 0x04;
-
- if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
- sc_access[0].reg_addr = LMUTE;
- pr_debug("LEFT_HP_MUTE value %d\n",
- sc_access[0].value);
- } else {
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n",
- sc_access[0].value);
- }
- sc_access[0].mask = MASK2;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- if (value == UNMUTE)
- sc_access[0].value = 0x00;
- else
- sc_access[0].value = 0x03;
- sc_access[0].reg_addr = LMUTE;
- pr_debug("SPEAKER_MUTE %d\n", sc_access[0].value);
- sc_access[0].mask = MASK1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval;
-
-}
-
-static int nc_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0, entries = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set volume:%d\n", dev_id);
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:value::%d\n", value);
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = -value;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- sc_access[0].reg_addr = 0x10a;
- sc_access[1].reg_addr = 0x109;
- sc_access[2].reg_addr = 0x105;
- entries = 3;
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_HP_VOL %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPLVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_HP_VOL value %d\n", value);
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- sc_access[0].mask = MASK2;
- } else {
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPRVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- }
- entries = 1;
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("PMIC_SND_LEFT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("PMIC_SND_RIGHT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIORVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- default:
- return -EINVAL;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, entries);
-}
-
-static int nc_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access[6];
- u8 num_val;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- snd_pmic_ops_nc.input_dev_id = value;
-
- pr_debug("nc set selected input:%d\n", value);
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC\n");
- sc_access[0].reg_addr = 0x107;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[1].reg_addr = 0x10a;
- sc_access[1].value = 0x40;
- sc_access[1].mask = MASK6;
- sc_access[2].reg_addr = 0x109;
- sc_access[2].value = 0x00;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- num_val = 4;
- break;
-
- case HS_MIC:
- pr_debug("Selecting HS_MIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x00;
- sc_access[1].reg_addr = 0x109;
- sc_access[1].mask = MASK6;
- sc_access[1].value = 0x40;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].mask = MASK6;
- sc_access[2].value = 0x00;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0xc8;
- num_val = 5;
- break;
-
- case DMIC:
- pr_debug("DMIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x0B;
- sc_access[1].reg_addr = 0x105;
- sc_access[1].value = 0x80;
- sc_access[1].mask = MASK7|MASK6;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].value = 0x40;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = LILSEL;
- sc_access[3].mask = MASK6;
- sc_access[3].value = 0x00;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0x33;
- num_val = 5;
- break;
- default:
- return -EINVAL;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_val);
-}
-
-static int nc_get_mute(int dev_id, u8 *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("get mute::%d\n", dev_id);
-
- switch (dev_id) {
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC1\n");
- sc_access.reg_addr = LILSEL;
- mask = MASK6;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC2\n");
- sc_access.reg_addr = LIRSEL;
- mask = MASK6;
- break;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- mask = MASK2;
- pr_debug("PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
- if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
- sc_access.reg_addr = RMUTE;
- else
- sc_access.reg_addr = LMUTE;
- break;
-
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- pr_debug("PMIC_MONO_EARPIECE_MUTE\n");
- sc_access.reg_addr = RMUTE;
- mask = MASK1;
- break;
- case PMIC_SND_DMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_DMIC\n");
- sc_access.reg_addr = 0x105;
- mask = MASK6;
- break;
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("reg value = %d\n", sc_access.value);
- if (retval)
- return retval;
- *value = (sc_access.value) & mask;
- pr_debug("masked value = %d\n", *value);
- if (*value)
- *value = 0;
- else
- *value = 1;
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static int nc_get_vol(int dev_id, int *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_INPUT_CAPTURE_VOL\n");
- sc_access.reg_addr = LILSEL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIOLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIORVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
- sc_access.reg_addr = HPRVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_HP_VOL\n");
- sc_access.reg_addr = HPLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = -((sc_access.value) & mask);
- pr_debug("get vol value returned = %d\n", *value);
- return retval;
-}
-
-static void hp_automute(enum snd_jack_types type, int present)
-{
- u8 in = DMIC;
- u8 out = INTERNAL_SPKR;
- if (present) {
- if (type == SND_JACK_HEADSET)
- in = HS_MIC;
- out = STEREO_HEADPHONE;
- }
- nc_set_selected_input_dev(in);
- nc_set_selected_output_dev(out);
-}
-
-static void nc_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 value = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x132;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- value = (sc_access_read.value);
- pr_debug("value returned = 0x%x\n", value);
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("SST DBG:MAD headset detected\n");
- /* send headset detect/undetect */
- present = (value == 0x1) ? 3 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- hp_automute(SND_JACK_HEADSET, present);
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD headphone detected\n");
- /* send headphone detect/undetect */
- present = (value == 0x2) ? 1 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- hp_automute(SND_JACK_HEADPHONE, present);
- }
-
- if (intsts & 0x4) {
- pr_debug("MAD short push detected\n");
- /* send short push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (intsts & 0x8) {
- pr_debug(":MAD long push detected\n");
- /* send long push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int nc_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_nc = {
- .input_dev_id = DMIC,
- .output_dev_id = INTERNAL_SPKR,
- .set_input_dev = nc_set_selected_input_dev,
- .set_output_dev = nc_set_selected_output_dev,
- .set_mute = nc_set_mute,
- .get_mute = nc_get_mute,
- .set_vol = nc_set_vol,
- .get_vol = nc_get_vol,
- .init_card = nc_init_card,
- .set_pcm_audio_params = nc_set_pcm_audio_params,
- .set_pcm_voice_params = nc_set_pcm_voice_params,
- .set_voice_port = nc_set_voice_port,
- .set_audio_port = nc_set_audio_port,
- .power_up_pmic_pb = nc_power_up_pb,
- .power_up_pmic_cp = nc_power_up_cp,
- .power_down_pmic_pb = nc_power_down_pb,
- .power_down_pmic_cp = nc_power_down_cp,
- .power_down_pmic = nc_power_down,
- .pmic_irq_cb = nc_pmic_irq_cb,
- .pmic_jack_enable = nc_jack_enable,
-};
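
Nearly every helper in the two deleted control files funnels through sst_sc_reg_access() with a small array of {reg_addr, value, mask} triples and a PMIC_WRITE / PMIC_READ / PMIC_READ_MODIFY mode. A rough sketch of what the read-modify-write case amounts to follows; the struct layout is inferred from the initializers above and the register I/O primitives are placeholders, not the IPC calls the real driver uses.

/* Hedged sketch of the PMIC_READ_MODIFY convention: for each entry,
 * only the bits selected by .mask are replaced by .value.
 * pmic_reg_read()/pmic_reg_write() are hypothetical primitives.
 */
struct sc_reg_access {
	u16 reg_addr;
	u8  value;
	u8  mask;
};

static int sc_read_modify(struct sc_reg_access *sc, int num_entries)
{
	int i;

	for (i = 0; i < num_entries; i++) {
		u8 cur = pmic_reg_read(sc[i].reg_addr);

		cur &= ~sc[i].mask;			/* clear the masked bits */
		cur |= sc[i].value & sc[i].mask;	/* fold in the new bits  */
		pmic_reg_write(sc[i].reg_addr, cur);
	}
	return 0;
}
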
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 31f7813cab0..cc49038e55d 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -148,7 +148,7 @@ int Media_D_ReadSector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
{
WORD len, bn;
- //if (Check_D_MediaPower()) ; Śb 6250 don't care
+ //if (Check_D_MediaPower()) ; ÂŚb 6250 don't care
// return(ErrCode);
//if (Check_D_MediaFmt(fdoExt)) ;
// return(ErrCode);
@@ -594,7 +594,7 @@ int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt)
// if (Check_D_CardStsChg())
// MediaChange = ERROR;
// //usleep(56*1024);
-// if ((!Check_D_CntPower())&&(!MediaChange)) // Śł power & Media ¨SłQ change, Ťh return success
+// if ((!Check_D_CntPower())&&(!MediaChange)) // Œ³ power & Media ¨S³Q change, h return success
// return(SMSUCCESS);
// //usleep(56*1024);
//
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 66aad3a0d1f..48330340273 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -701,26 +701,4 @@ static struct usb_driver usb_storage_driver = {
.soft_unbind = 1,
};
-//----- usb_stor_init() ---------------------
-static int __init usb_stor_init(void)
-{
- int retval;
- pr_info("usb --- usb_stor_init start\n");
-
- retval = usb_register(&usb_storage_driver);
- if (retval == 0)
- pr_info("ENE USB Mass Storage support registered.\n");
-
- return retval;
-}
-
-//----- usb_stor_exit() ---------------------
-static void __exit usb_stor_exit(void)
-{
- pr_info("usb --- usb_stor_exit\n");
-
- usb_deregister(&usb_storage_driver) ;
-}
-
-module_init(usb_stor_init);
-module_exit(usb_stor_exit);
+module_usb_driver(usb_storage_driver);
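
The keucr hunk above swaps the hand-rolled init/exit pair for the module_usb_driver() helper macro. To the best of my recollection the macro expands to boilerplate along these lines (sketch only, see include/linux/usb.h for the authoritative definition; the pr_info banners from the old code are simply dropped):

/* Roughly what module_usb_driver(usb_storage_driver) generates. */
static int __init usb_storage_driver_init(void)
{
	return usb_register(&usb_storage_driver);
}
module_init(usb_storage_driver_init);

static void __exit usb_storage_driver_exit(void)
{
	usb_deregister(&usb_storage_driver);
}
module_exit(usb_storage_driver_exit);
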
diff --git a/drivers/staging/line6/Makefile b/drivers/staging/line6/Makefile
index de6bd12e973..34a2ddacc7e 100644
--- a/drivers/staging/line6/Makefile
+++ b/drivers/staging/line6/Makefile
@@ -12,4 +12,5 @@ line6usb-y := \
playback.o \
pod.o \
toneport.o \
- variax.o
+ variax.o \
+ podhd.o
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 9647154a492..127f9524774 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -192,6 +193,31 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
+int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (line6pcm->buffer_in)
+ return 0;
+
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_in);
+ line6pcm->buffer_in = NULL;
+}
+
/*
* Callback for completed capture URB.
*/
@@ -243,11 +269,7 @@ static void audio_in_callback(struct urb *urb)
length += fsize;
/* the following assumes LINE6_ISO_PACKETS == 1: */
-#if LINE6_BACKUP_MONITOR_SIGNAL
- memcpy(line6pcm->prev_fbuf, fbuf, fsize);
-#else
line6pcm->prev_fbuf = fbuf;
-#endif
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
@@ -319,6 +341,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ ret = line6_alloc_capture_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -331,6 +360,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
/* hw_free capture callback */
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
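
The capture.c hunks above add lazy buffer management: hw_params allocates the URB staging buffer only when capture is not already flagged active, and hw_free unlinks the in-flight URBs before releasing it. A condensed view of the idiom, with error handling, locking and the ALSA substream plumbing trimmed (the flag and helper names follow the diff):

/* Condensed sketch of the allocate-on-first-use / free-on-last-use
 * idiom added above; not the full hw_params/hw_free callbacks.
 */
static int capture_prepare_buffer(struct snd_line6_pcm *line6pcm)
{
	if ((line6pcm->flags & MASK_CAPTURE) == 0)
		return line6_alloc_capture_buffer(line6pcm); /* no-op if present */
	return 0;
}

static void capture_release_buffer(struct snd_line6_pcm *line6pcm)
{
	if ((line6pcm->flags & MASK_CAPTURE) == 0) {
		line6_unlink_wait_clear_audio_in_urbs(line6pcm);
		line6_free_capture_buffer(line6pcm);
	}
}
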
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index a7509fbbb95..366cbaa7c88 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,11 +19,13 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
+extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
int length);
extern int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 851b762319c..6a1959e16e0 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -21,6 +21,7 @@
#include "midi.h"
#include "playback.h"
#include "pod.h"
+#include "podhd.h"
#include "revision.h"
#include "toneport.h"
#include "usbdefs.h"
@@ -37,6 +38,8 @@ static const struct usb_device_id line6_id_table[] = {
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXTPRO)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX2)},
@@ -56,23 +59,25 @@ MODULE_DEVICE_TABLE(usb, line6_id_table);
/* *INDENT-OFF* */
static struct line6_properties line6_properties_table[] = {
- { "BassPODxt", "BassPODxt", LINE6_BIT_BASSPODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtLive", "BassPODxt Live", LINE6_BIT_BASSPODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_BASSPODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "GuitarPort", "GuitarPort", LINE6_BIT_GUITARPORT, LINE6_BIT_PCM },
- { "PocketPOD", "Pocket POD", LINE6_BIT_POCKETPOD, LINE6_BIT_CONTROL },
- { "PODStudioGX", "POD Studio GX", LINE6_BIT_PODSTUDIO_GX, LINE6_BIT_PCM },
- { "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PODSTUDIO_UX1, LINE6_BIT_PCM },
- { "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PODSTUDIO_UX2, LINE6_BIT_PCM },
- { "PODX3", "POD X3", LINE6_BIT_PODX3, LINE6_BIT_PCM },
- { "PODX3Live", "POD X3 Live", LINE6_BIT_PODX3LIVE, LINE6_BIT_PCM },
- { "PODxt", "PODxt", LINE6_BIT_PODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtLive", "PODxt Live", LINE6_BIT_PODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtPro", "PODxt Pro", LINE6_BIT_PODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "TonePortGX", "TonePort GX", LINE6_BIT_TONEPORT_GX, LINE6_BIT_PCM },
- { "TonePortUX1", "TonePort UX1", LINE6_BIT_TONEPORT_UX1, LINE6_BIT_PCM },
- { "TonePortUX2", "TonePort UX2", LINE6_BIT_TONEPORT_UX2, LINE6_BIT_PCM },
- { "Variax", "Variax Workbench", LINE6_BIT_VARIAX, LINE6_BIT_CONTROL }
+ { LINE6_BIT_BASSPODXT, "BassPODxt", "BassPODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
+ { LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
+ { LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3, "PODX3", "POD X3", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3LIVE, "PODX3Live", "POD X3 Live", LINE6_BIT_PCM },
+ { LINE6_BIT_PODXT, "PODxt", "PODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTLIVE, "PODxtLive", "PODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTPRO, "PODxtPro", "PODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_TONEPORT_GX, "TonePortGX", "TonePort GX", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX1, "TonePortUX1", "TonePort UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX2, "TonePortUX2", "TonePort UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_VARIAX, "Variax", "Variax Workbench", LINE6_BIT_CONTROL },
};
/* *INDENT-ON* */
@@ -437,6 +442,10 @@ static void line6_data_received(struct urb *urb)
line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ break; /* let userspace handle MIDI */
+
case LINE6_DEVID_PODXTLIVE:
switch (line6->interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -720,8 +729,8 @@ static int line6_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int devtype;
- struct usb_device *usbdev = NULL;
- struct usb_line6 *line6 = NULL;
+ struct usb_device *usbdev;
+ struct usb_line6 *line6;
const struct line6_properties *properties;
int devnum;
int interface_number, alternate = 0;
@@ -794,6 +803,7 @@ static int line6_probe(struct usb_interface *interface,
}
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
switch (interface_number) {
@@ -812,6 +822,7 @@ static int line6_probe(struct usb_interface *interface,
case LINE6_DEVID_BASSPODXTPRO:
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
alternate = 5;
break;
@@ -865,6 +876,18 @@ static int line6_probe(struct usb_interface *interface,
ep_write = 0x03;
break;
+ case LINE6_DEVID_PODHD300:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x84;
+ ep_write = 0x03;
+ break;
+
+ case LINE6_DEVID_PODHD500:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x81;
+ ep_write = 0x01;
+ break;
+
case LINE6_DEVID_POCKETPOD:
size = sizeof(struct usb_line6_pod);
ep_read = 0x82;
@@ -923,7 +946,7 @@ static int line6_probe(struct usb_interface *interface,
}
if (size == 0) {
- dev_err(line6->ifcdev,
+ dev_err(&interface->dev,
"driver bug: interface data size not set\n");
ret = -ENODEV;
goto err_put;
@@ -1017,6 +1040,12 @@ static int line6_probe(struct usb_interface *interface,
ret = line6_pod_init(interface, (struct usb_line6_pod *)line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ ret = line6_podhd_init(interface,
+ (struct usb_line6_podhd *)line6);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -1139,6 +1168,11 @@ static void line6_disconnect(struct usb_interface *interface)
line6_pod_disconnect(interface);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6_podhd_disconnect(interface);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index 553192f4931..117bf994356 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -88,6 +88,11 @@ static const int SYSEX_EXTRA_SIZE = sizeof(line6_midi_id) + 4;
*/
struct line6_properties {
/**
+ Bit identifying this device in the line6usb driver.
+ */
+ int device_bit;
+
+ /**
Card id string (maximum 16 characters).
This can be used to address the device in ALSA programs as
"default:CARD=<id>"
@@ -100,11 +105,6 @@ struct line6_properties {
const char *name;
/**
- Bit identifying this device in the line6usb driver.
- */
- int device_bit;
-
- /**
Bit vector defining this device's capabilities in the
line6usb driver.
*/
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index e554a2da643..13d02939c3c 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -135,7 +135,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
line6_write_hexdump(line6, 'S', data, length);
#endif
- transfer_buffer = kmalloc(length, GFP_ATOMIC);
+ transfer_buffer = kmemdup(data, length, GFP_ATOMIC);
if (transfer_buffer == NULL) {
usb_free_urb(urb);
@@ -143,7 +143,6 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
return -ENOMEM;
}
- memcpy(transfer_buffer, data, length);
usb_fill_int_urb(urb, line6->usbdev,
usb_sndbulkpipe(line6->usbdev,
line6->ep_control_write),
@@ -173,6 +172,8 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
break;
case LINE6_DEVID_VARIAX:
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
break;
default:
@@ -307,10 +308,10 @@ static ssize_t midi_set_midi_mask_transmit(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -339,10 +340,10 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -391,16 +392,32 @@ int line6_init_midi(struct usb_line6 *line6)
return -ENOMEM;
err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi);
return err;
+ }
err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi->midibuf_in.buf);
+ kfree(line6midi);
return err;
+ }
line6midi->line6 = line6;
- line6midi->midi_mask_transmit = 1;
- line6midi->midi_mask_receive = 4;
+
+ switch (line6->product) {
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 1;
+ break;
+
+ default:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 4;
+ }
+
line6->line6midi = line6midi;
err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi,
diff --git a/drivers/staging/line6/midi.h b/drivers/staging/line6/midi.h
index b73a025d8be..4a9e9f94729 100644
--- a/drivers/staging/line6/midi.h
+++ b/drivers/staging/line6/midi.h
@@ -57,12 +57,12 @@ struct snd_line6_midi {
/**
Bit mask for output MIDI channels.
*/
- int midi_mask_transmit;
+ unsigned short midi_mask_transmit;
/**
Bit mask for input MIDI channels.
*/
- int midi_mask_receive;
+ unsigned short midi_mask_receive;
/**
Buffer for incoming MIDI stream.
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 9d4c8a606ee..37675e66da8 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -86,31 +86,22 @@ static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
#endif
+static bool test_flags(unsigned long flags0, unsigned long flags1,
+ unsigned long mask)
+{
+ return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
+}
+
int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
int err = 0;
-
-#if LINE6_BACKUP_MONITOR_SIGNAL
- if (!(line6pcm->line6->properties->capabilities & LINE6_BIT_HWMON)) {
- line6pcm->prev_fbuf =
- kmalloc(LINE6_ISO_PACKETS * line6pcm->max_packet_size,
- GFP_KERNEL);
-
- if (!line6pcm->prev_fbuf) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc monitor buffer\n");
- return -ENOMEM;
- }
- }
-#else
+
line6pcm->prev_fbuf = NULL;
-#endif
- if (((flags_old & MASK_CAPTURE) == 0) &&
- ((flags_new & MASK_CAPTURE) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
@@ -119,54 +110,47 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
return -EBUSY;
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
+ err = line6_alloc_capture_buffer(line6pcm);
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_in = 0;
line6pcm->prev_fsize = 0;
err = line6_submit_audio_in_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
- if (((flags_old & MASK_PLAYBACK) == 0) &&
- ((flags_new & MASK_PLAYBACK) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
/*
See comment above regarding PCM restart.
*/
if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
return -EBUSY;
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
+ err = line6_alloc_playback_buffer(line6pcm);
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
return 0;
+
+pcm_start_error:
+ __sync_fetch_and_and(&line6pcm->flags, ~channels);
+ return err;
}
int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
@@ -175,22 +159,19 @@ int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (((flags_old & MASK_CAPTURE) != 0) &&
- ((flags_new & MASK_CAPTURE) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
line6_unlink_audio_in_urbs(line6pcm);
- kfree(line6pcm->buffer_in);
- line6pcm->buffer_in = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
+ line6_free_capture_buffer(line6pcm);
}
- if (((flags_old & MASK_PLAYBACK) != 0) &&
- ((flags_new & MASK_PLAYBACK) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
line6_unlink_audio_out_urbs(line6pcm);
- kfree(line6pcm->buffer_out);
- line6pcm->buffer_out = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
+ line6_free_playback_buffer(line6pcm);
}
-#if LINE6_BACKUP_MONITOR_SIGNAL
- kfree(line6pcm->prev_fbuf);
-#endif
return 0;
}
@@ -403,10 +384,12 @@ int line6_init_pcm(struct usb_line6 *line6,
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
ep_read = 0x82;
ep_write = 0x01;
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
ep_read = 0x86;
@@ -451,9 +434,14 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->line6 = line6;
line6pcm->ep_audio_read = ep_read;
line6pcm->ep_audio_write = ep_write;
- line6pcm->max_packet_size = usb_maxpacket(line6->usbdev,
- usb_rcvintpipe(line6->usbdev,
- ep_read), 0);
+
+ /* Read and write buffers are sized identically, so choose minimum */
+ line6pcm->max_packet_size = min(
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0),
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1));
+
line6pcm->properties = properties;
line6->line6pcm = line6pcm;
@@ -508,6 +496,23 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+
+ break;
+
+ case SNDRV_PCM_STREAM_CAPTURE:
+ if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+
+ break;
+
+ default:
+ MISSING_CASE;
+ }
+
if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 77055b3724a..55d8297dd3d 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -39,9 +39,6 @@
#define LINE6_IMPULSE_DEFAULT_PERIOD 100
#endif
-#define LINE6_BACKUP_MONITOR_SIGNAL 0
-#define LINE6_REUSE_DMA_AREA_FOR_PLAYBACK 0
-
/*
Get substream from Line6 PCM data structure
*/
@@ -149,11 +146,6 @@ struct snd_line6_pcm {
unsigned char *buffer_in;
/**
- Temporary buffer index for playback.
- */
- int index_out;
-
- /**
Previously captured frame (for software monitoring).
*/
unsigned char *prev_fbuf;
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 10c54383658..4152db2328b 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -191,13 +192,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_frames = urb_size / bytes_per_frame;
urb_out->transfer_buffer =
line6pcm->buffer_out +
- line6pcm->max_packet_size * line6pcm->index_out;
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (++line6pcm->index_out == LINE6_ISO_BUFFERS)
- line6pcm->index_out = 0;
-
if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
!test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
@@ -222,18 +220,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
} else
dev_err(line6pcm->line6->ifcdev, "driver bug: len = %d\n", len); /* this is somewhat paranoid */
} else {
-#if LINE6_REUSE_DMA_AREA_FOR_PLAYBACK
- /* set the buffer pointer */
- urb_out->transfer_buffer =
- runtime->dma_area +
- line6pcm->pos_out * bytes_per_frame;
-#else
- /* copy data */
memcpy(urb_out->transfer_buffer,
runtime->dma_area +
line6pcm->pos_out * bytes_per_frame,
urb_out->transfer_buffer_length);
-#endif
}
line6pcm->pos_out += urb_frames;
@@ -361,6 +351,31 @@ void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
wait_clear_audio_out_urbs(line6pcm);
}
+int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row, so allocate only once */
+ if (line6pcm->buffer_out)
+ return 0;
+
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_out);
+ line6pcm->buffer_out = NULL;
+}
+
/*
Callback for completed playback URB.
*/
@@ -469,6 +484,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ ret = line6_alloc_playback_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -481,6 +503,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
/* hw_free playback callback */
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index f2fc8c0526e..02487ff2453 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,7 +29,9 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
+extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index d9b30212585..4dadc571d96 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -1149,14 +1149,10 @@ static struct snd_kcontrol_new pod_control_monitor = {
static void pod_destruct(struct usb_interface *interface)
{
struct usb_line6_pod *pod = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (pod == NULL)
return;
- line6 = &pod->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&pod->line6);
del_timer(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
diff --git a/drivers/staging/line6/podhd.c b/drivers/staging/line6/podhd.c
new file mode 100644
index 00000000000..7ef45437b4f
--- /dev/null
+++ b/drivers/staging/line6/podhd.c
@@ -0,0 +1,154 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "audio.h"
+#include "driver.h"
+#include "pcm.h"
+#include "podhd.h"
+
+#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
+
+static struct snd_ratden podhd_ratden = {
+ .num_min = 48000,
+ .num_max = 48000,
+ .num_step = 1,
+ .den = 1,
+};
+
+static struct line6_pcm_properties podhd_pcm_properties = {
+ .snd_line6_playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_rates = {
+ .nrats = 1,
+ .rats = &podhd_ratden},
+ .bytes_per_frame = PODHD_BYTES_PER_FRAME
+};
+
+/*
+ POD HD destructor.
+*/
+static void podhd_destruct(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd = usb_get_intfdata(interface);
+
+ if (podhd == NULL)
+ return;
+ line6_cleanup_audio(&podhd->line6);
+}
+
+/*
+ Try to init POD HD device.
+*/
+static int podhd_try_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err;
+ struct usb_line6 *line6 = &podhd->line6;
+
+ if ((interface == NULL) || (podhd == NULL))
+ return -ENODEV;
+
+ /* initialize audio system: */
+ err = line6_init_audio(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize MIDI subsystem: */
+ err = line6_init_midi(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize PCM subsystem: */
+ err = line6_init_pcm(line6, &podhd_pcm_properties);
+ if (err < 0)
+ return err;
+
+ /* register USB audio system: */
+ err = line6_register_audio(line6);
+ return err;
+}
+
+/*
+ Init POD HD device (and clean up in case of failure).
+*/
+int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err = podhd_try_init(interface, podhd);
+
+ if (err < 0)
+ podhd_destruct(interface);
+
+ return err;
+}
+
+/*
+ POD HD device disconnected.
+*/
+void line6_podhd_disconnect(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd;
+
+ if (interface == NULL)
+ return;
+ podhd = usb_get_intfdata(interface);
+
+ if (podhd != NULL) {
+ struct snd_line6_pcm *line6pcm = podhd->line6.line6pcm;
+
+ if (line6pcm != NULL)
+ line6_pcm_disconnect(line6pcm);
+ }
+
+ podhd_destruct(interface);
+}
diff --git a/drivers/staging/line6/podhd.h b/drivers/staging/line6/podhd.h
new file mode 100644
index 00000000000..652f74056bb
--- /dev/null
+++ b/drivers/staging/line6/podhd.h
@@ -0,0 +1,30 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#ifndef PODHD_H
+#define PODHD_H
+
+#include <linux/usb.h>
+
+#include "driver.h"
+
+struct usb_line6_podhd {
+ /**
+ Generic Line6 USB data.
+ */
+ struct usb_line6 line6;
+};
+
+extern void line6_podhd_disconnect(struct usb_interface *interface);
+extern int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd);
+
+#endif /* PODHD_H */
diff --git a/drivers/staging/line6/revision.h b/drivers/staging/line6/revision.h
index 350d0dfff8f..b4eee2b7383 100644
--- a/drivers/staging/line6/revision.h
+++ b/drivers/staging/line6/revision.h
@@ -1,4 +1,4 @@
#ifndef DRIVER_REVISION
/* current subversion revision */
-#define DRIVER_REVISION " (revision 690)"
+#define DRIVER_REVISION " (904)"
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 879e6992bbc..f31057830db 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -295,14 +295,10 @@ static struct snd_kcontrol_new toneport_control_source = {
static void toneport_destruct(struct usb_interface *interface)
{
struct usb_line6_toneport *toneport = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (toneport == NULL)
return;
- line6 = &toneport->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&toneport->line6);
}
/*
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index c6dffe6bc1a..aff9e5caea4 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -24,6 +24,8 @@
#define LINE6_DEVID_BASSPODXTPRO 0x4252
#define LINE6_DEVID_GUITARPORT 0x4750
#define LINE6_DEVID_POCKETPOD 0x5051
+#define LINE6_DEVID_PODHD300 0x5057
+#define LINE6_DEVID_PODHD500 0x414D
#define LINE6_DEVID_PODSTUDIO_GX 0x4153
#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
#define LINE6_DEVID_PODSTUDIO_UX2 0x4151
@@ -37,48 +39,71 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-#define LINE6_BIT_BASSPODXT (1 << 0)
-#define LINE6_BIT_BASSPODXTLIVE (1 << 1)
-#define LINE6_BIT_BASSPODXTPRO (1 << 2)
-#define LINE6_BIT_GUITARPORT (1 << 3)
-#define LINE6_BIT_POCKETPOD (1 << 4)
-#define LINE6_BIT_PODSTUDIO_GX (1 << 5)
-#define LINE6_BIT_PODSTUDIO_UX1 (1 << 6)
-#define LINE6_BIT_PODSTUDIO_UX2 (1 << 7)
-#define LINE6_BIT_PODX3 (1 << 8)
-#define LINE6_BIT_PODX3LIVE (1 << 9)
-#define LINE6_BIT_PODXT (1 << 10)
-#define LINE6_BIT_PODXTLIVE (1 << 11)
-#define LINE6_BIT_PODXTPRO (1 << 12)
-#define LINE6_BIT_TONEPORT_GX (1 << 13)
-#define LINE6_BIT_TONEPORT_UX1 (1 << 14)
-#define LINE6_BIT_TONEPORT_UX2 (1 << 15)
-#define LINE6_BIT_VARIAX (1 << 16)
+enum {
+ LINE6_ID_BASSPODXT,
+ LINE6_ID_BASSPODXTLIVE,
+ LINE6_ID_BASSPODXTPRO,
+ LINE6_ID_GUITARPORT,
+ LINE6_ID_POCKETPOD,
+ LINE6_ID_PODHD300,
+ LINE6_ID_PODHD500,
+ LINE6_ID_PODSTUDIO_GX,
+ LINE6_ID_PODSTUDIO_UX1,
+ LINE6_ID_PODSTUDIO_UX2,
+ LINE6_ID_PODX3,
+ LINE6_ID_PODX3LIVE,
+ LINE6_ID_PODXT,
+ LINE6_ID_PODXTLIVE,
+ LINE6_ID_PODXTPRO,
+ LINE6_ID_TONEPORT_GX,
+ LINE6_ID_TONEPORT_UX1,
+ LINE6_ID_TONEPORT_UX2,
+ LINE6_ID_VARIAX
+};
-#define LINE6_BITS_PRO (LINE6_BIT_BASSPODXTPRO | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_LIVE (LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODX3LIVE)
-#define LINE6_BITS_PODXTALL (LINE6_BIT_PODXT | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_BASSPODXTALL (LINE6_BIT_BASSPODXT | \
- LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_BASSPODXTPRO)
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+
+enum {
+ LINE6_BIT(BASSPODXT),
+ LINE6_BIT(BASSPODXTLIVE),
+ LINE6_BIT(BASSPODXTPRO),
+ LINE6_BIT(GUITARPORT),
+ LINE6_BIT(POCKETPOD),
+ LINE6_BIT(PODHD300),
+ LINE6_BIT(PODHD500),
+ LINE6_BIT(PODSTUDIO_GX),
+ LINE6_BIT(PODSTUDIO_UX1),
+ LINE6_BIT(PODSTUDIO_UX2),
+ LINE6_BIT(PODX3),
+ LINE6_BIT(PODX3LIVE),
+ LINE6_BIT(PODXT),
+ LINE6_BIT(PODXTLIVE),
+ LINE6_BIT(PODXTPRO),
+ LINE6_BIT(TONEPORT_GX),
+ LINE6_BIT(TONEPORT_UX1),
+ LINE6_BIT(TONEPORT_UX2),
+ LINE6_BIT(VARIAX),
+
+ LINE6_BITS_PRO = LINE6_BIT_BASSPODXTPRO | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_LIVE = LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 | LINE6_BIT_PODHD500,
+ LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT | LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_BASSPODXTPRO
+};
/* device supports settings parameter via USB */
-#define LINE6_BIT_CONTROL (1 << 0)
+#define LINE6_BIT_CONTROL (1 << 0)
/* device supports PCM input/output via USB */
-#define LINE6_BIT_PCM (1 << 1)
+#define LINE6_BIT_PCM (1 << 1)
/* device support hardware monitoring */
-#define LINE6_BIT_HWMON (1 << 2)
+#define LINE6_BIT_HWMON (1 << 2)
#define LINE6_BIT_CONTROL_PCM_HWMON (LINE6_BIT_CONTROL | \
LINE6_BIT_PCM | \
LINE6_BIT_HWMON)
-#define LINE6_FALLBACK_INTERVAL 10
-#define LINE6_FALLBACK_MAXPACKETSIZE 16
+#define LINE6_FALLBACK_INTERVAL 10
+#define LINE6_FALLBACK_MAXPACKETSIZE 16
#endif
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 81241cdf1be..d36622228b2 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -572,14 +572,10 @@ static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
static void variax_destruct(struct usb_interface *interface)
{
struct usb_line6_variax *variax = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (variax == NULL)
return;
- line6 = &variax->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&variax->line6);
del_timer(&variax->startup_timer1);
del_timer(&variax->startup_timer2);
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 3db3b0a91cc..b7175fe1b15 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1272,17 +1272,4 @@ static struct usb_driver go7007_usb_driver = {
.id_table = go7007_usb_id_table,
};
-static int __init go7007_usb_init(void)
-{
- return usb_register(&go7007_usb_driver);
-}
-
-static void __exit go7007_usb_cleanup(void)
-{
- usb_deregister(&go7007_usb_driver);
-}
-
-module_init(go7007_usb_init);
-module_exit(go7007_usb_cleanup);
-
-MODULE_LICENSE("GPL v2");
+module_usb_driver(go7007_usb_driver);
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 0dc2c2b22c2..6cd4cd67a1d 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -541,26 +541,7 @@ static struct usb_driver igorplugusb_remote_driver = {
.id_table = igorplugusb_remote_id_table
};
-static int __init igorplugusb_remote_init(void)
-{
- int ret = 0;
-
- dprintk(DRIVER_NAME ": loaded, debug mode enabled\n");
-
- ret = usb_register(&igorplugusb_remote_driver);
- if (ret)
- printk(KERN_ERR DRIVER_NAME ": usb register failed!\n");
-
- return ret;
-}
-
-static void __exit igorplugusb_remote_exit(void)
-{
- usb_deregister(&igorplugusb_remote_driver);
-}
-
-module_init(igorplugusb_remote_init);
-module_exit(igorplugusb_remote_exit);
+module_usb_driver(igorplugusb_remote_driver);
#include <linux/vermagic.h>
MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index f5308d5929c..f68218012f2 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -1025,26 +1025,4 @@ static int imon_resume(struct usb_interface *intf)
return rc;
}
-static int __init imon_init(void)
-{
- int rc;
-
- printk(KERN_INFO MOD_NAME ": " MOD_DESC ", v" MOD_VERSION "\n");
-
- rc = usb_register(&imon_driver);
- if (rc) {
- err("%s: usb register failed(%d)", __func__, rc);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit imon_exit(void)
-{
- usb_deregister(&imon_driver);
- printk(KERN_INFO MOD_NAME ": module removed. Goodbye!\n");
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index a2d18b0aa04..7855baa18e7 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -913,27 +913,4 @@ static void sasem_disconnect(struct usb_interface *interface)
mutex_unlock(&disconnect_lock);
}
-static int __init sasem_init(void)
-{
- int rc;
-
- printk(KERN_INFO MOD_DESC ", v" MOD_VERSION "\n");
- printk(KERN_INFO MOD_AUTHOR "\n");
-
- rc = usb_register(&sasem_driver);
- if (rc < 0) {
- err("%s: usb register failed (%d)", __func__, rc);
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit sasem_exit(void)
-{
- usb_deregister(&sasem_driver);
- printk(KERN_INFO "module removed. Goodbye!\n");
-}
-
-
-module_init(sasem_init);
-module_exit(sasem_exit);
+module_usb_driver(sasem_driver);
diff --git a/drivers/staging/media/lirc/lirc_ttusbir.c b/drivers/staging/media/lirc/lirc_ttusbir.c
index e4b329b8caf..7950887ff11 100644
--- a/drivers/staging/media/lirc/lirc_ttusbir.c
+++ b/drivers/staging/media/lirc/lirc_ttusbir.c
@@ -372,24 +372,4 @@ static void disconnect(struct usb_interface *intf)
kfree(ttusbir);
}
-static int ttusbir_init_module(void)
-{
- int result;
-
- DPRINTK(KERN_DEBUG "Module ttusbir init\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&usb_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
- return result;
-}
-
-static void ttusbir_exit_module(void)
-{
- printk(KERN_DEBUG "Module ttusbir exit\n");
- usb_deregister(&usb_driver);
-}
-
-module_init(ttusbir_init_module);
-module_exit(ttusbir_exit_module);
+module_usb_driver(usb_driver);
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 8bf34794489..4ac3696883c 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -38,7 +38,6 @@ void mei_io_list_init(struct mei_io_list *list)
{
/* initialize our queue list */
INIT_LIST_HEAD(&list->mei_cb.cb_list);
- list->status = 0;
}
/**
@@ -49,22 +48,15 @@ void mei_io_list_init(struct mei_io_list *list)
*/
void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos;
+ struct mei_cl_cb *next;
- if (list->status != 0)
- return;
-
- if (list_empty(&list->mei_cb.cb_list))
- return;
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- if (cb_pos) {
+ list_for_each_entry_safe(pos, next, &list->mei_cb.cb_list, cb_list) {
+ if (pos->file_private) {
struct mei_cl *cl_tmp;
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
+ cl_tmp = (struct mei_cl *)pos->file_private;
if (mei_cl_cmp_id(cl, cl_tmp))
- list_del(&cb_pos->cb_list);
+ list_del(&pos->cb_list);
}
}
}
@@ -338,16 +330,10 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
}
}
/* remove all waiting requests */
- if (dev->write_list.status == 0 &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- if (cb_pos) {
- list_del(&cb_pos->cb_list);
- mei_free_cb_private(cb_pos);
- cb_pos = NULL;
- }
- }
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ list_del(&cb_pos->cb_list);
+ mei_free_cb_private(cb_pos);
}
}
@@ -380,8 +366,7 @@ void mei_host_start_message(struct mei_device *dev)
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_start_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
mei_hdr->length)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->mei_state = MEI_RESETING;
@@ -414,8 +399,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_enum_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
mei_hdr->length)) {
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -605,15 +589,10 @@ void mei_host_init_iamthif(struct mei_device *dev)
return;
}
- /* Do not render the system unusable when iamthif_mtu is not equal to
- the value received from ME.
- Assign iamthif_mtu to the value received from ME in order to solve the
- hardware macro incompatibility. */
+ /* Assign iamthif_mtu to the value received from ME */
- dev_dbg(&dev->pdev->dev, "[DEFAULT] IAMTHIF = %d\n", dev->iamthif_mtu);
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
- dev_dbg(&dev->pdev->dev,
- "IAMTHIF = %d\n",
+ dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n",
dev->me_clients[i].props.max_msg_length);
kfree(dev->iamthif_msg_buf);
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index a65dacf0021..eb5df7fc226 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -128,9 +128,9 @@ int mei_count_empty_write_slots(struct mei_device *dev)
* returns 1 if success, 0 - otherwise.
*/
int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length)
+ struct mei_msg_hdr *header,
+ unsigned char *write_buffer,
+ unsigned long write_length)
{
u32 temp_msg = 0;
unsigned long bytes_written = 0;
@@ -216,7 +216,7 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer_length: message size will be read
*/
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+ unsigned char *buffer, unsigned long buffer_length)
{
u32 i = 0;
unsigned char temp_buf[sizeof(u32)];
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index 7bd38ae2c23..aeae511419c 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -52,6 +52,17 @@ int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev, bool preserve);
bool mei_wd_host_init(struct mei_device *dev);
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
+/*
+ * mei_watchdog_register - Registering watchdog interface
+ * once we got connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregister the watchdog interface
+ * @dev - mei device
+ */
+void mei_watchdog_unregister(struct mei_device *dev);
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 882d106d54e..3544fee34e4 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -198,8 +198,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
- if (!(dev->read_list.status == 0 &&
- !list_empty(&dev->read_list.mei_cb.cb_list)))
+ if (list_empty(&dev->read_list.mei_cb.cb_list))
goto quit;
list_for_each_entry_safe(cb_pos, cb_next,
@@ -210,9 +209,6 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
buffer = (unsigned char *)
(cb_pos->response_buffer.data +
cb_pos->information);
- BUG_ON(cb_pos->response_buffer.size <
- mei_hdr->length +
- cb_pos->information);
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -390,24 +386,10 @@ static void mei_client_connect_response(struct mei_device *dev,
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n",
- dev->wd_timeout);
-
- dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
-
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
- /* Registering watchdog interface device once we got connection
- to the WD Client
- */
- if (watchdog_register_device(&amt_wd_dev)) {
- printk(KERN_ERR "mei: unable to register watchdog device.\n");
- dev->wd_interface_reg = false;
- } else {
- dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
- dev->wd_interface_reg = true;
- }
-
+ /* next step in the state machine */
mei_host_init_iamthif(dev);
return;
}
@@ -416,22 +398,20 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+
+ cl = (struct mei_cl *)cb_pos->file_private;
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
+ if (MEI_IOCTL == cb_pos->major_file_operations) {
+ if (is_treat_specially_client(cl, rs)) {
list_del(&cb_pos->cb_list);
- return;
- }
- if (MEI_IOCTL == cb_pos->major_file_operations) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&cb_pos->cb_list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -458,29 +438,26 @@ static void mei_client_disconnect_response(struct mei_device *dev,
rs->host_addr,
rs->status);
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return;
- }
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (cl->host_client_id == rs->host_addr &&
+ cl->me_client_id == rs->me_addr) {
- list_del(&cb_pos->cb_list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
+ list_del(&cb_pos->cb_list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -718,7 +695,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res =
(struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
+ mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
@@ -736,7 +713,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
mei_reset(dev, 1);
return;
}
- if (dev->me_clients[dev->me_client_presentation_num]
+ if (dev->me_clients[dev->me_client_presentation_num]
.client_id == props_res->address) {
dev->me_clients[dev->me_client_presentation_num].props
@@ -1228,7 +1205,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
{
struct mei_cl *cl;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
struct mei_io_list *list;
int ret;
@@ -1241,36 +1218,31 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
list = &dev->write_waiting_list;
- if (!list->status && !list_empty(&list->mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (cl) {
- cl->status = 0;
- list_del(&cb_pos->cb_list);
- if (MEI_WRITING == cl->writing_state &&
- (cb_pos->major_file_operations ==
- MEI_WRITE) &&
- (cl != &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev,
- "MEI WRITE COMPLETE\n");
- cl->writing_state =
- MEI_WRITE_COMPLETE;
- list_add_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- }
- if (cl == &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
- if (dev->iamthif_flow_control_pending) {
- ret =
- _mei_irq_thread_iamthif_read(
- dev, slots);
- if (ret)
- return ret;
- }
- }
+ list_for_each_entry_safe(pos, next,
+ &list->mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ cl->status = 0;
+ list_del(&pos->cb_list);
+ if (MEI_WRITING == cl->writing_state &&
+ (pos->major_file_operations == MEI_WRITE) &&
+ (cl != &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "MEI WRITE COMPLETE\n");
+ cl->writing_state = MEI_WRITE_COMPLETE;
+ list_add_tail(&pos->cb_list,
+ &cmpl_list->mei_cb.cb_list);
+ }
+ if (cl == &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
+ if (dev->iamthif_flow_control_pending) {
+ ret = _mei_irq_thread_iamthif_read(
+ dev, slots);
+ if (ret)
+ return ret;
}
-
}
}
@@ -1317,101 +1289,88 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
return ~ENODEV;
/* complete control write list CB */
- if (!dev->ctrl_wr_list.status) {
- /* complete control write list CB */
- dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
+ dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
+ list_for_each_entry_safe(pos, next,
&dev->ctrl_wr_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)
- cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return -ENODEV;
- }
- switch (cb_pos->major_file_operations) {
- case MEI_CLOSE:
- /* send disconnect message */
- ret = _mei_irq_thread_close(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
-
- break;
- case MEI_READ:
- /* send flow control message */
- ret = _mei_irq_thread_read(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ cl = (struct mei_cl *) pos->file_private;
+ if (!cl) {
+ list_del(&pos->cb_list);
+ return -ENODEV;
+ }
+ switch (pos->major_file_operations) {
+ case MEI_CLOSE:
+ /* send disconnect message */
+ ret = _mei_irq_thread_close(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
- case MEI_IOCTL:
- /* connect message */
- if (!mei_other_client_is_connecting(dev,
- cl))
- continue;
- ret = _mei_irq_thread_ioctl(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ break;
+ case MEI_READ:
+ /* send flow control message */
+ ret = _mei_irq_thread_read(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
+ break;
+ case MEI_IOCTL:
+ /* connect message */
+ if (mei_other_client_is_connecting(dev, cl))
+ continue;
+ ret = _mei_irq_thread_ioctl(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- default:
- BUG();
- }
+ break;
+ default:
+ BUG();
}
+
}
/* complete write list CB */
- if (!dev->write_list.status &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
-
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for client"
- " %d, not sending.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl(dev, slots,
- cb_pos,
- cl, cmpl_list);
- if (ret)
- return ret;
-
- } else if (cl == &dev->iamthif_cl) {
- /* IAMTHIF IOCTL */
- dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for amthi"
- " client %d.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl_iamthif(dev,
- slots,
- cb_pos,
- cl,
- cmpl_list);
- if (ret)
- return ret;
-
- }
+ dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ if (cl != &dev->iamthif_cl) {
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for client"
+ " %d, not sending.\n",
+ cl->host_client_id);
+ continue;
+ }
+ ret = _mei_irq_thread_cmpl(dev, slots,
+ pos,
+ cl, cmpl_list);
+ if (ret)
+ return ret;
+
+ } else if (cl == &dev->iamthif_cl) {
+ /* IAMTHIF IOCTL */
+ dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for amthi"
+ " client %d.\n",
+ cl->host_client_id);
+ continue;
}
+ ret = _mei_irq_thread_cmpl_iamthif(dev,
+ slots,
+ pos,
+ cl,
+ cmpl_list);
+ if (ret)
+ return ret;
}
+
}
return 0;
}
@@ -1502,18 +1461,13 @@ void mei_timer(struct work_struct *work)
amthi_complete_list = &dev->amthi_read_complete_list.
mei_cb.cb_list;
- if (!list_empty(amthi_complete_list)) {
-
- list_for_each_entry_safe(cb_pos, cb_next,
- amthi_complete_list,
- cb_list) {
+ list_for_each_entry_safe(cb_pos, cb_next, amthi_complete_list, cb_list) {
- cl_pos = cb_pos->file_object->private_data;
+ cl_pos = cb_pos->file_object->private_data;
- /* Finding the AMTHI entry. */
- if (cl_pos == &dev->iamthif_cl)
- list_del(&cb_pos->cb_list);
- }
+ /* Finding the AMTHI entry. */
+ if (cl_pos == &dev->iamthif_cl)
+ list_del(&cb_pos->cb_list);
}
if (dev->iamthif_current_cb)
mei_free_cb_private(dev->iamthif_current_cb);
@@ -1527,8 +1481,8 @@ void mei_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
- mutex_unlock(&dev->device_lock);
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ mutex_unlock(&dev->device_lock);
}
/**
@@ -1624,7 +1578,7 @@ end:
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
- if (complete_list.status || list_empty(&complete_list.mei_cb.cb_list))
+ if (list_empty(&complete_list.mei_cb.cb_list))
return IRQ_HANDLED;
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 8a61d126651..0752ead4269 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -228,18 +228,15 @@ struct mei_cl_cb *find_amthi_read_list_entry(
struct file *file)
{
struct mei_cl *cl_temp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->amthi_read_complete_list.status &&
- !list_empty(&dev->amthi_read_complete_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
- cl_temp = (struct mei_cl *)cb_pos->file_private;
- if (cl_temp && cl_temp == &dev->iamthif_cl &&
- cb_pos->file_object == file)
- return cb_pos;
- }
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
+ cl_temp = (struct mei_cl *)pos->file_private;
+ if (cl_temp && cl_temp == &dev->iamthif_cl &&
+ pos->file_object == file)
+ return pos;
}
return NULL;
}
@@ -262,7 +259,7 @@ struct mei_cl_cb *find_amthi_read_list_entry(
* negative on failure.
*/
int amthi_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset)
+ char __user *ubuf, size_t length, loff_t *offset)
{
int rets;
int wait_ret;
@@ -334,8 +331,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
}
}
/* if the whole message will fit remove it from the list */
- if (cb->information >= *offset &&
- length >= (cb->information - *offset))
+ if (cb->information >= *offset && length >= (cb->information - *offset))
list_del(&cb->cb_list);
else if (cb->information > 0 && cb->information <= *offset) {
/* end of the message has been reached */
@@ -356,9 +352,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
* the information may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length))
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
rets = -EFAULT;
else {
rets = length;
@@ -427,7 +421,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cb->response_buffer.size = dev->me_clients[i].props.max_msg_length;
cb->response_buffer.data =
- kmalloc(cb->response_buffer.size, GFP_KERNEL);
+ kmalloc(cb->response_buffer.size, GFP_KERNEL);
if (!cb->response_buffer.data) {
rets = -ENOMEM;
goto unlock;
@@ -448,8 +442,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
&dev->read_list.mei_cb.cb_list);
}
} else {
- list_add_tail(&cb->cb_list,
- &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
}
return rets;
unlock:
@@ -482,7 +475,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_ioctl = true;
dev->iamthif_msg_buf_size = cb->request_buffer.size;
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
- cb->request_buffer.size);
+ cb->request_buffer.size);
ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
if (ret < 0)
@@ -534,8 +527,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev_dbg(&dev->pdev->dev, "No flow control credentials, "
"so add iamthif cb to write list.\n");
- list_add_tail(&cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->write_list.mei_cb.cb_list);
}
return 0;
}
@@ -550,8 +542,8 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
void mei_run_next_iamthif_cmd(struct mei_device *dev)
{
struct mei_cl *cl_tmp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
int status;
if (!dev)
@@ -565,25 +557,22 @@ void mei_run_next_iamthif_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- if (dev->amthi_cmd_list.status == 0 &&
- !list_empty(&dev->amthi_cmd_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
- list_del(&cb_pos->cb_list);
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
-
- if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
- status = amthi_write(dev, cb_pos);
- if (status) {
- dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
- status);
- return;
- }
- break;
+ dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
+ list_del(&pos->cb_list);
+ cl_tmp = (struct mei_cl *)pos->file_private;
+
+ if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
+ status = amthi_write(dev, pos);
+ if (status) {
+ dev_dbg(&dev->pdev->dev,
+ "amthi write failed status = %d\n",
+ status);
+ return;
}
+ break;
}
}
}
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index eb05c36f45d..1e1a9f996e7 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -33,6 +33,7 @@
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
#include "mei_dev.h"
#include "mei.h"
@@ -51,18 +52,10 @@ static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
static const char mei_driver_version[] = MEI_DRIVER_VERSION;
-/* mei char device for registration */
-static struct cdev mei_cdev;
-
-/* major number for device */
-static int mei_major;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;
-static struct class *mei_class;
-
-
/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
@@ -105,173 +98,6 @@ MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
static DEFINE_MUTEX(mei_mutex);
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int __devinit mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
- if (mei_device) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "mei: Failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, mei_driver_name);
- if (err) {
- printk(KERN_ERR "mei: Failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, mei_driver_name, dev);
-
- if (err) {
- printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto unmap_memory;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- if (mei_hw_init(dev)) {
- printk(KERN_ERR "mei: Init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
- mei_device = pdev;
- pci_set_drvdata(pdev, dev);
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("mei: Driver initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-unmap_memory:
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- printk(KERN_ERR "mei: Driver initialization failed.\n");
- return err;
-}
-
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void __devexit mei_remove(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- if (mei_device != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- mei_wd_stop(dev, false);
-
- mei_device = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- if (dev->wd_interface_reg)
- watchdog_unregister_device(&amt_wd_dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
- mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
/**
* mei_clear_list - removes all callbacks associated with file
@@ -372,21 +198,17 @@ static struct mei_cl_cb *find_read_list_entry(
struct mei_device *dev,
struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->read_list.status &&
- !list_empty(&dev->read_list.mei_cb.cb_list)) {
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->read_list.mei_cb.cb_list, cb_list) {
- struct mei_cl *cl_temp;
- cl_temp = (struct mei_cl *)cb_pos->file_private;
+ dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->read_list.mei_cb.cb_list, cb_list) {
+ struct mei_cl *cl_temp;
+ cl_temp = (struct mei_cl *)pos->file_private;
- if (mei_cl_cmp_id(cl, cl_temp))
- return cb_pos;
- }
+ if (mei_cl_cmp_id(cl, cl_temp))
+ return pos;
}
return NULL;
}
@@ -402,15 +224,16 @@ static struct mei_cl_cb *find_read_list_entry(
static int mei_open(struct inode *inode, struct file *file)
{
struct mei_cl *cl;
- int if_num = iminor(inode), err;
struct mei_device *dev;
+ unsigned long cl_id;
+ int err;
err = -ENODEV;
if (!mei_device)
goto out;
dev = pci_get_drvdata(mei_device);
- if (if_num != MEI_MINOR_NUMBER || !dev)
+ if (!dev)
goto out;
mutex_lock(&dev->device_lock);
@@ -429,14 +252,16 @@ static int mei_open(struct inode *inode, struct file *file)
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
goto out_unlock;
- cl->host_client_id = find_first_zero_bit(dev->host_clients_map,
- MEI_CLIENTS_MAX);
- if (cl->host_client_id > MEI_CLIENTS_MAX)
+ cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
+ if (cl_id >= MEI_CLIENTS_MAX)
goto out_unlock;
+ cl->host_client_id = cl_id;
+
dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
dev->open_handle_count++;
+
list_add_tail(&cl->link, &dev->file_list);
set_bit(cl->host_client_id, dev->host_clients_map);
@@ -446,7 +271,7 @@ static int mei_open(struct inode *inode, struct file *file)
file->private_data = cl;
mutex_unlock(&dev->device_lock);
- return 0;
+ return nonseekable_open(inode, file);
out_unlock:
mutex_unlock(&dev->device_lock);
@@ -492,8 +317,7 @@ static int mei_release(struct inode *inode, struct file *file)
cl->me_client_id);
if (dev->open_handle_count > 0) {
- clear_bit(cl->host_client_id,
- dev->host_clients_map);
+ clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
mei_remove_client_from_file_list(dev, cl->host_client_id);
@@ -554,7 +378,7 @@ static int mei_release(struct inode *inode, struct file *file)
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_read(struct file *file, char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb_pos = NULL;
@@ -673,9 +497,7 @@ copy_buffer:
/* information size may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length)) {
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
rets = -EFAULT;
goto free;
}
@@ -711,7 +533,7 @@ out:
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_write(struct file *file, const char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *write_cb = NULL;
@@ -762,8 +584,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
cl->read_cb = NULL;
cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE &&
- !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
*offset = 0;
@@ -1034,7 +855,7 @@ out:
*/
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long data)
+ unsigned int cmd, unsigned long data)
{
return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
@@ -1090,6 +911,206 @@ out:
return mask;
}
+/*
+ * file operations structure will be used for mei char device.
+ */
+static const struct file_operations mei_fops = {
+ .owner = THIS_MODULE,
+ .read = mei_read,
+ .unlocked_ioctl = mei_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mei_compat_ioctl,
+#endif
+ .open = mei_open,
+ .release = mei_release,
+ .write = mei_write,
+ .poll = mei_poll,
+ .llseek = no_llseek
+};
+
+
+/*
+ * Misc Device Struct
+ */
+static struct miscdevice mei_misc_device = {
+ .name = MEI_DRIVER_NAME,
+ .fops = &mei_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int __devinit mei_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ int err;
+
+ mutex_lock(&mei_mutex);
+ if (mei_device) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, mei_driver_name);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_device_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ /* mapping IO device memory */
+ dev->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!dev->mem_addr) {
+ printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_interrupt_thread_handler,
+ 0, mei_driver_name, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_interrupt_quick_handler,
+ mei_interrupt_thread_handler,
+ IRQF_SHARED, mei_driver_name, dev);
+
+ if (err) {
+ printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto unmap_memory;
+ }
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ if (mei_hw_init(dev)) {
+ printk(KERN_ERR "mei: Init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = misc_register(&mei_misc_device);
+ if (err)
+ goto release_irq;
+
+ mei_device = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("mei: Driver initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ /* disable interrupts */
+ dev->host_hw_state = mei_hcsr_read(dev);
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+unmap_memory:
+ pci_iounmap(pdev, dev->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ printk(KERN_ERR "mei: Driver initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void __devexit mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ if (mei_device != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->device_lock);
+
+ mei_wd_stop(dev, false);
+
+ mei_device = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+ mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
+ mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (dev->mem_addr)
+ pci_iounmap(pdev, dev->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
@@ -1173,131 +1194,6 @@ static struct pci_driver mei_driver = {
.driver.pm = MEI_PM_OPS,
};
-/*
- * file operations structure will be used for mei char device.
- */
-static const struct file_operations mei_fops = {
- .owner = THIS_MODULE,
- .read = mei_read,
- .unlocked_ioctl = mei_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mei_compat_ioctl,
-#endif
- .open = mei_open,
- .release = mei_release,
- .write = mei_write,
- .poll = mei_poll,
-};
-
-/**
- * mei_registration_cdev - sets up the cdev structure for mei device.
- *
- * @dev: char device struct
- * @hminor: minor number for registration char device
- * @fops: file operations structure
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_registration_cdev(struct cdev *dev, int hminor,
- const struct file_operations *fops)
-{
- int ret, devno = MKDEV(mei_major, hminor);
-
- cdev_init(dev, fops);
- dev->owner = THIS_MODULE;
- ret = cdev_add(dev, devno, 1);
- /* Fail gracefully if need be */
- if (ret)
- printk(KERN_ERR "mei: Error %d registering mei device %d\n",
- ret, hminor);
- return ret;
-}
-
-/**
- * mei_register_cdev - registers mei char device
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_register_cdev(void)
-{
- int ret;
- dev_t dev;
-
- /* registration of char devices */
- ret = alloc_chrdev_region(&dev, MEI_MINORS_BASE, MEI_MINORS_COUNT,
- MEI_DRIVER_NAME);
- if (ret) {
- printk(KERN_ERR "mei: Error allocating char device region.\n");
- return ret;
- }
-
- mei_major = MAJOR(dev);
-
- ret = mei_registration_cdev(&mei_cdev, MEI_MINOR_NUMBER,
- &mei_fops);
- if (ret)
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-
- return ret;
-}
-
-/**
- * mei_unregister_cdev - unregisters mei char device
- */
-static void mei_unregister_cdev(void)
-{
- cdev_del(&mei_cdev);
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-}
-
-/**
- * mei_sysfs_device_create - adds device entry to sysfs
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_sysfs_device_create(void)
-{
- struct class *class;
- void *tmphdev;
- int err;
-
- class = class_create(THIS_MODULE, MEI_DRIVER_NAME);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- printk(KERN_ERR "mei: Error creating mei class.\n");
- goto err_out;
- }
-
- tmphdev = device_create(class, NULL, mei_cdev.dev, NULL,
- MEI_DEV_NAME);
- if (IS_ERR(tmphdev)) {
- err = PTR_ERR(tmphdev);
- goto err_destroy;
- }
-
- mei_class = class;
- return 0;
-
-err_destroy:
- class_destroy(class);
-err_out:
- return err;
-}
-
-/**
- * mei_sysfs_device_remove - unregisters the device entry on sysfs
- */
-static void mei_sysfs_device_remove(void)
-{
- if (IS_ERR_OR_NULL(mei_class))
- return;
-
- device_destroy(mei_class, mei_cdev.dev);
- class_destroy(mei_class);
-}
-
/**
* mei_init_module - Driver Registration Routine
*
@@ -1314,26 +1210,9 @@ static int __init mei_init_module(void)
mei_driver_string, mei_driver_version);
/* init pci module */
ret = pci_register_driver(&mei_driver);
- if (ret < 0) {
+ if (ret < 0)
printk(KERN_ERR "mei: Error registering driver.\n");
- goto end;
- }
- ret = mei_register_cdev();
- if (ret)
- goto unregister_pci;
-
- ret = mei_sysfs_device_create();
- if (ret)
- goto unregister_cdev;
-
- return ret;
-
-unregister_cdev:
- mei_unregister_cdev();
-unregister_pci:
- pci_unregister_driver(&mei_driver);
-end:
return ret;
}
@@ -1347,8 +1226,7 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- mei_sysfs_device_remove();
- mei_unregister_cdev();
+ misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("mei: Driver unloaded successfully.\n");
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 17302ad2531..516bfe7319a 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -1,78 +1,74 @@
-Intel MEI
+Intel(R) Management Engine Interface (Intel(R) MEI)
=======================
Introduction
=======================
-The Intel Management Engine (Intel ME) is an isolated and
-protected computing resource (Coprocessor) residing inside
-Intel chipsets. The Intel ME provides support for computer/IT
-management features.
-The Feature set depends on the Intel chipset SKU.
+The Intel Management Engine (Intel ME) is an isolated and protected computing
+resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
+provides support for computer/IT management features. The feature set
+depends on the Intel chipset SKU.
-The Intel Management Engine Interface (Intel MEI, previously known
-as HECI) is the interface between the Host and Intel ME.
-This interface is exposed to the host as a PCI device.
-The Intel MEI Driver is in charge of the communication channel
-between a host application and the ME feature.
+The Intel Management Engine Interface (Intel MEI, previously known as HECI)
+is the interface between the Host and Intel ME. This interface is exposed
+to the host as a PCI device. The Intel MEI Driver is in charge of the
+communication channel between a host application and the Intel ME feature.
-Each Intel ME feature (Intel ME Client) is addressed by
-GUID/UUID and each feature defines its own protocol.
-The protocol is message-based with a header and payload up to
-512 bytes.
+Each Intel ME feature (Intel ME Client) is addressed by a GUID/UUID and
+each client has its own protocol. The protocol is message-based with a
+header and payload up to 512 bytes.
-[place holder to URL to protocol definitions]
-
-Prominent usage of the Interface is to communicate with
-Intel Active Management Technology (Intel AMT)
-implemented in firmware running on the Intel ME.
+Prominent usage of the Intel ME Interface is to communicate with Intel(R)
+Active Management Technology (Intel AMT) implemented in firmware running on
+the Intel ME.
Intel AMT provides the ability to manage a host remotely out-of-band (OOB)
-even when the host processor has crashed or is in a sleep state.
+even when the operating system running on the host processor has crashed or
+is in a sleep state.
Some examples of Intel AMT usage are:
- Monitoring hardware state and platform components
- - Remote power off/on (useful for green computing or overnight IT maintenance)
+ - Remote power off/on (useful for green computing or overnight IT
+ maintenance)
- OS updates
- Storage of useful platform information such as software assets
- - built-in hardware KVM
- - selective network isolation of Ethernet and IP protocol flows based on
- policies set by a remote management console
+ - Built-in hardware KVM
+ - Selective network isolation of Ethernet and IP protocol flows based
+ on policies set by a remote management console
- IDE device redirection from remote management console
Intel AMT (OOB) communication is based on SOAP (deprecated
-starting with Release 6.0) over HTTP/HTTPS or WS-Management protocol
-over HTTP and HTTPS that are received from a remote
-management console application.
+starting with Release 6.0) over HTTP/S or WS-Management protocol over
+HTTP/S, received from a remote management console application.
For more information about Intel AMT:
-http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/aboutintelamt.htm
-
+http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
-MEI Driver
+Intel MEI Driver
=======================
-The driver exposes a character device called /dev/mei.
+The driver exposes a misc device called /dev/mei.
-An application maintains communication with an ME feature while
-/dev/mei is open. The binding to a specific features is performed
-by calling MEI_CONNECT_CLIENT_IOCTL, which passes the desired UUID.
-The number of instances of an ME feature that can be opened
-at the same time depends on the ME feature, but most of the
+An application maintains communication with an Intel ME feature while
+/dev/mei is open. The binding to a specific feature is performed by calling
+MEI_CONNECT_CLIENT_IOCTL, which passes the desired UUID.
+The number of instances of an Intel ME feature that can be opened
+at the same time depends on the Intel ME feature, but most of the
features allow only a single instance.
-
-The Intel AMT Host Interface (AMTHI) feature requires multiple
-simultaneous user applications, therefore the MEI driver handles
+The Intel AMT Host Interface (Intel AMTHI) feature supports multiple
+simultaneous user applications. Therefore, the Intel MEI driver handles
this internally by maintaining request queues for the applications.
-The driver is oblivious to data that are passed between
+The driver is oblivious to the data that is passed between the firmware
+feature and the host application.
-Because some of the ME features can change the system
-configuration, the driver by default allows only privileged
+Because some of the Intel ME features can change the system
+configuration, the driver by default allows only a privileged
user to access it.
-A Code snippet for application communicating with AMTHI client:
+A code snippet for an application communicating with
+an Intel AMTHI client:
struct mei_connect_client_data data;
fd = open(MEI_DEVICE);
@@ -80,7 +76,7 @@ A Code snippet for application communicating with AMTHI client:
ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
- printf(“Ver=%d, MaxLen=%ld\n”,
+ printf("Ver=%d, MaxLen=%ld\n",
data.d.in_client_uuid.protocol_version,
data.d.in_client_uuid.max_msg_length);
@@ -95,76 +91,106 @@ A Code snippet for application communicating with AMTHI client:
[...]
close(fd);
-ME Applications:
+IOCTL:
+======
+The Intel MEI Driver supports the following IOCTL command:
+ IOCTL_MEI_CONNECT_CLIENT Connect to firmware Feature (client).
+
+ usage:
+ struct mei_connect_client_data clientData;
+ ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &clientData);
+
+ inputs:
+ The mei_connect_client_data struct contains the following
+ input field:
+
+ in_client_uuid - UUID of the firmware feature to
+ connect to.
+ outputs:
+ out_client_properties - Client Properties: MTU and Protocol Version.
+
+ error returns:
+ EINVAL Wrong IOCTL Number
+ ENODEV Device or Connection is not initialized or ready.
+ (e.g. Wrong UUID)
+ ENOMEM Unable to allocate memory for client internal data.
+ EFAULT Fatal Error (e.g. Unable to access user input data)
+ EBUSY Connection Already Open
+
+ Notes:
+ max_msg_length (MTU) in the client properties describes the maximum
+ data that can be sent or received (e.g. if MTU=2K, the application can
+ send requests of up to 2k bytes and receive responses of up to 2k bytes).
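+
+ A minimal error-handling sketch (illustrative only; the exact layout of
+ struct mei_connect_client_data is defined by the driver's mei.h header):
+
+ struct mei_connect_client_data data;
+
+ memset(&data, 0, sizeof(data));
+ /* fill in the UUID of the desired firmware client here */
+ if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
+ /* errno is one of the values listed above,
+ e.g. ENODEV for an unknown UUID */
+ perror("IOCTL_MEI_CONNECT_CLIENT");
+ }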
+
+Intel ME Applications:
==============
1) Intel Local Management Service (Intel LMS)
- Applications running locally on the platform communicate with
- Intel AMT Release 2.0 and later releases in the same way
- that network applications do via SOAP over HTTP (deprecated
- starting with Release 6.0) or with WS-Management over SOAP over
- HTTP. which means that some Intel AMT feature can be access
- from a local application using same Network interface as for
- remote application.
-
- When a local application sends a message addressed to the local
- Intel AMT host name, the Local Manageability Service (LMS),
- which listens for traffic directed to the host name, intercepts
- the message and routes it to the Intel Management Engine Interface.
+
+ Applications running locally on the platform communicate with Intel AMT Release
+ 2.0 and later releases in the same way that network applications do via SOAP
+ over HTTP (deprecated starting with Release 6.0) or with WS-Management over
+ SOAP over HTTP. This means that some Intel AMT features can be accessed from a
+ local application using the same network interface as a remote application
+ communicating with Intel AMT over the network.
+
+ When a local application sends a message addressed to the local Intel AMT host
+ name, the Intel LMS, which listens for traffic directed to the host name,
+ intercepts the message and routes it to the Intel MEI.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_
- Reference_Guide/WordDocuments/localaccess1.htm
-
- The LMS opens a connection using the MEI driver to the LMS
- FW feature using a defined UUID and then communicates with the
- feature using a protocol
- called Intel(R) AMT Port Forwarding Protocol (APF protocol).
- The protocol is used to maintain multiple sessions with
- Intel AMT from a single application.
- See the protocol specification in
- the Intel(R) AMT Implementation and Reference Guide
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/HTMLDocuments/MPSDocuments/Intel%20AMT%20Port%20Forwarding%20Protocol%20Reference%20Manual.pdf
-
- 2) Intel AMT Remote configuration using a Local Agent:
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "About Intel AMT" => "Local Access"
+
+ For downloading Intel LMS:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
+
+ The Intel LMS opens a connection using the Intel MEI driver to the Intel LMS
+ firmware feature using a defined UUID and then communicates with the feature
+ using a protocol called Intel AMT Port Forwarding Protocol (Intel APF protocol).
+ The protocol is used to maintain multiple sessions with Intel AMT from a
+ single application.
+
+ See the protocol specification in the Intel AMT Software Development Kit (SDK)
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "SDK Resources" => "Intel(R) vPro(TM) Gateway (MPS)"
+ => "Information for Intel(R) vPro(TM) Gateway Developers"
+ => "Description of the Intel AMT Port Forwarding (APF) Protocol"
+
+ 2) Intel AMT Remote configuration using a Local Agent
A Local Agent enables IT personnel to configure Intel AMT out-of-the-box
- without requiring installing additional data to enable setup.
- The remote configuration process may involve an ISV-developed remote
- configuration agent that runs on the host.
+ without requiring the installation of additional data to enable setup. The remote
+ configuration process may involve an ISV-developed remote configuration
+ agent that runs on the host.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/remoteconfigurationwithalocalagent.htm
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "Setup and Configuration of Intel AMT" =>
+ "SDK Tools Supporting Setup and Configuration" =>
+ "Using the Local Agent Sample"
+
+ An open source Intel AMT configuration utility, implementing a local agent
+ that accesses the Intel MEI driver, can be found here:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
- How the Local Agent Works (including Command structs):
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/howthelocalagentsampleworks.htm
Intel AMT OS Health Watchdog:
=============================
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
-to whoever subscribed to this event. This mechanism means that
-IT knows when a platform crashes even when there is a hard failure
-on the host.
-The AMT Watchdog is composed of two parts:
- 1) FW Feature - that receives the heartbeats
- and sends an event when the heartbeats stop.
- 2) MEI driver – connects to the watchdog (WD) feature,
- configures the watchdog and sends the heartbeats.
-
-The MEI driver configures the Watchdog to expire by default
-every 120sec unless set by the user using module parameters.
-The Driver then sends heartbeats every 2sec.
+to any subscriber to this event. This mechanism means that
+IT knows when a platform crashes even when there is a hard failure on the host.
-If WD feature does not exist (i.e. the connection failed),
-the MEI driver will disable the sending of heartbeats.
+The Intel AMT Watchdog is composed of two parts:
+ 1) Firmware feature - receives the heartbeats
+ and sends an event when the heartbeats stop.
+ 2) Intel MEI driver - connects to the watchdog feature, configures the
+ watchdog and sends the heartbeats.
-Module Parameters
-=================
-watchdog_timeout - the user can use this module parameter
-to change the watchdog timeout setting.
+The Intel MEI driver uses the kernel watchdog to configure the Intel AMT
+Watchdog and to send heartbeats to it. The default timeout of the
+watchdog is 120 seconds.
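+
+A minimal user-space sketch of feeding the heartbeat via the generic Linux
+watchdog character device (assuming the node is /dev/watchdog; these are the
+standard watchdog ioctls, not an interface specific to this driver):
+
+ int wd = open("/dev/watchdog", O_WRONLY);
+ int timeout = 120; /* seconds */
+
+ ioctl(wd, WDIOC_SETTIMEOUT, &timeout); /* optionally change the timeout */
+ for (;;) {
+ ioctl(wd, WDIOC_KEEPALIVE, 0); /* send a heartbeat */
+ sleep(60);
+ }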
-This value sets the Intel AMT watchdog timeout interval in seconds;
-the default value is 120sec.
-in order to disable the watchdog activites set the value to 0.
-Normal values should be between 120 and 65535
+If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
+the Intel MEI driver will disable the sending of heartbeats.
Supported Chipsets:
==================
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index af4b1af9eea..82bacfc624c 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -23,13 +23,6 @@
#include "hw.h"
/*
- * MEI Char Driver Minors
- */
-#define MEI_MINORS_BASE 1
-#define MEI_MINORS_COUNT 1
-#define MEI_MINOR_NUMBER 1
-
-/*
* watch dog definition
*/
#define MEI_WATCHDOG_DATA_SIZE 16
@@ -42,11 +35,6 @@
*/
extern struct pci_dev *mei_device;
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-extern struct watchdog_device amt_wd_dev;
/*
* AMTHI Client UUID
@@ -175,7 +163,6 @@ struct mei_cl {
struct mei_io_list {
struct mei_cl_cb mei_cb;
- int status;
};
/* MEI private device struct */
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index ffca7ca3265..8094941a98f 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -35,12 +35,16 @@ const u8 mei_wd_state_independence_msg[3][4] = {
{0x07, 0x02, 0x01, 0x10}
};
+/*
+ * AMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
+
/* UUIDs for AMT F/W clients */
const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
0x9D, 0xA9, 0x15, 0x14, 0xCB,
0x32, 0xAB);
-
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
{
dev_dbg(&dev->pdev->dev, "timeout=%d.\n", timeout);
@@ -331,14 +335,14 @@ static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, unsigned int t
/*
* Watchdog Device structs
*/
-const struct watchdog_ops wd_ops = {
+static const struct watchdog_ops wd_ops = {
.owner = THIS_MODULE,
.start = mei_wd_ops_start,
.stop = mei_wd_ops_stop,
.ping = mei_wd_ops_ping,
.set_timeout = mei_wd_ops_set_timeout,
};
-const struct watchdog_info wd_info = {
+static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
.options = WDIOF_KEEPALIVEPING,
};
@@ -352,3 +356,25 @@ struct watchdog_device amt_wd_dev = {
};
+void mei_watchdog_register(struct mei_device *dev)
+{
+ dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n", dev->wd_timeout);
+
+ dev->wd_due_counter = !!dev->wd_timeout;
+
+ if (watchdog_register_device(&amt_wd_dev)) {
+ dev_err(&dev->pdev->dev, "unable to register watchdog device.\n");
+ dev->wd_interface_reg = false;
+ } else {
+ dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
+ dev->wd_interface_reg = true;
+ }
+}
+
+void mei_watchdog_unregister(struct mei_device *dev)
+{
+ if (dev->wd_interface_reg)
+ watchdog_unregister_device(&amt_wd_dev);
+ dev->wd_interface_reg = false;
+}
+
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index e06b867d1e0..fafdfa25e13 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -27,6 +27,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
@@ -727,8 +729,24 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, nvec);
nvec->dev = &pdev->dev;
- nvec->gpio = pdata->gpio;
- nvec->i2c_addr = pdata->i2c_addr;
+
+ if (pdata) {
+ nvec->gpio = pdata->gpio;
+ nvec->i2c_addr = pdata->i2c_addr;
+ } else if (nvec->dev->of_node) {
+ nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
+ if (nvec->gpio < 0) {
+ dev_err(&pdev->dev, "no gpio specified");
+ goto failed;
+ }
+ if (of_property_read_u32(nvec->dev->of_node, "slave-addr", &nvec->i2c_addr)) {
+ dev_err(&pdev->dev, "no i2c address specified");
+ goto failed;
+ }
+ } else {
+ dev_err(&pdev->dev, "no platform data\n");
+ goto failed;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -893,6 +911,13 @@ static int tegra_nvec_resume(struct platform_device *pdev)
#define tegra_nvec_resume NULL
#endif
+/* Match table for of_platform binding */
+static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
+ { .compatible = "nvidia,nvec", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
+
static struct platform_driver nvec_device_driver = {
.probe = tegra_nvec_probe,
.remove = __devexit_p(tegra_nvec_remove),
@@ -901,6 +926,7 @@ static struct platform_driver nvec_device_driver = {
.driver = {
.name = "nvec",
.owner = THIS_MODULE,
+ .of_match_table = nvidia_nvec_of_match,
}
};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index af24ddfb58c..3d9199320d8 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -34,8 +34,8 @@
/* Module definitions */
-static int resumeline = 898;
-module_param(resumeline, int, 0444);
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
/* Default off since it doesn't work on DCON ASIC in B-test OLPC board */
static int useaa = 1;
@@ -456,7 +456,7 @@ static ssize_t dcon_mono_store(struct device *dev,
unsigned long enable_mono;
int rc;
- rc = strict_strtoul(buf, 10, &enable_mono);
+ rc = kstrtoul(buf, 10, &enable_mono);
if (rc)
return rc;
@@ -472,7 +472,7 @@ static ssize_t dcon_freeze_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -498,10 +498,10 @@ static ssize_t dcon_freeze_store(struct device *dev,
static ssize_t dcon_resumeline_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- unsigned long rl;
+ unsigned short rl;
int rc;
- rc = strict_strtoul(buf, 10, &rl);
+ rc = kstrtou16(buf, 10, &rl);
if (rc)
return rc;
@@ -517,7 +517,7 @@ static ssize_t dcon_sleep_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -755,9 +755,9 @@ static int dcon_resume(struct i2c_client *client)
irqreturn_t dcon_interrupt(int irq, void *id)
{
struct dcon_priv *dcon = id;
- int status = pdata->read_status();
+ u8 status;
- if (status == -1)
+ if (pdata->read_status(&status))
return IRQ_NONE;
switch (status & 3) {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 0264c94375a..167a41778be 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -84,7 +84,7 @@ struct dcon_platform_data {
int (*init)(struct dcon_priv *);
void (*bus_stabilize_wiggle)(void);
void (*set_dconload)(int);
- u8 (*read_status)(void);
+ int (*read_status)(u8 *);
};
#include <linux/interrupt.h>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 2245213df60..cb6ce0cf92a 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -183,17 +183,15 @@ static void dcon_set_dconload_1(int val)
gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
-static u8 dcon_read_status_xo_1(void)
+static int dcon_read_status_xo_1(u8 *status)
{
- u8 status;
-
- status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
- status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+ *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+ *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
/* Clear the negative edge status for GPIO7 */
cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1 = {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index a6a6cf2adc4..69415eec425 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -167,20 +167,18 @@ static void dcon_set_dconload_xo_1_5(int val)
gpio_set_value(VX855_GPIO(1), val);
}
-static u8 dcon_read_status_xo_1_5(void)
+static int dcon_read_status_xo_1_5(u8 *status)
{
- u8 status;
-
if (!dcon_was_irq())
return -1;
/* i believe this is the same as "inb(0x44b) & 3" */
- status = gpio_get_value(VX855_GPI(10));
- status |= gpio_get_value(VX855_GPI(11)) << 1;
+ *status = gpio_get_value(VX855_GPI(10));
+ *status |= gpio_get_value(VX855_GPI(11)) << 1;
dcon_clear_irq();
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1_5 = {
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
new file mode 100644
index 00000000000..81a7cba4a0c
--- /dev/null
+++ b/drivers/staging/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+ tristate "OMAP DRM"
+ depends on DRM && !FB_OMAP2
+ depends on ARCH_OMAP2PLUS
+ select DRM_KMS_HELPER
+ select OMAP2_DSS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default n
+ help
+ DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+ int "Number of CRTCs"
+ range 1 10
+ default 1 if ARCH_OMAP2 || ARCH_OMAP3
+ default 2 if ARCH_OMAP4
+ depends on DRM_OMAP
+ help
+ Select the number of video overlays which can be used as framebuffers.
+ The remaining overlays are reserved for video.
+
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
new file mode 100644
index 00000000000..592cf69020c
--- /dev/null
+++ b/drivers/staging/omapdrm/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+ omap_debugfs.o \
+ omap_crtc.o \
+ omap_encoder.o \
+ omap_connector.o \
+ omap_fb.o \
+ omap_fbdev.o \
+ omap_gem.o \
+ omap_dmm_tiler.o \
+ tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
new file mode 100644
index 00000000000..55b18377ac4
--- /dev/null
+++ b/drivers/staging/omapdrm/TODO
@@ -0,0 +1,38 @@
+TODO
+. check error handling/cleanup paths
+. add drm_plane / overlay support
+. add video decode/encode support (via syslink3 + codec-engine)
+. still some rough edges with flipping.. event back to userspace should
+ really come after VSYNC interrupt
+. where should we do eviction (detach_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pin'd. (It is mostly only
+ of interest in case you have a swap partition/file.. which a lot of
+ these devices do not.. but it doesn't hurt for the driver to do the
+ right thing anyways.)
+ . Use mm_shrinker to trigger unpinning pages. Need to figure out how
+ to handle next issue first (I think?)
+ . Note TTM already has some mm_shrinker stuff.. maybe an argument to
+ move to TTM? Or maybe something that could be factored out in common?
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
+ part connector. Which results in a bit of duct tape to fwd calls from
+ encoder to connector. Possibly this could be done a bit better.
+. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
+ access is made from any component in the system. Which means on suspend
+ CRTC's should be disabled, and on resume the LUT should be reprogrammed
+ before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
+ buffer mapped in DMM/TILER before LUT is reloaded.
+. Add debugfs information for DMM/TILER
+
+Userspace:
+. git://github.com/robclark/xf86-video-omap.git
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
new file mode 100644
index 00000000000..5e2856c0e0b
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -0,0 +1,371 @@
+/*
+ * drivers/staging/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+ struct drm_connector base;
+ struct omap_dss_device *dssdev;
+};
+
+static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings)
+{
+ mode->clock = timings->pixel_clock;
+
+ mode->hdisplay = timings->x_res;
+ mode->hsync_start = mode->hdisplay + timings->hfp;
+ mode->hsync_end = mode->hsync_start + timings->hsw;
+ mode->htotal = mode->hsync_end + timings->hbp;
+
+ mode->vdisplay = timings->y_res;
+ mode->vsync_start = mode->vdisplay + timings->vfp;
+ mode->vsync_end = mode->vsync_start + timings->vsw;
+ mode->vtotal = mode->vsync_end + timings->vbp;
+
+ /* note: whether or not it is interlaced, +/- h/vsync, etc,
+ * which should be set in the mode flags, is not exposed in
+ * the omap_video_timings struct.. but hdmi driver tracks
+ * those separately so all we have to set the mode
+ * is the way to recover these timings values, and the
+ * omap_dss_driver would do the rest.
+ */
+}
+
+static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode)
+{
+ timings->pixel_clock = mode->clock;
+
+ timings->x_res = mode->hdisplay;
+ timings->hfp = mode->hsync_start - mode->hdisplay;
+ timings->hsw = mode->hsync_end - mode->hsync_start;
+ timings->hbp = mode->htotal - mode->hsync_end;
+
+ timings->y_res = mode->vdisplay;
+ timings->vfp = mode->vsync_start - mode->vdisplay;
+ timings->vsw = mode->vsync_end - mode->vsync_start;
+ timings->vbp = mode->vtotal - mode->vsync_end;
+}
+
+static void omap_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ int old_dpms;
+
+ DBG("%s: %d", dssdev->name, mode);
+
+ old_dpms = connector->dpms;
+
+ /* from off to on, do from crtc to connector */
+ if (mode < old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /* store resume info for suspended displays */
+ switch (dssdev->state) {
+ case OMAP_DSS_DISPLAY_SUSPENDED:
+ dssdev->activate_after_resume = true;
+ break;
+ case OMAP_DSS_DISPLAY_DISABLED: {
+ int ret = dssdev->driver->enable(dssdev);
+ if (ret) {
+ DBG("%s: failed to enable: %d",
+ dssdev->name, ret);
+ dssdev->driver->disable(dssdev);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ } else {
+ /* TODO */
+ }
+
+ /* from on to off, do from connector to crtc */
+ if (mode > old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+}
+
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum drm_connector_status ret;
+
+ if (dssdrv->detect) {
+ if (dssdrv->detect(dssdev)) {
+ ret = connector_status_connected;
+ } else {
+ ret = connector_status_disconnected;
+ }
+ } else {
+ ret = connector_status_unknown;
+ }
+
+ VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+ return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+ dssdev->driver->disable(dssdev);
+
+ DBG("%s", omap_connector->dssdev->name);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+
+ omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID 512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct drm_device *dev = connector->dev;
+ int n = 0;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ /* if display exposes EDID, then we parse that in the normal way to
+ * build table of supported modes.. otherwise (ie. fixed resolution
+ * LCD panels) we just return a single mode corresponding to the
+ * currently configured timings:
+ */
+ if (dssdrv->read_edid) {
+ void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+ if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+ drm_edid_is_valid(edid)) {
+ drm_mode_connector_update_edid_property(
+ connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(connector->display_info.raw_edid);
+ connector->display_info.raw_edid = edid;
+ } else {
+ drm_mode_connector_update_edid_property(
+ connector, NULL);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct omap_video_timings timings;
+
+ dssdrv->get_timings(dssdev, &timings);
+
+ copy_timings_omap_to_drm(mode, &timings);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ n = 1;
+ }
+
+ return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings = {0};
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *new_mode;
+ int ret = MODE_BAD;
+
+ copy_timings_drm_to_omap(&timings, mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ if (!dssdrv->check_timings(dssdev, &timings)) {
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ new_mode->clock = timings.pixel_clock;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+ }
+
+ DBG("connector: mode %s: "
+ "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ (ret == MODE_OK) ? "valid" : "invalid",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector)
+{
+ int i;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct drm_mode_object *obj;
+
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+
+ if (obj) {
+ struct drm_encoder *encoder = obj_to_encoder(obj);
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+ DBG("%s: found %s", omap_connector->dssdev->name,
+ mgr->name);
+ return encoder;
+ }
+ }
+
+ DBG("%s: no encoder", omap_connector->dssdev->name);
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+ .dpms = omap_connector_dpms,
+ .detect = omap_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+ .get_modes = omap_connector_get_modes,
+ .mode_valid = omap_connector_mode_valid,
+ .best_encoder = omap_connector_attached_encoder,
+};
+
+/* called from encoder when mode is set, to propagate settings to the dssdev */
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings;
+
+ copy_timings_drm_to_omap(&timings, mode);
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_connector->dssdev->name,
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ if (dssdrv->check_timings(dssdev, &timings)) {
+ dev_err(dev->dev, "could not set timings\n");
+ return;
+ }
+
+ dssdrv->set_timings(dssdev, &timings);
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ /* TODO: enable when supported in dss */
+ VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev)
+{
+ struct drm_connector *connector = NULL;
+ struct omap_connector *omap_connector;
+
+ DBG("%s", dssdev->name);
+
+ omap_dss_get_device(dssdev);
+
+ omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ if (!omap_connector) {
+ dev_err(dev->dev, "could not allocate connector\n");
+ goto fail;
+ }
+
+ omap_connector->dssdev = dssdev;
+ connector = &omap_connector->base;
+
+ drm_connector_init(dev, connector, &omap_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+ connector->polled = 0;
+ else
+#endif
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ if (connector) {
+ omap_connector_destroy(connector);
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
new file mode 100644
index 00000000000..cffdf5e1239
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -0,0 +1,326 @@
+/*
+ * drivers/staging/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_mode.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+ struct drm_crtc base;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info info;
+ int id;
+
+ /* if there is a pending flip, this will be non-null: */
+ struct drm_pending_vblank_event *event;
+};
+
+/* push changes down to dss2 */
+static int commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+ struct omap_overlay_info *info = &omap_crtc->info;
+ int ret;
+
+ DBG("%s", omap_crtc->ovl->name);
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
+ info->out_height, info->screen_width);
+ DBG("%d,%d %08x", info->pos_x, info->pos_y, info->paddr);
+
+ /* NOTE: do we want to do this at all here, or just wait
+ * for dpms(ON) since other CRTC's may not have their mode
+ * set yet, so fb dimensions may still change..
+ */
+ ret = ovl->set_overlay_info(ovl, info);
+ if (ret) {
+ dev_err(dev->dev, "could not set overlay info\n");
+ return ret;
+ }
+
+ /* our encoder doesn't necessarily get a commit() after this, in
+ * particular in the dpms() and mode_set_base() cases, so force the
+ * manager to update:
+ *
+ * could this be in the encoder somehow?
+ */
+ if (ovl->manager) {
+ ret = ovl->manager->apply(ovl->manager);
+ if (ret) {
+ dev_err(dev->dev, "could not apply settings\n");
+ return ret;
+ }
+ }
+
+ if (info->enabled) {
+ omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
+ crtc->fb->width, crtc->fb->height);
+ }
+
+ return 0;
+}
+
+/* update parameters that are dependent on the framebuffer dimensions and
+ * position within the fb that this crtc scans out from. This is called
+ * when framebuffer dimensions or x,y base may have changed, either due
+ * to our mode, or a change in another crtc that is scanning out of the
+ * same fb.
+ */
+static void update_scanout(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ dma_addr_t paddr;
+ unsigned int screen_width;
+
+ omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
+ NULL, &paddr, &screen_width);
+
+ DBG("%s: %d,%d: %08x (%d)", omap_crtc->ovl->name,
+ crtc->x, crtc->y, (u32)paddr, screen_width);
+
+ omap_crtc->info.paddr = paddr;
+ omap_crtc->info.screen_width = screen_width;
+}
+
+static void omap_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+}
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ drm_crtc_cleanup(crtc);
+ kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d", omap_crtc->ovl->name, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ update_scanout(crtc);
+ omap_crtc->info.enabled = true;
+ } else {
+ omap_crtc->info.enabled = false;
+ }
+
+ WARN_ON(commit(crtc));
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s: %d,%d: %dx%d", omap_crtc->ovl->name, x, y,
+ mode->hdisplay, mode->vdisplay);
+
+ /* just use adjusted mode */
+ mode = adjusted_mode;
+
+ omap_crtc->info.width = mode->hdisplay;
+ omap_crtc->info.height = mode->vdisplay;
+ omap_crtc->info.out_width = mode->hdisplay;
+ omap_crtc->info.out_height = mode->vdisplay;
+ omap_crtc->info.color_mode = OMAP_DSS_COLOR_RGB24U;
+ omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
+ omap_crtc->info.rotation = OMAP_DSS_ROT_0;
+ omap_crtc->info.global_alpha = 0xff;
+ omap_crtc->info.mirror = 0;
+ omap_crtc->info.pos_x = 0;
+ omap_crtc->info.pos_y = 0;
+#if 0 /* re-enable when these are available in DSS2 driver */
+ omap_crtc->info.zorder = 3; /* GUI in the front, video behind */
+ omap_crtc->info.min_x_decim = 1;
+ omap_crtc->info.max_x_decim = 1;
+ omap_crtc->info.min_y_decim = 1;
+ omap_crtc->info.max_y_decim = 1;
+#endif
+
+ update_scanout(crtc);
+
+ return 0;
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct omap_overlay *ovl = omap_crtc->ovl;
+
+ DBG("%s", omap_crtc->ovl->name);
+
+ ovl->get_overlay_info(ovl, &omap_crtc->info);
+
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb);
+
+ update_scanout(crtc);
+
+ return commit(crtc);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->ovl->name);
+}
+
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_pending_vblank_event *event = omap_crtc->event;
+ struct timeval now;
+ unsigned long flags;
+
+ WARN_ON(!event);
+
+ omap_crtc->event = NULL;
+
+ update_scanout(crtc);
+ WARN_ON(commit(crtc));
+
+ /* wakeup userspace */
+ /* TODO: this should happen *after* flip in vsync IRQ handler */
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event->event.sequence = drm_vblank_count_and_time(
+ dev, omap_crtc->id, &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+ list_add_tail(&event->base.link,
+ &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+
+ if (omap_crtc->event) {
+ dev_err(dev->dev, "already a pending flip\n");
+ return -EINVAL;
+ }
+
+ crtc->fb = fb;
+ omap_crtc->event = event;
+
+ omap_gem_op_async(omap_framebuffer_bo(fb), OMAP_GEM_READ,
+ page_flip_cb, crtc);
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+ .gamma_set = omap_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = omap_crtc_destroy,
+ .page_flip = omap_crtc_page_flip_locked,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+ .dpms = omap_crtc_dpms,
+ .mode_fixup = omap_crtc_mode_fixup,
+ .mode_set = omap_crtc_mode_set,
+ .prepare = omap_crtc_prepare,
+ .commit = omap_crtc_commit,
+ .mode_set_base = omap_crtc_mode_set_base,
+ .load_lut = omap_crtc_load_lut,
+};
+
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ return omap_crtc->ovl;
+}
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+ DBG("%s", ovl->name);
+
+ if (!omap_crtc) {
+ dev_err(dev->dev, "could not allocate CRTC\n");
+ goto fail;
+ }
+
+ omap_crtc->ovl = ovl;
+ omap_crtc->id = id;
+ crtc = &omap_crtc->base;
+ drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ if (crtc) {
+ omap_crtc_destroy(crtc);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
new file mode 100644
index 00000000000..da920dfdc59
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -0,0 +1,42 @@
+/*
+ * drivers/staging/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct drm_info_list omap_debugfs_list[] = {
+ {"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list), minor);
+}
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
new file mode 100644
index 00000000000..2f529ab4b7c
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,187 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_DESCR__1 0x510
+#define DMM_PAT_DESCR__2 0x520
+#define DMM_PAT_DESCR__3 0x530
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+#define DMM_IRQSTAT_DST (1<<0)
+#define DMM_IRQSTAT_LST (1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC (1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+ DMM_IRQSTAT_ERR_INV_DATA | \
+ DMM_IRQSTAT_ERR_UPD_AREA | \
+ DMM_IRQSTAT_ERR_UPD_CTRL | \
+ DMM_IRQSTAT_ERR_UPD_DATA | \
+ DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY (1<<0)
+#define DMM_PATSTATUS_VALID (1<<1)
+#define DMM_PATSTATUS_RUN (1<<2)
+#define DMM_PATSTATUS_DONE (1<<3)
+#define DMM_PATSTATUS_LINKED (1<<4)
+#define DMM_PATSTATUS_BYPASSED (1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA (1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA (1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL (1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA (1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS (1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
+ DMM_PATSTATUS_ERR_INV_DATA | \
+ DMM_PATSTATUS_ERR_UPD_AREA | \
+ DMM_PATSTATUS_ERR_UPD_CTRL | \
+ DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+ PAT_STATUS,
+ PAT_DESCR
+};
+
+struct pat_ctrl {
+ u32 start:4;
+ u32 dir:4;
+ u32 lut_id:8;
+ u32 sync:12;
+ u32 ini:4;
+};
+
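+/* in-memory copy of a PAT descriptor as consumed by the refill engine:
+ * next_pa chains descriptors, data_pa points to the page-address payload
+ */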
+struct pat {
+ uint32_t next_pa;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
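+/* one PAT entry is 4 bytes; a full container refill covers 256x128 slots */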
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+
+struct dmm;
+
+struct dmm_txn {
+ void *engine_handle;
+ struct tcm *tcm;
+
+ uint8_t *current_va;
+ dma_addr_t current_pa;
+
+ struct pat *last_pat;
+};
+
+struct refill_engine {
+ int id;
+ struct dmm *dmm;
+ struct tcm *tcm;
+
+ uint8_t *refill_va;
+ dma_addr_t refill_pa;
+
+ /* only one trans per engine for now */
+ struct dmm_txn txn;
+
+ /* offset to lut associated with container */
+ u32 *lut_offset;
+
+ wait_queue_head_t wait_for_refill;
+
+ struct list_head idle_node;
+};
+
+struct dmm {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ struct page *dummy_page;
+ dma_addr_t dummy_pa;
+
+ void *refill_va;
+ dma_addr_t refill_pa;
+
+ /* refill engines */
+ struct semaphore engine_sem;
+ struct list_head idle_head;
+ struct refill_engine *engines;
+ int num_engines;
+
+ /* container information */
+ int container_width;
+ int container_height;
+ int lut_width;
+ int lut_height;
+ int num_lut;
+
+ /* array of LUT - TCM containers */
+ struct tcm **tcm;
+
+ /* LUT table storage */
+ u32 *lut;
+
+ /* allocation list and lock */
+ struct list_head alloc_head;
+ spinlock_t list_lock;
+};
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
new file mode 100644
index 00000000000..852d9440f72
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -0,0 +1,830 @@
+/*
+ * DMM IOMMU driver support functions for TI OMAP processors.
+ *
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_dmm_priv.h"
+
+/* mappings for associating views to luts */
+static struct tcm *containers[TILFMT_NFORMATS];
+static struct dmm *omap_dmm;
+
+/* Geometry table */
+#define GEOM(xshift, yshift, bytes_per_pixel) { \
+ .x_shft = (xshift), \
+ .y_shft = (yshift), \
+ .cpp = (bytes_per_pixel), \
+ .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+ .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+ }
+
+static const struct {
+ uint32_t x_shft; /* unused X-bits (as part of bpp) */
+ uint32_t y_shft; /* unused Y-bits (as part of bpp) */
+ uint32_t cpp; /* bytes/chars per pixel */
+ uint32_t slot_w; /* width of each slot (in pixels) */
+ uint32_t slot_h; /* height of each slot (in pixels) */
+} geom[TILFMT_NFORMATS] = {
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+};
+
+
+/* lookup table for registers w/ per-engine instances */
+static const uint32_t reg[][4] = {
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+};
+
+/* simple allocator to grab next 16 byte aligned memory from txn */
+static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+{
+ void *ptr;
+ struct refill_engine *engine = txn->engine_handle;
+
+ /* dmm programming requires 16 byte aligned addresses */
+ txn->current_pa = round_up(txn->current_pa, 16);
+ txn->current_va = (void *)round_up((long)txn->current_va, 16);
+
+ ptr = txn->current_va;
+ *pa = txn->current_pa;
+
+ txn->current_pa += sz;
+ txn->current_va += sz;
+
+ BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+
+ return ptr;
+}
+
+/* check status and spin until wait_mask comes true */
+static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+{
+ struct dmm *dmm = engine->dmm;
+ uint32_t r = 0, err, i;
+
+ i = DMM_FIXED_RETRY_COUNT;
+ while (true) {
+ r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+ err = r & DMM_PATSTATUS_ERR;
+ if (err)
+ return -EFAULT;
+
+ if ((r & wait_mask) == wait_mask)
+ break;
+
+ if (--i == 0)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ }
+
+ return 0;
+}
+
+irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+{
+ struct dmm *dmm = arg;
+ uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+ int i;
+
+ /* ack IRQ */
+ writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+
+ for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_LST)
+ wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+
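+ /* each refill engine reports its status in one byte of IRQSTATUS */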
+ status >>= 8;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Get a handle for a DMM transaction
+ */
+static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+{
+ struct dmm_txn *txn = NULL;
+ struct refill_engine *engine = NULL;
+
+ down(&dmm->engine_sem);
+
+ /* grab an idle engine */
+ spin_lock(&dmm->list_lock);
+ if (!list_empty(&dmm->idle_head)) {
+ engine = list_entry(dmm->idle_head.next, struct refill_engine,
+ idle_node);
+ list_del(&engine->idle_node);
+ }
+ spin_unlock(&dmm->list_lock);
+
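+ /* the engine_sem down above guarantees an idle engine is on the list */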
+ BUG_ON(!engine);
+
+ txn = &engine->txn;
+ engine->tcm = tcm;
+ txn->engine_handle = engine;
+ txn->last_pat = NULL;
+ txn->current_va = engine->refill_va;
+ txn->current_pa = engine->refill_pa;
+
+ return txn;
+}
+
+/**
+ * Add region to DMM transaction. If pages or pages[i] is NULL, then the
+ * corresponding slot is cleared (ie. dummy_pa is programmed)
+ */
+static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ struct page **pages, uint32_t npages, uint32_t roll)
+{
+ dma_addr_t pat_pa = 0;
+ uint32_t *data;
+ struct pat *pat;
+ struct refill_engine *engine = txn->engine_handle;
+ int columns = (1 + area->x1 - area->x0);
+ int rows = (1 + area->y1 - area->y0);
+ int i = columns*rows;
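+ /* offset into this container's shadow LUT, at the area origin */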
+ u32 *lut = omap_dmm->lut + (engine->tcm->lut_id * omap_dmm->lut_width *
+ omap_dmm->lut_height) +
+ (area->y0 * omap_dmm->lut_width) + area->x0;
+
+ pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+
+ if (txn->last_pat)
+ txn->last_pat->next_pa = (uint32_t)pat_pa;
+
+ pat->area = *area;
+ pat->ctrl = (struct pat_ctrl){
+ .start = 1,
+ .lut_id = engine->tcm->lut_id,
+ };
+
+ data = alloc_dma(txn, 4*i, &pat->data_pa);
+
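+ /* fill the descriptor payload with physical page addresses, rolling
+ * the start index and substituting the dummy page for unbacked slots
+ */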
+ while (i--) {
+ int n = i + roll;
+ if (n >= npages)
+ n -= npages;
+ data[i] = (pages && pages[n]) ?
+ page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+ }
+
+ /* fill in lut with new addresses */
+ for (i = 0; i < rows; i++, lut += omap_dmm->lut_width)
+ memcpy(lut, &data[i*columns], columns * sizeof(u32));
+
+ txn->last_pat = pat;
+
+ return 0;
+}
+
+/**
+ * Commit the DMM transaction.
+ */
+static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+{
+ int ret = 0;
+ struct refill_engine *engine = txn->engine_handle;
+ struct dmm *dmm = engine->dmm;
+
+ if (!txn->last_pat) {
+ dev_err(engine->dmm->dev, "need at least one txn\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ txn->last_pat->next_pa = 0;
+
+ /* write to PAT_DESCR to clear out any pending transaction */
+ writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+
+ /* wait for engine ready: */
+ ret = wait_status(engine, DMM_PATSTATUS_READY);
+ if (ret) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* kick reload */
+ writel(engine->refill_pa,
+ dmm->base + reg[PAT_DESCR][engine->id]);
+
+ if (wait) {
+ if (wait_event_interruptible_timeout(engine->wait_for_refill,
+ wait_status(engine, DMM_PATSTATUS_READY) == 0,
+ msecs_to_jiffies(1)) <= 0) {
+ dev_err(dmm->dev, "timed out waiting for done\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+cleanup:
+ spin_lock(&dmm->list_lock);
+ list_add(&engine->idle_node, &dmm->idle_head);
+ spin_unlock(&dmm->list_lock);
+
+ up(&omap_dmm->engine_sem);
+ return ret;
+}
+
+/*
+ * DMM programming
+ */
+static int fill(struct tcm_area *area, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret = 0;
+ struct tcm_area slice, area_s;
+ struct dmm_txn *txn;
+
+ txn = dmm_txn_init(omap_dmm, area->tcm);
+ if (IS_ERR_OR_NULL(txn))
+ return PTR_ERR(txn);
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ struct pat_area p_area = {
+ .x0 = slice.p0.x, .y0 = slice.p0.y,
+ .x1 = slice.p1.x, .y1 = slice.p1.y,
+ };
+
+ ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
+ if (ret)
+ goto fail;
+
+ roll += tcm_sizeof(slice);
+ }
+
+ ret = dmm_txn_commit(txn, wait);
+
+fail:
+ return ret;
+}
+
+/*
+ * Pin/unpin
+ */
+
+/* note: slots for which pages[i] == NULL are filled w/ dummy page
+ */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret;
+
+ ret = fill(&block->area, pages, npages, roll, wait);
+
+ if (ret)
+ tiler_unpin(block);
+
+ return ret;
+}
+
+int tiler_unpin(struct tiler_block *block)
+{
+ return fill(&block->area, NULL, 0, 0, false);
+}
+
+/*
+ * Reserve/release
+ */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+ uint16_t h, uint16_t align)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ u32 min_align = 128;
+ int ret;
+
+ if (!block)
+ return NULL;
+
+ BUG_ON(!validfmt(fmt));
+
+ /* convert width/height to slots */
+ w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+ h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+
+ /* convert alignment to slots */
+ min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+ align = ALIGN(align, min_align);
+ align /= geom[fmt].slot_w * geom[fmt].cpp;
+
+ block->fmt = fmt;
+
+ ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+ if (ret) {
+ kfree(block);
+ return NULL;
+ }
+
+ /* add to allocation list */
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
+
+struct tiler_block *tiler_reserve_1d(size_t size)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (!block)
+ return NULL;
+
+ block->fmt = TILFMT_PAGE;
+
+ if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+ &block->area)) {
+ kfree(block);
+ return NULL;
+ }
+
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
+
+/* note: if you have pin'd pages, you should have already unpin'd first! */
+int tiler_release(struct tiler_block *block)
+{
+ int ret = tcm_free(&block->area);
+
+ if (block->area.tcm)
+ dev_err(omap_dmm->dev, "failed to release block\n");
+
+ spin_lock(&omap_dmm->list_lock);
+ list_del(&block->alloc_node);
+ spin_unlock(&omap_dmm->list_lock);
+
+ kfree(block);
+ return ret;
+}
+
+/*
+ * Utils
+ */
+
+/* calculate the tiler space address of a pixel in a view orientation */
+static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+
+ if (x > x_mask || y > y_mask)
+ return 0;
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
+
+dma_addr_t tiler_ssptr(struct tiler_block *block)
+{
+ BUG_ON(!validfmt(block->fmt));
+
+ return TILVIEW_8BIT + tiler_get_address(0, block->fmt,
+ block->area.p0.x * geom[block->fmt].slot_w,
+ block->area.p0.y * geom[block->fmt].slot_h);
+}
+
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+{
+ BUG_ON(!validfmt(fmt));
+ *w = round_up(*w, geom[fmt].slot_w);
+ *h = round_up(*h, geom[fmt].slot_h);
+}
+
+uint32_t tiler_stride(enum tiler_fmt fmt)
+{
+ BUG_ON(!validfmt(fmt));
+
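+ /* byte distance between two successive rows of the tiler view */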
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
+
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ tiler_align(fmt, &w, &h);
+ return geom[fmt].cpp * w * h;
+}
+
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ BUG_ON(!validfmt(fmt));
+ return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+}
+
+int omap_dmm_remove(void)
+{
+ struct tiler_block *block, *_block;
+ int i;
+
+ if (omap_dmm) {
+ /* free all area regions */
+ spin_lock(&omap_dmm->list_lock);
+ list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+ alloc_node) {
+ list_del(&block->alloc_node);
+ kfree(block);
+ }
+ spin_unlock(&omap_dmm->list_lock);
+
+ for (i = 0; i < omap_dmm->num_lut; i++)
+ if (omap_dmm->tcm && omap_dmm->tcm[i])
+ omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+ kfree(omap_dmm->tcm);
+
+ kfree(omap_dmm->engines);
+ if (omap_dmm->refill_va)
+ dma_free_coherent(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va,
+ omap_dmm->refill_pa);
+ if (omap_dmm->dummy_page)
+ __free_page(omap_dmm->dummy_page);
+
+ vfree(omap_dmm->lut);
+
+ if (omap_dmm->irq != -1)
+ free_irq(omap_dmm->irq, omap_dmm);
+
+ kfree(omap_dmm);
+ }
+
+ return 0;
+}
+
+int omap_dmm_init(struct drm_device *dev)
+{
+ int ret = -EFAULT, i;
+ struct tcm_area area = {0};
+ u32 hwinfo, pat_geom, lut_table_size;
+ struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+
+ if (!pdata || !pdata->dmm_pdata) {
+ dev_err(dev->dev, "dmm platform data not present, skipping\n");
+ return ret;
+ }
+
+ omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
+ if (!omap_dmm) {
+ dev_err(dev->dev, "failed to allocate driver data section\n");
+ goto fail;
+ }
+
+ /* lookup hwmod data - base address and irq */
+ omap_dmm->base = pdata->dmm_pdata->base;
+ omap_dmm->irq = pdata->dmm_pdata->irq;
+ omap_dmm->dev = dev->dev;
+
+ if (!omap_dmm->base) {
+ dev_err(dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+ omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+ omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+ omap_dmm->container_width = 256;
+ omap_dmm->container_height = 128;
+
+ /* read out actual LUT width and height */
+ pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
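+ /* geometry fields encode dimensions in units of 32 entries, hence << 5 */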
+ omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+ omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+
+ /* initialize DMM registers */
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+ writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+ writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+
+ ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+ "omap_dmm_irq_handler", omap_dmm);
+
+ if (ret) {
+ dev_err(dev->dev, "couldn't register IRQ %d, error %d\n",
+ omap_dmm->irq, ret);
+ omap_dmm->irq = -1;
+ goto fail;
+ }
+
+ /* Enable all interrupts for each refill engine except
+ * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+ * about because we want to be able to refill live scanout
+ * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+ * we just generally don't care about.
+ */
+ writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+
+ lut_table_size = omap_dmm->lut_width * omap_dmm->lut_height *
+ omap_dmm->num_lut;
+
+ omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
+ if (!omap_dmm->lut) {
+ dev_err(dev->dev, "could not allocate lut table\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+ if (!omap_dmm->dummy_page) {
+ dev_err(dev->dev, "could not allocate dummy page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+
+ /* alloc refill memory */
+ omap_dmm->refill_va = dma_alloc_coherent(dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
+ if (!omap_dmm->refill_va) {
+ dev_err(dev->dev, "could not allocate refill memory\n");
+ goto fail;
+ }
+
+ /* alloc engines */
+ omap_dmm->engines = kzalloc(
+ omap_dmm->num_engines * sizeof(struct refill_engine),
+ GFP_KERNEL);
+ if (!omap_dmm->engines) {
+ dev_err(dev->dev, "could not allocate engines\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
+ INIT_LIST_HEAD(&omap_dmm->idle_head);
+ for (i = 0; i < omap_dmm->num_engines; i++) {
+ omap_dmm->engines[i].id = i;
+ omap_dmm->engines[i].dmm = omap_dmm;
+ omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+ (REFILL_BUFFER_SIZE * i);
+ omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+ (REFILL_BUFFER_SIZE * i);
+ init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+
+ list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+ }
+
+ omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+ GFP_KERNEL);
+ if (!omap_dmm->tcm) {
+ dev_err(dev->dev, "failed to allocate lut ptrs\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* init containers */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+ omap_dmm->container_height,
+ NULL);
+
+ if (!omap_dmm->tcm[i]) {
+ dev_err(dev->dev, "failed to allocate container\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->tcm[i]->lut_id = i;
+ }
+
+ /* assign access mode containers to applicable tcm container */
+ /* OMAP 4 has 1 container for all 4 views */
+ containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+
+ INIT_LIST_HEAD(&omap_dmm->alloc_head);
+ spin_lock_init(&omap_dmm->list_lock);
+
+ area = (struct tcm_area) {
+ .is2d = true,
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ for (i = 0; i < lut_table_size; i++)
+ omap_dmm->lut[i] = omap_dmm->dummy_pa;
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(omap_dmm->dev, "refill failed\n");
+ }
+
+ dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+
+ return 0;
+
+fail:
+ omap_dmm_remove();
+ return ret;
+}
+
+/*
+ * debugfs support
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+static const char *special = ".,:;'\"`~!^-+";
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+ char c, bool ovw)
+{
+ int x, y;
+ for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+ for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+ if (map[y][x] == ' ' || ovw)
+ map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+ char c)
+{
+ map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+ return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+ return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+ char *p = map[yd] + (x0 / xdiv);
+ int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+ if (w >= 0) {
+ p += w;
+ while (*nice)
+ *p++ = *nice++;
+ }
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+ if (a->p0.y + 1 < a->p1.y) {
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+ 256 - 1);
+ } else if (a->p0.y < a->p1.y) {
+ if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+ text_map(map, xdiv, nice, a->p0.y / ydiv,
+ a->p0.x + xdiv, 256 - 1);
+ else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+ text_map(map, xdiv, nice, a->p1.y / ydiv,
+ 0, a->p1.x - xdiv);
+ } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+ text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+ }
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+ if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+ a->p0.x, a->p1.x);
+}
+
+int tiler_map_show(struct seq_file *s, void *arg)
+{
+ int xdiv = 2, ydiv = 1;
+ char **map = NULL, *global_map;
+ struct tiler_block *block;
+ struct tcm_area a, p;
+ int i;
+ const char *m2d = alphabet;
+ const char *a2d = special;
+ const char *m2dp = m2d, *a2dp = a2d;
+ char nice[128];
+ int h_adj = omap_dmm->lut_height / ydiv;
+ int w_adj = omap_dmm->lut_width / xdiv;
+ unsigned long flags;
+
+ map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
+ global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+
+ if (!map || !global_map)
+ goto error;
+
+ memset(global_map, ' ', (w_adj + 1) * h_adj);
+ for (i = 0; i < omap_dmm->lut_height; i++) {
+ map[i] = global_map + i * (w_adj + 1);
+ map[i][w_adj] = 0;
+ }
+ spin_lock_irqsave(&omap_dmm->list_lock, flags);
+
+ list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+ if (block->fmt != TILFMT_PAGE) {
+ fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice, &block->area);
+ } else {
+ bool start = read_map_pt(map, xdiv, ydiv,
+ &block->area.p0)
+ == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
+ == ' ';
+ tcm_for_each_slice(a, block->area, p)
+ fill_map(map, xdiv, ydiv, &a, '=', true);
+ fill_map_pt(map, xdiv, ydiv, &block->area.p0,
+ start ? '<' : 'X');
+ fill_map_pt(map, xdiv, ydiv, &block->area.p1,
+ end ? '>' : 'X');
+ map_1d_info(map, xdiv, ydiv, nice, &block->area);
+ }
+ }
+
+ spin_unlock_irqrestore(&omap_dmm->list_lock, flags);
+
+ if (s) {
+ seq_printf(s, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < h_adj; i++)
+ seq_printf(s, "%03d:%s\n", i, map[i]);
+ seq_printf(s, "END TILER MAP\n");
+ } else {
+ dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < h_adj; i++)
+ dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+ dev_dbg(omap_dmm->dev, "END TILER MAP\n");
+ }
+
+error:
+ kfree(map);
+ kfree(global_map);
+
+ return 0;
+}
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
new file mode 100644
index 00000000000..f87cb657d68
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_TILER_H
+#define OMAP_DMM_TILER_H
+
+#include "omap_drv.h"
+#include "tcm.h"
+
+enum tiler_fmt {
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT,
+ TILFMT_32BIT,
+ TILFMT_PAGE,
+ TILFMT_NFORMATS
+};
+
+struct pat_area {
+ u32 x0:8;
+ u32 y0:8;
+ u32 x1:8;
+ u32 y1:8;
+};
+
+struct tiler_block {
+ struct list_head alloc_node; /* node for global block list */
+ struct tcm_area area; /* area */
+ enum tiler_fmt fmt; /* format */
+};
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
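+/* with the bit counts above: 4KiB per slot, 256x128 slots per container */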
+
+/* tiler space addressing bitfields */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#ifndef VIEW_SIZE
+/* size of one access-mode view of the container; assumed here from the
+ * container bit counts above, as it is needed by the TILVIEW_* offsets below
+ */
+#define VIEW_SIZE (1u << (CONT_WIDTH_BITS + CONT_HEIGHT_BITS))
+#endif
+
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
+
+/* externally accessible functions */
+int omap_dmm_init(struct drm_device *dev);
+int omap_dmm_remove(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tiler_map_show(struct seq_file *s, void *arg);
+#endif
+
+/* pin/unpin */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait);
+int tiler_unpin(struct tiler_block *block);
+
+/* reserve/release */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
+ uint16_t align);
+struct tiler_block *tiler_reserve_1d(size_t size);
+int tiler_release(struct tiler_block *block);
+
+/* utilities */
+dma_addr_t tiler_ssptr(struct tiler_block *block);
+uint32_t tiler_stride(enum tiler_fmt fmt);
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+
+
+/* GEM bo flags -> tiler fmt */
+static inline enum tiler_fmt gem2fmt(uint32_t flags)
+{
+ switch (flags & OMAP_BO_TILED) {
+ case OMAP_BO_TILED_8:
+ return TILFMT_8BIT;
+ case OMAP_BO_TILED_16:
+ return TILFMT_16BIT;
+ case OMAP_BO_TILED_32:
+ return TILFMT_32BIT;
+ default:
+ return TILFMT_PAGE;
+ }
+}
+
+static inline bool validfmt(enum tiler_fmt fmt)
+{
+ switch (fmt) {
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ case TILFMT_PAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct omap_dmm_platform_data {
+ void __iomem *base;
+ int irq;
+};
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
new file mode 100644
index 00000000000..f0ac34a8973
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drm.h
@@ -0,0 +1,123 @@
+/*
+ * include/drm/omap_drm.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_H__
+#define __OMAP_DRM_H__
+
+#include <drm/drm.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
+
+struct drm_omap_param {
+ uint64_t param; /* in */
+ uint64_t value; /* in (set_param), out (get_param) */
+};
+
+#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
+#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
+#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+
+/* cache modes */
+#define OMAP_BO_CACHED 0x00000000 /* default */
+#define OMAP_BO_WC 0x00000002 /* write-combine */
+#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+
+/* tiled modes */
+#define OMAP_BO_TILED_8 0x00000100
+#define OMAP_BO_TILED_16 0x00000200
+#define OMAP_BO_TILED_32 0x00000300
+#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+
+union omap_gem_size {
+ uint32_t bytes; /* (for non-tiled formats) */
+ struct {
+ uint16_t width;
+ uint16_t height;
+ } tiled; /* (for tiled formats) */
+};
+
+struct drm_omap_gem_new {
+ union omap_gem_size size; /* in */
+ uint32_t flags; /* in */
+ uint32_t handle; /* out */
+ uint32_t __pad;
+};
+
+/* mask of operations: */
+enum omap_gem_op {
+ OMAP_GEM_READ = 0x01,
+ OMAP_GEM_WRITE = 0x02,
+};
+
+struct drm_omap_gem_cpu_prep {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+};
+
+struct drm_omap_gem_cpu_fini {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+ /* TODO maybe here we pass down info about what regions are touched
+ * by sw so we can be clever about cache ops? For now a placeholder,
+ * set to zero and we just do full buffer flush..
+ */
+ uint32_t nregions;
+ uint32_t __pad;
+};
+
+struct drm_omap_gem_info {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t pad;
+ uint64_t offset; /* mmap offset (out) */
+ /* note: in case of tiled buffers, the user virtual size can be
+ * different from the physical size (ie. how many pages are needed
+ * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
+ * This size here is the one that should be used if you want to
+ * mmap() the buffer:
+ */
+ uint32_t size; /* virtual size for mmap'ing (out) */
+ uint32_t __pad;
+};
+
+#define DRM_OMAP_GET_PARAM 0x00
+#define DRM_OMAP_SET_PARAM 0x01
+/* placeholder for plugin-api
+#define DRM_OMAP_GET_BASE 0x02
+*/
+#define DRM_OMAP_GEM_NEW 0x03
+#define DRM_OMAP_GEM_CPU_PREP 0x04
+#define DRM_OMAP_GEM_CPU_FINI 0x05
+#define DRM_OMAP_GEM_INFO 0x06
+#define DRM_OMAP_NUM_IOCTLS 0x07
+
+#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
+/* placeholder for plugin-api
+#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
+*/
+#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
+#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
+#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
+#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
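+
+/* Illustrative usage sketch (not part of this header): a userspace client
+ * with an open DRM fd could allocate a write-combined buffer and map it
+ * roughly as follows; error handling omitted:
+ *
+ * struct drm_omap_gem_new new_req = {
+ * .size.bytes = 4096,
+ * .flags = OMAP_BO_WC,
+ * };
+ * struct drm_omap_gem_info info_req = { 0 };
+ *
+ * ioctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &new_req);
+ * info_req.handle = new_req.handle;
+ * ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info_req);
+ * mmap(NULL, info_req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ * fd, info_req.offset);
+ */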
+
+#endif /* __OMAP_DRM_H__ */
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
new file mode 100644
index 00000000000..602aa2dd49c
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -0,0 +1,821 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+#define DRIVER_NAME MODULE_NAME
+#define DRIVER_DESC "OMAP DRM"
+#define DRIVER_DATE "20110917"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct drm_device *drm_device;
+
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ * CRTC: overlay
+ * encoder: manager.. with some extension to allow one primary CRTC
+ * and zero or more video CRTCs to be mapped to one encoder?
+ * connector: dssdev.. manager can be attached/detached from different
+ * devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ DBG("dev=%p", dev);
+ if (priv->fbdev) {
+ drm_fb_helper_hotplug_event(priv->fbdev);
+ }
+}
+
+static struct drm_mode_config_funcs omap_mode_config_funcs = {
+ .fb_create = omap_framebuffer_create,
+ .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!strcmp(dssdev->name, "dvi"))
+ return DRM_MODE_CONNECTOR_DVID;
+ /* fallthrough */
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+#if 0 /* enable when dss2 supports hotplug */
+static int omap_drm_notifier(struct notifier_block *nb,
+ unsigned long evt, void *arg)
+{
+ switch (evt) {
+ case OMAP_DSS_SIZE_CHANGE:
+ case OMAP_DSS_HOTPLUG_CONNECT:
+ case OMAP_DSS_HOTPLUG_DISCONNECT: {
+ struct drm_device *dev = drm_device;
+ DBG("hotplug event: evt=%d, dev=%p", evt, dev);
+ if (dev) {
+ drm_sysfs_hotplug_event(dev);
+ }
+ return NOTIFY_OK;
+ }
+ default: /* don't care about other events for now */
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
+static void dump_video_chains(void)
+{
+ int i;
+
+ DBG("dumping video chains: ");
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ struct omap_overlay_manager *mgr = ovl->manager;
+ struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
+ if (dssdev) {
+ DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
+ dssdev->name);
+ } else if (mgr) {
+ DBG("%d: %s -> %s", i, ovl->name, mgr->name);
+ } else {
+ DBG("%d: %s", i, ovl->name);
+ }
+ }
+}
+
+/* create encoders for each manager */
+static int create_encoder(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
+
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ mgr->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return 0;
+}
+
+/* create connectors for each display device */
+static int create_connector(struct drm_device *dev,
+ struct omap_dss_device *dssdev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ static struct notifier_block *notifier;
+ struct drm_connector *connector;
+ int j;
+
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
+ }
+
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
+ }
+
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev);
+
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+ priv->connectors[priv->num_connectors++] = connector;
+
+#if 0 /* enable when dss2 supports hotplug */
+ notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ notifier->notifier_call = omap_drm_notifier;
+ omap_dss_add_notify(dssdev, notifier);
+#else
+ notifier = NULL;
+#endif
+
+ for (j = 0; j < priv->num_encoders; j++) {
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(priv->encoders[j]);
+ if (mgr->device == dssdev) {
+ drm_mode_connector_attach_encoder(connector,
+ priv->encoders[j]);
+ }
+ }
+
+ return 0;
+}
+
+/* create up to max_overlays CRTCs mapping to overlays.. by default,
+ * connect the overlays to different managers/encoders, giving priority
+ * to encoders connected to connectors with a detected connection
+ */
+static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
+ int *j, unsigned int connected_connectors)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_overlay_manager *mgr = NULL;
+ struct drm_crtc *crtc;
+
+ if (ovl->manager) {
+ DBG("disconnecting %s from %s", ovl->name,
+ ovl->manager->name);
+ ovl->unset_manager(ovl);
+ }
+
+ /* find next best connector, ones with detected connection first
+ */
+ while (*j < priv->num_connectors && !mgr) {
+ if (connected_connectors & (1 << *j)) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[*j]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ /* if we couldn't find another connected connector, let's start
+ * looking at the unconnected connectors:
+ *
+ * note: it might not be immediately apparent, but thanks to
+ * the !mgr check in both this loop and the one above, the only
+ * way to enter this loop is with *j == priv->num_connectors,
+ * so idx can never go negative.
+ */
+ while (*j < 2 * priv->num_connectors && !mgr) {
+ int idx = *j - priv->num_connectors;
+ if (!(connected_connectors & (1 << idx))) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[idx]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ if (mgr) {
+ DBG("connecting %s to %s", ovl->name, mgr->name);
+ ovl->set_manager(ovl, mgr);
+ }
+
+ crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
+
+ if (!crtc) {
+ dev_err(dev->dev, "could not create CRTC: %s\n",
+ ovl->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ return 0;
+}
+
+static int match_dev_name(struct omap_dss_device *dssdev, void *data)
+{
+ return !strcmp(dssdev->name, data);
+}
+
+static unsigned int detect_connectors(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned int connected_connectors = 0;
+ int i;
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (omap_connector_detect(connector, true) ==
+ connector_status_connected) {
+ connected_connectors |= (1 << i);
+ }
+ }
+
+ return connected_connectors;
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+ const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+ struct omap_kms_platform_data *kms_pdata = NULL;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev = NULL;
+ int i, j;
+ unsigned int connected_connectors = 0;
+
+ drm_mode_config_init(dev);
+
+ if (pdata && pdata->kms_pdata) {
+ kms_pdata = pdata->kms_pdata;
+
+ /* if platform data is provided by the board file, use it to
+ * control which overlays, managers, and devices we own.
+ */
+ for (i = 0; i < kms_pdata->mgr_cnt; i++) {
+ struct omap_overlay_manager *mgr =
+ omap_dss_get_overlay_manager(
+ kms_pdata->mgr_ids[i]);
+ create_encoder(dev, mgr);
+ }
+
+ for (i = 0; i < kms_pdata->dev_cnt; i++) {
+ struct omap_dss_device *dssdev =
+ omap_dss_find_device(
+ (void *)kms_pdata->dev_names[i],
+ match_dev_name);
+ if (!dssdev) {
+ dev_warn(dev->dev, "no such dssdev: %s\n",
+ kms_pdata->dev_names[i]);
+ continue;
+ }
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < kms_pdata->ovl_cnt; i++) {
+ struct omap_overlay *ovl =
+ omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
+ create_crtc(dev, ovl, &j, connected_connectors);
+ }
+ } else {
+ /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
+ * to make educated guesses about everything else
+ */
+ int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ create_encoder(dev, omap_dss_get_overlay_manager(i));
+ }
+
+ for_each_dss_dev(dssdev) {
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < max_overlays; i++) {
+ create_crtc(dev, omap_dss_get_overlay(i),
+ &j, connected_connectors);
+ }
+ }
+
+ /* for now keep the mapping of CRTCs and encoders static.. */
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+
+ encoder->possible_crtcs = 0;
+
+ for (j = 0; j < priv->num_crtcs; j++) {
+ struct omap_overlay *ovl =
+ omap_crtc_get_overlay(priv->crtcs[j]);
+ if (ovl->manager == mgr) {
+ encoder->possible_crtcs |= (1 << j);
+ }
+ }
+
+ DBG("%s: possible_crtcs=%08x", mgr->name,
+ encoder->possible_crtcs);
+ }
+
+ dump_video_chains();
+
+ dev->mode_config.min_width = 256;
+ dev->mode_config.min_height = 256;
+
+ /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+ * to fill in these limits properly on different OMAP generations..
+ */
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &omap_mode_config_funcs;
+
+ return 0;
+}
+
+static void omap_modeset_free(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+static int ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ DBG("%p: param=%llu", dev, args->param);
+
+ switch (args->param) {
+ case OMAP_PARAM_CHIPSET_ID:
+ args->value = GET_OMAP_TYPE;
+ break;
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_set_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ switch (args->param) {
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_new *args = data;
+ DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ args->size.bytes, args->flags);
+ return omap_gem_new_handle(dev, file_priv, args->size,
+ args->flags, &args->handle);
+}
+
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ ret = omap_gem_op_sync(obj, args->op);
+
+ if (!ret) {
+ ret = omap_gem_op_start(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ /* XXX flushy, flushy */
+ ret = 0;
+
+ if (!ret) {
+ ret = omap_gem_op_finish(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ args->size = omap_gem_mmap_size(obj);
+ args->offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+ DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+ struct omap_drm_private *priv;
+ int ret;
+
+ DBG("load: dev=%p", dev);
+
+ drm_device = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ omap_gem_init(dev);
+
+ ret = omap_modeset_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev->dev_private = NULL;
+ kfree(priv);
+ return ret;
+ }
+
+ priv->fbdev = omap_fbdev_init(dev);
+ if (!priv->fbdev) {
+ dev_warn(dev->dev, "omap_fbdev_init failed\n");
+ /* well, limp along without an fbdev.. maybe X11 will work? */
+ }
+
+ drm_kms_helper_poll_init(dev);
+
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret) {
+ dev_warn(dev->dev, "could not init vblank\n");
+ }
+
+ return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+ DBG("unload: dev=%p", dev);
+
+ drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+
+ omap_fbdev_free(dev);
+ omap_modeset_free(dev);
+ omap_gem_deinit(dev);
+
+ kfree(dev->dev_private);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+ file->driver_priv = NULL;
+
+ DBG("open: dev=%p, file=%p", dev, file);
+
+ return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+ DBG("firstopen: dev=%p", dev);
+ return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+ /* we don't support vga-switcheroo.. so just make sure the fbdev
+ * mode is active
+ */
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ DBG("lastclose: dev=%p", dev);
+
+ ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+}
+
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+static int dev_enable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+static void dev_disable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
+}
+
+static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
+{
+ return IRQ_HANDLED;
+}
+
+static void dev_irq_preinstall(struct drm_device *dev)
+{
+ DBG("irq_preinstall: dev=%p", dev);
+}
+
+static int dev_irq_postinstall(struct drm_device *dev)
+{
+ DBG("irq_postinstall: dev=%p", dev);
+ return 0;
+}
+
+static void dev_irq_uninstall(struct drm_device *dev)
+{
+ DBG("irq_uninstall: dev=%p", dev);
+}
+
+static struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_driver omap_drm_driver = {
+ .driver_features =
+ DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .firstopen = dev_firstopen,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = dev_enable_vblank,
+ .disable_vblank = dev_disable_vblank,
+ .irq_preinstall = dev_irq_preinstall,
+ .irq_postinstall = dev_irq_postinstall,
+ .irq_uninstall = dev_irq_uninstall,
+ .irq_handler = dev_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
+#endif
+ .gem_init_object = omap_gem_init_object,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = omap_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int pdev_suspend(struct platform_device *device, pm_message_t state)
+{
+ DBG("");
+ return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+ DBG("");
+ return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+ DBG("");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+ DBG("%s", device->name);
+ return drm_platform_init(&omap_drm_driver, device);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+ DBG("");
+ drm_platform_exit(&omap_drm_driver, device);
+ return 0;
+}
+
+static struct platform_driver pdev = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
+ .suspend = pdev_suspend,
+ .resume = pdev_resume,
+ .shutdown = pdev_shutdown,
+};
+
+static int __init omap_drm_init(void)
+{
+ DBG("init");
+ return platform_driver_register(&pdev);
+}
+
+static void __exit omap_drm_fini(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after dss_driver's are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
new file mode 100644
index 00000000000..76c42515ecc
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -0,0 +1,135 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include "omap_drm.h"
+#include "omap_priv.h"
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt, ##__VA_ARGS__); } while (0) /* verbose debug */
+
+#define MODULE_NAME "omapdrm"
+
+/* max # of mapper-id's that can be assigned.. todo, come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data..
+ */
+#define MAX_MAPPERS 2
+
+struct omap_drm_private {
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+
+ struct drm_fb_helper *fbdev;
+
+ bool has_dmm;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id);
+struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr);
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev);
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h);
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb);
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, dma_addr_t *paddr, unsigned int *screen_width);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h);
+
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+
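+/* e.g. align_pitch(0, 33, 16) below gives bytespp = 2, pitch = max(0, 66)
+ * = 66, and ALIGN(66, 8 * 2) = 80 bytes
+ */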
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* in case someone tries to feed us a completely bogus stride: */
+ pitch = max(pitch, width * bytespp);
+ /* PVR needs alignment to 8 pixels.. right now that is the most
+ * restrictive stride requirement..
+ */
+ return ALIGN(pitch, 8 * bytespp);
+}
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
new file mode 100644
index 00000000000..06c52cb62d2
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/staging/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+struct omap_encoder {
+ struct drm_encoder base;
+ struct omap_overlay_manager *mgr;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+}
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s: %d", omap_encoder->mgr->name, mode);
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
+ mode->hdisplay, mode->vdisplay);
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (connector->encoder == encoder) {
+ omap_connector_mode_set(connector, mode);
+ }
+ }
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ omap_encoder->mgr->apply(omap_encoder->mgr);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+ .dpms = omap_encoder_dpms,
+ .mode_fixup = omap_encoder_mode_fixup,
+ .mode_set = omap_encoder_mode_set,
+ .prepare = omap_encoder_prepare,
+ .commit = omap_encoder_commit,
+};
+
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ return omap_encoder->mgr;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct drm_encoder *encoder = NULL;
+ struct omap_encoder *omap_encoder;
+ struct omap_overlay_manager_info info;
+ int ret;
+
+ DBG("%s", mgr->name);
+
+ omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+ if (!omap_encoder) {
+ dev_err(dev->dev, "could not allocate encoder\n");
+ goto fail;
+ }
+
+ omap_encoder->mgr = mgr;
+ encoder = &omap_encoder->base;
+
+ drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+ mgr->get_manager_info(mgr, &info);
+
+ /* TODO: fix hard-coded setup.. */
+ info.default_color = 0x00000000;
+ info.trans_key = 0x00000000;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info.trans_enabled = false;
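+ /* (i.e. a black default background with colour-key transparency
+ * disabled)
+ */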
+
+ ret = mgr->set_manager_info(mgr, &info);
+ if (ret) {
+ dev_err(dev->dev, "could not set manager info\n");
+ goto fail;
+ }
+
+ ret = mgr->apply(mgr);
+ if (ret) {
+ dev_err(dev->dev, "could not apply\n");
+ goto fail;
+ }
+
+ return encoder;
+
+fail:
+ if (encoder) {
+ omap_encoder_destroy(encoder);
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
new file mode 100644
index 00000000000..0b50c5b3b56
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -0,0 +1,243 @@
+/*
+ * drivers/staging/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+/*
+ * framebuffer funcs
+ */
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *bo;
+ int size;
+ dma_addr_t paddr;
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return drm_gem_handle_create(file_priv, omap_fb->bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ if (omap_fb->bo) {
+ if (omap_fb->paddr && omap_gem_put_paddr(omap_fb->bo))
+ dev_err(dev->dev, "could not unmap!\n");
+ drm_gem_object_unreference_unlocked(omap_fb->bo);
+ }
+
+ kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ int i;
+
+ for (i = 0; i < num_clips; i++) {
+ omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+ .create_handle = omap_framebuffer_create_handle,
+ .destroy = omap_framebuffer_destroy,
+ .dirty = omap_framebuffer_dirty,
+};
+
+/* returns the buffer size */
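+/* (e.g. a 32bpp fb with a 5120 byte pitch and (x, y) = (10, 2) gives a
+ * byte offset of 10 * 4 + 2 * 5120 = 10280 and screen_width = 1280)
+ */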
+int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
+ void **vaddr, dma_addr_t *paddr, unsigned int *screen_width)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int bpp = fb->bits_per_pixel / 8;
+ unsigned long offset;
+
+ offset = (x * bpp) + (y * fb->pitch);
+
+ if (vaddr) {
+ void *bo_vaddr = omap_gem_vaddr(omap_fb->bo);
+ /* note: we can only count on having a vaddr for buffers that
+ * are allocated physically contiguously to begin with (ie.
+ * dma_alloc_coherent()). But this should be ok because it
+ * is only used by legacy fbdev
+ */
+ BUG_ON(IS_ERR_OR_NULL(bo_vaddr));
+ *vaddr = bo_vaddr + offset;
+ }
+
+ *paddr = omap_fb->paddr + offset;
+ *screen_width = fb->pitch / bpp;
+
+ return omap_fb->size - offset;
+}
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return omap_fb->bo;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from)
+{
+ struct drm_device *dev = fb->dev;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector *connector = from;
+
+ if (!from) {
+ return list_first_entry(connector_list, typeof(*from), head);
+ }
+
+ list_for_each_entry_from(connector, connector_list, head) {
+ if (connector != from) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ if (crtc && crtc->fb == fb) {
+ return connector;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* flush an area of the framebuffer (for manual-update displays that are
+ * not flushed automatically)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h)
+{
+ struct drm_connector *connector = NULL;
+
+ VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+ while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+ /* only consider connectors that are part of a chain */
+ if (connector->encoder && connector->encoder->crtc) {
+ /* TODO: maybe this should propagate thru the crtc who
+ * could do the coordinate translation..
+ */
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ int cx = max(0, x - crtc->x);
+ int cy = max(0, y - crtc->y);
+ int cw = w + (x - crtc->x) - cx;
+ int ch = h + (y - crtc->y) - cy;
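+ /* e.g. flushing (100, 100) 50x50 against a crtc scanning out
+ * from fb offset (120, 0) gives cx = 0, cy = 100, cw = 30,
+ * ch = 50: the dirty rect shifted and clipped into crtc coords
+ */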
+
+ omap_connector_flush(connector, cx, cy, cw, ch);
+ }
+ }
+}
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ bo = drm_gem_object_lookup(dev, file, mode_cmd->handle);
+ if (!bo) {
+ return ERR_PTR(-ENOENT);
+ }
+ fb = omap_framebuffer_init(dev, mode_cmd, bo);
+ if (!fb) {
+ return ERR_PTR(-ENOMEM);
+ }
+ return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo)
+{
+ struct omap_framebuffer *omap_fb;
+ struct drm_framebuffer *fb = NULL;
+ int size, ret;
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ mode_cmd->bpp);
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ mode_cmd->pitch = align_pitch(mode_cmd->pitch,
+ mode_cmd->width, mode_cmd->bpp);
+
+ omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+ if (!omap_fb) {
+ dev_err(dev->dev, "could not allocate fb\n");
+ goto fail;
+ }
+
+ fb = &omap_fb->base;
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height);
+
+ if (size > bo->size) {
+ dev_err(dev->dev, "provided buffer object is too small!\n");
+ goto fail;
+ }
+
+ omap_fb->bo = bo;
+ omap_fb->size = size;
+
+ if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) {
+ dev_err(dev->dev, "could not map (paddr)!\n");
+ goto fail;
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ return fb;
+
+fail:
+ if (fb) {
+ omap_framebuffer_destroy(fb);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
new file mode 100644
index 00000000000..093ae2f87b2
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -0,0 +1,372 @@
+/*
+ * drivers/staging/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+static bool ywrap_enabled = true;
+module_param_named(ywrap, ywrap_enabled, bool, 0644);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+ bool ywrap_enabled;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t res;
+
+ res = fb_sys_write(fbi, buf, count, ppos);
+ omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+ return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(fbi, rect);
+ omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(fbi, area);
+ omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+ const struct fb_image *image)
+{
+ sys_imageblit(fbi, image);
+ omap_fbdev_flush(fbi, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ int npages;
+
+ if (!helper)
+ goto fallback;
+
+ if (!fbdev->ywrap_enabled)
+ goto fallback;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, var->yoffset * npages);
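+ /* (e.g. with a PAGE_SIZE aligned pitch of 8192 bytes, npages is 2
+ * pages per line, so panning to yoffset=100 rolls the mapping by
+ * 200 pages rather than copying any pixel data)
+ */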
+
+ return 0;
+
+fallback:
+ return drm_fb_helper_pan_display(var, fbi);
+}
+
+static struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = omap_fbdev_write,
+ .fb_fillrect = omap_fbdev_fillrect,
+ .fb_copyarea = omap_fbdev_copyarea,
+ .fb_imageblit = omap_fbdev_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb = NULL;
+ union omap_gem_size gsize;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd mode_cmd = {0};
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ int size, screen_width;
+ int ret;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
+
+ mode_cmd.pitch = align_pitch(
+ mode_cmd.width * ((mode_cmd.bpp + 7) / 8),
+ mode_cmd.width, mode_cmd.bpp);
+
+ fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+ if (fbdev->ywrap_enabled) {
+ /* need to align pitch to page size if using DMM scrolling */
+ mode_cmd.pitch = ALIGN(mode_cmd.pitch, PAGE_SIZE);
+ }
+
+ /* allocate backing bo */
+ gsize = (union omap_gem_size){
+ .bytes = PAGE_ALIGN(mode_cmd.pitch * mode_cmd.height),
+ };
+ DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+ fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+ if (!fbdev->bo) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = omap_framebuffer_init(dev, &mode_cmd, fbdev->bo);
+ if (!fb) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &omap_fb_ops;
+
+ strcpy(fbi->fix.id, MODULE_NAME);
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ size = omap_framebuffer_get_buffer(fb, 0, 0,
+ &vaddr, &paddr, &screen_width);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = vaddr;
+ fbi->screen_size = size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = size;
+
+ /* if we have DMM, then we can use it for scrolling by just
+ * shuffling pages around in DMM rather than doing sw blit.
+ */
+ if (fbdev->ywrap_enabled) {
+ DRM_INFO("Enabling DMM ywrap scrolling\n");
+ fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+ fbi->fix.ywrapstep = 1;
+ }
+
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb)
+ fb->funcs->destroy(fb);
+ }
+
+ return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static int omap_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = omap_fbdev_create(helper, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .gamma_set = omap_crtc_fb_gamma_set,
+ .gamma_get = omap_crtc_fb_gamma_get,
+ .fb_probe = omap_fbdev_probe,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+/* flush an area of the framebuffer (for manual-update displays that are
+ * not flushed automatically)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return;
+
+ VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+ omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "could not allocate fbdev\n");
+ goto fail;
+ }
+
+ helper = &fbdev->base;
+
+ helper->funcs = &omap_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct omap_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ /* nothing to do if fbdev init failed and we limped along without one */
+ if (!helper)
+ return;
+
+ fbi = helper->fbdev;
+
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_omap_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb)
+ fbdev->fb->funcs->destroy(fbdev->fb);
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
new file mode 100644
index 00000000000..e0ebd1d139f
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -0,0 +1,1231 @@
+/*
+ * drivers/staging/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* remove these once drm core helpers are merged */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
+#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
+
+
+struct omap_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+ /** width/height for tiled formats (rounded up to slot boundaries) */
+ uint16_t width, height;
+
+ /** roll applied when mapping to DMM */
+ uint32_t roll;
+
+ /**
+ * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+ * is set and the paddr is valid. Also if the buffer is remapped in
+ * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
+ * the physical address and OMAP_BO_DMA is not set, then you should
+ * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+ * not removed from under your feet.
+ *
+ * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
+ * buffer is requested, but doesn't mean that it is. Use the
+ * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+ * physical address.
+ */
+ dma_addr_t paddr;
+
+ /**
+ * # of users of paddr
+ */
+ uint32_t paddr_cnt;
+
+ /**
+ * tiler block used when buffer is remapped in DMM/TILER.
+ */
+ struct tiler_block *block;
+
+ /**
+ * Array of backing pages, if allocated. Note that pages are never
+ * allocated for buffers originally allocated from contiguous memory
+ */
+ struct page **pages;
+
+ /** addresses corresponding to pages in above array */
+ dma_addr_t *addrs;
+
+ /**
+ * Virtual address, if mapped.
+ */
+ void *vaddr;
+
+ /**
+ * sync-object allocated on demand (if needed)
+ *
+ * Per-buffer sync-object for tracking pending and completed hw/dma
+ * read and write operations. The layout in memory is dictated by
+ * the SGX firmware, which uses this information to stall the command
+ * stream if a surface is not ready yet.
+ *
+ * Note that when buffer is used by SGX, the sync-object needs to be
+ * allocated from a special heap of sync-objects. This way many sync
+ * objects can be packed in a page, and not waste GPU virtual address
+ * space. Because of this we have to have a omap_gem_set_sync_object()
+ * API to allow replacement of the syncobj after it has (potentially)
+ * already been allocated. A bit ugly but I haven't thought of a
+ * better alternative.
+ */
+ struct {
+ uint32_t write_pending;
+ uint32_t write_complete;
+ uint32_t read_pending;
+ uint32_t read_complete;
+ } *sync;
+};
+
+/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+ * not necessarily pinned in TILER all the time, and (b) when they are
+ * they are not necessarily page aligned, we reserve one or more small
+ * regions in each of the 2d containers to use as a user-GART where we
+ * can create a second page-aligned mapping of parts of the buffer
+ * being accessed from userspace.
+ *
+ * Note that we could optimize slightly when we know that multiple
+ * tiler containers are backed by the same PAT.. but I'll leave that
+ * for later..
+ */
+#define NUM_USERGART_ENTRIES 2
+struct usergart_entry {
+ struct tiler_block *block; /* the reserved tiler block */
+ dma_addr_t paddr;
+ struct drm_gem_object *obj; /* the current pinned obj */
+ pgoff_t obj_pgoff; /* page offset of obj currently
+ mapped in */
+};
+static struct {
+ struct usergart_entry entry[NUM_USERGART_ENTRIES];
+ int height; /* height in rows */
+ int height_shift; /* ilog2(height in rows) */
+ int slot_shift; /* ilog2(width per slot) */
+ int stride_pfn; /* stride in pages */
+ int last; /* index of last used entry */
+} *usergart;
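+/* Note: usergart is indexed by tiler format; each entry holds a small
+ * pre-reserved tiler block which fault_2d() grabs round-robin and
+ * re-pins to whichever object/offset just faulted.
+ */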
+
+static void evict_entry(struct drm_gem_object *obj,
+ enum tiler_fmt fmt, struct usergart_entry *entry)
+{
+ if (obj->dev->dev_mapping) {
+ size_t size = PAGE_SIZE * usergart[fmt].height;
+ loff_t off = omap_gem_mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
+
+ entry->obj = NULL;
+}
+
+/* Evict a buffer from usergart, if it is mapped there */
+static void evict(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ int i;
+
+ if (!usergart)
+ return;
+
+ for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+ struct usergart_entry *entry = &usergart[fmt].entry[i];
+ if (entry->obj == obj)
+ evict_entry(obj, fmt, entry);
+ }
+ }
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+ return obj->filp != NULL;
+}
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct page **pages;
+
+ WARN_ON(omap_obj->pages);
+
+ /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+ * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+ * we actually want CMA memory for it all anyways..
+ */
+ pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+ return PTR_ERR(pages);
+ }
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ for (i = 0; i < npages; i++) {
+ addrs[i] = dma_map_page(obj->dev->dev, pages[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ omap_obj->addrs = addrs;
+ }
+
+ omap_obj->pages = pages;
+ return 0;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ /* for non-cached buffers, tear down the dma mappings that were set
+ * up when the pages were attached (DSS, GPU, etc. are not cache
+ * coherent):
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ kfree(omap_obj->addrs);
+ omap_obj->addrs = NULL;
+ }
+
+ _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ omap_obj->pages = NULL;
+}
+
+/** get mmap offset */
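+/* (this is the fake offset that userspace passes to mmap() on the drm
+ * device fd, e.g. as handed back by omap_gem_dumb_map_offset() below)
+ */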
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ if (!obj->map_list.map) {
+ /* Make it mmapable */
+ size_t size = omap_gem_mmap_size(obj);
+ int ret = _drm_gem_create_mmap_offset_size(obj, size);
+
+ if (ret) {
+ dev_err(obj->dev->dev, "could not allocate mmap offset");
+ return 0;
+ }
+ }
+
+ return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+/** get mmap size */
+size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ size_t size = obj->size;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ /* for tiled buffers, the virtual size has stride rounded up
+ * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+ * 32kb later!). But we don't back the entire buffer with
+ * pages, only the valid picture part.. so need to adjust for
+ * this in the size used to mmap and generate mmap offset
+ */
+ size = tiler_vsize(gem2fmt(omap_obj->flags),
+ omap_obj->width, omap_obj->height);
+ }
+
+ return size;
+}
+
+
+/* Normal handling for the case of faulting in non-tiled buffers */
+static int fault_1d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ if (omap_obj->pages) {
+ pfn = page_to_pfn(omap_obj->pages[pgoff]);
+ } else {
+ BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+ pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+ }
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+}
+
+/* Special handling for the case of faulting in 2d tiled buffers */
+static int fault_2d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct usergart_entry *entry;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct page *pages[64]; /* XXX is this too much to have on stack? */
+ unsigned long pfn;
+ pgoff_t pgoff, base_pgoff;
+ void __user *vaddr;
+ int i, ret, slots;
+
+ if (!usergart)
+ return -EFAULT;
+
+ /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
+ * that are wider than 4kb
+ */
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ /* actual address we start mapping at is rounded down to previous slot
+ * boundary in the y direction:
+ */
+ base_pgoff = round_down(pgoff, usergart[fmt].height);
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
+ slots = omap_obj->width >> usergart[fmt].slot_shift;
+
+ /* evict previous buffer using this usergart entry, if any: */
+ if (entry->obj)
+ evict_entry(entry->obj, fmt, entry);
+
+ entry->obj = obj;
+ entry->obj_pgoff = base_pgoff;
+
+ /* now convert base_pgoff to phys offset from virt offset:
+ */
+ base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
+
+ /* map in pages. Note the height of the slot is also equal to the
+ * number of pages that need to be mapped in to fill 4kb wide CPU page.
+ * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
+ * get a dummy page mapped in.. if someone reads/writes it they will get
+ * random/undefined content, but at least it won't be corrupting
+ * whatever other random page used to be mapped in, or other undefined
+ * behavior.
+ */
+ memcpy(pages, &omap_obj->pages[base_pgoff],
+ sizeof(struct page *) * slots);
+ memset(pages + slots, 0,
+ sizeof(struct page *) * (usergart[fmt].height - slots));
+
+ ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (ret) {
+ dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ return ret;
+ }
+
+ i = usergart[fmt].height;
+ pfn = entry->paddr >> PAGE_SHIFT;
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ while (i--) {
+ vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+ pfn += usergart[fmt].stride_pfn;
+ vaddr += PAGE_SIZE;
+ }
+
+ /* simple round-robin: */
+ usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+
+ return 0;
+}
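+/* (so, with a 64 row slot height for example, a fault at page offset 70
+ * pins the whole 64 page slot starting at page offset 64; nearby accesses
+ * within that slot then hit the already-inserted mappings until the entry
+ * is recycled)
+ */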
+
+/**
+ * omap_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /* if a shmem backed object, make sure we have pages attached now */
+ ret = get_pages(obj, &pages);
+ if (ret) {
+ goto fail;
+ }
+
+ /* where should we do corresponding put_pages().. we are mapping
+ * the original page, rather than thru a GART, so we can't rely
+ * on eviction to trigger this. But munmap() of all mappings should
+ * probably trigger put_pages()?
+ */
+
+ if (omap_obj->flags & OMAP_BO_TILED)
+ ret = fault_2d(obj, vma, vmf);
+ else
+ ret = fault_1d(obj, vma, vmf);
+
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct omap_gem_object *omap_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ /* after drm_gem_mmap(), it is safe to access the obj */
+ omap_obj = to_omap_bo(vma->vm_private_data);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
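+ /* use MIXEDMAP rather than PFNMAP since the fault handler inserts
+ * struct-page backed shmem pages as well as raw pfns for contiguous
+ * buffers, via vm_insert_mixed()
+ */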
+
+ if (omap_obj->flags & OMAP_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return ret;
+}
+
+/**
+ * omap_gem_dumb_create - create a dumb buffer
+ * @file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ union omap_gem_size gsize;
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
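+ /* (e.g. a 1920x1080 request at 32bpp with pitch passed in as zero
+ * ends up with pitch = 7680 and size = PAGE_ALIGN(7680 * 1080) =
+ * 8294400 bytes)
+ */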
+
+ gsize = (union omap_gem_size){
+ .bytes = args->size,
+ };
+
+ return omap_gem_new_handle(dev, file, gsize,
+ OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
+
+/**
+ * omap_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map_offset - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
+
+/* Set scrolling position. This allows us to implement fast scrolling
+ * for console.
+ */
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ if (roll > npages) {
+ dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+ return -EINVAL;
+ }
+
+ omap_obj->roll = roll;
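+ /* note: the new roll also takes effect the next time the buffer is
+ * pinned, via the tiler_pin() call in omap_gem_get_paddr()
+ */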
+
+ if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
+ /* this can get called from fbcon in atomic context.. so
+ * just ignore it and wait for next time called from
+ * interruptible context to update the PAT.. the result
+ * may be that user sees wrap-around instead of scrolling
+ * momentarily on the screen. If we wanted to be fancier
+ * we could perhaps schedule some workqueue work at this
+ * point.
+ */
+ return 0;
+ }
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ /* if we aren't mapped yet, we don't need to do anything */
+ if (omap_obj->block) {
+ struct page **pages;
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+ ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+ if (ret)
+ dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap)
+{
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ if (remap && is_shmem(obj) && priv->has_dmm) {
+ if (omap_obj->paddr_cnt == 0) {
+ struct page **pages;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct tiler_block *block;
+
+ BUG_ON(omap_obj->block);
+
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ block = tiler_reserve_2d(fmt,
+ omap_obj->width,
+ omap_obj->height, 0);
+ } else {
+ block = tiler_reserve_1d(obj->size);
+ }
+
+ if (IS_ERR(block)) {
+ ret = PTR_ERR(block);
+ dev_err(obj->dev->dev,
+ "could not remap: %d (%d)\n", ret, fmt);
+ goto fail;
+ }
+
+ /* TODO: enable async refill.. */
+ ret = tiler_pin(block, pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ tiler_release(block);
+ dev_err(obj->dev->dev,
+ "could not pin: %d\n", ret);
+ goto fail;
+ }
+
+ omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->block = block;
+
+ DBG("got paddr: %08x", omap_obj->paddr);
+ }
+
+ omap_obj->paddr_cnt++;
+
+ *paddr = omap_obj->paddr;
+ } else if (omap_obj->flags & OMAP_BO_DMA) {
+ *paddr = omap_obj->paddr;
+ } else {
+ ret = -EINVAL;
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Release physical address, when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ if (omap_obj->paddr_cnt > 0) {
+ omap_obj->paddr_cnt--;
+ if (omap_obj->paddr_cnt == 0) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ goto fail;
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->block = NULL;
+ }
+ }
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* acquire pages when needed (for example, for DMA where a physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ if (is_shmem(obj) && !omap_obj->pages) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ dev_err(obj->dev->dev, "could not attach pages\n");
+ return ret;
+ }
+ }
+
+ /* TODO: even phys-contig.. we should have a list of pages? */
+ *pages = omap_obj->pages;
+
+ return 0;
+}
+
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ int ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = get_pages(obj, pages);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* do something here if we dynamically attach/detach pages.. at
+ * least they would no longer need to be pinned if everyone has
+ * released the pages..
+ */
+ return 0;
+}
+
+/* Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev. This should be called with struct_mutex
+ * held.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!omap_obj->vaddr) {
+ struct page **pages;
+ int ret = get_pages(obj, &pages);
+ if (ret)
+ return ERR_PTR(ret);
+ omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return omap_obj->vaddr;
+}
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+ struct list_head list;
+ struct omap_gem_object *omap_obj;
+ enum omap_gem_op op;
+ uint32_t read_target, write_target;
+ /* notify called w/ sync_lock held */
+ void (*notify)(void *arg);
+ void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved which can call a user
+ * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
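+/* Typical flow: omap_gem_op_start(obj, OMAP_GEM_WRITE) bumps
+ * write_pending, a blocked caller queues a waiter whose write_target is
+ * that pending count, and once omap_gem_op_finish() advances
+ * write_complete to the target, sync_op_update() invokes the waiter's
+ * notify() callback.
+ */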
+
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+ struct omap_gem_object *omap_obj = waiter->omap_obj;
+ if ((waiter->op & OMAP_GEM_READ) &&
+ (omap_obj->sync->read_complete < waiter->read_target))
+ return true;
+ if ((waiter->op & OMAP_GEM_WRITE) &&
+ (omap_obj->sync->write_complete < waiter->write_target))
+ return true;
+ return false;
+}
+
+/* macro for sync debug.. */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+ printk(KERN_ERR "%s:%d: "fmt"\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
+
+static void sync_op_update(void)
+{
+ struct omap_gem_sync_waiter *waiter, *n;
+ list_for_each_entry_safe(waiter, n, &waiters, list) {
+ if (!is_waiting(waiter)) {
+ list_del(&waiter->list);
+ SYNC("notify: %p", waiter);
+ waiter->notify(waiter->arg);
+ kfree(waiter);
+ }
+ }
+}
+
+static inline int sync_op(struct drm_gem_object *obj,
+ enum omap_gem_op op, bool start)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if (!omap_obj->sync) {
+ omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!omap_obj->sync) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ if (start) {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_pending++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_pending++;
+ } else {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_complete++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_complete++;
+ sync_op_update();
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+
+ return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient. So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+ spin_lock(&sync_lock);
+ sync_op_update();
+ spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, true);
+}
+
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, false);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+static void sync_notify(void *arg)
+{
+ struct task_struct **waiter_task = arg;
+ *waiter_task = NULL;
+ wake_up_all(&sync_event);
+}
+
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+ if (omap_obj->sync) {
+ struct task_struct *waiter_task = current;
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = sync_notify;
+ waiter->arg = &waiter_task;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ ret = wait_event_interruptible(sync_event,
+ (waiter_task == NULL));
+ spin_lock(&sync_lock);
+ if (waiter_task) {
+ SYNC("interrupted: %p", waiter);
+ /* we were interrupted */
+ list_del(&waiter->list);
+ waiter_task = NULL;
+ } else {
+ /* freed in sync_op_update() */
+ waiter = NULL;
+ }
+ }
+ spin_unlock(&sync_lock);
+
+ if (waiter) {
+ kfree(waiter);
+ }
+ }
+ return ret;
+}
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked.. fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (omap_obj->sync) {
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = fxn;
+ waiter->arg = arg;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ return 0;
+ }
+
+ spin_unlock(&sync_lock);
+ }
+
+ /* no waiting.. */
+ fxn(arg);
+
+ return 0;
+}
+
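
A hedged sketch of how the op-tracking API above is meant to be consumed (not part of the patch; the helper name and error handling are illustrative only). A CPU-side user would wait out pending writers, then bracket its own access with start/finish so other parties can synchronize against it:

/* illustrative sketch only -- not part of the patch */
static int example_cpu_read(struct drm_gem_object *obj)
{
	int ret;

	/* wait for any writer that is still pending on the buffer */
	ret = omap_gem_op_sync(obj, OMAP_GEM_WRITE);
	if (ret)
		return ret;

	/* mark the CPU read so others can synchronize against it */
	ret = omap_gem_op_start(obj, OMAP_GEM_READ);
	if (ret)
		return ret;

	/* ... access the buffer contents here ... */

	return omap_gem_op_finish(obj, OMAP_GEM_READ);
}
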
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from its sync-obj heap. Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+ /* clearing a previously set syncobj */
+ syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!syncobj) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ /* replacing an existing syncobj */
+ if (omap_obj->sync) {
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ kfree(omap_obj->sync);
+ }
+ omap_obj->flags |= OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+ return ret;
+}
+
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL; /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ evict(obj);
+
+ if (obj->map_list.map) {
+ drm_gem_free_mmap_offset(obj);
+ }
+
+ /* don't free externally allocated backing memory */
+ if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+ if (omap_obj->pages) {
+ omap_gem_detach_pages(obj);
+ }
+ if (!is_shmem(obj)) {
+ dma_free_writecombine(dev->dev, obj->size,
+ omap_obj->vaddr, omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ }
+ }
+
+ /* don't free externally allocated syncobj */
+ if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ kfree(omap_obj->sync);
+ }
+
+ drm_gem_object_release(obj);
+
+ kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = omap_gem_new(dev, gsize, flags);
+ if (!obj)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, obj, handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+ return ret;
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
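
A minimal caller sketch (not part of the patch; function name and size are illustrative) showing how an ioctl handler might use omap_gem_new_handle() above to hand a write-combined buffer back to userspace:

/* illustrative sketch only -- not part of the patch */
static int example_alloc_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t *handle)
{
	/* 4MB linear, write-combined buffer */
	union omap_gem_size gsize = { .bytes = 4 * 1024 * 1024 };

	return omap_gem_new_handle(dev, file, gsize, OMAP_BO_WC, handle);
}
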
+/* GEM buffer object constructor */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ struct drm_gem_object *obj = NULL;
+ size_t size;
+ int ret;
+
+ if (flags & OMAP_BO_TILED) {
+ if (!usergart) {
+ dev_err(dev->dev, "Tiled buffers require DMM\n");
+ goto fail;
+ }
+
+ /* tiled buffers are always shmem page backed.. when they are
+ * scanned out, they are remapped into DMM/TILER
+ */
+ flags &= ~OMAP_BO_SCANOUT;
+
+ /* currently don't allow cached buffers.. there is some caching
+ * stuff that needs to be handled better
+ */
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+ flags |= OMAP_BO_WC;
+
+ /* align dimensions to slot boundaries... */
+ tiler_align(gem2fmt(flags),
+ &gsize.tiled.width, &gsize.tiled.height);
+
+ /* ...and calculate size based on aligned dimensions */
+ size = tiler_size(gem2fmt(flags),
+ gsize.tiled.width, gsize.tiled.height);
+ } else {
+ size = PAGE_ALIGN(gsize.bytes);
+ }
+
+ omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+ if (!omap_obj) {
+ dev_err(dev->dev, "could not allocate GEM object\n");
+ goto fail;
+ }
+
+ obj = &omap_obj->base;
+
+ if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+ /* attempt to allocate contiguous memory if we don't
+ * have DMM for remapping discontiguous buffers
+ */
+ omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+ &omap_obj->paddr, GFP_KERNEL);
+ if (omap_obj->vaddr) {
+ flags |= OMAP_BO_DMA;
+ }
+ }
+
+ omap_obj->flags = flags;
+
+ if (flags & OMAP_BO_TILED) {
+ omap_obj->width = gsize.tiled.width;
+ omap_obj->height = gsize.tiled.height;
+ }
+
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+ ret = drm_gem_private_object_init(dev, obj, size);
+ } else {
+ ret = drm_gem_object_init(dev, obj, size);
+ }
+
+ if (ret) {
+ goto fail;
+ }
+
+ return obj;
+
+fail:
+ if (obj) {
+ omap_gem_free_object(obj);
+ }
+ return NULL;
+}
+
+/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+void omap_gem_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ const enum tiler_fmt fmts[] = {
+ TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+ };
+ int i, j, ret;
+
+ ret = omap_dmm_init(dev);
+ if (ret) {
+ /* DMM only supported on OMAP4 and later, so this isn't fatal */
+ dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
+ return;
+ }
+
+ usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
+ if (!usergart) {
+ dev_warn(dev->dev, "could not allocate usergart\n");
+ return;
+ }
+
+ /* reserve 4k aligned/wide regions for userspace mappings: */
+ for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+ uint16_t h = 1, w = PAGE_SIZE >> i;
+ tiler_align(fmts[i], &w, &h);
+ /* note: since each region is 1 4kb page wide, and has the minimum
+ * number of rows, the height ends up being the same as the
+ * # of pages in the region
+ */
+ usergart[i].height = h;
+ usergart[i].height_shift = ilog2(h);
+ usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
+ usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+ for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+ struct usergart_entry *entry = &usergart[i].entry[j];
+ struct tiler_block *block =
+ tiler_reserve_2d(fmts[i], w, h,
+ PAGE_SIZE);
+ if (IS_ERR(block)) {
+ dev_err(dev->dev,
+ "reserve failed: %d, %d, %ld\n",
+ i, j, PTR_ERR(block));
+ return;
+ }
+ entry->paddr = tiler_ssptr(block);
+ entry->block = block;
+
+ DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+ entry->paddr,
+ usergart[i].stride_pfn << PAGE_SHIFT);
+ }
+ }
+
+ priv->has_dmm = true;
+}
+
+void omap_gem_deinit(struct drm_device *dev)
+{
+ /* I believe we can rely on there being no more outstanding GEM
+ * objects which could depend on usergart/dmm at this point.
+ */
+ omap_dmm_remove();
+ kfree(usergart);
+}
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
new file mode 100644
index 00000000000..29275c7209e
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -0,0 +1,169 @@
+/*
+ * drivers/staging/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--) {
+ page_cache_release(pages[i]);
+ }
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+
+int
+_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ size / PAGE_SIZE, 0, 0);
+
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
diff --git a/drivers/staging/omapdrm/omap_priv.h b/drivers/staging/omapdrm/omap_priv.h
new file mode 100644
index 00000000000..c324709aa9a
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_priv.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/omapdrm/omap_priv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_PRIV_H__
+#define __OMAP_PRIV_H__
+
+/* Non-userspace facing APIs
+ */
+
+/* optional platform data to configure which pipes/overlays/CRTCs are used
+ * by default.. if this is not provided, then instead the first
+ * CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
+ * one manager, with priority given to managers that are connected to
+ * detected devices. This should be a good default behavior for most cases,
+ * but there may still be times when you wish to do something different.
+ */
+struct omap_kms_platform_data {
+ int ovl_cnt;
+ const int *ovl_ids;
+ int mgr_cnt;
+ const int *mgr_ids;
+ int dev_cnt;
+ const char **dev_names;
+};
+
+struct omap_drm_platform_data {
+ struct omap_kms_platform_data *kms_pdata;
+ struct omap_dmm_platform_data *dmm_pdata;
+};
+
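
A hypothetical board-file fragment (not part of the patch; the overlay/manager ids and device names are made up) showing how this platform data could be populated, assuming ARRAY_SIZE from <linux/kernel.h>:

/* hypothetical board-file sketch -- values are illustrative only */
static const int example_ovl_ids[] = { 0, 1 };
static const int example_mgr_ids[] = { 0 };
static const char *example_dev_names[] = { "dvi", "hdmi" };

static struct omap_kms_platform_data example_kms_pdata = {
	.ovl_cnt   = ARRAY_SIZE(example_ovl_ids),
	.ovl_ids   = example_ovl_ids,
	.mgr_cnt   = ARRAY_SIZE(example_mgr_ids),
	.mgr_ids   = example_mgr_ids,
	.dev_cnt   = ARRAY_SIZE(example_dev_names),
	.dev_names = example_dev_names,
};

static struct omap_drm_platform_data example_pdata = {
	.kms_pdata = &example_kms_pdata,
};
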
+#endif /* __OMAP_PRIV_H__ */
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/staging/omapdrm/tcm-sita.c
new file mode 100644
index 00000000000..10d5ac3dae4
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.c
@@ -0,0 +1,703 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "tcm-sita.h"
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ spin_lock_init(&(pvt->lock));
+
+ /* Creating the tcm map */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ spin_lock(&(pvt->lock));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ spin_lock(&(pvt->lock));
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+
+ for (i = 0; i < tcm->width; i++) /* map has tcm->width columns */
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ spin_lock(&(pvt->lock));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ spin_unlock(&(pvt->lock));
+
+ return 0;
+}
+
+/**
+ * Note: In general the coordinates in the scan field are relative to the scan
+ * sweep direction. The scan origin (e.g. top-left corner) will always be
+ * the p0 member of the field. Therefore, for a scan from top-left p0.x <= p1.x
+ * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
+ * <= p0.y
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+ /* change upper x bound */
+ end_x = x + 1;
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+ /* change upper x bound */
+ end_x = x - 1;
+
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /**
+ * Currently we only support full width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ if (found == num_slots)
+ break;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h left, top, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+ * NOTE: For horizontal bias we always give the first found, because our
+ * scan is horizontal-raster-based and the first candidate will always
+ * have the horizontal bias.
+ */
+ bool first = criteria & CR_BIAS_HORIZONTAL;
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+ /* this implies that neighs and occupied match */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+ /**
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* Clearing any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/staging/omapdrm/tcm-sita.h
new file mode 100644
index 00000000000..0444f868671
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.h
@@ -0,0 +1,95 @@
+/*
+ * tcm_sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments that are also border segments of the scan field. Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+ u16 neighs; /* number of busy neighbors */
+};
+
+struct sita_pvt {
+ spinlock_t lock; /* spinlock to protect access */
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+ a->p0.x = x0;
+ a->p0.y = y0;
+ a->p1.x = x1;
+ a->p1.y = y1;
+}
+
+#endif
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/staging/omapdrm/tcm.h
new file mode 100644
index 00000000000..d273e3ee0b4
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm.h
@@ -0,0 +1,326 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+ u16 x;
+ u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+ bool is2d; /* whether area is 1d or 2d */
+ struct tcm *tcm; /* parent */
+ struct tcm_pt p0;
+ struct tcm_pt p1;
+};
+
+struct tcm {
+ u16 width, height; /* container dimensions */
+ int lut_id; /* Lookup table identifier */
+
+ /* 'pvt' structure shall contain any tcm details (attr) along with
+ linked list of allocated areas and mutex for mutually exclusive access
+ to the list. It may also contain copies of width and height to notice
+ any changes to the publicly available width and height fields. */
+ void *pvt;
+
+ /* function table */
+ s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+ struct tcm_area *area);
+ s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+ s32 (*free) (struct tcm *tcm, struct tcm_area *area);
+ void (*deinit) (struct tcm *tcm);
+};
+
+/*=============================================================================
+ BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementations do NOT have to check the following:
+ *
+ * area pointer is NULL
+ * width and height fit within the container
+ * number of pages is more than the size of the container
+ *
+ */
+
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm Pointer to container manager.
+ *
+ * The call frees as much of the manager's memory as possible. A NULL
+ * tcm pointer is ignored, so it is safe to call even if
+ * initialization failed.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+ if (tcm)
+ tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param height Height(in pages) of area to be reserved.
+ * @param width Width(in pages) of area to be reserved.
+ * @param align Alignment requirement for top-left corner of area. Not
+ * all values may be supported by the container manager,
+ * but it must support 0 (1), 32 and 64.
+ * 0 value is equivalent to 1.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+ u16 align, struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+ /* align must be a 2 power */
+ (align & (align - 1))) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param slots Number of (contiguous) slots to reserve.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+ struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area Pointer to area reserved by a prior call to
+ * tcm_reserve_1d or tcm_reserve_2d call, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
+ *
+ * @return 0 on success. Non-0 error code on failure. Also, the tcm
+ * field of the area is set to NULL on success to avoid subsequent
+ * freeing. This call will succeed even if supplying
+ * the area from a failed reserve call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+ s32 res = 0; /* free succeeds by default */
+
+ if (area && area->tcm) {
+ res = area->tcm->free(area->tcm, area);
+ if (res == 0)
+ area->tcm = NULL;
+ }
+
+ return res;
+}
+
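
A short, hedged usage sketch of the interface above (not part of the header; the container dimensions and reservation size are arbitrary):

/* illustrative sketch only -- not part of the header */
static inline int example_tcm_usage(void)
{
	struct tcm *tcm;
	struct tcm_area area;
	s32 ret;

	tcm = sita_init(256, 128, NULL);	/* NULL attr: default split point */
	if (!tcm)
		return -ENOMEM;

	/* reserve a 16x8 slot region whose top-left corner is 32-aligned */
	ret = tcm_reserve_2d(tcm, 16, 8, 32, &area);
	if (!ret)
		tcm_free(&area);		/* clears area.tcm on success */

	tcm_deinit(tcm);
	return ret;
}
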
+/*=============================================================================
+ HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter. The 'parent' parameter will get modified to
+ * contain the remaining portion of the area. If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent Pointer to a VALID parent area that will get modified
+ * @param slice Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+ *slice = *parent;
+
+ /* check if we need to slice */
+ if (slice->tcm && !slice->is2d &&
+ slice->p0.y != slice->p1.y &&
+ (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+ /* set end point of slice (start always remains) */
+ slice->p1.x = slice->tcm->width - 1;
+ slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+ /* adjust remaining area */
+ parent->p0.x = 0;
+ parent->p0.y = slice->p1.y + 1;
+ } else {
+ /* mark this as the last slice */
+ parent->tcm = NULL;
+ }
+}
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+ return area && area->tcm &&
+ /* coordinate bounds */
+ area->p1.x < area->tcm->width &&
+ area->p1.y < area->tcm->height &&
+ area->p0.y <= area->p1.y &&
+ /* 1D coordinate relationship + p0.x check */
+ ((!area->is2d &&
+ area->p0.x < area->tcm->width &&
+ area->p0.x + area->p0.y * area->tcm->width <=
+ area->p1.x + area->p1.y * area->tcm->width) ||
+ /* 2D coordinate relationship */
+ (area->is2d &&
+ area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+ u16 i;
+
+ if (a->is2d) {
+ return p->x >= a->p0.x && p->x <= a->p1.x &&
+ p->y >= a->p0.y && p->y <= a->p1.y;
+ } else {
+ i = p->x + p->y * a->tcm->width;
+ return i >= a->p0.x + a->p0.y * a->tcm->width &&
+ i <= a->p1.x + a->p1.y * a->tcm->width;
+ }
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+ return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+ return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+ return area->is2d ?
+ __tcm_area_width(area) * __tcm_area_height(area) :
+ (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+ area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+ if (__tcm_sizeof(a) < num_pg)
+ return -ENOMEM;
+ if (!num_pg)
+ return -EINVAL;
+
+ a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+ a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+ return 0;
+}
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var Name of a local variable of type 'struct
+ * tcm_area' that will get modified to
+ * contain each slice.
+ * @param area The VALID parent area, passed by value. This
+ * structure will not get modified
+ * throughout the loop.
+ * @param safe Name of a local 'struct tcm_area' variable
+ * used as scratch space by the iterator.
+ *
+ */
+#define tcm_for_each_slice(var, area, safe) \
+ for (safe = area, \
+ tcm_slice(&safe, &var); \
+ var.tcm; tcm_slice(&safe, &var))
+
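
A hedged sketch of how the iterator is used, mirroring fill_area() in tcm-sita.c (not part of the header; the visit_slot callback is illustrative):

/* illustrative sketch only -- visit every slot of an area, slice by slice */
static inline void example_walk_area(struct tcm_area *area,
		void (*visit_slot)(u16 x, u16 y))
{
	struct tcm_area slice, scratch;
	u16 x, y;

	tcm_for_each_slice(slice, *area, scratch) {
		for (y = slice.p0.y; y <= slice.p1.y; y++)
			for (x = slice.p0.x; x <= slice.p1.x; x++)
				visit_slot(x, y);
	}
}
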
+#endif
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 683657cb21f..d77b21f1eb2 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -70,7 +70,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
- { PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
+ { PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000),
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
};
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 7598e77672a..2ee4491b713 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -590,13 +590,13 @@ out:
* during writeback for given inode.
*/
struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode)
+ struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode)
{
struct pohmelfs_inode *npi;
int err = -ENOMEM;
struct netfs_inode_info info;
- dprintk("%s: name: '%s', mode: %o, start: %llu.\n",
+ dprintk("%s: name: '%s', mode: %ho, start: %llu.\n",
__func__, str->name, mode, start);
info.mode = mode;
@@ -630,7 +630,8 @@ err_out_unlock:
/*
* Create local object and bind it to dentry.
*/
-static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 start, int mode)
+static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry,
+ u64 start, umode_t mode)
{
struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
struct pohmelfs_inode *npi, *parent;
@@ -661,13 +662,13 @@ static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 s
/*
* VFS create and mkdir callbacks.
*/
-static int pohmelfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int pohmelfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return pohmelfs_create_entry(dir, dentry, 0, mode);
}
-static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7a1955583b7..807e3f32411 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -830,7 +830,6 @@ const struct address_space_operations pohmelfs_aops = {
static void pohmelfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
}
@@ -1370,9 +1369,9 @@ static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int pohmelfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
+ struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
seq_printf(seq, ",idx=%u", psb->idx);
seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
@@ -1760,11 +1759,11 @@ err_out_exit:
return err;
}
-static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+static int pohmelfs_show_stats(struct seq_file *m, struct dentry *root)
{
struct netfs_state *st;
struct pohmelfs_ctl *ctl;
- struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
+ struct pohmelfs_sb *psb = POHMELFS_SB(root->d_sb);
struct pohmelfs_config *c;
mutex_lock(&psb->state_lock);
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
index 985b6b755d5..f26894f2a57 100644
--- a/drivers/staging/pohmelfs/netfs.h
+++ b/drivers/staging/pohmelfs/netfs.h
@@ -776,7 +776,7 @@ struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash);
void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode);
+ struct pohmelfs_inode *parent, struct qstr *str, u64 start, umode_t mode);
int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 750c347bfbe..f87e2110185 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,9 +1,43 @@
-config RTL8192E
- tristate "RealTek RTL8192E Wireless LAN NIC driver"
- depends on PCI && WLAN
- depends on m
- select WIRELESS_EXT
- select WEXT_PRIV
- select CRYPTO
- default N
+config RTLLIB
+ tristate "Support for rtllib wireless devices"
+ depends on WLAN && m
+ default n
+ select LIB80211
---help---
+ If you have a wireless card that uses rtllib, say
+ Y. Currently the only card is the rtl8192e.
+
+ If unsure, say N.
+
+if RTLLIB
+
+config RTLLIB_CRYPTO_CCMP
+ tristate "Support for rtllib CCMP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ CCMP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_TKIP
+ tristate "Support for rtllib TKIP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ TKIP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_WEP
+ tristate "Support for rtllib WEP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ WEP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+source "drivers/staging/rtl8192e/rtl8192e/Kconfig"
+
+endif
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index a66a9ad3368..cb18db74d78 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,41 +1,21 @@
-ccflags-y += -DUSE_FW_SOURCE_IMG_FILE
-ccflags-y += -DCONFIG_PM_RTL
-ccflags-y += -DCONFIG_PM
-ccflags-y += -DHAVE_NET_DEVICE_OPS
-ccflags-y += -DENABLE_DOT11D
-
-r8192e_pci-objs := \
- rtl_core.o \
- rtl_eeprom.o \
- rtl_ps.o \
- rtl_wx.o \
- rtl_cam.o \
- rtl_dm.o \
- rtl_pm.o \
- rtl_pci.o \
- rtl_debug.o \
- rtl_ethtool.o \
- r8192E_dev.o \
- r8192E_phy.o \
- r8192E_firmware.o \
- r8192E_cmdpkt.o \
- r8192E_hwimg.o \
- r8190P_rtl8256.o \
+rtllib-objs := \
+ dot11d.o \
+ rtllib_module.o \
rtllib_rx.o \
- rtllib_softmac.o \
rtllib_tx.o \
rtllib_wx.o \
- rtllib_module.o \
+ rtllib_softmac.o \
rtllib_softmac_wx.o \
- rtl819x_HTProc.o \
- rtl819x_TSProc.o \
rtl819x_BAProc.o \
- dot11d.o \
- rtllib_crypt.o \
- rtllib_crypt_tkip.o \
- rtllib_crypt_ccmp.o \
- rtllib_crypt_wep.o
+ rtl819x_HTProc.o \
+ rtl819x_TSProc.o
+
+obj-$(CONFIG_RTLLIB) += rtllib.o
+
+obj-$(CONFIG_RTLLIB_CRYPTO_CCMP) += rtllib_crypt_ccmp.o
+obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
+obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
-obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+obj-$(CONFIG_RTL8192E) += rtl8192e/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index ee0381e0a81..f7b14f8b7b8 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -46,7 +46,7 @@ static struct channel_list ChannelPlan[] = {
56, 60, 64}, 21}
};
-void Dot11d_Init(struct rtllib_device *ieee)
+void dot11d_init(struct rtllib_device *ieee)
{
struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
pDot11dInfo->bEnabled = false;
@@ -58,6 +58,7 @@ void Dot11d_Init(struct rtllib_device *ieee)
RESET_CIE_WATCHDOG(ieee);
}
+EXPORT_SYMBOL(dot11d_init);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
{
@@ -99,6 +100,7 @@ void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
break;
}
}
+EXPORT_SYMBOL(Dot11d_Channelmap);
void Dot11d_Reset(struct rtllib_device *ieee)
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 032f7004a7f..71f4549a378 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -93,7 +93,7 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-void Dot11d_Init(struct rtllib_device *dev);
+void dot11d_init(struct rtllib_device *dev);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
void Dot11d_Reset(struct rtllib_device *dev);
void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
new file mode 100644
index 00000000000..50e0d91a409
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -0,0 +1,9 @@
+config RTL8192E
+ tristate "RealTek RTL8192E Wireless LAN NIC driver"
+ depends on PCI && WLAN && RTLLIB
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select CRYPTO
+ default n
+ ---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
new file mode 100644
index 00000000000..313a92ec683
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -0,0 +1,21 @@
+r8192e_pci-objs := \
+ r8192E_dev.o \
+ r8192E_phy.o \
+ r8192E_firmware.o \
+ r8192E_cmdpkt.o \
+ r8192E_hwimg.o \
+ r8190P_rtl8256.o \
+ rtl_cam.o \
+ rtl_core.o \
+ rtl_debug.o \
+ rtl_dm.o \
+ rtl_eeprom.o \
+ rtl_ethtool.o \
+ rtl_pci.o \
+ rtl_pm.o \
+ rtl_ps.o \
+ rtl_wx.o \
+
+obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index b7bb71fa9ec..b7bb71fa9ec 100644
--- a/drivers/staging/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 0da56c80f08..0da56c80f08 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 64e831d2f4e..64e831d2f4e 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 58d044ea552..58d044ea552 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index 23219e17e5a..23219e17e5a 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
diff --git a/drivers/staging/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 808aab6fa5e..808aab6fa5e 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
diff --git a/drivers/staging/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index b9b3b52f912..b9b3b52f912 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 37719859bda..37719859bda 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index caa87883310..caa87883310 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
diff --git a/drivers/staging/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 43c3fb859d1..43c3fb859d1 100644
--- a/drivers/staging/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
index 08e7dbb6694..08e7dbb6694 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
index 019836bb36c..019836bb36c 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
diff --git a/drivers/staging/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 7fe69a3348d..3e705efaaf2 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -23,7 +23,6 @@
#include "r8190P_rtl8256.h"
#include "r8192E_phy.h"
#include "rtl_dm.h"
-#include "dot11d.h"
#include "r8192E_hwimg.h"
@@ -859,7 +858,7 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
__func__, *stage, *step, channel);
- if (!IsLegalChannel(priv->rtllib, channel)) {
+ if (!rtllib_legal_channel(priv->rtllib, channel)) {
RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n",
channel);
return true;
diff --git a/drivers/staging/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 7318f8857af..7318f8857af 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
diff --git a/drivers/staging/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 7899dd538dc..7899dd538dc 100644
--- a/drivers/staging/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
diff --git a/drivers/staging/rtl8192e/r819xE_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
index d5de279f664..d5de279f664 100644
--- a/drivers/staging/rtl8192e/r819xE_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
diff --git a/drivers/staging/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index baf3b6342e4..baf3b6342e4 100644
--- a/drivers/staging/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
diff --git a/drivers/staging/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index fa607f98b17..fa607f98b17 100644
--- a/drivers/staging/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
diff --git a/drivers/staging/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5ad96649f40..71adb6b3344 100644
--- a/drivers/staging/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -53,9 +53,7 @@
#include "rtl_wx.h"
#include "rtl_dm.h"
-#ifdef CONFIG_PM_RTL
#include "rtl_pm.h"
-#endif
int hwwep = 1;
static int channels = 0x3fff;
@@ -581,7 +579,7 @@ static void rtl8192_update_beacon(void *data)
struct rtllib_network *net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
- HTUpdateSelfAndPeerSetting(ieee, net);
+ HT_update_self_and_peer_setting(ieee, net);
ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
net->bssht.bdRT2RTLongSlotTime;
ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.RT2RT_HT_Mode;
@@ -1289,7 +1287,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
priv->ChannelPlan = COUNTRY_CODE_FCC;
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
- Dot11d_Init(priv->rtllib);
+ dot11d_init(priv->rtllib);
Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
@@ -1305,7 +1303,6 @@ static short rtl8192_init(struct net_device *dev)
memset(&(priv->stats), 0, sizeof(struct rt_stats));
- rtl8192_dbgp_flag_init(dev);
rtl8192_init_priv_handler(dev);
rtl8192_init_priv_constant(dev);
rtl8192_init_priv_variable(dev);
@@ -2839,7 +2836,6 @@ done:
/****************************************************************************
---------------------------- PCI_STUFF---------------------------
*****************************************************************************/
-#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = rtl8192_open,
.ndo_stop = rtl8192_close,
@@ -2851,7 +2847,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = rtllib_xmit,
};
-#endif
static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -2938,17 +2933,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->irq = 0;
-#ifdef HAVE_NET_DEVICE_OPS
dev->netdev_ops = &rtl8192_netdev_ops;
-#else
- dev->open = rtl8192_open;
- dev->stop = rtl8192_close;
- dev->tx_timeout = rtl8192_tx_timeout;
- dev->do_ioctl = rtl8192_ioctl;
- dev->set_multicast_list = r8192_set_multicast;
- dev->set_mac_address = r8192_set_mac_adr;
- dev->hard_start_xmit = rtllib_xmit;
-#endif
dev->wireless_handlers = (struct iw_handler_def *)
&r8192_wx_handlers_def;
@@ -2974,10 +2959,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
register_netdev(dev);
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
- err = rtl_debug_module_init(priv, dev->name);
- if (err)
- RT_TRACE(COMP_DBG, "failed to create debugfs files. Ignoring "
- "error: %d\n", err);
+
rtl8192_proc_init_one(dev);
if (priv->polling_timer_on == 0)
@@ -3015,7 +2997,6 @@ static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
del_timer_sync(&priv->gpio_polling_timer);
cancel_delayed_work(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
- rtl_debug_module_remove(priv);
rtl8192_proc_remove_one(dev);
rtl8192_down(dev, true);
deinit_hal_dm(dev);
@@ -3103,43 +3084,9 @@ bool NicIFDisableNIC(struct net_device *dev)
static int __init rtl8192_pci_module_init(void)
{
- int ret;
- int error;
-
- ret = rtllib_init();
- if (ret) {
- printk(KERN_ERR "rtllib_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_tkip_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_tkip_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_ccmp_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_ccmp_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_wep_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_wep_init() failed %d\n", ret);
- return ret;
- }
printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n");
- error = rtl_create_debugfs_root();
- if (error) {
- RT_TRACE(COMP_DBG, "Create debugfs root fail: %d\n", error);
- goto err_out;
- }
-
rtl8192_proc_module_init();
if (0 != pci_register_driver(&rtl8192_pci_driver)) {
DMESG("No device found");
@@ -3147,9 +3094,6 @@ static int __init rtl8192_pci_module_init(void)
return -ENODEV;
}
return 0;
-err_out:
- return error;
-
}
static void __exit rtl8192_pci_module_exit(void)
@@ -3158,12 +3102,6 @@ static void __exit rtl8192_pci_module_exit(void)
RT_TRACE(COMP_DOWN, "Exiting");
rtl8192_proc_module_remove();
- rtl_remove_debugfs_root();
- rtllib_crypto_tkip_exit();
- rtllib_crypto_ccmp_exit();
- rtllib_crypto_wep_exit();
- rtllib_crypto_deinit();
- rtllib_exit();
}
void check_rfctrl_gpio_timer(unsigned long data)
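
With rtllib and the crypto handlers now registering themselves via module_init() (see the additions later in this patch), the PCI driver's entry points shrink to the usual boilerplate. Roughly, and with illustrative names for the wrappers:

static int __init demo_pci_module_init(void)
{
    /* rtllib.ko and the rtllib_crypt_* modules are loaded automatically
     * through symbol dependencies; only the PCI driver itself and its
     * /proc entries are set up here. */
    rtl8192_proc_module_init();
    return pci_register_driver(&rtl8192_pci_driver);
}

static void __exit demo_pci_module_exit(void)
{
    pci_unregister_driver(&rtl8192_pci_driver);
    rtl8192_proc_module_remove();
}
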
diff --git a/drivers/staging/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index f9af5153d9c..2a2519cc284 100644
--- a/drivers/staging/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -44,11 +44,14 @@
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <linux/io.h>
-#include "rtllib.h"
-#include "dot11d.h"
+/* Need this defined before including local include files */
+#define DRV_NAME "rtl819xE"
+
+#include "../rtllib.h"
+
+#include "../dot11d.h"
#include "r8192E_firmware.h"
#include "r8192E_hw.h"
@@ -56,7 +59,6 @@
#include "r8190P_def.h"
#include "r8192E_dev.h"
-#include "rtl_debug.h"
#include "rtl_eeprom.h"
#include "rtl_ps.h"
#include "rtl_pci.h"
@@ -67,8 +69,6 @@
#define DRV_AUTHOR "<wlanfae@realtek.com>"
#define DRV_VERSION "0014.0401.2010"
-#define DRV_NAME "rtl819xE"
-
#define IS_HARDWARE_TYPE_819xP(_priv) \
((((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8190P) || \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192E))
@@ -215,41 +215,6 @@ enum RTL819x_PHY_PARAM {
RTL819X_EFUSE_MAP = 19,
};
-enum RTL_DEBUG {
- COMP_TRACE = BIT0,
- COMP_DBG = BIT1,
- COMP_INIT = BIT2,
- COMP_RECV = BIT3,
- COMP_SEND = BIT4,
- COMP_CMD = BIT5,
- COMP_POWER = BIT6,
- COMP_EPROM = BIT7,
- COMP_SWBW = BIT8,
- COMP_SEC = BIT9,
- COMP_LPS = BIT10,
- COMP_QOS = BIT11,
- COMP_RATE = BIT12,
- COMP_RXDESC = BIT13,
- COMP_PHY = BIT14,
- COMP_DIG = BIT15,
- COMP_TXAGC = BIT16,
- COMP_HALDM = BIT17,
- COMP_POWER_TRACKING = BIT18,
- COMP_CH = BIT19,
- COMP_RF = BIT20,
- COMP_FIRMWARE = BIT21,
- COMP_HT = BIT22,
- COMP_RESET = BIT23,
- COMP_CMDPKT = BIT24,
- COMP_SCAN = BIT25,
- COMP_PS = BIT26,
- COMP_DOWN = BIT27,
- COMP_INTR = BIT28,
- COMP_LED = BIT29,
- COMP_MLME = BIT30,
- COMP_ERR = BIT31
-};
-
enum nic_t {
NIC_UNKNOWN = 0,
NIC_8192E = 1,
@@ -1121,4 +1086,10 @@ void ActUpdateChannelAccessSetting(struct net_device *dev,
enum wireless_mode WirelessMode,
struct channel_access_setting *ChnlAccessSetting);
+/* proc stuff from rtl_debug.c */
+void rtl8192_proc_init_one(struct net_device *dev);
+void rtl8192_proc_remove_one(struct net_device *dev);
+void rtl8192_proc_module_init(void);
+void rtl8192_proc_module_remove(void);
+
#endif
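
The relocated DRV_NAME definition above is load-bearing: the shared debug header introduced later in this patch only falls back to its own default when DRV_NAME is not already set, so the driver must define it before pulling in ../rtllib.h. In sketch form:

/* rtl_core.h (driver side): define the prefix first ... */
#define DRV_NAME "rtl819xE"
#include "../rtllib.h"      /* ... then include, which drags in rtllib_debug.h */

/* rtllib_debug.h (library side): only supplies a default if none was set. */
#ifndef DRV_NAME
#define DRV_NAME "rtllib_92e"
#endif
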
diff --git a/drivers/staging/rtl8192e/rtl_crypto.h b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
index ee57c0f4fa6..ee57c0f4fa6 100644
--- a/drivers/staging/rtl8192e/rtl_crypto.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
diff --git a/drivers/staging/rtl8192e/rtl_debug.c b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
index 22bc2dd6e43..c19b14cd6f7 100644
--- a/drivers/staging/rtl8192e/rtl_debug.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
@@ -22,91 +22,12 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#include "rtl_debug.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-u32 rt_global_debug_component = \
- COMP_ERR ;
-
-/*------------------Declare variable-----------------------*/
-u32 DBGP_Type[DBGP_TYPE_MAX];
-
-/*-----------------------------------------------------------------------------
- * Function: DBGP_Flag_Init
- *
- * Overview: Refresh all debug print control flag content to zero.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 10/20/2006 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void rtl8192_dbgp_flag_init(struct net_device *dev)
-{
- u8 i;
-
- for (i = 0; i < DBGP_TYPE_MAX; i++)
- DBGP_Type[i] = 0;
-
-
-} /* DBGP_Flag_Init */
-
-/* this is only for debugging */
-void print_buffer(u32 *buffer, int len)
-{
- int i;
- u8 *buf = (u8 *)buffer;
-
- printk(KERN_INFO "ASCII BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%c", buf[i]);
-
- printk(KERN_INFO "\nBINARY BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%x", buf[i]);
-
- printk(KERN_INFO "\n");
-}
-
-/* this is only for debug */
-void dump_eprom(struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < 0xff; i++)
- RT_TRACE(COMP_INIT, "EEPROM addr %x : %x", i,
- eprom_read(dev, i));
-}
-
-/* this is only for debug */
-void rtl8192_dump_reg(struct net_device *dev)
-{
- int i;
- int n;
- int max = 0x5ff;
-
- RT_TRACE(COMP_INIT, "Dumping NIC register map");
-
- for (n = 0; n <= max; ) {
- printk(KERN_INFO "\nD: %2x> ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- printk(KERN_INFO "%2x ", read_nic_byte(dev, n));
- }
- printk(KERN_INFO "\n");
-}
-
/****************************************************************************
-----------------------------PROCFS STUFF-------------------------
*****************************************************************************/
diff --git a/drivers/staging/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index a7fa9aad6f2..a7fa9aad6f2 100644
--- a/drivers/staging/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
diff --git a/drivers/staging/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index ab44a9a6927..ab44a9a6927 100644
--- a/drivers/staging/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index c1ccff4a832..c1ccff4a832 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 9452e1683a7..9452e1683a7 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
diff --git a/drivers/staging/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 36452fb7cef..36452fb7cef 100644
--- a/drivers/staging/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index ddadcc3e4e7..ddadcc3e4e7 100644
--- a/drivers/staging/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 7ea5a47dfd2..28c7da677a8 100644
--- a/drivers/staging/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/pci.h>
-#include "rtllib.h"
static inline void NdisRawWritePortUlong(u32 port, u32 val)
{
diff --git a/drivers/staging/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 92e2fde7f5f..8e1a5d55dce 100644
--- a/drivers/staging/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -17,7 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8190P_rtl8256.h"
@@ -133,4 +132,3 @@ int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable)
return -EAGAIN;
}
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 4d7f4067cc7..e5299fc3b34 100644
--- a/drivers/staging/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -17,8 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
-
#ifndef R8192E_PM_H
#define R8192E_PM_H
@@ -31,5 +29,3 @@ int rtl8192E_resume(struct pci_dev *dev);
int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable);
#endif
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c9a7c563b68..c9a7c563b68 100644
--- a/drivers/staging/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
diff --git a/drivers/staging/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index a9c2d799c08..df79d6c4ca0 100644
--- a/drivers/staging/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -26,7 +26,7 @@
#define _RTL_PS_H
#include <linux/types.h>
-#include "rtllib.h"
+
struct net_device;
#define RT_CHECK_FOR_HANG_PERIOD 2
diff --git a/drivers/staging/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 93b1edbe6ba..4e93669210a 100644
--- a/drivers/staging/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include "rtl_core.h"
-#include "dot11d.h"
#define RATE_COUNT 12
static u32 rtl8192_rates[] = {
@@ -803,7 +802,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
case 0:
- key_idx = ieee->tx_keyidx;
+ key_idx = ieee->crypt_info.tx_keyidx;
break;
case 1:
key_idx = 0;
diff --git a/drivers/staging/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
index 6a51a25ec87..6a51a25ec87 100644
--- a/drivers/staging/rtl8192e/rtl_wx.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 8b9d85c48be..32fbbc9d0d9 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -18,7 +18,6 @@
******************************************************************************/
#include "rtllib.h"
#include "rtl819x_BA.h"
-#include "rtl_core.h"
static void ActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA,
u16 Time)
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index b1c0c566882..8b7412980eb 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -943,8 +943,8 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
}
}
-void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
struct ht_info_ele *pPeerHTInfo =
@@ -955,6 +955,7 @@ void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
}
}
+EXPORT_SYMBOL(HT_update_self_and_peer_setting);
void HTUseDefaultSetting(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 09a602f7432..711a096be7a 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -497,6 +497,7 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
}
}
}
+EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl_debug.h b/drivers/staging/rtl8192e/rtl_debug.h
deleted file mode 100644
index 50fb9a9b828..00000000000
--- a/drivers/staging/rtl8192e/rtl_debug.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
-#ifndef _RTL_DEBUG_H
-#define _RTL_DEBUG_H
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/debugfs.h>
-
-struct r8192_priv;
-struct _tx_desc_8192se;
-struct _TX_DESC_8192CE;
-struct net_device;
-
-#define DBG_LOUD 4
-
-#define RT_ASSERT(_Exp, Fmt) \
- if (!(_Exp)) { \
- printk("Rtl819x: "); \
- printk Fmt; \
- }
-
-enum dbgp_flag {
- FQoS = 0,
- FTX = 1,
- FRX = 2,
- FSEC = 3,
- FMGNT = 4,
- FMLME = 5,
- FRESOURCE = 6,
- FBEACON = 7,
- FISR = 8,
- FPHY = 9,
- FMP = 10,
- FEEPROM = 11,
- FPWR = 12,
- FDM = 13,
- FDBGCtrl = 14,
- FC2H = 15,
- FBT = 16,
- FINIT = 17,
- FIOCTL = 18,
- DBGP_TYPE_MAX
-};
-
-#define QoS_INIT BIT0
-#define QoS_VISTA BIT1
-
-#define TX_DESC BIT0
-#define TX_DESC_TID BIT1
-
-#define RX_DATA BIT0
-#define RX_PHY_STS BIT1
-#define RX_PHY_SS BIT2
-#define RX_PHY_SQ BIT3
-#define RX_PHY_ASTS BIT4
-#define RX_ERR_LEN BIT5
-#define RX_DEFRAG BIT6
-#define RX_ERR_RATE BIT7
-
-
-
-#define MEDIA_STS BIT0
-#define LINK_STS BIT1
-
-#define OS_CHK BIT0
-
-#define BCN_SHOW BIT0
-#define BCN_PEER BIT1
-
-#define ISR_CHK BIT0
-
-#define PHY_BBR BIT0
-#define PHY_BBW BIT1
-#define PHY_RFR BIT2
-#define PHY_RFW BIT3
-#define PHY_MACR BIT4
-#define PHY_MACW BIT5
-#define PHY_ALLR BIT6
-#define PHY_ALLW BIT7
-#define PHY_TXPWR BIT8
-#define PHY_PWRDIFF BIT9
-
-#define MP_RX BIT0
-#define MP_SWICH_CH BIT1
-
-#define EEPROM_W BIT0
-#define EFUSE_PG BIT1
-#define EFUSE_READ_ALL BIT2
-
-#define LPS BIT0
-#define IPS BIT1
-#define PWRSW BIT2
-#define PWRHW BIT3
-#define PWRHAL BIT4
-
-#define WA_IOT BIT0
-#define DM_PWDB BIT1
-#define DM_Monitor BIT2
-#define DM_DIG BIT3
-#define DM_EDCA_Turbo BIT4
-
-#define DbgCtrl_Trace BIT0
-#define DbgCtrl_InbandNoise BIT1
-
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define C2H_Summary BIT0
-#define C2H_PacketData BIT1
-#define C2H_ContentData BIT2
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define INIT_EEPROM BIT0
-#define INIT_TxPower BIT1
-#define INIT_IQK BIT2
-#define INIT_RF BIT3
-
-#define IOCTL_TRACE BIT0
-#define IOCTL_BT_EVENT BIT1
-#define IOCTL_BT_EVENT_DETAIL BIT2
-#define IOCTL_BT_TX_ACLDATA BIT3
-#define IOCTL_BT_TX_ACLDATA_DETAIL BIT4
-#define IOCTL_BT_RX_ACLDATA BIT5
-#define IOCTL_BT_RX_ACLDATA_DETAIL BIT6
-#define IOCTL_BT_HCICMD BIT7
-#define IOCTL_BT_HCICMD_DETAIL BIT8
-#define IOCTL_IRP BIT9
-#define IOCTL_IRP_DETAIL BIT10
-#define IOCTL_CALLBACK_FUN BIT11
-#define IOCTL_STATE BIT12
-#define IOCTL_BT_TP BIT13
-#define IOCTL_BT_LOGO BIT14
-
-/* 2007/07/13 MH ------For DeBuG Print modeue------*/
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export Marco Definition---------------------------*/
-#define DEBUG_PRINT 1
-
-#if (DEBUG_PRINT == 1)
-#define RTPRINT(dbgtype, dbgflag, printstr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- printk printstr; \
- } \
-}
-
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_Ptr; \
- printk printstr; \
- printk(" "); \
- for (__i = 0; __i < 6; __i++) \
- printk("%02X%s", ptr[__i], \
- (__i == 5) ? "" : "-"); \
- printk("\n"); \
- } \
-}
-
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_HexData; \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) \
- % 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-}
-#else
-#define RTPRINT(dbgtype, dbgflag, printstr)
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)
-#endif
-
-extern u32 DBGP_Type[DBGP_TYPE_MAX];
-
-#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) \
-do {\
- if (((_Comp) & rt_global_debug_component) && \
- (_Level <= rt_global_debug_component)) { \
- int __i; \
- u8* ptr = (u8 *)_HexData; \
- printk(KERN_INFO "Rtl819x: "); \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) % \
- 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-} while (0);
-
-#define DMESG(x, a...)
-#define DMESGW(x, a...)
-#define DMESGE(x, a...)
-extern u32 rt_global_debug_component;
-#define RT_TRACE(component, x, args...) \
-do { \
- if (rt_global_debug_component & component) \
- printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
- ##args);\
-} while (0);
-
-#define assert(expr) \
- if (!(expr)) { \
- printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#define RT_DEBUG_DATA(level, data, datalen) \
- do { \
- if ((rt_global_debug_component & (level)) == (level)) {\
- int _i; \
- u8 *_pdata = (u8 *)data; \
- printk(KERN_DEBUG DRV_NAME ": %s()\n", __func__); \
- for (_i = 0; _i < (int)(datalen); _i++) { \
- printk(KERN_INFO "%2x ", _pdata[_i]); \
- if ((_i+1) % 16 == 0) \
- printk("\n"); \
- } \
- printk(KERN_INFO "\n"); \
- } \
- } while (0)
-
-struct rtl_fs_debug {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *debug_register;
- u32 hw_type;
- u32 hw_offset;
- bool hw_holding;
-};
-
-void print_buffer(u32 *buffer, int len);
-void dump_eprom(struct net_device *dev);
-void rtl8192_dump_reg(struct net_device *dev);
-
-/* debugfs stuff */
-static inline int rtl_debug_module_init(struct r8192_priv *priv,
- const char *name)
-{
- return 0;
-}
-
-static inline void rtl_debug_module_remove(struct r8192_priv *priv)
-{
-}
-
-static inline int rtl_create_debugfs_root(void)
-{
- return 0;
-}
-
-static inline void rtl_remove_debugfs_root(void)
-{
-}
-
-/* proc stuff */
-void rtl8192_proc_init_one(struct net_device *dev);
-void rtl8192_proc_remove_one(struct net_device *dev);
-void rtl8192_proc_module_init(void);
-void rtl8192_proc_module_remove(void);
-void rtl8192_dbgp_flag_init(struct net_device *dev);
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index de25975ccee..e26aec86a5c 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -25,7 +25,6 @@
#define RTLLIB_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -36,12 +35,14 @@
#include <linux/delay.h>
#include <linux/wireless.h>
+#include "rtllib_debug.h"
#include "rtl819x_HT.h"
#include "rtl819x_BA.h"
#include "rtl819x_TS.h"
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <net/lib80211.h>
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
@@ -870,69 +871,6 @@ enum _REG_PREAMBLE_MODE {
#define WLAN_ERP_USE_PROTECTION (1<<1)
#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-/* Status codes */
-enum rtllib_statuscode {
- WLAN_STATUS_SUCCESS = 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
- WLAN_STATUS_CAPS_UNSUPPORTED = 10,
- WLAN_STATUS_REASSOC_NO_ASSOC = 11,
- WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
- WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
- WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
- WLAN_STATUS_CHALLENGE_FAIL = 15,
- WLAN_STATUS_AUTH_TIMEOUT = 16,
- WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
- WLAN_STATUS_ASSOC_DENIED_RATES = 18,
- /* 802.11b */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
- WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
- WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
- /* 802.11h */
- WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
- WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
- WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
- /* 802.11g */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
- WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
- /* 802.11i */
- WLAN_STATUS_INVALID_IE = 40,
- WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
- WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
- WLAN_STATUS_INVALID_AKMP = 43,
- WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
- WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
- WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
-};
-
-/* Reason codes */
-enum rtllib_reasoncode {
- WLAN_REASON_UNSPECIFIED = 1,
- WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
- WLAN_REASON_DEAUTH_LEAVING = 3,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
- WLAN_REASON_DISASSOC_AP_BUSY = 5,
- WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
- /* 802.11h */
- WLAN_REASON_DISASSOC_BAD_POWER = 10,
- WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
- /* 802.11i */
- WLAN_REASON_INVALID_IE = 13,
- WLAN_REASON_MIC_FAILURE = 14,
- WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
- WLAN_REASON_IE_DIFFERENT = 17,
- WLAN_REASON_INVALID_GROUP_CIPHER = 18,
- WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
- WLAN_REASON_INVALID_AKMP = 20,
- WLAN_REASON_UNSUPP_RSN_VERSION = 21,
- WLAN_REASON_INVALID_RSN_IE_CAP = 22,
- WLAN_REASON_IEEE8021X_FAILED = 23,
- WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
-};
-
#define RTLLIB_STATMASK_SIGNAL (1<<0)
#define RTLLIB_STATMASK_RSSI (1<<1)
#define RTLLIB_STATMASK_NOISE (1<<2)
@@ -1122,8 +1060,6 @@ struct rtllib_stats {
struct rtllib_device;
-#include "rtllib_crypt.h"
-
#define SEC_KEY_1 (1<<0)
#define SEC_KEY_2 (1<<1)
#define SEC_KEY_3 (1<<2)
@@ -1146,7 +1082,6 @@ struct rtllib_device;
#define SEC_ALG_TKIP 2
#define SEC_ALG_CCMP 4
-#define WEP_KEYS 4
#define WEP_KEY_LEN 13
#define SCM_KEY_LEN 32
#define SCM_TEMPORAL_KEY_LENGTH 16
@@ -1158,8 +1093,8 @@ struct rtllib_security {
auth_algo:4,
unicast_uses_group:1,
encrypt:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
+ u8 key_sizes[NUM_WEP_KEYS];
+ u8 keys[NUM_WEP_KEYS][SCM_KEY_LEN];
u8 level;
u16 flags;
} __packed;
@@ -2251,14 +2186,10 @@ struct rtllib_device {
u8 ap_mac_addr[6];
u16 pairwise_key_type;
u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct rtllib_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
+ struct lib80211_crypt_info crypt_info;
+ struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
int bcrx_sta_key; /* use individual keys to override default keys even
* with RX of broad/multicast frames */
@@ -2774,7 +2705,7 @@ extern void rtllib_rx_mgt(struct rtllib_device *ieee,
struct rtllib_rx_stats *stats);
extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
struct sk_buff *skb);
-extern int IsLegalChannel(struct rtllib_device *rtllib, u8 channel);
+extern int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
/* rtllib_wx.c */
extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
@@ -2804,7 +2735,7 @@ extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
/* rtllib_softmac.c */
extern short rtllib_is_54g(struct rtllib_network *net);
-extern short rtllib_is_shortslot(struct rtllib_network net);
+extern short rtllib_is_shortslot(const struct rtllib_network *net);
extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats, u16 type,
@@ -2971,8 +2902,8 @@ extern void HTInitializeHTInfo(struct rtllib_device *ieee);
extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+extern void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
@@ -3052,21 +2983,6 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
(HTMcsToDataRate(_ieee, (u8)_MGN_RATE)))
/* fun with the built-in rtllib stack... */
-int rtllib_init(void);
-void rtllib_exit(void);
-int rtllib_crypto_init(void);
-void rtllib_crypto_deinit(void);
-int rtllib_crypto_tkip_init(void);
-void rtllib_crypto_tkip_exit(void);
-int rtllib_crypto_ccmp_init(void);
-void rtllib_crypto_ccmp_exit(void);
-int rtllib_crypto_wep_init(void);
-void rtllib_crypto_wep_exit(void);
-
-void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib);
-void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
- u8 asRsn);
-void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn);
bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
@@ -3133,12 +3049,5 @@ extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
#define MUTEX_LOCK_PRIV(pmutex) mutex_lock(pmutex)
#define MUTEX_UNLOCK_PRIV(pmutex) mutex_unlock(pmutex)
#endif
-static inline void dump_buf(u8 *buf, u32 len)
-{
- u32 i;
- printk(KERN_INFO "-----------------Len %d----------------\n", len);
- for (i = 0; i < len; i++)
- printk("%2.2x-", *(buf+i));
- printk("\n");
-}
+
#endif /* RTLLIB_H */
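
For orientation, the crypt_info member that replaces the old crypt[]/tx_keyidx/deinit-list fields is the stock structure from <net/lib80211.h> (which also supplies NUM_WEP_KEYS = 4). A rough sketch of how converted code reaches the active key; the helper name is illustrative:

#include <net/lib80211.h>
#include "rtllib.h"

/* Not part of the patch: fetch the default TX key's crypto context from the
 * embedded lib80211_crypt_info. */
static struct lib80211_crypt_data *demo_tx_crypt(struct rtllib_device *ieee)
{
    struct lib80211_crypt_info *info = &ieee->crypt_info;

    /* crypt[] has NUM_WEP_KEYS slots; tx_keyidx selects the default one. */
    return info->crypt[info->tx_keyidx];
}
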
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
index acda37b8184..86152d0e6b5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -22,7 +21,7 @@
struct rtllib_crypto_alg {
struct list_head list;
- struct rtllib_crypto_ops *ops;
+ struct lib80211_crypto_ops *ops;
};
@@ -33,15 +32,15 @@ struct rtllib_crypto {
static struct rtllib_crypto *hcrypt;
-void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info,
int force)
{
struct list_head *ptr, *n;
- struct rtllib_crypt_data *entry;
+ struct lib80211_crypt_data *entry;
- for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
- ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct rtllib_crypt_data, list);
+ for (ptr = info->crypt_deinit_list.next, n = ptr->next;
+ ptr != &info->crypt_deinit_list; ptr = n, n = ptr->next) {
+ entry = list_entry(ptr, struct lib80211_crypt_data, list);
if (atomic_read(&entry->refcnt) != 0 && !force)
continue;
@@ -53,28 +52,30 @@ void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
kfree(entry);
}
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_entries);
void rtllib_crypt_deinit_handler(unsigned long data)
{
- struct rtllib_device *ieee = (struct rtllib_device *)data;
+ struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
- rtllib_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
+ spin_lock_irqsave(info->lock, flags);
+ rtllib_crypt_deinit_entries(info, 0);
+ if (!list_empty(&info->crypt_deinit_list)) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
- "deletion list\n", ieee->dev->name);
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ "deletion list\n", info->name);
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_handler);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt)
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt)
{
- struct rtllib_crypt_data *tmp;
+ struct lib80211_crypt_data *tmp;
unsigned long flags;
if (*crypt == NULL)
@@ -87,16 +88,17 @@ void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
* decrypt operations. Use a list of delayed deinits to avoid needing
* locking. */
- spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ spin_lock_irqsave(info->lock, flags);
+ list_add(&tmp->list, &info->crypt_deinit_list);
+ if (!timer_pending(&info->crypt_deinit_timer)) {
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_delayed_deinit);
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct rtllib_crypto_alg *alg;
@@ -104,11 +106,10 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -120,8 +121,9 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
return 0;
}
+EXPORT_SYMBOL(rtllib_register_crypto_ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct list_head *ptr;
@@ -150,9 +152,10 @@ int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
return del_alg ? 0 : -1;
}
+EXPORT_SYMBOL(rtllib_unregister_crypto_ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
@@ -177,12 +180,13 @@ struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
else
return NULL;
}
+EXPORT_SYMBOL(rtllib_get_crypto_ops);
static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
-static struct rtllib_crypto_ops rtllib_crypt_null = {
+static struct lib80211_crypto_ops rtllib_crypt_null = {
.name = "NULL",
.init = rtllib_crypt_null_init,
.deinit = rtllib_crypt_null_deinit,
@@ -192,8 +196,10 @@ static struct rtllib_crypto_ops rtllib_crypt_null = {
.decrypt_msdu = NULL,
.set_key = NULL,
.get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
+ .extra_mpdu_prefix_len = 0,
+ .extra_mpdu_postfix_len = 0,
+ .extra_msdu_prefix_len = 0,
+ .extra_msdu_postfix_len = 0,
.owner = THIS_MODULE,
};
@@ -202,15 +208,14 @@ int __init rtllib_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
- ret = rtllib_register_crypto_ops(&rtllib_crypt_null);
+ ret = lib80211_register_crypto_ops(&rtllib_crypt_null);
if (ret < 0) {
kfree(hcrypt);
hcrypt = NULL;
@@ -239,3 +244,8 @@ void __exit rtllib_crypto_deinit(void)
kfree(hcrypt);
}
+
+module_init(rtllib_crypto_init);
+module_exit(rtllib_crypto_deinit);
+
+MODULE_LICENSE("GPL");
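
The shim above, and the three cipher modules that follow, all converge on the same lib80211 registration pattern; a stripped-down skeleton looks roughly like this (the "R-DEMO" ops and demo_* callbacks are placeholders, the registration calls are the real lib80211 API):

#include <linux/module.h>
#include <net/lib80211.h>

static void *demo_init(int keyidx) { return (void *)1; }
static void demo_deinit(void *priv) { }

static struct lib80211_crypto_ops demo_crypt_ops = {
    .name   = "R-DEMO",
    .init   = demo_init,
    .deinit = demo_deinit,
    .owner  = THIS_MODULE,
};

static int __init demo_crypt_module_init(void)
{
    return lib80211_register_crypto_ops(&demo_crypt_ops);
}

static void __exit demo_crypt_module_exit(void)
{
    lib80211_unregister_crypto_ops(&demo_crypt_ops);
}

module_init(demo_crypt_module_init);
module_exit(demo_crypt_module_exit);
MODULE_LICENSE("GPL");
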
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.h b/drivers/staging/rtl8192e/rtllib_crypt.h
index 49b90b73ed9..e177c9287b4 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.h
+++ b/drivers/staging/rtl8192e/rtllib_crypt.h
@@ -25,61 +25,11 @@
#include <linux/skbuff.h>
-struct rtllib_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv, struct rtllib_device* ieee);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the struct buffer and
- * correct length must be returned */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct rtllib_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct rtllib_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name);
-void rtllib_crypt_deinit_entries(struct rtllib_device *, int);
-void rtllib_crypt_deinit_handler(unsigned long);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt);
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops);
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name);
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info, int force);
+void rtllib_crypt_deinit_handler(unsigned long data);
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt);
#endif
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 6196b9aa3a0..4217b88e6fc 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -63,10 +62,9 @@ static void *rtllib_ccmp_init(int key_idx)
{
struct rtllib_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
@@ -429,13 +427,8 @@ static char *rtllib_ccmp_print_stats(char *p, void *priv)
return p;
}
-void rtllib_ccmp_null(void)
-{
- return;
-}
-
-static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
- .name = "CCMP",
+static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
+ .name = "R-CCMP",
.init = rtllib_ccmp_init,
.deinit = rtllib_ccmp_deinit,
.encrypt_mpdu = rtllib_ccmp_encrypt,
@@ -445,19 +438,24 @@ static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
.set_key = rtllib_ccmp_set_key,
.get_key = rtllib_ccmp_get_key,
.print_stats = rtllib_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
+ .extra_mpdu_prefix_len = CCMP_HDR_LEN,
+ .extra_mpdu_postfix_len = CCMP_MIC_LEN,
.owner = THIS_MODULE,
};
int __init rtllib_crypto_ccmp_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_ccmp);
+ return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}
void __exit rtllib_crypto_ccmp_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_ccmp);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}
+
+module_init(rtllib_crypto_ccmp_init);
+module_exit(rtllib_crypto_ccmp_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 6a0c8788642..800925053fb 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -60,10 +59,9 @@ static void *rtllib_tkip_init(int key_idx)
{
struct rtllib_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
@@ -598,8 +596,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
}
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv,
- struct rtllib_device *ieee)
+ int hdr_len, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
u8 mic[8];
@@ -618,23 +615,20 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
- if ((memcmp(mic, skb->data + skb->len - 8, 8) != 0) ||
- (ieee->force_mic_error)) {
+ if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtllib_hdr_4addr *hdr;
hdr = (struct rtllib_hdr_4addr *) skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", hdr->addr2,
keyidx);
- printk(KERN_DEBUG "%d, force_mic_error = %d\n",
- (memcmp(mic, skb->data + skb->len - 8, 8) != 0),\
- ieee->force_mic_error);
+ printk(KERN_DEBUG "%d\n",
+ memcmp(mic, skb->data + skb->len - 8, 8) != 0);
if (skb->dev) {
printk(KERN_INFO "skb->dev != NULL\n");
rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
}
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- ieee->force_mic_error = false;
return -1;
}
@@ -740,9 +734,8 @@ static char *rtllib_tkip_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_tkip = {
- .name = "TKIP",
+static struct lib80211_crypto_ops rtllib_crypt_tkip = {
+ .name = "R-TKIP",
.init = rtllib_tkip_init,
.deinit = rtllib_tkip_deinit,
.encrypt_mpdu = rtllib_tkip_encrypt,
@@ -752,24 +745,25 @@ static struct rtllib_crypto_ops rtllib_crypt_tkip = {
.set_key = rtllib_tkip_set_key,
.get_key = rtllib_tkip_get_key,
.print_stats = rtllib_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
+ .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
+ .extra_msdu_postfix_len = 8, /* MIC */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_tkip_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_tkip);
+ return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}
void __exit rtllib_crypto_tkip_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_tkip);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}
-void rtllib_tkip_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_tkip_init);
+module_exit(rtllib_crypto_tkip_exit);
+
+MODULE_LICENSE("GPL");
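
Note the length split above: TKIP's old combined extra_postfix_len (8-byte Michael MIC plus 4-byte ICV) becomes a per-MPDU ICV and a per-MSDU MIC, matching lib80211's model. A hedged sketch of how a transmit path could size buffers from the split fields (helper is illustrative only):

#include <net/lib80211.h>

static void demo_crypt_room(const struct lib80211_crypto_ops *ops,
                            int *headroom, int *tailroom)
{
    /* Added to every fragment: IV/ExtIV in front, ICV at the end. */
    *headroom = ops->extra_mpdu_prefix_len;
    *tailroom = ops->extra_mpdu_postfix_len;

    /* Added once per frame before fragmentation, e.g. TKIP's 8-byte MIC. */
    *headroom += ops->extra_msdu_prefix_len;
    *tailroom += ops->extra_msdu_postfix_len;
}
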
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index c59bf10fe78..8cdf38913a3 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -38,10 +37,9 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
@@ -257,9 +255,8 @@ static char *prism2_wep_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_wep = {
- .name = "WEP",
+static struct lib80211_crypto_ops rtllib_crypt_wep = {
+ .name = "R-WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
.encrypt_mpdu = prism2_wep_encrypt,
@@ -269,24 +266,24 @@ static struct rtllib_crypto_ops rtllib_crypt_wep = {
.set_key = prism2_wep_set_key,
.get_key = prism2_wep_get_key,
.print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
+ .extra_mpdu_prefix_len = 4, /* IV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_wep_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_wep);
+ return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}
void __exit rtllib_crypto_wep_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_wep);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}
-void rtllib_wep_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_wep_init);
+module_exit(rtllib_crypto_wep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
new file mode 100644
index 00000000000..2bfc1155f50
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -0,0 +1,86 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL_DEBUG_H
+#define _RTL_DEBUG_H
+
+/* Allow files to override DRV_NAME */
+#ifndef DRV_NAME
+#define DRV_NAME "rtllib_92e"
+#endif
+
+#define DMESG(x, a...)
+
+extern u32 rt_global_debug_component;
+
+/* These are the defines for rt_global_debug_component */
+enum RTL_DEBUG {
+ COMP_TRACE = (1 << 0),
+ COMP_DBG = (1 << 1),
+ COMP_INIT = (1 << 2),
+ COMP_RECV = (1 << 3),
+ COMP_SEND = (1 << 4),
+ COMP_CMD = (1 << 5),
+ COMP_POWER = (1 << 6),
+ COMP_EPROM = (1 << 7),
+ COMP_SWBW = (1 << 8),
+ COMP_SEC = (1 << 9),
+ COMP_LPS = (1 << 10),
+ COMP_QOS = (1 << 11),
+ COMP_RATE = (1 << 12),
+ COMP_RXDESC = (1 << 13),
+ COMP_PHY = (1 << 14),
+ COMP_DIG = (1 << 15),
+ COMP_TXAGC = (1 << 16),
+ COMP_HALDM = (1 << 17),
+ COMP_POWER_TRACKING = (1 << 18),
+ COMP_CH = (1 << 19),
+ COMP_RF = (1 << 20),
+ COMP_FIRMWARE = (1 << 21),
+ COMP_HT = (1 << 22),
+ COMP_RESET = (1 << 23),
+ COMP_CMDPKT = (1 << 24),
+ COMP_SCAN = (1 << 25),
+ COMP_PS = (1 << 26),
+ COMP_DOWN = (1 << 27),
+ COMP_INTR = (1 << 28),
+ COMP_LED = (1 << 29),
+ COMP_MLME = (1 << 30),
+ COMP_ERR = (1 << 31)
+};
+
+#define RT_TRACE(component, x, args...) \
+do { \
+ if (rt_global_debug_component & component) \
+ printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
+ ##args);\
+} while (0)
+
+#define assert(expr) \
+ if (!(expr)) { \
+ printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
+
+#endif
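
A quick usage sketch for the header above: output is gated by the component bits in rt_global_debug_component, which rtllib_module.c now defines and exports. The function below is illustrative, not from the patch:

#include "rtllib_debug.h"

static void demo_trace(void)
{
    /* Enable INIT traces in addition to the default COMP_ERR. */
    rt_global_debug_component |= COMP_INIT;

    RT_TRACE(COMP_INIT, "channel plan %d", 1);  /* printed */
    RT_TRACE(COMP_RECV, "rx path message");     /* filtered out */
}
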
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index c36a140a456..f9dae958a5d 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -45,7 +45,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -54,7 +53,9 @@
#include "rtllib.h"
-#define DRV_NAME "rtllib_92e"
+u32 rt_global_debug_component = COMP_ERR;
+EXPORT_SYMBOL(rt_global_debug_component);
+
void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
{
@@ -135,10 +136,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->host_decrypt = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
- INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- _setup_timer(&ieee->crypt_deinit_timer,
- rtllib_crypt_deinit_handler,
- (unsigned long) ieee);
ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
spin_lock_init(&ieee->lock);
@@ -148,6 +145,9 @@ struct net_device *alloc_rtllib(int sizeof_priv)
atomic_set(&(ieee->atm_chnlop), 0);
atomic_set(&(ieee->atm_swbw), 0);
+ /* SAM FIXME */
+ lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
+
ieee->bHalfNMode = false;
ieee->wpa_enabled = 0;
ieee->tkip_countermeasures = 0;
@@ -177,10 +177,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->last_packet_time[i] = 0;
}
- rtllib_tkip_null();
- rtllib_wep_null();
- rtllib_ccmp_null();
-
return dev;
failed:
@@ -188,32 +184,23 @@ struct net_device *alloc_rtllib(int sizeof_priv)
free_netdev(dev);
return NULL;
}
+EXPORT_SYMBOL(alloc_rtllib);
void free_rtllib(struct net_device *dev)
{
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
- int i;
kfree(ieee->pHTInfo);
ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
- del_timer_sync(&ieee->crypt_deinit_timer);
- rtllib_crypt_deinit_entries(ieee, 1);
-
- for (i = 0; i < WEP_KEYS; i++) {
- struct rtllib_crypt_data *crypt = ieee->crypt[i];
- if (crypt) {
- if (crypt->ops)
- crypt->ops->deinit(crypt->priv);
- kfree(crypt);
- ieee->crypt[i] = NULL;
- }
- }
+
+ lib80211_crypt_info_free(&ieee->crypt_info);
rtllib_networks_free(ieee);
free_netdev(dev);
}
+EXPORT_SYMBOL(free_rtllib);
u32 rtllib_debug_level;
static int debug = \
@@ -287,3 +274,8 @@ void __exit rtllib_exit(void)
rtllib_proc = NULL;
}
}
+
+module_init(rtllib_init);
+module_exit(rtllib_exit);
+
+MODULE_LICENSE("GPL");
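
The alloc/free hunks above pair with the two lib80211 helpers; roughly, with prototypes from include/net/lib80211.h and error handling trimmed:

#include <net/lib80211.h>

static int demo_crypt_setup(struct rtllib_device *ieee)
{
    spin_lock_init(&ieee->lock);
    /* Takes over the delayed-deinit list and timer the driver used to run
     * by hand; reuses the existing device lock. */
    return lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
}

static void demo_crypt_teardown(struct rtllib_device *ieee)
{
    /* Flushes pending deinits and releases every crypt[] entry, replacing
     * the old hand-rolled loop over WEP_KEYS. */
    lib80211_crypt_info_free(&ieee->crypt_info);
}
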
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 8d0af5ed8ec..6c5061f12ba 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -281,7 +280,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data *crypt)
+ struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -322,7 +321,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
- int keyidx, struct rtllib_crypt_data *crypt)
+ int keyidx, struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -341,7 +340,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv, ieee);
+ res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
@@ -1010,7 +1009,7 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
}
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data **crypt, size_t hdrlen)
+ struct lib80211_crypt_data **crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
@@ -1020,7 +1019,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
- *crypt = ieee->crypt[idx];
+ *crypt = ieee->crypt_info.crypt[idx];
/* allow NULL decrypt to indicate a station-specific override
* for default encryption */
if (*crypt && ((*crypt)->ops == NULL ||
@@ -1045,7 +1044,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats,
- struct rtllib_crypt_data *crypt, size_t hdrlen)
+ struct lib80211_crypt_data *crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr;
int keyidx = 0;
@@ -1253,7 +1252,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
{
struct net_device *dev = ieee->dev;
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
struct rx_ts_record *pTS = NULL;
u16 fc, sc, SeqNum = 0;
@@ -1497,6 +1496,7 @@ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->stats.rx_dropped++;
return 0;
}
+EXPORT_SYMBOL(rtllib_rx);
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
@@ -2492,7 +2492,7 @@ static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
-int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
+int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel)
{
if (MAX_CHANNEL_NUMBER < channel) {
printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
@@ -2503,6 +2503,7 @@ int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
+EXPORT_SYMBOL(rtllib_legal_channel);
static inline void rtllib_process_probe_response(
struct rtllib_device *ieee,
@@ -2553,7 +2554,7 @@ static inline void rtllib_process_probe_response(
}
- if (!IsLegalChannel(ieee, network->channel))
+ if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
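
As a reference for the signature change above, a small sketch of the refcount-guarded MSDU decryption call after the switch to struct lib80211_crypt_data; the helper name is illustrative:

#include <linux/skbuff.h>
#include <net/lib80211.h>

static int example_decrypt_msdu(struct sk_buff *skb, int keyidx, int hdrlen,
                                struct lib80211_crypt_data *crypt)
{
        int res;

        atomic_inc(&crypt->refcnt);  /* keep ops/priv alive across the call */
        /* lib80211's decrypt_msdu() drops the extra rtllib device argument */
        res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
        atomic_dec(&crypt->refcnt);

        return res;
}
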
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b5086850f0d..1637f111099 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -15,11 +15,9 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include <linux/random.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/uaccess.h>
#include "dot11d.h"
@@ -28,9 +26,9 @@ short rtllib_is_54g(struct rtllib_network *net)
return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
-short rtllib_is_shortslot(struct rtllib_network net)
+short rtllib_is_shortslot(const struct rtllib_network *net)
{
- return net.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ return net->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
}
/* returns the total length needed for placing the RATE MFIE
@@ -468,6 +466,7 @@ void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = true;
}
+EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
/*
@@ -490,6 +489,7 @@ void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = false;
}
+EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
{
@@ -685,6 +685,7 @@ void rtllib_stop_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_stop(ieee);
}
+EXPORT_SYMBOL(rtllib_stop_send_beacons);
void rtllib_start_send_beacons(struct rtllib_device *ieee)
@@ -694,6 +695,7 @@ void rtllib_start_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_start(ieee);
}
+EXPORT_SYMBOL(rtllib_start_send_beacons);
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
@@ -719,6 +721,7 @@ void rtllib_stop_scan(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan);
void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
{
@@ -729,6 +732,7 @@ void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan_syncro);
bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
{
@@ -741,6 +745,7 @@ bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
return test_bit(STATUS_SCANNING, &ieee->status);
}
}
+EXPORT_SYMBOL(rtllib_act_scanning);
/* called with ieee->lock held */
static void rtllib_start_scan(struct rtllib_device *ieee)
@@ -781,6 +786,7 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
ieee->rtllib_start_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_start_scan_syncro);
inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
struct rtllib_device *ieee, int challengelen, u8 *daddr)
@@ -830,7 +836,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb = NULL;
int encrypt;
int atim_len, erp_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
@@ -865,9 +871,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
} else
erp_len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
+ ((0 == strcmp(crypt->ops->name, "R-WEP") || wpa_ie_len));
if (ieee->pHTInfo->bCurrentHTSupport) {
tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
@@ -917,7 +923,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16((beacon_buf->capability |=
WLAN_CAPABILITY_SHORT_SLOT_TIME));
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
@@ -976,7 +982,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb;
u8 *tag;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
struct rtllib_assoc_response_frame *assoc;
short encrypt;
@@ -1007,7 +1013,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
if (ieee->host_encrypt)
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
else
crypt = NULL;
@@ -1172,7 +1178,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int ckip_ie_len = 0;
unsigned int ccxrm_ie_len = 0;
unsigned int cxvernum_ie_len = 0;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
int PMKCacheIdx;
@@ -1185,10 +1191,10 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
int len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (crypt != NULL)
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") ||
+ ((0 == strcmp(crypt->ops->name, "R-WEP") ||
wpa_ie_len));
else
encrypt = 0;
@@ -1956,6 +1962,7 @@ void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
}
+EXPORT_SYMBOL(rtllib_sta_ps_send_null_frame);
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
{
@@ -2168,6 +2175,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee, struct sk_buff *skb)
{
@@ -2540,6 +2548,7 @@ void rtllib_reset_queue(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_reset_queue);
void rtllib_wake_queue(struct rtllib_device *ieee)
{
@@ -2928,6 +2937,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
return skb;
}
+EXPORT_SYMBOL(rtllib_get_beacon);
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
u8 shutdown)
@@ -2937,6 +2947,7 @@ void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
rtllib_stop_protocol(ieee, shutdown);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
@@ -2985,6 +2996,7 @@ void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
rtllib_start_protocol(ieee);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_start_protocol);
void rtllib_start_protocol(struct rtllib_device *ieee)
{
@@ -3048,10 +3060,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->state = RTLLIB_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->pDot11dInfo = kmalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(struct rt_dot11d_info));
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3207,11 +3218,11 @@ static int rtllib_wpa_set_wpa_ie(struct rtllib_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3334,8 +3345,8 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
u8 is_mesh)
{
int ret = 0;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
@@ -3354,9 +3365,9 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS)
+ if (param->u.crypt.idx >= NUM_WEP_KEYS)
return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
+ crypt = &ieee->crypt_info.crypt[param->u.crypt.idx];
} else {
return -EINVAL;
}
@@ -3366,7 +3377,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
goto done;
}
@@ -3375,19 +3386,19 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
/* IPW HW cannot build TKIP MIC, host decryption still needed. */
if (!(ieee->host_encrypt || ieee->host_decrypt) &&
- strcmp(param->u.crypt.alg, "TKIP"))
+ strcmp(param->u.crypt.alg, "R-TKIP"))
goto skip_host_crypt;
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ if (ops == NULL && strcmp(param->u.crypt.alg, "R-WEP") == 0) {
request_module("rtllib_crypt_wep");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
request_module("rtllib_crypt_tkip");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
request_module("rtllib_crypt_ccmp");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
printk(KERN_INFO "unknown crypto alg '%s'\n",
@@ -3397,17 +3408,17 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
goto done;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- new_crypt = (struct rtllib_crypt_data *)
+ new_crypt = (struct lib80211_crypt_data *)
kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
+ memset(new_crypt, 0, sizeof(struct lib80211_crypt_data));
new_crypt->ops = ops;
if (new_crypt->ops)
new_crypt->priv =
@@ -3435,7 +3446,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
skip_host_crypt:
if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
+ ieee->crypt_info.tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
} else
@@ -3448,13 +3459,13 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
sec.flags |= (1 << param->u.crypt.idx);
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ if (strcmp(param->u.crypt.alg, "R-WEP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
@@ -3551,13 +3562,13 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
int wpa_ie_len = ieee->wpa_ie_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
|| (ieee->host_encrypt && crypt && crypt->ops &&
- (0 == strcmp(crypt->ops->name, "WEP")));
+ (0 == strcmp(crypt->ops->name, "R-WEP")));
/* simply judge */
if (encrypt && (wpa_ie_len == 0)) {
@@ -3634,6 +3645,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
@@ -3719,6 +3731,7 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
return true;
}
+EXPORT_SYMBOL(rtllib_MgntDisconnect);
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
@@ -3739,3 +3752,4 @@ void notify_wx_assoc_event(struct rtllib_device *ieee)
}
wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
}
+EXPORT_SYMBOL(notify_wx_assoc_event);
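
The cipher lookups above all follow the same lookup-then-request_module pattern against lib80211, using the rtllib-private algorithm names ("R-WEP", "R-TKIP", "R-CCMP"); a condensed sketch with an illustrative helper name:

#include <linux/kmod.h>
#include <net/lib80211.h>

/* Look up a registered lib80211 cipher; if it is not there yet, load the
 * rtllib crypto module that provides it and try once more. */
static struct lib80211_crypto_ops *example_get_ops(const char *alg,
                                                   const char *module)
{
        struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops(alg);

        if (!ops) {
                request_module("%s", module);
                ops = lib80211_get_crypto_ops(alg);
        }
        return ops;
}

For example, example_get_ops("R-TKIP", "rtllib_crypt_tkip") mirrors the corresponding branch in rtllib_wpa_set_encryption().
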
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 22988fbd444..1523bc7a210 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -15,7 +15,6 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include "dot11d.h"
/* FIXME: add A freqs */
@@ -25,6 +24,7 @@ const long rtllib_wlan_frequencies[] = {
2452, 2457, 2462, 2467,
2472, 2484
};
+EXPORT_SYMBOL(rtllib_wlan_frequencies);
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
@@ -82,6 +82,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_freq);
int rtllib_wx_get_freq(struct rtllib_device *ieee,
@@ -97,6 +98,7 @@ int rtllib_wx_get_freq(struct rtllib_device *ieee,
fwrq->e = 1;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_freq);
int rtllib_wx_get_wap(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -125,6 +127,7 @@ int rtllib_wx_get_wap(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_wap);
int rtllib_wx_set_wap(struct rtllib_device *ieee,
@@ -184,6 +187,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_wap);
int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -220,6 +224,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_essid);
int rtllib_wx_set_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -231,6 +236,7 @@ int rtllib_wx_set_rate(struct rtllib_device *ieee,
ieee->rate = target_rate/100000;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rate);
int rtllib_wx_get_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -243,6 +249,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rate);
int rtllib_wx_set_rts(struct rtllib_device *ieee,
@@ -259,6 +266,7 @@ int rtllib_wx_set_rts(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rts);
int rtllib_wx_get_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -269,6 +277,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee,
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rts);
int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -314,6 +323,7 @@ out:
up(&ieee->wx_sem);
return set_mode_status;
}
+EXPORT_SYMBOL(rtllib_wx_set_mode);
void rtllib_wx_sync_scan_wq(void *data)
{
@@ -428,6 +438,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_scan);
int rtllib_wx_set_essid(struct rtllib_device *ieee,
struct iw_request_info *a,
@@ -490,6 +501,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_essid);
int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -497,6 +509,7 @@ int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
wrqu->mode = ieee->iw_mode;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_mode);
int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -533,6 +546,7 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rawtx);
int rtllib_wx_get_name(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -548,6 +562,7 @@ int rtllib_wx_get_name(struct rtllib_device *ieee,
strcat(wrqu->name, "n");
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_name);
/* this is mostly stolen from hostap */
@@ -605,6 +620,7 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_power);
/* this is stolen from hostap */
int rtllib_wx_get_power(struct rtllib_device *ieee,
@@ -643,3 +659,4 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_power);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 44e8006bc1a..f451bfc27a8 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -180,10 +179,10 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
int hdr_len)
{
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
int res;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (!(crypt && crypt->ops)) {
printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
@@ -569,7 +568,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
};
u8 dest[ETH_ALEN], src[ETH_ALEN];
int qos_actived = ieee->current_network.qos_data.active;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct cb_desc *tcb_desc;
u8 bIsMulticast = false;
u8 IsAmsdu = false;
@@ -646,7 +645,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
skb->priority = rtllib_classify(skb, IsAmsdu);
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
ieee->host_encrypt && crypt && crypt->ops;
if (!encrypt && ieee->ieee802_1x &&
@@ -742,8 +741,10 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
/* Each fragment may need to have room for encryption
* pre/postfix */
if (encrypt) {
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
+ bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_mpdu_postfix_len +
+ crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
}
/* Number of fragments is the total bytes_per_frag /
* payload_per_fragment */
@@ -791,7 +792,8 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag,
- crypt->ops->extra_prefix_len);
+ crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_msdu_prefix_len);
} else {
tcb_desc->bHwSec = 0;
}
@@ -965,3 +967,4 @@ int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->cb, 0, sizeof(skb->cb));
return rtllib_xmit_inter(skb, dev);
}
+EXPORT_SYMBOL(rtllib_xmit);
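
The fragmentation hunks above replace the old combined extra_prefix_len/extra_postfix_len pair with lib80211's four per-MPDU and per-MSDU overhead fields; a minimal sketch of the resulting per-fragment budget (function name illustrative):

#include <net/lib80211.h>

static int example_bytes_per_frag(int payload,
                                  const struct lib80211_crypto_ops *ops)
{
        /* reserve room for both MPDU and MSDU encryption prefix/postfix */
        return payload - (ops->extra_mpdu_prefix_len +
                          ops->extra_mpdu_postfix_len +
                          ops->extra_msdu_prefix_len +
                          ops->extra_msdu_postfix_len);
}
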
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 8cea4a60e1b..c27ff7edbaf 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -30,7 +30,6 @@
******************************************************************************/
#include <linux/wireless.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/module.h>
@@ -295,6 +294,7 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee,
return err;
}
+EXPORT_SYMBOL(rtllib_wx_get_scan);
int rtllib_wx_set_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -306,44 +306,44 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
.flags = 0
};
int i, key, key_provided, len;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypt_data **crypt;
RTLLIB_DEBUG_WX("SET_ENCODE\n");
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
key_provided = 1;
} else {
key_provided = 0;
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
RTLLIB_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
"provided" : "default");
- crypt = &ieee->crypt[key];
+ crypt = &ieee->crypt_info.crypt[key];
if (erq->flags & IW_ENCODE_DISABLED) {
if (key_provided && *crypt) {
RTLLIB_DEBUG_WX("Disabling encryption on key %d.\n",
key);
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
} else
RTLLIB_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all */
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL) {
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL) {
if (key_provided)
break;
- rtllib_crypt_delayed_deinit(ieee,
- &ieee->crypt[i]);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info,
+ &ieee->crypt_info.crypt[i]);
}
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
@@ -358,25 +358,24 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
+ strcmp((*crypt)->ops->name, "R-WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
* on this key */
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
if (*crypt == NULL) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct rtllib_crypt_data),
+ new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
if (!new_crypt->ops) {
request_module("rtllib_crypt_wep");
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
}
if (new_crypt->ops)
@@ -412,7 +411,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
* explicitly set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
@@ -435,7 +434,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
if (key_provided) {
RTLLIB_DEBUG_WX(
"Setting key %d to default Tx key.\n", key);
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -470,6 +469,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode);
int rtllib_wx_get_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -477,7 +477,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
RTLLIB_DEBUG_WX("GET_ENCODE\n");
@@ -486,13 +486,13 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
} else {
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
- crypt = ieee->crypt[key];
+ crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
@@ -513,6 +513,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_encode);
int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -525,29 +526,29 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
int i, idx;
int group_key = 0;
const char *alg, *module;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
};
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else{
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
else
return -EINVAL;
}
@@ -556,13 +557,13 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL)
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL)
break;
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
@@ -573,15 +574,15 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
sec.enabled = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
- alg = "WEP";
+ alg = "R-WEP";
module = "rtllib_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
+ alg = "R-TKIP";
module = "rtllib_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
+ alg = "R-CCMP";
module = "rtllib_crypt_ccmp";
break;
default:
@@ -592,14 +593,14 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
printk(KERN_INFO "alg name:%s\n", alg);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
if (ops == NULL) {
char tempbuf[100];
memset(tempbuf, 0x00, 100);
sprintf(tempbuf, "%s", module);
request_module("%s", tempbuf);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
}
if (ops == NULL) {
RTLLIB_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -610,9 +611,9 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
@@ -641,7 +642,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
goto done;
}
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
+ ieee->crypt_info.tx_keyidx = idx;
sec.active_key = idx;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -674,6 +675,7 @@ done:
}
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode_ext);
int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -681,7 +683,7 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
{
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
@@ -690,18 +692,18 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else {
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
(ext->alg != IW_ENCODE_ALG_WEP))
if (idx != 0 || (ieee->iw_mode != IW_MODE_INFRA))
return -EINVAL;
- crypt = ieee->crypt[idx];
+ crypt = ieee->crypt_info.crypt[idx];
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
@@ -711,11 +713,11 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
- if (strcmp(crypt->ops->name, "WEP") == 0)
+ if (strcmp(crypt->ops->name, "R-WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
+ else if (strcmp(crypt->ops->name, "R-TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
+ else if (strcmp(crypt->ops->name, "R-CCMP"))
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
@@ -778,6 +780,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_mlme);
int rtllib_wx_set_auth(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -830,6 +833,7 @@ int rtllib_wx_set_auth(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_auth);
int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
{
@@ -846,10 +850,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
ieee->wps_ie_len = (len < MAX_WZC_IE_LEN) ? (len) :
(MAX_WZC_IE_LEN);
- buf = kmalloc(ieee->wps_ie_len, GFP_KERNEL);
+ buf = kmemdup(ie, ieee->wps_ie_len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, ieee->wps_ie_len);
ieee->wps_ie = buf;
return 0;
}
@@ -860,10 +863,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
if (len) {
if (len != ie[1]+2)
return -EINVAL;
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
@@ -874,3 +876,4 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_gen_ie);
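
The kmemdup() conversions in this file (and in rtllib_softmac.c above) are the stock replacement for an open-coded kmalloc()+memcpy(); a minimal sketch with an illustrative helper:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static u8 *example_dup_ie(const u8 *ie, size_t len, gfp_t gfp)
{
        /* kmemdup() allocates len bytes and copies ie in one step; it returns
         * NULL on allocation failure, just like the kmalloc() it replaces. */
        return kmemdup(ie, len, gfp);
}
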
diff --git a/drivers/staging/rtl8192u/ieee80211/api.c b/drivers/staging/rtl8192u/ieee80211/api.c
deleted file mode 100644
index 5f46e50e586..00000000000
--- a/drivers/staging/rtl8192u/ieee80211/api.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include "kmap_types.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-LIST_HEAD(crypto_alg_list);
-DECLARE_RWSEM(crypto_alg_sem);
-
-static inline int crypto_alg_get(struct crypto_alg *alg)
-{
- return try_inc_mod_count(alg->cra_module);
-}
-
-static inline void crypto_alg_put(struct crypto_alg *alg)
-{
- if (alg->cra_module)
- __MOD_DEC_USE_COUNT(alg->cra_module);
-}
-
-struct crypto_alg *crypto_alg_lookup(const char *name)
-{
- struct crypto_alg *q, *alg = NULL;
-
- if (!name)
- return NULL;
-
- down_read(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, name))) {
- if (crypto_alg_get(q))
- alg = q;
- break;
- }
- }
-
- up_read(&crypto_alg_sem);
- return alg;
-}
-
-static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
-{
- tfm->crt_flags = 0;
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_flags(tfm, flags);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static int crypto_init_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_ops(tfm);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_ops(tfm);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static void crypto_exit_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- crypto_exit_cipher_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- crypto_exit_compress_ops(tfm);
- break;
-
- default:
- BUG();
-
- }
-}
-
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name);
- if (alg == NULL)
- goto out;
-
- tfm = kzalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
- if (tfm == NULL)
- goto out_put;
-
- tfm->__crt_alg = alg;
-
- if (crypto_init_flags(tfm, flags))
- goto out_free_tfm;
-
- if (crypto_init_ops(tfm)) {
- crypto_exit_ops(tfm);
- goto out_free_tfm;
- }
-
- goto out;
-
-out_free_tfm:
- kfree(tfm);
- tfm = NULL;
-out_put:
- crypto_alg_put(alg);
-out:
- return tfm;
-}
-
-void crypto_free_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- int size = sizeof(*tfm) + alg->cra_ctxsize;
-
- crypto_exit_ops(tfm);
- crypto_alg_put(alg);
- memset(tfm, 0, size);
- kfree(tfm);
-}
-
-int crypto_register_alg(struct crypto_alg *alg)
-{
- int ret = 0;
- struct crypto_alg *q;
-
- down_write(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, alg->cra_name))) {
- ret = -EEXIST;
- goto out;
- }
- }
-
- list_add_tail(&alg->cra_list, &crypto_alg_list);
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_unregister_alg(struct crypto_alg *alg)
-{
- int ret = -ENOENT;
- struct crypto_alg *q;
-
- BUG_ON(!alg->cra_module);
-
- down_write(&crypto_alg_sem);
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (alg == q) {
- list_del(&alg->cra_list);
- ret = 0;
- goto out;
- }
- }
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_alg_available(const char *name, u32 flags)
-{
- int ret = 0;
- struct crypto_alg *alg = crypto_alg_mod_lookup(name);
-
- if (alg) {
- crypto_alg_put(alg);
- ret = 1;
- }
-
- return ret;
-}
-
-static int __init init_crypto(void)
-{
- printk(KERN_INFO "Initializing Cryptographic API\n");
- crypto_init_proc();
- return 0;
-}
-
-__initcall(init_crypto);
-
-/*
-EXPORT_SYMBOL_GPL(crypto_register_alg);
-EXPORT_SYMBOL_GPL(crypto_unregister_alg);
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-EXPORT_SYMBOL_GPL(crypto_free_tfm);
-EXPORT_SYMBOL_GPL(crypto_alg_available);
-*/
-
-EXPORT_SYMBOL_NOVERS(crypto_register_alg);
-EXPORT_SYMBOL_NOVERS(crypto_unregister_alg);
-EXPORT_SYMBOL_NOVERS(crypto_alloc_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_free_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_alg_available);
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index ef8eb6c7ee4..4277d0304b7 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -551,7 +551,7 @@ void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
ibss_wlan = r8712_find_network(
&pmlmepriv->scanned_queue,
pnetwork->MacAddress);
- if (!ibss_wlan) {
+ if (ibss_wlan) {
memcpy(ibss_wlan->network.IEs,
pnetwork->IEs, 8);
goto exit;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056..5385da2e9cd 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index d9cee6d0b12..2b9f785954d 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -934,34 +934,4 @@ struct usb_driver rts51x_driver = {
.soft_unbind = 1,
};
-static int __init rts51x_init(void)
-{
- int retval;
-
- printk(KERN_INFO "Initializing %s USB card reader driver...\n",
- RTS51X_NAME);
-
- /* register the driver, return usb_register return code if error */
- retval = usb_register(&rts51x_driver);
- if (retval == 0) {
- printk(KERN_INFO
- "Realtek %s USB card reader support registered.\n",
- RTS51X_NAME);
- }
- return retval;
-}
-
-static void __exit rts51x_exit(void)
-{
- RTS51X_DEBUGP("rts51x_exit() called\n");
-
- /* Deregister the driver
- * This will cause disconnect() to be called for each
- * attached unit
- */
- RTS51X_DEBUGP("-- calling usb_deregister()\n");
- usb_deregister(&rts51x_driver);
-}
-
-module_init(rts51x_init);
-module_exit(rts51x_exit);
+module_usb_driver(rts51x_driver);
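
The rts5139 change is the standard conversion to module_usb_driver(), which expands to the module_init()/module_exit() pair that the deleted rts51x_init()/rts51x_exit() functions spelled out by hand; a self-contained sketch of an equivalent skeleton (driver name and device IDs are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
        { USB_DEVICE(0x0bda, 0x0139) },  /* illustrative VID/PID */
        { }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static int example_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
{
        return 0;
}

static void example_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver example_driver = {
        .name       = "example",
        .probe      = example_probe,
        .disconnect = example_disconnect,
        .id_table   = example_id_table,
};

/* expands to module_init()/module_exit() wrappers around
 * usb_register()/usb_deregister() */
module_usb_driver(example_driver);

MODULE_LICENSE("GPL");
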
diff --git a/drivers/staging/rts5139/rts51x.h b/drivers/staging/rts5139/rts51x.h
index 9415d5c0550..b2c58390bfc 100644
--- a/drivers/staging/rts5139/rts51x.h
+++ b/drivers/staging/rts5139/rts51x.h
@@ -34,7 +34,6 @@
#include <linux/mutex.h>
#include <linux/cdrom.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index f7aa87f7f1a..8464c4836d5 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -28,7 +28,6 @@
#define __RTS51X_TRANSPORT_H
#include <linux/kernel.h>
-#include <linux/version.h>
#include "rts51x.h"
#include "rts51x_chip.h"
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4d..a7feb3e328a 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
@@ -1021,6 +1019,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 8ac3faea2d2..6b3d156d414 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -1235,7 +1235,7 @@ static void sep_build_lli_table(struct sep_device *sep,
/* Counter of lli array entry */
u32 array_counter;
- /* Init currrent table data size and lli array entry counter */
+ /* Init current table data size and lli array entry counter */
curr_table_data_size = 0;
array_counter = 0;
*num_table_entries_ptr = 1;
@@ -2120,6 +2120,8 @@ static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
}
}
if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data))
+ return -EINVAL;
if (is_kva == true) {
memcpy(dcb_table_ptr->tail_data,
(void *)(app_in_address + data_in_size -
diff --git a/drivers/staging/serial/68360serial.c b/drivers/staging/serial/68360serial.c
index 0a3e8787ed5..daf0b1d0dc2 100644
--- a/drivers/staging/serial/68360serial.c
+++ b/drivers/staging/serial/68360serial.c
@@ -2771,8 +2771,8 @@ static int __init rs_360_init(void)
*/
/* cpm_install_handler(IRQ_MACHSPEC | state->irq, rs_360_interrupt, info); */
/*request_irq(IRQ_MACHSPEC | state->irq, rs_360_interrupt, */
- request_irq(state->irq, rs_360_interrupt,
- IRQ_FLG_LOCK, "ttyS", (void *)info);
+ request_irq(state->irq, rs_360_interrupt, 0, "ttyS",
+ (void *)info);
/* Set up the baud rate generator.
*/
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 39dbf339a4f..ae0035f327e 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1024,9 +1024,9 @@ failed_free:
/* Jason (08/11/2009) PCI_DRV wrapper essential structs */
static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
- {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(0x126f, 0x710), },
+ { PCI_DEVICE(0x126f, 0x712), },
+ { PCI_DEVICE(0x126f, 0x720), },
{0,}
};
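
For reference, PCI_DEVICE(vend, dev) fills only .vendor/.device and sets the subvendor/subdevice fields to PCI_ANY_ID, which is why the three open-coded rows above collapse to the shorter form; a standalone sketch of the same kind of table (names illustrative):

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_table) = {
        { PCI_DEVICE(0x126f, 0x710) },  /* equivalent to the removed row */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);
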
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 07a7f543259..2093896c546 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -265,12 +265,11 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
unsigned long flags;
spk_lock(flags);
- in_buff = kmalloc(count + 1, GFP_ATOMIC);
+ in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spk_unlock(flags);
return -ENOMEM;
}
- memcpy(in_buff, buf, count + 1);
if (strchr("dDrR", *in_buff)) {
set_key_info(key_defaults, key_buf);
pr_info("keymap set to default values\n");
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 8be56045897..c7b03f0ef2d 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2268,8 +2268,6 @@ static int __init speakup_init(void)
set_mask_bits(0, i, 2);
set_key_info(key_defaults, key_buf);
- if (quiet_boot)
- spk_shut_up |= 0x01;
/* From here on out, initializations can fail. */
err = speakup_add_virtual_keyboard();
@@ -2292,6 +2290,9 @@ static int __init speakup_init(void)
goto error_kobjects;
}
+ if (quiet_boot)
+ spk_shut_up |= 0x01;
+
err = speakup_kobj_init();
if (err)
goto error_kobjects;
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
deleted file mode 100644
index 4fc20648483..00000000000
--- a/drivers/staging/spectra/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
-
-menuconfig SPECTRA
- tristate "Denali Spectra Flash Translation Layer"
- depends on BLOCK
- depends on X86_MRST
- default n
- ---help---
- Enable the FTL pseudo-filesystem used with the NAND Flash
- controller on Intel Moorestown Platform to pretend to be a disk.
-
-choice
- prompt "Compile for"
- depends on SPECTRA
- default SPECTRA_MRST_HW
-
-config SPECTRA_MRST_HW
- bool "Moorestown hardware mode"
- help
- Driver communicates with the Moorestown hardware's register interface.
- in DMA mode.
-
-config SPECTRA_MTD
- bool "Linux MTD mode"
- depends on MTD
- help
- Driver communicates with the kernel MTD subsystem instead of its own
- built-in hardware driver.
-
-config SPECTRA_EMU
- bool "RAM emulator testing"
- help
- Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
-
-endchoice
-
-config SPECTRA_MRST_HW_DMA
- bool
- default n
- depends on SPECTRA_MRST_HW
- help
- Use DMA for native hardware interface.
diff --git a/drivers/staging/spectra/Makefile b/drivers/staging/spectra/Makefile
deleted file mode 100644
index f777dfba05a..00000000000
--- a/drivers/staging/spectra/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile of Intel Moorestown NAND controller driver
-#
-
-obj-$(CONFIG_SPECTRA) += spectra.o
-spectra-y := ffsport.o flash.o lld.o
-spectra-$(CONFIG_SPECTRA_MRST_HW) += lld_nand.o
-spectra-$(CONFIG_SPECTRA_MRST_HW_DMA) += lld_cdma.o
-spectra-$(CONFIG_SPECTRA_EMU) += lld_emu.o
-spectra-$(CONFIG_SPECTRA_MTD) += lld_mtd.o
-
diff --git a/drivers/staging/spectra/README b/drivers/staging/spectra/README
deleted file mode 100644
index ecba559b899..00000000000
--- a/drivers/staging/spectra/README
+++ /dev/null
@@ -1,29 +0,0 @@
-This is a driver for NAND controller of Intel Moorestown platform.
-
-This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
-It includes three layer:
- block layer interface - file ffsport.c
- Flash Translation Layer (FTL) - file flash.c (implement the NAND flash Translation Layer, includs address mapping, garbage collection, wear-leveling and so on)
- Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
-
-This driver can be build as modules or build-in.
-
-Dependency:
-This driver has dependency on IA Firmware of Intel Moorestown platform.
-It need the IA Firmware to create the block table for the first time.
-And to validate this driver code without IA Firmware, you can change the
-macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
-driver will erase the whole nand flash and create a new block table.
-
-TODO:
- - Enable Command DMA feature support
- - lower the memory footprint
- - Remove most of the unnecessary global variables
- - Change all the upcase variable / functions name to lowercase
- - Some other misc bugs
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
-
-And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
-
diff --git a/drivers/staging/spectra/ffsdefs.h b/drivers/staging/spectra/ffsdefs.h
deleted file mode 100644
index a9e9cd233d2..00000000000
--- a/drivers/staging/spectra/ffsdefs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSDEFS_
-#define _FFSDEFS_
-
-#define CLEAR 0 /*use this to clear a field instead of "fail"*/
-#define SET 1 /*use this to set a field instead of "pass"*/
-#define FAIL 1 /*failed flag*/
-#define PASS 0 /*success flag*/
-#define ERR -1 /*error flag*/
-
-#define ERASE_CMD 10
-#define WRITE_MAIN_CMD 11
-#define READ_MAIN_CMD 12
-#define WRITE_SPARE_CMD 13
-#define READ_SPARE_CMD 14
-#define WRITE_MAIN_SPARE_CMD 15
-#define READ_MAIN_SPARE_CMD 16
-#define MEMCOPY_CMD 17
-#define DUMMY_CMD 99
-
-#define EVENT_PASS 0x00
-#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
-#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
-#define EVENT_TIME_OUT 0x03
-#define EVENT_PROGRAM_FAILURE 0x04
-#define EVENT_ERASE_FAILURE 0x05
-#define EVENT_MEMCOPY_FAILURE 0x06
-#define EVENT_FAIL 0x07
-
-#define EVENT_NONE 0x22
-#define EVENT_DMA_CMD_COMP 0x77
-#define EVENT_ECC_TRANSACTION_DONE 0x88
-#define EVENT_DMA_CMD_FAIL 0x99
-
-#define CMD_PASS 0
-#define CMD_FAIL 1
-#define CMD_ABORT 2
-#define CMD_NOT_DONE 3
-
-#endif /* _FFSDEFS_ */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
deleted file mode 100644
index 86d556d6cf9..00000000000
--- a/drivers/staging/spectra/ffsport.c
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "ffsport.h"
-#include "flash.h"
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/async.h>
-
-/**** Helper functions used for Div, Remainder operation on u64 ****/
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_Calc_Used_Bits
-* Inputs: Power of 2 number
-* Outputs: Number of Used Bits
-* 0, if the argument is 0
-* Description: Calculate the number of bits used by a given power of 2 number
-* Number can be up to 32 bit
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_Calc_Used_Bits(u32 n)
-{
- int tot_bits = 0;
-
- if (n >= 1 << 16) {
- n >>= 16;
- tot_bits += 16;
- }
-
- if (n >= 1 << 8) {
- n >>= 8;
- tot_bits += 8;
- }
-
- if (n >= 1 << 4) {
- n >>= 4;
- tot_bits += 4;
- }
-
- if (n >= 1 << 2) {
- n >>= 2;
- tot_bits += 2;
- }
-
- if (n >= 1 << 1)
- tot_bits += 1;
-
- return ((n == 0) ? (0) : tot_bits);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Div
-* Inputs: Number of u64
-* A power of 2 number as Division
-* Outputs: Quotient of the Divisor operation
-* Description: It divides the address by divisor by using bit shift operation
-* (essentially without explicitely using "/").
-* Divisor is a power of 2 number and Divided is of u64
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Div(u64 addr, u32 divisor)
-{
- return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Remainder
-* Inputs: Number of u64
-* Divisor Type (1 -PageAddress, 2- BlockAddress)
-* Outputs: Remainder of the Division operation
-* Description: It calculates the remainder of a number (of u64) by
-* divisor(power of 2 number ) by using bit shifting and multiply
-* operation(essentially without explicitely using "/").
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
-{
- u64 result = 0;
-
- if (divisor_type == 1) { /* Remainder -- Page */
- result = (addr >> DeviceInfo.nBitsInPageDataSize);
- result = result * DeviceInfo.wPageDataSize;
- } else if (divisor_type == 2) { /* Remainder -- Block */
- result = (addr >> DeviceInfo.nBitsInBlockDataSize);
- result = result * DeviceInfo.wBlockDataSize;
- }
-
- result = addr - result;
-
- return result;
-}
-
-#define NUM_DEVICES 1
-#define PARTITIONS 8
-
-#define GLOB_SBD_NAME "nd"
-#define GLOB_SBD_IRQ_NUM (29)
-
-#define GLOB_SBD_IOCTL_GC (0x7701)
-#define GLOB_SBD_IOCTL_WL (0x7702)
-#define GLOB_SBD_IOCTL_FORMAT (0x7703)
-#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
-#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
-#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
-#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
-#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
-#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
-#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
-
-static int reserved_mb = 0;
-module_param(reserved_mb, int, 0);
-MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
-
-int nand_debug_level;
-module_param(nand_debug_level, int, 0644);
-MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
-
-MODULE_LICENSE("GPL");
-
-struct spectra_nand_dev {
- struct pci_dev *dev;
- u64 size;
- u16 users;
- spinlock_t qlock;
- void __iomem *ioaddr; /* Mapped address */
- struct request_queue *queue;
- struct task_struct *thread;
- struct gendisk *gd;
- u8 *tmp_buf;
-};
-
-
-static int GLOB_SBD_majornum;
-
-static char *GLOB_version = GLOB_VERSION;
-
-static struct spectra_nand_dev nand_device[NUM_DEVICES];
-
-static struct mutex spectra_lock;
-
-static int res_blks_os = 1;
-
-struct spectra_indentfy_dev_tag IdentifyDeviceData;
-
-static int force_flush_cache(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (ERR == GLOB_FTL_Flush_Cache()) {
- printk(KERN_ERR "Fail to Flush FTL Cache!\n");
- return -EFAULT;
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-}
-
-struct ioctl_rw_page_info {
- u8 *data;
- unsigned int page;
-};
-
-static int ioctl_read_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!buf) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Read(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- if (copy_to_user((void __user *)info.data, buf,
- IdentifyDeviceData.PageDataSize)) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to copy user data\n");
- kfree(buf);
- return -EFAULT;
- }
-
- kfree(buf);
- return result;
-}
-
-static int ioctl_write_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = memdup_user((void __user *)info.data,
- IdentifyDeviceData.PageDataSize);
- if (IS_ERR(buf)) {
- printk(KERN_ERR "ioctl_write_page_data: "
- "failed to copy user data\n");
- return PTR_ERR(buf);
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Write(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- kfree(buf);
- return result;
-}
-
-/* Return how many blocks should be reserved for bad block replacement */
-static int get_res_blk_num_bad_blk(void)
-{
- return IdentifyDeviceData.wDataBlockNum / 10;
-}
-
-/* Return how many blocks should be reserved for OS image */
-static int get_res_blk_num_os(void)
-{
- u32 res_blks, blk_size;
-
- blk_size = IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock;
-
- res_blks = (reserved_mb * 1024 * 1024) / blk_size;
-
- if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
- res_blks = 1; /* Reserved 1 block for block table */
-
- return res_blks;
-}
-
-/* Transfer a full request. */
-static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
-{
- u64 start_addr, addr;
- u32 logical_start_sect, hd_start_sect;
- u32 nsect, hd_sects;
- u32 rsect, tsect = 0;
- char *buf;
- u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
-
- start_addr = (u64)(blk_rq_pos(req)) << 9;
- /* Add a big enough offset to prevent the OS image from
- * being accessed or damaged by the file system */
- start_addr += IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- res_blks_os;
-
- if (req->cmd_flags & REQ_FLUSH) {
- if (force_flush_cache()) /* Fail to flush cache */
- return -EIO;
- else
- return 0;
- }
-
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
- printk(KERN_ERR "Spectra error: request over the NAND "
- "capacity!sector %d, current_nr_sectors %d, "
- "while capacity is %d\n",
- (int)blk_rq_pos(req),
- blk_rq_cur_sectors(req),
- (int)get_capacity(tr->gd));
- return -EIO;
- }
-
- logical_start_sect = start_addr >> 9;
- hd_start_sect = logical_start_sect / ratio;
- rsect = logical_start_sect - hd_start_sect * ratio;
-
- addr = (u64)hd_start_sect * ratio * 512;
- buf = req->buffer;
- nsect = blk_rq_cur_sectors(req);
-
- if (rsect)
- tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
-
- switch (rq_data_dir(req)) {
- case READ:
- /* Read the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Read the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Read(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Read the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- case WRITE:
- /* Write the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Write the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Write(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Write the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- default:
- printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return -EIO;
- }
-}
-
-/* This function is copied from drivers/mtd/mtd_blkdevs.c */
-static int spectra_trans_thread(void *arg)
-{
- struct spectra_nand_dev *tr = arg;
- struct request_queue *rq = tr->queue;
- struct request *req = NULL;
-
- /* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC;
-
- spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
- int res;
-
- if (!req) {
- req = blk_fetch_request(rq);
- if (!req) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
- }
- }
-
- spin_unlock_irq(rq->queue_lock);
-
- mutex_lock(&spectra_lock);
- res = do_transfer(tr, req);
- mutex_unlock(&spectra_lock);
-
- spin_lock_irq(rq->queue_lock);
-
- if (!__blk_end_request_cur(req, res))
- req = NULL;
- }
-
- if (req)
- __blk_end_request_all(req, -EIO);
-
- spin_unlock_irq(rq->queue_lock);
-
- return 0;
-}
-
-
-/* Request function that "handles clustering". */
-static void GLOB_SBD_request(struct request_queue *rq)
-{
- struct spectra_nand_dev *pdev = rq->queuedata;
- wake_up_process(pdev->thread);
-}
-
-static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- return 0;
-}
-
-static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- return 0;
-}
-
-static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- geo->heads = 4;
- geo->sectors = 16;
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "heads: %d, sectors: %d, cylinders: %d\n",
- geo->heads, geo->sectors, geo->cylinders);
-
- return 0;
-}
-
-int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- switch (cmd) {
- case GLOB_SBD_IOCTL_GC:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Garbage Collection "
- "being performed\n");
- if (PASS != GLOB_FTL_Garbage_Collection())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WL:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Static Wear Leveling "
- "being performed\n");
- if (PASS != GLOB_FTL_Wear_Leveling())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FORMAT:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
- "being performed\n");
- if (PASS != GLOB_FTL_Flash_Format())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FLUSH_CACHE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
- "being performed\n");
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
- return ret;
-
- case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy block table\n");
- if (copy_to_user((void __user *)arg,
- get_blk_table_start_addr(),
- get_blk_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy wear leveling table\n");
- if (copy_to_user((void __user *)arg,
- get_wear_leveling_table_start_addr(),
- get_wear_leveling_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_GET_NAND_INFO:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Get NAND info\n");
- if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
- sizeof(IdentifyDeviceData)))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WRITE_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Write one page data\n");
- return ioctl_write_page_data(arg);
-
- case GLOB_SBD_IOCTL_READ_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Read one page data\n");
- return ioctl_read_page_data(arg);
- }
-
- return -ENOTTY;
-}
-
-static DEFINE_MUTEX(ffsport_mutex);
-
-int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ffsport_mutex);
- ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ffsport_mutex);
-
- return ret;
-}
-
-static struct block_device_operations GLOB_SBD_ops = {
- .owner = THIS_MODULE,
- .open = GLOB_SBD_open,
- .release = GLOB_SBD_release,
- .ioctl = GLOB_SBD_unlocked_ioctl,
- .getgeo = GLOB_SBD_getgeo,
-};
-
-static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
-{
- int res_blks;
- u32 sects;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- memset(dev, 0, sizeof(struct spectra_nand_dev));
-
- nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
- "for OS image, %d blocks for bad block replacement.\n",
- get_res_blk_num_os(),
- get_res_blk_num_bad_blk());
-
- res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
-
- dev->size = (u64)IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- (IdentifyDeviceData.wDataBlockNum - res_blks);
-
- res_blks_os = get_res_blk_num_os();
-
- spin_lock_init(&dev->qlock);
-
- dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!dev->tmp_buf) {
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
- __FILE__, __LINE__);
- goto out_vfree;
- }
-
- dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
- if (dev->queue == NULL) {
- printk(KERN_ERR
- "Spectra: Request queue could not be initialized."
- " Aborting\n ");
- goto out_vfree;
- }
- dev->queue->queuedata = dev;
-
- /* The Linux block layer doesn't support hardware sectors larger than */
- /* 4KB, so report a 512-byte logical sector size to the kernel. */
- blk_queue_logical_block_size(dev->queue, 512);
-
- blk_queue_flush(dev->queue, REQ_FLUSH);
-
- dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
- if (IS_ERR(dev->thread)) {
- blk_cleanup_queue(dev->queue);
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
- return PTR_ERR(dev->thread);
- }
-
- dev->gd = alloc_disk(PARTITIONS);
- if (!dev->gd) {
- printk(KERN_ERR
- "Spectra: Could not allocate disk. Aborting \n ");
- goto out_vfree;
- }
- dev->gd->major = GLOB_SBD_majornum;
- dev->gd->first_minor = which * PARTITIONS;
- dev->gd->fops = &GLOB_SBD_ops;
- dev->gd->queue = dev->queue;
- dev->gd->private_data = dev;
- snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
-
- sects = dev->size >> 9;
- nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
- set_capacity(dev->gd, sects);
-
- add_disk(dev->gd);
-
- return 0;
-out_vfree:
- return -ENOMEM;
-}
-
-/*
-static ssize_t show_nand_block_num(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.wDataBlockNum);
-}
-
-static ssize_t show_nand_pages_per_block(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PagesPerBlock);
-}
-
-static ssize_t show_nand_page_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PageDataSize);
-}
-
-static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
-static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
-static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
-
-static void create_sysfs_entry(struct device *dev)
-{
- if (device_create_file(dev, &dev_attr_nand_block_num))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_block_num.\n");
- if (device_create_file(dev, &dev_attr_nand_pages_per_block))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_pages_per_block.\n");
- if (device_create_file(dev, &dev_attr_nand_page_size))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_page_size.\n");
-}
-*/
-
-static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
-{
- int i;
-
- /* create_sysfs_entry(&dev->dev); */
-
- if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
- printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
- "Aborting\n");
- return;
- } else {
- nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
- "Num blocks=%d, pagesperblock=%d, "
- "pagedatasize=%d, ECCBytesPerSector=%d\n",
- (int)IdentifyDeviceData.NumBlocks,
- (int)IdentifyDeviceData.PagesPerBlock,
- (int)IdentifyDeviceData.PageDataSize,
- (int)IdentifyDeviceData.wECCBytesPerSector);
- }
-
- printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
- if (GLOB_FTL_Init() != PASS) {
- printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
- "Aborting\n");
- goto out_ftl_flash_register;
- }
- printk(KERN_ALERT "Spectra: block table has been found.\n");
-
- GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
- if (GLOB_SBD_majornum <= 0) {
- printk(KERN_ERR "Unable to get the major %d for Spectra",
- GLOB_SBD_majornum);
- goto out_ftl_flash_register;
- }
-
- for (i = 0; i < NUM_DEVICES; i++)
- if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
- goto out_blk_register;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra: module loaded with major number %d\n",
- GLOB_SBD_majornum);
-
- return;
-
-out_blk_register:
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-out_ftl_flash_register:
- GLOB_FTL_Cache_Release();
- printk(KERN_ERR "Spectra: Module load failed.\n");
-}
-
-int register_spectra_ftl()
-{
- async_schedule(register_spectra_ftl_async, NULL);
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_spectra_ftl);
-
-static int GLOB_SBD_init(void)
-{
- /* Set debug output level (0~3) here. 3 is most verbose */
- printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-
- mutex_init(&spectra_lock);
-
- if (PASS != GLOB_FTL_Flash_Init()) {
- printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
- "Aborting\n");
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit GLOB_SBD_exit(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < NUM_DEVICES; i++) {
- struct spectra_nand_dev *dev = &nand_device[i];
- if (dev->gd) {
- del_gendisk(dev->gd);
- put_disk(dev->gd);
- }
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
- kfree(dev->tmp_buf);
- }
-
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-
- mutex_lock(&spectra_lock);
- force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- GLOB_FTL_Cache_Release();
-
- GLOB_FTL_Flash_Release();
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra FTL module (major number %d) unloaded.\n",
- GLOB_SBD_majornum);
-}
-
-module_init(GLOB_SBD_init);
-module_exit(GLOB_SBD_exit);
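
For reference, a minimal userspace sketch (not part of the removed driver) of how the
argument-less Spectra ioctls defined above could be exercised. The device node path
/dev/nda is an assumption derived from GLOB_SBD_NAME "nd" and the 'a' suffix appended
in SBD_setup_device(); the ioctl numbers are copied from the removed header block.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define GLOB_SBD_IOCTL_GC          (0x7701)
#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)

int main(void)
{
	int fd = open("/dev/nda", O_RDWR);	/* assumed device node name */

	if (fd < 0) {
		perror("open /dev/nda");
		return 1;
	}
	/* Flush the FTL cache (serviced by force_flush_cache() in the driver) */
	if (ioctl(fd, GLOB_SBD_IOCTL_FLUSH_CACHE) < 0)
		perror("GLOB_SBD_IOCTL_FLUSH_CACHE");
	/* Trigger garbage collection (GLOB_FTL_Garbage_Collection()) */
	if (ioctl(fd, GLOB_SBD_IOCTL_GC) < 0)
		perror("GLOB_SBD_IOCTL_GC");
	close(fd);
	return 0;
}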
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
deleted file mode 100644
index 85c0750612f..00000000000
--- a/drivers/staging/spectra/ffsport.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSPORT_
-#define _FFSPORT_
-
-#include "ffsdefs.h"
-
-#if defined __GNUC__
-#define PACKED
-#define PACKED_GNU __attribute__ ((packed))
-#define UNALIGNED
-#endif
-
-#include <linux/semaphore.h>
-#include <linux/string.h> /* for strcpy(), stricmp(), etc */
-#include <linux/mm.h> /* for kmalloc(), kfree() */
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h> /* printk() */
-#include <linux/fs.h> /* everything... */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/hdreg.h>
-#include <linux/pci.h>
-#include "flash.h"
-
-#define VERBOSE 1
-
-#define NAND_DBG_WARN 1
-#define NAND_DBG_DEBUG 2
-#define NAND_DBG_TRACE 3
-
-extern int nand_debug_level;
-
-#ifdef VERBOSE
-#define nand_dbg_print(level, args...) \
- do { \
- if (level <= nand_debug_level) \
- printk(KERN_ALERT args); \
- } while (0)
-#else
-#define nand_dbg_print(level, args...)
-#endif
-
-#ifdef SUPPORT_BIG_ENDIAN
-#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
- (u16)((u16)(w) >> 8))
-
-#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
- (((u32)(dw) << 8) & 0x00ff0000) | \
- (((u32)(dw) >> 8) & 0x0000ff00) | \
- ((u32)(dw) >> 24))
-#else
-#define INVERTUINT16(w) w
-#define INVERTUINT32(dw) dw
-#endif
-
-extern int GLOB_Calc_Used_Bits(u32 n);
-extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
-extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
-extern int register_spectra_ftl(void);
-
-#endif /* _FFSPORT_ */
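
As an illustration, a standalone sketch (not driver code) of what the INVERTUINT16 and
INVERTUINT32 macros from ffsport.h compute when SUPPORT_BIG_ENDIAN is defined: a plain
byte swap. The u16/u32 typedefs below are local stand-ins for the kernel types.

#include <stdio.h>
#include <stdint.h>

typedef uint16_t u16;	/* stand-ins for the kernel's u16/u32 */
typedef uint32_t u32;

#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
			 (u16)((u16)(w) >> 8))

#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
			  (((u32)(dw) << 8) & 0x00ff0000) | \
			  (((u32)(dw) >> 8) & 0x0000ff00) | \
			  ((u32)(dw) >> 24))

int main(void)
{
	/* Expected output: 0x1234 -> 0x3412 and 0x12345678 -> 0x78563412 */
	printf("0x%04x -> 0x%04x\n", 0x1234u, (unsigned)INVERTUINT16(0x1234));
	printf("0x%08x -> 0x%08x\n", 0x12345678u, (unsigned)INVERTUINT32(0x12345678));
	return 0;
}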
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
deleted file mode 100644
index aead358e5c2..00000000000
--- a/drivers/staging/spectra/flash.c
+++ /dev/null
@@ -1,4305 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#endif
-
-#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
-#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
- DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
-
-#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
-
-#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
-
-#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
-
-#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no,
- int lineno, char *filename)
-{
- if (chnl >= limit)
- printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
- "at %s:%d. Other info:%d. Aborting...\n",
- chnl, limit, filename, lineno, no);
-}
-/* static int globalmemsize; */
-#endif
-
-static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
-static int FTL_Cache_Read(u64 dwPageAddr);
-static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
- u16 cache_blk);
-static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
- u8 cache_blk, u16 flag);
-static int FTL_Cache_Write(void);
-static void FTL_Calculate_LRU(void);
-static u32 FTL_Get_Block_Index(u32 wBlockNum);
-
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page);
-static int FTL_Read_Block_Table(void);
-static int FTL_Write_Block_Table(int wForce);
-static int FTL_Write_Block_Table_Data(void);
-static int FTL_Check_Block_Table(int wOldTable);
-static int FTL_Static_Wear_Leveling(void);
-static u32 FTL_Replace_Block_Table(void);
-static int FTL_Write_IN_Progress_Block_Table_Page(void);
-
-static u32 FTL_Get_Page_Num(u64 length);
-static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
-
-static u32 FTL_Replace_OneBlock(u32 wBlockNum,
- u32 wReplaceNum);
-static u32 FTL_Replace_LWBlock(u32 wBlockNum,
- int *pGarbageCollect);
-static u32 FTL_Replace_MWBlock(void);
-static int FTL_Replace_Block(u64 blk_addr);
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-
-struct device_info_tag DeviceInfo;
-struct flash_cache_tag Cache;
-static struct spectra_l2_cache_info cache_l2;
-
-static u8 *cache_l2_page_buf;
-static u8 *cache_l2_blk_buf;
-
-u8 *g_pBlockTable;
-u8 *g_pWearCounter;
-u16 *g_pReadCounter;
-u32 *g_pBTBlocks;
-static u16 g_wBlockTableOffset;
-static u32 g_wBlockTableIndex;
-static u8 g_cBlockTableStatus;
-
-static u8 *g_pTempBuf;
-static u8 *flag_check_blk_table;
-static u8 *tmp_buf_search_bt_in_block;
-static u8 *spare_buf_search_bt_in_block;
-static u8 *spare_buf_bt_search_bt_in_block;
-static u8 *tmp_buf1_read_blk_table;
-static u8 *tmp_buf2_read_blk_table;
-static u8 *flags_static_wear_leveling;
-static u8 *tmp_buf_write_blk_table_data;
-static u8 *tmp_buf_read_disturbance;
-
-u8 *buf_read_page_main_spare;
-u8 *buf_write_page_main_spare;
-u8 *buf_read_page_spare;
-u8 *buf_get_bad_block;
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
-struct flash_cache_tag cache_start_copy;
-#endif
-
-int g_wNumFreeBlocks;
-u8 g_SBDCmdIndex;
-
-static u8 *g_pIPF;
-static u8 bt_flag = FIRST_BT_ID;
-static u8 bt_block_changed;
-
-static u16 cache_block_to_write;
-static u8 last_erased = FIRST_BT_ID;
-
-static u8 GC_Called;
-static u8 BT_GC_Called;
-
-#if CMD_DMA
-#define COPY_BACK_BUF_NUM 10
-
-static u8 ftl_cmd_cnt; /* Init value is 0 */
-u8 *g_pBTDelta;
-u8 *g_pBTDelta_Free;
-u8 *g_pBTStartingCopy;
-u8 *g_pWearCounterCopy;
-u16 *g_pReadCounterCopy;
-u8 *g_pBlockTableCopies;
-u8 *g_pNextBlockTable;
-static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
-static int cp_back_buf_idx;
-
-static u8 *g_temp_buf;
-
-#pragma pack(push, 1)
-#pragma pack(1)
-struct BTableChangesDelta {
- u8 ftl_cmd_cnt;
- u8 ValidFields;
- u16 g_wBlockTableOffset;
- u32 g_wBlockTableIndex;
- u32 BT_Index;
- u32 BT_Entry_Value;
- u32 WC_Index;
- u8 WC_Entry_Value;
- u32 RC_Index;
- u16 RC_Entry_Value;
-};
-
-#pragma pack(pop)
-
-struct BTableChangesDelta *p_BTableChangesDelta;
-#endif
-
-
-#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
-#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
-
-#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u32))
-#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8))
-#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#if SUPPORT_LARGE_BLOCKNUM
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8) * 3)
-#else
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#endif
-#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
- FTL_Get_WearCounter_Table_Mem_Size_Bytes
-#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
- FTL_Get_ReadCounter_Table_Mem_Size_Bytes
-
-static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
-{
- u32 byte_num;
-
- if (DeviceInfo.MLCDevice) {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8) +
- DeviceInfo.wDataBlockNum * sizeof(u16);
- } else {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8);
- }
-
- byte_num += 4 * sizeof(u8);
-
- return byte_num;
-}
-
-static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
-{
- return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
-}
-
-static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
- u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0;
- (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
- wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
- >> (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
-#else
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
- >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- wBytesCopied += wBytes;
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
- flashBuf[wBytes + wBytesCopied] =
- (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
- (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
- }
-
- return wBytesCopied + wBytes;
-}
-
-static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
- u32 sizeToTx, u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0; (wBytes < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- if (!((wBytes + sizeTxed) % 3))
- pbt[(wBytes + sizeTxed) / 3] = 0;
- pbt[(wBytes + sizeTxed) / 3] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
-#else
- if (!((wBytes + sizeTxed) % 2))
- pbt[(wBytes + sizeTxed) / 2] = 0;
- pbt[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
- 0 : 8));
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- wBytesCopied += wBytes;
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
- if (((wBytes + sizeTxed) % 2))
- g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
- g_pReadCounter[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] <<
- (((wBytes + sizeTxed) % 2) ? 0 : 8));
- }
- }
-
- return wBytesCopied+wBytes;
-}
-
-static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
-{
- int i;
-
- for (i = 0; i < BTSIG_BYTES; i++)
- buf[BTSIG_OFFSET + i] =
- ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
- (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
-
- return PASS;
-}
-
-static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
-{
- static u8 tag[BTSIG_BYTES >> 1];
- int i, j, k, tagi, tagtemp, status;
-
- *tagarray = (u8 *)tag;
- tagi = 0;
-
- for (i = 0; i < (BTSIG_BYTES - 1); i++) {
- for (j = i + 1; (j < BTSIG_BYTES) &&
- (tagi < (BTSIG_BYTES >> 1)); j++) {
- tagtemp = buf[BTSIG_OFFSET + j] -
- buf[BTSIG_OFFSET + i];
- if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
- tagtemp = (buf[BTSIG_OFFSET + i] +
- (1 + LAST_BT_ID - FIRST_BT_ID) -
- (i * BTSIG_DELTA)) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- status = FAIL;
- for (k = 0; k < tagi; k++) {
- if (tagtemp == tag[k])
- status = PASS;
- }
-
- if (status == FAIL) {
- tag[tagi++] = tagtemp;
- i = (j == (i + 1)) ? i + 1 : i;
- j = (j == (i + 1)) ? i + 1 : i;
- }
- }
- }
- }
-
- return tagi;
-}
-
-
-static int FTL_Execute_SPL_Recovery(void)
-{
- u32 j, block, blks;
- u32 *pbt = (u32 *)g_pBlockTable;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
- for (j = 0; j <= blks; j++) {
- block = (pbt[j]);
- if (((block & BAD_BLOCK) != BAD_BLOCK) &&
- ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
- ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
- if (FAIL == ret) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)(block & ~BAD_BLOCK));
- MARK_BLOCK_AS_BAD(pbt[j]);
- }
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_IdentifyDevice
-* Inputs: pointer to identify data structure
-* Outputs: PASS / FAIL
-* Description: the identify data structure is filled in with
-* information for the block driver.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
- dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
- dev_data->PageDataSize = DeviceInfo.wPageDataSize;
- dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
- dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
-
- return PASS;
-}
-
-/* ..... */
-static int allocate_memory(void)
-{
- u32 block_table_size, page_size, block_size, mem_size;
- u32 total_bytes = 0;
- int i;
-#if CMD_DMA
- int j;
-#endif
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- page_size = DeviceInfo.wPageSize;
- block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- block_table_size = DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8) + sizeof(u16));
- block_table_size += (DeviceInfo.wPageDataSize -
- (block_table_size % DeviceInfo.wPageDataSize)) %
- DeviceInfo.wPageDataSize;
-
- /* Malloc memory for block tables */
- g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBlockTable)
- goto block_table_fail;
- total_bytes += block_table_size;
-
- g_pWearCounter = (u8 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounter = (u16 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory and init for cache items */
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- Cache.array[i].address = NAND_CACHE_INIT_ADDR;
- Cache.array[i].use_cnt = 0;
- Cache.array[i].changed = CLEAR;
- Cache.array[i].buf = kzalloc(Cache.cache_item_size,
- GFP_ATOMIC);
- if (!Cache.array[i].buf)
- goto cache_item_fail;
- total_bytes += Cache.cache_item_size;
- }
-
- /* Malloc memory for IPF */
- g_pIPF = kzalloc(page_size, GFP_ATOMIC);
- if (!g_pIPF)
- goto ipf_fail;
- total_bytes += page_size;
-
- /* Malloc memory for data merging during Level2 Cache flush */
- cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
- if (!cache_l2_page_buf)
- goto cache_l2_page_buf_fail;
- memset(cache_l2_page_buf, 0xff, page_size);
- total_bytes += page_size;
-
- cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!cache_l2_blk_buf)
- goto cache_l2_blk_buf_fail;
- memset(cache_l2_blk_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for temp buffer */
- g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC);
- if (!g_pTempBuf)
- goto Temp_buf_fail;
- total_bytes += Cache.cache_item_size;
-
- /* Malloc memory for block table blocks */
- mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
- g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTBlocks)
- goto bt_blocks_fail;
- memset(g_pBTBlocks, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Check_Block_Table */
- flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
- if (!flag_check_blk_table)
- goto flag_check_blk_table_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
- tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf_search_bt_in_block)
- goto tmp_buf_search_bt_in_block_fail;
- memset(tmp_buf_search_bt_in_block, 0xff, page_size);
- total_bytes += page_size;
-
- mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
- spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_search_bt_in_block)
- goto spare_buf_search_bt_in_block_fail;
- memset(spare_buf_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_bt_search_bt_in_block)
- goto spare_buf_bt_search_bt_in_block_fail;
- memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Block_Table */
- tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf1_read_blk_table)
- goto tmp_buf1_read_blk_table_fail;
- memset(tmp_buf1_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf2_read_blk_table)
- goto tmp_buf2_read_blk_table_fail;
- memset(tmp_buf2_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- /* Malloc memory for function FTL_Static_Wear_Leveling */
- flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
- GFP_ATOMIC);
- if (!flags_static_wear_leveling)
- goto flags_static_wear_leveling_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Write_Block_Table_Data */
- if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
- mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
- 2 * DeviceInfo.wPageSize;
- else
- mem_size = DeviceInfo.wPageSize;
- tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
- if (!tmp_buf_write_blk_table_data)
- goto tmp_buf_write_blk_table_data_fail;
- memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Disturbance */
- tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
- if (!tmp_buf_read_disturbance)
- goto tmp_buf_read_disturbance_fail;
- memset(tmp_buf_read_disturbance, 0xff, block_size);
- total_bytes += block_size;
-
- /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
- buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_read_page_main_spare)
- goto buf_read_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
- buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_write_page_main_spare)
- goto buf_write_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
- buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_read_page_spare)
- goto buf_read_page_spare_fail;
- memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
- /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
- buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_get_bad_block)
- goto buf_get_bad_block_fail;
- memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
-#if CMD_DMA
- g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!g_temp_buf)
- goto temp_buf_fail;
- memset(g_temp_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for copy of block table used in CDMA mode */
- g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBTStartingCopy)
- goto bt_starting_copy;
- total_bytes += block_table_size;
-
- g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory for block table copies */
- mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
- 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
- if (DeviceInfo.MLCDevice)
- mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
- g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBlockTableCopies)
- goto blk_table_copies_fail;
- total_bytes += mem_size;
- g_pNextBlockTable = g_pBlockTableCopies;
-
- /* Malloc memory for Block Table Delta */
- mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
- g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTDelta)
- goto bt_delta_fail;
- total_bytes += mem_size;
- g_pBTDelta_Free = g_pBTDelta;
-
- /* Malloc memory for Copy Back Buffers */
- for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
- cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC);
- if (!cp_back_buf_copies[j])
- goto cp_back_buf_copies_fail;
- total_bytes += block_size;
- }
- cp_back_buf_idx = 0;
-
- /* Malloc memory for pending commands list */
- mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
- info.pcmds = kzalloc(mem_size, GFP_KERNEL);
- if (!info.pcmds)
- goto pending_cmds_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for CDMA descripter table */
- mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
- info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.cdma_desc_buf)
- goto cdma_desc_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for Memcpy descripter table */
- mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
- info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.memcp_desc_buf)
- goto memcp_desc_buf_fail;
- total_bytes += mem_size;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN,
- "Total memory allocated in FTL layer: %d\n", total_bytes);
-
- return PASS;
-
-#if CMD_DMA
-memcp_desc_buf_fail:
- kfree(info.cdma_desc_buf);
-cdma_desc_buf_fail:
- kfree(info.pcmds);
-pending_cmds_buf_fail:
-cp_back_buf_copies_fail:
- j--;
- for (; j >= 0; j--)
- kfree(cp_back_buf_copies[j]);
- kfree(g_pBTDelta);
-bt_delta_fail:
- kfree(g_pBlockTableCopies);
-blk_table_copies_fail:
- kfree(g_pBTStartingCopy);
-bt_starting_copy:
- kfree(g_temp_buf);
-temp_buf_fail:
- kfree(buf_get_bad_block);
-#endif
-
-buf_get_bad_block_fail:
- kfree(buf_read_page_spare);
-buf_read_page_spare_fail:
- kfree(buf_write_page_main_spare);
-buf_write_page_main_spare_fail:
- kfree(buf_read_page_main_spare);
-buf_read_page_main_spare_fail:
- kfree(tmp_buf_read_disturbance);
-tmp_buf_read_disturbance_fail:
- kfree(tmp_buf_write_blk_table_data);
-tmp_buf_write_blk_table_data_fail:
- kfree(flags_static_wear_leveling);
-flags_static_wear_leveling_fail:
- kfree(tmp_buf2_read_blk_table);
-tmp_buf2_read_blk_table_fail:
- kfree(tmp_buf1_read_blk_table);
-tmp_buf1_read_blk_table_fail:
- kfree(spare_buf_bt_search_bt_in_block);
-spare_buf_bt_search_bt_in_block_fail:
- kfree(spare_buf_search_bt_in_block);
-spare_buf_search_bt_in_block_fail:
- kfree(tmp_buf_search_bt_in_block);
-tmp_buf_search_bt_in_block_fail:
- kfree(flag_check_blk_table);
-flag_check_blk_table_fail:
- kfree(g_pBTBlocks);
-bt_blocks_fail:
- kfree(g_pTempBuf);
-Temp_buf_fail:
- kfree(cache_l2_blk_buf);
-cache_l2_blk_buf_fail:
- kfree(cache_l2_page_buf);
-cache_l2_page_buf_fail:
- kfree(g_pIPF);
-ipf_fail:
-cache_item_fail:
- i--;
- for (; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-block_table_fail:
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
- __FILE__, __LINE__);
-
- return -ENOMEM;
-}
-
-/* .... */
-static int free_memory(void)
-{
- int i;
-
-#if CMD_DMA
- kfree(info.memcp_desc_buf);
- kfree(info.cdma_desc_buf);
- kfree(info.pcmds);
- for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
- kfree(cp_back_buf_copies[i]);
- kfree(g_pBTDelta);
- kfree(g_pBlockTableCopies);
- kfree(g_pBTStartingCopy);
- kfree(g_temp_buf);
- kfree(buf_get_bad_block);
-#endif
- kfree(buf_read_page_spare);
- kfree(buf_write_page_main_spare);
- kfree(buf_read_page_main_spare);
- kfree(tmp_buf_read_disturbance);
- kfree(tmp_buf_write_blk_table_data);
- kfree(flags_static_wear_leveling);
- kfree(tmp_buf2_read_blk_table);
- kfree(tmp_buf1_read_blk_table);
- kfree(spare_buf_bt_search_bt_in_block);
- kfree(spare_buf_search_bt_in_block);
- kfree(tmp_buf_search_bt_in_block);
- kfree(flag_check_blk_table);
- kfree(g_pBTBlocks);
- kfree(g_pTempBuf);
- kfree(g_pIPF);
- for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-
- return 0;
-}
-
-static void dump_cache_l2_table(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- int n;
-
- n = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
-/*
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE)
- nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
- }
-*/
- n++;
- }
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: allocates the memory for cache array,
-* important data structures
-* clears the cache array
-* reads the block table from flash into array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Cache.pages_per_item = 1;
- Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
-
- if (allocate_memory() != PASS)
- return FAIL;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_CHANS + MAX_DESCS));
-#endif
- ftl_cmd_cnt = 0;
-#endif
-
- if (FTL_Read_Block_Table() != PASS)
- return FAIL;
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- dump_cache_l2_table();
-
- return 0;
-}
-
-
-#if CMD_DMA
-#if 0
-static void save_blk_table_changes(u16 idx)
-{
- u8 ftl_cmd;
- u32 *pbt = (u32 *)g_pBTStartingCopy;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u16 id;
- u8 cache_blks;
-
- id = idx - MAX_CHANS;
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
-#endif
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- if (p_BTableChangesDelta->ValidFields == 0x01) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
- pbt[p_BTableChangesDelta->BT_Index] =
- p_BTableChangesDelta->BT_Entry_Value;
- debug_boundary_error(((
- p_BTableChangesDelta->BT_Index)),
- DeviceInfo.wDataBlockNum, 0);
- } else if (p_BTableChangesDelta->ValidFields == 0x03) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- g_wBlockTableIndex =
- p_BTableChangesDelta->g_wBlockTableIndex;
- } else if (p_BTableChangesDelta->ValidFields == 0x30) {
- g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
- p_BTableChangesDelta->WC_Entry_Value;
- } else if ((DeviceInfo.MLCDevice) &&
- (p_BTableChangesDelta->ValidFields == 0xC0)) {
- g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
- p_BTableChangesDelta->RC_Entry_Value;
- nand_dbg_print(NAND_DBG_DEBUG,
- "In event status setting read counter "
- "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
- ftl_cmd,
- p_BTableChangesDelta->RC_Entry_Value,
- (unsigned int)p_BTableChangesDelta->RC_Index);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "This should never occur \n");
- }
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-}
-
-static void discard_cmds(u16 n)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long k;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
- for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
- if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
- MARK_BLK_AS_DISCARD(pbt[k]);
- }
- }
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[n].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = n - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if (PendingCMD[n].CMD == MEMCOPY_CMD) {
- if ((cache_start_copy.array[cache_blks].buf <=
- PendingCMD[n].DataDestAddr) &&
- ((cache_start_copy.array[cache_blks].buf +
- Cache.cache_item_size) >
- PendingCMD[n].DataDestAddr)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt =
- 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- }
- } else {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
- }
-#endif
-}
-
-static void process_cmd_pass(int *first_failed_cmd, u16 idx)
-{
- if (0 == *first_failed_cmd)
- save_blk_table_changes(idx);
- else
- discard_cmds(idx);
-}
-
-static void process_cmd_fail_abort(int *first_failed_cmd,
- u16 idx, int event)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long i;
- int erase_fail, program_fail;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if (0 == *first_failed_cmd)
- *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
- "while executing %u Command %u accesing Block %u\n",
- (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
- PendingCMD[idx].CMD,
- (unsigned int)PendingCMD[idx].Block);
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = idx - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed = SET;
- } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt = 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- } else if (PendingCMD[idx].CMD == ERASE_CMD) {
- /* ? */
- } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
- /* ? */
- }
- }
-#endif
-
- erase_fail = (event == EVENT_ERASE_FAILURE) &&
- (PendingCMD[idx].CMD == ERASE_CMD);
-
- program_fail = (event == EVENT_PROGRAM_FAILURE) &&
- ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
-
- if (erase_fail || program_fail) {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (PendingCMD[idx].Block ==
- (pbt[i] & (~BAD_BLOCK)))
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-}
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- u8 ftl_cmd;
- int cmd_match = 0;
-
- if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
- cmd_match = 1;
-
- if (PendingCMD[idx].Status == CMD_PASS) {
- process_cmd_pass(first_failed_cmd, idx);
- } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
- (PendingCMD[idx].Status == CMD_ABORT)) {
- process_cmd_fail_abort(first_failed_cmd, idx, event);
- } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
- PendingCMD[idx].Tag) {
- nand_dbg_print(NAND_DBG_DEBUG,
- " Command no. %hu is not executed\n",
- (unsigned int)PendingCMD[idx].Tag);
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
- }
-}
-#endif
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- printk(KERN_ERR "temporary workaround function. "
- "Should not be called! \n");
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Event_Status
-* Inputs: none
-* Outputs: Event Code
-* Description: It is called by the SBD after a hardware interrupt signals
-* completion of the command chain.
-* It does the following:
-* get event status from LLD
-* analyze command chain status
-* determine last command executed
-* analyze results
-* rebuild the block table in case of uncorrectable error
-* return event code
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Event_Status(int *first_failed_cmd)
-{
- int event_code = PASS;
- u16 i_P;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- *first_failed_cmd = 0;
-
- event_code = GLOB_LLD_Event_Status();
-
- switch (event_code) {
- case EVENT_PASS:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
- break;
- case EVENT_UNCORRECTABLE_DATA_ERROR:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
- break;
- case EVENT_PROGRAM_FAILURE:
- case EVENT_ERASE_FAILURE:
- nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
- "Event code: 0x%x\n", event_code);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta;
- for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
- i_P++)
- process_cmd(first_failed_cmd, i_P, event_code);
- memcpy(g_pBlockTable, g_pBTStartingCopy,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounter, g_pWearCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounter, g_pReadCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&Cache, (void *)&cache_start_copy,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
- "Handling unexpected event code - 0x%x\n",
- event_code);
- event_code = ERR;
- break;
- }
-
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounterCopy, g_pWearCounter,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounterCopy, g_pReadCounter,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
- g_pBTDelta_Free = g_pBTDelta;
- ftl_cmd_cnt = 0;
- g_pNextBlockTable = g_pBlockTableCopies;
- cp_back_buf_idx = 0;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
-
- return event_code;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: glob_ftl_execute_cmds
-* Inputs: none
-* Outputs: none
-* Description: pass thru to LLD
-***************************************************************/
-u16 glob_ftl_execute_cmds(void)
-{
- nand_dbg_print(NAND_DBG_TRACE,
- "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
- (unsigned int)ftl_cmd_cnt);
- g_SBDCmdIndex = 0;
- return glob_lld_execute_cmds();
-}
-
-#endif
-
-#if !CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Read_Immediate
-* Inputs: pointer to data
-* address of data
-* Outputs: PASS / FAIL
-* Description: Reads one page of data into RAM directly from flash without
-* using or disturbing the cache. It is assumed this function is called
-* with CMD-DMA disabled.
-*****************************************************************/
-int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
-{
- int wResult = FAIL;
- u32 Block;
- u16 Page;
- u32 phy_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Block = BLK_FROM_ADDR(addr);
- Page = PAGE_FROM_ADDR(addr, Block);
-
- if (!IS_SPARE_BLOCK(Block))
- return FAIL;
-
- phy_blk = pbt[Block];
- wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
- >= MAX_READ_COUNTER)
- FTL_Read_Disturbance(phy_blk);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
- return wResult;
-}
-#endif
-
-#ifdef SUPPORT_BIG_ENDIAN
-/*********************************************************************
-* Function: FTL_Invert_Block_Table
-* Inputs: none
-* Outputs: none
-* Description: Re-format the block table in ram based on BIG_ENDIAN and
-* LARGE_BLOCKNUM if necessary
-**********************************************************************/
-static void FTL_Invert_Block_Table(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#ifdef SUPPORT_LARGE_BLOCKNUM
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT32(pbt[i]);
- g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
- }
-#else
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT16(pbt[i]);
- g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
- }
-#endif
-}
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is initialized
-* The flash device is reset
-* Perform a flash READ ID command to confirm that a
-* valid device is attached and active.
-* The DeviceInfo structure gets filled in
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Init(void)
-{
- int status = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- g_SBDCmdIndex = 0;
-
- status = GLOB_LLD_Flash_Init();
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is released
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return GLOB_LLD_Flash_Release();
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Cache_Release
-* Inputs: none
-* Outputs: none
-* Description: release all allocated memory in GLOB_FTL_Init
-* (allocated in GLOB_FTL_Init)
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void GLOB_FTL_Cache_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- free_memory();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_If_Hit
-* Inputs: Page Address
-* Outputs: Block number/UNHIT BLOCK
-* Description: Determines if the addressed page is in cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u16 FTL_Cache_If_Hit(u64 page_addr)
-{
- u16 item;
- u64 addr;
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- item = UNHIT_CACHE_ITEM;
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- addr = Cache.array[i].address;
- if ((page_addr >= addr) &&
- (page_addr < (addr + Cache.cache_item_size))) {
- item = i;
- break;
- }
- }
-
- return item;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Calculate_LRU
-* Inputs: None
-* Outputs: None
-* Description: Calculate the least recently block in a cache and record its
-* index in LRU field.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Calculate_LRU(void)
-{
- u16 i, bCurrentLRU, bTempCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bCurrentLRU = 0;
- bTempCount = MAX_WORD_VALUE;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (Cache.array[i].use_cnt < bTempCount) {
- bCurrentLRU = i;
- bTempCount = Cache.array[i].use_cnt;
- }
- }
-
- Cache.LRU = bCurrentLRU;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_Page
-* Inputs: pointer to read buffer, logical address and cache item number
-* Outputs: None
-* Description: Read the page from the cached block addressed by the block number
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
-{
- u8 *start_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = Cache.array[cache_item].buf;
- start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
- DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
-
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
- DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
-#endif
-
- if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_item].use_cnt++;
-}
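-
-/*
- * Editor's illustrative sketch (not part of the original driver): the
- * pointer arithmetic above locates the requested page inside the cache item
- * buffer by converting the address delta into whole pages and scaling back
- * to bytes.  The helper below shows the same computation in isolation; its
- * name and parameters are hypothetical.
- */
-#include <stdint.h>
-
-/* Byte offset of the page containing logic_addr inside an item that starts
- * at item_start, where the page size equals 1 << page_shift. */
-static uint32_t sketch_page_offset_in_item(uint64_t logic_addr,
-                                           uint64_t item_start,
-                                           uint32_t page_shift,
-                                           uint32_t page_size)
-{
-        return (uint32_t)(((logic_addr - item_start) >> page_shift) *
-                          page_size);
-}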
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_All
-* Inputs: pointer to read buffer, block address
-* Outputs: PASS=0 / FAIL=1
-* Description: Read all pages of a cache item from flash
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
-{
- int wResult = PASS;
- u32 Block;
- u32 lba;
- u16 Page;
- u16 PageCount;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- Block = BLK_FROM_ADDR(phy_addr);
- Page = PAGE_FROM_ADDR(phy_addr, Block);
- PageCount = Cache.pages_per_item;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Function: %s, Block: 0x%x\n",
- __FILE__, __LINE__, __func__, Block);
-
- lba = 0xffffffff;
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if ((pbt[i] & (~BAD_BLOCK)) == Block) {
- lba = i;
- if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
- IS_DISCARDED_BLOCK(i)) {
- /* Add by yunpeng -2008.12.3 */
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
- PageCount * DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memset(pData, 0xFF,
- PageCount * DeviceInfo.wPageDataSize);
-#endif
- return wResult;
- } else {
- continue; /* break ?? */
- }
- }
- }
-
- if (0xffffffff == lba)
- printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
-
-#if CMD_DMA
- wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
- PageCount, LLD_CMD_FLAG_MODE_CDMA);
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Counter modified in ftl_cmd_cnt %u"
- " Block %u Counter%u\n",
- ftl_cmd_cnt, (unsigned int)Block,
- g_pReadCounter[Block -
- DeviceInfo.wSpectraStartBlock]);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- Block - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
-
- ftl_cmd_cnt++;
-
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- } else {
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
- if (wResult == FAIL)
- return wResult;
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_All
-* Inputs: pointer to cache in sys memory
-* address of free block in flash
-* Outputs: PASS=0 / FAIL=1
-* Description: writes all the pages of the block in cache to flash
-*
-* NOTE: need to make sure this works OK when the cache is limited
-* to a partial block. This is where copy-back would be
-* activated. This would require knowing which pages in the
-* cached block are clean/dirty. Right now we only know if
-* the whole block is clean/dirty.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
-{
- u16 wResult = PASS;
- u32 Block;
- u16 Page;
- u16 PageCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
- "on %d\n", cache_block_to_write,
- (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
-
- Block = BLK_FROM_ADDR(blk_addr);
- Page = PAGE_FROM_ADDR(blk_addr, Block);
- PageCount = Cache.pages_per_item;
-
-#if CMD_DMA
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
- Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated! "
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
- " Line %d, Function %s, new Bad Block %d generated!"
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Copy_Block
-* Inputs: source block address
-* Destination block address
-* Outputs: PASS=0 / FAIL=1
-* Description: Used only for static wear leveling, to move a block
-* containing static data to a new (more worn) block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
-{
- int i, r1, r2, wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
- r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
- i * DeviceInfo.wPageDataSize);
- r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
- i * DeviceInfo.wPageDataSize);
- if ((ERR == r1) || (FAIL == r2)) {
- wResult = FAIL;
- break;
- }
- }
-
- return wResult;
-}
-
-/* Search the block table for the least worn spare block and return it */
-static u32 find_least_worn_blk_for_l2_cache(void)
-{
- int i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 least_wear_cnt = MAX_BYTE_VALUE;
- u32 least_wear_blk_idx = MAX_U32_VALUE;
- u32 phy_idx;
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
- if (phy_idx > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
- "Too big phy block num (%d)\n", phy_idx);
- if (g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
- least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
- least_wear_blk_idx = i;
- }
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "find block %d with least worn counter (%d)\n",
- least_wear_blk_idx, least_wear_cnt);
-
- return least_wear_blk_idx;
-}
-
-
-
-/* Get blocks for Level2 Cache */
-static int get_l2_cache_blks(void)
-{
- int n;
- u32 blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
- blk = find_least_worn_blk_for_l2_cache();
- if (blk >= DeviceInfo.wDataBlockNum) {
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
- return FAIL;
- }
- /* Tag the free block as discarded in the block table */
- pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
- /* Add the free block to the L2 Cache block array */
- cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
- }
-
- return PASS;
-}
-
-static int erase_l2_cache_blocks(void)
-{
- int i, ret = PASS;
- u32 pblk, lblk = BAD_BLOCK;
- u64 addr;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
- pblk = cache_l2.blk_array[i];
-
- /* If the L2 cache block is invalid, then just skip it */
- if (MAX_U32_VALUE == pblk)
- continue;
-
- BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
-
- addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- /* Get logical block number of the erased block */
- lblk = FTL_Get_Block_Index(pblk);
- BUG_ON(BAD_BLOCK == lblk);
- /* Tag it as free in the block table */
- pbt[lblk] &= (u32)(~DISCARD_BLOCK);
- pbt[lblk] |= (u32)(SPARE_BLOCK);
- } else {
- MARK_BLOCK_AS_BAD(pbt[lblk]);
- ret = ERR;
- }
- }
-
- return ret;
-}
-
-/*
- * Merge the valid data pages in the L2 cache blocks back into NAND.
- */
-static int flush_l2_cache(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *tmp_pnd;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 phy_blk, l2_blk;
- u64 addr;
- u16 l2_page;
- int i, ret = PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (list_empty(&cache_l2.table.list)) /* No data to flush */
- return ret;
-
- /* dump_cache_l2_table(); */
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
- IS_BAD_BLOCK(pnd->logical_blk_num) ||
- IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
- } else {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
- phy_blk, 0, DeviceInfo.wPagesPerBlock);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- }
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE) {
- l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
- l2_page = pnd->pages_array[i] & 0xffff;
- ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
- }
- }
-
- /* Find a free block and tag the original block as discarded */
- addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
- ret = FTL_Replace_Block(addr);
- if (ret == FAIL) {
- printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
- }
-
- /* Write back the updated data into NAND */
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "Program NAND block %d fail in %s, Line %d\n",
- phy_blk, __FILE__, __LINE__);
- /* This may not really be a bad block, so just tag it as discarded. */
- /* Then it has a chance to be erased during garbage collection. */
- /* If it is really bad, the erase will fail and it will be marked */
- /* as bad then. Otherwise it will be marked as free and can be used again. */
- MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
- /* Find another free block and write it again */
- FTL_Replace_Block(addr);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
- "Some data will be lost!\n", phy_blk);
- MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
- }
- } else {
- /* tag the new free block as used block */
- pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
- }
- }
-
- /* Destroy the L2 Cache table and free the memory of all nodes */
- list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
- list_del(&pnd->list);
- kfree(pnd);
- }
-
- /* Erase discard L2 cache blocks */
- if (erase_l2_cache_blocks() != PASS)
- nand_dbg_print(NAND_DBG_WARN,
- " Erase L2 cache blocks error in %s, Line %d\n",
- __FILE__, __LINE__);
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- return ret;
-}
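-
-/*
- * Editor's illustrative sketch (not part of the original driver): the flush
- * above rebuilds one block image in RAM by overlaying every page that has a
- * newer copy in the L2 cache and then writes the merged image back in one
- * go.  The helper below shows just the per-page merge step with hypothetical
- * names; new_pages[i] == NULL stands for "page i unchanged".
- */
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-
-static void sketch_merge_block(uint8_t *blk_buf, uint8_t * const *new_pages,
-                               uint32_t pages_per_block, uint32_t page_size)
-{
-        uint32_t i;
-
-        for (i = 0; i < pages_per_block; i++) {
-                /* overwrite only the pages that have a newer copy */
-                if (new_pages[i])
-                        memcpy(blk_buf + (size_t)i * page_size,
-                               new_pages[i], page_size);
-        }
-}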
-
-/*
- * Write back a changed victim cache item to the Level2 Cache
- * and update the L2 Cache table to record the change.
- * If the L2 Cache is full, then start an L2 Cache flush.
- */
-static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *pnd_new;
- u32 node_size;
- int i, found;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /*
- * If the Level2 Cache table is empty, it means either:
- * 1. This is the first time the function is called after FTL_init,
- * or
- * 2. The Level2 Cache has just been flushed
- *
- * So 'steal' some free blocks from NAND for L2 Cache use
- * by simply marking them as discarded in the block table
- */
- if (list_empty(&cache_l2.table.list)) {
- BUG_ON(cache_l2.cur_blk_idx != 0);
- BUG_ON(cache_l2.cur_page_num != 0);
- BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
- if (FAIL == get_l2_cache_blks()) {
- GLOB_FTL_Garbage_Collection();
- if (FAIL == get_l2_cache_blks()) {
- printk(KERN_ALERT "Fail to get L2 cache blks!\n");
- return FAIL;
- }
- }
- }
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
- BUG_ON(logical_blk_num == MAX_U32_VALUE);
-
- /* Write the cache item data into the current position of L2 Cache */
-#if CMD_DMA
- /*
- * TODO
- */
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(buf,
- cache_l2.blk_array[cache_l2.cur_blk_idx],
- cache_l2.cur_page_num, 1)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, new Bad Block %d generated!\n",
- __FILE__, __LINE__,
- cache_l2.blk_array[cache_l2.cur_blk_idx]);
-
- /* TODO: tag the current block as bad and try again */
-
- return FAIL;
- }
-#endif
-
- /*
- * Update the L2 Cache table.
- *
- * First search the table to see whether the logical block
- * has already been mapped. If not, kmalloc a new node for the
- * logical block, fill in the data, and insert it into the list.
- * Otherwise, just update the mapped node directly.
- */
- found = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- pnd->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) |
- cache_l2.cur_page_num;
- found = 1;
- break;
- }
- }
- if (!found) { /* Create new node for the logical block here */
-
- /* The logical pages to physical pages map array is
- * located at the end of struct spectra_l2_cache_list.
- */
- node_size = sizeof(struct spectra_l2_cache_list) +
- sizeof(u32) * DeviceInfo.wPagesPerBlock;
- pnd_new = kmalloc(node_size, GFP_ATOMIC);
- if (!pnd_new) {
- printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
- __FILE__, __LINE__);
- /*
- * TODO: Need to flush all the L2 cache into NAND ASAP
- * since no memory is available here
- */
- return FAIL; /* Avoid dereferencing the NULL pointer below */
- }
- pnd_new->logical_blk_num = logical_blk_num;
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
- pnd_new->pages_array[i] = MAX_U32_VALUE;
- pnd_new->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
- list_add(&pnd_new->list, &cache_l2.table.list);
- }
-
- /* Advance the current position pointer of the L2 Cache */
- cache_l2.cur_page_num++;
- if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
- cache_l2.cur_blk_idx++;
- if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
- /* The L2 Cache is full. Need to flush it now */
- nand_dbg_print(NAND_DBG_WARN,
- "L2 Cache is full, will start to flush it\n");
- flush_l2_cache();
- } else {
- cache_l2.cur_page_num = 0;
- }
- }
-
- return PASS;
-}
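-
-/*
- * Editor's illustrative sketch (not part of the original driver): the
- * pages_array entries above pack an L2 cache location as
- * (blk_array index << 16) | page number.  The helpers below show that
- * packing and the matching unpacking in isolation; the names are
- * hypothetical.
- */
-#include <stdint.h>
-
-static uint32_t sketch_pack_l2_pos(uint16_t blk_idx, uint16_t page_num)
-{
-        return ((uint32_t)blk_idx << 16) | page_num;
-}
-
-static void sketch_unpack_l2_pos(uint32_t entry, uint16_t *blk_idx,
-                                 uint16_t *page_num)
-{
-        *blk_idx = (uint16_t)((entry >> 16) & 0xFFFF);
-        *page_num = (uint16_t)(entry & 0xFFFF);
-}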
-
-/*
- * Search the Level2 Cache table for the cache item.
- * If found, read the data from the NAND page of the L2 Cache;
- * otherwise, return FAIL.
- */
-static int search_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- u32 tmp = MAX_U32_VALUE;
- u32 phy_blk;
- u16 phy_page;
- int ret = FAIL;
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- tmp = pnd->pages_array[logical_page_num];
- break;
- }
- }
-
- if (tmp != MAX_U32_VALUE) { /* Found valid map */
- phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
- phy_page = tmp & 0xFFFF;
-#if CMD_DMA
- /* TODO */
-#else
- ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
-#endif
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_Page
-* Inputs: Pointer to buffer, page address, cache block number
-* Outputs: none
-* Description: It writes the data into the cache block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
- u8 cache_blk, u16 flag)
-{
- u8 *pDest;
- u64 addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- addr = Cache.array[cache_blk].address;
- pDest = Cache.array[cache_blk].buf;
-
- pDest += (unsigned long)(page_addr - addr);
- Cache.array[cache_blk].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = cache_blk;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[cache_blk].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[cache_blk].changed;
-#endif
- GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
- ftl_cmd_cnt++;
-#else
- memcpy(pDest, pData, DeviceInfo.wPageDataSize);
-#endif
- if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_blk].use_cnt++;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: It writes least frequently used Cache block to flash if it
-* has been changed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write(void)
-{
- int i, bResult = PASS;
- u16 bNO, least_count = 0xFFFF;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Calculate_LRU();
-
- bNO = Cache.LRU;
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
- "Least used cache block is %d\n", bNO);
-
- if (Cache.array[bNO].changed != SET)
- return bResult;
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
- " Block %d containing logical block %d is dirty\n",
- bNO,
- (u32)(Cache.array[bNO].address >>
- DeviceInfo.nBitsInBlockDataSize));
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = bNO;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[bNO].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
- Cache.array[bNO].address);
- if (bResult != ERR)
- Cache.array[bNO].changed = CLEAR;
-
- least_count = Cache.array[bNO].use_cnt;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (i == bNO)
- continue;
- if (Cache.array[i].use_cnt > 0)
- Cache.array[i].use_cnt -= least_count;
- }
-
- return bResult;
-}
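-
-/*
- * Editor's illustrative sketch (not part of the original driver): after the
- * least-used item is written back, the loop above ages every other item by
- * subtracting the written item's use count, so the relative usage ordering
- * is kept while the counters stay small.  A standalone version of that aging
- * step, with hypothetical names, could look like this:
- */
-#include <stdint.h>
-
-static void sketch_age_use_counts(uint16_t *use_cnt, int nitems, int lru)
-{
-        uint16_t least = use_cnt[lru];
-        int i;
-
-        for (i = 0; i < nitems; i++) {
-                if (i == lru)
-                        continue;
-                /* assumes lru indexes the smallest count, as the LRU
-                 * calculation guarantees, so this cannot underflow */
-                if (use_cnt[i] > 0)
-                        use_cnt[i] -= least;
-        }
-}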
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read
-* Inputs: Page address
-* Outputs: PASS=0 / FAIL=1
-* Description: It reads the block from device in Cache Block
-* Set the LRU count to 1
-* Mark the Cache Block as clean
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read(u64 logical_addr)
-{
- u64 item_addr, phy_addr;
- u16 num;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- num = Cache.LRU; /* The LRU cache item will be overwritten */
-
- item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
- Cache.cache_item_size;
- Cache.array[num].address = item_addr;
- Cache.array[num].use_cnt = 1;
- Cache.array[num].changed = CLEAR;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = num;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[num].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[num].changed;
-#endif
-#endif
- /*
- * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
- * Otherwise, read it from NAND
- */
- ret = search_l2_cache(Cache.array[num].buf, logical_addr);
- if (PASS == ret) /* Hit in L2 Cache */
- return ret;
-
- /* Compute the physical start address of NAND device according to */
- /* the logical start address of the cache item (LRU cache item) */
- phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
- GLOB_u64_Remainder(item_addr, 2);
-
- return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Check_Block_Table
-* Inputs: wOldTable (currently unused)
-* Outputs: PASS=0 / FAIL=1
-* Description: It checks the correctness of each block table entry
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Check_Block_Table(int wOldTable)
-{
- u32 i;
- int wResult = PASS;
- u32 blk_idx;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 *pFlag = flag_check_blk_table;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (NULL != pFlag) {
- memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
-
- /*
- * 20081006/KBV - Changed to pFlag[i] reference
- * to avoid buffer overflow
- */
-
- /*
- * 2008-10-20 Yunpeng Note: This change avoids the
- * buffer overflow, but changed the behavior of
- * the code, so it should be rewritten later
- */
- if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
- PASS == pFlag[i]) {
- wResult = FAIL;
- break;
- } else {
- pFlag[i] = PASS;
- }
- }
- }
-
- return wResult;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table
-* Inputs: wForce flag
-* Outputs: 0=Block table is current, no write done. 1=Block table write
-* was performed. ERR=Error
-* Description: It writes the block table
-* The block table is always mapped to LBA 0, which in turn is
-* mapped to any physical block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table(int wForce)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- int wSuccess = PASS;
- u32 wTempBlockTableIndex;
- u16 bt_pages, new_bt_offset;
- u8 blockchangeoccured = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
- return 0;
-
- if (PASS == wForce) {
- g_wBlockTableOffset =
- (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Inside FTL_Write_Block_Table: block %d Page:%d\n",
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- do {
- new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
- if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
- (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
- (FAIL == wSuccess)) {
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- if (!blockchangeoccured) {
- bt_block_changed = 1;
- blockchangeoccured = 1;
- }
-
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->g_wBlockTableIndex =
- g_wBlockTableIndex;
- p_BTableChangesDelta->ValidFields = 0x03;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- BLOCK_TABLE_INDEX;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[BLOCK_TABLE_INDEX];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- wSuccess = FTL_Write_Block_Table_Data();
- if (FAIL == wSuccess)
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- } while (FAIL == wSuccess);
-
- g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
-
- return 1;
-}
-
-static int force_format_nand(void)
-{
- u32 i;
-
- /* Force erase the whole unprotected physical partition of NAND */
- printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
- printk(KERN_ALERT "From phyical block %d to %d\n",
- DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
- for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
- if (GLOB_LLD_Erase_Block(i))
- printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
- }
- printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
- while (1)
- ; /* Halt here; the system must be rebooted */
-
- return PASS;
-}
-
-int GLOB_FTL_Flash_Format(void)
-{
- /* return FTL_Format_Flash(1); */
- return force_format_nand();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Search_Block_Table_IN_Block
-* Inputs: Block Number
-* Pointer to page
-* Outputs: PASS / FAIL
-* Page containing the block table
-* Description: It searches the block table in the block
-* passed as an argument.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page)
-{
- u16 i, j, k;
- u16 Result = PASS;
- u16 Last_IPF = 0;
- u8 BT_Found = 0;
- u8 *tagarray;
- u8 *tempbuf = tmp_buf_search_bt_in_block;
- u8 *pSpareBuf = spare_buf_search_bt_in_block;
- u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
- u8 bt_flag_last_page = 0xFF;
- u8 search_in_previous_pages = 0;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching block table in %u block\n",
- (unsigned int)BT_Block);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
- i += (bt_pages + 1)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching last IPF: %d\n", i);
- Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
- BT_Block, i, 1);
-
- if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
- if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
- continue;
- } else {
- search_in_previous_pages = 1;
- Last_IPF = i;
- }
- }
-
- if (!search_in_previous_pages) {
- if (i != bt_pages) {
- i -= (bt_pages + 1);
- Last_IPF = i;
- }
- }
-
- if (0 == Last_IPF)
- break;
-
- if (!search_in_previous_pages) {
- i = i + 1;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
- BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(
- pSpareBufBTLastPage, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found"
- " in page after IPF "
- "at block %d "
- "page %d\n",
- (int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- }
- }
- }
- }
-
- if (search_in_previous_pages)
- i = i - bt_pages;
- else
- i = i - (bt_pages + 1);
-
- Result = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %d Page %d",
- (int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j) {
- bt_flag_last_page = tagarray[k];
- } else {
- Result = FAIL;
- break;
- }
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page prior to IPF "
- "at block %u page %d\n",
- (unsigned int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- IN_PROGRESS_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- break;
- }
- }
- }
- }
-
- if (Result == FAIL) {
- if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
- BT_Found = 1;
- *Page = i - (bt_pages + 1);
- }
- if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
- goto func_return;
- }
-
- if (Last_IPF == 0) {
- i = 0;
- Result = PASS;
- nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
- "Block %u Page %u", (unsigned int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page after IPF at "
- "block %u page %u\n",
- (unsigned int)BT_Block,
- (unsigned int)i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- goto func_return;
- } else {
- Result = FAIL;
- }
- }
- }
-
- if (Result == FAIL)
- goto func_return;
- }
-func_return:
- return Result;
-}
-
-u8 *get_blk_table_start_addr(void)
-{
- return g_pBlockTable;
-}
-
-unsigned long get_blk_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u32);
-}
-
-u8 *get_wear_leveling_table_start_addr(void)
-{
- return g_pWearCounter;
-}
-
-unsigned long get_wear_leveling_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u8);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Block_Table
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: read the flash spare area and find a block containing the
-* most recent block table (having the largest block_table_counter).
-* Find the last written Block table in this block.
-* Check the correctness of Block Table
-* If CDMA is enabled, this function is called in
-* polling mode.
-* We don't need to store changes in Block table in this
-* function as it is called only at initialization
-*
-* Note: Currently this function is called at initialization
-* before any read/erase/write command issued to flash so,
-* there is no need to wait for CDMA list to complete as of now
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Read_Block_Table(void)
-{
- u16 i = 0;
- int k, j;
- u8 *tempBuf, *tagarray;
- int wResult = FAIL;
- int status = FAIL;
- u8 block_table_found = 0;
- int search_result;
- u32 Block;
- u16 Page = 0;
- u16 PageCount;
- u16 bt_pages;
- int wBytesCopied = 0, tempvar;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- tempBuf = tmp_buf1_read_blk_table;
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- for (j = DeviceInfo.wSpectraStartBlock;
- j <= (int)DeviceInfo.wSpectraEndBlock;
- j++) {
- status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
- k = 0;
- i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
- if (i) {
- status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
- j, 0, 1);
- for (; k < i; k++) {
- if (tagarray[k] == tempBuf[3])
- break;
- }
- }
-
- if (k < i)
- k = tagarray[k];
- else
- continue;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is contained in Block %d %d\n",
- (unsigned int)j, (unsigned int)k);
-
- if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
- g_pBTBlocks[k-FIRST_BT_ID] = j;
- block_table_found = 1;
- } else {
- printk(KERN_ERR "FTL_Read_Block_Table -"
- "This should never happens. "
- "Two block table have same counter %u!\n", k);
- }
- }
-
- if (block_table_found) {
- if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
- g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
- j = LAST_BT_ID;
- while ((j > FIRST_BT_ID) &&
- (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
- j--;
- if (j == FIRST_BT_ID) {
- j = LAST_BT_ID;
- last_erased = LAST_BT_ID;
- } else {
- last_erased = (u8)j + 1;
- while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
- g_pBTBlocks[j - FIRST_BT_ID]))
- j--;
- }
- } else {
- j = FIRST_BT_ID;
- while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
- j++;
- last_erased = (u8)j;
- while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
- g_pBTBlocks[j - FIRST_BT_ID]))
- j++;
- if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
- j--;
- }
-
- if (last_erased > j)
- j += (1 + LAST_BT_ID - FIRST_BT_ID);
-
- for (; (j >= last_erased) && (FAIL == wResult); j--) {
- i = (j - FIRST_BT_ID) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- search_result =
- FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
- i + FIRST_BT_ID, &Page);
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- block_table_found = 0;
-
- while ((search_result == PASS) && (FAIL == wResult)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Read_Block_Table:"
- "Block: %u Page: %u "
- "contains block table\n",
- (unsigned int)g_pBTBlocks[i],
- (unsigned int)Page);
-
- tempBuf = tmp_buf2_read_blk_table;
-
- for (k = 0; k < bt_pages; k++) {
- Block = g_pBTBlocks[i];
- PageCount = 1;
-
- status =
- GLOB_LLD_Read_Page_Main_Polling(
- tempBuf, Block, Page, PageCount);
-
- tempvar = k ? 0 : 4;
-
- wBytesCopied +=
- FTL_Copy_Block_Table_From_Flash(
- tempBuf + tempvar,
- DeviceInfo.wPageDataSize - tempvar,
- wBytesCopied);
-
- Page++;
- }
-
- wResult = FTL_Check_Block_Table(FAIL);
- if (FAIL == wResult) {
- block_table_found = 0;
- if (Page > bt_pages)
- Page -= ((bt_pages<<1) + 1);
- else
- search_result = FAIL;
- }
- }
- }
- }
-
- if (PASS == wResult) {
- if (!block_table_found)
- FTL_Execute_SPL_Recovery();
-
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- g_wBlockTableOffset = (u16)Page + 1;
- else
- g_wBlockTableOffset = (u16)Page - bt_pages;
-
- g_wBlockTableIndex = (u32)g_pBTBlocks[i];
-
-#if CMD_DMA
- if (DeviceInfo.MLCDevice)
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8)
- + DeviceInfo.wDataBlockNum * sizeof(u16));
- else
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8));
-#endif
- }
-
- if (FAIL == wResult)
- printk(KERN_ERR "Yunpeng - "
- "Can not find valid spectra block table!\n");
-
-#if AUTO_FORMAT_FLASH
- if (FAIL == wResult) {
- nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
- wResult = FTL_Format_Flash(0);
- }
-#endif
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Page_Num
-* Inputs: Size in bytes
-* Outputs: Size in pages
-* Description: It calculates the pages required for the length passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Page_Num(u64 length)
-{
- return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
- (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
-}
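-
-/*
- * Editor's illustrative sketch (not part of the original driver): the page
- * count above is a ceiling division of the byte length by the page size.
- * With a power-of-two page size of (1 << page_shift) bytes it reduces to a
- * shift plus a remainder test, as in this hypothetical helper:
- */
-#include <stdint.h>
-
-static uint32_t sketch_get_page_num(uint64_t length, uint32_t page_shift)
-{
-        uint64_t page_size = 1ULL << page_shift;
-
-        /* whole pages, plus one more page if any bytes are left over */
-        return (uint32_t)((length >> page_shift) +
-                          ((length & (page_size - 1)) ? 1 : 0));
-}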
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Physical_Block_Addr
-* Inputs: Block Address (byte format)
-* Outputs: Physical address of the block.
-* Description: It translates LBA to PBA by returning address stored
-* at the LBA location in the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
-{
- u32 *pbt;
- u64 physical_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- pbt = (u32 *)g_pBlockTable;
- physical_addr = (u64) DeviceInfo.wBlockDataSize *
- (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
-
- return physical_addr;
-}
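-
-/*
- * Editor's illustrative sketch (not part of the original driver): the
- * translation above divides the logical byte address down to a logical block
- * number, looks that slot up in the block table, masks off the status flags
- * kept in the entry's high bits, and scales the physical block number back
- * up to a byte address.  The helper name and the flag_mask parameter below
- * are hypothetical.
- */
-#include <stdint.h>
-
-static uint64_t sketch_logical_to_physical(uint64_t logical_addr,
-                                           const uint32_t *block_table,
-                                           uint64_t block_size,
-                                           uint32_t flag_mask)
-{
-        uint32_t lba = (uint32_t)(logical_addr / block_size);
-        uint32_t pba = block_table[lba] & ~flag_mask;
-
-        return (uint64_t)pba * block_size;
-}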
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Block_Index
-* Inputs: Physical Block no.
-* Outputs: Logical block no. /BAD_BLOCK
-* Description: It returns the logical block no. for the PBA passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Block_Index(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
- return i;
-
- return BAD_BLOCK;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0
-* Description: This is static wear leveling (done by explicit call)
-* do complete static wear leveling
-* do complete garbage collection
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Wear_Leveling(void)
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Static_Wear_Leveling();
- GLOB_FTL_Garbage_Collection();
-
- return PASS;
-}
-
-static void find_least_most_worn(u8 *chg,
- u32 *least_idx, u8 *least_cnt,
- u32 *most_idx, u8 *most_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 idx;
- u8 cnt;
- int i;
-
- for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i) || PASS == chg[i])
- continue;
-
- idx = (u32) ((~BAD_BLOCK) & pbt[i]);
- cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
-
- if (IS_SPARE_BLOCK(i)) {
- if (cnt > *most_cnt) {
- *most_cnt = cnt;
- *most_idx = idx;
- }
- }
-
- if (IS_DATA_BLOCK(i)) {
- if (cnt < *least_cnt) {
- *least_cnt = cnt;
- *least_idx = idx;
- }
- }
-
- if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
- debug_boundary_error(*most_idx,
- DeviceInfo.wDataBlockNum, 0);
- debug_boundary_error(*least_idx,
- DeviceInfo.wDataBlockNum, 0);
- continue;
- }
- }
-}
-
-static int move_blks_for_wear_leveling(u8 *chg,
- u32 *least_idx, u32 *rep_blk_num, int *result)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 rep_blk;
- int j, ret_cp_blk, ret_erase;
- int ret = PASS;
-
- chg[*least_idx] = PASS;
- debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
-
- rep_blk = FTL_Replace_MWBlock();
- if (rep_blk != BAD_BLOCK) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "More than two spare blocks exist so do it\n");
- nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
- rep_blk);
-
- chg[rep_blk] = PASS;
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- for (j = 0; j < RETRY_TIMES; j++) {
- ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
- DeviceInfo.wBlockDataSize,
- (u64)rep_blk * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_cp_blk) {
- ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
- * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_erase)
- MARK_BLOCK_AS_BAD(pbt[rep_blk]);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Copy_Block == OK\n");
- break;
- }
- }
-
- if (j < RETRY_TIMES) {
- u32 tmp;
- u32 old_idx = FTL_Get_Block_Index(*least_idx);
- u32 rep_idx = FTL_Get_Block_Index(rep_blk);
- tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
- pbt[old_idx] = (u32)((~SPARE_BLOCK) &
- pbt[rep_idx]);
- pbt[rep_idx] = tmp;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = old_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- } else {
- pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- FTL_Get_Block_Index(rep_blk);
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[FTL_Get_Block_Index(rep_blk)];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- *result = FAIL;
- ret = FAIL;
- }
-
- if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
- ret = FAIL;
- } else {
- printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
- ret = FAIL;
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Static_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: This is static wear leveling (done by explicit call)
-* search for most&least used
-* if difference < GATE:
-* update the block table with exhange
-* mark block table in flash as IN_PROGRESS
-* copy flash block
-* the caller should handle GC clean up after calling this function
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Static_Wear_Leveling(void)
-{
- u8 most_worn_cnt;
- u8 least_worn_cnt;
- u32 most_worn_idx;
- u32 least_worn_idx;
- int result = PASS;
- int go_on = PASS;
- u32 replaced_blks = 0;
- u8 *chang_flag = flags_static_wear_leveling;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!chang_flag)
- return FAIL;
-
- memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
- while (go_on == PASS) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "starting static wear leveling\n");
- most_worn_cnt = 0;
- least_worn_cnt = 0xFF;
- least_worn_idx = BLOCK_TABLE_INDEX;
- most_worn_idx = BLOCK_TABLE_INDEX;
-
- find_least_most_worn(chang_flag, &least_worn_idx,
- &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Used and least worn is block %u, whos count is %u\n",
- (unsigned int)least_worn_idx,
- (unsigned int)least_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Free and most worn is block %u, whos count is %u\n",
- (unsigned int)most_worn_idx,
- (unsigned int)most_worn_cnt);
-
- if ((most_worn_cnt > least_worn_cnt) &&
- (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
- go_on = move_blks_for_wear_leveling(chang_flag,
- &least_worn_idx, &replaced_blks, &result);
- else
- go_on = FAIL;
- }
-
- return result;
-}
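-
-/*
- * Editor's illustrative sketch (not part of the original driver): static
- * wear leveling only swaps blocks when the gap between the most worn free
- * block and the least worn data block exceeds the gate, as in the
- * WEAR_LEVELING_GATE comparison above.  A hypothetical standalone predicate:
- */
-#include <stdbool.h>
-#include <stdint.h>
-
-static bool sketch_should_swap(uint8_t most_worn_cnt, uint8_t least_worn_cnt,
-                               uint8_t gate)
-{
-        return most_worn_cnt > least_worn_cnt &&
-               (uint8_t)(most_worn_cnt - least_worn_cnt) > gate;
-}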
-
-#if CMD_DMA
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
- ((ftl_cmd_cnt + 28) < 256)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %u\n",
- (unsigned int)pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = i;
- p_BTableChangesDelta->BT_Entry_Value = pbt[i];
- p_BTableChangesDelta->ValidFields = 0x0C;
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-
-#else
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %d\n",
- pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- /* If the discard block is L2 cache block, then just skip it */
- for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
- if (cache_l2.blk_array[cnt] == pba) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase L2 cache blk %d\n",
- pba);
- break;
- }
- }
- if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
- discard_cnt--;
- i++;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-#endif
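-
-/*
- * Editor's illustrative sketch (not part of the original driver): both
- * variants above walk the block table, pick entries that are discarded but
- * not bad, erase them, and then flip the entry from DISCARD to SPARE so the
- * block can be reused.  The flag values and names below are hypothetical
- * stand-ins for the driver's own bit layout.
- */
-#include <stdint.h>
-
-#define SK_BAD_BLOCK     0x80000000u
-#define SK_DISCARD_BLOCK 0x40000000u
-#define SK_SPARE_BLOCK   0x20000000u
-
-/* A block that is discarded and not bad is a garbage-collection candidate. */
-static int sketch_is_gc_candidate(uint32_t entry)
-{
-        return (entry & SK_BAD_BLOCK) != SK_BAD_BLOCK &&
-               (entry & SK_DISCARD_BLOCK);
-}
-
-/* After a successful erase the entry becomes a spare block again. */
-static void sketch_mark_erased(uint32_t *entry)
-{
-        *entry &= ~SK_DISCARD_BLOCK;
-        *entry |= SK_SPARE_BLOCK;
-}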
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: search the block table for all discarded blocks to erase
-* for each discarded block:
-* set the flash block to IN_PROGRESS
-* erase the block
-* update the block table
-* write the block table to flash
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Garbage_Collection(void)
-{
- u32 i;
- u32 wDiscard = 0;
- int wResult = FAIL;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (GC_Called) {
- printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
- "has been re-entered! Exit.\n");
- return PASS;
- }
-
- GC_Called = 1;
-
- GLOB_FTL_BT_Garbage_Collection();
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscard++;
- }
-
- if (wDiscard <= 0) {
- GC_Called = 0;
- return wResult;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Found %d discarded blocks\n", wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- wResult = do_garbage_collection(wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- GC_Called = 0;
-
- return wResult;
-}
-
-
-#if CMD_DMA
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
- ((ftl_cmd_cnt + 28)) < 256; i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection_cdma: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = lba;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[lba];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#else
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_BT_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: Erases discarded blocks containing Block table
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_BT_Garbage_Collection(void)
-{
- return do_bt_garbage_collection();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_OneBlock
-* Inputs: Block number 1
-* Block number 2
-* Outputs: Replaced Block Number
-* Description: Interchange block table entries at wBlockNum and wReplaceNum
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
-{
- u32 tmp_blk;
- u32 replace_node = BAD_BLOCK;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (rep_blk != BAD_BLOCK) {
- if (IS_BAD_BLOCK(blk))
- tmp_blk = pbt[blk];
- else
- tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
-
- replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
- pbt[blk] = replace_node;
- pbt[rep_blk] = tmp_blk;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- return replace_node;
-}
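-
-/*
- * Editor's illustrative sketch (not part of the original driver): the
- * replacement above swaps two block table slots so that the logical block
- * now uses the spare block's physical block, while the old physical block is
- * tagged DISCARD (in the spare slot) so garbage collection will erase it.
- * The flag bits and names below are hypothetical simplifications of the
- * non-bad-block path.
- */
-#include <stdint.h>
-
-#define SK2_SPARE_BLOCK   0x20000000u
-#define SK2_DISCARD_BLOCK 0x40000000u
-
-static uint32_t sketch_replace_one_block(uint32_t *table, uint32_t blk,
-                                         uint32_t rep_blk)
-{
-        uint32_t old_entry = SK2_DISCARD_BLOCK |
-                             (table[blk] & ~SK2_SPARE_BLOCK);
-        uint32_t new_entry = table[rep_blk] & ~SK2_SPARE_BLOCK;
-
-        table[blk] = new_entry;        /* logical slot now maps to the spare */
-        table[rep_blk] = old_entry;    /* old block queued for erase */
-
-        return new_entry;
-}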
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table_Data
-* Inputs: Block table size in pages
-* Outputs: PASS=0 / FAIL=1
-* Description: Write block table data in flash
-* If first page and last page
-* Write data+BT flag
-* else
-* Write data
-* BT flag is a counter. Its value is incremented for each block table
-* write to a new block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table_Data(void)
-{
- u64 dwBlockTableAddr, pTempAddr;
- u32 Block;
- u16 Page, PageCount;
- u8 *tempBuf = tmp_buf_write_blk_table_data;
- int wBytesCopied;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dwBlockTableAddr =
- (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
- (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
- pTempAddr = dwBlockTableAddr;
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
- "page= %d BlockTableIndex= %d "
- "BlockTableOffset=%d\n", bt_pages,
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- Block = BLK_FROM_ADDR(pTempAddr);
- Page = PAGE_FROM_ADDR(pTempAddr, Block);
- PageCount = 1;
-
- if (bt_block_changed) {
- if (bt_flag == LAST_BT_ID) {
- bt_flag = FIRST_BT_ID;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- } else if (bt_flag < LAST_BT_ID) {
- bt_flag++;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- }
-
- if ((bt_flag > (LAST_BT_ID-4)) &&
- g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
- BTBLOCK_INVAL) {
- bt_block_changed = 0;
- GLOB_FTL_BT_Garbage_Collection();
- }
-
- bt_block_changed = 0;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Counter is %u Block %u\n",
- bt_flag, (unsigned int)Block);
- }
-
- memset(tempBuf, 0, 3);
- tempBuf[3] = bt_flag;
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
- DeviceInfo.wPageDataSize - 4, 0);
- memset(&tempBuf[wBytesCopied + 4], 0xff,
- DeviceInfo.wPageSize - (wBytesCopied + 4));
- FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
- bt_flag);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
- "Block %u Page %u\n", (unsigned int)Block, Page);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
- Block, Page, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
-
- if (bt_pages > 1) {
- PageCount = bt_pages - 1;
- if (PageCount > 1) {
- wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize * (PageCount - 1),
- wBytesCopied);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- (PageCount - 1) * DeviceInfo.wPageDataSize);
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pNextBlockTable, Block, Page + 1,
- PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += (PageCount - 1) *
- DeviceInfo.wPageDataSize * sizeof(u8);
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
- Block, Page + 1, PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-#endif
- }
-
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize, wBytesCopied);
- memset(&tempBuf[wBytesCopied], 0xff,
- DeviceInfo.wPageSize-wBytesCopied);
- FTL_Insert_Block_Table_Signature(
- &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG,
- "Writing the last Page of Block Table "
- "Block %u Page %u\n",
- (unsigned int)Block, Page + bt_pages - 1);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
- g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
- LLD_CMD_FLAG_MODE_CDMA |
- LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
- Block, Page+bt_pages - 1, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
-
-func_return:
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block_Table
-* Inputs: None
-* Outputs: PASS=0 / FAIL=1
-* Description: Get a new block to write the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_Block_Table(void)
-{
- u32 blk;
- int gc;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
-
- if ((BAD_BLOCK == blk) && (PASS == gc)) {
- GLOB_FTL_Garbage_Collection();
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
- }
- if (BAD_BLOCK == blk)
- printk(KERN_ERR "%s, %s: There is no spare block. "
- "It should never happen\n",
- __FILE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
-
- return blk;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_LWBlock
-* Inputs: Block number
-* Pointer to Garbage Collect flag
-* Outputs:
-* Description: Determine the least worn block by traversing the
-* block table
-* Set the garbage collection flag if the number of spare
-* blocks is less than the Free Block Gate count
-* Change the block table entry to map the least worn block
-* for the current operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wLeastWornCounter = 0xFF;
- u32 wLeastWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wDiscardBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(wBlockNum)) {
- *pGarbageCollect = FAIL;
- pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- return pbt[wBlockNum];
- }
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscardBlockNum++;
-
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
- if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "FTL_Replace_LWBlock: "
- "This should never occur!\n");
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastWornCounter) {
- wLeastWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastWornIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "FTL_Replace_LWBlock: Least Worn Counter %d\n",
- (int)wLeastWornCounter);
-
- if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
- (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
- *pGarbageCollect = PASS;
- else
- *pGarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
- " Blocks %u\n",
- (unsigned int)wDiscardBlockNum,
- (unsigned int)wSpareBlockNum);
-
- return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_MWBlock
-* Inputs: None
-* Outputs: most worn spare block no./BAD_BLOCK
-* Description: It finds the most worn spare block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_MWBlock(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wMostWornCounter = 0;
- u32 wMostWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] >
- wMostWornCounter) {
- wMostWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wMostWornIndex = wPhysicalIndex;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= 2)
- return BAD_BLOCK;
-
- return wMostWornIndex;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block
-* Inputs: Block Address
-* Outputs: PASS=0 / FAIL=1
-* Description: If block specified by blk_addr parameter is not free,
-* replace it with the least worn block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Replace_Block(u64 blk_addr)
-{
- u32 current_blk = BLK_FROM_ADDR(blk_addr);
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
- int GarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(current_blk)) {
- pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = current_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
- p_BTableChangesDelta->ValidFields = 0x0C ;
-#endif
- return wResult;
- }
-
- FTL_Replace_LWBlock(current_blk, &GarbageCollect);
-
- if (PASS == GarbageCollect)
- wResult = GLOB_FTL_Garbage_Collection();
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Is_BadBlock
-* Inputs: block number to test
-* Outputs: PASS (block is BAD) / FAIL (block is not bad)
-* Description: test if this block number is flagged as bad
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (wBlockNum >= DeviceInfo.wSpectraStartBlock
- && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
- return PASS;
- else
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flush_Cache
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: flush all the cache blocks to flash
-* if a cache block is not dirty, don't do anything with it
-* else, write the block and update the block table
-* Note: This function should be called at shutdown/power down
-* to write important data to the device
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flush_Cache(void)
-{
- int i, ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (SET == Cache.array[i].changed) {
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = i;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[i].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
- if (PASS == ret) {
- Cache.array[i].changed = CLEAR;
- } else {
- printk(KERN_ALERT "Failed when write back to L2 cache!\n");
- /* TODO - How to handle this? */
- }
- }
- }
-
- flush_l2_cache();
-
- return FTL_Write_Block_Table(FAIL);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Read
-* Inputs: pointer to data
-* logical address of data (u64 is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: reads a page of data into RAM from the cache
-* if the data is not already in cache, read from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
-{
- u16 cache_item;
- int res = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
- "page_addr: %llu\n", logical_addr);
-
- cache_item = FTL_Cache_If_Hit(logical_addr);
-
- if (UNHIT_CACHE_ITEM == cache_item) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GLOB_FTL_Page_Read: Cache not hit\n");
- res = FTL_Cache_Write();
- if (ERR == FTL_Cache_Read(logical_addr))
- res = ERR;
- cache_item = Cache.LRU;
- }
-
- FTL_Cache_Read_Page(data, logical_addr, cache_item);
-
- return res;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Write
-* Inputs: pointer to data
-* address of data (ADDRESSTYPE is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: writes a page of data from RAM to the cache
-* if the data is not already in cache, write back the
-* least recently used block and read the addressed block
-* from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
-{
- u16 cache_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
- "dwPageAddr: %llu\n", dwPageAddr);
-
- cache_blk = FTL_Cache_If_Hit(dwPageAddr);
-
- if (UNHIT_CACHE_ITEM == cache_blk) {
- wResult = FTL_Cache_Write();
- if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
- wResult = FTL_Replace_Block(dwPageAddr);
- pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
- if (wResult == FAIL)
- return FAIL;
- }
- if (ERR == FTL_Cache_Read(dwPageAddr))
- wResult = ERR;
- cache_blk = Cache.LRU;
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
- } else {
-#if CMD_DMA
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
- LLD_CMD_FLAG_ORDER_BEFORE_REST);
-#else
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
-#endif
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Block_Erase
-* Inputs: address of block to erase (now in byte format, should change to
-* block format)
-* Outputs: PASS=0 / FAIL=1
-* Description: erases the specified block
-* increments the erase count
-* If erase count reaches its upper limit, call function to
-* do the adjustment as per the relative erase count values
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Block_Erase(u64 blk_addr)
-{
- int status;
- u32 BlkIdx;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
-
- if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
- printk(KERN_ERR "GLOB_FTL_Block_Erase: "
- "This should never occur\n");
- return FAIL;
- }
-
-#if CMD_DMA
- status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
- if (status == FAIL)
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
-#else
- status = GLOB_LLD_Erase_Block(BlkIdx);
- if (status == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
- return status;
- }
-#endif
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-
- if (DeviceInfo.MLCDevice) {
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[BlkIdx -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
- }
-
- ftl_cmd_cnt++;
-#endif
-
- if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
- FTL_Adjust_Relative_Erase_Count(BlkIdx);
-
- return status;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Adjust_Relative_Erase_Count
-* Inputs: index to block that was just incremented and is at the max
-* Outputs: PASS=0 / FAIL=1
-* Description: If any erase count is at MAX, adjusts the erase count of
-* every block by subtracting the least worn counter value
-* from every entry in the wear table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
-{
- u8 wLeastWornCounter = MAX_BYTE_VALUE;
- u8 wWearCounter;
- u32 i, wWearIndex;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i))
- continue;
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
-
-		if (wWearIndex < DeviceInfo.wSpectraStartBlock)
-			printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count: "
-				"This should never occur\n");
- wWearCounter = g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- if (wWearCounter < wLeastWornCounter)
- wLeastWornCounter = wWearCounter;
- }
-
- if (wLeastWornCounter == 0) {
- nand_dbg_print(NAND_DBG_WARN,
- "Adjusting Wear Levelling Counters: Special Case\n");
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock]--;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- Index_of_MAX - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- FTL_Static_Wear_Leveling();
- } else {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (!IS_BAD_BLOCK(i)) {
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock] =
- (u8)(g_pWearCounter
- [wWearIndex -
- DeviceInfo.wSpectraStartBlock] -
- wLeastWornCounter);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index = wWearIndex -
- DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- }
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_IN_Progress_Block_Table_Page
-* Inputs: None
-* Outputs: None
-* Description: It writes the in-progress flag page to the page next
-* to the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_IN_Progress_Block_Table_Page(void)
-{
- int wResult = PASS;
- u16 bt_pages;
- u16 dwIPFPageAddr;
-#if CMD_DMA
-#else
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 wTempBlockTableIndex;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
- "Block %d Page %d\n",
- g_wBlockTableIndex, dwIPFPageAddr);
-
-#if CMD_DMA
- wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- g_wBlockTableIndex);
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
- p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
- ftl_cmd_cnt++;
-#else
- wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)g_wBlockTableIndex);
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- bt_block_changed = 1;
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
-		/* Block table tag is '00'. It means this is a used block */
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
- return FAIL;
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Disturbance
-* Inputs: block address
-* Outputs: PASS=0 / FAIL=1
-* Description: used to handle read disturbance. Data in a block that
-* reaches its read limit is moved to a new block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Read_Disturbance(u32 blk_addr)
-{
- int wResult = FAIL;
- u32 *pbt = (u32 *) g_pBlockTable;
- u32 dwOldBlockAddr = blk_addr;
- u32 wBlockNum;
- u32 i;
- u32 wLeastReadCounter = 0xFFFF;
- u32 wLeastReadIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wTempNode;
- u32 wReplacedNode;
- u8 *g_pTempBuf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
- cp_back_buf_idx++;
- if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
-		printk(KERN_ERR "cp_back_buf_copies overflow! Exit. "
-			"Maybe too many pending commands in your CDMA chain.\n");
- return FAIL;
- }
-#else
- g_pTempBuf = tmp_buf_read_disturbance;
-#endif
-
- wBlockNum = FTL_Get_Block_Index(blk_addr);
-
- do {
-		/* This is a bug. Here 'i' should be the logical block number
-		 * and start from 1 (0 is reserved for the block table).
-		 * Fixed. - Yunpeng 2008.12.19
-		 */
- for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex =
- (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastReadCounter) {
- wLeastReadCounter =
- g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastReadIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
- wResult = GLOB_FTL_Garbage_Collection();
- if (PASS == wResult)
- continue;
- else
- break;
- } else {
- wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
- wReplacedNode = (u32)((~SPARE_BLOCK) &
- pbt[wLeastReadIndex]);
-#if CMD_DMA
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wBlockNum;
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wLeastReadIndex;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[wLeastReadIndex];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
- LLD_CMD_FLAG_MODE_CDMA);
- if (wResult == FAIL)
- return wResult;
-
- ftl_cmd_cnt++;
-
- if (wResult != FAIL) {
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pTempBuf, pbt[wBlockNum], 0,
- DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)pbt[wBlockNum]);
- wResult = FAIL;
- MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
- }
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL)
- return wResult;
-
- if (wResult != FAIL) {
-			/* This is a bug. At this time, pbt[wBlockNum]
-			   is still the physical address of the
-			   discarded block, and should not be written to.
-			   Fixed as below.
-			   -- Yunpeng 2008.12.19
-			 */
- wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
- wReplacedNode, 0,
- DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)wReplacedNode);
- MARK_BLOCK_AS_BAD(wReplacedNode);
- } else {
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- }
- }
-
- if ((wResult == PASS) && (g_cBlockTableStatus !=
- IN_PROGRESS_BLOCK_TABLE)) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-#endif
- }
-	} while (wResult != PASS);
-
-#if CMD_DMA
- /* ... */
-#endif
-
- return wResult;
-}
-
diff --git a/drivers/staging/spectra/flash.h b/drivers/staging/spectra/flash.h
deleted file mode 100644
index e59cf4ede55..00000000000
--- a/drivers/staging/spectra/flash.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FLASH_INTERFACE_
-#define _FLASH_INTERFACE_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-
-#define MAX_BYTE_VALUE 0xFF
-#define MAX_WORD_VALUE 0xFFFF
-#define MAX_U32_VALUE 0xFFFFFFFF
-
-#define MAX_BLOCKNODE_VALUE 0xFFFFFF
-#define DISCARD_BLOCK 0x800000
-#define SPARE_BLOCK 0x400000
-#define BAD_BLOCK 0xC00000
-
-#define UNHIT_CACHE_ITEM 0xFFFF
-
-#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
-
-#define IN_PROGRESS_BLOCK_TABLE 0x00
-#define CURRENT_BLOCK_TABLE 0x01
-
-#define BTSIG_OFFSET (0)
-#define BTSIG_BYTES (5)
-#define BTSIG_DELTA (3)
-
-#define MAX_READ_COUNTER 0x2710
-
-#define FIRST_BT_ID (1)
-#define LAST_BT_ID (254)
-#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
-
-struct device_info_tag {
- u16 wDeviceMaker;
- u16 wDeviceID;
- u32 wDeviceType;
- u32 wSpectraStartBlock;
- u32 wSpectraEndBlock;
- u32 wTotalBlocks;
- u16 wPagesPerBlock;
- u16 wPageSize;
- u16 wPageDataSize;
- u16 wPageSpareSize;
- u16 wNumPageSpareFlag;
- u16 wECCBytesPerSector;
- u32 wBlockSize;
- u32 wBlockDataSize;
- u32 wDataBlockNum;
- u8 bPlaneNum;
- u16 wDeviceMainAreaSize;
- u16 wDeviceSpareAreaSize;
- u16 wDevicesConnected;
- u16 wDeviceWidth;
- u16 wHWRevision;
- u16 wHWFeatures;
-
- u16 wONFIDevFeatures;
- u16 wONFIOptCommands;
- u16 wONFITimingMode;
- u16 wONFIPgmCacheTimingMode;
-
- u16 MLCDevice;
- u16 wSpareSkipBytes;
-
- u8 nBitsInPageNumber;
- u8 nBitsInPageDataSize;
- u8 nBitsInBlockDataSize;
-};
-
-extern struct device_info_tag DeviceInfo;
-
-/* Cache item format */
-struct flash_cache_item_tag {
- u64 address;
- u16 use_cnt;
- u16 changed;
- u8 *buf;
-};
-
-struct flash_cache_tag {
- u32 cache_item_size; /* Size in bytes of each cache item */
- u16 pages_per_item; /* How many NAND pages in each cache item */
- u16 LRU; /* No. of the least recently used cache item */
- struct flash_cache_item_tag array[CACHE_ITEM_NUM];
-};
-
-/*
- *Data structure for each list node of the management table
- * used for the Level 2 Cache. Each node maps one logical NAND block.
- */
-struct spectra_l2_cache_list {
- struct list_head list;
- u32 logical_blk_num; /* Logical block number */
- u32 pages_array[]; /* Page map array of this logical block.
- * Array index is the logical block number,
-			* and for every item of this array:
-			* the high 16 bits are the index of the L2 cache block,
-			* the low 16 bits are the physical page number
-			* within that L2 cache block.
-			* This array will be kmalloc'ed at run time.
- */
-};
-
-struct spectra_l2_cache_info {
- u32 blk_array[BLK_NUM_FOR_L2_CACHE];
-	u16 cur_blk_idx; /* Index of the physical block currently in use */
-	u16 cur_page_num; /* Page number currently in use in that block */
- struct spectra_l2_cache_list table; /* First node of the table */
-};
-
-#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-struct flash_cache_mod_item_tag {
- u64 address;
- u8 changed;
-};
-
-struct flash_cache_delta_list_tag {
- u8 item; /* used cache item */
- struct flash_cache_mod_item_tag cache;
-};
-#endif
-
-extern struct flash_cache_tag Cache;
-
-extern u8 *buf_read_page_main_spare;
-extern u8 *buf_write_page_main_spare;
-extern u8 *buf_read_page_spare;
-extern u8 *buf_get_bad_block;
-extern u8 *cdma_desc_buf;
-extern u8 *memcp_desc_buf;
-
-/* Structure used for the IdentifyDevice function */
-struct spectra_indentfy_dev_tag {
- u32 NumBlocks;
- u16 PagesPerBlock;
- u16 PageDataSize;
- u16 wECCBytesPerSector;
- u32 wDataBlockNum;
-};
-
-int GLOB_FTL_Flash_Init(void);
-int GLOB_FTL_Flash_Release(void);
-/*void GLOB_FTL_Erase_Flash(void);*/
-int GLOB_FTL_Block_Erase(u64 block_addr);
-int GLOB_FTL_Is_BadBlock(u32 block_num);
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
-int GLOB_FTL_Event_Status(int *);
-u16 glob_ftl_execute_cmds(void);
-
-/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
-int FTL_Read_Disturbance(u32 dwBlockAddr);
-
-/*Flash r/w based on cache*/
-int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
-int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
-int GLOB_FTL_Wear_Leveling(void);
-int GLOB_FTL_Flash_Format(void);
-int GLOB_FTL_Init(void);
-int GLOB_FTL_Flush_Cache(void);
-int GLOB_FTL_Garbage_Collection(void);
-int GLOB_FTL_BT_Garbage_Collection(void);
-void GLOB_FTL_Cache_Release(void);
-u8 *get_blk_table_start_addr(void);
-u8 *get_wear_leveling_table_start_addr(void);
-unsigned long get_blk_table_len(void);
-unsigned long get_wear_leveling_table_len(void);
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
- char *filename);
-#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
- limit, no, __LINE__, __FILE__)
-#else
-#define debug_boundary_error(chnl, limit, no) ;
-#endif
-
-#endif /*_FLASH_INTERFACE_*/
diff --git a/drivers/staging/spectra/lld.c b/drivers/staging/spectra/lld.c
deleted file mode 100644
index 5c3b9762dc3..00000000000
--- a/drivers/staging/spectra/lld.c
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "spectraswconfig.h"
-#include "ffsport.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
-#include "lld_emu.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return emu_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return emu_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return emu_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return emu_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return emu_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return emu_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return emu_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_EMU */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
-#include "lld_mtd.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return mtd_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return mtd_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return mtd_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return mtd_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return mtd_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return mtd_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return mtd_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_MTD */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "flash.h"
-
-/* common functions for LLD_NAND */
-void GLOB_LLD_ECC_Control(int enable)
-{
- NAND_ECC_Ctrl(enable);
-}
-
-/* common functions for LLD_NAND */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return NAND_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return NAND_Read_Device_ID();
-}
-
-u16 GLOB_LLD_UnlockArrayAll(void)
-{
- return NAND_UnlockArrayAll();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return NAND_Flash_Init();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return nand_release_spectra();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return NAND_Erase_Block(block_add);
-}
-
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- if (page_count == 1) /* Using polling to improve read speed */
- return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
- else
- return NAND_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Polling(read_data,
- block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return NAND_Get_Bad_Block(block);
-}
-
-#if CMD_DMA
-u16 GLOB_LLD_Event_Status(void)
-{
- return CDMA_Event_Status();
-}
-
-u16 glob_lld_execute_cmds(void)
-{
- return CDMA_Execute_CMDs();
-}
-
-u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
- u32 ByteCount, u16 flag)
-{
- /* Replace the hardware memcopy with software memcpy function */
- if (CDMA_Execute_CMDs())
- return FAIL;
- memcpy(dest, src, ByteCount);
- return PASS;
-
- /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
-}
-
-u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
-{
- return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
-}
-
-u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
- data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
- LLD_CMD_FLAG_MODE_CDMA);
-}
-
-#endif /* CMD_DMA */
-#endif /* FLASH_NAND */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-/* end of LLD.c */
diff --git a/drivers/staging/spectra/lld.h b/drivers/staging/spectra/lld.h
deleted file mode 100644
index d3738e0e1fe..00000000000
--- a/drivers/staging/spectra/lld.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-
-
-#ifndef _LLD_
-#define _LLD_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-#include "flash.h"
-
-#define GOOD_BLOCK 0
-#define DEFECTIVE_BLOCK 1
-#define READ_ERROR 2
-
-#define CLK_X 5
-#define CLK_MULTI 4
-
-/* Typedefs */
-
-/* prototypes: API for LLD */
-/* Currently, Write_Page_Main,
- * MemCopy and
- * Read_Page_Main_Spare
- * do not take a flag argument because they were not implemented with one.
- * Flags are not being added now, to keep changes to a minimum.
- * Currently, flags are not required (only needed for Write_Page_Main_Spare).
- * Later on, these NEED to be changed.
- */
-
-extern void GLOB_LLD_ECC_Control(int enable);
-
-extern u16 GLOB_LLD_Flash_Reset(void);
-
-extern u16 GLOB_LLD_Read_Device_ID(void);
-
-extern u16 GLOB_LLD_UnlockArrayAll(void);
-
-extern u16 GLOB_LLD_Flash_Init(void);
-
-extern int GLOB_LLD_Flash_Release(void);
-
-extern u16 GLOB_LLD_Erase_Block(u32 block_add);
-
-extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
-
-extern u16 GLOB_LLD_Event_Status(void);
-
-extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
-
-extern u16 glob_lld_execute_cmds(void);
-
-extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
-#define LLD_CMD_FLAG_MODE_CDMA (0x8)
-
-
-#endif /*_LLD_ */
-
-
diff --git a/drivers/staging/spectra/lld_cdma.c b/drivers/staging/spectra/lld_cdma.c
deleted file mode 100644
index c6e76103d43..00000000000
--- a/drivers/staging/spectra/lld_cdma.c
+++ /dev/null
@@ -1,910 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "spectraswconfig.h"
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "lld_emu.h"
-#include "flash.h"
-#include "nand_regs.h"
-
-#define MAX_PENDING_CMDS 4
-#define MODE_02 (0x2 << 26)
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Data_CMD
-* Inputs: cmd code (aligned for hw)
-* data: pointer to source or destination
-* block: block address
-* page: page address
-* num: num pages to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
-{
- u8 bank;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (0 == cmd)
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
-
- /* If a command of another bank comes, then first execute */
- /* pending commands of the current bank, then set the new */
- /* bank as current bank */
- bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
- if (bank != info.flash_bank) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will access new bank. old bank: %d, new bank: %d\n",
- info.flash_bank, bank);
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- info.flash_bank = bank;
- }
-
- info.pcmds[info.pcmds_num].CMD = cmd;
- info.pcmds[info.pcmds_num].DataAddr = data;
- info.pcmds[info.pcmds_num].Block = block;
- info.pcmds[info.pcmds_num].Page = page;
- info.pcmds[info.pcmds_num].PageCount = num;
- info.pcmds[info.pcmds_num].DataDestAddr = 0;
- info.pcmds[info.pcmds_num].DataSrcAddr = 0;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- switch (cmd) {
- case WRITE_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Log2Phy_Format(data, num);
- break;
- case WRITE_SPARE_CMD:
- Conv_Spare_Data_Log2Phy_Format(data);
- break;
- default:
- break;
- }
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_MemCopy_CMD
-* Inputs: dest: pointer to destination
-* src: pointer to source
-* count: num bytes to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
- info.pcmds[info.pcmds_num].DataAddr = 0;
- info.pcmds[info.pcmds_num].Block = 0;
- info.pcmds[info.pcmds_num].Page = 0;
- info.pcmds[info.pcmds_num].PageCount = 0;
- info.pcmds[info.pcmds_num].DataDestAddr = dest;
- info.pcmds[info.pcmds_num].DataSrcAddr = src;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
-
-#if 0
-/* Prints the PendingCMDs array */
-void print_pending_cmds(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < info.pcmds_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Erase Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case MEMCOPY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Memcopy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case DUMMY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Dummy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- default:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Illegal Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
- (u32)info.pcmds[i].DataAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
- info.pcmds[i].Block);
- nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
- info.pcmds[i].Page);
- nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
- info.pcmds[i].PageCount);
- nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
- (u32)info.pcmds[i].DataDestAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
- (u32)info.pcmds[i].DataSrcAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
- info.pcmds[i].MemCopyByteCnt);
- nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
- info.pcmds[i].Flags);
- nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
- info.pcmds[i].Status);
- }
-}
-
-/* Print the CDMA descriptors */
-void print_cdma_descriptors(void)
-{
- struct cdma_descriptor *pc;
- int i;
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pc[i].NxtPointerHi, pc[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
- pc[i].FlashPointerHi, pc[i].FlashPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
- pc[i].CommandType);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
- pc[i].MemAddrHi, pc[i].MemAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
- pc[i].CommandFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
- pc[i].Channel, pc[i].Status);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
- pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reserved12: 0x%x, Reserved13: 0x%x, "
- "Reserved14: 0x%x, pcmd: %d\n",
- pc[i].Reserved12, pc[i].Reserved13,
- pc[i].Reserved14, pc[i].pcmd);
- }
-}
-
-/* Print the Memory copy descriptors */
-static void print_memcp_descriptors(void)
-{
- struct memcpy_descriptor *pm;
- int i;
-
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pm[i].NxtPointerHi, pm[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
- pm[i].SrcAddrHi, pm[i].SrcAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
- pm[i].DestAddrHi, pm[i].DestAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
- pm[i].XferSize);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
- pm[i].MemCopyFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
- pm[i].MemCopyStatus);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
- pm[i].reserved9);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
- pm[i].reserved10);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
- pm[i].reserved11);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
- pm[i].reserved12);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
- pm[i].reserved13);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
- pm[i].reserved14);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
- pm[i].reserved15);
- }
-}
-#endif
-
-/* Reset cdma_descriptor chain to 0 */
-static void reset_cdma_desc(int i)
-{
- struct cdma_descriptor *ptr;
-
- BUG_ON(i >= MAX_DESCS);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- ptr[i].NxtPointerHi = 0;
- ptr[i].NxtPointerLo = 0;
- ptr[i].FlashPointerHi = 0;
- ptr[i].FlashPointerLo = 0;
- ptr[i].CommandType = 0;
- ptr[i].MemAddrHi = 0;
- ptr[i].MemAddrLo = 0;
- ptr[i].CommandFlags = 0;
- ptr[i].Channel = 0;
- ptr[i].Status = 0;
- ptr[i].MemCopyPointerHi = 0;
- ptr[i].MemCopyPointerLo = 0;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_UpdateEventStatus
-* Inputs: none
-* Outputs: none
-* Description: This function updates the event status of all the channels
-* when an error condition is reported.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void CDMA_UpdateEventStatus(void)
-{
- int i, j, active_chan;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++) {
- /* Check for the descriptor with failure */
- if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
- break;
-
- }
-
- /* All the previous cmd's status for this channel must be good */
- for (i = 0; i < j; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- }
-
- /* Abort the channel with type 0 reset command. It resets the */
- /* selected channel after the descriptor completes the flash */
- /* operation and status has been updated for the descriptor. */
- /* Memory Copy and Sync associated with this descriptor will */
- /* not be executed */
- active_chan = ioread32(FlashReg + CHNL_ACTIVE);
- if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
- iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
- iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
-	} else { /* Should not reach here */
- printk(KERN_ERR "Error! Used bank is not set in"
- " reg CHNL_ACTIVE\n");
- }
-}
-
-static void cdma_trans(u16 chan)
-{
- u32 addr;
-
- addr = info.cdma_desc;
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | chan, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
- FlashMem);
- iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
-* for each pending command, start the CDMA engine, and return.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Execute_CMDs(void)
-{
- int i, ret;
- u64 flash_add;
- u32 ptr;
- dma_addr_t map_addr, next_ptr;
- u16 status = PASS;
- u16 tmp_c;
- struct cdma_descriptor *pc;
- struct memcpy_descriptor *pm;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* No pending cmds to execute, just exit */
- if (0 == info.pcmds_num) {
- nand_dbg_print(NAND_DBG_TRACE,
- "No pending cmds to execute. Just exit.\n");
- return PASS;
- }
-
- for (i = 0; i < MAX_DESCS; i++)
- reset_cdma_desc(i);
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
- info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
- next_ptr = info.cdma_desc;
- info.cdma_num = 0;
-
- for (i = 0; i < info.pcmds_num; i++) {
- if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
- info.pcmds[i].Status = CMD_NOT_DONE;
- continue;
- }
-
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- /* Use the Block offset within a bank */
- tmp_c = info.pcmds[i].Block /
- (DeviceInfo.wTotalBlocks / totalUsedBanks);
- flash_add = (u64)(info.pcmds[i].Block - tmp_c *
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)(info.pcmds[i].Page) *
- DeviceInfo.wPageDataSize;
-
- ptr = MODE_10 | (info.flash_bank << 24) |
- (u32)GLOB_u64_Div(flash_add,
- DeviceInfo.wPageDataSize);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set Main+Spare Access Mode */
- pc[info.cdma_num].CommandType = 0x43;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- }
-
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- pc[info.cdma_num].CommandType = 1;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
-
- case WRITE_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case WRITE_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case MEMCOPY_CMD:
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
- /* Set bit 11 to let the CDMA engine continue to */
- /* execute only after it has finished processing */
- /* the memcopy descriptor. */
- /* Also set bit 10 and bit 9 to 1 */
- pc[info.cdma_num].CommandFlags = 0x0E40;
- map_addr = info.memcp_desc + info.cdma_num *
- sizeof(struct memcpy_descriptor);
- pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
- pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
-
- pm[info.cdma_num].NxtPointerHi = 0;
- pm[info.cdma_num].NxtPointerLo = 0;
-
- map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
- pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
- pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
- map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
- pm[info.cdma_num].DestAddrHi = map_addr >> 16;
- pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
-
- pm[info.cdma_num].XferSize =
- info.pcmds[i].MemCopyByteCnt;
- pm[info.cdma_num].MemCopyFlags =
- (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
- pm[info.cdma_num].MemCopyStatus = 0;
- break;
-
- case DUMMY_CMD:
- default:
- pc[info.cdma_num].CommandType = 0XFFFF;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
- }
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set back Main Area Access Mode */
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- pc[info.cdma_num].CommandType = 0x42;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
- }
- }
-
- /* Add a dummy descriptor at end of the CDMA chain */
- reset_cdma_desc(info.cdma_num);
- ptr = MODE_10 | (info.flash_bank << 24);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
- /* Set Command Flags for the last CDMA descriptor: */
- /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
- pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- /* Wait for DMA to be enabled before issuing the next command */
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- cdma_trans(info.flash_bank);
-
- ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
- if (!ret)
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = info.ret;
-
- info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
-
- return status;
-}
-
-int is_cdma_interrupt(void)
-{
- u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
- u32 int_en_mask;
- u32 cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* Set the global Enable masks for only those interrupts
- * that are supported */
- cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
-
- int_en_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-
- ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
- ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
- ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
- ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
- ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
- ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
-
- if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
- return 1;
- } else {
- iowrite32(ints_b0, FlashReg + INTR_STATUS0);
- iowrite32(ints_b1, FlashReg + INTR_STATUS1);
- iowrite32(ints_b2, FlashReg + INTR_STATUS2);
- iowrite32(ints_b3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Not a NAND controller interrupt! Ignore it.\n");
- return 0;
- }
-}
-
-static void update_event_status(void)
-{
- int i;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (i = 0; i < info.cdma_num; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- if ((ptr[i].CommandType == 0x41) ||
- (ptr[i].CommandType == 0x42) ||
- (ptr[i].CommandType == 0x43))
- continue;
-
- switch (info.pcmds[ptr[i].pcmd].CMD) {
- case READ_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr,
- info.pcmds[ptr[i].pcmd].PageCount);
- break;
- case READ_SPARE_CMD:
- Conv_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr);
- break;
- }
- }
-}
-
-static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
-{
- u16 event = EVENT_NONE;
- u16 err_byte;
- u16 err_page = 0;
- u8 err_sector;
- u8 err_device;
- u16 ecc_correction_info;
- u16 err_address;
- u32 eccSectorSize;
- u8 *err_pos;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- if (0 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
- else if (1 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
- else if (2 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
- else if (3 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
-
- err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
- err_sector = ((err_address &
- ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
-
- ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_device = ((ecc_correction_info &
- ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
-
- if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- event = EVENT_UNCORRECTABLE_DATA_ERROR;
- } else {
- event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sector * eccSectorSize +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_device;
- *err_pos ^= ecc_correction_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return event;
-}
-
-static u16 process_ecc_int(u32 c, u16 *p_desc_num)
-{
- struct cdma_descriptor *ptr;
- u16 j;
- int event = EVENT_PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (c != info.flash_bank)
- printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
- info.flash_bank, c);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++)
- if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
- break;
-
- *p_desc_num = j; /* Pass the descriptor number found here */
-
- if (j >= info.cdma_num) {
- printk(KERN_ERR "Can not find the correct descriptor number "
- "when ecc interrupt triggered!"
- "info.cdma_num: %d, j: %d\n", info.cdma_num, j);
- return EVENT_UNCORRECTABLE_DATA_ERROR;
- }
-
- event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
- info.pcmds[ptr[j].pcmd].Page);
-
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
- printk(KERN_ERR "Uncorrectable ECC error!"
- "info.cdma_num: %d, j: %d, "
- "pending cmd CMD: 0x%x, "
- "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
- info.cdma_num, j,
- info.pcmds[ptr[j].pcmd].CMD,
- info.pcmds[ptr[j].pcmd].Block,
- info.pcmds[ptr[j].pcmd].Page,
- info.pcmds[ptr[j].pcmd].PageCount);
-
- if (ptr[j].pcmd != 0xff)
- info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
- CDMA_UpdateEventStatus();
- }
-
- return event;
-}
-
-static void process_prog_erase_fail_int(u16 desc_num)
-{
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- if (ptr[desc_num].pcmd != 0xFF)
- info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
-
- CDMA_UpdateEventStatus();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Event_Status (for use with CMD_DMA)
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function is called after an interrupt has happened
-* It reads the HW status register and ...tbd
-* It returns the appropriate event status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Event_Status(void)
-{
- u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
- DMA_INTR__DESC_COMP_CHANNEL1,
- DMA_INTR__DESC_COMP_CHANNEL2,
- DMA_INTR__DESC_COMP_CHANNEL3};
- u32 cdma_int_status, int_status;
- u32 ecc_enable = 0;
- u16 event = EVENT_PASS;
- u16 cur_desc = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ecc_enable = ioread32(FlashReg + ECC_ENABLE);
-
- while (1) {
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
- event = process_ecc_int(info.flash_bank, &cur_desc);
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + ints_addr[info.flash_bank]);
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
- nand_dbg_print(NAND_DBG_WARN,
- "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, "
- "ints_cdma: 0x%x\n",
- ioread32(FlashReg + INTR_STATUS0),
- ioread32(FlashReg + INTR_STATUS1),
- ioread32(FlashReg + INTR_STATUS2),
- ioread32(FlashReg + INTR_STATUS3),
- ioread32(FlashReg + DMA_INTR));
- break;
- }
- } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
- printk(KERN_ERR "NAND program fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_PROGRAM_FAILURE;
- break;
- } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
- printk(KERN_ERR "NAND erase fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_ERASE_FAILURE;
- break;
- } else {
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
- iowrite32(dma_intr_bit[info.flash_bank],
- FlashReg + DMA_INTR);
- update_event_status();
- event = EVENT_PASS;
- break;
- }
- }
- }
-
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- iowrite32(cdma_int_status, FlashReg + DMA_INTR);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return event;
-}
-
-
-
diff --git a/drivers/staging/spectra/lld_cdma.h b/drivers/staging/spectra/lld_cdma.h
deleted file mode 100644
index 854ea066f0c..00000000000
--- a/drivers/staging/spectra/lld_cdma.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-/* header for LLD_CDMA.c module */
-
-#ifndef _LLD_CDMA_
-#define _LLD_CDMA_
-
-#include "flash.h"
-
-#define DEBUG_SYNC 1
-
-/*/////////// CDMA specific MACRO definition */
-#define MAX_DESCS (255)
-#define MAX_CHANS (4)
-#define MAX_SYNC_POINTS (16)
-#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
-
-#define CHANNEL_SYNC_MASK (0x000F)
-#define CHANNEL_DMA_MASK (0x00F0)
-#define CHANNEL_ID_MASK (0x0300)
-#define CHANNEL_CONT_MASK (0x4000)
-#define CHANNEL_INTR_MASK (0x8000)
-
-#define CHANNEL_SYNC_OFFSET (0)
-#define CHANNEL_DMA_OFFSET (4)
-#define CHANNEL_ID_OFFSET (8)
-#define CHANNEL_CONT_OFFSET (14)
-#define CHANNEL_INTR_OFFSET (15)
-
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
-u16 CDMA_Execute_CMDs(void);
-void print_pending_cmds(void);
-void print_cdma_descriptors(void);
-
-extern u8 g_SBDCmdIndex;
-extern struct mrst_nand_info info;
-
-
-/*/////////// prototypes: APIs for LLD_CDMA */
-int is_cdma_interrupt(void);
-u16 CDMA_Event_Status(void);
-
-/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
-struct cdma_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 FlashPointerHi;
- u32 FlashPointerLo;
- u32 CommandType;
- u32 MemAddrHi;
- u32 MemAddrLo;
- u32 CommandFlags;
- u32 Channel;
- u32 Status;
- u32 MemCopyPointerHi;
- u32 MemCopyPointerLo;
- u32 Reserved12;
- u32 Reserved13;
- u32 Reserved14;
- u32 pcmd; /* pending cmd num related to this descriptor */
-};
-
-/* This struct holds one MemCopy descriptor as defined by the HW */
-struct memcpy_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 SrcAddrHi;
- u32 SrcAddrLo;
- u32 DestAddrHi;
- u32 DestAddrLo;
- u32 XferSize;
- u32 MemCopyFlags;
- u32 MemCopyStatus;
- u32 reserved9;
- u32 reserved10;
- u32 reserved11;
- u32 reserved12;
- u32 reserved13;
- u32 reserved14;
- u32 reserved15;
-};
-
-/* Pending CMD table entries (includes MemCopy parameters) */
-struct pending_cmd {
- u8 CMD;
- u8 *DataAddr;
- u32 Block;
- u16 Page;
- u16 PageCount;
- u8 *DataDestAddr;
- u8 *DataSrcAddr;
- u32 MemCopyByteCnt;
- u16 Flags;
- u16 Status;
-};
-
-#if DEBUG_SYNC
-extern u32 debug_sync_cnt;
-#endif
-
-/* Definitions for CMD DMA descriptor chain fields */
-#define CMD_DMA_DESC_COMP 0x8000
-#define CMD_DMA_DESC_FAIL 0x4000
-
-#endif /*_LLD_CDMA_*/
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
deleted file mode 100644
index 095f2f0c2e5..00000000000
--- a/drivers/staging/spectra/lld_emu.c
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#if FLASH_EMU
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-#if FLASH_EMU /* This is for entire module */
-
-static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
-
-/* Read the nand emu file and then fill its content into flash_memory */
-int emu_load_file_to_mem(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nread;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Can not get valid inode!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid nand emu file size: "
- "0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
- nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nread = vfs_read(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nread < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial read: "
- "%d bytes\n", __FILE__, __LINE__, (int)nread);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/* Write contents of flash_memory to nand emu file */
-int emu_write_mem_to_file(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nwritten;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid "
- "nand emu file size: 0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
- "%lld\n", nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nwritten = vfs_write(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nwritten < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial write: "
- "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_memory[0] = vmalloc(GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS *
- GLOB_LLD_PAGES * sizeof(u8));
- if (!flash_memory[0]) {
- printk(KERN_ERR "Fail to allocate memory "
- "for nand emulator!\n");
- return ERR;
- }
-
- memset((char *)(flash_memory[0]), 0xFF,
- GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
- sizeof(u8));
-
- for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
- flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
-
- emu_load_file_to_mem(); /* Load nand emu file to mem */
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int emu_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- emu_write_mem_to_file(); /* Write back mem to nand emu file */
-
- vfree(flash_memory[0]);
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=1 FAIL=0
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 emu_Read_Device_ID(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = 36;
- DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
- DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
- DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
- DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
- DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
- GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Erase_Block(u32 block_add)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "emu_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- for (i = block_add * GLOB_LLD_PAGES;
- i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
- if (flash_memory[i]) {
- memset((u8 *)(flash_memory[i]), 0xFF,
- DeviceInfo.wPageSize * sizeof(u8));
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageDataSize);
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
- } else {
- memcpy(read_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageDataSize);
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageSize);
- } else {
- memcpy(read_data, (u8 *) (flash_memory[Block *
- GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageSize);
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < page_count; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageSize);
- write_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
-
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
- DeviceInfo.wPageDataSize), write_data,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(write_data, 0xFF,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- } else {
- memcpy(write_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
- + DeviceInfo.wPageDataSize),
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 emu_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* emu_CDMA_Flash_Init
-* CDMA_process_data command (use LLD_CDMA)
-* CDMA_MemCopy_CMD (use LLD_CDMA)
-* emu_CDMA_execute all commands
-* emu_CDMA_Event_Status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void emu_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
- print_pending_cmds(tag_count);
-
-#if DEBUG_SYNC
- }
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- emu_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- emu_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- emu_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- emu_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
- * Temporarily adding code to reset the PendingCMD array for basic testing.
- * It should be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
- emu_isr(0, 0); /* This is a null ISR for now; it needs to be implemented in the future */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
-#endif /* FLASH_EMU */
diff --git a/drivers/staging/spectra/lld_emu.h b/drivers/staging/spectra/lld_emu.h
deleted file mode 100644
index 63f84c38d3c..00000000000
--- a/drivers/staging/spectra/lld_emu.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_EMU_
-#define _LLD_EMU_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: emulator API functions */
-extern u16 emu_Flash_Reset(void);
-extern u16 emu_Flash_Init(void);
-extern int emu_Flash_Release(void);
-extern u16 emu_Read_Device_ID(void);
-extern u16 emu_Erase_Block(u32 block_addr);
-extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Event_Status(void);
-extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Get_Bad_Block(u32 block);
-
-u16 emu_CDMA_Flash_Init(void);
-u16 emu_CDMA_Execute_CMDs(u16 tag_count);
-u16 emu_CDMA_Event_Status(void);
-#endif /*_LLD_EMU_*/
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
deleted file mode 100644
index a9c309a167c..00000000000
--- a/drivers/staging/spectra/lld_mtd.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-static struct mtd_info *spectra_mtd;
-static int mtddev = -1;
-module_param(mtddev, int, 0);
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Init(void)
-{
- if (mtddev == -1) {
- printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
- return FAIL;
- }
-
- spectra_mtd = get_mtd_device(NULL, mtddev);
- if (!spectra_mtd) {
- printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int mtd_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- if (!spectra_mtd)
- return PASS;
-
- put_mtd_device(spectra_mtd);
- spectra_mtd = NULL;
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=1 FAIL=0
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 mtd_Read_Device_ID(void)
-{
- uint64_t tmp;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!spectra_mtd)
- return FAIL;
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- tmp = spectra_mtd->size;
- do_div(tmp, spectra_mtd->erasesize);
- DeviceInfo.wTotalBlocks = tmp;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
- DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
- DeviceInfo.wPageDataSize = spectra_mtd->writesize;
- DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 0;//spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK;
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-void erase_callback(struct erase_info *e)
-{
- complete((void *)e->priv);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Erase_Block(u32 block_add)
-{
- struct erase_info erase;
- DECLARE_COMPLETION_ONSTACK(comp);
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "mtd_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- erase.mtd = spectra_mtd;
- erase.callback = erase_callback;
- erase.addr = block_add * spectra_mtd->erasesize;
- erase.len = spectra_mtd->erasesize;
- erase.priv = (unsigned long)&comp;
-
- ret = spectra_mtd->erase(spectra_mtd, &erase);
- if (!ret) {
- wait_for_completion(&comp);
- if (erase.state != MTD_ERASE_DONE)
- ret = -EIO;
- }
- if (ret) {
- printk(KERN_WARNING "mtd_Erase_Block error! "
- "erase of region [0x%llx, 0x%llx] failed\n",
- erase.addr, erase.len);
- return FAIL;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->write(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, write_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->read(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, read_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, PageCount, Block);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = read_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, page_count, Block);
- WARN_ON(1);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- while (page_count) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = write_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->write_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageSize;
- Page++;
- page_count--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- WARN_ON(1);
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u (%u pages)\n",
- (unsigned int)Block, (unsigned int)Page, PageCount);
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = NULL;
- ops.len = 0;
- ops.oobbuf = read_data;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 mtd_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* mtd_CDMA_Flash_Init
-* CDMA_process_data command (use LLD_CDMA)
-* CDMA_MemCopy_CMD (use LLD_CDMA)
-* mtd_CDMA_execute all commands
-* mtd_CDMA_Event_Status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void mtd_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
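- /* Queue one dummy command per used bank before handing the pending */
- /* command array to CDMA_Execute_CMDs(). */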
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
-#ifdef VERBOSE
- print_pending_cmds(tag_count);
-#endif
-#if DEBUG_SYNC
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- mtd_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- mtd_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- mtd_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- mtd_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
- /* fall through - mark the memcpy command as done too */
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
- * Temporary code to reset the PendingCMD array for basic testing.
- * It should be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
- mtd_isr(0, 0); /* This is a null ISR for now; it needs to be filled in later */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_CDMA_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
diff --git a/drivers/staging/spectra/lld_mtd.h b/drivers/staging/spectra/lld_mtd.h
deleted file mode 100644
index 4e81ee87b53..00000000000
--- a/drivers/staging/spectra/lld_mtd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_MTD_
-#define _LLD_MTD_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: MTD API functions */
-extern u16 mtd_Flash_Reset(void);
-extern u16 mtd_Flash_Init(void);
-extern int mtd_Flash_Release(void);
-extern u16 mtd_Read_Device_ID(void);
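- /* Program the DDMA transfer with four indexed writes: the operation */
- /* and page count, the upper and lower 16 bits of the DMA buffer */
- /* address, and a final control word for the transfer. */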
-extern u16 mtd_Erase_Block(u32 block_addr);
-extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Event_Status(void);
-extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Get_Bad_Block(u32 block);
-
-u16 mtd_CDMA_Flash_Init(void);
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
-u16 mtd_CDMA_Event_Status(void);
-#endif /*_LLD_MTD_*/
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
deleted file mode 100644
index 60a14ff26c7..00000000000
--- a/drivers/staging/spectra/lld_nand.c
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-
-#include "spectraswconfig.h"
-#include "flash.h"
-#include "ffsdefs.h"
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
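- /* Walk the controller's ECC error reports until LAST_ERR_INFO is set. */
- /* An uncorrectable error fails the read; a correctable one is repaired */
- /* by XOR-ing the reported correction mask into the affected byte. */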
-
-#include "nand_regs.h"
-
-#define SPECTRA_NAND_NAME "nd"
-
-#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
-#define MAX_PAGES_PER_RW 128
-
-#define INT_IDLE_STATE 0
-#define INT_READ_PAGE_MAIN 0x01
-#define INT_WRITE_PAGE_MAIN 0x02
-#define INT_PIPELINE_READ_AHEAD 0x04
-#define INT_PIPELINE_WRITE_AHEAD 0x08
-#define INT_MULTI_PLANE_READ 0x10
-#define INT_MULTI_PLANE_WRITE 0x11
-
-static u32 enable_ecc;
-
-struct mrst_nand_info info;
-
-int totalUsedBanks;
-u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-void __iomem *FlashReg;
-void __iomem *FlashMem;
-
-u16 conf_parameters[] = {
- 0x0000,
- 0x0000,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x0000,
- 0x0000,
- 0x0001,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0040,
- 0x0001,
- 0x000A,
- 0x000A,
- 0x000A,
- 0x0000,
- 0x0000,
- 0x0005,
- 0x0012,
- 0x000C
-};
-
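-/* A block is reported defective if any of the checked spare-area bytes in */
-/* its first two or last two pages is not 0xff. */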
-u16 NAND_Get_Bad_Block(u32 block)
-{
- u32 status = PASS;
- u32 flag_bytes = 0;
- u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
- u32 page, i;
- u8 *pReadSpareBuf = buf_get_bad_block;
-
- if (enable_ecc)
- flag_bytes = DeviceInfo.wNumPageSpareFlag;
-
- for (page = 0; page < 2; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- for (page = 1; page < 3; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block,
- DeviceInfo.wPagesPerBlock - page , 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- return GOOD_BLOCK;
-}
-
-
-u16 NAND_Flash_Reset(void)
-{
- u32 i;
- u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
- u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
- u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
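- /* Clear any stale reset-complete/timeout bits, reset each bank while */
- /* busy-waiting for it to finish, then clear the status bits again. */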
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
- while (!(ioread32(FlashReg + intr_status[i]) &
- (intr_status_rst_comp[i] | intr_status_time_out[i])))
- ;
- if (ioread32(FlashReg + intr_status[i]) &
- intr_status_time_out[i])
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Reset operation timed out on bank %d\n", i);
- }
-
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- return PASS;
-}
-
-static void NAND_ONFi_Timing_Mode(u16 mode)
-{
- u16 Trea[6] = {40, 30, 25, 20, 20, 16};
- u16 Trp[6] = {50, 25, 17, 15, 12, 10};
- u16 Treh[6] = {30, 15, 15, 10, 10, 7};
- u16 Trc[6] = {100, 50, 35, 30, 25, 20};
- u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
- u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
- u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
- u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
- u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
- u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
- u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
- u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
-
- u16 TclsRising = 1;
- u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
- u16 dv_window = 0;
- u16 en_lo, en_hi;
- u16 acc_clks;
- u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
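- /* Convert the selected ONFI mode's timing parameters (in ns) into */
- /* controller clock counts; CEIL_DIV divides each value by CLK_X and */
- /* rounds up so every minimum timing requirement is still met. */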
- en_lo = CEIL_DIV(Trp[mode], CLK_X);
- en_hi = CEIL_DIV(Treh[mode], CLK_X);
-
-#if ONFI_BLOOM_TIME
- if ((en_hi * CLK_X) < (Treh[mode] + 2))
- en_hi++;
-#endif
-
- if ((en_lo + en_hi) * CLK_X < Trc[mode])
- en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-
- if ((en_lo + en_hi) < CLK_MULTI)
- en_lo += CLK_MULTI - en_lo - en_hi;
-
- while (dv_window < 8) {
- data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-
- data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-
- data_invalid =
- data_invalid_rhoh <
- data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
-
- dv_window = data_invalid - Trea[mode];
-
- if (dv_window < 8)
- en_lo++;
- }
-
- acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-
- while (((acc_clks * CLK_X) - Trea[mode]) < 3)
- acc_clks++;
-
- if ((data_invalid - acc_clks * CLK_X) < 2)
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
- __FILE__, __LINE__);
-
- addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
- re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
- re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
- we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
- cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (!TclsRising)
- cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
- if (cs_cnt == 0)
- cs_cnt = 1;
-
- if (Tcea[mode]) {
- while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
- cs_cnt++;
- }
-
-#if MODE5_WORKAROUND
- if (mode == 5)
- acc_clks = 5;
-#endif
-
- /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
- if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
- (ioread32(FlashReg + DEVICE_ID) == 0x88))
- acc_clks = 6;
-
- iowrite32(acc_clks, FlashReg + ACC_CLKS);
- iowrite32(re_2_we, FlashReg + RE_2_WE);
- iowrite32(re_2_re, FlashReg + RE_2_RE);
- iowrite32(we_2_re, FlashReg + WE_2_RE);
- iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
- iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
-}
-
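-/* Indexed register access helpers: write the command/address word to the */
-/* flash memory window, then write or read the associated data at offset */
-/* 0x10 of the same window. */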
-static void index_addr(u32 address, u32 data)
-{
- iowrite32(address, FlashMem);
- iowrite32(data, FlashMem + 0x10);
-}
-
-static void index_addr_read_data(u32 address, u32 *pdata)
-{
- iowrite32(address, FlashMem);
- *pdata = ioread32(FlashMem + 0x10);
-}
-
-static void set_ecc_config(void)
-{
-#if SUPPORT_8BITECC
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
-
- if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
- == 1) {
- DeviceInfo.wECCBytesPerSector = 4;
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag =
- DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- } else {
- DeviceInfo.wECCBytesPerSector =
- (ioread32(FlashReg + ECC_CORRECTION) &
- ECC_CORRECTION__VALUE) * 13 / 8;
- if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
- DeviceInfo.wECCBytesPerSector += 2;
- else
- DeviceInfo.wECCBytesPerSector += 1;
-
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- }
-}
-
-static u16 get_onfi_nand_para(void)
-{
- int i;
- u16 blks_lun_l, blks_lun_h, n_of_luns;
- u32 blockperlun, id;
-
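- /* Reset banks 0-3 in sequence, only moving on to the next bank when */
- /* the previous one reports reset complete rather than a timeout. */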
- iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
-
- while (!((ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK2,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK3,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__TIME_OUT)))
- ;
- } else {
- printk(KERN_ERR "Reset of bank 2 timed out!\n");
- }
- } else {
- printk(KERN_ERR "Reset of bank 1 timed out!\n");
- }
- }
-
- iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
- iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
- iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
- iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
-
- DeviceInfo.wONFIDevFeatures =
- ioread32(FlashReg + ONFI_DEVICE_FEATURES);
- DeviceInfo.wONFIOptCommands =
- ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
- DeviceInfo.wONFITimingMode =
- ioread32(FlashReg + ONFI_TIMING_MODE);
- DeviceInfo.wONFIPgmCacheTimingMode =
- ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
-
- n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
- blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
- blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
-
- blockperlun = (blks_lun_h << 16) | blks_lun_l;
-
- DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
-
- if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
- ONFI_TIMING_MODE__VALUE))
- return FAIL;
-
- for (i = 5; i > 0; i--) {
- if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
- break;
- }
-
- NAND_ONFi_Timing_Mode(i);
-
- index_addr(MODE_11 | 0, 0x90);
- index_addr(MODE_11 | 1, 0);
-
- for (i = 0; i < 3; i++)
- index_addr_read_data(MODE_11 | 2, &id);
-
- nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
-
- DeviceInfo.MLCDevice = id & 0x0C;
-
- /* All the ONFI devices we know of so far support the page cache */
- /* read/write feature, so pipeline_rw_ahead could be enabled here: */
- /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
- /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
-
- return PASS;
-}
-
-static void get_samsung_nand_para(void)
-{
- u8 no_of_planes;
- u32 blk_size;
- u64 plane_size, capacity;
- u32 id_bytes[5];
- int i;
-
- index_addr((u32)(MODE_11 | 0), 0x90);
- index_addr((u32)(MODE_11 | 1), 0);
- for (i = 0; i < 5; i++)
- index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- id_bytes[0], id_bytes[1], id_bytes[2],
- id_bytes[3], id_bytes[4]);
-
- if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
- /* Set timing register values according to datasheet */
- iowrite32(5, FlashReg + ACC_CLKS);
- iowrite32(20, FlashReg + RE_2_WE);
- iowrite32(12, FlashReg + WE_2_RE);
- iowrite32(14, FlashReg + ADDR_2_DATA);
- iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(2, FlashReg + CS_SETUP_CNT);
- }
-
- no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
- plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
- blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
- capacity = (u64)128 * plane_size * no_of_planes;
-
- DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
-}
-
-static void get_toshiba_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 tmp;
-
- /* Workaround to fix a controller bug which reports a wrong */
- /* spare area size for some kinds of Toshiba NAND devices */
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
- iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
- tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
- iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- }
-
- /* Since Toshiba NAND cannot report its block number, the user must */
- /* provide the correct block number in a scratch register before the */
- /* Linux NAND driver is loaded. If no valid value is found in the */
- /* scratch register, the default block number value is used. */
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void get_hynix_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 main_size, spare_size;
-
- switch (DeviceInfo.wDeviceID) {
- case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
- case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
- iowrite32(128, FlashReg + PAGES_PER_BLOCK);
- iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
- main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
- spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
- iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
- iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
- iowrite32(0, FlashReg + DEVICE_WIDTH);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- DeviceInfo.MLCDevice = 1;
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
- "Will use default parameter values instead.\n",
- DeviceInfo.wDeviceID);
- }
-
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void find_valid_banks(void)
-{
- u32 id[LLD_MAX_FLASH_BANKS];
- int i;
-
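- /* Read the first ID byte of each bank. Bank 0 is valid if it returns a */
- /* non-zero ID; every other bank is valid only if its ID matches bank 0. */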
- totalUsedBanks = 0;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
- index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
- index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
- index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Return 1st ID for bank[%d]: %x\n", i, id[i]);
-
- if (i == 0) {
- if (id[i] & 0x0ff)
- GLOB_valid_banks[i] = 1;
- } else {
- if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
- GLOB_valid_banks[i] = 1;
- }
-
- totalUsedBanks += GLOB_valid_banks[i];
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "totalUsedBanks: %d\n", totalUsedBanks);
-}
-
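-/* If the controller's partition feature is enabled and partition 1 belongs */
-/* to Spectra, derive the Spectra block range from the MIN/MAX bank and */
-/* block address registers; otherwise use the whole device. */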
-static void detect_partition_feature(void)
-{
- if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(FlashReg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
- DeviceInfo.wSpectraStartBlock =
- ((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wSpectraEndBlock =
- (((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MAX_BLK_ADDR_1) &
- MAX_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
-
- if (DeviceInfo.wSpectraEndBlock >=
- DeviceInfo.wTotalBlocks) {
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- }
-
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
-}
-
-static void dump_device_info(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
- DeviceInfo.wDeviceMaker);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
- DeviceInfo.wDeviceID);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
- DeviceInfo.wDeviceType);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
- DeviceInfo.wSpectraStartBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
- DeviceInfo.wSpectraEndBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
- DeviceInfo.wTotalBlocks);
- nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
- DeviceInfo.wPagesPerBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
- DeviceInfo.wPageSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
- DeviceInfo.wPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
- DeviceInfo.wPageSpareSize);
- nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
- DeviceInfo.wNumPageSpareFlag);
- nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
- DeviceInfo.wECCBytesPerSector);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
- DeviceInfo.wBlockSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
- DeviceInfo.wBlockDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
- DeviceInfo.wDataBlockNum);
- nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
- DeviceInfo.bPlaneNum);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
- DeviceInfo.wDeviceMainAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
- DeviceInfo.wDeviceSpareAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
- DeviceInfo.wDevicesConnected);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
- DeviceInfo.wDeviceWidth);
- nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
- DeviceInfo.wHWRevision);
- nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
- DeviceInfo.wHWFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
- DeviceInfo.wONFIDevFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
- DeviceInfo.wONFIOptCommands);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
- DeviceInfo.wONFITimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
- DeviceInfo.wONFIPgmCacheTimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
- DeviceInfo.MLCDevice ? "Yes" : "No");
- nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
- DeviceInfo.wSpareSkipBytes);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
- DeviceInfo.nBitsInPageNumber);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
- DeviceInfo.nBitsInPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
- DeviceInfo.nBitsInBlockDataSize);
-}
-
-u16 NAND_Read_Device_ID(void)
-{
- u16 status = PASS;
- u8 no_of_planes;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
- iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
- DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
- DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
- DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
-
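- /* Prefer ONFI parameter data when the device reports it; otherwise */
- /* fall back to vendor-specific probing for Samsung, Toshiba and Hynix */
- /* parts, or to the default block count for unknown makers. */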
- if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
- if (FAIL == get_onfi_nand_para())
- return FAIL;
- } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
- get_samsung_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
- get_toshiba_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
- get_hynix_nand_para();
- } else {
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values: "
- "acc_clks: %d, re_2_we: %d, we_2_re: %d, "
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
- DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
-
- DeviceInfo.wDeviceMainAreaSize =
- ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
- DeviceInfo.wDeviceSpareAreaSize =
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
-
- DeviceInfo.wPageDataSize =
- ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
-
- /* Note: When using the Micron 4K NAND device, the controller will
- * report the Page Spare Size as 216 bytes, but Micron's spec says it
- * is 218 bytes. If we force it to 218 bytes, the controller cannot
- * work correctly, so just let it be. But keep in mind that this bug
- * may cause other problems in the future. - Yunpeng 2008-10-10
- */
- DeviceInfo.wPageSpareSize =
- ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-
- DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
-
- DeviceInfo.wPageSize =
- DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
- DeviceInfo.wBlockSize =
- DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize =
- DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
- DeviceInfo.wDeviceType =
- ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
-
- DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
-
- DeviceInfo.wSpareSkipBytes =
- ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
- DeviceInfo.wDevicesConnected;
-
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
- set_ecc_config();
-
- no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
- NUMBER_OF_PLANES__VALUE;
-
- switch (no_of_planes) {
- case 0:
- case 1:
- case 3:
- case 7:
- DeviceInfo.bPlaneNum = no_of_planes + 1;
- break;
- default:
- status = FAIL;
- break;
- }
-
- find_valid_banks();
-
- detect_partition_feature();
-
- dump_device_info();
-
- return status;
-}
-
-u16 NAND_UnlockArrayAll(void)
-{
- u64 start_addr, end_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = 0;
- end_addr = ((u64)DeviceInfo.wBlockSize *
- (DeviceInfo.wTotalBlocks - 1)) >>
- DeviceInfo.nBitsInPageDataSize;
-
- index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
- index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
-
- return PASS;
-}
-
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (INT_ENABLE)
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
- else
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-}
-
-u16 NAND_Erase_Block(u32 block)
-{
- u16 status = PASS;
- u64 flash_add;
- u16 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ERASE_FAIL)
- status = FAIL;
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
- }
-
- return status;
-}
-
-static u32 Boundary_Check_Block_Page(u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (page + page_count > DeviceInfo.wPagesPerBlock)
- status = FAIL;
-
- return status;
-}
-
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i;
- u64 flash_add;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_spare = buf_read_page_spare;
-
- if (block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "block too big: %d\n", (int)block);
- status = FAIL;
- }
-
- if (page >= DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "page too big: %d\n", page);
- status = FAIL;
- }
-
- if (page_count > 1) {
- printk(KERN_ERR "page count too big: %d\n", page_count);
- status = FAIL;
- }
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x41);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < (PageSpareSize / 4); i++)
- *((u32 *)page_spare + i) =
- ioread32(FlashMem + 0x10);
-
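- /* With ECC enabled the flag bytes sit at the end of the physical */
- /* spare area; move them to the front of the returned buffer so the */
- /* caller sees the logical spare layout. */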
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- read_data[i] =
- page_spare[PageSpareSize -
- spareFlagBytes + i];
- for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
- read_data[spareFlagBytes + i] =
- page_spare[i];
- } else {
- for (i = 0; i < PageSpareSize; i++)
- read_data[i] = page_spare[i];
- }
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- }
-
- return status;
-}
-
-/* Unused function. Should be removed later. */
-u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- printk(KERN_ERR
- "Error! This function (NAND_Write_Page_Spare) should never"
- " be called!\n");
- return ERR;
-}
-
-/* op value: 0 - DDMA read; 1 - DDMA write */
-static void ddma_trans(u8 *data, u64 flash_add,
- u32 flash_bank, int op, u32 numPages)
-{
- u32 data_addr;
-
- /* Map virtual address to bus address for DDMA */
- data_addr = virt_to_bus(data);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- (u16)(2 << 12) | (op << 8) | numPages);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
- (u16)(2 << 12) | (2 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & data_addr) << 8)),
- (u16)(2 << 12) | (3 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (1 << 16) | (0x40 << 8)),
- (u16)(2 << 12) | (4 << 8) | 0);
-}
-
-/* Return 1 if all data in buf is 0xff; otherwise return 0 */
-static int check_all_1(u8 *buf)
-{
- int i, j, cnt;
-
- for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
- if (buf[i] != 0xff) {
- cnt = 0;
- nand_dbg_print(NAND_DBG_WARN,
- "the first non-0xff data byte is at offset %d\n", i);
- for (j = i; j < DeviceInfo.wPageDataSize; j++) {
- nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
- cnt++;
- if (cnt > 8)
- break;
- }
- nand_dbg_print(NAND_DBG_WARN, "\n");
- return 0;
- }
- }
-
- return 1;
-}
-
-static int do_ecc_new(unsigned long bank, u8 *buf,
- u32 block, u16 page)
-{
- int status = PASS;
- u16 err_page = 0;
- u16 err_byte;
- u8 err_sect;
- u8 err_dev;
- u16 err_fix_info;
- u16 err_addr;
- u32 ecc_sect_size;
- u8 *err_pos;
- u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
- ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
-
- ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- err_page = ioread32(FlashReg + err_page_addr[bank]);
- err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
- err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
- err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
- >> 8);
- if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d: Uncorrectable ECC error "
- "when reading block %d page %d. "
- "PTN_INTR register: 0x%x "
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- ioread32(FlashReg + PTN_INTR),
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
-
- if (check_all_1(buf))
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: "
- "All 0xff!\n",
- __FILE__, __LINE__);
- else
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: "
- "Not all 0xff!\n",
- __FILE__, __LINE__);
- status = FAIL;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d: Found ECC error "
- "when reading block %d page %d. "
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sect * ecc_sect_size +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_dev;
-
- *err_pos ^= err_fix_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank, read_data,
- block, page);
- }
-
- if ((ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)) ==
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR))
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE)
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR)
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
-
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- }
- return status;
-}
-
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_READ_PAGE_MAIN;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-void Conv_Spare_Data_Log2Phy_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[PageSpareSize - spareFlagBytes + i] = data[i];
- }
-}
-
-void Conv_Spare_Data_Phy2Log_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- data[i] = data[PageSpareSize - spareFlagBytes + i];
- }
-}
-
-
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- j = (DeviceInfo.wPageDataSize / eccSectorSize);
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset + PageDataSize + i];
- for (j--; j >= 1; j--) {
- for (i = eccSectorSize - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset +
- eccSectorSize * j + i];
- }
- for (i = (PageSize - spareSkipBytes) - 1;
- i >= PageDataSize; i--)
- data[page_offset + i + spareSkipBytes] =
- data[page_offset + i];
- page_count--;
- }
- }
-}
-
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- for (i = PageDataSize;
- i < PageSize - spareSkipBytes;
- i++)
- data[page_offset + i] =
- data[page_offset + i +
- spareSkipBytes];
- for (j = 1;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- data[page_offset +
- eccSectorSize * j + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j
- + i];
- }
- for (i = 0; i < spareFlagBytes; i++)
- data[page_offset + PageDataSize + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i];
- page_count--;
- }
- }
-}
-
-/* Untested function */
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
- }
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_READ_AHEAD;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *write_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
-
- if (page_count > 1) {
- write_data_l = write_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Write_Ahead(
- write_data_l, block, page,
- MAX_PAGES_PER_RW);
- if (status == FAIL)
- return status;
-
- write_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Write_Ahead(write_data_l,
- block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_WRITE_PAGE_MAIN;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(write_data, flash_add, flash_bank, 1, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
- ;
-
- return status;
-}
-
-void NAND_ECC_Ctrl(int enable)
-{
- if (enable) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will enable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Will disable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(0, FlashReg + ECC_ENABLE);
- enable_ecc = 0;
- }
-}
-
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 i, j, page_num = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u64 flash_add;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_write_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- if (enable_ecc) {
- for (j = 0;
- j <
- DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- i] =
- write_data[eccSectorSize *
- j + i];
-
- for (i = 0; i < eccBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- eccSectorSize +
- i] =
- write_data[PageDataSize +
- spareFlagBytes +
- eccBytes * j +
- i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i] =
- write_data[PageDataSize + i];
-
- for (i = PageSize - 1; i >= PageDataSize +
- spareSkipBytes; i--)
- page_main_spare[i] = page_main_spare[i -
- spareSkipBytes];
-
- for (i = PageDataSize; i < PageDataSize +
- spareSkipBytes; i++)
- page_main_spare[i] = 0xff;
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(
- *((u32 *)page_main_spare + i),
- FlashMem + 0x10);
- } else {
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(*((u32 *)write_data + i),
- FlashMem + 0x10);
- }
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL)
- status = FAIL;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- page_num++;
- page_count--;
- write_data += PageSize;
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- }
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i, j;
- u64 flash_add = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u8 *read_data_l = read_data;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_read_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x43);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
-
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < PageSize / 4; i++)
- *(((u32 *)page_main_spare) + i) =
- ioread32(FlashMem + 0x10);
-
- if (enable_ecc) {
- for (i = PageDataSize; i < PageSize -
- spareSkipBytes; i++)
- page_main_spare[i] = page_main_spare[i +
- spareSkipBytes];
-
- for (j = 0;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
-
- for (i = 0; i < eccSectorSize; i++)
- read_data_l[eccSectorSize * j +
- i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j + i];
-
- for (i = 0; i < eccBytes; i++)
- read_data_l[PageDataSize +
- spareFlagBytes +
- eccBytes * j + i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j +
- eccSectorSize + i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- read_data_l[PageDataSize + i] =
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i];
- } else {
- for (i = 0; i < (PageDataSize + PageSpareSize);
- i++)
- read_data_l[i] = page_main_spare[i];
-
- }
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- }
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- }
- }
-
- page++;
- page_count--;
- read_data_l += PageSize;
- }
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- return status;
-}
-
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_WRITE_AHEAD;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-/* Un-tested function */
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u16 status2 = PASS;
- u32 t;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- status = PASS;
- if (status2 == FAIL)
- status = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status2 = FAIL;
- status = FAIL;
- t = ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL;
- iowrite32(t, FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
-
- return status;
-}
-
-
-#if CMD_DMA
-static irqreturn_t cdma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- int first_failed_cmd;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!is_cdma_interrupt())
- return IRQ_NONE;
-
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- GLOB_FTL_Event_Status(&first_failed_cmd);
- complete(&dev->complete);
-
- return IRQ_HANDLED;
-}
-#else
-static void handle_nand_int_read(struct mrst_nand_info *dev)
-{
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 intr_status;
- u32 ecc_done_OR_dma_comp = 0;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr_status_addresses[dev->flash_bank];
-
- while (1) {
- if (enable_ecc) {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- dev->ret = do_ecc_new(dev->flash_bank,
- dev->read_data,
- dev->block, dev->page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- } else {
- printk(KERN_ERR "Illegal INTS "
- "(offset addr 0x%x) value: 0x%x\n",
- intr_status,
- ioread32(FlashReg + intr_status));
- }
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
-}
-
-static void handle_nand_int_write(struct mrst_nand_info *dev)
-{
- u32 intr_status;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- int status = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr[dev->flash_bank];
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (FAIL == status)
- dev->ret = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status = FAIL;
- iowrite32(INTR_STATUS0__PROGRAM_FAIL,
- FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-}
-
-static irqreturn_t ddma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
-
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-
- ints0 = ioread32(FlashReg + INTR_STATUS0);
- ints1 = ioread32(FlashReg + INTR_STATUS1);
- ints2 = ioread32(FlashReg + INTR_STATUS2);
- ints3 = ioread32(FlashReg + INTR_STATUS3);
-
- ints_offset = intr[dev->flash_bank];
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
- "DMA_INTR: 0x%x, "
- "dev->state: 0x%x, dev->flash_bank: %d\n",
- ints0, ints1, ints2, ints3,
- ioread32(FlashReg + DMA_INTR),
- dev->state, dev->flash_bank);
-
- if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
- iowrite32(ints0, FlashReg + INTR_STATUS0);
- iowrite32(ints1, FlashReg + INTR_STATUS1);
- iowrite32(ints2, FlashReg + INTR_STATUS2);
- iowrite32(ints3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_WARN,
- "ddma_isr: Invalid interrupt for NAND controller. "
- "Ignore it\n");
- return IRQ_NONE;
- }
-
- switch (dev->state) {
- case INT_READ_PAGE_MAIN:
- case INT_PIPELINE_READ_AHEAD:
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_read(dev);
- break;
- case INT_WRITE_PAGE_MAIN:
- case INT_PIPELINE_WRITE_AHEAD:
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_write(dev);
- break;
- default:
- printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
- dev->state);
- return IRQ_NONE;
- }
-
- dev->state = INT_IDLE_STATE;
- complete(&dev->complete);
- return IRQ_HANDLED;
-}
-#endif
-
-static const struct pci_device_id nand_pci_ids[] = {
- {
- .vendor = 0x8086,
- .device = 0x0809,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { /* end: all zeroes */ }
-};
-
-static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- int ret = -ENODEV;
- unsigned long csr_base;
- unsigned long csr_len;
- struct mrst_nand_info *pndev = &info;
- u32 int_mask;
-
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- return ret;
- }
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
- GLOB_HWCTL_REG_SIZE);
- if (!FlashReg) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped reg base address: "
- "0x%p, len: %d\n",
- FlashReg, GLOB_HWCTL_REG_SIZE);
-
- FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
- GLOB_HWCTL_MEM_SIZE);
- if (!FlashMem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- iounmap(FlashReg);
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped flash base address: "
- "0x%p, len: %d\n",
- (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- NAND_Flash_Reset();
-
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-
-#if CMD_DMA
- info.pcmds_num = 0;
- info.flash_bank = 0;
- info.cdma_num = 0;
- int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
- iowrite32(int_mask, FlashReg + DMA_INTR_EN);
- iowrite32(0xFFFF, FlashReg + DMA_INTR);
-
- int_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-#else
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-#endif
- iowrite32(int_mask, FlashReg + INTR_EN0);
- iowrite32(int_mask, FlashReg + INTR_EN1);
- iowrite32(int_mask, FlashReg + INTR_EN2);
- iowrite32(int_mask, FlashReg + INTR_EN3);
-
- /* Clear all status bits */
- iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
-
- iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
-
- /* Should set value for these registers when init */
- iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
-
- pci_set_master(dev);
- pndev->dev = dev;
-
- csr_base = pci_resource_start(dev, 0);
- if (!csr_base) {
- printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- csr_len = pci_resource_len(dev, 0);
- if (!csr_len) {
- printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request "
- "memory region\n");
- goto failed_req_csr;
- }
-
- pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
- if (!pndev->ioaddr) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_remap_csr;
- }
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
- csr_base, pndev->ioaddr, csr_len);
-
- init_completion(&pndev->complete);
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
-
-#if CMD_DMA
- if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#else
- if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#endif
-
- pci_set_drvdata(dev, pndev);
-
- ret = GLOB_LLD_Read_Device_ID();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- ret = register_spectra_ftl();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- return 0;
-
-failed_remap_csr:
- pci_release_regions(dev);
-failed_req_csr:
- iounmap(FlashMem);
- iounmap(FlashReg);
-failed_disable:
- pci_disable_device(dev);
-
- return ret;
-}
-
-static void nand_pci_remove(struct pci_dev *dev)
-{
- struct mrst_nand_info *pndev = pci_get_drvdata(dev);
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- free_irq(dev->irq, pndev);
-#endif
- iounmap(pndev->ioaddr);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-MODULE_DEVICE_TABLE(pci, nand_pci_ids);
-
-static struct pci_driver nand_pci_driver = {
- .name = SPECTRA_NAND_NAME,
- .id_table = nand_pci_ids,
- .probe = nand_pci_probe,
- .remove = nand_pci_remove,
-};
-
-int NAND_Flash_Init(void)
-{
- int retval;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- retval = pci_register_driver(&nand_pci_driver);
- if (retval)
- return -ENOMEM;
-
- return PASS;
-}
-
-/* Free memory */
-int nand_release_spectra(void)
-{
- pci_unregister_driver(&nand_pci_driver);
- iounmap(FlashMem);
- iounmap(FlashReg);
-
- return 0;
-}
-
-
-
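The write paths deleted above all use the same interrupt-driven DMA handshake: fill the global struct mrst_nand_info, kick the descriptor DMA with ddma_trans(), enable GLOBAL_INT_ENABLE, then sleep on info.complete until ddma_isr() records the result in info.ret and signals the completion. A minimal sketch of the waiting side, reusing names from the removed code (the helper name spectra_dma_wait() is invented; this is an illustration, not the original function):

	/* Sketch: wait for ddma_isr() to finish one DMA transfer. */
	static u16 spectra_dma_wait(struct mrst_nand_info *nfo)
	{
		unsigned long ret;

		/* the real driver calls init_completion() once at probe time */
		iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);	/* let ddma_isr() run */

		ret = wait_for_completion_timeout(&nfo->complete, 10 * HZ);
		if (!ret)
			return ERR;	/* ISR never signalled us */

		return nfo->ret;	/* PASS, or FAIL set by the ISR */
	}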
diff --git a/drivers/staging/spectra/lld_nand.h b/drivers/staging/spectra/lld_nand.h
deleted file mode 100644
index d08388287da..00000000000
--- a/drivers/staging/spectra/lld_nand.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_NAND_
-#define _LLD_NAND_
-
-#ifdef ELDORA
-#include "defs.h"
-#else
-#include "flash.h"
-#include "ffsport.h"
-#endif
-
-#define MODE_00 0x00000000
-#define MODE_01 0x04000000
-#define MODE_10 0x08000000
-#define MODE_11 0x0C000000
-
-
-#define DATA_TRANSFER_MODE 0
-#define PROTECTION_PER_BLOCK 1
-#define LOAD_WAIT_COUNT 2
-#define PROGRAM_WAIT_COUNT 3
-#define ERASE_WAIT_COUNT 4
-#define INT_MONITOR_CYCLE_COUNT 5
-#define READ_BUSY_PIN_ENABLED 6
-#define MULTIPLANE_OPERATION_SUPPORT 7
-#define PRE_FETCH_MODE 8
-#define CE_DONT_CARE_SUPPORT 9
-#define COPYBACK_SUPPORT 10
-#define CACHE_WRITE_SUPPORT 11
-#define CACHE_READ_SUPPORT 12
-#define NUM_PAGES_IN_BLOCK 13
-#define ECC_ENABLE_SELECT 14
-#define WRITE_ENABLE_2_READ_ENABLE 15
-#define ADDRESS_2_DATA 16
-#define READ_ENABLE_2_WRITE_ENABLE 17
-#define TWO_ROW_ADDRESS_CYCLES 18
-#define MULTIPLANE_ADDRESS_RESTRICT 19
-#define ACC_CLOCKS 20
-#define READ_WRITE_ENABLE_LOW_COUNT 21
-#define READ_WRITE_ENABLE_HIGH_COUNT 22
-
-#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
-
-struct mrst_nand_info {
- struct pci_dev *dev;
- u32 state;
- u32 flash_bank;
- u8 *read_data;
- u8 *write_data;
- u32 block;
- u16 page;
- u32 use_dma;
- void __iomem *ioaddr; /* Mapped io reg base address */
- int ret;
- u32 pcmds_num;
- struct pending_cmd *pcmds;
- int cdma_num; /* CDMA descriptor number in this chan */
- u8 *cdma_desc_buf; /* CDMA descriptor table */
- u8 *memcp_desc_buf; /* Memory copy descriptor table */
- dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
- dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
- struct completion complete;
-};
-
-int NAND_Flash_Init(void);
-int nand_release_spectra(void);
-u16 NAND_Flash_Reset(void);
-u16 NAND_Read_Device_ID(void);
-u16 NAND_Erase_Block(u32 flash_add);
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_UnlockArrayAll(void);
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
-u16 NAND_Get_Bad_Block(u32 block);
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-void NAND_ECC_Ctrl(int enable);
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-void Conv_Spare_Data_Log2Phy_Format(u8 *data);
-void Conv_Spare_Data_Phy2Log_Format(u8 *data);
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
-
-extern void __iomem *FlashReg;
-extern void __iomem *FlashMem;
-
-extern int totalUsedBanks;
-extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-#endif /*_LLD_NAND_*/
-
-
-
diff --git a/drivers/staging/spectra/nand_regs.h b/drivers/staging/spectra/nand_regs.h
deleted file mode 100644
index e192e4ae8c1..00000000000
--- a/drivers/staging/spectra/nand_regs.h
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#define DEVICE_RESET 0x0
-#define DEVICE_RESET__BANK0 0x0001
-#define DEVICE_RESET__BANK1 0x0002
-#define DEVICE_RESET__BANK2 0x0004
-#define DEVICE_RESET__BANK3 0x0008
-
-#define TRANSFER_SPARE_REG 0x10
-#define TRANSFER_SPARE_REG__FLAG 0x0001
-
-#define LOAD_WAIT_CNT 0x20
-#define LOAD_WAIT_CNT__VALUE 0xffff
-
-#define PROGRAM_WAIT_CNT 0x30
-#define PROGRAM_WAIT_CNT__VALUE 0xffff
-
-#define ERASE_WAIT_CNT 0x40
-#define ERASE_WAIT_CNT__VALUE 0xffff
-
-#define INT_MON_CYCCNT 0x50
-#define INT_MON_CYCCNT__VALUE 0xffff
-
-#define RB_PIN_ENABLED 0x60
-#define RB_PIN_ENABLED__BANK0 0x0001
-#define RB_PIN_ENABLED__BANK1 0x0002
-#define RB_PIN_ENABLED__BANK2 0x0004
-#define RB_PIN_ENABLED__BANK3 0x0008
-
-#define MULTIPLANE_OPERATION 0x70
-#define MULTIPLANE_OPERATION__FLAG 0x0001
-
-#define MULTIPLANE_READ_ENABLE 0x80
-#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
-
-#define COPYBACK_DISABLE 0x90
-#define COPYBACK_DISABLE__FLAG 0x0001
-
-#define CACHE_WRITE_ENABLE 0xa0
-#define CACHE_WRITE_ENABLE__FLAG 0x0001
-
-#define CACHE_READ_ENABLE 0xb0
-#define CACHE_READ_ENABLE__FLAG 0x0001
-
-#define PREFETCH_MODE 0xc0
-#define PREFETCH_MODE__PREFETCH_EN 0x0001
-#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
-
-#define CHIP_ENABLE_DONT_CARE 0xd0
-#define CHIP_EN_DONT_CARE__FLAG 0x01
-
-#define ECC_ENABLE 0xe0
-#define ECC_ENABLE__FLAG 0x0001
-
-#define GLOBAL_INT_ENABLE 0xf0
-#define GLOBAL_INT_EN_FLAG 0x01
-
-#define WE_2_RE 0x100
-#define WE_2_RE__VALUE 0x003f
-
-#define ADDR_2_DATA 0x110
-#define ADDR_2_DATA__VALUE 0x003f
-
-#define RE_2_WE 0x120
-#define RE_2_WE__VALUE 0x003f
-
-#define ACC_CLKS 0x130
-#define ACC_CLKS__VALUE 0x000f
-
-#define NUMBER_OF_PLANES 0x140
-#define NUMBER_OF_PLANES__VALUE 0x0007
-
-#define PAGES_PER_BLOCK 0x150
-#define PAGES_PER_BLOCK__VALUE 0xffff
-
-#define DEVICE_WIDTH 0x160
-#define DEVICE_WIDTH__VALUE 0x0003
-
-#define DEVICE_MAIN_AREA_SIZE 0x170
-#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
-
-#define DEVICE_SPARE_AREA_SIZE 0x180
-#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
-
-#define TWO_ROW_ADDR_CYCLES 0x190
-#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
-
-#define MULTIPLANE_ADDR_RESTRICT 0x1a0
-#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
-
-#define ECC_CORRECTION 0x1b0
-#define ECC_CORRECTION__VALUE 0x001f
-
-#define READ_MODE 0x1c0
-#define READ_MODE__VALUE 0x000f
-
-#define WRITE_MODE 0x1d0
-#define WRITE_MODE__VALUE 0x000f
-
-#define COPYBACK_MODE 0x1e0
-#define COPYBACK_MODE__VALUE 0x000f
-
-#define RDWR_EN_LO_CNT 0x1f0
-#define RDWR_EN_LO_CNT__VALUE 0x001f
-
-#define RDWR_EN_HI_CNT 0x200
-#define RDWR_EN_HI_CNT__VALUE 0x001f
-
-#define MAX_RD_DELAY 0x210
-#define MAX_RD_DELAY__VALUE 0x000f
-
-#define CS_SETUP_CNT 0x220
-#define CS_SETUP_CNT__VALUE 0x001f
-
-#define SPARE_AREA_SKIP_BYTES 0x230
-#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
-
-#define SPARE_AREA_MARKER 0x240
-#define SPARE_AREA_MARKER__VALUE 0xffff
-
-#define DEVICES_CONNECTED 0x250
-#define DEVICES_CONNECTED__VALUE 0x0007
-
-#define DIE_MASK 0x260
-#define DIE_MASK__VALUE 0x00ff
-
-#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
-#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
-
-#define WRITE_PROTECT 0x280
-#define WRITE_PROTECT__FLAG 0x0001
-
-#define RE_2_RE 0x290
-#define RE_2_RE__VALUE 0x003f
-
-#define MANUFACTURER_ID 0x300
-#define MANUFACTURER_ID__VALUE 0x00ff
-
-#define DEVICE_ID 0x310
-#define DEVICE_ID__VALUE 0x00ff
-
-#define DEVICE_PARAM_0 0x320
-#define DEVICE_PARAM_0__VALUE 0x00ff
-
-#define DEVICE_PARAM_1 0x330
-#define DEVICE_PARAM_1__VALUE 0x00ff
-
-#define DEVICE_PARAM_2 0x340
-#define DEVICE_PARAM_2__VALUE 0x00ff
-
-#define LOGICAL_PAGE_DATA_SIZE 0x350
-#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
-
-#define LOGICAL_PAGE_SPARE_SIZE 0x360
-#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
-
-#define REVISION 0x370
-#define REVISION__VALUE 0xffff
-
-#define ONFI_DEVICE_FEATURES 0x380
-#define ONFI_DEVICE_FEATURES__VALUE 0x003f
-
-#define ONFI_OPTIONAL_COMMANDS 0x390
-#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
-
-#define ONFI_TIMING_MODE 0x3a0
-#define ONFI_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
-#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
-#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
-#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
-
-#define FEATURES 0x3f0
-#define FEATURES__N_BANKS 0x0003
-#define FEATURES__ECC_MAX_ERR 0x003c
-#define FEATURES__DMA 0x0040
-#define FEATURES__CMD_DMA 0x0080
-#define FEATURES__PARTITION 0x0100
-#define FEATURES__XDMA_SIDEBAND 0x0200
-#define FEATURES__GPREG 0x0400
-#define FEATURES__INDEX_ADDR 0x0800
-
-#define TRANSFER_MODE 0x400
-#define TRANSFER_MODE__VALUE 0x0003
-
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
-
-#define DATA_INTR 0x550
-#define DATA_INTR__WRITE_SPACE_AV 0x0001
-#define DATA_INTR__READ_DATA_AV 0x0002
-
-#define DATA_INTR_EN 0x560
-#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
-#define DATA_INTR_EN__READ_DATA_AV 0x0002
-
-#define GPREG_0 0x570
-#define GPREG_0__VALUE 0xffff
-
-#define GPREG_1 0x580
-#define GPREG_1__VALUE 0xffff
-
-#define GPREG_2 0x590
-#define GPREG_2__VALUE 0xffff
-
-#define GPREG_3 0x5a0
-#define GPREG_3__VALUE 0xffff
-
-#define ECC_THRESHOLD 0x600
-#define ECC_THRESHOLD__VALUE 0x03ff
-
-#define ECC_ERROR_BLOCK_ADDRESS 0x610
-#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
-
-#define ECC_ERROR_PAGE_ADDRESS 0x620
-#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
-#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
-
-#define ECC_ERROR_ADDRESS 0x630
-#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
-#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
-
-#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
-#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
-#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
-
-#define DMA_ENABLE 0x700
-#define DMA_ENABLE__FLAG 0x0001
-
-#define IGNORE_ECC_DONE 0x710
-#define IGNORE_ECC_DONE__FLAG 0x0001
-
-#define DMA_INTR 0x720
-#define DMA_INTR__TARGET_ERROR 0x0001
-#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
-
-#define DMA_INTR_EN 0x730
-#define DMA_INTR_EN__TARGET_ERROR 0x0001
-#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
-
-#define TARGET_ERR_ADDR_LO 0x740
-#define TARGET_ERR_ADDR_LO__VALUE 0xffff
-
-#define TARGET_ERR_ADDR_HI 0x750
-#define TARGET_ERR_ADDR_HI__VALUE 0xffff
-
-#define CHNL_ACTIVE 0x760
-#define CHNL_ACTIVE__CHANNEL0 0x0001
-#define CHNL_ACTIVE__CHANNEL1 0x0002
-#define CHNL_ACTIVE__CHANNEL2 0x0004
-#define CHNL_ACTIVE__CHANNEL3 0x0008
-
-#define ACTIVE_SRC_ID 0x800
-#define ACTIVE_SRC_ID__VALUE 0x00ff
-
-#define PTN_INTR 0x810
-#define PTN_INTR__CONFIG_ERROR 0x0001
-#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR__REG_ACCESS_ERROR 0x0020
-
-#define PTN_INTR_EN 0x820
-#define PTN_INTR_EN__CONFIG_ERROR 0x0001
-#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
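Judging by how the removed lld_nand.c drives them, the INTR_STATUSn registers above are write-one-to-clear: the driver repeatedly writes back the value it just read in order to acknowledge the pending bits. A hypothetical two-line helper shows the idiom the original code open-codes everywhere:

	/* Ack every interrupt bit currently set in a W1C status register. */
	static inline void spectra_clear_intr(void __iomem *reg)
	{
		iowrite32(ioread32(reg), reg);
	}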
diff --git a/drivers/staging/spectra/spectraswconfig.h b/drivers/staging/spectra/spectraswconfig.h
deleted file mode 100644
index 17259469e95..00000000000
--- a/drivers/staging/spectra/spectraswconfig.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _SPECTRASWCONFIG_
-#define _SPECTRASWCONFIG_
-
-/* NAND driver version */
-#define GLOB_VERSION "driver version 20100311"
-
-
-/***** Common Parameters *****/
-#define RETRY_TIMES 3
-
-#define READ_BADBLOCK_INFO 1
-#define READBACK_VERIFY 0
-#define AUTO_FORMAT_FLASH 0
-
-/***** Cache Parameters *****/
-#define CACHE_ITEM_NUM 128
-#define BLK_NUM_FOR_L2_CACHE 16
-
-/***** Block Table Parameters *****/
-#define BLOCK_TABLE_INDEX 0
-
-/***** Wear Leveling Parameters *****/
-#define WEAR_LEVELING_GATE 0x10
-#define WEAR_LEVELING_BLOCK_NUM 10
-
-#define DEBUG_BNDRY 0
-
-/***** Product Feature Support *****/
-#define FLASH_EMU defined(CONFIG_SPECTRA_EMU)
-#define FLASH_NAND defined(CONFIG_SPECTRA_MRST_HW)
-#define FLASH_MTD defined(CONFIG_SPECTRA_MTD)
-#define CMD_DMA defined(CONFIG_SPECTRA_MRST_HW_DMA)
-
-#define SPECTRA_PARTITION_ID 0
-
-/* Enable this macro if the number of flash blocks is larger than 16K. */
-#define SUPPORT_LARGE_BLOCKNUM 1
-
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK 3
-//#define NUM_FREE_BLOCKS_GATE 30
-#define NUM_FREE_BLOCKS_GATE 60
-
-/**** Hardware Parameters ****/
-#define GLOB_HWCTL_REG_BASE 0xFFA40000
-#define GLOB_HWCTL_REG_SIZE 4096
-
-#define GLOB_HWCTL_MEM_BASE 0xFFA48000
-#define GLOB_HWCTL_MEM_SIZE 4096
-
-/* KBV - Updated to LNW scratch register address */
-#define SCRATCH_REG_ADDR 0xFF108018
-#define SCRATCH_REG_SIZE 64
-
-#define GLOB_HWCTL_DEFAULT_BLKS 2048
-
-#define SUPPORT_15BITECC 1
-#define SUPPORT_8BITECC 1
-
-#define ONFI_BLOOM_TIME 0
-#define MODE5_WORKAROUND 1
-
-#endif /*_SPECTRASWCONFIG_*/
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 93de4f2e8bf..21a559ecbbb 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -78,7 +78,7 @@ config TIDSPBRIDGE_NTFY_PWRERR
bool "Notify power errors"
depends on TIDSPBRIDGE
help
- Enable notifications to registered clients on the event of power errror
+ Enable notifications to registered clients on the event of power error
trying to suspend bridge driver. Say Y, to signal this event as a fatal
error, this will require a bridge restart to recover.
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a..7eb56178fb6 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
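The dsp-clock.c change above moves GPT timer ownership out of the enable/disable path: the four DM timers are requested once in dsp_clk_init() and freed in dsp_clk_exit(), while dsp_clk_enable()/dsp_clk_disable() only start and stop them. A condensed sketch of the new lifecycle, using the DMT_ID() mapping from this file (the dsp_gpt_setup/dsp_gpt_teardown names are invented for illustration):

	#include <plat/dmtimer.h>	/* omap_dm_timer_* API of this era */

	static struct omap_dm_timer *timer[DM_TIMER_CLOCKS];

	static void dsp_gpt_setup(void)		/* called from dsp_clk_init() */
	{
		int i;

		for (i = 0; i < DM_TIMER_CLOCKS; i++)
			timer[i] = omap_dm_timer_request_specific(DMT_ID(i + 1)); /* ids 5..8 */
	}

	static void dsp_gpt_teardown(void)	/* called from dsp_clk_exit() */
	{
		int i;

		for (i = 0; i < DM_TIMER_CLOCKS; i++)
			omap_dm_timer_free(timer[i]);
	}

	/* dsp_clk_enable()/dsp_clk_disable() now reduce to:
	 *	omap_dm_timer_start(timer[clk_id - 1]);
	 *	omap_dm_timer_stop(timer[clk_id - 1]);
	 */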
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index a7e407e2518..fda240214cd 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -285,7 +285,7 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
enum_refs = 0;
/*
- * TODO: Revisit, this is not an errror case but code
+ * TODO: Revisit, this is not an error case but code
* expects non-zero value.
*/
status = ENODATA;
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c..76cfc6edecd 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 55c0b510889..03420e25d9c 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -109,11 +109,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
spin_unlock(&sdev->ud.lock);
return -EINVAL;
}
-#if 0
- setnodelay(socket);
- setkeepalive(socket);
- setreuse(socket);
-#endif
sdev->ud.tcp_socket = socket;
spin_unlock(&sdev->ud.lock);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 6b4e3e182de..27ac363d1cf 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -564,7 +564,7 @@ static void stub_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
dev_err(dev, "recv a header, %d\n", ret);
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 3b7a847f465..d93e7f1f797 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -334,9 +334,8 @@ void usbip_dump_header(struct usbip_header *pdu)
}
EXPORT_SYMBOL_GPL(usbip_dump_header);
-/* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags)
+/* Receive data over TCP/IP. */
+int usbip_recv(struct socket *sock, void *buf, int size)
{
int result;
struct msghdr msg;
@@ -355,19 +354,6 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
return -EINVAL;
}
- if (usbip_dbg_flag_xmit) {
- if (send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("sending... , sock %p, buf %p, size %d, "
- "msg_flags %d\n", sock, buf, size, msg_flags);
- usbip_dump_buffer(buf, size);
- }
- }
-
do {
sock->sk->sk_allocation = GFP_NOIO;
iov.iov_base = buf;
@@ -377,42 +363,30 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = kernel_sendmsg(sock, &msg, &iov, 1, size);
- else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size,
- MSG_WAITALL);
+ msg.msg_flags = MSG_NOSIGNAL;
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
if (result <= 0) {
- pr_debug("%s sock %p buf %p size %u ret %d total %d\n",
- send ? "send" : "receive", sock, buf, size,
- result, total);
+ pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
+ sock, buf, size, result, total);
goto err;
}
size -= result;
buf += result;
total += result;
-
} while (size > 0);
if (usbip_dbg_flag_xmit) {
- if (!send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("receiving....\n");
- usbip_dump_buffer(bp, osize);
- pr_debug("received, osize %d ret %d size %d total %d\n",
- osize, result, size, total);
- }
+ if (!in_interrupt())
+ pr_debug("%-10s:", current->comm);
+ else
+ pr_debug("interrupt :");
- if (send)
- pr_debug("send, total %d\n", total);
+ pr_debug("receiving....\n");
+ usbip_dump_buffer(bp, osize);
+ pr_debug("received, osize %d ret %d size %d total %d\n",
+ osize, result, size, total);
}
return total;
@@ -420,7 +394,7 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
err:
return result;
}
-EXPORT_SYMBOL_GPL(usbip_xmit);
+EXPORT_SYMBOL_GPL(usbip_recv);
struct socket *sockfd_to_socket(unsigned int sockfd)
{
@@ -712,7 +686,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
if (!buff)
return -ENOMEM;
- ret = usbip_xmit(0, ud->tcp_socket, buff, size, 0);
+ ret = usbip_recv(ud->tcp_socket, buff, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n",
ret);
@@ -823,8 +797,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
- ret = usbip_xmit(0, ud->tcp_socket, (char *)urb->transfer_buffer,
- size, 0);
+ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
if (ud->side == USBIP_STUB) {
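With the send half gone, usbip_xmit() collapses into the receive-only usbip_recv() shown above, whose core is a drain loop around kernel_recvmsg(). The sketch below isolates that loop (the recv_all() name is invented; GFP_NOIO and the debug dump are left out):

	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	/* Receive exactly @size bytes or fail; even with MSG_WAITALL a short
	 * read is possible, hence the loop. */
	static int recv_all(struct socket *sock, void *buf, int size)
	{
		struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
		struct kvec iov;
		int result, total = 0;

		do {
			iov.iov_base = buf;
			iov.iov_len = size;
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						MSG_WAITALL);
			if (result <= 0)
				return result;	/* error or connection closed */
			size -= result;
			buf += result;		/* void * arithmetic, as the kernel permits */
			total += result;
		} while (size > 0);

		return total;
	}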
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index be216175ae8..b8f8c48b8a7 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -292,21 +292,11 @@ struct usbip_device {
} eh_ops;
};
-#if 0
-int usbip_sendmsg(struct socket *, struct msghdr *, int);
-int set_sockaddr(struct socket *socket, struct sockaddr_storage *ss);
-int setnodelay(struct socket *);
-int setquickack(struct socket *);
-int setkeepalive(struct socket *socket);
-void setreuse(struct socket *);
-#endif
-
/* usbip_common.c */
void usbip_dump_urb(struct urb *purb);
void usbip_dump_header(struct usbip_header *pdu);
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags);
+int usbip_recv(struct socket *sock, void *buf, int size);
struct socket *sockfd_to_socket(unsigned int sockfd);
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
@@ -337,9 +327,4 @@ static inline int interface_to_devnum(struct usb_interface *interface)
return udev->devnum;
}
-static inline int interface_to_infnum(struct usb_interface *interface)
-{
- return interface->cur_altsetting->desc.bInterfaceNumber;
-}
-
#endif /* __USBIP_COMMON_H */
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e..3f511b47563 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
@@ -204,7 +206,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret < 0) {
if (ret == -ECONNRESET)
pr_info("connection reset by peer\n");
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
index 82c222b4a14..79f00333e7e 100644
--- a/drivers/staging/vme/TODO
+++ b/drivers/staging/vme/TODO
@@ -1,70 +1,5 @@
TODO
====
-API
-===
-
-Master window broadcast select mask
------------------------------------
-
-API currently provides no method to set or get Broadcast Select mask. Suggest
-somthing like:
-
- int vme_master_bmsk_set (struct vme_resource *res, int mask);
- int vme_master_bmsk_get (struct vme_resource *res, int *mask);
-
-
-Interrupt Generation
---------------------
-
-Add optional timeout when waiting for an IACK.
-
-
-CR/CSR Buffer
--------------
-
-The VME API provides no functions to access the buffer mapped into the CR/CSR
-space.
-
-
-Mailboxes
----------
-
-Whilst not part of the VME specification, they are provided by a number of
-chips. They are currently not supported at all by the API.
-
-
-Core
-====
-
-- Improve generic sanity checks (Such as does an offset and size fit within a
- window and parameter checking).
-
-Bridge Support
-==============
-
-Tempe (tsi148)
---------------
-
-- 2eSST Broadcast mode.
-- Mailboxes unsupported.
-- Improve error detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-
-Universe II (ca91c142)
-----------------------
-
-- Mailboxes unsupported.
-- Error Detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-- Slot detection
-
-Universe I (ca91x042)
----------------------
-
-Currently completely unsupported.
+- Add one or more device drivers which use the VME framework.
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 0e4feac138e..515b8b8e32a 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -338,7 +338,7 @@ static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity;
unsigned int temp_ctl = 0;
@@ -444,7 +444,7 @@ static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned long long vme_bound, pci_offset;
@@ -595,8 +595,8 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i, granularity = 0;
@@ -753,7 +753,7 @@ err_window:
static int __ca91cx42_master_get(struct vme_master_resource *image,
int *enabled, unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned long long pci_base, pci_bound, vme_offset;
@@ -839,8 +839,8 @@ static int __ca91cx42_master_get(struct vme_master_resource *image,
}
static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -876,13 +876,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
* maximal configured data cycle is used and splits it
* automatically for non-aligned addresses.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -930,13 +930,13 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
/* Here we apply for the same strategy we do in master_read
* function in order to assure D16 cycle when required.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
@@ -973,7 +973,8 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
- u32 pci_addr, result;
+ u32 result;
+ uintptr_t pci_addr;
int i;
struct ca91cx42_driver *bridge;
struct device *dev;
@@ -990,7 +991,7 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
/* Lock image */
spin_lock(&image->lock);
- pci_addr = (u32)image->kern_base + offset;
+ pci_addr = (uintptr_t)image->kern_base + offset;
/* Address must be 4-byte aligned */
if (pci_addr & 0x3) {
@@ -1291,7 +1292,7 @@ static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
- unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
+ unsigned long long lm_base, u32 aspace, u32 cycle)
{
u32 temp_base, lm_ctl = 0;
int i;
@@ -1359,7 +1360,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
* or disabled.
*/
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_ctl, enabled = 0;
struct ca91cx42_driver *bridge;
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 6c1167c2bea..08a449b4abf 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -483,7 +483,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
* Find the first error in this address range
*/
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos;
struct vme_bus_error *vme_err, *valid = NULL;
@@ -517,7 +517,7 @@ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
* Clear errors in the provided address range.
*/
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos, *temp;
struct vme_bus_error *vme_err;
@@ -551,7 +551,7 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
*/
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
@@ -701,7 +701,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
*/
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
@@ -893,8 +893,8 @@ static void tsi148_free_resource(struct vme_master_resource *image)
* Set the attributes of an outbound window.
*/
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -1129,8 +1129,8 @@ err_window:
* XXX Not parsing prefetch information.
*/
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
@@ -1239,8 +1239,8 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -1259,9 +1259,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
{
int retval, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1301,9 +1299,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
{
int retval = 0, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1420,7 +1416,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1514,7 +1510,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1886,7 +1882,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
@@ -1953,7 +1949,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index ca5ba89e2d8..55ec30cb1fa 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -6,3 +6,16 @@ config VME_USER
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
provided with the original driver at http://vmelinux.org/.
+
+config VME_PIO2
+ tristate "GE PIO2 VME"
+ depends on GPIOLIB
+ help
+ Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
+ slave card, implementing 32 solid-state relay switched IO lines, in
+ 4 groups of 8. Each bank of IO lines is built to function as input,
+ output or both depending on the variant of the card.
+
+ To compile this driver as a module, choose M here. The module will
+ be called vme_pio2. If unsure, say N.
+
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 459742a7528..172512cb5db 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_VME_USER) += vme_user.o
+
+vme_pio2-objs := vme_pio2_cntr.o vme_pio2_gpio.o vme_pio2_core.o
+obj-$(CONFIG_VME_PIO2) += vme_pio2.o
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
new file mode 100644
index 00000000000..3c593136453
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -0,0 +1,249 @@
+#ifndef _VME_PIO2_H_
+#define _VME_PIO2_H_
+
+#define PIO2_CARDS_MAX 32
+
+#define PIO2_VARIANT_LENGTH 5
+
+#define PIO2_NUM_CHANNELS 32
+#define PIO2_NUM_IRQS 11
+#define PIO2_NUM_CNTRS 6
+
+#define PIO2_REGS_SIZE 0x40
+
+#define PIO2_REGS_DATA0 0x0
+#define PIO2_REGS_DATA1 0x1
+#define PIO2_REGS_DATA2 0x2
+#define PIO2_REGS_DATA3 0x3
+
+static const int PIO2_REGS_DATA[4] = { PIO2_REGS_DATA0, PIO2_REGS_DATA1,
+ PIO2_REGS_DATA2, PIO2_REGS_DATA3 };
+
+#define PIO2_REGS_INT_STAT0 0x8
+#define PIO2_REGS_INT_STAT1 0x9
+#define PIO2_REGS_INT_STAT2 0xa
+#define PIO2_REGS_INT_STAT3 0xb
+
+static const int PIO2_REGS_INT_STAT[4] = { PIO2_REGS_INT_STAT0,
+ PIO2_REGS_INT_STAT1,
+ PIO2_REGS_INT_STAT2,
+ PIO2_REGS_INT_STAT3 };
+
+#define PIO2_REGS_INT_STAT_CNTR 0xc
+#define PIO2_REGS_INT_MASK0 0x10
+#define PIO2_REGS_INT_MASK1 0x11
+#define PIO2_REGS_INT_MASK2 0x12
+#define PIO2_REGS_INT_MASK3 0x13
+#define PIO2_REGS_INT_MASK4 0x14
+#define PIO2_REGS_INT_MASK5 0x15
+#define PIO2_REGS_INT_MASK6 0x16
+#define PIO2_REGS_INT_MASK7 0x17
+
+static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
+ PIO2_REGS_INT_MASK1,
+ PIO2_REGS_INT_MASK2,
+ PIO2_REGS_INT_MASK3,
+ PIO2_REGS_INT_MASK4,
+ PIO2_REGS_INT_MASK5,
+ PIO2_REGS_INT_MASK6,
+ PIO2_REGS_INT_MASK7 };
+
+
+
+#define PIO2_REGS_CTRL 0x18
+#define PIO2_REGS_VME_VECTOR 0x19
+#define PIO2_REGS_CNTR0 0x20
+#define PIO2_REGS_CNTR1 0x22
+#define PIO2_REGS_CNTR2 0x24
+#define PIO2_REGS_CTRL_WRD0 0x26
+#define PIO2_REGS_CNTR3 0x28
+#define PIO2_REGS_CNTR4 0x2a
+#define PIO2_REGS_CNTR5 0x2c
+#define PIO2_REGS_CTRL_WRD1 0x2e
+
+#define PIO2_REGS_ID 0x30
+
+
+/* PIO2_REGS_DATAx (0x0 - 0x3) */
+
+static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3 };
+
+#define PIO2_CHANNEL0_BIT (1 << 0)
+#define PIO2_CHANNEL1_BIT (1 << 1)
+#define PIO2_CHANNEL2_BIT (1 << 2)
+#define PIO2_CHANNEL3_BIT (1 << 3)
+#define PIO2_CHANNEL4_BIT (1 << 4)
+#define PIO2_CHANNEL5_BIT (1 << 5)
+#define PIO2_CHANNEL6_BIT (1 << 6)
+#define PIO2_CHANNEL7_BIT (1 << 7)
+#define PIO2_CHANNEL8_BIT (1 << 0)
+#define PIO2_CHANNEL9_BIT (1 << 1)
+#define PIO2_CHANNEL10_BIT (1 << 2)
+#define PIO2_CHANNEL11_BIT (1 << 3)
+#define PIO2_CHANNEL12_BIT (1 << 4)
+#define PIO2_CHANNEL13_BIT (1 << 5)
+#define PIO2_CHANNEL14_BIT (1 << 6)
+#define PIO2_CHANNEL15_BIT (1 << 7)
+#define PIO2_CHANNEL16_BIT (1 << 0)
+#define PIO2_CHANNEL17_BIT (1 << 1)
+#define PIO2_CHANNEL18_BIT (1 << 2)
+#define PIO2_CHANNEL19_BIT (1 << 3)
+#define PIO2_CHANNEL20_BIT (1 << 4)
+#define PIO2_CHANNEL21_BIT (1 << 5)
+#define PIO2_CHANNEL22_BIT (1 << 6)
+#define PIO2_CHANNEL23_BIT (1 << 7)
+#define PIO2_CHANNEL24_BIT (1 << 0)
+#define PIO2_CHANNEL25_BIT (1 << 1)
+#define PIO2_CHANNEL26_BIT (1 << 2)
+#define PIO2_CHANNEL27_BIT (1 << 3)
+#define PIO2_CHANNEL28_BIT (1 << 4)
+#define PIO2_CHANNEL29_BIT (1 << 5)
+#define PIO2_CHANNEL30_BIT (1 << 6)
+#define PIO2_CHANNEL31_BIT (1 << 7)
+
+static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
+ PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
+ PIO2_CHANNEL4_BIT, PIO2_CHANNEL5_BIT,
+ PIO2_CHANNEL6_BIT, PIO2_CHANNEL7_BIT,
+ PIO2_CHANNEL8_BIT, PIO2_CHANNEL9_BIT,
+ PIO2_CHANNEL10_BIT, PIO2_CHANNEL11_BIT,
+ PIO2_CHANNEL12_BIT, PIO2_CHANNEL13_BIT,
+ PIO2_CHANNEL14_BIT, PIO2_CHANNEL15_BIT,
+ PIO2_CHANNEL16_BIT, PIO2_CHANNEL17_BIT,
+ PIO2_CHANNEL18_BIT, PIO2_CHANNEL19_BIT,
+ PIO2_CHANNEL20_BIT, PIO2_CHANNEL21_BIT,
+ PIO2_CHANNEL22_BIT, PIO2_CHANNEL23_BIT,
+ PIO2_CHANNEL24_BIT, PIO2_CHANNEL25_BIT,
+ PIO2_CHANNEL26_BIT, PIO2_CHANNEL27_BIT,
+ PIO2_CHANNEL28_BIT, PIO2_CHANNEL29_BIT,
+ PIO2_CHANNEL30_BIT, PIO2_CHANNEL31_BIT
+ };
+
+/* PIO2_REGS_INT_STAT_CNTR (0xc) */
+#define PIO2_COUNTER0 (1 << 0)
+#define PIO2_COUNTER1 (1 << 1)
+#define PIO2_COUNTER2 (1 << 2)
+#define PIO2_COUNTER3 (1 << 3)
+#define PIO2_COUNTER4 (1 << 4)
+#define PIO2_COUNTER5 (1 << 5)
+
+static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
+ PIO2_COUNTER2, PIO2_COUNTER3,
+ PIO2_COUNTER4, PIO2_COUNTER5 };
+
+/* PIO2_REGS_CTRL (0x18) */
+#define PIO2_VME_INT_MASK 0x7
+#define PIO2_LED (1 << 6)
+#define PIO2_LOOP (1 << 7)
+
+/* PIO2_REGS_VME_VECTOR (0x19) */
+#define PIO2_VME_VECTOR_SPUR 0x0
+#define PIO2_VME_VECTOR_BANK0 0x1
+#define PIO2_VME_VECTOR_BANK1 0x2
+#define PIO2_VME_VECTOR_BANK2 0x3
+#define PIO2_VME_VECTOR_BANK3 0x4
+#define PIO2_VME_VECTOR_CNTR0 0x5
+#define PIO2_VME_VECTOR_CNTR1 0x6
+#define PIO2_VME_VECTOR_CNTR2 0x7
+#define PIO2_VME_VECTOR_CNTR3 0x8
+#define PIO2_VME_VECTOR_CNTR4 0x9
+#define PIO2_VME_VECTOR_CNTR5 0xa
+
+#define PIO2_VME_VECTOR_MASK 0xf0
+
+static const int PIO2_VECTOR_BANK[4] = { PIO2_VME_VECTOR_BANK0,
+ PIO2_VME_VECTOR_BANK1,
+ PIO2_VME_VECTOR_BANK2,
+ PIO2_VME_VECTOR_BANK3 };
+
+static const int PIO2_VECTOR_CNTR[6] = { PIO2_VME_VECTOR_CNTR0,
+ PIO2_VME_VECTOR_CNTR1,
+ PIO2_VME_VECTOR_CNTR2,
+ PIO2_VME_VECTOR_CNTR3,
+ PIO2_VME_VECTOR_CNTR4,
+ PIO2_VME_VECTOR_CNTR5 };
+
+/* PIO2_REGS_CNTRx (0x20 - 0x24 & 0x28 - 0x2c) */
+
+static const int PIO2_CNTR_DATA[6] = { PIO2_REGS_CNTR0, PIO2_REGS_CNTR1,
+ PIO2_REGS_CNTR2, PIO2_REGS_CNTR3,
+ PIO2_REGS_CNTR4, PIO2_REGS_CNTR5 };
+
+/* PIO2_REGS_CTRL_WRDx (0x26 & 0x2e) */
+
+static const int PIO2_CNTR_CTRL[6] = { PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1 };
+
+#define PIO2_CNTR_SC_DEV0 0
+#define PIO2_CNTR_SC_DEV1 (1 << 6)
+#define PIO2_CNTR_SC_DEV2 (2 << 6)
+#define PIO2_CNTR_SC_RDBACK (3 << 6)
+
+static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
+ PIO2_CNTR_SC_DEV2, PIO2_CNTR_SC_DEV0,
+ PIO2_CNTR_SC_DEV1, PIO2_CNTR_SC_DEV2 };
+
+#define PIO2_CNTR_RW_LATCH 0
+#define PIO2_CNTR_RW_LSB (1 << 4)
+#define PIO2_CNTR_RW_MSB (2 << 4)
+#define PIO2_CNTR_RW_BOTH (3 << 4)
+
+#define PIO2_CNTR_MODE0 0
+#define PIO2_CNTR_MODE1 (1 << 1)
+#define PIO2_CNTR_MODE2 (2 << 1)
+#define PIO2_CNTR_MODE3 (3 << 1)
+#define PIO2_CNTR_MODE4 (4 << 1)
+#define PIO2_CNTR_MODE5 (5 << 1)
+
+#define PIO2_CNTR_BCD 1
+
+
+
+enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
+enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
+
+/* Bank configuration structure */
+struct pio2_io_bank {
+ enum pio2_bank_config config;
+ u8 value;
+ enum pio2_int_config irq[8];
+};
+
+/* Counter configuration structure */
+struct pio2_cntr {
+ int mode;
+ int count;
+};
+
+struct pio2_card {
+ int id;
+ int bus;
+ long base;
+ int irq_vector;
+ int irq_level;
+ char variant[6];
+ int led;
+
+ struct vme_dev *vdev;
+ struct vme_resource *window;
+
+ struct gpio_chip gc;
+ struct pio2_io_bank bank[4];
+
+ struct pio2_cntr cntr[6];
+};
+
+int pio2_cntr_reset(struct pio2_card *);
+
+int pio2_gpio_reset(struct pio2_card *);
+int __init pio2_gpio_init(struct pio2_card *);
+void __exit pio2_gpio_exit(struct pio2_card *);
+
+#endif /* _VME_PIO2_H_ */
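Note: the lookup tables in the header above let the driver map a flat GPIO
offset onto a bank data register and a bit within that register. A small
illustration, assuming the same includes the driver sources themselves use
(the values in the comment follow directly from the tables):

	#include <linux/types.h>
	#include <linux/gpio.h>

	#include "../vme.h"
	#include "vme_pio2.h"

	/*
	 * Channel 13 sits in bank 1 (PIO2_CHANNEL_BANK[13] == 1), is bit 5 of
	 * that bank (PIO2_CHANNEL_BIT[13] == 1 << 5) and is reached through
	 * data register PIO2_REGS_DATA[1] == 0x1.
	 */
	static int example_channel_reg(unsigned int channel, u8 *bit)
	{
		*bit = PIO2_CHANNEL_BIT[channel];
		return PIO2_REGS_DATA[PIO2_CHANNEL_BANK[channel]];
	}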
diff --git a/drivers/staging/vme/devices/vme_pio2_cntr.c b/drivers/staging/vme/devices/vme_pio2_cntr.c
new file mode 100644
index 00000000000..08e0d59806c
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_cntr.c
@@ -0,0 +1,71 @@
+/*
+ * GE PIO2 Counter Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The PIO-2 has 6 counters; currently this code just disables the interrupts
+ * and leaves them alone.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static int pio2_cntr_irq_set(struct pio2_card *card, int id)
+{
+ int retval;
+ u8 data;
+
+ data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]);
+ if (retval < 0)
+ return retval;
+
+ data = card->cntr[id].count & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ data = (card->cntr[id].count >> 8) & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+int pio2_cntr_reset(struct pio2_card *card)
+{
+ int i, retval = 0;
+ u8 reg;
+
+ /* Clear down all timers */
+ for (i = 0; i < 6; i++) {
+ card->cntr[i].mode = PIO2_CNTR_MODE5;
+ card->cntr[i].count = 0;
+ retval = pio2_cntr_irq_set(card, i);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Ensure all counter interrupts are cleared */
+ do {
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT_CNTR);
+ if (retval < 0)
+ return retval;
+ } while (reg != 0);
+
+ return retval;
+}
+
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
new file mode 100644
index 00000000000..9fedc442a77
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -0,0 +1,524 @@
+/*
+ * GE PIO2 6U VME I/O Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+
+static const char driver_name[] = "pio2";
+
+static int bus[PIO2_CARDS_MAX];
+static int bus_num;
+static long base[PIO2_CARDS_MAX];
+static int base_num;
+static int vector[PIO2_CARDS_MAX];
+static int vector_num;
+static int level[PIO2_CARDS_MAX];
+static int level_num;
+static const char *variant[PIO2_CARDS_MAX];
+static int variant_num;
+
+static int loopback;
+
+static int pio2_match(struct vme_dev *);
+static int __devinit pio2_probe(struct vme_dev *);
+static int __devexit pio2_remove(struct vme_dev *);
+
+static int pio2_get_led(struct pio2_card *card)
+{
+ /* Can't read hardware, state saved in structure */
+ return card->led;
+}
+
+static int pio2_set_led(struct pio2_card *card, int state)
+{
+ u8 reg;
+ int retval;
+
+ reg = card->irq_level;
+
+ /* Register state inverse of led state */
+ if (!state)
+ reg |= PIO2_LED;
+
+ if (loopback)
+ reg |= PIO2_LOOP;
+
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ card->led = state ? 1 : 0;
+
+ return 0;
+}
+
+static void pio2_int(int level, int vector, void *ptr)
+{
+ int vec, i, channel, retval;
+ u8 reg;
+ struct pio2_card *card = ptr;
+
+ vec = vector & ~PIO2_VME_VECTOR_MASK;
+
+ switch (vec) {
+ case 0:
+ dev_warn(&card->vdev->dev, "Spurious Interrupt\n");
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+		/* I/O banks 0 to 3 (channels 0 to 31) */
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT[vec - 1]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to read IRQ status register\n");
+ return;
+ }
+ for (i = 0; i < 8; i++) {
+ channel = ((vec - 1) * 8) + i;
+ if (reg & PIO2_CHANNEL_BIT[channel])
+ dev_info(&card->vdev->dev,
+ "Interrupt on I/O channel %d\n",
+ channel);
+ }
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 9:
+ case 10:
+ /* Counters are dealt with by their own handler */
+ dev_err(&card->vdev->dev,
+ "Counter interrupt\n");
+ break;
+ }
+}
+
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+static int pio2_reset_card(struct pio2_card *card)
+{
+ int retval = 0;
+ u8 data = 0;
+
+ /* Clear main register*/
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Clear VME vector */
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Reset GPIO */
+ retval = pio2_gpio_reset(card);
+ if (retval < 0)
+ return retval;
+
+ /* Reset counters */
+ retval = pio2_cntr_reset(card);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static struct vme_driver pio2_driver = {
+ .name = driver_name,
+ .match = pio2_match,
+ .probe = pio2_probe,
+ .remove = __devexit_p(pio2_remove),
+};
+
+
+static int __init pio2_init(void)
+{
+ int retval = 0;
+
+ if (bus_num == 0) {
+ printk(KERN_ERR "%s: No cards, skipping registration\n",
+ driver_name);
+ goto err_nocard;
+ }
+
+ if (bus_num > PIO2_CARDS_MAX) {
+ printk(KERN_ERR
+ "%s: Driver only able to handle %d PIO2 Cards\n",
+ driver_name, PIO2_CARDS_MAX);
+ bus_num = PIO2_CARDS_MAX;
+ }
+
+ /* Register the PIO2 driver */
+ retval = vme_register_driver(&pio2_driver, bus_num);
+ if (retval != 0)
+ goto err_reg;
+
+ return retval;
+
+err_reg:
+err_nocard:
+ return retval;
+}
+
+static int pio2_match(struct vme_dev *vdev)
+{
+
+ if (vdev->num >= bus_num) {
+ dev_err(&vdev->dev,
+ "The enumeration of the VMEbus to which the board is connected must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= base_num) {
+ dev_err(&vdev->dev,
+			"The VME address for the card's registers must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= vector_num) {
+ dev_err(&vdev->dev,
+ "The IRQ vector used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= level_num) {
+ dev_err(&vdev->dev,
+ "The IRQ level used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= variant_num) {
+ dev_err(&vdev->dev, "The variant of the card must be specified");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int __devinit pio2_probe(struct vme_dev *vdev)
+{
+ struct pio2_card *card;
+ int retval;
+ int i;
+ u8 reg;
+ int vec;
+
+ card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
+ if (card == NULL) {
+ dev_err(&vdev->dev, "Unable to allocate card structure\n");
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ card->id = vdev->num;
+ card->bus = bus[card->id];
+ card->base = base[card->id];
+ card->irq_vector = vector[card->id];
+ card->irq_level = level[card->id] & PIO2_VME_INT_MASK;
+ strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH);
+ card->vdev = vdev;
+
+ for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
+
+ if (isdigit(card->variant[i]) == 0) {
+ dev_err(&card->vdev->dev, "Variant invalid\n");
+ retval = -EINVAL;
+ goto err_variant;
+ }
+ }
+
+ /*
+ * Bottom 4 bits of VME interrupt vector used to determine source,
+ * provided vector should only use upper 4 bits.
+ */
+ if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
+ dev_err(&card->vdev->dev,
+ "Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
+ retval = -EINVAL;
+ goto err_vector;
+ }
+
+ /*
+ * There is no way to determine the build variant or whether each bank
+ * is input, output or both at run time. The inputs are also inverted
+ * if configured as both.
+ *
+ * We pass in the board variant and use that to determine the
+ * configuration of the banks.
+ */
+ for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
+ switch (card->variant[i]) {
+ case '0':
+ card->bank[i-1].config = NOFIT;
+ break;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ card->bank[i-1].config = INPUT;
+ break;
+ case '5':
+ card->bank[i-1].config = OUTPUT;
+ break;
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ card->bank[i-1].config = BOTH;
+ break;
+ }
+ }
+
+ /* Get a master window and position over regs */
+ card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
+ if (card->window == NULL) {
+ dev_err(&card->vdev->dev,
+ "Unable to assign VME master resource\n");
+ retval = -EIO;
+ goto err_window;
+ }
+
+ retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
+ (VME_SCT | VME_USER | VME_DATA), VME_D16);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Unable to configure VME master resource\n");
+ goto err_set;
+ }
+
+ /*
+ * There is also no obvious register which we can probe to determine
+ * whether the provided base is valid. If we can read the "ID Register"
+ * offset and the reset function doesn't error, assume we have a valid
+ * location.
+ */
+ retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from device\n");
+ goto err_read;
+ }
+
+ dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg);
+
+ /*
+ * Ensure all the I/O is cleared. We can't read back the states, so
+ * this is the only method we have to ensure that the I/O is in a known
+ * state.
+ */
+ retval = pio2_reset_card(card);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Failed to reset card, is location valid?");
+ retval = -ENODEV;
+ goto err_reset;
+ }
+
+ /* Configure VME Interrupts */
+ reg = card->irq_level;
+ if (pio2_get_led(card))
+ reg |= PIO2_LED;
+ if (loopback)
+ reg |= PIO2_LOOP;
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Set VME vector */
+ retval = vme_master_write(card->window, &card->irq_vector, 1,
+ PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Attach spurious interrupt handler. */
+ vec = card->irq_vector | PIO2_VME_VECTOR_SPUR;
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+			"Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_irq;
+ }
+
+ /* Attach GPIO interrupt handlers. */
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+				"Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_gpio_irq;
+ }
+ }
+
+ /* Attach counter interrupt handlers. */
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+				"Unable to attach VME interrupt vector 0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_cntr_irq;
+ }
+ }
+
+ /* Register IO */
+ retval = pio2_gpio_init(card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to register with GPIO framework\n");
+ goto err_gpio;
+ }
+
+ /* Set LED - This also sets interrupt level */
+ retval = pio2_set_led(card, 0);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to set LED\n");
+ goto err_led;
+ }
+
+ dev_set_drvdata(&card->vdev->dev, card);
+
+ dev_info(&card->vdev->dev,
+ "PIO2 (variant %s) configured at 0x%lx\n", card->variant,
+ card->base);
+
+ return 0;
+
+err_led:
+ pio2_gpio_exit(card);
+err_gpio:
+ i = 6;
+err_cntr_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ i = 4;
+err_gpio_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+err_irq:
+ pio2_reset_card(card);
+err_reset:
+err_read:
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+err_set:
+ vme_master_free(card->window);
+err_window:
+err_vector:
+err_variant:
+ kfree(card);
+err_struct:
+ return retval;
+}
+
+static int __devexit pio2_remove(struct vme_dev *vdev)
+{
+ int vec;
+ int i;
+
+ struct pio2_card *card = dev_get_drvdata(&vdev->dev);
+
+ pio2_gpio_exit(card);
+
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+
+ pio2_reset_card(card);
+
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+
+ vme_master_free(card->window);
+
+ kfree(card);
+
+ return 0;
+}
+
+static void __exit pio2_exit(void)
+{
+ vme_unregister_driver(&pio2_driver);
+}
+
+
+/* These are required for each board */
+MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
+module_param_array(bus, int, &bus_num, S_IRUGO);
+
+MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
+module_param_array(base, long, &base_num, S_IRUGO);
+
+MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
+module_param_array(vector, int, &vector_num, S_IRUGO);
+
+MODULE_PARM_DESC(level, "VME IRQ Level");
+module_param_array(level, int, &level_num, S_IRUGO);
+
+MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
+module_param_array(variant, charp, &variant_num, S_IRUGO);
+
+/* This is for debugging */
+MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
+module_param(loopback, bool, S_IRUGO);
+
+MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
+MODULE_LICENSE("GPL");
+
+module_init(pio2_init);
+module_exit(pio2_exit);
+
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
new file mode 100644
index 00000000000..dc837deb99d
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -0,0 +1,232 @@
+/*
+ * GE PIO2 GPIO Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static const char driver_name[] = "pio2_gpio";
+
+static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
+{
+ return container_of(chip, struct pio2_card, gc);
+}
+
+static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+ dev_err(&card->vdev->dev, "Channel not available as input\n");
+ return 0;
+ }
+
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from GPIO\n");
+ return 0;
+ }
+
+ /*
+ * Remember, input on channels configured as both input and output
+ * are inverted!
+ */
+ if (reg & PIO2_CHANNEL_BIT[offset]) {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 0;
+ else
+ return 1;
+ } else {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 1;
+ else
+ return 0;
+ }
+}
+
+static void pio2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+		dev_err(&card->vdev->dev, "Channel not available as output\n");
+ return;
+ }
+
+ if (value)
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value |
+ PIO2_CHANNEL_BIT[offset];
+ else
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value &
+ ~PIO2_CHANNEL_BIT[offset];
+
+ retval = vme_master_write(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to write to GPIO\n");
+ return;
+ }
+
+ card->bank[PIO2_CHANNEL_BANK[offset]].value = reg;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+			"Channel directionality not configurable at runtime\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+			"Channel directionality not configurable at runtime\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+int pio2_gpio_reset(struct pio2_card *card)
+{
+ int retval = 0;
+ int i, j;
+
+ u8 data = 0;
+
+ /* Zero output registers */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_DATA[i]);
+ if (retval < 0)
+ return retval;
+ card->bank[i].value = 0;
+ }
+
+ /* Set input interrupt masks */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[i * 2]);
+ if (retval < 0)
+ return retval;
+
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[(i * 2) + 1]);
+ if (retval < 0)
+ return retval;
+
+ for (j = 0; j < 8; j++)
+ card->bank[i].irq[j] = NONE;
+ }
+
+ /* Ensure all I/O interrupts are cleared */
+ for (i = 0; i < 4; i++) {
+ do {
+ retval = vme_master_read(card->window, &data, 1,
+ PIO2_REGS_INT_STAT[i]);
+ if (retval < 0)
+ return retval;
+ } while (data != 0);
+ }
+
+ return 0;
+}
+
+int __init pio2_gpio_init(struct pio2_card *card)
+{
+ int retval = 0;
+ char *label;
+
+ label = kmalloc(PIO2_NUM_CHANNELS, GFP_KERNEL);
+ if (label == NULL) {
+ dev_err(&card->vdev->dev, "Unable to allocate GPIO label\n");
+ return -ENOMEM;
+ }
+
+ sprintf(label, "%s@%s", driver_name, dev_name(&card->vdev->dev));
+ card->gc.label = label;
+
+ card->gc.ngpio = PIO2_NUM_CHANNELS;
+ /* Dynamic allocation of base */
+ card->gc.base = -1;
+ /* Setup pointers to chip functions */
+ card->gc.direction_input = pio2_gpio_dir_in;
+ card->gc.direction_output = pio2_gpio_dir_out;
+ card->gc.get = pio2_gpio_get;
+ card->gc.set = pio2_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = gpiochip_add(&(card->gc));
+ if (retval) {
+ dev_err(&card->vdev->dev, "Unable to register GPIO\n");
+ kfree(card->gc.label);
+ }
+
+ return retval;
+};
+
+void __exit pio2_gpio_exit(struct pio2_card *card)
+{
+ const char *label = card->gc.label;
+
+ if (gpiochip_remove(&(card->gc)))
+ dev_err(&card->vdev->dev, "Failed to remove GPIO");
+
+ kfree(label);
+}
+
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index d85a1e9dbe3..7d24cd6343e 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -10,9 +10,9 @@ struct vme_master {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
- vme_width_t dwidth; /* Maximum Data Width */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
+ u32 dwidth; /* Maximum Data Width */
#if 0
char prefetchEnable; /* Prefetch Read Enable State */
int prefetchSize; /* Prefetch Read Size (Cache Lines) */
@@ -34,8 +34,8 @@ struct vme_slave {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
#if 0
char wrPostEnable; /* Write Post State */
char rmwLock; /* Lock PCI during RMW Cycles */
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index b04b4688f70..70722ae5232 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -153,9 +153,7 @@ size_t vme_get_size(struct vme_resource *resource)
int enabled, retval;
unsigned long long base, size;
dma_addr_t buf_base;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
switch (resource->type) {
case VME_MASTER:
@@ -181,7 +179,7 @@ size_t vme_get_size(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_get_size);
-static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
+static int vme_check_window(u32 aspace, unsigned long long vme_base,
unsigned long long size)
{
int retval = 0;
@@ -232,8 +230,8 @@ static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
* Request a slave image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_slave_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle)
+struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
+ u32 cycle)
{
struct vme_bridge *bridge;
struct list_head *slave_pos = NULL;
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(vme_slave_request);
int vme_slave_set(struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -333,7 +331,7 @@ EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get(struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -388,8 +386,8 @@ EXPORT_SYMBOL(vme_slave_free);
* Request a master image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_master_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
+struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge;
struct list_head *master_pos = NULL;
@@ -456,8 +454,8 @@ err_bus:
EXPORT_SYMBOL(vme_master_request);
int vme_master_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -492,8 +490,8 @@ int vme_master_set(struct vme_resource *resource, int enabled,
EXPORT_SYMBOL(vme_master_set);
int vme_master_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -646,8 +644,7 @@ EXPORT_SYMBOL(vme_master_free);
* Request a DMA controller with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_dma_request(struct vme_dev *vdev,
- vme_dma_route_t route)
+struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
struct vme_bridge *bridge;
struct list_head *dma_pos = NULL;
@@ -743,8 +740,7 @@ EXPORT_SYMBOL(vme_new_dma_list);
/*
* Create "Pattern" type attributes
*/
-struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type)
+struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
@@ -822,7 +818,7 @@ EXPORT_SYMBOL(vme_dma_pci_attribute);
* Create "VME" type attributes
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
@@ -1173,7 +1169,7 @@ int vme_lm_count(struct vme_resource *resource)
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1195,7 +1191,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
- vme_address_t *aspace, vme_cycle_t *cycle)
+ u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1307,7 +1303,12 @@ EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */
-static int vme_add_bus(struct vme_bridge *bridge)
+static void vme_dev_release(struct device *dev)
+{
+ kfree(dev_to_vme_dev(dev));
+}
+
+int vme_register_bridge(struct vme_bridge *bridge)
{
int i;
int ret = -1;
@@ -1327,8 +1328,9 @@ static int vme_add_bus(struct vme_bridge *bridge)
return ret;
}
+EXPORT_SYMBOL(vme_register_bridge);
-static void vme_remove_bus(struct vme_bridge *bridge)
+void vme_unregister_bridge(struct vme_bridge *bridge)
{
struct vme_dev *vdev;
struct vme_dev *tmp;
@@ -1343,22 +1345,6 @@ static void vme_remove_bus(struct vme_bridge *bridge)
list_del(&bridge->bus_list);
mutex_unlock(&vme_buses_lock);
}
-
-static void vme_dev_release(struct device *dev)
-{
- kfree(dev_to_vme_dev(dev));
-}
-
-int vme_register_bridge(struct vme_bridge *bridge)
-{
- return vme_add_bus(bridge);
-}
-EXPORT_SYMBOL(vme_register_bridge);
-
-void vme_unregister_bridge(struct vme_bridge *bridge)
-{
- vme_remove_bus(bridge);
-}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
@@ -1421,10 +1407,7 @@ static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
* and if the bridge is removed, it will have to go through
* vme_unregister_bridge() to do it (which calls remove() on
* the bridge which in turn tries to acquire vme_buses_lock and
- * will have to wait). The probe() called after device
- * registration in __vme_register_driver below will also fail
- * as the bridge is being removed (since the probe() calls
- * vme_bridge_get()).
+ * will have to wait).
*/
err = __vme_register_driver_bus(drv, bridge, ndevs);
if (err)
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index e3828badca6..9d38ceed60e 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -10,7 +10,6 @@ enum vme_resource_type {
};
/* VME Address Spaces */
-typedef u32 vme_address_t;
#define VME_A16 0x1
#define VME_A24 0x2
#define VME_A32 0x4
@@ -29,7 +28,6 @@ typedef u32 vme_address_t;
/* VME Cycle Types */
-typedef u32 vme_cycle_t;
#define VME_SCT 0x1
#define VME_BLT 0x2
#define VME_MBLT 0x4
@@ -47,28 +45,23 @@ typedef u32 vme_cycle_t;
#define VME_DATA 0x8000
/* VME Data Widths */
-typedef u32 vme_width_t;
#define VME_D8 0x1
#define VME_D16 0x2
#define VME_D32 0x4
#define VME_D64 0x8
/* Arbitration Scheduling Modes */
-typedef u32 vme_arbitration_t;
#define VME_R_ROBIN_MODE 0x1
#define VME_PRIORITY_MODE 0x2
-typedef u32 vme_dma_t;
#define VME_DMA_PATTERN (1<<0)
#define VME_DMA_PCI (1<<1)
#define VME_DMA_VME (1<<2)
-typedef u32 vme_pattern_t;
#define VME_DMA_PATTERN_BYTE (1<<0)
#define VME_DMA_PATTERN_WORD (1<<1)
#define VME_DMA_PATTERN_INCREMENT (1<<2)
-typedef u32 vme_dma_route_t;
#define VME_DMA_VME_TO_MEM (1<<0)
#define VME_DMA_MEM_TO_VME (1<<1)
#define VME_DMA_VME_TO_VME (1<<2)
@@ -77,7 +70,7 @@ typedef u32 vme_dma_route_t;
#define VME_DMA_PATTERN_TO_MEM (1<<5)
struct vme_dma_attr {
- vme_dma_t type;
+ u32 type;
void *private;
};
@@ -97,7 +90,7 @@ extern struct bus_type vme_bus_type;
/**
* Structure representing a VME device
- * @id: The ID of the device (currently the bus and slot number)
+ * @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
* @drv_list: List of devices (per driver)
@@ -128,32 +121,29 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
size_t vme_get_size(struct vme_resource *);
-struct vme_resource *vme_slave_request(struct vme_dev *, vme_address_t,
- vme_cycle_t);
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
void vme_slave_free(struct vme_resource *);
-struct vme_resource *vme_master_request(struct vme_dev *, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
+ unsigned long long, u32, u32, u32);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
+ unsigned long long *, u32 *, u32 *, u32 *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
unsigned int, loff_t);
void vme_master_free(struct vme_resource *);
-struct vme_resource *vme_dma_request(struct vme_dev *, vme_dma_route_t);
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
-struct vme_dma_attr *vme_dma_pattern_attribute(u32, vme_pattern_t);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
struct vme_dma_attr *, size_t);
@@ -168,10 +158,8 @@ int vme_irq_generate(struct vme_dev *, int, int);
struct vme_resource * vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
-int vme_lm_set(struct vme_resource *, unsigned long long, vme_address_t,
- vme_cycle_t);
-int vme_lm_get(struct vme_resource *, unsigned long long *, vme_address_t *,
- vme_cycle_t *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
diff --git a/drivers/staging/vme/vme_api.txt b/drivers/staging/vme/vme_api.txt
index e8ff2151a48..856efa35f6e 100644
--- a/drivers/staging/vme/vme_api.txt
+++ b/drivers/staging/vme/vme_api.txt
@@ -86,26 +86,26 @@ driver allows a resource to be assigned based on the required attributes of the
driver in question:
struct vme_resource * vme_master_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
-
- struct vme_resource * vme_slave_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle);
-
- struct vme_resource *vme_dma_request(struct vme_dev *dev,
- vme_dma_route_t route);
-
-For slave windows these attributes are split into those of type 'vme_address_t'
-and 'vme_cycle_t'. Master windows add a further set of attributes
-'vme_cycle_t'. These attributes are defined as bitmasks and as such any
-combination of the attributes can be requested for a single window, the core
-will assign a window that meets the requirements, returning a pointer of type
-vme_resource that should be used to identify the allocated resource when it is
-used. For DMA controllers, the request function requires the potential
-direction of any transfers to be provided in the route attributes. This is
-typically VME-to-MEM and/or MEM-to-VME, though some hardware can support
-VME-to-VME and MEM-to-MEM transfers as well as test pattern generation. If an
-unallocated window fitting the requirements can not be found a NULL pointer
-will be returned.
+ u32 aspace, u32 cycle, u32 width);
+
+ struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
+ u32 cycle);
+
+ struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
+
+For slave windows these attributes are split into the VME address spaces that
+need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
+Master windows add a further set of attributes in 'width' specifying the
+required data transfer widths. These attributes are defined as bitmasks and as
+such any combination of the attributes can be requested for a single window,
+the core will assign a window that meets the requirements, returning a pointer
+of type vme_resource that should be used to identify the allocated resource
+when it is used. For DMA controllers, the request function requires the
+potential direction of any transfers to be provided in the route attributes.
+This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
+support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
+If an unallocated window fitting the requirements can not be found a NULL
+pointer will be returned.
Functions are also provided to free window allocations once they are no longer
required. These functions should be passed the pointer to the resource provided
@@ -133,12 +133,12 @@ Once a master window has been assigned the following functions can be used to
configure it and retrieve the current settings:
int vme_master_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ unsigned long long base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 width);
int vme_master_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *width);
+ unsigned long long *base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *width);
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
@@ -189,11 +189,11 @@ configure it and retrieve the current settings:
int vme_slave_set (struct vme_resource *res, int enabled,
unsigned long long base, unsigned long long size,
- dma_addr_t mem, vme_address_t aspace, vme_cycle_t cycle);
+ dma_addr_t mem, u32 aspace, u32 cycle);
int vme_slave_get (struct vme_resource *res, int *enabled,
unsigned long long *base, unsigned long long *size,
- dma_addr_t *mem, vme_address_t *aspace, vme_cycle_t *cycle);
+ dma_addr_t *mem, u32 *aspace, u32 *cycle);
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
@@ -273,8 +273,7 @@ and pattern sources and destinations (where appropriate):
Pattern source:
- struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type);
+ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
PCI source or destination:
@@ -283,7 +282,7 @@ PCI source or destination:
VME source or destination:
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ u32 aspace, u32 cycle, u32 width);
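Taken together, the attribute constructors might be used as in the sketch
below, which describes a single 4KB transfer from an already DMA-mapped PCI
buffer ('buf_dma') to the VME bus; 'dma_res' is assumed to have been obtained
with vme_dma_request(), and error handling is omitted for brevity:

	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dest;
	int retval;

	list = vme_new_dma_list(dma_res);
	src = vme_dma_pci_attribute(buf_dma);
	dest = vme_dma_vme_attribute(0x20000000ULL, VME_A32,
				     VME_SCT | VME_USER | VME_DATA, VME_D32);
	retval = vme_dma_list_add(list, src, dest, 0x1000);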
The following function should be used to free an attribute:
@@ -366,10 +365,10 @@ Once a bank of location monitors has been allocated, the following functions
are provided to configure the location and mode of the location monitor:
int vme_lm_set(struct vme_resource *res, unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle);
+ u32 aspace, u32 cycle);
int vme_lm_get(struct vme_resource *res, unsigned long long *base,
- vme_address_t *aspace, vme_cycle_t *cycle);
+ u32 *aspace, u32 *cycle);
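For instance, a bank obtained with vme_lm_request() might be configured to
watch an A32 address as in the sketch below (illustrative values only):

	int retval;

	retval = vme_lm_set(lm, 0x20000000ULL, VME_A32,
			    VME_SCT | VME_USER | VME_DATA);
	if (retval)
		pr_err("vme: failed to configure location monitor\n");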
Location Monitor Use
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index c2deda2c38d..934949abd74 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -15,9 +15,9 @@ struct vme_master_resource {
spinlock_t lock;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
- vme_width_t width_attr;
+ u32 address_attr;
+ u32 cycle_attr;
+ u32 width_attr;
struct resource bus_resource;
void __iomem *kern_base;
};
@@ -28,13 +28,13 @@ struct vme_slave_resource {
struct mutex mtx;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
+ u32 address_attr;
+ u32 cycle_attr;
};
struct vme_dma_pattern {
u32 pattern;
- vme_pattern_t type;
+ u32 type;
};
struct vme_dma_pci {
@@ -43,9 +43,9 @@ struct vme_dma_pci {
struct vme_dma_vme {
unsigned long long address;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace;
+ u32 cycle;
+ u32 dwidth;
};
struct vme_dma_list {
@@ -63,7 +63,7 @@ struct vme_dma_resource {
int number;
struct list_head pending;
struct list_head running;
- vme_dma_route_t route_attr;
+ u32 route_attr;
};
struct vme_lm_resource {
@@ -122,17 +122,16 @@ struct vme_bridge {
/* Slave Functions */
int (*slave_get) (struct vme_slave_resource *, int *,
unsigned long long *, unsigned long long *, dma_addr_t *,
- vme_address_t *, vme_cycle_t *);
+ u32 *, u32 *);
int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
/* Master Functions */
int (*master_get) (struct vme_master_resource *, int *,
- unsigned long long *, unsigned long long *, vme_address_t *,
- vme_cycle_t *, vme_width_t *);
+ unsigned long long *, unsigned long long *, u32 *, u32 *,
+ u32 *);
int (*master_set) (struct vme_master_resource *, int,
- unsigned long long, unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+ unsigned long long, unsigned long long, u32, u32, u32);
ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
loff_t);
ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
@@ -151,10 +150,9 @@ struct vme_bridge {
int (*irq_generate) (struct vme_bridge *, int, int);
/* Location monitor functions */
- int (*lm_set) (struct vme_lm_resource *, unsigned long long,
- vme_address_t, vme_cycle_t);
- int (*lm_get) (struct vme_lm_resource *, unsigned long long *,
- vme_address_t *, vme_cycle_t *);
+ int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
+ int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+ u32 *);
int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
int (*lm_detach) (struct vme_lm_resource *, int);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index d8dd7846447..3e8283c2dc7 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3153,11 +3153,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 432a20993c6..7fd5cc5a55f 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -300,6 +300,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -571,6 +575,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -EINVAL;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 5e425d1476b..87288db2178 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -151,18 +151,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
-
/*
* Wireless Handler : set scan
*/
diff --git a/drivers/staging/vt6655/iwctl.h b/drivers/staging/vt6655/iwctl.h
index 3096de0ba1b..d224f913a62 100644
--- a/drivers/staging/vt6655/iwctl.h
+++ b/drivers/staging/vt6655/iwctl.h
@@ -79,11 +79,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index fceec4999c3..39f98423dc0 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -224,8 +224,6 @@ vMgrDecodeBeacon(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
-
- return;
}
@@ -248,8 +246,6 @@ vMgrEncodeIBSSATIM(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
-
- return;
}
@@ -270,8 +266,6 @@ vMgrDecodeIBSSATIM(
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- return;
}
@@ -298,8 +292,6 @@ vMgrEncodeDisassociation(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
-
- return;
}
@@ -324,8 +316,6 @@ vMgrDecodeDisassociation(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
-
- return;
}
/*+
@@ -352,7 +342,6 @@ vMgrEncodeAssocRequest(
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
- return;
}
@@ -418,7 +407,6 @@ vMgrDecodeAssocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
/*+
@@ -448,8 +436,6 @@ vMgrEncodeAssocResponse(
+ WLAN_ASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
+ sizeof(*(pFrame->pwAid));
-
- return;
}
@@ -491,10 +477,8 @@ vMgrDecodeAssocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
- } else {
+ } else
pFrame->pExtSuppRates = NULL;
- }
- return;
}
@@ -524,8 +508,6 @@ vMgrEncodeReassocRequest(
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP + sizeof(*(pFrame->pAddrCurrAP));
-
- return;
}
@@ -578,10 +560,9 @@ vMgrDecodeReassocRequest(
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
break;
case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
+ if (pFrame->pRSNWPA == NULL)
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
break;
case WLAN_EID_EXTSUPP_RATES:
@@ -595,7 +576,6 @@ vMgrDecodeReassocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -619,7 +599,6 @@ vMgrEncodeProbeRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
- return;
}
/*+
@@ -670,7 +649,6 @@ vMgrDecodeProbeRequest(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -703,8 +681,6 @@ vMgrEncodeProbeResponse(
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
sizeof(*(pFrame->pwCapInfo));
-
- return;
}
@@ -818,7 +794,6 @@ vMgrDecodeProbeResponse(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -848,7 +823,6 @@ vMgrEncodeAuthen(
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
- return;
}
@@ -886,7 +860,6 @@ vMgrDecodeAuthen(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE))
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
- return;
}
@@ -912,7 +885,6 @@ vMgrEncodeDeauthen(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
- return;
}
@@ -937,7 +909,6 @@ vMgrDecodeDeauthen(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
- return;
}
@@ -968,7 +939,6 @@ vMgrEncodeReassocResponse(
+ WLAN_REASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
- return;
}
@@ -1010,5 +980,4 @@ vMgrDecodeReassocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES))
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- return;
}
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 0d11147f91c..06f27f624db 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -932,30 +932,8 @@ BBvCaculateParameter (
void
BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
{
- //{{ RobertYu: 20041124, ABG Mode, VC1/VC2 define, make the ANT_A, ANT_B inverted
- /*if ( (pDevice->byRFType == RF_MAXIM2829) ||
- (pDevice->byRFType == RF_UW2452) ||
- (pDevice->byRFType == RF_AIROHA7230) ) { // RobertYu: 20041210, 20050104
-
- switch (byAntennaMode) {
- case ANT_TXA:
- byAntennaMode = ANT_TXB;
- break;
- case ANT_TXB:
- byAntennaMode = ANT_TXA;
- break;
- case ANT_RXA:
- byAntennaMode = ANT_RXB;
- break;
- case ANT_RXB:
- byAntennaMode = ANT_RXA;
- break;
- }
- }*/
-
switch (byAntennaMode) {
case ANT_TXA:
- break;
case ANT_TXB:
break;
case ANT_RXA:
@@ -1249,8 +1227,7 @@ void BBvLoopbackOff (PSDevice pDevice)
// Set the CR33 Bit2 to disable internal Loopback.
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x21, &byData);//CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, (BYTE)(byData & 0xFE));//CR33
- }
- else { // OFDM
+ } else { /* OFDM */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x9A, &byData);//CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, (BYTE)(byData & 0xFE));//CR154
}
@@ -1277,19 +1254,16 @@ BBvSetShortSlotTime (PSDevice pDevice)
{
BYTE byBBVGA=0;
- if (pDevice->bShortSlotTime) {
+ if (pDevice->bShortSlotTime)
pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
+ else
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0xE7, &byBBVGA);
- if (byBBVGA == pDevice->abyBBVGA[0]) {
+ if (byBBVGA == pDevice->abyBBVGA[0])
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);
-
}
@@ -1299,13 +1273,11 @@ void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);
// patch for 3253B0 Baseband with Cardbus module
- if (byData == pDevice->abyBBVGA[0]) {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- } else if (pDevice->bShortSlotTime) {
- pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- }
+ if (pDevice->bShortSlotTime)
+ pDevice->byBBRxConf &= 0xDF; /* 1101 1111 */
+ else
+ pDevice->byBBRxConf |= 0x20; /* 0010 0000 */
+
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);//CR10
}
@@ -1365,15 +1337,14 @@ static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
unsigned long ulMaxPacket;
ulMaxPacket = pDevice->aulPktNum[RATE_54M];
- if ( pDevice->aulPktNum[RATE_54M] != 0 ) {
+ if (pDevice->aulPktNum[RATE_54M] != 0)
ulSQ3 = pDevice->aulSQ3Val[RATE_54M] / pDevice->aulPktNum[RATE_54M];
- }
- for ( ii=RATE_48M;ii>=RATE_6M;ii-- ) {
- if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
+
+ for (ii = RATE_48M; ii >= RATE_6M; ii--)
+ if (pDevice->aulPktNum[ii] > ulMaxPacket) {
ulMaxPacket = pDevice->aulPktNum[ii];
ulSQ3 = pDevice->aulSQ3Val[ii] / pDevice->aulPktNum[ii];
}
- }
return ulSQ3;
}
@@ -1392,7 +1363,7 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
ulRatio += TOP_RATE_54M;
}
- for ( ii=RATE_48M;ii>=RATE_1M;ii-- ) {
+ for (ii = RATE_48M; ii >= RATE_1M; ii--)
if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
ulPacketNum = 0;
for ( jj=RATE_54M;jj>=ii;jj--)
@@ -1402,8 +1373,6 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulMaxPacket = pDevice->aulPktNum[ii];
}
- }
-
return ulRatio;
}
@@ -1589,7 +1558,6 @@ void TimerSQ3CallBack(void *hDeviceContext)
spin_unlock_irq(&pDevice->lock);
- return;
}
@@ -1637,7 +1605,6 @@ void TimerSQ3Tmax3CallBack(void *hDeviceContext)
add_timer(&pDevice->TimerSQ3Tmax1);
spin_unlock_irq(&pDevice->lock);
- return;
}
void
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index af006df4c8e..32c67ed8435 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -216,26 +216,6 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
continue;
}
}
-/*
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == TRUE) {
- // WPA AP will reject connection of station without WPA enable.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == FALSE) {
- // station with WPA enable can't join NonWPA AP.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == FALSE) {
- // station with WPA2 enable can't join NonWPA2 AP.
- continue;
- }
- }
-*/
pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList pSelect1[%02X %02X %02X-%02X %02X %02X]\n",*pCurrBSS->abyBSSID,*(pCurrBSS->abyBSSID+1),*(pCurrBSS->abyBSSID+2),*(pCurrBSS->abyBSSID+3),*(pCurrBSS->abyBSSID+4),*(pCurrBSS->abyBSSID+5));
@@ -300,18 +280,11 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
continue;
}
}
-/*
- if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
- pMgmt->sBSSList[ii].uClearCount ++;
- continue;
- }
-*/
- pMgmt->sBSSList[ii].bActive = FALSE;
+
+ pMgmt->sBSSList[ii].bActive = FALSE;
memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
}
BSSvClearAnyBSSJoinRecord(pDevice);
-
- return;
}
@@ -524,46 +497,6 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
pBSSList->ldBmAverage[ii] = 0;
}
-/*
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == TRUE)) {
- CARDvSetCountryInfo(pMgmt->pAdapter,
- pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if ((bParsingQuiet == TRUE) && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- TRUE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- FALSE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if ((bParsingQuiet == TRUE) &&
- (pQuiet != NULL)) {
- CARDbStartQuiet(pMgmt->pAdapter);
- }
-*/
-
pBSSList->uIELength = uIELength;
if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
@@ -609,8 +542,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
signed long ldBm, ldBmSum;
BOOL bParsingQuiet = FALSE;
- // BYTE abyTmpSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
-
if (pBSSList == NULL)
return FALSE;
@@ -622,7 +553,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
pBSSList->uChannel = byCurrChannel;
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);
if (pSSID->len > WLAN_SSID_MAXLEN)
pSSID->len = WLAN_SSID_MAXLEN;
@@ -809,7 +739,6 @@ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
- return;
};
@@ -840,8 +769,6 @@ void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
// clear tx bit map
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-
- return;
};
/*+
*
@@ -1054,10 +981,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
// Rate fallback check
if (!pDevice->bFixRate) {
-/*
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
- RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
-*/
if (ii > 0) {
// ii = 0 for multicast node (AP & Adhoc)
RATEvTxRateFallBack((void *)pDevice,
@@ -1152,7 +1075,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- // DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
if (pDevice->bUpdateBBVGA) {
/* s_vCheckSensitivity((void *) pDevice); */
@@ -1194,7 +1116,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
}
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1223,7 +1144,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
pDevice->uIsroamingTime = 0;
pDevice->bRoaming = FALSE;
-// if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_CCKM_ROAM_MSG;
wpahdr->resp_ie_len = 0;
@@ -1237,7 +1157,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
netif_rx(pDevice->skb);
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
-// }
}
else if ((pDevice->bRoaming == FALSE)&&(pDevice->bIsRoaming == TRUE)) {
pDevice->uIsroamingTime++;
@@ -1315,7 +1234,6 @@ else {
pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
add_timer(&pMgmt->sTimerSecondCallback);
- return;
}
/*+
@@ -1364,7 +1282,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
// Only Unicast using support rates
if (wFIFOCtl & FIFOCTL_NEEDACK) {
- //DBG_PRN_GRP21(("Device %08X, wRate %04X, byTSR %02X\n", hDeviceContext, wRate, byTSR));
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
pMgmt->sNodeDBTable[0].uTxAttempts += 1;
if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
@@ -1475,10 +1392,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
}
}
}
-
- return;
-
-
}
/*+
@@ -1519,8 +1432,6 @@ void BSSvClearNodeDBTable(void *hDeviceContext,
memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
}
}
-
- return;
};
void s_vCheckSensitivity(void *hDeviceContext)
@@ -1584,7 +1495,6 @@ RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
//decide link quality
if(pDevice->bLinkPass !=TRUE)
{
- // printk("s_uCalculateLinkQual-->Link disconnect and Poor quality**\n");
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
}
@@ -1608,7 +1518,6 @@ else
pDevice->scStatistic.TxFailCount = 0;
pDevice->scStatistic.TxNoRetryOkCount = 0;
pDevice->scStatistic.TxRetryOkCount = 0;
- return;
}
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
@@ -1617,10 +1526,8 @@ void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
unsigned int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++)
pMgmt->sBSSList[ii].bSelected = FALSE;
- }
- return;
}
void s_vCheckPreEDThreshold(void *hDeviceContext)
@@ -1637,6 +1544,5 @@ void s_vCheckPreEDThreshold(void *hDeviceContext)
BBvUpdatePreEDThreshold(pDevice, FALSE);
}
}
- return;
}
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index a49053bd7c6..9d09e9fd8e1 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -91,15 +91,10 @@ const WORD cwRXBCNTSFOff[MAX_RATE] =
* uConnectionChannel - Channel to be set
* Out:
* none
- *
- * Return Value: TRUE if succeeded; FALSE if failed.
- *
*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
+void CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-BOOL bResult = TRUE;
-
if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
@@ -140,7 +135,6 @@ BOOL bResult = TRUE;
RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M);
}
ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(BYTE)(uConnectionChannel|0x80));
- return(bResult);
}
/*
@@ -607,7 +601,7 @@ BYTE ii;
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
WORD wRate = (WORD)(1<<wRateIdx);
@@ -616,8 +610,6 @@ WORD wRate = (WORD)(1<<wRateIdx);
//Determines the highest basic rate.
CARDvUpdateBasicTopRate(pDevice);
-
- return(TRUE);
}
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler)
@@ -1090,7 +1082,7 @@ CARDbChannelSwitch (
if (byCount == 0) {
pDevice->sMgmtObj.uCurrChannel = byNewChannel;
- bResult = CARDbSetMediaChannel(pDevice, byNewChannel);
+ CARDbSetMediaChannel(pDevice, byNewChannel);
return bResult;
}
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 6c91343d0d1..9cf71a3d880 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -60,12 +60,12 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Functions --------------------------*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler,
+void CARDbSetMediaChannel(void *pDeviceHandler,
unsigned int uConnectionChannel);
void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType);
void vUpdateIFS(void *pDeviceHandler);
void CARDvUpdateBasicTopRate(void *pDeviceHandler);
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
QWORD qwBSSTimestamp, QWORD qwLocalTSF);
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index c95833ac58e..0a114231145 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -92,9 +92,8 @@ void INTvWorkItem(void *Context)
spin_unlock_irq(&pDevice->lock);
}
-int INTnsProcessData(PSDevice pDevice)
+void INTnsProcessData(PSDevice pDevice)
{
- int status = STATUS_SUCCESS;
PSINTData pINTData;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct net_device_stats *pStats = &pDevice->stats;
@@ -218,6 +217,4 @@ int INTnsProcessData(PSDevice pDevice)
pDevice->scStatistic.ullTxBroadcastBytes;
pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
-
- return status;
}
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 3176c8d08d6..a5d96b96817 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -68,6 +68,6 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
void INTvWorkItem(void *Context);
-int INTnsProcessData(PSDevice pDevice);
+void INTnsProcessData(PSDevice pDevice);
#endif /* __INT_H__ */
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 49390026dea..1463d76895f 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -295,6 +295,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -557,6 +561,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -ENOMEM;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 2121205a912..ecfda5272fa 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -128,17 +128,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
/*
* Wireless Handler : set scan
*/
@@ -1939,7 +1928,6 @@ static const iw_handler iwctl_handler[] =
(iw_handler) iwctl_commit, // SIOCSIWCOMMIT
(iw_handler) iwctl_giwname, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) NULL, // SIOCGIWNWID
(iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
(iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
(iw_handler) iwctl_siwmode, // SIOCSIWMODE
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index cc48954783f..10a240e6501 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -77,11 +77,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 26c19d1408c..af4a29d1477 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -133,10 +133,9 @@ void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL MACbShutdown (PSDevice pDevice)
+void MACbShutdown(PSDevice pDevice)
{
CONTROLnsRequestOutAsyn(pDevice,
MESSAGE_TYPE_MACSHUTDOWN,
@@ -145,7 +144,6 @@ BOOL MACbShutdown (PSDevice pDevice)
0,
NULL
);
- return TRUE;
}
void MACvSetBBType(PSDevice pDevice,BYTE byType)
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 491ff5ecd04..147ac50218d 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -422,7 +422,7 @@
void MACvSetMultiAddrByHash(PSDevice pDevice, BYTE byHashIdx);
void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
-BOOL MACbShutdown(PSDevice pDevice);
+void MACbShutdown(PSDevice pDevice);
void MACvSetBBType(PSDevice pDevice, BYTE byType);
void MACvSetMISCFifo(PSDevice pDevice, WORD wOffset, DWORD dwData);
void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 27521b69ce0..6a708f44765 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -900,7 +900,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
}
// allocate rcb mem
- pDevice->pRCBMem = kmalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
+ pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -912,7 +912,6 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
pDevice->FirstRecvMngList = NULL;
pDevice->LastRecvMngList = NULL;
pDevice->NumRecvFreeList = 0;
- memset(pDevice->pRCBMem, 0, (sizeof(RCB) * pDevice->cbRD));
pRCB = (PRCB) pDevice->pRCBMem;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -1618,15 +1617,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCSIWNWID:
- rc = -EOPNOTSUPP;
- break;
-
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
@@ -2103,16 +2095,4 @@ static struct usb_driver vt6656_driver = {
#endif /* CONFIG_PM */
};
-static int __init vt6656_init_module(void)
-{
- printk(KERN_NOTICE DEVICE_FULL_DRV_NAM " " DEVICE_VERSION);
- return usb_register(&vt6656_driver);
-}
-
-static void __exit vt6656_cleanup_module(void)
-{
- usb_deregister(&vt6656_driver);
-}
-
-module_init(vt6656_init_module);
-module_exit(vt6656_cleanup_module);
+module_usb_driver(vt6656_driver);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index f958eb4f0d8..c3751a71838 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -865,15 +865,4 @@ static struct usb_driver wb35_driver = {
.disconnect = wb35_disconnect,
};
-static int __init wb35_init(void)
-{
- return usb_register(&wb35_driver);
-}
-
-static void __exit wb35_exit(void)
-{
- usb_deregister(&wb35_driver);
-}
-
-module_init(wb35_init);
-module_exit(wb35_exit);
+module_usb_driver(wb35_driver);
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 8d5dddf0805..811698f1070 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/dhfcfg.h b/drivers/staging/wlags49_h2/dhfcfg.h
index 75c279f268a..147f4c83c00 100644
--- a/drivers/staging/wlags49_h2/dhfcfg.h
+++ b/drivers/staging/wlags49_h2/dhfcfg.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 7dc176a95aa..b008773323b 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -32,9 +32,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT Š 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT Š 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 00099473116..95527b5cf86 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -40,9 +40,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT Š 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT Š 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfcfg.h b/drivers/staging/wlags49_h2/hcfcfg.h
index 7545bc55411..ef60da8c3eb 100644
--- a/drivers/staging/wlags49_h2/hcfcfg.h
+++ b/drivers/staging/wlags49_h2/hcfcfg.h
@@ -64,9 +64,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT Š 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT Š 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfdef.h b/drivers/staging/wlags49_h2/hcfdef.h
index a62b53a2289..30744e194a2 100644
--- a/drivers/staging/wlags49_h2/hcfdef.h
+++ b/drivers/staging/wlags49_h2/hcfdef.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT Š 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT Š 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index b02e3ea9e47..5f951efb9c0 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT Š 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT Š 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.c b/drivers/staging/wlags49_h2/mmd.c
index de138c481a9..c8f52107e6c 100644
--- a/drivers/staging/wlags49_h2/mmd.c
+++ b/drivers/staging/wlags49_h2/mmd.c
@@ -35,7 +35,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.h b/drivers/staging/wlags49_h2/mmd.h
index 06890c1b30a..91495251300 100644
--- a/drivers/staging/wlags49_h2/mmd.h
+++ b/drivers/staging/wlags49_h2/mmd.h
@@ -33,7 +33,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT Š 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 21f17be4f02..a7ab579759d 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 26cf5486edd..4c6f776cc4d 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index b4f54d81f31..46629f3b112 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_if.h b/drivers/staging/wlags49_h2/wl_if.h
index ed2b4135a10..6a26130f5a3 100644
--- a/drivers/staging/wlags49_h2/wl_if.h
+++ b/drivers/staging/wlags49_h2/wl_if.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index 57534083405..553601f4887 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 483eee1bf63..dab603e0f45 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index d593ae535fb..3b5acdf4e32 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 5a2b334f206..9c16f5478a7 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.h b/drivers/staging/wlags49_h2/wl_netdev.h
index 632ab2e6302..61f040f26d9 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.h
+++ b/drivers/staging/wlags49_h2/wl_netdev.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 28ae9dd1b44..2bd9b84ace8 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
@@ -112,17 +112,10 @@ extern dbg_info_t *DbgInfo;
#endif // DBG
/* define the PCI device Table Cardname and id tables */
-enum hermes_pci_versions {
- CH_Agere_Systems_Mini_PCI_V1 = 0,
-};
-
static struct pci_device_id wl_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), },
{ } /* Terminating entry */
};
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
index cea04c44ec4..86831f1b4de 100644
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ b/drivers/staging/wlags49_h2/wl_pci.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 260d4f0d47b..f30e5ee4bca 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.h b/drivers/staging/wlags49_h2/wl_priv.h
index 9b0254497aa..b647bfd9009 100644
--- a/drivers/staging/wlags49_h2/wl_priv.h
+++ b/drivers/staging/wlags49_h2/wl_priv.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index a459e48c7bf..b8c96cf18de 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.h b/drivers/staging/wlags49_h2/wl_profile.h
index 81db8e8c6ba..f81df51d221 100644
--- a/drivers/staging/wlags49_h2/wl_profile.h
+++ b/drivers/staging/wlags49_h2/wl_profile.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 3b6f5a59b2b..b748a3ff795 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 2661bcd6b0e..946b1b64c46 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index fd37040afd0..3deacfac9d2 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 8ac5e1081aa..7ff0a108da1 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -18,7 +18,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -39,7 +39,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index a713058c802..029da52c4c4 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright Š 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 4efa027a81e..b1aed1f1f74 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -358,16 +358,4 @@ static struct usb_driver prism2_usb_driver = {
/* fops, minor? */
};
-static int __init prism2usb_init(void)
-{
- /* This call will result in calls to prism2sta_probe_usb. */
- return usb_register(&prism2_usb_driver);
-};
-
-static void __exit prism2usb_cleanup(void)
-{
- usb_deregister(&prism2_usb_driver);
-};
-
-module_init(prism2usb_init);
-module_exit(prism2usb_cleanup);
+module_usb_driver(prism2_usb_driver);
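Note on the hunk above: module_usb_driver() is the kernel helper macro that
generates exactly the registration boilerplate being deleted. Roughly what
module_usb_driver(prism2_usb_driver) expands to, via the generic
module_driver() helper (a paraphrase for the reader, not part of the patch):

static int __init prism2_usb_driver_init(void)
{
	return usb_register(&prism2_usb_driver);
}
module_init(prism2_usb_driver_init);

static void __exit prism2_usb_driver_exit(void)
{
	usb_deregister(&prism2_usb_driver);
}
module_exit(prism2_usb_driver_exit);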
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
index 3c8c7de9ead..55e51990534 100644
--- a/drivers/staging/xgifb/Makefile
+++ b/drivers/staging/xgifb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_XGI) += xgifb.o
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o vb_ext.o
+xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 71aebe3e84d..35f7b2a485e 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -32,14 +32,10 @@
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 1},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 2},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 3},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
{0}
};
@@ -128,7 +124,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* display status */
static int XGIfb_crt1off;
static int XGIfb_forcecrt1 = -1;
-static int XGIfb_userom ;
/* global flags */
static int XGIfb_tvmode;
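Note on the device-table hunk above: PCI_DEVICE() is the standard helper from
include/linux/pci.h, roughly (a paraphrase, not part of the patch):

#define PCI_DEVICE(vend, dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

Class, class_mask and driver_data stay zero-initialized, so the per-entry
driver_data values 0..3 of the old table are dropped; the driver appears to
identify the chip from hardware registers instead (see the "chipid" read in
the XGI_main_26.c probe hunk below), so nothing seems to consume them.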
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 277e408c39c..2502c49c9c5 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -46,8 +45,7 @@
#define GPIOG_EN (1<<6)
#define GPIOG_READ (1<<1)
-#define XGIFB_ROM_SIZE 65536
-
+static char *forcecrt2type;
static char *mode;
static int vesa = -1;
static unsigned int refresh_rate;
@@ -159,7 +157,6 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
/* unsigned long temp = 0; */
int Clock;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -199,7 +196,6 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned char sr_data, cr_data, cr_data2;
unsigned long cr_data3;
int A, B, C, D, E, F, temp, j;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -387,7 +383,7 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
/* ------------------ Internal helper routines ----------------- */
-static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
+static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
{
int found_mode = 0;
@@ -396,11 +392,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
found_mode = 0;
while ((XGIbios_mode[XGIfb_mode_idx].mode_no != 0)
&& (XGIbios_mode[XGIfb_mode_idx].xres
- <= XGI21_LCDCapList[0].LVDSHDE)) {
+ <= xgifb_info->lvds_data.LVDSHDE)) {
if ((XGIbios_mode[XGIfb_mode_idx].xres
- == XGI21_LCDCapList[0].LVDSHDE)
+ == xgifb_info->lvds_data.LVDSHDE)
&& (XGIbios_mode[XGIfb_mode_idx].yres
- == XGI21_LCDCapList[0].LVDSVDE)
+ == xgifb_info->lvds_data.LVDSVDE)
&& (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) {
found_mode = 1;
break;
@@ -456,51 +452,6 @@ invalid:
printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
}
-static int XGIfb_GetXG21LVDSData(struct xgifb_video_info *xgifb_info)
-{
- u8 tmp;
- void __iomem *data = xgifb_info->mmio_vbase + 0x20000;
- int i, j, k;
-
- tmp = xgifb_reg_get(XGISR, 0x1e);
- xgifb_reg_set(XGISR, 0x1e, tmp | 4);
-
- if ((readb(data) == 0x55) &&
- (readb(data + 1) == 0xAA) &&
- (readb(data + 0x65) & 0x1)) {
- i = readw(data + 0x316);
- j = readb(data + i - 1);
- if (j == 0xff)
- j = 1;
-
- k = 0;
- do {
- XGI21_LCDCapList[k].LVDS_Capability = readw(data + i);
- XGI21_LCDCapList[k].LVDSHT = readw(data + i + 2);
- XGI21_LCDCapList[k].LVDSVT = readw(data + i + 4);
- XGI21_LCDCapList[k].LVDSHDE = readw(data + i + 6);
- XGI21_LCDCapList[k].LVDSVDE = readw(data + i + 8);
- XGI21_LCDCapList[k].LVDSHFP = readw(data + i + 10);
- XGI21_LCDCapList[k].LVDSVFP = readw(data + i + 12);
- XGI21_LCDCapList[k].LVDSHSYNC = readw(data + i + 14);
- XGI21_LCDCapList[k].LVDSVSYNC = readw(data + i + 16);
- XGI21_LCDCapList[k].VCLKData1 = readb(data + i + 18);
- XGI21_LCDCapList[k].VCLKData2 = readb(data + i + 19);
- XGI21_LCDCapList[k].PSC_S1 = readb(data + i + 20);
- XGI21_LCDCapList[k].PSC_S2 = readb(data + i + 21);
- XGI21_LCDCapList[k].PSC_S3 = readb(data + i + 22);
- XGI21_LCDCapList[k].PSC_S4 = readb(data + i + 23);
- XGI21_LCDCapList[k].PSC_S5 = readb(data + i + 24);
- i += 25;
- j--;
- k++;
- } while ((j > 0) && (k < (sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))));
- return 1;
- }
- return 0;
-}
-
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
{
u16 xres, yres;
@@ -508,8 +459,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
if (xgifb_info->chip == XG21) {
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
- xres = XGI21_LCDCapList[0].LVDSHDE;
- yres = XGI21_LCDCapList[0].LVDSVDE;
+ xres = xgifb_info->lvds_data.LVDSHDE;
+ yres = xgifb_info->lvds_data.LVDSVDE;
if (XGIbios_mode[myindex].xres > xres)
return -1;
if (XGIbios_mode[myindex].yres > yres)
@@ -1223,7 +1174,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (isactive) {
XGIfb_pre_setmode(xgifb_info);
- if (XGISetModeNew(hw_info,
+ if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
@@ -1794,17 +1745,16 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
XGIfb_crt1off = 0;
}
- if (XGIfb_crt2type != -1)
- /* TW: Override with option */
- xgifb_info->display2 = XGIfb_crt2type;
- else if (cr32 & XGI_VB_TV)
- xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
- xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
- xgifb_info->display2 = XGIFB_DISP_CRT;
- else
- xgifb_info->display2 = XGIFB_DISP_NONE;
+ if (!xgifb_info->display2_force) {
+ if (cr32 & XGI_VB_TV)
+ xgifb_info->display2 = XGIFB_DISP_TV;
+ else if (cr32 & XGI_VB_LCD)
+ xgifb_info->display2 = XGIFB_DISP_LCD;
+ else if (cr32 & XGI_VB_CRT2)
+ xgifb_info->display2 = XGIFB_DISP_CRT;
+ else
+ xgifb_info->display2 = XGIFB_DISP_NONE;
+ }
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
@@ -1925,8 +1875,6 @@ static int __init XGIfb_setup(char *options)
XGIfb_crt2type = XGIFB_DISP_LCD;
} else if (!strncmp(this_opt, "noypan", 6)) {
XGIfb_ypan = 0;
- } else if (!strncmp(this_opt, "userom:", 7)) {
- XGIfb_userom = xgifb_optval(this_opt, 7);
} else {
mode = this_opt;
}
@@ -1934,35 +1882,12 @@ static int __init XGIfb_setup(char *options)
return 0;
}
-static unsigned char *xgifb_copy_rom(struct pci_dev *dev)
-{
- void __iomem *rom_address;
- unsigned char *rom_copy;
- size_t rom_size;
-
- rom_address = pci_map_rom(dev, &rom_size);
- if (rom_address == NULL)
- return NULL;
-
- rom_copy = vzalloc(XGIFB_ROM_SIZE);
- if (rom_copy == NULL)
- goto done;
-
- rom_size = min_t(size_t, rom_size, XGIFB_ROM_SIZE);
- memcpy_fromio(rom_copy, rom_address, rom_size);
-
-done:
- pci_unmap_rom(dev, rom_address);
- return rom_copy;
-}
-
static int __devinit xgifb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
int ret;
- bool xgi21_drvlcdcaplist = false;
struct fb_info *fb_info;
struct xgifb_video_info *xgifb_info;
struct xgi_hw_device_info *hw_info;
@@ -2001,6 +1926,11 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
+ if (XGIfb_crt2type != -1) {
+ xgifb_info->display2 = XGIfb_crt2type;
+ xgifb_info->display2_force = true;
+ }
+
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
@@ -2041,18 +1971,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
printk("XGIfb:chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
- if ((xgifb_info->chip == XG21) || (XGIfb_userom)) {
- hw_info->pjVirtualRomBase = xgifb_copy_rom(pdev);
- if (hw_info->pjVirtualRomBase)
- printk(KERN_INFO "XGIfb: Video ROM found and mapped to %p\n",
- hw_info->pjVirtualRomBase);
- else
- printk(KERN_INFO "XGIfb: Video ROM not found\n");
- } else {
- hw_info->pjVirtualRomBase = NULL;
- printk(KERN_INFO "XGIfb: Video ROM usage disabled\n");
- }
-
if (XGIfb_get_dram_size(xgifb_info)) {
printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
@@ -2117,8 +2035,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
CR38 = xgifb_reg_get(XGICR, 0x38);
if ((CR38&0xE0) == 0xC0) {
xgifb_info->display2 = XGIFB_DISP_LCD;
- if (!XGIfb_GetXG21LVDSData(xgifb_info))
- xgi21_drvlcdcaplist = true;
} else if ((CR38&0xE0) == 0x60) {
xgifb_info->hasVB = HASVB_CHRONTEL;
} else {
@@ -2193,6 +2109,8 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->hasVB != HASVB_NONE)
XGIfb_detect_VB(xgifb_info);
+ else if (xgifb_info->chip != XG21)
+ xgifb_info->display2 = XGIFB_DISP_NONE;
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
if (!enable_dstn) {
@@ -2254,7 +2172,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->display2 == XGIFB_DISP_LCD &&
xgifb_info->chip == XG21)
xgifb_info->mode_idx =
- XGIfb_GetXG21DefaultLVDSModeIdx();
+ XGIfb_GetXG21DefaultLVDSModeIdx(xgifb_info);
else
xgifb_info->mode_idx = DEFAULT_MODE;
}
@@ -2264,21 +2182,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error_1;
}
- if (xgi21_drvlcdcaplist) {
- int m;
-
- for (m = 0; m < ARRAY_SIZE(XGI21_LCDCapList); m++)
- if ((XGI21_LCDCapList[m].LVDSHDE ==
- XGIbios_mode[xgifb_info->mode_idx].xres) &&
- (XGI21_LCDCapList[m].LVDSVDE ==
- XGIbios_mode[xgifb_info->mode_idx].yres)) {
- xgifb_reg_set(xgifb_info->dev_info.P3d4,
- 0x36,
- m);
- break;
- }
- }
-
/* yilin set default refresh rate */
xgifb_info->refresh_rate = refresh_rate;
if (xgifb_info->refresh_rate == 0)
@@ -2418,7 +2321,6 @@ error_1:
error_0:
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
error:
- vfree(hw_info->pjVirtualRomBase);
framebuffer_release(fb_info);
return ret;
}
@@ -2442,7 +2344,6 @@ static void __devexit xgifb_remove(struct pci_dev *pdev)
iounmap(xgifb_info->video_vbase);
release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
- vfree(xgifb_info->hw_info.pjVirtualRomBase);
framebuffer_release(fb_info);
pci_set_drvdata(pdev, NULL);
}
@@ -2458,6 +2359,8 @@ static int __init xgifb_init(void)
{
char *option = NULL;
+ if (forcecrt2type != NULL)
+ XGIfb_search_crt2type(forcecrt2type);
if (fb_get_options("xgifb", &option))
return -ENODEV;
XGIfb_setup(option);
@@ -2480,6 +2383,11 @@ MODULE_AUTHOR("XGITECH , Others");
module_param(mode, charp, 0);
module_param(vesa, int, 0);
module_param(filter, int, 0);
+module_param(forcecrt2type, charp, 0);
+
+MODULE_PARM_DESC(forcecrt2type,
+ "\nForce the second display output type. Possible values are NONE,\n"
+ "LCD, TV, VGA, SVIDEO or COMPOSITE.\n");
MODULE_PARM_DESC(mode,
"\nSelects the desired default display mode in the format XxYxDepth,\n"
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 7611846a703..2c866bb65a0 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -86,10 +86,13 @@ struct xgifb_video_info {
unsigned int refresh_rate;
enum xgifb_display_type display2; /* the second display output type */
+ bool display2_force;
unsigned char hasVB;
unsigned char TV_type;
unsigned char TV_plug;
+ struct XGI21_LVDSCapStruct lvds_data;
+
enum XGI_CHIP_TYPE chip;
unsigned char revision_id;
diff --git a/drivers/staging/xgifb/vb_ext.c b/drivers/staging/xgifb/vb_ext.c
deleted file mode 100644
index b1a25730b7c..00000000000
--- a/drivers/staging/xgifb/vb_ext.c
+++ /dev/null
@@ -1,444 +0,0 @@
-#include <linux/io.h>
-#include <linux/types.h>
-#include "XGIfb.h"
-
-#include "vb_def.h"
-#include "vgatypes.h"
-#include "vb_struct.h"
-#include "vb_util.h"
-#include "vb_setmode.h"
-#include "vb_ext.h"
-
-/**************************************************************
- *********************** Dynamic Sense ************************
- *************************************************************/
-
-static unsigned char XGINew_Is301B(struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
-
- if (flag > 0x0B0)
- return 0; /* 301b */
- else
- return 1;
-}
-
-static unsigned char XGINew_Sense(unsigned short tempbx,
- unsigned short tempcx,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp, i, tempch;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0x7F00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp > 0)
- return 1;
- else
- return 0;
-}
-
-static unsigned char
-XGINew_GetLCDDDCInfo(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp;
-
- /* add lcd sense */
- if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
- return 0;
- } else {
- temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
- switch (HwDeviceExtension->ulCRT2LCDType) {
- case LCD_INVALID:
- case LCD_800x600:
- case LCD_1024x768:
- case LCD_1280x1024:
- break;
-
- case LCD_640x480:
- case LCD_1024x600:
- case LCD_1152x864:
- case LCD_1280x960:
- case LCD_1152x768:
- temp = 0;
- break;
-
- case LCD_1400x1050:
- case LCD_1280x768:
- case LCD_1600x1200:
- break;
-
- case LCD_1920x1440:
- case LCD_2048x1536:
- temp = 0;
- break;
-
- default:
- break;
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
- return 1;
- }
-}
-
-static unsigned char XGINew_GetPanelID(struct vb_device_info *pVBInfo)
-{
- unsigned short PanelTypeTable[16] = { SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType00, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType01, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType02, SyncNN | PanelRGB18Bit
- | Panel640x480 | _PanelType03, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType04, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType05, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType06, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType07, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType08, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType09, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType0A, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0B, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0C, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType0D, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0E, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0F };
- unsigned short tempax, tempbx, temp;
- /* unsigned short return_flag; */
-
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = tempax & 0x1E;
-
- if (tempax == 0)
- return 0;
- else {
- /*
- if (!(tempax & 0x10)) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- tempbx = 0;
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- if (temp & 0x40)
- tempbx |= 0x08;
- if (temp & 0x20)
- tempbx |= 0x02;
- if (temp & 0x01)
- tempbx |= 0x01;
-
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x39);
- if (temp & 0x80)
- tempbx |= 0x04;
- } else {
- return(0);
- }
- }
- */
-
- tempbx = tempbx >> 1;
- temp = tempbx & 0x00F;
- xgifb_reg_set(pVBInfo->P3d4, 0x36, temp);
- tempbx--;
- tempbx = PanelTypeTable[tempbx];
-
- temp = (tempbx & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~(LCDSyncBit
- | LCDRGB18Bit), temp);
- return 1;
- }
-}
-
-static unsigned char
-XGINew_BridgeIsEnable(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- if (XGI_BridgeIsOn(pVBInfo) == 0) {
- flag = xgifb_reg_get(pVBInfo->Part1Port, 0x0);
-
- if (flag & 0x050)
- return 1;
- else
- return 0;
-
- }
- return 0;
-}
-
-static unsigned char
-XGINew_SenseHiTV(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, tempcx, temp, i, tempch;
-
- tempbx = *pVBInfo->pYCSenseData2;
-
- tempcx = 0x0604;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 0;
-
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch) {
- return 0;
- } else {
- tempbx = 0x3FF;
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 1;
- else
- return 0;
- }
-}
-
-void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax = 0, tempbx, tempcx, temp,
- P2reg0 = 0, SenseModeNo = 0,
- OutputSelect = *pVBInfo->pOutputSelect,
- ModeIdIndex, i;
- pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
-
- if (pVBInfo->IF_DEF_LVDS == 1) {
- /* ynlai 02/27/2002 */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = xgifb_reg_get(pVBInfo->P3c4, 0x1B);
- tempax = ((tempax & 0xFE) >> 1) | (tempbx << 8);
- if (tempax == 0x00) { /* Get Panel id from DDC */
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
- if (temp == 1) { /* LCD connect */
- /* set CR39 bit0="1" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x39, 0xFF, 0x01);
- /* clean CR37 bit4="0" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x37, 0xEF, 0x00);
- temp = LCDSense;
- } else { /* LCD don't connect */
- temp = 0;
- }
- } else {
- XGINew_GetPanelID(pVBInfo);
- temp = LCDSense;
- }
-
- tempbx = ~(LCDSense | AVIDEOSense | SVIDEOSense);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, tempbx, temp);
- } else { /* for 301 */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiVision */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- temp = tempax & 0x01;
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x3A);
- temp = temp | (tempax & 0x02);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xA0, temp);
- } else {
- if (XGI_BridgeIsOn(pVBInfo)) {
- P2reg0 = xgifb_reg_get(pVBInfo->Part2Port,
- 0x00);
- if (!XGINew_BridgeIsEnable(HwDeviceExtension,
- pVBInfo)) {
- SenseModeNo = 0x2e;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x41);
- * XGISetModeNew(HwDeviceExtension, 0x2e);
- * // ynlai InitMode */
-
- temp = XGI_SearchModeID(SenseModeNo,
- &ModeIdIndex,
- pVBInfo);
- XGI_GetVGAType(HwDeviceExtension,
- pVBInfo);
- XGI_GetVBType(pVBInfo);
- pVBInfo->SetFlag = 0x00;
- pVBInfo->ModeType = ModeVGA;
- pVBInfo->VBInfo = SetCRT2ToRAMDAC |
- LoadDACFlag |
- SetInSlaveMode;
- XGI_GetLCDInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_GetTVInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_EnableBridge(HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2Group301(SenseModeNo,
- HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2ModeRegs(0x2e,
- HwDeviceExtension,
- pVBInfo);
- /* XGI_DisableBridge(HwDeviceExtension,
- * pVBInfo ) ; */
- /* Display Off 0212 */
- xgifb_reg_and_or(pVBInfo->P3c4,
- 0x01,
- 0xDF,
- 0x20);
- for (i = 0; i < 20; i++)
- XGI_LongWait(pVBInfo);
- }
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1c);
- tempax = 0;
- tempbx = *pVBInfo->pRGBSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pRGBSenseData2;
-
- tempcx = 0x0E08;
- if (XGINew_Sense(tempbx, tempcx, pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= Monitor2Sense;
- }
-
- if (pVBInfo->VBType & VB_XGI301C)
- xgifb_reg_or(pVBInfo->Part4Port,
- 0x0d,
- 0x04);
-
- /* add by kuku for Multi-adapter sense HiTV */
- if (XGINew_SenseHiTV(HwDeviceExtension,
- pVBInfo)) {
- tempax |= HiTVSense;
- if ((pVBInfo->VBType & VB_XGI301C))
- tempax ^= (HiTVSense |
- YPbPrSense);
- }
-
- /* start */
- if (!(tempax & (HiTVSense | YPbPrSense))) {
- tempbx = *pVBInfo->pYCSenseData;
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pYCSenseData2;
- tempcx = 0x0604;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= SVIDEOSense;
- }
-
- if (OutputSelect & BoardTVType) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= AVIDEOSense;
- }
- } else {
- if (!(tempax & SVIDEOSense)) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx, tempcx, pVBInfo))
- tempax |= AVIDEOSense;
- }
- }
- }
- }
- } /* end */
- if (!(tempax & Monitor2Sense)) {
- if (XGINew_SenseLCD(HwDeviceExtension, pVBInfo))
- tempax |= LCDSense;
- }
- tempbx = 0;
- tempcx = 0;
- XGINew_Sense(tempbx, tempcx, pVBInfo);
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, ~0xDF, tempax);
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, P2reg0);
-
- if (!(P2reg0 & 0x20)) {
- pVBInfo->VBInfo = DisableCRT2Display;
- /* XGI_SetCRT2Group301(SenseModeNo,
- * HwDeviceExtension,
- * pVBInfo); */
- }
- }
- }
- XGI_DisableBridge(HwDeviceExtension, pVBInfo); /* shampoo 0226 */
-
-}
-
-unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /* unsigned short SoftSetting ; */
- unsigned short temp;
-
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
-
- return temp;
-}
diff --git a/drivers/staging/xgifb/vb_ext.h b/drivers/staging/xgifb/vb_ext.h
deleted file mode 100644
index 0b1f55b4242..00000000000
--- a/drivers/staging/xgifb/vb_ext.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _VBEXT_
-#define _VBEXT_
-
-extern void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
- struct vb_device_info *pVBInfo);
-
-#endif
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 9e890a17fbc..4ccd988ffd7 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
#include <linux/pci.h>
+#include <linux/vmalloc.h>
#include "vgatypes.h"
#include "XGIfb.h"
@@ -10,7 +11,6 @@
#include "vb_util.h"
#include "vb_setmode.h"
#include "vb_init.h"
-#include "vb_ext.h"
#include <linux/io.h>
@@ -35,6 +35,8 @@ static const unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
{ 2, 12, 9, 8, 0x35},
{ 2, 12, 8, 4, 0x31} };
+#define XGIFB_ROM_SIZE 65536
+
static unsigned char
XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1068,20 +1070,20 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
return 0;
}
-static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short data;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
- XGISetModeNew(HwDeviceExtension, 0x2e);
+ XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e);
data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
/* disable read cache */
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data & 0xDF));
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
/* data = xgifb_reg_get(pVBInfo->P3c4, 0x1); */
/* data |= 0x20 ; */
@@ -1092,118 +1094,100 @@ static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data | 0x20));
}
-static void ReadVBIOSTablData(unsigned char ChipType,
+static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size)
+{
+ void __iomem *rom_address;
+ u8 *rom_copy;
+
+ rom_address = pci_map_rom(dev, rom_size);
+ if (rom_address == NULL)
+ return NULL;
+
+ rom_copy = vzalloc(XGIFB_ROM_SIZE);
+ if (rom_copy == NULL)
+ goto done;
+
+ *rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE);
+ memcpy_fromio(rom_copy, rom_address, *rom_size);
+
+done:
+ pci_unmap_rom(dev, rom_address);
+ return rom_copy;
+}
+
+static void xgifb_read_vbios(struct pci_dev *pdev,
struct vb_device_info *pVBInfo)
{
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
+ struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
+ u8 *vbios;
unsigned long i;
- unsigned char j, k;
- /* Volari customize data area end */
-
- if (ChipType == XG21) {
- pVBInfo->IF_DEF_LVDS = 0;
- if (pVideoMemory[0x65] & 0x1) {
- pVBInfo->IF_DEF_LVDS = 1;
- i = pVideoMemory[0x316] | (pVideoMemory[0x317] << 8);
- j = pVideoMemory[i - 1];
- if (j != 0xff) {
- k = 0;
- do {
- pVBInfo->XG21_LVDSCapList[k].
- LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[k].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[k].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[k].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[k].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[k].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[k].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[k].PSC_S5
- = pVideoMemory[i + 24];
- i += 25;
- j--;
- k++;
- } while ((j > 0) &&
- (k < (sizeof(XGI21_LCDCapList) /
- sizeof(struct
- XGI21_LVDSCapStruct))));
- } else {
- pVBInfo->XG21_LVDSCapList[0].LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[0].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[0].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[0].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[0].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[0].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[0].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[0].PSC_S5
- = pVideoMemory[i + 24];
- }
- }
+ unsigned char j;
+ struct XGI21_LVDSCapStruct *lvds;
+ size_t vbios_size;
+ int entry;
+
+ if (xgifb_info->chip != XG21)
+ return;
+ pVBInfo->IF_DEF_LVDS = 0;
+ vbios = xgifb_copy_rom(pdev, &vbios_size);
+ if (vbios == NULL) {
+ dev_err(&pdev->dev, "video BIOS not available\n");
+ return;
+ }
+ if (vbios_size <= 0x65)
+ goto error;
+ /*
+ * The user can ignore the LVDS bit in the BIOS and force the display
+ * type.
+ */
+ if (!(vbios[0x65] & 0x1) &&
+ (!xgifb_info->display2_force ||
+ xgifb_info->display2 != XGIFB_DISP_LCD)) {
+ vfree(vbios);
+ return;
}
+ if (vbios_size <= 0x317)
+ goto error;
+ i = vbios[0x316] | (vbios[0x317] << 8);
+ if (vbios_size <= i - 1)
+ goto error;
+ j = vbios[i - 1];
+ if (j == 0)
+ goto error;
+ if (j == 0xff)
+ j = 1;
+ /*
+ * Read the LVDS table index scratch register set by the BIOS.
+ */
+ entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
+ if (entry >= j)
+ entry = 0;
+ i += entry * 25;
+ lvds = &xgifb_info->lvds_data;
+ if (vbios_size <= i + 24)
+ goto error;
+ lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8);
+ lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8);
+ lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8);
+ lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8);
+ lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8);
+ lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8);
+ lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8);
+ lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8);
+ lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8);
+ lvds->VCLKData1 = vbios[i + 18];
+ lvds->VCLKData2 = vbios[i + 19];
+ lvds->PSC_S1 = vbios[i + 20];
+ lvds->PSC_S2 = vbios[i + 21];
+ lvds->PSC_S3 = vbios[i + 22];
+ lvds->PSC_S4 = vbios[i + 23];
+ lvds->PSC_S5 = vbios[i + 24];
+ vfree(vbios);
+ pVBInfo->IF_DEF_LVDS = 1;
+ return;
+error:
+ dev_err(&pdev->dev, "video BIOS corrupted\n");
+ vfree(vbios);
}
static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
@@ -1336,18 +1320,57 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
}
+static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
+ *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned short temp;
+
+ /* add lcd sense */
+ if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
+ return 0;
+ } else {
+ temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
+ switch (HwDeviceExtension->ulCRT2LCDType) {
+ case LCD_INVALID:
+ case LCD_800x600:
+ case LCD_1024x768:
+ case LCD_1280x1024:
+ break;
+
+ case LCD_640x480:
+ case LCD_1024x600:
+ case LCD_1152x864:
+ case LCD_1280x960:
+ case LCD_1152x768:
+ temp = 0;
+ break;
+
+ case LCD_1400x1050:
+ case LCD_1280x768:
+ case LCD_1600x1200:
+ break;
+
+ case LCD_1920x1440:
+ case LCD_2048x1536:
+ temp = 0;
+ break;
+
+ default:
+ break;
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
+ return 1;
+ }
+}
+
static void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char Temp;
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
-
- pVBInfo->IF_DEF_LVDS = 0;
#if 1
- if ((pVideoMemory[0x65] & 0x01)) { /* For XG21 LVDS */
- pVBInfo->IF_DEF_LVDS = 1;
+ if (pVBInfo->IF_DEF_LVDS) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
@@ -1393,7 +1416,6 @@ static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
if (Temp <= 0x02) {
- pVBInfo->IF_DEF_LVDS = 1;
/* LVDS setting */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21);
@@ -1451,24 +1473,15 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
struct vb_device_info *pVBInfo = &VBINF;
unsigned char i, temp = 0, temp1;
/* VBIOSVersion[5]; */
- volatile unsigned char *pVideoMemory;
/* unsigned long j, k; */
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
-
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
- pVideoMemory = (unsigned char *) pVBInfo->ROMAddr;
-
/* Newdebugcode(0x99); */
-
- /* if (pVBInfo->ROMAddr == 0) */
- /* return(0); */
-
if (pVBInfo->FBAddr == NULL) {
printk("\n pVBInfo->FBAddr == 0 ");
return 0;
@@ -1516,8 +1529,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- /* ReadVBIOSData */
- ReadVBIOSTablData(HwDeviceExtension->jChipType, pVBInfo);
+ xgifb_read_vbios(pdev, pVBInfo);
/* 1.Openkey */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
@@ -1774,7 +1786,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo);
printk("20");
- XGINew_SetDRAMSize_340(HwDeviceExtension, pVBInfo);
+ XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo);
printk("21");
printk("22");
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 81c0cc41bb4..67a316c3c10 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -67,11 +67,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- /* add for new UNIVGABIOS */
- /* XGINew_UBLCDDataTable =
- * (struct XGI_LCDDataTablStruct *) XGI_LCDDataTable; */
- /* XGINew_UBTVDataTable = (XGI_TVDataTablStruct *) XGI_TVDataTable; */
-
pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
@@ -148,9 +143,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
else
pVBInfo->LCDCapList = XGI_LCDCapList;
- if ((ChipType == XG21) || (ChipType == XG27))
- pVBInfo->XG21_LVDSCapList = XGI21_LCDCapList;
-
pVBInfo->XGI_TVDelayList = XGI301TVDelayList;
pVBInfo->XGI_TVDelayList2 = XGI301TVDelayList2;
@@ -236,28 +228,6 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
}
}
-static void XGI_SetMiscRegs(unsigned short StandTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char Miscdata;
-
- /* Get Misc from file */
- Miscdata = pVBInfo->StandTable[StandTableIndex].MISC;
- /*
- if (pVBInfo->VBType & (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
- Miscdata |= 0x0C;
- }
- }
- */
-
- outb(Miscdata, pVBInfo->P3c2); /* Set Misc(3c2) */
-}
-
static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short StandTableIndex,
struct vb_device_info *pVBInfo)
@@ -274,16 +244,6 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
CRTCdata = pVBInfo->StandTable[StandTableIndex].CRTC[i];
xgifb_reg_set(pVBInfo->P3d4, i, CRTCdata); /* Set CRTC(3d4) */
}
- /*
- if ((HwDeviceExtension->jChipType == XGI_630) &&
- (HwDeviceExtension->jChipRevision == 0x30)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- xgifb_reg_set(pVBInfo->P3d4, 0x18, 0xFE);
- }
- }
- }
- */
}
static void XGI_SetATTRegs(unsigned short ModeNo,
@@ -530,10 +490,6 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
unsigned char data, data1, pushax;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
/* unlock cr0-7 */
data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
@@ -595,10 +551,6 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
unsigned char data;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
for (i = 0x00; i <= 0x01; i++) {
data = pVBInfo->TimingV[0].data[i];
xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
@@ -976,6 +928,20 @@ static void XGI_SetXG27CRTC(unsigned short ModeNo,
}
}
+static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ temp = (temp & 3) << 6;
+ /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
+ /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void xgifb_set_lcd(int chip_id,
struct vb_device_info *pVBInfo,
unsigned short RefreshRateTableIndex,
@@ -1088,6 +1054,20 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
}
}
+static unsigned short XGI_GetResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+{
+ unsigned short resindex;
+
+ if (ModeNo <= 0x13)
+ /* si+St_ResInfo */
+ resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
+ else
+ /* si+Ext_ResInfo */
+ resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
+ return resindex;
+}
+
static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1127,9 +1107,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
tempcx = 8;
- /* if (!(modeflag & Charx8Dot)) */
- /* tempcx = 9; */
-
tempax /= tempcx;
tempax -= 1;
tempbx -= 1;
@@ -1163,20 +1140,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp);
}
-unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short resindex;
-
- if (ModeNo <= 0x13)
- /* si+St_ResInfo */
- resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
- else
- /* si+Ext_ResInfo */
- resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- return resindex;
-}
-
static void XGI_SetCRT1Offset(unsigned short ModeNo,
unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1308,77 +1271,55 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else { /* for TV */
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = HiTVVCLK;
- VCLKIndex += 25;
- }
-
- if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot) {
- VCLKIndex =
- HiTVSimuVCLK;
- VCLKIndex += 25;
- } else {
- VCLKIndex =
- HiTVTextVCLK;
- VCLKIndex += 25;
- }
- }
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = HiTVVCLK;
+ VCLKIndex += 25;
+ }
- /* 301lv */
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->VBExtInfo ==
- VB_YPbPr1080i)) {
- VCLKIndex =
- YPbPr750pVCLK;
- if (!(pVBInfo->VBExtInfo
- ==
- VB_YPbPr750p)) {
- VCLKIndex =
- YPbPr525pVCLK;
- if (!(pVBInfo->VBExtInfo
- == VB_YPbPr525p)) {
- VCLKIndex
- = YPbPr525iVCLK_2;
- if (!(pVBInfo->SetFlag
- & RPLLDIV2XO))
- VCLKIndex
- = YPbPr525iVCLK;
- }
- }
- }
- }
+ if (pVBInfo->SetFlag & TVSimuMode) {
+ if (modeflag & Charx8Dot) {
+ VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex += 25;
} else {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->SetFlag &
- RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = TVVCLK;
- VCLKIndex += 25;
- }
- }
+ VCLKIndex = HiTVTextVCLK;
+ VCLKIndex += 25;
}
- } else { /* for CRT2 */
- /* Port 3cch */
- VCLKIndex = (unsigned char) inb(
- (pVBInfo->P3ca + 0x02));
- VCLKIndex = ((VCLKIndex >> 2) & 0x03);
- if (ModeNo > 0x13) {
- /* di+Ext_CRTVCLK */
- VCLKIndex =
- pVBInfo->RefIndex[
+ }
+
+ /* 301lv */
+ if ((pVBInfo->VBType & VB_XGI301LV) &&
+ !(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
+ if (pVBInfo->VBExtInfo == VB_YPbPr750p)
+ VCLKIndex = YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ VCLKIndex = YPbPr525pVCLK;
+ else if (pVBInfo->SetFlag & RPLLDIV2XO)
+ VCLKIndex = YPbPr525iVCLK_2;
+ else
+ VCLKIndex = YPbPr525iVCLK;
+ }
+ } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = TVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = TVVCLK;
+ VCLKIndex += 25;
+ }
+ } else { /* for CRT2 */
+ /* Port 3cch */
+ VCLKIndex = (unsigned char) inb((pVBInfo->P3ca + 0x02));
+ VCLKIndex = ((VCLKIndex >> 2) & 0x03);
+ if (ModeNo > 0x13) {
+ /* di+Ext_CRTVCLK */
+ VCLKIndex = pVBInfo->RefIndex[
RefreshRateTableIndex].
Ext_CRTVCLK;
- VCLKIndex &= IndexMask;
- }
+ VCLKIndex &= IndexMask;
}
}
} else { /* LVDS */
@@ -1397,7 +1338,6 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
}
- /* VCLKIndex = VCLKIndex&IndexMask; */
return VCLKIndex;
}
@@ -1461,6 +1401,19 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
}
}
+static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
+ temp = (temp & 1) << 6;
+ /* SR06[6] 18bit Dither */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
+ /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void XGI_SetCRT1FIFO(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1532,16 +1485,6 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x1F, data);
}
- /* Jong for Adavantech LCD ripple issue
- if ((VCLK >= 0) && (VCLK < 135))
- data2 = 0x03;
- else if ((VCLK >= 135) && (VCLK < 160))
- data2 = 0x02;
- else if ((VCLK >= 160) && (VCLK < 260))
- data2 = 0x01;
- else if (VCLK > 260)
- data2 = 0x00;
- */
data2 = 0x00;
xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
@@ -1591,7 +1534,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data2 |= 0x20;
xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x3F, data2);
- /* xgifb_reg_set(pVBInfo->P3c4,0x06,data2); */
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13)
xres = pVBInfo->StResInfo[resindex].HTotal;
@@ -1636,11 +1578,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
pVBInfo);
- /* if (modeflag&HalfDCLK) //030305 fix lowresolution bug */
- /* if (XGINew_IF_DEF_NEW_LOWRES) */
- /* XGI_VesaLowResolution(ModeNo, ModeIdIndex);
- * //030305 fix lowresolution bug */
-
data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
if (HwDeviceExtension->jChipType == XG27) {
@@ -1803,11 +1740,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* if (ModeNo > 0x13) */
- /* modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag; */
- /* else */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag; */
-
if (ModeNo <= 0x13)
/* si+St_ResInfo */
resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
@@ -1815,8 +1747,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo); */
-
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
@@ -1831,13 +1761,10 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
if (modeflag & DoubleScanMode)
yres = yres << 1;
}
- /* if (modeflag & Charx8Dot) */
- /* { */
if (xres == 720)
xres = 640;
- /* } */
pVBInfo->VGAHDE = xres;
pVBInfo->HDE = xres;
pVBInfo->VGAVDE = yres;
@@ -1890,7 +1817,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
tempal = (tempal & 0x0f);
}
- tempcx = LCDLenList[tempbx]; /* mov cl,byte ptr cs:LCDLenList[bx] */
+ tempcx = LCDLenList[tempbx];
if (pVBInfo->LCDInfo & EnableScalingLCD) { /* ScaleLCD */
if ((tempbx == 5) || (tempbx) == 7)
@@ -1898,9 +1825,6 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
else if ((tempbx == 3) || (tempbx == 8))
tempcx = LVDSDesDataLen2;
}
- /* mov di, word ptr cs:LCDDataList[bx] */
- /* tempdi = pVideoMemory[LCDDataList + tempbx * 2] |
- (pVideoMemory[LCDDataList + tempbx * 2 + 1] << 8); */
switch (tempbx) {
case 0:
@@ -2321,10 +2245,10 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
switch (tempbx) {
case 0:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_H;*/
+ tempdi = NULL;
break;
case 1:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_V;*/
+ tempdi = NULL;
break;
case 2:
case 6:
@@ -2363,9 +2287,7 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
}
/* 07/05/22 */
- if (table == 0x00) {
- } else if (table == 0x01) {
- } else if (table == 0x04) {
+ if (table == 0x04) {
switch (tempdi[i].DATAPTR) {
case 0:
return &XGI_ExtPALData[tempal];
@@ -2429,7 +2351,6 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
default:
break;
}
- } else if (table == 0x06) {
}
return NULL;
}
@@ -2741,7 +2662,6 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
else
tempbx = LCDPtr->LCDVRS;
- /* tempbx = tempbx >> 4; */
tempcx = push1;
if (pVBInfo->LCDInfo & EnableScalingLCD)
@@ -2881,7 +2801,6 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
unsigned short index;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- /* index = XGI_GetLCDCapPtr(pVBInfo); */
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -3105,19 +3024,6 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
-void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /*
- if ( HwDeviceExtension->jChipType >= XG20 ) {
- pVBInfo->Set_VGAType = XG20;
- } else {
- pVBInfo->Set_VGAType = VGA_XGI340;
- }
- */
- pVBInfo->Set_VGAType = HwDeviceExtension->jChipType;
-}
-
void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
unsigned short flag, tempbx, tempah;
@@ -3160,7 +3066,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
-void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -3193,14 +3099,9 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) {
- if ((pVBInfo->Set_VGAType >= XG20)
- || (pVBInfo->Set_VGAType >= XG40)) {
+ if ((HwDeviceExtension->jChipType >= XG20) ||
+ (HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- /* if ((pVBInfo->VBType & VB_XGI302B)
- || (pVBInfo->VBType & VB_XGI301LV)
- || (pVBInfo->VBType & VB_XGI302LV)
- || (pVBInfo->VBType & VB_XGI301C))
- */
if (pVBInfo->VBType &
(VB_XGI302B |
VB_XGI301LV |
@@ -3225,7 +3126,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
- if (temp & SetYPbPr) { /* temp = CR38 */
+ if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
/* shampoo add for new
* scratch */
@@ -3242,8 +3143,6 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
SetCRT2ToYPbPr;
}
}
-
- /* tempbx |= SetCRT2ToYPbPr; */
}
}
}
@@ -3368,7 +3267,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short temp, tempbx = 0, resinfo = 0, modeflag, index1;
@@ -3404,17 +3303,6 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx &= (SetCHTVOverScan |
SetNTSCJ |
SetPALTV);
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- //PAL-M/PAL-N Info
- index1 = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- //00:PAL, 01:PAL-M, 10:PAL-N
- temp2 = (index1 & 0xC0) >> 5;
- tempbx |= temp2;
- if (temp2 & 0x02) //PAL-M
- tempbx &= (~SetPALTV);
- }
- */
}
if (pVBInfo->IF_DEF_LVDS == 0) {
@@ -3476,8 +3364,8 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->TVInfo = tempbx;
}
-unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short temp, tempax, tempbx, modeflag, resinfo = 0, LCDIdIndex;
@@ -3553,15 +3441,8 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx |= SetLCDtoNonExpanding;
}
- /*
- if (tempax & LCDBToA) {
- tempbx |= SetLCDBToA;
- }
- */
-
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
- /* if (!(pVBInfo->LCDInfo&LCDNonExpanding)) */
if (!(tempbx & SetLCDtoNonExpanding)) {
tempbx |= EnableLVDSDDA;
} else {
@@ -3604,25 +3485,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (tempax & (LockLCDBToA | StLCDBToA)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (!((!(tempax & LockLCDBToA)) &&
- (ModeNo > 0x13))) {
- pVBInfo->VBInfo &=
- ~(SetSimuScanMode |
- SetInSlaveMode |
- SetCRT2ToLCD);
- pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
- SetCRT2ToDualEdge;
- }
- }
- }
- }
- */
-
return 1;
}
@@ -3632,10 +3494,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
if (ModeNo <= 5)
ModeNo |= 1;
if (ModeNo <= 0x13) {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->SModeIDTable)
- / sizeof(struct XGI_StStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->SModeIDTable[*ModeIdIndex].St_ModeID ==
ModeNo)
@@ -3651,10 +3509,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
(*ModeIdIndex) += 2; /* 400 lines */
/* else 350 lines */
} else {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->EModeIDTable)
- / sizeof(struct XGI_ExtStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->EModeIDTable[*ModeIdIndex].Ext_ModeID ==
ModeNo)
@@ -3675,7 +3529,6 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
for (i = 0; i < 8; i++) {
ujRet = ujRet << 1;
- /* ujRet |= GETBITS(ujDate >> i, 0:0); */
ujRet |= (ujDate >> i) & 1;
}
@@ -3726,7 +3579,101 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
+/*----------------------------------------------------------------------------*/
+/* input */
+/* bl[5] : 1;LVDS signal on */
+/* bl[1] : 1;LVDS backlight on */
+/* bl[0] : 1:LVDS VDD on */
+/* bh: 100000b : clear bit 5, to set bit5 */
+/* 000010b : clear bit 1, to set bit1 */
+/* 000001b : clear bit 0, to set bit0 */
+/*----------------------------------------------------------------------------*/
+static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x23;
+ tempbl &= 0x23;
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
+
+ temp = XG21GPIODataTransfer(temp);
+ temp &= ~tempbh;
+ temp |= tempbl;
+ xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
+}
+
+static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+ unsigned short tempbh0, tempbl0;
+
+ tempbh0 = tempbh;
+ tempbl0 = tempbl;
+ tempbh0 &= 0x20;
+ tempbl0 &= 0x20;
+ tempbh0 >>= 3;
+ tempbl0 >>= 3;
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x03;
+ tempbl &= 0x03;
+ tempbh <<= 2;
+ tempbl <<= 2; /* GPIOC,GPIOD */
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
+}
+
+/* --------------------------------------------------------------------- */
+/* Function : XGI_XG21SetPanelDelay */
+/* Input : */
+/* Output : */
+/* Description : */
+/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
+/* : bl : 2 ; T2 : the duration signal on and Vdd on */
+/* : bl : 3 ; T3 : the duration between CPL off and signal off */
+/* : bl : 4 ; T4 : the duration signal off and Vdd off */
+/* --------------------------------------------------------------------- */
+static void XGI_XG21SetPanelDelay(struct xgifb_video_info *xgifb_info,
+ unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ if (tempbl == 1)
+ mdelay(xgifb_info->lvds_data.PSC_S1);
+
+ if (tempbl == 2)
+ mdelay(xgifb_info->lvds_data.PSC_S2);
+
+ if (tempbl == 3)
+ mdelay(xgifb_info->lvds_data.PSC_S3);
+
+ if (tempbl == 4)
+ mdelay(xgifb_info->lvds_data.PSC_S4);
+}
+
+static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3736,12 +3683,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG21BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG21BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3756,12 +3703,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG27BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG27BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3772,7 +3719,8 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
}
}
-void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
+void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3780,7 +3728,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if (pVBInfo->IF_DEF_LVDS == 1) {
/* LVDS backlight off */
XGI_XG21BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
} else {
/* DVO/DVI signal off */
XGI_XG21BLSignalVDD(0x20, 0x00, pVBInfo);
@@ -3791,7 +3739,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
/* LVDS backlight off */
XGI_XG27BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
}
if (pVBInfo->IF_DEF_LVDS == 0)
@@ -3838,26 +3786,17 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
- /* si+St_ResInfo */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;*/
} else {
xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
/* si+St_ModeFlag */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- /*
- if (pVBInfo->IF_DEF_FSTN) {
- xres *= 2;
- yres *= 2;
- } else {
- */
if (modeflag & HalfDCLK)
xres *= 2;
if (modeflag & DoubleScanMode)
yres *= 2;
- /* } */
}
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
@@ -4028,8 +3967,6 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 775;
else if (pVBInfo->VGAVDE == 600)
tempbx = 775;
- /* else if (pVBInfo->VGAVDE==350) tempbx=560; */
- /* else if (pVBInfo->VGAVDE==400) tempbx=640; */
else
tempbx = 768;
} else
@@ -4294,7 +4231,6 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
XGI_SetCRT2FIFO(pVBInfo);
- /* XGI_SetCRT2Sync(ModeNo,RefreshRateTableIndex); */
for (tempcx = 4; tempcx < 7; tempcx++)
xgifb_reg_set(pVBInfo->Part1Port, tempcx, 0x0);
@@ -4497,9 +4433,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = 0xFF; /* set MAX HT */
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
- /* if (modeflag & Charx8Dot) */
- /* tempcx = 0x08; */
- /* else */
tempcx = 0x08;
if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
@@ -4565,7 +4498,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
} else {
- /* tempcx = tempbx & 0x00FF ; */
tempbx = (tempbx & 0xFF00) >> 8;
tempcx = (tempcx + tempbx) >> 1;
temp = (tempcx & 0x00FF) + 2;
@@ -4579,36 +4511,23 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp -= 6;
}
}
- } else {
- if (!(modeflag & HalfDCLK)) {
- temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960) {
- if (pVBInfo->VGAHDE >= 800) {
- temp -= 7;
- if (pVBInfo->ModeType ==
- ModeEGA) {
- if (pVBInfo->VGAVDE ==
- 1024) {
- temp += 15;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- temp +=
- 7;
- }
- }
- }
-
- if (pVBInfo->VGAHDE >= 1280) {
- if (pVBInfo->LCDResInfo
- != Panel1280x960) {
- if (pVBInfo->LCDInfo
- & LCDNonExpanding) {
- temp
- += 28;
- }
- }
- }
- }
+ } else if (!(modeflag & HalfDCLK)) {
+ temp -= 4;
+ if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->VGAHDE >= 800) {
+ temp -= 7;
+ if (pVBInfo->ModeType == ModeEGA &&
+ pVBInfo->VGAVDE == 1024) {
+ temp += 15;
+ if (pVBInfo->LCDResInfo !=
+ Panel1280x1024)
+ temp += 7;
}
+
+ if (pVBInfo->VGAHDE >= 1280 &&
+ pVBInfo->LCDResInfo != Panel1280x960 &&
+ (pVBInfo->LCDInfo & LCDNonExpanding))
+ temp += 28;
}
}
}
@@ -5297,7 +5216,6 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax--;
xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);
- /* if ( !( pVBInfo->VBType & VB_XGI301C ) ) */
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
@@ -5436,7 +5354,6 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = pVBInfo->VT;
tempbx = pVBInfo->LCDVRS;
- /* if (SetLCD_Info & EnableScalingLCD) */
tempcx += tempbx;
if (tempcx >= tempax)
tempcx -= tempax;
@@ -5478,12 +5395,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
xgifb_reg_set(pVBInfo->Part2Port, 0x25, temp);
- /* getlcdsync() */
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
tempcx = tempax;
tempax = pVBInfo->HT;
tempbx = pVBInfo->LCDHRS;
- /* if ( SetLCD_Info & EnableScalingLCD) */
if (XGI_IsLCDDualLink(pVBInfo)) {
tempax = tempax >> 1;
tempbx = tempbx >> 1;
@@ -5801,9 +5716,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempax = tempax >> 1;
- /* if((pVBInfo->VBInfo&(SetCRT2ToLCD)) ||
- ((pVBInfo->TVInfo&SetYPbPrMode525p) ||
- (pVBInfo->TVInfo&SetYPbPrMode750p))) { */
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (tempax > 800)
tempax -= 800;
@@ -5817,33 +5729,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
tempax -= 1;
- /*
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i))) {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax =(tempax * 25 /
- 32) - 1;
- else
- tempax = (tempax * 20 /
- 32) - 1;
- }
- }
- } else {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax = (tempax * 25 / 32) - 1;
- else
- tempax = (tempax * 20 / 32) - 1;
- }
- }
- }
- */
-
temp = (tempax & 0xFF00) >> 8;
temp = ((temp & 0x0003) << 4);
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
@@ -5902,7 +5787,6 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
| CRT2DisplayFlag))) {
XGINew_EnableCRT2(pVBInfo);
- /* LoadDAC2(pVBInfo->Part5Port, ModeNo, ModeIdIndex); */
}
}
return;
@@ -5921,118 +5805,11 @@ static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
-/*----------------------------------------------------------------------------*/
-/* input */
-/* bl[5] : 1;LVDS signal on */
-/* bl[1] : 1;LVDS backlight on */
-/* bl[0] : 1:LVDS VDD on */
-/* bh: 100000b : clear bit 5, to set bit5 */
-/* 000010b : clear bit 1, to set bit1 */
-/* 000001b : clear bit 0, to set bit0 */
-/*----------------------------------------------------------------------------*/
-void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x23;
- tempbl &= 0x23;
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp = XG21GPIODataTransfer(temp);
- temp &= ~tempbh;
- temp |= tempbl;
- xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
-}
-
-void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
- unsigned short tempbh0, tempbl0;
-
- tempbh0 = tempbh;
- tempbl0 = tempbl;
- tempbh0 &= 0x20;
- tempbl0 &= 0x20;
- tempbh0 >>= 3;
- tempbl0 >>= 3;
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x03;
- tempbl &= 0x03;
- tempbh <<= 2;
- tempbl <<= 2; /* GPIOC,GPIOD */
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
-}
-
-/* --------------------------------------------------------------------- */
-unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo)
-{
- unsigned short index;
-
- index = xgifb_reg_get(pVBInfo->P3d4, 0x36);
- if (index < sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))
- return index;
- return 0;
-}
-
-/* --------------------------------------------------------------------- */
-/* Function : XGI_XG21SetPanelDelay */
-/* Input : */
-/* Output : */
-/* Description : */
-/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
-/* : bl : 2 ; T2 : the duration signal on and Vdd on */
-/* : bl : 3 ; T3 : the duration between CPL off and signal off */
-/* : bl : 4 ; T4 : the duration signal off and Vdd off */
-/* --------------------------------------------------------------------- */
-void XGI_XG21SetPanelDelay(unsigned short tempbl,
+static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short index;
-
- index = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (tempbl == 1)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S1);
-
- if (tempbl == 2)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S2);
-
- if (tempbl == 3)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S3);
-
- if (tempbl == 4)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S4);
-}
-
-unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short xres, yres, colordepth, modeflag, resindex,
- lvdstableindex;
+ unsigned short xres, yres, colordepth, modeflag, resindex;
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13) {
@@ -6061,18 +5838,15 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
}
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (xres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE))
+ if (xres > xgifb_info->lvds_data.LVDSHDE)
return 0;
- if (yres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE))
+ if (yres > xgifb_info->lvds_data.LVDSVDE)
return 0;
if (ModeNo > 0x13) {
- if ((xres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSHDE)) ||
- (yres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVDE))) {
+ if (xres != xgifb_info->lvds_data.LVDSHDE ||
+ yres != xgifb_info->lvds_data.LVDSVDE) {
colordepth = XGI_GetColorDepth(ModeNo,
ModeIdIndex,
pVBInfo);
@@ -6084,55 +5858,26 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
return 1;
}
-void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
- temp = (temp & 1) << 6;
- /* SR06[6] 18bit Dither */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- temp = (temp & 3) << 6;
- /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-static void xgifb_set_lvds(int chip_id,
+static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
+ int chip_id,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned char temp, Miscdata;
- unsigned short xres, yres, modeflag, resindex, lvdstableindex;
+ unsigned short xres, yres, modeflag, resindex;
unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
unsigned short value;
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability &
+ temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
Miscdata = (unsigned char) inb(pVBInfo->P3cc);
outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
- temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability & LCDPolarity);
+ temp = xgifb_info->lvds_data.LVDS_Capability & LCDPolarity;
/* SR35[7] FP VSync polarity */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
/* SR30[5] FP HSync polarity */
@@ -6159,48 +5904,43 @@ static void xgifb_set_lvds(int chip_id,
if (!(modeflag & Charx8Dot))
xres = xres * 8 / 9;
- LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;
+ LVDSHT = xgifb_info->lvds_data.LVDSHT;
- LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
- - xres) / 2;
+ LVDSHBS = xres + (xgifb_info->lvds_data.LVDSHDE - xres) / 2;
if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
LVDSHBS -= xres / 4;
if (LVDSHBS > LVDSHT)
LVDSHBS -= LVDSHT;
- LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
+ LVDSHRS = LVDSHBS + xgifb_info->lvds_data.LVDSHFP;
if (LVDSHRS > LVDSHT)
LVDSHRS -= LVDSHT;
- LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
+ LVDSHRE = LVDSHRS + xgifb_info->lvds_data.LVDSHSYNC;
if (LVDSHRE > LVDSHT)
LVDSHRE -= LVDSHT;
- LVDSHBE = LVDSHBS + LVDSHT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;
+ LVDSHBE = LVDSHBS + LVDSHT - xgifb_info->lvds_data.LVDSHDE;
- LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;
+ LVDSVT = xgifb_info->lvds_data.LVDSVT;
- LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
- - yres) / 2;
+ LVDSVBS = yres + (xgifb_info->lvds_data.LVDSVDE - yres) / 2;
if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
LVDSVBS += yres / 2;
if (LVDSVBS > LVDSVT)
LVDSVBS -= LVDSVT;
- LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
+ LVDSVRS = LVDSVBS + xgifb_info->lvds_data.LVDSVFP;
if (LVDSVRS > LVDSVT)
LVDSVRS -= LVDSVT;
- LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVSYNC;
+ LVDSVRE = LVDSVRS + xgifb_info->lvds_data.LVDSVSYNC;
if (LVDSVRE > LVDSVT)
LVDSVRE -= LVDSVT;
- LVDSVBE = LVDSVBS + LVDSVT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;
+ LVDSVBE = LVDSVBS + LVDSVT - xgifb_info->lvds_data.LVDSVDE;
temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
@@ -6300,13 +6040,9 @@ static void xgifb_set_lvds(int chip_id,
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2B,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData1);
+ 0x2B, xgifb_info->lvds_data.VCLKData1);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2C,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData2);
+ 0x2C, xgifb_info->lvds_data.VCLKData2);
value += 0x10;
}
@@ -6398,7 +6134,8 @@ static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
return 0;
}
-void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah = 0;
@@ -6442,7 +6179,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -6464,7 +6201,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
if ((pVBInfo->SetFlag & DisableChB) ||
@@ -6484,7 +6220,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
}
} else { /* {301} */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
/* Disable CRT2 */
xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
@@ -6494,7 +6229,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
| SetSimuScanMode))
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
}
@@ -6610,17 +6345,6 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- /*
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
- tempbl = CRT2Delay1; // Get CRT2 Delay
- if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C))
- tempbl = CRT2Delay2;
- */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
@@ -6680,23 +6404,6 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
(unsigned short) (0x30 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
}
-
- /*
- if (tempcx & EnableLCD24bpp) { // 24bits
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- (unsigned short)(0x30 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
- } else {
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- // Enable Dither
- (unsigned short)(0x20 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
- }
- */
}
/* --------------------------------------------------------------------- */
@@ -6718,6 +6425,25 @@ static void XGI_SetLCDCap_B(unsigned short tempcx,
| 0x18)); /* Enable Dither */
}
+static void XGI_LongWait(struct vb_device_info *pVBInfo)
+{
+ unsigned short i;
+
+ i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
+
+ if (!(i & 0xC0)) {
+ for (i = 0; i < 0xFFFF; i++) {
+ if (!(inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+
+ for (i = 0; i < 0xFFFF; i++) {
+ if ((inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+ }
+}
+
static void SetSpectrum(struct vb_device_info *pVBInfo)
{
unsigned short index;
@@ -6940,14 +6666,12 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- /* GetPart1IO(); */
XGI_SetDelayComp(pVBInfo);
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* GetPart2IO() */
XGI_SetPhaseIncr(pVBInfo);
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
@@ -6963,7 +6687,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
+static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -6972,8 +6696,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
unsigned char tempah;
- /* // fix write part1 index 0 BTDRAM bit Bug
- * xgifb_reg_set(pVBInfo->Part1Port, 0x03, 0x00); */
tempah = 0;
if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
@@ -6999,32 +6721,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
}
- /* 0210 shampoo
- if (pVBInfo->VBInfo & DisableCRT2Display) {
- tempah = 0;
- }
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD)) {
- tempcl = pVBInfo->ModeType;
- if (ModeNo > 0x13) {
- tempcl -= ModeVGA;
- if ((tempcl > 0) || (tempcl == 0)) {
- tempah=(0x008>>tempcl) ;
- if (tempah == 0)
- tempah = 1;
- tempah |= 0x040;
- }
- } else {
- tempah = 0x040;
- }
-
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- tempah = (tempah ^ 0x050);
- }
- }
- */
-
xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
tempah = 0x08;
tempbl = 0xf0;
@@ -7093,14 +6789,11 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x080;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p))) { */
tempah |= 0x020;
if (ModeNo > 0x13) {
if (pVBInfo->VBInfo & DriverMode)
tempah = tempah ^ 0x20;
}
- /* } */
}
xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, ~0x0BF, tempah);
@@ -7110,12 +6803,8 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if ((!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) &&
- (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))) { */
if (pVBInfo->TVInfo & RPLLDIV2XO)
tempah |= 0x40;
- /* } */
}
if ((pVBInfo->LCDResInfo == Panel1280x1024)
@@ -7219,57 +6908,6 @@ unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
}
}
-void XGI_LongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short i;
-
- i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
-
- if (!(i & 0xC0)) {
- for (i = 0; i < 0xFFFF; i++) {
- if (!(inb(pVBInfo->P3da) & 0x08))
- break;
- }
-
- for (i = 0; i < 0xFFFF; i++) {
- if ((inb(pVBInfo->P3da) & 0x08))
- break;
- }
- }
-}
-
-static void XGI_VBLongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short tempal, temp, i, j;
- return;
- if (!(pVBInfo->VBInfo & SetCRT2ToTV)) {
- temp = 0;
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 100; j++) {
- tempal = inb(pVBInfo->P3da);
- if (temp & 0x01) { /* VBWaitMode2 */
- if ((tempal & 0x08))
- continue;
-
- if (!(tempal & 0x08))
- break;
-
- } else { /* VBWaitMode1 */
- if (!(tempal & 0x08))
- continue;
-
- if ((tempal & 0x08))
- break;
- }
- }
- temp = temp ^ 0x01;
- }
- } else {
- XGI_LongWait(pVBInfo);
- }
- return;
-}
-
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
@@ -7322,12 +6960,6 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
RefreshRateTableIndex = pVBInfo->EModeIDTable[ModeIdIndex].REFindex;
ModeNo = pVBInfo->RefIndex[RefreshRateTableIndex].ModeID;
if (pXGIHWDE->jChipType >= XG20) { /* for XG20, XG21, XG27 */
- /*
- if (pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag &
- XG2xNotSupport) {
- index++;
- }
- */
if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 800) &&
(pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 600)) {
index++;
@@ -7371,7 +7003,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
RefreshRateTableIndex, &i, pVBInfo);
}
- return RefreshRateTableIndex + i; /* return (0x01 | (temp1<<1)); */
+ return RefreshRateTableIndex + i;
}
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
@@ -7379,9 +7011,6 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex;
- /* unsigned short temp ; */
-
- /* pVBInfo->SelectCRT2Rate = 0; */
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -7394,7 +7023,7 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
-unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
+static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -7499,10 +7128,6 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
outb((unsigned char) DAC_TEST_PARMS[2], (pVBInfo->P3c8 + 1));
}
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
-
mdelay(1);
XGI_WaitDisply(pVBInfo);
@@ -7532,7 +7157,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
}
-void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah;
@@ -7544,7 +7170,6 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
} else {
- /* SetCRT2ToLCDA ) */
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port,
@@ -7572,10 +7197,8 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->Part1Port, 0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port,
0x2E, 0x80);
- /* BScreenOFF = 0 */
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
}
}
@@ -7638,12 +7261,11 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
if (!(pVBInfo->SetFlag & DisableChA)) {
- XGI_VBLongWait(pVBInfo);
if (!(pVBInfo->SetFlag & GatingCRT)) {
XGI_DisableGatingCRT(HwDeviceExtension,
pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- XGI_VBLongWait(pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension,
+ pVBInfo);
}
}
} /* 301 */
@@ -7656,15 +7278,15 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
tempah = (unsigned char) xgifb_reg_get(pVBInfo->Part1Port,
0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
} /* End of VB */
}
-static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -7672,18 +7294,14 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short XGINew_P3cc = pVBInfo->P3cc;
- /* XGINew_CRT1Mode = ModeNo; // SaveModeID */
StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_SetBIOSData(ModeNo, ModeIdIndex); */
- /* XGI_ClearBankRegs(ModeNo, ModeIdIndex); */
XGI_SetSeqRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
- XGI_SetMiscRegs(StandTableIndex, pVBInfo);
+ outb(pVBInfo->StandTable[StandTableIndex].MISC, pVBInfo->P3c2);
XGI_SetCRTCRegs(HwDeviceExtension, StandTableIndex, pVBInfo);
XGI_SetATTRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
XGI_SetGRCRegs(StandTableIndex, pVBInfo);
XGI_ClearExt1Regs(pVBInfo);
- /* if (pVBInfo->IF_DEF_ExpLink) */
if (HwDeviceExtension->jChipType == XG27) {
if (pVBInfo->IF_DEF_LVDS == 0)
XGI_SetDefaultVCLK(pVBInfo);
@@ -7735,11 +7353,6 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
if (temp & 0xA0) {
- /* Enable write GPIOF */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20); */
- /* P. DWN */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x48, ~0x20); */
- /* XG21 CRT1 Timing */
if (HwDeviceExtension->jChipType == XG27)
XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
@@ -7754,10 +7367,9 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo, RefreshRateTableIndex, ModeNo);
if (pVBInfo->IF_DEF_LVDS == 1)
- xgifb_set_lvds(HwDeviceExtension->jChipType,
+ xgifb_set_lvds(xgifb_info,
+ HwDeviceExtension->jChipType,
ModeNo, ModeIdIndex, pVBInfo);
- /* P. ON */
- /* xgifb_reg_or(pVBInfo->P3d4, 0x48, 0x20); */
}
}
@@ -7765,22 +7377,16 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
-
- /* XGI_LoadCharacter(); //dif ifdef TVFont */
-
XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_ClearBuffer(HwDeviceExtension, ModeNo, pVBInfo); */
}
-unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo)
{
unsigned short ModeIdIndex;
- /* unsigned char *pVBInfo->FBAddr =
- HwDeviceExtension->pjVideoMemoryAddress; */
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
pVBInfo->IF_DEF_LVDS = 0;
pVBInfo->IF_DEF_LCDA = 1;
@@ -7831,14 +7437,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_GetVBType(pVBInfo);
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- if (ModeNo & 0x80) {
+ if (ModeNo & 0x80)
ModeNo = ModeNo & 0x7F;
- /* XGINew_flag_clearbuffer = 0; */
- }
- /* else {
- XGINew_flag_clearbuffer = 1;
- }
- */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 1.Openkey */
@@ -7846,16 +7446,14 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
- XGI_GetVGAType(HwDeviceExtension, pVBInfo);
-
if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisableBridge(HwDeviceExtension, pVBInfo);
+ XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -7864,7 +7462,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
}
} else {
if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info,
+ HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
@@ -7894,11 +7493,11 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
XGI_CloseCRTC(HwDeviceExtension, pVBInfo);
- XGI_EnableBridge(HwDeviceExtension, pVBInfo);
+ XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
if (pVBInfo->IF_DEF_LVDS == 1)
- if (!XGI_XG21CheckLVDSMode(ModeNo,
+ if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
ModeIdIndex,
pVBInfo))
return 0;
@@ -7914,39 +7513,13 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->SetFlag = 0;
pVBInfo->VBInfo = DisableCRT2Display;
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex,
- pVBInfo);
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
+ ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- /*
- if (HwDeviceExtension->jChipType == XG21)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0x80, 0x80);
- */
- }
-
- /*
- if (ModeNo <= 0x13) {
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- } else {
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
- pVBInfo->ModeType = modeflag&ModeInfoFlag;
- pVBInfo->SetFlag = 0x00;
- pVBInfo->VBInfo = DisableCRT2Display;
- temp = XGINew_CheckMemorySize(HwDeviceExtension,
- ModeNo,
- ModeIdIndex,
- pVBInfo);
-
- if (temp == 0)
- return (0);
-
- XGI_DisplayOff(HwDeviceExtension, pVBInfo) ;
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- */
XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 1bd8667ff5c..552482858c1 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -6,66 +6,22 @@ extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
-extern void XGI_LongWait(struct vb_device_info *);
-extern void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
- struct xgi_hw_device_info *,
- struct vb_device_info *);
-extern void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_DisplayOff(struct xgi_hw_device_info *,
+extern void XGI_DisplayOff(struct xgifb_video_info *,
+ struct xgi_hw_device_info *,
struct vb_device_info *);
-extern void XGI_DisplayOn(struct xgi_hw_device_info *,
- struct vb_device_info *);
extern void XGI_GetVBType(struct vb_device_info *);
extern void XGI_SenseCRT1(struct vb_device_info *);
-extern void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetVBInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetTVInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
-extern unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-
-extern unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo) ;
extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
unsigned short *ModeIdIndex,
struct vb_device_info *);
-extern unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
extern unsigned char XGI_BridgeIsOn(struct vb_device_info *);
-
-extern unsigned char
-XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *);
-extern void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_XG21BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG27BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG21SetPanelDelay(unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo);
-
#endif
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index f9ade6f9f7e..6556a0d6ff8 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -293,13 +293,12 @@ struct vb_device_info {
unsigned short IF_DEF_ExpLink;
unsigned short IF_DEF_HiVision;
unsigned short LCDResInfo, LCDTypeInfo, VBType;/*301b*/
- unsigned short VBInfo, TVInfo, LCDInfo, Set_VGAType;
+ unsigned short VBInfo, TVInfo, LCDInfo;
unsigned short VBExtInfo;/*301lv*/
unsigned short SetFlag;
unsigned short NewFlickerMode;
unsigned short SelectCRT2Rate;
- unsigned char *ROMAddr;
void __iomem *FBAddr;
unsigned long BaseAddr;
unsigned long RelIO;
@@ -376,7 +375,6 @@ struct vb_device_info {
unsigned char *pXGINew_CR97 ;
struct XGI330_LCDCapStruct *LCDCapList;
- struct XGI21_LVDSCapStruct *XG21_LVDSCapList;
struct XGI_TimingHStruct *TimingH;
struct XGI_TimingVStruct *TimingV;
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index b81ac7726d1..e7946f1c114 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -2569,33 +2569,6 @@ static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-struct XGI21_LVDSCapStruct XGI21_LCDCapList[] = {
- {DisableLCD24bpp + LCDPolarity,
- 2160, 1250, 1600, 1200, 64, 1, 192, 3,
- 0x70, 0x24, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1688, 1066, 1280, 1024, 48, 1, 112, 3,
- 0x70, 0x44, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 1344, 806, 1024, 768, 24, 3, 136, 6,
- 0x6C, 0x65, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1056, 628, 800, 600, 40, 1, 128, 4,
- 0x42, 0xE2, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity,
- 928, 525, 800, 480, 40, 13, 48, 3,
- 0x52, 0xC5, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 800, 525, 640, 480, 16, 10, 96, 2,
- 0x1B, 0xE1, 0x20, 0x04, 0x0A, 0x02, 0xC8
- }
-};
-
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
{Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9b939b75c30..9e166bbb00c 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -51,8 +51,6 @@ struct xgi_hw_device_info {
unsigned long ulExternalChip; /* NO VB or other video bridge*/
/* if ujVBChipID = VB_CHIP_UNKNOWN, */
- unsigned char *pjVirtualRomBase; /* ROM image */
-
void __iomem *pjVideoMemoryAddress;/* base virtual memory address */
/* of Linear VGA memory */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 56c1f9c80dc..642840c612a 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -787,7 +787,7 @@ static ssize_t zv_max_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_zsize = val;
@@ -819,7 +819,7 @@ static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_mean_zsize = val;
@@ -853,7 +853,7 @@ static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > 150))
return -EINVAL;
zv_page_count_policy_percent = val;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 09de99fbb7e..2a2a92d389e 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -653,7 +653,8 @@ int zram_init_device(struct zram *zram)
goto fail_no_table;
}
- zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
+ zram->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 0ea8ed296a9..d521122826f 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -58,7 +58,7 @@ static ssize_t disksize_store(struct device *dev,
u64 disksize;
struct zram *zram = dev_to_zram(dev);
- ret = strict_strtoull(buf, 10, &disksize);
+ ret = kstrtoull(buf, 10, &disksize);
if (ret)
return ret;
@@ -88,7 +88,7 @@ static ssize_t reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
- unsigned long do_reset;
+ unsigned short do_reset;
struct zram *zram;
struct block_device *bdev;
@@ -99,7 +99,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev->bd_holders)
return -EBUSY;
- ret = strict_strtoul(buf, 10, &do_reset);
+ ret = kstrtou16(buf, 10, &do_reset);
if (ret)
return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271..8599545cdf9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f..1cd6ce373b8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae..f1a02dad05a 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e50..101b1beb3bc 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93c..d734bdec24f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9..98936cb7c29 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6..81d5832fbbd 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8..1dcbef499d6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b824..831468b3163 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
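
The buf[2] to buf[3] change in target_emulate_evpd_86() is a page-length fix: bytes 2 and 3 of a VPD page header form a big-endian PAGE LENGTH field, so writing 0x3c into byte 2 advertised 0x3c00 bytes instead of 60. A small sketch of the header layout (the helper name is invented; field meanings per SPC-4):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical helper: fill the 4-byte VPD header for page 0x86. */
	static void fill_evpd_86_header(uint8_t *buf, uint8_t periph, uint16_t page_len)
	{
		buf[0] = periph;		/* peripheral qualifier / device type */
		buf[1] = 0x86;			/* Extended INQUIRY Data VPD page */
		buf[2] = page_len >> 8;		/* PAGE LENGTH, big-endian high byte */
		buf[3] = page_len & 0xff;	/* PAGE LENGTH, low byte: 0x3c == 60 */
	}

	int main(void)
	{
		uint8_t buf[64];

		memset(buf, 0, sizeof(buf));
		fill_evpd_86_header(buf, 0x00, 0x3c);
		printf("page 0x%02x length %d\n", buf[1], (buf[2] << 8) | buf[3]);
		return 0;
	}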
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4..93d4f6a1b79 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f..9b863942547 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
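
The se_dev_set_emulate_*() changes above all apply the same gate: fail only when the caller tries to enable an emulation the backend cannot provide, and always accept writing 0. A toy version of that pattern (names and fields are placeholders, not the kernel API):

	#include <errno.h>
	#include <stdio.h>

	struct fake_dev {
		int write_cache_emulated;	/* backend capability */
		int emulate_write_cache;	/* configured attribute */
	};

	static int set_emulate_write_cache(struct fake_dev *dev, int flag)
	{
		if (flag != 0 && flag != 1)
			return -EINVAL;
		/* Only enabling needs backend support; disabling is always fine. */
		if (flag && !dev->write_cache_emulated)
			return -EINVAL;
		dev->emulate_write_cache = flag;
		return 0;
	}

	int main(void)
	{
		struct fake_dev dev = { .write_cache_emulated = 0 };

		printf("enable  -> %d\n", set_emulate_write_cache(&dev, 1)); /* -EINVAL */
		printf("disable -> %d\n", set_emulate_write_cache(&dev, 0)); /* 0 */
		return 0;
	}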
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bf..b4864fba4ef 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
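
fd_do_readv()/fd_do_writev() now walk the scatterlist with for_each_sg() rather than indexing it as a flat array, which keeps working if the list is chained. Below is a sketch of the same iovec-filling idea, assuming kernel headers such as <linux/scatterlist.h>, <linux/uio.h> and <linux/slab.h>; it is illustrative only, not a drop-in replacement for the driver code:

	/* Sketch: build an iovec array from a (possibly chained) scatterlist. */
	static struct iovec *sgl_to_iovec(struct scatterlist *sgl, unsigned int nents)
	{
		struct scatterlist *sg;
		struct iovec *iov;
		unsigned int i;

		iov = kcalloc(nents, sizeof(*iov), GFP_KERNEL);
		if (!iov)
			return NULL;

		/*
		 * for_each_sg() follows chained scatterlists; indexing sg[i]
		 * directly only works for a flat array, which is why the
		 * fd_do_readv()/fd_do_writev() loops were converted.
		 */
		for_each_sg(sgl, sg, nents, i) {
			iov[i].iov_len  = sg->length;
			iov[i].iov_base = sg_virt(sg);
		}
		return iov;
	}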
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe2926..4aa99220443 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
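
The iblock_do_task() hunk keeps the existing write-ordering rule while switching to the SCF_FUA flag: issue a FUA write when write-cache emulation is off (write-through behaviour) or when FUA-write emulation is on and the initiator set the FUA bit. A compact stand-alone model of that decision (the flag value and names are placeholders):

	#include <stdio.h>

	#define SCF_FUA (1 << 0)	/* stand-in for the kernel flag bit */

	static const char *pick_write_rw(int emulate_write_cache,
					 int emulate_fua_write,
					 unsigned int se_cmd_flags)
	{
		if (emulate_write_cache == 0 ||
		    (emulate_fua_write > 0 && (se_cmd_flags & SCF_FUA)))
			return "WRITE_FUA";
		return "WRITE";
	}

	int main(void)
	{
		printf("%s\n", pick_write_rw(1, 1, SCF_FUA));	/* WRITE_FUA */
		printf("%s\n", pick_write_rw(1, 1, 0));		/* WRITE */
		printf("%s\n", pick_write_rw(0, 0, 0));		/* WRITE_FUA */
		return 0;
	}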
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54..95dee7074ae 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe42..8b15e56b038 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f1..02e51faa2f4 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
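
The rewritten rd_MEMCPY() above drives both directions with one sg_mapping_iter loop instead of two hand-rolled offset walkers. As a rough illustration of the iterator pattern, here is a sketch that drains a scatterlist into a linear buffer; it assumes the usual kernel headers (<linux/scatterlist.h> and friends) and is not meant to build outside a kernel tree. The kernel already provides sg_copy_to_buffer() for this exact job; the open-coded loop only shows how miter.addr, miter.length and miter.consumed interact:

	/* Sketch: copy scatterlist contents into buf, at most buflen bytes. */
	static size_t sgl_copy_to_buf(struct scatterlist *sgl, unsigned int nents,
				      void *buf, size_t buflen)
	{
		struct sg_mapping_iter m;
		size_t copied = 0;

		sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
		while (copied < buflen && sg_miter_next(&m)) {
			size_t len = min(m.length, buflen - copied);

			memcpy(buf + copied, m.addr, len);
			m.consumed = len;	/* tell the iterator how much we used */
			copied += len;
		}
		sg_miter_stop(&m);
		return copied;
	}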
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
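
rd_MEMCPY_do_task() now derives the starting ramdisk page and the intra-page offset directly from the byte offset of the LBA: rd_offset is the remainder of lba * block_size modulo PAGE_SIZE and rd_page is the quotient, which is exactly what do_div() hands back. The same arithmetic worked through in plain C with assumed sizes:

	#include <stdint.h>
	#include <stdio.h>

	#define FAKE_PAGE_SIZE 4096ULL	/* assumption for the example */

	int main(void)
	{
		uint64_t lba = 9, block_size = 512;
		uint64_t tmp = lba * block_size;	/* byte offset: 4608 */

		/* kernel: rd_offset = do_div(tmp, PAGE_SIZE); rd_page = tmp;
		 * modeled here with % and / on the same value. */
		uint32_t rd_offset = tmp % FAKE_PAGE_SIZE;	/* 512 */
		uint32_t rd_page   = tmp / FAKE_PAGE_SIZE;	/* 1 */

		printf("page %u offset %u\n", rd_page, rd_offset);
		return 0;
	}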
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df629..684522805a1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f..0257658e2e3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
* possible unsoliticied write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
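
The consolidated transport_generic_request_failure() above treats TCM_RESERVATION_CONFLICT specially: it is reported as a bare RESERVATION CONFLICT SCSI status with no sense payload, while every other sense reason becomes a CHECK CONDITION built from cmd->scsi_sense_reason. A small stand-alone model of that status selection (the TCM_* values here are stand-ins; the SAM status bytes are the standard ones):

	#include <stdio.h>

	#define SAM_STAT_CHECK_CONDITION	0x02
	#define SAM_STAT_RESERVATION_CONFLICT	0x18

	enum tcm_sense_reason {			/* stand-ins, not the kernel enum */
		TCM_INVALID_CDB_FIELD = 1,
		TCM_RESERVATION_CONFLICT,
	};

	/* RESERVATION CONFLICT: bare status, no sense; everything else:
	 * CHECK CONDITION with sense built from the recorded reason. */
	static int status_for_failure(enum tcm_sense_reason reason, int *want_sense)
	{
		if (reason == TCM_RESERVATION_CONFLICT) {
			*want_sense = 0;
			return SAM_STAT_RESERVATION_CONFLICT;
		}
		*want_sense = 1;
		return SAM_STAT_CHECK_CONDITION;
	}

	int main(void)
	{
		int sense;
		int st = status_for_failure(TCM_RESERVATION_CONFLICT, &sense);

		printf("status 0x%02x sense? %d\n", st, sense);
		return 0;
	}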
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
@@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c61..71fc9cea5dc 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca4..9402b7387ca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cea56033b34..a09ce3ef5d7 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -417,7 +417,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
__FILE__,__LINE__,tbuf,tbuf->count);
/* Send the next block of data to device */
- tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
/* rollback was possible and has been done */
@@ -459,7 +459,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
}
if (!tbuf)
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/* Clear the re-entry flag */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
@@ -491,7 +491,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
return;
if (tty != n_hdlc->tty) {
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 39d6ab6551e..d2256d08ee7 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -61,7 +61,7 @@
* controlling the space in the read buffer.
*/
#define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */
-#define TTY_THRESHOLD_UNTHROTTLE 128
+#define TTY_THRESHOLD_UNTHROTTLE 128
/*
* Special byte codes used in the echo buffer to represent operations
@@ -405,7 +405,7 @@ static ssize_t process_output_block(struct tty_struct *tty,
const unsigned char *buf, unsigned int nr)
{
int space;
- int i;
+ int i;
const unsigned char *cp;
mutex_lock(&tty->output_lock);
@@ -1607,7 +1607,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
}
/**
- * copy_from_read_buf - copy read data directly
+ * copy_from_read_buf - copy read data directly
* @tty: terminal device
* @b: user data
* @nr: size of data
@@ -1909,7 +1909,7 @@ do_it_again:
if (nr)
clear_bit(TTY_PUSH, &tty->flags);
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
- goto do_it_again;
+ goto do_it_again;
n_tty_set_room(tty);
return retval;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e18604b3fc7..d8653ab6f49 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,19 +446,8 @@ static inline void legacy_pty_init(void) { }
int pty_limit = NR_UNIX98_PTY_DEFAULT;
static int pty_limit_min;
static int pty_limit_max = NR_UNIX98_PTY_MAX;
-static int tty_count;
static int pty_count;
-static inline void pty_inc_count(void)
-{
- pty_count = (++tty_count) / 2;
-}
-
-static inline void pty_dec_count(void)
-{
- pty_count = (--tty_count) / 2;
-}
-
static struct cdev ptmx_cdev;
static struct ctl_table pty_table[] = {
@@ -600,8 +589,7 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
*/
tty_driver_kref_get(driver);
tty->count++;
- pty_inc_count(); /* tty */
- pty_inc_count(); /* tty->link */
+ pty_count++;
return 0;
err_free_mem:
deinitialize_tty_struct(o_tty);
@@ -613,15 +601,19 @@ err_free_tty:
return -ENOMEM;
}
-static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void ptm_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ pty_count--;
+}
+
+static void pts_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- pty_dec_count();
}
static const struct tty_operations ptm_unix98_ops = {
.lookup = ptm_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = ptm_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
@@ -638,7 +630,7 @@ static const struct tty_operations ptm_unix98_ops = {
static const struct tty_operations pty_unix98_ops = {
.lookup = pts_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = pts_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index eeadf1b8e09..9f50c4e3c2b 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -129,32 +129,6 @@ static unsigned long probe_rsa[PORT_RSA_MAX];
static unsigned int probe_rsa_count;
#endif /* CONFIG_SERIAL_8250_RSA */
-struct uart_8250_port {
- struct uart_port port;
- struct timer_list timer; /* "no irq" timer */
- struct list_head list; /* ports on this IRQ */
- unsigned short capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- unsigned int tx_loadsz; /* transmit fifo load size */
- unsigned char acr;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char cur_iotype; /* Running I/O type */
-
- /*
- * Some bits in registers are cleared on a read, so they must
- * be saved whenever the register is read but the bits will not
- * be immediately processed.
- */
-#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
-#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
- unsigned char msr_saved_flags;
-};
-
struct irq_info {
struct hlist_node node;
int irq;
@@ -1326,8 +1300,6 @@ static void serial8250_stop_tx(struct uart_port *port)
}
}
-static void transmit_chars(struct uart_8250_port *up);
-
static void serial8250_start_tx(struct uart_port *port)
{
struct uart_8250_port *up =
@@ -1344,7 +1316,7 @@ static void serial8250_start_tx(struct uart_port *port)
if ((up->port.type == PORT_RM9000) ?
(lsr & UART_LSR_THRE) :
(lsr & UART_LSR_TEMT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
}
}
@@ -1401,11 +1373,16 @@ static void clear_rx_fifo(struct uart_8250_port *up)
} while (1);
}
-static void
-receive_chars(struct uart_8250_port *up, unsigned int *status)
+/*
+ * serial8250_rx_chars: processes according to the passed in LSR
+ * value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned char ch, lsr = *status;
+ unsigned char ch;
int max_count = 256;
char flag;
@@ -1481,10 +1458,11 @@ ignore_char:
spin_unlock(&up->port.lock);
tty_flip_buffer_push(tty);
spin_lock(&up->port.lock);
- *status = lsr;
+ return lsr;
}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-static void transmit_chars(struct uart_8250_port *up)
+void serial8250_tx_chars(struct uart_8250_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
@@ -1521,8 +1499,9 @@ static void transmit_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
__stop_tx(up);
}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-static unsigned int check_modem_status(struct uart_8250_port *up)
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
{
unsigned int status = serial_in(up, UART_MSR);
@@ -1544,14 +1523,20 @@ static unsigned int check_modem_status(struct uart_8250_port *up)
return status;
}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
/*
* This handles the interrupt from one port.
*/
-static void serial8250_handle_port(struct uart_8250_port *up)
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned int status;
+ unsigned char status;
unsigned long flags;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ if (iir & UART_IIR_NO_INT)
+ return 0;
spin_lock_irqsave(&up->port.lock, flags);
@@ -1560,25 +1545,13 @@ static void serial8250_handle_port(struct uart_8250_port *up)
DEBUG_INTR("status = %x...", status);
if (status & (UART_LSR_DR | UART_LSR_BI))
- receive_chars(up, &status);
- check_modem_status(up);
+ status = serial8250_rx_chars(up, status);
+ serial8250_modem_status(up);
if (status & UART_LSR_THRE)
- transmit_chars(up);
+ serial8250_tx_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
-
- if (!(iir & UART_IIR_NO_INT)) {
- serial8250_handle_port(up);
- return 1;
- }
-
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(serial8250_handle_irq);
@@ -1619,11 +1592,13 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
do {
struct uart_8250_port *up;
struct uart_port *port;
+ bool skip;
up = list_entry(l, struct uart_8250_port, list);
port = &up->port;
+ skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
- if (port->handle_irq(port)) {
+ if (!skip && port->handle_irq(port)) {
handled = 1;
end = NULL;
} else if (end == NULL)
@@ -1758,11 +1733,8 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
static void serial8250_timeout(unsigned long data)
{
struct uart_8250_port *up = (struct uart_8250_port *)data;
- unsigned int iir;
- iir = serial_in(up, UART_IIR);
- if (!(iir & UART_IIR_NO_INT))
- serial8250_handle_port(up);
+ up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
@@ -1801,7 +1773,7 @@ static void serial8250_backup_timeout(unsigned long data)
}
if (!(iir & UART_IIR_NO_INT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
if (is_real_interrupt(up->port.irq))
serial_out(up, UART_IER, ier);
@@ -1835,7 +1807,7 @@ static unsigned int serial8250_get_mctrl(struct uart_port *port)
unsigned int status;
unsigned int ret;
- status = check_modem_status(up);
+ status = serial8250_modem_status(up);
ret = 0;
if (status & UART_MSR_DCD)
@@ -2000,7 +1972,7 @@ static int serial8250_startup(struct uart_port *port)
serial_outp(up, UART_IER, 0);
serial_outp(up, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_outp(up, UART_EFR, UART_EFR_ECB);
serial_outp(up, UART_LCR, 0);
}
@@ -2848,7 +2820,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (up->port.sysrq) {
- /* serial8250_handle_port() already took the lock */
+ /* serial8250_handle_irq() already took the lock */
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
@@ -2882,7 +2854,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
* while processing with interrupts off.
*/
if (up->msr_saved_flags)
- check_modem_status(up);
+ serial8250_modem_status(up);
if (locked)
spin_unlock(&up->port.lock);
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
index 6edf4a6a22d..ae027be57e2 100644
--- a/drivers/tty/serial/8250.h
+++ b/drivers/tty/serial/8250.h
@@ -13,6 +13,32 @@
#include <linux/serial_8250.h>
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short capabilities; /* port capabilities */
+ unsigned short bugs; /* port bugs */
+ unsigned int tx_loadsz; /* transmit fifo load size */
+ unsigned char acr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
+
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+ unsigned char lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+};
+
struct old_serial_port {
unsigned int uart;
unsigned int baud_base;
diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
index bf1fba640c2..f574eef3075 100644
--- a/drivers/tty/serial/8250_dw.c
+++ b/drivers/tty/serial/8250_dw.c
@@ -177,17 +177,7 @@ static struct platform_driver dw8250_platform_driver = {
.remove = __devexit_p(dw8250_remove),
};
-static int __init dw8250_init(void)
-{
- return platform_driver_register(&dw8250_platform_driver);
-}
-module_init(dw8250_init);
-
-static void __exit dw8250_exit(void)
-{
- platform_driver_unregister(&dw8250_platform_driver);
-}
-module_exit(dw8250_exit);
+module_platform_driver(dw8250_platform_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_fsl.c b/drivers/tty/serial/8250_fsl.c
new file mode 100644
index 00000000000..f4d3c47b88e
--- /dev/null
+++ b/drivers/tty/serial/8250_fsl.c
@@ -0,0 +1,63 @@
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+/*
+ * Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This isn't a full driver; it just provides an alternate IRQ
+ * handler to deal with an erratum. Everything else is just
+ * using the bog standard 8250 support.
+ *
+ * We follow code flow of serial8250_default_handle_irq() but add
+ * a check for a break and insert a dummy read on the Rx for the
+ * immediately following IRQ event.
+ *
+ * We re-use the already existing "bug handling" lsr_saved_flags
+ * field to carry the "what we just did" information from the one
+ * IRQ event to the next one.
+ */
+
+int fsl8250_handle_irq(struct uart_port *port)
+{
+ unsigned char lsr, orig_lsr;
+ unsigned long flags;
+ unsigned int iir;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ iir = port->serial_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 0;
+ }
+
+ /* This is the WAR; if last event was BRK, then read and return */
+ if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ up->lsr_saved_flags &= ~UART_LSR_BI;
+ port->serial_in(port, UART_RX);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+ }
+
+ lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+
+ if (lsr & (UART_LSR_DR | UART_LSR_BI))
+ lsr = serial8250_rx_chars(up, lsr);
+
+ serial8250_modem_status(up);
+
+ if (lsr & UART_LSR_THRE)
+ serial8250_tx_chars(up);
+
+ up->lsr_saved_flags = orig_lsr;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+}
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 825937a5f21..da2b0b0a183 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1092,6 +1092,14 @@ static int skip_tx_en_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int kt_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ port->flags |= UPF_IIR_ONCE;
+ return skip_tx_en_setup(priv, board, port, idx);
+}
+
static int pci_eg20t_init(struct pci_dev *dev)
{
#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
@@ -1110,7 +1118,18 @@ pci_xr17c154_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
-/* This should be in linux/pci_ids.h */
+static int try_enable_msi(struct pci_dev *dev)
+{
+ /* use msi if available, but fallback to legacy otherwise */
+ pci_enable_msi(dev);
+ return 0;
+}
+
+static void disable_msi(struct pci_dev *dev)
+{
+ pci_disable_msi(dev);
+}
+
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
@@ -1133,9 +1152,14 @@ pci_xr17c154_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_400V3 0xA310
+#define PCI_DEVICE_ID_TITAN_410V3 0xA312
+#define PCI_DEVICE_ID_TITAN_800V3 0xA314
+#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -1220,6 +1244,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = ce4100_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PATSBURG_KT,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = try_enable_msi,
+ .setup = kt_serial_setup,
+ .exit = disable_msi,
+ },
/*
* ITE
*/
@@ -3414,6 +3447,18 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 925a1e547a8..f32a2ea7010 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -97,6 +97,11 @@ config SERIAL_8250_PNP
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
+config SERIAL_8250_FSL
+ bool
+ depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
+ default PPC
+
config SERIAL_8250_HP300
tristate
depends on SERIAL_8250 && HP300
@@ -457,7 +462,7 @@ config SERIAL_SAMSUNG
config SERIAL_SAMSUNG_UARTS_4
bool
depends on ARM && PLAT_SAMSUNG
- default y if CPU_S3C2443
+ default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442)
help
Internal node for the common case of 4 Samsung compatible UARTs
@@ -465,7 +470,7 @@ config SERIAL_SAMSUNG_UARTS
int
depends on ARM && PLAT_SAMSUNG
default 6 if ARCH_S5P6450
- default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416
default 3
help
Select the number of available UART ports for the Samsung S3C
@@ -495,46 +500,27 @@ config SERIAL_SAMSUNG_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
-config SERIAL_S3C2410
- tristate "Samsung S3C2410 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2410
- default y if CPU_S3C2410
- help
- Serial port support for the Samsung S3C2410 SoC
-
-config SERIAL_S3C2412
- tristate "Samsung S3C2412/S3C2413 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2412
- default y if CPU_S3C2412
- help
- Serial port support for the Samsung S3C2412 and S3C2413 SoC
-
-config SERIAL_S3C2440
- tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
- default y if CPU_S3C2440
- default y if CPU_S3C2442
- select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
- help
- Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
-
-config SERIAL_S3C6400
- tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100)
- select SERIAL_SAMSUNG_UARTS_4
- default y
- help
- Serial port support for the Samsung S3C6400, S3C6410, S5P6440, S5P6450
- and S5PC100 SoCs
-
-config SERIAL_S5PV210
- tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- default y
- help
- Serial port support for Samsung's S5P Family of SoC's
-
+config SERIAL_SIRFSOC
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARM && ARCH_PRIMA2
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
+
+config SERIAL_SIRFSOC_CONSOLE
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_MAX3100
tristate "MAX3100 support"
@@ -808,7 +794,7 @@ config BFIN_UART3_CTSRTS
config SERIAL_IMX
bool "IMX serial port support"
- depends on ARM && (ARCH_IMX || ARCH_MXC)
+ depends on ARCH_MXC
select SERIAL_CORE
select RATIONAL
help
@@ -1324,7 +1310,7 @@ config SERIAL_OF_PLATFORM
config SERIAL_OMAP
tristate "OMAP serial port support"
- depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS
select SERIAL_CORE
help
If you have a machine based on a Texas Instruments OMAP CPU you
@@ -1575,6 +1561,15 @@ config SERIAL_PCH_UART
ML7213/ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
ML7213/ML7223/ML7831 is fully compatible with the Intel EG20T PCH.
+config SERIAL_PCH_UART_CONSOLE
+ bool "Support for console on Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH"
+ depends on SERIAL_PCH_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use the PCH UART as the system console
+ (the system console is the device which receives all kernel messages and
+ warnings and which allows logins in single user mode).
+
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
default n
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index e10cf5b54b6..07e0494c683 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
@@ -39,11 +40,6 @@ obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
obj-$(CONFIG_SERIAL_BFIN) += bfin_uart.o
obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
-obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
-obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
-obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
-obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
-obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
@@ -94,3 +90,4 @@ obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index efdf92c3a35..0d91a540bf1 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -795,6 +795,8 @@ static struct amba_id pl010_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl010_ids);
+
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 00233af1acc..6958594f2fc 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1994,6 +1994,8 @@ static struct amba_id pl011_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl011_ids);
+
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 77554fd68d1..7162f70d926 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -577,7 +577,7 @@ static int __devinit apbuart_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id __initdata apbuart_match[] = {
+static struct of_device_id apbuart_match[] = {
{
.name = "GAISLER_APBUART",
},
@@ -597,7 +597,7 @@ static struct platform_driver grlib_apbuart_of_driver = {
};
-static int grlib_apbuart_configure(void)
+static int __init grlib_apbuart_configure(void)
{
struct device_node *np;
int line = 0;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4c823f341d9..10605ecc99a 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -212,8 +212,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
+ unsigned long flags;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -244,7 +245,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1256,12 +1257,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
static void atmel_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index ee101c0d358..7fbc3a08f10 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -299,8 +299,13 @@ static int sport_startup(struct uart_port *port)
dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
}
}
- if (up->rts_pin >= 0)
- gpio_direction_output(up->rts_pin, 0);
+ if (up->rts_pin >= 0) {
+ if (gpio_request(up->rts_pin, DRV_NAME)) {
+ dev_info(port->dev, "fail to request RTS PIN at GPIO_%d\n", up->rts_pin);
+ up->rts_pin = -1;
+ } else
+ gpio_direction_output(up->rts_pin, 0);
+ }
#endif
return 0;
@@ -445,6 +450,8 @@ static void sport_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
if (up->cts_pin >= 0)
free_irq(gpio_to_irq(up->cts_pin), up);
+ if (up->rts_pin >= 0)
+ gpio_free(up->rts_pin);
#endif
}
@@ -803,17 +810,16 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
sport->cts_pin = -1;
- else
+ else {
sport->cts_pin = res->start;
+ sport->port.flags |= ASYNC_CTS_FLOW;
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
sport->rts_pin = -1;
else
sport->rts_pin = res->start;
-
- if (sport->rts_pin >= 0)
- gpio_request(sport->rts_pin, DRV_NAME);
#endif
}
@@ -853,10 +859,6 @@ static int __devexit sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (sport->rts_pin >= 0)
- gpio_free(sport->rts_pin);
-#endif
iounmap(sport->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1d567..e4510ea135c 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -45,11 +45,12 @@
#define SPORT_GET_RX32(sport) \
({ \
unsigned int __ret; \
+ unsigned long flags; \
if (ANOMALY_05000473) \
- local_irq_disable(); \
+ local_irq_save(flags); \
__ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
if (ANOMALY_05000473) \
- local_irq_enable(); \
+ local_irq_restore(flags); \
__ret; \
})
#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 66afb98b77b..26953bfa692 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -116,15 +116,22 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
- unsigned int status;
-
- status = bfin_serial_get_mctrl(&uart->port);
- uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+ unsigned int status = bfin_serial_get_mctrl(&uart->port);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- uart->scts = 1;
+ struct tty_struct *tty = uart->port.state->port.tty;
+
UART_CLEAR_SCTS(uart);
- UART_CLEAR_IER(uart, EDSSI);
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uart_write_wakeup(&uart->port);
+ }
+ } else {
+ if (!status)
+ tty->hw_stopped = 1;
+ }
#endif
+ uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
return IRQ_HANDLED;
}
@@ -175,13 +182,6 @@ static void bfin_serial_start_tx(struct uart_port *port)
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
struct tty_struct *tty = uart->port.state->port.tty;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
/*
* To avoid losing RX interrupt, we reset IR function
* before sending data.
@@ -380,12 +380,6 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
spin_lock(&uart->port.lock);
if (UART_GET_LSR(uart) & THRE)
bfin_serial_tx_chars(uart);
@@ -531,13 +525,6 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
struct bfin_serial_port *uart = dev_id;
struct circ_buf *xmit = &uart->port.state->xmit;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
spin_lock(&uart->port.lock);
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
@@ -739,20 +726,26 @@ static int bfin_serial_startup(struct uart_port *port)
pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
}
}
- if (uart->rts_pin >= 0)
- gpio_direction_output(uart->rts_pin, 0);
+ if (uart->rts_pin >= 0) {
+ if (gpio_request(uart->rts_pin, DRIVER_NAME)) {
+ pr_info("fail to request RTS PIN at GPIO_%d\n", uart->rts_pin);
+ uart->rts_pin = -1;
+ } else
+ gpio_direction_output(uart->rts_pin, 0);
+ }
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
- bfin_serial_mctrl_cts_int,
- 0, "BFIN_UART_MODEM_STATUS", uart)) {
- uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
- }
+ if (uart->cts_pin >= 0) {
+ if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
+ IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
+ }
- /* CTS RTS PINs are negative assertive. */
- UART_PUT_MCR(uart, ACTS);
- UART_SET_IER(uart, EDSSI);
+ /* CTS RTS PINs are negative assertive. */
+ UART_PUT_MCR(uart, ACTS);
+ UART_SET_IER(uart, EDSSI);
+ }
#endif
UART_SET_IER(uart, ERBFI);
@@ -792,6 +785,8 @@ static void bfin_serial_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0)
@@ -1370,18 +1365,18 @@ static int bfin_serial_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
uart->cts_pin = -1;
- else
+ else {
uart->cts_pin = res->start;
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ uart->port.flags |= ASYNC_CTS_FLOW;
+#endif
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
uart->rts_pin = -1;
else
uart->rts_pin = res->start;
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- if (uart->rts_pin >= 0)
- gpio_request(uart->rts_pin, DRIVER_NAME);
-# endif
#endif
}
@@ -1421,10 +1416,6 @@ static int __devexit bfin_serial_remove(struct platform_device *pdev)
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
-#endif
iounmap(uart->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 426434e5eb7..7e925e20cba 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1334,7 +1334,6 @@ MODULE_DEVICE_TABLE(spi, ifx_id_table);
static const struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
- .bus = &spi_bus_type,
.pm = &ifx_spi_pm,
.owner = THIS_MODULE},
.probe = ifx_spi_spi_probe,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 163fc9021f5..0b7fed746b2 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -102,6 +102,7 @@
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
@@ -207,6 +208,12 @@ struct imx_port {
struct imx_uart_data *devdata;
};
+struct imx_port_ucrs {
+ unsigned int ucr1;
+ unsigned int ucr2;
+ unsigned int ucr3;
+};
+
#ifdef CONFIG_IRDA
#define USE_IRDA(sport) ((sport)->use_irda)
#else
@@ -260,6 +267,27 @@ static inline int is_imx21_uart(struct imx_port *sport)
}
/*
+ * Save and restore functions for UCR1, UCR2 and UCR3 registers
+ */
+static void imx_port_ucrs_save(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* save control registers */
+ ucr->ucr1 = readl(port->membase + UCR1);
+ ucr->ucr2 = readl(port->membase + UCR2);
+ ucr->ucr3 = readl(port->membase + UCR3);
+}
+
+static void imx_port_ucrs_restore(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* restore control registers */
+ writel(ucr->ucr1, port->membase + UCR1);
+ writel(ucr->ucr2, port->membase + UCR2);
+ writel(ucr->ucr3, port->membase + UCR3);
+}
+
+/*
* Handle any change of modem status signal since we were last called.
*/
static void imx_mctrl_check(struct imx_port *sport)
@@ -566,6 +594,9 @@ static irqreturn_t imx_int(int irq, void *dev_id)
if (sts & USR1_RTSD)
imx_rtsint(irq, dev_id);
+ if (sts & USR1_AWAKE)
+ writel(USR1_AWAKE, sport->port.membase + USR1);
+
return IRQ_HANDLED;
}
@@ -901,6 +932,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 |= UCR2_PROE;
}
+ del_timer_sync(&sport->timer);
+
/*
* Ask the core to calculate the divisor for us.
*/
@@ -931,8 +964,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.ignore_status_mask |= URXD_OVRRUN;
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
@@ -1079,6 +1110,70 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
+#if defined(CONFIG_CONSOLE_POLL)
+static int imx_poll_get_char(struct uart_port *port)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+ unsigned char c;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* poll */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_RDR);
+
+ /* read */
+ c = readl(port->membase + URXD0);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+
+ return c;
+}
+
+static void imx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* drain */
+ do {
+ status = readl(port->membase + USR1);
+ } while (~status & USR1_TRDY);
+
+ /* write */
+ writel(c, port->membase + URTX0);
+
+ /* flush */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_TXDC);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+}
+#endif
+
static struct uart_ops imx_pops = {
.tx_empty = imx_tx_empty,
.set_mctrl = imx_set_mctrl,
@@ -1096,6 +1191,10 @@ static struct uart_ops imx_pops = {
.request_port = imx_request_port,
.config_port = imx_config_port,
.verify_port = imx_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_get_char = imx_poll_get_char,
+ .poll_put_char = imx_poll_put_char,
+#endif
};
static struct imx_port *imx_ports[UART_NR];
@@ -1118,13 +1217,14 @@ static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
struct imx_port *sport = imx_ports[co->index];
- unsigned int old_ucr1, old_ucr2, ucr1;
+ struct imx_port_ucrs old_ucr;
+ unsigned int ucr1;
/*
- * First, save UCR1/2 and then disable interrupts
+ * First, save UCR1/2/3 and then disable interrupts
*/
- ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
- old_ucr2 = readl(sport->port.membase + UCR2);
+ imx_port_ucrs_save(&sport->port, &old_ucr);
+ ucr1 = old_ucr.ucr1;
if (is_imx1_uart(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
@@ -1133,18 +1233,17 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
writel(ucr1, sport->port.membase + UCR1);
- writel(old_ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
+ writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
uart_console_write(&sport->port, s, count, imx_console_putchar);
/*
* Finally, wait for transmitter to become empty
- * and restore UCR1/2
+ * and restore UCR1/2/3
*/
while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
- writel(old_ucr1, sport->port.membase + UCR1);
- writel(old_ucr2, sport->port.membase + UCR2);
+ imx_port_ucrs_restore(&sport->port, &old_ucr);
}
/*
@@ -1269,6 +1368,12 @@ static struct uart_driver imx_reg = {
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* enable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val |= UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_suspend_port(&imx_reg, &sport->port);
@@ -1279,6 +1384,12 @@ static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
static int serial_imx_resume(struct platform_device *dev)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* disable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_resume_port(&imx_reg, &sport->port);
@@ -1287,6 +1398,10 @@ static int serial_imx_resume(struct platform_device *dev)
}
#ifdef CONFIG_OF
+/*
+ * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
+ * could successfully get all information from dt, or a negative errno.
+ */
static int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
@@ -1296,12 +1411,13 @@ static int serial_imx_probe_dt(struct imx_port *sport,
int ret;
if (!np)
- return -ENODEV;
+ /* no device tree device */
+ return 1;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
- return -ENODEV;
+ return ret;
}
sport->port.line = ret;
@@ -1319,7 +1435,7 @@ static int serial_imx_probe_dt(struct imx_port *sport,
static inline int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
- return -ENODEV;
+ return 1;
}
#endif
@@ -1354,8 +1470,10 @@ static int serial_imx_probe(struct platform_device *pdev)
return -ENOMEM;
ret = serial_imx_probe_dt(sport, pdev);
- if (ret == -ENODEV)
+ if (ret > 0)
serial_imx_probe_pdata(sport, pdev);
+ else if (ret < 0)
+ goto free;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1476,7 +1594,7 @@ static int __init imx_serial_init(void)
if (ret != 0)
uart_unregister_driver(&imx_reg);
- return 0;
+ return ret;
}
static void __exit imx_serial_exit(void)
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 08018934e01..94a6792bf97 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1000,11 +1000,8 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
init_timer(&up->timer);
up->timer.function = m32r_sio_timeout;
- /*
- * ALPHA_KLUDGE_MCR needs to be killed.
- */
- up->mcr_mask = ~ALPHA_KLUDGE_MCR;
- up->mcr_force = ALPHA_KLUDGE_MCR;
+ up->mcr_mask = ~0;
+ up->mcr_force = 0;
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 8a6cc8c30b5..b4902b99cfd 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -901,7 +901,6 @@ static int max3100_resume(struct spi_device *spi)
static struct spi_driver max3100_driver = {
.driver = {
.name = "max3100",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index 90c40f22ec7..aae772a71de 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -315,7 +315,6 @@ static int __devinit max3107_probe_aava(struct spi_device *spi)
static struct spi_driver max3107_driver = {
.driver = {
.name = "aava-max3107",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max3107_probe_aava,
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 7827000db4f..17c7ba805d9 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1181,7 +1181,6 @@ static int max3107_probe_generic(struct spi_device *spi)
static struct spi_driver max3107_driver = {
.driver = {
.name = "max3107",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max3107_probe_generic,
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index e272d3919c6..a9234ba8f8d 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1154,7 +1154,6 @@ serial_hsu_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
- int ret;
if (co->index == -1 || co->index >= serial_hsu_reg.nr)
co->index = 0;
@@ -1165,9 +1164,7 @@ serial_hsu_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
-
- return ret;
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_hsu_console = {
@@ -1176,9 +1173,13 @@ static struct console serial_hsu_console = {
.device = uart_console_device,
.setup = serial_hsu_console_setup,
.flags = CON_PRINTBUFFER,
- .index = 2,
+ .index = -1,
.data = &serial_hsu_reg,
};
+
+#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE NULL
#endif
struct uart_ops serial_hsu_pops = {
@@ -1208,6 +1209,7 @@ static struct uart_driver serial_hsu_reg = {
.major = TTY_MAJOR,
.minor = 128,
.nr = 3,
+ .cons = SERIAL_HSU_CONSOLE,
};
#ifdef CONFIG_PM
@@ -1342,12 +1344,6 @@ static int serial_hsu_probe(struct pci_dev *pdev,
}
uart_add_one_port(&serial_hsu_reg, &uport->port);
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
- if (index == 2) {
- register_console(&serial_hsu_console);
- uport->port.cons = &serial_hsu_console;
- }
-#endif
pci_set_drvdata(pdev, uport);
}
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 4c309e86990..df2a2240a3a 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -876,7 +876,6 @@ static int __devexit serial_m3110_remove(struct spi_device *dev)
static struct spi_driver uart_max3110_driver = {
.driver = {
.name = "spi_max3111",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = serial_m3110_probe,
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 60c6eb85026..5e85e1e14c4 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -422,9 +422,9 @@ static int __devexit msm_hs_remove(struct platform_device *pdev)
msm_uport->rx.rbuffer);
dma_pool_destroy(msm_uport->rx.pool);
- dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
DMA_TO_DEVICE);
- dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
DMA_TO_DEVICE);
dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
DMA_TO_DEVICE);
@@ -812,7 +812,7 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
/* Save tx_count to use in Callback */
tx->tx_count = tx_count;
@@ -1087,12 +1087,10 @@ static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
}
/* Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts(struct uart_port *uport)
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
- unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- spin_lock_irqsave(&uport->lock, flags);
clk_enable(msm_uport->clk);
/* clear interrupt */
@@ -1100,7 +1098,6 @@ static void msm_hs_handle_delta_cts(struct uart_port *uport)
uport->icount.cts++;
clk_disable(msm_uport->clk);
- spin_unlock_irqrestore(&uport->lock, flags);
/* clear the IOCTL TIOCMIWAIT if called */
wake_up_interruptible(&uport->state->port.delta_msr_wait);
@@ -1248,7 +1245,7 @@ static irqreturn_t msm_hs_isr(int irq, void *dev)
/* Change in CTS interrupt */
if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
- msm_hs_handle_delta_cts(uport);
+ msm_hs_handle_delta_cts_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -1537,7 +1534,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
if (!tx->command_ptr)
return -ENOMEM;
- tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!tx->command_ptr_ptr) {
ret = -ENOMEM;
goto err_tx_command_ptr_ptr;
@@ -1547,7 +1544,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
sizeof(dmov_box), DMA_TO_DEVICE);
tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
tx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
init_waitqueue_head(&rx->wait);
@@ -1575,7 +1572,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
goto err_rx_command_ptr;
}
- rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!rx->command_ptr_ptr) {
pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
ret = -ENOMEM;
@@ -1593,7 +1590,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
@@ -1609,7 +1606,7 @@ err_dma_pool_alloc:
dma_pool_destroy(msm_uport->rx.pool);
err_dma_pool_create:
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
sizeof(dmov_box), DMA_TO_DEVICE);
kfree(msm_uport->tx.command_ptr_ptr);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 7e02c9c344f..55fd362b987 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -145,11 +145,12 @@ static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
writel(xmit->buf[xmit->tail],
s->port.membase + AUART_DATA);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&s->port);
} else
break;
}
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
if (uart_circ_empty(&(s->port.state->xmit)))
writel(AUART_INTR_TXIEN,
s->port.membase + AUART_INTR_CLR);
@@ -424,7 +425,7 @@ static int mxs_auart_startup(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
@@ -453,7 +454,7 @@ static void mxs_auart_shutdown(struct uart_port *u)
writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
u->membase + AUART_INTR_CLR);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
}
static unsigned int mxs_auart_tx_empty(struct uart_port *u)
@@ -634,7 +635,7 @@ auart_console_setup(struct console *co, char *options)
if (!s)
return -ENODEV;
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -643,7 +644,7 @@ auart_console_setup(struct console *co, char *options)
ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
return ret;
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 5e713d3ef1f..d192dcbb82f 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -37,17 +37,24 @@
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>
#include <plat/omap-serial.h>
+#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
+
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
-static void serial_omap_rx_timeout(unsigned long uart_no);
+static void serial_omap_rxdma_poll(unsigned long uart_no);
static int serial_omap_start_rxdma(struct uart_omap_port *up);
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
+
+static struct workqueue_struct *serial_omap_uart_wq;
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
@@ -102,6 +109,8 @@ static void serial_omap_stop_rxdma(struct uart_omap_port *up)
omap_free_dma(up->uart_dma.rx_dma_channel);
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_used = false;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
}
@@ -109,9 +118,12 @@ static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_stop_tx(struct uart_port *port)
@@ -129,30 +141,40 @@ static void serial_omap_stop_tx(struct uart_port *port)
omap_stop_dma(up->uart_dma.tx_dma_channel);
omap_free_dma(up->uart_dma.tx_dma_channel);
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
+
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->use_dma)
serial_omap_stop_rxdma(up);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
-static inline void receive_chars(struct uart_omap_port *up, int *status)
+static inline void receive_chars(struct uart_omap_port *up,
+ unsigned int *status)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned int flag;
- unsigned char ch, lsr = *status;
+ unsigned int flag, lsr = *status;
+ unsigned char ch = 0;
int max_count = 256;
do {
@@ -262,7 +284,10 @@ static void serial_omap_start_tx(struct uart_port *port)
int ret = 0;
if (!up->use_dma) {
+ pm_runtime_get_sync(&up->pdev->dev);
serial_omap_enable_ier_thri(up);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return;
}
@@ -272,6 +297,7 @@ static void serial_omap_start_tx(struct uart_port *port)
xmit = &up->port.state->xmit;
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
"UART Tx DMA",
(void *)uart_tx_dma_callback, up,
@@ -354,9 +380,13 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
unsigned int iir, lsr;
unsigned long flags;
+ pm_runtime_get_sync(&up->pdev->dev);
iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT)
+ if (iir & UART_IIR_NO_INT) {
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return IRQ_NONE;
+ }
spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
@@ -378,6 +408,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
+
up->port_activity = jiffies;
return IRQ_HANDLED;
}
@@ -388,22 +421,26 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
unsigned long flags = 0;
unsigned int ret = 0;
- dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ pm_runtime_get_sync(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
-
+ pm_runtime_put(&up->pdev->dev);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned char status;
+ unsigned int status;
unsigned int ret = 0;
+ pm_runtime_get_sync(&up->pdev->dev);
status = check_modem_status(up);
- dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
@@ -421,7 +458,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char mcr = 0;
- dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
@@ -433,8 +470,11 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
- mcr |= up->mcr;
- serial_out(up, UART_MCR, mcr);
+ pm_runtime_get_sync(&up->pdev->dev);
+ up->mcr = serial_in(up, UART_MCR);
+ up->mcr |= mcr;
+ serial_out(up, UART_MCR, up->mcr);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
@@ -442,7 +482,8 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
@@ -450,6 +491,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_startup(struct uart_port *port)
@@ -466,8 +508,9 @@ static int serial_omap_startup(struct uart_port *port)
if (retval)
return retval;
- dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
@@ -505,8 +548,8 @@ static int serial_omap_startup(struct uart_port *port)
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
0);
init_timer(&(up->uart_dma.rx_timer));
- up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
- up->uart_dma.rx_timer.data = up->pdev->id;
+ up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
+ up->uart_dma.rx_timer.data = up->port.line;
/* Currently the buffer size is 4KB. Can increase it */
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
up->uart_dma.rx_buf_size,
@@ -523,6 +566,8 @@ static int serial_omap_startup(struct uart_port *port)
/* Enable module level wake up */
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
up->port_activity = jiffies;
return 0;
}
@@ -532,7 +577,9 @@ static void serial_omap_shutdown(struct uart_port *port)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Disable interrupts from this port
*/
@@ -566,6 +613,8 @@ static void serial_omap_shutdown(struct uart_port *port)
up->uart_dma.rx_buf_dma_phys);
up->uart_dma.rx_buf = NULL;
}
+
+ pm_runtime_put(&up->pdev->dev);
free_irq(up->port.irq, up);
}
@@ -573,8 +622,6 @@ static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
- unsigned char efr = 0;
-
up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -584,8 +631,7 @@ serial_omap_configure_xonxoff
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* clear SW control mode bits */
- efr = up->efr;
- efr &= OMAP_UART_SW_CLR;
+ up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
@@ -593,7 +639,7 @@ serial_omap_configure_xonxoff
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
- efr |= OMAP_UART_SW_TX;
+ up->efr |= OMAP_UART_SW_TX;
/*
* IXOFF Flag:
@@ -601,7 +647,7 @@ serial_omap_configure_xonxoff
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
- efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_RX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -624,13 +670,21 @@ serial_omap_configure_xonxoff
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
- serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
serial_out(up, UART_LCR, up->lcr);
}
+static void serial_omap_uart_qos_work(struct work_struct *work)
+{
+ struct uart_omap_port *up = container_of(work, struct uart_omap_port,
+ qos_work);
+
+ pm_qos_update_request(&up->pm_qos_request, up->latency);
+}
+
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -671,6 +725,16 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
+ /* calculate wakeup latency constraint */
+ up->calc_latency = (1000000 * up->port.fifosize) /
+ (1000 * baud / 8);
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+
+ up->dll = quot & 0xff;
+ up->dlh = quot >> 8;
+ up->mdr1 = UART_OMAP_MDR1_DISABLE;
+
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
@@ -680,6 +744,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
/*
@@ -723,6 +788,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval;
+ up->scr = OMAP_UART_SCR_TX_EMPTY;
/* FIFOs and DMA Settings */
@@ -749,17 +816,22 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
if (up->use_dma) {
serial_out(up, UART_TI752_TLR, 0);
- serial_out(up, UART_OMAP_SCR,
- (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8);
}
+ serial_out(up, UART_OMAP_SCR, up->scr);
+
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
/* Protocol, Baud Rate, and Interrupt Settings */
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -769,8 +841,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_DLL, up->dll); /* LS of divisor */
+ serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
@@ -780,9 +852,14 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_LCR, cval);
if (baud > 230400 && baud != 3000000)
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_16X_MODE;
+
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Hardware Flow Control Configuration */
@@ -809,7 +886,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_configure_xonxoff(up, termios);
spin_unlock_irqrestore(&up->port.lock, flags);
- dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
static void
@@ -819,7 +897,9 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;
- dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -829,6 +909,15 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
+
+ if (!device_may_wakeup(&up->pdev->dev)) {
+ if (!state)
+ pm_runtime_forbid(&up->pdev->dev);
+ else
+ pm_runtime_allow(&up->pdev->dev);
+ }
+
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_release_port(struct uart_port *port)
@@ -847,7 +936,7 @@ static void serial_omap_config_port(struct uart_port *port, int flags)
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
- up->pdev->id);
+ up->port.line);
up->port.type = PORT_OMAP;
}
@@ -864,7 +953,7 @@ serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
@@ -906,19 +995,26 @@ static inline void wait_for_xmitr(struct uart_omap_port *up)
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ pm_runtime_get_sync(&up->pdev->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned int status = serial_in(up, UART_LSR);
+ unsigned int status;
+ pm_runtime_get_sync(&up->pdev->dev);
+ status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;
- return serial_in(up, UART_RX);
+ status = serial_in(up, UART_RX);
+ pm_runtime_put(&up->pdev->dev);
+ return status;
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -946,6 +1042,8 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
+ pm_runtime_get_sync(&up->pdev->dev);
+
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -978,6 +1076,8 @@ serial_omap_console_write(struct console *co, const char *s,
if (up->msr_saved_flags)
check_modem_status(up);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
@@ -1014,7 +1114,7 @@ static struct console serial_omap_console = {
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
- serial_omap_console_ports[up->pdev->id] = up;
+ serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
@@ -1060,26 +1160,30 @@ static struct uart_driver serial_omap_reg = {
.cons = OMAP_CONSOLE,
};
-static int
-serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_SUSPEND
+static int serial_omap_suspend(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(pdev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
- if (up)
+ if (up) {
uart_suspend_port(&serial_omap_reg, &up->port);
+ flush_work_sync(&up->qos_work);
+ }
+
return 0;
}
-static int serial_omap_resume(struct platform_device *dev)
+static int serial_omap_resume(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(dev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
if (up)
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
+#endif
-static void serial_omap_rx_timeout(unsigned long uart_no)
+static void serial_omap_rxdma_poll(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
@@ -1089,9 +1193,9 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
- RX_TIMEOUT) {
+ up->uart_dma.rx_timeout) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
@@ -1120,7 +1224,7 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
}
up->port_activity = jiffies;
}
@@ -1135,6 +1239,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
int ret = 0;
if (up->uart_dma.rx_dma_channel == -1) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
@@ -1158,7 +1263,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
up->uart_dma.rx_dma_used = true;
return ret;
}
@@ -1221,6 +1326,19 @@ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
return;
}
+static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
+{
+ struct omap_uart_port_info *omap_up_info;
+
+ omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
+ if (!omap_up_info)
+ return NULL; /* out of memory */
+
+ of_property_read_u32(dev->of_node, "clock-frequency",
+ &omap_up_info->uartclk);
+ return omap_up_info;
+}
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
@@ -1228,6 +1346,9 @@ static int serial_omap_probe(struct platform_device *pdev)
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret = -ENOSPC;
+ if (pdev->dev.of_node)
+ omap_up_info = of_get_uart_port_info(&pdev->dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
@@ -1263,7 +1384,6 @@ static int serial_omap_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto do_release_region;
}
- sprintf(up->name, "OMAP UART%d", pdev->id);
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
@@ -1273,34 +1393,74 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
- up->port.line = pdev->id;
- up->port.membase = omap_up_info->membase;
- up->port.mapbase = omap_up_info->mapbase;
+ if (pdev->dev.of_node)
+ up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
+ else
+ up->port.line = pdev->id;
+
+ if (up->port.line < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
+ up->port.line);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ sprintf(up->name, "OMAP UART%d", up->port.line);
+ up->port.mapbase = mem->start;
+ up->port.membase = ioremap(mem->start, resource_size(mem));
+ if (!up->port.membase) {
+ dev_err(&pdev->dev, "can't ioremap UART\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
up->port.flags = omap_up_info->flags;
- up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
+ if (!up->port.uartclk) {
+ up->port.uartclk = DEFAULT_CLK_SPEED;
+ dev_warn(&pdev->dev, "No clock speed specified: using default:"
+ "%d\n", DEFAULT_CLK_SPEED);
+ }
up->uart_dma.uart_base = mem->start;
+ up->errata = omap_up_info->errata;
if (omap_up_info->dma_enabled) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
- up->uart_dma.rx_buf_size = 4096;
- up->uart_dma.rx_timeout = 2;
+ up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
+ up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
+ up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
- ui[pdev->id] = up;
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ pm_qos_add_request(&up->pm_qos_request,
+ PM_QOS_CPU_DMA_LATENCY, up->latency);
+ serial_omap_uart_wq = create_singlethread_workqueue(up->name);
+ INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ omap_up_info->autosuspend_timeout);
+
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto do_release_region;
+ pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, up);
return 0;
err:
@@ -1315,22 +1475,168 @@ static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
if (up) {
+ pm_runtime_disable(&up->pdev->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
+ pm_qos_remove_request(&up->pm_qos_request);
+
kfree(up);
}
+
+ platform_set_drvdata(dev, NULL);
return 0;
}
+/*
+ * Work-around for Errata i202 (2430, 3430, 3630, 4430 and 4460):
+ * accessing a UART register right after an MDR1 access can cause
+ * the UART to corrupt data.
+ *
+ * The required delay is
+ * 5 L4 clock cycles + 5 UART functional clock cycles (~0.2 us @ 48 MHz);
+ * give 10 times as much.
+ */
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
+{
+ u8 timeout = 255;
+
+ serial_out(up, UART_OMAP_MDR1, mdr1);
+ udelay(2);
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
+ UART_FCR_CLEAR_RCVR);
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
+ (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+ /* Should *never* happen. We warn and carry on */
+ dev_crit(&up->pdev->dev, "Errata i202: timed out %x\n",
+ serial_in(up, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
+
+static void serial_omap_restore_context(struct uart_omap_port *up)
+{
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
+ else
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, 0x0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_DLL, up->dll);
+ serial_out(up, UART_DLM, up->dlh);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, up->lcr);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int serial_omap_runtime_suspend(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (!up)
+ return -EINVAL;
+
+ if (!pdata || !pdata->enable_wakeup)
+ return 0;
+
+ if (pdata->get_context_loss_count)
+ up->context_loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (!up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, true);
+ up->wakeups_enabled = true;
+ }
+ } else {
+ if (up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, false);
+ up->wakeups_enabled = false;
+ }
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_forceidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_forceidle(up->pdev);
+
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ schedule_work(&up->qos_work);
+
+ return 0;
+}
+
+static int serial_omap_runtime_resume(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (up) {
+ if (pdata->get_context_loss_count) {
+ u32 loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (up->context_loss_cnt != loss_cnt)
+ serial_omap_restore_context(up);
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_noidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_noidle(up->pdev);
+
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops serial_omap_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
+ SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
+ serial_omap_runtime_resume, NULL)
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id omap_serial_of_match[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_serial_of_match);
+#endif
+
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
-
- .suspend = serial_omap_suspend,
- .resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
+ .pm = &serial_omap_dev_pm_ops,
+ .of_match_table = of_match_ptr(omap_serial_of_match),
},
};
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d6aba8c087e..de0f613ed6f 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -25,6 +25,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
@@ -198,6 +201,10 @@ enum {
#define PCI_VENDOR_ID_ROHM 0x10DB
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define DEFAULT_BAUD_RATE 1843200 /* 1.8432MHz */
+
struct pch_uart_buffer {
unsigned char *buf;
int size;
@@ -276,6 +283,9 @@ static struct pch_uart_driver_data drv_dat[] = {
[pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+static struct eg20t_port *pch_uart_ports[PCH_UART_NR];
+#endif
static unsigned int default_baud = 9600;
static const int trigger_level_256[4] = { 1, 64, 128, 224 };
static const int trigger_level_64[4] = { 1, 16, 32, 56 };
@@ -1385,6 +1395,143 @@ static struct uart_ops pch_uart_ops = {
.verify_port = pch_uart_verify_port
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct eg20t_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ for (;;) {
+ status = ioread8(up->membase + UART_LSR);
+
+ if ((status & bits) == bits)
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ unsigned int tmout;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = ioread8(up->membase + UART_MSR);
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
+static void pch_console_putchar(struct uart_port *port, int ch)
+{
+ struct eg20t_port *priv =
+ container_of(port, struct eg20t_port, port);
+
+ wait_for_xmitr(priv, UART_LSR_THRE);
+ iowrite8(ch, priv->membase + PCH_UART_THR);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+pch_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct eg20t_port *priv;
+
+ unsigned long flags;
+ u8 ier;
+ int locked = 1;
+
+ priv = pch_uart_ports[co->index];
+
+ touch_nmi_watchdog();
+
+ local_irq_save(flags);
+ if (priv->port.sysrq) {
+ /* serial8250_handle_port() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&priv->port.lock);
+ } else
+ spin_lock(&priv->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = ioread8(priv->membase + UART_IER);
+
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+
+ uart_console_write(&priv->port, s, count, pch_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(priv, BOTH_EMPTY);
+ iowrite8(ier, priv->membase + UART_IER);
+
+ if (locked)
+ spin_unlock(&priv->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init pch_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= PCH_UART_NR)
+ co->index = 0;
+ port = &pch_uart_ports[co->index]->port;
+
+ if (!port || (!port->iobase && !port->membase))
+ return -ENODEV;
+
+ /* setup uartclock */
+ port->uartclk = DEFAULT_BAUD_RATE;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pch_uart_driver;
+
+static struct console pch_console = {
+ .name = PCH_UART_DRIVER_DEVICE,
+ .write = pch_console_write,
+ .device = uart_console_device,
+ .setup = pch_console_setup,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
+ .data = &pch_uart_driver,
+};
+
+#define PCH_CONSOLE (&pch_console)
+#else
+#define PCH_CONSOLE NULL
+#endif
+
static struct uart_driver pch_uart_driver = {
.owner = THIS_MODULE,
.driver_name = KBUILD_MODNAME,
@@ -1392,6 +1539,7 @@ static struct uart_driver pch_uart_driver = {
.major = 0,
.minor = 0,
.nr = PCH_UART_NR,
+ .cons = PCH_CONSOLE,
};
static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
@@ -1418,7 +1566,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
if (!rxbuf)
goto init_port_free_txbuf;
- base_baud = 1843200; /* 1.8432MHz */
+ base_baud = DEFAULT_BAUD_RATE;
/* quirk for CM-iTC board */
board_name = dmi_get_system_info(DMI_BOARD_NAME);
@@ -1468,6 +1616,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_set_drvdata(pdev, priv);
pch_uart_hal_request(pdev, fifosize, base_baud);
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = priv;
+#endif
ret = uart_add_one_port(&pch_uart_driver, &priv->port);
if (ret < 0)
goto init_port_hal_free;
@@ -1475,6 +1626,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
return priv;
init_port_hal_free:
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = NULL;
+#endif
free_page((unsigned long)rxbuf);
init_port_free_txbuf:
kfree(priv);
@@ -1497,6 +1651,10 @@ static void pch_uart_pci_remove(struct pci_dev *pdev)
priv = (struct eg20t_port *)pci_get_drvdata(pdev);
pci_disable_msi(pdev);
+
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[priv->port.line] = NULL;
+#endif
pch_uart_exit_port(priv);
pci_disable_device(pdev);
kfree(priv);
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5acd24a27d0..e9c2dfe471a 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -99,6 +99,9 @@ MODULE_LICENSE("GPL");
#define PMACZILOG_NAME "ttyPZ"
#endif
+#define pmz_debug(fmt, arg...) pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_error(fmt, arg...) pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_info(fmt, arg...) pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg)
/*
* For the sake of early serial console, we can do a pre-probe
@@ -106,7 +109,6 @@ MODULE_LICENSE("GPL");
*/
static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS];
static int pmz_ports_count;
-static DEFINE_MUTEX(pmz_irq_mutex);
static struct uart_driver pmz_uart_reg = {
.owner = THIS_MODULE,
@@ -126,9 +128,6 @@ static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs)
{
int i;
- if (ZS_IS_ASLEEP(uap))
- return;
-
/* Let pending transmits finish. */
for (i = 0; i < 1000; i++) {
unsigned char stat = read_zsreg(uap, R1);
@@ -216,32 +215,24 @@ static void pmz_maybe_update_regs(struct uart_pmac_port *uap)
}
}
+static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
+{
+ if (enable) {
+ uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[1] |= EXT_INT_ENAB;
+ } else {
+ uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ }
+ write_zsreg(uap, R1, uap->curregs[1]);
+}
+
static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_struct *tty = NULL;
unsigned char ch, r1, drop, error, flag;
int loops = 0;
- /* The interrupt can be enabled when the port isn't open, typically
- * that happens when using one port is open and the other closed (stale
- * interrupt) or when one port is used as a console.
- */
- if (!ZS_IS_OPEN(uap)) {
- pmz_debug("pmz: draining input\n");
- /* Port is closed, drain input data */
- for (;;) {
- if ((++loops) > 1000)
- goto flood;
- (void)read_zsreg(uap, R1);
- write_zsreg(uap, R0, ERR_RES);
- (void)read_zsdata(uap);
- ch = read_zsreg(uap, R0);
- if (!(ch & Rx_CH_AV))
- break;
- }
- return NULL;
- }
-
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
WARN_ON(1);
@@ -339,9 +330,7 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
return tty;
flood:
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
return tty;
}
@@ -383,8 +372,6 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap)
{
struct circ_buf *xmit;
- if (ZS_IS_ASLEEP(uap))
- return;
if (ZS_IS_CONS(uap)) {
unsigned char status = read_zsreg(uap, R0);
@@ -481,6 +468,10 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
/* Channel A */
tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ if (!ZS_IS_OPEN(uap_a)) {
+ pmz_debug("ChanA interrupt while open !\n");
+ goto skip_a;
+ }
write_zsreg(uap_a, R0, RES_H_IUS);
zssync(uap_a);
if (r3 & CHAEXT)
@@ -491,16 +482,21 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_a);
rc = IRQ_HANDLED;
}
+ skip_a:
spin_unlock(&uap_a->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
- if (uap_b->node == NULL)
+ if (!uap_b)
goto out;
spin_lock(&uap_b->port.lock);
tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ if (!ZS_IS_OPEN(uap_b)) {
+ pmz_debug("ChanB interrupt while not open !\n");
+ goto skip_b;
+ }
write_zsreg(uap_b, R0, RES_H_IUS);
zssync(uap_b);
if (r3 & CHBEXT)
@@ -511,14 +507,12 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_b);
rc = IRQ_HANDLED;
}
+ skip_b:
spin_unlock(&uap_b->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
out:
-#ifdef DEBUG_HARD
- pmz_debug("irq done.\n");
-#endif
return rc;
}
@@ -543,12 +537,8 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
*/
static unsigned int pmz_tx_empty(struct uart_port *port)
{
- struct uart_pmac_port *uap = to_pmz(port);
unsigned char status;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return TIOCSER_TEMT;
-
status = pmz_peek_status(to_pmz(port));
if (status & Tx_BUF_EMP)
return TIOCSER_TEMT;
@@ -570,8 +560,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (ZS_IS_IRDA(uap))
return;
/* We get called during boot with a port not up yet */
- if (ZS_IS_ASLEEP(uap) ||
- !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
+ if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
return;
set_bits = clear_bits = 0;
@@ -590,8 +579,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* NOTE: Not subject to 'transmitter active' rule. */
uap->curregs[R5] |= set_bits;
uap->curregs[R5] &= ~clear_bits;
- if (ZS_IS_ASLEEP(uap))
- return;
+
write_zsreg(uap, R5, uap->curregs[R5]);
pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n",
set_bits, clear_bits, uap->curregs[R5]);
@@ -609,9 +597,6 @@ static unsigned int pmz_get_mctrl(struct uart_port *port)
unsigned char status;
unsigned int ret;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return 0;
-
status = read_zsreg(uap, R0);
ret = 0;
@@ -649,9 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
status = read_zsreg(uap, R0);
/* TX busy? Just wait for the TX done interrupt. */
@@ -690,9 +672,6 @@ static void pmz_stop_rx(struct uart_port *port)
{
struct uart_pmac_port *uap = to_pmz(port);
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
pmz_debug("pmz: stop_rx()()\n");
/* Disable all RX interrupts. */
@@ -711,14 +690,12 @@ static void pmz_enable_ms(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned char new_reg;
- if (ZS_IS_IRDA(uap) || uap->node == NULL)
+ if (ZS_IS_IRDA(uap))
return;
new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
if (new_reg != uap->curregs[R15]) {
uap->curregs[R15] = new_reg;
- if (ZS_IS_ASLEEP(uap))
- return;
/* NOTE: Not subject to 'transmitter active' rule. */
write_zsreg(uap, R15, uap->curregs[R15]);
}
@@ -734,8 +711,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
- if (uap->node == NULL)
- return;
set_bits = clear_bits = 0;
if (break_state)
@@ -748,12 +723,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != uap->curregs[R5]) {
uap->curregs[R5] = new_reg;
-
- /* NOTE: Not subject to 'transmitter active' rule. */
- if (ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
write_zsreg(uap, R5, uap->curregs[R5]);
}
@@ -927,14 +896,21 @@ static int __pmz_startup(struct uart_pmac_port *uap)
static void pmz_irda_reset(struct uart_pmac_port *uap)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] |= DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(110);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(110);
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] &= ~DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(10);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(10);
}
/*
@@ -949,13 +925,6 @@ static int pmz_startup(struct uart_port *port)
pmz_debug("pmz: startup()\n");
- if (ZS_IS_ASLEEP(uap))
- return -EAGAIN;
- if (uap->node == NULL)
- return -ENODEV;
-
- mutex_lock(&pmz_irq_mutex);
-
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
/* A console is never powered down. Else, power up and
@@ -966,18 +935,14 @@ static int pmz_startup(struct uart_port *port)
pwr_delay = __pmz_startup(uap);
spin_unlock_irqrestore(&port->lock, flags);
}
-
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+ sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
- "SCC", uap)) {
+ uap->irq_name, uap)) {
pmz_error("Unable to register zs interrupt handler.\n");
pmz_set_scc_power(uap, 0);
- mutex_unlock(&pmz_irq_mutex);
return -ENXIO;
}
- mutex_unlock(&pmz_irq_mutex);
-
/* Right now, we deal with delay by blocking here, I'll be
* smarter later on
*/
@@ -990,12 +955,9 @@ static int pmz_startup(struct uart_port *port)
if (ZS_IS_IRDA(uap))
pmz_irda_reset(uap);
- /* Enable interrupts emission from the chip */
+ /* Enable interrupt requests for the channel */
spin_lock_irqsave(&port->lock, flags);
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 1);
spin_unlock_irqrestore(&port->lock, flags);
pmz_debug("pmz: startup() done.\n");
@@ -1010,49 +972,35 @@ static void pmz_shutdown(struct uart_port *port)
pmz_debug("pmz: shutdown()\n");
- if (uap->node == NULL)
- return;
-
- mutex_lock(&pmz_irq_mutex);
-
- /* Release interrupt handler */
- free_irq(uap->port.irq, uap);
-
spin_lock_irqsave(&port->lock, flags);
- uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
+ /* Disable interrupt requests for the channel */
+ pmz_interrupt_control(uap, 0);
- if (!ZS_IS_OPEN(uap->mate))
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
+ if (!ZS_IS_CONS(uap)) {
+ /* Disable receiver and transmitter */
+ uap->curregs[R3] &= ~RxENABLE;
+ uap->curregs[R5] &= ~TxENABLE;
- /* Disable interrupts */
- if (!ZS_IS_ASLEEP(uap)) {
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ /* Disable break assertion */
+ uap->curregs[R5] &= ~SND_BRK;
+ pmz_maybe_update_regs(uap);
}
- if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
- return;
- }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Release interrupt handler */
+ free_irq(uap->port.irq, uap);
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
+ spin_lock_irqsave(&port->lock, flags);
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R5] &= ~SND_BRK;
- pmz_maybe_update_regs(uap);
+ uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
+ if (!ZS_IS_CONS(uap))
+ pmz_set_scc_power(uap, 0); /* Shut the chip down */
spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
-
pmz_debug("pmz: shutdown() done.\n");
}
@@ -1300,9 +1248,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
pmz_debug("pmz: set_termios()\n");
- if (ZS_IS_ASLEEP(uap))
- return;
-
memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
@@ -1352,19 +1297,15 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
spin_lock_irqsave(&port->lock, flags);
/* Disable IRQs on the port */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 0);
/* Setup new port configuration */
__pmz_set_termios(port, termios, old);
/* Re-enable IRQs on the port */
- if (ZS_IS_OPEN(uap)) {
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
+ if (ZS_IS_OPEN(uap))
+ pmz_interrupt_control(uap, 1);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1604,25 +1545,34 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
*/
static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
+ struct uart_pmac_port *uap;
int i;
/* Iterate the pmz_ports array to find a matching entry
*/
for (i = 0; i < MAX_ZS_PORTS; i++)
- if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
- struct uart_pmac_port *uap = &pmz_ports[i];
-
- uap->dev = mdev;
- dev_set_drvdata(&mdev->ofdev.dev, uap);
- if (macio_request_resources(uap->dev, "pmac_zilog"))
- printk(KERN_WARNING "%s: Failed to request resource"
- ", port still active\n",
- uap->node->name);
- else
- uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
- return 0;
- }
- return -ENODEV;
+ if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
+ break;
+ if (i >= MAX_ZS_PORTS)
+ return -ENODEV;
+
+
+ uap = &pmz_ports[i];
+ uap->dev = mdev;
+ uap->port.dev = &mdev->ofdev.dev;
+ dev_set_drvdata(&mdev->ofdev.dev, uap);
+
+ /* We still activate the port even when failing to request resources
+ * to work around bugs in ancient Apple device-trees
+ */
+ if (macio_request_resources(uap->dev, "pmac_zilog"))
+ printk(KERN_WARNING "%s: Failed to request resource"
+ ", port still active\n",
+ uap->node->name);
+ else
+ uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
/*
@@ -1636,12 +1586,15 @@ static int pmz_detach(struct macio_dev *mdev)
if (!uap)
return -ENODEV;
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) {
macio_release_resources(uap->dev);
uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED;
}
dev_set_drvdata(&mdev->ofdev.dev, NULL);
uap->dev = NULL;
+ uap->port.dev = NULL;
return 0;
}
@@ -1650,59 +1603,13 @@ static int pmz_detach(struct macio_dev *mdev)
static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
if (uap == NULL) {
printk("HRM... pmz_suspend with NULL uap\n");
return 0;
}
- if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
- return 0;
-
- pmz_debug("suspend, switching to state %d\n", pm_state.event);
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) {
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
-
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- uap->curregs[R5] &= ~SND_BRK;
- pmz_load_zsregs(uap, uap->curregs);
- uap->flags |= PMACZILOG_FLAG_IS_ASLEEP;
- mb();
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate))
- if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
- disable_irq(uap->port.irq);
- }
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags &= ~CON_ENABLED;
-
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
-
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- pmz_debug("suspend, switching complete\n");
-
- mdev->ofdev.dev.power.power_state = pm_state;
+ uart_suspend_port(&pmz_uart_reg, &uap->port);
return 0;
}
@@ -1711,76 +1618,20 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
static int pmz_resume(struct macio_dev *mdev)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
- int pwr_delay = 0;
if (uap == NULL)
return 0;
- if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
- return 0;
-
- pmz_debug("resume, switching to state 0\n");
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
- if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) {
- spin_unlock_irqrestore(&uap->port.lock, flags);
- goto bail;
- }
- pwr_delay = __pmz_startup(uap);
-
- /* Take care of config that may have changed while asleep */
- __pmz_set_termios(&uap->port, &uap->termios_cache, NULL);
-
- if (ZS_IS_OPEN(uap)) {
- /* Enable interrupts */
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags |= CON_ENABLED;
-
- /* Re-enable IRQ on the controller */
- if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
- enable_irq(uap->port.irq);
- }
-
- bail:
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- /* Right now, we deal with delay by blocking here, I'll be
- * smarter later on
- */
- if (pwr_delay != 0) {
- pmz_debug("pmz: delaying %d ms\n", pwr_delay);
- msleep(pwr_delay);
- }
-
- pmz_debug("resume, switching complete\n");
-
- mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
+ uart_resume_port(&pmz_uart_reg, &uap->port);
return 0;
}
/*
* Probe all ports in the system and build the ports array, we register
- * with the serial layer at this point, the macio-type probing is only
- * used later to "attach" to the sysfs tree so we get power management
- * events
+ * with the serial layer later, so we get a proper struct device which
+ * allows the tty to attach properly. This is later than it used to be
+ * but the tty layer really wants it that way.
*/
static int __init pmz_probe(void)
{
@@ -1816,8 +1667,10 @@ static int __init pmz_probe(void)
/*
* Fill basic fields in the port structures
*/
- pmz_ports[count].mate = &pmz_ports[count+1];
- pmz_ports[count+1].mate = &pmz_ports[count];
+ if (node_b != NULL) {
+ pmz_ports[count].mate = &pmz_ports[count+1];
+ pmz_ports[count+1].mate = &pmz_ports[count];
+ }
pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
pmz_ports[count].node = node_a;
pmz_ports[count+1].node = node_b;
@@ -1855,8 +1708,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
struct resource *r_ports;
int irq;
- r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0);
- irq = platform_get_irq(uap->node, 0);
+ r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(uap->pdev, 0);
if (!r_ports || !irq)
return -ENODEV;
@@ -1885,19 +1738,19 @@ static int __init pmz_probe(void)
pmz_ports_count = 0;
- pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[0].port.line = 0;
pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
- pmz_ports[0].node = &scc_a_pdev;
+ pmz_ports[0].pdev = &scc_a_pdev;
err = pmz_init_port(&pmz_ports[0]);
if (err)
return err;
pmz_ports_count++;
+ pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[1].mate = &pmz_ports[0];
pmz_ports[1].port.line = 1;
pmz_ports[1].flags = 0;
- pmz_ports[1].node = &scc_b_pdev;
+ pmz_ports[1].pdev = &scc_b_pdev;
err = pmz_init_port(&pmz_ports[1]);
if (err)
return err;
@@ -1913,16 +1766,35 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
static int __init pmz_attach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap;
int i;
+ /* Iterate the pmz_ports array to find a matching entry */
for (i = 0; i < pmz_ports_count; i++)
- if (pmz_ports[i].node == pdev)
- return 0;
- return -ENODEV;
+ if (pmz_ports[i].pdev == pdev)
+ break;
+ if (i >= pmz_ports_count)
+ return -ENODEV;
+
+ uap = &pmz_ports[i];
+ uap->port.dev = &pdev->dev;
+ platform_set_drvdata(pdev, uap);
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
static int __exit pmz_detach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap = platform_get_drvdata(pdev);
+
+ if (!uap)
+ return -ENODEV;
+
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
+ platform_set_drvdata(pdev, NULL);
+ uap->port.dev = NULL;
+
return 0;
}
@@ -1954,38 +1826,13 @@ static struct console pmz_console = {
*/
static int __init pmz_register(void)
{
- int i, rc;
-
pmz_uart_reg.nr = pmz_ports_count;
pmz_uart_reg.cons = PMACZILOG_CONSOLE;
/*
* Register this driver with the serial core
*/
- rc = uart_register_driver(&pmz_uart_reg);
- if (rc)
- return rc;
-
- /*
- * Register each port with the serial core
- */
- for (i = 0; i < pmz_ports_count; i++) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- /* NULL node may happen on wallstreet */
- if (uport->node != NULL)
- rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
- if (rc)
- goto err_out;
- }
-
- return 0;
-err_out:
- while (i-- > 0) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
- }
- uart_unregister_driver(&pmz_uart_reg);
- return rc;
+ return uart_register_driver(&pmz_uart_reg);
}
#ifdef CONFIG_PPC_PMAC
@@ -2084,10 +1931,13 @@ static void __exit exit_pmz(void)
for (i = 0; i < pmz_ports_count; i++) {
struct uart_pmac_port *uport = &pmz_ports[i];
- if (uport->node != NULL) {
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
+#ifdef CONFIG_PPC_PMAC
+ if (uport->node != NULL)
pmz_dispose_port(uport);
- }
+#else
+ if (uport->pdev != NULL)
+ pmz_dispose_port(uport);
+#endif
}
/* Unregister UART driver */
uart_unregister_driver(&pmz_uart_reg);
@@ -2114,8 +1964,6 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
struct uart_pmac_port *uap = &pmz_ports[con->index];
unsigned long flags;
- if (ZS_IS_ASLEEP(uap))
- return;
spin_lock_irqsave(&uap->port.lock, flags);
/* Turn of interrupts and enable the transmitter. */
@@ -2160,8 +2008,13 @@ static int __init pmz_console_setup(struct console *co, char *options)
if (co->index >= pmz_ports_count)
co->index = 0;
uap = &pmz_ports[co->index];
+#ifdef CONFIG_PPC_PMAC
if (uap->node == NULL)
return -ENODEV;
+#else
+ if (uap->pdev == NULL)
+ return -ENODEV;
+#endif
port = &uap->port;
/*
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fbb1b2..3483242ee3e 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -1,16 +1,6 @@
#ifndef __PMAC_ZILOG_H__
#define __PMAC_ZILOG_H__
-#ifdef CONFIG_PPC_PMAC
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->dev->ofdev.dev, fmt, ## arg)
-#else
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->node->dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->node->dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->node->dev, fmt, ## arg)
-#endif
-
/*
* At most 2 ESCCs with 2 ports each
*/
@@ -35,7 +25,7 @@ struct uart_pmac_port {
*/
struct device_node *node;
#else
- struct platform_device *node;
+ struct platform_device *pdev;
#endif
/* Port type as obtained from device tree (IRDA, modem, ...) */
@@ -50,14 +40,11 @@ struct uart_pmac_port {
#define PMACZILOG_FLAG_REGS_HELD 0x00000010
#define PMACZILOG_FLAG_TX_STOPPED 0x00000020
#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
-#define PMACZILOG_FLAG_ENABLED 0x00000080
#define PMACZILOG_FLAG_IS_IRDA 0x00000100
#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
#define PMACZILOG_FLAG_HAS_DMA 0x00000400
#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
-#define PMACZILOG_FLAG_IS_ASLEEP 0x00001000
#define PMACZILOG_FLAG_IS_OPEN 0x00002000
-#define PMACZILOG_FLAG_IS_IRQ_ON 0x00004000
#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
#define PMACZILOG_FLAG_BREAK 0x00010000
@@ -74,6 +61,8 @@ struct uart_pmac_port {
volatile struct dbdma_regs __iomem *rx_dma_regs;
#endif
+ unsigned char irq_name[8];
+
struct ktermios termios_cache;
};
@@ -388,9 +377,7 @@ static inline void zssync(struct uart_pmac_port *port)
#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
-#define ZS_IS_ASLEEP(UP) ((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP)
#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
-#define ZS_IS_IRQ_ON(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON)
#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
#endif /* __PMAC_ZILOG_H__ */
diff --git a/drivers/tty/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
deleted file mode 100644
index b1d7e7c1849..00000000000
--- a/drivers/tty/serial/s3c2410.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Driver for Samsung S3C2410 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2410_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2410_UCON_UCLK;
- else
- ucon &= ~S3C2410_UCON_UCLK;
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-static int s3c2410_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
- clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
-
- return 0;
-}
-
-static int s3c2410_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- wr_regl(port, S3C2410_UCON, cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2410_uart_inf = {
- .name = "Samsung S3C2410 UART",
- .type = PORT_S3C2410,
- .fifosize = 16,
- .rx_fifomask = S3C2410_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2410_UFSTAT_RXFULL,
- .tx_fifofull = S3C2410_UFSTAT_TXFULL,
- .tx_fifomask = S3C2410_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2410_serial_getsource,
- .set_clksrc = s3c2410_serial_setsource,
- .reset_port = s3c2410_serial_resetport,
-};
-
-static int s3c2410_serial_probe(struct platform_device *dev)
-{
- return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
-}
-
-static struct platform_driver s3c2410_serial_driver = {
- .probe = s3c2410_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2410-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2410_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
-}
-
-static void __exit s3c2410_serial_exit(void)
-{
- platform_driver_unregister(&s3c2410_serial_driver);
-}
-
-module_init(s3c2410_serial_init);
-module_exit(s3c2410_serial_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver");
-MODULE_ALIAS("platform:s3c2410-uart");
diff --git a/drivers/tty/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
deleted file mode 100644
index 2234bf9ced4..00000000000
--- a/drivers/tty/serial/s3c2412.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C2412 and S3C2413 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2412_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= ~S3C2412_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "usysclk") == 0)
- ucon |= S3C2412_UCON_USYSCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2412_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- switch (ucon & S3C2412_UCON_CLKMASK) {
- case S3C2412_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2412_UCON_PCLK:
- case S3C2412_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2412_UCON_USYSCLK:
- clk->divisor = 1;
- clk->name = "usysclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2412_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("%s: port=%p (%08lx), cfg=%p\n",
- __func__, port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C2412_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2412_uart_inf = {
- .name = "Samsung S3C2412 UART",
- .type = PORT_S3C2412,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2412_serial_getsource,
- .set_clksrc = s3c2412_serial_setsource,
- .reset_port = s3c2412_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2412_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
-}
-
-static struct platform_driver s3c2412_serial_driver = {
- .probe = s3c2412_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2412-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static inline int s3c2412_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
-}
-
-static inline void s3c2412_serial_exit(void)
-{
- platform_driver_unregister(&s3c2412_serial_driver);
-}
-
-module_init(s3c2412_serial_init);
-module_exit(s3c2412_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2412,S3C2413 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2412-uart");
diff --git a/drivers/tty/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
deleted file mode 100644
index 1d0c324b813..00000000000
--- a/drivers/tty/serial/s3c2440.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Driver for Samsung S3C2440 and S3C2442 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-
-static int s3c2440_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- /* todo - proper fclk<>nonfclk switch. */
-
- ucon &= ~S3C2440_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "fclk") == 0)
- ucon |= S3C2440_UCON_FCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2440_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
- unsigned long ucon0, ucon1, ucon2;
-
- switch (ucon & S3C2440_UCON_CLKMASK) {
- case S3C2440_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2440_UCON_PCLK:
- case S3C2440_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2440_UCON_FCLK:
- /* the fun of calculating the uart divisors on
- * the s3c2440 */
-
- ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
- ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
- ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
-
- printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
-
- ucon0 &= S3C2440_UCON0_DIVMASK;
- ucon1 &= S3C2440_UCON1_DIVMASK;
- ucon2 &= S3C2440_UCON2_DIVMASK;
-
- if (ucon0 != 0) {
- clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 6;
- } else if (ucon1 != 0) {
- clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 21;
- } else if (ucon2 != 0) {
- clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 36;
- } else {
-			/* manual claims 44, seems to be 9 */
- clk->divisor = 9;
- }
-
- clk->name = "fclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2440_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2440_uart_inf = {
- .name = "Samsung S3C2440 UART",
- .type = PORT_S3C2440,
- .fifosize = 64,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2440_serial_getsource,
- .set_clksrc = s3c2440_serial_setsource,
- .reset_port = s3c2440_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2440_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
-}
-
-static struct platform_driver s3c2440_serial_driver = {
- .probe = s3c2440_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2440-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2440_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
-}
-
-static void __exit s3c2440_serial_exit(void)
-{
- platform_driver_unregister(&s3c2440_serial_driver);
-}
-
-module_init(s3c2440_serial_init);
-module_exit(s3c2440_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/tty/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
deleted file mode 100644
index e2f6913d84d..00000000000
--- a/drivers/tty/serial/s3c6400.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C6400 and S3C6410 SoC onboard UARTs.
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-
-#include "samsung.h"
-
-static int s3c6400_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk0") == 0) {
- ucon &= ~S3C6400_UCON_CLKMASK;
- ucon |= S3C6400_UCON_UCLK0;
- } else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S3C6400_UCON_UCLK1;
- else if (strcmp(clk->name, "pclk") == 0) {
- /* See notes about transitioning from UCLK to PCLK */
- ucon &= ~S3C6400_UCON_UCLK0;
- } else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c6400_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- switch (ucon & S3C6400_UCON_CLKMASK) {
- case S3C6400_UCON_UCLK0:
- clk->name = "uclk0";
- break;
-
- case S3C6400_UCON_UCLK1:
- clk->name = "uclk1";
- break;
-
- case S3C6400_UCON_PCLK:
- case S3C6400_UCON_PCLK2:
- clk->name = "pclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c6400_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c6400_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C6400_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c6400_uart_inf = {
- .name = "Samsung S3C6400 UART",
- .type = PORT_S3C6400,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c6400_serial_getsource,
- .set_clksrc = s3c6400_serial_setsource,
- .reset_port = s3c6400_serial_resetport,
-};
-
-/* device management */
-
-static int s3c6400_serial_probe(struct platform_device *dev)
-{
- dbg("s3c6400_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
-}
-
-static struct platform_driver s3c6400_serial_driver = {
- .probe = s3c6400_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c6400-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c6400_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
-}
-
-static void __exit s3c6400_serial_exit(void)
-{
- platform_driver_unregister(&s3c6400_serial_driver);
-}
-
-module_init(s3c6400_serial_init);
-module_exit(s3c6400_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C6400,S3C6410 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c6400-uart");
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
deleted file mode 100644
index 8b0b888a1b7..00000000000
--- a/drivers/tty/serial/s5pv210.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on drivers/serial/s3c6400.c
- *
- * Driver for Samsung S5PV210 SoC UARTs.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/delay.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <plat/regs-serial.h>
-#include "samsung.h"
-
-static int s5pv210_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- if (strcmp(clk->name, "pclk") == 0)
- ucon &= ~S5PV210_UCON_CLKMASK;
- else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S5PV210_UCON_CLKMASK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s5pv210_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- switch (ucon & S5PV210_UCON_CLKMASK) {
- case S5PV210_UCON_PCLK:
- clk->name = "pclk";
- break;
- case S5PV210_UCON_UCLK:
- clk->name = "uclk1";
- break;
- }
-
- return 0;
-}
-
-static int s5pv210_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= S5PV210_UCON_CLKMASK;
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
-	/* A delay is needed when resetting the FIFO register */
- udelay(1);
-
- return 0;
-}
-
-#define S5PV210_UART_DEFAULT_INFO(fifo_size) \
- .name = "Samsung S5PV210 UART0", \
- .type = PORT_S3C6400, \
- .fifosize = fifo_size, \
- .has_divslot = 1, \
- .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
- .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
- .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
- .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
- .get_clksrc = s5pv210_serial_getsource, \
- .set_clksrc = s5pv210_serial_setsource, \
- .reset_port = s5pv210_serial_resetport
-
-static struct s3c24xx_uart_info s5p_port_fifo256 = {
- S5PV210_UART_DEFAULT_INFO(256),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo64 = {
- S5PV210_UART_DEFAULT_INFO(64),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo16 = {
- S5PV210_UART_DEFAULT_INFO(16),
-};
-
-static struct s3c24xx_uart_info *s5p_uart_inf[] = {
- [0] = &s5p_port_fifo256,
- [1] = &s5p_port_fifo64,
- [2] = &s5p_port_fifo16,
- [3] = &s5p_port_fifo16,
-};
-
-/* device management */
-static int s5p_serial_probe(struct platform_device *pdev)
-{
- return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
-}
-
-static struct platform_driver s5p_serial_driver = {
- .probe = s5p_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s5pv210-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s5p_serial_init(void)
-{
- return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
-}
-
-static void __exit s5p_serial_exit(void)
-{
- platform_driver_unregister(&s5p_serial_driver);
-}
-
-module_init(s5p_serial_init);
-module_exit(s5p_serial_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s5pv210-uart");
-MODULE_DESCRIPTION("Samsung S5PV210 UART Driver support");
-MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b31f1c3a2c4..f96f37b5fec 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
#include <asm/irq.h>
@@ -49,6 +50,7 @@
#include <mach/map.h>
#include <plat/regs-serial.h>
+#include <plat/clock.h>
#include "samsung.h"
@@ -190,10 +192,13 @@ static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *p
static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport;
+
if (port->dev == NULL)
return NULL;
- return (struct s3c2410_uartcfg *)port->dev->platform_data;
+ ourport = container_of(port, struct s3c24xx_uart_port, port);
+ return ourport->cfg;
}
static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
@@ -202,7 +207,7 @@ static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
if (ufstat & info->rx_fifofull)
- return info->fifosize;
+ return ourport->port.fifosize;
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
@@ -555,154 +560,98 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
*
*/
+#define MAX_CLK_NAME_LENGTH 15
-#define MAX_CLKS (8)
-
-static struct s3c24xx_uart_clksrc tmp_clksrc = {
- .name = "pclk",
- .min_baud = 0,
- .max_baud = 0,
- .divisor = 1,
-};
-
-static inline int
-s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
+static inline int s3c24xx_serial_getsource(struct uart_port *port)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- return (info->get_clksrc)(port, c);
-}
-
-static inline int
-s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
-{
- struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ if (info->num_clks == 1)
+ return 0;
- return (info->set_clksrc)(port, c);
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= info->clksel_mask;
+ return ucon >> info->clksel_shift;
}
-struct baud_calc {
- struct s3c24xx_uart_clksrc *clksrc;
- unsigned int calc;
- unsigned int divslot;
- unsigned int quot;
- struct clk *src;
-};
-
-static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
- struct uart_port *port,
- struct s3c24xx_uart_clksrc *clksrc,
- unsigned int baud)
+static void s3c24xx_serial_setsource(struct uart_port *port,
+ unsigned int clk_sel)
{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned long rate;
-
- calc->src = clk_get(port->dev, clksrc->name);
- if (calc->src == NULL || IS_ERR(calc->src))
- return 0;
-
- rate = clk_get_rate(calc->src);
- rate /= clksrc->divisor;
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- calc->clksrc = clksrc;
+ if (info->num_clks == 1)
+ return;
- if (ourport->info->has_divslot) {
- unsigned long div = rate / baud;
-
- /* The UDIVSLOT register on the newer UARTs allows us to
- * get a divisor adjustment of 1/16th on the baud clock.
- *
- * We don't keep the UDIVSLOT value (the 16ths we calculated
- * by not multiplying the baud by 16) as it is easy enough
- * to recalculate.
- */
-
- calc->quot = div / 16;
- calc->calc = rate / div;
- } else {
- calc->quot = (rate + (8 * baud)) / (16 * baud);
- calc->calc = (rate / (calc->quot * 16));
- }
+ ucon = rd_regl(port, S3C2410_UCON);
+ if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel)
+ return;
- calc->quot--;
- return 1;
+ ucon &= ~info->clksel_mask;
+ ucon |= clk_sel << info->clksel_shift;
+ wr_regl(port, S3C2410_UCON, ucon);
}
-static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
- struct s3c24xx_uart_clksrc **clksrc,
- struct clk **clk,
- unsigned int baud)
+static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ unsigned int req_baud, struct clk **best_clk,
+ unsigned int *clk_num)
{
- struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
- struct s3c24xx_uart_clksrc *clkp;
- struct baud_calc res[MAX_CLKS];
- struct baud_calc *resptr, *best, *sptr;
- int i;
-
- clkp = cfg->clocks;
- best = NULL;
-
- if (cfg->clocks_size < 2) {
- if (cfg->clocks_size == 0)
- clkp = &tmp_clksrc;
-
- /* check to see if we're sourcing fclk, and if so we're
- * going to have to update the clock source
- */
-
- if (strcmp(clkp->name, "fclk") == 0) {
- struct s3c24xx_uart_clksrc src;
-
- s3c24xx_serial_getsource(port, &src);
-
- /* check that the port already using fclk, and if
- * not, then re-select fclk
+ struct s3c24xx_uart_info *info = ourport->info;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ char clkname[MAX_CLK_NAME_LENGTH];
+ int calc_deviation, deviation = (1 << 30) - 1;
+
+ *best_clk = NULL;
+ clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
+ ourport->info->def_clk_sel;
+ for (cnt = 0; cnt < info->num_clks; cnt++) {
+ if (!(clk_sel & (1 << cnt)))
+ continue;
+
+ sprintf(clkname, "clk_uart_baud%d", cnt);
+ clk = clk_get(ourport->port.dev, clkname);
+ if (IS_ERR_OR_NULL(clk))
+ continue;
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ continue;
+
+ if (ourport->info->has_divslot) {
+ unsigned long div = rate / req_baud;
+
+ /* The UDIVSLOT register on the newer UARTs allows us to
+ * get a divisor adjustment of 1/16th on the baud clock.
+ *
+ * We don't keep the UDIVSLOT value (the 16ths we
+ * calculated by not multiplying the baud by 16) as it
+ * is easy enough to recalculate.
*/
- if (strcmp(src.name, clkp->name) == 0) {
- s3c24xx_serial_setsource(port, clkp);
- s3c24xx_serial_getsource(port, &src);
- }
-
- clkp->divisor = src.divisor;
- }
-
- s3c24xx_serial_calcbaud(res, port, clkp, baud);
- best = res;
- resptr = best + 1;
- } else {
- resptr = res;
-
- for (i = 0; i < cfg->clocks_size; i++, clkp++) {
- if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
- resptr++;
+ quot = div / 16;
+ baud = rate / div;
+ } else {
+ quot = (rate + (8 * req_baud)) / (16 * req_baud);
+ baud = rate / (quot * 16);
}
- }
-
- /* ok, we now need to select the best clock we found */
-
- if (!best) {
- unsigned int deviation = (1<<30)|((1<<30)-1);
- int calc_deviation;
+ quot--;
- for (sptr = res; sptr < resptr; sptr++) {
- calc_deviation = baud - sptr->calc;
- if (calc_deviation < 0)
- calc_deviation = -calc_deviation;
+ calc_deviation = req_baud - baud;
+ if (calc_deviation < 0)
+ calc_deviation = -calc_deviation;
- if (calc_deviation < deviation) {
- best = sptr;
- deviation = calc_deviation;
- }
+ if (calc_deviation < deviation) {
+ *best_clk = clk;
+ best_quot = quot;
+ *clk_num = cnt;
+ deviation = calc_deviation;
}
}
- /* store results to pass back */
-
- *clksrc = best->clksrc;
- *clk = best->src;
-
- return best->quot;
+ return best_quot;
}
/* udivslot_table[]
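
For reference (a sketch, not part of the patch itself): the rewrite above folds the old per-SoC getsource/setsource callbacks into one loop over "clk_uart_baud%d" clocks, keeping whichever candidate's achieved baud deviates least from the request. The divisor arithmetic can be exercised in isolation; the standalone program below uses a made-up clock rate and mirrors only the two quot formulas (with and without UDIVSLOT) from the hunk above.

/* Standalone illustration of the quot calculation; the numbers are made up. */
#include <stdio.h>

static unsigned int quot_with_divslot(unsigned long rate, unsigned int req_baud,
				      unsigned int *achieved)
{
	unsigned long div = rate / req_baud;	/* divisor including the 1/16th slots */

	*achieved = rate / div;
	return div / 16 - 1;			/* UBRDIV value; slot bits recomputed later */
}

static unsigned int quot_plain(unsigned long rate, unsigned int req_baud,
			       unsigned int *achieved)
{
	/* round to the nearest 16x divisor, as in the non-divslot branch */
	unsigned int quot = (rate + (8 * req_baud)) / (16 * req_baud);

	*achieved = rate / (quot * 16);
	return quot - 1;
}

int main(void)
{
	unsigned long rate = 66700000;		/* assumed source clock, Hz */
	unsigned int req = 115200;
	unsigned int baud, quot;

	quot = quot_with_divslot(rate, req, &baud);
	printf("divslot: quot=%u achieved baud=%u\n", quot, baud);

	quot = quot_plain(rate, req, &baud);
	printf("plain:   quot=%u achieved baud=%u\n", quot, baud);
	return 0;
}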
@@ -735,10 +684,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_clksrc *clksrc = NULL;
struct clk *clk = NULL;
unsigned long flags;
- unsigned int baud, quot;
+ unsigned int baud, quot, clk_sel = 0;
unsigned int ulcon;
unsigned int umcon;
unsigned int udivslot = 0;
@@ -754,17 +702,16 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
*/
baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
-
+ quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel);
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
- else
- quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
+ if (!clk)
+ return;
/* check to see if we need to change clock source */
- if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
- dbg("selecting clock %p\n", clk);
- s3c24xx_serial_setsource(port, clksrc);
+ if (ourport->baudclk != clk) {
+ s3c24xx_serial_setsource(port, clk_sel);
if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
clk_disable(ourport->baudclk);
@@ -773,7 +720,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
clk_enable(clk);
- ourport->clksrc = clksrc;
ourport->baudclk = clk;
ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
}
@@ -1020,16 +966,29 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
/* s3c24xx_serial_resetport
*
- * wrapper to call the specific reset for this port (reset the fifos
- * and the settings)
+ * reset the fifos and the other settings.
*/
-static inline int s3c24xx_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
+static void s3c24xx_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ unsigned int ucon_mask;
+
+ ucon_mask = info->clksel_mask;
+ if (info->type == PORT_S3C2440)
+ ucon_mask |= S3C2440_UCON0_DIVMASK;
- return (info->reset_port)(port, cfg);
+ ucon &= ucon_mask;
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+
+ /* reset both fifos */
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ /* some delay is required after fifo reset */
+ udelay(1);
}
@@ -1121,11 +1080,10 @@ static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *p
*/
static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
- struct s3c24xx_uart_info *info,
struct platform_device *platdev)
{
struct uart_port *port = &ourport->port;
- struct s3c2410_uartcfg *cfg;
+ struct s3c2410_uartcfg *cfg = ourport->cfg;
struct resource *res;
int ret;
@@ -1134,30 +1092,16 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
if (platdev == NULL)
return -ENODEV;
- cfg = s3c24xx_dev_to_cfg(&platdev->dev);
-
if (port->mapbase != 0)
return 0;
- if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
- printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
- cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
- return -ERANGE;
- }
-
/* setup info for port */
port->dev = &platdev->dev;
- ourport->info = info;
/* Startup sequence is different for s3c64xx and higher SoC's */
if (s3c24xx_serial_has_interrupt_mask(port))
s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
- /* copy the info in from provided structure */
- ourport->port.fifosize = info->fifosize;
-
- dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
-
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
@@ -1215,43 +1159,74 @@ static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
+ return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->baudclk->name);
}
static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
+
/* Device driver serial port probe */
+static const struct of_device_id s3c24xx_uart_dt_match[];
static int probe_index;
-int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *info)
+static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
+ return (struct s3c24xx_serial_drv_data *)match->data;
+ }
+#endif
+ return (struct s3c24xx_serial_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_serial_probe(struct platform_device *pdev)
{
struct s3c24xx_uart_port *ourport;
int ret;
- dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
+ dbg("s3c24xx_serial_probe(%p) %d\n", pdev, probe_index);
ourport = &s3c24xx_serial_ports[probe_index];
+
+ ourport->drv_data = s3c24xx_get_driver_data(pdev);
+ if (!ourport->drv_data) {
+ dev_err(&pdev->dev, "could not find driver data\n");
+ return -ENODEV;
+ }
+
+ ourport->info = ourport->drv_data->info;
+ ourport->cfg = (pdev->dev.platform_data) ?
+ (struct s3c2410_uartcfg *)pdev->dev.platform_data :
+ ourport->drv_data->def_cfg;
+
+ ourport->port.fifosize = (ourport->info->fifosize) ?
+ ourport->info->fifosize :
+ ourport->drv_data->fifosize[probe_index];
+
probe_index++;
dbg("%s: initialising port %p...\n", __func__, ourport);
- ret = s3c24xx_serial_init_port(ourport, info, dev);
+ ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
goto probe_err;
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
- platform_set_drvdata(dev, &ourport->port);
+ platform_set_drvdata(pdev, &ourport->port);
- ret = device_create_file(&dev->dev, &dev_attr_clock_source);
+ ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
if (ret < 0)
- printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
+ dev_err(&pdev->dev, "failed to add clock source attr.\n");
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
- dev_err(&dev->dev, "failed to add cpufreq notifier\n");
+ dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
return 0;
@@ -1259,9 +1234,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
return ret;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-
-int __devexit s3c24xx_serial_remove(struct platform_device *dev)
+static int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
@@ -1274,8 +1247,6 @@ int __devexit s3c24xx_serial_remove(struct platform_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
-
/* UART power management code */
#ifdef CONFIG_PM_SLEEP
static int s3c24xx_serial_suspend(struct device *dev)
@@ -1315,41 +1286,6 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
#define SERIAL_SAMSUNG_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
-int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info)
-{
- dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
-
- drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
-
- return platform_driver_register(drv);
-}
-
-EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
-
-/* module initialisation code */
-
-static int __init s3c24xx_serial_modinit(void)
-{
- int ret;
-
- ret = uart_register_driver(&s3c24xx_uart_drv);
- if (ret < 0) {
- printk(KERN_ERR "failed to register UART driver\n");
- return -1;
- }
-
- return 0;
-}
-
-static void __exit s3c24xx_serial_modexit(void)
-{
- uart_unregister_driver(&s3c24xx_uart_drv);
-}
-
-module_init(s3c24xx_serial_modinit);
-module_exit(s3c24xx_serial_modexit);
-
/* Console code */
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
@@ -1395,12 +1331,13 @@ static void __init
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
- struct s3c24xx_uart_clksrc clksrc;
struct clk *clk;
unsigned int ulcon;
unsigned int ucon;
unsigned int ubrdiv;
unsigned long rate;
+ unsigned int clk_sel;
+ char clk_name[MAX_CLK_NAME_LENGTH];
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
@@ -1445,44 +1382,21 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
/* now calculate the baud rate */
- s3c24xx_serial_getsource(port, &clksrc);
+ clk_sel = s3c24xx_serial_getsource(port);
+ sprintf(clk_name, "clk_uart_baud%d", clk_sel);
- clk = clk_get(port->dev, clksrc.name);
+ clk = clk_get(port->dev, clk_name);
if (!IS_ERR(clk) && clk != NULL)
- rate = clk_get_rate(clk) / clksrc.divisor;
+ rate = clk_get_rate(clk);
else
rate = 1;
-
*baud = rate / (16 * (ubrdiv + 1));
dbg("calculated baud %d\n", *baud);
}
}
-/* s3c24xx_serial_init_ports
- *
- * initialise the serial ports from the machine provided initialisation
- * data.
-*/
-
-static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
-{
- struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
- struct platform_device **platdev_ptr;
- int i;
-
- dbg("s3c24xx_serial_init_ports: initialising ports...\n");
-
- platdev_ptr = s3c24xx_uart_devs;
-
- for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
- s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
- }
-
- return 0;
-}
-
static int __init
s3c24xx_serial_console_setup(struct console *co, char *options)
{
@@ -1526,11 +1440,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-/* s3c24xx_serial_initconsole
- *
- * initialise the console from one of the uart drivers
-*/
-
static struct console s3c24xx_serial_console = {
.name = S3C24XX_SERIAL_NAME,
.device = uart_console_device,
@@ -1540,34 +1449,250 @@ static struct console s3c24xx_serial_console = {
.setup = s3c24xx_serial_console_setup,
.data = &s3c24xx_uart_drv,
};
+#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
-int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **info)
+#ifdef CONFIG_CPU_S3C2410
+static struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2410 UART",
+ .type = PORT_S3C2410,
+ .fifosize = 16,
+ .rx_fifomask = S3C2410_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2410_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2410_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2410_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S3C2410_UCON_CLKMASK,
+ .clksel_shift = S3C2410_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2410_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2410_serial_drv_data)
+#else
+#define S3C2410_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
-{
- struct platform_device *dev = s3c24xx_uart_devs[0];
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2412 UART",
+ .type = PORT_S3C2412,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2412_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2412_serial_drv_data)
+#else
+#define S3C2412_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- dbg("s3c24xx_serial_initconsole\n");
+#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \
+ defined(CONFIG_CPU_S3C2443)
+static struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2440 UART",
+ .type = PORT_S3C2440,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2440_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2440_serial_drv_data)
+#else
+#define S3C2440_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- /* select driver based on the cpu */
+#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410) || \
+ defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) || \
+ defined(CONFIG_CPU_S5PC100)
+static struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C6400 UART",
+ .type = PORT_S3C6400,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C6400_UCON_CLKMASK,
+ .clksel_shift = S3C6400_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C6400_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c6400_serial_drv_data)
+#else
+#define S3C6400_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (dev == NULL) {
- printk(KERN_ERR "s3c24xx: no devices for console init\n");
- return 0;
- }
+#ifdef CONFIG_CPU_S5PV210
+static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S5PV210 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S5PV210_UCON_CLKMASK,
+ .clksel_shift = S5PV210_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define S5PV210_SERIAL_DRV_DATA ((kernel_ulong_t)&s5pv210_serial_drv_data)
+#else
+#define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (strcmp(dev->name, drv->driver.name) != 0)
- return 0;
+#ifdef CONFIG_CPU_EXYNOS4210
+static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung Exynos4 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 1,
+ .clksel_mask = 0,
+ .clksel_shift = 0,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ .has_fracval = 1,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#else
+#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- s3c24xx_serial_console.data = &s3c24xx_uart_drv;
- s3c24xx_serial_init_ports(info);
+static struct platform_device_id s3c24xx_serial_driver_ids[] = {
+ {
+ .name = "s3c2410-uart",
+ .driver_data = S3C2410_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2412-uart",
+ .driver_data = S3C2412_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2440-uart",
+ .driver_data = S3C2440_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c6400-uart",
+ .driver_data = S3C6400_SERIAL_DRV_DATA,
+ }, {
+ .name = "s5pv210-uart",
+ .driver_data = S5PV210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos4210-uart",
+ .driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids);
- register_console(&s3c24xx_serial_console);
- return 0;
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_uart_dt_match[] = {
+ { .compatible = "samsung,exynos4210-uart",
+ .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
+#else
+#define s3c24xx_uart_dt_match NULL
+#endif
+
+static struct platform_driver samsung_serial_driver = {
+ .probe = s3c24xx_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .id_table = s3c24xx_serial_driver_ids,
+ .driver = {
+ .name = "samsung-uart",
+ .owner = THIS_MODULE,
+ .pm = SERIAL_SAMSUNG_PM_OPS,
+ .of_match_table = s3c24xx_uart_dt_match,
+ },
+};
+
+/* module initialisation code */
+
+static int __init s3c24xx_serial_modinit(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to register UART driver\n");
+ return -1;
+ }
+
+ return platform_driver_register(&samsung_serial_driver);
}
-#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
+static void __exit s3c24xx_serial_modexit(void)
+{
+ uart_unregister_driver(&s3c24xx_uart_drv);
+}
+
+module_init(s3c24xx_serial_modinit);
+module_exit(s3c24xx_serial_modexit);
+MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 8e87b788e5c..1a4bca3e417 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -19,20 +19,25 @@ struct s3c24xx_uart_info {
unsigned long tx_fifomask;
unsigned long tx_fifoshift;
unsigned long tx_fifofull;
+ unsigned int def_clk_sel;
+ unsigned long num_clks;
+ unsigned long clksel_mask;
+ unsigned long clksel_shift;
/* uart port features */
unsigned int has_divslot:1;
- /* clock source control */
-
- int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
- int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
-
/* uart controls */
int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
};
+struct s3c24xx_serial_drv_data {
+ struct s3c24xx_uart_info *info;
+ struct s3c2410_uartcfg *def_cfg;
+ unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
struct s3c24xx_uart_port {
unsigned char rx_claimed;
unsigned char tx_claimed;
@@ -43,10 +48,13 @@ struct s3c24xx_uart_port {
unsigned int tx_irq;
struct s3c24xx_uart_info *info;
- struct s3c24xx_uart_clksrc *clksrc;
struct clk *clk;
struct clk *baudclk;
struct uart_port port;
+ struct s3c24xx_serial_drv_data *drv_data;
+
+ /* reference to platform data */
+ struct s3c2410_uartcfg *cfg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
@@ -56,7 +64,6 @@ struct s3c24xx_uart_port {
/* conversion functions */
#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
-#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
/* register access controls */
@@ -69,17 +76,6 @@ struct s3c24xx_uart_port {
#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
-extern int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *uart);
-
-extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
-
-extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **uart);
-
-extern int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info);
-
#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
extern void printascii(const char *);
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad2b24..e0b4b0a30a5 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -736,19 +736,7 @@ static struct platform_driver sc26xx_driver = {
},
};
-static int __init sc26xx_init(void)
-{
- return platform_driver_register(&sc26xx_driver);
-}
-
-static void __exit sc26xx_exit(void)
-{
- platform_driver_unregister(&sc26xx_driver);
-}
-
-module_init(sc26xx_init);
-module_exit(sc26xx_exit);
-
+module_platform_driver(sc26xx_driver);
MODULE_AUTHOR("Thomas BogendĂśrfer");
MODULE_DESCRIPTION("SC681/SC2692 serial driver");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0406d7ff505..c7bf31a6a7e 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -22,6 +22,7 @@
*/
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -60,6 +61,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
+static void uart_port_shutdown(struct tty_port *port);
+
/*
* This routine is used by the interrupt handler to schedule processing in
* the software interrupt portion of the driver.
@@ -128,25 +131,16 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
* Startup the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
-static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
+static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long page;
int retval = 0;
- if (port->flags & ASYNC_INITIALIZED)
- return 0;
-
- /*
- * Set the TTY IO error marker - we will only clear this
- * once we have successfully opened the port. Also set
- * up the tty->alt_speed kludge
- */
- set_bit(TTY_IO_ERROR, &tty->flags);
-
if (uport->type == PORT_UNKNOWN)
- return 0;
+ return 1;
/*
* Initialise and allocate the transmit and temporary
@@ -188,10 +182,6 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
-
- set_bit(ASYNCB_INITIALIZED, &port->flags);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
}
/*
@@ -200,6 +190,31 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
* now.
*/
if (retval && capable(CAP_SYS_ADMIN))
+ return 1;
+
+ return retval;
+}
+
+static int uart_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
+{
+ struct tty_port *port = &state->port;
+ int retval;
+
+ if (port->flags & ASYNC_INITIALIZED)
+ return 0;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port.
+ */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ retval = uart_port_startup(tty, state, init_hw);
+ if (!retval) {
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ } else if (retval > 0)
retval = 0;
return retval;
@@ -228,24 +243,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (!tty || (tty->termios->c_cflag & HUPCL))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free
- * the irq here so the queue might never be woken up. Note
- * that we won't end up waiting on delta_msr_wait again since
- * any outstanding file descriptors should be pointing at
- * hung_up_tty_fops now.
- */
- wake_up_interruptible(&port->delta_msr_wait);
-
- /*
- * Free the IRQ and disable the port.
- */
- uport->ops->shutdown(uport);
-
- /*
- * Ensure that the IRQ handler isn't running on another CPU.
- */
- synchronize_irq(uport->irq);
+ uart_port_shutdown(port);
}
/*
@@ -423,7 +421,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
- quot = (port->uartclk + (8 * baud)) / (16 * baud);
+ quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
return quot;
}
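
As an aside (a sketch, not part of the patch): for positive operands the two quotient expressions are identical, because DIV_ROUND_CLOSEST(x, d) adds d/2 before dividing, which is exactly the open-coded "+ (8 * baud)" term. A quick standalone check with sample numbers; the macro here is a simplified positive-only form, not the kernel's full definition.

#include <assert.h>
#include <stdio.h>

/* Positive-only round-to-nearest division, mirroring the kernel helper's intent. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int uartclk = 1843200;			/* assumed clock, Hz */
	unsigned int bauds[] = { 9600, 38400, 115200 };

	for (unsigned int i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++) {
		unsigned int baud = bauds[i];
		unsigned int old_quot = (uartclk + (8 * baud)) / (16 * baud);
		unsigned int new_quot = DIV_ROUND_CLOSEST(uartclk, 16 * baud);

		assert(old_quot == new_quot);
		printf("baud %6u -> quot %u\n", baud, old_quot);
	}
	return 0;
}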
@@ -658,10 +656,10 @@ static int uart_get_info(struct uart_state *state,
tmp.flags = uport->flags;
tmp.xmit_fifo_size = uport->fifosize;
tmp.baud_base = uport->uartclk / 16;
- tmp.close_delay = port->close_delay / 10;
+ tmp.close_delay = jiffies_to_msecs(port->close_delay) / 10;
tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
- port->closing_wait / 10;
+ jiffies_to_msecs(port->closing_wait) / 10;
tmp.custom_divisor = uport->custom_divisor;
tmp.hub6 = uport->hub6;
tmp.io_type = uport->iotype;
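
As an aside (a sketch, not part of the patch): serial_struct's close_delay and closing_wait are expressed in hundredths of a second at the TIOCGSERIAL/TIOCSSERIAL interface, while tty_port stores them in jiffies; that is why this hunk and the uart_set_info() one below pair jiffies_to_msecs()/msecs_to_jiffies() with a factor of ten. A rough userspace illustration of the round trip, with HZ fixed at 100 and simplified conversion helpers purely for the arithmetic:

#include <stdio.h>

#define HZ 100	/* assumed for illustration; the kernel helpers handle any HZ */

static unsigned int msecs_to_jiffies(unsigned int ms) { return ms * HZ / 1000; }
static unsigned int jiffies_to_msecs(unsigned int j)  { return j * 1000 / HZ; }

int main(void)
{
	unsigned int user_cs = 50;	/* value handed in via TIOCSSERIAL, in 1/100 s */
	unsigned int stored = msecs_to_jiffies(user_cs * 10);	/* what tty_port keeps */
	unsigned int reported = jiffies_to_msecs(stored) / 10;	/* what TIOCGSERIAL returns */

	printf("user=%u cs  stored=%u jiffies  reported=%u cs\n",
	       user_cs, stored, reported);
	return 0;
}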
@@ -695,9 +693,10 @@ static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
new_serial.irq = irq_canonicalize(new_serial.irq);
- close_delay = new_serial.close_delay * 10;
+ close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ ASYNC_CLOSING_WAIT_NONE :
+ msecs_to_jiffies(new_serial.closing_wait * 10);
/*
* This semaphore protects port->count. It is also
@@ -1265,47 +1264,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
pr_debug("uart_close(%d) called\n", uport->line);
- spin_lock_irqsave(&port->lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ if (tty_port_close_start(port, tty, filp) == 0)
return;
- }
-
- if ((tty->count == 1) && (port->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. port->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
- "port->count is %d\n", port->count);
- port->count = 1;
- }
- if (--port->count < 0) {
- printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
- tty->name, port->count);
- port->count = 0;
- }
- if (port->count) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
-
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters by
- * setting tty->closing.
- */
- set_bit(ASYNCB_CLOSING, &port->flags);
- tty->closing = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent_from_close(tty,
- msecs_to_jiffies(port->closing_wait));
/*
* At this point, we stop accepting input. To do this, we
@@ -1337,7 +1297,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
- msleep_interruptible(port->close_delay);
+ msleep_interruptible(
+ jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1402,36 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
+static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ return 0;
+}
+
+static void uart_port_shutdown(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free
+ * the irq here so the queue might never be woken up. Note
+ * that we won't end up waiting on delta_msr_wait again since
+ * any outstanding file descriptors should be pointing at
+ * hung_up_tty_fops now.
+ */
+ wake_up_interruptible(&port->delta_msr_wait);
+
+ /*
+ * Free the IRQ and disable the port.
+ */
+ uport->ops->shutdown(uport);
+
+ /*
+ * Ensure that the IRQ handler isn't running on another CPU.
+ */
+ synchronize_irq(uport->irq);
+}
+
static int uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
@@ -1466,33 +1457,6 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
-static struct uart_state *uart_get(struct uart_driver *drv, int line)
-{
- struct uart_state *state;
- struct tty_port *port;
- int ret = 0;
-
- state = drv->state + line;
- port = &state->port;
- if (mutex_lock_interruptible(&port->mutex)) {
- ret = -ERESTARTSYS;
- goto err;
- }
-
- port->count++;
- if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
- ret = -ENXIO;
- goto err_unlock;
- }
- return state;
-
- err_unlock:
- port->count--;
- mutex_unlock(&port->mutex);
- err:
- return ERR_PTR(ret);
-}
-
/*
* calls to uart_open are serialised by the BKL in
* fs/char_dev.c:chrdev_open()
@@ -1506,26 +1470,29 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
- struct uart_state *state;
- struct tty_port *port;
int retval, line = tty->index;
+ struct uart_state *state = drv->state + line;
+ struct tty_port *port = &state->port;
pr_debug("uart_open(%d) called\n", line);
/*
- * We take the semaphore inside uart_get to guarantee that we won't
- * be re-entered while allocating the state structure, or while we
- * request any IRQs that the driver may need. This also has the nice
- * side-effect that it delays the action of uart_hangup, so we can
- * guarantee that state->port.tty will always contain something
- * reasonable.
+ * We take the semaphore here to guarantee that we won't be re-entered
+ * while allocating the state structure, or while we request any IRQs
+ * that the driver may need. This also has the nice side-effect that
+ * it delays the action of uart_hangup, so we can guarantee that
+ * state->port.tty will always contain something reasonable.
*/
- state = uart_get(drv, line);
- if (IS_ERR(state)) {
- retval = PTR_ERR(state);
- goto fail;
+ if (mutex_lock_interruptible(&port->mutex)) {
+ retval = -ERESTARTSYS;
+ goto end;
+ }
+
+ port->count++;
+ if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+ retval = -ENXIO;
+ goto err_dec_count;
}
- port = &state->port;
/*
* Once we set tty->driver_data here, we are guaranteed that
@@ -1535,7 +1502,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = state;
state->uart_port->state = state;
tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty->alt_speed = 0;
tty_port_tty_set(port, tty);
/*
@@ -1543,9 +1509,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
*/
if (tty_hung_up_p(filp)) {
retval = -EAGAIN;
- port->count--;
- mutex_unlock(&port->mutex);
- goto fail;
+ goto err_dec_count;
}
/*
@@ -1566,8 +1530,12 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
if (retval == 0)
retval = tty_port_block_til_ready(port, tty, filp);
-fail:
+end:
return retval;
+err_dec_count:
+ port->count--;
+ mutex_unlock(&port->mutex);
+ goto end;
}
static const char *uart_type(struct uart_port *port)
@@ -1858,6 +1826,14 @@ uart_set_options(struct uart_port *port, struct console *co,
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE */
+/**
+ * uart_change_pm - set power state of the port
+ *
+ * @state: UART state structure for the port
+ * @pm_state: new power state
+ *
+ * Locking: port->mutex has to be held
+ */
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->uart_port;
@@ -2214,6 +2190,8 @@ static const struct tty_operations uart_ops = {
};
static const struct tty_port_operations uart_port_ops = {
+ .activate = uart_port_activate,
+ .shutdown = uart_port_shutdown,
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
};
@@ -2275,8 +2253,8 @@ int uart_register_driver(struct uart_driver *drv)
tty_port_init(port);
port->ops = &uart_port_ops;
- port->close_delay = 500; /* .5 seconds */
- port->closing_wait = 30000; /* 30 seconds */
+ port->close_delay = HZ / 2; /* .5 seconds */
+ port->closing_wait = 30 * HZ;/* 30 seconds */
}
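/*
 * Note on the defaults above (illustrative arithmetic only): with the
 * fields now held in jiffies, HZ / 2 is half a second and 30 * HZ is
 * thirty seconds regardless of the configured HZ; e.g. HZ = 250 gives
 * close_delay = 125 and closing_wait = 7500 jiffies.
 */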
retval = tty_register_driver(normal);
@@ -2467,6 +2445,99 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
}
EXPORT_SYMBOL(uart_match_port);
+/**
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
+ */
+void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
+{
+ struct uart_state *state = uport->state;
+ struct tty_port *port = &state->port;
+ struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
+ struct pps_event_time ts;
+
+ if (ld && ld->ops->dcd_change)
+ pps_get_ts(&ts);
+
+ uport->icount.dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((uport->flags & UPF_HARDPPS_CD) && status)
+ hardpps();
+#endif
+
+ if (port->flags & ASYNC_CHECK_CD) {
+ if (status)
+ wake_up_interruptible(&port->open_wait);
+ else if (port->tty)
+ tty_hangup(port->tty);
+ }
+
+ if (ld && ld->ops->dcd_change)
+ ld->ops->dcd_change(port->tty, status, &ts);
+ if (ld)
+ tty_ldisc_deref(ld);
+}
+EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
+
+/**
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
+ */
+void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
+{
+ struct tty_port *port = &uport->state->port;
+ struct tty_struct *tty = port->tty;
+
+ uport->icount.cts++;
+
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uport->ops->start_tx(uport);
+ uart_write_wakeup(uport);
+ }
+ } else {
+ if (!status) {
+ tty->hw_stopped = 1;
+ uport->ops->stop_tx(uport);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(uart_handle_cts_change);
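/*
 * Usage sketch (illustration only, not from this patch): a 16550-style
 * modem-status interrupt path would forward the MSR delta bits to the two
 * helpers exported above roughly as below.  UART_MSR_* are the standard
 * bit definitions from <linux/serial_reg.h>; foo_uart_modem_status() is a
 * hypothetical name and the caller is assumed to have read the MSR.
 */
static void foo_uart_modem_status(struct uart_port *port, unsigned int msr)
{
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(port, msr & UART_MSR_DCD);
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(port, msr & UART_MSR_CTS);

	wake_up_interruptible(&port->state->port.delta_msr_wait);
}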
+
+/**
+ * uart_insert_char - push a char to the uart layer
+ *
+ * The caller is responsible for calling tty_flip_buffer_push() when done
+ * with insertion.
+ *
+ * @port: corresponding port
+ * @status: state of the serial port RX buffer (LSR for 8250)
+ * @overrun: mask of overrun bits in @status
+ * @ch: character to push
+ * @flag: flag for the character (see TTY_NORMAL and friends)
+ */
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag)
+{
+ struct tty_struct *tty = port->state->port.tty;
+
+ if ((status & port->ignore_status_mask & ~overrun) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ /*
+ * Overrun is special. Since it's reported immediately,
+ * it doesn't affect the current character.
+ */
+ if (status & ~port->ignore_status_mask & overrun)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+}
+EXPORT_SYMBOL_GPL(uart_insert_char);
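/*
 * Usage sketch (illustration only, not from this patch): a PIO receive
 * handler feeds each character through uart_insert_char() together with
 * its line status, then pushes the flip buffer once at the end, as the
 * kerneldoc above requires.  UART_LSR_OE is the 16550 overrun bit from
 * <linux/serial_reg.h>; foo_rx_ready()/foo_read_lsr()/foo_read_ch() are
 * made-up accessors standing in for the driver's register reads.
 */
static void foo_uart_rx_chars(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;
	unsigned int lsr, ch;

	while (foo_rx_ready(port)) {
		lsr = foo_read_lsr(port);
		ch = foo_read_ch(port);
		port->icount.rx++;
		uart_insert_char(port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(tty);
}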
+
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index eef736ff810..86090605a84 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -317,7 +317,7 @@ static int serial_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
if (do_sound)
link->config_flags |= CONF_ENABLE_SPKR;
@@ -445,7 +445,7 @@ static int simple_config(struct pcmcia_device *link)
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_AUTO_SET_VPP;
for (try = 0; try < 4; try++)
if (!pcmcia_loop_config(link, simple_config_check, &try))
goto found_port;
@@ -501,7 +501,8 @@ static int multi_config_check_notpicky(struct pcmcia_device *p_dev,
{
int *base2 = priv_data;
- if (!p_dev->resource[0]->end || !p_dev->resource[1]->end)
+ if (!p_dev->resource[0]->end || !p_dev->resource[1]->end ||
+ p_dev->resource[0]->start + 8 != p_dev->resource[1]->start)
return -ENODEV;
p_dev->resource[0]->end = p_dev->resource[1]->end = 8;
@@ -520,7 +521,6 @@ static int multi_config(struct pcmcia_device *link)
struct serial_info *info = link->priv;
int i, base2 = 0;
- link->config_flags |= CONF_AUTO_SET_IO;
/* First, look for a generic full-sized window */
if (!pcmcia_loop_config(link, multi_config_check, &info->multi))
base2 = link->resource[0]->start + 8;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
new file mode 100644
index 00000000000..a60523fee11
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -0,0 +1,783 @@
+/*
+ * Driver for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "sirfsoc_uart.h"
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
+static struct uart_driver sirfsoc_uart_drv;
+
+static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
+ {4000000, 2359296},
+ {3500000, 1310721},
+ {3000000, 1572865},
+ {2500000, 1245186},
+ {2000000, 1572866},
+ {1500000, 1245188},
+ {1152000, 1638404},
+ {1000000, 1572869},
+ {921600, 1114120},
+ {576000, 1245196},
+ {500000, 1245198},
+ {460800, 1572876},
+ {230400, 1310750},
+ {115200, 1310781},
+ {57600, 1310843},
+ {38400, 1114328},
+ {19200, 1114545},
+ {9600, 1114979},
+};
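/*
 * Format of reg_val above (worked example, for illustration): each entry
 * packs sample_div into bits 21:16 and ioclk_div into bits 15:0 of the
 * divisor register.  E.g. 1310781 = 0x14003D encodes sample_div = 20 and
 * ioclk_div = 61, and with the 150 MHz I/O clock used by set_termios()
 * that is 150000000 / ((61 + 1) * (20 + 1)) = 115207, i.e. 115200 baud to
 * within about 0.01%.
 */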
+
+static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
+ [0] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+ },
+ [1] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ },
+ },
+ [2] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ },
+ },
+};
+
+static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
+{
+ return container_of(port, struct sirfsoc_uart_port, port);
+}
+
+static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
+{
+ unsigned long reg;
+ reg = rd_regl(port, SIRFUART_TX_FIFO_STATUS);
+ if (reg & SIRFUART_FIFOEMPTY_MASK(port))
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ if (!(sirfport->ms_enabled)) {
+ goto cts_asserted;
+ } else if (sirfport->hw_flow_ctrl) {
+ if (!(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS))
+ goto cts_asserted;
+ else
+ goto cts_deasserted;
+ }
+cts_deasserted:
+ return TIOCM_CAR | TIOCM_DSR;
+cts_asserted:
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int assert = mctrl & TIOCM_RTS;
+ unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
+ unsigned int current_val;
+ if (sirfport->hw_flow_ctrl) {
+ current_val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
+ val |= current_val;
+ wr_regl(port, SIRFUART_AFC_CTRL, val);
+ }
+}
+
+static void sirfsoc_uart_stop_tx(struct uart_port *port)
+{
+ unsigned int regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_TX_INT_EN);
+}
+
+void sirfsoc_uart_start_tx(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long regv;
+ sirfsoc_uart_pio_tx_chars(sirfport, 1);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_TX_INT_EN);
+}
+
+static void sirfsoc_uart_stop_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_RX_IO_INT_EN);
+}
+
+static void sirfsoc_uart_disable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ sirfport->ms_enabled = 0;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg & ~0x3FF);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg & ~SIRFUART_CTS_INT_EN);
+}
+
+static void sirfsoc_uart_enable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ unsigned long flg;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg | flg);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN);
+ uart_handle_cts_change(port,
+ !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS));
+ sirfport->ms_enabled = 1;
+}
+
+static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long ulcon = rd_regl(port, SIRFUART_LINE_CTRL);
+ if (break_state)
+ ulcon |= SIRFUART_SET_BREAK;
+ else
+ ulcon &= ~SIRFUART_SET_BREAK;
+ wr_regl(port, SIRFUART_LINE_CTRL, ulcon);
+}
+
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+{
+ unsigned int ch, rx_count = 0;
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty)
+ return 0;
+
+ while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port))) {
+ ch = rd_regl(port, SIRFUART_RX_FIFO_DATA) | SIRFUART_DUMMY_READ;
+ if (unlikely(uart_handle_sysrq_char(port, ch)))
+ continue;
+ uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
+ rx_count++;
+ if (rx_count >= max_rx_count)
+ break;
+ }
+
+ port->icount.rx += rx_count;
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ return rx_count;
+}
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
+{
+ struct uart_port *port = &sirfport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int num_tx = 0;
+ while (!uart_circ_empty(xmit) &&
+ !(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOFULL_MASK(port)) &&
+ count--) {
+ wr_regl(port, SIRFUART_TX_FIFO_DATA, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ num_tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ return num_tx;
+}
+
+static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+{
+ unsigned long intr_status;
+ unsigned long cts_status;
+ unsigned long flag = TTY_NORMAL;
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ struct uart_port *port = &sirfport->port;
+ struct uart_state *state = port->state;
+ struct circ_buf *xmit = &port->state->xmit;
+ intr_status = rd_regl(port, SIRFUART_INT_STATUS);
+ wr_regl(port, SIRFUART_INT_STATUS, intr_status);
+ intr_status &= rd_regl(port, SIRFUART_INT_EN);
+ if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) {
+ if (intr_status & SIRFUART_RXD_BREAK) {
+ if (uart_handle_break(port))
+ goto recv_char;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+ return IRQ_HANDLED;
+ }
+ if (intr_status & SIRFUART_RX_OFLOW)
+ port->icount.overrun++;
+ if (intr_status & SIRFUART_FRM_ERR) {
+ port->icount.frame++;
+ flag = TTY_FRAME;
+ }
+ if (intr_status & SIRFUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+ intr_status &= port->read_status_mask;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW_INT, 0, flag);
+ }
+recv_char:
+ if (intr_status & SIRFUART_CTS_INT_EN) {
+ cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS);
+ if (cts_status != 0) {
+ uart_handle_cts_change(port, 1);
+ } else {
+ uart_handle_cts_change(port, 0);
+ wake_up_interruptible(&state->port.delta_msr_wait);
+ }
+ }
+ if (intr_status & SIRFUART_RX_IO_INT_EN)
+ sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
+ if (intr_status & SIRFUART_TX_INT_EN) {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ return IRQ_HANDLED;
+ } else {
+ sirfsoc_uart_pio_tx_chars(sirfport,
+ SIRFSOC_UART_IO_TX_REASONABLE_CNT);
+ if ((uart_circ_empty(xmit)) &&
+ (rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port)))
+ sirfsoc_uart_stop_tx(port);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void sirfsoc_uart_start_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_RX_IO_INT_EN);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+}
+
+static unsigned int
+sirfsoc_calc_sample_div(unsigned long baud_rate,
+ unsigned long ioclk_rate, unsigned long *setted_baud)
+{
+ unsigned long min_delta = ~0UL;
+ unsigned short sample_div;
+ unsigned int regv = 0;
+ unsigned long ioclk_div;
+ unsigned long baud_tmp;
+ int temp_delta;
+
+ for (sample_div = SIRF_MIN_SAMPLE_DIV;
+ sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
+ ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
+ if (ioclk_div > SIRF_IOCLK_DIV_MAX)
+ continue;
+ baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
+ temp_delta = baud_tmp - baud_rate;
+ temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
+ if (temp_delta < min_delta) {
+ regv = regv & (~SIRF_IOCLK_DIV_MASK);
+ regv = regv | ioclk_div;
+ regv = regv & (~SIRF_SAMPLE_DIV_MASK);
+ regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
+ min_delta = temp_delta;
+ *setted_baud = baud_tmp;
+ }
+ }
+ return regv;
+}
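/*
 * Worked example of the search above (illustration only): for
 * baud_rate = 921600 and ioclk_rate = 150000000, sample_div = 15 gives
 * ioclk_div = 150000000 / (921600 * 16) - 1 = 9, and
 * baud_tmp = 150000000 / ((9 + 1) * (15 + 1)) = 937500, a delta of about
 * 1.7%; larger sample_div values are then tried and the (sample_div,
 * ioclk_div) pair with the smallest delta is kept in regv.
 */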
+
+static void sirfsoc_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long ioclk_rate;
+ unsigned long config_reg = 0;
+ unsigned long baud_rate;
+ unsigned long setted_baud;
+ unsigned long flags;
+ unsigned long ic;
+ unsigned int clk_div_reg = 0;
+ unsigned long temp_reg_val;
+ unsigned long rx_time_out;
+ int threshold_div;
+ int temp;
+
+ ioclk_rate = 150000000;
+ switch (termios->c_cflag & CSIZE) {
+ default:
+ case CS8:
+ config_reg |= SIRFUART_DATA_BIT_LEN_8;
+ break;
+ case CS7:
+ config_reg |= SIRFUART_DATA_BIT_LEN_7;
+ break;
+ case CS6:
+ config_reg |= SIRFUART_DATA_BIT_LEN_6;
+ break;
+ case CS5:
+ config_reg |= SIRFUART_DATA_BIT_LEN_5;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ config_reg |= SIRFUART_STOP_BIT_LEN_2;
+ baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ spin_lock_irqsave(&port->lock, flags);
+ port->read_status_mask = SIRFUART_RX_OFLOW_INT;
+ port->ignore_status_mask = 0;
+ /* read flags */
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SIRFUART_RXD_BREAK_INT;
+ /* ignore flags */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= SIRFUART_DUMMY_READ;
+ /* enable parity if PARENB is set*/
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ config_reg |= SIRFUART_STICK_BIT_MARK;
+ else
+ config_reg |= SIRFUART_STICK_BIT_SPACE;
+ } else if (termios->c_cflag & PARODD) {
+ config_reg |= SIRFUART_STICK_BIT_ODD;
+ } else {
+ config_reg |= SIRFUART_STICK_BIT_EVEN;
+ }
+ }
+ /* Hardware Flow Control Settings */
+ if (UART_ENABLE_MS(port, termios->c_cflag)) {
+ if (!sirfport->ms_enabled)
+ sirfsoc_uart_enable_ms(port);
+ } else {
+ if (sirfport->ms_enabled)
+ sirfsoc_uart_disable_ms(port);
+ }
+
+ /* common rate: fast calculation */
+ for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+ if (baud_rate == baudrate_to_regv[ic].baud_rate)
+ clk_div_reg = baudrate_to_regv[ic].reg_val;
+ setted_baud = baud_rate;
+ /* arbitrary rate setting */
+ if (unlikely(clk_div_reg == 0))
+ clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+ &setted_baud);
+ wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
+
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, setted_baud, setted_baud);
+
+ /* set receive timeout */
+ rx_time_out = SIRFSOC_UART_RX_TIMEOUT(baud_rate, 20000);
+ rx_time_out = (rx_time_out > 0xFFFF) ? 0xFFFF : rx_time_out;
+ config_reg |= SIRFUART_RECV_TIMEOUT(rx_time_out);
+ temp_reg_val = rd_regl(port, SIRFUART_TX_FIFO_OP);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_TX_FIFO_OP,
+ temp_reg_val & ~SIRFUART_TX_FIFO_START);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, SIRFUART_TX_MODE_IO);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_LINE_CTRL, config_reg);
+
+ /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
+ if (baud_rate < 1000000)
+ threshold_div = 1;
+ else
+ threshold_div = 2;
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp / threshold_div);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp / threshold_div);
+ temp_reg_val |= SIRFUART_TX_FIFO_START;
+ wr_regl(port, SIRFUART_TX_FIFO_OP, temp_reg_val);
+ uart_update_timeout(port, termios->c_cflag, baud_rate);
+ sirfsoc_uart_start_rx(port);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_TX_EN | SIRFUART_RX_EN);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void startup_uart_controller(struct uart_port *port)
+{
+ unsigned long temp_regv;
+ int temp;
+ temp_regv = rd_regl(port, SIRFUART_TX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, temp_regv | SIRFUART_TX_MODE_IO);
+ temp_regv = rd_regl(port, SIRFUART_RX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, temp_regv | SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_TX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_RX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_RX_EN | SIRFUART_TX_EN);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_RESET);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp);
+}
+
+static int sirfsoc_uart_startup(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int index = port->line;
+ int ret;
+ set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(port->irq,
+ sirfsoc_uart_isr,
+ 0,
+ SIRFUART_PORT_NAME,
+ sirfport);
+ if (ret != 0) {
+ dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
+ index, port->irq);
+ goto irq_err;
+ }
+ startup_uart_controller(port);
+ enable_irq(port->irq);
+irq_err:
+ return ret;
+}
+
+static void sirfsoc_uart_shutdown(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ wr_regl(port, SIRFUART_INT_EN, 0);
+ free_irq(port->irq, sirfport);
+ if (sirfport->ms_enabled) {
+ sirfsoc_uart_disable_ms(port);
+ sirfport->ms_enabled = 0;
+ }
+}
+
+static const char *sirfsoc_uart_type(struct uart_port *port)
+{
+ return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
+}
+
+static int sirfsoc_uart_request_port(struct uart_port *port)
+{
+ void *ret;
+ ret = request_mem_region(port->mapbase,
+ SIRFUART_MAP_SIZE, SIRFUART_PORT_NAME);
+ return ret ? 0 : -EBUSY;
+}
+
+static void sirfsoc_uart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
+}
+
+static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = SIRFSOC_PORT_TYPE;
+ sirfsoc_uart_request_port(port);
+ }
+}
+
+static struct uart_ops sirfsoc_uart_ops = {
+ .tx_empty = sirfsoc_uart_tx_empty,
+ .get_mctrl = sirfsoc_uart_get_mctrl,
+ .set_mctrl = sirfsoc_uart_set_mctrl,
+ .stop_tx = sirfsoc_uart_stop_tx,
+ .start_tx = sirfsoc_uart_start_tx,
+ .stop_rx = sirfsoc_uart_stop_rx,
+ .enable_ms = sirfsoc_uart_enable_ms,
+ .break_ctl = sirfsoc_uart_break_ctl,
+ .startup = sirfsoc_uart_startup,
+ .shutdown = sirfsoc_uart_shutdown,
+ .set_termios = sirfsoc_uart_set_termios,
+ .type = sirfsoc_uart_type,
+ .release_port = sirfsoc_uart_release_port,
+ .request_port = sirfsoc_uart_request_port,
+ .config_port = sirfsoc_uart_config_port,
+};
+
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
+{
+ unsigned int baud = 115200;
+ unsigned int bits = 8;
+ unsigned int parity = 'n';
+ unsigned int flow = 'n';
+ struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+
+ if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
+ return -EINVAL;
+
+ if (!port->mapbase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ port->cons = co;
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (rd_regl(port,
+ SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOFULL_MASK(port))
+ cpu_relax();
+ wr_regb(port, SIRFUART_TX_FIFO_DATA, ch);
+}
+
+static void sirfsoc_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+ uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
+}
+
+static struct console sirfsoc_uart_console = {
+ .name = SIRFSOC_UART_NAME,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .write = sirfsoc_uart_console_write,
+ .setup = sirfsoc_uart_console_setup,
+ .data = &sirfsoc_uart_drv,
+};
+
+static int __init sirfsoc_uart_console_init(void)
+{
+ register_console(&sirfsoc_uart_console);
+ return 0;
+}
+console_initcall(sirfsoc_uart_console_init);
+#endif
+
+static struct uart_driver sirfsoc_uart_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = SIRFUART_PORT_NAME,
+ .nr = SIRFSOC_UART_NR,
+ .dev_name = SIRFSOC_UART_NAME,
+ .major = SIRFSOC_UART_MAJOR,
+ .minor = SIRFSOC_UART_MINOR,
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+ .cons = &sirfsoc_uart_console,
+#else
+ .cons = NULL,
+#endif
+};
+
+int sirfsoc_uart_probe(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport;
+ struct uart_port *port;
+ struct resource *res;
+ int ret;
+
+ if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
+ dev_err(&pdev->dev,
+ "Unable to find cell-index in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ sirfport = &sirfsoc_uart_ports[pdev->id];
+ port = &sirfport->port;
+ port->dev = &pdev->dev;
+ port->private_data = sirfport;
+
+ if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
+ sirfport->hw_flow_ctrl = 1;
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "fifosize",
+ &port->fifosize)) {
+ dev_err(&pdev->dev,
+ "Unable to find fifosize in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ port->mapbase = res->start;
+ port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!port->membase) {
+ dev_err(&pdev->dev, "Cannot remap resource.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto irq_err;
+ }
+ port->irq = res->start;
+
+ if (sirfport->hw_flow_ctrl) {
+ sirfport->pmx = pinmux_get(&pdev->dev, NULL);
+ if (IS_ERR(sirfport->pmx)) {
+ ret = PTR_ERR(sirfport->pmx);
+ goto pmx_err;
+ }
+
+ pinmux_enable(sirfport->pmx);
+ }
+
+ port->ops = &sirfsoc_uart_ops;
+ spin_lock_init(&port->lock);
+
+ platform_set_drvdata(pdev, sirfport);
+ ret = uart_add_one_port(&sirfsoc_uart_drv, port);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
+ goto port_err;
+ }
+
+ return 0;
+
+port_err:
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+pmx_err:
+irq_err:
+ devm_iounmap(&pdev->dev, port->membase);
+err:
+ return ret;
+}
+
+static int sirfsoc_uart_remove(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+ devm_iounmap(&pdev->dev, port->membase);
+ uart_remove_one_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int
+sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_suspend_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int sirfsoc_uart_resume(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_resume_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_uart_ids[] __devinitdata = {
+ { .compatible = "sirf,prima2-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
+
+static struct platform_driver sirfsoc_uart_driver = {
+ .probe = sirfsoc_uart_probe,
+ .remove = __devexit_p(sirfsoc_uart_remove),
+ .suspend = sirfsoc_uart_suspend,
+ .resume = sirfsoc_uart_resume,
+ .driver = {
+ .name = SIRFUART_PORT_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_uart_ids,
+ },
+};
+
+static int __init sirfsoc_uart_init(void)
+{
+ int ret = 0;
+
+ ret = uart_register_driver(&sirfsoc_uart_drv);
+ if (ret)
+ goto out;
+
+ ret = platform_driver_register(&sirfsoc_uart_driver);
+ if (ret)
+ uart_unregister_driver(&sirfsoc_uart_drv);
+out:
+ return ret;
+}
+module_init(sirfsoc_uart_init);
+
+static void __exit sirfsoc_uart_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_uart_driver);
+ uart_unregister_driver(&sirfsoc_uart_drv);
+}
+module_exit(sirfsoc_uart_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
+MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
new file mode 100644
index 00000000000..fc64260fa93
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -0,0 +1,185 @@
+/*
+ * Drivers for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include <linux/bitops.h>
+
+/* UART Register Offset Define */
+#define SIRFUART_LINE_CTRL 0x0040
+#define SIRFUART_TX_RX_EN 0x004c
+#define SIRFUART_DIVISOR 0x0050
+#define SIRFUART_INT_EN 0x0054
+#define SIRFUART_INT_STATUS 0x0058
+#define SIRFUART_TX_DMA_IO_CTRL 0x0100
+#define SIRFUART_TX_DMA_IO_LEN 0x0104
+#define SIRFUART_TX_FIFO_CTRL 0x0108
+#define SIRFUART_TX_FIFO_LEVEL_CHK 0x010C
+#define SIRFUART_TX_FIFO_OP 0x0110
+#define SIRFUART_TX_FIFO_STATUS 0x0114
+#define SIRFUART_TX_FIFO_DATA 0x0118
+#define SIRFUART_RX_DMA_IO_CTRL 0x0120
+#define SIRFUART_RX_DMA_IO_LEN 0x0124
+#define SIRFUART_RX_FIFO_CTRL 0x0128
+#define SIRFUART_RX_FIFO_LEVEL_CHK 0x012C
+#define SIRFUART_RX_FIFO_OP 0x0130
+#define SIRFUART_RX_FIFO_STATUS 0x0134
+#define SIRFUART_RX_FIFO_DATA 0x0138
+#define SIRFUART_AFC_CTRL 0x0140
+#define SIRFUART_SWH_DMA_IO 0x0148
+
+/* UART Line Control Register */
+#define SIRFUART_DATA_BIT_LEN_MASK 0x3
+#define SIRFUART_DATA_BIT_LEN_5 BIT(0)
+#define SIRFUART_DATA_BIT_LEN_6 1
+#define SIRFUART_DATA_BIT_LEN_7 2
+#define SIRFUART_DATA_BIT_LEN_8 3
+#define SIRFUART_STOP_BIT_LEN_1 0
+#define SIRFUART_STOP_BIT_LEN_2 BIT(2)
+#define SIRFUART_PARITY_EN BIT(3)
+#define SIRFUART_EVEN_BIT BIT(4)
+#define SIRFUART_STICK_BIT_MASK (7 << 3)
+#define SIRFUART_STICK_BIT_NONE (0 << 3)
+#define SIRFUART_STICK_BIT_EVEN BIT(3)
+#define SIRFUART_STICK_BIT_ODD (3 << 3)
+#define SIRFUART_STICK_BIT_MARK (5 << 3)
+#define SIRFUART_STICK_BIT_SPACE (7 << 3)
+#define SIRFUART_SET_BREAK BIT(6)
+#define SIRFUART_LOOP_BACK BIT(7)
+#define SIRFUART_PARITY_MASK (7 << 3)
+#define SIRFUART_DUMMY_READ BIT(16)
+
+#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
+#define SIRFUART_RECV_TIMEOUT_MASK (0xFFFF << 16)
+#define SIRFUART_RECV_TIMEOUT(x) (((x) & 0xFFFF) << 16)
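/*
 * Example (illustration only): SIRFSOC_UART_RX_TIMEOUT(115200, 20000)
 * evaluates to (115200 * 20) / 1000 = 2304, i.e. (assuming the second
 * argument is in microseconds) a 20 ms idle window expressed in bit
 * periods, which set_termios() caps at 0xFFFF and programs into the
 * RECV_TIMEOUT field of the line control register.
 */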
+
+/* UART Auto Flow Control */
+#define SIRFUART_AFC_RX_THD_MASK 0x000000FF
+#define SIRFUART_AFC_RX_EN BIT(8)
+#define SIRFUART_AFC_TX_EN BIT(9)
+#define SIRFUART_CTS_CTRL BIT(10)
+#define SIRFUART_RTS_CTRL BIT(11)
+#define SIRFUART_CTS_IN_STATUS BIT(12)
+#define SIRFUART_RTS_OUT_STATUS BIT(13)
+
+/* UART Interrupt Enable Register */
+#define SIRFUART_RX_DONE_INT BIT(0)
+#define SIRFUART_TX_DONE_INT BIT(1)
+#define SIRFUART_RX_OFLOW_INT BIT(2)
+#define SIRFUART_TX_ALLOUT_INT BIT(3)
+#define SIRFUART_RX_IO_DMA_INT BIT(4)
+#define SIRFUART_TX_IO_DMA_INT BIT(5)
+#define SIRFUART_RXFIFO_FULL_INT BIT(6)
+#define SIRFUART_TXFIFO_EMPTY_INT BIT(7)
+#define SIRFUART_RXFIFO_THD_INT BIT(8)
+#define SIRFUART_TXFIFO_THD_INT BIT(9)
+#define SIRFUART_FRM_ERR_INT BIT(10)
+#define SIRFUART_RXD_BREAK_INT BIT(11)
+#define SIRFUART_RX_TIMEOUT_INT BIT(12)
+#define SIRFUART_PARITY_ERR_INT BIT(13)
+#define SIRFUART_CTS_INT_EN BIT(14)
+#define SIRFUART_RTS_INT_EN BIT(15)
+
+/* UART Interrupt Status Register */
+#define SIRFUART_RX_DONE BIT(0)
+#define SIRFUART_TX_DONE BIT(1)
+#define SIRFUART_RX_OFLOW BIT(2)
+#define SIRFUART_TX_ALL_EMPTY BIT(3)
+#define SIRFUART_DMA_IO_RX_DONE BIT(4)
+#define SIRFUART_DMA_IO_TX_DONE BIT(5)
+#define SIRFUART_RXFIFO_FULL BIT(6)
+#define SIRFUART_TXFIFO_EMPTY BIT(7)
+#define SIRFUART_RXFIFO_THD_REACH BIT(8)
+#define SIRFUART_TXFIFO_THD_REACH BIT(9)
+#define SIRFUART_FRM_ERR BIT(10)
+#define SIRFUART_RXD_BREAK BIT(11)
+#define SIRFUART_RX_TIMEOUT BIT(12)
+#define SIRFUART_PARITY_ERR BIT(13)
+#define SIRFUART_CTS_CHANGE BIT(14)
+#define SIRFUART_RTS_CHANGE BIT(15)
+#define SIRFUART_PLUG_IN BIT(16)
+
+#define SIRFUART_ERR_INT_STAT \
+ (SIRFUART_RX_OFLOW | \
+ SIRFUART_FRM_ERR | \
+ SIRFUART_RXD_BREAK | \
+ SIRFUART_PARITY_ERR)
+#define SIRFUART_ERR_INT_EN \
+ (SIRFUART_RX_OFLOW_INT | \
+ SIRFUART_FRM_ERR_INT | \
+ SIRFUART_RXD_BREAK_INT | \
+ SIRFUART_PARITY_ERR_INT)
+#define SIRFUART_TX_INT_EN SIRFUART_TXFIFO_EMPTY_INT
+#define SIRFUART_RX_IO_INT_EN \
+ (SIRFUART_RX_TIMEOUT_INT | \
+ SIRFUART_RXFIFO_THD_INT | \
+ SIRFUART_RXFIFO_FULL_INT | \
+ SIRFUART_ERR_INT_EN)
+
+/* UART FIFO Register */
+#define SIRFUART_TX_FIFO_STOP 0x0
+#define SIRFUART_TX_FIFO_RESET 0x1
+#define SIRFUART_TX_FIFO_START 0x2
+#define SIRFUART_RX_FIFO_STOP 0x0
+#define SIRFUART_RX_FIFO_RESET 0x1
+#define SIRFUART_RX_FIFO_START 0x2
+#define SIRFUART_TX_MODE_DMA 0
+#define SIRFUART_TX_MODE_IO 1
+#define SIRFUART_RX_MODE_DMA 0
+#define SIRFUART_RX_MODE_IO 1
+
+#define SIRFUART_RX_EN 0x1
+#define SIRFUART_TX_EN 0x2
+
+/* Generic Definitions */
+#define SIRFSOC_UART_NAME "ttySiRF"
+#define SIRFSOC_UART_MAJOR 0
+#define SIRFSOC_UART_MINOR 0
+#define SIRFUART_PORT_NAME "sirfsoc-uart"
+#define SIRFUART_MAP_SIZE 0x200
+#define SIRFSOC_UART_NR 3
+#define SIRFSOC_PORT_TYPE 0xa5
+
+/* Baud Rate Calculation */
+#define SIRF_MIN_SAMPLE_DIV 0xf
+#define SIRF_MAX_SAMPLE_DIV 0x3f
+#define SIRF_IOCLK_DIV_MAX 0xffff
+#define SIRF_SAMPLE_DIV_SHIFT 16
+#define SIRF_IOCLK_DIV_MASK 0xffff
+#define SIRF_SAMPLE_DIV_MASK 0x3f0000
+#define SIRF_BAUD_RATE_SUPPORT_NR 18
+
+/* For Fast Baud Rate Calculation */
+struct sirfsoc_baudrate_to_regv {
+ unsigned int baud_rate;
+ unsigned int reg_val;
+};
+
+struct sirfsoc_uart_port {
+ unsigned char hw_flow_ctrl;
+ unsigned char ms_enabled;
+
+ struct uart_port port;
+ struct pinmux *pmx;
+};
+
+/* Hardware Flow Control */
+#define SIRFUART_AFC_CTRL_RX_THD 0x70
+
+/* Register Access Control */
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
+#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
+#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
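/*
 * Usage sketch (illustration only, not part of the header): the accessors
 * above are typically used in read-modify-write sequences, e.g. enabling
 * one more interrupt source without disturbing the rest of INT_EN.  The
 * helper name is hypothetical; the driver open-codes this pattern.
 */
static inline void sirfsoc_uart_enable_int(struct uart_port *port,
					   unsigned long which)
{
	wr_regl(port, SIRFUART_INT_EN, rd_regl(port, SIRFUART_INT_EN) | which);
}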
+
+/* UART Port Mask */
+#define SIRFUART_FIFOLEVEL_MASK(port) ((port->line == 1) ? (0x1f) : (0x7f))
+#define SIRFUART_FIFOFULL_MASK(port) ((port->line == 1) ? (0x20) : (0x80))
+#define SIRFUART_FIFOEMPTY_MASK(port) ((port->line == 1) ? (0x40) : (0x100))
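/*
 * Reading of the masks above (inferred, for illustration): port 1 has a
 * 5-bit FIFO level field with the full/empty flags at bits 5/6, while the
 * other ports use a 7-bit level field with the flags at bits 7/8; in other
 * words UART1 has the smaller FIFO, which is also why set_termios() and
 * startup_uart_controller() pick a threshold of 16 for line 1 and 64
 * otherwise.
 */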
+
+/* I/O Mode */
+#define SIRFSOC_UART_IO_RX_MAX_CNT 256
+#define SIRFSOC_UART_IO_TX_REASONABLE_CNT 6
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index e76c8b747fb..70f97491d8f 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -513,20 +513,7 @@ static struct platform_driver timbuart_platform_driver = {
.remove = __devexit_p(timbuart_remove),
};
-/*--------------------------------------------------------------------------*/
-
-static int __init timbuart_init(void)
-{
- return platform_driver_register(&timbuart_platform_driver);
-}
-
-static void __exit timbuart_exit(void)
-{
- platform_driver_unregister(&timbuart_platform_driver);
-}
-
-module_init(timbuart_init);
-module_exit(timbuart_exit);
+module_platform_driver(timbuart_platform_driver);
MODULE_DESCRIPTION("Timberdale UART driver");
MODULE_LICENSE("GPL v2");
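/*
 * For reference (paraphrased, not part of this patch): the
 * module_platform_driver() helper used above generates the init/exit
 * boilerplate that the removed lines spelled out by hand, roughly:
 *
 *	static int __init timbuart_platform_driver_init(void)
 *	{
 *		return platform_driver_register(&timbuart_platform_driver);
 *	}
 *	module_init(timbuart_platform_driver_init);
 *
 *	static void __exit timbuart_platform_driver_exit(void)
 *	{
 *		platform_driver_unregister(&timbuart_platform_driver);
 *	}
 *	module_exit(timbuart_platform_driver_exit);
 *
 * The module_usb_driver() conversions further down expand analogously
 * around usb_register()/usb_deregister().
 */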
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index cea8918b823..2ebe606a2db 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -963,6 +963,9 @@ static void qe_uart_set_termios(struct uart_port *port,
/* Do we really need a spinlock here? */
spin_lock_irqsave(&port->lock, flags);
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
out_be16(&uccp->upsmr, upsmr);
if (soft_uart) {
out_be16(&uccup->supsmr, supsmr);
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab4fa6..83148e79ca1 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -961,18 +961,7 @@ static struct platform_driver siu_device_driver = {
},
};
-static int __init vr41xx_siu_init(void)
-{
- return platform_driver_register(&siu_device_driver);
-}
-
-static void __exit vr41xx_siu_exit(void)
-{
- platform_driver_unregister(&siu_device_driver);
-}
-
-module_init(vr41xx_siu_init);
-module_exit(vr41xx_siu_exit);
+module_platform_driver(siu_device_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 43db715f150..7867b7c4538 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for fsync_bdev() */
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/vt_kern.h>
@@ -41,6 +40,7 @@
#include <linux/oom.h>
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 05085beb83d..e41b9bbc107 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -790,19 +790,24 @@ static void session_clear_tty(struct pid *session)
void disassociate_ctty(int on_exit)
{
struct tty_struct *tty;
- struct pid *tty_pgrp = NULL;
if (!current->signal->leader)
return;
tty = get_current_tty();
if (tty) {
- tty_pgrp = get_pid(tty->pgrp);
+ struct pid *tty_pgrp = get_pid(tty->pgrp);
if (on_exit) {
if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
tty_vhangup(tty);
}
tty_kref_put(tty);
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
} else if (on_exit) {
struct pid *old_pgrp;
spin_lock_irq(&current->sighand->siglock);
@@ -816,12 +821,6 @@ void disassociate_ctty(int on_exit)
}
return;
}
- if (tty_pgrp) {
- kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
- put_pid(tty_pgrp);
- }
spin_lock_irq(&current->sighand->siglock);
put_pid(current->signal->tty_old_pgrp);
@@ -1558,6 +1557,59 @@ static void release_tty(struct tty_struct *tty, int idx)
}
/**
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @o_tty: link of @tty (if any)
+ * @idx: index of the tty
+ *
+ * Performs some paranoid checking before true release of the @tty.
+ * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ */
+static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
+ int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
+ __func__, tty->name);
+ return -1;
+ }
+
+ /* not much to check for devpts */
+ if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+ return 0;
+
+ if (tty != tty->driver->ttys[idx]) {
+ printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->termios != tty->driver->termios[idx]) {
+ printk(KERN_DEBUG "%s: driver.termios[%d] not termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->driver->other) {
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->termios != tty->driver->other->termios[idx]) {
+ printk(KERN_DEBUG "%s: other->termios[%d] not o_termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->link != tty) {
+ printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
@@ -1585,11 +1637,11 @@ int tty_release(struct inode *inode, struct file *filp)
int idx;
char buf[64];
- if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+ if (tty_paranoia_check(tty, inode, __func__))
return 0;
tty_lock();
- check_tty_count(tty, "tty_release_dev");
+ check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
@@ -1599,59 +1651,16 @@ int tty_release(struct inode *inode, struct file *filp)
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
o_tty = tty->link;
-#ifdef TTY_PARANOIA_CHECK
- if (idx < 0 || idx >= tty->driver->num) {
- printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
- "free (%s)\n", tty->name);
+ if (tty_release_checks(tty, o_tty, idx)) {
tty_unlock();
return 0;
}
- if (!devpts) {
- if (tty != tty->driver->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
- "for (%s)\n", idx, tty->name);
- return 0;
- }
- if (tty->termios != tty->driver->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
- "for (%s)\n",
- idx, tty->name);
- return 0;
- }
- }
-#endif
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
- tty_name(tty, buf), tty->count);
+ printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
+ tty_name(tty, buf), tty->count);
#endif
-#ifdef TTY_PARANOIA_CHECK
- if (tty->driver->other &&
- !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- if (o_tty != tty->driver->other->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
- "not o_tty for (%s)\n",
- idx, tty->name);
- return 0 ;
- }
- if (o_tty->termios != tty->driver->other->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
- "not o_termios for (%s)\n",
- idx, tty->name);
- return 0;
- }
- if (o_tty->link != tty) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
- return 0;
- }
- }
-#endif
if (tty->ops->close)
tty->ops->close(tty, filp);
@@ -1707,8 +1716,8 @@ int tty_release(struct inode *inode, struct file *filp)
if (!do_sleep)
break;
- printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
- "active!\n", tty_name(tty, buf));
+ printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+ __func__, tty_name(tty, buf));
tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
@@ -1721,15 +1730,14 @@ int tty_release(struct inode *inode, struct file *filp)
*/
if (pty_master) {
if (--o_tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad pty slave count "
- "(%d) for %s\n",
- o_tty->count, tty_name(o_tty, buf));
+ printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
+ __func__, o_tty->count, tty_name(o_tty, buf));
o_tty->count = 0;
}
}
if (--tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
- tty->count, tty_name(tty, buf));
+ printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n",
+ __func__, tty->count, tty_name(tty, buf));
tty->count = 0;
}
@@ -1778,7 +1786,7 @@ int tty_release(struct inode *inode, struct file *filp)
}
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "freeing tty structure...");
+ printk(KERN_DEBUG "%s: freeing tty structure...\n", __func__);
#endif
/*
* Ask the line discipline code to release its structures
@@ -1798,6 +1806,83 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
+ * tty_open_current_tty - get tty of current task for open
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: tty of the current task iff @device is /dev/tty
+ *
+ * We cannot return driver and index like for the other nodes because
+ * devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+ struct tty_struct *tty;
+
+ if (device != MKDEV(TTYAUX_MAJOR, 0))
+ return NULL;
+
+ tty = get_current_tty();
+ if (!tty)
+ return ERR_PTR(-ENXIO);
+
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ tty_kref_put(tty);
+ /* FIXME: we put a reference and return a TTY! */
+ return tty;
+}
+
+/**
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty
+ * @noctty: set if the device should not become a controlling tty
+ * @index: index for the device in the @return driver
+ * @return: driver for this inode (with increased refcount)
+ *
+ * If @return is not erroneous, the caller is responsible for dropping the
+ * reference with tty_driver_kref_put().
+ *
+ * Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+ int *noctty, int *index)
+{
+ struct tty_driver *driver;
+
+ switch (device) {
+#ifdef CONFIG_VT
+ case MKDEV(TTY_MAJOR, 0): {
+ extern struct tty_driver *console_driver;
+ driver = tty_driver_kref_get(console_driver);
+ *index = fg_console;
+ *noctty = 1;
+ break;
+ }
+#endif
+ case MKDEV(TTYAUX_MAJOR, 1): {
+ struct tty_driver *console_driver = console_device(index);
+ if (console_driver) {
+ driver = tty_driver_kref_get(console_driver);
+ if (driver) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ *noctty = 1;
+ break;
+ }
+ }
+ return ERR_PTR(-ENODEV);
+ }
+ default:
+ driver = get_tty_driver(device, index);
+ if (!driver)
+ return ERR_PTR(-ENODEV);
+ break;
+ }
+ return driver;
+}
+
+/**
* tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
@@ -1813,16 +1898,16 @@ int tty_release(struct inode *inode, struct file *filp)
* The termios state of a pty is reset on first open so that
* settings don't persist across reuse.
*
- * Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
* tty->count should protect the rest.
* ->siglock protects ->signal/->sighand
*/
static int tty_open(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty = NULL;
+ struct tty_struct *tty;
int noctty, retval;
- struct tty_driver *driver;
+ struct tty_driver *driver = NULL;
int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
@@ -1841,66 +1926,22 @@ retry_open:
mutex_lock(&tty_mutex);
tty_lock();
- if (device == MKDEV(TTYAUX_MAJOR, 0)) {
- tty = get_current_tty();
- if (!tty) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENXIO;
- }
- driver = tty_driver_kref_get(tty->driver);
- index = tty->index;
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
- /* noctty = 1; */
- /* FIXME: Should we take a driver reference ? */
- tty_kref_put(tty);
- goto got_driver;
- }
-#ifdef CONFIG_VT
- if (device == MKDEV(TTY_MAJOR, 0)) {
- extern struct tty_driver *console_driver;
- driver = tty_driver_kref_get(console_driver);
- index = fg_console;
- noctty = 1;
- goto got_driver;
- }
-#endif
- if (device == MKDEV(TTYAUX_MAJOR, 1)) {
- struct tty_driver *console_driver = console_device(&index);
- if (console_driver) {
- driver = tty_driver_kref_get(console_driver);
- if (driver) {
- /* Don't let /dev/console block */
- filp->f_flags |= O_NONBLOCK;
- noctty = 1;
- goto got_driver;
- }
+ tty = tty_open_current_tty(device, filp);
+ if (IS_ERR(tty)) {
+ retval = PTR_ERR(tty);
+ goto err_unlock;
+ } else if (!tty) {
+ driver = tty_lookup_driver(device, filp, &noctty, &index);
+ if (IS_ERR(driver)) {
+ retval = PTR_ERR(driver);
+ goto err_unlock;
}
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
- driver = get_tty_driver(device, &index);
- if (!driver) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
-got_driver:
- if (!tty) {
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, inode, index);
-
if (IS_ERR(tty)) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_unlock;
}
}
@@ -1912,21 +1953,22 @@ got_driver:
tty = tty_init_dev(driver, index, 0);
mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
+ if (driver)
+ tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
tty_unlock();
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_file;
}
tty_add_file(tty, filp);
- check_tty_count(tty, "tty_open");
+ check_tty_count(tty, __func__);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
noctty = 1;
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "opening %s...", tty->name);
+ printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
#endif
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
@@ -1940,8 +1982,8 @@ got_driver:
if (retval) {
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error %d in opening %s...", retval,
- tty->name);
+ printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
+ retval, tty->name);
#endif
tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
@@ -1976,6 +2018,15 @@ got_driver:
tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
+err_unlock:
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ /* after locks to avoid deadlock */
+ if (!IS_ERR_OR_NULL(driver))
+ tty_driver_kref_put(driver);
+err_file:
+ tty_free_file(filp);
+ return retval;
}
@@ -3267,7 +3318,7 @@ void __init console_init(void)
}
}
-static char *tty_devnode(struct device *dev, mode_t *mode)
+static char *tty_devnode(struct device *dev, umode_t *mode)
{
if (!mode)
return NULL;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 8e0924f5544..24b95db75d8 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,19 +1,11 @@
#include <linux/types.h>
-#include <linux/major.h>
#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
+#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -24,18 +16,8 @@
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/bitops.h>
-#include <linux/delay.h>
#include <linux/seq_file.h>
-
#include <linux/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-
-#include <linux/kmod.h>
-#include <linux/nsproxy.h>
#include <linux/ratelimit.h>
/*
@@ -558,8 +540,6 @@ static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
long ret;
ret = wait_event_timeout(tty_ldisc_idle,
atomic_read(&tty->ldisc->users) == 1, timeout);
- if (ret < 0)
- return ret;
return ret > 0 ? 0 : -EBUSY;
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80156d..a0f3d6c4d39 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -584,7 +584,7 @@ int con_set_default_unimap(struct vc_data *vc)
return 0;
dflt->refcount++;
*vc->vc_uni_pagedir_loc = (unsigned long)dflt;
- if (p && --p->refcount) {
+ if (p && !--p->refcount) {
con_release_unimap(p);
kfree(p);
}
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index ff505951735..72d3646c736 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -104,17 +104,7 @@ static struct platform_driver uio_pdrv = {
},
};
-static int __init uio_pdrv_init(void)
-{
- return platform_driver_register(&uio_pdrv);
-}
-
-static void __exit uio_pdrv_exit(void)
-{
- platform_driver_unregister(&uio_pdrv);
-}
-module_init(uio_pdrv_init);
-module_exit(uio_pdrv_exit);
+module_platform_driver(uio_pdrv);
MODULE_AUTHOR("Uwe Kleine-Koenig");
MODULE_DESCRIPTION("Userspace I/O platform driver");
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 25de302009a..b98371d93a9 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -273,18 +273,7 @@ static struct platform_driver uio_pdrv_genirq = {
},
};
-static int __init uio_pdrv_genirq_init(void)
-{
- return platform_driver_register(&uio_pdrv_genirq);
-}
-
-static void __exit uio_pdrv_genirq_exit(void)
-{
- platform_driver_unregister(&uio_pdrv_genirq);
-}
-
-module_init(uio_pdrv_genirq_init);
-module_exit(uio_pdrv_genirq_exit);
+module_platform_driver(uio_pdrv_genirq);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index e67b566e7aa..33a7a273b45 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -227,19 +227,7 @@ static struct platform_driver pruss_driver = {
},
};
-static int __init pruss_init_module(void)
-{
- return platform_driver_register(&pruss_driver);
-}
-
-module_init(pruss_init_module);
-
-static void __exit pruss_exit_module(void)
-{
- platform_driver_unregister(&pruss_driver);
-}
-
-module_exit(pruss_exit_module);
+module_platform_driver(pruss_driver);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 791f11bed60..75823a1abeb 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -48,6 +48,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_DAVINCI_DA8XX
default y if ARCH_CNS3XXX
default y if PLAT_SPEAR
+ default y if ARCH_EXYNOS
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 75eca764522..53a7bc07dd8 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_USB) += core/
+obj-$(CONFIG_USB_OTG_UTILS) += otg/
+
obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_MON) += mon/
@@ -51,7 +53,6 @@ obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
obj-$(CONFIG_USB_MUSB_HDRC) += musb/
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs/
-obj-$(CONFIG_USB_OTG_UTILS) += otg/
obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += usb-common.o
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index a845f8b8382..98b89fe1986 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1372,18 +1372,7 @@ static struct usb_driver cxacru_usb_driver = {
.id_table = cxacru_usb_ids
};
-static int __init cxacru_init(void)
-{
- return usb_register(&cxacru_usb_driver);
-}
-
-static void __exit cxacru_cleanup(void)
-{
- usb_deregister(&cxacru_usb_driver);
-}
-
-module_init(cxacru_init);
-module_exit(cxacru_cleanup);
+module_usb_driver(cxacru_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
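module_usb_driver() is the USB-side counterpart used for the remaining conversions; its net effect is the same register/deregister pair, only wired to the USB core (a simplified view — the exact macro plumbing has moved between kernel versions):

    /* Net effect of module_usb_driver(cxacru_usb_driver), simplified: */
    static int __init cxacru_usb_driver_init(void)
    {
            return usb_register(&cxacru_usb_driver);
    }
    module_init(cxacru_usb_driver_init);

    static void __exit cxacru_usb_driver_exit(void)
    {
            usb_deregister(&cxacru_usb_driver);
    }
    module_exit(cxacru_usb_driver_exit);

One side effect visible in the later hunks: driver-specific messages that lived in the old init/exit routines (the speedtch dbg() calls, the ueagle-atm load/unload printk()s) disappear along with the boilerplate.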
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 0842cfbf60c..b42092e1f16 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -953,22 +953,7 @@ static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_devic
return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver);
}
-static int __init speedtch_usb_init(void)
-{
- dbg("%s: driver version %s", __func__, DRIVER_VERSION);
-
- return usb_register(&speedtch_usb_driver);
-}
-
-static void __exit speedtch_usb_cleanup(void)
-{
- dbg("%s", __func__);
-
- usb_deregister(&speedtch_usb_driver);
-}
-
-module_init(speedtch_usb_init);
-module_exit(speedtch_usb_cleanup);
+module_usb_driver(speedtch_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 428f36801e0..00f171a7a8a 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2753,36 +2753,7 @@ static struct usb_driver uea_driver = {
MODULE_DEVICE_TABLE(usb, uea_ids);
-/**
- * uea_init - Initialize the module.
- * Register to USB subsystem
- */
-static int __init uea_init(void)
-{
- printk(KERN_INFO "[ueagle-atm] driver " EAGLEUSBVERSION " loaded\n");
-
- usb_register(&uea_driver);
-
- return 0;
-}
-
-module_init(uea_init);
-
-/**
- * uea_exit - Destroy module
- * Deregister with USB subsystem
- */
-static void __exit uea_exit(void)
-{
- /*
- * This calls automatically the uea_disconnect method if necessary:
- */
- usb_deregister(&uea_driver);
-
- printk(KERN_INFO "[ueagle-atm] driver unloaded\n");
-}
-
-module_exit(uea_exit);
+module_usb_driver(uea_driver);
MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 57ae44cd0b8..6f3b6e26739 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -225,21 +225,10 @@ static struct platform_driver c67x00_driver = {
.name = "c67x00",
},
};
-MODULE_ALIAS("platform:c67x00");
-
-static int __init c67x00_init(void)
-{
- return platform_driver_register(&c67x00_driver);
-}
-static void __exit c67x00_exit(void)
-{
- platform_driver_unregister(&c67x00_driver);
-}
-
-module_init(c67x00_init);
-module_exit(c67x00_exit);
+module_platform_driver(c67x00_driver);
MODULE_AUTHOR("Peter Korsgaard, Jan Veldeman, Grant Likely");
MODULE_DESCRIPTION("Cypress C67X00 USB Controller Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:c67x00");
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index d3e1356d091..75e47b860a5 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -271,7 +271,6 @@ static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
if (int_status & SOFEOP_FLG(sie->sie_num)) {
c67x00_ll_usb_clear_status(sie, SOF_EOP_IRQ_FLG);
c67x00_sched_kick(c67x00);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8c564a5334..9543b19d410 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -58,12 +58,62 @@ static struct usb_driver acm_driver;
static struct tty_driver *acm_tty_driver;
static struct acm *acm_table[ACM_TTY_MINORS];
-static DEFINE_MUTEX(open_mutex);
+static DEFINE_MUTEX(acm_table_lock);
-#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
+/*
+ * acm_table accessors
+ */
-static const struct tty_port_operations acm_port_ops = {
-};
+/*
+ * Look up an ACM structure by index. If found and not disconnected, take a
+ * reference on its tty_port and return it (its mutex is dropped again before
+ * returning).
+ */
+static struct acm *acm_get_by_index(unsigned index)
+{
+ struct acm *acm;
+
+ mutex_lock(&acm_table_lock);
+ acm = acm_table[index];
+ if (acm) {
+ mutex_lock(&acm->mutex);
+ if (acm->disconnected) {
+ mutex_unlock(&acm->mutex);
+ acm = NULL;
+ } else {
+ tty_port_get(&acm->port);
+ mutex_unlock(&acm->mutex);
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+ return acm;
+}
+
+/*
+ * Try to find an available minor number and if found, associate it with 'acm'.
+ */
+static int acm_alloc_minor(struct acm *acm)
+{
+ int minor;
+
+ mutex_lock(&acm_table_lock);
+ for (minor = 0; minor < ACM_TTY_MINORS; minor++) {
+ if (!acm_table[minor]) {
+ acm_table[minor] = acm;
+ break;
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+
+ return minor;
+}
+
+/* Release the minor number associated with 'acm'. */
+static void acm_release_minor(struct acm *acm)
+{
+ mutex_lock(&acm_table_lock);
+ acm_table[acm->minor] = NULL;
+ mutex_unlock(&acm_table_lock);
+}
/*
* Functions for ACM control messages.
@@ -267,9 +317,6 @@ static void acm_ctrl_irq(struct urb *urb)
goto exit;
}
- if (!ACM_READY(acm))
- goto exit;
-
usb_mark_last_busy(acm->dev);
data = (unsigned char *)(dr + 1);
@@ -429,8 +476,7 @@ static void acm_write_bulk(struct urb *urb)
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
- if (ACM_READY(acm))
- schedule_work(&acm->work);
+ schedule_work(&acm->work);
}
static void acm_softint(struct work_struct *work)
@@ -440,8 +486,6 @@ static void acm_softint(struct work_struct *work)
dev_vdbg(&acm->data->dev, "%s\n", __func__);
- if (!ACM_READY(acm))
- return;
tty = tty_port_tty_get(&acm->port);
if (!tty)
return;
@@ -453,93 +497,122 @@ static void acm_softint(struct work_struct *work)
* TTY handlers
*/
-static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct acm *acm;
- int rv = -ENODEV;
-
- mutex_lock(&open_mutex);
+ int retval;
- acm = acm_table[tty->index];
- if (!acm || !acm->dev)
- goto out;
- else
- rv = 0;
+ dev_dbg(tty->dev, "%s\n", __func__);
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+ acm = acm_get_by_index(tty->index);
+ if (!acm)
+ return -ENODEV;
- set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ retval = tty_init_termios(tty);
+ if (retval)
+ goto error_init_termios;
tty->driver_data = acm;
- tty_port_tty_set(&acm->port, tty);
- if (usb_autopm_get_interface(acm->control) < 0)
- goto early_bail;
- else
- acm->control->needs_remote_wakeup = 1;
+ /* Final install (we use the default method) */
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+
+ return 0;
+
+error_init_termios:
+ tty_port_put(&acm->port);
+ return retval;
+}
+
+static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct acm *acm = tty->driver_data;
+
+ dev_dbg(tty->dev, "%s\n", __func__);
+
+ return tty_port_open(&acm->port, tty, filp);
+}
+
+static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct acm *acm = container_of(port, struct acm, port);
+ int retval = -ENODEV;
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
mutex_lock(&acm->mutex);
- if (acm->port.count++) {
- mutex_unlock(&acm->mutex);
- usb_autopm_put_interface(acm->control);
- goto out;
- }
+ if (acm->disconnected)
+ goto disconnected;
+
+ retval = usb_autopm_get_interface(acm->control);
+ if (retval)
+ goto error_get_interface;
+
+ /*
+ * FIXME: Why do we need this? Allocating 64K of physically contiguous
+ * memory is really nasty...
+ */
+ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ acm->control->needs_remote_wakeup = 1;
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
dev_err(&acm->control->dev,
"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
- goto bail_out;
+ goto error_submit_urb;
}
- if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
+ acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
+ if (acm_set_control(acm, acm->ctrlout) < 0 &&
(acm->ctrl_caps & USB_CDC_CAP_LINE))
- goto bail_out;
+ goto error_set_control;
usb_autopm_put_interface(acm->control);
if (acm_submit_read_urbs(acm, GFP_KERNEL))
- goto bail_out;
-
- set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
- rv = tty_port_block_til_ready(&acm->port, tty, filp);
+ goto error_submit_read_urbs;
mutex_unlock(&acm->mutex);
-out:
- mutex_unlock(&open_mutex);
- return rv;
-bail_out:
- acm->port.count--;
- mutex_unlock(&acm->mutex);
+ return 0;
+
+error_submit_read_urbs:
+ acm->ctrlout = 0;
+ acm_set_control(acm, acm->ctrlout);
+error_set_control:
+ usb_kill_urb(acm->ctrlurb);
+error_submit_urb:
usb_autopm_put_interface(acm->control);
-early_bail:
- mutex_unlock(&open_mutex);
- tty_port_tty_set(&acm->port, NULL);
- return -EIO;
+error_get_interface:
+disconnected:
+ mutex_unlock(&acm->mutex);
+ return retval;
}
-static void acm_tty_unregister(struct acm *acm)
+static void acm_port_destruct(struct tty_port *port)
{
- int i;
+ struct acm *acm = container_of(port, struct acm, port);
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_unregister_device(acm_tty_driver, acm->minor);
+ acm_release_minor(acm);
usb_put_intf(acm->control);
- acm_table[acm->minor] = NULL;
- usb_free_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_free_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_urb(acm->read_urbs[i]);
kfree(acm->country_codes);
kfree(acm);
}
-static void acm_port_down(struct acm *acm)
+static void acm_port_shutdown(struct tty_port *port)
{
+ struct acm *acm = container_of(port, struct acm, port);
int i;
- if (acm->dev) {
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+
+ mutex_lock(&acm->mutex);
+ if (!acm->disconnected) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
usb_kill_urb(acm->ctrlurb);
@@ -550,40 +623,28 @@ static void acm_port_down(struct acm *acm)
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
+ mutex_unlock(&acm->mutex);
+}
+
+static void acm_tty_cleanup(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_put(&acm->port);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_port_hangup(&acm->port);
- mutex_lock(&open_mutex);
- acm_port_down(acm);
- mutex_unlock(&open_mutex);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
-
- /* Perform the closing process and see if we need to do the hardware
- shutdown */
- if (!acm)
- return;
-
- mutex_lock(&open_mutex);
- if (tty_port_close_start(&acm->port, tty, filp) == 0) {
- if (!acm->dev) {
- tty_port_tty_set(&acm->port, NULL);
- acm_tty_unregister(acm);
- tty->driver_data = NULL;
- }
- mutex_unlock(&open_mutex);
- return;
- }
- acm_port_down(acm);
- tty_port_close_end(&acm->port, tty);
- tty_port_tty_set(&acm->port, NULL);
- mutex_unlock(&open_mutex);
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_close(&acm->port, tty, filp);
}
static int acm_tty_write(struct tty_struct *tty,
@@ -595,8 +656,6 @@ static int acm_tty_write(struct tty_struct *tty,
int wbn;
struct acm_wb *wb;
- if (!ACM_READY(acm))
- return -EINVAL;
if (!count)
return 0;
@@ -625,8 +684,6 @@ static int acm_tty_write(struct tty_struct *tty,
static int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
/*
* Do not let the line discipline to know that we have a reserve,
* or it might get too enthusiastic.
@@ -637,7 +694,11 @@ static int acm_tty_write_room(struct tty_struct *tty)
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
+ /*
+ * if the device was unplugged then any remaining characters fell out
+ * of the connector ;)
+ */
+ if (acm->disconnected)
return 0;
/*
* This is inaccurate (overcounts), but it works.
@@ -649,9 +710,6 @@ static void acm_tty_throttle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
acm->throttle_req = 1;
spin_unlock_irq(&acm->read_lock);
@@ -662,9 +720,6 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
struct acm *acm = tty->driver_data;
unsigned int was_throttled;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
was_throttled = acm->throttled;
acm->throttled = 0;
@@ -679,8 +734,7 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
struct acm *acm = tty->driver_data;
int retval;
- if (!ACM_READY(acm))
- return -EINVAL;
+
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
dev_dbg(&acm->control->dev, "%s - send break failed\n",
@@ -692,9 +746,6 @@ static int acm_tty_tiocmget(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
-
return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
(acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
(acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
@@ -709,9 +760,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
struct acm *acm = tty->driver_data;
unsigned int newctrl;
- if (!ACM_READY(acm))
- return -EINVAL;
-
newctrl = acm->ctrlout;
set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
@@ -728,11 +776,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
static int acm_tty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
- struct acm *acm = tty->driver_data;
-
- if (!ACM_READY(acm))
- return -EINVAL;
-
return -ENOIOCTLCMD;
}
@@ -756,9 +799,6 @@ static void acm_tty_set_termios(struct tty_struct *tty,
struct usb_cdc_line_coding newline;
int newctrl = acm->ctrlout;
- if (!ACM_READY(acm))
- return;
-
newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
newline.bParityType = termios->c_cflag & PARENB ?
@@ -788,6 +828,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
}
}
+static const struct tty_port_operations acm_port_ops = {
+ .shutdown = acm_port_shutdown,
+ .activate = acm_port_activate,
+ .destruct = acm_port_destruct,
+};
+
/*
* USB probe and disconnect routines.
*/
@@ -1047,12 +1093,6 @@ skip_normal_probe:
}
made_compressed_probe:
dev_dbg(&intf->dev, "interfaces are valid\n");
- for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
-
- if (minor == ACM_TTY_MINORS) {
- dev_err(&intf->dev, "no more free acm devices\n");
- return -ENODEV;
- }
acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
if (acm == NULL) {
@@ -1060,6 +1100,13 @@ made_compressed_probe:
goto alloc_fail;
}
+ minor = acm_alloc_minor(acm);
+ if (minor == ACM_TTY_MINORS) {
+ dev_err(&intf->dev, "no more free acm devices\n");
+ kfree(acm);
+ return -ENODEV;
+ }
+
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1183,6 +1230,8 @@ made_compressed_probe:
i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
if (i < 0) {
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
@@ -1191,6 +1240,8 @@ made_compressed_probe:
if (i < 0) {
device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
}
@@ -1218,8 +1269,6 @@ skip_countries:
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
- acm_table[minor] = acm;
-
return 0;
alloc_fail7:
for (i = 0; i < ACM_NW; i++)
@@ -1234,6 +1283,7 @@ alloc_fail5:
alloc_fail4:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
+ acm_release_minor(acm);
kfree(acm);
alloc_fail:
return -ENOMEM;
@@ -1259,12 +1309,16 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct tty_struct *tty;
+ int i;
+
+ dev_dbg(&intf->dev, "%s\n", __func__);
/* sibling interface is already cleaning up */
if (!acm)
return;
- mutex_lock(&open_mutex);
+ mutex_lock(&acm->mutex);
+ acm->disconnected = true;
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1272,33 +1326,32 @@ static void acm_disconnect(struct usb_interface *intf)
&dev_attr_iCountryCodeRelDate);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
- acm->dev = NULL;
usb_set_intfdata(acm->control, NULL);
usb_set_intfdata(acm->data, NULL);
+ mutex_unlock(&acm->mutex);
+
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
stop_data_traffic(acm);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
- usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
- acm->ctrl_dma);
+ usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
- if (acm->port.count == 0) {
- acm_tty_unregister(acm);
- mutex_unlock(&open_mutex);
- return;
- }
-
- mutex_unlock(&open_mutex);
- tty = tty_port_tty_get(&acm->port);
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_put(&acm->port);
}
#ifdef CONFIG_PM
@@ -1325,16 +1378,10 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- /*
- we treat opened interfaces differently,
- we must guard against open
- */
- mutex_lock(&acm->mutex);
- if (acm->port.count)
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
stop_data_traffic(acm);
- mutex_unlock(&acm->mutex);
return 0;
}
@@ -1353,8 +1400,7 @@ static int acm_resume(struct usb_interface *intf)
if (cnt)
return 0;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
spin_lock_irq(&acm->write_lock);
@@ -1378,7 +1424,6 @@ static int acm_resume(struct usb_interface *intf)
}
err_out:
- mutex_unlock(&acm->mutex);
return rv;
}
@@ -1387,15 +1432,14 @@ static int acm_reset_resume(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
tty = tty_port_tty_get(&acm->port);
if (tty) {
tty_hangup(tty);
tty_kref_put(tty);
}
}
- mutex_unlock(&acm->mutex);
+
return acm_resume(intf);
}
@@ -1458,6 +1502,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
@@ -1594,8 +1648,10 @@ static struct usb_driver acm_driver = {
*/
static const struct tty_operations acm_ops = {
+ .install = acm_tty_install,
.open = acm_tty_open,
.close = acm_tty_close,
+ .cleanup = acm_tty_cleanup,
.hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
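The cdc-acm rework above ties the lifetime of struct acm to the tty_port kref instead of the old open_mutex/ACM_READY() scheme. A hedged summary of the resulting callback flow, written as a comment (all names are from this patch):

    /*
     * open path:    acm_tty_install() -> acm_get_by_index()  takes a port reference
     *               acm_tty_open()    -> tty_port_open()     -> .activate = acm_port_activate()
     * close path:   acm_tty_close()   -> tty_port_close()    -> .shutdown = acm_port_shutdown()
     *               acm_tty_cleanup() -> tty_port_put()      drops the reference
     * disconnect:   acm_disconnect()  sets acm->disconnected, hangs up the tty and
     *               drops its own reference; the final tty_port_put() invokes
     *               .destruct = acm_port_destruct(), which unregisters the tty device,
     *               releases the minor and frees the acm.
     */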
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca7937f26e2..35ef887b741 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -101,6 +101,7 @@ struct acm {
int transmitting;
spinlock_t write_lock;
struct mutex mutex;
+ bool disconnected;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index efe684908c1..1c50baff772 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -895,24 +895,7 @@ static struct usb_driver wdm_driver = {
.supports_autosuspend = 1,
};
-/* --- low level module stuff --- */
-
-static int __init wdm_init(void)
-{
- int rv;
-
- rv = usb_register(&wdm_driver);
-
- return rv;
-}
-
-static void __exit wdm_exit(void)
-{
- usb_deregister(&wdm_driver);
-}
-
-module_init(wdm_init);
-module_exit(wdm_exit);
+module_usb_driver(wdm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index cb3a93243a0..a68c1a63dc6 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1045,7 +1045,7 @@ static const struct file_operations usblp_fops = {
.llseek = noop_llseek,
};
-static char *usblp_devnode(struct device *dev, mode_t *mode)
+static char *usblp_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -1412,18 +1412,7 @@ static struct usb_driver usblp_driver = {
.supports_autosuspend = 1,
};
-static int __init usblp_init(void)
-{
- return usb_register(&usblp_driver);
-}
-
-static void __exit usblp_exit(void)
-{
- usb_deregister(&usblp_driver);
-}
-
-module_init(usblp_init);
-module_exit(usblp_exit);
+module_usb_driver(usblp_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 12cf5e7395a..70d69d06054 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1116,21 +1116,6 @@ static struct usb_driver usbtmc_driver = {
.resume = usbtmc_resume,
};
-static int __init usbtmc_init(void)
-{
- int retcode;
-
- retcode = usb_register(&usbtmc_driver);
- if (retcode)
- printk(KERN_ERR KBUILD_MODNAME": Unable to register driver\n");
- return retcode;
-}
-module_init(usbtmc_init);
-
-static void __exit usbtmc_exit(void)
-{
- usb_deregister(&usbtmc_driver);
-}
-module_exit(usbtmc_exit);
+module_usb_driver(usbtmc_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e3beaf229ee..3af5e2dd1d8 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -86,6 +86,7 @@ struct async {
void __user *userbuffer;
void __user *userurb;
struct urb *urb;
+ unsigned int mem_usage;
int status;
u32 secid;
u8 bulk_addr;
@@ -108,8 +109,44 @@ enum snoop_when {
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
-#define MAX_USBFS_BUFFER_SIZE 16384
+/* Limit on the total amount of memory we can allocate for transfers */
+static unsigned usbfs_memory_mb = 16;
+module_param(usbfs_memory_mb, uint, 0644);
+MODULE_PARM_DESC(usbfs_memory_mb,
+ "maximum MB allowed for usbfs buffers (0 = no limit)");
+/* Hard limit, necessary to avoid arithmetic overflow */
+#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
+
+static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
+
+/* Check whether it's okay to allocate more memory for a transfer */
+static int usbfs_increase_memory_usage(unsigned amount)
+{
+ unsigned lim;
+
+ /*
+ * Convert usbfs_memory_mb to bytes, avoiding overflows.
+ * 0 means use the hard limit (effectively unlimited).
+ */
+ lim = ACCESS_ONCE(usbfs_memory_mb);
+ if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
+ lim = USBFS_XFER_MAX;
+ else
+ lim <<= 20;
+
+ atomic_add(amount, &usbfs_memory_usage);
+ if (atomic_read(&usbfs_memory_usage) <= lim)
+ return 0;
+ atomic_sub(amount, &usbfs_memory_usage);
+ return -ENOMEM;
+}
+
+/* Memory for a transfer is being deallocated */
+static void usbfs_decrease_memory_usage(unsigned amount)
+{
+ atomic_sub(amount, &usbfs_memory_usage);
+}
static int connected(struct dev_state *ps)
{
@@ -249,10 +286,12 @@ static struct async *alloc_async(unsigned int numisoframes)
static void free_async(struct async *as)
{
put_pid(as->pid);
- put_cred(as->cred);
+ if (as->cred)
+ put_cred(as->cred);
kfree(as->urb->transfer_buffer);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
+ usbfs_decrease_memory_usage(as->mem_usage);
kfree(as);
}
@@ -792,9 +831,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
+ ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ if (ret)
+ return ret;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = ctrl.timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
@@ -806,8 +851,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
@@ -821,15 +866,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
tbuf, max(i, 0));
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (ctrl.wLength) {
if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
@@ -843,14 +888,18 @@ static int proc_control(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
- free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl.bRequestType, ctrl.bRequest,
ctrl.wLength, i);
}
- return i;
+ ret = i;
+ done:
+ free_page((unsigned long) tbuf);
+ usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ return ret;
}
static int proc_bulk(struct dev_state *ps, void __user *arg)
@@ -877,15 +926,20 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
return -EINVAL;
len1 = bulk.len;
- if (len1 > MAX_USBFS_BUFFER_SIZE)
+ if (len1 >= USBFS_XFER_MAX)
return -EINVAL;
- if (!(tbuf = kmalloc(len1, GFP_KERNEL)))
- return -ENOMEM;
+ ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+ if (ret)
+ return ret;
+ if (!(tbuf = kmalloc(len1, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = bulk.timeout;
if (bulk.ep & 0x80) {
if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) {
- kfree(tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
@@ -896,15 +950,15 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (len1) {
if (copy_from_user(tbuf, bulk.data, len1)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
@@ -914,10 +968,11 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
+ ret = (i < 0 ? i : len2);
+ done:
kfree(tbuf);
- if (i < 0)
- return i;
- return len2;
+ usbfs_decrease_memory_usage(len1 + sizeof(struct urb));
+ return ret;
}
static int proc_resetep(struct dev_state *ps, void __user *arg)
@@ -1062,7 +1117,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
- struct async *as;
+ struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int ret, ifnum = -1;
@@ -1095,32 +1150,30 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
if (!ep)
return -ENOENT;
+
+ u = 0;
switch(uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
- /* min 8 byte setup packet,
- * max 8 byte setup plus an arbitrary data stage */
- if (uurb->buffer_length < 8 ||
- uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
+ /* min 8 byte setup packet */
+ if (uurb->buffer_length < 8)
return -EINVAL;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (copy_from_user(dr, uurb->buffer, 8)) {
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
- kfree(dr);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
le16_to_cpup(&dr->wIndex));
- if (ret) {
- kfree(dr);
- return ret;
- }
+ if (ret)
+ goto error;
uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
@@ -1138,6 +1191,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
__le16_to_cpup(&dr->wValue),
__le16_to_cpup(&dr->wIndex),
__le16_to_cpup(&dr->wLength));
+ u = sizeof(struct usb_ctrlrequest);
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1151,8 +1205,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
goto interrupt_urb;
}
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
@@ -1160,8 +1212,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
interrupt_urb:
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1176,50 +1226,53 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
- kfree(isopkt);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
/* arbitrary limit,
* sufficient for USB 2.0 high-bandwidth iso */
if (isopkt[u].length > 8192) {
- kfree(isopkt);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
totlen += isopkt[u].length;
}
- /* 3072 * 64 microframes */
- if (totlen > 196608) {
- kfree(isopkt);
- return -EINVAL;
- }
+ u *= sizeof(struct usb_iso_packet_descriptor);
uurb->buffer_length = totlen;
break;
default:
return -EINVAL;
}
+
+ if (uurb->buffer_length >= USBFS_XFER_MAX) {
+ ret = -EINVAL;
+ goto error;
+ }
if (uurb->buffer_length > 0 &&
!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
uurb->buffer, uurb->buffer_length)) {
- kfree(isopkt);
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
as = alloc_async(uurb->number_of_packets);
if (!as) {
- kfree(isopkt);
- kfree(dr);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
+ u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length;
+ ret = usbfs_increase_memory_usage(u);
+ if (ret)
+ goto error;
+ as->mem_usage = u;
+
if (uurb->buffer_length > 0) {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
GFP_KERNEL);
if (!as->urb->transfer_buffer) {
- kfree(isopkt);
- kfree(dr);
- free_async(as);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
/* Isochronous input data may end up being discontiguous
* if some of the packets are short. Clear the buffer so
@@ -1253,6 +1306,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
+ dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = uurb->number_of_packets;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
@@ -1268,6 +1322,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
totlen += isopkt[u].length;
}
kfree(isopkt);
+ isopkt = NULL;
as->ps = ps;
as->userurb = arg;
if (is_in && uurb->buffer_length > 0)
@@ -1282,8 +1337,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!is_in && uurb->buffer_length > 0) {
if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
uurb->buffer_length)) {
- free_async(as);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
@@ -1329,10 +1384,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
0, ret, COMPLETE, NULL, 0);
async_removepending(as);
- free_async(as);
- return ret;
+ goto error;
}
return 0;
+
+ error:
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+ free_async(as);
+ return ret;
}
static int proc_submiturb(struct dev_state *ps, void __user *arg)
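The accounting added above is optimistic: usbfs_increase_memory_usage() adds the request to the running total, compares it against the usbfs_memory_mb limit and rolls back on failure, so every caller must pair it with usbfs_decrease_memory_usage() using the same byte count. A minimal sketch of that contract for a hypothetical new allocation site inside this file, modeled on proc_bulk() above ('len' and 'buf' are illustrative):

    int ret;
    unsigned amount = len + sizeof(struct urb);
    void *buf;

    ret = usbfs_increase_memory_usage(amount);
    if (ret)
            return ret;                     /* over the usbfs_memory_mb limit */

    buf = kmalloc(len, GFP_KERNEL);
    if (!buf) {
            usbfs_decrease_memory_usage(amount);
            return -ENOMEM;
    }

    /* ... perform the transfer using buf ... */

    kfree(buf);
    usbfs_decrease_memory_usage(amount);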
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 45887a0ff87..d40ff956881 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -45,10 +45,12 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
struct usb_dynid *dynid;
u32 idVendor = 0;
u32 idProduct = 0;
+ unsigned int bInterfaceClass = 0;
int fields = 0;
int retval = 0;
- fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
+ fields = sscanf(buf, "%x %x %x", &idVendor, &idProduct,
+ &bInterfaceClass);
if (fields < 2)
return -EINVAL;
@@ -60,6 +62,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idVendor = idVendor;
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+ if (fields == 3) {
+ dynid->id.bInterfaceClass = (u8)bInterfaceClass;
+ dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
+ }
spin_lock(&dynids->lock);
list_add_tail(&dynid->node, &dynids->list);
@@ -1073,17 +1079,10 @@ static int usb_suspend_interface(struct usb_device *udev,
goto done;
driver = to_usb_driver(intf->dev.driver);
- if (driver->suspend) {
- status = driver->suspend(intf, msg);
- if (status && !PMSG_IS_AUTO(msg))
- dev_err(&intf->dev, "%s error %d\n",
- "suspend", status);
- } else {
- /* Later we will unbind the driver and reprobe */
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "suspend", driver->name);
- }
+ /* at this time we know the driver supports suspend */
+ status = driver->suspend(intf, msg);
+ if (status && !PMSG_IS_AUTO(msg))
+ dev_err(&intf->dev, "suspend error %d\n", status);
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
@@ -1132,16 +1131,9 @@ static int usb_resume_interface(struct usb_device *udev,
"reset_resume", driver->name);
}
} else {
- if (driver->resume) {
- status = driver->resume(intf);
- if (status)
- dev_err(&intf->dev, "%s error %d\n",
- "resume", status);
- } else {
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "resume", driver->name);
- }
+ status = driver->resume(intf);
+ if (status)
+ dev_err(&intf->dev, "resume error %d\n", status);
}
done:
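The usb_store_new_id() change at the top of this file lets user space append an optional third hex field, an interface class, when writing to a driver's new_id attribute. The stored dynamic ID then looks roughly like this (the vendor, product and class values below are made up for illustration):

    /* e.g. after writing "046d c534 03" to a driver's new_id file: */
    static const struct usb_device_id example_dynid = {
            .match_flags     = USB_DEVICE_ID_MATCH_DEVICE |
                               USB_DEVICE_ID_MATCH_INT_CLASS,
            .idVendor        = 0x046d,
            .idProduct       = 0xc534,
            .bInterfaceClass = 0x03,        /* HID class, purely as an example */
    };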
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 99458c843d6..d95760de9e8 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -66,7 +66,7 @@ static struct usb_class {
struct class *class;
} *usb_class;
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_class_driver *drv;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a004db35f6d..d136b8f4c8a 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -453,10 +453,6 @@ static int resume_common(struct device *dev, int event)
pci_set_master(pci_dev);
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
-
if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) {
if (event != PM_EVENT_AUTO_RESUME)
wait_for_companions(pci_dev, hcd);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 13222d352a6..eb19cba34ac 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -658,7 +658,7 @@ error:
len > offsetof(struct usb_device_descriptor,
bDeviceProtocol))
((struct usb_device_descriptor *) ubuf)->
- bDeviceProtocol = 1;
+ bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
}
/* any errors get returned through the urb completion */
@@ -1168,20 +1168,6 @@ int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
if (urb->unlinked)
return -EBUSY;
urb->unlinked = status;
-
- /* IRQ setup can easily be broken so that USB controllers
- * never get completion IRQs ... maybe even the ones we need to
- * finish unlinking the initial failed usb_set_address()
- * or device descriptor fetch.
- */
- if (!HCD_SAW_IRQ(hcd) && !is_root_hub(urb->dev)) {
- dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
- "Controller is probably using the wrong IRQ.\n");
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
@@ -1412,11 +1398,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
- if (n != urb->num_sgs) {
- urb->num_sgs = n;
+ urb->num_mapped_sgs = n;
+ if (n != urb->num_sgs)
urb->transfer_flags |=
URB_DMA_SG_COMBINED;
- }
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
@@ -2148,16 +2133,12 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
*/
local_irq_save(flags);
- if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
+ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
- } else if (hcd->driver->irq(hcd) == IRQ_NONE) {
+ else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
- } else {
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
+ else
rc = IRQ_HANDLED;
- }
local_irq_restore(flags);
return rc;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 79781461eec..79d339e2e70 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -84,7 +84,7 @@ struct usb_hub {
static inline int hub_is_superspeed(struct usb_device *hdev)
{
- return (hdev->descriptor.bDeviceProtocol == 3);
+ return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
}
/* Protect struct usb_device->state and ->children members
@@ -1041,58 +1041,58 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
- case 0x00:
- dev_dbg(hub_dev, "ganged power switching\n");
- break;
- case 0x01:
- dev_dbg(hub_dev, "individual port power switching\n");
- break;
- case 0x02:
- case 0x03:
- dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
- break;
+ case HUB_CHAR_COMMON_LPSM:
+ dev_dbg(hub_dev, "ganged power switching\n");
+ break;
+ case HUB_CHAR_INDV_PORT_LPSM:
+ dev_dbg(hub_dev, "individual port power switching\n");
+ break;
+ case HUB_CHAR_NO_LPSM:
+ case HUB_CHAR_LPSM:
+ dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
+ break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
- case 0x00:
- dev_dbg(hub_dev, "global over-current protection\n");
- break;
- case 0x08:
- dev_dbg(hub_dev, "individual port over-current protection\n");
- break;
- case 0x10:
- case 0x18:
- dev_dbg(hub_dev, "no over-current protection\n");
- break;
+ case HUB_CHAR_COMMON_OCPM:
+ dev_dbg(hub_dev, "global over-current protection\n");
+ break;
+ case HUB_CHAR_INDV_PORT_OCPM:
+ dev_dbg(hub_dev, "individual port over-current protection\n");
+ break;
+ case HUB_CHAR_NO_OCPM:
+ case HUB_CHAR_OCPM:
+ dev_dbg(hub_dev, "no over-current protection\n");
+ break;
}
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
- case 0:
- break;
- case 1:
- dev_dbg(hub_dev, "Single TT\n");
- hub->tt.hub = hdev;
- break;
- case 2:
- ret = usb_set_interface(hdev, 0, 1);
- if (ret == 0) {
- dev_dbg(hub_dev, "TT per port\n");
- hub->tt.multi = 1;
- } else
- dev_err(hub_dev, "Using single TT (err %d)\n",
- ret);
- hub->tt.hub = hdev;
- break;
- case 3:
- /* USB 3.0 hubs don't have a TT */
- break;
- default:
- dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
- hdev->descriptor.bDeviceProtocol);
- break;
+ case USB_HUB_PR_FS:
+ break;
+ case USB_HUB_PR_HS_SINGLE_TT:
+ dev_dbg(hub_dev, "Single TT\n");
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_HS_MULTI_TT:
+ ret = usb_set_interface(hdev, 0, 1);
+ if (ret == 0) {
+ dev_dbg(hub_dev, "TT per port\n");
+ hub->tt.multi = 1;
+ } else
+ dev_err(hub_dev, "Using single TT (err %d)\n",
+ ret);
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_SS:
+ /* USB 3.0 hubs don't have a TT */
+ break;
+ default:
+ dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
+ hdev->descriptor.bDeviceProtocol);
+ break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
@@ -1360,7 +1360,6 @@ descriptor_error:
return -ENODEV;
}
-/* No BKL needed */
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2278dad886e..9e186f3da83 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -65,7 +65,7 @@ static umode_t devmode = USBFS_DEFAULT_DEVMODE;
static umode_t busmode = USBFS_DEFAULT_BUSMODE;
static umode_t listmode = USBFS_DEFAULT_LISTMODE;
-static int usbfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int usbfs_show_options(struct seq_file *seq, struct dentry *root)
{
if (devuid != 0)
seq_printf(seq, ",devuid=%u", devuid);
@@ -264,21 +264,19 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
- if (usbfs_mount && usbfs_mount->mnt_sb)
+ if (usbfs_mount)
update_sb(usbfs_mount->mnt_sb);
return 0;
}
-static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t dev)
+static struct inode *usbfs_get_inode (struct super_block *sb, umode_t mode, dev_t dev)
{
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+ inode_init_owner(inode, NULL, mode);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
default:
@@ -300,7 +298,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de
}
/* SMP-safe */
-static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
+static int usbfs_mknod (struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
struct inode *inode = usbfs_get_inode(dir->i_sb, mode, dev);
@@ -317,7 +315,7 @@ static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
return error;
}
-static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, umode_t mode)
{
int res;
@@ -328,7 +326,7 @@ static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
return res;
}
-static int usbfs_create (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_create (struct inode *dir, struct dentry *dentry, umode_t mode)
{
mode = (mode & S_IALLUGO) | S_IFREG;
return usbfs_mknod (dir, dentry, mode, 0);
@@ -489,7 +487,7 @@ static int usbfs_fill_super(struct super_block *sb, void *data, int silent)
*
* This function handles both regular files and directories.
*/
-static int fs_create_by_name (const char *name, mode_t mode,
+static int fs_create_by_name (const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry)
{
int error = 0;
@@ -500,9 +498,8 @@ static int fs_create_by_name (const char *name, mode_t mode,
* have around.
*/
if (!parent ) {
- if (usbfs_mount && usbfs_mount->mnt_sb) {
- parent = usbfs_mount->mnt_sb->s_root;
- }
+ if (usbfs_mount)
+ parent = usbfs_mount->mnt_root;
}
if (!parent) {
@@ -514,7 +511,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry)) {
- if ((mode & S_IFMT) == S_IFDIR)
+ if (S_ISDIR(mode))
error = usbfs_mkdir (parent->d_inode, *dentry, mode);
else
error = usbfs_create (parent->d_inode, *dentry, mode);
@@ -525,7 +522,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
return error;
}
-static struct dentry *fs_create_file (const char *name, mode_t mode,
+static struct dentry *fs_create_file (const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
uid_t uid, gid_t gid)
@@ -608,7 +605,7 @@ static int create_special_files (void)
ignore_mount = 0;
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
devices_usbfs_dentry = fs_create_file ("devices",
listmode | S_IFREG, parent,
NULL, &usbfs_devices_fops,
@@ -662,7 +659,7 @@ static void usbfs_add_bus(struct usb_bus *bus)
sprintf (name, "%03d", bus->busnum);
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
bus->usbfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
bus, NULL, busuid, busgid);
if (bus->usbfs_dentry == NULL) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ecf12e15a7e..4c65eb6a867 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
- /* Guillemot Webcam Hercules Dualpix Exchange*/
+ /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Guillemot Webcam Hercules Dualpix Exchange */
+ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 662c0cf3a3e..9e491ca2e5c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -642,7 +642,7 @@ static struct attribute *dev_string_attrs[] = {
NULL
};
-static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -877,7 +877,7 @@ static struct attribute *intf_assoc_attrs[] = {
NULL,
};
-static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 73cd90012ec..1382c90d083 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -326,7 +326,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#endif /* CONFIG_PM */
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_device *usb_dev;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 3888778582c..45e8479c377 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -132,20 +132,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
-/* translate USB error codes to codes user space understands */
-static inline int usb_translate_errors(int error_code)
-{
- switch (error_code) {
- case 0:
- case -ENOMEM:
- case -ENODEV:
- return error_code;
- default:
- return -EIO;
- }
-}
-
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 3c1d67d324f..d8f741f9e56 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,10 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET)
+ depends on (USB && USB_GADGET)
select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SUPERSPEED
+ select USB_XHCI_PLATFORM
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 593d1dbc465..900ae74357f 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -4,10 +4,8 @@ ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o
-
-ifneq ($(CONFIG_USB_GADGET_DWC3),)
- dwc3-y += gadget.o ep0.o
-endif
+dwc3-y += host.o
+dwc3-y += gadget.o ep0.o
ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 717ebc9ff94..7c9df630dbe 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -59,6 +59,60 @@
#include "debug.h"
+static char *maximum_speed = "super";
+module_param(maximum_speed, charp, 0);
+MODULE_PARM_DESC(maximum_speed, "Maximum supported speed.");
+
+/* -------------------------------------------------------------------------- */
+
+#define DWC3_DEVS_POSSIBLE 32
+
+static DECLARE_BITMAP(dwc3_devs, DWC3_DEVS_POSSIBLE);
+
+int dwc3_get_device_id(void)
+{
+ int id;
+
+again:
+ id = find_first_zero_bit(dwc3_devs, DWC3_DEVS_POSSIBLE);
+ if (id < DWC3_DEVS_POSSIBLE) {
+ int old;
+
+ old = test_and_set_bit(id, dwc3_devs);
+ if (old)
+ goto again;
+ } else {
+ pr_err("dwc3: no space for new device\n");
+ id = -ENOMEM;
+ }
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_device_id);
+
+void dwc3_put_device_id(int id)
+{
+ int ret;
+
+ if (id < 0)
+ return;
+
+ ret = test_bit(id, dwc3_devs);
+ WARN(!ret, "dwc3: ID %d not in use\n", id);
+ clear_bit(id, dwc3_devs);
+}
+EXPORT_SYMBOL_GPL(dwc3_put_device_id);
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
/**
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
@@ -150,7 +204,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int i;
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
evt = dwc->ev_buffs[i];
if (evt) {
dwc3_free_one_event_buffer(dwc, evt);
@@ -162,17 +216,25 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
/**
* dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
 * @dwc: Pointer to our controller context structure
- * @num: number of event buffers to allocate
* @length: size of event buffer
*
 * Returns 0 on success, otherwise a negative errno. In the error case, dwc
 * may have allocated some, but not all, of the requested buffers.
*/
-static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
- unsigned length)
+static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
+ int num;
int i;
+ num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
+ dwc->num_event_buffers = num;
+
+ dwc->ev_buffs = kzalloc(sizeof(*dwc->ev_buffs) * num, GFP_KERNEL);
+ if (!dwc->ev_buffs) {
+ dev_err(dwc->dev, "can't allocate event buffers array\n");
+ return -ENOMEM;
+ }
+
for (i = 0; i < num; i++) {
struct dwc3_event_buffer *evt;
@@ -198,7 +260,7 @@ static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
evt->buf, (unsigned long long) evt->dma,
@@ -221,7 +283,7 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
@@ -264,7 +326,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
ret = -ENODEV;
goto err0;
}
- dwc->revision = reg & DWC3_GSNPSREV_MASK;
+ dwc->revision = reg;
dwc3_core_soft_reset(dwc);
@@ -285,8 +347,32 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
cpu_relax();
} while (true);
- ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
- DWC3_EVENT_BUFFERS_SIZE);
+ dwc3_cache_hwparams(dwc);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_SCALEDOWN(3);
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+
+ switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ default:
+ dev_dbg(dwc->dev, "No power optimization available\n");
+ }
+
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have a bug
+ * where the device fails to connect at SuperSpeed
+ * and falls back to high-speed mode, which causes
+ * the device to enter a Connect/Disconnect loop
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ reg |= DWC3_GCTL_U2RSTECN;
+
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
@@ -299,8 +385,6 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
goto err1;
}
- dwc3_cache_hwparams(dwc);
-
return 0;
err1:
@@ -320,15 +404,17 @@ static void dwc3_core_exit(struct dwc3 *dwc)
static int __devinit dwc3_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
struct dwc3 *dwc;
- void __iomem *regs;
- unsigned int features = id->driver_data;
+
int ret = -ENOMEM;
int irq;
+
+ void __iomem *regs;
void *mem;
+ u8 mode;
+
mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
dev_err(&pdev->dev, "not enough memory\n");
@@ -343,6 +429,8 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err1;
}
+ dwc->res = res;
+
res = request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev));
if (!res) {
@@ -370,6 +458,17 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
dwc->dev = &pdev->dev;
dwc->irq = irq;
+ if (!strncmp("super", maximum_speed, 5))
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+ else if (!strncmp("high", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+ else if (!strncmp("full", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+ else if (!strncmp("low", maximum_speed, 3))
+ dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+ else
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_forbid(&pdev->dev);
@@ -380,13 +479,44 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err3;
}
- if (features & DWC3_HAS_PERIPHERAL) {
+ mode = DWC3_MODE(dwc->hwparams.hwparams0);
+
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialized gadget\n");
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
goto err4;
}
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
+ goto err4;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported mode of operation %d\n", mode);
+ goto err4;
}
+ dwc->mode = mode;
ret = dwc3_debugfs_init(dwc);
if (ret) {
@@ -399,8 +529,21 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
return 0;
err5:
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
err4:
dwc3_core_exit(dwc);
@@ -420,10 +563,8 @@ err0:
static int __devexit dwc3_remove(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res;
- unsigned int features = id->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -432,8 +573,21 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (dwc->mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
dwc3_core_exit(dwc);
release_mem_region(res->start, resource_size(res));
@@ -443,30 +597,15 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id dwc3_id_table[] __devinitconst = {
- {
- .name = "dwc3-omap",
- .driver_data = (DWC3_HAS_PERIPHERAL
- | DWC3_HAS_XHCI
- | DWC3_HAS_OTG),
- },
- {
- .name = "dwc3-pci",
- .driver_data = DWC3_HAS_PERIPHERAL,
- },
- { }, /* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(platform, dwc3_id_table);
-
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = __devexit_p(dwc3_remove),
.driver = {
.name = "dwc3",
},
- .id_table = dwc3_id_table,
};
+MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
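A minimal standalone sketch of the prefix matching the new maximum_speed handling in dwc3_probe() performs, falling back to SuperSpeed for unrecognized strings; the parse_maximum_speed helper and the speed enum below are invented for illustration and are not part of the driver:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the DWC3_DCFG_* speed encodings. */
enum speed { SPEED_SUPER, SPEED_HIGH, SPEED_FULL, SPEED_LOW };

/* Prefix-match a "maximum_speed" style string, defaulting to SuperSpeed. */
static enum speed parse_maximum_speed(const char *s)
{
	if (!strncmp(s, "super", 5))
		return SPEED_SUPER;
	if (!strncmp(s, "high", 4))
		return SPEED_HIGH;
	if (!strncmp(s, "full", 4))
		return SPEED_FULL;
	if (!strncmp(s, "low", 3))
		return SPEED_LOW;
	return SPEED_SUPER;	/* unknown strings fall back to SuperSpeed */
}

int main(void)
{
	const char *tests[] = { "super", "high", "full", "low", "bogus" };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-5s -> %d\n", tests[i], parse_maximum_speed(tests[i]));
	return 0;
}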
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29a8e1679e1..9e57f8e9bf1 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -41,6 +41,7 @@
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
@@ -52,7 +53,6 @@
/* Global constants */
#define DWC3_ENDPOINTS_NUM 32
-#define DWC3_EVENT_BUFFERS_NUM 2
#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE
#define DWC3_EVENT_TYPE_MASK 0xfe
@@ -153,6 +153,7 @@
#define DWC3_GCTL_CLK_PIPEHALF (2)
#define DWC3_GCTL_CLK_MASK (3)
+#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
#define DWC3_GCTL_PRTCAPDIR(n) (n << 12)
#define DWC3_GCTL_PRTCAP_HOST 1
#define DWC3_GCTL_PRTCAP_DEVICE 2
@@ -347,6 +348,7 @@ struct dwc3_ep {
u32 free_slot;
u32 busy_slot;
const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
unsigned flags;
@@ -536,6 +538,31 @@ struct dwc3_hwparams {
u32 hwparams8;
};
+/* HWPARAMS0 */
+#define DWC3_MODE(n) ((n) & 0x7)
+
+#define DWC3_MODE_DEVICE 0
+#define DWC3_MODE_HOST 1
+#define DWC3_MODE_DRD 2
+#define DWC3_MODE_HUB 3
+
+/* HWPARAMS1 */
+#define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+struct dwc3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct dwc3_ep *dep;
+
+ u8 epnum;
+ struct dwc3_trb_hw *trb;
+ dma_addr_t trb_dma;
+
+ unsigned direction:1;
+ unsigned mapped:1;
+ unsigned queued:1;
+};
+
/**
* struct dwc3 - representation of our controller
* @ctrl_req: usb control request which is used for ep0
@@ -549,19 +576,24 @@ struct dwc3_hwparams {
* @ep0_bounce_addr: dma address of ep0_bounce
* @lock: for synchronizing
* @dev: pointer to our struct device
+ * @xhci: pointer to our xHCI child
* @event_buffer_list: a list of event buffers
* @gadget: device side representation of the peripheral controller
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
* @irq: IRQ number
+ * @num_event_buffers: calculated number of event buffers
+ * @u1u2: only used on revisions <1.83a for workaround
+ * @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
+ * @mode: mode of operation
* @is_selfpowered: true when we are selfpowered
* @three_stage_setup: set if we perform a three phase setup
- * @ep0_status_pending: ep0 status response without a req is pending
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @start_config_issued: true when StartConfig command has been issued
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @ep0_next_event: hold the next expected event
* @ep0state: state of endpoint zero
* @link_state: link state
@@ -579,12 +611,15 @@ struct dwc3 {
dma_addr_t ep0_trb_addr;
dma_addr_t setup_buf_addr;
dma_addr_t ep0_bounce_addr;
- struct usb_request ep0_usb_req;
+ struct dwc3_request ep0_usb_req;
/* device lock */
spinlock_t lock;
struct device *dev;
- struct dwc3_event_buffer *ev_buffs[DWC3_EVENT_BUFFERS_NUM];
+ struct platform_device *xhci;
+ struct resource *res;
+
+ struct dwc3_event_buffer **ev_buffs;
struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
struct usb_gadget gadget;
@@ -595,7 +630,11 @@ struct dwc3 {
int irq;
+ u32 num_event_buffers;
+ u32 u1u2;
+ u32 maximum_speed;
u32 revision;
+ u32 mode;
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
@@ -607,10 +646,11 @@ struct dwc3 {
unsigned is_selfpowered:1;
unsigned three_stage_setup:1;
- unsigned ep0_status_pending:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned start_config_issued:1;
+ unsigned setup_packet_pending:1;
+ unsigned delayed_status:1;
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -765,4 +805,16 @@ union dwc3_event {
#define DWC3_HAS_XHCI BIT(1)
#define DWC3_HAS_OTG BIT(3)
+/* prototypes */
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
+
+int dwc3_host_init(struct dwc3 *dwc);
+void dwc3_host_exit(struct dwc3 *dwc);
+
+int dwc3_gadget_init(struct dwc3 *dwc);
+void dwc3_gadget_exit(struct dwc3 *dwc);
+
+extern int dwc3_get_device_id(void);
+extern void dwc3_put_device_id(int id);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
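The DWC3_MODE(), DWC3_NUM_INT() and DWC3_GCTL_PRTCAP() helpers added above are plain mask-and-shift bitfield extractors. A small self-contained sketch with the same shapes; the register values below are made up purely to exercise the macros:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same mask-and-shift shapes as the core.h macros above. */
#define MODE(n)		((n) & 0x7)
#define NUM_INT(n)	(((n) & (0x3f << 15)) >> 15)
#define PRTCAP(n)	(((n) & (3 << 12)) >> 12)

int main(void)
{
	uint32_t hwparams0 = 0x0000000a;	/* bits [2:0] = 2 (DRD) */
	uint32_t hwparams1 = 4u << 15;		/* bits [20:15] = 4 interrupts */
	uint32_t gctl = 1u << 12;		/* bits [13:12] = 1 (host) */

	assert(MODE(hwparams0) == 2);
	assert(NUM_INT(hwparams1) == 4);
	assert(PRTCAP(gctl) == 1);
	printf("mode=%u num_int=%u prtcap=%u\n", (unsigned) MODE(hwparams0),
	       (unsigned) NUM_INT(hwparams1), (unsigned) PRTCAP(gctl));
	return 0;
}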
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index da1ad77d8d5..433c97c15fc 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -44,17 +44,12 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
-
-struct dwc3_register {
- const char *name;
- u32 offset;
-};
+#include "debug.h"
#define dump_register(nm) \
{ \
@@ -62,7 +57,7 @@ struct dwc3_register {
.offset = DWC3_ ##nm, \
}
-static const struct dwc3_register dwc3_regs[] = {
+static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
dump_register(GTXTHRCFG),
@@ -382,15 +377,10 @@ static const struct dwc3_register dwc3_regs[] = {
static int dwc3_regdump_show(struct seq_file *s, void *unused)
{
struct dwc3 *dwc = s->private;
- int i;
seq_printf(s, "DesignWare USB3 Core Register Dump\n");
-
- for (i = 0; i < ARRAY_SIZE(dwc3_regs); i++) {
- seq_printf(s, "%-20s : %08x\n", dwc3_regs[i].name,
- dwc3_readl(dwc->regs, dwc3_regs[i].offset));
- }
-
+ debugfs_print_regs32(s, dwc3_regs, ARRAY_SIZE(dwc3_regs),
+ dwc->regs, "");
return 0;
}
@@ -405,6 +395,75 @@ static const struct file_operations dwc3_regdump_fops = {
.release = single_release,
};
+static int dwc3_mode_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (DWC3_GCTL_PRTCAP(reg)) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ seq_printf(s, "host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ seq_printf(s, "device\n");
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ seq_printf(s, "OTG\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ }
+
+ return 0;
+}
+
+static int dwc3_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_mode_show, inode->i_private);
+}
+
+static ssize_t dwc3_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 mode = 0;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4))
+ mode |= DWC3_GCTL_PRTCAP_HOST;
+
+ if (!strncmp(buf, "device", 6))
+ mode |= DWC3_GCTL_PRTCAP_DEVICE;
+
+ if (!strncmp(buf, "otg", 3))
+ mode |= DWC3_GCTL_PRTCAP_OTG;
+
+ if (mode) {
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_set_mode(dwc, mode);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ return count;
+}
+
+static const struct file_operations dwc3_mode_fops = {
+ .open = dwc3_mode_open,
+ .write = dwc3_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -412,7 +471,7 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
int ret;
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
- if (IS_ERR(root)){
+ if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto err0;
}
@@ -425,6 +484,14 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
ret = PTR_ERR(file);
goto err1;
}
+
+ file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_mode_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
return 0;
err1:
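The new debugfs "mode" file prints the current GCTL.PRTCAPDIR setting as a string and accepts "host"/"device"/"otg" writes. A tiny sketch of the read-side decoding, using the PRTCAP encodings from core.h; the prtcap_to_string helper is invented for illustration:

#include <stdio.h>

/* PRTCAP encodings, matching DWC3_GCTL_PRTCAP_* in core.h. */
enum { PRTCAP_HOST = 1, PRTCAP_DEVICE = 2, PRTCAP_OTG = 3 };

/* Mirror of what dwc3_mode_show() prints for each decoded value. */
static const char *prtcap_to_string(unsigned prtcap)
{
	switch (prtcap) {
	case PRTCAP_HOST:	return "host";
	case PRTCAP_DEVICE:	return "device";
	case PRTCAP_OTG:	return "OTG";
	default:		return "UNKNOWN";
	}
}

int main(void)
{
	for (unsigned v = 0; v <= 3; v++)
		printf("%u -> %s\n", v, prtcap_to_string(v));
	return 0;
}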
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 062552b5fc8..3274ac8f120 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include "core.h"
#include "io.h"
/*
@@ -200,6 +201,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct resource *res;
+ int devid;
int ret = -ENOMEM;
int irq;
@@ -236,16 +238,20 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
- dwc3 = platform_device_alloc("dwc3-omap", -1);
+ devid = dwc3_get_device_id();
+ if (devid < 0)
+ goto err2;
+
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
- goto err2;
+ goto err3;
}
context = kzalloc(resource_size(res), GFP_KERNEL);
if (!context) {
dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
- goto err3;
+ goto err4;
}
spin_lock_init(&omap->lock);
@@ -299,7 +305,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
- goto err4;
+ goto err5;
}
/* enable all IRQs */
@@ -322,26 +328,29 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
- goto err5;
+ goto err6;
}
ret = platform_device_add(dwc3);
if (ret) {
dev_err(&pdev->dev, "failed to register dwc3 device\n");
- goto err5;
+ goto err6;
}
return 0;
-err5:
+err6:
free_irq(omap->irq, omap);
-err4:
+err5:
kfree(omap->context);
-err3:
+err4:
platform_device_put(dwc3);
+err3:
+ dwc3_put_device_id(devid);
+
err2:
iounmap(base);
@@ -358,6 +367,7 @@ static int __devexit dwc3_omap_remove(struct platform_device *pdev)
platform_device_unregister(omap->dwc3);
+ dwc3_put_device_id(omap->dwc3->id);
free_irq(omap->irq, omap);
iounmap(omap->base);
@@ -384,18 +394,9 @@ static struct platform_driver dwc3_omap_driver = {
},
};
+module_platform_driver(dwc3_omap_driver);
+
+MODULE_ALIAS("platform:omap-dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
-
-static int __devinit dwc3_omap_init(void)
-{
- return platform_driver_register(&dwc3_omap_driver);
-}
-module_init(dwc3_omap_init);
-
-static void __exit dwc3_omap_exit(void)
-{
- platform_driver_unregister(&dwc3_omap_driver);
-}
-module_exit(dwc3_omap_exit);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index f77c0004268..64e1f7c67b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -42,52 +42,17 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "core.h"
+
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define DWC3_PCI_DEVS_POSSIBLE 32
-
struct dwc3_pci {
struct device *dev;
struct platform_device *dwc3;
};
-static DECLARE_BITMAP(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
-
-static int dwc3_pci_get_device_id(struct dwc3_pci *glue)
-{
- int id;
-
-again:
- id = find_first_zero_bit(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
- if (id < DWC3_PCI_DEVS_POSSIBLE) {
- int old;
-
- old = test_and_set_bit(id, dwc3_pci_devs);
- if (old)
- goto again;
- } else {
- dev_err(glue->dev, "no space for new device\n");
- id = -ENOMEM;
- }
-
- return 0;
-}
-
-static void dwc3_pci_put_device_id(struct dwc3_pci *glue, int id)
-{
- int ret;
-
- if (id < 0)
- return;
-
- ret = test_bit(id, dwc3_pci_devs);
- WARN(!ret, "Device: %s\nID %d not in use\n",
- dev_driver_string(glue->dev), id);
- clear_bit(id, dwc3_pci_devs);
-}
-
static int __devinit dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -114,11 +79,11 @@ static int __devinit dwc3_pci_probe(struct pci_dev *pci,
pci_set_power_state(pci, PCI_D0);
pci_set_master(pci);
- devid = dwc3_pci_get_device_id(glue);
+ devid = dwc3_get_device_id();
if (devid < 0)
goto err2;
- dwc3 = platform_device_alloc("dwc3-pci", devid);
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pci->dev, "couldn't allocate dwc3 device\n");
goto err3;
@@ -163,13 +128,13 @@ err4:
platform_device_put(dwc3);
err3:
- dwc3_pci_put_device_id(glue, devid);
+ dwc3_put_device_id(devid);
err2:
pci_disable_device(pci);
err1:
- kfree(pci);
+ kfree(glue);
err0:
return ret;
@@ -179,7 +144,7 @@ static void __devexit dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *glue = pci_get_drvdata(pci);
- dwc3_pci_put_device_id(glue, glue->dwc3->id);
+ dwc3_put_device_id(glue->dwc3->id);
platform_device_unregister(glue->dwc3);
pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
@@ -196,7 +161,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
static struct pci_driver dwc3_pci_driver = {
- .name = "pci-dwc3",
+ .name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = __devexit_p(dwc3_pci_remove),
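Both glue layers now share dwc3_get_device_id()/dwc3_put_device_id() instead of the per-glue allocator the hunk above removes (that version used find_first_zero_bit()/test_and_set_bit() for atomicity). A minimal single-threaded sketch of the same lowest-free-id idea; the names below are illustrative, not the kernel helpers:

#include <stdio.h>

#define DEVS_POSSIBLE 32

static unsigned long dev_bitmap;	/* one bit per allocated id */

/* Allocate the lowest free id, or -1 if all ids are taken. */
static int get_device_id(void)
{
	for (int id = 0; id < DEVS_POSSIBLE; id++) {
		if (!(dev_bitmap & (1ul << id))) {
			dev_bitmap |= 1ul << id;
			return id;
		}
	}
	return -1;
}

/* Return a previously allocated id to the pool. */
static void put_device_id(int id)
{
	if (id >= 0 && id < DEVS_POSSIBLE)
		dev_bitmap &= ~(1ul << id);
}

int main(void)
{
	int a = get_device_id();	/* 0 */
	int b = get_device_id();	/* 1 */

	put_device_id(a);
	printf("a=%d b=%d reused=%d\n", a, b, get_device_id());	/* reused=0 */
	return 0;
}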
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 69a4e43ddf5..2f51de57593 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -48,13 +48,13 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
-static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);
static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
@@ -125,6 +125,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
struct dwc3_request *req)
{
+ struct dwc3 *dwc = dep->dwc;
+ u32 type;
int ret = 0;
req->request.actual = 0;
@@ -143,9 +145,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* IRQ we were waiting for is long gone.
*/
if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- struct dwc3 *dwc = dep->dwc;
unsigned direction;
- u32 type;
direction = !!(dep->flags & DWC3_EP0_DIR_IN);
@@ -165,6 +165,13 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
req->request.dma, req->request.length, type);
dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
DWC3_EP0_DIR_IN);
+ } else if (dwc->delayed_status) {
+ dwc->delayed_status = false;
+
+ if (dwc->ep0state == EP0_STATUS_PHASE)
+ dwc3_ep0_do_control_status(dwc, 1);
+ else
+ dev_dbg(dwc->dev, "too early for delayed status\n");
}
return ret;
@@ -190,9 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
}
/* we share one TRB for ep0/1 */
- if (!list_empty(&dwc->eps[0]->request_list) ||
- !list_empty(&dwc->eps[1]->request_list) ||
- dwc->ep0_status_pending) {
+ if (!list_empty(&dep->request_list)) {
ret = -EBUSY;
goto out;
}
@@ -214,8 +219,9 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_ep *dep = dwc->eps[0];
/* stall is always issued on EP0 */
- __dwc3_gadget_ep_set_halt(dwc->eps[0], 1);
- dwc->eps[0]->flags = DWC3_EP_ENABLED;
+ __dwc3_gadget_ep_set_halt(dep, 1);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
if (!list_empty(&dep->request_list)) {
struct dwc3_request *req;
@@ -254,17 +260,14 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
return NULL;
}
-static void dwc3_ep0_send_status_response(struct dwc3 *dwc)
+static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
- dwc3_ep0_start_trans(dwc, 1, dwc->setup_buf_addr,
- dwc->ep0_usb_req.length,
- DWC3_TRBCTL_CONTROL_DATA);
}
-
/*
* ch 9.4.5
*/
-static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+static int dwc3_ep0_handle_status(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl)
{
struct dwc3_ep *dep;
u32 recip;
@@ -291,7 +294,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
case USB_RECIP_ENDPOINT:
dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
if (!dep)
- return -EINVAL;
+ return -EINVAL;
if (dep->flags & DWC3_EP_STALL)
usb_status = 1 << USB_ENDPOINT_HALT;
@@ -302,10 +305,14 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
response_pkt = (__le16 *) dwc->setup_buf;
*response_pkt = cpu_to_le16(usb_status);
- dwc->ep0_usb_req.length = sizeof(*response_pkt);
- dwc->ep0_status_pending = 1;
- return 0;
+ dep = dwc->eps[0];
+ dwc->ep0_usb_req.dep = dep;
+ dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
+ dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
+ dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+
+ return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
@@ -396,8 +403,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_RECIP_ENDPOINT:
switch (wValue) {
case USB_ENDPOINT_HALT:
-
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ dep = dwc3_wIndex_to_dep(dwc, wIndex);
if (!dep)
return -EINVAL;
ret = __dwc3_gadget_ep_set_halt(dep, set);
@@ -422,8 +428,15 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
u32 reg;
addr = le16_to_cpu(ctrl->wValue);
- if (addr > 127)
+ if (addr > 127) {
+ dev_dbg(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
+ }
+
+ if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
+ dev_dbg(dwc->dev, "trying to set address when configured\n");
+ return -EINVAL;
+ }
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
@@ -473,8 +486,10 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
if (!cfg)
dwc->dev_state = DWC3_ADDRESS_STATE;
break;
+ default:
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -537,6 +552,9 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
else
ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ dwc->delayed_status = true;
+
if (ret >= 0)
return;
@@ -550,27 +568,21 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct dwc3_request *r = NULL;
struct usb_request *ur;
struct dwc3_trb trb;
- struct dwc3_ep *dep;
+ struct dwc3_ep *ep0;
u32 transferred;
u8 epnum;
epnum = event->endpoint_number;
- dep = dwc->eps[epnum];
+ ep0 = dwc->eps[0];
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
- if (!dwc->ep0_status_pending) {
- r = next_request(&dwc->eps[0]->request_list);
- ur = &r->request;
- } else {
- ur = &dwc->ep0_usb_req;
- dwc->ep0_status_pending = 0;
- }
+ r = next_request(&ep0->request_list);
+ ur = &r->request;
dwc3_trb_to_nat(dwc->ep0_trb, &trb);
if (dwc->ep0_bounced) {
- struct dwc3_ep *ep0 = dwc->eps[0];
transferred = min_t(u32, ur->length,
ep0->endpoint.maxpacket - trb.length);
@@ -591,7 +603,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
* seems to be case when req.length > maxpacket. Could it be?
*/
if (r)
- dwc3_gadget_giveback(dep, r, 0);
+ dwc3_gadget_giveback(ep0, r, 0);
}
}
@@ -619,6 +631,7 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
dep->flags &= ~DWC3_EP_BUSY;
+ dwc->setup_packet_pending = false;
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
@@ -643,7 +656,6 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -655,12 +667,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
int ret;
dep = dwc->eps[0];
- dwc->ep0state = EP0_DATA_PHASE;
-
- if (dwc->ep0_status_pending) {
- dwc3_ep0_send_status_response(dwc);
- return;
- }
if (list_empty(&dep->request_list)) {
dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
@@ -674,7 +680,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
req = next_request(&dep->request_list);
req->direction = !!event->endpoint_number;
- dwc->ep0state = EP0_DATA_PHASE;
if (req->request.length == 0) {
ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
dwc->ctrl_req_addr, 0,
@@ -706,35 +711,79 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
WARN_ON(ret < 0);
}
-static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event)
+static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
+ struct dwc3 *dwc = dep->dwc;
u32 type;
- int ret;
-
- dwc->ep0state = EP0_STATUS_PHASE;
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ return dwc3_ep0_start_trans(dwc, dep->number,
dwc->ctrl_req_addr, 0, type);
+}
- WARN_ON(ret < 0);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ WARN_ON(dwc3_ep0_start_control_status(dep));
}
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
+ dwc->setup_packet_pending = true;
+
+ /*
+ * This part is very tricky: If we have just handled
+ * XferNotReady(Setup) and we're now expecting a
+ * XferComplete but, instead, we receive another
+ * XferNotReady(Setup), we should STALL and restart
+ * the state machine.
+ *
+ * In all other cases, we just continue waiting
+ * for the XferComplete event.
+ *
+ * We are a little bit unsafe here because we're
+ * not trying to ensure that the last event was, indeed,
+ * XferNotReady(Setup).
+ *
+ * Still, we don't expect any condition where that
+ * should happen and, even if it does, it would be
+ * another error condition.
+ */
+ if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
+ switch (event->status) {
+ case DEPEVT_STATUS_CONTROL_SETUP:
+ dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
+ dwc3_ep0_stall_and_restart(dwc);
+ break;
+ case DEPEVT_STATUS_CONTROL_DATA:
+ /* FALLTHROUGH */
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ /* FALLTHROUGH */
+ default:
+ dev_vdbg(dwc->dev, "waiting for XferComplete\n");
+ }
+
+ return;
+ }
+
switch (event->status) {
case DEPEVT_STATUS_CONTROL_SETUP:
dev_vdbg(dwc->dev, "Control Setup\n");
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+
dwc3_ep0_do_control_setup(dwc, event);
break;
case DEPEVT_STATUS_CONTROL_DATA:
dev_vdbg(dwc->dev, "Control Data\n");
+ dwc->ep0state = EP0_DATA_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -764,6 +813,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
case DEPEVT_STATUS_CONTROL_STATUS:
dev_vdbg(dwc->dev, "Control Status\n");
+ dwc->ep0state = EP0_STATUS_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -772,12 +823,19 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
dwc3_ep0_stall_and_restart(dwc);
return;
}
- dwc3_ep0_do_control_status(dwc, event);
+
+ if (dwc->delayed_status) {
+ WARN_ON_ONCE(event->endpoint_number != 1);
+ dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
+ return;
+ }
+
+ dwc3_ep0_do_control_status(dwc, event->endpoint_number);
}
}
void dwc3_ep0_interrupt(struct dwc3 *dwc,
- const const struct dwc3_event_depevt *event)
+ const struct dwc3_event_depevt *event)
{
u8 epnum = event->endpoint_number;
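The ep0 changes above add a delayed-status path: when the function driver returns USB_GADGET_DELAYED_STATUS from setup(), the status phase is postponed until the driver queues its ep0 request. A compact, purely illustrative sketch of that hand-off (the struct, function names and printf output are stand-ins, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Control transfer phases, mirroring the ep0state values used above. */
enum ep0_state { SETUP_PHASE, DATA_PHASE, STATUS_PHASE };

struct ctrl {
	enum ep0_state state;
	bool delayed_status;	/* setup() asked to delay the status phase */
};

/* XferNotReady(Status): either start the status phase or defer it. */
static void on_status_not_ready(struct ctrl *c)
{
	c->state = STATUS_PHASE;
	if (c->delayed_status) {
		printf("status deferred, waiting for the gadget driver\n");
		return;
	}
	printf("starting status phase now\n");
}

/* The gadget driver finally queues its ep0 request: kick the pending status. */
static void on_ep0_queue(struct ctrl *c)
{
	if (c->delayed_status && c->state == STATUS_PHASE) {
		c->delayed_status = false;
		printf("starting delayed status phase\n");
	}
}

int main(void)
{
	struct ctrl c = { .state = SETUP_PHASE, .delayed_status = true };

	on_status_not_ready(&c);	/* deferred */
	on_ep0_queue(&c);		/* now the status phase runs */
	return 0;
}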
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 25dbd8614e7..a696bde5322 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -65,6 +65,22 @@ void dwc3_map_buffer_to_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dwc->dev, req->request.sg,
+ req->request.num_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ if (mapped < 0) {
+ dev_err(dwc->dev, "failed to map SGs\n");
+ return;
+ }
+
+ req->request.num_mapped_sgs = mapped;
+ return;
+ }
+
if (req->request.dma == DMA_ADDR_INVALID) {
req->request.dma = dma_map_single(dwc->dev, req->request.buf,
req->request.length, req->direction
@@ -82,6 +98,17 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_mapped_sgs) {
+ req->request.dma = DMA_ADDR_INVALID;
+ dma_unmap_sg(dwc->dev, req->request.sg,
+ req->request.num_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ req->request.num_mapped_sgs = 0;
+ return;
+ }
+
if (req->mapped) {
dma_unmap_single(dwc->dev, req->request.dma,
req->request.length, req->direction
@@ -97,7 +124,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
struct dwc3 *dwc = dep->dwc;
if (req->queued) {
- dep->busy_slot++;
+ if (req->request.num_mapped_sgs)
+ dep->busy_slot += req->request.num_mapped_sgs;
+ else
+ dep->busy_slot++;
+
/*
* Skip LINK TRB. We can't use req->trb and check for
* DWC3_TRBCTL_LINK_TRB because it points the TRB we just
@@ -108,6 +139,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
dep->busy_slot++;
}
list_del(&req->list);
+ req->trb = NULL;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -251,7 +283,8 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -264,7 +297,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_XFER_NOT_READY_EN;
- if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
@@ -317,7 +350,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -329,7 +363,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
if (ret)
return ret;
@@ -343,6 +377,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
dep->desc = desc;
+ dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
@@ -405,6 +440,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->desc = NULL;
+ dep->comp_desc = NULL;
dep->type = 0;
dep->flags = 0;
@@ -473,7 +509,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc);
+ ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -539,6 +575,85 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
kfree(req);
}
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, dma_addr_t dma,
+ unsigned length, unsigned last, unsigned chain)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb_hw *trb_hw;
+ struct dwc3_trb trb;
+
+ unsigned int cur_slot;
+
+ dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+ dep->name, req, (unsigned long long) dma,
+ length, last ? " last" : "",
+ chain ? " chain" : "");
+
+ trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ cur_slot = dep->free_slot;
+ dep->free_slot++;
+
+ /* Skip the LINK-TRB on ISOC */
+ if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->desc))
+ return;
+
+ memset(&trb, 0, sizeof(trb));
+ if (!req->trb) {
+ dwc3_gadget_move_request_queued(req);
+ req->trb = trb_hw;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
+ }
+
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ trb.isp_imi = true;
+ trb.csp = true;
+ } else {
+ trb.chn = chain;
+ trb.lst = last;
+ }
+
+ if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
+ trb.sid_sofn = req->request.stream_id;
+
+ switch (usb_endpoint_type(dep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
+ /* IOC every DWC3_TRB_NUM / 4 so we can refill */
+ if (!(cur_slot % (DWC3_TRB_NUM / 4)))
+ trb.ioc = last;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ trb.trbctl = DWC3_TRBCTL_NORMAL;
+ break;
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ BUG();
+ }
+
+ trb.length = length;
+ trb.bplh = dma;
+ trb.hwo = true;
+
+ dwc3_trb_to_hw(&trb, trb_hw);
+}
+
/*
* dwc3_prepare_trbs - setup TRBs from requests
* @dep: endpoint for which requests are being prepared
@@ -548,18 +663,17 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
* transfers. The functions returns once there are not more TRBs available or
* it run out of requests.
*/
-static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
- bool starting)
+static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
- struct dwc3_request *req, *n, *ret = NULL;
- struct dwc3_trb_hw *trb_hw;
- struct dwc3_trb trb;
+ struct dwc3_request *req, *n;
u32 trbs_left;
+ unsigned int last_one = 0;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
/* the first request must not be queued */
trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
+
/*
* if busy & slot are equal than it is either full or empty. If we are
* starting to proceed requests then we are empty. Otherwise we ar
@@ -567,7 +681,7 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
*/
if (!trbs_left) {
if (!starting)
- return NULL;
+ return;
trbs_left = DWC3_TRB_NUM;
/*
* In case we start from scratch, we queue the ISOC requests
@@ -591,94 +705,62 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
/* The last TRB is a link TRB, not used for xfer */
if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
- return NULL;
+ return;
list_for_each_entry_safe(req, n, &dep->request_list, list) {
- unsigned int last_one = 0;
- unsigned int cur_slot;
+ unsigned length;
+ dma_addr_t dma;
- trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
- cur_slot = dep->free_slot;
- dep->free_slot++;
+ if (req->request.num_mapped_sgs > 0) {
+ struct usb_request *request = &req->request;
+ struct scatterlist *sg = request->sg;
+ struct scatterlist *s;
+ int i;
- /* Skip the LINK-TRB on ISOC */
- if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->desc))
- continue;
+ for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ unsigned chain = true;
- dwc3_gadget_move_request_queued(req);
- memset(&trb, 0, sizeof(trb));
- trbs_left--;
+ length = sg_dma_len(s);
+ dma = sg_dma_address(s);
- /* Is our TRB pool empty? */
- if (!trbs_left)
- last_one = 1;
- /* Is this the last request? */
- if (list_empty(&dep->request_list))
- last_one = 1;
+ if (i == (request->num_mapped_sgs - 1)
+ || sg_is_last(s)) {
+ last_one = true;
+ chain = false;
+ }
- /*
- * FIXME we shouldn't need to set LST bit always but we are
- * facing some weird problem with the Hardware where it doesn't
- * complete even though it has been previously started.
- *
- * While we're debugging the problem, as a workaround to
- * multiple TRBs handling, use only one TRB at a time.
- */
- last_one = 1;
+ trbs_left--;
+ if (!trbs_left)
+ last_one = true;
- req->trb = trb_hw;
- if (!ret)
- ret = req;
+ if (last_one)
+ chain = false;
- trb.bplh = req->request.dma;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, chain);
- if (usb_endpoint_xfer_isoc(dep->desc)) {
- trb.isp_imi = true;
- trb.csp = true;
+ if (last_one)
+ break;
+ }
} else {
- trb.lst = last_one;
- }
+ dma = req->request.dma;
+ length = req->request.length;
+ trbs_left--;
- if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
- trb.sid_sofn = req->request.stream_id;
-
- switch (usb_endpoint_type(dep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
- break;
+ if (!trbs_left)
+ last_one = 1;
- case USB_ENDPOINT_XFER_ISOC:
- trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /* Is this the last request? */
+ if (list_is_last(&req->list, &dep->request_list))
+ last_one = 1;
- /* IOC every DWC3_TRB_NUM / 4 so we can refill */
- if (!(cur_slot % (DWC3_TRB_NUM / 4)))
- trb.ioc = last_one;
- break;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, false);
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_INT:
- trb.trbctl = DWC3_TRBCTL_NORMAL;
- break;
- default:
- /*
- * This is only possible with faulty memory because we
- * checked it already :)
- */
- BUG();
+ if (last_one)
+ break;
}
-
- trb.length = req->request.length;
- trb.hwo = true;
-
- dwc3_trb_to_hw(&trb, trb_hw);
- req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
-
- if (last_one)
- break;
}
-
- return ret;
}
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
@@ -707,11 +789,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
/* req points to the first request which will be sent */
req = next_request(&dep->req_queued);
} else {
+ dwc3_prepare_trbs(dep, start_new);
+
/*
* req points to the first request where HWO changed
* from 0 to 1
*/
- req = dwc3_prepare_trbs(dep, start_new);
+ req = next_request(&dep->req_queued);
}
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -745,8 +829,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
dep->flags |= DWC3_EP_BUSY;
dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
dep->number);
- if (!dep->res_trans_idx)
- printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
+
+ WARN_ON_ONCE(!dep->res_trans_idx);
+
return 0;
}
@@ -1155,35 +1240,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
dwc->gadget.dev.driver = &driver->driver;
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-
- reg &= ~DWC3_GCTL_SCALEDOWN(3);
- reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
- reg &= ~DWC3_GCTL_DISSCRAMBLE;
- reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
-
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams0)) {
- case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
- reg &= ~DWC3_GCTL_DSBLCLKGTNG;
- break;
- default:
- dev_dbg(dwc->dev, "No power optimization available\n");
- }
-
- /*
- * WORKAROUND: DWC3 revisions <1.90a have a bug
- * when The device fails to connect at SuperSpeed
- * and falls back to high-speed mode which causes
- * the device to enter in a Connect/Disconnect loop
- */
- if (dwc->revision < DWC3_REVISION_190A)
- reg |= DWC3_GCTL_U2RSTECN;
-
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
- reg |= DWC3_DCFG_SUPERSPEED;
+ reg |= dwc->maximum_speed;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
dwc->start_config_issued = false;
@@ -1192,14 +1251,14 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1290,11 +1349,10 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
&dwc->gadget.ep_list);
ret = dwc3_alloc_trb_pool(dep);
- if (ret) {
- dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
+ if (ret)
return ret;
- }
}
+
INIT_LIST_HEAD(&dep->request_list);
INIT_LIST_HEAD(&dep->req_queued);
}
@@ -1334,8 +1392,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
do {
req = next_request(&dep->req_queued);
- if (!req)
- break;
+ if (!req) {
+ WARN_ON_ONCE(1);
+ return 1;
+ }
dwc3_trb_to_nat(req->trb, &trb);
@@ -1400,6 +1460,31 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_BUSY;
dep->res_trans_idx = 0;
}
+
+ /*
+ * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+ * See dwc3_gadget_linksts_change_interrupt() for 1st half.
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ u32 reg;
+ int i;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (!list_empty(&dep->req_queued))
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= dwc->u1u2;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc->u1u2 = 0;
+ }
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1639,6 +1724,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
}
static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
@@ -1675,6 +1761,40 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dev_vdbg(dwc->dev, "%s\n", __func__);
+ /*
+ * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ * would cause a missing Disconnect Event if there's a
+ * pending Setup Packet in the FIFO.
+ *
+ * There's no suggested workaround on the official Bug
+ * report, which states that "unless the driver/application
+ * is doing any special handling of a disconnect event,
+ * there is no functional issue".
+ *
+ * Unfortunately, it turns out that we _do_ some special
+ * handling of a disconnect event, namely complete all
+ * pending transfers, notify gadget driver of the
+ * disconnection, and so on.
+ *
+ * Our suggested workaround is to follow the Disconnect
+ * Event steps here, instead, based on a setup_packet_pending
+ * flag. Such flag gets set whenever we have a XferNotReady
+ * event on EP0 and gets cleared on XferComplete for the
+ * same endpoint.
+ *
+ * Refers to:
+ *
+ * STAR#9000466709: RTL: Device : Disconnect event not
+ * generated if setup packet pending in FIFO
+ */
+ if (dwc->revision < DWC3_REVISION_188A) {
+ if (dwc->setup_packet_pending)
+ dwc3_gadget_disconnect_interrupt(dwc);
+ }
+
+ /* after reset -> Default State */
+ dwc->dev_state = DWC3_DEFAULT_STATE;
+
/* Enable PHYs */
dwc3_gadget_usb2_phy_power(dwc, true);
dwc3_gadget_usb3_phy_power(dwc, true);
@@ -1755,6 +1875,22 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
switch (speed) {
case DWC3_DCFG_SUPERSPEED:
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have an issue which
+ * would cause a missing USB3 Reset event.
+ *
+ * In such situations, we should force a USB3 Reset
+ * event by calling our dwc3_gadget_reset_interrupt()
+ * routine.
+ *
+ * Refers to:
+ *
+ * STAR#9000483510: RTL: SS : USB3 reset event may
+ * not be generated always when the link enters poll
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ dwc3_gadget_reset_interrupt(dwc);
+
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
@@ -1781,14 +1917,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -1818,8 +1954,55 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
- /* The fith bit says SuperSpeed yes or no. */
- dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ /*
+ * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
+ * on the link partner, the USB session might go through multiple
+ * entry/exit cycles of low power states before a transfer takes place.
+ *
+ * Due to this problem, we might experience lower throughput. The
+ * suggested workaround is to disable DCTL[12:9] bits if we're
+ * transitioning from U1/U2 to U0 and enable those bits again
+ * after a transfer completes and there are no pending transfers
+ * on any of the enabled endpoints.
+ *
+ * This is the first half of that workaround.
+ *
+ * Refers to:
+ *
+ * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
+ * core send LGO_Ux entering U0
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ if (next == DWC3_LINK_STATE_U0) {
+ u32 u1u2;
+ u32 reg;
+
+ switch (dwc->link_state) {
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_U2:
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ u1u2 = reg & (DWC3_DCTL_INITU2ENA
+ | DWC3_DCTL_ACCEPTU2ENA
+ | DWC3_DCTL_INITU1ENA
+ | DWC3_DCTL_ACCEPTU1ENA);
+
+ if (!dwc->u1u2)
+ dwc->u1u2 = reg & u1u2;
+
+ reg &= ~u1u2;
+
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ dwc->link_state = next;
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
@@ -1925,7 +2108,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
spin_lock(&dwc->lock);
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
status = dwc3_process_event_buf(dwc, i);
@@ -1986,9 +2169,10 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
dev_set_name(&dwc->gadget.dev, "gadget");
dwc->gadget.ops = &dwc3_gadget_ops;
- dwc->gadget.is_dualspeed = true;
+ dwc->gadget.max_speed = USB_SPEED_SUPER;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.dev.parent = dwc->dev;
+ dwc->gadget.sg_supported = true;
dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
@@ -2076,7 +2260,6 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
int irq;
- int i;
usb_del_gadget_udc(&dwc->gadget);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
@@ -2084,9 +2267,6 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
free_irq(irq, dwc);
- for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
- __dwc3_gadget_ep_disable(dwc->eps[i]);
-
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
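The gadget.c rework factors per-TRB setup into dwc3_prepare_one_trb() and walks mapped scatter-gather entries with for_each_sg(), chaining every TRB except the final one (or the one that exhausts the ring). A standalone sketch of just that chunking policy; the struct names and the prepare_descs() helper are invented, and the real code additionally handles the ISOC link TRB, IOC bits and TRB types:

#include <stdbool.h>
#include <stdio.h>

/* Made-up segment/descriptor types standing in for sg entries and TRBs. */
struct seg  { unsigned long dma; unsigned len; };
struct desc { unsigned long dma; unsigned len; bool chain; bool last; };

/*
 * Fill descriptors from a segment list: every segment but the final one is
 * chained to the next, and the final one (or the one that uses up the last
 * ring slot) is marked "last".
 */
static int prepare_descs(const struct seg *segs, int nsegs,
			 struct desc *ring, int ring_slots)
{
	int used;

	for (used = 0; used < nsegs && used < ring_slots; used++) {
		bool last = (used == nsegs - 1) || (used == ring_slots - 1);

		ring[used] = (struct desc){
			.dma	= segs[used].dma,
			.len	= segs[used].len,
			.chain	= !last,
			.last	= last,
		};
		if (last) {
			used++;
			break;
		}
	}
	return used;	/* number of descriptors actually filled */
}

int main(void)
{
	struct seg segs[] = { { 0x1000, 512 }, { 0x2000, 512 }, { 0x3000, 100 } };
	struct desc ring[8];
	int n = prepare_descs(segs, 3, ring, 8);

	for (int i = 0; i < n; i++)
		printf("desc %d: len=%u chain=%d last=%d\n",
		       i, ring[i].len, ring[i].chain, ring[i].last);
	return 0;
}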
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 71145a449d9..d97f467d41c 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -79,19 +79,6 @@ struct dwc3_gadget_ep_cmd_params {
/* -------------------------------------------------------------------------- */
-struct dwc3_request {
- struct usb_request request;
- struct list_head list;
- struct dwc3_ep *dep;
-
- u8 epnum;
- struct dwc3_trb_hw *trb;
- dma_addr_t trb_dma;
-
- unsigned direction:1;
- unsigned mapped:1;
- unsigned queued:1;
-};
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
static inline struct dwc3_request *next_request(struct list_head *list)
@@ -110,23 +97,11 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
list_move_tail(&req->list, &dep->req_queued);
}
-#if defined(CONFIG_USB_GADGET_DWC3) || defined(CONFIG_USB_GADGET_DWC3_MODULE)
-int dwc3_gadget_init(struct dwc3 *dwc);
-void dwc3_gadget_exit(struct dwc3 *dwc);
-#else
-static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; }
-static inline void dwc3_gadget_exit(struct dwc3 *dwc) { }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
-{
- return 0;
-}
-#endif
-
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
-void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event);
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
new file mode 100644
index 00000000000..7cfe211b6c3
--- /dev/null
+++ b/drivers/usb/dwc3/host.c
@@ -0,0 +1,102 @@
+/**
+ * host.c - DesignWare USB3 DRD Controller Host Glue
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/platform_device.h>
+
+#include "core.h"
+
+static struct resource generic_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+int dwc3_host_init(struct dwc3 *dwc)
+{
+ struct platform_device *xhci;
+ int ret;
+
+ xhci = platform_device_alloc("xhci", -1);
+ if (!xhci) {
+ dev_err(dwc->dev, "couldn't allocate xHCI device\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
+
+ xhci->dev.parent = dwc->dev;
+ xhci->dev.dma_mask = dwc->dev->dma_mask;
+ xhci->dev.dma_parms = dwc->dev->dma_parms;
+
+ dwc->xhci = xhci;
+
+ /* setup resources */
+ generic_resources[0].start = dwc->irq;
+
+ generic_resources[1].start = dwc->res->start;
+ generic_resources[1].end = dwc->res->start + 0x7fff;
+
+ ret = platform_device_add_resources(xhci, generic_resources,
+ ARRAY_SIZE(generic_resources));
+ if (ret) {
+ dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
+ goto err1;
+ }
+
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register xHCI device\n");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ platform_device_put(xhci);
+
+err0:
+ return ret;
+}
+
+void dwc3_host_exit(struct dwc3 *dwc)
+{
+ platform_device_unregister(dwc->xhci);
+}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index bc957db1ea4..071d561f3e6 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -39,7 +39,7 @@
#ifndef __DRIVERS_USB_DWC3_IO_H
#define __DRIVERS_USB_DWC3_IO_H
-#include <asm/io.h>
+#include <linux/io.h>
static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 23a447373c5..7ecb68a6741 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -15,6 +15,7 @@
menuconfig USB_GADGET
tristate "USB Gadget Support"
+ select NLS
help
USB is a master/slave protocol, organized with one master
host (such as a PC) controlling up to 127 peripheral devices.
@@ -124,7 +125,6 @@ config USB_GADGET_STORAGE_NUM_BUFFERS
#
choice
prompt "USB Peripheral Controller"
- depends on USB_GADGET
help
A USB device uses a controller to talk to its host.
Systems should have only one such upstream link.
@@ -234,7 +234,6 @@ config USB_R8A66597
config USB_RENESAS_USBHS_UDC
tristate 'Renesas USBHS controller'
- depends on SUPERH || ARCH_SHMOBILE
depends on USB_RENESAS_USBHS
select USB_GADGET_DUALSPEED
help
@@ -264,7 +263,6 @@ config USB_PXA27X
config USB_S3C_HSOTG
tristate "S3C HS/OtG USB Device controller"
depends on S3C_DEV_USB_HSOTG
- select USB_GADGET_S3C_HSOTG_PIO
select USB_GADGET_DUALSPEED
help
The Samsung S3C64XX USB2.0 high-speed gadget controller
@@ -310,25 +308,13 @@ config USB_S3C_HSUDC
This driver has been tested on S3C2416 and S3C2450 processors.
-config USB_PXA_U2O
- tristate "PXA9xx Processor USB2.0 controller"
- depends on ARCH_MMP
+config USB_MV_UDC
+ tristate "Marvell USB2.0 Device Controller"
select USB_GADGET_DUALSPEED
help
- PXA9xx Processor series include a high speed USB2.0 device
- controller, which support high speed and full speed USB peripheral.
-
-config USB_GADGET_DWC3
- tristate "DesignWare USB3.0 (DRD) Controller"
- depends on USB_DWC3
- select USB_GADGET_DUALSPEED
- select USB_GADGET_SUPERSPEED
- help
- DesignWare USB3.0 controller is a SuperSpeed USB3.0 Controller
- which can be configured for peripheral-only, host-only, hub-only
- and Dual-Role operation. This Controller was first integrated into
- the OMAP5 series of processors. More information about the OMAP5
- version of this controller, refer to http://www.ti.com/omap5.
+ Marvell Socs (including PXA and MMP series) include a high speed
+ USB2.0 OTG controller, which can be configured as high speed or
+ full speed USB peripheral.
#
# Controllers available in both integrated and discrete versions
@@ -544,12 +530,10 @@ endchoice
# Selected by UDC drivers that support high-speed operation.
config USB_GADGET_DUALSPEED
bool
- depends on USB_GADGET
# Selected by UDC drivers that support super-speed operation
config USB_GADGET_SUPERSPEED
bool
- depends on USB_GADGET
depends on USB_GADGET_DUALSPEED
#
@@ -557,7 +541,6 @@ config USB_GADGET_SUPERSPEED
#
choice
tristate "USB Gadget Drivers"
- depends on USB_GADGET
default USB_ETH
help
A Linux "Gadget Driver" talks to the USB Peripheral Controller
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b54ac619089..b7f6eefc392 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
-obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
+obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
mv_udc-y := mv_udc_core.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd..e9a2c5c4445 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->max_speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
@@ -3349,7 +3349,7 @@ static int udc_probe(struct udc *dev)
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.release = gadget_release;
dev->gadget.name = name;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* init registers, interrupts, ... */
startup_registers(dev);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 8efe0fa9228..143a7256b59 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1633,7 +1633,7 @@ static int at91_start(struct usb_gadget_driver *driver,
unsigned long flags;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup) {
DBG("bad parameter.\n");
@@ -1748,7 +1748,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
/* rm9200 needs manual D+ pullup; off by default */
if (cpu_is_at91rm9200()) {
- if (udc->board.pullup_pin <= 0) {
+ if (gpio_is_valid(udc->board.pullup_pin)) {
DBG("no D+ pullup?\n");
retval = -ENODEV;
goto fail0;
@@ -1815,7 +1815,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
DBG("request irq %d failed\n", udc->udp_irq);
goto fail1;
}
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
if (retval < 0) {
DBG("request vbus pin failed\n");
@@ -1859,10 +1859,10 @@ static int __init at91udc_probe(struct platform_device *pdev)
INFO("%s version %s\n", driver_name, DRIVER_VERSION);
return 0;
fail4:
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
free_irq(udc->board.vbus_pin, udc);
fail3:
- if (udc->board.vbus_pin > 0)
+ if (gpio_is_valid(udc->board.vbus_pin))
gpio_free(udc->board.vbus_pin);
fail2:
free_irq(udc->udp_irq, udc);
@@ -1897,7 +1897,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
remove_debug_file(udc);
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
free_irq(udc->board.vbus_pin, udc);
gpio_free(udc->board.vbus_pin);
}
@@ -1941,7 +1941,7 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(udc->udp_irq);
udc->active_suspend = wake;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled && wake)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
enable_irq_wake(udc->board.vbus_pin);
return 0;
}
@@ -1951,7 +1951,7 @@ static int at91udc_resume(struct platform_device *pdev)
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled &&
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
udc->active_suspend)
disable_irq_wake(udc->board.vbus_pin);
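The at91_udc changes above replace "pin > 0" tests with gpio_is_valid(), which also accepts GPIO 0 and treats any negative number as "no GPIO". A trivial sketch of that predicate; GPIO_MAX is an arbitrary stand-in for the architecture's GPIO count:

#include <stdbool.h>
#include <stdio.h>

#define GPIO_MAX 512	/* stand-in for the arch-specific GPIO count */

/* Same shape as gpio_is_valid(): non-negative and below the maximum. */
static bool gpio_is_valid_like(int number)
{
	return number >= 0 && number < GPIO_MAX;
}

int main(void)
{
	int pins[] = { -1, 0, 5, GPIO_MAX };

	for (unsigned i = 0; i < sizeof(pins) / sizeof(pins[0]); i++)
		printf("pin %4d -> %s\n", pins[i],
		       gpio_is_valid_like(pins[i]) ? "valid" : "invalid");
	return 0;
}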
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 271a9d87360..e2fb6d583bd 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1038,7 +1038,7 @@ static struct usba_udc the_udc = {
.gadget = {
.ops = &usba_udc_ops,
.ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
- .is_dualspeed = 1,
+ .max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
.dev = {
.init_name = "gadget",
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 9a0c3979ff4..27e31371842 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -182,6 +182,16 @@ static inline int hw_ep_bit(int num, int dir)
return num + (dir ? 16 : 0);
}
+static int ep_to_bit(int n)
+{
+ int fill = 16 - hw_ep_max / 2;
+
+ if (n >= hw_ep_max / 2)
+ n += fill;
+
+ return n;
+}
+
/**
* hw_aread: reads from register bitfield
* @addr: address relative to bus map
@@ -440,12 +450,13 @@ static int hw_ep_get_halt(int num, int dir)
/**
* hw_test_and_clear_setup_status: test & clear setup status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns setup status
*/
static int hw_test_and_clear_setup_status(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
}
@@ -641,12 +652,13 @@ static int hw_register_write(u16 addr, u32 data)
/**
* hw_test_and_clear_complete: test & clear complete status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns complete status
*/
static int hw_test_and_clear_complete(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
}
@@ -754,8 +766,11 @@ static ssize_t show_device(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
gadget->speed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
+ gadget->max_speed);
+ /* TODO: Scheduled for removal in 3.8. */
n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
- gadget->is_dualspeed);
+ gadget_is_dualspeed(gadget));
n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
gadget->is_otg);
n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
@@ -798,7 +813,7 @@ static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
(driver->function ? driver->function : ""));
n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
- driver->speed);
+ driver->max_speed);
return n;
}
@@ -2563,9 +2578,7 @@ static int ci13xxx_start(struct usb_gadget_driver *driver,
if (driver == NULL ||
bind == NULL ||
driver->setup == NULL ||
- driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL)
+ driver->disconnect == NULL)
return -EINVAL;
else if (udc == NULL)
return -ENODEV;
@@ -2693,8 +2706,6 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
driver->unbind == NULL ||
driver->setup == NULL ||
driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL ||
driver != udc->driver)
return -EINVAL;
@@ -2793,7 +2804,7 @@ static irqreturn_t udc_irq(void)
isr_statistics.pci++;
udc->gadget.speed = hw_port_is_high_speed() ?
USB_SPEED_HIGH : USB_SPEED_FULL;
- if (udc->suspended) {
+ if (udc->suspended && udc->driver->resume) {
spin_unlock(udc->lock);
udc->driver->resume(&udc->gadget);
spin_lock(udc->lock);
@@ -2807,7 +2818,8 @@ static irqreturn_t udc_irq(void)
isr_tr_complete_handler(udc);
}
if (USBi_SLI & intr) {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+ udc->driver->suspend) {
udc->suspended = 1;
spin_unlock(udc->lock);
udc->driver->suspend(&udc->gadget);
@@ -2871,7 +2883,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
udc->gadget.ops = &usb_gadget_ops;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.is_dualspeed = 1;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
udc->gadget.is_otg = 0;
udc->gadget.name = driver->name;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 23707775cb4..f4871e1fac5 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -127,7 +127,7 @@ struct ci13xxx {
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
u32 ep0_dir; /* ep0 direction */
#define ep0out ci13xxx_ep[0]
-#define ep0in ci13xxx_ep[16]
+#define ep0in ci13xxx_ep[hw_ep_max / 2]
u8 remote_wakeup; /* Is remote wakeup feature
enabled by the host? */
u8 suspended; /* suspended by the host */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f71b0787983..a95de6a4a13 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1535,9 +1535,9 @@ composite_resume(struct usb_gadget *gadget)
static struct usb_gadget_driver composite_driver = {
#ifdef CONFIG_USB_GADGET_SUPERSPEED
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
#else
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#endif
.unbind = composite_unbind,
@@ -1584,8 +1584,8 @@ int usb_composite_probe(struct usb_composite_driver *driver,
driver->iProduct = driver->name;
composite_driver.function = (char *) driver->name;
composite_driver.driver.name = driver->name;
- composite_driver.speed = min((u8)composite_driver.speed,
- (u8)driver->max_speed);
+ composite_driver.max_speed =
+ min_t(u8, composite_driver.max_speed, driver->max_speed);
composite = driver;
composite_gadget_bind = bind;
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 6256420089f..19d7bb0df75 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -404,7 +404,7 @@ fail:
static struct usb_gadget_driver dbgp_driver = {
.function = "dbgp",
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = dbgp_unbind,
.setup = dbgp_setup,
.disconnect = dbgp_disconnect,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index ab8f1b488d5..db815c2da7e 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -823,19 +823,18 @@ static int dummy_pullup (struct usb_gadget *_gadget, int value)
if (value && dum->driver) {
if (mod_data.is_super_speed)
- dum->gadget.speed = dum->driver->speed;
+ dum->gadget.speed = dum->driver->max_speed;
else if (mod_data.is_high_speed)
dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
- dum->driver->speed);
+ dum->driver->max_speed);
else
dum->gadget.speed = USB_SPEED_FULL;
dummy_udc_udpate_ep0(dum);
- if (dum->gadget.speed < dum->driver->speed)
+ if (dum->gadget.speed < dum->driver->max_speed)
dev_dbg(udc_dev(dum), "This device can perform faster"
- " if you connect it to a %s port...\n",
- (dum->driver->speed == USB_SPEED_SUPER ?
- "SuperSpeed" : "HighSpeed"));
+ " if you connect it to a %s port...\n",
+ usb_speed_string(dum->driver->max_speed));
}
dum_hcd = gadget_to_dummy_hcd(_gadget);
@@ -898,7 +897,7 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->speed == USB_SPEED_UNKNOWN)
+ if (driver->max_speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
@@ -977,7 +976,7 @@ static int dummy_udc_probe (struct platform_device *pdev)
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.is_dualspeed = 1;
+ dum->gadget.max_speed = USB_SPEED_SUPER;
dev_set_name(&dum->gadget.dev, "gadget");
dum->gadget.dev.parent = &pdev->dev;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 596a0b464e6..753aa0683ac 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@ ep_matches (
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
- /* Update the ep_comp descriptor if needed */
- if (num_req_streams != ep->max_streams)
- ep_comp->bmAttributes = ep->max_streams;
}
}
@@ -152,7 +149,7 @@ ep_matches (
switch (type) {
case USB_ENDPOINT_XFER_INT:
/* INT: limit 64 bytes full speed, 1024 high/super speed */
- if (!gadget->is_dualspeed && max > 64)
+ if (!gadget_is_dualspeed(gadget) && max > 64)
return 0;
/* FALLTHROUGH */
@@ -160,12 +157,12 @@ ep_matches (
/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
if (ep->maxpacket < max)
return 0;
- if (!gadget->is_dualspeed && max > 1023)
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
return 0;
/* BOTH: "high bandwidth" works only at high speed */
if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
return 0;
/* configure your hardware with enough buffering!! */
}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index acb38004eec..f63dc6c150d 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1037,7 +1037,6 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
{
struct ffs_sb_fill_data *data = _data;
struct inode *inode;
- struct dentry *d;
struct ffs_data *ffs;
ENTER();
@@ -1045,7 +1044,7 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
/* Initialise data */
ffs = ffs_data_new();
if (unlikely(!ffs))
- goto enomem0;
+ goto Enomem;
ffs->sb = sb;
ffs->dev_name = data->dev_name;
@@ -1065,26 +1064,21 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&simple_dir_inode_operations,
&data->perms);
if (unlikely(!inode))
- goto enomem1;
- d = d_alloc_root(inode);
- if (unlikely(!d))
- goto enomem2;
- sb->s_root = d;
+ goto Enomem;
+ sb->s_root = d_alloc_root(inode);
+ if (unlikely(!sb->s_root)) {
+ iput(inode);
+ goto Enomem;
+ }
/* EP0 file */
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
&ffs_ep0_operations, NULL)))
- goto enomem3;
+ goto Enomem;
return 0;
-enomem3:
- dput(d);
-enomem2:
- iput(inode);
-enomem1:
- ffs_data_put(ffs);
-enomem0:
+Enomem:
return -ENOMEM;
}
@@ -1196,14 +1190,11 @@ ffs_fs_mount(struct file_system_type *t, int flags,
static void
ffs_fs_kill_sb(struct super_block *sb)
{
- void *ptr;
-
ENTER();
kill_litter_super(sb);
- ptr = xchg(&sb->s_fs_info, NULL);
- if (ptr)
- ffs_data_put(ptr);
+ if (sb->s_fs_info)
+ ffs_data_put(sb->s_fs_info);
}
static struct file_system_type ffs_fs_type = {
@@ -1408,7 +1399,7 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
count = ffs->eps_count;
- epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL);
+ epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
return -ENOMEM;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c39d58860fa..6353eca1e85 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -1873,17 +1873,14 @@ static int check_command(struct fsg_common *common, int cmnd_size,
common->lun, lun);
/* Check the LUN */
- if (common->lun < common->nluns) {
- curlun = &common->luns[common->lun];
- common->curlun = curlun;
+ curlun = common->curlun;
+ if (curlun) {
if (common->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- common->curlun = NULL;
- curlun = NULL;
common->bad_lun_okay = 0;
/*
@@ -1929,6 +1926,17 @@ static int check_command(struct fsg_common *common, int cmnd_size,
return 0;
}
+/* wrapper around check_command() for commands whose data size is given in blocks */
+static int check_command_size_in_blocks(struct fsg_common *common,
+ int cmnd_size, enum data_direction data_dir,
+ unsigned int mask, int needs_medium, const char *name)
+{
+ if (common->curlun)
+ common->data_size_from_cmnd <<= common->curlun->blkbits;
+ return check_command(common, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
+
static int do_scsi_command(struct fsg_common *common)
{
struct fsg_buffhd *bh;
@@ -2011,9 +2019,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)");
if (reply == 0)
@@ -2022,9 +2030,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)");
if (reply == 0)
@@ -2033,9 +2041,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_TO_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)");
if (reply == 0)
@@ -2134,9 +2142,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)");
if (reply == 0)
@@ -2145,9 +2153,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)");
if (reply == 0)
@@ -2156,9 +2164,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_FROM_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)");
if (reply == 0)
@@ -2273,6 +2281,10 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
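+ /* cache a pointer to the addressed LUN; left NULL if the LUN is out of range */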
+ if (common->lun >= 0 && common->lun < common->nluns)
+ common->curlun = &common->luns[common->lun];
+ else
+ common->curlun = NULL;
common->tag = cbw->Tag;
return 0;
}
@@ -2763,7 +2775,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs.
*/
- curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
+ curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
if (unlikely(!curlun)) {
rc = -ENOMEM;
goto error_release;
@@ -2975,6 +2987,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 16a509ae517..7cdcb63b21f 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
- struct net_device *dev = fp->dev;
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
err = usb_ep_queue(fp->out_ep, req, gfp_flags);
if (unlikely(err))
- netdev_free_page(dev, page);
+ put_page(page);
return err;
}
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- pn_rx_submit(fp, req, GFP_ATOMIC);
+ pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
}
/*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
- pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
}
spin_unlock(&port->lock);
return 0;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed2..cf33a8d0fd5 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 11b5196284a..e0f30fc70e4 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2297,19 +2297,17 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
DBG(fsg, "using LUN %d from CBW, "
"not LUN %d from CDB\n",
fsg->lun, lun);
- } else
- fsg->lun = lun; // Use LUN from the command
+ }
/* Check the LUN */
- if (fsg->lun < fsg->nluns) {
- fsg->curlun = curlun = &fsg->luns[fsg->lun];
+ curlun = fsg->curlun;
+ if (curlun) {
if (fsg->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- fsg->curlun = curlun = NULL;
fsg->bad_lun_okay = 0;
/* INQUIRY and REQUEST SENSE commands are explicitly allowed
@@ -2351,6 +2349,16 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
return 0;
}
+/* wrapper around check_command() for commands whose data size is given in blocks */
+static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
+ enum data_direction data_dir, unsigned int mask,
+ int needs_medium, const char *name)
+{
+ if (fsg->curlun)
+ fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
+ return check_command(fsg, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
static int do_scsi_command(struct fsg_dev *fsg)
{
@@ -2425,26 +2433,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)")) == 0)
reply = do_read(fsg);
break;
case READ_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)")) == 0)
reply = do_read(fsg);
break;
case READ_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)")) == 0)
reply = do_read(fsg);
@@ -2529,26 +2538,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)")) == 0)
reply = do_write(fsg);
break;
case WRITE_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)")) == 0)
reply = do_write(fsg);
break;
case WRITE_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)")) == 0)
reply = do_write(fsg);
@@ -2715,7 +2725,17 @@ static int get_next_command(struct fsg_dev *fsg)
memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
fsg->cbbuf_cmnd_size = 0;
spin_unlock_irq(&fsg->lock);
+
+ /* Use LUN from the command */
+ fsg->lun = fsg->cmnd[1] >> 5;
}
+
+ /* Update current lun */
+ if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
+ fsg->curlun = &fsg->luns[fsg->lun];
+ else
+ fsg->curlun = NULL;
+
return rc;
}
@@ -3584,7 +3604,7 @@ static void fsg_resume(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver fsg_driver = {
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
.function = (char *) fsg_string_product,
.unbind = fsg_unbind,
.disconnect = fsg_disconnect,
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f3..dcbc0a2e48d 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
@@ -88,7 +89,6 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c..b95697c03d0 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2351,7 +2350,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
/* hook up the driver */
udc_controller->driver = driver;
udc_controller->gadget.dev.driver = &driver->driver;
- udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
+ udc_controller->gadget.speed = driver->max_speed;
spin_unlock_irqrestore(&udc_controller->lock, flags);
retval = bind(&udc_controller->gadget);
@@ -2815,20 +2814,7 @@ static struct platform_driver udc_driver = {
#endif
};
-static int __init qe_udc_init(void)
-{
- printk(KERN_INFO "%s: %s, %s\n", driver_name, driver_desc,
- DRIVER_VERSION);
- return platform_driver_register(&udc_driver);
-}
-
-static void __exit qe_udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-
-module_init(qe_udc_init);
-module_exit(qe_udc_exit);
+module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b3b3d83b7c3..d7ea6c076ce 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2530,7 +2525,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
- udc_controller->gadget.is_dualspeed = 1;
+ udc_controller->gadget.max_speed = USB_SPEED_HIGH;
udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda..f781f5dec41 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 74da206c840..5831cb4a0b3 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1317,7 +1317,7 @@ static int fusb300_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1463,7 +1463,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
dev_set_name(&fusb300->gadget.dev, "gadget");
- fusb300->gadget.is_dualspeed = 1;
+ fusb300->gadget.max_speed = USB_SPEED_HIGH;
fusb300->gadget.dev.parent = &pdev->dev;
fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
fusb300->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 7f87805cddc..5af70fcce13 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1357,7 +1357,7 @@ static int goku_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
@@ -1796,6 +1796,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
+ dev->gadget.max_speed = USB_SPEED_FULL;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 2d978c0e7ce..8d1c75abd73 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1336,7 +1336,7 @@ static int imx_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 6ccae2707e5..ae04266dba1 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1766,9 +1766,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
static struct usb_gadget_driver gadgetfs_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#else
- .speed = USB_SPEED_FULL,
+ .max_speed = USB_SPEED_FULL,
#endif
.function = (char *) driver_desc,
.unbind = gadgetfs_unbind,
@@ -1792,7 +1792,7 @@ static int gadgetfs_probe (struct usb_gadget *gadget)
}
static struct usb_gadget_driver probe_driver = {
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = gadgetfs_nop,
.setup = (void *)gadgetfs_nop,
.disconnect = gadgetfs_nop,
@@ -2035,7 +2035,6 @@ static int
gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
{
struct inode *inode;
- struct dentry *d;
struct dev_data *dev;
if (the_device)
@@ -2058,24 +2057,27 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
NULL, &simple_dir_operations,
S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode)
- goto enomem0;
+ goto Enomem;
inode->i_op = &simple_dir_inode_operations;
- if (!(d = d_alloc_root (inode)))
- goto enomem1;
- sb->s_root = d;
+ if (!(sb->s_root = d_alloc_root (inode))) {
+ iput(inode);
+ goto Enomem;
+ }
/* the ep0 file is named after the controller we expect;
* user mode code can use it for sanity checks, like we do.
*/
dev = dev_new ();
if (!dev)
- goto enomem2;
+ goto Enomem;
dev->sb = sb;
if (!gadgetfs_create_file (sb, CHIP,
dev, &dev_init_operations,
- &dev->dentry))
- goto enomem3;
+ &dev->dentry)) {
+ put_dev(dev);
+ goto Enomem;
+ }
/* other endpoint files are available after hardware setup,
* from binding to a controller.
@@ -2083,13 +2085,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
the_device = dev;
return 0;
-enomem3:
- put_dev (dev);
-enomem2:
- dput (d);
-enomem1:
- iput (inode);
-enomem0:
+Enomem:
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index c9fa3bf5b37..fa0fcc11263 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -3267,7 +3267,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- dev->gadget.is_dualspeed = 1; /* support dual speed */
+ dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
#ifdef OTG_TRANSCEIVER
dev->gadget.is_otg = 1; /* support otg mode */
#endif
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a..3608b3bd573 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1653,7 +1653,7 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->gadget.ops = &m66592_gadget_ops;
device_initialize(&m66592->gadget.dev);
dev_set_name(&m66592->gadget.dev, "gadget");
- m66592->gadget.is_dualspeed = 1;
+ m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.dev.parent = &pdev->dev;
m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
m66592->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
index daa75c12f33..34aadfae723 100644
--- a/drivers/usb/gadget/mv_udc.h
+++ b/drivers/usb/gadget/mv_udc.h
@@ -180,7 +180,7 @@ struct mv_udc {
struct mv_cap_regs __iomem *cap_regs;
struct mv_op_regs __iomem *op_regs;
- unsigned int phy_regs;
+ void __iomem *phy_regs;
unsigned int max_eps;
struct mv_dqh *ep_dqh;
size_t ep_dqh_size;
@@ -211,11 +211,14 @@ struct mv_udc {
softconnected:1,
force_fs:1,
clock_gating:1,
- active:1;
+ active:1,
+ stopped:1; /* stop bit is set */
struct work_struct vbus_work;
struct workqueue_struct *qwork;
+ struct otg_transceiver *transceiver;
+
struct mv_usb_platform_data *pdata;
/* some SOCs have multiple clock sources for USB */
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 892412103dd..f97e737d26f 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -276,11 +276,12 @@ static void done(struct mv_ep *ep, struct mv_req *req, int status)
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
- u32 tmp, epstatus, bit_pos, direction;
struct mv_udc *udc;
struct mv_dqh *dqh;
+ u32 bit_pos, direction;
+ u32 usbcmd, epstatus;
unsigned int loops;
- int readsafe, retval = 0;
+ int retval = 0;
udc = ep->udc;
direction = ep_dir(ep);
@@ -293,30 +294,18 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
lastreq->tail->dtd_next =
req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- if (readl(&udc->op_regs->epprime) & bit_pos) {
- loops = LOOPS(PRIME_TIMEOUT);
- while (readl(&udc->op_regs->epprime) & bit_pos) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- udelay(LOOPS_USEC);
- loops--;
- }
- if (readl(&udc->op_regs->epstatus) & bit_pos)
- goto done;
- }
- readsafe = 0;
+
+ wmb();
+
+ if (readl(&udc->op_regs->epprime) & bit_pos)
+ goto done;
+
loops = LOOPS(READSAFE_TIMEOUT);
- while (readsafe == 0) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
+ while (1) {
/* start with setting the semaphores */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
- writel(tmp, &udc->op_regs->usbcmd);
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+ writel(usbcmd, &udc->op_regs->usbcmd);
/* read the endpoint status */
epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
@@ -329,98 +318,46 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
* primed.
*/
if (readl(&udc->op_regs->usbcmd)
- & USBCMD_ATDTW_TRIPWIRE_SET) {
- readsafe = 1;
- }
+ & USBCMD_ATDTW_TRIPWIRE_SET)
+ break;
+
loops--;
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "Timeout for ATDTW_TRIPWIRE...\n");
+ retval = -ETIME;
+ goto done;
+ }
udelay(LOOPS_USEC);
}
/* Clear the semaphore */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
- writel(tmp, &udc->op_regs->usbcmd);
-
- /* If endpoint is not active, we activate it now. */
- if (!epstatus) {
- if (direction == EP_DIR_IN) {
- struct mv_dtd *curr_dtd = dma_to_virt(
- &udc->dev->dev, dqh->curr_dtd_ptr);
-
- loops = LOOPS(DTD_TIMEOUT);
- while (curr_dtd->size_ioc_sts
- & DTD_STATUS_ACTIVE) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
- }
- /* No other transfers on the queue */
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+ writel(usbcmd, &udc->op_regs->usbcmd);
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ if (epstatus)
+ goto done;
+ }
- /*
- * Ensure that updates to the QH will
- * occur before priming.
- */
- wmb();
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
- }
- } else {
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
+ /* Prime the Endpoint */
+ writel(bit_pos, &udc->op_regs->epprime);
- if (direction == EP_DIR_IN) {
- /* FIXME add status check after prime the IN ep */
- int prime_again;
- u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
-
- loops = LOOPS(DTD_TIMEOUT);
- prime_again = 0;
- while ((curr_dtd_ptr != req->head->td_dma)) {
- curr_dtd_ptr = dqh->curr_dtd_ptr;
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "failed to prime %s\n",
- ep->name);
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
-
- if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
- if (prime_again)
- goto done;
- dev_info(&udc->dev->dev,
- "prime again\n");
- writel(bit_pos,
- &udc->op_regs->epprime);
- prime_again = 1;
- }
- }
- }
- }
done:
return retval;
}
+
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
dma_addr_t *dma, int *is_last)
{
@@ -841,6 +778,27 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return 0;
}
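+/* hook the request's dTD chain onto the endpoint's dQH and prime the endpoint */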
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+{
+ struct mv_dqh *dqh = ep->dqh;
+ u32 bit_pos;
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+ /* Prime the Endpoint */
+ writel(bit_pos, &ep->udc->op_regs->epprime);
+}
+
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
@@ -883,15 +841,13 @@ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct mv_dqh *qh;
struct mv_req *next_req;
- qh = ep->dqh;
- next_req = list_entry(req->queue.next, struct mv_req,
- queue);
+ next_req = list_entry(req->queue.next,
+ struct mv_req, queue);
/* Point the QH to the first TD of next request */
- writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ mv_prime_ep(ep, next_req);
} else {
struct mv_dqh *qh;
@@ -1056,6 +1012,8 @@ static void udc_stop(struct mv_udc *udc)
USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
writel(tmp, &udc->op_regs->usbintr);
+ udc->stopped = 1;
+
/* Reset the Run bit in the command register to stop VUSB */
tmp = readl(&udc->op_regs->usbcmd);
tmp &= ~USBCMD_RUN_STOP;
@@ -1072,6 +1030,8 @@ static void udc_start(struct mv_udc *udc)
/* Enable interrupts */
writel(usbintr, &udc->op_regs->usbintr);
+ udc->stopped = 0;
+
/* Set the Run bit in the command register */
writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
@@ -1134,11 +1094,11 @@ static int udc_reset(struct mv_udc *udc)
return 0;
}
-static int mv_udc_enable(struct mv_udc *udc)
+static int mv_udc_enable_internal(struct mv_udc *udc)
{
int retval;
- if (udc->clock_gating == 0 || udc->active)
+ if (udc->active)
return 0;
dev_dbg(&udc->dev->dev, "enable udc\n");
@@ -1157,9 +1117,17 @@ static int mv_udc_enable(struct mv_udc *udc)
return 0;
}
-static void mv_udc_disable(struct mv_udc *udc)
+static int mv_udc_enable(struct mv_udc *udc)
{
- if (udc->clock_gating && udc->active) {
+ if (udc->clock_gating)
+ return mv_udc_enable_internal(udc);
+
+ return 0;
+}
+
+static void mv_udc_disable_internal(struct mv_udc *udc)
+{
+ if (udc->active) {
dev_dbg(&udc->dev->dev, "disable udc\n");
if (udc->pdata->phy_deinit)
udc->pdata->phy_deinit(udc->phy_regs);
@@ -1168,6 +1136,12 @@ static void mv_udc_disable(struct mv_udc *udc)
}
}
+static void mv_udc_disable(struct mv_udc *udc)
+{
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+}
+
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
struct mv_udc *udc;
@@ -1178,7 +1152,7 @@ static int mv_udc_get_frame(struct usb_gadget *gadget)
udc = container_of(gadget, struct mv_udc, gadget);
- retval = readl(udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+ retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
return retval;
}
@@ -1212,10 +1186,11 @@ static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->vbus_active = (is_active != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->vbus_active = (is_active != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1244,10 +1219,11 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->softconnect = (is_on != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->softconnect = (is_on != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1407,6 +1383,20 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
return retval;
}
+ if (udc->transceiver) {
+ retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "unable to register peripheral to otg\n");
+ if (driver->unbind) {
+ driver->unbind(&udc->gadget);
+ udc->gadget.dev.driver = NULL;
+ udc->driver = NULL;
+ }
+ return retval;
+ }
+ }
+
/* pullup is always on */
mv_udc_pullup(&udc->gadget, 1);
@@ -2026,6 +2016,10 @@ static irqreturn_t mv_udc_irq(int irq, void *dev)
struct mv_udc *udc = (struct mv_udc *)dev;
u32 status, intr;
+ /* Disable ISR when stopped bit is set */
+ if (udc->stopped)
+ return IRQ_NONE;
+
spin_lock(&udc->lock);
status = readl(&udc->op_regs->usbsts);
@@ -2109,7 +2103,12 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
destroy_workqueue(udc->qwork);
}
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ /*
+ * If the transceiver has been initialized,
+ * the vbus irq will not be requested in the udc driver.
+ */
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(udc->pdata->vbus->irq, &dev->dev);
/* free memory allocated in probe */
@@ -2129,11 +2128,9 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
if (udc->cap_regs)
iounmap(udc->cap_regs);
- udc->cap_regs = NULL;
if (udc->phy_regs)
- iounmap((void *)udc->phy_regs);
- udc->phy_regs = 0;
+ iounmap(udc->phy_regs);
if (udc->status_req) {
kfree(udc->status_req->req.buf);
@@ -2182,6 +2179,11 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->dev = dev;
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->mode == MV_USB_MODE_OTG)
+ udc->transceiver = otg_get_transceiver();
+#endif
+
udc->clknum = pdata->clknum;
for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
@@ -2213,24 +2215,20 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
goto err_iounmap_capreg;
}
- udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
- if (udc->phy_regs == 0) {
+ udc->phy_regs = ioremap(r->start, resource_size(r));
+ if (udc->phy_regs == NULL) {
dev_err(&dev->dev, "failed to map phy I/O memory\n");
retval = -EBUSY;
goto err_iounmap_capreg;
}
/* we will access the controller registers, so enable the clk */
- udc_clock_enable(udc);
- if (pdata->phy_init) {
- retval = pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&dev->dev, "phy init error %d\n", retval);
- goto err_iounmap_phyreg;
- }
- }
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
+ goto err_iounmap_phyreg;
- udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
+ udc->op_regs =
+ (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+ (readl(&udc->cap_regs->caplength_hciversion)
& CAPLENGTH_MASK));
udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
@@ -2312,7 +2310,7 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- udc->gadget.is_dualspeed = 1; /* support dual speed */
+ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&udc->gadget.dev, "gadget");
@@ -2328,7 +2326,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
eps_init(udc);
/* VBUS detect: we can disable/enable clock on demand.*/
- if (pdata->vbus) {
+ if (udc->transceiver)
+ udc->clock_gating = 1;
+ else if (pdata->vbus) {
udc->clock_gating = 1;
retval = request_threaded_irq(pdata->vbus->irq, NULL,
mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
@@ -2354,11 +2354,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
* If not, it means that VBUS detection is not supported, we
* have to enable vbus active all the time to let controller work.
*/
- if (udc->clock_gating) {
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
- } else
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+ else
udc->vbus_active = 1;
retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
@@ -2371,7 +2369,8 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
return 0;
err_unregister:
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(pdata->vbus->irq, &dev->dev);
device_unregister(&udc->gadget.dev);
err_free_irq:
@@ -2387,11 +2386,9 @@ err_free_dma:
dma_free_coherent(&dev->dev, udc->ep_dqh_size,
udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
+ mv_udc_disable_internal(udc);
err_iounmap_phyreg:
- iounmap((void *)udc->phy_regs);
+ iounmap(udc->phy_regs);
err_iounmap_capreg:
iounmap(udc->cap_regs);
err_put_clk:
@@ -2407,7 +2404,30 @@ static int mv_udc_suspend(struct device *_dev)
{
struct mv_udc *udc = the_controller;
- udc_stop(udc);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (udc->pdata->vbus && udc->pdata->vbus->poll)
+ if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+ dev_info(&udc->dev->dev, "USB cable is connected!\n");
+ return -EAGAIN;
+ }
+
+ /*
+ * The udc can suspend only when the cable is unplugged,
+ * so there is no need to handle the clock_gating == 1 case here.
+ */
+ if (!udc->clock_gating) {
+ udc_stop(udc);
+
+ spin_lock_irq(&udc->lock);
+ /* stop all usb activities */
+ stop_activity(udc, udc->driver);
+ spin_unlock_irq(&udc->lock);
+
+ mv_udc_disable_internal(udc);
+ }
return 0;
}
@@ -2417,20 +2437,22 @@ static int mv_udc_resume(struct device *_dev)
struct mv_udc *udc = the_controller;
int retval;
- if (udc->pdata->phy_init) {
- retval = udc->pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&udc->dev->dev,
- "init phy error %d when resume back\n",
- retval);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (!udc->clock_gating) {
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
return retval;
+
+ if (udc->driver && udc->softconnect) {
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
}
}
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
-
return 0;
}
@@ -2457,30 +2479,16 @@ static struct platform_driver udc_driver = {
.shutdown = mv_udc_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "pxa-u2o",
+ .name = "mv-udc",
#ifdef CONFIG_PM
.pm = &mv_udc_pm_ops,
#endif
},
};
-MODULE_ALIAS("platform:pxa-u2o");
+module_platform_driver(udc_driver);
+MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-
-static int __init init(void)
-{
- return platform_driver_register(&udc_driver);
-}
-module_init(init);
-
-
-static void __exit cleanup(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(cleanup);
-
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index d1b76368472..4c81d540bc2 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -1459,7 +1459,7 @@ static int net2272_start(struct usb_gadget *_gadget,
unsigned i;
if (!driver || !driver->unbind || !driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->max_speed != USB_SPEED_HIGH)
return -EINVAL;
dev = container_of(_gadget, struct net2272, gadget);
@@ -2235,7 +2235,7 @@ net2272_probe_init(struct device *dev, unsigned int irq)
ret->irq = irq;
ret->dev = dev;
ret->gadget.ops = &net2272_ops;
- ret->gadget.is_dualspeed = 1;
+ ret->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&ret->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cd..cf1f36454d0 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
@@ -2698,7 +2698,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init (&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &net2280_ops;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 788989a1022..7db5bbe6251 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2110,7 +2110,7 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
return -ENODEV;
if (!driver
// FIXME if otg, check: driver->is_otg
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->setup)
return -EINVAL;
@@ -2676,6 +2676,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
INIT_LIST_HEAD(&udc->gadget.ep_list);
INIT_LIST_HEAD(&udc->iso);
udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.name = driver_name;
device_initialize(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 5048a0c0764..dd2313cce1d 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -2693,7 +2693,7 @@ static int pch_udc_start(struct usb_gadget_driver *driver,
struct pch_udc_dev *dev = pch_udc;
int retval;
- if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
+ if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
!driver->setup || !driver->unbind || !driver->disconnect) {
dev_err(&dev->pdev->dev,
"%s: invalid driver parameter\n", __func__);
@@ -2941,7 +2941,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = KBUILD_MODNAME;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
retval = device_register(&dev->gadget.dev);
if (retval)
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 65a8834f274..d83134b0f78 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1141,7 +1141,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_DT_DEVICE_QUALIFIER:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/*
* assumes ep0 uses the same value for both
@@ -1155,7 +1155,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
case USB_DT_OTHER_SPEED_CONFIG:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/* FALLTHROUGH */
#endif /* CONFIG_USB_GADGET_DUALSPEED */
@@ -1535,7 +1535,7 @@ fail:
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver printer_driver = {
- .speed = DEVSPEED,
+ .max_speed = DEVSPEED,
.function = (char *) driver_desc,
.unbind = printer_unbind,
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index c090a7e3ecf..dd470635f4f 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1264,7 +1264,7 @@ static int pxa25x_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 18b6b091f2a..f4c44eb806c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1807,7 +1807,7 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
struct pxa_udc *udc = the_controller;
int retval;
- if (!driver || driver->speed < USB_SPEED_FULL || !bind
+ if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
|| !driver->disconnect || !driver->setup)
return -EINVAL;
if (!udc)
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 24f84b210ce..f5b8d215e1d 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
@@ -1911,7 +1911,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.ops = &r8a66597_gadget_ops;
dev_set_name(&r8a66597->gadget.dev, "gadget");
- r8a66597->gadget.is_dualspeed = 1;
+ r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.dev.parent = &pdev->dev;
r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
r8a66597->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc94..69295ba9d99 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->max_speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
@@ -3364,7 +3362,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
dev_set_name(&hsotg->gadget.dev, "gadget");
- hsotg->gadget.is_dualspeed = 1;
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
@@ -3469,18 +3467,7 @@ static struct platform_driver s3c_hsotg_driver = {
.resume = s3c_hsotg_resume,
};
-static int __init s3c_hsotg_modinit(void)
-{
- return platform_driver_register(&s3c_hsotg_driver);
-}
-
-static void __exit s3c_hsotg_modexit(void)
-{
- platform_driver_unregister(&s3c_hsotg_driver);
-}
-
-module_init(s3c_hsotg_modinit);
-module_exit(s3c_hsotg_modexit);
+module_platform_driver(s3c_hsotg_driver);
MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cef..df8661d266c 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -28,9 +28,10 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/prefetch.h>
+#include <linux/platform_data/s3c-hsudc.h>
+#include <linux/regulator/consumer.h>
#include <mach/regs-s3c2443-clock.h>
-#include <plat/udc.h>
#define S3C_HSUDC_REG(x) (x)
@@ -87,6 +88,12 @@
#define DATA_STATE_XMIT (1)
#define DATA_STATE_RECV (2)
+static const char * const s3c_hsudc_supply_names[] = {
+ "vdda", /* analog phy supply, 3.3V */
+ "vddi", /* digital phy supply, 1.2V */
+ "vddosc", /* oscillator supply, 1.8V - 3.3V */
+};
+
/**
* struct s3c_hsudc_ep - Endpoint representation used by driver.
* @ep: USB gadget layer representation of device endpoint.
@@ -139,6 +146,7 @@ struct s3c_hsudc {
struct device *dev;
struct s3c24xx_hsudc_platdata *pd;
struct otg_transceiver *transceiver;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
spinlock_t lock;
void __iomem *regs;
struct resource *mem_rsrc;
@@ -153,7 +161,6 @@ struct s3c_hsudc {
#define ep_index(_ep) ((_ep)->bEndpointAddress & \
USB_ENDPOINT_NUMBER_MASK)
-static struct s3c_hsudc *the_controller;
static const char driver_name[] = "s3c-udc";
static const char ep0name[] = "ep0-control";
@@ -282,8 +289,7 @@ static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
* All the endpoints are stopped and any pending transfer requests if any on
* the endpoint are terminated.
*/
-static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
- struct usb_gadget_driver *driver)
+static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
{
struct s3c_hsudc_ep *hsep;
int epnum;
@@ -295,10 +301,6 @@ static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
hsep->stopped = 1;
s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
}
-
- spin_unlock(&hsudc->lock);
- driver->disconnect(&hsudc->gadget);
- spin_lock(&hsudc->lock);
}
/**
@@ -1135,17 +1137,15 @@ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
return IRQ_HANDLED;
}
-static int s3c_hsudc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int s3c_hsudc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
- || !bind
- || !driver->unbind || !driver->disconnect || !driver->setup)
+ || driver->max_speed < USB_SPEED_FULL
+ || !driver->setup)
return -EINVAL;
if (!hsudc)
@@ -1156,21 +1156,12 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->driver = driver;
hsudc->gadget.dev.driver = &driver->driver;
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- ret = device_add(&hsudc->gadget.dev);
- if (ret) {
- dev_err(hsudc->dev, "failed to probe gadget device");
- return ret;
- }
- ret = bind(&hsudc->gadget);
- if (ret) {
- dev_err(hsudc->dev, "%s: bind failed\n", hsudc->gadget.name);
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
+ goto err_supplies;
}
/* connect to bus through transceiver */
@@ -1179,13 +1170,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
if (ret) {
dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
hsudc->gadget.name);
- driver->unbind(&hsudc->gadget);
-
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ goto err_otg;
}
}
@@ -1198,34 +1183,43 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->pd->gpio_init();
return 0;
+err_otg:
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ return ret;
}
-static int s3c_hsudc_stop(struct usb_gadget_driver *driver)
+static int s3c_hsudc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
unsigned long flags;
if (!hsudc)
return -ENODEV;
- if (!driver || driver != hsudc->driver || !driver->unbind)
+ if (!driver || driver != hsudc->driver)
return -EINVAL;
spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->driver = 0;
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_uninit_phy();
if (hsudc->pd->gpio_uninit)
hsudc->pd->gpio_uninit();
- s3c_hsudc_stop_activity(hsudc, driver);
+ s3c_hsudc_stop_activity(hsudc);
spin_unlock_irqrestore(&hsudc->lock, flags);
if (hsudc->transceiver)
(void) otg_set_peripheral(hsudc->transceiver, NULL);
- driver->unbind(&hsudc->gadget);
- device_del(&hsudc->gadget.dev);
disable_irq(hsudc->irq);
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+
dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
driver->driver.name);
return 0;
@@ -1243,7 +1237,7 @@ static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
if (!hsudc)
return -ENODEV;
@@ -1256,18 +1250,18 @@ static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
- .start = s3c_hsudc_start,
- .stop = s3c_hsudc_stop,
+ .udc_start = s3c_hsudc_start,
+ .udc_stop = s3c_hsudc_stop,
.vbus_draw = s3c_hsudc_vbus_draw,
};
-static int s3c_hsudc_probe(struct platform_device *pdev)
+static int __devinit s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
- int ret;
+ int ret, i;
hsudc = kzalloc(sizeof(struct s3c_hsudc) +
sizeof(struct s3c_hsudc_ep) * pd->epnum,
@@ -1277,13 +1271,22 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- the_controller = hsudc;
platform_set_drvdata(pdev, dev);
hsudc->dev = dev;
hsudc->pd = pdev->dev.platform_data;
hsudc->transceiver = otg_get_transceiver();
+ for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
+ hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ goto err_supplies;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "unable to obtain driver resource data\n");
@@ -1308,10 +1311,9 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
spin_lock_init(&hsudc->lock);
- device_initialize(&hsudc->gadget.dev);
dev_set_name(&hsudc->gadget.dev, "gadget");
- hsudc->gadget.is_dualspeed = 1;
+ hsudc->gadget.max_speed = USB_SPEED_HIGH;
hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
hsudc->gadget.name = dev_name(dev);
hsudc->gadget.dev.parent = dev;
@@ -1320,6 +1322,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc->gadget.is_otg = 0;
hsudc->gadget.is_a_peripheral = 0;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_setup_ep(hsudc);
@@ -1349,12 +1352,20 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
disable_irq(hsudc->irq);
local_irq_enable();
+ ret = device_register(&hsudc->gadget.dev);
+ if (ret) {
+ put_device(&hsudc->gadget.dev);
+ goto err_add_device;
+ }
+
ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
if (ret)
goto err_add_udc;
return 0;
err_add_udc:
+ device_unregister(&hsudc->gadget.dev);
+err_add_device:
clk_disable(hsudc->uclk);
clk_put(hsudc->uclk);
err_clk:
@@ -1363,10 +1374,13 @@ err_irq:
iounmap(hsudc->regs);
err_remap:
- release_resource(hsudc->mem_rsrc);
- kfree(hsudc->mem_rsrc);
-
+ release_mem_region(res->start, resource_size(res));
err_res:
+ if (hsudc->transceiver)
+ otg_put_transceiver(hsudc->transceiver);
+
+ regulator_bulk_free(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
kfree(hsudc);
return ret;
}
@@ -1378,21 +1392,10 @@ static struct platform_driver s3c_hsudc_driver = {
},
.probe = s3c_hsudc_probe,
};
-MODULE_ALIAS("platform:s3c-hsudc");
-
-static int __init s3c_hsudc_modinit(void)
-{
- return platform_driver_register(&s3c_hsudc_driver);
-}
-static void __exit s3c_hsudc_modexit(void)
-{
- platform_driver_unregister(&s3c_hsudc_driver);
-}
-
-module_init(s3c_hsudc_modinit);
-module_exit(s3c_hsudc_modexit);
+module_platform_driver(s3c_hsudc_driver);
MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index b8643771fa8..3f87cb9344b 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -3,7 +3,7 @@
*
* Samsung S3C24xx series on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@
#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
#define DRIVER_VERSION "29 Apr 2007"
-#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
+#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
"Arnaud Patard <arnaud.patard@rtp-net.org>"
static const char gadget_name[] = "s3c2410_udc";
@@ -1683,9 +1683,9 @@ static int s3c2410_udc_start(struct usb_gadget_driver *driver,
if (udc->driver)
return -EBUSY;
- if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
+ if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
- bind, driver->setup, driver->speed);
+ bind, driver->setup, driver->max_speed);
return -EINVAL;
}
#if defined(MODULE)
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index a48f619cb1c..1653bae08b8 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -2,7 +2,7 @@
* linux/drivers/usb/gadget/s3c2410_udc.h
* Samsung on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 6939e17f458..0b0d12ccc48 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -371,14 +371,28 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
}
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-static ssize_t usb_udc_speed_show(struct device *dev,
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t usb_udc_##param##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return snprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR(name, S_IRUSR, usb_udc_##param##_show, NULL)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+/* TODO: Scheduled for removal in 3.8. */
+static ssize_t usb_udc_is_dualspeed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- usb_speed_string(udc->gadget->speed));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ gadget_is_dualspeed(udc->gadget));
}
-static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(is_dualspeed, S_IRUSR, usb_udc_is_dualspeed_show, NULL);
#define USB_UDC_ATTR(name) \
ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -391,7 +405,6 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
} \
static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
-static USB_UDC_ATTR(is_dualspeed);
static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
@@ -401,7 +414,8 @@ static USB_UDC_ATTR(a_alt_hnp_support);
static struct attribute *usb_udc_attrs[] = {
&dev_attr_srp.attr,
&dev_attr_soft_connect.attr,
- &dev_attr_speed.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
&dev_attr_is_dualspeed.attr,
&dev_attr_is_otg.attr,
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 58c4d37d312..4d25b9009ed 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -13,82 +13,17 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/nls.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <asm/unaligned.h>
-
-
-static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
-{
- int count = 0;
- u8 c;
- u16 uchar;
-
- /* this insists on correct encodings, though not minimal ones.
- * BUT it currently rejects legit 4-byte UTF-8 code points,
- * which need surrogate pairs. (Unicode 3.1 can use them.)
- */
- while (len != 0 && (c = (u8) *s++) != 0) {
- if (unlikely(c & 0x80)) {
- // 2-byte sequence:
- // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
- if ((c & 0xe0) == 0xc0) {
- uchar = (c & 0x1f) << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- // 3-byte sequence (most CJKV characters):
- // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
- } else if ((c & 0xf0) == 0xe0) {
- uchar = (c & 0x0f) << 12;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- /* no bogus surrogates */
- if (0xd800 <= uchar && uchar <= 0xdfff)
- goto fail;
-
- // 4-byte sequence (surrogate pairs, currently rare):
- // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
- // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
- // (uuuuu = wwww + 1)
- // FIXME accept the surrogate code points (only)
-
- } else
- goto fail;
- } else
- uchar = c;
- put_unaligned_le16(uchar, cp++);
- count++;
- len--;
- }
- return count;
-fail:
- return -1;
-}
-
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
- * @buf: at least 256 bytes
+ * @buf: at least 256 bytes, must be 16-bit aligned
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
@@ -125,8 +60,8 @@ usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
/* string descriptors have length, tag, then UTF16-LE text */
len = min ((size_t) 126, strlen (s->s));
- memset (buf + 2, 0, 2 * len); /* zero all the bytes */
- len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
+ len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
+ (wchar_t *) &buf[2], 126);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 060e0e2b1ae..4c0c9734251 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -194,6 +194,15 @@ config USB_EHCI_S5P
help
Enable support for the S5P SOC's on-chip EHCI controller.
+config USB_EHCI_MV
+ bool "EHCI support for Marvell on-chip controller"
+ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for Marvell (including PXA and MMP series) on-chip
+ USB SPH and OTG controller. SPH is a single-port host, and it can
+ only be an EHCI host. OTG is a controller that can switch to host mode.
+
config USB_W90X900_EHCI
bool "W90X900(W90P910) EHCI support"
depends on USB_EHCI_HCD && ARCH_W90X900
@@ -371,6 +380,12 @@ config USB_OHCI_SH
Enables support for the on-chip OHCI controller on the SuperH.
If you use the PCI OHCI controller, this option is not necessary.
+config USB_OHCI_EXYNOS
+ boolean "OHCI support for Samsung EXYNOS SoC Series"
+ depends on USB_OHCI_HCD && ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module"
depends on USB_OHCI_HCD && ARCH_CNS3XXX
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 18bafa99fe5..bf7441afed1 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -23,6 +23,7 @@ static int au1xxx_ehci_setup(struct usb_hcd *hcd)
int ret = ehci_init(hcd);
ehci->need_io_watchdog = 0;
+ ehci_reset(ehci);
return ret;
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 3ff9f82f726..e311a511529 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -48,6 +48,10 @@
#include <asm/system.h>
#include <asm/unaligned.h>
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -230,12 +234,58 @@ static int ehci_halt (struct ehci_hcd *ehci)
STS_HALT, STS_HALT, 16 * 125);
}
+#if defined(CONFIG_USB_SUSPEND) && defined(CONFIG_PPC_PS3)
+
+/*
+ * The EHCI controller of the Cell Super Companion Chip used in the
+ * PS3 will stop the root hub after all root hub ports are suspended.
+ * When in this condition, handshake will return -ETIMEDOUT. The
+ * STS_HLT bit will not be set, so inspection of the frame index is
+ * used here to test for the condition. If the condition is found,
+ * return success to allow the USB suspend to complete.
+ */
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ unsigned int old_index;
+ int error;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ETIMEDOUT;
+
+ old_index = ehci_read_frame_index(ehci);
+
+ error = handshake(ehci, ptr, mask, done, usec);
+
+ if (error == -ETIMEDOUT && ehci_read_frame_index(ehci) == old_index)
+ return 0;
+
+ return error;
+}
+
+#else
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ return -ETIMEDOUT;
+}
+
+#endif
+
static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
int error;
error = handshake(ehci, ptr, mask, done, usec);
+ if (error == -ETIMEDOUT)
+ error = handshake_for_broken_root_hub(ehci, ptr, mask, done,
+ usec);
+
if (error) {
ehci_halt(ehci);
ehci->rh_state = EHCI_RH_HALTED;
@@ -620,6 +670,7 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+ hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
@@ -677,22 +728,13 @@ static int ehci_init(struct usb_hcd *hcd)
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- int retval;
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
- /*
- * TDI driver does the ehci_reset in their reset callback.
- * Don't reset here, because configuration settings will
- * vanish.
- */
- if (!ehci_is_TDI(ehci) && (retval = ehci_reset(ehci)) != 0) {
- ehci_mem_cleanup(ehci);
- return retval;
- }
+
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
@@ -1324,11 +1366,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_pxa168_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ehci-xls.c"
#define PLATFORM_DRIVER ehci_xls_driver
#endif
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define PLATFORM_DRIVER ehci_mv_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 00000000000..52a604fb932
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK (0xff)
+
+struct ehci_hcd_mv {
+ struct usb_hcd *hcd;
+
+ /* Which mode is this EHCI controller running in: OTG or Host? */
+ int mode;
+
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ void __iomem *op_regs;
+
+ struct otg_transceiver *otg;
+
+ struct mv_usb_platform_data *pdata;
+
+ /* clock source and total clock number */
+ unsigned int clknum;
+ struct clk *clk[0];
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_enable(ehci_mv->clk[i]);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_disable(ehci_mv->clk[i]);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ int retval;
+
+ ehci_clock_enable(ehci_mv);
+ if (ehci_mv->pdata->phy_init) {
+ retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ if (ehci_mv->pdata->phy_deinit)
+ ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ int retval;
+
+ if (ehci_mv == NULL) {
+ dev_err(dev, "Can not find private ehci data\n");
+ return -ENODEV;
+ }
+
+ /*
+ * data structure init
+ */
+ retval = ehci_init(hcd);
+ if (retval) {
+ dev_err(dev, "ehci_init failed %d\n", retval);
+ return retval;
+ }
+
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+
+ retval = ehci_reset(ehci);
+ if (retval) {
+ dev_err(dev, "ehci_reset failed %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = mv_ehci_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct ehci_hcd_mv *ehci_mv;
+ struct resource *r;
+ int clk_i, retval = -ENODEV;
+ u32 offset;
+ size_t size;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ if (!hcd)
+ return -ENOMEM;
+
+ size = sizeof(*ehci_mv) + sizeof(struct clk *) * pdata->clknum;
+ ehci_mv = kzalloc(size, GFP_KERNEL);
+ if (ehci_mv == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
+ retval = -ENOMEM;
+ goto err_put_hcd;
+ }
+
+ platform_set_drvdata(pdev, ehci_mv);
+ ehci_mv->pdata = pdata;
+ ehci_mv->hcd = hcd;
+
+ ehci_mv->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++) {
+ ehci_mv->clk[clk_i] =
+ clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(ehci_mv->clk[clk_i])) {
+ dev_err(&pdev->dev, "error get clck \"%s\"\n",
+ pdata->clkname[clk_i]);
+ retval = PTR_ERR(ehci_mv->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_clk;
+ }
+
+ ehci_mv->phy_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->phy_regs == 0) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_put_clk;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_iounmap_phyreg;
+ }
+
+ ehci_mv->cap_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_iounmap_phyreg;
+ }
+
+ retval = mv_ehci_enable(ehci_mv);
+ if (retval) {
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
+ goto err_iounmap_capreg;
+ }
+
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+ ehci_mv->op_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = r->end - r->start + 1;
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (!hcd->irq) {
+ dev_err(&pdev->dev, "Cannot get irq.");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->regs = (struct ehci_regs *) ehci_mv->op_regs;
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci_mv->mode = pdata->mode;
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
+#ifdef CONFIG_USB_OTG_UTILS
+ ehci_mv->otg = otg_get_transceiver();
+ if (!ehci_mv->otg) {
+ dev_err(&pdev->dev,
+ "unable to find transceiver\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_host(ehci_mv->otg, &hcd->self);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "unable to register with transceiver\n");
+ retval = -ENODEV;
+ goto err_put_transceiver;
+ }
+ /* otg will enable clock before use as host */
+ mv_ehci_disable(ehci_mv);
+#else
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+ "must have CONFIG_USB_OTG_UTILS enabled\n");
+ goto err_disable_clk;
+#endif
+ } else {
+ if (pdata->set_vbus)
+ pdata->set_vbus(1);
+
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "failed to add hcd with err %d\n", retval);
+ goto err_set_vbus;
+ }
+ }
+
+ if (pdata->private_init)
+ pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+ dev_info(&pdev->dev,
+ "successful find EHCI device with regs 0x%p irq %d"
+ " working in %s mode\n", hcd->regs, hcd->irq,
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+ return 0;
+
+err_set_vbus:
+ if (pdata->set_vbus)
+ pdata->set_vbus(0);
+#ifdef CONFIG_USB_OTG_UTILS
+err_put_transceiver:
+ if (ehci_mv->otg)
+ otg_put_transceiver(ehci_mv->otg);
+#endif
+err_disable_clk:
+ mv_ehci_disable(ehci_mv);
+err_iounmap_capreg:
+ iounmap(ehci_mv->cap_regs);
+err_iounmap_phyreg:
+ iounmap(ehci_mv->phy_regs);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(ehci_mv->clk[clk_i]);
+ platform_set_drvdata(pdev, NULL);
+ kfree(ehci_mv);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+ int clk_i;
+
+ if (hcd->rh_registered)
+ usb_remove_hcd(hcd);
+
+ if (ehci_mv->otg) {
+ otg_set_host(ehci_mv->otg, NULL);
+ otg_put_transceiver(ehci_mv->otg);
+ }
+
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
+ if (ehci_mv->pdata->set_vbus)
+ ehci_mv->pdata->set_vbus(0);
+
+ mv_ehci_disable(ehci_mv);
+ }
+
+ iounmap(ehci_mv->cap_regs);
+ iounmap(ehci_mv->phy_regs);
+
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++)
+ clk_put(ehci_mv->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(ehci_mv);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+ {"pxa-u2oehci", PXA_U2OEHCI},
+ {"pxa-sph", PXA_SPH},
+ {"mmp3-hsic", MMP3_HSIC},
+ {"mmp3-fsic", MMP3_FSIC},
+ {},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (!hcd->rh_registered)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+ .probe = mv_ehci_probe,
+ .remove = mv_ehci_remove,
+ .shutdown = mv_ehci_shutdown,
+ .driver = {
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ },
+ .id_table = ehci_id_table,
+};
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ba1f5136113..c0104882c72 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -155,6 +155,8 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index e39b0297bad..bba9850f32f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -41,6 +41,7 @@
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -190,11 +191,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_err(dev, "failed to start usbhs with err %d\n", ret);
- goto err_enable;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
@@ -228,6 +226,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
+ ehci_reset(omap_ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
@@ -240,11 +240,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_enable:
disable_put_regulator(pdata);
- usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
err_io:
iounmap(regs);
@@ -266,10 +263,12 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
return 0;
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index a68a2a5c4b8..6c6a5a3b4ea 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -172,7 +172,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
static void __init
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -182,7 +182,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -194,6 +194,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -259,8 +260,9 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- ehci_orion_conf_mbus_windows(hcd, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 2dc32da75cf..a20e496eb47 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -21,6 +21,34 @@
#include <asm/firmware.h>
#include <asm/ps3.h>
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
+{
+ /* PS3 HC internal setup register offsets. */
+
+ enum ps3_ehci_hc_insnreg {
+ ps3_ehci_hc_insnreg01 = 0x084,
+ ps3_ehci_hc_insnreg02 = 0x088,
+ ps3_ehci_hc_insnreg03 = 0x08c,
+ };
+
+ /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+ * internal INSNREGXX setup regs back to the chip default values
+ * on Host Controller Reset (CMD_RESET) or Light Host Controller
+ * Reset (CMD_LRESET). The work-around for this is for the HC
+ * driver to re-initialise these regs whenever the HC is reset.
+ */
+
+ /* Set burst transfer counts to 256 out, 32 in. */
+
+ writel_be(0x01000020, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg01);
+
+ /* Enable burst transfer counts. */
+
+ writel_be(0x00000001, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg03);
+}
+
static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
{
int result;
@@ -49,6 +77,8 @@ static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
ehci_reset(ehci);
+ ps3_ehci_setup_insnreg(ehci);
+
return result;
}
diff --git a/drivers/usb/host/ehci-pxa168.c b/drivers/usb/host/ehci-pxa168.c
index ac0c16e8f53..8d0e7a22e71 100644
--- a/drivers/usb/host/ehci-pxa168.c
+++ b/drivers/usb/host/ehci-pxa168.c
@@ -299,7 +299,7 @@ static int __devinit ehci_pxa168_drv_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
ehci->sbrn = 0x20;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4e4066c35a0..36ca5077cdf 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -373,6 +373,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ ehci_dbg(ehci,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
@@ -647,7 +658,7 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
buf = sg_dma_address(sg);
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 024b65c4990..293f7412992 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -14,8 +14,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <mach/regs-pmu.h>
-#include <plat/cpu.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>
@@ -136,6 +134,8 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 56a32033adb..a60679cbbf8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- } while (start > next);
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index db9d1b4bfbd..dbc7fe8ca9e 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -21,7 +21,12 @@
#include <linux/platform_data/tegra_usb.h>
#include <linux/irq.h>
#include <linux/usb/otg.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
#include <mach/usb_phy.h>
+#include <mach/iomap.h>
#define TEGRA_USB_DMA_ALIGN 32
@@ -574,6 +579,35 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.port_handed_over = ehci_port_handed_over,
};
+static int setup_vbus_gpio(struct platform_device *pdev)
+{
+ int err = 0;
+ int gpio;
+
+ if (!pdev->dev.of_node)
+ return 0;
+
+ gpio = of_get_named_gpio(pdev->dev.of_node, "nvidia,vbus-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ err = gpio_request(gpio, "vbus_gpio");
+ if (err) {
+ dev_err(&pdev->dev, "can't request vbus gpio %d", gpio);
+ return err;
+ }
+ err = gpio_direction_output(gpio, 1);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable vbus\n");
+ return err;
+ }
+ gpio_set_value(gpio, 1);
+
+ return err;
+}
+
+static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
+
static int tegra_ehci_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -590,6 +624,15 @@ static int tegra_ehci_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+
+ setup_vbus_gpio(pdev);
+
tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
@@ -640,6 +683,28 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
+ /* This is pretty ugly and needs to be fixed when we do only
+ * device-tree probing. Old code relies on the platform_device
+ * numbering that we lack for device-tree-instantiated devices.
+ */
+ if (instance < 0) {
+ switch (res->start) {
+ case TEGRA_USB_BASE:
+ instance = 0;
+ break;
+ case TEGRA_USB2_BASE:
+ instance = 1;
+ break;
+ case TEGRA_USB3_BASE:
+ instance = 2;
+ break;
+ default:
+ err = -ENODEV;
+ dev_err(&pdev->dev, "unknown usb instance\n");
+ goto fail_phy;
+ }
+ }
+
tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
@@ -773,6 +838,11 @@ static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-ehci", },
+ { },
+};
+
static struct platform_driver tegra_ehci_driver = {
.probe = tegra_ehci_probe,
.remove = tegra_ehci_remove,
@@ -783,5 +853,6 @@ static struct platform_driver tegra_ehci_driver = {
.shutdown = tegra_ehci_hcd_shutdown,
.driver = {
.name = "tegra-ehci",
+ .of_match_table = tegra_ehci_of_match,
}
};
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 54d1ab8aec4..c1eda73916c 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -132,6 +132,8 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
ehci_port_power(ehci, 1);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, pdev->resource[1].start,
IRQF_SHARED);
if (ret == 0) {
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index d661cf7de14..3d2e26cbb34 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -78,6 +78,8 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
if (irq < 0)
goto err4;
+ ehci_reset(ehci);
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err4;
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index b4fb511d24b..72f08196f8c 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -69,7 +69,7 @@ int ehci_xls_probe_internal(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 4ed6d19f2a5..d2623747b48 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -824,17 +824,7 @@ static struct platform_driver of_fhci_driver = {
.remove = __devexit_p(of_fhci_remove),
};
-static int __init fhci_module_init(void)
-{
- return platform_driver_register(&of_fhci_driver);
-}
-module_init(fhci_module_init);
-
-static void __exit fhci_module_exit(void)
-{
- platform_driver_unregister(&of_fhci_driver);
-}
-module_exit(fhci_module_exit);
+module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9037035ad1e..7916e56a725 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -297,17 +297,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
};
-static int __init fsl_usb2_mph_dr_init(void)
-{
- return platform_driver_register(&fsl_usb2_mph_dr_driver);
-}
-module_init(fsl_usb2_mph_dr_init);
-
-static void __exit fsl_usb2_mph_dr_exit(void)
-{
- platform_driver_unregister(&fsl_usb2_mph_dr_driver);
-}
-module_exit(fsl_usb2_mph_dr_exit);
+module_platform_driver(fsl_usb2_mph_dr_driver);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 9bfac657572..104730dabd2 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -481,7 +481,7 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
encryption_value = 0;
}
- /* Set the encryption type for commmunicating with the device */
+ /* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -776,7 +776,6 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
@@ -837,18 +836,7 @@ static struct usb_driver hwahc_driver = {
.id_table = hwahc_id_table,
};
-static int __init hwahc_driver_init(void)
-{
- return usb_register(&hwahc_driver);
-}
-module_init(hwahc_driver_init);
-
-static void __exit hwahc_driver_exit(void)
-{
- usb_deregister(&hwahc_driver);
-}
-module_exit(hwahc_driver_exit);
-
+module_usb_driver(hwahc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 2ee18cfa1ef..ff471c1c165 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -473,7 +473,7 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
/* End handling */
/* =========================================== */
-/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i;
@@ -1924,18 +1924,7 @@ static struct platform_driver imx21_hcd_driver = {
.resume = NULL,
};
-static int __init imx21_hcd_init(void)
-{
- return platform_driver_register(&imx21_hcd_driver);
-}
-
-static void __exit imx21_hcd_cleanup(void)
-{
- platform_driver_unregister(&imx21_hcd_driver);
-}
-
-module_init(imx21_hcd_init);
-module_exit(imx21_hcd_cleanup);
+module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27dfab80ed8..fc72d44bf78 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -32,6 +32,13 @@ static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;
+enum queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
+
struct isp1760_hcd {
u32 hcs_params;
spinlock_t lock;
@@ -40,7 +47,7 @@ struct isp1760_hcd {
struct slotinfo int_slots[32];
int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
- struct list_head controlqhs, bulkqhs, interruptqhs;
+ struct list_head qh_list[QH_END];
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -406,12 +413,12 @@ static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 hcc_params;
+ int i;
spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->interruptqhs);
- INIT_LIST_HEAD(&priv->controlqhs);
- INIT_LIST_HEAD(&priv->bulkqhs);
+ for (i = 0; i < QH_END; i++)
+ INIT_LIST_HEAD(&priv->qh_list[i]);
/*
* hw default: 1K periodic list heads, one per frame.
@@ -930,9 +937,9 @@ void schedule_ptds(struct usb_hcd *hcd)
struct isp1760_hcd *priv;
struct isp1760_qh *qh, *qh_next;
struct list_head *ep_queue;
- struct usb_host_endpoint *ep;
LIST_HEAD(urb_list);
struct urb_listitem *urb_listitem, *urb_listitem_next;
+ int i;
if (!hcd) {
WARN_ON(1);
@@ -944,28 +951,13 @@ void schedule_ptds(struct usb_hcd *hcd)
/*
* check finished/retired xfers, transfer payloads, call urb_done()
*/
- ep_queue = &priv->interruptqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
- ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
- qtd_list)->urb->ep;
collect_qtds(hcd, qh, &urb_list);
- if (list_empty(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list))
list_del(&qh->qh_list);
- if (ep->hcpriv == NULL) {
- /* Endpoint has been disabled, so we
- can free the associated queue head. */
- qh_free(qh);
- }
- }
}
-
- if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->controlqhs;
- else if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
@@ -998,17 +990,10 @@ void schedule_ptds(struct usb_hcd *hcd)
*
* I'm sure this scheme could be improved upon!
*/
- ep_queue = &priv->controlqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
enqueue_qtds(hcd, qh);
-
- if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->interruptqhs;
- else if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
}
@@ -1543,16 +1528,16 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
- ep_queue = &priv->controlqhs;
+ ep_queue = &priv->qh_list[QH_CONTROL];
break;
case PIPE_BULK:
- ep_queue = &priv->bulkqhs;
+ ep_queue = &priv->qh_list[QH_BULK];
break;
case PIPE_INTERRUPT:
if (urb->interval < 0)
return -EINVAL;
/* FIXME: Check bandwidth */
- ep_queue = &priv->interruptqhs;
+ ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
case PIPE_ISOCHRONOUS:
dev_err(hcd->self.controller, "%s: isochronous USB packets "
@@ -1714,8 +1699,8 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
unsigned long spinflags;
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
+ struct isp1760_qh *qh, *qh_iter;
+ int i;
spin_lock_irqsave(&priv->lock, spinflags);
@@ -1723,14 +1708,17 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
if (!qh)
goto out;
- list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
- if (qtd->status != QTD_RETIRE) {
- dequeue_urb_from_qtd(hcd, qh, qtd);
- qtd->urb->status = -ECONNRESET;
- }
+ WARN_ON(!list_empty(&qh->qtd_list));
+ for (i = 0; i < QH_END; i++)
+ list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
+ if (qh_iter == qh) {
+ list_del(&qh_iter->qh_list);
+ i = QH_END;
+ break;
+ }
+ qh_free(qh);
ep->hcpriv = NULL;
- /* Cannot free qh here since it will be parsed by schedule_ptds() */
schedule_ptds(hcd);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index a7dc1e1d45f..4592dc17a9f 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
#include "isp1760-hcd.h"
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -31,7 +31,7 @@
#include <linux/pci.h>
#endif
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct isp1760 {
struct usb_hcd *hcd;
int rst_gpio;
@@ -47,23 +47,27 @@ static int of_isp1760_probe(struct platform_device *dev)
int virq;
resource_size_t res_len;
int ret;
- const unsigned int *prop;
unsigned int devflags = 0;
enum of_gpio_flags gpio_flags;
+ u32 bus_width = 0;
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
- if (ret)
- return -ENXIO;
+ if (ret) {
+ ret = -ENXIO;
+ goto free_data;
+ }
res_len = resource_size(&memory);
res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
- if (!res)
- return -EBUSY;
+ if (!res) {
+ ret = -EBUSY;
+ goto free_data;
+ }
if (of_irq_map_one(dp, 0, &oirq)) {
ret = -ENODEV;
@@ -77,8 +81,8 @@ static int of_isp1760_probe(struct platform_device *dev)
devflags |= ISP1760_FLAG_ISP1761;
/* Some systems wire up only 16 of the 32 data lines */
- prop = of_get_property(dp, "bus-width", NULL);
- if (prop && *prop == 16)
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (of_get_property(dp, "port1-otg", NULL) != NULL)
@@ -125,6 +129,7 @@ free_gpio:
gpio_free(drvdata->rst_gpio);
release_reg:
release_mem_region(memory.start, res_len);
+free_data:
kfree(drvdata);
return ret;
}
@@ -437,7 +442,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
@@ -457,7 +462,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 95a9fec38e8..5df0b0e3392 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,7 +223,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (port < 0 || port >= 2)
return;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return;
gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
@@ -234,7 +234,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (port < 0 || port >= 2)
return -EINVAL;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return -EINVAL;
return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
@@ -465,7 +465,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
gpio_request(pdata->vbus_pin[i], "ohci_vbus");
ohci_at91_usb_set_power(pdata, i, 1);
@@ -474,7 +474,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
int ret;
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
gpio_request(pdata->overcurrent_pin[i], "ohci_overcurrent");
@@ -499,14 +499,14 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
gpio_free(pdata->overcurrent_pin[i]);
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 9b66df8278f..40d886adff5 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -173,12 +173,9 @@ static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave(&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d7d34492934..5179fcd73d8 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -127,6 +127,19 @@ static char *hcfs2string (int state)
return "?";
}
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+ switch (ohci->rh_state) {
+ case OHCI_RH_HALTED:
+ return "halted";
+ case OHCI_RH_SUSPENDED:
+ return "suspended";
+ case OHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
@@ -136,9 +149,10 @@ ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
- "OHCI %d.%d, %s legacy support registers\n",
+ "OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
- (temp & 0x0100) ? "with" : "NO");
+ (temp & 0x0100) ? "with" : "NO",
+ rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index dc45d489d00..3d63574d2c7 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -179,8 +179,6 @@ static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_
ohci->next_statechange = jiffies;
ep93xx_stop_hc(&pdev->dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 00000000000..55aa35aa3d7
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,274 @@
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <mach/ohci.h>
+#include <plat/usb-phy.h>
+
+struct exynos_ohci_hcd {
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct clk *clk;
+};
+
+static int ohci_exynos_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ohci_dbg(ohci, "ohci_exynos_start, ohci:%p", ohci);
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver exynos_ohci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EXYNOS OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY|HCD_USB11,
+
+ .start = ohci_exynos_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ .get_frame_number = ohci_get_frame,
+
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int __devinit exynos_ohci_probe(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata;
+ struct exynos_ohci_hcd *exynos_ohci;
+ struct usb_hcd *hcd;
+ struct ohci_hcd *ohci;
+ struct resource *res;
+ int irq;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data defined\n");
+ return -EINVAL;
+ }
+
+ exynos_ohci = kzalloc(sizeof(struct exynos_ohci_hcd), GFP_KERNEL);
+ if (!exynos_ohci)
+ return -ENOMEM;
+
+ exynos_ohci->dev = &pdev->dev;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ exynos_ohci->hcd = hcd;
+ exynos_ohci->clk = clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ohci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ohci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(exynos_ohci->clk);
+ if (err)
+ goto fail_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ ohci = hcd_to_ohci(hcd);
+ ohci_hcd_init(ohci);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, exynos_ohci);
+
+ return 0;
+
+fail:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(exynos_ohci->clk);
+fail_clken:
+ clk_put(exynos_ohci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(exynos_ohci);
+ return err;
+}
+
+static int __devexit exynos_ohci_remove(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+
+ iounmap(hcd->regs);
+
+ clk_disable(exynos_ohci->clk);
+ clk_put(exynos_ohci->clk);
+
+ usb_put_hcd(hcd);
+ kfree(exynos_ohci);
+
+ return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ *
+ * This is still racy as hcd->state is manipulated outside of
+ * any locks =P But that will be a different fix.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+fail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ /* Mark hardware accessible again as we are out of D3 state by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define exynos_ohci_suspend NULL
+#define exynos_ohci_resume NULL
+#endif
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+ .suspend = exynos_ohci_suspend,
+ .resume = exynos_ohci_resume,
+};
+
+static struct platform_driver exynos_ohci_driver = {
+ .probe = exynos_ohci_probe,
+ .remove = __devexit_p(exynos_ohci_remove),
+ .shutdown = exynos_ohci_shutdown,
+ .driver = {
+ .name = "exynos-ohci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ohci_pm_ops,
+ }
+};
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b2639191549..5f5a6324143 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -209,7 +209,7 @@ static int ohci_urb_enqueue (
retval = -ENODEV;
goto fail;
}
- if (!HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
@@ -274,7 +274,7 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
- } else if (HC_IS_RUNNING(hcd->state)) {
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@@ -321,7 +321,7 @@ ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
rescan:
spin_lock_irqsave (&ohci->lock, flags);
- if (!HC_IS_RUNNING (hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
@@ -377,6 +377,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -500,7 +501,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
@@ -575,7 +576,7 @@ static int ohci_run (struct ohci_hcd *ohci)
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
@@ -688,7 +689,7 @@ retry:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
- hcd->state = HC_STATE_RUNNING;
+ ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
@@ -725,7 +726,6 @@ retry:
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((val >> 23) & 0x1fe);
- hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
@@ -761,7 +761,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
usb_hc_died(hcd);
return IRQ_HANDLED;
@@ -771,7 +771,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
ints &= ohci_readl(ohci, &regs->intrenable);
/* interrupt for some other device? */
- if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT))
+ if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
@@ -786,8 +786,8 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
schedule_work (&ohci->nec_work);
} else {
- disable (ohci);
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ ohci->rh_state = OHCI_RH_HALTED;
usb_hc_died(hcd);
}
@@ -871,11 +871,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
- && HC_IS_RUNNING(hcd->state))
+ && ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
@@ -929,7 +929,7 @@ static int ohci_restart (struct ohci_hcd *ohci)
struct urb_priv *priv;
spin_lock_irq(&ohci->lock);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
@@ -1005,6 +1005,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
+#ifdef CONFIG_USB_OHCI_EXYNOS
+#include "ohci-exynos.c"
+#define PLATFORM_DRIVER exynos_ohci_driver
+#endif
+
#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
@@ -1111,7 +1116,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_ath79_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ohci-xls.c"
#define PLATFORM_DRIVER ohci_xls_driver
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 2f00040fc40..836772dfabd 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -111,6 +111,7 @@ __acquires(ohci->lock)
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
+ ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
@@ -140,7 +141,7 @@ __acquires(ohci->lock)
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
- if (hcd->state == HC_STATE_RESUMING) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
@@ -274,6 +275,7 @@ skip_resume:
(void) ohci_readl (ohci, &ohci->regs->control);
}
+ ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
@@ -336,11 +338,8 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
/* If needed, reinitialize and suspend the root hub */
if (need_reinit) {
spin_lock_irq(&ohci->lock);
- hcd->state = HC_STATE_RESUMING;
ohci_rh_resume(ohci);
- hcd->state = HC_STATE_QUIESCING;
ohci_rh_suspend(ohci, 0);
- hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irq(&ohci->lock);
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index e4b8782cc6e..db3968656d2 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -516,7 +516,6 @@ static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
ohci->next_statechange = jiffies;
omap_ohci_clock_power(0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 516ebc4d6cc..1b8133b6e45 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
/*-------------------------------------------------------------------------*/
@@ -134,7 +135,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
int irq;
if (usb_disabled())
- goto err_end;
+ return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
@@ -172,11 +173,8 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_dbg(dev, "failed to start ohci\n");
- goto err_end;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -189,9 +187,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_end:
+ pm_runtime_put_sync(dev);
usb_put_hcd(hcd);
err_io:
@@ -220,9 +216,9 @@ static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
iounmap(hcd->regs);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
usb_put_hcd(hcd);
-
return 0;
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bc01b064585..6109810cc2d 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -308,12 +308,9 @@ static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave (&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 29dfefe1c72..6313e4439f3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -502,8 +502,6 @@ static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
ohci->ohci.next_statechange = jiffies;
pxa27x_stop_hc(ohci, dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 15dc51ded61..c5a1ea9145f 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -912,7 +912,7 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
@@ -1012,7 +1012,7 @@ rescan_this:
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+ if (ohci->rh_state == OHCI_RH_RUNNING)
ed_schedule (ohci, ed);
}
@@ -1021,9 +1021,7 @@ rescan_this:
}
/* maybe reenable control and bulk lists */
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
- && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
- && !ohci->ed_rm_list) {
+ if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index a1877c47601..56dcf069246 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -486,15 +486,66 @@ static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
+ rc = -EINVAL;
+ goto bail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ s3c2410_stop_hc(pdev);
+bail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ s3c2410_start_hc(pdev, hcd);
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define ohci_hcd_s3c2410_drv_suspend NULL
+#define ohci_hcd_s3c2410_drv_resume NULL
+#endif
+
+static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
+ .suspend = ohci_hcd_s3c2410_drv_suspend,
+ .resume = ohci_hcd_s3c2410_drv_resume,
+};
+
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_drv_probe,
.remove = __devexit_p(ohci_hcd_s3c2410_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_s3c2410_drv_suspend, */
- /*.resume = ohci_hcd_s3c2410_drv_resume, */
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-ohci",
+ .pm = &ohci_hcd_s3c2410_pm_ops,
},
};
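
The s3c2410 change above moves suspend/resume from the (previously commented-out) platform_driver callbacks to a dev_pm_ops table hung off driver.pm, which is the form the PM core expects. A bare-bones sketch of that wiring, with hypothetical callbacks that carry none of the OHCI-specific work:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_drv_suspend(struct device *dev)
{
	/* device-specific quiescing would go here */
	return 0;
}

static int demo_drv_resume(struct device *dev)
{
	/* device-specific reinit would go here */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_drv_suspend,
	.resume  = demo_drv_resume,
};

static struct platform_driver demo_driver = {
	.driver = {
		.name  = "demo-dev",
		.owner = THIS_MODULE,
		.pm    = &demo_pm_ops,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");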
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index afc4eb6bb9d..84686d90805 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -29,7 +29,6 @@ static int ohci_sh_start(struct usb_hcd *hcd)
ohci_hcd_init(ohci);
ohci_init(ohci);
ohci_run(ohci);
- hcd->state = HC_STATE_RUNNING;
return 0;
}
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 968cea2b6d4..5596ac2ba1c 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -224,7 +224,6 @@ static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
ohci->next_statechange = jiffies;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 69874654f3b..95c16489e88 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -203,7 +203,6 @@ static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
ohci->next_statechange = jiffies;
spear_stop_ohci(ohci_p);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 06331d93117..120bfe6ede3 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -318,9 +318,6 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
if (ret)
return ret;
}
-
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c
index a3a9c6f45b9..a2247867af8 100644
--- a/drivers/usb/host/ohci-xls.c
+++ b/drivers/usb/host/ohci-xls.c
@@ -40,7 +40,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver,
goto err1;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
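
The ohci-xls hunk is a mechanical cleanup: resource_size() from <linux/ioport.h> already encapsulates the end - start + 1 arithmetic, so open-coding it only invites off-by-one mistakes. The helper is effectively:

#include <linux/ioport.h>

/* Effectively what resource_size() computes: */
static inline resource_size_t demo_resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}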
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 0795b934d00..8ff6f7ea96f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -344,6 +344,12 @@ typedef struct urb_priv {
* a subset of what the full implementation needs. (Linus)
*/
+enum ohci_rh_state {
+ OHCI_RH_HALTED,
+ OHCI_RH_SUSPENDED,
+ OHCI_RH_RUNNING
+};
+
struct ohci_hcd {
spinlock_t lock;
@@ -384,6 +390,7 @@ struct ohci_hcd {
/*
* driver state
*/
+ enum ohci_rh_state rh_state;
int num_ports;
int load [NUM_INTS];
u32 hc_control; /* copy of hc control reg */
@@ -679,11 +686,6 @@ static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
/*-------------------------------------------------------------------------*/
-static inline void disable (struct ohci_hcd *ohci)
-{
- ohci_to_hcd(ohci)->state = HC_STATE_HALT;
-}
-
#define FI 0x2edf /* 12000 bits per frame (-1) */
#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
#define FIT (1 << 31)
@@ -707,7 +709,7 @@ static inline void periodic_reinit (struct ohci_hcd *ohci)
#define read_roothub(hc, register, mask) ({ \
u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
if (temp == -1) \
- disable (hc); \
+ hc->rh_state = OHCI_RH_HALTED; \
else if (hc->flags & OHCI_QUIRK_AMD756) \
while (temp & mask) \
temp = ohci_readl (hc, &hc->regs->roothub.register); \
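
The ohci.h hunk removes the old disable() helper, which wrote HC_STATE_HALT into the shared hcd->state, and introduces the driver-private enum ohci_rh_state plus an rh_state field that is only touched under ohci->lock. The suspend-path conversions elsewhere in this series then reduce to a shape like the following sketch (hypothetical names, not the OHCI code itself):

#include <linux/errno.h>
#include <linux/spinlock.h>

enum demo_rh_state { DEMO_RH_HALTED, DEMO_RH_SUSPENDED, DEMO_RH_RUNNING };

struct demo_hcd {
	spinlock_t		lock;
	enum demo_rh_state	rh_state;
};

static int demo_check_suspended(struct demo_hcd *demo)
{
	unsigned long flags;
	int rc = 0;

	/*
	 * rh_state is only read and written with the lock held, which
	 * the old hcd->state checks could not guarantee.
	 */
	spin_lock_irqsave(&demo->lock, flags);
	if (demo->rh_state != DEMO_RH_SUSPENDED)
		rc = -EINVAL;
	spin_unlock_irqrestore(&demo->lock, flags);

	return rc;
}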
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index dcd889803f0..6f62de5c6e3 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3951,24 +3951,7 @@ static struct platform_driver oxu_driver = {
}
};
-static int __init oxu_module_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&oxu_driver);
- if (retval < 0)
- return retval;
-
- return retval;
-}
-
-static void __exit oxu_module_cleanup(void)
-{
- platform_driver_unregister(&oxu_driver);
-}
-
-module_init(oxu_module_init);
-module_exit(oxu_module_cleanup);
+module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index f6ca80ee4ce..d2c6f5ac462 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e17542861..76083ae9213 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
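
The uhci and whci hunks above switch the scatterlist walks from urb->num_sgs to urb->num_mapped_sgs: after DMA mapping, adjacent entries may have been coalesced, so the mapped count, not the caller-supplied count, is what bounds valid sg_dma_address()/sg_dma_len() accesses. A sketch of the intended iteration, with a hypothetical per-entry handler:

#include <linux/scatterlist.h>
#include <linux/usb.h>

static void demo_walk_mapped_sgs(struct urb *urb)
{
	struct scatterlist *sg;
	int i;

	/*
	 * num_mapped_sgs is what the DMA mapping produced; entries past
	 * it hold no valid DMA addresses even if num_sgs is larger.
	 */
	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* hand addr/len to the controller here */
		(void)addr;
		(void)len;
	}
}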
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 430e88fd3f6..35e257f79c7 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -57,17 +57,15 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
- /* Ugh, these should be #defines, FIXME */
- /* Using table 11-13 in USB 2.0 spec. */
temp = 0;
- /* Bits 1:0 - support port power switching, or power always on */
+ /* Bits 1:0 - support per-port power switching, or power always on */
if (HCC_PPC(xhci->hcc_params))
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
else
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
/* Bit 2 - root hubs are not part of a compound device */
/* Bits 4:3 - individual port over current protection */
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
/* Bits 6:5 - no TTs in root ports */
/* Bit 7 - no port indicators */
desc->wHubCharacteristics = cpu_to_le16(temp);
@@ -86,9 +84,9 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb2_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x29;
+ desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
/* The Device Removable bits are reported on a byte granularity.
* If the port doesn't exist within that byte, the bit is set to 0.
@@ -137,8 +135,8 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb3_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x2a;
- desc->bDescLength = 12;
+ desc->bDescriptorType = USB_DT_SS_HUB;
+ desc->bDescLength = USB_DT_SS_HUB_SIZE;
/* header decode latency should be zero for roothubs,
* see section 4.23.5.2.
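
The xhci-hub change swaps magic numbers for the named constants in <linux/usb/ch11.h>: HUB_CHAR_INDV_PORT_LPSM and HUB_CHAR_NO_LPSM describe the power-switching mode in bits 1:0 of wHubCharacteristics, HUB_CHAR_INDV_PORT_OCPM the overcurrent mode in bits 4:3, and USB_DT_HUB/USB_DT_SS_HUB the descriptor types. A condensed sketch of building the characteristics word (hypothetical helper, host byte order):

#include <linux/usb/ch11.h>

/*
 * Sketch: wHubCharacteristics for a root hub with per-port overcurrent
 * reporting and either per-port or no power switching.
 */
static u16 demo_hub_characteristics(bool per_port_power)
{
	u16 temp = 0;

	temp |= per_port_power ? HUB_CHAR_INDV_PORT_LPSM : HUB_CHAR_NO_LPSM;
	temp |= HUB_CHAR_INDV_PORT_OCPM;	/* bits 4:3 */

	return temp;
}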
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e4b25fa3bc..36cbe2226a4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -42,15 +42,12 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
- xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
- xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)dma);
memset(seg->trbs, 0, SEGMENT_SIZE);
seg->dma = dma;
@@ -62,12 +59,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
- xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)seg->dma);
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
- xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
kfree(seg);
}
@@ -101,9 +95,6 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
- xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
- (unsigned long long)prev->dma,
- (unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -117,7 +108,6 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->first_seg) {
first_seg = ring->first_seg;
seg = first_seg->next;
- xhci_dbg(xhci, "Freeing ring at %p\n", ring);
while (seg != first_seg) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
@@ -160,7 +150,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
struct xhci_segment *prev;
ring = kzalloc(sizeof *(ring), flags);
- xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
return NULL;
@@ -191,9 +180,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
/* See section 4.9.2.1 and 6.4.4.1 */
prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
cpu_to_le32(LINK_TOGGLE);
- xhci_dbg(xhci, "Wrote link toggle flag to"
- " segment %p (virtual), 0x%llx (DMA)\n",
- prev, (unsigned long long)prev->dma);
}
xhci_initialize_ring_info(ring);
return ring;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9f1d4b15d81..b90e1386418 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -155,10 +155,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->deq_seg, next)) {
if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
@@ -231,10 +227,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
}
ring->enq_seg = ring->enq_seg->next;
@@ -560,12 +552,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
- "in seg %p (0x%llx dma)\n",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
if (cur_trb == cur_td->last_trb)
break;
@@ -705,9 +694,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
- xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
- cur_td->first_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking
@@ -1627,7 +1616,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
@@ -1643,7 +1631,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
break;
case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
@@ -1946,6 +1933,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1959,6 +1956,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1985,7 +1992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
case COMP_STALL:
- xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
@@ -1995,11 +2002,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SPLIT_ERR:
case COMP_TX_ERR:
- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
case COMP_BABBLE:
- xhci_warn(xhci, "WARN: babble error on endpoint\n");
+ xhci_dbg(xhci, "Babble error on endpoint\n");
status = -EOVERFLOW;
break;
case COMP_DB_ERR:
@@ -2390,17 +2397,7 @@ hw_died:
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
- irqreturn_t ret;
- struct xhci_hcd *xhci;
-
- xhci = hcd_to_xhci(hcd);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (xhci->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
-
- ret = xhci_irq(hcd);
-
- return ret;
+ return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
@@ -2488,11 +2485,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt()) {
- xhci_dbg(xhci, "queue_trb: Toggle cycle "
- "state for ring %p = %i\n",
- ring, (unsigned int)ring->cycle_state);
- }
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
@@ -2561,13 +2553,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
struct scatterlist *sg;
sg = NULL;
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
temp = urb->transfer_buffer_length;
- xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
for_each_sg(urb->sg, sg, num_sgs, i) {
- unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
/* Scatter gather list entries may cross 64KB boundaries */
@@ -2582,22 +2572,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
- i, (unsigned long long)sg_dma_address(sg),
- len, len, num_trbs - previous_total_trbs);
-
len = min_t(int, len, temp);
temp -= len;
if (temp == 0)
break;
}
- xhci_dbg(xhci, "\n");
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
- "num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- num_trbs);
return num_trbs;
}
@@ -2745,7 +2724,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
num_trbs = count_sg_trbs_needed(xhci, urb);
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
total_packet_count = roundup(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
@@ -2783,8 +2762,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
- xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
- trb_buff_len);
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
@@ -2816,11 +2793,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
- xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
- "64KB boundary at %#x, end dma = %#x\n",
- (unsigned int) addr, trb_buff_len, trb_buff_len,
- (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
- (unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
@@ -2926,15 +2898,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
- "addr = %#llx, num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_trbs);
-
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, false, mem_flags);
@@ -3055,9 +3018,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
- if (!in_interrupt())
- xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
- slot_id, ep_index);
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -3249,15 +3209,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
}
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
- " addr = %#llx, num_tds = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_tds);
-
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aa94c019579..6bbe3c3a711 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -200,14 +200,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_err(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg(xhci, "failed to allocate MSI entry\n");
return ret;
}
ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_err(xhci, "disable MSI interrupt\n");
+ xhci_dbg(xhci, "disable MSI interrupt\n");
pci_disable_msi(pdev);
}
@@ -270,7 +270,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_err(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg(xhci, "Failed to enable MSI-X\n");
goto free_entries;
}
@@ -286,7 +286,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
return ret;
disable_msix:
- xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg(xhci, "disable MSI-X interrupt\n");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
@@ -1330,9 +1333,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -1341,12 +1341,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
urb_priv = urb->hcpriv;
-
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ i = urb_priv->td_cnt;
+ if (i < urb_priv->length)
+ xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx\n",
+ urb, urb->dev->devpath,
+ urb->ep->desc.bEndpointAddress,
+ (unsigned long long) xhci_trb_virt_to_dma(
+ urb_priv->td[i]->start_seg,
+ urb_priv->td[i]->first_trb));
+
+ for (; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
}
@@ -1617,6 +1623,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
+ case COMP_2ND_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
@@ -2793,8 +2800,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
if (ret < 0)
return ret;
- max_streams = USB_SS_MAX_STREAMS(
- eps[i]->ss_ep_comp.bmAttributes);
+ max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
if (max_streams < (*num_streams - 1)) {
xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
eps[i]->desc.bEndpointAddress,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c8fbd2772e..fb99c837914 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1033,7 +1033,6 @@ struct xhci_transfer_event {
/* Invalid Stream ID Error */
#define COMP_STRID_ERR 34
/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
-/* FIXME - check for this */
#define COMP_2ND_BW_ERR 35
/* Split Transaction Error */
#define COMP_SPLIT_ERR 36
@@ -1356,7 +1355,7 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
return 1;
}
-/* There is one ehci_hci structure per controller */
+/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
struct usb_hcd *main_hcd;
struct usb_hcd *shared_hcd;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 27e209a7222..9c0f8caba3b 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -809,19 +809,7 @@ static void mts_usb_disconnect (struct usb_interface *intf)
kfree(desc);
}
-
-static int __init microtek_drv_init(void)
-{
- return usb_register(&mts_usb_driver);
-}
-
-static void __exit microtek_drv_exit(void)
-{
- usb_deregister(&mts_usb_driver);
-}
-
-module_init(microtek_drv_init);
-module_exit(microtek_drv_exit);
+module_usb_driver(mts_usb_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index fe858711651..284b8546141 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -885,40 +885,7 @@ static struct usb_driver adu_driver = {
.id_table = device_table,
};
-static int __init adu_init(void)
-{
- int result;
-
- dbg(2," %s : enter", __func__);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&adu_driver);
- if (result < 0) {
- printk(KERN_ERR "usb_register failed for the "__FILE__
- " driver. Error number %d\n", result);
- goto exit;
- }
-
- printk(KERN_INFO "adutux " DRIVER_DESC " " DRIVER_VERSION "\n");
- printk(KERN_INFO "adutux is an experimental driver. "
- "Use at your own risk\n");
-
-exit:
- dbg(2," %s : leave, return value %d", __func__, result);
-
- return result;
-}
-
-static void __exit adu_exit(void)
-{
- dbg(2," %s : enter", __func__);
- /* deregister this driver with the USB subsystem */
- usb_deregister(&adu_driver);
- dbg(2," %s : leave", __func__);
-}
-
-module_init(adu_init);
-module_exit(adu_exit);
+module_usb_driver(adu_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 9251773ecef..3f7c1a92579 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -271,27 +271,7 @@ static struct usb_driver cypress_driver = {
.id_table = cypress_table,
};
-static int __init cypress_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&cypress_driver);
- if (result)
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
- "Error number: %d\n", result);
-
- return result;
-}
-
-static void __exit cypress_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 1d7251bc1b5..5b9831b95d9 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -417,31 +417,7 @@ static void cytherm_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "Cypress thermometer now disconnected\n");
}
-
-static int __init usb_cytherm_init(void)
-{
- int result;
-
- result = usb_register(&cytherm_driver);
- if (result) {
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
- "Error number: %d\n", result);
- return result;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return 0;
-}
-
-static void __exit usb_cytherm_exit(void)
-{
- usb_deregister(&cytherm_driver);
-}
-
-
-module_init (usb_cytherm_init);
-module_exit (usb_cytherm_exit);
+module_usb_driver(cytherm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index a6521c95f68..d9b6a035544 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -276,18 +276,7 @@ static struct usb_driver emi26_driver = {
.id_table = id_table,
};
-static int __init emi26_init (void)
-{
- return usb_register(&emi26_driver);
-}
-
-static void __exit emi26_exit (void)
-{
- usb_deregister (&emi26_driver);
-}
-
-module_init(emi26_init);
-module_exit(emi26_exit);
+module_usb_driver(emi26_driver);
MODULE_AUTHOR("Tapio LaxstrĂśm");
MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader.");
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index fc15ad4c313..9f39062ebb0 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -290,22 +290,7 @@ static struct usb_driver emi62_driver = {
.id_table = id_table,
};
-static int __init emi62_init (void)
-{
- int retval;
- retval = usb_register (&emi62_driver);
- if (retval)
- printk(KERN_ERR "adi-emi: registration failed\n");
- return retval;
-}
-
-static void __exit emi62_exit (void)
-{
- usb_deregister (&emi62_driver);
-}
-
-module_init(emi62_init);
-module_exit(emi62_exit);
+module_usb_driver(emi62_driver);
MODULE_AUTHOR("Tapio LaxstrĂśm");
MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader.");
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 515b67fffab..0dee2469850 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -428,29 +428,7 @@ static void idmouse_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "disconnected\n");
}
-static int __init usb_idmouse_init(void)
-{
- int result;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&idmouse_driver);
- if (result)
- err("Unable to register device (error %d).", result);
-
- return result;
-}
-
-static void __exit usb_idmouse_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&idmouse_driver);
-}
-
-module_init(usb_idmouse_init);
-module_exit(usb_idmouse_exit);
+module_usb_driver(idmouse_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 81457904d6b..2453a39b479 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -734,7 +734,7 @@ static const struct file_operations iowarrior_fops = {
.llseek = noop_llseek,
};
-static char *iowarrior_devnode(struct device *dev, mode_t *mode)
+static char *iowarrior_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -927,15 +927,4 @@ static struct usb_driver iowarrior_driver = {
.id_table = iowarrior_ids,
};
-static int __init iowarrior_init(void)
-{
- return usb_register(&iowarrior_driver);
-}
-
-static void __exit iowarrior_exit(void)
-{
- usb_deregister(&iowarrior_driver);
-}
-
-module_init(iowarrior_init);
-module_exit(iowarrior_exit);
+module_usb_driver(iowarrior_driver);
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index fe1d44319d0..1c61830e96f 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
ptr = firmware->data;
+ buf[0] = 0x01;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR
"Failed to initialise isight firmware loader\n");
@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
}
}
+ buf[0] = 0x00;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR "isight firmware loading completion failed\n");
ret = -ENODEV;
@@ -126,18 +128,7 @@ static struct usb_driver isight_firmware_driver = {
.id_table = id_table,
};
-static int __init isight_firmware_init(void)
-{
- return usb_register(&isight_firmware_driver);
-}
-
-static void __exit isight_firmware_exit(void)
-{
- usb_deregister(&isight_firmware_driver);
-}
-
-module_init(isight_firmware_init);
-module_exit(isight_firmware_exit);
+module_usb_driver(isight_firmware_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 48c166f0d76..5db4ab52061 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -821,30 +821,5 @@ static struct usb_driver ld_usb_driver = {
.id_table = ld_usb_table,
};
-/**
- * ld_usb_init
- */
-static int __init ld_usb_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&ld_usb_driver);
- if (retval)
- err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
-
- return retval;
-}
-
-/**
- * ld_usb_exit
- */
-static void __exit ld_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&ld_usb_driver);
-}
-
-module_init(ld_usb_init);
-module_exit(ld_usb_exit);
+module_usb_driver(ld_usb_driver);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index a989356f693..57522204276 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -269,7 +269,7 @@ static const struct file_operations tower_fops = {
.llseek = tower_llseek,
};
-static char *legousbtower_devnode(struct device *dev, mode_t *mode)
+static char *legousbtower_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -1043,51 +1043,7 @@ static void tower_disconnect (struct usb_interface *interface)
dbg(2, "%s: leave", __func__);
}
-
-
-/**
- * lego_usb_tower_init
- */
-static int __init lego_usb_tower_init(void)
-{
- int result;
- int retval = 0;
-
- dbg(2, "%s: enter", __func__);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&tower_driver);
- if (result < 0) {
- err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
- retval = -1;
- goto exit;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
-exit:
- dbg(2, "%s: leave, return value %d", __func__, retval);
-
- return retval;
-}
-
-
-/**
- * lego_usb_tower_exit
- */
-static void __exit lego_usb_tower_exit(void)
-{
- dbg(2, "%s: enter", __func__);
-
- /* deregister this driver with the USB subsystem */
- usb_deregister (&tower_driver);
-
- dbg(2, "%s: leave", __func__);
-}
-
-module_init (lego_usb_tower_init);
-module_exit (lego_usb_tower_exit);
+module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 4e23d3841b4..487a8ce0775 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -531,33 +531,7 @@ static struct usb_driver rio_driver = {
.id_table = rio_table,
};
-static int __init usb_rio_init(void)
-{
- int retval;
- retval = usb_register(&rio_driver);
- if (retval)
- goto out;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
-out:
- return retval;
-}
-
-
-static void __exit usb_rio_cleanup(void)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- rio->present = 0;
- usb_deregister(&rio_driver);
-
-
-}
-
-module_init(usb_rio_init);
-module_exit(usb_rio_cleanup);
+module_usb_driver(rio_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index f63776a48e2..741efed4a23 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -137,26 +137,7 @@ static struct usb_driver tv_driver = {
.id_table = id_table,
};
-static int __init tv_init(void)
-{
- int retval = usb_register(&tv_driver);
- if (retval) {
- err("usb_register failed. Error number %d", retval);
- return retval;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return 0;
-}
-
-static void __exit tv_exit(void)
-{
- usb_deregister(&tv_driver);
-}
-
-module_init (tv_init);
-module_exit (tv_exit);
+module_usb_driver(tv_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 1871cdf10da..e2b4bd31c2b 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -450,25 +450,7 @@ static struct usb_driver lcd_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_lcd_init(void)
-{
- int result;
-
- result = usb_register(&lcd_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-
-static void __exit usb_lcd_exit(void)
-{
- usb_deregister(&lcd_driver);
-}
-
-module_init(usb_lcd_init);
-module_exit(usb_lcd_exit);
+module_usb_driver(lcd_driver);
MODULE_AUTHOR("Georges Toth <g.toth@e-biz.lu>");
MODULE_DESCRIPTION(DRIVER_VERSION);
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 43f84e50d51..12d03e7ad63 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -31,6 +31,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
{ USB_DEVICE(0x1d34, 0x0004),
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ { USB_DEVICE(0x1d34, 0x000a),
+ .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -231,23 +233,7 @@ static struct usb_driver led_driver = {
.id_table = id_table,
};
-static int __init usb_led_init(void)
-{
- int retval = 0;
-
- retval = usb_register(&led_driver);
- if (retval)
- err("usb_register failed. Error number %d", retval);
- return retval;
-}
-
-static void __exit usb_led_exit(void)
-{
- usb_deregister(&led_driver);
-}
-
-module_init(usb_led_init);
-module_exit(usb_led_exit);
+module_usb_driver(led_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
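
Besides the module_usb_driver() conversion, the usbled hunk adds one more Dream Cheeky product ID to id_table; because the table is exported with MODULE_DEVICE_TABLE(usb, ...), the new entry alone is enough for udev/modprobe to autoload the driver for that device. The general shape of such a table, with hypothetical vendor/product IDs:

#include <linux/module.h>
#include <linux/usb.h>

/* Hypothetical vendor/product IDs, for illustration only. */
static const struct usb_device_id demo_id_table[] = {
	{ USB_DEVICE(0x1234, 0x0001), .driver_info = 0 },
	{ USB_DEVICE(0x1234, 0x0002), .driver_info = 1 },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_id_table);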
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 417b8f207e8..107bf13b1cf 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -437,23 +437,7 @@ static struct usb_driver sevseg_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_sevseg_init(void)
-{
- int rc = 0;
-
- rc = usb_register(&sevseg_driver);
- if (rc)
- err("usb_register failed. Error number %d", rc);
- return rc;
-}
-
-static void __exit usb_sevseg_exit(void)
-{
- usb_deregister(&sevseg_driver);
-}
-
-module_init(usb_sevseg_init);
-module_exit(usb_sevseg_exit);
+module_usb_driver(sevseg_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index bd6d00802ea..959145baf3c 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1765,7 +1765,6 @@ static int test_unaligned_bulk(
* off just killing the userspace task and waiting for it to exit.
*/
-/* No BKL needed */
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index ac5bfd619e6..897edda4227 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -539,26 +539,6 @@ static const struct file_operations yurex_fops = {
.llseek = default_llseek,
};
-
-static int __init usb_yurex_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&yurex_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit usb_yurex_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&yurex_driver);
-}
-
-module_init(usb_yurex_init);
-module_exit(usb_yurex_exit);
+module_usb_driver(yurex_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07a03460a59..f70cab3beee 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -5,14 +5,13 @@
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
depends on USB && USB_GADGET
- depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
select USB_GADGET_DUALSPEED
- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -31,9 +30,10 @@ config USB_MUSB_HDRC
To compile this driver as a module, choose M here; the
module will be called "musb-hdrc".
+if USB_MUSB_HDRC
+
choice
prompt "Platform Glue Layer"
- depends on USB_MUSB_HDRC
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -45,7 +45,6 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
- depends on ARCH_OMAP
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
@@ -65,46 +64,54 @@ config USB_MUSB_UX500
endchoice
-config MUSB_PIO_ONLY
- bool 'Disable DMA (always use PIO)'
- depends on USB_MUSB_HDRC
- default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
+choice
+ prompt 'MUSB DMA mode'
+ default USB_UX500_DMA if USB_MUSB_UX500
+ default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
+ default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
+ default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
help
- All data is copied between memory and FIFO by the CPU.
- DMA controllers are ignored.
-
- Do not select 'n' here unless DMA support for your SOC or board
- is unavailable (or unstable). When DMA is enabled at compile time,
- you can still disable it at run time using the "use_dma=n" module
- parameter.
+ Unfortunately, only one option can be enabled here. Ideally one
+ should be able to build all these drivers into one kernel to
+ allow using DMA on multiplatform kernels.
config USB_UX500_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_UX500
+ bool 'ST Ericsson U8500 and U5500'
+ depends on USB_MUSB_UX500
help
Enable DMA transfers on UX500 platforms.
config USB_INVENTRA_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ bool 'Inventra'
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
help
Enable DMA transfers using Mentor's engine.
config USB_TI_CPPI_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_DAVINCI
+ bool 'TI CPPI (Davinci)'
+ depends on USB_MUSB_DAVINCI
help
Enable DMA transfers when TI CPPI DMA is available.
config USB_TUSB_OMAP_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ bool 'TUSB 6010'
depends on USB_MUSB_TUSB6010
depends on ARCH_OMAP
- default y
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+endchoice
+
+endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index d8fd9d092de..88bfb9dee4b 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,25 +24,7 @@ obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
# PIO only, or DMA (several potential schemes).
# though PIO is always there to back up DMA, and for ep0
-ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
-
- ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
- musb_hdrc-y += musbhsdma.o
-
- else
- ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
- musb_hdrc-y += cppi_dma.o
-
- else
- ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
- musb_hdrc-y += tusb6010_omap.o
-
- else
- ifeq ($(CONFIG_USB_UX500_DMA),y)
- musb_hdrc-y += ux500_dma.o
-
- endif
- endif
- endif
- endif
-endif
+musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
+musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
+musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 318fb4e8a88..53be7aef630 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -513,7 +513,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
- /* flush writebufer */
+ /* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c1fa12ec7a9..f6ff7923048 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -661,7 +661,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
musb->is_active = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
musb->ep0_stage = MUSB_EP0_START;
@@ -1432,7 +1431,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
struct musb_hw_ep *hw_ep = musb->endpoints + i;
hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync_va =
@@ -1631,6 +1630,7 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
}
}
}
+EXPORT_SYMBOL_GPL(musb_dma_completion);
#else
#define use_dma 0
@@ -2012,8 +2012,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
- pm_runtime_put(musb->controller);
-
status = musb_init_debugfs(musb);
if (status < 0)
goto fail4;
@@ -2158,6 +2156,7 @@ static void musb_save_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
musb->context.index_regs[i].txcsr =
@@ -2233,6 +2232,7 @@ static void musb_restore_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
musb_writew(epio, MUSB_TXCSR,
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b3c065ab9db..3d28fb8a2dc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,7 +40,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/timer.h>
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -311,6 +310,7 @@ struct musb_context_registers {
u8 index, testmode;
u8 devctl, busctl, misc;
+ u32 otg_interfsel;
struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
};
@@ -327,6 +327,7 @@ struct musb {
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct work_struct otg_notifier_work;
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -372,6 +373,7 @@ struct musb {
u16 int_tx;
struct otg_transceiver *xceiv;
+ u8 xceiv_event;
int nIrq;
unsigned irq_wake:1;
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 742eada5002..27ba8f79946 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -43,8 +43,8 @@
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
#ifdef CONFIG_DEBUG_FS
-extern int musb_init_debugfs(struct musb *musb);
-extern void musb_exit_debugfs(struct musb *musb);
+int musb_init_debugfs(struct musb *musb);
+void musb_exit_debugfs(struct musb *musb);
#else
static inline int musb_init_debugfs(struct musb *musb)
{
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 61f4ee466df..13d9af9bf92 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -33,11 +33,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -46,10 +42,6 @@
#include "musb_core.h"
#include "musb_debug.h"
-#ifdef CONFIG_ARCH_DAVINCI
-#include "davinci.h"
-#endif
-
struct musb_register_map {
char *name;
unsigned offset;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d51043acfe1..ac3d2eec20f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -40,8 +40,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -1844,7 +1842,7 @@ int __init musb_gadget_setup(struct musb *musb)
*/
musb->g.ops = &musb_gadget_operations;
- musb->g.is_dualspeed = 1;
+ musb->g.max_speed = USB_SPEED_HIGH;
musb->g.speed = USB_SPEED_UNKNOWN;
/* this "gadget" abstracts/virtualizes the controller */
@@ -1903,7 +1901,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->max_speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 6a0d0467ec7..e40d7647caf 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -37,7 +37,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8066e..79cb0af779f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
+ else if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 03c6ccdbb3b..e61aa95f2d2 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -74,7 +74,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
{ __raw_writel(data, addr + offset); }
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ba85f273e48..c27bbbf32b5 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -228,21 +227,25 @@ static int musb_otg_notifications(struct notifier_block *nb,
unsigned long event, void *unused)
{
struct musb *musb = container_of(nb, struct musb, nb);
+
+ musb->xceiv_event = event;
+ schedule_work(&musb->otg_notifier_work);
+
+ return 0;
+}
+
+static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
+{
+ struct musb *musb = container_of(data_notifier_work, struct musb, otg_notifier_work);
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *pdata = dev->platform_data;
struct omap_musb_board_data *data = pdata->board_data;
- switch (event) {
+ switch (musb->xceiv_event) {
case USB_EVENT_ID:
dev_dbg(musb->controller, "ID GND\n");
- if (is_otg_enabled(musb)) {
- if (musb->gadget_driver) {
- pm_runtime_get_sync(musb->controller);
- otg_init(musb->xceiv);
- omap2430_musb_set_vbus(musb, 1);
- }
- } else {
+ if (!is_otg_enabled(musb) || musb->gadget_driver) {
pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
omap2430_musb_set_vbus(musb, 1);
@@ -274,10 +277,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
break;
default:
dev_dbg(musb->controller, "ID float\n");
- return NOTIFY_DONE;
}
-
- return NOTIFY_OK;
}
static int omap2430_musb_init(struct musb *musb)
@@ -297,6 +297,8 @@ static int omap2430_musb_init(struct musb *musb)
return -ENODEV;
}
+ INIT_WORK(&musb->otg_notifier_work, musb_otg_notifier_work);
+
status = pm_runtime_get_sync(dev);
if (status < 0) {
dev_err(dev, "pm_runtime_get_sync FAILED");
@@ -334,7 +336,6 @@ static int omap2430_musb_init(struct musb *musb)
return 0;
err1:
- pm_runtime_disable(dev);
return status;
}
@@ -350,20 +351,19 @@ static void omap2430_musb_enable(struct musb *musb)
case USB_EVENT_ID:
otg_init(musb->xceiv);
- if (data->interface_type == MUSB_INTERFACE_UTMI) {
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
+ if (data->interface_type != MUSB_INTERFACE_UTMI)
+ break;
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "configured as A device timeout");
+ break;
}
}
break;
@@ -478,7 +478,6 @@ static int __exit omap2430_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
kfree(glue);
return 0;
@@ -491,6 +490,9 @@ static int omap2430_runtime_suspend(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
omap2430_low_level_exit(musb);
otg_set_suspend(musb->xceiv, 1);
@@ -503,6 +505,9 @@ static int omap2430_runtime_resume(struct device *dev)
struct musb *musb = glue_to_musb(glue);
omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+
otg_set_suspend(musb->xceiv, 0);
return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ec1480191f7..1f405616e6c 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -56,6 +56,7 @@ u8 tusb_get_revision(struct musb *musb)
return rev;
}
+EXPORT_SYMBOL_GPL(tusb_get_revision);
static int tusb_print_revision(struct musb *musb)
{
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index ef4333f4bbe..a163632877a 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -37,7 +37,6 @@ struct ux500_dma_channel {
struct dma_channel channel;
struct ux500_dma_controller *controller;
struct musb_hw_ep *hw_ep;
- struct work_struct channel_work;
struct dma_chan *dma_chan;
unsigned int cur_len;
dma_cookie_t cookie;
@@ -56,31 +55,11 @@ struct ux500_dma_controller {
dma_addr_t phy_base;
};
-/* Work function invoked from DMA callback to handle tx transfers. */
-static void ux500_tx_work(struct work_struct *data)
-{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
- struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
- struct musb *musb = hw_ep->musb;
- unsigned long flags;
-
- dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
- hw_ep->epnum);
-
- spin_lock_irqsave(&musb->lock, flags);
- ux500_channel->channel.actual_len = ux500_channel->cur_len;
- ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
- musb_dma_completion(musb, hw_ep->epnum,
- ux500_channel->is_tx);
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
/* Work function invoked from DMA callback to handle rx transfers. */
-static void ux500_rx_work(struct work_struct *data)
+void ux500_dma_callback(void *private_data)
{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
+ struct dma_channel *channel = private_data;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct musb *musb = hw_ep->musb;
unsigned long flags;
@@ -94,14 +73,7 @@ static void ux500_rx_work(struct work_struct *data)
musb_dma_completion(musb, hw_ep->epnum,
ux500_channel->is_tx);
spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-void ux500_dma_callback(void *private_data)
-{
- struct dma_channel *channel = (struct dma_channel *)private_data;
- struct ux500_dma_channel *ux500_channel = channel->private_data;
- schedule_work(&ux500_channel->channel_work);
}
static bool ux500_configure_channel(struct dma_channel *channel,
@@ -330,7 +302,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
void **param_array;
struct ux500_dma_channel *channel_array;
u32 ch_count;
- void (*musb_channel_work)(struct work_struct *);
dma_cap_mask_t mask;
if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
@@ -347,7 +318,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
channel_array = controller->rx_channel;
ch_count = data->num_rx_channels;
param_array = data->dma_rx_param_array;
- musb_channel_work = ux500_rx_work;
for (dir = 0; dir < 2; dir++) {
for (ch_num = 0; ch_num < ch_count; ch_num++) {
@@ -374,15 +344,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return -EBUSY;
}
- INIT_WORK(&ux500_channel->channel_work,
- musb_channel_work);
}
/* Prepare the loop for TX channels */
channel_array = controller->tx_channel;
ch_count = data->num_tx_channels;
param_array = data->dma_tx_param_array;
- musb_channel_work = ux500_tx_work;
is_tx = 1;
}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481ad98d..2a25955881f 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -82,9 +82,9 @@ config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
select USB_OTG_UTILS
help
- this driver is to be used by all the usb transceiver which are either
- built-in with usb ip or which are autonomous and doesn't require any
- phy programming such as ISP1x04 etc.
+ This driver is to be used by all the USB transceivers that are either
+ built into the USB IP or are autonomous and don't require any
+ PHY programming, such as the ISP1x04.
config USB_LANGWELL_OTG
tristate "Intel Langwell USB OTG dual-role support"
@@ -114,13 +114,13 @@ config USB_MSM_OTG
has an external PHY.
config AB8500_USB
- tristate "AB8500 USB Transceiver Driver"
- depends on AB8500_CORE
- select USB_OTG_UTILS
- help
- Enable this to support the USB OTG transceiver in AB8500 chip.
- This transceiver supports high and full speed devices plus,
- in host mode, low speed.
+ tristate "AB8500 USB Transceiver Driver"
+ depends on AB8500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in AB8500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
@@ -130,4 +130,16 @@ config FSL_USB2_OTG
help
Enable this to support Freescale USB OTG transceiver.
+config USB_MV_OTG
+ tristate "Marvell USB OTG support"
+ depends on USB_MV_UDC
+ select USB_OTG
+ select USB_OTG_UTILS
+ help
+ Say Y here if you want to build the Marvell USB OTG transceiver
+ driver into the kernel (including the PXA and MMP series). This driver
+ implements role switching between the EHCI host driver and the gadget driver.
+
+ To compile this driver as a module, choose M here.
+
endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c5333..b2c5a959863 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
+obj-$(CONFIG_USB_MV_OTG) += mv_otg.o
diff --git a/drivers/usb/otg/fsl_otg.c b/drivers/usb/otg/fsl_otg.c
index 0f420b25e9a..a190850d2d3 100644
--- a/drivers/usb/otg/fsl_otg.c
+++ b/drivers/usb/otg/fsl_otg.c
@@ -639,7 +639,7 @@ static int fsl_otg_set_power(struct otg_transceiver *otg_p, unsigned mA)
* Delayed pin detect interrupt processing.
*
* When the Mini-A cable is disconnected from the board,
- * the pin-detect interrupt happens before the disconnnect
+ * the pin-detect interrupt happens before the disconnect
* interrupts for the connected device(s). In order to
* process the disconnect interrupt(s) prior to switching
* roles, the pin-detect interrupts are delayed, and handled
@@ -1151,18 +1151,7 @@ struct platform_driver fsl_otg_driver = {
},
};
-static int __init fsl_usb_otg_init(void)
-{
- pr_info(DRIVER_INFO "\n");
- return platform_driver_register(&fsl_otg_driver);
-}
-module_init(fsl_usb_otg_init);
-
-static void __exit fsl_usb_otg_exit(void)
-{
- platform_driver_unregister(&fsl_otg_driver);
-}
-module_exit(fsl_usb_otg_exit);
+module_platform_driver(fsl_otg_driver);
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c
new file mode 100644
index 00000000000..db0d4fcdc8e
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/mv_usb.h>
+
+#include "mv_otg.h"
+
+#define DRIVER_DESC "Marvell USB OTG transceiver driver"
+#define DRIVER_VERSION "Jan 20, 2010"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "mv-otg";
+
+static char *state_string[] = {
+ "undefined",
+ "b_idle",
+ "b_srp_init",
+ "b_peripheral",
+ "b_wait_acon",
+ "b_host",
+ "a_idle",
+ "a_wait_vrise",
+ "a_wait_bcon",
+ "a_host",
+ "a_suspend",
+ "a_peripheral",
+ "a_wait_vfall",
+ "a_vbus_err"
+};
+
+static int mv_otg_set_vbus(struct otg_transceiver *otg, bool on)
+{
+ struct mv_otg *mvotg = container_of(otg, struct mv_otg, otg);
+ if (mvotg->pdata->set_vbus == NULL)
+ return -ENODEV;
+
+ return mvotg->pdata->set_vbus(on);
+}
+
+static int mv_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ otg->host = host;
+
+ return 0;
+}
+
+static int mv_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ otg->gadget = gadget;
+
+ return 0;
+}
+
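+/* Kick the OTG state machine worker; a non-zero delay defers the re-evaluation. */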
+static void mv_otg_run_state_machine(struct mv_otg *mvotg,
+ unsigned long delay)
+{
+ dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
+ if (!mvotg->qwork)
+ return;
+
+ queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
+}
+
+static void mv_otg_timer_await_bcon(unsigned long data)
+{
+ struct mv_otg *mvotg = (struct mv_otg *) data;
+
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
+
+ dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+}
+
+static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+
+ if (timer_pending(timer))
+ del_timer(timer);
+
+ return 0;
+}
+
+static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
+ unsigned long interval,
+ void (*callback) (unsigned long))
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+ if (timer_pending(timer)) {
+ dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
+ return -EBUSY;
+ }
+
+ init_timer(timer);
+ timer->data = (unsigned long) mvotg;
+ timer->function = callback;
+ timer->expires = jiffies + interval;
+ add_timer(timer);
+
+ return 0;
+}
+
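+/* Stop the controller, issue a core reset, then mask and clear all interrupts. */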
+static int mv_otg_reset(struct mv_otg *mvotg)
+{
+ unsigned int loops;
+ u32 tmp;
+
+ /* Stop the controller */
+ tmp = readl(&mvotg->op_regs->usbcmd);
+ tmp &= ~USBCMD_RUN_STOP;
+ writel(tmp, &mvotg->op_regs->usbcmd);
+
+ /* Reset the controller to get default values */
+ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
+
+ loops = 500;
+ while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+ if (loops == 0) {
+ dev_err(&mvotg->pdev->dev,
+ "Wait for RESET completed TIMEOUT\n");
+ return -ETIMEDOUT;
+ }
+ loops--;
+ udelay(20);
+ }
+
+ writel(0x0, &mvotg->op_regs->usbintr);
+ tmp = readl(&mvotg->op_regs->usbsts);
+ writel(tmp, &mvotg->op_regs->usbsts);
+
+ return 0;
+}
+
+static void mv_otg_init_irq(struct mv_otg *mvotg)
+{
+ u32 otgsc;
+
+ mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
+ | OTGSC_INTR_A_VBUS_VALID;
+ mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
+ | OTGSC_INTSTS_A_VBUS_VALID;
+
+ if (mvotg->pdata->vbus == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
+ | OTGSC_INTR_B_SESSION_END;
+ mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
+ | OTGSC_INTSTS_B_SESSION_END;
+ }
+
+ if (mvotg->pdata->id == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_USB_ID;
+ mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
+ }
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+}
+
+static void mv_otg_start_host(struct mv_otg *mvotg, int on)
+{
+ struct otg_transceiver *otg = &mvotg->otg;
+ struct usb_hcd *hcd;
+
+ if (!otg->host)
+ return;
+
+ dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
+
+ hcd = bus_to_hcd(otg->host);
+
+ if (on)
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ else
+ usb_remove_hcd(hcd);
+}
+
+static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
+{
+ struct otg_transceiver *otg = &mvotg->otg;
+
+ if (!otg->gadget)
+ return;
+
+ dev_info(otg->dev, "gadget %s\n", on ? "on" : "off");
+
+ if (on)
+ usb_gadget_vbus_connect(otg->gadget);
+ else
+ usb_gadget_vbus_disconnect(otg->gadget);
+}
+
+static void otg_clock_enable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_enable(mvotg->clk[i]);
+}
+
+static void otg_clock_disable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_disable(mvotg->clk[i]);
+}
+
+static int mv_otg_enable_internal(struct mv_otg *mvotg)
+{
+ int retval = 0;
+
+ if (mvotg->active)
+ return 0;
+
+ dev_dbg(&mvotg->pdev->dev, "otg enabled\n");
+
+ otg_clock_enable(mvotg);
+ if (mvotg->pdata->phy_init) {
+ retval = mvotg->pdata->phy_init(mvotg->phy_regs);
+ if (retval) {
+ dev_err(&mvotg->pdev->dev,
+ "init phy error %d\n", retval);
+ otg_clock_disable(mvotg);
+ return retval;
+ }
+ }
+ mvotg->active = 1;
+
+ return 0;
+
+}
+
+static int mv_otg_enable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ return mv_otg_enable_internal(mvotg);
+
+ return 0;
+}
+
+static void mv_otg_disable_internal(struct mv_otg *mvotg)
+{
+ if (mvotg->active) {
+ dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
+ if (mvotg->pdata->phy_deinit)
+ mvotg->pdata->phy_deinit(mvotg->phy_regs);
+ otg_clock_disable(mvotg);
+ mvotg->active = 0;
+ }
+}
+
+static void mv_otg_disable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+}
+
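+/* Sample the ID/VBUS/session inputs from OTGSC (or the platform callbacks) into otg_ctrl. */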
+static void mv_otg_update_inputs(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+
+ if (mvotg->pdata->vbus) {
+ if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
+ otg_ctrl->b_sess_vld = 1;
+ otg_ctrl->b_sess_end = 0;
+ } else {
+ otg_ctrl->b_sess_vld = 0;
+ otg_ctrl->b_sess_end = 1;
+ }
+ } else {
+ otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
+ otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
+ }
+
+ if (mvotg->pdata->id)
+ otg_ctrl->id = !!mvotg->pdata->id->poll();
+ else
+ otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);
+
+ if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
+ otg_ctrl->a_bus_req = 1;
+
+ otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
+ otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);
+
+ dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
+ dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
+ dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
+ dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
+}
+
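+/* Advance the OTG state machine one step based on the freshly sampled inputs. */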
+static void mv_otg_update_state(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ struct otg_transceiver *otg = &mvotg->otg;
+ int old_state = otg->state;
+
+ switch (old_state) {
+ case OTG_STATE_UNDEFINED:
+ otg->state = OTG_STATE_B_IDLE;
+ /* FALL THROUGH */
+ case OTG_STATE_B_IDLE:
+ if (otg_ctrl->id == 0)
+ otg->state = OTG_STATE_A_IDLE;
+ else if (otg_ctrl->b_sess_vld)
+ otg->state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
+ otg->state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_A_IDLE:
+ if (otg_ctrl->id)
+ otg->state = OTG_STATE_B_IDLE;
+ else if (!(otg_ctrl->a_bus_drop) &&
+ (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
+ otg->state = OTG_STATE_A_WAIT_VRISE;
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ if (otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (otg_ctrl->id || otg_ctrl->a_bus_drop
+ || otg_ctrl->a_wait_bcon_timeout) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ otg_ctrl->a_bus_req = 0;
+ } else if (!otg_ctrl->a_vbus_vld) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ } else if (otg_ctrl->b_conn) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_HOST;
+ }
+ break;
+ case OTG_STATE_A_HOST:
+ if (otg_ctrl->id || !otg_ctrl->b_conn
+ || otg_ctrl->a_bus_drop)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ else if (!otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (otg_ctrl->id
+ || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
+ || otg_ctrl->a_bus_req)
+ otg->state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ if (otg_ctrl->id || otg_ctrl->a_clr_err
+ || otg_ctrl->a_bus_drop) {
+ otg_ctrl->a_clr_err = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
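+/*
+ * State machine worker: re-sample the inputs, apply a transition and its
+ * side effects (host/gadget start/stop, VBUS), and loop until the state
+ * stops changing.
+ */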
+static void mv_otg_work(struct work_struct *work)
+{
+ struct mv_otg *mvotg;
+ struct otg_transceiver *otg;
+ int old_state;
+
+ mvotg = container_of((struct delayed_work *)work, struct mv_otg, work);
+
+run:
+ /* the workqueue is single-threaded; otherwise we would need a spin_lock to protect this */
+ otg = &mvotg->otg;
+ old_state = otg->state;
+
+ if (!mvotg->active)
+ return;
+
+ mv_otg_update_inputs(mvotg);
+ mv_otg_update_state(mvotg);
+
+ if (old_state != otg->state) {
+ dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
+ state_string[old_state],
+ state_string[otg->state]);
+
+ switch (otg->state) {
+ case OTG_STATE_B_IDLE:
+ mvotg->otg.default_a = 0;
+ if (old_state == OTG_STATE_B_PERIPHERAL)
+ mv_otg_start_periphrals(mvotg, 0);
+ mv_otg_reset(mvotg);
+ mv_otg_disable(mvotg);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ mv_otg_enable(mvotg);
+ mv_otg_start_periphrals(mvotg, 1);
+ break;
+ case OTG_STATE_A_IDLE:
+ mvotg->otg.default_a = 1;
+ mv_otg_enable(mvotg);
+ if (old_state == OTG_STATE_A_WAIT_VFALL)
+ mv_otg_start_host(mvotg, 0);
+ mv_otg_reset(mvotg);
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ mv_otg_set_vbus(&mvotg->otg, 1);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (old_state != OTG_STATE_A_HOST)
+ mv_otg_start_host(mvotg, 1);
+ mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
+ T_A_WAIT_BCON,
+ mv_otg_timer_await_bcon);
+ /*
+ * Now, we directly enter A_HOST, so set b_conn = 1
+ * here. Ideally, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 1;
+ break;
+ case OTG_STATE_A_HOST:
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /*
+ * Now, we have exited A_HOST, so set b_conn = 0
+ * here. Ideally, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 0;
+ mv_otg_set_vbus(&mvotg->otg, 0);
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ break;
+ default:
+ break;
+ }
+ goto run;
+ }
+}
+
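+/* Controller interrupt: ack the OTGSC status bits and kick the state machine. */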
+static irqreturn_t mv_otg_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ /*
+ * if we have vbus, then the vbus detection for B-device
+ * will be done by mv_otg_inputs_irq().
+ */
+ if (mvotg->pdata->vbus)
+ if ((otgsc & OTGSC_STS_USB_ID) &&
+ !(otgsc & OTGSC_INTSTS_USB_ID))
+ return IRQ_NONE;
+
+ if ((otgsc & mvotg->irq_status) == 0)
+ return IRQ_NONE;
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+
+ /* The clock may be disabled at this time */
+ if (!mvotg->active) {
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+ }
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_req);
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+
+ if (count > 2)
+ return -1;
+
+ /* We will use this interface to change to A device */
+ if (mvotg->otg.state != OTG_STATE_B_IDLE
+ && mvotg->otg.state != OTG_STATE_A_IDLE)
+ return -1;
+
+ /* The clock may be disabled and we need to set the irq for ID detection */
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_req = 1;
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_req = 1\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req,
+ set_a_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_clr_err = 1;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_clr_err = 1\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_drop);
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 0\n");
+ } else if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_drop = 1;
+ mvotg->otg_ctrl.a_bus_req = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 1\n");
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: and a_bus_req = 0\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR,
+ get_a_bus_drop, set_a_bus_drop);
+
+static struct attribute *inputs_attrs[] = {
+ &dev_attr_a_bus_req.attr,
+ &dev_attr_a_clr_err.attr,
+ &dev_attr_a_bus_drop.attr,
+ NULL,
+};
+
+static struct attribute_group inputs_attr_group = {
+ .name = "inputs",
+ .attrs = inputs_attrs,
+};
+
+int mv_otg_remove(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ int clk_i;
+
+ sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
+
+ if (mvotg->irq)
+ free_irq(mvotg->irq, mvotg);
+
+ if (mvotg->pdata->vbus)
+ free_irq(mvotg->pdata->vbus->irq, mvotg);
+ if (mvotg->pdata->id)
+ free_irq(mvotg->pdata->id->irq, mvotg);
+
+ if (mvotg->qwork) {
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+ }
+
+ mv_otg_disable(mvotg);
+
+ if (mvotg->cap_regs)
+ iounmap(mvotg->cap_regs);
+
+ if (mvotg->phy_regs)
+ iounmap(mvotg->phy_regs);
+
+ for (clk_i = 0; clk_i <= mvotg->clknum; clk_i++)
+ clk_put(mvotg->clk[clk_i]);
+
+ otg_set_transceiver(NULL);
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(mvotg);
+
+ return 0;
+}
+
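+/* Probe: map phy/cap registers, grab clocks and IRQs, then register the transceiver and sysfs inputs. */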
+static int mv_otg_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_otg *mvotg;
+ struct resource *r;
+ int retval = 0, clk_i, i;
+ size_t size;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "failed to get platform data\n");
+ return -ENODEV;
+ }
+
+ size = sizeof(*mvotg) + sizeof(struct clk *) * pdata->clknum;
+ mvotg = kzalloc(size, GFP_KERNEL);
+ if (!mvotg) {
+ dev_err(&pdev->dev, "failed to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mvotg);
+
+ mvotg->pdev = pdev;
+ mvotg->pdata = pdata;
+
+ mvotg->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < mvotg->clknum; clk_i++) {
+ mvotg->clk[clk_i] = clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(mvotg->clk[clk_i])) {
+ retval = PTR_ERR(mvotg->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
+ if (!mvotg->qwork) {
+ dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
+ retval = -ENOMEM;
+ goto err_put_clk;
+ }
+
+ INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
+
+ /* OTG common part */
+ mvotg->pdev = pdev;
+ mvotg->otg.dev = &pdev->dev;
+ mvotg->otg.label = driver_name;
+ mvotg->otg.set_host = mv_otg_set_host;
+ mvotg->otg.set_peripheral = mv_otg_set_peripheral;
+ mvotg->otg.set_vbus = mv_otg_set_vbus;
+ mvotg->otg.state = OTG_STATE_UNDEFINED;
+
+ for (i = 0; i < OTG_TIMER_NUM; i++)
+ init_timer(&mvotg->otg_ctrl.timer[i]);
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_destroy_workqueue;
+ }
+
+ mvotg->phy_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->phy_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_destroy_workqueue;
+ }
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "capregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_unmap_phyreg;
+ }
+
+ mvotg->cap_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_unmap_phyreg;
+ }
+
+ /* we will access the controller registers, so enable the udc controller */
+ retval = mv_otg_enable_internal(mvotg);
+ if (retval) {
+ dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
+ goto err_unmap_capreg;
+ }
+
+ mvotg->op_regs =
+ (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
+ + (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
+
+ if (pdata->id) {
+ retval = request_threaded_irq(pdata->id->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "id", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for ID\n");
+ pdata->id = NULL;
+ }
+ }
+
+ if (pdata->vbus) {
+ mvotg->clock_gating = 1;
+ retval = request_threaded_irq(pdata->vbus->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "vbus", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for VBUS, "
+ "disable clock gating\n");
+ mvotg->clock_gating = 0;
+ pdata->vbus = NULL;
+ }
+ }
+
+ if (pdata->disable_otg_clock_gating)
+ mvotg->clock_gating = 0;
+
+ mv_otg_reset(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ mvotg->irq = r->start;
+ if (request_irq(mvotg->irq, mv_otg_irq, IRQF_SHARED,
+ driver_name, mvotg)) {
+ dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
+ mvotg->irq);
+ mvotg->irq = 0;
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_transceiver(&mvotg->otg);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "can't register transceiver, %d\n",
+ retval);
+ goto err_free_irq;
+ }
+
+ retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
+ if (retval < 0) {
+ dev_dbg(&pdev->dev,
+ "Can't register sysfs attr group: %d\n", retval);
+ goto err_set_transceiver;
+ }
+
+ spin_lock_init(&mvotg->wq_lock);
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 2 * HZ);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ dev_info(&pdev->dev,
+ "successful probe OTG device %s clock gating.\n",
+ mvotg->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_set_transceiver:
+ otg_set_transceiver(NULL);
+err_free_irq:
+ free_irq(mvotg->irq, mvotg);
+err_disable_clk:
+ if (pdata->vbus)
+ free_irq(pdata->vbus->irq, mvotg);
+ if (pdata->id)
+ free_irq(pdata->id->irq, mvotg);
+ mv_otg_disable_internal(mvotg);
+err_unmap_capreg:
+ iounmap(mvotg->cap_regs);
+err_unmap_phyreg:
+ iounmap(mvotg->phy_regs);
+err_destroy_workqueue:
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(mvotg->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(mvotg);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+
+ if (mvotg->otg.state != OTG_STATE_B_IDLE) {
+ dev_info(&pdev->dev,
+ "OTG state is not B_IDLE, it is %d!\n",
+ mvotg->otg.state);
+ return -EAGAIN;
+ }
+
+ if (!mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+
+ return 0;
+}
+
+static int mv_otg_resume(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ u32 otgsc;
+
+ if (!mvotg->clock_gating) {
+ mv_otg_enable_internal(mvotg);
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+ return 0;
+}
+#endif
+
+static struct platform_driver mv_otg_driver = {
+ .probe = mv_otg_probe,
+ .remove = __exit_p(mv_otg_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = driver_name,
+ },
+#ifdef CONFIG_PM
+ .suspend = mv_otg_suspend,
+ .resume = mv_otg_resume,
+#endif
+};
+
+static int __init mv_otg_init(void)
+{
+ return platform_driver_register(&mv_otg_driver);
+}
+
+static void __exit mv_otg_exit(void)
+{
+ platform_driver_unregister(&mv_otg_driver);
+}
+
+module_init(mv_otg_init);
+module_exit(mv_otg_exit);
diff --git a/drivers/usb/otg/mv_otg.h b/drivers/usb/otg/mv_otg.h
new file mode 100644
index 00000000000..be6ca143764
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_USB_OTG_CONTROLLER__
+#define __MV_USB_OTG_CONTROLLER__
+
+#include <linux/types.h>
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP (0x00000001)
+#define USBCMD_CTRL_RESET (0x00000002)
+
+/* otgsc Register Bit Masks */
+#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
+#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
+#define OTGSC_CTRL_OTG_TERM 0x00000008
+#define OTGSC_CTRL_DATA_PULSING 0x00000010
+#define OTGSC_STS_USB_ID 0x00000100
+#define OTGSC_STS_A_VBUS_VALID 0x00000200
+#define OTGSC_STS_A_SESSION_VALID 0x00000400
+#define OTGSC_STS_B_SESSION_VALID 0x00000800
+#define OTGSC_STS_B_SESSION_END 0x00001000
+#define OTGSC_STS_1MS_TOGGLE 0x00002000
+#define OTGSC_STS_DATA_PULSING 0x00004000
+#define OTGSC_INTSTS_USB_ID 0x00010000
+#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
+#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
+#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
+#define OTGSC_INTSTS_B_SESSION_END 0x00100000
+#define OTGSC_INTSTS_1MS 0x00200000
+#define OTGSC_INTSTS_DATA_PULSING 0x00400000
+#define OTGSC_INTR_USB_ID 0x01000000
+#define OTGSC_INTR_A_VBUS_VALID 0x02000000
+#define OTGSC_INTR_A_SESSION_VALID 0x04000000
+#define OTGSC_INTR_B_SESSION_VALID 0x08000000
+#define OTGSC_INTR_B_SESSION_END 0x10000000
+#define OTGSC_INTR_1MS_TIMER 0x20000000
+#define OTGSC_INTR_DATA_PULSING 0x40000000
+
+#define CAPLENGTH_MASK (0xff)
+
+/* Timer's interval, unit 10ms */
+#define T_A_WAIT_VRISE 100
+#define T_A_WAIT_BCON 2000
+#define T_A_AIDL_BDIS 100
+#define T_A_BIDL_ADIS 20
+#define T_B_ASE0_BRST 400
+#define T_B_SE0_SRP 300
+#define T_B_SRP_FAIL 2000
+#define T_B_DATA_PLS 10
+#define T_B_SRP_INIT 100
+#define T_A_SRP_RSPNS 10
+#define T_A_DRV_RSM 5
+
+enum otg_function {
+ OTG_B_DEVICE = 0,
+ OTG_A_DEVICE
+};
+
+enum mv_otg_timer {
+ A_WAIT_BCON_TIMER = 0,
+ OTG_TIMER_NUM
+};
+
+/* PXA OTG state machine */
+struct mv_otg_ctrl {
+ /* internal variables */
+ u8 a_set_b_hnp_en; /* A-Device set b_hnp_en */
+ u8 b_srp_done;
+ u8 b_hnp_en;
+
+ /* OTG inputs */
+ u8 a_bus_drop;
+ u8 a_bus_req;
+ u8 a_clr_err;
+ u8 a_bus_resume;
+ u8 a_bus_suspend;
+ u8 a_conn;
+ u8 a_sess_vld;
+ u8 a_srp_det;
+ u8 a_vbus_vld;
+ u8 b_bus_req; /* B-Device Require Bus */
+ u8 b_bus_resume;
+ u8 b_bus_suspend;
+ u8 b_conn;
+ u8 b_se0_srp;
+ u8 b_sess_end;
+ u8 b_sess_vld;
+ u8 id;
+ u8 a_suspend_req;
+
+ /*Timer event */
+ u8 a_aidl_bdis_timeout;
+ u8 b_ase0_brst_timeout;
+ u8 a_bidl_adis_timeout;
+ u8 a_wait_bcon_timeout;
+
+ struct timer_list timer[OTG_TIMER_NUM];
+};
+
+#define VUSBHS_MAX_PORTS 8
+
+struct mv_otg_regs {
+ u32 usbcmd; /* Command register */
+ u32 usbsts; /* Status register */
+ u32 usbintr; /* Interrupt enable */
+ u32 frindex; /* Frame index */
+ u32 reserved1[1];
+ u32 deviceaddr; /* Device Address */
+ u32 eplistaddr; /* Endpoint List Address */
+ u32 ttctrl; /* HOST TT status and control */
+ u32 burstsize; /* Programmable Burst Size */
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
+ u32 reserved[4];
+ u32 epnak; /* Endpoint NAK */
+ u32 epnaken; /* Endpoint NAK Enable */
+ u32 configflag; /* Configured Flag register */
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+ u32 otgsc;
+ u32 usbmode; /* USB Host/Device mode */
+ u32 epsetupstat; /* Endpoint Setup Status */
+ u32 epprime; /* Endpoint Initialize */
+ u32 epflush; /* Endpoint De-initialize */
+ u32 epstatus; /* Endpoint Status */
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
+ u32 mcr; /* Mux Control */
+ u32 isr; /* Interrupt Status */
+ u32 ier; /* Interrupt Enable */
+};
+
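+/* Per-controller driver state; clk[] is a variable-length array sized by clknum. */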
+struct mv_otg {
+ struct otg_transceiver otg;
+ struct mv_otg_ctrl otg_ctrl;
+
+ /* base address */
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ struct mv_otg_regs __iomem *op_regs;
+
+ struct platform_device *pdev;
+ int irq;
+ u32 irq_status;
+ u32 irq_en;
+
+ struct delayed_work work;
+ struct workqueue_struct *qwork;
+
+ spinlock_t wq_lock;
+
+ struct mv_usb_platform_data *pdata;
+
+ unsigned int active;
+ unsigned int clock_gating;
+ unsigned int clknum;
+ struct clk *clk[0];
+};
+
+#endif
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 08c679c0dde..e9a5b1d2615 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -95,25 +95,15 @@ struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev)
/*
* syscfg functions
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
+static void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, SCKE, enable ? SCKE : 0);
}
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, HSE, enable ? HSE : 0);
-}
-
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, USBE, enable ? USBE : 0);
-}
-
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DCFM | DRPD;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DCFM | DRPD | HSE | USBE;
int has_otg = usbhs_get_dparam(priv, has_otg);
if (has_otg)
@@ -130,8 +120,8 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DPRPU;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DPRPU | HSE | USBE;
/*
* if enable
@@ -142,6 +132,11 @@ void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode)
+{
+ usbhs_write(priv, TESTMODE, mode);
+}
+
/*
* frame functions
*/
@@ -229,7 +224,7 @@ static void usbhsc_bus_init(struct usbhs_priv *priv)
/*
* device configuration
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum,
u16 upphub, u16 hubport, u16 speed)
{
struct device *dev = usbhs_priv_to_dev(priv);
@@ -301,18 +296,25 @@ static u32 usbhsc_default_pipe_type[] = {
*/
static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
{
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct device *dev = usbhs_priv_to_dev(priv);
if (enable) {
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
} else {
/* USB off */
usbhs_sys_clock_ctrl(priv, enable);
+ /* disable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -388,7 +390,7 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
usbhsc_hotplug(priv);
}
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
+static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int delay = usbhs_get_dparam(priv, detection_delay);
@@ -398,7 +400,8 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
* To make sure safety context,
* use workqueue for usbhs_notify_hotplug
*/
- schedule_delayed_work(&priv->notify_hotplug_work, delay);
+ schedule_delayed_work(&priv->notify_hotplug_work,
+ msecs_to_jiffies(delay));
return 0;
}
@@ -637,18 +640,7 @@ static struct platform_driver renesas_usbhs_driver = {
.remove = __devexit_p(usbhs_remove),
};
-static int __init usbhs_init(void)
-{
- return platform_driver_register(&renesas_usbhs_driver);
-}
-
-static void __exit usbhs_exit(void)
-{
- platform_driver_unregister(&renesas_usbhs_driver);
-}
-
-module_init(usbhs_init);
-module_exit(usbhs_exit);
+module_platform_driver(renesas_usbhs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas USB driver");
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8729da5c3be..d79b3e27db9 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -33,6 +33,7 @@ struct usbhs_priv;
#define SYSCFG 0x0000
#define BUSWAIT 0x0002
#define DVSTCTR 0x0008
+#define TESTMODE 0x000C
#define CFIFO 0x0014
#define CFIFOSEL 0x0020
#define CFIFOCTR 0x0022
@@ -275,19 +276,15 @@ u16 usbhs_read(struct usbhs_priv *priv, u32 reg);
void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data);
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data);
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev);
-
#define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f)
#define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f)
/*
* sysconfig
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable);
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode);
/*
* usb request
@@ -311,7 +308,7 @@ int usbhs_frame_get_num(struct usbhs_priv *priv);
/*
* device config
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum, u16 upphub,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum, u16 upphub,
u16 hubport, u16 speed);
/*
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ffdf5d15085..b51fcd80d24 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -56,7 +56,7 @@ static struct usbhs_pkt_handle usbhsf_null_handler = {
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero)
+ void *buf, int len, int zero, int sequence)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
@@ -90,6 +90,7 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
pkt->zero = zero;
pkt->actual = 0;
pkt->done = done;
+ pkt->sequence = sequence;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
@@ -481,6 +482,9 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
int i, ret, len;
int is_short;
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
+
ret = usbhsf_fifo_select(pipe, fifo, 1);
if (ret < 0)
return 0;
@@ -584,6 +588,8 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
/*
* pipe enable to prepare packet receive
*/
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
usbhs_pipe_enable(pipe);
usbhsf_rx_irq_ctrl(pipe, 1);
@@ -641,6 +647,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
* "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
*/
if (0 == rcv_len) {
+ pkt->zero = 1;
usbhsf_fifo_clear(pipe, fifo);
goto usbhs_fifo_read_end;
}
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 32a7b246b28..f68609c0f48 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -59,6 +59,7 @@ struct usbhs_pkt {
int trans;
int actual;
int zero;
+ int sequence;
};
struct usbhs_pkt_handle {
@@ -95,7 +96,7 @@ void usbhs_pkt_init(struct usbhs_pkt *pkt);
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero);
+ void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
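The new sequence argument threaded through usbhs_pkt_push() above lets the caller force the pipe's DATA0/DATA1 toggle before a transfer starts; -1 means "leave the toggle alone" (see the "-1 sequence will be ignored" handling in fifo.c and the switch in pipe.c later in this diff). A hypothetical call site might look like the sketch below; pipe, pkt, done_cb, buf and len are placeholders:

/* Sketch of the updated signature.
 * sequence: 0 = start from DATA0, 1 = start from DATA1,
 *          -1 = keep whatever toggle the pipe currently has.
 */
usbhs_pkt_push(pipe, pkt, done_cb,
	       buf, len,
	       0,	/* zero: no trailing zero-length packet needed */
	       -1);	/* sequence: do not touch the DATA toggle */
usbhs_pkt_start(pipe);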
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d7000..1b97fb12694 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -50,7 +50,9 @@ static int usbhsm_autonomy_irq_vbus(struct usbhs_priv *priv,
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- return usbhsc_drvcllbck_notify_hotplug(pdev);
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ return 0;
}
void usbhs_mod_autonomy_mode(struct usbhs_priv *priv)
@@ -349,7 +351,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index d9717e0bc1f..528691d5f3e 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -44,7 +45,6 @@ struct usbhsg_uep {
struct usbhsg_gpriv {
struct usb_gadget gadget;
struct usbhs_mod mod;
- struct list_head link;
struct usbhsg_uep *uep;
int uep_size;
@@ -114,16 +114,6 @@ struct usbhsg_recip_handle {
#define usbhsg_status_clr(gp, b) (gp->status &= ~b)
#define usbhsg_status_has(gp, b) (gp->status & b)
-/* controller */
-LIST_HEAD(the_controller_link);
-
-#define usbhsg_for_each_controller(gpriv)\
- list_for_each_entry(gpriv, &the_controller_link, link)
-#define usbhsg_controller_register(gpriv)\
- list_add_tail(&(gpriv)->link, &the_controller_link)
-#define usbhsg_controller_unregister(gpriv)\
- list_del_init(&(gpriv)->link)
-
/*
* queue push/pop
*/
@@ -164,7 +154,7 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
req->actual = 0;
req->status = -EINPROGRESS;
usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
- req->buf, req->length, req->zero);
+ req->buf, req->length, req->zero, -1);
usbhs_pkt_start(pipe);
dev_dbg(dev, "pipe %d : queue push (%d)\n",
@@ -195,7 +185,7 @@ static int usbhsg_dma_map(struct device *dev,
}
if (dma_mapping_error(dev, pkt->dma)) {
- dev_err(dev, "dma mapping error %x\n", pkt->dma);
+ dev_err(dev, "dma mapping error %llx\n", (u64)pkt->dma);
return -EIO;
}
@@ -271,6 +261,8 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ usbhs_pkt_start(pipe);
+
return 0;
}
@@ -282,6 +274,145 @@ struct usbhsg_recip_handle req_clear_feature = {
};
/*
+ * USB_TYPE_STANDARD / set feature functions
+ */
+static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ udelay(100);
+ usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
+ break;
+ default:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+
+ usbhs_pipe_stall(pipe);
+
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_set_feature = {
+ .name = "set feature",
+ .device = usbhsg_recip_handler_std_set_device,
+ .interface = usbhsg_recip_handler_std_control_done,
+ .endpoint = usbhsg_recip_handler_std_set_endpoint,
+};
+
+/*
+ * USB_TYPE_STANDARD / get status functions
+ */
+static void __usbhsg_recip_send_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
+
+ /* free allocated recip-buffer/usb_request */
+ kfree(ureq->pkt.buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
+ unsigned short status)
+{
+ struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ struct usb_request *req;
+ unsigned short *buf;
+
+ /* alloc new usb_request for recip */
+ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
+ if (!req) {
+ dev_err(dev, "recip request allocation fail\n");
+ return;
+ }
+
+ /* alloc recip data buffer */
+ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf) {
+ usb_ep_free_request(&dcp->ep, req);
+ dev_err(dev, "recip data allocation fail\n");
+ return;
+ }
+
+ /* recip data is status */
+ *buf = cpu_to_le16(status);
+
+ /* allocated usb_request/buffer will be freed */
+ req->complete = __usbhsg_recip_send_complete;
+ req->buf = buf;
+ req->length = sizeof(*buf);
+ req->zero = 0;
+
+ /* push packet */
+ pipe->handler = &usbhs_fifo_pio_push_handler;
+ usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
+}
+
+static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 1 << USB_DEVICE_SELF_POWERED;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 0;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ unsigned short status = 0;
+
+ if (usbhs_pipe_is_stall(pipe))
+ status = 1 << USB_ENDPOINT_HALT;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_get_status = {
+ .name = "get status",
+ .device = usbhsg_recip_handler_std_get_device,
+ .interface = usbhsg_recip_handler_std_get_interface,
+ .endpoint = usbhsg_recip_handler_std_get_endpoint,
+};
+
+/*
* USB_TYPE handler
*/
static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
@@ -303,8 +434,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
pipe = usbhsg_uep_to_pipe(uep);
if (!pipe) {
dev_err(dev, "wrong recip request\n");
- ret = -EINVAL;
- goto usbhsg_recip_run_handle_end;
+ return -EINVAL;
}
switch (recip) {
@@ -327,20 +457,10 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
}
if (func) {
- unsigned long flags;
-
dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
-
- /******************** spin lock ********************/
- usbhs_lock(priv, flags);
ret = func(priv, uep, ctrl);
- usbhs_unlock(priv, flags);
- /******************** spin unlock ******************/
}
-usbhsg_recip_run_handle_end:
- usbhs_pkt_start(pipe);
-
return ret;
}
@@ -412,6 +532,12 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
case USB_REQ_CLEAR_FEATURE:
recip_handler = &req_clear_feature;
break;
+ case USB_REQ_SET_FEATURE:
+ recip_handler = &req_set_feature;
+ break;
+ case USB_REQ_GET_STATUS:
+ recip_handler = &req_get_status;
+ break;
}
}
@@ -439,14 +565,16 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhs_pkt *pkt;
- usbhs_pipe_disable(pipe);
-
while (1) {
pkt = usbhs_pkt_pop(pipe, NULL);
if (!pkt)
break;
+
+ usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);
}
+ usbhs_pipe_disable(pipe);
+
return 0;
}
@@ -681,9 +809,7 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
* - function
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_function_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -731,9 +857,8 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
gpriv->gadget.speed = USB_SPEED_UNKNOWN;
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
+ usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
usbhsg_pipe_disable(dcp);
@@ -751,53 +876,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->max_speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
@@ -827,6 +931,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
@@ -876,12 +987,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
- gpriv->gadget.is_dualspeed = 1;
+ gpriv->gadget.max_speed = USB_SPEED_HIGH;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
@@ -908,16 +1021,17 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
}
}
- usbhsg_controller_register(gpriv);
-
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
@@ -933,7 +1047,7 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
usb_del_gadget_udc(&gpriv->gadget);
- usbhsg_controller_unregister(gpriv);
+ device_unregister(&gpriv->gadget.dev);
kfree(gpriv->uep);
kfree(gpriv);
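In the SET_FEATURE(TEST_MODE) handler added above, the test selector is carried in the upper byte of wIndex, so it has to be extracted after the little-endian conversion. A minimal sketch of that extraction; the helper name is made up for illustration:

#include <linux/usb/ch9.h>

/* Sketch: pull the test selector out of a SET_FEATURE(TEST_MODE)
 * request.  wIndex is little-endian on the wire; convert first,
 * then take the high byte.
 */
static u16 usbhsg_test_selector(const struct usb_ctrlrequest *ctrl)
{
	return le16_to_cpu(ctrl->wIndex) >> 8;
}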
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index bade761a1e5..1834cf50888 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -45,36 +45,34 @@
*
* +--------+ pipes are reused for each uep.
* | udev 1 |-+- [uep 0 (dcp) ] --+ pipe will be switched when
- * +--------+ | | target device was changed
+ * +--------+ | | other device requested
* +- [uep 1 (bulk)] --|---+ +--------------+
* | +--------------> | pipe0 (dcp) |
- * +- [uep 2 (bulk)] --|---|---+ +--------------+
- * | | | | pipe1 (isoc) |
- * +--------+ | | | +--------------+
- * | udev 2 |-+- [uep 0 (dcp) ] --+ +-- |------> | pipe2 (bulk) |
- * +--------+ | | | | +--------------+
- * +- [uep 1 (int) ] --|-+ | +------> | pipe3 (bulk) |
- * | | | | +--------------+
- * +--------+ | +-|---|------> | pipe4 (int) |
- * | udev 3 |-+- [uep 0 (dcp) ] --+ | | +--------------+
- * +--------+ | | | | .... |
- * +- [uep 1 (bulk)] ------+ | | .... |
+ * +- [uep 2 (bulk)] -@ | +--------------+
+ * | | pipe1 (isoc) |
+ * +--------+ | +--------------+
+ * | udev 2 |-+- [uep 0 (dcp) ] -@ +----------> | pipe2 (bulk) |
+ * +--------+ | +--------------+
+ * +- [uep 1 (int) ] ----+ +------> | pipe3 (bulk) |
+ * | | +--------------+
+ * +--------+ +-----|------> | pipe4 (int) |
+ * | udev 3 |-+- [uep 0 (dcp) ] -@ | +--------------+
+ * +--------+ | | | .... |
+ * +- [uep 1 (bulk)] -@ | | .... |
* | |
* +- [uep 2 (bulk)]-----------+
+ *
+ * @ : uep requested a free pipe, but all of them are in use.
+ * it is now waiting for a free pipe
*/
/*
* struct
*/
-struct usbhsh_pipe_info {
- unsigned int usr_cnt; /* see usbhsh_endpoint_alloc() */
-};
-
struct usbhsh_request {
struct urb *urb;
struct usbhs_pkt pkt;
- struct list_head ureq_link; /* see hpriv :: ureq_link_xxx */
};
struct usbhsh_device {
@@ -83,11 +81,10 @@ struct usbhsh_device {
};
struct usbhsh_ep {
- struct usbhs_pipe *pipe;
+ struct usbhs_pipe *pipe; /* attached pipe */
struct usbhsh_device *udev; /* attached udev */
+ struct usb_host_endpoint *ep;
struct list_head ep_list; /* list to usbhsh_device */
-
- int maxp;
};
#define USBHSH_DEVICE_MAX 10 /* see DEVADDn / DCPMAXP / PIPEMAXP */
@@ -98,16 +95,9 @@ struct usbhsh_hpriv {
struct usbhsh_device udev[USBHSH_DEVICE_MAX];
- struct usbhsh_pipe_info *pipe_info;
- int pipe_size;
-
u32 port_stat; /* USB_PORT_STAT_xxx */
struct completion setup_ack_done;
-
- /* see usbhsh_req_alloc/free */
- struct list_head ureq_link_active;
- struct list_head ureq_link_free;
};
@@ -119,17 +109,6 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_priv_to_hpriv(priv) \
container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
-#define __usbhsh_for_each_hpipe(start, pos, h, i) \
- for (i = start, pos = (h)->hpipe + i; \
- i < (h)->hpipe_size; \
- i++, pos = (h)->hpipe + i)
-
-#define usbhsh_for_each_hpipe(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(1, pos, hpriv, i)
-
-#define usbhsh_for_each_hpipe_with_dcp(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(0, pos, hpriv, i)
-
#define __usbhsh_for_each_udev(start, pos, h, i) \
for (i = start, pos = (h)->udev + i; \
i < USBHSH_DEVICE_MAX; \
@@ -152,15 +131,20 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_ep_to_uep(u) ((u)->hcpriv)
#define usbhsh_uep_to_pipe(u) ((u)->pipe)
#define usbhsh_uep_to_udev(u) ((u)->udev)
+#define usbhsh_uep_to_ep(u) ((u)->ep)
+
#define usbhsh_urb_to_ureq(u) ((u)->hcpriv)
#define usbhsh_urb_to_usbv(u) ((u)->dev)
#define usbhsh_usbv_to_udev(d) dev_get_drvdata(&(d)->dev)
#define usbhsh_udev_to_usbv(h) ((h)->usbv)
+#define usbhsh_udev_is_used(h) usbhsh_udev_to_usbv(h)
-#define usbhsh_pipe_info(p) ((p)->mod_private)
+#define usbhsh_pipe_to_uep(p) ((p)->mod_private)
+#define usbhsh_device_parent(d) (usbhsh_usbv_to_udev((d)->usbv->parent))
+#define usbhsh_device_hubport(d) ((d)->usbv->portnum)
#define usbhsh_device_number(h, d) ((int)((d) - (h)->udev))
#define usbhsh_device_nth(h, d) ((h)->udev + d)
#define usbhsh_device0(h) usbhsh_device_nth(h, 0)
@@ -170,38 +154,13 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s))
#define usbhsh_port_stat_get(h) ((h)->port_stat)
-#define usbhsh_pkt_to_req(p) \
+#define usbhsh_pkt_to_ureq(p) \
container_of((void *)p, struct usbhsh_request, pkt)
/*
* req alloc/free
*/
-static void usbhsh_req_list_init(struct usbhsh_hpriv *hpriv)
-{
- INIT_LIST_HEAD(&hpriv->ureq_link_active);
- INIT_LIST_HEAD(&hpriv->ureq_link_free);
-}
-
-static void usbhsh_req_list_quit(struct usbhsh_hpriv *hpriv)
-{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usbhsh_request *ureq, *next;
-
- /* kfree all active ureq */
- list_for_each_entry_safe(ureq, next,
- &hpriv->ureq_link_active,
- ureq_link) {
- dev_err(dev, "active ureq (%p) is force freed\n", ureq);
- kfree(ureq);
- }
-
- /* kfree all free ureq */
- list_for_each_entry_safe(ureq, next, &hpriv->ureq_link_free, ureq_link)
- kfree(ureq);
-}
-
-static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
+static struct usbhsh_request *usbhsh_ureq_alloc(struct usbhsh_hpriv *hpriv,
struct urb *urb,
gfp_t mem_flags)
{
@@ -209,270 +168,460 @@ static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- if (list_empty(&hpriv->ureq_link_free)) {
- /*
- * create new one if there is no free ureq
- */
- ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
- if (ureq)
- INIT_LIST_HEAD(&ureq->ureq_link);
- } else {
- /*
- * reuse "free" ureq if exist
- */
- ureq = list_entry(hpriv->ureq_link_free.next,
- struct usbhsh_request,
- ureq_link);
- if (ureq)
- list_del_init(&ureq->ureq_link);
- }
-
+ ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
if (!ureq) {
dev_err(dev, "ureq alloc fail\n");
return NULL;
}
usbhs_pkt_init(&ureq->pkt);
-
- /*
- * push it to "active" list
- */
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_active);
ureq->urb = urb;
+ usbhsh_urb_to_ureq(urb) = ureq;
return ureq;
}
-static void usbhsh_req_free(struct usbhsh_hpriv *hpriv,
+static void usbhsh_ureq_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_request *ureq)
{
- struct usbhs_pkt *pkt = &ureq->pkt;
+ usbhsh_urb_to_ureq(ureq->urb) = NULL;
+ ureq->urb = NULL;
- usbhs_pkt_init(pkt);
+ kfree(ureq);
+}
+/*
+ * status
+ */
+static int usbhsh_is_running(struct usbhsh_hpriv *hpriv)
+{
/*
- * removed from "active" list,
- * and push it to "free" list
+ * we can tell whether a device is attached or not
+ * by checking mod.irq_attch
+ * see
+ * usbhsh_irq_attch()
+ * usbhsh_irq_dtch()
*/
- ureq->urb = NULL;
- list_del_init(&ureq->ureq_link);
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_free);
+ return (hpriv->mod.irq_attch == NULL);
}
/*
- * device control
+ * pipe control
*/
-
-static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+static void usbhsh_endpoint_sequence_save(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pkt *pkt)
{
- return !list_empty(&udev->ep_list_head);
-}
+ int len = urb->actual_length;
+ int maxp = usb_endpoint_maxp(&urb->ep->desc);
+ int t = 0;
-static struct usbhsh_device *usbhsh_device_alloc(struct usbhsh_hpriv *hpriv,
- struct urb *urb)
-{
- struct usbhsh_device *udev = NULL;
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
- struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- int i;
+ /* DCP is out of sequence control */
+ if (usb_pipecontrol(urb->pipe))
+ return;
/*
- * device 0
+ * renesas_usbhs has only a limited number of pipes.
+ * So the driver has to re-use those pipes for each device/endpoint.
+ * DATA0/1 sequence should be saved for it.
+ * see [image of mod_host]
+ * [HARDWARE LIMITATION]
*/
- if (0 == usb_pipedevice(urb->pipe)) {
- udev = usbhsh_device0(hpriv);
- goto usbhsh_device_find;
- }
/*
- * find unused device
+ * next sequence depends on actual_length
+ *
+ * ex) actual_length = 1147, maxp = 512
+ * data0 : 512
+ * data1 : 512
+ * data0 : 123
+ * data1 is the next sequence
*/
- usbhsh_for_each_udev(udev, hpriv, i) {
- if (usbhsh_udev_to_usbv(udev))
- continue;
- goto usbhsh_device_find;
+ t = len / maxp;
+ if (len % maxp)
+ t++;
+ if (pkt->zero)
+ t++;
+ t %= 2;
+
+ if (t)
+ usb_dotoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb);
+
+static int usbhsh_pipe_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usbhs_pipe *pipe;
+ struct usb_endpoint_descriptor *desc = &urb->ep->desc;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
+ int dir_in_req = !!usb_pipein(urb->pipe);
+ int is_dcp = usb_endpoint_xfer_control(desc);
+ int i, dir_in;
+ int ret = -EBUSY;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ if (unlikely(usbhsh_uep_to_pipe(uep))) {
+ dev_err(dev, "uep already has pipe\n");
+ goto usbhsh_pipe_attach_done;
}
- dev_err(dev, "no free usbhsh_device\n");
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- return NULL;
+ /* check pipe type */
+ if (!usbhs_pipe_type_is(pipe, usb_endpoint_type(desc)))
+ continue;
-usbhsh_device_find:
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev have old endpoint\n");
+ /* check pipe direction if normal pipe */
+ if (!is_dcp) {
+ dir_in = !!usbhs_pipe_is_dir_in(pipe);
+ if (0 != (dir_in - dir_in_req))
+ continue;
+ }
- /* uep will be attached */
- INIT_LIST_HEAD(&udev->ep_list_head);
+ /* check pipe is free */
+ if (usbhsh_pipe_to_uep(pipe))
+ continue;
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be enable
- */
- dev_set_drvdata(&usbv->dev, udev);
- udev->usbv = usbv;
+ /*
+ * attach pipe to uep
+ *
+ * usbhs_pipe_config_update() should be called after
+ * usbhs_set_device_config()
+ * see
+ * DCPMAXP/PIPEMAXP
+ */
+ usbhsh_uep_to_pipe(uep) = pipe;
+ usbhsh_pipe_to_uep(pipe) = uep;
- /* set device config */
- usbhs_set_device_speed(priv,
- usbhsh_device_number(hpriv, udev),
- usbhsh_device_number(hpriv, udev),
- 0, /* FIXME no parent */
- usbv->speed);
+ usbhs_pipe_config_update(pipe,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usb_endpoint_maxp(desc));
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ dev_dbg(dev, "%s [%d-%d(%s:%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usbhs_pipe_name(pipe),
+ dir_in_req ? "in" : "out");
- return udev;
+ ret = 0;
+ break;
+ }
+
+usbhsh_pipe_attach_done:
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ return ret;
}
-static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev)
+static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_ep *uep)
{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pipe *pipe;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev still have endpoint\n");
+ pipe = usbhsh_uep_to_pipe(uep);
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be disable
- */
- dev_set_drvdata(&usbv->dev, NULL);
- udev->usbv = NULL;
+ if (unlikely(!pipe)) {
+ dev_err(dev, "uep doesn't have pipe\n");
+ } else {
+ struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep);
+ struct usbhsh_device *udev = usbhsh_uep_to_udev(uep);
+
+ /* detach pipe from uep */
+ usbhsh_uep_to_pipe(uep) = NULL;
+ usbhsh_pipe_to_uep(pipe) = NULL;
+
+ dev_dbg(dev, "%s [%d-%d(%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(&ep->desc),
+ usbhs_pipe_name(pipe));
+ }
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
- * end-point control
+ * endpoint control
*/
-struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev,
- struct usb_host_endpoint *ep,
- int dir_in_req,
- gfp_t mem_flags)
+static int usbhsh_endpoint_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usb_host_endpoint *ep = urb->ep;
struct usbhsh_ep *uep;
- struct usbhsh_pipe_info *info;
- struct usbhs_pipe *pipe, *best_pipe;
- struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct device *dev = usbhs_priv_to_dev(priv);
struct usb_endpoint_descriptor *desc = &ep->desc;
- int type, i, dir_in;
- unsigned int min_usr;
-
- dir_in_req = !!dir_in_req;
+ unsigned long flags;
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep) {
dev_err(dev, "usbhsh_ep alloc fail\n");
- return NULL;
+ return -ENOMEM;
}
- if (usb_endpoint_xfer_control(desc)) {
- best_pipe = usbhsh_hpriv_to_dcp(hpriv);
- goto usbhsh_endpoint_alloc_find_pipe;
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /*
+ * init endpoint
+ */
+ INIT_LIST_HEAD(&uep->ep_list);
+ list_add_tail(&uep->ep_list, &udev->ep_list_head);
+
+ usbhsh_uep_to_udev(uep) = udev;
+ usbhsh_uep_to_ep(uep) = ep;
+ usbhsh_ep_to_uep(ep) = uep;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc));
+
+ return 0;
+}
+
+static void usbhsh_endpoint_detach(struct usbhsh_hpriv *hpriv,
+ struct usb_host_endpoint *ep)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
+ unsigned long flags;
+
+ if (!uep)
+ return;
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
+ usb_endpoint_num(&ep->desc));
+
+ if (usbhsh_uep_to_pipe(uep))
+ usbhsh_pipe_detach(hpriv, uep);
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /* remove this endpoint from udev */
+ list_del_init(&uep->ep_list);
+
+ usbhsh_uep_to_udev(uep) = NULL;
+ usbhsh_uep_to_ep(uep) = NULL;
+ usbhsh_ep_to_uep(ep) = NULL;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ kfree(uep);
+}
+
+static void usbhsh_endpoint_detach_all(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
+{
+ struct usbhsh_ep *uep, *next;
+
+ list_for_each_entry_safe(uep, next, &udev->ep_list_head, ep_list)
+ usbhsh_endpoint_detach(hpriv, usbhsh_uep_to_ep(uep));
+}
+
+/*
+ * device control
+ */
+static int usbhsh_connected_to_rhdev(struct usb_hcd *hcd,
+ struct usbhsh_device *udev)
+{
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+
+ return hcd->self.root_hub == usbv->parent;
+}
+
+static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+{
+ return !list_empty(&udev->ep_list_head);
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+ /* usbhsh_device_attach() is still not called */
+ if (!udev)
+ return NULL;
+
+ /* if it is device0, return it */
+ if (0 == usb_pipedevice(urb->pipe))
+ return usbhsh_device0(hpriv);
+
+ /* return attached device */
+ return udev;
+}
+
+static struct usbhsh_device *usbhsh_device_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhsh_device *udev = NULL;
+ struct usbhsh_device *udev0 = usbhsh_device0(hpriv);
+ struct usbhsh_device *pos;
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ unsigned long flags;
+ u16 upphub, hubport;
+ int i;
+
+ /*
+ * This function should be called only while urb is pointing to device0.
+ * It will attach an unused usbhsh_device to the urb (usbv),
+ * and initialize device0.
+ * You can use usbhsh_device_get() to get "current" udev,
+ * and usbhsh_usbv_to_udev() is for "attached" udev.
+ */
+ if (0 != usb_pipedevice(urb->pipe)) {
+ dev_err(dev, "%s fail: urb isn't pointing to device0\n", __func__);
+ return NULL;
}
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
/*
- * find best pipe for endpoint
- * see
- * HARDWARE LIMITATION
+ * find unused device
*/
- type = usb_endpoint_type(desc);
- min_usr = ~0;
- best_pipe = NULL;
- usbhs_for_each_pipe(pipe, priv, i) {
- if (!usbhs_pipe_type_is(pipe, type))
+ usbhsh_for_each_udev(pos, hpriv, i) {
+ if (usbhsh_udev_is_used(pos))
continue;
+ udev = pos;
+ break;
+ }
- dir_in = !!usbhs_pipe_is_dir_in(pipe);
- if (0 != (dir_in - dir_in_req))
- continue;
+ if (udev) {
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be enable
+ */
+ dev_set_drvdata(&usbv->dev, udev);
+ udev->usbv = usbv;
+ }
- info = usbhsh_pipe_info(pipe);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
- if (min_usr > info->usr_cnt) {
- min_usr = info->usr_cnt;
- best_pipe = pipe;
- }
+ if (!udev) {
+ dev_err(dev, "no free usbhsh_device\n");
+ return NULL;
}
- if (unlikely(!best_pipe)) {
- dev_err(dev, "couldn't find best pipe\n");
- kfree(uep);
- return NULL;
+ if (usbhsh_device_has_endpoint(udev)) {
+ dev_warn(dev, "udev has old endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
+
+ if (usbhsh_device_has_endpoint(udev0)) {
+ dev_warn(dev, "udev0 has old endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev0);
}
-usbhsh_endpoint_alloc_find_pipe:
+
+ /* uep will be attached */
+ INIT_LIST_HEAD(&udev0->ep_list_head);
+ INIT_LIST_HEAD(&udev->ep_list_head);
+
/*
- * init uep
+ * set device0 config
*/
- uep->pipe = best_pipe;
- uep->maxp = usb_endpoint_maxp(desc);
- usbhsh_uep_to_udev(uep) = udev;
- usbhsh_ep_to_uep(ep) = uep;
+ usbhs_set_device_config(priv,
+ 0, 0, 0, usbv->speed);
/*
- * update pipe user count
+ * set new device config
*/
- info = usbhsh_pipe_info(best_pipe);
- info->usr_cnt++;
+ upphub = 0;
+ hubport = 0;
+ if (!usbhsh_connected_to_rhdev(hcd, udev)) {
+ /* if udev is not connected to rhdev, it means parent is Hub */
+ struct usbhsh_device *parent = usbhsh_device_parent(udev);
- /* init this endpoint, and attach it to udev */
- INIT_LIST_HEAD(&uep->ep_list);
- list_add_tail(&uep->ep_list, &udev->ep_list_head);
+ upphub = usbhsh_device_number(hpriv, parent);
+ hubport = usbhsh_device_hubport(udev);
- /*
- * usbhs_pipe_config_update() should be called after
- * usbhs_device_config()
- * see
- * DCPMAXP/PIPEMAXP
- */
- usbhs_pipe_sequence_data0(uep->pipe);
- usbhs_pipe_config_update(uep->pipe,
- usbhsh_device_number(hpriv, udev),
- usb_endpoint_num(desc),
- uep->maxp);
+ dev_dbg(dev, "%s connected to Hub [%d:%d](%p)\n", __func__,
+ upphub, hubport, parent);
+ }
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev),
- usbhs_pipe_name(uep->pipe), uep);
+ usbhs_set_device_config(priv,
+ usbhsh_device_number(hpriv, udev),
+ upphub, hubport, usbv->speed);
- return uep;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
+
+ return udev;
}
-void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
- struct usb_host_endpoint *ep)
+static void usbhsh_device_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
{
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct device *dev = usbhs_priv_to_dev(priv);
- struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
- struct usbhsh_pipe_info *info;
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ unsigned long flags;
- if (!uep)
- return;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
- usbhs_pipe_name(uep->pipe), uep);
+ if (usbhsh_device_has_endpoint(udev)) {
+ dev_warn(dev, "udev still has endpoint\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
- info = usbhsh_pipe_info(uep->pipe);
- info->usr_cnt--;
+ /*
+ * There is nothing to do if it is device0.
+ * see
+ * usbhsh_device_attach()
+ * usbhsh_device_get()
+ */
+ if (0 == usbhsh_device_number(hpriv, udev))
+ return;
- /* remove this endpoint from udev */
- list_del_init(&uep->ep_list);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- usbhsh_uep_to_udev(uep) = NULL;
- usbhsh_ep_to_uep(ep) = NULL;
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+ * will be disable
+ */
+ dev_set_drvdata(&usbv->dev, NULL);
+ udev->usbv = NULL;
- kfree(uep);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
@@ -480,11 +629,12 @@ void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
*/
static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct urb *urb = ureq->urb;
struct device *dev = usbhs_priv_to_dev(priv);
+ int status = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -493,29 +643,43 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
return;
}
+ if (!usbhsh_is_running(hpriv))
+ status = -ESHUTDOWN;
+
urb->actual_length = pkt->actual;
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
+
+ usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+ usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
- usb_hcd_giveback_urb(hcd, urb, 0);
+ usb_hcd_giveback_urb(hcd, urb, status);
}
static int usbhsh_queue_push(struct usb_hcd *hcd,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mem_flags)
{
- struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
- struct usbhs_pkt *pkt = &ureq->pkt;
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usbhsh_request *ureq;
void *buf;
- int len;
+ int len, sequence;
if (usb_pipeisoc(urb->pipe)) {
dev_err(dev, "pipe iso is not supported now\n");
return -EIO;
}
+ /* this ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq)) {
+ dev_err(dev, "ureq alloc fail\n");
+ return -ENOMEM;
+ }
+
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_fifo_pio_pop_handler;
else
@@ -524,25 +688,59 @@ static int usbhsh_queue_push(struct usb_hcd *hcd,
buf = (void *)(urb->transfer_buffer + urb->actual_length);
len = urb->transfer_buffer_length - urb->actual_length;
+ sequence = usb_gettoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+
dev_dbg(dev, "%s\n", __func__);
- usbhs_pkt_push(pipe, pkt, usbhsh_queue_done,
- buf, len, (urb->transfer_flags & URB_ZERO_PACKET));
+ usbhs_pkt_push(pipe, &ureq->pkt, usbhsh_queue_done,
+ buf, len, (urb->transfer_flags & URB_ZERO_PACKET),
+ sequence);
+
usbhs_pkt_start(pipe);
return 0;
}
+static void usbhsh_queue_force_pop(struct usbhs_priv *priv,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhs_pkt *pkt;
+
+ while (1) {
+ pkt = usbhs_pkt_pop(pipe, NULL);
+ if (!pkt)
+ break;
+
+ /*
+ * if all packets are gone, usbhsh_endpoint_disable()
+ * will be called.
+ * then, attached device/endpoint/pipe will be detached
+ */
+ usbhsh_queue_done(priv, pkt);
+ }
+}
+
+static void usbhsh_queue_force_pop_all(struct usbhs_priv *priv)
+{
+ struct usbhs_pipe *pos;
+ int i;
+
+ usbhs_for_each_pipe_with_dcp(pos, priv, i)
+ usbhsh_queue_force_pop(priv, pos);
+}
+
/*
* DCP setup stage
*/
static int usbhsh_is_request_address(struct urb *urb)
{
- struct usb_ctrlrequest *cmd;
+ struct usb_ctrlrequest *req;
- cmd = (struct usb_ctrlrequest *)urb->setup_packet;
+ req = (struct usb_ctrlrequest *)urb->setup_packet;
- if ((DeviceOutRequest == cmd->bRequestType << 8) &&
- (USB_REQ_SET_ADDRESS == cmd->bRequest))
+ if ((DeviceOutRequest == req->bRequestType << 8) &&
+ (USB_REQ_SET_ADDRESS == req->bRequest))
return 1;
else
return 0;
@@ -570,11 +768,15 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
/*
* renesas_usbhs can not use original usb address.
* see HARDWARE LIMITATION.
- * modify usb address here.
+ * modify usb address here to use attached device.
+ * see usbhsh_device_attach()
*/
if (usbhsh_is_request_address(urb)) {
- /* FIXME */
- req.wValue = 1;
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+ /* udev is an attached device */
+ req.wValue = usbhsh_device_number(hpriv, udev);
dev_dbg(dev, "create new address - %d\n", req.wValue);
}
@@ -595,82 +797,80 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
static void usbhsh_data_stage_packet_done(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct urb *urb = ureq->urb;
/* this ureq was connected to urb when usbhsh_urb_enqueue() */
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
}
-static void usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
- struct urb *urb,
- struct usbhs_pipe *pipe)
+static int usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
+
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * data stage uses ureq which is connected to urb
- * see usbhsh_urb_enqueue() :: alloc new request.
- * it will be freed in usbhsh_data_stage_packet_done()
- */
- ureq = usbhsh_urb_to_ureq(urb);
- pkt = &ureq->pkt;
+ /* this ureq will be freed on usbhsh_data_stage_packet_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_data_stage_in_handler;
else
pipe->handler = &usbhs_dcp_data_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_data_stage_packet_done,
urb->transfer_buffer,
urb->transfer_buffer_length,
- (urb->transfer_flags & URB_ZERO_PACKET));
+ (urb->transfer_flags & URB_ZERO_PACKET),
+ -1);
+
+ return 0;
}
/*
* DCP status stage
*/
-static void usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
+static int usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
- struct usbhs_pipe *pipe)
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * status stage uses allocated ureq.
- * it will be freed on usbhsh_queue_done()
- */
- ureq = usbhsh_req_alloc(hpriv, urb, GFP_KERNEL);
- pkt = &ureq->pkt;
+ /* This ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_status_stage_in_handler;
else
pipe->handler = &usbhs_dcp_status_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_queue_done,
NULL,
urb->transfer_buffer_length,
- 0);
+ 0, -1);
+
+ return 0;
}
static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
- struct usbhsh_hpriv *hpriv,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mflags)
{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ int ret;
dev_dbg(dev, "%s\n", __func__);
@@ -686,13 +886,22 @@ static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
*
* It is pushed only when urb has buffer.
*/
- if (urb->transfer_buffer_length)
- usbhsh_data_stage_packet_push(hpriv, urb, pipe);
+ if (urb->transfer_buffer_length) {
+ ret = usbhsh_data_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "data stage failed\n");
+ return ret;
+ }
+ }
/*
* status stage
*/
- usbhsh_status_stage_packet_push(hpriv, urb, pipe);
+ ret = usbhsh_status_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "status stage failed\n");
+ return ret;
+ }
/*
* start pushed packets
@@ -729,71 +938,82 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usb_host_endpoint *ep = urb->ep;
- struct usbhsh_request *ureq;
- struct usbhsh_device *udev, *new_udev = NULL;
- struct usbhs_pipe *pipe;
- struct usbhsh_ep *uep;
+ struct usbhsh_device *new_udev = NULL;
int is_dir_in = usb_pipein(urb->pipe);
-
+ int i;
int ret;
dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
+ if (!usbhsh_is_running(hpriv)) {
+ ret = -EIO;
+ dev_err(dev, "host is not running\n");
+ goto usbhsh_urb_enqueue_error_not_linked;
+ }
+
ret = usb_hcd_link_urb_to_ep(hcd, urb);
- if (ret)
+ if (ret) {
+ dev_err(dev, "urb link failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
+ }
/*
- * get udev
+ * attach udev if needed
+ * see [image of mod_host]
*/
- udev = usbhsh_usbv_to_udev(usbv);
- if (!udev) {
- new_udev = usbhsh_device_alloc(hpriv, urb);
- if (!new_udev)
+ if (!usbhsh_device_get(hpriv, urb)) {
+ new_udev = usbhsh_device_attach(hpriv, urb);
+ if (!new_udev) {
+ ret = -EIO;
+ dev_err(dev, "device attach failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
-
- udev = new_udev;
+ }
}
/*
- * get uep
+ * attach endpoint if needed
+ * see [image of mod_host]
*/
- uep = usbhsh_ep_to_uep(ep);
- if (!uep) {
- uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
- is_dir_in, mem_flags);
- if (!uep)
+ if (!usbhsh_ep_to_uep(ep)) {
+ ret = usbhsh_endpoint_attach(hpriv, urb, mem_flags);
+ if (ret < 0) {
+ dev_err(dev, "endpoint attach failed\n");
goto usbhsh_urb_enqueue_error_free_device;
+ }
}
- pipe = usbhsh_uep_to_pipe(uep);
/*
- * alloc new request
+ * attach pipe to endpoint
+ * see [image of mod_host]
*/
- ureq = usbhsh_req_alloc(hpriv, urb, mem_flags);
- if (unlikely(!ureq)) {
- ret = -ENOMEM;
+ for (i = 0; i < 1024; i++) {
+ ret = usbhsh_pipe_attach(hpriv, urb);
+ if (ret < 0)
+ msleep(100);
+ else
+ break;
+ }
+ if (ret < 0) {
+ dev_err(dev, "pipe attach failed\n");
goto usbhsh_urb_enqueue_error_free_endpoint;
}
- usbhsh_urb_to_ureq(urb) = ureq;
/*
* push packet
*/
if (usb_pipecontrol(urb->pipe))
- usbhsh_dcp_queue_push(hcd, hpriv, pipe, urb);
+ ret = usbhsh_dcp_queue_push(hcd, urb, mem_flags);
else
- usbhsh_queue_push(hcd, pipe, urb);
+ ret = usbhsh_queue_push(hcd, urb, mem_flags);
- return 0;
+ return ret;
usbhsh_urb_enqueue_error_free_endpoint:
- usbhsh_endpoint_free(hpriv, ep);
+ usbhsh_endpoint_detach(hpriv, ep);
usbhsh_urb_enqueue_error_free_device:
if (new_udev)
- usbhsh_device_free(hpriv, new_udev);
+ usbhsh_device_detach(hpriv, new_udev);
usbhsh_urb_enqueue_error_not_linked:
dev_dbg(dev, "%s error\n", __func__);
@@ -807,8 +1027,11 @@ static int usbhsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
if (ureq) {
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pkt *pkt = &ureq->pkt;
+
+ usbhs_pkt_pop(pkt->pipe, pkt);
+ usbhsh_queue_done(priv, pkt);
}
return 0;
@@ -823,7 +1046,7 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
/*
* this function might be called manytimes by same hcd/ep
- * in-endpoitn == out-endpoint if ep == dcp.
+ * in-endpoint == out-endpoint if ep == dcp.
*/
if (!uep)
return;
@@ -831,15 +1054,14 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
udev = usbhsh_uep_to_udev(uep);
hpriv = usbhsh_hcd_to_hpriv(hcd);
- usbhsh_endpoint_free(hpriv, ep);
- ep->hcpriv = NULL;
+ usbhsh_endpoint_detach(hpriv, ep);
/*
* if there is no endpoint,
* free device
*/
if (!usbhsh_device_has_endpoint(udev))
- usbhsh_device_free(hpriv, udev);
+ usbhsh_device_detach(hpriv, udev);
}
static int usbhsh_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -919,6 +1141,8 @@ static int __usbhsh_hub_port_feature(struct usbhsh_hpriv *hpriv,
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_LOW_SPEED);
+ usbhsh_queue_force_pop_all(priv);
+
usbhs_bus_send_reset(priv);
msleep(20);
usbhs_bus_send_sof_enable(priv);
@@ -1082,6 +1306,20 @@ static int usbhsh_irq_attch(struct usbhs_priv *priv,
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+ * the attch interrupt might happen endlessly on some devices
+ * (a self-powered USB hub ?)
+ * disable it here.
+ *
+ * usbhsh_is_running() becomes effective
+ * according to this process.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = NULL;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
return 0;
}
@@ -1096,6 +1334,24 @@ static int usbhsh_irq_dtch(struct usbhs_priv *priv,
usbhsh_port_stat_clear(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+ * enable attch interrupt again
+ *
+ * usbhsh_is_running() becomes invalid
+ * according to this process.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = usbhsh_irq_attch;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
+ /*
+ * usbhsh_queue_force_pop_all() should be called
+ * after usbhsh_is_running() becomes invalid.
+ */
+ usbhsh_queue_force_pop_all(priv);
+
return 0;
}
@@ -1131,7 +1387,6 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct usbhsh_pipe_info *pipe_info = hpriv->pipe_info;
struct usbhs_pipe *pipe;
u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
@@ -1140,7 +1395,6 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
/* init all pipe */
old_type = USB_ENDPOINT_XFER_CONTROL;
for (i = 0; i < pipe_size; i++) {
- pipe_info[i].usr_cnt = 0;
/*
* data "output" will be finished as soon as possible,
@@ -1174,7 +1428,7 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
dir_in);
}
- pipe->mod_private = pipe_info + i;
+ pipe->mod_private = NULL;
}
}
@@ -1205,9 +1459,7 @@ static int usbhsh_start(struct usbhs_priv *priv)
* - host
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_host_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -1242,9 +1494,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
usb_remove_hcd(hcd);
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
usbhs_sys_host_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
dev_dbg(dev, "quit host\n");
@@ -1255,10 +1505,8 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
- struct usbhsh_pipe_info *pipe_info;
struct usbhsh_device *udev;
struct device *dev = usbhs_priv_to_dev(priv);
- int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
/* initialize hcd */
@@ -1267,12 +1515,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
-
- pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
- if (!pipe_info) {
- dev_err(dev, "Could not allocate pipe_info\n");
- goto usbhs_mod_host_probe_err;
- }
+ hcd->has_tt = 1; /* for low/full speed */
/*
* CAUTION
@@ -1293,9 +1536,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
hpriv->mod.name = "host";
hpriv->mod.start = usbhsh_start;
hpriv->mod.stop = usbhsh_stop;
- hpriv->pipe_info = pipe_info;
- hpriv->pipe_size = pipe_size;
- usbhsh_req_list_init(hpriv);
usbhsh_port_stat_init(hpriv);
/* init all device */
@@ -1307,11 +1547,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_info(dev, "host probed\n");
return 0;
-
-usbhs_mod_host_probe_err:
- usb_put_hcd(hcd);
-
- return -ENOMEM;
}
int usbhs_mod_host_remove(struct usbhs_priv *priv)
@@ -1319,8 +1554,6 @@ int usbhs_mod_host_remove(struct usbhs_priv *priv)
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- usbhsh_req_list_quit(hpriv);
-
usb_put_hcd(hcd);
return 0;
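The toggle bookkeeping in usbhsh_endpoint_sequence_save() above reduces to counting how many packets the transfer used and flipping the host-side DATA toggle when that count is odd. A small standalone sketch of the same arithmetic, using the 1147-byte / 512-maxp example from the comment:

#include <stdio.h>

/* Return 1 when the DATA0/1 toggle must be flipped after transferring
 * 'len' bytes on an endpoint whose max packet size is 'maxp'; 'zero'
 * is set when a trailing zero-length packet terminated the transfer.
 */
static int must_toggle(int len, int maxp, int zero)
{
	int packets = len / maxp;

	if (len % maxp)		/* a short final packet still counts */
		packets++;
	if (zero)		/* a trailing ZLP is one more packet */
		packets++;

	return packets % 2;	/* odd packet count -> flip the toggle */
}

int main(void)
{
	/* 1147 = 512 + 512 + 123 -> 3 packets -> the next packet is DATA1 */
	printf("toggle after 1147/512: %d\n", must_toggle(1147, 512, 0));
	return 0;
}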
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c74389ce217..feb06d6d281 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -257,6 +257,13 @@ void usbhs_pipe_stall(struct usbhs_pipe *pipe)
}
}
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe)
+{
+ u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;
+
+ return (int)(pid == PID_STALL10 || pid == PID_STALL11);
+}
+
/*
* pipe setup
*/
@@ -323,8 +330,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
if (dir_in)
usbhsp_flags_set(pipe, IS_DIR_HOST);
- if ((is_host && !dir_in) ||
- (!is_host && dir_in))
+ if (!!is_host ^ !!dir_in)
dir |= DIR_OUT;
if (!dir)
@@ -471,10 +477,27 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
-void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int data)
+void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
u16 mask = (SQCLR | SQSET);
- u16 val = (data) ? SQSET : SQCLR;
+ u16 val;
+
+ /*
+ * sequence
+ * 0 : data0
+ * 1 : data1
+ * -1 : no change
+ */
+ switch (sequence) {
+ case 0:
+ val = SQCLR;
+ break;
+ case 1:
+ val = SQSET;
+ break;
+ default:
+ return;
+ }
usbhsp_pipectrl_set(pipe, mask, val);
}
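The direction test simplified above relies on the identity that, for operands normalized to 0/1, (a && !b) || (!a && b) is the same as a ^ b. A tiny standalone check of that equivalence:

#include <assert.h>

int main(void)
{
	int is_host, dir_in;

	/* exhaust all four combinations of the two normalized flags */
	for (is_host = 0; is_host <= 1; is_host++)
		for (dir_in = 0; dir_in <= 1; dir_in++)
			assert(((is_host && !dir_in) || (!is_host && dir_in)) ==
			       (!!is_host ^ !!dir_in));

	return 0;
}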
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 6334fc644cc..fa18b7dc2b2 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -87,6 +87,7 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe);
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
u16 epnum, u16 maxp);
diff --git a/drivers/usb/serial/ChangeLog.history b/drivers/usb/serial/ChangeLog.history
deleted file mode 100644
index f13fd488ebe..00000000000
--- a/drivers/usb/serial/ChangeLog.history
+++ /dev/null
@@ -1,730 +0,0 @@
-This is the contents of some of the drivers/usb/serial/ files that had old
-changelog comments. They were quite old, and out of date, and we don't keep
-them anymore, so I've put them here, away from the source files, in case
-people still care to see them.
-
-- Greg Kroah-Hartman <greg@kroah.com> October 20, 2005
-
------------------------------------------------------------------------
-usb-serial.h Change Log comments:
-
- (03/26/2002) gkh
- removed the port->tty check from port_paranoia_check() due to serial
- consoles not having a tty device assigned to them.
-
- (12/03/2001) gkh
- removed active from the port structure.
- added documentation to the usb_serial_device_type structure
-
- (10/10/2001) gkh
- added vendor and product to serial structure. Needed to determine device
- owner when the device is disconnected.
-
- (05/30/2001) gkh
- added sem to port structure and removed port_lock
-
- (10/05/2000) gkh
- Added interrupt_in_endpointAddress and bulk_in_endpointAddress to help
- fix bug with urb->dev not being set properly, now that the usb core
- needs it.
-
- (09/11/2000) gkh
- Added usb_serial_debug_data function to help get rid of #DEBUG in the
- drivers.
-
- (08/28/2000) gkh
- Added port_lock to port structure.
-
- (08/08/2000) gkh
- Added open_count to port structure.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
------------------------------------------------------------------------
-usb-serial.c Change Log comments:
-
- (12/10/2002) gkh
- Split the ports off into their own struct device, and added a
- usb-serial bus driver.
-
- (11/19/2002) gkh
- removed a few #ifdefs for the generic code and cleaned up the failure
- logic in initialization.
-
- (10/02/2002) gkh
- moved the console code to console.c and out of this file.
-
- (06/05/2002) gkh
- moved location of startup() call in serial_probe() until after all
- of the port information and endpoints are initialized. This makes
- things easier for some drivers.
-
- (04/10/2002) gkh
- added serial_read_proc function which creates a
- /proc/tty/driver/usb-serial file.
-
- (03/27/2002) gkh
- Got USB serial console code working properly and merged into the main
- version of the tree. Thanks to Randy Dunlap for the initial version
- of this code, and for pushing me to finish it up.
- The USB serial console works with any usb serial driver device.
-
- (03/21/2002) gkh
- Moved all manipulation of port->open_count into the core. Now the
- individual driver's open and close functions are called only when the
- first open() and last close() is called. Making the drivers a bit
- smaller and simpler.
- Fixed a bug if a driver didn't have the owner field set.
-
- (02/26/2002) gkh
- Moved all locking into the main serial_* functions, instead of having
- the individual drivers have to grab the port semaphore. This should
- reduce races.
- Reworked the MOD_INC logic a bit to always increment and decrement, even
- if the generic driver is being used.
-
- (10/10/2001) gkh
- usb_serial_disconnect() now sets the serial->dev pointer is to NULL to
- help prevent child drivers from accessing the device since it is now
- gone.
-
- (09/13/2001) gkh
- Moved generic driver initialize after we have registered with the USB
- core. Thanks to Randy Dunlap for pointing this problem out.
-
- (07/03/2001) gkh
- Fixed module paramater size. Thanks to John Brockmeyer for the pointer.
- Fixed vendor and product getting defined through the MODULE_PARM macro
- if the Generic driver wasn't compiled in.
- Fixed problem with generic_shutdown() not being called for drivers that
- don't have a shutdown() function.
-
- (06/06/2001) gkh
- added evil hack that is needed for the prolific pl2303 device due to the
- crazy way its endpoints are set up.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (04/08/2001) gb
- Identify version on module load.
-
- 2001_02_05 gkh
- Fixed buffer overflows bug with the generic serial driver. Thanks to
- Todd Squires <squirest@ct0.com> for fixing this.
-
- (01/10/2001) gkh
- Fixed bug where the generic serial adaptor grabbed _any_ device that was
- offered to it.
-
- (12/12/2000) gkh
- Removed MOD_INC and MOD_DEC from poll and disconnect functions, and
- moved them to the serial_open and serial_close functions.
- Also fixed bug with there not being a MOD_DEC for the generic driver
- (thanks to Gary Brubaker for finding this.)
-
- (11/29/2000) gkh
- Small NULL pointer initialization cleanup which saves a bit of disk image
-
- (11/01/2000) Adam J. Richter
- instead of using idVendor/idProduct pairs, usb serial drivers
- now identify their hardware interest with usb_device_id tables,
- which they usually have anyhow for use with MODULE_DEVICE_TABLE.
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (08/28/2000) gkh
- Added port_lock to port structure.
- Added locks for SMP safeness to generic driver
- Fixed the ability to open a generic device's port more than once.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
- (07/03/2000) gkh
- Added more debugging to serial_ioctl call
-
- (06/25/2000) gkh
- Changed generic_write_bulk_callback to not call wake_up_interruptible
- directly, but to have port_softint do it at a safer time.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (05/22/2000) gkh
- Changed the makefile, enabling the big CONFIG_USB_SERIAL_SOMTHING to be
- removed from the individual device source files.
-
- (05/03/2000) gkh
- Added the Digi Acceleport driver from Al Borchers and Peter Berger.
-
- (05/02/2000) gkh
- Changed devfs and tty register code to work properly now. This was based on
- the ACM driver changes by Vojtech Pavlik.
-
- (04/27/2000) Ryan VanderBijl
- Put calls to *_paranoia_checks into one function.
-
- (04/23/2000) gkh
- Fixed bug that Randy Dunlap found for Generic devices with no bulk out ports.
- Moved when the startup code printed out the devices that are supported.
-
- (04/19/2000) gkh
- Added driver for ZyXEL omni.net lcd plus ISDN TA
- Made startup info message specify which drivers were compiled in.
-
- (04/03/2000) gkh
- Changed the probe process to remove the module unload races.
- Changed where the tty layer gets initialized to have devfs work nicer.
- Added initial devfs support.
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
- (03/19/2000) gkh
- Fixed oops that could happen when device was removed while a program
- was talking to the device.
- Removed the static urbs and now all urbs are created and destroyed
- dynamically.
- Reworked the internal interface. Now everything is based on the
- usb_serial_port structure instead of the larger usb_serial structure.
- This fixes the bug that a multiport device could not have more than
- one port open at one time.
-
- (03/17/2000) gkh
- Added config option for debugging messages.
- Added patch for keyspan pda from Brian Warner.
-
- (03/06/2000) gkh
- Added the keyspan pda code from Brian Warner <warner@lothar.com>
- Moved a bunch of the port specific stuff into its own structure. This
- is in anticipation of the true multiport devices (there's a bug if you
- try to access more than one port of any multiport device right now)
-
- (02/21/2000) gkh
- Made it so that any serial devices only have to specify which functions
- they want to overload from the generic function calls (great,
- inheritance in C, in a driver, just what I wanted...)
- Added support for set_termios and ioctl function calls. No drivers take
- advantage of this yet.
- Removed the #ifdef MODULE, now there is no module specific code.
- Cleaned up a few comments in usb-serial.h that were wrong (thanks again
- to Miles Lott).
- Small fix to get_free_serial.
-
- (02/14/2000) gkh
- Removed the Belkin and Peracom functionality from the driver due to
- the lack of support from the vendor, and me not wanting people to
-	accidentally buy the device, expecting it to work with Linux.
- Added read_bulk_callback and write_bulk_callback to the type structure
- for the needs of the FTDI and WhiteHEAT driver.
-	Changed all references to FTDI to FTDI_SIO at the request of Bill
- Ryder.
- Changed the output urb size back to the max endpoint size to make
- the ftdi_sio driver have it easier, and due to the fact that it didn't
- really increase the speed any.
-
- (02/11/2000) gkh
- Added VISOR_FUNCTION_CONSOLE to the visor startup function. This was a
- patch from Miles Lott (milos@insync.net).
- Fixed bug with not restoring the minor range that a device grabs, if
- the startup function fails (thanks Miles for finding this).
-
- (02/05/2000) gkh
- Added initial framework for the Keyspan PDA serial converter so that
- Brian Warner has a place to put his code.
- Made the ezusb specific functions generic enough that different
- devices can use them (whiteheat and keyspan_pda both need them).
- Split out a whole bunch of structure and other stuff to a separate
- usb-serial.h file.
- Made the Visor connection messages a little more understandable, now
- that Miles Lott (milos@insync.net) has gotten the Generic channel to
- work. Also made them always show up in the log file.
-
- (01/25/2000) gkh
- Added initial framework for FTDI serial converter so that Bill Ryder
- has a place to put his code.
- Added the vendor specific info from Handspring. Now we can print out
- informational debug messages as well as understand what is happening.
-
- (01/23/2000) gkh
- Fixed problem of crash when trying to open a port that didn't have a
- device assigned to it. Made the minor node finding a little smarter,
-	now it looks to find a contiguous space for the new device.
-
- (01/21/2000) gkh
- Fixed bug in visor_startup with patch from Miles Lott (milos@insync.net)
- Fixed get_serial_by_minor which was all messed up for multi port
- devices. Fixed multi port problem for generic devices. Now the number
- of ports is determined by the number of bulk out endpoints for the
- generic device.
-
- (01/19/2000) gkh
- Removed lots of cruft that was around from the old (pre urb) driver
- interface.
- Made the serial_table dynamic. This should save lots of memory when
- the number of minor nodes goes up to 256.
- Added initial support for devices that have more than one port.
- Added more debugging comments for the Visor, and added a needed
- set_configuration call.
-
- (01/17/2000) gkh
- Fixed the WhiteHEAT firmware (my processing tool had a bug)
- and added new debug loader firmware for it.
- Removed the put_char function as it isn't really needed.
- Added visor startup commands as found by the Win98 dump.
-
- (01/13/2000) gkh
- Fixed the vendor id for the generic driver to the one I meant it to be.
-
- (01/12/2000) gkh
- Forget the version numbering...that's pretty useless...
- Made the driver able to be compiled so that the user can select which
- converter they want to use. This allows people who only want the Visor
- support to not pay the memory size price of the WhiteHEAT.
- Fixed bug where the generic driver (idVendor=0000 and idProduct=0000)
- grabbed the root hub. Not good.
-
- version 0.4.0 (01/10/2000) gkh
- Added whiteheat.h containing the firmware for the ConnectTech WhiteHEAT
- device. Added startup function to allow firmware to be downloaded to
- a device if it needs to be.
- Added firmware download logic to the WhiteHEAT device.
- Started to add #defines to split up the different drivers for potential
- configuration option.
-
- version 0.3.1 (12/30/99) gkh
- Fixed problems with urb for bulk out.
- Added initial support for multiple sets of endpoints. This enables
- the Handspring Visor to be attached successfully. Only the first
- bulk in / bulk out endpoint pair is being used right now.
-
- version 0.3.0 (12/27/99) gkh
- Added initial support for the Handspring Visor based on a patch from
- Miles Lott (milos@sneety.insync.net)
- Cleaned up the code a bunch and converted over to using urbs only.
-
- version 0.2.3 (12/21/99) gkh
- Added initial support for the Connect Tech WhiteHEAT converter.
- Incremented the number of ports in expectation of getting the
- WhiteHEAT to work properly (4 ports per connection).
- Added notification on insertion and removal of what port the
- device is/was connected to (and what kind of device it was).
-
- version 0.2.2 (12/16/99) gkh
- Changed major number to the new allocated number. We're legal now!
-
- version 0.2.1 (12/14/99) gkh
- Fixed bug that happens when device node is opened when there isn't a
- device attached to it. Thanks to marek@webdesign.no for noticing this.
-
- version 0.2.0 (11/10/99) gkh
- Split up internals to make it easier to add different types of serial
- converters to the code.
-	Added a "generic" driver that gets its vendor and product id
- from when the module is loaded. Thanks to David E. Nelson (dnelson@jump.net)
- for the idea and sample code (from the usb scanner driver.)
- Cleared up any licensing questions by releasing it under the GNU GPL.
-
- version 0.1.2 (10/25/99) gkh
- Fixed bug in detecting device.
-
- version 0.1.1 (10/05/99) gkh
- Changed the major number to not conflict with anything else.
-
- version 0.1 (09/28/99) gkh
- Can recognize the two different devices and start up a read from
- device when asked to. Writes also work. No control signals yet, this
- all is vendor specific data (i.e. no spec), also no control for
- different baud rates or other bit settings.
- Currently we are using the same devid as the acm driver. This needs
- to change.
-
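
A quick aside on the (01/21/2000) entry above, where the number of ports for a generic device becomes the number of bulk-out endpoints: purely as a minimal sketch (the helper name below is invented, not anything from usb-serial.c), that kind of scan over an interface's current altsetting looks like this:

    #include <linux/usb.h>

    /* Illustrative helper (not from usb-serial.c): count the bulk-out
     * endpoints on an interface's current altsetting. */
    static unsigned int count_bulk_out_endpoints(struct usb_interface *intf)
    {
    	struct usb_host_interface *alt = intf->cur_altsetting;
    	unsigned int i, n = 0;

    	for (i = 0; i < alt->desc.bNumEndpoints; i++)
    		if (usb_endpoint_is_bulk_out(&alt->endpoint[i].desc))
    			n++;
    	return n;
    }

A real driver would of course also look at bulk-in and interrupt endpoints before deciding how many ports to expose.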
------------------------------------------------------------------------
-visor.c Change Log comments:
-
- (06/03/2003) Judd Montgomery <judd at jpilot.org>
- Added support for module parameter options for untested/unknown
- devices.
-
- (03/09/2003) gkh
- Added support for the Sony Clie NZ90V device. Thanks to Martin Brachtl
- <brachtl@redgrep.cz> for the information.
-
- (03/05/2003) gkh
- Think Treo support is now working.
-
- (04/03/2002) gkh
- Added support for the Sony OS 4.1 devices. Thanks to Hiroyuki ARAKI
- <hiro@zob.ne.jp> for the information.
-
- (03/27/2002) gkh
- Removed assumptions that port->tty was always valid (is not true
- for usb serial console devices.)
-
- (03/23/2002) gkh
- Added support for the Palm i705 device, thanks to Thomas Riemer
- <tom@netmech.com> for the information.
-
- (03/21/2002) gkh
- Added support for the Palm m130 device, thanks to Udo Eisenbarth
- <udo.eisenbarth@web.de> for the information.
-
- (02/27/2002) gkh
- Reworked the urb handling logic. We have no more pool, but dynamically
- allocate the urb and the transfer buffer on the fly. In testing this
-	does not incur any measurable overhead. This also relies on the fact
- that we have proper reference counting logic for urbs.
-
- (02/21/2002) SilaS
- Added initial support for the Palm m515 devices.
-
- (02/14/2002) gkh
- Added support for the Clie S-360 device.
-
- (12/18/2001) gkh
- Added better Clie support for 3.5 devices. Thanks to Geoffrey Levand
- for the patch.
-
- (11/11/2001) gkh
- Added support for the m125 devices, and added check to prevent oopses
-	for Clié devices that lie about the number of ports they have.
-
- (08/30/2001) gkh
- Added support for the Clie devices, both the 3.5 and 4.0 os versions.
- Many thanks to Daniel Burke, and Bryan Payne for helping with this.
-
- (08/23/2001) gkh
- fixed a few potential bugs pointed out by Oliver Neukum.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (05/28/2000) gkh
- Added initial support for the Palm m500 and Palm m505 devices.
-
- (04/08/2001) gb
- Identify version on module load.
-
- (01/21/2000) gkh
- Added write_room and chars_in_buffer, as they were previously using the
- generic driver versions which is all wrong now that we are using an urb
- pool. Thanks to Wolfgang Grandegger for pointing this out to me.
- Removed count assignment in the write function, which was not needed anymore
- either. Thanks to Al Borchers for pointing this out.
-
- (12/12/2000) gkh
- Moved MOD_DEC to end of visor_close to be nicer, as the final write
- message can sleep.
-
- (11/12/2000) gkh
- Fixed bug with data being dropped on the floor by forcing tty->low_latency
- to be on. Hopefully this fixes the OHCI issue!
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Got rid of always calling kmalloc for every urb we wrote out to the
- device.
- Added visor_read_callback so we can keep track of bytes in and out for
- those people who like to know the speed of their device.
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (09/06/2000) gkh
- Fixed oops in visor_exit. Need to uncomment usb_unlink_urb call _after_
- the host controller drivers set urb->dev = NULL when the urb is finished.
-
- (08/28/2000) gkh
- Added locks for SMP safeness.
-
- (08/08/2000) gkh
- Fixed endian problem in visor_startup.
- Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- than once.
-
- (07/23/2000) gkh
- Added pool of write urbs to speed up transfers to the visor.
-
- (07/19/2000) gkh
- Added module_init and module_exit functions to handle the fact that this
- driver is a loadable module now.
-
- (07/03/2000) gkh
- Added visor_set_ioctl and visor_set_termios functions (they don't do much
- of anything, but are good for debugging.)
-
- (06/25/2000) gkh
- Fixed bug in visor_unthrottle that should help with the disconnect in PPP
- bug that people have been reporting.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (04/27/2000) Ryan VanderBijl
- Fixed memory leak in visor_close
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
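
On the (02/27/2002) entry above, which drops the write-urb pool in favour of allocating the urb and transfer buffer per write: a minimal sketch of that pattern, with invented names (sketch_submit_write() and sketch_write_callback() are not visor.c functions), relying on the urb reference count so the submitted urb stays alive after we drop our own reference:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/usb.h>

    /* Completion handler for the sketch: free the per-write buffer. */
    static void sketch_write_callback(struct urb *urb)
    {
    	kfree(urb->transfer_buffer);
    }

    /* Hypothetical per-write submit: urb and buffer are allocated on the
     * fly; usb_free_urb() after submit only drops our reference, the USB
     * core holds its own until the completion handler has run. */
    static int sketch_submit_write(struct usb_device *udev, unsigned int pipe,
    			       const void *data, int len)
    {
    	struct urb *urb;
    	void *buf;
    	int ret;

    	urb = usb_alloc_urb(0, GFP_ATOMIC);
    	if (!urb)
    		return -ENOMEM;

    	buf = kmemdup(data, len, GFP_ATOMIC);
    	if (!buf) {
    		usb_free_urb(urb);
    		return -ENOMEM;
    	}

    	usb_fill_bulk_urb(urb, udev, pipe, buf, len,
    			  sketch_write_callback, NULL);

    	ret = usb_submit_urb(urb, GFP_ATOMIC);
    	if (ret)
    		kfree(buf);	/* never submitted; free it ourselves */

    	usb_free_urb(urb);
    	return ret;
    }

On failure the buffer is freed immediately; on success the completion handler frees it.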
------------------------------------------------------------------------
-pl2303.c Change Log comments:
-
- 2002_Mar_26 gkh
- allowed driver to work properly if there is no tty assigned to a port
- (this happens for serial console devices.)
-
- 2001_Oct_06 gkh
- Added RTS and DTR line control. Thanks to joe@bndlg.de for parts of it.
-
- 2001_Sep_19 gkh
- Added break support.
-
- 2001_Aug_30 gkh
- fixed oops in write_bulk_callback.
-
- 2001_Aug_28 gkh
- reworked buffer logic to be like other usb-serial drivers. Hopefully
- removing some reported problems.
-
- 2001_Jun_06 gkh
- finished porting to 2.4 format.
-
-
------------------------------------------------------------------------
-io_edgeport.c Change Log comments:
-
- 2003_04_03 al borchers
- - fixed a bug (that shows up with dosemu) where the tty struct is
- used in a callback after it has been freed
-
- 2.3 2002_03_08 greg kroah-hartman
- - fixed bug when multiple devices were attached at the same time.
-
- 2.2 2001_11_14 greg kroah-hartman
- - fixed bug in edge_close that kept the port from being used more
- than once.
- - fixed memory leak on device removal.
- - fixed potential double free of memory when command urb submitting
- failed.
- - other small cleanups when the device is removed
-
- 2.1 2001_07_09 greg kroah-hartman
- - added support for TIOCMBIS and TIOCMBIC.
-
- (04/08/2001) gb
- - Identify version on module load.
-
- 2.0 2001_03_05 greg kroah-hartman
- - reworked entire driver to fit properly in with the other usb-serial
- drivers. Occasional oopses still happen, but it's a good start.
-
- 1.2.3 (02/23/2001) greg kroah-hartman
- - changed device table to work properly for 2.4.x final format.
- - fixed problem with dropping data at high data rates.
-
- 1.2.2 (11/27/2000) greg kroah-hartman
- - cleaned up more NTisms.
- - Added device table for 2.4.0-test11
-
- 1.2.1 (11/08/2000) greg kroah-hartman
- - Started to clean up NTisms.
- - Fixed problem with dev field of urb for kernels >= 2.4.0-test9
-
- 1.2 (10/17/2000) David Iacovelli
- Remove all EPIC code and GPL source
-	Fix RELEVANT_IFLAG macro to include flow control and
-	port configuration changes.
- Fix redefinition of SERIAL_MAGIC
- Change all timeout values to 5 seconds
- Tried to fix the UHCI multiple urb submission, but failed miserably.
- it seems to work fine with OHCI.
- ( Greg take a look at the #if 0 at end of WriteCmdUsb() we must
-	find a way to work around this UHCI bug )
-
- 1.1 (10/11/2000) David Iacovelli
- Fix XON/XOFF flow control to support both IXON and IXOFF
-
- 0.9.27 (06/30/2000) David Iacovelli
- Added transmit queue and now allocate urb for command writes.
-
- 0.9.26 (06/29/2000) David Iacovelli
- Add support for 80251 based edgeport
-
- 0.9.25 (06/27/2000) David Iacovelli
- Do not close the port if it has multiple opens.
-
- 0.9.24 (05/26/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
- and first cut at running BlackBox Demo
-
- 0.9.23 (05/24/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
-
- 0.9.22 (05/23/2000) David Iacovelli
- fixed bug in enumeration. If epconfig turns on mapping by
- path after a device is already plugged in, we now update
- the mapping correctly
-
- 0.9.21 (05/16/2000) David Iacovelli
- Added BlockUntilChaseResp() to also wait for txcredits
- Updated the way we allocate and handle write URBs
- Add debug code to dump buffers
-
- 0.9.20 (05/01/2000) David Iacovelli
- change driver to use usb/tts/
-
- 0.9.19 (05/01/2000) David Iacovelli
- Update code to compile if DEBUG is off
-
- 0.9.18 (04/28/2000) David Iacovelli
- cleanup and test tty_register with devfs
-
- 0.9.17 (04/27/2000) greg kroah-hartman
- changed tty_register around to be like the way it
- was before, but now it works properly with devfs.
-
- 0.9.16 (04/26/2000) david iacovelli
- Fixed bug in GetProductInfo()
-
- 0.9.15 (04/25/2000) david iacovelli
- Updated enumeration
-
- 0.9.14 (04/24/2000) david iacovelli
- Removed all config/status IOCTLS and
- converted to using /proc/edgeport
- still playing with devfs
-
- 0.9.13 (04/24/2000) david iacovelli
- Removed configuration based on ttyUSB0
-	Added support for configuration using /proc/edgeport
- first attempt at using devfs (not working yet!)
- Added IOCTL to GetProductInfo()
- Added support for custom baud rates
- Add support for random port numbers
-
- 0.9.12 (04/18/2000) david iacovelli
- added additional configuration IOCTLs
- use ttyUSB0 for configuration
-
- 0.9.11 (04/17/2000) greg kroah-hartman
- fixed module initialization race conditions.
- made all urbs dynamically allocated.
- made driver devfs compatible. now it only registers the tty device
- when the device is actually plugged in.
-
- 0.9.10 (04/13/2000) greg kroah-hartman
- added proc interface framework.
-
- 0.9.9 (04/13/2000) david iacovelli
- added enumeration code and ioctls to configure the device
-
- 0.9.8 (04/12/2000) david iacovelli
- Change interrupt read start when device is plugged in
- and stop when device is removed
- process interrupt reads when all ports are closed
- (keep value of rxBytesAvail consistent with the edgeport)
- set the USB_BULK_QUEUE flag so that we can shove a bunch
- of urbs at once down the pipe
-
- 0.9.7 (04/10/2000) david iacovelli
- start to add enumeration code.
- generate serial number for epic devices
- add support for kdb
-
- 0.9.6 (03/30/2000) david iacovelli
-	add IOCTL to get string, manufacturer, and boot descriptors
-
- 0.9.5 (03/14/2000) greg kroah-hartman
- more error checking added to SerialOpen to try to fix UHCI open problem
-
- 0.9.4 (03/09/2000) greg kroah-hartman
- added more error checking to handle oops when data is hanging
- around and tty is abruptly closed.
-
- 0.9.3 (03/09/2000) david iacovelli
- Add epic support for xon/xoff chars
- play with performance
-
- 0.9.2 (03/08/2000) greg kroah-hartman
- changed most "info" calls to "dbg"
- implemented flow control properly in the termios call
-
- 0.9.1 (03/08/2000) david iacovelli
- added EPIC support
- enabled bootloader update
-
- 0.9 (03/08/2000) greg kroah-hartman
- Release to IO networks.
- Integrated changes that David made
- made getting urbs for writing SMP safe
-
- 0.8 (03/07/2000) greg kroah-hartman
- Release to IO networks.
- Fixed problems that were seen in code by David.
-	Now both Edgeport/4 and Edgeport/2 work properly.
- Changed most of the functions to use port instead of serial.
-
- 0.7 (02/27/2000) greg kroah-hartman
- Milestone 3 release.
- Release to IO Networks
- ioctl for waiting on line change implemented.
- ioctl for getting statistics implemented.
- multiport support working.
- lsr and msr registers are now handled properly.
- change break now hooked up and working.
- support for all known Edgeport devices.
-
- 0.6 (02/22/2000) greg kroah-hartman
- Release to IO networks.
- CHASE is implemented correctly when port is closed.
- SerialOpen now blocks correctly until port is fully opened.
-
- 0.5 (02/20/2000) greg kroah-hartman
- Release to IO networks.
- Known problems:
- modem status register changes are not sent on to the user
- CHASE is not implemented when the port is closed.
-
- 0.4 (02/16/2000) greg kroah-hartman
- Second cut at the CeBit demo.
- Doesn't leak memory on every write to the port
- Still small leaks on startup.
- Added support for Edgeport/2 and Edgeport/8
-
- 0.3 (02/15/2000) greg kroah-hartman
- CeBit demo release.
- Force the line settings to 4800, 8, 1, e for the demo.
- Warning! This version leaks memory like crazy!
-
- 0.2 (01/30/2000) greg kroah-hartman
- Milestone 1 release.
- Device is found by USB subsystem, enumerated, firmware is downloaded
- and the descriptors are printed to the debug log, config is set, and
- green light starts to blink. Open port works, and data can be sent
- and received at the default settings of the UART. Loopback connector
- and debug log confirms this.
-
- 0.1 (01/23/2000) greg kroah-hartman
- Initial release to help IO Networks try to set up their test system.
- Edgeport4 is recognized, firmware is downloaded, config is set so
- device blinks green light every 3 sec. Port is bound, but opening,
- closing, and sending data do not work properly.
-
-
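
A recurring theme in the visor (03/27/2002) and io_edgeport 2003_04_03 entries above is that completion handlers must not assume the tty is still around. As a loose sketch of the defensive idiom, using the tty_port refcounting helpers rather than code from these drivers (sketch_wake_writer() is an invented name):

    #include <linux/tty.h>

    /* Illustrative only: take a reference on the tty before touching it
     * from completion context, so a concurrent close/hangup cannot free
     * it underneath us. */
    static void sketch_wake_writer(struct tty_port *tport)
    {
    	struct tty_struct *tty = tty_port_tty_get(tport);

    	if (!tty)
    		return;		/* port already closed */
    	tty_wakeup(tty);
    	tty_kref_put(tty);
    }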
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index d6921fa1403..f9f29b289f2 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -20,50 +20,7 @@
* TODO:
 * -- Add true modem control line query capability. Currently we track the
* states reported by the interrupt and the states we request.
- * -- Add error reporting back to application for UART error conditions.
- * Just point me at how to implement this and I'll do it. I've put the
- * framework in, but haven't analyzed the "tty_flip" interface yet.
* -- Add support for flush commands
- * -- Add everything that is missing :)
- *
- * 27-Nov-2001 gkh
- *	compressed all the different device entries into 1.
- *
- * 30-May-2001 gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 12-Mar-2001 gkh
- * - Added support for the GoHubs GO-COM232 device which is the same as the
- * Peracom device.
- *
- * 06-Nov-2000 gkh
- * - Added support for the old Belkin and Peracom devices.
- * - Made the port able to be opened multiple times.
- * - Added some defaults in case the line settings are things these devices
- * can't support.
- *
- * 18-Oct-2000 William Greathouse
- * Released into the wild (linux-usb-devel)
- *
- * 17-Oct-2000 William Greathouse
- * Add code to recognize firmware version and set hardware flow control
- * appropriately. Belkin states that firmware prior to 3.05 does not
- * operate correctly in hardware handshake mode. I have verified this
- * on firmware 2.05 -- for both RTS and DTR input flow control, the control
- * line is not reset. The test performed by the Belkin Win* driver is
- * to enable hardware flow control for firmware 2.06 or greater and
- * for 1.00 or prior. I am only enabling for 2.06 or greater.
- *
- * 12-Oct-2000 William Greathouse
- * First cut at supporting Belkin USB Serial Adapter F5U103
- * I did not have a copy of the original work to support this
- * adapter, so pardon any stupid mistakes. All of the information
- * I am using to write this driver was acquired by using a modified
- * UsbSnoop on Windows2000 and from examining the other USB drivers.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 6ae1c0688b5..0e77511060c 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -335,13 +335,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
goto out;
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
ch341_close(port);
- return -EPROTO;
+ goto out;
}
r = usb_serial_generic_open(tty, port);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fd67cc53545..adfe660ed00 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
@@ -280,7 +281,10 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send config request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
@@ -331,7 +335,10 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
@@ -395,10 +402,11 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
- if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
- dev_err(&port->dev, "%s - Unable to enable UART\n",
- __func__);
- return -EPROTO;
+ result = cp210x_set_config_single(port, CP210X_IFC_ENABLE,
+ UART_ENABLE);
+ if (result) {
+ dev_err(&port->dev, "%s - Unable to enable UART\n", __func__);
+ return result;
}
result = usb_serial_generic_open(tty, port);
@@ -520,18 +528,13 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
cflag |= PARENB;
break;
case BITS_PARITY_MARK:
- dbg("%s - parity = MARK (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = MARK", __func__);
+ cflag |= (PARENB|PARODD|CMSPAR);
break;
case BITS_PARITY_SPACE:
- dbg("%s - parity = SPACE (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = SPACE", __func__);
+ cflag &= ~PARODD;
+ cflag |= (PARENB|CMSPAR);
break;
default:
dbg("%s - Unknown parity mode, disabling parity", __func__);
@@ -588,7 +591,6 @@ static void cp210x_set_termios(struct tty_struct *tty,
if (!tty)
return;
- tty->termios->c_cflag &= ~CMSPAR;
cflag = tty->termios->c_cflag;
old_cflag = old_termios->c_cflag;
baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
@@ -643,16 +645,27 @@ static void cp210x_set_termios(struct tty_struct *tty,
"not supported by device\n");
}
- if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
+ if ((cflag & (PARENB|PARODD|CMSPAR)) !=
+ (old_cflag & (PARENB|PARODD|CMSPAR))) {
cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_PARITY_MASK;
if (cflag & PARENB) {
- if (cflag & PARODD) {
- bits |= BITS_PARITY_ODD;
- dbg("%s - parity = ODD", __func__);
+ if (cflag & CMSPAR) {
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_MARK;
+ dbg("%s - parity = MARK", __func__);
+ } else {
+ bits |= BITS_PARITY_SPACE;
+ dbg("%s - parity = SPACE", __func__);
+ }
} else {
- bits |= BITS_PARITY_EVEN;
- dbg("%s - parity = EVEN", __func__);
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_ODD;
+ dbg("%s - parity = ODD", __func__);
+ } else {
+ bits |= BITS_PARITY_EVEN;
+ dbg("%s - parity = EVEN", __func__);
+ }
}
}
if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
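
For context on the cp210x parity hunks just above: once CMSPAR is honoured, mark and space parity can be requested from userspace with plain termios flags. A minimal userspace sketch, assuming a glibc that exposes CMSPAR and a hypothetical device node /dev/ttyUSB0 (PARENB|CMSPAR|PARODD selects mark parity; clearing PARODD selects space parity):

    #define _GNU_SOURCE		/* CMSPAR is a Linux extension */
    #include <fcntl.h>
    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
    	struct termios tio;
    	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

    	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
    		perror("open/tcgetattr");
    		return 1;
    	}

    	/* Request mark parity; drop PARODD for space parity instead. */
    	tio.c_cflag |= PARENB | CMSPAR | PARODD;

    	if (tcsetattr(fd, TCSANOW, &tio) < 0) {
    		perror("tcsetattr");
    		return 1;
    	}
    	close(fd);
    	return 0;
    }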
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index f744ab7a3b1..98bf8334983 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -138,7 +138,6 @@ static int cyberjack_startup(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
int result;
- serial->port[i]->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(serial->port[i]->interrupt_in_urb,
GFP_KERNEL);
if (result)
@@ -208,7 +207,6 @@ static void cyberjack_close(struct usb_serial_port *port)
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
@@ -221,22 +219,18 @@ static int cyberjack_write(struct tty_struct *tty,
return 0;
}
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
+ if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
spin_lock_irqsave(&priv->lock, flags);
if (count+priv->wrfilled > sizeof(priv->wrbuf)) {
 		/* Too much data for buffer. Reset buffer. */
priv->wrfilled = 0;
- port->write_urb_busy = 0;
spin_unlock_irqrestore(&priv->lock, flags);
+ set_bit(0, &port->write_urbs_free);
return 0;
}
@@ -265,13 +259,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrsent = length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -283,7 +271,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrfilled = 0;
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return 0;
}
@@ -351,7 +339,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
spin_unlock(&priv->lock);
if (!old_rdtodo) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting "
@@ -362,7 +349,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
}
resubmit:
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
@@ -415,7 +401,6 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
/* Continue to read if we have still urbs to do. */
if (todo /* || (urb->actual_length==port->bulk_in_endpointAddress)*/) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting read "
@@ -432,7 +417,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
@@ -455,13 +440,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
priv->wrsent += length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((port->serial->type->write_bulk_callback) ?
- port->serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index d9906eb9d16..07680d6b792 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -16,32 +16,6 @@
*
* See http://geocities.com/i0xox0i for information on this driver and the
* earthmate usb device.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 4-29-2005
- * Fixed problem where setting or retrieving the serial config would fail
- * with EPIPE. Removed CRTS toggling so the driver behaves more like
- * other usbserial adapters. Issued new interval of 1ms instead of the
- * default 10ms. As a result, transfer speed has been substantially
- * increased from avg. 850bps to avg. 3300bps. Initial termios has also
- * been modified. Cleaned up code and formatting issues so it is more
- * readable. Replaced the C++ style comments.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 12-15-2004
- * Incorporated write buffering from pl2303 driver. Fixed bug with line
- * handling so both lines are raised in cypress_open. (was dropping rts)
- * Various code cleanups made as well along with other misc bug fixes.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 04-10-2004
- * Driver modified to support dynamic line settings. Various improvements
- * and features.
- *
- * Neil Whelchel
- * 10-2003
- * Driver first released.
- *
*/
/* Thanks to Neil Whelchel for writing the first cypress m8 implementation
@@ -1162,8 +1136,6 @@ static void cypress_unthrottle(struct tty_struct *tty)
return;
if (actually_throttled) {
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb, "
@@ -1352,7 +1324,6 @@ static void cypress_write_int_callback(struct urb *urb)
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
port->interrupt_out_urb->transfer_buffer_length = 1;
- port->interrupt_out_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (!result)
return;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e92cbefc0f8..6d26a77d0f2 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -13,222 +13,6 @@
*
* Peter Berger (pberger@brimson.com)
* Al Borchers (borchers@steinerpoint.com)
-*
-* (12/03/2001) gkh
-* switched to using port->port.count instead of private version.
-* Removed port->active
-*
-* (04/08/2001) gb
-* Identify version on module load.
-*
-* (11/01/2000) Adam J. Richter
-* usb_device_id table support
-*
-* (11/01/2000) pberger and borchers
-* -- Turned off the USB_DISABLE_SPD flag for write bulk urbs--it caused
-* USB 4 ports to hang on startup.
-* -- Serialized access to write urbs by adding the dp_write_urb_in_use
-* flag; otherwise, the driver caused SMP system hangs. Watching the
-* urb status is not sufficient.
-*
-* (10/05/2000) gkh
-* -- Fixed bug with urb->dev not being set properly, now that the usb
-* core needs it.
-*
-* (8/8/2000) pberger and borchers
-* -- Fixed close so that
-* - it can timeout while waiting for transmit idle, if needed;
-* - it ignores interrupts when flushing the port, turning
-*       off modem signalling, and so on;
-* - it waits for the flush to really complete before returning.
-* -- Read_bulk_callback and write_bulk_callback check for a closed
-* port before using the tty struct or writing to the port.
-* -- The two changes above fix the oops caused by interrupted closes.
-* -- Added interruptible args to write_oob_command and set_modem_signals
-* and added a timeout arg to transmit_idle; needed for fixes to
-* close.
-* -- Added code for rx_throttle and rx_unthrottle so that input flow
-* control works.
-* -- Added code to set overrun, parity, framing, and break errors
-* (untested).
-* -- Set USB_DISABLE_SPD flag for write bulk urbs, so no 0 length
-* bulk writes are done. These hung the Digi USB device. The
-* 0 length bulk writes were a new feature of usb-uhci added in
-* the 2.4.0-test6 kernels.
-* -- Fixed mod inc race in open; do mod inc before sleeping to wait
-* for a close to finish.
-*
-* (7/31/2000) pberger
-* -- Fixed bugs with hardware handshaking:
-* - Added code to set/clear tty->hw_stopped in digi_read_oob_callback()
-* and digi_set_termios()
-* -- Added code in digi_set_termios() to
-* - add conditional in code handling transition from B0 to only
-* set RTS if RTS/CTS flow control is either not in use or if
-* the port is not currently throttled.
-* - handle turning off CRTSCTS.
-*
-* (7/30/2000) borchers
-* -- Added support for more than one Digi USB device by moving
-* globals to a private structure in the pointed to from the
-* usb_serial structure.
-* -- Moved the modem change and transmit idle wait queues into
-* the port private structure, so each port has its own queue
-* rather than sharing global queues.
-* -- Added support for break signals.
-*
-* (7/25/2000) pberger
-* -- Added USB-2 support. Note: the USB-2 supports 3 devices: two
-* serial and a parallel port. The parallel port is implemented
-* as a serial-to-parallel converter. That is, the driver actually
-* presents all three USB-2 interfaces as serial ports, but the third
-* one physically connects to a parallel device. Thus, for example,
-* one could plug a parallel printer into the USB-2's third port,
-* but from the kernel's (and userland's) point of view what's
-* actually out there is a serial device.
-*
-* (7/15/2000) borchers
-* -- Fixed race in open when a close is in progress.
-* -- Keep count of opens and dec the module use count for each
-* outstanding open when shutdown is called (on disconnect).
-* -- Fixed sanity checks in read_bulk_callback and write_bulk_callback
-* so pointers are checked before use.
-* -- Split read bulk callback into in band and out of band
-* callbacks, and no longer restart read chains if there is
-* a status error or a sanity error. This fixed the seg
-* faults and other errors we used to get on disconnect.
-* -- Port->active is once again a flag as usb-serial intended it
-* to be, not a count. Since it was only a char it would
-* have been limited to 256 simultaneous opens. Now the open
-* count is kept in the port private structure in dp_open_count.
-* -- Added code for modularization of the digi_acceleport driver.
-*
-* (6/27/2000) pberger and borchers
-* -- Zeroed out sync field in the wakeup_task before first use;
-* otherwise the uninitialized value might prevent the task from
-* being scheduled.
-* -- Initialized ret value to 0 in write_bulk_callback, otherwise
-* the uninitialized value could cause a spurious debugging message.
-*
-* (6/22/2000) pberger and borchers
-* -- Made cond_wait_... inline--apparently on SPARC the flags arg
-* to spin_lock_irqsave cannot be passed to another function
-* to call spin_unlock_irqrestore. Thanks to Pauline Middelink.
-* -- In digi_set_modem_signals the inner nested spin locks use just
-* spin_lock() rather than spin_lock_irqsave(). The old code
-* mistakenly left interrupts off. Thanks to Pauline Middelink.
-* -- copy_from_user (which can sleep) is no longer called while a
-* spinlock is held. We copy to a local buffer before getting
-* the spinlock--don't like the extra copy but the code is simpler.
-* -- Printk and dbg are no longer called while a spin lock is held.
-*
-* (6/4/2000) pberger and borchers
-* -- Replaced separate calls to spin_unlock_irqrestore and
-* interruptible_sleep_on_timeout with a new function
-* cond_wait_interruptible_timeout_irqrestore. This eliminates
-* the race condition where the wake up could happen after
-* the unlock and before the sleep.
-* -- Close now waits for output to drain.
-* -- Open waits until any close in progress is finished.
-* -- All out of band responses are now processed, not just the
-* first in a USB packet.
-* -- Fixed a bug that prevented the driver from working when the
-* first Digi port was not the first USB serial port--the driver
-* was mistakenly using the external USB serial port number to
-* try to index into its internal ports.
-* -- Fixed an SMP bug -- write_bulk_callback is called directly from
-* an interrupt, so spin_lock_irqsave/spin_unlock_irqrestore are
-* needed for locks outside write_bulk_callback that are also
-* acquired by write_bulk_callback to prevent deadlocks.
-* -- Fixed support for select() by making digi_chars_in_buffer()
-* return 256 when -EINPROGRESS is set, as the line discipline
-* code in n_tty.c expects.
-* -- Fixed an include file ordering problem that prevented debugging
-* messages from working.
-* -- Fixed an intermittent timeout problem that caused writes to
-* sometimes get stuck on some machines on some kernels. It turns
-* out in these circumstances write_chan() (in n_tty.c) was
-* asleep waiting for our wakeup call. Even though we call
-* wake_up_interruptible() in digi_write_bulk_callback(), there is
-* a race condition that could cause the wakeup to fail: if our
-* wake_up_interruptible() call occurs between the time that our
-* driver write routine finishes and write_chan() sets current->state
-* to TASK_INTERRUPTIBLE, the effect of our wakeup setting the state
-* to TASK_RUNNING will be lost and write_chan's subsequent call to
-* schedule() will never return (unless it catches a signal).
-* This race condition occurs because write_bulk_callback() (and thus
-* the wakeup) are called asynchronously from an interrupt, rather than
-* from the scheduler. We can avoid the race by calling the wakeup
-* from the scheduler queue and that's our fix: Now, at the end of
-* write_bulk_callback() we queue up a wakeup call on the scheduler
-* task queue. We still also invoke the wakeup directly since that
-* squeezes a bit more performance out of the driver, and any lost
-* race conditions will get cleaned up at the next scheduler run.
-*
-* NOTE: The problem also goes away if you comment out
-* the two code lines in write_chan() where current->state
-* is set to TASK_RUNNING just before calling driver.write() and to
-* TASK_INTERRUPTIBLE immediately afterwards. This is why the
-* problem did not show up with the 2.2 kernels -- they do not
-* include that code.
-*
-* (5/16/2000) pberger and borchers
-* -- Added timeouts to sleeps, to defend against lost wake ups.
-* -- Handle transition to/from B0 baud rate in digi_set_termios.
-*
-* (5/13/2000) pberger and borchers
-* -- All commands now sent on out of band port, using
-* digi_write_oob_command.
-* -- Get modem control signals whenever they change, support TIOCMGET/
-* SET/BIS/BIC ioctls.
-* -- digi_set_termios now supports parity, word size, stop bits, and
-* receive enable.
-* -- Cleaned up open and close, use digi_set_termios and
-* digi_write_oob_command to set port parameters.
-* -- Added digi_startup_device to start read chains on all ports.
-* -- Write buffer is only used when count==1, to be sure put_char can
-* write a char (unless the buffer is full).
-*
-* (5/10/2000) pberger and borchers
-* -- Added MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls on open/close.
-* -- Fixed problem where the first incoming character is lost on
-* port opens after the first close on that port. Now we keep
-* the read_urb chain open until shutdown.
-* -- Added more port conditioning calls in digi_open and digi_close.
-* -- Convert port->active to a use count so that we can deal with multiple
-* opens and closes properly.
-* -- Fixed some problems with the locking code.
-*
-* (5/3/2000) pberger and borchers
-* -- First alpha version of the driver--many known limitations and bugs.
-*
-*
-* Locking and SMP
-*
-* - Each port, including the out-of-band port, has a lock used to
-* serialize all access to the port's private structure.
-* - The port lock is also used to serialize all writes and access to
-* the port's URB.
-* - The port lock is also used for the port write_wait condition
-* variable. Holding the port lock will prevent a wake up on the
-* port's write_wait; this can be used with cond_wait_... to be sure
-* the wake up is not lost in a race when dropping the lock and
-* sleeping waiting for the wakeup.
-* - digi_write() does not sleep, since it is sometimes called on
-* interrupt time.
-* - digi_write_bulk_callback() and digi_read_bulk_callback() are
-* called directly from interrupts. Hence spin_lock_irqsave()
-* and spin_unlock_irqrestore() are used in the rest of the code
-* for any locks they acquire.
-* - digi_write_bulk_callback() gets the port lock before waking up
-* processes sleeping on the port write_wait. It also schedules
-* wake ups so they happen from the scheduler, because the tty
-* system can miss wake ups from interrupts.
-* - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to
-* recheck the condition they are sleeping on. This is defensive,
-* in case a wake up is lost.
-* - Following Documentation/DocBook/kernel-locking.tmpl no spin locks
-* are held when calling copy_to/from_user or printk.
*/
#include <linux/kernel.h>
@@ -654,7 +438,6 @@ static int digi_write_oob_command(struct usb_serial_port *port,
len &= ~3;
memcpy(oob_port->write_urb->transfer_buffer, buf, len);
oob_port->write_urb->transfer_buffer_length = len;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
@@ -732,7 +515,6 @@ static int digi_write_inb_command(struct usb_serial_port *port,
memcpy(data, buf, len);
port->write_urb->transfer_buffer_length = len;
}
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -803,7 +585,6 @@ static int digi_set_modem_signals(struct usb_serial_port *port,
data[7] = 0;
oob_port->write_urb->transfer_buffer_length = 8;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -899,10 +680,8 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* restart read chain */
- if (priv->dp_throttle_restart) {
- port->read_urb->dev = port->serial->dev;
+ if (priv->dp_throttle_restart)
ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- }
/* turn throttle off */
priv->dp_throttled = 0;
@@ -1195,7 +974,6 @@ static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
}
port->write_urb->transfer_buffer_length = data_len+2;
- port->write_urb->dev = port->serial->dev;
*data++ = DIGI_CMD_SEND_DATA;
*data++ = data_len;
@@ -1271,7 +1049,6 @@ static void digi_write_bulk_callback(struct urb *urb)
= (unsigned char)priv->dp_out_buf_len;
port->write_urb->transfer_buffer_length =
priv->dp_out_buf_len + 2;
- port->write_urb->dev = serial->dev;
memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -1473,7 +1250,6 @@ static int digi_startup_device(struct usb_serial *serial)
/* set USB_DISABLE_SPD flag for write bulk urbs */
for (i = 0; i < serial->type->num_ports + 1; i++) {
port = serial->port[i];
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(&port->dev,
@@ -1616,7 +1392,6 @@ static void digi_read_bulk_callback(struct urb *urb)
}
/* continue read */
- urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
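
The digi_acceleport notes removed above describe two defences against lost wake-ups: wake-ups are also scheduled from the scheduler rather than relied on only at interrupt time, and every sleep re-checks its condition with a timeout. A minimal sketch of the sleeping side only, with invented names (this is not the driver's cond_wait_interruptible_timeout_irqrestore()):

    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    /* Illustrative only: sleep on a wait queue until cond(arg) holds,
     * re-checking with a timeout so a lost wake-up cannot strand us. */
    static void sketch_wait_with_timeout(wait_queue_head_t *wq,
    				     bool (*cond)(void *), void *arg,
    				     long timeout)
    {
    	DEFINE_WAIT(wait);

    	for (;;) {
    		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
    		if (cond(arg) || signal_pending(current))
    			break;
    		schedule_timeout(timeout);
    	}
    	finish_wait(wq, &wait);
    }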
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bd4298bb675..c290df97108 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,6 +2105,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
cflag = termios->c_cflag;
+ if (old_termios == 0)
+ goto no_skip;
+
if (old_termios->c_cflag == termios->c_cflag
&& old_termios->c_ispeed == termios->c_ispeed
&& old_termios->c_ospeed == termios->c_ospeed)
@@ -2117,6 +2121,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
(termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
goto no_data_parity_stop_changes;
+no_skip:
/* Set number of data bits, parity, stop bits */
urb_value = 0;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c..df1d7da933e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -2,7 +2,7 @@
* vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
* Please keep numerically sorted within individual areas, thanks!
*
- * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
* from Rudolf Gugler
*
*/
@@ -78,7 +78,7 @@
*/
#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
-/* www.starting-point-systems.com ľChameleon device */
+/* www.starting-point-systems.com µChameleon device */
#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
/*
@@ -112,6 +112,7 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
@@ -290,7 +291,7 @@
/*
* Teratronik product ids.
- * Submitted by O. Wölfelschneider.
+ * Submitted by O. Wölfelschneider.
*/
#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 1a49ca9c8ea..bf12565f8e8 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -901,7 +901,6 @@ static int garmin_init_session(struct usb_serial_port *port)
usb_kill_urb(port->interrupt_in_urb);
dbg("%s - adding interrupt input", __func__);
- port->interrupt_in_urb->dev = serial->dev;
status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (status)
dev_err(&serial->dev->dev,
@@ -1277,7 +1276,6 @@ static void garmin_read_int_callback(struct urb *urb)
unsigned long flags;
int retval;
struct usb_serial_port *port = urb->context;
- struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -1311,12 +1309,6 @@ static void garmin_read_int_callback(struct urb *urb)
if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
/* bulk data available */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- garmin_read_bulk_callback, port);
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
@@ -1353,7 +1345,6 @@ static void garmin_read_int_callback(struct urb *urb)
garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e4db5ad2bc5..f7403576f99 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,7 +1,7 @@
/*
* USB Serial Converter Generic functions
*
- * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
+ * Copyright (C) 2010 - 2011 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
@@ -132,7 +132,7 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
/* if we have a bulk endpoint, start reading from it */
if (port->bulk_in_size)
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
return result;
}
@@ -157,8 +157,10 @@ static void generic_cleanup(struct usb_serial_port *port)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
}
- if (port->bulk_in_size)
- usb_kill_urb(port->read_urb);
+ if (port->bulk_in_size) {
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+ }
}
}
@@ -308,19 +310,52 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
return chars;
}
-int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+ int index, gfp_t mem_flags)
+{
+ int res;
+
+ if (!test_and_clear_bit(index, &port->read_urbs_free))
+ return 0;
+
+ dbg("%s - port %d, urb %d\n", __func__, port->number, index);
+
+ res = usb_submit_urb(port->read_urbs[index], mem_flags);
+ if (res) {
+ if (res != -EPERM) {
+ dev_err(&port->dev,
+ "%s - usb_submit_urb failed: %d\n",
+ __func__, res);
+ }
+ set_bit(index, &port->read_urbs_free);
+ return res;
+ }
+
+ return 0;
+}
+
+int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags)
{
- int result;
+ int res;
+ int i;
- result = usb_submit_urb(port->read_urb, mem_flags);
- if (result && result != -EPERM) {
- dev_err(&port->dev, "%s - error submitting urb: %d\n",
- __func__, result);
+ dbg("%s - port %d", __func__, port->number);
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ res = usb_serial_generic_submit_read_urb(port, i, mem_flags);
+ if (res)
+ goto err;
}
- return result;
+
+ return 0;
+err:
+ for (; i >= 0; --i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ return res;
}
-EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb);
+EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
@@ -356,14 +391,19 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
unsigned long flags;
+ int i;
- dbg("%s - port %d", __func__, port->number);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ if (urb == port->read_urbs[i])
+ break;
+ }
+ set_bit(i, &port->read_urbs_free);
- if (unlikely(status != 0)) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
+ dbg("%s - port %d, urb %d, len %d\n", __func__, port->number, i,
+ urb->actual_length);
+ if (urb->status) {
+ dbg("%s - non-zero urb status: %d\n", __func__, urb->status);
return;
}
@@ -376,7 +416,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
+ usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
} else
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -443,7 +483,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
spin_unlock_irq(&port->lock);
if (was_throttled)
- usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
@@ -509,8 +549,9 @@ int usb_serial_generic_resume(struct usb_serial *serial)
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
- if (port->read_urb) {
- r = usb_submit_urb(port->read_urb, GFP_NOIO);
+ if (port->bulk_in_size) {
+ r = usb_serial_generic_submit_read_urbs(port,
+ GFP_NOIO);
if (r < 0)
c++;
}
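
The generic.c hunks above track which of the preallocated read urbs are idle in the read_urbs_free bitmap: test_and_clear_bit() claims a slot before submission and set_bit() releases it on completion or on a failed submit. Detached from the driver, and with invented names, the claim/release idiom is simply:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define SKETCH_NR_SLOTS 2

    struct sketch_pool {
    	unsigned long free;	/* one bit per slot, set = idle */
    };

    /* Claim an idle slot; returns the slot index or -EBUSY if none are free. */
    static int sketch_claim_slot(struct sketch_pool *p)
    {
    	int i;

    	for (i = 0; i < SKETCH_NR_SLOTS; i++) {
    		if (test_and_clear_bit(i, &p->free))
    			return i;
    	}
    	return -EBUSY;
    }

    /* Release a slot, e.g. from a completion handler or after a failed submit. */
    static void sketch_release_slot(struct sketch_pool *p, int i)
    {
    	set_bit(i, &p->free);
    }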
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 2ee807523f5..abd2ee2b2f9 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -610,7 +610,6 @@ static void edge_interrupt_callback(struct urb *urb)
/* we have pending bytes on the
bulk in pipe, send a request */
- edge_serial->read_urb->dev = edge_serial->serial->dev;
result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (result) {
dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __func__, result);
@@ -711,7 +710,6 @@ static void edge_bulk_in_callback(struct urb *urb)
/* check to see if there's any more data for us to read */
if (edge_serial->rxBytesAvail > 0) {
dbg("%s - posting a read", __func__);
- edge_serial->read_urb->dev = edge_serial->serial->dev;
retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&urb->dev->dev,
@@ -1330,7 +1328,6 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
edge_port->txCredits -= count;
edge_port->icount.tx += count;
- urb->dev = edge_serial->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
/* something went wrong */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0aac00afb5c..e44d375edaa 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -15,13 +15,6 @@
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <pberger@brimson.com>,
* or Al Borchers <alborchers@steinerpoint.com>.
- *
- * Version history:
- *
- * July 11, 2002 Removed 4 port device structure since all TI UMP
- * chips have only 2 ports
- * David Iacovelli (davidi@ionetworks.com)
- *
*/
#include <linux/kernel.h>
@@ -1777,12 +1770,11 @@ static void edge_bulk_in_callback(struct urb *urb)
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
- if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) {
- urb->dev = edge_port->port->serial->dev;
+ if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) {
+ else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
- }
+
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(&urb->dev->dev,
@@ -1959,9 +1951,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_es_lock;
}
- urb->complete = edge_interrupt_callback;
urb->context = edge_serial;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -1987,9 +1977,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
- urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -2118,12 +2106,7 @@ static void edge_send(struct tty_struct *tty)
port->write_urb->transfer_buffer);
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- edge_bulk_out_callback,
- port);
+ port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -2267,9 +2250,6 @@ static int restart_read(struct edgeport_port *edge_port)
if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
urb = edge_port->port->read_urb;
- urb->complete = edge_bulk_in_callback;
- urb->context = edge_port;
- urb->dev = edge_port->port->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
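
The io_edgeport and io_ti hunks above drop the urb->dev assignments that used to precede every usb_submit_urb(): the port URBs are filled with the USB device when the interface is probed, and that pointer stays valid for the life of the port, so restoring it before each resubmission is redundant. A minimal sketch of the resulting resubmit-from-completion pattern; example_bulk_in_callback is a hypothetical name and the tty handling is elided.

#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Hypothetical bulk-in completion handler: the URB keeps the device
 * pointer it was filled with, so resubmission needs no extra setup. */
static void example_bulk_in_callback(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;
        int retval;

        if (urb->status) {
                dev_dbg(&port->dev, "nonzero read status: %d\n", urb->status);
                return;
        }

        /* ... push urb->transfer_buffer[0..actual_length) to the tty ... */

        retval = usb_submit_urb(urb, GFP_ATOMIC);
        if (retval)
                dev_err(&port->dev, "resubmit failed: %d\n", retval);
}

The edge_send() hunk follows the same logic on the write side: the pre-filled write URB is reused, and only transfer_buffer_length is updated before submission.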
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 4735931b4c7..36f5cbe9048 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -8,40 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * (12/12/2002) ganesh
- * Added support for practically all devices supported by ActiveSync
- * on Windows. Thanks to Wes Cilldhaire <billybobjoehenrybob@hotmail.com>.
- *
- * (26/11/2002) ganesh
- * Added insmod options to specify product and vendor id.
- * Use modprobe ipaq vendor=0xfoo product=0xbar
- *
- * (26/7/2002) ganesh
- * Fixed up broken error handling in ipaq_open. Retry the "kickstart"
- * packet much harder - this drastically reduces connection failures.
- *
- * (30/4/2002) ganesh
- * Added support for the Casio EM500. Completely untested. Thanks
- * to info from Nathan <wfilardo@fuse.net>
- *
- * (19/3/2002) ganesh
- * Don't submit urbs while holding spinlocks. Not strictly necessary
- * in 2.5.x.
- *
- * (8/3/2002) ganesh
- * The ipaq sometimes emits a '\0' before the CLIENT string. At this
- * point of time, the ppp ldisc is not yet attached to the tty, so
- * n_tty echoes "^ " to the ipaq, which messes up the chat. In 2.5.6-pre2
- * this causes a panic because echo_char() tries to sleep in interrupt
- * context.
- * The fix is to tell the upper layers that this is a raw device so that
- * echoing is suppressed. Thanks to Lyle Lindholm for a detailed bug
- * report.
- *
- * (25/2/2002) ganesh
- * Added support for the HP Jornada 548 and 568. Completely untested.
- * Thanks to info from Heath Robinson and Arieh Davidoff.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index ccbce4066d0..0c537da0d3c 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -22,38 +22,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * 2008_Jun_02 Felipe Balbi <me@felipebalbi.com>
- * Introduced common header to be used also in USB Gadget Framework.
- * Still needs some other style fixes.
- *
- * 2007_Jun_21 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Minimal cleanups for some of the driver problems and tty layer abuse.
- * Still needs fixing to allow multiple dongles.
- *
- * 2002_Mar_07 greg kh
- * moved some needed structures and #define values from the
- * net/irda/irda-usb.h file into our file, as we don't want to depend on
- * that codebase compiling correctly :)
- *
- * 2002_Jan_14 gb
- * Added module parameter to force specific number of XBOFs.
- * Added ir_xbof_change().
- * Reorganized read_bulk_callback error handling.
- * Switched from FILL_BULK_URB() to usb_fill_bulk_urb().
- *
- * 2001_Nov_08 greg kh
- * Changed the irda_usb_find_class_desc() function based on comments and
- * code from Martin Diehl.
- *
- * 2001_Nov_01 greg kh
- * Added support for more IrDA USB devices.
- * Added support for zero packet. Added buffer override parameter, so
- * users can transfer larger packets at once if they wish. Both patches
- * came from Dag Brattli <dag@obexcode.com>.
- *
- * 2001_Oct_07 greg kh
- * initial version released.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 6aca631a407..64d0ffd4440 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1168,15 +1168,14 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_KERNEL);
-
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
iuu_close(port);
- return -EPROTO;
} else {
dbg("%s - rxcmd OK", __func__);
}
+
return result;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index a442352d7b6..bc8dc203e81 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -25,73 +25,6 @@
Tip 'o the hat to IBM (and previously Linuxcare :) for supporting
staff in their work on open source projects.
-
- Change History
-
- 2003sep04 LPM (Keyspan) add support for new single port product USA19HS.
- Improve setup message handling for all devices.
-
- Wed Feb 19 22:00:00 PST 2003 (Jeffrey S. Laing <keyspan@jsl.com>)
- Merged the current (1/31/03) Keyspan code with the current (2.4.21-pre4)
- Linux source tree. The Linux tree lacked support for the 49WLC and
- others. The Keyspan patches didn't work with the current kernel.
-
- 2003jan30 LPM add support for the 49WLC and MPR
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Mon Oct 8 14:29:00 EST 2001 hugh
- Fixed bug that prevented multiport devices from operating correctly
- if they weren't the first unit attached.
-
- Sat Oct 6 12:31:21 EST 2001 hugh
- Added support for USA-28XA and -28XB, misc cleanups, break support
- for usa26 based models thanks to David Gibson.
-
- Thu May 31 11:56:42 PDT 2001 gkh
- switched from using spinlock to a semaphore
-
- (04/08/2001) gb
- Identify version on module load.
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support.
-
- Tue Oct 10 23:15:33 EST 2000 Hugh
- Merged Paul's changes with my USA-49W mods. Work in progress
- still...
-
- Wed Jul 19 14:00:42 EST 2000 gkh
- Added module_init and module_exit functions to handle the fact that
- this driver is a loadable module now.
-
- Tue Jul 18 16:14:52 EST 2000 Hugh
- Basic character input/output for USA-19 now mostly works,
- fixed at 9600 baud for the moment.
-
- Sat Jul 8 11:11:48 EST 2000 Hugh
- First public release - nothing works except the firmware upload.
- Tested on PPC and x86 architectures, seems to behave...
*/
@@ -397,7 +330,6 @@ static int keyspan_write(struct tty_struct *tty,
/* send the data out the bulk port */
this_urb->transfer_buffer_length = todo + dataOffset;
- this_urb->dev = port->serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("usb_submit_urb(write bulk) failed (%d)", err);
@@ -463,7 +395,6 @@ static void usa26_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -559,7 +490,6 @@ static void usa26_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -609,7 +539,6 @@ static void usa28_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)",
@@ -694,7 +623,6 @@ static void usa28_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -789,8 +717,6 @@ static void usa49_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -848,7 +774,6 @@ static void usa49_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -919,8 +844,6 @@ static void usa49wg_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -996,7 +919,6 @@ static void usa90_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1047,7 +969,6 @@ static void usa90_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1123,7 +1044,6 @@ static void usa67_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1223,7 +1143,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->in_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* make sure endpoint data toggle is synchronized
with the device */
@@ -1239,7 +1158,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->out_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 0); */
}
@@ -1956,7 +1874,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2084,7 +2001,6 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed", __func__);
@@ -2271,8 +2187,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
-
- this_urb->dev = serial->dev;
}
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2415,7 +2329,6 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2561,7 +2474,6 @@ static int keyspan_usa67_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2650,14 +2562,12 @@ static int keyspan_startup(struct usb_serial *serial)
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
- s_priv->instat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit instat urb failed %d", __func__,
err);
}
if (s_priv->indat_urb != NULL) {
- s_priv->indat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit indat urb failed %d", __func__,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index d5c0c6ab496..a40615674a6 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -12,59 +12,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (09/07/2001) gkh
- * cleaned up the Xircom support. Added ids for Entregra device which is
- * the same as the Xircom device. Enabled the code to be compiled for
- * either Xircom or Keyspan devices.
- *
- * (08/11/2001) Cristian M. Craciunescu
- * support for Xircom PGSDB9
- *
- * (05/31/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- *
- * (07/20/2000) borchers
- * - keyspan_pda_write no longer sleeps if it is called on interrupt time;
- * PPP and the line discipline with stty echo on can call write on
- * interrupt time and this would cause an oops if write slept
- * - if keyspan_pda_write is in an interrupt, it will not call
- * usb_control_msg (which sleeps) to query the room in the device
- * buffer, it simply uses the current room value it has
- * - if the urb is busy or if it is throttled keyspan_pda_write just
- * returns 0, rather than sleeping to wait for this to change; the
- * write_chan code in n_tty.c will sleep if needed before calling
- * keyspan_pda_write again
- * - if the device needs to be unthrottled, write now queues up the
- * call to usb_control_msg (which sleeps) to unthrottle the device
- * - the wakeups from keyspan_pda_write_bulk_callback are queued rather
- * than done directly from the callback to avoid the race in write_chan
- * - keyspan_pda_chars_in_buffer also indicates its buffer is full if the
- * urb status is -EINPROGRESS, meaning it cannot write at the moment
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
@@ -290,7 +237,6 @@ static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
/* just restart the receive interrupt URB */
dbg("keyspan_pda_rx_unthrottle port %d", port->number);
- port->interrupt_in_urb->dev = port->serial->dev;
if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
dbg(" usb_submit_urb(read urb) failed");
}
@@ -532,11 +478,11 @@ static int keyspan_pda_write(struct tty_struct *tty,
the device is full (wait until it says there is room)
*/
spin_lock_bh(&port->lock);
- if (port->write_urb_busy || priv->tx_throttled) {
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
spin_unlock_bh(&port->lock);
return 0;
}
- port->write_urb_busy = 1;
+ clear_bit(0, &port->write_urbs_free);
spin_unlock_bh(&port->lock);
/* At this point the URB is in our control, nobody else can submit it
@@ -598,7 +544,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
priv->tx_room -= count;
- port->write_urb->dev = port->serial->dev;
rc = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (rc) {
dbg(" usb_submit_urb(write bulk) failed");
@@ -618,7 +563,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = count;
exit:
if (rc < 0)
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return rc;
}
@@ -628,7 +573,7 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct keyspan_pda_private *priv;
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
priv = usb_get_serial_port_data(port);
/* queue up a wakeup at scheduler time */
@@ -661,7 +606,7 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
n_tty.c:normal_poll() ) that we're not writeable. */
spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy || priv->tx_throttled)
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled)
ret = 256;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
@@ -717,7 +662,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
- port->interrupt_in_urb->dev = serial->dev;
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (rc) {
dbg("%s - usb_submit_urb(read int) failed", __func__);
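
The keyspan_pda hunks above retire the driver-private write_urb_busy flag in favour of bit 0 of port->write_urbs_free, the same convention the generic driver uses for its write URB pool. The sketch below shows the convention with atomic bitops; the names example_write and example_write_callback are hypothetical, and unlike the real driver it claims the bit with test_and_clear_bit() rather than testing under the port spinlock.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Hypothetical write path: claim the single write URB via bit 0. */
static int example_write(struct usb_serial_port *port,
                         const unsigned char *buf, int count)
{
        int rc;

        if (!test_and_clear_bit(0, &port->write_urbs_free))
                return 0;       /* busy; the tty layer will retry later */

        count = min_t(int, count, port->bulk_out_size);
        memcpy(port->write_urb->transfer_buffer, buf, count);
        port->write_urb->transfer_buffer_length = count;

        rc = usb_submit_urb(port->write_urb, GFP_ATOMIC);
        if (rc) {
                set_bit(0, &port->write_urbs_free);     /* release on failure */
                return rc;
        }
        return count;
}

/* Completion handler: mark the URB idle and wake up waiting writers. */
static void example_write_callback(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;

        set_bit(0, &port->write_urbs_free);
        usb_serial_port_softint(port);
}

The omninet hunks further down use this lock-free variant, which is why its write_room() can simply test_bit() the flag instead of taking the port lock.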
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index ddd146300dd..5d3beeeb5fd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -20,18 +20,6 @@
*
* Supported readers: USB TWIN, KAAN Standard Plus and SecOVID Reader Plus
* (Adapter K), B1 Professional and KAAN Professional (Adapter B)
- *
- * (21/05/2004) tw
- * Fix bug with P'n'P readers
- *
- * (28/05/2003) tw
- * Add support for KAAN SIM
- *
- * (12/09/2002) tw
- * Adapted to 2.5.
- *
- * (11/08/2002) tw
- * Initial version.
*/
@@ -231,9 +219,6 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
priv = usb_get_serial_port_data(port);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
@@ -393,8 +378,6 @@ static void kobil_read_int_callback(struct urb *urb)
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dbg("%s - port %d Send read URB returns: %i",
@@ -475,17 +458,9 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
priv->filled = 0;
priv->cur_pos = 0;
- /* someone sets the dev to 0 if the close method
- has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* start reading (except TWIN and KAAN SIM) */
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
- /* someone sets the dev to 0 if the close method has
- been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb,
GFP_NOIO);
dbg("%s - port %d Send read URB returns: %i",
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ba0d28727cc..a975bb80303 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -19,50 +19,6 @@
* DTR/RTS signal handling may be incomplete or incorrect. I have mainly
* implemented what I have seen with SniffUSB or found in belkin_sa.c.
* For further TODOs check also belkin_sa.c.
- *
- * TEST STATUS:
- * Basic tests have been performed with minicom/zmodem transfers and
- * modem dialing under Linux 2.4.0-test10 (for me it works fine).
- *
- * 04-Nov-2003 Bill Marr <marr at flex dot com>
- * - Mimic Windows driver by sending 2 USB 'device request' messages
- * following normal 'baud rate change' message. This allows data to be
- * transmitted to RS-232 devices which don't assert the 'CTS' signal.
- *
- * 10-Nov-2001 Wolfgang Grandegger
- * - Fixed an endianness problem with the baudrate selection for PowerPC.
- *
- * 06-Dec-2001 Martin Hamilton <martinh@gnu.org>
- * - Added support for the Belkin F5U109 DB9 adaptor
- *
- * 30-May-2001 Greg Kroah-Hartman
- * - switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 04-May-2001 Stelian Pop
- * - Set the maximum bulk output size for Sitecom U232-P25 model to 16 bytes
- * instead of the device reported 32 (using 32 bytes causes many data
- * loss, Windows driver uses 16 too).
- *
- * 02-May-2001 Stelian Pop
- * - Fixed the baud calculation for Sitecom U232-P25 model
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 06-Jan-2001 Cornel Ciocirlan
- * - Added support for Sitecom U232-P25 model (Product Id 0x0230)
- * - Added support for D-Link DU-H3SP USB BAY (Product Id 0x0200)
- *
- * 29-Nov-2000 Greg Kroah-Hartman
- * - Added device id table to fit with 2.4.0-test11 structure.
- * - took out DEAL_WITH_TWO_INT_IN_ENDPOINTS #define as it's not needed
- * (lots of things will change if/when the usb-serial core changes to
- * handle these issues).
- *
- * 27-Nov-2000 Wolfgang Grandegge
- * A version for kernel 2.4.0-test10 released to the Linux community
- * (via linux-usb-devel).
*/
#include <linux/kernel.h>
@@ -526,7 +482,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
spin_unlock_irqrestore(&priv->lock, flags);
- port->read_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev,
@@ -535,7 +490,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
goto error;
}
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
usb_kill_urb(port->read_urb);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 3524a105d04..19d112f51b9 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -939,14 +939,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
}
tty_kref_put(tty);
- if (!port->read_urb) {
- dbg("URB KILLED !!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = port->serial->dev;
-
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dbg("usb_submit_urb(read bulk) failed, retval = %d",
@@ -1014,7 +1007,6 @@ static int mos77xx_calc_num_ports(struct usb_serial *serial)
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
- struct usb_serial_port *port0;
struct urb *urb;
struct moschip_port *mos7720_port;
int response;
@@ -1029,8 +1021,6 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
if (mos7720_port == NULL)
return -ENODEV;
- port0 = serial->port[0];
-
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -1735,8 +1725,6 @@ static void change_port_settings(struct tty_struct *tty,
write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
-
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
@@ -1786,13 +1774,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
/* change the port settings to the new ones specified */
change_port_settings(tty, mos7720_port, old_termios);
- if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c72abd52498..55cfd6265b9 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -792,8 +792,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
}
- mos7840_port->read_urb->dev = serial->dev;
-
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -2058,7 +2056,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
@@ -2130,7 +2127,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
}
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 60f38d5e64f..45a8c55881d 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -9,31 +9,6 @@
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- * Fixed potential race in omninet_write_bulk_callback
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
*/
#include <linux/kernel.h>
@@ -44,7 +19,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
@@ -174,12 +148,6 @@ static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
tty_port_tty_set(&wport->port, tty);
/* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
@@ -236,11 +204,6 @@ static void omninet_read_bulk_callback(struct urb *urb)
}
/* Continue trying to always read */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer, urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
@@ -267,14 +230,10 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
- spin_lock_bh(&wport->lock);
- if (wport->write_urb_busy) {
- spin_unlock_bh(&wport->lock);
+ if (!test_and_clear_bit(0, &wport->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- wport->write_urb_busy = 1;
- spin_unlock_bh(&wport->lock);
count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count;
@@ -292,10 +251,9 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = 64;
- wport->write_urb->dev = serial->dev;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
- wport->write_urb_busy = 0;
+ set_bit(0, &wport->write_urbs_free);
dev_err(&port->dev,
"%s - failed submitting write urb, error %d\n",
__func__, result);
@@ -314,8 +272,7 @@ static int omninet_write_room(struct tty_struct *tty)
int room = 0; /* Default: no room */
- /* FIXME: no consistent locking for write_urb_busy */
- if (wport->write_urb_busy)
+ if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dbg("%s - returns %d", __func__, room);
@@ -332,7 +289,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
dbg("%s - port %0x", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c248a914743..691f57a9d71 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -384,7 +384,6 @@ static void opticon_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irqrestore(&priv->lock, flags);
- priv->bulk_read_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d865878c9f9..c96b6b6509f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -476,6 +476,10 @@ static void option_instat_callback(struct urb *urb);
#define VIETTEL_VENDOR_ID 0x2262
#define VIETTEL_PRODUCT_VT1000 0x0002
+/* ZD Incorporated */
+#define ZD_VENDOR_ID 0x0685
+#define ZD_PRODUCT_7000 0x7000
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -661,6 +665,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +759,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1169,6 +1182,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
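
The option.c additions above match composite modems by interface class/subclass/protocol rather than by vendor/product alone, so only the serial-capable interfaces of a device bind to the driver. A self-contained sketch of such a table follows; the EXAMPLE_* IDs are placeholders, not values taken from the table above.

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID       0x1234  /* hypothetical */
#define EXAMPLE_PRODUCT_ID      0x5678  /* hypothetical */

static const struct usb_device_id example_ids[] = {
        /* vendor-specific class 0xff, subclass 0x01, protocol 0x01 */
        { USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID,
                                        0xff, 0x01, 0x01) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);

Interfaces whose descriptors do not match the class triple (for example the mass-storage interface of the same device) are left for other drivers to claim.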
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 4c29e6c2bda..2161d1c3c08 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -264,7 +264,6 @@ static void setup_line(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -321,7 +320,6 @@ static void send_data(struct work_struct *work)
priv->flags.write_urb_in_use = 0;
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -334,7 +332,6 @@ static void send_data(struct work_struct *work)
port->write_urb->transfer_buffer,
count, &port->lock);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -583,13 +580,12 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
kfree(buf);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
oti6858_close(port);
- return -EPROTO;
+ return result;
}
/* setup termios */
@@ -837,7 +833,6 @@ static void oti6858_read_int_callback(struct urb *urb)
if (can_recv) {
int result;
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result != 0) {
priv->flags.read_urb_in_use = 0;
@@ -866,7 +861,6 @@ static void oti6858_read_int_callback(struct urb *urb)
int result;
/* dbg("%s(): submitting interrupt urb", __func__); */
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&urb->dev->dev,
@@ -894,18 +888,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- /*
- if (status == -EPROTO) {
- * PL2303 mysteriously fails with -EPROTO reschedule
- the read *
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result);
- return;
- }
- */
dbg("%s(): unable to handle the error, exiting", __func__);
return;
}
@@ -918,7 +900,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
tty_kref_put(tty);
/* schedule the interrupt urb */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0 && result != -EPERM) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -955,7 +936,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
dbg("%s(): overflow in write", __func__);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -968,7 +948,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
priv->flags.write_urb_in_use = 0;
/* schedule the interrupt urb if we are still open */
- port->interrupt_in_urb->dev = port->serial->dev;
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index fc2d66f7f4e..329295615d0 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -502,21 +502,20 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
pl2303_set_termios(tty, port, &tmp_termios);
- dbg("%s - submitting read urb", __func__);
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
- if (result) {
- pl2303_close(port);
- return -EPROTO;
- }
-
dbg("%s - submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
- pl2303_close(port);
- return -EPROTO;
+ return result;
}
+
+ result = usb_serial_generic_open(tty, port);
+ if (result) {
+ usb_kill_urb(port->interrupt_in_urb);
+ return result;
+ }
+
port->port.drain_delay = 256;
return 0;
}
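
The pl2303_open() hunk above reorders the open path: the interrupt URB is submitted first, usb_serial_generic_open() then starts the bulk-in reads, and the interrupt URB is killed again if that fails; real error codes are returned instead of a blanket -EPROTO. A condensed restatement, with example_open as a hypothetical name and the termios setup elided:

#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
        int result;

        result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
        if (result) {
                dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
                        result);
                return result;
        }

        result = usb_serial_generic_open(tty, port);
        if (result)
                usb_kill_urb(port->interrupt_in_urb);   /* unwind */

        return result;
}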
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index b18179bda0d..f2485429172 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -681,7 +681,6 @@ static void sierra_instat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 7096f799b07..c70cc012d03 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -182,7 +182,6 @@ static void symbol_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irq(&priv->lock);
- priv->int_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
if (result)
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ea8445689c8..4af21f46096 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -535,9 +535,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_lock;
}
- urb->complete = ti_interrupt_callback;
urb->context = tdev;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -619,9 +617,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s - submit read urb failed, %d\n",
@@ -1236,12 +1232,11 @@ static void ti_bulk_in_callback(struct urb *urb)
exit:
/* continue to read unless stopping */
spin_lock(&tport->tp_lock);
- if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) {
- urb->dev = port->serial->dev;
+ if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) {
+ else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING)
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
- }
+
spin_unlock(&tport->tp_lock);
if (retval)
dev_err(dev, "%s - resubmit read urb failed, %d\n",
@@ -1574,9 +1569,7 @@ static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty)
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb = tport->tp_port->read_urb;
spin_unlock_irqrestore(&tport->tp_lock, flags);
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = tport->tp_port->serial->dev;
status = usb_submit_urb(urb, GFP_KERNEL);
} else {
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index cc274fdf262..ce6c1a65a54 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -50,7 +50,7 @@ static struct usb_driver usb_serial_driver = {
.disconnect = usb_serial_disconnect,
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
.supports_autosuspend = 1,
};
@@ -260,6 +260,10 @@ static int serial_activate(struct tty_port *tport, struct tty_struct *tty)
else
retval = port->serial->type->open(tty, port);
mutex_unlock(&serial->disc_mutex);
+
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
+
return retval;
}
@@ -360,7 +364,8 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
-
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
exit:
return retval;
}
@@ -562,8 +567,8 @@ static void kill_traffic(struct usb_serial_port *port)
{
int i;
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_kill_urb(port->write_urbs[i]);
/*
@@ -595,17 +600,17 @@ static void port_release(struct device *dev)
kill_traffic(port);
cancel_work_sync(&port->work);
- usb_free_urb(port->read_urb);
- usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ usb_free_urb(port->read_urbs[i]);
+ kfree(port->bulk_in_buffers[i]);
+ }
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
usb_free_urb(port->write_urbs[i]);
kfree(port->bulk_out_buffers[i]);
}
kfifo_free(&port->write_fifo);
- kfree(port->bulk_in_buffer);
- kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
kfree(port->interrupt_out_buffer);
kfree(port);
@@ -686,16 +691,18 @@ static int serial_carrier_raised(struct tty_port *port)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->carrier_raised)
return drv->carrier_raised(p);
/* No carrier control - don't block */
- return 1;
+ return 1;
}
static void serial_dtr_rts(struct tty_port *port, int on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->dtr_rts)
drv->dtr_rts(p, on);
}
@@ -724,6 +731,7 @@ int usb_serial_probe(struct usb_interface *interface,
unsigned int minor;
int buffer_size;
int i;
+ int j;
int num_interrupt_in = 0;
int num_interrupt_out = 0;
int num_bulk_in = 0;
@@ -906,38 +914,41 @@ int usb_serial_probe(struct usb_interface *interface,
for (i = 0; i < num_bulk_in; ++i) {
endpoint = bulk_in_endpoint[i];
port = serial->port[i];
- port->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->read_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
buffer_size = max_t(int, serial->type->bulk_in_size,
usb_endpoint_maxp(endpoint));
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_in_buffer) {
- dev_err(&interface->dev,
+
+ for (j = 0; j < ARRAY_SIZE(port->read_urbs); ++j) {
+ set_bit(j, &port->read_urbs_free);
+ port->read_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->read_urbs[j]) {
+ dev_err(&interface->dev,
+ "No free urbs available\n");
+ goto probe_error;
+ }
+ port->bulk_in_buffers[j] = kmalloc(buffer_size,
+ GFP_KERNEL);
+ if (!port->bulk_in_buffers[j]) {
+ dev_err(&interface->dev,
"Couldn't allocate bulk_in_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->read_urb, dev,
- usb_rcvbulkpipe(dev,
+ goto probe_error;
+ }
+ usb_fill_bulk_urb(port->read_urbs[j], dev,
+ usb_rcvbulkpipe(dev,
endpoint->bEndpointAddress),
- port->bulk_in_buffer, buffer_size,
- serial->type->read_bulk_callback, port);
+ port->bulk_in_buffers[j], buffer_size,
+ serial->type->read_bulk_callback,
+ port);
+ }
+
+ port->read_urb = port->read_urbs[0];
+ port->bulk_in_buffer = port->bulk_in_buffers[0];
}
for (i = 0; i < num_bulk_out; ++i) {
- int j;
-
endpoint = bulk_out_endpoint[i];
port = serial->port[i];
- port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->write_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
buffer_size = serial->type->bulk_out_size;
@@ -945,17 +956,7 @@ int usb_serial_probe(struct usb_interface *interface,
buffer_size = usb_endpoint_maxp(endpoint);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_out_buffer) {
- dev_err(&interface->dev,
- "Couldn't allocate bulk_out_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->write_urb, dev,
- usb_sndbulkpipe(dev,
- endpoint->bEndpointAddress),
- port->bulk_out_buffer, buffer_size,
- serial->type->write_bulk_callback, port);
+
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
set_bit(j, &port->write_urbs_free);
port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
@@ -978,6 +979,9 @@ int usb_serial_probe(struct usb_interface *interface,
serial->type->write_bulk_callback,
port);
}
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
}
if (serial->type->read_int_callback) {
@@ -1196,7 +1200,7 @@ static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
- .hangup = serial_hangup,
+ .hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
@@ -1206,9 +1210,9 @@ static const struct tty_operations serial_ops = {
.chars_in_buffer = serial_chars_in_buffer,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
- .get_icount = serial_get_icount,
- .cleanup = serial_cleanup,
- .install = serial_install,
+ .get_icount = serial_get_icount,
+ .cleanup = serial_cleanup,
+ .install = serial_install,
.proc_fops = &serial_proc_fops,
};
@@ -1237,7 +1241,7 @@ static int __init usb_serial_init(void)
usb_serial_tty_driver->owner = THIS_MODULE;
usb_serial_tty_driver->driver_name = "usbserial";
- usb_serial_tty_driver->name = "ttyUSB";
+ usb_serial_tty_driver->name = "ttyUSB";
usb_serial_tty_driver->major = SERIAL_TTY_MAJOR;
usb_serial_tty_driver->minor_start = 0;
usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1336,7 +1340,6 @@ static void fixup_generic(struct usb_serial_driver *device)
int usb_serial_register(struct usb_serial_driver *driver)
{
- /* must be called with BKL held */
int retval;
if (usb_disabled())
@@ -1374,7 +1377,6 @@ EXPORT_SYMBOL_GPL(usb_serial_register);
void usb_serial_deregister(struct usb_serial_driver *device)
{
- /* must be called with BKL held */
printk(KERN_INFO "USB Serial deregistering driver %s\n",
device->description);
mutex_lock(&table_lock);
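
The serial_activate() and serial_write() hunks above run negative driver return values through usb_translate_errors(), so USB-internal codes do not leak to userspace; roughly, familiar codes such as -ENOMEM and -ENODEV pass through while USB-specific ones collapse to -EIO (see usb_translate_errors() in include/linux/usb.h for the exact mapping). A condensed restatement of the write path, with example_serial_write as a hypothetical name:

#include <linux/tty.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int example_serial_write(struct tty_struct *tty,
                                const unsigned char *buf, int count)
{
        struct usb_serial_port *port = tty->driver_data;
        int retval;

        if (count == 0)
                return 0;

        /* Delegate to the type-specific write implementation. */
        retval = port->serial->type->write(tty, port, buf, count);
        if (retval < 0)
                retval = usb_translate_errors(retval);

        return retval;
}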
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 95a82148ee8..9b632e75321 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -40,7 +40,7 @@ static struct usb_driver debug_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
/* This HW really does not support a serial break, so one will be
@@ -54,19 +54,18 @@ static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
}
-static void usb_debug_read_bulk_callback(struct urb *urb)
+static void usb_debug_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
- memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
- USB_DEBUG_BRK_SIZE) == 0) {
+ memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+ USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
return;
}
- usb_serial_generic_read_bulk_callback(urb);
+ usb_serial_generic_process_read_urb(urb);
}
static struct usb_serial_driver debug_device = {
@@ -79,7 +78,7 @@ static struct usb_serial_driver debug_device = {
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
- .read_bulk_callback = usb_debug_read_bulk_callback,
+ .process_read_urb = usb_debug_process_read_urb,
};
static int __init debug_init(void)
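
The usb_debug conversion above moves the driver from a private read_bulk_callback to the process_read_urb hook: the driver only interprets the completed data, while submission, resubmission and throttling stay in the generic code. A minimal sketch of such a hook; example_process_read_urb is a hypothetical name and it uses the tty flip API as it stood at the time of this diff.

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_process_read_urb(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;
        struct tty_struct *tty;

        if (!urb->actual_length)
                return;

        tty = tty_port_tty_get(&port->port);
        if (!tty)
                return;

        /* Hand the completed data to the line discipline. */
        tty_insert_flip_string(tty, urb->transfer_buffer, urb->actual_length);
        tty_flip_buffer_push(tty);
        tty_kref_put(tty);
}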
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5b073bcc807..11af903cb09 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -14,57 +14,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (10/09/2002) Stuart MacDonald (stuartm@connecttech.com)
- * Upgrade to full working driver
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * 2001_Mar_19 gkh
- * Fixed MOD_INC and MOD_DEC logic, the ability to open a port more
- * than once, and got the proper usb_device_id table entries so
- * the driver works again.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (10/03/2000) smd
- * firmware is improved to guard against crap sent to device
- * firmware now replies CMD_FAILURE on bad things
- * read_callback fix you provided for private info struct
- * command_finished now indicates success or fail
- * setup_port struct now packed to avoid gcc padding
- * firmware uses 1 based port numbering, driver now handles that
- *
- * (09/11/2000) gkh
- * Removed DEBUG #ifdefs with call to usb_serial_debug_data
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- * Fixed bug with port->minor that was found by Al Borchers
- *
- * (07/04/2000) gkh
- * Added support for port settings. Baud rate can now be changed. Line
- * signals are not transferred to and from the tty layer yet, but things
- * seem to be working well now.
- *
- * (05/04/2000) gkh
- * First cut at open and close commands. Data can flow through the ports at
- * default speeds now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
#include <linux/kernel.h>
@@ -753,7 +702,6 @@ static void whiteheat_close(struct usb_serial_port *port)
static int whiteheat_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
struct urb *urb;
@@ -789,7 +737,6 @@ static int whiteheat_write(struct tty_struct *tty,
usb_serial_debug_data(debug, &port->dev,
__func__, bytes, urb->transfer_buffer);
- urb->dev = serial->dev;
urb->transfer_buffer_length = bytes;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
@@ -1035,7 +982,6 @@ static void command_port_read_callback(struct urb *urb)
dbg("%s - bad reply from firmware", __func__);
/* Continue trying to always read */
- command_port->read_urb->dev = command_port->serial->dev;
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed resubmitting read urb, error %d",
@@ -1141,7 +1087,6 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
transfer_buffer[0] = command;
memcpy(&transfer_buffer[1], data, datasize);
command_port->write_urb->transfer_buffer_length = datasize + 1;
- command_port->write_urb->dev = port->serial->dev;
retval = usb_submit_urb(command_port->write_urb, GFP_NOIO);
if (retval) {
dbg("%s - submit urb failed", __func__);
@@ -1362,7 +1307,6 @@ static int start_command_port(struct usb_serial *serial)
/* Work around HCD bugs */
usb_clear_halt(serial->dev, command_port->read_urb->pipe);
- command_port->read_urb->dev = serial->dev;
retval = usb_submit_urb(command_port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&serial->dev->dev,
@@ -1410,7 +1354,6 @@ static int start_port_read(struct usb_serial_port *port)
list_del(tmp);
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- urb->dev = port->serial->dev;
spin_unlock_irqrestore(&info->lock, flags);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
@@ -1490,7 +1433,6 @@ static void rx_data_softint(struct work_struct *work)
sent += tty_insert_flip_string(tty,
urb->transfer_buffer, urb->actual_length);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 42d0eaed4a0..51af2fee2ef 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -139,7 +139,7 @@ static int init_alauda(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id alauda_usb_ids[] = {
+static struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
@@ -1278,15 +1278,4 @@ static struct usb_driver alauda_driver = {
.soft_unbind = 1,
};
-static int __init alauda_init(void)
-{
- return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
- usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
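
The storage drivers from here on (alauda, cypress_atacb, datafab, ene_ub6250, freecom) all make the same conversion: the hand-rolled init/exit pair is replaced by module_usb_driver(), which generates equivalent boilerplate. Roughly, module_usb_driver(alauda_driver) stands in for something like the following; see module_driver() in include/linux/device.h and module_usb_driver() in include/linux/usb.h for the exact expansion.

/* Expansion sketch; assumes the alauda_driver definition above. */
static int __init alauda_driver_init(void)
{
        return usb_register(&alauda_driver);
}
module_init(alauda_driver_init);

static void __exit alauda_driver_exit(void)
{
        usb_deregister(&alauda_driver);
}
module_exit(alauda_driver_exit);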
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index c8447182118..387cbd47acc 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id cypress_usb_ids[] = {
+static struct usb_device_id cypress_usb_ids[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
@@ -274,15 +274,4 @@ static struct usb_driver cypress_driver = {
.soft_unbind = 1,
};
-static int __init cypress_init(void)
-{
- return usb_register(&cypress_driver);
-}
-
-static void __exit cypress_exit(void)
-{
- usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index ded836b02d7..15d41f2b3d6 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -88,7 +88,7 @@ static int datafab_determine_lun(struct us_data *us,
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id datafab_usb_ids[] = {
+static struct usb_device_id datafab_usb_ids[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
@@ -753,15 +753,4 @@ static struct usb_driver datafab_driver = {
.soft_unbind = 1,
};
-static int __init datafab_init(void)
-{
- return usb_register(&datafab_driver);
-}
-
-static void __exit datafab_exit(void)
-{
- usb_deregister(&datafab_driver);
-}
-
-module_init(datafab_init);
-module_exit(datafab_exit);
+module_usb_driver(datafab_driver);
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 9fbe742343c..a6ade4071a9 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -42,7 +42,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id ene_ub6250_usb_ids[] = {
+static struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -607,8 +607,8 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
- u32 bl_num;
- u16 bl_len;
+ u32 bl_num;
+ u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
@@ -622,7 +622,7 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
else
bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
} else {
- bl_len = 1<<(info->SD_READ_BL_LEN);
+ bl_len = 1 << (info->SD_READ_BL_LEN);
bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1)
* (1 << (info->SD_C_SIZE_MULT + 2)) - 1;
}
@@ -777,7 +777,7 @@ static int ms_lib_free_logicalmap(struct us_data *us)
return 0;
}
-int ms_lib_alloc_logicalmap(struct us_data *us)
+static int ms_lib_alloc_logicalmap(struct us_data *us)
{
u32 i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -2248,7 +2248,7 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
/*
* ms_scsi_irp()
*/
-int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
+static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
@@ -2409,15 +2409,4 @@ static struct usb_driver ene_ub6250_driver = {
.soft_unbind = 1,
};
-static int __init ene_ub6250_init(void)
-{
- return usb_register(&ene_ub6250_driver);
-}
-
-static void __exit ene_ub6250_exit(void)
-{
- usb_deregister(&ene_ub6250_driver);
-}
-
-module_init(ene_ub6250_init);
-module_exit(ene_ub6250_exit);
+module_usb_driver(ene_ub6250_driver);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 6542ca40d50..fa161574847 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id freecom_usb_ids[] = {
+static struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
@@ -555,15 +555,4 @@ static struct usb_driver freecom_driver = {
.soft_unbind = 1,
};
-static int __init freecom_init(void)
-{
- return usb_register(&freecom_driver);
-}
-
-static void __exit freecom_exit(void)
-{
- usb_deregister(&freecom_driver);
-}
-
-module_init(freecom_init);
-module_exit(freecom_exit);
+module_usb_driver(freecom_driver);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index ffc4193e950..bd550270083 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -61,7 +61,7 @@
#include "scsiglue.h"
MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
-MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
+MODULE_AUTHOR("BjĂśrn Stenberg <bjorn@haxx.se>");
MODULE_LICENSE("GPL");
static int isd200_Initialization(struct us_data *us);
@@ -76,7 +76,7 @@ static int isd200_Initialization(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id isd200_usb_ids[] = {
+static struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
@@ -1568,15 +1568,4 @@ static struct usb_driver isd200_driver = {
.soft_unbind = 1,
};
-static int __init isd200_init(void)
-{
- return usb_register(&isd200_driver);
-}
-
-static void __exit isd200_exit(void)
-{
- usb_deregister(&isd200_driver);
-}
-
-module_init(isd200_init);
-module_exit(isd200_exit);
+module_usb_driver(isd200_driver);
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 6168596c5ac..a19211b5c26 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -71,7 +71,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id jumpshot_usb_ids[] = {
+static struct usb_device_id jumpshot_usb_ids[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
@@ -679,15 +679,4 @@ static struct usb_driver jumpshot_driver = {
.soft_unbind = 1,
};
-static int __init jumpshot_init(void)
-{
- return usb_register(&jumpshot_driver);
-}
-
-static void __exit jumpshot_exit(void)
-{
- usb_deregister(&jumpshot_driver);
-}
-
-module_init(jumpshot_init);
-module_exit(jumpshot_exit);
+module_usb_driver(jumpshot_driver);
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index ba1b7890688..e720f8ebdf9 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -59,7 +59,7 @@ static int rio_karma_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id karma_usb_ids[] = {
+static struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
@@ -232,15 +232,4 @@ static struct usb_driver karma_driver = {
.soft_unbind = 1,
};
-static int __init karma_init(void)
-{
- return usb_register(&karma_driver);
-}
-
-static void __exit karma_exit(void)
-{
- usb_deregister(&karma_driver);
-}
-
-module_init(karma_init);
-module_exit(karma_exit);
+module_usb_driver(karma_driver);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 1943be5a291..d75155c3820 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -69,7 +69,7 @@ struct usb_onetouch {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id onetouch_usb_ids[] = {
+static struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
@@ -314,15 +314,4 @@ static struct usb_driver onetouch_driver = {
.soft_unbind = 1,
};
-static int __init onetouch_init(void)
-{
- return usb_register(&onetouch_driver);
-}
-
-static void __exit onetouch_exit(void)
-{
- usb_deregister(&onetouch_driver);
-}
-
-module_init(onetouch_init);
-module_exit(onetouch_exit);
+module_usb_driver(onetouch_driver);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 0ce5f79197e..1f62723ef1a 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -398,10 +398,9 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
u8 cmnd[12] = { 0 };
u8 *buf;
- buf = kmalloc(len, GFP_NOIO);
+ buf = kmemdup(data, len, GFP_NOIO);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
- memcpy(buf, data, len);
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
@@ -507,15 +506,14 @@ static int enable_oscillator(struct us_data *us)
static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
{
int retval;
- u16 addr = 0xFE47;
u8 cmnd[12] = {0};
- US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len);
+ US_DEBUGP("%s, addr = 0xfe47, len = %d\n", __FUNCTION__, len);
cmnd[0] = 0xF0;
cmnd[1] = 0x0E;
- cmnd[2] = (u8)(addr >> 8);
- cmnd[3] = (u8)addr;
+ cmnd[2] = 0xfe;
+ cmnd[3] = 0x47;
cmnd[4] = (u8)(len >> 8);
cmnd[5] = (u8)len;
@@ -818,7 +816,7 @@ static inline int working_scsi(struct scsi_cmnd *srb)
return 1;
}
-void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
static int card_first_show = 1;
@@ -977,7 +975,7 @@ static void realtek_cr_destructor(void *extra)
}
#ifdef CONFIG_PM
-int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
+static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
{
struct us_data *us = usb_get_intfdata(iface);
@@ -1104,15 +1102,4 @@ static struct usb_driver realtek_cr_driver = {
.supports_autosuspend = 1,
};
-static int __init realtek_cr_init(void)
-{
- return usb_register(&realtek_cr_driver);
-}
-
-static void __exit realtek_cr_exit(void)
-{
- usb_deregister(&realtek_cr_driver);
-}
-
-module_init(realtek_cr_init);
-module_exit(realtek_cr_exit);
+module_usb_driver(realtek_cr_driver);
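Besides the module_usb_driver() conversion, the realtek_cr hunks above replace an open-coded kmalloc()+memcpy() in rts51x_write_mem() with kmemdup(), and fold the fixed register address 0xfe47 straight into the command block. A minimal sketch of the kmemdup() idiom, with a hypothetical helper name (dup_payload is not part of the driver):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Duplicate a caller-supplied buffer in one call. */
static u8 *dup_payload(const u8 *data, size_t len)
{
        /* kmemdup() = kmalloc() + memcpy(); returns NULL on allocation failure */
        return kmemdup(data, len, GFP_NOIO);
}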
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index bcb9a709d34..425df7df2e5 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -71,7 +71,7 @@ static int usb_stor_sddr09_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr09_usb_ids[] = {
+static struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
@@ -1789,15 +1789,4 @@ static struct usb_driver sddr09_driver = {
.soft_unbind = 1,
};
-static int __init sddr09_init(void)
-{
- return usb_register(&sddr09_driver);
-}
-
-static void __exit sddr09_exit(void)
-{
- usb_deregister(&sddr09_driver);
-}
-
-module_init(sddr09_init);
-module_exit(sddr09_exit);
+module_usb_driver(sddr09_driver);
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 44dfed7754e..e4ca5fcb7cc 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr55_usb_ids[] = {
+static struct usb_device_id sddr55_usb_ids[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
@@ -1008,15 +1008,4 @@ static struct usb_driver sddr55_driver = {
.soft_unbind = 1,
};
-static int __init sddr55_init(void)
-{
- return usb_register(&sddr55_driver);
-}
-
-static void __exit sddr55_exit(void)
-{
- usb_deregister(&sddr55_driver);
-}
-
-module_init(sddr55_init);
-module_exit(sddr55_exit);
+module_usb_driver(sddr55_driver);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 0b00091d2ae..1369d259061 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -170,7 +170,7 @@ static int init_usbat_flash(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id usbat_usb_ids[] = {
+static struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
@@ -1865,15 +1865,4 @@ static struct usb_driver usbat_driver = {
.soft_unbind = 1,
};
-static int __init usbat_init(void)
-{
- return usb_register(&usbat_driver);
-}
-
-static void __exit usbat_exit(void)
-{
- usb_deregister(&usbat_driver);
-}
-
-module_init(usbat_init);
-module_exit(usbat_exit);
+module_usb_driver(usbat_driver);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1d10d5b8204..a33ead5dce2 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -760,18 +760,7 @@ static struct usb_driver uas_driver = {
.id_table = uas_usb_ids,
};
-static int uas_init(void)
-{
- return usb_register(&uas_driver);
-}
-
-static void uas_exit(void)
-{
- usb_deregister(&uas_driver);
-}
-
-module_init(uas_init);
-module_exit(uas_exit);
+module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf..856ad92c40d 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
@@ -1907,7 +1914,7 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
-/* Patch by Richard Schütz <r.schtz@t-online.de>
+/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a..3dd7da9fd50 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
dev_dbg(dev, "device found\n");
- set_freezable_with_signal();
+ set_freezable();
+
/*
* Wait for the timeout to expire or for a disconnect
*
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
* fail to freeze, but we can't be non-freezable either. Nor can
* khubd freeze while waiting for scanning to complete as it may
* hold the device lock, causing a hang when suspending devices.
- * So we request a fake signal when freezing and use
- * interruptible sleep to kick us out of our wait early when
- * freezing happens.
+ * So instead of using wait_event_freezable(), explicitly test
+ * for (DONT_SCAN || freezing) in interruptible wait and proceed
+ * if any of DONT_SCAN, freezing or timeout has happened.
*/
if (delay_use > 0) {
dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
wait_event_interruptible_timeout(us->delay_wait,
- test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
- delay_use * HZ);
+ test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+ freezing(current), delay_use * HZ);
}
/* If the device is still connected, perform the scanning */
@@ -1073,6 +1074,7 @@ static struct usb_driver usb_storage_driver = {
.id_table = usb_storage_usb_ids,
.supports_autosuspend = 1,
.soft_unbind = 1,
+ .no_dynamic_id = 1,
};
static int __init usb_stor_init(void)
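The usb_stor_scan_thread() change above drops set_freezable_with_signal() in favour of plain set_freezable() plus a wait condition that also wakes on freezing(current). Isolated as a sketch (the function name is made up; US_FLIDX_DONT_SCAN and the dflags word come from the storage driver's usb.h), the resulting pattern looks roughly like this:

#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Wait up to 'delay' seconds, but wake early if scanning was cancelled
 * or the system started freezing for suspend/hibernate.
 * US_FLIDX_DONT_SCAN comes from drivers/usb/storage/usb.h. */
static void settle_before_scan(wait_queue_head_t *wq, unsigned long *dflags,
                               int delay)
{
        wait_event_interruptible_timeout(*wq,
                        test_bit(US_FLIDX_DONT_SCAN, dflags) ||
                        freezing(current),
                        delay * HZ);
        try_to_freeze();        /* park in the refrigerator if requested */
}

Testing freezing() in the wait condition lets the scan thread notice a suspend promptly instead of sleeping out the full delay, which is what keeps khubd from blocking while it holds the device lock.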
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 32d6fc95390..8efeae24764 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -27,6 +27,8 @@
#define USB_SKEL_VENDOR_ID 0xfff0
#define USB_SKEL_PRODUCT_ID 0xfff0
+static DEFINE_MUTEX(skel_mutex);
+
/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
@@ -60,7 +62,6 @@ struct usb_skel {
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
int errors; /* the last request tanked */
- int open_count; /* count the number of openers */
bool ongoing_read; /* a read is going on */
bool processed_urb; /* indicates we haven't processed the urb */
spinlock_t err_lock; /* lock for errors */
@@ -100,39 +101,37 @@ static int skel_open(struct inode *inode, struct file *file)
goto exit;
}
+ mutex_lock(&skel_mutex);
dev = usb_get_intfdata(interface);
if (!dev) {
+ mutex_unlock(&skel_mutex);
retval = -ENODEV;
goto exit;
}
/* increment our usage count for the device */
kref_get(&dev->kref);
+ mutex_unlock(&skel_mutex);
/* lock the device to allow correctly handling errors
* in resumption */
mutex_lock(&dev->io_mutex);
+ if (!dev->interface) {
+ retval = -ENODEV;
+ goto out_err;
+ }
- if (!dev->open_count++) {
- retval = usb_autopm_get_interface(interface);
- if (retval) {
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- }
- } /* else { //uncomment this block if you want exclusive open
- retval = -EBUSY;
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- } */
- /* prevent the device from being autosuspended */
+ retval = usb_autopm_get_interface(interface);
+ if (retval)
+ goto out_err;
/* save our object in the file's private structure */
file->private_data = dev;
+
+out_err:
mutex_unlock(&dev->io_mutex);
+ if (retval)
+ kref_put(&dev->kref, skel_delete);
exit:
return retval;
@@ -148,7 +147,7 @@ static int skel_release(struct inode *inode, struct file *file)
/* allow the device to be autosuspended */
mutex_lock(&dev->io_mutex);
- if (!--dev->open_count && dev->interface)
+ if (dev->interface)
usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->io_mutex);
@@ -612,7 +611,6 @@ static void skel_disconnect(struct usb_interface *interface)
int minor = interface->minor;
dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
/* give back our minor */
usb_deregister_dev(interface, &skel_class);
@@ -624,8 +622,12 @@ static void skel_disconnect(struct usb_interface *interface)
usb_kill_anchored_urbs(&dev->submitted);
+ mutex_lock(&skel_mutex);
+ usb_set_intfdata(interface, NULL);
+
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
+ mutex_unlock(&skel_mutex);
dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
@@ -688,25 +690,6 @@ static struct usb_driver skel_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_skel_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&skel_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit usb_skel_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&skel_driver);
-}
-
-module_init(usb_skel_init);
-module_exit(usb_skel_exit);
+module_usb_driver(skel_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index eb09a0a14a8..0ead8826ec7 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -5,6 +5,7 @@ config USB_WUSB
tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on USB
+ depends on PCI
select UWB
select CRYPTO
select CRYPTO_BLKCIPHER
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 200fd7c6c7d..7f78f300f8f 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -655,17 +655,7 @@ static struct usb_driver cbaf_driver = {
.disconnect = cbaf_disconnect,
};
-static int __init cbaf_driver_init(void)
-{
- return usb_register(&cbaf_driver);
-}
-module_init(cbaf_driver_init);
-
-static void __exit cbaf_driver_exit(void)
-{
- usb_deregister(&cbaf_driver);
-}
-module_exit(cbaf_driver_exit);
+module_usb_driver(cbaf_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Cable Based Association");
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 371f61733f0..fa810a83e83 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -354,7 +354,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_keydvt_in keydvt_in;
struct wusb_keydvt_out keydvt_out;
- hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+ hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
if (hs == NULL) {
dev_err(dev, "can't allocate handshake data\n");
goto error_kzalloc;
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
index de81ebf5178..86ed7e61e59 100644
--- a/drivers/uwb/est.c
+++ b/drivers/uwb/est.c
@@ -184,7 +184,7 @@ int uwb_est_create(void)
uwb_est_size = 2;
uwb_est_used = 0;
- uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
+ uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
if (uwb_est == NULL)
return -ENOMEM;
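Both the wusbcore/security.c and uwb/est.c hunks above convert kzalloc(n * size, ...) into kcalloc(n, size, ...). The result is still zeroed memory, but kcalloc() additionally fails cleanly if the element count times the element size would overflow. Minimal sketch with a made-up element type:

#include <linux/slab.h>
#include <linux/types.h>

struct elem {
        u32 a;
        u32 b;
};

/* Zeroed array allocation; returns NULL if count * sizeof(struct elem)
 * would overflow instead of silently wrapping. */
static struct elem *alloc_table(size_t count)
{
        return kcalloc(count, sizeof(struct elem), GFP_KERNEL);
}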
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 2babcd4fbfc..66797e9c501 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -914,17 +914,7 @@ static struct usb_driver hwarc_driver = {
.post_reset = hwarc_post_reset,
};
-static int __init hwarc_driver_init(void)
-{
- return usb_register(&hwarc_driver);
-}
-module_init(hwarc_driver_init);
-
-static void __exit hwarc_driver_exit(void)
-{
- usb_deregister(&hwarc_driver);
-}
-module_exit(hwarc_driver_exit);
+module_usb_driver(hwarc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index ba8664328af..2bfc846ac07 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -104,7 +104,7 @@ void i1480_usb_destroy(struct i1480_usb *i1480_usb)
*
* Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
* so we copy it to the local i1480 buffer before proceeding. In any
- * case, we have a max size we can send, soooo.
+ * case, we have a max size we can send.
*/
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
@@ -451,25 +451,7 @@ static struct usb_driver i1480_dfu_driver = {
.disconnect = NULL,
};
-
-/*
- * Initialize the i1480 DFU driver.
- *
- * We also need to register our function for guessing event sizes.
- */
-static int __init i1480_dfu_driver_init(void)
-{
- return usb_register(&i1480_dfu_driver);
-}
-module_init(i1480_dfu_driver_init);
-
-
-static void __exit i1480_dfu_driver_exit(void)
-{
- usb_deregister(&i1480_dfu_driver);
-}
-module_exit(i1480_dfu_driver_exit);
-
+module_usb_driver(i1480_dfu_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2cda6ba0939..0a2cce7285b 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -621,6 +621,8 @@ static struct amba_id clcdfb_id_table[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, clcdfb_id_table);
+
static struct amba_driver clcd_driver = {
.drv = {
.name = "clcd-pl11x",
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 1105fa1ed7f..a1376dc73d7 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -270,17 +270,7 @@ static struct platform_driver pm860x_backlight_driver = {
.remove = pm860x_backlight_remove,
};
-static int __init pm860x_backlight_init(void)
-{
- return platform_driver_register(&pm860x_backlight_driver);
-}
-module_init(pm860x_backlight_init);
-
-static void __exit pm860x_backlight_exit(void)
-{
- platform_driver_unregister(&pm860x_backlight_driver);
-}
-module_exit(pm860x_backlight_exit);
+module_platform_driver(pm860x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Marvell Semiconductor 88PM8606");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 278aeaa9250..681b36929fe 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -280,14 +280,6 @@ config BACKLIGHT_WM831X
If you have a backlight driven by the ISINK and DCDC of a
WM831x PMIC say y to enable the backlight driver for it.
-config BACKLIGHT_ADX
- tristate "Avionic Design Xanthos Backlight Driver"
- depends on ARCH_PXA_ADX
- default y
- help
- Say Y to enable the backlight driver on Avionic Design Xanthos-based
- boards.
-
config BACKLIGHT_ADP5520
tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
depends on PMIC_ADP5520
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index fdd1fc4b277..af5cf654ec7 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index dfb763e9147..2e630bf1164 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -384,17 +384,7 @@ static struct platform_driver adp5520_bl_driver = {
.resume = adp5520_bl_resume,
};
-static int __init adp5520_bl_init(void)
-{
- return platform_driver_register(&adp5520_bl_driver);
-}
-module_init(adp5520_bl_init);
-
-static void __exit adp5520_bl_exit(void)
-{
- platform_driver_unregister(&adp5520_bl_driver);
-}
-module_exit(adp5520_bl_exit);
+module_platform_driver(adp5520_bl_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
deleted file mode 100644
index c861c41af44..00000000000
--- a/drivers/video/backlight/adx_bl.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * linux/drivers/video/backlight/adx.c
- *
- * Copyright (C) 2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Written by Thierry Reding <thierry.reding@avionic-design.de>
- */
-
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-/* register definitions */
-#define ADX_BACKLIGHT_CONTROL 0x00
-#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
-#define ADX_BACKLIGHT_BRIGHTNESS 0x08
-#define ADX_BACKLIGHT_STATUS 0x10
-#define ADX_BACKLIGHT_ERROR 0x18
-
-struct adxbl {
- void __iomem *base;
-};
-
-static int adx_backlight_update_status(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 value;
-
- value = bldev->props.brightness;
- writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
-
- value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
-
- if (bldev->props.state & BL_CORE_FBBLANK)
- value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
- else
- value |= ADX_BACKLIGHT_CONTROL_ENABLE;
-
- writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
-
- return 0;
-}
-
-static int adx_backlight_get_brightness(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 brightness;
-
- brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
- return brightness & 0xff;
-}
-
-static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
-{
- return 1;
-}
-
-static const struct backlight_ops adx_backlight_ops = {
- .options = 0,
- .update_status = adx_backlight_update_status,
- .get_brightness = adx_backlight_get_brightness,
- .check_fb = adx_backlight_check_fb,
-};
-
-static int __devinit adx_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bldev;
- struct resource *res;
- struct adxbl *bl;
- int ret = 0;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), res->name);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
- if (!bl) {
- ret = -ENOMEM;
- goto out;
- }
-
- bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!bl->base) {
- ret = -ENXIO;
- goto out;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 0xff;
- bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
- bl, &adx_backlight_ops, &props);
- if (IS_ERR(bldev)) {
- ret = PTR_ERR(bldev);
- goto out;
- }
-
- bldev->props.brightness = 0xff;
- bldev->props.power = FB_BLANK_UNBLANK;
-
- platform_set_drvdata(pdev, bldev);
-
-out:
- return ret;
-}
-
-static int __devexit adx_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bldev;
- int ret = 0;
-
- bldev = platform_get_drvdata(pdev);
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 0xff;
- backlight_update_status(bldev);
- backlight_device_unregister(bldev);
- platform_set_drvdata(pdev, NULL);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int adx_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int adx_backlight_resume(struct platform_device *pdev)
-{
- return 0;
-}
-#else
-#define adx_backlight_suspend NULL
-#define adx_backlight_resume NULL
-#endif
-
-static struct platform_driver adx_backlight_driver = {
- .probe = adx_backlight_probe,
- .remove = __devexit_p(adx_backlight_remove),
- .suspend = adx_backlight_suspend,
- .resume = adx_backlight_resume,
- .driver = {
- .name = "adx-backlight",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init adx_backlight_init(void)
-{
- return platform_driver_register(&adx_backlight_driver);
-}
-
-static void __exit adx_backlight_exit(void)
-{
- platform_driver_unregister(&adx_backlight_driver);
-}
-
-module_init(adx_backlight_init);
-module_exit(adx_backlight_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 7363c1b169e..bf5b1ece716 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -102,7 +102,7 @@ static void backlight_generate_event(struct backlight_device *bd,
}
static ssize_t backlight_show_power(struct device *dev,
- struct device_attribute *attr,char *buf)
+ struct device_attribute *attr, char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
@@ -116,7 +116,7 @@ static ssize_t backlight_store_power(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long power;
- rc = strict_strtoul(buf, 0, &power);
+ rc = kstrtoul(buf, 0, &power);
if (rc)
return rc;
@@ -150,7 +150,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long brightness;
- rc = strict_strtoul(buf, 0, &brightness);
+ rc = kstrtoul(buf, 0, &brightness);
if (rc)
return rc;
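backlight.c (and lcd.c a little further down) move their sysfs store handlers from strict_strtoul()/simple_strtoul() to kstrtoul(), which rejects trailing garbage and reports errors directly, so the hand-rolled end-pointer checks disappear. A minimal store handler using the same idiom (the attribute and function name are hypothetical, not from the backlight core):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        unsigned long level;
        int rc;

        rc = kstrtoul(buf, 0, &level);  /* base 0 = auto-detect, strict parse */
        if (rc)
                return rc;

        dev_dbg(dev, "setting level to %lu\n", level);
        return count;
}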
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index d68f14bbb68..abb4a06268f 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -199,17 +199,7 @@ static struct platform_driver da903x_backlight_driver = {
.remove = da903x_backlight_remove,
};
-static int __init da903x_backlight_init(void)
-{
- return platform_driver_register(&da903x_backlight_driver);
-}
-module_init(da903x_backlight_init);
-
-static void __exit da903x_backlight_exit(void)
-{
- platform_driver_unregister(&da903x_backlight_driver);
-}
-module_exit(da903x_backlight_exit);
+module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index c74a6f4baa1..b62b8b9063b 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/backlight.h>
@@ -144,17 +143,7 @@ static struct platform_driver ep93xxbl_driver = {
.resume = ep93xxbl_resume,
};
-static int __init ep93xxbl_init(void)
-{
- return platform_driver_register(&ep93xxbl_driver);
-}
-module_init(ep93xxbl_init);
-
-static void __exit ep93xxbl_exit(void)
-{
- platform_driver_unregister(&ep93xxbl_driver);
-}
-module_exit(ep93xxbl_exit);
+module_platform_driver(ep93xxbl_driver);
MODULE_DESCRIPTION("EP93xx Backlight Driver");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index adb191466d6..9ce6170c186 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -132,18 +132,7 @@ static struct platform_driver genericbl_driver = {
},
};
-static int __init genericbl_init(void)
-{
- return platform_driver_register(&genericbl_driver);
-}
-
-static void __exit genericbl_exit(void)
-{
- platform_driver_unregister(&genericbl_driver);
-}
-
-module_init(genericbl_init);
-module_exit(genericbl_exit);
+module_platform_driver(genericbl_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("Generic Backlight Driver");
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index de65d80159b..2f8af5d786a 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -147,19 +147,8 @@ static struct platform_driver jornada_bl_driver = {
},
};
-static int __init jornada_bl_init(void)
-{
- return platform_driver_register(&jornada_bl_driver);
-}
-
-static void __exit jornada_bl_exit(void)
-{
- platform_driver_unregister(&jornada_bl_driver);
-}
+module_platform_driver(jornada_bl_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 Backlight driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_bl_init);
-module_exit(jornada_bl_exit);
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index d2ff658b414..22d231a17e3 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,19 +135,8 @@ static struct platform_driver jornada_lcd_driver = {
},
};
-static int __init jornada_lcd_init(void)
-{
- return platform_driver_register(&jornada_lcd_driver);
-}
-
-static void __exit jornada_lcd_exit(void)
-{
- platform_driver_unregister(&jornada_lcd_driver);
-}
+module_platform_driver(jornada_lcd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_lcd_init);
-module_exit(jornada_lcd_exit);
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 71a11cadffc..79c1b0d609a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -97,19 +97,16 @@ static ssize_t lcd_store_power(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int power = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long power;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &power);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %d\n", power);
+ pr_debug("lcd: set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -136,19 +133,16 @@ static ssize_t lcd_store_contrast(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int contrast = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long contrast;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &contrast);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %d\n", contrast);
+ pr_debug("lcd: set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index da9a5ce0ccb..78dafc0c8fc 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -31,6 +31,7 @@
#include <linux/lcd.h>
#include <linux/backlight.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include "ld9040_gamma.h"
@@ -53,8 +54,51 @@ struct ld9040 {
struct lcd_device *ld;
struct backlight_device *bd;
struct lcd_platform_data *lcd_pd;
+
+ struct mutex lock;
+ bool enabled;
+};
+
+static struct regulator_bulk_data supplies[] = {
+ { .supply = "vdd3", },
+ { .supply = "vci", },
};
+static void ld9040_regulator_enable(struct ld9040 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ mutex_lock(&lcd->lock);
+ if (!lcd->enabled) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = true;
+ }
+ mdelay(pd->power_on_delay);
+out:
+ mutex_unlock(&lcd->lock);
+}
+
+static void ld9040_regulator_disable(struct ld9040 *lcd)
+{
+ int ret = 0;
+
+ mutex_lock(&lcd->lock);
+ if (lcd->enabled) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = false;
+ }
+out:
+ mutex_unlock(&lcd->lock);
+}
+
static const unsigned short seq_swreset[] = {
0x01, COMMAND_ONLY,
ENDDEF, 0x00
@@ -532,13 +576,8 @@ static int ld9040_power_on(struct ld9040 *lcd)
return -EFAULT;
}
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else {
- pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
- }
+ /* lcd power on */
+ ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
@@ -582,11 +621,8 @@ static int ld9040_power_off(struct ld9040 *lcd)
mdelay(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ /* lcd power off */
+ ld9040_regulator_disable(lcd);
return 0;
}
@@ -693,6 +729,14 @@ static int ld9040_probe(struct spi_device *spi)
goto out_free_lcd;
}
+ mutex_init(&lcd->lock);
+
+ ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
+ goto out_free_lcd;
+ }
+
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
@@ -739,6 +783,8 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
out_free_lcd:
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+
kfree(lcd);
return ret;
}
@@ -750,6 +796,7 @@ static int __devexit ld9040_remove(struct spi_device *spi)
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
kfree(lcd);
return 0;
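The ld9040 hunks above drop the board-supplied power_on() callback and drive the panel supplies through the regulator framework instead, using the bulk API so "vdd3" and "vci" are fetched, switched and released as a group. A sketch of the call sequence, assuming a device with those two named supplies (the panel_* helpers are invented):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>

static struct regulator_bulk_data panel_supplies[] = {
        { .supply = "vdd3" },
        { .supply = "vci"  },
};

/* probe time: resolve both supplies in one call */
static int panel_get_supplies(struct device *dev)
{
        return regulator_bulk_get(dev, ARRAY_SIZE(panel_supplies),
                                  panel_supplies);
}

/* power sequencing: enable or disable the whole group together */
static int panel_power(bool on)
{
        if (on)
                return regulator_bulk_enable(ARRAY_SIZE(panel_supplies),
                                             panel_supplies);
        return regulator_bulk_disable(ARRAY_SIZE(panel_supplies),
                                      panel_supplies);
}

/* remove time: drop the regulator references */
static void panel_put_supplies(void)
{
        regulator_bulk_free(ARRAY_SIZE(panel_supplies), panel_supplies);
}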
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 7bbc802560e..c915e3b5388 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -188,17 +188,7 @@ static struct platform_driver max8925_backlight_driver = {
.remove = __devexit_p(max8925_backlight_remove),
};
-static int __init max8925_backlight_init(void)
-{
- return platform_driver_register(&max8925_backlight_driver);
-}
-module_init(max8925_backlight_init);
-
-static void __exit max8925_backlight_exit(void)
-{
- platform_driver_unregister(&max8925_backlight_driver);
-};
-module_exit(max8925_backlight_exit);
+module_platform_driver(max8925_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Maxim MAX8925");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 08d26a72394..d8cde277ec8 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -195,18 +195,7 @@ static struct platform_driver omapbl_driver = {
},
};
-static int __init omapbl_init(void)
-{
- return platform_driver_register(&omapbl_driver);
-}
-
-static void __exit omapbl_exit(void)
-{
- platform_driver_unregister(&omapbl_driver);
-}
-
-module_init(omapbl_init);
-module_exit(omapbl_exit);
+module_platform_driver(omapbl_driver);
MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
MODULE_DESCRIPTION("OMAP LCD Backlight driver");
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index ef5628d6056..13e88b71dae 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -173,17 +173,7 @@ static struct platform_driver pcf50633_bl_driver = {
},
};
-static int __init pcf50633_bl_init(void)
-{
- return platform_driver_register(&pcf50633_bl_driver);
-}
-module_init(pcf50633_bl_init);
-
-static void __exit pcf50633_bl_exit(void)
-{
- platform_driver_unregister(&pcf50633_bl_driver);
-}
-module_exit(pcf50633_bl_exit);
+module_platform_driver(pcf50633_bl_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PCF50633 backlight driver");
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 302330acf62..f0bf491ed08 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -85,7 +85,8 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return -EINVAL;
}
- plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
+ plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd),
+ GFP_KERNEL);
if (!plcd) {
dev_err(dev, "no memory for state\n");
return -ENOMEM;
@@ -98,7 +99,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
if (IS_ERR(plcd->lcd)) {
dev_err(dev, "cannot register lcd device\n");
err = PTR_ERR(plcd->lcd);
- goto err_mem;
+ goto err;
}
platform_set_drvdata(pdev, plcd);
@@ -106,8 +107,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return 0;
- err_mem:
- kfree(plcd);
+ err:
return err;
}
@@ -116,7 +116,6 @@ static int __devexit platform_lcd_remove(struct platform_device *pdev)
struct platform_lcd *plcd = platform_get_drvdata(pdev);
lcd_device_unregister(plcd->lcd);
- kfree(plcd);
return 0;
}
@@ -157,18 +156,7 @@ static struct platform_driver platform_lcd_driver = {
.resume = platform_lcd_resume,
};
-static int __init platform_lcd_init(void)
-{
- return platform_driver_register(&platform_lcd_driver);
-}
-
-static void __exit platform_lcd_cleanup(void)
-{
- platform_driver_unregister(&platform_lcd_driver);
-}
-
-module_init(platform_lcd_init);
-module_exit(platform_lcd_cleanup);
+module_platform_driver(platform_lcd_driver);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 8b5b2a4124c..7496d04e1d3 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -169,10 +169,9 @@ static int pwm_backlight_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pwm_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int pwm_backlight_suspend(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
if (pb->notify)
@@ -184,40 +183,32 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
return 0;
}
-static int pwm_backlight_resume(struct platform_device *pdev)
+static int pwm_backlight_resume(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
return 0;
}
-#else
-#define pwm_backlight_suspend NULL
-#define pwm_backlight_resume NULL
+
+static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
+ pwm_backlight_resume);
+
#endif
static struct platform_driver pwm_backlight_driver = {
.driver = {
.name = "pwm-backlight",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pwm_backlight_pm_ops,
+#endif
},
.probe = pwm_backlight_probe,
.remove = pwm_backlight_remove,
- .suspend = pwm_backlight_suspend,
- .resume = pwm_backlight_resume,
};
-static int __init pwm_backlight_init(void)
-{
- return platform_driver_register(&pwm_backlight_driver);
-}
-module_init(pwm_backlight_init);
-
-static void __exit pwm_backlight_exit(void)
-{
- platform_driver_unregister(&pwm_backlight_driver);
-}
-module_exit(pwm_backlight_exit);
+module_platform_driver(pwm_backlight_driver);
MODULE_DESCRIPTION("PWM based Backlight Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index fbe9e9316f3..4e915f5eca9 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -236,17 +236,7 @@ static struct platform_driver wm831x_backlight_driver = {
.remove = wm831x_backlight_remove,
};
-static int __init wm831x_backlight_init(void)
-{
- return platform_driver_register(&wm831x_backlight_driver);
-}
-module_init(wm831x_backlight_init);
-
-static void __exit wm831x_backlight_exit(void)
-{
- platform_driver_unregister(&wm831x_backlight_driver);
-}
-module_exit(wm831x_backlight_exit);
+module_platform_driver(wm831x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 56720fb476b..46b03f53985 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: ADSP-BF54x Framebufer driver
+ * Description: ADSP-BF54x Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index d5e12675961..7a0c05f3537 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: Blackfin LCD Framebufer driver
+ * Description: Blackfin LCD Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 2209e354f53..c2d11fef114 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -66,8 +66,6 @@ config SGI_NEWPORT_CONSOLE
Say Y here if you want the console on the Newport aka XL graphics
card of your Indy. Most people say Y here.
-# bool 'IODC console' CONFIG_IODC_CONSOLE
-
config DUMMY_CONSOLE
bool
depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00..29577bf1f55 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
/* Clock registers available only on Version 2 */
#define LCD_CLK_ENABLE_REG 0x6c
#define LCD_CLK_RESET_REG 0x70
+#define LCD_CLK_MAIN_RESET BIT(3)
#define LCD_NUM_BUFFERS 2
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
{
u32 reg;
+ /* Bring LCDC out of reset */
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_CLK_RESET_REG);
+
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (!(reg & LCD_RASTER_ENABLE))
lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (reg & LCD_RASTER_ENABLE)
lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ /* Write 1 to reset LCDC */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
}
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
- if (lcd_revision == LCD_VERSION_2)
+ if (lcd_revision == LCD_VERSION_2) {
lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+ /* Write 1 to reset */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+ lcdc_write(0, LCD_CLK_RESET_REG);
+ }
}
static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d837d63c456..eb3c5eea1a0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -328,7 +328,7 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
dev_dbg(&host->pdev->dev, "%s\n", __func__);
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
/* if it was disabled, re-enable the mode again */
@@ -368,7 +368,7 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->enabled = 0;
}
@@ -668,7 +668,7 @@ static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
fb_info->fix.ypanstep = 1;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->enabled = 1;
return 0;
@@ -841,7 +841,7 @@ static int __devinit mxsfb_probe(struct platform_device *pdev)
error_register:
if (host->enabled)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
fb_destroy_modelist(&fb_info->modelist);
error_init_fb:
kfree(fb_info->pseudo_palette);
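mxsfb above replaces bare clk_enable()/clk_disable() with clk_prepare_enable()/clk_disable_unprepare(), the pairing required once the clock sits behind the common clock framework (prepare may sleep, enable must not). Minimal sketch for a hypothetical clock consumer:

#include <linux/clk.h>

static int demo_start(struct clk *clk, unsigned long hz)
{
        int ret;

        /* prepare (may sleep) + enable (atomic) in a single call */
        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        clk_set_rate(clk, hz);
        return 0;
}

static void demo_stop(struct clk *clk)
{
        clk_disable_unprepare(clk);     /* undoes both steps */
}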
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index cb163a5397b..0c4f34311ed 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -41,13 +41,14 @@
/* Supported palette hacks */
enum {
cmap_unknown,
- cmap_m64, /* ATI Mach64 */
+ cmap_simple, /* ATI Mach64 */
cmap_r128, /* ATI Rage128 */
cmap_M3A, /* ATI Rage Mobility M3 Head A */
cmap_M3B, /* ATI Rage Mobility M3 Head B */
cmap_radeon, /* ATI Radeon */
cmap_gxt2000, /* IBM GXT2000 */
cmap_avivo, /* ATI R5xx */
+ cmap_qemu, /* qemu vga */
};
struct offb_par {
@@ -100,36 +101,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct offb_par *par = (struct offb_par *) info->par;
- int i, depth;
- u32 *pal = info->pseudo_palette;
-
- depth = info->var.bits_per_pixel;
- if (depth == 16)
- depth = (info->var.green.length == 5) ? 15 : 16;
-
- if (regno > 255 ||
- (depth == 16 && regno > 63) ||
- (depth == 15 && regno > 31))
- return 1;
-
- if (regno < 16) {
- switch (depth) {
- case 15:
- pal[regno] = (regno << 10) | (regno << 5) | regno;
- break;
- case 16:
- pal[regno] = (regno << 11) | (regno << 5) | regno;
- break;
- case 24:
- pal[regno] = (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- i = (regno << 8) | regno;
- pal[regno] = (i << 16) | i;
- break;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *pal = info->pseudo_palette;
+ u32 cr = red >> (16 - info->var.red.length);
+ u32 cg = green >> (16 - info->var.green.length);
+ u32 cb = blue >> (16 - info->var.blue.length);
+ u32 value;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ value = (cr << info->var.red.offset) |
+ (cg << info->var.green.offset) |
+ (cb << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
}
+ pal[regno] = value;
+ return 0;
}
+ if (regno > 255)
+ return -EINVAL;
+
red >>= 8;
green >>= 8;
blue >>= 8;
@@ -138,7 +135,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(regno, par->cmap_adr);
writeb(red, par->cmap_data);
writeb(green, par->cmap_data);
@@ -208,7 +205,7 @@ static int offb_blank(int blank, struct fb_info *info)
if (blank)
for (i = 0; i < 256; i++) {
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(i, par->cmap_adr);
for (j = 0; j < 3; j++)
writeb(0, par->cmap_data);
@@ -350,7 +347,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_data = par->cmap_adr + 1;
- par->cmap_type = cmap_m64;
+ par->cmap_type = cmap_simple;
} else if (dp && (of_device_is_compatible(dp, "pci1014,b7") ||
of_device_is_compatible(dp, "pci1014,21c"))) {
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
@@ -371,6 +368,16 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_type = cmap_avivo;
}
of_node_put(pciparent);
+ } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+ const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+ u64 io_addr = of_translate_address(dp, io_of_addr);
+ if (io_addr != OF_BAD_ADDR) {
+ par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
+ if (par->cmap_adr) {
+ par->cmap_type = cmap_simple;
+ par->cmap_data = par->cmap_adr + 1;
+ }
+ }
}
info->fix.visual = (par->cmap_type != cmap_unknown) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
@@ -381,7 +388,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
int pitch, unsigned long address,
int foreign_endian, struct device_node *dp)
{
- unsigned long res_size = pitch * height * (depth + 7) / 8;
+ unsigned long res_size = pitch * height;
struct offb_par *par = &default_par;
unsigned long res_start = address;
struct fb_fix_screeninfo *fix;
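The rewritten offb_setcolreg() above handles truecolor visuals by packing the 16-bit fb color components into a pseudo-palette entry using the per-channel offset/length from fb_var_screeninfo, forcing any transparency bits to opaque. The same packing step pulled out into a standalone helper (pack_truecolor is invented, mirroring the hunk):

#include <linux/fb.h>
#include <linux/types.h>

/* Pack 16-bit-per-channel values into one pseudo-palette entry. */
static u32 pack_truecolor(struct fb_info *info, u16 red, u16 green, u16 blue)
{
        u32 cr = red   >> (16 - info->var.red.length);
        u32 cg = green >> (16 - info->var.green.length);
        u32 cb = blue  >> (16 - info->var.blue.length);
        u32 value = (cr << info->var.red.offset) |
                    (cg << info->var.green.offset) |
                    (cb << info->var.blue.offset);

        if (info->var.transp.length > 0) {
                u32 mask = (1 << info->var.transp.length) - 1;

                value |= mask << info->var.transp.offset;       /* force opaque */
        }
        return value;
}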
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47b..6f61e781f15 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 0c6981f1a4a..2c1a3402bef 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -2,7 +2,7 @@
* OMAP2 Remote Frame Buffer Interface support
*
* Copyright (C) 2005 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
* Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 8fb7c708f56..f79c137753d 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -2,7 +2,7 @@
* OMAP1 Special OptimiSed Screen Interface support
*
* Copyright (C) 2004-2005 Nokia Corporation
- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551c..5c81533eaca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
unsigned long fclk = 0;
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- if (width != out_width || height != out_height)
- return -EINVAL;
- else
- return 0;
- }
+ if (width == out_width && height == out_height)
+ return 0;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+ return -EINVAL;
if (out_width < width / maxdownscale ||
out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa3..c56378c555b 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
unsigned long hdmi_get_pixel_clock(void)
{
/* HDMI Pixel Clock in Mhz */
- return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+ return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
}
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 83d3fe7ec9a..4ea17dc3258 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,6 +1,6 @@
menuconfig FB_OMAP2
tristate "OMAP2+ frame buffer support"
- depends on FB && OMAP2_DSS
+ depends on FB && OMAP2_DSS && !DRM_OMAP
select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index aaccffac67a..3c22994ea31 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1792,24 +1792,7 @@ static struct usb_driver ufx_driver = {
.id_table = id_table,
};
-static int __init ufx_module_init(void)
-{
- int res;
-
- res = usb_register(&ufx_driver);
- if (res)
- err("usb_register failed. Error number %d", res);
-
- return res;
-}
-
-static void __exit ufx_module_exit(void)
-{
- usb_deregister(&ufx_driver);
-}
-
-module_init(ufx_module_init);
-module_exit(ufx_module_exit);
+module_usb_driver(ufx_driver);
static void ufx_urb_completion(struct urb *urb)
{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 3473e75ce78..1f868d0187a 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1761,24 +1761,7 @@ static struct usb_driver dlfb_driver = {
.id_table = id_table,
};
-static int __init dlfb_module_init(void)
-{
- int res;
-
- res = usb_register(&dlfb_driver);
- if (res)
- err("usb_register failed. Error number %d", res);
-
- return res;
-}
-
-static void __exit dlfb_module_exit(void)
-{
- usb_deregister(&dlfb_driver);
-}
-
-module_init(dlfb_module_init);
-module_exit(dlfb_module_exit);
+module_usb_driver(dlfb_driver);
static void dlfb_urb_completion(struct urb *urb)
{
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe70..c01c1c16272 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
#define M1200X720_R60_VSP POSITIVE
/* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP NEGATIVE
-#define M1200X900_R60_VSP NEGATIVE
+#define M1200X900_R60_HSP POSITIVE
+#define M1200X900_R60_VSP POSITIVE
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index beac52fc1c0..cb4529c40d7 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -671,20 +671,17 @@ InitWait:
}
}
-static struct xenbus_device_id xenfb_ids[] = {
+static const struct xenbus_device_id xenfb_ids[] = {
{ "vfb" },
{ "" }
};
-static struct xenbus_driver xenfb_driver = {
- .name = "vfb",
- .owner = THIS_MODULE,
- .ids = xenfb_ids,
+static DEFINE_XENBUS_DRIVER(xenfb, ,
.probe = xenfb_probe,
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
-};
+);
static int __init xenfb_init(void)
{
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 7317dc2ec42..0269717436a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return 0;
}
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ return vm_dev->pdev->name;
+}
static struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
.del_vqs = vm_del_vqs,
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
+ .bus_name = vm_bus_name,
};
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 03d1984bd36..baabb7937ec 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -598,6 +598,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
false, false);
}
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return pci_name(vp_dev->pci_dev);
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
@@ -608,6 +615,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
.del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
+ .bus_name = vp_bus_name,
};
static void virtio_pci_release_dev(struct device *_d)
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index b5abaae38e9..4f7e1d770f8 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1002,26 +1002,7 @@ static void ds_disconnect(struct usb_interface *intf)
kfree(dev);
}
-static int ds_init(void)
-{
- int err;
-
- err = usb_register(&ds_driver);
- if (err) {
- printk(KERN_INFO "Failed to register DS9490R USB device: err=%d.\n", err);
- return err;
- }
-
- return 0;
-}
-
-static void ds_fini(void)
-{
- usb_deregister(&ds_driver);
-}
-
-module_init(ds_init);
-module_exit(ds_fini);
+module_usb_driver(ds_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index a1ef9b5b38c..ff29ae747ee 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -175,11 +175,13 @@ static ssize_t w1_therm_read(struct device *device,
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
- u8 rom[9], crc, verdict;
+ u8 rom[9], crc, verdict, external_power;
int i, max_trying = 10;
ssize_t c = PAGE_SIZE;
- mutex_lock(&dev->mutex);
+ i = mutex_lock_interruptible(&dev->mutex);
+ if (i != 0)
+ return i;
memset(rom, 0, sizeof(rom));
@@ -190,13 +192,37 @@ static ssize_t w1_therm_read(struct device *device,
if (!w1_reset_select_slave(sl)) {
int count = 0;
unsigned int tm = 750;
+ unsigned long sleep_rem;
+
+ w1_write_8(dev, W1_READ_PSUPPLY);
+ external_power = w1_read_8(dev);
+
+ if (w1_reset_select_slave(sl))
+ continue;
/* 750ms strong pullup (or delay) after the convert */
- if (w1_strong_pullup)
+ if (!external_power && w1_strong_pullup)
w1_next_pullup(dev, tm);
+
w1_write_8(dev, W1_CONVERT_TEMP);
- if (!w1_strong_pullup)
- msleep(tm);
+
+ if (external_power) {
+ mutex_unlock(&dev->mutex);
+
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0)
+ return -EINTR;
+
+ i = mutex_lock_interruptible(&dev->mutex);
+ if (i != 0)
+ return i;
+ } else if (!w1_strong_pullup) {
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ mutex_unlock(&dev->mutex);
+ return -EINTR;
+ }
+ }
if (!w1_reset_select_slave(sl)) {
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c3749782385..9761950697b 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -892,6 +892,16 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
break;
}
+ /* Do fast search on single slave bus */
+ if (dev->max_slave_count == 1) {
+ w1_write_8(dev, W1_READ_ROM);
+
+ if (w1_read_block(dev, (u8 *)&rn, 8) == 8 && rn)
+ cb(dev, rn);
+
+ break;
+ }
+
/* Start the search */
w1_write_8(dev, search_type);
for (i = 0; i < 64; ++i) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79fd606b7cd..877b107f77a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -772,6 +772,19 @@ config SMSC37B787_WDT
Most people will say N.
+config VIA_WDT
+ tristate "VIA Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ ---help---
+ This is the driver for the hardware watchdog timer on VIA
+ southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+
+ To compile this driver as a module, choose M here; the module
+ will be called via_wdt.
+
+ Most people will say N.
+
config W83627HF_WDT
tristate "W83627HF/W83627DHG Watchdog Timer"
depends on X86
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index fe893e91935..e8f479a1640 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o
obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
obj-$(CONFIG_SMSC_SCH311X_WDT) += sch311x_wdt.o
obj-$(CONFIG_SMSC37B787_WDT) += smsc37b787_wdt.o
+obj-$(CONFIG_VIA_WDT) += via_wdt.o
obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
obj-$(CONFIG_W83697HF_WDT) += w83697hf_wdt.o
obj-$(CONFIG_W83697UG_WDT) += w83697ug_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b2922178359..502773ad5ac 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -70,8 +70,8 @@ struct ar7_wdt {
};
static unsigned long wdt_is_open;
-static spinlock_t wdt_lock;
static unsigned expect_close;
+static DEFINE_SPINLOCK(wdt_lock);
/* XXX currently fixed, allows max margin ~68.72 secs */
#define prescale_value 0xffff
@@ -280,8 +280,6 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
{
int rc;
- spin_lock_init(&wdt_lock);
-
ar7_regs_wdt =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!ar7_regs_wdt) {
@@ -355,15 +353,4 @@ static struct platform_driver ar7_wdt_driver = {
},
};
-static int __init ar7_wdt_init(void)
-{
- return platform_driver_register(&ar7_wdt_driver);
-}
-
-static void __exit ar7_wdt_cleanup(void)
-{
- platform_driver_unregister(&ar7_wdt_driver);
-}
-
-module_init(ar7_wdt_init);
-module_exit(ar7_wdt_cleanup);
+module_platform_driver(ar7_wdt_driver);
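Note: the same boilerplate removal is applied to the platform drivers throughout the rest of this series (bcm63xx, cpwd, davinci, dw_wdt, jz4740, max63xx, mtx-1, nuc900, of_xilinx, omap, orion, pnx4008, rc32434, rdc321x, riowd, stmp3xxx, ts72xx, twl4030, wm831x, wm8350). As a simplified sketch (the real macro is module_platform_driver() from <linux/platform_device.h>, also built on module_driver(); not part of the patch), module_platform_driver(ar7_wdt_driver) expands to roughly:

/* Simplified sketch of the expansion of module_platform_driver(ar7_wdt_driver). */
static int __init ar7_wdt_driver_init(void)
{
	return platform_driver_register(&ar7_wdt_driver);
}
module_init(ar7_wdt_driver_init);

static void __exit ar7_wdt_driver_exit(void)
{
	platform_driver_unregister(&ar7_wdt_driver);
}
module_exit(ar7_wdt_driver_exit);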
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 87445b2d72a..00562566ef5 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -35,6 +35,11 @@
#define DRV_NAME "AT91SAM9 Watchdog"
+#define wdt_read(field) \
+ __raw_readl(at91wdt_private.base + field)
+#define wdt_write(field, val) \
+ __raw_writel((val), at91wdt_private.base + field)
+
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
@@ -63,6 +68,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static void at91_ping(unsigned long data);
static struct {
+ void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
unsigned long open;
char expect_close;
@@ -77,7 +83,7 @@ static struct {
*/
static inline void at91_wdt_reset(void)
{
- at91_sys_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -132,7 +138,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
unsigned int mr;
/* Check if disabled */
- mr = at91_sys_read(AT91_WDT_MR);
+ mr = wdt_read(AT91_WDT_MR);
if (mr & AT91_WDT_WDDIS) {
printk(KERN_ERR DRV_NAME": sorry, watchdog is disabled\n");
return -EIO;
@@ -149,7 +155,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
| AT91_WDT_WDDBGHLT /* disabled in debug mode */
| AT91_WDT_WDD /* restart at any time */
| (timeout & AT91_WDT_WDV); /* timer value */
- at91_sys_write(AT91_WDT_MR, reg);
+ wdt_write(AT91_WDT_MR, reg);
return 0;
}
@@ -248,12 +254,22 @@ static struct miscdevice at91wdt_miscdev = {
static int __init at91wdt_probe(struct platform_device *pdev)
{
+ struct resource *r;
int res;
if (at91wdt_miscdev.parent)
return -EBUSY;
at91wdt_miscdev.parent = &pdev->dev;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+ at91wdt_private.base = ioremap(r->start, resource_size(r));
+ if (!at91wdt_private.base) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
/* Set watchdog */
res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
if (res)
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index 757f9cab5c8..c6fbb2e6c41 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -16,11 +16,11 @@
#ifndef AT91_WDT_H
#define AT91_WDT_H
-#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
+#define AT91_WDT_CR 0x00 /* Watchdog Control Register */
#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
-#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
+#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
@@ -30,7 +30,7 @@
#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
-#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
+#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 725c84bfdd7..9db808349f8 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -68,17 +68,23 @@ static int max_timeout;
static inline void ath79_wdt_keepalive(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG);
}
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static inline void ath79_wdt_disable(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static int ath79_wdt_set_timeout(int val)
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 5064e831752..8dc7de641e2 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -311,18 +311,7 @@ static struct platform_driver bcm63xx_wdt = {
}
};
-static int __init bcm63xx_wdt_init(void)
-{
- return platform_driver_register(&bcm63xx_wdt);
-}
-
-static void __exit bcm63xx_wdt_exit(void)
-{
- platform_driver_unregister(&bcm63xx_wdt);
-}
-
-module_init(bcm63xx_wdt_init);
-module_exit(bcm63xx_wdt_exit);
+module_platform_driver(bcm63xx_wdt);
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 03f449a430d..5b89f7d6cd0 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -76,8 +76,6 @@ static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
static struct device *parent;
/*
@@ -461,6 +459,10 @@ out:
}
#ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index edd3475f41d..251c863d71d 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -39,7 +39,7 @@
static int verbose;
static int port = 0x91;
static int ticks = 10000;
-static spinlock_t cpu5wdt_lock;
+static DEFINE_SPINLOCK(cpu5wdt_lock);
#define PFX "cpu5wdt: "
@@ -223,7 +223,6 @@ static int __devinit cpu5wdt_init(void)
"port=0x%x, verbose=%i\n", port, verbose);
init_completion(&cpu5wdt_device.stop);
- spin_lock_init(&cpu5wdt_lock);
cpu5wdt_device.queue = 0;
setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 1e013e8457b..1b793dfd868 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -687,15 +687,4 @@ static struct platform_driver cpwd_driver = {
.remove = __devexit_p(cpwd_remove),
};
-static int __init cpwd_init(void)
-{
- return platform_driver_register(&cpwd_driver);
-}
-
-static void __exit cpwd_exit(void)
-{
- platform_driver_unregister(&cpwd_driver);
-}
-
-module_init(cpwd_init);
-module_exit(cpwd_exit);
+module_platform_driver(cpwd_driver);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 51b5551b4e3..c8c5c8032bc 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -271,18 +271,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(davinci_wdt_remove),
};
-static int __init davinci_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit davinci_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(davinci_wdt_init);
-module_exit(davinci_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("DaVinci Watchdog Driver");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index f10f8c0abba..1b0e3dd81c1 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -358,17 +358,7 @@ static struct platform_driver dw_wdt_driver = {
},
};
-static int __init dw_wdt_watchdog_init(void)
-{
- return platform_driver_register(&dw_wdt_driver);
-}
-module_init(dw_wdt_watchdog_init);
-
-static void __exit dw_wdt_watchdog_exit(void)
-{
- platform_driver_unregister(&dw_wdt_driver);
-}
-module_exit(dw_wdt_watchdog_exit);
+module_platform_driver(dw_wdt_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 41018d429ab..3946c51099c 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -64,7 +64,7 @@
static unsigned long eurwdt_is_open;
static int eurwdt_timeout;
static char eur_expect_close;
-static spinlock_t eurwdt_lock;
+static DEFINE_SPINLOCK(eurwdt_lock);
/*
* You must set these - there is no sane way to probe for this board.
@@ -446,8 +446,6 @@ static int __init eurwdt_init(void)
goto outreg;
}
- spin_lock_init(&eurwdt_lock);
-
ret = misc_register(&eurwdt_miscdev);
if (ret) {
printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n",
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3774c9b8dac..8464ea1c36a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
asminline_call(&cmn_regs, bios32_entrypoint);
if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
if ((physical_bios_base + physical_bios_offset)) {
cru_rom_addr =
ioremap(cru_physical_address, cru_length);
- if (cru_rom_addr)
+ if (cru_rom_addr) {
+ set_memory_x((unsigned long)cru_rom_addr, cru_length);
retval = 0;
+ }
}
printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index ba6ad662635..99796c5d913 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
module_param(turn_SMI_watchdog_clear_off, int, 0);
MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
- "Turn off SMI clearing watchdog (default=0)");
+ "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
/*
* Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out_unmap;
}
- if (turn_SMI_watchdog_clear_off) {
+ if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 195e0f798e7..c7481ad5162 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -68,7 +68,7 @@ static char asr_expect_close;
static unsigned int asr_type, asr_base, asr_length;
static unsigned int asr_read_addr, asr_write_addr;
static unsigned char asr_toggle_mask, asr_disable_mask;
-static spinlock_t asr_lock;
+static DEFINE_SPINLOCK(asr_lock);
static void __asr_toggle(void)
{
@@ -386,8 +386,6 @@ static int __init ibmasr_init(void)
if (!asr_type)
return -ENODEV;
- spin_lock_init(&asr_lock);
-
rc = asr_get_base_address();
if (rc)
return rc;
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 1cc5609666d..1475e09f9af 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -28,7 +28,7 @@
#define PFX "indydog: "
static unsigned long indydog_alive;
-static spinlock_t indydog_lock;
+static DEFINE_SPINLOCK(indydog_lock);
#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
@@ -185,8 +185,6 @@ static int __init watchdog_init(void)
{
int ret;
- spin_lock_init(&indydog_lock);
-
ret = register_reboot_notifier(&indydog_notifier);
if (ret) {
printk(KERN_ERR PFX
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index aef94789019..82fa7a92a8d 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -37,7 +37,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned long wdt_status;
static unsigned long boot_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -226,9 +226,6 @@ static int __init iop_wdt_init(void)
{
int ret;
- spin_lock_init(&wdt_lock);
-
-
/* check if the reset was caused by the watchdog timer */
boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index e86952a7168..084f71aa855 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -32,7 +32,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -189,7 +189,6 @@ static int __init ixp2000_wdt_init(void)
return -EIO;
}
wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
- spin_lock_init(&wdt_lock);
return misc_register(&ixp2000_wdt_miscdev);
}
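Note: several of the watchdog hunks above and below (ar7, cpu5wdt, eurotechwdt, ibmasr, indydog, iop, ixp2000, ixp4xx, ks8695, omap, orion) pair a switch to DEFINE_SPINLOCK() with removal of a runtime spin_lock_init() call. The two forms are equivalent; the macro simply performs the initialisation statically, which is what makes it safe to drop the init call from the probe/init paths. A small sketch, not taken from the patch:

#include <linux/spinlock.h>

/* before (needs a runtime call before first use):
 *	static spinlock_t wdt_lock;
 *	...
 *	spin_lock_init(&wdt_lock);
 */

/* after: the lock is fully initialised at compile time, no init call needed */
static DEFINE_SPINLOCK(wdt_lock);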
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index e02c0ecda26..4fc2e9ac26f 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -181,7 +181,6 @@ static int __init ixp4xx_wdt_init(void)
return -ENODEV;
}
- spin_lock_init(&wdt_lock);
boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
WDIOF_CARDRESET : 0;
ret = misc_register(&ixp4xx_wdt_miscdev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 684ba01fb54..17ef300bccc 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -295,18 +295,7 @@ static struct platform_driver jz4740_wdt_driver = {
},
};
-
-static int __init jz4740_wdt_init(void)
-{
- return platform_driver_register(&jz4740_wdt_driver);
-}
-module_init(jz4740_wdt_init);
-
-static void __exit jz4740_wdt_exit(void)
-{
- platform_driver_unregister(&jz4740_wdt_driver);
-}
-module_exit(jz4740_wdt_exit);
+module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 811471903e8..51757a520e8 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
static unsigned long ks8695wdt_busy;
-static spinlock_t ks8695_lock;
+static DEFINE_SPINLOCK(ks8695_lock);
/* ......................................................................... */
@@ -288,7 +288,6 @@ static struct platform_driver ks8695wdt_driver = {
static int __init ks8695_wdt_init(void)
{
- spin_lock_init(&ks8695_lock);
/* Check that the heartbeat value is within range;
if not reset to the default */
if (ks8695_wdt_settimeout(wdt_time)) {
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 102aed0efbf..d3a63be2e28 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -222,9 +222,6 @@ ltq_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ltq_wdt_miscdev);
- if (ltq_wdt_membase)
- iounmap(ltq_wdt_membase);
-
return 0;
}
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 73ba2fd8e59..af63ecfbfa6 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -364,18 +364,7 @@ static struct platform_driver max63xx_wdt_driver = {
},
};
-static int __init max63xx_wdt_init(void)
-{
- return platform_driver_register(&max63xx_wdt_driver);
-}
-
-static void __exit max63xx_wdt_exit(void)
-{
- platform_driver_unregister(&max63xx_wdt_driver);
-}
-
-module_init(max63xx_wdt_init);
-module_exit(max63xx_wdt_exit);
+module_platform_driver(max63xx_wdt_driver);
MODULE_AUTHOR("Marc Zyngier <maz@misterjones.org>");
MODULE_DESCRIPTION("max63xx Watchdog Driver");
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ac37bb82392..c29e31d99fe 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -253,18 +253,7 @@ static struct platform_driver mtx1_wdt_driver = {
.driver.owner = THIS_MODULE,
};
-static int __init mtx1_wdt_init(void)
-{
- return platform_driver_register(&mtx1_wdt_driver);
-}
-
-static void __exit mtx1_wdt_exit(void)
-{
- platform_driver_unregister(&mtx1_wdt_driver);
-}
-
-module_init(mtx1_wdt_init);
-module_exit(mtx1_wdt_exit);
+module_platform_driver(mtx1_wdt_driver);
MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 6cee33d4b16..50359bad917 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -334,18 +334,7 @@ static struct platform_driver nuc900wdt_driver = {
},
};
-static int __init nuc900_wdt_init(void)
-{
- return platform_driver_register(&nuc900wdt_driver);
-}
-
-static void __exit nuc900_wdt_exit(void)
-{
- platform_driver_unregister(&nuc900wdt_driver);
-}
-
-module_init(nuc900_wdt_init);
-module_exit(nuc900_wdt_exit);
+module_platform_driver(nuc900wdt_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("Watchdog driver for NUC900");
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 4ec741ac952..f359ab85c3b 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -414,18 +414,7 @@ static struct platform_driver xwdt_driver = {
},
};
-static int __init xwdt_init(void)
-{
- return platform_driver_register(&xwdt_driver);
-}
-
-static void __exit xwdt_exit(void)
-{
- platform_driver_unregister(&xwdt_driver);
-}
-
-module_init(xwdt_init);
-module_exit(xwdt_exit);
+module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 2b4acb86c19..4b33e3fd726 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -55,7 +55,7 @@ module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
static unsigned int wdt_trgr_pattern = 0x1234;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
struct omap_wdt_dev {
void __iomem *base; /* physical */
@@ -232,6 +232,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
if (cpu_is_omap24xx())
return put_user(omap_prcm_get_reset_sources(),
(int __user *)arg);
+ return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
@@ -437,19 +438,7 @@ static struct platform_driver omap_wdt_driver = {
},
};
-static int __init omap_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&omap_wdt_driver);
-}
-
-static void __exit omap_wdt_exit(void)
-{
- platform_driver_unregister(&omap_wdt_driver);
-}
-
-module_init(omap_wdt_init);
-module_exit(omap_wdt_exit);
+module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 2d9fb96a9ee..4ad78f86851 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -41,7 +41,7 @@ static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
static unsigned int wdt_tclk;
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
static void orion_wdt_ping(void)
{
@@ -294,19 +294,7 @@ static struct platform_driver orion_wdt_driver = {
},
};
-static int __init orion_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&orion_wdt_driver);
-}
-
-static void __exit orion_wdt_exit(void)
-{
- platform_driver_unregister(&orion_wdt_driver);
-}
-
-module_init(orion_wdt_init);
-module_exit(orion_wdt_exit);
+module_platform_driver(orion_wdt_driver);
MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
MODULE_DESCRIPTION("Orion Processor Watchdog");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 748a74bd85e..d8de1ddd176 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -827,37 +827,4 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
}
-
-
-/**
- * usb_pcwd_init
- */
-static int __init usb_pcwd_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&usb_pcwd_driver);
- if (result) {
- printk(KERN_ERR PFX "usb_register failed. Error number %d\n",
- result);
- return result;
- }
-
- printk(KERN_INFO PFX DRIVER_DESC " v" DRIVER_VERSION "\n");
- return 0;
-}
-
-
-/**
- * usb_pcwd_exit
- */
-static void __exit usb_pcwd_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_pcwd_driver);
-}
-
-
-module_init(usb_pcwd_init);
-module_exit(usb_pcwd_exit);
+module_usb_driver(usb_pcwd_driver);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 61493322556..bd143c9dd3e 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -334,18 +334,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(pnx4008_wdt_remove),
};
-static int __init pnx4008_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit pnx4008_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(pnx4008_wdt_init);
-module_exit(pnx4008_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index d4c29b5311a..bf7bc8aa1c0 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -332,18 +332,7 @@ static struct platform_driver rc32434_wdt_driver = {
}
};
-static int __init rc32434_wdt_init(void)
-{
- return platform_driver_register(&rc32434_wdt_driver);
-}
-
-static void __exit rc32434_wdt_exit(void)
-{
- platform_driver_unregister(&rc32434_wdt_driver);
-}
-
-module_init(rc32434_wdt_init);
-module_exit(rc32434_wdt_exit);
+module_platform_driver(rc32434_wdt_driver);
MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
"Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 428f8a1583e..042ccc56ae2 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -293,18 +293,7 @@ static struct platform_driver rdc321x_wdt_driver = {
},
};
-static int __init rdc321x_wdt_init(void)
-{
- return platform_driver_register(&rdc321x_wdt_driver);
-}
-
-static void __exit rdc321x_wdt_exit(void)
-{
- platform_driver_unregister(&rdc321x_wdt_driver);
-}
-
-module_init(rdc321x_wdt_init);
-module_exit(rdc321x_wdt_exit);
+module_platform_driver(rdc321x_wdt_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x watchdog driver");
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 109b533896b..c7e17ceafa6 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -247,15 +247,4 @@ static struct platform_driver riowd_driver = {
.remove = __devexit_p(riowd_remove),
};
-static int __init riowd_init(void)
-{
- return platform_driver_register(&riowd_driver);
-}
-
-static void __exit riowd_exit(void)
-{
- platform_driver_unregister(&riowd_driver);
-}
-
-module_init(riowd_init);
-module_exit(riowd_exit);
+module_platform_driver(riowd_driver);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index a79e3840782..4bc3744e14e 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -378,6 +378,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
"cannot start\n");
}
+ watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index cc2cfbe33b3..eef1524ae52 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
{
.id = 0x00141805,
.mask = 0x00ffffff,
@@ -359,6 +359,8 @@ static struct amba_id sp805_wdt_ids[] __initdata = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, sp805_wdt_ids);
+
static struct amba_driver sp805_wdt_driver = {
.drv = {
.name = MODULE_NAME,
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
index ac2346a452e..4c2a4e8698f 100644
--- a/drivers/watchdog/stmp3xxx_wdt.c
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -272,18 +272,7 @@ static struct platform_driver platform_wdt_driver = {
.resume = stmp3xxx_wdt_resume,
};
-static int __init stmp3xxx_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit stmp3xxx_wdt_exit(void)
-{
- return platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(stmp3xxx_wdt_init);
-module_exit(stmp3xxx_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 5a90a4a871d..1490293dc7d 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -506,17 +506,7 @@ static struct platform_driver ts72xx_wdt_driver = {
},
};
-static __init int ts72xx_wdt_init(void)
-{
- return platform_driver_register(&ts72xx_wdt_driver);
-}
-module_init(ts72xx_wdt_init);
-
-static __exit void ts72xx_wdt_exit(void)
-{
- platform_driver_unregister(&ts72xx_wdt_driver);
-}
-module_exit(ts72xx_wdt_exit);
+module_platform_driver(ts72xx_wdt_driver);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("TS-72xx SBC Watchdog");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index b5045ca7e61..0764c6239b9 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -256,17 +256,7 @@ static struct platform_driver twl4030_wdt_driver = {
},
};
-static int __devinit twl4030_wdt_init(void)
-{
- return platform_driver_register(&twl4030_wdt_driver);
-}
-module_init(twl4030_wdt_init);
-
-static void __devexit twl4030_wdt_exit(void)
-{
- platform_driver_unregister(&twl4030_wdt_driver);
-}
-module_exit(twl4030_wdt_exit);
+module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
new file mode 100644
index 00000000000..026b4bbfa0a
--- /dev/null
+++ b/drivers/watchdog/via_wdt.c
@@ -0,0 +1,267 @@
+/*
+ * VIA Chipset Watchdog Driver
+ *
+ * Copyright (C) 2011 Sigfox
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Marc Vertes <marc.vertes@sigfox.com>
+ * Based on a preliminary version from Harald Welte <HaraldWelte@viatech.com>
+ * Timer code by Wim Van Sebroeck <wim@iguana.be>
+ *
+ * Caveat: PnP must be enabled in BIOS to allow full access to watchdog
+ * control registers. If not, the watchdog must be configured in BIOS manually.
+ */
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/watchdog.h>
+
+/* Configuration registers relative to the pci device */
+#define VIA_WDT_MMIO_BASE 0xe8 /* MMIO region base address */
+#define VIA_WDT_CONF 0xec /* watchdog enable state */
+
+/* Relevant bits for the VIA_WDT_CONF register */
+#define VIA_WDT_CONF_ENABLE 0x01 /* 1: enable watchdog */
+#define VIA_WDT_CONF_MMIO 0x02 /* 1: enable watchdog MMIO */
+
+/*
+ * The MMIO region contains the watchdog control register and the
+ * hardware timer counter.
+ */
+#define VIA_WDT_MMIO_LEN 8 /* MMIO region length in bytes */
+#define VIA_WDT_CTL 0 /* MMIO addr+0: state/control reg. */
+#define VIA_WDT_COUNT 4 /* MMIO addr+4: timer counter reg. */
+
+/* Bits for the VIA_WDT_CTL register */
+#define VIA_WDT_RUNNING 0x01 /* 0: stop, 1: running */
+#define VIA_WDT_FIRED 0x02 /* 1: restarted by expired watchdog */
+#define VIA_WDT_PWROFF 0x04 /* 0: reset, 1: poweroff */
+#define VIA_WDT_DISABLED 0x08 /* 1: timer is disabled */
+#define VIA_WDT_TRIGGER 0x80 /* 1: start a new countdown */
+
+/* Hardware heartbeat in seconds */
+#define WDT_HW_HEARTBEAT 1
+
+/* Timer heartbeat (500ms) */
+#define WDT_HEARTBEAT (HZ/2) /* should be <= ((WDT_HW_HEARTBEAT*HZ)/2) */
+
+/* User space timeout in seconds */
+#define WDT_TIMEOUT_MAX 1023 /* approx. 17 min. */
+#define WDT_TIMEOUT 60
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, between 1 and 1023 "
+ "(default = " __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default = " __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static struct watchdog_device wdt_dev;
+static struct resource wdt_res;
+static void __iomem *wdt_mem;
+static unsigned int mmio;
+static void wdt_timer_tick(unsigned long data);
+static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0);
+ /* The timer that pings the watchdog */
+static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
+
+static inline void wdt_reset(void)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl | VIA_WDT_TRIGGER, wdt_mem);
+}
+
+/*
+ * Timer tick: the timer will make sure that the watchdog timer hardware
+ * is being reset in time. The conditions to do this are:
+ * 1) the watchdog timer has been started and /dev/watchdog is open
+ * and there is still time left before userspace should send the
+ * next heartbeat/ping. (note: the internal heartbeat is much smaller
+ * than the external/userspace heartbeat).
+ * 2) the watchdog timer has been stopped by userspace.
+ */
+static void wdt_timer_tick(unsigned long data)
+{
+ if (time_before(jiffies, next_heartbeat) ||
+ (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ wdt_reset();
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ } else
+ pr_crit("I will reboot your machine !\n");
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+ /* calculate when the next userspace timeout will be */
+ next_heartbeat = jiffies + timeout * HZ;
+ return 0;
+}
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(timeout, wdt_mem + VIA_WDT_COUNT);
+ writel(ctl | VIA_WDT_RUNNING | VIA_WDT_TRIGGER, wdt_mem);
+ wdt_ping(wdd);
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl & ~VIA_WDT_RUNNING, wdt_mem);
+ return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_timeout)
+{
+ if (new_timeout < 1 || new_timeout > WDT_TIMEOUT_MAX)
+ return -EINVAL;
+ writel(new_timeout, wdt_mem + VIA_WDT_COUNT);
+ timeout = new_timeout;
+ return 0;
+}
+
+static const struct watchdog_info wdt_info = {
+ .identity = "VIA watchdog",
+ .options = WDIOF_CARDRESET |
+ WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .ping = wdt_ping,
+ .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_device wdt_dev = {
+ .info = &wdt_info,
+ .ops = &wdt_ops,
+};
+
+static int __devinit wdt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned char conf;
+ int ret = -ENODEV;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate a MMIO region which contains watchdog control register
+ * and counter, then configure the watchdog to use this region.
+ * This is possible only if PnP is properly enabled in BIOS.
+ * If not, the watchdog must be configured in BIOS manually.
+ */
+ if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
+ 0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
+ dev_err(&pdev->dev, "MMIO allocation failed\n");
+ goto err_out_disable_device;
+ }
+
+ pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
+ pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
+ conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
+ pci_write_config_byte(pdev, VIA_WDT_CONF, conf);
+
+ pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
+ if (mmio) {
+ dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
+ } else {
+ dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
+ goto err_out_resource;
+ }
+
+ if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
+ dev_err(&pdev->dev, "MMIO region busy\n");
+ goto err_out_resource;
+ }
+
+ wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
+ if (wdt_mem == NULL) {
+ dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
+ goto err_out_release;
+ }
+
+ wdt_dev.timeout = timeout;
+ watchdog_set_nowayout(&wdt_dev, nowayout);
+ if (readl(wdt_mem) & VIA_WDT_FIRED)
+ wdt_dev.bootstatus |= WDIOF_CARDRESET;
+
+ ret = watchdog_register_device(&wdt_dev);
+ if (ret)
+ goto err_out_iounmap;
+
+ /* start triggering, in case of watchdog already enabled by BIOS */
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+
+err_out_iounmap:
+ iounmap(wdt_mem);
+err_out_release:
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+err_out_resource:
+ release_resource(&wdt_res);
+err_out_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit wdt_remove(struct pci_dev *pdev)
+{
+ watchdog_unregister_device(&wdt_dev);
+ del_timer(&timer);
+ iounmap(wdt_mem);
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+ release_resource(&wdt_res);
+ pci_disable_device(pdev);
+}
+
+DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
+ { 0 }
+};
+
+static struct pci_driver wdt_driver = {
+ .name = "via_wdt",
+ .id_table = wdt_pci_table,
+ .probe = wdt_probe,
+ .remove = __devexit_p(wdt_remove),
+};
+
+static int __init wdt_init(void)
+{
+ if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
+ timeout = WDT_TIMEOUT;
+ return pci_register_driver(&wdt_driver);
+}
+
+static void __exit wdt_exit(void)
+{
+ pci_unregister_driver(&wdt_driver);
+}
+
+module_init(wdt_init);
+module_exit(wdt_exit);
+
+MODULE_AUTHOR("Marc Vertes");
+MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset");
+MODULE_LICENSE("GPL");
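For context on how the new via_wdt timer-tick design is exercised from userspace: wdt_timer_tick() keeps re-arming the roughly one-second hardware counter as long as the driver is either stopped or has seen a ping within the configured timeout, so a daemon only has to ping at the usual /dev/watchdog cadence. A minimal userspace sketch (assuming the generic watchdog core exposes the device as /dev/watchdog and nowayout is not set; not part of the patch):

/* Hedged sketch: drive the via_wdt device through the standard watchdog chardev. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 60;			/* seconds, must be <= WDT_TIMEOUT_MAX */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* ends up in wdt_set_timeout() */
	for (int i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* wdt_ping(): pushes next_heartbeat out */
		sleep(timeout / 2);
	}
	write(fd, "V", 1);			/* magic close, honoured because WDIOF_MAGICCLOSE is set */
	close(fd);
	return 0;
}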
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index dd5d6754875..576a388a116 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -4,7 +4,7 @@
* (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
* added support for W83627THF.
*
- * (c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
+ * (c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
*
* Based on advantechwdt.c which is based on wdt.c.
* Original copyright messages:
@@ -401,6 +401,6 @@ module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
+MODULE_AUTHOR("PĂĄdraig Brady <P@draigBrady.com>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index e789a47db41..263c883f080 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -199,7 +199,8 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
- driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
if (!driver_data) {
dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
@@ -213,11 +214,9 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
+ watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
- if (nowayout)
- wm831x_wdt->status |= WDOG_NO_WAY_OUT;
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
reg &= WM831X_WDOG_TO_MASK;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
@@ -252,7 +251,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
ret);
- goto err_alloc;
+ goto err;
}
ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -294,8 +293,6 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
err_gpio:
if (driver_data->update_gpio)
gpio_free(driver_data->update_gpio);
-err_alloc:
- kfree(driver_data);
err:
return ret;
}
@@ -320,17 +317,7 @@ static struct platform_driver wm831x_wdt_driver = {
},
};
-static int __init wm831x_wdt_init(void)
-{
- return platform_driver_register(&wm831x_wdt_driver);
-}
-module_init(wm831x_wdt_init);
-
-static void __exit wm831x_wdt_exit(void)
-{
- platform_driver_unregister(&wm831x_wdt_driver);
-}
-module_exit(wm831x_wdt_exit);
+module_platform_driver(wm831x_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index b68d928c8f9..909c78650d3 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -311,17 +311,7 @@ static struct platform_driver wm8350_wdt_driver = {
},
};
-static int __init wm8350_wdt_init(void)
-{
- return platform_driver_register(&wm8350_wdt_driver);
-}
-module_init(wm8350_wdt_init);
-
-static void __exit wm8350_wdt_exit(void)
-{
- platform_driver_unregister(&wm8350_wdt_driver);
-}
-module_exit(wm8350_wdt_exit);
+module_platform_driver(wm8350_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 Watchdog");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8795480c235..a1ced521cf7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -86,6 +86,7 @@ config XEN_BACKEND
config XENFS
tristate "Xen filesystem"
+ select XEN_PRIVCMD
default y
help
The xen filesystem provides a way for domains to share
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
xen-pciback.hide=(03:00.0)(04:00.0)
If in doubt, say m.
+
+config XEN_PRIVCMD
+ tristate
+ depends on XEN
+ default m
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 974fffdf22b..aa31337192c 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
+obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
+xen-privcmd-y := privcmd.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6e075cdd0c6..e5e5812a101 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -87,6 +87,7 @@ enum xen_irq_type {
*/
struct irq_info {
struct list_head list;
+ int refcnt;
enum xen_irq_type type; /* type */
unsigned irq;
unsigned short evtchn; /* event channel */
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
panic("Unable to allocate metadata for IRQ%d\n", irq);
info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
irq_set_handler_data(irq, info);
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
irq_set_handler_data(irq, NULL);
+ WARN_ON(info->refcnt > 0);
+
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
- goto out; /* XXX need refcount? */
+ goto out;
}
irq = xen_allocate_irq_gsi(gsi);
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
{
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
mutex_lock(&irq_mapping_update_lock);
+ if (info->refcnt > 0) {
+ info->refcnt--;
+ if (info->refcnt != 0)
+ goto done;
+ }
+
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
+ done:
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+int evtchn_make_refcounted(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ struct irq_info *info;
+
+ if (irq == -1)
+ return -ENOENT;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ return -ENOENT;
+
+ WARN_ON(info->refcnt != -1);
+
+ info->refcnt = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+int evtchn_get(unsigned int evtchn)
+{
+ int irq;
+ struct irq_info *info;
+ int err = -ENOENT;
+
+ if (evtchn >= NR_EVENT_CHANNELS)
+ return -EINVAL;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+ irq = evtchn_to_irq[evtchn];
+ if (irq == -1)
+ goto done;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ goto done;
+
+ err = -EINVAL;
+ if (info->refcnt <= 0)
+ goto done;
+
+ info->refcnt++;
+ err = 0;
+ done:
+ mutex_unlock(&irq_mapping_update_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(evtchn_get);
+
+void evtchn_put(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ if (WARN_ON(irq == -1))
+ return;
+ unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(evtchn_put);
+
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
int irq = per_cpu(ipi_to_irq, cpu)[vector];
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index dbc13e94b61..b1f60a0c0be 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
u->name, (void *)(unsigned long)port);
if (rc >= 0)
- rc = 0;
+ rc = evtchn_make_refcounted(port);
return rc;
}
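Note on the new event-channel reference counting: events.c now gives each irq_info a refcnt that starts at -1 (not refcounted). evtchn_make_refcounted() switches a channel into refcounted mode (as the evtchn device does right after binding, above), evtchn_get() only succeeds on such channels, and evtchn_put() drops a reference by going through unbind_from_irq(), which only closes the channel when the count reaches zero. The gntalloc/gntdev ioctls below rely on the "get the new port before putting the old one" ordering spelled out in their comments; a condensed sketch of that rule (swap_notify_event() is a hypothetical helper, shown only for illustration):

/* Hypothetical helper illustrating the get-before-put ordering used by the
 * gntalloc/gntdev UNMAP_NOTIFY ioctls. */
static int swap_notify_event(struct gntalloc_gref *gref, unsigned int new_port)
{
	/* take a reference on the new channel first ... */
	if (evtchn_get(new_port))
		return -EINVAL;	/* not refcounted, or no such channel */

	/* ... so releasing the old one cannot tear down a channel that is
	 * about to be reused as the notify target */
	if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(gref->notify.event);

	gref->notify.event = new_port;
	gref->notify.flags |= UNMAP_NOTIFY_SEND_EVENT;
	return 0;
}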
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e1c4c6e5b46..934985d14c2 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
"the gntalloc device");
static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
static int gref_size;
struct notify_info {
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
uint64_t index;
};
+struct gntalloc_vma_private_data {
+ struct gntalloc_gref *gref;
+ int users;
+ int count;
+};
+
static void __del_gref(struct gntalloc_gref *gref);
static void do_cleanup(void)
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
}
/* Add to gref lists. */
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
list_splice_tail(&queue_gref, &gref_list);
list_splice_tail(&queue_file, &priv->list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
undo:
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref_size -= (op->count - i);
list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +173,7 @@ undo:
*/
if (unlikely(!list_empty(&queue_gref)))
list_splice_tail(&queue_gref, &gref_list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
tmp[gref->notify.pgoff] = 0;
kunmap(gref->page);
}
- if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(gref->notify.event);
+ evtchn_put(gref->notify.event);
+ }
gref->notify.flags = 0;
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
return;
+
+ gnttab_free_grant_reference(gref->gref_id);
}
gref_size--;
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
pr_debug("%s: priv %p\n", __func__, priv);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
while (!list_empty(&priv->list)) {
gref = list_entry(priv->list.next,
struct gntalloc_gref, next_file);
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
__del_gref(gref);
}
kfree(priv);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
}
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
goto out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
/* Clean up pages that were at zero (local) users but were still mapped
* by remote domains. Since those pages count towards the limit that we
* are about to enforce, removing them here is a good idea.
*/
do_cleanup();
if (gref_size + op.count > limit) {
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = -ENOSPC;
goto out_free;
}
gref_size += op.count;
op.index = priv->index;
priv->index += op.count * PAGE_SIZE;
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = add_grefs(&op, gref_ids, priv);
if (rc < 0)
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
goto dealloc_grant_out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, op.index, op.count);
if (gref) {
/* Remove from the file list only, and decrease reference count.
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
do_cleanup();
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
dealloc_grant_out:
return rc;
}
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
index = op.index & ~(PAGE_SIZE - 1);
pgoff = op.index & (PAGE_SIZE - 1);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, index, 1);
if (!gref) {
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
goto unlock_out;
}
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+ }
+
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(gref->notify.event);
+
gref->notify.flags = op.action;
gref->notify.pgoff = pgoff;
gref->notify.event = op.event_channel_port;
rc = 0;
+
unlock_out:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
static void gntalloc_vma_open(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users++;
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users++;
+ mutex_unlock(&gref_mutex);
}
static void gntalloc_vma_close(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+ struct gntalloc_gref *gref, *next;
+ int i;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users--;
- if (gref->users == 0)
- __del_gref(gref);
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users--;
+ if (priv->users == 0) {
+ gref = priv->gref;
+ for (i = 0; i < priv->count; i++) {
+ gref->users--;
+ next = list_entry(gref->next_gref.next,
+ struct gntalloc_gref, next_gref);
+ if (gref->users == 0)
+ __del_gref(gref);
+ gref = next;
+ }
+ kfree(priv);
+ }
+ mutex_unlock(&gref_mutex);
}
static struct vm_operations_struct gntalloc_vmops = {
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_vma_private_data *vm_priv;
struct gntalloc_gref *gref;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
int rv, i;
- pr_debug("%s: priv %p, page %lu+%d\n", __func__,
- priv, vma->vm_pgoff, count);
-
if (!(vma->vm_flags & VM_SHARED)) {
printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
return -EINVAL;
}
- spin_lock(&gref_lock);
+ vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
+ if (!vm_priv)
+ return -ENOMEM;
+
+ mutex_lock(&gref_mutex);
+
+ pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
+ priv, vm_priv, vma->vm_pgoff, count);
+
gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
if (gref == NULL) {
rv = -ENOENT;
pr_debug("%s: Could not find grant reference",
__func__);
+ kfree(vm_priv);
goto out_unlock;
}
- vma->vm_private_data = gref;
+ vm_priv->gref = gref;
+ vm_priv->users = 1;
+ vm_priv->count = count;
+
+ vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
vma->vm_ops = &gntalloc_vmops;
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
rv = 0;
out_unlock:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rv;
}
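
The gntalloc hunks above reference a per-VMA bookkeeping structure whose definition falls outside this excerpt. Judging only from the fields used (priv->gref, priv->users, priv->count), it presumably looks something like the sketch below; the exact layout in the patch may differ.

/*
 * Inferred shape of the per-VMA private data handed to gntalloc_vmops:
 * gref points at the first grant backing the mapping, users counts the
 * VMAs sharing this structure, and count is the number of pages mapped.
 */
struct gntalloc_vma_private_data {
	struct gntalloc_gref *gref;
	int users;
	int count;
};
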
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index afca14d9042..99d8151c824 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
atomic_sub(map->count, &pages_mapped);
- if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
+ evtchn_put(map->notify.event);
+ }
if (map->pages) {
if (!use_ptemod)
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
+ pages, true);
if (err)
return err;
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
struct ioctl_gntdev_unmap_notify op;
struct grant_map *map;
int rc;
+ int out_flags;
+ unsigned int out_event;
if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
return -EINVAL;
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port))
+ return -EINVAL;
+ }
+
+ out_flags = op.action;
+ out_event = op.event_channel_port;
+
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
goto unlock_out;
}
+ out_flags = map->notify.flags;
+ out_event = map->notify.event;
+
map->notify.flags = op.action;
map->notify.addr = op.index - (map->index << PAGE_SHIFT);
map->notify.event = op.event_channel_port;
+
rc = 0;
+
unlock_out:
spin_unlock(&priv->lock);
+
+ /* Drop the reference to the event channel we did not save in the map */
+ if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(out_event);
+
return rc;
}
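
For context, the unmap-notify ioctl touched above is driven from user space roughly as sketched below. The struct and field names (ioctl_gntdev_unmap_notify, index, action, event_channel_port) come from the hunks themselves; the header path and the IOCTL_GNTDEV_SET_UNMAP_NOTIFY macro are assumed from the usual gntdev UAPI and should be double-checked.

/* Hypothetical user-space caller of the unmap-notify ioctl; error
 * handling is omitted for brevity. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>		/* assumed UAPI header */

static int set_unmap_notify(int gntdev_fd, uint64_t index, uint32_t port)
{
	struct ioctl_gntdev_unmap_notify op = {
		.index = index,			/* offset of the notified byte */
		.action = UNMAP_NOTIFY_SEND_EVENT,
		.event_channel_port = port,	/* reference taken by the patch */
	};

	return ioctl(gntdev_fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &op);
}
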
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bf1c094f4eb..1cd94daa71d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -44,16 +44,19 @@
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
+#include <xen/hvc-console.h>
#include <asm/xen/hypercall.h>
#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
-
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+#define GREFS_PER_GRANT_FRAME \
+(grant_table_version == 1 ? \
+(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
+(PAGE_SIZE / sizeof(union grant_entry_v2)))
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
-static struct grant_entry *shared;
+static union {
+ struct grant_entry_v1 *v1;
+ union grant_entry_v2 *v2;
+ void *addr;
+} gnttab_shared;
+
+/* This is a structure of function pointers for the grant table. */
+struct gnttab_ops {
+ /*
+	 * Map a list of frames for storing grant entries. The frames parameter
+	 * stores the grant table address while the grant table is being set up;
+	 * nr_gframes is the number of frames to map. Returns GNTST_okay on
+	 * success and a negative value on failure.
+ */
+ int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
+ /*
+ * Release a list of frames which are mapped in map_frames for grant
+ * entry status.
+ */
+ void (*unmap_frames)(void);
+ /*
+	 * Introduce a valid entry into the grant table, granting the frame of
+	 * this grant entry to a domain for access or transfer. Ref is the
+	 * reference of the introduced grant entry, domid is the id of the
+	 * granted domain, frame is the page frame to be granted, and flags is
+	 * the status of the grant entry to be updated.
+ */
+ void (*update_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags);
+ /*
+	 * Stop granting a domain access through a grant entry. Ref is the
+	 * reference of the grant entry whose access will be stopped; readonly
+	 * is unused in this function. If the grant entry is currently mapped
+	 * for reading or writing, return failure (== 0) directly without
+	 * tearing down the grant access. Otherwise, stop grant access for
+	 * this entry and return success (== 1).
+ */
+ int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
+ /*
+	 * Stop granting a grant entry to a domain for transfer. Ref is the
+	 * reference of the grant entry whose transfer will be stopped. If the
+	 * transfer has not started, just reclaim the grant entry and return
+	 * failure (== 0). Otherwise, wait for the transfer to complete and
+	 * then return the frame.
+ */
+ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+ /*
+	 * Query the status of a grant entry. Ref is the reference of the
+	 * queried grant entry; the return value is its status. The detailed
+	 * status (writing/reading) can be extracted from the return value
+	 * with bit operations.
+ */
+ int (*query_foreign_access)(grant_ref_t ref);
+ /*
+	 * Grant a domain access to a range of bytes within the page referred
+	 * to by an available grant entry. Ref is the reference of the grant
+	 * entry to be sub-page accessed, domid is the id of the grantee
+	 * domain, frame is the frame address of the sub-page grant, flags is
+	 * the grant type and flag information, page_off is the offset of the
+	 * byte range, and length is the number of bytes to be accessed.
+ */
+ void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off, unsigned length);
+ /*
+	 * Redirect an available grant entry on domain A to another grant
+	 * reference of domain B, then allow domain C to use domain B's grant
+	 * reference transitively. Ref is an available grant entry reference
+	 * on domain A, domid is the id of domain C which accesses the grant
+	 * entry transitively, flags is the grant type and flag information,
+	 * trans_domid is the id of domain B whose grant entry is ultimately
+	 * accessed transitively, and trans_gref is the transitive grant
+	 * reference of domain B.
+ */
+ void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
+ domid_t trans_domid, grant_ref_t trans_gref);
+};
+
+static struct gnttab_ops *gnttab_interface;
+
+/* This reflects the status of grant entries, so it acts as a global value. */
+static grant_status_t *grstatus;
+
+static int grant_table_version;
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
-static void update_grant_entry(grant_ref_t ref, domid_t domid,
- unsigned long frame, unsigned flags)
+/*
+ * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ */
+static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
+{
+ gnttab_shared.v1[ref].domid = domid;
+ gnttab_shared.v1[ref].frame = frame;
+ wmb();
+ gnttab_shared.v1[ref].flags = flags;
+}
+
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
{
- /*
- * Introducing a valid entry into the grant table:
- * 1. Write ent->domid.
- * 2. Write ent->frame:
- * GTF_permit_access: Frame to which access is permitted.
- * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
- * frame, or zero if none.
- * 3. Write memory barrier (WMB).
- * 4. Write ent->flags, inc. valid type.
- */
- shared[ref].frame = frame;
- shared[ref].domid = domid;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ gnttab_shared.v2[ref].full_page.frame = frame;
wmb();
- shared[ref].flags = flags;
+ gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
/*
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly)
{
- update_grant_entry(ref, domid, frame,
+ gnttab_interface->update_entry(ref, domid, frame,
GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-int gnttab_query_foreign_access(grant_ref_t ref)
+void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
+{
+ gnttab_shared.v2[ref].sub_page.frame = frame;
+ gnttab_shared.v2[ref].sub_page.page_off = page_off;
+ gnttab_shared.v2[ref].sub_page.length = length;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_sub_page | flags;
+}
+
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
{
- u16 nflags;
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_transitive))
+ return -EPERM;
- nflags = shared[ref].flags;
+ if (gnttab_interface->update_subpage_entry == NULL)
+ return -ENOSYS;
- return nflags & (GTF_reading|GTF_writing);
+ gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
+ page_off, length);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
+
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+ int flags, unsigned page_off,
+ unsigned length)
+{
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
+ page_off, length);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
+
+bool gnttab_subpage_grants_available(void)
+{
+ return gnttab_interface->update_subpage_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
+
+void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
+ gnttab_shared.v2[ref].transitive.gref = trans_gref;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_transitive | flags;
+}
+
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_sub_page))
+ return -EPERM;
+
+ if (gnttab_interface->update_trans_entry == NULL)
+ return -ENOSYS;
+
+ gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
+ trans_gref);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
+
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+ domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
+ trans_domid, trans_gref);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
+
+bool gnttab_trans_grants_available(void)
+{
+ return gnttab_interface->update_trans_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
+
+static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+{
+ return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+}
+
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+ return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+ return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
+ u16 *pflags;
- nflags = shared[ref].flags;
+ pflags = &gnttab_shared.v1[ref].flags;
+ nflags = *pflags;
do {
flags = nflags;
if (flags & (GTF_reading|GTF_writing)) {
printk(KERN_ALERT "WARNING: g.e. still in use!\n");
return 0;
}
- } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+ } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
+
+ return 1;
+}
+
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+ gnttab_shared.v2[ref].hdr.flags = 0;
+ mb();
+ if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+ return 0;
+ } else {
+		/*
+		 * The read of grstatus needs to have acquire semantics. On
+		 * x86, reads already have that, and we just need to protect
+		 * against compiler reorderings. On other architectures we
+		 * may need a full barrier.
+		 */
+#ifdef CONFIG_X86
+ barrier();
+#else
+ mb();
+#endif
+ }
return 1;
}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ return gnttab_interface->end_foreign_access_ref(ref, readonly);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
unsigned long pfn)
{
- update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+ gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v1[ref].flags;
/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
- while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
- if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
- flags = shared[ref].flags;
+ flags = *pflags;
cpu_relax();
}
rmb(); /* Read the frame number /after/ reading completion status. */
- frame = shared[ref].frame;
+ frame = gnttab_shared.v1[ref].frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+ /*
+ * If a transfer is not even yet started, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = *pflags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = gnttab_shared.v2[ref].full_page.frame;
BUG_ON(frame == 0);
return frame;
}
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+ return gnttab_interface->end_foreign_transfer_ref(ref);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
- /* If you really wanted to do this:
- * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- *
- * The reason we do not implement it is b/c on the
- * unmap path (gnttab_unmap_refs) we have no means of
- * checking whether the page is !GNTMAP_contains_pte.
- *
- * That is without some extra data-structure to carry
- * the struct page, bool clear_pte, and list_head next
- * tuples and deal with allocation/delallocation, etc.
- *
- * The users of this API set the GNTMAP_contains_pte
- * flag so lets just return not supported until it
- * becomes neccessary to implement.
- */
- return -EOPNOTSUPP;
+ mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
return ret;
}
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct page **pages, unsigned int count)
+ struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
for (i = 0; i < count; i++) {
- ret = m2p_remove_override(pages[i], true /* clear the PTE */);
+ ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+static unsigned nr_status_frames(unsigned nr_grant_frames)
+{
+ return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+}
+
+static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+{
+ int rc;
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v1(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+}
+
+static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
+{
+ uint64_t *sframes;
+ unsigned int nr_sframes;
+ struct gnttab_get_status_frames getframes;
+ int rc;
+
+ nr_sframes = nr_status_frames(nr_gframes);
+
+	/* No need for kzalloc as it is initialized in the following
+	 * GNTTABOP_get_status_frames hypercall.
+	 */
+ sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
+ if (!sframes)
+ return -ENOMEM;
+
+ getframes.dom = DOMID_SELF;
+ getframes.nr_frames = nr_sframes;
+ set_xen_guest_handle(getframes.frame_list, sframes);
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+ &getframes, 1);
+ if (rc == -ENOSYS) {
+ kfree(sframes);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || getframes.status);
+
+ rc = arch_gnttab_map_status(sframes, nr_sframes,
+ nr_status_frames(gnttab_max_grant_frames()),
+ &grstatus);
+ BUG_ON(rc);
+ kfree(sframes);
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+ arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
return rc;
}
+	/* No need for kzalloc as it is initialized in the following
+	 * GNTTABOP_setup_table hypercall.
+	 */
frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
if (!frames)
return -ENOMEM;
@@ -567,19 +916,65 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
BUG_ON(rc || setup.status);
- rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
- &shared);
- BUG_ON(rc);
+ rc = gnttab_interface->map_frames(frames, nr_gframes);
kfree(frames);
- return 0;
+ return rc;
+}
+
+static struct gnttab_ops gnttab_v1_ops = {
+ .map_frames = gnttab_map_frames_v1,
+ .unmap_frames = gnttab_unmap_frames_v1,
+ .update_entry = gnttab_update_entry_v1,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
+ .query_foreign_access = gnttab_query_foreign_access_v1,
+};
+
+static struct gnttab_ops gnttab_v2_ops = {
+ .map_frames = gnttab_map_frames_v2,
+ .unmap_frames = gnttab_unmap_frames_v2,
+ .update_entry = gnttab_update_entry_v2,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
+ .query_foreign_access = gnttab_query_foreign_access_v2,
+ .update_subpage_entry = gnttab_update_subpage_entry_v2,
+ .update_trans_entry = gnttab_update_trans_entry_v2,
+};
+
+static void gnttab_request_version(void)
+{
+ int rc;
+ struct gnttab_set_version gsv;
+
+ gsv.version = 2;
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0) {
+ grant_table_version = 2;
+ gnttab_interface = &gnttab_v2_ops;
+ } else if (grant_table_version == 2) {
+ /*
+ * If we've already used version 2 features,
+ * but then suddenly discover that they're not
+ * available (e.g. migrating to an older
+ * version of Xen), almost unbounded badness
+ * can happen.
+ */
+ panic("we need grant tables version 2, but only version 1 is available");
+ } else {
+ grant_table_version = 1;
+ gnttab_interface = &gnttab_v1_ops;
+ }
+ printk(KERN_INFO "Grant tables using version %d layout.\n",
+ grant_table_version);
}
int gnttab_resume(void)
{
unsigned int max_nr_gframes;
+ gnttab_request_version();
max_nr_gframes = gnttab_max_grant_frames();
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
@@ -587,9 +982,10 @@ int gnttab_resume(void)
if (xen_pv_domain())
return gnttab_map(0, nr_grant_frames - 1);
- if (!shared) {
- shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
- if (shared == NULL) {
+ if (gnttab_shared.addr == NULL) {
+ gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+ PAGE_SIZE * max_nr_gframes);
+ if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING
"Failed to ioremap gnttab share frames!");
return -ENOMEM;
@@ -603,7 +999,7 @@ int gnttab_resume(void)
int gnttab_suspend(void)
{
- arch_gnttab_unmap_shared(shared, nr_grant_frames);
+ gnttab_interface->unmap_frames();
return 0;
}
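
Sub-page and transitive grants only exist in the v2 table layout, which is why the patch exports gnttab_subpage_grants_available() and gnttab_trans_grants_available() alongside the new calls. A caller is expected to probe before using them, roughly as in this illustrative sketch (the fallback policy shown is illustrative, not taken from the patch):

/* Illustrative: prefer a transitive grant when the negotiated interface
 * supports it, otherwise fall back to a plain full-page grant. */
#include <xen/grant_table.h>

static int grant_ring_to_peer(domid_t peer, unsigned long frame,
			      domid_t trans_domid, grant_ref_t trans_gref)
{
	if (gnttab_trans_grants_available())
		return gnttab_grant_foreign_access_trans(peer, 0 /* flags */,
							 trans_domid,
							 trans_gref);

	/* v1 layout: only ordinary full-page grants are possible. */
	return gnttab_grant_foreign_access(peer, frame, 0 /* read-write */);
}
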
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/privcmd.c
index dbd3b16fd13..ccee0f16bcf 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -18,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -32,6 +34,10 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
@@ -359,7 +365,6 @@ static long privcmd_ioctl(struct file *file,
return ret;
}
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -392,9 +397,39 @@ static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
-#endif
-const struct file_operations privcmd_file_ops = {
+const struct file_operations xen_privcmd_fops = {
+ .owner = THIS_MODULE,
.unlocked_ioctl = privcmd_ioctl,
.mmap = privcmd_mmap,
};
+EXPORT_SYMBOL_GPL(xen_privcmd_fops);
+
+static struct miscdevice privcmd_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/privcmd",
+ .fops = &xen_privcmd_fops,
+};
+
+static int __init privcmd_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&privcmd_dev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register Xen privcmd device\n");
+ return err;
+ }
+ return 0;
+}
+
+static void __exit privcmd_exit(void)
+{
+ misc_deregister(&privcmd_dev);
+}
+
+module_init(privcmd_init);
+module_exit(privcmd_exit);
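
With privcmd registered as a misc device named "xen/privcmd", the node normally appears as /dev/xen/privcmd; a minimal user-space open, assuming the standard udev layout, looks like this:

/* Minimal sketch: open the privcmd node created by the misc-device
 * registration above. The path assumes the usual /dev/xen/ layout. */
#include <fcntl.h>

static int open_privcmd(void)
{
	return open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
}
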
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
new file mode 100644
index 00000000000..14facaeed36
--- /dev/null
+++ b/drivers/xen/privcmd.h
@@ -0,0 +1,3 @@
+#include <linux/fs.h>
+
+extern const struct file_operations xen_privcmd_fops;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c44..19e6a204137 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
char *m = NULL;
unsigned int repeat = 3;
- nr_tbl = swioltb_nr_tbl();
+ nr_tbl = swiotlb_nr_tbl();
if (nr_tbl)
xen_io_tlb_nslabs = nr_tbl;
else {
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 9cc2259c999..3832e303c33 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/sysdev.h>
#include <linux/capability.h>
#include <xen/xen.h>
@@ -46,9 +45,9 @@
#define BALLOON_CLASS_NAME "xen_memory"
-static struct sys_device balloon_sysdev;
+static struct device balloon_dev;
-static int register_balloon(struct sys_device *sysdev);
+static int register_balloon(struct device *dev);
/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
@@ -98,9 +97,9 @@ static int __init balloon_init(void)
pr_info("xen-balloon: Initialising balloon driver.\n");
- register_balloon(&balloon_sysdev);
+ register_balloon(&balloon_dev);
- register_xen_selfballooning(&balloon_sysdev);
+ register_xen_selfballooning(&balloon_dev);
register_xenstore_notifier(&xenstore_notifier);
@@ -117,31 +116,31 @@ static void balloon_exit(void)
module_exit(balloon_exit);
#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+ static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
-static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
-static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
-static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}
-static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_target_kb(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -158,11 +157,11 @@ static ssize_t store_target_kb(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR,
show_target_kb, store_target_kb);
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -170,8 +169,8 @@ static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr
<< PAGE_SHIFT);
}
-static ssize_t store_target(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_target(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -188,23 +187,23 @@ static ssize_t store_target(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
show_target, store_target);
-static struct sysdev_attribute *balloon_attrs[] = {
- &attr_target_kb,
- &attr_target,
- &attr_schedule_delay.attr,
- &attr_max_schedule_delay.attr,
- &attr_retry_count.attr,
- &attr_max_retry_count.attr
+static struct device_attribute *balloon_attrs[] = {
+ &dev_attr_target_kb,
+ &dev_attr_target,
+ &dev_attr_schedule_delay.attr,
+ &dev_attr_max_schedule_delay.attr,
+ &dev_attr_retry_count.attr,
+ &dev_attr_max_retry_count.attr
};
static struct attribute *balloon_info_attrs[] = {
- &attr_current_kb.attr,
- &attr_low_kb.attr,
- &attr_high_kb.attr,
+ &dev_attr_current_kb.attr,
+ &dev_attr_low_kb.attr,
+ &dev_attr_high_kb.attr,
NULL
};
@@ -213,34 +212,35 @@ static struct attribute_group balloon_info_group = {
.attrs = balloon_info_attrs
};
-static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME
+static struct bus_type balloon_subsys = {
+ .name = BALLOON_CLASS_NAME,
+ .dev_name = BALLOON_CLASS_NAME,
};
-static int register_balloon(struct sys_device *sysdev)
+static int register_balloon(struct device *dev)
{
int i, error;
- error = sysdev_class_register(&balloon_sysdev_class);
+ error = bus_register(&balloon_subsys);
if (error)
return error;
- sysdev->id = 0;
- sysdev->cls = &balloon_sysdev_class;
+ dev->id = 0;
+ dev->bus = &balloon_subsys;
- error = sysdev_register(sysdev);
+ error = device_register(dev);
if (error) {
- sysdev_class_unregister(&balloon_sysdev_class);
+ bus_unregister(&balloon_subsys);
return error;
}
for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = sysdev_create_file(sysdev, balloon_attrs[i]);
+ error = device_create_file(dev, balloon_attrs[i]);
if (error)
goto fail;
}
- error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+ error = sysfs_create_group(&dev->kobj, &balloon_info_group);
if (error)
goto fail;
@@ -248,9 +248,9 @@ static int register_balloon(struct sys_device *sysdev)
fail:
while (--i >= 0)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
+ device_remove_file(dev, balloon_attrs[i]);
+ device_unregister(dev);
+ bus_unregister(&balloon_subsys);
return error;
}
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 8f06e1ed028..7944a17f5cb 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref)
kfree(pci_get_drvdata(psdev->dev));
pci_set_drvdata(psdev->dev, NULL);
+ psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
pci_dev_put(psdev->dev);
kfree(psdev);
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
xen_pcibk_config_free_dyn_fields(found_psdev->dev);
xen_pcibk_config_reset_dev(found_psdev->dev);
+ xen_unregister_device_domain_owner(found_psdev->dev);
+
spin_lock_irqsave(&found_psdev->lock, flags);
found_psdev->pdev = NULL;
spin_unlock_irqrestore(&found_psdev->lock, flags);
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
dev_dbg(&dev->dev, "reset device\n");
xen_pcibk_reset_device(dev);
+ dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
return 0;
config_release:
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 075525945e3..8e1c44d8ab4 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -241,11 +241,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
goto out;
dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
if (xen_register_device_domain_owner(dev,
pdev->xdev->otherend_id) != 0) {
- dev_err(&dev->dev, "device has been assigned to another " \
- "domain! Over-writting the ownership, but beware.\n");
+ dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
+ xen_find_device_domain_owner(dev));
xen_unregister_device_domain_owner(dev);
xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
}
@@ -281,7 +280,6 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
}
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
xen_unregister_device_domain_owner(dev);
xen_pcibk_release_pci_dev(pdev, dev);
@@ -707,19 +705,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
return 0;
}
-static const struct xenbus_device_id xenpci_ids[] = {
+static const struct xenbus_device_id xen_pcibk_ids[] = {
{"pci"},
{""},
};
-static struct xenbus_driver xenbus_xen_pcibk_driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
.probe = xen_pcibk_xenbus_probe,
.remove = xen_pcibk_xenbus_remove,
.otherend_changed = xen_pcibk_frontend_changed,
-};
+);
const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
@@ -735,11 +730,11 @@ int __init xen_pcibk_xenbus_register(void)
if (passthrough)
xen_pcibk_backend = &xen_pcibk_passthrough_backend;
pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
- return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+ return xenbus_register_backend(&xen_pcibk_driver);
}
void __exit xen_pcibk_xenbus_unregister(void)
{
destroy_workqueue(xen_pcibk_wq);
- xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+ xenbus_unregister_driver(&xen_pcibk_driver);
}
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index d93c70857e0..767ff656d5a 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -74,6 +74,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#include <xen/balloon.h>
#include <xen/tmem.h>
#include <xen/xen.h>
@@ -266,21 +267,20 @@ static void selfballoon_process(struct work_struct *work)
#ifdef CONFIG_SYSFS
-#include <linux/sysdev.h>
#include <linux/capability.h>
#define SELFBALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
+ static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
return sprintf(buf, format, ##args); \
}
SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
-static ssize_t store_selfballooning(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballooning(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -303,13 +303,13 @@ static ssize_t store_selfballooning(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR,
show_selfballooning, store_selfballooning);
SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
-static ssize_t store_selfballoon_interval(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_interval(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -325,13 +325,13 @@ static ssize_t store_selfballoon_interval(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
show_selfballoon_interval, store_selfballoon_interval);
SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
-static ssize_t store_selfballoon_downhys(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_downhys(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -347,14 +347,14 @@ static ssize_t store_selfballoon_downhys(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_downhys, store_selfballoon_downhys);
SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
-static ssize_t store_selfballoon_uphys(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_uphys(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -370,14 +370,14 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_uphys, store_selfballoon_uphys);
SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
selfballoon_min_usable_mb);
-static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -393,7 +393,7 @@ static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
show_selfballoon_min_usable_mb,
store_selfballoon_min_usable_mb);
@@ -401,8 +401,8 @@ static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
-static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_selfshrinking(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -424,13 +424,13 @@ static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
show_frontswap_selfshrinking, store_frontswap_selfshrinking);
SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
-static ssize_t store_frontswap_inertia(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_inertia(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -447,13 +447,13 @@ static ssize_t store_frontswap_inertia(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
show_frontswap_inertia, store_frontswap_inertia);
SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
-static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_hysteresis(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -469,21 +469,21 @@ static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
show_frontswap_hysteresis, store_frontswap_hysteresis);
#endif /* CONFIG_FRONTSWAP */
static struct attribute *selfballoon_attrs[] = {
- &attr_selfballooning.attr,
- &attr_selfballoon_interval.attr,
- &attr_selfballoon_downhysteresis.attr,
- &attr_selfballoon_uphysteresis.attr,
- &attr_selfballoon_min_usable_mb.attr,
+ &dev_attr_selfballooning.attr,
+ &dev_attr_selfballoon_interval.attr,
+ &dev_attr_selfballoon_downhysteresis.attr,
+ &dev_attr_selfballoon_uphysteresis.attr,
+ &dev_attr_selfballoon_min_usable_mb.attr,
#ifdef CONFIG_FRONTSWAP
- &attr_frontswap_selfshrinking.attr,
- &attr_frontswap_hysteresis.attr,
- &attr_frontswap_inertia.attr,
+ &dev_attr_frontswap_selfshrinking.attr,
+ &dev_attr_frontswap_hysteresis.attr,
+ &dev_attr_frontswap_inertia.attr,
#endif
NULL
};
@@ -494,12 +494,12 @@ static struct attribute_group selfballoon_group = {
};
#endif
-int register_xen_selfballooning(struct sys_device *sysdev)
+int register_xen_selfballooning(struct device *dev)
{
int error = -1;
#ifdef CONFIG_SYSFS
- error = sysfs_create_group(&sysdev->kobj, &selfballoon_group);
+ error = sysfs_create_group(&dev->kobj, &selfballoon_group);
#endif
return error;
}
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 8dca685358b..31e2e9050c7 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -1,4 +1,5 @@
obj-y += xenbus.o
+obj-y += xenbus_dev_frontend.o
xenbus-objs =
xenbus-objs += xenbus_client.o
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o
xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
xenbus-objs += $(xenbus-be-objs-y)
+obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 1906125eab4..566d2adbd6e 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,39 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
+
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+ struct list_head next;
+ union {
+ struct vm_struct *area; /* PV */
+ struct page *page; /* HVM */
+ };
+ grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+ int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+ int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
@@ -436,19 +460,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
*/
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
+ return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
struct gnttab_map_grant_ref op = {
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
.ref = gnt_ref,
.dom = dev->otherend_id,
};
+ struct xenbus_map_node *node;
struct vm_struct *area;
pte_t *pte;
*vaddr = NULL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
area = alloc_vm_area(PAGE_SIZE, &pte);
- if (!area)
+ if (!area) {
+ kfree(node);
return -ENOMEM;
+ }
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
if (op.status != GNTST_okay) {
free_vm_area(area);
+ kfree(node);
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
return op.status;
}
- /* Stuff the handle in an unused field */
- area->phys_addr = (unsigned long)op.handle;
+ node->handle = op.handle;
+ node->area = area;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
return 0;
}
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
+ struct xenbus_map_node *node;
+ int err;
+ void *addr;
+
+ *vaddr = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+ if (err)
+ goto out_err;
+
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+ err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+ if (err)
+ goto out_err;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = addr;
+ return 0;
+
+ out_err:
+ free_xenballooned_pages(1, &node->page);
+ kfree(node);
+ return err;
+}
/**
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
grant_handle_t *handle, void *vaddr)
{
- struct gnttab_map_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
+ struct gnttab_map_grant_ref op;
+
+ gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
+ dev->otherend_id);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
*/
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
- struct vm_struct *area;
+ return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+ struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref op = {
.host_addr = (unsigned long)vaddr,
};
unsigned int level;
- /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
- * method so that we don't have to muck with vmalloc internals here.
- * We could force the user to hang on to their struct vm_struct from
- * xenbus_map_ring_valloc, but these 6 lines considerably simplify
- * this API.
- */
- read_lock(&vmlist_lock);
- for (area = vmlist; area != NULL; area = area->next) {
- if (area->addr == vaddr)
- break;
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ if (node->area->addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
}
- read_unlock(&vmlist_lock);
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
- if (!area) {
+ if (!node) {
xenbus_dev_error(dev, -ENOENT,
"can't find mapped virtual address %p", vaddr);
return GNTST_bad_virt_addr;
}
- op.handle = (grant_handle_t)area->phys_addr;
+ op.handle = node->handle;
op.host_addr = arbitrary_virt_to_machine(
lookup_address((unsigned long)vaddr, &level)).maddr;
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
BUG();
if (op.status == GNTST_okay)
- free_vm_area(area);
+ free_vm_area(node->area);
else
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
- (int16_t)area->phys_addr, op.status);
+ node->handle, op.status);
+ kfree(node);
return op.status;
}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+ int rv;
+ struct xenbus_map_node *node;
+ void *addr;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+ if (addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
+ }
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
+
+ if (!node) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return GNTST_bad_virt_addr;
+ }
+
+ rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+ if (!rv)
+ free_xenballooned_pages(1, &node->page);
+ else
+ WARN(1, "Leaking %p\n", vaddr);
+
+ kfree(node);
+ return rv;
+}
/**
* xenbus_unmap_ring
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr)
{
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .handle = handle,
- };
+ struct gnttab_unmap_grant_ref op;
+
+ gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+ .map = xenbus_map_ring_valloc_pv,
+ .unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+ .map = xenbus_map_ring_valloc_hvm,
+ .unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+ if (xen_pv_domain())
+ ring_ops = &ring_ops_pv;
+ else
+ ring_ops = &ring_ops_hvm;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index c21db751373..6e42800fa49 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -31,6 +31,8 @@
#ifndef _XENBUS_COMMS_H
#define _XENBUS_COMMS_H
+#include <linux/fs.h>
+
int xs_init(void);
int xb_init_comms(void);
@@ -43,4 +45,6 @@ int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
+extern const struct file_operations xen_xenbus_fops;
+
#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
new file mode 100644
index 00000000000..3d3be78c109
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -0,0 +1,90 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/xenbus_dev.h>
+
+#include "xenbus_comms.h"
+
+MODULE_LICENSE("GPL");
+
+static int xenbus_backend_open(struct inode *inode, struct file *filp)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return nonseekable_open(inode, filp);
+}
+
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IOCTL_XENBUS_BACKEND_EVTCHN:
+ if (xen_store_evtchn > 0)
+ return xen_store_evtchn;
+ return -ENODEV;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_pfn(xen_store_interface),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+const struct file_operations xenbus_backend_fops = {
+ .open = xenbus_backend_open,
+ .mmap = xenbus_backend_mmap,
+ .unlocked_ioctl = xenbus_backend_ioctl,
+};
+
+static struct miscdevice xenbus_backend_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus_backend",
+ .fops = &xenbus_backend_fops,
+};
+
+static int __init xenbus_backend_init(void)
+{
+ int err;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_backend_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus backend device\n");
+ return err;
+}
+
+static void __exit xenbus_backend_exit(void)
+{
+ misc_deregister(&xenbus_backend_dev);
+}
+
+module_init(xenbus_backend_init);
+module_exit(xenbus_backend_exit);
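
The new backend device exposes just two operations: an ioctl that returns the xenstore event channel and an mmap of the single shared interface page. From user space that is consumed roughly as below; the device path and IOCTL_XENBUS_BACKEND_EVTCHN come from the code above, while the header location and surrounding usage pattern are assumptions.

/* Sketch of a xenstored-style consumer of /dev/xen/xenbus_backend. */
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/xenbus_dev.h>	/* assumed location of the ioctl define */

static void *map_store_interface(int *evtchn_out)
{
	int fd = open("/dev/xen/xenbus_backend", O_RDWR);

	if (fd < 0)
		return NULL;

	*evtchn_out = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);

	/* One page only: the mmap handler above rejects larger requests. */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
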
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index bbd000f88af..527dc2a3b89 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -52,13 +52,17 @@
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
-#include "xenfs.h"
-#include "../xenbus/xenbus_comms.h"
+#include "xenbus_comms.h"
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
+MODULE_LICENSE("GPL");
+
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -101,7 +105,7 @@ struct xenbus_file_priv {
unsigned int len;
union {
struct xsd_sockmsg msg;
- char buffer[PAGE_SIZE];
+ char buffer[XENSTORE_PAYLOAD_MAX];
} u;
/* Response queue. */
@@ -583,7 +587,7 @@ static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
return 0;
}
-const struct file_operations xenbus_file_ops = {
+const struct file_operations xen_xenbus_fops = {
.read = xenbus_file_read,
.write = xenbus_file_write,
.open = xenbus_file_open,
@@ -591,3 +595,31 @@ const struct file_operations xenbus_file_ops = {
.poll = xenbus_file_poll,
.llseek = no_llseek,
};
+EXPORT_SYMBOL_GPL(xen_xenbus_fops);
+
+static struct miscdevice xenbus_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus",
+ .fops = &xen_xenbus_fops,
+};
+
+static int __init xenbus_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus frontend device\n");
+ return err;
+}
+
+static void __exit xenbus_exit(void)
+{
+ misc_deregister(&xenbus_dev);
+}
+
+module_init(xenbus_init);
+module_exit(xenbus_exit);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 1b178c6e893..3864967202b 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev)
EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name)
+ struct xen_bus_type *bus)
{
- drv->driver.name = drv->name;
drv->driver.bus = &bus->bus;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
return driver_register(&drv->driver);
}
@@ -730,6 +725,8 @@ static int __init xenbus_init(void)
if (!xen_domain())
return -ENODEV;
+ xenbus_ring_ops_init();
+
if (xen_hvm_domain()) {
uint64_t v = 0;
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 9b1de4e34c6..bb4f92ed873 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
+ struct xen_bus_type *bus);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
char *id_node, char *path_node);
+void xenbus_ring_ops_init(void);
+
#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index c3c7cd195c1..257be37d909 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
-int __xenbus_register_backend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_backend(struct xenbus_driver *drv)
{
drv->read_otherend_details = read_frontend_details;
- return xenbus_register_driver_common(drv, &xenbus_backend,
- owner, mod_name);
+ return xenbus_register_driver_common(drv, &xenbus_backend);
}
-EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+EXPORT_SYMBOL_GPL(xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 2f73195512b..9c57819df51 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
print_device_status);
}
-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
{
int ret;
drv->read_otherend_details = read_backend_details;
- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
if (ret)
return ret;
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
return 0;
}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
static int backend_state;
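
With the owner and mod_name parameters dropped from xenbus_register_driver_common(), backend and frontend drivers now call xenbus_register_backend()/xenbus_register_frontend() directly. A shape-only sketch of a call site follows; the driver name, fields and init function are hypothetical, and the header-side change that now supplies drv->driver.name and .owner (removed from the common helper above) is not part of this excerpt.

#include <linux/init.h>
#include <xen/xenbus.h>

/* hypothetical frontend driver */
static const struct xenbus_device_id examplefront_ids[] = {
	{ "example" },	/* placeholder device type */
	{ "" }
};

static struct xenbus_driver examplefront_driver = {
	.ids = examplefront_ids,
	/* .probe, .remove, .otherend_changed, ... elided */
};

static int __init examplefront_init(void)
{
	/*
	 * Previously reached as __xenbus_register_frontend(drv,
	 * THIS_MODULE, KBUILD_MODNAME), typically via a wrapper macro
	 * (assumption; that header is not shown here).
	 */
	return xenbus_register_frontend(&examplefront_driver);
}
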
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f3ad1..d1c217b23a4 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t,
{
va_list ap;
int ret;
-#define PRINTF_BUFFER_SIZE 4096
- char *printf_buffer;
-
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
- if (printf_buffer == NULL)
- return -ENOMEM;
+ char *buf;
va_start(ap, fmt);
- ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
va_end(ap);
- BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
- ret = xenbus_write(t, dir, node, printf_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = xenbus_write(t, dir, node, buf);
- kfree(printf_buffer);
+ kfree(buf);
return ret;
}
@@ -621,15 +618,6 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
-static void xs_reset_watches(void)
-{
- int err;
-
- err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
- if (err && err != -EEXIST)
- printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -810,6 +798,12 @@ static int process_msg(void)
goto out;
}
+ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+ kfree(msg);
+ err = -EINVAL;
+ goto out;
+ }
+
body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
if (body == NULL) {
kfree(msg);
@@ -906,9 +900,5 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
- /* shutdown watches for kexec boot */
- if (xen_hvm_domain())
- xs_reset_watches();
-
return 0;
}
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 4fde9440fe1..b019865fcc5 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_XENFS) += xenfs.o
-xenfs-y = super.o xenbus.o privcmd.o
+xenfs-y = super.o
xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 1aa38971984..a84b53c0143 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -16,6 +16,8 @@
#include <xen/xen.h>
#include "xenfs.h"
+#include "../privcmd.h"
+#include "../xenbus/xenbus_comms.h"
#include <asm/xen/hypervisor.h>
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
[1] = {},
- { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+ { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
- { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
+ { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
int rc;
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index b68aa620000..6b80c7779c0 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -1,8 +1,6 @@
#ifndef _XENFS_XENBUS_H
#define _XENFS_XENBUS_H
-extern const struct file_operations xenbus_file_ops;
-extern const struct file_operations privcmd_file_ops;
extern const struct file_operations xsd_kva_file_ops;
extern const struct file_operations xsd_port_file_ops;
diff --git a/drivers/zorro/zorro.ids b/drivers/zorro/zorro.ids
index de24e3deced..119abea8c6c 100644
--- a/drivers/zorro/zorro.ids
+++ b/drivers/zorro/zorro.ids
@@ -351,7 +351,7 @@
0200 EGS 28/24 Spectrum [Graphics Card]
0892 Apollo
0100 A1200 [FPU and RAM Expansion]
-0893 Ingenieurbüro Helfrich
+0893 Ingenieurbüro Helfrich
0500 Piccolo RAM [Graphics Card]
0600 Piccolo [Graphics Card]
0700 PeggyPlus MPEG [Video Card]
diff --git a/firmware/README.AddingFirmware b/firmware/README.AddingFirmware
index e24cd8986d8..ea78c3a17ee 100644
--- a/firmware/README.AddingFirmware
+++ b/firmware/README.AddingFirmware
@@ -12,7 +12,7 @@ here.
This directory is _NOT_ for adding arbitrary new firmware images. The
place to add those is the separate linux-firmware repository:
- git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
That repository contains all these firmware images which have been
extracted from older drivers, as well various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
To submit firmware to that repository, please send either a git binary
diff or preferably a git pull request to:
David Woodhouse <dwmw2@infradead.org>
+ Ben Hutchings <ben@decadent.org.uk>
Your commit should include an update to the WHENCE file clearly
identifying the licence under which the firmware is available, and
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 945aa5f02f9..a9ea73d6dcf 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -62,8 +62,8 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
uint16_t klen = 0;
v9ses = (struct v9fs_session_info *)cookie_netfs_data;
- P9_DPRINTK(P9_DEBUG_FSC, "session %p buf %p size %u", v9ses,
- buffer, bufmax);
+ p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
+ v9ses, buffer, bufmax);
if (v9ses->cachetag)
klen = strlen(v9ses->cachetag);
@@ -72,7 +72,7 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
return 0;
memcpy(buffer, v9ses->cachetag, klen);
- P9_DPRINTK(P9_DEBUG_FSC, "cache session tag %s", v9ses->cachetag);
+ p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
return klen;
}
@@ -91,14 +91,14 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
&v9fs_cache_session_index_def,
v9ses);
- P9_DPRINTK(P9_DEBUG_FSC, "session %p get cookie %p", v9ses,
- v9ses->fscache);
+ p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
+ v9ses, v9ses->fscache);
}
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
- P9_DPRINTK(P9_DEBUG_FSC, "session %p put cookie %p", v9ses,
- v9ses->fscache);
+ p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
+ v9ses, v9ses->fscache);
fscache_relinquish_cookie(v9ses->fscache, 0);
v9ses->fscache = NULL;
}
@@ -109,8 +109,8 @@ static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
{
const struct v9fs_inode *v9inode = cookie_netfs_data;
memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
- v9inode->qid.path);
+ p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
+ &v9inode->vfs_inode, v9inode->qid.path);
return sizeof(v9inode->qid.path);
}
@@ -120,8 +120,8 @@ static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
const struct v9fs_inode *v9inode = cookie_netfs_data;
*size = i_size_read(&v9inode->vfs_inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &v9inode->vfs_inode,
- *size);
+ p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
+ &v9inode->vfs_inode, *size);
}
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
@@ -129,8 +129,8 @@ static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
{
const struct v9fs_inode *v9inode = cookie_netfs_data;
memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
- v9inode->qid.version);
+ p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
+ &v9inode->vfs_inode, v9inode->qid.version);
return sizeof(v9inode->qid.version);
}
@@ -206,8 +206,8 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
&v9fs_cache_inode_index_def,
v9inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
- v9inode->fscache);
+ p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
+ inode, v9inode->fscache);
}
void v9fs_cache_inode_put_cookie(struct inode *inode)
@@ -216,8 +216,8 @@ void v9fs_cache_inode_put_cookie(struct inode *inode)
if (!v9inode->fscache)
return;
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
- v9inode->fscache);
+ p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
+ inode, v9inode->fscache);
fscache_relinquish_cookie(v9inode->fscache, 0);
v9inode->fscache = NULL;
@@ -229,8 +229,8 @@ void v9fs_cache_inode_flush_cookie(struct inode *inode)
if (!v9inode->fscache)
return;
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
- v9inode->fscache);
+ p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
+ inode, v9inode->fscache);
fscache_relinquish_cookie(v9inode->fscache, 1);
v9inode->fscache = NULL;
@@ -272,8 +272,8 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
v9inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
- inode, old, v9inode->fscache);
+ p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
+ inode, old, v9inode->fscache);
spin_unlock(&v9inode->fscache_lock);
}
@@ -323,7 +323,7 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
int ret;
const struct v9fs_inode *v9inode = V9FS_I(inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
if (!v9inode->fscache)
return -ENOBUFS;
@@ -335,13 +335,13 @@ int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
switch (ret) {
case -ENOBUFS:
case -ENODATA:
- P9_DPRINTK(P9_DEBUG_FSC, "page/inode not in cache %d", ret);
+ p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
return 1;
case 0:
- P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+ p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
return ret;
default:
- P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+ p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
return ret;
}
}
@@ -361,7 +361,7 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
int ret;
const struct v9fs_inode *v9inode = V9FS_I(inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
+ p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
if (!v9inode->fscache)
return -ENOBUFS;
@@ -373,15 +373,15 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
switch (ret) {
case -ENOBUFS:
case -ENODATA:
- P9_DPRINTK(P9_DEBUG_FSC, "pages/inodes not in cache %d", ret);
+ p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
return 1;
case 0:
BUG_ON(!list_empty(pages));
BUG_ON(*nr_pages != 0);
- P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted");
+ p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
return ret;
default:
- P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret);
+ p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
return ret;
}
}
@@ -396,9 +396,9 @@ void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
int ret;
const struct v9fs_inode *v9inode = V9FS_I(inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
- P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret);
+ p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
if (ret != 0)
v9fs_uncache_page(inode, page);
}
@@ -409,7 +409,7 @@ void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
const struct v9fs_inode *v9inode = V9FS_I(inode);
- P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
+ p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
if (PageFsCache(page))
fscache_wait_on_page_write(v9inode->fscache, page);
}
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 85b67ffa2a4..da8eefbe830 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -45,8 +45,8 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
{
struct v9fs_dentry *dent;
- P9_DPRINTK(P9_DEBUG_VFS, "fid %d dentry %s\n",
- fid->fid, dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "fid %d dentry %s\n",
+ fid->fid, dentry->d_name.name);
dent = dentry->d_fsdata;
if (!dent) {
@@ -79,8 +79,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
struct v9fs_dentry *dent;
struct p9_fid *fid, *ret;
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
- dentry->d_name.name, dentry, uid, any);
+ p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
+ dentry->d_name.name, dentry, uid, any);
dent = (struct v9fs_dentry *) dentry->d_fsdata;
ret = NULL;
if (dent) {
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 2b78014a124..1964f98e74b 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -85,15 +87,15 @@ static int get_cache_mode(char *s)
if (!strcmp(s, "loose")) {
version = CACHE_LOOSE;
- P9_DPRINTK(P9_DEBUG_9P, "Cache mode: loose\n");
+ p9_debug(P9_DEBUG_9P, "Cache mode: loose\n");
} else if (!strcmp(s, "fscache")) {
version = CACHE_FSCACHE;
- P9_DPRINTK(P9_DEBUG_9P, "Cache mode: fscache\n");
+ p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
} else if (!strcmp(s, "none")) {
version = CACHE_NONE;
- P9_DPRINTK(P9_DEBUG_9P, "Cache mode: none\n");
+ p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
} else
- printk(KERN_INFO "9p: Unknown Cache mode %s.\n", s);
+ pr_info("Unknown Cache mode %s\n", s);
return version;
}
@@ -140,8 +142,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_debug:
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
ret = r;
continue;
}
@@ -154,8 +156,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_dfltuid:
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
ret = r;
continue;
}
@@ -164,8 +166,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_dfltgid:
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
ret = r;
continue;
}
@@ -174,8 +176,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_afid:
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
ret = r;
continue;
}
@@ -205,8 +207,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem allocating copy of cache arg\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "problem allocating copy of cache arg\n");
goto free_and_return;
}
ret = get_cache_mode(s);
@@ -223,8 +225,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem allocating copy of access arg\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "problem allocating copy of access arg\n");
goto free_and_return;
}
@@ -240,8 +242,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
v9ses->uid = simple_strtoul(s, &e, 10);
if (*e != '\0') {
ret = -EINVAL;
- printk(KERN_INFO "9p: Unknown access "
- "argument %s.\n", s);
+ pr_info("Unknown access argument %s\n",
+ s);
kfree(s);
goto free_and_return;
}
@@ -254,9 +256,8 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#ifdef CONFIG_9P_FS_POSIX_ACL
v9ses->flags |= V9FS_POSIX_ACL;
#else
- P9_DPRINTK(P9_DEBUG_ERROR,
- "Not defined CONFIG_9P_FS_POSIX_ACL. "
- "Ignoring posixacl option\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
#endif
break;
@@ -318,7 +319,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (IS_ERR(v9ses->clnt)) {
retval = PTR_ERR(v9ses->clnt);
v9ses->clnt = NULL;
- P9_DPRINTK(P9_DEBUG_ERROR, "problem initializing 9p client\n");
+ p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n");
goto error;
}
@@ -371,7 +372,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
fid = NULL;
- P9_DPRINTK(P9_DEBUG_ERROR, "cannot attach\n");
+ p9_debug(P9_DEBUG_ERROR, "cannot attach\n");
goto error;
}
@@ -429,7 +430,7 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
*/
void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
- P9_DPRINTK(P9_DEBUG_ERROR, "cancel session %p\n", v9ses);
+ p9_debug(P9_DEBUG_ERROR, "cancel session %p\n", v9ses);
p9_client_disconnect(v9ses->clnt);
}
@@ -442,7 +443,7 @@ void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses)
{
- P9_DPRINTK(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses);
+ p9_debug(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses);
p9_client_begin_disconnect(v9ses->clnt);
}
@@ -591,23 +592,23 @@ static void v9fs_cache_unregister(void)
static int __init init_v9fs(void)
{
int err;
- printk(KERN_INFO "Installing v9fs 9p2000 file system support\n");
+ pr_info("Installing v9fs 9p2000 file system support\n");
/* TODO: Setup list of registered trasnport modules */
err = register_filesystem(&v9fs_fs_type);
if (err < 0) {
- printk(KERN_ERR "Failed to register filesystem\n");
+ pr_err("Failed to register filesystem\n");
return err;
}
err = v9fs_cache_register();
if (err < 0) {
- printk(KERN_ERR "Failed to register v9fs for caching\n");
+ pr_err("Failed to register v9fs for caching\n");
goto out_fs_unreg;
}
err = v9fs_sysfs_init();
if (err < 0) {
- printk(KERN_ERR "Failed to register with sysfs\n");
+ pr_err("Failed to register with sysfs\n");
goto out_sysfs_cleanup;
}
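
The printk()-to-pr_*() conversions in this file rely on the pr_fmt() definition added at the top: pr_info()/pr_err() now prepend KBUILD_MODNAME automatically, which is why the literal "9p: " prefixes disappear from the format strings. A minimal illustration of the mechanism; that KBUILD_MODNAME expands to "9p" here is inferred from the module name and not shown in this hunk:

/* must be defined before any header that pulls in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void report_unknown_cache_mode(const char *s)
{
	/* old form: printk(KERN_INFO "9p: Unknown Cache mode %s.\n", s); */
	pr_info("Unknown Cache mode %s\n", s);	/* logs "9p: Unknown Cache mode ..." */
}
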
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 410ffd6ceb5..dc95a252523 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
+struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t);
int v9fs_init_inode(struct v9fs_session_info *v9ses,
- struct inode *inode, int mode, dev_t);
+ struct inode *inode, umode_t mode, dev_t);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 2524e4cbb8e..0ad61c6a65a 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -56,7 +56,7 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
struct inode *inode;
inode = page->mapping->host;
- P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ p9_debug(P9_DEBUG_VFS, "\n");
BUG_ON(!PageLocked(page));
@@ -116,14 +116,14 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
struct inode *inode;
inode = mapping->host;
- P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
+ p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
if (ret == 0)
return ret;
ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
- P9_DPRINTK(P9_DEBUG_VFS, " = %d\n", ret);
+ p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
return ret;
}
@@ -263,10 +263,9 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* Now that we do caching with cache mode enabled, We need
* to support direct IO
*/
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
- "off/no(%lld/%lu) EINVAL\n",
- iocb->ki_filp->f_path.dentry->d_name.name,
- (long long) pos, nr_segs);
+ p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
+ iocb->ki_filp->f_path.dentry->d_name.name,
+ (long long)pos, nr_segs);
return -EINVAL;
}
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index e022890c6f4..d529437ff44 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -53,8 +53,8 @@
static int v9fs_dentry_delete(const struct dentry *dentry)
{
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
- dentry);
+ p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+ dentry->d_name.name, dentry);
return 1;
}
@@ -66,8 +66,8 @@ static int v9fs_dentry_delete(const struct dentry *dentry)
*/
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
- dentry->d_name.name, dentry);
+ p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+ dentry->d_name.name, dentry);
/* Don't cache negative dentries */
if (!dentry->d_inode)
@@ -86,8 +86,8 @@ static void v9fs_dentry_release(struct dentry *dentry)
struct v9fs_dentry *dent;
struct p9_fid *temp, *current_fid;
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
- dentry);
+ p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
+ dentry->d_name.name, dentry);
dent = dentry->d_fsdata;
if (dent) {
list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 598fff1a54e..ff911e77965 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -140,7 +140,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
int reclen = 0;
struct p9_rdir *rdir;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
fid = filp->private_data;
buflen = fid->clnt->msize - P9_IOHDRSZ;
@@ -168,7 +168,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
rdir->tail - rdir->head, &st);
if (err) {
- P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
err = -EIO;
p9stat_free(&st);
goto unlock_and_exit;
@@ -213,7 +213,7 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
struct p9_dirent curdirent;
u64 oldoffset = 0;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
fid = filp->private_data;
buflen = fid->clnt->msize - P9_READDIRHDRSZ;
@@ -244,7 +244,7 @@ static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
rdir->tail - rdir->head,
&curdirent);
if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
err = -EIO;
goto unlock_and_exit;
}
@@ -290,9 +290,8 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
struct p9_fid *fid;
fid = filp->private_data;
- P9_DPRINTK(P9_DEBUG_VFS,
- "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
- inode, filp, fid ? fid->fid : -1);
+ p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
+ inode, filp, fid ? fid->fid : -1);
if (fid)
p9_client_clunk(fid);
return 0;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 62857a810a7..fc06fd27065 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -61,7 +61,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
struct p9_fid *fid;
int omode;
- P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
+ p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9inode = V9FS_I(inode);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
@@ -135,7 +135,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
int res = 0;
struct inode *inode = filp->f_path.dentry->d_inode;
- P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
+ p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
/* No mandatory locks */
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -204,7 +204,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
break;
if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
break;
- schedule_timeout_interruptible(P9_LOCK_TIMEOUT);
+ if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+ break;
}
/* map 9p status to VFS status */
@@ -304,8 +305,8 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
struct inode *inode = filp->f_path.dentry->d_inode;
int ret = -ENOLCK;
- P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
- cmd, fl, filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
+ filp, cmd, fl, filp->f_path.dentry->d_name.name);
/* No mandatory locks */
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -340,8 +341,8 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
struct inode *inode = filp->f_path.dentry->d_inode;
int ret = -ENOLCK;
- P9_DPRINTK(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", filp,
- cmd, fl, filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
+ filp, cmd, fl, filp->f_path.dentry->d_name.name);
/* No mandatory locks */
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -384,8 +385,8 @@ v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
{
int n, total, size;
- P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
- (long long unsigned) offset, count);
+ p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
+ fid->fid, (long long unsigned)offset, count);
n = 0;
total = 0;
size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@@ -443,7 +444,7 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
struct p9_fid *fid;
size_t size;
- P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
+ p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
fid = filp->private_data;
size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@@ -470,8 +471,8 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
loff_t origin = *offset;
unsigned long pg_start, pg_end;
- P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
- (int)count, (int)*offset);
+ p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
+ data, (int)count, (int)*offset);
clnt = fid->clnt;
do {
@@ -552,7 +553,7 @@ static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
return retval;
mutex_lock(&inode->i_mutex);
- P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
+ p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
fid = filp->private_data;
v9fs_blank_wstat(&wstat);
@@ -575,8 +576,7 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
return retval;
mutex_lock(&inode->i_mutex);
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n",
- filp, datasync);
+ p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
fid = filp->private_data;
@@ -607,8 +607,8 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = filp->f_path.dentry->d_inode;
- P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
- page, (unsigned long)filp->private_data);
+ p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
+ page, (unsigned long)filp->private_data);
v9inode = V9FS_I(inode);
/* make sure the cache has finished storing the page */
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 879ed885173..014c8dd6296 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -59,15 +61,13 @@ static const struct inode_operations v9fs_symlink_inode_operations;
*
*/
-static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
+static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode)
{
int res;
res = mode & 0777;
if (S_ISDIR(mode))
res |= P9_DMDIR;
if (v9fs_proto_dotu(v9ses)) {
- if (S_ISLNK(mode))
- res |= P9_DMSYMLINK;
if (v9ses->nodev == 0) {
if (S_ISSOCK(mode))
res |= P9_DMSOCKET;
@@ -85,10 +85,33 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
res |= P9_DMSETGID;
if ((mode & S_ISVTX) == S_ISVTX)
res |= P9_DMSETVTX;
- if ((mode & P9_DMLINK))
- res |= P9_DMLINK;
}
+ return res;
+}
+/**
+ * p9mode2perm- convert plan9 mode bits to unix permission bits
+ * @v9ses: v9fs session information
+ * @stat: p9_wstat from which mode need to be derived
+ *
+ */
+static int p9mode2perm(struct v9fs_session_info *v9ses,
+ struct p9_wstat *stat)
+{
+ int res;
+ int mode = stat->mode;
+
+ res = mode & S_IALLUGO;
+ if (v9fs_proto_dotu(v9ses)) {
+ if ((mode & P9_DMSETUID) == P9_DMSETUID)
+ res |= S_ISUID;
+
+ if ((mode & P9_DMSETGID) == P9_DMSETGID)
+ res |= S_ISGID;
+
+ if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
+ res |= S_ISVTX;
+ }
return res;
}
@@ -99,14 +122,14 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
* @rdev: major number, minor number in case of device files.
*
*/
-static int p9mode2unixmode(struct v9fs_session_info *v9ses,
- struct p9_wstat *stat, dev_t *rdev)
+static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
+ struct p9_wstat *stat, dev_t *rdev)
{
int res;
- int mode = stat->mode;
+ u32 mode = stat->mode;
- res = mode & S_IALLUGO;
*rdev = 0;
+ res = p9mode2perm(v9ses, stat);
if ((mode & P9_DMDIR) == P9_DMDIR)
res |= S_IFDIR;
@@ -133,24 +156,13 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses,
res |= S_IFBLK;
break;
default:
- P9_DPRINTK(P9_DEBUG_ERROR,
- "Unknown special type %c %s\n", type,
- stat->extension);
+ p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n",
+ type, stat->extension);
};
*rdev = MKDEV(major, minor);
} else
res |= S_IFREG;
- if (v9fs_proto_dotu(v9ses)) {
- if ((mode & P9_DMSETUID) == P9_DMSETUID)
- res |= S_ISUID;
-
- if ((mode & P9_DMSETGID) == P9_DMSETGID)
- res |= S_ISGID;
-
- if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
- res |= S_ISVTX;
- }
return res;
}
@@ -251,7 +263,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
static void v9fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
}
@@ -261,7 +272,7 @@ void v9fs_destroy_inode(struct inode *inode)
}
int v9fs_init_inode(struct v9fs_session_info *v9ses,
- struct inode *inode, int mode, dev_t rdev)
+ struct inode *inode, umode_t mode, dev_t rdev)
{
int err = 0;
@@ -281,8 +292,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
} else if (v9fs_proto_dotu(v9ses)) {
inode->i_op = &v9fs_file_inode_operations;
} else {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "special files without extended mode\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "special files without extended mode\n");
err = -EINVAL;
goto error;
}
@@ -307,8 +318,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
break;
case S_IFLNK:
if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
- P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used with "
- "legacy protocol.\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "extended modes used with legacy protocol\n");
err = -EINVAL;
goto error;
}
@@ -335,8 +346,8 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
break;
default:
- P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
- mode, mode & S_IFMT);
+ p9_debug(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n",
+ mode, mode & S_IFMT);
err = -EINVAL;
goto error;
}
@@ -352,17 +363,18 @@ error:
*
*/
-struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
+struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
{
int err;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
- P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
+ p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode);
inode = new_inode(sb);
if (!inode) {
- P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
+ pr_warn("%s (%d): Problem allocating inode\n",
+ __func__, task_pid_nr(current));
return ERR_PTR(-ENOMEM);
}
err = v9fs_init_inode(v9ses, inode, mode, rdev);
@@ -492,7 +504,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
int new)
{
dev_t rdev;
- int retval, umode;
+ int retval;
+ umode_t umode;
unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
@@ -578,15 +591,15 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
struct p9_fid *v9fid, *dfid;
struct v9fs_session_info *v9ses;
- P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
- dir, dentry, flags);
+ p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
+ dir, dentry, flags);
v9ses = v9fs_inode2v9ses(dir);
inode = dentry->d_inode;
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
retval = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
return retval;
}
if (v9fs_proto_dotl(v9ses))
@@ -635,7 +648,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
struct p9_fid *dfid, *ofid, *fid;
struct inode *inode;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
ofid = NULL;
@@ -644,7 +657,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return ERR_PTR(err);
}
@@ -652,36 +665,41 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
ofid = p9_client_walk(dfid, 0, NULL, 1);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
return ERR_PTR(err);
}
err = p9_client_fcreate(ofid, name, perm, mode, extension);
if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
goto error;
}
- /* now walk from the parent so we can get unopened fid */
- fid = p9_client_walk(dfid, 1, &name, 1);
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- fid = NULL;
- goto error;
- }
-
- /* instantiate inode and assign the unopened fid to the dentry */
- inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
- goto error;
+ if (!(perm & P9_DMLINK)) {
+ /* now walk from the parent so we can get unopened fid */
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ p9_debug(P9_DEBUG_VFS,
+ "p9_client_walk failed %d\n", err);
+ fid = NULL;
+ goto error;
+ }
+ /*
+ * instantiate inode and assign the unopened fid to the dentry
+ */
+ inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ p9_debug(P9_DEBUG_VFS,
+ "inode creation failed %d\n", err);
+ goto error;
+ }
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ d_instantiate(dentry, inode);
}
- err = v9fs_fid_add(dentry, fid);
- if (err < 0)
- goto error;
- d_instantiate(dentry, inode);
return ofid;
error:
if (ofid)
@@ -703,7 +721,7 @@ error:
*/
static int
-v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int err;
@@ -786,14 +804,14 @@ error:
*
*/
-static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
struct p9_fid *fid;
struct v9fs_session_info *v9ses;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode | S_IFDIR);
@@ -831,8 +849,8 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
char *name;
int result = 0;
- P9_DPRINTK(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
- dir, dentry->d_name.name, dentry, nameidata);
+ p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
+ dir, dentry->d_name.name, dentry, nameidata);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@@ -938,7 +956,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct p9_fid *newdirfid;
struct p9_wstat wstat;
- P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ p9_debug(P9_DEBUG_VFS, "\n");
retval = 0;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
@@ -974,8 +992,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* 9P .u can only handle file rename in the same directory
*/
- P9_DPRINTK(P9_DEBUG_ERROR,
- "old dir and new dir are different\n");
+ p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n");
retval = -EXDEV;
goto clunk_newdir;
}
@@ -1031,7 +1048,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct p9_fid *fid;
struct p9_wstat *st;
- P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
@@ -1068,7 +1085,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct p9_fid *fid;
struct p9_wstat wstat;
- P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ p9_debug(P9_DEBUG_VFS, "\n");
retval = inode_change_ok(dentry->d_inode, iattr);
if (retval)
return retval;
@@ -1131,7 +1148,7 @@ void
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb)
{
- mode_t mode;
+ umode_t mode;
char ext[32];
char tag_name[14];
unsigned int i_nlink;
@@ -1167,7 +1184,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
set_nlink(inode, i_nlink);
}
}
- mode = stat->mode & S_IALLUGO;
+ mode = p9mode2perm(v9ses, stat);
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->length);
@@ -1213,7 +1230,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
struct p9_fid *fid;
struct p9_wstat *st;
- P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
@@ -1235,8 +1252,8 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
/* copy extension buffer into buffer */
strncpy(buffer, st->extension, buflen);
- P9_DPRINTK(P9_DEBUG_VFS,
- "%s -> %s (%s)\n", dentry->d_name.name, st->extension, buffer);
+ p9_debug(P9_DEBUG_VFS, "%s -> %s (%s)\n",
+ dentry->d_name.name, st->extension, buffer);
retval = strnlen(buffer, buflen);
done:
@@ -1257,7 +1274,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
int len = 0;
char *link = __getname();
- P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
if (!link)
link = ERR_PTR(-ENOMEM);
@@ -1288,8 +1305,8 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
- P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
- IS_ERR(s) ? "<error>" : s);
+ p9_debug(P9_DEBUG_VFS, " %s %s\n",
+ dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
if (!IS_ERR(s))
__putname(s);
}
@@ -1304,19 +1321,17 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
*/
static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
- int mode, const char *extension)
+ u32 perm, const char *extension)
{
- u32 perm;
struct p9_fid *fid;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(dir);
if (!v9fs_proto_dotu(v9ses)) {
- P9_DPRINTK(P9_DEBUG_ERROR, "not extended\n");
+ p9_debug(P9_DEBUG_ERROR, "not extended\n");
return -EPERM;
}
- perm = unixmode2p9mode(v9ses, mode);
fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm,
P9_OREAD);
if (IS_ERR(fid))
@@ -1340,10 +1355,10 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
static int
v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
- P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino,
- dentry->d_name.name, symname);
+ p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
+ dir->i_ino, dentry->d_name.name, symname);
- return v9fs_vfs_mkspecial(dir, dentry, S_IFLNK, symname);
+ return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname);
}
/**
@@ -1362,9 +1377,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
char *name;
struct p9_fid *oldfid;
- P9_DPRINTK(P9_DEBUG_VFS,
- " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
- old_dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
+ dir->i_ino, dentry->d_name.name, old_dentry->d_name.name);
oldfid = v9fs_fid_clone(old_dentry);
if (IS_ERR(oldfid))
@@ -1398,14 +1412,16 @@ clunk_fid:
*/
static int
-v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
+ struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
int retval;
char *name;
+ u32 perm;
- P9_DPRINTK(P9_DEBUG_VFS,
- " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
- dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
+ p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
+ dir->i_ino, dentry->d_name.name, mode,
+ MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -1427,7 +1443,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return -EINVAL;
}
- retval = v9fs_vfs_mkspecial(dir, dentry, mode, name);
+ perm = unixmode2p9mode(v9ses, mode);
+ retval = v9fs_vfs_mkspecial(dir, dentry, perm, name);
__putname(name);
return retval;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 0b5745e2194..a1e6c990cd4 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -48,7 +48,7 @@
#include "acl.h"
static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev);
/**
@@ -253,7 +253,7 @@ int v9fs_open_to_dotl_flags(int flags)
*/
static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
struct nameidata *nd)
{
int err = 0;
@@ -283,13 +283,13 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
}
name = (char *) dentry->d_name.name;
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
- "mode:0x%x\n", name, flags, omode);
+ p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
+ name, flags, omode);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return err;
}
@@ -297,7 +297,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
ofid = p9_client_walk(dfid, 0, NULL, 1);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
return err;
}
@@ -307,16 +307,15 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in creat %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
+ err);
goto error;
}
err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
mode, gid, &qid);
if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "p9_client_open_dotl failed in creat %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
+ err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
@@ -325,14 +324,14 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
fid = NULL;
goto error;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
@@ -395,7 +394,7 @@ err_clunk_old_fid:
*/
static int v9fs_vfs_mkdir_dotl(struct inode *dir,
- struct dentry *dentry, int omode)
+ struct dentry *dentry, umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -408,7 +407,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
@@ -420,7 +419,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
@@ -430,8 +429,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in mkdir %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n",
+ err);
goto error;
}
name = (char *) dentry->d_name.name;
@@ -444,8 +443,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
fid = NULL;
goto error;
}
@@ -453,8 +452,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
@@ -495,7 +494,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
struct p9_fid *fid;
struct p9_stat_dotl *st;
- P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
@@ -523,6 +522,46 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
+/*
+ * Attribute flags.
+ */
+#define P9_ATTR_MODE (1 << 0)
+#define P9_ATTR_UID (1 << 1)
+#define P9_ATTR_GID (1 << 2)
+#define P9_ATTR_SIZE (1 << 3)
+#define P9_ATTR_ATIME (1 << 4)
+#define P9_ATTR_MTIME (1 << 5)
+#define P9_ATTR_CTIME (1 << 6)
+#define P9_ATTR_ATIME_SET (1 << 7)
+#define P9_ATTR_MTIME_SET (1 << 8)
+
+struct dotl_iattr_map {
+ int iattr_valid;
+ int p9_iattr_valid;
+};
+
+static int v9fs_mapped_iattr_valid(int iattr_valid)
+{
+ int i;
+ int p9_iattr_valid = 0;
+ struct dotl_iattr_map dotl_iattr_map[] = {
+ { ATTR_MODE, P9_ATTR_MODE },
+ { ATTR_UID, P9_ATTR_UID },
+ { ATTR_GID, P9_ATTR_GID },
+ { ATTR_SIZE, P9_ATTR_SIZE },
+ { ATTR_ATIME, P9_ATTR_ATIME },
+ { ATTR_MTIME, P9_ATTR_MTIME },
+ { ATTR_CTIME, P9_ATTR_CTIME },
+ { ATTR_ATIME_SET, P9_ATTR_ATIME_SET },
+ { ATTR_MTIME_SET, P9_ATTR_MTIME_SET },
+ };
+ for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) {
+ if (iattr_valid & dotl_iattr_map[i].iattr_valid)
+ p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid;
+ }
+ return p9_iattr_valid;
+}
+
/**
* v9fs_vfs_setattr_dotl - set file metadata
* @dentry: file whose metadata to set
@@ -537,13 +576,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
- P9_DPRINTK(P9_DEBUG_VFS, "\n");
+ p9_debug(P9_DEBUG_VFS, "\n");
retval = inode_change_ok(dentry->d_inode, iattr);
if (retval)
return retval;
- p9attr.valid = iattr->ia_valid;
+ p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
p9attr.mode = iattr->ia_mode;
p9attr.uid = iattr->ia_uid;
p9attr.gid = iattr->ia_gid;
@@ -594,7 +633,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
- mode_t mode;
+ umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -670,14 +709,13 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
struct v9fs_session_info *v9ses;
name = (char *) dentry->d_name.name;
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
- dir->i_ino, name, symname);
+ p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return err;
}
@@ -687,7 +725,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
goto error;
}
@@ -697,8 +735,8 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
fid = NULL;
goto error;
}
@@ -707,8 +745,8 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
@@ -751,9 +789,8 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
- P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
- dir->i_ino, old_dentry->d_name.name,
- dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+ dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = v9fs_dentry_from_dir_inode(dir);
@@ -770,7 +807,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
if (err < 0) {
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
return err;
}
@@ -799,7 +836,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
*
*/
static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev)
{
int err;
@@ -813,9 +850,9 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
- P9_DPRINTK(P9_DEBUG_VFS,
- " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
- dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+ p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
+ dir->i_ino, dentry->d_name.name, omode,
+ MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
@@ -825,7 +862,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
- P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
@@ -835,8 +872,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "Failed to get acl values in mknod %d\n", err);
+ p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n",
+ err);
goto error;
}
name = (char *) dentry->d_name.name;
@@ -851,8 +888,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
- P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
fid = NULL;
goto error;
}
@@ -860,8 +897,8 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
- err);
+ p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
goto error;
}
err = v9fs_fid_add(dentry, fid);
@@ -905,7 +942,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
char *link = __getname();
char *target;
- P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
if (!link) {
link = ERR_PTR(-ENOMEM);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c70251d47ed..7b0cd87b07c 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -117,11 +117,11 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
struct inode *inode = NULL;
struct dentry *root = NULL;
struct v9fs_session_info *v9ses = NULL;
- int mode = S_IRWXUGO | S_ISVTX;
+ umode_t mode = S_IRWXUGO | S_ISVTX;
struct p9_fid *fid;
int retval = 0;
- P9_DPRINTK(P9_DEBUG_VFS, " \n");
+ p9_debug(P9_DEBUG_VFS, "\n");
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
@@ -191,7 +191,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto release_sb;
v9fs_fid_add(root, fid);
- P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
clunk_fid:
@@ -223,7 +223,7 @@ static void v9fs_kill_super(struct super_block *s)
{
struct v9fs_session_info *v9ses = s->s_fs_info;
- P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
+ p9_debug(P9_DEBUG_VFS, " %p\n", s);
kill_anon_super(s);
@@ -231,7 +231,7 @@ static void v9fs_kill_super(struct super_block *s)
v9fs_session_close(v9ses);
kfree(v9ses);
s->s_fs_info = NULL;
- P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n");
+ p9_debug(P9_DEBUG_VFS, "exiting kill_super\n");
}
static void
@@ -303,7 +303,7 @@ static int v9fs_write_inode(struct inode *inode,
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
- P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
v9inode = V9FS_I(inode);
if (!v9inode->writeback_fid)
return 0;
@@ -326,7 +326,7 @@ static int v9fs_write_inode_dotl(struct inode *inode,
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
- P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
v9inode = V9FS_I(inode);
if (!v9inode->writeback_fid)
return 0;
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index d288773871b..29653b70a9c 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -32,8 +32,8 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
if (IS_ERR(attr_fid)) {
retval = PTR_ERR(attr_fid);
- P9_DPRINTK(P9_DEBUG_VFS,
- "p9_client_attrwalk failed %zd\n", retval);
+ p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
+ retval);
attr_fid = NULL;
goto error;
}
@@ -87,8 +87,8 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
{
struct p9_fid *fid;
- P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n",
- __func__, name, buffer_size);
+ p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
+ name, buffer_size);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -115,8 +115,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
int retval, msize, write_count;
struct p9_fid *fid = NULL;
- P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu flags = %d\n",
- __func__, name, value_len, flags);
+ p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
+ name, value_len, flags);
fid = v9fs_fid_clone(dentry);
if (IS_ERR(fid)) {
@@ -129,8 +129,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
*/
retval = p9_client_xattrcreate(fid, name, value_len, flags);
if (retval < 0) {
- P9_DPRINTK(P9_DEBUG_VFS,
- "p9_client_xattrcreate failed %d\n", retval);
+ p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
+ retval);
goto error;
}
msize = fid->clnt->msize;
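The 9p hunks above convert every P9_DPRINTK() call to p9_debug(); the xattr hunks also drop the explicit "%s: ", __func__ arguments, which suggests the new helper records the calling function itself. A minimal sketch of the wrapper shape this assumes (the real definition lives in the 9p headers and may differ in detail):

/* hedged sketch, not part of the patch: same debug-level first argument,
 * function name supplied by the wrapper instead of by each caller */
#define p9_debug(level, fmt, ...)				\
	_p9_debug(level, __func__, fmt, ##__VA_ARGS__)

Under that assumption the conversion is mostly mechanical: only call sites that used to print __func__ by hand need their format strings trimmed, as in the v9fs_xattr_get()/v9fs_xattr_set() hunks.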
diff --git a/fs/Kconfig b/fs/Kconfig
index 5f4c45d4aa1..d621f02a3f9 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -218,6 +218,8 @@ source "fs/exofs/Kconfig"
endif # MISC_FILESYSTEMS
+source "fs/exofs/Kconfig.ore"
+
menuconfig NETWORK_FILESYSTEMS
bool "Network File Systems"
default y
@@ -266,14 +268,6 @@ source "fs/9p/Kconfig"
endif # NETWORK_FILESYSTEMS
-if BLOCK
-menu "Partition Types"
-
-source "fs/partitions/Kconfig"
-
-endmenu
-endif
-
source "fs/nls/Kconfig"
source "fs/dlm/Kconfig"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 79e2ca7973b..e95d1b64082 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -27,6 +27,9 @@ config COMPAT_BINFMT_ELF
bool
depends on COMPAT && BINFMT_ELF
+config ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ bool
+
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y
diff --git a/fs/Makefile b/fs/Makefile
index d2c3353d547..93804d4d66e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -19,6 +19,8 @@ else
obj-y += no-block.o
endif
+obj-$(CONFIG_PROC_FS) += proc_namespace.o
+
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
obj-y += notify/
obj-$(CONFIG_EPOLL) += eventpoll.o
@@ -52,7 +54,6 @@ obj-$(CONFIG_FHANDLE) += fhandle.o
obj-y += quota/
obj-$(CONFIG_PROC_FS) += proc/
-obj-y += partitions/
obj-$(CONFIG_SYSFS) += sysfs/
obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index c8bf36a1996..8e3b36ace30 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -126,9 +126,9 @@ static void adfs_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int adfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct adfs_sb_info *asb = ADFS_SB(mnt->mnt_sb);
+ struct adfs_sb_info *asb = ADFS_SB(root->d_sb);
if (asb->s_uid != 0)
seq_printf(seq, ",uid=%u", asb->s_uid);
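The adfs hunk above, like the autofs4 one further down, reflects a VFS interface change: ->show_options() now receives the filesystem's root dentry instead of a struct vfsmount, and per-sb data is reached through root->d_sb. A minimal sketch under that assumption, for a hypothetical examplefs (the sb_info layout and option are illustrative only):

#include <linux/fs.h>
#include <linux/seq_file.h>

struct examplefs_sb_info {
	unsigned int uid;		/* example mount option */
};

static int examplefs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct examplefs_sb_info *sbi = root->d_sb->s_fs_info;

	if (sbi && sbi->uid)
		seq_printf(seq, ",uid=%u", sbi->uid);
	return 0;
}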
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index c2b9c79eb64..45a0ce45d7b 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -136,7 +136,7 @@ extern int affs_remove_header(struct dentry *dentry);
extern u32 affs_checksum_block(struct super_block *sb, struct buffer_head *bh);
extern void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh);
extern void secs_to_datestamp(time_t secs, struct affs_date *ds);
-extern mode_t prot_to_mode(u32 prot);
+extern umode_t prot_to_mode(u32 prot);
extern void mode_to_prot(struct inode *inode);
extern void affs_error(struct super_block *sb, const char *function, const char *fmt, ...);
extern void affs_warning(struct super_block *sb, const char *function, const char *fmt, ...);
@@ -156,8 +156,8 @@ extern void affs_free_bitmap(struct super_block *sb);
extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *);
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
-extern int affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *);
-extern int affs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *);
+extern int affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
struct dentry *dentry);
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index de37ec84234..52a6407682e 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -390,10 +390,10 @@ secs_to_datestamp(time_t secs, struct affs_date *ds)
ds->ticks = cpu_to_be32(secs * 50);
}
-mode_t
+umode_t
prot_to_mode(u32 prot)
{
- int mode = 0;
+ umode_t mode = 0;
if (!(prot & FIBF_NOWRITE))
mode |= S_IWUSR;
@@ -421,7 +421,7 @@ void
mode_to_prot(struct inode *inode)
{
u32 prot = AFFS_I(inode)->i_protect;
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
if (!(mode & S_IXUSR))
prot |= FIBF_NOEXECUTE;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 780a11dc631..47806940aac 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -255,13 +255,13 @@ affs_unlink(struct inode *dir, struct dentry *dentry)
}
int
-affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
int error;
- pr_debug("AFFS: create(%lu,\"%.*s\",0%o)\n",dir->i_ino,(int)dentry->d_name.len,
+ pr_debug("AFFS: create(%lu,\"%.*s\",0%ho)\n",dir->i_ino,(int)dentry->d_name.len,
dentry->d_name.name,mode);
inode = affs_new_inode(dir);
@@ -285,12 +285,12 @@ affs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata
}
int
-affs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int error;
- pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%o)\n",dir->i_ino,
+ pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%ho)\n",dir->i_ino,
(int)dentry->d_name.len,dentry->d_name.name,mode);
inode = affs_new_inode(dir);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index b31507d0f9b..8ba73fed796 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -98,7 +98,6 @@ static struct inode *affs_alloc_inode(struct super_block *sb)
static void affs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
}
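The affs hunk above is one of several in this patch (afs, befs, bfs, the bdev pseudo-fs) that delete INIT_LIST_HEAD(&inode->i_dentry) from the RCU free callback; the initialization is evidently redundant on the free path, presumably because the common inode setup code owns it now. What remains is just the slab free, sketched here for a hypothetical examplefs:

#include <linux/fs.h>
#include <linux/slab.h>

struct examplefs_inode {
	/* filesystem-private fields would live here */
	struct inode vfs_inode;
};

static struct kmem_cache *examplefs_inode_cachep;

static void examplefs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(examplefs_inode_cachep,
			container_of(inode, struct examplefs_inode, vfs_inode));
}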
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 1b0b1955001..e22dc4b4a50 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -28,9 +28,9 @@ static int afs_d_delete(const struct dentry *dentry);
static void afs_d_release(struct dentry *dentry);
static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
-static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd);
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
@@ -764,7 +764,7 @@ static void afs_d_release(struct dentry *dentry)
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct afs_file_status status;
struct afs_callback cb;
@@ -777,7 +777,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
dvnode = AFS_FS_I(dir);
- _enter("{%x:%u},{%s},%o",
+ _enter("{%x:%u},{%s},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
ret = -ENAMETOOLONG;
@@ -948,7 +948,7 @@ error:
/*
* create a regular file on an AFS filesystem
*/
-static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct afs_file_status status;
@@ -962,7 +962,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
dvnode = AFS_FS_I(dir);
- _enter("{%x:%u},{%s},%o,",
+ _enter("{%x:%u},{%s},%ho,",
dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
ret = -ENAMETOOLONG;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index aa59184151d..8f4ce2658b7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -242,7 +242,7 @@ struct vfsmount *afs_d_automount(struct path *path)
{
struct vfsmount *newmnt;
- _enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name);
+ _enter("{%s}", path->dentry->d_name.name);
newmnt = afs_mntpt_do_automount(path->dentry);
if (IS_ERR(newmnt))
@@ -252,7 +252,7 @@ struct vfsmount *afs_d_automount(struct path *path)
mnt_set_expiry(newmnt, &afs_vfsmounts);
queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
afs_mntpt_expiry_timeout * HZ);
- _leave(" = %p {%s}", newmnt, newmnt->mnt_devname);
+ _leave(" = %p", newmnt);
return newmnt;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 356dcf0929e..983ec59fc80 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -495,7 +495,6 @@ static void afs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
struct afs_vnode *vnode = AFS_FS_I(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(afs_inode_cachep, vnode);
}
diff --git a/fs/attr.c b/fs/attr.c
index 7ee7ba48831..95053ad8abc 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(setattr_copy);
int notify_change(struct dentry * dentry, struct iattr * attr)
{
struct inode *inode = dentry->d_inode;
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
int error;
struct timespec now;
unsigned int ia_valid = attr->ia_valid;
@@ -177,7 +177,7 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
}
if ((ia_valid & ATTR_MODE)) {
- mode_t amode = attr->ia_mode;
+ umode_t amode = attr->ia_mode;
/* Flag setting protected by i_mutex */
if (is_sxid(amode))
inode->i_flags &= ~S_NOSEC;
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 326dc08d3e3..5869d4e974a 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -155,7 +155,7 @@ static inline int autofs4_ispending(struct dentry *dentry)
return 0;
}
-struct inode *autofs4_get_inode(struct super_block *, mode_t);
+struct inode *autofs4_get_inode(struct super_block *, umode_t);
void autofs4_free_ino(struct autofs_info *);
/* Expiration */
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 509fe1eb66a..76741d8d778 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -194,7 +194,7 @@ static int find_autofs_mount(const char *pathname,
return err;
err = -ENOENT;
while (path.dentry == path.mnt->mnt_root) {
- if (path.mnt->mnt_sb->s_magic == AUTOFS_SUPER_MAGIC) {
+ if (path.dentry->d_sb->s_magic == AUTOFS_SUPER_MAGIC) {
if (test(&path, data)) {
path_get(&path);
if (!err) /* already found some */
@@ -212,7 +212,7 @@ static int find_autofs_mount(const char *pathname,
static int test_by_dev(struct path *path, void *p)
{
- return path->mnt->mnt_sb->s_dev == *(dev_t *)p;
+ return path->dentry->d_sb->s_dev == *(dev_t *)p;
}
static int test_by_type(struct path *path, void *p)
@@ -538,11 +538,11 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
err = find_autofs_mount(name, &path, test_by_type, &type);
if (err)
goto out;
- devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
+ devid = new_encode_dev(path.dentry->d_sb->s_dev);
err = 0;
if (path.mnt->mnt_root == path.dentry) {
err = 1;
- magic = path.mnt->mnt_sb->s_magic;
+ magic = path.dentry->d_sb->s_magic;
}
} else {
dev_t dev = sbi->sb->s_dev;
@@ -556,7 +556,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
err = have_submounts(path.dentry);
if (follow_down_one(&path))
- magic = path.mnt->mnt_sb->s_magic;
+ magic = path.dentry->d_sb->s_magic;
}
param->ismountpoint.out.devid = devid;
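The autofs4 hunks above, like the binfmt_misc hunks further down, stop dereferencing path.mnt->mnt_sb and take the superblock from the dentry instead. Two hypothetical helpers (not from the patch) capture the substitution:

#include <linux/fs.h>

static inline struct super_block *example_path_sb(const struct path *path)
{
	return path->dentry->d_sb;		/* was: path->mnt->mnt_sb */
}

static inline struct super_block *example_file_sb(const struct file *file)
{
	return file->f_path.dentry->d_sb;	/* was: f_path.mnt->mnt_sb */
}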
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 8179f1ab817..2ba44c79d54 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -70,10 +70,10 @@ out_kill_sb:
kill_litter_super(sb);
}
-static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int autofs4_show_options(struct seq_file *m, struct dentry *root)
{
- struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb);
- struct inode *root_inode = mnt->mnt_sb->s_root->d_inode;
+ struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
+ struct inode *root_inode = root->d_sb->s_root->d_inode;
if (!sbi)
return 0;
@@ -326,7 +326,7 @@ fail_unlock:
return -EINVAL;
}
-struct inode *autofs4_get_inode(struct super_block *sb, mode_t mode)
+struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index f55ae23b137..75e5f1c8e02 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -26,7 +26,7 @@
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
-static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
+static int autofs4_dir_mkdir(struct inode *,struct dentry *,umode_t);
static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
#ifdef CONFIG_COMPAT
static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
@@ -699,7 +699,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9205cf25f1c..22e9a78872f 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -173,7 +173,7 @@ static const struct file_operations bad_file_ops =
};
static int bad_inode_create (struct inode *dir, struct dentry *dentry,
- int mode, struct nameidata *nd)
+ umode_t mode, struct nameidata *nd)
{
return -EIO;
}
@@ -202,7 +202,7 @@ static int bad_inode_symlink (struct inode *dir, struct dentry *dentry,
}
static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry,
- int mode)
+ umode_t mode)
{
return -EIO;
}
@@ -213,7 +213,7 @@ static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
}
static int bad_inode_mknod (struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
return -EIO;
}
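bad_inode.c above, together with the affs/afs/autofs4/bfs/btrfs hunks around it, tracks an inode_operations change: the mode argument of ->create(), ->mkdir() and ->mknod() becomes umode_t (an unsigned short), which is why the octal debug formats switch from %o to %ho. A hedged sketch of an out-of-tree filesystem adapting to the same signature (examplefs and its pr_debug line are illustrative only):

#include <linux/fs.h>
#include <linux/printk.h>

static int examplefs_mkdir(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	pr_debug("examplefs: mkdir(%lu, \"%.*s\", 0%ho)\n",
		 dir->i_ino, (int)dentry->d_name.len,
		 dentry->d_name.name, mode);
	return -ENOSYS;			/* placeholder body */
}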
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 8342ca67abc..6e6d536767f 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -286,7 +286,6 @@ befs_alloc_inode(struct super_block *sb)
static void befs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
}
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 9cc07401947..d12c7966db2 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -84,7 +84,7 @@ const struct file_operations bfs_dir_operations = {
extern void dump_imap(const char *, struct super_block *);
-static int bfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int err;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 697af5bf70b..b0391bc402b 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -251,7 +251,6 @@ static struct inode *bfs_alloc_inode(struct super_block *sb)
static void bfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 21ac5ee4b43..bcb884e2d61 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -794,7 +794,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
-#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
/* Memory randomization might have been switched off
* in runtime via sysctl.
* If that is the case, retain the original non-zero
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 1e9edbdeda7..a9198dfd5f8 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -560,7 +560,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
break;
case 2: set_bit(Enabled, &e->flags);
break;
- case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
+ case 3: root = dget(file->f_path.dentry->d_sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
kill_node(e);
@@ -587,7 +587,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
Node *e;
struct inode *inode;
struct dentry *root, *dentry;
- struct super_block *sb = file->f_path.mnt->mnt_sb;
+ struct super_block *sb = file->f_path.dentry->d_sb;
int err = 0;
e = create_entry(buffer, count);
@@ -666,7 +666,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
switch (res) {
case 1: enabled = 0; break;
case 2: enabled = 1; break;
- case 3: root = dget(file->f_path.mnt->mnt_sb->s_root);
+ case 3: root = dget(file->f_path.dentry->d_sb->s_root);
mutex_lock(&root->d_inode->i_mutex);
while (!list_empty(&entries))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b07f1da1de4..69a5b6fbee2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/buffer_head.h>
+#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
@@ -25,6 +26,7 @@
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/kmemleak.h>
+#include <linux/cleancache.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -82,13 +84,35 @@ static sector_t max_block(struct block_device *bdev)
}
/* Kill _all_ buffers and pagecache , dirty or not.. */
-static void kill_bdev(struct block_device *bdev)
+void kill_bdev(struct block_device *bdev)
{
- if (bdev->bd_inode->i_mapping->nrpages == 0)
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+ if (mapping->nrpages == 0)
return;
+
invalidate_bh_lrus();
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ truncate_inode_pages(mapping, 0);
}
+EXPORT_SYMBOL(kill_bdev);
+
+/* Invalidate clean unused buffers and pagecache. */
+void invalidate_bdev(struct block_device *bdev)
+{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+ if (mapping->nrpages == 0)
+ return;
+
+ invalidate_bh_lrus();
+ lru_add_drain_all(); /* make sure all lru add caches are flushed */
+ invalidate_mapping_pages(mapping, 0, -1);
+ /* 99% of the time, we don't need to flush the cleancache on the bdev.
+ * But, for the strange corners, lets be cautious
+ */
+ cleancache_flush_inode(mapping);
+}
+EXPORT_SYMBOL(invalidate_bdev);
int set_blocksize(struct block_device *bdev, int size)
{
@@ -425,7 +449,6 @@ static void bdev_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct bdev_inode *bdi = BDEV_I(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(bdev_cachep, bdi);
}
@@ -493,7 +516,7 @@ static struct file_system_type bd_type = {
.kill_sb = kill_anon_super,
};
-struct super_block *blockdev_superblock __read_mostly;
+static struct super_block *blockdev_superblock __read_mostly;
void __init bdev_cache_init(void)
{
@@ -639,6 +662,11 @@ static struct block_device *bd_acquire(struct inode *inode)
return bdev;
}
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+ return sb == blockdev_superblock;
+}
+
/* Call when you free inode */
void bd_forget(struct inode *inode)
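block_dev.c above exports kill_bdev() and moves invalidate_bdev() here, where it now drains the per-CPU LRU-add caches and flushes cleancache in addition to invalidating clean pagecache. A hedged usage sketch (the driver function and its trigger are hypothetical):

#include <linux/fs.h>

static void example_media_changed(struct block_device *bdev)
{
	/* drop clean, unused pagecache and buffers for the device;
	 * kill_bdev() would discard everything, dirty pages included */
	invalidate_bdev(bdev);
}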
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef..0cc20b35c1c 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
int idle;
};
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
/*
* btrfs_start_workers uses kthread_run, which can block waiting for memory
* for a very long time. It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
{
struct worker_start *start;
start = container_of(work, struct worker_start, work);
- btrfs_start_workers(start->queue, 1);
+ __btrfs_start_workers(start->queue);
kfree(start);
}
-static int start_new_worker(struct btrfs_workers *queue)
-{
- struct worker_start *start;
- int ret;
-
- start = kzalloc(sizeof(*start), GFP_NOFS);
- if (!start)
- return -ENOMEM;
-
- start->work.func = start_new_worker_func;
- start->queue = queue;
- ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
- if (ret)
- kfree(start);
- return ret;
-}
-
/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
struct btrfs_workers *workers = worker->workers;
+ struct worker_start *start;
unsigned long flags;
rmb();
if (!workers->atomic_start_pending)
return;
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return;
+
+ start->work.func = start_new_worker_func;
+ start->queue = workers;
+
spin_lock_irqsave(&workers->lock, flags);
if (!workers->atomic_start_pending)
goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- start_new_worker(workers);
+ btrfs_queue_worker(workers->atomic_worker_start, &start->work);
return;
out:
+ kfree(start);
spin_unlock_irqrestore(&workers->lock, flags);
}
@@ -331,7 +325,7 @@ again:
run_ordered_completions(worker->workers, work);
check_pending_worker_creates(worker);
-
+ cond_resched();
}
spin_lock_irq(&worker->lock);
@@ -340,7 +334,7 @@ again:
if (freezing(current)) {
worker->working = 0;
spin_unlock_irq(&worker->lock);
- refrigerator();
+ try_to_freeze();
} else {
spin_unlock_irq(&worker->lock);
if (!kthread_should_stop()) {
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-static int __btrfs_start_workers(struct btrfs_workers *workers,
- int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
- int i;
- for (i = 0; i < num_workers; i++) {
- worker = kzalloc(sizeof(*worker), GFP_NOFS);
- if (!worker) {
- ret = -ENOMEM;
- goto fail;
- }
+ worker = kzalloc(sizeof(*worker), GFP_NOFS);
+ if (!worker) {
+ ret = -ENOMEM;
+ goto fail;
+ }
- INIT_LIST_HEAD(&worker->pending);
- INIT_LIST_HEAD(&worker->prio_pending);
- INIT_LIST_HEAD(&worker->worker_list);
- spin_lock_init(&worker->lock);
-
- atomic_set(&worker->num_pending, 0);
- atomic_set(&worker->refs, 1);
- worker->workers = workers;
- worker->task = kthread_run(worker_loop, worker,
- "btrfs-%s-%d", workers->name,
- workers->num_workers + i);
- if (IS_ERR(worker->task)) {
- ret = PTR_ERR(worker->task);
- kfree(worker);
- goto fail;
- }
- spin_lock_irq(&workers->lock);
- list_add_tail(&worker->worker_list, &workers->idle_list);
- worker->idle = 1;
- workers->num_workers++;
- workers->num_workers_starting--;
- WARN_ON(workers->num_workers_starting < 0);
- spin_unlock_irq(&workers->lock);
+ INIT_LIST_HEAD(&worker->pending);
+ INIT_LIST_HEAD(&worker->prio_pending);
+ INIT_LIST_HEAD(&worker->worker_list);
+ spin_lock_init(&worker->lock);
+
+ atomic_set(&worker->num_pending, 0);
+ atomic_set(&worker->refs, 1);
+ worker->workers = workers;
+ worker->task = kthread_run(worker_loop, worker,
+ "btrfs-%s-%d", workers->name,
+ workers->num_workers + 1);
+ if (IS_ERR(worker->task)) {
+ ret = PTR_ERR(worker->task);
+ kfree(worker);
+ goto fail;
}
+ spin_lock_irq(&workers->lock);
+ list_add_tail(&worker->worker_list, &workers->idle_list);
+ worker->idle = 1;
+ workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
+ spin_unlock_irq(&workers->lock);
+
return 0;
fail:
- btrfs_stop_workers(workers);
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting--;
+ spin_unlock_irq(&workers->lock);
return ret;
}
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
{
spin_lock_irq(&workers->lock);
- workers->num_workers_starting += num_workers;
+ workers->num_workers_starting++;
spin_unlock_irq(&workers->lock);
- return __btrfs_start_workers(workers, num_workers);
+ return __btrfs_start_workers(workers);
}
/*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
struct btrfs_worker_thread *worker;
unsigned long flags;
struct list_head *fallback;
+ int ret;
-again:
spin_lock_irqsave(&workers->lock, flags);
+again:
worker = next_worker(workers);
if (!worker) {
@@ -584,7 +578,10 @@ again:
workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
- __btrfs_start_workers(workers, 1);
+ ret = __btrfs_start_workers(workers);
+ spin_lock_irqsave(&workers->lock, flags);
+ if (ret)
+ goto fallback;
goto again;
}
}
@@ -665,7 +662,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
@@ -673,7 +670,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
/* don't requeue something already on a list */
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
- goto out;
+ return;
worker = find_worker(workers);
if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
- return 0;
}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 5077746cf85..f34cc31fa3c 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -109,8 +109,8 @@ struct btrfs_workers {
char *name;
};
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
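The async-thread changes above rework the worker-start API: btrfs_start_workers() now spawns exactly one thread and reports failure, while btrfs_queue_worker() no longer returns a value. Callers therefore check only the start calls, as in this compressed sketch (assumes the btrfs private headers; the in-tree version is the disk-io.c hunk below):

static int example_start_workers(struct btrfs_fs_info *fs_info)
{
	int ret;

	ret = btrfs_start_workers(&fs_info->workers);
	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
	if (ret)
		return -ENOMEM;		/* the only realistic failure mode */
	return 0;
}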
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 04a5dfcee5a..67385033323 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2369,6 +2369,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 min_reserved);
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes);
@@ -2689,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5b163572e0c..9c1eccc2c50 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
* we're accounted for.
*/
- if (!trans->bytes_reserved &&
- src_rsv != &root->fs_info->delalloc_block_rsv) {
+ if (!src_rsv || (!trans->bytes_reserved &&
+ src_rsv != &root->fs_info->delalloc_block_rsv)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 632f8f3cc9d..f99a099a774 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
btrfs_run_defrag_inodes(root->fs_info);
}
- if (freezing(current)) {
- refrigerator();
- } else {
+ if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule();
@@ -1635,9 +1633,7 @@ sleep:
wake_up_process(root->fs_info->cleaner_kthread);
mutex_unlock(&root->fs_info->transaction_kthread_mutex);
- if (freezing(current)) {
- refrigerator();
- } else {
+ if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
!btrfs_transaction_blocked(root->fs_info))
@@ -2194,19 +2190,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;
- btrfs_start_workers(&fs_info->workers, 1);
- btrfs_start_workers(&fs_info->generic_worker, 1);
- btrfs_start_workers(&fs_info->submit_workers, 1);
- btrfs_start_workers(&fs_info->delalloc_workers, 1);
- btrfs_start_workers(&fs_info->fixup_workers, 1);
- btrfs_start_workers(&fs_info->endio_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
- btrfs_start_workers(&fs_info->delayed_workers, 1);
- btrfs_start_workers(&fs_info->caching_workers, 1);
- btrfs_start_workers(&fs_info->readahead_workers, 1);
+ /*
+ * btrfs_start_workers can really only fail because of ENOMEM so just
+ * return -ENOMEM if any of these fail.
+ */
+ ret = btrfs_start_workers(&fs_info->workers);
+ ret |= btrfs_start_workers(&fs_info->generic_worker);
+ ret |= btrfs_start_workers(&fs_info->submit_workers);
+ ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+ ret |= btrfs_start_workers(&fs_info->fixup_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+ ret |= btrfs_start_workers(&fs_info->delayed_workers);
+ ret |= btrfs_start_workers(&fs_info->caching_workers);
+ ret |= btrfs_start_workers(&fs_info->readahead_workers);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_sb_buffer;
+ }
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 930ae894973..f5fbe576d2b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2822,7 +2822,7 @@ out_free:
btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
- if (!ret)
+ if (!ret && dcs == BTRFS_DC_SETUP)
block_group->cache_generation = trans->transid;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);
@@ -3888,9 +3888,9 @@ int btrfs_block_rsv_check(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_refill(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 min_reserved)
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved, int flush)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
@@ -3909,7 +3909,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
if (!ret)
return 0;
- ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+ ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
block_rsv_add_bytes(block_rsv, num_bytes, 0);
return 0;
@@ -3918,6 +3918,20 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
+
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 min_reserved)
+{
+ return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
+}
+
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv,
u64 num_bytes)
@@ -4190,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
+ u64 csum_bytes;
unsigned nr_extents = 0;
+ int extra_reserve = 0;
int flush = 1;
int ret;
+ /* Need to be holding the i_mutex here if we aren't free space cache */
if (btrfs_is_free_space_inode(root, inode))
flush = 0;
+ else
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
if (flush && btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);
@@ -4206,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_I(inode)->outstanding_extents++;
if (BTRFS_I(inode)->outstanding_extents >
- BTRFS_I(inode)->reserved_extents) {
+ BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- BTRFS_I(inode)->reserved_extents += nr_extents;
- }
/*
* Add an item to reserve for updating the inode when we complete the
@@ -4218,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
*/
if (!BTRFS_I(inode)->delalloc_meta_reserved) {
nr_extents++;
- BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ extra_reserve = 1;
}
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+ csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4232,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
/*
- * Somebody could have come in and twiddled with the
- * reservation, so if we have to free more than we would have
- * reserved from this reservation go ahead and release those
- * bytes.
+ * If the inodes csum_bytes is the same as the original
+ * csum_bytes then we know we haven't raced with any free()ers
+ * so we can just reduce our inodes csum bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
*/
- to_free -= to_reserve;
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
if (to_free)
btrfs_block_rsv_release(root, block_rsv, to_free);
return ret;
}
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (extra_reserve) {
+ BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ nr_extents--;
+ }
+ BTRFS_I(inode)->reserved_extents += nr_extents;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
@@ -5093,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root = orig_root->fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group_cache *used_block_group;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
- int last_ptr_loop = 0;
int loop = 0;
int index = 0;
int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5159,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
+ used_block_group = block_group;
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if its not cached.
@@ -5196,6 +5228,7 @@ search:
u64 offset;
int cached;
+ used_block_group = block_group;
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
@@ -5265,84 +5298,73 @@ alloc:
spin_lock(&block_group->free_space_ctl->tree_lock);
if (cached &&
block_group->free_space_ctl->free_space <
- num_bytes + empty_size) {
+ num_bytes + empty_cluster + empty_size) {
spin_unlock(&block_group->free_space_ctl->tree_lock);
goto loop;
}
spin_unlock(&block_group->free_space_ctl->tree_lock);
/*
- * Ok we want to try and use the cluster allocator, so lets look
- * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
- * have tried the cluster allocator plenty of times at this
- * point and not have found anything, so we are likely way too
- * fragmented for the clustering stuff to find anything, so lets
- * just skip it and let the allocator find whatever block it can
- * find
+ * Ok we want to try and use the cluster allocator, so
+ * lets look there
*/
- if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+ if (last_ptr) {
/*
* the refill lock keeps out other
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
- if (last_ptr->block_group &&
- (last_ptr->block_group->ro ||
- !block_group_bits(last_ptr->block_group, data))) {
- offset = 0;
+ used_block_group = last_ptr->block_group;
+ if (used_block_group != block_group &&
+ (!used_block_group ||
+ used_block_group->ro ||
+ !block_group_bits(used_block_group, data))) {
+ used_block_group = block_group;
goto refill_cluster;
}
- offset = btrfs_alloc_from_cluster(block_group, last_ptr,
- num_bytes, search_start);
+ if (used_block_group != block_group)
+ btrfs_get_block_group(used_block_group);
+
+ offset = btrfs_alloc_from_cluster(used_block_group,
+ last_ptr, num_bytes, used_block_group->key.objectid);
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
- spin_lock(&last_ptr->lock);
- /*
- * whoops, this cluster doesn't actually point to
- * this block group. Get a ref on the block
- * group is does point to and try again
- */
- if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group &&
- index <=
- get_block_group_index(last_ptr->block_group)) {
-
- btrfs_put_block_group(block_group);
- block_group = last_ptr->block_group;
- btrfs_get_block_group(block_group);
- spin_unlock(&last_ptr->lock);
- spin_unlock(&last_ptr->refill_lock);
-
- last_ptr_loop = 1;
- search_start = block_group->key.objectid;
- /*
- * we know this block group is properly
- * in the list because
- * btrfs_remove_block_group, drops the
- * cluster before it removes the block
- * group from the list
- */
- goto have_block_group;
+ WARN_ON(last_ptr->block_group != used_block_group);
+ if (used_block_group != block_group) {
+ btrfs_put_block_group(used_block_group);
+ used_block_group = block_group;
}
- spin_unlock(&last_ptr->lock);
refill_cluster:
+ BUG_ON(used_block_group != block_group);
+ /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+ * set up a new clusters, so lets just skip it
+ * and let the allocator find whatever block
+ * it can find. If we reach this point, we
+ * will have tried the cluster allocator
+ * plenty of times and not have found
+ * anything, so we are likely way too
+ * fragmented for the clustering stuff to find
+ * anything. */
+ if (loop >= LOOP_NO_EMPTY_SIZE) {
+ spin_unlock(&last_ptr->refill_lock);
+ goto unclustered_alloc;
+ }
+
/*
* this cluster didn't work out, free it and
* start over
*/
btrfs_return_cluster_to_free_space(NULL, last_ptr);
- last_ptr_loop = 0;
-
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
- offset, num_bytes,
+ search_start, num_bytes,
empty_cluster + empty_size);
if (ret == 0) {
/*
@@ -5378,6 +5400,7 @@ refill_cluster:
goto loop;
}
+unclustered_alloc:
offset = btrfs_find_space_for_alloc(block_group, search_start,
num_bytes, empty_size);
/*
@@ -5404,14 +5427,14 @@ checks:
search_start = stripe_align(root, offset);
/* move on to the next group */
if (search_start + num_bytes >= search_end) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
/* move on to the next group */
if (search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ used_block_group->key.objectid + used_block_group->key.offset) {
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5419,14 +5442,14 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
- ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+ ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
alloc_type);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5435,15 +5458,19 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
break;
loop:
failed_cluster_refill = false;
failed_alloc = false;
BUG_ON(index != get_block_group_index(block_group));
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
}
up_read(&space_info->groups_sem);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 9472d3de5e5..49f3c9dc09f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
node = tree_search(tree, start);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
*/
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
this_end = last_start - 1;
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
/*
* Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
*/
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
@@ -2287,14 +2295,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (!uptodate) {
int failed_mirror;
failed_mirror = (int)(unsigned long)bio->bi_bdev;
- if (tree->ops && tree->ops->readpage_io_failed_hook)
- ret = tree->ops->readpage_io_failed_hook(
- bio, page, start, end,
- failed_mirror, state);
- else
- ret = bio_readpage_error(bio, page, start, end,
- failed_mirror, NULL);
+ /*
+ * The generic bio_readpage_error handles errors the
+ * following way: If possible, new read requests are
+ * created and submitted and will end up in
+ * end_bio_extent_readpage as well (if we're lucky, not
+ * in the !uptodate case). In that case it returns 0 and
+ * we just go on with the next page in our bio. If it
+ * can't handle the error it will return -EIO and we
+ * remain responsible for that page.
+ */
+ ret = bio_readpage_error(bio, page, start, end,
+ failed_mirror, NULL);
if (ret == 0) {
+error_handled:
uptodate =
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
@@ -2302,6 +2316,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
uncache_state(&cached);
continue;
}
+ if (tree->ops && tree->ops->readpage_io_failed_hook) {
+ ret = tree->ops->readpage_io_failed_hook(
+ bio, page, start, end,
+ failed_mirror, state);
+ if (ret == 0)
+ goto error_handled;
+ }
}
if (uptodate) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa059bf..034d9850322 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1081,7 +1081,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
again:
for (i = 0; i < num_pages; i++) {
pages[i] = find_or_create_page(inode->i_mapping, index + i,
- mask);
+ mask | __GFP_WRITE);
if (!pages[i]) {
faili = i - 1;
err = -ENOMEM;
@@ -1136,7 +1136,8 @@ again:
GFP_NOFS);
}
for (i = 0; i < num_pages; i++) {
- clear_page_dirty_for_io(pages[i]);
+ if (clear_page_dirty_for_io(pages[i]))
+ account_page_redirty(pages[i]);
set_page_extent_mapped(pages[i]);
WARN_ON(!PageLocked(pages[i]));
}
@@ -1167,6 +1168,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
+ nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+ nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -1387,7 +1390,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
goto out;
}
- file_update_time(file);
+ err = btrfs_update_time(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
BTRFS_I(inode)->sequence++;
start_pos = round_down(pos, root->sectorsize);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6e5b7e46369..9a897bf7953 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -423,7 +423,7 @@ static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
}
if (index == 0)
- offset = sizeof(u32) * io_ctl->num_pages;;
+ offset = sizeof(u32) * io_ctl->num_pages;
crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
PAGE_CACHE_SIZE - offset);
@@ -1470,6 +1470,7 @@ static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
{
info->offset = offset_to_bitmap(ctl, offset);
info->bytes = 0;
+ INIT_LIST_HEAD(&info->list);
link_free_space(ctl, info);
ctl->total_bitmaps++;
@@ -2319,6 +2320,7 @@ again:
if (!found) {
start = i;
+ cluster->max_size = 0;
found = true;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 526dd51a196..81b235a61f8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -1943,7 +1944,7 @@ enum btrfs_orphan_cleanup_state {
};
/*
- * This is called in transaction commmit time. If there are no orphan
+ * This is called in transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
* structure.
*/
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret);
+ BUG_ON(ret && ret != -EEXIST);
}
/* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
if (ret && ret != -ESTALE)
goto out;
+ if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ struct btrfs_root *dead_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int is_dead_root = 0;
+
+ /*
+ * this is an orphan in the tree root. Currently these
+ * could come from 2 sources:
+ * a) a snapshot deletion in progress
+ * b) a free space cache inode
+ * We need to distinguish those two, as the snapshot
+ * orphan must not get deleted.
+ * find_dead_roots already ran before us, so if this
+ * is a snapshot deletion, we should find the root
+ * in the dead_roots list
+ */
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry(dead_root, &fs_info->dead_roots,
+ root_list) {
+ if (dead_root->root_key.objectid ==
+ found_key.objectid) {
+ is_dead_root = 1;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (is_dead_root) {
+ /* prevent this orphan from being found again */
+ key.offset = found_key.objectid - 1;
+ continue;
+ }
+ }
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
continue;
}
nr_truncate++;
+ /*
+ * Need to hold the imutex for reservation purposes, not
+ * a huge deal here but I have a WARN_ON in
+ * btrfs_delalloc_reserve_space to catch offenders.
+ */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
} else {
nr_unlink++;
}
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- trans = btrfs_start_transaction(root, 2);
+ trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
return 0;
if (newsize > oldsize) {
- i_size_write(inode, newsize);
- btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
- if (ret) {
- btrfs_setsize(inode, oldsize);
+ if (ret)
return ret;
- }
- mark_inode_dirty(inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ btrfs_end_transaction_throttle(trans, root);
} else {
/*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid) {
setattr_copy(inode, attr);
- mark_inode_dirty(inode);
+ err = btrfs_dirty_inode(inode);
- if (attr->ia_valid & ATTR_MODE)
+ if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
@@ -3490,7 +3538,7 @@ void btrfs_evict_inode(struct inode *inode)
* doing the truncate.
*/
while (1) {
- ret = btrfs_block_rsv_refill(root, rsv, min_size);
+ ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
/*
* Try and steal from the global reserve since we will
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
- return;
+ return 0;
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %ld\n",
- (unsigned long long)btrfs_ino(inode),
- PTR_ERR(trans));
- return;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %d\n",
- (unsigned long long)btrfs_ino(inode),
- ret);
- }
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
+
+ return ret;
+}
+
+/*
+ * This is a copy of file_update_time. We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file write and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct timespec now;
+ int ret;
+ enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+ /* First try to exhaust all avenues to not sync */
+ if (IS_NOCMTIME(inode))
+ return 0;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
+ sync_it = S_MTIME;
+
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it |= S_CTIME;
+
+ if (IS_I_VERSION(inode))
+ sync_it |= S_VERSION;
+
+ if (!sync_it)
+ return 0;
+
+ /* Finally allowed to write? Takes lock. */
+ if (mnt_want_write_file(file))
+ return 0;
+
+ /* Only change inode inside the lock region */
+ if (sync_it & S_VERSION)
+ inode_inc_iversion(inode);
+ if (sync_it & S_CTIME)
+ inode->i_ctime = now;
+ if (sync_it & S_MTIME)
+ inode->i_mtime = now;
+ ret = btrfs_dirty_inode(inode);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ mnt_drop_write(file->f_path.mnt);
+ return ret;
}
/*
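Unlike the VFS file_update_time(), the copy above returns the result of btrfs_dirty_inode(), so callers in the write and mmap paths can abort when the timestamp update cannot be committed. A hypothetical caller (the function name here is illustrative, not from the patch) only needs to check the return value:

    /* Illustrative only: fail the write path early if the mtime/ctime
     * update cannot be persisted (typically -ENOSPC). */
    static int example_prepare_write(struct file *file)
    {
            int ret = btrfs_update_time(file);

            if (ret)
                    return ret;     /* file_update_time() would have swallowed this */
            /* ... proceed with the actual write ... */
            return 0;
    }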
@@ -4326,8 +4412,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir,
const char *name, int name_len,
- u64 ref_objectid, u64 objectid, int mode,
- u64 *index)
+ u64 ref_objectid, u64 objectid,
+ umode_t mode, u64 *index)
{
struct inode *inode;
struct btrfs_inode_item *inode_item;
@@ -4504,17 +4590,13 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
int err = btrfs_add_link(trans, dir, inode,
dentry->d_name.name, dentry->d_name.len,
backref, index);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
if (err > 0)
err = -EEXIST;
return err;
}
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -4555,13 +4637,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+
+ inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
- inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
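The reordering in this hunk (repeated below for create and symlink) serves two rules: inode->i_op must be set before d_instantiate() because an LSM such as Smack inspects the ops vector during that hook to decide whether the filesystem supports xattrs, and d_instantiate() itself must only run once the link has actually been added. Reduced to the ordering alone (error handling trimmed, names as in the hunk):

    inode->i_op = &btrfs_special_inode_operations;          /* visible to the LSM before d_instantiate */
    err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
    if (!err) {
            init_special_inode(inode, inode->i_mode, rdev);
            btrfs_update_inode(trans, root, inode);
            d_instantiate(dentry, inode);                    /* last step: security hooks fire here */
    }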
@@ -4575,7 +4665,7 @@ out_unlock:
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
- int mode, struct nameidata *nd)
+ umode_t mode, struct nameidata *nd)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -4613,15 +4703,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+ d_instantiate(dentry, inode);
}
out_unlock:
nr = trans->blocks_used;
@@ -4679,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
+ d_instantiate(dentry, inode);
btrfs_log_new_name(trans, inode, NULL, parent);
}
@@ -4693,7 +4792,7 @@ fail:
return err;
}
-static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
@@ -6303,7 +6402,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_start;
u64 page_end;
+ /* Need this to keep space reservations serialized */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
+ if (!ret)
+ ret = btrfs_update_time(vma->vm_file);
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
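Two details in the page_mkwrite hunk above are easy to miss: the space reservation is wrapped in i_mutex to keep delalloc reservations serialized, and the timestamp update is attempted only if the reservation succeeded, with its errno folded into the same VM_FAULT translation. The sequence with comments (only the part shown in the hunk; the rest of the error mapping lives in the elided body):

    mutex_lock(&inode->i_mutex);                    /* serialize delalloc space reservations */
    ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
    mutex_unlock(&inode->i_mutex);
    if (!ret)
            ret = btrfs_update_time(vma->vm_file);  /* may fail with -ENOSPC */
    if (ret) {
            if (ret == -ENOMEM)
                    ret = VM_FAULT_OOM;             /* errno -> VM_FAULT_* for the fault handler */
            /* other errors are translated in the elided remainder */
    }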
@@ -6515,8 +6619,9 @@ static int btrfs_truncate(struct inode *inode)
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out;
+ ret = err = PTR_ERR(trans);
+ trans = NULL;
+ break;
}
}
@@ -6656,7 +6761,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
static void btrfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
@@ -7076,14 +7180,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
@@ -7132,6 +7243,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
drop_inode = 1;
out_unlock:
+ if (!err)
+ d_instantiate(dentry, inode);
nr = trans->blocks_used;
btrfs_end_transaction_throttle(trans, root);
if (drop_inode) {
@@ -7353,6 +7466,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a90e749ed6d..5441ff1480f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -201,7 +201,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
}
}
- ret = mnt_want_write(file->f_path.mnt);
+ ret = mnt_want_write_file(file);
if (ret)
goto out_unlock;
@@ -252,14 +252,14 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
+ btrfs_update_iflags(inode);
+ inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
- btrfs_update_iflags(inode);
- inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
ret = 0;
out_unlock:
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
+ mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
again:
@@ -1278,7 +1280,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans, root);
- } else {
+ } else if (new_size < old_size) {
ret = btrfs_shrink_device(device, new_size);
}
@@ -1853,7 +1855,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out;
}
- err = mnt_want_write(file->f_path.mnt);
+ err = mnt_want_write_file(file);
if (err)
goto out;
@@ -1969,7 +1971,7 @@ out_dput:
dput(dentry);
out_unlock_dir:
mutex_unlock(&dir->i_mutex);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
out:
kfree(vol_args);
return err;
@@ -1985,7 +1987,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
if (btrfs_root_readonly(root))
return -EROFS;
- ret = mnt_want_write(file->f_path.mnt);
+ ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -2038,7 +2040,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EINVAL;
}
out:
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
return ret;
}
@@ -2193,7 +2195,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (btrfs_root_readonly(root))
return -EROFS;
- ret = mnt_want_write(file->f_path.mnt);
+ ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -2508,7 +2510,7 @@ out_unlock:
out_fput:
fput(src_file);
out_drop_write:
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
return ret;
}
@@ -2547,7 +2549,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
if (btrfs_root_readonly(root))
goto out;
- ret = mnt_want_write(file->f_path.mnt);
+ ret = mnt_want_write_file(file);
if (ret)
goto out;
@@ -2563,7 +2565,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
out_drop:
atomic_dec(&root->fs_info->open_ioctl_trans);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
out:
return ret;
}
@@ -2798,7 +2800,7 @@ long btrfs_ioctl_trans_end(struct file *file)
atomic_dec(&root->fs_info->open_ioctl_trans);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
return 0;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index dff29d5e151..cfb55434a46 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
while (index <= last_index) {
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
if (ret)
goto out;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fab420db512..ddf2c90d3fc 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -256,6 +256,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
btrfs_release_path(swarn->path);
ipath = init_ipath(4096, local_root, swarn->path);
+ if (IS_ERR(ipath)) {
+ ret = PTR_ERR(ipath);
+ ipath = NULL;
+ goto err;
+ }
ret = paths_from_inode(inum, ipath);
if (ret < 0)
@@ -1530,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
- btrfs_start_workers(&fs_info->scrub_workers, 1);
+ ret = btrfs_start_workers(&fs_info->scrub_workers);
+ if (ret)
+ goto out;
}
++fs_info->scrub_workers_refcnt;
+out:
mutex_unlock(&fs_info->scrub_lock);
- return 0;
+ return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
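scrub_workers_get() now checks btrfs_start_workers() and leaves the refcount at zero when the pool could not be started, so a later scrub_workers_put() cannot tear down workers that never ran. The get side, stripped to the pattern (a sketch of the hunk, idle_thresh setup omitted):

    mutex_lock(&fs_info->scrub_lock);
    if (fs_info->scrub_workers_refcnt == 0) {
            btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                               fs_info->thread_pool_size, &fs_info->generic_worker);
            ret = btrfs_start_workers(&fs_info->scrub_workers);
            if (ret)
                    goto out;                       /* refcount stays 0: nothing to put later */
    }
    ++fs_info->scrub_workers_refcnt;                /* only counted once the pool is running */
    out:
    mutex_unlock(&fs_info->scrub_lock);
    return ret;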
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 17ee7fc5e64..ae488aa1966 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -40,7 +40,7 @@
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/cleancache.h>
-#include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -661,9 +661,9 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return ret;
}
-static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
- struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
+ struct btrfs_root *root = btrfs_sb(dentry->d_sb);
struct btrfs_fs_info *info = root->fs_info;
char *compress_type;
@@ -1053,11 +1053,11 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
u64 avail_space;
u64 used_space;
u64 min_stripe_size;
- int min_stripes = 1;
+ int min_stripes = 1, num_stripes = 1;
int i = 0, nr_devices;
int ret;
- nr_devices = fs_info->fs_devices->rw_devices;
+ nr_devices = fs_info->fs_devices->open_devices;
BUG_ON(!nr_devices);
devices_info = kmalloc(sizeof(*devices_info) * nr_devices,
@@ -1067,20 +1067,24 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
/* calc min stripe number for data space allocation */
type = btrfs_get_alloc_profile(root, 1);
- if (type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID1)
+ num_stripes = nr_devices;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID10)
+ num_stripes = 2;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
min_stripes = 4;
+ num_stripes = 4;
+ }
if (type & BTRFS_BLOCK_GROUP_DUP)
min_stripe_size = 2 * BTRFS_STRIPE_LEN;
else
min_stripe_size = BTRFS_STRIPE_LEN;
- list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
- if (!device->in_fs_metadata)
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->in_fs_metadata || !device->bdev)
continue;
avail_space = device->total_bytes - device->bytes_used;
@@ -1141,13 +1145,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
i = nr_devices - 1;
avail_space = 0;
while (nr_devices >= min_stripes) {
+ if (num_stripes > nr_devices)
+ num_stripes = nr_devices;
+
if (devices_info[i].max_avail >= min_stripe_size) {
int j;
u64 alloc_size;
- avail_space += devices_info[i].max_avail * min_stripes;
+ avail_space += devices_info[i].max_avail * num_stripes;
alloc_size = devices_info[i].max_avail;
- for (j = i + 1 - min_stripes; j <= i; j++)
+ for (j = i + 1 - num_stripes; j <= i; j++)
devices_info[j].max_avail -= alloc_size;
}
i--;
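The free-space estimate above now separates min_stripes (the smallest stripe count a profile allows) from num_stripes (the width actually credited per device), and clamps num_stripes to the devices still available in the loop. The mapping the hunk encodes, written out as a small helper (a sketch; the helper name is illustrative, and single/DUP profiles keep the default width of 1 as in the original):

    /* Stripe requirements per data allocation profile, as used by statfs. */
    static void profile_stripes(u64 type, int nr_devices,
                                int *min_stripes, int *num_stripes)
    {
            *min_stripes = 1;
            *num_stripes = 1;
            if (type & BTRFS_BLOCK_GROUP_RAID0) {
                    *min_stripes = 2;
                    *num_stripes = nr_devices;      /* RAID0 stripes across every open device */
            } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
                    *min_stripes = 2;
                    *num_stripes = 2;               /* two copies of each stripe */
            } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
                    *min_stripes = 4;
                    *num_stripes = 4;               /* striped mirrors */
            }
    }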
@@ -1264,6 +1271,16 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+ int ret;
+
+ ret = btrfs_dirty_inode(inode);
+ if (ret)
+ printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+ "error %d\n", btrfs_ino(inode), ret);
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -1271,7 +1288,7 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_dirty_inode,
+ .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
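The ->dirty_inode method in super_operations returns void, so the int-returning btrfs_dirty_inode() cannot be plugged in directly. btrfs_fs_dirty_inode() above is the thin adapter for VFS callers: the error cannot be propagated at that point, so it is logged (rate-limited), while internal callers such as setattr and update_time keep checking the return value. In isolation:

    /* Adapter: void VFS callback around an int-returning helper. */
    static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
    {
            int ret = btrfs_dirty_inode(inode);

            if (ret)
                    printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu error %d\n",
                                       btrfs_ino(inode), ret);
    }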
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c37433d3cd8..f4b839fd3c9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -295,6 +295,12 @@ loop_lock:
btrfs_requeue_work(&device->work);
goto done;
}
+ /* unplug every 64 requests just for good measure */
+ if (batch_run % 64 == 0) {
+ blk_finish_plug(&plug);
+ blk_start_plug(&plug);
+ sync_pending = 0;
+ }
}
cond_resched();
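The hunk above restarts the block plug every 64 submitted requests, so queued bios are pushed to the device in bounded batches instead of piling up for the whole worker run. The batching idiom in isolation (have_work() and submit_one() are stand-ins for the real work loop and bio submission):

    struct blk_plug plug;
    unsigned long batch_run = 0;

    blk_start_plug(&plug);
    while (have_work()) {                   /* stand-in for the pending-bios loop */
            submit_one();                   /* stand-in for submitting one queued bio */
            if (++batch_run % 64 == 0) {
                    blk_finish_plug(&plug); /* flush this batch to the device */
                    blk_start_plug(&plug);  /* start collecting the next one */
            }
    }
    blk_finish_plug(&plug);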
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+ bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
- } else if (err) {
+ } else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
diff --git a/fs/buffer.c b/fs/buffer.c
index 19d8eb7fdc8..1a30db77af3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,7 +41,6 @@
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
-#include <linux/cleancache.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
@@ -231,55 +230,6 @@ out:
return ret;
}
-/* If invalidate_buffers() will trash dirty buffers, it means some kind
- of fs corruption is going on. Trashing dirty data always imply losing
- information that was supposed to be just stored on the physical layer
- by the user.
-
- Thus invalidate_buffers in general usage is not allwowed to trash
- dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
- be preserved. These buffers are simply skipped.
-
- We also skip buffers which are still in use. For example this can
- happen if a userspace program is reading the block device.
-
- NOTE: In the case where the user removed a removable-media-disk even if
- there's still dirty data not synced on disk (due a bug in the device driver
- or due an error of the user), by not destroying the dirty buffers we could
- generate corruption also on the next media inserted, thus a parameter is
- necessary to handle this case in the most safe way possible (trying
- to not corrupt also the new disk inserted with the data belonging to
- the old now corrupted disk). Also for the ramdisk the natural thing
- to do in order to release the ramdisk memory is to destroy dirty buffers.
-
- These are two special cases. Normal usage imply the device driver
- to issue a sync on the device (without waiting I/O completion) and
- then an invalidate_buffers call that doesn't trash dirty buffers.
-
- For handling cache coherency with the blkdev pagecache the 'update' case
- is been introduced. It is needed to re-read from disk any pinned
- buffer. NOTE: re-reading from disk is destructive so we can do it only
- when we assume nobody is changing the buffercache under our I/O and when
- we think the disk contains more recent information than the buffercache.
- The update == 1 pass marks the buffers we need to update, the update == 2
- pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev)
-{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
-
- if (mapping->nrpages == 0)
- return;
-
- invalidate_bh_lrus();
- lru_add_drain_all(); /* make sure all lru add caches are flushed */
- invalidate_mapping_pages(mapping, 0, -1);
- /* 99% of the time, we don't need to flush the cleancache on the bdev.
- * But, for the strange corners, lets be cautious
- */
- cleancache_flush_inode(mapping);
-}
-EXPORT_SYMBOL(invalidate_bdev);
-
/*
* Kick the writeback threads then try to free up some ZONE_NORMAL memory.
*/
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 1064805e653..67bef6d0148 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/mount.h>
-#include <linux/buffer_head.h>
#include "internal.h"
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4144caf2f9d..173b1d22e59 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
/* dirty the head */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_head_snapc == NULL)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* now adjust page */
spin_lock_irq(&mapping->tree_lock);
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return snapc;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0f327c6c967..b60fc8bfb3e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
/*
* Find ceph_cap for given mds, if any.
*
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
*/
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
struct ceph_cap *cap;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return cap;
}
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
int ceph_get_cap_mds(struct inode *inode)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
int mds;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
}
/*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static void __insert_cap_node(struct ceph_inode_info *ci,
struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
*
* If I_FLUSH is set, leave the inode at the front of the list.
*
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
* -> we take mdsc->cap_delay_lock
*/
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
/*
* Cancel delayed work on cap.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
wanted |= ceph_caps_for_mode(fmode);
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
if (new_cap) {
cap = new_cap;
new_cap = NULL;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
new_cap = get_cap(mdsc, caps_reservation);
if (new_cap == NULL)
return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
wake_up_all(&ci->i_cap_wq);
return 0;
}
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
struct rb_node *p;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
break;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ceph_caps_revoking %p %s = %d\n", inode,
ceph_cap_string(mask), ret);
return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
}
/*
- * called under i_lock
+ * called under i_ceph_lock
*/
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
/*
* Remove a cap. Take steps to deal with a racing iterate_session_caps.
*
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
void __ceph_remove_cap(struct ceph_cap *cap)
@@ -927,7 +928,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
u64 size, u64 max_size,
struct timespec *mtime, struct timespec *atime,
u64 time_warp_seq,
- uid_t uid, gid_t gid, mode_t mode,
+ uid_t uid, gid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
u64 follows)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
/*
* Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
*/
void ceph_queue_caps_release(struct inode *inode)
{
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
/*
* Send a cap msg on the given inode. Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
*
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
* Return non-zero if delayed release, or we experienced an error
* such that the caller should requeue + retry later.
*
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
* caller should hold snap_rwsem (read), s_mutex.
*/
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
int op, int used, int want, int retain, int flushing,
unsigned *pflush_tid)
- __releases(cap->ci->vfs_inode->i_lock)
+ __releases(cap->ci->i_ceph_lock)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
@@ -1077,7 +1078,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
u64 size, max_size;
struct timespec mtime, atime;
int wake = 0;
- mode_t mode;
+ umode_t mode;
uid_t uid;
gid_t gid;
struct ceph_mds_session *session;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
xattr_version = ci->i_xattrs.version;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
* Unless @again is true, skip cap_snaps that were already sent to
* the MDS (i.e., during this session).
*
- * Called under i_lock. Takes s_mutex as needed.
+ * Called under i_ceph_lock. Takes s_mutex as needed.
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession,
int again)
- __releases(ci->vfs_inode->i_lock)
- __acquires(ci->vfs_inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
int mds;
@@ -1261,7 +1262,7 @@ retry:
session = NULL;
}
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
mutex_lock(&mdsc->mutex);
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
* deletion or migration. retry, and we'll
* get a better @mds value next time.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
@@ -1285,7 +1286,7 @@ retry:
list_del_init(&capsnap->flushing_item);
list_add_tail(&capsnap->flushing_item,
&session->s_cap_snaps_flushing);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
next_follows = capsnap->follows + 1;
ceph_put_cap_snap(capsnap);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
@@ -1322,11 +1323,9 @@ out:
static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
- struct inode *inode = &ci->vfs_inode;
-
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, NULL, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static int __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
invalidate_mapping_pages(&inode->i_data, 0, -1);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
if (mdsc->stopping)
is_delayed = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
__ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry_locked:
file_wanted = __ceph_caps_file_wanted(ci);
used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
dout("inverting snap/in locks on %p\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
down_read(&mdsc->snap_rwsem);
took_snap_rwsem = 1;
goto retry;
@@ -1664,10 +1663,10 @@ ack:
mds = cap->mds; /* remember mds, so we don't repeat */
sent++;
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
retain, flushing, NULL);
- goto retry; /* retake i_lock and restart our cap scan. */
+ goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
/*
@@ -1681,7 +1680,7 @@ ack:
else if (!is_delayed || force_requeue)
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_invalidate)
ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
int flushing = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
@@ -1716,7 +1715,7 @@ retry:
int delayed;
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
session = cap->session;
mutex_lock(&session->s_mutex);
goto retry;
@@ -1727,18 +1726,18 @@ retry:
flushing = __mark_caps_flushing(inode, session);
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
cap->issued | cap->implemented, flushing,
flush_tid);
if (!delayed)
goto out_unlocked;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
out_unlocked:
if (session && unlock_session)
mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
struct ceph_inode_info *ci = ceph_inode(inode);
int i, ret = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_CAP_BITS; i++)
if ((ci->i_flushing_caps & (1 << i)) &&
ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
ret = 0;
break;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_caps_dirty(ci))
__cap_delay_requeue_front(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return err;
}
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
}
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
* Take references to capabilities we hold, so that we don't release
* them to the MDS prematurely.
*
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
*/
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
ceph_cap_string(have), ceph_cap_string(need));
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
int check = 0;
/* do we need to explicitly request a larger max_size? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((endoff >= ci->i_max_size ||
endoff > (inode->i_size << 1)) &&
endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
ci->i_wanted_max_size = endoff;
check = 1;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
@@ -2140,9 +2139,9 @@ retry:
*/
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
__take_cap_refs(ci, caps);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
int last = 0, put = 0, flushsnaps = 0, wake = 0;
struct ceph_cap_snap *capsnap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (had & CEPH_CAP_PIN)
--ci->i_pin_ref;
if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
}
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
int found = 0;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wrbuffer_ref -= nr;
last = !ci->i_wrbuffer_ref;
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
* Handle a cap GRANT message from the MDS. (Note that a GRANT may
* actually be a revocation if it specifies a smaller cap set.)
*
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
*
* return value:
* 0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
struct ceph_mds_session *session,
struct ceph_cap *cap,
struct ceph_buffer *xattr_buf)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
}
BUG_ON(cap->issued & ~cap->implemented);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (writeback)
/*
* queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_caps *m,
struct ceph_mds_session *session,
struct ceph_cap *cap)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
wake_up_all(&ci->i_cap_wq);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
capsnap, capsnap->follows);
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
static void handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
struct ceph_mds_session *session)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_trunc)
ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
inode, ci, mds, mseq);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure we haven't seen a higher mseq */
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
}
/* else, we already released it */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
up_read(&mdsc->snap_rwsem);
/* make sure we re-request max_size, if necessary */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_mds_client *mdsc = session->s_mdsc;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
+ struct ceph_inode_info *ci;
struct ceph_cap *cap;
struct ceph_mds_caps *h;
int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
/* lookup ino */
inode = ceph_find_inode(sb, vino);
+ ci = ceph_inode(inode);
dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
vino.snap, inode);
if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
}
/* the rest require a cap */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ceph_inode(inode), mds);
if (!cap) {
dout(" no cap on %p ino %llx.%llx from mds%d\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto flush_cap_releases;
}
- /* note that each of these drops i_lock for us */
+ /* note that each of these drops i_ceph_lock for us */
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
default:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
ceph_cap_op_name(op));
}
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
struct inode *inode = &ci->vfs_inode;
int last = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
BUG_ON(ci->i_nr_by_mode[fmode] == 0);
if (--ci->i_nr_by_mode[fmode] == 0)
last++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
int used, dirty;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
inode, cap, ceph_cap_string(cap->issued));
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
/*
* force a record for the directory caps if we have a dentry lease.
- * this is racy (can't take i_lock and d_lock together), but it
+ * this is racy (can't take i_ceph_lock and d_lock together), but it
* doesn't have to be perfect; the mds will revoke anything we don't
* release.
*/
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index bca3948e9db..74fd74719dc 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
/* can we use the dcache? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((filp->f_pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
ceph_dir_test_complete(inode) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(filp, dirent, filldir);
if (err != -EAGAIN)
return err;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
if (fi->dentry) {
err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
* were released during the whole readdir, and we should have
* the complete dir contents in our cache.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_release_count == fi->dir_release_count) {
ceph_dir_set_complete(inode);
ci->i_max_offset = filp->f_pos;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("readdir %p filp %p done.\n", inode, filp);
return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
!is_root_ceph_dentry(dir, dentry) &&
ceph_dir_test_complete(dir) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
di->lease_shared_gen = ci->i_shared_gen;
return NULL;
}
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -666,7 +666,7 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -676,7 +676,7 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
+ dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
dir, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
@@ -699,7 +699,7 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
return err;
}
-static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
dout("create in dir %p dentry %p name '%.*s'\n",
@@ -753,7 +753,7 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
return err;
}
-static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -767,7 +767,7 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
dentry->d_name.len, dentry->d_name.name, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
- dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
+ dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
goto out;
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_nlink == 1) {
drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
ci->i_ceph_flags |= CEPH_I_NODELAY;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return drop;
}
@@ -870,7 +870,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
dout("unlink/rmdir dir %p dn %p inode %p\n",
dir, dentry, inode);
- op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
+ op = S_ISDIR(dentry->d_inode->i_mode) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
} else
goto out;
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
struct ceph_dentry_info *di = ceph_dentry(dentry);
int valid = 0;
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_shared_gen == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)ci->i_shared_gen, dentry,
(unsigned)di->lease_shared_gen, valid);
@@ -1094,42 +1094,19 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry,
/*
* Set/clear/test dir complete flag on the dir's dentry.
*/
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
- struct dentry *alias;
-
- if (list_empty(&inode->i_dentry))
- return NULL;
- alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
- return alias;
-}
-
void ceph_dir_set_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry)) {
- dout(" marking %p (%p) complete\n", inode, dentry);
- set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
- }
+ /* not yet implemented */
}
void ceph_dir_clear_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry)) {
- dout(" marking %p (%p) NOT complete\n", inode, dentry);
- clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
- }
+ /* not yet implemented */
}
bool ceph_dir_test_complete(struct inode *inode)
{
- struct dentry *dentry = __d_find_any_alias(inode);
-
- if (dentry && ceph_dentry(dentry))
- return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+ /* not yet implemented */
return false;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce549d31eeb..ed72428d9c7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
/* trivially open snapdir */
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
* write) or any MDS (for read). Update wanted set
* asynchronously.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_is_any_real_caps(ci) &&
(((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* adjust wanted? */
if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
} else if (ceph_snap(inode) != CEPH_NOSNAP &&
(ci->i_snap_caps & wanted) == wanted) {
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
*/
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_put_cap_refs(ci, got);
ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
if (ret >= 0) {
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
}
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&inode->i_mutex);
__ceph_do_pending_vmtruncate(inode);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+ if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
if (ret < 0) {
offset = ret;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 116f36502f1..25283e7a37f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
dout("alloc_inode %p\n", &ci->vfs_inode);
+ spin_lock_init(&ci->i_ceph_lock);
+
ci->i_version = 0;
ci->i_time_warp_seq = 0;
ci->i_ceph_flags = 0;
@@ -382,7 +384,6 @@ static void ceph_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct ceph_inode_info *ci = ceph_inode(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ceph_inode_cachep, ci);
}
@@ -583,7 +584,7 @@ static int fill_inode(struct inode *inode,
iinfo->xattr_len);
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/*
* provided version will be odd if inode value is projected,
@@ -680,7 +681,7 @@ static int fill_inode(struct inode *inode,
char *sym;
BUG_ON(symlen != inode->i_size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = -ENOMEM;
sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +690,7 @@ static int fill_inode(struct inode *inode,
memcpy(sym, iinfo->symlink, symlen);
sym[symlen] = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_symlink)
ci->i_symlink = sym;
else
@@ -715,7 +716,7 @@ static int fill_inode(struct inode *inode,
}
no_change:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* queue truncate if we saw i_size decrease */
if (queue_trunc)
@@ -750,13 +751,13 @@ no_change:
info->cap.flags,
caps_reservation);
} else {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" %p got snap_caps %s\n", inode,
ceph_cap_string(le32_to_cpu(info->cap.caps)));
ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
if (cap_fmode >= 0)
__ceph_get_fmode(ci, cap_fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else if (cap_fmode >= 0) {
pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +850,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
{
struct dentry *dir = dn->d_parent;
struct inode *inode = dir->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_dentry_info *di;
BUG_ON(!inode);
di = ceph_dentry(dn);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ceph_dir_test_complete(inode)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
di->offset = ceph_inode(inode)->i_max_offset++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
spin_lock(&dir->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1310,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
struct ceph_inode_info *ci = ceph_inode(inode);
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
inode->i_size = size;
inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1320,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
(ci->i_reported_size << 1) < ci->i_max_size)
ret = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
@@ -1376,20 +1378,20 @@ static void ceph_invalidate_work(struct work_struct *work)
u32 orig_gen;
int check = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("invalidate_pages %p gen %d revoking %d\n", inode,
ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
/* nevermind! */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto out;
}
orig_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(&inode->i_data, 0);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
orig_gen == ci->i_rdcache_revoking) {
dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1403,7 @@ static void ceph_invalidate_work(struct work_struct *work)
inode, orig_gen, ci->i_rdcache_gen,
ci->i_rdcache_revoking);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1462,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
int wrbuffer_refs, wake = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
dout("__do_pending_vmtruncate %p none pending\n", inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
@@ -1474,7 +1476,7 @@ retry:
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
dout("__do_pending_vmtruncate %p flushing snaps first\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
@@ -1484,15 +1486,15 @@ retry:
wrbuffer_refs = ci->i_wrbuffer_ref;
dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
ci->i_truncate_pending, to);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(inode->i_mapping, to);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_truncate_pending--;
if (ci->i_truncate_pending == 0)
wake = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1549,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
if (IS_ERR(req))
return PTR_ERR(req);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -1695,7 +1697,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
}
release &= issued;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1719,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
__ceph_do_pending_vmtruncate(inode);
return err;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 5a14c29cbba..790914a598d 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[fi->fmode]++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
ceph_check_caps(ci, 0, NULL);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 264ab701154..6203d805eb4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
}
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = NULL;
if (mode == USE_AUTH_MDS)
cap = ci->i_auth_cap;
if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
if (!cap) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto random;
}
mds = cap->session->s_mds;
dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
inode, ceph_vinop(inode), mds,
cap == ci->i_auth_cap ? "auth " : "", cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_remove_cap(cap);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
}
spin_unlock(&mdsc->cap_dirty_lock);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
while (drop--)
iput(inode);
return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
wake_up_all(&ci->i_cap_wq);
if (arg) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wanted_max_size = 0;
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return 0;
}
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
if (session->s_trim_caps <= 0)
return -1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
__ceph_remove_cap(cap);
} else {
/* try to drop referring dentries */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
d_prune_aliases(inode);
dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return 0;
}
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
i_flushing_item);
struct inode *inode = &ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_cap_flush_seq <= want_flush_seq) {
dout("check_cap_flush still flushing %p "
"seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
session->s_mds);
ret = 0;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
+ spin_unlock(&temp->d_lock);
break;
} else {
pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
struct ceph_inode_info *ci = ceph_inode(inode);
dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ceph_dir_clear_complete(inode);
ci->i_release_count++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (req->r_dentry)
ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
if (err)
goto out_free;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
rec.v1.pathbase = cpu_to_le64(pathbase);
reclen = sizeof(rec.v1);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (recon_state->flock) {
int num_fcntl_locks, num_flock_locks;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4bb239921db..a50ca0e3947 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -20,7 +20,7 @@
*
* mdsc->snap_rwsem
*
- * inode->i_lock
+ * ci->i_ceph_lock
* mdsc->snap_flush_lock
* mdsc->cap_delay_lock
*
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e2643719133..a559c80f127 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
return;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
kfree(capsnap);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
*
* If capsnap can now be flushed, add to snap_flush list, and return 1.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
inode = &ci->vfs_inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, &session, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
continue;
ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_snap_realm)
goto skip_inode;
/*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
oldrealm = ci->i_snap_realm;
ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_get_snap_realm(mdsc, realm);
ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
continue;
skip_inode:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 8dc73a594a9..48f61a12af6 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -341,11 +341,11 @@ out:
/**
* ceph_show_options - Show mount options in /proc/mounts
* @m: seq_file to write to
- * @mnt: mount descriptor
+ * @root: root of that (sub)tree
*/
-static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
- struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
struct ceph_options *opt = fsc->client->options;
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
- seq_printf(m, ",rasize=%d", fsopt->rsize);
+ seq_printf(m, ",rasize=%d", fsopt->rasize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
@@ -636,19 +636,26 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == 0) {
+ struct inode *inode = req->r_target_inode;
+ req->r_target_inode = NULL;
dout("open_root_inode success\n");
- if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
+ if (ceph_ino(inode) == CEPH_INO_ROOT &&
fsc->sb->s_root == NULL) {
- root = d_alloc_root(req->r_target_inode);
+ root = d_alloc_root(inode);
+ if (!root) {
+ iput(inode);
+ root = ERR_PTR(-ENOMEM);
+ goto out;
+ }
ceph_init_dentry(root);
} else {
- root = d_obtain_alias(req->r_target_inode);
+ root = d_obtain_alias(inode);
}
- req->r_target_inode = NULL;
dout("open_root_inode success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
+out:
ceph_mdsc_put_request(req);
return root;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 01bf189e08a..cb3652b3727 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -136,7 +136,7 @@ struct ceph_cap_snap {
int issued, dirty;
struct ceph_snap_context *context;
- mode_t mode;
+ umode_t mode;
uid_t uid;
gid_t gid;
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
* The locking for D_COMPLETE is a bit odd:
* - we can clear it at almost any time (see ceph_d_prune)
* - it is only meaningful if:
- * - we hold dir inode i_lock
+ * - we hold dir inode i_ceph_lock
* - we hold dir FILE_SHARED caps
* - the dentry D_COMPLETE is set
*/
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
struct ceph_inode_info {
struct ceph_vino i_vino; /* ceph ino + snap */
+ spinlock_t i_ceph_lock;
+
u64 i_version;
u32 i_time_warp_seq;
@@ -271,7 +273,7 @@ struct ceph_inode_info {
struct ceph_inode_xattrs_info i_xattrs;
- /* capabilities. protected _both_ by i_lock and cap->session's
+ /* capabilities. protected _both_ by i_ceph_lock and cap->session's
* s_mutex. */
struct rb_root i_caps; /* cap list */
struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags &= ~mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline void ceph_i_set(struct inode *inode, unsigned mask)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags |= mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
struct ceph_inode_info *ci = ceph_inode(inode);
bool r;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = (ci->i_ceph_flags & mask) == mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
int issued;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return issued;
}
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
int touch)
{
int r;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = __ceph_caps_issued_mask(ci, mask, touch);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
extern void __ceph_remove_cap(struct ceph_cap *cap);
static inline void ceph_remove_cap(struct ceph_cap *cap)
{
- struct inode *inode = &cap->ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&cap->ci->i_ceph_lock);
__ceph_remove_cap(cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&cap->ci->i_ceph_lock);
}
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
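
The super.h hunk above is the core of this series: i_ceph_flags and the cap state move from being guarded by the VFS-owned inode->i_lock to a private spinlock embedded in ceph_inode_info, with small lock/modify/unlock helpers (ceph_i_set, ceph_i_clear, ceph_i_test) wrapping it. As a rough illustration only, here is a minimal user-space analogue of that pattern; the demo_* names and pthread spinlock stand in for the kernel types and are not part of the patch:

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical analogue of ceph_inode_info: the flag word is guarded
     * by a private spinlock embedded in the same structure, rather than
     * by a lock owned by some outer object. */
    struct demo_inode_info {
    	pthread_spinlock_t lock;   /* plays the role of i_ceph_lock */
    	unsigned flags;            /* plays the role of i_ceph_flags */
    };

    static void demo_i_set(struct demo_inode_info *ci, unsigned mask)
    {
    	pthread_spin_lock(&ci->lock);
    	ci->flags |= mask;
    	pthread_spin_unlock(&ci->lock);
    }

    static void demo_i_clear(struct demo_inode_info *ci, unsigned mask)
    {
    	pthread_spin_lock(&ci->lock);
    	ci->flags &= ~mask;
    	pthread_spin_unlock(&ci->lock);
    }

    static bool demo_i_test(struct demo_inode_info *ci, unsigned mask)
    {
    	bool r;

    	pthread_spin_lock(&ci->lock);
    	r = (ci->flags & mask) == mask;
    	pthread_spin_unlock(&ci->lock);
    	return r;
    }

    int main(void)
    {
    	struct demo_inode_info ci = { .flags = 0 };

    	pthread_spin_init(&ci.lock, PTHREAD_PROCESS_PRIVATE);
    	demo_i_set(&ci, 0x1);
    	demo_i_clear(&ci, 0x2);
    	return demo_i_test(&ci, 0x1) ? 0 : 1;
    }

Build with `gcc demo.c -lpthread`. The point of the conversion is the same as in the sketch: the filesystem no longer competes with generic VFS users of inode->i_lock for its own bookkeeping.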
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 96c6739a028..a5e36e4488a 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
}
static int __build_xattrs(struct inode *inode)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
u32 namelen;
u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
end = p + ci->i_xattrs.blob->vec.iov_len;
ceph_decode_32_safe(&p, end, numattr, bad);
xattr_version = ci->i_xattrs.version;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
GFP_NOFS);
@@ -387,7 +387,7 @@ start:
goto bad_lock;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.version != xattr_version) {
/* lost a race, retry */
for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
return err;
bad_lock:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
bad:
if (xattrs) {
for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
if (vxattrs)
vxattr = ceph_match_vxattr(vxattrs, name);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto get_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* get xattrs from mds (if we don't already have them) */
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (vxattr && vxattr->readonly) {
err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
memcpy(value, xattr->val, xattr->val_len);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
u32 len;
int i;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto list_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
err = __build_xattrs(inode);
if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
if (!xattr)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry:
issued = __ceph_caps_issued(ci, NULL);
if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
struct ceph_buffer *blob = NULL;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" preaallocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.prealloc_blob)
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
return -EOPNOTSUPP;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__build_xattrs(inode);
issued = __ceph_caps_issued(ci, NULL);
dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_send_removexattr(dentry, name);
return err;
}
diff --git a/fs/char_dev.c b/fs/char_dev.c
index dca9e5e0f73..3f152b92a94 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -272,7 +272,7 @@ int __register_chrdev(unsigned int major, unsigned int baseminor,
cd = __register_chrdev_region(major, baseminor, count, name);
if (IS_ERR(cd))
return PTR_ERR(cd);
-
+
cdev = cdev_alloc();
if (!cdev)
goto out2;
@@ -280,7 +280,7 @@ int __register_chrdev(unsigned int major, unsigned int baseminor,
cdev->owner = fops->owner;
cdev->ops = fops;
kobject_set_name(&cdev->kobj, "%s", name);
-
+
err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
if (err)
goto out;
@@ -405,7 +405,7 @@ static int chrdev_open(struct inode *inode, struct file *filp)
goto out_cdev_put;
if (filp->f_op->open) {
- ret = filp->f_op->open(inode,filp);
+ ret = filp->f_op->open(inode, filp);
if (ret)
goto out_cdev_put;
}
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 500d6585927..c865bfdfe81 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -59,8 +59,8 @@ struct cifs_sb_info {
gid_t mnt_gid;
uid_t mnt_backupuid;
gid_t mnt_backupgid;
- mode_t mnt_file_mode;
- mode_t mnt_dir_mode;
+ umode_t mnt_file_mode;
+ umode_t mnt_dir_mode;
unsigned int mnt_cifs_flags;
char *mountdata; /* options received at mount time or via DFS refs */
struct backing_dev_info bdi;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8f1fe324162..b1fd382d195 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -343,9 +343,9 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
* ones are.
*/
static int
-cifs_show_options(struct seq_file *s, struct vfsmount *m)
+cifs_show_options(struct seq_file *s, struct dentry *root)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
@@ -393,7 +393,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
cifs_show_address(s, tcon->ses->server);
if (!tcon->unix_ext)
- seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+ seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
cifs_sb->mnt_file_mode,
cifs_sb->mnt_dir_mode);
if (tcon->seal)
@@ -430,7 +430,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
seq_printf(s, ",cifsacl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
seq_printf(s, ",dynperm");
- if (m->mnt_sb->s_flags & MS_POSIXACL)
+ if (root->d_sb->s_flags & MS_POSIXACL)
seq_printf(s, ",acl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
seq_printf(s, ",mfsymlinks");
@@ -488,7 +488,7 @@ static void cifs_umount_begin(struct super_block *sb)
}
#ifdef CONFIG_CIFS_STATS2
-static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
+static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
/* BB FIXME */
return 0;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 30ff56005d8..fe5ecf1b422 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -44,14 +44,14 @@ extern const struct address_space_operations cifs_addr_ops_smallbuf;
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
-extern int cifs_create(struct inode *, struct dentry *, int,
+extern int cifs_create(struct inode *, struct dentry *, umode_t,
struct nameidata *);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
struct nameidata *);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
-extern int cifs_mknod(struct inode *, struct dentry *, int, dev_t);
-extern int cifs_mkdir(struct inode *, struct dentry *, int);
+extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+extern int cifs_mkdir(struct inode *, struct dentry *, umode_t);
extern int cifs_rmdir(struct inode *, struct dentry *);
extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
struct dentry *);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8238aa13e01..ba53c1c6c6c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -169,8 +169,8 @@ struct smb_vol {
gid_t linux_gid;
uid_t backupuid;
gid_t backupgid;
- mode_t file_mode;
- mode_t dir_mode;
+ umode_t file_mode;
+ umode_t dir_mode;
unsigned secFlg;
bool retry:1;
bool intr:1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d6a972df033..4666780f315 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
byte_count += total_in_buf2;
/* don't allow buffer to overflow */
- if (byte_count > CIFSMaxBufSize)
+ if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
return -ENOBUFS;
pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
+ try_to_freeze();
+
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
@@ -2120,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
- "ntlmv2 in kernel release 3.2");
+ "ntlmv2 in kernel release 3.3");
}
ses->overrideSecFlg = volume_info->secFlg;
@@ -2817,7 +2819,7 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_backupgid = pvolume_info->backupgid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cFYI(1, "file mode: 0x%x dir mode: 0x%x",
+ cFYI(1, "file mode: 0x%hx dir mode: 0x%hx",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d7eeb9d3ed6..df8fecb5b99 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -136,7 +136,7 @@ cifs_bp_rename_retry:
/* Inode operations in similar order to how they appear in Linux file fs.h */
int
-cifs_create(struct inode *inode, struct dentry *direntry, int mode,
+cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
struct nameidata *nd)
{
int rc = -ENOENT;
@@ -355,7 +355,7 @@ cifs_create_out:
return rc;
}
-int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
+int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
dev_t device_number)
{
int rc = -EPERM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cf0b1539b32..4dd9283885e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
lock->type, lock->netfid, conf_lock);
}
+/*
+ * Check if there is another lock that prevents us from setting the lock (mandatory
+ * style). If such a lock exists, update the flock structure with its
+ * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
+ * or leave it the same if we can't. Returns 0 if we don't need to request to
+ * the server or 1 otherwise.
+ */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
mutex_unlock(&cinode->lock_mutex);
}
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to request to the server;
+ * 2) 1, if no locks prevent us but we need to request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
bool wait)
@@ -778,6 +791,13 @@ try_again:
return rc;
}
+/*
+ * Check if there is another lock that prevents us from setting the lock (posix
+ * style). If such a lock exists, update the flock structure with its
+ * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
+ * or leave it the same if we can't. Returns 0 if we don't need to request to
+ * the server or 1 otherwise.
+ */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
return rc;
}
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to request to the server;
+ * 2) 1, if we need to request to the server;
+ * 3) <0, if the error occurs while setting the lock.
+ */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
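
The comments added to fs/cifs/file.c above document a three-way return convention: 0 when the lock is resolved locally, 1 when the server must still be asked, and a negative errno when a conflicting mandatory lock blocks us and the caller will not wait. Purely as an illustration of that convention (none of the real cifs locking state, hypothetical demo_* names):

    #include <errno.h>
    #include <stdio.h>

    /* Mirrors the documented contract:
     *   0   -> lock handled locally, no server round trip needed
     *   1   -> no conflicting local lock, but the server must be asked
     *   <0  -> a conflicting lock exists and the caller asked not to wait */
    static int demo_lock_add_if(int conflict, int can_cache, int wait)
    {
    	if (!conflict)
    		return can_cache ? 0 : 1;  /* grant locally or defer to server */
    	if (!wait)
    		return -EACCES;            /* mandatory conflict, caller won't wait */
    	return 1;                          /* real code would block, then retry */
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       demo_lock_add_if(0, 1, 0),   /* 0  */
    	       demo_lock_add_if(0, 0, 0),   /* 1  */
    	       demo_lock_add_if(1, 0, 0));  /* -13 (EACCES) */
    	return 0;
    }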
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e851d5b8931..a5f54b7d982 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1264,7 +1264,7 @@ unlink_out:
return rc;
}
-int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
+int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
{
int rc = 0, tmprc;
int xid;
@@ -1275,7 +1275,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
struct inode *newinode = NULL;
struct cifs_fattr fattr;
- cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
+ cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 5de03ec2014..a090bbe6ee2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
rc);
return rc;
}
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
}
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
if (rc)
return -ENOENT;
}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 7cacba12b8f..80d85088193 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
{
int rc;
int len;
- __u16 wpwd[129];
+ __le16 wpwd[129];
/* Password cannot be longer than 128 characters */
if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
*wpwd = 0; /* Ensure string is null terminated */
}
- rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
- memset(wpwd, 0, 129 * sizeof(__u16));
+ rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+ memset(wpwd, 0, 129 * sizeof(__le16));
return rc;
}
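
The smbencrypt.c change above is an annotation fix: the NT password buffer holds UTF-16LE code units, so __le16 is the honest type and sizeof() is unchanged. For a sense of why the distinction matters on big-endian hosts, here is a small user-space sketch (hypothetical names, glibc endian helpers) that stores code units in little-endian byte order before hashing-style processing:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper: store a short ASCII password as UTF-16LE code
     * units, the byte layout the NT hash is defined over, regardless of
     * the host CPU's endianness. */
    static size_t to_utf16le(const char *pwd, uint16_t *out, size_t max)
    {
    	size_t i;

    	for (i = 0; i < max && pwd[i]; i++)
    		out[i] = htole16((uint16_t)pwd[i]);  /* no-op on LE hosts */
    	return i * sizeof(uint16_t);                 /* byte count to hash */
    }

    int main(void)
    {
    	uint16_t wpwd[129];
    	size_t len = to_utf16le("secret", wpwd, 128);

    	printf("%zu bytes, first unit 0x%04x\n", len, (unsigned)wpwd[0]);
    	memset(wpwd, 0, sizeof(wpwd));               /* scrub, as E_md4hash does */
    	return 0;
    }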
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 28e7e135cfa..83d2fd8ec24 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -30,14 +30,14 @@
#include "coda_int.h"
/* dir inode-ops */
-static int coda_create(struct inode *dir, struct dentry *new, int mode, struct nameidata *nd);
+static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, struct nameidata *nd);
static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd);
static int coda_link(struct dentry *old_dentry, struct inode *dir_inode,
struct dentry *entry);
static int coda_unlink(struct inode *dir_inode, struct dentry *entry);
static int coda_symlink(struct inode *dir_inode, struct dentry *entry,
const char *symname);
-static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, int mode);
+static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, umode_t mode);
static int coda_rmdir(struct inode *dir_inode, struct dentry *entry);
static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry);
@@ -191,7 +191,7 @@ static inline void coda_dir_drop_nlink(struct inode *dir)
}
/* creation routines: create, mknod, mkdir, link, symlink */
-static int coda_create(struct inode *dir, struct dentry *de, int mode, struct nameidata *nd)
+static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, struct nameidata *nd)
{
int error;
const char *name=de->d_name.name;
@@ -223,7 +223,7 @@ err_out:
return error;
}
-static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
+static int coda_mkdir(struct inode *dir, struct dentry *de, umode_t mode)
{
struct inode *inode;
struct coda_vattr attrs;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 871b2771546..1c08a8cd673 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -58,7 +58,6 @@ static struct inode *coda_alloc_inode(struct super_block *sb)
static void coda_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(coda_inode_cachep, ITOC(inode));
}
diff --git a/fs/compat.c b/fs/compat.c
index c98787536bb..fa9d721ecfe 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -342,16 +342,9 @@ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct c
*/
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
{
- struct super_block *sb;
struct compat_ustat tmp;
struct kstatfs sbuf;
- int err;
-
- sb = user_get_super(new_decode_dev(dev));
- if (!sb)
- return -EINVAL;
- err = statfs_by_dentry(sb->s_root, &sbuf);
- drop_super(sb);
+ int err = vfs_ustat(new_decode_dev(dev), &sbuf);
if (err)
return err;
@@ -1288,7 +1281,7 @@ compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
* O_LARGEFILE flag.
*/
asmlinkage long
-compat_sys_open(const char __user *filename, int flags, int mode)
+compat_sys_open(const char __user *filename, int flags, umode_t mode)
{
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
@@ -1298,7 +1291,7 @@ compat_sys_open(const char __user *filename, int flags, int mode)
* O_LARGEFILE flag.
*/
asmlinkage long
-compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, int mode)
+compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, umode_t mode)
{
return do_sys_open(dfd, filename, flags, mode);
}
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 51352de88ef..a10e428b32b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1506,35 +1506,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
return -ENOIOCTLCMD;
}
-static void compat_ioctl_error(struct file *filp, unsigned int fd,
- unsigned int cmd, unsigned long arg)
-{
- char buf[10];
- char *fn = "?";
- char *path;
-
- /* find the name of the device. */
- path = (char *)__get_free_page(GFP_KERNEL);
- if (path) {
- fn = d_path(&filp->f_path, path, PAGE_SIZE);
- if (IS_ERR(fn))
- fn = "?";
- }
-
- sprintf(buf,"'%c'", (cmd>>_IOC_TYPESHIFT) & _IOC_TYPEMASK);
- if (!isprint(buf[1]))
- sprintf(buf, "%02x", buf[1]);
- compat_printk("ioctl32(%s:%d): Unknown cmd fd(%d) "
- "cmd(%08x){t:%s;sz:%u} arg(%08x) on %s\n",
- current->comm, current->pid,
- (int)fd, (unsigned int)cmd, buf,
- (cmd >> _IOC_SIZESHIFT) & _IOC_SIZEMASK,
- (unsigned int)arg, fn);
-
- if (path)
- free_page((unsigned long)path);
-}
-
static int compat_ioctl_check_table(unsigned int xcmd)
{
int i;
@@ -1621,13 +1592,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
goto found_handler;
error = do_ioctl_trans(fd, cmd, arg, filp);
- if (error == -ENOIOCTLCMD) {
- static int count;
-
- if (++count <= 50)
- compat_ioctl_error(filp, fd, cmd, arg);
- error = -EINVAL;
- }
+ if (error == -ENOIOCTLCMD)
+ error = -ENOTTY;
goto out_fput;
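
The compat_ioctl.c hunk above drops the rate-limited "unknown cmd" logging and simply returns -ENOTTY, the errno that has always meant "inappropriate ioctl for device" to user space. A quick user-space demonstration of that convention (TCGETS against a non-tty), independent of the compat path itself:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
    	struct termios t;
    	int fd = open("/dev/null", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, TCGETS, &t) < 0)
    		printf("TCGETS on /dev/null: %s\n", strerror(errno)); /* ENOTTY */
    	close(fd);
    	return 0;
    }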
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 82bda8fdfc1..ede857d20a0 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -63,8 +63,8 @@ extern struct kmem_cache *configfs_dir_cachep;
extern int configfs_is_root(struct config_item *item);
-extern struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent *);
-extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *);
+extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
extern int configfs_inode_init(void);
extern void configfs_inode_exit(void);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9a37a9b6de3..5ddd7ebd9dc 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -311,8 +311,8 @@ static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
if (item->ci_parent)
parent = item->ci_parent->ci_dentry;
- else if (configfs_mount && configfs_mount->mnt_sb)
- parent = configfs_mount->mnt_sb->s_root;
+ else if (configfs_mount)
+ parent = configfs_mount->mnt_root;
else
return -EFAULT;
@@ -1170,7 +1170,7 @@ void configfs_undepend_item(struct configfs_subsystem *subsys,
}
EXPORT_SYMBOL(configfs_undepend_item);
-static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index ca418aaf635..3ee36d41886 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -116,7 +116,7 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
return error;
}
-static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
+static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -132,7 +132,7 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_ctime = iattr->ia_ctime;
}
-struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent * sd)
+struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent * sd)
{
struct inode * inode = new_inode(configfs_sb);
if (inode) {
@@ -185,7 +185,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
#endif /* CONFIG_LOCKDEP */
-int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
{
int error = 0;
struct inode * inode = NULL;
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
return bdi_init(&configfs_backing_dev_info);
}
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
{
bdi_destroy(&configfs_backing_dev_info);
}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index ecc62178bed..276e15cafd5 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
goto out;
config_kobj = kobject_create_and_add("config", kernel_kobj);
- if (!config_kobj) {
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (!config_kobj)
+ goto out2;
+
+ err = configfs_inode_init();
+ if (err)
+ goto out3;
err = register_filesystem(&configfs_fs_type);
- if (err) {
- printk(KERN_ERR "configfs: Unable to register filesystem!\n");
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (err)
+ goto out4;
- err = configfs_inode_init();
- if (err) {
- unregister_filesystem(&configfs_fs_type);
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- }
+ return 0;
+out4:
+ printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+ configfs_inode_exit();
+out3:
+ kobject_put(config_kobj);
+out2:
+ kmem_cache_destroy(configfs_dir_cachep);
+ configfs_dir_cachep = NULL;
out:
return err;
}
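
The configfs_init() rewrite above replaces repeated inline cleanup with the usual goto-unwind ladder: each successfully acquired resource gets a label, and a failure jumps to the label that releases only what was already acquired, in reverse order. A standalone sketch of the idiom with hypothetical resources (malloc stands in for cache creation, kobject setup and inode init):

    #include <stdio.h>
    #include <stdlib.h>

    /* Three-step init using the goto-unwind idiom: on failure, jump to the
     * label that undoes only the steps that already succeeded. */
    static int demo_init(void)
    {
    	int err = -1;
    	void *cache, *kobj, *inode_state;

    	cache = malloc(32);                 /* step 1: cache */
    	if (!cache)
    		goto out;

    	kobj = malloc(32);                  /* step 2: sysfs-ish object */
    	if (!kobj)
    		goto out_cache;

    	inode_state = malloc(32);           /* step 3: inode bookkeeping */
    	if (!inode_state)
    		goto out_kobj;

    	/* success: in a real module these stay live for its lifetime */
    	return 0;

    out_kobj:
    	free(kobj);
    out_cache:
    	free(cache);
    out:
    	return err;
    }

    int main(void)
    {
    	return demo_init() ? EXIT_FAILURE : EXIT_SUCCESS;
    }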
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 739fb59bcdc..a2ee8f9f5a3 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -20,7 +20,6 @@
#include <linux/cramfs_fs.h>
#include <linux/slab.h>
#include <linux/cramfs_fs_sb.h>
-#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
@@ -378,7 +377,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
unsigned long nextoffset;
char *name;
ino_t ino;
- mode_t mode;
+ umode_t mode;
int namelen, error;
mutex_lock(&read_mutex);
diff --git a/fs/dcache.c b/fs/dcache.c
index 10ba92def3f..3c6d3113a25 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -38,6 +38,7 @@
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include "internal.h"
+#include "mount.h"
/*
* Usage:
@@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
}
}
-static void dentry_lru_move_tail(struct dentry *dentry)
+static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
{
spin_lock(&dcache_lru_lock);
if (list_empty(&dentry->d_lru)) {
- list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+ list_add_tail(&dentry->d_lru, list);
dentry->d_sb->s_nr_dentry_unused++;
dentry_stat.nr_unused++;
} else {
- list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+ list_move_tail(&dentry->d_lru, list);
}
spin_unlock(&dcache_lru_lock);
}
@@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
}
/**
- * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
- * @sb: superblock to shrink dentry LRU.
- * @count: number of entries to prune
- * @flags: flags to control the dentry processing
+ * prune_dcache_sb - shrink the dcache
+ * @sb: superblock
+ * @count: number of entries to try to free
+ *
+ * Attempt to shrink the superblock dcache LRU by @count entries. This is
+ * done when we need more memory and is called from the superblock shrinker
+ * function.
*
- * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
+ * This function may fail to free any resources if all the dentries are in
+ * use.
*/
-static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
+void prune_dcache_sb(struct super_block *sb, int count)
{
struct dentry *dentry;
LIST_HEAD(referenced);
@@ -795,13 +800,7 @@ relock:
goto relock;
}
- /*
- * If we are honouring the DCACHE_REFERENCED flag and the
- * dentry has this flag set, don't free it. Clear the flag
- * and put it back on the LRU.
- */
- if (flags & DCACHE_REFERENCED &&
- dentry->d_flags & DCACHE_REFERENCED) {
+ if (dentry->d_flags & DCACHE_REFERENCED) {
dentry->d_flags &= ~DCACHE_REFERENCED;
list_move(&dentry->d_lru, &referenced);
spin_unlock(&dentry->d_lock);
@@ -821,23 +820,6 @@ relock:
}
/**
- * prune_dcache_sb - shrink the dcache
- * @sb: superblock
- * @nr_to_scan: number of entries to try to free
- *
- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
- * done when we need more memory an called from the superblock shrinker
- * function.
- *
- * This function may fail to free any resources if all the dentries are in
- * use.
- */
-void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
-{
- __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
-}
-
-/**
* shrink_dcache_sb - shrink dcache for a superblock
* @sb: superblock
*
@@ -1091,7 +1073,7 @@ EXPORT_SYMBOL(have_submounts);
* drop the lock and return early due to latency
* constraints.
*/
-static int select_parent(struct dentry * parent)
+static int select_parent(struct dentry *parent, struct list_head *dispose)
{
struct dentry *this_parent;
struct list_head *next;
@@ -1113,12 +1095,11 @@ resume:
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- /*
- * move only zero ref count dentries to the end
- * of the unused list for prune_dcache
+ /*
+ * move only zero ref count dentries to the dispose list.
*/
if (!dentry->d_count) {
- dentry_lru_move_tail(dentry);
+ dentry_lru_move_list(dentry, dispose);
found++;
} else {
dentry_lru_del(dentry);
@@ -1180,14 +1161,13 @@ rename_retry:
*
* Prune the dcache to remove unused children of the parent dentry.
*/
-
void shrink_dcache_parent(struct dentry * parent)
{
- struct super_block *sb = parent->d_sb;
+ LIST_HEAD(dispose);
int found;
- while ((found = select_parent(parent)) != 0)
- __shrink_dcache_sb(sb, found, 0);
+ while ((found = select_parent(parent, &dispose)) != 0)
+ shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(shrink_dcache_parent);
@@ -1460,6 +1440,23 @@ struct dentry * d_alloc_root(struct inode * root_inode)
}
EXPORT_SYMBOL(d_alloc_root);
+struct dentry *d_make_root(struct inode *root_inode)
+{
+ struct dentry *res = NULL;
+
+ if (root_inode) {
+ static const struct qstr name = { .name = "/", .len = 1 };
+
+ res = __d_alloc(root_inode->i_sb, &name);
+ if (res)
+ d_instantiate(res, root_inode);
+ else
+ iput(root_inode);
+ }
+ return res;
+}
+EXPORT_SYMBOL(d_make_root);
+
static struct dentry * __d_find_any_alias(struct inode *inode)
{
struct dentry *alias;
@@ -2439,20 +2436,19 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
/**
* prepend_path - Prepend path string to a buffer
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buffer: pointer to the end of the buffer
* @buflen: pointer to buffer length
*
* Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
*/
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+ const struct path *root,
char **buffer, int *buflen)
{
struct dentry *dentry = path->dentry;
struct vfsmount *vfsmnt = path->mnt;
+ struct mount *mnt = real_mount(vfsmnt);
bool slash = false;
int error = 0;
@@ -2462,11 +2458,11 @@ static int prepend_path(const struct path *path, struct path *root,
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
/* Global root? */
- if (vfsmnt->mnt_parent == vfsmnt) {
+ if (!mnt_has_parent(mnt))
goto global_root;
- }
- dentry = vfsmnt->mnt_mountpoint;
- vfsmnt = vfsmnt->mnt_parent;
+ dentry = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
+ vfsmnt = &mnt->mnt;
continue;
}
parent = dentry->d_parent;
@@ -2483,10 +2479,10 @@ static int prepend_path(const struct path *path, struct path *root,
dentry = parent;
}
-out:
if (!error && !slash)
error = prepend(buffer, buflen, "/", 1);
+out:
br_read_unlock(vfsmount_lock);
return error;
@@ -2500,15 +2496,17 @@ global_root:
WARN(1, "Root dentry has weird name <%.*s>\n",
(int) dentry->d_name.len, dentry->d_name.name);
}
- root->mnt = vfsmnt;
- root->dentry = dentry;
+ if (!slash)
+ error = prepend(buffer, buflen, "/", 1);
+ if (!error)
+ error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
goto out;
}
/**
* __d_path - return the path of a dentry
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buf: buffer to return value in
* @buflen: buffer length
*
@@ -2519,10 +2517,10 @@ global_root:
*
* "buflen" should be positive.
*
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
*/
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+ const struct path *root,
char *buf, int buflen)
{
char *res = buf + buflen;
@@ -2533,7 +2531,28 @@ char *__d_path(const struct path *path, struct path *root,
error = prepend_path(path, root, &res, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
+ return ERR_PTR(error);
+ if (error > 0)
+ return NULL;
+ return res;
+}
+
+char *d_absolute_path(const struct path *path,
+ char *buf, int buflen)
+{
+ struct path root = {};
+ char *res = buf + buflen;
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
+
+ if (error > 1)
+ error = -EINVAL;
+ if (error < 0)
return ERR_PTR(error);
return res;
}
@@ -2541,8 +2560,9 @@ char *__d_path(const struct path *path, struct path *root,
/*
* same as __d_path but appends "(deleted)" for unlinked files.
*/
-static int path_with_deleted(const struct path *path, struct path *root,
- char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+ const struct path *root,
+ char **buf, int *buflen)
{
prepend(buf, buflen, "\0", 1);
if (d_unlinked(path->dentry)) {
@@ -2579,7 +2599,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
/*
@@ -2594,9 +2613,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (error)
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error < 0)
res = ERR_PTR(error);
write_sequnlock(&rename_lock);
path_put(&root);
@@ -2617,7 +2635,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2642,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (!error && !path_equal(&tmp, &root))
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error > 0)
error = prepend_unreachable(&res, &buflen);
write_sequnlock(&rename_lock);
path_put(&root);
@@ -2758,19 +2774,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
- struct path tmp = root;
char *cwd = page + PAGE_SIZE;
int buflen = PAGE_SIZE;
prepend(&cwd, &buflen, "\0", 1);
- error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+ error = prepend_path(&pwd, &root, &cwd, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
goto out;
/* Unreachable from current root */
- if (!path_equal(&tmp, &root)) {
+ if (error > 0) {
error = prepend_unreachable(&cwd, &buflen);
if (error)
goto out;
@@ -2836,31 +2851,6 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
return result;
}
-int path_is_under(struct path *path1, struct path *path2)
-{
- struct vfsmount *mnt = path1->mnt;
- struct dentry *dentry = path1->dentry;
- int res;
-
- br_read_lock(vfsmount_lock);
- if (mnt != path2->mnt) {
- for (;;) {
- if (mnt->mnt_parent == mnt) {
- br_read_unlock(vfsmount_lock);
- return 0;
- }
- if (mnt->mnt_parent == path2->mnt)
- break;
- mnt = mnt->mnt_parent;
- }
- dentry = mnt->mnt_mountpoint;
- }
- res = is_subdir(dentry, path2->dentry);
- br_read_unlock(vfsmount_lock);
- return res;
-}
-EXPORT_SYMBOL(path_is_under);
-
void d_genocide(struct dentry *root)
{
struct dentry *this_parent;
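
Two hunks in this region are related: the ceph open_root_dentry() fix has to remember to iput() the inode when d_alloc_root() fails, while the new d_make_root() folds that rule into the helper itself, consuming the inode reference on success and failure alike so callers cannot leak it. A user-space sketch of the "consume the reference even on failure" constructor contract, with hypothetical refcounted objects rather than real inodes and dentries:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical refcounted object standing in for an inode. */
    struct obj {
    	int refs;
    };

    static void obj_put(struct obj *o)
    {
    	if (o && --o->refs == 0)
    		free(o);
    }

    struct wrapper {
    	struct obj *inner;
    };

    /* Like d_make_root(): takes ownership of @o.  On allocation failure the
     * reference is dropped here, so the caller never has to clean it up. */
    static struct wrapper *make_root(struct obj *o)
    {
    	struct wrapper *w;

    	if (!o)
    		return NULL;
    	w = malloc(sizeof(*w));
    	if (!w) {
    		obj_put(o);          /* consume the reference on failure */
    		return NULL;
    	}
    	w->inner = o;                /* reference transferred on success */
    	return w;
    }

    int main(void)
    {
    	struct obj *o = calloc(1, sizeof(*o));
    	struct wrapper *w;

    	if (!o)
    		return 1;
    	o->refs = 1;
    	w = make_root(o);
    	printf("%s\n", w ? "root made" : "failed, nothing leaked");
    	if (w) {
    		obj_put(w->inner);
    		free(w);
    	}
    	return 0;
    }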
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 90f76575c05..f65d4455c5e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -15,9 +15,11 @@
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
+#include <linux/io.h>
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -95,7 +97,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
struct dentry *parent, u8 *value)
{
/* if there are no write bits set, make read only */
@@ -147,7 +149,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
struct dentry *parent, u16 *value)
{
/* if there are no write bits set, make read only */
@@ -199,7 +201,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value)
{
/* if there are no write bits set, make read only */
@@ -252,7 +254,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value)
{
/* if there are no write bits set, make read only */
@@ -298,7 +300,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value)
{
/* if there are no write bits set, make read only */
@@ -322,7 +324,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value)
{
/* if there are no write bits set, make read only */
@@ -346,7 +348,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value)
{
/* if there are no write bits set, make read only */
@@ -370,7 +372,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
struct dentry *parent, u64 *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_x64);
@@ -401,7 +403,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set,
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_size_t);
@@ -473,7 +475,7 @@ static const struct file_operations fops_bool = {
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent, u32 *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_bool);
@@ -518,10 +520,103 @@ static const struct file_operations fops_blob = {
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
*/
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
+
+#ifdef CONFIG_HAS_IOMEM
+
+/*
+ * The regset32 stuff is used to print 32-bit registers using the
+ * seq_file utilities. We offer printing a register set in an already-opened
+ * sequential file or create a debugfs file that only prints a regset32.
+ */
+
+/**
+ * debugfs_print_regs32 - use seq_print to describe a set of registers
+ * @s: the seq_file structure being used to generate output
+ * @regs: an array of struct debugfs_reg32 structures
+ * @nregs: the length of the above array
+ * @base: the base address to be used in reading the registers
+ * @prefix: a string to be prefixed to every output line
+ *
+ * This function outputs a text block describing the current values of
+ * some 32-bit hardware registers. It is meant to be used within debugfs
+ * files based on seq_file that need to show registers, intermixed with other
+ * information. The prefix argument may be used to specify a leading string,
+ * because some peripherals have several blocks of identical registers,
+ * for example configuration of dma channels
+ */
+int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nregs; i++, regs++) {
+ if (prefix)
+ ret += seq_printf(s, "%s", prefix);
+ ret += seq_printf(s, "%s = 0x%08x\n", regs->name,
+ readl(base + regs->offset));
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(debugfs_print_regs32);
+
+static int debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct debugfs_regset32 *regset = s->private;
+
+ debugfs_print_regs32(s, regset->regs, regset->nregs, regset->base, "");
+ return 0;
+}
+
+static int debugfs_open_regset32(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations fops_regset32 = {
+ .open = debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * debugfs_create_regset32 - create a debugfs file that returns register values
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @regset: a pointer to a struct debugfs_regset32, which contains a pointer
+ * to an array of register definitions, the array size and the base
+ * address where the register bank is to be found.
+ *
+ * This function creates a file in debugfs with the given name that reports
+ * the names and values of a set of 32-bit registers. If the @mode variable
+ * is so set it can be read from. Writing is not supported.
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, %NULL will be returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned. It is not wise to check for this value, but rather, check for
+ * %NULL or !%NULL instead, so as to eliminate the need for #ifdef in the calling
+ * code.
+ */
+struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
+{
+ return debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_regset32);
+
+#endif /* CONFIG_HAS_IOMEM */
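
As a usage sketch of the interface added above: the register table below is invented for illustration, and the wrapper fields (regs, nregs, base) are the ones dereferenced by debugfs_show_regset32() above. A driver could expose a read-only register dump like this:

static const struct debugfs_reg32 foo_regs[] = {
        { .name = "ctrl",   .offset = 0x00 },
        { .name = "status", .offset = 0x04 },
        { .name = "irq_en", .offset = 0x08 },
};

static struct debugfs_regset32 foo_regset;

static void foo_create_regdump(void __iomem *mmio, struct dentry *dir)
{
        /* tie the register table to the mapped base address */
        foo_regset.regs  = foo_regs;
        foo_regset.nregs = ARRAY_SIZE(foo_regs);
        foo_regset.base  = mmio;

        /* read-only "regdump" file under the driver's debugfs directory */
        debugfs_create_regset32("regdump", S_IRUGO, dir, &foo_regset);
}

A driver that already implements a seq_file show routine can instead call debugfs_print_regs32() from it directly, passing a prefix string to label each block of registers.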
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index f3a257d7a98..956d5ddddf6 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -30,7 +30,7 @@ static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
+static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev_t dev,
void *data, const struct file_operations *fops)
{
@@ -69,7 +69,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
/* SMP-safe */
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev, void *data,
+ umode_t mode, dev_t dev, void *data,
const struct file_operations *fops)
{
struct inode *inode;
@@ -87,7 +87,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
return error;
}
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode,
void *data, const struct file_operations *fops)
{
int res;
@@ -101,14 +101,14 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
return res;
}
-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode,
void *data, const struct file_operations *fops)
{
mode = (mode & S_IALLUGO) | S_IFLNK;
return debugfs_mknod(dir, dentry, mode, 0, data, fops);
}
-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
void *data, const struct file_operations *fops)
{
int res;
@@ -146,7 +146,7 @@ static struct file_system_type debug_fs_type = {
.kill_sb = kill_litter_super,
};
-static int debugfs_create_by_name(const char *name, mode_t mode,
+static int debugfs_create_by_name(const char *name, umode_t mode,
struct dentry *parent,
struct dentry **dentry,
void *data,
@@ -160,7 +160,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* have around.
*/
if (!parent)
- parent = debugfs_mount->mnt_sb->s_root;
+ parent = debugfs_mount->mnt_root;
*dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
@@ -214,7 +214,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index d5d5297efe9..c4e2a58a2e8 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -246,9 +246,9 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
return err;
}
-static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int devpts_show_options(struct seq_file *seq, struct dentry *root)
{
- struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb);
+ struct pts_fs_info *fsi = DEVPTS_SB(root->d_sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
if (opts->setuid)
@@ -301,7 +301,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
inode = new_inode(s);
if (!inode)
- goto free_fsi;
+ goto fail;
inode->i_ino = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -316,8 +316,6 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
printk(KERN_ERR "devpts: get root dentry failed\n");
iput(inode);
-free_fsi:
- kfree(s->s_fs_info);
fail:
return -ENOMEM;
}
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 6cf72fcc0d0..e7e327d43fa 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/dlmconstants.h>
#include <net/ipv6.h>
#include <net/sock.h>
@@ -36,6 +37,7 @@
static struct config_group *space_list;
static struct config_group *comm_list;
static struct dlm_comm *local_comm;
+static uint32_t dlm_comm_count;
struct dlm_clusters;
struct dlm_cluster;
@@ -103,6 +105,8 @@ struct dlm_cluster {
unsigned int cl_timewarn_cs;
unsigned int cl_waitwarn_us;
unsigned int cl_new_rsb_count;
+ unsigned int cl_recover_callbacks;
+ char cl_cluster_name[DLM_LOCKSPACE_LEN];
};
enum {
@@ -118,6 +122,8 @@ enum {
CLUSTER_ATTR_TIMEWARN_CS,
CLUSTER_ATTR_WAITWARN_US,
CLUSTER_ATTR_NEW_RSB_COUNT,
+ CLUSTER_ATTR_RECOVER_CALLBACKS,
+ CLUSTER_ATTR_CLUSTER_NAME,
};
struct cluster_attribute {
@@ -126,6 +132,27 @@ struct cluster_attribute {
ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
};
+static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
+{
+ return sprintf(buf, "%s\n", cl->cl_cluster_name);
+}
+
+static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
+ const char *buf, size_t len)
+{
+ strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN);
+ strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN);
+ return len;
+}
+
+static struct cluster_attribute cluster_attr_cluster_name = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "cluster_name",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = cluster_cluster_name_read,
+ .store = cluster_cluster_name_write,
+};
+
static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
int *info_field, int check_zero,
const char *buf, size_t len)
@@ -171,6 +198,7 @@ CLUSTER_ATTR(protocol, 0);
CLUSTER_ATTR(timewarn_cs, 1);
CLUSTER_ATTR(waitwarn_us, 0);
CLUSTER_ATTR(new_rsb_count, 0);
+CLUSTER_ATTR(recover_callbacks, 0);
static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
@@ -185,6 +213,8 @@ static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
+ [CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr,
+ [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr,
NULL,
};
@@ -293,6 +323,7 @@ struct dlm_comms {
struct dlm_comm {
struct config_item item;
+ int seq;
int nodeid;
int local;
int addr_count;
@@ -309,6 +340,7 @@ struct dlm_node {
int nodeid;
int weight;
int new;
+ int comm_seq; /* copy of cm->seq when nd->nodeid is set */
};
static struct configfs_group_operations clusters_ops = {
@@ -455,6 +487,9 @@ static struct config_group *make_cluster(struct config_group *g,
cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
+ cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
+ memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
+ DLM_LOCKSPACE_LEN);
space_list = &sps->ss_group;
comm_list = &cms->cs_group;
@@ -558,6 +593,11 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&cm->item, name, &comm_type);
+
+ cm->seq = dlm_comm_count++;
+ if (!cm->seq)
+ cm->seq = dlm_comm_count++;
+
cm->nodeid = -1;
cm->local = 0;
cm->addr_count = 0;
@@ -801,7 +841,10 @@ static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
size_t len)
{
+ uint32_t seq = 0;
nd->nodeid = simple_strtol(buf, NULL, 0);
+ dlm_comm_seq(nd->nodeid, &seq);
+ nd->comm_seq = seq;
return len;
}
@@ -908,13 +951,13 @@ static void put_comm(struct dlm_comm *cm)
}
/* caller must free mem */
-int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
- int **new_out, int *new_count_out)
+int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+ int *count_out)
{
struct dlm_space *sp;
struct dlm_node *nd;
- int i = 0, rv = 0, ids_count = 0, new_count = 0;
- int *ids, *new;
+ struct dlm_config_node *nodes, *node;
+ int rv, count;
sp = get_space(lsname);
if (!sp)
@@ -927,73 +970,42 @@ int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
goto out;
}
- ids_count = sp->members_count;
+ count = sp->members_count;
- ids = kcalloc(ids_count, sizeof(int), GFP_NOFS);
- if (!ids) {
+ nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
+ if (!nodes) {
rv = -ENOMEM;
goto out;
}
+ node = nodes;
list_for_each_entry(nd, &sp->members, list) {
- ids[i++] = nd->nodeid;
- if (nd->new)
- new_count++;
- }
-
- if (ids_count != i)
- printk(KERN_ERR "dlm: bad nodeid count %d %d\n", ids_count, i);
-
- if (!new_count)
- goto out_ids;
+ node->nodeid = nd->nodeid;
+ node->weight = nd->weight;
+ node->new = nd->new;
+ node->comm_seq = nd->comm_seq;
+ node++;
- new = kcalloc(new_count, sizeof(int), GFP_NOFS);
- if (!new) {
- kfree(ids);
- rv = -ENOMEM;
- goto out;
+ nd->new = 0;
}
- i = 0;
- list_for_each_entry(nd, &sp->members, list) {
- if (nd->new) {
- new[i++] = nd->nodeid;
- nd->new = 0;
- }
- }
- *new_count_out = new_count;
- *new_out = new;
-
- out_ids:
- *ids_count_out = ids_count;
- *ids_out = ids;
+ *count_out = count;
+ *nodes_out = nodes;
+ rv = 0;
out:
mutex_unlock(&sp->members_lock);
put_space(sp);
return rv;
}
-int dlm_node_weight(char *lsname, int nodeid)
+int dlm_comm_seq(int nodeid, uint32_t *seq)
{
- struct dlm_space *sp;
- struct dlm_node *nd;
- int w = -EEXIST;
-
- sp = get_space(lsname);
- if (!sp)
- goto out;
-
- mutex_lock(&sp->members_lock);
- list_for_each_entry(nd, &sp->members, list) {
- if (nd->nodeid != nodeid)
- continue;
- w = nd->weight;
- break;
- }
- mutex_unlock(&sp->members_lock);
- put_space(sp);
- out:
- return w;
+ struct dlm_comm *cm = get_comm(nodeid, NULL);
+ if (!cm)
+ return -EEXIST;
+ *seq = cm->seq;
+ put_comm(cm);
+ return 0;
}
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
@@ -1047,6 +1059,8 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
#define DEFAULT_WAITWARN_US 0
#define DEFAULT_NEW_RSB_COUNT 128
+#define DEFAULT_RECOVER_CALLBACKS 0
+#define DEFAULT_CLUSTER_NAME ""
struct dlm_config_info dlm_config = {
.ci_tcp_port = DEFAULT_TCP_PORT,
@@ -1060,6 +1074,8 @@ struct dlm_config_info dlm_config = {
.ci_protocol = DEFAULT_PROTOCOL,
.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
.ci_waitwarn_us = DEFAULT_WAITWARN_US,
- .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT
+ .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
+ .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
+ .ci_cluster_name = DEFAULT_CLUSTER_NAME
};
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 3099d0dd26c..9f5e3663bb0 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -14,6 +14,13 @@
#ifndef __CONFIG_DOT_H__
#define __CONFIG_DOT_H__
+struct dlm_config_node {
+ int nodeid;
+ int weight;
+ int new;
+ uint32_t comm_seq;
+};
+
#define DLM_MAX_ADDR_COUNT 3
struct dlm_config_info {
@@ -29,15 +36,17 @@ struct dlm_config_info {
int ci_timewarn_cs;
int ci_waitwarn_us;
int ci_new_rsb_count;
+ int ci_recover_callbacks;
+ char ci_cluster_name[DLM_LOCKSPACE_LEN];
};
extern struct dlm_config_info dlm_config;
int dlm_config_init(void);
void dlm_config_exit(void);
-int dlm_node_weight(char *lsname, int nodeid);
-int dlm_nodeid_list(char *lsname, int **ids_out, int *ids_count_out,
- int **new_out, int *new_count_out);
+int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+ int *count_out);
+int dlm_comm_seq(int nodeid, uint32_t *seq);
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
int dlm_our_nodeid(void);
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 59779237e2b..3dca2b39e83 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -393,6 +393,7 @@ static const struct seq_operations format3_seq_ops;
static void *table_seq_start(struct seq_file *seq, loff_t *pos)
{
+ struct rb_node *node;
struct dlm_ls *ls = seq->private;
struct rsbtbl_iter *ri;
struct dlm_rsb *r;
@@ -418,9 +419,10 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
ri->format = 3;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
- list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
- res_hashchain) {
+ if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+ for (node = rb_first(&ls->ls_rsbtbl[bucket].keep); node;
+ node = rb_next(node)) {
+ r = rb_entry(node, struct dlm_rsb, res_hashnode);
if (!entry--) {
dlm_hold_rsb(r);
ri->rsb = r;
@@ -449,9 +451,9 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
}
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
- r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
- struct dlm_rsb, res_hashchain);
+ if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+ node = rb_first(&ls->ls_rsbtbl[bucket].keep);
+ r = rb_entry(node, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r);
ri->rsb = r;
ri->bucket = bucket;
@@ -467,7 +469,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
{
struct dlm_ls *ls = seq->private;
struct rsbtbl_iter *ri = iter_ptr;
- struct list_head *next;
+ struct rb_node *next;
struct dlm_rsb *r, *rp;
loff_t n = *pos;
unsigned bucket;
@@ -480,10 +482,10 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
spin_lock(&ls->ls_rsbtbl[bucket].lock);
rp = ri->rsb;
- next = rp->res_hashchain.next;
+ next = rb_next(&rp->res_hashnode);
- if (next != &ls->ls_rsbtbl[bucket].list) {
- r = list_entry(next, struct dlm_rsb, res_hashchain);
+ if (next) {
+ r = rb_entry(next, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r);
ri->rsb = r;
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
@@ -511,9 +513,9 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
}
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
- r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
- struct dlm_rsb, res_hashchain);
+ if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
+ next = rb_first(&ls->ls_rsbtbl[bucket].keep);
+ r = rb_entry(next, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r);
ri->rsb = r;
ri->bucket = bucket;
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 7b84c1dbc82..83641574b01 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -290,7 +290,6 @@ int dlm_recover_directory(struct dlm_ls *ls)
out_status:
error = 0;
- dlm_set_recover_status(ls, DLM_RS_DIR);
log_debug(ls, "dlm_recover_directory %d entries", count);
out_free:
kfree(last_name);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index fe2860c0244..3a564d197e9 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -103,8 +103,8 @@ struct dlm_dirtable {
};
struct dlm_rsbtable {
- struct list_head list;
- struct list_head toss;
+ struct rb_root keep;
+ struct rb_root toss;
spinlock_t lock;
};
@@ -117,6 +117,10 @@ struct dlm_member {
struct list_head list;
int nodeid;
int weight;
+ int slot;
+ int slot_prev;
+ int comm_seq;
+ uint32_t generation;
};
/*
@@ -125,10 +129,8 @@ struct dlm_member {
struct dlm_recover {
struct list_head list;
- int *nodeids; /* nodeids of all members */
- int node_count;
- int *new; /* nodeids of new members */
- int new_count;
+ struct dlm_config_node *nodes;
+ int nodes_count;
uint64_t seq;
};
@@ -285,7 +287,10 @@ struct dlm_rsb {
unsigned long res_toss_time;
uint32_t res_first_lkid;
struct list_head res_lookup; /* lkbs waiting on first */
- struct list_head res_hashchain; /* rsbtbl */
+ union {
+ struct list_head res_hashchain;
+ struct rb_node res_hashnode; /* rsbtbl */
+ };
struct list_head res_grantqueue;
struct list_head res_convertqueue;
struct list_head res_waitqueue;
@@ -334,7 +339,9 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
/* dlm_header is first element of all structs sent between nodes */
#define DLM_HEADER_MAJOR 0x00030000
-#define DLM_HEADER_MINOR 0x00000000
+#define DLM_HEADER_MINOR 0x00000001
+
+#define DLM_HEADER_SLOTS 0x00000001
#define DLM_MSG 1
#define DLM_RCOM 2
@@ -422,10 +429,34 @@ union dlm_packet {
struct dlm_rcom rcom;
};
+#define DLM_RSF_NEED_SLOTS 0x00000001
+
+/* RCOM_STATUS data */
+struct rcom_status {
+ __le32 rs_flags;
+ __le32 rs_unused1;
+ __le64 rs_unused2;
+};
+
+/* RCOM_STATUS_REPLY data */
struct rcom_config {
__le32 rf_lvblen;
__le32 rf_lsflags;
- __le64 rf_unused;
+
+ /* DLM_HEADER_SLOTS adds: */
+ __le32 rf_flags;
+ __le16 rf_our_slot;
+ __le16 rf_num_slots;
+ __le32 rf_generation;
+ __le32 rf_unused1;
+ __le64 rf_unused2;
+};
+
+struct rcom_slot {
+ __le32 ro_nodeid;
+ __le16 ro_slot;
+ __le16 ro_unused1;
+ __le64 ro_unused2;
};
struct rcom_lock {
@@ -452,6 +483,7 @@ struct dlm_ls {
struct list_head ls_list; /* list of lockspaces */
dlm_lockspace_t *ls_local_handle;
uint32_t ls_global_id; /* global unique lockspace ID */
+ uint32_t ls_generation;
uint32_t ls_exflags;
int ls_lvblen;
int ls_count; /* refcount of processes in
@@ -490,6 +522,11 @@ struct dlm_ls {
int ls_total_weight;
int *ls_node_array;
+ int ls_slot;
+ int ls_num_slots;
+ int ls_slots_size;
+ struct dlm_slot *ls_slots;
+
struct dlm_rsb ls_stub_rsb; /* for returning errors */
struct dlm_lkb ls_stub_lkb; /* for returning errors */
struct dlm_message ls_stub_ms; /* for faking a reply */
@@ -537,6 +574,9 @@ struct dlm_ls {
struct list_head ls_root_list; /* root resources */
struct rw_semaphore ls_root_sem; /* protect root_list */
+ const struct dlm_lockspace_ops *ls_ops;
+ void *ls_ops_arg;
+
int ls_namelen;
char ls_name[1];
};
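
The rcom_status/rcom_config/rcom_slot definitions above fix the wire format used by the slot code in this patch: a STATUS_REPLY buffer carries one rcom_config followed immediately by rf_num_slots rcom_slot entries, and the extended fields are only meaningful when the sender's header minor version advertises DLM_HEADER_SLOTS. A minimal sketch of how a receiver locates the slot array (mirroring dlm_slots_version() and dlm_slots_copy_in() added below):

if ((rc->rc_header.h_version & 0x0000FFFF) >= DLM_HEADER_SLOTS) {
        struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
        struct rcom_slot *ro;

        /* the slot array starts right after the fixed rcom_config */
        ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

        /* ro[0] .. ro[le16_to_cpu(rf->rf_num_slots) - 1] are valid */
}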
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 83b5e32514e..d47183043c5 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -56,6 +56,7 @@
L: receive_xxxx_reply() <- R: send_xxxx_reply()
*/
#include <linux/types.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
@@ -380,6 +381,8 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
list_del(&r->res_hashchain);
+ /* Convert the empty list_head to a NULL rb_node for tree usage: */
+ memset(&r->res_hashnode, 0, sizeof(struct rb_node));
ls->ls_new_rsb_count--;
spin_unlock(&ls->ls_new_rsb_spin);
@@ -388,7 +391,6 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
memcpy(r->res_name, name, len);
mutex_init(&r->res_mutex);
- INIT_LIST_HEAD(&r->res_hashchain);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
INIT_LIST_HEAD(&r->res_convertqueue);
@@ -400,14 +402,31 @@ static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
return 0;
}
-static int search_rsb_list(struct list_head *head, char *name, int len,
+static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
+{
+ char maxname[DLM_RESNAME_MAXLEN];
+
+ memset(maxname, 0, DLM_RESNAME_MAXLEN);
+ memcpy(maxname, name, nlen);
+ return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
+}
+
+static int search_rsb_tree(struct rb_root *tree, char *name, int len,
unsigned int flags, struct dlm_rsb **r_ret)
{
+ struct rb_node *node = tree->rb_node;
struct dlm_rsb *r;
int error = 0;
-
- list_for_each_entry(r, head, res_hashchain) {
- if (len == r->res_length && !memcmp(name, r->res_name, len))
+ int rc;
+
+ while (node) {
+ r = rb_entry(node, struct dlm_rsb, res_hashnode);
+ rc = rsb_cmp(r, name, len);
+ if (rc < 0)
+ node = node->rb_left;
+ else if (rc > 0)
+ node = node->rb_right;
+ else
goto found;
}
*r_ret = NULL;
@@ -420,22 +439,54 @@ static int search_rsb_list(struct list_head *head, char *name, int len,
return error;
}
+static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
+{
+ struct rb_node **newn = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int rc;
+
+ while (*newn) {
+ struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
+ res_hashnode);
+
+ parent = *newn;
+ rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
+ if (rc < 0)
+ newn = &parent->rb_left;
+ else if (rc > 0)
+ newn = &parent->rb_right;
+ else {
+ log_print("rsb_insert match");
+ dlm_dump_rsb(rsb);
+ dlm_dump_rsb(cur);
+ return -EEXIST;
+ }
+ }
+
+ rb_link_node(&rsb->res_hashnode, parent, newn);
+ rb_insert_color(&rsb->res_hashnode, tree);
+ return 0;
+}
+
static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
int error;
- error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
+ error = search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, flags, &r);
if (!error) {
kref_get(&r->res_ref);
goto out;
}
- error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
+ error = search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
if (error)
goto out;
- list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ if (error)
+ return error;
if (dlm_no_directory(ls))
goto out;
@@ -527,8 +578,7 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
nodeid = 0;
r->res_nodeid = nodeid;
}
- list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
- error = 0;
+ error = rsb_insert(r, &ls->ls_rsbtbl[bucket].keep);
out_unlock:
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
out:
@@ -556,7 +606,8 @@ static void toss_rsb(struct kref *kref)
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
kref_init(&r->res_ref);
- list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
+ rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
r->res_toss_time = jiffies;
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
@@ -1082,19 +1133,19 @@ static void dir_remove(struct dlm_rsb *r)
r->res_name, r->res_length);
}
-/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
- found since they are in order of newest to oldest? */
+/* FIXME: make this more efficient */
static int shrink_bucket(struct dlm_ls *ls, int b)
{
+ struct rb_node *n;
struct dlm_rsb *r;
int count = 0, found;
for (;;) {
found = 0;
spin_lock(&ls->ls_rsbtbl[b].lock);
- list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
- res_hashchain) {
+ for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = rb_next(n)) {
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
if (!time_after_eq(jiffies, r->res_toss_time +
dlm_config.ci_toss_secs * HZ))
continue;
@@ -1108,7 +1159,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
}
if (kref_put(&r->res_ref, kill_rsb)) {
- list_del(&r->res_hashchain);
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
spin_unlock(&ls->ls_rsbtbl[b].lock);
if (is_master(r))
@@ -4441,10 +4492,12 @@ int dlm_purge_locks(struct dlm_ls *ls)
static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
{
+ struct rb_node *n;
struct dlm_rsb *r, *r_ret = NULL;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
+ for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
if (!rsb_flag(r, RSB_LOCKS_PURGED))
continue;
hold_rsb(r);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index a1d8f1af144..a1ea25face8 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -386,12 +386,15 @@ static void threads_stop(void)
dlm_lowcomms_stop();
}
-static int new_lockspace(const char *name, int namelen, void **lockspace,
- uint32_t flags, int lvblen)
+static int new_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops, void *ops_arg,
+ int *ops_result, dlm_lockspace_t **lockspace)
{
struct dlm_ls *ls;
int i, size, error;
int do_unreg = 0;
+ int namelen = strlen(name);
if (namelen > DLM_LOCKSPACE_LEN)
return -EINVAL;
@@ -403,8 +406,24 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
return -EINVAL;
if (!dlm_user_daemon_available()) {
- module_put(THIS_MODULE);
- return -EUNATCH;
+ log_print("dlm user daemon not available");
+ error = -EUNATCH;
+ goto out;
+ }
+
+ if (ops && ops_result) {
+ if (!dlm_config.ci_recover_callbacks)
+ *ops_result = -EOPNOTSUPP;
+ else
+ *ops_result = 0;
+ }
+
+ if (dlm_config.ci_recover_callbacks && cluster &&
+ strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
+ log_print("dlm cluster name %s mismatch %s",
+ dlm_config.ci_cluster_name, cluster);
+ error = -EBADR;
+ goto out;
}
error = 0;
@@ -442,6 +461,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
ls->ls_flags = 0;
ls->ls_scan_time = jiffies;
+ if (ops && dlm_config.ci_recover_callbacks) {
+ ls->ls_ops = ops;
+ ls->ls_ops_arg = ops_arg;
+ }
+
if (flags & DLM_LSFL_TIMEWARN)
set_bit(LSFL_TIMEWARN, &ls->ls_flags);
@@ -457,8 +481,8 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
if (!ls->ls_rsbtbl)
goto out_lsfree;
for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
- INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
+ ls->ls_rsbtbl[i].keep.rb_node = NULL;
+ ls->ls_rsbtbl[i].toss.rb_node = NULL;
spin_lock_init(&ls->ls_rsbtbl[i].lock);
}
@@ -525,6 +549,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
if (!ls->ls_recover_buf)
goto out_dirfree;
+ ls->ls_slot = 0;
+ ls->ls_num_slots = 0;
+ ls->ls_slots_size = 0;
+ ls->ls_slots = NULL;
+
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
ls->ls_recover_list_count = 0;
@@ -614,8 +643,10 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
return error;
}
-int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
- uint32_t flags, int lvblen)
+int dlm_new_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops, void *ops_arg,
+ int *ops_result, dlm_lockspace_t **lockspace)
{
int error = 0;
@@ -625,7 +656,8 @@ int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
if (error)
goto out;
- error = new_lockspace(name, namelen, lockspace, flags, lvblen);
+ error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg,
+ ops_result, lockspace);
if (!error)
ls_count++;
if (error > 0)
@@ -685,7 +717,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
static int release_lockspace(struct dlm_ls *ls, int force)
{
struct dlm_rsb *rsb;
- struct list_head *head;
+ struct rb_node *n;
int i, busy, rv;
busy = lockspace_busy(ls, force);
@@ -746,20 +778,15 @@ static int release_lockspace(struct dlm_ls *ls, int force)
*/
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- head = &ls->ls_rsbtbl[i].list;
- while (!list_empty(head)) {
- rsb = list_entry(head->next, struct dlm_rsb,
- res_hashchain);
-
- list_del(&rsb->res_hashchain);
+ while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
+ rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+ rb_erase(n, &ls->ls_rsbtbl[i].keep);
dlm_free_rsb(rsb);
}
- head = &ls->ls_rsbtbl[i].toss;
- while (!list_empty(head)) {
- rsb = list_entry(head->next, struct dlm_rsb,
- res_hashchain);
- list_del(&rsb->res_hashchain);
+ while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
+ rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+ rb_erase(n, &ls->ls_rsbtbl[i].toss);
dlm_free_rsb(rsb);
}
}
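
For reference, a hypothetical caller of the reworked dlm_new_lockspace() (lockspace and cluster names are invented, flags and lvblen are arbitrary example values, and the callback signatures are inferred from the ls_ops calls made in member.c later in this patch):

static void my_recover_prep(void *arg)
{
        /* pause activity that depends on the old membership */
}

static void my_recover_slot(void *arg, struct dlm_slot *slot)
{
        pr_info("node %d (slot %d) considered failed\n",
                slot->nodeid, slot->slot);
}

static void my_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
                            int our_slot, uint32_t generation)
{
        pr_info("recovery done: %d slots, our slot %d, generation %u\n",
                num_slots, our_slot, generation);
}

static const struct dlm_lockspace_ops my_ops = {
        .recover_prep = my_recover_prep,
        .recover_slot = my_recover_slot,
        .recover_done = my_recover_done,
};

static int my_join(void)
{
        dlm_lockspace_t *ls;
        int ops_result, error;

        error = dlm_new_lockspace("myfs", "mycluster", 0, 64,
                                  &my_ops, NULL, &ops_result, &ls);
        if (!error && ops_result < 0)
                pr_warn("dlm recover callbacks not enabled\n");
        return error;
}

A negative ops_result does not fail the call; it only tells the caller that the callbacks will not be used because recover_callbacks is not enabled in the dlm configuration.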
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 990626e7da8..0b3109ee425 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -281,7 +281,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
} else {
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
- ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
+ ret6->sin6_addr = in6->sin6_addr;
}
return 0;
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index b12532e553f..862640a36d5 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2009 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -19,6 +19,280 @@
#include "config.h"
#include "lowcomms.h"
+int dlm_slots_version(struct dlm_header *h)
+{
+ if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
+ return 0;
+ return 1;
+}
+
+void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
+ struct dlm_member *memb)
+{
+ struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
+
+ if (!dlm_slots_version(&rc->rc_header))
+ return;
+
+ memb->slot = le16_to_cpu(rf->rf_our_slot);
+ memb->generation = le32_to_cpu(rf->rf_generation);
+}
+
+void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc)
+{
+ struct dlm_slot *slot;
+ struct rcom_slot *ro;
+ int i;
+
+ ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
+
+ /* ls_slots array is sparse, but not rcom_slots */
+
+ for (i = 0; i < ls->ls_slots_size; i++) {
+ slot = &ls->ls_slots[i];
+ if (!slot->nodeid)
+ continue;
+ ro->ro_nodeid = cpu_to_le32(slot->nodeid);
+ ro->ro_slot = cpu_to_le16(slot->slot);
+ ro++;
+ }
+}
+
+#define SLOT_DEBUG_LINE 128
+
+static void log_debug_slots(struct dlm_ls *ls, uint32_t gen, int num_slots,
+ struct rcom_slot *ro0, struct dlm_slot *array,
+ int array_size)
+{
+ char line[SLOT_DEBUG_LINE];
+ int len = SLOT_DEBUG_LINE - 1;
+ int pos = 0;
+ int ret, i;
+
+ if (!dlm_config.ci_log_debug)
+ return;
+
+ memset(line, 0, sizeof(line));
+
+ if (array) {
+ for (i = 0; i < array_size; i++) {
+ if (!array[i].nodeid)
+ continue;
+
+ ret = snprintf(line + pos, len - pos, " %d:%d",
+ array[i].slot, array[i].nodeid);
+ if (ret >= len - pos)
+ break;
+ pos += ret;
+ }
+ } else if (ro0) {
+ for (i = 0; i < num_slots; i++) {
+ ret = snprintf(line + pos, len - pos, " %d:%d",
+ ro0[i].ro_slot, ro0[i].ro_nodeid);
+ if (ret >= len - pos)
+ break;
+ pos += ret;
+ }
+ }
+
+ log_debug(ls, "generation %u slots %d%s", gen, num_slots, line);
+}
+
+int dlm_slots_copy_in(struct dlm_ls *ls)
+{
+ struct dlm_member *memb;
+ struct dlm_rcom *rc = ls->ls_recover_buf;
+ struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
+ struct rcom_slot *ro0, *ro;
+ int our_nodeid = dlm_our_nodeid();
+ int i, num_slots;
+ uint32_t gen;
+
+ if (!dlm_slots_version(&rc->rc_header))
+ return -1;
+
+ gen = le32_to_cpu(rf->rf_generation);
+ if (gen <= ls->ls_generation) {
+ log_error(ls, "dlm_slots_copy_in gen %u old %u",
+ gen, ls->ls_generation);
+ }
+ ls->ls_generation = gen;
+
+ num_slots = le16_to_cpu(rf->rf_num_slots);
+ if (!num_slots)
+ return -1;
+
+ ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
+
+ for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+ ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
+ ro->ro_slot = le16_to_cpu(ro->ro_slot);
+ }
+
+ log_debug_slots(ls, gen, num_slots, ro0, NULL, 0);
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+ if (ro->ro_nodeid != memb->nodeid)
+ continue;
+ memb->slot = ro->ro_slot;
+ memb->slot_prev = memb->slot;
+ break;
+ }
+
+ if (memb->nodeid == our_nodeid) {
+ if (ls->ls_slot && ls->ls_slot != memb->slot) {
+ log_error(ls, "dlm_slots_copy_in our slot "
+ "changed %d %d", ls->ls_slot,
+ memb->slot);
+ return -1;
+ }
+
+ if (!ls->ls_slot)
+ ls->ls_slot = memb->slot;
+ }
+
+ if (!memb->slot) {
+ log_error(ls, "dlm_slots_copy_in nodeid %d no slot",
+ memb->nodeid);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* for any nodes that do not support slots, we will not have set memb->slot
+ in wait_status_all(), so memb->slot will remain -1, and we will not
+ assign slots or set ls_num_slots here */
+
+int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
+ struct dlm_slot **slots_out, uint32_t *gen_out)
+{
+ struct dlm_member *memb;
+ struct dlm_slot *array;
+ int our_nodeid = dlm_our_nodeid();
+ int array_size, max_slots, i;
+ int need = 0;
+ int max = 0;
+ int num = 0;
+ uint32_t gen = 0;
+
+ /* our own memb struct will have slot -1 gen 0 */
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (memb->nodeid == our_nodeid) {
+ memb->slot = ls->ls_slot;
+ memb->generation = ls->ls_generation;
+ break;
+ }
+ }
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (memb->generation > gen)
+ gen = memb->generation;
+
+ /* node doesn't support slots */
+
+ if (memb->slot == -1)
+ return -1;
+
+ /* node needs a slot assigned */
+
+ if (!memb->slot)
+ need++;
+
+ /* node has a slot assigned */
+
+ num++;
+
+ if (!max || max < memb->slot)
+ max = memb->slot;
+
+ /* sanity check, once slot is assigned it shouldn't change */
+
+ if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) {
+ log_error(ls, "nodeid %d slot changed %d %d",
+ memb->nodeid, memb->slot_prev, memb->slot);
+ return -1;
+ }
+ memb->slot_prev = memb->slot;
+ }
+
+ array_size = max + need;
+
+ array = kzalloc(array_size * sizeof(struct dlm_slot), GFP_NOFS);
+ if (!array)
+ return -ENOMEM;
+
+ num = 0;
+
+ /* fill in slots (offsets) that are used */
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (!memb->slot)
+ continue;
+
+ if (memb->slot > array_size) {
+ log_error(ls, "invalid slot number %d", memb->slot);
+ kfree(array);
+ return -1;
+ }
+
+ array[memb->slot - 1].nodeid = memb->nodeid;
+ array[memb->slot - 1].slot = memb->slot;
+ num++;
+ }
+
+ /* assign new slots from unused offsets */
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (memb->slot)
+ continue;
+
+ for (i = 0; i < array_size; i++) {
+ if (array[i].nodeid)
+ continue;
+
+ memb->slot = i + 1;
+ memb->slot_prev = memb->slot;
+ array[i].nodeid = memb->nodeid;
+ array[i].slot = memb->slot;
+ num++;
+
+ if (!ls->ls_slot && memb->nodeid == our_nodeid)
+ ls->ls_slot = memb->slot;
+ break;
+ }
+
+ if (!memb->slot) {
+ log_error(ls, "no free slot found");
+ kfree(array);
+ return -1;
+ }
+ }
+
+ gen++;
+
+ log_debug_slots(ls, gen, num, NULL, array, array_size);
+
+ max_slots = (dlm_config.ci_buffer_size - sizeof(struct dlm_rcom) -
+ sizeof(struct rcom_config)) / sizeof(struct rcom_slot);
+
+ if (num > max_slots) {
+ log_error(ls, "num_slots %d exceeds max_slots %d",
+ num, max_slots);
+ kfree(array);
+ return -1;
+ }
+
+ *gen_out = gen;
+ *slots_out = array;
+ *slots_size = array_size;
+ *num_slots = num;
+ return 0;
+}
+
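A short worked example of the assignment above, with hypothetical nodeids:

/*
 * members: nodeid 1 (slot 1), nodeid 3 (slot 2), nodeid 2 newly added (slot 0)
 * -> max = 2, need = 1, array_size = 3
 * -> used slots copied in: array[0] = {1,1}, array[1] = {3,2}
 * -> node 2 takes the first free offset: array[2] = {2,3}, i.e. slot 3
 * -> num = 3 and the generation is incremented
 */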
static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
struct dlm_member *memb = NULL;
@@ -43,59 +317,51 @@ static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
}
}
-static int dlm_add_member(struct dlm_ls *ls, int nodeid)
+static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
{
struct dlm_member *memb;
- int w, error;
+ int error;
memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
if (!memb)
return -ENOMEM;
- w = dlm_node_weight(ls->ls_name, nodeid);
- if (w < 0) {
- kfree(memb);
- return w;
- }
-
- error = dlm_lowcomms_connect_node(nodeid);
+ error = dlm_lowcomms_connect_node(node->nodeid);
if (error < 0) {
kfree(memb);
return error;
}
- memb->nodeid = nodeid;
- memb->weight = w;
+ memb->nodeid = node->nodeid;
+ memb->weight = node->weight;
+ memb->comm_seq = node->comm_seq;
add_ordered_member(ls, memb);
ls->ls_num_nodes++;
return 0;
}
-static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
-{
- list_move(&memb->list, &ls->ls_nodes_gone);
- ls->ls_num_nodes--;
-}
-
-int dlm_is_member(struct dlm_ls *ls, int nodeid)
+static struct dlm_member *find_memb(struct list_head *head, int nodeid)
{
struct dlm_member *memb;
- list_for_each_entry(memb, &ls->ls_nodes, list) {
+ list_for_each_entry(memb, head, list) {
if (memb->nodeid == nodeid)
- return 1;
+ return memb;
}
+ return NULL;
+}
+
+int dlm_is_member(struct dlm_ls *ls, int nodeid)
+{
+ if (find_memb(&ls->ls_nodes, nodeid))
+ return 1;
return 0;
}
int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
- struct dlm_member *memb;
-
- list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
- if (memb->nodeid == nodeid)
- return 1;
- }
+ if (find_memb(&ls->ls_nodes_gone, nodeid))
+ return 1;
return 0;
}
@@ -176,7 +442,7 @@ static int ping_members(struct dlm_ls *ls)
error = dlm_recovery_stopped(ls);
if (error)
break;
- error = dlm_rcom_status(ls, memb->nodeid);
+ error = dlm_rcom_status(ls, memb->nodeid, 0);
if (error)
break;
}
@@ -186,10 +452,88 @@ static int ping_members(struct dlm_ls *ls)
return error;
}
+static void dlm_lsop_recover_prep(struct dlm_ls *ls)
+{
+ if (!ls->ls_ops || !ls->ls_ops->recover_prep)
+ return;
+ ls->ls_ops->recover_prep(ls->ls_ops_arg);
+}
+
+static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
+{
+ struct dlm_slot slot;
+ uint32_t seq;
+ int error;
+
+ if (!ls->ls_ops || !ls->ls_ops->recover_slot)
+ return;
+
+ /* if there is no comms connection with this node
+ or the present comms connection is newer
+ than the one that existed when this member was added, then
+ we consider the node to have failed (versus
+ being removed due to dlm_release_lockspace) */
+
+ error = dlm_comm_seq(memb->nodeid, &seq);
+
+ if (!error && seq == memb->comm_seq)
+ return;
+
+ slot.nodeid = memb->nodeid;
+ slot.slot = memb->slot;
+
+ ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot);
+}
+
+void dlm_lsop_recover_done(struct dlm_ls *ls)
+{
+ struct dlm_member *memb;
+ struct dlm_slot *slots;
+ int i, num;
+
+ if (!ls->ls_ops || !ls->ls_ops->recover_done)
+ return;
+
+ num = ls->ls_num_nodes;
+
+ slots = kzalloc(num * sizeof(struct dlm_slot), GFP_KERNEL);
+ if (!slots)
+ return;
+
+ i = 0;
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (i == num) {
+ log_error(ls, "dlm_lsop_recover_done bad num %d", num);
+ goto out;
+ }
+ slots[i].nodeid = memb->nodeid;
+ slots[i].slot = memb->slot;
+ i++;
+ }
+
+ ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num,
+ ls->ls_slot, ls->ls_generation);
+ out:
+ kfree(slots);
+}
+
+static struct dlm_config_node *find_config_node(struct dlm_recover *rv,
+ int nodeid)
+{
+ int i;
+
+ for (i = 0; i < rv->nodes_count; i++) {
+ if (rv->nodes[i].nodeid == nodeid)
+ return &rv->nodes[i];
+ }
+ return NULL;
+}
+
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
struct dlm_member *memb, *safe;
- int i, error, found, pos = 0, neg = 0, low = -1;
+ struct dlm_config_node *node;
+ int i, error, neg = 0, low = -1;
/* previously removed members that we've not finished removing need to
count as a negative change so the "neg" recovery steps will happen */
@@ -202,46 +546,32 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
/* move departed members from ls_nodes to ls_nodes_gone */
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
- found = 0;
- for (i = 0; i < rv->node_count; i++) {
- if (memb->nodeid == rv->nodeids[i]) {
- found = 1;
- break;
- }
- }
+ node = find_config_node(rv, memb->nodeid);
+ if (node && !node->new)
+ continue;
- if (!found) {
- neg++;
- dlm_remove_member(ls, memb);
+ if (!node) {
log_debug(ls, "remove member %d", memb->nodeid);
+ } else {
+ /* removed and re-added */
+ log_debug(ls, "remove member %d comm_seq %u %u",
+ memb->nodeid, memb->comm_seq, node->comm_seq);
}
- }
-
- /* Add an entry to ls_nodes_gone for members that were removed and
- then added again, so that previous state for these nodes will be
- cleared during recovery. */
-
- for (i = 0; i < rv->new_count; i++) {
- if (!dlm_is_member(ls, rv->new[i]))
- continue;
- log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);
- memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
- if (!memb)
- return -ENOMEM;
- memb->nodeid = rv->new[i];
- list_add_tail(&memb->list, &ls->ls_nodes_gone);
neg++;
+ list_move(&memb->list, &ls->ls_nodes_gone);
+ ls->ls_num_nodes--;
+ dlm_lsop_recover_slot(ls, memb);
}
/* add new members to ls_nodes */
- for (i = 0; i < rv->node_count; i++) {
- if (dlm_is_member(ls, rv->nodeids[i]))
+ for (i = 0; i < rv->nodes_count; i++) {
+ node = &rv->nodes[i];
+ if (dlm_is_member(ls, node->nodeid))
continue;
- dlm_add_member(ls, rv->nodeids[i]);
- pos++;
- log_debug(ls, "add member %d", rv->nodeids[i]);
+ dlm_add_member(ls, node);
+ log_debug(ls, "add member %d", node->nodeid);
}
list_for_each_entry(memb, &ls->ls_nodes, list) {
@@ -251,7 +581,6 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
ls->ls_low_nodeid = low;
make_member_array(ls);
- dlm_set_recover_status(ls, DLM_RS_NODES);
*neg_out = neg;
error = ping_members(ls);
@@ -261,12 +590,8 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
ls->ls_members_result = error;
complete(&ls->ls_members_done);
}
- if (error)
- goto out;
- error = dlm_recover_members_wait(ls);
- out:
- log_debug(ls, "total members %d error %d", ls->ls_num_nodes, error);
+ log_debug(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
return error;
}
@@ -327,26 +652,35 @@ int dlm_ls_stop(struct dlm_ls *ls)
*/
dlm_recoverd_suspend(ls);
+
+ spin_lock(&ls->ls_recover_lock);
+ kfree(ls->ls_slots);
+ ls->ls_slots = NULL;
+ ls->ls_num_slots = 0;
+ ls->ls_slots_size = 0;
ls->ls_recover_status = 0;
+ spin_unlock(&ls->ls_recover_lock);
+
dlm_recoverd_resume(ls);
if (!ls->ls_recover_begin)
ls->ls_recover_begin = jiffies;
+
+ dlm_lsop_recover_prep(ls);
return 0;
}
int dlm_ls_start(struct dlm_ls *ls)
{
struct dlm_recover *rv = NULL, *rv_old;
- int *ids = NULL, *new = NULL;
- int error, ids_count = 0, new_count = 0;
+ struct dlm_config_node *nodes;
+ int error, count;
rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS);
if (!rv)
return -ENOMEM;
- error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
- &new, &new_count);
+ error = dlm_config_nodes(ls->ls_name, &nodes, &count);
if (error < 0)
goto fail;
@@ -361,10 +695,8 @@ int dlm_ls_start(struct dlm_ls *ls)
goto fail;
}
- rv->nodeids = ids;
- rv->node_count = ids_count;
- rv->new = new;
- rv->new_count = new_count;
+ rv->nodes = nodes;
+ rv->nodes_count = count;
rv->seq = ++ls->ls_recover_seq;
rv_old = ls->ls_recover_args;
ls->ls_recover_args = rv;
@@ -372,9 +704,8 @@ int dlm_ls_start(struct dlm_ls *ls)
if (rv_old) {
log_error(ls, "unused recovery %llx %d",
- (unsigned long long)rv_old->seq, rv_old->node_count);
- kfree(rv_old->nodeids);
- kfree(rv_old->new);
+ (unsigned long long)rv_old->seq, rv_old->nodes_count);
+ kfree(rv_old->nodes);
kfree(rv_old);
}
@@ -383,8 +714,7 @@ int dlm_ls_start(struct dlm_ls *ls)
fail:
kfree(rv);
- kfree(ids);
- kfree(new);
+ kfree(nodes);
return error;
}
diff --git a/fs/dlm/member.h b/fs/dlm/member.h
index 7a26fca1e0b..3deb70661c6 100644
--- a/fs/dlm/member.h
+++ b/fs/dlm/member.h
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -20,6 +20,14 @@ void dlm_clear_members_gone(struct dlm_ls *ls);
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
int dlm_is_removed(struct dlm_ls *ls, int nodeid);
int dlm_is_member(struct dlm_ls *ls, int nodeid);
+int dlm_slots_version(struct dlm_header *h);
+void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
+ struct dlm_member *memb);
+void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc);
+int dlm_slots_copy_in(struct dlm_ls *ls);
+int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
+ struct dlm_slot **slots_out, uint32_t *gen_out);
+void dlm_lsop_recover_done(struct dlm_ls *ls);
#endif /* __MEMBER_DOT_H__ */
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index f10a50f24e8..ac5c616c969 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -23,6 +23,7 @@
#include "memory.h"
#include "lock.h"
#include "util.h"
+#include "member.h"
static int rcom_response(struct dlm_ls *ls)
@@ -72,20 +73,30 @@ static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
dlm_lowcomms_commit_buffer(mh);
}
+static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs,
+ uint32_t flags)
+{
+ rs->rs_flags = cpu_to_le32(flags);
+}
+
/* When replying to a status request, a node also sends back its
configuration values. The requesting node then checks that the remote
node is configured the same way as itself. */
-static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
+static void set_rcom_config(struct dlm_ls *ls, struct rcom_config *rf,
+ uint32_t num_slots)
{
rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen);
rf->rf_lsflags = cpu_to_le32(ls->ls_exflags);
+
+ rf->rf_our_slot = cpu_to_le16(ls->ls_slot);
+ rf->rf_num_slots = cpu_to_le16(num_slots);
+ rf->rf_generation = cpu_to_le32(ls->ls_generation);
}
-static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
- size_t conf_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
log_error(ls, "version mismatch: %x nodeid %d: %x",
@@ -94,12 +105,6 @@ static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
return -EPROTO;
}
- if (rc->rc_header.h_length < conf_size) {
- log_error(ls, "config too short: %d nodeid %d",
- rc->rc_header.h_length, nodeid);
- return -EPROTO;
- }
-
if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen ||
le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) {
log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
@@ -127,7 +132,18 @@ static void disallow_sync_reply(struct dlm_ls *ls)
spin_unlock(&ls->ls_rcom_spin);
}
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
+/*
+ * low nodeid gathers one slot value at a time from each node.
+ * it sets need_slots=0, and saves rf_our_slot returned from each
+ * rcom_config.
+ *
+ * other nodes gather all slot values at once from the low nodeid.
+ * they set need_slots=1, and ignore the rf_our_slot returned from each
+ * rcom_config. they use the rf_num_slots returned from the low
+ * node's rcom_config.
+ */
+
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
@@ -141,10 +157,13 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
goto out;
}
- error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
+ error = create_rcom(ls, nodeid, DLM_RCOM_STATUS,
+ sizeof(struct rcom_status), &rc, &mh);
if (error)
goto out;
+ set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags);
+
allow_sync_reply(ls, &rc->rc_id);
memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
@@ -161,8 +180,11 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
/* we pretend the remote lockspace exists with 0 status */
log_debug(ls, "remote node %d not ready", nodeid);
rc->rc_result = 0;
- } else
- error = check_config(ls, rc, nodeid);
+ error = 0;
+ } else {
+ error = check_rcom_config(ls, rc, nodeid);
+ }
+
/* the caller looks at rc_result for the remote recovery status */
out:
return error;
@@ -172,17 +194,60 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
- int error, nodeid = rc_in->rc_header.h_nodeid;
+ struct rcom_status *rs;
+ uint32_t status;
+ int nodeid = rc_in->rc_header.h_nodeid;
+ int len = sizeof(struct rcom_config);
+ int num_slots = 0;
+ int error;
+
+ if (!dlm_slots_version(&rc_in->rc_header)) {
+ status = dlm_recover_status(ls);
+ goto do_create;
+ }
+
+ rs = (struct rcom_status *)rc_in->rc_buf;
+ if (!(rs->rs_flags & DLM_RSF_NEED_SLOTS)) {
+ status = dlm_recover_status(ls);
+ goto do_create;
+ }
+
+ spin_lock(&ls->ls_recover_lock);
+ status = ls->ls_recover_status;
+ num_slots = ls->ls_num_slots;
+ spin_unlock(&ls->ls_recover_lock);
+ len += num_slots * sizeof(struct rcom_slot);
+
+ do_create:
error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
- sizeof(struct rcom_config), &rc, &mh);
+ len, &rc, &mh);
if (error)
return;
+
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
- rc->rc_result = dlm_recover_status(ls);
- make_config(ls, (struct rcom_config *) rc->rc_buf);
+ rc->rc_result = status;
+
+ set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);
+
+ if (!num_slots)
+ goto do_send;
+
+ spin_lock(&ls->ls_recover_lock);
+ if (ls->ls_num_slots != num_slots) {
+ spin_unlock(&ls->ls_recover_lock);
+ log_debug(ls, "receive_rcom_status num_slots %d to %d",
+ num_slots, ls->ls_num_slots);
+ rc->rc_result = 0;
+ set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, 0);
+ goto do_send;
+ }
+
+ dlm_slots_copy_out(ls, rc);
+ spin_unlock(&ls->ls_recover_lock);
+ do_send:
send_rcom(ls, mh, rc);
}
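
Condensed from dlm_recover_members_wait() in recover.c below, the two sides of the exchange described in the comment above look roughly like this (declarations and error handling omitted):

if (ls->ls_low_nodeid == dlm_our_nodeid()) {
        /* low nodeid: poll each member, saving the slot it reports */
        error = wait_status_all(ls, DLM_RS_NODES, 1);
        rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
} else {
        /* other nodes: ask the low nodeid for the whole slot table */
        error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
        dlm_slots_copy_in(ls);
}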
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index b09abd29ba3..206723ab744 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -14,7 +14,7 @@
#ifndef __RCOM_DOT_H__
#define __RCOM_DOT_H__
-int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
+int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 14638235f7b..34d5adf1fce 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -85,14 +85,20 @@ uint32_t dlm_recover_status(struct dlm_ls *ls)
return status;
}
+static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
+{
+ ls->ls_recover_status |= status;
+}
+
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
spin_lock(&ls->ls_recover_lock);
- ls->ls_recover_status |= status;
+ _set_recover_status(ls, status);
spin_unlock(&ls->ls_recover_lock);
}
-static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
+static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
+ int save_slots)
{
struct dlm_rcom *rc = ls->ls_recover_buf;
struct dlm_member *memb;
@@ -106,10 +112,13 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
goto out;
}
- error = dlm_rcom_status(ls, memb->nodeid);
+ error = dlm_rcom_status(ls, memb->nodeid, 0);
if (error)
goto out;
+ if (save_slots)
+ dlm_slot_save(ls, rc, memb);
+
if (rc->rc_result & wait_status)
break;
if (delay < 1000)
@@ -121,7 +130,8 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
return error;
}
-static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
+static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
+ uint32_t status_flags)
{
struct dlm_rcom *rc = ls->ls_recover_buf;
int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
@@ -132,7 +142,7 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
goto out;
}
- error = dlm_rcom_status(ls, nodeid);
+ error = dlm_rcom_status(ls, nodeid, status_flags);
if (error)
break;
@@ -152,18 +162,56 @@ static int wait_status(struct dlm_ls *ls, uint32_t status)
int error;
if (ls->ls_low_nodeid == dlm_our_nodeid()) {
- error = wait_status_all(ls, status);
+ error = wait_status_all(ls, status, 0);
if (!error)
dlm_set_recover_status(ls, status_all);
} else
- error = wait_status_low(ls, status_all);
+ error = wait_status_low(ls, status_all, 0);
return error;
}
int dlm_recover_members_wait(struct dlm_ls *ls)
{
- return wait_status(ls, DLM_RS_NODES);
+ struct dlm_member *memb;
+ struct dlm_slot *slots;
+ int num_slots, slots_size;
+ int error, rv;
+ uint32_t gen;
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ memb->slot = -1;
+ memb->generation = 0;
+ }
+
+ if (ls->ls_low_nodeid == dlm_our_nodeid()) {
+ error = wait_status_all(ls, DLM_RS_NODES, 1);
+ if (error)
+ goto out;
+
+ /* slots array is sparse, slots_size may be > num_slots */
+
+ rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
+ if (!rv) {
+ spin_lock(&ls->ls_recover_lock);
+ _set_recover_status(ls, DLM_RS_NODES_ALL);
+ ls->ls_num_slots = num_slots;
+ ls->ls_slots_size = slots_size;
+ ls->ls_slots = slots;
+ ls->ls_generation = gen;
+ spin_unlock(&ls->ls_recover_lock);
+ } else {
+ dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
+ }
+ } else {
+ error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
+ if (error)
+ goto out;
+
+ dlm_slots_copy_in(ls);
+ }
+ out:
+ return error;
}
int dlm_recover_directory_wait(struct dlm_ls *ls)
@@ -542,8 +590,6 @@ int dlm_recover_locks(struct dlm_ls *ls)
out:
if (error)
recover_list_clear(ls);
- else
- dlm_set_recover_status(ls, DLM_RS_LOCKS);
return error;
}
@@ -715,6 +761,7 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
int dlm_create_root_list(struct dlm_ls *ls)
{
+ struct rb_node *n;
struct dlm_rsb *r;
int i, error = 0;
@@ -727,7 +774,8 @@ int dlm_create_root_list(struct dlm_ls *ls)
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock);
- list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
+ for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
list_add(&r->res_root_list, &ls->ls_root_list);
dlm_hold_rsb(r);
}
@@ -741,7 +789,8 @@ int dlm_create_root_list(struct dlm_ls *ls)
continue;
}
- list_for_each_entry(r, &ls->ls_rsbtbl[i].toss, res_hashchain) {
+ for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
list_add(&r->res_root_list, &ls->ls_root_list);
dlm_hold_rsb(r);
}
@@ -771,16 +820,18 @@ void dlm_release_root_list(struct dlm_ls *ls)
void dlm_clear_toss_list(struct dlm_ls *ls)
{
- struct dlm_rsb *r, *safe;
+ struct rb_node *n, *next;
+ struct dlm_rsb *rsb;
int i;
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock);
- list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
- res_hashchain) {
- if (dlm_no_directory(ls) || !is_master(r)) {
- list_del(&r->res_hashchain);
- dlm_free_rsb(r);
+ for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
+ next = rb_next(n);
+ rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
+ if (dlm_no_directory(ls) || !is_master(rsb)) {
+ rb_erase(n, &ls->ls_rsbtbl[i].toss);
+ dlm_free_rsb(rsb);
}
}
spin_unlock(&ls->ls_rsbtbl[i].lock);
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 774da3cf92c..3780caf7ae0 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -54,7 +54,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
unsigned long start;
int error, neg = 0;
- log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
+ log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq);
mutex_lock(&ls->ls_recoverd_active);
@@ -76,14 +76,22 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
/*
* Add or remove nodes from the lockspace's ls_nodes list.
- * Also waits for all nodes to complete dlm_recover_members.
*/
error = dlm_recover_members(ls, rv, &neg);
if (error) {
- log_debug(ls, "recover_members failed %d", error);
+ log_debug(ls, "dlm_recover_members error %d", error);
goto fail;
}
+
+ dlm_set_recover_status(ls, DLM_RS_NODES);
+
+ error = dlm_recover_members_wait(ls);
+ if (error) {
+ log_debug(ls, "dlm_recover_members_wait error %d", error);
+ goto fail;
+ }
+
start = jiffies;
/*
@@ -93,17 +101,15 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_directory(ls);
if (error) {
- log_debug(ls, "recover_directory failed %d", error);
+ log_debug(ls, "dlm_recover_directory error %d", error);
goto fail;
}
- /*
- * Wait for all nodes to complete directory rebuild.
- */
+ dlm_set_recover_status(ls, DLM_RS_DIR);
error = dlm_recover_directory_wait(ls);
if (error) {
- log_debug(ls, "recover_directory_wait failed %d", error);
+ log_debug(ls, "dlm_recover_directory_wait error %d", error);
goto fail;
}
@@ -133,7 +139,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_masters(ls);
if (error) {
- log_debug(ls, "recover_masters failed %d", error);
+ log_debug(ls, "dlm_recover_masters error %d", error);
goto fail;
}
@@ -143,13 +149,15 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_locks(ls);
if (error) {
- log_debug(ls, "recover_locks failed %d", error);
+ log_debug(ls, "dlm_recover_locks error %d", error);
goto fail;
}
+ dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
error = dlm_recover_locks_wait(ls);
if (error) {
- log_debug(ls, "recover_locks_wait failed %d", error);
+ log_debug(ls, "dlm_recover_locks_wait error %d", error);
goto fail;
}
@@ -170,7 +178,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_locks_wait(ls);
if (error) {
- log_debug(ls, "recover_locks_wait failed %d", error);
+ log_debug(ls, "dlm_recover_locks_wait error %d", error);
goto fail;
}
}
@@ -186,9 +194,10 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_purge_requestqueue(ls);
dlm_set_recover_status(ls, DLM_RS_DONE);
+
error = dlm_recover_done_wait(ls);
if (error) {
- log_debug(ls, "recover_done_wait failed %d", error);
+ log_debug(ls, "dlm_recover_done_wait error %d", error);
goto fail;
}
@@ -200,34 +209,35 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = enable_locking(ls, rv->seq);
if (error) {
- log_debug(ls, "enable_locking failed %d", error);
+ log_debug(ls, "enable_locking error %d", error);
goto fail;
}
error = dlm_process_requestqueue(ls);
if (error) {
- log_debug(ls, "process_requestqueue failed %d", error);
+ log_debug(ls, "dlm_process_requestqueue error %d", error);
goto fail;
}
error = dlm_recover_waiters_post(ls);
if (error) {
- log_debug(ls, "recover_waiters_post failed %d", error);
+ log_debug(ls, "dlm_recover_waiters_post error %d", error);
goto fail;
}
dlm_grant_after_purge(ls);
- log_debug(ls, "recover %llx done: %u ms",
- (unsigned long long)rv->seq,
+ log_debug(ls, "dlm_recover %llx generation %u done: %u ms",
+ (unsigned long long)rv->seq, ls->ls_generation,
jiffies_to_msecs(jiffies - start));
mutex_unlock(&ls->ls_recoverd_active);
+ dlm_lsop_recover_done(ls);
return 0;
fail:
dlm_release_root_list(ls);
- log_debug(ls, "recover %llx error %d",
+ log_debug(ls, "dlm_recover %llx error %d",
(unsigned long long)rv->seq, error);
mutex_unlock(&ls->ls_recoverd_active);
return error;
@@ -250,8 +260,7 @@ static void do_ls_recovery(struct dlm_ls *ls)
if (rv) {
ls_recover(ls, rv);
- kfree(rv->nodeids);
- kfree(rv->new);
+ kfree(rv->nodes);
kfree(rv);
}
}
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index d8ea6075640..eb4ed9ba309 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -392,8 +392,9 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- error = dlm_new_lockspace(params->name, strlen(params->name),
- &lockspace, params->flags, DLM_USER_LVB_LEN);
+ error = dlm_new_lockspace(params->name, NULL, params->flags,
+ DLM_USER_LVB_LEN, NULL, NULL, NULL,
+ &lockspace);
if (error)
return error;
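The user.c hunk tracks the widened dlm_new_lockspace() interface: the output lockspace pointer is now the last argument, and the character-device path passes NULL for the new cluster-name and slot-callback parameters. A hedged sketch of an in-kernel caller following the same convention; the lockspace name and lvb length are illustrative:

	#include <linux/dlm.h>

	static dlm_lockspace_t *example_ls;

	/* Argument order as used above: name, cluster, flags, lvblen,
	 * ops, ops_arg, ops_result, lockspace. Callers that do not use
	 * recovery callbacks pass NULL for the three ops arguments. */
	static int example_create_lockspace(void)
	{
		return dlm_new_lockspace("example", NULL, 0, 32,
					 NULL, NULL, NULL, &example_ls);
	}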
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 32f90a3ae63..19a8ca4ab1d 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -144,24 +144,6 @@ static int ecryptfs_interpose(struct dentry *lower_dentry,
}
/**
- * ecryptfs_create_underlying_file
- * @lower_dir_inode: inode of the parent in the lower fs of the new file
- * @dentry: New file's dentry
- * @mode: The mode of the new file
- *
- * Creates the file in the lower file system.
- *
- * Returns zero on success; non-zero on error condition
- */
-static int
-ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
- struct dentry *dentry, int mode)
-{
- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- return vfs_create(lower_dir_inode, lower_dentry, mode, NULL);
-}
-
-/**
* ecryptfs_do_create
* @directory_inode: inode of the new file's dentry's parent in ecryptfs
* @ecryptfs_dentry: New file's dentry in ecryptfs
@@ -176,7 +158,7 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
*/
static struct inode *
ecryptfs_do_create(struct inode *directory_inode,
- struct dentry *ecryptfs_dentry, int mode)
+ struct dentry *ecryptfs_dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
@@ -191,8 +173,7 @@ ecryptfs_do_create(struct inode *directory_inode,
inode = ERR_CAST(lower_dir_dentry);
goto out;
}
- rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
- ecryptfs_dentry, mode);
+ rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, NULL);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
@@ -267,7 +248,7 @@ out:
*/
static int
ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
- int mode, struct nameidata *nd)
+ umode_t mode, struct nameidata *nd)
{
struct inode *ecryptfs_inode;
int rc;
@@ -559,7 +540,7 @@ out_lock:
return rc;
}
-static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
@@ -607,7 +588,7 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
}
static int
-ecryptfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int rc;
struct dentry *lower_dentry;
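The remaining ecryptfs hunks, and most of the exofs/ext2/ext3/ext4 namei hunks below, carry one mechanical VFS change: ->create, ->mkdir and ->mknod now take umode_t rather than int. A minimal sketch of the updated method signatures for a hypothetical filesystem (at this point the VFS still passes struct nameidata to ->create):

	#include <linux/fs.h>

	static int foo_create(struct inode *dir, struct dentry *dentry,
			      umode_t mode, struct nameidata *nd)
	{
		/* mode carries S_IFMT type bits plus permission bits */
		return -EOPNOTSUPP;
	}

	static int foo_mkdir(struct inode *dir, struct dentry *dentry,
			     umode_t mode)
	{
		return -EOPNOTSUPP;
	}

	static int foo_mknod(struct inode *dir, struct dentry *dentry,
			     umode_t mode, dev_t rdev)
	{
		return -EOPNOTSUPP;
	}

	static const struct inode_operations foo_dir_iops = {
		.create	= foo_create,
		.mkdir	= foo_mkdir,
		.mknod	= foo_mknod,
	};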
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index dbd52d40df4..9df7fd6e0c3 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -69,7 +69,6 @@ static void ecryptfs_i_callback(struct rcu_head *head)
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
}
@@ -132,9 +131,9 @@ static void ecryptfs_evict_inode(struct inode *inode)
* Prints the mount options for a given superblock.
* Returns zero; does not fail.
*/
-static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int ecryptfs_show_options(struct seq_file *m, struct dentry *root)
{
- struct super_block *sb = mnt->mnt_sb;
+ struct super_block *sb = root->d_sb;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
struct ecryptfs_global_auth_tok *walker;
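ecryptfs_show_options() illustrates another tree-wide conversion in this series: ->show_options receives the mount's root dentry instead of a vfsmount, and the superblock is reached through d_sb. A minimal sketch for a hypothetical filesystem (the option printed is illustrative):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static int foo_show_options(struct seq_file *m, struct dentry *root)
	{
		struct super_block *sb = root->d_sb;

		/* a real implementation would walk sb->s_fs_info here */
		if (sb->s_flags & MS_SYNCHRONOUS)
			seq_puts(m, ",sync");
		return 0;
	}

	static const struct super_operations foo_super_ops = {
		.show_options	= foo_show_options,
	};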
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 0f31acb0131..981106429a9 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -68,7 +68,6 @@ static struct inode *efs_alloc_inode(struct super_block *sb)
static void efs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
}
diff --git a/fs/exec.c b/fs/exec.c
index 36254645b7c..aeb135c7ff5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -59,6 +59,8 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
+
+#include <trace/events/task.h>
#include "internal.h"
int core_uses_pid;
@@ -1054,6 +1056,8 @@ void set_task_comm(struct task_struct *tsk, char *buf)
{
task_lock(tsk);
+ trace_task_rename(tsk, buf);
+
/*
* Threads may access current->comm without holding
* the task lock, so write the string carefully.
@@ -1225,7 +1229,7 @@ EXPORT_SYMBOL(install_exec_creds);
* - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH
*/
-int check_unsafe_exec(struct linux_binprm *bprm)
+static int check_unsafe_exec(struct linux_binprm *bprm)
{
struct task_struct *p = current, *t;
unsigned n_fs;
diff --git a/fs/exofs/Kconfig b/fs/exofs/Kconfig
index da42f32c49b..86194b2f799 100644
--- a/fs/exofs/Kconfig
+++ b/fs/exofs/Kconfig
@@ -1,14 +1,3 @@
-# Note ORE needs to "select ASYNC_XOR". So Not to force multiple selects
-# for every ORE user we do it like this. Any user should add itself here
-# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
-# selected here, and we default to "ON". So in effect it is like been
-# selected by any of the users.
-config ORE
- tristate
- depends on EXOFS_FS || PNFS_OBJLAYOUT
- select ASYNC_XOR
- default SCSI_OSD_ULD
-
config EXOFS_FS
tristate "exofs: OSD based file system support"
depends on SCSI_OSD_ULD
diff --git a/fs/exofs/Kconfig.ore b/fs/exofs/Kconfig.ore
new file mode 100644
index 00000000000..1ca7fb7b6ba
--- /dev/null
+++ b/fs/exofs/Kconfig.ore
@@ -0,0 +1,12 @@
+# ORE - Objects Raid Engine (libore.ko)
+#
+# Note ORE needs to "select ASYNC_XOR". So as not to force multiple selects
+# for every ORE user we do it like this. Any user should add itself here
+# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
+# selected here, and we default to "ON". So in effect it is like being
+# selected by any of the users.
+config ORE
+ tristate
+ depends on EXOFS_FS || PNFS_OBJLAYOUT
+ select ASYNC_XOR
+ default SCSI_OSD_ULD
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index d0941c6a1f7..80405836ba6 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -234,7 +234,7 @@ static unsigned char exofs_type_by_mode[S_IFMT >> S_SHIFT] = {
static inline
void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
{
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
de->file_type = exofs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 51f4b4c40f0..ca9d49665ef 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -154,7 +154,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
extern struct inode *exofs_iget(struct super_block *, unsigned long);
-struct inode *exofs_new_inode(struct inode *, int);
+struct inode *exofs_new_inode(struct inode *, umode_t);
extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
extern void exofs_evict_inode(struct inode *);
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index f6dbf7768ce..ea5e1f97806 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1276,7 +1276,7 @@ static void create_done(struct ore_io_state *ios, void *p)
/*
* Set up a new inode and create an object for it on the OSD
*/
-struct inode *exofs_new_inode(struct inode *dir, int mode)
+struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct exofs_sb_info *sbi = sb->s_fs_info;
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index b54c43775f1..9dbf0c30103 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -59,7 +59,7 @@ static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
return d_splice_alias(inode, dentry);
}
-static int exofs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode = exofs_new_inode(dir, mode);
@@ -74,7 +74,7 @@ static int exofs_create(struct inode *dir, struct dentry *dentry, int mode,
return err;
}
-static int exofs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int exofs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
struct inode *inode;
@@ -153,7 +153,7 @@ static int exofs_link(struct dentry *old_dentry, struct inode *dir,
return exofs_add_nondir(dentry, inode);
}
-static int exofs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int exofs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int err = -EMLINK;
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index d271ad83720..49cf230554a 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -266,7 +266,7 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
/* first/last seg is split */
num_raid_units += layout->group_width;
- sgs_per_dev = div_u64(num_raid_units, data_devs);
+ sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
} else {
/* For Writes add parity pages array. */
max_par_pages = num_raid_units * pages_in_unit *
@@ -445,10 +445,10 @@ int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
u64 residual = ios->reading ?
or->in.residual : or->out.residual;
u64 offset = (ios->offset + ios->length) - residual;
- struct ore_dev *od = ios->oc->ods[
- per_dev->dev - ios->oc->first_dev];
+ unsigned dev = per_dev->dev - ios->oc->first_dev;
+ struct ore_dev *od = ios->oc->ods[dev];
- on_dev_error(ios, od, per_dev->dev, osi.osd_err_pri,
+ on_dev_error(ios, od, dev, osi.osd_err_pri,
offset, residual);
}
if (osi.osd_err_pri >= acumulated_osd_err) {
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index 29c47e5c4a8..d222c77cfa1 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -328,8 +328,8 @@ static int _alloc_read_4_write(struct ore_io_state *ios)
/* @si contains info of the to-be-inserted page. Update of @si should be
* maintained by caller. Specifically si->dev, si->obj_offset, ...
*/
-static int _add_to_read_4_write(struct ore_io_state *ios,
- struct ore_striping_info *si, struct page *page)
+static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
+ struct page *page, unsigned pg_len)
{
struct request_queue *q;
struct ore_per_dev_state *per_dev;
@@ -366,17 +366,60 @@ static int _add_to_read_4_write(struct ore_io_state *ios,
_ore_add_sg_seg(per_dev, gap, true);
}
q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
- added_len = bio_add_pc_page(q, per_dev->bio, page, PAGE_SIZE, 0);
- if (unlikely(added_len != PAGE_SIZE)) {
+ added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
+ si->obj_offset % PAGE_SIZE);
+ if (unlikely(added_len != pg_len)) {
ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
per_dev->bio->bi_vcnt);
return -ENOMEM;
}
- per_dev->length += PAGE_SIZE;
+ per_dev->length += pg_len;
return 0;
}
+/* read the beginning of an unaligned first page */
+static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
+{
+ struct ore_striping_info si;
+ unsigned pg_len;
+
+ ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);
+
+ pg_len = si.obj_offset % PAGE_SIZE;
+ si.obj_offset -= pg_len;
+
+ ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
+ _LLU(si.obj_offset), pg_len, page->index, si.dev);
+
+ return _add_to_r4w(ios, &si, page, pg_len);
+}
+
+/* read the end of an incomplete last page */
+static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
+{
+ struct ore_striping_info si;
+ struct page *page;
+ unsigned pg_len, p, c;
+
+ ore_calc_stripe_info(ios->layout, *offset, 0, &si);
+
+ p = si.unit_off / PAGE_SIZE;
+ c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
+ ios->layout->mirrors_p1, si.par_dev, si.dev);
+ page = ios->sp2d->_1p_stripes[p].pages[c];
+
+ pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
+ *offset += pg_len;
+
+ ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
+ p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);
+
+ BUG_ON(!page);
+
+ return _add_to_r4w(ios, &si, page, pg_len);
+}
+
static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
{
struct bio_vec *bv;
@@ -444,9 +487,13 @@ static int _read_4_write(struct ore_io_state *ios)
struct page **pp = &_1ps->pages[c];
bool uptodate;
- if (*pp)
+ if (*pp) {
+ if (ios->offset % PAGE_SIZE)
+ /* Read the remainder of the page */
+ _add_to_r4w_first_page(ios, *pp);
/* to-be-written pages start here */
goto read_last_stripe;
+ }
*pp = ios->r4w->get_page(ios->private, offset,
&uptodate);
@@ -454,7 +501,7 @@ static int _read_4_write(struct ore_io_state *ios)
return -ENOMEM;
if (!uptodate)
- _add_to_read_4_write(ios, &read_si, *pp);
+ _add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);
/* Mark read-pages to be cache_released */
_1ps->page_is_read[c] = true;
@@ -465,8 +512,11 @@ static int _read_4_write(struct ore_io_state *ios)
}
read_last_stripe:
- offset = ios->offset + (ios->length + PAGE_SIZE - 1) /
- PAGE_SIZE * PAGE_SIZE;
+ offset = ios->offset + ios->length;
+ if (offset % PAGE_SIZE)
+ _add_to_r4w_last_page(ios, &offset);
+ /* offset will be aligned to next page */
+
last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
* bytes_in_stripe;
if (offset == last_stripe_end) /* Optimize for the aligned case */
@@ -503,7 +553,7 @@ read_last_stripe:
/* Mark read-pages to be cache_released */
_1ps->page_is_read[c] = true;
if (!uptodate)
- _add_to_read_4_write(ios, &read_si, page);
+ _add_to_r4w(ios, &read_si, page, PAGE_SIZE);
}
offset += PAGE_SIZE;
@@ -551,7 +601,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
unsigned cur_len)
{
if (ios->reading) {
- BUG_ON(per_dev->cur_sg >= ios->sgs_per_dev);
+ if (per_dev->cur_sg >= ios->sgs_per_dev) {
+ ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
+ per_dev->cur_sg, ios->sgs_per_dev);
+ return -ENOMEM;
+ }
_ore_add_sg_seg(per_dev, cur_len, true);
} else {
struct __stripe_pages_2d *sp2d = ios->sp2d;
@@ -612,8 +666,6 @@ int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
return -ENOMEM;
}
- BUG_ON(ios->offset % PAGE_SIZE);
-
/* Round io down to last full strip */
first_stripe = div_u64(ios->offset, stripe_size);
last_stripe = div_u64(ios->offset + ios->length, stripe_size);
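The ore_raid.c changes let the RAID read-for-write path handle I/O that is not page aligned: the head read covers the bytes of the first page that precede the start offset, and the tail read covers the bytes from the end offset to the end of its page, after which the offset is page aligned. The arithmetic, as a small sketch with an illustrative page size (only meaningful when the offset is actually unaligned, which is what the offset % PAGE_SIZE checks above guard):

	#include <linux/types.h>

	#define SKETCH_PAGE_SIZE 4096ULL

	/* Bytes of the first page that lie before an unaligned start offset
	 * and must be read in before the partial page is rewritten. */
	static inline u64 head_read_len(u64 offset)
	{
		return offset % SKETCH_PAGE_SIZE;
	}

	/* Bytes from an unaligned end offset to the end of its page; adding
	 * this to the offset rounds it up to the next page boundary, as
	 * _add_to_r4w_last_page() does with *offset += pg_len. */
	static inline u64 tail_read_len(u64 end_offset)
	{
		return SKETCH_PAGE_SIZE - (end_offset % SKETCH_PAGE_SIZE);
	}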
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index e6085ec192d..d22cd168c6e 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -166,7 +166,6 @@ static struct inode *exofs_alloc_inode(struct super_block *sb)
static void exofs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
}
@@ -839,6 +838,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
if (ret) {
EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
+ dput(sb->s_root);
+ sb->s_root = NULL;
goto free_sbi;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 47cda410b54..d37df352d32 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -279,7 +279,7 @@ static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
else
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 9a4e5e206d0..75ad433c669 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -110,7 +110,7 @@ extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *, int);
/* ialloc.c */
-extern struct inode * ext2_new_inode (struct inode *, int, const struct qstr *);
+extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
extern void ext2_free_inode (struct inode *);
extern unsigned long ext2_count_free_inodes (struct super_block *);
extern void ext2_check_inodes_bitmap (struct super_block *);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c4e81dfb74b..8b15cf8cef3 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -429,7 +429,7 @@ found:
return group;
}
-struct inode *ext2_new_inode(struct inode *dir, int mode,
+struct inode *ext2_new_inode(struct inode *dir, umode_t mode,
const struct qstr *qstr)
{
struct super_block *sb;
@@ -573,8 +573,11 @@ got:
inode->i_generation = sbi->s_next_generation++;
spin_unlock(&sbi->s_next_gen_lock);
if (insert_inode_locked(inode) < 0) {
- err = -EINVAL;
- goto fail_drop;
+ ext2_error(sb, "ext2_new_inode",
+ "inode number already in use - inode=%lu",
+ (unsigned long) ino);
+ err = -EIO;
+ goto fail;
}
dquot_initialize(inode);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 91a6945af6d..740cad8dcd8 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -26,7 +26,6 @@
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
-#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
@@ -36,10 +35,6 @@
#include "acl.h"
#include "xip.h"
-MODULE_AUTHOR("Remy Card and others");
-MODULE_DESCRIPTION("Second Extended Filesystem");
-MODULE_LICENSE("GPL");
-
static int __ext2_write_inode(struct inode *inode, int do_sync);
/*
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index f81e250ac5c..1089f760c84 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -35,7 +35,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case EXT2_IOC_SETFLAGS: {
unsigned int oldflags;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -83,7 +83,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setflags_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
}
case EXT2_IOC_GETVERSION:
@@ -91,7 +91,7 @@ setflags_out:
case EXT2_IOC_SETVERSION:
if (!inode_owner_or_capable(inode))
return -EPERM;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
if (get_user(inode->i_generation, (int __user *) arg)) {
@@ -100,7 +100,7 @@ setflags_out:
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
}
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
case EXT2_IOC_GETRSVSZ:
if (test_opt(inode->i_sb, RESERVATION)
@@ -121,7 +121,7 @@ setflags_out:
if (get_user(rsv_window_size, (int __user *)arg))
return -EFAULT;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -145,7 +145,7 @@ setflags_out:
rsv->rsv_goal_size = rsv_window_size;
}
mutex_unlock(&ei->truncate_mutex);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return 0;
}
default:
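The ext2 (and, below, ext3) ioctl hunks swap mnt_want_write(filp->f_path.mnt) for the file-based helpers. The write-protection pairing, sketched for a hypothetical flag-setting ioctl:

	#include <linux/fs.h>
	#include <linux/mount.h>
	#include <linux/time.h>

	static long foo_ioctl_set_flag(struct file *filp, unsigned long arg)
	{
		struct inode *inode = filp->f_path.dentry->d_inode;
		int ret;

		/* take write access on the mount backing this open file */
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;

		mutex_lock(&inode->i_mutex);
		/* ... update the inode according to arg ... */
		inode->i_ctime = CURRENT_TIME_SEC;
		mark_inode_dirty(inode);
		mutex_unlock(&inode->i_mutex);

		mnt_drop_write_file(filp);
		return 0;
	}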
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 761fde807fc..080419814ba 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -94,7 +94,7 @@ struct dentry *ext2_get_parent(struct dentry *child)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
{
struct inode *inode;
@@ -119,7 +119,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, st
return ext2_add_nondir(dentry, inode);
}
-static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode * inode;
int err;
@@ -214,7 +214,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
return err;
}
-static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
struct inode * inode;
int err = -EMLINK;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index bd8ac164a3b..0090595beb2 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -173,7 +173,6 @@ static struct inode *ext2_alloc_inode(struct super_block *sb)
static void ext2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
}
@@ -211,9 +210,9 @@ static void destroy_inodecache(void)
kmem_cache_destroy(ext2_inode_cachep);
}
-static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext2_show_options(struct seq_file *seq, struct dentry *root)
{
- struct super_block *sb = vfs->mnt_sb;
+ struct super_block *sb = root->d_sb;
struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = sbi->s_es;
unsigned long def_mount_opts;
@@ -1521,5 +1520,8 @@ static void __exit exit_ext2_fs(void)
exit_ext2_xattr();
}
+MODULE_AUTHOR("Remy Card and others");
+MODULE_DESCRIPTION("Second Extended Filesystem");
+MODULE_LICENSE("GPL");
module_init(init_ext2_fs)
module_exit(exit_ext2_fs)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index d27b71f1d18..6dcafc7efdf 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -54,7 +54,6 @@
*/
#include <linux/buffer_head.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index c922adc8ef4..be7a8d02c9a 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -3,7 +3,6 @@
* Handler for storing security labels as extended attributes.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fs.h>
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 667e46a8d62..2989467d359 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -5,7 +5,6 @@
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/fs.h>
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 099d20f4716..f470e44c4b8 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -6,7 +6,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/string.h>
#include "ext2.h"
#include "xattr.h"
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 5c866e06e7a..1cde2843801 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -371,7 +371,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent)
* group to find a free inode.
*/
struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
- const struct qstr *qstr, int mode)
+ const struct qstr *qstr, umode_t mode)
{
struct super_block *sb;
struct buffer_head *bitmap_bh = NULL;
@@ -525,8 +525,12 @@ got:
if (IS_DIRSYNC(inode))
handle->h_sync = 1;
if (insert_inode_locked(inode) < 0) {
- err = -EINVAL;
- goto fail_drop;
+ /*
+ * Likely a bitmap corruption causing inode to be allocated
+ * twice.
+ */
+ err = -EIO;
+ goto fail;
}
spin_lock(&sbi->s_next_gen_lock);
inode->i_generation = sbi->s_next_generation++;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 85fe655fe3e..2d0afeca0b4 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -22,7 +22,6 @@
* Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
*/
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
@@ -223,8 +222,12 @@ void ext3_evict_inode (struct inode *inode)
*
* Note that directories do not have this problem because they don't
* use page cache.
+ *
+ * The s_journal check handles the case when ext3_get_journal() fails
+ * and puts the journal inode.
*/
if (inode->i_nlink && ext3_should_journal_data(inode) &&
+ EXT3_SB(inode->i_sb)->s_journal &&
(S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
@@ -1132,9 +1135,11 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
bh = ext3_getblk(handle, inode, block, create, err);
if (!bh)
return bh;
- if (buffer_uptodate(bh))
+ if (bh_uptodate_or_lock(bh))
return bh;
- ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+ get_bh(bh);
+ bh->b_end_io = end_buffer_read_sync;
+ submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -1617,7 +1622,13 @@ static int ext3_ordered_writepage(struct page *page,
int err;
J_ASSERT(PageLocked(page));
- WARN_ON_ONCE(IS_RDONLY(inode));
+ /*
+ * We don't want to warn for emergency remount. The condition is
+ * ordered to avoid dereferencing inode->i_sb in non-error case to
+ * avoid slow-downs.
+ */
+ WARN_ON_ONCE(IS_RDONLY(inode) &&
+ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
/*
* We give up here if we're reentered, because it might be for a
@@ -1692,7 +1703,13 @@ static int ext3_writeback_writepage(struct page *page,
int err;
J_ASSERT(PageLocked(page));
- WARN_ON_ONCE(IS_RDONLY(inode));
+ /*
+ * We don't want to warn for emergency remount. The condition is
+ * ordered to avoid dereferencing inode->i_sb in non-error case to
+ * avoid slow-downs.
+ */
+ WARN_ON_ONCE(IS_RDONLY(inode) &&
+ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
if (ext3_journal_current_handle())
goto out_fail;
@@ -1735,7 +1752,13 @@ static int ext3_journalled_writepage(struct page *page,
int err;
J_ASSERT(PageLocked(page));
- WARN_ON_ONCE(IS_RDONLY(inode));
+ /*
+ * We don't want to warn for emergency remount. The condition is
+ * ordered to avoid dereferencing inode->i_sb in non-error case to
+ * avoid slow-downs.
+ */
+ WARN_ON_ONCE(IS_RDONLY(inode) &&
+ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
if (ext3_journal_current_handle())
goto no_write;
@@ -2064,12 +2087,10 @@ static int ext3_block_truncate_page(struct inode *inode, loff_t from)
if (PageUptodate(page))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh)) {
- err = -EIO;
- ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
+ if (!bh_uptodate_or_lock(bh)) {
+ err = bh_submit_read(bh);
/* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
+ if (err)
goto unlock;
}
@@ -2490,7 +2511,7 @@ int ext3_can_truncate(struct inode *inode)
* transaction, and VFS/VM ensures that ext3_truncate() cannot run
* simultaneously on behalf of the same inode.
*
- * As we work through the truncate and commmit bits of it to the journal there
+ * As we work through the truncate and commit bits of it to the journal there
* is one core, guiding principle: the file's tree must always be consistent on
* disk. We must be able to restart the truncate after a crash.
*
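Several ext3 hunks above replace the ll_rw_block()+wait_on_buffer() read idiom, which is a no-op if someone else already holds the buffer lock, with bh_uptodate_or_lock() followed by a synchronous submit. A hedged sketch of the basic replacement pattern (the REQ_META|REQ_PRIO paths above open-code submit_bh() for the same reason):

	#include <linux/buffer_head.h>

	/* Read a metadata buffer synchronously. bh_uptodate_or_lock()
	 * returns non-zero if the buffer is already uptodate; otherwise it
	 * returns with the buffer locked and the read must be submitted. */
	static int read_bh_sync(struct buffer_head *bh)
	{
		if (bh_uptodate_or_lock(bh))
			return 0;

		/* consumes the lock, waits for completion, 0 or -EIO */
		return bh_submit_read(bh);
	}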
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index ba1b54e23ca..4af574ce4a4 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -44,7 +44,7 @@ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -110,7 +110,7 @@ flags_err:
err = ext3_change_inode_journal_flag(inode, jflag);
flags_out:
mutex_unlock(&inode->i_mutex);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GETVERSION:
@@ -126,7 +126,7 @@ flags_out:
if (!inode_owner_or_capable(inode))
return -EPERM;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
if (get_user(generation, (int __user *) arg)) {
@@ -134,10 +134,11 @@ flags_out:
goto setversion_out;
}
+ mutex_lock(&inode->i_mutex);
handle = ext3_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- goto setversion_out;
+ goto unlock_out;
}
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
@@ -146,8 +147,11 @@ flags_out:
err = ext3_mark_iloc_dirty(handle, inode, &iloc);
}
ext3_journal_stop(handle);
+
+unlock_out:
+ mutex_unlock(&inode->i_mutex);
setversion_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GETRSVSZ:
@@ -164,7 +168,7 @@ setversion_out:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -195,7 +199,7 @@ setversion_out:
}
mutex_unlock(&ei->truncate_mutex);
setrsvsz_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GROUP_EXTEND: {
@@ -206,7 +210,7 @@ setrsvsz_out:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -221,7 +225,7 @@ setrsvsz_out:
if (err == 0)
err = err2;
group_extend_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT3_IOC_GROUP_ADD: {
@@ -232,7 +236,7 @@ group_extend_out:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -249,7 +253,7 @@ group_extend_out:
if (err == 0)
err = err2;
group_add_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case FITRIM: {
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 642dc6d66df..e8e211795e9 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -921,9 +921,12 @@ restart:
num++;
bh = ext3_getblk(NULL, dir, b++, 0, &err);
bh_use[ra_max] = bh;
- if (bh)
- ll_rw_block(READ | REQ_META | REQ_PRIO,
- 1, &bh);
+ if (bh && !bh_uptodate_or_lock(bh)) {
+ get_bh(bh);
+ bh->b_end_io = end_buffer_read_sync;
+ submit_bh(READ | REQ_META | REQ_PRIO,
+ bh);
+ }
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
@@ -1698,7 +1701,7 @@ static int ext3_add_nondir(handle_t *handle,
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
+static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
struct nameidata *nd)
{
handle_t *handle;
@@ -1732,7 +1735,7 @@ retry:
}
static int ext3_mknod (struct inode * dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
@@ -1768,7 +1771,7 @@ retry:
return err;
}
-static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
handle_t *handle;
struct inode * inode;
@@ -2272,7 +2275,7 @@ retry:
err = PTR_ERR(handle);
goto err_drop_inode;
}
- inc_nlink(inode);
+ set_nlink(inode, 1);
err = ext3_orphan_del(handle, inode);
if (err) {
ext3_journal_stop(handle);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 922d289aeeb..726c7ef6cdf 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -511,7 +511,6 @@ static int ext3_drop_inode(struct inode *inode)
static void ext3_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
}
@@ -611,9 +610,9 @@ static char *data_mode_string(unsigned long mode)
* - it's set to a non-default value OR
* - if the per-sb default is different from the global default
*/
-static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext3_show_options(struct seq_file *seq, struct dentry *root)
{
- struct super_block *sb = vfs->mnt_sb;
+ struct super_block *sb = root->d_sb;
struct ext3_sb_info *sbi = EXT3_SB(sb);
struct ext3_super_block *es = sbi->s_es;
unsigned long def_mount_opts;
@@ -2060,9 +2059,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
ext3_orphan_cleanup(sb, es);
EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
- if (needs_recovery)
+ if (needs_recovery) {
+ ext3_mark_recovery_complete(sb, es);
ext3_msg(sb, KERN_INFO, "recovery complete");
- ext3_mark_recovery_complete(sb, es);
+ }
ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
@@ -2230,11 +2230,11 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
goto out_bdev;
}
journal->j_private = sb;
- ll_rw_block(READ, 1, &journal->j_sb_buffer);
- wait_on_buffer(journal->j_sb_buffer);
- if (!buffer_uptodate(journal->j_sb_buffer)) {
- ext3_msg(sb, KERN_ERR, "I/O error on journal device");
- goto out_journal;
+ if (!bh_uptodate_or_lock(journal->j_sb_buffer)) {
+ if (bh_submit_read(journal->j_sb_buffer)) {
+ ext3_msg(sb, KERN_ERR, "I/O error on journal device");
+ goto out_journal;
+ }
}
if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
ext3_msg(sb, KERN_ERR,
@@ -2910,7 +2910,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
return -EINVAL;
/* Quotafile not on the same filesystem? */
- if (path->mnt->mnt_sb != sb)
+ if (path->dentry->d_sb != sb)
return -EXDEV;
/* Journaling quota? */
if (EXT3_SB(sb)->s_qf_names[type]) {
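ext3_quota_on() now identifies the quota file's superblock through its dentry rather than through the vfsmount; the dentry's d_sb is the superblock of the object itself, independent of which mount it was reached through. The check, as a tiny sketch:

	#include <linux/fs.h>
	#include <linux/path.h>

	/* True if the object named by @path lives on superblock @sb. */
	static bool path_on_sb(const struct path *path,
			       const struct super_block *sb)
	{
		return path->dentry->d_sb == sb;
	}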
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index 3c218b8a51d..ea26f2acab9 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -3,7 +3,6 @@
* Handler for storing security labels as extended attributes.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fs.h>
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index dc8edda9ffe..2526a8829de 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -5,7 +5,6 @@
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/fs.h>
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index 7a321974d58..b32e473a1e3 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -5,7 +5,6 @@
* Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/ext3_jbd.h>
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 12ccacda44e..f9e2cd8cf71 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -23,6 +23,8 @@
#include <trace/events/ext4.h>
+static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+ ext4_group_t block_group);
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
@@ -668,7 +670,7 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
* This function returns the number of file system metadata clusters at
* the beginning of a block group, including the reserved gdt blocks.
*/
-unsigned ext4_num_base_meta_clusters(struct super_block *sb,
+static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
ext4_group_t block_group)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 8efb2f0a344..3f11656bd72 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -13,7 +13,6 @@
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
-#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5b0e26a1272..513004fc3d8 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -511,6 +511,14 @@ struct ext4_new_group_data {
__u32 free_blocks_count;
};
+/* Indexes used to index group tables in ext4_new_group_data */
+enum {
+ BLOCK_BITMAP = 0, /* block bitmap */
+ INODE_BITMAP, /* inode bitmap */
+ INODE_TABLE, /* inode tables */
+ GROUP_TABLE_COUNT,
+};
+
/*
* Flags used by ext4_map_blocks()
*/
@@ -575,6 +583,7 @@ struct ext4_new_group_data {
/* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
+#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -957,12 +966,13 @@ struct ext4_inode_info {
#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
EXT4_MOUNT2_##opt)
-#define ext4_set_bit __test_and_set_bit_le
+#define ext4_test_and_set_bit __test_and_set_bit_le
+#define ext4_set_bit __set_bit_le
#define ext4_set_bit_atomic ext2_set_bit_atomic
-#define ext4_clear_bit __test_and_clear_bit_le
+#define ext4_test_and_clear_bit __test_and_clear_bit_le
+#define ext4_clear_bit __clear_bit_le
#define ext4_clear_bit_atomic ext2_clear_bit_atomic
#define ext4_test_bit test_bit_le
-#define ext4_find_first_zero_bit find_first_zero_bit_le
#define ext4_find_next_zero_bit find_next_zero_bit_le
#define ext4_find_next_bit find_next_bit_le
@@ -1397,6 +1407,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
#define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200
+#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400
#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
@@ -1409,6 +1420,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */
#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
+#define EXT4_FEATURE_INCOMPAT_INLINEDATA 0x2000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */
#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1790,8 +1803,6 @@ extern void ext4_init_block_bitmap(struct super_block *sb,
extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
ext4_group_t block_group,
struct ext4_group_desc *gdp);
-extern unsigned ext4_num_base_meta_clusters(struct super_block *sb,
- ext4_group_t block_group);
extern unsigned ext4_num_overhead_clusters(struct super_block *sb,
ext4_group_t block_group,
struct ext4_group_desc *gdp);
@@ -1819,7 +1830,7 @@ extern int ext4fs_dirhash(const char *name, int len, struct
dx_hash_info *hinfo);
/* ialloc.c */
-extern struct inode *ext4_new_inode(handle_t *, struct inode *, int,
+extern struct inode *ext4_new_inode(handle_t *, struct inode *, umode_t,
const struct qstr *qstr, __u32 goal,
uid_t *owner);
extern void ext4_free_inode(handle_t *, struct inode *);
@@ -1880,16 +1891,9 @@ extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
-extern int ext4_block_truncate_page(handle_t *handle,
- struct address_space *mapping, loff_t from);
-extern int ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length);
extern int ext4_discard_partial_page_buffers(handle_t *handle,
struct address_space *mapping, loff_t from,
loff_t length, int flags);
-extern int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
- struct inode *inode, struct page *page, loff_t from,
- loff_t length, int flags);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -1924,6 +1928,7 @@ extern int ext4_group_add(struct super_block *sb,
extern int ext4_group_extend(struct super_block *sb,
struct ext4_super_block *es,
ext4_fsblk_t n_blocks_count);
+extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
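ext4.h adds the online-resize ioctl EXT4_IOC_RESIZE_FS, defined as _IOW('f', 16, __u64) and taking the desired filesystem size in blocks. A hedged userspace sketch of invoking it; the ioctl number comes from the hunk above, while the mount point and block count are illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	#ifndef EXT4_IOC_RESIZE_FS
	#define EXT4_IOC_RESIZE_FS	_IOW('f', 16, __u64)
	#endif

	int main(void)
	{
		__u64 new_blocks = 1048576;	/* target size in fs blocks */
		int fd = open("/mnt/ext4", O_RDONLY);

		if (fd < 0 || ioctl(fd, EXT4_IOC_RESIZE_FS, &new_blocks) < 0) {
			perror("EXT4_IOC_RESIZE_FS");
			return 1;
		}
		return 0;
	}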
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61fa9e1614a..74f23c292e1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -29,7 +29,6 @@
* - smart tree reduction
*/
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
@@ -1095,7 +1094,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
- neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+ neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -2955,7 +2954,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
/* Pre-conditions */
BUG_ON(!ext4_ext_is_uninitialized(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
- BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
/*
* Attempt to transfer newly initialized blocks from the currently
@@ -3282,6 +3280,9 @@ static int ext4_find_delalloc_range(struct inode *inode,
ext4_lblk_t i, pg_lblk;
pgoff_t index;
+ if (!test_opt(inode->i_sb, DELALLOC))
+ return 0;
+
/* reverse search won't work if fs block size is less than page size */
if (inode->i_blkbits < PAGE_CACHE_SHIFT)
search_hint_reverse = 0;
@@ -3454,8 +3455,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
int err = 0;
ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
- ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
- "block %llu, max_blocks %u, flags %d, allocated %u",
+ ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
+ "block %llu, max_blocks %u, flags %x, allocated %u\n",
inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
flags, allocated);
ext4_ext_show_leaf(inode, path);
@@ -3626,7 +3627,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
- ext4_lblk_t rr_cluster_start, rr_cluster_end;
+ ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
@@ -3637,7 +3638,6 @@ static int get_implied_cluster_alloc(struct super_block *sb,
/* The requested region passed into ext4_map_blocks() */
rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
- rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);
if ((rr_cluster_start == ex_cluster_end) ||
(rr_cluster_start == ex_cluster_start)) {
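The eh_depth change in extents.c is an endianness fix: the field is stored little-endian on disk, so it must be converted to CPU order before the increment and back afterwards; adding 1 to the raw __le16 value is wrong on big-endian machines. The general pattern, as a small sketch:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct on_disk_hdr {
		__le16 depth;		/* little-endian on disk */
	};

	/* Increment an on-disk __le16 safely; cpu_to_le16(hdr->depth + 1)
	 * would operate on the raw byte-swapped value. */
	static void bump_depth(struct on_disk_hdr *hdr)
	{
		hdr->depth = cpu_to_le16(le16_to_cpu(hdr->depth) + 1);
	}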
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 00beb4f9cc4..25d8c9781ad 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -252,7 +252,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
fatal = ext4_journal_get_write_access(handle, bh2);
}
ext4_lock_group(sb, block_group);
- cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+ cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
if (fatal || !cleared) {
ext4_unlock_group(sb, block_group);
goto out;
@@ -351,14 +351,14 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
*/
static int find_group_orlov(struct super_block *sb, struct inode *parent,
- ext4_group_t *group, int mode,
+ ext4_group_t *group, umode_t mode,
const struct qstr *qstr)
{
ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_group_t real_ngroups = ext4_get_groups_count(sb);
int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
- unsigned int freei, avefreei;
+ unsigned int freei, avefreei, grp_free;
ext4_fsblk_t freeb, avefreec;
unsigned int ndirs;
int max_dirs, min_inodes;
@@ -477,8 +477,8 @@ fallback_retry:
for (i = 0; i < ngroups; i++) {
grp = (parent_group + i) % ngroups;
desc = ext4_get_group_desc(sb, grp, NULL);
- if (desc && ext4_free_inodes_count(sb, desc) &&
- ext4_free_inodes_count(sb, desc) >= avefreei) {
+ grp_free = ext4_free_inodes_count(sb, desc);
+ if (desc && grp_free && grp_free >= avefreei) {
*group = grp;
return 0;
}
@@ -497,7 +497,7 @@ fallback_retry:
}
static int find_group_other(struct super_block *sb, struct inode *parent,
- ext4_group_t *group, int mode)
+ ext4_group_t *group, umode_t mode)
{
ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
@@ -602,7 +602,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
*/
static int ext4_claim_inode(struct super_block *sb,
struct buffer_head *inode_bitmap_bh,
- unsigned long ino, ext4_group_t group, int mode)
+ unsigned long ino, ext4_group_t group, umode_t mode)
{
int free = 0, retval = 0, count;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super_block *sb,
*/
down_read(&grp->alloc_sem);
ext4_lock_group(sb, group);
- if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
+ if (ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data)) {
/* not a free inode */
retval = 1;
goto err_ret;
@@ -690,7 +690,7 @@ err_ret:
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
+struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
const struct qstr *qstr, __u32 goal, uid_t *owner)
{
struct super_block *sb;
@@ -885,8 +885,12 @@ got:
if (IS_DIRSYNC(inode))
ext4_handle_sync(handle);
if (insert_inode_locked(inode) < 0) {
- err = -EINVAL;
- goto fail_drop;
+ /*
+ * Likely a bitmap corruption causing inode to be allocated
+ * twice.
+ */
+ err = -EIO;
+ goto fail;
}
spin_lock(&sbi->s_next_gen_lock);
inode->i_generation = sbi->s_next_generation++;
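The ialloc.c hunks rely on the bitop macro split made in ext4.h above: ext4_test_and_set_bit()/ext4_test_and_clear_bit() return the previous bit value, which inode allocation and freeing need in order to detect double allocation or double free, while plain ext4_set_bit()/ext4_clear_bit() just flip the bit. A hedged sketch of the distinction using the underlying little-endian bitops helpers:

	#include <linux/bitops.h>

	/* Claim bit @nr in an on-disk little-endian bitmap. Returns 0 if
	 * this call claimed it, non-zero if it was already set, which is
	 * what the test-and-set variant is for. */
	static int claim_bitmap_bit(int nr, void *bitmap)
	{
		return __test_and_set_bit_le(nr, bitmap);
	}

	/* Plain set: for callers that do not care about the old value,
	 * e.g. when initialising a freshly allocated bitmap. */
	static void mark_bitmap_bit(int nr, void *bitmap)
	{
		__set_bit_le(nr, bitmap);
	}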
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 3cfc73fbca8..830e1b2bf14 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -20,7 +20,6 @@
* (sct@redhat.com), 1993, 1998
*/
-#include <linux/module.h>
#include "ext4_jbd2.h"
#include "truncate.h"
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fffec40d599..feaa82fe629 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -18,7 +18,6 @@
* Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
*/
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
@@ -72,6 +71,9 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+ struct inode *inode, struct page *page, loff_t from,
+ loff_t length, int flags);
/*
* Test whether an inode is a fast symlink.
@@ -1339,8 +1341,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
clear_buffer_unwritten(bh);
}
- /* skip page if block allocation undone */
- if (buffer_delay(bh) || buffer_unwritten(bh))
+ /*
+ * skip page if block allocation undone and
+ * block is dirty
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh))
skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
@@ -1878,7 +1883,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* a[0] = 'a';
* truncate(f, 4096);
* we have in the page first buffer_head mapped via page_mkwrite call back
- * but other bufer_heads would be unmapped but dirty(dirty done via the
+ * but other buffer_heads would be unmapped but dirty (dirty done via the
* do_wp_page). So writepage should write the first block. If we modify
* the mmap area beyond 1024 we will again get a page_fault and the
* page_mkwrite callback will do the block allocation and mark the
@@ -2387,7 +2392,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
- loff_t page_len;
index = pos >> PAGE_CACHE_SHIFT;
@@ -2434,13 +2438,6 @@ retry:
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
- } else {
- page_len = pos & (PAGE_CACHE_SIZE - 1);
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos - page_len, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2480,6 @@ static int ext4_da_write_end(struct file *file,
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
- loff_t page_len;
if (write_mode == FALL_BACK_TO_NONDELALLOC) {
if (ext4_should_order_data(inode)) {
@@ -2508,7 +2504,7 @@ static int ext4_da_write_end(struct file *file,
*/
new_i_size = pos + copied;
- if (new_i_size > EXT4_I(inode)->i_disksize) {
+ if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem);
if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2528,6 @@ static int ext4_da_write_end(struct file *file,
}
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
-
- page_len = PAGE_CACHE_SIZE -
- ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos + copied - 1, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
-
copied = ret2;
if (ret2 < 0)
ret = ret2;
@@ -2776,15 +2762,16 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
if (!io_end || !size)
goto out;
- ext_debug("ext4_end_io_dio(): io_end 0x%p"
+ ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
iocb->private, io_end->inode->i_ino, iocb, offset,
size);
+ iocb->private = NULL;
+
/* if not aio dio with unwritten extents, just free io and return */
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
- iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
@@ -2808,7 +2795,6 @@ out:
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
- iocb->private = NULL;
/* XXX: probably should move into the real I/O completion handler */
inode_dio_done(inode);
@@ -3177,7 +3163,7 @@ int ext4_discard_partial_page_buffers(handle_t *handle,
*
* Returns zero on success or negative on failure.
*/
-int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
+static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
struct inode *inode, struct page *page, loff_t from,
loff_t length, int flags)
{
@@ -3203,26 +3189,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- if (!page_has_buffers(page)) {
- /*
- * If the range to be discarded covers a partial block
- * we need to get the page buffers. This is because
- * partial blocks cannot be released and the page needs
- * to be updated with the contents of the block before
- * we write the zeros on top of it.
- */
- if ((from & (blocksize - 1)) ||
- ((from + length) & (blocksize - 1))) {
- create_empty_buffers(page, blocksize, 0);
- } else {
- /*
- * If there are no partial blocks,
- * there is nothing to update,
- * so we can return now
- */
- return 0;
- }
- }
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
@@ -3335,126 +3303,6 @@ next:
return err;
}
-/*
- * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
-int ext4_block_truncate_page(handle_t *handle,
- struct address_space *mapping, loff_t from)
-{
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned length;
- unsigned blocksize;
- struct inode *inode = mapping->host;
-
- blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
-
- return ext4_block_zero_page_range(handle, mapping, from, length);
-}
-
-/*
- * ext4_block_zero_page_range() zeros out a mapping of length 'length'
- * starting from file offset 'from'. The range to be zero'd must
- * be contained with in one block. If the specified range exceeds
- * the end of the block it will be shortened to end of the block
- * that cooresponds to 'from'
- */
-int ext4_block_zero_page_range(handle_t *handle,
- struct address_space *mapping, loff_t from, loff_t length)
-{
- ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
- unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned blocksize, max, pos;
- ext4_lblk_t iblock;
- struct inode *inode = mapping->host;
- struct buffer_head *bh;
- struct page *page;
- int err = 0;
-
- page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
- mapping_gfp_mask(mapping) & ~__GFP_FS);
- if (!page)
- return -ENOMEM;
-
- blocksize = inode->i_sb->s_blocksize;
- max = blocksize - (offset & (blocksize - 1));
-
- /*
- * correct length if it does not fall between
- * 'from' and the end of the block
- */
- if (length > max || length < 0)
- length = max;
-
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
-
- /* Find the buffer that contains "offset" */
- bh = page_buffers(page);
- pos = blocksize;
- while (offset >= pos) {
- bh = bh->b_this_page;
- iblock++;
- pos += blocksize;
- }
-
- err = 0;
- if (buffer_freed(bh)) {
- BUFFER_TRACE(bh, "freed: skip");
- goto unlock;
- }
-
- if (!buffer_mapped(bh)) {
- BUFFER_TRACE(bh, "unmapped");
- ext4_get_block(inode, iblock, bh, 0);
- /* unmapped? It's a hole - nothing to do */
- if (!buffer_mapped(bh)) {
- BUFFER_TRACE(bh, "still unmapped");
- goto unlock;
- }
- }
-
- /* Ok, it's mapped. Make sure it's up-to-date */
- if (PageUptodate(page))
- set_buffer_uptodate(bh);
-
- if (!buffer_uptodate(bh)) {
- err = -EIO;
- ll_rw_block(READ, 1, &bh);
- wait_on_buffer(bh);
- /* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
- goto unlock;
- }
-
- if (ext4_should_journal_data(inode)) {
- BUFFER_TRACE(bh, "get write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (err)
- goto unlock;
- }
-
- zero_user(page, offset, length);
-
- BUFFER_TRACE(bh, "zeroed end of block");
-
- err = 0;
- if (ext4_should_journal_data(inode)) {
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- } else
- mark_buffer_dirty(bh);
-
-unlock:
- unlock_page(page);
- page_cache_release(page);
- return err;
-}
-
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
@@ -3503,7 +3351,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
* transaction, and VFS/VM ensures that ext4_truncate() cannot run
* simultaneously on behalf of the same inode.
*
- * As we work through the truncate and commmit bits of it to the journal there
+ * As we work through the truncate and commit bits of it to the journal there
* is one core, guiding principle: the file's tree must always be consistent on
* disk. We must be able to restart the truncate after a crash.
*
@@ -4681,9 +4529,19 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
return 0;
if (is_journal_aborted(journal))
return -EROFS;
+ /* We have to allocate physical blocks for delalloc blocks
+ * before flushing the journal; otherwise delalloc blocks cannot
+ * be allocated any more. Worse, a truncate on delalloc blocks
+ * could trigger a BUG by flushing delalloc blocks in the journal.
+ * There are no delalloc blocks in non-journal data mode.
+ */
+ if (val && test_opt(inode->i_sb, DELALLOC)) {
+ err = ext4_alloc_da_blocks(inode);
+ if (err < 0)
+ return err;
+ }
jbd2_journal_lock_updates(journal);
- jbd2_journal_flush(journal);
/*
* OK, there are no updates running now, and all cached data is
@@ -4695,8 +4553,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
if (val)
ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
- else
+ else {
+ jbd2_journal_flush(journal);
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+ }
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a56796814d6..6eee25591b8 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -18,6 +18,8 @@
#include "ext4_jbd2.h"
#include "ext4.h"
+#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
@@ -45,7 +47,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -134,7 +136,7 @@ flags_err:
err = ext4_ext_migrate(inode);
flags_out:
mutex_unlock(&inode->i_mutex);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT4_IOC_GETVERSION:
@@ -150,7 +152,7 @@ flags_out:
if (!inode_owner_or_capable(inode))
return -EPERM;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
if (get_user(generation, (int __user *) arg)) {
@@ -158,10 +160,11 @@ flags_out:
goto setversion_out;
}
+ mutex_lock(&inode->i_mutex);
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- goto setversion_out;
+ goto unlock_out;
}
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
@@ -170,8 +173,11 @@ flags_out:
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
}
ext4_journal_stop(handle);
+
+unlock_out:
+ mutex_unlock(&inode->i_mutex);
setversion_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
case EXT4_IOC_GROUP_EXTEND: {
@@ -182,19 +188,22 @@ setversion_out:
if (err)
return err;
- if (get_user(n_blocks_count, (__u32 __user *)arg))
- return -EFAULT;
+ if (get_user(n_blocks_count, (__u32 __user *)arg)) {
+ err = -EFAULT;
+ goto group_extend_out;
+ }
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
ext4_msg(sb, KERN_ERR,
"Online resizing not supported with bigalloc");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto group_extend_out;
}
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
- return err;
+ goto group_extend_out;
err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
@@ -204,9 +213,9 @@ setversion_out:
}
if (err == 0)
err = err2;
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
+group_extend_out:
ext4_resize_end(sb);
-
return err;
}
@@ -240,15 +249,14 @@ setversion_out:
return -EOPNOTSUPP;
}
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
goto mext_out;
err = ext4_move_extents(filp, donor_filp, me.orig_start,
me.donor_start, me.len, &me.moved_len);
+ mnt_drop_write_file(filp);
mnt_drop_write(filp->f_path.mnt);
- if (me.moved_len > 0)
- file_remove_suid(donor_filp);
if (copy_to_user((struct move_extent __user *)arg,
&me, sizeof(me)))
@@ -267,19 +275,22 @@ mext_out:
return err;
if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
- sizeof(input)))
- return -EFAULT;
+ sizeof(input))) {
+ err = -EFAULT;
+ goto group_add_out;
+ }
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
ext4_msg(sb, KERN_ERR,
"Online resizing not supported with bigalloc");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto group_add_out;
}
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
- return err;
+ goto group_add_out;
err = ext4_group_add(sb, &input);
if (EXT4_SB(sb)->s_journal) {
@@ -289,9 +300,9 @@ mext_out:
}
if (err == 0)
err = err2;
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
+group_add_out:
ext4_resize_end(sb);
-
return err;
}
@@ -301,7 +312,7 @@ mext_out:
if (!inode_owner_or_capable(inode))
return -EACCES;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
/*
@@ -313,7 +324,7 @@ mext_out:
mutex_lock(&(inode->i_mutex));
err = ext4_ext_migrate(inode);
mutex_unlock(&(inode->i_mutex));
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
@@ -323,11 +334,65 @@ mext_out:
if (!inode_owner_or_capable(inode))
return -EACCES;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
err = ext4_alloc_da_blocks(inode);
+ mnt_drop_write_file(filp);
+ return err;
+ }
+
+ case EXT4_IOC_RESIZE_FS: {
+ ext4_fsblk_t n_blocks_count;
+ struct super_block *sb = inode->i_sb;
+ int err = 0, err2 = 0;
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not (yet) supported with bigalloc");
+ return -EOPNOTSUPP;
+ }
+
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_META_BG)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not (yet) supported with meta_bg");
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
+ sizeof(__u64))) {
+ return -EFAULT;
+ }
+
+ if (n_blocks_count > MAX_32_NUM &&
+ !EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_64BIT)) {
+ ext4_msg(sb, KERN_ERR,
+ "File system only supports 32-bit block numbers");
+ return -EOPNOTSUPP;
+ }
+
+ err = ext4_resize_begin(sb);
+ if (err)
+ return err;
+
+ err = mnt_want_write(filp->f_path.mnt);
+ if (err)
+ goto resizefs_out;
+
+ err = ext4_resize_fs(sb, n_blocks_count);
+ if (EXT4_SB(sb)->s_journal) {
+ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+ err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+ }
+ if (err == 0)
+ err = err2;
mnt_drop_write(filp->f_path.mnt);
+resizefs_out:
+ ext4_resize_end(sb);
return err;
}
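
The new EXT4_IOC_RESIZE_FS handler above rejects a target block count that needs more than 32 bits unless the 64bit incompat feature is present. A minimal user-space sketch of that guard (the helper name and types are illustrative, not taken from the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_32_NUM ((1ULL << 32) - 1)

    /* Sketch: a block number above 2^32 - 1 needs 64-bit block support. */
    static bool resize_target_ok(uint64_t n_blocks_count, bool has_64bit_feature)
    {
        return n_blocks_count <= MAX_32_NUM || has_64bit_feature;
    }

    int main(void)
    {
        printf("%d\n", resize_target_ok(5ULL << 32, false)); /* 0: refused */
        return 0;
    }
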
@@ -429,6 +494,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case EXT4_IOC_MOVE_EXT:
case FITRIM:
+ case EXT4_IOC_RESIZE_FS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e2d8be8f28b..cb990b21c69 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3671,7 +3671,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ext4_group_t group;
ext4_grpblk_t bit;
- trace_ext4_mb_release_group_pa(pa);
+ trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 16ac228dbec..e7d6bb0acfa 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -12,7 +12,6 @@
*
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index aa4c782c9dd..2043f482375 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1736,7 +1736,7 @@ static int ext4_add_nondir(handle_t *handle,
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext4_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
handle_t *handle;
@@ -1770,7 +1770,7 @@ retry:
}
static int ext4_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
@@ -1806,7 +1806,7 @@ retry:
return err;
}
-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
@@ -2315,7 +2315,7 @@ retry:
err = PTR_ERR(handle);
goto err_drop_inode;
}
- inc_nlink(inode);
+ set_nlink(inode, 1);
err = ext4_orphan_del(handle, inode);
if (err) {
ext4_journal_stop(handle);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7ce1d0b19c9..47585189651 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -6,7 +6,6 @@
* Written by Theodore Ts'o, 2010.
*/
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
@@ -385,6 +384,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
block_end = block_start + blocksize;
if (block_start >= len) {
+ /*
+ * Comments copied from block_write_full_page_endio:
+ *
+ * The page straddles i_size. It must be zeroed out on
+ * each and every writepage invocation because it may
+ * be mmapped. "A file is mapped in multiples of the
+ * page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when
+ * mapped, and writes to that region are not written
+ * out to the file."
+ */
+ zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 996780ab4f4..f9d948f0eb8 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -134,6 +134,172 @@ static int verify_group_input(struct super_block *sb,
return err;
}
+/*
+ * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex
+ * group each time.
+ */
+struct ext4_new_flex_group_data {
+ struct ext4_new_group_data *groups; /* new_group_data for groups
+ in the flex group */
+ __u16 *bg_flags; /* block group flags of groups
+ in @groups */
+ ext4_group_t count; /* number of groups in @groups
+ */
+};
+
+/*
+ * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
+ * @flexbg_size.
+ *
+ * Returns NULL on failure, otherwise the address of the allocated structure.
+ */
+static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+{
+ struct ext4_new_flex_group_data *flex_gd;
+
+ flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
+ if (flex_gd == NULL)
+ goto out3;
+
+ flex_gd->count = flexbg_size;
+
+ flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
+ flexbg_size, GFP_NOFS);
+ if (flex_gd->groups == NULL)
+ goto out2;
+
+ flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
+ if (flex_gd->bg_flags == NULL)
+ goto out1;
+
+ return flex_gd;
+
+out1:
+ kfree(flex_gd->groups);
+out2:
+ kfree(flex_gd);
+out3:
+ return NULL;
+}
+
+static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
+{
+ kfree(flex_gd->bg_flags);
+ kfree(flex_gd->groups);
+ kfree(flex_gd);
+}
+
+/*
+ * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
+ * and inode tables for a flex group.
+ *
+ * This function is used by 64bit-resize. Note that this function allocates
+ * group tables from the 1st group of the groups contained in @flex_gd, which
+ * may be a partial flex group.
+ *
+ * @sb: super block of fs to which the groups belongs
+ */
+static void ext4_alloc_group_tables(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd,
+ int flexbg_size)
+{
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ ext4_fsblk_t start_blk;
+ ext4_fsblk_t last_blk;
+ ext4_group_t src_group;
+ ext4_group_t bb_index = 0;
+ ext4_group_t ib_index = 0;
+ ext4_group_t it_index = 0;
+ ext4_group_t group;
+ ext4_group_t last_group;
+ unsigned overhead;
+
+ BUG_ON(flex_gd->count == 0 || group_data == NULL);
+
+ src_group = group_data[0].group;
+ last_group = src_group + flex_gd->count - 1;
+
+ BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
+ (last_group & ~(flexbg_size - 1))));
+next_group:
+ group = group_data[0].group;
+ start_blk = ext4_group_first_block_no(sb, src_group);
+ last_blk = start_blk + group_data[src_group - group].blocks_count;
+
+ overhead = ext4_bg_has_super(sb, src_group) ?
+ (1 + ext4_bg_num_gdb(sb, src_group) +
+ le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+
+ start_blk += overhead;
+
+ BUG_ON(src_group >= group_data[0].group + flex_gd->count);
+ /* We collect contiguous blocks as much as possible. */
+ src_group++;
+ for (; src_group <= last_group; src_group++)
+ if (!ext4_bg_has_super(sb, src_group))
+ last_blk += group_data[src_group - group].blocks_count;
+ else
+ break;
+
+ /* Allocate block bitmaps */
+ for (; bb_index < flex_gd->count; bb_index++) {
+ if (start_blk >= last_blk)
+ goto next_group;
+ group_data[bb_index].block_bitmap = start_blk++;
+ ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL);
+ group -= group_data[0].group;
+ group_data[group].free_blocks_count--;
+ if (flexbg_size > 1)
+ flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ }
+
+ /* Allocate inode bitmaps */
+ for (; ib_index < flex_gd->count; ib_index++) {
+ if (start_blk >= last_blk)
+ goto next_group;
+ group_data[ib_index].inode_bitmap = start_blk++;
+ ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL);
+ group -= group_data[0].group;
+ group_data[group].free_blocks_count--;
+ if (flexbg_size > 1)
+ flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ }
+
+ /* Allocate inode tables */
+ for (; it_index < flex_gd->count; it_index++) {
+ if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
+ goto next_group;
+ group_data[it_index].inode_table = start_blk;
+ ext4_get_group_no_and_offset(sb, start_blk, &group, NULL);
+ group -= group_data[0].group;
+ group_data[group].free_blocks_count -=
+ EXT4_SB(sb)->s_itb_per_group;
+ if (flexbg_size > 1)
+ flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+
+ start_blk += EXT4_SB(sb)->s_itb_per_group;
+ }
+
+ if (test_opt(sb, DEBUG)) {
+ int i;
+ group = group_data[0].group;
+
+ printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
+ "%d groups, flexbg size is %d:\n", flex_gd->count,
+ flexbg_size);
+
+ for (i = 0; i < flex_gd->count; i++) {
+ printk(KERN_DEBUG "adding %s group %u: %u "
+ "blocks (%d free)\n",
+ ext4_bg_has_super(sb, group + i) ? "normal" :
+ "no-super", group + i,
+ group_data[i].blocks_count,
+ group_data[i].free_blocks_count);
+ }
+ }
+}
+
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
ext4_fsblk_t blk)
{
@@ -179,131 +345,250 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
}
/*
- * Set up the block and inode bitmaps, and the inode table for the new group.
+ * set_flexbg_block_bitmap() marks @count blocks starting from @block used.
+ *
+ * Helper function for setup_new_flex_group_blocks() which marks the blocks
+ * used by the new group tables in the flex group's block bitmaps.
+ *
+ * @sb: super block
+ * @handle: journal handle
+ * @flex_gd: flex group data
+ */
+static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+ struct ext4_new_flex_group_data *flex_gd,
+ ext4_fsblk_t block, ext4_group_t count)
+{
+ ext4_group_t count2;
+
+ ext4_debug("mark blocks [%llu/%u] used\n", block, count);
+ for (count2 = count; count > 0; count -= count2, block += count2) {
+ ext4_fsblk_t start;
+ struct buffer_head *bh;
+ ext4_group_t group;
+ int err;
+
+ ext4_get_group_no_and_offset(sb, block, &group, NULL);
+ start = ext4_group_first_block_no(sb, group);
+ group -= flex_gd->groups[0].group;
+
+ count2 = sb->s_blocksize * 8 - (block - start);
+ if (count2 > count)
+ count2 = count;
+
+ if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
+ BUG_ON(flex_gd->count > 1);
+ continue;
+ }
+
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ return err;
+
+ bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
+ if (!bh)
+ return -EIO;
+
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err)
+ return err;
+ ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
+ block - start, count2);
+ ext4_set_bits(bh->b_data, block - start, count2);
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (unlikely(err))
+ return err;
+ brelse(bh);
+ }
+
+ return 0;
+}
+
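
set_flexbg_block_bitmap() above marks a run of blocks one block bitmap at a time, clamping each chunk (count2) to what the current group's bitmap can still describe. A small stand-alone illustration of that chunking, assuming a 4 KiB block size and no first-data-block offset:

    #include <stdio.h>

    int main(void)
    {
        /* 4 KiB blocks: one block bitmap covers 4096 * 8 = 32768 blocks. */
        unsigned long long bits = 4096ULL * 8;
        unsigned long long block = 98300, count = 200;

        while (count) {
            unsigned long long group_start = (block / bits) * bits;
            unsigned long long chunk = bits - (block - group_start);

            if (chunk > count)
                chunk = count;
            printf("mark %llu block(s) at %llu in the bitmap of group %llu\n",
                   chunk, block, block / bits);
            block += chunk;
            count -= chunk;
        }
        return 0;
    }
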
+/*
+ * Set up the block and inode bitmaps, and the inode table for the new groups.
* This doesn't need to be part of the main transaction, since we are only
* changing blocks outside the actual filesystem. We still do journaling to
* ensure the recovery is correct in case of a failure just after resize.
* If any part of this fails, we simply abort the resize.
+ *
+ * setup_new_flex_group_blocks handles a flex group as follows:
+ * 1. copy the super block and GDT, and initialize group tables if necessary.
+ * In this step, we only set bits in block bitmaps for blocks taken by
+ * the super block and GDT.
+ * 2. allocate group tables in block bitmaps, that is, set bits in the block
+ * bitmap for blocks taken by group tables.
*/
-static int setup_new_group_blocks(struct super_block *sb,
- struct ext4_new_group_data *input)
+static int setup_new_flex_group_blocks(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd)
{
+ int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
+ ext4_fsblk_t start;
+ ext4_fsblk_t block;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
- int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
- le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
- unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
- struct buffer_head *bh;
+ struct ext4_super_block *es = sbi->s_es;
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ __u16 *bg_flags = flex_gd->bg_flags;
handle_t *handle;
- ext4_fsblk_t block;
- ext4_grpblk_t bit;
- int i;
- int err = 0, err2;
+ ext4_group_t group, count;
+ struct buffer_head *bh = NULL;
+ int reserved_gdb, i, j, err = 0, err2;
+
+ BUG_ON(!flex_gd->count || !group_data ||
+ group_data[0].group != sbi->s_groups_count);
+
+ reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
/* This transaction may be extended/restarted along the way */
handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
-
if (IS_ERR(handle))
return PTR_ERR(handle);
- BUG_ON(input->group != sbi->s_groups_count);
+ group = group_data[0].group;
+ for (i = 0; i < flex_gd->count; i++, group++) {
+ unsigned long gdblocks;
- /* Copy all of the GDT blocks into the backup in this group */
- for (i = 0, bit = 1, block = start + 1;
- i < gdblocks; i++, block++, bit++) {
- struct buffer_head *gdb;
+ gdblocks = ext4_bg_num_gdb(sb, group);
+ start = ext4_group_first_block_no(sb, group);
- ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
- err = extend_or_restart_transaction(handle, 1);
- if (err)
- goto exit_journal;
+ /* Copy all of the GDT blocks into the backup in this group */
+ for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
+ struct buffer_head *gdb;
- gdb = sb_getblk(sb, block);
- if (!gdb) {
- err = -EIO;
- goto exit_journal;
- }
- if ((err = ext4_journal_get_write_access(handle, gdb))) {
+ ext4_debug("update backup group %#04llx\n", block);
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ goto out;
+
+ gdb = sb_getblk(sb, block);
+ if (!gdb) {
+ err = -EIO;
+ goto out;
+ }
+
+ err = ext4_journal_get_write_access(handle, gdb);
+ if (err) {
+ brelse(gdb);
+ goto out;
+ }
+ memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+ gdb->b_size);
+ set_buffer_uptodate(gdb);
+
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+ if (unlikely(err)) {
+ brelse(gdb);
+ goto out;
+ }
brelse(gdb);
- goto exit_journal;
}
- memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
- set_buffer_uptodate(gdb);
- err = ext4_handle_dirty_metadata(handle, NULL, gdb);
- if (unlikely(err)) {
- brelse(gdb);
- goto exit_journal;
+
+ /* Zero out all of the reserved backup group descriptor
+ * table blocks
+ */
+ if (ext4_bg_has_super(sb, group)) {
+ err = sb_issue_zeroout(sb, gdblocks + start + 1,
+ reserved_gdb, GFP_NOFS);
+ if (err)
+ goto out;
}
- brelse(gdb);
- }
- /* Zero out all of the reserved backup group descriptor table blocks */
- ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
- block, sbi->s_itb_per_group);
- err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
- GFP_NOFS);
- if (err)
- goto exit_journal;
+ /* Initialize group tables of the group @group */
+ if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
+ goto handle_bb;
- err = extend_or_restart_transaction(handle, 2);
- if (err)
- goto exit_journal;
+ /* Zero out all of the inode table blocks */
+ block = group_data[i].inode_table;
+ ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
+ block, sbi->s_itb_per_group);
+ err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
+ GFP_NOFS);
+ if (err)
+ goto out;
- bh = bclean(handle, sb, input->block_bitmap);
- if (IS_ERR(bh)) {
- err = PTR_ERR(bh);
- goto exit_journal;
- }
+handle_bb:
+ if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
+ goto handle_ib;
- if (ext4_bg_has_super(sb, input->group)) {
- ext4_debug("mark backup group tables %#04llx (+0)\n", start);
- ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
- }
+ /* Initialize block bitmap of the @group */
+ block = group_data[i].block_bitmap;
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ goto out;
- ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
- input->block_bitmap - start);
- ext4_set_bit(input->block_bitmap - start, bh->b_data);
- ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input->inode_bitmap,
- input->inode_bitmap - start);
- ext4_set_bit(input->inode_bitmap - start, bh->b_data);
-
- /* Zero out all of the inode table blocks */
- block = input->inode_table;
- ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
- block, sbi->s_itb_per_group);
- err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
- if (err)
- goto exit_bh;
- ext4_set_bits(bh->b_data, input->inode_table - start,
- sbi->s_itb_per_group);
+ bh = bclean(handle, sb, block);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ goto out;
+ }
+ if (ext4_bg_has_super(sb, group)) {
+ ext4_debug("mark backup superblock %#04llx (+0)\n",
+ start);
+ ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb +
+ 1);
+ }
+ ext4_mark_bitmap_end(group_data[i].blocks_count,
+ sb->s_blocksize * 8, bh->b_data);
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out;
+ brelse(bh);
+handle_ib:
+ if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
+ continue;
- ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
- bh->b_data);
- err = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (unlikely(err)) {
- ext4_std_error(sb, err);
- goto exit_bh;
+ /* Initialize inode bitmap of the @group */
+ block = group_data[i].inode_bitmap;
+ err = extend_or_restart_transaction(handle, 1);
+ if (err)
+ goto out;
+ /* Mark unused entries in inode bitmap used */
+ bh = bclean(handle, sb, block);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ goto out;
+ }
+
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+ sb->s_blocksize * 8, bh->b_data);
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out;
+ brelse(bh);
}
- brelse(bh);
- /* Mark unused entries in inode bitmap used */
- ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
- input->inode_bitmap, input->inode_bitmap - start);
- if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
- err = PTR_ERR(bh);
- goto exit_journal;
+ bh = NULL;
+
+ /* Mark group tables in block bitmap */
+ for (j = 0; j < GROUP_TABLE_COUNT; j++) {
+ count = group_table_count[j];
+ start = (&group_data[0].block_bitmap)[j];
+ block = start;
+ for (i = 1; i < flex_gd->count; i++) {
+ block += group_table_count[j];
+ if (block == (&group_data[i].block_bitmap)[j]) {
+ count += group_table_count[j];
+ continue;
+ }
+ err = set_flexbg_block_bitmap(sb, handle,
+ flex_gd, start, count);
+ if (err)
+ goto out;
+ count = group_table_count[j];
+ start = group_data[i].block_bitmap;
+ block = start;
+ }
+
+ if (count) {
+ err = set_flexbg_block_bitmap(sb, handle,
+ flex_gd, start, count);
+ if (err)
+ goto out;
+ }
}
- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
- bh->b_data);
- err = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (unlikely(err))
- ext4_std_error(sb, err);
-exit_bh:
+out:
brelse(bh);
-
-exit_journal:
- if ((err2 = ext4_journal_stop(handle)) && !err)
+ err2 = ext4_journal_stop(handle);
+ if (err2 && !err)
err = err2;
return err;
@@ -351,10 +636,10 @@ static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
* groups in current filesystem that have BACKUPS, or -ve error code.
*/
static int verify_reserved_gdb(struct super_block *sb,
+ ext4_group_t end,
struct buffer_head *primary)
{
const ext4_fsblk_t blk = primary->b_blocknr;
- const ext4_group_t end = EXT4_SB(sb)->s_groups_count;
unsigned three = 1;
unsigned five = 5;
unsigned seven = 7;
@@ -429,7 +714,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (!gdb_bh)
return -EIO;
- gdbackups = verify_reserved_gdb(sb, gdb_bh);
+ gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
if (gdbackups < 0) {
err = gdbackups;
goto exit_bh;
@@ -592,7 +877,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
err = -EIO;
goto exit_bh;
}
- if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
+ gdbackups = verify_reserved_gdb(sb, group, primary[res]);
+ if (gdbackups < 0) {
brelse(primary[res]);
err = gdbackups;
goto exit_bh;
@@ -735,6 +1021,348 @@ exit_err:
}
}
+/*
+ * ext4_add_new_descs() adds @count group descriptors of the groups
+ * starting at @group
+ *
+ * @handle: journal handle
+ * @sb: super block
+ * @group: the group no. of the first group desc to be added
+ * @resize_inode: the resize inode
+ * @count: number of group descriptors to be added
+ */
+static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
+ ext4_group_t group, struct inode *resize_inode,
+ ext4_group_t count)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ struct buffer_head *gdb_bh;
+ int i, gdb_off, gdb_num, err = 0;
+
+ for (i = 0; i < count; i++, group++) {
+ int reserved_gdb = ext4_bg_has_super(sb, group) ?
+ le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
+
+ gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
+ gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+
+ /*
+ * We will only either add reserved group blocks to a backup group
+ * or remove reserved blocks for the first group in a new group block.
+ * Doing both would mean more complex code, and sane people don't
+ * use non-sparse filesystems anymore. This is already checked above.
+ */
+ if (gdb_off) {
+ gdb_bh = sbi->s_group_desc[gdb_num];
+ err = ext4_journal_get_write_access(handle, gdb_bh);
+
+ if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
+ err = reserve_backup_gdb(handle, resize_inode, group);
+ } else
+ err = add_new_gdb(handle, resize_inode, group);
+ if (err)
+ break;
+ }
+ return err;
+}
+
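
ext4_add_new_descs() above (and ext4_setup_new_descs() below) locate a group's descriptor by splitting the group number into a GDT block index (gdb_num) and a slot within that block (gdb_off). A tiny worked example under an assumed 64 descriptors per block:

    #include <stdio.h>

    int main(void)
    {
        unsigned int desc_per_block = 64;   /* assumed EXT4_DESC_PER_BLOCK */
        unsigned int group = 100;

        printf("group %u: GDT block %u, slot %u\n",
               group, group / desc_per_block, group % desc_per_block);
        return 0;
    }
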
+/*
+ * ext4_setup_new_descs() will set up the group descriptors of a flex bg
+ */
+static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd)
+{
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ struct ext4_group_desc *gdp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *gdb_bh;
+ ext4_group_t group;
+ __u16 *bg_flags = flex_gd->bg_flags;
+ int i, gdb_off, gdb_num, err = 0;
+
+
+ for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
+ group = group_data->group;
+
+ gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
+ gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+
+ /*
+ * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
+ */
+ gdb_bh = sbi->s_group_desc[gdb_num];
+ /* Update group descriptor block for new group */
+ gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data +
+ gdb_off * EXT4_DESC_SIZE(sb));
+
+ memset(gdp, 0, EXT4_DESC_SIZE(sb));
+ ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
+ ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+ ext4_inode_table_set(sb, gdp, group_data->inode_table);
+ ext4_free_group_clusters_set(sb, gdp,
+ EXT4_B2C(sbi, group_data->free_blocks_count));
+ ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
+ gdp->bg_flags = cpu_to_le16(*bg_flags);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
+ if (unlikely(err)) {
+ ext4_std_error(sb, err);
+ break;
+ }
+
+ /*
+ * We can allocate memory for mb_alloc based on the new group
+ * descriptor
+ */
+ err = ext4_mb_add_groupinfo(sb, group, gdp);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/*
+ * ext4_update_super() updates the super block so that the newly added
+ * groups can be seen by the filesystem.
+ *
+ * @sb: super block
+ * @flex_gd: new added groups
+ */
+static void ext4_update_super(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd)
+{
+ ext4_fsblk_t blocks_count = 0;
+ ext4_fsblk_t free_blocks = 0;
+ ext4_fsblk_t reserved_blocks = 0;
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ int i;
+
+ BUG_ON(flex_gd->count == 0 || group_data == NULL);
+ /*
+ * Make the new blocks and inodes valid next. We do this before
+ * increasing the group count so that once the group is enabled,
+ * all of its blocks and inodes are already valid.
+ *
+ * We always allocate group-by-group, then block-by-block or
+ * inode-by-inode within a group, so enabling these
+ * blocks/inodes before the group is live won't actually let us
+ * allocate the new space yet.
+ */
+ for (i = 0; i < flex_gd->count; i++) {
+ blocks_count += group_data[i].blocks_count;
+ free_blocks += group_data[i].free_blocks_count;
+ }
+
+ reserved_blocks = ext4_r_blocks_count(es) * 100;
+ do_div(reserved_blocks, ext4_blocks_count(es));
+ reserved_blocks *= blocks_count;
+ do_div(reserved_blocks, 100);
+
+ ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
+ le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
+ flex_gd->count);
+
+ /*
+ * We need to protect s_groups_count against other CPUs seeing
+ * inconsistent state in the superblock.
+ *
+ * The precise rules we use are:
+ *
+ * * Writers must perform a smp_wmb() after updating all
+ * dependent data and before modifying the groups count
+ *
+ * * Readers must perform an smp_rmb() after reading the groups
+ * count and before reading any dependent data.
+ *
+ * NB. These rules can be relaxed when checking the group count
+ * while freeing data, as we can only allocate from a block
+ * group after serialising against the group count, and we can
+ * only then free after serialising in turn against that
+ * allocation.
+ */
+ smp_wmb();
+
+ /* Update the global fs size fields */
+ sbi->s_groups_count += flex_gd->count;
+
+ /* Update the reserved block counts only once the new group is
+ * active. */
+ ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
+ reserved_blocks);
+
+ /* Update the free space counts */
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+ EXT4_B2C(sbi, free_blocks));
+ percpu_counter_add(&sbi->s_freeinodes_counter,
+ EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
+
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group;
+ flex_group = ext4_flex_group(sbi, group_data[0].group);
+ atomic_add(EXT4_B2C(sbi, free_blocks),
+ &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+
+ if (test_opt(sb, DEBUG))
+ printk(KERN_DEBUG "EXT4-fs: added group %u:"
+ "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
+ blocks_count, free_blocks, reserved_blocks);
+}
+
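
ext4_update_super() above scales the reserved-block count so the added space keeps the filesystem's existing reserved percentage (multiply by 100, divide by the old total, scale by the added blocks, divide by 100). A worked example with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
        /* 1,000,000 existing blocks, 50,000 reserved (5%), 200,000 added. */
        unsigned long long r_blocks = 50000, blocks = 1000000, added = 200000;
        unsigned long long reserved = r_blocks * 100 / blocks;  /* 5 */

        reserved = reserved * added / 100;                      /* 10,000 */
        printf("reserve %llu of the %llu new blocks\n", reserved, added);
        return 0;
    }
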
+/* Add a flex group to an fs. Ensure we handle all possible error conditions
+ * _before_ we start modifying the filesystem, because we cannot abort the
+ * transaction and not have it write the data to disk.
+ */
+static int ext4_flex_group_add(struct super_block *sb,
+ struct inode *resize_inode,
+ struct ext4_new_flex_group_data *flex_gd)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ ext4_fsblk_t o_blocks_count;
+ ext4_grpblk_t last;
+ ext4_group_t group;
+ handle_t *handle;
+ unsigned reserved_gdb;
+ int err = 0, err2 = 0, credit;
+
+ BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);
+
+ reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
+ o_blocks_count = ext4_blocks_count(es);
+ ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
+ BUG_ON(last);
+
+ err = setup_new_flex_group_blocks(sb, flex_gd);
+ if (err)
+ goto exit;
+ /*
+ * We will always be modifying at least the superblock and GDT
+ * block. If we are adding a group past the last current GDT block,
+ * we will also modify the inode and the dindirect block. If we
+ * are adding a group with superblock/GDT backups we will also
+ * modify each of the reserved GDT dindirect blocks.
+ */
+ credit = flex_gd->count * 4 + reserved_gdb;
+ handle = ext4_journal_start_sb(sb, credit);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto exit;
+ }
+
+ err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+ if (err)
+ goto exit_journal;
+
+ group = flex_gd->groups[0].group;
+ BUG_ON(group != EXT4_SB(sb)->s_groups_count);
+ err = ext4_add_new_descs(handle, sb, group,
+ resize_inode, flex_gd->count);
+ if (err)
+ goto exit_journal;
+
+ err = ext4_setup_new_descs(handle, sb, flex_gd);
+ if (err)
+ goto exit_journal;
+
+ ext4_update_super(sb, flex_gd);
+
+ err = ext4_handle_dirty_super(handle, sb);
+
+exit_journal:
+ err2 = ext4_journal_stop(handle);
+ if (!err)
+ err = err2;
+
+ if (!err) {
+ int i;
+ update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+ sizeof(struct ext4_super_block));
+ for (i = 0; i < flex_gd->count; i++, group++) {
+ struct buffer_head *gdb_bh;
+ int gdb_num;
+ gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
+ gdb_bh = sbi->s_group_desc[gdb_num];
+ update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+ gdb_bh->b_size);
+ }
+ }
+exit:
+ return err;
+}
+
+static int ext4_setup_next_flex_gd(struct super_block *sb,
+ struct ext4_new_flex_group_data *flex_gd,
+ ext4_fsblk_t n_blocks_count,
+ unsigned long flexbg_size)
+{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ ext4_fsblk_t o_blocks_count;
+ ext4_group_t n_group;
+ ext4_group_t group;
+ ext4_group_t last_group;
+ ext4_grpblk_t last;
+ ext4_grpblk_t blocks_per_group;
+ unsigned long i;
+
+ blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
+
+ o_blocks_count = ext4_blocks_count(es);
+
+ if (o_blocks_count == n_blocks_count)
+ return 0;
+
+ ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
+ BUG_ON(last);
+ ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
+
+ last_group = group | (flexbg_size - 1);
+ if (last_group > n_group)
+ last_group = n_group;
+
+ flex_gd->count = last_group - group + 1;
+
+ for (i = 0; i < flex_gd->count; i++) {
+ int overhead;
+
+ group_data[i].group = group + i;
+ group_data[i].blocks_count = blocks_per_group;
+ overhead = ext4_bg_has_super(sb, group + i) ?
+ (1 + ext4_bg_num_gdb(sb, group + i) +
+ le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+ group_data[i].free_blocks_count = blocks_per_group - overhead;
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+ flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
+ EXT4_BG_INODE_UNINIT;
+ else
+ flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
+ }
+
+ if (last_group == n_group &&
+ EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+ /* We need to initialize block bitmap of last group. */
+ flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
+
+ if ((last_group == n_group) && (last != blocks_per_group - 1)) {
+ group_data[i - 1].blocks_count = last + 1;
+ group_data[i - 1].free_blocks_count -= blocks_per_group-
+ last - 1;
+ }
+
+ return 1;
+}
+
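
ext4_setup_next_flex_gd() above sizes each pass by rounding the first new group up to the end of its flex group with a bitwise OR (flexbg_size is a power of two), then clamping to the last group of the resized filesystem. A small illustration with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int flexbg_size = 16, group = 37, n_group = 100; /* assumed */
        unsigned int last_group = group | (flexbg_size - 1);      /* 47 */

        if (last_group > n_group)
            last_group = n_group;
        printf("this pass covers groups %u..%u (%u groups)\n",
               group, last_group, last_group - group + 1);
        return 0;
    }
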
/* Add group descriptor data to an existing or new group descriptor block.
* Ensure we handle all possible error conditions _before_ we start modifying
* the filesystem, because we cannot abort the transaction and not have it
@@ -750,16 +1378,15 @@ exit_err:
*/
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
+ struct ext4_new_flex_group_data flex_gd;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
- struct buffer_head *primary = NULL;
- struct ext4_group_desc *gdp;
struct inode *inode = NULL;
- handle_t *handle;
int gdb_off, gdb_num;
- int err, err2;
+ int err;
+ __u16 bg_flags = 0;
gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
@@ -798,175 +1425,69 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
}
- if ((err = verify_group_input(sb, input)))
- goto exit_put;
+ err = verify_group_input(sb, input);
+ if (err)
+ goto out;
- if ((err = setup_new_group_blocks(sb, input)))
- goto exit_put;
+ flex_gd.count = 1;
+ flex_gd.groups = input;
+ flex_gd.bg_flags = &bg_flags;
+ err = ext4_flex_group_add(sb, inode, &flex_gd);
+out:
+ iput(inode);
+ return err;
+} /* ext4_group_add */
- /*
- * We will always be modifying at least the superblock and a GDT
- * block. If we are adding a group past the last current GDT block,
- * we will also modify the inode and the dindirect block. If we
- * are adding a group with superblock/GDT backups we will also
- * modify each of the reserved GDT dindirect blocks.
+/*
+ * Extend a group without checking, assuming that the checking has already
+ * been done by the caller.
+ */
+static int ext4_group_extend_no_check(struct super_block *sb,
+ ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
+{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ handle_t *handle;
+ int err = 0, err2;
+
+ /* We will update the superblock, one block bitmap, and
+ * one group descriptor via ext4_group_add_blocks().
*/
- handle = ext4_journal_start_sb(sb,
- ext4_bg_has_super(sb, input->group) ?
- 3 + reserved_gdb : 4);
+ handle = ext4_journal_start_sb(sb, 3);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- goto exit_put;
+ ext4_warning(sb, "error %d on journal start", err);
+ return err;
}
- if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
- goto exit_journal;
-
- /*
- * We will only either add reserved group blocks to a backup group
- * or remove reserved blocks for the first group in a new group block.
- * Doing both would be mean more complex code, and sane people don't
- * use non-sparse filesystems anymore. This is already checked above.
- */
- if (gdb_off) {
- primary = sbi->s_group_desc[gdb_num];
- if ((err = ext4_journal_get_write_access(handle, primary)))
- goto exit_journal;
-
- if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
- err = reserve_backup_gdb(handle, inode, input->group);
- if (err)
- goto exit_journal;
- }
- } else {
- /*
- * Note that we can access new group descriptor block safely
- * only if add_new_gdb() succeeds.
- */
- err = add_new_gdb(handle, inode, input->group);
- if (err)
- goto exit_journal;
- primary = sbi->s_group_desc[gdb_num];
+ err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+ if (err) {
+ ext4_warning(sb, "error %d on journal write access", err);
+ goto errout;
}
- /*
- * OK, now we've set up the new group. Time to make it active.
- *
- * so we have to be safe wrt. concurrent accesses the group
- * data. So we need to be careful to set all of the relevant
- * group descriptor data etc. *before* we enable the group.
- *
- * The key field here is sbi->s_groups_count: as long as
- * that retains its old value, nobody is going to access the new
- * group.
- *
- * So first we update all the descriptor metadata for the new
- * group; then we update the total disk blocks count; then we
- * update the groups count to enable the group; then finally we
- * update the free space counts so that the system can start
- * using the new disk blocks.
- */
-
- /* Update group descriptor block for new group */
- gdp = (struct ext4_group_desc *)((char *)primary->b_data +
- gdb_off * EXT4_DESC_SIZE(sb));
-
- memset(gdp, 0, EXT4_DESC_SIZE(sb));
- ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
- ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
- ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
- ext4_free_group_clusters_set(sb, gdp, input->free_blocks_count);
- ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
- gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
- gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
-
- /*
- * We can allocate memory for mb_alloc based on the new group
- * descriptor
- */
- err = ext4_mb_add_groupinfo(sb, input->group, gdp);
+ ext4_blocks_count_set(es, o_blocks_count + add);
+ ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
+ o_blocks_count + add);
+ /* We add the blocks to the bitmap and set the group need init bit */
+ err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
if (err)
- goto exit_journal;
-
- /*
- * Make the new blocks and inodes valid next. We do this before
- * increasing the group count so that once the group is enabled,
- * all of its blocks and inodes are already valid.
- *
- * We always allocate group-by-group, then block-by-block or
- * inode-by-inode within a group, so enabling these
- * blocks/inodes before the group is live won't actually let us
- * allocate the new space yet.
- */
- ext4_blocks_count_set(es, ext4_blocks_count(es) +
- input->blocks_count);
- le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
-
- /*
- * We need to protect s_groups_count against other CPUs seeing
- * inconsistent state in the superblock.
- *
- * The precise rules we use are:
- *
- * * Writers must perform a smp_wmb() after updating all dependent
- * data and before modifying the groups count
- *
- * * Readers must perform an smp_rmb() after reading the groups count
- * and before reading any dependent data.
- *
- * NB. These rules can be relaxed when checking the group count
- * while freeing data, as we can only allocate from a block
- * group after serialising against the group count, and we can
- * only then free after serialising in turn against that
- * allocation.
- */
- smp_wmb();
-
- /* Update the global fs size fields */
- sbi->s_groups_count++;
-
- err = ext4_handle_dirty_metadata(handle, NULL, primary);
- if (unlikely(err)) {
- ext4_std_error(sb, err);
- goto exit_journal;
- }
-
- /* Update the reserved block counts only once the new group is
- * active. */
- ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
- input->reserved_blocks);
-
- /* Update the free space counts */
- percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_B2C(sbi, input->free_blocks_count));
- percpu_counter_add(&sbi->s_freeinodes_counter,
- EXT4_INODES_PER_GROUP(sb));
-
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
- sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group;
- flex_group = ext4_flex_group(sbi, input->group);
- atomic_add(EXT4_B2C(sbi, input->free_blocks_count),
- &sbi->s_flex_groups[flex_group].free_clusters);
- atomic_add(EXT4_INODES_PER_GROUP(sb),
- &sbi->s_flex_groups[flex_group].free_inodes);
- }
-
+ goto errout;
ext4_handle_dirty_super(handle, sb);
-
-exit_journal:
- if ((err2 = ext4_journal_stop(handle)) && !err)
+ ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
+ o_blocks_count + add);
+errout:
+ err2 = ext4_journal_stop(handle);
+ if (err2 && !err)
err = err2;
- if (!err && primary) {
- update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+
+ if (!err) {
+ if (test_opt(sb, DEBUG))
+ printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
+ "blocks\n", ext4_blocks_count(es));
+ update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
- update_backups(sb, primary->b_blocknr, primary->b_data,
- primary->b_size);
}
-exit_put:
- iput(inode);
return err;
-} /* ext4_group_add */
+}
/*
* Extend the filesystem to the new number of blocks specified. This entry
@@ -985,8 +1506,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_grpblk_t last;
ext4_grpblk_t add;
struct buffer_head *bh;
- handle_t *handle;
- int err, err2;
+ int err;
ext4_group_t group;
o_blocks_count = ext4_blocks_count(es);
@@ -1042,42 +1562,119 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
}
brelse(bh);
- /* We will update the superblock, one block bitmap, and
- * one group descriptor via ext4_free_blocks().
- */
- handle = ext4_journal_start_sb(sb, 3);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- ext4_warning(sb, "error %d on journal start", err);
- goto exit_put;
+ err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+ return err;
+} /* ext4_group_extend */
+
+/*
+ * ext4_resize_fs() resizes a fs to the new size specified by @n_blocks_count
+ *
+ * @sb: super block of the fs to be resized
+ * @n_blocks_count: the number of blocks in the resized fs
+ */
+int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+{
+ struct ext4_new_flex_group_data *flex_gd = NULL;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ struct buffer_head *bh;
+ struct inode *resize_inode;
+ ext4_fsblk_t o_blocks_count;
+ ext4_group_t o_group;
+ ext4_group_t n_group;
+ ext4_grpblk_t offset;
+ unsigned long n_desc_blocks;
+ unsigned long o_desc_blocks;
+ unsigned long desc_blocks;
+ int err = 0, flexbg_size = 1;
+
+ o_blocks_count = ext4_blocks_count(es);
+
+ if (test_opt(sb, DEBUG))
+ printk(KERN_DEBUG "EXT4-fs: resizing filesystem from %llu "
+ "upto %llu blocks\n", o_blocks_count, n_blocks_count);
+
+ if (n_blocks_count < o_blocks_count) {
+ /* On-line shrinking not supported */
+ ext4_warning(sb, "can't shrink FS - resize aborted");
+ return -EINVAL;
}
- if ((err = ext4_journal_get_write_access(handle,
- EXT4_SB(sb)->s_sbh))) {
- ext4_warning(sb, "error %d on journal write access", err);
- ext4_journal_stop(handle);
- goto exit_put;
+ if (n_blocks_count == o_blocks_count)
+ /* Nothing to do */
+ return 0;
+
+ ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+ ext4_get_group_no_and_offset(sb, o_blocks_count, &o_group, &offset);
+
+ n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
+ EXT4_DESC_PER_BLOCK(sb);
+ o_desc_blocks = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ EXT4_DESC_PER_BLOCK(sb);
+ desc_blocks = n_desc_blocks - o_desc_blocks;
+
+ if (desc_blocks &&
+ (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) ||
+ le16_to_cpu(es->s_reserved_gdt_blocks) < desc_blocks)) {
+ ext4_warning(sb, "No reserved GDT blocks, can't resize");
+ return -EPERM;
}
- ext4_blocks_count_set(es, o_blocks_count + add);
- ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
- o_blocks_count + add);
- /* We add the blocks to the bitmap and set the group need init bit */
- err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
- ext4_handle_dirty_super(handle, sb);
- ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
- o_blocks_count + add);
- err2 = ext4_journal_stop(handle);
- if (!err && err2)
- err = err2;
- if (err)
- goto exit_put;
+ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ if (IS_ERR(resize_inode)) {
+ ext4_warning(sb, "Error opening resize inode");
+ return PTR_ERR(resize_inode);
+ }
+ /* See if the device is actually as big as what was requested */
+ bh = sb_bread(sb, n_blocks_count - 1);
+ if (!bh) {
+ ext4_warning(sb, "can't read last block, resize aborted");
+ return -ENOSPC;
+ }
+ brelse(bh);
+
+ if (offset != 0) {
+ /* extend the last group */
+ ext4_grpblk_t add;
+ add = EXT4_BLOCKS_PER_GROUP(sb) - offset;
+ err = ext4_group_extend_no_check(sb, o_blocks_count, add);
+ if (err)
+ goto out;
+ }
+
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ es->s_log_groups_per_flex)
+ flexbg_size = 1 << es->s_log_groups_per_flex;
+
+ o_blocks_count = ext4_blocks_count(es);
+ if (o_blocks_count == n_blocks_count)
+ goto out;
+
+ flex_gd = alloc_flex_gd(flexbg_size);
+ if (flex_gd == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Add flex groups. Note that a regular group is a
+ * flex group with 1 group.
+ */
+ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
+ flexbg_size)) {
+ ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
+ err = ext4_flex_group_add(sb, resize_inode, flex_gd);
+ if (unlikely(err))
+ break;
+ }
+
+out:
+ if (flex_gd)
+ free_flex_gd(flex_gd);
+
+ iput(resize_inode);
if (test_opt(sb, DEBUG))
- printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
- ext4_blocks_count(es));
- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
- sizeof(struct ext4_super_block));
-exit_put:
+ printk(KERN_DEBUG "EXT4-fs: resized filesystem from %llu "
+ "upto %llu blocks\n", o_blocks_count, n_blocks_count);
return err;
-} /* ext4_group_extend */
+}
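
ext4_resize_fs() above also compares how many group-descriptor blocks the old and new sizes need and requires s_reserved_gdt_blocks to cover the difference (unless no extra descriptor blocks are needed). A worked example under an assumed geometry of 64 descriptors per GDT block:

    #include <stdio.h>

    int main(void)
    {
        unsigned long desc_per_block = 64;          /* assumed */
        unsigned long o_groups = 200, n_groups = 300;
        unsigned long o_desc = (o_groups + desc_per_block - 1) / desc_per_block; /* 4 */
        unsigned long n_desc = (n_groups + desc_per_block - 1) / desc_per_block; /* 5 */

        printf("growing needs %lu extra GDT block(s) from s_reserved_gdt_blocks\n",
               n_desc - o_desc);
        return 0;
    }
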
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3858767ec67..502c61fd739 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -930,7 +930,6 @@ static int ext4_drop_inode(struct inode *inode)
static void ext4_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
@@ -1033,11 +1032,11 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
* - it's set to a non-default value OR
* - if the per-sb default is different from the global default
*/
-static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
int def_errors;
unsigned long def_mount_opts;
- struct super_block *sb = vfs->mnt_sb;
+ struct super_block *sb = root->d_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
@@ -1096,7 +1095,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
}
if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
seq_printf(seq, ",max_batch_time=%u",
- (unsigned) sbi->s_min_batch_time);
+ (unsigned) sbi->s_max_batch_time);
}
/*
@@ -1155,9 +1154,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",block_validity");
if (!test_opt(sb, INIT_INODE_TABLE))
- seq_puts(seq, ",noinit_inode_table");
+ seq_puts(seq, ",noinit_itable");
else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
- seq_printf(seq, ",init_inode_table=%u",
+ seq_printf(seq, ",init_itable=%u",
(unsigned) sbi->s_li_wait_mult);
ext4_show_quota_options(seq, sb);
@@ -1333,8 +1332,7 @@ enum {
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
- Opt_discard, Opt_nodiscard,
- Opt_init_inode_table, Opt_noinit_inode_table,
+ Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
};
static const match_table_t tokens = {
@@ -1407,9 +1405,9 @@ static const match_table_t tokens = {
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
- {Opt_init_inode_table, "init_itable=%u"},
- {Opt_init_inode_table, "init_itable"},
- {Opt_noinit_inode_table, "noinit_itable"},
+ {Opt_init_itable, "init_itable=%u"},
+ {Opt_init_itable, "init_itable"},
+ {Opt_noinit_itable, "noinit_itable"},
{Opt_err, NULL},
};
@@ -1892,7 +1890,7 @@ set_qf_format:
case Opt_dioread_lock:
clear_opt(sb, DIOREAD_NOLOCK);
break;
- case Opt_init_inode_table:
+ case Opt_init_itable:
set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
@@ -1903,7 +1901,7 @@ set_qf_format:
return 0;
sbi->s_li_wait_mult = option;
break;
- case Opt_noinit_inode_table:
+ case Opt_noinit_itable:
clear_opt(sb, INIT_INODE_TABLE);
break;
default:
@@ -2007,17 +2005,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
struct ext4_group_desc *gdp = NULL;
ext4_group_t flex_group_count;
ext4_group_t flex_group;
- int groups_per_flex = 0;
+ unsigned int groups_per_flex = 0;
size_t size;
int i;
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
- if (groups_per_flex < 2) {
+ if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
+ groups_per_flex = 1 << sbi->s_log_groups_per_flex;
/* We allocate both existing and potentially added groups */
flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
@@ -2884,8 +2881,7 @@ cont_thread:
}
mutex_unlock(&eli->li_list_mtx);
- if (freezing(current))
- refrigerator();
+ try_to_freeze();
cur = jiffies;
if ((time_after_eq(cur, next_wakeup)) ||
@@ -3509,7 +3505,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* of the filesystem.
*/
if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
- ext4_msg(sb, KERN_WARNING, "bad geometry: first data"
+ ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
"block %u is beyond end of filesystem (%llu)",
le32_to_cpu(es->s_first_data_block),
ext4_blocks_count(es));
@@ -3736,10 +3732,12 @@ no_journal:
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
+ iput(root);
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
+ iput(root);
ext4_msg(sb, KERN_ERR, "get root dentry failed");
ret = -ENOMEM;
goto failed_mount4;
@@ -3776,7 +3774,7 @@ no_journal:
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
"zone (%d)", err);
- goto failed_mount4;
+ goto failed_mount4a;
}
ext4_ext_init(sb);
@@ -3833,13 +3831,14 @@ cantfind_ext4:
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
- ext4_ext_release(sb);
-failed_mount5:
ext4_mb_release(sb);
+failed_mount5:
+ ext4_ext_release(sb);
ext4_release_system_zone(sb);
-failed_mount4:
- iput(root);
+failed_mount4a:
+ dput(sb->s_root);
sb->s_root = NULL;
+failed_mount4:
ext4_msg(sb, KERN_ERR, "mount failed");
destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
failed_mount_wq:
@@ -4783,7 +4782,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
return -EINVAL;
/* Quotafile not on the same filesystem? */
- if (path->mnt->mnt_sb != sb)
+ if (path->dentry->d_sb != sb)
return -EXDEV;
/* Journaling quota? */
if (EXT4_SB(sb)->s_qf_names[type]) {
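The ext4/super.c hunks above track two VFS interface changes in this series: ->show_options() now receives the filesystem's root dentry instead of a vfsmount, and the quota-file check compares superblocks through path->dentry->d_sb. They also make max_batch_time print its own value rather than min_batch_time, and reject out-of-range s_log_groups_per_flex values before computing groups_per_flex, so the 1 << shift can no longer overflow. A minimal sketch of the new-style ->show_options callback, with a hypothetical "demofs" name; only the signature and the root->d_sb access mirror the patched ext4/FAT/fuse callbacks:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int demofs_show_options(struct seq_file *m, struct dentry *root)
    {
            struct super_block *sb = root->d_sb;    /* superblock via the root dentry */

            if (sb->s_flags & MS_SYNCHRONOUS)
                    seq_puts(m, ",sync");
            seq_printf(m, ",blksize=%lu", sb->s_blocksize);
            return 0;
    }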
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 34e4350dd4d..d2a200624af 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -3,7 +3,6 @@
* Handler for storing security labels as extended attributes.
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -48,8 +47,9 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *fs_info)
+static int
+ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
const struct xattr *xattr;
handle_t *handle = fs_info;
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 37e6ebca2cc..95f1f4ab59a 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -5,7 +5,6 @@
* Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/fs.h>
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 98c375352d0..0edb7611ffb 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -5,7 +5,6 @@
* Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include "ext4_jbd2.h"
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 1510a4d5199..66994f316e1 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -141,7 +141,7 @@ static inline struct msdos_inode_info *MSDOS_I(struct inode *inode)
static inline int fat_mode_can_hold_ro(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
- mode_t mask;
+ umode_t mask;
if (S_ISDIR(inode->i_mode)) {
if (!sbi->options.rodir)
@@ -156,8 +156,8 @@ static inline int fat_mode_can_hold_ro(struct inode *inode)
}
/* Convert attribute bits and a mask to the UNIX mode. */
-static inline mode_t fat_make_mode(struct msdos_sb_info *sbi,
- u8 attrs, mode_t mode)
+static inline umode_t fat_make_mode(struct msdos_sb_info *sbi,
+ u8 attrs, umode_t mode)
{
if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir))
mode &= ~S_IWUGO;
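The fat.h change is part of the tree-wide mode_t to umode_t conversion in this series: in-kernel inode mode bits use umode_t (an unsigned short), and the VFS ->create/->mkdir/->mknod methods change to match, which is why the msdos, vfat, fuse and gfs2 hunks further down all touch the same parameter. A simplified sketch of the helper above, with a hypothetical name and the rodir special case omitted:

    #include <linux/stat.h>
    #include <linux/types.h>

    /* ATTR_RO is FAT's read-only attribute bit (0x01 in fat.h). */
    static umode_t demo_fat_mode(u8 attrs, umode_t mode)
    {
            if (attrs & 0x01)
                    mode &= ~S_IWUGO;       /* read-only entry: drop all write bits */
            return mode;
    }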
diff --git a/fs/fat/file.c b/fs/fat/file.c
index c118acf16e4..a71fe3715ee 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -44,7 +44,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
goto out;
mutex_lock(&inode->i_mutex);
- err = mnt_want_write(file->f_path.mnt);
+ err = mnt_want_write_file(file);
if (err)
goto out_unlock_inode;
@@ -108,7 +108,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
fat_save_attrs(inode, attr);
mark_inode_dirty(inode);
out_drop_write:
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
out_unlock_inode:
mutex_unlock(&inode->i_mutex);
out:
@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(fat_getattr);
static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
struct inode *inode, umode_t *mode_ptr)
{
- mode_t mask, perm;
+ umode_t mask, perm;
/*
* Note, the basic check is already done by a caller of
@@ -351,7 +351,7 @@ static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
{
- mode_t allow_utime = sbi->options.allow_utime;
+ umode_t allow_utime = sbi->options.allow_utime;
if (current_fsuid() != inode->i_uid) {
if (in_group_p(inode->i_gid))
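fat_ioctl_set_attributes() switches from mnt_want_write(file->f_path.mnt) to the file-based helpers; gfs2's do_gfs2_set_flags() below makes the same substitution. A sketch of the usual write-guard pattern around an ioctl that updates on-disk metadata; demo_apply() is a placeholder, and only the mnt_want_write_file()/mnt_drop_write_file() pairing reflects the patched code:

    #include <linux/fs.h>
    #include <linux/mount.h>

    static int demo_apply(struct inode *inode, unsigned long arg)
    {
            return 0;       /* placeholder for the real attribute update */
    }

    static long demo_ioctl_set(struct file *file, unsigned long arg)
    {
            struct inode *inode = file->f_path.dentry->d_inode;
            int err;

            err = mnt_want_write_file(file);        /* pairs with mnt_drop_write_file() */
            if (err)
                    return err;

            mutex_lock(&inode->i_mutex);
            err = demo_apply(inode, arg);
            mutex_unlock(&inode->i_mutex);

            mnt_drop_write_file(file);
            return err;
    }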
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 808cac7edcf..3ab841054d5 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -518,7 +518,6 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
static void fat_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
}
@@ -672,7 +671,7 @@ int fat_sync_inode(struct inode *inode)
EXPORT_SYMBOL_GPL(fat_sync_inode);
-static int fat_show_options(struct seq_file *m, struct vfsmount *mnt);
+static int fat_show_options(struct seq_file *m, struct dentry *root);
static const struct super_operations fat_sops = {
.alloc_inode = fat_alloc_inode,
.destroy_inode = fat_destroy_inode,
@@ -811,9 +810,9 @@ static const struct export_operations fat_export_ops = {
.get_parent = fat_get_parent,
};
-static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int fat_show_options(struct seq_file *m, struct dentry *root)
{
- struct msdos_sb_info *sbi = MSDOS_SB(mnt->mnt_sb);
+ struct msdos_sb_info *sbi = MSDOS_SB(root->d_sb);
struct fat_mount_options *opts = &sbi->options;
int isvfat = opts->isvfat;
@@ -898,7 +897,7 @@ enum {
Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
- Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
+ Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
};
@@ -928,17 +927,17 @@ static const match_table_t fat_tokens = {
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_discard, "discard"},
- {Opt_obsolate, "conv=binary"},
- {Opt_obsolate, "conv=text"},
- {Opt_obsolate, "conv=auto"},
- {Opt_obsolate, "conv=b"},
- {Opt_obsolate, "conv=t"},
- {Opt_obsolate, "conv=a"},
- {Opt_obsolate, "fat=%u"},
- {Opt_obsolate, "blocksize=%u"},
- {Opt_obsolate, "cvf_format=%20s"},
- {Opt_obsolate, "cvf_options=%100s"},
- {Opt_obsolate, "posix"},
+ {Opt_obsolete, "conv=binary"},
+ {Opt_obsolete, "conv=text"},
+ {Opt_obsolete, "conv=auto"},
+ {Opt_obsolete, "conv=b"},
+ {Opt_obsolete, "conv=t"},
+ {Opt_obsolete, "conv=a"},
+ {Opt_obsolete, "fat=%u"},
+ {Opt_obsolete, "blocksize=%u"},
+ {Opt_obsolete, "cvf_format=%20s"},
+ {Opt_obsolete, "cvf_options=%100s"},
+ {Opt_obsolete, "posix"},
{Opt_err, NULL},
};
static const match_table_t msdos_tokens = {
@@ -1170,7 +1169,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
break;
/* obsolete mount options */
- case Opt_obsolate:
+ case Opt_obsolete:
fat_msg(sb, KERN_INFO, "\"%s\" option is obsolete, "
"not supported now", p);
break;
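The Opt_obsolate to Opt_obsolete rename is purely cosmetic; the token tables above and ext4's earlier init_itable changes both feed the same <linux/parser.h> machinery. A self-contained sketch of that parsing pattern, using made-up tokens:

    #include <linux/parser.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    enum { Opt_quiet, Opt_size, Opt_err };

    static const match_table_t demo_tokens = {
            {Opt_quiet, "quiet"},
            {Opt_size,  "size=%u"},
            {Opt_err,   NULL},
    };

    static int demo_parse(char *options)
    {
            substring_t args[MAX_OPT_ARGS];
            char *p;
            int option;

            while ((p = strsep(&options, ",")) != NULL) {
                    if (!*p)
                            continue;
                    switch (match_token(p, demo_tokens, args)) {
                    case Opt_quiet:
                            break;                          /* flag option, no argument */
                    case Opt_size:
                            if (match_int(&args[0], &option))
                                    return -EINVAL;
                            break;                          /* option now holds the %u value */
                    default:
                            return -EINVAL;
                    }
            }
            return 0;
    }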
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 216b419f30e..c5938c9084b 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -264,7 +264,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
}
/***** Create a file */
-static int msdos_create(struct inode *dir, struct dentry *dentry, int mode,
+static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
@@ -346,7 +346,7 @@ out:
}
/***** Make a directory */
-static int msdos_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index a87a65663c2..a81eb2367d3 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN)
@@ -781,7 +782,7 @@ error:
return ERR_PTR(err);
}
-static int vfat_create(struct inode *dir, struct dentry *dentry, int mode,
+static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
@@ -870,7 +871,7 @@ out:
return err;
}
-static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
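xlate_to_uni() now passes an explicit endianness and an output bound to utf8s_to_utf16s(), so an over-long UTF-8 name can no longer overrun the UTF-16 buffer it converts into. A sketch of a bounded conversion in the same spirit; DEMO_MAX_UTF16 and the helper name are made up, the FAT code itself uses FAT_LFN_LEN + 2 as its slack:

    #include <linux/nls.h>
    #include <linux/errno.h>

    #define DEMO_MAX_UTF16 260      /* assumed cap, analogous to FAT_LFN_LEN + 2 */

    /* 'out' must have room for at least DEMO_MAX_UTF16 entries. */
    static int demo_utf8_name(const unsigned char *name, int len, wchar_t *out)
    {
            int n = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
                                    out, DEMO_MAX_UTF16);
            if (n < 0)
                    return n;                       /* malformed UTF-8 */
            if (n > DEMO_MAX_UTF16 - 2)
                    return -ENAMETOOLONG;           /* name would not fit a long entry */
            return n;                               /* UTF-16 code units written */
    }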
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 6b088641f5b..a48e4a139be 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -10,6 +10,7 @@
#include <linux/personality.h>
#include <asm/uaccess.h>
#include "internal.h"
+#include "mount.h"
static long do_sys_name_to_handle(struct path *path,
struct file_handle __user *ufh,
@@ -24,8 +25,8 @@ static long do_sys_name_to_handle(struct path *path,
* We need to make sure the file system
* supports decoding of the file handle
*/
- if (!path->mnt->mnt_sb->s_export_op ||
- !path->mnt->mnt_sb->s_export_op->fh_to_dentry)
+ if (!path->dentry->d_sb->s_export_op ||
+ !path->dentry->d_sb->s_export_op->fh_to_dentry)
return -EOPNOTSUPP;
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
@@ -66,7 +67,8 @@ static long do_sys_name_to_handle(struct path *path,
} else
retval = 0;
/* copy the mount id */
- if (copy_to_user(mnt_id, &path->mnt->mnt_id, sizeof(*mnt_id)) ||
+ if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
+ sizeof(*mnt_id)) ||
copy_to_user(ufh, handle,
sizeof(struct file_handle) + handle_bytes))
retval = -EFAULT;
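do_sys_name_to_handle() now reaches the export operations through path->dentry->d_sb and the mount id through real_mount(), the accessor for the struct mount that this series splits out of struct vfsmount and keeps private to fs/ (hence the new "mount.h" include). For code outside the VFS core, the dentry route is the one that remains available; ext4_quota_on() above uses it for its same-filesystem check. A tiny sketch of that check with a hypothetical name:

    #include <linux/types.h>
    #include <linux/path.h>
    #include <linux/dcache.h>

    static bool demo_same_fs(const struct path *a, const struct path *b)
    {
            /* compare filesystems the way the patched quota/fhandle checks do */
            return a->dentry->d_sb == b->dentry->d_sb;
    }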
diff --git a/fs/file_table.c b/fs/file_table.c
index c322794f736..20002e39754 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -474,29 +474,6 @@ void file_sb_list_del(struct file *file)
#endif
-int fs_may_remount_ro(struct super_block *sb)
-{
- struct file *file;
- /* Check that no files are currently opened for writing. */
- lg_global_lock(files_lglock);
- do_file_list_for_each_entry(sb, file) {
- struct inode *inode = file->f_path.dentry->d_inode;
-
- /* File with pending delete? */
- if (inode->i_nlink == 0)
- goto too_bad;
-
- /* Writeable file? */
- if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
- goto too_bad;
- } while_file_list_for_each_entry;
- lg_global_unlock(files_lglock);
- return 1; /* Tis' cool bro. */
-too_bad:
- lg_global_unlock(files_lglock);
- return 0;
-}
-
/**
* mark_files_ro - mark all files read-only
* @sb: superblock in question
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 0845f84f2a5..96f24286667 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -74,7 +74,6 @@ int register_filesystem(struct file_system_type * fs)
BUG_ON(strchr(fs->name, '.'));
if (fs->next)
return -EBUSY;
- INIT_LIST_HEAD(&fs->fs_supers);
write_lock(&file_systems_lock);
p = find_filesystem(fs->name, strlen(fs->name));
if (*p)
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 7b2af5abe2f..cf9ef918a2a 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -187,10 +187,10 @@ vxfs_stiget(struct super_block *sbp, ino_t ino)
* vxfs_transmod returns a Linux mode_t for a given
* VxFS inode structure.
*/
-static __inline__ mode_t
+static __inline__ umode_t
vxfs_transmod(struct vxfs_inode_info *vip)
{
- mode_t ret = vip->vii_mode & ~VXFS_TYPE_MASK;
+ umode_t ret = vip->vii_mode & ~VXFS_TYPE_MASK;
if (VXFS_ISFIFO(vip))
ret |= S_IFIFO;
@@ -340,7 +340,6 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
static void vxfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(vxfs_inode_cachep, inode->i_private);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 73c3992b2bb..f855916657b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -20,16 +20,21 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include "internal.h"
/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
+
+/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
struct wb_writeback_work {
@@ -47,17 +52,6 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
-const char *wb_reason_name[] = {
- [WB_REASON_BACKGROUND] = "background",
- [WB_REASON_TRY_TO_FREE_PAGES] = "try_to_free_pages",
- [WB_REASON_SYNC] = "sync",
- [WB_REASON_PERIODIC] = "periodic",
- [WB_REASON_LAPTOP_TIMER] = "laptop_timer",
- [WB_REASON_FREE_MORE_MEM] = "free_more_memory",
- [WB_REASON_FS_FREE_SPACE] = "fs_free_space",
- [WB_REASON_FORKER_THREAD] = "forker_thread"
-};
-
/*
* Include the creation of the trace points after defining the
* wb_writeback_work structure so that the definition remains local to this
@@ -156,6 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
* bdi_start_writeback - start writeback
* @bdi: the backing device to write from
* @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -753,11 +748,17 @@ static long wb_writeback(struct bdi_writeback *wb,
if (work->for_background && !over_bground_thresh(wb->bdi))
break;
+ /*
+ * Kupdate and background works are special and we want to
+ * include all inodes that need writing. Livelock avoidance is
+ * handled by these works yielding to any other work so we are
+ * safe.
+ */
if (work->for_kupdate) {
oldest_jif = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
- work->older_than_this = &oldest_jif;
- }
+ } else if (work->for_background)
+ oldest_jif = jiffies;
trace_writeback_start(wb->bdi, work);
if (list_empty(&wb->b_io))
@@ -947,7 +948,7 @@ int bdi_writeback_thread(void *data)
trace_writeback_thread_start(bdi);
- while (!kthread_should_stop()) {
+ while (!kthread_freezable_should_stop(NULL)) {
/*
* Remove own delayed wake-up timer, since we are already awake
* and we'll take care of the periodic write-back.
@@ -977,8 +978,6 @@ int bdi_writeback_thread(void *data)
*/
schedule();
}
-
- try_to_freeze();
}
/* Flush any work that raced with us exiting */
@@ -1223,6 +1222,7 @@ static void wait_sb_inodes(struct super_block *sb)
* writeback_inodes_sb_nr - writeback dirty inodes from given super_block
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1251,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
/**
* writeback_inodes_sb - writeback dirty inodes from given super_block
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1266,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
/**
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1287,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
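Two themes in the fs-writeback.c hunks: MIN_WRITEBACK_PAGES expresses a 4 MiB chunk in pages (with 4 KiB pages PAGE_CACHE_SHIFT is 12, so 4096UL >> 2 = 1024 pages), and the flusher thread's open-coded kthread_should_stop()/try_to_freeze() pair becomes kthread_freezable_should_stop(). The same freezer cleanup, replacing the freezing(current)/refrigerator() pair with try_to_freeze(), appears in ext4's lazyinit thread earlier and in gfs2's logd and quotad threads later. A minimal sketch of the freezer-aware worker loop; the demo name and the sleep are placeholders, only the kthread/freezer calls are real:

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/sched.h>

    static int demo_flusher(void *data)
    {
            set_freezable();                        /* opt in to the freezer */

            while (!kthread_freezable_should_stop(NULL)) {
                    /* ... write back one batch of dirty pages ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }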
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5cb8614508c..2aaf3eaaf13 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
else if (outarg->offset + num > file_size)
num = file_size - outarg->offset;
- while (num) {
+ while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
struct page *page;
unsigned int this_num;
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
num -= this_num;
total_len += this_num;
+ index++;
}
req->misc.retrieve_in.offset = outarg->offset;
req->misc.retrieve_in.size = total_len;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 9f63e493a9b..5ddd6ea8f83 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -369,8 +369,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
* If the filesystem doesn't support this, then fall back to separate
* 'mknod' + 'open' requests.
*/
-static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
- struct nameidata *nd)
+static int fuse_create_open(struct inode *dir, struct dentry *entry,
+ umode_t mode, struct nameidata *nd)
{
int err;
struct inode *inode;
@@ -480,7 +480,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
*/
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
struct inode *dir, struct dentry *entry,
- int mode)
+ umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
@@ -547,7 +547,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
return err;
}
-static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
+static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
dev_t rdev)
{
struct fuse_mknod_in inarg;
@@ -573,7 +573,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
return create_new_entry(fc, req, dir, entry, mode);
}
-static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
+static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
struct nameidata *nd)
{
if (nd) {
@@ -585,7 +585,7 @@ static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
return fuse_mknod(dir, entry, mode, 0);
}
-static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
+static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_conn *fc = get_fuse_conn(dir);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 594f07a81c2..0c84100acd4 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
struct inode *inode = file->f_path.dentry->d_inode;
mutex_lock(&inode->i_mutex);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+ if (origin != SEEK_CUR && origin != SEEK_SET) {
retval = fuse_update_attributes(inode, NULL, file, NULL);
if (retval)
goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
offset += i_size_read(inode);
break;
case SEEK_CUR:
+ if (offset == 0) {
+ retval = file->f_pos;
+ goto exit;
+ }
offset += file->f_pos;
break;
case SEEK_DATA:
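The llseek fix above is a classic condition inversion: "origin != SEEK_CUR || origin != SEEK_SET" holds for every origin, so fuse refreshed attributes even for plain relative and absolute seeks. With "&&" the refresh is limited to the cases that actually read i_size (SEEK_END, and the new SEEK_DATA/SEEK_HOLE). Spelled out, purely for illustration:

    /* "Not (CUR or SET)" must use && after De Morgan:
     *     !(origin == SEEK_CUR || origin == SEEK_SET)
     *  == ( origin != SEEK_CUR && origin != SEEK_SET)
     * whereas (origin != SEEK_CUR || origin != SEEK_SET) is a tautology,
     * since no single value can equal both constants at once.
     */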
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index cf6db0a9321..1964da0257d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -80,7 +80,7 @@ struct fuse_inode {
/** The sticky bit in inode->i_mode may have been removed, so
preserve the original mode */
- mode_t orig_i_mode;
+ umode_t orig_i_mode;
/** Version of last attribute change */
u64 attr_version;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3e6d7275647..64cf8d07393 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -107,7 +107,6 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
static void fuse_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(fuse_inode_cachep, inode);
}
@@ -498,9 +497,10 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
return 1;
}
-static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
- struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);
+ struct super_block *sb = root->d_sb;
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
seq_printf(m, ",user_id=%u", fc->user_id);
seq_printf(m, ",group_id=%u", fc->group_id);
@@ -510,9 +510,8 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
seq_puts(m, ",allow_other");
if (fc->max_read != ~0)
seq_printf(m, ",max_read=%u", fc->max_read);
- if (mnt->mnt_sb->s_bdev &&
- mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
- seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize);
+ if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
+ seq_printf(m, ",blksize=%lu", sb->s_blocksize);
return 0;
}
@@ -1138,28 +1137,28 @@ static int __init fuse_fs_init(void)
{
int err;
- err = register_filesystem(&fuse_fs_type);
- if (err)
- goto out;
-
- err = register_fuseblk();
- if (err)
- goto out_unreg;
-
fuse_inode_cachep = kmem_cache_create("fuse_inode",
sizeof(struct fuse_inode),
0, SLAB_HWCACHE_ALIGN,
fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
- goto out_unreg2;
+ goto out;
+
+ err = register_fuseblk();
+ if (err)
+ goto out2;
+
+ err = register_filesystem(&fuse_fs_type);
+ if (err)
+ goto out3;
return 0;
- out_unreg2:
+ out3:
unregister_fuseblk();
- out_unreg:
- unregister_filesystem(&fuse_fs_type);
+ out2:
+ kmem_cache_destroy(fuse_inode_cachep);
out:
return err;
}
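fuse_fs_init() is reordered so the inode cache is created before either filesystem type is registered: a mount racing with module load can then never find a registered filesystem whose cache does not exist yet, and the error path frees only what was actually set up, in reverse order. A sketch of that "register last, unwind in reverse" init shape; every demo_ identifier is hypothetical and the file_system_type is deliberately incomplete:

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/module.h>

    struct demo_inode { struct inode vfs_inode; };

    static struct kmem_cache *demo_inode_cachep;

    static struct file_system_type demo_fs_type = {
            .owner = THIS_MODULE,
            .name  = "demofs",
            /* .mount and .kill_sb omitted from this sketch */
    };

    static void demo_init_once(void *foo)
    {
            inode_init_once(&((struct demo_inode *)foo)->vfs_inode);
    }

    static int __init demo_init(void)
    {
            int err = -ENOMEM;

            demo_inode_cachep = kmem_cache_create("demo_inode",
                                                  sizeof(struct demo_inode), 0,
                                                  SLAB_HWCACHE_ALIGN, demo_init_once);
            if (!demo_inode_cachep)
                    goto out;

            err = register_filesystem(&demo_fs_type);   /* last: fs becomes mountable */
            if (err)
                    goto out_cache;
            return 0;

    out_cache:
            kmem_cache_destroy(demo_inode_cachep);
    out:
            return err;
    }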
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 65978d7885c..230eb0f005b 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -38,8 +38,9 @@ static const char *gfs2_acl_name(int type)
return NULL;
}
-static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
struct posix_acl *acl;
const char *name;
char *data;
@@ -67,11 +68,6 @@ static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
return acl;
}
-struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
-{
- return gfs2_acl_get(GFS2_I(inode), type);
-}
-
static int gfs2_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
@@ -125,7 +121,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
if (S_ISLNK(inode->i_mode))
return 0;
- acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
+ acl = gfs2_get_acl(&dip->i_inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl) {
@@ -166,7 +162,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
unsigned int len;
int error;
- acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
+ acl = gfs2_get_acl(&ip->i_inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl)
@@ -216,7 +212,7 @@ static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
if (type < 0)
return type;
- acl = gfs2_acl_get(GFS2_I(inode), type);
+ acl = gfs2_get_acl(inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 4858e1fed8b..501e5cba09b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
- struct gfs2_alloc *al = NULL;
+ struct gfs2_qadata *qa = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
struct page *page;
@@ -639,8 +639,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
- al = gfs2_alloc_get(ip);
- if (!al) {
+ qa = gfs2_qadata_get(ip);
+ if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
@@ -649,8 +649,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (error)
goto out_alloc_put;
- al->al_requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error)
goto out_qunlock;
}
@@ -711,7 +710,7 @@ out_trans_fail:
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
}
out_unlock:
if (&ip->i_inode == sdp->sd_rindex) {
@@ -848,7 +847,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
unsigned int to = from + len;
int ret;
@@ -880,10 +879,11 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
brelse(dibh);
failed:
gfs2_trans_end(sdp);
- if (al) {
+ if (ip->i_res)
gfs2_inplace_release(ip);
+ if (qa) {
gfs2_quota_unlock(ip);
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
}
if (inode == sdp->sd_rindex) {
gfs2_glock_dq(&m_ip->i_gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 41d494d7970..14a70401597 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -133,7 +133,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
and write it out to disk */
unsigned int n = 1;
- error = gfs2_alloc_block(ip, &block, &n);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
goto out_brelse;
if (isdir) {
@@ -503,7 +503,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
do {
int error;
n = blks - alloced;
- error = gfs2_alloc_block(ip, &bn, &n);
+ error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return error;
alloced += n;
@@ -743,9 +743,6 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
else if (ip->i_depth)
revokes = sdp->sd_inptrs;
- if (error)
- return error;
-
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
bstart = 0;
blen = 0;
@@ -1044,7 +1041,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
find_metapath(sdp, lblock, &mp, ip->i_height);
- if (!gfs2_alloc_get(ip))
+ if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1064,7 +1061,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
gfs2_quota_unhold(ip);
out:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
@@ -1166,21 +1163,20 @@ static int do_grow(struct inode *inode, u64 size)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
- struct gfs2_alloc *al = NULL;
+ struct gfs2_qadata *qa = NULL;
int error;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
- al = gfs2_alloc_get(ip);
- if (al == NULL)
+ qa = gfs2_qadata_get(ip);
+ if (qa == NULL)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto do_grow_alloc_put;
- al->al_requested = 1;
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, 1);
if (error)
goto do_grow_qunlock;
}
@@ -1189,7 +1185,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
goto do_grow_release;
- if (al) {
+ if (qa) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
goto do_end_trans;
@@ -1208,12 +1204,12 @@ static int do_grow(struct inode *inode, u64 size)
do_end_trans:
gfs2_trans_end(sdp);
do_grow_release:
- if (al) {
+ if (qa) {
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
do_grow_alloc_put:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
}
return error;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 8ccad2467cb..c35573abd37 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -76,6 +76,8 @@
#define IS_LEAF 1 /* Hashed (leaf) directory */
#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
+#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
+
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
@@ -821,7 +823,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_dirent *dent;
struct qstr name = { .name = "", .len = 0, .hash = 0 };
- error = gfs2_alloc_block(ip, &bn, &n);
+ error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
@@ -1376,6 +1378,52 @@ out:
return error;
}
+/**
+ * gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
+ *
+ * Note: we can't calculate each index like dir_e_read can because we don't
+ * have the leaf, and therefore we don't have the depth, and therefore we
+ * don't have the length. So we have to just read enough ahead to make up
+ * for the loss of information.
+ */
+static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
+ struct file_ra_state *f_ra)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_glock *gl = ip->i_gl;
+ struct buffer_head *bh;
+ u64 blocknr = 0, last;
+ unsigned count;
+
+ /* First check if we've already read-ahead for the whole range. */
+ if (index + MAX_RA_BLOCKS < f_ra->start)
+ return;
+
+ f_ra->start = max((pgoff_t)index, f_ra->start);
+ for (count = 0; count < MAX_RA_BLOCKS; count++) {
+ if (f_ra->start >= hsize) /* if exceeded the hash table */
+ break;
+
+ last = blocknr;
+ blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
+ f_ra->start++;
+ if (blocknr == last)
+ continue;
+
+ bh = gfs2_getbuf(gl, blocknr, 1);
+ if (trylock_buffer(bh)) {
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ brelse(bh);
+ continue;
+ }
+ bh->b_end_io = end_buffer_read_sync;
+ submit_bh(READA | REQ_META, bh);
+ continue;
+ }
+ brelse(bh);
+ }
+}
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
@@ -1388,7 +1436,7 @@ out:
*/
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
- filldir_t filldir)
+ filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
u32 hsize, len = 0;
@@ -1402,10 +1450,14 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
+ if (dip->i_hash_cache == NULL)
+ f_ra->start = 0;
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
return PTR_ERR(lp);
+ gfs2_dir_readahead(inode, hsize, index, f_ra);
+
while (index < hsize) {
error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
@@ -1423,7 +1475,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
}
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
- filldir_t filldir)
+ filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1437,7 +1489,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
return 0;
if (dip->i_diskflags & GFS2_DIF_EXHASH)
- return dir_e_read(inode, offset, opaque, filldir);
+ return dir_e_read(inode, offset, opaque, filldir, f_ra);
if (!gfs2_is_stuffed(dip)) {
gfs2_consist_inode(dip);
@@ -1798,7 +1850,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (!ht)
return -ENOMEM;
- if (!gfs2_alloc_get(dip)) {
+ if (!gfs2_qadata_get(dip)) {
error = -ENOMEM;
goto out;
}
@@ -1887,7 +1939,7 @@ out_rlist:
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
out_put:
- gfs2_alloc_put(dip);
+ gfs2_qadata_put(dip);
out:
kfree(ht);
return error;
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index ff5772fbf02..98c960beab3 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -25,7 +25,7 @@ extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
- filldir_t filldir);
+ filldir_t filldir, struct file_ra_state *f_ra);
extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
const struct gfs2_inode *nip, unsigned int new_type);
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index fe9945f2ff7..70ba891654f 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -99,6 +99,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
struct gfs2_holder gh;
u64 offset = 0;
int error;
+ struct file_ra_state f_ra = { .start = 0 };
if (!dir)
return -EINVAL;
@@ -118,7 +119,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
if (error)
return error;
- error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);
+ error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
gfs2_glock_dq_uninit(&gh);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ce36a56dfea..c5fb3597f69 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -105,7 +105,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
return error;
}
- error = gfs2_dir_read(dir, &offset, dirent, filldir);
+ error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
gfs2_glock_dq_uninit(&d_gh);
@@ -223,7 +223,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
int error;
u32 new_flags, flags;
- error = mnt_want_write(filp->f_path.mnt);
+ error = mnt_want_write_file(filp);
if (error)
return error;
@@ -285,7 +285,7 @@ out_trans_end:
out:
gfs2_glock_dq_uninit(&gh);
out_drop_write:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return error;
}
@@ -365,7 +365,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
loff_t size;
int ret;
@@ -393,16 +393,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
}
ret = -ENOMEM;
- al = gfs2_alloc_get(ip);
- if (al == NULL)
+ qa = gfs2_qadata_get(ip);
+ if (qa == NULL)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
goto out_alloc_put;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
- ret = gfs2_inplace_reserve(ip);
+ ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (ret)
goto out_quota_unlock;
@@ -448,7 +447,7 @@ out_trans_fail:
out_quota_unlock:
gfs2_quota_unlock(ip);
out_alloc_put:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
@@ -609,7 +608,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
struct inode *inode = mapping->host;
int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
struct gfs2_inode *ip = GFS2_I(inode);
- int ret, ret1 = 0;
+ int ret = 0, ret1 = 0;
if (mapping->nrpages) {
ret1 = filemap_fdatawrite_range(mapping, start, end);
@@ -750,8 +749,10 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
int error;
+ const loff_t pos = offset;
+ const loff_t count = len;
loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
loff_t max_chunk_size = UINT_MAX & bsize_mask;
@@ -782,8 +783,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
while (len > 0) {
if (len < bytes)
bytes = len;
- al = gfs2_alloc_get(ip);
- if (!al) {
+ qa = gfs2_qadata_get(ip);
+ if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
@@ -795,8 +796,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
@@ -810,7 +810,6 @@ retry:
max_bytes = bytes;
calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
&max_bytes, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
RES_RG_HDR + gfs2_rg_blocks(ip);
@@ -832,8 +831,11 @@ retry:
offset += max_bytes;
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
}
+
+ if (error == 0)
+ error = generic_write_sync(file, pos, count);
goto out_unlock;
out_trans_fail:
@@ -841,7 +843,7 @@ out_trans_fail:
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&ip->i_gh);
out_uninit:
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7389dfdcc9e..e1d3bb59945 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -244,17 +244,16 @@ struct gfs2_glock {
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
-struct gfs2_alloc {
+struct gfs2_qadata { /* quota allocation data */
/* Quota stuff */
- struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
- struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
- unsigned int al_qd_num;
-
- u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
- u32 al_alloced; /* Filled in by gfs2_alloc_*() */
+ struct gfs2_quota_data *qa_qd[2*MAXQUOTAS];
+ struct gfs2_holder qa_qd_ghs[2*MAXQUOTAS];
+ unsigned int qa_qd_num;
+};
- /* Filled in by gfs2_inplace_reserve() */
- struct gfs2_holder al_rgd_gh;
+struct gfs2_blkreserv {
+ u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
+ struct gfs2_holder rs_rgd_gh; /* Filled in by gfs2_inplace_reserve() */
};
enum {
@@ -275,7 +274,8 @@ struct gfs2_inode {
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
- struct gfs2_alloc *i_alloc;
+ struct gfs2_qadata *i_qadata; /* quota allocation data */
+ struct gfs2_blkreserv *i_res; /* resource group block reservation */
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
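The incore.h hunk is the heart of the gfs2 changes below: struct gfs2_alloc is split into gfs2_qadata (quota bookkeeping) and gfs2_blkreserv (resource-group block reservation), and the requested block count is now passed straight to gfs2_inplace_reserve() instead of being stashed in al_requested. The shape of an allocating write path after the split, condensed from the gfs2_write_begin()/gfs2_page_mkwrite() hunks above; this assumes gfs2's private headers, and the transaction and error details are elided:

    static int demo_reserve(struct gfs2_inode *ip, unsigned data_blocks,
                            unsigned ind_blocks)
    {
            struct gfs2_qadata *qa;
            int error;

            qa = gfs2_qadata_get(ip);                       /* quota side */
            if (!qa)
                    return -ENOMEM;

            error = gfs2_quota_lock_check(ip);
            if (error)
                    goto out_put;

            /* block side: requested count is now an argument, not al_requested */
            error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
            if (error)
                    goto out_qunlock;

            /* ... start the transaction and do the write ... */

            gfs2_inplace_release(ip);
    out_qunlock:
            gfs2_quota_unlock(ip);
    out_put:
            gfs2_qadata_put(ip);
            return error;
    }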
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index cfd4959b218..017960cf1d7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -333,7 +333,7 @@ out:
*/
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
- unsigned int mode)
+ umode_t mode)
{
int error;
@@ -364,7 +364,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
return 0;
}
-static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
+static void munge_mode_uid_gid(struct gfs2_inode *dip, umode_t *mode,
unsigned int *uid, unsigned int *gid)
{
if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
@@ -389,12 +389,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
int error;
+ int dblocks = 1;
- if (gfs2_alloc_get(dip) == NULL)
- return -ENOMEM;
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ fs_warn(sdp, "rindex update returns %d\n", error);
- dip->i_alloc->al_requested = RES_DINODE;
- error = gfs2_inplace_reserve(dip);
+ error = gfs2_inplace_reserve(dip, RES_DINODE);
if (error)
goto out;
@@ -402,14 +403,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
if (error)
goto out_ipreserv;
- error = gfs2_alloc_di(dip, no_addr, generation);
+ error = gfs2_alloc_blocks(dip, no_addr, &dblocks, 1, generation);
gfs2_trans_end(sdp);
out_ipreserv:
gfs2_inplace_release(dip);
out:
- gfs2_alloc_put(dip);
return error;
}
@@ -447,7 +447,7 @@ static void gfs2_init_dir(struct buffer_head *dibh,
*/
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
- const struct gfs2_inum_host *inum, unsigned int mode,
+ const struct gfs2_inum_host *inum, umode_t mode,
unsigned int uid, unsigned int gid,
const u64 *generation, dev_t dev, const char *symname,
unsigned size, struct buffer_head **bhp)
@@ -516,7 +516,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
}
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
- unsigned int mode, const struct gfs2_inum_host *inum,
+ umode_t mode, const struct gfs2_inum_host *inum,
const u64 *generation, dev_t dev, const char *symname,
unsigned int size, struct buffer_head **bhp)
{
@@ -525,7 +525,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
- if (!gfs2_alloc_get(dip))
+ if (!gfs2_qadata_get(dip))
return -ENOMEM;
error = gfs2_quota_lock(dip, uid, gid);
@@ -547,7 +547,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
out_quota:
gfs2_quota_unlock(dip);
out:
- gfs2_alloc_put(dip);
+ gfs2_qadata_put(dip);
return error;
}
@@ -555,13 +555,13 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
int alloc_required;
struct buffer_head *dibh;
int error;
- al = gfs2_alloc_get(dip);
- if (!al)
+ qa = gfs2_qadata_get(dip);
+ if (!qa)
return -ENOMEM;
error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -576,9 +576,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
- al->al_requested = sdp->sd_max_dirres;
-
- error = gfs2_inplace_reserve(dip);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto fail_quota_locks;
@@ -619,11 +617,11 @@ fail_quota_locks:
gfs2_quota_unlock(dip);
fail:
- gfs2_alloc_put(dip);
+ gfs2_qadata_put(dip);
return error;
}
-int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
@@ -659,7 +657,7 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
*/
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
- unsigned int mode, dev_t dev, const char *symname,
+ umode_t mode, dev_t dev, const char *symname,
unsigned int size, int excl)
{
const struct qstr *name = &dentry->d_name;
@@ -728,9 +726,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
brelse(bh);
gfs2_trans_end(sdp);
- gfs2_inplace_release(dip);
+ /* Check if we reserved space in the rgrp. Function link_dinode may
+ not, depending on whether alloc is required. */
+ if (dip->i_res)
+ gfs2_inplace_release(dip);
gfs2_quota_unlock(dip);
- gfs2_alloc_put(dip);
+ gfs2_qadata_put(dip);
mark_inode_dirty(inode);
gfs2_glock_dq_uninit_m(2, ghs);
d_instantiate(dentry, inode);
@@ -760,7 +761,7 @@ fail:
*/
static int gfs2_create(struct inode *dir, struct dentry *dentry,
- int mode, struct nameidata *nd)
+ umode_t mode, struct nameidata *nd)
{
int excl = 0;
if (nd && (nd->flags & LOOKUP_EXCL))
@@ -875,8 +876,9 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
- struct gfs2_alloc *al = gfs2_alloc_get(dip);
- if (!al) {
+ struct gfs2_qadata *qa = gfs2_qadata_get(dip);
+
+ if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
@@ -885,9 +887,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_alloc;
- al->al_requested = sdp->sd_max_dirres;
-
- error = gfs2_inplace_reserve(dip);
+ error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
@@ -930,7 +930,7 @@ out_gunlock_q:
gfs2_quota_unlock(dip);
out_alloc:
if (alloc_required)
- gfs2_alloc_put(dip);
+ gfs2_qadata_put(dip);
out_gunlock:
gfs2_glock_dq(ghs + 1);
out_child:
@@ -1037,12 +1037,14 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
struct buffer_head *bh;
struct gfs2_holder ghs[3];
struct gfs2_rgrpd *rgd;
- int error;
+ int error = -EROFS;
gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
+ if (!rgd)
+ goto out_inodes;
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
@@ -1088,12 +1090,13 @@ out_end_trans:
out_gunlock:
gfs2_glock_dq(ghs + 2);
out_rgrp:
- gfs2_holder_uninit(ghs + 2);
gfs2_glock_dq(ghs + 1);
out_child:
- gfs2_holder_uninit(ghs + 1);
gfs2_glock_dq(ghs);
out_parent:
+ gfs2_holder_uninit(ghs + 2);
+out_inodes:
+ gfs2_holder_uninit(ghs + 1);
gfs2_holder_uninit(ghs);
return error;
}
@@ -1129,7 +1132,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
* Returns: errno
*/
-static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, 0, 0);
}
@@ -1143,7 +1146,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
*
*/
-static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0);
@@ -1350,8 +1353,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
error = 0;
if (alloc_required) {
- struct gfs2_alloc *al = gfs2_alloc_get(ndip);
- if (!al) {
+ struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
+
+ if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
@@ -1360,9 +1364,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_alloc;
- al->al_requested = sdp->sd_max_dirres;
-
- error = gfs2_inplace_reserve(ndip);
+ error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
@@ -1423,7 +1425,7 @@ out_gunlock_q:
gfs2_quota_unlock(ndip);
out_alloc:
if (alloc_required)
- gfs2_alloc_put(ndip);
+ gfs2_qadata_put(ndip);
out_gunlock:
while (x--) {
gfs2_glock_dq(ghs + x);
@@ -1584,7 +1586,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
ogid = ngid = NO_QUOTA_CHANGE;
- if (!gfs2_alloc_get(ip))
+ if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_lock(ip, nuid, ngid);
@@ -1616,7 +1618,7 @@ out_end_trans:
out_gunlock_q:
gfs2_quota_unlock(ip);
out_alloc:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 98c80d8c2a6..ce85b62bc0a 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -195,10 +195,10 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *fsname)
return -EINVAL;
}
- error = dlm_new_lockspace(fsname, strlen(fsname), &ls->ls_dlm,
+ error = dlm_new_lockspace(fsname, NULL,
DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
(ls->ls_nodir ? DLM_LSFL_NODIR : 0),
- GDLM_LVB_SIZE);
+ GDLM_LVB_SIZE, NULL, NULL, NULL, &ls->ls_dlm);
if (error)
printk(KERN_ERR "dlm_new_lockspace error %d", error);
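dlm_new_lockspace() grew new parameters in this series: a cluster name, a set of lockspace callbacks with their argument and result pointer, and the output lockspace handle moved to the end. GFS2 passes NULL for everything it does not use. The patched call again, annotated argument by argument (taken from the hunk above; the names for the three NULLs follow the new prototype as I read it, not a full reference):

    error = dlm_new_lockspace(fsname,               /* lockspace name                  */
                              NULL,                 /* cluster name (unused here)      */
                              DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
                              (ls->ls_nodir ? DLM_LSFL_NODIR : 0),  /* flags           */
                              GDLM_LVB_SIZE,        /* lock value block size           */
                              NULL, NULL, NULL,     /* ops, ops arg, ops result        */
                              &ls->ls_dlm);         /* resulting lockspace (output)    */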
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 59864643436..756fae9eaf8 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -626,7 +626,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
else
- submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
+ submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
wake_up(&sdp->sd_log_waitq);
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
- if (freezing(current))
- refrigerator();
+
+ try_to_freeze();
do {
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 8a139ff1919..c150298e2d8 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -40,7 +40,8 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
- ip->i_alloc = NULL;
+ ip->i_qadata = NULL;
+ ip->i_res = NULL;
ip->i_hash_cache = NULL;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index be29858900f..181586e673f 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
- ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
+ ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
dblock++;
extlen--;
@@ -444,7 +444,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
- ll_rw_block(READA, 1, &bh);
+ ll_rw_block(READA | REQ_META, 1, &bh);
brelse(bh);
dblock++;
extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cb23c2be731..fe72e79e6ff 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
+ submit_bio(READ_SYNC | REQ_META, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 7e528dc14f8..a45b21b0391 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -494,11 +494,11 @@ static void qdsb_put(struct gfs2_quota_data *qd)
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
- struct gfs2_quota_data **qd = al->al_qd;
+ struct gfs2_qadata *qa = ip->i_qadata;
+ struct gfs2_quota_data **qd = qa->qa_qd;
int error;
- if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
+ if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
return -EIO;
@@ -508,20 +508,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
- al->al_qd_num++;
+ qa->qa_qd_num++;
qd++;
error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
- al->al_qd_num++;
+ qa->qa_qd_num++;
qd++;
if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
- al->al_qd_num++;
+ qa->qa_qd_num++;
qd++;
}
@@ -529,7 +529,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
- al->al_qd_num++;
+ qa->qa_qd_num++;
qd++;
}
@@ -542,16 +542,16 @@ out:
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
unsigned int x;
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
- for (x = 0; x < al->al_qd_num; x++) {
- qdsb_put(al->al_qd[x]);
- al->al_qd[x] = NULL;
+ for (x = 0; x < qa->qa_qd_num; x++) {
+ qdsb_put(qa->qa_qd[x]);
+ qa->qa_qd[x] = NULL;
}
- al->al_qd_num = 0;
+ qa->qa_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
@@ -712,7 +712,7 @@ get_a_page:
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
- ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+ ll_rw_block(READ | REQ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto unlock_out;
@@ -762,7 +762,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
struct gfs2_quota_data *qd;
loff_t offset;
unsigned int nalloc = 0, blocks;
- struct gfs2_alloc *al = NULL;
int error;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
@@ -792,26 +791,19 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
nalloc++;
}
- al = gfs2_alloc_get(ip);
- if (!al) {
- error = -ENOMEM;
- goto out_gunlock;
- }
/*
* 1 blk for unstuffing inode if stuffed. We add this extra
* block to the reservation unconditionally. If the inode
* doesn't need unstuffing, the block will be released to the
* rgrp since it won't be allocated during the transaction
*/
- al->al_requested = 1;
/* +3 in the end for unstuffing block, inode size update block
* and another block in case quota straddles page boundary and
* two blocks need to be updated instead of 1 */
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
- if (nalloc)
- al->al_requested += nalloc * (data_blocks + ind_blocks);
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, 1 +
+ (nalloc * (data_blocks + ind_blocks)));
if (error)
goto out_alloc;
@@ -840,8 +832,6 @@ out_end_trans:
out_ipres:
gfs2_inplace_release(ip);
out_alloc:
- gfs2_alloc_put(ip);
-out_gunlock:
gfs2_glock_dq_uninit(&i_gh);
out:
while (qx--)
@@ -925,7 +915,7 @@ fail:
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
int error = 0;
@@ -938,15 +928,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
- sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
+ sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
sort_qd, NULL);
- for (x = 0; x < al->al_qd_num; x++) {
+ for (x = 0; x < qa->qa_qd_num; x++) {
int force = NO_FORCE;
- qd = al->al_qd[x];
+ qd = qa->qa_qd[x];
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force = FORCE;
- error = do_glock(qd, force, &al->al_qd_ghs[x]);
+ error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
if (error)
break;
}
@@ -955,7 +945,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
set_bit(GIF_QD_LOCKED, &ip->i_flags);
else {
while (x--)
- gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
+ gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
gfs2_quota_unhold(ip);
}
@@ -1000,7 +990,7 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
@@ -1008,14 +998,14 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
- for (x = 0; x < al->al_qd_num; x++) {
+ for (x = 0; x < qa->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
int sync;
- qd = al->al_qd[x];
+ qd = qa->qa_qd[x];
sync = need_sync(qd);
- gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
+ gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
if (sync && qd_trylock(qd))
qda[count++] = qd;
@@ -1048,7 +1038,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
s64 value;
unsigned int x;
@@ -1060,8 +1050,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
- for (x = 0; x < al->al_qd_num; x++) {
- qd = al->al_qd[x];
+ for (x = 0; x < qa->qa_qd_num; x++) {
+ qd = qa->qa_qd[x];
if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
@@ -1099,7 +1089,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid)
{
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
@@ -1108,8 +1098,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
- for (x = 0; x < al->al_qd_num; x++) {
- qd = al->al_qd[x];
+ for (x = 0; x < qa->qa_qd_num; x++) {
+ qd = qa->qa_qd[x];
if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
@@ -1427,8 +1417,8 @@ int gfs2_quotad(void *data)
/* Check for & recover partially truncated inodes */
quotad_check_trunc_list(sdp);
- if (freezing(current))
- refrigerator();
+ try_to_freeze();
+
t = min(quotad_timeo, statfs_timeo);
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
@@ -1529,7 +1519,6 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
unsigned int data_blocks, ind_blocks;
unsigned int blocks = 0;
int alloc_required;
- struct gfs2_alloc *al;
loff_t offset;
int error;
@@ -1594,15 +1583,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
- al = gfs2_alloc_get(ip);
- if (al == NULL)
- goto out_i;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
- blocks = al->al_requested = 1 + data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip);
+ blocks = 1 + data_blocks + ind_blocks;
+ error = gfs2_inplace_reserve(ip, blocks);
if (error)
- goto out_alloc;
+ goto out_i;
blocks += gfs2_rg_blocks(ip);
}
@@ -1617,11 +1603,8 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
gfs2_trans_end(sdp);
out_release:
- if (alloc_required) {
+ if (alloc_required)
gfs2_inplace_release(ip);
-out_alloc:
- gfs2_alloc_put(ip);
- }
out_i:
gfs2_glock_dq_uninit(&i_gh);
out_q:
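
The gfs2_set_dqblk hunk above shows the new reservation convention: the caller computes the block count with gfs2_write_calc_reserv() and hands it straight to gfs2_inplace_reserve(), with no hand-allocated struct gfs2_alloc. A minimal sketch of that prologue, using only the helpers visible in these hunks (the surrounding control flow and function name are illustrative, not taken from the patch):

static int example_reserve_for_quota_write(struct gfs2_inode *ip)
{
	unsigned int data_blocks, ind_blocks, blocks;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);
	blocks = 1 + data_blocks + ind_blocks;

	/* The requested block count now travels with the call itself. */
	error = gfs2_inplace_reserve(ip, blocks);
	if (error)
		return error;

	/* Resource-group metadata is added on top when sizing the transaction. */
	blocks += gfs2_rg_blocks(ip);

	/* ... begin the transaction, do the write, end the transaction ... */

	gfs2_inplace_release(ip);
	return 0;
}
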
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 96bd6d759f2..22234627f68 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -65,8 +65,8 @@ static const char valid_change[16] = {
};
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
- unsigned char old_state, unsigned char new_state,
- unsigned int *n);
+ unsigned char old_state,
+ struct gfs2_bitmap **rbi);
/**
* gfs2_setbit - Set a bit in the bitmaps
@@ -860,22 +860,36 @@ fail:
}
/**
- * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
+ * gfs2_qadata_get - get the struct gfs2_qadata structure for an inode
* @ip: the incore GFS2 inode structure
*
- * Returns: the struct gfs2_alloc
+ * Returns: the struct gfs2_qadata
*/
-struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
+struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
int error;
- BUG_ON(ip->i_alloc != NULL);
- ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
+ BUG_ON(ip->i_qadata != NULL);
+ ip->i_qadata = kzalloc(sizeof(struct gfs2_qadata), GFP_NOFS);
error = gfs2_rindex_update(sdp);
if (error)
fs_warn(sdp, "rindex update returns %d\n", error);
- return ip->i_alloc;
+ return ip->i_qadata;
+}
+
+/**
+ * gfs2_blkrsv_get - get the struct gfs2_blkreserv structure for an inode
+ * @ip: the incore GFS2 inode structure
+ *
+ * Returns: the struct gfs2_blkreserv
+ */
+
+static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip)
+{
+ BUG_ON(ip->i_res != NULL);
+ ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS);
+ return ip->i_res;
}
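
These two getters are the heart of the gfs2 rework in this diff: the old struct gfs2_alloc is split into a quota part (ip->i_qadata) and a block-reservation part (ip->i_res). The rough shape of the two structures, inferred only from the fields referenced in these hunks, is sketched below; the real definitions live in fs/gfs2/incore.h and the array sizes here are placeholders.

struct gfs2_qadata_sketch {			/* quota data, ip->i_qadata */
	struct gfs2_quota_data *qa_qd[4];	/* placeholder size; see incore.h */
	struct gfs2_holder qa_qd_ghs[4];
	unsigned int qa_qd_num;
};

struct gfs2_blkreserv_sketch {			/* block reservation, ip->i_res */
	u32 rs_requested;			/* set by gfs2_inplace_reserve() */
	struct gfs2_holder rs_rgd_gh;		/* holder on the chosen rgrp glock */
};
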
/**
@@ -890,15 +904,20 @@ struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
{
- const struct gfs2_alloc *al = ip->i_alloc;
+ const struct gfs2_blkreserv *rs = ip->i_res;
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
return 0;
- if (rgd->rd_free_clone >= al->al_requested)
+ if (rgd->rd_free_clone >= rs->rs_requested)
return 1;
return 0;
}
+static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk)
+{
+ return (bi->bi_start * GFS2_NBBY) + blk;
+}
+
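
rgblk_search() now reports a hit relative to a single bitmap buffer rather than to the whole resource group, so this helper converts bitmap scope to rgrp scope; adding rd_data0 then gives the filesystem address, exactly as gfs2_alloc_extent() does below. A one-function illustration of the conversion chain (only meaningful inside rgrp.c, where gfs2_bi2rgd_blk() is defined):

static u64 example_bitmap_hit_to_fs_block(struct gfs2_rgrpd *rgd,
					  struct gfs2_bitmap *bi,
					  u32 blk /* from rgblk_search() */)
{
	u32 rgrp_blk = gfs2_bi2rgd_blk(bi, blk);  /* bi->bi_start * GFS2_NBBY + blk */

	return rgd->rd_data0 + rgrp_blk;	  /* filesystem-scope block address */
}
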
/**
* try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
* @rgd: The rgrp
@@ -912,20 +931,20 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
u32 goal = 0, block;
u64 no_addr;
struct gfs2_sbd *sdp = rgd->rd_sbd;
- unsigned int n;
struct gfs2_glock *gl;
struct gfs2_inode *ip;
int error;
int found = 0;
+ struct gfs2_bitmap *bi;
while (goal < rgd->rd_data) {
down_write(&sdp->sd_log_flush_lock);
- n = 1;
- block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
- GFS2_BLKST_UNLINKED, &n);
+ block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
up_write(&sdp->sd_log_flush_lock);
if (block == BFITNOENT)
break;
+
+ block = gfs2_bi2rgd_blk(bi, block);
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
@@ -977,8 +996,8 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd, *begin = NULL;
- struct gfs2_alloc *al = ip->i_alloc;
- int error, rg_locked;
+ struct gfs2_blkreserv *rs = ip->i_res;
+ int error, rg_locked, flags = LM_FLAG_TRY;
int loops = 0;
if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
@@ -997,7 +1016,7 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
error = 0;
} else {
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- LM_FLAG_TRY, &al->al_rgd_gh);
+ flags, &rs->rs_rgd_gh);
}
switch (error) {
case 0:
@@ -1008,12 +1027,14 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
if (rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
- gfs2_glock_dq_uninit(&al->al_rgd_gh);
+ gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
/* fall through */
case GLR_TRYFAILED:
rgd = gfs2_rgrpd_get_next(rgd);
- if (rgd == begin)
+ if (rgd == begin) {
+ flags = 0;
loops++;
+ }
break;
default:
return error;
@@ -1023,6 +1044,13 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
return -ENOSPC;
}
+static void gfs2_blkrsv_put(struct gfs2_inode *ip)
+{
+ BUG_ON(ip->i_res == NULL);
+ kfree(ip->i_res);
+ ip->i_res = NULL;
+}
+
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
@@ -1030,16 +1058,23 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
* Returns: errno
*/
-int gfs2_inplace_reserve(struct gfs2_inode *ip)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_blkreserv *rs;
int error = 0;
u64 last_unlinked = NO_BLOCK;
int tries = 0;
- if (gfs2_assert_warn(sdp, al->al_requested))
- return -EINVAL;
+ rs = gfs2_blkrsv_get(ip);
+ if (!rs)
+ return -ENOMEM;
+
+ rs->rs_requested = requested;
+ if (gfs2_assert_warn(sdp, requested)) {
+ error = -EINVAL;
+ goto out;
+ }
do {
error = get_local_rgrp(ip, &last_unlinked);
@@ -1056,6 +1091,9 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip)
gfs2_log_flush(sdp, NULL);
} while (tries++ < 3);
+out:
+ if (error)
+ gfs2_blkrsv_put(ip);
return error;
}
@@ -1068,10 +1106,11 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip)
void gfs2_inplace_release(struct gfs2_inode *ip)
{
- struct gfs2_alloc *al = ip->i_alloc;
+ struct gfs2_blkreserv *rs = ip->i_res;
- if (al->al_rgd_gh.gh_gl)
- gfs2_glock_dq_uninit(&al->al_rgd_gh);
+ if (rs->rs_rgd_gh.gh_gl)
+ gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ gfs2_blkrsv_put(ip);
}
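
gfs2_inplace_reserve() now creates the struct gfs2_blkreserv itself and tears it down on failure or in gfs2_inplace_release(), so callers no longer wrap the pair in gfs2_alloc_get()/gfs2_alloc_put(). A sketch of the caller-visible lifecycle, with the transaction and allocation steps elided:

static int example_reserve_then_release(struct gfs2_inode *ip, u32 blocks)
{
	int error;

	error = gfs2_inplace_reserve(ip, blocks);  /* kzallocs ip->i_res internally */
	if (error)
		return error;		/* i_res is already freed on this path */

	/* ... gfs2_alloc_blocks() calls inside a transaction ... */

	gfs2_inplace_release(ip);	/* drops the rgrp glock, frees ip->i_res */
	return 0;
}
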
/**
@@ -1108,39 +1147,35 @@ static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
}
/**
- * rgblk_search - find a block in @old_state, change allocation
- * state to @new_state
+ * rgblk_search - find a block in @state
* @rgd: the resource group descriptor
* @goal: the goal block within the RG (start here to search for avail block)
- * @old_state: GFS2_BLKST_XXX the before-allocation state to find
- * @new_state: GFS2_BLKST_XXX the after-allocation block state
- * @n: The extent length
+ * @state: GFS2_BLKST_XXX the before-allocation state to find
+ * @rbi: address of the pointer to the bitmap containing the block found
*
- * Walk rgrp's bitmap to find bits that represent a block in @old_state.
- * Add the found bitmap buffer to the transaction.
- * Set the found bits to @new_state to change block's allocation state.
+ * Walk rgrp's bitmap to find bits that represent a block in @state.
*
* This function never fails, because we wouldn't call it unless we
* know (from reservation results, etc.) that a block is available.
*
- * Scope of @goal and returned block is just within rgrp, not the whole
- * filesystem.
+ * Scope of @goal is just within rgrp, not the whole filesystem.
+ * Scope of @returned block is just within bitmap, not the whole filesystem.
*
- * Returns: the block number allocated
+ * Returns: the block number found relative to the bitmap rbi
*/
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
- unsigned char old_state, unsigned char new_state,
- unsigned int *n)
+ unsigned char state,
+ struct gfs2_bitmap **rbi)
{
struct gfs2_bitmap *bi = NULL;
const u32 length = rgd->rd_length;
u32 blk = BFITNOENT;
unsigned int buf, x;
- const unsigned int elen = *n;
const u8 *buffer = NULL;
- *n = 0;
+ *rbi = NULL;
/* Find bitmap block that contains bits for goal block */
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
@@ -1163,21 +1198,21 @@ do_search:
bi = rgd->rd_bits + buf;
if (test_bit(GBF_FULL, &bi->bi_flags) &&
- (old_state == GFS2_BLKST_FREE))
+ (state == GFS2_BLKST_FREE))
goto skip;
/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
bitmaps, so we must search the originals for that. */
buffer = bi->bi_bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bi->bi_bh));
- if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+ if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
- blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
+ blk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
if (blk != BFITNOENT)
break;
- if ((goal == 0) && (old_state == GFS2_BLKST_FREE))
+ if ((goal == 0) && (state == GFS2_BLKST_FREE))
set_bit(GBF_FULL, &bi->bi_flags);
/* Try next bitmap block (wrap back to rgrp header if at end) */
@@ -1187,16 +1222,37 @@ skip:
goal = 0;
}
- if (blk == BFITNOENT)
- return blk;
+ if (blk != BFITNOENT)
+ *rbi = bi;
- *n = 1;
- if (old_state == new_state)
- goto out;
+ return blk;
+}
+
+/**
+ * gfs2_alloc_extent - allocate an extent from a given bitmap
+ * @rgd: the resource group descriptor
+ * @bi: the bitmap within the rgrp
+ * @blk: the block within the bitmap
+ * @dinode: TRUE if the first block we allocate is for a dinode
+ * @n: The extent length
+ *
+ * Add the found bitmap buffer to the transaction.
+ * Set the found bits to the new allocation state (GFS2_BLKST_DINODE or
+ * GFS2_BLKST_USED, depending on @dinode).
+ * Returns: starting block number of the extent (fs scope)
+ */
+static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
+ u32 blk, bool dinode, unsigned int *n)
+{
+ const unsigned int elen = *n;
+ u32 goal;
+ const u8 *buffer = NULL;
+ *n = 0;
+ buffer = bi->bi_bh->b_data + bi->bi_offset;
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
- bi, blk, new_state);
+ bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
+ (*n)++;
goal = blk;
while (*n < elen) {
goal++;
@@ -1206,11 +1262,12 @@ skip:
GFS2_BLKST_FREE)
break;
gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
- bi, goal, new_state);
+ bi, goal, GFS2_BLKST_USED);
(*n)++;
}
-out:
- return (bi->bi_start * GFS2_NBBY) + blk;
+ blk = gfs2_bi2rgd_blk(bi, blk);
+ rgd->rd_last_alloc = blk + *n - 1;
+ return rgd->rd_data0 + blk;
}
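
Searching and bit-flipping are now separate steps: rgblk_search() only finds a free bit, and gfs2_alloc_extent() marks the extent and returns the filesystem address. The fragment below restates that two-step flow in isolation; it mirrors what gfs2_alloc_blocks() does further down and, like it, would only compile inside rgrp.c where both helpers are static.

static int example_two_step_alloc(struct gfs2_rgrpd *rgd, u32 goal,
				  bool dinode, unsigned int *nblocks, u64 *bn)
{
	struct gfs2_bitmap *bi;
	u32 blk;

	/* Step 1: find a free bit; blk is relative to the bitmap *bi. */
	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
	if (blk == BFITNOENT)
		return -ENOSPC;		/* unexpected: blocks were reserved */

	/* Step 2: flip up to *nblocks bits and get the fs-scope address. */
	*bn = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
	return 0;
}
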
/**
@@ -1298,121 +1355,93 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
}
/**
- * gfs2_alloc_block - Allocate one or more blocks
+ * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
* @bn: Used to return the starting block number
- * @n: requested number of blocks/extent length (value/result)
+ * @nblocks: requested number of blocks/extent length (value/result)
+ * @dinode: 1 if we're allocating a dinode block, else 0
+ * @generation: the generation number of the inode
*
* Returns: 0 or error
*/
-int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
+int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
+ bool dinode, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
- struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
- u32 goal, blk;
- u64 block;
+ unsigned int ndata;
+ u32 goal, blk; /* block, within the rgrp scope */
+ u64 block; /* block, within the file system scope */
int error;
+ struct gfs2_bitmap *bi;
/* Only happens if there is a bug in gfs2, return something distinctive
* to ensure that it is noticed.
*/
- if (al == NULL)
+ if (ip->i_res == NULL)
return -ECANCELED;
rgd = ip->i_rgd;
- if (rgrp_contains_block(rgd, ip->i_goal))
+ if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
else
goal = rgd->rd_last_alloc;
- blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
+ blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
/* Since all blocks are reserved in advance, this shouldn't happen */
if (blk == BFITNOENT)
goto rgrp_error;
- rgd->rd_last_alloc = blk;
- block = rgd->rd_data0 + blk;
- ip->i_goal = block + *n - 1;
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error == 0) {
- struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_goal);
- brelse(dibh);
+ block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+ ndata = *nblocks;
+ if (dinode)
+ ndata--;
+
+ if (!dinode) {
+ ip->i_goal = block + ndata - 1;
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error == 0) {
+ struct gfs2_dinode *di =
+ (struct gfs2_dinode *)dibh->b_data;
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ di->di_goal_meta = di->di_goal_data =
+ cpu_to_be64(ip->i_goal);
+ brelse(dibh);
+ }
}
- if (rgd->rd_free < *n)
+ if (rgd->rd_free < *nblocks)
goto rgrp_error;
- rgd->rd_free -= *n;
-
- gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
- gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-
- al->al_alloced += *n;
-
- gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
- gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
-
- rgd->rd_free_clone -= *n;
- trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
- *bn = block;
- return 0;
-
-rgrp_error:
- gfs2_rgrp_error(rgd);
- return -EIO;
-}
-
-/**
- * gfs2_alloc_di - Allocate a dinode
- * @dip: the directory that the inode is going in
- * @bn: the block number which is allocated
- * @generation: the generation number of the inode
- *
- * Returns: 0 on success or error
- */
-
-int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_alloc *al = dip->i_alloc;
- struct gfs2_rgrpd *rgd = dip->i_rgd;
- u32 blk;
- u64 block;
- unsigned int n = 1;
-
- blk = rgblk_search(rgd, rgd->rd_last_alloc,
- GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
-
- /* Since all blocks are reserved in advance, this shouldn't happen */
- if (blk == BFITNOENT)
- goto rgrp_error;
-
- rgd->rd_last_alloc = blk;
- block = rgd->rd_data0 + blk;
- if (rgd->rd_free == 0)
- goto rgrp_error;
-
- rgd->rd_free--;
- rgd->rd_dinodes++;
- *generation = rgd->rd_igeneration++;
- if (*generation == 0)
+ rgd->rd_free -= *nblocks;
+ if (dinode) {
+ rgd->rd_dinodes++;
*generation = rgd->rd_igeneration++;
+ if (*generation == 0)
+ *generation = rgd->rd_igeneration++;
+ }
+
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- al->al_alloced++;
+ gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
+ if (dinode)
+ gfs2_trans_add_unrevoke(sdp, block, 1);
- gfs2_statfs_change(sdp, 0, -1, +1);
- gfs2_trans_add_unrevoke(sdp, block, 1);
+ /*
+ * This needs reviewing to see why we cannot do the quota change
+ * at this point in the dinode case.
+ */
+ if (ndata)
+ gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
+ ip->i_inode.i_gid);
- rgd->rd_free_clone--;
- trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
+ rgd->rd_free_clone -= *nblocks;
+ trace_gfs2_block_alloc(ip, block, *nblocks,
+ dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
*bn = block;
return 0;
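
With gfs2_alloc_block() and gfs2_alloc_di() folded into a single gfs2_alloc_blocks(), the two call patterns differ only in the dinode flag and the generation pointer. The data-block form matches the xattr.c conversions later in this diff; the dinode form is inferred from the new signature, since the converted inode-creation caller is outside this excerpt.

static int example_alloc_calls(struct gfs2_inode *ip, struct gfs2_inode *dip)
{
	u64 bn, dino, generation;
	unsigned int n = 1, one = 1;
	int error;

	/* Data/metadata extent: dinode == false, no generation wanted. */
	error = gfs2_alloc_blocks(ip, &bn, &n, false, NULL);
	if (error)
		return error;

	/* New dinode: dinode == true, the generation number is returned too. */
	return gfs2_alloc_blocks(dip, &dino, &one, true, &generation);
}
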
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index cf5c5018019..ceec9106cdf 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -28,19 +28,19 @@ extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
-static inline void gfs2_alloc_put(struct gfs2_inode *ip)
+extern struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip);
+static inline void gfs2_qadata_put(struct gfs2_inode *ip)
{
- BUG_ON(ip->i_alloc == NULL);
- kfree(ip->i_alloc);
- ip->i_alloc = NULL;
+ BUG_ON(ip->i_qadata == NULL);
+ kfree(ip->i_qadata);
+ ip->i_qadata = NULL;
}
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
-extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
-extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
+extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+ bool dinode, u64 *generation);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 71e420989f7..4553ce515f6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1284,18 +1284,18 @@ static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
/**
* gfs2_show_options - Show mount options for /proc/mounts
* @s: seq_file structure
- * @mnt: vfsmount
+ * @root: root of this (sub)tree
*
* Returns: 0 on success or error code
*/
-static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
- struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
+ struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
int val;
- if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
+ if (is_ancestor(root, sdp->sd_master_dir))
seq_printf(s, ",meta");
if (args->ar_lockproto[0])
seq_printf(s, ",lockproto=%s", args->ar_lockproto);
@@ -1399,8 +1399,9 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
struct gfs2_rgrpd *rgd;
+ struct gfs2_holder gh;
int error;
if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
@@ -1408,8 +1409,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
return -EIO;
}
- al = gfs2_alloc_get(ip);
- if (!al)
+ qa = gfs2_qadata_get(ip);
+ if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1423,8 +1424,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
goto out_qs;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
- &al->al_rgd_gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
goto out_qs;
@@ -1440,11 +1440,11 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_rg_gunlock:
- gfs2_glock_dq_uninit(&al->al_rgd_gh);
+ gfs2_glock_dq_uninit(&gh);
out_qs:
gfs2_quota_unhold(ip);
out:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
@@ -1582,7 +1582,6 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
static void gfs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(gfs2_inode_cachep, inode);
}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index f8f101ef600..125d4572e1c 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -30,9 +30,9 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
{
- const struct gfs2_alloc *al = ip->i_alloc;
- if (al->al_requested < ip->i_rgd->rd_length)
- return al->al_requested + 1;
+ const struct gfs2_blkreserv *rs = ip->i_res;
+ if (rs->rs_requested < ip->i_rgd->rd_length)
+ return rs->rs_requested + 1;
return ip->i_rgd->rd_length;
}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 71d7bf830c0..e9636591b5d 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -321,11 +321,11 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, int leave)
{
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
int error;
- al = gfs2_alloc_get(ip);
- if (!al)
+ qa = gfs2_qadata_get(ip);
+ if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -336,7 +336,7 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_quota_unhold(ip);
out_alloc:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
@@ -549,9 +549,10 @@ int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
goto out;
error = gfs2_ea_get_copy(ip, &el, data, len);
- if (error == 0)
- error = len;
- *ppdata = data;
+ if (error < 0)
+ kfree(data);
+ else
+ *ppdata = data;
out:
brelse(el.el_bh);
return error;
@@ -609,7 +610,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
u64 block;
int error;
- error = gfs2_alloc_block(ip, &block, &n);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
@@ -671,7 +672,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
- error = gfs2_alloc_block(ip, &block, &n);
+ error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
@@ -708,21 +709,19 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
struct buffer_head *dibh;
int error;
- al = gfs2_alloc_get(ip);
- if (!al)
+ qa = gfs2_qadata_get(ip);
+ if (!qa)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto out;
- al->al_requested = blks;
-
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, blks);
if (error)
goto out_gunlock_q;
@@ -751,7 +750,7 @@ out_ipres:
out_gunlock_q:
gfs2_quota_unlock(ip);
out:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
@@ -991,7 +990,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
} else {
u64 blk;
unsigned int n = 1;
- error = gfs2_alloc_block(ip, &blk, &n);
+ error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, blk, 1);
@@ -1435,9 +1434,9 @@ out:
static int ea_dealloc_block(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
struct buffer_head *dibh;
+ struct gfs2_holder gh;
int error;
rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
@@ -1446,8 +1445,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
return -EIO;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
- &al->al_rgd_gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
return error;
@@ -1471,7 +1469,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_gunlock:
- gfs2_glock_dq_uninit(&al->al_rgd_gh);
+ gfs2_glock_dq_uninit(&gh);
return error;
}
@@ -1484,11 +1482,11 @@ out_gunlock:
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
- struct gfs2_alloc *al;
+ struct gfs2_qadata *qa;
int error;
- al = gfs2_alloc_get(ip);
- if (!al)
+ qa = gfs2_qadata_get(ip);
+ if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
@@ -1510,7 +1508,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
out_quota:
gfs2_quota_unhold(ip);
out_alloc:
- gfs2_alloc_put(ip);
+ gfs2_qadata_put(ip);
return error;
}
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index bce4eef91a0..62fc14ea4b7 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -186,7 +186,7 @@ static int hfs_dir_release(struct inode *inode, struct file *file)
* a directory and return a corresponding inode, given the inode for
* the directory and the name (and its length) of the new file.
*/
-static int hfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -216,7 +216,7 @@ static int hfs_create(struct inode *dir, struct dentry *dentry, int mode,
* in a directory, given the inode for the parent directory and the
* name (and its length) of the new directory.
*/
-static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int res;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index ad97c2d5828..1bf967c6bfd 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -184,7 +184,7 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
-extern struct inode *hfs_new_inode(struct inode *, struct qstr *, int);
+extern struct inode *hfs_new_inode(struct inode *, struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
extern int hfs_inode_setattr(struct dentry *, struct iattr *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a1a9fdcd2a0..737dbeb6432 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -169,7 +169,7 @@ const struct address_space_operations hfs_aops = {
/*
* hfs_new_inode
*/
-struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
+struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1b55f704fb2..8137fb3e678 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -133,9 +133,9 @@ static int hfs_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
-static int hfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int hfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct hfs_sb_info *sbi = HFS_SB(mnt->mnt_sb);
+ struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
@@ -170,7 +170,6 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
static void hfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
}
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 4536cd3f15a..88e155f895c 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -424,7 +424,7 @@ out:
}
static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
@@ -453,13 +453,13 @@ out:
return res;
}
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, int mode,
+static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return hfsplus_mknod(dir, dentry, mode, 0);
}
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
}
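
hfs, hfsplus, hostfs, hpfs, hugetlbfs and jffs2 all get the same treatment in this diff: the mode argument of ->create, ->mkdir and ->mknod changes from int to umode_t. The hfsplus and hugetlbfs hunks also show the usual funnelling of create/mkdir into mknod with the file-type bits OR'd in; a generic restatement of that pattern (names hypothetical, inode allocation elided):

static int example_mknod(struct inode *dir, struct dentry *dentry,
			 umode_t mode, dev_t rdev)
{
	/* allocate an inode with the given mode, then d_instantiate() it */
	return 0;
}

static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode, struct nameidata *nd)
{
	return example_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int example_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	return example_mknod(dir, dentry, mode | S_IFDIR, 0);
}
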
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d7674d051f5..21a5b7fc6db 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -402,7 +402,7 @@ void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
int hfsplus_cat_read_inode(struct inode *, struct hfs_find_data *);
int hfsplus_cat_write_inode(struct inode *);
-struct inode *hfsplus_new_inode(struct super_block *, int);
+struct inode *hfsplus_new_inode(struct super_block *, umode_t);
void hfsplus_delete_inode(struct inode *);
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
@@ -419,7 +419,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
int hfsplus_parse_options_remount(char *input, int *force);
void hfsplus_fill_defaults(struct hfsplus_sb_info *);
-int hfsplus_show_options(struct seq_file *, struct vfsmount *);
+int hfsplus_show_options(struct seq_file *, struct dentry *);
/* super.c */
struct inode *hfsplus_iget(struct super_block *, unsigned long);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 40e1413be4c..6643b242bdd 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -378,7 +378,7 @@ static const struct file_operations hfsplus_file_operations = {
.unlocked_ioctl = hfsplus_ioctl,
};
-struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
+struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct inode *inode = new_inode(sb);
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index fbaa6690c8e..f66c7655b3f 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -43,7 +43,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
unsigned int flags;
int err = 0;
- err = mnt_want_write(file->f_path.mnt);
+ err = mnt_want_write_file(file);
if (err)
goto out;
@@ -94,7 +94,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
out_unlock_inode:
mutex_unlock(&inode->i_mutex);
out_drop_write:
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
out:
return err;
}
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index bb62a588214..06fa5618600 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -206,9 +206,9 @@ done:
return 1;
}
-int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
+int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
{
- struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d24a9b666a2..edf0a801446 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -558,7 +558,6 @@ static void hfsplus_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index bf15a43016b..3cbfa93cd78 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -39,7 +39,7 @@
struct hostfs_iattr {
unsigned int ia_valid;
- mode_t ia_mode;
+ unsigned short ia_mode;
uid_t ia_uid;
gid_t ia_gid;
loff_t ia_size;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2f72da5ae68..e130bd46d67 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -250,7 +250,6 @@ static void hostfs_evict_inode(struct inode *inode)
static void hostfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kfree(HOSTFS_I(inode));
}
@@ -259,9 +258,9 @@ static void hostfs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, hostfs_i_callback);
}
-static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
{
- const char *root_path = vfs->mnt_sb->s_fs_info;
+ const char *root_path = root->d_sb->s_fs_info;
size_t offset = strlen(root_ino) + 1;
if (strlen(root_path) > offset)
@@ -552,7 +551,7 @@ static int read_name(struct inode *ino, char *name)
return 0;
}
-int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
+int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -677,7 +676,7 @@ int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to)
return err;
}
-int hostfs_mkdir(struct inode *ino, struct dentry *dentry, int mode)
+int hostfs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode)
{
char *file;
int err;
@@ -701,7 +700,7 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
return err;
}
-int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
char *name;
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index ea91fcb0ef9..30dd7b10b50 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include "hpfs_fn.h"
-static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -115,7 +115,7 @@ bail:
return err;
}
-static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -201,7 +201,7 @@ bail:
return err;
}
-static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 98580a3b500..3690467c944 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -181,7 +181,6 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
static void hpfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
}
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index f590b1160c6..d92f4ce8092 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -622,7 +622,6 @@ void hppfs_evict_inode(struct inode *ino)
static void hppfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kfree(HPPFS_I(inode));
}
@@ -726,7 +725,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
sb->s_fs_info = proc_mnt;
err = -ENOMEM;
- root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root));
+ root_inode = get_inode(sb, dget(proc_mnt->mnt_root));
if (!root_inode)
goto out_mntput;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0be5a78598d..e425ad9d049 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -447,8 +447,8 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
-static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
- gid_t gid, int mode, dev_t dev)
+static struct inode *hugetlbfs_get_root(struct super_block *sb,
+ struct hugetlbfs_config *config)
{
struct inode *inode;
@@ -456,9 +456,31 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
if (inode) {
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = uid;
- inode->i_gid = gid;
+ inode->i_mode = S_IFDIR | config->mode;
+ inode->i_uid = config->uid;
+ inode->i_gid = config->gid;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ info = HUGETLBFS_I(inode);
+ mpol_shared_policy_init(&info->policy, NULL);
+ inode->i_op = &hugetlbfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ }
+ return inode;
+}
+
+static struct inode *hugetlbfs_get_inode(struct super_block *sb,
+ struct inode *dir,
+ umode_t mode, dev_t dev)
+{
+ struct inode *inode;
+
+ inode = new_inode(sb);
+ if (inode) {
+ struct hugetlbfs_inode_info *info;
+ inode->i_ino = get_next_ino();
+ inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -500,20 +522,12 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
* File creation. Allocate an inode, and we're done..
*/
static int hugetlbfs_mknod(struct inode *dir,
- struct dentry *dentry, int mode, dev_t dev)
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOSPC;
- gid_t gid;
-
- if (dir->i_mode & S_ISGID) {
- gid = dir->i_gid;
- if (S_ISDIR(mode))
- mode |= S_ISGID;
- } else {
- gid = current_fsgid();
- }
- inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, mode, dev);
+
+ inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (inode) {
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
@@ -523,7 +537,7 @@ static int hugetlbfs_mknod(struct inode *dir,
return error;
}
-static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
if (!retval)
@@ -531,7 +545,7 @@ static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return retval;
}
-static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
@@ -541,15 +555,8 @@ static int hugetlbfs_symlink(struct inode *dir,
{
struct inode *inode;
int error = -ENOSPC;
- gid_t gid;
-
- if (dir->i_mode & S_ISGID)
- gid = dir->i_gid;
- else
- gid = current_fsgid();
- inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(),
- gid, S_IFLNK|S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
if (inode) {
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
@@ -666,7 +673,6 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
static void hugetlbfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}
@@ -858,8 +864,7 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = HUGETLBFS_MAGIC;
sb->s_op = &hugetlbfs_ops;
sb->s_time_gran = 1;
- inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
- S_IFDIR | config.mode, 0);
+ inode = hugetlbfs_get_root(sb, &config);
if (!inode)
goto out_free;
@@ -957,8 +962,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
path.mnt = mntget(hugetlbfs_vfsmount);
error = -ENOSPC;
- inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(),
- current_fsgid(), S_IFREG | S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out_dentry;
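
The hugetlbfs split above leaves hugetlbfs_get_root() to build the root inode from the mount config, while every other inode gets its owner from inode_init_owner(), which subsumes the removed open-coded setgid-directory logic. For reference, the generic helper behaves roughly as follows (a restatement of the VFS helper that this diff converts to umode_t further down, not new code):

static void example_init_owner(struct inode *inode, const struct inode *dir,
			       umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && (dir->i_mode & S_ISGID)) {
		inode->i_gid = dir->i_gid;	/* inherit group from setgid dir */
		if (S_ISDIR(mode))
			mode |= S_ISGID;	/* new subdirectories stay setgid */
	} else {
		inode->i_gid = current_fsgid();
	}
	inode->i_mode = mode;
}
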
diff --git a/fs/inode.c b/fs/inode.c
index ee4e66b998f..4fa4f0916af 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -26,6 +26,7 @@
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
+#include <linux/ratelimit.h>
#include "internal.h"
/*
@@ -191,6 +192,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
}
inode->i_private = NULL;
inode->i_mapping = mapping;
+ INIT_LIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif
@@ -241,6 +243,11 @@ void __destroy_inode(struct inode *inode)
BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
fsnotify_inode_delete(inode);
+ if (!inode->i_nlink) {
+ WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
+ atomic_long_dec(&inode->i_sb->s_remove_count);
+ }
+
#ifdef CONFIG_FS_POSIX_ACL
if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
posix_acl_release(inode->i_acl);
@@ -254,7 +261,6 @@ EXPORT_SYMBOL(__destroy_inode);
static void i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(inode_cachep, inode);
}
@@ -268,6 +274,85 @@ static void destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, i_callback);
}
+/**
+ * drop_nlink - directly drop an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink. In cases
+ * where we are attempting to track writes to the
+ * filesystem, a decrement to zero means an imminent
+ * write when the file is truncated and actually unlinked
+ * on the filesystem.
+ */
+void drop_nlink(struct inode *inode)
+{
+ WARN_ON(inode->i_nlink == 0);
+ inode->__i_nlink--;
+ if (!inode->i_nlink)
+ atomic_long_inc(&inode->i_sb->s_remove_count);
+}
+EXPORT_SYMBOL(drop_nlink);
+
+/**
+ * clear_nlink - directly zero an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink. See
+ * drop_nlink() for why we care about i_nlink hitting zero.
+ */
+void clear_nlink(struct inode *inode)
+{
+ if (inode->i_nlink) {
+ inode->__i_nlink = 0;
+ atomic_long_inc(&inode->i_sb->s_remove_count);
+ }
+}
+EXPORT_SYMBOL(clear_nlink);
+
+/**
+ * set_nlink - directly set an inode's link count
+ * @inode: inode
+ * @nlink: new nlink (should be non-zero)
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink.
+ */
+void set_nlink(struct inode *inode, unsigned int nlink)
+{
+ if (!nlink) {
+ printk_ratelimited(KERN_INFO
+ "set_nlink() clearing i_nlink on %s inode %li\n",
+ inode->i_sb->s_type->name, inode->i_ino);
+ clear_nlink(inode);
+ } else {
+ /* Yes, some filesystems do change nlink from zero to one */
+ if (inode->i_nlink == 0)
+ atomic_long_dec(&inode->i_sb->s_remove_count);
+
+ inode->__i_nlink = nlink;
+ }
+}
+EXPORT_SYMBOL(set_nlink);
+
+/**
+ * inc_nlink - directly increment an inode's link count
+ * @inode: inode
+ *
+ * This is a low-level filesystem helper to replace any
+ * direct filesystem manipulation of i_nlink. Currently,
+ * it is only here for parity with dec_nlink().
+ */
+void inc_nlink(struct inode *inode)
+{
+ if (WARN_ON(inode->i_nlink == 0))
+ atomic_long_dec(&inode->i_sb->s_remove_count);
+
+ inode->__i_nlink++;
+}
+EXPORT_SYMBOL(inc_nlink);
+
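
These helpers make i_nlink effectively read-only for filesystems and keep the new per-superblock s_remove_count in step whenever a link count reaches or leaves zero. A sketch of the filesystem-side usage they expect, in place of direct assignments to i_nlink (function names hypothetical):

static int example_fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	/* ... remove the on-disk directory entry ... */
	drop_nlink(inode);		/* hitting zero bumps s_remove_count */
	mark_inode_dirty(inode);
	return 0;
}

static void example_fs_read_inode(struct inode *inode, unsigned int disk_nlink)
{
	set_nlink(inode, disk_nlink);	/* replaces inode->i_nlink = disk_nlink */
}
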
void address_space_init_once(struct address_space *mapping)
{
memset(mapping, 0, sizeof(*mapping));
@@ -290,7 +375,6 @@ void inode_init_once(struct inode *inode)
{
memset(inode, 0, sizeof(*inode));
INIT_HLIST_NODE(&inode->i_hash);
- INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_devices);
INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
@@ -692,6 +776,8 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
else
__count_vm_events(PGINODESTEAL, reap);
spin_unlock(&sb->s_inode_lru_lock);
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += reap;
dispose_list(&freeable);
}
@@ -1508,7 +1594,7 @@ void file_update_time(struct file *file)
if (sync_it & S_MTIME)
inode->i_mtime = now;
mark_inode_dirty_sync(inode);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
}
EXPORT_SYMBOL(file_update_time);
@@ -1647,7 +1733,7 @@ EXPORT_SYMBOL(init_special_inode);
* @mode: mode of the new inode
*/
void inode_init_owner(struct inode *inode, const struct inode *dir,
- mode_t mode)
+ umode_t mode)
{
inode->i_uid = current_fsuid();
if (dir && dir->i_mode & S_ISGID) {
diff --git a/fs/internal.h b/fs/internal.h
index fe327c20af8..9962c59ba28 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -15,19 +15,14 @@ struct super_block;
struct file_system_type;
struct linux_binprm;
struct path;
+struct mount;
/*
* block_dev.c
*/
#ifdef CONFIG_BLOCK
-extern struct super_block *blockdev_superblock;
extern void __init bdev_cache_init(void);
-static inline int sb_is_blkdev_sb(struct super_block *sb)
-{
- return sb == blockdev_superblock;
-}
-
extern int __sync_blockdev(struct block_device *bdev, int wait);
#else
@@ -35,11 +30,6 @@ static inline void bdev_cache_init(void)
{
}
-static inline int sb_is_blkdev_sb(struct super_block *sb)
-{
- return 0;
-}
-
static inline int __sync_blockdev(struct block_device *bdev, int wait)
{
return 0;
@@ -52,28 +42,17 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
extern void __init chrdev_init(void);
/*
- * exec.c
- */
-extern int check_unsafe_exec(struct linux_binprm *);
-
-/*
* namespace.c
*/
extern int copy_mount_options(const void __user *, unsigned long *);
extern int copy_mount_string(const void __user *, char **);
-extern unsigned int mnt_get_count(struct vfsmount *mnt);
-extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
extern struct vfsmount *lookup_mnt(struct path *);
-extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
- struct vfsmount *);
-extern void release_mounts(struct list_head *);
-extern void umount_tree(struct vfsmount *, int, struct list_head *);
-extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
extern int finish_automount(struct vfsmount *, struct path *);
extern void mnt_make_longterm(struct vfsmount *);
extern void mnt_make_shortterm(struct vfsmount *);
+extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
@@ -98,10 +77,9 @@ extern struct file *get_empty_filp(void);
*/
extern int do_remount_sb(struct super_block *, int, void *, int);
extern bool grab_super_passive(struct super_block *sb);
-extern void __put_super(struct super_block *sb);
-extern void put_super(struct super_block *sb);
extern struct dentry *mount_fs(struct file_system_type *,
int, const char *, void *);
+extern struct super_block *user_get_super(dev_t);
/*
* open.c
@@ -111,7 +89,7 @@ extern struct file *nameidata_to_filp(struct nameidata *);
extern void release_open_intent(struct nameidata *);
struct open_flags {
int open_flag;
- int mode;
+ umode_t mode;
int acc_mode;
int intent;
};
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d9b9fcb2db..066836e8184 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -42,7 +42,7 @@ static long vfs_ioctl(struct file *filp, unsigned int cmd,
error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
if (error == -ENOIOCTLCMD)
- error = -EINVAL;
+ error = -ENOTTY;
out:
return error;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index f950059525f..bd62c76fb5d 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -85,7 +85,6 @@ static struct inode *isofs_alloc_inode(struct super_block *sb)
static void isofs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
}
@@ -170,8 +169,8 @@ struct iso9660_options{
unsigned char map;
unsigned char check;
unsigned int blocksize;
- mode_t fmode;
- mode_t dmode;
+ umode_t fmode;
+ umode_t dmode;
gid_t gid;
uid_t uid;
char *iocharset;
@@ -949,8 +948,11 @@ root_found:
/* get the root dentry */
s->s_root = d_alloc_root(inode);
- if (!(s->s_root))
- goto out_no_root;
+ if (!(s->s_root)) {
+ iput(inode);
+ error = -ENOMEM;
+ goto out_no_inode;
+ }
kfree(opt.iocharset);
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 7d33de84f52..0e73f63d927 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -50,14 +50,14 @@ struct isofs_sb_info {
unsigned int s_uid_set:1;
unsigned int s_gid_set:1;
- mode_t s_fmode;
- mode_t s_dmode;
+ umode_t s_fmode;
+ umode_t s_dmode;
gid_t s_gid;
uid_t s_uid;
struct nls_table *s_nls_iocharset; /* Native language support table */
};
-#define ISOFS_INVALID_MODE ((mode_t) -1)
+#define ISOFS_INVALID_MODE ((umode_t) -1)
static inline struct isofs_sb_info *ISOFS_SB(struct super_block *sb)
{
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index f94fc48ff3a..5d1a00a5041 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -537,7 +537,7 @@ int cleanup_journal_tail(journal_t *journal)
* them.
*
* Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
*/
static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 8799207df05..f2b9a571f4c 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -392,6 +392,12 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug (3, "JBD: commit phase 1\n");
/*
+ * Clear the revoked flag to reflect that there are no revoked buffers
+ * in the next transaction which is going to be started.
+ */
+ journal_clear_buffer_revoked_flags(journal);
+
+ /*
* Switch to a new revoke table.
*/
journal_switch_revoke_table(journal);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index fea8dd661d2..59c09f9541b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -166,7 +166,7 @@ loop:
*/
jbd_debug(1, "Now suspending kjournald\n");
spin_unlock(&journal->j_state_lock);
- refrigerator();
+ try_to_freeze();
spin_lock(&journal->j_state_lock);
} else {
/*
@@ -721,7 +721,6 @@ static journal_t * journal_init_common (void)
init_waitqueue_head(&journal->j_wait_checkpoint);
init_waitqueue_head(&journal->j_wait_commit);
init_waitqueue_head(&journal->j_wait_updates);
- mutex_init(&journal->j_barrier);
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
spin_lock_init(&journal->j_list_lock);
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 305a9076315..25c713e7071 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -47,6 +47,10 @@
* overwriting the new data. We don't even need to clear the revoke
* bit here.
*
+ * We cache revoke status of a buffer in the current transaction in b_states
+ * bits. As the name says, revokevalid flag indicates that the cached revoke
+ * status of a buffer is valid and we can rely on the cached status.
+ *
* Revoke information on buffers is a tri-state value:
*
* RevokeValid clear: no cached revoke status, need to look it up
@@ -479,6 +483,36 @@ int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
return did_revoke;
}
+/*
+ * journal_clear_buffer_revoked_flags clears the revoked flag of buffers in
+ * the revoke table to reflect that there are no revoked buffers in the next
+ * transaction which is going to be started.
+ */
+void journal_clear_buffer_revoked_flags(journal_t *journal)
+{
+ struct jbd_revoke_table_s *revoke = journal->j_revoke;
+ int i = 0;
+
+ for (i = 0; i < revoke->hash_size; i++) {
+ struct list_head *hash_list;
+ struct list_head *list_entry;
+ hash_list = &revoke->hash_table[i];
+
+ list_for_each(list_entry, hash_list) {
+ struct jbd_revoke_record_s *record;
+ struct buffer_head *bh;
+ record = (struct jbd_revoke_record_s *)list_entry;
+ bh = __find_get_block(journal->j_fs_dev,
+ record->blocknr,
+ journal->j_blocksize);
+ if (bh) {
+ clear_buffer_revoked(bh);
+ __brelse(bh);
+ }
+ }
+ }
+}
+
/* journal_switch_revoke table select j_revoke for next transaction
* we do not want to suspend any processing until all revokes are
* written -bzzz
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 7e59c6e66f9..7fce94b04bc 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -426,17 +426,34 @@ int journal_restart(handle_t *handle, int nblocks)
* void journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
*
- * This locks out any further updates from being started, and blocks
- * until all existing updates have completed, returning only once the
- * journal is in a quiescent state with no updates running.
- *
- * The journal lock should not be held on entry.
+ * This locks out any further updates from being started, and blocks until all
+ * existing updates have completed, returning only once the journal is in a
+ * quiescent state with no updates running.
+ *
+ * We do not use a simple mutex for synchronization as there are syscalls which
+ * want to return with the filesystem locked and that trips up lockdep. Also,
+ * hibernate needs to lock the filesystem, but a locked mutex then blocks
+ * hibernation. Since locking the filesystem is a rare operation, we use a
+ * simple counter and a waitqueue for locking.
*/
void journal_lock_updates(journal_t *journal)
{
DEFINE_WAIT(wait);
+wait:
+ /* Wait for previous locked operation to finish */
+ wait_event(journal->j_wait_transaction_locked,
+ journal->j_barrier_count == 0);
+
spin_lock(&journal->j_state_lock);
+ /*
+ * Check reliably under the lock whether we are the ones winning the race
+ * and locking the journal
+ */
+ if (journal->j_barrier_count > 0) {
+ spin_unlock(&journal->j_state_lock);
+ goto wait;
+ }
++journal->j_barrier_count;
/* Wait until there are no running updates */
@@ -460,14 +477,6 @@ void journal_lock_updates(journal_t *journal)
spin_lock(&journal->j_state_lock);
}
spin_unlock(&journal->j_state_lock);
-
- /*
- * We have now established a barrier against other normal updates, but
- * we also need to barrier against other journal_lock_updates() calls
- * to make sure that we serialise special journal-locked operations
- * too.
- */
- mutex_lock(&journal->j_barrier);
}
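
The barrier is now a counter plus waitqueue rather than the j_barrier mutex, so whoever locks the journal may return to user space or hibernate without holding a mutex. The caller-side pairing is unchanged; a sketch of the typical freeze-style use, with journal_flush() assumed to be the existing jbd API:

static void example_freeze_journal(journal_t *journal)
{
	journal_lock_updates(journal);		/* wait out running handles */
	journal_flush(journal);			/* assumed existing jbd call */

	/* ... the filesystem is quiescent here ... */

	journal_unlock_updates(journal);	/* wakes j_wait_transaction_locked */
}
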
/**
@@ -475,14 +484,11 @@ void journal_lock_updates(journal_t *journal)
* @journal: Journal to release the barrier on.
*
* Release a transaction barrier obtained with journal_lock_updates().
- *
- * Should be called without the journal lock held.
*/
void journal_unlock_updates (journal_t *journal)
{
J_ASSERT(journal->j_barrier_count != 0);
- mutex_unlock(&journal->j_barrier);
spin_lock(&journal->j_state_lock);
--journal->j_barrier_count;
spin_unlock(&journal->j_state_lock);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 16a698bd906..d49d202903f 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -565,7 +565,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
*
* Called with the journal locked.
* Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
*/
static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 68d704db787..5069b847515 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -430,6 +430,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd_debug(3, "JBD2: commit phase 1\n");
/*
+ * Clear the revoked flag to reflect that there are no revoked buffers
+ * in the next transaction which is going to be started.
+ */
+ jbd2_clear_buffer_revoked_flags(journal);
+
+ /*
* Switch to a new revoke table.
*/
jbd2_journal_switch_revoke_table(journal);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0fa0123151d..c0a5f9f1b12 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -173,7 +173,7 @@ loop:
*/
jbd_debug(1, "Now suspending kjournald2\n");
write_unlock(&journal->j_state_lock);
- refrigerator();
+ try_to_freeze();
write_lock(&journal->j_state_lock);
} else {
/*
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 69fd9358811..30b2867d6cc 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -47,6 +47,10 @@
* overwriting the new data. We don't even need to clear the revoke
* bit here.
*
+ * We cache revoke status of a buffer in the current transaction in b_states
+ * bits. As the name says, revokevalid flag indicates that the cached revoke
+ * status of a buffer is valid and we can rely on the cached status.
+ *
* Revoke information on buffers is a tri-state value:
*
* RevokeValid clear: no cached revoke status, need to look it up
@@ -478,6 +482,36 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
return did_revoke;
}
+/*
+ * jbd2_clear_buffer_revoked_flags clears the revoked flag of buffers in
+ * the revoke table to reflect that there are no revoked buffers in the next
+ * transaction which is going to be started.
+ */
+void jbd2_clear_buffer_revoked_flags(journal_t *journal)
+{
+ struct jbd2_revoke_table_s *revoke = journal->j_revoke;
+ int i = 0;
+
+ for (i = 0; i < revoke->hash_size; i++) {
+ struct list_head *hash_list;
+ struct list_head *list_entry;
+ hash_list = &revoke->hash_table[i];
+
+ list_for_each(list_entry, hash_list) {
+ struct jbd2_revoke_record_s *record;
+ struct buffer_head *bh;
+ record = (struct jbd2_revoke_record_s *)list_entry;
+ bh = __find_get_block(journal->j_fs_dev,
+ record->blocknr,
+ journal->j_blocksize);
+ if (bh) {
+ clear_buffer_revoked(bh);
+ __brelse(bh);
+ }
+ }
+ }
+}
+
/* journal_switch_revoke table select j_revoke for next transaction
* we do not want to suspend any processing until all revokes are
* written -bzzz
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a0e41a4c080..35ae096bed5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -517,12 +517,13 @@ void jbd2_journal_lock_updates(journal_t *journal)
break;
spin_lock(&transaction->t_handle_lock);
+ prepare_to_wait(&journal->j_wait_updates, &wait,
+ TASK_UNINTERRUPTIBLE);
if (!atomic_read(&transaction->t_updates)) {
spin_unlock(&transaction->t_handle_lock);
+ finish_wait(&journal->j_wait_updates, &wait);
break;
}
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
spin_unlock(&transaction->t_handle_lock);
write_unlock(&journal->j_state_lock);
schedule();
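The reordering above closes a missed-wakeup window in jbd2_journal_lock_updates(): the task must be queued on j_wait_updates before it re-reads t_updates, otherwise the last update can drop the count to zero and issue its wakeup in the gap. A rough userspace analogue of the same rule, sketched with a pthread condition variable (illustrative names, not kernel code; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int updates = 1;			/* analogue of transaction->t_updates */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	updates = 0;			/* the last update completes... */
	pthread_cond_broadcast(&cond);	/* ...and wakes any waiter */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	/* Re-check the condition only while holding the lock the waker
	 * takes; prepare_to_wait() before the re-check plays that role
	 * in the patched kernel code. */
	while (updates != 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("all updates drained");
	return 0;
}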
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index be6169bd8ac..973ac5822bd 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -22,16 +22,16 @@
static int jffs2_readdir (struct file *, void *, filldir_t);
-static int jffs2_create (struct inode *,struct dentry *,int,
+static int jffs2_create (struct inode *,struct dentry *,umode_t,
struct nameidata *);
static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
struct nameidata *);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct inode *,struct dentry *,const char *);
-static int jffs2_mkdir (struct inode *,struct dentry *,int);
+static int jffs2_mkdir (struct inode *,struct dentry *,umode_t);
static int jffs2_rmdir (struct inode *,struct dentry *);
-static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t);
+static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
static int jffs2_rename (struct inode *, struct dentry *,
struct inode *, struct dentry *);
@@ -169,8 +169,8 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
/***********************************************************************/
-static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
- struct nameidata *nd)
+static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
+ umode_t mode, struct nameidata *nd)
{
struct jffs2_raw_inode *ri;
struct jffs2_inode_info *f, *dir_f;
@@ -450,7 +450,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
}
-static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
+static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -618,7 +618,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
return ret;
}
-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, dev_t rdev)
+static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
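The int → umode_t changes here (and in the jfs, logfs and minix hunks below) are pure type tightening: the argument only ever carries S_IF* type bits plus permission bits. For reference, this is the kind of switch such a mode typically feeds, shown as a small userspace sketch (loosely mirroring the may_mknod() helper touched later in the fs/namei.c part of this diff):

#include <sys/stat.h>
#include <stdio.h>

static const char *mode_kind(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:	return "regular file";
	case S_IFDIR:	return "directory";
	case S_IFCHR:	return "character device";
	case S_IFBLK:	return "block device";
	case S_IFIFO:	return "fifo";
	case S_IFSOCK:	return "socket";
	default:	return "unknown type";
	}
}

int main(void)
{
	printf("%s\n", mode_kind(S_IFDIR | 0755));
	return 0;
}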
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index e513f1913c1..a01cdad6aad 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -74,7 +74,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
- ret = c->mtd->erase(c->mtd, instr);
+ ret = mtd_erase(c->mtd, instr);
if (!ret)
return;
@@ -336,12 +336,11 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
uint32_t ofs;
size_t retlen;
int ret = -EIO;
+ unsigned long *wordebuf;
- if (c->mtd->point) {
- unsigned long *wordebuf;
-
- ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size,
- &retlen, &ebuf, NULL);
+ ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
+ &ebuf, NULL);
+ if (ret != -EOPNOTSUPP) {
if (ret) {
D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
goto do_flash_read;
@@ -349,7 +348,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
if (retlen < c->sector_size) {
/* Don't muck about if it won't let us point to the whole erase sector */
D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
- c->mtd->unpoint(c->mtd, jeb->offset, retlen);
+ mtd_unpoint(c->mtd, jeb->offset, retlen);
goto do_flash_read;
}
wordebuf = ebuf-sizeof(*wordebuf);
@@ -358,7 +357,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
if (*++wordebuf != ~0)
break;
} while(--retlen);
- c->mtd->unpoint(c->mtd, jeb->offset, c->sector_size);
+ mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
if (retlen) {
printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
*wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
@@ -381,7 +380,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
*bad_offset = ofs;
- ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
+ ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
if (ret) {
printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
ret = -EIO;
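From here on, the jffs2 (and later logfs) hunks replace direct c->mtd->read/write/point/erase calls, and their per-caller "is this operation implemented?" checks, with the mtd_read()/mtd_point()/mtd_sync() style wrappers; a missing optional operation now comes back as -EOPNOTSUPP. The wrapper pattern itself, reduced to a self-contained sketch (fake types, not the real <linux/mtd/mtd.h> interface):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct fake_mtd {
	/* optional operation: may be NULL if the driver lacks it */
	int (*point)(struct fake_mtd *m, long ofs, size_t len);
};

static int fake_mtd_point(struct fake_mtd *m, long ofs, size_t len)
{
	if (!m->point)
		return -EOPNOTSUPP;	/* caller falls back to plain reads */
	return m->point(m, ofs, len);
}

int main(void)
{
	struct fake_mtd m = { .point = NULL };
	int ret = fake_mtd_point(&m, 0, 4096);

	if (ret == -EOPNOTSUPP)
		puts("point() unsupported, falling back to the read path");
	return 0;
}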
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 4b8afe39a87..2e0123867cb 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -466,7 +466,6 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
if (insert_inode_locked(inode) < 0) {
make_bad_inode(inode);
- unlock_new_inode(inode);
iput(inode);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index ee57bac1ba6..3093ac4fb24 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -62,17 +62,15 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
#ifndef __ECOS
/* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
* adding and jffs2_flash_read_end() interface. */
- if (c->mtd->point) {
- err = c->mtd->point(c->mtd, ofs, len, &retlen,
- (void **)&buffer, NULL);
- if (!err && retlen < len) {
- JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
- c->mtd->unpoint(c->mtd, ofs, retlen);
- } else if (err)
+ err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
+ if (!err && retlen < len) {
+ JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
+ mtd_unpoint(c->mtd, ofs, retlen);
+ } else if (err) {
+ if (err != -EOPNOTSUPP)
JFFS2_WARNING("MTD point failed: error code %d.\n", err);
- else
- pointed = 1; /* succefully pointed to device */
- }
+ } else
+ pointed = 1; /* successfully pointed to device */
#endif
if (!pointed) {
@@ -101,7 +99,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
kfree(buffer);
#ifndef __ECOS
else
- c->mtd->unpoint(c->mtd, ofs, len);
+ mtd_unpoint(c->mtd, ofs, len);
#endif
if (crc != tn->data_crc) {
@@ -137,7 +135,7 @@ free_out:
kfree(buffer);
#ifndef __ECOS
else
- c->mtd->unpoint(c->mtd, ofs, len);
+ mtd_unpoint(c->mtd, ofs, len);
#endif
return err;
}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 28107ca136e..f99464833bb 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -97,15 +97,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
size_t pointlen, try_size;
if (c->mtd->point) {
- ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
- (void **)&flashbuf, NULL);
+ ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
+ (void **)&flashbuf, NULL);
if (!ret && pointlen < c->mtd->size) {
/* Don't muck about if it won't let us point to the whole flash */
D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
- c->mtd->unpoint(c->mtd, 0, pointlen);
+ mtd_unpoint(c->mtd, 0, pointlen);
flashbuf = NULL;
}
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
}
#endif
@@ -273,7 +273,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
kfree(flashbuf);
#ifndef __ECOS
else
- c->mtd->unpoint(c->mtd, 0, c->mtd->size);
+ mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
kfree(s);
return ret;
@@ -455,7 +455,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
if (jffs2_cleanmarker_oob(c)) {
int ret;
- if (c->mtd->block_isbad(c->mtd, jeb->offset))
+ if (mtd_block_isbad(c->mtd, jeb->offset))
return BLK_STATE_BADBLOCK;
ret = jffs2_check_nand_cleanmarker(c, jeb);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index e7e97445411..f2d96b5e64f 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -45,7 +45,6 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
static void jffs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
}
@@ -97,9 +96,9 @@ static const char *jffs2_compr_name(unsigned int compr)
}
}
-static int jffs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int jffs2_show_options(struct seq_file *s, struct dentry *root)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(mnt->mnt_sb);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb);
struct jffs2_mount_opts *opts = &c->mount_opts;
if (opts->override_compr)
@@ -336,9 +335,7 @@ static void jffs2_put_super (struct super_block *sb)
jffs2_flash_cleanup(c);
kfree(c->inocache_list);
jffs2_clear_xattr_subsystem(c);
- if (c->mtd->sync)
- c->mtd->sync(c->mtd);
-
+ mtd_sync(c->mtd);
D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index b09e51d2f81..30e8f47e8a2 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -228,7 +228,7 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
size_t retlen;
char *eccstr;
- ret = c->mtd->read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
+ ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
return ret;
@@ -337,7 +337,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
}
/* Do the read... */
- ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+ ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
+ buf);
/* ECC recovered ? */
if ((ret == -EUCLEAN || ret == -EBADMSG) &&
@@ -413,13 +414,12 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
if (breakme++ == 20) {
printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
breakme = 0;
- c->mtd->write(c->mtd, ofs, towrite, &retlen,
- brokenbuf);
+ mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
ret = -EIO;
} else
#endif
- ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
- rewrite_buf);
+ ret = mtd_write(c->mtd, ofs, towrite, &retlen,
+ rewrite_buf);
if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
/* Argh. We tried. Really we did. */
@@ -619,13 +619,14 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
if (breakme++ == 20) {
printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
breakme = 0;
- c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
- brokenbuf);
+ mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
+ brokenbuf);
ret = -EIO;
} else
#endif
- ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
+ ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
+ &retlen, c->wbuf);
if (ret) {
printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
@@ -861,8 +862,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
v += wbuf_retlen;
if (vlen >= c->wbuf_pagesize) {
- ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
- &wbuf_retlen, v);
+ ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
+ &wbuf_retlen, v);
if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
goto outfile;
@@ -948,11 +949,11 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
int ret;
if (!jffs2_is_writebuffered(c))
- return c->mtd->read(c->mtd, ofs, len, retlen, buf);
+ return mtd_read(c->mtd, ofs, len, retlen, buf);
/* Read flash */
down_read(&c->wbuf_sem);
- ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+ ret = mtd_read(c->mtd, ofs, len, retlen, buf);
if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
if (ret == -EBADMSG)
@@ -1031,7 +1032,7 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
ops.datbuf = NULL;
- ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+ ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
" bytes, read %zd bytes, error %d\n",
@@ -1074,7 +1075,7 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
ops.datbuf = NULL;
- ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+ ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
" bytes, read %zd bytes, error %d\n",
@@ -1100,7 +1101,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
ops.datbuf = NULL;
- ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
+ ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
if (ret || ops.oobretlen != ops.ooblen) {
printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
" bytes, read %zd bytes, error %d\n",
@@ -1129,11 +1130,8 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
if( ++jeb->bad_count < MAX_ERASE_FAILURES)
return 0;
- if (!c->mtd->block_markbad)
- return 1; // What else can we do?
-
printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
- ret = c->mtd->block_markbad(c->mtd, bad_offset);
+ ret = mtd_block_markbad(c->mtd, bad_offset);
if (ret) {
D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c
index b9276b11bac..a1bda9dab3f 100644
--- a/fs/jffs2/writev.c
+++ b/fs/jffs2/writev.c
@@ -13,30 +13,6 @@
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-/* This ought to be in core MTD code. All registered MTD devices
- without writev should have this put in place. Bug the MTD
- maintainer */
-static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
-{
- unsigned long i;
- size_t totlen = 0, thislen;
- int ret = 0;
-
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
- if (retlen)
- *retlen = totlen;
- return ret;
-}
-
int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
@@ -50,18 +26,14 @@ int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
}
}
- if (c->mtd->writev)
- return c->mtd->writev(c->mtd, vecs, count, to, retlen);
- else {
- return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
- }
+ return mtd_writev(c->mtd, vecs, count, to, retlen);
}
int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
size_t *retlen, const u_char *buf)
{
int ret;
- ret = c->mtd->write(c->mtd, ofs, len, retlen, buf);
+ ret = mtd_write(c->mtd, ofs, len, retlen, buf);
if (jffs2_sum_active()) {
struct kvec vecs[1];
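The deleted mtd_fake_writev() existed only because some MTD drivers have no writev method; the core mtd_writev() now provides that fallback for every caller, so jffs2 can simply call it. The fallback idea, reduced to a runnable userspace sketch (memcpy stands in for a device write):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>	/* struct iovec */

static size_t fake_writev(char *dst, const struct iovec *vecs, int count)
{
	size_t total = 0;
	int i;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		memcpy(dst + total, vecs[i].iov_base, vecs[i].iov_len);
		total += vecs[i].iov_len;
	}
	return total;
}

int main(void)
{
	char buf[32] = { 0 };
	struct iovec v[2] = {
		{ .iov_base = "hello ", .iov_len = 6 },
		{ .iov_base = "world",  .iov_len = 5 },
	};
	size_t n = fake_writev(buf, v, 2);

	printf("wrote %zu bytes: %s\n", n, buf);
	return 0;
}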
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 6f98a186677..f19d1e04a37 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -68,7 +68,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
unsigned int oldflags;
int err;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
return err;
@@ -120,7 +120,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setflags_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return err;
}
default:
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cc5f811ed38..2eb952c41a6 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
if (freezing(current)) {
spin_unlock_irq(&log_redrive_lock);
- refrigerator();
+ try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&log_redrive_lock);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index af9606057dd..bb8b661bcc5 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
if (freezing(current)) {
LAZY_UNLOCK(flags);
- refrigerator();
+ try_to_freeze();
} else {
DECLARE_WAITQUEUE(wq, current);
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
if (freezing(current)) {
TXN_UNLOCK();
- refrigerator();
+ try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
TXN_UNLOCK();
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a112ad96e47..5f7c160ea64 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -72,7 +72,7 @@ static inline void free_ea_wmap(struct inode *inode)
* RETURN: Errors from subroutines
*
*/
-static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
+static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int rc = 0;
@@ -205,7 +205,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
* note:
* EACCESS: user needs search+write permission on the parent directory
*/
-static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
+static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -1353,7 +1353,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* FUNCTION: Create a special file (device)
*/
static int jfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
struct jfs_inode_info *jfs_ip;
struct btstack btstack;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index a44eff076c1..682bca642f3 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -119,7 +119,6 @@ static void jfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
struct jfs_inode_info *ji = JFS_IP(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(jfs_inode_cachep, ji);
}
@@ -609,9 +608,9 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
-static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);
+ struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);
if (sbi->uid != -1)
seq_printf(seq, ",uid=%d", sbi->uid);
diff --git a/fs/libfs.c b/fs/libfs.c
index f6d411eef1e..5b2dbb3ba4f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -12,7 +12,7 @@
#include <linux/mutex.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h>
+#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <asm/uaccess.h>
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 1ca0679c80b..2240d384d78 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -403,7 +403,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file)
{
struct super_block *sb = datap;
- return sb == file->f_file->f_path.mnt->mnt_sb;
+ return sb == file->f_file->f_path.dentry->d_sb;
}
/**
diff --git a/fs/locks.c b/fs/locks.c
index 3b0d05dcd7c..637694bf3a0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
int want_write = (mode & O_ACCMODE) != O_RDONLY;
new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+ if (IS_ERR(new_fl))
+ return PTR_ERR(new_fl);
lock_flocks();
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
if (fl->fl_owner == current->files)
i_have_this_lease = 1;
- if (IS_ERR(new_fl) && !i_have_this_lease
- && ((mode & O_NONBLOCK) == 0)) {
- error = PTR_ERR(new_fl);
- goto out;
- }
-
break_time = 0;
if (lease_break_time > 0) {
break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ restart:
out:
unlock_flocks();
- if (!IS_ERR(new_fl))
- locks_free_lock(new_fl);
+ locks_free_lock(new_fl);
return error;
}
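The __break_lease() rework relies on lease_alloc() returning an ERR_PTR-encoded pointer: checking IS_ERR() right after the call lets the function bail out immediately instead of threading the error value through to the exit path. A minimal userspace re-implementation of that encoding, for illustration only (the real macros live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_lease(int fail)
{
	static int lease;			/* stand-in for a real lock object */

	if (fail)
		return ERR_PTR(-ENOMEM);	/* failure encoded in the pointer */
	return &lease;
}

int main(void)
{
	void *fl = alloc_lease(1);

	if (IS_ERR(fl)) {			/* bail out early, as the patch does */
		printf("error: %ld\n", PTR_ERR(fl));
		return 1;
	}
	return 0;
}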
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 339e17e9133..e97404d611e 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -13,13 +13,14 @@
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
-static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
+ void *buf)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
size_t retlen;
int ret;
- ret = mtd->read(mtd, ofs, len, &retlen, buf);
+ ret = mtd_read(mtd, ofs, len, &retlen, buf);
BUG_ON(ret == -EINVAL);
if (ret)
return ret;
@@ -31,7 +32,8 @@ static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
return 0;
}
-static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
+ void *buf)
{
struct logfs_super *super = logfs_super(sb);
struct mtd_info *mtd = super->s_mtd;
@@ -47,7 +49,7 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
BUG_ON(len > PAGE_CACHE_SIZE);
page_start = ofs & PAGE_CACHE_MASK;
page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
- ret = mtd->write(mtd, ofs, len, &retlen, buf);
+ ret = mtd_write(mtd, ofs, len, &retlen, buf);
if (ret || (retlen != len))
return -EIO;
@@ -60,14 +62,15 @@ static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
* asynchronous properties. So just to prevent the first implementor of such
* a thing from breaking logfs in 2350, we do the usual pointless dance to
* declare a completion variable and wait for completion before returning
- * from mtd_erase(). What an exercise in futility!
+ * from logfs_mtd_erase(). What an exercise in futility!
*/
static void logfs_erase_callback(struct erase_info *ei)
{
complete((struct completion *)ei->priv);
}
-static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
+static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
+ size_t len)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
@@ -84,7 +87,7 @@ static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
return 0;
}
-static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
+static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
int ensure_write)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
@@ -102,30 +105,29 @@ static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
ei.len = len;
ei.callback = logfs_erase_callback;
ei.priv = (long)&complete;
- ret = mtd->erase(mtd, &ei);
+ ret = mtd_erase(mtd, &ei);
if (ret)
return -EIO;
wait_for_completion(&complete);
if (ei.state != MTD_ERASE_DONE)
return -EIO;
- return mtd_erase_mapping(sb, ofs, len);
+ return logfs_mtd_erase_mapping(sb, ofs, len);
}
-static void mtd_sync(struct super_block *sb)
+static void logfs_mtd_sync(struct super_block *sb)
{
struct mtd_info *mtd = logfs_super(sb)->s_mtd;
- if (mtd->sync)
- mtd->sync(mtd);
+ mtd_sync(mtd);
}
-static int mtd_readpage(void *_sb, struct page *page)
+static int logfs_mtd_readpage(void *_sb, struct page *page)
{
struct super_block *sb = _sb;
int err;
- err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+ err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
if (err == -EUCLEAN || err == -EBADMSG) {
/* -EBADMSG happens regularly on power failures */
@@ -143,18 +145,18 @@ static int mtd_readpage(void *_sb, struct page *page)
return err;
}
-static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
+static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
- filler_t *filler = mtd_readpage;
+ filler_t *filler = logfs_mtd_readpage;
struct mtd_info *mtd = super->s_mtd;
- if (!mtd->block_isbad)
+ if (!mtd_can_have_bb(mtd))
return NULL;
*ofs = 0;
- while (mtd->block_isbad(mtd, *ofs)) {
+ while (mtd_block_isbad(mtd, *ofs)) {
*ofs += mtd->erasesize;
if (*ofs >= mtd->size)
return NULL;
@@ -163,18 +165,18 @@ static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
-static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
+static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
- filler_t *filler = mtd_readpage;
+ filler_t *filler = logfs_mtd_readpage;
struct mtd_info *mtd = super->s_mtd;
- if (!mtd->block_isbad)
+ if (!mtd_can_have_bb(mtd))
return NULL;
*ofs = mtd->size - mtd->erasesize;
- while (mtd->block_isbad(mtd, *ofs)) {
+ while (mtd_block_isbad(mtd, *ofs)) {
*ofs -= mtd->erasesize;
if (*ofs <= 0)
return NULL;
@@ -184,7 +186,7 @@ static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
-static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
+static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
size_t nr_pages)
{
struct logfs_super *super = logfs_super(sb);
@@ -196,8 +198,8 @@ static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
page = find_lock_page(mapping, index + i);
BUG_ON(!page);
- err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
- page_address(page));
+ err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+ page_address(page));
unlock_page(page);
page_cache_release(page);
if (err)
@@ -206,7 +208,7 @@ static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
return 0;
}
-static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
+static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
struct logfs_super *super = logfs_super(sb);
int head;
@@ -227,15 +229,15 @@ static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
len += head;
}
len = PAGE_ALIGN(len);
- __mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
+ __logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
-static void mtd_put_device(struct logfs_super *s)
+static void logfs_mtd_put_device(struct logfs_super *s)
{
put_mtd_device(s->s_mtd);
}
-static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
+static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
struct logfs_super *super = logfs_super(sb);
void *buf;
@@ -244,7 +246,7 @@ static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
buf = kmalloc(super->s_writesize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = mtd_read(sb, ofs, super->s_writesize, buf);
+ err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
if (err)
goto out;
if (memchr_inv(buf, 0xff, super->s_writesize))
@@ -255,14 +257,14 @@ out:
}
static const struct logfs_device_ops mtd_devops = {
- .find_first_sb = mtd_find_first_sb,
- .find_last_sb = mtd_find_last_sb,
- .readpage = mtd_readpage,
- .writeseg = mtd_writeseg,
- .erase = mtd_erase,
- .can_write_buf = mtd_can_write_buf,
- .sync = mtd_sync,
- .put_device = mtd_put_device,
+ .find_first_sb = logfs_mtd_find_first_sb,
+ .find_last_sb = logfs_mtd_find_last_sb,
+ .readpage = logfs_mtd_readpage,
+ .writeseg = logfs_mtd_writeseg,
+ .erase = logfs_mtd_erase,
+ .can_write_buf = logfs_mtd_can_write_buf,
+ .sync = logfs_mtd_sync,
+ .put_device = logfs_mtd_put_device,
};
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index b7d7f67cee5..501043e8966 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -482,7 +482,7 @@ out:
return ret;
}
-static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
@@ -501,7 +501,7 @@ static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return __logfs_create(dir, dentry, inode, NULL, 0);
}
-static int logfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -517,7 +517,7 @@ static int logfs_create(struct inode *dir, struct dentry *dentry, int mode,
return __logfs_create(dir, dentry, inode, NULL, 0);
}
-static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int logfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
struct inode *inode;
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index 7e441ad5f79..388df1aa35e 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -144,7 +144,6 @@ struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *is_cached)
static void logfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
}
@@ -324,7 +323,7 @@ static void logfs_set_ino_generation(struct super_block *sb,
mutex_unlock(&super->s_journal_mutex);
}
-struct inode *logfs_new_inode(struct inode *dir, int mode)
+struct inode *logfs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 398ecff6e54..926373866a5 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -520,7 +520,7 @@ extern const struct super_operations logfs_super_operations;
struct inode *logfs_iget(struct super_block *sb, ino_t ino);
struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *cookie);
void logfs_safe_iput(struct inode *inode, int cookie);
-struct inode *logfs_new_inode(struct inode *dir, int mode);
+struct inode *logfs_new_inode(struct inode *dir, umode_t mode);
struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino);
struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino);
int logfs_init_inode_cache(void);
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index ef175cb8cfd..4bc50dac8e9 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -209,7 +209,7 @@ void minix_free_inode(struct inode * inode)
mark_buffer_dirty(bh);
}
-struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
+struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
{
struct super_block *sb = dir->i_sb;
struct minix_sb_info *sbi = minix_sb(sb);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1d9e33966db..fa8b612b8ce 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -71,7 +71,6 @@ static struct inode *minix_alloc_inode(struct super_block *sb)
static void minix_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(minix_inode_cachep, minix_i(inode));
}
@@ -263,23 +262,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
goto out_no_root;
}
- ret = -ENOMEM;
- s->s_root = d_alloc_root(root_inode);
- if (!s->s_root)
- goto out_iput;
-
- if (!(s->s_flags & MS_RDONLY)) {
- if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
- ms->s_state &= ~MINIX_VALID_FS;
- mark_buffer_dirty(bh);
- }
- if (!(sbi->s_mount_state & MINIX_VALID_FS))
- printk("MINIX-fs: mounting unchecked file system, "
- "running fsck is recommended\n");
- else if (sbi->s_mount_state & MINIX_ERROR_FS)
- printk("MINIX-fs: mounting file system with errors, "
- "running fsck is recommended\n");
-
/* Apparently minix can create filesystems that allocate more blocks for
* the bitmaps than needed. We simply ignore that, but verify it didn't
* create one with not enough blocks and bail out if so.
@@ -300,6 +282,23 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
goto out_iput;
}
+ ret = -ENOMEM;
+ s->s_root = d_alloc_root(root_inode);
+ if (!s->s_root)
+ goto out_iput;
+
+ if (!(s->s_flags & MS_RDONLY)) {
+ if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
+ ms->s_state &= ~MINIX_VALID_FS;
+ mark_buffer_dirty(bh);
+ }
+ if (!(sbi->s_mount_state & MINIX_VALID_FS))
+ printk("MINIX-fs: mounting unchecked file system, "
+ "running fsck is recommended\n");
+ else if (sbi->s_mount_state & MINIX_ERROR_FS)
+ printk("MINIX-fs: mounting file system with errors, "
+ "running fsck is recommended\n");
+
return 0;
out_iput:
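The minix_fill_super() hunks only move code: the bitmap/block-count sanity checks now run before the root dentry is allocated and s_root is set, so a corrupt filesystem is rejected without a half-installed root to unwind. A toy illustration of that "validate first, publish last" ordering (not minix code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sb { void *root; };

static int fill_super(struct sb *s, int blocks_ok)
{
	if (!blocks_ok)			/* all sanity checks come first */
		return -EINVAL;

	s->root = malloc(1);		/* publish the root only on success */
	return s->root ? 0 : -ENOMEM;
}

int main(void)
{
	struct sb s = { 0 };
	int err = fill_super(&s, 0);

	printf("fill_super: %d, root %s\n", err, s.root ? "set" : "not set");
	free(s.root);
	return 0;
}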
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 26bbd55e82e..c889ef0aa57 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -46,7 +46,7 @@ struct minix_sb_info {
extern struct inode *minix_iget(struct super_block *, unsigned long);
extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
-extern struct inode * minix_new_inode(const struct inode *, int, int *);
+extern struct inode * minix_new_inode(const struct inode *, umode_t, int *);
extern void minix_free_inode(struct inode * inode);
extern unsigned long minix_count_free_inodes(struct super_block *sb);
extern int minix_new_block(struct inode * inode);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 6e6777f1b4b..2f76e38c206 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -36,7 +36,7 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st
return NULL;
}
-static int minix_mknod(struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
int error;
struct inode *inode;
@@ -54,7 +54,7 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, int mode, dev_
return error;
}
-static int minix_create(struct inode * dir, struct dentry *dentry, int mode,
+static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return minix_mknod(dir, dentry, mode, 0);
@@ -103,7 +103,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+static int minix_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err = -EMLINK;
diff --git a/fs/mount.h b/fs/mount.h
new file mode 100644
index 00000000000..4ef36d93e5a
--- /dev/null
+++ b/fs/mount.h
@@ -0,0 +1,76 @@
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+
+struct mnt_namespace {
+ atomic_t count;
+ struct mount * root;
+ struct list_head list;
+ wait_queue_head_t poll;
+ int event;
+};
+
+struct mnt_pcp {
+ int mnt_count;
+ int mnt_writers;
+};
+
+struct mount {
+ struct list_head mnt_hash;
+ struct mount *mnt_parent;
+ struct dentry *mnt_mountpoint;
+ struct vfsmount mnt;
+#ifdef CONFIG_SMP
+ struct mnt_pcp __percpu *mnt_pcp;
+ atomic_t mnt_longterm; /* how many of the refs are longterm */
+#else
+ int mnt_count;
+ int mnt_writers;
+#endif
+ struct list_head mnt_mounts; /* list of children, anchored here */
+ struct list_head mnt_child; /* and going through their mnt_child */
+ struct list_head mnt_instance; /* mount instance on sb->s_mounts */
+ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ struct list_head mnt_list;
+ struct list_head mnt_expire; /* link in fs-specific expiry list */
+ struct list_head mnt_share; /* circular list of shared mounts */
+ struct list_head mnt_slave_list;/* list of slave mounts */
+ struct list_head mnt_slave; /* slave list entry */
+ struct mount *mnt_master; /* slave is on master->mnt_slave_list */
+ struct mnt_namespace *mnt_ns; /* containing namespace */
+#ifdef CONFIG_FSNOTIFY
+ struct hlist_head mnt_fsnotify_marks;
+ __u32 mnt_fsnotify_mask;
+#endif
+ int mnt_id; /* mount identifier */
+ int mnt_group_id; /* peer group identifier */
+ int mnt_expiry_mark; /* true if marked for expiry */
+ int mnt_pinned;
+ int mnt_ghosts;
+};
+
+static inline struct mount *real_mount(struct vfsmount *mnt)
+{
+ return container_of(mnt, struct mount, mnt);
+}
+
+static inline int mnt_has_parent(struct mount *mnt)
+{
+ return mnt != mnt->mnt_parent;
+}
+
+extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
+
+static inline void get_mnt_ns(struct mnt_namespace *ns)
+{
+ atomic_inc(&ns->count);
+}
+
+struct proc_mounts {
+ struct seq_file m; /* must be the first element */
+ struct mnt_namespace *ns;
+ struct path root;
+ int (*show)(struct seq_file *, struct vfsmount *);
+};
+
+extern const struct seq_operations mounts_op;
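Most of the fs/namei.c and fs/namespace.c churn that follows is mechanical fallout from this new header: struct vfsmount is now embedded inside struct mount, and real_mount() recovers the containing structure from a pointer to the embedded member. The underlying trick is plain container_of(), shown here as a self-contained sketch with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfsmount { int mnt_flags; };

struct mount {
	int mnt_id;
	struct vfsmount mnt;		/* embedded, as in fs/mount.h above */
};

static struct mount *real_mount(struct vfsmount *m)
{
	return container_of(m, struct mount, mnt);
}

int main(void)
{
	struct mount m = { .mnt_id = 42 };
	struct vfsmount *v = &m.mnt;	/* only the embedded part is visible */

	printf("mnt_id via real_mount(): %d\n", real_mount(v)->mnt_id);
	return 0;
}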
diff --git a/fs/namei.c b/fs/namei.c
index 5008f01787f..c283a1ec008 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -36,6 +36,7 @@
#include <asm/uaccess.h>
#include "internal.h"
+#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
@@ -676,36 +677,38 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
static int follow_up_rcu(struct path *path)
{
- struct vfsmount *parent;
+ struct mount *mnt = real_mount(path->mnt);
+ struct mount *parent;
struct dentry *mountpoint;
- parent = path->mnt->mnt_parent;
- if (parent == path->mnt)
+ parent = mnt->mnt_parent;
+ if (&parent->mnt == path->mnt)
return 0;
- mountpoint = path->mnt->mnt_mountpoint;
+ mountpoint = mnt->mnt_mountpoint;
path->dentry = mountpoint;
- path->mnt = parent;
+ path->mnt = &parent->mnt;
return 1;
}
int follow_up(struct path *path)
{
- struct vfsmount *parent;
+ struct mount *mnt = real_mount(path->mnt);
+ struct mount *parent;
struct dentry *mountpoint;
br_read_lock(vfsmount_lock);
- parent = path->mnt->mnt_parent;
- if (parent == path->mnt) {
+ parent = mnt->mnt_parent;
+ if (&parent->mnt == path->mnt) {
br_read_unlock(vfsmount_lock);
return 0;
}
- mntget(parent);
- mountpoint = dget(path->mnt->mnt_mountpoint);
+ mntget(&parent->mnt);
+ mountpoint = dget(mnt->mnt_mountpoint);
br_read_unlock(vfsmount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
- path->mnt = parent;
+ path->mnt = &parent->mnt;
return 1;
}
@@ -884,7 +887,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode)
{
for (;;) {
- struct vfsmount *mounted;
+ struct mount *mounted;
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
@@ -898,8 +901,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
mounted = __lookup_mnt(path->mnt, path->dentry, 1);
if (!mounted)
break;
- path->mnt = mounted;
- path->dentry = mounted->mnt_root;
+ path->mnt = &mounted->mnt;
+ path->dentry = mounted->mnt.mnt_root;
nd->flags |= LOOKUP_JUMPED;
nd->seq = read_seqcount_begin(&path->dentry->d_seq);
/*
@@ -915,12 +918,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
static void follow_mount_rcu(struct nameidata *nd)
{
while (d_mountpoint(nd->path.dentry)) {
- struct vfsmount *mounted;
+ struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
if (!mounted)
break;
- nd->path.mnt = mounted;
- nd->path.dentry = mounted->mnt_root;
+ nd->path.mnt = &mounted->mnt;
+ nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
}
@@ -1976,7 +1979,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
}
}
-int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int error = may_create(dir, dentry);
@@ -2177,7 +2180,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode) {
- int mode = op->mode;
+ umode_t mode = op->mode;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
@@ -2444,7 +2447,7 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
}
EXPORT_SYMBOL(user_path_create);
-int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
@@ -2472,7 +2475,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return error;
}
-static int may_mknod(mode_t mode)
+static int may_mknod(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
@@ -2489,7 +2492,7 @@ static int may_mknod(mode_t mode)
}
}
-SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
unsigned, dev)
{
struct dentry *dentry;
@@ -2536,12 +2539,12 @@ out_dput:
return error;
}
-SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
+SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error = may_create(dir, dentry);
@@ -2562,7 +2565,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return error;
}
-SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
struct dentry *dentry;
struct path path;
@@ -2590,7 +2593,7 @@ out_dput:
return error;
}
-SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
return sys_mkdirat(AT_FDCWD, pathname, mode);
}
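One detail worth noting in the do_last() hunk above: for filesystems without POSIX ACLs the requested create mode is still masked with the process umask before vfs_create() sees it. The same masking, as a tiny userspace program:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	mode_t requested = 0666;
	mode_t mask = umask(022);	/* returns the previous umask... */

	umask(mask);			/* ...which we restore right away */
	printf("requested %04o, umask %04o, effective %04o\n",
	       (unsigned)requested, (unsigned)mask,
	       (unsigned)(requested & ~mask));
	return 0;
}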
diff --git a/fs/namespace.c b/fs/namespace.c
index 6d3a1963879..e6081996c9a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -9,30 +9,17 @@
*/
#include <linux/syscalls.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/percpu.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/acct.h>
+#include <linux/export.h>
#include <linux/capability.h>
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/sysfs.h>
-#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
-#include <linux/nsproxy.h>
#include <linux/security.h>
-#include <linux/mount.h>
-#include <linux/ramfs.h>
-#include <linux/log2.h>
#include <linux/idr.h>
-#include <linux/fs_struct.h>
-#include <linux/fsnotify.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
+#include <linux/acct.h> /* acct_auto_close_mnt */
+#include <linux/ramfs.h> /* init_rootfs */
+#include <linux/fs_struct.h> /* get_fs_root et.al. */
+#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
+#include <linux/uaccess.h>
#include "pnode.h"
#include "internal.h"
@@ -78,7 +65,7 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
* allocation is serialized by namespace_sem, but we need the spinlock to
* serialize with freeing.
*/
-static int mnt_alloc_id(struct vfsmount *mnt)
+static int mnt_alloc_id(struct mount *mnt)
{
int res;
@@ -95,7 +82,7 @@ retry:
return res;
}
-static void mnt_free_id(struct vfsmount *mnt)
+static void mnt_free_id(struct mount *mnt)
{
int id = mnt->mnt_id;
spin_lock(&mnt_id_lock);
@@ -110,7 +97,7 @@ static void mnt_free_id(struct vfsmount *mnt)
*
* mnt_group_ida is protected by namespace_sem
*/
-static int mnt_alloc_group_id(struct vfsmount *mnt)
+static int mnt_alloc_group_id(struct mount *mnt)
{
int res;
@@ -129,7 +116,7 @@ static int mnt_alloc_group_id(struct vfsmount *mnt)
/*
* Release a peer group ID
*/
-void mnt_release_group_id(struct vfsmount *mnt)
+void mnt_release_group_id(struct mount *mnt)
{
int id = mnt->mnt_group_id;
ida_remove(&mnt_group_ida, id);
@@ -141,7 +128,7 @@ void mnt_release_group_id(struct vfsmount *mnt)
/*
* vfsmount lock must be held for read
*/
-static inline void mnt_add_count(struct vfsmount *mnt, int n)
+static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
this_cpu_add(mnt->mnt_pcp->mnt_count, n);
@@ -152,35 +139,10 @@ static inline void mnt_add_count(struct vfsmount *mnt, int n)
#endif
}
-static inline void mnt_set_count(struct vfsmount *mnt, int n)
-{
-#ifdef CONFIG_SMP
- this_cpu_write(mnt->mnt_pcp->mnt_count, n);
-#else
- mnt->mnt_count = n;
-#endif
-}
-
-/*
- * vfsmount lock must be held for read
- */
-static inline void mnt_inc_count(struct vfsmount *mnt)
-{
- mnt_add_count(mnt, 1);
-}
-
-/*
- * vfsmount lock must be held for read
- */
-static inline void mnt_dec_count(struct vfsmount *mnt)
-{
- mnt_add_count(mnt, -1);
-}
-
/*
* vfsmount lock must be held for write
*/
-unsigned int mnt_get_count(struct vfsmount *mnt)
+unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
@@ -196,9 +158,9 @@ unsigned int mnt_get_count(struct vfsmount *mnt)
#endif
}
-static struct vfsmount *alloc_vfsmnt(const char *name)
+static struct mount *alloc_vfsmnt(const char *name)
{
- struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
+ struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
if (mnt) {
int err;
@@ -277,7 +239,7 @@ int __mnt_is_readonly(struct vfsmount *mnt)
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
-static inline void mnt_inc_writers(struct vfsmount *mnt)
+static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_inc(mnt->mnt_pcp->mnt_writers);
@@ -286,7 +248,7 @@ static inline void mnt_inc_writers(struct vfsmount *mnt)
#endif
}
-static inline void mnt_dec_writers(struct vfsmount *mnt)
+static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
this_cpu_dec(mnt->mnt_pcp->mnt_writers);
@@ -295,7 +257,7 @@ static inline void mnt_dec_writers(struct vfsmount *mnt)
#endif
}
-static unsigned int mnt_get_writers(struct vfsmount *mnt)
+static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
@@ -311,6 +273,15 @@ static unsigned int mnt_get_writers(struct vfsmount *mnt)
#endif
}
+static int mnt_is_readonly(struct vfsmount *mnt)
+{
+ if (mnt->mnt_sb->s_readonly_remount)
+ return 1;
+ /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
+ smp_rmb();
+ return __mnt_is_readonly(mnt);
+}
+
/*
* Most r/o checks on a fs are for operations that take
* discrete amounts of time, like a write() or unlink().
@@ -321,7 +292,7 @@ static unsigned int mnt_get_writers(struct vfsmount *mnt)
*/
/**
* mnt_want_write - get write access to a mount
- * @mnt: the mount on which to take a write
+ * @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is
* about to be performed to it, and makes sure that
@@ -329,8 +300,9 @@ static unsigned int mnt_get_writers(struct vfsmount *mnt)
* the write operation is finished, mnt_drop_write()
* must be called. This is effectively a refcount.
*/
-int mnt_want_write(struct vfsmount *mnt)
+int mnt_want_write(struct vfsmount *m)
{
+ struct mount *mnt = real_mount(m);
int ret = 0;
preempt_disable();
@@ -341,7 +313,7 @@ int mnt_want_write(struct vfsmount *mnt)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (mnt->mnt_flags & MNT_WRITE_HOLD)
+ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
@@ -349,12 +321,10 @@ int mnt_want_write(struct vfsmount *mnt)
* MNT_WRITE_HOLD is cleared.
*/
smp_rmb();
- if (__mnt_is_readonly(mnt)) {
+ if (mnt_is_readonly(m)) {
mnt_dec_writers(mnt);
ret = -EROFS;
- goto out;
}
-out:
preempt_enable();
return ret;
}
@@ -378,7 +348,7 @@ int mnt_clone_write(struct vfsmount *mnt)
if (__mnt_is_readonly(mnt))
return -EROFS;
preempt_disable();
- mnt_inc_writers(mnt);
+ mnt_inc_writers(real_mount(mnt));
preempt_enable();
return 0;
}
@@ -412,17 +382,23 @@ EXPORT_SYMBOL_GPL(mnt_want_write_file);
void mnt_drop_write(struct vfsmount *mnt)
{
preempt_disable();
- mnt_dec_writers(mnt);
+ mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
-static int mnt_make_readonly(struct vfsmount *mnt)
+void mnt_drop_write_file(struct file *file)
+{
+ mnt_drop_write(file->f_path.mnt);
+}
+EXPORT_SYMBOL(mnt_drop_write_file);
+
+static int mnt_make_readonly(struct mount *mnt)
{
int ret = 0;
br_write_lock(vfsmount_lock);
- mnt->mnt_flags |= MNT_WRITE_HOLD;
+ mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
/*
* After storing MNT_WRITE_HOLD, we'll read the counters. This store
* should be visible before we do.
@@ -448,25 +424,61 @@ static int mnt_make_readonly(struct vfsmount *mnt)
if (mnt_get_writers(mnt) > 0)
ret = -EBUSY;
else
- mnt->mnt_flags |= MNT_READONLY;
+ mnt->mnt.mnt_flags |= MNT_READONLY;
/*
* MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
- mnt->mnt_flags &= ~MNT_WRITE_HOLD;
+ mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
br_write_unlock(vfsmount_lock);
return ret;
}
-static void __mnt_unmake_readonly(struct vfsmount *mnt)
+static void __mnt_unmake_readonly(struct mount *mnt)
{
br_write_lock(vfsmount_lock);
- mnt->mnt_flags &= ~MNT_READONLY;
+ mnt->mnt.mnt_flags &= ~MNT_READONLY;
+ br_write_unlock(vfsmount_lock);
+}
+
+int sb_prepare_remount_readonly(struct super_block *sb)
+{
+ struct mount *mnt;
+ int err = 0;
+
+ /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
+ if (atomic_long_read(&sb->s_remove_count))
+ return -EBUSY;
+
+ br_write_lock(vfsmount_lock);
+ list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
+ if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
+ mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
+ smp_mb();
+ if (mnt_get_writers(mnt) > 0) {
+ err = -EBUSY;
+ break;
+ }
+ }
+ }
+ if (!err && atomic_long_read(&sb->s_remove_count))
+ err = -EBUSY;
+
+ if (!err) {
+ sb->s_readonly_remount = 1;
+ smp_wmb();
+ }
+ list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
+ if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
+ mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
+ }
br_write_unlock(vfsmount_lock);
+
+ return err;
}
-static void free_vfsmnt(struct vfsmount *mnt)
+static void free_vfsmnt(struct mount *mnt)
{
kfree(mnt->mnt_devname);
mnt_free_id(mnt);
@@ -481,20 +493,20 @@ static void free_vfsmnt(struct vfsmount *mnt)
* @dir. If @dir is set return the first mount else return the last mount.
* vfsmount_lock must be held for read or write.
*/
-struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
+struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
int dir)
{
struct list_head *head = mount_hashtable + hash(mnt, dentry);
struct list_head *tmp = head;
- struct vfsmount *p, *found = NULL;
+ struct mount *p, *found = NULL;
for (;;) {
tmp = dir ? tmp->next : tmp->prev;
p = NULL;
if (tmp == head)
break;
- p = list_entry(tmp, struct vfsmount, mnt_hash);
- if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
+ p = list_entry(tmp, struct mount, mnt_hash);
+ if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
found = p;
break;
}
@@ -508,16 +520,21 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
*/
struct vfsmount *lookup_mnt(struct path *path)
{
- struct vfsmount *child_mnt;
+ struct mount *child_mnt;
br_read_lock(vfsmount_lock);
- if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
- mntget(child_mnt);
- br_read_unlock(vfsmount_lock);
- return child_mnt;
+ child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
+ if (child_mnt) {
+ mnt_add_count(child_mnt, 1);
+ br_read_unlock(vfsmount_lock);
+ return &child_mnt->mnt;
+ } else {
+ br_read_unlock(vfsmount_lock);
+ return NULL;
+ }
}
-static inline int check_mnt(struct vfsmount *mnt)
+static inline int check_mnt(struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
@@ -548,12 +565,12 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
* Clear dentry's mounted state if it has no remaining mounts.
* vfsmount_lock must be held for write.
*/
-static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
+static void dentry_reset_mounted(struct dentry *dentry)
{
unsigned u;
for (u = 0; u < HASH_SIZE; u++) {
- struct vfsmount *p;
+ struct mount *p;
list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
if (p->mnt_mountpoint == dentry)
@@ -568,25 +585,26 @@ static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
/*
* vfsmount lock must be held for write
*/
-static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
+static void detach_mnt(struct mount *mnt, struct path *old_path)
{
old_path->dentry = mnt->mnt_mountpoint;
- old_path->mnt = mnt->mnt_parent;
+ old_path->mnt = &mnt->mnt_parent->mnt;
mnt->mnt_parent = mnt;
- mnt->mnt_mountpoint = mnt->mnt_root;
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_hash);
- dentry_reset_mounted(old_path->mnt, old_path->dentry);
+ dentry_reset_mounted(old_path->dentry);
}
/*
* vfsmount lock must be held for write
*/
-void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
- struct vfsmount *child_mnt)
+void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry,
+ struct mount *child_mnt)
{
- child_mnt->mnt_parent = mntget(mnt);
+ mnt_add_count(mnt, 1); /* essentially, that's mntget */
child_mnt->mnt_mountpoint = dget(dentry);
+ child_mnt->mnt_parent = mnt;
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
@@ -595,15 +613,15 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
/*
* vfsmount lock must be held for write
*/
-static void attach_mnt(struct vfsmount *mnt, struct path *path)
+static void attach_mnt(struct mount *mnt, struct path *path)
{
- mnt_set_mountpoint(path->mnt, path->dentry, mnt);
+ mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
hash(path->mnt, path->dentry));
- list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
+ list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
}
-static inline void __mnt_make_longterm(struct vfsmount *mnt)
+static inline void __mnt_make_longterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
atomic_inc(&mnt->mnt_longterm);
@@ -611,7 +629,7 @@ static inline void __mnt_make_longterm(struct vfsmount *mnt)
}
/* needs vfsmount lock for write */
-static inline void __mnt_make_shortterm(struct vfsmount *mnt)
+static inline void __mnt_make_shortterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
atomic_dec(&mnt->mnt_longterm);
@@ -621,10 +639,10 @@ static inline void __mnt_make_shortterm(struct vfsmount *mnt)
/*
* vfsmount lock must be held for write
*/
-static void commit_tree(struct vfsmount *mnt)
+static void commit_tree(struct mount *mnt)
{
- struct vfsmount *parent = mnt->mnt_parent;
- struct vfsmount *m;
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
LIST_HEAD(head);
struct mnt_namespace *n = parent->mnt_ns;
@@ -639,12 +657,12 @@ static void commit_tree(struct vfsmount *mnt)
list_splice(&head, n->list.prev);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
- hash(parent, mnt->mnt_mountpoint));
+ hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
touch_mnt_namespace(n);
}
-static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
{
struct list_head *next = p->mnt_mounts.next;
if (next == &p->mnt_mounts) {
@@ -657,14 +675,14 @@ static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
p = p->mnt_parent;
}
}
- return list_entry(next, struct vfsmount, mnt_child);
+ return list_entry(next, struct mount, mnt_child);
}
-static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
+static struct mount *skip_mnt_tree(struct mount *p)
{
struct list_head *prev = p->mnt_mounts.prev;
while (prev != &p->mnt_mounts) {
- p = list_entry(prev, struct vfsmount, mnt_child);
+ p = list_entry(prev, struct mount, mnt_child);
prev = p->mnt_mounts.prev;
}
return p;
@@ -673,7 +691,7 @@ static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
- struct vfsmount *mnt;
+ struct mount *mnt;
struct dentry *root;
if (!type)
@@ -684,7 +702,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
return ERR_PTR(-ENOMEM);
if (flags & MS_KERNMOUNT)
- mnt->mnt_flags = MNT_INTERNAL;
+ mnt->mnt.mnt_flags = MNT_INTERNAL;
root = mount_fs(type, flags, name, data);
if (IS_ERR(root)) {
@@ -692,19 +710,22 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
return ERR_CAST(root);
}
- mnt->mnt_root = root;
- mnt->mnt_sb = root->d_sb;
- mnt->mnt_mountpoint = mnt->mnt_root;
+ mnt->mnt.mnt_root = root;
+ mnt->mnt.mnt_sb = root->d_sb;
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
- return mnt;
+ br_write_lock(vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
+ br_write_unlock(vfsmount_lock);
+ return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
-static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
+static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
- struct super_block *sb = old->mnt_sb;
- struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
+ struct super_block *sb = old->mnt.mnt_sb;
+ struct mount *mnt = alloc_vfsmnt(old->mnt_devname);
if (mnt) {
if (flag & (CL_SLAVE | CL_PRIVATE))
@@ -718,12 +739,15 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
goto out_free;
}
- mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
atomic_inc(&sb->s_active);
- mnt->mnt_sb = sb;
- mnt->mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt_root;
+ mnt->mnt.mnt_sb = sb;
+ mnt->mnt.mnt_root = dget(root);
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
+ br_write_lock(vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
+ br_write_unlock(vfsmount_lock);
if (flag & CL_SLAVE) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -753,9 +777,10 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
return NULL;
}
-static inline void mntfree(struct vfsmount *mnt)
+static inline void mntfree(struct mount *mnt)
{
- struct super_block *sb = mnt->mnt_sb;
+ struct vfsmount *m = &mnt->mnt;
+ struct super_block *sb = m->mnt_sb;
/*
* This probably indicates that somebody messed
@@ -768,32 +793,32 @@ static inline void mntfree(struct vfsmount *mnt)
* so mnt_get_writers() below is safe.
*/
WARN_ON(mnt_get_writers(mnt));
- fsnotify_vfsmount_delete(mnt);
- dput(mnt->mnt_root);
+ fsnotify_vfsmount_delete(m);
+ dput(m->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
}
-static void mntput_no_expire(struct vfsmount *mnt)
+static void mntput_no_expire(struct mount *mnt)
{
put_again:
#ifdef CONFIG_SMP
br_read_lock(vfsmount_lock);
if (likely(atomic_read(&mnt->mnt_longterm))) {
- mnt_dec_count(mnt);
+ mnt_add_count(mnt, -1);
br_read_unlock(vfsmount_lock);
return;
}
br_read_unlock(vfsmount_lock);
br_write_lock(vfsmount_lock);
- mnt_dec_count(mnt);
+ mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
br_write_unlock(vfsmount_lock);
return;
}
#else
- mnt_dec_count(mnt);
+ mnt_add_count(mnt, -1);
if (likely(mnt_get_count(mnt)))
return;
br_write_lock(vfsmount_lock);
@@ -802,9 +827,10 @@ put_again:
mnt_add_count(mnt, mnt->mnt_pinned + 1);
mnt->mnt_pinned = 0;
br_write_unlock(vfsmount_lock);
- acct_auto_close_mnt(mnt);
+ acct_auto_close_mnt(&mnt->mnt);
goto put_again;
}
+ list_del(&mnt->mnt_instance);
br_write_unlock(vfsmount_lock);
mntfree(mnt);
}
@@ -812,10 +838,11 @@ put_again:
void mntput(struct vfsmount *mnt)
{
if (mnt) {
+ struct mount *m = real_mount(mnt);
/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
- if (unlikely(mnt->mnt_expiry_mark))
- mnt->mnt_expiry_mark = 0;
- mntput_no_expire(mnt);
+ if (unlikely(m->mnt_expiry_mark))
+ m->mnt_expiry_mark = 0;
+ mntput_no_expire(m);
}
}
EXPORT_SYMBOL(mntput);
@@ -823,7 +850,7 @@ EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
- mnt_inc_count(mnt);
+ mnt_add_count(real_mount(mnt), 1);
return mnt;
}
EXPORT_SYMBOL(mntget);
@@ -831,16 +858,17 @@ EXPORT_SYMBOL(mntget);
void mnt_pin(struct vfsmount *mnt)
{
br_write_lock(vfsmount_lock);
- mnt->mnt_pinned++;
+ real_mount(mnt)->mnt_pinned++;
br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);
-void mnt_unpin(struct vfsmount *mnt)
+void mnt_unpin(struct vfsmount *m)
{
+ struct mount *mnt = real_mount(m);
br_write_lock(vfsmount_lock);
if (mnt->mnt_pinned) {
- mnt_inc_count(mnt);
+ mnt_add_count(mnt, 1);
mnt->mnt_pinned--;
}
br_write_unlock(vfsmount_lock);
@@ -858,12 +886,12 @@ static inline void mangle(struct seq_file *m, const char *s)
*
* See also save_mount_options().
*/
-int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
+int generic_show_options(struct seq_file *m, struct dentry *root)
{
const char *options;
rcu_read_lock();
- options = rcu_dereference(mnt->mnt_sb->s_options);
+ options = rcu_dereference(root->d_sb->s_options);
if (options != NULL && options[0]) {
seq_putc(m, ',');
@@ -907,10 +935,10 @@ void replace_mount_options(struct super_block *sb, char *options)
EXPORT_SYMBOL(replace_mount_options);
#ifdef CONFIG_PROC_FS
-/* iterator */
+/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct proc_mounts *p = m->private;
+ struct proc_mounts *p = container_of(m, struct proc_mounts, m);
down_read(&namespace_sem);
return seq_list_start(&p->ns->list, *pos);
@@ -918,7 +946,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct proc_mounts *p = m->private;
+ struct proc_mounts *p = container_of(m, struct proc_mounts, m);
return seq_list_next(v, &p->ns->list, pos);
}
@@ -928,222 +956,18 @@ static void m_stop(struct seq_file *m, void *v)
up_read(&namespace_sem);
}
-int mnt_had_events(struct proc_mounts *p)
-{
- struct mnt_namespace *ns = p->ns;
- int res = 0;
-
- br_read_lock(vfsmount_lock);
- if (p->m.poll_event != ns->event) {
- p->m.poll_event = ns->event;
- res = 1;
- }
- br_read_unlock(vfsmount_lock);
-
- return res;
-}
-
-struct proc_fs_info {
- int flag;
- const char *str;
-};
-
-static int show_sb_opts(struct seq_file *m, struct super_block *sb)
-{
- static const struct proc_fs_info fs_info[] = {
- { MS_SYNCHRONOUS, ",sync" },
- { MS_DIRSYNC, ",dirsync" },
- { MS_MANDLOCK, ",mand" },
- { 0, NULL }
- };
- const struct proc_fs_info *fs_infop;
-
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
- if (sb->s_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
- }
-
- return security_sb_show_options(m, sb);
-}
-
-static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
-{
- static const struct proc_fs_info mnt_info[] = {
- { MNT_NOSUID, ",nosuid" },
- { MNT_NODEV, ",nodev" },
- { MNT_NOEXEC, ",noexec" },
- { MNT_NOATIME, ",noatime" },
- { MNT_NODIRATIME, ",nodiratime" },
- { MNT_RELATIME, ",relatime" },
- { 0, NULL }
- };
- const struct proc_fs_info *fs_infop;
-
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
- if (mnt->mnt_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
- }
-}
-
-static void show_type(struct seq_file *m, struct super_block *sb)
+static int m_show(struct seq_file *m, void *v)
{
- mangle(m, sb->s_type->name);
- if (sb->s_subtype && sb->s_subtype[0]) {
- seq_putc(m, '.');
- mangle(m, sb->s_subtype);
- }
-}
-
-static int show_vfsmnt(struct seq_file *m, void *v)
-{
- struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
- int err = 0;
- struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
-
- if (mnt->mnt_sb->s_op->show_devname) {
- err = mnt->mnt_sb->s_op->show_devname(m, mnt);
- if (err)
- goto out;
- } else {
- mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
- }
- seq_putc(m, ' ');
- seq_path(m, &mnt_path, " \t\n\\");
- seq_putc(m, ' ');
- show_type(m, mnt->mnt_sb);
- seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
- err = show_sb_opts(m, mnt->mnt_sb);
- if (err)
- goto out;
- show_mnt_opts(m, mnt);
- if (mnt->mnt_sb->s_op->show_options)
- err = mnt->mnt_sb->s_op->show_options(m, mnt);
- seq_puts(m, " 0 0\n");
-out:
- return err;
+ struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct mount *r = list_entry(v, struct mount, mnt_list);
+ return p->show(m, &r->mnt);
}
const struct seq_operations mounts_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
- .show = show_vfsmnt
-};
-
-static int show_mountinfo(struct seq_file *m, void *v)
-{
- struct proc_mounts *p = m->private;
- struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
- struct super_block *sb = mnt->mnt_sb;
- struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
- struct path root = p->root;
- int err = 0;
-
- seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
- MAJOR(sb->s_dev), MINOR(sb->s_dev));
- if (sb->s_op->show_path)
- err = sb->s_op->show_path(m, mnt);
- else
- seq_dentry(m, mnt->mnt_root, " \t\n\\");
- if (err)
- goto out;
- seq_putc(m, ' ');
- seq_path_root(m, &mnt_path, &root, " \t\n\\");
- if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
- /*
- * Mountpoint is outside root, discard that one. Ugly,
- * but less so than trying to do that in iterator in a
- * race-free way (due to renames).
- */
- return SEQ_SKIP;
- }
- seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
- show_mnt_opts(m, mnt);
-
- /* Tagged fields ("foo:X" or "bar") */
- if (IS_MNT_SHARED(mnt))
- seq_printf(m, " shared:%i", mnt->mnt_group_id);
- if (IS_MNT_SLAVE(mnt)) {
- int master = mnt->mnt_master->mnt_group_id;
- int dom = get_dominating_id(mnt, &p->root);
- seq_printf(m, " master:%i", master);
- if (dom && dom != master)
- seq_printf(m, " propagate_from:%i", dom);
- }
- if (IS_MNT_UNBINDABLE(mnt))
- seq_puts(m, " unbindable");
-
- /* Filesystem specific data */
- seq_puts(m, " - ");
- show_type(m, sb);
- seq_putc(m, ' ');
- if (sb->s_op->show_devname)
- err = sb->s_op->show_devname(m, mnt);
- else
- mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
- if (err)
- goto out;
- seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
- err = show_sb_opts(m, sb);
- if (err)
- goto out;
- if (sb->s_op->show_options)
- err = sb->s_op->show_options(m, mnt);
- seq_putc(m, '\n');
-out:
- return err;
-}
-
-const struct seq_operations mountinfo_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_mountinfo,
-};
-
-static int show_vfsstat(struct seq_file *m, void *v)
-{
- struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
- struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
- int err = 0;
-
- /* device */
- if (mnt->mnt_sb->s_op->show_devname) {
- seq_puts(m, "device ");
- err = mnt->mnt_sb->s_op->show_devname(m, mnt);
- } else {
- if (mnt->mnt_devname) {
- seq_puts(m, "device ");
- mangle(m, mnt->mnt_devname);
- } else
- seq_puts(m, "no device");
- }
-
- /* mount point */
- seq_puts(m, " mounted on ");
- seq_path(m, &mnt_path, " \t\n\\");
- seq_putc(m, ' ');
-
- /* file system type */
- seq_puts(m, "with fstype ");
- show_type(m, mnt->mnt_sb);
-
- /* optional statistics */
- if (mnt->mnt_sb->s_op->show_stats) {
- seq_putc(m, ' ');
- if (!err)
- err = mnt->mnt_sb->s_op->show_stats(m, mnt);
- }
-
- seq_putc(m, '\n');
- return err;
-}
-
-const struct seq_operations mountstats_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_vfsstat,
+ .show = m_show,
};
#endif /* CONFIG_PROC_FS */
@@ -1155,11 +979,13 @@ const struct seq_operations mountstats_op = {
* open files, pwds, chroots or sub mounts that are
* busy.
*/
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
{
+ struct mount *mnt = real_mount(m);
int actual_refs = 0;
int minimum_refs = 0;
- struct vfsmount *p;
+ struct mount *p;
+ BUG_ON(!m);
/* write lock needed for mnt_get_count */
br_write_lock(vfsmount_lock);
@@ -1195,7 +1021,7 @@ int may_umount(struct vfsmount *mnt)
int ret = 1;
down_read(&namespace_sem);
br_write_lock(vfsmount_lock);
- if (propagate_mount_busy(mnt, 2))
+ if (propagate_mount_busy(real_mount(mnt), 2))
ret = 0;
br_write_unlock(vfsmount_lock);
up_read(&namespace_sem);
@@ -1206,25 +1032,25 @@ EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
- struct vfsmount *mnt;
+ struct mount *mnt;
while (!list_empty(head)) {
- mnt = list_first_entry(head, struct vfsmount, mnt_hash);
+ mnt = list_first_entry(head, struct mount, mnt_hash);
list_del_init(&mnt->mnt_hash);
- if (mnt->mnt_parent != mnt) {
+ if (mnt_has_parent(mnt)) {
struct dentry *dentry;
- struct vfsmount *m;
+ struct mount *m;
br_write_lock(vfsmount_lock);
dentry = mnt->mnt_mountpoint;
m = mnt->mnt_parent;
- mnt->mnt_mountpoint = mnt->mnt_root;
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
m->mnt_ghosts--;
br_write_unlock(vfsmount_lock);
dput(dentry);
- mntput(m);
+ mntput(&m->mnt);
}
- mntput(mnt);
+ mntput(&mnt->mnt);
}
}
@@ -1232,10 +1058,10 @@ void release_mounts(struct list_head *head)
* vfsmount lock must be held for write
* namespace_sem must be held for write
*/
-void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
{
LIST_HEAD(tmp_list);
- struct vfsmount *p;
+ struct mount *p;
for (p = mnt; p; p = next_mnt(p, mnt))
list_move(&p->mnt_hash, &tmp_list);
@@ -1250,24 +1076,24 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
p->mnt_ns = NULL;
__mnt_make_shortterm(p);
list_del_init(&p->mnt_child);
- if (p->mnt_parent != p) {
+ if (mnt_has_parent(p)) {
p->mnt_parent->mnt_ghosts++;
- dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
+ dentry_reset_mounted(p->mnt_mountpoint);
}
change_mnt_propagation(p, MS_PRIVATE);
}
list_splice(&tmp_list, kill);
}
-static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
+static void shrink_submounts(struct mount *mnt, struct list_head *umounts);
-static int do_umount(struct vfsmount *mnt, int flags)
+static int do_umount(struct mount *mnt, int flags)
{
- struct super_block *sb = mnt->mnt_sb;
+ struct super_block *sb = mnt->mnt.mnt_sb;
int retval;
LIST_HEAD(umount_list);
- retval = security_sb_umount(mnt, flags);
+ retval = security_sb_umount(&mnt->mnt, flags);
if (retval)
return retval;
@@ -1278,7 +1104,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
* (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
*/
if (flags & MNT_EXPIRE) {
- if (mnt == current->fs->root.mnt ||
+ if (&mnt->mnt == current->fs->root.mnt ||
flags & (MNT_FORCE | MNT_DETACH))
return -EINVAL;
@@ -1320,7 +1146,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
* /reboot - static binary that would close all descriptors and
* call reboot(9). Then init(8) could umount root and exec /reboot.
*/
- if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
+ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
/*
* Special case for "unmounting" root ...
* we just try to remount it readonly.
@@ -1362,6 +1188,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
struct path path;
+ struct mount *mnt;
int retval;
int lookup_flags = 0;
@@ -1374,21 +1201,22 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
+ mnt = real_mount(path.mnt);
retval = -EINVAL;
if (path.dentry != path.mnt->mnt_root)
goto dput_and_out;
- if (!check_mnt(path.mnt))
+ if (!check_mnt(mnt))
goto dput_and_out;
retval = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto dput_and_out;
- retval = do_umount(path.mnt, flags);
+ retval = do_umount(mnt, flags);
dput_and_out:
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path.dentry);
- mntput_no_expire(path.mnt);
+ mntput_no_expire(mnt);
out:
return retval;
}
@@ -1423,10 +1251,10 @@ static int mount_is_safe(struct path *path)
#endif
}
-struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
+struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
int flag)
{
- struct vfsmount *res, *p, *q, *r, *s;
+ struct mount *res, *p, *q, *r;
struct path path;
if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
@@ -1439,6 +1267,7 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
p = mnt;
list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
+ struct mount *s;
if (!is_subdir(r->mnt_mountpoint, dentry))
continue;
@@ -1452,9 +1281,9 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
q = q->mnt_parent;
}
p = s;
- path.mnt = q;
+ path.mnt = &q->mnt;
path.dentry = p->mnt_mountpoint;
- q = clone_mnt(p, p->mnt_root, flag);
+ q = clone_mnt(p, p->mnt.mnt_root, flag);
if (!q)
goto Enomem;
br_write_lock(vfsmount_lock);
@@ -1477,11 +1306,12 @@ Enomem:
struct vfsmount *collect_mounts(struct path *path)
{
- struct vfsmount *tree;
+ struct mount *tree;
down_write(&namespace_sem);
- tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
+ tree = copy_tree(real_mount(path->mnt), path->dentry,
+ CL_COPY_ALL | CL_PRIVATE);
up_write(&namespace_sem);
- return tree;
+ return tree ? &tree->mnt : NULL;
}
void drop_collected_mounts(struct vfsmount *mnt)
@@ -1489,7 +1319,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
LIST_HEAD(umount_list);
down_write(&namespace_sem);
br_write_lock(vfsmount_lock);
- umount_tree(mnt, 0, &umount_list);
+ umount_tree(real_mount(mnt), 0, &umount_list);
br_write_unlock(vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
@@ -1498,21 +1328,21 @@ void drop_collected_mounts(struct vfsmount *mnt)
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
struct vfsmount *root)
{
- struct vfsmount *mnt;
+ struct mount *mnt;
int res = f(root, arg);
if (res)
return res;
- list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
- res = f(mnt, arg);
+ list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
+ res = f(&mnt->mnt, arg);
if (res)
return res;
}
return 0;
}
-static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
+static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
- struct vfsmount *p;
+ struct mount *p;
for (p = mnt; p != end; p = next_mnt(p, mnt)) {
if (p->mnt_group_id && !IS_MNT_SHARED(p))
@@ -1520,9 +1350,9 @@ static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
}
}
-static int invent_group_ids(struct vfsmount *mnt, bool recurse)
+static int invent_group_ids(struct mount *mnt, bool recurse)
{
- struct vfsmount *p;
+ struct mount *p;
for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
@@ -1600,13 +1430,13 @@ static int invent_group_ids(struct vfsmount *mnt, bool recurse)
* Must be called without spinlocks held, since this function can sleep
* in allocations.
*/
-static int attach_recursive_mnt(struct vfsmount *source_mnt,
+static int attach_recursive_mnt(struct mount *source_mnt,
struct path *path, struct path *parent_path)
{
LIST_HEAD(tree_list);
- struct vfsmount *dest_mnt = path->mnt;
+ struct mount *dest_mnt = real_mount(path->mnt);
struct dentry *dest_dentry = path->dentry;
- struct vfsmount *child, *p;
+ struct mount *child, *p;
int err;
if (IS_MNT_SHARED(dest_mnt)) {
@@ -1627,7 +1457,7 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
if (parent_path) {
detach_mnt(source_mnt, parent_path);
attach_mnt(source_mnt, path);
- touch_mnt_namespace(parent_path->mnt->mnt_ns);
+ touch_mnt_namespace(source_mnt->mnt_ns);
} else {
mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
commit_tree(source_mnt);
@@ -1675,13 +1505,13 @@ static void unlock_mount(struct path *path)
mutex_unlock(&path->dentry->d_inode->i_mutex);
}
-static int graft_tree(struct vfsmount *mnt, struct path *path)
+static int graft_tree(struct mount *mnt, struct path *path)
{
- if (mnt->mnt_sb->s_flags & MS_NOUSER)
+ if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
return -EINVAL;
if (S_ISDIR(path->dentry->d_inode->i_mode) !=
- S_ISDIR(mnt->mnt_root->d_inode->i_mode))
+ S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
return -ENOTDIR;
if (d_unlinked(path->dentry))
@@ -1712,7 +1542,8 @@ static int flags_to_propagation_type(int flags)
*/
static int do_change_type(struct path *path, int flag)
{
- struct vfsmount *m, *mnt = path->mnt;
+ struct mount *m;
+ struct mount *mnt = real_mount(path->mnt);
int recurse = flag & MS_REC;
int type;
int err = 0;
@@ -1752,7 +1583,7 @@ static int do_loopback(struct path *path, char *old_name,
{
LIST_HEAD(umount_list);
struct path old_path;
- struct vfsmount *mnt = NULL;
+ struct mount *mnt = NULL, *old;
int err = mount_is_safe(path);
if (err)
return err;
@@ -1766,18 +1597,20 @@ static int do_loopback(struct path *path, char *old_name,
if (err)
goto out;
+ old = real_mount(old_path.mnt);
+
err = -EINVAL;
- if (IS_MNT_UNBINDABLE(old_path.mnt))
+ if (IS_MNT_UNBINDABLE(old))
goto out2;
- if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
+ if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
goto out2;
err = -ENOMEM;
if (recurse)
- mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
+ mnt = copy_tree(old, old_path.dentry, 0);
else
- mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);
+ mnt = clone_mnt(old, old_path.dentry, 0);
if (!mnt)
goto out2;
@@ -1807,9 +1640,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
return 0;
if (readonly_request)
- error = mnt_make_readonly(mnt);
+ error = mnt_make_readonly(real_mount(mnt));
else
- __mnt_unmake_readonly(mnt);
+ __mnt_unmake_readonly(real_mount(mnt));
return error;
}
@@ -1823,11 +1656,12 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
{
int err;
struct super_block *sb = path->mnt->mnt_sb;
+ struct mount *mnt = real_mount(path->mnt);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!check_mnt(path->mnt))
+ if (!check_mnt(mnt))
return -EINVAL;
if (path->dentry != path->mnt->mnt_root)
@@ -1844,22 +1678,22 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
err = do_remount_sb(sb, flags, data, 0);
if (!err) {
br_write_lock(vfsmount_lock);
- mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
- path->mnt->mnt_flags = mnt_flags;
+ mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+ mnt->mnt.mnt_flags = mnt_flags;
br_write_unlock(vfsmount_lock);
}
up_write(&sb->s_umount);
if (!err) {
br_write_lock(vfsmount_lock);
- touch_mnt_namespace(path->mnt->mnt_ns);
+ touch_mnt_namespace(mnt->mnt_ns);
br_write_unlock(vfsmount_lock);
}
return err;
}
-static inline int tree_contains_unbindable(struct vfsmount *mnt)
+static inline int tree_contains_unbindable(struct mount *mnt)
{
- struct vfsmount *p;
+ struct mount *p;
for (p = mnt; p; p = next_mnt(p, mnt)) {
if (IS_MNT_UNBINDABLE(p))
return 1;
@@ -1870,7 +1704,8 @@ static inline int tree_contains_unbindable(struct vfsmount *mnt)
static int do_move_mount(struct path *path, char *old_name)
{
struct path old_path, parent_path;
- struct vfsmount *p;
+ struct mount *p;
+ struct mount *old;
int err = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1884,8 +1719,11 @@ static int do_move_mount(struct path *path, char *old_name)
if (err < 0)
goto out;
+ old = real_mount(old_path.mnt);
+ p = real_mount(path->mnt);
+
err = -EINVAL;
- if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
+ if (!check_mnt(p) || !check_mnt(old))
goto out1;
if (d_unlinked(path->dentry))
@@ -1895,7 +1733,7 @@ static int do_move_mount(struct path *path, char *old_name)
if (old_path.dentry != old_path.mnt->mnt_root)
goto out1;
- if (old_path.mnt == old_path.mnt->mnt_parent)
+ if (!mnt_has_parent(old))
goto out1;
if (S_ISDIR(path->dentry->d_inode->i_mode) !=
@@ -1904,28 +1742,26 @@ static int do_move_mount(struct path *path, char *old_name)
/*
* Don't move a mount residing in a shared parent.
*/
- if (old_path.mnt->mnt_parent &&
- IS_MNT_SHARED(old_path.mnt->mnt_parent))
+ if (IS_MNT_SHARED(old->mnt_parent))
goto out1;
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
- if (IS_MNT_SHARED(path->mnt) &&
- tree_contains_unbindable(old_path.mnt))
+ if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
goto out1;
err = -ELOOP;
- for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
- if (p == old_path.mnt)
+ for (; mnt_has_parent(p); p = p->mnt_parent)
+ if (p == old)
goto out1;
- err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
+ err = attach_recursive_mnt(old, path, &parent_path);
if (err)
goto out1;
/* if the mount is moved, it should no longer be expire
* automatically */
- list_del_init(&old_path.mnt->mnt_expire);
+ list_del_init(&old->mnt_expire);
out1:
unlock_mount(path);
out:
@@ -1958,7 +1794,7 @@ static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
return ERR_PTR(err);
}
-struct vfsmount *
+static struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
struct file_system_type *type = get_fs_type(fstype);
@@ -1972,12 +1808,11 @@ do_kern_mount(const char *fstype, int flags, const char *name, void *data)
put_filesystem(type);
return mnt;
}
-EXPORT_SYMBOL_GPL(do_kern_mount);
/*
* add a mount into a namespace's mount tree
*/
-static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
+static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
int err;
@@ -1988,20 +1823,20 @@ static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flag
return err;
err = -EINVAL;
- if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
+ if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
goto unlock;
/* Refuse the same filesystem on the same mount point */
err = -EBUSY;
- if (path->mnt->mnt_sb == newmnt->mnt_sb &&
+ if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
path->mnt->mnt_root == path->dentry)
goto unlock;
err = -EINVAL;
- if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
+ if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
goto unlock;
- newmnt->mnt_flags = mnt_flags;
+ newmnt->mnt.mnt_flags = mnt_flags;
err = graft_tree(newmnt, path);
unlock:
@@ -2030,7 +1865,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
if (IS_ERR(mnt))
return PTR_ERR(mnt);
- err = do_add_mount(mnt, path, mnt_flags);
+ err = do_add_mount(real_mount(mnt), path, mnt_flags);
if (err)
mntput(mnt);
return err;
@@ -2038,11 +1873,12 @@ static int do_new_mount(struct path *path, char *type, int flags,
int finish_automount(struct vfsmount *m, struct path *path)
{
+ struct mount *mnt = real_mount(m);
int err;
/* The new mount record should have at least 2 refs to prevent it being
* expired before we get a chance to add it
*/
- BUG_ON(mnt_get_count(m) < 2);
+ BUG_ON(mnt_get_count(mnt) < 2);
if (m->mnt_sb == path->mnt->mnt_sb &&
m->mnt_root == path->dentry) {
@@ -2050,15 +1886,15 @@ int finish_automount(struct vfsmount *m, struct path *path)
goto fail;
}
- err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
+ err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
if (!err)
return 0;
fail:
/* remove m from any expiration list it may be on */
- if (!list_empty(&m->mnt_expire)) {
+ if (!list_empty(&mnt->mnt_expire)) {
down_write(&namespace_sem);
br_write_lock(vfsmount_lock);
- list_del_init(&m->mnt_expire);
+ list_del_init(&mnt->mnt_expire);
br_write_unlock(vfsmount_lock);
up_write(&namespace_sem);
}
@@ -2077,7 +1913,7 @@ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
down_write(&namespace_sem);
br_write_lock(vfsmount_lock);
- list_add_tail(&mnt->mnt_expire, expiry_list);
+ list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
br_write_unlock(vfsmount_lock);
up_write(&namespace_sem);
@@ -2091,7 +1927,7 @@ EXPORT_SYMBOL(mnt_set_expiry);
*/
void mark_mounts_for_expiry(struct list_head *mounts)
{
- struct vfsmount *mnt, *next;
+ struct mount *mnt, *next;
LIST_HEAD(graveyard);
LIST_HEAD(umounts);
@@ -2114,7 +1950,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
list_move(&mnt->mnt_expire, &graveyard);
}
while (!list_empty(&graveyard)) {
- mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
+ mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
touch_mnt_namespace(mnt->mnt_ns);
umount_tree(mnt, 1, &umounts);
}
@@ -2132,9 +1968,9 @@ EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
* search the list of submounts for a given mountpoint, and move any
* shrinkable submounts to the 'graveyard' list.
*/
-static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
+static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
- struct vfsmount *this_parent = parent;
+ struct mount *this_parent = parent;
struct list_head *next;
int found = 0;
@@ -2143,10 +1979,10 @@ repeat:
resume:
while (next != &this_parent->mnt_mounts) {
struct list_head *tmp = next;
- struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);
+ struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
next = tmp->next;
- if (!(mnt->mnt_flags & MNT_SHRINKABLE))
+ if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
continue;
/*
* Descend a level if the d_mounts list is non-empty.
@@ -2178,15 +2014,15 @@ resume:
*
* vfsmount_lock must be held for write
*/
-static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
+static void shrink_submounts(struct mount *mnt, struct list_head *umounts)
{
LIST_HEAD(graveyard);
- struct vfsmount *m;
+ struct mount *m;
/* extract submounts of 'mountpoint' from the expiration list */
while (select_submounts(mnt, &graveyard)) {
while (!list_empty(&graveyard)) {
- m = list_first_entry(&graveyard, struct vfsmount,
+ m = list_first_entry(&graveyard, struct mount,
mnt_expire);
touch_mnt_namespace(m->mnt_ns);
umount_tree(m, 1, umounts);
@@ -2373,12 +2209,13 @@ static struct mnt_namespace *alloc_mnt_ns(void)
void mnt_make_longterm(struct vfsmount *mnt)
{
- __mnt_make_longterm(mnt);
+ __mnt_make_longterm(real_mount(mnt));
}
-void mnt_make_shortterm(struct vfsmount *mnt)
+void mnt_make_shortterm(struct vfsmount *m)
{
#ifdef CONFIG_SMP
+ struct mount *mnt = real_mount(m);
if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
return;
br_write_lock(vfsmount_lock);
@@ -2396,7 +2233,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
{
struct mnt_namespace *new_ns;
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
- struct vfsmount *p, *q;
+ struct mount *p, *q;
+ struct mount *old = mnt_ns->root;
+ struct mount *new;
new_ns = alloc_mnt_ns();
if (IS_ERR(new_ns))
@@ -2404,15 +2243,15 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
down_write(&namespace_sem);
/* First pass: copy the tree topology */
- new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
- CL_COPY_ALL | CL_EXPIRE);
- if (!new_ns->root) {
+ new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
+ if (!new) {
up_write(&namespace_sem);
kfree(new_ns);
return ERR_PTR(-ENOMEM);
}
+ new_ns->root = new;
br_write_lock(vfsmount_lock);
- list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
+ list_add_tail(&new_ns->list, &new->mnt_list);
br_write_unlock(vfsmount_lock);
/*
@@ -2420,27 +2259,27 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
* as belonging to new namespace. We have already acquired a private
* fs_struct, so tsk->fs->lock is not needed.
*/
- p = mnt_ns->root;
- q = new_ns->root;
+ p = old;
+ q = new;
while (p) {
q->mnt_ns = new_ns;
__mnt_make_longterm(q);
if (fs) {
- if (p == fs->root.mnt) {
- fs->root.mnt = mntget(q);
+ if (&p->mnt == fs->root.mnt) {
+ fs->root.mnt = mntget(&q->mnt);
__mnt_make_longterm(q);
- mnt_make_shortterm(p);
- rootmnt = p;
+ mnt_make_shortterm(&p->mnt);
+ rootmnt = &p->mnt;
}
- if (p == fs->pwd.mnt) {
- fs->pwd.mnt = mntget(q);
+ if (&p->mnt == fs->pwd.mnt) {
+ fs->pwd.mnt = mntget(&q->mnt);
__mnt_make_longterm(q);
- mnt_make_shortterm(p);
- pwdmnt = p;
+ mnt_make_shortterm(&p->mnt);
+ pwdmnt = &p->mnt;
}
}
- p = next_mnt(p, mnt_ns->root);
- q = next_mnt(q, new_ns->root);
+ p = next_mnt(p, old);
+ q = next_mnt(q, new);
}
up_write(&namespace_sem);
@@ -2473,22 +2312,20 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
* create_mnt_ns - creates a private namespace and adds a root filesystem
* @mnt: pointer to the new root filesystem mountpoint
*/
-struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
- struct mnt_namespace *new_ns;
-
- new_ns = alloc_mnt_ns();
+ struct mnt_namespace *new_ns = alloc_mnt_ns();
if (!IS_ERR(new_ns)) {
+ struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
__mnt_make_longterm(mnt);
new_ns->root = mnt;
- list_add(&new_ns->list, &new_ns->root->mnt_list);
+ list_add(&new_ns->list, &mnt->mnt_list);
} else {
- mntput(mnt);
+ mntput(m);
}
return new_ns;
}
-EXPORT_SYMBOL(create_mnt_ns);
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
@@ -2562,6 +2399,31 @@ out_type:
}
/*
+ * Return true if path is reachable from root
+ *
+ * namespace_sem or vfsmount_lock is held
+ */
+bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
+ const struct path *root)
+{
+ while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
+ dentry = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
+ }
+ return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
+}
+
+int path_is_under(struct path *path1, struct path *path2)
+{
+ int res;
+ br_read_lock(vfsmount_lock);
+ res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
+ br_read_unlock(vfsmount_lock);
+ return res;
+}
+EXPORT_SYMBOL(path_is_under);
+
+/*
* pivot_root Semantics:
* Moves the root file system of the current process to the directory put_old,
* makes new_root as the new root file system of the current process, and sets
@@ -2589,8 +2451,8 @@ out_type:
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
- struct vfsmount *tmp;
struct path new, old, parent_path, root_parent, root;
+ struct mount *new_mnt, *root_mnt;
int error;
if (!capable(CAP_SYS_ADMIN))
@@ -2614,11 +2476,13 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
goto out3;
error = -EINVAL;
- if (IS_MNT_SHARED(old.mnt) ||
- IS_MNT_SHARED(new.mnt->mnt_parent) ||
- IS_MNT_SHARED(root.mnt->mnt_parent))
+ new_mnt = real_mount(new.mnt);
+ root_mnt = real_mount(root.mnt);
+ if (IS_MNT_SHARED(real_mount(old.mnt)) ||
+ IS_MNT_SHARED(new_mnt->mnt_parent) ||
+ IS_MNT_SHARED(root_mnt->mnt_parent))
goto out4;
- if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
+ if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
goto out4;
error = -ENOENT;
if (d_unlinked(new.dentry))
@@ -2632,33 +2496,22 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = -EINVAL;
if (root.mnt->mnt_root != root.dentry)
goto out4; /* not a mountpoint */
- if (root.mnt->mnt_parent == root.mnt)
+ if (!mnt_has_parent(root_mnt))
goto out4; /* not attached */
if (new.mnt->mnt_root != new.dentry)
goto out4; /* not a mountpoint */
- if (new.mnt->mnt_parent == new.mnt)
+ if (!mnt_has_parent(new_mnt))
goto out4; /* not attached */
/* make sure we can reach put_old from new_root */
- tmp = old.mnt;
- if (tmp != new.mnt) {
- for (;;) {
- if (tmp->mnt_parent == tmp)
- goto out4; /* already mounted on put_old */
- if (tmp->mnt_parent == new.mnt)
- break;
- tmp = tmp->mnt_parent;
- }
- if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
- goto out4;
- } else if (!is_subdir(old.dentry, new.dentry))
+ if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
goto out4;
br_write_lock(vfsmount_lock);
- detach_mnt(new.mnt, &parent_path);
- detach_mnt(root.mnt, &root_parent);
+ detach_mnt(new_mnt, &parent_path);
+ detach_mnt(root_mnt, &root_parent);
/* mount old root on put_old */
- attach_mnt(root.mnt, &old);
+ attach_mnt(root_mnt, &old);
/* mount new_root on / */
- attach_mnt(new.mnt, &root_parent);
+ attach_mnt(new_mnt, &root_parent);
touch_mnt_namespace(current->nsproxy->mnt_ns);
br_write_unlock(vfsmount_lock);
chroot_fs_refs(&root, &new);
@@ -2696,8 +2549,8 @@ static void __init init_mount_tree(void)
init_task.nsproxy->mnt_ns = ns;
get_mnt_ns(ns);
- root.mnt = ns->root;
- root.dentry = ns->root->mnt_root;
+ root.mnt = mnt;
+ root.dentry = mnt->mnt_root;
set_fs_pwd(current->fs, &root);
set_fs_root(current->fs, &root);
@@ -2710,7 +2563,7 @@ void __init mnt_init(void)
init_rwsem(&namespace_sem);
- mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+ mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
@@ -2750,7 +2603,6 @@ void put_mnt_ns(struct mnt_namespace *ns)
release_mounts(&umount_list);
kfree(ns);
}
-EXPORT_SYMBOL(put_mnt_ns);
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
@@ -2776,3 +2628,8 @@ void kern_unmount(struct vfsmount *mnt)
}
}
EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+ return check_mnt(real_mount(mnt));
+}
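
Aside (not part of the patch above): the fs/namespace.c conversion leans on the struct mount wrapper and the small helpers this series introduces in fs/mount.h. An abridged sketch of that split follows, reconstructed from memory of the series, so the exact field order and the full field list are approximate:

#include <linux/kernel.h>	/* container_of() */
#include <linux/list.h>
#include <linux/dcache.h>	/* struct dentry */
#include <linux/mount.h>	/* struct vfsmount */

/*
 * struct vfsmount keeps only what filesystems and path lookup need;
 * the namespace bookkeeping (parent, mountpoint, hash/child/expire
 * lists, counters) moves into the containing struct mount, which only
 * fs/namespace.c and fs/pnode.c are supposed to touch directly.
 */
struct mount {
	struct list_head mnt_hash;
	struct mount *mnt_parent;
	struct dentry *mnt_mountpoint;
	struct vfsmount mnt;	/* embedded; handed out to the rest of the kernel */
	/* ... mnt_mounts, mnt_child, mnt_list, mnt_expire, counters ... */
};

/* Map a published vfsmount back to its container. */
static inline struct mount *real_mount(struct vfsmount *mnt)
{
	return container_of(mnt, struct mount, mnt);
}

/* A mount with no parent points at itself, hence this test. */
static inline int mnt_has_parent(struct mount *mnt)
{
	return mnt != mnt->mnt_parent;
}

This is why hunks above keep converting expressions like mnt->mnt_parent != mnt into mnt_has_parent(mnt), and path->mnt into real_mount(path->mnt).
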
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 9c51f621e90..aeed93a6bde 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -30,15 +30,15 @@ static void ncp_do_readdir(struct file *, void *, filldir_t,
static int ncp_readdir(struct file *, void *, filldir_t);
-static int ncp_create(struct inode *, struct dentry *, int, struct nameidata *);
+static int ncp_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *);
static int ncp_unlink(struct inode *, struct dentry *);
-static int ncp_mkdir(struct inode *, struct dentry *, int);
+static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
static int ncp_rmdir(struct inode *, struct dentry *);
static int ncp_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *);
static int ncp_mknod(struct inode * dir, struct dentry *dentry,
- int mode, dev_t rdev);
+ umode_t mode, dev_t rdev);
#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
extern int ncp_symlink(struct inode *, struct dentry *, const char *);
#else
@@ -919,7 +919,7 @@ out_close:
goto out;
}
-int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
+int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev, __le32 attributes)
{
struct ncp_server *server = NCP_SERVER(dir);
@@ -928,7 +928,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
int opmode;
__u8 __name[NCP_MAXPATHLEN + 1];
- PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
+ PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
dentry->d_parent->d_name.name, dentry->d_name.name, mode);
ncp_age_dentry(server, dentry);
@@ -979,13 +979,13 @@ out:
return error;
}
-static int ncp_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return ncp_create_new(dir, dentry, mode, 0, 0);
}
-static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct ncp_entry_info finfo;
struct ncp_server *server = NCP_SERVER(dir);
@@ -1201,12 +1201,12 @@ out:
}
static int ncp_mknod(struct inode * dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
if (!new_valid_dev(rdev))
return -EINVAL;
if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
- DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%o\n", mode);
+ DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%ho\n", mode);
return ncp_create_new(dir, dentry, mode, rdev, 0);
}
return -EPERM; /* Strange, but true */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 5b5fa33b6b9..3d1e34f8a68 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -44,7 +44,7 @@
static void ncp_evict_inode(struct inode *);
static void ncp_put_super(struct super_block *);
static int ncp_statfs(struct dentry *, struct kstatfs *);
-static int ncp_show_options(struct seq_file *, struct vfsmount *);
+static int ncp_show_options(struct seq_file *, struct dentry *);
static struct kmem_cache * ncp_inode_cachep;
@@ -60,7 +60,6 @@ static struct inode *ncp_alloc_inode(struct super_block *sb)
static void ncp_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
}
@@ -323,9 +322,9 @@ static void ncp_stop_tasks(struct ncp_server *server) {
flush_work_sync(&server->timeout_tq);
}
-static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int ncp_show_options(struct seq_file *seq, struct dentry *root)
{
- struct ncp_server *server = NCP_SBP(mnt->mnt_sb);
+ struct ncp_server *server = NCP_SBP(root->d_sb);
unsigned int tmp;
if (server->m.uid != 0)
@@ -548,7 +547,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
if (error)
- goto out_bdi;
+ goto out_fput;
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
@@ -559,7 +558,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
error = -EBADF;
server->info_filp = fget(data.info_fd);
if (!server->info_filp)
- goto out_fput;
+ goto out_bdi;
error = -ENOTSOCK;
sock_inode = server->info_filp->f_path.dentry->d_inode;
if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +745,9 @@ out_nls:
out_fput2:
if (server->info_filp)
fput(server->info_filp);
-out_fput:
- bdi_destroy(&server->bdi);
out_bdi:
+ bdi_destroy(&server->bdi);
+out_fput:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogus, since
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 790e92a9ec6..6958adfaff0 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -901,7 +901,7 @@ long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = __ncp_ioctl(inode, cmd, arg);
outDropWrite:
if (need_drop_write)
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
out:
return ret;
}
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 09881e6aa5a..32c06587351 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -114,7 +114,7 @@ int ncp_dirhandle_alloc(struct ncp_server *, __u8 vol, __le32 dirent, __u8 *dirh
int ncp_dirhandle_free(struct ncp_server *, __u8 dirhandle);
int ncp_create_new(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev, __le32 attributes);
+ umode_t mode, dev_t rdev, __le32 attributes);
static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int volnum) {
#ifdef CONFIG_NCPFS_NFS_NS
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index 661f861d80c..52439ddc8de 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -108,7 +108,7 @@ int ncp_symlink(struct inode *dir, struct dentry *dentry, const char *symname) {
char *rawlink;
int length, err, i, outlen;
int kludge;
- int mode;
+ umode_t mode;
__le32 attr;
unsigned int hdr;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 43926add945..54cea8ad5a7 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
dprintk("%s enter. slotid %d seqid %d\n",
__func__, args->csa_slotid, args->csa_sequenceid);
- if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
+ if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
return htonl(NFS4ERR_BADSLOT);
slot = tbl->slots + args->csa_slotid;
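
A note on the one-character fix above (illustrative, not part of the patch): csa_slotid is used as an index into tbl->slots[], an array of NFS41_BC_MAX_CALLBACKS entries, so the valid range is 0 .. NFS41_BC_MAX_CALLBACKS - 1 and an id equal to the table size must be rejected as well. A minimal sketch of that invariant, with a hypothetical helper name:

#include <linux/types.h>

/* Hypothetical helper, only to spell out the bounds check:
 * an N-entry slot table accepts ids 0 .. N-1. */
static bool slotid_in_range(unsigned int slotid, unsigned int nr_slots)
{
	return slotid < nr_slots;	/* equivalently: reject slotid >= nr_slots */
}
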
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 873bf00d51a..277dfaf2e99 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -84,7 +84,7 @@ retry:
/*
* Turn off NFSv4 uid/gid mapping when using AUTH_SYS
*/
-static int nfs4_disable_idmapping = 0;
+static int nfs4_disable_idmapping = 1;
/*
* RPC cruft for NFS
@@ -185,7 +185,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
#endif
- cred = rpc_lookup_machine_cred();
+ cred = rpc_lookup_machine_cred("*");
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
nfs_fscache_get_client_cookie(clp);
@@ -250,6 +250,11 @@ static void pnfs_init_server(struct nfs_server *server)
rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
}
+static void nfs4_destroy_server(struct nfs_server *server)
+{
+ nfs4_purge_state_owners(server);
+}
+
#else
static void nfs4_shutdown_client(struct nfs_client *clp)
{
@@ -1065,6 +1070,7 @@ static struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->master_link);
INIT_LIST_HEAD(&server->delegations);
INIT_LIST_HEAD(&server->layouts);
+ INIT_LIST_HEAD(&server->state_owners_lru);
atomic_set(&server->active, 0);
@@ -1538,6 +1544,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
nfs_server_insert_lists(server);
server->mount_time = jiffies;
+ server->destroy = nfs4_destroy_server;
out:
nfs_free_fattr(fattr);
return error;
@@ -1719,6 +1726,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
/* Copy data from the source */
server->nfs_client = source->nfs_client;
+ server->destroy = source->destroy;
atomic_inc(&server->nfs_client->cl_count);
nfs_server_copy_userdata(server, source);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ac289909814..fd9a872fada 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -47,13 +47,13 @@ static int nfs_opendir(struct inode *, struct file *);
static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-static int nfs_mkdir(struct inode *, struct dentry *, int);
+static int nfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+static int nfs_mkdir(struct inode *, struct dentry *, umode_t);
static int nfs_rmdir(struct inode *, struct dentry *);
static int nfs_unlink(struct inode *, struct dentry *);
static int nfs_symlink(struct inode *, struct dentry *, const char *);
static int nfs_link(struct dentry *, struct inode *, struct dentry *);
-static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
+static int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
static int nfs_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
@@ -112,7 +112,7 @@ const struct inode_operations nfs3_dir_inode_operations = {
#ifdef CONFIG_NFS_V4
static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd);
+static int nfs_open_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd);
const struct inode_operations nfs4_dir_inode_operations = {
.create = nfs_open_create,
.lookup = nfs_atomic_lookup,
@@ -1368,18 +1368,7 @@ static fmode_t flags_to_mode(int flags)
static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags)
{
- struct nfs_open_context *ctx;
- struct rpc_cred *cred;
- fmode_t fmode = flags_to_mode(open_flags);
-
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return ERR_CAST(cred);
- ctx = alloc_nfs_open_context(dentry, cred, fmode);
- put_rpccred(cred);
- if (ctx == NULL)
- return ERR_PTR(-ENOMEM);
- return ctx;
+ return alloc_nfs_open_context(dentry, flags_to_mode(open_flags));
}
static int do_open(struct inode *inode, struct file *filp)
@@ -1584,8 +1573,8 @@ no_open:
return nfs_lookup_revalidate(dentry, nd);
}
-static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
+static int nfs_open_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, struct nameidata *nd)
{
struct nfs_open_context *ctx = NULL;
struct iattr attr;
@@ -1675,8 +1664,8 @@ out_error:
* that the operation succeeded on the server, but an error in the
* reply path made it appear to have failed.
*/
-static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
+static int nfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, struct nameidata *nd)
{
struct iattr attr;
int error;
@@ -1704,7 +1693,7 @@ out_err:
* See comments for nfs_proc_create regarding failed operations.
*/
static int
-nfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct iattr attr;
int status;
@@ -1730,7 +1719,7 @@ out_err:
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct iattr attr;
int error;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eca56d4b39c..c43a452f7da 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
* origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
* the cached file length
*/
- if (origin != SEEK_SET || origin != SEEK_CUR) {
+ if (origin != SEEK_SET && origin != SEEK_CUR) {
struct inode *inode = filp->f_mapping->host;
int retval = nfs_revalidate_file_size(inode, filp);
@@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
datasync);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (ret)
- return ret;
mutex_lock(&inode->i_mutex);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
status = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (status >= 0 && ret < 0)
+ status = ret;
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
if (have_error)
ret = xchg(&ctx->error, 0);
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 47d1c6ff2d8..2c05f1991e1 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -38,6 +38,89 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/nfs_idmap.h>
+#include <linux/nfs_fs.h>
+
+/**
+ * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
+ * @fattr: fully initialised struct nfs_fattr
+ * @owner_name: owner name string cache
+ * @group_name: group name string cache
+ */
+void nfs_fattr_init_names(struct nfs_fattr *fattr,
+ struct nfs4_string *owner_name,
+ struct nfs4_string *group_name)
+{
+ fattr->owner_name = owner_name;
+ fattr->group_name = group_name;
+}
+
+static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr)
+{
+ fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME;
+ kfree(fattr->owner_name->data);
+}
+
+static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
+{
+ fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME;
+ kfree(fattr->group_name->data);
+}
+
+static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ struct nfs4_string *owner = fattr->owner_name;
+ __u32 uid;
+
+ if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
+ return false;
+ if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) {
+ fattr->uid = uid;
+ fattr->valid |= NFS_ATTR_FATTR_OWNER;
+ }
+ return true;
+}
+
+static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ struct nfs4_string *group = fattr->group_name;
+ __u32 gid;
+
+ if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
+ return false;
+ if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) {
+ fattr->gid = gid;
+ fattr->valid |= NFS_ATTR_FATTR_GROUP;
+ }
+ return true;
+}
+
+/**
+ * nfs_fattr_free_names - free up the NFSv4 owner and group strings
+ * @fattr: a fully initialised nfs_fattr structure
+ */
+void nfs_fattr_free_names(struct nfs_fattr *fattr)
+{
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)
+ nfs_fattr_free_owner_name(fattr);
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)
+ nfs_fattr_free_group_name(fattr);
+}
+
+/**
+ * nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free
+ * @server: pointer to the filesystem nfs_server structure
+ * @fattr: a fully initialised nfs_fattr structure
+ *
+ * This helper maps the cached NFSv4 owner/group strings in fattr into
+ * their numeric uid/gid equivalents, and then frees the cached strings.
+ */
+void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ if (nfs_fattr_map_owner_name(server, fattr))
+ nfs_fattr_free_owner_name(fattr);
+ if (nfs_fattr_map_group_name(server, fattr))
+ nfs_fattr_free_group_name(fattr);
+}
static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 50a15fa8cf9..25c3bfad795 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/nfs_xdr.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/freezer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
- schedule();
+ freezable_schedule();
return 0;
}
@@ -629,23 +630,28 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
nfs_revalidate_inode(server, inode);
}
-struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode)
+struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
{
struct nfs_open_context *ctx;
+ struct rpc_cred *cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return ERR_CAST(cred);
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (ctx != NULL) {
- nfs_sb_active(dentry->d_sb);
- ctx->dentry = dget(dentry);
- ctx->cred = get_rpccred(cred);
- ctx->state = NULL;
- ctx->mode = f_mode;
- ctx->flags = 0;
- ctx->error = 0;
- nfs_init_lock_context(&ctx->lock_context);
- ctx->lock_context.open_context = ctx;
- INIT_LIST_HEAD(&ctx->list);
+ if (!ctx) {
+ put_rpccred(cred);
+ return ERR_PTR(-ENOMEM);
}
+ nfs_sb_active(dentry->d_sb);
+ ctx->dentry = dget(dentry);
+ ctx->cred = cred;
+ ctx->state = NULL;
+ ctx->mode = f_mode;
+ ctx->flags = 0;
+ ctx->error = 0;
+ nfs_init_lock_context(&ctx->lock_context);
+ ctx->lock_context.open_context = ctx;
+ INIT_LIST_HEAD(&ctx->list);
return ctx;
}
@@ -738,15 +744,10 @@ static void nfs_file_clear_open_context(struct file *filp)
int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- struct rpc_cred *cred;
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return PTR_ERR(cred);
- ctx = alloc_nfs_open_context(filp->f_path.dentry, cred, filp->f_mode);
- put_rpccred(cred);
- if (ctx == NULL)
- return -ENOMEM;
+ ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
nfs_fscache_set_inode_cookie(inode, filp);
@@ -1019,6 +1020,8 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->valid = 0;
fattr->time_start = jiffies;
fattr->gencount = nfs_inc_attr_generation_counter();
+ fattr->owner_name = NULL;
+ fattr->group_name = NULL;
}
struct nfs_fattr *nfs_alloc_fattr(void)
@@ -1464,7 +1467,6 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
static void nfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 3f4d95751d5..5ee92538b06 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -307,6 +307,8 @@ extern void nfs_readdata_release(struct nfs_read_data *rdata);
/* write.c */
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct list_head *head);
+extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
extern void nfs_commit_free(struct nfs_write_data *p);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d4bc9ed9174..91943953a37 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -17,6 +17,7 @@
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
#include <linux/nfs_mount.h>
+#include <linux/freezer.h>
#include "iostat.h"
#include "internal.h"
@@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
res = rpc_call_sync(clnt, msg, flags);
if (res != -EJUKEBOX && res != -EKEYEXPIRED)
break;
- schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
} while (!fatal_signal_pending(current));
return res;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 693ae22f873..4d7d0aedc10 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -94,6 +94,8 @@ struct nfs_unique_id {
struct nfs4_state_owner {
struct nfs_unique_id so_owner_id;
struct nfs_server *so_server;
+ struct list_head so_lru;
+ unsigned long so_expires;
struct rb_node so_server_node;
struct rpc_cred *so_cred; /* Associated cred */
@@ -319,6 +321,7 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+extern void nfs4_purge_state_owners(struct nfs_server *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index a62d36b9a99..71ec08617e2 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -49,13 +49,14 @@ filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
loff_t offset)
{
u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
- u64 tmp;
+ u64 stripe_no;
+ u32 rem;
offset -= flseg->pattern_offset;
- tmp = offset;
- do_div(tmp, stripe_width);
+ stripe_no = div_u64(offset, stripe_width);
+ div_u64_rem(offset, flseg->stripe_unit, &rem);
- return tmp * flseg->stripe_unit + do_div(offset, flseg->stripe_unit);
+ return stripe_no * flseg->stripe_unit + rem;
}
/* This function is used by the layout driver to calculate the
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index be2bbac1381..75366dc8968 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -39,6 +39,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
@@ -50,9 +52,11 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
+#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
+#include <linux/freezer.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -241,7 +245,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
- schedule_timeout_killable(*timeout);
+ freezable_schedule_timeout_killable(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
@@ -361,9 +365,8 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* Must be called while holding tbl->slot_tbl_lock
*/
static void
-nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
- int free_slotid = free_slot - tbl->slots;
int slotid = free_slotid;
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
@@ -428,7 +431,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
}
spin_lock(&tbl->slot_tbl_lock);
- nfs4_free_slot(tbl, res->sr_slot);
+ nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
nfs4_check_drain_fc_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
@@ -551,13 +554,10 @@ int nfs41_setup_sequence(struct nfs4_session *session,
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
- /*
- * The state manager will wait until the slot table is empty.
- * Schedule the reset thread
- */
+ /* The state manager will wait until the slot table is empty */
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
- dprintk("%s Schedule Session Reset\n", __func__);
+ dprintk("%s session is draining\n", __func__);
return -EAGAIN;
}
@@ -762,6 +762,8 @@ struct nfs4_opendata {
struct nfs_openres o_res;
struct nfs_open_confirmargs c_arg;
struct nfs_open_confirmres c_res;
+ struct nfs4_string owner_name;
+ struct nfs4_string group_name;
struct nfs_fattr f_attr;
struct nfs_fattr dir_attr;
struct dentry *dir;
@@ -785,6 +787,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
+ nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
@@ -816,6 +819,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
+ p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_CREAT) {
u32 *s;
@@ -852,6 +856,7 @@ static void nfs4_opendata_free(struct kref *kref)
dput(p->dir);
dput(p->dentry);
nfs_sb_deactive(sb);
+ nfs_fattr_free_names(&p->f_attr);
kfree(p);
}
@@ -894,6 +899,8 @@ out:
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
+ if (delegation == NULL)
+ return 0;
if ((delegation->type & fmode) != fmode)
return 0;
if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1043,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
- if (delegation == NULL ||
- !can_open_delegated(delegation, fmode)) {
+ if (!can_open_delegated(delegation, fmode)) {
rcu_read_unlock();
break;
}
@@ -1091,7 +1097,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
if (delegation)
delegation_flags = delegation->flags;
rcu_read_unlock();
- if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+ pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+ "returning a delegation for "
+ "OPEN(CLAIM_DELEGATE_CUR)\n",
+ NFS_CLIENT(inode)->cl_server);
+ } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
data->owner->so_cred,
&data->o_res);
@@ -1423,11 +1434,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
- if (delegation != NULL &&
- test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
- rcu_read_unlock();
- goto out_no_action;
- }
+ if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+ can_open_delegated(delegation, data->o_arg.fmode))
+ goto unlock_no_action;
rcu_read_unlock();
}
/* Update sequence id. */
@@ -1444,6 +1453,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
return;
rpc_call_start(task);
return;
+unlock_no_action:
+ rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
@@ -1570,6 +1581,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
if (status != 0 || !data->rpc_done)
return status;
+ nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
+
nfs_refresh_inode(dir, o_res->dir_attr);
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1602,6 +1615,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return status;
}
+ nfs_fattr_map_and_free_names(server, &data->f_attr);
+
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
@@ -3422,19 +3437,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
*/
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
-static void buf_to_pages(const void *buf, size_t buflen,
- struct page **pages, unsigned int *pgbase)
-{
- const void *p = buf;
-
- *pgbase = offset_in_page(buf);
- p -= *pgbase;
- while (p < buf + buflen) {
- *(pages++) = virt_to_page(p);
- p += PAGE_CACHE_SIZE;
- }
-}
-
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
{
@@ -3531,9 +3533,19 @@ out:
nfs4_set_cached_acl(inode, acl);
}
+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf. On a NULL buf, we ask the server for a page of ACL
+ * data, guessing that the request can be serviced by a single page. If so,
+ * we cache up to that page of ACL data, and the second call to getxattr is
+ * serviced by the cache. If not, we throw away the page and cache only the
+ * required length. The next getxattr call will then make another round
+ * trip to the server, this time with an input buf of the required size.
+ */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES];
+ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
@@ -3548,41 +3560,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- struct page *localpage = NULL;
- int ret;
+ int ret = -ENOMEM, npages, i, acl_len = 0;
- if (buflen < PAGE_SIZE) {
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- localpage = alloc_page(GFP_KERNEL);
- resp_buf = page_address(localpage);
- if (localpage == NULL)
- return -ENOMEM;
- args.acl_pages[0] = localpage;
- args.acl_pgbase = 0;
- args.acl_len = PAGE_SIZE;
- } else {
- resp_buf = buf;
- buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+ * let's be prepared for a page of acl data. */
+ if (npages == 0)
+ npages = 1;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+ if (npages > 1) {
+ /* for decoding across pages */
+ args.acl_scratch = alloc_page(GFP_KERNEL);
+ if (!args.acl_scratch)
+ goto out_free;
}
- ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+ args.acl_len = npages * PAGE_SIZE;
+ args.acl_pgbase = 0;
+ /* Let decode_getacl know not to fail if the ACL data is larger than
+ * the page we send as a guess */
+ if (buf == NULL)
+ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+ resp_buf = page_address(pages[0]);
+
+ dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
+ __func__, buf, buflen, npages, args.acl_len);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+ &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
- if (res.acl_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, res.acl_len);
+
+ acl_len = res.acl_len - res.acl_data_offset;
+ if (acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
+ nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+ acl_len);
if (buf) {
ret = -ERANGE;
- if (res.acl_len > buflen)
+ if (acl_len > buflen)
goto out_free;
- if (localpage)
- memcpy(buf, resp_buf, res.acl_len);
+ _copy_from_pages(buf, pages, res.acl_data_offset,
+ res.acl_len);
}
- ret = res.acl_len;
+ ret = acl_len;
out_free:
- if (localpage)
- __free_page(localpage);
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ __free_page(pages[i]);
+ if (args.acl_scratch)
+ __free_page(args.acl_scratch);
return ret;
}
@@ -3613,6 +3644,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
+ /* -ENOENT is returned if there is no ACL or if there is an ACL
+ * but no cached acl data, just the acl length */
return ret;
return nfs4_get_acl_uncached(inode, buf, buflen);
}
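
[Editor's aside, not part of the patch: the two-call getxattr pattern described in the
comment above __nfs4_get_acl_uncached() looks roughly like the sketch below from
userspace. It is illustrative only; it assumes the "system.nfs4_acl" attribute name
exposed by the Linux NFS client and omits most error reporting.]

#include <stdlib.h>
#include <sys/xattr.h>

static int read_nfs4_acl(const char *path)
{
	ssize_t len;
	char *buf;

	/* First call with a NULL buffer: the kernel returns the ACL length. */
	len = getxattr(path, "system.nfs4_acl", NULL, 0);
	if (len < 0)
		return -1;

	buf = malloc(len);
	if (buf == NULL)
		return -1;

	/* Second call with a buffer of the reported size; typically served
	 * from the cache primed by the first call. */
	len = getxattr(path, "system.nfs4_acl", buf, len);
	if (len < 0) {
		free(buf);
		return -1;
	}

	/* ... parse the raw NFSv4 ACL XDR data in buf[0..len) ... */
	free(buf);
	return 0;
}
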
@@ -3950,7 +3983,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- schedule_timeout_killable(timeout);
+ freezable_schedule_timeout_killable(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
@@ -5013,23 +5046,6 @@ out:
return ret;
}
-/*
- * Reset the forechannel and backchannel slot tables
- */
-static int nfs4_reset_slot_tables(struct nfs4_session *session)
-{
- int status;
-
- status = nfs4_reset_slot_table(&session->fc_slot_table,
- session->fc_attrs.max_reqs, 1);
- if (status)
- return status;
-
- status = nfs4_reset_slot_table(&session->bc_slot_table,
- session->bc_attrs.max_reqs, 0);
- return status;
-}
-
/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
@@ -5075,29 +5091,35 @@ out:
}
/*
- * Initialize the forechannel and backchannel tables
+ * Initialize or reset the forechannel and backchannel tables
*/
-static int nfs4_init_slot_tables(struct nfs4_session *session)
+static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
struct nfs4_slot_table *tbl;
- int status = 0;
+ int status;
- tbl = &session->fc_slot_table;
+ dprintk("--> %s\n", __func__);
+ /* Fore channel */
+ tbl = &ses->fc_slot_table;
if (tbl->slots == NULL) {
- status = nfs4_init_slot_table(tbl,
- session->fc_attrs.max_reqs, 1);
+ status = nfs4_init_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
+ if (status) /* -ENOMEM */
+ return status;
+ } else {
+ status = nfs4_reset_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
if (status)
return status;
}
-
- tbl = &session->bc_slot_table;
+ /* Back channel */
+ tbl = &ses->bc_slot_table;
if (tbl->slots == NULL) {
- status = nfs4_init_slot_table(tbl,
- session->bc_attrs.max_reqs, 0);
+ status = nfs4_init_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
if (status)
- nfs4_destroy_slot_tables(session);
- }
-
+ /* Fore and back channel share a connection so get
+ * both slot tables or neither */
+ nfs4_destroy_slot_tables(ses);
+ } else
+ status = nfs4_reset_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
return status;
}
@@ -5285,13 +5307,9 @@ int nfs4_proc_create_session(struct nfs_client *clp)
if (status)
goto out;
- /* Init and reset the fore channel */
- status = nfs4_init_slot_tables(session);
- dprintk("slot table initialization returned %d\n", status);
- if (status)
- goto out;
- status = nfs4_reset_slot_tables(session);
- dprintk("slot table reset returned %d\n", status);
+ /* Init or reset the session slot tables */
+ status = nfs4_setup_session_slot_tables(session);
+ dprintk("slot table setup returned %d\n", status);
if (status)
goto out;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 39914be40b0..a53f33b4ac3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -49,6 +49,7 @@
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
+#include <linux/jiffies.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -377,31 +378,24 @@ nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
{
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
- struct nfs4_state_owner *sp, *res = NULL;
+ struct nfs4_state_owner *sp;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
- if (server < sp->so_server) {
- p = &parent->rb_left;
- continue;
- }
- if (server > sp->so_server) {
- p = &parent->rb_right;
- continue;
- }
if (cred < sp->so_cred)
p = &parent->rb_left;
else if (cred > sp->so_cred)
p = &parent->rb_right;
else {
+ if (!list_empty(&sp->so_lru))
+ list_del_init(&sp->so_lru);
atomic_inc(&sp->so_count);
- res = sp;
- break;
+ return sp;
}
}
- return res;
+ return NULL;
}
static struct nfs4_state_owner *
@@ -421,6 +415,8 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
else if (new->so_cred > sp->so_cred)
p = &parent->rb_right;
else {
+ if (!list_empty(&sp->so_lru))
+ list_del_init(&sp->so_lru);
atomic_inc(&sp->so_count);
return sp;
}
@@ -462,6 +458,7 @@ nfs4_alloc_state_owner(void)
spin_lock_init(&sp->so_sequence.lock);
INIT_LIST_HEAD(&sp->so_sequence.list);
atomic_set(&sp->so_count, 1);
+ INIT_LIST_HEAD(&sp->so_lru);
return sp;
}
@@ -479,6 +476,38 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp)
}
}
+static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
+{
+ rpc_destroy_wait_queue(&sp->so_sequence.wait);
+ put_rpccred(sp->so_cred);
+ kfree(sp);
+}
+
+static void nfs4_gc_state_owners(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *tmp;
+ unsigned long time_min, time_max;
+ LIST_HEAD(doomed);
+
+ spin_lock(&clp->cl_lock);
+ time_max = jiffies;
+ time_min = (long)time_max - (long)clp->cl_lease_time;
+ list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+ /* NB: LRU is sorted so that oldest is at the head */
+ if (time_in_range(sp->so_expires, time_min, time_max))
+ break;
+ list_move(&sp->so_lru, &doomed);
+ nfs4_remove_state_owner_locked(sp);
+ }
+ spin_unlock(&clp->cl_lock);
+
+ list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+ list_del(&sp->so_lru);
+ nfs4_free_state_owner(sp);
+ }
+}
+
/**
* nfs4_get_state_owner - Look up a state owner given a credential
* @server: nfs_server to search
@@ -496,10 +525,10 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
sp = nfs4_find_state_owner_locked(server, cred);
spin_unlock(&clp->cl_lock);
if (sp != NULL)
- return sp;
+ goto out;
new = nfs4_alloc_state_owner();
if (new == NULL)
- return NULL;
+ goto out;
new->so_server = server;
new->so_cred = cred;
spin_lock(&clp->cl_lock);
@@ -511,26 +540,58 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
rpc_destroy_wait_queue(&new->so_sequence.wait);
kfree(new);
}
+out:
+ nfs4_gc_state_owners(server);
return sp;
}
/**
* nfs4_put_state_owner - Release a nfs4_state_owner
* @sp: state owner data to release
- *
*/
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
- struct nfs_client *clp = sp->so_server->nfs_client;
- struct rpc_cred *cred = sp->so_cred;
+ struct nfs_server *server = sp->so_server;
+ struct nfs_client *clp = server->nfs_client;
if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
return;
- nfs4_remove_state_owner_locked(sp);
+
+ if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+ sp->so_expires = jiffies;
+ list_add_tail(&sp->so_lru, &server->state_owners_lru);
+ spin_unlock(&clp->cl_lock);
+ } else {
+ nfs4_remove_state_owner_locked(sp);
+ spin_unlock(&clp->cl_lock);
+ nfs4_free_state_owner(sp);
+ }
+}
+
+/**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @server: nfs_server with cached state owners to release
+ *
+ * Called at umount time. Remaining state owners will be on
+ * the LRU with ref count of zero.
+ */
+void nfs4_purge_state_owners(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *tmp;
+ LIST_HEAD(doomed);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+ list_move(&sp->so_lru, &doomed);
+ nfs4_remove_state_owner_locked(sp);
+ }
spin_unlock(&clp->cl_lock);
- rpc_destroy_wait_queue(&sp->so_sequence.wait);
- put_rpccred(cred);
- kfree(sp);
+
+ list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+ list_del(&sp->so_lru);
+ nfs4_free_state_owner(sp);
+ }
}
static struct nfs4_state *
@@ -1156,11 +1217,13 @@ restart:
if (status >= 0) {
status = nfs4_reclaim_locks(state, ops);
if (status >= 0) {
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
printk("%s: Lock reclaim failed!\n",
__func__);
}
+ spin_unlock(&state->state_lock);
nfs4_put_open_state(state);
goto restart;
}
@@ -1224,10 +1287,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
lock->ls_seqid.flags = 0;
lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
}
+ spin_unlock(&state->state_lock);
}
static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1415,14 @@ static void nfs4_warn_keyexpired(const char *s)
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
+ case 0:
+ break;
case -NFS4ERR_CB_PATH_DOWN:
nfs_handle_cb_pathdown(clp);
- return 0;
+ break;
case -NFS4ERR_NO_GRACE:
nfs4_state_end_reclaim_reboot(clp);
- return 0;
+ break;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1442,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
- return 0;
+ break;
case -EKEYEXPIRED:
/* Nothing we can do */
nfs4_warn_keyexpired(clp->cl_hostname);
- return 0;
+ break;
+ default:
+ return error;
}
- return error;
+ return 0;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1394,6 +1463,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ nfs4_purge_state_owners(server);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
@@ -1428,7 +1498,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
struct rpc_cred *cred;
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
- int status = -NFS4ERR_EXPIRED;
+ int status;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1508,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
+ status = -ENOKEY;
if (cred == NULL)
goto out;
}
@@ -1525,16 +1596,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
if (!flags)
return;
- else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
- else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
SEQ4_STATUS_ADMIN_STATE_REVOKED |
SEQ4_STATUS_LEASE_MOVED))
nfs41_handle_state_revoked(clp);
- else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
- else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
SEQ4_STATUS_BACKCHANNEL_FAULT |
SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1733,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
status = nfs4_check_lease(clp);
+ if (status < 0)
+ goto out_error;
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
- if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
- goto out_error;
}
/* Initialize or reset the session */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e6161b213ed..95e92e43840 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2298,7 +2298,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_getfattr(xdr, args->dir_bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
+ replen = hdr.replen + op_decode_hdr_maxsz + 1;
encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
+ xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+
encode_nops(&hdr);
}
@@ -3790,7 +3792,8 @@ out_overflow:
}
static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
- const struct nfs_server *server, uint32_t *uid, int may_sleep)
+ const struct nfs_server *server, uint32_t *uid,
+ struct nfs4_string *owner_name)
{
uint32_t len;
__be32 *p;
@@ -3807,8 +3810,12 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
- if (!may_sleep) {
- /* do nothing */
+ if (owner_name != NULL) {
+ owner_name->data = kmemdup(p, len, GFP_NOWAIT);
+ if (owner_name->data != NULL) {
+ owner_name->len = len;
+ ret = NFS_ATTR_FATTR_OWNER_NAME;
+ }
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
@@ -3828,7 +3835,8 @@ out_overflow:
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
- const struct nfs_server *server, uint32_t *gid, int may_sleep)
+ const struct nfs_server *server, uint32_t *gid,
+ struct nfs4_string *group_name)
{
uint32_t len;
__be32 *p;
@@ -3845,8 +3853,12 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
p = xdr_inline_decode(xdr, len);
if (unlikely(!p))
goto out_overflow;
- if (!may_sleep) {
- /* do nothing */
+ if (group_name != NULL) {
+ group_name->data = kmemdup(p, len, GFP_NOWAIT);
+ if (group_name->data != NULL) {
+ group_name->len = len;
+ ret = NFS_ATTR_FATTR_GROUP_NAME;
+ }
} else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
@@ -4283,7 +4295,7 @@ xdr_error:
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
struct nfs_fattr *fattr, struct nfs_fh *fh,
- const struct nfs_server *server, int may_sleep)
+ const struct nfs_server *server)
{
int status;
umode_t fmode = 0;
@@ -4350,12 +4362,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep);
+ status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, fattr->owner_name);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep);
+ status = decode_attr_group(xdr, bitmap, server, &fattr->gid, fattr->group_name);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
@@ -4396,7 +4408,7 @@ xdr_error:
}
static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
- struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
+ struct nfs_fh *fh, const struct nfs_server *server)
{
__be32 *savep;
uint32_t attrlen,
@@ -4415,7 +4427,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat
if (status < 0)
goto xdr_error;
- status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
+ status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server);
if (status < 0)
goto xdr_error;
@@ -4426,9 +4438,9 @@ xdr_error:
}
static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
- const struct nfs_server *server, int may_sleep)
+ const struct nfs_server *server)
{
- return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
+ return decode_getfattr_generic(xdr, fattr, NULL, server);
}
/*
@@ -4957,17 +4969,18 @@ decode_restorefh(struct xdr_stream *xdr)
}
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
- size_t *acl_len)
+ struct nfs_getaclres *res)
{
- __be32 *savep;
+ __be32 *savep, *bm_p;
uint32_t attrlen,
bitmap[3] = {0};
struct kvec *iov = req->rq_rcv_buf.head;
int status;
- *acl_len = 0;
+ res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
+ bm_p = xdr->p;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4979,18 +4992,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t hdrlen;
u32 recvd;
+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps. */
+ xdr->p = bm_p;
+ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+ res->acl_data_offset <<= 2;
+
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ attrlen += res->acl_data_offset;
recvd = req->rq_rcv_buf.len - hdrlen;
if (attrlen > recvd) {
- dprintk("NFS: server cheating in getattr"
- " acl reply: attrlen %u > recvd %u\n",
+ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ /* getxattr interface called with a NULL buf */
+ res->acl_len = attrlen;
+ goto out;
+ }
+ dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
attrlen, recvd);
return -EINVAL;
}
xdr_read_pages(xdr, attrlen);
- *acl_len = attrlen;
+ res->acl_len = attrlen;
} else
status = -EOPNOTSUPP;
@@ -5696,8 +5721,7 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
status = decode_open_downgrade(xdr, res);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -5723,8 +5747,7 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_access(xdr, res);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -5753,8 +5776,7 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
- status = decode_getfattr(xdr, res->fattr, res->server
- ,!RPC_IS_ASYNC(rqstp->rq_task));
+ status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -5780,8 +5802,7 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
goto out;
status = decode_getfh(xdr, res->fh);
if (status == 0)
- status = decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -5807,8 +5828,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_remove(xdr, &res->cinfo);
if (status)
goto out;
- decode_getfattr(xdr, res->dir_attr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -5841,14 +5861,12 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
/* Current FH is target directory */
- if (decode_getfattr(xdr, res->new_fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+ if (decode_getfattr(xdr, res->new_fattr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
- decode_getfattr(xdr, res->old_fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->old_fattr, res->server);
out:
return status;
}
@@ -5884,14 +5902,12 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(xdr, res->dir_attr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+ if (decode_getfattr(xdr, res->dir_attr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -5923,14 +5939,12 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
- if (decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+ if (decode_getfattr(xdr, res->fattr, res->server))
goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
- decode_getfattr(xdr, res->dir_fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->dir_fattr, res->server);
out:
return status;
}
@@ -5962,8 +5976,7 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ status = decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6028,7 +6041,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_getacl(xdr, rqstp, &res->acl_len);
+ status = decode_getacl(xdr, rqstp, res);
out:
return status;
@@ -6061,8 +6074,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6093,13 +6105,11 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
goto out;
if (decode_getfh(xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(xdr, res->f_attr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+ if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
goto out;
if (decode_restorefh(xdr) != 0)
goto out;
- decode_getfattr(xdr, res->dir_attr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -6147,8 +6157,7 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
status = decode_open(xdr, res);
if (status)
goto out;
- decode_getfattr(xdr, res->f_attr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
}
@@ -6175,8 +6184,7 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
status = decode_setattr(xdr);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6356,8 +6364,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
if (res->fattr)
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
if (!status)
status = res->count;
out:
@@ -6386,8 +6393,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
if (res->fattr)
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6546,8 +6552,7 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
status = decode_delegreturn(xdr);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6576,8 +6581,7 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
goto out;
xdr_enter_page(xdr, PAGE_SIZE);
status = decode_getfattr(xdr, &res->fs_locations->fattr,
- res->fs_locations->server,
- !RPC_IS_ASYNC(req->rq_task));
+ res->fs_locations->server);
out:
return status;
}
@@ -6826,8 +6830,7 @@ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
status = decode_layoutcommit(xdr, rqstp, res);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6958,7 +6961,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
goto out_overflow;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
- entry->server, 1) < 0)
+ entry->server) < 0)
goto out_overflow;
if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
entry->ino = entry->fattr->mounted_on_fileid;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index c807ab93140..55d01280a60 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
static struct pnfs_layoutdriver_type objlayout_type = {
.id = LAYOUT_OSD2_OBJECTS,
.name = "LAYOUT_OSD2_OBJECTS",
- .flags = PNFS_LAYOUTRET_ON_SETATTR,
+ .flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR,
.alloc_layout_hdr = objlayout_alloc_layout_hdr,
.free_layout_hdr = objlayout_free_layout_hdr,
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 72074e3a04f..b3c29039f5b 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
oir->status = rdata->task.tk_status = status;
if (status >= 0)
rdata->res.count = status;
+ else
+ rdata->pnfs_error = status;
objlayout_iodone(oir);
/* must not use oir after this point */
@@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
if (status >= 0) {
wdata->res.count = status;
wdata->verf.committed = oir->committed;
+ } else {
+ wdata->pnfs_error = status;
}
objlayout_iodone(oir);
/* must not use oir after this point */
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 8e672a2b2d6..17149a49006 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1166,6 +1166,33 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
+static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+{
+ struct nfs_pageio_descriptor pgio;
+ LIST_HEAD(failed);
+
+ /* Resend all requests through the MDS */
+ nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+ while (!list_empty(head)) {
+ struct nfs_page *req = nfs_list_entry(head->next);
+
+ nfs_list_remove_request(req);
+ if (!nfs_pageio_add_request(&pgio, req))
+ nfs_list_add_request(req, &failed);
+ }
+ nfs_pageio_complete(&pgio);
+
+ if (!list_empty(&failed)) {
+ /* For some reason our attempt to resend these pages failed. Mark the
+ * overall send request as having failed, and let
+ * nfs_writeback_release_full deal with the error.
+ */
+ list_move(&failed, head);
+ return -EIO;
+ }
+ return 0;
+}
+
/*
* Called by non rpc-based layout drivers
*/
@@ -1175,9 +1202,17 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
pnfs_set_layoutcommit(data);
data->mds_ops->rpc_call_done(&data->task, data);
} else {
- put_lseg(data->lseg);
- data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
+ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ /* Don't lo_commit on error; the server will need to
+ * perform file recovery.
+ */
+ clear_bit(NFS_INO_LAYOUTCOMMIT,
+ &NFS_I(data->inode)->flags);
+ pnfs_return_layout(data->inode);
+ }
+ data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
}
data->mds_ops->rpc_release(data);
}
@@ -1267,6 +1302,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
+ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR)
+ pnfs_return_layout(data->inode);
nfs_pageio_init_read_mds(&pgio, data->inode);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 1509530cb11..53d593a0a4f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -68,6 +68,7 @@ enum {
enum layoutdriver_policy_flags {
/* Should the pNFS client commit and return the layout upon a setattr */
PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
+ PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
};
struct nfs4_deviceid_node;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f48125da198..0c672588fe5 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -41,6 +41,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
+#include <linux/freezer.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
res = rpc_call_sync(clnt, msg, flags);
if (res != -EKEYEXPIRED)
break;
- schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
} while (!fatal_signal_pending(current));
return res;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 134777406ee..3dfa4f112c0 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -41,7 +41,6 @@
#include <linux/lockd/bind.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
@@ -263,10 +262,10 @@ static match_table_t nfs_local_lock_tokens = {
static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
-static int nfs_show_options(struct seq_file *, struct vfsmount *);
-static int nfs_show_devname(struct seq_file *, struct vfsmount *);
-static int nfs_show_path(struct seq_file *, struct vfsmount *);
-static int nfs_show_stats(struct seq_file *, struct vfsmount *);
+static int nfs_show_options(struct seq_file *, struct dentry *);
+static int nfs_show_devname(struct seq_file *, struct dentry *);
+static int nfs_show_path(struct seq_file *, struct dentry *);
+static int nfs_show_stats(struct seq_file *, struct dentry *);
static struct dentry *nfs_fs_mount(struct file_system_type *,
int, const char *, void *);
static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -721,9 +720,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
/*
* Describe the mount options on this VFS mountpoint
*/
-static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_options(struct seq_file *m, struct dentry *root)
{
- struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+ struct nfs_server *nfss = NFS_SB(root->d_sb);
nfs_show_mount_options(m, nfss, 0);
@@ -761,14 +760,14 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
#endif
#endif
-static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_devname(struct seq_file *m, struct dentry *root)
{
char *page = (char *) __get_free_page(GFP_KERNEL);
char *devname, *dummy;
int err = 0;
if (!page)
return -ENOMEM;
- devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE);
+ devname = nfs_path(&dummy, root, page, PAGE_SIZE);
if (IS_ERR(devname))
err = PTR_ERR(devname);
else
@@ -777,7 +776,7 @@ static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
return err;
}
-static int nfs_show_path(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_path(struct seq_file *m, struct dentry *dentry)
{
seq_puts(m, "/");
return 0;
@@ -786,10 +785,10 @@ static int nfs_show_path(struct seq_file *m, struct vfsmount *mnt)
/*
* Present statistical information for this VFS mountpoint
*/
-static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
+static int nfs_show_stats(struct seq_file *m, struct dentry *root)
{
int i, cpu;
- struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+ struct nfs_server *nfss = NFS_SB(root->d_sb);
struct rpc_auth *auth = nfss->client->cl_auth;
struct nfs_iostats totals = { };
@@ -799,10 +798,10 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
* Display all mount option settings
*/
seq_printf(m, "\n\topts:\t");
- seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? "ro" : "rw");
- seq_puts(m, mnt->mnt_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
- seq_puts(m, mnt->mnt_sb->s_flags & MS_NOATIME ? ",noatime" : "");
- seq_puts(m, mnt->mnt_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+ seq_puts(m, root->d_sb->s_flags & MS_RDONLY ? "ro" : "rw");
+ seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
+ seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
+ seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
nfs_show_mount_options(m, nfss, 1);
seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
@@ -909,10 +908,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data->auth_flavor_len = 1;
data->version = version;
data->minorversion = 0;
+ security_init_mnt_opts(&data->lsm_opts);
}
return data;
}
+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+ if (data) {
+ kfree(data->client_address);
+ kfree(data->mount_server.hostname);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+ security_free_mnt_opts(&data->lsm_opts);
+ kfree(data);
+ }
+}
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -2220,9 +2233,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
- goto out_free_fh;
-
- security_init_mnt_opts(&data->lsm_opts);
+ goto out;
/* Validate the mount data */
error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
@@ -2234,8 +2245,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
#ifdef CONFIG_NFS_V4
if (data->version == 4) {
mntroot = nfs4_try_mount(flags, dev_name, data);
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
goto out;
}
#endif /* CONFIG_NFS_V4 */
@@ -2290,13 +2299,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;
out:
- kfree(data->nfs_server.hostname);
- kfree(data->mount_server.hostname);
- kfree(data->fscache_uniq);
- security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
+ nfs_free_parsed_mount_data(data);
nfs_free_fhandle(mntfh);
- kfree(data);
return mntroot;
out_err_nosb:
@@ -2623,9 +2627,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
mntfh = nfs_alloc_fhandle();
if (data == NULL || mntfh == NULL)
- goto out_free_fh;
-
- security_init_mnt_opts(&data->lsm_opts);
+ goto out;
/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
@@ -2677,13 +2679,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
s->s_flags |= MS_ACTIVE;
- security_free_mnt_opts(&data->lsm_opts);
nfs_free_fhandle(mntfh);
return mntroot;
out:
- security_free_mnt_opts(&data->lsm_opts);
-out_free_fh:
nfs_free_fhandle(mntfh);
return ERR_PTR(error);
@@ -2788,11 +2787,15 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
const char *export_path)
{
struct dentry *dentry;
- int ret = nfs_referral_loop_protect();
+ int err;
+
+ if (IS_ERR(root_mnt))
+ return ERR_CAST(root_mnt);
- if (ret) {
+ err = nfs_referral_loop_protect();
+ if (err) {
mntput(root_mnt);
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
dentry = mount_subtree(root_mnt, export_path);
@@ -2816,9 +2819,7 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
data->nfs_server.hostname);
data->nfs_server.export_path = export_path;
- res = ERR_CAST(root_mnt);
- if (!IS_ERR(root_mnt))
- res = nfs_follow_remote_path(root_mnt, export_path);
+ res = nfs_follow_remote_path(root_mnt, export_path);
dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
IS_ERR(res) ? PTR_ERR(res) : 0,
@@ -2838,7 +2839,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
data = nfs_alloc_parsed_mount_data(4);
if (data == NULL)
- goto out_free_data;
+ goto out;
/* Validate the mount data */
error = nfs4_validate_mount_data(raw_data, data, dev_name);
@@ -2852,12 +2853,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
error = PTR_ERR(res);
out:
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
-out_free_data:
- kfree(data);
+ nfs_free_parsed_mount_data(data);
dprintk("<-- nfs4_mount() = %d%s\n", error,
error != 0 ? " [error]" : "");
return res;
@@ -3079,9 +3075,7 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
flags, data, data->hostname);
data->mnt_path = export_path;
- res = ERR_CAST(root_mnt);
- if (!IS_ERR(root_mnt))
- res = nfs_follow_remote_path(root_mnt, export_path);
+ res = nfs_follow_remote_path(root_mnt, export_path);
dprintk("<-- nfs4_referral_mount() = %ld%s\n",
IS_ERR(res) ? PTR_ERR(res) : 0,
IS_ERR(res) ? " [error]" : "");
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1dda78db6a7..0c3885255f9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1052,7 +1052,7 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
.pg_doio = nfs_generic_pg_writepages,
};
-static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
@@ -1166,13 +1166,7 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
static void nfs_writeback_release_full(void *calldata)
{
struct nfs_write_data *data = calldata;
- int ret, status = data->task.tk_status;
- struct nfs_pageio_descriptor pgio;
-
- if (data->pnfs_error) {
- nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
- pgio.pg_recoalesce = 1;
- }
+ int status = data->task.tk_status;
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
@@ -1188,11 +1182,6 @@ static void nfs_writeback_release_full(void *calldata)
req->wb_bytes,
(long long)req_offset(req));
- if (data->pnfs_error) {
- dprintk(", pnfs error = %d\n", data->pnfs_error);
- goto next;
- }
-
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
@@ -1212,19 +1201,7 @@ remove_request:
next:
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
- if (data->pnfs_error) {
- lock_page(page);
- nfs_pageio_cond_complete(&pgio, page->index);
- ret = nfs_page_async_flush(&pgio, page, 0);
- if (ret) {
- nfs_set_pageerror(page);
- dprintk("rewrite to MDS error = %d\n", ret);
- }
- unlock_page(page);
- }
}
- if (data->pnfs_error)
- nfs_pageio_complete(&pgio);
nfs_writedata_release(calldata);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7748d6a18d9..6f3ebb48b12 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -718,7 +718,7 @@ int set_callback_cred(void)
{
if (callback_cred)
return 0;
- callback_cred = rpc_lookup_machine_cred();
+ callback_cred = rpc_lookup_machine_cred("nfs");
if (!callback_cred)
return -ENOMEM;
return 0;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index fa383361bc6..c5e28ed8bca 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -838,7 +838,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status;
}
}
- status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
+ status = fh_want_write(&cstate->current_fh);
if (status)
return status;
status = nfs_ok;
@@ -856,7 +856,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
0, (time_t)0);
out:
- mnt_drop_write(cstate->current_fh.fh_export->ex_path.mnt);
+ fh_drop_write(&cstate->current_fh);
return status;
}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ed083b9a731..80a0be9ed00 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -147,11 +147,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
status = -EEXIST;
if (dentry->d_inode)
goto out_put;
- status = mnt_want_write(rec_file->f_path.mnt);
+ status = mnt_want_write_file(rec_file);
if (status)
goto out_put;
status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU);
- mnt_drop_write(rec_file->f_path.mnt);
+ mnt_drop_write_file(rec_file);
out_put:
dput(dentry);
out_unlock:
@@ -268,7 +268,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
if (!rec_file || !clp->cl_firststate)
return;
- status = mnt_want_write(rec_file->f_path.mnt);
+ status = mnt_want_write_file(rec_file);
if (status)
goto out;
clp->cl_firststate = 0;
@@ -281,7 +281,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
nfs4_reset_creds(original_cred);
if (status == 0)
vfs_fsync(rec_file, 0);
- mnt_drop_write(rec_file->f_path.mnt);
+ mnt_drop_write_file(rec_file);
out:
if (status)
printk("NFSD: Failed to remove expired client state directory"
@@ -311,13 +311,13 @@ nfsd4_recdir_purge_old(void) {
if (!rec_file)
return;
- status = mnt_want_write(rec_file->f_path.mnt);
+ status = mnt_want_write_file(rec_file);
if (status)
goto out;
status = nfsd4_list_rec_dir(purge_old);
if (status == 0)
vfs_fsync(rec_file, 0);
- mnt_drop_write(rec_file->f_path.mnt);
+ mnt_drop_write_file(rec_file);
out:
if (status)
printk("nfsd4: failed to purge old clients from recovery"
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 47e94e33a97..9ca16dc09e0 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -658,7 +658,7 @@ static int nfsd4_sanitize_slot_size(u32 size)
/*
* XXX: If we run out of reserved DRC memory we could (up to a point)
* re-negotiate active sessions and reduce their slot usage to make
- * rooom for new connections. For now we just fail the create session.
+ * room for new connections. For now we just fail the create session.
*/
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c45a2ea4a09..bb4a11d58a5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -272,7 +272,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
* 2. Is that directory a mount point, or
* 3. Is that directory the root of an exported file system?
*/
- error = nlmsvc_unlock_all_by_sb(path.mnt->mnt_sb);
+ error = nlmsvc_unlock_all_by_sb(path.dentry->d_sb);
path_put(&path);
return error;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c763de5c115..68454e75fce 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -59,7 +59,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int requested)
+nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, umode_t requested)
{
mode &= S_IFMT;
@@ -293,7 +293,7 @@ out:
* include/linux/nfsd/nfsd.h.
*/
__be32
-fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
+fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
{
struct svc_export *exp;
struct dentry *dentry;
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index c16f8d8331b..e5e6707ba68 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -102,7 +102,7 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
/*
* Function prototypes
*/
-__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
+__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7a2e442623c..d25a723b68a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -307,7 +307,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
struct dentry *dentry;
struct inode *inode;
int accmode = NFSD_MAY_SATTR;
- int ftype = 0;
+ umode_t ftype = 0;
__be32 err;
int host_err;
int size_change = 0;
@@ -730,7 +730,7 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
* N.B. After this call fhp needs an fh_put
*/
__be32
-nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int access, struct file **filp)
{
struct dentry *dentry;
@@ -1300,7 +1300,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
@@ -1325,7 +1325,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
break;
}
if (host_err < 0) {
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
goto out_nfserr;
}
@@ -1339,7 +1339,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
err2 = nfserrno(commit_metadata(fhp));
if (err2)
err = err2;
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
/*
* Update the file handle to get the new inode info.
*/
@@ -1430,7 +1430,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
v_atime = verifier[1]&0x7fffffff;
}
- host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
if (dchild->d_inode) {
@@ -1469,13 +1469,13 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
case NFS3_CREATE_GUARDED:
err = nfserr_exist;
}
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
goto out;
}
host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
if (host_err < 0) {
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
goto out_nfserr;
}
if (created)
@@ -1503,7 +1503,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!err)
err = nfserrno(commit_metadata(fhp));
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
/*
* Update the filehandle to get the new inode info.
*/
@@ -1600,7 +1600,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dnew))
goto out_nfserr;
- host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
@@ -1621,7 +1621,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = nfserrno(commit_metadata(fhp));
fh_unlock(fhp);
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
dput(dnew);
@@ -1674,7 +1674,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
dold = tfhp->fh_dentry;
- host_err = mnt_want_write(tfhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(tfhp);
if (host_err) {
err = nfserrno(host_err);
goto out_dput;
@@ -1699,7 +1699,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
err = nfserrno(host_err);
}
out_drop_write:
- mnt_drop_write(tfhp->fh_export->ex_path.mnt);
+ fh_drop_write(tfhp);
out_dput:
dput(dnew);
out_unlock:
@@ -1776,7 +1776,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
host_err = -EXDEV;
if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
goto out_dput_new;
- host_err = mnt_want_write(ffhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(ffhp);
if (host_err)
goto out_dput_new;
@@ -1795,7 +1795,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
host_err = commit_metadata(ffhp);
}
out_drop_write:
- mnt_drop_write(ffhp->fh_export->ex_path.mnt);
+ fh_drop_write(ffhp);
out_dput_new:
dput(ndentry);
out_dput_old:
@@ -1854,7 +1854,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!type)
type = rdentry->d_inode->i_mode & S_IFMT;
- host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ host_err = fh_want_write(fhp);
if (host_err)
goto out_put;
@@ -1868,7 +1868,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!host_err)
host_err = commit_metadata(fhp);
out_drop_write:
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
out_put:
dput(rdentry);
@@ -2270,7 +2270,7 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
} else
size = 0;
- error = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ error = fh_want_write(fhp);
if (error)
goto getout;
if (size)
@@ -2284,7 +2284,7 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
error = 0;
}
}
- mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ fh_drop_write(fhp);
getout:
kfree(value);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 3f54ad03bb2..1dcd238e11a 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -66,7 +66,7 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
loff_t, unsigned long);
#endif /* CONFIG_NFSD_V3 */
-__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
+__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
int, struct file **);
void nfsd_close(struct file *);
__be32 nfsd_read(struct svc_rqst *, struct svc_fh *,
@@ -106,4 +106,14 @@ struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
#endif
+static inline int fh_want_write(struct svc_fh *fh)
+{
+ return mnt_want_write(fh->fh_export->ex_path.mnt);
+}
+
+static inline void fh_drop_write(struct svc_fh *fh)
+{
+ mnt_drop_write(fh->fh_export->ex_path.mnt);
+}
+
#endif /* LINUX_NFSD_VFS_H */
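Note: fh_want_write()/fh_drop_write() fold the repeated fhp->fh_export->ex_path.mnt dereference into one helper pair so every call site stays balanced. A hedged user-space analog of the same refactor, with hypothetical struct names (not kernel types):

/* Hypothetical analog: wrap "dig out the mount and take/drop write
 * access" behind two helpers keyed on the handle itself. */
#include <stdio.h>

struct mount  { int write_holds; };
struct export { struct mount *mnt; };
struct handle { struct export *exp; };

static int  mnt_want_write(struct mount *m) { m->write_holds++; return 0; }
static void mnt_drop_write(struct mount *m) { m->write_holds--; }

static int  fh_want_write(struct handle *fh) { return mnt_want_write(fh->exp->mnt); }
static void fh_drop_write(struct handle *fh) { mnt_drop_write(fh->exp->mnt); }

int main(void)
{
	struct mount  m  = { 0 };
	struct export e  = { &m };
	struct handle fh = { &e };

	if (fh_want_write(&fh) == 0) {
		/* ... modify the filesystem ... */
		fh_drop_write(&fh);
	}
	printf("write holds after: %d\n", m.write_holds); /* 0 */
	return 0;
}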
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 3a1923943b1..ca35b3a46d1 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -251,7 +251,7 @@ nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
- mode_t mode = inode->i_mode;
+ umode_t mode = inode->i_mode;
de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b50ffb72e5b..8f7b95ac1f7 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -291,7 +291,7 @@ const struct address_space_operations nilfs_aops = {
.is_partially_uptodate = block_is_partially_uptodate,
};
-struct inode *nilfs_new_inode(struct inode *dir, int mode)
+struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct the_nilfs *nilfs = sb->s_fs_info;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 41d6743d303..886649627c3 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -27,7 +27,7 @@
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
#include <linux/compat.h> /* compat_ptr() */
-#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
+#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
@@ -119,7 +119,7 @@ static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
if (get_user(flags, (int __user *)argp))
return -EFAULT;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -154,7 +154,7 @@ static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
ret = nilfs_transaction_commit(inode->i_sb);
out:
mutex_unlock(&inode->i_mutex);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -174,7 +174,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -194,7 +194,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
up_read(&inode->i_sb->s_umount);
out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -210,7 +210,7 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -225,7 +225,7 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -591,7 +591,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
goto out_free;
+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+ goto out_free;
+
len = argv[n].v_size * argv[n].v_nmembs;
base = (void __user *)(unsigned long)argv[n].v_base;
if (len == 0) {
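Note: the added bound rejects v_nmembs values for which len = v_size * v_nmembs would overflow a 32-bit size. A standalone sketch of that guard (checked_len is a hypothetical helper; the explicit size == 0 test is an extra assumption, not part of the hunk):

/* Refuse nmembs if size * nmembs would not fit in an unsigned int. */
#include <limits.h>
#include <stdio.h>

static int checked_len(unsigned int size, unsigned int nmembs,
		       unsigned int *len)
{
	if (size == 0)
		return -1;              /* nothing sensible to compute */
	if (nmembs >= UINT_MAX / size)
		return -1;              /* size * nmembs would overflow */
	*len = size * nmembs;
	return 0;
}

int main(void)
{
	unsigned int len;
	printf("%d\n", checked_len(16, 1000, &len));         /* 0, len = 16000 */
	printf("%d\n", checked_len(16, UINT_MAX / 8, &len)); /* -1, rejected */
	return 0;
}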
@@ -672,7 +675,7 @@ out_free:
vfree(kbufs[n]);
kfree(kbufs[4]);
out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return ret;
}
@@ -707,7 +710,7 @@ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
goto out;
- ret = mnt_want_write(filp->f_path.mnt);
+ ret = mnt_want_write_file(filp);
if (ret)
goto out;
@@ -718,7 +721,7 @@ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
ret = nilfs_resize_fs(inode->i_sb, newsize);
out_drop_write:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
out:
return ret;
}
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
+ case NILFS_IOCTL_CHANGE_CPMODE:
+ case NILFS_IOCTL_DELETE_CHECKPOINT:
+ case NILFS_IOCTL_GET_CPINFO:
+ case NILFS_IOCTL_GET_CPSTAT:
+ case NILFS_IOCTL_GET_SUINFO:
+ case NILFS_IOCTL_GET_SUSTAT:
+ case NILFS_IOCTL_GET_VINFO:
+ case NILFS_IOCTL_GET_BDESCS:
+ case NILFS_IOCTL_CLEAN_SEGMENTS:
+ case NILFS_IOCTL_SYNC:
+ case NILFS_IOCTL_RESIZE:
+ case NILFS_IOCTL_SET_ALLOC_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 768982de10e..1cd3f624dff 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -84,7 +84,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -112,7 +112,7 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
}
static int
-nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
+nilfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -213,7 +213,7 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
return err;
}
-static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct nilfs_transaction_info ti;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 3777d138f89..250add84da7 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -246,7 +246,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *, struct nilfs_argv *,
/* inode.c */
void nilfs_inode_add_blocks(struct inode *inode, int n);
void nilfs_inode_sub_blocks(struct inode *inode, int n);
-extern struct inode *nilfs_new_inode(struct inode *, int);
+extern struct inode *nilfs_new_inode(struct inode *, umode_t);
extern void nilfs_free_inode(struct inode *);
extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern void nilfs_set_inode_flags(struct inode *);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bb24ab6c282..0e72ad6f22a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
if (freezing(current)) {
spin_unlock(&sci->sc_state_lock);
- refrigerator();
+ try_to_freeze();
spin_lock(&sci->sc_state_lock);
} else {
DEFINE_WAIT(wait);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8351c44a732..08e3d4f9df1 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -175,8 +175,6 @@ static void nilfs_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
-
if (mdi) {
kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
kfree(mdi);
@@ -650,11 +648,11 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
- struct super_block *sb = vfs->mnt_sb;
+ struct super_block *sb = dentry->d_sb;
struct the_nilfs *nilfs = sb->s_fs_info;
- struct nilfs_root *root = NILFS_I(vfs->mnt_root->d_inode)->i_root;
+ struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root;
if (!nilfs_test_opt(nilfs, BARRIER))
seq_puts(seq, ",nobarrier");
diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 44a88a9fa2c..fea6bd5831d 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -52,7 +52,7 @@ static const struct utf8_table utf8_table[] =
#define SURROGATE_LOW 0x00000400
#define SURROGATE_BITS 0x000003ff
-int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
+int utf8_to_utf32(const u8 *s, int inlen, unicode_t *pu)
{
unsigned long l;
int c0, c, nc;
@@ -71,7 +71,7 @@ int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
*pu = (unicode_t) l;
return nc;
}
- if (len <= nc)
+ if (inlen <= nc)
return -1;
s++;
c = (*s ^ 0x80) & 0xFF;
@@ -83,7 +83,7 @@ int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
}
EXPORT_SYMBOL(utf8_to_utf32);
-int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
+int utf32_to_utf8(unicode_t u, u8 *s, int maxout)
{
unsigned long l;
int c, nc;
@@ -97,7 +97,7 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
return -1;
nc = 0;
- for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
+ for (t = utf8_table; t->cmask && maxout; t++, maxout--) {
nc++;
if (l <= t->lmask) {
c = t->shift;
@@ -114,34 +114,57 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
}
EXPORT_SYMBOL(utf32_to_utf8);
-int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
+static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian)
+{
+ switch (endian) {
+ default:
+ *s = (wchar_t) c;
+ break;
+ case UTF16_LITTLE_ENDIAN:
+ *s = __cpu_to_le16(c);
+ break;
+ case UTF16_BIG_ENDIAN:
+ *s = __cpu_to_be16(c);
+ break;
+ }
+}
+
+int utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
+ wchar_t *pwcs, int maxout)
{
u16 *op;
int size;
unicode_t u;
op = pwcs;
- while (*s && len > 0) {
+ while (inlen > 0 && maxout > 0 && *s) {
if (*s & 0x80) {
- size = utf8_to_utf32(s, len, &u);
+ size = utf8_to_utf32(s, inlen, &u);
if (size < 0)
return -EINVAL;
+ s += size;
+ inlen -= size;
if (u >= PLANE_SIZE) {
+ if (maxout < 2)
+ break;
u -= PLANE_SIZE;
- *op++ = (wchar_t) (SURROGATE_PAIR |
- ((u >> 10) & SURROGATE_BITS));
- *op++ = (wchar_t) (SURROGATE_PAIR |
+ put_utf16(op++, SURROGATE_PAIR |
+ ((u >> 10) & SURROGATE_BITS),
+ endian);
+ put_utf16(op++, SURROGATE_PAIR |
SURROGATE_LOW |
- (u & SURROGATE_BITS));
+ (u & SURROGATE_BITS),
+ endian);
+ maxout -= 2;
} else {
- *op++ = (wchar_t) u;
+ put_utf16(op++, u, endian);
+ maxout--;
}
- s += size;
- len -= size;
} else {
- *op++ = *s++;
- len--;
+ put_utf16(op++, *s++, endian);
+ inlen--;
+ maxout--;
}
}
return op - pwcs;
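Note: utf8s_to_utf16s() now takes an explicit endianness and an output bound, and splits code points above the BMP into surrogate pairs via put_utf16(). A standalone sketch of the surrogate arithmetic and byte ordering (constants mirror the ones above but are redefined locally; encode_astral is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PLANE_SIZE     0x00010000u
#define SURROGATE_PAIR 0x0000d800u
#define SURROGATE_LOW  0x00000400u
#define SURROGATE_BITS 0x000003ffu

enum utf16_endian { UTF16_LITTLE_ENDIAN, UTF16_BIG_ENDIAN };

static void put_utf16(uint8_t *out, unsigned int c, enum utf16_endian e)
{
	if (e == UTF16_LITTLE_ENDIAN) {
		out[0] = c & 0xff;
		out[1] = c >> 8;
	} else {
		out[0] = c >> 8;
		out[1] = c & 0xff;
	}
}

/* Returns the number of bytes written (4 for a surrogate pair). */
static int encode_astral(unsigned int u, uint8_t *out, enum utf16_endian e)
{
	if (u < PLANE_SIZE)
		return 0;               /* BMP code points not handled here */
	u -= PLANE_SIZE;
	put_utf16(out,     SURROGATE_PAIR | ((u >> 10) & SURROGATE_BITS), e);
	put_utf16(out + 2, SURROGATE_PAIR | SURROGATE_LOW | (u & SURROGATE_BITS), e);
	return 4;
}

int main(void)
{
	uint8_t buf[4];
	int n = encode_astral(0x1F600, buf, UTF16_LITTLE_ENDIAN);
	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");                   /* expected: 3d d8 00 de (D83D DE00) */
	return 0;
}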
@@ -160,27 +183,27 @@ static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian)
}
}
-int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian,
- u8 *s, int maxlen)
+int utf16s_to_utf8s(const wchar_t *pwcs, int inlen, enum utf16_endian endian,
+ u8 *s, int maxout)
{
u8 *op;
int size;
unsigned long u, v;
op = s;
- while (len > 0 && maxlen > 0) {
+ while (inlen > 0 && maxout > 0) {
u = get_utf16(*pwcs, endian);
if (!u)
break;
pwcs++;
- len--;
+ inlen--;
if (u > 0x7f) {
if ((u & SURROGATE_MASK) == SURROGATE_PAIR) {
if (u & SURROGATE_LOW) {
/* Ignore character and move on */
continue;
}
- if (len <= 0)
+ if (inlen <= 0)
break;
v = get_utf16(*pwcs, endian);
if ((v & SURROGATE_MASK) != SURROGATE_PAIR ||
@@ -191,18 +214,18 @@ int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian,
u = PLANE_SIZE + ((u & SURROGATE_BITS) << 10)
+ (v & SURROGATE_BITS);
pwcs++;
- len--;
+ inlen--;
}
- size = utf32_to_utf8(u, op, maxlen);
+ size = utf32_to_utf8(u, op, maxout);
if (size == -1) {
/* Ignore character and move on */
} else {
op += size;
- maxlen -= size;
+ maxout -= size;
}
} else {
*op++ = (u8) u;
- maxlen--;
+ maxout--;
}
}
return op - s;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9fde1c00a29..3568c8a8b13 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -16,6 +16,8 @@
#include <asm/ioctls.h>
+#include "../../mount.h"
+
#define FANOTIFY_DEFAULT_MAX_EVENTS 16384
#define FANOTIFY_DEFAULT_MAX_MARKS 8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS 128
@@ -546,7 +548,7 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
fsnotify_put_mark(fsn_mark);
- if (removed & mnt->mnt_fsnotify_mask)
+ if (removed & real_mount(mnt)->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
return 0;
@@ -623,7 +625,7 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
- if (added & ~mnt->mnt_fsnotify_mask)
+ if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
fsnotify_recalc_vfsmount_mask(mnt);
err:
fsnotify_put_mark(fsn_mark);
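Note: real_mount() recovers the private struct mount that embeds the public struct vfsmount, the usual container_of() idiom. A standalone illustration with hypothetical field layouts (not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vfsmount { int mnt_flags; };

struct mount {
	unsigned int mnt_fsnotify_mask;
	struct vfsmount mnt;            /* embedded public part */
};

static struct mount *real_mount(struct vfsmount *m)
{
	return container_of(m, struct mount, mnt);
}

int main(void)
{
	struct mount m = { .mnt_fsnotify_mask = 0xabc, .mnt = { 0 } };
	struct vfsmount *pub = &m.mnt;

	printf("%x\n", real_mount(pub)->mnt_fsnotify_mask); /* abc */
	return 0;
}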
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 79b47cbb5cd..ccb14d3fc0d 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -26,6 +26,7 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
+#include "../mount.h"
/*
* Clear all of the marks on an inode when it is being evicted from core
@@ -205,13 +206,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
struct fsnotify_group *inode_group, *vfsmount_group;
struct fsnotify_event *event = NULL;
- struct vfsmount *mnt;
+ struct mount *mnt;
int idx, ret = 0;
/* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
if (data_is == FSNOTIFY_EVENT_PATH)
- mnt = ((struct path *)data)->mnt;
+ mnt = real_mount(((struct path *)data)->mnt);
else
mnt = NULL;
@@ -262,11 +263,11 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
/* we didn't use the vfsmount_mark */
vfsmount_group = NULL;
} else if (vfsmount_group > inode_group) {
- ret = send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
+ ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data,
data_is, cookie, file_name, &event);
inode_group = NULL;
} else {
- ret = send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
+ ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark,
mask, data, data_is, cookie, file_name,
&event);
}
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 778fe6cae3b..b7b4b0e8554 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -28,15 +28,17 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
+#include "../mount.h"
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
struct fsnotify_mark *mark, *lmark;
struct hlist_node *pos, *n;
+ struct mount *m = real_mount(mnt);
LIST_HEAD(free_list);
spin_lock(&mnt->mnt_root->d_lock);
- hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+ hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
list_add(&mark->m.free_m_list, &free_list);
hlist_del_init_rcu(&mark->m.m_list);
fsnotify_get_mark(mark);
@@ -59,15 +61,16 @@ void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
*/
static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
{
+ struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&mnt->mnt_root->d_lock);
- hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+ hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
new_mask |= mark->mask;
- mnt->mnt_fsnotify_mask = new_mask;
+ m->mnt_fsnotify_mask = new_mask;
}
/*
@@ -101,12 +104,13 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
struct vfsmount *mnt)
{
+ struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
struct hlist_node *pos;
assert_spin_locked(&mnt->mnt_root->d_lock);
- hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+ hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
if (mark->group == group) {
fsnotify_get_mark(mark);
return mark;
@@ -140,6 +144,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct vfsmount *mnt,
int allow_dups)
{
+ struct mount *m = real_mount(mnt);
struct fsnotify_mark *lmark;
struct hlist_node *node, *last = NULL;
int ret = 0;
@@ -154,13 +159,13 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
mark->m.mnt = mnt;
/* is mark the first mark? */
- if (hlist_empty(&mnt->mnt_fsnotify_marks)) {
- hlist_add_head_rcu(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
+ if (hlist_empty(&m->mnt_fsnotify_marks)) {
+ hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
goto out;
}
/* should mark be in the middle of the current list? */
- hlist_for_each_entry(lmark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
+ hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
last = node;
if ((lmark->group == group) && !allow_dups) {
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 97e2dacbc86..2eaa6665294 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -335,7 +335,6 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb)
static void ntfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
}
@@ -2301,16 +2300,16 @@ void ntfs_evict_big_inode(struct inode *vi)
/**
* ntfs_show_options - show mount options in /proc/mounts
* @sf: seq_file in which to write our mount options
- * @mnt: vfs mount whose mount options to display
+ * @root: root of the mounted tree whose mount options to display
*
* Called by the VFS once for each mounted ntfs volume when someone reads
* /proc/mounts in order to display the NTFS specific mount options of each
- * mount. The mount options of the vfs mount @mnt are written to the seq file
+ * mount. The mount options of the fs specified by @root are written to the seq file
* @sf and success is returned.
*/
-int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)
+int ntfs_show_options(struct seq_file *sf, struct dentry *root)
{
- ntfs_volume *vol = NTFS_SB(mnt->mnt_sb);
+ ntfs_volume *vol = NTFS_SB(root->d_sb);
int i;
seq_printf(sf, ",uid=%i", vol->uid);
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index fe8e7e92888..db29695f845 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -298,7 +298,7 @@ extern void ntfs_clear_extent_inode(ntfs_inode *ni);
extern int ntfs_read_inode_mount(struct inode *vi);
-extern int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt);
+extern int ntfs_show_options(struct seq_file *sf, struct dentry *root);
#ifdef NTFS_RW
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index b52706da464..608be451609 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -104,7 +104,7 @@ static bool parse_options(ntfs_volume *vol, char *opt)
int errors = 0, sloppy = 0;
uid_t uid = (uid_t)-1;
gid_t gid = (gid_t)-1;
- mode_t fmask = (mode_t)-1, dmask = (mode_t)-1;
+ umode_t fmask = (umode_t)-1, dmask = (umode_t)-1;
int mft_zone_multiplier = -1, on_errors = -1;
int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1;
struct nls_table *nls_map = NULL, *old_nls;
@@ -287,9 +287,9 @@ no_mount_options:
vol->uid = uid;
if (gid != (gid_t)-1)
vol->gid = gid;
- if (fmask != (mode_t)-1)
+ if (fmask != (umode_t)-1)
vol->fmask = fmask;
- if (dmask != (mode_t)-1)
+ if (dmask != (umode_t)-1)
vol->dmask = dmask;
if (show_sys_files != -1) {
if (show_sys_files)
diff --git a/fs/ntfs/volume.h b/fs/ntfs/volume.h
index 406ab55dfb3..15e3ba8d521 100644
--- a/fs/ntfs/volume.h
+++ b/fs/ntfs/volume.h
@@ -48,8 +48,8 @@ typedef struct {
unsigned long flags; /* Miscellaneous flags, see below. */
uid_t uid; /* uid that files will be mounted as. */
gid_t gid; /* gid that files will be mounted as. */
- mode_t fmask; /* The mask for file permissions. */
- mode_t dmask; /* The mask for directory
+ umode_t fmask; /* The mask for file permissions. */
+ umode_t dmask; /* The mask for directory
permissions. */
u8 mft_zone_multiplier; /* Initial mft zone multiplier. */
u8 on_errors; /* What to do on filesystem errors. */
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index ed553c60de8..3165aebb43c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5699,7 +5699,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
- goto out;
+ goto out_commit;
}
dquot_free_space_nodirty(inode,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c1efe939c77..78b68af3b0e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -290,7 +290,15 @@ static int ocfs2_readpage(struct file *file, struct page *page)
}
if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+ /*
+ * Unlock the page and cycle ip_alloc_sem so that we don't
+ * busyloop waiting for ip_alloc_sem to unlock
+ */
ret = AOP_TRUNCATED_PAGE;
+ unlock_page(page);
+ unlock = 0;
+ down_read(&oi->ip_alloc_sem);
+ up_read(&oi->ip_alloc_sem);
goto out_inode_unlock;
}
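Note: the comment above states the intent of this hunk: on trylock failure, unlock the page and cycle ip_alloc_sem so the retried ->readpage waits for the writer instead of spinning. A rough user-space analog using a pthread rwlock (read_path is hypothetical, not ocfs2 code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Returns 0 on success, -EAGAIN if the caller should retry. */
static int read_path(void)
{
	if (pthread_rwlock_tryrdlock(&alloc_sem) != 0) {
		/* Wait for the current writer once, then retry from the top. */
		pthread_rwlock_rdlock(&alloc_sem);
		pthread_rwlock_unlock(&alloc_sem);
		return -EAGAIN;
	}
	/* ... do the read under the lock ... */
	pthread_rwlock_unlock(&alloc_sem);
	return 0;
}

int main(void)
{
	int ret;
	do {
		ret = read_path();
	} while (ret == -EAGAIN);
	printf("read path done: %d\n", ret);
	return 0;
}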
@@ -563,6 +571,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
int level;
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
/* this io's submitter should not have unlocked this before we could */
BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
@@ -570,6 +579,15 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
if (ocfs2_iocb_is_sem_locked(iocb))
ocfs2_iocb_clear_sem_locked(iocb);
+ if (ocfs2_iocb_is_unaligned_aio(iocb)) {
+ ocfs2_iocb_clear_unaligned_aio(iocb);
+
+ if (atomic_dec_and_test(&OCFS2_I(inode)->ip_unaligned_aio) &&
+ waitqueue_active(wq)) {
+ wake_up_all(wq);
+ }
+ }
+
ocfs2_iocb_clear_rw_locked(iocb);
level = ocfs2_iocb_rw_locked_level(iocb);
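Note: ocfs2_dio_end_io() now drops the unaligned-AIO count and wakes waiters only when the count reaches zero. A user-space analog of that atomic_dec_and_test-plus-wakeup pattern (names here are illustrative, not the ocfs2 symbols):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int unaligned_aio = 2;    /* two in-flight unaligned AIOs */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;

static void aio_complete(void)
{
	/* atomic_dec_and_test(): only the thread that hits zero wakes up waiters */
	if (atomic_fetch_sub(&unaligned_aio, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&wq);   /* wake_up_all() analog */
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	aio_complete();
	aio_complete();
	printf("in flight: %d\n", atomic_load(&unaligned_aio)); /* 0 */
	return 0;
}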
@@ -863,6 +881,12 @@ struct ocfs2_write_ctxt {
struct page *w_target_page;
/*
+ * w_target_locked is used in the page_mkwrite path to indicate that
+ * w_target_page must not be unlocked in ocfs2_write_end_nolock.
+ */
+ unsigned int w_target_locked:1;
+
+ /*
* ocfs2_write_end() uses this to know what the real range to
* write in the target should be.
*/
@@ -895,6 +919,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
+ int i;
+
+ /*
+ * w_target_locked is only set to true in the page_mkwrite() case.
+ * The intent is to allow us to lock the target page from write_begin()
+ * to write_end(). The caller must hold a ref on w_target_page.
+ */
+ if (wc->w_target_locked) {
+ BUG_ON(!wc->w_target_page);
+ for (i = 0; i < wc->w_num_pages; i++) {
+ if (wc->w_target_page == wc->w_pages[i]) {
+ wc->w_pages[i] = NULL;
+ break;
+ }
+ }
+ mark_page_accessed(wc->w_target_page);
+ page_cache_release(wc->w_target_page);
+ }
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
brelse(wc->w_di_bh);
@@ -1132,20 +1174,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
*/
lock_page(mmap_page);
+ /* Exit and let the caller retry */
if (mmap_page->mapping != mapping) {
+ WARN_ON(mmap_page->mapping);
unlock_page(mmap_page);
- /*
- * Sanity check - the locking in
- * ocfs2_pagemkwrite() should ensure
- * that this code doesn't trigger.
- */
- ret = -EINVAL;
- mlog_errno(ret);
+ ret = -EAGAIN;
goto out;
}
page_cache_get(mmap_page);
wc->w_pages[i] = mmap_page;
+ wc->w_target_locked = true;
} else {
wc->w_pages[i] = find_or_create_page(mapping, index,
GFP_NOFS);
@@ -1160,6 +1199,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
wc->w_target_page = wc->w_pages[i];
}
out:
+ if (ret)
+ wc->w_target_locked = false;
return ret;
}
@@ -1817,11 +1858,23 @@ try_again:
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
- if (ret) {
+ if (ret && ret != -EAGAIN) {
mlog_errno(ret);
goto out_quota;
}
+ /*
+ * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+ * the target page. In this case, we exit with no error and no target
+ * page. This will trigger the caller, page_mkwrite(), to re-try
+ * the operation.
+ */
+ if (ret == -EAGAIN) {
+ BUG_ON(wc->w_target_page);
+ ret = 0;
+ goto out_quota;
+ }
+
ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
len);
if (ret) {
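Note: the write_begin path above swallows the -EAGAIN from ocfs2_grab_pages_for_write() and reports success with no target page, which makes page_mkwrite() retry the whole fault. A small sketch of that retry contract with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct page { bool locked; };

static bool trylock_page(struct page *p)
{
	if (p->locked)
		return false;
	p->locked = true;
	return true;
}

/* Returns 0; *target is NULL when the caller should retry the fault. */
static int write_begin(struct page *mmap_page, struct page **target)
{
	*target = NULL;
	if (!trylock_page(mmap_page))
		return 0;               /* contention reported as "retry", not error */
	*target = mmap_page;
	return 0;
}

int main(void)
{
	struct page p = { .locked = true };     /* contended on first try */
	struct page *target;
	int tries = 0;

	do {
		tries++;
		write_begin(&p, &target);
		if (!target)
			p.locked = false;       /* pretend the other owner unlocked it */
	} while (!target);

	printf("locked target after %d tries\n", tries); /* 2 */
	return 0;
}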
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 75cf3ad987a..ffb2da370a9 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -78,6 +78,7 @@ enum ocfs2_iocb_lock_bits {
OCFS2_IOCB_RW_LOCK = 0,
OCFS2_IOCB_RW_LOCK_LEVEL,
OCFS2_IOCB_SEM,
+ OCFS2_IOCB_UNALIGNED_IO,
OCFS2_IOCB_NUM_LOCKS
};
@@ -91,4 +92,17 @@ enum ocfs2_iocb_lock_bits {
clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
#define ocfs2_iocb_is_sem_locked(iocb) \
test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
+
+#define ocfs2_iocb_set_unaligned_aio(iocb) \
+ set_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_unaligned_aio(iocb) \
+ clear_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_is_unaligned_aio(iocb) \
+ test_bit(OCFS2_IOCB_UNALIGNED_IO, (unsigned long *)&iocb->private)
+
+#define OCFS2_IOEND_WQ_HASH_SZ 37
+#define ocfs2_ioend_wq(v) (&ocfs2__ioend_wq[((unsigned long)(v)) %\
+ OCFS2_IOEND_WQ_HASH_SZ])
+extern wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
#endif /* OCFS2_FILE_H */
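Note: ocfs2_ioend_wq() hashes the inode pointer into a small fixed table of wait queues rather than giving every inode its own. A standalone sketch of that bucket selection (struct wait_queue here is a placeholder):

#include <stdio.h>

#define IOEND_WQ_HASH_SZ 37

struct wait_queue { int waiters; };
static struct wait_queue ioend_wq[IOEND_WQ_HASH_SZ];

static struct wait_queue *ioend_wq_for(const void *obj)
{
	return &ioend_wq[(unsigned long)obj % IOEND_WQ_HASH_SZ];
}

int main(void)
{
	int a, b;
	printf("a -> slot %ld\n", (long)(ioend_wq_for(&a) - ioend_wq));
	printf("b -> slot %ld\n", (long)(ioend_wq_for(&b) - ioend_wq));
	return 0;
}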
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bbff27..a4e855e3690 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -216,6 +216,7 @@ struct o2hb_region {
struct list_head hr_all_item;
unsigned hr_unclean_stop:1,
+ hr_aborted_start:1,
hr_item_pinned:1,
hr_item_dropped:1;
@@ -254,6 +255,10 @@ struct o2hb_region {
* a more complete api that doesn't lead to this sort of fragility. */
atomic_t hr_steady_iterations;
+ /* terminate o2hb thread if it does not reach steady state
+ * (hr_steady_iterations == 0) within hr_unsteady_iterations */
+ atomic_t hr_unsteady_iterations;
+
char hr_dev_name[BDEVNAME_SIZE];
unsigned int hr_timeout_ms;
@@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work)
static void o2hb_arm_write_timeout(struct o2hb_region *reg)
{
+ /* Arm writeout only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
+ return;
+
mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n",
O2HB_MAX_WRITE_TIMEOUT_MS);
@@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
return read == computed;
}
-/* We want to make sure that nobody is heartbeating on top of us --
- * this will help detect an invalid configuration. */
-static void o2hb_check_last_timestamp(struct o2hb_region *reg)
+/*
+ * Compare the slot data with what we wrote in the last iteration.
+ * If the match fails, print an appropriate error message. This is to
+ * detect errors like... another node heartbeating on the same slot,
+ * flaky device that is losing writes, etc.
+ * Returns 1 if check succeeds, 0 otherwise.
+ */
+static int o2hb_check_own_slot(struct o2hb_region *reg)
{
struct o2hb_disk_slot *slot;
struct o2hb_disk_heartbeat_block *hb_block;
@@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
slot = &reg->hr_slots[o2nm_this_node()];
/* Don't check on our 1st timestamp */
if (!slot->ds_last_time)
- return;
+ return 0;
hb_block = slot->ds_raw_block;
if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
hb_block->hb_node == slot->ds_node_num)
- return;
+ return 1;
#define ERRSTR1 "Another node is heartbeating on device"
#define ERRSTR2 "Heartbeat generation mismatch on device"
@@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg)
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
(unsigned long long)le64_to_cpu(hb_block->hb_seq));
+
+ return 0;
}
static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
o2nm_node_put(node);
}
-static void o2hb_set_quorum_device(struct o2hb_region *reg,
- struct o2hb_disk_slot *slot)
+static void o2hb_set_quorum_device(struct o2hb_region *reg)
{
- assert_spin_locked(&o2hb_live_lock);
-
if (!o2hb_global_heartbeat_active())
return;
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ /* Prevent race with o2hb_heartbeat_group_drop_item() */
+ if (kthread_should_stop())
+ return;
+
+ /* Tag region as quorum only after thread reaches steady state */
+ if (atomic_read(&reg->hr_steady_iterations) != 0)
return;
+ spin_lock(&o2hb_live_lock);
+
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ goto unlock;
+
/*
* A region can be added to the quorum only when it sees all
* live nodes heartbeat on it. In other words, the region has been
@@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
*/
if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap,
sizeof(o2hb_live_node_bitmap)))
- return;
-
- if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD)
- return;
+ goto unlock;
- printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
@@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
o2hb_region_unpin(NULL);
+unlock:
+ spin_unlock(&o2hb_live_lock);
}
static int o2hb_check_slot(struct o2hb_region *reg,
@@ -925,8 +947,6 @@ fire_callbacks:
slot->ds_equal_samples = 0;
}
out:
- o2hb_set_quorum_device(reg, slot);
-
spin_unlock(&o2hb_live_lock);
o2hb_run_event_list(&event);
@@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes,
static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
- int i, ret, highest_node, change = 0;
+ int i, ret, highest_node;
+ int membership_change = 0, own_slot_ok = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct o2hb_bio_wait_ctxt write_wc;
@@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
sizeof(configured_nodes));
if (ret) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/*
@@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
- return -EINVAL;
+ mlog(ML_NOTICE, "o2hb: No configured nodes found!\n");
+ ret = -EINVAL;
+ goto bail;
}
/* No sense in reading the slots of nodes that don't exist
@@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
/* With an up to date view of the slots, we can check that no
* other node has been improperly configured to heartbeat in
* our slot. */
- o2hb_check_last_timestamp(reg);
+ own_slot_ok = o2hb_check_own_slot(reg);
/* fill in the proper info for our next heartbeat */
o2hb_prepare_block(reg, reg->hr_generation);
- /* And fire off the write. Note that we don't wait on this I/O
- * until later. */
ret = o2hb_issue_node_write(reg, &write_wc);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ goto bail;
}
i = -1;
while((i = find_next_bit(configured_nodes,
O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
- change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+ membership_change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
}
/*
@@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
* disk */
mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
write_wc.wc_error, reg->hr_dev_name);
- return write_wc.wc_error;
+ ret = write_wc.wc_error;
+ goto bail;
}
- o2hb_arm_write_timeout(reg);
+ /* Skip disarming the timeout if own slot has stale/bad data */
+ if (own_slot_ok) {
+ o2hb_set_quorum_device(reg);
+ o2hb_arm_write_timeout(reg);
+ }
+bail:
/* let the person who launched us know when things are steady */
- if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
- if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (!ret && own_slot_ok && !membership_change) {
+ if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ wake_up(&o2hb_steady_queue);
+ }
+ }
+
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
+ printk(KERN_NOTICE "o2hb: Unable to stabilize "
+ "heartbeart on region %s (%s)\n",
+ config_item_name(&reg->hr_item),
+ reg->hr_dev_name);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
+ ret = -EIO;
+ }
}
- return 0;
+ return ret;
}
/* Subtract b from a, storing the result in a. a *must* have a larger
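Note: with hr_unsteady_iterations in place, an iteration only counts toward steady state when it was clean, and the start is aborted once the unsteady budget runs out. A standalone sketch of that accounting (start_heartbeat and the callbacks are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static int start_heartbeat(int steady, int unsteady,
			   bool (*iteration_ok)(int))
{
	for (int i = 0; steady > 0; i++) {
		if (iteration_ok(i))
			steady--;               /* progress toward steady state */
		if (--unsteady == 0)
			return -1;              /* could not stabilize: abort start */
	}
	return 0;                               /* reached steady state */
}

static bool always_ok(int i) { (void)i; return true; }
static bool never_ok(int i)  { (void)i; return false; }

int main(void)
{
	/* unsteady budget is double the steady target, as in the hunk above */
	printf("%d\n", start_heartbeat(3, 6, always_ok)); /* 0  */
	printf("%d\n", start_heartbeat(3, 6, never_ok));  /* -1 */
	return 0;
}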
@@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data)
/* Pin node */
o2nm_depend_this_node();
- while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+ while (!kthread_should_stop() &&
+ !reg->hr_unclean_stop && !reg->hr_aborted_start) {
/* We track the time spent inside
* o2hb_do_disk_heartbeat so that we avoid more than
* hr_timeout_ms between disk writes. On busy systems
@@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data)
* likely to time itself out. */
do_gettimeofday(&before_hb);
- i = 0;
- do {
- ret = o2hb_do_disk_heartbeat(reg);
- } while (ret && ++i < 2);
+ ret = o2hb_do_disk_heartbeat(reg);
do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
@@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data)
after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
elapsed_msec);
- if (elapsed_msec < reg->hr_timeout_ms) {
+ if (!kthread_should_stop() &&
+ elapsed_msec < reg->hr_timeout_ms) {
/* the kthread api has blocked signals for us so no
* need to record the return value. */
msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
@@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data)
* to timeout on this region when we could just as easily
* write a clear generation - thus indicating to them that
* this node has left this region.
- *
- * XXX: Should we skip this on unclean_stop? */
- o2hb_prepare_block(reg, 0);
- ret = o2hb_issue_node_write(reg, &write_wc);
- if (ret == 0) {
- o2hb_wait_on_io(reg, &write_wc);
- } else {
- mlog_errno(ret);
+ */
+ if (!reg->hr_unclean_stop && !reg->hr_aborted_start) {
+ o2hb_prepare_block(reg, 0);
+ ret = o2hb_issue_node_write(reg, &write_wc);
+ if (ret == 0)
+ o2hb_wait_on_io(reg, &write_wc);
+ else
+ mlog_errno(ret);
}
/* Unpin node */
o2nm_undepend_this_node();
- mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+ mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n");
return 0;
}
@@ -1158,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
struct o2hb_debug_buf *db = inode->i_private;
struct o2hb_region *reg;
unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long lts;
char *buf = NULL;
int i = -1;
int out = 0;
@@ -1194,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
case O2HB_DB_TYPE_REGION_ELAPSED_TIME:
reg = (struct o2hb_region *)db->db_data;
- out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
- jiffies_to_msecs(jiffies -
- reg->hr_last_timeout_start));
+ lts = reg->hr_last_timeout_start;
+ /* If 0, it has never been set before */
+ if (lts)
+ lts = jiffies_to_msecs(jiffies - lts);
+ out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts);
goto done;
case O2HB_DB_TYPE_REGION_PINNED:
@@ -1426,6 +1469,8 @@ static void o2hb_region_release(struct config_item *item)
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
+ mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name);
+
if (reg->hr_tmp_block)
kfree(reg->hr_tmp_block);
@@ -1792,7 +1837,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
live_threshold <<= 1;
spin_unlock(&o2hb_live_lock);
}
- atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
+ ++live_threshold;
+ atomic_set(&reg->hr_steady_iterations, live_threshold);
+ /* unsteady_iterations is double the steady_iterations */
+ atomic_set(&reg->hr_unsteady_iterations, (live_threshold << 1));
hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
reg->hr_item.ci_name);
@@ -1809,14 +1857,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = wait_event_interruptible(o2hb_steady_queue,
atomic_read(&reg->hr_steady_iterations) == 0);
if (ret) {
- /* We got interrupted (hello ptrace!). Clean up */
- spin_lock(&o2hb_live_lock);
- hb_task = reg->hr_task;
- reg->hr_task = NULL;
- spin_unlock(&o2hb_live_lock);
+ atomic_set(&reg->hr_steady_iterations, 0);
+ reg->hr_aborted_start = 1;
+ }
- if (hb_task)
- kthread_stop(hb_task);
+ if (reg->hr_aborted_start) {
+ ret = -EIO;
goto out;
}
@@ -1833,8 +1879,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
ret = -EIO;
if (hb_task && o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n",
- config_item_name(&reg->hr_item));
+ printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
out:
if (filp)
@@ -2092,13 +2138,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
/* stop the thread when the user removes the region dir */
spin_lock(&o2hb_live_lock);
- if (o2hb_global_heartbeat_active()) {
- clear_bit(reg->hr_region_num, o2hb_region_bitmap);
- clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
- if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
- quorum_region = 1;
- clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
- }
hb_task = reg->hr_task;
reg->hr_task = NULL;
reg->hr_item_dropped = 1;
@@ -2107,19 +2146,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
if (hb_task)
kthread_stop(hb_task);
+ if (o2hb_global_heartbeat_active()) {
+ spin_lock(&o2hb_live_lock);
+ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+ clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+ if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+ quorum_region = 1;
+ clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+ spin_unlock(&o2hb_live_lock);
+ printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n",
+ ((atomic_read(&reg->hr_steady_iterations) == 0) ?
+ "stopped" : "start aborted"), config_item_name(item),
+ reg->hr_dev_name);
+ }
+
/*
* If we're racing a dev_write(), we need to wake them. They will
* check reg->hr_task
*/
if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ reg->hr_aborted_start = 1;
atomic_set(&reg->hr_steady_iterations, 0);
wake_up(&o2hb_steady_queue);
}
- if (o2hb_global_heartbeat_active())
- printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
- config_item_name(&reg->hr_item));
-
config_item_put(item);
if (!o2hb_global_heartbeat_active() || !quorum_region)
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 3a5835904b3..73ba81928bc 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -47,6 +47,7 @@
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
#define STATS_DEBUG_NAME "stats"
+#define NODES_DEBUG_NAME "connected_nodes"
#define SHOW_SOCK_CONTAINERS 0
#define SHOW_SOCK_STATS 1
@@ -55,6 +56,7 @@ static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
static struct dentry *stats_dentry;
+static struct dentry *nodes_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
@@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = {
.release = sc_fop_release,
};
-int o2net_debugfs_init(void)
+static int o2net_fill_bitmap(char *buf, int len)
{
- o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
- if (!o2net_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int i = -1, out = 0;
- nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &nst_seq_fops);
- if (!nst_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ o2net_fill_node_map(map, sizeof(map));
- sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &sc_seq_fops);
- if (!sc_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+ out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += snprintf(buf + out, PAGE_SIZE - out, "\n");
- stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
- o2net_dentry, NULL,
- &stats_seq_fops);
- if (!stats_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
+ return out;
+}
+
+static int nodes_fop_open(struct inode *inode, struct file *file)
+{
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
+
+ file->private_data = buf;
return 0;
-bail:
- debugfs_remove(stats_dentry);
- debugfs_remove(sc_dentry);
- debugfs_remove(nst_dentry);
- debugfs_remove(o2net_dentry);
- return -ENOMEM;
}
+static int o2net_debug_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t o2net_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+
+static const struct file_operations nodes_fops = {
+ .open = nodes_fop_open,
+ .release = o2net_debug_release,
+ .read = o2net_debug_read,
+ .llseek = generic_file_llseek,
+};
+
void o2net_debugfs_exit(void)
{
+ debugfs_remove(nodes_dentry);
debugfs_remove(stats_dentry);
debugfs_remove(sc_dentry);
debugfs_remove(nst_dentry);
debugfs_remove(o2net_dentry);
}
+int o2net_debugfs_init(void)
+{
+ umode_t mode = S_IFREG|S_IRUSR;
+
+ o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
+ if (o2net_dentry)
+ nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nst_seq_fops);
+ if (nst_dentry)
+ sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &sc_seq_fops);
+ if (sc_dentry)
+ stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &stats_seq_fops);
+ if (stats_dentry)
+ nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
+ o2net_dentry, NULL, &nodes_fops);
+ if (nodes_dentry)
+ return 0;
+
+ o2net_debugfs_exit();
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+}
+
#endif /* CONFIG_DEBUG_FS */
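Note: nodes_fop_open() renders the whole report into one buffer at open time and reads are served by simple_read_from_buffer(). A user-space sketch of that bounded-copy read helper (read_from_buffer mimics the kernel helper's clamping; it is not the kernel function):

#include <stdio.h>
#include <string.h>

static long read_from_buffer(char *to, size_t nbytes, long *ppos,
			     const char *from, size_t available)
{
	size_t pos = (size_t)*ppos;

	if (pos >= available)
		return 0;                       /* EOF */
	if (nbytes > available - pos)
		nbytes = available - pos;       /* clamp to what is left */
	memcpy(to, from + pos, nbytes);
	*ppos = (long)(pos + nbytes);
	return (long)nbytes;
}

int main(void)
{
	const char report[] = "0 4 7 31 \n";
	char chunk[4];
	long pos = 0, n;

	while ((n = read_from_buffer(chunk, sizeof(chunk), &pos,
				     report, strlen(report))) > 0)
		fwrite(chunk, 1, (size_t)n, stdout);
	return 0;
}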
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ad7d0c155de..044e7b58d31 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -546,7 +546,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_NOTICE "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: No longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -556,7 +556,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
cancel_delayed_work(&nn->nn_connect_expired);
printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
- "connected to" : "accepted connection from",
+ "Connected to" : "Accepted connection from",
SC_NODEF_ARGS(sc));
}
@@ -644,7 +644,7 @@ static void o2net_state_change(struct sock *sk)
o2net_sc_queue_work(sc, &sc->sc_connect_work);
break;
default:
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT
+ printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT
" shutdown, state %d\n",
SC_NODEF_ARGS(sc), sk->sk_state);
o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
@@ -1035,6 +1035,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
return ret;
}
+/* Get a map of all nodes to which this node is currently connected */
+void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ struct o2net_sock_container *sc;
+ int node, ret;
+
+ BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memset(map, 0, bytes);
+ for (node = 0; node < O2NM_MAX_NODES; ++node) {
+ o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret);
+ if (!ret) {
+ set_bit(node, map);
+ sc_put(sc);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(o2net_fill_node_map);
+
int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
size_t caller_veclen, u8 target_node, int *status)
{
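Note: o2net_fill_node_map() fills a bitmap of currently connected nodes, and the debugfs side walks the set bits into a text line. A standalone sketch of that walk-and-format step with local bitmap helpers (set_bit_ul/test_bit_ul are stand-ins for the kernel bitops):

#include <stdio.h>

#define MAX_NODES 255
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_ul(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static int fill_report(char *buf, int len, const unsigned long *map)
{
	int out = 0;

	for (int node = 0; node < MAX_NODES && out < len; node++)
		if (test_bit_ul(node, map))
			out += snprintf(buf + out, (size_t)(len - out), "%d ", node);
	if (out < len)
		out += snprintf(buf + out, (size_t)(len - out), "\n");
	return out;
}

int main(void)
{
	unsigned long map[BITS_TO_LONGS(MAX_NODES)] = { 0 };
	char buf[128];

	set_bit_ul(0, map);
	set_bit_ul(4, map);
	set_bit_ul(70, map);
	fill_report(buf, sizeof(buf), map);
	fputs(buf, stdout);             /* "0 4 70 " plus a newline */
	return 0;
}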
@@ -1285,11 +1304,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
- mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
- "version %llu but %llu is required, disconnecting\n",
- SC_NODEF_ARGS(sc),
- (unsigned long long)be64_to_cpu(hand->protocol_version),
- O2NET_PROTOCOL_VERSION);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net "
+ "protocol version %llu but %llu is required. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ O2NET_PROTOCOL_VERSION);
/* don't bother reconnecting if its the wrong version. */
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
@@ -1303,33 +1322,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc)
*/
if (be32_to_cpu(hand->o2net_idle_timeout_ms) !=
o2net_idle_timeout()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_idle_timeout_ms),
- o2net_idle_timeout());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network "
+ "idle timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_idle_timeout_ms),
+ o2net_idle_timeout());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2net_keepalive_delay_ms) !=
o2net_keepalive_delay()) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2net_keepalive_delay_ms),
- o2net_keepalive_delay());
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive "
+ "delay of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2net_keepalive_delay_ms),
+ o2net_keepalive_delay());
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) !=
O2HB_MAX_WRITE_TIMEOUT_MS) {
- mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of "
- "%u ms, but we use %u ms locally. disconnecting\n",
- SC_NODEF_ARGS(sc),
- be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
- O2HB_MAX_WRITE_TIMEOUT_MS);
+ printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat "
+ "timeout of %u ms, but we use %u ms locally. "
+ "Disconnecting.\n", SC_NODEF_ARGS(sc),
+ be32_to_cpu(hand->o2hb_heartbeat_timeout_ms),
+ O2HB_MAX_WRITE_TIMEOUT_MS);
o2net_ensure_shutdown(nn, sc, -ENOTCONN);
return -1;
}
@@ -1540,28 +1559,16 @@ static void o2net_idle_timer(unsigned long data)
{
struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-
#ifdef CONFIG_DEBUG_FS
- ktime_t now = ktime_get();
+ unsigned long msecs = ktime_to_ms(ktime_get()) -
+ ktime_to_ms(sc->sc_tv_timer);
+#else
+ unsigned long msecs = o2net_idle_timeout();
#endif
- printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
- "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
- o2net_idle_timeout() / 1000,
- o2net_idle_timeout() % 1000);
-
-#ifdef CONFIG_DEBUG_FS
- mlog(ML_NOTICE, "Here are some times that might help debug the "
- "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
- "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
- (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
- (long long)ktime_to_us(sc->sc_tv_data_ready),
- (long long)ktime_to_us(sc->sc_tv_advance_start),
- (long long)ktime_to_us(sc->sc_tv_advance_stop),
- sc->sc_msg_key, sc->sc_msg_type,
- (long long)ktime_to_us(sc->sc_tv_func_start),
- (long long)ktime_to_us(sc->sc_tv_func_stop));
-#endif
+ printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been "
+ "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc),
+ msecs / 1000, msecs % 1000);
/*
* Initialize the nn_timeout so that the next connection attempt
@@ -1694,8 +1701,8 @@ static void o2net_start_connect(struct work_struct *work)
out:
if (ret) {
- mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
- "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+ printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT
+ " failed with errno %d\n", SC_NODEF_ARGS(sc), ret);
/* 0 err so that another will be queued and attempted
* from set_nn_state */
if (sc)
@@ -1718,8 +1725,8 @@ static void o2net_connect_expired(struct work_struct *work)
spin_lock(&nn->nn_lock);
if (!nn->nn_sc_valid) {
- mlog(ML_ERROR, "no connection established with node %u after "
- "%u.%u seconds, giving up and returning errors.\n",
+ printk(KERN_NOTICE "o2net: No connection established with "
+ "node %u after %u.%u seconds, giving up.\n",
o2net_num_from_nn(nn),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
@@ -1862,21 +1869,21 @@ static int o2net_accept_one(struct socket *sock)
node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
- mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n",
- &sin.sin_addr.s_addr, ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from unknown "
+ "node at %pI4:%d\n", &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
if (o2nm_this_node() >= node->nd_num) {
local_node = o2nm_get_node_by_num(o2nm_this_node());
- mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
- "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
- local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port),
- node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Unexpected connect attempt seen "
+ "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
+ "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port), node->nd_name,
+ node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1901,10 +1908,10 @@ static int o2net_accept_one(struct socket *sock)
ret = 0;
spin_unlock(&nn->nn_lock);
if (ret) {
- mlog(ML_NOTICE, "attempt to connect from node '%s' at "
- "%pI4:%d but it already has an open connection\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
+ printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' "
+ "at %pI4:%d but it already has an open connection\n",
+ node->nd_name, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
goto out;
}
@@ -1984,7 +1991,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0) {
- mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+ printk(KERN_ERR "o2net: Error %d while creating socket\n", ret);
goto out;
}
@@ -2001,16 +2008,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
sock->sk->sk_reuse = 1;
ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
if (ret < 0) {
- mlog(ML_ERROR, "unable to bind socket at %pI4:%u, "
- "ret=%d\n", &addr, ntohs(port), ret);
+ printk(KERN_ERR "o2net: Error %d while binding socket at "
+ "%pI4:%u\n", ret, &addr, ntohs(port));
goto out;
}
ret = sock->ops->listen(sock, 64);
- if (ret < 0) {
- mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n",
- &addr, ntohs(port), ret);
- }
+ if (ret < 0)
+ printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n",
+ ret, &addr, ntohs(port));
out:
if (ret) {
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index fd6179eb26d..5bada2a69b5 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
struct list_head *unreg_list);
void o2net_unregister_handler_list(struct list_head *list);
+void o2net_fill_node_map(unsigned long *map, unsigned bytes);
+
struct o2nm_node;
int o2net_register_hb_callbacks(void);
void o2net_unregister_hb_callbacks(void);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index e2878b5895f..8fe4e2892ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1184,8 +1184,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
if (pde)
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
+ de->inode = 0;
dir->i_version++;
ocfs2_journal_dirty(handle, bh);
goto bail;
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index d602abb51b6..a5952ceecba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
@@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res)
kref_get(&res->refs);
}
void dlm_lockres_put(struct dlm_lock_resource *res);
-void __dlm_unhash_lockres(struct dlm_lock_resource *res);
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
const char *name,
unsigned int len,
@@ -902,46 +901,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
const char *name,
unsigned int namelen);
-#define dlm_lockres_set_refmap_bit(bit,res) \
- __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__)
-#define dlm_lockres_clear_refmap_bit(bit,res) \
- __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
-static inline void __dlm_lockres_set_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: setting bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- set_bit(bit, res->refmap);
-}
-
-static inline void __dlm_lockres_clear_refmap_bit(int bit,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
-{
- //printk("%s:%d:%.*s: clearing bit %d\n", file, line,
- // res->lockname.len, res->lockname.name, bit);
- clear_bit(bit, res->refmap);
-}
-
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line);
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line);
-#define dlm_lockres_drop_inflight_ref(d,r) \
- __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__)
-#define dlm_lockres_grab_inflight_ref_new(d,r) \
- __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6ed6b95dcf9..92f2ead0fab 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing,
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
-void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
- if (!hlist_unhashed(&lockres->hash_node)) {
- hlist_del_init(&lockres->hash_node);
- dlm_lockres_put(lockres);
- }
+ if (hlist_unhashed(&res->hash_node))
+ return;
+
+ mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ hlist_del_init(&res->hash_node);
+ dlm_lockres_put(res);
}
-void __dlm_insert_lockres(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res)
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
struct qstr *q;
@@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
dlm_lockres_get(res);
hlist_add_head(&res->hash_node, bucket);
+
+ mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
}
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
@@ -539,17 +544,17 @@ again:
static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
- int node = -1;
+ int node = -1, num = 0;
assert_spin_locked(&dlm->spinlock);
- printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
-
+ printk("( ");
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
printk("%d ", node);
+ ++num;
}
- printk("\n");
+ printk(") %u nodes\n", num);
}
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -566,11 +571,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
-
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
clear_bit(node, dlm->exit_domain_map);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
__dlm_print_nodes(dlm);
/* notify anything attached to the heartbeat events */
@@ -755,6 +759,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
dlm_mark_domain_leaving(dlm);
dlm_leave_domain(dlm);
+ printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
dlm_force_free_mles(dlm);
dlm_complete_dlm_shutdown(dlm);
}
@@ -970,7 +975,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
clear_bit(assert->node_idx, dlm->exit_domain_map);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
assert->node_idx, dlm->name);
__dlm_print_nodes(dlm);
@@ -1701,8 +1706,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
bail:
spin_lock(&dlm->spinlock);
__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
- if (!status)
+ if (!status) {
+ printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
__dlm_print_nodes(dlm);
+ }
spin_unlock(&dlm->spinlock);
if (ctxt) {
@@ -2131,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
goto leave;
}
- if (!o2hb_check_local_node_heartbeating()) {
- mlog(ML_ERROR, "the local node has not been configured, or is "
- "not heartbeating\n");
- ret = -EPROTO;
- goto leave;
- }
-
mlog(0, "register called for domain \"%s\"\n", domain);
retry:
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 8d39e0fd66f..975810b9849 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
kick_thread = 1;
}
}
- /* reduce the inflight count, this may result in the lockres
- * being purged below during calc_usage */
- if (lock->ml.node == dlm->node_num)
- dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -231,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
lock->ml.type, res->lockname.len,
res->lockname.name, flags);
+ /*
+ * Wait if resource is getting recovered, remastered, etc.
+ * If the resource was remastered and the new owner is this node, then exit.
+ */
spin_lock(&res->spinlock);
-
- /* will exit this call with spinlock held */
__dlm_wait_on_lockres(res);
+ if (res->owner == dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ return DLM_RECOVERING;
+ }
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* add lock to local (secondary) queue */
@@ -319,27 +321,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
sizeof(create), res->owner, &status);
if (tmpret >= 0) {
- // successfully sent and received
- ret = status; // this is already a dlm_status
+ ret = status;
if (ret == DLM_REJECTED) {
- mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres "
- "no longer owned by %u. that node is coming back "
- "up currently.\n", dlm->name, create.namelen,
+ mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+ "owned by node %u. That node is coming back up "
+ "currently.\n", dlm->name, create.namelen,
create.name, res->owner);
dlm_print_one_lock_resource(res);
BUG();
}
} else {
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key,
- res->owner);
- if (dlm_is_host_down(tmpret)) {
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
+ "node %u\n", dlm->name, create.namelen, create.name,
+ tmpret, res->owner);
+ if (dlm_is_host_down(tmpret))
ret = DLM_RECOVERING;
- mlog(0, "node %u died so returning DLM_RECOVERING "
- "from lock message!\n", res->owner);
- } else {
+ else
ret = dlm_err_to_dlm_status(tmpret);
- }
}
return ret;
@@ -440,7 +438,7 @@ struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
/* zero memory only if kernel-allocated */
lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
if (!lksb) {
- kfree(lock);
+ kmem_cache_free(dlm_lock_cache, lock);
return NULL;
}
kernel_allocated = 1;
@@ -718,18 +716,10 @@ retry_lock:
if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
status == DLM_FORWARD) {
- mlog(0, "retrying lock with migration/"
- "recovery/in progress\n");
msleep(100);
- /* no waiting for dlm_reco_thread */
if (recovery) {
if (status != DLM_RECOVERING)
goto retry_lock;
-
- mlog(0, "%s: got RECOVERING "
- "for $RECOVERY lock, master "
- "was %u\n", dlm->name,
- res->owner);
/* wait to see the node go down, then
* drop down and allow the lockres to
* get cleaned up. need to remaster. */
@@ -741,6 +731,14 @@ retry_lock:
}
}
+ /* Inflight taken in dlm_get_lock_resource() is dropped here */
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
if (status != DLM_NORMAL) {
lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
if (status != DLM_NOTQUEUED)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11eefb8c12e..005261c333b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -631,39 +631,54 @@ error:
return NULL;
}
-void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- int new_lockres,
- const char *file,
- int line)
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
{
- if (!new_lockres)
- assert_spin_locked(&res->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ clear_bit(bit, res->refmap);
+}
+
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
- if (!test_bit(dlm->node_num, res->refmap)) {
- BUG_ON(res->inflight_locks != 0);
- dlm_lockres_set_refmap_bit(dlm->node_num, res);
- }
res->inflight_locks++;
- mlog(0, "%s:%.*s: inflight++: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
+
+ mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
}
-void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
- struct dlm_lock_resource *res,
- const char *file,
- int line)
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
{
assert_spin_locked(&res->spinlock);
BUG_ON(res->inflight_locks == 0);
+
res->inflight_locks--;
- mlog(0, "%s:%.*s: inflight--: now %u\n",
- dlm->name, res->lockname.len, res->lockname.name,
- res->inflight_locks);
- if (res->inflight_locks == 0)
- dlm_lockres_clear_refmap_bit(dlm->node_num, res);
+
+ mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
+
wake_up(&res->wq);
}
@@ -697,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
unsigned int hash;
int tries = 0;
int bit, wait_on_recovery = 0;
- int drop_inflight_if_nonlocal = 0;
BUG_ON(!lockid);
@@ -709,36 +723,33 @@ lookup:
spin_lock(&dlm->spinlock);
tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
if (tmpres) {
- int dropping_ref = 0;
-
spin_unlock(&dlm->spinlock);
-
spin_lock(&tmpres->spinlock);
- /* We wait for the other thread that is mastering the resource */
+ /* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
}
- if (tmpres->owner == dlm->node_num) {
- BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
- dlm_lockres_grab_inflight_ref(dlm, tmpres);
- } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
- dropping_ref = 1;
- spin_unlock(&tmpres->spinlock);
-
- /* wait until done messaging the master, drop our ref to allow
- * the lockres to be purged, start over. */
- if (dropping_ref) {
- spin_lock(&tmpres->spinlock);
- __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
+ /* Wait on the resource purge to complete before continuing */
+ if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+ BUG_ON(tmpres->owner == dlm->node_num);
+ __dlm_wait_on_lockres_flags(tmpres,
+ DLM_LOCK_RES_DROPPING_REF);
spin_unlock(&tmpres->spinlock);
dlm_lockres_put(tmpres);
tmpres = NULL;
goto lookup;
}
- mlog(0, "found in hash!\n");
+ /* Grab inflight ref to pin the resource */
+ dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+ spin_unlock(&tmpres->spinlock);
if (res)
dlm_lockres_put(res);
res = tmpres;
@@ -829,8 +840,8 @@ lookup:
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
}
@@ -843,12 +854,11 @@ lookup:
/* finally add the lockres to its hash bucket */
__dlm_insert_lockres(dlm, res);
- /* since this lockres is new it doesn't not require the spinlock */
- dlm_lockres_grab_inflight_ref_new(dlm, res);
- /* if this node does not become the master make sure to drop
- * this inflight reference below */
- drop_inflight_if_nonlocal = 1;
+ /* Grab inflight ref to pin the resource */
+ spin_lock(&res->spinlock);
+ dlm_lockres_grab_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
/* get an extra ref on the mle in case this is a BLOCK
* if so, the creator of the BLOCK may try to put the last
@@ -864,8 +874,8 @@ redo_request:
* dlm spinlock would be detectable be a change on the mle,
* so we only need to clear out the recovery map once. */
if (dlm_is_recovery_lock(lockid, namelen)) {
- mlog(ML_NOTICE, "%s: recovery map is not empty, but "
- "must master $RECOVERY lock now\n", dlm->name);
+ mlog(0, "%s: Recovery map is not empty, but must "
+ "master $RECOVERY lock now\n", dlm->name);
if (!dlm_pre_master_reco_lockres(dlm, res))
wait_on_recovery = 0;
else {
@@ -883,8 +893,8 @@ redo_request:
spin_lock(&dlm->spinlock);
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES) {
- mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
- "recover before lock mastery can begin\n",
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
dlm->name, namelen, (char *)lockid, bit);
wait_on_recovery = 1;
} else
@@ -913,8 +923,8 @@ redo_request:
* yet, keep going until it does. this is how the
* master will know that asserts are needed back to
* the lower nodes. */
- mlog(0, "%s:%.*s: requests only up to %u but master "
- "is %u, keep going\n", dlm->name, namelen,
+ mlog(0, "%s: res %.*s, Requests only up to %u but "
+ "master is %u, keep going\n", dlm->name, namelen,
lockid, nodenum, mle->master);
}
}
@@ -924,13 +934,12 @@ wait:
ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
if (ret < 0) {
wait_on_recovery = 1;
- mlog(0, "%s:%.*s: node map changed, redo the "
- "master request now, blocked=%d\n",
- dlm->name, res->lockname.len,
+ mlog(0, "%s: res %.*s, Node map changed, redo the master "
+ "request now, blocked=%d\n", dlm->name, res->lockname.len,
res->lockname.name, blocked);
if (++tries > 20) {
- mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
+ mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+ "dlm_wait_for_lock_mastery, blocked = %d\n",
dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
@@ -940,7 +949,8 @@ wait:
goto redo_request;
}
- mlog(0, "lockres mastered by %u\n", res->owner);
+ mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+ res->lockname.name, res->owner);
/* make sure we never continue without this */
BUG_ON(res->owner == O2NM_MAX_NODES);
@@ -952,8 +962,6 @@ wait:
wake_waiters:
spin_lock(&res->spinlock);
- if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
- dlm_lockres_drop_inflight_ref(dlm, res);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
@@ -1426,9 +1434,7 @@ way_up_top:
}
if (res->owner == dlm->node_num) {
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name, request->node_idx);
- dlm_lockres_set_refmap_bit(request->node_idx, res);
+ dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
spin_unlock(&res->spinlock);
response = DLM_MASTER_RESP_YES;
if (mle)
@@ -1493,10 +1499,8 @@ way_up_top:
* go back and clean the mles on any
* other nodes */
dispatch_assert = 1;
- dlm_lockres_set_refmap_bit(request->node_idx, res);
- mlog(0, "%s:%.*s: setting bit %u in refmap\n",
- dlm->name, namelen, name,
- request->node_idx);
+ dlm_lockres_set_refmap_bit(dlm, res,
+ request->node_idx);
} else
response = DLM_MASTER_RESP_NO;
} else {
@@ -1702,7 +1706,7 @@ again:
"lockres, set the bit in the refmap\n",
namelen, lockname, to);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(to, res);
+ dlm_lockres_set_refmap_bit(dlm, res, to);
spin_unlock(&res->spinlock);
}
}
@@ -2187,8 +2191,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
namelen = res->lockname.len;
BUG_ON(namelen > O2NM_MAX_NAME_LEN);
- mlog(0, "%s:%.*s: sending deref to %d\n",
- dlm->name, namelen, lockname, res->owner);
memset(&deref, 0, sizeof(deref));
deref.node_idx = dlm->node_num;
deref.namelen = namelen;
@@ -2197,14 +2199,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
&deref, sizeof(deref), res->owner, &r);
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
- "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
- res->owner);
+ mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+ dlm->name, namelen, lockname, ret, res->owner);
else if (r < 0) {
/* BAD. other node says I did not have a ref. */
- mlog(ML_ERROR,"while dropping ref on %s:%.*s "
- "(master=%u) got %d.\n", dlm->name, namelen,
- lockname, res->owner, r);
+ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+ dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
BUG();
}
@@ -2260,7 +2260,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
else {
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
}
@@ -2320,7 +2320,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
if (test_bit(node, res->refmap)) {
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
- dlm_lockres_clear_refmap_bit(node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
cleared = 1;
}
spin_unlock(&res->spinlock);
@@ -2802,7 +2802,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
BUG_ON(!list_empty(&lock->bast_list));
BUG_ON(lock->ast_pending);
BUG_ON(lock->bast_pending);
- dlm_lockres_clear_refmap_bit(lock->ml.node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ lock->ml.node);
list_del_init(&lock->list);
dlm_lock_put(lock);
/* In a normal unlock, we would have added a
@@ -2823,7 +2824,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: node %u had a ref to this "
"migrating lockres, clearing\n", dlm->name,
res->lockname.len, res->lockname.name, bit);
- dlm_lockres_clear_refmap_bit(bit, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, bit);
}
bit++;
}
@@ -2916,9 +2917,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
&migrate, sizeof(migrate), nodenum,
&status);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
- dlm->key, nodenum);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+ "MIGRATE_REQUEST to node %u\n", dlm->name,
+ migrate.namelen, migrate.name, ret, nodenum);
if (!dlm_is_host_down(ret)) {
mlog(ML_ERROR, "unhandled error=%d!\n", ret);
BUG();
@@ -2937,7 +2938,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
dlm->name, res->lockname.len, res->lockname.name,
nodenum);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(nodenum, res);
+ dlm_lockres_set_refmap_bit(dlm, res, nodenum);
spin_unlock(&res->spinlock);
}
}
@@ -3271,7 +3272,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
* mastery reference here since old_master will briefly have
* a reference after the migration completes */
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(old_master, res);
+ dlm_lockres_set_refmap_bit(dlm, res, old_master);
spin_unlock(&res->spinlock);
mlog(0, "now time to do a migrate request to other nodes\n");
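(Illustrative aside, not part of the patch.) The dlmmaster.c rewrite above drops the __FILE__/__LINE__ macro wrappers and instead records the call site with __builtin_return_address(0), which the kernel's printk renders as a symbol via the %ps format. A minimal userspace sketch of the same trick follows; plain userspace has no %ps, so it prints the raw return address.

/* Sketch only: caller identification via __builtin_return_address(0). */
#include <stdio.h>

static __attribute__((noinline)) void set_refmap_bit(int bit)
{
	/* in the patch: mlog(0, "... %ps()\n", ..., __builtin_return_address(0)) */
	printf("set bit %d, called from %p\n", bit,
	       __builtin_return_address(0));
}

static void caller_a(void) { set_refmap_bit(1); }
static void caller_b(void) { set_refmap_bit(2); }

int main(void)
{
	caller_a();
	caller_b();
	return 0;
}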
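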
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 7efab6d28a2..01ebfd0bdad 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
}
-int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(ML_NOTICE, "%s: waiting %dms for notification of "
- "death of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_dead(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_dead(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
- "of death of node %u\n", dlm->name, node);
+ dlm_is_node_dead(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_dead(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
-int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
- if (timeout) {
- mlog(0, "%s: waiting %dms for notification of "
- "recovery of node %u\n", dlm->name, timeout, node);
+ if (dlm_is_node_recovered(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
wait_event_timeout(dlm->dlm_reco_thread_wq,
- dlm_is_node_recovered(dlm, node),
- msecs_to_jiffies(timeout));
- } else {
- mlog(0, "%s: waiting indefinitely for notification "
- "of recovery of node %u\n", dlm->name, node);
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
wait_event(dlm->dlm_reco_thread_wq,
dlm_is_node_recovered(dlm, node));
- }
- /* for now, return 0 */
- return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
@@ -430,6 +428,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
spin_lock(&dlm->spinlock);
BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+ dlm->name, dlm->reco.dead_node);
dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
}
@@ -440,9 +440,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm)
BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
spin_unlock(&dlm->spinlock);
+ printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
wake_up(&dlm->reco.event);
}
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+ printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+ "dead node %u in domain %s\n", dlm->reco.new_master,
+ (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+ dlm->reco.dead_node, dlm->name);
+}
+
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
int status = 0;
@@ -505,9 +514,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
}
mlog(0, "another node will master this recovery session.\n");
}
- mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
- dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
- dlm->node_num, dlm->reco.dead_node);
+
+ dlm_print_recovery_master(dlm);
/* it is safe to start everything back up here
* because all of the dead node's lock resources
@@ -518,15 +526,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
return 0;
master_here:
- mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
- "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
- dlm->node_num, dlm->reco.dead_node, dlm->name);
+ dlm_print_recovery_master(dlm);
status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
if (status < 0) {
/* we should never hit this anymore */
- mlog(ML_ERROR, "error %d remastering locks for node %u, "
- "retrying.\n", status, dlm->reco.dead_node);
+ mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+ "retrying.\n", dlm->name, status, dlm->reco.dead_node);
/* yield a bit to allow any final network messages
* to get handled on remaining nodes */
msleep(100);
@@ -567,7 +573,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
- mlog(0, "requesting lock info from node %u\n",
+ mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
ndata->node_num);
if (ndata->node_num == dlm->node_num) {
@@ -640,7 +646,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
spin_unlock(&dlm_reco_state_lock);
}
- mlog(0, "done requesting all lock info\n");
+ mlog(0, "%s: Done requesting all lock info\n", dlm->name);
/* nodes should be sending reco data now
* just need to wait */
@@ -802,10 +808,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
/* negative status is handled by caller */
if (ret < 0)
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
- dlm->key, request_from);
-
+ mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+ "to recover dead node %u\n", dlm->name, ret,
+ request_from, dead_node);
// return from here, then
// sleep until all received or error
return ret;
@@ -956,9 +961,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
sizeof(done_msg), send_to, &tmpret);
if (ret < 0) {
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+ "to recover dead node %u\n", dlm->name, ret, send_to,
+ dead_node);
if (!dlm_is_host_down(ret)) {
BUG();
}
@@ -1127,9 +1132,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
- mlog(ML_ERROR, "Error %d when sending message %u (key "
- "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
- dlm->key, send_to);
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+ "node %u (%s)\n", dlm->name, mres->lockname_len,
+ mres->lockname, ret, send_to,
+ (orig_flags & DLM_MRES_MIGRATION ?
+ "migration" : "recovery"));
} else {
/* might get an -ENOMEM back here */
ret = status;
@@ -1767,7 +1774,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
dlm->name, mres->lockname_len, mres->lockname,
from);
spin_lock(&res->spinlock);
- dlm_lockres_set_refmap_bit(from, res);
+ dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock);
added++;
break;
@@ -1965,7 +1972,7 @@ skip_lvb:
mlog(0, "%s:%.*s: added lock for node %u, "
"setting refmap bit\n", dlm->name,
res->lockname.len, res->lockname.name, ml->node);
- dlm_lockres_set_refmap_bit(ml->node, res);
+ dlm_lockres_set_refmap_bit(dlm, res, ml->node);
added++;
}
spin_unlock(&res->spinlock);
@@ -2084,6 +2091,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
if (res->owner == dead_node) {
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
list_del_init(&res->recovering);
spin_lock(&res->spinlock);
/* new_master has our reference from
@@ -2105,40 +2115,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
- if (res->state & DLM_LOCK_RES_RECOVERING) {
- if (res->owner == dead_node) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, but "
- "clearing state anyway\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else if (res->owner == dlm->node_num) {
- mlog(0, "(this=%u) res %.*s owner=%u "
- "was not on recovering list, "
- "owner is THIS node, clearing\n",
- dlm->node_num, res->lockname.len,
- res->lockname.name, new_master);
- } else
- continue;
+ if (!(res->state & DLM_LOCK_RES_RECOVERING))
+ continue;
- if (!list_empty(&res->recovering)) {
- mlog(0, "%s:%.*s: lockres was "
- "marked RECOVERING, owner=%u\n",
- dlm->name, res->lockname.len,
- res->lockname.name, res->owner);
- list_del_init(&res->recovering);
- dlm_lockres_put(res);
- }
- spin_lock(&res->spinlock);
- /* new_master has our reference from
- * the lock state sent during recovery */
- dlm_change_lockres_owner(dlm, res, new_master);
- res->state &= ~DLM_LOCK_RES_RECOVERING;
- if (__dlm_lockres_has_locks(res))
- __dlm_dirty_lockres(dlm, res);
- spin_unlock(&res->spinlock);
- wake_up(&res->wq);
+ if (res->owner != dead_node &&
+ res->owner != dlm->node_num)
+ continue;
+
+ if (!list_empty(&res->recovering)) {
+ list_del_init(&res->recovering);
+ dlm_lockres_put(res);
}
+
+ /* new_master has our reference from
+ * the lock state sent during recovery */
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ if (__dlm_lockres_has_locks(res))
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
}
}
}
@@ -2252,12 +2252,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
res->lockname.len, res->lockname.name, freed, dead_node);
__dlm_print_one_lock_resource(res);
}
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
"no locks and had not purged before dying\n", dlm->name,
res->lockname.len, res->lockname.name, dead_node);
- dlm_lockres_clear_refmap_bit(dead_node, res);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
}
/* do not kick thread yet */
@@ -2324,9 +2324,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
- mlog(ML_NOTICE, "Ignore %.*s for "
+ mlog(ML_NOTICE, "%s: res %.*s, Skip "
"recovery as it is being freed\n",
- res->lockname.len,
+ dlm->name, res->lockname.len,
res->lockname.name);
} else
dlm_move_lockres_to_recovery_list(dlm,
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 1d6d1d22c47..e73c833fc2a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
int bit;
+ assert_spin_locked(&res->spinlock);
+
if (__dlm_lockres_has_locks(res))
return 0;
+ /* Locks are in the process of being created */
+ if (res->inflight_locks)
+ return 0;
+
if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
return 0;
if (res->state & DLM_LOCK_RES_RECOVERING)
return 0;
+ /* Another node has this resource with this node as the master */
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES)
return 0;
- /*
- * since the bit for dlm->node_num is not set, inflight_locks better
- * be zero
- */
- BUG_ON(res->inflight_locks != 0);
return 1;
}
@@ -185,8 +187,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
- mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
- res->lockname.len, res->lockname.name, ret);
if (!dlm_is_host_down(ret))
BUG();
}
@@ -209,7 +209,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
BUG();
}
- __dlm_unhash_lockres(res);
+ __dlm_unhash_lockres(dlm, res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index b4207679704..abfac0d7ae9 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -354,7 +354,6 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb)
static void dlmfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
@@ -401,16 +400,14 @@ static struct backing_dev_info dlmfs_backing_dev_info = {
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
- int mode = S_IFDIR | 0755;
+ umode_t mode = S_IFDIR | 0755;
struct dlmfs_inode_private *ip;
if (inode) {
ip = DLMFS_I(inode);
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+ inode_init_owner(inode, NULL, mode);
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inc_nlink(inode);
@@ -424,7 +421,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
static struct inode *dlmfs_get_inode(struct inode *parent,
struct dentry *dentry,
- int mode)
+ umode_t mode)
{
struct super_block *sb = parent->i_sb;
struct inode * inode = new_inode(sb);
@@ -434,9 +431,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
return NULL;
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+ inode_init_owner(inode, parent, mode);
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -473,13 +468,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inc_nlink(inode);
break;
}
-
- if (parent->i_mode & S_ISGID) {
- inode->i_gid = parent->i_gid;
- if (S_ISDIR(mode))
- inode->i_mode |= S_ISGID;
- }
-
return inode;
}
@@ -489,7 +477,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
/* SMP-safe */
static int dlmfs_mkdir(struct inode * dir,
struct dentry * dentry,
- int mode)
+ umode_t mode)
{
int status;
struct inode *inode = NULL;
@@ -537,7 +525,7 @@ bail:
static int dlmfs_create(struct inode *dir,
struct dentry *dentry,
- int mode,
+ umode_t mode,
struct nameidata *nd)
{
int status = 0;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e1ed5e502ff..81a4cd22f80 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1692,7 +1692,7 @@ int ocfs2_open_lock(struct inode *inode)
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
- if (ocfs2_mount_local(osb))
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
@@ -1718,6 +1718,12 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (write)
+ status = -EROFS;
+ goto out;
+ }
+
if (ocfs2_mount_local(osb))
goto out;
@@ -2298,7 +2304,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
- goto bail;
+ goto getbh;
}
if (ocfs2_mount_local(osb))
@@ -2356,7 +2362,7 @@ local:
mlog_errno(status);
goto bail;
}
-
+getbh:
if (ret_bh) {
status = ocfs2_assign_bh(inode, ret_bh, local_bh);
if (status < 0) {
@@ -2628,8 +2634,11 @@ int ocfs2_dentry_lock(struct dentry *dentry, int ex)
BUG_ON(!dl);
- if (ocfs2_is_hard_readonly(osb))
- return -EROFS;
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ return -EROFS;
+ return 0;
+ }
if (ocfs2_mount_local(osb))
return 0;
@@ -2647,7 +2656,7 @@ void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
- if (!ocfs2_mount_local(osb))
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 23457b491e8..2f5b92ef0e5 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -832,6 +832,102 @@ out:
return ret;
}
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+ unsigned int is_last = 0, is_data = 0;
+ u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ u32 cpos, cend, clen, hole_size;
+ u64 extoff, extlen;
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_extent_rec rec;
+
+ BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE);
+
+ ret = ocfs2_inode_lock(inode, &di_bh, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ if (*offset >= inode->i_size) {
+ ret = -ENXIO;
+ goto out_unlock;
+ }
+
+ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (origin == SEEK_HOLE)
+ *offset = inode->i_size;
+ goto out_unlock;
+ }
+
+ clen = 0;
+ cpos = *offset >> cs_bits;
+ cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size);
+
+ while (cpos < cend && !is_last) {
+ ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
+ &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ extoff = cpos;
+ extoff <<= cs_bits;
+
+ if (rec.e_blkno == 0ULL) {
+ clen = hole_size;
+ is_data = 0;
+ } else {
+ clen = le16_to_cpu(rec.e_leaf_clusters) -
+ (cpos - le32_to_cpu(rec.e_cpos));
+ is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
+ }
+
+ if ((!is_data && origin == SEEK_HOLE) ||
+ (is_data && origin == SEEK_DATA)) {
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ if (!is_last)
+ cpos += clen;
+ }
+
+ if (origin == SEEK_HOLE) {
+ extoff = cpos;
+ extoff <<= cs_bits;
+ extlen = clen;
+ extlen <<= cs_bits;
+
+ if ((extoff + extlen) > inode->i_size)
+ extlen = inode->i_size - extoff;
+ extoff += extlen;
+ if (extoff > *offset)
+ *offset = extoff;
+ goto out_unlock;
+ }
+
+ ret = -ENXIO;
+
+out_unlock:
+
+ brelse(di_bh);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ocfs2_inode_unlock(inode, 0);
+out:
+ if (ret && ret != -ENXIO)
+ ret = -ENXIO;
+ return ret;
+}
+
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index e79d41c2c90..67ea57d2fd5 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len);
+int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin);
+
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
u32 *p_cluster, u32 *num_clusters,
struct ocfs2_extent_list *el,
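(Illustrative aside, not part of the patch.) ocfs2_seek_data_hole_offset(), declared above and implemented in extent_map.c, is what backs lseek(SEEK_DATA)/lseek(SEEK_HOLE) once file.c (below) points .llseek at ocfs2_file_llseek. A hedged userspace sketch of exercising that interface; the file path is hypothetical.

/* Sketch only: probe the first data extent and the hole after it. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";   /* hypothetical */
	int fd = open(path, O_RDONLY);
	off_t data, hole;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	data = lseek(fd, 0, SEEK_DATA);          /* first data byte at/after 0 */
	if (data == (off_t)-1 && errno == ENXIO)
		printf("no data after offset 0 (empty or all-hole file)\n");
	else if (data == (off_t)-1)
		perror("SEEK_DATA");
	else
		printf("first data at %lld\n", (long long)data);

	hole = lseek(fd, data == (off_t)-1 ? 0 : data, SEEK_HOLE);
	if (hole != (off_t)-1)
		printf("next hole at %lld\n", (long long)hole);

	close(fd);
	return 0;
}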
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de4ea1af041..061591a3ab0 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,6 +1950,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
if (ret < 0)
mlog_errno(ret);
+ if (file->f_flags & O_SYNC)
+ handle->h_sync = 1;
+
ocfs2_commit_trans(osb, handle);
out_inode_unlock:
@@ -2052,6 +2055,23 @@ out:
return ret;
}
+static void ocfs2_aiodio_wait(struct inode *inode)
+{
+ wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
+
+ wait_event(*wq, (atomic_read(&OCFS2_I(inode)->ip_unaligned_aio) == 0));
+}
+
+static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
+{
+ int blockmask = inode->i_sb->s_blocksize - 1;
+ loff_t final_size = pos + count;
+
+ if ((pos & blockmask) || (final_size & blockmask))
+ return 1;
+ return 0;
+}
+
static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
struct file *file,
loff_t pos, size_t count,
@@ -2108,7 +2128,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
* remove_suid() calls ->setattr without any hint that
* we may have already done our cluster locking. Since
* ocfs2_setattr() *must* take cluster locks to
- * proceeed, this will lead us to recursively lock the
+ * proceed, this will lead us to recursively lock the
* inode. There's also the dinode i_size state which
* can be lost via setattr during extending writes (we
* set inode->i_size at the end of a write. */
@@ -2230,6 +2250,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
int full_coherency = !(osb->s_mount_opt &
OCFS2_MOUNT_COHERENCY_BUFFERED);
+ int unaligned_dio = 0;
trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2297,6 +2318,10 @@ relock:
goto out;
}
+ if (direct_io && !is_sync_kiocb(iocb))
+ unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left,
+ *ppos);
+
/*
* We can't complete the direct I/O as requested, fall back to
* buffered I/O.
@@ -2311,6 +2336,18 @@ relock:
goto relock;
}
+ if (unaligned_dio) {
+ /*
+ * Wait on previous unaligned aio to complete before
+ * proceeding.
+ */
+ ocfs2_aiodio_wait(inode);
+
+ /* Mark the iocb as needing a decrement in ocfs2_dio_end_io */
+ atomic_inc(&OCFS2_I(inode)->ip_unaligned_aio);
+ ocfs2_iocb_set_unaligned_aio(iocb);
+ }
+
/*
* To later detect whether a journal commit for sync writes is
* necessary, we sample i_size, and cluster count here.
@@ -2382,8 +2419,12 @@ out_dio:
if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
+ unaligned_dio = 0;
}
+ if (unaligned_dio)
+ atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+
out:
if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
@@ -2591,6 +2632,57 @@ bail:
return ret;
}
+/* See generic_file_llseek_unlocked() */
+static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret = 0;
+
+ mutex_lock(&inode->i_mutex);
+
+ switch (origin) {
+ case SEEK_SET:
+ break;
+ case SEEK_END:
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ if (offset == 0) {
+ offset = file->f_pos;
+ goto out;
+ }
+ offset += file->f_pos;
+ break;
+ case SEEK_DATA:
+ case SEEK_HOLE:
+ ret = ocfs2_seek_data_hole_offset(file, &offset, origin);
+ if (ret)
+ goto out;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+ ret = -EINVAL;
+ if (!ret && offset > inode->i_sb->s_maxbytes)
+ ret = -EINVAL;
+ if (ret)
+ goto out;
+
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ if (ret)
+ return ret;
+ return offset;
+}
+
const struct inode_operations ocfs2_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
@@ -2615,7 +2707,7 @@ const struct inode_operations ocfs2_special_file_iops = {
* ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
*/
const struct file_operations ocfs2_fops = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
@@ -2663,7 +2755,7 @@ const struct file_operations ocfs2_dops = {
* the cluster.
*/
const struct file_operations ocfs2_fops_no_plocks = {
- .llseek = generic_file_llseek,
+ .llseek = ocfs2_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.mmap = ocfs2_mmap,
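(Illustrative aside, not part of the patch.) file.c above starts tracking O_DIRECT writes whose start or end misses a filesystem block boundary (ocfs2_is_io_unaligned), so that a later unaligned AIO waits for the previous one. A small standalone sketch of that alignment test, with made-up sizes; blocksize must be a power of two.

/* Sketch only: mirrors the ocfs2_is_io_unaligned() check. */
#include <stddef.h>
#include <stdio.h>

static int is_io_unaligned(long long pos, size_t count, unsigned int blocksize)
{
	unsigned long long blockmask = blocksize - 1;
	unsigned long long final_size = pos + count;

	return ((pos & blockmask) || (final_size & blockmask)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", is_io_unaligned(8192, 4096, 4096)); /* 0: both ends aligned */
	printf("%d\n", is_io_unaligned(8192, 1000, 4096)); /* 1: end mid-block */
	printf("%d\n", is_io_unaligned(100,  4096, 4096)); /* 1: start mid-block */
	return 0;
}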
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index a22d2c09889..17454a904d7 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -951,7 +951,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
trace_ocfs2_cleanup_delete_inode(
(unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data);
if (sync_data)
- write_inode_now(inode, 1);
+ filemap_write_and_wait(inode->i_mapping);
truncate_inode_pages(&inode->i_data, 0);
}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1c508b149b3..88924a3133f 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -43,6 +43,9 @@ struct ocfs2_inode_info
/* protects extended attribute changes on this inode */
struct rw_semaphore ip_xattr_sem;
+ /* Number of outstanding AIOs which are not page aligned */
+ atomic_t ip_unaligned_aio;
+
/* These fields are protected by ip_lock */
spinlock_t ip_lock;
u32 ip_open_count;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bc91072b721..a6fda3c188a 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -122,7 +122,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE))
- goto bail_unlock;
+ goto bail_commit;
}
ocfs2_inode->ip_attr = flags;
@@ -132,6 +132,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
if (status < 0)
mlog_errno(status);
+bail_commit:
ocfs2_commit_trans(osb, handle);
bail_unlock:
ocfs2_inode_unlock(inode, 1);
@@ -381,7 +382,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
if (!oifi) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oifi, req))
@@ -431,7 +432,7 @@ bail:
o2info_set_request_error(&oifi->ifi_req, req);
kfree(oifi);
-
+out_err:
return status;
}
@@ -666,7 +667,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
if (!oiff) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ goto out_err;
}
if (o2info_from_user(*oiff, req))
@@ -716,7 +717,7 @@ bail:
o2info_set_request_error(&oiff->iff_req, req);
kfree(oiff);
-
+out_err:
return status;
}
@@ -905,12 +906,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
- status = mnt_want_write(filp->f_path.mnt);
+ status = mnt_want_write_file(filp);
if (status)
return status;
status = ocfs2_set_inode_attr(inode, flags,
OCFS2_FL_MODIFIABLE);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return status;
case OCFS2_IOC_RESVSP:
case OCFS2_IOC_RESVSP64:
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 295d56454e8..0a42ae96dca 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
/* we need to run complete recovery for offline orphan slots */
ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
- mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
- node_num, slot_num,
- MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+ printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
@@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
jbd2_journal_destroy(journal);
+ printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
+ "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
+ MINOR(osb->sb->s_dev));
done:
/* drop the lock on this nodes journal */
if (got_lock)
@@ -1808,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void)
* every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
* is done to catch any orphans that are left over in orphan directories.
*
+ * It scans all slots, even ones that are in use. It does so to handle the
+ * case described below:
+ *
+ * Node 1 has an inode it was using. The dentry went away due to memory
+ * pressure. Node 1 closes the inode, but it's on the free list. The node
+ * has the open lock.
+ * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
+ * but node 1 has no dentry and doesn't get the message. It trylocks the
+ * open lock, sees that another node has a PR, and does nothing.
+ * Later node 2 runs its orphan dir. It igets the inode, trylocks the
+ * open lock, sees the PR still, and does nothing.
+ * Basically, we have to trigger an orphan iput on node 1. The only way
+ * for this to happen is if node 1 runs node 2's orphan dir.
+ *
* ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
* seconds. It gets an EX lock on os_lockres and checks sequence number
* stored in LVB. If the sequence number has changed, it means some other
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 68cf2f6d3c6..a3385b63ff5 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -441,10 +441,11 @@ static inline int ocfs2_mknod_credits(struct super_block *sb, int is_dir,
#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
/* file update (nlink, etc) + directory mtime/ctime + dir entry block + quota
- * update on dir + index leaf + dx root update for free list */
+ * update on dir + index leaf + dx root update for free list +
+ * previous dirblock update in the free list */
static inline int ocfs2_link_credits(struct super_block *sb)
{
- return 2*OCFS2_INODE_UPDATE_CREDITS + 3 +
+ return 2*OCFS2_INODE_UPDATE_CREDITS + 4 +
ocfs2_quota_trans_credits(sb);
}
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3e9393ca39e..9cd41083e99 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
struct page *page)
{
- int ret;
+ int ret = VM_FAULT_NOPAGE;
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
@@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
void *fsdata;
loff_t size = i_size_read(inode);
- /*
- * Another node might have truncated while we were waiting on
- * cluster locks.
- * We don't check size == 0 before the shift. This is borrowed
- * from do_generic_file_read.
- */
last_index = (size - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(!size || page->index > last_index)) {
- ret = -EINVAL;
- goto out;
- }
/*
- * The i_size check above doesn't catch the case where nodes
- * truncated and then re-extended the file. We'll re-check the
- * page mapping after taking the page lock inside of
- * ocfs2_write_begin_nolock().
+ * There are cases that lead to the page no longer belonging to the
+ * mapping:
+ * 1) pagecache truncates locally due to memory pressure.
+ * 2) pagecache truncates when another node is taking an EX lock
+ *    against the inode lock. See ocfs2_data_convert_worker.
+ *
+ * The i_size check doesn't catch the case where nodes truncated and
+ * then re-extended the file. We'll re-check the page mapping after
+ * taking the page lock inside of ocfs2_write_begin_nolock().
+ *
+ * Let VM retry with these cases.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
- /*
- * the page has been umapped in ocfs2_data_downconvert_worker.
- * So return 0 here and let VFS retry.
- */
- ret = 0;
+ if ((page->mapping != inode->i_mapping) ||
+ (!PageUptodate(page)) ||
+ (page_offset(page) >= size))
goto out;
- }
/*
* Call ocfs2_write_begin() and ocfs2_write_end() to take
@@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
if (ret) {
if (ret != -ENOSPC)
mlog_errno(ret);
+ if (ret == -ENOMEM)
+ ret = VM_FAULT_OOM;
+ else
+ ret = VM_FAULT_SIGBUS;
goto out;
}
- ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
- fsdata);
- if (ret < 0) {
- mlog_errno(ret);
+ if (!locked_page) {
+ ret = VM_FAULT_NOPAGE;
goto out;
}
+ ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
+ fsdata);
BUG_ON(ret != len);
- ret = 0;
+ ret = VM_FAULT_LOCKED;
out:
return ret;
}
@@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out:
ocfs2_unblock_signals(&oldset);
- if (ret)
- ret = VM_FAULT_SIGBUS;
return ret;
}
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index d53cb706f14..b1e3fce72ea 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -745,7 +745,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
*/
ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
new_phys_cpos);
- if (!new_phys_cpos) {
+ if (!*new_phys_cpos) {
ret = -ENOSPC;
goto out_commit;
}
@@ -1059,7 +1059,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
struct ocfs2_move_extents range;
struct ocfs2_move_extents_context *context = NULL;
- status = mnt_want_write(filp->f_path.mnt);
+ status = mnt_want_write_file(filp);
if (status)
return status;
@@ -1145,7 +1145,7 @@ out:
kfree(context);
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return status;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a8b2bfea574..be244692550 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -185,7 +185,7 @@ bail:
return ret;
}
-static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode)
+static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
{
struct inode *inode;
@@ -207,7 +207,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode)
static int ocfs2_mknod(struct inode *dir,
struct dentry *dentry,
- int mode,
+ umode_t mode,
dev_t dev)
{
int status = 0;
@@ -602,7 +602,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
static int ocfs2_mkdir(struct inode *dir,
struct dentry *dentry,
- int mode)
+ umode_t mode)
{
int ret;
@@ -617,7 +617,7 @@ static int ocfs2_mkdir(struct inode *dir,
static int ocfs2_create(struct inode *dir,
struct dentry *dentry,
- int mode,
+ umode_t mode,
struct nameidata *nd)
{
int ret;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 409285854f6..d355e6e36b3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -836,18 +836,65 @@ static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_set_bit_le(bit, bitmap);
+ __set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
- __test_and_clear_bit_le(bit, bitmap);
+ __clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
#define ocfs2_test_bit test_bit_le
#define ocfs2_find_next_zero_bit find_next_zero_bit_le
#define ocfs2_find_next_bit find_next_bit_le
+
+static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
+{
+#if BITS_PER_LONG == 64
+ *bit += ((unsigned long) addr & 7UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~7UL);
+#elif BITS_PER_LONG == 32
+ *bit += ((unsigned long) addr & 3UL) << 3;
+ addr = (void *) ((unsigned long) addr & ~3UL);
+#else
+#error "how many bits you are?!"
+#endif
+ return addr;
+}
+
+static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_set_bit(bit, bitmap);
+}
+
+static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ ocfs2_clear_bit(bit, bitmap);
+}
+
+static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
+{
+ bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
+ return ocfs2_test_bit(bit, bitmap);
+}
+
+static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
+ int start)
+{
+ int fix = 0, ret, tmpmax;
+ bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
+ tmpmax = max + fix;
+ start += fix;
+
+ ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
+ if (ret > max)
+ return max;
+ return ret;
+}
+
#endif /* OCFS2_H */
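
A note on the new *_unaligned() helpers above: the little-endian bitops behind ocfs2_set_bit()/ocfs2_clear_bit() work on long-sized words and expect a long-aligned address, but the on-disk dqc_bitmap field is not guaranteed to be long-aligned, which is why the quota_local.c hunks below switch those accesses to the unaligned variants. The standalone userspace sketch that follows is not part of the patch (correct_addr_and_bit() and set_bit_le() are local stand-ins, not kernel APIs); it shows the same arithmetic: round the pointer down to a long boundary and fold the skipped bytes into the bit index, so the resulting (address, bit) pair still names the same bit in memory.

/* Userspace illustration of the alignment trick in correct_addr_and_bit_unaligned(). */
#include <stdio.h>
#include <stdint.h>

static void *correct_addr_and_bit(int *bit, void *addr)
{
	uintptr_t mask = sizeof(unsigned long) - 1;	/* 7 on 64-bit, 3 on 32-bit */

	*bit += ((uintptr_t)addr & mask) << 3;		/* 8 bits per skipped byte */
	return (void *)((uintptr_t)addr & ~mask);
}

/* Byte-granular little-endian bit set; stands in for the kernel's __set_bit_le(). */
static void set_bit_le(int bit, void *addr)
{
	((unsigned char *)addr)[bit >> 3] |= 1u << (bit & 7);
}

int main(void)
{
	unsigned long words[4] = { 0 };			/* long-aligned backing store */
	unsigned char *buf = (unsigned char *)words;
	int bit = 5;
	void *unaligned = buf + 3;			/* deliberately misaligned "bitmap" */
	void *aligned = correct_addr_and_bit(&bit, unaligned);

	set_bit_le(bit, aligned);			/* bit is now 29: byte 3, bit 5 */
	printf("buf[3] = 0x%02x (expected 0x20)\n", buf[3]);
	return 0;
}
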
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index dc8007fc924..f100bf70a90 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
int status = 0;
struct ocfs2_quota_recovery *rec;
- mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
rec = ocfs2_alloc_quota_recovery();
if (!rec)
return ERR_PTR(-ENOMEM);
@@ -549,8 +551,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_commit;
}
lock_buffer(qbh);
- WARN_ON(!ocfs2_test_bit(bit, dchunk->dqc_bitmap));
- ocfs2_clear_bit(bit, dchunk->dqc_bitmap);
+ WARN_ON(!ocfs2_test_bit_unaligned(bit, dchunk->dqc_bitmap));
+ ocfs2_clear_bit_unaligned(bit, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
@@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
struct inode *lqinode;
unsigned int flags;
- mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (type = 0; type < MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
@@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
/* Someone else is holding the lock? Then he must be
* doing the recovery. Just skip the file... */
if (status == -EAGAIN) {
- mlog(ML_NOTICE, "skipping quota recovery for slot %d "
- "because quota file is locked.\n", slot_num);
+ printk(KERN_NOTICE "ocfs2: Skipping quota recovery on "
+ "device (%s) for slot %d because quota file is "
+ "locked.\n", osb->dev_str, slot_num);
status = 0;
goto out_put;
} else if (status < 0) {
@@ -944,7 +949,7 @@ static struct ocfs2_quota_chunk *ocfs2_find_free_entry(struct super_block *sb,
* ol_quota_entries_per_block(sb);
}
- found = ocfs2_find_next_zero_bit(dchunk->dqc_bitmap, len, 0);
+ found = ocfs2_find_next_zero_bit_unaligned(dchunk->dqc_bitmap, len, 0);
/* We failed? */
if (found == len) {
mlog(ML_ERROR, "Did not find empty entry in chunk %d with %u"
@@ -1208,7 +1213,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
struct ocfs2_local_disk_chunk *dchunk;
dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data;
- ocfs2_set_bit(*offset, dchunk->dqc_bitmap);
+ ocfs2_set_bit_unaligned(*offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, -1);
}
@@ -1289,7 +1294,7 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
(od->dq_chunk->qc_headerbh->b_data);
/* Mark structure as freed */
lock_buffer(od->dq_chunk->qc_headerbh);
- ocfs2_clear_bit(offset, dchunk->dqc_bitmap);
+ ocfs2_clear_bit_unaligned(offset, dchunk->dqc_bitmap);
le32_add_cpu(&dchunk->dqc_free, 1);
unlock_buffer(od->dq_chunk->qc_headerbh);
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 26fc0014d50..1424c151ccc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
goto bail;
}
} else
- mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
- slot);
+ printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+ "allocated to this node!\n", slot, osb->dev_str);
ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot;
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 19965b00c43..94368017edb 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -28,6 +28,7 @@
#include "cluster/masklog.h"
#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
#include "stackglue.h"
@@ -256,6 +257,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
}
/*
+ * Check if this node is heartbeating and is connected to all other
+ * heartbeating nodes.
+ */
+static int o2cb_cluster_check(void)
+{
+ u8 node_num;
+ int i;
+ unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+ node_num = o2nm_this_node();
+ if (node_num == O2NM_MAX_NODES) {
+ printk(KERN_ERR "o2cb: This node has not been configured.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * o2dlm expects o2net sockets to be created. If not, then
+ * dlm_join_domain() fails with a stack of errors which are both cryptic
+ * and incomplete. The idea here is to detect upfront whether we have
+ * managed to connect to all nodes or not. If not, then list the nodes
+ * to allow the user to check the configuration (incorrect IP, firewall,
+ * etc.). Yes, this is racy. But it's not the end of the world.
+ */
+#define O2CB_MAP_STABILIZE_COUNT 60
+ for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
+ o2hb_fill_node_map(hbmap, sizeof(hbmap));
+ if (!test_bit(node_num, hbmap)) {
+ printk(KERN_ERR "o2cb: %s heartbeat has not been "
+ "started.\n", (o2hb_global_heartbeat_active() ?
+ "Global" : "Local"));
+ return -EINVAL;
+ }
+ o2net_fill_node_map(netmap, sizeof(netmap));
+ /* Force set the current node to allow easy compare */
+ set_bit(node_num, netmap);
+ if (!memcmp(hbmap, netmap, sizeof(hbmap)))
+ return 0;
+ if (i < O2CB_MAP_STABILIZE_COUNT)
+ msleep(1000);
+ }
+
+ printk(KERN_ERR "o2cb: This node could not connect to nodes:");
+ i = -1;
+ while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
+ i + 1)) < O2NM_MAX_NODES) {
+ if (!test_bit(i, netmap))
+ printk(" %u", i);
+ }
+ printk(".\n");
+
+ return -ENOTCONN;
+}
+
+/*
* Called from the dlm when it's about to evict a node. This is how the
* classic stack signals node death.
*/
@@ -263,8 +319,8 @@ static void o2dlm_eviction_cb(int node_num, void *data)
{
struct ocfs2_cluster_connection *conn = data;
- mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n",
- node_num, conn->cc_namelen, conn->cc_name);
+ printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n",
+ node_num, conn->cc_namelen, conn->cc_name);
conn->cc_recovery_handler(node_num, conn->cc_recovery_data);
}
@@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
BUG_ON(conn == NULL);
BUG_ON(conn->cc_proto == NULL);
- /* for now we only have one cluster/node, make sure we see it
- * in the heartbeat universe */
- if (!o2hb_check_local_node_heartbeating()) {
- if (o2hb_global_heartbeat_active())
- mlog(ML_ERROR, "Global heartbeat not started\n");
- rc = -EINVAL;
+ /* Ensure cluster stack is up and all nodes are connected */
+ rc = o2cb_cluster_check();
+ if (rc) {
+ printk(KERN_ERR "o2cb: Cluster check failed. Fix errors "
+ "before retrying.\n");
goto out;
}
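
The new o2cb_cluster_check() above waits for the heartbeat map and the o2net connection map to converge: it force-sets the local node in the net map so the two maps can be compared with a single memcmp(), and on failure it lists the nodes that are heartbeating but not reachable. Below is a minimal userspace sketch of that compare-and-report step; MAX_NODES, fill_maps() and the bit helpers are made-up stand-ins for the example, not kernel interfaces.

/* Userspace sketch of the bitmap compare used by o2cb_cluster_check(). */
#include <stdio.h>
#include <string.h>

#define MAX_NODES	32
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit(int n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit(int n, const unsigned long *map)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

/* Pretend nodes 0, 1 and 2 heartbeat, but only node 1 has a socket to us (node 0). */
static void fill_maps(unsigned long *hbmap, unsigned long *netmap)
{
	set_bit(0, hbmap); set_bit(1, hbmap); set_bit(2, hbmap);
	set_bit(1, netmap);
}

int main(void)
{
	unsigned long hbmap[BITS_TO_LONGS(MAX_NODES)] = { 0 };
	unsigned long netmap[BITS_TO_LONGS(MAX_NODES)] = { 0 };
	int self = 0, i;

	fill_maps(hbmap, netmap);
	set_bit(self, netmap);			/* force-set self for an easy compare */

	if (!memcmp(hbmap, netmap, sizeof(hbmap)))
		return 0;			/* fully connected */

	printf("could not connect to nodes:");
	for (i = 0; i < MAX_NODES; i++)
		if (test_bit(i, hbmap) && !test_bit(i, netmap))
			printf(" %d", i);
	printf("\n");				/* prints: could not connect to nodes: 2 */
	return 1;
}
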
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index a5ebe421195..286edf1e231 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -827,8 +827,8 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
goto out;
}
- rc = dlm_new_lockspace(conn->cc_name, strlen(conn->cc_name),
- &fsdlm, DLM_LSFL_FS, DLM_LVB_LEN);
+ rc = dlm_new_lockspace(conn->cc_name, NULL, DLM_LSFL_FS, DLM_LVB_LEN,
+ NULL, NULL, NULL, &fsdlm);
if (rc) {
ocfs2_live_connection_drop(control);
goto out;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 56f61027236..604e12c4e97 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -54,6 +54,7 @@
#include "ocfs1_fs_compat.h"
#include "alloc.h"
+#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "export.h"
@@ -107,7 +108,7 @@ static int ocfs2_parse_options(struct super_block *sb, char *options,
int is_remount);
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options);
-static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt);
+static int ocfs2_show_options(struct seq_file *s, struct dentry *root);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
@@ -568,7 +569,6 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb)
static void ocfs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
}
@@ -1107,9 +1107,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
ocfs2_set_ro_flag(osb, 1);
- printk(KERN_NOTICE "Readonly device detected. No cluster "
- "services will be utilized for this mount. Recovery "
- "will be skipped.\n");
+ printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. "
+ "Cluster services will not be used for this mount. "
+ "Recovery will be skipped.\n", osb->dev_str);
}
if (!ocfs2_is_hard_readonly(osb)) {
@@ -1533,9 +1533,9 @@ bail:
return status;
}
-static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
{
- struct ocfs2_super *osb = OCFS2_SB(mnt->mnt_sb);
+ struct ocfs2_super *osb = OCFS2_SB(root->d_sb);
unsigned long opts = osb->s_mount_opt;
unsigned int local_alloc_megs;
@@ -1567,8 +1567,7 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
if (osb->preferred_slot != OCFS2_INVALID_SLOT)
seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
- if (!(mnt->mnt_flags & MNT_NOATIME) && !(mnt->mnt_flags & MNT_RELATIME))
- seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
+ seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
if (osb->osb_commit_interval)
seq_printf(s, ",commit=%u",
@@ -1616,12 +1615,17 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
return 0;
}
+wait_queue_head_t ocfs2__ioend_wq[OCFS2_IOEND_WQ_HASH_SZ];
+
static int __init ocfs2_init(void)
{
- int status;
+ int status, i;
ocfs2_print_version();
+ for (i = 0; i < OCFS2_IOEND_WQ_HASH_SZ; i++)
+ init_waitqueue_head(&ocfs2__ioend_wq[i]);
+
status = init_ocfs2_uptodate_cache();
if (status < 0) {
mlog_errno(status);
@@ -1760,7 +1764,7 @@ static void ocfs2_inode_init_once(void *data)
ocfs2_extent_map_init(&oi->vfs_inode);
INIT_LIST_HEAD(&oi->ip_io_markers);
oi->ip_dir_start_lookup = 0;
-
+ atomic_set(&oi->ip_unaligned_aio, 0);
init_rwsem(&oi->ip_alloc_sem);
init_rwsem(&oi->ip_xattr_sem);
mutex_init(&oi->ip_io_mutex);
@@ -1974,7 +1978,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
* If we failed before we got a uuid_str yet, we can't stop
* heartbeat. Otherwise, do it.
*/
- if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str)
+ if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str &&
+ !ocfs2_is_hard_readonly(osb))
hangup_needed = 1;
if (osb->cconn)
@@ -2353,7 +2358,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog_errno(status);
goto bail;
}
- cleancache_init_shared_fs((char *)&uuid_net_key, sb);
+ cleancache_init_shared_fs((char *)&di->id2.i_super.s_uuid, sb);
bail:
return status;
@@ -2462,8 +2467,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
goto finally;
}
} else {
- mlog(ML_NOTICE, "File system was not unmounted cleanly, "
- "recovering volume.\n");
+ printk(KERN_NOTICE "ocfs2: File system on device (%s) was not "
+ "unmounted cleanly, recovering it.\n", osb->dev_str);
}
local = ocfs2_mount_local(osb);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 194fb22ef79..0ba9ea1e796 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -623,7 +623,7 @@ int ocfs2_calc_security_init(struct inode *dir,
int ocfs2_calc_xattr_init(struct inode *dir,
struct buffer_head *dir_bh,
- int mode,
+ umode_t mode,
struct ocfs2_security_xattr_info *si,
int *want_clusters,
int *xattr_credits,
@@ -2376,16 +2376,18 @@ static int ocfs2_remove_value_outside(struct inode*inode,
}
ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- break;
- }
ocfs2_commit_trans(osb, ctxt.handle);
if (ctxt.meta_ac) {
ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt.meta_ac = NULL;
}
+
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
+
}
if (ctxt.meta_ac)
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index d63cfb72316..e5c7f15465b 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -68,7 +68,7 @@ int ocfs2_calc_security_init(struct inode *,
struct ocfs2_security_xattr_info *,
int *, int *, struct ocfs2_alloc_context **);
int ocfs2_calc_xattr_init(struct inode *, struct buffer_head *,
- int, struct ocfs2_security_xattr_info *,
+ umode_t, struct ocfs2_security_xattr_info *,
int *, int *, int *);
/*
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 98e54427439..f00576ec320 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -255,7 +255,7 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int omfs_add_node(struct inode *dir, struct dentry *dentry, int mode)
+static int omfs_add_node(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
struct inode *inode = omfs_new_inode(dir, mode);
@@ -279,12 +279,12 @@ out_free_inode:
return err;
}
-static int omfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return omfs_add_node(dir, dentry, mode | S_IFDIR);
}
-static int omfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return omfs_add_node(dir, dentry, mode | S_IFREG);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index e043c4cb9a9..6065bb0ba20 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -28,7 +28,7 @@ struct buffer_head *omfs_bread(struct super_block *sb, sector_t block)
return sb_bread(sb, clus_to_blk(sbi, block));
}
-struct inode *omfs_new_inode(struct inode *dir, int mode)
+struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
{
struct inode *inode;
u64 new_block;
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index 7d414fef501..8941f12c6b0 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -60,7 +60,7 @@ extern int omfs_shrink_inode(struct inode *inode);
/* inode.c */
extern struct buffer_head *omfs_bread(struct super_block *sb, sector_t block);
extern struct inode *omfs_iget(struct super_block *sb, ino_t inode);
-extern struct inode *omfs_new_inode(struct inode *dir, int mode);
+extern struct inode *omfs_new_inode(struct inode *dir, umode_t mode);
extern int omfs_reserve_block(struct super_block *sb, sector_t block);
extern int omfs_find_empty_block(struct super_block *sb, int mode, ino_t *ino);
extern int omfs_sync_inode(struct inode *inode);
diff --git a/fs/open.c b/fs/open.c
index 22c41b543f2..77becc04114 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -456,7 +456,7 @@ static int chmod_common(struct path *path, umode_t mode)
if (error)
return error;
mutex_lock(&inode->i_mutex);
- error = security_path_chmod(path->dentry, path->mnt, mode);
+ error = security_path_chmod(path, mode);
if (error)
goto out_unlock;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
@@ -468,7 +468,7 @@ out_unlock:
return error;
}
-SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
+SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
struct file * file;
int err = -EBADF;
@@ -482,7 +482,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
return err;
}
-SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
+SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode)
{
struct path path;
int error;
@@ -495,7 +495,7 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
return error;
}
-SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
+SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
{
return sys_fchmodat(AT_FDCWD, filename, mode);
}
@@ -608,7 +608,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
dentry = file->f_path.dentry;
audit_inode(NULL, dentry);
error = chown_common(&file->f_path, user, group);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
out_fput:
fput(file);
out:
@@ -877,7 +877,7 @@ void fd_install(unsigned int fd, struct file *file)
EXPORT_SYMBOL(fd_install);
-static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
int lookup_flags = 0;
int acc_mode;
@@ -948,7 +948,7 @@ static inline int build_open_flags(int flags, int mode, struct open_flags *op)
* have to. But in general you should not do this, so please move
* along, nothing to see here..
*/
-struct file *filp_open(const char *filename, int flags, int mode)
+struct file *filp_open(const char *filename, int flags, umode_t mode)
{
struct open_flags op;
int lookup = build_open_flags(flags, mode, &op);
@@ -970,7 +970,7 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
-long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
+long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
int lookup = build_open_flags(flags, mode, &op);
@@ -994,7 +994,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
return fd;
}
-SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode)
+SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
long ret;
@@ -1008,7 +1008,7 @@ SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode)
}
SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
- int, mode)
+ umode_t, mode)
{
long ret;
@@ -1027,7 +1027,7 @@ SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
* For backward compatibility? Maybe this should be moved
* into arch/i386 instead?
*/
-SYSCALL_DEFINE2(creat, const char __user *, pathname, int, mode)
+SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
}
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index e4e0ff7962e..a88c03bc749 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -346,7 +346,6 @@ static struct inode *openprom_alloc_inode(struct super_block *sb)
static void openprom_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(op_inode_cachep, OP_I(inode));
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 4065f07366b..f0e485d54e6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1290,11 +1290,4 @@ static int __init init_pipe_fs(void)
return err;
}
-static void __exit exit_pipe_fs(void)
-{
- kern_unmount(pipe_mnt);
- unregister_filesystem(&pipe_fs_type);
-}
-
fs_initcall(init_pipe_fs);
-module_exit(exit_pipe_fs);
diff --git a/fs/pnode.c b/fs/pnode.c
index d42514e3238..ab5fa9e1a79 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -13,45 +13,30 @@
#include "pnode.h"
/* return the next shared peer mount of @p */
-static inline struct vfsmount *next_peer(struct vfsmount *p)
+static inline struct mount *next_peer(struct mount *p)
{
- return list_entry(p->mnt_share.next, struct vfsmount, mnt_share);
+ return list_entry(p->mnt_share.next, struct mount, mnt_share);
}
-static inline struct vfsmount *first_slave(struct vfsmount *p)
+static inline struct mount *first_slave(struct mount *p)
{
- return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave);
+ return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
-static inline struct vfsmount *next_slave(struct vfsmount *p)
+static inline struct mount *next_slave(struct mount *p)
{
- return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
+ return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
-/*
- * Return true if path is reachable from root
- *
- * namespace_sem is held, and mnt is attached
- */
-static bool is_path_reachable(struct vfsmount *mnt, struct dentry *dentry,
- const struct path *root)
-{
- while (mnt != root->mnt && mnt->mnt_parent != mnt) {
- dentry = mnt->mnt_mountpoint;
- mnt = mnt->mnt_parent;
- }
- return mnt == root->mnt && is_subdir(dentry, root->dentry);
-}
-
-static struct vfsmount *get_peer_under_root(struct vfsmount *mnt,
- struct mnt_namespace *ns,
- const struct path *root)
+static struct mount *get_peer_under_root(struct mount *mnt,
+ struct mnt_namespace *ns,
+ const struct path *root)
{
- struct vfsmount *m = mnt;
+ struct mount *m = mnt;
do {
/* Check the namespace first for optimization */
- if (m->mnt_ns == ns && is_path_reachable(m, m->mnt_root, root))
+ if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
return m;
m = next_peer(m);
@@ -66,12 +51,12 @@ static struct vfsmount *get_peer_under_root(struct vfsmount *mnt,
*
* Caller must hold namespace_sem
*/
-int get_dominating_id(struct vfsmount *mnt, const struct path *root)
+int get_dominating_id(struct mount *mnt, const struct path *root)
{
- struct vfsmount *m;
+ struct mount *m;
for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
- struct vfsmount *d = get_peer_under_root(m, mnt->mnt_ns, root);
+ struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
if (d)
return d->mnt_group_id;
}
@@ -79,10 +64,10 @@ int get_dominating_id(struct vfsmount *mnt, const struct path *root)
return 0;
}
-static int do_make_slave(struct vfsmount *mnt)
+static int do_make_slave(struct mount *mnt)
{
- struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
- struct vfsmount *slave_mnt;
+ struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
+ struct mount *slave_mnt;
/*
* slave 'mnt' to a peer mount that has the
@@ -90,7 +75,7 @@ static int do_make_slave(struct vfsmount *mnt)
* slave it to anything that is available.
*/
while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
- peer_mnt->mnt_root != mnt->mnt_root) ;
+ peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;
if (peer_mnt == mnt) {
peer_mnt = next_peer(mnt);
@@ -116,7 +101,7 @@ static int do_make_slave(struct vfsmount *mnt)
struct list_head *p = &mnt->mnt_slave_list;
while (!list_empty(p)) {
slave_mnt = list_first_entry(p,
- struct vfsmount, mnt_slave);
+ struct mount, mnt_slave);
list_del_init(&slave_mnt->mnt_slave);
slave_mnt->mnt_master = NULL;
}
@@ -129,7 +114,7 @@ static int do_make_slave(struct vfsmount *mnt)
/*
* vfsmount lock must be held for write
*/
-void change_mnt_propagation(struct vfsmount *mnt, int type)
+void change_mnt_propagation(struct mount *mnt, int type)
{
if (type == MS_SHARED) {
set_mnt_shared(mnt);
@@ -140,9 +125,9 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
list_del_init(&mnt->mnt_slave);
mnt->mnt_master = NULL;
if (type == MS_UNBINDABLE)
- mnt->mnt_flags |= MNT_UNBINDABLE;
+ mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
else
- mnt->mnt_flags &= ~MNT_UNBINDABLE;
+ mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
}
}
@@ -156,20 +141,19 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
* vfsmount found while iterating with propagation_next() is
* a peer of one we'd found earlier.
*/
-static struct vfsmount *propagation_next(struct vfsmount *m,
- struct vfsmount *origin)
+static struct mount *propagation_next(struct mount *m,
+ struct mount *origin)
{
/* are there any slaves of this mount? */
if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
while (1) {
- struct vfsmount *next;
- struct vfsmount *master = m->mnt_master;
+ struct mount *master = m->mnt_master;
if (master == origin->mnt_master) {
- next = next_peer(m);
- return ((next == origin) ? NULL : next);
+ struct mount *next = next_peer(m);
+ return (next == origin) ? NULL : next;
} else if (m->mnt_slave.next != &master->mnt_slave_list)
return next_slave(m);
@@ -187,13 +171,13 @@ static struct vfsmount *propagation_next(struct vfsmount *m,
* @type return CL_SLAVE if the new mount has to be
* cloned as a slave.
*/
-static struct vfsmount *get_source(struct vfsmount *dest,
- struct vfsmount *last_dest,
- struct vfsmount *last_src,
- int *type)
+static struct mount *get_source(struct mount *dest,
+ struct mount *last_dest,
+ struct mount *last_src,
+ int *type)
{
- struct vfsmount *p_last_src = NULL;
- struct vfsmount *p_last_dest = NULL;
+ struct mount *p_last_src = NULL;
+ struct mount *p_last_dest = NULL;
while (last_dest != dest->mnt_master) {
p_last_dest = last_dest;
@@ -233,33 +217,33 @@ static struct vfsmount *get_source(struct vfsmount *dest,
* @source_mnt: source mount.
* @tree_list : list of heads of trees to be attached.
*/
-int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
- struct vfsmount *source_mnt, struct list_head *tree_list)
+int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
+ struct mount *source_mnt, struct list_head *tree_list)
{
- struct vfsmount *m, *child;
+ struct mount *m, *child;
int ret = 0;
- struct vfsmount *prev_dest_mnt = dest_mnt;
- struct vfsmount *prev_src_mnt = source_mnt;
+ struct mount *prev_dest_mnt = dest_mnt;
+ struct mount *prev_src_mnt = source_mnt;
LIST_HEAD(tmp_list);
LIST_HEAD(umount_list);
for (m = propagation_next(dest_mnt, dest_mnt); m;
m = propagation_next(m, dest_mnt)) {
int type;
- struct vfsmount *source;
+ struct mount *source;
if (IS_MNT_NEW(m))
continue;
source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
- if (!(child = copy_tree(source, source->mnt_root, type))) {
+ if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
ret = -ENOMEM;
list_splice(tree_list, tmp_list.prev);
goto out;
}
- if (is_subdir(dest_dentry, m->mnt_root)) {
+ if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
mnt_set_mountpoint(m, dest_dentry, child);
list_add_tail(&child->mnt_hash, tree_list);
} else {
@@ -275,7 +259,7 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
out:
br_write_lock(vfsmount_lock);
while (!list_empty(&tmp_list)) {
- child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash);
+ child = list_first_entry(&tmp_list, struct mount, mnt_hash);
umount_tree(child, 0, &umount_list);
}
br_write_unlock(vfsmount_lock);
@@ -286,7 +270,7 @@ out:
/*
* return true if the refcount is greater than count
*/
-static inline int do_refcount_check(struct vfsmount *mnt, int count)
+static inline int do_refcount_check(struct mount *mnt, int count)
{
int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
return (mycount > count);
@@ -302,10 +286,10 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count)
*
* vfsmount lock must be held for write
*/
-int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
+int propagate_mount_busy(struct mount *mnt, int refcnt)
{
- struct vfsmount *m, *child;
- struct vfsmount *parent = mnt->mnt_parent;
+ struct mount *m, *child;
+ struct mount *parent = mnt->mnt_parent;
int ret = 0;
if (mnt == parent)
@@ -321,7 +305,7 @@ int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
- child = __lookup_mnt(m, mnt->mnt_mountpoint, 0);
+ child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
if (child && list_empty(&child->mnt_mounts) &&
(ret = do_refcount_check(child, 1)))
break;
@@ -333,17 +317,17 @@ int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
-static void __propagate_umount(struct vfsmount *mnt)
+static void __propagate_umount(struct mount *mnt)
{
- struct vfsmount *parent = mnt->mnt_parent;
- struct vfsmount *m;
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
BUG_ON(parent == mnt);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
- struct vfsmount *child = __lookup_mnt(m,
+ struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint, 0);
/*
* umount the child only if the child has no
@@ -363,7 +347,7 @@ static void __propagate_umount(struct vfsmount *mnt)
*/
int propagate_umount(struct list_head *list)
{
- struct vfsmount *mnt;
+ struct mount *mnt;
list_for_each_entry(mnt, list, mnt_hash)
__propagate_umount(mnt);
diff --git a/fs/pnode.h b/fs/pnode.h
index 1ea4ae1efcd..65c60979d54 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -9,13 +9,13 @@
#define _LINUX_PNODE_H
#include <linux/list.h>
-#include <linux/mount.h>
+#include "mount.h"
-#define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED)
-#define IS_MNT_SLAVE(mnt) (mnt->mnt_master)
-#define IS_MNT_NEW(mnt) (!mnt->mnt_ns)
-#define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED)
-#define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
+#define IS_MNT_SLAVE(m) ((m)->mnt_master)
+#define IS_MNT_NEW(m) (!(m)->mnt_ns)
+#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
+#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
@@ -23,17 +23,25 @@
#define CL_MAKE_SHARED 0x08
#define CL_PRIVATE 0x10
-static inline void set_mnt_shared(struct vfsmount *mnt)
+static inline void set_mnt_shared(struct mount *mnt)
{
- mnt->mnt_flags &= ~MNT_SHARED_MASK;
- mnt->mnt_flags |= MNT_SHARED;
+ mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK;
+ mnt->mnt.mnt_flags |= MNT_SHARED;
}
-void change_mnt_propagation(struct vfsmount *, int);
-int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
+void change_mnt_propagation(struct mount *, int);
+int propagate_mnt(struct mount *, struct dentry *, struct mount *,
struct list_head *);
int propagate_umount(struct list_head *);
-int propagate_mount_busy(struct vfsmount *, int);
-void mnt_release_group_id(struct vfsmount *);
-int get_dominating_id(struct vfsmount *mnt, const struct path *root);
+int propagate_mount_busy(struct mount *, int);
+void mnt_release_group_id(struct mount *);
+int get_dominating_id(struct mount *mnt, const struct path *root);
+unsigned int mnt_get_count(struct mount *mnt);
+void mnt_set_mountpoint(struct mount *, struct dentry *,
+ struct mount *);
+void release_mounts(struct list_head *);
+void umount_tree(struct mount *, int, struct list_head *);
+struct mount *copy_tree(struct mount *, struct dentry *, int);
+bool is_path_reachable(struct mount *, struct dentry *,
+ const struct path *root);
#endif /* _LINUX_PNODE_H */
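
The pnode.c/pnode.h conversions above are part of the split of mount bookkeeping out of struct vfsmount: the propagation fields now live in the fs-internal struct mount (declared in the new "mount.h"), which embeds a struct vfsmount, hence accesses like m->mnt.mnt_root and &m->mnt in the hunks. The sketch below shows that embed-and-recover pattern in simplified form; the field layout is illustrative only, and real_mount() is a cut-down stand-in for the series' container_of() helper rather than the actual fs/mount.h code.

/* Illustrative embed-and-recover pattern behind the vfsmount -> mount conversion. */
#include <stdio.h>
#include <stddef.h>

struct vfsmount {			/* the part still visible outside fs/ */
	int mnt_flags;
};

struct mount {				/* fs-internal wrapper around it */
	struct vfsmount mnt;
	int mnt_group_id;
};

/* container_of(): map a pointer to an embedded member back to its wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct mount *real_mount(struct vfsmount *m)
{
	return container_of(m, struct mount, mnt);
}

int main(void)
{
	struct mount m = { .mnt = { .mnt_flags = 1 }, .mnt_group_id = 42 };
	struct vfsmount *v = &m.mnt;	/* what external code still passes around */

	printf("group id via real_mount(): %d\n", real_mount(v)->mnt_group_id);
	return 0;
}
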
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 3a1dafd228d..8c344f037bd 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
sigemptyset(&sigign);
sigemptyset(&sigcatch);
- cutime = cstime = utime = stime = cputime_zero;
- cgtime = gtime = cputime_zero;
+ cutime = cstime = utime = stime = 0;
+ cgtime = gtime = 0;
if (lock_task_sighand(task, &flags)) {
struct signal_struct *sig = task->signal;
@@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
- gtime = cputime_add(gtime, t->gtime);
+ gtime += t->gtime;
t = next_thread(t);
} while (t != task);
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
thread_group_times(task, &utime, &stime);
- gtime = cputime_add(gtime, sig->gtime);
+ gtime += sig->gtime;
}
sid = task_session_nr_ns(task, ns);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 851ba3dcdc2..8173dfd89cb 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -83,9 +83,11 @@
#include <linux/pid_namespace.h>
#include <linux/fs_struct.h>
#include <linux/slab.h>
+#include <linux/flex_array.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
+#include <trace/events/oom.h>
#include "internal.h"
/* NOTE:
@@ -101,7 +103,7 @@
struct pid_entry {
char *name;
int len;
- mode_t mode;
+ umode_t mode;
const struct inode_operations *iop;
const struct file_operations *fop;
union proc_op op;
@@ -133,6 +135,8 @@ struct pid_entry {
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+static int proc_fd_permission(struct inode *inode, int mask);
+
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
@@ -165,9 +169,9 @@ static int get_task_root(struct task_struct *task, struct path *root)
return result;
}
-static int proc_cwd_link(struct inode *inode, struct path *path)
+static int proc_cwd_link(struct dentry *dentry, struct path *path)
{
- struct task_struct *task = get_proc_task(inode);
+ struct task_struct *task = get_proc_task(dentry->d_inode);
int result = -ENOENT;
if (task) {
@@ -182,9 +186,9 @@ static int proc_cwd_link(struct inode *inode, struct path *path)
return result;
}
-static int proc_root_link(struct inode *inode, struct path *path)
+static int proc_root_link(struct dentry *dentry, struct path *path)
{
- struct task_struct *task = get_proc_task(inode);
+ struct task_struct *task = get_proc_task(dentry->d_inode);
int result = -ENOENT;
if (task) {
@@ -627,122 +631,52 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
-static const struct inode_operations proc_def_inode_operations = {
- .setattr = proc_setattr,
-};
-
-static int mounts_open_common(struct inode *inode, struct file *file,
- const struct seq_operations *op)
+/*
+ * May the current process learn the task's sched/cmdline info (for hide_pid_min=1)
+ * or euid/egid (for hide_pid_min=2)?
+ */
+static bool has_pid_permissions(struct pid_namespace *pid,
+ struct task_struct *task,
+ int hide_pid_min)
{
- struct task_struct *task = get_proc_task(inode);
- struct nsproxy *nsp;
- struct mnt_namespace *ns = NULL;
- struct path root;
- struct proc_mounts *p;
- int ret = -EINVAL;
-
- if (task) {
- rcu_read_lock();
- nsp = task_nsproxy(task);
- if (nsp) {
- ns = nsp->mnt_ns;
- if (ns)
- get_mnt_ns(ns);
- }
- rcu_read_unlock();
- if (ns && get_task_root(task, &root) == 0)
- ret = 0;
- put_task_struct(task);
- }
-
- if (!ns)
- goto err;
- if (ret)
- goto err_put_ns;
-
- ret = -ENOMEM;
- p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
- if (!p)
- goto err_put_path;
-
- file->private_data = &p->m;
- ret = seq_open(file, op);
- if (ret)
- goto err_free;
-
- p->m.private = p;
- p->ns = ns;
- p->root = root;
- p->m.poll_event = ns->event;
-
- return 0;
-
- err_free:
- kfree(p);
- err_put_path:
- path_put(&root);
- err_put_ns:
- put_mnt_ns(ns);
- err:
- return ret;
+ if (pid->hide_pid < hide_pid_min)
+ return true;
+ if (in_group_p(pid->pid_gid))
+ return true;
+ return ptrace_may_access(task, PTRACE_MODE_READ);
}
-static int mounts_release(struct inode *inode, struct file *file)
-{
- struct proc_mounts *p = file->private_data;
- path_put(&p->root);
- put_mnt_ns(p->ns);
- return seq_release(inode, file);
-}
-static unsigned mounts_poll(struct file *file, poll_table *wait)
+static int proc_pid_permission(struct inode *inode, int mask)
{
- struct proc_mounts *p = file->private_data;
- unsigned res = POLLIN | POLLRDNORM;
-
- poll_wait(file, &p->ns->poll, wait);
- if (mnt_had_events(p))
- res |= POLLERR | POLLPRI;
-
- return res;
-}
+ struct pid_namespace *pid = inode->i_sb->s_fs_info;
+ struct task_struct *task;
+ bool has_perms;
-static int mounts_open(struct inode *inode, struct file *file)
-{
- return mounts_open_common(inode, file, &mounts_op);
-}
+ task = get_proc_task(inode);
+ has_perms = has_pid_permissions(pid, task, 1);
+ put_task_struct(task);
-static const struct file_operations proc_mounts_operations = {
- .open = mounts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mounts_release,
- .poll = mounts_poll,
-};
+ if (!has_perms) {
+ if (pid->hide_pid == 2) {
+ /*
+ * Let's make getdents(), stat(), and open()
+ * consistent with each other. If a process
+ * may not stat() a file, it shouldn't be seen
+ * in procfs at all.
+ */
+ return -ENOENT;
+ }
-static int mountinfo_open(struct inode *inode, struct file *file)
-{
- return mounts_open_common(inode, file, &mountinfo_op);
+ return -EPERM;
+ }
+ return generic_permission(inode, mask);
}
-static const struct file_operations proc_mountinfo_operations = {
- .open = mountinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mounts_release,
- .poll = mounts_poll,
-};
-static int mountstats_open(struct inode *inode, struct file *file)
-{
- return mounts_open_common(inode, file, &mountstats_op);
-}
-static const struct file_operations proc_mountstats_operations = {
- .open = mountstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mounts_release,
+static const struct inode_operations proc_def_inode_operations = {
+ .setattr = proc_setattr,
};
#define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
@@ -1124,6 +1058,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
else
task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
-OOM_DISABLE;
+ trace_oom_score_adj_update(task);
err_sighand:
unlock_task_sighand(task, &flags);
err_task_lock:
@@ -1211,6 +1146,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
task->signal->oom_score_adj = oom_score_adj;
if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
task->signal->oom_score_adj_min = oom_score_adj;
+ trace_oom_score_adj_update(task);
/*
* Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
* always attainable.
@@ -1567,13 +1503,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
.release = single_release,
};
-static int proc_exe_link(struct inode *inode, struct path *exe_path)
+static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
{
struct task_struct *task;
struct mm_struct *mm;
struct file *exe_file;
- task = get_proc_task(inode);
+ task = get_proc_task(dentry->d_inode);
if (!task)
return -ENOENT;
mm = get_task_mm(task);
@@ -1603,7 +1539,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
if (!proc_fd_access_allowed(inode))
goto out;
- error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
+ error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
out:
return ERR_PTR(error);
}
@@ -1642,7 +1578,7 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
if (!proc_fd_access_allowed(inode))
goto out;
- error = PROC_I(inode)->op.proc_get_link(inode, &path);
+ error = PROC_I(inode)->op.proc_get_link(dentry, &path);
if (error)
goto out;
@@ -1723,6 +1659,7 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
struct inode *inode = dentry->d_inode;
struct task_struct *task;
const struct cred *cred;
+ struct pid_namespace *pid = dentry->d_sb->s_fs_info;
generic_fillattr(inode, stat);
@@ -1731,6 +1668,14 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
stat->gid = 0;
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
+ if (!has_pid_permissions(pid, task, 2)) {
+ rcu_read_unlock();
+ /*
+ * This doesn't prevent learning whether the PID exists;
+ * it only makes getattr() consistent with readdir().
+ */
+ return -ENOENT;
+ }
if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
task_dumpable(task)) {
cred = __task_cred(task);
@@ -1934,9 +1879,9 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
return -ENOENT;
}
-static int proc_fd_link(struct inode *inode, struct path *path)
+static int proc_fd_link(struct dentry *dentry, struct path *path)
{
- return proc_fd_info(inode, path, NULL);
+ return proc_fd_info(dentry->d_inode, path, NULL);
}
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
@@ -2157,6 +2102,355 @@ static const struct file_operations proc_fd_operations = {
.llseek = default_llseek,
};
+#ifdef CONFIG_CHECKPOINT_RESTORE
+
+/*
+ * dname_to_vma_addr - maps a dentry name into two unsigned longs
+ * which represent vma start and end addresses.
+ */
+static int dname_to_vma_addr(struct dentry *dentry,
+ unsigned long *start, unsigned long *end)
+{
+ if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ unsigned long vm_start, vm_end;
+ bool exact_vma_exists = false;
+ struct mm_struct *mm = NULL;
+ struct task_struct *task;
+ const struct cred *cred;
+ struct inode *inode;
+ int status = 0;
+
+ if (nd && nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ status = -EACCES;
+ goto out_notask;
+ }
+
+ inode = dentry->d_inode;
+ task = get_proc_task(inode);
+ if (!task)
+ goto out_notask;
+
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto out;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
+ down_read(&mm->mmap_sem);
+ exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
+ up_read(&mm->mmap_sem);
+ }
+
+ mmput(mm);
+
+ if (exact_vma_exists) {
+ if (task_dumpable(task)) {
+ rcu_read_lock();
+ cred = __task_cred(task);
+ inode->i_uid = cred->euid;
+ inode->i_gid = cred->egid;
+ rcu_read_unlock();
+ } else {
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ }
+ security_task_to_inode(task, inode);
+ status = 1;
+ }
+
+out:
+ put_task_struct(task);
+
+out_notask:
+ if (status <= 0)
+ d_drop(dentry);
+
+ return status;
+}
+
+static const struct dentry_operations tid_map_files_dentry_operations = {
+ .d_revalidate = map_files_d_revalidate,
+ .d_delete = pid_delete_dentry,
+};
+
+static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
+{
+ unsigned long vm_start, vm_end;
+ struct vm_area_struct *vma;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ int rc;
+
+ rc = -ENOENT;
+ task = get_proc_task(dentry->d_inode);
+ if (!task)
+ goto out;
+
+ mm = get_task_mm(task);
+ put_task_struct(task);
+ if (!mm)
+ goto out;
+
+ rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
+ if (rc)
+ goto out_mmput;
+
+ down_read(&mm->mmap_sem);
+ vma = find_exact_vma(mm, vm_start, vm_end);
+ if (vma && vma->vm_file) {
+ *path = vma->vm_file->f_path;
+ path_get(path);
+ rc = 0;
+ }
+ up_read(&mm->mmap_sem);
+
+out_mmput:
+ mmput(mm);
+out:
+ return rc;
+}
+
+struct map_files_info {
+ struct file *file;
+ unsigned long len;
+ unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
+};
+
+static struct dentry *
+proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
+ struct task_struct *task, const void *ptr)
+{
+ const struct file *file = ptr;
+ struct proc_inode *ei;
+ struct inode *inode;
+
+ if (!file)
+ return ERR_PTR(-ENOENT);
+
+ inode = proc_pid_make_inode(dir->i_sb, task);
+ if (!inode)
+ return ERR_PTR(-ENOENT);
+
+ ei = PROC_I(inode);
+ ei->op.proc_get_link = proc_map_files_get_link;
+
+ inode->i_op = &proc_pid_link_inode_operations;
+ inode->i_size = 64;
+ inode->i_mode = S_IFLNK;
+
+ if (file->f_mode & FMODE_READ)
+ inode->i_mode |= S_IRUSR;
+ if (file->f_mode & FMODE_WRITE)
+ inode->i_mode |= S_IWUSR;
+
+ d_set_d_op(dentry, &tid_map_files_dentry_operations);
+ d_add(dentry, inode);
+
+ return NULL;
+}
+
+static struct dentry *proc_map_files_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ unsigned long vm_start, vm_end;
+ struct vm_area_struct *vma;
+ struct task_struct *task;
+ struct dentry *result;
+ struct mm_struct *mm;
+
+ result = ERR_PTR(-EACCES);
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+
+ result = ERR_PTR(-ENOENT);
+ task = get_proc_task(dir);
+ if (!task)
+ goto out;
+
+ result = ERR_PTR(-EACCES);
+ if (lock_trace(task))
+ goto out_put_task;
+
+ result = ERR_PTR(-ENOENT);
+ if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
+ goto out_unlock;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_unlock;
+
+ down_read(&mm->mmap_sem);
+ vma = find_exact_vma(mm, vm_start, vm_end);
+ if (!vma)
+ goto out_no_vma;
+
+ result = proc_map_files_instantiate(dir, dentry, task, vma->vm_file);
+
+out_no_vma:
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out_unlock:
+ unlock_trace(task);
+out_put_task:
+ put_task_struct(task);
+out:
+ return result;
+}
+
+static const struct inode_operations proc_map_files_inode_operations = {
+ .lookup = proc_map_files_lookup,
+ .permission = proc_fd_permission,
+ .setattr = proc_setattr,
+};
+
+static int
+proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct vm_area_struct *vma;
+ struct task_struct *task;
+ struct mm_struct *mm;
+ ino_t ino;
+ int ret;
+
+ ret = -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+
+ ret = -ENOENT;
+ task = get_proc_task(inode);
+ if (!task)
+ goto out;
+
+ ret = -EACCES;
+ if (lock_trace(task))
+ goto out_put_task;
+
+ ret = 0;
+ switch (filp->f_pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
+ goto out_unlock;
+ filp->f_pos++;
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+ goto out_unlock;
+ filp->f_pos++;
+ default:
+ {
+ unsigned long nr_files, pos, i;
+ struct flex_array *fa = NULL;
+ struct map_files_info info;
+ struct map_files_info *p;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_unlock;
+ down_read(&mm->mmap_sem);
+
+ nr_files = 0;
+
+ /*
+ * We need two passes here:
+ *
+ * 1) Collect vmas of mapped files with mmap_sem taken
+ * 2) Release mmap_sem and instantiate entries
+ *
+ * otherwise lockdep complains, since the filldir()
+ * routine might require mmap_sem to be taken in might_fault().
+ */
+
+ for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
+ if (vma->vm_file && ++pos > filp->f_pos)
+ nr_files++;
+ }
+
+ if (nr_files) {
+ fa = flex_array_alloc(sizeof(info), nr_files,
+ GFP_KERNEL);
+ if (!fa || flex_array_prealloc(fa, 0, nr_files,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ if (fa)
+ flex_array_free(fa);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ goto out_unlock;
+ }
+ for (i = 0, vma = mm->mmap, pos = 2; vma;
+ vma = vma->vm_next) {
+ if (!vma->vm_file)
+ continue;
+ if (++pos <= filp->f_pos)
+ continue;
+
+ get_file(vma->vm_file);
+ info.file = vma->vm_file;
+ info.len = snprintf(info.name,
+ sizeof(info.name), "%lx-%lx",
+ vma->vm_start, vma->vm_end);
+ if (flex_array_put(fa, i++, &info, GFP_KERNEL))
+ BUG();
+ }
+ }
+ up_read(&mm->mmap_sem);
+
+ for (i = 0; i < nr_files; i++) {
+ p = flex_array_get(fa, i);
+ ret = proc_fill_cache(filp, dirent, filldir,
+ p->name, p->len,
+ proc_map_files_instantiate,
+ task, p->file);
+ if (ret)
+ break;
+ filp->f_pos++;
+ fput(p->file);
+ }
+ for (; i < nr_files; i++) {
+ /*
+ * In case of an error, don't forget
+ * to put the remaining file refs.
+ */
+ p = flex_array_get(fa, i);
+ fput(p->file);
+ }
+ if (fa)
+ flex_array_free(fa);
+ mmput(mm);
+ }
+ }
+
+out_unlock:
+ unlock_trace(task);
+out_put_task:
+ put_task_struct(task);
+out:
+ return ret;
+}
+
+static const struct file_operations proc_map_files_operations = {
+ .read = generic_read_dir,
+ .readdir = proc_map_files_readdir,
+ .llseek = default_llseek,
+};
+
+#endif /* CONFIG_CHECKPOINT_RESTORE */
+
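
As the comment in proc_map_files_readdir() above explains, the listing is built in two passes: the relevant vma data is snapshotted into a flex_array while mmap_sem is held, and filldir(), which may fault and want mmap_sem again, only runs after the lock is dropped. The following generic userspace sketch shows that collect-under-lock, emit-after-unlock shape; the mutex, the region table and emit() are all invented for the example.

/* Two-pass pattern: snapshot under the lock, run the blocking consumer without it. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct region { unsigned long start, end; };

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region regions[] = { { 0x1000, 0x2000 }, { 0x3000, 0x5000 } };

/* Stand-in for filldir(): may block, so it must not run under map_lock. */
static void emit(const char *name)
{
	printf("%s\n", name);
}

int main(void)
{
	size_t n = sizeof(regions) / sizeof(regions[0]);
	char (*names)[32];
	size_t i;

	names = malloc(n * sizeof(*names));
	if (!names)
		return 1;

	/* pass 1: copy what we need while the lock is held */
	pthread_mutex_lock(&map_lock);
	for (i = 0; i < n; i++)
		snprintf(names[i], sizeof(names[i]), "%lx-%lx",
			 regions[i].start, regions[i].end);
	pthread_mutex_unlock(&map_lock);

	/* pass 2: call the possibly-blocking consumer with the lock dropped */
	for (i = 0; i < n; i++)
		emit(names[i]);
	free(names);
	return 0;
}
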
/*
* /proc/pid/fd needs a special permission handler so that a process can still
* access /proc/self/fd after it has executed a setuid().
@@ -2772,6 +3066,9 @@ static const struct inode_operations proc_task_inode_operations;
static const struct pid_entry tgid_base_stuff[] = {
DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
+#endif
DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
@@ -2875,6 +3172,7 @@ static const struct inode_operations proc_tgid_base_inode_operations = {
.lookup = proc_tgid_base_lookup,
.getattr = pid_getattr,
.setattr = proc_setattr,
+ .permission = proc_pid_permission,
};
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
@@ -3078,6 +3376,12 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
proc_pid_instantiate, iter.task, NULL);
}
+static int fake_filldir(void *buf, const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned d_type)
+{
+ return 0;
+}
+
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
@@ -3085,6 +3389,7 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
+ filldir_t __filldir;
if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
goto out_no_task;
@@ -3106,8 +3411,13 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
for (iter = next_tgid(ns, iter);
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
+ if (has_pid_permissions(ns, iter.task, 2))
+ __filldir = filldir;
+ else
+ __filldir = fake_filldir;
+
filp->f_pos = iter.tgid + TGID_OFFSET;
- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
put_task_struct(iter.task);
goto out;
}
@@ -3442,6 +3752,7 @@ static const struct inode_operations proc_task_inode_operations = {
.lookup = proc_task_lookup,
.getattr = proc_task_getattr,
.setattr = proc_setattr,
+ .permission = proc_pid_permission,
};
static const struct file_operations proc_task_operations = {
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 10090d9c7ad..2edf34f2eb6 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -597,7 +597,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
const char *name,
- mode_t mode,
+ umode_t mode,
nlink_t nlink)
{
struct proc_dir_entry *ent = NULL;
@@ -659,7 +659,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
}
EXPORT_SYMBOL(proc_symlink);
-struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
+struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
struct proc_dir_entry *parent)
{
struct proc_dir_entry *ent;
@@ -699,7 +699,7 @@ struct proc_dir_entry *proc_mkdir(const char *name,
}
EXPORT_SYMBOL(proc_mkdir);
-struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
struct proc_dir_entry *parent)
{
struct proc_dir_entry *ent;
@@ -728,7 +728,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
}
EXPORT_SYMBOL(create_proc_entry);
-struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
const struct file_operations *proc_fops,
void *data)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 7737c5468a4..84fd3235a59 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
+#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
@@ -17,7 +18,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/mount.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -77,7 +80,6 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
static void proc_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
@@ -102,12 +104,27 @@ void __init proc_init_inodecache(void)
init_once);
}
+static int proc_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct pid_namespace *pid = sb->s_fs_info;
+
+ if (pid->pid_gid)
+ seq_printf(seq, ",gid=%lu", (unsigned long)pid->pid_gid);
+ if (pid->hide_pid != 0)
+ seq_printf(seq, ",hidepid=%u", pid->hide_pid);
+
+ return 0;
+}
+
static const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
.drop_inode = generic_delete_inode,
.evict_inode = proc_evict_inode,
.statfs = simple_statfs,
+ .remount_fs = proc_remount,
+ .show_options = proc_show_options,
};
static void __pde_users_dec(struct proc_dir_entry *pde)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 7838e5cfec1..292577531ad 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -117,6 +117,7 @@ void pde_put(struct proc_dir_entry *pde);
int proc_fill_super(struct super_block *);
struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
+int proc_remount(struct super_block *sb, int *flags, char *data);
/*
* These are generic /proc routines that use the internal
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 586174168e2..80e4645f799 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(i.freeswap),
K(global_page_state(NR_FILE_DIRTY)),
K(global_page_state(NR_WRITEBACK)),
- K(global_page_state(NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(global_page_state(NR_ANON_PAGES)
+ global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ K(global_page_state(NR_ANON_PAGES)),
#endif
- ),
K(global_page_state(NR_FILE_MAPPED)),
K(global_page_state(NR_SHMEM)),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index be177f702ac..27da860115c 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -9,7 +9,6 @@
#include <linux/file.h>
#include <linux/utsname.h>
#include <net/net_namespace.h>
-#include <linux/mnt_namespace.h>
#include <linux/ipc_namespace.h>
#include <linux/pid_namespace.h>
#include "internal.h"
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index f738024ccc8..06e1cc17caf 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -179,7 +179,7 @@ const struct file_operations proc_net_operations = {
struct proc_dir_entry *proc_net_fops_create(struct net *net,
- const char *name, mode_t mode, const struct file_operations *fops)
+ const char *name, umode_t mode, const struct file_operations *fops)
{
return proc_create(name, mode, net->proc_net, fops);
}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9a8a2b77b87..46a15d8a29c 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -18,6 +18,7 @@
#include <linux/bitops.h>
#include <linux/mount.h>
#include <linux/pid_namespace.h>
+#include <linux/parser.h>
#include "internal.h"
@@ -36,6 +37,63 @@ static int proc_set_super(struct super_block *sb, void *data)
return err;
}
+enum {
+ Opt_gid, Opt_hidepid, Opt_err,
+};
+
+static const match_table_t tokens = {
+ {Opt_hidepid, "hidepid=%u"},
+ {Opt_gid, "gid=%u"},
+ {Opt_err, NULL},
+};
+
+static int proc_parse_options(char *options, struct pid_namespace *pid)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+
+ if (!options)
+ return 1;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+ if (!*p)
+ continue;
+
+ args[0].to = args[0].from = 0;
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return 0;
+ pid->pid_gid = option;
+ break;
+ case Opt_hidepid:
+ if (match_int(&args[0], &option))
+ return 0;
+ if (option < 0 || option > 2) {
+ pr_err("proc: hidepid value must be between 0 and 2.\n");
+ return 0;
+ }
+ pid->hide_pid = option;
+ break;
+ default:
+ pr_err("proc: unrecognized mount option \"%s\" "
+ "or missing value\n", p);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+int proc_remount(struct super_block *sb, int *flags, char *data)
+{
+ struct pid_namespace *pid = sb->s_fs_info;
+ return !proc_parse_options(data, pid);
+}
+
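
With proc_parse_options() and proc_remount() in place, procfs accepts gid= and hidepid= mount options; combined with the permission checks added to fs/proc/base.c above, hidepid=1 restricts access to other users' /proc/PID contents and hidepid=2 hides those directories entirely from processes outside the gid= group. Below is a simplified userspace model of the option parsing; parse_proc_options(), struct pid_ns_opts and the sample values are made up for the example, and the kernel version uses match_token() rather than sscanf().

/* Simplified userspace model of proc_parse_options(); returns 1 on success, 0 on error. */
#define _DEFAULT_SOURCE			/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

struct pid_ns_opts { unsigned int pid_gid; int hide_pid; };

static int parse_proc_options(char *options, struct pid_ns_opts *ns)
{
	char *p;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		unsigned int v;

		if (!*p)
			continue;
		if (sscanf(p, "gid=%u", &v) == 1) {
			ns->pid_gid = v;
		} else if (sscanf(p, "hidepid=%u", &v) == 1) {
			if (v > 2) {
				fprintf(stderr, "hidepid must be 0..2\n");
				return 0;
			}
			ns->hide_pid = v;
		} else {
			fprintf(stderr, "unrecognized option \"%s\"\n", p);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	char opts[] = "gid=100,hidepid=2";
	struct pid_ns_opts ns = { 0, 0 };

	if (parse_proc_options(opts, &ns))
		printf("gid=%u hidepid=%d\n", ns.pid_gid, ns.hide_pid);
	return 0;
}

In practice this is what allows, for example, mount -o remount,hidepid=2,gid=100 /proc on a running system.
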
static struct dentry *proc_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
@@ -43,11 +101,15 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
struct super_block *sb;
struct pid_namespace *ns;
struct proc_inode *ei;
+ char *options;
- if (flags & MS_KERNMOUNT)
+ if (flags & MS_KERNMOUNT) {
ns = (struct pid_namespace *)data;
- else
+ options = NULL;
+ } else {
ns = current->nsproxy->pid_ns;
+ options = data;
+ }
sb = sget(fs_type, proc_test_super, proc_set_super, ns);
if (IS_ERR(sb))
@@ -55,6 +117,10 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
if (!sb->s_root) {
sb->s_flags = flags;
+ if (!proc_parse_options(options, ns)) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(-EINVAL);
+ }
err = proc_fill_super(sb);
if (err) {
deactivate_locked_super(sb);
@@ -91,20 +157,18 @@ static struct file_system_type proc_fs_type = {
void __init proc_root_init(void)
{
- struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
- mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
- if (IS_ERR(mnt)) {
+ err = pid_ns_prepare_proc(&init_pid_ns);
+ if (err) {
unregister_filesystem(&proc_fs_type);
return;
}
- init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
@@ -209,5 +273,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
void pid_ns_release_proc(struct pid_namespace *ns)
{
- mntput(ns->proc_mnt);
+ kern_unmount(ns->proc_mnt);
}
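
The new proc_parse_options()/proc_remount() pair accepts gid= and hidepid= (0-2) both at mount time and on remount. A short sketch of how the option would typically be exercised from userspace, assuming root privileges (mount(2) flags and arguments are standard; only the option string comes from this patch):

    /* Remount /proc with hidepid=2 so other users' /proc/<pid> entries are hidden. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        if (mount("proc", "/proc", "proc", MS_REMOUNT, "hidepid=2") < 0) {
            perror("mount");
            return 1;
        }
        puts("remounted /proc with hidepid=2");
        return 0;
    }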
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 42b274da92c..d76ca6ae2b1 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -22,31 +22,29 @@
#define arch_idle_time(cpu) 0
#endif
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
- cputime64_t idle;
+ u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
if (idle_time == -1ULL) {
/* !NO_HZ so we can rely on cpustat.idle */
- idle = kstat_cpu(cpu).cpustat.idle;
- idle = cputime64_add(idle, arch_idle_time(cpu));
+ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ idle += arch_idle_time(cpu);
} else
- idle = usecs_to_cputime(idle_time);
+ idle = usecs_to_cputime64(idle_time);
return idle;
}
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
- cputime64_t iowait;
+ u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_time == -1ULL)
/* !NO_HZ so we can rely on cpustat.iowait */
- iowait = kstat_cpu(cpu).cpustat.iowait;
+ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
- iowait = usecs_to_cputime(iowait_time);
+ iowait = usecs_to_cputime64(iowait_time);
return iowait;
}
@@ -55,33 +53,30 @@ static int show_stat(struct seq_file *p, void *v)
{
int i, j;
unsigned long jif;
- cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
- cputime64_t guest, guest_nice;
+ u64 user, nice, system, idle, iowait, irq, softirq, steal;
+ u64 guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
struct timespec boottime;
user = nice = system = idle = iowait =
- irq = softirq = steal = cputime64_zero;
- guest = guest_nice = cputime64_zero;
+ irq = softirq = steal = 0;
+ guest = guest_nice = 0;
getboottime(&boottime);
jif = boottime.tv_sec;
for_each_possible_cpu(i) {
- user = cputime64_add(user, kstat_cpu(i).cpustat.user);
- nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
- system = cputime64_add(system, kstat_cpu(i).cpustat.system);
- idle = cputime64_add(idle, get_idle_time(i));
- iowait = cputime64_add(iowait, get_iowait_time(i));
- irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
- softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
- steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
- guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
- guest_nice = cputime64_add(guest_nice,
- kstat_cpu(i).cpustat.guest_nice);
- sum += kstat_cpu_irqs_sum(i);
- sum += arch_irq_stat_cpu(i);
+ user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
+ nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+ system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+ idle += get_idle_time(i);
+ iowait += get_iowait_time(i);
+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
for (j = 0; j < NR_SOFTIRQS; j++) {
unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -106,16 +101,16 @@ static int show_stat(struct seq_file *p, void *v)
(unsigned long long)cputime64_to_clock_t(guest_nice));
for_each_online_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- user = kstat_cpu(i).cpustat.user;
- nice = kstat_cpu(i).cpustat.nice;
- system = kstat_cpu(i).cpustat.system;
+ user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
+ nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+ system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
idle = get_idle_time(i);
iowait = get_iowait_time(i);
- irq = kstat_cpu(i).cpustat.irq;
- softirq = kstat_cpu(i).cpustat.softirq;
- steal = kstat_cpu(i).cpustat.steal;
- guest = kstat_cpu(i).cpustat.guest;
- guest_nice = kstat_cpu(i).cpustat.guest_nice;
+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
seq_printf(p,
"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
"%llu\n",
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 766b1d45605..9610ac772d7 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
{
struct timespec uptime;
struct timespec idle;
+ u64 idletime;
+ u64 nsec;
+ u32 rem;
int i;
- cputime_t idletime = cputime_zero;
+ idletime = 0;
for_each_possible_cpu(i)
- idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+ idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime);
- cputime_to_timespec(idletime, &idle);
+ nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
+ idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+ idle.tv_nsec = rem;
seq_printf(m, "%lu.%02lu %lu.%02lu\n",
(unsigned long) uptime.tv_sec,
(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
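
In the uptime hunk the idle figure is now accumulated as a raw u64 and converted by hand: nanoseconds = jiffies * TICK_NSEC, then split into seconds and remainder with div_u64_rem(). The same split written in ordinary C, purely for illustration (the input value is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t nsec = 12345678901234ULL;      /* example value */
        uint64_t sec = nsec / NSEC_PER_SEC;
        uint32_t rem = (uint32_t)(nsec % NSEC_PER_SEC);

        /* corresponds to idle.tv_sec / idle.tv_nsec in the hunk above */
        printf("%llu.%09u\n", (unsigned long long)sec, (unsigned)rem);
        return 0;
    }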
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
new file mode 100644
index 00000000000..12412852d88
--- /dev/null
+++ b/fs/proc_namespace.c
@@ -0,0 +1,333 @@
+/*
+ * fs/proc_namespace.c - handling of /proc/<pid>/{mounts,mountinfo,mountstats}
+ *
+ * In fact, that's a piece of procfs; it's *almost* isolated from
+ * the rest of fs/proc, but has rather close relationships with
+ * fs/namespace.c, thus here instead of fs/proc
+ *
+ */
+#include <linux/mnt_namespace.h>
+#include <linux/nsproxy.h>
+#include <linux/security.h>
+#include <linux/fs_struct.h>
+#include "proc/internal.h" /* only for get_proc_task() in ->open() */
+
+#include "pnode.h"
+#include "internal.h"
+
+static unsigned mounts_poll(struct file *file, poll_table *wait)
+{
+ struct proc_mounts *p = file->private_data;
+ struct mnt_namespace *ns = p->ns;
+ unsigned res = POLLIN | POLLRDNORM;
+
+ poll_wait(file, &p->ns->poll, wait);
+
+ br_read_lock(vfsmount_lock);
+ if (p->m.poll_event != ns->event) {
+ p->m.poll_event = ns->event;
+ res |= POLLERR | POLLPRI;
+ }
+ br_read_unlock(vfsmount_lock);
+
+ return res;
+}
+
+struct proc_fs_info {
+ int flag;
+ const char *str;
+};
+
+static int show_sb_opts(struct seq_file *m, struct super_block *sb)
+{
+ static const struct proc_fs_info fs_info[] = {
+ { MS_SYNCHRONOUS, ",sync" },
+ { MS_DIRSYNC, ",dirsync" },
+ { MS_MANDLOCK, ",mand" },
+ { 0, NULL }
+ };
+ const struct proc_fs_info *fs_infop;
+
+ for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ if (sb->s_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+
+ return security_sb_show_options(m, sb);
+}
+
+static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
+{
+ static const struct proc_fs_info mnt_info[] = {
+ { MNT_NOSUID, ",nosuid" },
+ { MNT_NODEV, ",nodev" },
+ { MNT_NOEXEC, ",noexec" },
+ { MNT_NOATIME, ",noatime" },
+ { MNT_NODIRATIME, ",nodiratime" },
+ { MNT_RELATIME, ",relatime" },
+ { 0, NULL }
+ };
+ const struct proc_fs_info *fs_infop;
+
+ for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ if (mnt->mnt_flags & fs_infop->flag)
+ seq_puts(m, fs_infop->str);
+ }
+}
+
+static inline void mangle(struct seq_file *m, const char *s)
+{
+ seq_escape(m, s, " \t\n\\");
+}
+
+static void show_type(struct seq_file *m, struct super_block *sb)
+{
+ mangle(m, sb->s_type->name);
+ if (sb->s_subtype && sb->s_subtype[0]) {
+ seq_putc(m, '.');
+ mangle(m, sb->s_subtype);
+ }
+}
+
+static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
+{
+ struct mount *r = real_mount(mnt);
+ int err = 0;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct super_block *sb = mnt_path.dentry->d_sb;
+
+ if (sb->s_op->show_devname) {
+ err = sb->s_op->show_devname(m, mnt_path.dentry);
+ if (err)
+ goto out;
+ } else {
+ mangle(m, r->mnt_devname ? r->mnt_devname : "none");
+ }
+ seq_putc(m, ' ');
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
+ show_type(m, sb);
+ seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
+ err = show_sb_opts(m, sb);
+ if (err)
+ goto out;
+ show_mnt_opts(m, mnt);
+ if (sb->s_op->show_options)
+ err = sb->s_op->show_options(m, mnt_path.dentry);
+ seq_puts(m, " 0 0\n");
+out:
+ return err;
+}
+
+static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
+{
+ struct proc_mounts *p = m->private;
+ struct mount *r = real_mount(mnt);
+ struct super_block *sb = mnt->mnt_sb;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct path root = p->root;
+ int err = 0;
+
+ seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
+ MAJOR(sb->s_dev), MINOR(sb->s_dev));
+ if (sb->s_op->show_path)
+ err = sb->s_op->show_path(m, mnt->mnt_root);
+ else
+ seq_dentry(m, mnt->mnt_root, " \t\n\\");
+ if (err)
+ goto out;
+ seq_putc(m, ' ');
+
+ /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+ err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+ if (err)
+ goto out;
+
+ seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
+ show_mnt_opts(m, mnt);
+
+ /* Tagged fields ("foo:X" or "bar") */
+ if (IS_MNT_SHARED(r))
+ seq_printf(m, " shared:%i", r->mnt_group_id);
+ if (IS_MNT_SLAVE(r)) {
+ int master = r->mnt_master->mnt_group_id;
+ int dom = get_dominating_id(r, &p->root);
+ seq_printf(m, " master:%i", master);
+ if (dom && dom != master)
+ seq_printf(m, " propagate_from:%i", dom);
+ }
+ if (IS_MNT_UNBINDABLE(r))
+ seq_puts(m, " unbindable");
+
+ /* Filesystem specific data */
+ seq_puts(m, " - ");
+ show_type(m, sb);
+ seq_putc(m, ' ');
+ if (sb->s_op->show_devname)
+ err = sb->s_op->show_devname(m, mnt->mnt_root);
+ else
+ mangle(m, r->mnt_devname ? r->mnt_devname : "none");
+ if (err)
+ goto out;
+ seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
+ err = show_sb_opts(m, sb);
+ if (err)
+ goto out;
+ if (sb->s_op->show_options)
+ err = sb->s_op->show_options(m, mnt->mnt_root);
+ seq_putc(m, '\n');
+out:
+ return err;
+}
+
+static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
+{
+ struct mount *r = real_mount(mnt);
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct super_block *sb = mnt_path.dentry->d_sb;
+ int err = 0;
+
+ /* device */
+ if (sb->s_op->show_devname) {
+ seq_puts(m, "device ");
+ err = sb->s_op->show_devname(m, mnt_path.dentry);
+ } else {
+ if (r->mnt_devname) {
+ seq_puts(m, "device ");
+ mangle(m, r->mnt_devname);
+ } else
+ seq_puts(m, "no device");
+ }
+
+ /* mount point */
+ seq_puts(m, " mounted on ");
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
+
+ /* file system type */
+ seq_puts(m, "with fstype ");
+ show_type(m, sb);
+
+ /* optional statistics */
+ if (sb->s_op->show_stats) {
+ seq_putc(m, ' ');
+ if (!err)
+ err = sb->s_op->show_stats(m, mnt_path.dentry);
+ }
+
+ seq_putc(m, '\n');
+ return err;
+}
+
+static int mounts_open_common(struct inode *inode, struct file *file,
+ int (*show)(struct seq_file *, struct vfsmount *))
+{
+ struct task_struct *task = get_proc_task(inode);
+ struct nsproxy *nsp;
+ struct mnt_namespace *ns = NULL;
+ struct path root;
+ struct proc_mounts *p;
+ int ret = -EINVAL;
+
+ if (!task)
+ goto err;
+
+ rcu_read_lock();
+ nsp = task_nsproxy(task);
+ if (!nsp) {
+ rcu_read_unlock();
+ put_task_struct(task);
+ goto err;
+ }
+ ns = nsp->mnt_ns;
+ if (!ns) {
+ rcu_read_unlock();
+ put_task_struct(task);
+ goto err;
+ }
+ get_mnt_ns(ns);
+ rcu_read_unlock();
+ task_lock(task);
+ if (!task->fs) {
+ task_unlock(task);
+ put_task_struct(task);
+ ret = -ENOENT;
+ goto err_put_ns;
+ }
+ get_fs_root(task->fs, &root);
+ task_unlock(task);
+ put_task_struct(task);
+
+ ret = -ENOMEM;
+ p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+ if (!p)
+ goto err_put_path;
+
+ file->private_data = &p->m;
+ ret = seq_open(file, &mounts_op);
+ if (ret)
+ goto err_free;
+
+ p->m.private = p;
+ p->ns = ns;
+ p->root = root;
+ p->m.poll_event = ns->event;
+ p->show = show;
+
+ return 0;
+
+ err_free:
+ kfree(p);
+ err_put_path:
+ path_put(&root);
+ err_put_ns:
+ put_mnt_ns(ns);
+ err:
+ return ret;
+}
+
+static int mounts_release(struct inode *inode, struct file *file)
+{
+ struct proc_mounts *p = file->private_data;
+ path_put(&p->root);
+ put_mnt_ns(p->ns);
+ return seq_release(inode, file);
+}
+
+static int mounts_open(struct inode *inode, struct file *file)
+{
+ return mounts_open_common(inode, file, show_vfsmnt);
+}
+
+static int mountinfo_open(struct inode *inode, struct file *file)
+{
+ return mounts_open_common(inode, file, show_mountinfo);
+}
+
+static int mountstats_open(struct inode *inode, struct file *file)
+{
+ return mounts_open_common(inode, file, show_vfsstat);
+}
+
+const struct file_operations proc_mounts_operations = {
+ .open = mounts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mounts_release,
+ .poll = mounts_poll,
+};
+
+const struct file_operations proc_mountinfo_operations = {
+ .open = mountinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mounts_release,
+ .poll = mounts_poll,
+};
+
+const struct file_operations proc_mountstats_operations = {
+ .open = mountstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = mounts_release,
+};
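
show_mountinfo() in the new fs/proc_namespace.c emits one record per mount: "ID parent-ID major:minor root mountpoint options [tags] - fstype source super-options". The format itself predates this patch (the code is only being moved out of fs/namespace.c); a tiny parser for the first record of /proc/self/mountinfo makes the field layout concrete:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/mountinfo", "r");
        int id, parent, maj, min;
        char root[256], mnt[256], opts[256];

        if (!f)
            return 1;
        if (fscanf(f, "%d %d %d:%d %255s %255s %255s",
                   &id, &parent, &maj, &min, root, mnt, opts) == 7)
            printf("mount %d (parent %d) dev %d:%d at %s [%s]\n",
                   id, parent, maj, min, mnt, opts);
        fclose(f);
        return 0;
    }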
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 379a02dc121..b3b426edb2f 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -80,7 +80,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = dentry->d_inode->i_private;
- p->psi->erase(p->type, p->id, p->psi);
+ if (p->psi->erase)
+ p->psi->erase(p->type, p->id, p->psi);
return simple_unlink(dir, dentry);
}
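
pstore_unlink() now tolerates backends that do not implement ->erase. The underlying pattern -- treat an operation pointer as optional and check it before calling -- shown in isolation (struct backend_ops and try_erase are made-up names, not the pstore API):

    #include <stdio.h>

    struct backend_ops {
        int (*erase)(int id);   /* may legitimately be NULL */
    };

    static int try_erase(const struct backend_ops *ops, int id)
    {
        if (!ops->erase)
            return 0;       /* nothing to do, not an error */
        return ops->erase(id);
    }

    int main(void)
    {
        struct backend_ops ro_backend = { .erase = NULL };

        printf("erase -> %d\n", try_erase(&ro_backend, 1));
        return 0;
    }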
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 2bd620f0d79..9ec22d3b429 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -122,7 +122,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
memcpy(dst, s1 + s1_start, l1_cpy);
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
- ret = psinfo->write(PSTORE_TYPE_DMESG, &id, part,
+ ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
hsize + l1_cpy + l2_cpy, psinfo);
if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_new_entry = 1;
@@ -167,6 +167,7 @@ int pstore_register(struct pstore_info *psi)
}
psinfo = psi;
+ mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
@@ -195,30 +196,32 @@ EXPORT_SYMBOL_GPL(pstore_register);
void pstore_get_records(int quiet)
{
struct pstore_info *psi = psinfo;
+ char *buf = NULL;
ssize_t size;
u64 id;
enum pstore_type_id type;
struct timespec time;
int failed = 0, rc;
- unsigned long flags;
if (!psi)
return;
- spin_lock_irqsave(&psinfo->buf_lock, flags);
- rc = psi->open(psi);
- if (rc)
+ mutex_lock(&psi->read_mutex);
+ if (psi->open && psi->open(psi))
goto out;
- while ((size = psi->read(&id, &type, &time, psi)) > 0) {
- rc = pstore_mkfile(type, psi->name, id, psi->buf, (size_t)size,
+ while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
+ rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
time, psi);
+ kfree(buf);
+ buf = NULL;
if (rc && (rc != -EEXIST || !quiet))
failed++;
}
- psi->close(psi);
+ if (psi->close)
+ psi->close(psi);
out:
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ mutex_unlock(&psi->read_mutex);
if (failed)
printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
@@ -240,33 +243,5 @@ static void pstore_timefunc(unsigned long dummy)
mod_timer(&pstore_timer, jiffies + PSTORE_INTERVAL);
}
-/*
- * Call platform driver to write a record to the
- * persistent store.
- */
-int pstore_write(enum pstore_type_id type, char *buf, size_t size)
-{
- u64 id;
- int ret;
- unsigned long flags;
-
- if (!psinfo)
- return -ENODEV;
-
- if (size > psinfo->bufsize)
- return -EFBIG;
-
- spin_lock_irqsave(&psinfo->buf_lock, flags);
- memcpy(psinfo->buf, buf, size);
- ret = psinfo->write(type, &id, 0, size, psinfo);
- if (ret == 0 && pstore_is_mounted())
- pstore_mkfile(PSTORE_TYPE_DMESG, psinfo->name, id, psinfo->buf,
- size, CURRENT_TIME, psinfo);
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pstore_write);
-
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
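
pstore_get_records() now serializes readers with a mutex instead of the IRQ spinlock, makes ->open/->close optional, and expects ->read to hand back a freshly allocated buffer that the core kfree()s after pstore_mkfile(). A userspace analogue of that ownership contract (read_record is an illustrative stand-in, not the pstore callback):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Reader allocates; caller consumes and frees.  Returns size, or 0 at EOF. */
    static size_t read_record(int idx, char **buf)
    {
        static const char *records[] = { "first record", "second record" };

        if (idx >= 2)
            return 0;
        *buf = strdup(records[idx]);
        return *buf ? strlen(*buf) : 0;
    }

    int main(void)
    {
        char *buf = NULL;
        size_t size;
        int i = 0;

        while ((size = read_record(i++, &buf)) > 0) {
            printf("got %zu bytes: %s\n", size, buf);
            free(buf);      /* mirrors the kfree() in the loop above */
            buf = NULL;
        }
        return 0;
    }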
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 3bdd2141843..2bfd987f485 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -199,12 +199,13 @@ static const char *qnx4_checkroot(struct super_block *sb)
if (!strcmp(rootdir->di_fname,
QNX4_BMNAME)) {
found = 1;
- qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
+ qnx4_sb(sb)->BitMap = kmemdup(rootdir,
+ sizeof(struct qnx4_inode_entry),
+ GFP_KERNEL);
if (!qnx4_sb(sb)->BitMap) {
brelse (bh);
return "not enough memory for bitmap inode";
- }
- memcpy( qnx4_sb(sb)->BitMap, rootdir, sizeof( struct qnx4_inode_entry ) ); /* keep bitmap inode known */
+ }/* keep bitmap inode known */
break;
}
}
@@ -427,7 +428,6 @@ static struct inode *qnx4_alloc_inode(struct super_block *sb)
static void qnx4_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
}
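
The qnx4 hunk replaces an open-coded kmalloc()+memcpy() pair with kmemdup(). For reference, the userspace equivalent of that helper (glibc has no memdup, so one is defined here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Same contract as kmemdup(): allocate len bytes and copy src into them. */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const char src[] = "bitmap inode";
        char *copy = memdup(src, sizeof(src));

        if (!copy)
            return 1;
        printf("%s\n", copy);
        free(copy);
        return 0;
    }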
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5b572c89e6c..5ec59b20cf7 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -73,7 +73,6 @@
#include <linux/security.h>
#include <linux/kmod.h>
#include <linux/namei.h>
-#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include "../internal.h" /* ugh */
@@ -2199,7 +2198,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
if (error)
return error;
/* Quota file not on the same filesystem? */
- if (path->mnt->mnt_sb != sb)
+ if (path->dentry->d_sb != sb)
error = -EXDEV;
else
error = vfs_load_quota_inode(path->dentry->d_inode, type,
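
The dquot_quota_on() check for "quota file on the same filesystem" now compares the dentry's superblock rather than the vfsmount's, so it also behaves sensibly across bind mounts of the same filesystem. From userspace the analogous question is usually answered by comparing st_dev:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
        struct stat a, b;

        if (argc != 3 || stat(argv[1], &a) || stat(argv[2], &b))
            return 1;
        printf("%s filesystem\n",
               a.st_dev == b.st_dev ? "same" : "different");
        return 0;
    }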
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 35f4b0ecdeb..7898cd688a0 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
-#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 462ceb38fec..aec766abe3a 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -52,7 +52,7 @@ static struct backing_dev_info ramfs_backing_dev_info = {
};
struct inode *ramfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev)
+ const struct inode *dir, umode_t mode, dev_t dev)
{
struct inode * inode = new_inode(sb);
@@ -92,7 +92,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
*/
/* SMP-safe */
static int
-ramfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev);
int error = -ENOSPC;
@@ -106,7 +106,7 @@ ramfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return error;
}
-static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
int retval = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0);
if (!retval)
@@ -114,7 +114,7 @@ static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
return retval;
}
-static int ramfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index d1aca1df4f9..70de42f09f1 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/reiserfs_fs_sb.h>
#include <linux/reiserfs_fs_i.h>
#include <linux/quotaops.h>
+#include <linux/seq_file.h>
#define PREALLOCATION_SIZE 9
@@ -634,6 +635,96 @@ int reiserfs_parse_alloc_options(struct super_block *s, char *options)
return 0;
}
+static void print_sep(struct seq_file *seq, int *first)
+{
+ if (!*first)
+ seq_puts(seq, ":");
+ else
+ *first = 0;
+}
+
+void show_alloc_options(struct seq_file *seq, struct super_block *s)
+{
+ int first = 1;
+
+ if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
+ (1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
+ return;
+
+ seq_puts(seq, ",alloc=");
+
+ if (TEST_OPTION(concentrating_formatted_nodes, s)) {
+ print_sep(seq, &first);
+ if (REISERFS_SB(s)->s_alloc_options.border != 10) {
+ seq_printf(seq, "concentrating_formatted_nodes=%d",
+ 100 / REISERFS_SB(s)->s_alloc_options.border);
+ } else
+ seq_puts(seq, "concentrating_formatted_nodes");
+ }
+ if (TEST_OPTION(displacing_large_files, s)) {
+ print_sep(seq, &first);
+ if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
+ seq_printf(seq, "displacing_large_files=%lu",
+ REISERFS_SB(s)->s_alloc_options.large_file_size);
+ } else
+ seq_puts(seq, "displacing_large_files");
+ }
+ if (TEST_OPTION(displacing_new_packing_localities, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "displacing_new_packing_localities");
+ }
+ if (TEST_OPTION(old_hashed_relocation, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "old_hashed_relocation");
+ }
+ if (TEST_OPTION(new_hashed_relocation, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "new_hashed_relocation");
+ }
+ if (TEST_OPTION(dirid_groups, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "dirid_groups");
+ }
+ if (TEST_OPTION(oid_groups, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "oid_groups");
+ }
+ if (TEST_OPTION(packing_groups, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "packing_groups");
+ }
+ if (TEST_OPTION(hashed_formatted_nodes, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "hashed_formatted_nodes");
+ }
+ if (TEST_OPTION(skip_busy, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "skip_busy");
+ }
+ if (TEST_OPTION(hundredth_slices, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "hundredth_slices");
+ }
+ if (TEST_OPTION(old_way, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "old_way");
+ }
+ if (TEST_OPTION(displace_based_on_dirid, s)) {
+ print_sep(seq, &first);
+ seq_puts(seq, "displace_based_on_dirid");
+ }
+ if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
+ print_sep(seq, &first);
+ seq_printf(seq, "preallocmin=%d",
+ REISERFS_SB(s)->s_alloc_options.preallocmin);
+ }
+ if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
+ print_sep(seq, &first);
+ seq_printf(seq, "preallocsize=%d",
+ REISERFS_SB(s)->s_alloc_options.preallocsize);
+ }
+}
+
static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
{
char *hash_in;
@@ -1273,10 +1364,7 @@ int reiserfs_init_bitmap_cache(struct super_block *sb)
struct reiserfs_bitmap_info *bitmap;
unsigned int bmap_nr = reiserfs_bmap_count(sb);
- /* Avoid lock recursion in fault case */
- reiserfs_write_unlock(sb);
bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
- reiserfs_write_lock(sb);
if (bitmap == NULL)
return -ENOMEM;
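
show_alloc_options() above joins the active allocator options with ':' after an initial ",alloc=". The print_sep() trick -- emit the separator before every item except the first -- shown on its own with made-up option names:

    #include <stdio.h>

    static void print_sep(int *first)
    {
        if (!*first)
            putchar(':');
        else
            *first = 0;
    }

    int main(void)
    {
        const char *opts[] = { "skip_busy", "dirid_groups", "packing_groups" };
        int first = 1;
        unsigned i;

        fputs(",alloc=", stdout);
        for (i = 0; i < 3; i++) {
            print_sep(&first);
            fputs(opts[i], stdout);
        }
        putchar('\n');
        return 0;
    }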
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 950f13af095..9e8cd5acd79 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1766,7 +1766,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
for the fresh inode. This can only be done outside a transaction, so
if we return non-zero, we also end the transaction. */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
- struct inode *dir, int mode, const char *symname,
+ struct inode *dir, umode_t mode, const char *symname,
/* 0 for regular, EMTRY_DIR_SIZE for dirs,
strlen (symname) for symlinks) */
loff_t i_size, struct dentry *dentry,
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 4e153051bc7..950e3d1b5c9 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -55,7 +55,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
break;
@@ -96,7 +96,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setflags_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
break;
}
case REISERFS_IOC_GETVERSION:
@@ -107,7 +107,7 @@ setflags_out:
err = -EPERM;
break;
}
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
break;
if (get_user(inode->i_generation, (int __user *)arg)) {
@@ -117,7 +117,7 @@ setflags_out:
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setversion_out:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
break;
default:
err = -ENOTTY;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index eb711060a6f..c3cf54fd4de 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2678,16 +2678,10 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
char b[BDEVNAME_SIZE];
int ret;
- /*
- * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS
- * dependency inversion warnings.
- */
- reiserfs_write_unlock(sb);
journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal));
if (!journal) {
reiserfs_warning(sb, "journal-1256",
"unable to get memory for journal structure");
- reiserfs_write_lock(sb);
return 1;
}
INIT_LIST_HEAD(&journal->j_bitmap_nodes);
@@ -2695,10 +2689,8 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
INIT_LIST_HEAD(&journal->j_working_list);
INIT_LIST_HEAD(&journal->j_journal_list);
journal->j_persistent_trans = 0;
- ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
- reiserfs_bmap_count(sb));
- reiserfs_write_lock(sb);
- if (ret)
+ if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
+ reiserfs_bmap_count(sb)))
goto free_and_return;
allocate_bitmap_nodes(sb);
@@ -2727,27 +2719,11 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
- /*
- * We need to unlock here to avoid creating the following
- * dependency:
- * reiserfs_lock -> sysfs_mutex
- * Because the reiserfs mmap path creates the following dependency:
- * mm->mmap -> reiserfs_lock, hence we have
- * mm->mmap -> reiserfs_lock ->sysfs_mutex
- * This would ends up in a circular dependency with sysfs readdir path
- * which does sysfs_mutex -> mm->mmap_sem
- * This is fine because the reiserfs lock is useless in mount path,
- * at least until we call journal_begin. We keep it for paranoid
- * reasons.
- */
- reiserfs_write_unlock(sb);
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
- reiserfs_write_lock(sb);
reiserfs_warning(sb, "sh-462",
"unable to initialize jornal device");
goto free_and_return;
}
- reiserfs_write_lock(sb);
rs = SB_DISK_SUPER_BLOCK(sb);
@@ -2829,9 +2805,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
journal->j_mount_id = 10;
journal->j_state = 0;
atomic_set(&(journal->j_jlock), 0);
- reiserfs_write_unlock(sb);
journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
- reiserfs_write_lock(sb);
journal->j_cnode_free_orig = journal->j_cnode_free_list;
journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
journal->j_cnode_used = 0;
@@ -2848,24 +2822,37 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
init_journal_hash(sb);
jl = journal->j_current_jl;
+
+ /*
+ * get_list_bitmap() may call flush_commit_list() which
+ * requires the lock. Calling flush_commit_list() shouldn't happen
+ * this early but I like to be paranoid.
+ */
+ reiserfs_write_lock(sb);
jl->j_list_bitmap = get_list_bitmap(sb, jl);
+ reiserfs_write_unlock(sb);
if (!jl->j_list_bitmap) {
reiserfs_warning(sb, "journal-2005",
"get_list_bitmap failed for journal list 0");
goto free_and_return;
}
- if (journal_read(sb) < 0) {
+
+ /*
+ * Journal_read needs to be inspected in order to push down
+ * the lock further inside (or even remove it).
+ */
+ reiserfs_write_lock(sb);
+ ret = journal_read(sb);
+ reiserfs_write_unlock(sb);
+ if (ret < 0) {
reiserfs_warning(sb, "reiserfs-2006",
"Replay Failure, unable to mount");
goto free_and_return;
}
reiserfs_mounted_fs_count++;
- if (reiserfs_mounted_fs_count <= 1) {
- reiserfs_write_unlock(sb);
+ if (reiserfs_mounted_fs_count <= 1)
commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
- reiserfs_write_lock(sb);
- }
INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
journal->j_work_sb = sb;
@@ -2896,14 +2883,13 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
journal->j_cnode_free < (journal->j_trans_max * 3)) {
return 1;
}
- /* protected by the BKL here */
+
journal->j_len_alloc += new_alloc;
th->t_blocks_allocated += new_alloc ;
return 0;
}
-/* this must be called inside a transaction, and requires the
-** kernel_lock to be held
+/* this must be called inside a transaction
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
@@ -2914,8 +2900,7 @@ void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
return;
}
-/* this must be called without a transaction started, and does not
-** require BKL
+/* this must be called without a transaction started
*/
void reiserfs_allow_writes(struct super_block *s)
{
@@ -2924,8 +2909,7 @@ void reiserfs_allow_writes(struct super_block *s)
wake_up(&journal->j_join_wait);
}
-/* this must be called without a transaction started, and does not
-** require BKL
+/* this must be called without a transaction started
*/
void reiserfs_wait_on_write_block(struct super_block *s)
{
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 80058e8ce36..14637886523 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -559,7 +559,7 @@ static int drop_new_inode(struct inode *inode)
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
*/
-static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
+static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
{
/* Make inode invalid - just in case we are going to drop it before
* the initialization happens */
@@ -572,7 +572,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
return 0;
}
-static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
int retval;
@@ -643,7 +643,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode,
return retval;
}
-static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
int retval;
@@ -721,7 +721,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
return retval;
}
-static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int retval;
struct inode *inode;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 14363b96b6a..e12d8b97cd4 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -28,6 +28,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/crc32.h>
+#include <linux/seq_file.h>
struct file_system_type reiserfs_fs_type;
@@ -61,6 +62,7 @@ static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs)
static int reiserfs_remount(struct super_block *s, int *flags, char *data);
static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
+void show_alloc_options(struct seq_file *seq, struct super_block *s);
static int reiserfs_sync_fs(struct super_block *s, int wait)
{
@@ -453,16 +455,20 @@ int remove_save_link(struct inode *inode, int truncate)
static void reiserfs_kill_sb(struct super_block *s)
{
if (REISERFS_SB(s)) {
- if (REISERFS_SB(s)->xattr_root) {
- d_invalidate(REISERFS_SB(s)->xattr_root);
- dput(REISERFS_SB(s)->xattr_root);
- REISERFS_SB(s)->xattr_root = NULL;
- }
- if (REISERFS_SB(s)->priv_root) {
- d_invalidate(REISERFS_SB(s)->priv_root);
- dput(REISERFS_SB(s)->priv_root);
- REISERFS_SB(s)->priv_root = NULL;
- }
+ /*
+ * Force any pending inode evictions to occur now. Any
+ * inodes to be removed that have extended attributes
+ * associated with them need to clean them up before
+ * we can release the extended attribute root dentries.
+ * shrink_dcache_for_umount will BUG if we don't release
+ * those before it's called so ->put_super is too late.
+ */
+ shrink_dcache_sb(s);
+
+ dput(REISERFS_SB(s)->xattr_root);
+ REISERFS_SB(s)->xattr_root = NULL;
+ dput(REISERFS_SB(s)->priv_root);
+ REISERFS_SB(s)->priv_root = NULL;
}
kill_block_super(s);
@@ -532,7 +538,6 @@ static struct inode *reiserfs_alloc_inode(struct super_block *sb)
static void reiserfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
}
@@ -597,6 +602,82 @@ out:
reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
+static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct super_block *s = root->d_sb;
+ struct reiserfs_journal *journal = SB_JOURNAL(s);
+ long opts = REISERFS_SB(s)->s_mount_opt;
+
+ if (opts & (1 << REISERFS_LARGETAIL))
+ seq_puts(seq, ",tails=on");
+ else if (!(opts & (1 << REISERFS_SMALLTAIL)))
+ seq_puts(seq, ",notail");
+ /* tails=small is default so we don't show it */
+
+ if (!(opts & (1 << REISERFS_BARRIER_FLUSH)))
+ seq_puts(seq, ",barrier=none");
+ /* barrier=flush is default so we don't show it */
+
+ if (opts & (1 << REISERFS_ERROR_CONTINUE))
+ seq_puts(seq, ",errors=continue");
+ else if (opts & (1 << REISERFS_ERROR_PANIC))
+ seq_puts(seq, ",errors=panic");
+ /* errors=ro is default so we don't show it */
+
+ if (opts & (1 << REISERFS_DATA_LOG))
+ seq_puts(seq, ",data=journal");
+ else if (opts & (1 << REISERFS_DATA_WRITEBACK))
+ seq_puts(seq, ",data=writeback");
+ /* data=ordered is default so we don't show it */
+
+ if (opts & (1 << REISERFS_ATTRS))
+ seq_puts(seq, ",attrs");
+
+ if (opts & (1 << REISERFS_XATTRS_USER))
+ seq_puts(seq, ",user_xattr");
+
+ if (opts & (1 << REISERFS_EXPOSE_PRIVROOT))
+ seq_puts(seq, ",expose_privroot");
+
+ if (opts & (1 << REISERFS_POSIXACL))
+ seq_puts(seq, ",acl");
+
+ if (REISERFS_SB(s)->s_jdev)
+ seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
+
+ if (journal->j_max_commit_age != journal->j_default_max_commit_age)
+ seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
+
+#ifdef CONFIG_QUOTA
+ if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
+ seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
+ else if (opts & (1 << REISERFS_USRQUOTA))
+ seq_puts(seq, ",usrquota");
+ if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
+ seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
+ else if (opts & (1 << REISERFS_GRPQUOTA))
+ seq_puts(seq, ",grpquota");
+ if (REISERFS_SB(s)->s_jquota_fmt) {
+ if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_OLD)
+ seq_puts(seq, ",jqfmt=vfsold");
+ else if (REISERFS_SB(s)->s_jquota_fmt == QFMT_VFS_V0)
+ seq_puts(seq, ",jqfmt=vfsv0");
+ }
+#endif
+
+ /* Block allocator options */
+ if (opts & (1 << REISERFS_NO_BORDER))
+ seq_puts(seq, ",block-allocator=noborder");
+ if (opts & (1 << REISERFS_NO_UNHASHED_RELOCATION))
+ seq_puts(seq, ",block-allocator=no_unhashed_relocation");
+ if (opts & (1 << REISERFS_HASHED_RELOCATION))
+ seq_puts(seq, ",block-allocator=hashed_relocation");
+ if (opts & (1 << REISERFS_TEST4))
+ seq_puts(seq, ",block-allocator=test4");
+ show_alloc_options(seq, s);
+ return 0;
+}
+
#ifdef CONFIG_QUOTA
static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
size_t, loff_t);
@@ -617,7 +698,7 @@ static const struct super_operations reiserfs_sops = {
.unfreeze_fs = reiserfs_unfreeze,
.statfs = reiserfs_statfs,
.remount_fs = reiserfs_remount,
- .show_options = generic_show_options,
+ .show_options = reiserfs_show_options,
#ifdef CONFIG_QUOTA
.quota_read = reiserfs_quota_read,
.quota_write = reiserfs_quota_write,
@@ -915,9 +996,9 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
{"jdev",.arg_required = 'j',.values = NULL},
{"nolargeio",.arg_required = 'w',.values = NULL},
{"commit",.arg_required = 'c',.values = NULL},
- {"usrquota",.setmask = 1 << REISERFS_QUOTA},
- {"grpquota",.setmask = 1 << REISERFS_QUOTA},
- {"noquota",.clrmask = 1 << REISERFS_QUOTA},
+ {"usrquota",.setmask = 1 << REISERFS_USRQUOTA},
+ {"grpquota",.setmask = 1 << REISERFS_GRPQUOTA},
+ {"noquota",.clrmask = 1 << REISERFS_USRQUOTA | 1 << REISERFS_GRPQUOTA},
{"errors",.arg_required = 'e',.values = error_actions},
{"usrjquota",.arg_required =
'u' | (1 << REISERFS_OPT_ALLOWEMPTY),.values = NULL},
@@ -1031,12 +1112,19 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
return 0;
}
strcpy(qf_names[qtype], arg);
- *mount_options |= 1 << REISERFS_QUOTA;
+ if (qtype == USRQUOTA)
+ *mount_options |= 1 << REISERFS_USRQUOTA;
+ else
+ *mount_options |= 1 << REISERFS_GRPQUOTA;
} else {
if (qf_names[qtype] !=
REISERFS_SB(s)->s_qf_names[qtype])
kfree(qf_names[qtype]);
qf_names[qtype] = NULL;
+ if (qtype == USRQUOTA)
+ *mount_options &= ~(1 << REISERFS_USRQUOTA);
+ else
+ *mount_options &= ~(1 << REISERFS_GRPQUOTA);
}
}
if (c == 'f') {
@@ -1075,9 +1163,10 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
"journaled quota format not specified.");
return 0;
}
- /* This checking is not precise wrt the quota type but for our purposes it is sufficient */
- if (!(*mount_options & (1 << REISERFS_QUOTA))
- && sb_any_quota_loaded(s)) {
+ if ((!(*mount_options & (1 << REISERFS_USRQUOTA)) &&
+ sb_has_quota_loaded(s, USRQUOTA)) ||
+ (!(*mount_options & (1 << REISERFS_GRPQUOTA)) &&
+ sb_has_quota_loaded(s, GRPQUOTA))) {
reiserfs_warning(s, "super-6516", "quota options must "
"be present when quota is turned on.");
return 0;
@@ -1164,7 +1253,8 @@ static void handle_quota_files(struct super_block *s, char **qf_names,
kfree(REISERFS_SB(s)->s_qf_names[i]);
REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
}
- REISERFS_SB(s)->s_jquota_fmt = *qfmt;
+ if (*qfmt)
+ REISERFS_SB(s)->s_jquota_fmt = *qfmt;
}
#endif
@@ -1225,7 +1315,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
safe_mask |= 1 << REISERFS_ERROR_RO;
safe_mask |= 1 << REISERFS_ERROR_CONTINUE;
safe_mask |= 1 << REISERFS_ERROR_PANIC;
- safe_mask |= 1 << REISERFS_QUOTA;
+ safe_mask |= 1 << REISERFS_USRQUOTA;
+ safe_mask |= 1 << REISERFS_GRPQUOTA;
/* Update the bitmask, taking care to keep
* the bits we're not allowed to change here */
@@ -1428,9 +1519,7 @@ static int read_super_block(struct super_block *s, int offset)
static int reread_meta_blocks(struct super_block *s)
{
ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
- reiserfs_write_unlock(s);
wait_on_buffer(SB_BUFFER_WITH_SB(s));
- reiserfs_write_lock(s);
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
@@ -1655,22 +1744,19 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
mutex_init(&REISERFS_SB(s)->lock);
REISERFS_SB(s)->lock_depth = -1;
- /*
- * This function is called with the bkl, which also was the old
- * locking used here.
- * do_journal_begin() will soon check if we hold the lock (ie: was the
- * bkl). This is likely because do_journal_begin() has several another
- * callers because at this time, it doesn't seem to be necessary to
- * protect against anything.
- * Anyway, let's be conservative and lock for now.
- */
- reiserfs_write_lock(s);
-
jdev_name = NULL;
if (reiserfs_parse_options
(s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
&commit_max_age, qf_names, &qfmt) == 0) {
- goto error;
+ goto error_unlocked;
+ }
+ if (jdev_name && jdev_name[0]) {
+ REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
+ if (!REISERFS_SB(s)->s_jdev) {
+ SWARN(silent, s, "", "Cannot allocate memory for "
+ "journal device name");
+ goto error;
+ }
}
#ifdef CONFIG_QUOTA
handle_quota_files(s, qf_names, &qfmt);
@@ -1678,7 +1764,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (blocks) {
SWARN(silent, s, "jmacd-7", "resize option for remount only");
- goto error;
+ goto error_unlocked;
}
/* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */
@@ -1688,7 +1774,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
reiserfs_bdevname(s));
- goto error;
+ goto error_unlocked;
}
rs = SB_DISK_SUPER_BLOCK(s);
@@ -1704,7 +1790,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
"or increase size of your LVM partition");
SWARN(silent, s, "", "Or may be you forgot to "
"reboot after fdisk when it told you to");
- goto error;
+ goto error_unlocked;
}
sbi->s_mount_state = SB_REISERFS_STATE(s);
@@ -1712,8 +1798,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if ((errval = reiserfs_init_bitmap_cache(s))) {
SWARN(silent, s, "jmacd-8", "unable to read bitmap");
- goto error;
+ goto error_unlocked;
}
+
errval = -EINVAL;
#ifdef CONFIG_REISERFS_CHECK
SWARN(silent, s, "", "CONFIG_REISERFS_CHECK is set ON");
@@ -1736,24 +1823,26 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (reiserfs_barrier_flush(s)) {
printk("reiserfs: using flush barriers\n");
}
+
// set_device_ro(s->s_dev, 1) ;
if (journal_init(s, jdev_name, old_format, commit_max_age)) {
SWARN(silent, s, "sh-2022",
"unable to initialize journal space");
- goto error;
+ goto error_unlocked;
} else {
jinit_done = 1; /* once this is set, journal_release must be called
** if we error out of the mount
*/
}
+
if (reread_meta_blocks(s)) {
SWARN(silent, s, "jmacd-9",
"unable to reread meta blocks after journal init");
- goto error;
+ goto error_unlocked;
}
if (replay_only(s))
- goto error;
+ goto error_unlocked;
if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
SWARN(silent, s, "clm-7000",
@@ -1767,9 +1856,19 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
reiserfs_init_locked_inode, (void *)(&args));
if (!root_inode) {
SWARN(silent, s, "jmacd-10", "get root inode failed");
- goto error;
+ goto error_unlocked;
}
+ /*
+ * This path assumed to be called with the BKL in the old times.
+ * Now we have inherited the big reiserfs lock from it and many
+ * reiserfs helpers called in the mount path and elsewhere require
+ * this lock to be held even if it's not always necessary. Let's be
+ * conservative and hold it early. The window can be reduced after
+ * careful review of the code.
+ */
+ reiserfs_write_lock(s);
+
if (root_inode->i_state & I_NEW) {
reiserfs_read_locked_inode(root_inode, &args);
unlock_new_inode(root_inode);
@@ -1896,12 +1995,16 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return (0);
error:
- if (jinit_done) { /* kill the commit thread, free journal ram */
+ reiserfs_write_unlock(s);
+
+error_unlocked:
+ /* kill the commit thread, free journal ram */
+ if (jinit_done) {
+ reiserfs_write_lock(s);
journal_release_error(NULL, s);
+ reiserfs_write_unlock(s);
}
- reiserfs_write_unlock(s);
-
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
brelse(SB_BUFFER_WITH_SB(s));
@@ -2054,12 +2157,13 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
int err;
struct inode *inode;
struct reiserfs_transaction_handle th;
+ int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
+ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
return -EINVAL;
/* Quotafile not on the same filesystem? */
- if (path->mnt->mnt_sb != sb) {
+ if (path->dentry->d_sb != sb) {
err = -EXDEV;
goto out;
}
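
With .show_options switched from generic_show_options to reiserfs_show_options(), the non-default options (tails=, barrier=, errors=, data=, the per-type quota flags, jdev=, commit= and the block-allocator settings) become visible in /proc/mounts. They can be read back with the standard getmntent(3) interface:

    #include <mntent.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = setmntent("/proc/mounts", "r");
        struct mntent *m;

        if (!f)
            return 1;
        while ((m = getmntent(f)) != NULL)
            if (!strcmp(m->mnt_type, "reiserfs"))
                printf("%s on %s: %s\n",
                       m->mnt_fsname, m->mnt_dir, m->mnt_opts);
        endmntent(f);
        return 0;
    }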
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6bc346c160e..c24deda8a8b 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -66,7 +66,7 @@ static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
}
#endif
-static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int xattr_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
BUG_ON(!mutex_is_locked(&dir->i_mutex));
return dir->i_op->mkdir(dir, dentry, mode);
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index eed99428f10..e1a7779dd3c 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -28,9 +28,10 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
struct inode *inode = file->f_mapping->host;
struct mtd_info *mtd = inode->i_sb->s_mtd;
unsigned long isize, offset, maxpages, lpages;
+ int ret;
if (!mtd)
- goto cant_map_directly;
+ return (unsigned long) -ENOSYS;
/* the mapping mustn't extend beyond the EOF */
lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -41,23 +42,20 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
return (unsigned long) -EINVAL;
- /* we need to call down to the MTD layer to do the actual mapping */
- if (mtd->get_unmapped_area) {
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset += ROMFS_I(inode)->i_dataoffset;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset += ROMFS_I(inode)->i_dataoffset;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
-cant_map_directly:
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ if (ret == -EOPNOTSUPP)
+ ret = -ENOSYS;
+ return (unsigned long) ret;
}
/*
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 8b4089f3040..bb36ab74eb4 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -403,7 +403,6 @@ static struct inode *romfs_alloc_inode(struct super_block *sb)
static void romfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 05d6b0e78c9..4023d6be939 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -397,7 +397,7 @@ EXPORT_SYMBOL(seq_printf);
* Returns pointer past last written character in @s, or NULL in case of
* failure.
*/
-char *mangle_path(char *s, char *p, char *esc)
+char *mangle_path(char *s, const char *p, const char *esc)
{
while (s <= p) {
char c = *p++;
@@ -427,7 +427,7 @@ EXPORT_SYMBOL(mangle_path);
* return the absolute path of 'path', as represented by the
* dentry / mnt pair in the path parameter.
*/
-int seq_path(struct seq_file *m, struct path *path, char *esc)
+int seq_path(struct seq_file *m, const struct path *path, const char *esc)
{
char *buf;
size_t size = seq_get_buf(m, &buf);
@@ -449,11 +449,9 @@ EXPORT_SYMBOL(seq_path);
/*
* Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
*/
-int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
- char *esc)
+int seq_path_root(struct seq_file *m, const struct path *path,
+ const struct path *root, const char *esc)
{
char *buf;
size_t size = seq_get_buf(m, &buf);
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *p;
p = __d_path(path, root, buf, size);
+ if (!p)
+ return SEQ_SKIP;
res = PTR_ERR(p);
if (!IS_ERR(p)) {
char *end = mangle_path(buf, p, esc);
@@ -474,13 +474,13 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
}
seq_commit(m, res);
- return res < 0 ? res : 0;
+ return res < 0 && res != -ENAMETOOLONG ? res : 0;
}
/*
* returns the path of the 'dentry' from the root of its filesystem.
*/
-int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
+int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
{
char *buf;
size_t size = seq_get_buf(m, &buf);
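
seq_path_root() now handles the "is this mount visible from the caller's root?" question itself: a NULL return from __d_path() is turned into SEQ_SKIP so show_mountinfo() silently drops mounts outside a chroot, and -ENAMETOOLONG no longer aborts the whole read. The string-level idea -- print a path relative to a root, or skip it when it lies outside -- looks like this in plain C (path_under_root is purely illustrative; the kernel operates on dentries, not strings):

    #include <stdio.h>
    #include <string.h>

    /* Return the part of path below root, or NULL if path is outside root. */
    static const char *path_under_root(const char *path, const char *root)
    {
        size_t n = strlen(root);

        if (strncmp(path, root, n) || (path[n] && path[n] != '/'))
            return NULL;        /* outside the root: skip the entry */
        return path[n] ? path + n : "/";
    }

    int main(void)
    {
        const char *root = "/srv/jail";
        const char *paths[] = { "/srv/jail/proc", "/home/user" };
        unsigned i;

        for (i = 0; i < 2; i++) {
            const char *p = path_under_root(paths[i], root);

            printf("%s -> %s\n", paths[i], p ? p : "(skipped)");
        }
        return 0;
    }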
diff --git a/fs/splice.c b/fs/splice.c
index fa2defa8afc..1ec0493266b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -25,7 +25,6 @@
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 2da1715452a..d0858c2d9a4 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -464,7 +464,6 @@ static struct inode *squashfs_alloc_inode(struct super_block *sb)
static void squashfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
}
diff --git a/fs/statfs.c b/fs/statfs.c
index 9cf04a11896..2aa6a22e0be 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -7,6 +7,7 @@
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/uaccess.h>
+#include "internal.h"
static int flags_by_mnt(int mnt_flags)
{
@@ -45,7 +46,7 @@ static int calculate_f_flags(struct vfsmount *mnt)
flags_by_sb(mnt->mnt_sb->s_flags);
}
-int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
+static int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
{
int retval;
@@ -205,19 +206,23 @@ SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user
return error;
}
-SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
+int vfs_ustat(dev_t dev, struct kstatfs *sbuf)
{
- struct super_block *s;
- struct ustat tmp;
- struct kstatfs sbuf;
+ struct super_block *s = user_get_super(dev);
int err;
-
- s = user_get_super(new_decode_dev(dev));
if (!s)
return -EINVAL;
- err = statfs_by_dentry(s->s_root, &sbuf);
+ err = statfs_by_dentry(s->s_root, sbuf);
drop_super(s);
+ return err;
+}
+
+SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
+{
+ struct ustat tmp;
+ struct kstatfs sbuf;
+ int err = vfs_ustat(new_decode_dev(dev), &sbuf);
if (err)
return err;
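
ustat(2) is now built on a small vfs_ustat() helper that reuses statfs_by_dentry(), so the figures it reports are the same ones statfs(2) returns. For comparison, the usual way to fetch them from userspace:

    #include <stdio.h>
    #include <sys/statfs.h>

    int main(int argc, char **argv)
    {
        struct statfs s;

        if (argc != 2 || statfs(argv[1], &s))
            return 1;
        printf("free blocks: %llu, free inodes: %llu\n",
               (unsigned long long)s.f_bfree,
               (unsigned long long)s.f_ffree);
        return 0;
    }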
diff --git a/fs/super.c b/fs/super.c
index afd0f1ad45e..de41e1e46f0 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -136,12 +136,13 @@ static struct super_block *alloc_super(struct file_system_type *type)
INIT_LIST_HEAD(&s->s_files);
#endif
s->s_bdi = &default_backing_dev_info;
- INIT_LIST_HEAD(&s->s_instances);
+ INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
INIT_LIST_HEAD(&s->s_dentry_lru);
INIT_LIST_HEAD(&s->s_inode_lru);
spin_lock_init(&s->s_inode_lru_lock);
+ INIT_LIST_HEAD(&s->s_mounts);
init_rwsem(&s->s_umount);
mutex_init(&s->s_lock);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -200,6 +201,7 @@ static inline void destroy_super(struct super_block *s)
free_percpu(s->s_files);
#endif
security_sb_free(s);
+ WARN_ON(!list_empty(&s->s_mounts));
kfree(s->s_subtype);
kfree(s->s_options);
kfree(s);
@@ -210,7 +212,7 @@ static inline void destroy_super(struct super_block *s)
/*
* Drop a superblock's refcount. The caller must hold sb_lock.
*/
-void __put_super(struct super_block *sb)
+static void __put_super(struct super_block *sb)
{
if (!--sb->s_count) {
list_del_init(&sb->s_list);
@@ -225,7 +227,7 @@ void __put_super(struct super_block *sb)
* Drops a temporary reference, frees superblock if there's no
* references left.
*/
-void put_super(struct super_block *sb)
+static void put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
__put_super(sb);
@@ -328,7 +330,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
bool grab_super_passive(struct super_block *sb)
{
spin_lock(&sb_lock);
- if (list_empty(&sb->s_instances)) {
+ if (hlist_unhashed(&sb->s_instances)) {
spin_unlock(&sb_lock);
return false;
}
@@ -337,7 +339,7 @@ bool grab_super_passive(struct super_block *sb)
spin_unlock(&sb_lock);
if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
return true;
up_read(&sb->s_umount);
}
@@ -400,7 +402,7 @@ void generic_shutdown_super(struct super_block *sb)
}
spin_lock(&sb_lock);
/* should be initialized for __put_super_and_need_restart() */
- list_del_init(&sb->s_instances);
+ hlist_del_init(&sb->s_instances);
spin_unlock(&sb_lock);
up_write(&sb->s_umount);
}
@@ -420,13 +422,14 @@ struct super_block *sget(struct file_system_type *type,
void *data)
{
struct super_block *s = NULL;
+ struct hlist_node *node;
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
if (test) {
- list_for_each_entry(old, &type->fs_supers, s_instances) {
+ hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
if (!test(old, data))
continue;
if (!grab_super(old))
@@ -462,7 +465,7 @@ retry:
s->s_type = type;
strlcpy(s->s_id, type->name, sizeof(s->s_id));
list_add_tail(&s->s_list, &super_blocks);
- list_add(&s->s_instances, &type->fs_supers);
+ hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
register_shrinker(&s->s_shrink);
@@ -497,14 +500,14 @@ void sync_supers(void)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_op->write_super && sb->s_dirt) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- if (sb->s_root && sb->s_dirt)
+ if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
sb->s_op->write_super(sb);
up_read(&sb->s_umount);
@@ -533,13 +536,13 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
f(sb, arg);
up_read(&sb->s_umount);
@@ -566,14 +569,15 @@ void iterate_supers_type(struct file_system_type *type,
void (*f)(struct super_block *, void *), void *arg)
{
struct super_block *sb, *p = NULL;
+ struct hlist_node *node;
spin_lock(&sb_lock);
- list_for_each_entry(sb, &type->fs_supers, s_instances) {
+ hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
f(sb, arg);
up_read(&sb->s_umount);
@@ -607,14 +611,14 @@ struct super_block *get_super(struct block_device *bdev)
spin_lock(&sb_lock);
rescan:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
/* still alive? */
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
return sb;
up_read(&sb->s_umount);
/* nope, got unmounted */
@@ -647,7 +651,7 @@ struct super_block *get_active_super(struct block_device *bdev)
restart:
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) {
if (grab_super(sb)) /* drops sb_lock */
@@ -667,14 +671,14 @@ struct super_block *user_get_super(dev_t dev)
spin_lock(&sb_lock);
rescan:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_dev == dev) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
/* still alive? */
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
return sb;
up_read(&sb->s_umount);
/* nope, got unmounted */
@@ -719,23 +723,29 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
/* If we are remounting RDONLY and current sb is read/write,
make sure there are no rw files opened */
if (remount_ro) {
- if (force)
+ if (force) {
mark_files_ro(sb);
- else if (!fs_may_remount_ro(sb))
- return -EBUSY;
+ } else {
+ retval = sb_prepare_remount_readonly(sb);
+ if (retval)
+ return retval;
+ }
}
if (sb->s_op->remount_fs) {
retval = sb->s_op->remount_fs(sb, &flags, data);
if (retval) {
if (!force)
- return retval;
+ goto cancel_readonly;
/* If forced remount, go ahead despite any errors */
WARN(1, "forced remount of a %s fs returned %i\n",
sb->s_type->name, retval);
}
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+ /* Needs to be ordered wrt mnt_is_readonly() */
+ smp_wmb();
+ sb->s_readonly_remount = 0;
/*
* Some filesystems modify their metadata via some other path than the
@@ -748,6 +758,10 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
if (remount_ro && sb->s_bdev)
invalidate_bdev(sb->s_bdev);
return 0;
+
+cancel_readonly:
+ sb->s_readonly_remount = 0;
+ return retval;
}
static void do_emergency_remount(struct work_struct *work)
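The do_remount_sb() hunk above publishes the new s_flags, issues smp_wmb(), and only then clears sb->s_readonly_remount, per the in-patch comment about ordering against mnt_is_readonly(). The following is a loosely analogous userspace sketch of that publish pattern using C11 fences; the variable names are invented and this is not the kernel's implementation:

/* Illustrative store-release / load-acquire pairing, roughly analogous to
 * the smp_wmb() between updating sb->s_flags and clearing
 * sb->s_readonly_remount. All names here are made up. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int flags;                          /* plays the role of sb->s_flags */
static atomic_int readonly_remount = 1;    /* plays sb->s_readonly_remount */

static void *writer(void *arg)
{
        flags = 42;                                    /* publish new flags */
        atomic_thread_fence(memory_order_release);     /* ~ smp_wmb() */
        atomic_store_explicit(&readonly_remount, 0, memory_order_relaxed);
        return NULL;
}

static void *reader(void *arg)
{
        while (atomic_load_explicit(&readonly_remount, memory_order_relaxed))
                ;                                      /* spin until cleared */
        atomic_thread_fence(memory_order_acquire);     /* reader-side pairing */
        printf("flags seen by reader: %d\n", flags);   /* guaranteed to be 42 */
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}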
@@ -756,12 +770,13 @@ static void do_emergency_remount(struct work_struct *work)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
down_write(&sb->s_umount);
- if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
+ if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
+ !(sb->s_flags & MS_RDONLY)) {
/*
* What lock protects sb->s_flags??
*/
@@ -1144,6 +1159,11 @@ int freeze_super(struct super_block *sb)
return -EBUSY;
}
+ if (!(sb->s_flags & MS_BORN)) {
+ up_write(&sb->s_umount);
+ return 0; /* sic - it's "nothing to do" */
+ }
+
if (sb->s_flags & MS_RDONLY) {
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
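Throughout fs/super.c this commit converts sb->s_instances from a list_head to an hlist, so "is this superblock still hashed into its type?" becomes hlist_unhashed() instead of list_empty(). The sketch below is a simplified userspace re-creation of that intrusive hlist pattern, not the real <linux/list.h> code:

/* Simplified re-creation of the hlist pattern used for sb->s_instances. */
#include <stdio.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static int hlist_unhashed(const struct hlist_node *n)
{
        return !n->pprev;                  /* what the patch tests instead of list_empty() */
}

static void hlist_del_init(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                *n->pprev = n->next;
                if (n->next)
                        n->next->pprev = n->pprev;
                n->next = NULL;
                n->pprev = NULL;
        }
}

struct super { int id; struct hlist_node s_instances; };

int main(void)
{
        struct hlist_head fs_supers = { NULL };
        struct super a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };
        struct hlist_node *pos;

        hlist_add_head(&a.s_instances, &fs_supers);     /* like sget() */
        hlist_add_head(&b.s_instances, &fs_supers);

        for (pos = fs_supers.first; pos; pos = pos->next) {
                struct super *sb = (struct super *)((char *)pos -
                                offsetof(struct super, s_instances));
                printf("super %d\n", sb->id);
        }

        hlist_del_init(&a.s_instances);                 /* generic_shutdown_super() */
        printf("a unhashed: %d\n", hlist_unhashed(&a.s_instances));
        return 0;
}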
diff --git a/fs/sync.c b/fs/sync.c
index 101b8ef901d..f3501ef3923 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -14,7 +14,6 @@
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
-#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include "internal.h"
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index d4e6080b4b2..62f4fb37789 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -518,7 +518,7 @@ out:
}
int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type, mode_t amode)
+ const struct attribute *attr, int type, umode_t amode)
{
umode_t mode = (amode & S_IALLUGO) | S_IFREG;
struct sysfs_addrm_cxt acxt;
@@ -618,7 +618,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
*
*/
int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
- mode_t mode)
+ umode_t mode)
{
struct sysfs_dirent *sd;
struct iattr newattrs;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 194414f8298..dd1701caecc 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -33,7 +33,7 @@ static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
int error = 0, i;
for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
- mode_t mode = 0;
+ umode_t mode = 0;
/* in update mode, we're changing the permissions or
* visibility. Do this by first removing then
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index c81b22f3ace..4a802b4a905 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -187,7 +187,7 @@ out:
return error;
}
-static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
+static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ce29e28b766..7484a36ee67 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -79,7 +79,7 @@ struct sysfs_dirent {
};
unsigned int s_flags;
- unsigned short s_mode;
+ umode_t s_mode;
ino_t s_ino;
struct sysfs_inode_attrs *s_iattr;
};
@@ -229,7 +229,7 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd,
const struct attribute *attr, int type);
int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
- const struct attribute *attr, int type, mode_t amode);
+ const struct attribute *attr, int type, umode_t amode);
/*
* bin.c
*/
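The sysfs and sysv hunks above are part of a tree-wide switch from int/mode_t to umode_t (an unsigned short) for file-mode arguments, which is also why format strings elsewhere in this series move to the h length modifier (%#hx in ubifs, %ho in udf). A tiny standalone illustration of the width difference; the typedef here is a local stand-in, not the kernel's:

/* Local stand-in for the kernel typedef; shows why a "short" printf
 * length modifier fits once modes are umode_t. */
#include <stdio.h>

typedef unsigned short umode_t;

int main(void)
{
        int as_int = 0100644;        /* S_IFREG | 0644 as a plain int */
        umode_t as_umode = as_int;   /* narrows to 16 bits; a mode still fits */

        printf("sizeof(int)=%zu sizeof(umode_t)=%zu\n",
               sizeof(int), sizeof(umode_t));
        printf("mode as int:     %#o\n", as_int);
        printf("mode as umode_t: %#ho\n", as_umode);   /* cf. ",umask=%ho" below */
        return 0;
}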
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 0c96c98bd1d..8233b02ecca 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -132,7 +132,7 @@ void sysv_free_inode(struct inode * inode)
brelse(bh);
}
-struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
+struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct sysv_sb_info *sbi = SYSV_SB(sb);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 25ffb3e9a3f..3da5ce25faf 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -336,7 +336,6 @@ static struct inode *sysv_alloc_inode(struct super_block *sb)
static void sysv_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
}
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index fa8d43c92bb..90b54b43878 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -442,7 +442,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
int sysv_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
- struct super_block *s = mnt->mnt_sb;
+ struct super_block *s = dentry->d_sb;
generic_fillattr(dentry->d_inode, stat);
stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
stat->blksize = s->s_blocksize;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index e474fbcf8bd..b217797e621 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -61,7 +61,7 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st
return NULL;
}
-static int sysv_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_t rdev)
+static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode, dev_t rdev)
{
struct inode * inode;
int err;
@@ -80,7 +80,7 @@ static int sysv_mknod(struct inode * dir, struct dentry * dentry, int mode, dev_
return err;
}
-static int sysv_create(struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
+static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
{
return sysv_mknod(dir, dentry, mode, 0);
}
@@ -131,7 +131,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
+static int sysv_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err = -EMLINK;
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index bb55cdb394b..0e4b821c569 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -125,7 +125,7 @@ static inline void dirty_sb(struct super_block *sb)
/* ialloc.c */
extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned,
struct buffer_head **);
-extern struct inode * sysv_new_inode(const struct inode *, mode_t);
+extern struct inode * sysv_new_inode(const struct inode *, umode_t);
extern void sysv_free_inode(struct inode *);
extern unsigned long sysv_count_free_inodes(struct super_block *);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 68349204331..d6fe1c79f18 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -56,7 +56,7 @@
*
* This function returns the inherited flags.
*/
-static int inherit_flags(const struct inode *dir, int mode)
+static int inherit_flags(const struct inode *dir, umode_t mode)
{
int flags;
const struct ubifs_inode *ui = ubifs_inode(dir);
@@ -86,7 +86,7 @@ static int inherit_flags(const struct inode *dir, int mode)
* case of failure.
*/
struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
- int mode)
+ umode_t mode)
{
struct inode *inode;
struct ubifs_inode *ui;
@@ -253,7 +253,7 @@ out:
return ERR_PTR(err);
}
-static int ubifs_create(struct inode *dir, struct dentry *dentry, int mode,
+static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -268,7 +268,7 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, int mode,
* parent directory inode.
*/
- dbg_gen("dent '%.*s', mode %#x in dir ino %lu",
+ dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
err = ubifs_budget_space(c, &req);
@@ -712,7 +712,7 @@ out_cancel:
return err;
}
-static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct ubifs_inode *dir_ui = ubifs_inode(dir);
@@ -725,7 +725,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
* directory inode.
*/
- dbg_gen("dent '%.*s', mode %#x in dir ino %lu",
+ dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
err = ubifs_budget_space(c, &req);
@@ -769,7 +769,7 @@ out_budg:
}
static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t rdev)
+ umode_t mode, dev_t rdev)
{
struct inode *inode;
struct ubifs_inode *ui;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 548acf494af..1a7e2d8bdbe 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -173,12 +173,12 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* Make sure the file-system is read-write and make sure it
* will not become read-only while we are changing the flags.
*/
- err = mnt_want_write(file->f_path.mnt);
+ err = mnt_want_write_file(file);
if (err)
return err;
dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags);
err = setflags(inode, flags);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
return err;
}
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 6189c74d97f..66d59d0a140 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1986,12 +1986,11 @@ again:
if (path[h].in_tree)
continue;
- nnode = kmalloc(sz, GFP_NOFS);
+ nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS);
if (!nnode) {
err = -ENOMEM;
goto out;
}
- memcpy(nnode, &path[h].nnode, sz);
parent = nnode->parent;
parent->nbranch[nnode->iip].nnode = nnode;
path[h].ptr.nnode = nnode;
@@ -2004,12 +2003,11 @@ again:
const size_t sz = sizeof(struct ubifs_pnode);
struct ubifs_nnode *parent;
- pnode = kmalloc(sz, GFP_NOFS);
+ pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS);
if (!pnode) {
err = -ENOMEM;
goto out;
}
- memcpy(pnode, &path[h].pnode, sz);
parent = pnode->parent;
parent->nbranch[pnode->iip].pnode = pnode;
path[h].ptr.pnode = pnode;
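Several hunks in this series (ubifs lpt.c here, and tnc.c and xattr.c below) fold a kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one call. A userspace analogue of the conversion; memdup() is an invented stand-in for the kernel helper:

/* Userspace sketch of the kmalloc()+memcpy() -> kmemdup() conversion. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        const char node[] = "on-disk node image";
        char *copy;

        /* before: copy = malloc(len); if (!copy) ...; memcpy(copy, node, len); */
        copy = memdup(node, sizeof(node));
        if (!copy)
                return 1;              /* maps to err = -ENOMEM in the patch */

        printf("%s\n", copy);
        free(copy);
        return 0;
}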
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20403dc5d43..63765d58445 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -276,7 +276,6 @@ static void ubifs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
struct ubifs_inode *ui = ubifs_inode(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ubifs_inode_slab, ui);
}
@@ -420,9 +419,9 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt)
+static int ubifs_show_options(struct seq_file *s, struct dentry *root)
{
- struct ubifs_info *c = mnt->mnt_sb->s_fs_info;
+ struct ubifs_info *c = root->d_sb->s_fs_info;
if (c->mount_opts.unmount_mode == 2)
seq_printf(s, ",fast_unmount");
@@ -2264,19 +2263,12 @@ static int __init ubifs_init(void)
return -EINVAL;
}
- err = register_filesystem(&ubifs_fs_type);
- if (err) {
- ubifs_err("cannot register file system, error %d", err);
- return err;
- }
-
- err = -ENOMEM;
ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
sizeof(struct ubifs_inode), 0,
SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
&inode_slab_ctor);
if (!ubifs_inode_slab)
- goto out_reg;
+ return -ENOMEM;
register_shrinker(&ubifs_shrinker_info);
@@ -2288,15 +2280,20 @@ static int __init ubifs_init(void)
if (err)
goto out_compr;
+ err = register_filesystem(&ubifs_fs_type);
+ if (err) {
+ ubifs_err("cannot register file system, error %d", err);
+ goto out_dbg;
+ }
return 0;
+out_dbg:
+ dbg_debugfs_exit();
out_compr:
ubifs_compressors_exit();
out_shrinker:
unregister_shrinker(&ubifs_shrinker_info);
kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
- unregister_filesystem(&ubifs_fs_type);
return err;
}
/* late_initcall to let compressors initialize first */
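The ubifs_init() change moves register_filesystem() to the very end, so the filesystem only becomes mountable once the inode cache, shrinker, compressors and debugfs are all ready, and failures unwind in reverse order via gotos. A generic sketch of that "register last, tear down in reverse" shape; every function name below is invented:

/* Generic sketch: make the externally visible registration the last step
 * and unwind in reverse order on failure. All functions are invented. */
#include <stdio.h>

static int setup_cache(void)        { puts("cache ready");    return 0; }
static int setup_shrinker(void)     { puts("shrinker ready"); return 0; }
static int register_fs(void)        { puts("fs registered");  return 0; }
static void teardown_shrinker(void) { puts("shrinker gone");  }
static void teardown_cache(void)    { puts("cache gone");     }

static int module_init_sketch(void)
{
        int err;

        err = setup_cache();
        if (err)
                return err;

        err = setup_shrinker();
        if (err)
                goto out_cache;

        /* only now can anyone mount us */
        err = register_fs();
        if (err)
                goto out_shrinker;

        return 0;

out_shrinker:
        teardown_shrinker();
out_cache:
        teardown_cache();
        return err;
}

int main(void)
{
        return module_init_sketch();
}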
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 06673864768..e14ee53159d 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -344,12 +344,11 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
return err;
}
- lnc_node = kmalloc(zbr->len, GFP_NOFS);
+ lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
if (!lnc_node)
/* We don't have to have the cache, so no error */
return 0;
- memcpy(lnc_node, node, zbr->len);
zbr->leaf = lnc_node;
return 0;
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 27f22551f80..12e94774aa8 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1734,7 +1734,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr);
/* dir.c */
struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
- int mode);
+ umode_t mode);
int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index bf18f7a0454..85b27226875 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -138,12 +138,11 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui = ubifs_inode(inode);
ui->xattr = 1;
ui->flags |= UBIFS_XATTR_FL;
- ui->data = kmalloc(size, GFP_NOFS);
+ ui->data = kmemdup(value, size, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
goto out_free;
}
- memcpy(ui->data, value, size);
inode->i_size = ui->ui_size = size;
ui->data_len = size;
@@ -204,12 +203,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
return err;
kfree(ui->data);
- ui->data = kmalloc(size, GFP_NOFS);
+ ui->data = kmemdup(value, size, GFP_NOFS);
if (!ui->data) {
err = -ENOMEM;
goto out_free;
}
- memcpy(ui->data, value, size);
inode->i_size = ui->ui_size = size;
ui->data_len = size;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d8ffa7cc661..dca0c3881e8 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -125,7 +125,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
err = udf_expand_file_adinicb(inode);
if (err) {
udf_debug("udf_expand_adinicb: err=%d\n", err);
- up_write(&iinfo->i_data_sem);
return err;
}
} else {
@@ -133,9 +132,10 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
iinfo->i_lenAlloc = pos + count;
else
iinfo->i_lenAlloc = inode->i_size;
+ up_write(&iinfo->i_data_sem);
}
- }
- up_write(&iinfo->i_data_sem);
+ } else
+ up_write(&iinfo->i_data_sem);
retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
if (retval > 0)
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 6fb7e0adcda..05ab48195be 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -46,7 +46,7 @@ void udf_free_inode(struct inode *inode)
udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
-struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
+struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
{
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 4fd1d809738..7699df7b319 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -48,13 +48,12 @@ MODULE_LICENSE("GPL");
#define EXTENT_MERGE_SIZE 5
-static mode_t udf_convert_permissions(struct fileEntry *);
+static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
-static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
- sector_t *, int *);
+static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
@@ -151,6 +150,12 @@ const struct address_space_operations udf_aops = {
.bmap = udf_bmap,
};
+/*
+ * Expand file stored in ICB to a normal one-block-file
+ *
+ * This function requires i_data_sem for writing and releases it.
+ * This function requires i_mutex held
+ */
int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
@@ -169,9 +174,15 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
+ up_write(&iinfo->i_data_sem);
mark_inode_dirty(inode);
return 0;
}
+ /*
+ * Release i_data_sem so that we can lock a page - page lock ranks
+ * above i_data_sem. i_mutex still protects us against file changes.
+ */
+ up_write(&iinfo->i_data_sem);
page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
if (!page)
@@ -187,6 +198,7 @@ int udf_expand_file_adinicb(struct inode *inode)
SetPageUptodate(page);
kunmap(page);
}
+ down_write(&iinfo->i_data_sem);
memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
@@ -196,17 +208,20 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
+ up_write(&iinfo->i_data_sem);
err = inode->i_data.a_ops->writepage(page, &udf_wbc);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
kaddr = kmap(page);
+ down_write(&iinfo->i_data_sem);
memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
inode->i_size);
kunmap(page);
unlock_page(page);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
inode->i_data.a_ops = &udf_adinicb_aops;
+ up_write(&iinfo->i_data_sem);
}
page_cache_release(page);
mark_inode_dirty(inode);
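The udf_expand_file_adinicb() hunks above rework the locking so that i_data_sem is dropped before a page is locked and re-taken afterwards, because the page lock ranks above i_data_sem, while i_mutex keeps the file stable across the gap. A userspace analogue of releasing the lower-ranked lock before taking the higher-ranked one; the lock names are purely illustrative:

/* Illustrative lock-ordering sketch: "data_sem" must not be held while
 * taking "page_lock", so it is dropped and re-taken; "i_mutex" keeps the
 * whole operation serialized. Names are illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static void expand_inline_file(void)
{
        pthread_mutex_lock(&i_mutex);          /* caller holds i_mutex */
        pthread_rwlock_wrlock(&data_sem);

        /* ... decide the inline data has to be copied into a page ... */

        pthread_rwlock_unlock(&data_sem);      /* page lock ranks above data_sem */
        pthread_mutex_lock(&page_lock);
        pthread_rwlock_wrlock(&data_sem);      /* page_lock -> data_sem is the allowed order */

        puts("copying inline data into the page");

        pthread_rwlock_unlock(&data_sem);
        pthread_mutex_unlock(&page_lock);
        pthread_mutex_unlock(&i_mutex);
}

int main(void)
{
        expand_inline_file();
        return 0;
}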
@@ -310,7 +325,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
int err, new;
- struct buffer_head *bh;
sector_t phys = 0;
struct udf_inode_info *iinfo;
@@ -323,7 +337,6 @@ static int udf_get_block(struct inode *inode, sector_t block,
err = -EIO;
new = 0;
- bh = NULL;
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
@@ -332,13 +345,10 @@ static int udf_get_block(struct inode *inode, sector_t block,
iinfo->i_next_alloc_goal++;
}
- err = 0;
- bh = inode_getblk(inode, block, &err, &phys, &new);
- BUG_ON(bh);
- if (err)
+ phys = inode_getblk(inode, block, &err, &new);
+ if (!phys)
goto abort;
- BUG_ON(!phys);
if (new)
set_buffer_new(bh_result);
@@ -547,11 +557,10 @@ out:
return err;
}
-static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
- int *err, sector_t *phys, int *new)
+static sector_t inode_getblk(struct inode *inode, sector_t block,
+ int *err, int *new)
{
static sector_t last_block;
- struct buffer_head *result = NULL;
struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
struct extent_position prev_epos, cur_epos, next_epos;
int count = 0, startnum = 0, endnum = 0;
@@ -566,6 +575,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
int lastblock = 0;
+ *err = 0;
+ *new = 0;
prev_epos.offset = udf_file_entry_alloc_offset(inode);
prev_epos.block = iinfo->i_location;
prev_epos.bh = NULL;
@@ -635,8 +646,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
brelse(cur_epos.bh);
brelse(next_epos.bh);
newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
- *phys = newblock;
- return NULL;
+ return newblock;
}
last_block = block;
@@ -664,7 +674,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
brelse(cur_epos.bh);
brelse(next_epos.bh);
*err = ret;
- return NULL;
+ return 0;
}
c = 0;
offset = 0;
@@ -729,7 +739,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
if (!newblocknum) {
brelse(prev_epos.bh);
*err = -ENOSPC;
- return NULL;
+ return 0;
}
iinfo->i_lenExtents += inode->i_sb->s_blocksize;
}
@@ -761,10 +771,10 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
newblock = udf_get_pblock(inode->i_sb, newblocknum,
iinfo->i_location.partitionReferenceNum, 0);
- if (!newblock)
- return NULL;
- *phys = newblock;
- *err = 0;
+ if (!newblock) {
+ *err = -EIO;
+ return 0;
+ }
*new = 1;
iinfo->i_next_alloc_block = block;
iinfo->i_next_alloc_goal = newblocknum;
@@ -775,7 +785,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
else
mark_inode_dirty(inode);
- return result;
+ return newblock;
}
static void udf_split_extents(struct inode *inode, int *c, int offset,
@@ -1111,10 +1121,9 @@ int udf_setsize(struct inode *inode, loff_t newsize)
if (bsize <
(udf_file_entry_alloc_offset(inode) + newsize)) {
err = udf_expand_file_adinicb(inode);
- if (err) {
- up_write(&iinfo->i_data_sem);
+ if (err)
return err;
- }
+ down_write(&iinfo->i_data_sem);
} else
iinfo->i_lenAlloc = newsize;
}
@@ -1452,9 +1461,9 @@ static int udf_alloc_i_data(struct inode *inode, size_t size)
return 0;
}
-static mode_t udf_convert_permissions(struct fileEntry *fe)
+static umode_t udf_convert_permissions(struct fileEntry *fe)
{
- mode_t mode;
+ umode_t mode;
uint32_t permissions;
uint32_t flags;
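The inode_getblk() refactor above stops returning an unused buffer_head and filling *phys; the function now returns the mapped block directly, with 0 meaning failure, *err carrying the reason, and *err/*new cleared up front. A small sketch of that API shape with invented names and error values:

/* Sketch of the "return the block, 0 means failure, *err says why"
 * convention; names and numbers are illustrative. */
#include <stdio.h>

typedef unsigned long sector_t;

static sector_t map_block(sector_t want, int *err, int *new)
{
        *err = 0;
        *new = 0;

        if (want > 1000) {          /* pretend we ran out of space */
                *err = -28;         /* i.e. -ENOSPC */
                return 0;
        }
        if (want > 500)
                *new = 1;           /* pretend this block had to be allocated */
        return want + 100;          /* pretend partition offset */
}

int main(void)
{
        int err, new;
        sector_t phys = map_block(750, &err, &new);

        if (!phys)
                printf("error %d\n", err);
        else
                printf("block %lu (new=%d)\n", phys, new);
        return 0;
}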
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 4639e137222..08bf46edf9c 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -552,7 +552,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
-static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
+static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
struct udf_fileident_bh fibh;
@@ -596,7 +596,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
return 0;
}
-static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
+static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev)
{
struct inode *inode;
@@ -640,7 +640,7 @@ out:
return err;
}
-static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct udf_fileident_bh fibh;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e185253470d..c09a84daaf5 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -89,7 +89,7 @@ static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
-static int udf_show_options(struct seq_file *, struct vfsmount *);
+static int udf_show_options(struct seq_file *, struct dentry *);
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
{
@@ -138,7 +138,6 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
static void udf_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(udf_inode_cachep, UDF_I(inode));
}
@@ -196,11 +195,11 @@ struct udf_options {
unsigned int fileset;
unsigned int rootdir;
unsigned int flags;
- mode_t umask;
+ umode_t umask;
gid_t gid;
uid_t uid;
- mode_t fmode;
- mode_t dmode;
+ umode_t fmode;
+ umode_t dmode;
struct nls_table *nls_map;
};
@@ -250,9 +249,9 @@ static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
return 0;
}
-static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int udf_show_options(struct seq_file *seq, struct dentry *root)
{
- struct super_block *sb = mnt->mnt_sb;
+ struct super_block *sb = root->d_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
@@ -280,11 +279,11 @@ static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
seq_printf(seq, ",gid=%u", sbi->s_gid);
if (sbi->s_umask != 0)
- seq_printf(seq, ",umask=%o", sbi->s_umask);
+ seq_printf(seq, ",umask=%ho", sbi->s_umask);
if (sbi->s_fmode != UDF_INVALID_MODE)
- seq_printf(seq, ",mode=%o", sbi->s_fmode);
+ seq_printf(seq, ",mode=%ho", sbi->s_fmode);
if (sbi->s_dmode != UDF_INVALID_MODE)
- seq_printf(seq, ",dmode=%o", sbi->s_dmode);
+ seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
seq_printf(seq, ",session=%u", sbi->s_session);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
@@ -1799,6 +1798,12 @@ static void udf_close_lvid(struct super_block *sb)
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ /*
+ * We set buffer uptodate unconditionally here to avoid spurious
+ * warnings from mark_buffer_dirty() when previous EIO has marked
+ * the buffer as !uptodate
+ */
+ set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
mutex_unlock(&sbi->s_alloc_mutex);
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index b1d4488b0f1..d7c6dbe4194 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -41,10 +41,16 @@ static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
pc = (struct pathComponent *)(from + elen);
switch (pc->componentType) {
case 1:
- if (pc->lengthComponentIdent == 0) {
- p = to;
- *p++ = '/';
- }
+ /*
+ * Symlink points to some place which should be agreed
+ * upon between originator and receiver of the media. Ignore.
+ */
+ if (pc->lengthComponentIdent > 0)
+ break;
+ /* Fall through */
+ case 2:
+ p = to;
+ *p++ = '/';
break;
case 3:
memcpy(p, "../", 3);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 5142a82e327..42ad69ac957 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -50,7 +50,7 @@
#define UDF_SPARABLE_MAP15 0x1522U
#define UDF_METADATA_MAP25 0x2511U
-#define UDF_INVALID_MODE ((mode_t)-1)
+#define UDF_INVALID_MODE ((umode_t)-1)
#pragma pack(1) /* XXX(hch): Why? This file just defines in-core structures */
@@ -127,11 +127,11 @@ struct udf_sb_info {
struct buffer_head *s_lvid_bh;
/* Default permissions */
- mode_t s_umask;
+ umode_t s_umask;
gid_t s_gid;
uid_t s_uid;
- mode_t s_fmode;
- mode_t s_dmode;
+ umode_t s_fmode;
+ umode_t s_dmode;
/* Lock protecting consistency of above permission settings */
rwlock_t s_cred_lock;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index f34e6fc0cda..ebe10314e51 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -215,7 +215,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
-extern struct inode *udf_new_inode(struct inode *, int, int *);
+extern struct inode *udf_new_inode(struct inode *, umode_t, int *);
/* truncate.c */
extern void udf_truncate_tail_extent(struct inode *);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 78a4c70d46b..4ec5c1085a8 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -170,7 +170,7 @@ static void ufs2_init_inodes_chunk(struct super_block *sb,
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode * ufs_new_inode(struct inode * dir, int mode)
+struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
{
struct super_block * sb;
struct ufs_sb_info * sbi;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 879b13436fa..9094e1d917b 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -583,7 +583,7 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
- mode_t mode;
+ umode_t mode;
/*
* Copy data to the in-core inode.
@@ -630,7 +630,7 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
- mode_t mode;
+ umode_t mode;
UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
/*
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 639d4916224..38cac199edf 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -70,7 +70,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
+static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
struct nameidata *nd)
{
struct inode *inode;
@@ -94,7 +94,7 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
return err;
}
-static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
+static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
int err;
@@ -180,7 +180,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
return error;
}
-static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
struct inode * inode;
int err = -EMLINK;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 3915ade6f9a..5246ee3e560 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1351,9 +1351,9 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
return 0;
}
-static int ufs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int ufs_show_options(struct seq_file *seq, struct dentry *root)
{
- struct ufs_sb_info *sbi = UFS_SB(vfs->mnt_sb);
+ struct ufs_sb_info *sbi = UFS_SB(root->d_sb);
unsigned mval = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;
const struct match_token *tp = tokens;
@@ -1425,7 +1425,6 @@ static struct inode *ufs_alloc_inode(struct super_block *sb)
static void ufs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
}
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index c26f2bcec26..528750b7e70 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -104,7 +104,7 @@ extern const struct address_space_operations ufs_aops;
/* ialloc.c */
extern void ufs_free_inode (struct inode *inode);
-extern struct inode * ufs_new_inode (struct inode *, int);
+extern struct inode * ufs_new_inode (struct inode *, umode_t);
/* inode.c */
extern struct inode *ufs_iget(struct super_block *, unsigned long);
diff --git a/fs/xattr.c b/fs/xattr.c
index 67583de8218..82f43376c7c 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -397,7 +397,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
error = mnt_want_write_file(f);
if (!error) {
error = setxattr(dentry, name, value, size, flags);
- mnt_drop_write(f->f_path.mnt);
+ mnt_drop_write_file(f);
}
fput(f);
return error;
@@ -624,7 +624,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
error = mnt_want_write_file(f);
if (!error) {
error = removexattr(dentry, name);
- mnt_drop_write(f->f_path.mnt);
+ mnt_drop_write_file(f);
}
fput(f);
return error;
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6c4b3795c4..ac702a6eab9 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -39,9 +39,11 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
struct posix_acl_entry *acl_e;
struct posix_acl *acl;
struct xfs_acl_entry *ace;
- int count, i;
+ unsigned int count, i;
count = be32_to_cpu(aclp->acl_cnt);
+ if (count > XFS_ACL_MAX_ENTRIES)
+ return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
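The xfs_acl_from_disk() hunk makes the on-disk entry count unsigned and rejects anything above XFS_ACL_MAX_ENTRIES before the count is used to size an allocation, so a corrupted ACL block can no longer drive an absurd allocation. A userspace sketch of bounding an untrusted count; the limit and structures are invented:

/* Sketch: bound an untrusted on-disk count before sizing an allocation.
 * MAX_ENTRIES and struct entry are invented for the demo. */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_ENTRIES 25

struct entry { uint32_t tag, id, perm; };

static struct entry *entries_from_disk(uint32_t disk_count)
{
        unsigned int count = disk_count;    /* unsigned: no negative surprises */

        if (count > MAX_ENTRIES)
                return NULL;                /* treat as corruption, like -EFSCORRUPTED */

        return calloc(count ? count : 1, sizeof(struct entry));
}

int main(void)
{
        struct entry *ok = entries_from_disk(3);
        struct entry *bad = entries_from_disk(0xffffffffu);

        printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
        free(ok);
        return 0;
}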
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index d4906e7c978..c1b55e59655 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
+ *
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
int offset;
int minforkoff; /* lower limit on valid forkoff locations */
int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
+ int dsize;
xfs_mount_t *mp = dp->i_mount;
offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
return (offset >= minforkoff) ? minforkoff : 0;
}
- if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
- if (bytes <= XFS_IFORK_ASIZE(dp))
- return dp->i_d.di_forkoff;
+ /*
+ * If the requested numbers of bytes is smaller or equal to the
+ * current attribute fork size we can always proceed.
+ *
+ * Note that if_bytes in the data fork might actually be larger than
+ * the current data fork size is due to delalloc extents. In that
+ * case either the extent count will go down when they are converted
+ * to real extents, or the delalloc conversion will take care of the
+ * literal area rebalancing.
+ */
+ if (bytes <= XFS_IFORK_ASIZE(dp))
+ return dp->i_d.di_forkoff;
+
+ /*
+ * For attr2 we can try to move the forkoff if there is space in the
+ * literal area, but for the old format we are done if there is no
+ * space in the fixed attribute fork.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2))
return 0;
- }
dsize = dp->i_df.if_bytes;
-
+
switch (dp->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
+ /*
* If there is no attr fork and the data fork is extents,
- * determine if creating the default attr fork will result
- * in the extents form migrating to btree. If so, the
- * minimum offset only needs to be the space required for
+ * determine if creating the default attr fork will result
+ * in the extents form migrating to btree. If so, the
+ * minimum offset only needs to be the space required for
* the btree root.
- */
+ */
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
xfs_default_attroffset(dp))
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
break;
-
case XFS_DINODE_FMT_BTREE:
/*
- * If have data btree then keep forkoff if we have one,
- * otherwise we are adding a new attr, so then we set
- * minforkoff to where the btree root can finish so we have
+ * If we have a data btree then keep forkoff if we have one,
+ * otherwise we are adding a new attr, so then we set
+ * minforkoff to where the btree root can finish so we have
* plenty of room for attrs
*/
if (dp->i_d.di_forkoff) {
- if (offset < dp->i_d.di_forkoff)
+ if (offset < dp->i_d.di_forkoff)
return 0;
- else
- return dp->i_d.di_forkoff;
- } else
- dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+ return dp->i_d.di_forkoff;
+ }
+ dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
break;
}
-
- /*
- * A data fork btree root must have space for at least
+
+ /*
+ * A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
- if (offset >= minforkoff && offset < maxforkoff)
- return offset;
if (offset >= maxforkoff)
return maxforkoff;
+ if (offset >= minforkoff)
+ return offset;
return 0;
}
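The tail of xfs_attr_shortform_bytesfit() is reordered so the offset is clamped against the upper bound first and then accepted if it clears the lower bound. A minimal reproduction of just that final check, with invented values:

/* Mirrors only the reordered range check at the end of
 * xfs_attr_shortform_bytesfit(); inputs are invented. */
#include <stdio.h>

static int fit_forkoff(int offset, int minforkoff, int maxforkoff)
{
        if (offset >= maxforkoff)
                return maxforkoff;
        if (offset >= minforkoff)
                return offset;
        return 0;                   /* does not fit */
}

int main(void)
{
        printf("%d %d %d\n",
               fit_forkoff(10, 4, 8),    /* clamped to 8 */
               fit_forkoff(6, 4, 8),     /* fits as-is: 6 */
               fit_forkoff(2, 4, 8));    /* 0: no room */
        return 0;
}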
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c68baeb0974..d0ab7883705 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
int tryagain;
int error;
+ ASSERT(ap->length);
+
mp = ap->ip->i_mount;
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
int error;
int rt;
+ ASSERT(bma->length > 0);
+
rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
/*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL);
+ ASSERT(len > 0);
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
- bma.length = len;
bma.offset = bno;
+ /*
+ * There's a 32/64 bit type mismatch between the
+ * allocation length request (which can be 64 bits in
+ * length) and the bma length request, which is
+ * xfs_extlen_t and therefore 32 bits. Hence we have to
+ * check for 32-bit overflows and handle them here.
+ */
+ if (len > (xfs_filblks_t)MAXEXTLEN)
+ bma.length = MAXEXTLEN;
+ else
+ bma.length = len;
+
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
error = xfs_bmapi_allocate(&bma, flags);
if (error)
goto error0;
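xfs_bmapi_write() now clamps the 64-bit length request to MAXEXTLEN before storing it into the 32-bit bma.length, instead of letting the assignment silently truncate. A minimal demonstration of why the clamp matters; MAX_EXT below is an illustrative constant, not the real MAXEXTLEN:

/* Why the clamp matters: assigning a 64-bit length straight into a
 * 32-bit field can silently truncate to a tiny or zero value. */
#include <stdio.h>
#include <stdint.h>

#define MAX_EXT ((uint32_t)0x001FFFFF)   /* illustrative 21-bit extent limit */

int main(void)
{
        uint64_t len = 0x100000000ULL;   /* 2^32 blocks requested */
        uint32_t truncated = (uint32_t)len;                     /* old behaviour: 0! */
        uint32_t clamped = len > MAX_EXT ? MAX_EXT : (uint32_t)len;

        printf("truncated=%u clamped=%u\n", truncated, clamped);
        return 0;
}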
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index cf0ac056815..4dff85c7d7e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1370,7 +1370,7 @@ restart:
goto restart;
}
/*
- * clear the LRU reference count so the bufer doesn't get
+ * clear the LRU reference count so the buffer doesn't get
* ignored in xfs_buf_rele().
*/
atomic_set(&bp->b_lru_ref, 0);
@@ -1701,12 +1701,8 @@ xfsbufd(
struct list_head tmp;
struct blk_plug plug;
- if (unlikely(freezing(current))) {
- set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
- refrigerator();
- } else {
- clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
- }
+ if (unlikely(freezing(current)))
+ try_to_freeze();
/* sleep for a long time if there is nothing to do. */
if (list_empty(&target->bt_delwri_queue))
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 5bab046e859..df7ffb0affe 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -90,8 +90,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_DELWRI_Q, "DELWRI_Q" }
typedef enum {
- XBT_FORCE_SLEEP = 0,
- XBT_FORCE_FLUSH = 1,
+ XBT_FORCE_FLUSH = 0,
} xfs_buftarg_flags_t;
typedef struct xfs_buftarg {
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8a24f0c6c86..286a051f12c 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -68,7 +68,7 @@ xfs_trim_extents(
* Look up the longest btree in the AGF and start with it.
*/
error = xfs_alloc_lookup_le(cur, 0,
- XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
+ be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
if (error)
goto out_del_cursor;
@@ -84,7 +84,7 @@ xfs_trim_extents(
if (error)
goto out_del_cursor;
XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
- ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
+ ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
/*
* Too small? Give up.
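The xfs_discard.c fix converts the big-endian on-disk agf_longest field with be32_to_cpu() before comparing it against CPU-endian values. A userspace analogue using glibc's <endian.h> helpers; the struct is invented:

/* Userspace analogue: compare an on-disk big-endian field only after
 * byte-swapping it. The "agf" struct here is invented. */
#include <endian.h>
#include <stdio.h>
#include <stdint.h>

struct fake_agf { uint32_t agf_longest; };   /* stored big-endian on disk */

int main(void)
{
        struct fake_agf agf = { .agf_longest = htobe32(1) };  /* longest free extent: 1 block */
        uint32_t flen = 512;

        printf("raw field reads as %u, converted value is %u\n",
               agf.agf_longest, be32toh(agf.agf_longest));
        printf("flen <= raw:       %d (bogus on little-endian hosts)\n",
               flen <= agf.agf_longest);
        printf("flen <= converted: %d\n", flen <= be32toh(agf.agf_longest));
        return 0;
}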
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 25d7280e9f6..b4ff40b5f91 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -39,20 +39,19 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
-
/*
- LOCK ORDER
-
- inode lock (ilock)
- dquot hash-chain lock (hashlock)
- xqm dquot freelist lock (freelistlock
- mount's dquot list lock (mplistlock)
- user dquot lock - lock ordering among dquots is based on the uid or gid
- group dquot lock - similar to udquots. Between the two dquots, the udquot
- has to be locked first.
- pin lock - the dquot lock must be held to take this lock.
- flush lock - ditto.
-*/
+ * Lock order:
+ *
+ * ip->i_lock
+ * qh->qh_lock
+ * qi->qi_dqlist_lock
+ * dquot->q_qlock (xfs_dqlock() and friends)
+ * dquot->q_flush (xfs_dqflock() and friends)
+ * xfs_Gqm->qm_dqfrlist_lock
+ *
+ * If two dquots need to be locked the order is user before group/project,
+ * otherwise by the lowest id first, see xfs_dqlock2.
+ */
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
@@ -155,24 +154,6 @@ xfs_qm_dqdestroy(
}
/*
- * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
- */
-STATIC void
-xfs_qm_dqinit_core(
- xfs_dqid_t id,
- uint type,
- xfs_dqblk_t *d)
-{
- /*
- * Caller has zero'd the entire dquot 'chunk' already.
- */
- d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
- d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
- d->dd_diskdq.d_id = cpu_to_be32(id);
- d->dd_diskdq.d_flags = type;
-}
-
-/*
* If default limits are in force, push them into the dquot now.
* We overwrite the dquot limits only if they are zero and this
* is not the root dquot.
@@ -328,8 +309,13 @@ xfs_qm_init_dquot_blk(
curid = id - (id % q->qi_dqperchunk);
ASSERT(curid >= 0);
memset(d, 0, BBTOB(q->qi_dqchunklen));
- for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
- xfs_qm_dqinit_core(curid, type, d);
+ for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
+ d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+ d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+ d->dd_diskdq.d_id = cpu_to_be32(curid);
+ d->dd_diskdq.d_flags = type;
+ }
+
xfs_trans_dquot_buf(tp, bp,
(type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
@@ -564,36 +550,62 @@ xfs_qm_dqtobp(
* Read in the ondisk dquot using dqtobp() then copy it to an incore version,
* and release the buffer immediately.
*
+ * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed.
*/
-/* ARGSUSED */
-STATIC int
+int
xfs_qm_dqread(
- xfs_trans_t **tpp,
- xfs_dqid_t id,
- xfs_dquot_t *dqp, /* dquot to get filled in */
- uint flags)
+ struct xfs_mount *mp,
+ xfs_dqid_t id,
+ uint type,
+ uint flags,
+ struct xfs_dquot **O_dqpp)
{
- xfs_disk_dquot_t *ddqp;
- xfs_buf_t *bp;
- int error;
- xfs_trans_t *tp;
+ struct xfs_dquot *dqp;
+ struct xfs_disk_dquot *ddqp;
+ struct xfs_buf *bp;
+ struct xfs_trans *tp = NULL;
+ int error;
+ int cancelflags = 0;
- ASSERT(tpp);
+ dqp = xfs_qm_dqinit(mp, id, type);
trace_xfs_dqread(dqp);
+ if (flags & XFS_QMOPT_DQALLOC) {
+ tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
+ error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
+ XFS_WRITE_LOG_RES(mp) +
+ /*
+ * Round the chunklen up to the next multiple
+ * of 128 (buf log item chunk size)).
+ */
+ BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
+ 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_WRITE_LOG_COUNT);
+ if (error)
+ goto error1;
+ cancelflags = XFS_TRANS_RELEASE_LOG_RES;
+ }
+
/*
* get a pointer to the on-disk dquot and the buffer containing it
* dqp already knows its own type (GROUP/USER).
*/
- if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
- return (error);
+ error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
+ if (error) {
+ /*
+ * This can happen if quotas got turned off (ESRCH),
+ * or if the dquot didn't exist on disk and we ask to
+ * allocate (ENOENT).
+ */
+ trace_xfs_dqread_fail(dqp);
+ cancelflags |= XFS_TRANS_ABORT;
+ goto error1;
}
- tp = *tpp;
/* copy everything from disk dquot to the incore dquot */
memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
- ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
xfs_qm_dquot_logitem_init(dqp);
/*
@@ -622,77 +634,22 @@ xfs_qm_dqread(
ASSERT(xfs_buf_islocked(bp));
xfs_trans_brelse(tp, bp);
- return (error);
-}
-
-
-/*
- * allocate an incore dquot from the kernel heap,
- * and fill its core with quota information kept on disk.
- * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk
- * if it wasn't already allocated.
- */
-STATIC int
-xfs_qm_idtodq(
- xfs_mount_t *mp,
- xfs_dqid_t id, /* gid or uid, depending on type */
- uint type, /* UDQUOT or GDQUOT */
- uint flags, /* DQALLOC, DQREPAIR */
- xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */
-{
- xfs_dquot_t *dqp;
- int error;
- xfs_trans_t *tp;
- int cancelflags=0;
-
- dqp = xfs_qm_dqinit(mp, id, type);
- tp = NULL;
- if (flags & XFS_QMOPT_DQALLOC) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
- error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
- XFS_WRITE_LOG_RES(mp) +
- BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
- 128,
- 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_WRITE_LOG_COUNT);
- if (error) {
- cancelflags = 0;
- goto error0;
- }
- cancelflags = XFS_TRANS_RELEASE_LOG_RES;
- }
-
- /*
- * Read it from disk; xfs_dqread() takes care of
- * all the necessary initialization of dquot's fields (locks, etc)
- */
- if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
- /*
- * This can happen if quotas got turned off (ESRCH),
- * or if the dquot didn't exist on disk and we ask to
- * allocate (ENOENT).
- */
- trace_xfs_dqread_fail(dqp);
- cancelflags |= XFS_TRANS_ABORT;
- goto error0;
- }
if (tp) {
- if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES)))
- goto error1;
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ goto error0;
}
*O_dqpp = dqp;
- return (0);
+ return error;
- error0:
- ASSERT(error);
+error1:
if (tp)
xfs_trans_cancel(tp, cancelflags);
- error1:
+error0:
xfs_qm_dqdestroy(dqp);
*O_dqpp = NULL;
- return (error);
+ return error;
}
/*
@@ -710,12 +667,9 @@ xfs_qm_dqlookup(
xfs_dquot_t **O_dqpp)
{
xfs_dquot_t *dqp;
- uint flist_locked;
ASSERT(mutex_is_locked(&qh->qh_lock));
- flist_locked = B_FALSE;
-
/*
* Traverse the hashchain looking for a match
*/
@@ -725,70 +679,31 @@ xfs_qm_dqlookup(
* dqlock to look at the id field of the dquot, since the
* id can't be modified without the hashlock anyway.
*/
- if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
- trace_xfs_dqlookup_found(dqp);
+ if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp)
+ continue;
- /*
- * All in core dquots must be on the dqlist of mp
- */
- ASSERT(!list_empty(&dqp->q_mplist));
-
- xfs_dqlock(dqp);
- if (dqp->q_nrefs == 0) {
- ASSERT(!list_empty(&dqp->q_freelist));
- if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
- trace_xfs_dqlookup_want(dqp);
-
- /*
- * We may have raced with dqreclaim_one()
- * (and lost). So, flag that we don't
- * want the dquot to be reclaimed.
- */
- dqp->dq_flags |= XFS_DQ_WANT;
- xfs_dqunlock(dqp);
- mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- xfs_dqlock(dqp);
- dqp->dq_flags &= ~(XFS_DQ_WANT);
- }
- flist_locked = B_TRUE;
- }
+ trace_xfs_dqlookup_found(dqp);
- /*
- * id couldn't have changed; we had the hashlock all
- * along
- */
- ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
-
- if (flist_locked) {
- if (dqp->q_nrefs != 0) {
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
- flist_locked = B_FALSE;
- } else {
- /* take it off the freelist */
- trace_xfs_dqlookup_freelist(dqp);
- list_del_init(&dqp->q_freelist);
- xfs_Gqm->qm_dqfrlist_cnt--;
- }
- }
+ xfs_dqlock(dqp);
+ if (dqp->dq_flags & XFS_DQ_FREEING) {
+ *O_dqpp = NULL;
+ xfs_dqunlock(dqp);
+ return -1;
+ }
- XFS_DQHOLD(dqp);
+ dqp->q_nrefs++;
- if (flist_locked)
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
- /*
- * move the dquot to the front of the hashchain
- */
- ASSERT(mutex_is_locked(&qh->qh_lock));
- list_move(&dqp->q_hashlist, &qh->qh_list);
- trace_xfs_dqlookup_done(dqp);
- *O_dqpp = dqp;
- return 0;
- }
+ /*
+ * move the dquot to the front of the hashchain
+ */
+ list_move(&dqp->q_hashlist, &qh->qh_list);
+ trace_xfs_dqlookup_done(dqp);
+ *O_dqpp = dqp;
+ return 0;
}
*O_dqpp = NULL;
- ASSERT(mutex_is_locked(&qh->qh_lock));
- return (1);
+ return 1;
}
/*
@@ -829,11 +744,7 @@ xfs_qm_dqget(
return (EIO);
}
}
-#endif
-
- again:
-#ifdef DEBUG
ASSERT(type == XFS_DQ_USER ||
type == XFS_DQ_PROJ ||
type == XFS_DQ_GROUP);
@@ -845,13 +756,21 @@ xfs_qm_dqget(
ASSERT(ip->i_gdquot == NULL);
}
#endif
+
+restart:
mutex_lock(&h->qh_lock);
/*
* Look in the cache (hashtable).
* The chain is kept locked during lookup.
*/
- if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
+ switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
+ case -1:
+ XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+ mutex_unlock(&h->qh_lock);
+ delay(1);
+ goto restart;
+ case 0:
XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
/*
* The dquot was found, moved to the front of the chain,
@@ -862,9 +781,11 @@ xfs_qm_dqget(
ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
mutex_unlock(&h->qh_lock);
trace_xfs_dqget_hit(*O_dqpp);
- return (0); /* success */
+ return 0; /* success */
+ default:
+ XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+ break;
}
- XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
/*
* Dquot cache miss. We don't want to keep the inode lock across
@@ -882,41 +803,18 @@ xfs_qm_dqget(
version = h->qh_version;
mutex_unlock(&h->qh_lock);
- /*
- * Allocate the dquot on the kernel heap, and read the ondisk
- * portion off the disk. Also, do all the necessary initialization
- * This can return ENOENT if dquot didn't exist on disk and we didn't
- * ask it to allocate; ESRCH if quotas got turned off suddenly.
- */
- if ((error = xfs_qm_idtodq(mp, id, type,
- flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
- XFS_QMOPT_DOWARN),
- &dqp))) {
- if (ip)
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- return (error);
- }
+ error = xfs_qm_dqread(mp, id, type, flags, &dqp);
- /*
- * See if this is mount code calling to look at the overall quota limits
- * which are stored in the id == 0 user or group's dquot.
- * Since we may not have done a quotacheck by this point, just return
- * the dquot without attaching it to any hashtables, lists, etc, or even
- * taking a reference.
- * The caller must dqdestroy this once done.
- */
- if (flags & XFS_QMOPT_DQSUSER) {
- ASSERT(id == 0);
- ASSERT(! ip);
- goto dqret;
- }
+ if (ip)
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ if (error)
+ return error;
/*
* Dquot lock comes after hashlock in the lock ordering
*/
if (ip) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
/*
* A dquot could be attached to this inode by now, since
* we had dropped the ilock.
@@ -961,16 +859,21 @@ xfs_qm_dqget(
* lock order between the two dquots here since dqp isn't
* on any findable lists yet.
*/
- if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
+ switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
+ case 0:
+ case -1:
/*
- * Duplicate found. Just throw away the new dquot
- * and start over.
+ * Duplicate found, either in cache or on its way out.
+ * Just throw away the new dquot and start over.
*/
- xfs_qm_dqput(tmpdqp);
+ if (tmpdqp)
+ xfs_qm_dqput(tmpdqp);
mutex_unlock(&h->qh_lock);
xfs_qm_dqdestroy(dqp);
XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
- goto again;
+ goto restart;
+ default:
+ break;
}
}
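xfs_qm_dqlookup() now reports a tri-state result to xfs_qm_dqget(): 0 for a hit, 1 for a miss, and -1 when the matching dquot is marked XFS_DQ_FREEING, in which case the caller backs off (delay(1)) and retries from the restart label. A small sketch of that pattern; the enum, struct and retry simulation are invented:

/* Sketch of a tri-state cache lookup with a "being freed, retry" case. */
#include <stdio.h>

enum lookup { LOOKUP_HIT = 0, LOOKUP_MISS = 1, LOOKUP_FREEING = -1 };

struct obj { int id; int freeing; };

static enum lookup cache_lookup(struct obj *cache, int n, int id, struct obj **out)
{
        int i;

        *out = NULL;
        for (i = 0; i < n; i++) {
                if (cache[i].id != id)
                        continue;
                if (cache[i].freeing)
                        return LOOKUP_FREEING;   /* caller should back off and retry */
                *out = &cache[i];
                return LOOKUP_HIT;
        }
        return LOOKUP_MISS;
}

int main(void)
{
        struct obj cache[] = { { 1, 0 }, { 2, 1 } };
        struct obj *hit;
        int tries = 0;

        /* id 2 is being torn down: retry until it disappears (simulated) */
        while (cache_lookup(cache, 2, 2, &hit) == LOOKUP_FREEING && ++tries < 3)
                cache[1].id = 0;                 /* pretend the free finished */

        printf("tries=%d hit=%p\n", tries, (void *)hit);
        printf("lookup of id 1 returns %d\n", cache_lookup(cache, 2, 1, &hit));
        return 0;
}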
@@ -1015,67 +918,49 @@ xfs_qm_dqget(
*/
void
xfs_qm_dqput(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
- xfs_dquot_t *gdqp;
+ struct xfs_dquot *gdqp;
ASSERT(dqp->q_nrefs > 0);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
trace_xfs_dqput(dqp);
- if (dqp->q_nrefs != 1) {
- dqp->q_nrefs--;
+recurse:
+ if (--dqp->q_nrefs > 0) {
xfs_dqunlock(dqp);
return;
}
+ trace_xfs_dqput_free(dqp);
+
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+ if (list_empty(&dqp->q_freelist)) {
+ list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
+ xfs_Gqm->qm_dqfrlist_cnt++;
+ }
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+
/*
- * drop the dqlock and acquire the freelist and dqlock
- * in the right order; but try to get it out-of-order first
+ * If we just added a udquot to the freelist, then we want to release
+ * the gdquot reference that it (probably) has. Otherwise it'll keep
+ * the gdquot from getting reclaimed.
*/
- if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
- trace_xfs_dqput_wait(dqp);
- xfs_dqunlock(dqp);
- mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
- xfs_dqlock(dqp);
+ gdqp = dqp->q_gdquot;
+ if (gdqp) {
+ xfs_dqlock(gdqp);
+ dqp->q_gdquot = NULL;
}
+ xfs_dqunlock(dqp);
- while (1) {
- gdqp = NULL;
-
- /* We can't depend on nrefs being == 1 here */
- if (--dqp->q_nrefs == 0) {
- trace_xfs_dqput_free(dqp);
-
- list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
- xfs_Gqm->qm_dqfrlist_cnt++;
-
- /*
- * If we just added a udquot to the freelist, then
- * we want to release the gdquot reference that
- * it (probably) has. Otherwise it'll keep the
- * gdquot from getting reclaimed.
- */
- if ((gdqp = dqp->q_gdquot)) {
- /*
- * Avoid a recursive dqput call
- */
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
- }
- xfs_dqunlock(dqp);
-
- /*
- * If we had a group quota inside the user quota as a hint,
- * release it now.
- */
- if (! gdqp)
- break;
+ /*
+ * If we had a group quota hint, release it now.
+ */
+ if (gdqp) {
dqp = gdqp;
+ goto recurse;
}
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}
/*
@@ -1169,7 +1054,7 @@ xfs_qm_dqflush(
* If not dirty, or it's pinned and we are not supposed to block, nada.
*/
if (!XFS_DQ_IS_DIRTY(dqp) ||
- (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
+ ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
xfs_dqfunlock(dqp);
return 0;
}
@@ -1257,40 +1142,17 @@ xfs_qm_dqflush(
}
-int
-xfs_qm_dqlock_nowait(
- xfs_dquot_t *dqp)
-{
- return mutex_trylock(&dqp->q_qlock);
-}
-
-void
-xfs_dqlock(
- xfs_dquot_t *dqp)
-{
- mutex_lock(&dqp->q_qlock);
-}
-
void
xfs_dqunlock(
xfs_dquot_t *dqp)
{
- mutex_unlock(&(dqp->q_qlock));
+ xfs_dqunlock_nonotify(dqp);
if (dqp->q_logitem.qli_dquot == dqp) {
- /* Once was dqp->q_mount, but might just have been cleared */
xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
- (xfs_log_item_t*)&(dqp->q_logitem));
+ &dqp->q_logitem.qli_item);
}
}
-
-void
-xfs_dqunlock_nonotify(
- xfs_dquot_t *dqp)
-{
- mutex_unlock(&(dqp->q_qlock));
-}
-
/*
* Lock two xfs_dquot structures.
*
@@ -1319,43 +1181,18 @@ xfs_dqlock2(
}
}
-
/*
- * Take a dquot out of the mount's dqlist as well as the hashlist.
- * This is called via unmount as well as quotaoff, and the purge
- * will always succeed unless there are soft (temp) references
- * outstanding.
- *
- * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
- * that we're returning! XXXsup - not cool.
+ * Take a dquot out of the mount's dqlist as well as the hashlist. This is
+ * called via unmount as well as quotaoff, and the purge will always succeed.
*/
-/* ARGSUSED */
-int
+void
xfs_qm_dqpurge(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
- xfs_dqhash_t *qh = dqp->q_hash;
- xfs_mount_t *mp = dqp->q_mount;
-
- ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
- ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
+ struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_dqhash *qh = dqp->q_hash;
xfs_dqlock(dqp);
- /*
- * We really can't afford to purge a dquot that is
- * referenced, because these are hard refs.
- * It shouldn't happen in general because we went thru _all_ inodes in
- * dqrele_all_inodes before calling this and didn't let the mountlock go.
- * However it is possible that we have dquots with temporary
- * references that are not attached to an inode. e.g. see xfs_setattr().
- */
- if (dqp->q_nrefs != 0) {
- xfs_dqunlock(dqp);
- mutex_unlock(&dqp->q_hash->qh_lock);
- return (1);
- }
-
- ASSERT(!list_empty(&dqp->q_freelist));
/*
* If we're turning off quotas, we have to make sure that, for
@@ -1370,23 +1207,18 @@ xfs_qm_dqpurge(
* Block on the flush lock after nudging dquot buffer,
* if it is incore.
*/
- xfs_qm_dqflock_pushbuf_wait(dqp);
+ xfs_dqflock_pushbuf_wait(dqp);
}
/*
- * XXXIf we're turning this type of quotas off, we don't care
+ * If we are turning this type of quotas off, we don't care
* about the dirty metadata sitting in this dquot. OTOH, if
* we're unmounting, we do care, so we flush it and wait.
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- /* dqflush unlocks dqflock */
/*
- * Given that dqpurge is a very rare occurrence, it is OK
- * that we're holding the hashlist and mplist locks
- * across the disk write. But, ... XXXsup
- *
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
@@ -1396,38 +1228,44 @@ xfs_qm_dqpurge(
__func__, dqp);
xfs_dqflock(dqp);
}
+
ASSERT(atomic_read(&dqp->q_pincount) == 0);
ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
!(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
+ xfs_dqfunlock(dqp);
+ xfs_dqunlock(dqp);
+
+ mutex_lock(&qh->qh_lock);
list_del_init(&dqp->q_hashlist);
qh->qh_version++;
+ mutex_unlock(&qh->qh_lock);
+
+ mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
list_del_init(&dqp->q_mplist);
mp->m_quotainfo->qi_dqreclaims++;
mp->m_quotainfo->qi_dquots--;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+
/*
- * XXX Move this to the front of the freelist, if we can get the
- * freelist lock.
+ * We move dquots to the freelist as soon as their reference count
+ * hits zero, so it really should be on the freelist here.
*/
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
ASSERT(!list_empty(&dqp->q_freelist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
- dqp->q_mount = NULL;
- dqp->q_hash = NULL;
- dqp->dq_flags = XFS_DQ_INACTIVE;
- memset(&dqp->q_core, 0, sizeof(dqp->q_core));
- xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
- mutex_unlock(&qh->qh_lock);
- return (0);
+ xfs_qm_dqdestroy(dqp);
}
-
/*
* Give the buffer a little push if it is incore and
* wait on the flush lock.
*/
void
-xfs_qm_dqflock_pushbuf_wait(
+xfs_dqflock_pushbuf_wait(
xfs_dquot_t *dqp)
{
xfs_mount_t *mp = dqp->q_mount;
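The reworked xfs_qm_dqput() above drops the reference under the dquot lock, parks the dquot on the global freelist when the count reaches zero, and then releases any group-quota hint by jumping back to the top rather than recursing. A compact userspace sketch of that flow, with the locking and list handling simplified away:

/* Simplified sketch of the new xfs_qm_dqput() shape: last reference
 * moves the object to a freelist, and a chained "hint" reference is
 * released by looping instead of recursing. */
#include <stdio.h>

struct dq {
        const char *name;
        int nrefs;
        int on_freelist;
        struct dq *gdquot;       /* optional group-quota hint */
};

static void dqput(struct dq *dqp)
{
recurse:
        if (--dqp->nrefs > 0)
                return;

        if (!dqp->on_freelist) {
                dqp->on_freelist = 1;            /* list_add_tail(...) in the real code */
                printf("%s moved to freelist\n", dqp->name);
        }

        if (dqp->gdquot) {
                struct dq *gdqp = dqp->gdquot;

                dqp->gdquot = NULL;              /* drop the hint exactly once */
                dqp = gdqp;
                goto recurse;                    /* release it without real recursion */
        }
}

int main(void)
{
        struct dq group = { "group", 2, 0, NULL };
        struct dq user  = { "user",  1, 0, &group };

        dqput(&user);    /* frees user, drops the hinted group reference */
        dqput(&group);   /* last group reference */
        return 0;
}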
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 34b7e945dbf..a1d91d8f180 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -80,8 +80,6 @@ enum {
XFS_QLOCK_NESTED,
};
-#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
-
/*
* Manage the q_flush completion queue embedded in the dquot. This completion
* queue synchronizes processes attempting to flush the in-core dquot back to
@@ -102,6 +100,21 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
complete(&dqp->q_flush);
}
+static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
+{
+ return mutex_trylock(&dqp->q_qlock);
+}
+
+static inline void xfs_dqlock(struct xfs_dquot *dqp)
+{
+ mutex_lock(&dqp->q_qlock);
+}
+
+static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp)
+{
+ mutex_unlock(&dqp->q_qlock);
+}
+
#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
@@ -116,12 +129,12 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_OQUOTA_ON((d)->q_mount))))
+extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint,
+ uint, struct xfs_dquot **);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern int xfs_qm_dqpurge(xfs_dquot_t *);
+extern void xfs_qm_dqpurge(xfs_dquot_t *);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern int xfs_qm_dqlock_nowait(xfs_dquot_t *);
-extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
@@ -129,9 +142,17 @@ extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
xfs_dqid_t, uint, uint, xfs_dquot_t **);
extern void xfs_qm_dqput(xfs_dquot_t *);
-extern void xfs_dqlock(xfs_dquot_t *);
-extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
-extern void xfs_dqunlock(xfs_dquot_t *);
-extern void xfs_dqunlock_nonotify(xfs_dquot_t *);
+
+extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
+extern void xfs_dqunlock(struct xfs_dquot *);
+extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp);
+
+static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
+{
+ xfs_dqlock(dqp);
+ dqp->q_nrefs++;
+ xfs_dqunlock(dqp);
+ return dqp;
+}
#endif /* __XFS_DQUOT_H__ */
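For readers skimming the header change above: the new xfs_qm_dqhold() inline replaces the open-coded xfs_dqlock()/XFS_DQHOLD()/xfs_dqunlock() sequences that later hunks in this diff remove from xfs_qm.c. A rough stand-alone illustration of the pattern, written with user-space pthreads and hypothetical names rather than the XFS types:

#include <pthread.h>

struct dquot_like {
	pthread_mutex_t lock;
	unsigned int    nrefs;
};

/*
 * Bump the reference count under the per-object lock and hand the object
 * back to the caller, the same shape as the new xfs_qm_dqhold() above.
 * Callers write "udq = hold(hint);" instead of open-coding lock/++/unlock.
 */
static struct dquot_like *hold(struct dquot_like *dq)
{
	pthread_mutex_lock(&dq->lock);
	dq->nrefs++;
	pthread_mutex_unlock(&dq->lock);
	return dq;
}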
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 0dee0b71029..34baeae4526 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -73,7 +73,6 @@ xfs_qm_dquot_logitem_format(
logvec->i_len = sizeof(xfs_disk_dquot_t);
logvec->i_type = XLOG_REG_TYPE_DQUOT;
- ASSERT(2 == lip->li_desc->lid_size);
qlip->qli_format.qlf_size = 2;
}
@@ -134,7 +133,7 @@ xfs_qm_dquot_logitem_push(
* lock without sleeping, then there must not have been
* anyone in the process of flushing the dquot.
*/
- error = xfs_qm_dqflush(dqp, 0);
+ error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
if (error)
xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
__func__, error, dqp);
@@ -237,7 +236,7 @@ xfs_qm_dquot_logitem_trylock(
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
- if (!xfs_qm_dqlock_nowait(dqp))
+ if (!xfs_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED;
if (!xfs_dqflock_nowait(dqp)) {
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index da108977b21..558910f5e3c 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
switch (fileid_type) {
case FILEID_INO32_GEN_PARENT:
spin_lock(&dentry->d_lock);
- fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN:
- fid->i32.ino = inode->i_ino;
+ fid->i32.ino = XFS_I(inode)->i_ino;
fid->i32.gen = inode->i_generation;
break;
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
spin_lock(&dentry->d_lock);
- fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- fid64->ino = inode->i_ino;
+ fid64->ino = XFS_I(inode)->i_ino;
fid64->gen = inode->i_generation;
break;
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 753ed9b5c70..f675f3d9d7b 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -209,10 +209,10 @@ xfs_file_fsync(
/*
* First check if the VFS inode is marked dirty. All the dirtying
- * of non-transactional updates no goes through mark_inode_dirty*,
- * which allows us to distinguish beteeen pure timestamp updates
+ * of non-transactional updates now goes through mark_inode_dirty*,
+ * which allows us to distinguish between pure timestamp updates
* and i_size updates which need to be caught for fdatasync.
- * After that also theck for the dirty state in the XFS inode, which
+ * After that also check for the dirty state in the XFS inode, which
 * might get cleared when the inode gets written out via the AIL
* or xfs_iflush_cluster.
*/
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 169380e6605..dad1a31aa4f 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -447,7 +447,7 @@ STATIC xfs_buf_t * /* allocation group buffer */
xfs_ialloc_ag_select(
xfs_trans_t *tp, /* transaction pointer */
xfs_ino_t parent, /* parent directory inode number */
- mode_t mode, /* bits set to indicate file type */
+ umode_t mode, /* bits set to indicate file type */
int okalloc) /* ok to allocate more space */
{
xfs_buf_t *agbp; /* allocation group header buffer */
@@ -640,7 +640,7 @@ int
xfs_dialloc(
xfs_trans_t *tp, /* transaction pointer */
xfs_ino_t parent, /* parent inode (directory) */
- mode_t mode, /* mode bits for new inode */
+ umode_t mode, /* mode bits for new inode */
int okalloc, /* ok to allocate more space */
xfs_buf_t **IO_agbp, /* in/out ag header's buffer */
boolean_t *alloc_done, /* true if we needed to replenish
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index bb5385475e1..666a037398d 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -81,7 +81,7 @@ int /* error */
xfs_dialloc(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t parent, /* parent inode (directory) */
- mode_t mode, /* mode bits for new inode */
+ umode_t mode, /* mode bits for new inode */
int okalloc, /* ok to allocate more space */
struct xfs_buf **agbp, /* buf for a.g. inode header */
boolean_t *alloc_done, /* an allocation was done to replenish
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0fa98b1c70e..3960a066d7f 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -107,7 +107,6 @@ xfs_inode_free_callback(
struct inode *inode = container_of(head, struct inode, i_rcu);
struct xfs_inode *ip = XFS_I(inode);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_zone_free(xfs_inode_zone, ip);
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c0237c602f1..9dda7cc3284 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -961,7 +961,7 @@ int
xfs_ialloc(
xfs_trans_t *tp,
xfs_inode_t *pip,
- mode_t mode,
+ umode_t mode,
xfs_nlink_t nlink,
xfs_dev_t rdev,
prid_t prid,
@@ -1002,7 +1002,7 @@ xfs_ialloc(
return error;
ASSERT(ip != NULL);
- ip->i_d.di_mode = (__uint16_t)mode;
+ ip->i_d.di_mode = mode;
ip->i_d.di_onlink = 0;
ip->i_d.di_nlink = nlink;
ASSERT(ip->i_d.di_nlink == nlink);
@@ -2835,6 +2835,27 @@ corrupt_out:
return XFS_ERROR(EFSCORRUPTED);
}
+void
+xfs_promote_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_buf *bp;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+ bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+ ip->i_imap.im_len, XBF_TRYLOCK);
+ if (!bp)
+ return;
+
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ xfs_buf_delwri_promote(bp);
+ wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+ }
+
+ xfs_buf_relse(bp);
+}
+
/*
* Return a pointer to the extent record at file index idx.
*/
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 760140d1dd6..f0e6b151ba3 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -481,7 +481,7 @@ void xfs_inode_free(struct xfs_inode *ip);
/*
* xfs_inode.c prototypes.
*/
-int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t,
+int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
xfs_nlink_t, xfs_dev_t, prid_t, int,
struct xfs_buf **, boolean_t *, xfs_inode_t **);
@@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
+void xfs_promote_inode(struct xfs_inode *);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index abaafdbb3e6..cfd6c7f8cc3 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -437,7 +437,6 @@ xfs_inode_item_format(
* Assert that no attribute-related log flags are set.
*/
if (!XFS_IFORK_Q(ip)) {
- ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
@@ -521,7 +520,6 @@ xfs_inode_item_format(
break;
}
- ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
}
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d99a9051890..76f3ca5cfc3 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -559,23 +559,23 @@ xfs_attrmulti_by_handle(
ops[i].am_flags);
break;
case ATTR_OP_SET:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_set(
dentry->d_inode, attr_name,
ops[i].am_attrvalue, ops[i].am_length,
ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
+ mnt_drop_write_file(parfilp);
break;
case ATTR_OP_REMOVE:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_remove(
dentry->d_inode, attr_name,
ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
+ mnt_drop_write_file(parfilp);
break;
default:
ops[i].am_error = EINVAL;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 54e623bfbb8..f9ccb7b7c04 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -454,23 +454,23 @@ xfs_compat_attrmulti_by_handle(
&ops[i].am_length, ops[i].am_flags);
break;
case ATTR_OP_SET:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_set(
dentry->d_inode, attr_name,
compat_ptr(ops[i].am_attrvalue),
ops[i].am_length, ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
+ mnt_drop_write_file(parfilp);
break;
case ATTR_OP_REMOVE:
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ ops[i].am_error = mnt_want_write_file(parfilp);
if (ops[i].am_error)
break;
ops[i].am_error = xfs_attrmulti_attr_remove(
dentry->d_inode, attr_name,
ops[i].am_flags);
- mnt_drop_write(parfilp->f_path.mnt);
+ mnt_drop_write_file(parfilp);
break;
default:
ops[i].am_error = EINVAL;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 23ce927973a..f9babd17922 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -168,7 +168,7 @@ STATIC int
xfs_vn_mknod(
struct inode *dir,
struct dentry *dentry,
- int mode,
+ umode_t mode,
dev_t rdev)
{
struct inode *inode;
@@ -231,7 +231,7 @@ STATIC int
xfs_vn_create(
struct inode *dir,
struct dentry *dentry,
- int mode,
+ umode_t mode,
struct nameidata *nd)
{
return xfs_vn_mknod(dir, dentry, mode, 0);
@@ -241,7 +241,7 @@ STATIC int
xfs_vn_mkdir(
struct inode *dir,
struct dentry *dentry,
- int mode)
+ umode_t mode)
{
return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0);
}
@@ -366,7 +366,7 @@ xfs_vn_symlink(
struct xfs_inode *cip = NULL;
struct xfs_name name;
int error;
- mode_t mode;
+ umode_t mode;
mode = S_IFLNK |
(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a14cd89fe46..e2cc3568c29 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -150,6 +150,117 @@ xlog_grant_add_space(
} while (head_val != old);
}
+STATIC bool
+xlog_reserveq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+ need_bytes = tic->t_unit_res * tic->t_cnt;
+ else
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_grant_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_regrant_write_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_grant_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+ trace_xfs_log_grant_wake(log, tic);
+
+ spin_lock(&log->l_grant_reserve_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_writeq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+ trace_xfs_log_regrant_write_wake(log, tic);
+
+ spin_lock(&log->l_grant_write_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
@@ -350,8 +461,19 @@ xfs_log_reserve(
retval = xlog_grant_log_space(log, internal_ticket);
}
+ if (unlikely(retval)) {
+ /*
+ * If we are failing, make sure the ticket doesn't have any
+ * current reservations. We don't want to add this back
+ * when the ticket/transaction gets cancelled.
+ */
+ internal_ticket->t_curr_res = 0;
+ /* ungrant will give back unit_res * t_cnt. */
+ internal_ticket->t_cnt = 0;
+ }
+
return retval;
-} /* xfs_log_reserve */
+}
/*
@@ -638,38 +760,6 @@ xfs_log_item_init(
INIT_LIST_HEAD(&item->li_cil);
}
-/*
- * Write region vectors to log. The write happens using the space reservation
- * of the ticket (tic). It is not a requirement that all writes for a given
- * transaction occur with one call to xfs_log_write(). However, it is important
- * to note that the transaction reservation code makes an assumption about the
- * number of log headers a transaction requires that may be violated if you
- * don't pass all the transaction vectors in one call....
- */
-int
-xfs_log_write(
- struct xfs_mount *mp,
- struct xfs_log_iovec reg[],
- int nentries,
- struct xlog_ticket *tic,
- xfs_lsn_t *start_lsn)
-{
- struct log *log = mp->m_log;
- int error;
- struct xfs_log_vec vec = {
- .lv_niovecs = nentries,
- .lv_iovecp = reg,
- };
-
- if (XLOG_FORCED_SHUTDOWN(log))
- return XFS_ERROR(EIO);
-
- error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
- if (error)
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return error;
-}
-
void
xfs_log_move_tail(xfs_mount_t *mp,
xfs_lsn_t tail_lsn)
@@ -1563,7 +1653,7 @@ xlog_print_tic_res(
};
xfs_warn(mp,
- "xfs_log_write: reservation summary:\n"
+ "xlog_write: reservation summary:\n"
" trans type = %s (%u)\n"
" unit res = %d bytes\n"
" current res = %d bytes\n"
@@ -1592,7 +1682,7 @@ xlog_print_tic_res(
}
xfs_alert_tag(mp, XFS_PTAG_LOGRES,
- "xfs_log_write: reservation ran out. Need to up reservation");
+ "xlog_write: reservation ran out. Need to up reservation");
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
@@ -1846,23 +1936,21 @@ xlog_write(
*start_lsn = 0;
len = xlog_write_calc_vec_length(ticket, log_vector);
- if (log->l_cilp) {
- /*
- * Region headers and bytes are already accounted for.
- * We only need to take into account start records and
- * split regions in this function.
- */
- if (ticket->t_flags & XLOG_TIC_INITED)
- ticket->t_curr_res -= sizeof(xlog_op_header_t);
- /*
- * Commit record headers need to be accounted for. These
- * come in as separate writes so are easy to detect.
- */
- if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
- ticket->t_curr_res -= sizeof(xlog_op_header_t);
- } else
- ticket->t_curr_res -= len;
+ /*
+ * Region headers and bytes are already accounted for.
+ * We only need to take into account start records and
+ * split regions in this function.
+ */
+ if (ticket->t_flags & XLOG_TIC_INITED)
+ ticket->t_curr_res -= sizeof(xlog_op_header_t);
+
+ /*
+ * Commit record headers need to be accounted for. These
+ * come in as separate writes so are easy to detect.
+ */
+ if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
+ ticket->t_curr_res -= sizeof(xlog_op_header_t);
if (ticket->t_curr_res < 0)
xlog_print_tic_res(log->l_mp, ticket);
@@ -2481,8 +2569,8 @@ restart:
/*
* Atomically get the log space required for a log ticket.
*
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
*
* This function is structured so that it has a lock free fast path. This is
* necessary because every new transaction reservation will come through this
@@ -2490,113 +2578,53 @@ restart:
* every pass.
*
* As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
*/
STATIC int
-xlog_grant_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_grant_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes;
- int need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("grant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_grant_enter(log, tic);
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters; if we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
+ */
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
-
- /* something is already sleeping; insert new transaction at end */
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
- /* recheck the queue now we are locked */
- if (list_empty(&log->l_reserveq)) {
- spin_unlock(&log->l_grant_reserve_lock);
- goto redo;
- }
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep1(log, tic);
-
- /*
- * Gotta check this before going to sleep, while we're
- * holding the grant lock.
- */
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- /*
- * If we got an error, and the filesystem is shutting down,
- * we'll catch it down below. So just continue...
- */
- trace_xfs_log_grant_wake1(log, tic);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_reserveq)) {
spin_lock(&log->l_grant_reserve_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep2(log, tic);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- trace_xfs_log_grant_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_reserveq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_reserveq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_reserve_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_reserve_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_reserveq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_reserve_lock);
}
+ if (error)
+ return error;
- /* we've got enough space */
xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-error_return_unlocked:
- spin_lock(&log->l_grant_reserve_lock);
-error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_reserve_lock);
- trace_xfs_log_grant_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_grant_log_space */
-
+}
/*
* Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2633,12 @@ error_return:
* free fast path.
*/
STATIC int
-xlog_regrant_write_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes, need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
tic->t_curr_res = tic->t_unit_res;
xlog_tic_reset_res(tic);
@@ -2616,104 +2646,38 @@ xlog_regrant_write_log_space(xlog_t *log,
if (tic->t_cnt > 0)
return 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("regrant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_regrant_write_enter(log, tic);
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
- /* If there are other waiters on the queue then give them a
- * chance at logspace before us. Wake up the first waiters,
- * if we do not wake up all the waiters then go to sleep waiting
- * for more free space, otherwise try to get some space for
- * this transaction.
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters; if we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- if (!list_empty_careful(&log->l_writeq)) {
- struct xlog_ticket *ntic;
-
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- list_for_each_entry(ntic, &log->l_writeq, t_queue) {
- ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
- if (free_bytes < ntic->t_unit_res)
- break;
- free_bytes -= ntic->t_unit_res;
- wake_up(&ntic->t_wait);
- }
-
- if (ntic != list_first_entry(&log->l_writeq,
- struct xlog_ticket, t_queue)) {
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
- trace_xfs_log_regrant_write_sleep1(log, tic);
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
- trace_xfs_log_regrant_write_wake1(log, tic);
- } else
- spin_unlock(&log->l_grant_write_lock);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_writeq)) {
spin_lock(&log->l_grant_write_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- trace_xfs_log_regrant_write_sleep2(log, tic);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
- trace_xfs_log_regrant_write_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_writeq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_writeq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_write_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_write_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_writeq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_write_lock);
}
- /* we've got enough space */
+ if (error)
+ return error;
+
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-
- error_return_unlocked:
- spin_lock(&log->l_grant_write_lock);
- error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_write_lock);
- trace_xfs_log_regrant_write_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_regrant_write_log_space */
-
+}
/* The first cnt-1 times through here we don't need to
* move the grant write head because the permanent
@@ -2933,8 +2897,7 @@ _xfs_log_force(
XFS_STATS_INC(xs_log_force);
- if (log->l_cilp)
- xlog_cil_force(log);
+ xlog_cil_force(log);
spin_lock(&log->l_icloglock);
@@ -3083,11 +3046,9 @@ _xfs_log_force_lsn(
XFS_STATS_INC(xs_log_force);
- if (log->l_cilp) {
- lsn = xlog_cil_force_lsn(log, lsn);
- if (lsn == NULLCOMMITLSN)
- return 0;
- }
+ lsn = xlog_cil_force_lsn(log, lsn);
+ if (lsn == NULLCOMMITLSN)
+ return 0;
try_again:
spin_lock(&log->l_icloglock);
@@ -3655,7 +3616,7 @@ xfs_log_force_umount(
* completed transactions are flushed to disk with the xfs_log_force()
* call below.
*/
- if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
+ if (!logerror)
xlog_cil_force(log);
/*
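For context on the xfs_log.c rework above: xlog_reserveq_wake() and xlog_writeq_wake() walk the queue of waiting tickets and hand out free log space in FIFO order, stopping as soon as the head of the remaining queue needs more than is left. A minimal stand-alone sketch of that loop, with illustrative types only and not the kernel code:

#include <stdbool.h>
#include <stddef.h>

struct ticket {
	int  need_bytes;	/* space this waiter is asking for    */
	bool woken;		/* stand-in for wake_up(&tic->t_wait) */
};

/*
 * Hand out *free_bytes to queued waiters in order and stop as soon as
 * the next waiter needs more than is left, the same shape as the new
 * xlog_reserveq_wake()/xlog_writeq_wake() helpers.  Returns true only
 * if every waiter could be satisfied.
 */
static bool wake_waiters(struct ticket *queue, size_t n, int *free_bytes)
{
	for (size_t i = 0; i < n; i++) {
		if (*free_bytes < queue[i].need_bytes)
			return false;
		*free_bytes -= queue[i].need_bytes;
		queue[i].woken = true;
	}
	return true;
}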
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 3f7bf451c03..2aee3b22d29 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -174,11 +174,6 @@ int xfs_log_reserve(struct xfs_mount *mp,
__uint8_t clientid,
uint flags,
uint t_type);
-int xfs_log_write(struct xfs_mount *mp,
- xfs_log_iovec_t region[],
- int nentries,
- struct xlog_ticket *ticket,
- xfs_lsn_t *start_lsn);
int xfs_log_unmount_write(struct xfs_mount *mp);
void xfs_log_unmount(struct xfs_mount *mp);
int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
@@ -189,8 +184,7 @@ void xlog_iodone(struct xfs_buf *);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
-void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
- struct xfs_log_vec *log_vector,
+int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_lsn_t *commit_lsn, int flags);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index c7755d5a5fb..d4fadbe8ac9 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -32,10 +32,7 @@
#include "xfs_discard.h"
/*
- * Perform initial CIL structure initialisation. If the CIL is not
- * enabled in this filesystem, ensure the log->l_cilp is null so
- * we can check this conditional to determine if we are doing delayed
- * logging or not.
+ * Perform initial CIL structure initialisation.
*/
int
xlog_cil_init(
@@ -44,10 +41,6 @@ xlog_cil_init(
struct xfs_cil *cil;
struct xfs_cil_ctx *ctx;
- log->l_cilp = NULL;
- if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG))
- return 0;
-
cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
if (!cil)
return ENOMEM;
@@ -80,9 +73,6 @@ void
xlog_cil_destroy(
struct log *log)
{
- if (!log->l_cilp)
- return;
-
if (log->l_cilp->xc_ctx) {
if (log->l_cilp->xc_ctx->ticket)
xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
@@ -137,9 +127,6 @@ void
xlog_cil_init_post_recovery(
struct log *log)
{
- if (!log->l_cilp)
- return;
-
log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
log->l_cilp->xc_ctx->sequence = 1;
log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
@@ -172,37 +159,73 @@ xlog_cil_init_post_recovery(
* format the regions into the iclog as though they are being formatted
* directly out of the objects themselves.
*/
-static void
-xlog_cil_format_items(
- struct log *log,
- struct xfs_log_vec *log_vector)
+static struct xfs_log_vec *
+xlog_cil_prepare_log_vecs(
+ struct xfs_trans *tp)
{
- struct xfs_log_vec *lv;
+ struct xfs_log_item_desc *lidp;
+ struct xfs_log_vec *lv = NULL;
+ struct xfs_log_vec *ret_lv = NULL;
- ASSERT(log_vector);
- for (lv = log_vector; lv; lv = lv->lv_next) {
+
+ /* Bail out if we didn't find a log item. */
+ if (list_empty(&tp->t_items)) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+ struct xfs_log_vec *new_lv;
void *ptr;
int index;
int len = 0;
+ uint niovecs;
+
+ /* Skip items which aren't dirty in this transaction. */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
+ continue;
+
+ /* Skip items that do not have any vectors for writing */
+ niovecs = IOP_SIZE(lidp->lid_item);
+ if (!niovecs)
+ continue;
+
+ new_lv = kmem_zalloc(sizeof(*new_lv) +
+ niovecs * sizeof(struct xfs_log_iovec),
+ KM_SLEEP);
+
+ /* The allocated iovec region lies beyond the log vector. */
+ new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
+ new_lv->lv_niovecs = niovecs;
+ new_lv->lv_item = lidp->lid_item;
 /* build the vector array and calculate its length */
- IOP_FORMAT(lv->lv_item, lv->lv_iovecp);
- for (index = 0; index < lv->lv_niovecs; index++)
- len += lv->lv_iovecp[index].i_len;
+ IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp);
+ for (index = 0; index < new_lv->lv_niovecs; index++)
+ len += new_lv->lv_iovecp[index].i_len;
- lv->lv_buf_len = len;
- lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
- ptr = lv->lv_buf;
+ new_lv->lv_buf_len = len;
+ new_lv->lv_buf = kmem_alloc(new_lv->lv_buf_len,
+ KM_SLEEP|KM_NOFS);
+ ptr = new_lv->lv_buf;
- for (index = 0; index < lv->lv_niovecs; index++) {
- struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
+ for (index = 0; index < new_lv->lv_niovecs; index++) {
+ struct xfs_log_iovec *vec = &new_lv->lv_iovecp[index];
memcpy(ptr, vec->i_addr, vec->i_len);
vec->i_addr = ptr;
ptr += vec->i_len;
}
- ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
+ ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len);
+
+ if (!ret_lv)
+ ret_lv = new_lv;
+ else
+ lv->lv_next = new_lv;
+ lv = new_lv;
}
+
+ return ret_lv;
}
/*
@@ -256,7 +279,7 @@ xfs_cil_prepare_item(
* Insert the log items into the CIL and calculate the difference in space
* consumed by the item. Add the space to the checkpoint ticket and calculate
* if the change requires additional log metadata. If it does, take that space
- * as well. Remove the amount of space we addded to the checkpoint ticket from
+ * as well. Remove the amount of space we added to the checkpoint ticket from
* the current transaction ticket so that the accounting works out correctly.
*/
static void
@@ -635,28 +658,30 @@ out_abort:
* background commit, returns without it held once background commits are
* allowed again.
*/
-void
+int
xfs_log_commit_cil(
struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_log_vec *log_vector,
xfs_lsn_t *commit_lsn,
int flags)
{
struct log *log = mp->m_log;
int log_flags = 0;
int push = 0;
+ struct xfs_log_vec *log_vector;
if (flags & XFS_TRANS_RELEASE_LOG_RES)
log_flags = XFS_LOG_REL_PERM_RESERV;
/*
- * do all the hard work of formatting items (including memory
+ * Do all the hard work of formatting items (including memory
* allocation) outside the CIL context lock. This prevents stalling CIL
* pushes when we are low on memory and a transaction commit spends a
* lot of time in memory reclaim.
*/
- xlog_cil_format_items(log, log_vector);
+ log_vector = xlog_cil_prepare_log_vecs(tp);
+ if (!log_vector)
+ return ENOMEM;
/* lock out background commit */
down_read(&log->l_cilp->xc_ctx_lock);
@@ -709,6 +734,7 @@ xfs_log_commit_cil(
*/
if (push)
xlog_cil_push(log, 0);
+ return 0;
}
/*
@@ -786,8 +812,6 @@ xfs_log_item_in_current_chkpt(
{
struct xfs_cil_ctx *ctx;
- if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG))
- return false;
if (list_empty(&lip->li_cil))
return false;
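One detail of xlog_cil_prepare_log_vecs() above worth calling out: each log vector and its iovec array come from a single kmem_zalloc(), with lv_iovecp pointed just past the struct. A small user-space sketch of that single-allocation layout, using hypothetical names and calloc standing in for kmem_zalloc:

#include <stdlib.h>

struct iovec_like {
	void   *addr;
	size_t  len;
};

struct log_vec_like {
	struct log_vec_like *next;
	struct iovec_like   *iovecp;	/* points into the same allocation */
	unsigned int         niovecs;
};

/*
 * One allocation carries the header plus its vector array; the array
 * begins immediately after the struct, exactly as lv_iovecp is set up
 * in xlog_cil_prepare_log_vecs() above.
 */
static struct log_vec_like *alloc_log_vec(unsigned int niovecs)
{
	struct log_vec_like *lv;

	lv = calloc(1, sizeof(*lv) + niovecs * sizeof(struct iovec_like));
	if (!lv)
		return NULL;
	lv->iovecp = (struct iovec_like *)&lv[1];
	lv->niovecs = niovecs;
	return lv;
}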
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bb24dac42a2..19f69e23250 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -219,7 +219,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
must be synchronous except
for space allocations */
-#define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 0bbb1a41998..671f37eae1c 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -154,12 +154,17 @@ STATIC void
xfs_qm_destroy(
struct xfs_qm *xqm)
{
- struct xfs_dquot *dqp, *n;
int hsize, i;
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
+
unregister_shrinker(&xfs_qm_shaker);
+
+ mutex_lock(&xqm->qm_dqfrlist_lock);
+ ASSERT(list_empty(&xqm->qm_dqfrlist));
+ mutex_unlock(&xqm->qm_dqfrlist_lock);
+
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
@@ -171,17 +176,6 @@ xfs_qm_destroy(
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
- /* frlist cleanup */
- mutex_lock(&xqm->qm_dqfrlist_lock);
- list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
- xfs_dqlock(dqp);
- list_del_init(&dqp->q_freelist);
- xfs_Gqm->qm_dqfrlist_cnt--;
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- }
- mutex_unlock(&xqm->qm_dqfrlist_lock);
- mutex_destroy(&xqm->qm_dqfrlist_lock);
kmem_free(xqm);
}
@@ -232,34 +226,10 @@ STATIC void
xfs_qm_rele_quotafs_ref(
struct xfs_mount *mp)
{
- xfs_dquot_t *dqp, *n;
-
ASSERT(xfs_Gqm);
ASSERT(xfs_Gqm->qm_nrefs > 0);
/*
- * Go thru the freelist and destroy all inactive dquots.
- */
- mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
- list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
- xfs_dqlock(dqp);
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(list_empty(&dqp->q_hashlist));
- ASSERT(list_empty(&dqp->q_mplist));
- list_del_init(&dqp->q_freelist);
- xfs_Gqm->qm_dqfrlist_cnt--;
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- } else {
- xfs_dqunlock(dqp);
- }
- }
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-
- /*
* Destroy the entire XQM. If somebody mounts with quotaon, this'll
* be restarted.
*/
@@ -415,8 +385,7 @@ xfs_qm_unmount_quotas(
*/
STATIC int
xfs_qm_dqflush_all(
- struct xfs_mount *mp,
- int sync_mode)
+ struct xfs_mount *mp)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl;
@@ -429,7 +398,8 @@ again:
mutex_lock(&q->qi_dqlist_lock);
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
- if (! XFS_DQ_IS_DIRTY(dqp)) {
+ if ((dqp->dq_flags & XFS_DQ_FREEING) ||
+ !XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqunlock(dqp);
continue;
}
@@ -444,14 +414,14 @@ again:
* out immediately. We'll be able to acquire
* the flush lock when the I/O completes.
*/
- xfs_qm_dqflock_pushbuf_wait(dqp);
+ xfs_dqflock_pushbuf_wait(dqp);
}
/*
* Let go of the mplist lock. We don't want to hold it
* across a disk write.
*/
mutex_unlock(&q->qi_dqlist_lock);
- error = xfs_qm_dqflush(dqp, sync_mode);
+ error = xfs_qm_dqflush(dqp, 0);
xfs_dqunlock(dqp);
if (error)
return error;
@@ -468,6 +438,7 @@ again:
/* return ! busy */
return 0;
}
+
/*
* Release the group dquot pointers the user dquots may be
* carrying around as a hint. mplist is locked on entry and exit.
@@ -478,31 +449,26 @@ xfs_qm_detach_gdquots(
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_dquot *dqp, *gdqp;
- int nrecl;
again:
ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
xfs_dqlock(dqp);
- if ((gdqp = dqp->q_gdquot)) {
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
- xfs_dqunlock(dqp);
-
- if (gdqp) {
- /*
- * Can't hold the mplist lock across a dqput.
- * XXXmust convert to marker based iterations here.
- */
- nrecl = q->qi_dqreclaims;
+ if (dqp->dq_flags & XFS_DQ_FREEING) {
+ xfs_dqunlock(dqp);
mutex_unlock(&q->qi_dqlist_lock);
- xfs_qm_dqput(gdqp);
-
+ delay(1);
mutex_lock(&q->qi_dqlist_lock);
- if (nrecl != q->qi_dqreclaims)
- goto again;
+ goto again;
}
+
+ gdqp = dqp->q_gdquot;
+ if (gdqp)
+ dqp->q_gdquot = NULL;
+ xfs_dqunlock(dqp);
+
+ if (gdqp)
+ xfs_qm_dqrele(gdqp);
}
}
@@ -520,8 +486,8 @@ xfs_qm_dqpurge_int(
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_dquot *dqp, *n;
uint dqtype;
- int nrecl;
- int nmisses;
+ int nmisses = 0;
+ LIST_HEAD(dispose_list);
if (!q)
return 0;
@@ -540,47 +506,26 @@ xfs_qm_dqpurge_int(
*/
xfs_qm_detach_gdquots(mp);
- again:
- nmisses = 0;
- ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
/*
- * Try to get rid of all of the unwanted dquots. The idea is to
- * get them off mplist and hashlist, but leave them on freelist.
+ * Try to get rid of all of the unwanted dquots.
*/
list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
- /*
- * It's OK to look at the type without taking dqlock here.
- * We're holding the mplist lock here, and that's needed for
- * a dqreclaim.
- */
- if ((dqp->dq_flags & dqtype) == 0)
- continue;
-
- if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- nrecl = q->qi_dqreclaims;
- mutex_unlock(&q->qi_dqlist_lock);
- mutex_lock(&dqp->q_hash->qh_lock);
- mutex_lock(&q->qi_dqlist_lock);
-
- /*
- * XXXTheoretically, we can get into a very long
- * ping pong game here.
- * No one can be adding dquots to the mplist at
- * this point, but somebody might be taking things off.
- */
- if (nrecl != q->qi_dqreclaims) {
- mutex_unlock(&dqp->q_hash->qh_lock);
- goto again;
- }
+ xfs_dqlock(dqp);
+ if ((dqp->dq_flags & dqtype) != 0 &&
+ !(dqp->dq_flags & XFS_DQ_FREEING)) {
+ if (dqp->q_nrefs == 0) {
+ dqp->dq_flags |= XFS_DQ_FREEING;
+ list_move_tail(&dqp->q_mplist, &dispose_list);
+ } else
+ nmisses++;
}
-
- /*
- * Take the dquot off the mplist and hashlist. It may remain on
- * freelist in INACTIVE state.
- */
- nmisses += xfs_qm_dqpurge(dqp);
+ xfs_dqunlock(dqp);
}
mutex_unlock(&q->qi_dqlist_lock);
+
+ list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist)
+ xfs_qm_dqpurge(dqp);
+
return nmisses;
}
@@ -648,12 +593,9 @@ xfs_qm_dqattach_one(
*/
dqp = udqhint->q_gdquot;
if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
- xfs_dqlock(dqp);
- XFS_DQHOLD(dqp);
ASSERT(*IO_idqpp == NULL);
- *IO_idqpp = dqp;
- xfs_dqunlock(dqp);
+ *IO_idqpp = xfs_qm_dqhold(dqp);
xfs_dqunlock(udqhint);
return 0;
}
@@ -693,11 +635,7 @@ xfs_qm_dqattach_one(
/*
* Given a udquot and gdquot, attach a ptr to the group dquot in the
- * udquot as a hint for future lookups. The idea sounds simple, but the
- * execution isn't, because the udquot might have a group dquot attached
- * already and getting rid of that gets us into lock ordering constraints.
- * The process is complicated more by the fact that the dquots may or may not
- * be locked on entry.
+ * udquot as a hint for future lookups.
*/
STATIC void
xfs_qm_dqattach_grouphint(
@@ -708,45 +646,17 @@ xfs_qm_dqattach_grouphint(
xfs_dqlock(udq);
- if ((tmp = udq->q_gdquot)) {
- if (tmp == gdq) {
- xfs_dqunlock(udq);
- return;
- }
+ tmp = udq->q_gdquot;
+ if (tmp) {
+ if (tmp == gdq)
+ goto done;
udq->q_gdquot = NULL;
- /*
- * We can't keep any dqlocks when calling dqrele,
- * because the freelist lock comes before dqlocks.
- */
- xfs_dqunlock(udq);
- /*
- * we took a hard reference once upon a time in dqget,
- * so give it back when the udquot no longer points at it
- * dqput() does the unlocking of the dquot.
- */
xfs_qm_dqrele(tmp);
-
- xfs_dqlock(udq);
- xfs_dqlock(gdq);
-
- } else {
- ASSERT(XFS_DQ_IS_LOCKED(udq));
- xfs_dqlock(gdq);
- }
-
- ASSERT(XFS_DQ_IS_LOCKED(udq));
- ASSERT(XFS_DQ_IS_LOCKED(gdq));
- /*
- * Somebody could have attached a gdquot here,
- * when we dropped the uqlock. If so, just do nothing.
- */
- if (udq->q_gdquot == NULL) {
- XFS_DQHOLD(gdq);
- udq->q_gdquot = gdq;
}
- xfs_dqunlock(gdq);
+ udq->q_gdquot = xfs_qm_dqhold(gdq);
+done:
xfs_dqunlock(udq);
}
@@ -813,17 +723,13 @@ xfs_qm_dqattach_locked(
ASSERT(ip->i_gdquot);
/*
- * We may or may not have the i_udquot locked at this point,
- * but this check is OK since we don't depend on the i_gdquot to
- * be accurate 100% all the time. It is just a hint, and this
- * will succeed in general.
- */
- if (ip->i_udquot->q_gdquot == ip->i_gdquot)
- goto done;
- /*
- * Attach i_gdquot to the gdquot hint inside the i_udquot.
+ * We do not have i_udquot locked at this point, but this check
+ * is OK since we don't depend on the i_gdquot to be accurate
+ * 100% all the time. It is just a hint, and this will
+ * succeed in general.
*/
- xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
+ if (ip->i_udquot->q_gdquot != ip->i_gdquot)
+ xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
}
done:
@@ -879,100 +785,6 @@ xfs_qm_dqdetach(
}
}
-int
-xfs_qm_sync(
- struct xfs_mount *mp,
- int flags)
-{
- struct xfs_quotainfo *q = mp->m_quotainfo;
- int recl, restarts;
- struct xfs_dquot *dqp;
- int error;
-
- if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
- return 0;
-
- restarts = 0;
-
- again:
- mutex_lock(&q->qi_dqlist_lock);
- /*
- * dqpurge_all() also takes the mplist lock and iterate thru all dquots
- * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
- * when we have the mplist lock, we know that dquots will be consistent
- * as long as we have it locked.
- */
- if (!XFS_IS_QUOTA_ON(mp)) {
- mutex_unlock(&q->qi_dqlist_lock);
- return 0;
- }
- ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
- list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
- /*
- * If this is vfs_sync calling, then skip the dquots that
- * don't 'seem' to be dirty. ie. don't acquire dqlock.
- * This is very similar to what xfs_sync does with inodes.
- */
- if (flags & SYNC_TRYLOCK) {
- if (!XFS_DQ_IS_DIRTY(dqp))
- continue;
- if (!xfs_qm_dqlock_nowait(dqp))
- continue;
- } else {
- xfs_dqlock(dqp);
- }
-
- /*
- * Now, find out for sure if this dquot is dirty or not.
- */
- if (! XFS_DQ_IS_DIRTY(dqp)) {
- xfs_dqunlock(dqp);
- continue;
- }
-
- /* XXX a sentinel would be better */
- recl = q->qi_dqreclaims;
- if (!xfs_dqflock_nowait(dqp)) {
- if (flags & SYNC_TRYLOCK) {
- xfs_dqunlock(dqp);
- continue;
- }
- /*
- * If we can't grab the flush lock then if the caller
- * really wanted us to give this our best shot, so
- * see if we can give a push to the buffer before we wait
- * on the flush lock. At this point, we know that
- * even though the dquot is being flushed,
- * it has (new) dirty data.
- */
- xfs_qm_dqflock_pushbuf_wait(dqp);
- }
- /*
- * Let go of the mplist lock. We don't want to hold it
- * across a disk write
- */
- mutex_unlock(&q->qi_dqlist_lock);
- error = xfs_qm_dqflush(dqp, flags);
- xfs_dqunlock(dqp);
- if (error && XFS_FORCED_SHUTDOWN(mp))
- return 0; /* Need to prevent umount failure */
- else if (error)
- return error;
-
- mutex_lock(&q->qi_dqlist_lock);
- if (recl != q->qi_dqreclaims) {
- if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
- break;
-
- mutex_unlock(&q->qi_dqlist_lock);
- goto again;
- }
- }
-
- mutex_unlock(&q->qi_dqlist_lock);
- return 0;
-}
-
/*
* The hash chains and the mplist use the same xfs_dqhash structure as
* their list head, but we can take the mplist qh_lock and one of the
@@ -1034,18 +846,21 @@ xfs_qm_init_quotainfo(
/*
* We try to get the limits from the superuser's limits fields.
* This is quite hacky, but it is standard quota practice.
+ *
* We look at the USR dquot with id == 0 first, but if user quotas
* are not enabled we goto the GRP dquot with id == 0.
* We don't really care to keep separate default limits for user
* and group quotas, at least not at this point.
+ *
+ * Since we may not have done a quotacheck by this point, just read
+ * the dquot without attaching it to any hashtables or lists.
*/
- error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
- XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
- (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
- XFS_DQ_PROJ),
- XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
- &dqp);
- if (! error) {
+ error = xfs_qm_dqread(mp, 0,
+ XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
+ (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
+ XFS_DQ_PROJ),
+ XFS_QMOPT_DOWARN, &dqp);
+ if (!error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
/*
@@ -1072,11 +887,6 @@ xfs_qm_init_quotainfo(
qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
- /*
- * We sent the XFS_QMOPT_DQSUSER flag to dqget because
- * we don't want this dquot cached. We haven't done a
- * quotacheck yet, and quotacheck doesn't like incore dquots.
- */
xfs_qm_dqdestroy(dqp);
} else {
qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -1661,7 +1471,7 @@ xfs_qm_quotacheck(
* successfully.
*/
if (!error)
- error = xfs_qm_dqflush_all(mp, 0);
+ error = xfs_qm_dqflush_all(mp);
/*
* We can get this error if we couldn't do a dquot allocation inside
@@ -1793,59 +1603,33 @@ xfs_qm_init_quotainos(
/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
+ * Pop the least recently used dquot off the freelist and recycle it.
*/
-STATIC xfs_dquot_t *
+STATIC struct xfs_dquot *
xfs_qm_dqreclaim_one(void)
{
- xfs_dquot_t *dqpout;
- xfs_dquot_t *dqp;
- int restarts;
- int startagain;
-
- restarts = 0;
- dqpout = NULL;
+ struct xfs_dquot *dqp;
+ int restarts = 0;
- /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
-again:
- startagain = 0;
mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
+restart:
list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
struct xfs_mount *mp = dqp->q_mount;
- xfs_dqlock(dqp);
+
+ if (!xfs_dqlock_nowait(dqp))
+ continue;
/*
- * We are racing with dqlookup here. Naturally we don't
- * want to reclaim a dquot that lookup wants. We release the
- * freelist lock and start over, so that lookup will grab
- * both the dquot and the freelistlock.
+ * This dquot has already been grabbed by dqlookup.
+ * Remove it from the freelist and try again.
*/
- if (dqp->dq_flags & XFS_DQ_WANT) {
- ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-
+ if (dqp->q_nrefs) {
trace_xfs_dqreclaim_want(dqp);
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
- restarts++;
- startagain = 1;
- goto dqunlock;
- }
- /*
- * If the dquot is inactive, we are assured that it is
- * not on the mplist or the hashlist, and that makes our
- * life easier.
- */
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(mp == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(list_empty(&dqp->q_hashlist));
- ASSERT(list_empty(&dqp->q_mplist));
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
- dqpout = dqp;
- XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
+ restarts++;
goto dqunlock;
}
@@ -1874,64 +1658,49 @@ again:
* We flush it delayed write, so don't bother
* releasing the freelist lock.
*/
- error = xfs_qm_dqflush(dqp, 0);
+ error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
if (error) {
xfs_warn(mp, "%s: dquot %p flush failed",
__func__, dqp);
}
goto dqunlock;
}
+ xfs_dqfunlock(dqp);
/*
- * We're trying to get the hashlock out of order. This races
- * with dqlookup; so, we giveup and goto the next dquot if
- * we couldn't get the hashlock. This way, we won't starve
- * a dqlookup process that holds the hashlock that is
- * waiting for the freelist lock.
+ * Prevent lookup now that we are going to reclaim the dquot.
+ * Once XFS_DQ_FREEING is set lookup won't touch the dquot,
+ * thus we can drop the lock now.
*/
- if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
- restarts++;
- goto dqfunlock;
- }
+ dqp->dq_flags |= XFS_DQ_FREEING;
+ xfs_dqunlock(dqp);
- /*
- * This races with dquot allocation code as well as dqflush_all
- * and reclaim code. So, if we failed to grab the mplist lock,
- * giveup everything and start over.
- */
- if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
- restarts++;
- startagain = 1;
- goto qhunlock;
- }
+ mutex_lock(&dqp->q_hash->qh_lock);
+ list_del_init(&dqp->q_hashlist);
+ dqp->q_hash->qh_version++;
+ mutex_unlock(&dqp->q_hash->qh_lock);
- ASSERT(dqp->q_nrefs == 0);
+ mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
list_del_init(&dqp->q_mplist);
mp->m_quotainfo->qi_dquots--;
mp->m_quotainfo->qi_dqreclaims++;
- list_del_init(&dqp->q_hashlist);
- dqp->q_hash->qh_version++;
+ mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+
+ ASSERT(dqp->q_nrefs == 0);
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
- dqpout = dqp;
- mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-qhunlock:
- mutex_unlock(&dqp->q_hash->qh_lock);
-dqfunlock:
- xfs_dqfunlock(dqp);
+
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+ return dqp;
dqunlock:
xfs_dqunlock(dqp);
- if (dqpout)
- break;
if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
break;
- if (startagain) {
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
- goto again;
- }
+ goto restart;
}
+
mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
- return dqpout;
+ return NULL;
}
/*
@@ -2151,10 +1920,7 @@ xfs_qm_vop_dqalloc(
* this to caller
*/
ASSERT(ip->i_udquot);
- uq = ip->i_udquot;
- xfs_dqlock(uq);
- XFS_DQHOLD(uq);
- xfs_dqunlock(uq);
+ uq = xfs_qm_dqhold(ip->i_udquot);
}
}
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
@@ -2175,10 +1941,7 @@ xfs_qm_vop_dqalloc(
xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
- gq = ip->i_gdquot;
- xfs_dqlock(gq);
- XFS_DQHOLD(gq);
- xfs_dqunlock(gq);
+ gq = xfs_qm_dqhold(ip->i_gdquot);
}
} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
if (xfs_get_projid(ip) != prid) {
@@ -2198,10 +1961,7 @@ xfs_qm_vop_dqalloc(
xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
- gq = ip->i_gdquot;
- xfs_dqlock(gq);
- XFS_DQHOLD(gq);
- xfs_dqunlock(gq);
+ gq = xfs_qm_dqhold(ip->i_gdquot);
}
}
if (uq)
@@ -2251,14 +2011,10 @@ xfs_qm_vop_chown(
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
- * Take an extra reference, because the inode
- * is going to keep this dquot pointer even
- * after the trans_commit.
+ * Take an extra reference, because the inode is going to keep
+ * this dquot pointer even after the trans_commit.
*/
- xfs_dqlock(newdq);
- XFS_DQHOLD(newdq);
- xfs_dqunlock(newdq);
- *IO_olddq = newdq;
+ *IO_olddq = xfs_qm_dqhold(newdq);
return prevdq;
}
@@ -2390,25 +2146,21 @@ xfs_qm_vop_create_dqattach(
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
if (udqp) {
- xfs_dqlock(udqp);
- XFS_DQHOLD(udqp);
- xfs_dqunlock(udqp);
ASSERT(ip->i_udquot == NULL);
- ip->i_udquot = udqp;
ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
+
+ ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp) {
- xfs_dqlock(gdqp);
- XFS_DQHOLD(gdqp);
- xfs_dqunlock(gdqp);
ASSERT(ip->i_gdquot == NULL);
- ip->i_gdquot = gdqp;
ASSERT(XFS_IS_OQUOTA_ON(mp));
ASSERT((XFS_IS_GQUOTA_ON(mp) ?
ip->i_d.di_gid : xfs_get_projid(ip)) ==
be32_to_cpu(gdqp->q_core.d_id));
+
+ ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
}
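The xfs_qm_dqpurge_int() rework above relies on a common two-pass teardown: under the shared list lock, mark each unreferenced victim (XFS_DQ_FREEING) and move it onto a private dispose list; then drop the lock and purge the dispose list without holding it. A rough stand-alone sketch of that pattern, with pthreads and illustrative names rather than the XFS structures:

#include <pthread.h>
#include <stdbool.h>

struct node {
	struct node *next;
	bool         freeing;	/* stand-in for XFS_DQ_FREEING */
	int          refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;

static void destroy(struct node *n) { (void)n; /* real teardown goes here */ }

/*
 * Pass 1 (under the lock): mark unreferenced nodes as freeing and unhook
 * them onto a private dispose list.  Pass 2 (lock dropped): destroy them
 * without holding the shared lock across the expensive work.
 */
static void purge_all(void)
{
	struct node *dispose = NULL, **pp, *n;

	pthread_mutex_lock(&list_lock);
	pp = &shared_list;
	while ((n = *pp) != NULL) {
		if (n->refs == 0) {
			n->freeing = true;
			*pp = n->next;
			n->next = dispose;
			dispose = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while ((n = dispose) != NULL) {
		dispose = n->next;
		destroy(n);
	}
}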
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 43b9abe1052..9b4f3adefbc 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -33,12 +33,6 @@ extern kmem_zone_t *qm_dqzone;
extern kmem_zone_t *qm_dqtrxzone;
/*
- * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
- * iterate over the mountpt's dquot list in one call.
- */
-#define XFS_QM_SYNC_MAX_RESTARTS 7
-
-/*
* Ditto, for xfs_qm_dqreclaim_one.
*/
#define XFS_QM_RECLAIM_MAX_RESTARTS 4
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index a595f29567f..8a0807e0f97 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -87,8 +87,7 @@ typedef struct xfs_dqblk {
#define XFS_DQ_PROJ 0x0002 /* project quota */
#define XFS_DQ_GROUP 0x0004 /* a group quota */
#define XFS_DQ_DIRTY 0x0008 /* dquot is dirty */
-#define XFS_DQ_WANT 0x0010 /* for lookup/reclaim race */
-#define XFS_DQ_INACTIVE 0x0020 /* dq off mplist & hashlist */
+#define XFS_DQ_FREEING 0x0010 /* dquot is being torn down */
#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
@@ -97,8 +96,7 @@ typedef struct xfs_dqblk {
{ XFS_DQ_PROJ, "PROJ" }, \
{ XFS_DQ_GROUP, "GROUP" }, \
{ XFS_DQ_DIRTY, "DIRTY" }, \
- { XFS_DQ_WANT, "WANT" }, \
- { XFS_DQ_INACTIVE, "INACTIVE" }
+ { XFS_DQ_FREEING, "FREEING" }
/*
* In the worst case, when both user and group quotas are on,
@@ -199,7 +197,6 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */
#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
-#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
@@ -326,7 +323,6 @@ extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
extern void xfs_qm_dqdetach(struct xfs_inode *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
-extern int xfs_qm_sync(struct xfs_mount *, int);
extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
@@ -366,10 +362,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
#define xfs_qm_dqdetach(ip)
#define xfs_qm_dqrele(d)
#define xfs_qm_statvfs(ip, s)
-static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
-{
- return 0;
-}
#define xfs_qm_newmount(mp, a, b) (0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 3eca58f51ae..281961c1d81 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -199,7 +199,6 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- mp->m_flags |= XFS_MOUNT_DELAYLOG;
/*
* These can be overridden by the mount option parsing.
@@ -353,11 +352,11 @@ xfs_parseargs(
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_OQUOTA_ENFD;
} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
- mp->m_flags |= XFS_MOUNT_DELAYLOG;
+ xfs_warn(mp,
+ "delaylog is the default now, option is deprecated.");
} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
- mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
xfs_warn(mp,
- "nodelaylog is deprecated and will be removed in Linux 3.3");
+ "nodelaylog support has been removed, option is deprecated.");
} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
mp->m_flags |= XFS_MOUNT_DISCARD;
} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
@@ -395,13 +394,6 @@ xfs_parseargs(
return EINVAL;
}
- if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
- !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
- xfs_warn(mp,
- "the discard option is incompatible with the nodelaylog option");
- return EINVAL;
- }
-
#ifndef CONFIG_XFS_QUOTA
if (XFS_IS_QUOTA_RUNNING(mp)) {
xfs_warn(mp, "quota support not available in this kernel.");
@@ -501,7 +493,6 @@ xfs_showargs(
{ XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
{ XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
{ XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
- { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
{ XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD },
{ 0, NULL }
};
@@ -869,27 +860,6 @@ xfs_fs_dirty_inode(
}
STATIC int
-xfs_log_inode(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- return xfs_trans_commit(tp, 0);
-}
-
-STATIC int
xfs_fs_write_inode(
struct inode *inode,
struct writeback_control *wbc)
@@ -902,10 +872,8 @@ xfs_fs_write_inode(
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
- if (!ip->i_update_core)
- return 0;
- if (wbc->sync_mode == WB_SYNC_ALL) {
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
/*
* Make sure the inode has made it into the log. Instead
* of forcing it all the way to stable storage using a
@@ -913,11 +881,14 @@ xfs_fs_write_inode(
* ->sync_fs call do that for us, which reduces the number
* of synchronous log forces dramatically.
*/
- error = xfs_log_inode(ip);
+ error = xfs_log_dirty_inode(ip, NULL, 0);
if (error)
goto out;
return 0;
} else {
+ if (!ip->i_update_core)
+ return 0;
+
/*
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
@@ -1034,17 +1005,10 @@ xfs_fs_sync_fs(
int error;
/*
- * Not much we can do for the first async pass. Writing out the
- * superblock would be counter-productive as we are going to redirty
- * when writing out other data and metadata (and writing out a single
- * block is quite fast anyway).
- *
- * Try to asynchronously kick off quota syncing at least.
+ * Doing anything during the async pass would be counterproductive.
*/
- if (!wait) {
- xfs_qm_sync(mp, SYNC_TRYLOCK);
+ if (!wait)
return 0;
- }
error = xfs_quiesce_data(mp);
if (error)
@@ -1258,9 +1222,9 @@ xfs_fs_unfreeze(
STATIC int
xfs_fs_show_options(
struct seq_file *m,
- struct vfsmount *mnt)
+ struct dentry *root)
{
- return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
+ return -xfs_showargs(XFS_M(root->d_sb), m);
}
/*
@@ -1641,12 +1605,12 @@ STATIC int __init
xfs_init_workqueues(void)
{
/*
- * max_active is set to 8 to give enough concurency to allow
- * multiple work operations on each CPU to run. This allows multiple
- * filesystems to be running sync work concurrently, and scales with
- * the number of CPUs in the system.
+ * We never want the same work item to run twice: reclaiming inodes
+ * or idling the log is not going to get any faster by multiple CPUs
+ * competing for resources. Use the default large max_active value
+ * so that even lots of filesystems can perform these tasks in parallel.
*/
- xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
if (!xfs_syncd_wq)
return -ENOMEM;
return 0;
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index aa3dc1a4d53..72c01a1c16e 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
return error;
}
+int
+xfs_log_dirty_inode(
+ struct xfs_inode *ip,
+ struct xfs_perag *pag,
+ int flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ if (!ip->i_update_core)
+ return 0;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp, 0);
+}
+
/*
* When remounting a filesystem read-only or freezing the filesystem, we have
* two phases to execute. This first phase is syncing the data before we
@@ -359,10 +385,17 @@ xfs_quiesce_data(
{
int error, error2 = 0;
- xfs_qm_sync(mp, SYNC_TRYLOCK);
- xfs_qm_sync(mp, SYNC_WAIT);
+ /*
+ * Log all pending size and timestamp updates. The vfs writeback
+ * code is supposed to do this, but due to its overly aggressive
+ * livelock detection it will skip inodes where appending writes
+ * were written out in the first non-blocking sync phase if their
+ * completion took long enough that it happened after taking the
+ * timestamp for the cut-off in the blocking phase.
+ */
+ xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
- /* force out the newly dirtied log buffers */
+ /* force out the log */
xfs_log_force(mp, XFS_LOG_SYNC);
/* write superblock and hoover up shutdown errors */
@@ -470,7 +503,6 @@ xfs_sync_worker(
error = xfs_fs_log_dummy(mp);
else
xfs_log_force(mp, 0);
- error = xfs_qm_sync(mp, SYNC_TRYLOCK);
/* start pushing all the metadata that is currently dirty */
xfs_ail_push_all(mp->m_ail);
@@ -770,6 +802,17 @@ restart:
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out;
+
+ /*
+ * If we only have a single dirty inode in a cluster there is
+ * a fair chance that the AIL push may have pushed it into
+ * the buffer, but xfsbufd won't touch it until 30 seconds
+ * from now, and thus we will lock up here.
+ *
+ * Promote the inode buffer to the front of the delwri list
+ * and wake up xfsbufd now.
+ */
+ xfs_promote_inode(ip);
xfs_iflock(ip);
}
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
index 941202e7ac6..fa965479d78 100644
--- a/fs/xfs/xfs_sync.h
+++ b/fs/xfs/xfs_sync.h
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inodes(struct xfs_inode *ip);
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
int xfs_reclaim_inodes_count(struct xfs_mount *mp);
void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f1d2802b2f0..a9d5b1e06ef 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -743,8 +743,6 @@ DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
DEFINE_DQUOT_EVENT(xfs_dqread);
DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
-DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
@@ -834,18 +832,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 1f35b2feca9..329b06aba1c 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1158,7 +1158,6 @@ xfs_trans_add_item(
lidp->lid_item = lip;
lidp->lid_flags = 0;
- lidp->lid_size = 0;
list_add_tail(&lidp->lid_trans, &tp->t_items);
lip->li_desc = lidp;
@@ -1210,219 +1209,6 @@ xfs_trans_free_items(
}
}
-/*
- * Unlock the items associated with a transaction.
- *
- * Items which were not logged should be freed. Those which were logged must
- * still be tracked so they can be unpinned when the transaction commits.
- */
-STATIC void
-xfs_trans_unlock_items(
- struct xfs_trans *tp,
- xfs_lsn_t commit_lsn)
-{
- struct xfs_log_item_desc *lidp, *next;
-
- list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
- struct xfs_log_item *lip = lidp->lid_item;
-
- lip->li_desc = NULL;
-
- if (commit_lsn != NULLCOMMITLSN)
- IOP_COMMITTING(lip, commit_lsn);
- IOP_UNLOCK(lip);
-
- /*
- * Free the descriptor if the item is not dirty
- * within this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY))
- xfs_trans_free_item_desc(lidp);
- }
-}
-
-/*
- * Total up the number of log iovecs needed to commit this
- * transaction. The transaction itself needs one for the
- * transaction header. Ask each dirty item in turn how many
- * it needs to get the total.
- */
-static uint
-xfs_trans_count_vecs(
- struct xfs_trans *tp)
-{
- int nvecs;
- struct xfs_log_item_desc *lidp;
-
- nvecs = 1;
-
- /* In the non-debug case we need to start bailing out if we
- * didn't find a log_item here, return zero and let trans_commit
- * deal with it.
- */
- if (list_empty(&tp->t_items)) {
- ASSERT(0);
- return 0;
- }
-
- list_for_each_entry(lidp, &tp->t_items, lid_trans) {
- /*
- * Skip items which aren't dirty in this transaction.
- */
- if (!(lidp->lid_flags & XFS_LID_DIRTY))
- continue;
- lidp->lid_size = IOP_SIZE(lidp->lid_item);
- nvecs += lidp->lid_size;
- }
-
- return nvecs;
-}
-
-/*
- * Fill in the vector with pointers to data to be logged
- * by this transaction. The transaction header takes
- * the first vector, and then each dirty item takes the
- * number of vectors it indicated it needed in xfs_trans_count_vecs().
- *
- * As each item fills in the entries it needs, also pin the item
- * so that it cannot be flushed out until the log write completes.
- */
-static void
-xfs_trans_fill_vecs(
- struct xfs_trans *tp,
- struct xfs_log_iovec *log_vector)
-{
- struct xfs_log_item_desc *lidp;
- struct xfs_log_iovec *vecp;
- uint nitems;
-
- /*
- * Skip over the entry for the transaction header, we'll
- * fill that in at the end.
- */
- vecp = log_vector + 1;
-
- nitems = 0;
- ASSERT(!list_empty(&tp->t_items));
- list_for_each_entry(lidp, &tp->t_items, lid_trans) {
- /* Skip items which aren't dirty in this transaction. */
- if (!(lidp->lid_flags & XFS_LID_DIRTY))
- continue;
-
- /*
- * The item may be marked dirty but not log anything. This can
- * be used to get called when a transaction is committed.
- */
- if (lidp->lid_size)
- nitems++;
- IOP_FORMAT(lidp->lid_item, vecp);
- vecp += lidp->lid_size;
- IOP_PIN(lidp->lid_item);
- }
-
- /*
- * Now that we've counted the number of items in this transaction, fill
- * in the transaction header. Note that the transaction header does not
- * have a log item.
- */
- tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
- tp->t_header.th_type = tp->t_type;
- tp->t_header.th_num_items = nitems;
- log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
- log_vector->i_len = sizeof(xfs_trans_header_t);
- log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
-}
-
-/*
- * The committed item processing consists of calling the committed routine of
- * each logged item, updating the item's position in the AIL if necessary, and
- * unpinning each item. If the committed routine returns -1, then do nothing
- * further with the item because it may have been freed.
- *
- * Since items are unlocked when they are copied to the incore log, it is
- * possible for two transactions to be completing and manipulating the same
- * item simultaneously. The AIL lock will protect the lsn field of each item.
- * The value of this field can never go backwards.
- *
- * We unpin the items after repositioning them in the AIL, because otherwise
- * they could be immediately flushed and we'd have to race with the flusher
- * trying to pull the item from the AIL as we add it.
- */
-static void
-xfs_trans_item_committed(
- struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn,
- int aborted)
-{
- xfs_lsn_t item_lsn;
- struct xfs_ail *ailp;
-
- if (aborted)
- lip->li_flags |= XFS_LI_ABORTED;
- item_lsn = IOP_COMMITTED(lip, commit_lsn);
-
- /* item_lsn of -1 means the item needs no further processing */
- if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
- return;
-
- /*
- * If the returned lsn is greater than what it contained before, update
- * the location of the item in the AIL. If it is not, then do nothing.
- * Items can never move backwards in the AIL.
- *
- * While the new lsn should usually be greater, it is possible that a
- * later transaction completing simultaneously with an earlier one
- * using the same item could complete first with a higher lsn. This
- * would cause the earlier transaction to fail the test below.
- */
- ailp = lip->li_ailp;
- spin_lock(&ailp->xa_lock);
- if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
- /*
- * This will set the item's lsn to item_lsn and update the
- * position of the item in the AIL.
- *
- * xfs_trans_ail_update() drops the AIL lock.
- */
- xfs_trans_ail_update(ailp, lip, item_lsn);
- } else {
- spin_unlock(&ailp->xa_lock);
- }
-
- /*
- * Now that we've repositioned the item in the AIL, unpin it so it can
- * be flushed. Pass information about buffer stale state down from the
- * log item flags, if anyone else stales the buffer we do not want to
- * pay any attention to it.
- */
- IOP_UNPIN(lip, 0);
-}
-
-/*
- * This is typically called by the LM when a transaction has been fully
- * committed to disk. It needs to unpin the items which have
- * been logged by the transaction and update their positions
- * in the AIL if necessary.
- *
- * This also gets called when the transactions didn't get written out
- * because of an I/O error. Abortflag & XFS_LI_ABORTED is set then.
- */
-STATIC void
-xfs_trans_committed(
- void *arg,
- int abortflag)
-{
- struct xfs_trans *tp = arg;
- struct xfs_log_item_desc *lidp, *next;
-
- list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
- xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
- xfs_trans_free_item_desc(lidp);
- }
-
- xfs_trans_free(tp);
-}
-
static inline void
xfs_log_item_batch_insert(
struct xfs_ail *ailp,
@@ -1538,258 +1324,6 @@ xfs_trans_committed_bulk(
}
/*
- * Called from the trans_commit code when we notice that the filesystem is in
- * the middle of a forced shutdown.
- *
- * When we are called here, we have already pinned all the items in the
- * transaction. However, neither IOP_COMMITTING or IOP_UNLOCK has been called
- * so we can simply walk the items in the transaction, unpin them with an abort
- * flag and then free the items. Note that unpinning the items can result in
- * them being freed immediately, so we need to use a safe list traversal method
- * here.
- */
-STATIC void
-xfs_trans_uncommit(
- struct xfs_trans *tp,
- uint flags)
-{
- struct xfs_log_item_desc *lidp, *n;
-
- list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
- if (lidp->lid_flags & XFS_LID_DIRTY)
- IOP_UNPIN(lidp->lid_item, 1);
- }
-
- xfs_trans_unreserve_and_mod_sb(tp);
- xfs_trans_unreserve_and_mod_dquots(tp);
-
- xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
- xfs_trans_free(tp);
-}
-
-/*
- * Format the transaction direct to the iclog. This isolates the physical
- * transaction commit operation from the logical operation and hence allows
- * other methods to be introduced without affecting the existing commit path.
- */
-static int
-xfs_trans_commit_iclog(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn,
- int flags)
-{
- int shutdown;
- int error;
- int log_flags = 0;
- struct xlog_in_core *commit_iclog;
-#define XFS_TRANS_LOGVEC_COUNT 16
- struct xfs_log_iovec log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
- struct xfs_log_iovec *log_vector;
- uint nvec;
-
-
- /*
- * Ask each log item how many log_vector entries it will
- * need so we can figure out how many to allocate.
- * Try to avoid the kmem_alloc() call in the common case
- * by using a vector from the stack when it fits.
- */
- nvec = xfs_trans_count_vecs(tp);
- if (nvec == 0) {
- return ENOMEM; /* triggers a shutdown! */
- } else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
- log_vector = log_vector_fast;
- } else {
- log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
- sizeof(xfs_log_iovec_t),
- KM_SLEEP);
- }
-
- /*
- * Fill in the log_vector and pin the logged items, and
- * then write the transaction to the log.
- */
- xfs_trans_fill_vecs(tp, log_vector);
-
- if (flags & XFS_TRANS_RELEASE_LOG_RES)
- log_flags = XFS_LOG_REL_PERM_RESERV;
-
- error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));
-
- /*
- * The transaction is committed incore here, and can go out to disk
- * at any time after this call. However, all the items associated
- * with the transaction are still locked and pinned in memory.
- */
- *commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);
-
- tp->t_commit_lsn = *commit_lsn;
- trace_xfs_trans_commit_lsn(tp);
-
- if (nvec > XFS_TRANS_LOGVEC_COUNT)
- kmem_free(log_vector);
-
- /*
- * If we got a log write error. Unpin the logitems that we
- * had pinned, clean up, free trans structure, and return error.
- */
- if (error || *commit_lsn == -1) {
- current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
- xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
- return XFS_ERROR(EIO);
- }
-
- /*
- * Once the transaction has committed, unused
- * reservations need to be released and changes to
- * the superblock need to be reflected in the in-core
- * version. Do that now.
- */
- xfs_trans_unreserve_and_mod_sb(tp);
-
- /*
- * Tell the LM to call the transaction completion routine
- * when the log write with LSN commit_lsn completes (e.g.
- * when the transaction commit really hits the on-disk log).
- * After this call we cannot reference tp, because the call
- * can happen at any time and the call will free the transaction
- * structure pointed to by tp. The only case where we call
- * the completion routine (xfs_trans_committed) directly is
- * if the log is turned off on a debug kernel or we're
- * running in simulation mode (the log is explicitly turned
- * off).
- */
- tp->t_logcb.cb_func = xfs_trans_committed;
- tp->t_logcb.cb_arg = tp;
-
- /*
- * We need to pass the iclog buffer which was used for the
- * transaction commit record into this function, and attach
- * the callback to it. The callback must be attached before
- * the items are unlocked to avoid racing with other threads
- * waiting for an item to unlock.
- */
- shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));
-
- /*
- * Mark this thread as no longer being in a transaction
- */
- current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-
- /*
- * Once all the items of the transaction have been copied
- * to the in core log and the callback is attached, the
- * items can be unlocked.
- *
- * This will free descriptors pointing to items which were
- * not logged since there is nothing more to do with them.
- * For items which were logged, we will keep pointers to them
- * so they can be unpinned after the transaction commits to disk.
- * This will also stamp each modified meta-data item with
- * the commit lsn of this transaction for dependency tracking
- * purposes.
- */
- xfs_trans_unlock_items(tp, *commit_lsn);
-
- /*
- * If we detected a log error earlier, finish committing
- * the transaction now (unpin log items, etc).
- *
- * Order is critical here, to avoid using the transaction
- * pointer after its been freed (by xfs_trans_committed
- * either here now, or as a callback). We cannot do this
- * step inside xfs_log_notify as was done earlier because
- * of this issue.
- */
- if (shutdown)
- xfs_trans_committed(tp, XFS_LI_ABORTED);
-
- /*
- * Now that the xfs_trans_committed callback has been attached,
- * and the items are released we can finally allow the iclog to
- * go to disk.
- */
- return xfs_log_release_iclog(mp, commit_iclog);
-}
-
-/*
- * Walk the log items and allocate log vector structures for
- * each item large enough to fit all the vectors they require.
- * Note that this format differs from the old log vector format in
- * that there is no transaction header in these log vectors.
- */
-STATIC struct xfs_log_vec *
-xfs_trans_alloc_log_vecs(
- xfs_trans_t *tp)
-{
- struct xfs_log_item_desc *lidp;
- struct xfs_log_vec *lv = NULL;
- struct xfs_log_vec *ret_lv = NULL;
-
-
- /* Bail out if we didn't find a log item. */
- if (list_empty(&tp->t_items)) {
- ASSERT(0);
- return NULL;
- }
-
- list_for_each_entry(lidp, &tp->t_items, lid_trans) {
- struct xfs_log_vec *new_lv;
-
- /* Skip items which aren't dirty in this transaction. */
- if (!(lidp->lid_flags & XFS_LID_DIRTY))
- continue;
-
- /* Skip items that do not have any vectors for writing */
- lidp->lid_size = IOP_SIZE(lidp->lid_item);
- if (!lidp->lid_size)
- continue;
-
- new_lv = kmem_zalloc(sizeof(*new_lv) +
- lidp->lid_size * sizeof(struct xfs_log_iovec),
- KM_SLEEP);
-
- /* The allocated iovec region lies beyond the log vector. */
- new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
- new_lv->lv_niovecs = lidp->lid_size;
- new_lv->lv_item = lidp->lid_item;
- if (!ret_lv)
- ret_lv = new_lv;
- else
- lv->lv_next = new_lv;
- lv = new_lv;
- }
-
- return ret_lv;
-}
-
-static int
-xfs_trans_commit_cil(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- xfs_lsn_t *commit_lsn,
- int flags)
-{
- struct xfs_log_vec *log_vector;
-
- /*
- * Get each log item to allocate a vector structure for
- * the log item to to pass to the log write code. The
- * CIL commit code will format the vector and save it away.
- */
- log_vector = xfs_trans_alloc_log_vecs(tp);
- if (!log_vector)
- return ENOMEM;
-
- xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
-
- current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
- xfs_trans_free(tp);
- return 0;
-}
-
-/*
* Commit the given transaction to the log.
*
* XFS disk error handling mechanism is not based on a typical
@@ -1845,17 +1379,16 @@ xfs_trans_commit(
xfs_trans_apply_sb_deltas(tp);
xfs_trans_apply_dquot_deltas(tp);
- if (mp->m_flags & XFS_MOUNT_DELAYLOG)
- error = xfs_trans_commit_cil(mp, tp, &commit_lsn, flags);
- else
- error = xfs_trans_commit_iclog(mp, tp, &commit_lsn, flags);
-
+ error = xfs_log_commit_cil(mp, tp, &commit_lsn, flags);
if (error == ENOMEM) {
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
error = XFS_ERROR(EIO);
goto out_unreserve;
}
+ current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
+ xfs_trans_free(tp);
+
/*
* If the transaction needs to be synchronous, then force the
* log out now and wait for it.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 3ae713c0abd..f6118703f20 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -163,9 +163,8 @@ typedef struct xfs_trans_header {
*/
struct xfs_log_item_desc {
struct xfs_log_item *lid_item;
- ushort lid_size;
- unsigned char lid_flags;
struct list_head lid_trans;
+ unsigned char lid_flags;
};
#define XFS_LID_DIRTY 0x1
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 8b32d1a4c5a..89dbb4a5087 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -53,7 +53,7 @@ xfs_dir_ialloc(
output: may be a new transaction. */
xfs_inode_t *dp, /* directory within which to allocate
the inode. */
- mode_t mode,
+ umode_t mode,
xfs_nlink_t nlink,
xfs_dev_t rdev,
prid_t prid, /* project id */
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index 456fca31493..5eeab4690cf 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,7 +18,7 @@
#ifndef __XFS_UTILS_H__
#define __XFS_UTILS_H__
-extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
+extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, umode_t, xfs_nlink_t,
xfs_dev_t, prid_t, int, xfs_inode_t **, int *);
extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index ce9268a2f56..f2fea868d4d 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -822,7 +822,7 @@ int
xfs_create(
xfs_inode_t *dp,
struct xfs_name *name,
- mode_t mode,
+ umode_t mode,
xfs_dev_t rdev,
xfs_inode_t **ipp)
{
@@ -1481,7 +1481,7 @@ xfs_symlink(
xfs_inode_t *dp,
struct xfs_name *link_name,
const char *target_path,
- mode_t mode,
+ umode_t mode,
xfs_inode_t **ipp)
{
xfs_mount_t *mp = dp->i_mount;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 35d3d513e1e..0c877cbde14 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -26,7 +26,7 @@ int xfs_release(struct xfs_inode *ip);
int xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
-int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
+int xfs_create(struct xfs_inode *dp, struct xfs_name *name, umode_t mode,
xfs_dev_t rdev, struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
@@ -35,7 +35,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
xfs_off_t *offset, filldir_t filldir);
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
- const char *target_path, mode_t mode, struct xfs_inode **ipp);
+ const char *target_path, umode_t mode, struct xfs_inode **ipp);
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
int xfs_change_file_space(struct xfs_inode *ip, int cmd,
xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 62ce6823c0f..9a62937c56c 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,70 +4,66 @@
#include <linux/time.h>
#include <linux/jiffies.h>
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
-#define cputime_zero (0UL)
#define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__ct)
+#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) (__hz)
+#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime64_to_jiffies64(__ct) (__ct)
-#define jiffies64_to_cputime64(__jif) (__jif)
-#define cputime_to_cputime64(__ct) ((u64) __ct)
-#define cputime64_gt(__a, __b) ((__a) > (__b))
+#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
-#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
/*
* Convert cputime to microseconds and back.
*/
-#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct) \
+ jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec) \
+ jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
*/
-#define cputime_to_secs(jif) ((jif) / HZ)
-#define secs_to_cputime(sec) ((sec) * HZ)
+#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
/*
* Convert cputime to timespec and back.
*/
-#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val) \
+ jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val) \
+ jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to timeval and back.
*/
-#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val) \
+ jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val) \
+ jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to clock and back.
*/
-#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct) \
+ jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x) \
+ jiffies_to_cputime(clock_t_to_jiffies(__x))
/*
* Convert cputime64 to clock.
*/
-#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct) \
+ jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
#endif
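
A minimal sketch (not part of the patch) of how the __nocast typedefs above are meant to be used; the variable names are illustrative, and the warnings come from sparse rather than gcc:

    unsigned long raw = 10;                          /* plain jiffies value */
    cputime_t ct = jiffies_to_cputime(raw);          /* __force cast done inside the macro */
    unsigned long back = cputime_to_jiffies(ct);     /* and back to jiffies */
    /* cputime_t bad = raw;    sparse would now warn: implicit cast to the __nocast type */
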
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 8c8621097fa..d466c8d8826 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/of.h>
#ifdef CONFIG_GPIOLIB
@@ -128,13 +129,14 @@ struct gpio_chip {
*/
struct device_node *of_node;
int of_gpio_n_cells;
- int (*of_xlate)(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags);
+ int (*of_xlate)(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags);
#endif
};
extern const char *gpiochip_is_requested(struct gpio_chip *chip,
unsigned offset);
+extern struct gpio_chip *gpio_to_chip(unsigned gpio);
extern int __must_check gpiochip_reserve(int start, int ngpio);
/* add/remove chips */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index dddc61dedc2..448303bdb85 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -327,7 +327,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
#define ioremap_wc ioremap_nocache
#endif
-static inline void iounmap(void *addr)
+static inline void iounmap(void __iomem *addr)
{
}
#endif /* CONFIG_MMU */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 351889d1de1..37d1fe28960 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -71,10 +71,14 @@ extern unsigned long memory_end;
#define PAGE_OFFSET (0)
#endif
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
#ifndef __ASSEMBLY__
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
@@ -86,7 +90,7 @@ extern unsigned long memory_end;
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#endif
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
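
A worked example (illustrative values only) of the new ARCH_PFN_OFFSET-based pfn_valid() above, assuming PAGE_OFFSET = 0x80000000 and PAGE_SHIFT = 12:

    /* ARCH_PFN_OFFSET = 0x80000000 >> 12 = 0x80000 */
    pfn_valid(0x7ffff);                 /* false: below the first valid frame */
    pfn_valid(0x80000);                 /* true, provided max_mapnr > 0 */
    pfn_valid(0x80000 + max_mapnr);     /* false: one past the last frame */
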
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
index 9a6115e7cf6..49c1704173e 100644
--- a/include/asm-generic/socket.h
+++ b/include/asm-generic/socket.h
@@ -64,4 +64,7 @@
#define SO_DOMAIN 39
#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
#endif /* __ASM_GENERIC_SOCKET_H */
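
A hypothetical userspace sketch of enabling the new option, assuming the usual SOL_SOCKET boolean-option pattern; once enabled, status reports are delivered as SCM_WIFI_STATUS control messages:

    int one = 1;

    if (setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one)) < 0)
            perror("setsockopt(SO_WIFI_STATUS)");
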
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
index 7a0f69e6c61..bd39806013b 100644
--- a/include/asm-generic/types.h
+++ b/include/asm-generic/types.h
@@ -6,10 +6,4 @@
*/
#include <asm-generic/int-ll64.h>
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
-
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index ac68c999b6c..9788568f797 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -289,9 +289,14 @@ strncpy_from_user(char *dst, const char __user *src, long count)
* Return 0 on exception, a value greater than N if too long
*/
#ifndef __strnlen_user
-#define __strnlen_user strnlen
+#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif
+/*
+ * Unlike strnlen, strnlen_user includes the nul terminator in
+ * its returned count. Callers should check for a returned value
+ * greater than N as an indication the string is too long.
+ */
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
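
A short illustration of the convention documented above; the buffer contents and error choices are hypothetical:

    /* ubuf contains "abc\0": strnlen_user() counts the NUL terminator */
    long len = strnlen_user(ubuf, 16);      /* returns 4, not 3 */
    if (len == 0)
            return -EFAULT;                 /* fault while reading user memory */
    if (len > 16)
            return -ENAMETOOLONG;           /* no terminator within the limit */
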
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index f4c38d8c667..2292d1af9d7 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
#undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
/*
* All syscalls below here should go away really,
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index 3a60ac88952..a5c0e10fd47 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -1,4 +1,5 @@
header-y += drm.h
+header-y += drm_fourcc.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += i810_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 4be33b4ca2f..49d94ede2ec 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -714,6 +714,10 @@ struct drm_get_cap {
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
/**
* Device specific ioctls should only be in their respective headers
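
A hypothetical userspace sketch of the new SETPLANE ioctl, using the drm_mode_set_plane layout added later in this patch; the IDs and sizes are made up:

    struct drm_mode_set_plane sp = {
            .plane_id = plane_id,
            .crtc_id  = crtc_id,
            .fb_id    = fb_id,
            .crtc_x   = 0,   .crtc_y = 0,
            .crtc_w   = 640, .crtc_h = 480,
            /* source coordinates are 16.16 fixed point: full 640x480 */
            .src_x = 0,         .src_y = 0,
            .src_w = 640 << 16, .src_h = 480 << 16,
    };

    if (ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &sp) < 0)
            perror("DRM_IOCTL_MODE_SETPLANE");
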
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1f9e9516e2b..76caa67c22e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -820,7 +820,7 @@ struct drm_driver {
* Specifically, the timestamp in @vblank_time should correspond as
* closely as possible to the time when the first video scanline of
* the video frame after the end of VBLANK will start scanning out,
- * the time immmediately after end of the VBLANK interval. If the
+ * the time immediately after end of the VBLANK interval. If the
* @crtc is currently inside VBLANK, this will be a time in the future.
* If the @crtc is currently scanning out a frame, this will be the
* past start time of the current scanout. This is meant to adhere
@@ -918,7 +918,7 @@ struct drm_driver {
int dev_priv_size;
struct drm_ioctl_desc *ioctls;
int num_ioctls;
- struct file_operations fops;
+ const struct file_operations *fops;
union {
struct pci_driver *pci;
struct platform_device *platform_device;
@@ -1696,5 +1696,13 @@ extern void drm_platform_exit(struct drm_driver *driver, struct platform_device
extern int drm_get_platform_dev(struct platform_device *pdev,
struct drm_driver *driver);
+/* returns true if currently okay to sleep */
+static __inline__ bool drm_can_sleep(void)
+{
+ if (in_atomic() || in_dbg_master() || irqs_disabled())
+ return false;
+ return true;
+}
+
#endif /* __KERNEL__ */
#endif
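
An illustrative caller pattern for the new drm_can_sleep() helper above (not taken from any driver in this patch):

    /* wait ~1ms for the hardware, picking a primitive that fits the context */
    if (drm_can_sleep())
            msleep(1);      /* process context: allowed to sleep */
    else
            mdelay(1);      /* atomic context: busy-wait instead */
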
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 80207980928..63e4fce6728 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -29,9 +29,10 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/idr.h>
-
#include <linux/fb.h>
+#include <drm/drm_fourcc.h>
+
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
@@ -44,6 +45,7 @@ struct drm_framebuffer;
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
struct drm_mode_object {
uint32_t id;
@@ -118,7 +120,6 @@ struct drm_display_mode {
char name[DRM_DISPLAY_MODE_LEN];
- int connector_count;
enum drm_mode_status status;
int type;
@@ -238,13 +239,15 @@ struct drm_framebuffer {
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
- unsigned int pitch;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
unsigned int depth;
int bits_per_pixel;
int flags;
+ uint32_t pixel_format; /* fourcc format */
struct list_head filp_head;
/* if you are using the helper */
void *helper_private;
@@ -278,6 +281,7 @@ struct drm_crtc;
struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
+struct drm_plane;
/**
* drm_crtc_funcs - control CRTCs for a given device
@@ -341,10 +345,21 @@ struct drm_crtc_funcs {
/**
* drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
* @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -423,6 +438,13 @@ struct drm_connector_funcs {
void (*force)(struct drm_connector *connector);
};
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
struct drm_encoder_funcs {
void (*reset)(struct drm_encoder *encoder);
void (*destroy)(struct drm_encoder *encoder);
@@ -435,6 +457,18 @@ struct drm_encoder_funcs {
/**
* drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
*/
struct drm_encoder {
struct drm_device *dev;
@@ -470,14 +504,37 @@ enum drm_connector_force {
/**
* drm_connector - central DRM connector control structure
- * @crtc: CRTC this connector is currently connected to, NULL if none
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
- * @available_modes: modes available on this connector (from get_modes() + user)
- * @initial_x: initial x position for this connector
- * @initial_y: initial y position for this connector
- * @status: connector connected?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
* @funcs: connector control functions
+ * @user_modes: user added mode list
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @property_ids: property tracking for this connector
+ * @property_values: value pointers or data for properties
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -498,7 +555,6 @@ struct drm_connector {
bool doublescan_allowed;
struct list_head modes; /* list of modes on this connector */
- int initial_x, initial_y;
enum drm_connector_status status;
/* these are modes added by probing with DDC or the BIOS */
@@ -522,7 +578,6 @@ struct drm_connector {
/* forced on connector */
enum drm_connector_force force;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
- uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
/* EDID bits */
@@ -536,7 +591,71 @@ struct drm_connector {
};
/**
- * struct drm_mode_set
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @gamma_size: size of gamma table
+ * @gamma_store: gamma correction table
+ * @enabled: enabled flag
+ * @funcs: helper functions
+ * @helper_private: storage for driver layer
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ bool enabled;
+
+ const struct drm_plane_funcs *funcs;
+ void *helper_private;
+};
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
*
* Represents a single crtc, the connectors that it drives, the mode in use,
* and the framebuffer it scans out from.
@@ -558,13 +677,33 @@ struct drm_mode_set {
};
/**
- * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
*/
struct drm_mode_config_funcs {
- struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
void (*output_poll_changed)(struct drm_device *dev);
};
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
@@ -576,7 +715,30 @@ struct drm_mode_group {
/**
* drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
*
+ * Core mode resource tracking structure. All CRTCs, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
*/
struct drm_mode_config {
struct mutex mutex; /* protects configuration (mode lists etc.) */
@@ -589,6 +751,8 @@ struct drm_mode_config {
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
+ int num_plane;
+ struct list_head plane_list;
int num_crtc;
struct list_head crtc_list;
@@ -641,6 +805,7 @@ struct drm_mode_config {
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
#define obj_to_property(x) container_of(x, struct drm_property, base)
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
extern void drm_crtc_init(struct drm_device *dev,
@@ -660,6 +825,14 @@ extern void drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type);
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
extern char *drm_get_connector_name(struct drm_connector *connector);
@@ -753,17 +926,25 @@ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_setcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
extern int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getfb(struct drm_device *dev,
@@ -824,4 +1005,7 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
#endif /* __DRM_CRTC_H__ */
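
A hypothetical driver-side sketch of registering a plane through the interfaces declared above; my_plane, my_update_plane and my_disable_plane are made-up names:

    static const uint32_t my_formats[] = {
            DRM_FORMAT_XRGB8888,
            DRM_FORMAT_YUYV,
    };

    static const struct drm_plane_funcs my_plane_funcs = {
            .update_plane  = my_update_plane,       /* hypothetical helpers */
            .disable_plane = my_disable_plane,
            .destroy       = drm_plane_cleanup,
    };

    /* bind to CRTC 0 only, and expose the plane to userspace (priv = false) */
    int ret = drm_plane_init(dev, &my_plane, 1 << 0, &my_plane_funcs,
                             my_formats, ARRAY_SIZE(my_formats), false);
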
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 73b071203dc..37515d1afab 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -117,7 +117,7 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd);
+ struct drm_mode_fb_cmd2 *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
@@ -144,4 +144,7 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+extern int drm_format_num_planes(uint32_t format);
+
#endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644
index 00000000000..bdf0152cbbe
--- /dev/null
+++ b/include/drm/drm_fourcc.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <linux/types.h>
+
+#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
+ ((__u32)(c) << 16) | ((__u32)(d) << 24))
+
+#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+
+/* 2 non-contiguous plane YCbCr */
+#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/* 3 non-contiguous plane YCbCr */
+#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+
+#endif /* DRM_FOURCC_H */
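For reference, a DRM fourcc is just four ASCII characters packed little endian into a 32-bit value. A minimal userspace sketch (not part of this patch), assuming the usual fourcc_code() definition from the top of drm_fourcc.h, which is outside this hunk:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match the macro at the top of drm_fourcc.h (not shown here) */
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
	uint32_t nv12 = fourcc_code('N', 'V', '1', '2');

	/* Prints 0x3231564e: the bytes 'N' 'V' '1' '2' in memory order */
	printf("DRM_FORMAT_NV12 = 0x%08" PRIx32 "\n", nv12);
	return 0;
}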
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index ddd46db65b5..2a2acda8b43 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -120,11 +120,48 @@ struct drm_mode_crtc {
struct drm_mode_modeinfo mode;
};
-#define DRM_MODE_ENCODER_NONE 0
-#define DRM_MODE_ENCODER_DAC 1
-#define DRM_MODE_ENCODER_TMDS 2
-#define DRM_MODE_ENCODER_LVDS 3
-#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags; /* see above flags */
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x, crtc_y;
+ __u32 crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x, src_y;
+ __u32 src_h, src_w;
+};
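A hedged userspace sketch of filling this request: the crtc_* coordinates are plain pixels while the src_* coordinates are 16.16 fixed point, so whole pixels are shifted left by 16. The include path and the helper name are assumptions, and the SETPLANE ioctl call itself is omitted.

#include <stdint.h>
#include <drm/drm_mode.h>	/* assumed include path for this header */

/*
 * Show a 256x256 region of the source framebuffer, starting at pixel
 * (16, 32), scaled to fill a 1920x1080 CRTC.
 */
static void fill_set_plane(struct drm_mode_set_plane *req,
			   uint32_t plane_id, uint32_t crtc_id, uint32_t fb_id)
{
	req->plane_id = plane_id;
	req->crtc_id = crtc_id;
	req->fb_id = fb_id;
	req->flags = 0;

	req->crtc_x = 0;
	req->crtc_y = 0;
	req->crtc_w = 1920;
	req->crtc_h = 1080;

	req->src_x = 16 << 16;	/* 16.16 fixed point */
	req->src_y = 32 << 16;
	req->src_w = 256 << 16;
	req->src_h = 256 << 16;
}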
+
+struct drm_mode_get_plane {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
struct drm_mode_get_encoder {
@@ -231,6 +268,33 @@ struct drm_mode_fb_cmd {
__u32 handle;
};
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+
+struct drm_mode_fb_cmd2 {
+ __u32 fb_id;
+ __u32 width, height;
+ __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+ __u32 flags; /* see above flags */
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+	 * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offset[0] and UV as
+	 * offset[1]. Note that offset[0] will generally
+ * be 0.
+ */
+ __u32 handles[4];
+ __u32 pitches[4]; /* pitch for each plane */
+ __u32 offsets[4]; /* offset of each plane */
+};
+
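To make the comment above concrete, a hedged userspace sketch describing a tightly packed NV12 buffer held in a single GEM object (Y plane at offset 0, interleaved CbCr plane immediately after it). The include paths and the helper name are assumptions; DRM_FORMAT_NV12 is the fourcc added earlier in this patch, and the ADDFB2 ioctl call itself is omitted.

#include <stdint.h>
#include <drm/drm_fourcc.h>	/* assumed include paths for these headers */
#include <drm/drm_mode.h>

static void fill_nv12_fb(struct drm_mode_fb_cmd2 *fb, uint32_t gem_handle,
			 uint32_t width, uint32_t height)
{
	fb->width = width;
	fb->height = height;
	fb->pixel_format = DRM_FORMAT_NV12;
	fb->flags = 0;

	/* Plane 0: 8 bit Y samples, one byte per pixel */
	fb->handles[0] = gem_handle;
	fb->pitches[0] = width;
	fb->offsets[0] = 0;

	/* Plane 1: interleaved Cb:Cr, 2x2 subsampled, so same pitch but half the rows */
	fb->handles[1] = gem_handle;
	fb->pitches[1] = width;
	fb->offsets[1] = width * height;
}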
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f81676f1b31..14b6cd02228 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -182,8 +182,11 @@
{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -195,8 +198,18 @@
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -238,6 +251,7 @@
{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -480,6 +494,8 @@
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -494,6 +510,8 @@
{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
deleted file mode 100644
index 08ecf83ad5d..00000000000
--- a/include/drm/drm_sman.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple memory MANager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas HellstrĂśm <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_SMAN_H
-#define DRM_SMAN_H
-
-#include "drmP.h"
-#include "drm_hashtab.h"
-
-/*
- * A class that is an abstration of a simple memory allocator.
- * The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
- * See the SiS implementation, which may use the SiS FB kernel module
- * for memory management.
- */
-
-struct drm_sman_mm {
- /* private info. If allocated, needs to be destroyed by the destroy
- function */
- void *private;
-
- /* Allocate a memory block with given size and alignment.
- Return an opaque reference to the memory block */
-
- void *(*allocate) (void *private, unsigned long size,
- unsigned alignment);
-
- /* Free a memory block. "ref" is the opaque reference that we got from
- the "alloc" function */
-
- void (*free) (void *private, void *ref);
-
- /* Free all resources associated with this allocator */
-
- void (*destroy) (void *private);
-
- /* Return a memory offset from the opaque reference returned from the
- "alloc" function */
-
- unsigned long (*offset) (void *private, void *ref);
-};
-
-struct drm_memblock_item {
- struct list_head owner_list;
- struct drm_hash_item user_hash;
- void *mm_info;
- struct drm_sman_mm *mm;
- struct drm_sman *sman;
-};
-
-struct drm_sman {
- struct drm_sman_mm *mm;
- int num_managers;
- struct drm_open_hash owner_hash_tab;
- struct drm_open_hash user_hash_tab;
- struct list_head owner_items;
-};
-
-/*
- * Take down a memory manager. This function should only be called after a
- * successful init and after a call to drm_sman_cleanup.
- */
-
-extern void drm_sman_takedown(struct drm_sman * sman);
-
-/*
- * Allocate structures for a manager.
- * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
- * user_order is the log2 of the number of buckets in the user hash table.
- * set this to approximately log2 of the max number of memory regions
- * that will be allocated for _all_ pools together.
- * owner_order is the log2 of the number of buckets in the owner hash table.
- * set this to approximately log2 of
- * the number of client file connections that will
- * be using the manager.
- *
- */
-
-extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order);
-
-/*
- * Initialize a drm_mm.c allocator. Should be called only once for each
- * manager unless a customized allogator is used.
- */
-
-extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size);
-
-/*
- * Initialize a customized allocator for one of the managers.
- * (See the SiS module). The object pointed to by "allocator" is copied,
- * so it can be destroyed after this call.
- */
-
-extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
- struct drm_sman_mm * allocator);
-
-/*
- * Allocate a memory block. Aligment is not implemented yet.
- */
-
-extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
- unsigned int manager,
- unsigned long size,
- unsigned alignment,
- unsigned long owner);
-/*
- * Free a memory block identified by its user hash key.
- */
-
-extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
-
-/*
- * returns 1 iff there are no stale memory blocks associated with this owner.
- * Typically called to determine if we need to idle the hardware and call
- * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
- * resources associated with owner.
- */
-
-extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with this owner. Note that this
- * requires that the hardware is finished with all blocks, so the graphics engine
- * should be idled before this call is made. This function also frees
- * any resources associated with "owner" and should be called when owner
- * is not going to be referenced anymore.
- */
-
-extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
-
-/*
- * Frees all stale memory blocks associated with the memory manager.
- * See idling above.
- */
-
-extern void drm_sman_cleanup(struct drm_sman * sman);
-
-#endif
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1d161cb3aca..5e120f1c5cd 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,17 +32,16 @@
/**
* User-desired buffer creation information structure.
*
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: a handle to the created gem object, returned to the caller.
+ *	- this handle will be set by the gem module on the kernel side.
*/
struct drm_exynos_gem_create {
- unsigned int size;
+ uint64_t size;
unsigned int flags;
unsigned int handle;
- unsigned int pad;
};
/**
@@ -75,9 +74,16 @@ struct drm_exynos_gem_mmap {
uint64_t mapped;
};
+struct drm_exynos_plane_set_zpos {
+ __u32 plane_id;
+ __s32 zpos;
+};
+
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
#define DRM_EXYNOS_GEM_MMAP 0x02
+/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
+#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -88,6 +94,9 @@ struct drm_exynos_gem_mmap {
#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
+#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
+
/**
* Platform Specific Structure for DRM based FIMD.
*
@@ -103,4 +112,31 @@ struct exynos_drm_fimd_pdata {
unsigned int bpp;
};
+/**
+ * Platform Specific Structure for DRM based HDMI.
+ *
+ * @hdmi_dev: device pointer to the specific hdmi driver.
+ * @mixer_dev: device pointer to the specific mixer driver.
+ *
+ * This structure is used by the common hdmi driver; each device object
+ * is used to access the specific device driver (hdmi or mixer driver).
+ */
+struct exynos_drm_common_hdmi_pd {
+ struct device *hdmi_dev;
+ struct device *mixer_dev;
+};
+
+/**
+ * Platform Specific Structure for DRM based HDMI core.
+ *
+ * @timing: default video mode used for initialization.
+ * @default_win: default window layer number to be used for UI.
+ * @bpp: default bits per pixel.
+ */
+struct exynos_drm_hdmi_pdata {
+ struct fb_videomode timing;
+ unsigned int default_win;
+ unsigned int bpp;
+};
+
#endif
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
new file mode 100644
index 00000000000..11368678571
--- /dev/null
+++ b/include/drm/gma_drm.h
@@ -0,0 +1,91 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRM_H_
+#define _PSB_DRM_H_
+
+/*
+ * Manage the LUT for an output
+ */
+struct drm_psb_dpst_lut_arg {
+ uint8_t lut[256];
+ int output_id;
+};
+
+/*
+ * Validate modes
+ */
+struct drm_psb_mode_operation_arg {
+ u32 obj_id;
+ u16 operation;
+ struct drm_mode_modeinfo mode;
+ u64 data;
+};
+
+/*
+ * Query the stolen memory for smarter management of
+ * memory by the server
+ */
+struct drm_psb_stolen_memory_arg {
+ u32 base;
+ u32 size;
+};
+
+struct drm_psb_get_pipe_from_crtc_id_arg {
+ /** ID of CRTC being requested **/
+ u32 crtc_id;
+ /** pipe of requested CRTC **/
+ u32 pipe;
+};
+
+struct drm_psb_gem_create {
+ __u64 size;
+ __u32 handle;
+ __u32 flags;
+#define GMA_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
+};
+
+struct drm_psb_gem_mmap {
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
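As the comment notes, the returned offset is a fake offset for a subsequent mmap(2) on the DRM file descriptor, not a physical address. A minimal userspace sketch; the helper name is illustrative and the mmap ioctl that produced the offset is omitted.

#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

static void *map_gem_bo(int drm_fd, uint64_t fake_offset, size_t size)
{
	/* The fake offset from the GEM mmap ioctl selects the BO on the DRM fd */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, (off_t)fake_offset);
}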
+
+/* Controlling the kernel modesetting buffers */
+
+#define DRM_GMA_GEM_CREATE 0x00 /* Create a GEM object */
+#define DRM_GMA_GEM_MMAP 0x01 /* Map GEM memory */
+#define DRM_GMA_STOLEN_MEMORY 0x02 /* Report stolen memory */
+#define DRM_GMA_2D_OP 0x03 /* Will be merged later */
+#define DRM_GMA_GAMMA 0x04 /* Set gamma table */
+#define DRM_GMA_ADB 0x05 /* Get backlight */
+#define DRM_GMA_DPST_BL 0x06 /* Set backlight */
+#define DRM_GMA_GET_PIPE_FROM_CRTC_ID 0x1 /* CRTC to physical pipe# */
+#define DRM_GMA_MODE_OPERATION 0x07 /* Mode validation/DC set */
+#define PSB_MODE_OPERATION_MODE_VALID 0x01
+
+
+#endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 28c0d114cb5..924f6a454fe 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -291,6 +295,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
typedef struct drm_i915_getparam {
int param;
@@ -653,6 +658,9 @@ struct drm_i915_gem_execbuffer2 {
__u64 rsvd2;
};
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
@@ -844,4 +852,36 @@ struct drm_intel_overlay_attrs {
__u32 gamma5;
};
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
+
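A hedged userspace sketch of a source-keying request built from the struct above: pure green pixels in the sprite become transparent. The key value, include path and helper name are assumptions, and the SET_SPRITE_COLORKEY ioctl call itself is omitted.

#include <stdint.h>
#include <drm/i915_drm.h>	/* assumed include path for this header */

/*
 * For RGB surfaces only min_value and channel_mask are used, per the
 * comment above; max_value is kept equal to min_value here.
 */
static void fill_green_source_key(struct drm_intel_sprite_colorkey *key,
				  uint32_t plane_id)
{
	key->plane_id = plane_id;
	key->min_value = 0x0000ff00;	/* x:R:G:B, green fully saturated */
	key->max_value = 0x0000ff00;
	key->channel_mask = 0x00ffffff;	/* compare the R, G and B channels */
	key->flags = I915_SET_COLORKEY_SOURCE;
}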
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index be94be6d6f1..b55da40953f 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -509,6 +509,7 @@ typedef struct {
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
+#define DRM_RADEON_GEM_VA 0x2b
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -550,6 +551,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
+#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
typedef struct drm_radeon_init {
enum {
@@ -872,12 +874,39 @@ struct drm_radeon_gem_pwrite {
uint64_t data_ptr;
};
+#define RADEON_VA_MAP 1
+#define RADEON_VA_UNMAP 2
+
+#define RADEON_VA_RESULT_OK 0
+#define RADEON_VA_RESULT_ERROR 1
+#define RADEON_VA_RESULT_VA_EXIST 2
+
+#define RADEON_VM_PAGE_VALID (1 << 0)
+#define RADEON_VM_PAGE_READABLE (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED (1 << 4)
+
+struct drm_radeon_gem_va {
+ uint32_t handle;
+ uint32_t operation;
+ uint32_t vm_id;
+ uint32_t flags;
+ uint64_t offset;
+};
+
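A hedged userspace sketch of a map request using the new structure; the include path, helper name and choice of flags are assumptions, and the DRM_IOCTL_RADEON_GEM_VA call itself is omitted.

#include <stdint.h>
#include <drm/radeon_drm.h>	/* assumed include path for this header */

/*
 * Map a GEM buffer at a caller-chosen GPU virtual address inside a radeon
 * VM context.  The address should be at or above the value reported by
 * RADEON_INFO_VA_START.
 */
static void fill_va_map(struct drm_radeon_gem_va *va, uint32_t handle,
			uint32_t vm_id, uint64_t gpu_addr)
{
	va->handle = handle;
	va->operation = RADEON_VA_MAP;
	va->vm_id = vm_id;
	va->flags = RADEON_VM_PAGE_VALID |
		    RADEON_VM_PAGE_READABLE |
		    RADEON_VM_PAGE_WRITEABLE;
	va->offset = gpu_addr;
}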
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
+#define RADEON_CS_USE_VM 0x02
+/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
+#define RADEON_CS_RING_GFX 0
+#define RADEON_CS_RING_COMPUTE 1
+/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
+/* 0 = normal, + = higher priority, - = lower priority */
struct drm_radeon_cs_chunk {
uint32_t chunk_id;
@@ -885,6 +914,9 @@ struct drm_radeon_cs_chunk {
uint64_t chunk_data;
};
+/* drm_radeon_cs_reloc.flags */
+#define RADEON_RELOC_DONT_SYNC 0x01
+
struct drm_radeon_cs_reloc {
uint32_t handle;
uint32_t read_domains;
@@ -916,6 +948,10 @@ struct drm_radeon_cs {
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
+/* virtual address start, va < start are reserved by the kernel */
+#define RADEON_INFO_VA_START 0x0e
+/* maximum size of ib using the virtual memory cs */
+#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 30f7b382746..035b804dda6 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -64,4 +64,8 @@ typedef struct {
unsigned int offset, size;
} drm_sis_fb_t;
+struct sis_file_private {
+ struct list_head obj_list;
+};
+
#endif /* __SIS_DRM_H__ */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 42e34698518..974c8f801c3 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -122,17 +122,12 @@ struct ttm_mem_reg {
* be mmapped by user space. Each of these bos occupy a slot in the
* device address space, that can be used for normal vm operations.
*
- * @ttm_bo_type_user: These are user-space memory areas that are made
- * available to the GPU by mapping the buffer pages into the GPU aperture
- * space. These buffers cannot be mmaped from the device address space.
- *
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use.
*/
enum ttm_bo_type {
ttm_bo_type_device,
- ttm_bo_type_user,
ttm_bo_type_kernel
};
@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
-
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
/**
* ttm_bo_synccpu_write_release:
*
@@ -447,6 +442,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding the buffer object data.
+ *
+ * Returns the size to account for a buffer object.
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+
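A minimal kernel-side sketch of how a driver might use the new accounting helper before ttm_bo_init(); "struct my_bo" and the include path are illustrative assumptions, not part of this patch.

#include <drm/ttm/ttm_bo_api.h>	/* assumed include path */

/* Illustrative driver wrapper around a TTM buffer object */
struct my_bo {
	struct ttm_buffer_object tbo;
	/* driver private fields would follow */
};

static size_t example_acc_size(struct ttm_bo_device *bdev, unsigned long bytes)
{
	/* Value to pass as acc_size to ttm_bo_init() for a my_bo of 'bytes' */
	return ttm_bo_acc_size(bdev, bytes, sizeof(struct my_bo));
}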
+/**
* ttm_bo_init
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct file *persistent_swap_storage,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *));
+
/**
* ttm_bo_synccpu_object_init
*
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 94eb1434316..d43e892307f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -43,36 +43,9 @@ struct ttm_backend;
struct ttm_backend_func {
/**
- * struct ttm_backend_func member populate
- *
- * @backend: Pointer to a struct ttm_backend.
- * @num_pages: Number of pages to populate.
- * @pages: Array of pointers to ttm pages.
- * @dummy_read_page: Page to be used instead of NULL pages in the
- * array @pages.
- * @dma_addrs: Array of DMA (bus) address of the ttm pages.
- *
- * Populate the backend with ttm pages. Depending on the backend,
- * it may or may not copy the @pages array.
- */
- int (*populate) (struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs);
- /**
- * struct ttm_backend_func member clear
- *
- * @backend: Pointer to a struct ttm_backend.
- *
- * This is an "unpopulate" function. Release all resources
- * allocated with populate.
- */
- void (*clear) (struct ttm_backend *backend);
-
- /**
* struct ttm_backend_func member bind
*
- * @backend: Pointer to a struct ttm_backend.
+ * @ttm: Pointer to a struct ttm_tt.
* @bo_mem: Pointer to a struct ttm_mem_reg describing the
* memory type and location for binding.
*
@@ -80,46 +53,29 @@ struct ttm_backend_func {
* indicated by @bo_mem. This function should be able to handle
* differences between aperture and system page sizes.
*/
- int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
* struct ttm_backend_func member unbind
*
- * @backend: Pointer to a struct ttm_backend.
+ * @ttm: Pointer to a struct ttm_tt.
*
* Unbind previously bound backend pages. This function should be
* able to handle differences between aperture and system page sizes.
*/
- int (*unbind) (struct ttm_backend *backend);
+ int (*unbind) (struct ttm_tt *ttm);
/**
* struct ttm_backend_func member destroy
*
- * @backend: Pointer to a struct ttm_backend.
+ * @ttm: Pointer to a struct ttm_tt.
*
- * Destroy the backend.
+	 * Destroy the backend. This will be called back from ttm_tt_destroy,
+	 * so don't call ttm_tt_destroy from the callback or an infinite loop
+	 * will result.
*/
- void (*destroy) (struct ttm_backend *backend);
-};
-
-/**
- * struct ttm_backend
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @flags: For driver use.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- *
- */
-
-struct ttm_backend {
- struct ttm_bo_device *bdev;
- uint32_t flags;
- struct ttm_backend_func *func;
+ void (*destroy) (struct ttm_tt *ttm);
};
-#define TTM_PAGE_FLAG_USER (1 << 1)
-#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
@@ -135,23 +91,18 @@ enum ttm_caching_state {
/**
* struct ttm_tt
*
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
* @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
* pointer.
* @pages: Array of pages backing the data.
- * @first_himem_page: Himem pages are put last in the page array, which
- * enables us to run caching attribute changes on only the first part
- * of the page array containing lomem pages. This is the index of the
- * first himem page.
- * @last_lomem_page: Index of the last lomem page in the page array.
* @num_pages: Number of pages in the page array.
* @bdev: Pointer to the current struct ttm_bo_device.
* @be: Pointer to the ttm backend.
- * @tsk: The task for user ttm.
- * @start: virtual address for user ttm.
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -159,16 +110,14 @@ enum ttm_caching_state {
*/
struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
struct page *dummy_read_page;
struct page **pages;
- long first_himem_page;
- long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
struct ttm_bo_global *glob;
struct ttm_backend *be;
- struct task_struct *tsk;
- unsigned long start;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
@@ -176,7 +125,23 @@ struct ttm_tt {
tt_unbound,
tt_unpopulated,
} state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
dma_addr_t *dma_address;
+ struct list_head pages_list;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
struct ttm_bo_driver {
/**
- * struct ttm_bo_driver member create_ttm_backend_entry
+ * ttm_tt_create
*
- * @bdev: The buffer object device.
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
*
- * Create a driver specific struct ttm_backend.
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
*/
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
- struct ttm_backend *(*create_ttm_backend_entry)
- (struct ttm_bo_device *bdev);
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+	 * Free all backing pages
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
/**
* struct ttm_bo_driver member invalidate_caches
@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap.
- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
- * used by a buffer object. This is excluding page arrays and backing pages.
- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
* @device_list_mutex: Mutex protecting the device list.
* This mutex is held while traversing the device list for pm options.
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
@@ -497,8 +486,6 @@ struct ttm_bo_global {
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
struct ttm_mem_shrink shrink;
- size_t ttm_bo_extra_size;
- size_t ttm_bo_size;
struct mutex device_list_mutex;
spinlock_t lru_lock;
@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
}
/**
- * ttm_tt_create
+ * ttm_tt_init
*
+ * @ttm: The struct ttm_tt.
* @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data needed backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
* Returns:
* NULL: Out of memory.
*/
-extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page);
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
/**
- * ttm_tt_set_user:
+ * ttm_tt_fini
*
- * @ttm: The struct ttm_tt to populate.
- * @tsk: A struct task_struct for which @start is a valid user-space address.
- * @start: A valid user-space address.
- * @num_pages: Size in pages of the user memory area.
+ * @ttm: the ttm_tt structure.
*
- * Populate a struct ttm_tt with a user-space memory area after first pinning
- * the pages backing it.
- * Returns:
- * !0: Error.
+ * Free the memory held by the ttm_tt structure.
*/
-
-extern int ttm_tt_set_user(struct ttm_tt *ttm,
- struct task_struct *tsk,
- unsigned long start, unsigned long num_pages);
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_bind:
@@ -646,20 +628,11 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
-/**
* ttm_ttm_destroy:
*
* @ttm: The struct ttm_tt.
*
- * Unbind, unpopulate and destroy a struct ttm_tt.
+ * Unbind, unpopulate and destroy common struct ttm_tt.
*/
extern void ttm_tt_destroy(struct ttm_tt *ttm);
@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
extern void ttm_tt_unbind(struct ttm_tt *ttm);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
- *
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
*
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swapped out ttm_tt.
*/
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* ttm_tt_cache_flush:
@@ -1046,17 +1013,25 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
#include <linux/agp_backend.h>
/**
- * ttm_agp_backend_init
+ * ttm_agp_tt_create
*
* @bdev: Pointer to a struct ttm_bo_device.
* @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
* for TT memory. This function uses the linux agpgart interface to
* bind and unbind memory backing a ttm_tt.
*/
-extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge);
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif
#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 129de12353f..5fe27400d17 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -30,45 +30,70 @@
#include "ttm_memory.h"
/**
- * Get count number of pages from pool to pages list.
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
*
- * @pages: head of empty linked list where pages are filled.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state for the page.
- * @count: number of pages to allocate.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Add backing pages to all of @ttm
*/
-int ttm_get_pages(struct list_head *pages,
- int flags,
- enum ttm_caching_state cstate,
- unsigned count,
- dma_addr_t *dma_address);
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
/**
- * Put linked list of pages to pool.
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt from which to free the backing pages.
*
- * @pages: list of pages to free.
- * @page_count: number of pages in the list. Zero can be passed for unknown
- * count.
- * @flags: ttm flags for page allocation.
- * @cstate: ttm caching state.
- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * Free all pages of @ttm
*/
-void ttm_put_pages(struct list_head *pages,
- unsigned page_count,
- int flags,
- enum ttm_caching_state cstate,
- dma_addr_t *dma_address);
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+
+
+#ifdef CONFIG_SWIOTLB
/**
* Initialize pool allocator.
*/
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
/**
* Free pool allocator.
*/
-void ttm_page_alloc_fini(void);
+void ttm_dma_page_alloc_fini(void);
/**
* Output the state of pools to debugfs file
*/
-extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+ unsigned max_pages)
+{
+ return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index fd11a5bd892..79b3b6e0f6b 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
drm_via_blitsync_t sync;
} drm_via_dmablit_t;
+struct via_file_private {
+ struct list_head obj_list;
+};
+
#endif /* _VIA_DRM_H_ */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 619b5657af7..c94e71781b7 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h
header-y += if_pppox.h
header-y += if_slip.h
header-y += if_strip.h
+header-y += if_team.h
header-y += if_tr.h
header-y += if_tun.h
header-y += if_tunnel.h
@@ -194,7 +195,9 @@ header-y += igmp.h
header-y += in.h
header-y += in6.h
header-y += in_route.h
+header-y += sock_diag.h
header-y += inet_diag.h
+header-y += unix_diag.h
header-y += inotify.h
header-y += input.h
header-y += ioctl.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6001b4da39d..627a3a42e4d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -302,6 +302,10 @@ extern bool osc_sb_apei_support_acked;
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
+
+#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
+ OSC_SHPC_NATIVE_HP_CONTROL)
+
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
extern void acpi_early_init(void);
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index be3d9a77d6e..73a25005d88 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -23,6 +23,8 @@ struct ata_port_info;
struct ahci_platform_data {
int (*init)(struct device *dev, void __iomem *addr);
void (*exit)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
const struct ata_port_info *ata_port_info;
unsigned int force_port_map;
unsigned int mask_port_map;
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fcbbe71a3cc..724c69c40bb 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/resource.h>
#include <linux/regulator/consumer.h>
@@ -35,12 +36,6 @@ struct amba_device {
unsigned int irq[AMBA_NR_IRQS];
};
-struct amba_id {
- unsigned int id;
- unsigned int mask;
- void *data;
-};
-
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 4ce98f54186..572f637299c 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -238,6 +238,9 @@ struct dma_chan;
* @enable_dma: if true enables DMA driven transfers.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
+ * @autosuspend_delay: delay in ms following transfer completion before the
+ * runtime power management system suspends the device. A setting of 0
+ * indicates no delay and the device will be suspended immediately.
*/
struct pl022_ssp_controller {
u16 bus_id;
@@ -246,6 +249,7 @@ struct pl022_ssp_controller {
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
+ int autosuspend_delay;
};
/**
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
index d12f077a6da..12e023c19ac 100644
--- a/include/linux/amba/pl330.h
+++ b/include/linux/amba/pl330.h
@@ -12,17 +12,9 @@
#ifndef __AMBA_PL330_H_
#define __AMBA_PL330_H_
+#include <linux/dmaengine.h>
#include <asm/hardware/pl330.h>
-struct dma_pl330_peri {
- /*
- * Peri_Req i/f of the DMAC that is
- * peripheral could be reached from.
- */
- u8 peri_id; /* specific dma id */
- enum pl330_reqtype rqtype;
-};
-
struct dma_pl330_platdata {
/*
* Number of valid peripherals connected to DMAC.
@@ -33,9 +25,12 @@ struct dma_pl330_platdata {
*/
u8 nr_valid_peri;
/* Array of valid peripherals */
- struct dma_pl330_peri *peri;
+ u8 *peri_id;
+ /* Operational capabilities */
+ dma_cap_mask_t cap_mask;
/* Bytes to allocate for MC buffer */
unsigned mcbuf_sz;
};
+extern bool pl330_filter(struct dma_chan *chan, void *param);
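With the private dma_pl330_peri array gone, clients now go through the generic dmaengine API. A hedged kernel-side sketch; passing the peripheral request interface id as the filter parameter is an assumption about the driver-specific convention, shown purely for illustration.

#include <linux/amba/pl330.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_get_pl330_chan(unsigned int peri_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* pl330_filter() rejects channels that do not belong to a PL330 DMAC */
	return dma_request_channel(mask, pl330_filter,
				   (void *)(unsigned long)peri_id);
}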
#endif /* __AMBA_PL330_H_ */
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index a6863a2dec1..ef00610837d 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -20,12 +20,148 @@
#ifndef _ASM_X86_AMD_IOMMU_H
#define _ASM_X86_AMD_IOMMU_H
-#include <linux/irqreturn.h>
+#include <linux/types.h>
#ifdef CONFIG_AMD_IOMMU
+struct task_struct;
+struct pci_dev;
+
extern int amd_iommu_detect(void);
+
+/**
+ * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
+ * in the IOMMUv2 driver
+ * @pdev: The PCI device the workaround is necessary for
+ * @erratum: The erratum workaround to enable
+ *
+ * The function needs to be called before amd_iommu_init_device().
+ * Possible values for the erratum number are for now:
+ * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
+ * is enabled
+ * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
+ * requests to one
+ */
+#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
+#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
+
+extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
+
+/**
+ * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
+ * @pdev: The PCI device to initialize
+ * @pasids: Number of PASIDs to support for this device
+ *
+ * This function does all setup for the device pdev so that it can be
+ * used with IOMMUv2.
+ * Returns 0 on success or negative value on error.
+ */
+extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
+
+/**
+ * amd_iommu_free_device() - Free all IOMMUv2 related device resources
+ * and disable IOMMUv2 usage for this device
+ * @pdev: The PCI device to disable IOMMUv2 usage for
+ */
+extern void amd_iommu_free_device(struct pci_dev *pdev);
+
+/**
+ * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
+ * @pdev: The PCI device to bind the task to
+ * @pasid: The PASID on the device the task should be bound to
+ * @task: the task to bind
+ *
+ * The function returns 0 on success or a negative value on error.
+ */
+extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+ struct task_struct *task);
+
+/**
+ * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
+ * a device
+ * @pdev: The device of the PASID
+ * @pasid: The PASID to unbind
+ *
+ * When this function returns, the device is no longer using the PASID
+ * and the PASID is no longer bound to its task.
+ */
+extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
+
+/**
+ * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
+ * PRI requests
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ *
+ * The IOMMUv2 driver invokes this call-back when it is unable to
+ * successfully handle a PRI request. The device driver can then decide
+ * which PRI response the device should see. Possible return values for
+ * the call-back are:
+ *
+ * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
+ * the device is required to disable
+ * PRI when it receives this response
+ *
+ * The function returns 0 on success or negative value on error.
+ */
+#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
+#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
+#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
+
+typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
+ int pasid,
+ unsigned long address,
+ u16);
+
+extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb);
+
+/**
+ * amd_iommu_device_info() - Get information about IOMMUv2 support of a
+ * PCI device
+ * @pdev: PCI device to query information from
+ * @info: A pointer to an amd_iommu_device_info structure which will contain
+ * the information about the PCI device
+ *
+ * Returns 0 on success, negative value on error
+ */
+
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
+ on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
+ super-user privileges */
+
+struct amd_iommu_device_info {
+ int max_pasids;
+ u32 flags;
+};
+
+extern int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info);
+
+/**
+ * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
+ * a pasid context. This call-back is
+ * invoked when the IOMMUv2 driver needs to
+ * invalidate a PASID context, for example
+ * because the task that is bound to that
+ * context is about to exit.
+ *
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ */
+
+typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+
+extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb);
+
#else
static inline int amd_iommu_detect(void) { return -ENODEV; }
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 9a26c83a2c9..b856a2a590d 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -27,10 +27,7 @@ extern int __devexit __pata_platform_remove(struct device *dev);
/*
* Marvell SATA private data
*/
-struct mbus_dram_target_info;
-
struct mv_sata_platform_data {
- struct mbus_dram_target_info *dram;
int n_ports; /* number of sata ports */
};
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 49a83ca900b..f4ff882cb2d 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
- return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 2f81c6f3b63..426ab9f4dd8 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -468,13 +468,13 @@ extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
#define audit_get_sessionid(t) ((t)->sessionid)
extern void audit_log_task_context(struct audit_buffer *ab);
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
-extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
+extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern int audit_bprm(struct linux_binprm *bprm);
extern void audit_socketcall(int nargs, unsigned long *args);
extern int audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
extern int audit_set_macxattr(const char *name);
-extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr);
+extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
@@ -494,12 +494,12 @@ static inline void audit_fd_pair(int fd1, int fd2)
if (unlikely(!audit_dummy_context()))
__audit_fd_pair(fd1, fd2);
}
-static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
+static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
{
if (unlikely(!audit_dummy_context()))
__audit_ipc_set_perm(qbytes, uid, gid, mode);
}
-static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
+static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{
if (unlikely(!audit_dummy_context()))
__audit_mq_open(oflag, mode, attr);
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 4d4b59de946..f4b8346b1a3 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,61 +205,82 @@ struct bcma_bus {
struct ssb_sprom sprom;
};
-extern inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
{
return core->bus->ops->read8(core, offset);
}
-extern inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
{
return core->bus->ops->read16(core, offset);
}
-extern inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
{
return core->bus->ops->read32(core, offset);
}
-extern inline
+static inline
void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
{
core->bus->ops->write8(core, offset, value);
}
-extern inline
+static inline
void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
{
core->bus->ops->write16(core, offset, value);
}
-extern inline
+static inline
void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
{
core->bus->ops->write32(core, offset, value);
}
#ifdef CONFIG_BCMA_BLOCKIO
-extern inline void bcma_block_read(struct bcma_device *core, void *buffer,
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
core->bus->ops->block_read(core, buffer, count, offset, reg_width);
}
-extern inline void bcma_block_write(struct bcma_device *core, const void *buffer,
- size_t count, u16 offset, u8 reg_width)
+static inline void bcma_block_write(struct bcma_device *core,
+ const void *buffer, size_t count,
+ u16 offset, u8 reg_width)
{
core->bus->ops->block_write(core, buffer, count, offset, reg_width);
}
#endif
-extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
{
return core->bus->ops->aread32(core, offset);
}
-extern inline
+static inline
void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
{
core->bus->ops->awrite32(core, offset, value);
}
-#define bcma_mask32(cc, offset, mask) \
- bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask))
-#define bcma_set32(cc, offset, set) \
- bcma_write32(cc, offset, bcma_read32(cc, offset) | (set))
-#define bcma_maskset32(cc, offset, mask, set) \
- bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set))
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+ u16 offset, u32 mask, u32 set)
+{
+ bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+ u16 offset, u16 mask, u16 set)
+{
+ bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
extern bool bcma_core_is_enabled(struct bcma_device *core);
extern void bcma_core_disable(struct bcma_device *core, u32 flags);
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1526d965ed0..a33086a7530 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -203,6 +203,7 @@
#define BCMA_CC_PMU_CTL 0x0600 /* PMU control */
#define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
#define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16
+#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400
#define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
#define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
#define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a08..3c1063acb2a 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but start the search at the current value of bit */
+#define for_each_set_bit_cont(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
static __inline__ int get_bitmask_order(unsigned int count)
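[Editor's sketch] The difference between the two iterators: for_each_set_bit() always restarts from bit 0, while the _cont variant resumes at whatever value bit already holds, so a scan can be interrupted and picked up later. A small illustration (the bitmap and the try_claim()/claim() handlers are hypothetical):

	unsigned long map[BITS_TO_LONGS(64)];
	unsigned int bit;

	for_each_set_bit(bit, map, 64) {
		if (!try_claim(bit))
			break;			/* e.g. temporarily out of resources */
	}

	/* ... later, resume the scan from the bit we stopped at ... */
	for_each_set_bit_cont(bit, map, 64)
		claim(bit);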
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c7a6d3b5bc7..94acd8172b5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
- request_fn_proc *,
- spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index ab344a52110..66d3e954eb6 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-unsigned long free_all_memory_core_early(int nodeid);
+extern unsigned long free_low_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644
index 00000000000..7702641f87e
--- /dev/null
+++ b/include/linux/can/platform/cc770.h
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN 0x01 /* Clock Out Enable */
+#define CPUIF_MUX 0x04 /* Multiplex */
+#define CPUIF_SLP 0x08 /* Sleep */
+#define CPUIF_PWD 0x10 /* Power Down Mode */
+#define CPUIF_DMC 0x20 /* Divide Memory Clock */
+#define CPUIF_DSC 0x40 /* Divide System Clock */
+#define CPUIF_RST 0x80 /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT 4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */
+#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */
+#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */
+
+struct cc770_platform_data {
+ u32 osc_freq; /* CAN bus oscillator frequency in Hz */
+
+ u8 cir; /* CPU Interface Register */
+ u8 cor; /* Clock Out Register */
+ u8 bcr; /* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H_ */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1b7f9d52501..e9b602151ca 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -319,7 +319,7 @@ struct cftype {
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
*/
- mode_t mode;
+ umode_t mode;
/*
* If non-zero, defines the maximum length of string that can
@@ -457,6 +457,28 @@ void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
/*
+ * Control Group taskset, used to pass around a set of tasks to cgroup_subsys
+ * methods.
+ */
+struct cgroup_taskset;
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
+int cgroup_taskset_size(struct cgroup_taskset *tset);
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
+ * @tset: taskset to iterate
+ */
+#define cgroup_taskset_for_each(task, skip_cgrp, tset) \
+ for ((task) = cgroup_taskset_first((tset)); (task); \
+ (task) = cgroup_taskset_next((tset))) \
+ if (!(skip_cgrp) || \
+ cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
+
+/*
* Control Group subsystem type.
* See Documentation/cgroups/cgroups.txt for details
*/
@@ -467,14 +489,11 @@ struct cgroup_subsys {
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *tsk);
- int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
+ struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct task_struct *tsk);
- void (*pre_attach)(struct cgroup *cgrp);
- void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
+ struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
- struct cgroup *old_cgrp, struct task_struct *tsk);
+ struct cgroup_taskset *tset);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task);
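[Editor's sketch] Under the reworked attach model a controller receives the whole set of tasks being moved instead of per-task callbacks. A minimal can_attach() walking the taskset with the iterator above might look like the following; the policy of rejecting kernel threads is purely illustrative:

static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* skip tasks that are already in cgrp */
	cgroup_taskset_for_each(task, cgrp, tset) {
		if (task->flags & PF_KTHREAD)
			return -EINVAL;
	}
	return 0;
}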
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index ac663c18776..0bd390ce98b 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -59,8 +59,16 @@ SUBSYS(net_cls)
SUBSYS(blkio)
#endif
+/* */
+
#ifdef CONFIG_CGROUP_PERF
SUBSYS(perf)
#endif
/* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 7213b52b2c0..b9d46fa154b 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -107,6 +107,28 @@ static inline void clk_unprepare(struct clk *clk)
}
#endif
+/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
+static inline int clk_prepare_enable(struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare(clk);
+ if (ret)
+ return ret;
+ ret = clk_enable(clk);
+ if (ret)
+ clk_unprepare(clk);
+
+ return ret;
+}
+
+/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
+static inline void clk_disable_unprepare(struct clk *clk)
+{
+ clk_disable(clk);
+ clk_unprepare(clk);
+}
+
/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
* This is only valid once the clock source has been enabled.
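[Editor's sketch] The intended pairing of the two new helpers in a driver's probe/remove path, assuming a hypothetical platform driver that keeps its clock in driver data (clk_prepare_enable() may sleep, so it must run in non-atomic context):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);

	clk_disable_unprepare(clk);	/* undo both steps */
	clk_put(clk);
	return 0;
}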
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f1..081147da056 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -71,7 +71,7 @@ struct timecounter {
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc: Pointer to cycle counter.
+ * @cc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
- * @cycle: a value returned by tc->cc->read()
+ * @cycle_tstamp: a value returned by tc->cc->read()
*
* Cycle counts that are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,10 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj: maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
+ * @cycle_last: most recent cycle counter value seen by ::read()
*/
struct clocksource {
/*
@@ -172,7 +174,7 @@ struct clocksource {
u32 mult;
u32 shift;
u64 max_idle_ns;
-
+ u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
@@ -186,6 +188,7 @@ struct clocksource {
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
+ /* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
@@ -260,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
/**
* clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles: cycles
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
*
* Converts cycles to nanoseconds, using the given mult and shift.
*
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 154bf568301..41c9f6515f4 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -422,9 +422,9 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
unsigned int nr_segs, unsigned int flags);
asmlinkage long compat_sys_open(const char __user *filename, int flags,
- int mode);
+ umode_t mode);
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
- int flags, int mode);
+ int flags, umode_t mode);
asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
extern void __user *compat_alloc_user_space(unsigned long len);
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index dfadc96e9d6..2f4079175af 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -29,6 +29,7 @@
the kernel context */
#define __cold __attribute__((__cold__))
+#define __linktime_error(message) __attribute__((__error__(message)))
#if __GNUC_MINOR__ >= 5
/*
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 320d6c94ff8..4a243546d14 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -293,7 +293,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
-
+#ifndef __linktime_error
+# define __linktime_error(message)
+#endif
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 3081c58d696..34025df6182 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -124,7 +124,7 @@ extern struct config_item *config_group_find_item(struct config_group *,
struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
- mode_t ca_mode;
+ umode_t ca_mode;
};
/*
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index f932093e20c..cf68ca4a508 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -35,8 +35,8 @@ struct cordic_iq {
* @theta: angle in degrees for which i/q coordinate is to be calculated.
* @coord: function output parameter holding the i/q coordinate.
*
- * The function calculates the i/q coordinate for a given angle using
- * cordic algorithm. The coordinate consists of a real (i) and an
+ * The function calculates the i/q coordinate for a given angle using the
+ * CORDIC algorithm. The coordinate consists of a real (i) and an
* imaginary (q) part. The real part is essentially the cosine of the
* angle and the imaginary part is the sine of the angle. The returned
* values are scaled by 2^16 for precision. The range for theta is
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6cb60fd2ea8..1f6587590a1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -14,7 +14,7 @@
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
@@ -22,19 +22,20 @@
struct cpu {
int node_id; /* The node which contains the CPU */
int hotpluggable; /* creates sysfs control file if hotpluggable */
- struct sys_device sysdev;
+ struct device dev;
};
extern int register_cpu(struct cpu *cpu, int num);
-extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+extern struct device *get_cpu_device(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
-extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
-extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+extern int cpu_add_dev_attr(struct device_attribute *attr);
+extern void cpu_remove_dev_attr(struct device_attribute *attr);
-extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
-extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+extern int sched_create_sysfs_power_savings_entries(struct device *dev);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
@@ -160,7 +161,7 @@ static inline void cpu_maps_update_done(void)
}
#endif /* CONFIG_SMP */
-extern struct sysdev_class cpu_sysdev_class;
+extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
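[Editor's sketch] With the cpu sysdev class replaced by a regular struct device on the cpu_subsys bus, per-CPU sysfs attributes become ordinary device attributes. A hedged sketch, assuming the device id holds the CPU number (the attribute name and handler are made up):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", dev->id);
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static int __init foo_init(void)
{
	return cpu_add_dev_attr(&dev_attr_foo);
}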
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 7408af843b8..23f81de5182 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -130,7 +130,6 @@ struct cpuidle_driver {
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern int cpuidle_idle_call(void);
-
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
struct cpuidle_driver *cpuidle_get_driver(void);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
@@ -145,7 +144,6 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
#else
static inline void disable_cpuidle(void) { }
static inline int cpuidle_idle_call(void) { return -ENODEV; }
-
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4df92619936..a47bda5f76d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -249,6 +249,7 @@ extern int d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
+extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
@@ -339,7 +340,8 @@ extern int d_validate(struct dentry *, struct dentry *);
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index e7d9b20ddc5..6169c26fd8c 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -16,6 +16,7 @@
#define _DEBUGFS_H_
#include <linux/fs.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
@@ -26,6 +27,17 @@ struct debugfs_blob_wrapper {
unsigned long size;
};
+struct debugfs_reg32 {
+ char *name;
+ unsigned long offset;
+};
+
+struct debugfs_regset32 {
+ struct debugfs_reg32 *regs;
+ int nregs;
+ void __iomem *base;
+};
+
extern struct dentry *arch_debugfs_dir;
#if defined(CONFIG_DEBUG_FS)
@@ -34,7 +46,7 @@ extern struct dentry *arch_debugfs_dir;
extern const struct file_operations debugfs_file_operations;
extern const struct inode_operations debugfs_link_operations;
-struct dentry *debugfs_create_file(const char *name, mode_t mode,
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -49,31 +61,38 @@ void debugfs_remove_recursive(struct dentry *dentry);
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
-struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, mode_t mode,
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
+struct dentry *debugfs_create_regset32(const char *name, mode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
+
+int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix);
+
bool debugfs_initialized(void);
#else
@@ -86,7 +105,7 @@ bool debugfs_initialized(void);
* want to duplicate the design decision mistakes of procfs and devfs again.
*/
-static inline struct dentry *debugfs_create_file(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
@@ -118,76 +137,83 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
struct dentry *parent,
u8 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
struct dentry *parent,
u16 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent,
u32 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent,
u64 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_x8(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent,
u8 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_x16(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
struct dentry *parent,
u16 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_x32(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
struct dentry *parent,
u32 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent,
size_t *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
u32 *value)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
+static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob)
{
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_regset32(const char *name,
+ mode_t mode, struct dentry *parent,
+ struct debugfs_regset32 *regset)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline bool debugfs_initialized(void)
{
return false;
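[Editor's sketch] The new regset helper replaces hand-rolled register-dump files: the driver describes its registers once and debugfs prints them. A hedged example; register names, offsets, struct foo_device and its fields are illustrative only:

static struct debugfs_reg32 foo_regs[] = {
	{ "ctrl",    0x00 },
	{ "status",  0x04 },
	{ "irqmask", 0x08 },
};

static struct debugfs_regset32 foo_regset = {
	.regs  = foo_regs,
	.nregs = ARRAY_SIZE(foo_regs),
};

static void foo_init_debugfs(struct foo_device *foo)
{
	/* call once the MMIO region is mapped */
	foo_regset.base = foo->mmio_base;
	debugfs_create_regset32("regdump", S_IRUGO, foo->debugfs_dir,
				&foo_regset);
}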
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 65970b811e2..0e5f5785d9f 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -46,6 +46,8 @@ struct debug_obj {
* fails
* @fixup_free: fixup function, which is called when the free check
* fails
+ * @fixup_assert_init: fixup function, which is called when the assert_init
+ * check fails
*/
struct debug_obj_descr {
const char *name;
@@ -54,6 +56,7 @@ struct debug_obj_descr {
int (*fixup_activate) (void *addr, enum debug_obj_state state);
int (*fixup_destroy) (void *addr, enum debug_obj_state state);
int (*fixup_free) (void *addr, enum debug_obj_state state);
+ int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
#ifdef CONFIG_DEBUG_OBJECTS
@@ -64,6 +67,7 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
/*
* Active state:
@@ -89,6 +93,8 @@ static inline void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
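[Editor's sketch] debug_object_assert_init() covers objects that may be used before any tracked init call, e.g. statically allocated ones. A descriptor sketch wiring up the new fixup hook, modelled on existing fixup handlers (all foo_* names are hypothetical):

static struct debug_obj_descr foo_debug_descr;

static int foo_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	if (state == ODEBUG_STATE_NOTAVAILABLE) {
		/* untracked (e.g. statically allocated) object: track it now */
		debug_object_init(addr, &foo_debug_descr);
		return 1;
	}
	return 0;
}

static struct debug_obj_descr foo_debug_descr = {
	.name			= "foo",
	.fixup_assert_init	= foo_fixup_assert_init,
};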
diff --git a/include/linux/device.h b/include/linux/device.h
index 3136ede5a1e..5b3adb8f958 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -53,6 +53,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* struct bus_type - The bus type of the device
*
* @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root: Default device to use as the parent.
* @bus_attrs: Default attributes of the bus.
* @dev_attrs: Default attributes of the devices on the bus.
* @drv_attrs: Default attributes of the device drivers on the bus.
@@ -86,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
*/
struct bus_type {
const char *name;
+ const char *dev_name;
+ struct device *dev_root;
struct bus_attribute *bus_attrs;
struct device_attribute *dev_attrs;
struct driver_attribute *drv_attrs;
@@ -106,12 +110,30 @@ struct bus_type {
struct subsys_private *p;
};
-extern int __must_check bus_register(struct bus_type *bus);
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define bus_register(subsys) \
+({ \
+ static struct lock_class_key __key; \
+ __bus_register(subsys, &__key); \
+})
+extern int __must_check __bus_register(struct bus_type *bus,
+ struct lock_class_key *key);
extern void bus_unregister(struct bus_type *bus);
extern int __must_check bus_rescan_devices(struct bus_type *bus);
/* iterator helpers for buses */
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+ struct bus_type *subsys,
+ struct device *start,
+ const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
@@ -121,10 +143,10 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start,
struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name);
-
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+ struct device *hint);
int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
-
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
@@ -256,6 +278,33 @@ struct device *driver_find_device(struct device_driver *drv,
int (*match)(struct device *dev, void *data));
/**
+ * struct subsys_interface - interfaces to device functions
+ * @name: name of the device function
+ * @subsys: subsystem of the devices to attach to
+ * @node: the list of functions registered at the subsystem
+ * @add_dev: device hookup to device function handler
+ * @remove_dev: device hookup to device function handler
+ *
+ * Simple interfaces attached to a subsystem. Multiple interfaces can
+ * attach to a subsystem and its devices. Unlike drivers, they do not
+ * exclusively claim or control devices. Interfaces usually represent
+ * a specific functionality of a subsystem/class of devices.
+ */
+struct subsys_interface {
+ const char *name;
+ struct bus_type *subsys;
+ struct list_head node;
+ int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+ int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+};
+
+int subsys_interface_register(struct subsys_interface *sif);
+void subsys_interface_unregister(struct subsys_interface *sif);
+
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups);
+
+/**
* struct class - device classes
* @name: Name of the class.
* @owner: The module owner.
@@ -294,7 +343,7 @@ struct class {
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, mode_t *mode);
+ char *(*devnode)(struct device *dev, umode_t *mode);
void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);
@@ -423,7 +472,7 @@ struct device_type {
const char *name;
const struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, mode_t *mode);
+ char *(*devnode)(struct device *dev, umode_t *mode);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
@@ -438,11 +487,31 @@ struct device_attribute {
const char *buf, size_t count);
};
-#define DEVICE_ATTR(_name, _mode, _show, _store) \
-struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+struct dev_ext_attribute {
+ struct device_attribute attr;
+ void *var;
+};
+
+ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
-extern int __must_check device_create_file(struct device *device,
- const struct device_attribute *entry);
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+#define DEVICE_INT_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+extern int device_create_file(struct device *device,
+ const struct device_attribute *entry);
extern void device_remove_file(struct device *dev,
const struct device_attribute *attr);
extern int __must_check device_create_bin_file(struct device *dev,
@@ -490,6 +559,9 @@ extern int devres_release_group(struct device *dev, void *id);
extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
extern void devm_kfree(struct device *dev, void *p);
+void __iomem *devm_request_and_ioremap(struct device *dev,
+ struct resource *res);
+
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -600,6 +672,7 @@ struct device {
struct device_node *of_node; /* associated device tree node */
dev_t devt; /* dev_t, creates the sysfs "dev" */
+ u32 id; /* device instance */
spinlock_t devres_lock;
struct list_head devres_head;
@@ -720,7 +793,7 @@ extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
extern const char *device_get_devnode(struct device *dev,
- mode_t *mode, const char **tmp);
+ umode_t *mode, const char **tmp);
extern void *dev_get_drvdata(const struct device *dev);
extern int dev_set_drvdata(struct device *dev, void *data);
@@ -924,4 +997,25 @@ extern long sysfs_deprecated;
#define sysfs_deprecated 0
#endif
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver)); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+ __unregister(&(__driver)); \
+} \
+module_exit(__driver##_exit);
+
#endif /* _DEVICE_H_ */
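[Editor's sketch] module_driver() is explicitly a building block for bus-specific convenience macros rather than something drivers call directly. How a hypothetical foo bus could wrap it, and how a driver then drops its init/exit boilerplate (foo_driver_register()/unregister() and the bar_* names are assumptions):

/* in the foo bus header */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)

/* in a driver: replaces module_init()/module_exit() plus the two stubs */
static struct foo_driver bar_driver = {
	.driver = {
		.name = "bar",
	},
	.probe  = bar_probe,
	.remove = bar_remove,
};
module_foo_driver(bar_driver);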
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index d4e02f5353a..6c7f6e9546c 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -74,15 +74,76 @@ struct dlm_lksb {
#ifdef __KERNEL__
+struct dlm_slot {
+ int nodeid; /* 1 to MAX_INT */
+ int slot; /* 1 to MAX_INT */
+};
+
+/*
+ * recover_prep: called before the dlm begins lock recovery.
+ * Notifies the lockspace user that locks from failed members will be granted.
+ * recover_slot: called after recover_prep and before recover_done.
+ * Identifies a failed lockspace member.
+ * recover_done: called after the dlm completes lock recovery.
+ * Identifies lockspace members and lockspace generation number.
+ */
+
+struct dlm_lockspace_ops {
+ void (*recover_prep) (void *ops_arg);
+ void (*recover_slot) (void *ops_arg, struct dlm_slot *slot);
+ void (*recover_done) (void *ops_arg, struct dlm_slot *slots,
+ int num_slots, int our_slot, uint32_t generation);
+};
+
/*
* dlm_new_lockspace
*
- * Starts a lockspace with the given name. If the named lockspace exists in
- * the cluster, the calling node joins it.
+ * Create/join a lockspace.
+ *
+ * name: lockspace name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ * including terminating null).
+ *
+ * cluster: cluster name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ * including terminating null). Optional. When cluster is null, it
+ * is not used. When set, dlm_new_lockspace() returns -EBADR if cluster
+ * is not equal to the dlm cluster name.
+ *
+ * flags:
+ * DLM_LSFL_NODIR
+ * The dlm should not use a resource directory, but statically assign
+ * resource mastery to nodes based on the name hash that is otherwise
+ * used to select the directory node. Must be the same on all nodes.
+ * DLM_LSFL_TIMEWARN
+ * The dlm should emit netlink messages if locks have been waiting
+ * for a configurable amount of time. (Unused.)
+ * DLM_LSFL_FS
+ * The lockspace user is in the kernel (i.e. filesystem). Enables
+ * direct bast/cast callbacks.
+ * DLM_LSFL_NEWEXCL
+ * dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ *
+ * lvblen: length of lvb in bytes. Must be multiple of 8.
+ * dlm_new_lockspace() returns an error if this does not match
+ * what other nodes are using.
+ *
+ * ops: callbacks that indicate lockspace recovery points so the
+ * caller can coordinate its recovery and know lockspace members.
+ * This is only used by the initial dlm_new_lockspace() call.
+ * Optional.
+ *
+ * ops_arg: arg for ops callbacks.
+ *
+ * ops_result: tells caller if the ops callbacks (if provided) will
+ * be used or not. 0: will be used, -EXXX will not be used.
+ * -EOPNOTSUPP: the dlm does not have recovery_callbacks enabled.
+ *
+ * lockspace: handle for dlm functions
*/
-int dlm_new_lockspace(const char *name, int namelen,
- dlm_lockspace_t **lockspace, uint32_t flags, int lvblen);
+int dlm_new_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops, void *ops_arg,
+ int *ops_result, dlm_lockspace_t **lockspace);
/*
* dlm_release_lockspace
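[Editor's sketch] With the reworked prototype a kernel user passes the cluster name, the recovery callbacks and their argument directly at create/join time. A sketch assuming a filesystem-style user (the foo_* callbacks and private pointer are illustrative; lvblen must be a multiple of 8):

static const struct dlm_lockspace_ops foo_ls_ops = {
	.recover_prep = foo_recover_prep,
	.recover_slot = foo_recover_slot,
	.recover_done = foo_recover_done,
};

static int foo_join_lockspace(void *foo_private, dlm_lockspace_t **ls)
{
	int ops_result, error;

	error = dlm_new_lockspace("foofs", NULL /* don't check cluster name */,
				  DLM_LSFL_FS | DLM_LSFL_NEWEXCL, 32,
				  &foo_ls_ops, foo_private, &ops_result, ls);
	if (!error && ops_result < 0)
		pr_warn("lockspace joined, recovery callbacks unavailable\n");
	return error;
}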
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
new file mode 100644
index 00000000000..f8ac076afa5
--- /dev/null
+++ b/include/linux/dma-buf.h
@@ -0,0 +1,176 @@
+/*
+ * Header file for dma buffer sharing framework.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_BUF_H__
+#define __DMA_BUF_H__
+
+#include <linux/file.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+struct dma_buf;
+struct dma_buf_attachment;
+
+/**
+ * struct dma_buf_ops - operations possible on struct dma_buf
+ * @attach: [optional] allows different devices to 'attach' themselves to the
+ * given buffer. It might return -EBUSY to signal that backing storage
+ * is already allocated and incompatible with the requirements
+ * of requesting device.
+ * @detach: [optional] detach a given device from this buffer.
+ * @map_dma_buf: returns list of scatter pages allocated, increases usecount
+ * of the buffer. Requires at least one attach to be called
+ * before. Returned sg list should already be mapped into
+ * _device_ address space. This call may sleep. May also return
+ * -EINTR. Should return -EINVAL if attach hasn't been called yet.
+ * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
+ * pages.
+ * @release: release this buffer; to be called after the last dma_buf_put.
+ */
+struct dma_buf_ops {
+ int (*attach)(struct dma_buf *, struct device *,
+ struct dma_buf_attachment *);
+
+ void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
+
+ /* For {map,unmap}_dma_buf below, any specific buffer attributes
+ * required should get added to device_dma_parameters accessible
+ * via dev->dma_params.
+ */
+ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+ enum dma_data_direction);
+ void (*unmap_dma_buf)(struct dma_buf_attachment *,
+ struct sg_table *);
+ /* TODO: Add try_map_dma_buf version, to return immediately with -EBUSY
+ * if the call would block.
+ */
+
+ /* after final dma_buf_put() */
+ void (*release)(struct dma_buf *);
+
+};
+
+/**
+ * struct dma_buf - shared buffer object
+ * @size: size of the buffer
+ * @file: file pointer used for sharing buffers across, and for refcounting.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @ops: dma_buf_ops associated with this buffer object.
+ * @priv: exporter specific private data for this buffer object.
+ */
+struct dma_buf {
+ size_t size;
+ struct file *file;
+ struct list_head attachments;
+ const struct dma_buf_ops *ops;
+ /* mutex to serialize list manipulation and other ops */
+ struct mutex lock;
+ void *priv;
+};
+
+/**
+ * struct dma_buf_attachment - holds device-buffer attachment data
+ * @dmabuf: buffer for this attachment.
+ * @dev: device attached to the buffer.
+ * @node: list of dma_buf_attachment.
+ * @priv: exporter specific attachment data.
+ *
+ * This structure holds the attachment information between the dma_buf buffer
+ * and its user device(s). The list contains one attachment struct per device
+ * attached to the buffer.
+ */
+struct dma_buf_attachment {
+ struct dma_buf *dmabuf;
+ struct device *dev;
+ struct list_head node;
+ void *priv;
+};
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev);
+void dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *dmabuf_attach);
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+ size_t size, int flags);
+int dma_buf_fd(struct dma_buf *dmabuf);
+struct dma_buf *dma_buf_get(int fd);
+void dma_buf_put(struct dma_buf *dmabuf);
+
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
+ enum dma_data_direction);
+void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *);
+#else
+
+static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *dmabuf_attach)
+{
+ return;
+}
+
+static inline struct dma_buf *dma_buf_export(void *priv,
+ struct dma_buf_ops *ops,
+ size_t size, int flags)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int dma_buf_fd(struct dma_buf *dmabuf)
+{
+ return -ENODEV;
+}
+
+static inline struct dma_buf *dma_buf_get(int fd)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_put(struct dma_buf *dmabuf)
+{
+ return;
+}
+
+static inline struct sg_table *dma_buf_map_attachment(
+ struct dma_buf_attachment *attach, enum dma_data_direction write)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ return;
+}
+
+#endif /* CONFIG_DMA_SHARED_BUFFER */
+
+#endif /* __DMA_BUF_H__ */
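[Editor's sketch] The importer-side flow implied by the ops documentation above is fd -> dma_buf -> attachment -> sg_table. A hedged sketch of that sequence for a hypothetical importing device, with error unwinding kept minimal:

static struct sg_table *foo_import(struct device *dev, int fd,
				   struct dma_buf_attachment **out_attach)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a reference */
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	/* may sleep; the sg list comes back mapped into this device's space */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	*out_attach = attach;
	return sgt;
}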
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index ef90cbd8e17..57c9a8ae4f2 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
+extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
#endif
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644
index 00000000000..5621547d631
--- /dev/null
+++ b/include/linux/dynamic_queue_limits.h
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue). Such a queue would have these general properties:
+ *
+ * 1) Objects are queued up to some limit specified as number of objects.
+ * 2) Periodically a completion process executes which retires consumed
+ * objects.
+ * 3) Starvation occurs when limit has been reached, all queued data has
+ * actually been consumed, but completion processing has not yet run
+ * so queuing new data is blocked.
+ * 4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ * dql_queued - called when objects are enqueued to record number of objects
+ * dql_avail - returns how many objects are available to be queued based
+ * on the object limit and how many objects are already enqueued
+ * dql_completed - called at completion time to indicate how many objects
+ * were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures, the higher layer should provide this. dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed. However, dql_queued and dql_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+ /* Fields accessed in enqueue path (dql_queued) */
+ unsigned int num_queued; /* Total ever queued */
+ unsigned int adj_limit; /* limit + num_completed */
+ unsigned int last_obj_cnt; /* Count at last queuing */
+
+ /* Fields accessed only by completion path (dql_completed) */
+
+ unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
+ unsigned int num_completed; /* Total ever completed */
+
+ unsigned int prev_ovlimit; /* Previous over limit */
+ unsigned int prev_num_queued; /* Previous queue total */
+ unsigned int prev_last_obj_cnt; /* Previous queuing cnt */
+
+ unsigned int lowest_slack; /* Lowest slack found */
+ unsigned long slack_start_time; /* Time slacks seen */
+
+ /* Configuration */
+ unsigned int max_limit; /* Max limit */
+ unsigned int min_limit; /* Minimum limit */
+ unsigned int slack_hold_time; /* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+ BUG_ON(count > DQL_MAX_OBJECT);
+
+ dql->num_queued += count;
+ dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+ return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DQL_H */
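[Editor's sketch] The intended pairing of the three primitives, one sketch per path; struct foo_queue and foo_wake() are hypothetical, and locking is the caller's responsibility as the header comment notes:

/* enqueue path, serialized by e.g. the TX lock */
static bool foo_try_queue(struct foo_queue *q, unsigned int count)
{
	if (dql_avail(&q->dql) < 0)
		return false;			/* over limit: stop producing */
	dql_queued(&q->dql, count);
	return true;
}

/* completion path, may run under a different lock */
static void foo_complete(struct foo_queue *q, unsigned int count)
{
	dql_completed(&q->dql, count);		/* recomputes the limit */
	if (dql_avail(&q->dql) >= 0)
		foo_wake(q);			/* room again, restart producers */
}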
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 055b248bdd5..1cd3947987e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -13,7 +13,7 @@
#define _LINUX_EDAC_H_
#include <linux/atomic.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#define EDAC_OPSTATE_INVAL -1
#define EDAC_OPSTATE_POLL 0
@@ -23,12 +23,12 @@
extern int edac_op_state;
extern int edac_err_assert;
extern atomic_t edac_handlers;
-extern struct sysdev_class edac_class;
+extern struct bus_type edac_subsys;
extern int edac_handler_set(void);
extern void edac_atomic_assert_error(void);
-extern struct sysdev_class *edac_get_sysfs_class(void);
-extern void edac_put_sysfs_class(void);
+extern struct bus_type *edac_get_sysfs_subsys(void);
+extern void edac_put_sysfs_subsys(void);
static inline void opstate_init(void)
{
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c4627cbdb8e..e50f98b0297 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -33,6 +33,7 @@
#define PCI_EEPROM_WIDTH_93C86 8
#define PCI_EEPROM_WIDTH_OPCODE 3
#define PCI_EEPROM_WRITE_OPCODE 0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
#define PCI_EEPROM_READ_OPCODE 0x06
#define PCI_EEPROM_EWDS_OPCODE 0x10
#define PCI_EEPROM_EWEN_OPCODE 0x13
@@ -46,6 +47,7 @@
* @register_write(struct eeprom_93cx6 *eeprom): handler to
* write to the eeprom register by using all reg_* fields.
* @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
* @reg_data_in: register field to indicate data input
* @reg_data_out: register field to indicate data output
* @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@ struct eeprom_93cx6 {
int width;
+ char drive_data;
char reg_data_in;
char reg_data_out;
char reg_data_clock;
@@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
const u8 word, u16 *data);
extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+ u8 addr, u16 data);
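[Editor's sketch] The new write support presumably follows the usual 93cx6 sequence of unlocking writes, writing the word, and locking the chip again; a hedged sketch, with the eeprom pointer coming from the driver's existing read setup and foo_eeprom_update() being a made-up name:

static void foo_eeprom_update(struct eeprom_93cx6 *eeprom, u8 word, u16 value)
{
	eeprom_93cx6_wren(eeprom, true);	/* EWEN: unlock writes */
	eeprom_93cx6_write(eeprom, word, value);
	eeprom_93cx6_wren(eeprom, false);	/* EWDS: lock again */
}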
diff --git a/include/linux/elf-em.h b/include/linux/elf-em.h
index 18bea78fe47..8e2b7bac437 100644
--- a/include/linux/elf-em.h
+++ b/include/linux/elf-em.h
@@ -33,6 +33,7 @@
#define EM_H8_300 46 /* Renesas H8/300,300H,H8S */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
+#define EM_TI_C6000 140 /* TI C6X DSPs */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 034072cea85..fd0628be45c 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -17,14 +17,15 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_LOCAL 1
#define SO_EE_ORIGIN_ICMP 2
#define SO_EE_ORIGIN_ICMP6 3
-#define SO_EE_ORIGIN_TIMESTAMPING 4
+#define SO_EE_ORIGIN_TXSTATUS 4
+#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
#ifdef __KERNEL__
#include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
@@ -33,7 +34,7 @@ struct sock_extended_err {
struct sock_exterr_skb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index de33de1e205..da5b2de99ae 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -489,7 +489,10 @@ struct ethtool_rx_flow_spec {
* on return.
*
* For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
- * rules on return.
+ * rules on return. If @data is non-zero on return then it is the
+ * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the
+ * driver supports any special location values. If that flag is not
+ * set in @data then special location values should not be used.
*
* For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an
* existing rule on entry and @fs contains the rule on return.
@@ -501,10 +504,23 @@ struct ethtool_rx_flow_spec {
* must use the second parameter to get_rxnfc() instead of @rule_locs.
*
* For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update.
- * @fs.@location specifies the location to use and must not be ignored.
+ * @fs.@location either specifies the location to use or is a special
+ * location value with %RX_CLS_LOC_SPECIAL flag set. On return,
+ * @fs.@location is the actual rule location.
*
* For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an
* existing rule on entry.
+ *
+ * A driver supporting the special location values for
+ * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
+ * location, and may remove a rule at a later location (lower
+ * priority) that matches exactly the same set of flows. The special
+ * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
+ * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
+ * location (minimum priority). Additional special values may be
+ * defined in future and drivers must return -%EINVAL for any
+ * unrecognised value.
*/
struct ethtool_rxnfc {
__u32 cmd;
@@ -543,9 +559,15 @@ struct compat_ethtool_rxnfc {
/**
* struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
* @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
- * @size: On entry, the array size of the user buffer. On return from
- * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
+ * @size: On entry, the array size of the user buffer, which may be zero.
+ * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware
+ * indirection table.
* @ring_index: RX ring/queue index for each hash value
+ *
+ * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size
+ * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means
+ * the table should be reset to default values. This last feature
+ * is not supported by the original implementations.
*/
struct ethtool_rxfh_indir {
__u32 cmd;
@@ -724,9 +746,6 @@ enum ethtool_sfeatures_retval_bits {
#include <linux/rculist.h>
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
extern int __ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd);
@@ -750,19 +769,18 @@ struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+ return index % n_rx_rings;
+}
/**
* struct ethtool_ops - optional netdev operations
@@ -807,22 +825,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
- * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
- * Report whether receive checksums are turned on or off.
- * @set_rx_csum: Deprecated in favour of generic netdev features. Turn
- * receive checksum on or off. Returns a negative error code or zero.
- * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
- * are turned on or off.
- * @set_tx_csum: Deprecated in favour of generic netdev features. Turn
- * transmit checksums on or off. Returns a negative error code or zero.
- * @get_sg: Deprecated as redundant. Report whether scatter-gather is
- * enabled.
- * @set_sg: Deprecated in favour of generic netdev features. Turn
- * scatter-gather on or off. Returns a negative error code or zero.
- * @get_tso: Deprecated as redundant. Report whether TCP segmentation
- * offload is enabled.
- * @set_tso: Deprecated in favour of generic netdev features. Turn TCP
- * segmentation offload on or off. Returns a negative error code or zero.
* @self_test: Run specified self-tests
* @get_strings: Return a set of strings that describe the requested objects
* @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +846,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
* negative error code or zero.
* @complete: Function to be called after any other operation except
* @begin. Will be called even if the other operation failed.
- * @get_ufo: Deprecated as redundant. Report whether UDP fragmentation
- * offload is enabled.
- * @set_ufo: Deprecated in favour of generic netdev features. Turn UDP
- * fragmentation offload on or off. Returns a negative error code or zero.
- * @get_flags: Deprecated as redundant. Report features included in
- * &enum ethtool_flags that are enabled.
- * @set_flags: Deprecated in favour of generic netdev features. Turn
- * features included in &enum ethtool_flags on or off. Returns a
- * negative error code or zero.
* @get_priv_flags: Report driver-specific feature flags.
* @set_priv_flags: Set driver-specific feature flags. Returns a negative
* error code or zero.
@@ -866,11 +859,13 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
* @reset: Reset (part of) the device, as specified by a bitmask of
* flags from &enum ethtool_reset_flags. Returns a negative
* error code or zero.
- * @set_rx_ntuple: Set an RX n-tuple rule. Returns a negative error code
- * or zero.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ * Returns zero if not supported for this specific device.
* @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
+ * Will not be called if @get_rxfh_indir_size returns zero.
* Returns a negative error code or zero.
* @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
+ * Will not be called if @get_rxfh_indir_size returns zero.
* Returns a negative error code or zero.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
@@ -917,14 +912,6 @@ struct ethtool_ops {
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
- u32 (*get_rx_csum)(struct net_device *);
- int (*set_rx_csum)(struct net_device *, u32);
- u32 (*get_tx_csum)(struct net_device *);
- int (*set_tx_csum)(struct net_device *, u32);
- u32 (*get_sg)(struct net_device *);
- int (*set_sg)(struct net_device *, u32);
- u32 (*get_tso)(struct net_device *);
- int (*set_tso)(struct net_device *, u32);
void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
void (*get_strings)(struct net_device *, u32 stringset, u8 *);
int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +919,6 @@ struct ethtool_ops {
struct ethtool_stats *, u64 *);
int (*begin)(struct net_device *);
void (*complete)(struct net_device *);
- u32 (*get_ufo)(struct net_device *);
- int (*set_ufo)(struct net_device *, u32);
- u32 (*get_flags)(struct net_device *);
- int (*set_flags)(struct net_device *, u32);
u32 (*get_priv_flags)(struct net_device *);
int (*set_priv_flags)(struct net_device *, u32);
int (*get_sset_count)(struct net_device *, int);
@@ -944,12 +927,9 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
- int (*set_rx_ntuple)(struct net_device *,
- struct ethtool_rx_ntuple *);
- int (*get_rxfh_indir)(struct net_device *,
- struct ethtool_rxfh_indir *);
- int (*set_rxfh_indir)(struct net_device *,
- const struct ethtool_rxfh_indir *);
+ u32 (*get_rxfh_indir_size)(struct net_device *);
+ int (*get_rxfh_indir)(struct net_device *, u32 *);
+ int (*set_rxfh_indir)(struct net_device *, const u32 *);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1173,6 +1153,12 @@ struct ethtool_ops {
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
+/* Special RX classification rule insert location values */
+#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
+#define RX_CLS_LOC_ANY 0xffffffff
+#define RX_CLS_LOC_FIRST 0xfffffffe
+#define RX_CLS_LOC_LAST 0xfffffffd
+
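/*
 * A minimal sketch of the validation a driver supporting these special
 * values might perform for %ETHTOOL_SRXCLSRLINS; "foo" and n_rules are
 * hypothetical, but the -EINVAL requirement for unrecognised special
 * values follows the struct ethtool_rxnfc comment above.
 */
static int foo_check_rule_location(u32 location, u32 n_rules)
{
	if (location & RX_CLS_LOC_SPECIAL) {
		if (location != RX_CLS_LOC_ANY &&
		    location != RX_CLS_LOC_FIRST &&
		    location != RX_CLS_LOC_LAST)
			return -EINVAL;	/* unknown special value */
		return 0;		/* driver picks the slot itself */
	}
	return location < n_rules ? 0 : -EINVAL;
}
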
/* Reset flags */
/* The reset() operation must clear the flags for the components which
* were actually reset. On successful return, the flags indicate the
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dec99116a0e..f957085d40e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -884,7 +884,7 @@ extern int ext3fs_dirhash(const char *name, int len, struct
/* ialloc.c */
extern struct inode * ext3_new_inode (handle_t *, struct inode *,
- const struct qstr *, int);
+ const struct qstr *, umode_t);
extern void ext3_free_inode (handle_t *, struct inode *);
extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
extern unsigned long ext3_count_free_inodes (struct super_block *);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee75..0ab54e16a91 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/atomic.h>
#ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+extern bool pm_freezing; /* PM freezing in effect */
+extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
+
/*
* Check if a process has been frozen
*/
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
{
return p->flags & PF_FROZEN;
}
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
- return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
- set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
/*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
*/
-static inline void clear_freeze_flag(struct task_struct *p)
-{
- clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
{
- return !(p->flags & PF_FREEZER_NOSIG);
+ if (likely(!atomic_read(&system_freezing_cnt)))
+ return false;
+ return freezing_slow_path(p);
}
/* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
{
- if (freezing(current)) {
- refrigerator();
- return 1;
- } else
- return 0;
+ might_sleep();
+ if (likely(!freezing(current)))
+ return false;
+ return __refrigerator(false);
}
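
/*
 * A minimal sketch of a freezable kernel thread built on the new
 * bool-returning helpers; assumes <linux/kthread.h> and a hypothetical
 * foo_do_work().
 */
static int foo_kthread(void *unused)
{
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the system freezes */
		foo_do_work();
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}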
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
{
- return 0;
+ return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
* appropriately in case the child has exited before the freezing of tasks is
* complete. However, we don't want kernel threads to be frozen in unexpected
* places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
*/
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
static inline void freezer_do_not_count(void)
{
- if (current->mm)
- current->flags |= PF_FREEZER_SKIP;
+ current->flags |= PF_FREEZER_SKIP;
}
/*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
*/
static inline void freezer_count(void)
{
- if (current->mm) {
- current->flags &= ~PF_FREEZER_SKIP;
- try_to_freeze();
- }
+ current->flags &= ~PF_FREEZER_SKIP;
+ try_to_freeze();
}
/*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
}
/*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want to allow a task
+ * that's sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be
+ * frozen. Note that neither returns any clear indication of whether a
+ * freeze event happened while inside them.
*/
-static inline void set_freezable(void)
-{
- current->flags &= ~PF_NOFREEZE;
-}
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
- current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule() \
+({ \
+ freezer_do_not_count(); \
+ schedule(); \
+ freezer_count(); \
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout) \
+({ \
+ long __retval; \
+ freezer_do_not_count(); \
+ __retval = schedule_timeout_killable(timeout); \
+ freezer_count(); \
+ __retval; \
+})
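
/*
 * A minimal sketch of the kind of TASK_UNINTERRUPTIBLE wait these macros
 * are meant for, using freezable_schedule() in place of schedule();
 * "wq" and "done" belong to a hypothetical caller.
 */
static void foo_wait_done(wait_queue_head_t *wq, bool *done)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (*done)
			break;
		freezable_schedule();	/* sleeps, but does not block the freezer */
	}
	finish_wait(wq, &wait);
}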
/*
* Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
#define wait_event_freezable(wq, condition) \
({ \
int __retval; \
- do { \
+ for (;;) { \
__retval = wait_event_interruptible(wq, \
(condition) || freezing(current)); \
- if (__retval && !freezing(current)) \
+ if (__retval || (condition)) \
break; \
- else if (!(condition)) \
- __retval = -ERESTARTSYS; \
- } while (try_to_freeze()); \
+ try_to_freeze(); \
+ } \
__retval; \
})
-
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
long __retval = timeout; \
- do { \
+ for (;;) { \
__retval = wait_event_interruptible_timeout(wq, \
(condition) || freezing(current), \
__retval); \
- } while (try_to_freeze()); \
+ if (__retval <= 0 || (condition)) \
+ break; \
+ try_to_freeze(); \
+ } \
__retval; \
})
+
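/*
 * A minimal sketch of a work-processing thread using the reworked
 * wait_event_freezable(); assumes <linux/kthread.h>, and the wait queue
 * plus the foo_*() helpers are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(foo_wq);

static int foo_worker(void *unused)
{
	set_freezable();

	while (!kthread_should_stop()) {
		wait_event_freezable(foo_wq,
				     foo_have_work() || kthread_should_stop());
		if (foo_have_work())
			foo_process_work();
	}
	return 0;
}
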
#else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule() schedule()
+
+#define freezable_schedule_timeout_killable(timeout) \
+ schedule_timeout_killable(timeout)
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3130220ce3..7aacf31418f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
#include <linux/atomic.h>
+#include <linux/shrinker.h>
#include <asm/byteorder.h>
@@ -1428,6 +1428,7 @@ struct super_block {
#else
struct list_head s_files;
#endif
+ struct list_head s_mounts; /* list of mounts; _not_ for fs use */
/* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
struct list_head s_dentry_lru; /* unused dentry lru */
int s_nr_dentry_unused; /* # of dentry on lru */
@@ -1440,7 +1441,7 @@ struct super_block {
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
- struct list_head s_instances;
+ struct hlist_node s_instances;
struct quota_info s_dquot; /* Diskquota specific options */
int s_frozen;
@@ -1481,6 +1482,12 @@ struct super_block {
int cleancache_poolid;
struct shrinker s_shrink; /* per-sb shrinker handle */
+
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
+ /* Being remounted read-only */
+ int s_readonly_remount;
};
/* superblock cache pruning functions */
@@ -1516,9 +1523,9 @@ extern void unlock_super(struct super_block *);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-extern int vfs_mkdir(struct inode *, struct dentry *, int);
-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
@@ -1534,7 +1541,7 @@ extern void dentry_unhash(struct dentry *dentry);
* VFS file helper functions.
*/
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- mode_t mode);
+ umode_t mode);
/*
* VFS FS_IOC_FIEMAP helper definitions.
*/
@@ -1619,13 +1626,13 @@ struct inode_operations {
int (*readlink) (struct dentry *, char __user *,int);
void (*put_link) (struct dentry *, struct nameidata *, void *);
- int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+ int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,int);
+ int (*mkdir) (struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+ int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
void (*truncate) (struct inode *);
@@ -1672,10 +1679,10 @@ struct super_operations {
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);
- int (*show_options)(struct seq_file *, struct vfsmount *);
- int (*show_devname)(struct seq_file *, struct vfsmount *);
- int (*show_path)(struct seq_file *, struct vfsmount *);
- int (*show_stats)(struct seq_file *, struct vfsmount *);
+ int (*show_options)(struct seq_file *, struct dentry *);
+ int (*show_devname)(struct seq_file *, struct dentry *);
+ int (*show_path)(struct seq_file *, struct dentry *);
+ int (*show_stats)(struct seq_file *, struct dentry *);
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
@@ -1764,31 +1771,10 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
-/**
- * set_nlink - directly set an inode's link count
- * @inode: inode
- * @nlink: new nlink (should be non-zero)
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink.
- */
-static inline void set_nlink(struct inode *inode, unsigned int nlink)
-{
- inode->__i_nlink = nlink;
-}
-
-/**
- * inc_nlink - directly increment an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink. Currently,
- * it is only here for parity with dec_nlink().
- */
-static inline void inc_nlink(struct inode *inode)
-{
- inode->__i_nlink++;
-}
+extern void inc_nlink(struct inode *inode);
+extern void drop_nlink(struct inode *inode);
+extern void clear_nlink(struct inode *inode);
+extern void set_nlink(struct inode *inode, unsigned int nlink);
static inline void inode_inc_link_count(struct inode *inode)
{
@@ -1796,35 +1782,6 @@ static inline void inode_inc_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * drop_nlink - directly drop an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink. In cases
- * where we are attempting to track writes to the
- * filesystem, a decrement to zero means an imminent
- * write when the file is truncated and actually unlinked
- * on the filesystem.
- */
-static inline void drop_nlink(struct inode *inode)
-{
- inode->__i_nlink--;
-}
-
-/**
- * clear_nlink - directly zero an inode's link count
- * @inode: inode
- *
- * This is a low-level filesystem helper to replace any
- * direct filesystem manipulation of i_nlink. See
- * drop_nlink() for why we care about i_nlink hitting zero.
- */
-static inline void clear_nlink(struct inode *inode)
-{
- inode->__i_nlink = 0;
-}
-
static inline void inode_dec_link_count(struct inode *inode)
{
drop_nlink(inode);
@@ -1864,7 +1821,7 @@ struct file_system_type {
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
- struct list_head fs_supers;
+ struct hlist_head fs_supers;
struct lock_class_key s_lock_key;
struct lock_class_key s_umount_key;
@@ -1939,9 +1896,10 @@ extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
+extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
extern int current_umask(void);
@@ -2053,8 +2011,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
extern int do_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
- int mode);
-extern struct file *filp_open(const char *, int, int);
+ umode_t mode);
+extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
@@ -2091,6 +2049,7 @@ extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
+extern void kill_bdev(struct block_device *);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
@@ -2098,6 +2057,7 @@ extern int fsync_bdev(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
+static inline void kill_bdev(struct block_device *bdev) {}
static inline void invalidate_bdev(struct block_device *bdev) {}
static inline struct super_block *freeze_bdev(struct block_device *sb)
@@ -2190,8 +2150,6 @@ extern const struct file_operations read_pipefifo_fops;
extern const struct file_operations write_pipefifo_fops;
extern const struct file_operations rdwr_pipefifo_fops;
-extern int fs_may_remount_ro(struct super_block *);
-
#ifdef CONFIG_BLOCK
/*
* return READ, READA, or WRITE
@@ -2414,6 +2372,7 @@ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
+extern void block_sync_page(struct page *page);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
@@ -2530,7 +2489,6 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_active_super(struct block_device *bdev);
-extern struct super_block *user_get_super(dev_t);
extern void drop_super(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern void iterate_supers_type(struct file_system_type *,
@@ -2589,7 +2547,7 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr);
extern void file_update_time(struct file *file);
-extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
+extern int generic_show_options(struct seq_file *m, struct dentry *root);
extern void save_mount_options(struct super_block *sb, char *options);
extern void replace_mount_options(struct super_block *sb, char *options);
@@ -2690,7 +2648,7 @@ int __init get_filesystem_list(char *buf);
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
(flag & __FMODE_NONOTIFY)))
-static inline int is_sxid(mode_t mode)
+static inline int is_sxid(umode_t mode)
{
return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 96efa6794ea..c3da42dd22b 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -172,6 +172,7 @@ enum {
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT,
};
enum {
@@ -179,6 +180,7 @@ enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+ TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
};
struct ftrace_event_call {
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 61549b26ad6..73c28dea10a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -85,6 +85,30 @@ enum {
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_genl_is_held(void);
+#endif
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller either holds
+ * rcu_read_lock() or the genl mutex. Note: please prefer genl_dereference()
+ * or rcu_dereference().
+ */
+#define rcu_dereference_genl(p) \
+ rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds genl mutex.
+ */
+#define genl_dereference(p) \
+ rcu_dereference_protected(p, lockdep_genl_is_held())
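
/*
 * A minimal sketch of the intended usage; "foo_conf" is a hypothetical
 * RCU-protected pointer updated only under the genl mutex.
 */
static struct foo_conf __rcu *foo_conf;

static struct foo_conf *foo_conf_get_rcu(void)
{
	/* reader side: inside rcu_read_lock() or holding the genl mutex */
	return rcu_dereference_genl(foo_conf);
}

static struct foo_conf *foo_conf_get_locked(void)
{
	/* update side: caller holds the genl mutex via genl_lock() */
	return genl_dereference(foo_conf);
}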
#endif /* __KERNEL__ */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6d18f3531f1..fe23ee76858 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -163,7 +163,7 @@ struct gendisk {
* disks that can't be partitioned. */
char disk_name[DISK_NAME_LEN]; /* name of major driver */
- char *(*devnode)(struct gendisk *gd, mode_t *mode);
+ char *(*devnode)(struct gendisk *gd, umode_t *mode);
unsigned int events; /* supported events */
unsigned int async_events; /* async events, subset of all */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3a76faf6a3e..581e74b7df9 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct;
#endif
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
+#define ___GFP_WRITE 0x1000000u
/*
* GFP bitmasks..
@@ -85,6 +86,7 @@ struct vm_area_struct;
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
/*
* This may seem redundant, but it's a way of annotating false positives vs.
@@ -92,7 +94,7 @@ struct vm_area_struct;
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
@@ -313,7 +315,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
- VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
@@ -358,6 +360,7 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
+extern void free_hot_cold_page_list(struct list_head *list, int cold);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
@@ -367,9 +370,25 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);
+/*
+ * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
+ * GFP flags are used before interrupts are enabled. Once interrupts are
+ * enabled, it is set to __GFP_BITS_MASK while the system is running. During
+ * hibernation, it is used by PM to avoid I/O during memory allocation while
+ * devices are suspended.
+ */
extern gfp_t gfp_allowed_mask;
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
+#ifdef CONFIG_PM_SLEEP
+extern bool pm_suspended_storage(void);
+#else
+static inline bool pm_suspended_storage(void)
+{
+ return false;
+}
+#endif /* CONFIG_PM_SLEEP */
+
#endif /* __LINUX_GFP_H */
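
/*
 * A minimal sketch of passing the new __GFP_WRITE hint when allocating a
 * page the caller is about to dirty; the flag combination is illustrative,
 * not a recommendation for any particular filesystem.
 */
static struct page *foo_alloc_page_for_write(void)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_WRITE);
}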
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
new file mode 100644
index 00000000000..05071ee34c3
--- /dev/null
+++ b/include/linux/gpio-pxa.h
@@ -0,0 +1,16 @@
+#ifndef __GPIO_PXA_H
+#define __GPIO_PXA_H
+
+#define GPIO_bit(x) (1 << ((x) & 0x1f))
+
+#define gpio_to_bank(gpio) ((gpio) >> 5)
+
+/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85).
+ * Those cases currently cause holes in the GPIO number space, the
+ * actual number of the last GPIO is recorded by 'pxa_last_gpio'.
+ */
+extern int pxa_last_gpio;
+
+extern int pxa_irq_to_gpio(int irq);
+
+#endif /* __GPIO_PXA_H */
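
/*
 * A minimal sketch of how the helpers above combine to test a pin level;
 * the 4-byte spacing of the per-bank level registers is an assumption about
 * a hypothetical register layout, and "gplr_base" is supplied by the caller.
 */
static bool foo_gpio_is_high(void __iomem *gplr_base, int gpio)
{
	return readl(gplr_base + gpio_to_bank(gpio) * 4) & GPIO_bit(gpio);
}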
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f743883f769..bb7f3097185 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
extern void account_system_vtime(struct task_struct *tsk);
#endif
-#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-static inline void rcu_irq_enter(void)
-{
- rcu_exit_nohz();
-}
-
-static inline void rcu_irq_exit(void)
-{
- rcu_enter_nohz();
-}
static inline void rcu_nmi_enter(void)
{
@@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void)
}
#else
-extern void rcu_irq_enter(void);
-extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
-#else
-# define rcu_irq_enter() do { } while (0)
-# define rcu_irq_exit() do { } while (0)
-# define rcu_nmi_enter() do { } while (0)
-# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index c235e4e8767..3a95da60fd3 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -72,6 +72,7 @@
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/semaphore.h>
+#include <linux/power_supply.h>
/*
* We parse each description item into this structure. Short items data
@@ -190,6 +191,7 @@ struct hid_item {
#define HID_UP_UNDEFINED 0x00000000
#define HID_UP_GENDESK 0x00010000
#define HID_UP_SIMULATION 0x00020000
+#define HID_UP_GENDEVCTRLS 0x00060000
#define HID_UP_KEYBOARD 0x00070000
#define HID_UP_LED 0x00080000
#define HID_UP_BUTTON 0x00090000
@@ -239,6 +241,8 @@ struct hid_item {
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_DC_BATTERYSTRENGTH 0x00060020
+
#define HID_DG_DIGITIZER 0x000d0001
#define HID_DG_PEN 0x000d0002
#define HID_DG_LIGHTPEN 0x000d0003
@@ -482,6 +486,19 @@ struct hid_device { /* device report descriptor */
struct hid_driver *driver;
struct hid_ll_driver *ll_driver;
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+ /*
+ * Power supply information for HID devices which report
+ * battery strength. power_supply is registered iff
+ * battery.name is non-NULL.
+ */
+ struct power_supply battery;
+ __s32 battery_min;
+ __s32 battery_max;
+ __s32 battery_report_type;
+ __s32 battery_report_id;
+#endif
+
unsigned int status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
@@ -712,6 +729,8 @@ extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
int hid_input_report(struct hid_device *, int type, u8 *, int, int);
int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+unsigned int hidinput_count_leds(struct hid_device *hid);
void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
@@ -719,6 +738,8 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+ const struct hid_device_id *id);
/**
* hid_map_usage - map usage input bits
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 12ec328481d..62b908e0e59 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -35,7 +35,7 @@
#include <linux/mod_devicetable.h>
-#define MAX_PAGE_BUFFER_COUNT 16
+#define MAX_PAGE_BUFFER_COUNT 18
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 07d103a06d6..8e25a9167f1 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -482,6 +482,19 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
{
return adap->nr;
}
+
+/**
+ * module_i2c_driver() - Helper macro for registering an I2C driver
+ * @__i2c_driver: i2c_driver struct
+ *
+ * Helper macro for I2C drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_i2c_driver(__i2c_driver) \
+ module_driver(__i2c_driver, i2c_add_driver, \
+ i2c_del_driver)
+
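/*
 * A minimal sketch of the boilerplate this macro removes, for a
 * hypothetical "foo" I2C driver; the probe/remove bodies are stubs.
 */
static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_id,
};

/* replaces the usual module_init()/module_exit() pair */
module_i2c_driver(foo_driver);
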
#endif /* I2C */
#endif /* __KERNEL__ */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 42557851b12..501370b61ee 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -920,7 +920,7 @@ __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
typedef struct {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *proc_fops;
} ide_proc_entry_t;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 48363c3c40f..210e2c32553 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -128,6 +128,7 @@
#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020
#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040
#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060
+#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060
/* A-MSDU 802.11n */
#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080
/* Mesh Control 802.11s */
@@ -543,6 +544,15 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
}
+/**
+ * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
+ * @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+{
+ return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
+}
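
/*
 * A minimal sketch of an RX-path use of the new helper: fragments other
 * than the first are diverted to (hypothetical) reassembly handling.
 */
static void foo_rx_frame(struct ieee80211_hdr *hdr)
{
	if (!ieee80211_is_first_frag(hdr->seq_ctrl))
		foo_rx_defrag(hdr);	/* hypothetical reassembly path */
}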
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -770,6 +780,9 @@ struct ieee80211_mgmt {
} u;
} __attribute__ ((packed));
+/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1552,6 +1565,8 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
+#define WLAN_CIPHER_SUITE_SMS4 0x00147201
+
/* AKM suite selectors */
#define WLAN_AKM_SUITE_8021X 0x000FAC01
#define WLAN_AKM_SUITE_PSK 0x000FAC02
@@ -1689,6 +1704,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_public_action - check if frame is a public action frame
+ * @hdr: the frame
+ * @len: length of the frame
+ */
+static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+ size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+ if (len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+ if (!ieee80211_is_action(hdr->frame_control))
+ return false;
+ return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+}
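
/*
 * A minimal sketch of filtering received management frames with the new
 * helper; it assumes the 802.11 header starts at skb->data, and
 * foo_handle_public_action() is hypothetical.
 */
static void foo_rx_mgmt(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_public_action(hdr, skb->len))
		foo_handle_public_action(skb);
}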
+
+/**
* ieee80211_fhss_chan_to_freq - get channel frequency
* @channel: the FHSS channel
*
diff --git a/include/linux/if.h b/include/linux/if.h
index db20bd4fd16..06b6ef60c82 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -79,6 +79,7 @@
#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing
* skbs on transmit */
#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
+#define IFF_TEAM_PORT 0x40000 /* device used as team port */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e473003e4bd..56d907a2c80 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
+#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */
#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644
index 00000000000..828181fbad5
--- /dev/null
+++ b/include/linux/if_team.h
@@ -0,0 +1,242 @@
+/*
+ * include/linux/if_team.h - Network team device driver header
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_TEAM_H_
+#define _LINUX_IF_TEAM_H_
+
+#ifdef __KERNEL__
+
+struct team_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+};
+
+struct team;
+
+struct team_port {
+ struct net_device *dev;
+ struct hlist_node hlist; /* node in hash list */
+ struct list_head list; /* node in ordinary list */
+ struct team *team;
+ int index;
+
+ /*
+	 * A place for storing original values of the device before it
+	 * became a port.
+ */
+ struct {
+ unsigned char dev_addr[MAX_ADDR_LEN];
+ unsigned int mtu;
+ } orig;
+
+ bool linkup;
+ u32 speed;
+ u8 duplex;
+
+ struct rcu_head rcu;
+};
+
+struct team_mode_ops {
+ int (*init)(struct team *team);
+ void (*exit)(struct team *team);
+ rx_handler_result_t (*receive)(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb);
+ bool (*transmit)(struct team *team, struct sk_buff *skb);
+ int (*port_enter)(struct team *team, struct team_port *port);
+ void (*port_leave)(struct team *team, struct team_port *port);
+ void (*port_change_mac)(struct team *team, struct team_port *port);
+};
+
+enum team_option_type {
+ TEAM_OPTION_TYPE_U32,
+ TEAM_OPTION_TYPE_STRING,
+};
+
+struct team_option {
+ struct list_head list;
+ const char *name;
+ enum team_option_type type;
+ int (*getter)(struct team *team, void *arg);
+ int (*setter)(struct team *team, void *arg);
+};
+
+struct team_mode {
+ struct list_head list;
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ const struct team_mode_ops *ops;
+};
+
+#define TEAM_PORT_HASHBITS 4
+#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
+
+#define TEAM_MODE_PRIV_LONGS 4
+#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
+
+struct team {
+ struct net_device *dev; /* associated netdevice */
+ struct team_pcpu_stats __percpu *pcpu_stats;
+
+ struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+ /*
+ * port lists with port count
+ */
+ int port_count;
+ struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
+ struct list_head port_list;
+
+ struct list_head option_list;
+
+ const struct team_mode *mode;
+ struct team_mode_ops ops;
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
+};
+
+static inline struct hlist_head *team_port_index_hash(struct team *team,
+ int port_index)
+{
+ return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+}
+
+static inline struct team_port *team_get_port_by_index(struct team *team,
+ int port_index)
+{
+ struct hlist_node *p;
+ struct team_port *port;
+ struct hlist_head *head = team_port_index_hash(team, port_index);
+
+ hlist_for_each_entry(port, p, head, hlist)
+ if (port->index == port_index)
+ return port;
+ return NULL;
+}
+static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
+ int port_index)
+{
+ struct hlist_node *p;
+ struct team_port *port;
+ struct hlist_head *head = team_port_index_hash(team, port_index);
+
+ hlist_for_each_entry_rcu(port, p, head, hlist)
+ if (port->index == port_index)
+ return port;
+ return NULL;
+}
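
/*
 * A minimal sketch of a transmit-side port pick built on the RCU lookup
 * above; the simple modulo policy is only an illustration of the API, not
 * the in-tree team mode implementation.
 */
static struct team_port *foo_pick_tx_port(struct team *team, unsigned int txno)
{
	if (!team->port_count)
		return NULL;
	return team_get_port_by_index_rcu(team, txno % team->port_count);
}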
+
+extern int team_port_set_team_mac(struct team_port *port);
+extern int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count);
+extern void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count);
+extern int team_mode_register(struct team_mode *mode);
+extern int team_mode_unregister(struct team_mode *mode);
+
+#endif /* __KERNEL__ */
+
+#define TEAM_STRING_MAX_LEN 32
+
+/**********************************
+ * NETLINK_GENERIC netlink family.
+ **********************************/
+
+enum {
+ TEAM_CMD_NOOP,
+ TEAM_CMD_OPTIONS_SET,
+ TEAM_CMD_OPTIONS_GET,
+ TEAM_CMD_PORT_LIST_GET,
+
+ __TEAM_CMD_MAX,
+ TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
+};
+
+enum {
+ TEAM_ATTR_UNSPEC,
+ TEAM_ATTR_TEAM_IFINDEX, /* u32 */
+ TEAM_ATTR_LIST_OPTION, /* nest */
+ TEAM_ATTR_LIST_PORT, /* nest */
+
+ __TEAM_ATTR_MAX,
+ TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
+};
+
+/* Nested layout of get/set msg:
+ *
+ * [TEAM_ATTR_LIST_OPTION]
+ * [TEAM_ATTR_ITEM_OPTION]
+ * [TEAM_ATTR_OPTION_*], ...
+ * [TEAM_ATTR_ITEM_OPTION]
+ * [TEAM_ATTR_OPTION_*], ...
+ * ...
+ * [TEAM_ATTR_LIST_PORT]
+ * [TEAM_ATTR_ITEM_PORT]
+ * [TEAM_ATTR_PORT_*], ...
+ * [TEAM_ATTR_ITEM_PORT]
+ * [TEAM_ATTR_PORT_*], ...
+ * ...
+ */
+
+enum {
+ TEAM_ATTR_ITEM_OPTION_UNSPEC,
+ TEAM_ATTR_ITEM_OPTION, /* nest */
+
+ __TEAM_ATTR_ITEM_OPTION_MAX,
+ TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
+};
+
+enum {
+ TEAM_ATTR_OPTION_UNSPEC,
+ TEAM_ATTR_OPTION_NAME, /* string */
+ TEAM_ATTR_OPTION_CHANGED, /* flag */
+ TEAM_ATTR_OPTION_TYPE, /* u8 */
+ TEAM_ATTR_OPTION_DATA, /* dynamic */
+
+ __TEAM_ATTR_OPTION_MAX,
+ TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
+};
+
+enum {
+ TEAM_ATTR_ITEM_PORT_UNSPEC,
+ TEAM_ATTR_ITEM_PORT, /* nest */
+
+ __TEAM_ATTR_ITEM_PORT_MAX,
+ TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
+};
+
+enum {
+ TEAM_ATTR_PORT_UNSPEC,
+ TEAM_ATTR_PORT_IFINDEX, /* u32 */
+ TEAM_ATTR_PORT_CHANGED, /* flag */
+ TEAM_ATTR_PORT_LINKUP, /* flag */
+ TEAM_ATTR_PORT_SPEED, /* u32 */
+ TEAM_ATTR_PORT_DUPLEX, /* u8 */
+
+ __TEAM_ATTR_PORT_MAX,
+ TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
+};
+
+/*
+ * NETLINK_GENERIC related info
+ */
+#define TEAM_GENL_NAME "team"
+#define TEAM_GENL_VERSION 0x1
+#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
+
+#endif /* _LINUX_IF_TEAM_H_ */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 12d5543b14f..13aff1e2183 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-/* if this changes, algorithm will have to be reworked because this
- * depends on completely exhausting the VLAN identifier space. Thus
- * it gives constant time look-up, but in many cases it wastes memory.
- */
-#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
-#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
-
-struct vlan_group {
- struct net_device *real_dev; /* The ethernet(like) device
- * the vlan is attached to.
- */
- unsigned int nr_vlans;
- struct hlist_node hlist; /* linked list */
- struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
- struct rcu_head rcu;
-};
+struct vlan_info;
static inline int is_vlan_dev(struct net_device *dev)
{
@@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev);
#else
static inline struct net_device *
__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
{
return skb;
}
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+ return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+}
#endif
/**
@@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
return protocol;
}
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ struct vlan_hdr *vhdr)
+{
+ __be16 proto;
+ unsigned char *rawp;
+
+ /*
+ * Was a VLAN packet, grab the encapsulated protocol, which the layer
+ * three protocols care about.
+ */
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ if (ntohs(proto) >= 1536) {
+ skb->protocol = proto;
+ return;
+ }
+
+ rawp = skb->data;
+ if (*(unsigned short *) rawp == 0xFFFF)
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell
+ * breaks the protocol design and runs IPX over 802.3 without
+ * an 802.2 LLC layer. We look for FFFF which isn't a used
+ * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+ * but does for the rest.
+ */
+ skb->protocol = htons(ETH_P_802_3);
+ else
+ /*
+ * Real 802.2 LLC
+ */
+ skb->protocol = htons(ETH_P_802_2);
+}
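
/*
 * A minimal sketch of the intended call site on a receive path that has
 * just stripped an outer VLAN tag; it assumes skb->data points at the VLAN
 * header on entry and omits all length/error checking.
 */
static void foo_strip_vlan(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)skb->data;

	skb_pull(skb, VLAN_HLEN);		/* step past the VLAN header */
	vlan_set_encap_proto(skb, vhdr);	/* restore skb->protocol */
}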
#endif /* __KERNEL__ */
/* VLAN IOCTLs are found in sockios.h */
@@ -352,7 +398,7 @@ struct vlan_ioctl_args {
unsigned int skb_priority;
unsigned int name_type;
unsigned int bind_type;
- unsigned int flag; /* Matches vlan_dev_info flags */
+ unsigned int flag; /* Matches vlan_dev_priv flags */
} u;
short vlan_qos;
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index abf5028db98..34e8d52c192 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -22,7 +22,7 @@ struct inet_diag_sockid {
/* Request structure */
-struct inet_diag_req {
+struct inet_diag_req_compat {
__u8 idiag_family; /* Family of addresses. */
__u8 idiag_src_len;
__u8 idiag_dst_len;
@@ -34,6 +34,15 @@ struct inet_diag_req {
__u32 idiag_dbs; /* Tables to dump (NI) */
};
+struct inet_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u8 idiag_ext;
+ __u8 pad;
+ __u32 idiag_states;
+ struct inet_diag_sockid id;
+};
+
enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
@@ -99,9 +108,10 @@ enum {
INET_DIAG_CONG,
INET_DIAG_TOS,
INET_DIAG_TCLASS,
+ INET_DIAG_SKMEMINFO,
};
-#define INET_DIAG_MAX INET_DIAG_TCLASS
+#define INET_DIAG_MAX INET_DIAG_SKMEMINFO
/* INET_DIAG_MEM */
@@ -125,16 +135,41 @@ struct tcpvegas_info {
#ifdef __KERNEL__
struct sock;
struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
struct inet_diag_handler {
- struct inet_hashinfo *idiag_hashinfo;
+ void (*dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct inet_diag_req *r,
+ struct nlattr *bc);
+
+ int (*dump_one)(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ struct inet_diag_req *req);
+
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
- __u16 idiag_info_size;
__u16 idiag_type;
};
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+ struct sk_buff *skb, struct inet_diag_req *req,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+ struct netlink_callback *cb, struct inet_diag_req *r,
+ struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+ struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct inet_diag_req *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
#endif /* __KERNEL__ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 94b1e356c02..9c66b1ada9d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -23,11 +23,10 @@ extern struct files_struct init_files;
extern struct fs_struct init_fs;
#ifdef CONFIG_CGROUPS
-#define INIT_THREADGROUP_FORK_LOCK(sig) \
- .threadgroup_fork_lock = \
- __RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
+#define INIT_GROUP_RWSEM(sig) \
+ .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
#else
-#define INIT_THREADGROUP_FORK_LOCK(sig)
+#define INIT_GROUP_RWSEM(sig)
#endif
#define INIT_SIGNALS(sig) { \
@@ -46,7 +45,7 @@ extern struct fs_struct init_fs;
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
- INIT_THREADGROUP_FORK_LOCK(sig) \
+ INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
@@ -126,6 +125,8 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#define INIT_TASK_COMM "swapper"
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +163,7 @@ extern struct cred init_cred;
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
- .comm = "swapper", \
+ .comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
new file mode 100644
index 00000000000..75d4be71771
--- /dev/null
+++ b/include/linux/input/auo-pixcir-ts.h
@@ -0,0 +1,56 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on auo_touch.h from Dell Streak kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUO_PIXCIR_TS_H__
+#define __AUO_PIXCIR_TS_H__
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * compare coordinates: interrupt is asserted when coordinates change
+ * indicate touch: interrupt is asserted during touch
+ */
+#define AUO_PIXCIR_INT_PERIODICAL 0x00
+#define AUO_PIXCIR_INT_COMP_COORD 0x01
+#define AUO_PIXCIR_INT_TOUCH_IND 0x02
+
+/*
+ * @gpio_int interrupt gpio
+ * @int_setting one of AUO_PIXCIR_INT_*
+ * @init_hw		hardware-specific init
+ * @exit_hw		hardware-specific shutdown
+ * @x_max x-resolution
+ * @y_max y-resolution
+ */
+struct auo_pixcir_ts_platdata {
+ int gpio_int;
+
+ int int_setting;
+
+ void (*init_hw)(struct i2c_client *);
+ void (*exit_hw)(struct i2c_client *);
+
+ unsigned int x_max;
+ unsigned int y_max;
+};
+
+#endif
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
new file mode 100644
index 00000000000..aad2fd44a61
--- /dev/null
+++ b/include/linux/input/gp2ap002a00f.h
@@ -0,0 +1,22 @@
+#ifndef _GP2AP002A00F_H_
+#define _GP2AP002A00F_H_
+
+#include <linux/i2c.h>
+
+#define GP2A_I2C_NAME "gp2ap002a00f"
+
+/**
+ * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
+ * @vout_gpio: The gpio connected to the object detected pin (VOUT)
+ * @wakeup: Set to true if the proximity can wake the device from suspend
+ * @hw_setup: Callback for setting up hardware such as gpios and vregs
+ * @hw_shutdown: Callback for properly shutting down hardware
+ */
+struct gp2a_platform_data {
+ int vout_gpio;
+ bool wakeup;
+ int (*hw_setup)(struct i2c_client *client);
+ int (*hw_shutdown)(struct i2c_client *client);
+};
+
+#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
new file mode 100644
index 00000000000..c1cc52d380e
--- /dev/null
+++ b/include/linux/input/gpio_tilt.h
@@ -0,0 +1,73 @@
+#ifndef _INPUT_GPIO_TILT_H
+#define _INPUT_GPIO_TILT_H
+
+/**
+ * struct gpio_tilt_axis - Axis used by the tilt switch
+ * @axis: Constant describing the axis, e.g. ABS_X
+ * @min: minimum value for abs_param
+ * @max: maximum value for abs_param
+ * @fuzz: fuzz value for abs_param
+ * @flat: flat value for abs_param
+ */
+struct gpio_tilt_axis {
+ int axis;
+ int min;
+ int max;
+ int fuzz;
+ int flat;
+};
+
+/**
+ * struct gpio_tilt_state - state description
+ * @gpios: bitfield of gpio target-states for the value
+ * @axes: array containing the axes settings for the gpio state
+ *		The array indices must correspond to the axes defined
+ * in platform_data
+ *
+ * This structure describes a supported set of axis settings
+ * and the necessary gpio state which represents it.
+ *
+ * The n-th bit in the bitfield describes the state of the n-th GPIO
+ * from the gpios-array defined in gpio_tilt_platform_data below.
+ */
+struct gpio_tilt_state {
+ int gpios;
+ int *axes;
+};
+
+/**
+ * struct gpio_tilt_platform_data
+ * @gpios: Array containing the gpios determining the tilt state
+ * @nr_gpios: Number of gpios
+ * @axes: Array of gpio_tilt_axis descriptions
+ * @nr_axes: Number of axes
+ * @states: Array of gpio_tilt_state entries describing
+ * the gpio state for specific tilts
+ * @nr_states: Number of states available
+ * @debounce_interval: debounce ticks interval in msecs
+ * @poll_interval: polling interval in msecs - for polling driver only
+ * @enable: callback to enable the tilt switch
+ * @disable: callback to disable the tilt switch
+ *
+ * This structure contains gpio-tilt-switch configuration
+ * information that must be passed by platform code to the
+ * gpio-tilt input driver.
+ */
+struct gpio_tilt_platform_data {
+ struct gpio *gpios;
+ int nr_gpios;
+
+ struct gpio_tilt_axis *axes;
+ int nr_axes;
+
+ struct gpio_tilt_state *states;
+ int nr_states;
+
+ int debounce_interval;
+
+ unsigned int poll_interval;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+};
+
+#endif
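
/*
 * A minimal sketch of board code describing a one-axis tilt switch read
 * from two GPIOs; all GPIO numbers, state encodings and axis values are
 * hypothetical.
 */
static int foo_tilt_left[]  = { -1 };
static int foo_tilt_flat[]  = {  0 };
static int foo_tilt_right[] = {  1 };

static struct gpio foo_tilt_gpios[] = {
	{ .gpio = 0, .flags = GPIOF_IN, .label = "tilt_a" },
	{ .gpio = 1, .flags = GPIOF_IN, .label = "tilt_b" },
};

static struct gpio_tilt_axis foo_tilt_axes[] = {
	{ .axis = ABS_X, .min = -1, .max = 1, .fuzz = 0, .flat = 0 },
};

static struct gpio_tilt_state foo_tilt_states[] = {
	{ .gpios = 0x1, .axes = foo_tilt_left },
	{ .gpios = 0x0, .axes = foo_tilt_flat },
	{ .gpios = 0x2, .axes = foo_tilt_right },
};

static struct gpio_tilt_platform_data foo_tilt_pdata = {
	.gpios		= foo_tilt_gpios,
	.nr_gpios	= ARRAY_SIZE(foo_tilt_gpios),
	.axes		= foo_tilt_axes,
	.nr_axes	= ARRAY_SIZE(foo_tilt_axes),
	.states		= foo_tilt_states,
	.nr_states	= ARRAY_SIZE(foo_tilt_states),
	.poll_interval	= 20,
};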
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
new file mode 100644
index 00000000000..7163d91c037
--- /dev/null
+++ b/include/linux/input/pixcir_ts.h
@@ -0,0 +1,10 @@
+#ifndef _PIXCIR_I2C_TS_H
+#define _PIXCIR_I2C_TS_H
+
+struct pixcir_ts_platform_data {
+ int (*attb_read_val)(void);
+ int x_max;
+ int y_max;
+};
+
+#endif
diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h
new file mode 100644
index 00000000000..f25619bfd8a
--- /dev/null
+++ b/include/linux/input/samsung-keypad.h
@@ -0,0 +1,43 @@
+/*
+ * Samsung Keypad platform data definitions
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SAMSUNG_KEYPAD_H
+#define __SAMSUNG_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+#define SAMSUNG_MAX_ROWS 8
+#define SAMSUNG_MAX_COLS 8
+
+/**
+ * struct samsung_keypad_platdata - Platform device data for Samsung Keypad.
+ * @keymap_data: pointer to &matrix_keymap_data.
+ * @rows: number of keypad rows supported.
+ * @cols: number of keypad columns supported.
+ * @no_autorepeat: disable key autorepeat.
+ * @wakeup: controls whether the device should be set up as a wakeup source.
+ * @cfg_gpio: configure the GPIO.
+ *
+ * Initialisation data specific to either the machine or the platform
+ * for the device driver to use or call back when configuring the GPIO.
+ */
+struct samsung_keypad_platdata {
+ const struct matrix_keymap_data *keymap_data;
+ unsigned int rows;
+ unsigned int cols;
+ bool no_autorepeat;
+ bool wakeup;
+
+ void (*cfg_gpio)(unsigned int rows, unsigned int cols);
+};
+
+#endif /* __SAMSUNG_KEYPAD_H */
diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h
new file mode 100644
index 00000000000..e71a85dc2cb
--- /dev/null
+++ b/include/linux/input/tca8418_keypad.h
@@ -0,0 +1,44 @@
+/*
+ * TCA8418 keypad platform support
+ *
+ * Copyright (C) 2011 Fuel7, Inc. All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#ifndef _TCA8418_KEYPAD_H
+#define _TCA8418_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#define TCA8418_I2C_ADDR 0x34
+#define TCA8418_NAME "tca8418_keypad"
+
+struct tca8418_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+ unsigned rows;
+ unsigned cols;
+ bool rep;
+ bool irq_is_gpio;
+};
+
+#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 432acc4c054..d937580417b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -48,19 +48,34 @@ struct iommu_domain {
#ifdef CONFIG_IOMMU_API
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @domain_has_cap: domain capabilities query
+ * @commit: commit iommu domain
+ * @pgsize_bitmap: bitmap of supported page sizes
+ */
struct iommu_ops {
int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot);
- int (*unmap)(struct iommu_domain *domain, unsigned long iova,
- int gfp_order);
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
unsigned long iova);
int (*domain_has_cap)(struct iommu_domain *domain,
unsigned long cap);
+ int (*device_group)(struct device *dev, unsigned int *groupid);
+ unsigned long pgsize_bitmap;
};
extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
@@ -72,15 +87,16 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot);
-extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- int gfp_order);
+ phys_addr_t paddr, size_t size, int prot);
+extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova);
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler);
+extern int iommu_device_group(struct device *dev, unsigned int *groupid);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -179,6 +195,11 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
+static inline int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
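
A minimal sketch (not part of the patch) of the interface change above: map/unmap now take a size in bytes instead of a gfp order, and iommu_unmap() returns the amount actually unmapped. The helper name is invented.

static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int ret;

	/* previously: iommu_map(domain, iova, paddr, get_order(PAGE_SIZE), prot) */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* iommu_unmap() now reports how many bytes were actually unmapped */
	if (iommu_unmap(domain, iova, PAGE_SIZE) != PAGE_SIZE)
		return -EIO;
	return 0;
}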
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 3b1594d662b..30e816148df 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -93,7 +93,7 @@ struct kern_ipc_perm
gid_t gid;
uid_t cuid;
gid_t cgid;
- mode_t mode;
+ umode_t mode;
unsigned long seq;
void *security;
};
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0c997767429..6318268dcaf 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -404,7 +404,7 @@ struct tcp6_sock {
extern int inet6_sk_rebuild_header(struct sock *sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#define inet6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 99834e581b9..bd4272b61a1 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -91,10 +91,11 @@ static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
extern void irq_domain_add(struct irq_domain *domain);
extern void irq_domain_del(struct irq_domain *domain);
+
+extern struct irq_domain_ops irq_domain_simple_ops;
#endif /* CONFIG_IRQ_DOMAIN */
#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern struct irq_domain_ops irq_domain_simple_ops;
extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
extern void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index f0a2f8b0aa1..2a8b1659bf3 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -91,7 +91,7 @@ struct iscsi_boot_kobj {
* The enum of the type. This can be any value of the above
* properties.
*/
- mode_t (*is_visible) (void *data, int type);
+ umode_t (*is_visible) (void *data, int type);
/*
* Driver specific release function.
@@ -110,20 +110,20 @@ struct iscsi_boot_kobj *
iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data));
struct iscsi_boot_kobj *
iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data));
struct iscsi_boot_kobj *
iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data));
struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index c7acdde3243..d211732b9e9 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -497,7 +497,6 @@ struct transaction_s
* @j_format_version: Version of the superblock format
* @j_state_lock: Protect the various scalars in the journal
* @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_barrier: The barrier lock itself
* @j_running_transaction: The current running transaction..
* @j_committing_transaction: the transaction we are pushing to disk
* @j_checkpoint_transactions: a linked circular list of all transactions
@@ -580,9 +579,6 @@ struct journal_s
*/
int j_barrier_count;
- /* The barrier lock itself */
- struct mutex j_barrier;
-
/*
* Transactions: The current running transaction...
* [j_state_lock] [caller holding open handle]
@@ -913,6 +909,7 @@ extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
extern void journal_clear_revoke(journal_t *);
extern void journal_switch_revoke_table(journal_t *journal);
+extern void journal_clear_buffer_revoked_flags(journal_t *journal);
/*
* The log thread user interface:
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2092ea21e46..5557baefed6 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1151,6 +1151,7 @@ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t);
extern void jbd2_journal_clear_revoke(journal_t *);
extern void jbd2_journal_switch_revoke_table(journal_t *journal);
+extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
/*
* The log thread user interface:
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b5..5ce8b140428 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -14,6 +15,12 @@ struct jump_label_key {
#endif
};
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
{
}
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+};
+
static __always_inline bool static_branch(struct jump_label_key *key)
{
if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
atomic_dec(&key->enabled);
}
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ jump_label_dec(&key->key);
+}
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+}
#endif /* HAVE_JUMP_LABEL */
+#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
#endif /* _LINUX_JUMP_LABEL_H */
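
A usage sketch (not part of the patch) for the deferred jump-label API added above, assuming a hypothetical example_key; the rate limit value and function names are illustrative.

static struct jump_label_key_deferred example_key;

static void example_init(void)
{
	/* defer the expensive disable patching by at least HZ jiffies */
	jump_label_rate_limit(&example_key, HZ);
}

static void example_enable(void)
{
	jump_label_inc(&example_key.key);
}

static void example_disable(void)
{
	/* decrement is rate-limited instead of patching immediately */
	jump_label_dec_deferred(&example_key);
}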
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8b1597b5cf..f48e8a52854 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -665,6 +665,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON(condition)
+#define BUILD_BUG() (0)
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
@@ -703,6 +704,21 @@ extern int __build_bug_on_failed;
if (condition) __build_bug_on_failed = 1; \
} while(0)
#endif
+
+/**
+ * BUILD_BUG - break compile if used.
+ *
+ * If you have some code that you expect the compiler to eliminate at
+ * build time, you should use BUILD_BUG to detect if it is
+ * unexpectedly used.
+ */
+#define BUILD_BUG() \
+ do { \
+ extern void __build_bug_failed(void) \
+ __linktime_error("BUILD_BUG failed"); \
+ __build_bug_failed(); \
+ } while (0)
+
#endif /* __CHECKER__ */
/* Trap pasters of __FUNCTION__ at compile-time */
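
A sketch (not part of the patch) of the intended BUILD_BUG() pattern: the branch containing it must be eliminated at compile time, otherwise the reference to __build_bug_failed() fails at link time. The helper below is hypothetical.

static __always_inline unsigned long example_elem_size(size_t size)
{
	if (size == 4)
		return 4;
	if (size == 8)
		return 8;
	BUILD_BUG();	/* callers must only pass 4 or 8; anything else breaks the link */
	return 0;
}

/* example_elem_size(sizeof(u32)) compiles; example_elem_size(3) fails to link. */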
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db580c..2fbd9053c2d 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/cputime.h>
@@ -15,21 +16,25 @@
* used by rstatd/perfmeter
*/
-struct cpu_usage_stat {
- cputime64_t user;
- cputime64_t nice;
- cputime64_t system;
- cputime64_t softirq;
- cputime64_t irq;
- cputime64_t idle;
- cputime64_t iowait;
- cputime64_t steal;
- cputime64_t guest;
- cputime64_t guest_nice;
+enum cpu_usage_stat {
+ CPUTIME_USER,
+ CPUTIME_NICE,
+ CPUTIME_SYSTEM,
+ CPUTIME_SOFTIRQ,
+ CPUTIME_IRQ,
+ CPUTIME_IDLE,
+ CPUTIME_IOWAIT,
+ CPUTIME_STEAL,
+ CPUTIME_GUEST,
+ CPUTIME_GUEST_NICE,
+ NR_STATS,
+};
+
+struct kernel_cpustat {
+ u64 cpustat[NR_STATS];
};
struct kernel_stat {
- struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
unsigned int irqs[NR_IRQS];
#endif
@@ -38,10 +43,13 @@ struct kernel_stat {
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
extern unsigned long long nr_context_switches(void);
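
A short sketch (not part of the patch) showing how per-cpu times are read after the cpustat conversion above; the helper name is invented.

static u64 example_cpu_idle_time(int cpu)
{
	/* struct kernel_cpustat replaces the old cputime64_t fields */
	return kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
}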
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f6539073..722f477c4ef 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
extern int usermodehelper_disable(void);
extern void usermodehelper_enable(void);
extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
#endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ad81e1c5148..fc615a97e2d 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -191,8 +191,6 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj)
}
extern struct kobject *kset_find_obj(struct kset *, const char *);
-extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
- struct kobject *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d4a62ab2ee5..abc0120b09b 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,16 +15,81 @@
#ifndef _KREF_H_
#define _KREF_H_
-#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
struct kref {
atomic_t refcount;
};
-void kref_init(struct kref *kref);
-void kref_get(struct kref *kref);
-int kref_put(struct kref *kref, void (*release) (struct kref *kref));
-int kref_sub(struct kref *kref, unsigned int count,
- void (*release) (struct kref *kref));
+/**
+ * kref_init - initialize object.
+ * @kref: object in question.
+ */
+static inline void kref_init(struct kref *kref)
+{
+ atomic_set(&kref->refcount, 1);
+}
+
+/**
+ * kref_get - increment refcount for object.
+ * @kref: object.
+ */
+static inline void kref_get(struct kref *kref)
+{
+ WARN_ON(!atomic_read(&kref->refcount));
+ atomic_inc(&kref->refcount);
+}
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function. If the caller does pass kfree to this
+ * function, you will be publicly mocked mercilessly by the kref
+ * maintainer, and anyone else who happens to notice it. You have
+ * been warned.
+ *
+ * Subtract @count from the refcount, and if it reaches 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware: if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory. Only use the return value to check whether the kref is now
+ * gone; do not assume the object is still present.
+ */
+static inline int kref_sub(struct kref *kref, unsigned int count,
+ void (*release)(struct kref *kref))
+{
+ WARN_ON(release == NULL);
+
+ if (atomic_sub_and_test((int) count, &kref->refcount)) {
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * kref_put - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function. If the caller does pass kfree to this
+ * function, you will be publicly mocked mercilessly by the kref
+ * maintainer, and anyone else who happens to notice it. You have
+ * been warned.
+ *
+ * Decrement the refcount, and if it reaches 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware: if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory. Only use the return value to check whether the kref is now
+ * gone; do not assume the object is still present.
+ */
+static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+ return kref_sub(kref, 1, release);
+}
#endif /* _KREF_H_ */
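
A minimal usage sketch (not part of the patch) for the now-inline kref API, assuming <linux/slab.h> for kzalloc/kfree; the example_* names are invented.

struct example_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, refcount);

	kfree(obj);
}

static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->refcount);	/* refcount starts at 1 */
	return obj;
}

static void example_put(struct example_obj *obj)
{
	/* never pass kfree directly; use a release callback */
	kref_put(&obj->refcount, example_release);
}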
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b3a26..0714b24c0e4 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
int kthreadd(void *unused);
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index c3892fc1d53..68e67e50d02 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
#define KVM_CAP_PPC_PAPR 68
#define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d5262319997..900c76337e8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -14,6 +14,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
@@ -50,6 +51,9 @@
#define KVM_REQ_APF_HALT 12
#define KVM_REQ_STEAL_UPDATE 13
#define KVM_REQ_NMI 14
+#define KVM_REQ_IMMEDIATE_EXIT 15
+#define KVM_REQ_PMU 16
+#define KVM_REQ_PMI 17
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -179,6 +183,7 @@ struct kvm_memory_slot {
unsigned long *rmap;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_head;
+ unsigned long nr_dirty_pages;
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr;
int user_alloc;
@@ -224,11 +229,20 @@ struct kvm_irq_routing_table {};
#endif
+#ifndef KVM_MEM_SLOTS_NUM
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#endif
+
+/*
+ * Note:
+ * memslots are no longer sorted by id; use id_to_memslot()
+ * to look up a memslot by its id.
+ */
struct kvm_memslots {
- int nmemslots;
u64 generation;
- struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
- KVM_PRIVATE_MEM_SLOTS];
+ struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
+ /* The mapping table from slot id to the index in memslots[]. */
+ int id_to_index[KVM_MEM_SLOTS_NUM];
};
struct kvm {
@@ -239,7 +253,6 @@ struct kvm {
struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
- struct kvm_vcpu *bsp_vcpu;
#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
@@ -302,6 +315,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
(vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
idx++)
+#define kvm_for_each_memslot(memslot, slots) \
+ for (memslot = &slots->memslots[0]; \
+ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+ memslot++)
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
@@ -314,6 +332,7 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
@@ -322,6 +341,18 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
|| lockdep_is_held(&kvm->slots_lock));
}
+static inline struct kvm_memory_slot *
+id_to_memslot(struct kvm_memslots *slots, int id)
+{
+ int index = slots->id_to_index[id];
+ struct kvm_memory_slot *slot;
+
+ slot = &slots->memslots[index];
+
+ WARN_ON(slot->id != id);
+ return slot;
+}
+
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
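
A sketch (not part of the patch) of walking the reorganised memslot array with the new iterator; the helper name is invented.

static unsigned long example_total_pages(struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;
	unsigned long pages = 0;

	/* stops at the first empty slot, since memslots are packed by use */
	kvm_for_each_memslot(memslot, slots)
		pages += memslot->npages;

	return pages;
}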
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 47a070b0520..ff476ddaf31 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -35,4 +35,3 @@ static inline int kvm_para_has_feature(unsigned int feature)
}
#endif /* __KERNEL__ */
#endif /* __LINUX_KVM_PARA_H */
-
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e99898527..e23121f9d82 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
#define _INCLUDE_GUARD_LATENCYTOP_H_
#include <linux/compiler.h>
+struct task_struct;
+
#ifdef CONFIG_LATENCYTOP
#define LT_SAVECOUNT 32
@@ -23,7 +25,6 @@ struct latency_record {
};
-struct task_struct;
extern int latencytop_enabled;
void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
new file mode 100644
index 00000000000..dcabf4fa2ae
--- /dev/null
+++ b/include/linux/leds-tca6507.h
@@ -0,0 +1,34 @@
+/*
+ * TCA6507 LED chip driver.
+ *
+ * Copyright (C) 2011 Neil Brown <neil@brown.name>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_TCA6507_H
+#define __LINUX_TCA6507_H
+#include <linux/leds.h>
+
+struct tca6507_platform_data {
+ struct led_platform_data leds;
+#ifdef CONFIG_GPIOLIB
+ int gpio_base;
+ void (*setup)(unsigned gpio_base, unsigned ngpio);
+#endif
+};
+
+#define TCA6507_MAKE_GPIO 1
+#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index f549056fb20..87f402ccec5 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
+#include <linux/cpu.h>
/* can make br locks by using local lock for read side, global lock for write */
#define br_lock_init(name) name##_lock_init()
@@ -72,9 +73,31 @@
#define DEFINE_LGLOCK(name) \
\
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -83,6 +106,11 @@
lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -124,9 +152,9 @@
\
void name##_global_lock_online(void) { \
int i; \
- preempt_disable(); \
+ spin_lock(&name##_cpu_lock); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
@@ -137,12 +165,12 @@
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
} \
- preempt_enable(); \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index ff9abff55aa..90b0656a869 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
return ipv4_is_loopback(sin->sin_addr.s_addr);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline int __nlm_privileged_request6(const struct sockaddr *sap)
{
const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap)
return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
}
-#else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else /* IS_ENABLED(CONFIG_IPV6) */
static inline int __nlm_privileged_request6(const struct sockaddr *sap)
{
return 0;
}
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
* Ensure incoming requests are from local privileged callers.
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b6a56e37284..d36619ead3b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
+#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+
#else /* !LOCKDEP */
static inline void lockdep_off(void)
@@ -392,6 +394,8 @@ struct lock_class_key { };
#define lockdep_assert_held(l) do { } while (0)
+#define lockdep_recursing(tsk) (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 25b808631cd..fd7ff3d91e6 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 0 : \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index c11ff293254..efa1a6d7aca 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -32,5 +32,16 @@ struct mbus_dram_target_info
} cs[4];
};
-
+/*
+ * The Marvell mbus is currently found only on SoCs from the Orion family.
+ * Provide a dummy stub for other architectures.
+ */
+#ifdef CONFIG_PLAT_ORION
+extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+#else
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 0fe00cd4c93..76f52bbbb2f 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -32,6 +32,8 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
+ /* reset callback */
+ int (*reset)(struct mii_bus *bus);
};
/* The returned bus is not yet registered with the phy layer. */
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index e9d3fdfe41d..7c9fe3c2be7 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -20,6 +20,8 @@ struct mdio_gpio_platform_data {
unsigned int phy_mask;
int irqs[PHY_MAX_ADDR];
+ /* reset callback */
+ int (*reset)(struct mii_bus *bus);
};
#endif /* __LINUX_MDIO_GPIO_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e6b843e16e8..a6bb1023514 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,8 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
-#define MEMBLOCK_ERROR 0
-
#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
@@ -19,81 +17,161 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <asm/memblock.h>
-
#define INIT_MEMBLOCK_REGIONS 128
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ int nid;
+#endif
};
struct memblock_type {
unsigned long cnt; /* number of regions */
unsigned long max; /* size of the allocated array */
+ phys_addr_t total_size; /* size of all regions */
struct memblock_region *regions;
};
struct memblock {
phys_addr_t current_limit;
- phys_addr_t memory_size; /* Updated by memblock_analyze() */
struct memblock_type memory;
struct memblock_type reserved;
};
extern struct memblock memblock;
extern int memblock_debug;
-extern int memblock_can_resize;
#define memblock_dbg(fmt, ...) \
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
+phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
int memblock_free_reserved_regions(void);
int memblock_reserve_reserved_regions(void);
-extern void memblock_init(void);
-extern void memblock_analyze(void);
-extern long memblock_add(phys_addr_t base, phys_addr_t size);
-extern long memblock_remove(phys_addr_t base, phys_addr_t size);
-extern long memblock_free(phys_addr_t base, phys_addr_t size);
-extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges. Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock. Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+ for (i = 0, \
+ __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
+
+void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
-/* The numa aware allocator is only available if
- * CONFIG_ARCH_POPULATES_NODE_MAP is set
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order. Available as soon as memblock is initialized.
*/
-extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
- int nid);
-extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- int nid);
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+ for (i = (u64)ULLONG_MAX, \
+ __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
-extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+ r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-extern phys_addr_t memblock_alloc_base(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t max_addr);
-extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t max_addr);
-extern phys_addr_t memblock_phys_mem_size(void);
-extern phys_addr_t memblock_start_of_DRAM(void);
-extern phys_addr_t memblock_end_of_DRAM(void);
-extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
-extern int memblock_is_memory(phys_addr_t addr);
-extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-extern int memblock_is_reserved(phys_addr_t addr);
-extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-
-extern void memblock_dump_all(void);
-
-/* Provided by the architecture */
-extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
-extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
- phys_addr_t addr2, phys_addr_t size2);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+
+extern void __memblock_dump_all(void);
+
+static inline void memblock_dump_all(void)
+{
+ if (memblock_debug)
+ __memblock_dump_all();
+}
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
* accessible during boot
* @limit: New limit value (physical address)
*/
-extern void memblock_set_current_limit(phys_addr_t limit);
+void memblock_set_current_limit(phys_addr_t limit);
/*
@@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
region++)
-#ifdef ARCH_DISCARD_MEMBLOCK
-#define __init_memblock __init
-#define __initdata_memblock __initdata
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
@@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
- return MEMBLOCK_ERROR;
+ return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */
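
A sketch (not part of the patch) of the new free-range iterator documented above, summing free memory across all nodes; the helper name is invented.

static phys_addr_t __init example_free_bytes(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	/* nid == MAX_NUMNODES walks every node; the nid output is not needed */
	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		total += end - start;

	return total;
}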
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b87068a1a09..f944591765e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
@@ -381,5 +384,23 @@ mem_cgroup_print_bad_page(struct page *page)
}
#endif
+enum {
+ UNDER_LIMIT,
+ SOFT_LIMIT,
+ OVER_LIMIT,
+};
+
+struct sock;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 935699b30b7..1ac7f6e405f 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -15,7 +15,6 @@
#ifndef _LINUX_MEMORY_H_
#define _LINUX_MEMORY_H_
-#include <linux/sysdev.h>
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
@@ -38,7 +37,7 @@ struct memory_block {
int phys_device; /* to which fru does this belong? */
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
- struct sys_device sysdev;
+ struct device dev;
};
int arch_get_memory_phys_device(unsigned long start_pfn);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7978eec1b7d..7c727a90d70 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -164,11 +164,11 @@ static inline void mpol_get(struct mempolicy *pol)
atomic_inc(&pol->refcnt);
}
-extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
-static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (a == b)
- return 1;
+ return true;
return __mpol_equal(a, b);
}
@@ -257,9 +257,9 @@ static inline int vma_migratable(struct vm_area_struct *vma)
struct mempolicy {};
-static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
- return 1;
+ return true;
}
static inline void mpol_put(struct mempolicy *p)
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
new file mode 100644
index 00000000000..5702d1be13b
--- /dev/null
+++ b/include/linux/mfd/da9052/da9052.h
@@ -0,0 +1,131 @@
+/*
+ * da9052 declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MFD_DA9052_DA9052_H
+#define __MFD_DA9052_DA9052_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_IRQ_DCIN 0
+#define DA9052_IRQ_VBUS 1
+#define DA9052_IRQ_DCINREM 2
+#define DA9052_IRQ_VBUSREM 3
+#define DA9052_IRQ_VDDLOW 4
+#define DA9052_IRQ_ALARM 5
+#define DA9052_IRQ_SEQRDY 6
+#define DA9052_IRQ_COMP1V2 7
+#define DA9052_IRQ_NONKEY 8
+#define DA9052_IRQ_IDFLOAT 9
+#define DA9052_IRQ_IDGND 10
+#define DA9052_IRQ_CHGEND 11
+#define DA9052_IRQ_TBAT 12
+#define DA9052_IRQ_ADC_EOM 13
+#define DA9052_IRQ_PENDOWN 14
+#define DA9052_IRQ_TSIREADY 15
+#define DA9052_IRQ_GPI0 16
+#define DA9052_IRQ_GPI1 17
+#define DA9052_IRQ_GPI2 18
+#define DA9052_IRQ_GPI3 19
+#define DA9052_IRQ_GPI4 20
+#define DA9052_IRQ_GPI5 21
+#define DA9052_IRQ_GPI6 22
+#define DA9052_IRQ_GPI7 23
+#define DA9052_IRQ_GPI8 24
+#define DA9052_IRQ_GPI9 25
+#define DA9052_IRQ_GPI10 26
+#define DA9052_IRQ_GPI11 27
+#define DA9052_IRQ_GPI12 28
+#define DA9052_IRQ_GPI13 29
+#define DA9052_IRQ_GPI14 30
+#define DA9052_IRQ_GPI15 31
+
+enum da9052_chip_id {
+ DA9052,
+ DA9053_AA,
+ DA9053_BA,
+ DA9053_BB,
+};
+
+struct da9052_pdata;
+
+struct da9052 {
+ struct mutex io_lock;
+
+ struct device *dev;
+ struct regmap *regmap;
+
+ int irq_base;
+ u8 chip_id;
+
+ int chip_irq;
+};
+
+/* Device I/O API */
+static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
+{
+ int val, ret;
+
+ ret = regmap_read(da9052->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+ return val;
+}
+
+static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,
+ unsigned char val)
+{
+ return regmap_write(da9052->regmap, reg, val);
+}
+
+static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ return regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
+ unsigned char bit_mask,
+ unsigned char reg_val)
+{
+ return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
+}
+
+int da9052_device_init(struct da9052 *da9052, u8 chip_id);
+void da9052_device_exit(struct da9052 *da9052);
+
+extern struct regmap_config da9052_regmap_config;
+
+#endif /* __MFD_DA9052_DA9052_H */
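
A usage sketch (not part of the patch) for the regmap-backed I/O helpers above, using register and bit definitions from reg.h; the example_* helpers are invented.

static int example_set_twdscale(struct da9052 *da9052, unsigned char scale)
{
	/* read-modify-write of the watchdog scale field in CONTROL_D */
	return da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
				 DA9052_CONTROLD_TWDSCALE, scale);
}

static int example_nonkey_pressed(struct da9052 *da9052)
{
	int val = da9052_reg_read(da9052, DA9052_STATUS_A_REG);

	if (val < 0)
		return val;	/* propagate the regmap error code */
	return !!(val & DA9052_STATUSA_NONKEY);
}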
diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/include/linux/mfd/da9052/pdata.h
index 8e4a1bd0ab1..62c5c3c2992 100644
--- a/arch/arm/mach-at91/include/mach/vmalloc.h
+++ b/include/linux/mfd/da9052/pdata.h
@@ -1,7 +1,9 @@
/*
- * arch/arm/mach-at91/include/mach/vmalloc.h
+ * Platform data declarations for DA9052 PMICs.
*
- * Copyright (C) 2003 SAN People
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,14 +17,24 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
*/
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
+#ifndef __MFD_DA9052_PDATA_H__
+#define __MFD_DA9052_PDATA_H__
+
+#define DA9052_MAX_REGULATORS 14
-#include <mach/hardware.h>
+struct da9052;
-#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK)
+struct da9052_pdata {
+ struct led_platform_data *pled;
+ int (*init) (struct da9052 *da9052);
+ int irq_base;
+ int gpio_base;
+ int use_for_apm;
+ struct regulator_init_data *regulators[DA9052_MAX_REGULATORS];
+};
#endif
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
new file mode 100644
index 00000000000..b97f7309d7f
--- /dev/null
+++ b/include/linux/mfd/da9052/reg.h
@@ -0,0 +1,749 @@
+/*
+ * Register declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_DA9052_REG_H
+#define __LINUX_MFD_DA9052_REG_H
+
+/* PAGE REGISTERS */
+#define DA9052_PAGE0_CON_REG 0
+#define DA9052_PAGE1_CON_REG 128
+
+/* STATUS REGISTERS */
+#define DA9052_STATUS_A_REG 1
+#define DA9052_STATUS_B_REG 2
+#define DA9052_STATUS_C_REG 3
+#define DA9052_STATUS_D_REG 4
+
+/* EVENT REGISTERS */
+#define DA9052_EVENT_A_REG 5
+#define DA9052_EVENT_B_REG 6
+#define DA9052_EVENT_C_REG 7
+#define DA9052_EVENT_D_REG 8
+#define DA9052_FAULTLOG_REG 9
+
+/* IRQ REGISTERS */
+#define DA9052_IRQ_MASK_A_REG 10
+#define DA9052_IRQ_MASK_B_REG 11
+#define DA9052_IRQ_MASK_C_REG 12
+#define DA9052_IRQ_MASK_D_REG 13
+
+/* CONTROL REGISTERS */
+#define DA9052_CONTROL_A_REG 14
+#define DA9052_CONTROL_B_REG 15
+#define DA9052_CONTROL_C_REG 16
+#define DA9052_CONTROL_D_REG 17
+
+#define DA9052_PDDIS_REG 18
+#define DA9052_INTERFACE_REG 19
+#define DA9052_RESET_REG 20
+
+/* GPIO REGISTERS */
+#define DA9052_GPIO_0_1_REG 21
+#define DA9052_GPIO_2_3_REG 22
+#define DA9052_GPIO_4_5_REG 23
+#define DA9052_GPIO_6_7_REG 24
+#define DA9052_GPIO_14_15_REG 28
+
+/* POWER SEQUENCER CONTROL REGISTERS */
+#define DA9052_ID_0_1_REG 29
+#define DA9052_ID_2_3_REG 30
+#define DA9052_ID_4_5_REG 31
+#define DA9052_ID_6_7_REG 32
+#define DA9052_ID_8_9_REG 33
+#define DA9052_ID_10_11_REG 34
+#define DA9052_ID_12_13_REG 35
+#define DA9052_ID_14_15_REG 36
+#define DA9052_ID_16_17_REG 37
+#define DA9052_ID_18_19_REG 38
+#define DA9052_ID_20_21_REG 39
+#define DA9052_SEQ_STATUS_REG 40
+#define DA9052_SEQ_A_REG 41
+#define DA9052_SEQ_B_REG 42
+#define DA9052_SEQ_TIMER_REG 43
+
+/* LDO AND BUCK REGISTERS */
+#define DA9052_BUCKA_REG 44
+#define DA9052_BUCKB_REG 45
+#define DA9052_BUCKCORE_REG 46
+#define DA9052_BUCKPRO_REG 47
+#define DA9052_BUCKMEM_REG 48
+#define DA9052_BUCKPERI_REG 49
+#define DA9052_LDO1_REG 50
+#define DA9052_LDO2_REG 51
+#define DA9052_LDO3_REG 52
+#define DA9052_LDO4_REG 53
+#define DA9052_LDO5_REG 54
+#define DA9052_LDO6_REG 55
+#define DA9052_LDO7_REG 56
+#define DA9052_LDO8_REG 57
+#define DA9052_LDO9_REG 58
+#define DA9052_LDO10_REG 59
+#define DA9052_SUPPLY_REG 60
+#define DA9052_PULLDOWN_REG 61
+#define DA9052_CHGBUCK_REG 62
+#define DA9052_WAITCONT_REG 63
+#define DA9052_ISET_REG 64
+#define DA9052_BATCHG_REG 65
+
+/* BATTERY CONTROL REGISTERS */
+#define DA9052_CHG_CONT_REG 66
+#define DA9052_INPUT_CONT_REG 67
+#define DA9052_CHG_TIME_REG 68
+#define DA9052_BBAT_CONT_REG 69
+
+/* LED CONTROL REGISTERS */
+#define DA9052_BOOST_REG 70
+#define DA9052_LED_CONT_REG 71
+#define DA9052_LEDMIN123_REG 72
+#define DA9052_LED1_CONF_REG 73
+#define DA9052_LED2_CONF_REG 74
+#define DA9052_LED3_CONF_REG 75
+#define DA9052_LED1CONT_REG 76
+#define DA9052_LED2CONT_REG 77
+#define DA9052_LED3CONT_REG 78
+#define DA9052_LED_CONT_4_REG 79
+#define DA9052_LED_CONT_5_REG 80
+
+/* ADC CONTROL REGISTERS */
+#define DA9052_ADC_MAN_REG 81
+#define DA9052_ADC_CONT_REG 82
+#define DA9052_ADC_RES_L_REG 83
+#define DA9052_ADC_RES_H_REG 84
+#define DA9052_VDD_RES_REG 85
+#define DA9052_VDD_MON_REG 86
+
+#define DA9052_ICHG_AV_REG 87
+#define DA9052_ICHG_THD_REG 88
+#define DA9052_ICHG_END_REG 89
+#define DA9052_TBAT_RES_REG 90
+#define DA9052_TBAT_HIGHP_REG 91
+#define DA9052_TBAT_HIGHN_REG 92
+#define DA9052_TBAT_LOW_REG 93
+#define DA9052_T_OFFSET_REG 94
+
+#define DA9052_ADCIN4_RES_REG 95
+#define DA9052_AUTO4_HIGH_REG 96
+#define DA9052_AUTO4_LOW_REG 97
+#define DA9052_ADCIN5_RES_REG 98
+#define DA9052_AUTO5_HIGH_REG 99
+#define DA9052_AUTO5_LOW_REG 100
+#define DA9052_ADCIN6_RES_REG 101
+#define DA9052_AUTO6_HIGH_REG 102
+#define DA9052_AUTO6_LOW_REG 103
+
+#define DA9052_TJUNC_RES_REG 104
+
+/* TSI CONTROL REGISTERS */
+#define DA9052_TSI_CONT_A_REG 105
+#define DA9052_TSI_CONT_B_REG 106
+#define DA9052_TSI_X_MSB_REG 107
+#define DA9052_TSI_Y_MSB_REG 108
+#define DA9052_TSI_LSB_REG 109
+#define DA9052_TSI_Z_MSB_REG 110
+
+/* RTC COUNT REGISTERS */
+#define DA9052_COUNT_S_REG 111
+#define DA9052_COUNT_MI_REG 112
+#define DA9052_COUNT_H_REG 113
+#define DA9052_COUNT_D_REG 114
+#define DA9052_COUNT_MO_REG 115
+#define DA9052_COUNT_Y_REG 116
+
+/* RTC CONTROL REGISTERS */
+#define DA9052_ALARM_MI_REG 117
+#define DA9052_ALARM_H_REG 118
+#define DA9052_ALARM_D_REG 119
+#define DA9052_ALARM_MO_REG 120
+#define DA9052_ALARM_Y_REG 121
+#define DA9052_SECOND_A_REG 122
+#define DA9052_SECOND_B_REG 123
+#define DA9052_SECOND_C_REG 124
+#define DA9052_SECOND_D_REG 125
+
+/* PAGE CONFIGURATION BIT */
+#define DA9052_PAGE_CONF 0X80
+
+/* STATUS REGISTER A BITS */
+#define DA9052_STATUSA_VDATDET 0X80
+#define DA9052_STATUSA_VBUSSEL 0X40
+#define DA9052_STATUSA_DCINSEL 0X20
+#define DA9052_STATUSA_VBUSDET 0X10
+#define DA9052_STATUSA_DCINDET 0X08
+#define DA9052_STATUSA_IDGND 0X04
+#define DA9052_STATUSA_IDFLOAT 0X02
+#define DA9052_STATUSA_NONKEY 0X01
+
+/* STATUS REGISTER B BITS */
+#define DA9052_STATUSB_COMPDET 0X80
+#define DA9052_STATUSB_SEQUENCING 0X40
+#define DA9052_STATUSB_GPFB2 0X20
+#define DA9052_STATUSB_CHGTO 0X10
+#define DA9052_STATUSB_CHGEND 0X08
+#define DA9052_STATUSB_CHGLIM 0X04
+#define DA9052_STATUSB_CHGPRE 0X02
+#define DA9052_STATUSB_CHGATT 0X01
+
+/* STATUS REGISTER C BITS */
+#define DA9052_STATUSC_GPI7 0X80
+#define DA9052_STATUSC_GPI6 0X40
+#define DA9052_STATUSC_GPI5 0X20
+#define DA9052_STATUSC_GPI4 0X10
+#define DA9052_STATUSC_GPI3 0X08
+#define DA9052_STATUSC_GPI2 0X04
+#define DA9052_STATUSC_GPI1 0X02
+#define DA9052_STATUSC_GPI0 0X01
+
+/* STATUS REGISTER D BITS */
+#define DA9052_STATUSD_GPI15 0X80
+#define DA9052_STATUSD_GPI14 0X40
+#define DA9052_STATUSD_GPI13 0X20
+#define DA9052_STATUSD_GPI12 0X10
+#define DA9052_STATUSD_GPI11 0X08
+#define DA9052_STATUSD_GPI10 0X04
+#define DA9052_STATUSD_GPI9 0X02
+#define DA9052_STATUSD_GPI8 0X01
+
+/* EVENT REGISTER A BITS */
+#define DA9052_EVENTA_ECOMP1V2 0X80
+#define DA9052_EVENTA_ESEQRDY 0X40
+#define DA9052_EVENTA_EALRAM 0X20
+#define DA9052_EVENTA_EVDDLOW 0X10
+#define DA9052_EVENTA_EVBUSREM 0X08
+#define DA9052_EVENTA_EDCINREM 0X04
+#define DA9052_EVENTA_EVBUSDET 0X02
+#define DA9052_EVENTA_EDCINDET 0X01
+
+/* EVENT REGISTER B BITS */
+#define DA9052_EVENTB_ETSIREADY 0X80
+#define DA9052_EVENTB_EPENDOWN 0X40
+#define DA9052_EVENTB_EADCEOM 0X20
+#define DA9052_EVENTB_ETBAT 0X10
+#define DA9052_EVENTB_ECHGEND 0X08
+#define DA9052_EVENTB_EIDGND 0X04
+#define DA9052_EVENTB_EIDFLOAT 0X02
+#define DA9052_EVENTB_ENONKEY 0X01
+
+/* EVENT REGISTER C BITS */
+#define DA9052_EVENTC_EGPI7 0X80
+#define DA9052_EVENTC_EGPI6 0X40
+#define DA9052_EVENTC_EGPI5 0X20
+#define DA9052_EVENTC_EGPI4 0X10
+#define DA9052_EVENTC_EGPI3 0X08
+#define DA9052_EVENTC_EGPI2 0X04
+#define DA9052_EVENTC_EGPI1 0X02
+#define DA9052_EVENTC_EGPI0 0X01
+
+/* EVENT REGISTER D BITS */
+#define DA9052_EVENTD_EGPI15 0X80
+#define DA9052_EVENTD_EGPI14 0X40
+#define DA9052_EVENTD_EGPI13 0X20
+#define DA9052_EVENTD_EGPI12 0X10
+#define DA9052_EVENTD_EGPI11 0X08
+#define DA9052_EVENTD_EGPI10 0X04
+#define DA9052_EVENTD_EGPI9 0X02
+#define DA9052_EVENTD_EGPI8 0X01
+
+/* IRQ MASK REGISTERS BITS */
+#define DA9052_M_NONKEY 0X0100
+
+/* TSI EVENT REGISTERS BITS */
+#define DA9052_E_PEN_DOWN 0X4000
+#define DA9052_E_TSI_READY 0X8000
+
+/* FAULT LOG REGISTER BITS */
+#define DA9052_FAULTLOG_WAITSET 0X80
+#define DA9052_FAULTLOG_NSDSET 0X40
+#define DA9052_FAULTLOG_KEYSHUT 0X20
+#define DA9052_FAULTLOG_TEMPOVER 0X08
+#define DA9052_FAULTLOG_VDDSTART 0X04
+#define DA9052_FAULTLOG_VDDFAULT 0X02
+#define DA9052_FAULTLOG_TWDERROR 0X01
+
+/* CONTROL REGISTER A BITS */
+#define DA9052_CONTROLA_GPIV 0X80
+#define DA9052_CONTROLA_PMOTYPE 0X20
+#define DA9052_CONTROLA_PMOV 0X10
+#define DA9052_CONTROLA_PMIV 0X08
+#define DA9052_CONTROLA_PMIFV 0X08
+#define DA9052_CONTROLA_PWR1EN 0X04
+#define DA9052_CONTROLA_PWREN 0X02
+#define DA9052_CONTROLA_SYSEN 0X01
+
+/* CONTROL REGISTER B BITS */
+#define DA9052_CONTROLB_SHUTDOWN 0X80
+#define DA9052_CONTROLB_DEEPSLEEP 0X40
+#define DA9052_CONTROL_B_WRITEMODE 0X20
+#define DA9052_CONTROLB_BBATEN 0X10
+#define DA9052_CONTROLB_OTPREADEN 0X08
+#define DA9052_CONTROLB_AUTOBOOT 0X04
+#define DA9052_CONTROLB_ACTDIODE 0X02
+#define DA9052_CONTROLB_BUCKMERGE 0X01
+
+/* CONTROL REGISTER C BITS */
+#define DA9052_CONTROLC_BLINKDUR 0X80
+#define DA9052_CONTROLC_BLINKFRQ 0X60
+#define DA9052_CONTROLC_DEBOUNCING 0X1C
+#define DA9052_CONTROLC_PMFB2PIN 0X02
+#define DA9052_CONTROLC_PMFB1PIN 0X01
+
+/* CONTROL REGISTER D BITS */
+#define DA9052_CONTROLD_WATCHDOG 0X80
+#define DA9052_CONTROLD_ACCDETEN 0X40
+#define DA9052_CONTROLD_GPI1415SD 0X20
+#define DA9052_CONTROLD_NONKEYSD 0X10
+#define DA9052_CONTROLD_KEEPACTEN 0X08
+#define DA9052_CONTROLD_TWDSCALE 0X07
+
+/* POWER DOWN DISABLE REGISTER BITS */
+#define DA9052_PDDIS_PMCONTPD 0X80
+#define DA9052_PDDIS_OUT32KPD 0X40
+#define DA9052_PDDIS_CHGBBATPD 0X20
+#define DA9052_PDDIS_CHGPD 0X10
+#define DA9052_PDDIS_HS2WIREPD 0X08
+#define DA9052_PDDIS_PMIFPD 0X04
+#define DA9052_PDDIS_GPADCPD 0X02
+#define DA9052_PDDIS_GPIOPD 0X01
+
+/* INTERFACE REGISTER BITS */
+#define DA9052_INTERFACE_IFBASEADDR 0XE0
+#define DA9052_INTERFACE_NCSPOL 0X10
+#define DA9052_INTERFACE_RWPOL 0X08
+#define DA9052_INTERFACE_CPHA 0X04
+#define DA9052_INTERFACE_CPOL 0X02
+#define DA9052_INTERFACE_IFTYPE 0X01
+
+/* RESET REGISTER BITS */
+#define DA9052_RESET_RESETEVENT 0XC0
+#define DA9052_RESET_RESETTIMER 0X3F
+
+/* GPIO REGISTERS */
+/* GPIO CONTROL REGISTER BITS */
+#define DA9052_GPIO_EVEN_PORT_PIN 0X03
+#define DA9052_GPIO_EVEN_PORT_TYPE 0X04
+#define DA9052_GPIO_EVEN_PORT_MODE 0X08
+
+#define DA9052_GPIO_ODD_PORT_PIN 0X30
+#define DA9052_GPIO_ODD_PORT_TYPE 0X40
+#define DA9052_GPIO_ODD_PORT_MODE 0X80
+
+/* POWER SEQUENCER REGISTER BITS */
+/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */
+#define DA9052_ID01_LDO1STEP 0XF0
+#define DA9052_ID01_SYSPRE 0X04
+#define DA9052_ID01_DEFSUPPLY 0X02
+#define DA9052_ID01_NRESMODE 0X01
+
+/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */
+#define DA9052_ID23_LDO3STEP 0XF0
+#define DA9052_ID23_LDO2STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */
+#define DA9052_ID45_LDO5STEP 0XF0
+#define DA9052_ID45_LDO4STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */
+#define DA9052_ID67_LDO7STEP 0XF0
+#define DA9052_ID67_LDO6STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */
+#define DA9052_ID89_LDO9STEP 0XF0
+#define DA9052_ID89_LDO8STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */
+#define DA9052_ID1011_PDDISSTEP 0XF0
+#define DA9052_ID1011_LDO10STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */
+#define DA9052_ID1213_VMEMSWSTEP 0XF0
+#define DA9052_ID1213_VPERISWSTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */
+#define DA9052_ID1415_BUCKPROSTEP 0XF0
+#define DA9052_ID1415_BUCKCORESTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */
+#define DA9052_ID1617_BUCKPERISTEP 0XF0
+#define DA9052_ID1617_BUCKMEMSTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */
+#define DA9052_ID1819_GPRISE2STEP 0XF0
+#define DA9052_ID1819_GPRISE1STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */
+#define DA9052_ID2021_GPFALL2STEP 0XF0
+#define DA9052_ID2021_GPFALL1STEP 0X0F
+
+/* POWER SEQ STATUS REGISTER BITS */
+#define DA9052_SEQSTATUS_SEQPOINTER 0XF0
+#define DA9052_SEQSTATUS_WAITSTEP 0X0F
+
+/* POWER SEQ A REGISTER BITS */
+#define DA9052_SEQA_POWEREND 0XF0
+#define DA9052_SEQA_SYSTEMEND 0X0F
+
+/* POWER SEQ B REGISTER BITS */
+#define DA9052_SEQB_PARTDOWN 0XF0
+#define DA9052_SEQB_MAXCOUNT 0X0F
+
+/* POWER SEQ TIMER REGISTER BITS */
+#define DA9052_SEQTIMER_SEQDUMMY 0XF0
+#define DA9052_SEQTIMER_SEQTIME 0X0F
+
+/* POWER SUPPLY CONTROL REGISTER BITS */
+/* BUCK REGISTER A BITS */
+#define DA9052_BUCKA_BPROILIM 0XC0
+#define DA9052_BUCKA_BPROMODE 0X30
+#define DA9052_BUCKA_BCOREILIM 0X0C
+#define DA9052_BUCKA_BCOREMODE 0X03
+
+/* BUCK REGISTER B BITS */
+#define DA9052_BUCKB_BERIILIM 0XC0
+#define DA9052_BUCKB_BPERIMODE 0X30
+#define DA9052_BUCKB_BMEMILIM 0X0C
+#define DA9052_BUCKB_BMEMMODE 0X03
+
+/* BUCKCORE REGISTER BITS */
+#define DA9052_BUCKCORE_BCORECONF 0X80
+#define DA9052_BUCKCORE_BCOREEN 0X40
+#define DA9052_BUCKCORE_VBCORE 0X3F
+
+/* BUCKPRO REGISTER BITS */
+#define DA9052_BUCKPRO_BPROCONF 0X80
+#define DA9052_BUCKPRO_BPROEN 0X40
+#define DA9052_BUCKPRO_VBPRO 0X3F
+
+/* BUCKMEM REGISTER BITS */
+#define DA9052_BUCKMEM_BMEMCONF 0X80
+#define DA9052_BUCKMEM_BMEMEN 0X40
+#define DA9052_BUCKMEM_VBMEM 0X3F
+
+/* BUCKPERI REGISTER BITS */
+#define DA9052_BUCKPERI_BPERICONF 0X80
+#define DA9052_BUCKPERI_BPERIEN 0X40
+#define DA9052_BUCKPERI_BPERIHS 0X20
+#define DA9052_BUCKPERI_VBPERI 0X1F
+
+/* LDO1 REGISTER BITS */
+#define DA9052_LDO1_LDO1CONF 0X80
+#define DA9052_LDO1_LDO1EN 0X40
+#define DA9052_LDO1_VLDO1 0X1F
+
+/* LDO2 REGISTER BITS */
+#define DA9052_LDO2_LDO2CONF 0X80
+#define DA9052_LDO2_LDO2EN 0X40
+#define DA9052_LDO2_VLDO2 0X3F
+
+/* LDO3 REGISTER BITS */
+#define DA9052_LDO3_LDO3CONF 0X80
+#define DA9052_LDO3_LDO3EN 0X40
+#define DA9052_LDO3_VLDO3 0X3F
+
+/* LDO4 REGISTER BITS */
+#define DA9052_LDO4_LDO4CONF 0X80
+#define DA9052_LDO4_LDO4EN 0X40
+#define DA9052_LDO4_VLDO4 0X3F
+
+/* LDO5 REGISTER BITS */
+#define DA9052_LDO5_LDO5CONF 0X80
+#define DA9052_LDO5_LDO5EN 0X40
+#define DA9052_LDO5_VLDO5 0X3F
+
+/* LDO6 REGISTER BITS */
+#define DA9052_LDO6_LDO6CONF 0X80
+#define DA9052_LDO6_LDO6EN 0X40
+#define DA9052_LDO6_VLDO6 0X3F
+
+/* LDO7 REGISTER BITS */
+#define DA9052_LDO7_LDO7CONF 0X80
+#define DA9052_LDO7_LDO7EN 0X40
+#define DA9052_LDO7_VLDO7 0X3F
+
+/* LDO8 REGISTER BITS */
+#define DA9052_LDO8_LDO8CONF 0X80
+#define DA9052_LDO8_LDO8EN 0X40
+#define DA9052_LDO8_VLDO8 0X3F
+
+/* LDO9 REGISTER BITS */
+#define DA9052_LDO9_LDO9CONF 0X80
+#define DA9052_LDO9_LDO9EN 0X40
+#define DA9052_LDO9_VLDO9 0X3F
+
+/* LDO10 REGISTER BITS */
+#define DA9052_LDO10_LDO10CONF 0X80
+#define DA9052_LDO10_LDO10EN 0X40
+#define DA9052_LDO10_VLDO10 0X3F
+
+/* SUPPLY REGISTER BITS */
+#define DA9052_SUPPLY_VLOCK 0X80
+#define DA9052_SUPPLY_VMEMSWEN 0X40
+#define DA9052_SUPPLY_VPERISWEN 0X20
+#define DA9052_SUPPLY_VLDO3GO 0X10
+#define DA9052_SUPPLY_VLDO2GO 0X08
+#define DA9052_SUPPLY_VBMEMGO 0X04
+#define DA9052_SUPPLY_VBPROGO 0X02
+#define DA9052_SUPPLY_VBCOREGO 0X01
+
+/* PULLDOWN REGISTER BITS */
+#define DA9052_PULLDOWN_LDO5PDDIS 0X20
+#define DA9052_PULLDOWN_LDO2PDDIS 0X10
+#define DA9052_PULLDOWN_LDO1PDDIS 0X08
+#define DA9052_PULLDOWN_MEMPDDIS 0X04
+#define DA9052_PULLDOWN_PROPDDIS 0X02
+#define DA9052_PULLDOWN_COREPDDIS 0X01
+
+/* BAT CHARGER REGISTER BITS */
+/* CHARGER BUCK REGISTER BITS */
+#define DA9052_CHGBUCK_CHGTEMP 0X80
+#define DA9052_CHGBUCK_CHGUSBILIM 0X40
+#define DA9052_CHGBUCK_CHGBUCKLP 0X20
+#define DA9052_CHGBUCK_CHGBUCKEN 0X10
+#define DA9052_CHGBUCK_ISETBUCK 0X0F
+
+/* WAIT COUNTER REGISTER BITS */
+#define DA9052_WAITCONT_WAITDIR 0X80
+#define DA9052_WAITCONT_RTCCLOCK 0X40
+#define DA9052_WAITCONT_WAITMODE 0X20
+#define DA9052_WAITCONT_EN32KOUT 0X10
+#define DA9052_WAITCONT_DELAYTIME 0X0F
+
+/* ISET CONTROL REGISTER BITS */
+#define DA9052_ISET_ISETDCIN 0XF0
+#define DA9052_ISET_ISETVBUS 0X0F
+
+/* BATTERY CHARGER CONTROL REGISTER BITS */
+#define DA9052_BATCHG_ICHGPRE 0XC0
+#define DA9052_BATCHG_ICHGBAT 0X3F
+
+/* CHARGER COUNTER REGISTER BITS */
+#define DA9052_CHG_CONT_VCHG_BAT 0XF8
+#define DA9052_CHG_CONT_TCTR 0X07
+
+/* INPUT CONTROL REGISTER BITS */
+#define DA9052_INPUT_CONT_TCTR_MODE 0X80
+#define DA9052_INPUT_CONT_VBUS_SUSP 0X10
+#define DA9052_INPUT_CONT_DCIN_SUSP 0X08
+
+/* CHARGING TIME REGISTER BITS */
+#define DA9052_CHGTIME_CHGTIME 0XFF
+
+/* BACKUP BATTERY CONTROL REGISTER BITS */
+#define DA9052_BBATCONT_BCHARGERISET 0XF0
+#define DA9052_BBATCONT_BCHARGERVSET 0X0F
+
+/* LED REGISTERS BITS */
+/* LED BOOST REGISTER BITS */
+#define DA9052_BOOST_EBFAULT 0X80
+#define DA9052_BOOST_MBFAULT 0X40
+#define DA9052_BOOST_BOOSTFRQ 0X20
+#define DA9052_BOOST_BOOSTILIM 0X10
+#define DA9052_BOOST_LED3INEN 0X08
+#define DA9052_BOOST_LED2INEN 0X04
+#define DA9052_BOOST_LED1INEN 0X02
+#define DA9052_BOOST_BOOSTEN 0X01
+
+/* LED CONTROL REGISTER BITS */
+#define DA9052_LEDCONT_SELLEDMODE 0X80
+#define DA9052_LEDCONT_LED3ICONT 0X40
+#define DA9052_LEDCONT_LED3RAMP 0X20
+#define DA9052_LEDCONT_LED3EN 0X10
+#define DA9052_LEDCONT_LED2RAMP 0X08
+#define DA9052_LEDCONT_LED2EN 0X04
+#define DA9052_LEDCONT_LED1RAMP 0X02
+#define DA9052_LEDCONT_LED1EN 0X01
+
+/* LEDMIN123 REGISTER BIT */
+#define DA9052_LEDMIN123_LEDMINCURRENT 0XFF
+
+/* LED1CONF REGISTER BIT */
+#define DA9052_LED1CONF_LED1CURRENT 0XFF
+
+/* LED2CONF REGISTER BIT */
+#define DA9052_LED2CONF_LED2CURRENT 0XFF
+
+/* LED3CONF REGISTER BIT */
+#define DA9052_LED3CONF_LED3CURRENT 0XFF
+
+/* LED COUNT REGISTER BIT */
+#define DA9052_LED_CONT_DIM 0X80
+
+/* ADC MAN REGISTERS BITS */
+#define DA9052_ADC_MAN_MAN_CONV 0X10
+#define DA9052_ADC_MAN_MUXSEL_VDDOUT 0X00
+#define DA9052_ADC_MAN_MUXSEL_ICH 0X01
+#define DA9052_ADC_MAN_MUXSEL_TBAT 0X02
+#define DA9052_ADC_MAN_MUXSEL_VBAT 0X03
+#define DA9052_ADC_MAN_MUXSEL_AD4 0X04
+#define DA9052_ADC_MAN_MUXSEL_AD5 0X05
+#define DA9052_ADC_MAN_MUXSEL_AD6 0X06
+#define DA9052_ADC_MAN_MUXSEL_VBBAT 0X09
+
+/* ADC CONTROL REGISTERS BITS */
+#define DA9052_ADCCONT_COMP1V2EN 0X80
+#define DA9052_ADCCONT_ADCMODE 0X40
+#define DA9052_ADCCONT_TBATISRCEN 0X20
+#define DA9052_ADCCONT_AD4ISRCEN 0X10
+#define DA9052_ADCCONT_AUTOAD6EN 0X08
+#define DA9052_ADCCONT_AUTOAD5EN 0X04
+#define DA9052_ADCCONT_AUTOAD4EN 0X02
+#define DA9052_ADCCONT_AUTOVDDEN 0X01
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */
+#define DA9052_ADC_RES_LSB 0X03
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */
+#define DA9052_ADCRESH_ADCRESMSB 0XFF
+
+/* VDD RES REGISTER BIT */
+#define DA9052_VDDRES_VDDOUTRES 0XFF
+
+/* VDD MON REGISTER BIT */
+#define DA9052_VDDMON_VDDOUTMON 0XFF
+
+/* ICHG_AV REGISTER BIT */
+#define DA9052_ICHGAV_ICHGAV 0XFF
+
+/* ICHG_THD REGISTER BIT */
+#define DA9052_ICHGTHD_ICHGTHD 0XFF
+
+/* ICHG_END REGISTER BIT */
+#define DA9052_ICHGEND_ICHGEND 0XFF
+
+/* TBAT_RES REGISTER BIT */
+#define DA9052_TBATRES_TBATRES 0XFF
+
+/* TBAT_HIGHP REGISTER BIT */
+#define DA9052_TBATHIGHP_TBATHIGHP 0XFF
+
+/* TBAT_HIGHN REGISTER BIT */
+#define DA9052_TBATHIGHN_TBATHIGHN 0XFF
+
+/* TBAT_LOW REGISTER BIT */
+#define DA9052_TBATLOW_TBATLOW 0XFF
+
+/* T_OFFSET REGISTER BIT */
+#define DA9052_TOFFSET_TOFFSET 0XFF
+
+/* ADCIN4_RES REGISTER BIT */
+#define DA9052_ADCIN4RES_ADCIN4RES 0XFF
+
+/* ADCIN4_HIGH REGISTER BIT */
+#define DA9052_AUTO4HIGH_AUTO4HIGH 0XFF
+
+/* ADCIN4_LOW REGISTER BIT */
+#define DA9052_AUTO4LOW_AUTO4LOW 0XFF
+
+/* ADCIN5_RES REGISTER BIT */
+#define DA9052_ADCIN5RES_ADCIN5RES 0XFF
+
+/* ADCIN5_HIGH REGISTER BIT */
+#define DA9052_AUTO5HIGH_AUTOHIGH 0XFF
+
+/* ADCIN5_LOW REGISTER BIT */
+#define DA9052_AUTO5LOW_AUTO5LOW 0XFF
+
+/* ADCIN6_RES REGISTER BIT */
+#define DA9052_ADCIN6RES_ADCIN6RES 0XFF
+
+/* ADCIN6_HIGH REGISTER BIT */
+#define DA9052_AUTO6HIGH_AUTO6HIGH 0XFF
+
+/* ADCIN6_LOW REGISTER BIT */
+#define DA9052_AUTO6LOW_AUTO6LOW 0XFF
+
+/* TJUNC_RES REGISTER BIT */
+#define DA9052_TJUNCRES_TJUNCRES 0XFF
+
+/* TSI REGISTER */
+/* TSI CONTROL REGISTER A BITS */
+#define DA9052_TSICONTA_TSIDELAY 0XC0
+#define DA9052_TSICONTA_TSISKIP 0X38
+#define DA9052_TSICONTA_TSIMODE 0X04
+#define DA9052_TSICONTA_PENDETEN 0X02
+#define DA9052_TSICONTA_AUTOTSIEN 0X01
+
+/* TSI CONTROL REGISTER B BITS */
+#define DA9052_TSICONTB_ADCREF 0X80
+#define DA9052_TSICONTB_TSIMAN 0X40
+#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSISEL3 0X08
+#define DA9052_TSICONTB_TSISEL2 0X04
+#define DA9052_TSICONTB_TSISEL1 0X02
+#define DA9052_TSICONTB_TSISEL0 0X01
+
+/* TSI X CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIXMSB_TSIXM 0XFF
+
+/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIYMSB_TSIYM 0XFF
+
+/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
+#define DA9052_TSILSB_PENDOWN 0X40
+#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIXL 0X03
+
+/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
+#define DA9052_TSIZMSB_TSIZM 0XFF
+
+/* RTC REGISTER */
+/* RTC TIMER SECONDS REGISTER BITS */
+#define DA9052_COUNTS_MONITOR 0X40
+#define DA9052_RTC_SEC 0X3F
+
+/* RTC TIMER MINUTES REGISTER BIT */
+#define DA9052_RTC_MIN 0X3F
+
+/* RTC TIMER HOUR REGISTER BIT */
+#define DA9052_RTC_HOUR 0X1F
+
+/* RTC TIMER DAYS REGISTER BIT */
+#define DA9052_RTC_DAY 0X1F
+
+/* RTC TIMER MONTHS REGISTER BIT */
+#define DA9052_RTC_MONTH 0X0F
+
+/* RTC TIMER YEARS REGISTER BIT */
+#define DA9052_RTC_YEAR 0X3F
+
+/* RTC ALARM MINUTES REGISTER BITS */
+#define DA9052_ALARMM_I_TICK_TYPE 0X80
+#define DA9052_ALARMMI_ALARMTYPE 0X40
+
+/* RTC ALARM YEARS REGISTER BITS */
+#define DA9052_ALARM_Y_TICK_ON 0X80
+#define DA9052_ALARM_Y_ALARM_ON 0X40
+
+/* RTC SECONDS REGISTER A BITS */
+#define DA9052_SECONDA_SECONDSA 0XFF
+
+/* RTC SECONDS REGISTER B BITS */
+#define DA9052_SECONDB_SECONDSB 0XFF
+
+/* RTC SECONDS REGISTER C BITS */
+#define DA9052_SECONDC_SECONDSC 0XFF
+
+/* RTC SECONDS REGISTER D BITS */
+#define DA9052_SECONDD_SECONDSD 0XFF
+
+#endif /* __LINUX_MFD_DA9052_REG_H */
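All of the masks above select bits within 8-bit DA9052 registers (the IRQ-mask and TSI event masks are 16-bit words): single-bit masks are tested with a bitwise AND, and multi-bit fields are isolated by masking. A minimal sketch of that usage, assuming the new header is included as <linux/mfd/da9052/reg.h>; the register values here are hypothetical inputs rather than reads from real hardware:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mfd/da9052/reg.h>

static void da9052_decode_example(u8 status_a, u8 control_d)
{
	u8 twdscale;

	/* Single-bit flag: nONKEY state reported in STATUS_A. */
	if (status_a & DA9052_STATUSA_NONKEY)
		pr_info("nONKEY is asserted\n");

	/* Multi-bit field: watchdog timer scaling, CONTROL_D bits [2:0]. */
	twdscale = control_d & DA9052_CONTROLD_TWDSCALE;
	pr_info("TWDSCALE = %u\n", twdscale);
}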
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 3816c2fac0a..a98e2a316d1 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -69,6 +69,7 @@ struct regulator_init_data;
struct mc13xxx_regulator_init_data {
int id;
struct regulator_init_data *init_data;
+ struct device_node *node;
};
struct mc13xxx_regulator_platform_data {
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 8bf2cb9502d..d0cb12eba40 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -740,6 +740,34 @@
#define TPS65910_GPIO_STS BIT(1)
#define TPS65910_GPIO_SET BIT(0)
+/* Regulator Index Definitions */
+#define TPS65910_REG_VRTC 0
+#define TPS65910_REG_VIO 1
+#define TPS65910_REG_VDD1 2
+#define TPS65910_REG_VDD2 3
+#define TPS65910_REG_VDD3 4
+#define TPS65910_REG_VDIG1 5
+#define TPS65910_REG_VDIG2 6
+#define TPS65910_REG_VPLL 7
+#define TPS65910_REG_VDAC 8
+#define TPS65910_REG_VAUX1 9
+#define TPS65910_REG_VAUX2 10
+#define TPS65910_REG_VAUX33 11
+#define TPS65910_REG_VMMC 12
+
+#define TPS65911_REG_VDDCTRL 4
+#define TPS65911_REG_LDO1 5
+#define TPS65911_REG_LDO2 6
+#define TPS65911_REG_LDO3 7
+#define TPS65911_REG_LDO4 8
+#define TPS65911_REG_LDO5 9
+#define TPS65911_REG_LDO6 10
+#define TPS65911_REG_LDO7 11
+#define TPS65911_REG_LDO8 12
+
+/* Max number of TPS65910/11 regulators */
+#define TPS65910_NUM_REGS 13
+
/**
* struct tps65910_board
* Board platform data may be used to initialize regulators.
@@ -751,7 +779,7 @@ struct tps65910_board {
int irq_base;
int vmbch_threshold;
int vmbch2_threshold;
- struct regulator_init_data *tps65910_pmic_init_data;
+ struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
};
/**
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 27748230aa6..2783eca629a 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -9,6 +9,7 @@
#define __LINUX_MII_H__
#include <linux/types.h>
+#include <linux/ethtool.h>
/* Generic MII registers. */
#define MII_BMCR 0x00 /* Basic mode control register */
@@ -240,6 +241,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
}
/**
+ * ethtool_adv_to_mii_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_10baseT_Half)
+ result |= ADVERTISE_10HALF;
+ if (ethadv & ADVERTISED_10baseT_Full)
+ result |= ADVERTISE_10FULL;
+ if (ethadv & ADVERTISED_100baseT_Half)
+ result |= ADVERTISE_100HALF;
+ if (ethadv & ADVERTISED_100baseT_Full)
+ result |= ADVERTISE_100FULL;
+ if (ethadv & ADVERTISED_Pause)
+ result |= ADVERTISE_PAUSE_CAP;
+ if (ethadv & ADVERTISED_Asym_Pause)
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_t
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to ethtool advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_10HALF)
+ result |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ result |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ result |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ result |= ADVERTISED_100baseT_Full;
+ if (adv & ADVERTISE_PAUSE_CAP)
+ result |= ADVERTISED_Pause;
+ if (adv & ADVERTISE_PAUSE_ASYM)
+ result |= ADVERTISED_Asym_Pause;
+
+ return result;
+}
+
+/**
+ * ethtool_adv_to_mii_ctrl1000_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_1000baseT_Half)
+ result |= ADVERTISE_1000HALF;
+ if (ethadv & ADVERTISED_1000baseT_Full)
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
+ * mii_ctrl1000_to_ethtool_adv_t
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_1000HALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (adv & ADVERTISE_1000FULL)
+ result |= ADVERTISED_1000baseT_Full;
+
+ return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits
+ * to ethtool LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_LPACK)
+ result |= ADVERTISED_Autoneg;
+
+ return result | mii_adv_to_ethtool_adv_t(lpa);
+}
+
+/**
+ * mii_stat1000_to_ethtool_lpa_t
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_1000HALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (lpa & LPA_1000FULL)
+ result |= ADVERTISED_1000baseT_Full;
+
+ return result;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_x
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000Base-X mode.
+ */
+static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_1000baseT_Half)
+ result |= ADVERTISE_1000XHALF;
+ if (ethadv & ADVERTISED_1000baseT_Full)
+ result |= ADVERTISE_1000XFULL;
+ if (ethadv & ADVERTISED_Pause)
+ result |= ADVERTISE_1000XPAUSE;
+ if (ethadv & ADVERTISED_Asym_Pause)
+ result |= ADVERTISE_1000XPSE_ASYM;
+
+ return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_x
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-X mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_1000XHALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (adv & ADVERTISE_1000XFULL)
+ result |= ADVERTISED_1000baseT_Full;
+ if (adv & ADVERTISE_1000XPAUSE)
+ result |= ADVERTISED_Pause;
+ if (adv & ADVERTISE_1000XPSE_ASYM)
+ result |= ADVERTISED_Asym_Pause;
+
+ return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_x
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-X mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_LPACK)
+ result |= ADVERTISED_Autoneg;
+
+ return result | mii_adv_to_ethtool_adv_x(lpa);
+}
+
+/**
* mii_advertise_flowctrl - get flow control advertisement flags
* @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
*/
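A sketch of how a driver's ethtool path might use the helpers added above; the adv, lpa and stat1000 values are assumed to have been read from MII_ADVERTISE, MII_LPA and MII_STAT1000 by a PHY accessor that is not shown:

#include <linux/ethtool.h>
#include <linux/mii.h>

static void example_fill_link_modes(struct ethtool_cmd *cmd,
				    u16 adv, u16 lpa, u16 stat1000)
{
	/* Local 10/100 and pause advertisement from MII_ADVERTISE. */
	cmd->advertising = mii_adv_to_ethtool_adv_t(adv);

	/* Link partner: MII_LPA base page plus 1000Base-T status bits. */
	cmd->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa) |
			      mii_stat1000_to_ethtool_lpa_t(stat1000);
}

Going the other way, ethtool_adv_to_mii_adv_t() produces the value to program back into MII_ADVERTISE, typically ORed with the selector bits (e.g. ADVERTISE_CSMA) that the register already carries.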
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index c41d7270c6c..32085249e9c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -54,7 +54,7 @@ struct miscdevice {
struct device *parent;
struct device *this_device;
const char *nodename;
- mode_t mode;
+ umode_t mode;
};
extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index b56e4587208..9958ff2cad3 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -59,12 +59,15 @@ enum {
MLX4_CMD_HW_HEALTH_CHECK = 0x50,
MLX4_CMD_SET_PORT = 0xc,
MLX4_CMD_SET_NODE = 0x5a,
+ MLX4_CMD_QUERY_FUNC = 0x56,
MLX4_CMD_ACCESS_DDR = 0x2e,
MLX4_CMD_MAP_ICM = 0xffa,
MLX4_CMD_UNMAP_ICM = 0xff9,
MLX4_CMD_MAP_ICM_AUX = 0xffc,
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd,
+ /* master notifies fw when a slave's FLR has finished */
+ MLX4_CMD_INFORM_FLR_DONE = 0x5b,
/* TPT commands */
MLX4_CMD_SW2HW_MPT = 0xd,
@@ -119,6 +122,26 @@ enum {
/* miscellaneous commands */
MLX4_CMD_DIAG_RPRT = 0x30,
MLX4_CMD_NOP = 0x31,
+ MLX4_CMD_ACCESS_MEM = 0x2e,
+ MLX4_CMD_SET_VEP = 0x52,
+
+ /* Ethernet specific commands */
+ MLX4_CMD_SET_VLAN_FLTR = 0x47,
+ MLX4_CMD_SET_MCAST_FLTR = 0x48,
+ MLX4_CMD_DUMP_ETH_STATS = 0x49,
+
+ /* Communication channel commands */
+ MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+ MLX4_CMD_GEN_EQE = 0x58,
+
+ /* virtual commands */
+ MLX4_CMD_ALLOC_RES = 0xf00,
+ MLX4_CMD_FREE_RES = 0xf01,
+ MLX4_CMD_MCAST_ATTACH = 0xf05,
+ MLX4_CMD_UCAST_ATTACH = 0xf06,
+ MLX4_CMD_PROMISC = 0xf08,
+ MLX4_CMD_QUERY_FUNC_CAP = 0xf0a,
+ MLX4_CMD_QP_ATTACH = 0xf0b,
/* debug commands */
MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@ enum {
/* statistics commands */
MLX4_CMD_QUERY_IF_STAT = 0X54,
+ MLX4_CMD_SET_IF_STAT = 0X55,
};
enum {
@@ -135,7 +159,8 @@ enum {
};
enum {
- MLX4_MAILBOX_SIZE = 4096
+ MLX4_MAILBOX_SIZE = 4096,
+ MLX4_ACCESS_MEM_ALIGN = 256,
};
enum {
@@ -148,6 +173,11 @@ enum {
MLX4_SET_PORT_GID_TABLE = 0x5,
};
+enum {
+ MLX4_CMD_WRAPPED,
+ MLX4_CMD_NATIVE
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox {
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout);
+ u16 op, unsigned long timeout, int native);
/* Invoke a command with no output parameter */
static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
- u8 op_modifier, u16 op, unsigned long timeout)
+ u8 op_modifier, u16 op, unsigned long timeout,
+ int native)
{
return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
- op_modifier, op, timeout);
+ op_modifier, op, timeout, native);
}
/* Invoke a command with an output mailbox */
static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op,
- unsigned long timeout)
+ unsigned long timeout, int native)
{
return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
- op_modifier, op, timeout);
+ op_modifier, op, timeout, native);
}
/*
@@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param
*/
static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
u32 in_modifier, u8 op_modifier, u16 op,
- unsigned long timeout)
+ unsigned long timeout, int native)
{
return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
- op_modifier, op, timeout);
+ op_modifier, op, timeout, native);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
+u32 mlx4_comm_get_version(void);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+
#endif /* MLX4_CMD_H */
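With the extra argument, each caller now states whether a command may be virtualised through the command wrapper (MLX4_CMD_WRAPPED) or must reach the firmware directly (MLX4_CMD_NATIVE). A hedged sketch of a direct NOP; the in_modifier value is illustrative only:

#include <linux/mlx4/cmd.h>

static int example_fw_nop(struct mlx4_dev *dev)
{
	/* A NOP touches no shared resources, so issue it natively. */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}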
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 84b0b1848f1..5c4fe8e5bfe 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -47,6 +47,9 @@
enum {
MLX4_FLAG_MSI_X = 1 << 0,
MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
+ MLX4_FLAG_MASTER = 1 << 2,
+ MLX4_FLAG_SLAVE = 1 << 3,
+ MLX4_FLAG_SRIOV = 1 << 4,
};
enum {
@@ -58,6 +61,15 @@ enum {
};
enum {
+ MLX4_MAX_NUM_PF = 16,
+ MLX4_MAX_NUM_VF = 64,
+ MLX4_MFUNC_MAX = 80,
+ MLX4_MFUNC_EQ_NUM = 4,
+ MLX4_MFUNC_MAX_EQES = 8,
+ MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
+enum {
MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
@@ -77,11 +89,13 @@ enum {
MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
- MLX4_DEV_CAP_FLAG_WOL = 1LL << 38,
+ MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37,
+ MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38,
MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
- MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48
+ MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
+ MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
};
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
@@ -116,7 +130,11 @@ enum mlx4_event {
MLX4_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f,
MLX4_EVENT_TYPE_ECC_DETECT = 0x0e,
- MLX4_EVENT_TYPE_CMD = 0x0a
+ MLX4_EVENT_TYPE_CMD = 0x0a,
+ MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
+ MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
+ MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
+ MLX4_EVENT_TYPE_NONE = 0xff,
};
enum {
@@ -183,6 +201,7 @@ enum mlx4_qp_region {
};
enum mlx4_port_type {
+ MLX4_PORT_TYPE_NONE = 0,
MLX4_PORT_TYPE_IB = 1,
MLX4_PORT_TYPE_ETH = 2,
MLX4_PORT_TYPE_AUTO = 3
@@ -215,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
struct mlx4_caps {
u64 fw_ver;
+ u32 function;
int num_ports;
int vl_cap[MLX4_MAX_PORTS + 1];
int ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -229,6 +249,7 @@ struct mlx4_caps {
u64 trans_code[MLX4_MAX_PORTS + 1];
int local_ca_ack_delay;
int num_uars;
+ u32 uar_page_size;
int bf_reg_size;
int bf_regs_per_page;
int max_sq_sg;
@@ -252,8 +273,7 @@ struct mlx4_caps {
int num_comp_vectors;
int comp_pool;
int num_mpts;
- int num_mtt_segs;
- int mtts_per_seg;
+ int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
int reserved_mrws;
@@ -283,7 +303,9 @@ struct mlx4_caps {
int log_num_prios;
enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
u8 supported_type[MLX4_MAX_PORTS + 1];
- u32 port_mask;
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
+ u32 port_mask[MLX4_MAX_PORTS + 1];
enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
u32 max_counters;
u8 ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -303,7 +325,7 @@ struct mlx4_buf {
};
struct mlx4_mtt {
- u32 first_seg;
+ u32 offset;
int order;
int page_shift;
};
@@ -465,10 +487,12 @@ struct mlx4_counter {
struct mlx4_dev {
struct pci_dev *pdev;
unsigned long flags;
+ unsigned long num_slaves;
struct mlx4_caps caps;
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
+ int num_vfs;
};
struct mlx4_init_port_param {
@@ -487,14 +511,32 @@ struct mlx4_init_port_param {
#define mlx4_foreach_port(port, dev, type) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
- if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
- ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+ if ((type) == (dev)->caps.port_mask[(port)])
-#define mlx4_foreach_ib_transport_port(port, dev) \
- for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
- if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \
- ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define mlx4_foreach_ib_transport_port(port, dev) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+ return (qpn < dev->caps.sqp_start + 8);
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+ return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_SLAVE;
+}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf);
@@ -560,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol protocol);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -570,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
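A brief sketch of how the new role helpers and the per-port port_mask are meant to be consumed; the loop body is a placeholder:

#include <linux/kernel.h>
#include <linux/mlx4/device.h>

static void example_scan_eth_ports(struct mlx4_dev *dev)
{
	int port;

	/* Slaves do not own global resource setup; the master/PF does. */
	if (mlx4_is_slave(dev))
		return;

	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		pr_info("mlx4: port %d is Ethernet\n", port);
}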
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 48cc4cb9785..bee8fa23127 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -97,6 +97,33 @@ enum {
MLX4_QP_BIT_RIC = 1 << 4,
};
+enum {
+ MLX4_RSS_HASH_XOR = 0,
+ MLX4_RSS_HASH_TOP = 1,
+
+ MLX4_RSS_UDP_IPV6 = 1 << 0,
+ MLX4_RSS_UDP_IPV4 = 1 << 1,
+ MLX4_RSS_TCP_IPV6 = 1 << 2,
+ MLX4_RSS_IPV6 = 1 << 3,
+ MLX4_RSS_TCP_IPV4 = 1 << 4,
+ MLX4_RSS_IPV4 = 1 << 5,
+
+ /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
+ /* bit offset of the RSS indirection QP flag within mlx4_qp_context.flags */
+ MLX4_RSS_QPC_FLAG_OFFSET = 13,
+};
+
+struct mlx4_rss_context {
+ __be32 base_qpn;
+ __be32 default_qpn;
+ u16 reserved;
+ u8 hash_fn;
+ u8 flags;
+ __be32 rss_key[10];
+ __be32 base_qpn_udp;
+};
+
struct mlx4_qp_path {
u8 fl;
u8 reserved1[2];
@@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg {
* [4] IP checksum
* [3:2] C (generate completion queue entry)
* [1] SE (solicited event)
+ * [0] FL (force loopback)
*/
__be32 srcrb_flags;
/*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c48..6eba2cc016c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
@@ -1252,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
* zones, allocate the backing mem_map and account for memory holes in a more
* architecture independent manner. This is a substitute for creating the
* zone_sizes[] and zholes_size[] arrays and passing them to
* free_area_init_node()
*
* An architecture is expected to register range of page frames backed by
- * physical memory with add_active_range() before calling
+ * physical memory with memblock_add[_node]() before calling
* free_area_init_nodes() passing in the PFN each zone ends at. At a basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * add_active_range(node_id, start_pfn, end_pfn)
+ * memblock_add_node(base, size, nid)
* free_area_init_nodes(max_zone_pfns);
*
- * If the architecture guarantees that there are no holes in the ranges
- * registered with add_active_range(), free_bootmem_active_regions()
- * will call free_bootmem_node() for each registered physical page range.
- * Similarly sparse_memory_present_with_active_regions() calls
- * memory_present() for each range when SPARSEMEM is enabled.
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range. Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
*
* See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_ARCH_POPULATES_NODE_MAP
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
*/
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern void remove_all_active_ranges(void);
-void sort_node_map(void);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
@@ -1299,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
@@ -1491,6 +1482,18 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
+static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+ unsigned long vm_start, unsigned long vm_end)
+{
+ struct vm_area_struct *vma = find_vma(mm, vm_start);
+
+ if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
+ vma = NULL;
+
+ return vma;
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
@@ -1627,5 +1630,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
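find_exact_vma() returns a mapping only when both boundaries match, which suits callers that hold a (start, end) pair and must reject partially overlapping ranges. A minimal sketch, assuming the caller already holds mm->mmap_sem:

/* Caller must hold mm->mmap_sem (read is sufficient). */
static bool example_exact_mapping_exists(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	return find_exact_vma(mm, start, end) != NULL;
}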
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 415f2db414e..c8ef9bc54d5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -218,6 +218,7 @@ struct mmc_card {
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
}
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
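Quirk bits are normally consumed through their small accessors rather than by testing card->quirks directly. A hedged sketch of a timeout computation honouring the new quirk; the doubling factor is purely illustrative, not the value the MMC core uses:

static void example_adjust_read_timeout(struct mmc_card *card,
					unsigned int *timeout_ns)
{
	/* Some cards need more read time than their CSD advertises. */
	if (mmc_card_long_read_time(card))
		*timeout_ns *= 2;	/* illustrative scaling only */
}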
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 188cb2ffe8d..ca6ca92418a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -317,6 +317,12 @@ struct zone {
*/
unsigned long lowmem_reserve[MAX_NR_ZONES];
+ /*
+ * This is a per-zone reserve of pages that should not be
+ * considered dirtyable memory.
+ */
+ unsigned long dirty_balance_reserve;
+
#ifdef CONFIG_NUMA
int node;
/*
@@ -598,13 +604,13 @@ struct zonelist {
#endif
};
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
unsigned long start_pfn;
unsigned long end_pfn;
int nid;
};
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +726,7 @@ extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
return movable_zone == ZONE_HIGHMEM;
#else
return 0;
@@ -938,7 +944,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+ !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
return 0;
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 29304855652..5a8e3903d77 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -2,39 +2,16 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
-#include <linux/path.h>
-#include <linux/seq_file.h>
-#include <linux/wait.h>
-
-struct mnt_namespace {
- atomic_t count;
- struct vfsmount * root;
- struct list_head list;
- wait_queue_head_t poll;
- int event;
-};
-
-struct proc_mounts {
- struct seq_file m; /* must be the first element */
- struct mnt_namespace *ns;
- struct path root;
-};
-
+struct mnt_namespace;
struct fs_struct;
-extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
-static inline void get_mnt_ns(struct mnt_namespace *ns)
-{
- atomic_inc(&ns->count);
-}
-extern const struct seq_operations mounts_op;
-extern const struct seq_operations mountinfo_op;
-extern const struct seq_operations mountstats_op;
-extern int mnt_had_events(struct proc_mounts *);
+extern const struct file_operations proc_mounts_operations;
+extern const struct file_operations proc_mountinfo_operations;
+extern const struct file_operations proc_mountstats_operations;
#endif
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 468819cdde8..83ac0713ed0 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -542,4 +542,22 @@ struct isapnp_device_id {
kernel_ulong_t driver_data; /* data private to the driver */
};
+/**
+ * struct amba_id - identifies a device on an AMBA bus
+ * @id: The significant bits of the hardware device ID
+ * @mask: Bitmask specifying which bits of the id field are significant when
+ * matching. A driver binds to a device when ((hardware device ID) & mask)
+ * == id.
+ * @data: Private data used by the driver.
+ */
+struct amba_id {
+ unsigned int id;
+ unsigned int mask;
+#ifndef __KERNEL__
+ kernel_ulong_t data;
+#else
+ void *data;
+#endif
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
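An AMBA driver matches a peripheral when (periphid & mask) == id. A sketch of a device-ID table built on the new struct; the ID value merely illustrates the encoding and the table is hypothetical:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct amba_id example_amba_ids[] = {
	{
		.id	= 0x00041011,	/* example PrimeCell periphid */
		.mask	= 0x000fffff,	/* ignore the revision nibbles */
	},
	{ 0, 0 },			/* terminating entry */
};

MODULE_DEVICE_TABLE(amba, example_amba_ids);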
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 33fe53d7811..d7029f4a191 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -47,45 +47,10 @@ struct mnt_namespace;
#define MNT_INTERNAL 0x4000
-struct mnt_pcp {
- int mnt_count;
- int mnt_writers;
-};
-
struct vfsmount {
- struct list_head mnt_hash;
- struct vfsmount *mnt_parent; /* fs we are mounted on */
- struct dentry *mnt_mountpoint; /* dentry of mountpoint */
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
-#ifdef CONFIG_SMP
- struct mnt_pcp __percpu *mnt_pcp;
- atomic_t mnt_longterm; /* how many of the refs are longterm */
-#else
- int mnt_count;
- int mnt_writers;
-#endif
- struct list_head mnt_mounts; /* list of children, anchored here */
- struct list_head mnt_child; /* and going through their mnt_child */
int mnt_flags;
- /* 4 bytes hole on 64bits arches without fsnotify */
-#ifdef CONFIG_FSNOTIFY
- __u32 mnt_fsnotify_mask;
- struct hlist_head mnt_fsnotify_marks;
-#endif
- const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
- struct list_head mnt_list;
- struct list_head mnt_expire; /* link in fs-specific expiry list */
- struct list_head mnt_share; /* circular list of shared mounts */
- struct list_head mnt_slave_list;/* list of slave mounts */
- struct list_head mnt_slave; /* slave list entry */
- struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */
- struct mnt_namespace *mnt_ns; /* containing namespace */
- int mnt_id; /* mount identifier */
- int mnt_group_id; /* peer group identifier */
- int mnt_expiry_mark; /* true if marked for expiry */
- int mnt_pinned;
- int mnt_ghosts;
};
struct file; /* forward dec */
@@ -94,15 +59,13 @@ extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
+extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern void mnt_pin(struct vfsmount *mnt);
extern void mnt_unpin(struct vfsmount *mnt);
extern int __mnt_is_readonly(struct vfsmount *mnt);
-extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
- const char *name, void *data);
-
struct file_system_type;
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 05acced439a..ce93a341337 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -1,6 +1,7 @@
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
+#include <linux/kobject.h>
#include <linux/list.h>
struct msi_msg {
@@ -44,6 +45,8 @@ struct msi_desc {
/* Last set MSI message */
struct msi_msg msg;
+
+ struct kobject kobj;
};
/*
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d2492549297..d5d2ec6494b 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -354,10 +354,10 @@ static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cf
onecmd = cmd;
break;
case 2:
- onecmd = cpu_to_cfi16(cmd);
+ onecmd = cpu_to_cfi16(map, cmd);
break;
case 4:
- onecmd = cpu_to_cfi32(cmd);
+ onecmd = cpu_to_cfi32(map, cmd);
break;
}
@@ -437,10 +437,10 @@ static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
case 1:
break;
case 2:
- res = cfi16_to_cpu(res);
+ res = cfi16_to_cpu(map, res);
break;
case 4:
- res = cfi32_to_cpu(res);
+ res = cfi32_to_cpu(map, res);
break;
default: BUG();
}
@@ -480,12 +480,12 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
if (map_bankwidth_is_1(map)) {
return val.x[0];
} else if (map_bankwidth_is_2(map)) {
- return cfi16_to_cpu(val.x[0]);
+ return cfi16_to_cpu(map, val.x[0]);
} else {
/* No point in a 64-bit byteswap since that would just be
swapping the responses from different chips, and we are
only interested in one chip (a representative sample) */
- return cfi32_to_cpu(val.x[0]);
+ return cfi32_to_cpu(map, val.x[0]);
}
}
@@ -496,12 +496,12 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
if (map_bankwidth_is_1(map)) {
return val.x[0] & 0xff;
} else if (map_bankwidth_is_2(map)) {
- return cfi16_to_cpu(val.x[0]);
+ return cfi16_to_cpu(map, val.x[0]);
} else {
/* No point in a 64-bit byteswap since that would just be
swapping the responses from different chips, and we are
only interested in one chip (a representative sample) */
- return cfi32_to_cpu(val.x[0]);
+ return cfi32_to_cpu(map, val.x[0]);
}
}
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
index 51cc3f5917a..b97a625071f 100644
--- a/include/linux/mtd/cfi_endian.h
+++ b/include/linux/mtd/cfi_endian.h
@@ -19,53 +19,35 @@
#include <asm/byteorder.h>
-#ifndef CONFIG_MTD_CFI_ADV_OPTIONS
-
-#define CFI_HOST_ENDIAN
-
-#else
-
-#ifdef CONFIG_MTD_CFI_NOSWAP
-#define CFI_HOST_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_LE_BYTE_SWAP
-#define CFI_LITTLE_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_BE_BYTE_SWAP
-#define CFI_BIG_ENDIAN
-#endif
-
-#endif
-
-#if defined(CFI_LITTLE_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_le16(x)
-#define cpu_to_cfi32(x) cpu_to_le32(x)
-#define cpu_to_cfi64(x) cpu_to_le64(x)
-#define cfi16_to_cpu(x) le16_to_cpu(x)
-#define cfi32_to_cpu(x) le32_to_cpu(x)
-#define cfi64_to_cpu(x) le64_to_cpu(x)
-#elif defined (CFI_BIG_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_be16(x)
-#define cpu_to_cfi32(x) cpu_to_be32(x)
-#define cpu_to_cfi64(x) cpu_to_be64(x)
-#define cfi16_to_cpu(x) be16_to_cpu(x)
-#define cfi32_to_cpu(x) be32_to_cpu(x)
-#define cfi64_to_cpu(x) be64_to_cpu(x)
-#elif defined (CFI_HOST_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) (x)
-#define cpu_to_cfi32(x) (x)
-#define cpu_to_cfi64(x) (x)
-#define cfi16_to_cpu(x) (x)
-#define cfi32_to_cpu(x) (x)
-#define cfi64_to_cpu(x) (x)
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
#else
#error No CFI endianness defined
#endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
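The conversion macros now take the map so that byte order is chosen per mapping at run time: a map whose swap field is zero falls back to the compile-time CFI_DEFAULT_ENDIAN via cfi_default(). A small sketch, assuming struct map_info has gained the swap field added in the map.h hunk below:

#include <linux/mtd/map.h>

static u16 example_read_cfi16(struct map_info *map, u16 raw)
{
	/*
	 * Expands to a be16/le16 conversion or a no-op depending on
	 * map->swap (CFI_HOST_ENDIAN, CFI_LITTLE_ENDIAN or CFI_BIG_ENDIAN).
	 */
	return cfi16_to_cpu(map, raw);
}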
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index a9e6ba46865..94e924e2ecd 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/bug.h>
-
+#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <asm/system.h>
@@ -214,6 +214,7 @@ struct map_info {
void __iomem *virt;
void *cached;
+ int swap; /* this mapping's byte-swapping requirement */
int bankwidth; /* in octets. This isn't necessarily the width
of actual bus cycles -- it's the repeat interval
in bytes, before you are talking to the first chip again.
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9f5b312af78..1a81fde8f33 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -171,87 +171,60 @@ struct mtd_info {
struct mtd_erase_region_info *eraseregions;
/*
- * Erase is an asynchronous operation. Device drivers are supposed
- * to call instr->callback() whenever the operation completes, even
- * if it completes with a failure.
- * Callers are supposed to pass a callback function and wait for it
- * to be called before writing to the block.
+ * Do not call via these pointers, use corresponding mtd_*()
+ * wrappers instead.
*/
int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-
- /* This stuff for eXecute-In-Place */
- /* phys is optional and may be set to NULL */
int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys);
-
- /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+ size_t *retlen, void **virt, resource_size_t *phys);
void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-
- /* Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags);
-
- /* Backing device capabilities for this device
- * - provides mmap capabilities
- */
- struct backing_dev_info *backing_dev_info;
-
-
- int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
- /* In blackbox flight recorder like scenarios we want to make successful
- writes in interrupt context. panic_write() is only intended to be
- called when its known the kernel is about to panic and we need the
- write to succeed. Since the kernel is not going to be running for much
- longer, this function can break locks and delay to ensure the write
- succeeds (but not sleep). */
-
- int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
+ int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
int (*read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
int (*write_oob) (struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
-
- /*
- * Methods to access the protection register area, present in some
- * flash devices. The user data is one time programmable but the
- * factory data is read only.
- */
- int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
- int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
- int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
-
- /* kvec-based read/write methods.
- NB: The 'count' parameter is the number of _vectors_, each of
- which contains an (ofs, len) tuple.
- */
- int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
-
- /* Sync */
+ struct mtd_oob_ops *ops);
+ int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
+ int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
void (*sync) (struct mtd_info *mtd);
-
- /* Chip-supported device locking */
int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
- /* Power Management functions */
+ int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
int (*suspend) (struct mtd_info *mtd);
void (*resume) (struct mtd_info *mtd);
+ /*
+ * If the driver is something smart, like UBI, it may need to maintain
+ * its own reference counting. The below functions are for the driver only.
+ */
+ int (*get_device) (struct mtd_info *mtd);
+ void (*put_device) (struct mtd_info *mtd);
- /* Bad block management functions */
- int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ /* Backing device capabilities for this device
+ * - provides mmap capabilities
+ */
+ struct backing_dev_info *backing_dev_info;
struct notifier_block reboot_notifier; /* default mode before reboot */
@@ -265,18 +238,218 @@ struct mtd_info {
struct module *owner;
struct device dev;
int usecount;
-
- /* If the driver is something smart, like UBI, it may need to maintain
- * its own reference counting. The below functions are only for driver.
- * The driver may register its callbacks. These callbacks are not
- * supposed to be called by MTD users */
- int (*get_device) (struct mtd_info *mtd);
- void (*put_device) (struct mtd_info *mtd);
};
-static inline struct mtd_info *dev_to_mtd(struct device *dev)
+/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return mtd->erase(mtd, instr);
+}
+
+/*
+ * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ if (!mtd->point)
+ return -EOPNOTSUPP;
+ return mtd->point(mtd, from, len, retlen, virt, phys);
+}
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- return dev ? dev_get_drvdata(dev) : NULL;
+ return mtd->unpoint(mtd, from, len);
+}
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ if (!mtd->get_unmapped_area)
+ return -EOPNOTSUPP;
+ return mtd->get_unmapped_area(mtd, len, offset, flags);
+}
+
+static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ return mtd->read(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->write)
+ return -EROFS;
+ return mtd->write(mtd, to, len, retlen, buf);
+}
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when it is
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->panic_write)
+ return -EOPNOTSUPP;
+ return mtd->panic_write(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->read_oob)
+ return -EOPNOTSUPP;
+ return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->write_oob)
+ return -EOPNOTSUPP;
+ return mtd->write_oob(mtd, to, ops);
+}
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one-time programmable, but the factory data is
+ * read-only.
+ */
+static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ if (!mtd->get_fact_prot_info)
+ return -EOPNOTSUPP;
+ return mtd->get_fact_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf,
+ size_t len)
+{
+ if (!mtd->get_user_prot_info)
+ return -EOPNOTSUPP;
+ return mtd->get_user_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->read_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->write_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ if (!mtd->lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->lock_user_prot_reg(mtd, from, len);
+}
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+ if (mtd->sync)
+ mtd->sync(mtd);
+}
+
+/* Chip-supported device locking */
+static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->lock)
+ return -EOPNOTSUPP;
+ return mtd->lock(mtd, ofs, len);
+}
+
+static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->unlock)
+ return -EOPNOTSUPP;
+ return mtd->unlock(mtd, ofs, len);
+}
+
+static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->is_locked)
+ return -EOPNOTSUPP;
+ return mtd->is_locked(mtd, ofs, len);
+}
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+ if (!mtd->suspend)
+ return -EOPNOTSUPP;
+ return mtd->suspend(mtd);
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+ if (mtd->resume)
+ mtd->resume(mtd);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->block_isbad)
+ return -EOPNOTSUPP;
+ return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->block_markbad)
+ return -EOPNOTSUPP;
+ return mtd->block_markbad(mtd, ofs);
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -309,6 +482,16 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->writesize);
}
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+ return mtd->read_oob && mtd->write_oob;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+ return !!mtd->block_isbad;
+}
+
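Putting the bad-block helpers together, a caller that erases a whole device is expected to probe for bad blocks first, and only on devices that can have them. The loop below is a sketch; erase_block_sync() refers to the synchronous wrapper sketched earlier and is not part of this header.

static void example_wipe_good_blocks(struct mtd_info *mtd)
{
	loff_t ofs;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		/* mtd_block_isbad() would return -EOPNOTSUPP here otherwise */
		if (mtd_can_have_bb(mtd) && mtd_block_isbad(mtd, ofs))
			continue;
		erase_block_sync(mtd, ofs, mtd->erasesize);
	}
}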
/* Kernel-side ioctl definitions */
struct mtd_partition;
@@ -338,13 +521,6 @@ struct mtd_notifier {
extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen);
-
-int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
- unsigned long count, loff_t from, size_t *retlen);
-
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
void mtd_erase_callback(struct erase_info *instr);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 904131bab50..63b5a8b6dfb 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -555,6 +555,7 @@ struct nand_chip {
#define NAND_MFR_HYNIX 0xad
#define NAND_MFR_MICRON 0x2c
#define NAND_MFR_AMD 0x01
+#define NAND_MFR_MACRONIX 0xc2
/**
* struct nand_flash_dev - NAND Flash Device ID Structure
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 04e018160e2..d2887e76b7f 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -30,6 +30,7 @@ struct physmap_flash_data {
unsigned int pfow_base;
char *probe_type;
struct mtd_partition *parts;
+ const char **part_probe_types;
};
#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index a7003b7a695..b188f68a08c 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -116,6 +116,7 @@ enum {
NDTPA_PROXY_DELAY, /* u64, msecs */
NDTPA_PROXY_QLEN, /* u32 */
NDTPA_LOCKTIME, /* u64, msecs */
+ NDTPA_QUEUE_LENBYTES, /* u32 */
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644
index 00000000000..77f5202977c
--- /dev/null
+++ b/include/linux/netdev_features.h
@@ -0,0 +1,146 @@
+/*
+ * Network device features.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NETDEV_FEATURES_H
+#define _LINUX_NETDEV_FEATURES_H
+
+#include <linux/types.h>
+
+typedef u64 netdev_features_t;
+
+enum {
+ NETIF_F_SG_BIT, /* Scatter/gather IO. */
+ NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */
+ __UNUSED_NETIF_F_1,
+ NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */
+ NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */
+ NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */
+ NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */
+ NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */
+ NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */
+ NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */
+ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
+ NETIF_F_GSO_BIT, /* Enable software GSO. */
+ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
+ /* do not use LLTX in new drivers */
+ NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
+ NETIF_F_GRO_BIT, /* Generic receive offload */
+ NETIF_F_LRO_BIT, /* large receive offload */
+
+ /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
+ NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
+ = NETIF_F_GSO_SHIFT,
+ NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
+ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
+ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
+ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
+ NETIF_F_FSO_BIT, /* ... FCoE segmentation */
+ NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */
+ /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */
+ NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */
+ = NETIF_F_GSO_LAST,
+
+ NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
+ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
+ NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
+ NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
+ NETIF_F_RXHASH_BIT, /* Receive hashing offload */
+ NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
+ NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */
+ NETIF_F_LOOPBACK_BIT, /* Enable loopback */
+
+ /*
+ * Add your fresh new feature above and remember to update
+ * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * some feature mask #defines below. Please also describe it
+ * in Documentation/networking/netdev-features.txt.
+ */
+
+ /**/NETDEV_FEATURE_COUNT
+};
+
+/* copy'n'paste compression ;) */
+#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit))
+#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
+
+#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
+#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
+#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
+#define NETIF_F_FSO __NETIF_F(FSO)
+#define NETIF_F_GRO __NETIF_F(GRO)
+#define NETIF_F_GSO __NETIF_F(GSO)
+#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
+#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
+#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM)
+#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER)
+#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX)
+#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX)
+#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
+#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
+#define NETIF_F_LLTX __NETIF_F(LLTX)
+#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
+#define NETIF_F_LRO __NETIF_F(LRO)
+#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
+#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
+#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
+#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
+#define NETIF_F_RXHASH __NETIF_F(RXHASH)
+#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SG __NETIF_F(SG)
+#define NETIF_F_TSO6 __NETIF_F(TSO6)
+#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
+#define NETIF_F_TSO __NETIF_F(TSO)
+#define NETIF_F_UFO __NETIF_F(UFO)
+#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
+
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+
+/* remember that ((t)1 << t_BITS) is undefined in C99 */
+#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
+ (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
+ ~NETIF_F_NEVER_CHANGE)
+
+/* Segmentation offload feature mask */
+#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
+ __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
+
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
+
+#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM
+#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+ NETIF_F_FSO)
+
+/*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+ NETIF_F_SG | NETIF_F_HIGHDMA | \
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+/*
+ * If one device doesn't support one of these features, then disable it
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+
+#endif /* _LINUX_NETDEV_FEATURES_H */
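For drivers the conversion is mostly mechanical: the feature words in struct net_device become netdev_features_t and the NETIF_F_* macros keep their names. A hypothetical probe-time assignment might look like the sketch below (the chosen feature set is purely illustrative).

static void example_setup_features(struct net_device *dev)
{
	netdev_features_t hw = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			       NETIF_F_RXCSUM | NETIF_F_GRO;

	dev->hw_features = hw;			/* user-toggleable via ethtool */
	dev->features = hw | NETIF_F_HIGHDMA;	/* currently active set */
	dev->vlan_features = hw;		/* inherited by VLAN devices */
}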
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cbeb5867cff..0eac07c9525 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,7 @@
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
#include <linux/ethtool.h>
#include <net/net_namespace.h>
@@ -50,8 +51,10 @@
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
+#include <net/netprio_cgroup.h>
+
+#include <linux/netdev_features.h>
-struct vlan_group;
struct netpoll_info;
struct phy_device;
/* 802.11 specific */
@@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc)
* used.
*/
-#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
-#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
- !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
- !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
- !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+ !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
@@ -212,6 +213,11 @@ enum {
#include <linux/cache.h>
#include <linux/skbuff.h>
+#ifdef CONFIG_RPS
+#include <linux/jump_label.h>
+extern struct jump_label_key rps_needed;
+#endif
+
struct neighbour;
struct neigh_parms;
struct sk_buff;
@@ -272,16 +278,11 @@ struct hh_cache {
*
* We could use other alignment values, but we must maintain the
* relationship HH alignment <= LL alignment.
- *
- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
- * may need.
*/
#define LL_RESERVED_SPACE(dev) \
((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
-#define LL_ALLOCATED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
#endif
enum netdev_queue_state_t {
- __QUEUE_STATE_XOFF,
+ __QUEUE_STATE_DRV_XOFF,
+ __QUEUE_STATE_STACK_XOFF,
__QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
- (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
+ (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+ (1 << __QUEUE_STATE_FROZEN))
};
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
+ * netif_tx_* functions below are used to manipulate this flag. The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently. The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state). Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */
struct netdev_queue {
/*
@@ -528,9 +541,8 @@ struct netdev_queue {
*/
struct net_device *dev;
struct Qdisc *qdisc;
- unsigned long state;
struct Qdisc *qdisc_sleeping;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
struct kobject kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -545,6 +557,18 @@ struct netdev_queue {
* please use this field instead of dev->trans_start
*/
unsigned long trans_start;
+
+ /*
+ * Number of TX timeouts for this queue
+ * (/sys/class/net/DEV/Q/trans_timeout)
+ */
+ unsigned long trans_timeout;
+
+ unsigned long state;
+
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
} ____cacheline_aligned_in_smp;
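On the driver side, only the DRV_XOFF bit is touched, through the netif_tx_* helpers, typically when the hardware ring fills up. The transmit routine below is a sketch; example_ring_full() and the ring handling are assumptions, and netdev_tx_sent_queue() is the BQL hook introduced later in this patch.

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... post the skb to the hardware descriptor ring ... */
	netdev_tx_sent_queue(txq, skb->len);

	if (example_ring_full(dev))		/* hypothetical helper */
		netif_tx_stop_queue(txq);	/* sets __QUEUE_STATE_DRV_XOFF */

	return NETDEV_TX_OK;
}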
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -573,7 +597,7 @@ struct rps_map {
struct rcu_head rcu;
u16 cpus[0];
};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -597,7 +621,7 @@ struct rps_dev_flow_table {
struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- (_num * sizeof(struct rps_dev_flow)))
+ ((_num) * sizeof(struct rps_dev_flow)))
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -608,7 +632,7 @@ struct rps_sock_flow_table {
u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
- (_num * sizeof(u16)))
+ ((_num) * sizeof(u16)))
#define RPS_NO_CPU 0xffff
@@ -660,7 +684,7 @@ struct xps_map {
struct rcu_head rcu;
u16 queues[0];
};
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
/ sizeof(u16))
@@ -683,6 +707,23 @@ struct netdev_tc_txq {
u16 offset;
};
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+/*
+ * This structure is to hold information about the device
+ * configured to run FCoE protocol stack.
+ */
+struct netdev_fcoe_hbainfo {
+ char manufacturer[64];
+ char serial_number[64];
+ char hardware_version[64];
+ char driver_version[64];
+ char optionrom_version[64];
+ char firmware_version[64];
+ char model[256];
+ char model_description[256];
+};
+#endif
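A corresponding ndo_fcoe_get_hbainfo() implementation simply fills in these fixed-size strings from adapter data; the values below are placeholders and the helper name is hypothetical.

static int example_fcoe_get_hbainfo(struct net_device *dev,
				    struct netdev_fcoe_hbainfo *info)
{
	snprintf(info->manufacturer, sizeof(info->manufacturer), "Example Corp");
	snprintf(info->model, sizeof(info->model), "EX-CNA-10G");
	snprintf(info->driver_version, sizeof(info->driver_version), "1.0.0");
	/* serial_number, firmware_version, etc. would come from the adapter */
	return 0;
}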
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -767,11 +808,11 @@ struct netdev_tc_txq {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
* If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is registered.
*
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
* If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is unregistered.
*
@@ -823,6 +864,13 @@ struct netdev_tc_txq {
* perform necessary setup and returns 1 to indicate the device is set up
* successfully to perform DDP on this I/O, otherwise this returns 0.
*
+ * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ * struct netdev_fcoe_hbainfo *hbainfo);
+ * Called when the FCoE Protocol stack wants information on the underlying
+ * device. This information is utilized by the FCoE protocol stack to
+ * register attributes with the Fibre Channel management service as per the
+ * FC-GS Fabric Device Management Information (FDMI) specification.
+ *
* int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
* Called when the underlying device wants to override default World Wide
* Name (WWN) generation mechanism in FCoE protocol stack to pass its own
@@ -845,12 +893,13 @@ struct netdev_tc_txq {
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
- * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
* Adjusts the requested feature flags according to device-specific
* constraints, and returns the resulting flags. Must not modify
* the device state.
*
- * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features().
* Must return >0 or -errno if it changed dev->features itself.
@@ -885,9 +934,9 @@ struct net_device_ops {
struct rtnl_link_stats64 *storage);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
- void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+ int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
- void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+ int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
@@ -912,7 +961,7 @@ struct net_device_ops {
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
@@ -925,9 +974,11 @@ struct net_device_ops {
u16 xid,
struct scatterlist *sgl,
unsigned int sgc);
+ int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ struct netdev_fcoe_hbainfo *hbainfo);
#endif
-#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
+#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -944,10 +995,12 @@ struct net_device_ops {
struct net_device *slave_dev);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
- u32 (*ndo_fix_features)(struct net_device *dev,
- u32 features);
+ netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
- u32 features);
+ netdev_features_t features);
+ int (*ndo_neigh_construct)(struct neighbour *n);
+ void (*ndo_neigh_destroy)(struct neighbour *n);
};
/*
@@ -997,91 +1050,13 @@ struct net_device {
struct list_head unreg_list;
/* currently active device features */
- u32 features;
+ netdev_features_t features;
/* user-changeable features */
- u32 hw_features;
+ netdev_features_t hw_features;
/* user-requested features */
- u32 wanted_features;
+ netdev_features_t wanted_features;
/* mask of features inheritable by VLAN devices */
- u32 vlan_features;
-
- /* Net device feature bits; if you change something,
- * also update netdev_features_strings[] in ethtool.c */
-
-#define NETIF_F_SG 1 /* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
-#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
-#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
-#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
-#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
-#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
-#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
-#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
-#define NETIF_F_GSO 2048 /* Enable software GSO. */
-#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
- /* do not use LLTX in new drivers */
-#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
-#define NETIF_F_GRO 16384 /* Generic receive offload */
-#define NETIF_F_LRO 32768 /* large receive offload */
-
-/* the GSO_MASK reserves bits 16 through 23 */
-#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
-#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
-#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
-#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
-#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
-#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
-#define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */
-#define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */
-
- /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT 16
-#define NETIF_F_GSO_MASK 0x00ff0000
-#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
-
- /* Features valid for ethtool to change */
- /* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
-#define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
-
- /* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
- NETIF_F_TSO6 | NETIF_F_UFO)
-
-
-#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
-
-#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
- NETIF_F_FSO)
-
- /*
- * If one device supports one of these features, then enable them
- * for all in netdev_increment_features.
- */
-#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
- NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
- /*
- * If one device doesn't support one of these features, then disable it
- * for all in netdev_increment_features.
- */
-#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
-
- /* changeable features with no special hardware requirements */
-#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+ netdev_features_t vlan_features;
/* Interface index. Unique device identifier */
int ifindex;
@@ -1132,6 +1107,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
+ unsigned char neigh_priv_len;
unsigned short dev_id; /* for shared network cards */
spinlock_t addr_list_lock;
@@ -1144,11 +1120,11 @@ struct net_device {
/* Protocol specific pointers */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- struct vlan_group __rcu *vlgrp; /* VLAN group */
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ struct vlan_info __rcu *vlan_info; /* VLAN info */
#endif
-#ifdef CONFIG_NET_DSA
- void *dsa_ptr; /* dsa specific data */
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
#endif
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
@@ -1184,9 +1160,11 @@ struct net_device {
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
struct kset *queues_kset;
+#endif
+#ifdef CONFIG_RPS
struct netdev_rx_queue *_rx;
/* Number of RX queues allocated at register_netdev() time */
@@ -1308,10 +1286,13 @@ struct net_device {
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
/* max exchange id for FCoE LRO by ddp */
unsigned int fcoe_ddp_xid;
#endif
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+ struct netprio_map __rcu *priomap;
+#endif
/* phy device may attach itself for hardware timestamping */
struct phy_device *phydev;
@@ -1515,7 +1496,7 @@ struct packet_type {
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- u32 features);
+ netdev_features_t features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
@@ -1783,7 +1764,7 @@ extern void __netif_schedule(struct Qdisc *q);
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
- if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+ if (!(txq->state & QUEUE_STATE_ANY_XOFF))
__netif_schedule(txq->qdisc);
}
@@ -1797,7 +1778,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
@@ -1829,7 +1810,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
return;
}
#endif
- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
__netif_schedule(dev_queue->qdisc);
}
@@ -1861,7 +1842,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
return;
}
- set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
@@ -1888,7 +1869,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
- return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+ return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
/**
@@ -1902,9 +1883,68 @@ static inline int netif_queue_stopped(const struct net_device *dev)
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
- return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+ if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+ clear_bit(__QUEUE_STATE_STACK_XOFF,
+ &dev_queue->state);
+ }
+#endif
+}
+
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+ unsigned pkts, unsigned bytes)
+{
+#ifdef CONFIG_BQL
+ if (likely(bytes)) {
+ dql_completed(&dev_queue->dql, bytes);
+ if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
+ &dev_queue->state) &&
+ dql_avail(&dev_queue->dql) >= 0)) {
+ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
+ &dev_queue->state))
+ netif_schedule_queue(dev_queue);
+ }
+ }
+#endif
+}
+
+static inline void netdev_completed_queue(struct net_device *dev,
+ unsigned pkts, unsigned bytes)
+{
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+ dql_reset(&q->dql);
+#endif
+}
+
+static inline void netdev_reset_queue(struct net_device *dev_queue)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
}
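The completion path is the other half of the BQL contract: the driver reports how many packets and bytes the hardware finished so DQL can adjust the in-flight limit, then wakes its own queue if it had stopped it. A sketch, with example_ring_has_room() as a hypothetical ring-space check:

static void example_tx_clean(struct net_device *dev, struct netdev_queue *txq,
			     unsigned int pkts, unsigned int bytes)
{
	/* may clear __QUEUE_STATE_STACK_XOFF and reschedule the queue */
	netdev_tx_completed_queue(txq, pkts, bytes);

	if (netif_tx_queue_stopped(txq) && example_ring_has_room(dev))
		netif_tx_wake_queue(txq);	/* clears __QUEUE_STATE_DRV_XOFF */
}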
/**
@@ -1991,7 +2031,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
if (netpoll_trap())
return;
#endif
- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
__netif_schedule(txq->qdisc);
}
@@ -2115,7 +2155,7 @@ extern void netdev_run_todo(void);
*/
static inline void dev_put(struct net_device *dev)
{
- irqsafe_cpu_dec(*dev->pcpu_refcnt);
+ this_cpu_dec(*dev->pcpu_refcnt);
}
/**
@@ -2126,7 +2166,7 @@ static inline void dev_put(struct net_device *dev)
*/
static inline void dev_hold(struct net_device *dev)
{
- irqsafe_cpu_inc(*dev->pcpu_refcnt);
+ this_cpu_inc(*dev->pcpu_refcnt);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
@@ -2410,6 +2450,11 @@ static inline void netif_addr_lock(struct net_device *dev)
spin_lock(&dev->addr_list_lock);
}
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+}
+
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
@@ -2520,7 +2565,8 @@ extern int netdev_set_master(struct net_device *dev, struct net_device *master)
extern int netdev_set_bond_master(struct net_device *dev,
struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
@@ -2536,6 +2582,8 @@ extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
@@ -2547,11 +2595,13 @@ extern const char *netdev_drivername(const struct net_device *dev);
extern void linkwatch_run_queue(void);
-static inline u32 netdev_get_wanted_features(struct net_device *dev)
+static inline netdev_features_t netdev_get_wanted_features(
+ struct net_device *dev)
{
return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
-u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+netdev_features_t netdev_increment_features(netdev_features_t all,
+ netdev_features_t one, netdev_features_t mask);
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
@@ -2559,21 +2609,31 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-u32 netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(u32 features, int gso_type)
+static inline int net_gso_ok(netdev_features_t features, int gso_type)
{
- int feature = gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+ /* check flags correspondence */
+ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
+static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline int netif_needs_gso(struct sk_buff *skb, int features)
+static inline int netif_needs_gso(struct sk_buff *skb,
+ netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
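The transmit path combines these helpers roughly as sketched below (loosely modelled on dev_hard_start_xmit()): compute the per-skb feature set, and only fall back to software segmentation when the device cannot GSO the skb itself. example_xmit_one() is an illustrative name, not a core API.

static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);
		/* ... hand each resulting segment to the device ... */
		return 0;
	}
	/* ... device handles the skb (or its GSO) directly ... */
	return 0;
}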
@@ -2592,22 +2652,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
extern struct pernet_operations __net_initdata loopback_net_ops;
-static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
-{
- if (dev->features & NETIF_F_RXCSUM)
- return 1;
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
- return 0;
- return dev->ethtool_ops->get_rx_csum(dev);
-}
-
-static inline u32 dev_ethtool_get_flags(struct net_device *dev)
-{
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
- return 0;
- return dev->ethtool_ops->get_flags(dev);
-}
-
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* netdev_printk helpers, similar to dev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 857f5026ced..b809265607d 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#if defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook))
+ return static_branch(&nf_hooks_needed[pf][hook]);
+
+ return !list_empty(&nf_hooks[pf][hook]);
+}
+#else
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ return !list_empty(&nf_hooks[pf][hook]);
+}
+#endif
+
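The static key only pays off because hook registration is rare: nf_register_hook() is what increments the corresponding nf_hooks_needed[] entry when CONFIG_JUMP_LABEL is enabled, so nf_hooks_active() stays a patched-out branch on hookless systems. A minimal, hypothetical IPv4 hook registration for reference:

static unsigned int example_hook_fn(unsigned int hooknum, struct sk_buff *skb,
				    const struct net_device *in,
				    const struct net_device *out,
				    int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;	/* inspect-only hook */
}

static struct nf_hook_ops example_ops = {
	.hook		= example_hook_fn,
	.owner		= THIS_MODULE,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FILTER,
};
/* nf_register_hook(&example_ops) in module init, nf_unregister_hook() on exit */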
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh);
@@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh)
{
-#ifndef CONFIG_NETFILTER_DEBUG
- if (list_empty(&nf_hooks[pf][hook]))
- return 1;
-#endif
- return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+ if (nf_hooks_active(pf, hook))
+ return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+ return 1;
}
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index a1b410c76fc..e144f54185c 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -5,7 +5,9 @@ header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
header-y += nf_conntrack_tcp.h
header-y += nf_conntrack_tuple_common.h
+header-y += nf_nat.h
header-y += nfnetlink.h
+header-y += nfnetlink_acct.h
header-y += nfnetlink_compat.h
header-y += nfnetlink_conntrack.h
header-y += nfnetlink_log.h
@@ -21,6 +23,7 @@ header-y += xt_DSCP.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_MARK.h
+header-y += xt_nfacct.h
header-y += xt_NFLOG.h
header-y += xt_NFQUEUE.h
header-y += xt_RATEEST.h
@@ -40,6 +43,7 @@ header-y += xt_cpu.h
header-y += xt_dccp.h
header-y += xt_devgroup.h
header-y += xt_dscp.h
+header-y += xt_ecn.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
header-y += xt_helper.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0d3dd66322e..9e3a2838291 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -83,6 +83,10 @@ enum ip_conntrack_status {
/* Conntrack is a fake untracked entry */
IPS_UNTRACKED_BIT = 12,
IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+
+ /* Conntrack has a userspace helper. */
+ IPS_USERSPACE_HELPER_BIT = 13,
+ IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
};
/* Connection tracking event types */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
index 2ea22b018a8..2f6bbc5b812 100644
--- a/include/linux/netfilter/nf_conntrack_tuple_common.h
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -7,6 +7,33 @@ enum ip_conntrack_dir {
IP_CT_DIR_MAX
};
+/* The protocol-specific manipulable parts of the tuple: always in
+ * network order
+ */
+union nf_conntrack_man_proto {
+ /* Add other protocols here. */
+ __be16 all;
+
+ struct {
+ __be16 port;
+ } tcp;
+ struct {
+ __be16 port;
+ } udp;
+ struct {
+ __be16 id;
+ } icmp;
+ struct {
+ __be16 port;
+ } dccp;
+ struct {
+ __be16 port;
+ } sctp;
+ struct {
+ __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */
+ } gre;
+};
+
#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h
new file mode 100644
index 00000000000..8df2d13730b
--- /dev/null
+++ b/include/linux/netfilter/nf_nat.h
@@ -0,0 +1,25 @@
+#ifndef _NETFILTER_NF_NAT_H
+#define _NETFILTER_NF_NAT_H
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_NAT_RANGE_MAP_IPS 1
+#define NF_NAT_RANGE_PROTO_SPECIFIED 2
+#define NF_NAT_RANGE_PROTO_RANDOM 4
+#define NF_NAT_RANGE_PERSISTENT 8
+
+struct nf_nat_ipv4_range {
+ unsigned int flags;
+ __be32 min_ip;
+ __be32 max_ip;
+ union nf_conntrack_man_proto min;
+ union nf_conntrack_man_proto max;
+};
+
+struct nf_nat_ipv4_multi_range_compat {
+ unsigned int rangesize;
+ struct nf_nat_ipv4_range range[1];
+};
+
+#endif /* _NETFILTER_NF_NAT_H */
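The renamed structures describe exactly one NAT mapping each. As a sketch, the snippet below fills in a single-range "map source to 192.0.2.10, ports 1024-65535" description such as an iptables SNAT-style target might consume; the helper name is illustrative.

static void example_fill_snat_range(struct nf_nat_ipv4_multi_range_compat *mr)
{
	memset(mr, 0, sizeof(*mr));
	mr->rangesize = 1;
	mr->range[0].flags = NF_NAT_RANGE_MAP_IPS |
			     NF_NAT_RANGE_PROTO_SPECIFIED;
	mr->range[0].min_ip = mr->range[0].max_ip = htonl(0xc000020a); /* 192.0.2.10 */
	mr->range[0].min.tcp.port = htons(1024);
	mr->range[0].max.tcp.port = htons(65535);
}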
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 74d33861473..b64454c2f79 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -48,7 +48,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_ULOG 4
#define NFNL_SUBSYS_OSF 5
#define NFNL_SUBSYS_IPSET 6
-#define NFNL_SUBSYS_COUNT 7
+#define NFNL_SUBSYS_ACCT 7
+#define NFNL_SUBSYS_COUNT 8
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
new file mode 100644
index 00000000000..7c4279b4ae7
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -0,0 +1,36 @@
+#ifndef _NFNL_ACCT_H_
+#define _NFNL_ACCT_H_
+
+#ifndef NFACCT_NAME_MAX
+#define NFACCT_NAME_MAX 32
+#endif
+
+enum nfnl_acct_msg_types {
+ NFNL_MSG_ACCT_NEW,
+ NFNL_MSG_ACCT_GET,
+ NFNL_MSG_ACCT_GET_CTRZERO,
+ NFNL_MSG_ACCT_DEL,
+ NFNL_MSG_ACCT_MAX
+};
+
+enum nfnl_acct_type {
+ NFACCT_UNSPEC,
+ NFACCT_NAME,
+ NFACCT_PKTS,
+ NFACCT_BYTES,
+ NFACCT_USE,
+ __NFACCT_MAX
+};
+#define NFACCT_MAX (__NFACCT_MAX - 1)
+
+#ifdef __KERNEL__
+
+struct nf_acct;
+
+extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+extern void nfnl_acct_put(struct nf_acct *acct);
+extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+
+#endif /* __KERNEL__ */
+
+#endif /* _NFNL_ACCT_H */
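The kernel-side API boils down to: look up the accounting object by name once, update it per packet, and drop the reference on teardown. A hypothetical in-kernel user (the object name "example-counter" is made up):

static struct nf_acct *example_acct;

static int example_acct_bind(void)
{
	example_acct = nfnl_acct_find_get("example-counter");
	return example_acct ? 0 : -ENOENT;
}

static void example_acct_count(const struct sk_buff *skb)
{
	nfnl_acct_update(skb, example_acct);	/* bumps packet/byte counters */
}

static void example_acct_release(void)
{
	nfnl_acct_put(example_acct);
}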
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 32cddf78b13..8d674a78674 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -471,7 +471,7 @@ DECLARE_PER_CPU(seqcount_t, xt_recseq);
*
* Begin packet processing : all readers must wait the end
* 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
* Returns :
* 1 if no recursion on this cpu
* 0 if recursion detected
@@ -503,7 +503,7 @@ static inline unsigned int xt_write_recseq_begin(void)
*
* End packet processing : all readers can proceed
* 1) Must be called with preemption disabled
- * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
*/
static inline void xt_write_recseq_end(unsigned int addend)
{
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index b56e76811c0..6390f0992f3 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -3,7 +3,8 @@
#include <linux/types.h>
-#define XT_CT_NOTRACK 0x1
+#define XT_CT_NOTRACK 0x1
+#define XT_CT_USERSPACE_HELPER 0x2
struct xt_ct_target_info {
__u16 flags;
diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h
new file mode 100644
index 00000000000..7158fca364f
--- /dev/null
+++ b/include/linux/netfilter/xt_ecn.h
@@ -0,0 +1,35 @@
+/* iptables module for matching the ECN header in IPv4 and TCP header
+ *
+ * (C) 2002 Harald Welte <laforge@gnumonks.org>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ *
+ * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
+*/
+#ifndef _XT_ECN_H
+#define _XT_ECN_H
+
+#include <linux/types.h>
+#include <linux/netfilter/xt_dscp.h>
+
+#define XT_ECN_IP_MASK (~XT_DSCP_MASK)
+
+#define XT_ECN_OP_MATCH_IP 0x01
+#define XT_ECN_OP_MATCH_ECE 0x10
+#define XT_ECN_OP_MATCH_CWR 0x20
+
+#define XT_ECN_OP_MATCH_MASK 0xce
+
+/* match info */
+struct xt_ecn_info {
+ __u8 operation;
+ __u8 invert;
+ __u8 ip_ect;
+ union {
+ struct {
+ __u8 ect;
+ } tcp;
+ } proto;
+};
+
+#endif /* _XT_ECN_H */
diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h
new file mode 100644
index 00000000000..3e19c8a8657
--- /dev/null
+++ b/include/linux/netfilter/xt_nfacct.h
@@ -0,0 +1,13 @@
+#ifndef _XT_NFACCT_MATCH_H
+#define _XT_NFACCT_MATCH_H
+
+#include <linux/netfilter/nfnetlink_acct.h>
+
+struct nf_acct;
+
+struct xt_nfacct_match_info {
+ char name[NFACCT_NAME_MAX];
+ struct nf_acct *nfacct;
+};
+
+#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h
new file mode 100644
index 00000000000..8358d4f7195
--- /dev/null
+++ b/include/linux/netfilter/xt_rpfilter.h
@@ -0,0 +1,23 @@
+#ifndef _XT_RPATH_H
+#define _XT_RPATH_H
+
+#include <linux/types.h>
+
+enum {
+ XT_RPFILTER_LOOSE = 1 << 0,
+ XT_RPFILTER_VALID_MARK = 1 << 1,
+ XT_RPFILTER_ACCEPT_LOCAL = 1 << 2,
+ XT_RPFILTER_INVERT = 1 << 3,
+#ifdef __KERNEL__
+ XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE |
+ XT_RPFILTER_VALID_MARK |
+ XT_RPFILTER_ACCEPT_LOCAL |
+ XT_RPFILTER_INVERT,
+#endif
+};
+
+struct xt_rpfilter_info {
+ __u8 flags;
+};
+
+#endif
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index c3b45480ecf..f9930c87fff 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -12,4 +12,3 @@ header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_realm.h
header-y += ipt_ttl.h
-header-y += nf_nat.h
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index eabf95fb7d3..0e0c063dbf6 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -1,35 +1,15 @@
-/* iptables module for matching the ECN header in IPv4 and TCP header
- *
- * (C) 2002 Harald Welte <laforge@gnumonks.org>
- *
- * This software is distributed under GNU GPL v2, 1991
- *
- * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp
-*/
#ifndef _IPT_ECN_H
#define _IPT_ECN_H
-#include <linux/types.h>
-#include <linux/netfilter/xt_dscp.h>
+#include <linux/netfilter/xt_ecn.h>
+#define ipt_ecn_info xt_ecn_info
-#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
-
-#define IPT_ECN_OP_MATCH_IP 0x01
-#define IPT_ECN_OP_MATCH_ECE 0x10
-#define IPT_ECN_OP_MATCH_CWR 0x20
-
-#define IPT_ECN_OP_MATCH_MASK 0xce
-
-/* match info */
-struct ipt_ecn_info {
- __u8 operation;
- __u8 invert;
- __u8 ip_ect;
- union {
- struct {
- __u8 ect;
- } tcp;
- } proto;
+enum {
+ IPT_ECN_IP_MASK = XT_ECN_IP_MASK,
+ IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP,
+ IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE,
+ IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR,
+ IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK,
};
-#endif /* _IPT_ECN_H */
+#endif /* IPT_ECN_H */
diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h
deleted file mode 100644
index 7a861d09fc8..00000000000
--- a/include/linux/netfilter_ipv4/nf_nat.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _LINUX_NF_NAT_H
-#define _LINUX_NF_NAT_H
-
-#include <linux/types.h>
-
-#define IP_NAT_RANGE_MAP_IPS 1
-#define IP_NAT_RANGE_PROTO_SPECIFIED 2
-#define IP_NAT_RANGE_PROTO_RANDOM 4
-#define IP_NAT_RANGE_PERSISTENT 8
-
-/* The protocol-specific manipulable parts of the tuple. */
-union nf_conntrack_man_proto {
- /* Add other protocols here. */
- __be16 all;
-
- struct {
- __be16 port;
- } tcp;
- struct {
- __be16 port;
- } udp;
- struct {
- __be16 id;
- } icmp;
- struct {
- __be16 port;
- } dccp;
- struct {
- __be16 port;
- } sctp;
- struct {
- __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */
- } gre;
-};
-
-/* Single range specification. */
-struct nf_nat_range {
- /* Set to OR of flags above. */
- unsigned int flags;
-
- /* Inclusive: network order. */
- __be32 min_ip, max_ip;
-
- /* Inclusive: network order */
- union nf_conntrack_man_proto min, max;
-};
-
-/* For backwards compat: don't use in modern code. */
-struct nf_nat_multi_range_compat {
- unsigned int rangesize; /* Must be 1. */
-
- /* hangs off end. */
- struct nf_nat_range range[1];
-};
-
-#define nf_nat_multi_range nf_nat_multi_range_compat
-
-#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 8374d296736..52e48959cfa 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -8,7 +8,7 @@
#define NETLINK_UNUSED 1 /* Unused number */
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
#define NETLINK_FIREWALL 3 /* Firewalling hook */
-#define NETLINK_INET_DIAG 4 /* INET socket monitoring */
+#define NETLINK_SOCK_DIAG 4 /* socket monitoring */
#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
#define NETLINK_XFRM 6 /* ipsec */
#define NETLINK_SELINUX 7 /* SELinux event notifications */
@@ -27,6 +27,8 @@
#define NETLINK_RDMA 20
#define NETLINK_CRYPTO 21 /* Crypto layer */
+#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG
+
#define MAX_LINKS 32
struct sockaddr_nl {
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 36cb955b05c..01d4e5d6032 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -62,6 +62,8 @@ enum nfc_commands {
NFC_CMD_GET_DEVICE,
NFC_CMD_DEV_UP,
NFC_CMD_DEV_DOWN,
+ NFC_CMD_DEP_LINK_UP,
+ NFC_CMD_DEP_LINK_DOWN,
NFC_CMD_START_POLL,
NFC_CMD_STOP_POLL,
NFC_CMD_GET_TARGET,
@@ -86,6 +88,9 @@ enum nfc_commands {
* @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID
* @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the
* target is not NFC-Forum compliant)
+ * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes
+ * @NFC_ATTR_COMM_MODE: Passive or active mode
+ * @NFC_ATTR_RF_MODE: Initiator or target
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -95,6 +100,9 @@ enum nfc_attrs {
NFC_ATTR_TARGET_INDEX,
NFC_ATTR_TARGET_SENS_RES,
NFC_ATTR_TARGET_SEL_RES,
+ NFC_ATTR_TARGET_NFCID1,
+ NFC_ATTR_COMM_MODE,
+ NFC_ATTR_RF_MODE,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
@@ -111,6 +119,14 @@ enum nfc_attrs {
#define NFC_PROTO_MAX 6
+/* NFC communication modes */
+#define NFC_COMM_ACTIVE 0
+#define NFC_COMM_PASSIVE 1
+
+/* NFC RF modes */
+#define NFC_RF_INITIATOR 0
+#define NFC_RF_TARGET 1
+
/* NFC protocols masks used in bitsets */
#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL)
#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE)
@@ -125,9 +141,22 @@ struct sockaddr_nfc {
__u32 nfc_protocol;
};
+#define NFC_LLCP_MAX_SERVICE_NAME 63
+struct sockaddr_nfc_llcp {
+ sa_family_t sa_family;
+ __u32 dev_idx;
+ __u32 target_idx;
+ __u32 nfc_protocol;
+ __u8 dsap; /* Destination SAP, if known */
+ __u8 ssap; /* Source SAP to be bound to */
+	char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */
+ size_t service_name_len;
+};
+
/* NFC socket protocols */
#define NFC_SOCKPROTO_RAW 0
-#define NFC_SOCKPROTO_MAX 1
+#define NFC_SOCKPROTO_LLCP 1
+#define NFC_SOCKPROTO_MAX 2
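From userspace, the new address type is used like any other socket address: create an NFC socket with the LLCP protocol and bind it to a service name. The sketch below is illustrative only; AF_NFC and the NFC_* constants come from <linux/nfc.h>, and NFC_PROTO_NFC_DEP is assumed to be the peer-to-peer protocol value.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/nfc.h>

static int example_llcp_bind(uint32_t dev_idx)
{
	struct sockaddr_nfc_llcp addr;
	int fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.dev_idx = dev_idx;
	addr.nfc_protocol = NFC_PROTO_NFC_DEP;	/* assumed protocol constant */
	strcpy(addr.service_name, "urn:nfc:sn:example");
	addr.service_name_len = strlen(addr.service_name);

	return bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ? -1 : fd;
}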
#define NFC_HEADER_SIZE 1
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 92ecf5585fa..8c29950d2fa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -373,7 +373,7 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
-extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b5479df8378..ba4d7656ecf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -153,6 +153,7 @@ struct nfs_server {
struct rb_root openowner_id;
struct rb_root lockowner_id;
#endif
+ struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
void (*destroy)(struct nfs_server *);
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index ae7d6a380da..308c1887701 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -66,6 +66,8 @@ struct idmap_msg {
/* Forward declaration to make this header independent of others */
struct nfs_client;
struct nfs_server;
+struct nfs_fattr;
+struct nfs4_string;
#ifdef CONFIG_NFS_USE_NEW_IDMAPPER
@@ -97,6 +99,12 @@ void nfs_idmap_delete(struct nfs_client *);
#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
+void nfs_fattr_init_names(struct nfs_fattr *fattr,
+ struct nfs4_string *owner_name,
+ struct nfs4_string *group_name);
+void nfs_fattr_free_names(struct nfs_fattr *);
+void nfs_fattr_map_and_free_names(struct nfs_server *, struct nfs_fattr *);
+
int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *);
int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *);
int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2a7c533be5d..a764cef06b7 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -18,6 +18,11 @@
/* Forward declaration for NFS v3 */
struct nfs4_secinfo_flavors;
+struct nfs4_string {
+ unsigned int len;
+ char *data;
+};
+
struct nfs_fsid {
uint64_t major;
uint64_t minor;
@@ -61,6 +66,8 @@ struct nfs_fattr {
struct timespec pre_ctime; /* pre_op_attr.ctime */
unsigned long time_start;
unsigned long gencount;
+ struct nfs4_string *owner_name;
+ struct nfs4_string *group_name;
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -85,6 +92,8 @@ struct nfs_fattr {
#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */
#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 20) /* Treat as mountpoint */
#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 21)
+#define NFS_ATTR_FATTR_OWNER_NAME (1U << 22)
+#define NFS_ATTR_FATTR_GROUP_NAME (1U << 23)
#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
| NFS_ATTR_FATTR_MODE \
@@ -324,6 +333,7 @@ struct nfs_openargs {
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
+ const u32 * dir_bitmask;
__u32 claim;
struct nfs4_sequence_args seq_args;
};
@@ -342,6 +352,8 @@ struct nfs_openres {
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_string *owner;
+ struct nfs4_string *group_owner;
struct nfs4_sequence_res seq_res;
};
@@ -602,11 +614,16 @@ struct nfs_getaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct page * acl_scratch;
struct nfs4_sequence_args seq_args;
};
+/* getxattr ACL interface flags */
+#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
struct nfs_getaclres {
size_t acl_len;
+ size_t acl_data_offset;
+ int acl_flags;
struct nfs4_sequence_res seq_res;
};
@@ -773,11 +790,6 @@ struct nfs3_getaclres {
struct posix_acl * acl_default;
};
-struct nfs4_string {
- unsigned int len;
- char *data;
-};
-
#ifdef CONFIG_NFS_V4
typedef u64 clientid4;
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 8049bf77d79..0f5ff373982 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -509,6 +509,38 @@
* @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
* @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
*
+ * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
+ * (or GO) interface (i.e. hostapd) to ask for unexpected frames to
+ * implement sending deauth to stations that send unexpected class 3
+ * frames. Also used as the event sent by the kernel when such a frame
+ * is received.
+ * For the event, the %NL80211_ATTR_MAC attribute carries the TA and
+ * other attributes like the interface index are present.
+ * If used as the command it must have an interface index and you can
+ * only unsubscribe from the event by closing the socket. Subscription
+ * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events.
+ *
+ * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the
+ * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame
+ * and wasn't already in a 4-addr VLAN. The event will be sent similarly
+ * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener.
+ *
+ * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
+ * by sending a null data frame to it and reporting when the frame is
+ * acknowledged. This is used to allow timing out inactive clients. Uses
+ * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
+ * direct reply with an %NL80211_ATTR_COOKIE that is later used to match
+ * up the event with the request. The event includes the same data and
+ * has %NL80211_ATTR_ACK set if the frame was ACKed.
+ *
+ * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from
+ * other BSSes when any interfaces are in AP mode. This helps implement
+ * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
+ * messages. Note that per PHY only one application may register.
+ *
+ * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap indicating, per TID, whether the
+ * No Acknowledgement Policy should be applied.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -638,6 +670,16 @@ enum nl80211_commands {
NL80211_CMD_TDLS_OPER,
NL80211_CMD_TDLS_MGMT,
+ NL80211_CMD_UNEXPECTED_FRAME,
+
+ NL80211_CMD_PROBE_CLIENT,
+
+ NL80211_CMD_REGISTER_BEACONS,
+
+ NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+
+ NL80211_CMD_SET_NOACK_MAP,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -658,6 +700,8 @@ enum nl80211_commands {
#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
/* source-level API compatibility */
#define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG
#define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG
@@ -1109,6 +1153,46 @@ enum nl80211_commands {
* %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
* used for asking the driver to perform a TDLS operation.
*
+ * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices
+ * that have AP support to indicate that they have the AP SME integrated
+ * with support for the features listed in this attribute, see
+ * &enum nl80211_ap_sme_features.
+ *
+ * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells
+ * the driver to not wait for an acknowledgement. Note that due to this,
+ * it will also not give a status callback nor return a cookie. This is
+ * mostly useful for probe responses to save airtime.
+ *
+ * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
+ * &enum nl80211_feature_flags and is advertised in wiphy information.
+ * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
+ * requests while operating in AP-mode.
+ * This attribute holds a bitmap of the supported protocols for
+ * offloading (see &enum nl80211_probe_resp_offload_support_attr).
+ *
+ * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
+ * probe-response frame. The DA field in the 802.11 header is zero-ed out,
+ * to be filled by the FW.
+ * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
+ * this feature. Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
+ * ATTR_HT_CAPABILITY to which attention should be paid.
+ * Currently, only mac80211 NICs support this feature.
+ * The values that may be configured are:
+ * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40
+ * AMPDU density and AMPDU factor.
+ * All values are treated as suggestions and may be ignored
+ * by the driver as required. The actual values may be seen in
+ * the station debugfs ht_caps file.
+ *
+ * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
+ * abides by when initiating radiation on DFS channels. A country maps
+ * to one DFS region.
+ *
+ * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
+ * up to 16 TIDs.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1337,6 +1421,23 @@ enum nl80211_attrs {
NL80211_ATTR_TDLS_SUPPORT,
NL80211_ATTR_TDLS_EXTERNAL_SETUP,
+ NL80211_ATTR_DEVICE_AP_SME,
+
+ NL80211_ATTR_DONT_WAIT_FOR_ACK,
+
+ NL80211_ATTR_FEATURE_FLAGS,
+
+ NL80211_ATTR_PROBE_RESP_OFFLOAD,
+
+ NL80211_ATTR_PROBE_RESP,
+
+ NL80211_ATTR_DFS_REGION,
+
+ NL80211_ATTR_DISABLE_HT,
+ NL80211_ATTR_HT_CAPABILITY_MASK,
+
+ NL80211_ATTR_NOACK_MAP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1371,6 +1472,7 @@ enum nl80211_attrs {
#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES
#define NL80211_ATTR_KEY NL80211_ATTR_KEY
#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
@@ -1434,7 +1536,11 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_WME: station is WME/QoS capable
* @NL80211_STA_FLAG_MFP: station uses management frame protection
* @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated
- * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer
+ * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should
+ * only be used in managed mode (even in the flags mask). Note that the
+ * flag can't be changed, it is only valid while adding a station, and
+ * attempts to change it will silently be ignored (rather than rejected
+ * as errors).
* @NL80211_STA_FLAG_MAX: highest station flag number currently defined
* @__NL80211_STA_FLAG_AFTER_LAST: internal use
*/
@@ -1549,6 +1655,7 @@ enum nl80211_sta_bss_param {
* containing info as possible, see &enum nl80211_sta_bss_param
* @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
* @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
+ * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -1571,6 +1678,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_BSS_PARAM,
NL80211_STA_INFO_CONNECTED_TIME,
NL80211_STA_INFO_STA_FLAGS,
+ NL80211_STA_INFO_BEACON_LOSS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -1845,6 +1953,21 @@ enum nl80211_reg_rule_flags {
};
/**
+ * enum nl80211_dfs_regions - regulatory DFS regions
+ *
+ * @NL80211_DFS_UNSET: Country has no DFS master region specified
+ * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
+ * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
+ * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
+ */
+enum nl80211_dfs_regions {
+ NL80211_DFS_UNSET = 0,
+ NL80211_DFS_FCC = 1,
+ NL80211_DFS_ETSI = 2,
+ NL80211_DFS_JP = 3,
+};
+
+/**
* enum nl80211_survey_info - survey information
*
* These attribute types are used with %NL80211_ATTR_SURVEY_INFO
@@ -1977,6 +2100,10 @@ enum nl80211_mntr_flags {
* access to a broader network beyond the MBSS. This is done via Root
* Announcement frames.
*
+ * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which a mesh STA can send only one Action frame containing a
+ * PERR element.
+ *
* @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
*
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2000,6 +2127,7 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_ELEMENT_TTL,
NL80211_MESHCONF_HWMP_RANN_INTERVAL,
NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2650,4 +2778,45 @@ enum nl80211_tdls_operation {
NL80211_TDLS_DISABLE_LINK,
};
+/*
+ * enum nl80211_ap_sme_features - device-integrated AP features
+ * Reserved for future use, no bits are defined in
+ * NL80211_ATTR_DEVICE_AP_SME yet.
+enum nl80211_ap_sme_features {
+};
+ */
+
+/**
+ * enum nl80211_feature_flags - device/driver features
+ * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back
+ * TX status to the socket error queue when requested with the
+ * socket option.
+ * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
+ */
+enum nl80211_feature_flags {
+ NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
+ NL80211_FEATURE_HT_IBSS = 1 << 1,
+};
+
+/**
+ * enum nl80211_probe_resp_offload_support_attr - optional supported
+ * protocols for probe-response offloading by the driver/FW.
+ * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
+ * Each enum value represents a bit in the bitmap of supported
+ * protocols. Typically a subset of probe-requests belonging to a
+ * supported protocol will be excluded from offload and uploaded
+ * to the host.
+ *
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u
+ */
+enum nl80211_probe_resp_offload_support_attr {
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0,
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1,
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2,
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3,
+};
+
#endif /* __LINUX_NL80211_H */
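A hedged userspace fragment showing how the new NL80211_ATTR_FEATURE_FLAGS bitmap would typically be consumed after a wiphy dump; the tb[] attribute array is assumed to have been filled by libnl's nla_parse() and is not part of this header:

/* 'tb' is assumed to be struct nlattr *tb[NL80211_ATTR_MAX + 1], filled
 * from an NL80211_CMD_NEW_WIPHY reply. */
if (tb[NL80211_ATTR_FEATURE_FLAGS]) {
	uint32_t features = nla_get_u32(tb[NL80211_ATTR_FEATURE_FLAGS]);

	if (features & NL80211_FEATURE_SK_TX_STATUS)
		printf("TX status is reported via the socket error queue\n");
	if (features & NL80211_FEATURE_HT_IBSS)
		printf("HT data rates are usable in IBSS mode\n");
}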
diff --git a/include/linux/nls.h b/include/linux/nls.h
index d47beef08df..5dc635f8d79 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -43,7 +43,7 @@ enum utf16_endian {
UTF16_BIG_ENDIAN
};
-/* nls.c */
+/* nls_base.c */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
extern struct nls_table *load_nls(char *);
@@ -52,7 +52,8 @@ extern struct nls_table *load_nls_default(void);
extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
-extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
+extern int utf8s_to_utf16s(const u8 *s, int len,
+ enum utf16_endian endian, wchar_t *pwcs, int maxlen);
extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
enum utf16_endian endian, u8 *s, int maxlen);
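With the new signature, callers of utf8s_to_utf16s() pass the target endianness and the output capacity explicitly; a small illustrative sketch where 'utf8_name' and the buffer size are made-up values:

/* Sketch: convert a UTF-8 string to little-endian UTF-16, at most 255 units. */
wchar_t utf16_buf[255];
int units;

units = utf8s_to_utf16s(utf8_name, strlen(utf8_name),
			UTF16_LITTLE_ENDIAN, utf16_buf, 255);
if (units < 0)
	return units;	/* invalid UTF-8 input */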
diff --git a/include/linux/node.h b/include/linux/node.h
index 92370e22343..624e53cecc0 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -14,12 +14,12 @@
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/workqueue.h>
struct node {
- struct sys_device sysdev;
+ struct device dev;
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
struct work_struct node_work;
@@ -80,6 +80,6 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
}
#endif
-#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
+#define to_node(device) container_of(device, struct node, dev)
#endif /* _LINUX_NODE_H_ */
diff --git a/include/linux/of.h b/include/linux/of.h
index 4948552d60f..a75a831e205 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -65,6 +65,27 @@ struct device_node {
#endif
};
+#define MAX_PHANDLE_ARGS 8
+struct of_phandle_args {
+ struct device_node *np;
+ int args_count;
+ uint32_t args[MAX_PHANDLE_ARGS];
+};
+
+#if defined(CONFIG_SPARC) || !defined(CONFIG_OF)
+/* Dummy ref counting routines - to be implemented later */
+static inline struct device_node *of_node_get(struct device_node *node)
+{
+ return node;
+}
+static inline void of_node_put(struct device_node *node)
+{
+}
+#else
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+#endif
+
#ifdef CONFIG_OF
/* Pointer for first entry in chain of all nodes. */
@@ -95,21 +116,6 @@ static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
extern struct device_node *of_find_all_nodes(struct device_node *prev);
-#if defined(CONFIG_SPARC)
-/* Dummy ref counting routines - to be implemented later */
-static inline struct device_node *of_node_get(struct device_node *node)
-{
- return node;
-}
-static inline void of_node_put(struct device_node *node)
-{
-}
-
-#else
-extern struct device_node *of_node_get(struct device_node *node);
-extern void of_node_put(struct device_node *node);
-#endif
-
/*
* OF address retrieval & translation
*/
@@ -219,8 +225,8 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
-#define for_each_property(pp, properties) \
- for (pp = properties; pp != NULL; pp = pp->next)
+#define for_each_property_of_node(dn, pp) \
+ for (pp = dn->properties; pp != NULL; pp = pp->next)
extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
@@ -230,9 +236,9 @@ extern int of_modalias_node(struct device_node *node, char *modalias, int len);
extern struct device_node *of_parse_phandle(struct device_node *np,
const char *phandle_name,
int index);
-extern int of_parse_phandles_with_args(struct device_node *np,
+extern int of_parse_phandle_with_args(struct device_node *np,
const char *list_name, const char *cells_name, int index,
- struct device_node **out_node, const void **out_args);
+ struct of_phandle_args *out_args);
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
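The switch from of_parse_phandles_with_args() to a struct of_phandle_args out-parameter changes callers roughly as below; this is a sketch only, and the "gpios"/"#gpio-cells" property names are merely illustrative:

/* Sketch: resolve the first entry of a phandle-plus-arguments property. */
struct of_phandle_args args;
int ret;

ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells", 0, &args);
if (ret)
	return ret;

/* args.np is the provider node, args.args[0..args_count-1] the specifier. */
pr_debug("provider %s, %d arg cells\n", args.np->full_name, args.args_count);
of_node_put(args.np);	/* drop the reference taken for args.np */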
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index c84d900fbbb..ed136ad698c 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -71,7 +71,7 @@ extern int of_fdt_is_compatible(struct boot_param_header *blob,
unsigned long node,
const char *compat);
extern int of_fdt_match(struct boot_param_header *blob, unsigned long node,
- const char **compat);
+ const char *const *compat);
extern void of_fdt_unflatten_tree(unsigned long *blob,
struct device_node **mynodes);
@@ -88,7 +88,7 @@ extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
unsigned long *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
-extern int of_flat_dt_match(unsigned long node, const char **matches);
+extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 52280a2b5e6..b254052a49d 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+#include <linux/of.h>
struct device_node;
@@ -57,8 +58,9 @@ extern int of_mm_gpiochip_add(struct device_node *np,
extern void of_gpiochip_add(struct gpio_chip *gc);
extern void of_gpiochip_remove(struct gpio_chip *gc);
extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
-extern int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags);
+extern int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags);
#else /* CONFIG_OF_GPIO */
@@ -75,8 +77,8 @@ static inline unsigned int of_gpio_count(struct device_node *np)
}
static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
- struct device_node *np,
- const void *gpio_spec, u32 *flags)
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
{
return -ENOSYS;
}
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644
index 00000000000..eb1efa54fe8
--- /dev/null
+++ b/include/linux/openvswitch.h
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _LINUX_OPENVSWITCH_H
+#define _LINUX_OPENVSWITCH_H 1
+
+#include <linux/types.h>
+
+/**
+ * struct ovs_header - header for OVS Generic Netlink messages.
+ * @dp_ifindex: ifindex of local port for datapath (0 to make a request not
+ * specific to a datapath).
+ *
+ * Attributes following the header are specific to a particular OVS Generic
+ * Netlink family, but all of the OVS families use this header.
+ */
+
+struct ovs_header {
+ int dp_ifindex;
+};
+
+/* Datapaths. */
+
+#define OVS_DATAPATH_FAMILY "ovs_datapath"
+#define OVS_DATAPATH_MCGROUP "ovs_datapath"
+#define OVS_DATAPATH_VERSION 0x1
+
+enum ovs_datapath_cmd {
+ OVS_DP_CMD_UNSPEC,
+ OVS_DP_CMD_NEW,
+ OVS_DP_CMD_DEL,
+ OVS_DP_CMD_GET,
+ OVS_DP_CMD_SET
+};
+
+/**
+ * enum ovs_datapath_attr - attributes for %OVS_DP_* commands.
+ * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local
+ * port". This is the name of the network device whose dp_ifindex is given in
+ * the &struct ovs_header. Always present in notifications. Required in
+ * %OVS_DP_NEW requests. May be used as an alternative to specifying
+ * dp_ifindex in other requests (with a dp_ifindex of 0).
+ * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially
+ * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on
+ * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
+ * not be sent.
+ * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
+ * datapath. Always present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_DP_* commands.
+ */
+enum ovs_datapath_attr {
+ OVS_DP_ATTR_UNSPEC,
+ OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */
+ OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
+ OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
+ __OVS_DP_ATTR_MAX
+};
+
+#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1)
+
+struct ovs_dp_stats {
+ __u64 n_hit; /* Number of flow table matches. */
+ __u64 n_missed; /* Number of flow table misses. */
+ __u64 n_lost; /* Number of misses not sent to userspace. */
+ __u64 n_flows; /* Number of flows present */
+};
+
+struct ovs_vport_stats {
+ __u64 rx_packets; /* total packets received */
+ __u64 tx_packets; /* total packets transmitted */
+ __u64 rx_bytes; /* total bytes received */
+ __u64 tx_bytes; /* total bytes transmitted */
+ __u64 rx_errors; /* bad packets received */
+ __u64 tx_errors; /* packet transmit problems */
+ __u64 rx_dropped; /* no space in linux buffers */
+ __u64 tx_dropped; /* no space available in linux */
+};
+
+/* Fixed logical ports. */
+#define OVSP_LOCAL ((__u16)0)
+
+/* Packet transfer. */
+
+#define OVS_PACKET_FAMILY "ovs_packet"
+#define OVS_PACKET_VERSION 0x1
+
+enum ovs_packet_cmd {
+ OVS_PACKET_CMD_UNSPEC,
+
+ /* Kernel-to-user notifications. */
+ OVS_PACKET_CMD_MISS, /* Flow table miss. */
+ OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */
+
+ /* Userspace commands. */
+ OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */
+};
+
+/**
+ * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands.
+ * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire
+ * packet as received, from the start of the Ethernet header onward. For
+ * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by
+ * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is
+ * the flow key extracted from the packet as originally received.
+ * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key
+ * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows
+ * userspace to adapt its flow setup strategy by comparing its notion of the
+ * flow key against the kernel's.
+ * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
+ * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_USERDATA attribute.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_PACKET_* commands.
+ */
+enum ovs_packet_attr {
+ OVS_PACKET_ATTR_UNSPEC,
+ OVS_PACKET_ATTR_PACKET, /* Packet data. */
+ OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
+ OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
+ OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */
+ __OVS_PACKET_ATTR_MAX
+};
+
+#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1)
+
+/* Virtual ports. */
+
+#define OVS_VPORT_FAMILY "ovs_vport"
+#define OVS_VPORT_MCGROUP "ovs_vport"
+#define OVS_VPORT_VERSION 0x1
+
+enum ovs_vport_cmd {
+ OVS_VPORT_CMD_UNSPEC,
+ OVS_VPORT_CMD_NEW,
+ OVS_VPORT_CMD_DEL,
+ OVS_VPORT_CMD_GET,
+ OVS_VPORT_CMD_SET
+};
+
+enum ovs_vport_type {
+ OVS_VPORT_TYPE_UNSPEC,
+ OVS_VPORT_TYPE_NETDEV, /* network device */
+ OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
+ __OVS_VPORT_TYPE_MAX
+};
+
+#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1)
+
+/**
+ * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands.
+ * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath.
+ * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type
+ * of vport.
+ * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device
+ * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes
+ * plus a null terminator.
+ * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
+ * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
+ * this port. A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
+ * packets sent or received through the vport.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_VPORT_* commands.
+ *
+ * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and
+ * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is
+ * optional; if not specified a free port number is automatically selected.
+ * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
+ * of vport, and other attributes are ignored.
+ *
+ * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
+ * look up the vport to operate on; otherwise dp_idx from the &struct
+ * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
+ */
+enum ovs_vport_attr {
+ OVS_VPORT_ATTR_UNSPEC,
+ OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */
+ OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */
+ OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */
+ OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
+ OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+ OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */
+ __OVS_VPORT_ATTR_MAX
+};
+
+#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+
+/* Flows. */
+
+#define OVS_FLOW_FAMILY "ovs_flow"
+#define OVS_FLOW_MCGROUP "ovs_flow"
+#define OVS_FLOW_VERSION 0x1
+
+enum ovs_flow_cmd {
+ OVS_FLOW_CMD_UNSPEC,
+ OVS_FLOW_CMD_NEW,
+ OVS_FLOW_CMD_DEL,
+ OVS_FLOW_CMD_GET,
+ OVS_FLOW_CMD_SET
+};
+
+struct ovs_flow_stats {
+ __u64 n_packets; /* Number of matched packets. */
+ __u64 n_bytes; /* Number of matched bytes. */
+};
+
+enum ovs_key_attr {
+ OVS_KEY_ATTR_UNSPEC,
+ OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. */
+ OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */
+ OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */
+ OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */
+ OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */
+ OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */
+ OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */
+ OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */
+ OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */
+ OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */
+ OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */
+ OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */
+ OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */
+ OVS_KEY_ATTR_ND, /* struct ovs_key_nd */
+ __OVS_KEY_ATTR_MAX
+};
+
+#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
+
+/**
+ * enum ovs_frag_type - IPv4 and IPv6 fragment type
+ * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
+ * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0.
+ * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset.
+ *
+ * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in
+ * &struct ovs_key_ipv6.
+ */
+enum ovs_frag_type {
+ OVS_FRAG_TYPE_NONE,
+ OVS_FRAG_TYPE_FIRST,
+ OVS_FRAG_TYPE_LATER,
+ __OVS_FRAG_TYPE_MAX
+};
+
+#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1)
+
+struct ovs_key_ethernet {
+ __u8 eth_src[6];
+ __u8 eth_dst[6];
+};
+
+struct ovs_key_ipv4 {
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ __u8 ipv4_proto;
+ __u8 ipv4_tos;
+ __u8 ipv4_ttl;
+ __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_ipv6 {
+ __be32 ipv6_src[4];
+ __be32 ipv6_dst[4];
+ __be32 ipv6_label; /* 20-bits in least-significant bits. */
+ __u8 ipv6_proto;
+ __u8 ipv6_tclass;
+ __u8 ipv6_hlimit;
+ __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_tcp {
+ __be16 tcp_src;
+ __be16 tcp_dst;
+};
+
+struct ovs_key_udp {
+ __be16 udp_src;
+ __be16 udp_dst;
+};
+
+struct ovs_key_icmp {
+ __u8 icmp_type;
+ __u8 icmp_code;
+};
+
+struct ovs_key_icmpv6 {
+ __u8 icmpv6_type;
+ __u8 icmpv6_code;
+};
+
+struct ovs_key_arp {
+ __be32 arp_sip;
+ __be32 arp_tip;
+ __be16 arp_op;
+ __u8 arp_sha[6];
+ __u8 arp_tha[6];
+};
+
+struct ovs_key_nd {
+ __u32 nd_target[4];
+ __u8 nd_sll[6];
+ __u8 nd_tll[6];
+};
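The fixed-size key structures above travel as nested OVS_KEY_ATTR_* attributes inside the flow key; a hedged userspace fragment filling an Ethernet/IPv4/TCP key, with arbitrary example addresses and ports:

/* Example values only; each struct would normally be serialized with
 * nla_put(msg, OVS_KEY_ATTR_ETHERNET, sizeof(eth), &eth) and so on. */
struct ovs_key_ethernet eth = {
	.eth_src = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	.eth_dst = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
};
struct ovs_key_ipv4 ip = {
	.ipv4_src   = htonl(0xc0a80001),	/* 192.168.0.1 */
	.ipv4_dst   = htonl(0xc0a80002),	/* 192.168.0.2 */
	.ipv4_proto = IPPROTO_TCP,
	.ipv4_ttl   = 64,
	.ipv4_frag  = OVS_FRAG_TYPE_NONE,
};
struct ovs_key_tcp tcp = {
	.tcp_src = htons(12345),
	.tcp_dst = htons(80),
};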
+
+/**
+ * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
+ * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
+ * key. Always present in notifications. Required for all requests (except
+ * dumps).
+ * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
+ * the actions to take for packets that match the key. Always present in
+ * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for
+ * %OVS_FLOW_CMD_SET requests.
+ * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
+ * flow. Present in notifications if the stats would be nonzero. Ignored in
+ * requests.
+ * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the
+ * TCP flags seen on packets in this flow. Only present in notifications for
+ * TCP flows, and only if it would be nonzero. Ignored in requests.
+ * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on
+ * the system monotonic clock, at which a packet was last processed for this
+ * flow. Only present in notifications if a packet has been processed for this
+ * flow. Ignored in requests.
+ * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
+ * last-used time, accumulated TCP flags, and statistics for this flow.
+ * Otherwise ignored in requests. Never present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_FLOW_* commands.
+ */
+enum ovs_flow_attr {
+ OVS_FLOW_ATTR_UNSPEC,
+ OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */
+ OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
+ OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */
+ OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
+ OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
+ OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
+ __OVS_FLOW_ATTR_MAX
+};
+
+#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
+
+/**
+ * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
+ * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
+ * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of
+ * %UINT32_MAX samples all packets and intermediate values sample intermediate
+ * fractions of packets.
+ * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
+ * Actions are passed as nested attributes.
+ *
+ * Executes the specified actions with the given probability on a per-packet
+ * basis.
+ */
+enum ovs_sample_attr {
+ OVS_SAMPLE_ATTR_UNSPEC,
+ OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
+ OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
+ __OVS_SAMPLE_ATTR_MAX,
+};
+
+#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
+
+/**
+ * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
+ * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
+ * message should be sent. Required.
+ * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the
+ * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ */
+enum ovs_userspace_attr {
+ OVS_USERSPACE_ATTR_UNSPEC,
+ OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
+ OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */
+ __OVS_USERSPACE_ATTR_MAX
+};
+
+#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
+
+/**
+ * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
+ * @vlan_tpid: Tag protocol identifier (TPID) to push.
+ * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
+ * (but it will not be set in the 802.1Q header that is pushed).
+ *
+ * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
+ * values are those that the kernel module also parses as 802.1Q headers, to
+ * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
+ * from having surprising results.
+ */
+struct ovs_action_push_vlan {
+ __be16 vlan_tpid; /* 802.1Q TPID. */
+ __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
+};
+
+/**
+ * enum ovs_action_attr - Action types.
+ *
+ * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
+ * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
+ * %OVS_USERSPACE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The
+ * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
+ * value.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
+ * packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
+ * the nested %OVS_SAMPLE_ATTR_* attributes.
+ *
+ * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
+ * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
+ * type may not be changed.
+ */
+
+enum ovs_action_attr {
+ OVS_ACTION_ATTR_UNSPEC,
+ OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */
+ OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */
+ OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */
+ OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */
+ OVS_ACTION_ATTR_POP_VLAN, /* No argument. */
+ OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */
+ __OVS_ACTION_ATTR_MAX
+};
+
+#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
+
+#endif /* _LINUX_OPENVSWITCH_H */
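As documented above, every request carries struct ovs_header right after the Generic Netlink header; below is a rough sketch of an OVS_DP_CMD_GET request using libnl-3, where the nl_*/genl_* calls and the "ovs-system" name are assumptions about the userspace side rather than part of this header:

/* Sketch (libnl-3 assumed): query the datapath named "ovs-system". */
struct nl_sock *sk = nl_socket_alloc();
struct nl_msg *msg = nlmsg_alloc();
struct ovs_header *ovs_hdr;
int family;

genl_connect(sk);
family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);

ovs_hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
		      sizeof(*ovs_hdr), NLM_F_REQUEST,
		      OVS_DP_CMD_GET, OVS_DATAPATH_VERSION);
ovs_hdr->dp_ifindex = 0;			/* 0: look up by name instead */
nla_put_string(msg, OVS_DP_ATTR_NAME, "ovs-system");
nl_send_auto(sk, msg);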
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
index b0638fd91e9..22691f61404 100644
--- a/include/linux/page-debug-flags.h
+++ b/include/linux/page-debug-flags.h
@@ -13,6 +13,7 @@
enum page_debug_flags {
PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
+ PAGE_DEBUG_FLAG_GUARD,
};
/*
@@ -21,7 +22,8 @@ enum page_debug_flags {
*/
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) \
+#if !defined(CONFIG_PAGE_POISONING) && \
+ !defined(CONFIG_PAGE_GUARD) \
/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
#endif
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index bab82f4c571..ed17024d2eb 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -21,7 +21,6 @@ struct pagevec {
};
void __pagevec_release(struct pagevec *pvec);
-void __pagevec_free(struct pagevec *pvec);
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
void pagevec_strip(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
@@ -67,12 +66,6 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec);
}
-static inline void pagevec_free(struct pagevec *pvec)
-{
- if (pagevec_count(pvec))
- __pagevec_free(pvec);
-}
-
static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
{
____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 7cea7b6c141..c8320144fe7 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -29,7 +29,7 @@ extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
extern void pci_disable_link_state(struct pci_dev *pdev, int state);
extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-extern void pcie_clear_aspm(void);
+extern void pcie_clear_aspm(struct pci_bus *bus);
extern void pcie_no_aspm(void);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -47,7 +47,7 @@ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
{
}
-static inline void pcie_clear_aspm(void)
+static inline void pcie_clear_aspm(struct pci_bus *bus)
{
}
static inline void pcie_no_aspm(void)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7cda65b5f79..84225c756bd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -336,6 +336,7 @@ struct pci_dev {
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
#ifdef CONFIG_PCI_MSI
struct list_head msi_list;
+ struct kset *msi_kset;
#endif
struct pci_vpd *vpd;
#ifdef CONFIG_PCI_ATS
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 172ba70306d..2aaee0ca9da 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,8 +517,12 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index b5d9657f310..28fe380cb19 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -537,7 +537,9 @@
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
+#define PCI_EXT_CAP_ID_PRI 19
#define PCI_EXT_CAP_ID_LTR 24
+#define PCI_EXT_CAP_ID_PASID 27
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -664,24 +666,24 @@
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
/* Page Request Interface */
-#define PCI_PRI_CAP 0x13 /* PRI capability ID */
-#define PCI_PRI_CONTROL_OFF 0x04 /* Offset of control register */
-#define PCI_PRI_STATUS_OFF 0x06 /* Offset of status register */
-#define PCI_PRI_ENABLE 0x0001 /* Enable mask */
-#define PCI_PRI_RESET 0x0002 /* Reset bit mask */
-#define PCI_PRI_STATUS_RF 0x0001 /* Request Failure */
-#define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */
-#define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */
-#define PCI_PRI_MAX_REQ_OFF 0x08 /* Cap offset for max reqs supported */
-#define PCI_PRI_ALLOC_REQ_OFF 0x0c /* Cap offset for max reqs allowed */
+#define PCI_PRI_CTRL 0x04 /* PRI control register */
+#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
+#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
+#define PCI_PRI_STATUS 0x06 /* PRI status register */
+#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
+#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
+#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
+#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
+#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
/* PASID capability */
-#define PCI_PASID_CAP 0x1b /* PASID capability ID */
-#define PCI_PASID_CAP_OFF 0x04 /* PASID feature register */
-#define PCI_PASID_CONTROL_OFF 0x06 /* PASID control register */
-#define PCI_PASID_ENABLE 0x01 /* Enable/Supported bit */
-#define PCI_PASID_EXEC 0x02 /* Exec permissions Enable/Supported */
-#define PCI_PASID_PRIV 0x04 /* Priviledge Mode Enable/Support */
+#define PCI_PASID_CAP 0x04 /* PASID feature register */
+#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CTRL 0x06 /* PASID control register */
+#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
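The renamed PRI offsets are relative to the extended capability located via PCI_EXT_CAP_ID_PRI; a hedged kernel-side sketch of turning PRI on for a device 'pdev' (real code would also check PCI_PRI_STATUS and the ATS state first):

/* Sketch: find the Page Request Interface capability and enable it. */
int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
u16 ctrl;

if (!pos)
	return -EINVAL;

pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &ctrl);
ctrl |= PCI_PRI_CTRL_ENABLE;
pci_write_config_word(pdev, pos + PCI_PRI_CTRL, ctrl);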
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9ca008f0c54..32cd1f67462 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -172,10 +172,10 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
* equal char, int or long. percpu_read() evaluates to a lvalue and
* all others to void.
*
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
+ * These operations are guaranteed to be atomic.
+ * The generic versions disable interrupts. Archs are
* encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
+ * require protection.
*/
#ifndef percpu_read
# define percpu_read(var) \
@@ -347,9 +347,10 @@ do { \
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
- preempt_disable(); \
+ unsigned long flags; \
+ local_irq_save(flags); \
*__this_cpu_ptr(&(pcp)) op val; \
- preempt_enable(); \
+ local_irq_restore(flags); \
} while (0)
#ifndef this_cpu_write
@@ -447,10 +448,11 @@ do { \
#define _this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) ret__; \
- preempt_disable(); \
+ unsigned long flags; \
+ local_irq_save(flags); \
__this_cpu_add(pcp, val); \
ret__ = __this_cpu_read(pcp); \
- preempt_enable(); \
+ local_irq_restore(flags); \
ret__; \
})
@@ -476,10 +478,11 @@ do { \
#define _this_cpu_generic_xchg(pcp, nval) \
({ typeof(pcp) ret__; \
- preempt_disable(); \
+ unsigned long flags; \
+ local_irq_save(flags); \
ret__ = __this_cpu_read(pcp); \
__this_cpu_write(pcp, nval); \
- preempt_enable(); \
+ local_irq_restore(flags); \
ret__; \
})
@@ -501,12 +504,14 @@ do { \
#endif
#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
-({ typeof(pcp) ret__; \
- preempt_disable(); \
+({ \
+ typeof(pcp) ret__; \
+ unsigned long flags; \
+ local_irq_save(flags); \
ret__ = __this_cpu_read(pcp); \
if (ret__ == (oval)) \
__this_cpu_write(pcp, nval); \
- preempt_enable(); \
+ local_irq_restore(flags); \
ret__; \
})
@@ -538,10 +543,11 @@ do { \
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int ret__; \
- preempt_disable(); \
+ unsigned long flags; \
+ local_irq_save(flags); \
ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
- preempt_enable(); \
+ local_irq_restore(flags); \
ret__; \
})
@@ -567,9 +573,9 @@ do { \
#endif
/*
- * Generic percpu operations that do not require preemption handling.
+ * Generic percpu operations for context that are safe from preemption/interrupts.
* Either we do not care about races or the caller has the
- * responsibility of handling preemptions issues. Arch code can still
+ * responsibility of handling preemption/interrupt issues. Arch code can still
* override these instructions since the arch per cpu code may be more
* efficient and may actually get race freeness for free (that is the
* case for x86 for example).
@@ -802,156 +808,4 @@ do { \
__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
-/*
- * IRQ safe versions of the per cpu RMW operations. Note that these operations
- * are *not* safe against modification of the same variable from another
- * processors (which one gets when using regular atomic operations)
- * They are guaranteed to be atomic vs. local interrupts and
- * preemption only.
- */
-#define irqsafe_cpu_generic_to_op(pcp, val, op) \
-do { \
- unsigned long flags; \
- local_irq_save(flags); \
- *__this_cpu_ptr(&(pcp)) op val; \
- local_irq_restore(flags); \
-} while (0)
-
-#ifndef irqsafe_cpu_add
-# ifndef irqsafe_cpu_add_1
-# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_2
-# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_4
-# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef irqsafe_cpu_add_8
-# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef irqsafe_cpu_sub
-# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
-#endif
-
-#ifndef irqsafe_cpu_inc
-# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_dec
-# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
-#endif
-
-#ifndef irqsafe_cpu_and
-# ifndef irqsafe_cpu_and_1
-# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_2
-# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_4
-# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef irqsafe_cpu_and_8
-# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
-#endif
-
-#ifndef irqsafe_cpu_or
-# ifndef irqsafe_cpu_or_1
-# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_2
-# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_4
-# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef irqsafe_cpu_or_8
-# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
-#endif
-
-#ifndef irqsafe_cpu_xor
-# ifndef irqsafe_cpu_xor_1
-# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_2
-# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_4
-# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef irqsafe_cpu_xor_8
-# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
-#endif
-
-#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
-({ \
- typeof(pcp) ret__; \
- unsigned long flags; \
- local_irq_save(flags); \
- ret__ = __this_cpu_read(pcp); \
- if (ret__ == (oval)) \
- __this_cpu_write(pcp, nval); \
- local_irq_restore(flags); \
- ret__; \
-})
-
-#ifndef irqsafe_cpu_cmpxchg
-# ifndef irqsafe_cpu_cmpxchg_1
-# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_2
-# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_4
-# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_8
-# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
- __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
-#endif
-
-#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({ \
- int ret__; \
- unsigned long flags; \
- local_irq_save(flags); \
- ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
- oval1, oval2, nval1, nval2); \
- local_irq_restore(flags); \
- ret__; \
-})
-
-#ifndef irqsafe_cpu_cmpxchg_double
-# ifndef irqsafe_cpu_cmpxchg_double_1
-# define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_2
-# define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_4
-# define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef irqsafe_cpu_cmpxchg_double_8
-# define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
#endif /* __LINUX_PERCPU_H */
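Since the generic this_cpu_*() fallbacks now disable interrupts, the removed irqsafe_cpu_*() family has no use it cannot cover; a brief sketch of the surviving interface with a hypothetical per-cpu counter:

/* Sketch: interrupt-safe per-cpu statistics using the generic ops. */
DEFINE_PER_CPU(unsigned long, rx_frames);

static void account_rx_frame(void)
{
	this_cpu_inc(rx_frames);	/* previously irqsafe_cpu_inc() */
}

static unsigned long read_rx_frames(int cpu)
{
	return per_cpu(rx_frames, cpu);
}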
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1e9ebe5e009..08855613ceb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
PERF_COUNT_HW_MAX, /* non-ABI */
};
@@ -822,6 +823,7 @@ struct perf_event {
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
+ struct list_head rb_entry;
/* poll related */
wait_queue_head_t waitq;
@@ -889,6 +891,7 @@ struct perf_event_context {
int nr_active;
int is_active;
int nr_stat;
+ int nr_freq;
int rotate_disable;
atomic_t refcount;
struct task_struct *task;
@@ -1062,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
}
}
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
}
@@ -1076,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
}
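PERF_COUNT_HW_REF_CPU_CYCLES is visible to userspace through the perf_event_open() syscall; a small, hedged example counting reference cycles for the calling thread (error handling kept minimal):

/* Sketch: count reference (unscaled) CPU cycles for this thread. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("ref cycles: %llu\n", (unsigned long long)count);
	return 0;
}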
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index f53a4167c5f..f48bfc80cb4 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -38,6 +38,7 @@
#define PNPIPE_ENCAP 1
#define PNPIPE_IFINDEX 2
#define PNPIPE_HANDLE 3
+#define PNPIPE_INITSTATE 4
#define PNADDR_ANY 0
#define PNADDR_BROADCAST 0xFC
@@ -49,6 +50,7 @@
/* ioctls */
#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
+#define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13)
#define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14)
#define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15)
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 38d10326246..e7cf6669ac3 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -30,6 +30,8 @@ struct pid_namespace {
#ifdef CONFIG_BSD_PROCESS_ACCT
struct bsd_acct_struct *bacct;
#endif
+ gid_t pid_gid;
+ int hide_pid;
};
extern struct pid_namespace init_pid_ns;
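The new pid_gid/hide_pid fields back the procfs hidepid=/gid= mount options introduced alongside this change; a hedged userspace sketch of opting in (the option string is an assumption taken from the corresponding procfs patch, not from this header):

/* Sketch: hide other users' processes in /proc, except from group 100. */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	if (mount("proc", "/proc", "proc", MS_REMOUNT,
		  "hidepid=2,gid=100") < 0) {
		perror("remount /proc");
		return 1;
	}
	return 0;
}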
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index 88863531d86..d0aecb7f6fb 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -48,7 +48,7 @@ struct pinmux_map {
const char *group;
struct device *dev;
const char *dev_name;
- const bool hog_on_boot;
+ bool hog_on_boot;
};
/*
@@ -66,30 +66,22 @@ struct pinmux_map {
{ .name = a, .ctrl_dev_name = b, .function = c }
/*
- * Convenience macro to map a function onto the primary device pinctrl device
- * this is especially helpful on systems that have only one pin controller
- * or need to set up a lot of mappings on the primary controller.
- */
-#define PINMUX_MAP_PRIMARY(a, b, c) \
- { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
- .dev_name = c }
-
-/*
- * Convenience macro to map a system function onto the primary pinctrl device.
- * System functions are not assigned to a particular device.
+ * Convenience macro to map a system function onto a certain pinctrl device,
+ * to be hogged by the pinmux core until the system shuts down.
*/
-#define PINMUX_MAP_PRIMARY_SYS(a, b) \
- { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b }
+#define PINMUX_MAP_SYS_HOG(a, b, c) \
+ { .name = a, .ctrl_dev_name = b, .function = c, \
+ .hog_on_boot = true }
/*
- * Convenience macro to map a system function onto the primary pinctrl device,
- * to be hogged by the pinmux core until the system shuts down.
+ * Convenience macro to map a system function onto a certain pinctrl device
+ * using a specified group, to be hogged by the pinmux core until the system
+ * shuts down.
*/
-#define PINMUX_MAP_PRIMARY_SYS_HOG(a, b) \
- { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+#define PINMUX_MAP_SYS_HOG_GROUP(a, b, c, d) \
+ { .name = a, .ctrl_dev_name = b, .function = c, .group = d, \
.hog_on_boot = true }
-
#ifdef CONFIG_PINMUX
extern int pinmux_register_mappings(struct pinmux_map const *map,
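With the PRIMARY convenience macros gone, board code now names the pin controller explicitly in every hogged mapping; a minimal sketch using the new macros, where the "pinctrl-foo.0" device and the function/group names are made up:

/* Sketch: hog two system functions on a hypothetical controller. */
static struct pinmux_map foo_pinmux_map[] = {
	PINMUX_MAP_SYS_HOG("i2c0", "pinctrl-foo.0", "i2c0"),
	PINMUX_MAP_SYS_HOG_GROUP("spi0", "pinctrl-foo.0", "spi0", "spi0_grp"),
};

static int __init foo_board_pinmux_init(void)
{
	return pinmux_register_mappings(foo_pinmux_map,
					ARRAY_SIZE(foo_pinmux_map));
}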
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
new file mode 100644
index 00000000000..477922cf043
--- /dev/null
+++ b/include/linux/pinctrl/pinconf.h
@@ -0,0 +1,97 @@
+/*
+ * Interface to the pinconfig portions of the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCONF_H
+#define __LINUX_PINCTRL_PINCONF_H
+
+#ifdef CONFIG_PINCONF
+
+struct pinctrl_dev;
+struct seq_file;
+
+/**
+ * struct pinconf_ops - pin config operations, to be implemented by
+ * pin configuration capable drivers.
+ * @pin_config_get: get the config of a certain pin, if the requested config
+ * is not available on this controller this should return -ENOTSUPP
+ * and if it is available but disabled it should return -EINVAL
+ * @pin_config_set: configure an individual pin
+ * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_set: configure all pins in a group
+ * @pin_config_dbg_show: optional debugfs display hook that will provide
+ * per-device info for a certain pin in debugfs
+ * @pin_config_group_dbg_show: optional debugfs display hook that will provide
+ * per-device info for a certain group in debugfs
+ */
+struct pinconf_ops {
+ int (*pin_config_get) (struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config);
+ int (*pin_config_set) (struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long config);
+ int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *config);
+ int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long config);
+ void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset);
+ void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned selector);
+};
+
+extern int pin_config_get(const char *dev_name, const char *name,
+ unsigned long *config);
+extern int pin_config_set(const char *dev_name, const char *name,
+ unsigned long config);
+extern int pin_config_group_get(const char *dev_name,
+ const char *pin_group,
+ unsigned long *config);
+extern int pin_config_group_set(const char *dev_name,
+ const char *pin_group,
+ unsigned long config);
+
+#else
+
+static inline int pin_config_get(const char *dev_name, const char *name,
+ unsigned long *config)
+{
+ return 0;
+}
+
+static inline int pin_config_set(const char *dev_name, const char *name,
+ unsigned long config)
+{
+ return 0;
+}
+
+static inline int pin_config_group_get(const char *dev_name,
+ const char *pin_group,
+ unsigned long *config)
+{
+ return 0;
+}
+
+static inline int pin_config_group_set(const char *dev_name,
+ const char *pin_group,
+ unsigned long config)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __LINUX_PINCTRL_PINCONF_H */
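The external pin_config_*() calls address pins and groups by name, and the packed config value is entirely driver-defined; a short usage sketch in which the controller name, pin name and FOO_PINCONF_PULL_UP value are all hypothetical:

/* Sketch: apply a driver-specific pull-up setting to a pin and a group. */
#define FOO_PINCONF_PULL_UP	0x11	/* made-up packed config value */

int ret;

ret = pin_config_set("pinctrl-foo.0", "A8", FOO_PINCONF_PULL_UP);
if (ret)
	pr_err("could not configure pin A8\n");

ret = pin_config_group_set("pinctrl-foo.0", "i2c0_grp", FOO_PINCONF_PULL_UP);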
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 04c011038f3..8bd22ee7aa0 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -21,6 +21,7 @@
struct pinctrl_dev;
struct pinmux_ops;
+struct pinconf_ops;
struct gpio_chip;
/**
@@ -45,6 +46,7 @@ struct pinctrl_pin_desc {
* @name: a name for the chip in this range
* @id: an ID number for the chip in this range
* @base: base offset of the GPIO range
+ * @pin_base: base pin number of the GPIO range
* @npins: number of pins in the GPIO range, including the base number
* @gc: an optional pointer to a gpio_chip
*/
@@ -53,6 +55,7 @@ struct pinctrl_gpio_range {
const char *name;
unsigned int id;
unsigned int base;
+ unsigned int pin_base;
unsigned int npins;
struct gpio_chip *gc;
};
@@ -89,22 +92,20 @@ struct pinctrl_ops {
* this pin controller
* @npins: number of descriptors in the array, usually just ARRAY_SIZE()
* of the pins field above
- * @maxpin: since pin spaces may be sparse, there can he "holes" in the
- * pin range, this attribute gives the maximum pin number in the
- * total range. This should not be lower than npins for example,
- * but may be equal to npins if you have no holes in the pin range.
* @pctlops: pin control operation vtable, to support global concepts like
* grouping of pins, this is optional.
- * @pmxops: pinmux operation vtable, if you support pinmuxing in your driver
+ * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver
+ * @confops: pin config operations vtable, if you support pin configuration in
+ * your driver
* @owner: module providing the pin controller, used for refcounting
*/
struct pinctrl_desc {
const char *name;
struct pinctrl_pin_desc const *pins;
unsigned int npins;
- unsigned int maxpin;
struct pinctrl_ops *pctlops;
struct pinmux_ops *pmxops;
+ struct pinconf_ops *confops;
struct module *owner;
};
@@ -123,7 +124,7 @@ extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
struct pinctrl_dev;
-/* Sufficiently stupid default function when pinctrl is not in use */
+/* Sufficiently stupid default functions when pinctrl is not in use */
static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
{
return pin >= 0;
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 3c430e797ef..937b3e2fa36 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -52,9 +52,15 @@ struct pinctrl_dev;
* @disable: disable a certain muxing selector with a certain pin group
* @gpio_request_enable: requests and enables GPIO on a certain pin.
* Implement this only if you can mux every pin individually as GPIO. The
- * affected GPIO range is passed along with an offset into that
+ * affected GPIO range is passed along with an offset (pin number) into that
* specific GPIO range - function selectors and pin groups are orthogonal
- * to this, the core will however make sure the pins do not collide
+ * to this, the core will however make sure the pins do not collide.
+ * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of
+ * @gpio_request_enable
+ * @gpio_set_direction: Since controllers may need different configurations
+ * depending on whether the GPIO is configured as input or output,
+ * a direction selector function may be implemented to support the
+ * GPIO controllers that need pin muxing.
*/
struct pinmux_ops {
int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -73,11 +79,20 @@ struct pinmux_ops {
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset);
+ void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset);
+ int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input);
};
/* External interface to pinmux */
extern int pinmux_request_gpio(unsigned gpio);
extern void pinmux_free_gpio(unsigned gpio);
+extern int pinmux_gpio_direction_input(unsigned gpio);
+extern int pinmux_gpio_direction_output(unsigned gpio);
extern struct pinmux * __must_check pinmux_get(struct device *dev, const char *name);
extern void pinmux_put(struct pinmux *pmx);
extern int pinmux_enable(struct pinmux *pmx);
@@ -94,6 +109,16 @@ static inline void pinmux_free_gpio(unsigned gpio)
{
}
+static inline int pinmux_gpio_direction_input(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int pinmux_gpio_direction_output(unsigned gpio)
+{
+ return 0;
+}
+
static inline struct pinmux * __must_check pinmux_get(struct device *dev, const char *name)
{
return NULL;
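Editorial note: the two new helpers are meant to be called from a GPIO driver whose pins are muxed through pinctrl. The hedged sketch below shows such gpio_chip callbacks; the foo_* names are invented, and chip->base + offset is assumed to yield the global GPIO number that the helpers expect.

    #include <linux/gpio.h>
    #include <linux/pinctrl/pinmux.h>

    static int foo_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
    {
            int ret;

            /* let the pinmux core reconfigure the backing pin as an input first */
            ret = pinmux_gpio_direction_input(chip->base + offset);
            if (ret)
                    return ret;

            /* ... then program the GPIO controller's own direction register ... */
            return 0;
    }

    static int foo_gpio_direction_output(struct gpio_chip *chip,
                                         unsigned offset, int value)
    {
            int ret = pinmux_gpio_direction_output(chip->base + offset);

            if (ret)
                    return ret;

            /* ... drive the requested initial value and switch to output mode ... */
            return 0;
    }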
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index c5336705921..8f1b928f777 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -30,7 +30,7 @@
*/
struct tc_stats {
- __u64 bytes; /* NUmber of enqueues bytes */
+ __u64 bytes; /* Number of enqueued bytes */
__u32 packets; /* Number of enqueued packets */
__u32 drops; /* Packets dropped because of lack of resources */
__u32 overlimits; /* Number of throttle events when this
@@ -162,25 +162,24 @@ struct tc_sfq_qopt {
unsigned flows; /* Maximal number of flows */
};
+struct tc_sfq_qopt_v1 {
+ struct tc_sfq_qopt v0;
+ unsigned int depth; /* max number of packets per flow */
+ unsigned int headdrop;
+};
+
+
struct tc_sfq_xstats {
__s32 allot;
};
-/*
- * NOTE: limit, divisor and flows are hardwired to code at the moment.
- *
- * limit=flows=128, divisor=1024;
- *
- * The only reason for this is efficiency, it is possible
- * to change these parameters in compile time.
- */
-
/* RED section */
enum {
TCA_RED_UNSPEC,
TCA_RED_PARMS,
TCA_RED_STAB,
+ TCA_RED_MAX_P,
__TCA_RED_MAX,
};
@@ -194,8 +193,9 @@ struct tc_red_qopt {
unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
unsigned char Scell_log; /* cell size for idle damping */
unsigned char flags;
-#define TC_RED_ECN 1
-#define TC_RED_HARDDROP 2
+#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
+#define TC_RED_ADAPTATIVE 4
};
struct tc_red_xstats {
@@ -214,6 +214,7 @@ enum {
TCA_GRED_PARMS,
TCA_GRED_STAB,
TCA_GRED_DPS,
+ TCA_GRED_MAX_P,
__TCA_GRED_MAX,
};
@@ -253,6 +254,7 @@ enum {
TCA_CHOKE_UNSPEC,
TCA_CHOKE_PARMS,
TCA_CHOKE_STAB,
+ TCA_CHOKE_MAX_P,
__TCA_CHOKE_MAX,
};
@@ -297,7 +299,7 @@ struct tc_htb_glob {
__u32 debug; /* debug flags */
/* stats */
- __u32 direct_pkts; /* count of non shapped packets */
+ __u32 direct_pkts; /* count of non shaped packets */
};
enum {
TCA_HTB_UNSPEC,
@@ -465,6 +467,7 @@ enum {
TCA_NETEM_REORDER,
TCA_NETEM_CORRUPT,
TCA_NETEM_LOSS,
+ TCA_NETEM_RATE,
__TCA_NETEM_MAX,
};
@@ -495,6 +498,13 @@ struct tc_netem_corrupt {
__u32 correlation;
};
+struct tc_netem_rate {
+ __u32 rate; /* byte/s */
+ __s32 packet_overhead;
+ __u32 cell_size;
+ __s32 cell_overhead;
+};
+
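Editorial note: to make the new TCA_NETEM_RATE attribute concrete, the sketch below shows roughly how a userspace tool might fill the payload. The netlink plumbing is omitted, the values are illustrative, and the per-field semantics are inferred from the names and the byte/s comment above, so treat them as assumptions.

    #include <linux/pkt_sched.h>

    /* Payload for a TCA_NETEM_RATE netlink attribute (illustrative values) */
    static struct tc_netem_rate rate_opt = {
            .rate            = 1000000, /* bytes per second */
            .packet_overhead = 0,       /* extra bytes accounted per packet */
            .cell_size       = 0,       /* 0: no cell-based (e.g. ATM) accounting */
            .cell_overhead   = 0,
    };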
enum {
NETEM_LOSS_UNSPEC,
NETEM_LOSS_GI, /* General Intuitive - 4 state model */
@@ -503,7 +513,7 @@ enum {
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
__u32 p13;
__u32 p31;
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
new file mode 100644
index 00000000000..b081c7245ec
--- /dev/null
+++ b/include/linux/platform_data/macb.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACB_PDATA_H__
+#define __MACB_PDATA_H__
+
+struct macb_platform_data {
+ u32 phy_mask;
+ int phy_irq_pin; /* PHY IRQ */
+ u8 is_rmii; /* using RMII interface? */
+};
+
+#endif /* __MACB_PDATA_H__ */
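Editorial note: a hypothetical board file could describe its MACB Ethernet wiring with this structure as sketched below; the values are illustrative only, and the data would normally be attached to the platform device as dev.platform_data.

    #include <linux/platform_data/macb.h>

    static struct macb_platform_data board_macb_pdata = {
            .phy_mask    = 0,  /* do not exclude any PHY address from probing */
            .phy_irq_pin = 73, /* GPIO wired to the PHY interrupt line (example) */
            .is_rmii     = 1,  /* PHY connected over RMII rather than MII */
    };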
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index e9d9149ddf3..d94804aca76 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -42,9 +42,23 @@ struct mv_usb_platform_data {
/* only valid for HCD. OTG or Host only*/
unsigned int mode;
- int (*phy_init)(unsigned int regbase);
- void (*phy_deinit)(unsigned int regbase);
+ /* Set on platforms that need the ID pin checked by the OTG block */
+ unsigned int disable_otg_clock_gating:1;
+ /* Force a_bus_req to be asserted */
+ unsigned int otg_force_a_bus_req:1;
+
+ int (*phy_init)(void __iomem *regbase);
+ void (*phy_deinit)(void __iomem *regbase);
int (*set_vbus)(unsigned int vbus);
+ int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
+#ifndef CONFIG_HAVE_CLK
+/* Dummy stub for clk framework */
+#define clk_get(dev, id) NULL
+#define clk_put(clock) do {} while (0)
+#define clk_enable(clock) do {} while (0)
+#define clk_disable(clock) do {} while (0)
+#endif
+
#endif
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
new file mode 100644
index 00000000000..6fa109339bf
--- /dev/null
+++ b/include/linux/platform_data/s3c-hsudc.h
@@ -0,0 +1,34 @@
+/*
+ * S3C24XX USB 2.0 High-speed USB controller gadget driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
+ * Each endpoint can be configured as either an in or an out endpoint, and
+ * endpoints can be configured for Bulk or Interrupt transfer mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_USB_S3C_HSUDC_H
+#define __LINUX_USB_S3C_HSUDC_H
+
+/**
+ * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
+ * @epnum: Number of endpoints to be instantiated by the controller driver.
+ * @gpio_init: Platform specific USB related GPIO initialization.
+ * @gpio_uninit: Platform specific USB related GPIO uninitialization.
+ *
+ * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
+ * controllers.
+ */
+struct s3c24xx_hsudc_platdata {
+ unsigned int epnum;
+ void (*gpio_init)(void);
+ void (*gpio_uninit)(void);
+};
+
+#endif /* __LINUX_USB_S3C_HSUDC_H */
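Editorial note: for illustration, a board providing this platform data could look like the sketch below; the GPIO hooks are intentionally empty placeholders and all board_* names are invented.

    #include <linux/platform_data/s3c-hsudc.h>

    static void board_hsudc_gpio_init(void)
    {
            /* board specific: mux the USB pins, power up the transceiver, ... */
    }

    static void board_hsudc_gpio_uninit(void)
    {
            /* undo whatever board_hsudc_gpio_init() set up */
    }

    static struct s3c24xx_hsudc_platdata board_hsudc_pdata = {
            .epnum       = 9,  /* the controller supports up to 9 endpoints */
            .gpio_init   = board_hsudc_gpio_init,
            .gpio_uninit = board_hsudc_gpio_uninit,
    };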
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 2a23f7d1a82..60e9994ef40 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,7 +63,7 @@ struct platform_device_info {
u64 dma_mask;
};
extern struct platform_device *platform_device_register_full(
- struct platform_device_info *pdevinfo);
+ const struct platform_device_info *pdevinfo);
/**
* platform_device_register_resndata - add a platform-level device with
@@ -196,16 +196,8 @@ static inline void platform_set_drvdata(struct platform_device *pdev, void *data
* calling it replaces module_init() and module_exit()
*/
#define module_platform_driver(__platform_driver) \
-static int __init __platform_driver##_init(void) \
-{ \
- return platform_driver_register(&(__platform_driver)); \
-} \
-module_init(__platform_driver##_init); \
-static void __exit __platform_driver##_exit(void) \
-{ \
- platform_driver_unregister(&(__platform_driver)); \
-} \
-module_exit(__platform_driver##_exit);
+ module_driver(__platform_driver, platform_driver_register, \
+ platform_driver_unregister)
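Editorial note: with the macro now expressed via module_driver(), a driver that has no extra work to do in its init/exit paths shrinks to the sketch below; the foo_* names are placeholders.

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;       /* real drivers map resources and register here */
    }

    static int foo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name  = "foo",
                    .owner = THIS_MODULE,
            },
    };

    /* replaces the hand-rolled __init/__exit pair removed above */
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");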
extern struct platform_device *platform_create_bundle(struct platform_driver *driver,
int (*probe)(struct platform_device *),
@@ -264,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
}
#endif /* MODULE */
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare NULL
-#define platform_pm_complete NULL
-#endif
-
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
#else
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
-#define platform_pm_suspend_noirq NULL
-#define platform_pm_resume_noirq NULL
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
#else
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
-#define platform_pm_freeze_noirq NULL
-#define platform_pm_thaw_noirq NULL
-#define platform_pm_poweroff_noirq NULL
-#define platform_pm_restore_noirq NULL
#endif
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
- .prepare = platform_pm_prepare, \
- .complete = platform_pm_complete, \
.suspend = platform_pm_suspend, \
.resume = platform_pm_resume, \
.freeze = platform_pm_freeze, \
.thaw = platform_pm_thaw, \
.poweroff = platform_pm_poweroff, \
- .restore = platform_pm_restore, \
- .suspend_noirq = platform_pm_suspend_noirq, \
- .resume_noirq = platform_pm_resume_noirq, \
- .freeze_noirq = platform_pm_freeze_noirq, \
- .thaw_noirq = platform_pm_thaw_noirq, \
- .poweroff_noirq = platform_pm_poweroff_noirq, \
- .restore_noirq = platform_pm_restore_noirq,
+ .restore = platform_pm_restore,
#else
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
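Editorial note: USE_PLATFORM_PM_SLEEP_OPS is consumed by the platform bus core rather than by individual drivers. Roughly (an assumption based on the macro body above and on the generic runtime PM helpers), the bus core's dev_pm_ops would be assembled like this:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Sketch only: pairs the generic runtime PM helpers with the sleep ops macro */
    static const struct dev_pm_ops platform_dev_pm_ops = {
            .runtime_suspend = pm_generic_runtime_suspend,
            .runtime_resume  = pm_generic_runtime_resume,
            .runtime_idle    = pm_generic_runtime_idle,
            USE_PLATFORM_PM_SLEEP_OPS
    };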
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5c4c8b18c8b..e4982ac3fbb 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
/**
* struct dev_pm_ops - device PM callbacks
*
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- * its hardware state. Prevent new children of the device from being
- * registered after @prepare() returns (the driver's subsystem and
- * generally the rest of the kernel is supposed to prevent new calls to the
- * probe method from being made too once @prepare() has succeeded). If
- * @prepare() detects a situation it cannot handle (e.g. registration of a
- * child already in progress), it may return -EAGAIN, so that the PM core
- * can execute it once again (e.g. after the new child has been registered)
- * to recover from the race condition. This method is executed for all
- * kinds of suspend transitions and is followed by one of the suspend
- * callbacks: @suspend(), @freeze(), or @poweroff().
- * The PM core executes @prepare() for all devices before starting to
- * execute suspend callbacks for any of them, so drivers may assume all of
- * the other devices to be present and functional while @prepare() is being
- * executed. In particular, it is safe to make GFP_KERNEL memory
- * allocations from within @prepare(). However, drivers may NOT assume
- * anything about the availability of the user space at that time and it
- * is not correct to request firmware from within @prepare() (it's too
- * late to do that). [To work around this limitation, drivers may
- * register suspend and hibernation notifiers that are executed before the
- * freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved. First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types. They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that. If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ * the device from being registered after it has returned (the driver's
+ * subsystem and generally the rest of the kernel is supposed to prevent
+ * new calls to the probe method from being made too once @prepare() has
+ * succeeded). If @prepare() detects a situation it cannot handle (e.g.
+ * registration of a child already in progress), it may return -EAGAIN, so
+ * that the PM core can execute it once again (e.g. after a new child has
+ * been registered) to recover from the race condition.
+ * This method is executed for all kinds of suspend transitions and is
+ * followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ * @poweroff(). The PM core executes subsystem-level @prepare() for all
+ * devices before starting to invoke suspend callbacks for any of them, so
+ * generally devices may be assumed to be functional or to respond to
+ * runtime resume requests while @prepare() is being executed. However,
+ * device drivers may NOT assume anything about the availability of user
+ * space at that time and it is NOT valid to request firmware from within
+ * @prepare() (it's too late to do that). It also is NOT valid to allocate
+ * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ * [To work around these limitations, drivers may register suspend and
+ * hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
- * fails before the driver's suspend callback (@suspend(), @freeze(),
- * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * fails before the driver's suspend callback: @suspend(), @freeze() or
+ * @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
- * The PM core executes @complete() after it has executed the appropriate
- * resume callback for all devices.
+ * The PM core executes subsystem-level @complete() after it has executed
+ * the appropriate resume callbacks for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
- * contents of main memory are preserved. Quiesce the device, put it into
- * a low power state appropriate for the upcoming system state (such as
- * PCI_D3hot), and enable wakeup events as appropriate.
+ * contents of main memory are preserved. The exact action to perform
+ * depends on the device's subsystem (PM domain, device type, class or bus
+ * type), but generally the device must be quiescent after subsystem-level
+ * @suspend() has returned, so that it doesn't do any I/O or DMA.
+ * Subsystem-level @suspend() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @resume: Executed after waking the system up from a sleep state in which the
- * contents of main memory were preserved. Put the device into the
- * appropriate state, according to the information saved in memory by the
- * preceding @suspend(). The driver starts working again, responding to
- * hardware events and software requests. The hardware may have gone
- * through a power-off reset, or it may have maintained state from the
- * previous suspend() which the driver may rely on while resuming. On most
- * platforms, there are no restrictions on availability of resources like
- * clocks during @resume().
+ * contents of main memory were preserved. The exact action to perform
+ * depends on the device's subsystem, but generally the driver is expected
+ * to start working again, responding to hardware events and software
+ * requests (the device itself may be left in a low-power state, waiting
+ * for a runtime resume to occur). The state of the device at the time its
+ * driver's @resume() callback is run depends on the platform and subsystem
+ * the device belongs to. On most platforms, there are no restrictions on
+ * availability of resources like clocks during @resume().
+ * Subsystem-level @resume() is executed for all devices after invoking
+ * subsystem-level @resume_noirq() for all of them.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
- * Quiesce operations so that a consistent image can be created, but do NOT
- * otherwise put the device into a low power device state and do NOT emit
- * system wakeup events. Save in main memory the device settings to be
- * used by @restore() during the subsequent resume from hibernation or by
- * the subsequent @thaw(), if the creation of the image or the restoration
- * of main memory contents from it fails.
+ * Analogous to @suspend(), but it should not enable the device to signal
+ * wakeup events or change its power state. The majority of subsystems
+ * (with the notable exception of the PCI bus type) expect the driver-level
+ * @freeze() to save the device settings in memory to be used by @restore()
+ * during the subsequent resume from hibernation.
+ * Subsystem-level @freeze() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
- * if the creation of the image fails. Also executed after a failing
+ * if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
+ * Subsystem-level @thaw() is executed for all devices after invoking
+ * subsystem-level @thaw_noirq() for all of them. It also may be executed
+ * directly after @freeze() in case of a transition error.
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
- * Quiesce the device, put it into a low power state appropriate for the
- * upcoming system state (such as PCI_D3hot), and enable wakeup events as
- * appropriate.
+ * Analogous to @suspend(), but it need not save the device's settings in
+ * memory.
+ * Subsystem-level @poweroff() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
- * memory from a hibernation image. Driver starts working again,
- * responding to hardware events and software requests. Drivers may NOT
- * make ANY assumptions about the hardware state right prior to @restore().
- * On most platforms, there are no restrictions on availability of
- * resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- * actions required for suspending the device that need interrupts to be
- * disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- * actions required for resuming the device that need interrupts to be
- * disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- * actions required for freezing the device that need interrupts to be
- * disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- * actions required for thawing the device that need interrupts to be
- * disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- * actions required for handling the device that need interrupts to be
- * disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- * actions required for restoring the operations of the device that need
- * interrupts to be disabled
+ * memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
+ * additional operations required for suspending the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @suspend_noirq() is being executed.
+ * It generally is expected that the device will be in a low-power state
+ * (appropriate for the target system sleep state) after subsystem-level
+ * @suspend_noirq() has returned successfully. If the device can generate
+ * system wakeup signals and is enabled to wake up the system, it should be
+ * configured to do so at that time. However, depending on the platform
+ * and device's subsystem, @suspend() may be allowed to put the device into
+ * the low-power state and configure it to generate wakeup signals, in
+ * which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ * operations required for resuming the device that might be racing with
+ * its driver's interrupt handler, which is guaranteed not to run while
+ * @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
+ * additional operations required for freezing the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @freeze_noirq() is being executed.
+ * The power state of the device should not be changed by either @freeze()
+ * or @freeze_noirq() and it should not be configured to signal system
+ * wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
+ * @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ * operations required for restoring the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
- * returned. The error codes returned in that cases are only printed by the PM
+ * returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
- * executed. However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed. However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
*
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
+ *
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
- * This need not mean that the device should be put into a low power state.
+ * This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
- * power and is capable of generating run-time wake-up events, remote
- * wake-up (i.e., a hardware mechanism allowing the device to request a
- * change of its power state via a wake-up event, such as PCI PME) should
- * be enabled for it.
+ * power and is capable of generating runtime wakeup events, remote wakeup
+ * (i.e., a hardware mechanism allowing the device to request a change of
+ * its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
- * wake-up event generated by hardware or at the request of software. If
- * necessary, put the device into the full power state and restore its
+ * wakeup event generated by hardware or at the request of software. If
+ * necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- * power state if all of the necessary conditions are satisfied. Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ * low-power state if all of the necessary conditions are satisfied. Check
* these conditions and handle the device as appropriate, possibly queueing
* a suspend request for it. The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
*/
struct dev_pm_ops {
@@ -261,19 +300,6 @@ const struct dev_pm_ops name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS NULL
-#endif
-
/**
* PM_EVENT_ messages
*
@@ -482,6 +508,8 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
+ ktime_t suspend_time;
+ s64 max_time_suspended_ns;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
struct pm_qos_constraints *constraints;
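Editorial note: to ground the long description above, a minimal driver-level dev_pm_ops might look like the sketch below. The foo_* names are hypothetical, and SET_SYSTEM_SLEEP_PM_OPS() is assumed to be available from this header to point all of the sleep callbacks at one suspend/resume pair.

    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)
    {
            /* quiesce the device: stop I/O and DMA before the noirq phase */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* reprogram the hardware and start accepting requests again */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    };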
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 65633e5a2bc..a03a0ad998b 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -10,6 +10,7 @@
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/err.h>
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {
struct dev_power_governor {
bool (*power_down_ok)(struct dev_pm_domain *domain);
+ bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+ int (*start)(struct device *dev);
+ int (*stop)(struct device *dev);
+ int (*save_state)(struct device *dev);
+ int (*restore_state)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*suspend_late)(struct device *dev);
+ int (*resume_early)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*freeze_late)(struct device *dev);
+ int (*thaw_early)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ bool (*active_wakeup)(struct device *dev);
};
struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
+ char *name;
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
bool suspend_power_off; /* Power status before system suspend */
bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
+ s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
- int (*start_device)(struct device *dev);
- int (*stop_device)(struct device *dev);
- bool (*active_wakeup)(struct device *dev);
+ s64 power_on_latency_ns;
+ struct gpd_dev_ops dev_ops;
+ s64 break_even_ns; /* Power break even for the entire domain. */
+ s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
+ ktime_t power_off_time;
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
struct list_head slave_node;
};
+struct gpd_timing_data {
+ s64 stop_latency_ns;
+ s64 start_latency_ns;
+ s64 save_state_latency_ns;
+ s64 restore_state_latency_ns;
+ s64 break_even_ns;
+};
+
struct generic_pm_domain_data {
struct pm_domain_data base;
+ struct gpd_dev_ops ops;
+ struct gpd_timing_data td;
bool need_restore;
};
@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
}
#ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+ return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+ struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
+
extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
#else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev)
{
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+ struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
+{
+}
static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
return -ENOSYS;
}
+static inline bool default_stop_ok(struct device *dev)
+{
+ return false;
+}
+#define pm_domain_always_on_gov NULL
#endif
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+ return __pm_genpd_remove_callbacks(dev, true);
+}
+
#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
extern void pm_genpd_poweroff_unused(void);
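Editorial note: a hedged sketch of the new per-device callbacks and timing data, using only gpd_dev_ops, gpd_timing_data and pm_genpd_add_callbacks() as declared in this patch. The foo_* names, the latency figures and the comments about what stop/start do are illustrative assumptions.

    #include <linux/pm_domain.h>

    static int foo_dev_stop(struct device *dev)
    {
            /* e.g. gate the device clock */
            return 0;
    }

    static int foo_dev_start(struct device *dev)
    {
            /* e.g. ungate the device clock */
            return 0;
    }

    static struct gpd_dev_ops foo_gpd_ops = {
            .stop  = foo_dev_stop,
            .start = foo_dev_start,
    };

    /* latencies (ns) that a governor such as default_stop_ok() can consult */
    static struct gpd_timing_data foo_gpd_td = {
            .stop_latency_ns  = 5000,
            .start_latency_ns = 5000,
    };

    static int foo_attach_genpd_callbacks(struct device *dev)
    {
            return pm_genpd_add_callbacks(dev, &foo_gpd_ops, &foo_gpd_td);
    }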
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 83b0ea302a8..e5bbcbaa6f5 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
+s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value);
#else
static inline int pm_qos_update_target(struct pm_qos_constraints *c,
struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
{ return 0; }
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
dev->power.power_state = PMSG_INVALID;
}
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+ { return 0; }
#endif
#endif
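Editorial note: a hedged sketch of the new ancestor request helper, where a device that sits behind a controller adds a constraint to its closest ancestor that has PM QoS constraints. The request object, the value and the foo_* names are illustrative only; the signature comes from the declaration above.

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request foo_ancestor_req;

    static int foo_constrain_ancestor(struct device *dev)
    {
            /* 100 is an illustrative constraint value for the ancestor */
            return dev_pm_qos_add_ancestor_request(dev, &foo_ancestor_req, 100);
    }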
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d3085e72a0e..609daae7a01 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+ s64 delta_ns);
static inline bool pm_children_suspended(struct device *dev)
{
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+ s64 delta_ns) {}
+
#endif /* !CONFIG_PM_RUNTIME */
static inline int pm_runtime_idle(struct device *dev)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 79159de0e34..2110a81c5e2 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -40,12 +40,6 @@
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE 0x44c9e71bUL
-#endif
-
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 643b96c7a94..85c50730623 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -50,7 +50,7 @@ typedef int (write_proc_t)(struct file *file, const char __user *buffer,
struct proc_dir_entry {
unsigned int low_ino;
- mode_t mode;
+ umode_t mode;
nlink_t nlink;
uid_t uid;
gid_t gid;
@@ -106,9 +106,9 @@ extern void proc_root_init(void);
void proc_flush_task(struct task_struct *task);
-extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
+extern struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
struct proc_dir_entry *parent);
-struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
const struct file_operations *proc_fops,
void *data);
@@ -146,17 +146,17 @@ extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
-extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
+extern struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
struct proc_dir_entry *parent);
-static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct file_operations *proc_fops)
{
return proc_create_data(name, mode, parent, proc_fops, NULL);
}
static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
- mode_t mode, struct proc_dir_entry *base,
+ umode_t mode, struct proc_dir_entry *base,
read_proc_t *read_proc, void * data)
{
struct proc_dir_entry *res=create_proc_entry(name,mode,base);
@@ -168,7 +168,7 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
}
extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
- const char *name, mode_t mode, const struct file_operations *fops);
+ const char *name, umode_t mode, const struct file_operations *fops);
extern void proc_net_remove(struct net *net, const char *name);
extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
struct proc_dir_entry *parent);
@@ -185,15 +185,15 @@ static inline void proc_flush_task(struct task_struct *task)
}
static inline struct proc_dir_entry *create_proc_entry(const char *name,
- mode_t mode, struct proc_dir_entry *parent) { return NULL; }
+ umode_t mode, struct proc_dir_entry *parent) { return NULL; }
static inline struct proc_dir_entry *proc_create(const char *name,
- mode_t mode, struct proc_dir_entry *parent,
+ umode_t mode, struct proc_dir_entry *parent,
const struct file_operations *proc_fops)
{
return NULL;
}
static inline struct proc_dir_entry *proc_create_data(const char *name,
- mode_t mode, struct proc_dir_entry *parent,
+ umode_t mode, struct proc_dir_entry *parent,
const struct file_operations *proc_fops, void *data)
{
return NULL;
@@ -205,10 +205,10 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
- mode_t mode, struct proc_dir_entry *parent) { return NULL; }
+ umode_t mode, struct proc_dir_entry *parent) { return NULL; }
static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
- mode_t mode, struct proc_dir_entry *base,
+ umode_t mode, struct proc_dir_entry *base,
read_proc_t *read_proc, void * data) { return NULL; }
struct tty_driver;
@@ -253,7 +253,7 @@ extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
union proc_op {
- int (*proc_get_link)(struct inode *, struct path *);
+ int (*proc_get_link)(struct dentry *, struct path *);
int (*proc_read)(struct task_struct *task, char *page);
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3..e1461e143be 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,6 +22,9 @@
#ifndef _LINUX_PSTORE_H
#define _LINUX_PSTORE_H
+#include <linux/time.h>
+#include <linux/kmsg_dump.h>
+
/* types */
enum pstore_type_id {
PSTORE_TYPE_DMESG = 0,
@@ -35,11 +38,14 @@ struct pstore_info {
spinlock_t buf_lock; /* serialize access to 'buf' */
char *buf;
size_t bufsize;
+ struct mutex read_mutex; /* serialize open/read/close */
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
- int (*write)(enum pstore_type_id type, u64 *id,
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
+ int (*write)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
struct pstore_info *psi);
@@ -48,18 +54,12 @@ struct pstore_info {
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
-extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
#else
static inline int
pstore_register(struct pstore_info *psi)
{
return -ENODEV;
}
-static inline int
-pstore_write(enum pstore_type_id type, char *buf, size_t size)
-{
- return -ENODEV;
-}
#endif
#endif /*_LINUX_PSTORE_H*/
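Editorial note: since the write hook now also receives the kmsg dump reason, a backend can filter on it. The sketch below only demonstrates the new callback signature with a hypothetical "panic only" policy; it is not modeled on any real backend.

    #include <linux/errno.h>
    #include <linux/pstore.h>

    static int foo_pstore_write(enum pstore_type_id type,
                                enum kmsg_dump_reason reason,
                                u64 *id, unsigned int part, size_t size,
                                struct pstore_info *psi)
    {
            if (reason != KMSG_DUMP_PANIC)
                    return -EINVAL; /* hypothetical policy: persist panics only */

            /* copy 'size' bytes from psi->buf into persistent storage here */
            *id = part;             /* hand back a backend-specific record id */
            return 0;
    }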
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 9e65d9e2066..6f6df86f1ae 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -277,7 +277,10 @@ struct mdp_superblock_1 {
*/
#define MD_FEATURE_RESHAPE_ACTIVE 4
#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */
-
-#define MD_FEATURE_ALL (1|2|4|8)
+#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an
+ * active device with same 'role'.
+ * 'recovery_offset' is also set.
+ */
+#define MD_FEATURE_ALL (1|2|4|8|16)
#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 2b59cc82439..53272e9860a 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -132,7 +132,7 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
PROT_READ|PROT_WRITE, \
MAP_PRIVATE|MAP_ANONYMOUS,\
0, 0))
-# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
+# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y))
static inline void cpu_relax(void)
{
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 3a8f0c9b293..5bf5500db83 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,7 +2,7 @@
#define _LINUX_RAMFS_H
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
- int mode, dev_t dev);
+ umode_t mode, dev_t dev);
extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7..81c04f4348e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+ struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
@@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void)
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
+#ifdef CONFIG_RCU_TRACE
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+ struct rcu_head *rhp);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
#endif
#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
@@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
+extern void rcu_idle_enter(void);
+extern void rcu_idle_exit(void);
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
/*
* Infrastructure to implement the synchronize_() primitives in
@@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#ifdef CONFIG_PROVE_RCU
+extern int rcu_is_cpu_idle(void);
+#else /* !CONFIG_PROVE_RCU */
+static inline int rcu_is_cpu_idle(void)
+{
+ return 0;
+}
+#endif /* else !CONFIG_PROVE_RCU */
-extern struct lockdep_map rcu_bh_lock_map;
-# define rcu_read_acquire_bh() \
- lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_acquire(struct lockdep_map *map)
+{
+ WARN_ON_ONCE(rcu_is_cpu_idle());
+ lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+}
-extern struct lockdep_map rcu_sched_lock_map;
-# define rcu_read_acquire_sched() \
- lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_sched() \
- lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_release(struct lockdep_map *map)
+{
+ WARN_ON_ONCE(rcu_is_cpu_idle());
+ lock_release(map, 1, _THIS_IP_);
+}
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);
/**
@@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void);
*
* Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
+ *
+ * Note that rcu_read_lock() and the matching rcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock() in process context if the matching rcu_read_lock()
+ * was invoked from within an irq handler.
*/
static inline int rcu_read_lock_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
return lock_is_held(&rcu_lock_map);
}
@@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void);
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes. This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way other
+ * CPUs that have started a grace period can notice the extended quiescent
+ * state. Otherwise we would delay any grace period for as long as we run
+ * in the idle task.
*/
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
@@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-# define rcu_read_acquire_bh() do { } while (0)
-# define rcu_read_release_bh() do { } while (0)
-# define rcu_read_acquire_sched() do { } while (0)
-# define rcu_read_release_sched() do { } while (0)
+# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
{
@@ -637,7 +658,7 @@ static inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
- rcu_read_acquire();
+ rcu_lock_acquire(&rcu_lock_map);
}
/*
@@ -657,7 +678,7 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_read_release();
+ rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
}
@@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void)
* critical sections in interrupt context can use just rcu_read_lock(),
* though this should at least be commented to avoid confusing people
* reading the code.
+ *
+ * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+ * was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
__acquire(RCU_BH);
- rcu_read_acquire_bh();
+ rcu_lock_acquire(&rcu_bh_lock_map);
}
/*
@@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_read_release_bh();
+ rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
}
@@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void)
* are being done using call_rcu_sched() or synchronize_rcu_sched().
* Read-side critical sections can also be introduced by anything that
* disables preemption, including local_irq_disable() and friends.
+ *
+ * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_sched() from process context if the matching
+ * rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
{
preempt_disable();
__acquire(RCU_SCHED);
- rcu_read_acquire_sched();
+ rcu_lock_acquire(&rcu_sched_lock_map);
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_read_release_sched();
+ rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
}
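Editorial note: as a reminder of what the updated comments require (lock and unlock in the same context), here is a plain read-side critical section; struct foo, foo_head and foo_read_first_value() are invented for the example.

    #include <linux/rcupdate.h>

    struct foo {
            int value;
            struct foo __rcu *next;
    };

    static struct foo __rcu *foo_head;

    static int foo_read_first_value(void)
    {
            struct foo *p;
            int val = -1;

            rcu_read_lock();
            p = rcu_dereference(foo_head);
            if (p)
                    val = p->value;
            rcu_read_unlock();      /* same context as rcu_read_lock() above */

            return val;
    }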
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 690276a642c..eb93921cdd3 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -23,9 +23,8 @@ struct spi_device;
/* An enum of all the supported cache types */
enum regcache_type {
REGCACHE_NONE,
- REGCACHE_INDEXED,
REGCACHE_RBTREE,
- REGCACHE_LZO
+ REGCACHE_COMPRESSED
};
/**
@@ -83,7 +82,7 @@ struct regmap_config {
bool (*precious_reg)(struct device *dev, unsigned int reg);
unsigned int max_register;
- struct reg_default *reg_defaults;
+ const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
enum regcache_type cache_type;
const void *reg_defaults_raw;
@@ -129,6 +128,8 @@ struct regmap *regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config);
void regmap_exit(struct regmap *map);
+int regmap_reinit_cache(struct regmap *map,
+ const struct regmap_config *config);
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
@@ -139,9 +140,61 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
int regcache_sync(struct regmap *map);
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+
+/**
+ * Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask: Mask used to flag/control the register.
+ */
+struct regmap_irq {
+ unsigned int reg_offset;
+ unsigned int mask;
+};
+
+/**
+ * Description of a generic regmap irq_chip. This is not intended to
+ * handle every possible interrupt controller, but it should handle a
+ * substantial proportion of those that are found in the wild.
+ *
+ * @name: Descriptive name for IRQ controller.
+ *
+ * @status_base: Base status register address.
+ * @mask_base: Base mask register address.
+ * @ack_base: Base ack address. If zero then the chip is clear on read.
+ *
+ * @num_regs: Number of registers in each control bank.
+ * @irqs: Descriptors for individual IRQs. Interrupt numbers are
+ * assigned based on the index in the array of the interrupt.
+ * @num_irqs: Number of descriptors.
+ */
+struct regmap_irq_chip {
+ const char *name;
+
+ unsigned int status_base;
+ unsigned int mask_base;
+ unsigned int ack_base;
+
+ int num_regs;
+
+ const struct regmap_irq *irqs;
+ int num_irqs;
+};
+
+struct regmap_irq_chip_data;
+
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
#endif
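Editorial note: a hedged sketch of the new generic IRQ chip support, using only the structures and functions declared above. The register addresses, bit positions and foo_* names are invented, and irq_base is assumed to be a block of IRQ numbers reserved by the caller.

    #include <linux/bitops.h>
    #include <linux/regmap.h>

    /* hypothetical layout: one status/mask/ack bank with two interrupt sources */
    static const struct regmap_irq foo_irqs[] = {
            { .reg_offset = 0, .mask = BIT(0) },    /* "ready" interrupt */
            { .reg_offset = 0, .mask = BIT(1) },    /* "error" interrupt */
    };

    static struct regmap_irq_chip foo_irq_chip = {
            .name        = "foo",
            .status_base = 0x10,
            .mask_base   = 0x11,
            .ack_base    = 0x12,
            .num_regs    = 1,
            .irqs        = foo_irqs,
            .num_irqs    = ARRAY_SIZE(foo_irqs),
    };

    /* In probe code, with 'map', 'irq' and 'irq_base' already obtained:
     *
     *      struct regmap_irq_chip_data *irq_data;
     *      ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
     *                                &foo_irq_chip, &irq_data);
     */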
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f7756d146c6..f2698a0edfc 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -149,6 +149,8 @@ int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
+int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers);
void regulator_bulk_free(int num_consumers,
struct regulator_bulk_data *consumers);
@@ -212,6 +214,11 @@ static inline int regulator_disable(struct regulator *regulator)
return 0;
}
+static inline int regulator_force_disable(struct regulator *regulator)
+{
+ return 0;
+}
+
static inline int regulator_disable_deferred(struct regulator *regulator,
int ms)
{
@@ -242,6 +249,12 @@ static inline int regulator_bulk_disable(int num_consumers,
return 0;
}
+static inline int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
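Editorial note: a hedged sketch of the new bulk force-disable helper in an emergency shutdown path. The supply names and foo_* identifiers are invented, and the regulators are assumed to have been obtained earlier with regulator_bulk_get().

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static struct regulator_bulk_data foo_supplies[] = {
            { .supply = "vdd" },
            { .supply = "vio" },
    };

    /* forcibly cut several supplies at once, bypassing reference counts */
    static void foo_emergency_power_off(struct device *dev)
    {
            if (regulator_bulk_force_disable(ARRAY_SIZE(foo_supplies),
                                             foo_supplies))
                    dev_err(dev, "failed to force-disable supplies\n");
    }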
static inline void regulator_bulk_free(int num_consumers,
struct regulator_bulk_data *consumers)
{
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 52c89ae32f6..4214b9a9d1c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -154,6 +154,7 @@ enum regulator_type {
* this type.
*
* @name: Identifying name for the regulator.
+ * @supply_name: Identifying name for the regulator supply.
* @id: Numerical identifier for the regulator.
* @n_voltages: Number of selectors available for ops.list_voltage().
* @ops: Regulator operations table.
@@ -163,6 +164,7 @@ enum regulator_type {
*/
struct regulator_desc {
const char *name;
+ const char *supply_name;
int id;
unsigned n_voltages;
struct regulator_ops *ops;
@@ -212,7 +214,7 @@ struct regulator_dev {
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
struct device *dev, const struct regulator_init_data *init_data,
- void *driver_data);
+ void *driver_data, struct device_node *of_node);
void regulator_unregister(struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
new file mode 100644
index 00000000000..769704f296e
--- /dev/null
+++ b/include/linux/regulator/of_regulator.h
@@ -0,0 +1,22 @@
+/*
+ * OpenFirmware regulator support routines
+ *
+ */
+
+#ifndef __LINUX_OF_REG_H
+#define __LINUX_OF_REG_H
+
+#if defined(CONFIG_OF)
+extern struct regulator_init_data
+ *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node);
+#else
+static inline struct regulator_init_data
+ *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_REG_H */
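
/*
 * Editor's note: a hedged sketch of how a regulator driver might combine
 * of_get_regulator_init_data() with the new of_node argument taken by
 * regulator_register().  "myreg", its desc and its (empty) ops table are
 * invented for illustration.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>

static struct regulator_ops myreg_ops;	/* .enable/.disable etc. filled elsewhere */

static struct regulator_desc myreg_desc = {
	.name	= "myreg",
	.id	= 0,
	.type	= REGULATOR_VOLTAGE,
	.ops	= &myreg_ops,
	.owner	= THIS_MODULE,
};

static int __devinit myreg_probe(struct platform_device *pdev)
{
	struct regulator_init_data *init_data;
	struct regulator_dev *rdev;

	/* Parse constraints from the DT node when booting with CONFIG_OF. */
	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (!init_data)
		init_data = pdev->dev.platform_data;

	rdev = regulator_register(&myreg_desc, &pdev->dev, init_data,
				  NULL, pdev->dev.of_node);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	platform_set_drvdata(pdev, rdev);
	return 0;
}
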
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 96d465f8d3e..2213ddcce20 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1759,13 +1759,14 @@ struct reiserfs_journal_header {
REISERFS_QUOTA_TRANS_BLOCKS(sb)))
#ifdef CONFIG_QUOTA
+#define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA))
/* We need to update data and inode (atime) */
-#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0)
+#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0)
/* 1 balancing, 1 bitmap, 1 data per write + stat data update */
-#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \
+#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
(DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0)
/* same as with INIT */
-#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \
+#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \
(DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0)
#else
#define REISERFS_QUOTA_TRANS_BLOCKS(s) 0
@@ -2056,7 +2057,7 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
struct reiserfs_security_handle;
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
- struct inode *dir, int mode,
+ struct inode *dir, umode_t mode,
const char *symname, loff_t i_size,
struct dentry *dentry, struct inode *inode,
struct reiserfs_security_handle *security);
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 52c83b6a758..8c9e85c64b4 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -417,6 +417,7 @@ struct reiserfs_sb_info {
char *s_qf_names[MAXQUOTAS];
int s_jquota_fmt;
#endif
+ char *s_jdev; /* Stored jdev for mount option showing */
#ifdef CONFIG_REISERFS_CHECK
struct tree_balance *cur_tb; /*
@@ -482,7 +483,8 @@ enum reiserfs_mount_options {
REISERFS_ERROR_RO,
REISERFS_ERROR_CONTINUE,
- REISERFS_QUOTA, /* Some quota option specified */
+ REISERFS_USRQUOTA, /* User quota option specified */
+ REISERFS_GRPQUOTA, /* Group quota option specified */
REISERFS_TEST1,
REISERFS_TEST2,
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 14a86bc7102..a822fd71fd6 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -144,7 +144,7 @@ struct rchan_callbacks
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
- int mode,
+ umode_t mode,
struct rchan_buf *buf,
int *is_global);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 2148b122779..1afb9954bbf 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -120,6 +120,7 @@ void anon_vma_init(void); /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+void anon_vma_moveto_tail(struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9b9bc..21cd0303af5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FREEZING) == 0)
+ (task->flags & PF_FROZEN) == 0)
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
#endif
/*
@@ -483,8 +485,8 @@ struct task_cputime {
#define INIT_CPUTIME \
(struct task_cputime) { \
- .utime = cputime_zero, \
- .stime = cputime_zero, \
+ .utime = 0, \
+ .stime = 0, \
.sum_exec_runtime = 0, \
}
@@ -635,13 +637,15 @@ struct signal_struct {
#endif
#ifdef CONFIG_CGROUPS
/*
- * The threadgroup_fork_lock prevents threads from forking with
- * CLONE_THREAD while held for writing. Use this for fork-sensitive
- * threadgroup-wide operations. It's taken for reading in fork.c in
- * copy_process().
- * Currently only needed write-side by cgroups.
+ * group_rwsem prevents new tasks from entering the threadgroup and
+ * member tasks from exiting, more specifically, setting of
+ * PF_EXITING. fork and exit paths are protected with this rwsem
+ * using threadgroup_change_begin/end(). Users which require
+ * threadgroup to remain stable should use threadgroup_[un]lock()
+ * which also takes care of exec path. Currently, cgroup is the
+ * only user.
*/
- struct rw_semaphore threadgroup_fork_lock;
+ struct rw_semaphore group_rwsem;
#endif
int oom_adj; /* OOM kill score adjustment (bit shift) */
@@ -901,6 +905,10 @@ struct sched_group_power {
* single CPU.
*/
unsigned int power, power_orig;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
};
struct sched_group {
@@ -925,6 +933,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
return to_cpumask(sg->cpumask);
}
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
+
struct sched_domain_attr {
int relax_domain_level;
};
@@ -1315,8 +1332,8 @@ struct task_struct {
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
- struct task_struct *real_parent; /* real parent process */
- struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+ struct task_struct __rcu *real_parent; /* real parent process */
+ struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
/*
* children/sibling forms the list of my natural children
*/
@@ -1527,6 +1544,7 @@ struct task_struct {
*/
int nr_dirtied;
int nr_dirtied_pause;
+ unsigned long dirty_paused_when; /* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
@@ -1772,7 +1790,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
-#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
@@ -1788,7 +1805,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -2070,6 +2086,14 @@ extern int sched_setscheduler(struct task_struct *, int,
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @p: the task in question.
+ */
+static inline bool is_idle_task(struct task_struct *p)
+{
+ return p->pid == 0;
+}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
@@ -2373,29 +2397,62 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
-/* See the declaration of threadgroup_fork_lock in signal_struct. */
#ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
{
- down_read(&tsk->signal->threadgroup_fork_lock);
+ down_read(&tsk->signal->group_rwsem);
}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
{
- up_read(&tsk->signal->threadgroup_fork_lock);
+ up_read(&tsk->signal->group_rwsem);
}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * perform exec. This is useful for cases where the threadgroup needs to
+ * stay stable across blockable operations.
+ *
+ * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
+ * synchronization. While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * During exec, a task goes and puts its thread group through unusual
+ * changes. After de-threading, exclusive access is assumed to resources
+ * which are usually shared by tasks in the same group - e.g. sighand may
+ * be replaced with a new one. Also, the exec'ing task takes over group
+ * leader role including its pid. Exclude these changes while locked by
+ * grabbing cred_guard_mutex which is used to synchronize exec path.
+ */
+static inline void threadgroup_lock(struct task_struct *tsk)
{
- down_write(&tsk->signal->threadgroup_fork_lock);
+ /*
+ * exec uses exit for de-threading nesting group_rwsem inside
+ * cred_guard_mutex. Grab cred_guard_mutex first.
+ */
+ mutex_lock(&tsk->signal->cred_guard_mutex);
+ down_write(&tsk->signal->group_rwsem);
}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
{
- up_write(&tsk->signal->threadgroup_fork_lock);
+ up_write(&tsk->signal->group_rwsem);
+ mutex_unlock(&tsk->signal->cred_guard_mutex);
}
#else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif
#ifndef __HAVE_THREAD_FUNCTIONS
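
/*
 * Editor's note: a hedged sketch of the calling pattern for the renamed
 * threadgroup locking helpers above.  The "walk every thread" body is
 * illustrative only; cgroup's attach path is the real in-tree user.
 */
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void walk_stable_threadgroup(struct task_struct *leader)
{
	struct task_struct *t;

	/* Block fork, exit and exec in @leader's group for the walk. */
	threadgroup_lock(leader);

	rcu_read_lock();
	t = leader;
	do {
		/* operate on @t; no thread can enter or leave the group here */
	} while_each_thread(leader, t);
	rcu_read_unlock();

	threadgroup_unlock(leader);
}
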
diff --git a/include/linux/security.h b/include/linux/security.h
index 19d8e04e168..98112cf9388 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -186,7 +186,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Security module identifier.
*
* @name:
- * A string that acts as a unique identifeir for the LSM with max number
+ * A string that acts as a unique identifier for the LSM with max number
* of characters = SECURITY_NAME_MAX.
*
* Security hooks for program execution operations.
@@ -275,7 +275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @copy copied data which will be passed to the security module.
* Returns 0 if the copy was successful.
* @sb_remount:
- * Extracts security system specifc mount options and verifys no changes
+ * Extracts security system specific mount options and verifies no changes
* are being made to those options.
* @sb superblock being remounted
* @data contains the filesystem-specific data.
@@ -380,15 +380,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return 0 if permission is granted.
* @inode_mkdir:
* Check permissions to create a new directory in the existing directory
- * associated with inode strcture @dir.
- * @dir containst the inode structure of parent of the directory to be created.
+ * associated with inode structure @dir.
+ * @dir contains the inode structure of parent of the directory to be created.
* @dentry contains the dentry structure of new directory.
* @mode contains the mode of new directory.
* Return 0 if permission is granted.
* @path_mkdir:
* Check permissions to create a new directory in the existing directory
- * associated with path strcture @path.
- * @dir containst the path structure of parent of the directory
+ * associated with path structure @path.
+ * @dir contains the path structure of parent of the directory
* to be created.
* @dentry contains the dentry structure of new directory.
* @mode contains the mode of new directory.
@@ -578,7 +578,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @file contains the file structure.
* @cmd contains the operation to perform.
* @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg can
+ * Check permission for an ioctl operation on @file. Note that @arg
* sometimes represents a user space pointer; in other cases, it may be a
* simple integer value. When @arg represents a user space pointer, it
* should never be used by the security module.
@@ -606,7 +606,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return 0 if permission is granted.
* @file_fcntl:
* Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg can sometimes
+ * from being performed on the file @file. Note that @arg sometimes
* represents a user space pointer; in other cases, it may be a simple
* integer value. When @arg represents a user space pointer, it should
* never be used by the security module.
@@ -793,7 +793,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* information can be saved using the eff_cap field of the
* netlink_skb_parms structure. Also may be used to provide fine
* grained control over message transmission.
- * @sk associated sock of task sending the message.,
+ * @sk associated sock of task sending the message.
* @skb contains the sk_buff structure for the netlink message.
* Return 0 if the information was successfully saved and message
* is allowed to be transmitted.
@@ -1080,9 +1080,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* should free it.
* @key points to the key to be queried.
* @_buffer points to a pointer that should be set to point to the
- * resulting string (if no label or an error occurs).
+ * resulting string (if no label or an error occurs).
* Return the length of the string (including terminating NUL) or -ve if
- * an error.
+ * an error.
* May also return 0 (and a NULL buffer pointer) if there is no label.
*
* Security hooks affecting all System V IPC operations.
@@ -1268,7 +1268,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* credentials.
* @tsk contains the task_struct for the process.
* @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in
+ * @ns contains the user namespace we want the capability in
* @cap contains the capability <include/linux/capability.h>.
* @audit: Whether to write an audit message or not
* Return 0 if the capability is granted for @tsk.
@@ -1370,7 +1370,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @ctxlen contains the length of @ctx.
*
* @inode_getsecctx:
- * Returns a string containing all relavent security context information
+ * Returns a string containing all relevant security context information
*
* @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
@@ -1424,9 +1424,9 @@ struct security_operations {
#ifdef CONFIG_SECURITY_PATH
int (*path_unlink) (struct path *dir, struct dentry *dentry);
- int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode);
+ int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
int (*path_rmdir) (struct path *dir, struct dentry *dentry);
- int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode,
+ int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
int (*path_truncate) (struct path *path);
int (*path_symlink) (struct path *dir, struct dentry *dentry,
@@ -1435,8 +1435,7 @@ struct security_operations {
struct dentry *new_dentry);
int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry);
- int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode);
+ int (*path_chmod) (struct path *path, umode_t mode);
int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
int (*path_chroot) (struct path *path);
#endif
@@ -1447,16 +1446,16 @@ struct security_operations {
const struct qstr *qstr, char **name,
void **value, size_t *len);
int (*inode_create) (struct inode *dir,
- struct dentry *dentry, int mode);
+ struct dentry *dentry, umode_t mode);
int (*inode_link) (struct dentry *old_dentry,
struct inode *dir, struct dentry *new_dentry);
int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
int (*inode_symlink) (struct inode *dir,
struct dentry *dentry, const char *old_name);
- int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode);
+ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev);
+ umode_t mode, dev_t dev);
int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
@@ -1716,15 +1715,15 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len);
-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
int security_inode_symlink(struct inode *dir, struct dentry *dentry,
const char *old_name);
-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev);
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int security_inode_readlink(struct dentry *dentry);
@@ -2056,12 +2055,12 @@ static inline int security_old_inode_init_security(struct inode *inode,
char **name, void **value,
size_t *len)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline int security_inode_create(struct inode *dir,
struct dentry *dentry,
- int mode)
+ umode_t mode)
{
return 0;
}
@@ -2855,9 +2854,9 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
#ifdef CONFIG_SECURITY_PATH
int security_path_unlink(struct path *dir, struct dentry *dentry);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode);
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
int security_path_rmdir(struct path *dir, struct dentry *dentry);
-int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
int security_path_truncate(struct path *path);
int security_path_symlink(struct path *dir, struct dentry *dentry,
@@ -2866,8 +2865,7 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry);
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry);
-int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode);
+int security_path_chmod(struct path *path, umode_t mode);
int security_path_chown(struct path *path, uid_t uid, gid_t gid);
int security_path_chroot(struct path *path);
#else /* CONFIG_SECURITY_PATH */
@@ -2877,7 +2875,7 @@ static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
}
static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
- int mode)
+ umode_t mode)
{
return 0;
}
@@ -2888,7 +2886,7 @@ static inline int security_path_rmdir(struct path *dir, struct dentry *dentry)
}
static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
- int mode, unsigned int dev)
+ umode_t mode, unsigned int dev)
{
return 0;
}
@@ -2919,9 +2917,7 @@ static inline int security_path_rename(struct path *old_dir,
return 0;
}
-static inline int security_path_chmod(struct dentry *dentry,
- struct vfsmount *mnt,
- mode_t mode)
+static inline int security_path_chmod(struct path *path, umode_t mode)
{
return 0;
}
@@ -3010,7 +3006,7 @@ static inline void security_audit_rule_free(void *lsmrule)
#ifdef CONFIG_SECURITYFS
-extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
+extern struct dentry *securityfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
@@ -3025,7 +3021,7 @@ static inline struct dentry *securityfs_create_dir(const char *name,
}
static inline struct dentry *securityfs_create_file(const char *name,
- mode_t mode,
+ umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 0b69a468421..44f1514b00b 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -74,7 +74,7 @@ static inline void seq_commit(struct seq_file *m, int num)
}
}
-char *mangle_path(char *s, char *p, char *esc);
+char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
@@ -86,10 +86,10 @@ int seq_write(struct seq_file *seq, const void *data, size_t len);
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
-int seq_path(struct seq_file *, struct path *, char *);
-int seq_dentry(struct seq_file *, struct dentry *, char *);
-int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
- char *esc);
+int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_dentry(struct seq_file *, struct dentry *, const char *);
+int seq_path_root(struct seq_file *m, const struct path *path,
+ const struct path *root, const char *esc);
int seq_bitmap(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 1f05bbeac01..8f012f8ac8e 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -66,6 +66,7 @@ enum {
* dependent on the 8250 driver.
*/
struct uart_port;
+struct uart_8250_port;
int serial8250_register_port(struct uart_port *);
void serial8250_unregister_port(int line);
@@ -81,7 +82,11 @@ extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
+extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
+unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
+void serial8250_tx_chars(struct uart_8250_port *up);
+unsigned int serial8250_modem_status(struct uart_8250_port *up);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index eadf33d0abb..b67305e3ad5 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -351,6 +351,7 @@ struct uart_port {
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
+#define UPF_IIR_ONCE ((__force upf_t) (1 << 26))
/* The exact UART type is known and should not be probed. */
#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
@@ -483,10 +484,19 @@ static inline int uart_tx_stopped(struct uart_port *port)
/*
* The following are helper functions for the low level drivers.
*/
+
+extern void uart_handle_dcd_change(struct uart_port *uport,
+ unsigned int status);
+extern void uart_handle_cts_change(struct uart_port *uport,
+ unsigned int status);
+
+extern void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag);
+
+#ifdef SUPPORT_SYSRQ
static inline int
uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
{
-#ifdef SUPPORT_SYSRQ
if (port->sysrq) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch);
@@ -495,11 +505,10 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
}
port->sysrq = 0;
}
-#endif
return 0;
}
-#ifndef SUPPORT_SYSRQ
-#define uart_handle_sysrq_char(port,ch) uart_handle_sysrq_char(port, 0)
+#else
+#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
#endif
/*
@@ -522,89 +531,6 @@ static inline int uart_handle_break(struct uart_port *port)
return 0;
}
-/**
- * uart_handle_dcd_change - handle a change of carrier detect state
- * @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
- */
-static inline void
-uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
-{
- struct uart_state *state = uport->state;
- struct tty_port *port = &state->port;
- struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
- struct pps_event_time ts;
-
- if (ld && ld->ops->dcd_change)
- pps_get_ts(&ts);
-
- uport->icount.dcd++;
-#ifdef CONFIG_HARD_PPS
- if ((uport->flags & UPF_HARDPPS_CD) && status)
- hardpps();
-#endif
-
- if (port->flags & ASYNC_CHECK_CD) {
- if (status)
- wake_up_interruptible(&port->open_wait);
- else if (port->tty)
- tty_hangup(port->tty);
- }
-
- if (ld && ld->ops->dcd_change)
- ld->ops->dcd_change(port->tty, status, &ts);
- if (ld)
- tty_ldisc_deref(ld);
-}
-
-/**
- * uart_handle_cts_change - handle a change of clear-to-send state
- * @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
- */
-static inline void
-uart_handle_cts_change(struct uart_port *uport, unsigned int status)
-{
- struct tty_port *port = &uport->state->port;
- struct tty_struct *tty = port->tty;
-
- uport->icount.cts++;
-
- if (port->flags & ASYNC_CTS_FLOW) {
- if (tty->hw_stopped) {
- if (status) {
- tty->hw_stopped = 0;
- uport->ops->start_tx(uport);
- uart_write_wakeup(uport);
- }
- } else {
- if (!status) {
- tty->hw_stopped = 1;
- uport->ops->stop_tx(uport);
- }
- }
- }
-}
-
-#include <linux/tty_flip.h>
-
-static inline void
-uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag)
-{
- struct tty_struct *tty = port->state->port.tty;
-
- if ((status & port->ignore_status_mask & ~overrun) == 0)
- tty_insert_flip_char(tty, ch, flag);
-
- /*
- * Overrun is special. Since it's reported immediately,
- * it doesn't affect the current character.
- */
- if (status & ~port->ignore_status_mask & overrun)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-}
-
/*
* UART_ENABLE_MS - determine if port should enable modem status irqs
*/
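
/*
 * Editor's note: a minimal sketch of a UART driver's receive path using the
 * helpers that are now declared out of line above.  The MYUART_* register
 * offsets and status bits are invented for illustration.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/tty_flip.h>

#define MYUART_RX	0x00		/* hypothetical data register */
#define MYUART_LSR	0x04		/* hypothetical line-status register */
#define MYUART_LSR_DR	BIT(0)		/* data ready */
#define MYUART_LSR_OE	BIT(1)		/* overrun */

static void myuart_rx_chars(struct uart_port *port)
{
	unsigned int status, ch;

	while ((status = readl(port->membase + MYUART_LSR)) & MYUART_LSR_DR) {
		ch = readl(port->membase + MYUART_RX);
		port->icount.rx++;

		/* Let the core swallow a sysrq character, if any. */
		if (uart_handle_sysrq_char(port, ch))
			continue;

		/* Push the character, flagging overruns via the status mask. */
		uart_insert_char(port, status, MYUART_LSR_OE, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(port->state->port.tty);
}
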
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 5812fefbced..b160645f559 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -95,6 +95,7 @@ struct intc_desc {
unsigned int num_resources;
intc_enum force_enable;
intc_enum force_disable;
+ bool skip_syscore_suspend;
struct intc_hw_desc hw;
};
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9291ac3cc62..e4c711c6f32 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -30,7 +30,7 @@ struct shmem_sb_info {
spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
uid_t uid; /* Mount uid for root directory */
gid_t gid; /* Mount gid for root directory */
- mode_t mode; /* Mount mode for root directory */
+ umode_t mode; /* Mount mode for root directory */
struct mempolicy *mpol; /* default memory policy for mappings */
};
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a..07ceb97d53f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
- long nr; /* objs pending delete */
+ atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
diff --git a/include/linux/sigma.h b/include/linux/sigma.h
index e2accb3164d..d0de882c0d9 100644
--- a/include/linux/sigma.h
+++ b/include/linux/sigma.h
@@ -24,7 +24,7 @@ struct sigma_firmware {
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
- u32 crc;
+ __le32 crc;
};
enum {
@@ -40,19 +40,14 @@ enum {
struct sigma_action {
u8 instr;
u8 len_hi;
- u16 len;
- u16 addr;
+ __le16 len;
+ __be16 addr;
unsigned char payload[];
};
static inline u32 sigma_action_len(struct sigma_action *sa)
{
- return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
- return sizeof(*sa) + payload_len + (payload_len % 2);
+ return (sa->len_hi << 16) | le16_to_cpu(sa->len);
}
extern int process_sigma_firmware(struct i2c_client *client, const char *name);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a822300a253..7987ce74874 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -254,6 +254,7 @@ extern void set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
+extern void block_sigmask(struct k_sigaction *ka, int signr);
extern void exit_signals(struct task_struct *tsk);
extern struct kmem_cache *sighand_cachep;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fe864885c1e..50db9b04a55 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -30,6 +30,7 @@
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
* at device setup time.
* NETIF_F_HW_CSUM - it is clever device, it is able to checksum
* everything.
- * NETIF_F_NO_CSUM - loopback or reliable single hop media.
* NETIF_F_IP_CSUM - device is dumb. It is able to csum only
* TCP/UDP over IPv4. Sigh. Vendors like this
* way by an unknown reason. Though, see comment above
@@ -128,13 +128,17 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list. Since
- * GRO uses frags we allocate at least 16 regardless of page size.
+/* To allow 64K frame to be packed as single skb without frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
*/
-#if (65536/PAGE_SIZE + 2) < 16
+#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
typedef struct skb_frag_struct skb_frag_t;
@@ -218,6 +222,9 @@ enum {
/* device driver supports TX zero-copy buffers */
SKBTX_DEV_ZEROCOPY = 1 << 4,
+
+ /* generate wifi status information (where possible) */
+ SKBTX_WIFI_STATUS = 1 << 5,
};
/*
@@ -235,15 +242,15 @@ struct ubuf_info {
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- unsigned short nr_frags;
+ unsigned char nr_frags;
+ __u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
unsigned short gso_type;
- __be32 ip6_frag_id;
- __u8 tx_flags;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
+ __be32 ip6_frag_id;
/*
* Warning : all fields before dataref are cleared in __alloc_skb()
@@ -352,6 +359,8 @@ typedef unsigned char *sk_buff_data_t;
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
* ports.
+ * @wifi_acked_valid: wifi_acked was set
+ * @wifi_acked: whether frame was acked on wifi or not
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
@@ -445,10 +454,11 @@ struct sk_buff {
#endif
__u8 ooo_okay:1;
__u8 l4_rxhash:1;
+ __u8 wifi_acked_valid:1;
+ __u8 wifi_acked:1;
+ /* 10/12 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
- /* 0/13 bit hole */
-
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
#endif
@@ -540,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone, int node);
+extern struct sk_buff *build_skb(void *data);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
@@ -561,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
- gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+ int headroom, gfp_t gfp_mask);
+
extern int pskb_expand_head(struct sk_buff *skb,
int nhead, int ntail,
gfp_t gfp_mask);
@@ -1662,38 +1674,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
}
/**
- * __netdev_alloc_page - allocate a page for ps-rx on a specific device
- * @dev: network device to receive on
- * @gfp_mask: alloc_pages_node mask
- *
- * Allocate a new page. dev currently unused.
- *
- * %NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
- return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- * netdev_alloc_page - allocate a page for ps-rx on a specific device
- * @dev: network device to receive on
- *
- * Allocate a new page. dev currently unused.
- *
- * %NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
- return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
- __free_page(page);
-}
-
-/**
* skb_frag_page - retrieve the page refered to by a paged fragment
* @frag: the paged fragment
*
@@ -1824,6 +1804,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
frag->page_offset + offset, size, dir);
}
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
/**
* skb_clone_writable - is the header of a clone writable
* @skb: buffer to check
@@ -2105,7 +2091,8 @@ extern void skb_split(struct sk_buff *skb,
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb,
+ netdev_features_t features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -2263,6 +2250,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
sw_tx_timestamp(skb);
}
+/**
+ * skb_complete_wifi_ack - deliver skb with wifi status
+ *
+ * @skb: the original outgoing packet
+ * @acked: ack status
+ *
+ */
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
+
extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
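
/*
 * Editor's note: a hedged sketch of the driver side of the new
 * SKBTX_WIFI_STATUS / skb_complete_wifi_ack() machinery.  The stack sets
 * the flag for sockets that requested wifi tx status; the (invented)
 * mydrv_tx_done() completion hook reports the outcome back.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Called from the tx-completion path with the hardware's ack result. */
static void mydrv_tx_done(struct sk_buff *skb, bool hw_acked)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		/* Hands @skb to the socket's error queue with the status. */
		skb_complete_wifi_ack(skb, hw_acked);
	else
		dev_kfree_skb_any(skb);
}
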
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index d00e0bacda9..fbd1117fdfd 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -15,8 +15,6 @@
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
-#include <trace/events/kmem.h>
-
/*
* struct kmem_cache
*
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
new file mode 100644
index 00000000000..ce718cbce43
--- /dev/null
+++ b/include/linux/smscphy.h
@@ -0,0 +1,25 @@
+#ifndef __LINUX_SMSCPHY_H__
+#define __LINUX_SMSCPHY_H__
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */
+
+#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644
index 00000000000..251729a4788
--- /dev/null
+++ b/include/linux/sock_diag.h
@@ -0,0 +1,48 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+
+#include <linux/types.h>
+
+#define SOCK_DIAG_BY_FAMILY 20
+
+struct sock_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+};
+
+enum {
+ SK_MEMINFO_RMEM_ALLOC,
+ SK_MEMINFO_RCVBUF,
+ SK_MEMINFO_WMEM_ALLOC,
+ SK_MEMINFO_SNDBUF,
+ SK_MEMINFO_FWD_ALLOC,
+ SK_MEMINFO_WMEM_QUEUED,
+ SK_MEMINFO_OPTMEM,
+
+ SK_MEMINFO_VARS,
+};
+
+#ifdef __KERNEL__
+struct sk_buff;
+struct nlmsghdr;
+struct sock;
+
+struct sock_diag_handler {
+ __u8 family;
+ int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(struct sock_diag_handler *h);
+void sock_diag_unregister(struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie);
+void sock_diag_save_cookie(void *sk, __u32 *cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+
+extern struct sock *sock_diag_nlsk;
+#endif /* __KERNEL__ */
+#endif
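
/*
 * Editor's note: a hedged sketch of registering a sock_diag handler for one
 * address family.  The dump callback body is elided and the "mydiag" names
 * are invented; AF_INET is used purely as an example family.
 */
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

static int mydiag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/*
	 * A real handler would look at the struct sock_diag_req in the
	 * message payload and reply with SOCK_DIAG_BY_FAMILY messages.
	 */
	return 0;
}

static struct sock_diag_handler mydiag_handler = {
	.family	= AF_INET,		/* one handler per address family */
	.dump	= mydiag_dump,
};

static int __init mydiag_init(void)
{
	return sock_diag_register(&mydiag_handler);
}

static void __exit mydiag_exit(void)
{
	sock_diag_unregister(&mydiag_handler);
}

module_init(mydiag_init);
module_exit(mydiag_exit);
MODULE_LICENSE("GPL");
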
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index bb4f5fbbbd8..176fce9cc6b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -200,6 +200,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
+/**
+ * module_spi_driver() - Helper macro for registering a SPI driver
+ * @__spi_driver: spi_driver struct
+ *
+ * Helper macro for SPI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_spi_driver(__spi_driver) \
+ module_driver(__spi_driver, spi_register_driver, \
+ spi_unregister_driver)
/**
* struct spi_master - interface to SPI master controller
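
/*
 * Editor's note: what the new module_spi_driver() helper replaces.  A
 * hypothetical "mydev" SPI driver can drop its module_init()/module_exit()
 * boilerplate entirely:
 */
#include <linux/module.h>
#include <linux/spi/spi.h>

static int __devinit mydev_probe(struct spi_device *spi)
{
	return 0;
}

static int __devexit mydev_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver mydev_driver = {
	.driver = {
		.name	= "mydev",
		.owner	= THIS_MODULE,
	},
	.probe	= mydev_probe,
	.remove	= __devexit_p(mydev_remove),
};

/* Expands to the usual init/exit pair calling spi_register_driver()
 * and spi_unregister_driver(). */
module_spi_driver(mydev_driver);

MODULE_LICENSE("GPL");
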
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 58971e891f4..e1b005918bb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -28,6 +28,7 @@
#define _LINUX_SRCU_H
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
struct srcu_struct_array {
int c[2];
@@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
__init_srcu_struct((sp), #sp, &__srcu_key); \
})
-# define srcu_read_acquire(sp) \
- lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define srcu_read_release(sp) \
- lock_release(&(sp)->dep_map, 1, _THIS_IP_)
-
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *sp);
-# define srcu_read_acquire(sp) do { } while (0)
-# define srcu_read_release(sp) do { } while (0)
-
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
void cleanup_srcu_struct(struct srcu_struct *sp);
@@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp);
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an SRCU read-side critical section unless it can
* prove otherwise.
+ *
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of view
+ * (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then srcu_read_lock_held() returns false even if
+ * the CPU did an srcu_read_lock(). The reason for this is that RCU
+ * ignores CPUs that are in such a section, considering these as in
+ * extended quiescent state, so such a CPU is effectively never in an
+ * RCU read-side critical section regardless of what RCU primitives it
+ * invokes. This state of affairs is required --- we need to keep an
+ * RCU-free window in idle where the CPU may possibly enter into low
+ * power mode. This way we can notice an extended quiescent state to
+ * other CPUs that started a grace period. Otherwise we would delay any
+ * grace period as long as we run in the idle task.
*/
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
- if (debug_locks)
- return lock_is_held(&sp->dep_map);
- return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
+
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+
+ return lock_is_held(&sp->dep_map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
* one way to indirectly wait on an SRCU grace period is to acquire
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
+ *
+ * Note that srcu_read_lock() and the matching srcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
+ * was invoked in process context.
*/
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
int retval = __srcu_read_lock(sp);
- srcu_read_acquire(sp);
+ rcu_lock_acquire(&(sp)->dep_map);
return retval;
}
@@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__releases(sp)
{
- srcu_read_release(sp);
+ rcu_lock_release(&(sp)->dep_map);
+ __srcu_read_unlock(sp, idx);
+}
+
+/**
+ * srcu_read_lock_raw - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section. Similar to srcu_read_lock(),
+ * but avoids the RCU-lockdep checking. This means that it is legal to
+ * use srcu_read_lock_raw() in one context, for example, in an exception
+ * handler, and then have the matching srcu_read_unlock_raw() in another
+ * context, for example in the task that took the exception.
+ *
+ * However, the entire SRCU read-side critical section must reside within a
+ * single task. For example, beware of using srcu_read_lock_raw() in
+ * a device interrupt handler and srcu_read_unlock_raw() in the interrupted
+ * task: This will not work if interrupts are threaded.
+ */
+static inline int srcu_read_lock_raw(struct srcu_struct *sp)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __srcu_read_lock(sp);
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock_raw().
+ *
+ * Exit an SRCU read-side critical section without lockdep-RCU checking.
+ * See srcu_read_lock_raw() for more details.
+ */
+static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
__srcu_read_unlock(sp, idx);
+ local_irq_restore(flags);
}
#endif
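
/*
 * Editor's note: a hedged sketch of the split-context pattern that
 * srcu_read_lock_raw()/srcu_read_unlock_raw() permit: lock in one context
 * (e.g. on exception entry) and unlock in another, within the same task.
 * "my_srcu" and the entry/exit hooks are invented; a real user would keep
 * the returned index per task rather than in a single global.
 */
#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at boot */
static int my_srcu_idx;			/* per-task in real code, global here */

void my_exception_enter(void)
{
	/* No lockdep checking, so the unlock may legally happen elsewhere. */
	my_srcu_idx = srcu_read_lock_raw(&my_srcu);
}

void my_exception_exit(void)
{
	srcu_read_unlock_raw(&my_srcu, my_srcu_idx);
}
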
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 061e560251b..dcf35b0f303 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -94,6 +94,15 @@ struct ssb_sprom {
} ghz5; /* 5GHz band */
} antenna_gain;
+ struct {
+ struct {
+ u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+ } ghz2;
+ struct {
+ u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+ } ghz5;
+ } fem;
+
/* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */
};
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 98941203a27..c814ae6eeb2 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -432,6 +432,23 @@
#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
#define SSB_SPROM8_RXPO5G_SHIFT 8
+#define SSB_SPROM8_FEM2G 0x00AE
+#define SSB_SPROM8_FEM5G 0x00B0
+#define SSB_SROM8_FEM_TSSIPOS 0x0001
+#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0
+#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006
+#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SSB_SROM8_FEM_PDET_RANGE 0x00F8
+#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SSB_SROM8_FEM_TR_ISO 0x0700
+#define SSB_SROM8_FEM_TR_ISO_SHIFT 8
+#define SSB_SROM8_FEM_ANTSWLUT 0xF800
+#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SSB_SPROM8_THERMAL 0x00B2
+#define SSB_SPROM8_MPWR_RAWTS 0x00B4
+#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6
+#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8
+#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA
#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index febc4dbec2c..7874a8a5663 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -26,6 +26,7 @@ struct auth_cred {
uid_t uid;
gid_t gid;
struct group_info *group_info;
+ const char *principal;
unsigned char machine_cred : 1;
};
@@ -127,7 +128,7 @@ void rpc_destroy_generic_auth(void);
void rpc_destroy_authunix(void);
struct rpc_cred * rpc_lookup_cred(void);
-struct rpc_cred * rpc_lookup_machine_cred(void);
+struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 8eee9dbbfe7..f1cfd4c85cd 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -82,8 +82,8 @@ struct gss_cred {
enum rpc_gss_svc gc_service;
struct gss_cl_ctx __rcu *gc_ctx;
struct gss_upcall_msg *gc_upcall;
+ const char *gc_principal;
unsigned long gc_upcall_timestamp;
- unsigned char gc_machine_cred : 1;
};
#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 5efd8cef389..57531f8e595 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -203,7 +203,7 @@ extern void cache_unregister(struct cache_detail *cd);
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
- mode_t, struct cache_detail *);
+ umode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
extern void qword_add(char **bpp, int *lp, char *str);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 3d8f9c44e27..2c5993a17c3 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
return true;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
const struct sockaddr *sap2)
{
@@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
dsin6->sin6_family = ssin6->sin6_family;
- ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr);
+ dsin6->sin6_addr = ssin6->sin6_addr;
return true;
}
-#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#else /* !IS_ENABLED(CONFIG_IPV6) */
static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
const struct sockaddr *sap2)
{
@@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
{
return false;
}
-#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
/**
* rpc_cmp_addr - compare the address portion of two sockaddrs.
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index e4ea43058d8..2bb03d77375 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -55,7 +55,7 @@ extern int rpc_remove_client_dir(struct dentry *);
struct cache_detail;
extern struct dentry *rpc_create_cache_dir(struct dentry *,
struct qstr *,
- mode_t umode,
+ umode_t umode,
struct cache_detail *);
extern void rpc_remove_cache_dir(struct dentry *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index a20970ef9e4..af70af33354 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
struct xdr_array2_desc *desc);
+extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
+ size_t len);
/*
* Provide some simple tools for XDR buffer overflow-checking etc.
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a692432f8..95040cc3310 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
+#include <linux/freezer.h>
#include <asm/errno.h>
#ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
+extern struct mutex pm_mutex;
+
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+ freezer_do_not_count();
+ mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+ mutex_unlock(&pm_mutex);
+ freezer_count();
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
-#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
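
/*
 * Editor's note: a hedged sketch of the intended use of the reworked
 * lock_system_sleep()/unlock_system_sleep() helpers: a subsystem such as
 * memory hotplug takes them around work that must not race with
 * hibernation, and freezer_do_not_count() keeps the holder from blocking
 * the freezer.  "my_memory_hotadd" is invented for illustration.
 */
#include <linux/suspend.h>

static int my_memory_hotadd(void)
{
	int ret;

	lock_system_sleep();	/* excludes hibernation, freezer-friendly */
	ret = 0;		/* do the hot-add work here */
	unlock_system_sleep();

	return ret;
}
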
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1e22e126d2a..06061a7f8e6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -207,6 +207,7 @@ struct swap_list_t {
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 445702c60d0..e872526fdc5 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -24,7 +24,7 @@ extern int swiotlb_force;
extern void swiotlb_init(int verbose);
extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swioltb_nr_tbl(void);
+extern unsigned long swiotlb_nr_tbl(void);
/*
* Enumeration for sync targets
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 86a24b1166d..515669fa3c1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -475,7 +475,7 @@ asmlinkage long sys_mincore(unsigned long start, size_t len,
asmlinkage long sys_pivot_root(const char __user *new_root,
const char __user *put_old);
asmlinkage long sys_chroot(const char __user *filename);
-asmlinkage long sys_mknod(const char __user *filename, int mode,
+asmlinkage long sys_mknod(const char __user *filename, umode_t mode,
unsigned dev);
asmlinkage long sys_link(const char __user *oldname,
const char __user *newname);
@@ -483,8 +483,8 @@ asmlinkage long sys_symlink(const char __user *old, const char __user *new);
asmlinkage long sys_unlink(const char __user *pathname);
asmlinkage long sys_rename(const char __user *oldname,
const char __user *newname);
-asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
-asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
+asmlinkage long sys_chmod(const char __user *filename, umode_t mode);
+asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
#if BITS_PER_LONG == 32
@@ -517,9 +517,9 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd,
loff_t __user *offset, size_t count);
asmlinkage long sys_readlink(const char __user *path,
char __user *buf, int bufsiz);
-asmlinkage long sys_creat(const char __user *pathname, int mode);
+asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long sys_open(const char __user *filename,
- int flags, int mode);
+ int flags, umode_t mode);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_access(const char __user *filename, int mode);
asmlinkage long sys_vhangup(void);
@@ -582,7 +582,7 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-asmlinkage long sys_mkdir(const char __user *pathname, int mode);
+asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
asmlinkage long sys_fchdir(unsigned int fd);
asmlinkage long sys_rmdir(const char __user *pathname);
@@ -679,7 +679,7 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth);
-asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr);
+asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
asmlinkage long sys_mq_unlink(const char __user *name);
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
@@ -753,11 +753,11 @@ asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
__u32 __user *ustatus);
asmlinkage long sys_spu_create(const char __user *name,
- unsigned int flags, mode_t mode, int fd);
+ unsigned int flags, umode_t mode, int fd);
-asmlinkage long sys_mknodat(int dfd, const char __user * filename, int mode,
+asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
unsigned dev);
-asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, int mode);
+asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag);
asmlinkage long sys_symlinkat(const char __user * oldname,
int newdfd, const char __user * newname);
@@ -769,11 +769,11 @@ asmlinkage long sys_futimesat(int dfd, const char __user *filename,
struct timeval __user *utimes);
asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
- mode_t mode);
+ umode_t mode);
asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
- int mode);
+ umode_t mode);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
struct stat __user *statbuf, int flag);
asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 703cfa33a3c..bb9127dd814 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -1038,7 +1038,7 @@ struct ctl_table
const char *procname; /* Text ID for /proc/sys, or zero */
void *data;
int maxlen;
- mode_t mode;
+ umode_t mode;
struct ctl_table *child;
struct ctl_table *parent; /* Automatically set */
proc_handler *proc_handler; /* Callback for text formatting */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index dac0859e644..0010009b2f0 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -25,7 +25,7 @@ enum kobj_ns_type;
struct attribute {
const char *name;
- mode_t mode;
+ umode_t mode;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key *key;
struct lock_class_key skey;
@@ -55,7 +55,7 @@ do { \
struct attribute_group {
const char *name;
- mode_t (*is_visible)(struct kobject *,
+ umode_t (*is_visible)(struct kobject *,
struct attribute *, int);
struct attribute **attrs;
};
@@ -133,7 +133,7 @@ int __must_check sysfs_create_file(struct kobject *kobj,
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
- const struct attribute *attr, mode_t mode);
+ const struct attribute *attr, umode_t mode);
void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
@@ -221,7 +221,7 @@ static inline int sysfs_create_files(struct kobject *kobj,
}
static inline int sysfs_chmod_file(struct kobject *kobj,
- const struct attribute *attr, mode_t mode)
+ const struct attribute *attr, umode_t mode)
{
return 0;
}
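The is_visible() callback and the attribute mode fields now carry umode_t. A hedged sketch of a driver-side attribute group that hides one attribute at run time; dev_attr_status and dev_attr_debug (assumed declared elsewhere with DEVICE_ATTR()) and my_debug_enabled are illustrative names, not part of the header:

#include <linux/sysfs.h>
#include <linux/device.h>

static bool my_debug_enabled;	/* illustrative runtime switch */

static struct attribute *my_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_debug.attr,
	NULL,
};

static umode_t my_attr_is_visible(struct kobject *kobj,
				  struct attribute *attr, int n)
{
	if (attr == &dev_attr_debug.attr && !my_debug_enabled)
		return 0;		/* attribute not created */
	return attr->mode;		/* keep the declared mode */
}

static const struct attribute_group my_group = {
	.attrs		= my_attrs,
	.is_visible	= my_attr_is_visible,
};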
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7f59ee94698..46a85c9e1f2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -238,6 +238,11 @@ struct tcp_sack_block {
u32 end_seq;
};
+/* These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN (1 << 0) /* 1 = peer is SACK capable */
+#define TCP_FACK_ENABLED (1 << 1) /* 1 = FACK is enabled locally */
+#define TCP_DSACK_SEEN (1 << 2) /* 1 = DSACK was received from peer */
+
struct tcp_options_received {
/* PAWS/RTTM data */
long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
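The new TCP_SACK_SEEN / TCP_FACK_ENABLED / TCP_DSACK_SEEN bits replace magic numbers or-ed into tcp_options_received.sack_ok. Illustrative helpers only (the my_* names are not part of the patch), showing how the bits are meant to be tested:

#include <linux/tcp.h>

/* Sketch: test the sack_ok bits on a struct tcp_options_received. */
static inline bool my_peer_is_sack_capable(const struct tcp_options_received *rx_opt)
{
	return rx_opt->sack_ok & TCP_SACK_SEEN;
}

static inline bool my_fack_enabled(const struct tcp_options_received *rx_opt)
{
	return rx_opt->sack_ok & TCP_FACK_ENABLED;
}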
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b232ccc0ee2..ab8be90b5cc 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -7,6 +7,7 @@
#define _LINUX_TICK_H
#include <linux/clockchips.h>
+#include <linux/irqflags.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
# ifdef CONFIG_NO_HZ
-extern void tick_nohz_stop_sched_tick(int inidle);
-extern void tick_nohz_restart_sched_tick(void);
+extern void tick_nohz_idle_enter(void);
+extern void tick_nohz_idle_exit(void);
+extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
# else
-static inline void tick_nohz_stop_sched_tick(int inidle) { }
-static inline void tick_nohz_restart_sched_tick(void) { }
+static inline void tick_nohz_idle_enter(void) { }
+static inline void tick_nohz_idle_exit(void) { }
+
static inline ktime_t tick_nohz_get_sleep_length(void)
{
ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
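tick_nohz_idle_enter()/tick_nohz_idle_exit() replace the old stop/restart pair and are meant to be called from the arch idle loop. A simplified sketch of the expected call shape; it deliberately omits the RCU and preemption bookkeeping a real idle loop needs, and cpu_idle_sleep() is a placeholder for the arch-specific wait instruction:

#include <linux/tick.h>
#include <linux/sched.h>

/* Sketch of an arch cpu_idle() loop using the renamed hooks. */
void cpu_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* stop the periodic tick */
		while (!need_resched())
			cpu_idle_sleep();	/* placeholder for wfi/hlt/... */
		tick_nohz_idle_exit();		/* restart the tick */
		schedule();
	}
}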
diff --git a/include/linux/types.h b/include/linux/types.h
index 57a97234bec..e5fa5034551 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -24,6 +24,7 @@ typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
+typedef unsigned short umode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
@@ -188,7 +189,7 @@ typedef __u32 __bitwise __wsum;
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid
* common 32/64-bit compat problems.
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other
- * architectures) and to 8-byte boundaries on 64-bit architetures. The new
+ * architectures) and to 8-byte boundaries on 64-bit architectures. The new
* aligned_64 type enforces 8-byte alignment so that structs containing
* aligned_64 values have the same alignment on 32-bit and 64-bit architectures.
* No conversions are necessary between 32-bit user-space and a 64-bit kernel.
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index 5c75153f944..d21b33c4c6c 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -96,13 +96,11 @@ struct ucb1400_gpio {
struct ucb1400_ts {
struct input_dev *ts_idev;
- struct task_struct *ts_task;
int id;
- wait_queue_head_t ts_wait;
- unsigned int ts_restart:1;
int irq;
- unsigned int irq_pending; /* not bit field shared */
struct snd_ac97 *ac97;
+ wait_queue_head_t ts_wait;
+ bool stopped;
};
struct ucb1400 {
diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
new file mode 100644
index 00000000000..b1d2bf16b33
--- /dev/null
+++ b/include/linux/unix_diag.h
@@ -0,0 +1,54 @@
+#ifndef __UNIX_DIAG_H__
+#define __UNIX_DIAG_H__
+
+#include <linux/types.h>
+
+struct unix_diag_req {
+ __u8 sdiag_family;
+ __u8 sdiag_protocol;
+ __u16 pad;
+ __u32 udiag_states;
+ __u32 udiag_ino;
+ __u32 udiag_show;
+ __u32 udiag_cookie[2];
+};
+
+#define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */
+#define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */
+#define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */
+#define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */
+#define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */
+#define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */
+
+struct unix_diag_msg {
+ __u8 udiag_family;
+ __u8 udiag_type;
+ __u8 udiag_state;
+ __u8 pad;
+
+ __u32 udiag_ino;
+ __u32 udiag_cookie[2];
+};
+
+enum {
+ UNIX_DIAG_NAME,
+ UNIX_DIAG_VFS,
+ UNIX_DIAG_PEER,
+ UNIX_DIAG_ICONS,
+ UNIX_DIAG_RQLEN,
+ UNIX_DIAG_MEMINFO,
+
+ UNIX_DIAG_MAX,
+};
+
+struct unix_diag_vfs {
+ __u32 udiag_vfs_ino;
+ __u32 udiag_vfs_dev;
+};
+
+struct unix_diag_rqlen {
+ __u32 udiag_rqueue;
+ __u32 udiag_wqueue;
+};
+
+#endif
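This header only describes the message layout; requests travel over the sock_diag netlink channel. A user-space sketch that fills in a "dump everything" request; the NETLINK_SOCK_DIAG socket and SOCK_DIAG_BY_FAMILY message type are assumed from the companion sock_diag interface and are not defined in this header:

#include <sys/socket.h>
#include <linux/unix_diag.h>

/* Sketch: request name, peer and queue-length info for AF_UNIX sockets. */
struct unix_diag_req req = {
	.sdiag_family	= AF_UNIX,
	.udiag_states	= ~0U,		/* any socket state */
	.udiag_ino	= 0,		/* ino/cookie select one socket for non-dump queries */
	.udiag_cookie	= { ~0U, ~0U },
	.udiag_show	= UDIAG_SHOW_NAME | UDIAG_SHOW_PEER | UDIAG_SHOW_RQLEN,
};
/* req is then wrapped in an nlmsghdr (type SOCK_DIAG_BY_FAMILY, NLM_F_DUMP)
 * and sent on a NETLINK_SOCK_DIAG socket. */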
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d3d0c137433..27a4e16d2bf 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -935,7 +935,7 @@ extern struct bus_type usb_bus_type;
*/
struct usb_class_driver {
char *name;
- char *(*devnode)(struct device *dev, mode_t *mode);
+ char *(*devnode)(struct device *dev, umode_t *mode);
const struct file_operations *fops;
int minor_base;
};
@@ -953,6 +953,18 @@ extern int usb_register_driver(struct usb_driver *, struct module *,
extern void usb_deregister(struct usb_driver *);
+/**
+ * module_usb_driver() - Helper macro for registering a USB driver
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for USB drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_usb_driver(__usb_driver) \
+ module_driver(__usb_driver, usb_register, \
+ usb_deregister)
+
extern int usb_register_device_driver(struct usb_device_driver *,
struct module *);
extern void usb_deregister_device_driver(struct usb_device_driver *);
@@ -1221,6 +1233,7 @@ struct urb {
void *transfer_buffer; /* (in) associated data buffer */
dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
struct scatterlist *sg; /* (in) scatter gather buffer list */
+ int num_mapped_sgs; /* (internal) mapped sg entries */
int num_sgs; /* (in) number of entries in the sg list */
u32 transfer_buffer_length; /* (in) data buffer length */
u32 actual_length; /* (return) actual transfer length */
@@ -1598,6 +1611,19 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
/* ----------------------------------------------------------------------- */
+/* translate USB error codes to codes user space understands */
+static inline int usb_translate_errors(int error_code)
+{
+ switch (error_code) {
+ case 0:
+ case -ENOMEM:
+ case -ENODEV:
+ return error_code;
+ default:
+ return -EIO;
+ }
+}
+
/* Events from the usb core */
#define USB_DEVICE_ADD 0x0001
#define USB_DEVICE_REMOVE 0x0002
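module_usb_driver() removes the module_init()/module_exit() boilerplate for drivers that only register and deregister themselves, and usb_translate_errors() keeps driver-internal USB error codes from leaking to user space (anything other than 0, -ENOMEM or -ENODEV becomes -EIO). A sketch of a trivial driver using the macro; the my_* names and device IDs are made up:

#include <linux/module.h>
#include <linux/usb.h>

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return 0;
}

static void my_disconnect(struct usb_interface *intf)
{
}

static const struct usb_device_id my_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* made-up vendor/product */
	{ }
};
MODULE_DEVICE_TABLE(usb, my_id_table);

static struct usb_driver my_usb_driver = {
	.name		= "my_usb",
	.probe		= my_probe,
	.disconnect	= my_disconnect,
	.id_table	= my_id_table,
};

module_usb_driver(my_usb_driver);	/* replaces module_init()/module_exit() */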
diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
index 4ebaf082417..31fdb4c6ee3 100644
--- a/include/linux/usb/ch11.h
+++ b/include/linux/usb/ch11.h
@@ -26,7 +26,6 @@
#define HUB_RESET_TT 9
#define HUB_GET_TT_STATE 10
#define HUB_STOP_TT 11
-#define HUB_SET_DEPTH 12
/*
* Hub class additional requests defined by USB 3.0 spec
@@ -165,11 +164,20 @@ struct usb_port_status {
* wHubCharacteristics (masks)
* See USB 2.0 spec Table 11-13, offset 3
*/
-#define HUB_CHAR_LPSM 0x0003 /* D1 .. D0 */
-#define HUB_CHAR_COMPOUND 0x0004 /* D2 */
-#define HUB_CHAR_OCPM 0x0018 /* D4 .. D3 */
-#define HUB_CHAR_TTTT 0x0060 /* D6 .. D5 */
-#define HUB_CHAR_PORTIND 0x0080 /* D7 */
+#define HUB_CHAR_LPSM 0x0003 /* Logical Power Switching Mode mask */
+#define HUB_CHAR_COMMON_LPSM 0x0000 /* All ports power control at once */
+#define HUB_CHAR_INDV_PORT_LPSM 0x0001 /* per-port power control */
+#define HUB_CHAR_NO_LPSM 0x0002 /* no power switching */
+
+#define HUB_CHAR_COMPOUND 0x0004 /* hub is part of a compound device */
+
+#define HUB_CHAR_OCPM 0x0018 /* Over-Current Protection Mode mask */
+#define HUB_CHAR_COMMON_OCPM 0x0000 /* All ports Over-Current reporting */
+#define HUB_CHAR_INDV_PORT_OCPM 0x0008 /* per-port Over-current reporting */
+#define HUB_CHAR_NO_OCPM 0x0010 /* No Over-current Protection support */
+
+#define HUB_CHAR_TTTT 0x0060 /* TT Think Time mask */
+#define HUB_CHAR_PORTIND 0x0080 /* per-port indicators (LEDs) */
struct usb_hub_status {
__le16 wHubStatus;
@@ -198,6 +206,17 @@ struct usb_hub_status {
#define USB_DT_HUB_NONVAR_SIZE 7
#define USB_DT_SS_HUB_SIZE 12
+/*
+ * Hub Device descriptor
+ * USB Hub class device protocols
+ */
+
+#define USB_HUB_PR_FS 0 /* Full speed hub */
+#define USB_HUB_PR_HS_NO_TT 0 /* Hi-speed hub without TT */
+#define USB_HUB_PR_HS_SINGLE_TT 1 /* Hi-speed hub with single TT */
+#define USB_HUB_PR_HS_MULTI_TT 2 /* Hi-speed hub with multiple TT */
+#define USB_HUB_PR_SS 3 /* Super speed hub */
+
struct usb_hub_descriptor {
__u8 bDescLength;
__u8 bDescriptorType;
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index d5da6c68c25..61b29057b05 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -605,8 +605,26 @@ struct usb_ss_ep_comp_descriptor {
} __attribute__ ((packed));
#define USB_DT_SS_EP_COMP_SIZE 6
+
/* Bits 4:0 of bmAttributes if this is a bulk endpoint */
-#define USB_SS_MAX_STREAMS(p) (1 << ((p) & 0x1f))
+static inline int
+usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
+{
+ int max_streams;
+
+ if (!comp)
+ return 0;
+
+ max_streams = comp->bmAttributes & 0x1f;
+
+ if (!max_streams)
+ return 0;
+
+ max_streams = 1 << max_streams;
+
+ return max_streams;
+}
+
/* Bits 1:0 of bmAttributes if this is an isoc endpoint */
#define USB_SS_MULT(p) (1 + ((p) & 0x3))
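usb_ss_max_streams() replaces the old USB_SS_MAX_STREAMS() macro and returns 0 both when no SuperSpeed companion descriptor is present and when its stream field is zero. A hedged usage sketch; comp is assumed to point at the endpoint's companion descriptor (or be NULL), and my_stream_count() is illustrative:

#include <linux/usb/ch9.h>

/* Sketch: how many bulk streams can this endpoint offer? */
static unsigned int my_stream_count(const struct usb_ss_ep_comp_descriptor *comp)
{
	int max_streams = usb_ss_max_streams(comp);

	return max_streams ? max_streams : 1;	/* fall back to a single transfer ring */
}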
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 1d3a67523ff..da653b5c713 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
@@ -32,6 +33,9 @@ struct usb_ep;
* @dma: DMA address corresponding to 'buf'. If you don't set this
* field, and the usb controller needs one, it is responsible
* for mapping and unmapping the buffer.
+ * @sg: a scatterlist for SG-capable controllers.
+ * @num_sgs: number of SG entries
+ * @num_mapped_sgs: number of SG entries mapped to DMA (internal)
* @length: Length of that data
* @stream_id: The stream id, when USB3.0 bulk streams are being used
* @no_interrupt: If true, hints that no completion irq is needed.
@@ -88,6 +92,10 @@ struct usb_request {
unsigned length;
dma_addr_t dma;
+ struct scatterlist *sg;
+ unsigned num_sgs;
+ unsigned num_mapped_sgs;
+
unsigned stream_id:16;
unsigned no_interrupt:1;
unsigned zero:1;
@@ -164,7 +172,7 @@ struct usb_ep {
unsigned maxpacket:16;
unsigned max_streams:16;
unsigned mult:2;
- unsigned maxburst:4;
+ unsigned maxburst:5;
u8 address;
const struct usb_endpoint_descriptor *desc;
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -477,8 +485,9 @@ struct usb_gadget_ops {
* driver setup() requests
* @ep_list: List of other endpoints supported by the device.
* @speed: Speed of current connection to USB host.
- * @is_dualspeed: True if the controller supports both high and full speed
- * operation. If it does, the gadget driver must also support both.
+ * @max_speed: Maximal speed the UDC can handle. UDC must support this
+ * and all slower speeds.
+ * @sg_supported: true if we can handle scatter-gather
* @is_otg: True if the USB device port uses a Mini-AB jack, so that the
* gadget driver must provide a USB OTG descriptor.
* @is_a_peripheral: False unless is_otg, the "A" end of a USB cable
@@ -518,7 +527,8 @@ struct usb_gadget {
struct usb_ep *ep0;
struct list_head ep_list; /* of usb_ep */
enum usb_device_speed speed;
- unsigned is_dualspeed:1;
+ enum usb_device_speed max_speed;
+ unsigned sg_supported:1;
unsigned is_otg:1;
unsigned is_a_peripheral:1;
unsigned b_hnp_enable:1;
@@ -549,7 +559,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
static inline int gadget_is_dualspeed(struct usb_gadget *g)
{
#ifdef CONFIG_USB_GADGET_DUALSPEED
- /* runtime test would check "g->is_dualspeed" ... that might be
+ /* runtime test would check "g->max_speed" ... that might be
* useful to work around hardware bugs, but is mostly pointless
*/
return 1;
@@ -567,7 +577,7 @@ static inline int gadget_is_superspeed(struct usb_gadget *g)
{
#ifdef CONFIG_USB_GADGET_SUPERSPEED
/*
- * runtime test would check "g->is_superspeed" ... that might be
+ * runtime test would check "g->max_speed" ... that might be
* useful to work around hardware bugs, but is mostly pointless
*/
return 1;
@@ -760,7 +770,7 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
/**
* struct usb_gadget_driver - driver for usb 'slave' devices
* @function: String describing the gadget's function
- * @speed: Highest speed the driver handles.
+ * @max_speed: Highest speed the driver handles.
* @setup: Invoked for ep0 control requests that aren't handled by
* the hardware level driver. Most calls must be handled by
* the gadget driver, including descriptor and configuration
@@ -824,7 +834,7 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
*/
struct usb_gadget_driver {
char *function;
- enum usb_device_speed speed;
+ enum usb_device_speed max_speed;
void (*unbind)(struct usb_gadget *);
int (*setup)(struct usb_gadget *,
const struct usb_ctrlrequest *);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 03354d557b7..b2f62f3a32a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -99,7 +99,6 @@ struct usb_hcd {
*/
unsigned long flags;
#define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */
-#define HCD_FLAG_SAW_IRQ 1
#define HCD_FLAG_POLL_RH 2 /* poll for rh status? */
#define HCD_FLAG_POLL_PENDING 3 /* status has changed? */
#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */
@@ -110,7 +109,6 @@ struct usb_hcd {
* be slightly faster than test_bit().
*/
#define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE))
-#define HCD_SAW_IRQ(hcd) ((hcd)->flags & (1U << HCD_FLAG_SAW_IRQ))
#define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH))
#define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING))
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index e5a40c31854..0d3f9887925 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -67,6 +67,14 @@ struct renesas_usbhs_platform_callback {
/*
* option:
*
+ * for board specific clock control
+ */
+ void (*power_ctrl)(struct platform_device *pdev,
+ void __iomem *base, int enable);
+
+ /*
+ * option:
+ *
* Phy reset for platform
*/
void (*phy_reset)(struct platform_device *pdev);
@@ -118,7 +126,7 @@ struct renesas_usbhs_driver_param {
*
* delay time from notify_hotplug callback
*/
- int detection_delay;
+ int detection_delay; /* msec */
/*
* option:
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index b29f70b2eca..4267a9c717b 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -58,11 +58,13 @@ enum port_dev_state {
* @read_urb: pointer to the bulk in struct urb for this port.
* @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
* port.
+ * @bulk_in_buffers: pointers to the bulk in buffers for this port
+ * @read_urbs: pointers to the bulk in urbs for this port
+ * @read_urbs_free: status bitmap for the bulk in urbs
* @bulk_out_buffer: pointer to the bulk out buffer for this port.
* @bulk_out_size: the size of the bulk_out_buffer, in bytes.
* @write_urb: pointer to the bulk out struct urb for this port.
* @write_fifo: kfifo used to buffer outgoing data
- * @write_urb_busy: port`s writing status
* @bulk_out_buffers: pointers to the bulk out buffers for this port
* @write_urbs: pointers to the bulk out urbs for this port
 * @write_urbs_free: status bitmap for the bulk out urbs
@@ -99,11 +101,14 @@ struct usb_serial_port {
struct urb *read_urb;
__u8 bulk_in_endpointAddress;
+ unsigned char *bulk_in_buffers[2];
+ struct urb *read_urbs[2];
+ unsigned long read_urbs_free;
+
unsigned char *bulk_out_buffer;
int bulk_out_size;
struct urb *write_urb;
struct kfifo write_fifo;
- int write_urb_busy;
unsigned char *bulk_out_buffers[2];
struct urb *write_urbs[2];
@@ -340,7 +345,7 @@ extern void usb_serial_generic_disconnect(struct usb_serial *serial);
extern void usb_serial_generic_release(struct usb_serial *serial);
extern int usb_serial_generic_register(int debug);
extern void usb_serial_generic_deregister(void);
-extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags);
extern void usb_serial_generic_process_read_urb(struct urb *urb);
extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e9e72bda1b7..5206d6541da 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -102,6 +102,10 @@
* vdev: the virtio_device
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
+ * @bus_name: return the bus name associated with the device
+ * vdev: the virtio_device
+ * This returns a pointer to the bus name, in the style of pci_name(),
+ * which the caller may then copy.
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
@@ -119,6 +123,7 @@ struct virtio_config_ops {
void (*del_vqs)(struct virtio_device *);
u32 (*get_features)(struct virtio_device *vdev);
void (*finalize_features)(struct virtio_device *vdev);
+ const char *(*bus_name)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
@@ -184,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
return ERR_PTR(err);
return vq;
}
+
+static inline
+const char *virtio_bus_name(struct virtio_device *vdev)
+{
+ if (!vdev->config->bus_name)
+ return "virtio";
+ return vdev->config->bus_name(vdev);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_CONFIG_H */
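Transports can now report a bus name through the new bus_name op, and drivers read it with virtio_bus_name(), which falls back to "virtio" when the op is absent. A sketch of a transport-side implementation; my_vdev_to_pci_dev() is a made-up conversion helper standing in for however the transport recovers its underlying device:

#include <linux/pci.h>
#include <linux/virtio_config.h>

/* Sketch: a PCI transport reporting its device name as the bus name. */
static const char *my_vp_bus_name(struct virtio_device *vdev)
{
	struct pci_dev *pci_dev = my_vdev_to_pci_dev(vdev);	/* illustrative */

	return pci_name(pci_dev);
}

/* Wired up as .bus_name = my_vp_bus_name in struct virtio_config_ops;
 * a driver then simply calls virtio_bus_name(vdev). */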
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4bde182fcf9..dcdfc2bda92 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -131,6 +131,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
*/
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
+extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3efc9f3f43a..a9ce45e8501 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -77,13 +77,13 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
#define init_waitqueue_head(q) \
do { \
static struct lock_class_key __key; \
\
- __init_waitqueue_head((q), &__key); \
+ __init_waitqueue_head((q), #q, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index e0aa39612eb..3157cc1fada 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -309,7 +309,7 @@ typedef struct wandev_conf
#define WANOPT_EVEN 2
/* CHDLC Protocol Options */
-/* DF Commmented out for now.
+/* DF Commented out for now.
#define WANOPT_CHDLC_NO_DCD IGNORE_DCD_FOR_LINK_STAT
#define WANOPT_CHDLC_NO_CTS IGNORE_CTS_FOR_LINK_STAT
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 111843f88b2..43ba5b3ce2a 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -53,11 +53,7 @@ struct watchdog_info {
#ifdef __KERNEL__
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-#define WATCHDOG_NOWAYOUT 1
-#else
-#define WATCHDOG_NOWAYOUT 0
-#endif
+#include <linux/bitops.h>
struct watchdog_ops;
struct watchdog_device;
@@ -122,6 +118,21 @@ struct watchdog_device {
#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
};
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+#define WATCHDOG_NOWAYOUT 1
+#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT)
+#else
+#define WATCHDOG_NOWAYOUT 0
+#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
+#endif
+
+/* Use the following function to set the nowayout feature */
+static inline void watchdog_set_nowayout(struct watchdog_device *wdd, int nowayout)
+{
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &wdd->status);
+}
+
/* Use the following functions to manipulate watchdog driver specific data */
static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
{
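WATCHDOG_NOWAYOUT_INIT_STATUS and watchdog_set_nowayout() let a watchdog driver honor the usual nowayout module parameter both statically and at probe time. A hedged sketch; my_wdd, my_probe and the parameter wiring are illustrative, and watchdog_register_device() is the existing core registration call:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

static int nowayout = WATCHDOG_NOWAYOUT;	/* usual module parameter default */
module_param(nowayout, int, 0);

static struct watchdog_device my_wdd = {
	/* .info and .ops filled in by the real driver */
	.status = WATCHDOG_NOWAYOUT_INIT_STATUS,	/* static default */
};

static int my_probe(struct platform_device *pdev)
{
	watchdog_set_nowayout(&my_wdd, nowayout);	/* runtime override */
	return watchdog_register_device(&my_wdd);
}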
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
index 4b697395326..0d6373195d3 100644
--- a/include/linux/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -54,6 +54,9 @@ struct wl12xx_platform_data {
int board_ref_clock;
int board_tcxo_clock;
unsigned long platform_quirks;
+ bool pwr_in_suspend;
+
+ struct wl1271_if_operations *ops;
};
/* Platform does not support level trigger interrupts */
@@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
#endif
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+struct wl12xx_platform_data *wl12xx_get_platform_data(void);
#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0d556deb497..eb8b9f15f2e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -297,32 +297,50 @@ extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *
-__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
- struct lock_class_key *key, const char *lock_name);
+__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
+/**
+ * alloc_workqueue - allocate a workqueue
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @args: args for @fmt
+ *
+ * Allocate a workqueue with the specified parameters. For detailed
+ * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ *
+ * The __lock_name macro dance is to guarantee that a single lock_class_key
+ * doesn't end up with different names, which isn't allowed by lockdep.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
#ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(name, flags, max_active) \
+#define alloc_workqueue(fmt, flags, max_active, args...) \
({ \
static struct lock_class_key __key; \
const char *__lock_name; \
\
- if (__builtin_constant_p(name)) \
- __lock_name = (name); \
+ if (__builtin_constant_p(fmt)) \
+ __lock_name = (fmt); \
else \
- __lock_name = #name; \
+ __lock_name = #fmt; \
\
- __alloc_workqueue_key((name), (flags), (max_active), \
- &__key, __lock_name); \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
+ &__key, __lock_name, ##args); \
})
#else
-#define alloc_workqueue(name, flags, max_active) \
- __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
+ NULL, NULL, ##args)
#endif
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
- * @name: name of the workqueue
+ * @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -331,11 +349,8 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
-static inline struct workqueue_struct *
-alloc_ordered_workqueue(const char *name, unsigned int flags)
-{
- return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
-}
+#define alloc_ordered_workqueue(fmt, flags, args...) \
+ alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
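alloc_workqueue() now takes a printf-style name, and alloc_ordered_workqueue() becomes a macro that forwards the extra arguments. A hedged usage sketch; the card pointer and its fields are illustrative only:

#include <linux/workqueue.h>

/* Sketch: per-device workqueue names built with the new format support. */
struct workqueue_struct *wq;

wq = alloc_workqueue("mydrv/%d", WQ_MEM_RECLAIM, 1, card->id);
if (!wq)
	return -ENOMEM;

/* Ordered variant, same printf-style naming: */
card->ordered_wq = alloc_ordered_workqueue("mydrv-ordered/%d", 0, card->id);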
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a378c295851..995b8bf630a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
+DECLARE_PER_CPU(int, dirty_throttle_leaks);
+
/*
* The 1/4 region under the global dirty thresh is for smooth dirty throttling:
*
@@ -23,11 +25,6 @@
#define DIRTY_SCOPE 8
#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
-/*
- * 4MB minimal write chunk size
- */
-#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10))
-
struct backing_dev_info;
/*
@@ -124,6 +121,7 @@ void laptop_mode_timer_fn(unsigned long data);
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
+bool zone_dirty_ok(struct zone *zone);
extern unsigned long global_dirty_limit;
@@ -138,8 +136,6 @@ extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
-extern unsigned long determine_dirtyable_memory(void);
-
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -195,6 +191,8 @@ void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
+void account_page_redirty(struct page *page);
+
/* pdflush.c */
extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
read-only. */
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
new file mode 100644
index 00000000000..9929b05cff3
--- /dev/null
+++ b/include/media/davinci/vpif_types.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPIF_TYPES_H
+#define _VPIF_TYPES_H
+
+#define VPIF_CAPTURE_MAX_CHANNELS 2
+
+enum vpif_if_type {
+ VPIF_IF_BT656,
+ VPIF_IF_BT1120,
+ VPIF_IF_RAW_BAYER
+};
+
+struct vpif_interface {
+ enum vpif_if_type if_type;
+ unsigned hd_pol:1;
+ unsigned vd_pol:1;
+ unsigned fid_pol:1;
+};
+
+struct vpif_subdev_info {
+ const char *name;
+ struct i2c_board_info board_info;
+ u32 input;
+ u32 output;
+ unsigned can_route:1;
+ struct vpif_interface vpif_if;
+};
+
+struct vpif_display_config {
+ int (*set_clock)(int, int);
+ struct vpif_subdev_info *subdevinfo;
+ int subdev_count;
+ const char **output;
+ int output_count;
+ const char *card_name;
+};
+
+struct vpif_input {
+ struct v4l2_input input;
+ const char *subdev_name;
+};
+
+struct vpif_capture_chan_config {
+ const struct vpif_input *inputs;
+ int input_count;
+};
+
+struct vpif_capture_config {
+ int (*setup_input_channel_mode)(int);
+ int (*setup_input_path)(int, const char *);
+ struct vpif_capture_chan_config chan_config[VPIF_CAPTURE_MAX_CHANNELS];
+ struct vpif_subdev_info *subdev_info;
+ int subdev_count;
+ const char *card_name;
+};
+#endif /* _VPIF_TYPES_H */
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index b1377b931eb..5fb2c3d10c0 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
return icd ? icd->vdev : NULL;
}
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
return container_of(vq, struct soc_camera_device, vb_vidq);
}
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+ return (icd->iface << 8) | (icd->devnum + 1);
+}
+
void soc_camera_lock(struct vb2_queue *vq);
void soc_camera_unlock(struct vb2_queue *vq);
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 1a7e1d20adf..36eace03b2a 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -198,7 +198,8 @@ struct otp_info {
#define MEMISLOCKED _IOR('M', 23, struct erase_info_user)
/*
* Most generic write interface; can write in-band and/or out-of-band in various
- * modes (see "struct mtd_write_req")
+ * modes (see "struct mtd_write_req"). This ioctl is not supported for flashes
+ * without OOB, e.g., NOR flash.
*/
#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 2d70b95b3b5..7184853ca36 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -63,30 +63,16 @@ enum p9_debug_flags {
#ifdef CONFIG_NET_9P_DEBUG
extern unsigned int p9_debug_level;
-
-#define P9_DPRINTK(level, format, arg...) \
-do { \
- if ((p9_debug_level & level) == level) {\
- if (level == P9_DEBUG_9P) \
- printk(KERN_NOTICE "(%8.8d) " \
- format , task_pid_nr(current) , ## arg); \
- else \
- printk(KERN_NOTICE "-- %s (%d): " \
- format , __func__, task_pid_nr(current) , ## arg); \
- } \
-} while (0)
-
+__printf(3, 4)
+void _p9_debug(enum p9_debug_flags level, const char *func,
+ const char *fmt, ...);
+#define p9_debug(level, fmt, ...) \
+ _p9_debug(level, __func__, fmt, ##__VA_ARGS__)
#else
-#define P9_DPRINTK(level, format, arg...) do { } while (0)
+#define p9_debug(level, fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#endif
-
-#define P9_EPRINTK(level, format, arg...) \
-do { \
- printk(level "9p: %s (%d): " \
- format , __func__, task_pid_nr(current), ## arg); \
-} while (0)
-
/**
* enum p9_msg_t - 9P message types
* @P9_TLERROR: not used
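The P9_DPRINTK/P9_EPRINTK macros are folded into a single p9_debug() call that records the calling function automatically and compiles down to no_printk() when CONFIG_NET_9P_DEBUG is off, so the arguments stay type-checked without generating code. A conversion sketch with an illustrative message:

/* Before:
 *	P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d\n", msize);
 * After: */
p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d\n", msize);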
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index cbc6bb0a683..f68dce2d8d8 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -151,7 +151,8 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev,
const struct in6_addr *src_addr);
extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
-extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len);
+extern void addrconf_prefix_rcv(struct net_device *dev,
+ u8 *opt, int len, bool sllao);
/*
* anycast prototypes (anycast.c)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 91ab5b01678..5a4e29b168c 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -11,10 +11,13 @@ extern void unix_notinflight(struct file *fp);
extern void unix_gc(void);
extern void wait_for_unix_gc(void);
extern struct sock *unix_get_socket(struct file *filp);
+extern struct sock *unix_peer_get(struct sock *);
#define UNIX_HASH_SIZE 256
extern unsigned int unix_tot_inflight;
+extern spinlock_t unix_table_lock;
+extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
struct unix_address {
atomic_t refcnt;
@@ -63,6 +66,9 @@ struct unix_sock {
#define peer_wait peer_wq.wait
+long unix_inq_len(struct sock *sk);
+long unix_outq_len(struct sock *sk);
+
#ifdef CONFIG_SYSCTL
extern int unix_sysctl_register(struct net *net);
extern void unix_sysctl_unregister(struct net *net);
diff --git a/include/net/arp.h b/include/net/arp.h
index 4979af8b155..0013dc87940 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -23,7 +23,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct neigh_table *tbl, str
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
- hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
n != NULL;
n = rcu_dereference_bh(n->next)) {
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index 497ef6444a7..5865924d4aa 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -15,7 +15,6 @@
#define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
-#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
struct sk_buff;
@@ -36,24 +35,18 @@ struct clip_vcc {
struct atmarp_entry {
- __be32 ip; /* IP address */
struct clip_vcc *vccs; /* active VCCs; NULL if resolution is
pending */
unsigned long expires; /* entry expiration time */
struct neighbour *neigh; /* neighbour back-pointer */
};
-
#define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
-
struct clip_priv {
int number; /* for convenience ... */
spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */
struct net_device *next; /* next CLIP interface */
};
-
-extern struct neigh_table *clip_tbl_hook;
-
#endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e86af08293a..abaad6ed9b8 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -36,6 +36,11 @@
#define PF_BLUETOOTH AF_BLUETOOTH
#endif
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1 1
+#define BLUETOOTH_VER_1_2 2
+#define BLUETOOTH_VER_2_0 3
+
 /* Reserved for core and driver use */
#define BT_SKB_RESERVE 8
@@ -77,6 +82,33 @@ struct bt_power {
#define BT_POWER_FORCE_ACTIVE_OFF 0
#define BT_POWER_FORCE_ACTIVE_ON 1
+#define BT_CHANNEL_POLICY 10
+
+/* BR/EDR only (default policy)
+ * AMP controllers cannot be used.
+ * Channel move requests from the remote device are denied.
+ * If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY 0
+
+/* BR/EDR Preferred
+ * Allow use of AMP controllers.
+ * If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ * Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1
+
+/* AMP Preferred
+ * Allow use of AMP controllers
+ * If the L2CAP channel is currently on BR/EDR and AMP controller
+ * resources are available, initiate a channel move to AMP.
+ * Channel move requests from the remote device are allowed.
+ * If the L2CAP socket has not been connected yet, try to create
+ * and configure the channel directly on an AMP controller rather
+ * than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+
__printf(2, 3)
int bt_printk(const char *level, const char *fmt, ...);
@@ -158,7 +190,7 @@ struct bt_skb_cb {
__u8 pkt_type;
__u8 incoming;
__u16 expect;
- __u8 tx_seq;
+ __u16 tx_seq;
__u8 retries;
__u8 sar;
unsigned short channel;
@@ -218,32 +250,10 @@ extern void bt_sysfs_cleanup(void);
extern struct dentry *bt_debugfs;
-#ifdef CONFIG_BT_L2CAP
int l2cap_init(void);
void l2cap_exit(void);
-#else
-static inline int l2cap_init(void)
-{
- return 0;
-}
-
-static inline void l2cap_exit(void)
-{
-}
-#endif
-#ifdef CONFIG_BT_SCO
int sco_init(void);
void sco_exit(void);
-#else
-static inline int sco_init(void)
-{
- return 0;
-}
-
-static inline void sco_exit(void)
-{
-}
-#endif
#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaf79af7243..5b2fed5eebf 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -88,6 +88,14 @@ enum {
HCI_RESET,
};
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+ HCI_LE_SCAN,
+};
+
/* HCI ioctl defines */
#define HCIDEVUP _IOW('H', 201, int)
#define HCIDEVDOWN _IOW('H', 202, int)
@@ -202,6 +210,7 @@ enum {
#define LMP_EV4 0x01
#define LMP_EV5 0x02
+#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
#define LMP_SNIFF_SUBR 0x02
@@ -264,6 +273,17 @@ enum {
#define HCI_LK_SMP_IRK 0x82
#define HCI_LK_SMP_CSRK 0x83
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_AUTH_FAILURE 0x05
+#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_REMOTE_USER_TERM 0x13
+#define HCI_ERROR_LOCAL_HOST_TERM 0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+
+/* Flow control modes */
+#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
+#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01
+
/* ----- HCI Commands ---- */
#define HCI_OP_NOP 0x0000
@@ -446,6 +466,14 @@ struct hci_rp_user_confirm_reply {
#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
+#define HCI_OP_USER_PASSKEY_REPLY 0x042e
+struct hci_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f
+
#define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430
struct hci_cp_remote_oob_data_reply {
bdaddr_t bdaddr;
@@ -662,6 +690,12 @@ struct hci_rp_read_local_oob_data {
#define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58
+#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66
+struct hci_rp_read_flow_control_mode {
+ __u8 status;
+ __u8 mode;
+} __packed;
+
#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
struct hci_cp_write_le_host_supported {
__u8 le;
@@ -716,6 +750,14 @@ struct hci_rp_read_bd_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a
+struct hci_rp_read_data_block_size {
+ __u8 status;
+ __le16 max_acl_len;
+ __le16 block_len;
+ __le16 num_blocks;
+} __packed;
+
#define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c
struct hci_cp_write_page_scan_activity {
__le16 interval;
@@ -726,6 +768,21 @@ struct hci_cp_write_page_scan_activity {
#define PAGE_SCAN_TYPE_STANDARD 0x00
#define PAGE_SCAN_TYPE_INTERLACED 0x01
+#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
+struct hci_rp_read_local_amp_info {
+ __u8 status;
+ __u8 amp_status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le32 max_pdu;
+ __u8 amp_type;
+ __le16 pal_cap;
+ __le16 max_assoc_size;
+ __le32 max_flush_to;
+ __le32 be_flush_to;
+} __packed;
+
#define HCI_OP_LE_SET_EVENT_MASK 0x2001
struct hci_cp_le_set_event_mask {
__u8 mask[8];
@@ -738,6 +795,18 @@ struct hci_rp_le_read_buffer_size {
__u8 le_max_pkt;
} __packed;
+#define HCI_OP_LE_SET_SCAN_PARAM 0x200b
+struct hci_cp_le_set_scan_param {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+ __u8 own_address_type;
+ __u8 filter_policy;
+} __packed;
+
+#define LE_SCANNING_DISABLED 0x00
+#define LE_SCANNING_ENABLED 0x01
+
#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c
struct hci_cp_le_set_scan_enable {
__u8 enable;
@@ -913,9 +982,14 @@ struct hci_ev_role_change {
} __packed;
#define HCI_EV_NUM_COMP_PKTS 0x13
+struct hci_comp_pkts_info {
+ __le16 handle;
+ __le16 count;
+} __packed;
+
struct hci_ev_num_comp_pkts {
__u8 num_hndl;
- /* variable length part */
+ struct hci_comp_pkts_info handles[0];
} __packed;
#define HCI_EV_MODE_CHANGE 0x14
@@ -1054,6 +1128,11 @@ struct hci_ev_user_confirm_req {
__le32 passkey;
} __packed;
+#define HCI_EV_USER_PASSKEY_REQUEST 0x34
+struct hci_ev_user_passkey_req {
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
struct hci_ev_remote_oob_data_request {
bdaddr_t bdaddr;
@@ -1309,4 +1388,6 @@ struct hci_inquiry_req {
};
#define IREQ_CACHE_FLUSH 0x0001
+extern int enable_hs;
+
#endif /* __HCI_H */
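The Number of Completed Packets event now ends in a flexible array of hci_comp_pkts_info entries instead of an opaque "variable length part". A sketch of how an event handler would walk it; ev is assumed to point at the event payload inside the received skb:

/* Sketch: credit completed packets back to their connections. */
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
int i;

for (i = 0; i < ev->num_hndl; i++) {
	struct hci_comp_pkts_info *info = &ev->handles[i];
	u16 handle = __le16_to_cpu(info->handle);
	u16 count  = __le16_to_cpu(info->count);

	/* return "count" transmit credits to connection "handle" */
}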
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3779ea36225..5e2e9845849 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -28,9 +28,8 @@
#include <linux/interrupt.h>
#include <net/bluetooth/hci.h>
-/* HCI upper protocols */
-#define HCI_PROTO_L2CAP 0
-#define HCI_PROTO_SCO 1
+/* HCI priority */
+#define HCI_PRIO_MAX 7
/* HCI Core structures */
struct inquiry_data {
@@ -51,14 +50,12 @@ struct inquiry_entry {
};
struct inquiry_cache {
- spinlock_t lock;
__u32 timestamp;
struct inquiry_entry *list;
};
struct hci_conn_hash {
struct list_head list;
- spinlock_t lock;
unsigned int acl_num;
unsigned int sco_num;
unsigned int le_num;
@@ -115,7 +112,7 @@ struct adv_entry {
#define NUM_REASSEMBLY 4
struct hci_dev {
struct list_head list;
- spinlock_t lock;
+ struct mutex lock;
atomic_t refcnt;
char name[8];
@@ -150,6 +147,19 @@ struct hci_dev {
__u16 sniff_min_interval;
__u16 sniff_max_interval;
+ __u8 amp_status;
+ __u32 amp_total_bw;
+ __u32 amp_max_bw;
+ __u32 amp_min_latency;
+ __u32 amp_max_pdu;
+ __u8 amp_type;
+ __u16 amp_pal_cap;
+ __u16 amp_assoc_size;
+ __u32 amp_max_flush_to;
+ __u32 amp_be_flush_to;
+
+ __u8 flow_ctl_mode;
+
unsigned int auto_accept_delay;
unsigned long quirks;
@@ -166,6 +176,11 @@ struct hci_dev {
unsigned int sco_pkts;
unsigned int le_pkts;
+ __u16 block_len;
+ __u16 block_mtu;
+ __u16 num_blocks;
+ __u16 block_cnt;
+
unsigned long acl_last_tx;
unsigned long sco_last_tx;
unsigned long le_last_tx;
@@ -173,13 +188,18 @@ struct hci_dev {
struct workqueue_struct *workqueue;
struct work_struct power_on;
- struct work_struct power_off;
- struct timer_list off_timer;
+ struct delayed_work power_off;
+
+ __u16 discov_timeout;
+ struct delayed_work discov_off;
+
+ struct delayed_work service_cache;
struct timer_list cmd_timer;
- struct tasklet_struct cmd_task;
- struct tasklet_struct rx_task;
- struct tasklet_struct tx_task;
+
+ struct work_struct rx_work;
+ struct work_struct cmd_work;
+ struct work_struct tx_work;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
@@ -195,6 +215,8 @@ struct hci_dev {
__u16 init_last_cmd;
+ struct list_head mgmt_pending;
+
struct inquiry_cache inq_cache;
struct hci_conn_hash conn_hash;
struct list_head blacklist;
@@ -206,7 +228,7 @@ struct hci_dev {
struct list_head remote_oob_data;
struct list_head adv_entries;
- struct timer_list adv_timer;
+ struct delayed_work adv_work;
struct hci_dev_stats stat;
@@ -226,6 +248,8 @@ struct hci_dev {
struct module *owner;
+ unsigned long dev_flags;
+
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
@@ -273,20 +297,19 @@ struct hci_conn {
unsigned int sent;
struct sk_buff_head data_q;
+ struct list_head chan_list;
- struct timer_list disc_timer;
+ struct delayed_work disc_work;
struct timer_list idle_timer;
struct timer_list auto_accept_timer;
- struct work_struct work_add;
- struct work_struct work_del;
-
struct device dev;
atomic_t devref;
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
+ void *smp_conn;
struct hci_conn *link;
@@ -295,25 +318,39 @@ struct hci_conn {
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
};
-extern struct hci_proto *hci_proto[];
+struct hci_chan {
+ struct list_head list;
+
+ struct hci_conn *conn;
+ struct sk_buff_head data_q;
+ unsigned int sent;
+};
+
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
+/* ----- HCI interface to upper protocols ----- */
+extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern int l2cap_disconn_ind(struct hci_conn *hcon);
+extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */
-#define inquiry_cache_lock(c) spin_lock(&c->lock)
-#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
-#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
-#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
-
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
struct inquiry_cache *c = &hdev->inq_cache;
- spin_lock_init(&c->lock);
c->list = NULL;
}
@@ -353,15 +390,15 @@ static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
INIT_LIST_HEAD(&h->list);
- spin_lock_init(&h->lock);
h->acl_num = 0;
h->sco_num = 0;
+ h->le_num = 0;
}
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_add(&c->list, &h->list);
+ list_add_rcu(&c->list, &h->list);
switch (c->type) {
case ACL_LINK:
h->acl_num++;
@@ -379,7 +416,10 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- list_del(&c->list);
+
+ list_del_rcu(&c->list);
+ synchronize_rcu();
+
switch (c->type) {
case ACL_LINK:
h->acl_num--;
@@ -414,14 +454,18 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->handle == handle)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->handle == handle) {
+ rcu_read_unlock();
return c;
+ }
}
+ rcu_read_unlock();
+
return NULL;
}
@@ -429,14 +473,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
__u8 type, bdaddr_t *ba)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && !bacmp(&c->dst, ba))
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
@@ -444,14 +493,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
__u8 type, __u16 state)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
struct hci_conn *c;
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == type && c->state == state)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->state == state) {
+ rcu_read_unlock();
return c;
+ }
}
+
+ rcu_read_unlock();
+
return NULL;
}
@@ -466,6 +520,10 @@ int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+int hci_chan_del(struct hci_chan *chan);
+void hci_chan_list_flush(struct hci_conn *conn);
+
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -475,7 +533,6 @@ int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_conn_enter_sniff_mode(struct hci_conn *conn);
void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
@@ -483,7 +540,7 @@ void hci_conn_put_device(struct hci_conn *conn);
static inline void hci_conn_hold(struct hci_conn *conn)
{
atomic_inc(&conn->refcnt);
- del_timer(&conn->disc_timer);
+ cancel_delayed_work_sync(&conn->disc_work);
}
static inline void hci_conn_put(struct hci_conn *conn)
@@ -502,7 +559,9 @@ static inline void hci_conn_put(struct hci_conn *conn)
} else {
timeo = msecs_to_jiffies(10);
}
- mod_timer(&conn->disc_timer, jiffies + timeo);
+ cancel_delayed_work_sync(&conn->disc_work);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->disc_work, jiffies + timeo);
}
}
@@ -534,10 +593,8 @@ static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \
})
-#define hci_dev_lock(d) spin_lock(&d->lock)
-#define hci_dev_unlock(d) spin_unlock(&d->lock)
-#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
-#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
+#define hci_dev_lock(d) mutex_lock(&d->lock)
+#define hci_dev_unlock(d) mutex_unlock(&d->lock)
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -545,7 +602,7 @@ struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
-int hci_unregister_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
@@ -599,8 +656,9 @@ int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
-int hci_register_sysfs(struct hci_dev *hdev);
-void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_init_sysfs(struct hci_dev *hdev);
+int hci_add_sysfs(struct hci_dev *hdev);
+void hci_del_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -621,53 +679,40 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE)
/* ----- HCI protocols ----- */
-struct hci_proto {
- char *name;
- unsigned int id;
- unsigned long flags;
-
- void *priv;
-
- int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr,
- __u8 type);
- int (*connect_cfm) (struct hci_conn *conn, __u8 status);
- int (*disconn_ind) (struct hci_conn *conn);
- int (*disconn_cfm) (struct hci_conn *conn, __u8 reason);
- int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb,
- __u16 flags);
- int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
- int (*security_cfm) (struct hci_conn *conn, __u8 status,
- __u8 encrypt);
-};
-
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 type)
{
- register struct hci_proto *hp;
- int mask = 0;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
+ switch (type) {
+ case ACL_LINK:
+ return l2cap_connect_ind(hdev, bdaddr);
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_ind)
- mask |= hp->connect_ind(hdev, bdaddr, type);
+ case SCO_LINK:
+ case ESCO_LINK:
+ return sco_connect_ind(hdev, bdaddr);
- return mask;
+ default:
+ BT_ERR("unknown link type %d", type);
+ return -EINVAL;
+ }
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_connect_cfm(conn, status);
+ break;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_connect_cfm(conn, status);
+ break;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->connect_cfm)
- hp->connect_cfm(conn, status);
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
@@ -675,31 +720,29 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
- register struct hci_proto *hp;
- int reason = 0x13;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return HCI_ERROR_REMOTE_USER_TERM;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_ind)
- reason = hp->disconn_ind(conn);
-
- return reason;
+ return l2cap_disconn_ind(conn);
}
static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
- register struct hci_proto *hp;
+ switch (conn->type) {
+ case ACL_LINK:
+ case LE_LINK:
+ l2cap_disconn_cfm(conn, reason);
+ break;
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason);
+ case SCO_LINK:
+ case ESCO_LINK:
+ sco_disconn_cfm(conn, reason);
+ break;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->disconn_cfm)
- hp->disconn_cfm(conn, reason);
+ default:
+ BT_ERR("unknown link type %d", conn->type);
+ break;
+ }
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
@@ -707,21 +750,16 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
- register struct hci_proto *hp;
__u8 encrypt;
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return;
+
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return;
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
-
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ l2cap_security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
@@ -730,23 +768,15 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status,
__u8 encrypt)
{
- register struct hci_proto *hp;
-
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ if (conn->type != ACL_LINK && conn->type != LE_LINK)
+ return;
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->security_cfm)
- hp->security_cfm(conn, status, encrypt);
+ l2cap_security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
-int hci_register_proto(struct hci_proto *hproto);
-int hci_unregister_proto(struct hci_proto *hproto);
-
/* ----- HCI callbacks ----- */
struct hci_cb {
struct list_head list;
@@ -771,13 +801,13 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
- read_lock_bh(&hci_cb_list_lock);
+ read_lock(&hci_cb_list_lock);
list_for_each(p, &hci_cb_list) {
struct hci_cb *cb = list_entry(p, struct hci_cb, list);
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
@@ -793,26 +823,26 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
hci_proto_encrypt_cfm(conn, status, encrypt);
- read_lock_bh(&hci_cb_list_lock);
+ read_lock(&hci_cb_list_lock);
list_for_each(p, &hci_cb_list) {
struct hci_cb *cb = list_entry(p, struct hci_cb, list);
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
struct list_head *p;
- read_lock_bh(&hci_cb_list_lock);
+ read_lock(&hci_cb_list_lock);
list_for_each(p, &hci_cb_list) {
struct hci_cb *cb = list_entry(p, struct hci_cb, list);
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
@@ -820,13 +850,13 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
{
struct list_head *p;
- read_lock_bh(&hci_cb_list_lock);
+ read_lock(&hci_cb_list_lock);
list_for_each(p, &hci_cb_list) {
struct hci_cb *cb = list_entry(p, struct hci_cb, list);
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
}
- read_unlock_bh(&hci_cb_list_lock);
+ read_unlock(&hci_cb_list_lock);
}
int hci_register_cb(struct hci_cb *hcb);
@@ -836,7 +866,7 @@ int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
@@ -849,44 +879,63 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(u16 index);
-int mgmt_index_removed(u16 index);
-int mgmt_powered(u16 index, u8 powered);
-int mgmt_discoverable(u16 index, u8 discoverable);
-int mgmt_connectable(u16 index, u8 connectable);
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent);
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type);
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
-int mgmt_disconnect_failed(u16 index);
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
- u8 confirm_hint);
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
+int mgmt_index_added(struct hci_dev *hdev);
+int mgmt_index_removed(struct hci_dev *hdev);
+int mgmt_powered(struct hci_dev *hdev, u8 powered);
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ u8 persistent);
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type);
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status);
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status);
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status);
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
- u8 *eir);
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name);
-int mgmt_discovering(u16 index, u8 discovering);
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr);
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __le32 value, u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status);
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status);
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
+/* HCI socket flags */
+#define HCI_PI_MGMT_INIT 0
+
struct hci_pinfo {
struct bt_sock bt;
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
unsigned short channel;
+ unsigned long flags;
};
/* HCI security filter */
@@ -915,4 +964,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
void hci_le_ltk_neg_reply(struct hci_conn *conn);
+int hci_do_inquiry(struct hci_dev *hdev, u8 length);
+int hci_cancel_inquiry(struct hci_dev *hdev);
+
#endif /* __HCI_CORE_H */
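
A minimal sketch of the usage pattern implied by the mutex-based hci_dev_lock()/hci_dev_unlock() above: the lock may now sleep, so callers must run in process context (for example a work item) rather than under _bh protection. The helper name and the dev_name field used here are assumptions for illustration only.

static void example_set_name(struct hci_dev *hdev, const u8 *name, size_t len)
{
	hci_dev_lock(hdev);	/* expands to mutex_lock(&hdev->lock), may sleep */
	memcpy(hdev->dev_name, name,
	       min_t(size_t, len, sizeof(hdev->dev_name)));
	hci_dev_unlock(hdev);	/* mutex_unlock(&hdev->lock) */
}
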
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 6cc18f37167..68f58915069 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,17 +27,23 @@
#ifndef __L2CAP_H
#define __L2CAP_H
+#include <asm/unaligned.h>
+
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_MIN_MTU 48
#define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_TX_WINDOW 63
+#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
#define L2CAP_DEFAULT_MAX_TX 3
#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_LE_DEFAULT_MTU 23
+#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_DISC_TIMEOUT (100)
#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */
@@ -91,52 +97,82 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
+#define L2CAP_CREATE_CHAN_REQ 0x0c
+#define L2CAP_CREATE_CHAN_RSP 0x0d
+#define L2CAP_MOVE_CHAN_REQ 0x0e
+#define L2CAP_MOVE_CHAN_RSP 0x0f
+#define L2CAP_MOVE_CHAN_CFM 0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
-/* L2CAP feature mask */
+/* L2CAP extended feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
#define L2CAP_FEAT_RETRANS 0x00000002
+#define L2CAP_FEAT_BIDIR_QOS 0x00000004
#define L2CAP_FEAT_ERTM 0x00000008
#define L2CAP_FEAT_STREAMING 0x00000010
#define L2CAP_FEAT_FCS 0x00000020
+#define L2CAP_FEAT_EXT_FLOW 0x00000040
#define L2CAP_FEAT_FIXED_CHAN 0x00000080
+#define L2CAP_FEAT_EXT_WINDOW 0x00000100
+#define L2CAP_FEAT_UCD 0x00000200
/* L2CAP checksum option */
#define L2CAP_FCS_NONE 0x00
#define L2CAP_FCS_CRC16 0x01
+/* L2CAP fixed channels */
+#define L2CAP_FC_L2CAP 0x02
+#define L2CAP_FC_A2MP 0x08
+
/* L2CAP Control Field bit masks */
-#define L2CAP_CTRL_SAR 0xC000
-#define L2CAP_CTRL_REQSEQ 0x3F00
-#define L2CAP_CTRL_TXSEQ 0x007E
-#define L2CAP_CTRL_RETRANS 0x0080
-#define L2CAP_CTRL_FINAL 0x0080
-#define L2CAP_CTRL_POLL 0x0010
-#define L2CAP_CTRL_SUPERVISE 0x000C
-#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
-
-#define L2CAP_CTRL_TXSEQ_SHIFT 1
-#define L2CAP_CTRL_REQSEQ_SHIFT 8
-#define L2CAP_CTRL_SAR_SHIFT 14
+#define L2CAP_CTRL_SAR 0xC000
+#define L2CAP_CTRL_REQSEQ 0x3F00
+#define L2CAP_CTRL_TXSEQ 0x007E
+#define L2CAP_CTRL_SUPERVISE 0x000C
+
+#define L2CAP_CTRL_RETRANS 0x0080
+#define L2CAP_CTRL_FINAL 0x0080
+#define L2CAP_CTRL_POLL 0x0010
+#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT 1
+#define L2CAP_CTRL_SUPER_SHIFT 2
+#define L2CAP_CTRL_REQSEQ_SHIFT 8
+#define L2CAP_CTRL_SAR_SHIFT 14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR 0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL 0x00040000
+#define L2CAP_EXT_CTRL_FINAL 0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2
+#define L2CAP_EXT_CTRL_SAR_SHIFT 16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT 16
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18
/* L2CAP Supervisory Function */
-#define L2CAP_SUPER_RCV_READY 0x0000
-#define L2CAP_SUPER_REJECT 0x0004
-#define L2CAP_SUPER_RCV_NOT_READY 0x0008
-#define L2CAP_SUPER_SELECT_REJECT 0x000C
+#define L2CAP_SUPER_RR 0x00
+#define L2CAP_SUPER_REJ 0x01
+#define L2CAP_SUPER_RNR 0x02
+#define L2CAP_SUPER_SREJ 0x03
/* L2CAP Segmentation and Reassembly */
-#define L2CAP_SDU_UNSEGMENTED 0x0000
-#define L2CAP_SDU_START 0x4000
-#define L2CAP_SDU_END 0x8000
-#define L2CAP_SDU_CONTINUE 0xC000
+#define L2CAP_SAR_UNSEGMENTED 0x00
+#define L2CAP_SAR_START 0x01
+#define L2CAP_SAR_END 0x02
+#define L2CAP_SAR_CONTINUE 0x03
/* L2CAP Command rej. reasons */
-#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
-#define L2CAP_REJ_MTU_EXCEEDED 0x0001
-#define L2CAP_REJ_INVALID_CID 0x0002
-
+#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000
+#define L2CAP_REJ_MTU_EXCEEDED 0x0001
+#define L2CAP_REJ_INVALID_CID 0x0002
/* L2CAP structures */
struct l2cap_hdr {
@@ -144,6 +180,12 @@ struct l2cap_hdr {
__le16 cid;
} __packed;
#define L2CAP_HDR_SIZE 4
+#define L2CAP_ENH_HDR_SIZE 6
+#define L2CAP_EXT_HDR_SIZE 8
+
+#define L2CAP_FCS_SIZE 2
+#define L2CAP_SDULEN_SIZE 2
+#define L2CAP_PSMLEN_SIZE 2
struct l2cap_cmd_hdr {
__u8 code;
@@ -188,14 +230,15 @@ struct l2cap_conn_rsp {
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
-/* connect result */
+/* connect/create channel results */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
+#define L2CAP_CR_BAD_AMP 0x0005
-/* connect status */
+/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
@@ -217,6 +260,8 @@ struct l2cap_conf_rsp {
#define L2CAP_CONF_UNACCEPT 0x0001
#define L2CAP_CONF_REJECT 0x0002
#define L2CAP_CONF_UNKNOWN 0x0003
+#define L2CAP_CONF_PENDING 0x0004
+#define L2CAP_CONF_EFS_REJECT 0x0005
struct l2cap_conf_opt {
__u8 type;
@@ -233,6 +278,8 @@ struct l2cap_conf_opt {
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_RFC 0x04
#define L2CAP_CONF_FCS 0x05
+#define L2CAP_CONF_EFS 0x06
+#define L2CAP_CONF_EWS 0x07
#define L2CAP_CONF_MAX_SIZE 22
@@ -251,6 +298,21 @@ struct l2cap_conf_rfc {
#define L2CAP_MODE_ERTM 0x03
#define L2CAP_MODE_STREAMING 0x04
+struct l2cap_conf_efs {
+ __u8 id;
+ __u8 stype;
+ __le16 msdu;
+ __le32 sdu_itime;
+ __le32 acc_lat;
+ __le32 flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC 0x00
+#define L2CAP_SERV_BESTEFFORT 0x01
+#define L2CAP_SERV_GUARANTEED 0x02
+
+#define L2CAP_BESTEFFORT_ID 0x01
+
struct l2cap_disconn_req {
__le16 dcid;
__le16 scid;
@@ -271,14 +333,57 @@ struct l2cap_info_rsp {
__u8 data[0];
} __packed;
+struct l2cap_create_chan_req {
+ __le16 psm;
+ __le16 scid;
+ __u8 amp_id;
+} __packed;
+
+struct l2cap_create_chan_rsp {
+ __le16 dcid;
+ __le16 scid;
+ __le16 result;
+ __le16 status;
+} __packed;
+
+struct l2cap_move_chan_req {
+ __le16 icid;
+ __u8 dest_amp_id;
+} __packed;
+
+struct l2cap_move_chan_rsp {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MR_SUCCESS 0x0000
+#define L2CAP_MR_PEND 0x0001
+#define L2CAP_MR_BAD_ID 0x0002
+#define L2CAP_MR_SAME_ID 0x0003
+#define L2CAP_MR_NOT_SUPP 0x0004
+#define L2CAP_MR_COLLISION 0x0005
+#define L2CAP_MR_NOT_ALLOWED 0x0006
+
+struct l2cap_move_chan_cfm {
+ __le16 icid;
+ __le16 result;
+} __packed;
+
+#define L2CAP_MC_CONFIRMED 0x0000
+#define L2CAP_MC_UNCONFIRMED 0x0001
+
+struct l2cap_move_chan_cfm_rsp {
+ __le16 icid;
+} __packed;
+
/* info type */
-#define L2CAP_IT_CL_MTU 0x0001
-#define L2CAP_IT_FEAT_MASK 0x0002
-#define L2CAP_IT_FIXED_CHAN 0x0003
+#define L2CAP_IT_CL_MTU 0x0001
+#define L2CAP_IT_FEAT_MASK 0x0002
+#define L2CAP_IT_FIXED_CHAN 0x0003
/* info result */
-#define L2CAP_IR_SUCCESS 0x0000
-#define L2CAP_IR_NOTSUPP 0x0001
+#define L2CAP_IR_SUCCESS 0x0000
+#define L2CAP_IR_NOTSUPP 0x0001
struct l2cap_conn_param_update_req {
__le16 min;
@@ -297,7 +402,7 @@ struct l2cap_conn_param_update_rsp {
/* ----- L2CAP channels and connections ----- */
struct srej_list {
- __u8 tx_seq;
+ __u16 tx_seq;
struct list_head list;
};
@@ -319,14 +424,11 @@ struct l2cap_chan {
__u16 flush_to;
__u8 mode;
__u8 chan_type;
+ __u8 chan_policy;
__le16 sport;
__u8 sec_level;
- __u8 role_switch;
- __u8 force_reliable;
- __u8 flushable;
- __u8 force_active;
__u8 ident;
@@ -337,7 +439,8 @@ struct l2cap_chan {
__u8 fcs;
- __u8 tx_win;
+ __u16 tx_win;
+ __u16 tx_win_max;
__u8 max_tx;
__u16 retrans_timeout;
__u16 monitor_timeout;
@@ -345,29 +448,45 @@ struct l2cap_chan {
unsigned long conf_state;
unsigned long conn_state;
-
- __u8 next_tx_seq;
- __u8 expected_ack_seq;
- __u8 expected_tx_seq;
- __u8 buffer_seq;
- __u8 buffer_seq_srej;
- __u8 srej_save_reqseq;
- __u8 frames_sent;
- __u8 unacked_frames;
+ unsigned long flags;
+
+ __u16 next_tx_seq;
+ __u16 expected_ack_seq;
+ __u16 expected_tx_seq;
+ __u16 buffer_seq;
+ __u16 buffer_seq_srej;
+ __u16 srej_save_reqseq;
+ __u16 frames_sent;
+ __u16 unacked_frames;
__u8 retry_count;
__u8 num_acked;
__u16 sdu_len;
struct sk_buff *sdu;
struct sk_buff *sdu_last_frag;
- __u8 remote_tx_win;
+ __u16 remote_tx_win;
__u8 remote_max_tx;
__u16 remote_mps;
- struct timer_list chan_timer;
- struct timer_list retrans_timer;
- struct timer_list monitor_timer;
- struct timer_list ack_timer;
+ __u8 local_id;
+ __u8 local_stype;
+ __u16 local_msdu;
+ __u32 local_sdu_itime;
+ __u32 local_acc_lat;
+ __u32 local_flush_to;
+
+ __u8 remote_id;
+ __u8 remote_stype;
+ __u16 remote_msdu;
+ __u32 remote_sdu_itime;
+ __u32 remote_acc_lat;
+ __u32 remote_flush_to;
+
+ struct delayed_work chan_timer;
+ struct delayed_work retrans_timer;
+ struct delayed_work monitor_timer;
+ struct delayed_work ack_timer;
+
struct sk_buff *tx_send_head;
struct sk_buff_head tx_q;
struct sk_buff_head srej_q;
@@ -391,6 +510,7 @@ struct l2cap_ops {
struct l2cap_conn {
struct hci_conn *hcon;
+ struct hci_chan *hchan;
bdaddr_t *dst;
bdaddr_t *src;
@@ -402,7 +522,7 @@ struct l2cap_conn {
__u8 info_state;
__u8 info_ident;
- struct timer_list info_timer;
+ struct delayed_work info_timer;
spinlock_t lock;
@@ -412,11 +532,11 @@ struct l2cap_conn {
__u8 disc_reason;
- struct timer_list security_timer;
+ struct delayed_work security_timer;
struct smp_chan *smp_chan;
struct list_head chan_l;
- rwlock_t chan_lock;
+ struct mutex chan_lock;
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
@@ -445,6 +565,9 @@ enum {
CONF_CONNECT_PEND,
CONF_NO_FCS_RECV,
CONF_STATE2_DEVICE,
+ CONF_EWS_RECV,
+ CONF_LOC_CONF_PEND,
+ CONF_REM_CONF_PEND,
};
#define L2CAP_CONF_MAX_CONF_REQ 2
@@ -462,6 +585,44 @@ enum {
CONN_RNR_SENT,
};
+/* Definitions for flags in l2cap_chan */
+enum {
+ FLAG_ROLE_SWITCH,
+ FLAG_FORCE_ACTIVE,
+ FLAG_FORCE_RELIABLE,
+ FLAG_FLUSHABLE,
+ FLAG_EXT_CTRL,
+ FLAG_EFS_ENABLE,
+};
+
+static inline void l2cap_chan_hold(struct l2cap_chan *c)
+{
+ atomic_inc(&c->refcnt);
+}
+
+static inline void l2cap_chan_put(struct l2cap_chan *c)
+{
+ if (atomic_dec_and_test(&c->refcnt))
+ kfree(c);
+}
+
+static inline void l2cap_set_timer(struct l2cap_chan *chan,
+ struct delayed_work *work, long timeout)
+{
+ BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
+
+ if (!__cancel_delayed_work(work))
+ l2cap_chan_hold(chan);
+ schedule_delayed_work(work, timeout);
+}
+
+static inline void l2cap_clear_timer(struct l2cap_chan *chan,
+ struct delayed_work *work)
+{
+ if (__cancel_delayed_work(work))
+ l2cap_chan_put(chan);
+}
+
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
@@ -474,6 +635,22 @@ enum {
L2CAP_DEFAULT_ACK_TO);
#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
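
The timers above are now delayed work items, and l2cap_set_timer()/l2cap_clear_timer() pair every pending timer with a channel reference via l2cap_chan_hold()/l2cap_chan_put(). A minimal usage sketch; the 5 second timeout and the function names are illustrative.

static void example_arm_chan_timer(struct l2cap_chan *chan)
{
	/* takes a reference if no timer was already pending */
	__set_chan_timer(chan, 5 * HZ);
}

static void example_stop_chan_timer(struct l2cap_chan *chan)
{
	/* drops that reference again if a timer was still pending */
	__clear_chan_timer(chan);
}
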
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+ int offset;
+
+ offset = (seq1 - seq2) % (chan->tx_win_max + 1);
+ if (offset < 0)
+ offset += (chan->tx_win_max + 1);
+
+ return offset;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+ return (seq + 1) % (chan->tx_win_max + 1);
+}
+
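
A minimal sketch of the wrap-around arithmetic implemented by __seq_offset() and __next_seq() above, assuming a channel that still uses the classic 6-bit window (tx_win_max = 63, i.e. sequence numbers counted modulo 64):

static void example_seq_math(struct l2cap_chan *chan)
{
	chan->tx_win_max = 63;			/* illustrative value only */

	/* 2 is four frames ahead of 62 once the counter wraps modulo 64 ... */
	WARN_ON(__seq_offset(chan, 2, 62) != 4);

	/* ... and the successor of 63 wraps back to 0. */
	WARN_ON(__next_seq(chan, 63) != 0);
}
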
static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
{
int sub;
@@ -486,13 +663,164 @@ static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
return sub == ch->remote_tx_win;
}
-#define __get_txseq(ctrl) (((ctrl) & L2CAP_CTRL_TXSEQ) >> 1)
-#define __get_reqseq(ctrl) (((ctrl) & L2CAP_CTRL_REQSEQ) >> 8)
-#define __is_iframe(ctrl) (!((ctrl) & L2CAP_CTRL_FRAME_TYPE))
-#define __is_sframe(ctrl) ((ctrl) & L2CAP_CTRL_FRAME_TYPE)
-#define __is_sar_start(ctrl) (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
+static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
+ L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ else
+ return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+}
+
+static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_REQSEQ;
+ else
+ return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
+}
+
+static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
+ L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ else
+ return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+}
+
+static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
+ L2CAP_EXT_CTRL_TXSEQ;
+ else
+ return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
+}
+
+static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
+ else
+ return ctrl & L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u32 __set_sframe(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_CTRL_FRAME_TYPE;
+ else
+ return L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ else
+ return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+}
+
+static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
+ else
+ return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
+}
+
+static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
+{
+ return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
+}
+
+static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_CTRL_SAR;
+ else
+ return L2CAP_CTRL_SAR;
+}
+
+static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
+ L2CAP_EXT_CTRL_SUPER_SHIFT;
+ else
+ return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+}
+
+static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
+ L2CAP_EXT_CTRL_SUPERVISE;
+ else
+ return (super << L2CAP_CTRL_SUPER_SHIFT) &
+ L2CAP_CTRL_SUPERVISE;
+}
+
+static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_CTRL_FINAL;
+ else
+ return L2CAP_CTRL_FINAL;
+}
+
+static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return ctrl & L2CAP_EXT_CTRL_FINAL;
+ else
+ return ctrl & L2CAP_CTRL_FINAL;
+}
+
+static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_CTRL_POLL;
+ else
+ return L2CAP_CTRL_POLL;
+}
+
+static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return ctrl & L2CAP_EXT_CTRL_POLL;
+ else
+ return ctrl & L2CAP_CTRL_POLL;
+}
+
+static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return get_unaligned_le32(p);
+ else
+ return get_unaligned_le16(p);
+}
+
+static inline void __put_control(struct l2cap_chan *chan, __u32 control,
+ void *p)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return put_unaligned_le32(control, p);
+ else
+ return put_unaligned_le16(control, p);
+}
+
+static inline __u8 __ctrl_size(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
+ else
+ return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
+}
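
A hedged sketch of how the control-field accessors above compose: building a Receiver Ready (RR) S-frame control word that acknowledges reqseq. FLAG_EXT_CTRL on the channel decides whether the 16-bit enhanced or 32-bit extended layout is produced; __put_control() then stores the word with the matching width and __ctrl_size() gives the number of bytes to reserve. The function name is illustrative.

static __u32 example_build_rr(struct l2cap_chan *chan, __u16 reqseq, bool final)
{
	__u32 control = __set_sframe(chan) |
			__set_ctrl_super(chan, L2CAP_SUPER_RR) |
			__set_reqseq(chan, reqseq);

	if (final)
		control |= __set_ctrl_final(chan);

	return control;
}
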
-extern int disable_ertm;
+extern bool disable_ertm;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
@@ -506,8 +834,11 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(struct sock *sk);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
void l2cap_chan_destroy(struct l2cap_chan *chan);
-int l2cap_chan_connect(struct l2cap_chan *chan);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
#endif /* __L2CAP_H */
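
A hedged sketch of the reworked l2cap_chan_connect() above, which now takes the PSM, a fixed CID and the destination address explicitly. Here a channel is connected to the remote SDP service (PSM 0x0001); error handling is simplified and the function name is illustrative.

static int example_connect_sdp(struct sock *sk, bdaddr_t *dst)
{
	struct l2cap_chan *chan = l2cap_chan_create(sk);
	int err;

	if (!chan)
		return -ENOMEM;

	err = l2cap_chan_connect(chan, cpu_to_le16(0x0001), 0, dst);
	if (err)
		l2cap_chan_destroy(chan);

	return err;
}
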
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d66da0f94f9..be65d341788 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -23,6 +23,23 @@
#define MGMT_INDEX_NONE 0xFFFF
+#define MGMT_STATUS_SUCCESS 0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND 0x01
+#define MGMT_STATUS_NOT_CONNECTED 0x02
+#define MGMT_STATUS_FAILED 0x03
+#define MGMT_STATUS_CONNECT_FAILED 0x04
+#define MGMT_STATUS_AUTH_FAILED 0x05
+#define MGMT_STATUS_NOT_PAIRED 0x06
+#define MGMT_STATUS_NO_RESOURCES 0x07
+#define MGMT_STATUS_TIMEOUT 0x08
+#define MGMT_STATUS_ALREADY_CONNECTED 0x09
+#define MGMT_STATUS_BUSY 0x0a
+#define MGMT_STATUS_REJECTED 0x0b
+#define MGMT_STATUS_NOT_SUPPORTED 0x0c
+#define MGMT_STATUS_INVALID_PARAMS 0x0d
+#define MGMT_STATUS_DISCONNECTED 0x0e
+#define MGMT_STATUS_NOT_POWERED 0x0f
+
struct mgmt_hdr {
__le16 opcode;
__le16 index;
@@ -44,22 +61,29 @@ struct mgmt_rp_read_index_list {
/* Reserve one extra byte for names in management messages so that they
* are always guaranteed to be nul-terminated */
#define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1)
+#define MGMT_MAX_SHORT_NAME_LENGTH (10 + 1)
+
+#define MGMT_SETTING_POWERED 0x00000001
+#define MGMT_SETTING_CONNECTABLE 0x00000002
+#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004
+#define MGMT_SETTING_DISCOVERABLE 0x00000008
+#define MGMT_SETTING_PAIRABLE 0x00000010
+#define MGMT_SETTING_LINK_SECURITY 0x00000020
+#define MGMT_SETTING_SSP 0x00000040
+#define MGMT_SETTING_BREDR 0x00000080
+#define MGMT_SETTING_HS 0x00000100
+#define MGMT_SETTING_LE 0x00000200
#define MGMT_OP_READ_INFO 0x0004
struct mgmt_rp_read_info {
- __u8 type;
- __u8 powered;
- __u8 connectable;
- __u8 discoverable;
- __u8 pairable;
- __u8 sec_mode;
bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
__u8 dev_class[3];
- __u8 features[8];
- __u16 manufacturer;
- __u8 hci_ver;
- __u16 hci_rev;
__u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
} __packed;
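
The read-info reply now reports controller state as two little-endian settings bitmasks instead of individual byte fields. A minimal sketch of decoding them with the MGMT_SETTING_* bits defined above; the helper name is hypothetical.

static bool example_powered_le(const struct mgmt_rp_read_info *rp)
{
	u32 settings = le32_to_cpu(rp->current_settings);

	return (settings & MGMT_SETTING_POWERED) &&
	       (settings & MGMT_SETTING_LE);
}
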
struct mgmt_mode {
@@ -69,70 +93,97 @@ struct mgmt_mode {
#define MGMT_OP_SET_POWERED 0x0005
#define MGMT_OP_SET_DISCOVERABLE 0x0006
+struct mgmt_cp_set_discoverable {
+ __u8 val;
+ __u16 timeout;
+} __packed;
#define MGMT_OP_SET_CONNECTABLE 0x0007
-#define MGMT_OP_SET_PAIRABLE 0x0008
+#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008
-#define MGMT_OP_ADD_UUID 0x0009
-struct mgmt_cp_add_uuid {
- __u8 uuid[16];
- __u8 svc_hint;
-} __packed;
+#define MGMT_OP_SET_PAIRABLE 0x0009
-#define MGMT_OP_REMOVE_UUID 0x000A
-struct mgmt_cp_remove_uuid {
- __u8 uuid[16];
-} __packed;
+#define MGMT_OP_SET_LINK_SECURITY 0x000A
+
+#define MGMT_OP_SET_SSP 0x000B
+
+#define MGMT_OP_SET_HS 0x000C
+
+#define MGMT_OP_SET_LE 0x000D
-#define MGMT_OP_SET_DEV_CLASS 0x000B
+#define MGMT_OP_SET_DEV_CLASS 0x000E
struct mgmt_cp_set_dev_class {
__u8 major;
__u8 minor;
} __packed;
-#define MGMT_OP_SET_SERVICE_CACHE 0x000C
-struct mgmt_cp_set_service_cache {
- __u8 enable;
+#define MGMT_OP_SET_LOCAL_NAME 0x000F
+struct mgmt_cp_set_local_name {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+} __packed;
+
+#define MGMT_OP_ADD_UUID 0x0010
+struct mgmt_cp_add_uuid {
+ __u8 uuid[16];
+ __u8 svc_hint;
+} __packed;
+
+#define MGMT_OP_REMOVE_UUID 0x0011
+struct mgmt_cp_remove_uuid {
+ __u8 uuid[16];
} __packed;
-struct mgmt_key_info {
+struct mgmt_link_key_info {
bdaddr_t bdaddr;
u8 type;
u8 val[16];
u8 pin_len;
- u8 dlen;
- u8 data[0];
} __packed;
-#define MGMT_OP_LOAD_KEYS 0x000D
-struct mgmt_cp_load_keys {
+#define MGMT_OP_LOAD_LINK_KEYS 0x0012
+struct mgmt_cp_load_link_keys {
__u8 debug_keys;
__le16 key_count;
- struct mgmt_key_info keys[0];
+ struct mgmt_link_key_info keys[0];
} __packed;
-#define MGMT_OP_REMOVE_KEY 0x000E
-struct mgmt_cp_remove_key {
+#define MGMT_OP_REMOVE_KEYS 0x0013
+struct mgmt_cp_remove_keys {
bdaddr_t bdaddr;
__u8 disconnect;
} __packed;
+struct mgmt_rp_remove_keys {
+ bdaddr_t bdaddr;
+ __u8 status;
+};
-#define MGMT_OP_DISCONNECT 0x000F
+#define MGMT_OP_DISCONNECT 0x0014
struct mgmt_cp_disconnect {
bdaddr_t bdaddr;
} __packed;
struct mgmt_rp_disconnect {
bdaddr_t bdaddr;
+ __u8 status;
} __packed;
-#define MGMT_OP_GET_CONNECTIONS 0x0010
+#define MGMT_ADDR_BREDR 0x00
+#define MGMT_ADDR_LE_PUBLIC 0x01
+#define MGMT_ADDR_LE_RANDOM 0x02
+#define MGMT_ADDR_INVALID 0xff
+
+struct mgmt_addr_info {
+ bdaddr_t bdaddr;
+ __u8 type;
+} __packed;
+
+#define MGMT_OP_GET_CONNECTIONS 0x0015
struct mgmt_rp_get_connections {
__le16 conn_count;
- bdaddr_t conn[0];
+ struct mgmt_addr_info addr[0];
} __packed;
-#define MGMT_OP_PIN_CODE_REPLY 0x0011
+#define MGMT_OP_PIN_CODE_REPLY 0x0016
struct mgmt_cp_pin_code_reply {
bdaddr_t bdaddr;
__u8 pin_len;
@@ -143,27 +194,27 @@ struct mgmt_rp_pin_code_reply {
uint8_t status;
} __packed;
-#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012
+#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017
struct mgmt_cp_pin_code_neg_reply {
bdaddr_t bdaddr;
} __packed;
-#define MGMT_OP_SET_IO_CAPABILITY 0x0013
+#define MGMT_OP_SET_IO_CAPABILITY 0x0018
struct mgmt_cp_set_io_capability {
__u8 io_capability;
} __packed;
-#define MGMT_OP_PAIR_DEVICE 0x0014
+#define MGMT_OP_PAIR_DEVICE 0x0019
struct mgmt_cp_pair_device {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
__u8 io_cap;
} __packed;
struct mgmt_rp_pair_device {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
__u8 status;
} __packed;
-#define MGMT_OP_USER_CONFIRM_REPLY 0x0015
+#define MGMT_OP_USER_CONFIRM_REPLY 0x001A
struct mgmt_cp_user_confirm_reply {
bdaddr_t bdaddr;
} __packed;
@@ -172,48 +223,69 @@ struct mgmt_rp_user_confirm_reply {
__u8 status;
} __packed;
-#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
+#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B
+struct mgmt_cp_user_confirm_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
-#define MGMT_OP_SET_LOCAL_NAME 0x0017
-struct mgmt_cp_set_local_name {
- __u8 name[MGMT_MAX_NAME_LENGTH];
+#define MGMT_OP_USER_PASSKEY_REPLY 0x001C
+struct mgmt_cp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __le32 passkey;
+} __packed;
+struct mgmt_rp_user_passkey_reply {
+ bdaddr_t bdaddr;
+ __u8 status;
} __packed;
-#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D
+struct mgmt_cp_user_passkey_neg_reply {
+ bdaddr_t bdaddr;
+} __packed;
+
+#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E
struct mgmt_rp_read_local_oob_data {
__u8 hash[16];
__u8 randomizer[16];
} __packed;
-#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019
+#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F
struct mgmt_cp_add_remote_oob_data {
bdaddr_t bdaddr;
__u8 hash[16];
__u8 randomizer[16];
} __packed;
-#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A
+#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020
struct mgmt_cp_remove_remote_oob_data {
bdaddr_t bdaddr;
} __packed;
-#define MGMT_OP_START_DISCOVERY 0x001B
+#define MGMT_OP_START_DISCOVERY 0x0021
+struct mgmt_cp_start_discovery {
+ __u8 type;
+} __packed;
-#define MGMT_OP_STOP_DISCOVERY 0x001C
+#define MGMT_OP_STOP_DISCOVERY 0x0022
-#define MGMT_OP_BLOCK_DEVICE 0x001D
-struct mgmt_cp_block_device {
+#define MGMT_OP_CONFIRM_NAME 0x0023
+struct mgmt_cp_confirm_name {
bdaddr_t bdaddr;
+ __u8 name_known;
+} __packed;
+struct mgmt_rp_confirm_name {
+ bdaddr_t bdaddr;
+ __u8 status;
} __packed;
-#define MGMT_OP_UNBLOCK_DEVICE 0x001E
-struct mgmt_cp_unblock_device {
+#define MGMT_OP_BLOCK_DEVICE 0x0024
+struct mgmt_cp_block_device {
bdaddr_t bdaddr;
} __packed;
-#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F
-struct mgmt_cp_set_fast_connectable {
- __u8 enable;
+#define MGMT_OP_UNBLOCK_DEVICE 0x0025
+struct mgmt_cp_unblock_device {
+ bdaddr_t bdaddr;
} __packed;
#define MGMT_EV_CMD_COMPLETE 0x0001
@@ -237,83 +309,82 @@ struct mgmt_ev_controller_error {
#define MGMT_EV_INDEX_REMOVED 0x0005
-#define MGMT_EV_POWERED 0x0006
+#define MGMT_EV_NEW_SETTINGS 0x0006
-#define MGMT_EV_DISCOVERABLE 0x0007
-
-#define MGMT_EV_CONNECTABLE 0x0008
+#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007
+struct mgmt_ev_class_of_dev_changed {
+ __u8 dev_class[3];
+};
-#define MGMT_EV_PAIRABLE 0x0009
+#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008
+struct mgmt_ev_local_name_changed {
+ __u8 name[MGMT_MAX_NAME_LENGTH];
+ __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH];
+} __packed;
-#define MGMT_EV_NEW_KEY 0x000A
-struct mgmt_ev_new_key {
+#define MGMT_EV_NEW_LINK_KEY 0x0009
+struct mgmt_ev_new_link_key {
__u8 store_hint;
- struct mgmt_key_info key;
+ struct mgmt_link_key_info key;
} __packed;
-#define MGMT_EV_CONNECTED 0x000B
-struct mgmt_ev_connected {
- bdaddr_t bdaddr;
- __u8 link_type;
-} __packed;
+#define MGMT_EV_CONNECTED 0x000A
-#define MGMT_EV_DISCONNECTED 0x000C
-struct mgmt_ev_disconnected {
- bdaddr_t bdaddr;
-} __packed;
+#define MGMT_EV_DISCONNECTED 0x000B
-#define MGMT_EV_CONNECT_FAILED 0x000D
+#define MGMT_EV_CONNECT_FAILED 0x000C
struct mgmt_ev_connect_failed {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
__u8 status;
} __packed;
-#define MGMT_EV_PIN_CODE_REQUEST 0x000E
+#define MGMT_EV_PIN_CODE_REQUEST 0x000D
struct mgmt_ev_pin_code_request {
bdaddr_t bdaddr;
__u8 secure;
} __packed;
-#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
+#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E
struct mgmt_ev_user_confirm_request {
bdaddr_t bdaddr;
__u8 confirm_hint;
__le32 value;
} __packed;
+#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F
+struct mgmt_ev_user_passkey_request {
+ bdaddr_t bdaddr;
+} __packed;
+
#define MGMT_EV_AUTH_FAILED 0x0010
struct mgmt_ev_auth_failed {
bdaddr_t bdaddr;
__u8 status;
} __packed;
-#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011
-struct mgmt_ev_local_name_changed {
- __u8 name[MGMT_MAX_NAME_LENGTH];
-} __packed;
-
-#define MGMT_EV_DEVICE_FOUND 0x0012
+#define MGMT_EV_DEVICE_FOUND 0x0011
struct mgmt_ev_device_found {
- bdaddr_t bdaddr;
+ struct mgmt_addr_info addr;
__u8 dev_class[3];
__s8 rssi;
+ __u8 confirm_name;
__u8 eir[HCI_MAX_EIR_LENGTH];
} __packed;
-#define MGMT_EV_REMOTE_NAME 0x0013
+#define MGMT_EV_REMOTE_NAME 0x0012
struct mgmt_ev_remote_name {
bdaddr_t bdaddr;
__u8 name[MGMT_MAX_NAME_LENGTH];
} __packed;
-#define MGMT_EV_DISCOVERING 0x0014
+#define MGMT_EV_DISCOVERING 0x0013
-#define MGMT_EV_DEVICE_BLOCKED 0x0015
+#define MGMT_EV_DEVICE_BLOCKED 0x0014
struct mgmt_ev_device_blocked {
bdaddr_t bdaddr;
} __packed;
-#define MGMT_EV_DEVICE_UNBLOCKED 0x0016
+#define MGMT_EV_DEVICE_UNBLOCKED 0x0015
struct mgmt_ev_device_unblocked {
bdaddr_t bdaddr;
} __packed;
diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
index 15b97d54944..aeaf5fa2b9f 100644
--- a/include/net/bluetooth/smp.h
+++ b/include/net/bluetooth/smp.h
@@ -115,6 +115,10 @@ struct smp_cmd_security_req {
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
+#define SMP_FLAG_TK_VALID 1
+#define SMP_FLAG_CFM_PENDING 2
+#define SMP_FLAG_MITM_AUTH 3
+
struct smp_chan {
struct l2cap_conn *conn;
u8 preq[7]; /* SMP Pairing Request */
@@ -124,6 +128,7 @@ struct smp_chan {
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
u8 smp_key_size;
+ unsigned long smp_flags;
struct crypto_blkcipher *tfm;
struct work_struct confirm;
struct work_struct random;
@@ -134,6 +139,7 @@ struct smp_chan {
int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
void smp_chan_destroy(struct l2cap_conn *conn);
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index c011281d92c..ef2dd9438bb 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -9,6 +9,7 @@
#include <net/caif/caif_layer.h>
#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
#include <linux/caif/caif_socket.h>
#include <linux/if.h>
#include <linux/net.h>
@@ -104,4 +105,24 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
*/
void caif_free_client(struct cflayer *adap_layer);
+/**
+ * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer
+ * @dev: Network device to enroll.
+ * @caifdev: Configuration information from CAIF Link Layer
+ * @link_support: Link layer support layer
+ * @head_room: Head room needed by link support layer
+ * @layer: Lowest layer in CAIF stack
+ * @rcv_func: Receive function for CAIF stack.
+ *
+ * This function enrolls a CAIF link layer into the CAIF stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any Link Layer specific
+ * framing.
+ */
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer, int (**rcv_func)(
+ struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *));
+
#endif /* CAIF_DEV_H_ */
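
A hedged sketch of calling the new caif_enroll_dev() documented above; the link-support layer and the 4-byte head room are illustrative values.

static void example_enroll(struct net_device *dev,
			   struct caif_dev_common *caifdev,
			   struct cflayer *link_support)
{
	struct cflayer *bottom;
	int (*rcv)(struct sk_buff *, struct net_device *,
		   struct packet_type *, struct net_device *);

	caif_enroll_dev(dev, caifdev, link_support, 4, &bottom, &rcv);

	/* 'bottom' is now the lowest layer of the CAIF stack for this device,
	 * and 'rcv' is the receive hook to install as dev's packet handler. */
}
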
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 35bc7883cf9..0f3a39125f9 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -121,9 +121,7 @@ enum caif_direction {
 * @transmit: Packet transmit function.
* @ctrlcmd: Used for control signalling upwards in the stack.
* @modemcmd: Used for control signaling downwards in the stack.
- * @prio: Priority of this layer.
* @id: The identity of this layer
- * @type: The type of this layer
* @name: Name of the layer.
*
* This structure defines the layered structure in CAIF.
@@ -230,9 +228,7 @@ struct cflayer {
*/
int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
- unsigned short prio;
unsigned int id;
- unsigned int type;
char name[CAIF_LAYER_NAME_SZ];
};
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index 87c3d11b8e5..aa6a485b054 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -55,8 +55,8 @@
struct cfspi_xfer {
u16 tx_dma_len;
u16 rx_dma_len;
- void *va_tx;
- dma_addr_t pa_tx;
+ void *va_tx[2];
+ dma_addr_t pa_tx[2];
void *va_rx;
dma_addr_t pa_rx;
};
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 3e93a4a4b67..90b4ff8bad8 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -14,18 +14,6 @@
struct cfcnfg;
/**
- * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack
- *
- * @CFPHYTYPE_FRAG: Fragmented frames physical interface.
- * @CFPHYTYPE_CAIF: Generic CAIF physical interface
- */
-enum cfcnfg_phy_type {
- CFPHYTYPE_FRAG = 1,
- CFPHYTYPE_CAIF,
- CFPHYTYPE_MAX
-};
-
-/**
* enum cfcnfg_phy_preference - Physical preference HW Abstraction
*
* @CFPHYPREF_UNSPECIFIED: Default physical interface
@@ -66,21 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg);
* cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
* @cnfg: Pointer to a CAIF configuration object, created by
* cfcnfg_create().
- * @phy_type: Specifies the type of physical interface, e.g.
- * CFPHYTYPE_FRAG.
* @dev: Pointer to link layer device
* @phy_layer: Specify the physical layer. The transmit function
* MUST be set in the structure.
* @pref: The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
* @fcs: Specify if checksum is used in CAIF Framing Layer.
- * @stx: Specify if Start Of Frame eXtention is used.
+ * @head_room: Head space needed by link specific protocol.
*/
-
void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
- bool fcs, bool stx);
+ struct cflayer *link_support,
+ bool fcs, int head_room);
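
A hedged sketch of the new cfcnfg_add_phy_layer() signature, registering a physical interface together with a link-support layer; the preference, FCS and head-room values are illustrative.

static void example_add_phy(struct cfcnfg *cnfg, struct net_device *dev,
			    struct cflayer *phy, struct cflayer *link_support)
{
	cfcnfg_add_phy_layer(cnfg, dev, phy, CFPHYPREF_UNSPECIFIED,
			     link_support, true /* fcs */, 4 /* head_room */);
}
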
/**
* cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack.
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
index b8374321b36..f121299a342 100644
--- a/include/net/caif/cfserl.h
+++ b/include/net/caif/cfserl.h
@@ -8,5 +8,5 @@
#define CFSERL_H_
#include <net/caif/caif_layer.h>
-struct cflayer *cfserl_create(int type, int instance, bool use_stx);
-#endif /* CFSERL_H_ */
+struct cflayer *cfserl_create(int instance, bool use_stx);
+#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 95852e36713..15f4be7d768 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -391,6 +391,8 @@ struct cfg80211_crypto_settings {
* @assocresp_ies: extra information element(s) to add into (Re)Association
* Response frames or %NULL
* @assocresp_ies_len: length of assocresp_ies in octets
+ * @probe_resp_len: length of probe response template (@probe_resp)
+ * @probe_resp: probe response template (AP mode only)
*/
struct beacon_parameters {
u8 *head, *tail;
@@ -408,6 +410,8 @@ struct beacon_parameters {
size_t proberesp_ies_len;
const u8 *assocresp_ies;
size_t assocresp_ies_len;
+ int probe_resp_len;
+ u8 *probe_resp;
};
/**
@@ -501,6 +505,7 @@ struct station_parameters {
* @STATION_INFO_CONNECTED_TIME: @connected_time filled
* @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
* @STATION_INFO_STA_FLAGS: @sta_flags filled
+ * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
*/
enum station_info_flags {
STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -521,7 +526,8 @@ enum station_info_flags {
STATION_INFO_BSS_PARAM = 1<<15,
STATION_INFO_CONNECTED_TIME = 1<<16,
STATION_INFO_ASSOC_REQ_IES = 1<<17,
- STATION_INFO_STA_FLAGS = 1<<18
+ STATION_INFO_STA_FLAGS = 1<<18,
+ STATION_INFO_BEACON_LOSS_COUNT = 1<<19
};
/**
@@ -619,6 +625,7 @@ struct sta_bss_parameters {
* the cfg80211_new_sta() calls to notify user space of the IEs.
* @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
* @sta_flags: station flags mask & values
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
*/
struct station_info {
u32 filled;
@@ -646,6 +653,8 @@ struct station_info {
const u8 *assoc_req_ies;
size_t assoc_req_ies_len;
+ u32 beacon_loss_count;
+
/*
* Note: Add a new enum station_info_flags value for each new field and
* use it to check which fields are initialized.
@@ -778,6 +787,7 @@ struct mesh_config {
u16 min_discovery_timeout;
u32 dot11MeshHWMPactivePathTimeout;
u16 dot11MeshHWMPpreqMinInterval;
+ u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
u16 dot11MeshHWMPRannInterval;
@@ -798,6 +808,7 @@ struct mesh_config {
* @ie_len: length of vendor information elements
* @is_authenticated: this mesh requires authentication
* @is_secure: this mesh uses security
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
*
* These parameters are fixed when the mesh is created.
*/
@@ -810,6 +821,7 @@ struct mesh_setup {
u8 ie_len;
bool is_authenticated;
bool is_secure;
+ int mcast_rate[IEEE80211_NUM_BANDS];
};
/**
@@ -1040,6 +1052,15 @@ struct cfg80211_auth_request {
};
/**
+ * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
+ *
+ * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
+ */
+enum cfg80211_assoc_req_flags {
+ ASSOC_REQ_DISABLE_HT = BIT(0),
+};
+
+/**
* struct cfg80211_assoc_request - (Re)Association request data
*
* This structure provides information needed to complete IEEE 802.11
@@ -1050,6 +1071,10 @@ struct cfg80211_auth_request {
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
* @crypto: crypto settings
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -1057,6 +1082,9 @@ struct cfg80211_assoc_request {
size_t ie_len;
struct cfg80211_crypto_settings crypto;
bool use_mfp;
+ u32 flags;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
};
/**
@@ -1126,6 +1154,7 @@ struct cfg80211_ibss_params {
u8 *ssid;
u8 *bssid;
struct ieee80211_channel *channel;
+ enum nl80211_channel_type channel_type;
u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
@@ -1155,6 +1184,10 @@ struct cfg80211_ibss_params {
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
+ * @flags: See &enum cfg80211_assoc_req_flags
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1168,6 +1201,9 @@ struct cfg80211_connect_params {
struct cfg80211_crypto_settings crypto;
const u8 *key;
u8 key_len, key_idx;
+ u32 flags;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
};
/**
@@ -1315,7 +1351,12 @@ struct cfg80211_gtk_rekey_data {
*
* @add_station: Add a new station.
* @del_station: Remove a station; @mac may be NULL to remove all stations.
- * @change_station: Modify a given station.
+ * @change_station: Modify a given station. Note that flags changes are not much
+ * validated in cfg80211, in particular the auth/assoc/authorized flags
+ * might come to the driver in invalid combinations -- make sure to check
+ * them, also against the existing state! Also, supported_rates changes are
+ * not checked in station mode -- drivers need to reject (or ignore) them
+ * for anything but TDLS peers.
* @get_station: get station information for the station identified by @mac
* @dump_station: dump station callback -- resume dump at index @idx
*
@@ -1342,6 +1383,9 @@ struct cfg80211_gtk_rekey_data {
* doesn't verify much. Note, however, that the passed netdev may be
* %NULL as well if the user requested changing the channel for the
* device itself, or for a monitor interface.
+ * @get_channel: Get the current operating channel, should return %NULL if
+ * there's no single defined operating channel, for example when the
+ * device implements channel hopping for multi-channel virtual interfaces.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1369,7 +1413,8 @@ struct cfg80211_gtk_rekey_data {
* have changed. The actual parameter values are available in
* struct wiphy. If returning an error, no value should be changed.
*
- * @set_tx_power: set the transmit power according to the parameters
+ * @set_tx_power: set the transmit power according to the parameters,
+ * the power passed is in mBm, to get dBm use MBM_TO_DBM().
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
@@ -1432,6 +1477,11 @@ struct cfg80211_gtk_rekey_data {
*
* @tdls_mgmt: Transmit a TDLS management frame.
* @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
+ *
+ * @probe_client: probe an associated client, must return a cookie that it
+ * later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1585,7 +1635,7 @@ struct cfg80211_ops {
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie);
+ bool dont_wait_for_ack, u64 *cookie);
int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
struct net_device *dev,
u64 cookie);
@@ -1621,6 +1671,15 @@ struct cfg80211_ops {
u16 status_code, const u8 *buf, size_t len);
int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
+
+ int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u64 *cookie);
+
+ int (*set_noack_map)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 noack_map);
+
+ struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
};
/*
@@ -1645,7 +1704,9 @@ struct cfg80211_ops {
* regulatory domain no user regulatory domain can enable these channels
* at a later time. This can be used for devices which do not have
* calibration information guaranteed for frequencies or settings
- * outside of its regulatory domain.
+ * outside of its regulatory domain. If used in combination with
+ * WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
+ * will be followed.
* @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
* that passive scan flags and beaconing flags may not be lifted by
* cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -1679,6 +1740,14 @@ struct cfg80211_ops {
* teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
* command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
* used for asking the driver/firmware to perform a TDLS operation.
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
+ * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
+ * when there are virtual interfaces in AP mode by calling
+ * cfg80211_report_obss_beacon().
+ * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
+ * responds to probe-requests in hardware.
+ * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
+ * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -1697,6 +1766,11 @@ enum wiphy_flags {
WIPHY_FLAG_AP_UAPSD = BIT(14),
WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
+ WIPHY_FLAG_HAVE_AP_SME = BIT(17),
+ WIPHY_FLAG_REPORTS_OBSS = BIT(18),
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
+ WIPHY_FLAG_OFFCHAN_TX = BIT(20),
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
};
/**
@@ -1869,6 +1943,7 @@ struct wiphy_wowlan_support {
* @software_iftypes: bitmask of software interface types, these are not
* subject to any restrictions since they are purely managed in SW.
* @flags: wiphy flags, see &enum wiphy_flags
+ * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
* @bss_priv_size: each BSS struct has private data allocated with it,
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -1907,6 +1982,10 @@ struct wiphy_wowlan_support {
* may request, if implemented.
*
* @wowlan: WoWLAN support information
+ *
+ * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
+ * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden.
+ * If null, then none can be over-ridden.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -1928,7 +2007,9 @@ struct wiphy {
/* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
u16 interface_modes;
- u32 flags;
+ u32 flags, features;
+
+ u32 ap_sme_capa;
enum cfg80211_signal_type signal_type;
@@ -1960,6 +2041,13 @@ struct wiphy {
u32 available_antennas_tx;
u32 available_antennas_rx;
+ /*
+ * Bitmap of supported protocols for probe response offloading
+ * see &enum nl80211_probe_resp_offload_support_attr. Only valid
+ * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ */
+ u32 probe_resp_offload;
+
/* If multiple wiphys are registered and you're handed e.g.
* a regular netdev with assigned ieee80211_ptr, you won't
* know whether it points to a wiphy your driver has registered
@@ -1987,6 +2075,8 @@ struct wiphy {
/* dir in debugfs: ieee80211/<wiphyname> */
struct dentry *debugfsdir;
+ const struct ieee80211_ht_cap *ht_capa_mod_mask;
+
#ifdef CONFIG_NET_NS
/* the network namespace this phy lives in currently */
struct net *_net;
@@ -2183,6 +2273,8 @@ struct wireless_dev {
int beacon_interval;
+ u32 ap_unexpected_nlpid;
+
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -2349,69 +2441,6 @@ extern int ieee80211_radiotap_iterator_next(
extern const unsigned char rfc1042_header[6];
extern const unsigned char bridge_tunnel_header[6];
-/* Parsed Information Elements */
-struct ieee802_11_elems {
- u8 *ie_start;
- size_t total_len;
-
- /* pointers to IEs */
- u8 *ssid;
- u8 *supp_rates;
- u8 *fh_params;
- u8 *ds_params;
- u8 *cf_params;
- struct ieee80211_tim_ie *tim;
- u8 *ibss_params;
- u8 *challenge;
- u8 *wpa;
- u8 *rsn;
- u8 *erp_info;
- u8 *ext_supp_rates;
- u8 *wmm_info;
- u8 *wmm_param;
- struct ieee80211_ht_cap *ht_cap_elem;
- struct ieee80211_ht_info *ht_info_elem;
- struct ieee80211_meshconf_ie *mesh_config;
- u8 *mesh_id;
- u8 *peering;
- u8 *preq;
- u8 *prep;
- u8 *perr;
- struct ieee80211_rann_ie *rann;
- u8 *ch_switch_elem;
- u8 *country_elem;
- u8 *pwr_constr_elem;
- u8 *quiet_elem; /* first quite element */
- u8 *timeout_int;
-
- /* length of them, respectively */
- u8 ssid_len;
- u8 supp_rates_len;
- u8 fh_params_len;
- u8 ds_params_len;
- u8 cf_params_len;
- u8 tim_len;
- u8 ibss_params_len;
- u8 challenge_len;
- u8 wpa_len;
- u8 rsn_len;
- u8 erp_info_len;
- u8 ext_supp_rates_len;
- u8 wmm_info_len;
- u8 wmm_param_len;
- u8 mesh_id_len;
- u8 peering_len;
- u8 preq_len;
- u8 prep_len;
- u8 perr_len;
- u8 ch_switch_elem_len;
- u8 country_elem_len;
- u8 pwr_constr_elem_len;
- u8 quiet_elem_len;
- u8 num_of_quiet_elem; /* can be more the one */
- u8 timeout_int_len;
-};
-
/**
* ieee80211_get_hdrlen_from_skb - get header length from data
*
@@ -2636,8 +2665,10 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
*/
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *channel,
struct ieee80211_mgmt *mgmt, size_t len,
@@ -2659,8 +2690,10 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
*
* This informs cfg80211 that BSS information was found and
* the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
*/
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
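
A hedged sketch of the reference discipline the __must_check annotations above enforce: the BSS entry returned by cfg80211_inform_bss_frame() now carries a reference that the caller must drop with cfg80211_put_bss() once it is done with the entry. The function name is illustrative.

static void example_report_frame(struct wiphy *wiphy,
				 struct ieee80211_channel *chan,
				 struct ieee80211_mgmt *mgmt, size_t len,
				 s32 signal, gfp_t gfp)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, chan, mgmt, len, signal, gfp);
	if (!bss)
		return;

	/* ...inspect or remember the entry as needed... */

	cfg80211_put_bss(bss);	/* release the reference returned above */
}
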
@@ -3043,6 +3076,32 @@ void cfg80211_roamed(struct net_device *dev,
const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
/**
+ * cfg80211_roamed_bss - notify cfg80211 of roaming
+ *
+ * @dev: network device
+ * @bss: entry of bss to which STA got roamed
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @gfp: allocation flags
+ *
+ * This is just a wrapper to notify cfg80211 of a roaming event, with the
+ * driver passing the bss entry to avoid a race in its timeout. It should be
+ * called by the underlying driver whenever it roamed from one AP to another
+ * while connected. Drivers which have roaming implemented in firmware
+ * may use this function to avoid a race in bss entry timeout where the bss
+ * entry of the new AP is seen in the driver, but gets timed out by the time
+ * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * rdev->event_work. In case of any failures, the reference is released
+ * either in cfg80211_roamed_bss() or in __cfg80211_roamed(). Otherwise,
+ * it will be released while disconnecting from the current bss.
+ */
+void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+
+/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
*
* @dev: network device
@@ -3189,6 +3248,74 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
const u8 *bssid, bool preauth, gfp_t gfp);
+/**
+ * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * a spurious class 3 frame was received, to be able to deauth the
+ * sender.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription).
+ */
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * an associated station sent a 4addr frame but that wasn't expected.
+ * It is allowed and desirable to send this event only once for each
+ * station to avoid event flooding.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription).
+ */
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_probe_status - notify userspace about probe status
+ * @dev: the device the probe was sent on
+ * @addr: the address of the peer
+ * @cookie: the cookie filled in @probe_client previously
+ * @acked: indicates whether probe was acked or not
+ * @gfp: allocation flags
+ */
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+ u64 cookie, bool acked, gfp_t gfp);
+
+/**
+ * cfg80211_report_obss_beacon - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on
+ * @gfp: allocation flags
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, gfp_t gfp);
+
+/**
+ * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
+ * @wiphy: the wiphy
+ * @chan: main channel
+ * @channel_type: HT mode
+ */
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
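With the two BSS-reporting helpers above now annotated __must_check and documented as returning a referenced entry, the caller owns a reference it has to drop. A minimal sketch of the expected pattern, assuming rx_channel, mgmt, len and signal_dbm stand for whatever the driver just received:

	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame(wiphy, rx_channel, mgmt, len,
					signal_dbm, GFP_ATOMIC);
	if (!bss)
		return;			/* entry could not be added/updated */

	/* ... optionally inspect the entry ... */

	cfg80211_put_bss(bss);		/* release the reference handed back */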
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 839f768f9e3..7828ebf99ee 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -11,6 +11,11 @@
#ifndef __LINUX_NET_DSA_H
#define __LINUX_NET_DSA_H
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
#define DSA_MAX_SWITCHES 4
#define DSA_MAX_PORTS 12
@@ -54,8 +59,143 @@ struct dsa_platform_data {
struct dsa_chip_data *chip;
};
-extern bool dsa_uses_dsa_tags(void *dsa_ptr);
-extern bool dsa_uses_trailer_tags(void *dsa_ptr);
+struct dsa_switch_tree {
+ /*
+ * Configuration data for the platform device that owns
+ * this dsa switch tree instance.
+ */
+ struct dsa_platform_data *pd;
+
+ /*
+ * Reference to network device to use, and which tagging
+ * protocol to use.
+ */
+ struct net_device *master_netdev;
+ __be16 tag_protocol;
+
+ /*
+ * The switch and port to which the CPU is attached.
+ */
+ s8 cpu_switch;
+ s8 cpu_port;
+
+ /*
+ * Link state polling.
+ */
+ int link_poll_needed;
+ struct work_struct link_poll_work;
+ struct timer_list link_poll_timer;
+
+ /*
+ * Data for the individual switch chips.
+ */
+ struct dsa_switch *ds[DSA_MAX_SWITCHES];
+};
+
+struct dsa_switch {
+ /*
+ * Parent switch tree, and switch index.
+ */
+ struct dsa_switch_tree *dst;
+ int index;
+
+ /*
+ * Configuration data for this switch.
+ */
+ struct dsa_chip_data *pd;
+
+ /*
+ * The used switch driver.
+ */
+ struct dsa_switch_driver *drv;
+
+ /*
+ * Reference to mii bus to use.
+ */
+ struct mii_bus *master_mii_bus;
+
+ /*
+ * Slave mii_bus and devices for the individual ports.
+ */
+ u32 dsa_port_mask;
+ u32 phys_port_mask;
+ struct mii_bus *slave_mii_bus;
+ struct net_device *ports[DSA_MAX_PORTS];
+};
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+ return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+}
+
+static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+
+ /*
+ * If this is the root switch (i.e. the switch that connects
+ * to the CPU), return the cpu port number on this switch.
+ * Else return the (DSA) port number that connects to the
+ * switch that is one hop closer to the cpu.
+ */
+ if (dst->cpu_switch == ds->index)
+ return dst->cpu_port;
+ else
+ return ds->pd->rtable[dst->cpu_switch];
+}
+
+struct dsa_switch_driver {
+ struct list_head list;
+
+ __be16 tag_protocol;
+ int priv_size;
+
+ /*
+ * Probing and setup.
+ */
+ char *(*probe)(struct mii_bus *bus, int sw_addr);
+ int (*setup)(struct dsa_switch *ds);
+ int (*set_addr)(struct dsa_switch *ds, u8 *addr);
+
+ /*
+ * Access to the switch's PHY registers.
+ */
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port,
+ int regnum, u16 val);
+
+ /*
+ * Link state polling and IRQ handling.
+ */
+ void (*poll_link)(struct dsa_switch *ds);
+
+ /*
+ * ethtool hardware statistics.
+ */
+ void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+ void (*get_ethtool_stats)(struct dsa_switch *ds,
+ int port, uint64_t *data);
+ int (*get_sset_count)(struct dsa_switch *ds);
+};
+
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
+
+/*
+ * The original DSA tag format and some other tag formats have no
+ * ethertype, which means that we need to add a little hack to the
+ * networking receive path to make sure that received frames get
+ * the right ->protocol assigned to them when one of those tag
+ * formats is in use.
+ */
+static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
+{
+ return !!(dst->tag_protocol == htons(ETH_P_DSA));
+}
+static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+{
+ return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+}
#endif
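For the new DSA structures, a switch driver fills in a dsa_switch_driver and registers it; a rough sketch of that flow, with hypothetical example_* callbacks standing in for the real chip accessors:

	#include <linux/module.h>
	#include <net/dsa.h>

	/* example_probe(), example_setup(), ... are hypothetical, not shown */
	static struct dsa_switch_driver example_switch_driver = {
		.tag_protocol	= htons(ETH_P_DSA),	/* or ETH_P_TRAILER */
		.probe		= example_probe,
		.setup		= example_setup,
		.set_addr	= example_set_addr,
		.phy_read	= example_phy_read,
		.phy_write	= example_phy_write,
	};

	static int __init example_switch_init(void)
	{
		register_switch_driver(&example_switch_driver);
		return 0;
	}
	module_init(example_switch_init);

	static void __exit example_switch_exit(void)
	{
		unregister_switch_driver(&example_switch_driver);
	}
	module_exit(example_switch_exit);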
diff --git a/include/net/dst.h b/include/net/dst.h
index 4fb6c438179..344c8dd0287 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -53,6 +53,7 @@ struct dst_entry {
#define DST_NOHASH 0x0008
#define DST_NOCACHE 0x0010
#define DST_NOCOUNT 0x0020
+#define DST_NOPEER 0x0040
short error;
short obsolete;
@@ -86,12 +87,12 @@ struct dst_entry {
};
};
-static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
{
return rcu_dereference(dst->_neighbour);
}
-static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
{
return rcu_dereference_raw(dst->_neighbour);
}
@@ -205,12 +206,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
- if (!mtu)
- mtu = dst->ops->default_mtu(dst);
-
- return mtu;
+ return dst->ops->mtu(dst);
}
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
@@ -397,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst)
struct neighbour *n;
rcu_read_lock();
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
neigh_confirm(n);
rcu_read_unlock();
}
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 9adb99845a5..e1c2ee0eef4 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -17,7 +17,7 @@ struct dst_ops {
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
- unsigned int (*default_mtu)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
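Because dst_mtu() above now calls the ->mtu() method unconditionally, every dst_ops has to supply one in place of the old optional default_mtu(). A hedged sketch of such a method for a hypothetical protocol that keeps the MTU in its metrics:

	static unsigned int example_dst_mtu(const struct dst_entry *dst)
	{
		unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

		/* fall back to the device MTU when no metric is set */
		return mtu ? : dst->dev->mtu;
	}

	static struct dst_ops example_dst_ops = {
		.family	= AF_INET,
		.mtu	= example_dst_mtu,
		/* ... remaining methods ... */
	};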
diff --git a/include/net/flow.h b/include/net/flow.h
index a09447749e2..da1f064a81b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -59,8 +59,11 @@ struct flowi4 {
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
#define flowi4_secid __fl_common.flowic_secid
- __be32 daddr;
+
+ /* (saddr,daddr) must be grouped, same order as in IP header */
__be32 saddr;
+ __be32 daddr;
+
union flowi_uli uli;
#define fl4_sport uli.ports.sport
#define fl4_dport uli.ports.dport
@@ -207,6 +210,7 @@ extern struct flow_cache_object *flow_cache_lookup(
u8 dir, flow_resolve_t resolver, void *ctx);
extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
extern atomic_t flow_cache_genid;
#endif
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644
index 00000000000..80461c1ae9e
--- /dev/null
+++ b/include/net/flow_keys.h
@@ -0,0 +1,16 @@
+#ifndef _NET_FLOW_KEYS_H
+#define _NET_FLOW_KEYS_H
+
+struct flow_keys {
+	/* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ u8 ip_proto;
+};
+
+extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+#endif
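skb_flow_dissect() pulls the addresses, ports and protocol out of the packet in one pass; a typical consumer hashes the result, roughly like this (hashrnd is an assumed seed value):

	#include <linux/jhash.h>
	#include <net/flow_keys.h>

	static u32 example_flow_hash(const struct sk_buff *skb, u32 hashrnd)
	{
		struct flow_keys keys;

		if (!skb_flow_dissect(skb, &keys))
			return 0;	/* not a dissectable packet */

		return jhash_3words((__force u32)keys.dst,
				    (__force u32)keys.src,
				    (__force u32)keys.ports,
				    hashrnd);
	}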
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 82d8d09faa4..7db32995ccd 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -128,6 +128,8 @@ extern int genl_register_mc_group(struct genl_family *family,
struct genl_multicast_group *grp);
extern void genl_unregister_mc_group(struct genl_family *family,
struct genl_multicast_group *grp);
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags);
/**
* genlmsg_put - Add generic netlink header to netlink message
diff --git a/include/net/icmp.h b/include/net/icmp.h
index f0698b955b7..75d61564907 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -31,8 +31,8 @@ struct icmp_err {
extern const struct icmp_err icmp_err_convert[];
#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
struct dst_entry;
struct net_proto_family;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 7e2c4d483ad..71392545d0a 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -271,14 +271,6 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
-/* Ugly macro to convert literal channel numbers into their mhz equivalents
- * There are certianly some conditions that will break this (like feeding it '30')
- * but they shouldn't arise since nothing talks on channel 30. */
-#define ieee80211chan2mhz(x) \
- (((x) <= 14) ? \
- (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
- ((x) + 1000) * 5)
-
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{
diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h
index d52685defb1..ee59f8b188d 100644
--- a/include/net/ieee802154.h
+++ b/include/net/ieee802154.h
@@ -21,11 +21,14 @@
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
* Maxim Osipov <maxim.osipov@siemens.com>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
#ifndef NET_IEEE802154_H
#define NET_IEEE802154_H
+#define IEEE802154_MTU 127
+
#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
@@ -56,6 +59,9 @@
(((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE 2 /* 2 octets */
+
/* MAC's Command Frames Identifiers */
#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index e46674d5dae..00cbb4384c7 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -15,7 +15,7 @@
#define _INET6_HASHTABLES_H
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/types.h>
@@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e6db62e756d..dbf9aab34c8 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
return (void *)inet_csk(sk)->icsk_ca_priv;
}
-extern struct sock *inet_csk_clone(struct sock *sk,
- const struct request_sock *req,
- const gfp_t priority);
+extern struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority);
enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b897d6e6d0a..e3e405106af 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -31,6 +31,7 @@
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
*/
struct ip_options {
__be32 faddr;
+ __be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
@@ -69,7 +71,7 @@ struct ip_options_data {
struct inet_request_sock {
struct request_sock req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
u16 inet6_rsk_offset;
#endif
__be16 loc_port;
@@ -137,7 +139,7 @@ struct rtable;
struct inet_sock {
/* sk and pinet6 has to be the first two members of inet_sock */
struct sock sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct ipv6_pinfo *pinet6;
#endif
/* Socket demultiplex comparisons on incoming packets. */
@@ -186,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
static inline void inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index e8c25b98120..ba52c830a7a 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
-#ifdef CONFIG_NET_NS
- return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
- /* reference counting, */
- /* initialization, or RCU. */
-#else
- return &init_net;
-#endif
+ return read_pnet(&twsk->tw_net);
}
static inline
void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
{
-#ifdef CONFIG_NET_NS
- rcu_assign_pointer(twsk->tw_net, net);
-#endif
+ write_pnet(&twsk->tw_net, net);
}
#endif /* _INET_TIMEWAIT_SOCK_ */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 78c83e62218..06b795dd590 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,6 +35,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
@@ -86,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
{
struct inetpeer_addr daddr;
- ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
+ *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
daddr.family = AF_INET6;
return inet_getpeer(&daddr, create);
}
diff --git a/include/net/ip.h b/include/net/ip.h
index eca0ef7a495..775009f9eab 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast,
memcpy(buf, &naddr, sizeof(naddr));
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
static __inline__ void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk)
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
@@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
* Functions provided by ip_sockglue.c
*/
-extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+extern void ipv4_pktinfo_prepare(struct sk_buff *skb);
extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int ip_cmsg_send(struct net *net,
struct msghdr *msg, struct ipcm_cookie *ipc);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5735a0f979c..b26bb810198 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -86,9 +86,6 @@ struct fib6_table;
struct rt6_info {
struct dst_entry dst;
-#define rt6i_dev dst.dev
-#define rt6i_expires dst.expires
-
/*
* Tail elements of dst_entry (__refcnt etc.)
* and these elements (rarely used in hot path) are in
@@ -202,6 +199,10 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
const struct in6_addr *daddr, int dst_len,
const struct in6_addr *saddr, int src_len);
+extern void fib6_clean_all_ro(struct net *net,
+ int (*func)(struct rt6_info *, void *arg),
+ int prune, void *arg);
+
extern void fib6_clean_all(struct net *net,
int (*func)(struct rt6_info *, void *arg),
int prune, void *arg);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e91b72fc71..2ad92ca4e6f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -70,6 +70,8 @@ extern void ip6_route_input(struct sk_buff *skb);
extern struct dst_entry * ip6_route_output(struct net *net,
const struct sock *sk,
struct flowi6 *fl6);
+extern struct dst_entry * ip6_route_lookup(struct net *net,
+ struct flowi6 *fl6, int flags);
extern int ip6_route_init(void);
extern void ip6_route_cleanup(void);
@@ -95,14 +97,14 @@ extern struct rt6_info *rt6_lookup(struct net *net,
extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
- const struct in6_addr *addr);
+ struct flowi6 *fl6);
extern int icmp6_dst_gc(void);
extern void fib6_force_start_gc(struct net *net);
extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
- int anycast);
+ bool anycast);
extern int ip6_dst_hoplimit(struct dst_entry *dst);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 873d5be7926..ebe517f2da9 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,7 +21,7 @@
#include <linux/netfilter.h> /* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
-#include <net/ipv6.h> /* for ipv6_addr_copy */
+#include <net/ipv6.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
const struct ipv6hdr *iph = nh;
iphdr->len = sizeof(struct ipv6hdr);
iphdr->protocol = iph->nexthdr;
- ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
- ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+ iphdr->saddr.in6 = iph->saddr;
+ iphdr->daddr.in6 = iph->daddr;
} else
#endif
{
@@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- ipv6_addr_copy(&dst->in6, &src->in6);
+ dst->in6 = src->in6;
else
#endif
dst->ip = src->ip;
@@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
- __u16 protocol, __u32 fwmark);
+ __u16 protocol, __u32 fwmark, __u32 flags);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index a366a8a1fe2..e4170a22fc6 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -132,6 +132,15 @@ extern struct ctl_path net_ipv6_ctl_path[];
SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
})
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \
+({ \
+ struct inet6_dev *_idev = (idev); \
+ if (likely(_idev != NULL)) \
+ SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+ SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
#define _DEVADD(net, statname, modifier, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
@@ -168,11 +177,11 @@ extern struct ctl_path net_ipv6_ctl_path[];
_DEVINCATOMIC(net, icmpv6, _BH, idev, field)
#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
- _DEVINCATOMIC(net, icmpv6msg, , idev, field +256)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
- _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
- _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field)
+ _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
struct ip6_ra_chain {
struct ip6_ra_chain *next;
@@ -300,11 +309,6 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
}
-static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
-{
- memcpy(a1, a2, sizeof(struct in6_addr));
-}
-
static inline void ipv6_addr_prefix(struct in6_addr *pfx,
const struct in6_addr *addr,
int plen)
@@ -554,7 +558,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb,
u8 *proto);
extern int ipv6_skip_exthdr(const struct sk_buff *, int start,
- u8 *nexthdrp);
+ u8 *nexthdrp, __be16 *frag_offp);
extern int ipv6_ext_hdr(u8 nexthdr);
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f2419cf44ce..0954ec95915 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -27,7 +27,6 @@ enum {
IUCV_OPEN,
IUCV_BOUND,
IUCV_LISTEN,
- IUCV_SEVERED,
IUCV_DISCONN,
IUCV_CLOSING,
IUCV_CLOSED
@@ -146,7 +145,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
void iucv_accept_unlink(struct sock *sk);
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 72eddd1b410..d49928ba5d0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -166,6 +166,7 @@ struct ieee80211_low_level_stats {
* that it is only ever disabled for station mode.
* @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
* @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
+ * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -184,6 +185,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_QOS = 1<<13,
BSS_CHANGED_IDLE = 1<<14,
BSS_CHANGED_SSID = 1<<15,
+ BSS_CHANGED_AP_PROBE_RESP = 1<<16,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -518,7 +520,7 @@ struct ieee80211_tx_rate {
* @flags: transmit info flags, defined above
* @band: the band to transmit on (use for checking for races)
* @antenna_sel_tx: antenna to use, 0 for automatic diversity
- * @pad: padding, ignore
+ * @ack_frame_id: internal frame ID for TX status, used internally
* @control: union for control data
* @status: union for status data
* @driver_data: array of driver_data pointers
@@ -535,8 +537,7 @@ struct ieee80211_tx_info {
u8 antenna_sel_tx;
- /* 2 byte hole */
- u8 pad[2];
+ u16 ack_frame_id;
union {
struct {
@@ -901,6 +902,10 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
* @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
* CCMP key if it requires CCMP encryption of management frames (MFP) to
* be done in software.
+ * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
+ * for a CCMP key if space should be prepared for the IV, but the IV
+ * itself should not be generated. Do not set together with
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
@@ -908,6 +913,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
IEEE80211_KEY_FLAG_SW_MGMT = 1<<4,
+ IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
};
/**
@@ -1304,6 +1310,16 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
}
/**
+ * ieee80211_free_txskb - free TX skb
+ * @hw: the hardware
+ * @skb: the skb
+ *
+ * Free a transmit skb. Use this function when some failure
+ * to transmit happened and thus status cannot be reported.
+ */
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
* DOC: Hardware crypto acceleration
*
* mac80211 is capable of taking advantage of many hardware
@@ -1423,7 +1439,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* DOC: Beacon filter support
*
* Some hardware have beacon filter support to reduce host cpu wakeups
- * which will reduce system power consumption. It usuallly works so that
+ * which will reduce system power consumption. It usually works so that
* the firmware creates a checksum of the beacon but omits all constantly
* changing elements (TSF, TIM etc). Whenever the checksum changes the
* beacon is forwarded to the host, otherwise it will be just dropped. That
@@ -1744,11 +1760,21 @@ enum ieee80211_frame_release_type {
* skb contains the buffer starting from the IEEE 802.11 header.
* The low-level driver should send the frame out based on
* configuration in the TX control data. This handler should,
- * preferably, never fail and stop queues appropriately, more
- * importantly, however, it must never fail for A-MPDU-queues.
- * This function should return NETDEV_TX_OK except in very
- * limited cases.
- * Must be implemented and atomic.
+ * preferably, never fail and stop queues appropriately.
+ * This must be implemented if @tx_frags is not.
+ * Must be atomic.
+ *
+ * @tx_frags: Called to transmit multiple fragments of a single MSDU.
+ * This handler must consume all fragments; sending out only some of
+ * them is useless, and it cannot ask for some of them to be
+ * queued again. If the frame is not fragmented the queue has a
+ * single SKB only. To avoid issues with the networking stack
+ * when TX status is reported the frames should be removed from
+ * the skb queue.
+ * If this is used, the tx_info @vif and @sta pointers will be
+ * invalid -- you must not use them in that case.
+ * This must be implemented if @tx isn't.
+ * Must be atomic.
*
* @start: Called before the first netdevice attached to the hardware
* is enabled. This should turn on the hardware and must turn on
@@ -2085,6 +2111,8 @@ enum ieee80211_frame_release_type {
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+ void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff_head *skbs);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
#ifdef CONFIG_PM
@@ -2661,6 +2689,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_proberesp_get - retrieve a Probe Response template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Response template which can, for example, be uploaded to
+ * hardware. The destination address should be set by the caller.
+ *
+ * Can only be called in AP mode.
+ */
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
* ieee80211_pspoll_get - retrieve a PS Poll template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -3461,9 +3502,12 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
*
* @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
* changed, rate control algorithm can update its internal state if needed.
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate
+ * control algorithm needs to adjust accordingly.
*/
enum rate_control_changed {
- IEEE80211_RC_HT_CHANGED = BIT(0)
+ IEEE80211_RC_HT_CHANGED = BIT(0),
+ IEEE80211_RC_SMPS_CHANGED = BIT(1),
};
/**
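ieee80211_proberesp_get() hands offload-capable drivers a template they can push to the device; a driver reacting to the new BSS_CHANGED_AP_PROBE_RESP flag might do roughly the following (example_upload_proberesp() is hypothetical):

	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
		struct sk_buff *skb;

		skb = ieee80211_proberesp_get(hw, vif);	/* AP mode only */
		if (skb) {
			example_upload_proberesp(hw, skb->data, skb->len);
			dev_kfree_skb(skb);
		}
	}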
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 62beeb97c4b..e3133c23980 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -79,6 +79,42 @@ struct nd_opt_hdr {
__u8 nd_opt_len;
} __packed;
+static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
+{
+ const u32 *p32 = pkey;
+
+ return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+ (p32[1] * hash_rnd[1]) +
+ (p32[2] * hash_rnd[2]) +
+ (p32[3] * hash_rnd[3]));
+}
+
+static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey)
+{
+ struct neigh_hash_table *nht;
+ const u32 *p32 = pkey;
+ struct neighbour *n;
+ u32 hash_val;
+
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+ hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
+ u32 *n32 = (u32 *) n->primary_key;
+ if (n->dev == dev &&
+ ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
+ (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) {
+ if (!atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
+ break;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return n;
+}
extern int ndisc_init(void);
@@ -145,13 +181,4 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
extern void inet6_ifinfo_notify(int event,
struct inet6_dev *idev);
-static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
-{
-
- if (dev)
- return __neigh_lookup_errno(&nd_tbl, addr, dev);
-
- return ERR_PTR(-ENODEV);
-}
-
#endif
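__ipv6_neigh_lookup() above takes a reference on the entry it returns (note the atomic_inc_not_zero), so every successful lookup has to be balanced with neigh_release(); a minimal usage sketch:

	struct neighbour *n;

	n = __ipv6_neigh_lookup(&nd_tbl, skb->dev, &ipv6_hdr(skb)->daddr);
	if (n) {
		/* ... use the neighbour ... */
		neigh_release(n);	/* drop the reference taken by the lookup */
	}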
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 2720884287c..34c996f4618 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -59,7 +59,7 @@ struct neigh_parms {
int reachable_time;
int delay_probe_time;
- int queue_len;
+ int queue_len_bytes;
int ucast_probes;
int app_probes;
int mcast_probes;
@@ -99,6 +99,7 @@ struct neighbour {
rwlock_t lock;
atomic_t refcnt;
struct sk_buff_head arp_queue;
+ unsigned int arp_queue_len_bytes;
struct timer_list timer;
unsigned long used;
atomic_t probes;
@@ -138,10 +139,12 @@ struct pneigh_entry {
* neighbour table manipulation
*/
+#define NEIGH_NUM_HASH_RND 4
+
struct neigh_hash_table {
struct neighbour __rcu **hash_buckets;
unsigned int hash_shift;
- __u32 hash_rnd;
+ __u32 hash_rnd[NEIGH_NUM_HASH_RND];
struct rcu_head rcu;
};
@@ -153,7 +156,7 @@ struct neigh_table {
int key_len;
__u32 (*hash)(const void *pkey,
const struct net_device *dev,
- __u32 hash_rnd);
+ __u32 *hash_rnd);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
@@ -172,12 +175,18 @@ struct neigh_table {
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
- struct kmem_cache *kmem_cachep;
struct neigh_statistics __percpu *stats;
struct neigh_hash_table __rcu *nht;
struct pneigh_entry **phash_buckets;
};
+#define NEIGH_PRIV_ALIGN sizeof(long long)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+ return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN);
+}
+
/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE 0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
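neighbour_priv() exposes the per-entry private area that follows the (NEIGH_PRIV_ALIGN-aligned) key; how a driver reserves that space is outside this hunk, but accessing it would look roughly like this, with example_neigh_priv as a hypothetical type:

	struct example_neigh_priv {
		unsigned long last_refresh;
	};

	static void example_touch_neigh(struct neighbour *n)
	{
		struct example_neigh_priv *priv = neighbour_priv(n);

		priv->last_refresh = jiffies;
	}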
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 3bb6fa0eace..ee547c14981 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -77,7 +77,7 @@ struct net {
struct netns_packet packet;
struct netns_unix unx;
struct netns_ipv4 ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netns_ipv6 ipv6;
#endif
#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 4e9c63a20db..463ae8e1669 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -15,8 +15,8 @@
#include <net/netfilter/nf_conntrack_extend.h>
struct nf_conn_counter {
- u_int64_t packets;
- u_int64_t bytes;
+ atomic64_t packets;
+ atomic64_t bytes;
};
static inline
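With the counters switched to atomic64_t, the per-direction totals can be bumped and read without extra locking; a sketch of what the update path looks like under the new type (the helper name is made up):

	static void example_acct_packet(struct nf_conn_counter *counter,
					const struct sk_buff *skb)
	{
		atomic64_inc(&counter->packets);
		atomic64_add(skb->len, &counter->bytes);
	}

	/* readers use atomic64_read(&counter->packets) and so on */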
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508b3e1..a88fb693938 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (net->ct.nf_conntrack_event_cb == NULL)
return;
e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
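Since the notifier pointers now live in struct netns_ct, registration is per network namespace; a hedged sketch of a listener under the new signatures (callback and names are hypothetical):

	static int example_ct_event(unsigned int events, struct nf_ct_event *item)
	{
		/* react to the IPCT_* bits set in events for item->ct */
		return 0;
	}

	static struct nf_ct_event_notifier example_ct_notifier = {
		.fcn	= example_ct_event,
	};

	static int __net_init example_net_init(struct net *net)
	{
		return nf_conntrack_register_notifier(net, &example_ct_notifier);
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		nf_conntrack_unregister_notifier(net, &example_ct_notifier);
	}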
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0f8a8c58753..4619caadd9d 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -91,7 +91,6 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
void nf_ct_remove_expectations(struct nf_conn *ct);
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
-void nf_ct_remove_userspace_expectations(void);
/* Allocate space for an expectation: this is mandatory before calling
nf_ct_expect_related. You will have to call put afterwards. */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 2f8fb77bfdd..aea3f8221be 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -12,7 +12,6 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
#include <linux/list_nulls.h>
/* A `tuple' is a structure containing the information to uniquely
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index b8872df7285..b4de990b55f 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,14 +1,12 @@
#ifndef _NF_NAT_H
#define _NF_NAT_H
#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/nf_nat.h>
+#include <linux/netfilter/nf_nat.h>
#include <net/netfilter/nf_conntrack_tuple.h>
-#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
-
enum nf_nat_manip_type {
- IP_NAT_MANIP_SRC,
- IP_NAT_MANIP_DST
+ NF_NAT_MANIP_SRC,
+ NF_NAT_MANIP_DST
};
/* SRC manip occurs POST_ROUTING or LOCAL_IN */
@@ -52,7 +50,7 @@ struct nf_conn_nat {
/* Set up the info structure to map into this range. */
extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype);
/* Is this tuple already taken? (not by us)*/
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 3dc7b98effe..b13d8d18d59 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -20,7 +20,7 @@ extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
- if (manip == IP_NAT_MANIP_SRC)
+ if (manip == NF_NAT_MANIP_SRC)
return ct->status & IPS_SRC_NAT_DONE;
else
return ct->status & IPS_DST_NAT_DONE;
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
index 93cc90d28e6..7b0b51165f7 100644
--- a/include/net/netfilter/nf_nat_protocol.h
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -4,14 +4,12 @@
#include <net/netfilter/nf_nat.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
-struct nf_nat_range;
+struct nf_nat_ipv4_range;
struct nf_nat_protocol {
/* Protocol number. */
unsigned int protonum;
- struct module *me;
-
/* Translate a packet to the target according to manip type.
Return true if succeeded. */
bool (*manip_pkt)(struct sk_buff *skb,
@@ -30,15 +28,12 @@ struct nf_nat_protocol {
possible. Per-protocol part of tuple is initialized to the
incoming packet. */
void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);
- int (*range_to_nlattr)(struct sk_buff *skb,
- const struct nf_nat_range *range);
-
int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range *range);
+ struct nf_nat_ipv4_range *range);
};
/* Protocol registration. */
@@ -61,14 +56,12 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
const union nf_conntrack_man_proto *max);
extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover);
-extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
- const struct nf_nat_range *range);
extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range *range);
+ struct nf_nat_ipv4_range *range);
#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index e505358d899..75ca9291cf2 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
return sk;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline struct sock *
nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0249399e51a..7a911eca0f1 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,6 +18,8 @@ struct netns_ct {
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d786b4fc02a..bbd023a1c9b 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -55,6 +55,7 @@ struct netns_ipv4 {
int current_rt_cache_rebuild_count;
unsigned int sysctl_ping_group_range[2];
+ long sysctl_tcp_mem[3];
atomic_t rt_genid;
atomic_t dev_addr_genid;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 0b44112e236..d542a4b28cc 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -10,15 +10,15 @@ struct netns_mib {
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
- DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct proc_dir_entry *proc_net_devsnmp6;
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
- DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics);
+ DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
#endif
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 748f91f87cd..5299e69a32a 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -56,7 +56,7 @@ struct netns_xfrm {
#endif
struct dst_ops xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct dst_ops xfrm6_dst_ops;
#endif
};
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644
index 00000000000..e503b87c4c1
--- /dev/null
+++ b/include/net/netprio_cgroup.h
@@ -0,0 +1,57 @@
+/*
+ * netprio_cgroup.h Control Group Priority set
+ *
+ *
+ * Authors: Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+
+struct netprio_map {
+ struct rcu_head rcu;
+ u32 priomap_len;
+ u32 priomap[];
+};
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup_netprio_state {
+ struct cgroup_subsys_state css;
+ u32 prioidx;
+};
+
+#ifndef CONFIG_NETPRIO_CGROUP
+extern int net_prio_subsys_id;
+#endif
+
+extern void sock_update_netprioidx(struct sock *sk);
+
+static inline struct cgroup_netprio_state
+ *task_netprio_state(struct task_struct *p)
+{
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+ return container_of(task_subsys_state(p, net_prio_subsys_id),
+ struct cgroup_netprio_state, css);
+#else
+ return NULL;
+#endif
+}
+
+#else
+
+#define sock_update_netprioidx(sk)
+#endif
+
+#endif /* _NETPRIO_CGROUP_H */
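The priomap added here is meant to be indexed by a socket's cgroup prioidx on a per-device basis; transmit-path code consulting it would look roughly like the following sketch (the priomap field on net_device and sk_cgrp_prioidx on struct sock come from companion patches, so treat those names as assumptions):

	struct netprio_map *map;
	u32 prio = 0;

	rcu_read_lock();
	map = rcu_dereference(skb->dev->priomap);	/* assumed field name */
	if (map && sk->sk_cgrp_prioidx < map->priomap_len)
		prio = map->priomap[sk->sk_cgrp_prioidx];
	rcu_read_unlock();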
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 39b85bc0804..2be95e2626c 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -34,32 +34,30 @@
#define NCI_MAX_NUM_CONN 10
/* NCI Status Codes */
-#define NCI_STATUS_OK 0x00
-#define NCI_STATUS_REJECTED 0x01
-#define NCI_STATUS_MESSAGE_CORRUPTED 0x02
-#define NCI_STATUS_BUFFER_FULL 0x03
-#define NCI_STATUS_FAILED 0x04
-#define NCI_STATUS_NOT_INITIALIZED 0x05
-#define NCI_STATUS_SYNTAX_ERROR 0x06
-#define NCI_STATUS_SEMANTIC_ERROR 0x07
-#define NCI_STATUS_UNKNOWN_GID 0x08
-#define NCI_STATUS_UNKNOWN_OID 0x09
-#define NCI_STATUS_INVALID_PARAM 0x0a
-#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b
+#define NCI_STATUS_OK 0x00
+#define NCI_STATUS_REJECTED 0x01
+#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
+#define NCI_STATUS_FAILED 0x03
+#define NCI_STATUS_NOT_INITIALIZED 0x04
+#define NCI_STATUS_SYNTAX_ERROR 0x05
+#define NCI_STATUS_SEMANTIC_ERROR 0x06
+#define NCI_STATUS_UNKNOWN_GID 0x07
+#define NCI_STATUS_UNKNOWN_OID 0x08
+#define NCI_STATUS_INVALID_PARAM 0x09
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
/* Discovery Specific Status Codes */
-#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
-#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
+#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
/* RF Interface Specific Status Codes */
-#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
-#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
-#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
-#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3
+#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
/* NFCEE Interface Specific Status Codes */
-#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0
-#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1
-#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2
-#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3
-#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2
+#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3
/* NCI RF Technology and Mode */
#define NCI_NFC_A_PASSIVE_POLL_MODE 0x00
@@ -67,11 +65,28 @@
#define NCI_NFC_F_PASSIVE_POLL_MODE 0x02
#define NCI_NFC_A_ACTIVE_POLL_MODE 0x03
#define NCI_NFC_F_ACTIVE_POLL_MODE 0x05
+#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06
#define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80
#define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81
#define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82
#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
+#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86
+
+/* NCI RF Technologies */
+#define NCI_NFC_RF_TECHNOLOGY_A 0x00
+#define NCI_NFC_RF_TECHNOLOGY_B 0x01
+#define NCI_NFC_RF_TECHNOLOGY_F 0x02
+#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
+
+/* NCI Bit Rates */
+#define NCI_NFC_BIT_RATE_106 0x00
+#define NCI_NFC_BIT_RATE_212 0x01
+#define NCI_NFC_BIT_RATE_424 0x02
+#define NCI_NFC_BIT_RATE_848 0x03
+#define NCI_NFC_BIT_RATE_1695 0x04
+#define NCI_NFC_BIT_RATE_3390 0x05
+#define NCI_NFC_BIT_RATE_6780 0x06
/* NCI RF Protocols */
#define NCI_RF_PROTOCOL_UNKNOWN 0x00
@@ -82,37 +97,30 @@
#define NCI_RF_PROTOCOL_NFC_DEP 0x05
/* NCI RF Interfaces */
-#define NCI_RF_INTERFACE_RFU 0x00
-#define NCI_RF_INTERFACE_FRAME 0x01
-#define NCI_RF_INTERFACE_ISO_DEP 0x02
-#define NCI_RF_INTERFACE_NFC_DEP 0x03
+#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
+#define NCI_RF_INTERFACE_FRAME 0x01
+#define NCI_RF_INTERFACE_ISO_DEP 0x02
+#define NCI_RF_INTERFACE_NFC_DEP 0x03
+
+/* NCI Reset types */
+#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
+#define NCI_RESET_TYPE_RESET_CONFIG 0x01
+
+/* NCI Static RF connection ID */
+#define NCI_STATIC_RF_CONN_ID 0x00
+
+/* NCI Data Flow Control */
+#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff
/* NCI RF_DISCOVER_MAP_CMD modes */
#define NCI_DISC_MAP_MODE_POLL 0x01
#define NCI_DISC_MAP_MODE_LISTEN 0x02
-#define NCI_DISC_MAP_MODE_BOTH 0x03
-
-/* NCI Discovery Types */
-#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00
-#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
-#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
-#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
-#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
-#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06
-#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07
-#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
-#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
-#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
-#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
-#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
-#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
/* NCI Deactivation Type */
-#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
-#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
-#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
-#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03
-#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
+#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
/* Message Type (MT) */
#define NCI_MT_DATA_PKT 0x00
@@ -144,10 +152,10 @@
#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
/* GID values */
-#define NCI_GID_CORE 0x0
-#define NCI_GID_RF_MGMT 0x1
-#define NCI_GID_NFCEE_MGMT 0x2
-#define NCI_GID_PROPRIETARY 0xf
+#define NCI_GID_CORE 0x0
+#define NCI_GID_RF_MGMT 0x1
+#define NCI_GID_NFCEE_MGMT 0x2
+#define NCI_GID_PROPRIETARY 0xf
/* ---- NCI Packet structures ---- */
#define NCI_CTRL_HDR_SIZE 3
@@ -169,24 +177,17 @@ struct nci_data_hdr {
/* ----- NCI Commands ---- */
/* ------------------------ */
#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
-
-#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
-
-#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_cmd {
- __u8 target_handle;
- __u8 num_target_specific_params;
+struct nci_core_reset_cmd {
+ __u8 reset_type;
} __packed;
-#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x06)
+#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
struct disc_map_config {
__u8 rf_protocol;
__u8 mode;
- __u8 rf_interface_type;
+ __u8 rf_interface;
} __packed;
struct nci_rf_disc_map_cmd {
@@ -197,7 +198,7 @@ struct nci_rf_disc_map_cmd {
#define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
struct disc_config {
- __u8 type;
+ __u8 rf_tech_and_mode;
__u8 frequency;
} __packed;
@@ -218,6 +219,7 @@ struct nci_rf_deactivate_cmd {
struct nci_core_reset_rsp {
__u8 status;
__u8 nci_ver;
+ __u8 config_status;
} __packed;
#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
@@ -232,24 +234,12 @@ struct nci_core_init_rsp_1 {
struct nci_core_init_rsp_2 {
__u8 max_logical_connections;
__le16 max_routing_table_size;
- __u8 max_control_packet_payload_length;
- __le16 rf_sending_buffer_size;
- __le16 rf_receiving_buffer_size;
- __le16 manufacturer_id;
+ __u8 max_ctrl_pkt_payload_len;
+ __le16 max_size_for_large_params;
+ __u8 manufact_id;
+ __le32 manufact_specific_info;
} __packed;
-#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_rsp {
- __u8 status;
- __u8 max_pkt_payload_size;
- __u8 initial_num_credits;
- __u8 conn_id;
-} __packed;
-
-#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06)
-
#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -259,7 +249,7 @@ struct nci_core_conn_create_rsp {
/* --------------------------- */
/* ---- NCI Notifications ---- */
/* --------------------------- */
-#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x07)
+#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06)
struct conn_credit_entry {
__u8 conn_id;
__u8 credits;
@@ -270,12 +260,13 @@ struct nci_core_conn_credit_ntf {
struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
} __packed;
-#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
-struct nci_rf_field_info_ntf {
- __u8 rf_field_status;
+#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
+struct nci_core_intf_error_ntf {
+ __u8 status;
+ __u8 conn_id;
} __packed;
-#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
struct rf_tech_specific_params_nfca_poll {
__u16 sens_res;
__u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
@@ -289,17 +280,22 @@ struct activation_params_nfca_poll_iso_dep {
__u8 rats_res[20];
};
-struct nci_rf_activate_ntf {
- __u8 target_handle;
+struct nci_rf_intf_activated_ntf {
+ __u8 rf_discovery_id;
+ __u8 rf_interface;
__u8 rf_protocol;
- __u8 rf_tech_and_mode;
+ __u8 activation_rf_tech_and_mode;
+ __u8 max_data_pkt_payload_size;
+ __u8 initial_num_credits;
__u8 rf_tech_specific_params_len;
union {
struct rf_tech_specific_params_nfca_poll nfca_poll;
} rf_tech_specific_params;
- __u8 rf_interface_type;
+ __u8 data_exch_rf_tech_and_mode;
+ __u8 data_exch_tx_bit_rate;
+ __u8 data_exch_rx_bit_rate;
__u8 activation_params_len;
union {
@@ -309,5 +305,9 @@ struct nci_rf_activate_ntf {
} __packed;
#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_ntf {
+ __u8 type;
+ __u8 reason;
+} __packed;
#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index b8b4bbd7e0f..bccd89e9d4c 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,15 +109,14 @@ struct nci_dev {
[NCI_MAX_SUPPORTED_RF_INTERFACES];
__u8 max_logical_connections;
__u16 max_routing_table_size;
- __u8 max_control_packet_payload_length;
- __u16 rf_sending_buffer_size;
- __u16 rf_receiving_buffer_size;
- __u16 manufacturer_id;
+ __u8 max_ctrl_pkt_payload_len;
+ __u16 max_size_for_large_params;
+ __u8 manufact_id;
+ __u32 manufact_specific_info;
- /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */
- __u8 max_pkt_payload_size;
+ /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */
+ __u8 max_data_pkt_payload_size;
__u8 initial_num_credits;
- __u8 conn_id;
/* stored during nci_data_exchange */
data_exchange_cb_t data_exchange_cb;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 6a7f602aa84..8696b773a69 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -52,6 +52,9 @@ struct nfc_ops {
int (*dev_down)(struct nfc_dev *dev);
int (*start_poll)(struct nfc_dev *dev, u32 protocols);
void (*stop_poll)(struct nfc_dev *dev);
+ int (*dep_link_up)(struct nfc_dev *dev, int target_idx,
+ u8 comm_mode, u8 rf_mode);
+ int (*dep_link_down)(struct nfc_dev *dev);
int (*activate_target)(struct nfc_dev *dev, u32 target_idx,
u32 protocol);
void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx);
@@ -60,11 +63,17 @@ struct nfc_ops {
void *cb_context);
};
+#define NFC_TARGET_IDX_ANY -1
+#define NFC_MAX_GT_LEN 48
+#define NFC_MAX_NFCID1_LEN 10
+
struct nfc_target {
u32 idx;
u32 supported_protocols;
u16 sens_res;
u8 sel_res;
+ u8 nfcid1_len;
+ u8 nfcid1[NFC_MAX_NFCID1_LEN];
};
struct nfc_genl_data {
@@ -83,6 +92,8 @@ struct nfc_dev {
bool dev_up;
bool polling;
bool remote_activated;
+ bool dep_link_up;
+ u32 dep_rf_mode;
struct nfc_genl_data genl_data;
u32 supported_protocols;
@@ -157,9 +168,20 @@ static inline const char *nfc_device_name(struct nfc_dev *dev)
return dev_name(&dev->dev);
}
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp);
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+ unsigned int flags, unsigned int size,
+ unsigned int *err);
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
+
+int nfc_set_remote_general_bytes(struct nfc_dev *dev,
+ u8 *gt, u8 gt_len);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len);
int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
int ntargets);
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode);
+
#endif /* __NET_NFC_H */
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6f7eb800974..875f4895b03 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -25,7 +25,7 @@
#define _PROTOCOL_H
#include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
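The defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) tests in this and the following headers are collapsed into IS_ENABLED(CONFIG_IPV6), which is true whether IPv6 is built in (=y) or built as a module (=m). A minimal self-contained sketch of the preprocessor trick behind it (the MY_* names are hypothetical; this is not the kernel's exact <linux/kconfig.h> text, and it relies on gcc/clang accepting an empty __VA_ARGS__):

#include <stdio.h>

/* Kconfig defines CONFIG_FOO as 1 for =y and CONFIG_FOO_MODULE as 1 for =m;
 * the macros below evaluate to 1 in either case and to 0 otherwise.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define MY_IS_BUILTIN(option) __is_defined(option)
#define MY_IS_MODULE(option) __is_defined(option##_MODULE)
#define MY_IS_ENABLED(option) (MY_IS_BUILTIN(option) || MY_IS_MODULE(option))

#define CONFIG_FOO_MODULE 1	/* pretend FOO=m; CONFIG_BAR stays unset */

int main(void)
{
	printf("FOO enabled: %d, BAR enabled: %d\n",
	       MY_IS_ENABLED(CONFIG_FOO), MY_IS_ENABLED(CONFIG_BAR));
	return 0;
}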
@@ -38,7 +38,7 @@ struct net_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- u32 features);
+ netdev_features_t features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
@@ -46,7 +46,7 @@ struct net_protocol {
netns_ok:1;
};
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
int (*handler)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- u32 features);
+ netdev_features_t features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
@@ -91,7 +91,7 @@ struct inet_protosw {
extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
@@ -100,7 +100,7 @@ extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num)
extern void inet_register_protosw(struct inet_protosw *p);
extern void inet_unregister_protosw(struct inet_protosw *p);
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
extern int inet6_register_protosw(struct inet_protosw *p);
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3be..baab385a473 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -5,6 +5,7 @@
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
/* Random Early Detection (RED) algorithm.
=======================================
@@ -87,6 +88,29 @@
etc.
*/
+/*
+ * Adaptive RED: An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker) August 2001
+ *
+ * Every 500 ms:
+ * if (avg > target and max_p <= 0.5)
+ * increase max_p : max_p += alpha;
+ * else if (avg < target and max_p >= 0.01)
+ * decrease max_p : max_p *= beta;
+ *
+ * target :[qth_min + 0.4*(qth_max - qth_min),
+ * qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (with a 32-bit mantissa)
+ * max_P between 0.01 and 0.5 (1% - 50%) [ it is no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
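As a rough userspace sketch of the 500 ms adaptation rule spelled out in the comment above, using the same Q0.32 convention as max_P (the adapt_max_p helper, the queue values and the starting point are made up for the example; this is not the in-kernel red_adaptative_algo()):

#include <stdio.h>
#include <stdint.h>

#define ONE_PERCENT ((uint32_t)((1ULL << 32) / 100))
#define P_MIN (1 * ONE_PERCENT)		/* 1%  */
#define P_MAX (50 * ONE_PERCENT)	/* 50% */

/* alpha = min(1%, max_p / 4); beta = 0.9, approximated as 9/10 */
static uint32_t adapt_max_p(uint32_t max_p, uint32_t qavg,
			    uint32_t target_min, uint32_t target_max)
{
	if (qavg > target_max && max_p <= P_MAX) {
		uint32_t alpha = max_p / 4;

		if (alpha > P_MIN)
			alpha = P_MIN;
		max_p += alpha;			/* increase max_p */
	} else if (qavg < target_min && max_p >= P_MIN) {
		max_p = (max_p / 10) * 9;	/* decrease max_p */
	}
	return max_p;
}

int main(void)
{
	uint32_t max_p = 2 * ONE_PERCENT;	/* start at 2% */
	int i;

	/* pretend the average queue sits above target for five periods */
	for (i = 0; i < 5; i++)
		max_p = adapt_max_p(max_p, 120, 40, 60);
	printf("max_P ~= %.2f%%\n", 100.0 * max_p / 4294967296.0);
	return 0;
}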
#define RED_STAB_SIZE 256
#define RED_STAB_MASK (RED_STAB_SIZE - 1)
@@ -101,82 +125,112 @@ struct red_stats {
struct red_parms {
/* Parameters */
- u32 qth_min; /* Min avg length threshold: A scaled */
- u32 qth_max; /* Max avg length threshold: A scaled */
+ u32 qth_min; /* Min avg length threshold: Wlog scaled */
+ u32 qth_max; /* Max avg length threshold: Wlog scaled */
u32 Scell_max;
- u32 Rmask; /* Cached random mask, see red_rmask */
+ u32 max_P; /* probability, [0 .. 1.0] 32 scaled */
+ u32 max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
+ u32 qth_delta; /* max_th - min_th */
+ u32 target_min; /* min_th + 0.4*(max_th - min_th) */
+ u32 target_max; /* min_th + 0.6*(max_th - min_th) */
u8 Scell_log;
u8 Wlog; /* log(W) */
u8 Plog; /* random number bits */
u8 Stab[RED_STAB_SIZE];
+};
+struct red_vars {
/* Variables */
int qcount; /* Number of packets since last random
number generation */
u32 qR; /* Cached random number */
- unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ unsigned long qavg; /* Average queue length: Wlog scaled */
+ ktime_t qidlestart; /* Start of current idle period */
};
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
{
- return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+ return Plog < 32 ? (~0U >> Plog) : ~0U;
}
-static inline void red_set_parms(struct red_parms *p,
- u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
- u8 Scell_log, u8 *stab)
+static inline void red_set_vars(struct red_vars *v)
{
/* Reset average queue length, the value is strictly bound
* to the parameters below, resetting hurts a bit but leaving
* it might result in an unreasonable qavg for a while. --TGR
*/
- p->qavg = 0;
+ v->qavg = 0;
+
+ v->qcount = -1;
+}
+
+static inline void red_set_parms(struct red_parms *p,
+ u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+ u8 Scell_log, u8 *stab, u32 max_P)
+{
+ int delta = qth_max - qth_min;
+ u32 max_p_delta;
- p->qcount = -1;
p->qth_min = qth_min << Wlog;
p->qth_max = qth_max << Wlog;
p->Wlog = Wlog;
p->Plog = Plog;
- p->Rmask = red_rmask(Plog);
+ if (delta < 0)
+ delta = 1;
+ p->qth_delta = delta;
+ if (!max_P) {
+ max_P = red_maxp(Plog);
+ max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+ }
+ p->max_P = max_P;
+ max_p_delta = max_P / delta;
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+
+ /* RED Adaptive target :
+ * [min_th + 0.4*(max_th - min_th),
+ * min_th + 0.6*(max_th - min_th)].
+ */
+ delta /= 5;
+ p->target_min = qth_min + 2*delta;
+ p->target_max = qth_min + 3*delta;
+
p->Scell_log = Scell_log;
p->Scell_max = (255 << Scell_log);
memcpy(p->Stab, stab, sizeof(p->Stab));
}
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_vars *v)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return v->qidlestart.tv64 != 0;
}
-static inline void red_start_of_idle_period(struct red_parms *p)
+static inline void red_start_of_idle_period(struct red_vars *v)
{
- p->qidlestart = psched_get_time();
+ v->qidlestart = ktime_get();
}
-static inline void red_end_of_idle_period(struct red_parms *p)
+static inline void red_end_of_idle_period(struct red_vars *v)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ v->qidlestart.tv64 = 0;
}
-static inline void red_restart(struct red_parms *p)
+static inline void red_restart(struct red_vars *v)
{
- red_end_of_idle_period(p);
- p->qavg = 0;
- p->qcount = -1;
+ red_end_of_idle_period(v);
+ v->qavg = 0;
+ v->qcount = -1;
}
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p,
+ const struct red_vars *v)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), v->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
* The problem: ideally, average queue length recalculation should
* be done over constant clock intervals. This is too expensive, so
@@ -200,7 +254,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK];
if (shift)
- return p->qavg >> shift;
+ return v->qavg >> shift;
else {
/* Approximate initial part of exponent with linear function:
*
@@ -209,16 +263,17 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
* This seems to be the best solution to the
* problem of too-coarse exponent tabulation.
*/
- us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log;
+ us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log;
- if (us_idle < (p->qavg >> 1))
- return p->qavg - us_idle;
+ if (us_idle < (v->qavg >> 1))
+ return v->qavg - us_idle;
else
- return p->qavg >> 1;
+ return v->qavg >> 1;
}
}
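In short, the comment above describes trading per-tick EWMA updates for a table lookup: the length of the idle period selects a shift count, and dividing qavg by 2^shift stands in for the exact (1 - W)^m decay. A trivial worked example under assumed numbers (the shift value is picked arbitrarily, not taken from a real Stab table):

#include <stdio.h>

int main(void)
{
	unsigned long qavg = 4000;	/* Wlog-scaled average queue length */
	int shift = 3;			/* pretend Stab[] maps this idle time to 3 */

	/* qavg * (1 - W)^m is approximated by qavg / 2^shift */
	printf("decayed qavg = %lu\n", qavg >> shift);	/* prints 500 */
	return 0;
}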
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
+ const struct red_vars *v,
unsigned int backlog)
{
/*
@@ -230,42 +285,46 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
*
* --ANK (980924)
*/
- return p->qavg + (backlog - (p->qavg >> p->Wlog));
+ return v->qavg + (backlog - (v->qavg >> p->Wlog));
}
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
+ const struct red_vars *v,
unsigned int backlog)
{
- if (!red_is_idling(p))
- return red_calc_qavg_no_idle_time(p, backlog);
+ if (!red_is_idling(v))
+ return red_calc_qavg_no_idle_time(p, v, backlog);
else
- return red_calc_qavg_from_idle_time(p);
+ return red_calc_qavg_from_idle_time(p, v);
}
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
{
- return net_random() & p->Rmask;
+ return reciprocal_divide(net_random(), p->max_P_reciprocal);
}
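red_random() now scales a 32-bit random value by a precomputed reciprocal instead of masking with Rmask, which is what frees max_P from being a negative power of two. A small userspace model of the reciprocal helpers of that era (the my_* names are stand-ins; the in-kernel <linux/reciprocal_div.h> may differ in rounding details):

#include <stdint.h>
#include <stdio.h>

static uint32_t my_reciprocal_value(uint32_t d)
{
	return (uint32_t)(((1ULL << 32) + d - 1) / d);	/* ~ ceil(2^32 / d) */
}

static uint32_t my_reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);	/* ~ a / d */
}

int main(void)
{
	uint32_t d = 12345;
	uint32_t r = my_reciprocal_value(d);
	uint32_t a = 987654321;

	/* one 64-bit multiply and a shift replace a hardware division;
	 * an approximation of a / d is all RED needs to draw qR in
	 * the range 0..max_p_delta
	 */
	printf("%u / %u: exact=%u approx=%u\n",
	       a, d, a / d, my_reciprocal_divide(a, r));
	return 0;
}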
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p,
+ const struct red_vars *v,
+ unsigned long qavg)
{
/* The formula used below causes questions.
- OK. qR is random number in the interval 0..Rmask
+ OK. qR is a random number in the interval
+ (0..1/max_P)*(qth_max-qth_min)
i.e. 0..(2^Plog). If we used floating point
arithmetic, it would be: (2^Plog)*rnd_num,
where rnd_num is less than 1.
Taking into account that qavg has a fixed
- point at Wlog, and Plog is related to max_P by
- max_P = (qth_max-qth_min)/2^Plog; two lines
+ point at Wlog, two lines
below have the following floating point equivalent:
max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
Any questions? --ANK (980924)
*/
- return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR);
+ return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR);
}
enum {
@@ -274,7 +333,7 @@ enum {
RED_ABOVE_MAX_TRESH,
};
-static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg)
+static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg)
{
if (qavg < p->qth_min)
return RED_BELOW_MIN_THRESH;
@@ -290,27 +349,29 @@ enum {
RED_HARD_MARK,
};
-static inline int red_action(struct red_parms *p, unsigned long qavg)
+static inline int red_action(const struct red_parms *p,
+ struct red_vars *v,
+ unsigned long qavg)
{
switch (red_cmp_thresh(p, qavg)) {
case RED_BELOW_MIN_THRESH:
- p->qcount = -1;
+ v->qcount = -1;
return RED_DONT_MARK;
case RED_BETWEEN_TRESH:
- if (++p->qcount) {
- if (red_mark_probability(p, qavg)) {
- p->qcount = 0;
- p->qR = red_random(p);
+ if (++v->qcount) {
+ if (red_mark_probability(p, v, qavg)) {
+ v->qcount = 0;
+ v->qR = red_random(p);
return RED_PROB_MARK;
}
} else
- p->qR = red_random(p);
+ v->qR = red_random(p);
return RED_DONT_MARK;
case RED_ABOVE_MAX_TRESH:
- p->qcount = -1;
+ v->qcount = -1;
return RED_HARD_MARK;
}
@@ -318,4 +379,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
return RED_DONT_MARK;
}
+static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
+{
+ unsigned long qavg;
+ u32 max_p_delta;
+
+ qavg = v->qavg;
+ if (red_is_idling(v))
+ qavg = red_calc_qavg_from_idle_time(p, v);
+
+ /* p->qavg is fixed point number with point at Wlog */
+ qavg >>= p->Wlog;
+
+ if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+ p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+ else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+ p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+ max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+ max_p_delta = max(max_p_delta, 1U);
+ p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
#endif
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index eb7d3c2d427..a5f79933e21 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -48,6 +48,10 @@ enum environment_cap {
* 99 - built by driver but a specific alpha2 cannot be determined
* 98 - result of an intersection between two regulatory domains
* 97 - regulatory domain has not yet been configured
+ * @dfs_region: If CRDA responded with a regulatory domain that requires
+ * DFS master operation on a known DFS region (NL80211_DFS_*),
+ * dfs_region represents that region. Drivers can use this and the
+ * @alpha2 to adjust their device's DFS parameters as required.
* @intersect: indicates whether the wireless core should intersect
* the requested regulatory domain with the presently set regulatory
* domain.
@@ -67,6 +71,7 @@ struct regulatory_request {
int wiphy_idx;
enum nl80211_reg_initiator initiator;
char alpha2[2];
+ u8 dfs_region;
bool intersect;
bool processed;
enum environment_cap country_ie_env;
@@ -93,6 +98,7 @@ struct ieee80211_reg_rule {
struct ieee80211_regdomain {
u32 n_reg_rules;
char alpha2[2];
+ u8 dfs_region;
struct ieee80211_reg_rule reg_rules[];
};
diff --git a/include/net/route.h b/include/net/route.h
index db7b3432f07..91855d185b5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -71,12 +71,12 @@ struct rtable {
struct fib_info *fi; /* for client ref to shared metrics */
};
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_route_iif != 0;
}
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
{
return rt->rt_route_iif == 0;
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 6a72a58cde5..d3685615a8b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -71,7 +71,7 @@
#include <linux/jiffies.h>
#include <linux/idr.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif
@@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; }
/* Size of Supported Address Parameter for 'x' address types. */
#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
void sctp_v6_pf_init(void);
void sctp_v6_pf_exit(void);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e90e7a9935d..88949a99453 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -235,12 +235,15 @@ extern struct sctp_globals {
/* Flag to indicate whether computing and verifying checksum
* is disabled. */
- int checksum_disable;
+ bool checksum_disable;
/* Threshold for rwnd update SACKS. Receive buffer shifted this many
* bits is an indicator of when to send and window update SACK.
*/
int rwnd_update_shift;
+
+ /* Threshold for autoclose timeout, in seconds. */
+ unsigned long max_autoclose;
} sctp_globals;
#define sctp_rto_initial (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
#define sctp_auth_enable (sctp_globals.auth_enable)
#define sctp_checksum_disable (sctp_globals.checksum_disable)
#define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose (sctp_globals.max_autoclose)
/* SCTP Socket type: UDP or TCP style. */
typedef enum {
@@ -365,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
return (struct sock *)sp;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct sctp6_sock {
struct sctp_sock sctp;
struct ipv6_pinfo inet6;
@@ -1085,6 +1089,7 @@ void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *);
void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8f0f9ac0307..0147b901e79 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -67,7 +67,7 @@ struct icmp_mib {
#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
struct icmpmsg_mib {
- unsigned long mibs[ICMPMSG_MIB_MAX];
+ atomic_long_t mibs[ICMPMSG_MIB_MAX];
};
/* ICMP6 (IPv6-ICMP) */
@@ -84,7 +84,7 @@ struct icmpv6_mib_device {
#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
/* per network ns counters */
struct icmpv6msg_mib {
- unsigned long mibs[ICMP6MSG_MIB_MAX];
+ atomic_long_t mibs[ICMP6MSG_MIB_MAX];
};
/* per device counters, (shared on all cpus) */
struct icmpv6msg_mib_device {
@@ -129,33 +129,33 @@ struct linux_xfrm_mib {
__this_cpu_inc(mib[0]->mibs[field])
#define SNMP_INC_STATS_USER(mib, field) \
- irqsafe_cpu_inc(mib[0]->mibs[field])
+ this_cpu_inc(mib[0]->mibs[field])
#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
atomic_long_inc(&mib->mibs[field])
#define SNMP_INC_STATS(mib, field) \
- irqsafe_cpu_inc(mib[0]->mibs[field])
+ this_cpu_inc(mib[0]->mibs[field])
#define SNMP_DEC_STATS(mib, field) \
- irqsafe_cpu_dec(mib[0]->mibs[field])
+ this_cpu_dec(mib[0]->mibs[field])
#define SNMP_ADD_STATS_BH(mib, field, addend) \
__this_cpu_add(mib[0]->mibs[field], addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
- irqsafe_cpu_add(mib[0]->mibs[field], addend)
+ this_cpu_add(mib[0]->mibs[field], addend)
#define SNMP_ADD_STATS(mib, field, addend) \
- irqsafe_cpu_add(mib[0]->mibs[field], addend)
+ this_cpu_add(mib[0]->mibs[field], addend)
/*
* Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
* to make @ptr a non-percpu pointer.
*/
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
- irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
+ this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
+ this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
diff --git a/include/net/sock.h b/include/net/sock.h
index abb6e0f0c3c..bb972d254df 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -53,6 +53,8 @@
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
@@ -62,6 +64,22 @@
#include <net/dst.h>
#include <net/checksum.h>
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+#else
+static inline
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+ return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+}
+#endif
/*
* This structure really needs to be cleaned up.
* Most of it is for TCP, and not used by any of
@@ -167,6 +185,7 @@ struct sock_common {
/* public: */
};
+struct cg_proto;
/**
* struct sock - network layer representation of sockets
* @__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +246,7 @@ struct sock_common {
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_classid: this socket's cgroup classid
+ * @sk_cgrp: this socket's cgroup-specific proto data
* @sk_write_pending: a write to stream socket waits to start
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
@@ -306,8 +326,8 @@ struct sock {
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
- int sk_route_caps;
- int sk_route_nocaps;
+ netdev_features_t sk_route_caps;
+ netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
int sk_rcvlowat;
@@ -320,6 +340,9 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
+#ifdef CONFIG_CGROUPS
+ __u32 sk_cgrp_prioidx;
+#endif
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
@@ -338,6 +361,7 @@ struct sock {
#endif
__u32 sk_mark;
u32 sk_classid;
+ struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_write_space)(struct sock *sk);
@@ -563,6 +587,7 @@ enum sock_flags {
SOCK_FASYNC, /* fasync() active */
SOCK_RXQ_OVFL,
SOCK_ZEROCOPY, /* buffers from userspace */
+ SOCK_WIFI_STATUS, /* push wifi status to userspace */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -637,12 +662,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
/*
* Take into account size of receive queue and backlog queue
+ * Do not take this skb's truesize into account,
+ * so that even a single big packet can always get in.
*/
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize + skb->truesize > sk->sk_rcvbuf;
+ return qsize > sk->sk_rcvbuf;
}
/* The per-socket spinlock must be held here. */
@@ -833,6 +860,37 @@ struct proto {
#ifdef SOCK_REFCNT_DEBUG
atomic_t socks;
#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+ /*
+ * cgroup specific init/deinit functions. Called once for all
+ * protocols that implement it, from the cgroup populate function.
+ * This function has to set up any files the protocol wants to
+ * appear in the kmem cgroup filesystem.
+ */
+ int (*init_cgroup)(struct cgroup *cgrp,
+ struct cgroup_subsys *ss);
+ void (*destroy_cgroup)(struct cgroup *cgrp,
+ struct cgroup_subsys *ss);
+ struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+ void (*enter_memory_pressure)(struct sock *sk);
+ struct res_counter *memory_allocated; /* Current allocated memory. */
+ struct percpu_counter *sockets_allocated; /* Current number of sockets. */
+ int *memory_pressure;
+ long *sysctl_mem;
+ /*
+ * The memcg field is used to find which memcg we directly belong to.
+ * Each memcg struct can hold more than one cg_proto, so container_of
+ * won't really cut it.
+ *
+ * The elegant solution would be having an inverse function to
+ * proto_cgroup in struct proto, but that means polluting the structure
+ * for everybody, instead of just for memcg users.
+ */
+ struct mem_cgroup *memcg;
};
extern int proto_register(struct proto *prot, int alloc_slab);
@@ -851,7 +909,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}
-static inline void sk_refcnt_debug_release(const struct sock *sk)
+inline void sk_refcnt_debug_release(const struct sock *sk)
{
if (atomic_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -863,6 +921,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+extern struct jump_label_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+ struct cg_proto *cg_proto)
+{
+ return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+ struct cg_proto *cg_proto)
+{
+ return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+ if (!sk->sk_prot->memory_pressure)
+ return false;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return !!*sk->sk_cgrp->memory_pressure;
+
+ return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+ int *memory_pressure = sk->sk_prot->memory_pressure;
+
+ if (!memory_pressure)
+ return;
+
+ if (*memory_pressure)
+ *memory_pressure = 0;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+ struct proto *prot = sk->sk_prot;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ if (*cg_proto->memory_pressure)
+ *cg_proto->memory_pressure = 0;
+ }
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+ if (!sk->sk_prot->enter_memory_pressure)
+ return;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+ struct proto *prot = sk->sk_prot;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ cg_proto->enter_memory_pressure(sk);
+ }
+
+ sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+ long *prot = sk->sk_prot->sysctl_mem;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ prot = sk->sk_cgrp->sysctl_mem;
+ return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+ unsigned long amt,
+ int *parent_status)
+{
+ struct res_counter *fail;
+ int ret;
+
+ ret = res_counter_charge(prot->memory_allocated,
+ amt << PAGE_SHIFT, &fail);
+
+ if (ret < 0)
+ *parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+ unsigned long amt)
+{
+ res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+ u64 ret;
+ ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+ return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return memcg_memory_allocated_read(sk->sk_cgrp);
+
+ return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+ /* update the root cgroup regardless */
+ atomic_long_add_return(amt, prot->memory_allocated);
+ return memcg_memory_allocated_read(sk->sk_cgrp);
+ }
+
+ return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+ parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+ memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+ atomic_long_sub(amt, prot->memory_allocated);
+}
+
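The parent_status handshake in the helpers above exists so that a charge that already failed against the cgroup limit is not uncharged a second time on the release path. A toy userspace model of that handshake (the toy_* names stand in for res_counter_charge()/res_counter_uncharge(); the real kernel semantics carry more detail):

#include <stdio.h>

#define TOY_OVER_LIMIT 1

struct toy_counter {
	long usage;
	long limit;
};

static int toy_charge(struct toy_counter *c, long amt, int *status)
{
	if (c->usage + amt > c->limit) {
		*status = TOY_OVER_LIMIT;	/* remember the failure ...     */
		return -1;
	}
	c->usage += amt;
	return 0;
}

static void toy_uncharge(struct toy_counter *c, long amt, int status)
{
	if (status == TOY_OVER_LIMIT)
		return;				/* ... so we do not underflow   */
	c->usage -= amt;
}

int main(void)
{
	struct toy_counter c = { .usage = 0, .limit = 10 };
	int status = 0;

	toy_charge(&c, 20, &status);	/* exceeds the limit -> OVER_LIMIT */
	toy_uncharge(&c, 20, status);	/* skipped: nothing was charged    */
	printf("usage=%ld status=%d\n", c.usage, status);
	return 0;
}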
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ percpu_counter_dec(cg_proto->sockets_allocated);
+ }
+
+ percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+ struct cg_proto *cg_proto = sk->sk_cgrp;
+
+ for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+ percpu_counter_inc(cg_proto->sockets_allocated);
+ }
+
+ percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+
+ return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+ return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+ return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+ if (!prot->memory_pressure)
+ return false;
+ return !!*prot->memory_pressure;
+}
+
#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
@@ -1089,8 +1349,8 @@ extern struct sock *sk_alloc(struct net *net, int family,
struct proto *prot);
extern void sk_free(struct sock *sk);
extern void sk_release_kernel(struct sock *sk);
-extern struct sock *sk_clone(const struct sock *sk,
- const gfp_t priority);
+extern struct sock *sk_clone_lock(const struct sock *sk,
+ const gfp_t priority);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
@@ -1393,7 +1653,7 @@ static inline int sk_can_gso(const struct sock *sk)
extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
-static inline void sk_nocaps_add(struct sock *sk, int flags)
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
sk->sk_route_nocaps |= flags;
sk->sk_route_caps &= ~flags;
@@ -1670,7 +1930,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
- sk->sk_prot->enter_memory_pressure(sk);
+ sk_enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
}
return page;
@@ -1714,6 +1974,8 @@ static inline int sock_intr_errno(long timeo)
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
+extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb);
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -1741,6 +2003,9 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
__sock_recv_timestamp(msg, sk, skb);
else
sk->sk_stamp = kt;
+
+ if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+ __sock_recv_wifi_status(msg, sk, skb);
}
extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb18c4d69ab..0118ea999f6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -44,6 +44,7 @@
#include <net/dst.h>
#include <linux/seq_file.h>
+#include <linux/memcontrol.h>
extern struct inet_hashinfo tcp_hashinfo;
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
}
if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
return true;
return false;
}
@@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk);
struct tcp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header; /* For incoming frames */
@@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp)
static inline int tcp_is_fack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok & 2;
+ return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}
static inline void tcp_enable_fack(struct tcp_sock *tp)
{
- tp->rx_opt.sack_ok |= 2;
+ tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+ return 3;
+}
+
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
* the default reordering threshold - but if reordering increases,
@@ -1144,7 +1152,7 @@ struct tcp6_md5sig_key {
/* - sock block */
struct tcp_md5sig_info {
struct tcp4_md5sig_key *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct tcp6_md5sig_key *keys6;
u32 entries6;
u32 alloced6;
@@ -1171,7 +1179,7 @@ struct tcp6_pseudohdr {
union tcp_md5sum_block {
struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct tcp6_pseudohdr ip6;
#endif
};
@@ -1430,7 +1438,8 @@ extern struct request_sock_ops tcp6_request_sock_ops;
extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644
index 00000000000..3512082fa90
--- /dev/null
+++ b/include/net/tcp_memcontrol.h
@@ -0,0 +1,19 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct tcp_memcontrol {
+ struct cg_proto cg_proto;
+ /* per-cgroup tcp memory pressure knobs */
+ struct res_counter tcp_memory_allocated;
+ struct percpu_counter tcp_sockets_allocated;
+ /* those two are read-mostly, leave them at the end */
+ long tcp_prot_mem[3];
+ int tcp_memory_pressure;
+};
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
+#endif /* _TCP_MEMCG_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 3b285f402f4..e39592f682c 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -41,7 +41,7 @@
struct udp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
@@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport,
int dif);
+extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport,
+ int dif, struct udp_table *tbl);
extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif);
+extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, struct udp_table *tbl);
/*
* SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
} while(0)
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#define UDPX_INC_STATS_BH(sk, field) \
do { \
if ((sk)->sk_family == AF_INET) \
@@ -258,5 +264,6 @@ extern void udp4_proc_exit(void);
extern void udp_init(void);
extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features);
#endif /* _UDP_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b203e14d26b..89174e29dca 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -827,6 +827,14 @@ static inline bool addr_match(const void *token1, const void *token2,
return true;
}
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+ /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+ if (prefixlen == 0)
+ return true;
+ return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+}
+
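The prefixlen == 0 special case above matters because shifting a 32-bit value by 32 is undefined in C, so a /0 prefix cannot simply fall through to the mask computation. A standalone sketch of the same guard, with a small usage example (prefix4_match is a hypothetical userspace name, not a kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static int prefix4_match(uint32_t a1, uint32_t a2, uint8_t prefixlen)
{
	if (prefixlen == 0)
		return 1;			/* every address matches a /0 */
	return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
}

int main(void)
{
	/* addresses are kept in network byte order, as in the kernel code */
	uint32_t a = inet_addr("192.168.1.10");
	uint32_t b = inet_addr("192.168.1.200");

	printf("/24 match: %d, /28 match: %d, /0 match: %d\n",
	       prefix4_match(a, b, 24), prefix4_match(a, b, 28),
	       prefix4_match(a, b, 0));
	return 0;
}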
static __inline__
__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
{
@@ -1209,8 +1217,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
break;
case AF_INET6:
- ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
- ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
+ *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
+ *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
break;
}
}
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 639a4491fc0..99965395c5f 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -281,7 +281,7 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN ?
- vlan_dev_real_dev(dev) : 0;
+ vlan_dev_real_dev(dev) : NULL;
}
#endif /* IB_ADDR_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c8f94e8db69..83f77ac3395 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -38,6 +38,9 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
+/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
+extern struct class cm_class;
+
enum ib_cm_state {
IB_CM_IDLE,
IB_CM_LISTEN,
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index d1e95c6ac77..5a35a2a2d3c 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
u8 map_dest;
u8 spma;
u8 probe_tries;
+ u8 priority;
u8 dest_addr[ETH_ALEN];
u8 ctl_src_addr[ETH_ALEN];
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
* @lport: The associated local port
* @fcoe_pending_queue: The pending Rx queue of skbs
* @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority: Packet priority (DCB)
* @max_queue_depth: Max queue depth of pending queue
* @min_queue_depth: Min queue depth of pending queue
* @timer: The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
struct fc_lport *lport;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
+ u8 priority;
u32 max_queue_depth;
u32 min_queue_depth;
struct timer_list timer;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 5591ed54dc9..77273f2fdd8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -185,7 +185,6 @@ typedef void (*activate_complete)(void *, int);
struct scsi_device_handler {
/* Used by the infrastructure */
struct list_head list; /* list of scsi_device_handlers */
- int idx;
/* Filled by the hardware handler */
struct module *module;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 50266c9405f..5f7d5b3b1c6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -669,6 +669,9 @@ struct Scsi_Host {
/* Asynchronous scan in progress */
unsigned async_scan:1;
+ /* Don't resume host in EH */
+ unsigned eh_noresume:1;
+
/*
* Optional work queue to be utilized by the transport
*/
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 5994bcc1b01..2c3a46d102f 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -142,7 +142,7 @@ struct iscsi_transport {
int (*get_iface_param) (struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf);
- mode_t (*attr_is_visible)(int param_type, int param);
+ umode_t (*attr_is_visible)(int param_type, int param);
int (*bsg_request)(struct bsg_job *job);
};
@@ -211,6 +211,11 @@ struct iscsi_cls_session {
unsigned int target_id;
bool ida_used;
+ /*
+ * pid of the userspace process that created the session, or -1
+ * if it was created by the kernel.
+ */
+ pid_t creator;
int state;
int sid; /* session id */
void *dd_data; /* LLD private data */
diff --git a/include/sound/info.h b/include/sound/info.h
index 5492cc40dc5..9ca1a493d37 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -72,7 +72,7 @@ struct snd_info_entry_ops {
struct snd_info_entry {
const char *name;
- mode_t mode;
+ umode_t mode;
long size;
unsigned short content;
union {
diff --git a/include/sound/saif.h b/include/sound/saif.h
index d0e0de7984e..f22f3e16edf 100644
--- a/include/sound/saif.h
+++ b/include/sound/saif.h
@@ -10,7 +10,7 @@
#define __SOUND_SAIF_H__
struct mxs_saif_platform_data {
- int (*init) (void);
- int (*get_master_id) (unsigned int saif_id);
+ bool master_mode; /* if true use master mode */
+ int master_id; /* id of the master if in slave mode */
};
#endif
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7f5fed3c89e..6873c7dd914 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_SE_CMD_FAILED = 0x00000400,
+ SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
+ SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
- atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport specific error status */
- int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
- int check_release:1;
- int cmd_wait_set:1;
+ unsigned check_release:1;
+ unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
- struct se_device *se_obj_ptr;
- struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
struct work_struct work;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- u32 t_task_pt_sgl_num;
-
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
- int sess_tearing_down:1;
+ unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
} ____cacheline_aligned;
struct se_device {
- /* Set to 1 if thread is NOT sleeping on thread_sem */
- u8 thread_active;
- u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
- atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
- atomic_t dev_tur_active;
atomic_t execute_tasks;
- atomic_t dev_status_thr_count;
- atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
- spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
- spinlock_t state_task_lock;
- spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
- spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
- pid_t process_thread_pid;
- struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
- /* Linked list for struct se_global->g_se_dev_list */
- struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index c16e9431dd0..dac4f2d859f 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -10,29 +10,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
-#define PYX_TRANSPORT_WRITE_PENDING 1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
-#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
-#define PYX_TRANSPORT_USE_SENSE_REASON -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE 0
-#define TRANSPORT_PLUGIN_REGISTERED 1
-
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 748ff7cbe55..319538bf17d 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -573,9 +573,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
);
TRACE_EVENT(ext4_mb_release_group_pa,
- TP_PROTO(struct ext4_prealloc_space *pa),
+ TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
- TP_ARGS(pa),
+ TP_ARGS(sb, pa),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -585,7 +585,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
),
TP_fast_assign(
- __entry->dev = pa->pa_inode->i_sb->s_dev;
+ __entry->dev = sb->s_dev;
__entry->pa_pstart = pa->pa_pstart;
__entry->pa_len = pa->pa_len;
),
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index a9c87ad8331..5f889f16b0c 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -147,7 +147,7 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
TP_ARGS(call_site, ptr)
);
-TRACE_EVENT(mm_page_free_direct,
+TRACE_EVENT(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
@@ -169,7 +169,7 @@ TRACE_EVENT(mm_page_free_direct,
__entry->order)
);
-TRACE_EVENT(mm_pagevec_free,
+TRACE_EVENT(mm_page_free_batched,
TP_PROTO(struct page *page, int cold),
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
new file mode 100644
index 00000000000..dd4ba3b9200
--- /dev/null
+++ b/include/trace/events/oom.h
@@ -0,0 +1,33 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM oom
+
+#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OOM_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(oom_score_adj_update,
+
+ TP_PROTO(struct task_struct *task),
+
+ TP_ARGS(task),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid)
+ __array( char, comm, TASK_COMM_LEN )
+ __field( int, oom_score_adj)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = task->pid;
+ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+ __entry->oom_score_adj = task->signal->oom_score_adj;
+ ),
+
+ TP_printk("pid=%d comm=%s oom_score_adj=%d",
+ __entry->pid, __entry->comm, __entry->oom_score_adj)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 669fbd62ec2..d2d88bed891 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -241,24 +241,73 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode and "End" for
- * leaving it.
+ * as argument: "Start" for entering dyntick-idle mode, "End" for
+ * leaving it, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle. "Error on entry: not idle task" and "Error on
+ * exit: not idle task" indicate that a non-idle task is erroneously
+ * toying with the idle loop.
+ *
+ * These events also take a pair of numbers, which indicate the nesting
+ * depth before and after the event of interest. Note that task-related
+ * events use the upper bits of each number, while interrupt-related
+ * events use the lower bits.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(char *polarity),
+ TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
- TP_ARGS(polarity),
+ TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
__field(char *, polarity)
+ __field(long long, oldnesting)
+ __field(long long, newnesting)
),
TP_fast_assign(
__entry->polarity = polarity;
+ __entry->oldnesting = oldnesting;
+ __entry->newnesting = newnesting;
+ ),
+
+ TP_printk("%s %llx %llx", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting)
+);
+
+/*
+ * Tracepoint for RCU preparation for idle, the goal being to get RCU
+ * processing done so that the current CPU can shut off its scheduling
+ * clock and enter dyntick-idle mode. One way to accomplish this is
+ * to drain all RCU callbacks from this CPU, and the other is to have
+ * done everything RCU requires for the current grace period. In this
+ * latter case, the CPU will be awakened at the end of the current grace
+ * period in order to process the remainder of its callbacks.
+ *
+ * These tracepoints take a string as argument:
+ *
+ * "No callbacks": Nothing to do, no callbacks on this CPU.
+ * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
+ * "Begin holdoff": Attempt failed, don't retry until next jiffy.
+ * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
+ * "More callbacks": Still more callbacks, try again to clear them out.
+ * "Callbacks drained": All callbacks processed, off to dyntick idle!
+ * "Timer": Timer fired to cause CPU to continue processing callbacks.
+ */
+TRACE_EVENT(rcu_prep_idle,
+
+ TP_PROTO(char *reason),
+
+ TP_ARGS(reason),
+
+ TP_STRUCT__entry(
+ __field(char *, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
),
- TP_printk("%s", __entry->polarity)
+ TP_printk("%s", __entry->reason)
);
/*
@@ -412,27 +461,71 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
- * invoked. The first argument is the name of the RCU flavor and
- * the second argument is number of callbacks actually invoked.
+ * invoked. The first argument is the name of the RCU flavor,
+ * the second argument is number of callbacks actually invoked,
+ * the third argument (cb) is whether or not any of the callbacks that
+ * were ready to invoke at the beginning of this batch are still
+ * queued, the fourth argument (nr) is the return value of need_resched(),
+ * the fifth argument (iit) is 1 if the current task is the idle task,
+ * and the sixth argument (risk) is the return value from
+ * rcu_is_callbacks_kthread().
*/
TRACE_EVENT(rcu_batch_end,
- TP_PROTO(char *rcuname, int callbacks_invoked),
+ TP_PROTO(char *rcuname, int callbacks_invoked,
+ bool cb, bool nr, bool iit, bool risk),
- TP_ARGS(rcuname, callbacks_invoked),
+ TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(int, callbacks_invoked)
+ __field(bool, cb)
+ __field(bool, nr)
+ __field(bool, iit)
+ __field(bool, risk)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->callbacks_invoked = callbacks_invoked;
+ __entry->cb = cb;
+ __entry->nr = nr;
+ __entry->iit = iit;
+ __entry->risk = risk;
+ ),
+
+ TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
+ __entry->rcuname, __entry->callbacks_invoked,
+ __entry->cb ? 'C' : '.',
+ __entry->nr ? 'S' : '.',
+ __entry->iit ? 'I' : '.',
+ __entry->risk ? 'R' : '.')
+);
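
A sketch of the matching caller-side change (hedged; rcu_do_batch() in kernel/rcutree.c is the presumed call site, and 'list' standing for the remaining ready-to-invoke callbacks is an assumption):

    /* Old: trace_rcu_batch_end(rsp->name, count); */
    trace_rcu_batch_end(rsp->name, count,
    		    !!list,                      /* cb: ready callbacks still queued? */
    		    need_resched(),              /* nr */
    		    is_idle_task(current),       /* iit */
    		    rcu_is_callbacks_kthread()); /* risk */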
+
+/*
+ * Tracepoint for rcutorture readers. The first argument is the name
+ * of the RCU flavor from rcutorture's viewpoint and the second argument
+ * is the callback address.
+ */
+TRACE_EVENT(rcu_torture_read,
+
+ TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+
+ TP_ARGS(rcutorturename, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcutorturename)
+ __field(struct rcu_head *, rhp)
+ ),
+
+ TP_fast_assign(
+ __entry->rcutorturename = rcutorturename;
+ __entry->rhp = rhp;
),
- TP_printk("%s CBs-invoked=%d",
- __entry->rcuname, __entry->callbacks_invoked)
+ TP_printk("%s torture read %p",
+ __entry->rcutorturename, __entry->rhp)
);
#else /* #ifdef CONFIG_RCU_TRACE */
@@ -443,13 +536,16 @@ TRACE_EVENT(rcu_batch_end,
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
+#define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
-#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
+ do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 1e3193b8fcc..12fbf43524e 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -55,6 +55,15 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read,
);
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
DECLARE_EVENT_CLASS(regmap_block,
TP_PROTO(struct device *dev, unsigned int reg, int count),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18b63b..6ba596b07a7 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -331,6 +331,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_ARGS(tsk, delay));
/*
+ * Tracepoint for accounting blocked time (time the task spends in uninterruptible sleep).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+ TP_ARGS(tsk, delay));
+
+/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
@@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
+#ifdef CREATE_TRACE_POINTS
+static inline u64 trace_get_sleeptime(struct task_struct *tsk)
+{
+#ifdef CONFIG_SCHEDSTATS
+ u64 block, sleep;
+
+ block = tsk->se.statistics.block_start;
+ sleep = tsk->se.statistics.sleep_start;
+ tsk->se.statistics.block_start = 0;
+ tsk->se.statistics.sleep_start = 0;
+
+ return block ? block : sleep ? sleep : 0;
+#else
+ return 0;
+#endif
+}
+#endif
+
+/*
+ * Tracepoint for accounting sleeptime (time the task is sleeping
+ * or waiting for I/O).
+ */
+TRACE_EVENT(sched_stat_sleeptime,
+
+ TP_PROTO(struct task_struct *tsk, u64 now),
+
+ TP_ARGS(tsk, now),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, sleeptime )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->sleeptime = trace_get_sleeptime(tsk);
+ __entry->sleeptime = __entry->sleeptime ?
+ now - __entry->sleeptime : 0;
+ )
+ TP_perf_assign(
+ __perf_count(__entry->sleeptime);
+ ),
+
+ TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->sleeptime)
+);
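
A worked sketch of what this event reports, derived from trace_get_sleeptime() above (the call site shown is hypothetical; the real one is added elsewhere in this series):

    /*
     * Suppose the task recorded se.statistics.sleep_start = t0 when it went
     * to sleep and the event fires at 'now'.  trace_get_sleeptime() returns
     * t0 and clears both start stamps, so the event reports:
     *
     *     sleeptime = now - t0   [ns]
     *
     * With CONFIG_SCHEDSTATS=n, or if neither block_start nor sleep_start
     * was set, the helper returns 0 and the event reports sleeptime=0.
     */
    trace_sched_stat_sleeptime(prev, rq->clock);   /* hypothetical call site */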
+
/*
* Tracepoint for showing priority inheritance modifying a task's
* priority.
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
new file mode 100644
index 00000000000..b53add02e92
--- /dev/null
+++ b/include/trace/events/task.h
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM task
+
+#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TASK_H
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(task_newtask,
+
+ TP_PROTO(struct task_struct *task, unsigned long clone_flags),
+
+ TP_ARGS(task, clone_flags),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid)
+ __array( char, comm, TASK_COMM_LEN)
+ __field( unsigned long, clone_flags)
+ __field( int, oom_score_adj)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = task->pid;
+ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+ __entry->clone_flags = clone_flags;
+ __entry->oom_score_adj = task->signal->oom_score_adj;
+ ),
+
+ TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%d",
+ __entry->pid, __entry->comm,
+ __entry->clone_flags, __entry->oom_score_adj)
+);
+
+TRACE_EVENT(task_rename,
+
+ TP_PROTO(struct task_struct *task, char *comm),
+
+ TP_ARGS(task, comm),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid)
+ __array( char, oldcomm, TASK_COMM_LEN)
+ __array( char, newcomm, TASK_COMM_LEN)
+ __field( int, oom_score_adj)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = task->pid;
+ memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
+ memcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ __entry->oom_score_adj = task->signal->oom_score_adj;
+ ),
+
+ TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%d",
+ __entry->pid, __entry->oldcomm,
+ __entry->newcomm, __entry->oom_score_adj)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
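
The new task events only become useful once call sites exist; a minimal sketch of how they might be hooked up (the fork.c/exec.c locations are assumptions suggested by the event names, not shown in this hunk):

    /* In copy_process(), once the child task is fully set up (assumed location): */
    trace_task_newtask(p, clone_flags);

    /* In set_task_comm(), before the old comm is overwritten (assumed location): */
    trace_task_rename(tsk, buf);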
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index b99caa8b780..8588a891802 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -21,6 +21,16 @@
{I_REFERENCED, "I_REFERENCED"} \
)
+#define WB_WORK_REASON \
+ {WB_REASON_BACKGROUND, "background"}, \
+ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
+ {WB_REASON_SYNC, "sync"}, \
+ {WB_REASON_PERIODIC, "periodic"}, \
+ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
+ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
+ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
+ {WB_REASON_FORKER_THREAD, "forker_thread"}
+
struct wb_writeback_work;
DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
- wb_reason_name[__entry->reason]
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
- wb_reason_name[__entry->reason])
+ __print_symbolic(__entry->reason, WB_WORK_REASON)
+ )
);

TRACE_EVENT(global_dirty_state,
@@ -289,12 +300,13 @@ TRACE_EVENT(balance_dirty_pages,
unsigned long dirty_ratelimit,
unsigned long task_ratelimit,
unsigned long dirtied,
+ unsigned long period,
long pause,
unsigned long start_time),
TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
dirty_ratelimit, task_ratelimit,
- dirtied, pause, start_time),
+ dirtied, period, pause, start_time),
TP_STRUCT__entry(
__array( char, bdi, 32)
@@ -309,6 +321,8 @@ TRACE_EVENT(balance_dirty_pages,
__field(unsigned int, dirtied_pause)
__field(unsigned long, paused)
__field( long, pause)
+ __field(unsigned long, period)
+ __field( long, think)
),
TP_fast_assign(
@@ -325,6 +339,9 @@ TRACE_EVENT(balance_dirty_pages,
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->dirtied = dirtied;
__entry->dirtied_pause = current->nr_dirtied_pause;
+ __entry->think = current->dirty_paused_when == 0 ? 0 :
+ (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
+ __entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
),
@@ -335,7 +352,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld",
+ "paused=%lu pause=%ld period=%lu think=%ld",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -347,7 +364,9 @@ TRACE_EVENT(balance_dirty_pages,
__entry->dirtied,
__entry->dirtied_pause,
__entry->paused, /* ms */
- __entry->pause /* ms */
+ __entry->pause, /* ms */
+ __entry->period, /* ms */
+ __entry->think /* ms */
)
);
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c..378c7ed6760 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
};
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
/* Init with the board info */
extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
- return 0;
-}
-#endif
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index d29c153705b..cc2e1a7e44e 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -29,11 +29,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages,
bool highmem);
void free_xenballooned_pages(int nr_pages, struct page **pages);
-struct sys_device;
+struct device;
#ifdef CONFIG_XEN_SELFBALLOONING
-extern int register_xen_selfballooning(struct sys_device *sysdev);
+extern int register_xen_selfballooning(struct device *dev);
#else
-static inline int register_xen_selfballooning(struct sys_device *sysdev)
+static inline int register_xen_selfballooning(struct device *dev)
{
return -ENOSYS;
}
diff --git a/include/xen/events.h b/include/xen/events.h
index d287997d3ea..0f773708e02 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -37,6 +37,13 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+/*
+ * Allow extra references to event channels exposed to userspace by evtchn
+ */
+int evtchn_make_refcounted(unsigned int evtchn);
+int evtchn_get(unsigned int evtchn);
+void evtchn_put(unsigned int evtchn);
+
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
int resend_irq_on_evtchn(unsigned int irq);
void rebind_evtchn_irq(int evtchn, int irq);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 11e2dfce42f..15f8a00ff00 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -62,6 +62,24 @@ int gnttab_resume(void);
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
int readonly);
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+ int flags, unsigned page_off,
+ unsigned length);
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+ domid_t trans_domid,
+ grant_ref_t trans_gref);
+
+/*
+ * Are sub-page grants available on this version of Xen? Returns true if they
+ * are, and false if they're not.
+ */
+bool gnttab_subpage_grants_available(void);
+
+/*
+ * Are transitive grants available on this version of Xen? Returns true if they
+ * are, and false if they're not.
+ */
+bool gnttab_trans_grants_available(void);
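
A hedged sketch of how a caller might use the new sub-page helper, guarding on availability first. It assumes, like the full-page variant, that the function returns a grant reference on success or a negative errno; the flags value is a placeholder:

    int ref;

    if (!gnttab_subpage_grants_available())
    	return -ENOSYS;		/* fall back to full-page grants */

    /* Grant 'length' bytes starting at 'page_off' within 'frame' to domid. */
    ref = gnttab_grant_foreign_access_subpage(domid, frame,
    					      0 /* flags, e.g. read-only */,
    					      page_off, length);
    if (ref < 0)
    	return ref;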
/*
* End access through the given grant reference, iff the grant entry is no
@@ -108,6 +126,13 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly);
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length);
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref);
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
@@ -145,9 +170,11 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
unsigned long max_nr_gframes,
- struct grant_entry **__shared);
-void arch_gnttab_unmap_shared(struct grant_entry *shared,
- unsigned long nr_gframes);
+ void **__shared);
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+ unsigned long max_nr_gframes,
+ grant_status_t **__shared);
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
extern unsigned long xen_hvm_resume_frames;
unsigned int gnttab_max_grant_frames(void);
@@ -155,9 +182,9 @@ unsigned int gnttab_max_grant_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct page **pages, unsigned int count);
+ struct page **pages, unsigned int count, bool clear_pte);
#endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 39e571796e3..a17d84433e6 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -85,12 +85,22 @@
*/
/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
* A grant table comprises a packed array of grant entries in one or more
* page frames shared between Xen and a guest.
* [XEN]: This field is written by Xen and read by the sharing guest.
* [GST]: This field is written by the guest and read by Xen.
*/
-struct grant_entry {
+
+/*
+ * Version 1 of the grant table entry structure is maintained purely
+ * for backwards compatibility. New guests should use version 2.
+ */
+struct grant_entry_v1 {
/* GTF_xxx: various type and flag information. [XEN,GST] */
uint16_t flags;
/* The domain being granted foreign privileges. [GST] */
@@ -108,10 +118,13 @@ struct grant_entry {
* GTF_permit_access: Allow @domid to map/access @frame.
* GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
* to this guest. Xen writes the page number to @frame.
+ * GTF_transitive: Allow @domid to transitively access a subrange of
+ * @trans_grant in @trans_domid. No mappings are allowed.
*/
#define GTF_invalid (0U<<0)
#define GTF_permit_access (1U<<0)
#define GTF_accept_transfer (2U<<0)
+#define GTF_transitive (3U<<0)
#define GTF_type_mask (3U<<0)
/*
@@ -119,6 +132,9 @@ struct grant_entry {
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * GTF_sub_page: Grant access to only a subrange of the page. @domid
+ * will only be allowed to copy from the grant, and not
+ * map it. [GST]
*/
#define _GTF_readonly (2)
#define GTF_readonly (1U<<_GTF_readonly)
@@ -126,6 +142,8 @@ struct grant_entry {
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_sub_page (8)
+#define GTF_sub_page (1U<<_GTF_sub_page)
/*
* Subflags for GTF_accept_transfer:
@@ -142,15 +160,81 @@ struct grant_entry {
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+/*
+ * Version 2 grant table entries. These fulfil the same role as
+ * version 1 entries, but can represent more complicated operations.
+ * Any given domain will have either a version 1 or a version 2 table,
+ * and every entry in the table will be the same version.
+ *
+ * The interface by which domains use grant references does not depend
+ * on the grant table version in use by the other domain.
+ */
-/***********************************
- * GRANT TABLE QUERIES AND USES
+/*
+ * Version 1 and version 2 grant entries share a common prefix. The
+ * fields of the prefix are documented as part of struct
+ * grant_entry_v1.
*/
+struct grant_entry_header {
+ uint16_t flags;
+ domid_t domid;
+};
/*
- * Reference to a grant entry in a specified domain's grant table.
+ * Version 2 of the grant entry structure. It is a union because three
+ * different types are supported: full_page, sub_page and transitive.
+ */
+union grant_entry_v2 {
+ struct grant_entry_header hdr;
+
+ /*
+ * This member is used for V1-style full page grants, where either:
+ *
+ * -- hdr.type is GTF_accept_transfer, or
+ * -- hdr.type is GTF_permit_access and GTF_sub_page is not set.
+ *
+ * In that case, the frame field has the same semantics as the
+ * field of the same name in the V1 entry structure.
+ */
+ struct {
+ struct grant_entry_header hdr;
+ uint32_t pad0;
+ uint64_t frame;
+ } full_page;
+
+ /*
+ * If the grant type is GTF_permit_access and GTF_sub_page is set,
+ * @domid is allowed to access bytes [@page_off,@page_off+@length)
+ * in frame @frame.
+ */
+ struct {
+ struct grant_entry_header hdr;
+ uint16_t page_off;
+ uint16_t length;
+ uint64_t frame;
+ } sub_page;
+
+ /*
+ * If the grant is GTF_transitive, @domid is allowed to use the
+ * grant @gref in domain @trans_domid, as if it was the local
+ * domain. Obviously, the transitive access must be compatible
+ * with the original grant.
+ */
+ struct {
+ struct grant_entry_header hdr;
+ domid_t trans_domid;
+ uint16_t pad0;
+ grant_ref_t gref;
+ } transitive;
+
+ uint32_t __spacer[4]; /* Pad to a power of two */
+};
+
+typedef uint16_t grant_status_t;
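
To make the union layout concrete, a sketch of filling a version 2 sub-page entry by hand (illustrative only; gnttab_shared.v2 as the mapped v2 table is an assumption, and the in-kernel helpers are the supported interface):

    union grant_entry_v2 *e = &gnttab_shared.v2[ref];	/* assumed v2 table pointer */

    e->sub_page.hdr.domid = otherend_domid;
    e->sub_page.frame     = gfn;		/* frame containing the data */
    e->sub_page.page_off  = offset;		/* first granted byte within the frame */
    e->sub_page.length    = len;		/* number of bytes granted */
    wmb();					/* publish the body before the flags */
    e->sub_page.hdr.flags = GTF_permit_access | GTF_sub_page | GTF_readonly;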
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
*/
-typedef uint32_t grant_ref_t;
/*
* Handle to track a mapping created via a grant reference.
@@ -322,6 +406,79 @@ struct gnttab_query_size {
DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
/*
+ * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>. <new_addr> will be
+ * redirected to the null entry.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_and_replace 7
+struct gnttab_unmap_and_replace {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t new_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
+
+/*
+ * GNTTABOP_set_version: Request a particular version of the grant
+ * table shared table structure. This operation can only be performed
+ * once in any given domain. It must be performed before any grants
+ * are activated; otherwise, the domain will be stuck with version 1.
+ * The only defined versions are 1 and 2.
+ */
+#define GNTTABOP_set_version 8
+struct gnttab_set_version {
+ /* IN parameters */
+ uint32_t version;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version);
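
A sketch of how a guest might request version 2 before activating any grants (hedged; in Linux this is wrapped by drivers/xen/grant-table.c rather than issued directly by drivers):

    struct gnttab_set_version gsv = { .version = 2 };
    int rc;

    rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
    if (rc)
    	/* hypervisor too old, or grants already active: stay on version 1 */;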
+
+/*
+ * GNTTABOP_get_status_frames: Get the list of frames used to store grant
+ * status for <dom>. In grant format version 2, the status is separated
+ * from the other shared grant fields to allow more efficient synchronization
+ * using barriers instead of atomic cmpxchg operations.
+ * <nr_frames> specifies the size of the vector <frame_list>.
+ * The frame addresses are returned in the <frame_list>.
+ * Only <nr_frames> addresses are returned, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_get_status_frames 9
+struct gnttab_get_status_frames {
+ /* IN parameters. */
+ uint32_t nr_frames;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+ GUEST_HANDLE(uint64_t) frame_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames);
+
+/*
+ * GNTTABOP_get_version: Get the grant table version which is in
+ * effect for domain <dom>.
+ */
+#define GNTTABOP_get_version 10
+struct gnttab_get_version {
+ /* IN parameters */
+ domid_t dom;
+ uint16_t pad;
+ /* OUT parameters */
+ uint32_t version;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
+
+/*
* Bitfield values for update_pin_status.flags.
*/
/* Map the grant entry for access by I/O devices. */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index f0b6890370b..7cdfca24eaf 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT,
- XS_RESET_WATCHES
+ XS_RESTRICT
};
#define XS_WRITE_NONE "NONE"
@@ -88,4 +87,7 @@ struct xenstore_domain_interface {
XENSTORE_RING_IDX rsp_cons, rsp_prod;
};
+/* Violating this is very bad. See docs/misc/xenstore.txt. */
+#define XENSTORE_PAYLOAD_MAX 4096
+
#endif /* _XS_WIRE_H */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 6a6e9144934..a890804945e 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -523,6 +523,8 @@ struct tmem_op {
} u;
};
+DEFINE_GUEST_HANDLE(u64);
+
#else /* __ASSEMBLY__ */
/* In assembly code we cannot use C numeric constant suffixes. */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b1b6676c1c4..e8c599b237c 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -85,8 +85,6 @@ struct xenbus_device_id
/* A xenbus driver. */
struct xenbus_driver {
- char *name;
- struct module *owner;
const struct xenbus_device_id *ids;
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
@@ -101,31 +99,20 @@ struct xenbus_driver {
int (*is_ready)(struct xenbus_device *dev);
};
-static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-{
- return container_of(drv, struct xenbus_driver, driver);
+#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
+struct xenbus_driver var ## _driver = { \
+ .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
+ .driver.owner = THIS_MODULE, \
+ .ids = var ## _ids, ## methods \
}
-int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner,
- const char *mod_name);
-
-static inline int __must_check
-xenbus_register_frontend(struct xenbus_driver *drv)
+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
{
- WARN_ON(drv->owner != THIS_MODULE);
- return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+ return container_of(drv, struct xenbus_driver, driver);
}
-int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
- struct module *owner,
- const char *mod_name);
-static inline int __must_check
-xenbus_register_backend(struct xenbus_driver *drv)
-{
- WARN_ON(drv->owner != THIS_MODULE);
- return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
-}
+int __must_check xenbus_register_frontend(struct xenbus_driver *);
+int __must_check xenbus_register_backend(struct xenbus_driver *);
void xenbus_unregister_driver(struct xenbus_driver *drv);
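
A sketch of how a frontend converts to the new macro and registration call (modelled on the netfront-style conversions elsewhere in this series; the ids table and callback names here are assumptions):

    static const struct xenbus_device_id netfront_ids[] = {
    	{ "vif" },
    	{ "" }
    };

    static DEFINE_XENBUS_DRIVER(netfront, ,
    	.probe = netfront_probe,
    	.remove = xennet_remove,
    	.otherend_changed = netback_changed,
    );

    /* From module init; .driver.name defaults to the first ids entry ("vif")
     * because the drvname argument above is left empty. */
    ret = xenbus_register_frontend(&netfront_driver);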
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
new file mode 100644
index 00000000000..ac5f0fe47ed
--- /dev/null
+++ b/include/xen/xenbus_dev.h
@@ -0,0 +1,41 @@
+/******************************************************************************
+ * xenbus_dev.h
+ *
+ * Interface to /dev/xen/xenbus_backend.
+ *
+ * Copyright (c) 2011 Bastian Blank <waldi@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_XEN_XENBUS_DEV_H__
+#define __LINUX_XEN_XENBUS_DEV_H__
+
+#include <linux/ioctl.h>
+
+#define IOCTL_XENBUS_BACKEND_EVTCHN \
+ _IOC(_IOC_NONE, 'B', 0, 0)
+
+#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index 43298f9810f..a075765d5fb 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -469,14 +469,14 @@ config RCU_FANOUT_EXACT
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
- depends on TREE_RCU && NO_HZ && SMP
+ depends on NO_HZ && SMP
default n
help
This option causes RCU to attempt to accelerate grace periods
- in order to allow the final CPU to enter dynticks-idle state
- more quickly. On the other hand, this option increases the
- overhead of the dynticks-idle checking, particularly on systems
- with large numbers of CPUs.
+ in order to allow CPUs to enter dynticks-idle state more
+ quickly. On the other hand, this option increases the overhead
+ of the dynticks-idle checking, particularly on systems with
+ large numbers of CPUs.
Say Y if energy efficiency is critically important, particularly
if you have relatively few CPUs.
@@ -689,6 +689,17 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
Those who want the feature enabled by default should select
this option (if, for some reason, they need to disable it,
swapaccount=0 does the trick).
+config CGROUP_MEM_RES_CTLR_KMEM
+ bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
+ depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+ default n
+ help
+ The Kernel Memory extension for Memory Resource Controller can limit
+ the amount of memory used by kernel objects in the system. Those are
+ fundamentally different from the entities handled by the standard
+ Memory Controller, which are page-based, and can be swapped. Users of
+ the kmem extension can use it to guarantee that no group of processes
+ will ever exhaust kernel resources alone.
config CGROUP_PERF
bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 0f6e1d985a3..2974c8b3b35 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -325,17 +325,19 @@ static void __init get_fs_names(char *page)
static int __init do_mount_root(char *name, char *fs, int flags, void *data)
{
+ struct super_block *s;
int err = sys_mount(name, "/root", fs, flags, data);
if (err)
return err;
sys_chdir((const char __user __force *)"/root");
- ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
+ s = current->fs->pwd.dentry->d_sb;
+ ROOT_DEV = s->s_dev;
printk(KERN_INFO
"VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
- current->fs->pwd.mnt->mnt_sb->s_type->name,
- current->fs->pwd.mnt->mnt_sb->s_flags & MS_RDONLY ?
- " readonly" : "", MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
+ s->s_type->name,
+ s->s_flags & MS_RDONLY ? " readonly" : "",
+ MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
return 0;
}
@@ -398,15 +400,42 @@ out:
}
#ifdef CONFIG_ROOT_NFS
+
+#define NFSROOT_TIMEOUT_MIN 5
+#define NFSROOT_TIMEOUT_MAX 30
+#define NFSROOT_RETRY_MAX 5
+
static int __init mount_nfs_root(void)
{
char *root_dev, *root_data;
+ unsigned int timeout;
+ int try, err;
- if (nfs_root_data(&root_dev, &root_data) != 0)
- return 0;
- if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
+ err = nfs_root_data(&root_dev, &root_data);
+ if (err != 0)
return 0;
- return 1;
+
+ /*
+ * The server or network may not be ready, so try several
+ * times. Stop after a few tries in case the client wants
+ * to fall back to other boot methods.
+ */
+ timeout = NFSROOT_TIMEOUT_MIN;
+ for (try = 1; ; try++) {
+ err = do_mount_root(root_dev, "nfs",
+ root_mountflags, root_data);
+ if (err == 0)
+ return 1;
+ if (try > NFSROOT_RETRY_MAX)
+ break;
+
+ /* Wait, in case the server refused us immediately */
+ ssleep(timeout);
+ timeout <<= 1;
+ if (timeout > NFSROOT_TIMEOUT_MAX)
+ timeout = NFSROOT_TIMEOUT_MAX;
+ }
+ return 0;
}
#endif
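
For concreteness, the retry loop above produces the following schedule under the defaults NFSROOT_TIMEOUT_MIN=5, NFSROOT_TIMEOUT_MAX=30 and NFSROOT_RETRY_MAX=5 (a worked example, assuming every mount attempt fails):

    /*
     * try 1: mount, on failure sleep  5 s
     * try 2: mount, on failure sleep 10 s
     * try 3: mount, on failure sleep 20 s
     * try 4: mount, on failure sleep 30 s  (doubling capped at NFSROOT_TIMEOUT_MAX)
     * try 5: mount, on failure sleep 30 s
     * try 6: mount, on failure give up and return 0 (fall back to other roots)
     */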
diff --git a/init/initramfs.c b/init/initramfs.c
index 2531811d42c..8216c303b08 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -22,7 +22,7 @@ static void __init error(char *x)
static __initdata struct hash {
int ino, minor, major;
- mode_t mode;
+ umode_t mode;
struct hash *next;
char name[N_ALIGN(PATH_MAX)];
} *head[32];
@@ -35,7 +35,7 @@ static inline int hash(int major, int minor, int ino)
}
static char __init *find_link(int major, int minor, int ino,
- mode_t mode, char *name)
+ umode_t mode, char *name)
{
struct hash **p, *q;
for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
@@ -120,7 +120,7 @@ static __initdata time_t mtime;
/* cpio header parsing */
static __initdata unsigned long ino, major, minor, nlink;
-static __initdata mode_t mode;
+static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
@@ -276,7 +276,7 @@ static int __init maybe_link(void)
return 0;
}
-static void __init clean_path(char *path, mode_t mode)
+static void __init clean_path(char *path, umode_t mode)
{
struct stat st;
diff --git a/init/main.c b/init/main.c
index 217ed23e948..2c76efb513c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -469,13 +469,12 @@ asmlinkage void __init start_kernel(void)
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
- smp_setup_processor_id();
-
/*
* Need to run as early as possible, to initialize the
* lockdep hash:
*/
lockdep_init();
+ smp_setup_processor_id();
debug_objects_early_init();
/*
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 2e0ecfcc881..9b7c8ab7d75 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -32,6 +32,7 @@
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
+#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <net/sock.h>
@@ -108,7 +109,7 @@ static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
}
static struct inode *mqueue_get_inode(struct super_block *sb,
- struct ipc_namespace *ipc_ns, int mode,
+ struct ipc_namespace *ipc_ns, umode_t mode,
struct mq_attr *attr)
{
struct user_struct *u = current_user();
@@ -243,7 +244,6 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb)
static void mqueue_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
@@ -296,7 +296,7 @@ static void mqueue_evict_inode(struct inode *inode)
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
- int mode, struct nameidata *nd)
+ umode_t mode, struct nameidata *nd)
{
struct inode *inode;
struct mq_attr *attr = dentry->d_fsdata;
@@ -543,9 +543,13 @@ static void __do_notify(struct mqueue_inode_info *info)
sig_i.si_errno = 0;
sig_i.si_code = SI_MESGQ;
sig_i.si_value = info->notify.sigev_value;
+ /* map current pid/uid into info->owner's namespaces */
+ rcu_read_lock();
sig_i.si_pid = task_tgid_nr_ns(current,
ns_of_pid(info->notify_owner));
- sig_i.si_uid = current_uid();
+ sig_i.si_uid = user_ns_map_uid(info->user->user_ns,
+ current_cred(), current_uid());
+ rcu_read_unlock();
kill_pid_info(info->notify.sigev_signo,
&sig_i, info->notify_owner);
@@ -611,7 +615,7 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
* Invoked when creating a new queue via sys_mq_open
*/
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
- struct dentry *dentry, int oflag, mode_t mode,
+ struct dentry *dentry, int oflag, umode_t mode,
struct mq_attr *attr)
{
const struct cred *cred = current_cred();
@@ -680,7 +684,7 @@ err:
return ERR_PTR(ret);
}
-SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
+SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
struct mq_attr __user *, u_attr)
{
struct dentry *dentry;
@@ -1269,7 +1273,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
void mq_put_mnt(struct ipc_namespace *ns)
{
- mntput(ns->mq_mnt);
+ kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
@@ -1291,11 +1295,9 @@ static int __init init_mqueue_fs(void)
spin_lock_init(&mq_lock);
- init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
- if (IS_ERR(init_ipc_ns.mq_mnt)) {
- error = PTR_ERR(init_ipc_ns.mq_mnt);
+ error = mq_init_ns(&init_ipc_ns);
+ if (error)
goto out_filesystem;
- }
return 0;
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index 8b5ce5d3f3e..5652101cdac 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
*/
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
- .mq_queues_max = DFLT_QUEUESMAX,
- .mq_msg_max = DFLT_MSGMAX,
- .mq_msgsize_max = DFLT_MSGSIZEMAX,
-#endif
.user_ns = &init_user_ns,
};
diff --git a/kernel/Makefile b/kernel/Makefile
index e898c5b9d02..f70396e5a24 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,16 +2,15 @@
# Makefile for the linux kernel.
#
-obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
+obj-y = fork.o exec_domain.o panic.o printk.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
- notifier.o ksysfs.o sched_clock.o cred.o \
- async.o range.o
-obj-y += groups.o
+ notifier.o ksysfs.o cred.o \
+ async.o range.o groups.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
@@ -20,10 +19,11 @@ CFLAGS_REMOVE_lockdep_proc.o = -pg
CFLAGS_REMOVE_mutex-debug.o = -pg
CFLAGS_REMOVE_rtmutex-debug.o = -pg
CFLAGS_REMOVE_cgroup-debug.o = -pg
-CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_irq_work.o = -pg
endif
+obj-y += sched/
+
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
@@ -99,7 +99,6 @@ obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
-obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
@@ -110,15 +109,6 @@ obj-$(CONFIG_PADATA) += padata.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
-# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
-# needed for x86 only. Why this used to be enabled for all architectures is beyond
-# me. I suspect most platforms don't need this, but until we know that for sure
-# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k
-# to get a correct value for the wait-channel (WCHAN in ps). --davidm
-CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
-endif
-
$(obj)/configs.o: $(obj)/config_data.h
# config_data.h contains the same information as ikconfig.h but gzipped.
diff --git a/kernel/acct.c b/kernel/acct.c
index fa7eb3de2dd..02e6167a53b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -84,11 +84,10 @@ static void do_acct_process(struct bsd_acct_struct *acct,
* the cache line to have the data after getting the lock.
*/
struct bsd_acct_struct {
- volatile int active;
- volatile int needcheck;
+ int active;
+ unsigned long needcheck;
struct file *file;
struct pid_namespace *ns;
- struct timer_list timer;
struct list_head list;
};
@@ -96,15 +95,6 @@ static DEFINE_SPINLOCK(acct_lock);
static LIST_HEAD(acct_list);
/*
- * Called whenever the timer says to check the free space.
- */
-static void acct_timeout(unsigned long x)
-{
- struct bsd_acct_struct *acct = (struct bsd_acct_struct *)x;
- acct->needcheck = 1;
-}
-
-/*
* Check the amount of free space and suspend/resume accordingly.
*/
static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
@@ -112,12 +102,12 @@ static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
struct kstatfs sbuf;
int res;
int act;
- sector_t resume;
- sector_t suspend;
+ u64 resume;
+ u64 suspend;
spin_lock(&acct_lock);
res = acct->active;
- if (!file || !acct->needcheck)
+ if (!file || time_is_before_jiffies(acct->needcheck))
goto out;
spin_unlock(&acct_lock);
@@ -127,8 +117,8 @@ static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
suspend = sbuf.f_blocks * SUSPEND;
resume = sbuf.f_blocks * RESUME;
- sector_div(suspend, 100);
- sector_div(resume, 100);
+ do_div(suspend, 100);
+ do_div(resume, 100);
if (sbuf.f_bavail <= suspend)
act = -1;
@@ -160,10 +150,7 @@ static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
}
}
- del_timer(&acct->timer);
- acct->needcheck = 0;
- acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
- add_timer(&acct->timer);
+ acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
res = acct->active;
out:
spin_unlock(&acct_lock);
@@ -185,9 +172,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
if (acct->file) {
old_acct = acct->file;
old_ns = acct->ns;
- del_timer(&acct->timer);
acct->active = 0;
- acct->needcheck = 0;
acct->file = NULL;
acct->ns = NULL;
list_del(&acct->list);
@@ -195,13 +180,9 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
if (file) {
acct->file = file;
acct->ns = ns;
- acct->needcheck = 0;
+ acct->needcheck = jiffies + ACCT_TIMEOUT*HZ;
acct->active = 1;
list_add(&acct->list, &acct_list);
- /* It's been deleted if it was used before so this is safe */
- setup_timer(&acct->timer, acct_timeout, (unsigned long)acct);
- acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
- add_timer(&acct->timer);
}
if (old_acct) {
mnt_unpin(old_acct->f_path.mnt);
@@ -334,7 +315,7 @@ void acct_auto_close(struct super_block *sb)
spin_lock(&acct_lock);
restart:
list_for_each_entry(acct, &acct_list, list)
- if (acct->file && acct->file->f_path.mnt->mnt_sb == sb) {
+ if (acct->file && acct->file->f_path.dentry->d_sb == sb) {
acct_file_reopen(acct, NULL, NULL);
goto restart;
}
@@ -348,7 +329,6 @@ void acct_exit_ns(struct pid_namespace *ns)
if (acct == NULL)
return;
- del_timer_sync(&acct->timer);
spin_lock(&acct_lock);
if (acct->file != NULL)
acct_file_reopen(acct, NULL, NULL);
@@ -498,7 +478,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
* Fill the accounting struct with the needed info as recorded
* by the different kernel functions.
*/
- memset((caddr_t)&ac, 0, sizeof(acct_t));
+ memset(&ac, 0, sizeof(acct_t));
ac.ac_version = ACCT_VERSION | ACCT_BYTEORDER;
strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
@@ -613,8 +593,8 @@ void acct_collect(long exitcode, int group_dead)
pacct->ac_flag |= ACORE;
if (current->flags & PF_SIGNALED)
pacct->ac_flag |= AXSIG;
- pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
- pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
+ pacct->ac_utime += current->utime;
+ pacct->ac_stime += current->stime;
pacct->ac_minflt += current->min_flt;
pacct->ac_majflt += current->maj_flt;
spin_unlock_irq(&current->sighand->siglock);
diff --git a/kernel/audit.c b/kernel/audit.c
index 09fae2677a4..2c1d6ab7106 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
avail = audit_expand(ab,
max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
if (!avail)
- goto out;
+ goto out_va_end;
len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
}
- va_end(args2);
if (len > 0)
skb_put(skb, len);
+out_va_end:
+ va_end(args2);
out:
return;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 47b7fc1ea89..e7fe2b0d29b 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -210,12 +210,12 @@ struct audit_context {
struct {
uid_t uid;
gid_t gid;
- mode_t mode;
+ umode_t mode;
u32 osid;
int has_perm;
uid_t perm_uid;
gid_t perm_gid;
- mode_t perm_mode;
+ umode_t perm_mode;
unsigned long qbytes;
} ipc;
struct {
@@ -234,7 +234,7 @@ struct audit_context {
} mq_sendrecv;
struct {
int oflag;
- mode_t mode;
+ umode_t mode;
struct mq_attr attr;
} mq_open;
struct {
@@ -308,7 +308,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
static int audit_match_filetype(struct audit_context *ctx, int which)
{
unsigned index = which & ~S_IFMT;
- mode_t mode = which & S_IFMT;
+ umode_t mode = which & S_IFMT;
if (unlikely(!ctx))
return 0;
@@ -1249,7 +1249,7 @@ static void show_special(struct audit_context *context, int *call_panic)
case AUDIT_IPC: {
u32 osid = context->ipc.osid;
- audit_log_format(ab, "ouid=%u ogid=%u mode=%#o",
+ audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho",
context->ipc.uid, context->ipc.gid, context->ipc.mode);
if (osid) {
char *ctx = NULL;
@@ -1267,7 +1267,7 @@ static void show_special(struct audit_context *context, int *call_panic)
ab = audit_log_start(context, GFP_KERNEL,
AUDIT_IPC_SET_PERM);
audit_log_format(ab,
- "qbytes=%lx ouid=%u ogid=%u mode=%#o",
+ "qbytes=%lx ouid=%u ogid=%u mode=%#ho",
context->ipc.qbytes,
context->ipc.perm_uid,
context->ipc.perm_gid,
@@ -1278,7 +1278,7 @@ static void show_special(struct audit_context *context, int *call_panic)
break; }
case AUDIT_MQ_OPEN: {
audit_log_format(ab,
- "oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld "
+ "oflag=0x%x mode=%#ho mq_flags=0x%lx mq_maxmsg=%ld "
"mq_msgsize=%ld mq_curmsgs=%ld",
context->mq_open.oflag, context->mq_open.mode,
context->mq_open.attr.mq_flags,
@@ -1502,7 +1502,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
if (n->ino != (unsigned long)-1) {
audit_log_format(ab, " inode=%lu"
- " dev=%02x:%02x mode=%#o"
+ " dev=%02x:%02x mode=%#ho"
" ouid=%u ogid=%u rdev=%02x:%02x",
n->ino,
MAJOR(n->dev),
@@ -2160,7 +2160,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
* @attr: queue attributes
*
*/
-void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
+void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{
struct audit_context *context = current->audit_context;
@@ -2260,7 +2260,7 @@ void __audit_ipc_obj(struct kern_ipc_perm *ipcp)
*
* Called only after audit_ipc_obj().
*/
-void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
+void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
{
struct audit_context *context = current->audit_context;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d9d5648f3cd..a5d3b5325f7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -63,7 +63,24 @@
#include <linux/atomic.h>
+/*
+ * cgroup_mutex is the master lock. Any modification to cgroup or its
+ * hierarchy must be performed while holding it.
+ *
+ * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
+ * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
+ * release_agent_path and so on. Modifying requires both cgroup_mutex and
+ * cgroup_root_mutex. Readers can acquire either of the two. This is to
+ * break the following locking order cycle.
+ *
+ * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
+ * B. namespace_sem -> cgroup_mutex
+ *
+ * B happens only through cgroup_show_options() and using cgroup_root_mutex
+ * breaks it.
+ */
static DEFINE_MUTEX(cgroup_mutex);
+static DEFINE_MUTEX(cgroup_root_mutex);
/*
* Generate an array of cgroup subsystem pointers. At boot time, this is
@@ -760,7 +777,7 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
* -> cgroup_mkdir.
*/
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -775,7 +792,7 @@ static struct backing_dev_info cgroup_backing_dev_info = {
static int alloc_css_id(struct cgroup_subsys *ss,
struct cgroup *parent, struct cgroup *child);
-static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
+static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -921,7 +938,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
*
* CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
*/
-DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
@@ -953,6 +970,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
int i;
BUG_ON(!mutex_is_locked(&cgroup_mutex));
+ BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
removed_bits = root->actual_subsys_bits & ~final_bits;
added_bits = final_bits & ~root->actual_subsys_bits;
@@ -1038,12 +1056,12 @@ static int rebind_subsystems(struct cgroupfs_root *root,
return 0;
}
-static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
- struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
+ struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
struct cgroup_subsys *ss;
- mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
for_each_subsys(root, ss)
seq_printf(seq, ",%s", ss->name);
if (test_bit(ROOT_NOPREFIX, &root->flags))
@@ -1054,7 +1072,7 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",clone_children");
if (strlen(root->name))
seq_printf(seq, ",name=%s", root->name);
- mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&cgroup_root_mutex);
return 0;
}
@@ -1175,10 +1193,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
/*
* If the 'all' option was specified select all the subsystems,
- * otherwise 'all, 'none' and a subsystem name options were not
- * specified, let's default to 'all'
+ * otherwise, if the 'none', 'name=' and subsystem name options
+ * were not specified, default to 'all'
*/
- if (all_ss || (!all_ss && !one_ss && !opts->none)) {
+ if (all_ss || (!one_ss && !opts->none && !opts->name)) {
for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
if (ss == NULL)
@@ -1269,6 +1287,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
/* See what subsystems are wanted */
ret = parse_cgroupfs_options(data, &opts);
@@ -1297,6 +1316,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
out_unlock:
kfree(opts.release_agent);
kfree(opts.name);
+ mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
return ret;
@@ -1481,6 +1501,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
int ret = 0;
struct super_block *sb;
struct cgroupfs_root *new_root;
+ struct inode *inode;
/* First find the desired set of subsystems */
mutex_lock(&cgroup_mutex);
@@ -1514,7 +1535,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
/* We used the new root structure, so this is a new hierarchy */
struct list_head tmp_cg_links;
struct cgroup *root_cgrp = &root->top_cgroup;
- struct inode *inode;
struct cgroupfs_root *existing_root;
const struct cred *cred;
int i;
@@ -1528,18 +1548,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
- if (strlen(root->name)) {
- /* Check for name clashes with existing mounts */
- for_each_active_root(existing_root) {
- if (!strcmp(existing_root->name, root->name)) {
- ret = -EBUSY;
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
- goto drop_new_super;
- }
- }
- }
+ /* Check for name clashes with existing mounts */
+ ret = -EBUSY;
+ if (strlen(root->name))
+ for_each_active_root(existing_root)
+ if (!strcmp(existing_root->name, root->name))
+ goto unlock_drop;
/*
* We're accessing css_set_count without locking
@@ -1549,18 +1565,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
* have some link structures left over
*/
ret = allocate_cg_links(css_set_count, &tmp_cg_links);
- if (ret) {
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
- goto drop_new_super;
- }
+ if (ret)
+ goto unlock_drop;
ret = rebind_subsystems(root, root->subsys_bits);
if (ret == -EBUSY) {
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
free_cg_links(&tmp_cg_links);
- goto drop_new_super;
+ goto unlock_drop;
}
/*
* There must be no failure case after here, since rebinding
@@ -1599,6 +1610,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
cred = override_creds(&init_cred);
cgroup_populate_dir(root_cgrp);
revert_creds(cred);
+ mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
} else {
@@ -1615,6 +1627,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
kfree(opts.name);
return dget(sb->s_root);
+ unlock_drop:
+ mutex_unlock(&cgroup_root_mutex);
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
drop_new_super:
deactivate_locked_super(sb);
drop_modules:
@@ -1639,6 +1655,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
BUG_ON(!list_empty(&cgrp->sibling));
mutex_lock(&cgroup_mutex);
+ mutex_lock(&cgroup_root_mutex);
/* Rebind all subsystems back to the default hierarchy */
ret = rebind_subsystems(root, 0);
@@ -1664,6 +1681,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
root_count--;
}
+ mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
kill_litter_super(sb);
@@ -1740,11 +1758,90 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
EXPORT_SYMBOL_GPL(cgroup_path);
/*
+ * Control Group taskset
+ */
+struct task_and_cgroup {
+ struct task_struct *task;
+ struct cgroup *cgrp;
+};
+
+struct cgroup_taskset {
+ struct task_and_cgroup single;
+ struct flex_array *tc_array;
+ int tc_array_len;
+ int idx;
+ struct cgroup *cur_cgrp;
+};
+
+/**
+ * cgroup_taskset_first - reset taskset and return the first task
+ * @tset: taskset of interest
+ *
+ * @tset iteration is initialized and the first task is returned.
+ */
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+{
+ if (tset->tc_array) {
+ tset->idx = 0;
+ return cgroup_taskset_next(tset);
+ } else {
+ tset->cur_cgrp = tset->single.cgrp;
+ return tset->single.task;
+ }
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
+
+/**
+ * cgroup_taskset_next - iterate to the next task in taskset
+ * @tset: taskset of interest
+ *
+ * Return the next task in @tset. Iteration must have been initialized
+ * with cgroup_taskset_first().
+ */
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+{
+ struct task_and_cgroup *tc;
+
+ if (!tset->tc_array || tset->idx >= tset->tc_array_len)
+ return NULL;
+
+ tc = flex_array_get(tset->tc_array, tset->idx++);
+ tset->cur_cgrp = tc->cgrp;
+ return tc->task;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
+
+/**
+ * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * @tset: taskset of interest
+ *
+ * Return the cgroup for the current (last returned) task of @tset. This
+ * function must be preceded by either cgroup_taskset_first() or
+ * cgroup_taskset_next().
+ */
+struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+{
+ return tset->cur_cgrp;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+
+/**
+ * cgroup_taskset_size - return the number of tasks in taskset
+ * @tset: taskset of interest
+ */
+int cgroup_taskset_size(struct cgroup_taskset *tset)
+{
+ return tset->tc_array ? tset->tc_array_len : 1;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_size);
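
A sketch of how a subsystem's ->can_attach() might walk the taskset using only the accessors above (hedged; foo_task_allowed() is a hypothetical per-controller check, and real converted controllers appear later in this series):

    static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
    			  struct cgroup_taskset *tset)
    {
    	struct task_struct *task;

    	for (task = cgroup_taskset_first(tset); task;
    	     task = cgroup_taskset_next(tset)) {
    		/* veto the whole migration if any task is unsuitable */
    		if (!foo_task_allowed(task, cgrp))
    			return -EINVAL;
    	}
    	return 0;
    }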
+
+
+/*
* cgroup_task_migrate - move a task from one cgroup to another.
*
* 'guarantee' is set if the caller promises that a new css_set for the task
* will already exist. If not set, this function might sleep, and can fail with
- * -ENOMEM. Otherwise, it can only fail with -ESRCH.
+ * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
*/
static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
struct task_struct *tsk, bool guarantee)
@@ -1753,14 +1850,12 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
struct css_set *newcg;
/*
- * get old css_set. we need to take task_lock and refcount it, because
- * an exiting task can change its css_set to init_css_set and drop its
- * old one without taking cgroup_mutex.
+ * We are synchronized through threadgroup_lock() against PF_EXITING
+ * setting such that we can't race against cgroup_exit() changing the
+ * css_set to init_css_set and dropping the old one.
*/
- task_lock(tsk);
+ WARN_ON_ONCE(tsk->flags & PF_EXITING);
oldcg = tsk->cgroups;
- get_css_set(oldcg);
- task_unlock(tsk);
/* locate or allocate a new css_set for this task. */
if (guarantee) {
@@ -1775,20 +1870,11 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
might_sleep();
/* find_css_set will give us newcg already referenced. */
newcg = find_css_set(oldcg, cgrp);
- if (!newcg) {
- put_css_set(oldcg);
+ if (!newcg)
return -ENOMEM;
- }
}
- put_css_set(oldcg);
- /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
task_lock(tsk);
- if (tsk->flags & PF_EXITING) {
- task_unlock(tsk);
- put_css_set(newcg);
- return -ESRCH;
- }
rcu_assign_pointer(tsk->cgroups, newcg);
task_unlock(tsk);
@@ -1814,8 +1900,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
* @cgrp: the cgroup the task is attaching to
* @tsk: the task to be attached
*
- * Call holding cgroup_mutex. May take task_lock of
- * the task 'tsk' during call.
+ * Call with cgroup_mutex and threadgroup locked. May take task_lock of
+ * @tsk during call.
*/
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
@@ -1823,15 +1909,23 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
struct cgroup_subsys *ss, *failed_ss = NULL;
struct cgroup *oldcgrp;
struct cgroupfs_root *root = cgrp->root;
+ struct cgroup_taskset tset = { };
+
+ /* @tsk either already exited or can't exit until the end */
+ if (tsk->flags & PF_EXITING)
+ return -ESRCH;
/* Nothing to do if the task is already in that cgroup */
oldcgrp = task_cgroup_from_root(tsk, root);
if (cgrp == oldcgrp)
return 0;
+ tset.single.task = tsk;
+ tset.single.cgrp = oldcgrp;
+
for_each_subsys(root, ss) {
if (ss->can_attach) {
- retval = ss->can_attach(ss, cgrp, tsk);
+ retval = ss->can_attach(ss, cgrp, &tset);
if (retval) {
/*
* Remember on which subsystem the can_attach()
@@ -1843,13 +1937,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
goto out;
}
}
- if (ss->can_attach_task) {
- retval = ss->can_attach_task(cgrp, tsk);
- if (retval) {
- failed_ss = ss;
- goto out;
- }
- }
}
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
@@ -1857,12 +1944,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
goto out;
for_each_subsys(root, ss) {
- if (ss->pre_attach)
- ss->pre_attach(cgrp);
- if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
if (ss->attach)
- ss->attach(ss, cgrp, oldcgrp, tsk);
+ ss->attach(ss, cgrp, &tset);
}
synchronize_rcu();
@@ -1884,7 +1967,7 @@ out:
*/
break;
if (ss->cancel_attach)
- ss->cancel_attach(ss, cgrp, tsk);
+ ss->cancel_attach(ss, cgrp, &tset);
}
}
return retval;
@@ -1935,23 +2018,17 @@ static bool css_set_check_fetched(struct cgroup *cgrp,
read_lock(&css_set_lock);
newcg = find_existing_css_set(cg, cgrp, template);
- if (newcg)
- get_css_set(newcg);
read_unlock(&css_set_lock);
/* doesn't exist at all? */
if (!newcg)
return false;
/* see if it's already in the list */
- list_for_each_entry(cg_entry, newcg_list, links) {
- if (cg_entry->cg == newcg) {
- put_css_set(newcg);
+ list_for_each_entry(cg_entry, newcg_list, links)
+ if (cg_entry->cg == newcg)
return true;
- }
- }
/* not found */
- put_css_set(newcg);
return false;
}
@@ -1985,21 +2062,21 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
* @cgrp: the cgroup to attach to
* @leader: the threadgroup leader task_struct of the group to be attached
*
- * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
- * take task_lock of each thread in leader's threadgroup individually in turn.
+ * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
+ * task_lock of each thread in leader's threadgroup individually in turn.
*/
-int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
{
int retval, i, group_size;
struct cgroup_subsys *ss, *failed_ss = NULL;
- bool cancel_failed_ss = false;
/* guaranteed to be initialized later, but the compiler needs this */
- struct cgroup *oldcgrp = NULL;
struct css_set *oldcg;
struct cgroupfs_root *root = cgrp->root;
/* threadgroup list cursor and array */
struct task_struct *tsk;
+ struct task_and_cgroup *tc;
struct flex_array *group;
+ struct cgroup_taskset tset = { };
/*
* we need to make sure we have css_sets for all the tasks we're
* going to move -before- we actually start moving them, so that in
@@ -2012,13 +2089,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
* step 0: in order to do expensive, possibly blocking operations for
* every thread, we cannot iterate the thread group list, since it needs
* rcu or tasklist locked. instead, build an array of all threads in the
- * group - threadgroup_fork_lock prevents new threads from appearing,
- * and if threads exit, this will just be an over-estimate.
+ * group - group_rwsem prevents new threads from appearing, and if
+ * threads exit, this will just be an over-estimate.
*/
group_size = get_nr_threads(leader);
/* flex_array supports very large thread-groups better than kmalloc. */
- group = flex_array_alloc(sizeof(struct task_struct *), group_size,
- GFP_KERNEL);
+ group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
if (!group)
return -ENOMEM;
/* pre-allocate to guarantee space while iterating in rcu read-side. */
@@ -2040,49 +2116,53 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
retval = -EAGAIN;
goto out_free_group_list;
}
- /* take a reference on each task in the group to go in the array. */
+
tsk = leader;
i = 0;
do {
+ struct task_and_cgroup ent;
+
+ /* @tsk either already exited or can't exit until the end */
+ if (tsk->flags & PF_EXITING)
+ continue;
+
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
- get_task_struct(tsk);
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
*/
- retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+ ent.task = tsk;
+ ent.cgrp = task_cgroup_from_root(tsk, root);
+ /* nothing to do if this task is already in the cgroup */
+ if (ent.cgrp == cgrp)
+ continue;
+ retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
} while_each_thread(leader, tsk);
/* remember the number of threads in the array for later. */
group_size = i;
+ tset.tc_array = group;
+ tset.tc_array_len = group_size;
read_unlock(&tasklist_lock);
+ /* methods shouldn't be called if no task is actually migrating */
+ retval = 0;
+ if (!group_size)
+ goto out_free_group_list;
+
/*
* step 1: check that we can legitimately attach to the cgroup.
*/
for_each_subsys(root, ss) {
if (ss->can_attach) {
- retval = ss->can_attach(ss, cgrp, leader);
+ retval = ss->can_attach(ss, cgrp, &tset);
if (retval) {
failed_ss = ss;
goto out_cancel_attach;
}
}
- /* a callback to be run on every thread in the threadgroup. */
- if (ss->can_attach_task) {
- /* run on each task in the threadgroup. */
- for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- retval = ss->can_attach_task(cgrp, tsk);
- if (retval) {
- failed_ss = ss;
- cancel_failed_ss = true;
- goto out_cancel_attach;
- }
- }
- }
}
/*
@@ -2091,72 +2171,36 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
*/
INIT_LIST_HEAD(&newcg_list);
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- /* nothing to do if this task is already in the cgroup */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
- continue;
- /* get old css_set pointer */
- task_lock(tsk);
- if (tsk->flags & PF_EXITING) {
- /* ignore this task if it's going away */
- task_unlock(tsk);
- continue;
- }
- oldcg = tsk->cgroups;
- get_css_set(oldcg);
- task_unlock(tsk);
- /* see if the new one for us is already in the list? */
- if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
- /* was already there, nothing to do. */
- put_css_set(oldcg);
- } else {
- /* we don't already have it. get new one. */
+ tc = flex_array_get(group, i);
+ oldcg = tc->task->cgroups;
+
+ /* if we don't already have it in the list get a new one */
+ if (!css_set_check_fetched(cgrp, tc->task, oldcg,
+ &newcg_list)) {
retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
- put_css_set(oldcg);
if (retval)
goto out_list_teardown;
}
}
/*
- * step 3: now that we're guaranteed success wrt the css_sets, proceed
- * to move all tasks to the new cgroup, calling ss->attach_task for each
- * one along the way. there are no failure cases after here, so this is
- * the commit point.
+ * step 3: now that we're guaranteed success wrt the css_sets,
+ * proceed to move all tasks to the new cgroup. There are no
+ * failure cases after here, so this is the commit point.
*/
- for_each_subsys(root, ss) {
- if (ss->pre_attach)
- ss->pre_attach(cgrp);
- }
for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- /* leave current thread as it is if it's already there */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
- continue;
- /* if the thread is PF_EXITING, it can just get skipped. */
- retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
- if (retval == 0) {
- /* attach each task to each subsystem */
- for_each_subsys(root, ss) {
- if (ss->attach_task)
- ss->attach_task(cgrp, tsk);
- }
- } else {
- BUG_ON(retval != -ESRCH);
- }
+ tc = flex_array_get(group, i);
+ retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
+ BUG_ON(retval);
}
/* nothing is sensitive to fork() after this point. */
/*
- * step 4: do expensive, non-thread-specific subsystem callbacks.
- * TODO: if ever a subsystem needs to know the oldcgrp for each task
- * being moved, this call will need to be reworked to communicate that.
+ * step 4: do subsystem attach callbacks.
*/
for_each_subsys(root, ss) {
if (ss->attach)
- ss->attach(ss, cgrp, oldcgrp, leader);
+ ss->attach(ss, cgrp, &tset);
}
/*
@@ -2176,20 +2220,12 @@ out_cancel_attach:
/* same deal as in cgroup_attach_task */
if (retval) {
for_each_subsys(root, ss) {
- if (ss == failed_ss) {
- if (cancel_failed_ss && ss->cancel_attach)
- ss->cancel_attach(ss, cgrp, leader);
+ if (ss == failed_ss)
break;
- }
if (ss->cancel_attach)
- ss->cancel_attach(ss, cgrp, leader);
+ ss->cancel_attach(ss, cgrp, &tset);
}
}
- /* clean up the array of referenced threads in the group. */
- for (i = 0; i < group_size; i++) {
- tsk = flex_array_get_ptr(group, i);
- put_task_struct(tsk);
- }
out_free_group_list:
flex_array_free(group);
return retval;
@@ -2197,8 +2233,8 @@ out_free_group_list:
/*
* Find the task_struct of the task to attach by vpid and pass it along to the
- * function to attach either it or all tasks in its threadgroup. Will take
- * cgroup_mutex; may take task_lock of task.
+ * function to attach either it or all tasks in its threadgroup. Will lock
+ * cgroup_mutex and threadgroup; may take task_lock of task.
*/
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
@@ -2225,13 +2261,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
* detect it later.
*/
tsk = tsk->group_leader;
- } else if (tsk->flags & PF_EXITING) {
- /* optimization for the single-task-only case */
- rcu_read_unlock();
- cgroup_unlock();
- return -ESRCH;
}
-
/*
* even if we're attaching all tasks in the thread group, we
* only need to check permissions on one of them.
@@ -2254,13 +2284,15 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
get_task_struct(tsk);
}
- if (threadgroup) {
- threadgroup_fork_write_lock(tsk);
+ threadgroup_lock(tsk);
+
+ if (threadgroup)
ret = cgroup_attach_proc(cgrp, tsk);
- threadgroup_fork_write_unlock(tsk);
- } else {
+ else
ret = cgroup_attach_task(cgrp, tsk);
- }
+
+ threadgroup_unlock(tsk);
+
put_task_struct(tsk);
cgroup_unlock();
return ret;
@@ -2311,7 +2343,9 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
return -EINVAL;
if (!cgroup_lock_live_group(cgrp))
return -ENODEV;
+ mutex_lock(&cgroup_root_mutex);
strcpy(cgrp->root->release_agent_path, buffer);
+ mutex_unlock(&cgroup_root_mutex);
cgroup_unlock();
return 0;
}
@@ -2590,7 +2624,7 @@ static inline struct cftype *__file_cft(struct file *file)
return __d_cft(file->f_dentry);
}
-static int cgroup_create_file(struct dentry *dentry, mode_t mode,
+static int cgroup_create_file(struct dentry *dentry, umode_t mode,
struct super_block *sb)
{
struct inode *inode;
@@ -2631,7 +2665,7 @@ static int cgroup_create_file(struct dentry *dentry, mode_t mode,
* @mode: mode to set on new directory.
*/
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
- mode_t mode)
+ umode_t mode)
{
struct dentry *parent;
int error = 0;
@@ -2658,9 +2692,9 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
* returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
*/
-static mode_t cgroup_file_mode(const struct cftype *cft)
+static umode_t cgroup_file_mode(const struct cftype *cft)
{
- mode_t mode = 0;
+ umode_t mode = 0;
if (cft->mode)
return cft->mode;
@@ -2683,7 +2717,7 @@ int cgroup_add_file(struct cgroup *cgrp,
struct dentry *dir = cgrp->dentry;
struct dentry *dentry;
int error;
- mode_t mode;
+ umode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -2794,6 +2828,7 @@ static void cgroup_enable_task_cg_lists(void)
}
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+ __acquires(css_set_lock)
{
/*
* The first time anyone tries to iterate across a cgroup,
@@ -2833,6 +2868,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
}
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+ __releases(css_set_lock)
{
read_unlock(&css_set_lock);
}
@@ -3757,7 +3793,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
* Must be called with the mutex on the parent inode held
*/
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
- mode_t mode)
+ umode_t mode)
{
struct cgroup *cgrp;
struct cgroupfs_root *root = parent->root;
@@ -3851,7 +3887,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
return err;
}
-static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct cgroup *c_parent = dentry->d_parent->d_fsdata;
@@ -4496,20 +4532,31 @@ static const struct file_operations proc_cgroupstats_operations = {
*
* A pointer to the shared css_set was automatically copied in
* fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU or cgroup_mutex, so
- * might no longer be a valid cgroup pointer. cgroup_attach_task() might
- * have already changed current->cgroups, allowing the previously
- * referenced cgroup group to be removed and freed.
+ * it was not made under the protection of RCU, cgroup_mutex or
+ * threadgroup_change_begin(), so it might no longer be a valid
+ * cgroup pointer. cgroup_attach_task() might have already changed
+ * current->cgroups, allowing the previously referenced cgroup
+ * group to be removed and freed.
+ *
+ * Outside the pointer validity we also need to process the css_set
+ * inheritance between threadgroup_change_begin() and
+ * threadgroup_change_end(); this way there is no leak in any process
+ * wide migration performed by cgroup_attach_proc() that could otherwise
+ * miss a thread because it is too early or too late in the fork stage.
*
* At the point that cgroup_fork() is called, 'current' is the parent
* task, and the passed argument 'child' points to the child task.
*/
void cgroup_fork(struct task_struct *child)
{
- task_lock(current);
+ /*
+ * We don't need to task_lock() current because current->cgroups
+ * can't be changed concurrently here. The parent obviously hasn't
+ * exited and called cgroup_exit(), and we are synchronized against
+ * cgroup migration through threadgroup_change_begin().
+ */
child->cgroups = current->cgroups;
get_css_set(child->cgroups);
- task_unlock(current);
INIT_LIST_HEAD(&child->cg_list);
}
@@ -4551,10 +4598,19 @@ void cgroup_post_fork(struct task_struct *child)
{
if (use_task_css_set_links) {
write_lock(&css_set_lock);
- task_lock(child);
- if (list_empty(&child->cg_list))
+ if (list_empty(&child->cg_list)) {
+ /*
+ * It's safe to use child->cgroups without task_lock()
+ * here because we are protected through
+ * threadgroup_change_begin() against concurrent
+ * css_set change in cgroup_task_migrate(). Also
+ * the task can't exit at that point until
+ * wake_up_new_task() is called, so we are protected
+			 * against cgroup_exit() setting child->cgroups to
+ * init_css_set.
+ */
list_add(&child->cg_list, &child->cgroups->tasks);
- task_unlock(child);
+ }
write_unlock(&css_set_lock);
}
}
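
An illustrative sketch (not part of the patch) of how a subsystem consumes the
new cgroup_taskset argument introduced in the hunks above. The example_* names
are hypothetical; cgroup_taskset_for_each() and the can_attach()/attach()
signatures are the ones used by this series and may differ in later kernels.

	#include <linux/cgroup.h>
	#include <linux/sched.h>

	static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
				      struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		/* validate every task that is actually being migrated */
		cgroup_taskset_for_each(task, cgrp, tset) {
			if (task->flags & PF_THREAD_BOUND)
				return -EINVAL;		/* example policy check */
		}
		return 0;
	}

	static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
				   struct cgroup_taskset *tset)
	{
		struct task_struct *task;

		/* commit point: per-task work that must not fail */
		cgroup_taskset_for_each(task, cgrp, tset) {
			/* apply per-task state for @cgrp here */
		}
	}
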
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 5e828a2ca8e..fc0646b78a6 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
struct freezer, css);
}
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
{
- enum freezer_state state = task_freezer(task)->state;
- return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+ enum freezer_state state;
+ bool ret;
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
- int result;
- task_lock(task);
- result = __cgroup_freezing_or_frozen(task);
- task_unlock(task);
- return result;
+ rcu_read_lock();
+ state = task_freezer(task)->state;
+ ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+ rcu_read_unlock();
+
+ return ret;
}
/*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
* freezer_can_attach():
* cgroup_mutex (held by caller of can_attach)
*
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
* freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
* freezer->lock
* sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
* write_lock css_set_lock (cgroup iterator start)
* task->alloc_lock
* read_lock css_set_lock (cgroup iterator start)
- * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
* sighand->siglock
*/
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,18 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
static void freezer_destroy(struct cgroup_subsys *ss,
struct cgroup *cgroup)
{
- kfree(cgroup_freezer(cgroup));
+ struct freezer *freezer = cgroup_freezer(cgroup);
+
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
+ kfree(freezer);
+}
+
+/* task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+ return frozen(task) ||
+ (task_is_stopped_or_traced(task) && freezing(task));
}
/*
@@ -160,13 +166,17 @@ static void freezer_destroy(struct cgroup_subsys *ss,
*/
static int freezer_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgroup,
- struct task_struct *task)
+ struct cgroup_taskset *tset)
{
struct freezer *freezer;
+ struct task_struct *task;
/*
* Anything frozen can't move or be moved to/from.
*/
+ cgroup_taskset_for_each(task, new_cgroup, tset)
+ if (cgroup_freezing(task))
+ return -EBUSY;
freezer = cgroup_freezer(new_cgroup);
if (freezer->state != CGROUP_THAWED)
@@ -175,17 +185,6 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
return 0;
}
-static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
- rcu_read_lock();
- if (__cgroup_freezing_or_frozen(tsk)) {
- rcu_read_unlock();
- return -EBUSY;
- }
- rcu_read_unlock();
- return 0;
-}
-
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
struct freezer *freezer;
@@ -213,7 +212,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
/* Locking avoids race with FREEZING -> THAWED transitions. */
if (freezer->state == CGROUP_FREEZING)
- freeze_task(task, true);
+ freeze_task(task);
spin_unlock_irq(&freezer->lock);
}
@@ -231,7 +230,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (frozen(task))
+ if (freezing(task) && is_task_frozen_enough(task))
nfrozen++;
}
@@ -279,12 +278,11 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;
unsigned int num_cant_freeze_now = 0;
- freezer->state = CGROUP_FREEZING;
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
- if (!freeze_task(task, true))
+ if (!freeze_task(task))
continue;
- if (frozen(task))
+ if (is_task_frozen_enough(task))
continue;
if (!freezing(task) && !freezer_should_skip(task))
num_cant_freeze_now++;
@@ -300,12 +298,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;
cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
- thaw_process(task);
- }
+ while ((task = cgroup_iter_next(cgroup, &it)))
+ __thaw_task(task);
cgroup_iter_end(cgroup, &it);
-
- freezer->state = CGROUP_THAWED;
}
static int freezer_change_state(struct cgroup *cgroup,
@@ -319,20 +314,24 @@ static int freezer_change_state(struct cgroup *cgroup,
spin_lock_irq(&freezer->lock);
update_if_frozen(cgroup, freezer);
- if (goal_state == freezer->state)
- goto out;
switch (goal_state) {
case CGROUP_THAWED:
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
+ freezer->state = CGROUP_THAWED;
unfreeze_cgroup(cgroup, freezer);
break;
case CGROUP_FROZEN:
+ if (freezer->state == CGROUP_THAWED)
+ atomic_inc(&system_freezing_cnt);
+ freezer->state = CGROUP_FREEZING;
retval = try_to_freeze_cgroup(cgroup, freezer);
break;
default:
BUG();
}
-out:
+
spin_unlock_irq(&freezer->lock);
return retval;
@@ -381,10 +380,5 @@ struct cgroup_subsys freezer_subsys = {
.populate = freezer_populate,
.subsys_id = freezer_subsys_id,
.can_attach = freezer_can_attach,
- .can_attach_task = freezer_can_attach_task,
- .pre_attach = NULL,
- .attach_task = NULL,
- .attach = NULL,
.fork = freezer_fork,
- .exit = NULL,
};
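
A small sketch of how the lock-free cgroup_freezing() test above is meant to be
used by callers outside the freezer cgroup (illustrative; example_* is
hypothetical, cgroup_freezing() and PF_NOFREEZE are as used in this diff). The
css lookup inside cgroup_freezing() takes its own rcu_read_lock(), so callers
no longer need task_lock():

	#include <linux/freezer.h>
	#include <linux/sched.h>

	static bool example_task_should_freeze(struct task_struct *p)
	{
		if (p->flags & PF_NOFREEZE)
			return false;		/* never freeze this task */

		/* safe from any context; no task_lock() required */
		return cgroup_freezing(p);
	}
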
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563f1360947..2060c6e5702 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -178,8 +178,7 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock);
for_each_process(p) {
if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
- (!cputime_eq(p->utime, cputime_zero) ||
- !cputime_eq(p->stime, cputime_zero)))
+ (p->utime || p->stime))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
"(state = %ld, flags = %x)\n",
p->comm, task_pid_nr(p), cpu,
@@ -380,6 +379,7 @@ out:
cpu_maps_update_done();
return err;
}
+EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
@@ -470,7 +470,7 @@ out:
cpu_maps_update_done();
}
-static int alloc_frozen_cpus(void)
+static int __init alloc_frozen_cpus(void)
{
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
}
-int cpu_hotplug_pm_sync_init(void)
+static int __init cpu_hotplug_pm_sync_init(void)
{
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a42..a09ac2b9a66 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
struct cpuset, css);
}
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return false;
+}
+#endif
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
- bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+ bool need_loop;
repeat:
/*
@@ -962,6 +975,14 @@ repeat:
return;
task_lock(tsk);
+ /*
+ * Determine if a loop is necessary if another thread is doing
+ * get_mems_allowed(). If at least one node remains unchanged and
+ * tsk does not have a mempolicy, then an empty nodemask will not be
+ * possible when mems_allowed is larger than a word.
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
@@ -981,11 +1002,9 @@ repeat:
/*
* Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side. No wait is necessary, however, if at least one
- * node remains unchanged.
+ * for the read-side.
*/
- while (masks_disjoint &&
- ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+ while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
task_unlock(tsk);
if (!task_curr(tsk))
yield();
@@ -1370,79 +1389,73 @@ static int fmeter_getrate(struct fmeter *fmp)
return val;
}
-/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
- struct task_struct *tsk)
-{
- struct cpuset *cs = cgroup_cs(cont);
-
- if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
- return -ENOSPC;
-
- /*
- * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
- * cannot change their cpu affinity and isolating such threads by their
- * set of allowed nodes is unnecessary. Thus, cpusets are not
- * applicable for such threads. This prevents checking for success of
- * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
- * be changed.
- */
- if (tsk->flags & PF_THREAD_BOUND)
- return -EINVAL;
-
- return 0;
-}
-
-static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
-{
- return security_task_setscheduler(task);
-}
-
/*
* Protected by cgroup_lock. The nodemasks must be stored globally because
- * dynamically allocating them is not allowed in pre_attach, and they must
- * persist among pre_attach, attach_task, and attach.
+ * dynamically allocating them is not allowed in can_attach, and they must
+ * persist until attach.
*/
static cpumask_var_t cpus_attach;
static nodemask_t cpuset_attach_nodemask_from;
static nodemask_t cpuset_attach_nodemask_to;
-/* Set-up work for before attaching each task. */
-static void cpuset_pre_attach(struct cgroup *cont)
+/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
- struct cpuset *cs = cgroup_cs(cont);
+ struct cpuset *cs = cgroup_cs(cgrp);
+ struct task_struct *task;
+ int ret;
+ if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+ return -ENOSPC;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ /*
+ * Kthreads bound to specific cpus cannot be moved to a new
+ * cpuset; we cannot change their cpu affinity and
+ * isolating such threads by their set of allowed nodes is
+ * unnecessary. Thus, cpusets are not applicable for such
+ * threads. This prevents checking for success of
+ * set_cpus_allowed_ptr() on all attached tasks before
+ * cpus_allowed may be changed.
+ */
+ if (task->flags & PF_THREAD_BOUND)
+ return -EINVAL;
+ if ((ret = security_task_setscheduler(task)))
+ return ret;
+ }
+
+ /* prepare for attach */
if (cs == &top_cpuset)
cpumask_copy(cpus_attach, cpu_possible_mask);
else
guarantee_online_cpus(cs, cpus_attach);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
-}
-
-/* Per-thread attachment work. */
-static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
-{
- int err;
- struct cpuset *cs = cgroup_cs(cont);
- /*
- * can_attach beforehand should guarantee that this doesn't fail.
- * TODO: have a better way to handle failure here
- */
- err = set_cpus_allowed_ptr(tsk, cpus_attach);
- WARN_ON_ONCE(err);
-
- cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
- cpuset_update_task_spread_flag(cs, tsk);
+ return 0;
}
-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
- struct cgroup *oldcont, struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
struct mm_struct *mm;
- struct cpuset *cs = cgroup_cs(cont);
- struct cpuset *oldcs = cgroup_cs(oldcont);
+ struct task_struct *task;
+ struct task_struct *leader = cgroup_taskset_first(tset);
+ struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
+ struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *oldcs = cgroup_cs(oldcgrp);
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ /*
+ * can_attach beforehand should guarantee that this doesn't
+ * fail. TODO: have a better way to handle failure here
+ */
+ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+
+ cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+ cpuset_update_task_spread_flag(cs, task);
+ }
/*
* Change mm, possibly for multiple threads in a threadgroup. This is
@@ -1450,7 +1463,7 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
*/
cpuset_attach_nodemask_from = oldcs->mems_allowed;
cpuset_attach_nodemask_to = cs->mems_allowed;
- mm = get_task_mm(tsk);
+ mm = get_task_mm(leader);
if (mm) {
mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
if (is_memory_migrate(cs))
@@ -1906,9 +1919,6 @@ struct cgroup_subsys cpuset_subsys = {
.create = cpuset_create,
.destroy = cpuset_destroy,
.can_attach = cpuset_can_attach,
- .can_attach_task = cpuset_can_attach_task,
- .pre_attach = cpuset_pre_attach,
- .attach_task = cpuset_attach_task,
.attach = cpuset_attach,
.populate = cpuset_populate,
.post_clone = cpuset_post_clone,
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 5532dd37aa8..7d6fb40d218 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -636,7 +636,7 @@ char kdb_task_state_char (const struct task_struct *p)
(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
(p->exit_state & EXIT_DEAD) ? 'E' :
(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
- if (p->pid == 0) {
+ if (is_idle_task(p)) {
/* Idle task. Is it really idle, apart from the kdb
* interrupt? */
if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 89e5e8aa4c3..22d901f9caf 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
-obj-y := core.o ring_buffer.o
+obj-y := core.o ring_buffer.o callchain.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
new file mode 100644
index 00000000000..057e24b665c
--- /dev/null
+++ b/kernel/events/callchain.c
@@ -0,0 +1,191 @@
+/*
+ * Performance events callchain code, extracted from core.c:
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct callchain_cpus_entries {
+ struct rcu_head rcu_head;
+ struct perf_callchain_entry *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+static struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+ struct callchain_cpus_entries *entries;
+ int cpu;
+
+ entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+
+ kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+ struct callchain_cpus_entries *entries;
+
+ entries = callchain_cpus_entries;
+ rcu_assign_pointer(callchain_cpus_entries, NULL);
+ call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+ int cpu;
+ int size;
+ struct callchain_cpus_entries *entries;
+
+ /*
+ * We can't use the percpu allocation API for data that can be
+ * accessed from NMI. Use a temporary manual per cpu allocation
+ * until that gets sorted out.
+ */
+ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
+
+ entries = kzalloc(size, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+ for_each_possible_cpu(cpu) {
+ entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!entries->cpu_entries[cpu])
+ goto fail;
+ }
+
+ rcu_assign_pointer(callchain_cpus_entries, entries);
+
+ return 0;
+
+fail:
+ for_each_possible_cpu(cpu)
+ kfree(entries->cpu_entries[cpu]);
+ kfree(entries);
+
+ return -ENOMEM;
+}
+
+int get_callchain_buffers(void)
+{
+ int err = 0;
+ int count;
+
+ mutex_lock(&callchain_mutex);
+
+ count = atomic_inc_return(&nr_callchain_events);
+ if (WARN_ON_ONCE(count < 1)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (count > 1) {
+ /* If the allocation failed, give up */
+ if (!callchain_cpus_entries)
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = alloc_callchain_buffers();
+ if (err)
+ release_callchain_buffers();
+exit:
+ mutex_unlock(&callchain_mutex);
+
+ return err;
+}
+
+void put_callchain_buffers(void)
+{
+ if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+ release_callchain_buffers();
+ mutex_unlock(&callchain_mutex);
+ }
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+ int cpu;
+ struct callchain_cpus_entries *entries;
+
+ *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ if (*rctx == -1)
+ return NULL;
+
+ entries = rcu_dereference(callchain_cpus_entries);
+ if (!entries)
+ return NULL;
+
+ cpu = smp_processor_id();
+
+ return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+ put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ int rctx;
+ struct perf_callchain_entry *entry;
+
+
+ entry = get_callchain_entry(&rctx);
+ if (rctx == -1)
+ return NULL;
+
+ if (!entry)
+ goto exit_put;
+
+ entry->nr = 0;
+
+ if (!user_mode(regs)) {
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(entry, regs);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_user(entry, regs);
+ }
+
+exit_put:
+ put_callchain_entry(rctx);
+
+ return entry;
+}
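
A minimal sketch of the intended lifecycle of the helpers in this new file
(illustrative only; they remain internal to kernel/events and are declared in
internal.h later in this patch, and the example_* names are hypothetical). An
event that wants callchain sampling takes a reference on the shared per-cpu
buffers at creation, records via perf_callchain(), and drops the reference on
destruction:

	static int example_event_init(void)
	{
		/* first user allocates the per-cpu callchain buffers */
		return get_callchain_buffers();
	}

	static void example_sample(struct pt_regs *regs)
	{
		struct perf_callchain_entry *entry = perf_callchain(regs);

		if (entry) {
			/* entry->ip[0 .. entry->nr - 1] holds the captured chain */
		}
	}

	static void example_event_exit(void)
	{
		/* last user frees the buffers (deferred via RCU) */
		put_callchain_buffers();
	}
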
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f9..a8f4ac001a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright Š 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
@@ -128,7 +128,7 @@ enum event_type_t {
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
-struct jump_label_key perf_sched_events __read_mostly;
+struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static atomic_t nr_mmap_events __read_mostly;
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
@@ -1127,6 +1130,8 @@ event_sched_out(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
@@ -1322,6 +1327,7 @@ retry:
}
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
@@ -1403,6 +1409,8 @@ event_sched_in(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
+ if (event->attr.freq && event->attr.sample_freq)
+ ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
@@ -1659,8 +1667,7 @@ retry:
* Note: this works for group members as well as group leaders
* since the non-leader members' sibling_lists will be empty.
*/
-static void __perf_event_mark_enabled(struct perf_event *event,
- struct perf_event_context *ctx)
+static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
@@ -1698,7 +1705,7 @@ static int __perf_event_enable(void *info)
*/
perf_cgroup_set_timestamp(current, ctx);
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
@@ -1779,7 +1786,7 @@ void perf_event_enable(struct perf_event *event)
retry:
if (!ctx->is_active) {
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
goto out;
}
@@ -1806,6 +1813,7 @@ retry:
out:
raw_spin_unlock_irq(&ctx->lock);
}
+EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
@@ -2171,9 +2179,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ if (ctx->nr_events)
+ cpuctx->task_ctx = ctx;
- cpuctx->task_ctx = ctx;
+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
@@ -2323,6 +2332,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
u64 interrupts, now;
s64 delta;
+ if (!ctx->nr_freq)
+ return;
+
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2378,12 +2390,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
struct perf_event_context *ctx = NULL;
- int rotate = 0, remove = 1;
+ int rotate = 0, remove = 1, freq = 0;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
+ if (cpuctx->ctx.nr_freq)
+ freq = 1;
}
ctx = cpuctx->task_ctx;
@@ -2391,33 +2405,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
+ if (ctx->nr_freq)
+ freq = 1;
}
+ if (!rotate && !freq)
+ goto done;
+
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- perf_ctx_adjust_freq(&cpuctx->ctx, interval);
- if (ctx)
- perf_ctx_adjust_freq(ctx, interval);
- if (!rotate)
- goto done;
+ if (freq) {
+ perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+ if (ctx)
+ perf_ctx_adjust_freq(ctx, interval);
+ }
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ if (rotate) {
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ if (ctx)
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
- rotate_ctx(&cpuctx->ctx);
- if (ctx)
- rotate_ctx(ctx);
+ rotate_ctx(&cpuctx->ctx);
+ if (ctx)
+ rotate_ctx(ctx);
+
+ perf_event_sched_in(cpuctx, ctx, current);
+ }
- perf_event_sched_in(cpuctx, ctx, current);
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
-
- perf_pmu_enable(cpuctx->ctx.pmu);
- perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
void perf_event_task_tick(void)
@@ -2444,7 +2465,7 @@ static int event_enable_on_exec(struct perf_event *event,
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
- __perf_event_mark_enabled(event, ctx);
+ __perf_event_mark_enabled(event);
return 1;
}
@@ -2476,13 +2497,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
raw_spin_lock(&ctx->lock);
task_ctx_sched_out(ctx);
- list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
- ret = event_enable_on_exec(event, ctx);
- if (ret)
- enabled = 1;
- }
-
- list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
+ list_for_each_entry(event, &ctx->event_list, event_entry) {
ret = event_enable_on_exec(event, ctx);
if (ret)
enabled = 1;
@@ -2570,215 +2585,6 @@ static u64 perf_event_read(struct perf_event *event)
}
/*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
- struct rcu_head rcu_head;
- struct perf_callchain_entry *cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
- struct callchain_cpus_entries *entries;
- int cpu;
-
- entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
-
- kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
- struct callchain_cpus_entries *entries;
-
- entries = callchain_cpus_entries;
- rcu_assign_pointer(callchain_cpus_entries, NULL);
- call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
- int cpu;
- int size;
- struct callchain_cpus_entries *entries;
-
- /*
- * We can't use the percpu allocation API for data that can be
- * accessed from NMI. Use a temporary manual per cpu allocation
- * until that gets sorted out.
- */
- size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
- entries = kzalloc(size, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
- for_each_possible_cpu(cpu) {
- entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
- cpu_to_node(cpu));
- if (!entries->cpu_entries[cpu])
- goto fail;
- }
-
- rcu_assign_pointer(callchain_cpus_entries, entries);
-
- return 0;
-
-fail:
- for_each_possible_cpu(cpu)
- kfree(entries->cpu_entries[cpu]);
- kfree(entries);
-
- return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
- int err = 0;
- int count;
-
- mutex_lock(&callchain_mutex);
-
- count = atomic_inc_return(&nr_callchain_events);
- if (WARN_ON_ONCE(count < 1)) {
- err = -EINVAL;
- goto exit;
- }
-
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- goto exit;
- }
-
- err = alloc_callchain_buffers();
- if (err)
- release_callchain_buffers();
-exit:
- mutex_unlock(&callchain_mutex);
-
- return err;
-}
-
-static void put_callchain_buffers(void)
-{
- if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
- release_callchain_buffers();
- mutex_unlock(&callchain_mutex);
- }
-}
-
-static int get_recursion_context(int *recursion)
-{
- int rctx;
-
- if (in_nmi())
- rctx = 3;
- else if (in_irq())
- rctx = 2;
- else if (in_softirq())
- rctx = 1;
- else
- rctx = 0;
-
- if (recursion[rctx])
- return -1;
-
- recursion[rctx]++;
- barrier();
-
- return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
- barrier();
- recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
- int cpu;
- struct callchain_cpus_entries *entries;
-
- *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
- if (*rctx == -1)
- return NULL;
-
- entries = rcu_dereference(callchain_cpus_entries);
- if (!entries)
- return NULL;
-
- cpu = smp_processor_id();
-
- return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
- put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
- int rctx;
- struct perf_callchain_entry *entry;
-
-
- entry = get_callchain_entry(&rctx);
- if (rctx == -1)
- return NULL;
-
- if (!entry)
- goto exit_put;
-
- entry->nr = 0;
-
- if (!user_mode(regs)) {
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(entry, regs);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_user(entry, regs);
- }
-
-exit_put:
- put_callchain_entry(rctx);
-
- return entry;
-}
-
-/*
* Initialize the perf_event context in a task_struct:
*/
static void __perf_event_init_context(struct perf_event_context *ctx)
@@ -2942,7 +2748,7 @@ static void free_event(struct perf_event *event)
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
if (event->attr.mmap || event->attr.mmap_data)
atomic_dec(&nr_mmap_events);
if (event->attr.comm)
@@ -2953,7 +2759,7 @@ static void free_event(struct perf_event *event)
put_callchain_buffers();
if (is_cgroup_event(event)) {
atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_dec(&perf_sched_events);
+ jump_label_dec_deferred(&perf_sched_events);
}
}
@@ -3190,12 +2996,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
struct ring_buffer *rb;
unsigned int events = POLL_HUP;
+ /*
+ * Race between perf_event_set_output() and perf_poll(): perf_poll()
+ * grabs the rb reference but perf_event_set_output() overrides it.
+ * Here is the timeline for two threads T1, T2:
+ * t0: T1, rb = rcu_dereference(event->rb)
+ * t1: T2, old_rb = event->rb
+ * t2: T2, event->rb = new rb
+ * t3: T2, ring_buffer_detach(old_rb)
+	 * t4: T1, ring_buffer_attach(rb)
+ * t5: T1, poll_wait(event->waitq)
+ *
+ * To avoid this problem, we grab mmap_mutex in perf_poll()
+ * thereby ensuring that the assignment of the new ring buffer
+ * and the detachment of the old buffer appear atomic to perf_poll()
+ */
+ mutex_lock(&event->mmap_mutex);
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (rb)
+ if (rb) {
+ ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
+ }
rcu_read_unlock();
+ mutex_unlock(&event->mmap_mutex);
+
poll_wait(file, &event->waitq, wait);
return events;
@@ -3496,6 +3323,53 @@ unlock:
return ret;
}
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (!list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ if (!list_empty(&event->rb_entry))
+ goto unlock;
+
+ list_add(&event->rb_entry, &rb->event_list);
+unlock:
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+ wake_up_all(&event->waitq);
+
+unlock:
+ rcu_read_unlock();
+}
+
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
@@ -3521,9 +3395,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
static void ring_buffer_put(struct ring_buffer *rb)
{
+ struct perf_event *event, *n;
+ unsigned long flags;
+
if (!atomic_dec_and_test(&rb->refcount))
return;
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ }
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+
call_rcu(&rb->rcu_head, rb_free_rcu);
}
@@ -3546,6 +3430,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
@@ -3700,7 +3585,7 @@ static const struct file_operations perf_fops = {
void perf_event_wakeup(struct perf_event *event)
{
- wake_up_all(&event->waitq);
+ ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -4737,7 +4622,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
- data->period = event->hw.last_period;
if (!overflow)
overflow = perf_swevent_set_period(event);
@@ -4771,6 +4655,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
if (!is_sampling_event(event))
return;
+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+ data->period = nr;
+ return perf_swevent_overflow(event, 1, data, regs);
+ } else
+ data->period = event->hw.last_period;
+
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, data, regs);
@@ -5283,7 +5173,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
- if (!(event->attr.exclude_idle && current->pid == 0))
+ if (!(event->attr.exclude_idle && is_idle_task(current)))
if (perf_event_overflow(event, &data, regs))
ret = HRTIMER_NORESTART;
}
@@ -5822,6 +5712,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
+ INIT_LIST_HEAD(&event->rb_entry);
+
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
@@ -5896,7 +5788,7 @@ done:
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
@@ -6028,6 +5920,8 @@ set:
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
@@ -6132,7 +6026,7 @@ SYSCALL_DEFINE5(perf_event_open,
* - that may need work on context switch
*/
atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
- jump_label_inc(&perf_sched_events);
+ jump_label_inc(&perf_sched_events.key);
}
/*
@@ -6978,6 +6872,9 @@ void __init perf_event_init(void)
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
+
+ /* do not patch jump label more than once per second */
+ jump_label_rate_limit(&perf_sched_events, HZ);
}
static int __init perf_event_sysfs_init(void)
@@ -7044,10 +6941,13 @@ static int __perf_cgroup_move(void *info)
return 0;
}
-static void
-perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
+static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
- task_function_call(task, __perf_cgroup_move, task);
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset)
+ task_function_call(task, __perf_cgroup_move, task);
}
static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
@@ -7061,7 +6961,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
if (!(task->flags & PF_EXITING))
return;
- perf_cgroup_attach_task(cgrp, task);
+ task_function_call(task, __perf_cgroup_move, task);
}
struct cgroup_subsys perf_subsys = {
@@ -7070,6 +6970,6 @@ struct cgroup_subsys perf_subsys = {
.create = perf_cgroup_create,
.destroy = perf_cgroup_destroy,
.exit = perf_cgroup_exit,
- .attach_task = perf_cgroup_attach_task,
+ .attach = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
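
A sketch of the deferred jump label pattern that core.c switches to above
(illustrative; the jump_label_* names and the .key member are the ones used in
this diff and may differ in other kernel versions, and example_key is
hypothetical). The enable side patches immediately, while the disable side is
rate limited so rapid create/destroy cycles do not thrash code patching:

	static struct jump_label_key_deferred example_key;

	static void example_subsys_init(void)
	{
		/* defer disabling to at most one patching pass per second */
		jump_label_rate_limit(&example_key, HZ);
	}

	static void example_get(void)
	{
		jump_label_inc(&example_key.key);	/* enable now */
	}

	static void example_put(void)
	{
		jump_label_dec_deferred(&example_key);	/* disable later */
	}
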
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09097dd8116..b0b107f90af 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -1,6 +1,10 @@
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H
+#include <linux/hardirq.h>
+
+/* Buffer handling */
+
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
@@ -22,6 +26,9 @@ struct ring_buffer {
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
+ /* poll crap */
+ spinlock_t event_lock;
+ struct list_head event_list;
struct perf_event_mmap_page *user_page;
void *data_pages[0];
@@ -64,7 +71,7 @@ static inline int page_order(struct ring_buffer *rb)
}
#endif
-static unsigned long perf_data_size(struct ring_buffer *rb)
+static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
@@ -93,4 +100,37 @@ __output_copy(struct perf_output_handle *handle,
} while (len);
}
+/* Callchain handling */
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);
+
+static inline int get_recursion_context(int *recursion)
+{
+ int rctx;
+
+ if (in_nmi())
+ rctx = 3;
+ else if (in_irq())
+ rctx = 2;
+ else if (in_softirq())
+ rctx = 1;
+ else
+ rctx = 0;
+
+ if (recursion[rctx])
+ return -1;
+
+ recursion[rctx]++;
+ barrier();
+
+ return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+ barrier();
+ recursion[rctx]--;
+}
+
#endif /* _KERNEL_EVENTS_INTERNAL_H */
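
A sketch of how the recursion-context helpers now exposed here are meant to be
paired within kernel/events (this mirrors get_callchain_entry() and
put_callchain_entry() in callchain.c; example_* is hypothetical). One counter
per context level — task, softirq, hardirq, NMI — lets re-entry from the same
level be detected and refused:

	static DEFINE_PER_CPU(int, example_recursion[PERF_NR_CONTEXTS]);

	static void example_nmi_safe_section(void)
	{
		int rctx;

		rctx = get_recursion_context(__get_cpu_var(example_recursion));
		if (rctx == -1)
			return;		/* already active in this context */

		/* ... non-reentrant work ... */

		put_recursion_context(__get_cpu_var(example_recursion), rctx);
	}
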
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0..6ddaba43fb7 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright Š 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb->writable = 1;
atomic_set(&rb->refcount, 1);
+
+ INIT_LIST_HEAD(&rb->event_list);
+ spin_lock_init(&rb->event_lock);
}
#ifndef CONFIG_PERF_USE_VMALLOC
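
For context, a hedged userspace-side illustration of why rb->event_list exists
(the fds and the example_redirect() wrapper are hypothetical;
PERF_EVENT_IOC_SET_OUTPUT is the existing redirection ioctl). Once two events
share one ring buffer, a wakeup on that buffer must reach both waiters, which
is what the attach/detach list added in this series provides:

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* route event_b's samples into event_a's mmap'ed buffer; after this,
	 * poll() on either fd must still be woken when the shared buffer
	 * receives data */
	int example_redirect(int fd_a, int fd_b)
	{
		return ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
	}
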
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f87..94ed6e20bb5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -51,6 +51,7 @@
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
+#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -121,9 +122,9 @@ static void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+ sig->utime += tsk->utime;
+ sig->stime += tsk->stime;
+ sig->gtime += tsk->gtime;
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
@@ -679,8 +680,6 @@ static void exit_mm(struct task_struct * tsk)
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
- /* We don't want this task to be frozen prematurely */
- clear_freeze_flag(tsk);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
@@ -1037,9 +1036,12 @@ NORET_TYPE void do_exit(long code)
validate_creds_for_do_exit(tsk);
preempt_disable();
+ if (tsk->nr_dirtied)
+ __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
+ tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
schedule();
BUG();
/* Avoid "noreturn function does return". */
@@ -1255,19 +1257,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
- psig->cutime =
- cputime_add(psig->cutime,
- cputime_add(tgutime,
- sig->cutime));
- psig->cstime =
- cputime_add(psig->cstime,
- cputime_add(tgstime,
- sig->cstime));
- psig->cgtime =
- cputime_add(psig->cgtime,
- cputime_add(p->gtime,
- cputime_add(sig->gtime,
- sig->cgtime)));
+ psig->cutime += tgutime + sig->cutime;
+ psig->cstime += tgstime + sig->cstime;
+ psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
@@ -1540,8 +1532,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* dead body doesn't have much to contribute */
- if (p->exit_state == EXIT_DEAD)
+ if (unlikely(p->exit_state == EXIT_DEAD)) {
+ /*
+ * But do not ignore this task until the tracer does
+ * wait_task_zombie()->do_notify_parent().
+ */
+ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+ wo->notask_error = 0;
return 0;
+ }
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
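
The cputime conversions above all follow one pattern (illustrative before/after
only, assuming cputime_t now behaves as a plain scalar so ordinary arithmetic
replaces the cputime_*() helpers):

	/* before: accumulate with helper macros */
	sig->utime = cputime_add(sig->utime, tsk->utime);
	sig->stime = cputime_add(sig->stime, tsk->stime);

	/* after: plain integer arithmetic */
	sig->utime += tsk->utime;
	sig->stime += tsk->stime;
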
diff --git a/kernel/fork.c b/kernel/fork.c
index da4a6a10d08..443f5125f11 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -76,6 +76,9 @@
#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/task.h>
+
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
@@ -972,7 +975,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sched_autogroup_fork(sig);
#ifdef CONFIG_CGROUPS
- init_rwsem(&sig->threadgroup_fork_lock);
+ init_rwsem(&sig->group_rwsem);
#endif
sig->oom_adj = current->signal->oom_adj;
@@ -992,7 +995,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
- clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
@@ -1023,8 +1025,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
- tsk->cputime_expires.prof_exp = cputime_zero;
- tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.prof_exp = 0;
+ tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1132,14 +1134,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
init_sigpending(&p->pending);
- p->utime = cputime_zero;
- p->stime = cputime_zero;
- p->gtime = cputime_zero;
- p->utimescaled = cputime_zero;
- p->stimescaled = cputime_zero;
+ p->utime = p->stime = p->gtime = 0;
+ p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- p->prev_utime = cputime_zero;
- p->prev_stime = cputime_zero;
+ p->prev_utime = p->prev_stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
@@ -1158,7 +1156,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->io_context = NULL;
p->audit_context = NULL;
if (clone_flags & CLONE_THREAD)
- threadgroup_fork_read_lock(current);
+ threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1296,6 +1294,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->nr_dirtied = 0;
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+ p->dirty_paused_when = 0;
/*
* Ok, make it visible to the rest of the system.
@@ -1373,8 +1372,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
proc_fork_connector(p);
cgroup_post_fork(p);
if (clone_flags & CLONE_THREAD)
- threadgroup_fork_read_unlock(current);
+ threadgroup_change_end(current);
perf_event_fork(p);
+
+ trace_task_newtask(p, clone_flags);
+
return p;
bad_fork_free_pid:
@@ -1408,7 +1410,7 @@ bad_fork_cleanup_policy:
bad_fork_cleanup_cgroup:
#endif
if (clone_flags & CLONE_THREAD)
- threadgroup_fork_read_unlock(current);
+ threadgroup_change_end(current);
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
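threadgroup_change_begin()/threadgroup_change_end() replace the old threadgroup_fork_read_lock()/_unlock() pair around the thread-copy path above. A plausible shape for the new helpers, assuming they are thin read-side wrappers around the renamed sig->group_rwsem (their real definitions live in a header that is not part of this hunk):

static inline void threadgroup_change_begin(struct task_struct *tsk)
{
        down_read(&tsk->signal->group_rwsem);
}

static inline void threadgroup_change_end(struct task_struct *tsk)
{
        up_read(&tsk->signal->group_rwsem);
}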
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c53439..9815b8d1eed 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
+#include <linux/kthread.h>
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state. Can be
+ * called under any context. The freezers are responsible for ensuring the
+ * target tasks see the updated state.
*/
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
{
- if (!unlikely(current->flags & PF_NOFREEZE)) {
- current->flags |= PF_FROZEN;
- smp_wmb();
- }
- clear_freeze_flag(current);
+ if (p->flags & PF_NOFREEZE)
+ return false;
+
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+ if (pm_freezing && !(p->flags & PF_KTHREAD))
+ return true;
+
+ return false;
}
+EXPORT_SYMBOL(freezing_slow_path);
/* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
{
/* Hmm, should we be allowed to suspend when there are realtime
processes around? */
- long save;
+ bool was_frozen = false;
+ long save = current->state;
- task_lock(current);
- if (freezing(current)) {
- frozen_process();
- task_unlock(current);
- } else {
- task_unlock(current);
- return;
- }
- save = current->state;
pr_debug("%s entered refrigerator\n", current->comm);
- spin_lock_irq(&current->sighand->siglock);
- recalc_sigpending(); /* We sent fake signal, clean it up */
- spin_unlock_irq(&current->sighand->siglock);
-
- /* prevent accounting of that task to load */
- current->flags |= PF_FREEZING;
-
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!frozen(current))
+
+ spin_lock_irq(&freezer_lock);
+ current->flags |= PF_FROZEN;
+ if (!freezing(current) ||
+ (check_kthr_stop && kthread_should_stop()))
+ current->flags &= ~PF_FROZEN;
+ spin_unlock_irq(&freezer_lock);
+
+ if (!(current->flags & PF_FROZEN))
break;
+ was_frozen = true;
schedule();
}
- /* Remove the accounting blocker */
- current->flags &= ~PF_FREEZING;
-
pr_debug("%s left refrigerator\n", current->comm);
- __set_current_state(save);
+
+ /*
+ * Restore saved task state before returning. The mb'd version
+ * needs to be used; otherwise, it might silently break
+ * synchronization which depends on ordered task state change.
+ */
+ set_current_state(save);
+
+ return was_frozen;
}
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
static void fake_signal_wake_up(struct task_struct *p)
{
unsigned long flags;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, 0);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ if (lock_task_sighand(p, &flags)) {
+ signal_wake_up(p, 0);
+ unlock_task_sighand(p, &flags);
+ }
}
/**
- * freeze_task - send a freeze request to given task
- * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- * PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
+ *
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
*
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
*/
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
{
- /*
- * We first check if the task is freezing and next if it has already
- * been frozen to avoid the race with frozen_process() which first marks
- * the task as frozen and next clears its TIF_FREEZE.
- */
- if (!freezing(p)) {
- smp_rmb();
- if (frozen(p))
- return false;
-
- if (!sig_only || should_send_signal(p))
- set_freeze_flag(p);
- else
- return false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&freezer_lock, flags);
+ if (!freezing(p) || frozen(p)) {
+ spin_unlock_irqrestore(&freezer_lock, flags);
+ return false;
}
- if (should_send_signal(p)) {
+ if (!(p->flags & PF_KTHREAD)) {
fake_signal_wake_up(p);
/*
* fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
* TASK_RUNNING transition can't race with task state
* testing in try_to_freeze_tasks().
*/
- } else if (sig_only) {
- return false;
} else {
wake_up_state(p, TASK_INTERRUPTIBLE);
}
+ spin_unlock_irqrestore(&freezer_lock, flags);
return true;
}
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
{
unsigned long flags;
- if (freezing(p)) {
- pr_debug(" clean up: %s\n", p->comm);
- clear_freeze_flag(p);
- spin_lock_irqsave(&p->sighand->siglock, flags);
- recalc_sigpending_and_wake(p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- }
-}
-
-static int __thaw_process(struct task_struct *p)
-{
- if (frozen(p)) {
- p->flags &= ~PF_FROZEN;
- return 1;
- }
- clear_freeze_flag(p);
- return 0;
+ /*
+ * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
+ * be visible to @p as waking up implies wmb. Waking up inside
+ * freezer_lock also prevents wakeups from leaking outside
+ * refrigerator.
+ */
+ spin_lock_irqsave(&freezer_lock, flags);
+ if (frozen(p))
+ wake_up_process(p);
+ spin_unlock_irqrestore(&freezer_lock, flags);
}
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
*
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
*/
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
{
- task_lock(p);
- if (__thaw_process(p) == 1) {
- task_unlock(p);
- wake_up_process(p);
- return 1;
- }
- task_unlock(p);
- return 0;
+ might_sleep();
+
+ /*
+ * Modify flags while holding freezer_lock. This ensures the
+ * freezer notices that we aren't frozen yet or the freezing
+ * condition is visible to try_to_freeze() below.
+ */
+ spin_lock_irq(&freezer_lock);
+ current->flags &= ~PF_NOFREEZE;
+ spin_unlock_irq(&freezer_lock);
+
+ return try_to_freeze();
}
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);
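freezing_slow_path() above is documented as being reached from freezing() only when system_freezing_cnt is non-zero. A minimal sketch of that fast-path wrapper, assumed to live in include/linux/freezer.h rather than in this file:

static inline bool freezing(struct task_struct *p)
{
        /* common case: no freezing condition anywhere in the system */
        if (likely(!atomic_read(&system_freezing_cnt)))
                return false;

        return freezing_slow_path(p);
}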
diff --git a/kernel/futex.c b/kernel/futex.c
index ea87f4d2f45..1614be20173 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -314,17 +314,29 @@ again:
#endif
lock_page(page_head);
+
+ /*
+ * If page_head->mapping is NULL, then it cannot be a PageAnon
+ * page; but it might be the ZERO_PAGE or in the gate area or
+ * in a special mapping (all cases which we are happy to fail);
+ * or it may have been a good file page when get_user_pages_fast
+ * found it, but truncated or holepunched or subjected to
+ * invalidate_complete_page2 before we got the page lock (also
+ * cases which we are happy to fail). And we hold a reference,
+ * so refcount care in invalidate_complete_page's remove_mapping
+ * prevents drop_caches from setting mapping to NULL beneath us.
+ *
+ * The case we do have to guard against is when memory pressure made
+ * shmem_writepage move it from filecache to swapcache beneath us:
+ * an unlikely race, but we do need to retry for page_head->mapping.
+ */
if (!page_head->mapping) {
+ int shmem_swizzled = PageSwapCache(page_head);
unlock_page(page_head);
put_page(page_head);
- /*
- * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
- * trying to find one. RW mapping would have COW'd (and thus
- * have a mapping) so this page is RO and won't ever change.
- */
- if ((page_head == ZERO_PAGE(address)))
- return -EFAULT;
- goto again;
+ if (shmem_swizzled)
+ goto again;
+ return -EFAULT;
}
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 422e567eecf..ae34bf51682 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
+ struct timerqueue_node *next_timer;
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
- if (&timer->node == timerqueue_getnext(&base->active)) {
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
/* Reprogram the clock event device. if enabled */
if (reprogram && hrtimer_hres_active()) {
@@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
}
#endif
}
- timerqueue_del(&base->active, &timer->node);
if (!timerqueue_getnext(&base->active))
base->cpu_base->active_bases &= ~(1 << base->index);
out:
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 8b1748d0172..2e48ec0c2e9 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
/*
* Ensure the task is not frozen.
- * Also, when a freshly created task is scheduled once, changes
- * its state to TASK_UNINTERRUPTIBLE without having ever been
- * switched out once, it musn't be checked.
+ * Also, skip vfork and any other user process that freezer should skip.
*/
- if (unlikely(t->flags & PF_FROZEN || !switch_count))
+ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+ return;
+
+ /*
+ * When a freshly created task is scheduled once, changes its state to
+ * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
+ * mustn't be checked.
+ */
+ if (unlikely(!switch_count))
return;
if (switch_count != t->last_switch_count) {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 200ce832c58..1f9e26526b6 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -135,6 +135,9 @@ int irq_domain_simple_dt_translate(struct irq_domain *d,
return -EINVAL;
if (intsize < 1)
return -EINVAL;
+ if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
+ (intspec[0] >= d->hwirq_base + d->nr_irq)))
+ return -EINVAL;
*out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;
@@ -143,11 +146,6 @@ int irq_domain_simple_dt_translate(struct irq_domain *d,
return 0;
}
-struct irq_domain_ops irq_domain_simple_ops = {
- .dt_translate = irq_domain_simple_dt_translate,
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
-
/**
* irq_domain_create_simple() - Set up a 'simple' translation range
*/
@@ -182,3 +180,10 @@ void irq_domain_generate_simple(const struct of_device_id *match,
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif /* CONFIG_OF_IRQ */
+
+struct irq_domain_ops irq_domain_simple_ops = {
+#ifdef CONFIG_OF_IRQ
+ .dt_translate = irq_domain_simple_dt_translate,
+#endif /* CONFIG_OF_IRQ */
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 67ce837ae52..a9a9dbe49fe 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
static int irq_wait_for_interrupt(struct irqaction *action)
{
+ set_current_state(TASK_INTERRUPTIBLE);
+
while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return 0;
}
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
return -1;
}
@@ -1289,7 +1292,7 @@ EXPORT_SYMBOL(free_irq);
* and to set up the interrupt handler in the right order.
*
* If you want to set up a threaded irq handler for your device
- * then you need to supply @handler and @thread_fn. @handler ist
+ * then you need to supply @handler and @thread_fn. @handler is
* still called in hard interrupt context and has to check
* whether the interrupt originates from the device. If yes it
* needs to disable the interrupt on the device and return
@@ -1596,7 +1599,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
return -ENOMEM;
action->handler = handler;
- action->flags = IRQF_PERCPU;
+ action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
action->name = devname;
action->percpu_dev_id = dev_id;
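The irq_wait_for_interrupt() change above converges on the standard wait-loop idiom: publish the sleeping state before testing the wake-up condition, so a wake-up that arrives between the test and schedule() cannot be lost. The general shape, with a placeholder condition:

set_current_state(TASK_INTERRUPTIBLE);
while (!should_wake_up()) {                     /* placeholder condition */
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);  /* re-arm before re-testing */
}
__set_current_state(TASK_RUNNING);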
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b5f4742693c..dc813a948be 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
*/
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
- (action->flags & __IRQF_TIMER) || !action->next)
+ (action->flags & __IRQF_TIMER) ||
+ (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+ !action->next)
goto out;
/* Already running on another processor */
diff --git a/kernel/itimer.c b/kernel/itimer.c
index d802883153d..22000c3db0d 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
cval = it->expires;
cinterval = it->incr;
- if (!cputime_eq(cval, cputime_zero)) {
+ if (cval) {
struct task_cputime cputime;
cputime_t t;
thread_group_cputimer(tsk, &cputime);
if (clock_id == CPUCLOCK_PROF)
- t = cputime_add(cputime.utime, cputime.stime);
+ t = cputime.utime + cputime.stime;
else
/* CPUCLOCK_VIRT */
t = cputime.utime;
- if (cputime_le(cval, t))
+ if (cval < t)
/* about to fire */
cval = cputime_one_jiffy;
else
- cval = cputime_sub(cval, t);
+ cval = cval - t;
}
spin_unlock_irq(&tsk->sighand->siglock);
@@ -161,10 +161,9 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
cval = it->expires;
cinterval = it->incr;
- if (!cputime_eq(cval, cputime_zero) ||
- !cputime_eq(nval, cputime_zero)) {
- if (cputime_gt(nval, cputime_zero))
- nval = cputime_add(nval, cputime_one_jiffy);
+ if (cval || nval) {
+ if (nval > 0)
+ nval += cputime_one_jiffy;
set_process_cpu_timer(tsk, clock_id, &nval, &cval);
}
it->expires = nval;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bbdfe2a462a..01d3b70fc98 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -66,19 +66,53 @@ void jump_label_inc(struct jump_label_key *key)
return;
jump_label_lock();
- if (atomic_add_return(1, &key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0)
jump_label_update(key, JUMP_LABEL_ENABLE);
+ atomic_inc(&key->enabled);
jump_label_unlock();
}
+EXPORT_SYMBOL_GPL(jump_label_inc);
-void jump_label_dec(struct jump_label_key *key)
+static void __jump_label_dec(struct jump_label_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
{
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
return;
- jump_label_update(key, JUMP_LABEL_DISABLE);
+ if (rate_limit) {
+ atomic_inc(&key->enabled);
+ schedule_delayed_work(work, rate_limit);
+ } else
+ jump_label_update(key, JUMP_LABEL_DISABLE);
+
jump_label_unlock();
}
+EXPORT_SYMBOL_GPL(jump_label_dec);
+
+static void jump_label_update_timeout(struct work_struct *work)
+{
+ struct jump_label_key_deferred *key =
+ container_of(work, struct jump_label_key_deferred, work.work);
+ __jump_label_dec(&key->key, 0, NULL);
+}
+
+void jump_label_dec(struct jump_label_key *key)
+{
+ __jump_label_dec(key, 0, NULL);
+}
+
+void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ __jump_label_dec(&key->key, key->timeout, &key->work);
+}
+
+
+void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+ key->timeout = rl;
+ INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+}
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
@@ -110,7 +144,7 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
* running code can override this to make the non-live update case
* cheaper.
*/
-void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
arch_jump_label_transform(entry, type);
@@ -216,8 +250,13 @@ void jump_label_apply_nops(struct module *mod)
if (iter_start == iter_stop)
return;
- for (iter = iter_start; iter < iter_stop; iter++)
- arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ struct jump_label_key *iterk;
+
+ iterk = (struct jump_label_key *)(unsigned long)iter->key;
+ arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+ JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+ }
}
static int jump_label_add_module(struct module *mod)
@@ -257,8 +296,7 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;
if (jump_label_enabled(key))
- __jump_label_update(key, iter, iter_stop,
- JUMP_LABEL_ENABLE);
+ __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
}
return 0;
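One way the deferred-disable API added above might be used; only jump_label_rate_limit(), jump_label_inc() and jump_label_dec_deferred() come from the patch, while the key and function names here are hypothetical:

static struct jump_label_key_deferred my_key;

static void my_feature_init(void)
{
        /* batch disables: text patching for JUMP_LABEL_DISABLE is deferred
         * by up to a second after the last reference is dropped */
        jump_label_rate_limit(&my_key, HZ);
}

static void my_feature_get(void)
{
        jump_label_inc(&my_key.key);
}

static void my_feature_put(void)
{
        jump_label_dec_deferred(&my_key);
}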
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc082928..090ee10d960 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
#endif
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a4bea97c75b..a0a88543934 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -36,6 +36,7 @@
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
+#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <trace/events/module.h>
@@ -50,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
#ifdef CONFIG_MODULES
@@ -275,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
* land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
*/
static int usermodehelper_disabled = 1;
@@ -282,17 +285,29 @@ static int usermodehelper_disabled = 1;
static atomic_t running_helpers = ATOMIC_INIT(0);
/*
- * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * Wait queue head used by usermodehelper_disable() to wait for all running
* helpers to finish.
*/
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
/*
* Time to wait for running_helpers to become zero before the setting of
- * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ * usermodehelper_disabled in usermodehelper_disable() fails
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
+void read_lock_usermodehelper(void)
+{
+ down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+ up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
/**
* usermodehelper_disable - prevent new helpers from being started
*/
@@ -300,8 +315,10 @@ int usermodehelper_disable(void)
{
long retval;
+ down_write(&umhelper_sem);
usermodehelper_disabled = 1;
- smp_mb();
+ up_write(&umhelper_sem);
+
/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
@@ -314,7 +331,9 @@ int usermodehelper_disable(void)
if (retval)
return 0;
+ down_write(&umhelper_sem);
usermodehelper_disabled = 0;
+ up_write(&umhelper_sem);
return -EAGAIN;
}
@@ -323,7 +342,9 @@ int usermodehelper_disable(void)
*/
void usermodehelper_enable(void)
{
+ down_write(&umhelper_sem);
usermodehelper_disabled = 0;
+ up_write(&umhelper_sem);
}
/**
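read_lock_usermodehelper()/read_unlock_usermodehelper() let a caller keep the usermodehelper-disabled state stable across a check-and-call sequence, since usermodehelper_disable() now takes umhelper_sem for writing. A hedged sketch of the intended bracketing; the wrapper function itself is hypothetical:

int launch_helper(char *path, char **argv, char **envp)
{
        int ret;

        read_lock_usermodehelper();
        /* the disabled state cannot change while the read lock is held */
        ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
        read_unlock_usermodehelper();

        return ret;
}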
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a9263..3d3de633702 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@ int kthread_should_stop(void)
EXPORT_SYMBOL(kthread_should_stop);
/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary. This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+ bool frozen = false;
+
+ might_sleep();
+
+ if (unlikely(freezing(current)))
+ frozen = __refrigerator(true);
+
+ if (was_frozen)
+ *was_frozen = frozen;
+
+ return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_states[N_HIGH_MEMORY]);
- current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+ current->flags |= PF_NOFREEZE;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
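kthread_freezable_should_stop() is meant to replace open-coded try_to_freeze() checks in freezable kthread main loops. A minimal sketch of such a loop built from the helpers added in this series; the thread body and its work checks are hypothetical:

static int my_freezable_thread(void *data)
{
        set_freezable();        /* clears PF_NOFREEZE, see kernel/freezer.c */

        while (!kthread_freezable_should_stop(NULL)) {
                if (have_pending_work())        /* placeholder */
                        process_pending_work(); /* placeholder */
                else
                        schedule_timeout_interruptible(HZ);
        }
        return 0;
}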
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070d..8889f7dd7c4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/kmemcheck.h>
#include <asm/sections.h>
@@ -430,6 +431,7 @@ unsigned int max_lockdep_depth;
* about it later on, in lockdep_info().
*/
static int lockdep_init_error;
+static const char *lock_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -498,36 +500,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
usage[i] = '\0';
}
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
- if (!name)
- name = __get_key_name(class->key, str);
-
- return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
- char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
- const char *name;
-
- get_usage_chars(class, usage);
-
- name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
- printk(" (%s", name);
+ printk("%s", name);
} else {
- printk(" (%s", name);
+ printk("%s", name);
if (class->name_version > 1)
printk("#%d", class->name_version);
if (class->subclass)
printk("/%d", class->subclass);
}
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+ char usage[LOCK_USAGE_CHARS];
+
+ get_usage_chars(class, usage);
+
+ printk(" (");
+ __print_lock_name(class);
printk("){%s}", usage);
}
@@ -567,11 +565,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
{
- printk("%s %.*s\n", init_utsname()->release,
+ printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
+ init_utsname()->version,
+ print_tainted());
}
static int very_verbose(struct lock_class *class)
@@ -655,6 +654,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
if (unlikely(!lockdep_initialized)) {
lockdep_init();
lockdep_init_error = 1;
+ lock_init_error = lock->name;
save_stack_trace(&lockdep_init_trace);
}
#endif
@@ -722,7 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
class = look_up_lock_class(lock, subclass);
if (likely(class))
- return class;
+ goto out_set_class_cache;
/*
* Debug-check: all keys must be persistent!
@@ -807,6 +807,7 @@ out_unlock_set:
graph_unlock();
raw_local_irq_restore(flags);
+out_set_class_cache:
if (!subclass || force)
lock->class_cache[0] = class;
else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
@@ -1148,7 +1149,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
printk("\n");
printk("======================================================\n");
printk("[ INFO: possible circular locking dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -1487,7 +1488,7 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("======================================================\n");
printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
- print_kernel_version();
+ print_kernel_ident();
printk("------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
@@ -1716,7 +1717,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
printk("\n");
printk("=============================================\n");
printk("[ INFO: possible recursive locking detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
@@ -2223,7 +2224,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
printk("\n");
printk("=================================\n");
printk("[ INFO: inconsistent lock state ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2288,7 +2289,7 @@ print_irq_inversion_bug(struct task_struct *curr,
printk("\n");
printk("=========================================================\n");
printk("[ INFO: possible irq lock inversion dependency detected ]\n");
- print_kernel_version();
+ print_kernel_ident();
printk("---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- memset(lock, 0, sizeof(*lock));
+ int i;
+
+ kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
@@ -3169,6 +3175,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=====================================\n");
printk("[ BUG: bad unlock balance detected! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
@@ -3613,6 +3620,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk("\n");
printk("=================================\n");
printk("[ BUG: bad contention detected! ]\n");
+ print_kernel_ident();
printk("---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
@@ -3968,7 +3976,8 @@ void __init lockdep_info(void)
#ifdef CONFIG_DEBUG_LOCKDEP
if (lockdep_init_error) {
- printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+ printk("WARNING: lockdep init error! lock-%s was acquired"
+ "before lockdep_init\n", lock_init_error);
printk("Call stack leading to lockdep invocation was:\n");
print_stack_trace(&lockdep_init_trace, 0);
}
@@ -3987,6 +3996,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
printk("\n");
printk("=========================\n");
printk("[ BUG: held lock freed! ]\n");
+ print_kernel_ident();
printk("-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4044,6 +4054,7 @@ static void print_held_locks_bug(struct task_struct *curr)
printk("\n");
printk("=====================================\n");
printk("[ BUG: lock held at task exit time! ]\n");
+ print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
@@ -4141,6 +4152,7 @@ void lockdep_sys_exit(void)
printk("\n");
printk("================================================\n");
printk("[ BUG: lock held when returning to user space! ]\n");
+ print_kernel_ident();
printk("------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
@@ -4160,10 +4172,33 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
printk("\n");
printk("===============================\n");
printk("[ INFO: suspicious RCU usage. ]\n");
+ print_kernel_ident();
printk("-------------------------------\n");
printk("%s:%d %s!\n", file, line, s);
printk("\nother info that might help us debug this:\n\n");
printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
+
+ /*
+ * If a CPU is in the RCU-free window in idle (ie: in the section
+ * between rcu_idle_enter() and rcu_idle_exit()), then RCU
+ * considers that CPU to be in an "extended quiescent state",
+ * which means that RCU will be completely ignoring that CPU.
+ * Therefore, rcu_read_lock() and friends have absolutely no
+ * effect on a CPU running in that state. In other words, even if
+ * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
+ * delete data structures out from under it. RCU really has no
+ * choice here: we need to keep an RCU-free window in idle where
+ * the CPU may possibly enter into low power mode. This way other CPUs
+ * that have started a grace period can notice the extended quiescent
+ * state. Otherwise we would delay any grace period as long as we run
+ * in the idle task.
+ *
+ * So complain bitterly if someone does call rcu_read_lock(),
+ * rcu_read_lock_bh() and so on from extended quiescent states.
+ */
+ if (rcu_is_cpu_idle())
+ printk("RCU used illegally from extended quiescent state!\n");
+
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
diff --git a/kernel/panic.c b/kernel/panic.c
index b2659360421..3458469eb7c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -237,11 +237,20 @@ void add_taint(unsigned flag)
* Can't trust the integrity of the kernel anymore.
* We don't call directly debug_locks_off() because the issue
* is not necessarily serious enough to set oops_in_progress to 1
- * Also we want to keep up lockdep for staging development and
- * post-warning case.
+ * Also we want to keep up lockdep for staging/out-of-tree
+ * development and post-warning case.
*/
- if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
- printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ switch (flag) {
+ case TAINT_CRAP:
+ case TAINT_OOT_MODULE:
+ case TAINT_WARN:
+ case TAINT_FIRMWARE_WORKAROUND:
+ break;
+
+ default:
+ if (__debug_locks_off())
+ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+ }
set_bit(flag, &tainted_mask);
}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e7cb76dc18f..125cb67daa2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -78,7 +78,7 @@ static inline int cpu_time_before(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched;
} else {
- return cputime_lt(now.cpu, then.cpu);
+ return now.cpu < then.cpu;
}
}
static inline void cpu_time_add(const clockid_t which_clock,
@@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
acc->sched += val.sched;
} else {
- acc->cpu = cputime_add(acc->cpu, val.cpu);
+ acc->cpu += val.cpu;
}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
@@ -98,25 +98,12 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
a.sched -= b.sched;
} else {
- a.cpu = cputime_sub(a.cpu, b.cpu);
+ a.cpu -= b.cpu;
}
return a;
}
/*
- * Divide and limit the result to res >= 1
- *
- * This is necessary to prevent signal delivery starvation, when the result of
- * the division would be rounded down to 0.
- */
-static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
-{
- cputime_t res = cputime_div(time, div);
-
- return max_t(cputime_t, res, 1);
-}
-
-/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
@@ -148,28 +135,26 @@ static void bump_cpu_timer(struct k_itimer *timer,
} else {
cputime_t delta, incr;
- if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
+ if (now.cpu < timer->it.cpu.expires.cpu)
return;
incr = timer->it.cpu.incr.cpu;
- delta = cputime_sub(cputime_add(now.cpu, incr),
- timer->it.cpu.expires.cpu);
+ delta = now.cpu + incr - timer->it.cpu.expires.cpu;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
- for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
- incr = cputime_add(incr, incr);
- for (; i >= 0; incr = cputime_halve(incr), i--) {
- if (cputime_lt(delta, incr))
+ for (i = 0; incr < delta - incr; i++)
+ incr += incr;
+ for (; i >= 0; incr = incr >> 1, i--) {
+ if (delta < incr)
continue;
- timer->it.cpu.expires.cpu =
- cputime_add(timer->it.cpu.expires.cpu, incr);
+ timer->it.cpu.expires.cpu += incr;
timer->it_overrun += 1 << i;
- delta = cputime_sub(delta, incr);
+ delta -= incr;
}
}
}
static inline cputime_t prof_ticks(struct task_struct *p)
{
- return cputime_add(p->utime, p->stime);
+ return p->utime + p->stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
@@ -248,8 +233,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
t = tsk;
do {
- times->utime = cputime_add(times->utime, t->utime);
- times->stime = cputime_add(times->stime, t->stime);
+ times->utime += t->utime;
+ times->stime += t->stime;
times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t);
out:
@@ -258,10 +243,10 @@ out:
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
- if (cputime_gt(b->utime, a->utime))
+ if (b->utime > a->utime)
a->utime = b->utime;
- if (cputime_gt(b->stime, a->stime))
+ if (b->stime > a->stime)
a->stime = b->stime;
if (b->sum_exec_runtime > a->sum_exec_runtime)
@@ -306,7 +291,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
return -EINVAL;
case CPUCLOCK_PROF:
thread_group_cputime(p, &cputime);
- cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+ cpu->cpu = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
thread_group_cputime(p, &cputime);
@@ -470,26 +455,24 @@ static void cleanup_timers(struct list_head *head,
unsigned long long sum_exec_runtime)
{
struct cpu_timer_list *timer, *next;
- cputime_t ptime = cputime_add(utime, stime);
+ cputime_t ptime = utime + stime;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
- if (cputime_lt(timer->expires.cpu, ptime)) {
- timer->expires.cpu = cputime_zero;
+ if (timer->expires.cpu < ptime) {
+ timer->expires.cpu = 0;
} else {
- timer->expires.cpu = cputime_sub(timer->expires.cpu,
- ptime);
+ timer->expires.cpu -= ptime;
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
- if (cputime_lt(timer->expires.cpu, utime)) {
- timer->expires.cpu = cputime_zero;
+ if (timer->expires.cpu < utime) {
+ timer->expires.cpu = 0;
} else {
- timer->expires.cpu = cputime_sub(timer->expires.cpu,
- utime);
+ timer->expires.cpu -= utime;
}
}
@@ -520,8 +503,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
struct signal_struct *const sig = tsk->signal;
cleanup_timers(tsk->signal->cpu_timers,
- cputime_add(tsk->utime, sig->utime),
- cputime_add(tsk->stime, sig->stime),
+ tsk->utime + sig->utime, tsk->stime + sig->stime,
tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
@@ -540,8 +522,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
- return cputime_eq(expires, cputime_zero) ||
- cputime_gt(expires, new_exp);
+ return expires == 0 || expires > new_exp;
}
/*
@@ -651,7 +632,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
default:
return -EINVAL;
case CPUCLOCK_PROF:
- cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+ cpu->cpu = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
cpu->cpu = cputime.utime;
@@ -918,12 +899,12 @@ static void check_thread_timers(struct task_struct *tsk,
unsigned long soft;
maxfire = 20;
- tsk->cputime_expires.prof_exp = cputime_zero;
+ tsk->cputime_expires.prof_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
- if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.prof_exp = t->expires.cpu;
break;
}
@@ -933,12 +914,12 @@ static void check_thread_timers(struct task_struct *tsk,
++timers;
maxfire = 20;
- tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.virt_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
- if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.virt_exp = t->expires.cpu;
break;
}
@@ -1009,20 +990,19 @@ static u32 onecputick;
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
cputime_t *expires, cputime_t cur_time, int signo)
{
- if (cputime_eq(it->expires, cputime_zero))
+ if (!it->expires)
return;
- if (cputime_ge(cur_time, it->expires)) {
- if (!cputime_eq(it->incr, cputime_zero)) {
- it->expires = cputime_add(it->expires, it->incr);
+ if (cur_time >= it->expires) {
+ if (it->incr) {
+ it->expires += it->incr;
it->error += it->incr_error;
if (it->error >= onecputick) {
- it->expires = cputime_sub(it->expires,
- cputime_one_jiffy);
+ it->expires -= cputime_one_jiffy;
it->error -= onecputick;
}
} else {
- it->expires = cputime_zero;
+ it->expires = 0;
}
trace_itimer_expire(signo == SIGPROF ?
@@ -1031,9 +1011,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
}
- if (!cputime_eq(it->expires, cputime_zero) &&
- (cputime_eq(*expires, cputime_zero) ||
- cputime_lt(it->expires, *expires))) {
+ if (it->expires && (!*expires || it->expires < *expires)) {
*expires = it->expires;
}
}
@@ -1048,9 +1026,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
*/
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
- if (cputime_eq(cputime->utime, cputime_zero) &&
- cputime_eq(cputime->stime, cputime_zero) &&
- cputime->sum_exec_runtime == 0)
+ if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
return 1;
return 0;
}
@@ -1076,15 +1052,15 @@ static void check_process_timers(struct task_struct *tsk,
*/
thread_group_cputimer(tsk, &cputime);
utime = cputime.utime;
- ptime = cputime_add(utime, cputime.stime);
+ ptime = utime + cputime.stime;
sum_sched_runtime = cputime.sum_exec_runtime;
maxfire = 20;
- prof_expires = cputime_zero;
+ prof_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
- if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
+ if (!--maxfire || ptime < tl->expires.cpu) {
prof_expires = tl->expires.cpu;
break;
}
@@ -1094,12 +1070,12 @@ static void check_process_timers(struct task_struct *tsk,
++timers;
maxfire = 20;
- virt_expires = cputime_zero;
+ virt_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
- if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
+ if (!--maxfire || utime < tl->expires.cpu) {
virt_expires = tl->expires.cpu;
break;
}
@@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk,
}
}
x = secs_to_cputime(soft);
- if (cputime_eq(prof_expires, cputime_zero) ||
- cputime_lt(x, prof_expires)) {
+ if (!prof_expires || x < prof_expires) {
prof_expires = x;
}
}
@@ -1249,12 +1224,9 @@ out:
static inline int task_cputime_expired(const struct task_cputime *sample,
const struct task_cputime *expires)
{
- if (!cputime_eq(expires->utime, cputime_zero) &&
- cputime_ge(sample->utime, expires->utime))
+ if (expires->utime && sample->utime >= expires->utime)
return 1;
- if (!cputime_eq(expires->stime, cputime_zero) &&
- cputime_ge(cputime_add(sample->utime, sample->stime),
- expires->stime))
+ if (expires->stime && sample->utime + sample->stime >= expires->stime)
return 1;
if (expires->sum_exec_runtime != 0 &&
sample->sum_exec_runtime >= expires->sum_exec_runtime)
@@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
* it to be relative, *newval argument is relative and we update
* it to be absolute.
*/
- if (!cputime_eq(*oldval, cputime_zero)) {
- if (cputime_le(*oldval, now.cpu)) {
+ if (*oldval) {
+ if (*oldval <= now.cpu) {
/* Just about to fire. */
*oldval = cputime_one_jiffy;
} else {
- *oldval = cputime_sub(*oldval, now.cpu);
+ *oldval -= now.cpu;
}
}
- if (cputime_eq(*newval, cputime_zero))
+ if (!*newval)
return;
- *newval = cputime_add(*newval, now.cpu);
+ *newval += now.cpu;
}
/*
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 196c01268eb..6d6d2887033 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
HIBERNATION_PLATFORM,
- HIBERNATION_TEST,
- HIBERNATION_TESTPROC,
HIBERNATION_SHUTDOWN,
HIBERNATION_REBOOT,
/* keep last */
@@ -55,7 +53,7 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
-static bool freezer_test_done;
+bool freezer_test_done;
static const struct platform_hibernation_ops *hibernation_ops;
@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
WARN_ON(1);
return;
}
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
hibernation_ops = ops;
if (ops)
hibernation_mode = HIBERNATION_PLATFORM;
else if (hibernation_mode == HIBERNATION_PLATFORM)
hibernation_mode = HIBERNATION_SHUTDOWN;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
mdelay(5000);
}
-static int hibernation_testmode(int mode)
-{
- if (hibernation_mode == mode) {
- hibernation_debug_sleep();
- return 1;
- }
- return 0;
-}
-
static int hibernation_test(int level)
{
if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
return 0;
}
#else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */
@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
goto Platform_finish;
error = disable_nonboot_cpus();
- if (error || hibernation_test(TEST_CPUS)
- || hibernation_testmode(HIBERNATION_TEST))
+ if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;
local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
*/
int hibernation_snapshot(int platform_mode)
{
- pm_message_t msg = PMSG_RECOVER;
+ pm_message_t msg;
int error;
error = platform_begin(platform_mode);
@@ -347,39 +334,40 @@ int hibernation_snapshot(int platform_mode)
error = freeze_kernel_threads();
if (error)
- goto Close;
+ goto Cleanup;
- if (hibernation_test(TEST_FREEZER) ||
- hibernation_testmode(HIBERNATION_TESTPROC)) {
+ if (hibernation_test(TEST_FREEZER)) {
/*
* Indicate to the caller that we are returning due to a
* successful freezer test.
*/
freezer_test_done = true;
- goto Close;
+ goto Cleanup;
}
error = dpm_prepare(PMSG_FREEZE);
- if (error)
- goto Complete_devices;
+ if (error) {
+ dpm_complete(PMSG_RECOVER);
+ goto Cleanup;
+ }
suspend_console();
pm_restrict_gfp_mask();
+
error = dpm_suspend(PMSG_FREEZE);
- if (error)
- goto Recover_platform;
- if (hibernation_test(TEST_DEVICES))
- goto Recover_platform;
+ if (error || hibernation_test(TEST_DEVICES))
+ platform_recover(platform_mode);
+ else
+ error = create_image(platform_mode);
- error = create_image(platform_mode);
/*
- * Control returns here (1) after the image has been created or the
+ * In the case that we call create_image() above, the control
+ * returns here (1) after the image has been created or the
* image creation has failed and (2) after a successful restore.
*/
- Resume_devices:
/* We may need to release the preallocated image pages here. */
if (error || !in_suspend)
swsusp_free();
@@ -391,17 +379,15 @@ int hibernation_snapshot(int platform_mode)
pm_restore_gfp_mask();
resume_console();
-
- Complete_devices:
dpm_complete(msg);
Close:
platform_end(platform_mode);
return error;
- Recover_platform:
- platform_recover(platform_mode);
- goto Resume_devices;
+ Cleanup:
+ swsusp_free();
+ goto Close;
}
/**
@@ -586,9 +572,6 @@ int hibernation_platform_enter(void)
static void power_down(void)
{
switch (hibernation_mode) {
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
- break;
case HIBERNATION_REBOOT:
kernel_restart(NULL);
break;
@@ -607,17 +590,6 @@ static void power_down(void)
while(1);
}
-static int prepare_processes(void)
-{
- int error = 0;
-
- if (freeze_processes()) {
- error = -EBUSY;
- thaw_processes();
- }
- return error;
-}
-
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
@@ -625,7 +597,7 @@ int hibernate(void)
{
int error;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -650,7 +622,7 @@ int hibernate(void)
sys_sync();
printk("done.\n");
- error = prepare_processes();
+ error = freeze_processes();
if (error)
goto Finish;
@@ -693,7 +665,7 @@ int hibernate(void)
pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error;
}
@@ -807,11 +779,13 @@ static int software_resume(void)
goto close_finish;
error = create_basic_memory_bitmaps();
- if (error)
+ if (error) {
+ usermodehelper_enable();
goto close_finish;
+ }
pr_debug("PM: Preparing processes for restore.\n");
- error = prepare_processes();
+ error = freeze_processes();
if (error) {
swsusp_close(FMODE_READ);
goto Done;
@@ -851,8 +825,6 @@ static const char * const hibernation_modes[] = {
[HIBERNATION_PLATFORM] = "platform",
[HIBERNATION_SHUTDOWN] = "shutdown",
[HIBERNATION_REBOOT] = "reboot",
- [HIBERNATION_TEST] = "test",
- [HIBERNATION_TESTPROC] = "testproc",
};
/*
@@ -861,17 +833,15 @@ static const char * const hibernation_modes[] = {
* Hibernation can be handled in several ways. There are a few different ways
* to put the system into the sleep state: using the platform driver (e.g. ACPI
* or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
*
* The sysfs file /sys/power/disk provides an interface for selecting the
* hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 5 modes that can be supported:
+ * to be printed. There are 3 modes that can be supported:
*
* 'platform'
* 'shutdown'
* 'reboot'
- * 'test'
- * 'testproc'
*
* If a platform hibernation driver is in use, 'platform' will be supported
* and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -895,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
switch (i) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
break;
case HIBERNATION_PLATFORM:
if (hibernation_ops)
@@ -925,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
if (len == strlen(hibernation_modes[i])
&& !strncmp(buf, hibernation_modes[i], len)) {
@@ -937,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
switch (mode) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
hibernation_mode = mode;
break;
case HIBERNATION_PLATFORM:
@@ -953,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
if (!error)
pr_debug("PM: Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error ? error : n;
}
@@ -980,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
if (maj != MAJOR(res) || min != MINOR(res))
goto out;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
swsusp_resume_device = res;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
printk(KERN_INFO "PM: Starting manual resume from disk\n");
noresume = 0;
software_resume();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f0903c3..9824b41e5a1 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
- *
+ *
* This file is released under the GPLv2
*
*/
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error ? error : n;
}
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
* 'disk' (Suspend-to-Disk).
*
- * store() accepts one of those strings, translates it into the
+ * store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
error = hibernate();
- goto Exit;
+ goto Exit;
}
#ifdef CONFIG_SUSPEND
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 23a2db1ec44..0c4defe6d3b 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
/* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5531b..77274c9ba2f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
*/
#define TIMEOUT (20 * HZ)
-static inline int freezable(struct task_struct * p)
-{
- if ((p == current) ||
- (p->flags & PF_NOFREEZE) ||
- (p->exit_state != 0))
- return 0;
- return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
{
struct task_struct *g, *p;
unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
end_time = jiffies + TIMEOUT;
- if (!sig_only)
+ if (!user_only)
freeze_workqueues_begin();
while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (frozen(p) || !freezable(p))
- continue;
-
- if (!freeze_task(p, sig_only))
+ if (p == current || !freeze_task(p))
continue;
/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
- if (!sig_only) {
+ if (!user_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs = elapsed_csecs64;
if (todo) {
- /* This does not unfreeze processes that are already frozen
- * (we have slightly ugly calling convention in that respect,
- * and caller must call thaw_processes() if something fails),
- * but it cleans up leftover PF_FREEZE requests.
- */
printk("\n");
printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs / 100, elapsed_csecs % 100,
todo - wq_busy, wq_busy);
- thaw_workqueues();
-
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- task_lock(p);
- if (!wakeup && freezing(p) && !freezer_should_skip(p))
+ if (!wakeup && !freezer_should_skip(p) &&
+ p != current && freezing(p) && !frozen(p))
sched_show_task(p);
- cancel_freezing(p);
- task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
*/
int freeze_processes(void)
{
int error;
+ if (!pm_freezing)
+ atomic_inc(&system_freezing_cnt);
+
printk("Freezing user space processes ... ");
+ pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
printk("\n");
BUG_ON(in_atomic());
+ if (error)
+ thaw_processes();
return error;
}
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0. On failure, -errno and system is fully thawed.
*/
int freeze_kernel_threads(void)
{
int error;
printk("Freezing remaining freezable tasks ... ");
+ pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
printk("\n");
BUG_ON(in_atomic());
+ if (error)
+ thaw_processes();
return error;
}
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
{
struct task_struct *g, *p;
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- if (!freezable(p))
- continue;
+ if (pm_freezing)
+ atomic_dec(&system_freezing_cnt);
+ pm_freezing = false;
+ pm_nosig_freezing = false;
- if (nosig_only && should_send_signal(p))
- continue;
+ oom_killer_enable();
+
+ printk("Restarting tasks ... ");
- if (cgroup_freezing_or_frozen(p))
- continue;
+ thaw_workqueues();
- thaw_process(p);
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ __thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
-}
-void thaw_processes(void)
-{
- oom_killer_enable();
-
- printk("Restarting tasks ... ");
- thaw_workqueues();
- thaw_tasks(true);
- thaw_tasks(false);
schedule();
printk("done.\n");
}
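
For context, the hunks above change the freezer calling convention: freeze_processes() and freeze_kernel_threads() now thaw everything themselves on failure, and a single thaw_processes() call restores both user space and kernel threads. A minimal sketch of a caller under the new convention (example_enter_sleep() is a hypothetical name, not code from this series; usermodehelper handling is elided):

static int example_enter_sleep(void)
{
	int error;

	error = freeze_processes();		/* user space only */
	if (error)
		return error;			/* already fully thawed */

	error = freeze_kernel_threads();	/* remaining freezable tasks */
	if (error)
		return error;			/* also fully thawed on failure */

	/* ... create the image or enter the sleep state here ... */

	thaw_processes();			/* one call restores everything */
	return 0;
}
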
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index cbe2c144139..1cf88900ec4 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -858,6 +858,9 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
PageReserved(page))
return NULL;
+ if (page_is_guard(page))
+ return NULL;
+
return page;
}
@@ -920,6 +923,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
&& (!kernel_page_present(page) || pfn_is_nosave(pfn)))
return NULL;
+ if (page_is_guard(page))
+ return NULL;
+
return page;
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4953dc054c5..4fd51beed87 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
suspend_ops = ops;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
@@ -106,13 +106,11 @@ static int suspend_prepare(void)
goto Finish;
error = suspend_freeze_processes();
- if (error) {
- suspend_stats.failed_freeze++;
- dpm_save_failed_step(SUSPEND_FREEZE);
- } else
+ if (!error)
return 0;
- suspend_thaw_processes();
+ suspend_stats.failed_freeze++;
+ dpm_save_failed_step(SUSPEND_FREEZE);
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 11a594c4ba2..3739ecced08 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -18,7 +18,6 @@
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
-#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6d8f535c2b8..6b1ab7a8852 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -21,6 +21,7 @@
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/fs.h>
+#include <linux/compat.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
@@ -30,28 +31,6 @@
#include "power.h"
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future. They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE 1
-#define PMOPS_ENTER 2
-#define PMOPS_FINISH 3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones. They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
#define SNAPSHOT_MINOR 231
@@ -71,7 +50,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
struct snapshot_data *data;
int error;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -123,7 +102,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->platform_support = 0;
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error;
}
@@ -132,7 +111,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
swsusp_free();
free_basic_memory_bitmaps();
@@ -146,7 +125,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return 0;
}
@@ -158,7 +137,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
data = filp->private_data;
if (!data->ready) {
@@ -179,7 +158,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
*offp += res;
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return res;
}
@@ -191,7 +170,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
data = filp->private_data;
@@ -208,20 +187,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res > 0)
*offp += res;
unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return res;
}
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
- if (printk_ratelimit())
- printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
- "be removed soon, update your suspend-to-disk "
- "utilities\n",
- __builtin_return_address(0), cmd);
-}
-
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -257,11 +227,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
error = freeze_processes();
- if (error) {
- thaw_processes();
+ if (error)
usermodehelper_enable();
- }
- if (!error)
+ else
data->frozen = 1;
break;
@@ -274,8 +242,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->frozen = 0;
break;
- case SNAPSHOT_ATOMIC_SNAPSHOT:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_CREATE_IMAGE:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
@@ -283,10 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
- if (!error)
+ if (!error) {
error = put_user(in_suspend, (int __user *)arg);
- if (!error)
- data->ready = 1;
+ if (!error && !freezer_test_done)
+ data->ready = 1;
+ if (freezer_test_done) {
+ freezer_test_done = false;
+ thaw_processes();
+ }
+ }
break;
case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +276,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->ready = 0;
break;
- case SNAPSHOT_SET_IMAGE_SIZE:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_PREF_IMAGE_SIZE:
image_size = arg;
break;
@@ -321,16 +290,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = put_user(size, (loff_t __user *)arg);
break;
- case SNAPSHOT_AVAIL_SWAP:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_AVAIL_SWAP_SIZE:
size = count_swap_pages(data->swap, 1);
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
- case SNAPSHOT_GET_SWAP_PAGE:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_ALLOC_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
@@ -353,27 +318,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
free_all_swap_pages(data->swap);
break;
- case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
- snapshot_deprecated_ioctl(cmd);
- if (!swsusp_swap_in_use()) {
- /*
- * User space encodes device types as two-byte values,
- * so we need to recode them
- */
- if (old_decode_dev(arg)) {
- data->swap = swap_type_of(old_decode_dev(arg),
- 0, NULL);
- if (data->swap < 0)
- error = -ENODEV;
- } else {
- data->swap = -1;
- error = -EINVAL;
- }
- } else {
- error = -EPERM;
- }
- break;
-
case SNAPSHOT_S2RAM:
if (!data->frozen) {
error = -EPERM;
@@ -396,33 +340,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = hibernation_platform_enter();
break;
- case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
- snapshot_deprecated_ioctl(cmd);
- error = -EINVAL;
-
- switch (arg) {
-
- case PMOPS_PREPARE:
- data->platform_support = 1;
- error = 0;
- break;
-
- case PMOPS_ENTER:
- if (data->platform_support)
- error = hibernation_platform_enter();
- break;
-
- case PMOPS_FINISH:
- if (data->platform_support)
- error = 0;
- break;
-
- default:
- printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
- }
- break;
-
case SNAPSHOT_SET_SWAP_AREA:
if (swsusp_swap_in_use()) {
error = -EPERM;
@@ -464,6 +381,66 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
return error;
}
+#ifdef CONFIG_COMPAT
+
+struct compat_resume_swap_area {
+ compat_loff_t offset;
+ u32 dev;
+} __packed;
+
+static long
+snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+ switch (cmd) {
+ case SNAPSHOT_GET_IMAGE_SIZE:
+ case SNAPSHOT_AVAIL_SWAP_SIZE:
+ case SNAPSHOT_ALLOC_SWAP_PAGE: {
+ compat_loff_t __user *uoffset = compat_ptr(arg);
+ loff_t offset;
+ mm_segment_t old_fs;
+ int err;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
+ set_fs(old_fs);
+ if (!err && put_user(offset, uoffset))
+ err = -EFAULT;
+ return err;
+ }
+
+ case SNAPSHOT_CREATE_IMAGE:
+ return snapshot_ioctl(file, cmd,
+ (unsigned long) compat_ptr(arg));
+
+ case SNAPSHOT_SET_SWAP_AREA: {
+ struct compat_resume_swap_area __user *u_swap_area =
+ compat_ptr(arg);
+ struct resume_swap_area swap_area;
+ mm_segment_t old_fs;
+ int err;
+
+ err = get_user(swap_area.offset, &u_swap_area->offset);
+ err |= get_user(swap_area.dev, &u_swap_area->dev);
+ if (err)
+ return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
+ (unsigned long) &swap_area);
+ set_fs(old_fs);
+ return err;
+ }
+
+ default:
+ return snapshot_ioctl(file, cmd, arg);
+ }
+}
+
+#endif /* CONFIG_COMPAT */
+
static const struct file_operations snapshot_fops = {
.open = snapshot_open,
.release = snapshot_release,
@@ -471,6 +448,9 @@ static const struct file_operations snapshot_fops = {
.write = snapshot_write,
.llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = snapshot_compat_ioctl,
+#endif
};
static struct miscdevice snapshot_device = {
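
The compat path added above exists because struct resume_swap_area contains a loff_t, so its 32-bit layout differs from the native 64-bit one; snapshot_compat_ioctl() repacks the argument before calling the regular handler. A minimal user-space sketch of the ioctl it protects, assuming the exported <linux/suspend_ioctls.h> header; the offset and dev values are placeholders a real tool would compute from the swap device:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	struct resume_swap_area swap_area = {
		.offset = 0,	/* placeholder: swap header offset */
		.dev = 0,	/* placeholder: device number of swap area */
	};
	int fd = open("/dev/snapshot", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/snapshot");
		return 1;
	}
	if (ioctl(fd, SNAPSHOT_SET_SWAP_AREA, &swap_area) < 0)
		perror("SNAPSHOT_SET_SWAP_AREA");
	return 0;
}
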
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eed..989e4a52da7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
unsigned long mem;
mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
- if (mem == MEMBLOCK_ERROR)
+ if (!mem)
return;
new_log_buf = __va(mem);
} else {
@@ -688,6 +688,7 @@ static void zap_locks(void)
oops_timestamp = jiffies;
+ debug_locks_off();
/* If a crash is occurring, make sure we can't deadlock */
raw_spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
@@ -840,9 +841,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
boot_delay_msec();
printk_delay();
- preempt_disable();
/* This stops the holder of console_sem just where we want him */
- raw_local_irq_save(flags);
+ local_irq_save(flags);
this_cpu = smp_processor_id();
/*
@@ -856,7 +856,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* recursion and return - but flag the recursion so that
* it can be printed at the next appropriate moment:
*/
- if (!oops_in_progress) {
+ if (!oops_in_progress && !lockdep_recursing(current)) {
recursion_bug = 1;
goto out_restore_irqs;
}
@@ -962,9 +962,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
lockdep_on();
out_restore_irqs:
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
- preempt_enable();
return printed_len;
}
EXPORT_SYMBOL(printk);
@@ -1293,10 +1292,11 @@ again:
raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
if (retry && console_trylock())
goto again;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 24d04477b25..78ab24a7b0e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
- child->signal->group_stop_count))
+ child->signal->group_stop_count)) {
child->jobctl |= JOBCTL_STOP_PENDING;
+ /*
+ * This is only possible if this thread was cloned by the
+ * traced task running in the stopped group, set the signal
+ * for the future reports.
+ * FIXME: we should change ptrace_init_task() to handle this
+ * case.
+ */
+ if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+ child->jobctl |= SIGSTOP;
+ }
+
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
* @child in the butt. Note that @resume should be used iff @child
diff --git a/kernel/rcu.h b/kernel/rcu.h
index f600868d550..aa88baab5f7 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -30,6 +30,13 @@
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
+ * Process-level increment to ->dynticks_nesting field. This allows for
+ * architectures that use half-interrupts and half-exceptions from
+ * process context.
+ */
+#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
+
+/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
* by call_rcu() and rcu callback execution, and are therefore not part of the
* RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
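
DYNTICK_TASK_NESTING is the value the nesting counter holds while ordinary task-level code runs; it is made huge so that unbalanced half-interrupt/half-exception entries cannot drive the counter to zero by accident, while zero still means "idle, RCU may ignore this CPU". A toy user-space illustration of the counting convention (not kernel code):

#include <limits.h>
#include <stdio.h>

#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)

int main(void)
{
	long long nesting = DYNTICK_TASK_NESTING;	/* task context */

	nesting = 0;	/* rcu_idle_enter(): extended quiescent state */
	nesting++;	/* rcu_irq_enter(): interrupt taken from idle */
	nesting--;	/* rcu_irq_exit(): back to idle */
	printf("CPU idle according to RCU: %s\n", nesting ? "no" : "yes");
	return 0;
}
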
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c5b98e565ae..2bc4e135ff2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -93,6 +93,8 @@ int rcu_read_lock_bh_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
+ if (rcu_is_cpu_idle())
+ return 0;
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
@@ -316,3 +318,13 @@ struct debug_obj_descr rcuhead_debug_descr = {
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
+{
+ trace_rcu_torture_read(rcutorturename, rhp);
+}
+EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 636af6d9c6e..977296dca0a 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,31 +53,137 @@ static void __call_rcu(struct rcu_head *head,
#include "rcutiny_plugin.h"
-#ifdef CONFIG_NO_HZ
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
-static long rcu_dynticks_nesting = 1;
+/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
+static void rcu_idle_enter_common(long long oldval)
+{
+ if (rcu_dynticks_nesting) {
+ RCU_TRACE(trace_rcu_dyntick("--=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+ rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+}
/*
- * Enter dynticks-idle mode, which is an extended quiescent state
- * if we have fully entered that mode (i.e., if the new value of
- * dynticks_nesting is zero).
+ * Enter idle, which is an extended quiescent state if we have fully
+ * entered that mode (i.e., if the new value of dynticks_nesting is zero).
*/
-void rcu_enter_nohz(void)
+void rcu_idle_enter(void)
{
- if (--rcu_dynticks_nesting == 0)
- rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting = 0;
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
}
/*
- * Exit dynticks-idle mode, so that we are no longer in an extended
- * quiescent state.
+ * Exit an interrupt handler towards idle.
*/
-void rcu_exit_nohz(void)
+void rcu_irq_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ rcu_dynticks_nesting--;
+ WARN_ON_ONCE(rcu_dynticks_nesting < 0);
+ rcu_idle_enter_common(oldval);
+ local_irq_restore(flags);
+}
+
+/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
+static void rcu_idle_exit_common(long long oldval)
{
+ if (oldval) {
+ RCU_TRACE(trace_rcu_dyntick("++=",
+ oldval, rcu_dynticks_nesting));
+ return;
+ }
+ RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
+ oldval, rcu_dynticks_nesting));
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+}
+
+/*
+ * Exit idle, so that we are no longer in an extended quiescent state.
+ */
+void rcu_idle_exit(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
+ WARN_ON_ONCE(oldval != 0);
+ rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
+/*
+ * Enter an interrupt handler, moving away from idle.
+ */
+void rcu_irq_enter(void)
+{
+ unsigned long flags;
+ long long oldval;
+
+ local_irq_save(flags);
+ oldval = rcu_dynticks_nesting;
rcu_dynticks_nesting++;
+ WARN_ON_ONCE(rcu_dynticks_nesting == 0);
+ rcu_idle_exit_common(oldval);
+ local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Test whether RCU thinks that the current CPU is idle.
+ */
+int rcu_is_cpu_idle(void)
+{
+ return !rcu_dynticks_nesting;
}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
-#endif /* #ifdef CONFIG_NO_HZ */
+/*
+ * Test whether the current CPU was interrupted from idle. Nested
+ * interrupts don't count, we must be running at the first interrupt
+ * level.
+ */
+int rcu_is_cpu_rrupt_from_idle(void)
+{
+ return rcu_dynticks_nesting <= 0;
+}
/*
* Helper function for rcu_sched_qs() and rcu_bh_qs().
@@ -126,14 +232,13 @@ void rcu_bh_qs(int cpu)
/*
* Check to see if the scheduling-clock interrupt came from an extended
- * quiescent state, and, if so, tell RCU about it.
+ * quiescent state, and, if so, tell RCU about it. This function must
+ * be called from hardirq context. It is normally called from the
+ * scheduling-clock interrupt.
*/
void rcu_check_callbacks(int cpu, int user)
{
- if (user ||
- (idle_cpu(cpu) &&
- !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT)))
+ if (user || rcu_is_cpu_rrupt_from_idle())
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
@@ -154,7 +259,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
/* If no RCU callbacks ready to invoke, just return. */
if (&rcp->rcucblist == rcp->donetail) {
RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
+ ACCESS_ONCE(rcp->rcucblist),
+ need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
return;
}
@@ -183,7 +292,9 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
RCU_TRACE(cb_count++);
}
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
- RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
+ RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread()));
}
static void rcu_process_callbacks(struct softirq_action *unused)
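
The rename from rcu_enter_nohz()/rcu_exit_nohz() to rcu_idle_enter()/rcu_idle_exit() reflects that the hooks now mark idle itself rather than tick state, and they warn if called from anything other than the idle task. A rough sketch of the call shape an architecture idle loop is expected to have after this series (arch_cpu_sleep() is a hypothetical stand-in for the arch's wait-for-interrupt primitive; preemption handling is elided):

static void example_cpu_idle_loop(void)
{
	while (1) {
		rcu_idle_enter();		/* RCU may now ignore this CPU */
		while (!need_resched())
			arch_cpu_sleep();	/* hypothetical wfi/hlt wrapper */
		rcu_idle_exit();		/* CPU is visible to RCU again */
		schedule();
	}
}
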
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 2b0484a5dc2..9cb1ae4aabd 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -312,8 +312,8 @@ static int rcu_boost(void)
rt_mutex_lock(&mtx);
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
- return rcu_preempt_ctrlblk.boost_tasks != NULL ||
- rcu_preempt_ctrlblk.exp_tasks != NULL;
+ return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
+ ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
/*
@@ -885,6 +885,19 @@ static void invoke_rcu_callbacks(void)
wake_up(&rcu_kthread_wq);
}
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return rcu_kthread_task == current;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
/*
* This kthread invokes RCU callbacks whose grace periods have
* elapsed. It is awakened as needed, and takes the place of the
@@ -938,6 +951,18 @@ void invoke_rcu_callbacks(void)
raise_softirq(RCU_SOFTIRQ);
}
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * There is no callback kthread, so this thread is never it.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return false;
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
void rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 764825c2685..88f17b8a3b1 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,9 +61,11 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5; /* Start/stop testing interval (in sec) */
static int irqreader = 1; /* RCU readers from irq (timers). */
-static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
-static int fqs_holdoff = 0; /* Hold time within burst (us). */
+static int fqs_duration; /* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff; /* Hold time within burst (us). */
static int fqs_stutter = 3; /* Wait time between bursts (s). */
+static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */
+static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */
static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
@@ -91,6 +93,10 @@ module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+module_param(onoff_interval, int, 0444);
+MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+module_param(shutdown_secs, int, 0444);
+MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
@@ -119,6 +125,10 @@ static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
+static struct task_struct *shutdown_task;
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *onoff_task;
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#define RCU_TORTURE_PIPE_LEN 10
@@ -149,6 +159,10 @@ static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
+static long n_offline_attempts;
+static long n_offline_successes;
+static long n_online_attempts;
+static long n_online_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
@@ -160,6 +174,8 @@ static int stutter_pause_test;
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(rcutorture_runnable, int, 0444);
+MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
@@ -167,6 +183,7 @@ int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
+static unsigned long shutdown_time; /* jiffies to system shutdown. */
static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
@@ -182,6 +199,9 @@ static int fullstop = FULLSTOP_RMMOD;
*/
static DEFINE_MUTEX(fullstop_mutex);
+/* Forward reference. */
+static void rcu_torture_cleanup(void);
+
/*
* Detect and respond to a system shutdown.
*/
@@ -612,6 +632,30 @@ static struct rcu_torture_ops srcu_ops = {
.name = "srcu"
};
+static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
+{
+ return srcu_read_lock_raw(&srcu_ctl);
+}
+
+static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
+{
+ srcu_read_unlock_raw(&srcu_ctl, idx);
+}
+
+static struct rcu_torture_ops srcu_raw_ops = {
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock_raw,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock_raw,
+ .completed = srcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = srcu_torture_stats,
+ .name = "srcu_raw"
+};
+
static void srcu_torture_synchronize_expedited(void)
{
synchronize_srcu_expedited(&srcu_ctl);
@@ -913,6 +957,18 @@ rcu_torture_fakewriter(void *arg)
return 0;
}
+void rcutorture_trace_dump(void)
+{
+ static atomic_t beenhere = ATOMIC_INIT(0);
+
+ if (atomic_read(&beenhere))
+ return;
+ if (atomic_xchg(&beenhere, 1) != 0)
+ return;
+ do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
+ ftrace_dump(DUMP_ALL);
+}
+
/*
* RCU torture reader from timer handler. Dereferences rcu_torture_current,
* incrementing the corresponding element of the pipeline array. The
@@ -934,6 +990,7 @@ static void rcu_torture_timer(unsigned long unused)
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
if (p == NULL) {
/* Leave because rcu_torture_writer is not yet underway */
cur_ops->readunlock(idx);
@@ -951,6 +1008,8 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
+ if (pipe_count > 1)
+ rcutorture_trace_dump();
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -994,6 +1053,7 @@ rcu_torture_reader(void *arg)
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
+ do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
cur_ops->readunlock(idx);
@@ -1009,6 +1069,8 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
+ if (pipe_count > 1)
+ rcutorture_trace_dump();
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
@@ -1056,7 +1118,8 @@ rcu_torture_printk(char *page)
cnt += sprintf(&page[cnt],
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld "
- "rtbf: %ld rtb: %ld nt: %ld",
+ "rtbf: %ld rtb: %ld nt: %ld "
+ "onoff: %ld/%ld:%ld/%ld",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
@@ -1068,7 +1131,11 @@ rcu_torture_printk(char *page)
n_rcu_torture_boost_rterror,
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
- n_rcu_torture_timers);
+ n_rcu_torture_timers,
+ n_online_successes,
+ n_online_attempts,
+ n_offline_successes,
+ n_offline_attempts);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
@@ -1232,12 +1299,14 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
"shuffle_interval=%d stutter=%d irqreader=%d "
"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
"test_boost=%d/%d test_boost_interval=%d "
- "test_boost_duration=%d\n",
+ "test_boost_duration=%d shutdown_secs=%d "
+ "onoff_interval=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
- test_boost_interval, test_boost_duration);
+ test_boost_interval, test_boost_duration, shutdown_secs,
+ onoff_interval);
}
static struct notifier_block rcutorture_shutdown_nb = {
@@ -1287,6 +1356,131 @@ static int rcutorture_booster_init(int cpu)
return 0;
}
+/*
+ * Cause the rcutorture test to shutdown the system after the test has
+ * run for the time specified by the shutdown_secs module parameter.
+ */
+static int
+rcu_torture_shutdown(void *arg)
+{
+ long delta;
+ unsigned long jiffies_snap;
+
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
+ jiffies_snap = ACCESS_ONCE(jiffies);
+ while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
+ !kthread_should_stop()) {
+ delta = shutdown_time - jiffies_snap;
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_shutdown task: %lu "
+ "jiffies remaining\n",
+ torture_type, delta);
+ schedule_timeout_interruptible(delta);
+ jiffies_snap = ACCESS_ONCE(jiffies);
+ }
+ if (kthread_should_stop()) {
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
+ return 0;
+ }
+
+ /* OK, shut down the system. */
+
+ VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
+ shutdown_task = NULL; /* Avoid self-kill deadlock. */
+ rcu_torture_cleanup(); /* Get the success/failure message. */
+ kernel_power_off(); /* Shut down the system. */
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Execute random CPU-hotplug operations at the interval specified
+ * by the onoff_interval.
+ */
+static int
+rcu_torture_onoff(void *arg)
+{
+ int cpu;
+ int maxcpu = -1;
+ DEFINE_RCU_RANDOM(rand);
+
+ VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
+ for_each_online_cpu(cpu)
+ maxcpu = cpu;
+ WARN_ON(maxcpu < 0);
+ while (!kthread_should_stop()) {
+ cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
+ if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: offlining %d\n",
+ torture_type, cpu);
+ n_offline_attempts++;
+ if (cpu_down(cpu) == 0) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: "
+ "offlined %d\n",
+ torture_type, cpu);
+ n_offline_successes++;
+ }
+ } else if (cpu_is_hotpluggable(cpu)) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: onlining %d\n",
+ torture_type, cpu);
+ n_online_attempts++;
+ if (cpu_up(cpu) == 0) {
+ if (verbose)
+ printk(KERN_ALERT "%s" TORTURE_FLAG
+ "rcu_torture_onoff task: "
+ "onlined %d\n",
+ torture_type, cpu);
+ n_online_successes++;
+ }
+ }
+ schedule_timeout_interruptible(onoff_interval * HZ);
+ }
+ VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
+ return 0;
+}
+
+static int
+rcu_torture_onoff_init(void)
+{
+ if (onoff_interval <= 0)
+ return 0;
+ onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
+ if (IS_ERR(onoff_task)) {
+ onoff_task = NULL;
+ return PTR_ERR(onoff_task);
+ }
+ return 0;
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+ if (onoff_task == NULL)
+ return;
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
+ kthread_stop(onoff_task);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void
+rcu_torture_onoff_init(void)
+{
+}
+
+static void rcu_torture_onoff_cleanup(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
static int rcutorture_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -1391,6 +1585,11 @@ rcu_torture_cleanup(void)
for_each_possible_cpu(i)
rcutorture_booster_cleanup(i);
}
+ if (shutdown_task != NULL) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
+ kthread_stop(shutdown_task);
+ }
+ rcu_torture_onoff_cleanup();
/* Wait for all RCU callbacks to fire. */
@@ -1416,7 +1615,7 @@ rcu_torture_init(void)
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_expedited_ops,
+ &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
@@ -1607,6 +1806,18 @@ rcu_torture_init(void)
}
}
}
+ if (shutdown_secs > 0) {
+ shutdown_time = jiffies + shutdown_secs * HZ;
+ shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
+ "rcu_torture_shutdown");
+ if (IS_ERR(shutdown_task)) {
+ firsterr = PTR_ERR(shutdown_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
+ shutdown_task = NULL;
+ goto unwind;
+ }
+ }
+ rcu_torture_onoff_init();
register_reboot_notifier(&rcutorture_shutdown_nb);
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
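
The shutdown task added above compares jiffies against its deadline with ULONG_CMP_LT() rather than a plain "<" so the test keeps working if the jiffies counter wraps during a long run. A small user-space toy showing why the wrap-safe comparison matters; the macro body here is written to match the kernel's wrap-safe semantics:

#include <stdio.h>

#define ULONG_CMP_LT(a, b)	((unsigned long)((a) - (b)) > (~0UL / 2))

int main(void)
{
	unsigned long now = ~0UL - 5;		/* counter just before wrapping */
	unsigned long deadline = now + 100;	/* wraps past zero */

	printf("naive now < deadline: %d\n", now < deadline);		/* 0: wrong */
	printf("ULONG_CMP_LT(now, deadline): %d\n",
	       ULONG_CMP_LT(now, deadline));				/* 1: still before deadline */
	return 0;
}
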
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b76d812740..6c4a6722abf 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -69,7 +69,7 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
NUM_RCU_LVL_3, \
NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
}, \
- .signaled = RCU_GP_IDLE, \
+ .fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
@@ -195,12 +195,10 @@ void rcu_note_context_switch(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = 1,
+ .dynticks_nesting = DYNTICK_TASK_NESTING,
.dynticks = ATOMIC_INIT(1),
};
-#endif /* #ifdef CONFIG_NO_HZ */
static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */
@@ -328,11 +326,11 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
return 1;
}
- /* If preemptible RCU, no point in sending reschedule IPI. */
- if (rdp->preemptible)
- return 0;
-
- /* The CPU is online, so send it a reschedule IPI. */
+ /*
+ * The CPU is online, so send it a reschedule IPI. This forces
+ * it through the scheduler, and (inefficiently) also handles cases
+ * where idle loops fail to inform RCU about the CPU being idle.
+ */
if (rdp->cpu != smp_processor_id())
smp_send_reschedule(rdp->cpu);
else
@@ -343,59 +341,181 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
#endif /* #ifdef CONFIG_SMP */
-#ifdef CONFIG_NO_HZ
+/*
+ * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ *
+ * If the new value of the ->dynticks_nesting counter now is zero,
+ * we really have entered idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
+ */
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+{
+ trace_rcu_dyntick("Start", oldval, 0);
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+ rcu_prepare_for_idle(smp_processor_id());
+ /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+ smp_mb__before_atomic_inc(); /* See above. */
+ atomic_inc(&rdtp->dynticks);
+ smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+}
/**
- * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+ * rcu_idle_enter - inform RCU that current CPU is entering idle
*
- * Enter nohz mode, in other words, -leave- the mode in which RCU
+ * Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
- * critical sections can occur in irq handlers in nohz mode, a possibility
- * handled by rcu_irq_enter() and rcu_irq_exit()).
+ * critical sections can occur in irq handlers in idle, a possibility
+ * handled by irq_enter() and irq_exit().)
+ *
+ * We crowbar the ->dynticks_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
*/
-void rcu_enter_nohz(void)
+void rcu_idle_enter(void)
{
unsigned long flags;
+ long long oldval;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (--rdtp->dynticks_nesting) {
- local_irq_restore(flags);
- return;
- }
- trace_rcu_dyntick("Start");
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting = 0;
+ rcu_idle_enter_common(rdtp, oldval);
local_irq_restore(flags);
}
-/*
- * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+/**
+ * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
+ *
+ * Exit from an interrupt handler, which might possibly result in entering
+ * idle mode, in other words, leaving the mode in which read-side critical
+ * sections can occur.
*
- * Exit nohz mode, in other words, -enter- the mode in which RCU
- * read-side critical sections normally occur.
+ * This code assumes that the idle loop never does anything that might
+ * result in unbalanced calls to irq_enter() and irq_exit(). If your
+ * architecture violates this assumption, RCU will give you what you
+ * deserve, good and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
*/
-void rcu_exit_nohz(void)
+void rcu_irq_exit(void)
{
unsigned long flags;
+ long long oldval;
struct rcu_dynticks *rdtp;
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks_nesting++) {
- local_irq_restore(flags);
- return;
- }
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting--;
+ WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
+ if (rdtp->dynticks_nesting)
+ trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+ else
+ rcu_idle_enter_common(rdtp, oldval);
+ local_irq_restore(flags);
+}
+
+/*
+ * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ *
+ * If the new value of the ->dynticks_nesting counter was previously zero,
+ * we really have exited idle, and must do the appropriate accounting.
+ * The caller must have disabled interrupts.
+ */
+static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+{
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
- trace_rcu_dyntick("End");
+ rcu_cleanup_after_idle(smp_processor_id());
+ trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+ if (!is_idle_task(current)) {
+ struct task_struct *idle = idle_task(smp_processor_id());
+
+ trace_rcu_dyntick("Error on exit: not idle task",
+ oldval, rdtp->dynticks_nesting);
+ ftrace_dump(DUMP_ALL);
+ WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+ current->pid, current->comm,
+ idle->pid, idle->comm); /* must be idle task! */
+ }
+}
+
+/**
+ * rcu_idle_exit - inform RCU that current CPU is leaving idle
+ *
+ * Exit idle mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections can occur.
+ *
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
+ * of interrupt nesting level during the busy period that is just
+ * now starting.
+ */
+void rcu_idle_exit(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ WARN_ON_ONCE(oldval != 0);
+ rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
+ rcu_idle_exit_common(rdtp, oldval);
+ local_irq_restore(flags);
+}
+
+/**
+ * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
+ *
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur.
+ *
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to
+ * user mode! This code assumes that the idle loop never does upcalls to
+ * user mode. If your architecture does do upcalls from the idle loop (or
+ * does anything else that results in unbalanced calls to the irq_enter()
+ * and irq_exit() functions), RCU will give you what you deserve, good
+ * and hard. But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ */
+void rcu_irq_enter(void)
+{
+ unsigned long flags;
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+ local_irq_save(flags);
+ rdtp = &__get_cpu_var(rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting++;
+ WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
+ if (oldval)
+ trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+ else
+ rcu_idle_exit_common(rdtp, oldval);
local_irq_restore(flags);
}
@@ -442,27 +562,37 @@ void rcu_nmi_exit(void)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
+#ifdef CONFIG_PROVE_RCU
+
/**
- * rcu_irq_enter - inform RCU of entry to hard irq context
+ * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
*
- * If the CPU was idle with dynamic ticks active, this updates the
- * rdtp->dynticks to let the RCU handling know that the CPU is active.
+ * If the current CPU is in its idle loop and is neither in an interrupt
- * nor an NMI handler, return true.
*/
-void rcu_irq_enter(void)
+int rcu_is_cpu_idle(void)
{
- rcu_exit_nohz();
+ int ret;
+
+ preempt_disable();
+ ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+ preempt_enable();
+ return ret;
}
+EXPORT_SYMBOL(rcu_is_cpu_idle);
+
+#endif /* #ifdef CONFIG_PROVE_RCU */
/**
- * rcu_irq_exit - inform RCU of exit from hard irq context
+ * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
*
- * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
- * to put let the RCU handling be aware that the CPU is going back to idle
- * with no ticks.
+ * If the current CPU is idle or running at a first-level (not nested)
+ * interrupt from idle, return true. The caller must have at least
+ * disabled preemption.
*/
-void rcu_irq_exit(void)
+int rcu_is_cpu_rrupt_from_idle(void)
{
- rcu_enter_nohz();
+ return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}
#ifdef CONFIG_SMP
@@ -475,7 +605,7 @@ void rcu_irq_exit(void)
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
- return 0;
+ return (rdp->dynticks_snap & 0x1) == 0;
}
/*
@@ -512,26 +642,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
#endif /* #ifdef CONFIG_SMP */
-#else /* #ifdef CONFIG_NO_HZ */
-
-#ifdef CONFIG_SMP
-
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
-{
- return 0;
-}
-
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
-{
- return rcu_implicit_offline_qs(rdp);
-}
-
-#endif /* #ifdef CONFIG_SMP */
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-int rcu_cpu_stall_suppress __read_mostly;
-
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
rsp->gp_start = jiffies;
@@ -866,8 +976,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
- WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
- rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+ WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
+ rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
record_gp_stall_check_time(rsp);
@@ -877,7 +987,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rnp->qsmask = rnp->qsmaskinit;
rnp->gpnum = rsp->gpnum;
rnp->completed = rsp->completed;
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
+ rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
rcu_start_gp_per_cpu(rsp, rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
@@ -927,7 +1037,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rnp = rcu_get_root(rsp);
raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+ rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
@@ -991,7 +1101,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
rsp->completed = rsp->gpnum; /* Declare the grace period complete. */
trace_rcu_grace_period(rsp->name, rsp->completed, "end");
- rsp->signaled = RCU_GP_IDLE;
+ rsp->fqs_state = RCU_GP_IDLE;
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}
@@ -1221,7 +1331,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
else
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
- rcu_report_exp_rnp(rsp, rnp);
+ rcu_report_exp_rnp(rsp, rnp, true);
rcu_node_kthread_setaffinity(rnp, -1);
}
@@ -1263,7 +1373,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* If no callbacks are ready, just return.*/
if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
trace_rcu_batch_start(rsp->name, 0, 0);
- trace_rcu_batch_end(rsp->name, 0);
+ trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+ need_resched(), is_idle_task(current),
+ rcu_is_callbacks_kthread());
return;
}
@@ -1291,12 +1403,17 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
debug_rcu_head_unqueue(list);
__rcu_reclaim(rsp->name, list);
list = next;
- if (++count >= bl)
+ /* Stop only if limit reached and CPU has something to do. */
+ if (++count >= bl &&
+ (need_resched() ||
+ (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
}
local_irq_save(flags);
- trace_rcu_batch_end(rsp->name, count);
+ trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
+ is_idle_task(current),
+ rcu_is_callbacks_kthread());
/* Update count, and requeue any remaining callbacks. */
rdp->qlen -= count;
@@ -1334,16 +1451,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
* Also schedule RCU core processing.
*
- * This function must be called with hardirqs disabled. It is normally
+ * This function must be called from hardirq context. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns
* false, there is no point in invoking rcu_check_callbacks().
*/
void rcu_check_callbacks(int cpu, int user)
{
trace_rcu_utilization("Start scheduler-tick");
- if (user ||
- (idle_cpu(cpu) && rcu_scheduler_active &&
- !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ if (user || rcu_is_cpu_rrupt_from_idle()) {
/*
* Get here if this CPU took its interrupt from user
@@ -1457,7 +1572,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
goto unlock_fqs_ret; /* no GP in progress, time updated. */
}
rsp->fqs_active = 1;
- switch (rsp->signaled) {
+ switch (rsp->fqs_state) {
case RCU_GP_IDLE:
case RCU_GP_INIT:
@@ -1473,7 +1588,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
force_qs_rnp(rsp, dyntick_save_progress_counter);
raw_spin_lock(&rnp->lock); /* irqs already disabled */
if (rcu_gp_in_progress(rsp))
- rsp->signaled = RCU_FORCE_QS;
+ rsp->fqs_state = RCU_FORCE_QS;
break;
case RCU_FORCE_QS:
@@ -1812,7 +1927,7 @@ static int rcu_pending(int cpu)
* by the current CPU, even if none need be done immediately, returning
* 1 if so.
*/
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
{
/* RCU callbacks either ready or pending? */
return per_cpu(rcu_sched_data, cpu).nxtlist ||
@@ -1913,9 +2028,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
rdp->qlen = 0;
-#ifdef CONFIG_NO_HZ
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
+ WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
+ WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
rdp->cpu = cpu;
rdp->rsp = rsp;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1942,6 +2057,10 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
+ rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
+ atomic_set(&rdp->dynticks->dynticks,
+ (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+ rcu_prepare_for_idle_init(cpu);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
/*
@@ -2023,6 +2142,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
rcu_send_cbs_to_online(&rcu_bh_state);
rcu_send_cbs_to_online(&rcu_sched_state);
rcu_preempt_send_cbs_to_online();
+ rcu_cleanup_after_idle(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
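
With CONFIG_PROVE_RCU, the rcu_is_cpu_idle() helper added above lets lockdep-based checks such as rcu_read_lock_bh_held() (updated in kernel/rcupdate.c earlier in this diff) refuse to bless RCU read-side accesses performed from the idle loop, where RCU is not watching the CPU. A minimal sketch of that kind of check; example_rcu_bh_held() is a hypothetical name mirroring the patched helper:

static int example_rcu_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;		/* lockdep not active: assume held */
	if (rcu_is_cpu_idle())
		return 0;		/* idle CPU: no legal RCU readers here */
	return in_softirq() || irqs_disabled();
}
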
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 849ce9ec51f..fddff92d667 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,9 +84,10 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- int dynticks_nesting; /* Track irq/process nesting level. */
- int dynticks_nmi_nesting; /* Track NMI nesting level. */
- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
+ long long dynticks_nesting; /* Track irq/process nesting level. */
+ /* Process level is worth LLONG_MAX/2. */
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ atomic_t dynticks; /* Even value for idle, else odd. */
};
/* RCU's kthread states for tracing. */
@@ -274,16 +275,12 @@ struct rcu_data {
/* did other CPU force QS recently? */
long blimit; /* Upper limit on a processed batch */
-#ifdef CONFIG_NO_HZ
/* 3) dynticks interface. */
struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
-#endif /* #ifdef CONFIG_NO_HZ */
/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long resched_ipi; /* Sent a resched IPI. */
@@ -302,16 +299,12 @@ struct rcu_data {
struct rcu_state *rsp;
};
-/* Values for signaled field in struct rcu_state. */
+/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE 0 /* No grace period in progress. */
#define RCU_GP_INIT 1 /* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -361,7 +354,7 @@ struct rcu_state {
/* The following fields are guarded by the root rcu_node's lock. */
- u8 signaled ____cacheline_internodealigned_in_smp;
+ u8 fqs_state ____cacheline_internodealigned_in_smp;
/* Force QS state. */
u8 fqs_active; /* force_quiescent_state() */
/* is running. */
@@ -451,7 +444,8 @@ static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
@@ -461,6 +455,7 @@ static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
+static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
@@ -473,5 +468,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_for_idle_init(int cpu);
+static void rcu_cleanup_after_idle(int cpu);
+static void rcu_prepare_for_idle(int cpu);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 4b9b9f8a418..8bb35d73e1f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -312,6 +312,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
+ int empty_exp_now;
unsigned long flags;
struct list_head *np;
#ifdef CONFIG_RCU_BOOST
@@ -382,8 +383,10 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
/*
* If this was the last task on the current list, and if
* we aren't waiting on any CPUs, report the quiescent state.
- * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
+ * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
+ * so we must take a snapshot of the expedited state.
*/
+ empty_exp_now = !rcu_preempted_readers_exp(rnp);
if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report("preempt_rcu",
rnp->gpnum,
@@ -406,8 +409,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
* If this was the last task on the expedited lists,
* then we need to report up the rcu_node hierarchy.
*/
- if (!empty_exp && !rcu_preempted_readers_exp(rnp))
- rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+ if (!empty_exp && empty_exp_now)
+ rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
} else {
local_irq_restore(flags);
}
@@ -729,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
* recursively up the tree. (Calm down, calm down, we do the recursion
* iteratively!)
*
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
* Caller must hold sync_rcu_preempt_exp_mutex.
*/
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake)
{
unsigned long flags;
unsigned long mask;
@@ -744,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
}
if (rnp->parent == NULL) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up(&sync_rcu_preempt_exp_wq);
+ if (wake)
+ wake_up(&sync_rcu_preempt_exp_wq);
break;
}
mask = rnp->grpmask;
@@ -777,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
must_wait = 1;
}
if (!must_wait)
- rcu_report_exp_rnp(rsp, rnp);
+ rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}
/*
@@ -1069,9 +1077,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
* report on tasks preempted in RCU read-side critical sections during
* expedited RCU grace periods.
*/
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+ bool wake)
{
- return;
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
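The "wake" argument threaded through rcu_report_exp_rnp() in the hunks above exists because the same reporting path is used both by the last blocked reader leaving an expedited grace period (which must wake the waiter) and by the task initiating that grace period while it sets things up (which has not started waiting yet, so a self-wakeup would be pointless). A hedged sketch of the pattern, with hypothetical names and ordinary wait-queue primitives rather than the real rcu_node machinery:

#include <linux/types.h>
#include <linux/wait.h>

/*
 * Hedged sketch, hypothetical names only: the reporter sets the "done"
 * condition and wakes the waiter only when asked to, mirroring the new
 * "wake" flag; the initiating task passes false because it has not
 * started sleeping on the queue yet.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_exp_wq);
static int demo_exp_done;

static void demo_report_exp_done(bool wake)
{
	demo_exp_done = 1;
	if (wake)
		wake_up(&demo_exp_wq);
}

static void demo_wait_for_exp(void)
{
	wait_event(demo_exp_wq, demo_exp_done);
}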
@@ -1157,8 +1165,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
-static struct lock_class_key rcu_boost_class;
-
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1221,15 +1227,13 @@ static int rcu_boost(struct rcu_node *rnp)
*/
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
- /* Avoid lockdep false positives. This rt_mutex is its own thing. */
- lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
- "rcu_boost_mutex");
t->rcu_boost_mutex = &mtx;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
- return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
+ return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
+ ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
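The ACCESS_ONCE() wrappers added to the return statement above matter because rnp->lock has already been dropped at that point, so without them the compiler could legitimately reload ->exp_tasks and ->boost_tasks or reuse an earlier value. For reference, the macro's long-standing definition in include/linux/compiler.h boils down to a single volatile access:

/*
 * One volatile load per use: the compiler may neither cache an earlier
 * value nor re-read the field a second time behind our back.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))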
/*
@@ -1329,6 +1333,15 @@ static void invoke_rcu_callbacks_kthread(void)
}
/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+ return __get_cpu_var(rcu_cpu_kthread_task) == current;
+}
+
+/*
* Set the affinity of the boost kthread. The CPU-hotplug locks are
* held, so no one should be messing with the existence of the boost
* kthread.
@@ -1772,6 +1785,11 @@ static void invoke_rcu_callbacks_kthread(void)
WARN_ON_ONCE(1);
}
+static bool rcu_is_callbacks_kthread(void)
+{
+ return false;
+}
+
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}
@@ -1907,7 +1925,7 @@ void synchronize_sched_expedited(void)
* grace period works for us.
*/
get_online_cpus();
- snap = atomic_read(&sync_sched_expedited_started) - 1;
+ snap = atomic_read(&sync_sched_expedited_started);
smp_mb(); /* ensure read is before try_stop_cpus(). */
}
@@ -1939,88 +1957,243 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
* 1 if so. This function is part of the RCU implementation; it is -not-
* an exported member of the RCU API.
*
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
+ * any flavor of RCU.
*/
int rcu_needs_cpu(int cpu)
{
- return rcu_needs_cpu_quick_check(cpu);
+ return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+}
+
+/*
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
+ */
+static void rcu_prepare_for_idle(int cpu)
+{
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
-#define RCU_NEEDS_CPU_FLUSHES 5
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode. This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following three preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ * to satisfy RCU. Beyond this point, it is better to incur a periodic
+ * scheduling-clock interrupt than to loop through the state machine
+ * at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ * optional if RCU does not need anything immediately from this
+ * CPU, even if this CPU still has RCU callbacks queued. The first few
+ * passes through the state machine are mandatory: we need to give
+ * the state machine a chance to communicate a quiescent state
+ * to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ * to sleep in dyntick-idle mode with RCU callbacks pending. This
+ * is sized to be roughly one RCU grace period. Those energy-efficiency
+ * benchmarkers who might otherwise be tempted to set this to a large
+ * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ * system. And if you are -that- concerned about energy efficiency,
+ * just power the system down and be done with it!
+ *
+ * The values below work well in practice. If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
+#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
+
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
+static ktime_t rcu_idle_gp_wait;
/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it. After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+ /* If no callbacks, RCU doesn't need the CPU. */
+ if (!rcu_cpu_has_callbacks(cpu))
+ return 0;
+ /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+ return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
+
+/*
+ * Timer handler used to force CPU to start pushing its remaining RCU
+ * callbacks in the case where it entered dyntick-idle mode with callbacks
+ * pending. The handler doesn't really need to do anything because the
+ * real work is done upon re-entry to idle, or by the next scheduling-clock
+ * interrupt should idle not be re-entered.
+ */
+static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+{
+ trace_rcu_prep_idle("Timer");
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the timer used to pull CPUs out of dyntick-idle mode.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+ static int firsttime = 1;
+ struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+ hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtp->function = rcu_idle_gp_timer_func;
+ if (firsttime) {
+ unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
+
+ rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
+ firsttime = 0;
+ }
+}
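As a worked example of the conversion just above, assuming HZ=1000: RCU_IDLE_GP_DELAY is 6 jiffies, jiffies_to_usecs(6) returns 6000, and 6000 * 1000 ns yields a 6 ms rcu_idle_gp_wait; with HZ=250 the same six jiffies would arm a 24 ms timer instead. The conversion is done once, on the first call, guarded by the firsttime flag.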
+
+/*
+ * Clean up for exit from idle. Because we are exiting from idle, there
+ * is no longer any point to rcu_idle_gp_timer, so cancel it. This will
+ * do nothing if this timer is not active, so just cancel it unconditionally.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+ hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done. This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
*
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode. This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode. In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
*
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do an
* invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
* later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
*/
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
{
- int c = 0;
- int snap;
- int thatcpu;
-
- /* Check for being in the holdoff period. */
- if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
- return rcu_needs_cpu_quick_check(cpu);
-
- /* Don't bother unless we are the last non-dyntick-idle CPU. */
- for_each_online_cpu(thatcpu) {
- if (thatcpu == cpu)
- continue;
- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
- thatcpu).dynticks);
- smp_mb(); /* Order sampling of snap with end of grace period. */
- if ((snap & 0x1) != 0) {
- per_cpu(rcu_dyntick_drain, cpu) = 0;
- per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
- return rcu_needs_cpu_quick_check(cpu);
- }
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * If there are no callbacks on this CPU, enter dyntick-idle mode.
+ * Also reset state to avoid prejudicing later attempts.
+ */
+ if (!rcu_cpu_has_callbacks(cpu)) {
+ per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+ per_cpu(rcu_dyntick_drain, cpu) = 0;
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("No callbacks");
+ return;
+ }
+
+ /*
+ * If in holdoff mode, just return. We will presumably have
+ * refrained from disabling the scheduling-clock tick.
+ */
+ if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("In holdoff");
+ return;
}
/* Check and update the rcu_dyntick_drain sequencing. */
if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
/* First time through, initialize the counter. */
- per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+ per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+ } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+ !rcu_pending(cpu)) {
+ /* Can we go dyntick-idle despite still having callbacks? */
+ trace_rcu_prep_idle("Dyntick with callbacks");
+ per_cpu(rcu_dyntick_drain, cpu) = 0;
+ per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+ hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+ rcu_idle_gp_wait, HRTIMER_MODE_REL);
+ return; /* Nothing more to do immediately. */
} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
/* We have hit the limit, so time to give up. */
per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
- return rcu_needs_cpu_quick_check(cpu);
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("Begin holdoff");
+ invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
+ return;
}
- /* Do one step pushing remaining RCU callbacks through. */
+ /*
+ * Do one step of pushing the remaining RCU callbacks through
+ * the RCU core state machine.
+ */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+ local_irq_restore(flags);
+ rcu_preempt_qs(cpu);
+ force_quiescent_state(&rcu_preempt_state, 0);
+ local_irq_save(flags);
+ }
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+ local_irq_restore(flags);
rcu_sched_qs(cpu);
force_quiescent_state(&rcu_sched_state, 0);
- c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+ local_irq_save(flags);
}
if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+ local_irq_restore(flags);
rcu_bh_qs(cpu);
force_quiescent_state(&rcu_bh_state, 0);
- c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
+ local_irq_save(flags);
}
- /* If RCU callbacks are still pending, RCU still needs this CPU. */
- if (c)
+ /*
+ * If RCU callbacks are still pending, RCU still needs this CPU.
+ * So try forcing the callbacks through the grace period.
+ */
+ if (rcu_cpu_has_callbacks(cpu)) {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("More callbacks");
invoke_rcu_core();
- return c;
+ } else {
+ local_irq_restore(flags);
+ trace_rcu_prep_idle("Callbacks drained");
+ }
}
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
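For orientation, here is a hedged sketch (hypothetical caller; the real call sites live in kernel/rcutree.c and are not part of this excerpt) of how the three hooks declared earlier are meant to bracket an idle transition on one CPU:

static void demo_cpu_bringup(int cpu)
{
	rcu_prepare_for_idle_init(cpu);		/* set up rcu_idle_gp_timer once per CPU */
}

static void demo_idle_enter_then_exit(int cpu)
{
	local_irq_disable();
	rcu_prepare_for_idle(cpu);		/* run the state machine shown above */
	local_irq_enable();

	/* ... CPU sleeps in dyntick-idle mode, perhaps until the timer fires ... */

	rcu_cleanup_after_idle(cpu);		/* cancel rcu_idle_gp_timer on idle exit */
}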
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9feffa4c069..654cfe67f0d 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -67,13 +67,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->completed, rdp->gpnum,
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
- seq_printf(m, " dt=%d/%d/%d df=%lu",
+ seq_printf(m, " dt=%d/%llx/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, " ql=%ld qs=%c%c%c%c",
rdp->qlen,
@@ -141,13 +139,11 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
rdp->completed, rdp->gpnum,
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
rdp->qs_pending);
-#ifdef CONFIG_NO_HZ
- seq_printf(m, ",%d,%d,%d,%lu",
+ seq_printf(m, ",%d,%llx,%d,%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
-#endif /* #ifdef CONFIG_NO_HZ */
seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
@@ -171,9 +167,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
-#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
-#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
#ifdef CONFIG_RCU_BOOST
seq_puts(m, "\"kt\",\"ktl\"");
@@ -278,7 +272,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
gpnum = rsp->gpnum;
seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
"nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
- rsp->completed, gpnum, rsp->signaled,
+ rsp->completed, gpnum, rsp->fqs_state,
(long)(rsp->jiffies_force_qs - jiffies),
(int)(jiffies & 0xffff),
rsp->n_force_qs, rsp->n_force_qs_ngp,
diff --git a/kernel/relay.c b/kernel/relay.c
index 226fade4d72..4335e1d7ee2 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -302,7 +302,7 @@ static void buf_unmapped_default_callback(struct rchan_buf *buf,
*/
static struct dentry *create_buf_file_default_callback(const char *filename,
struct dentry *parent,
- int mode,
+ umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 34683efa2cc..6d269cce7aa 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -159,8 +159,7 @@ int res_counter_memparse_write_strategy(const char *buf,
return 0;
}
- /* FIXME - make memparse() take const char* args */
- *res = memparse((char *)buf, &end);
+ *res = memparse(buf, &end);
if (*end != '\0')
return -EINVAL;
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 8eafd1bd273..16502d3a71c 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -101,6 +101,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk("\n============================================\n");
printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk("%s\n", print_tainted());
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 3d9f31cd79e..98ec4947546 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -6,11 +6,11 @@
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
*/
+#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/freezer.h>
@@ -27,7 +27,7 @@ struct test_thread_data {
int opdata;
int mutexes[MAX_RT_TEST_MUTEXES];
int event;
- struct sys_device sysdev;
+ struct device dev;
};
static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
@@ -271,7 +271,7 @@ static int test_func(void *data)
*
* opcode:data
*/
-static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sched_param schedpar;
@@ -279,8 +279,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribut
char cmdbuf[32];
int op, dat, tid, ret;
- td = container_of(dev, struct test_thread_data, sysdev);
- tid = td->sysdev.id;
+ td = container_of(dev, struct test_thread_data, dev);
+ tid = td->dev.id;
/* strings from sysfs write are not 0 terminated! */
if (count >= sizeof(cmdbuf))
@@ -334,7 +334,7 @@ static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribut
* @dev: thread to query
* @buf: char buffer to be filled with thread status info
*/
-static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct test_thread_data *td;
@@ -342,8 +342,8 @@ static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute
char *curr = buf;
int i;
- td = container_of(dev, struct test_thread_data, sysdev);
- tsk = threads[td->sysdev.id];
+ td = container_of(dev, struct test_thread_data, dev);
+ tsk = threads[td->dev.id];
spin_lock(&rttest_lock);
@@ -360,28 +360,29 @@ static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute
spin_unlock(&rttest_lock);
curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
- mutexes[td->sysdev.id].owner);
+ mutexes[td->dev.id].owner);
return curr - buf;
}
-static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
-static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
+static DEVICE_ATTR(status, 0600, sysfs_test_status, NULL);
+static DEVICE_ATTR(command, 0600, NULL, sysfs_test_command);
-static struct sysdev_class rttest_sysclass = {
+static struct bus_type rttest_subsys = {
.name = "rttest",
+ .dev_name = "rttest",
};
static int init_test_thread(int id)
{
- thread_data[id].sysdev.cls = &rttest_sysclass;
- thread_data[id].sysdev.id = id;
+ thread_data[id].dev.bus = &rttest_subsys;
+ thread_data[id].dev.id = id;
threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
if (IS_ERR(threads[id]))
return PTR_ERR(threads[id]);
- return sysdev_register(&thread_data[id].sysdev);
+ return device_register(&thread_data[id].dev);
}
static int init_rttest(void)
@@ -393,7 +394,7 @@ static int init_rttest(void)
for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
rt_mutex_init(&mutexes[i]);
- ret = sysdev_class_register(&rttest_sysclass);
+ ret = subsys_system_register(&rttest_subsys, NULL);
if (ret)
return ret;
@@ -401,10 +402,10 @@ static int init_rttest(void)
ret = init_test_thread(i);
if (ret)
break;
- ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
+ ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
if (ret)
break;
- ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
+ ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
if (ret)
break;
}
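Distilled from the conversion above: a struct bus_type registered with subsys_system_register() stands in for the old sysdev class, plain struct device instances replace struct sys_device, and DEVICE_ATTR()/device_create_file() replace the SYSDEV_ATTR() helpers. A minimal hedged sketch with hypothetical names:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct bus_type demo_subsys = {
	.name		= "demo",
	.dev_name	= "demo",
};

static ssize_t demo_status_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");		/* hypothetical status value */
}
static DEVICE_ATTR(status, 0400, demo_status_show, NULL);

static struct device demo_dev;			/* zero-initialized, like thread_data[].dev */

static int __init demo_init(void)
{
	int ret;

	ret = subsys_system_register(&demo_subsys, NULL);
	if (ret)
		return ret;
	demo_dev.bus = &demo_subsys;
	demo_dev.id = 0;
	ret = device_register(&demo_dev);
	if (ret)
		return ret;
	return device_create_file(&demo_dev, &dev_attr_status);
}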
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index f9d8482dd48..a242e691c99 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,7 +579,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct rt_mutex_waiter *waiter)
{
int ret = 0;
- int was_disabled;
for (;;) {
/* Try to acquire the lock: */
@@ -602,17 +601,10 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
raw_spin_unlock(&lock->wait_lock);
- was_disabled = irqs_disabled();
- if (was_disabled)
- local_irq_enable();
-
debug_rt_mutex_print_deadlock(waiter);
schedule_rt_mutex(lock);
- if (was_disabled)
- local_irq_disable();
-
raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
new file mode 100644
index 00000000000..9a7dd35102a
--- /dev/null
+++ b/kernel/sched/Makefile
@@ -0,0 +1,20 @@
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_clock.o = -pg
+endif
+
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
+# needed for x86 only. Why this used to be enabled for all architectures is beyond
+# me. I suspect most platforms don't need this, but until we know that for sure
+# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k
+# to get a correct value for the wait-channel (WCHAN in ps). --davidm
+CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+endif
+
+obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SCHEDSTATS) += stats.o
+obj-$(CONFIG_SCHED_DEBUG) += debug.o
+
+
diff --git a/kernel/sched_autogroup.c b/kernel/sched/auto_group.c
index 429242f3c48..e8a1f83ee0e 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched/auto_group.c
@@ -1,15 +1,19 @@
#ifdef CONFIG_SCHED_AUTOGROUP
+#include "sched.h"
+
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
+#include <linux/security.h>
+#include <linux/export.h>
unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;
-static void __init autogroup_init(struct task_struct *init_task)
+void __init autogroup_init(struct task_struct *init_task)
{
autogroup_default.tg = &root_task_group;
kref_init(&autogroup_default.kref);
@@ -17,7 +21,7 @@ static void __init autogroup_init(struct task_struct *init_task)
init_task->signal->autogroup = &autogroup_default;
}
-static inline void autogroup_free(struct task_group *tg)
+void autogroup_free(struct task_group *tg)
{
kfree(tg->autogroup);
}
@@ -59,10 +63,6 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
return ag;
}
-#ifdef CONFIG_RT_GROUP_SCHED
-static void free_rt_sched_group(struct task_group *tg);
-#endif
-
static inline struct autogroup *autogroup_create(void)
{
struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -108,8 +108,7 @@ out_fail:
return autogroup_kref_get(&autogroup_default);
}
-static inline bool
-task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
if (tg != &root_task_group)
return false;
@@ -127,22 +126,6 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
return true;
}
-static inline bool task_group_is_autogroup(struct task_group *tg)
-{
- return !!tg->autogroup;
-}
-
-static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg)
-{
- int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
- if (enabled && task_wants_autogroup(p, tg))
- return p->signal->autogroup->tg;
-
- return tg;
-}
-
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
@@ -263,7 +246,7 @@ out:
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SCHED_DEBUG
-static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
if (!task_group_is_autogroup(tg))
return 0;
diff --git a/kernel/sched_autogroup.h b/kernel/sched/auto_group.h
index c2f0e7248dc..8bd04714281 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched/auto_group.h
@@ -1,5 +1,8 @@
#ifdef CONFIG_SCHED_AUTOGROUP
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+
struct autogroup {
/*
* reference doesn't mean how many thread attach to this
@@ -13,9 +16,28 @@ struct autogroup {
int nice;
};
-static inline bool task_group_is_autogroup(struct task_group *tg);
+extern void autogroup_init(struct task_struct *init_task);
+extern void autogroup_free(struct task_group *tg);
+
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+ return !!tg->autogroup;
+}
+
+extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
+
static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg);
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+ int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+ if (enabled && task_wants_autogroup(p, tg))
+ return p->signal->autogroup->tg;
+
+ return tg;
+}
+
+extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
#else /* !CONFIG_SCHED_AUTOGROUP */
diff --git a/kernel/sched_clock.c b/kernel/sched/clock.c
index c685e31492d..c685e31492d 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched/clock.c
diff --git a/kernel/sched.c b/kernel/sched/core.c
index 0e9344a71be..cecbb64be05 100644
--- a/kernel/sched.c
+++ b/kernel/sched/core.c
@@ -1,5 +1,5 @@
/*
- * kernel/sched.c
+ * kernel/sched/core.c
*
* Kernel scheduler and related syscalls
*
@@ -56,7 +56,6 @@
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
@@ -71,132 +70,21 @@
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/init_task.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
-#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
-#include "sched_cpupri.h"
-#include "workqueue_sched.h"
-#include "sched_autogroup.h"
+#include "sched.h"
+#include "../workqueue_sched.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
- * Helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
-
-#define NICE_0_LOAD SCHED_LOAD_SCALE
-#define NICE_0_SHIFT SCHED_LOAD_SHIFT
-
-/*
- * These are the 'tuning knobs' of the scheduler:
- *
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define DEF_TIMESLICE (100 * HZ / 1000)
-
-/*
- * single value that denotes runtime == period, ie unlimited time.
- */
-#define RUNTIME_INF ((u64)~0ULL)
-
-static inline int rt_policy(int policy)
-{
- if (policy == SCHED_FIFO || policy == SCHED_RR)
- return 1;
- return 0;
-}
-
-static inline int task_has_rt_policy(struct task_struct *p)
-{
- return rt_policy(p->policy);
-}
-
-/*
- * This is the priority-queue data structure of the RT scheduling class:
- */
-struct rt_prio_array {
- DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
- struct list_head queue[MAX_RT_PRIO];
-};
-
-struct rt_bandwidth {
- /* nests inside the rq lock: */
- raw_spinlock_t rt_runtime_lock;
- ktime_t rt_period;
- u64 rt_runtime;
- struct hrtimer rt_period_timer;
-};
-
-static struct rt_bandwidth def_rt_bandwidth;
-
-static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
-
-static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
-{
- struct rt_bandwidth *rt_b =
- container_of(timer, struct rt_bandwidth, rt_period_timer);
- ktime_t now;
- int overrun;
- int idle = 0;
-
- for (;;) {
- now = hrtimer_cb_get_time(timer);
- overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
- if (!overrun)
- break;
-
- idle = do_sched_rt_period_timer(rt_b, overrun);
- }
-
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
-}
-
-static
-void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
-{
- rt_b->rt_period = ns_to_ktime(period);
- rt_b->rt_runtime = runtime;
-
- raw_spin_lock_init(&rt_b->rt_runtime_lock);
-
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rt_b->rt_period_timer.function = sched_rt_period_timer;
-}
-
-static inline int rt_bandwidth_enabled(void)
-{
- return sysctl_sched_rt_runtime >= 0;
-}
-
-static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
+void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
unsigned long delta;
ktime_t soft, hard, now;
@@ -216,580 +104,12 @@ static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
}
}
-static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
-{
- if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
- return;
-
- if (hrtimer_active(&rt_b->rt_period_timer))
- return;
-
- raw_spin_lock(&rt_b->rt_runtime_lock);
- start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
- raw_spin_unlock(&rt_b->rt_runtime_lock);
-}
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
-{
- hrtimer_cancel(&rt_b->rt_period_timer);
-}
-#endif
-
-/*
- * sched_domains_mutex serializes calls to init_sched_domains,
- * detach_destroy_domains and partition_sched_domains.
- */
-static DEFINE_MUTEX(sched_domains_mutex);
-
-#ifdef CONFIG_CGROUP_SCHED
-
-#include <linux/cgroup.h>
-
-struct cfs_rq;
-
-static LIST_HEAD(task_groups);
-
-struct cfs_bandwidth {
-#ifdef CONFIG_CFS_BANDWIDTH
- raw_spinlock_t lock;
- ktime_t period;
- u64 quota, runtime;
- s64 hierarchal_quota;
- u64 runtime_expires;
-
- int idle, timer_active;
- struct hrtimer period_timer, slack_timer;
- struct list_head throttled_cfs_rq;
-
- /* statistics */
- int nr_periods, nr_throttled;
- u64 throttled_time;
-#endif
-};
-
-/* task group related information */
-struct task_group {
- struct cgroup_subsys_state css;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
- /* schedulable entities of this group on each cpu */
- struct sched_entity **se;
- /* runqueue "owned" by this group on each cpu */
- struct cfs_rq **cfs_rq;
- unsigned long shares;
-
- atomic_t load_weight;
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
- struct sched_rt_entity **rt_se;
- struct rt_rq **rt_rq;
-
- struct rt_bandwidth rt_bandwidth;
-#endif
-
- struct rcu_head rcu;
- struct list_head list;
-
- struct task_group *parent;
- struct list_head siblings;
- struct list_head children;
-
-#ifdef CONFIG_SCHED_AUTOGROUP
- struct autogroup *autogroup;
-#endif
-
- struct cfs_bandwidth cfs_bandwidth;
-};
-
-/* task_group_lock serializes the addition/removal of task groups */
-static DEFINE_SPINLOCK(task_group_lock);
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
-
-/*
- * A weight of 0 or 1 can cause arithmetics problems.
- * A weight of a cfs_rq is the sum of weights of which entities
- * are queued on this cfs_rq, so a weight of a entity should not be
- * too large, so as the shares value of a task group.
- * (The default weight is 1024 - so there's no practical
- * limitation from this.)
- */
-#define MIN_SHARES (1UL << 1)
-#define MAX_SHARES (1UL << 18)
-
-static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
-#endif
-
-/* Default task group.
- * Every task in system belong to this group at bootup.
- */
-struct task_group root_task_group;
-
-#endif /* CONFIG_CGROUP_SCHED */
-
-/* CFS-related fields in a runqueue */
-struct cfs_rq {
- struct load_weight load;
- unsigned long nr_running, h_nr_running;
-
- u64 exec_clock;
- u64 min_vruntime;
-#ifndef CONFIG_64BIT
- u64 min_vruntime_copy;
-#endif
-
- struct rb_root tasks_timeline;
- struct rb_node *rb_leftmost;
-
- struct list_head tasks;
- struct list_head *balance_iterator;
-
- /*
- * 'curr' points to currently running entity on this cfs_rq.
- * It is set to NULL otherwise (i.e when none are currently running).
- */
- struct sched_entity *curr, *next, *last, *skip;
-
-#ifdef CONFIG_SCHED_DEBUG
- unsigned int nr_spread_over;
-#endif
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
- struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
-
- /*
- * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
- * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
- * (like users, containers etc.)
- *
- * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
- * list is used during load balance.
- */
- int on_list;
- struct list_head leaf_cfs_rq_list;
- struct task_group *tg; /* group that "owns" this runqueue */
-
-#ifdef CONFIG_SMP
- /*
- * the part of load.weight contributed by tasks
- */
- unsigned long task_weight;
-
- /*
- * h_load = weight * f(tg)
- *
- * Where f(tg) is the recursive weight fraction assigned to
- * this group.
- */
- unsigned long h_load;
-
- /*
- * Maintaining per-cpu shares distribution for group scheduling
- *
- * load_stamp is the last time we updated the load average
- * load_last is the last time we updated the load average and saw load
- * load_unacc_exec_time is currently unaccounted execution time
- */
- u64 load_avg;
- u64 load_period;
- u64 load_stamp, load_last, load_unacc_exec_time;
-
- unsigned long load_contribution;
-#endif
-#ifdef CONFIG_CFS_BANDWIDTH
- int runtime_enabled;
- u64 runtime_expires;
- s64 runtime_remaining;
-
- u64 throttled_timestamp;
- int throttled, throttle_count;
- struct list_head throttled_list;
-#endif
-#endif
-};
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_CFS_BANDWIDTH
-static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
-{
- return &tg->cfs_bandwidth;
-}
-
-static inline u64 default_cfs_period(void);
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
-static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
-
-static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
-{
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, slack_timer);
- do_sched_cfs_slack_timer(cfs_b);
-
- return HRTIMER_NORESTART;
-}
-
-static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
-{
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, period_timer);
- ktime_t now;
- int overrun;
- int idle = 0;
-
- for (;;) {
- now = hrtimer_cb_get_time(timer);
- overrun = hrtimer_forward(timer, now, cfs_b->period);
-
- if (!overrun)
- break;
-
- idle = do_sched_cfs_period_timer(cfs_b, overrun);
- }
-
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
-}
-
-static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
- raw_spin_lock_init(&cfs_b->lock);
- cfs_b->runtime = 0;
- cfs_b->quota = RUNTIME_INF;
- cfs_b->period = ns_to_ktime(default_cfs_period());
-
- INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cfs_b->period_timer.function = sched_cfs_period_timer;
- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cfs_b->slack_timer.function = sched_cfs_slack_timer;
-}
-
-static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
-{
- cfs_rq->runtime_enabled = 0;
- INIT_LIST_HEAD(&cfs_rq->throttled_list);
-}
-
-/* requires cfs_b->lock, may release to reprogram timer */
-static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
- /*
- * The timer may be active because we're trying to set a new bandwidth
- * period or because we're racing with the tear-down path
- * (timer_active==0 becomes visible before the hrtimer call-back
- * terminates). In either case we ensure that it's re-programmed
- */
- while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
- raw_spin_unlock(&cfs_b->lock);
- /* ensure cfs_b->lock is available while we wait */
- hrtimer_cancel(&cfs_b->period_timer);
-
- raw_spin_lock(&cfs_b->lock);
- /* if someone else restarted the timer then we're done */
- if (cfs_b->timer_active)
- return;
- }
-
- cfs_b->timer_active = 1;
- start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
-}
-
-static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
-{
- hrtimer_cancel(&cfs_b->period_timer);
- hrtimer_cancel(&cfs_b->slack_timer);
-}
-#else
-static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
-static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-
-static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
-{
- return NULL;
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
-/* Real-Time classes' related field in a runqueue: */
-struct rt_rq {
- struct rt_prio_array active;
- unsigned long rt_nr_running;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
- struct {
- int curr; /* highest queued rt task prio */
-#ifdef CONFIG_SMP
- int next; /* next highest */
-#endif
- } highest_prio;
-#endif
-#ifdef CONFIG_SMP
- unsigned long rt_nr_migratory;
- unsigned long rt_nr_total;
- int overloaded;
- struct plist_head pushable_tasks;
-#endif
- int rt_throttled;
- u64 rt_time;
- u64 rt_runtime;
- /* Nests inside the rq lock: */
- raw_spinlock_t rt_runtime_lock;
-
-#ifdef CONFIG_RT_GROUP_SCHED
- unsigned long rt_nr_boosted;
-
- struct rq *rq;
- struct list_head leaf_rt_rq_list;
- struct task_group *tg;
-#endif
-};
-
-#ifdef CONFIG_SMP
-
-/*
- * We add the notion of a root-domain which will be used to define per-domain
- * variables. Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset. Whenever a new
- * exclusive cpuset is created, we also create and attach a new root-domain
- * object.
- *
- */
-struct root_domain {
- atomic_t refcount;
- atomic_t rto_count;
- struct rcu_head rcu;
- cpumask_var_t span;
- cpumask_var_t online;
-
- /*
- * The "RT overload" flag: it gets set if a CPU has more than
- * one runnable RT task.
- */
- cpumask_var_t rto_mask;
- struct cpupri cpupri;
-};
-
-/*
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
- */
-static struct root_domain def_root_domain;
-
-#endif /* CONFIG_SMP */
-
-/*
- * This is the main, per-CPU runqueue data structure.
- *
- * Locking rule: those places that want to lock multiple runqueues
- * (such as the load balancing or the thread migration code), lock
- * acquire operations must be ordered by ascending &runqueue.
- */
-struct rq {
- /* runqueue lock: */
- raw_spinlock_t lock;
-
- /*
- * nr_running and cpu_load should be in the same cacheline because
- * remote CPUs use both these fields when doing load calculation.
- */
- unsigned long nr_running;
- #define CPU_LOAD_IDX_MAX 5
- unsigned long cpu_load[CPU_LOAD_IDX_MAX];
- unsigned long last_load_update_tick;
-#ifdef CONFIG_NO_HZ
- u64 nohz_stamp;
- unsigned char nohz_balance_kick;
-#endif
- int skip_clock_update;
-
- /* capture load from *all* tasks on this cpu: */
- struct load_weight load;
- unsigned long nr_load_updates;
- u64 nr_switches;
-
- struct cfs_rq cfs;
- struct rt_rq rt;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
- /* list of leaf cfs_rq on this cpu: */
- struct list_head leaf_cfs_rq_list;
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
- struct list_head leaf_rt_rq_list;
-#endif
-
- /*
- * This is part of a global counter where only the total sum
- * over all CPUs matters. A task can increase this counter on
- * one CPU and if it got migrated afterwards it may decrease
- * it on another CPU. Always updated under the runqueue lock:
- */
- unsigned long nr_uninterruptible;
-
- struct task_struct *curr, *idle, *stop;
- unsigned long next_balance;
- struct mm_struct *prev_mm;
-
- u64 clock;
- u64 clock_task;
-
- atomic_t nr_iowait;
-
-#ifdef CONFIG_SMP
- struct root_domain *rd;
- struct sched_domain *sd;
-
- unsigned long cpu_power;
-
- unsigned char idle_balance;
- /* For active balancing */
- int post_schedule;
- int active_balance;
- int push_cpu;
- struct cpu_stop_work active_balance_work;
- /* cpu of this runqueue: */
- int cpu;
- int online;
-
- u64 rt_avg;
- u64 age_stamp;
- u64 idle_stamp;
- u64 avg_idle;
-#endif
-
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
- u64 prev_irq_time;
-#endif
-#ifdef CONFIG_PARAVIRT
- u64 prev_steal_time;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
- u64 prev_steal_time_rq;
-#endif
-
- /* calc_load related fields */
- unsigned long calc_load_update;
- long calc_load_active;
-
-#ifdef CONFIG_SCHED_HRTICK
-#ifdef CONFIG_SMP
- int hrtick_csd_pending;
- struct call_single_data hrtick_csd;
-#endif
- struct hrtimer hrtick_timer;
-#endif
-
-#ifdef CONFIG_SCHEDSTATS
- /* latency stats */
- struct sched_info rq_sched_info;
- unsigned long long rq_cpu_time;
- /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-
- /* sys_sched_yield() stats */
- unsigned int yld_count;
-
- /* schedule() stats */
- unsigned int sched_switch;
- unsigned int sched_count;
- unsigned int sched_goidle;
-
- /* try_to_wake_up() stats */
- unsigned int ttwu_count;
- unsigned int ttwu_local;
-#endif
-
-#ifdef CONFIG_SMP
- struct llist_head wake_list;
-#endif
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-
-static inline int cpu_of(struct rq *rq)
-{
-#ifdef CONFIG_SMP
- return rq->cpu;
-#else
- return 0;
-#endif
-}
-
-#define rcu_dereference_check_sched_domain(p) \
- rcu_dereference_check((p), \
- lockdep_is_held(&sched_domains_mutex))
-
-/*
- * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
- * See detach_destroy_domains: synchronize_sched for details.
- *
- * The domain tree of any CPU may only be accessed from within
- * preempt-disabled sections.
- */
-#define for_each_domain(cpu, __sd) \
- for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
-
-#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() (&__get_cpu_var(runqueues))
-#define task_rq(p) cpu_rq(task_cpu(p))
-#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-#define raw_rq() (&__raw_get_cpu_var(runqueues))
-
-#ifdef CONFIG_CGROUP_SCHED
-
-/*
- * Return the group to which this tasks belongs.
- *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
- */
-static inline struct task_group *task_group(struct task_struct *p)
-{
- struct task_group *tg;
- struct cgroup_subsys_state *css;
-
- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
- lockdep_is_held(&p->pi_lock) ||
- lockdep_is_held(&task_rq(p)->lock));
- tg = container_of(css, struct task_group, css);
-
- return autogroup_task_group(p, tg);
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-#ifdef CONFIG_FAIR_GROUP_SCHED
- p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
- p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
- p->rt.rt_rq = task_group(p)->rt_rq[cpu];
- p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-}
-
-#else /* CONFIG_CGROUP_SCHED */
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
- return NULL;
-}
-
-#endif /* CONFIG_CGROUP_SCHED */
+DEFINE_MUTEX(sched_domains_mutex);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static void update_rq_clock_task(struct rq *rq, s64 delta);
-static void update_rq_clock(struct rq *rq)
+void update_rq_clock(struct rq *rq)
{
s64 delta;
@@ -802,44 +122,14 @@ static void update_rq_clock(struct rq *rq)
}
/*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
- */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug static const
-#endif
-
-/**
- * runqueue_is_locked - Returns true if the current cpu runqueue is locked
- * @cpu: the processor in question.
- *
- * This interface allows printk to be called with the runqueue lock
- * held and know whether or not it is OK to wake up the klogd.
- */
-int runqueue_is_locked(int cpu)
-{
- return raw_spin_is_locked(&cpu_rq(cpu)->lock);
-}
-
-/*
* Debugging: various feature bits
*/
#define SCHED_FEAT(name, enabled) \
- __SCHED_FEAT_##name ,
-
-enum {
-#include "sched_features.h"
-};
-
-#undef SCHED_FEAT
-
-#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
-#include "sched_features.h"
+#include "features.h"
0;
#undef SCHED_FEAT
@@ -849,7 +139,7 @@ const_debug unsigned int sysctl_sched_features =
#name ,
static __read_mostly char *sched_feat_names[] = {
-#include "sched_features.h"
+#include "features.h"
NULL
};
@@ -859,7 +149,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
{
int i;
- for (i = 0; sched_feat_names[i]; i++) {
+ for (i = 0; i < __SCHED_FEAT_NR; i++) {
if (!(sysctl_sched_features & (1UL << i)))
seq_puts(m, "NO_");
seq_printf(m, "%s ", sched_feat_names[i]);
@@ -869,6 +159,36 @@ static int sched_feat_show(struct seq_file *m, void *v)
return 0;
}
+#ifdef HAVE_JUMP_LABEL
+
+#define jump_label_key__true jump_label_key_enabled
+#define jump_label_key__false jump_label_key_disabled
+
+#define SCHED_FEAT(name, enabled) \
+ jump_label_key__##enabled ,
+
+struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+#include "features.h"
+};
+
+#undef SCHED_FEAT
+
+static void sched_feat_disable(int i)
+{
+ if (jump_label_enabled(&sched_feat_keys[i]))
+ jump_label_dec(&sched_feat_keys[i]);
+}
+
+static void sched_feat_enable(int i)
+{
+ if (!jump_label_enabled(&sched_feat_keys[i]))
+ jump_label_inc(&sched_feat_keys[i]);
+}
+#else
+static void sched_feat_disable(int i) { };
+static void sched_feat_enable(int i) { };
+#endif /* HAVE_JUMP_LABEL */
+
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -892,17 +212,20 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
cmp += 3;
}
- for (i = 0; sched_feat_names[i]; i++) {
+ for (i = 0; i < __SCHED_FEAT_NR; i++) {
if (strcmp(cmp, sched_feat_names[i]) == 0) {
- if (neg)
+ if (neg) {
sysctl_sched_features &= ~(1UL << i);
- else
+ sched_feat_disable(i);
+ } else {
sysctl_sched_features |= (1UL << i);
+ sched_feat_enable(i);
+ }
break;
}
}
- if (!sched_feat_names[i])
+ if (i == __SCHED_FEAT_NR)
return -EINVAL;
*ppos += cnt;
@@ -931,10 +254,7 @@ static __init int sched_init_debug(void)
return 0;
}
late_initcall(sched_init_debug);
-
-#endif
-
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* CONFIG_SCHED_DEBUG */
/*
* Number of tasks to iterate in a single balance run.
@@ -956,7 +276,7 @@ const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
*/
unsigned int sysctl_sched_rt_period = 1000000;
-static __read_mostly int scheduler_running;
+__read_mostly int scheduler_running;
/*
* part of the period that we allow rt tasks to run in us.
@@ -964,112 +284,7 @@ static __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
-static inline u64 global_rt_period(void)
-{
- return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
-}
-
-static inline u64 global_rt_runtime(void)
-{
- if (sysctl_sched_rt_runtime < 0)
- return RUNTIME_INF;
- return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
-}
-
-#ifndef prepare_arch_switch
-# define prepare_arch_switch(next) do { } while (0)
-#endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev) do { } while (0)
-#endif
-
-static inline int task_current(struct rq *rq, struct task_struct *p)
-{
- return rq->curr == p;
-}
-
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
- return p->on_cpu;
-#else
- return task_current(rq, p);
-#endif
-}
-
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- */
- smp_wmb();
- prev->on_cpu = 0;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- raw_spin_unlock_irq(&rq->lock);
-#else
- raw_spin_unlock(&rq->lock);
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- */
- smp_wmb();
- prev->on_cpu = 0;
-#endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- local_irq_enable();
-#endif
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
* __task_rq_lock - lock the rq @p resides on.
@@ -1152,20 +367,6 @@ static struct rq *this_rq_lock(void)
* rq->lock.
*/
-/*
- * Use hrtick when:
- * - enabled by features
- * - hrtimer is actually high res
- */
-static inline int hrtick_enabled(struct rq *rq)
-{
- if (!sched_feat(HRTICK))
- return 0;
- if (!cpu_active(cpu_of(rq)))
- return 0;
- return hrtimer_is_hres_active(&rq->hrtick_timer);
-}
-
static void hrtick_clear(struct rq *rq)
{
if (hrtimer_active(&rq->hrtick_timer))
@@ -1209,7 +410,7 @@ static void __hrtick_start(void *arg)
*
* called with rq->lock held and irqs disabled
*/
-static void hrtick_start(struct rq *rq, u64 delay)
+void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
@@ -1253,7 +454,7 @@ static __init void init_hrtick(void)
*
* called with rq->lock held and irqs disabled
*/
-static void hrtick_start(struct rq *rq, u64 delay)
+void hrtick_start(struct rq *rq, u64 delay)
{
__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
HRTIMER_MODE_REL_PINNED, 0);
@@ -1304,7 +505,7 @@ static inline void init_hrtick(void)
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
-static void resched_task(struct task_struct *p)
+void resched_task(struct task_struct *p)
{
int cpu;
@@ -1325,7 +526,7 @@ static void resched_task(struct task_struct *p)
smp_send_reschedule(cpu);
}
-static void resched_cpu(int cpu)
+void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -1406,7 +607,8 @@ void wake_up_idle_cpu(int cpu)
static inline bool got_nohz_idle_kick(void)
{
- return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
+ int cpu = smp_processor_id();
+ return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}
#else /* CONFIG_NO_HZ */
@@ -1418,12 +620,7 @@ static inline bool got_nohz_idle_kick(void)
#endif /* CONFIG_NO_HZ */
-static u64 sched_avg_period(void)
-{
- return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
-}
-
-static void sched_avg_update(struct rq *rq)
+void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
@@ -1439,193 +636,23 @@ static void sched_avg_update(struct rq *rq)
}
}
-static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
- rq->rt_avg += rt_delta;
- sched_avg_update(rq);
-}
-
#else /* !CONFIG_SMP */
-static void resched_task(struct task_struct *p)
+void resched_task(struct task_struct *p)
{
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
-
-static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
-{
-}
-
-static void sched_avg_update(struct rq *rq)
-{
-}
#endif /* CONFIG_SMP */
-#if BITS_PER_LONG == 32
-# define WMULT_CONST (~0UL)
-#else
-# define WMULT_CONST (1UL << 32)
-#endif
-
-#define WMULT_SHIFT 32
-
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
-
-/*
- * delta *= weight / lw
- */
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
- struct load_weight *lw)
-{
- u64 tmp;
-
- /*
- * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
- * entities since MIN_SHARES = 2. Treat weight as 1 if less than
- * 2^SCHED_LOAD_RESOLUTION.
- */
- if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
- tmp = (u64)delta_exec * scale_load_down(weight);
- else
- tmp = (u64)delta_exec;
-
- if (!lw->inv_weight) {
- unsigned long w = scale_load_down(lw->weight);
-
- if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
- lw->inv_weight = 1;
- else if (unlikely(!w))
- lw->inv_weight = WMULT_CONST;
- else
- lw->inv_weight = WMULT_CONST / w;
- }
-
- /*
- * Check whether we'd overflow the 64-bit multiplication:
- */
- if (unlikely(tmp > WMULT_CONST))
- tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
- WMULT_SHIFT/2);
- else
- tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
-
- return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
-}
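A worked example of the fixed-point scaling in calc_delta_mine(), which is being removed from this file as part of the split: on 64-bit, WMULT_CONST is 2^32, so an entity of weight 1024 on a queue of total weight 3072 gets inv_weight = 2^32 / 3072 = 1,398,101. For delta_exec = 1,000,000 ns, tmp = 1,000,000 * 1024 = 1,024,000,000 (well under WMULT_CONST), and SRR(tmp * inv_weight, 32) comes out to 333,333 ns, matching the exact share 1,000,000 * 1024 / 3072 = 333,333.3 ns to within rounding, with no 64-bit division on the fast path.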
-
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
- lw->weight += inc;
- lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
- lw->weight -= dec;
- lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
- lw->weight = w;
- lw->inv_weight = 0;
-}
-
-/*
- * To aid in avoiding the subversion of "niceness" due to uneven distribution
- * of tasks with abnormal "nice" values across CPUs the contribution that
- * each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
- * scaled version of the new time slice allocation that they receive on time
- * slice expiry etc.
- */
-
-#define WEIGHT_IDLEPRIO 3
-#define WMULT_IDLEPRIO 1431655765
-
-/*
- * Nice levels are multiplicative, with a gentle 10% change for every
- * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
- * nice 1, it will get ~10% less CPU time than another CPU-bound task
- * that remained on nice 0.
- *
- * The "10% effect" is relative and cumulative: from _any_ nice level,
- * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
- * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
- * If a task goes up by ~10% and another task goes down by ~10% then
- * the relative distance between them is ~25%.)
- */
-static const int prio_to_weight[40] = {
- /* -20 */ 88761, 71755, 56483, 46273, 36291,
- /* -15 */ 29154, 23254, 18705, 14949, 11916,
- /* -10 */ 9548, 7620, 6100, 4904, 3906,
- /* -5 */ 3121, 2501, 1991, 1586, 1277,
- /* 0 */ 1024, 820, 655, 526, 423,
- /* 5 */ 335, 272, 215, 172, 137,
- /* 10 */ 110, 87, 70, 56, 45,
- /* 15 */ 36, 29, 23, 18, 15,
-};
-
-/*
- * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
- *
- * In cases where the weight does not change often, we can use the
- * precalculated inverse to speed up arithmetics by turning divisions
- * into multiplications:
- */
-static const u32 prio_to_wmult[40] = {
- /* -20 */ 48388, 59856, 76040, 92818, 118348,
- /* -15 */ 147320, 184698, 229616, 287308, 360437,
- /* -10 */ 449829, 563644, 704093, 875809, 1099582,
- /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
- /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
- /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
- /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
- /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
-};
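As the comment says, each entry is just 2^32 divided by the corresponding weight, e.g. 2^32 / 1024 = 4194304 for nice 0 and 2^32 / 88761 ≈ 48388 for nice -20, which is why calc_delta_mine() can replace a division by the weight with a multiplication by prio_to_wmult[] followed by a 32-bit shift.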
-
-/* Time spent by the tasks of the cpu accounting group executing in ... */
-enum cpuacct_stat_index {
- CPUACCT_STAT_USER, /* ... user mode */
- CPUACCT_STAT_SYSTEM, /* ... kernel mode */
-
- CPUACCT_STAT_NSTATS,
-};
-
-#ifdef CONFIG_CGROUP_CPUACCT
-static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
-static void cpuacct_update_stats(struct task_struct *tsk,
- enum cpuacct_stat_index idx, cputime_t val);
-#else
-static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
-static inline void cpuacct_update_stats(struct task_struct *tsk,
- enum cpuacct_stat_index idx, cputime_t val) {}
-#endif
-
-static inline void inc_cpu_load(struct rq *rq, unsigned long load)
-{
- update_load_add(&rq->load, load);
-}
-
-static inline void dec_cpu_load(struct rq *rq, unsigned long load)
-{
- update_load_sub(&rq->load, load);
-}
-
#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
-typedef int (*tg_visitor)(struct task_group *, void *);
-
/*
* Iterate task_group tree rooted at *from, calling @down when first entering a
* node and @up when leaving it for the final time.
*
* Caller must hold rcu_lock or sufficient equivalent.
*/
-static int walk_tg_tree_from(struct task_group *from,
+int walk_tg_tree_from(struct task_group *from,
tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
@@ -1656,270 +683,13 @@ out:
return ret;
}
-/*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
- *
- * Caller must hold rcu_lock or sufficient equivalent.
- */
-
-static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
-{
- return walk_tg_tree_from(&root_task_group, down, up, data);
-}
-
-static int tg_nop(struct task_group *tg, void *data)
+int tg_nop(struct task_group *tg, void *data)
{
return 0;
}
#endif
-#ifdef CONFIG_SMP
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
- return cpu_rq(cpu)->load.weight;
-}
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long total = weighted_cpuload(cpu);
-
- if (type == 0 || !sched_feat(LB_BIAS))
- return total;
-
- return max(rq->cpu_load[type-1], total);
-}
-
-static unsigned long power_of(int cpu)
-{
- return cpu_rq(cpu)->cpu_power;
-}
-
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
-
- if (nr_running)
- return rq->load.weight / nr_running;
-
- return 0;
-}
-
-#ifdef CONFIG_PREEMPT
-
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
-/*
- * fair double_lock_balance: Safely acquires both rq->locks in a fair
- * way at the expense of forcing extra atomic operations in all
- * invocations. This assures that the double_lock is acquired using the
- * same underlying policy as the spinlock_t on this architecture, which
- * reduces latency compared to the unfair variant below. However, it
- * also adds more overhead and therefore may reduce throughput.
- */
-static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(this_rq->lock)
- __acquires(busiest->lock)
- __acquires(this_rq->lock)
-{
- raw_spin_unlock(&this_rq->lock);
- double_rq_lock(this_rq, busiest);
-
- return 1;
-}
-
-#else
-/*
- * Unfair double_lock_balance: Optimizes throughput at the expense of
- * latency by eliminating extra atomic operations when the locks are
- * already in proper order on entry. This favors lower cpu-ids and will
- * grant the double lock to lower cpus over higher ids under contention,
- * regardless of entry order into the function.
- */
-static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(this_rq->lock)
- __acquires(busiest->lock)
- __acquires(this_rq->lock)
-{
- int ret = 0;
-
- if (unlikely(!raw_spin_trylock(&busiest->lock))) {
- if (busiest < this_rq) {
- raw_spin_unlock(&this_rq->lock);
- raw_spin_lock(&busiest->lock);
- raw_spin_lock_nested(&this_rq->lock,
- SINGLE_DEPTH_NESTING);
- ret = 1;
- } else
- raw_spin_lock_nested(&busiest->lock,
- SINGLE_DEPTH_NESTING);
- }
- return ret;
-}
-
-#endif /* CONFIG_PREEMPT */
-
-/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-{
- if (unlikely(!irqs_disabled())) {
- /* printk() doesn't work good under rq->lock */
- raw_spin_unlock(&this_rq->lock);
- BUG_ON(1);
- }
-
- return _double_lock_balance(this_rq, busiest);
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(busiest->lock)
-{
- raw_spin_unlock(&busiest->lock);
- lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static void double_rq_lock(struct rq *rq1, struct rq *rq2)
- __acquires(rq1->lock)
- __acquires(rq2->lock)
-{
- BUG_ON(!irqs_disabled());
- if (rq1 == rq2) {
- raw_spin_lock(&rq1->lock);
- __acquire(rq2->lock); /* Fake it out ;) */
- } else {
- if (rq1 < rq2) {
- raw_spin_lock(&rq1->lock);
- raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
- } else {
- raw_spin_lock(&rq2->lock);
- raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
- }
- }
-}
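Both helpers above rely on a fixed acquisition order (lower runqueue address first) so that two CPUs locking the same pair concurrently cannot deadlock; a small user-space pthreads sketch of the same idea (illustrative, not kernel API):

	#include <pthread.h>

	/* take two locks in address order so concurrent callers agree on the order */
	static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a == b) {
			pthread_mutex_lock(a);
			return;
		}
		if (a < b) {
			pthread_mutex_lock(a);
			pthread_mutex_lock(b);
		} else {
			pthread_mutex_lock(b);
			pthread_mutex_lock(a);
		}
	}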
-
-/*
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
- __releases(rq1->lock)
- __releases(rq2->lock)
-{
- raw_spin_unlock(&rq1->lock);
- if (rq1 != rq2)
- raw_spin_unlock(&rq2->lock);
- else
- __release(rq2->lock);
-}
-
-#else /* CONFIG_SMP */
-
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static void double_rq_lock(struct rq *rq1, struct rq *rq2)
- __acquires(rq1->lock)
- __acquires(rq2->lock)
-{
- BUG_ON(!irqs_disabled());
- BUG_ON(rq1 != rq2);
- raw_spin_lock(&rq1->lock);
- __acquire(rq2->lock); /* Fake it out ;) */
-}
-
-/*
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
- __releases(rq1->lock)
- __releases(rq2->lock)
-{
- BUG_ON(rq1 != rq2);
- raw_spin_unlock(&rq1->lock);
- __release(rq2->lock);
-}
-
-#endif
-
-static void calc_load_account_idle(struct rq *this_rq);
-static void update_sysctl(void);
-static int get_update_sysctl_factor(void);
-static void update_cpu_load(struct rq *this_rq);
-
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
- /*
- * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfully executed on another CPU. We must ensure that updates of
- * per-task data have been completed by this moment.
- */
- smp_wmb();
- task_thread_info(p)->cpu = cpu;
-#endif
-}
-
-static const struct sched_class rt_sched_class;
-
-#define sched_class_highest (&stop_sched_class)
-#define for_each_class(class) \
- for (class = sched_class_highest; class; class = class->next)
-
-#include "sched_stats.h"
-
-static void inc_nr_running(struct rq *rq)
-{
- rq->nr_running++;
-}
-
-static void dec_nr_running(struct rq *rq)
-{
- rq->nr_running--;
-}
+void update_cpu_load(struct rq *this_rq);
static void set_load_weight(struct task_struct *p)
{
@@ -1956,7 +726,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
/*
* activate_task - move a task to the runqueue.
*/
-static void activate_task(struct rq *rq, struct task_struct *p, int flags)
+void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
@@ -1967,7 +737,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int flags)
/*
* deactivate_task - remove a task from the runqueue.
*/
-static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
+void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
@@ -2158,14 +928,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int irqtime_account_hi_update(void)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
unsigned long flags;
u64 latest_ns;
int ret = 0;
local_irq_save(flags);
latest_ns = this_cpu_read(cpu_hardirq_time);
- if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+ if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
ret = 1;
local_irq_restore(flags);
return ret;
@@ -2173,14 +943,14 @@ static int irqtime_account_hi_update(void)
static int irqtime_account_si_update(void)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
unsigned long flags;
u64 latest_ns;
int ret = 0;
local_irq_save(flags);
latest_ns = this_cpu_read(cpu_softirq_time);
- if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+ if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
ret = 1;
local_irq_restore(flags);
return ret;
@@ -2192,15 +962,6 @@ static int irqtime_account_si_update(void)
#endif
-#include "sched_idletask.c"
-#include "sched_fair.c"
-#include "sched_rt.c"
-#include "sched_autogroup.c"
-#include "sched_stoptask.c"
-#ifdef CONFIG_SCHED_DEBUG
-# include "sched_debug.c"
-#endif
-
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -2298,7 +1059,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio);
}
-static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
const struct sched_class *class;
@@ -2324,38 +1085,6 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
}
#ifdef CONFIG_SMP
-/*
- * Is this task likely cache-hot:
- */
-static int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
-{
- s64 delta;
-
- if (p->sched_class != &fair_sched_class)
- return 0;
-
- if (unlikely(p->policy == SCHED_IDLE))
- return 0;
-
- /*
- * Buddy candidates are cache hot:
- */
- if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
- return 1;
-
- if (sysctl_sched_migration_cost == -1)
- return 1;
- if (sysctl_sched_migration_cost == 0)
- return 0;
-
- delta = now - p->se.exec_start;
-
- return delta < (s64)sysctl_sched_migration_cost;
-}
-
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
@@ -2782,6 +1511,11 @@ static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
}
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+
+static inline int ttwu_share_cache(int this_cpu, int that_cpu)
+{
+ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
+}
#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
@@ -2789,7 +1523,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
struct rq *rq = cpu_rq(cpu);
#if defined(CONFIG_SMP)
- if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+ if (sched_feat(TTWU_QUEUE) && !ttwu_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu);
return;
@@ -3203,6 +1937,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
+ trace_sched_stat_sleeptime(current, rq->clock);
fire_sched_in_preempt_notifiers(current);
if (mm)
@@ -3438,7 +2173,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
*/
static atomic_long_t calc_load_tasks_idle;
-static void calc_load_account_idle(struct rq *this_rq)
+void calc_load_account_idle(struct rq *this_rq)
{
long delta;
@@ -3582,7 +2317,7 @@ static void calc_global_nohz(unsigned long ticks)
*/
}
#else
-static void calc_load_account_idle(struct rq *this_rq)
+void calc_load_account_idle(struct rq *this_rq)
{
}
@@ -3725,7 +2460,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
* scheduler tick (TICK_NSEC). With tickless idle this will not be called
* every tick. We fix it up based on jiffies.
*/
-static void update_cpu_load(struct rq *this_rq)
+void update_cpu_load(struct rq *this_rq)
{
unsigned long this_load = this_rq->load.weight;
unsigned long curr_jiffies = jiffies;
@@ -3803,8 +2538,10 @@ unlock:
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
/*
* Return any ns on the sched_clock that have not yet been accounted in
@@ -3857,6 +2594,42 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}
+#ifdef CONFIG_CGROUP_CPUACCT
+struct cgroup_subsys cpuacct_subsys;
+struct cpuacct root_cpuacct;
+#endif
+
+static inline void task_group_account_field(struct task_struct *p, int index,
+ u64 tmp)
+{
+#ifdef CONFIG_CGROUP_CPUACCT
+ struct kernel_cpustat *kcpustat;
+ struct cpuacct *ca;
+#endif
+ /*
+ * Since all updates are sure to touch the root cgroup, we
+ * get ourselves ahead and touch it first. If the root cgroup
+ * is the only cgroup, then nothing else should be necessary.
+	 */
+ __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+
+#ifdef CONFIG_CGROUP_CPUACCT
+ if (unlikely(!cpuacct_subsys.active))
+ return;
+
+ rcu_read_lock();
+ ca = task_ca(p);
+ while (ca && (ca != &root_cpuacct)) {
+ kcpustat = this_cpu_ptr(ca->cpustat);
+ kcpustat->cpustat[index] += tmp;
+ ca = parent_ca(ca);
+ }
+ rcu_read_unlock();
+#endif
+}
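A compressed user-space model of the walk above (struct and names are illustrative; the real code uses per-cpu kernel_cpustat buffers under rcu_read_lock()):

	struct acct_group {
		unsigned long long cpustat[16];
		struct acct_group *parent;	/* NULL for the root group */
	};

	static unsigned long long root_cpustat[16];

	/* charge the root unconditionally, then every non-root ancestor of the task */
	static void account_field_sketch(struct acct_group *ca, int index,
					 unsigned long long tmp)
	{
		root_cpustat[index] += tmp;
		for (; ca && ca->parent; ca = ca->parent)
			ca->cpustat[index] += tmp;
	}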
+
+
/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
@@ -3866,22 +2639,18 @@ unsigned long long task_sched_runtime(struct task_struct *p)
void account_user_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
- cputime64_t tmp;
+ int index;
/* Add user time to process. */
- p->utime = cputime_add(p->utime, cputime);
- p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+ p->utime += cputime;
+ p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime);
+ index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+
/* Add user time to cpustat. */
- tmp = cputime_to_cputime64(cputime);
- if (TASK_NICE(p) > 0)
- cpustat->nice = cputime64_add(cpustat->nice, tmp);
- else
- cpustat->user = cputime64_add(cpustat->user, tmp);
+ task_group_account_field(p, index, (__force u64) cputime);
- cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
/* Account for user time used */
acct_update_integrals(p);
}
@@ -3895,24 +2664,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
static void account_guest_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
- cputime64_t tmp;
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-
- tmp = cputime_to_cputime64(cputime);
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
/* Add guest time to process. */
- p->utime = cputime_add(p->utime, cputime);
- p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+ p->utime += cputime;
+ p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime);
- p->gtime = cputime_add(p->gtime, cputime);
+ p->gtime += cputime;
/* Add guest time to cpustat. */
if (TASK_NICE(p) > 0) {
- cpustat->nice = cputime64_add(cpustat->nice, tmp);
- cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+ cpustat[CPUTIME_NICE] += (__force u64) cputime;
+ cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
} else {
- cpustat->user = cputime64_add(cpustat->user, tmp);
- cpustat->guest = cputime64_add(cpustat->guest, tmp);
+ cpustat[CPUTIME_USER] += (__force u64) cputime;
+ cpustat[CPUTIME_GUEST] += (__force u64) cputime;
}
}
@@ -3925,18 +2691,15 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
*/
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled, cputime64_t *target_cputime64)
+ cputime_t cputime_scaled, int index)
{
- cputime64_t tmp = cputime_to_cputime64(cputime);
-
/* Add system time to process. */
- p->stime = cputime_add(p->stime, cputime);
- p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+ p->stime += cputime;
+ p->stimescaled += cputime_scaled;
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
- *target_cputime64 = cputime64_add(*target_cputime64, tmp);
- cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+ task_group_account_field(p, index, (__force u64) cputime);
/* Account for system time used */
acct_update_integrals(p);
@@ -3952,8 +2715,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime, cputime_t cputime_scaled)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
- cputime64_t *target_cputime64;
+ int index;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime, cputime_scaled);
@@ -3961,13 +2723,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
}
if (hardirq_count() - hardirq_offset)
- target_cputime64 = &cpustat->irq;
+ index = CPUTIME_IRQ;
else if (in_serving_softirq())
- target_cputime64 = &cpustat->softirq;
+ index = CPUTIME_SOFTIRQ;
else
- target_cputime64 = &cpustat->system;
+ index = CPUTIME_SYSTEM;
- __account_system_time(p, cputime, cputime_scaled, target_cputime64);
+ __account_system_time(p, cputime, cputime_scaled, index);
}
/*
@@ -3976,10 +2738,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
*/
void account_steal_time(cputime_t cputime)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
- cputime64_t cputime64 = cputime_to_cputime64(cputime);
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
- cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+ cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}
/*
@@ -3988,14 +2749,13 @@ void account_steal_time(cputime_t cputime)
*/
void account_idle_time(cputime_t cputime)
{
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
- cputime64_t cputime64 = cputime_to_cputime64(cputime);
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0)
- cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+ cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
else
- cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+ cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
static __always_inline bool steal_account_process_tick(void)
@@ -4045,16 +2805,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq)
{
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
- cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
- struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
if (steal_account_process_tick())
return;
if (irqtime_account_hi_update()) {
- cpustat->irq = cputime64_add(cpustat->irq, tmp);
+ cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
} else if (irqtime_account_si_update()) {
- cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+ cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
} else if (this_cpu_ksoftirqd() == p) {
/*
	 * ksoftirqd time does not get accounted in cpu_softirq_time.
@@ -4062,7 +2821,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
* Also, p->stime needs to be updated for ksoftirqd.
*/
__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- &cpustat->softirq);
+ CPUTIME_SOFTIRQ);
} else if (user_tick) {
account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
} else if (p == rq->idle) {
@@ -4071,7 +2830,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
} else {
__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- &cpustat->system);
+ CPUTIME_SYSTEM);
}
}
@@ -4170,7 +2929,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
- cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+ cputime_t rtime, utime = p->utime, total = utime + p->stime;
/*
* Use CFS's precise accounting:
@@ -4178,11 +2937,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
- u64 temp = rtime;
+ u64 temp = (__force u64) rtime;
- temp *= utime;
- do_div(temp, total);
- utime = (cputime_t)temp;
+ temp *= (__force u64) utime;
+ do_div(temp, (__force u32) total);
+ utime = (__force cputime_t) temp;
} else
utime = rtime;
@@ -4190,7 +2949,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
* Compare with previous values, to keep monotonicity:
*/
p->prev_utime = max(p->prev_utime, utime);
- p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
+ p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
*ut = p->prev_utime;
*st = p->prev_stime;
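Worked numbers for the scaling above (illustrative): with p->utime = 60, p->stime = 40 and a precise rtime of 120 cputime units, total = 100, so utime becomes 120 * 60 / 100 = 72 and prev_stime becomes max(prev_stime, 120 - 72) = 48 when it was previously lower; the reported pair tracks the precise CFS runtime while max() keeps both values monotonic across calls.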
@@ -4207,21 +2966,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
thread_group_cputime(p, &cputime);
- total = cputime_add(cputime.utime, cputime.stime);
+ total = cputime.utime + cputime.stime;
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
- u64 temp = rtime;
+ u64 temp = (__force u64) rtime;
- temp *= cputime.utime;
- do_div(temp, total);
- utime = (cputime_t)temp;
+ temp *= (__force u64) cputime.utime;
+ do_div(temp, (__force u32) total);
+ utime = (__force cputime_t) temp;
} else
utime = rtime;
sig->prev_utime = max(sig->prev_utime, utime);
- sig->prev_stime = max(sig->prev_stime,
- cputime_sub(rtime, sig->prev_utime));
+ sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
*ut = sig->prev_utime;
*st = sig->prev_stime;
@@ -4320,6 +3078,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
{
struct pt_regs *regs = get_irq_regs();
+ if (oops_in_progress)
+ return;
+
printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
prev->comm, prev->pid, preempt_count());
@@ -4810,6 +3571,9 @@ EXPORT_SYMBOL(wait_for_completion);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
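A sketch of how a caller would consume the return value documented above (my_wait() and the 100 ms budget are illustrative):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static int my_wait(struct completion *done)
	{
		unsigned long left = wait_for_completion_timeout(done, msecs_to_jiffies(100));

		if (!left)
			return -ETIMEDOUT;	/* 0: the timeout expired first */
		return 0;			/* > 0: completed with 'left' jiffies to spare */
	}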
@@ -4824,6 +3588,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -4841,6 +3607,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +3625,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -4874,6 +3645,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -5838,6 +4612,13 @@ again:
*/
if (preempt && rq != p_rq)
resched_task(p_rq->curr);
+ } else {
+ /*
+ * We might have set it in task_yield_fair(), but are
+ * not going to schedule(), so don't want to skip
+ * the next update.
+ */
+ rq->skip_clock_update = 0;
}
out:
@@ -6005,7 +4786,7 @@ void sched_show_task(struct task_struct *p)
free = stack_not_used(p);
#endif
printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
- task_pid_nr(p), task_pid_nr(p->real_parent),
+ task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
(unsigned long)task_thread_info(p)->flags);
show_stack(p, NULL);
@@ -6099,53 +4880,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
-}
-
-/*
- * Increase the granularity value when there are more CPUs,
- * because with more CPUs the 'effective latency' as visible
- * to users decreases. But the relationship is not linear,
- * so pick a second-best guess by going with the log2 of the
- * number of CPUs.
- *
- * This idea comes from the SD scheduler of Con Kolivas:
- */
-static int get_update_sysctl_factor(void)
-{
- unsigned int cpus = min_t(int, num_online_cpus(), 8);
- unsigned int factor;
-
- switch (sysctl_sched_tunable_scaling) {
- case SCHED_TUNABLESCALING_NONE:
- factor = 1;
- break;
- case SCHED_TUNABLESCALING_LINEAR:
- factor = cpus;
- break;
- case SCHED_TUNABLESCALING_LOG:
- default:
- factor = 1 + ilog2(cpus);
- break;
- }
-
- return factor;
-}
-
-static void update_sysctl(void)
-{
- unsigned int factor = get_update_sysctl_factor();
-
-#define SET_SYSCTL(name) \
- (sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
- SET_SYSCTL(sched_latency);
- SET_SYSCTL(sched_wakeup_granularity);
-#undef SET_SYSCTL
-}
-
-static inline void sched_init_granularity(void)
-{
- update_sysctl();
+#if defined(CONFIG_SMP)
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
}
#ifdef CONFIG_SMP
@@ -6334,30 +5071,6 @@ static void calc_global_load_remove(struct rq *rq)
rq->calc_load_active = 0;
}
-#ifdef CONFIG_CFS_BANDWIDTH
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
-{
- struct cfs_rq *cfs_rq;
-
- for_each_leaf_cfs_rq(rq, cfs_rq) {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
-
- if (!cfs_rq->runtime_enabled)
- continue;
-
- /*
- * clock_task is not advancing so we just need to make sure
- * there's some valid quota amount
- */
- cfs_rq->runtime_remaining = cfs_b->quota;
- if (cfs_rq_throttled(cfs_rq))
- unthrottle_cfs_rq(cfs_rq);
- }
-}
-#else
-static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
-#endif
-
/*
* Migrate all tasks from the rq, sleeping tasks will be migrated by
* try_to_wake_up()->select_task_rq().
@@ -6463,7 +5176,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
- mode_t mode, proc_handler *proc_handler)
+ umode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
@@ -6963,6 +5676,12 @@ out:
return -ENOMEM;
}
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+struct root_domain def_root_domain;
+
static void init_defrootdomain(void)
{
init_rootdomain(&def_root_domain);
@@ -7034,6 +5753,31 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
}
/*
+ * Keep a special pointer to the highest sched_domain that has
+ * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
+ * allows us to avoid some pointer chasing in select_idle_sibling().
+ *
+ * Also keep a unique ID per domain (we use the first cpu number in
+ * the cpumask of the domain), this allows us to quickly tell if
+ * two cpus are in the same cache domain, see ttwu_share_cache().
+ */
+DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_id);
+
+static void update_top_cache_domain(int cpu)
+{
+ struct sched_domain *sd;
+ int id = cpu;
+
+ sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+ if (sd)
+ id = cpumask_first(sched_domain_span(sd));
+
+ rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+ per_cpu(sd_llc_id, cpu) = id;
+}
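A user-space model of what this buys ttwu_share_cache() earlier in the diff (the NR_CPUS value and array are illustrative): once every CPU is tagged with the first CPU of its last-level-cache domain, the "same cache?" question is a single integer compare, with no domain-tree walking on the wakeup path:

	#include <stdbool.h>

	#define NR_CPUS	8

	static int sd_llc_id_sketch[NR_CPUS];

	static void set_llc_domain(const int *cpus, int n)
	{
		int i;

		/* use the first CPU in the domain's mask as the shared id */
		for (i = 0; i < n; i++)
			sd_llc_id_sketch[cpus[i]] = cpus[0];
	}

	static bool share_cache(int this_cpu, int that_cpu)
	{
		return sd_llc_id_sketch[this_cpu] == sd_llc_id_sketch[that_cpu];
	}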
+
+/*
* Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
@@ -7072,6 +5816,8 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
tmp = rq->sd;
rcu_assign_pointer(rq->sd, sd);
destroy_sched_domains(tmp, cpu);
+
+ update_top_cache_domain(cpu);
}
/* cpus with isolated domains */
@@ -7231,7 +5977,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
continue;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, cpu_to_node(i));
+ GFP_KERNEL, cpu_to_node(cpu));
if (!sg)
goto fail;
@@ -7369,6 +6115,12 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
return;
update_group_power(sd, cpu);
+ atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
+}
+
+int __weak arch_sd_sibling_asym_packing(void)
+{
+ return 0*SD_ASYM_PACKING;
}
/*
@@ -7923,54 +6675,52 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
}
#ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
- char *page)
+static ssize_t sched_mc_power_savings_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(page, "%u\n", sched_mc_power_savings);
+ return sprintf(buf, "%u\n", sched_mc_power_savings);
}
-static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 0);
}
-static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
- sched_mc_power_savings_show,
- sched_mc_power_savings_store);
+static DEVICE_ATTR(sched_mc_power_savings, 0644,
+ sched_mc_power_savings_show,
+ sched_mc_power_savings_store);
#endif
#ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
- char *page)
+static ssize_t sched_smt_power_savings_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(page, "%u\n", sched_smt_power_savings);
+ return sprintf(buf, "%u\n", sched_smt_power_savings);
}
-static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 1);
}
-static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+static DEVICE_ATTR(sched_smt_power_savings, 0644,
sched_smt_power_savings_show,
sched_smt_power_savings_store);
#endif
-int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+int __init sched_create_sysfs_power_savings_entries(struct device *dev)
{
int err = 0;
#ifdef CONFIG_SCHED_SMT
if (smt_capable())
- err = sysfs_create_file(&cls->kset.kobj,
- &attr_sched_smt_power_savings.attr);
+ err = device_create_file(dev, &dev_attr_sched_smt_power_savings);
#endif
#ifdef CONFIG_SCHED_MC
if (!err && mc_capable())
- err = sysfs_create_file(&cls->kset.kobj,
- &attr_sched_mc_power_savings.attr);
+ err = device_create_file(dev, &dev_attr_sched_mc_power_savings);
#endif
return err;
}
@@ -8006,29 +6756,6 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
}
}
-static int update_runtime(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- int cpu = (int)(long)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- disable_runtime(cpu_rq(cpu));
- return NOTIFY_OK;
-
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- enable_runtime(cpu_rq(cpu));
- return NOTIFY_OK;
-
- default:
- return NOTIFY_DONE;
- }
-}
-
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
@@ -8077,104 +6804,11 @@ int in_sched_functions(unsigned long addr)
&& addr < (unsigned long)__sched_text_end);
}
-static void init_cfs_rq(struct cfs_rq *cfs_rq)
-{
- cfs_rq->tasks_timeline = RB_ROOT;
- INIT_LIST_HEAD(&cfs_rq->tasks);
- cfs_rq->min_vruntime = (u64)(-(1LL << 20));
-#ifndef CONFIG_64BIT
- cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
-#endif
-}
-
-static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
-{
- struct rt_prio_array *array;
- int i;
-
- array = &rt_rq->active;
- for (i = 0; i < MAX_RT_PRIO; i++) {
- INIT_LIST_HEAD(array->queue + i);
- __clear_bit(i, array->bitmap);
- }
- /* delimiter for bitsearch: */
- __set_bit(MAX_RT_PRIO, array->bitmap);
-
-#if defined CONFIG_SMP
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- rt_rq->highest_prio.next = MAX_RT_PRIO;
- rt_rq->rt_nr_migratory = 0;
- rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks);
-#endif
-
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- rt_rq->rt_runtime = 0;
- raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
- struct sched_entity *se, int cpu,
- struct sched_entity *parent)
-{
- struct rq *rq = cpu_rq(cpu);
-
- cfs_rq->tg = tg;
- cfs_rq->rq = rq;
-#ifdef CONFIG_SMP
- /* allow initial update_cfs_load() to truncate */
- cfs_rq->load_stamp = 1;
-#endif
- init_cfs_rq_runtime(cfs_rq);
-
- tg->cfs_rq[cpu] = cfs_rq;
- tg->se[cpu] = se;
-
- /* se could be NULL for root_task_group */
- if (!se)
- return;
-
- if (!parent)
- se->cfs_rq = &rq->cfs;
- else
- se->cfs_rq = parent->my_q;
-
- se->my_q = cfs_rq;
- update_load_set(&se->load, 0);
- se->parent = parent;
-}
+#ifdef CONFIG_CGROUP_SCHED
+struct task_group root_task_group;
#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
- struct sched_rt_entity *rt_se, int cpu,
- struct sched_rt_entity *parent)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- rt_rq->rt_nr_boosted = 0;
- rt_rq->rq = rq;
- rt_rq->tg = tg;
-
- tg->rt_rq[cpu] = rt_rq;
- tg->rt_se[cpu] = rt_se;
-
- if (!rt_se)
- return;
-
- if (!parent)
- rt_se->rt_rq = &rq->rt;
- else
- rt_se->rt_rq = parent->my_q;
-
- rt_se->my_q = rt_rq;
- rt_se->parent = parent;
- INIT_LIST_HEAD(&rt_se->run_list);
-}
-#endif
+DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
void __init sched_init(void)
{
@@ -8232,9 +6866,17 @@ void __init sched_init(void)
#ifdef CONFIG_CGROUP_SCHED
list_add(&root_task_group.list, &task_groups);
INIT_LIST_HEAD(&root_task_group.children);
+ INIT_LIST_HEAD(&root_task_group.siblings);
autogroup_init(&init_task);
+
#endif /* CONFIG_CGROUP_SCHED */
+#ifdef CONFIG_CGROUP_CPUACCT
+ root_cpuacct.cpustat = &kernel_cpustat;
+ root_cpuacct.cpuusage = alloc_percpu(u64);
+ /* Too early, not expected to fail */
+ BUG_ON(!root_cpuacct.cpuusage);
+#endif
for_each_possible_cpu(i) {
struct rq *rq;
@@ -8246,7 +6888,7 @@ void __init sched_init(void)
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
- root_task_group.shares = root_task_group_load;
+ root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
/*
* How much cpu bandwidth does root_task_group get?
@@ -8296,7 +6938,7 @@ void __init sched_init(void)
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ
- rq->nohz_balance_kick = 0;
+ rq->nohz_flags = 0;
#endif
#endif
init_rq_hrtick(rq);
@@ -8309,10 +6951,6 @@ void __init sched_init(void)
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
-#ifdef CONFIG_SMP
- open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
-#endif
-
#ifdef CONFIG_RT_MUTEXES
plist_head_init(&init_task.pi_waiters);
#endif
@@ -8340,17 +6978,11 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
-#ifdef CONFIG_NO_HZ
- zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
- alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
- atomic_set(&nohz.load_balancer, nr_cpu_ids);
- atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
- atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
-#endif
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
-#endif /* SMP */
+#endif
+ init_sched_fair_class();
scheduler_running = 1;
}
@@ -8502,169 +7134,14 @@ void set_curr_task(int cpu, struct task_struct *p)
#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static void free_fair_sched_group(struct task_group *tg)
-{
- int i;
-
- destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
-
- for_each_possible_cpu(i) {
- if (tg->cfs_rq)
- kfree(tg->cfs_rq[i]);
- if (tg->se)
- kfree(tg->se[i]);
- }
-
- kfree(tg->cfs_rq);
- kfree(tg->se);
-}
-
-static
-int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
-{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se;
- int i;
-
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->cfs_rq)
- goto err;
- tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->se)
- goto err;
-
- tg->shares = NICE_0_LOAD;
-
- init_cfs_bandwidth(tg_cfs_bandwidth(tg));
-
- for_each_possible_cpu(i) {
- cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
- GFP_KERNEL, cpu_to_node(i));
- if (!cfs_rq)
- goto err;
-
- se = kzalloc_node(sizeof(struct sched_entity),
- GFP_KERNEL, cpu_to_node(i));
- if (!se)
- goto err_free_rq;
-
- init_cfs_rq(cfs_rq);
- init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
- }
-
- return 1;
-
-err_free_rq:
- kfree(cfs_rq);
-err:
- return 0;
-}
-
-static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
-
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-#else /* !CONFIG_FAIR_GROUP_SCHED */
-static inline void free_fair_sched_group(struct task_group *tg)
-{
-}
-
-static inline
-int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
-{
- return 1;
-}
-
-static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
-{
-}
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
#ifdef CONFIG_RT_GROUP_SCHED
-static void free_rt_sched_group(struct task_group *tg)
-{
- int i;
-
- if (tg->rt_se)
- destroy_rt_bandwidth(&tg->rt_bandwidth);
-
- for_each_possible_cpu(i) {
- if (tg->rt_rq)
- kfree(tg->rt_rq[i]);
- if (tg->rt_se)
- kfree(tg->rt_se[i]);
- }
-
- kfree(tg->rt_rq);
- kfree(tg->rt_se);
-}
-
-static
-int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
-{
- struct rt_rq *rt_rq;
- struct sched_rt_entity *rt_se;
- int i;
-
- tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->rt_rq)
- goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->rt_se)
- goto err;
-
- init_rt_bandwidth(&tg->rt_bandwidth,
- ktime_to_ns(def_rt_bandwidth.rt_period), 0);
-
- for_each_possible_cpu(i) {
- rt_rq = kzalloc_node(sizeof(struct rt_rq),
- GFP_KERNEL, cpu_to_node(i));
- if (!rt_rq)
- goto err;
-
- rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
- GFP_KERNEL, cpu_to_node(i));
- if (!rt_se)
- goto err_free_rq;
-
- init_rt_rq(rt_rq, cpu_rq(i));
- rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
- init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
- }
-
- return 1;
-
-err_free_rq:
- kfree(rt_rq);
-err:
- return 0;
-}
#else /* !CONFIG_RT_GROUP_SCHED */
-static inline void free_rt_sched_group(struct task_group *tg)
-{
-}
-
-static inline
-int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
-{
- return 1;
-}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
+/* task_group_lock serializes the addition/removal of task groups */
+static DEFINE_SPINLOCK(task_group_lock);
+
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
@@ -8770,47 +7247,6 @@ void sched_move_task(struct task_struct *tsk)
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
-static DEFINE_MUTEX(shares_mutex);
-
-int sched_group_set_shares(struct task_group *tg, unsigned long shares)
-{
- int i;
- unsigned long flags;
-
- /*
- * We can't change the weight of the root cgroup.
- */
- if (!tg->se[0])
- return -EINVAL;
-
- shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
-
- mutex_lock(&shares_mutex);
- if (tg->shares == shares)
- goto done;
-
- tg->shares = shares;
- for_each_possible_cpu(i) {
- struct rq *rq = cpu_rq(i);
- struct sched_entity *se;
-
- se = tg->se[i];
- /* Propagate contribution to hierarchy */
- raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se)
- update_cfs_shares(group_cfs_rq(se));
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
-
-done:
- mutex_unlock(&shares_mutex);
- return 0;
-}
-
-unsigned long sched_group_shares(struct task_group *tg)
-{
- return tg->shares;
-}
#endif
#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
@@ -8835,7 +7271,7 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
struct task_struct *g, *p;
do_each_thread(g, p) {
- if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+ if (rt_task(p) && task_rq(p)->rt.tg == tg)
return 1;
} while_each_thread(g, p);
@@ -9127,24 +7563,31 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
sched_destroy_group(tg);
}
-static int
-cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
- if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
- return -EINVAL;
+ if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+ return -EINVAL;
#else
- /* We don't support RT-tasks being in separate groups */
- if (tsk->sched_class != &fair_sched_class)
- return -EINVAL;
+ /* We don't support RT-tasks being in separate groups */
+ if (task->sched_class != &fair_sched_class)
+ return -EINVAL;
#endif
+ }
return 0;
}
-static void
-cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
- sched_move_task(tsk);
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset)
+ sched_move_task(task);
}
static void
@@ -9186,8 +7629,8 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
{
- int i, ret = 0, runtime_enabled;
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ int i, ret = 0, runtime_enabled, runtime_was_enabled;
+ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
if (tg == &root_task_group)
return -EINVAL;
@@ -9214,6 +7657,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
goto out_unlock;
runtime_enabled = quota != RUNTIME_INF;
+ runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
+ account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
raw_spin_lock_irq(&cfs_b->lock);
cfs_b->period = ns_to_ktime(period);
cfs_b->quota = quota;
@@ -9229,13 +7674,13 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
for_each_possible_cpu(i) {
struct cfs_rq *cfs_rq = tg->cfs_rq[i];
- struct rq *rq = rq_of(cfs_rq);
+ struct rq *rq = cfs_rq->rq;
raw_spin_lock_irq(&rq->lock);
cfs_rq->runtime_enabled = runtime_enabled;
cfs_rq->runtime_remaining = 0;
- if (cfs_rq_throttled(cfs_rq))
+ if (cfs_rq->throttled)
unthrottle_cfs_rq(cfs_rq);
raw_spin_unlock_irq(&rq->lock);
}
@@ -9249,7 +7694,7 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
u64 quota, period;
- period = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
+ period = ktime_to_ns(tg->cfs_bandwidth.period);
if (cfs_quota_us < 0)
quota = RUNTIME_INF;
else
@@ -9262,10 +7707,10 @@ long tg_get_cfs_quota(struct task_group *tg)
{
u64 quota_us;
- if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF)
+ if (tg->cfs_bandwidth.quota == RUNTIME_INF)
return -1;
- quota_us = tg_cfs_bandwidth(tg)->quota;
+ quota_us = tg->cfs_bandwidth.quota;
do_div(quota_us, NSEC_PER_USEC);
return quota_us;
@@ -9276,10 +7721,7 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
u64 quota, period;
period = (u64)cfs_period_us * NSEC_PER_USEC;
- quota = tg_cfs_bandwidth(tg)->quota;
-
- if (period <= 0)
- return -EINVAL;
+ quota = tg->cfs_bandwidth.quota;
return tg_set_cfs_bandwidth(tg, period, quota);
}
@@ -9288,7 +7730,7 @@ long tg_get_cfs_period(struct task_group *tg)
{
u64 cfs_period_us;
- cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period);
+ cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
do_div(cfs_period_us, NSEC_PER_USEC);
return cfs_period_us;
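Worked numbers for the two conversions above (illustrative): writing cfs_quota_us = 50000 and cfs_period_us = 100000 stores quota = 50,000,000 ns and period = 100,000,000 ns, capping the group at half a CPU per period, while a negative cfs_quota_us maps to RUNTIME_INF and removes the cap; reading the values back divides by NSEC_PER_USEC again.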
@@ -9348,13 +7790,13 @@ static u64 normalize_cfs_quota(struct task_group *tg,
static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
struct cfs_schedulable_data *d = data;
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
s64 quota = 0, parent_quota = -1;
if (!tg->parent) {
quota = RUNTIME_INF;
} else {
- struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent);
+ struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
quota = normalize_cfs_quota(tg, d);
parent_quota = parent_b->hierarchal_quota;
@@ -9398,7 +7840,7 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct task_group *tg = cgroup_tg(cgrp);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cb->fill(cb, "nr_periods", cfs_b->nr_periods);
cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
@@ -9480,8 +7922,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu",
.create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy,
- .can_attach_task = cpu_cgroup_can_attach_task,
- .attach_task = cpu_cgroup_attach_task,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
.exit = cpu_cgroup_exit,
.populate = cpu_cgroup_populate,
.subsys_id = cpu_cgroup_subsys_id,
@@ -9499,38 +7941,16 @@ struct cgroup_subsys cpu_cgroup_subsys = {
* (balbir@in.ibm.com).
*/
-/* track cpu usage of a group of tasks and its child groups */
-struct cpuacct {
- struct cgroup_subsys_state css;
- /* cpuusage holds pointer to a u64-type object on every cpu */
- u64 __percpu *cpuusage;
- struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
- struct cpuacct *parent;
-};
-
-struct cgroup_subsys cpuacct_subsys;
-
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
-{
- return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
-/* return cpu accounting group to which this task belongs */
-static inline struct cpuacct *task_ca(struct task_struct *tsk)
-{
- return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
struct cgroup_subsys *ss, struct cgroup *cgrp)
{
- struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- int i;
+ struct cpuacct *ca;
+
+ if (!cgrp->parent)
+ return &root_cpuacct.css;
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto out;
@@ -9538,18 +7958,13 @@ static struct cgroup_subsys_state *cpuacct_create(
if (!ca->cpuusage)
goto out_free_ca;
- for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
- if (percpu_counter_init(&ca->cpustat[i], 0))
- goto out_free_counters;
-
- if (cgrp->parent)
- ca->parent = cgroup_ca(cgrp->parent);
+ ca->cpustat = alloc_percpu(struct kernel_cpustat);
+ if (!ca->cpustat)
+ goto out_free_cpuusage;
return &ca->css;
-out_free_counters:
- while (--i >= 0)
- percpu_counter_destroy(&ca->cpustat[i]);
+out_free_cpuusage:
free_percpu(ca->cpuusage);
out_free_ca:
kfree(ca);
@@ -9562,10 +7977,8 @@ static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = cgroup_ca(cgrp);
- int i;
- for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
- percpu_counter_destroy(&ca->cpustat[i]);
+ free_percpu(ca->cpustat);
free_percpu(ca->cpuusage);
kfree(ca);
}
@@ -9658,16 +8071,31 @@ static const char *cpuacct_stat_desc[] = {
};
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+ struct cgroup_map_cb *cb)
{
struct cpuacct *ca = cgroup_ca(cgrp);
- int i;
+ int cpu;
+ s64 val = 0;
- for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
- s64 val = percpu_counter_read(&ca->cpustat[i]);
- val = cputime64_to_clock_t(val);
- cb->fill(cb, cpuacct_stat_desc[i], val);
+ for_each_online_cpu(cpu) {
+ struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
+ val += kcpustat->cpustat[CPUTIME_USER];
+ val += kcpustat->cpustat[CPUTIME_NICE];
}
+ val = cputime64_to_clock_t(val);
+ cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
+
+ val = 0;
+ for_each_online_cpu(cpu) {
+ struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
+ val += kcpustat->cpustat[CPUTIME_SYSTEM];
+ val += kcpustat->cpustat[CPUTIME_IRQ];
+ val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
+ }
+
+ val = cputime64_to_clock_t(val);
+ cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
+
return 0;
}
@@ -9697,7 +8125,7 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
*
* called with rq->lock held.
*/
-static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
struct cpuacct *ca;
int cpu;
@@ -9711,7 +8139,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
ca = task_ca(tsk);
- for (; ca; ca = ca->parent) {
+ for (; ca; ca = parent_ca(ca)) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
}
@@ -9719,45 +8147,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
rcu_read_unlock();
}
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
- * in cputime_t units. As a result, cpuacct_update_stats calls
- * percpu_counter_add with values large enough to always overflow the
- * per cpu batch limit causing bad SMP scalability.
- *
- * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
- * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
- * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
- */
-#ifdef CONFIG_SMP
-#define CPUACCT_BATCH \
- min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
-#else
-#define CPUACCT_BATCH 0
-#endif
-
-/*
- * Charge the system/user time to the task's accounting group.
- */
-static void cpuacct_update_stats(struct task_struct *tsk,
- enum cpuacct_stat_index idx, cputime_t val)
-{
- struct cpuacct *ca;
- int batch = CPUACCT_BATCH;
-
- if (unlikely(!cpuacct_subsys.active))
- return;
-
- rcu_read_lock();
- ca = task_ca(tsk);
-
- do {
- __percpu_counter_add(&ca->cpustat[idx], val, batch);
- ca = ca->parent;
- } while (ca);
- rcu_read_unlock();
-}
-
struct cgroup_subsys cpuacct_subsys = {
.name = "cpuacct",
.create = cpuacct_create,
diff --git a/kernel/sched_cpupri.c b/kernel/sched/cpupri.c
index a86cf9d9eb1..b0d798eaf13 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -1,5 +1,5 @@
/*
- * kernel/sched_cpupri.c
+ * kernel/sched/cpupri.c
*
* CPU priority management
*
@@ -28,7 +28,7 @@
*/
#include <linux/gfp.h>
-#include "sched_cpupri.h"
+#include "cpupri.h"
/* Convert between a 140 based task->prio, and our 102 based cpupri */
static int convert_prio(int prio)
diff --git a/kernel/sched_cpupri.h b/kernel/sched/cpupri.h
index f6d75617349..f6d75617349 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched/cpupri.h
diff --git a/kernel/sched_debug.c b/kernel/sched/debug.c
index a6710a112b4..2a075e10004 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched/debug.c
@@ -1,5 +1,5 @@
/*
- * kernel/time/sched_debug.c
+ * kernel/sched/debug.c
*
* Print the CFS rbtree
*
@@ -16,6 +16,8 @@
#include <linux/kallsyms.h>
#include <linux/utsname.h>
+#include "sched.h"
+
static DEFINE_SPINLOCK(sched_debug_lock);
/*
@@ -373,7 +375,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
return 0;
}
-static void sysrq_sched_debug_show(void)
+void sysrq_sched_debug_show(void)
{
sched_debug_show(NULL, NULL);
}
diff --git a/kernel/sched_fair.c b/kernel/sched/fair.c
index 5c9e67923b7..8e42de9105f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,13 @@
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/profile.h>
+#include <linux/interrupt.h>
+
+#include <trace/events/sched.h>
+
+#include "sched.h"
/*
* Targeted preemption latency for CPU-bound tasks:
@@ -103,7 +110,110 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
-static const struct sched_class fair_sched_class;
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static int get_update_sysctl_factor(void)
+{
+ unsigned int cpus = min_t(int, num_online_cpus(), 8);
+ unsigned int factor;
+
+ switch (sysctl_sched_tunable_scaling) {
+ case SCHED_TUNABLESCALING_NONE:
+ factor = 1;
+ break;
+ case SCHED_TUNABLESCALING_LINEAR:
+ factor = cpus;
+ break;
+ case SCHED_TUNABLESCALING_LOG:
+ default:
+ factor = 1 + ilog2(cpus);
+ break;
+ }
+
+ return factor;
+}
+
+static void update_sysctl(void)
+{
+ unsigned int factor = get_update_sysctl_factor();
+
+#define SET_SYSCTL(name) \
+ (sysctl_##name = (factor) * normalized_sysctl_##name)
+ SET_SYSCTL(sched_min_granularity);
+ SET_SYSCTL(sched_latency);
+ SET_SYSCTL(sched_wakeup_granularity);
+#undef SET_SYSCTL
+}
+
+void sched_init_granularity(void)
+{
+ update_sysctl();
+}
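For a sense of the scaling above (illustrative numbers): with the default SCHED_TUNABLESCALING_LOG policy and 8 online CPUs, get_update_sysctl_factor() returns 1 + ilog2(8) = 4, so SET_SYSCTL() would publish, say, a normalized 6 ms sched_latency as 24 ms; with SCHED_TUNABLESCALING_NONE the normalized values are used as-is.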
+
+#if BITS_PER_LONG == 32
+# define WMULT_CONST (~0UL)
+#else
+# define WMULT_CONST (1UL << 32)
+#endif
+
+#define WMULT_SHIFT 32
+
+/*
+ * Shift right and round:
+ */
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+
+/*
+ * delta *= weight / lw
+ */
+static unsigned long
+calc_delta_mine(unsigned long delta_exec, unsigned long weight,
+ struct load_weight *lw)
+{
+ u64 tmp;
+
+ /*
+ * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
+ * entities since MIN_SHARES = 2. Treat weight as 1 if less than
+ * 2^SCHED_LOAD_RESOLUTION.
+ */
+ if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
+ tmp = (u64)delta_exec * scale_load_down(weight);
+ else
+ tmp = (u64)delta_exec;
+
+ if (!lw->inv_weight) {
+ unsigned long w = scale_load_down(lw->weight);
+
+ if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+ lw->inv_weight = 1;
+ else if (unlikely(!w))
+ lw->inv_weight = WMULT_CONST;
+ else
+ lw->inv_weight = WMULT_CONST / w;
+ }
+
+ /*
+ * Check whether we'd overflow the 64-bit multiplication:
+ */
+ if (unlikely(tmp > WMULT_CONST))
+ tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+ WMULT_SHIFT/2);
+ else
+ tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+
+ return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+}
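/*
 * Illustrative sketch, not part of this patch: the fixed-point
 * shift-right-and-round divide that calc_delta_mine() performs on its
 * non-overflow path, for assumed example weights (nice-0 weight 1024,
 * runqueue weight 2048, i.e. two nice-0 tasks sharing the CPU).
 */
#include <stdio.h>
#include <stdint.h>

#define EX_WMULT_SHIFT  32
#define EX_WMULT_CONST  (1ULL << 32)
#define EX_SRR(x, y)    (((x) + (1ULL << ((y) - 1))) >> (y))

int main(void)
{
    uint64_t delta_exec = 1000000;          /* 1 ms of runtime, in ns */
    uint64_t weight = 1024;                 /* this entity's weight */
    uint64_t lw_weight = 2048;              /* total load on the queue */
    uint64_t inv_weight = EX_WMULT_CONST / lw_weight;
    uint64_t tmp = delta_exec * weight;

    /* delta *= weight / lw, done as (delta * weight * inv_weight) >> 32 */
    printf("scaled delta = %llu ns\n",
           (unsigned long long)EX_SRR(tmp * inv_weight, EX_WMULT_SHIFT));
    return 0;   /* prints 500000, i.e. delta scaled by 1024/2048 */
}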
+
+
+const struct sched_class fair_sched_class;
/**************************************************************
* CFS operations on generic schedulable entities:
@@ -413,7 +523,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
-static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
+struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = cfs_rq->rb_leftmost;
@@ -434,7 +544,7 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
}
#ifdef CONFIG_SCHED_DEBUG
-static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
@@ -684,7 +794,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
- inc_cpu_load(rq_of(cfs_rq), se->load.weight);
+ update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
if (entity_is_task(se)) {
add_cfs_task_weight(cfs_rq, se->load.weight);
list_add(&se->group_node, &cfs_rq->tasks);
@@ -697,7 +807,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
- dec_cpu_load(rq_of(cfs_rq), se->load.weight);
+ update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
if (entity_is_task(se)) {
add_cfs_task_weight(cfs_rq, -se->load.weight);
list_del_init(&se->group_node);
@@ -772,19 +882,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+ long tg_weight;
+
+ /*
+ * Use this CPU's actual weight instead of the last load_contribution
+ * to gain a more accurate current total weight. See
+ * update_cfs_rq_load_contribution().
+ */
+ tg_weight = atomic_read(&tg->load_weight);
+ tg_weight -= cfs_rq->load_contribution;
+ tg_weight += cfs_rq->load.weight;
+
+ return tg_weight;
+}
+
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long load_weight, load, shares;
+ long tg_weight, load, shares;
+ tg_weight = calc_tg_weight(tg, cfs_rq);
load = cfs_rq->load.weight;
- load_weight = atomic_read(&tg->load_weight);
- load_weight += load;
- load_weight -= cfs_rq->load_contribution;
-
shares = (tg->shares * load);
- if (load_weight)
- shares /= load_weight;
+ if (tg_weight)
+ shares /= tg_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
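/*
 * Illustrative sketch, not part of this patch: the share a group's per-cpu
 * entity would end up with from the calc_cfs_shares() logic above, for
 * assumed example weights (tg->shares 1024, with half of the group's total
 * weight running on this cpu; MIN_SHARES is 2 in this code).
 */
#include <stdio.h>

#define EX_MIN_SHARES   2

int main(void)
{
    long tg_shares = 1024;  /* group's configured shares (assumed) */
    long tg_weight = 4096;  /* calc_tg_weight(): weight across all CPUs */
    long load = 2048;       /* this cfs_rq's contribution to that weight */
    long shares = tg_shares * load;

    if (tg_weight)
        shares /= tg_weight;
    if (shares < EX_MIN_SHARES)
        shares = EX_MIN_SHARES;

    printf("per-cpu shares = %ld\n", shares);   /* prints 512 */
    return 0;
}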
@@ -880,7 +1003,6 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (unlikely(delta > se->statistics.sleep_max))
se->statistics.sleep_max = delta;
- se->statistics.sleep_start = 0;
se->statistics.sum_sleep_runtime += delta;
if (tsk) {
@@ -897,7 +1019,6 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if (unlikely(delta > se->statistics.block_max))
se->statistics.block_max = delta;
- se->statistics.block_start = 0;
se->statistics.sum_sleep_runtime += delta;
if (tsk) {
@@ -907,6 +1028,8 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
trace_sched_stat_iowait(tsk, delta);
}
+ trace_sched_stat_blocked(tsk, delta);
+
/*
* Blocking time is in units of nanosecs, so shift by
* 20 to get a milliseconds-range estimation of the
@@ -1274,6 +1397,32 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
*/
#ifdef CONFIG_CFS_BANDWIDTH
+
+#ifdef HAVE_JUMP_LABEL
+static struct jump_label_key __cfs_bandwidth_used;
+
+static inline bool cfs_bandwidth_used(void)
+{
+ return static_branch(&__cfs_bandwidth_used);
+}
+
+void account_cfs_bandwidth_used(int enabled, int was_enabled)
+{
+ /* only need to count groups transitioning between enabled/!enabled */
+ if (enabled && !was_enabled)
+ jump_label_inc(&__cfs_bandwidth_used);
+ else if (!enabled && was_enabled)
+ jump_label_dec(&__cfs_bandwidth_used);
+}
+#else /* HAVE_JUMP_LABEL */
+static bool cfs_bandwidth_used(void)
+{
+ return true;
+}
+
+void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+#endif /* HAVE_JUMP_LABEL */
+
/*
* default period for cfs group bandwidth.
* default: 0.1s, units: nanoseconds
@@ -1295,7 +1444,7 @@ static inline u64 sched_cfs_bandwidth_slice(void)
*
* requires cfs_b->lock
*/
-static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
+void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
{
u64 now;
@@ -1307,6 +1456,11 @@ static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
}
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+ return &tg->cfs_bandwidth;
+}
+
/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
@@ -1408,7 +1562,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
unsigned long delta_exec)
{
- if (!cfs_rq->runtime_enabled)
+ if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
__account_cfs_rq_runtime(cfs_rq, delta_exec);
@@ -1416,13 +1570,13 @@ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
- return cfs_rq->throttled;
+ return cfs_bandwidth_used() && cfs_rq->throttled;
}
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
- return cfs_rq->throttle_count;
+ return cfs_bandwidth_used() && cfs_rq->throttle_count;
}
/*
@@ -1517,7 +1671,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
raw_spin_unlock(&cfs_b->lock);
}
-static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1743,7 +1897,10 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
- if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+ if (!cfs_bandwidth_used())
+ return;
+
+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
@@ -1788,6 +1945,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
*/
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
+ if (!cfs_bandwidth_used())
+ return;
+
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
@@ -1805,6 +1965,9 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
/* conditionally throttle active cfs_rq's from put_prev_entity() */
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
+ if (!cfs_bandwidth_used())
+ return;
+
if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
return;
@@ -1817,7 +1980,112 @@ static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
throttle_cfs_rq(cfs_rq);
}
-#else
+
+static inline u64 default_cfs_period(void);
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
+static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
+
+static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
+{
+ struct cfs_bandwidth *cfs_b =
+ container_of(timer, struct cfs_bandwidth, slack_timer);
+ do_sched_cfs_slack_timer(cfs_b);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+{
+ struct cfs_bandwidth *cfs_b =
+ container_of(timer, struct cfs_bandwidth, period_timer);
+ ktime_t now;
+ int overrun;
+ int idle = 0;
+
+ for (;;) {
+ now = hrtimer_cb_get_time(timer);
+ overrun = hrtimer_forward(timer, now, cfs_b->period);
+
+ if (!overrun)
+ break;
+
+ idle = do_sched_cfs_period_timer(cfs_b, overrun);
+ }
+
+ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+ raw_spin_lock_init(&cfs_b->lock);
+ cfs_b->runtime = 0;
+ cfs_b->quota = RUNTIME_INF;
+ cfs_b->period = ns_to_ktime(default_cfs_period());
+
+ INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
+ hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cfs_b->period_timer.function = sched_cfs_period_timer;
+ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cfs_b->slack_timer.function = sched_cfs_slack_timer;
+}
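/*
 * Illustrative sketch, not part of this patch: how a CFS bandwidth quota
 * relates to the 0.1s default period mentioned earlier. The 50 ms quota is
 * an assumed example value.
 */
#include <stdio.h>

int main(void)
{
    long long period_ns = 100000000LL;  /* 0.1 s default period */
    long long quota_ns = 50000000LL;    /* example quota per period */

    /* such a group may consume at most half a CPU on average */
    printf("max average CPU usage = %.2f CPUs\n",
           (double)quota_ns / (double)period_ns);
    return 0;
}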
+
+static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->runtime_enabled = 0;
+ INIT_LIST_HEAD(&cfs_rq->throttled_list);
+}
+
+/* requires cfs_b->lock, may release to reprogram timer */
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+ /*
+ * The timer may be active because we're trying to set a new bandwidth
+ * period or because we're racing with the tear-down path
+ * (timer_active==0 becomes visible before the hrtimer call-back
+ * terminates). In either case we ensure that it's re-programmed
+ */
+ while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
+ raw_spin_unlock(&cfs_b->lock);
+ /* ensure cfs_b->lock is available while we wait */
+ hrtimer_cancel(&cfs_b->period_timer);
+
+ raw_spin_lock(&cfs_b->lock);
+ /* if someone else restarted the timer then we're done */
+ if (cfs_b->timer_active)
+ return;
+ }
+
+ cfs_b->timer_active = 1;
+ start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
+}
+
+static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+{
+ hrtimer_cancel(&cfs_b->period_timer);
+ hrtimer_cancel(&cfs_b->slack_timer);
+}
+
+void unthrottle_offline_cfs_rqs(struct rq *rq)
+{
+ struct cfs_rq *cfs_rq;
+
+ for_each_leaf_cfs_rq(rq, cfs_rq) {
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+
+ if (!cfs_rq->runtime_enabled)
+ continue;
+
+ /*
+ * clock_task is not advancing so we just need to make sure
+ * there's some valid quota amount
+ */
+ cfs_rq->runtime_remaining = cfs_b->quota;
+ if (cfs_rq_throttled(cfs_rq))
+ unthrottle_cfs_rq(cfs_rq);
+ }
+}
+
+#else /* CONFIG_CFS_BANDWIDTH */
static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
unsigned long delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -1839,8 +2107,22 @@ static inline int throttled_lb_pair(struct task_group *tg,
{
return 0;
}
+
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif
+static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+{
+ return NULL;
+}
+static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
/**************************************************
* CFS operations on tasks:
*/
@@ -1853,7 +2135,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
WARN_ON(task_rq(p) != rq);
- if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
+ if (cfs_rq->nr_running > 1) {
u64 slice = sched_slice(cfs_rq, se);
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
s64 delta = slice - ran;
@@ -1884,7 +2166,7 @@ static void hrtick_update(struct rq *rq)
{
struct task_struct *curr = rq->curr;
- if (curr->sched_class != &fair_sched_class)
+ if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
return;
if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
@@ -2007,6 +2289,61 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
}
#ifdef CONFIG_SMP
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+ return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0 || !sched_feat(LB_BIAS))
+ return total;
+
+ return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long total = weighted_cpuload(cpu);
+
+ if (type == 0 || !sched_feat(LB_BIAS))
+ return total;
+
+ return max(rq->cpu_load[type-1], total);
+}
+
+static unsigned long power_of(int cpu)
+{
+ return cpu_rq(cpu)->cpu_power;
+}
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+
+ if (nr_running)
+ return rq->load.weight / nr_running;
+
+ return 0;
+}
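/*
 * Illustrative sketch, not part of this patch: the conservative biasing done
 * by source_load()/target_load() above when LB_BIAS is enabled and type != 0,
 * using assumed load samples.
 */
#include <stdio.h>

int main(void)
{
    unsigned long now = 3072;   /* instantaneous rq->load.weight */
    unsigned long hist = 2048;  /* decayed rq->cpu_load[type-1] sample */

    /* under-estimate a migration source, over-estimate a target */
    printf("source_load = %lu\n", hist < now ? hist : now); /* min -> 2048 */
    printf("target_load = %lu\n", hist > now ? hist : now); /* max -> 3072 */
    return 0;
}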
+
static void task_waking_fair(struct task_struct *p)
{
@@ -2036,36 +2373,100 @@ static void task_waking_fair(struct task_struct *p)
* Adding load to a group doesn't make a group heavier, but can cause movement
* of group shares between cpus. Assuming the shares were perfectly aligned one
* can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ * s_i = rw_i / \Sum rw_j (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ * rw_i = { 2, 4, 1, 0 }
+ * s_i = { 2/7, 4/7, 1/7, 0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on); we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wg we can compute the new shares distribution (s'_i) using:
+ *
+ * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ * rw'_i = { 3, 4, 1, 0 }
+ * s'_i = { 3/8, 4/8, 1/8, 0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ * dw_i = S * (s'_i - s_i) (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
*/
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent)
+ if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
- long lw, w;
+ long w, W;
tg = se->my_q->tg;
- w = se->my_q->load.weight;
- /* use this cpu's instantaneous contribution */
- lw = atomic_read(&tg->load_weight);
- lw -= se->my_q->load_contribution;
- lw += w + wg;
+ /*
+ * W = @wg + \Sum rw_j
+ */
+ W = wg + calc_tg_weight(tg, se->my_q);
- wl += w;
+ /*
+ * w = rw_i + @wl
+ */
+ w = se->my_q->load.weight + wl;
- if (lw > 0 && wl < lw)
- wl = (wl * tg->shares) / lw;
+ /*
+ * wl = S * s'_i; see (2)
+ */
+ if (W > 0 && w < W)
+ wl = (w * tg->shares) / W;
else
wl = tg->shares;
- /* zero point is MIN_SHARES */
+ /*
+ * Per the above, wl is the new se->load.weight value; since
+ * those are clipped to [MIN_SHARES, ...) do so now. See
+ * calc_cfs_shares().
+ */
if (wl < MIN_SHARES)
wl = MIN_SHARES;
+
+ /*
+ * wl = dw_i = S * (s'_i - s_i); see (3)
+ */
wl -= se->load.weight;
+
+ /*
+ * Recursively apply this logic to all parent groups to compute
+ * the final effective load change on the root group. Since
+ * only the @tg group gets extra weight, all parent groups can
+ * only redistribute existing shares. @wl is the shift in shares
+ * resulting from this level per the above.
+ */
wg = 0;
}
@@ -2249,6 +2650,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd;
+ struct sched_group *sg;
int i;
/*
@@ -2269,25 +2671,28 @@ static int select_idle_sibling(struct task_struct *p, int target)
* Otherwise, iterate the domains and find an eligible idle cpu.
*/
rcu_read_lock();
- for_each_domain(target, sd) {
- if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
- break;
- for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- target = i;
- break;
+ sd = rcu_dereference(per_cpu(sd_llc, target));
+ for_each_lower_domain(sd) {
+ sg = sd->groups;
+ do {
+ if (!cpumask_intersects(sched_group_cpus(sg),
+ tsk_cpus_allowed(p)))
+ goto next;
+
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (!idle_cpu(i))
+ goto next;
}
- }
- /*
- * Lets stop looking for an idle sibling when we reached
- * the domain that spans the current cpu and prev_cpu.
- */
- if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- break;
+ target = cpumask_first_and(sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ goto done;
+next:
+ sg = sg->next;
+ } while (sg != sd->groups);
}
+done:
rcu_read_unlock();
return target;
@@ -2315,6 +2720,9 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
int want_sd = 1;
int sync = wake_flags & WF_SYNC;
+ if (p->rt.nr_cpus_allowed == 1)
+ return prev_cpu;
+
if (sd_flag & SD_BALANCE_WAKE) {
if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
want_affine = 1;
@@ -2599,7 +3007,8 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
} while (cfs_rq);
p = task_of(se);
- hrtick_start_fair(rq, p);
+ if (hrtick_enabled(rq))
+ hrtick_start_fair(rq, p);
return p;
}
@@ -2643,6 +3052,12 @@ static void yield_task_fair(struct rq *rq)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ /*
+ * Tell update_rq_clock() that we've just updated,
+ * so we don't do microscopic update in schedule()
+ * and double the fastpath cost.
+ */
+ rq->skip_clock_update = 1;
}
set_skip_buddy(se);
@@ -2683,12 +3098,48 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
}
/*
+ * Is this task likely cache-hot:
+ */
+static int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+ s64 delta;
+
+ if (p->sched_class != &fair_sched_class)
+ return 0;
+
+ if (unlikely(p->policy == SCHED_IDLE))
+ return 0;
+
+ /*
+ * Buddy candidates are cache hot:
+ */
+ if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+ (&p->se == cfs_rq_of(&p->se)->next ||
+ &p->se == cfs_rq_of(&p->se)->last))
+ return 1;
+
+ if (sysctl_sched_migration_cost == -1)
+ return 1;
+ if (sysctl_sched_migration_cost == 0)
+ return 0;
+
+ delta = now - p->se.exec_start;
+
+ return delta < (s64)sysctl_sched_migration_cost;
+}
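/*
 * Illustrative sketch, not part of this patch: how the migration-cost cutoff
 * in task_hot() above classifies tasks, assuming the default
 * sysctl_sched_migration_cost of 0.5 ms.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t cost = 500000;      /* ns; assumed default sysctl value */
    int64_t deltas[] = { 100000, 499999, 500000, 2000000 };
    int i;

    for (i = 0; i < 4; i++)
        printf("last ran %7lld ns ago -> %s\n", (long long)deltas[i],
               deltas[i] < cost ? "cache hot, avoid migrating" : "cold");
    return 0;
}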
+
+#define LBF_ALL_PINNED 0x01
+#define LBF_NEED_BREAK 0x02
+#define LBF_ABORT 0x04
+
+/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned)
+ int *lb_flags)
{
int tsk_cache_hot = 0;
/*
@@ -2701,7 +3152,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
return 0;
}
- *all_pinned = 0;
+ *lb_flags &= ~LBF_ALL_PINNED;
if (task_running(rq, p)) {
schedstat_inc(p, se.statistics.nr_failed_migrations_running);
@@ -2775,7 +3226,7 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move, struct sched_domain *sd,
- enum cpu_idle_type idle, int *all_pinned,
+ enum cpu_idle_type idle, int *lb_flags,
struct cfs_rq *busiest_cfs_rq)
{
int loops = 0, pulled = 0;
@@ -2786,12 +3237,14 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
goto out;
list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
- if (loops++ > sysctl_sched_nr_migrate)
+ if (loops++ > sysctl_sched_nr_migrate) {
+ *lb_flags |= LBF_NEED_BREAK;
break;
+ }
if ((p->se.load.weight >> 1) > rem_load_move ||
!can_migrate_task(p, busiest, this_cpu, sd, idle,
- all_pinned))
+ lb_flags))
continue;
pull_task(busiest, p, this_rq, this_cpu);
@@ -2804,8 +3257,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
* kernels will stop after the first task is pulled to minimize
* the critical section.
*/
- if (idle == CPU_NEWLY_IDLE)
+ if (idle == CPU_NEWLY_IDLE) {
+ *lb_flags |= LBF_ABORT;
break;
+ }
#endif
/*
@@ -2910,7 +3365,7 @@ static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned)
+ int *lb_flags)
{
long rem_load_move = max_load_move;
struct cfs_rq *busiest_cfs_rq;
@@ -2923,6 +3378,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long busiest_weight = busiest_cfs_rq->load.weight;
u64 rem_load, moved_load;
+ if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+ break;
+
/*
* empty group or part of a throttled hierarchy
*/
@@ -2934,7 +3392,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
rem_load = div_u64(rem_load, busiest_h_load + 1);
moved_load = balance_tasks(this_rq, this_cpu, busiest,
- rem_load, sd, idle, all_pinned,
+ rem_load, sd, idle, lb_flags,
busiest_cfs_rq);
if (!moved_load)
@@ -2960,10 +3418,10 @@ static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned)
+ int *lb_flags)
{
return balance_tasks(this_rq, this_cpu, busiest,
- max_load_move, sd, idle, all_pinned,
+ max_load_move, sd, idle, lb_flags,
&busiest->cfs);
}
#endif
@@ -2978,29 +3436,30 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *all_pinned)
+ int *lb_flags)
{
unsigned long total_load_moved = 0, load_moved;
do {
load_moved = load_balance_fair(this_rq, this_cpu, busiest,
max_load_move - total_load_moved,
- sd, idle, all_pinned);
+ sd, idle, lb_flags);
total_load_moved += load_moved;
+ if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+ break;
+
#ifdef CONFIG_PREEMPT
/*
* NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is pulled to minimize
* the critical section.
*/
- if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
- break;
-
- if (raw_spin_is_contended(&this_rq->lock) ||
- raw_spin_is_contended(&busiest->lock))
+ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
+ *lb_flags |= LBF_ABORT;
break;
+ }
#endif
} while (load_moved && max_load_move > total_load_moved);
@@ -3062,15 +3521,6 @@ struct sg_lb_stats {
};
/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
- return cpumask_first(sched_group_cpus(group));
-}
-
-/**
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
* @idle: The idle status of the CPU for which the sd load_idx is obtained.
@@ -3319,7 +3769,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
sdg->sgp->power = power;
}
-static void update_group_power(struct sched_domain *sd, int cpu)
+void update_group_power(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
@@ -3511,7 +3961,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
}
/**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @sd: sched_domain whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
@@ -3585,11 +4035,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
} while (sg != sd->groups);
}
-int __weak arch_sd_sibling_asym_packing(void)
-{
- return 0*SD_ASYM_PACKING;
-}
-
/**
* check_asym_packing - Check to see if the group is packed into the
* sched domain.
@@ -3953,7 +4398,7 @@ find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
#define MAX_PINNED_INTERVAL 512
/* Working cpumask for load_balance and load_balance_newidle. */
-static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
static int need_active_balance(struct sched_domain *sd, int idle,
int busiest_cpu, int this_cpu)
@@ -4004,7 +4449,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
int *balance)
{
- int ld_moved, all_pinned = 0, active_balance = 0;
+ int ld_moved, lb_flags = 0, active_balance = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
@@ -4045,11 +4490,11 @@ redo:
* still unbalanced. ld_moved simply stays zero, so it is
* correctly treated as an imbalance.
*/
- all_pinned = 1;
+ lb_flags |= LBF_ALL_PINNED;
local_irq_save(flags);
double_rq_lock(this_rq, busiest);
ld_moved = move_tasks(this_rq, this_cpu, busiest,
- imbalance, sd, idle, &all_pinned);
+ imbalance, sd, idle, &lb_flags);
double_rq_unlock(this_rq, busiest);
local_irq_restore(flags);
@@ -4059,8 +4504,16 @@ redo:
if (ld_moved && this_cpu != smp_processor_id())
resched_cpu(this_cpu);
+ if (lb_flags & LBF_ABORT)
+ goto out_balanced;
+
+ if (lb_flags & LBF_NEED_BREAK) {
+ lb_flags &= ~LBF_NEED_BREAK;
+ goto redo;
+ }
+
/* All tasks on this runqueue were pinned by CPU affinity */
- if (unlikely(all_pinned)) {
+ if (unlikely(lb_flags & LBF_ALL_PINNED)) {
cpumask_clear_cpu(cpu_of(busiest), cpus);
if (!cpumask_empty(cpus))
goto redo;
@@ -4090,7 +4543,7 @@ redo:
tsk_cpus_allowed(busiest->curr))) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
- all_pinned = 1;
+ lb_flags |= LBF_ALL_PINNED;
goto out_one_pinned;
}
@@ -4143,7 +4596,8 @@ out_balanced:
out_one_pinned:
/* tune up the balancing interval */
- if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+ if (((lb_flags & LBF_ALL_PINNED) &&
+ sd->balance_interval < MAX_PINNED_INTERVAL) ||
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
@@ -4156,7 +4610,7 @@ out:
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*/
-static void idle_balance(int this_cpu, struct rq *this_rq)
+void idle_balance(int this_cpu, struct rq *this_rq)
{
struct sched_domain *sd;
int pulled_task = 0;
@@ -4271,28 +4725,16 @@ out_unlock:
#ifdef CONFIG_NO_HZ
/*
* idle load balancing details
- * - One of the idle CPUs nominates itself as idle load_balancer, while
- * entering idle.
- * - This idle load balancer CPU will also go into tickless mode when
- * it is idle, just like all other idle CPUs
* - When one of the busy CPUs notices that idle rebalancing may be
* needed, it will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
static struct {
- atomic_t load_balancer;
- atomic_t first_pick_cpu;
- atomic_t second_pick_cpu;
cpumask_var_t idle_cpus_mask;
- cpumask_var_t grp_idle_mask;
+ atomic_t nr_cpus;
unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned;
-int get_nohz_load_balancer(void)
-{
- return atomic_read(&nohz.load_balancer);
-}
-
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
* lowest_flag_domain - Return lowest sched_domain containing flag.
@@ -4329,33 +4771,6 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
(sd && (sd->flags & flag)); sd = sd->parent)
/**
- * is_semi_idle_group - Checks if the given sched_group is semi-idle.
- * @ilb_group: group to be checked for semi-idleness
- *
- * Returns: 1 if the group is semi-idle. 0 otherwise.
- *
- * We define a sched_group to be semi idle if it has atleast one idle-CPU
- * and atleast one non-idle CPU. This helper function checks if the given
- * sched_group is semi-idle or not.
- */
-static inline int is_semi_idle_group(struct sched_group *ilb_group)
-{
- cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
- sched_group_cpus(ilb_group));
-
- /*
- * A sched_group is semi-idle when it has atleast one busy cpu
- * and atleast one idle cpu.
- */
- if (cpumask_empty(nohz.grp_idle_mask))
- return 0;
-
- if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
- return 0;
-
- return 1;
-}
-/**
* find_new_ilb - Finds the optimum idle load balancer for nomination.
* @cpu: The cpu which is nominating a new idle_load_balancer.
*
@@ -4369,9 +4784,9 @@ static inline int is_semi_idle_group(struct sched_group *ilb_group)
*/
static int find_new_ilb(int cpu)
{
+ int ilb = cpumask_first(nohz.idle_cpus_mask);
+ struct sched_group *ilbg;
struct sched_domain *sd;
- struct sched_group *ilb_group;
- int ilb = nr_cpu_ids;
/*
* Have idle load balancer selection from semi-idle packages only
@@ -4389,23 +4804,28 @@ static int find_new_ilb(int cpu)
rcu_read_lock();
for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
- ilb_group = sd->groups;
+ ilbg = sd->groups;
do {
- if (is_semi_idle_group(ilb_group)) {
- ilb = cpumask_first(nohz.grp_idle_mask);
+ if (ilbg->group_weight !=
+ atomic_read(&ilbg->sgp->nr_busy_cpus)) {
+ ilb = cpumask_first_and(nohz.idle_cpus_mask,
+ sched_group_cpus(ilbg));
goto unlock;
}
- ilb_group = ilb_group->next;
+ ilbg = ilbg->next;
- } while (ilb_group != sd->groups);
+ } while (ilbg != sd->groups);
}
unlock:
rcu_read_unlock();
out_done:
- return ilb;
+ if (ilb < nr_cpu_ids && idle_cpu(ilb))
+ return ilb;
+
+ return nr_cpu_ids;
}
#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline int find_new_ilb(int call_cpu)
@@ -4425,99 +4845,68 @@ static void nohz_balancer_kick(int cpu)
nohz.next_balance++;
- ilb_cpu = get_nohz_load_balancer();
-
- if (ilb_cpu >= nr_cpu_ids) {
- ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
- if (ilb_cpu >= nr_cpu_ids)
- return;
- }
+ ilb_cpu = find_new_ilb(cpu);
- if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
- cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
+ if (ilb_cpu >= nr_cpu_ids)
+ return;
- smp_mb();
- /*
- * Use smp_send_reschedule() instead of resched_cpu().
- * This way we generate a sched IPI on the target cpu which
- * is idle. And the softirq performing nohz idle load balance
- * will be run before returning from the IPI.
- */
- smp_send_reschedule(ilb_cpu);
- }
+ if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+ return;
+ /*
+ * Use smp_send_reschedule() instead of resched_cpu().
+ * This way we generate a sched IPI on the target cpu which
+ * is idle. And the softirq performing nohz idle load balance
+ * will be run before returning from the IPI.
+ */
+ smp_send_reschedule(ilb_cpu);
return;
}
-/*
- * This routine will try to nominate the ilb (idle load balancing)
- * owner among the cpus whose ticks are stopped. ilb owner will do the idle
- * load balancing on behalf of all those cpus.
- *
- * When the ilb owner becomes busy, we will not have new ilb owner until some
- * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
- * idle load balancing by kicking one of the idle CPUs.
- *
- * Ticks are stopped for the ilb owner as well, with busy CPU kicking this
- * ilb owner CPU in future (when there is a need for idle load balancing on
- * behalf of all idle CPUs).
- */
-void select_nohz_load_balancer(int stop_tick)
+static inline void set_cpu_sd_state_busy(void)
{
+ struct sched_domain *sd;
int cpu = smp_processor_id();
- if (stop_tick) {
- if (!cpu_active(cpu)) {
- if (atomic_read(&nohz.load_balancer) != cpu)
- return;
-
- /*
- * If we are going offline and still the leader,
- * give up!
- */
- if (atomic_cmpxchg(&nohz.load_balancer, cpu,
- nr_cpu_ids) != cpu)
- BUG();
+ if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+ return;
+ clear_bit(NOHZ_IDLE, nohz_flags(cpu));
- return;
- }
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+ rcu_read_unlock();
+}
- cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
+void set_cpu_sd_state_idle(void)
+{
+ struct sched_domain *sd;
+ int cpu = smp_processor_id();
- if (atomic_read(&nohz.first_pick_cpu) == cpu)
- atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
- if (atomic_read(&nohz.second_pick_cpu) == cpu)
- atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
+ if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+ return;
+ set_bit(NOHZ_IDLE, nohz_flags(cpu));
- if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
- int new_ilb;
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+ rcu_read_unlock();
+}
- /* make me the ilb owner */
- if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
- cpu) != nr_cpu_ids)
- return;
+/*
+ * This routine will record that this cpu is going idle with tick stopped.
+ * This info will be used in performing idle load balancing in the future.
+ */
+void select_nohz_load_balancer(int stop_tick)
+{
+ int cpu = smp_processor_id();
- /*
- * Check to see if there is a more power-efficient
- * ilb.
- */
- new_ilb = find_new_ilb(cpu);
- if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
- atomic_set(&nohz.load_balancer, nr_cpu_ids);
- resched_cpu(new_ilb);
- return;
- }
- return;
- }
- } else {
- if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+ if (stop_tick) {
+ if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
return;
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-
- if (atomic_read(&nohz.load_balancer) == cpu)
- if (atomic_cmpxchg(&nohz.load_balancer, cpu,
- nr_cpu_ids) != cpu)
- BUG();
+ cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_inc(&nohz.nr_cpus);
+ set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
return;
}
@@ -4531,7 +4920,7 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
* Scale the max load_balance interval with the number of CPUs in the system.
* This trades load-balance latency on larger machines for less cross talk.
*/
-static void update_max_interval(void)
+void update_max_interval(void)
{
max_load_balance_interval = HZ*num_online_cpus()/10;
}
@@ -4623,11 +5012,12 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
struct rq *rq;
int balance_cpu;
- if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
- return;
+ if (idle != CPU_IDLE ||
+ !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+ goto end;
for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
- if (balance_cpu == this_cpu)
+ if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
continue;
/*
@@ -4635,10 +5025,8 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
* work being done for other cpus. Next load
* balancing owner will pick it up.
*/
- if (need_resched()) {
- this_rq->nohz_balance_kick = 0;
+ if (need_resched())
break;
- }
raw_spin_lock_irq(&this_rq->lock);
update_rq_clock(this_rq);
@@ -4652,53 +5040,75 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
this_rq->next_balance = rq->next_balance;
}
nohz.next_balance = this_rq->next_balance;
- this_rq->nohz_balance_kick = 0;
+end:
+ clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
/*
- * Current heuristic for kicking the idle load balancer
- * - first_pick_cpu is the one of the busy CPUs. It will kick
- * idle load balancer when it has more than one process active. This
- * eliminates the need for idle load balancing altogether when we have
- * only one running process in the system (common case).
- * - If there are more than one busy CPU, idle load balancer may have
- * to run for active_load_balance to happen (i.e., two busy CPUs are
- * SMT or core siblings and can run better if they move to different
- * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
- * which will kick idle load balancer as soon as it has any load.
+ * Current heuristic for kicking the idle load balancer in the presence
+ * of an idle cpu in the system:
+ * - This rq has more than one task.
+ * - At any scheduler domain level, this cpu's scheduler group has multiple
+ * busy cpus exceeding the group's power.
+ * - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
+ * domain span are idle.
*/
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
unsigned long now = jiffies;
- int ret;
- int first_pick_cpu, second_pick_cpu;
+ struct sched_domain *sd;
- if (time_before(now, nohz.next_balance))
+ if (unlikely(idle_cpu(cpu)))
return 0;
- if (idle_cpu(cpu))
- return 0;
+ /*
+ * We may have recently been in ticked or tickless idle mode. At the first
+ * busy tick after returning from idle, we will update the busy stats.
+ */
+ set_cpu_sd_state_busy();
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
- first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
- second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
+ /*
+ * None are in tickless mode and hence no need for NOHZ idle load
+ * balancing.
+ */
+ if (likely(!atomic_read(&nohz.nr_cpus)))
+ return 0;
- if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
- second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
+ if (time_before(now, nohz.next_balance))
return 0;
- ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
- if (ret == nr_cpu_ids || ret == cpu) {
- atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
- if (rq->nr_running > 1)
- return 1;
- } else {
- ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
- if (ret == nr_cpu_ids || ret == cpu) {
- if (rq->nr_running)
- return 1;
- }
+ if (rq->nr_running >= 2)
+ goto need_kick;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+ struct sched_group *sg = sd->groups;
+ struct sched_group_power *sgp = sg->sgp;
+ int nr_busy = atomic_read(&sgp->nr_busy_cpus);
+
+ if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
+ goto need_kick_unlock;
+
+ if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
+ && (cpumask_first_and(nohz.idle_cpus_mask,
+ sched_domain_span(sd)) < cpu))
+ goto need_kick_unlock;
+
+ if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
+ break;
}
+ rcu_read_unlock();
return 0;
+
+need_kick_unlock:
+ rcu_read_unlock();
+need_kick:
+ return 1;
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
@@ -4733,14 +5143,14 @@ static inline int on_null_domain(int cpu)
/*
* Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
*/
-static inline void trigger_load_balance(struct rq *rq, int cpu)
+void trigger_load_balance(struct rq *rq, int cpu)
{
/* Don't need to rebalance while attached to NULL domain */
if (time_after_eq(jiffies, rq->next_balance) &&
likely(!on_null_domain(cpu)))
raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
- else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
+ if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
nohz_balancer_kick(cpu);
#endif
}
@@ -4755,15 +5165,6 @@ static void rq_offline_fair(struct rq *rq)
update_sysctl();
}
-#else /* CONFIG_SMP */
-
-/*
- * on UP we do not need to balance between CPUs:
- */
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
-
#endif /* CONFIG_SMP */
/*
@@ -4787,8 +5188,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
*/
static void task_fork_fair(struct task_struct *p)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(current);
- struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se, *curr;
int this_cpu = smp_processor_id();
struct rq *rq = this_rq();
unsigned long flags;
@@ -4797,6 +5198,9 @@ static void task_fork_fair(struct task_struct *p)
update_rq_clock(rq);
+ cfs_rq = task_cfs_rq(current);
+ curr = cfs_rq->curr;
+
if (unlikely(task_cpu(p) != this_cpu)) {
rcu_read_lock();
__set_task_cpu(p, this_cpu);
@@ -4906,6 +5310,16 @@ static void set_curr_task_fair(struct rq *rq)
}
}
+void init_cfs_rq(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->tasks_timeline = RB_ROOT;
+ INIT_LIST_HEAD(&cfs_rq->tasks);
+ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
@@ -4922,13 +5336,182 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
* to another cgroup's rq. This does somewhat interfere with the
* fair sleeper stuff for the first placement, but who cares.
*/
+ /*
+ * When !on_rq, vruntime of the task has usually NOT been normalized.
+ * But there are some cases where it has already been normalized:
+ *
+ * - Moving a forked child which is waiting for being woken up by
+ * wake_up_new_task().
+ * - Moving a task which has been woken up by try_to_wake_up() and
+ * waiting for actually being woken up by sched_ttwu_pending().
+ *
+ * To prevent boost or penalty in the new cfs_rq caused by delta
+ * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
+ */
+ if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+ on_rq = 1;
+
if (!on_rq)
p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
set_task_rq(p, task_cpu(p));
if (!on_rq)
p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
+
+void free_fair_sched_group(struct task_group *tg)
+{
+ int i;
+
+ destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
+
+ for_each_possible_cpu(i) {
+ if (tg->cfs_rq)
+ kfree(tg->cfs_rq[i]);
+ if (tg->se)
+ kfree(tg->se[i]);
+ }
+
+ kfree(tg->cfs_rq);
+ kfree(tg->se);
+}
+
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se;
+ int i;
+
+ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
+ if (!tg->cfs_rq)
+ goto err;
+ tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
+ if (!tg->se)
+ goto err;
+
+ tg->shares = NICE_0_LOAD;
+
+ init_cfs_bandwidth(tg_cfs_bandwidth(tg));
+
+ for_each_possible_cpu(i) {
+ cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+ GFP_KERNEL, cpu_to_node(i));
+ if (!cfs_rq)
+ goto err;
+
+ se = kzalloc_node(sizeof(struct sched_entity),
+ GFP_KERNEL, cpu_to_node(i));
+ if (!se)
+ goto err_free_rq;
+
+ init_cfs_rq(cfs_rq);
+ init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+ }
+
+ return 1;
+
+err_free_rq:
+ kfree(cfs_rq);
+err:
+ return 0;
+}
+
+void unregister_fair_sched_group(struct task_group *tg, int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ /*
+ * Only empty task groups can be destroyed; so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ return;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+ struct sched_entity *se, int cpu,
+ struct sched_entity *parent)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ cfs_rq->tg = tg;
+ cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+ /* allow initial update_cfs_load() to truncate */
+ cfs_rq->load_stamp = 1;
#endif
+ init_cfs_rq_runtime(cfs_rq);
+
+ tg->cfs_rq[cpu] = cfs_rq;
+ tg->se[cpu] = se;
+
+ /* se could be NULL for root_task_group */
+ if (!se)
+ return;
+
+ if (!parent)
+ se->cfs_rq = &rq->cfs;
+ else
+ se->cfs_rq = parent->my_q;
+
+ se->my_q = cfs_rq;
+ update_load_set(&se->load, 0);
+ se->parent = parent;
+}
+
+static DEFINE_MUTEX(shares_mutex);
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+ int i;
+ unsigned long flags;
+
+ /*
+ * We can't change the weight of the root cgroup.
+ */
+ if (!tg->se[0])
+ return -EINVAL;
+
+ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
+
+ mutex_lock(&shares_mutex);
+ if (tg->shares == shares)
+ goto done;
+
+ tg->shares = shares;
+ for_each_possible_cpu(i) {
+ struct rq *rq = cpu_rq(i);
+ struct sched_entity *se;
+
+ se = tg->se[i];
+ /* Propagate contribution to hierarchy */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ for_each_sched_entity(se)
+ update_cfs_shares(group_cfs_rq(se));
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+done:
+ mutex_unlock(&shares_mutex);
+ return 0;
+}
+#else /* CONFIG_FAIR_GROUP_SCHED */
+
+void free_fair_sched_group(struct task_group *tg) { }
+
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+{
+ return 1;
+}
+
+void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
@@ -4948,7 +5531,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
/*
* All the scheduling class methods:
*/
-static const struct sched_class fair_sched_class = {
+const struct sched_class fair_sched_class = {
.next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
@@ -4985,7 +5568,7 @@ static const struct sched_class fair_sched_class = {
};
#ifdef CONFIG_SCHED_DEBUG
-static void print_cfs_stats(struct seq_file *m, int cpu)
+void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq;
@@ -4995,3 +5578,15 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
rcu_read_unlock();
}
#endif
+
+__init void init_sched_fair_class(void)
+{
+#ifdef CONFIG_SMP
+ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
+
+#ifdef CONFIG_NO_HZ
+ zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+#endif
+#endif /* SMP */
+
+}
diff --git a/kernel/sched_features.h b/kernel/sched/features.h
index efa0a7b75dd..e61fd73913d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched/features.h
@@ -3,13 +3,13 @@
* them to run sooner, but does not allow tons of sleepers to
* rip the spread apart.
*/
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
/*
* Place new tasks ahead so that they do not starve already running
* tasks
*/
-SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(START_DEBIT, true)
/*
* Based on load and program behaviour, see if it makes sense to place
@@ -17,53 +17,54 @@ SCHED_FEAT(START_DEBIT, 1)
* improve cache locality. Typically used with SYNC wakeups as
* generated by pipes and the like, see also SYNC_WAKEUPS.
*/
-SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
* wakeup-preemption), since it's likely going to consume data we
* touched, increases cache locality.
*/
-SCHED_FEAT(NEXT_BUDDY, 0)
+SCHED_FEAT(NEXT_BUDDY, false)
/*
* Prefer to schedule the task that ran last (when we did
* wake-preempt) as that likely will touch the same data, increases
* cache locality.
*/
-SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(LAST_BUDDY, true)
/*
* Consider buddies to be cache hot, which decreases the likelihood of a
* cache buddy being migrated away and increases cache locality.
*/
-SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, true)
/*
* Use arch dependent cpu power functions
*/
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, false)
-SCHED_FEAT(HRTICK, 0)
-SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(LB_BIAS, 1)
+SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(DOUBLE_TICK, false)
+SCHED_FEAT(LB_BIAS, true)
/*
* Spin-wait on mutex acquisition when the mutex owner is running on
* another cpu -- assumes that when the owner is running, it will soon
* release the lock. Decreases scheduling overhead.
*/
-SCHED_FEAT(OWNER_SPIN, 1)
+SCHED_FEAT(OWNER_SPIN, true)
/*
* Decrement CPU power based on time not spent running tasks
*/
-SCHED_FEAT(NONTASK_POWER, 1)
+SCHED_FEAT(NONTASK_POWER, true)
/*
* Queue remote wakeups on the target CPU and process them
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
-SCHED_FEAT(TTWU_QUEUE, 1)
+SCHED_FEAT(TTWU_QUEUE, true)
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(FORCE_SD_OVERLAP, false)
+SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched_idletask.c b/kernel/sched/idle_task.c
index 0a51882534e..91b4c957f28 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched/idle_task.c
@@ -1,3 +1,5 @@
+#include "sched.h"
+
/*
* idle-task scheduling class.
*
@@ -71,7 +73,7 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
-static const struct sched_class idle_sched_class = {
+const struct sched_class idle_sched_class = {
/* .next is NULL */
/* no enqueue/yield_task for idle tasks */
diff --git a/kernel/sched_rt.c b/kernel/sched/rt.c
index 056cbd2e2a2..3640ebbb466 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched/rt.c
@@ -3,7 +3,92 @@
* policies)
*/
+#include "sched.h"
+
+#include <linux/slab.h>
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
+struct rt_bandwidth def_rt_bandwidth;
+
+static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
+{
+ struct rt_bandwidth *rt_b =
+ container_of(timer, struct rt_bandwidth, rt_period_timer);
+ ktime_t now;
+ int overrun;
+ int idle = 0;
+
+ for (;;) {
+ now = hrtimer_cb_get_time(timer);
+ overrun = hrtimer_forward(timer, now, rt_b->rt_period);
+
+ if (!overrun)
+ break;
+
+ idle = do_sched_rt_period_timer(rt_b, overrun);
+ }
+
+ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
+{
+ rt_b->rt_period = ns_to_ktime(period);
+ rt_b->rt_runtime = runtime;
+
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+}
+
+static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+ if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+ return;
+
+ if (hrtimer_active(&rt_b->rt_period_timer))
+ return;
+
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
+}
+
+void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
+{
+ struct rt_prio_array *array;
+ int i;
+
+ array = &rt_rq->active;
+ for (i = 0; i < MAX_RT_PRIO; i++) {
+ INIT_LIST_HEAD(array->queue + i);
+ __clear_bit(i, array->bitmap);
+ }
+ /* delimiter for bitsearch: */
+ __set_bit(MAX_RT_PRIO, array->bitmap);
+
+#if defined CONFIG_SMP
+ rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ rt_rq->highest_prio.next = MAX_RT_PRIO;
+ rt_rq->rt_nr_migratory = 0;
+ rt_rq->overloaded = 0;
+ plist_head_init(&rt_rq->pushable_tasks);
+#endif
+
+ rt_rq->rt_time = 0;
+ rt_rq->rt_throttled = 0;
+ rt_rq->rt_runtime = 0;
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+}
+
#ifdef CONFIG_RT_GROUP_SCHED
+static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+ hrtimer_cancel(&rt_b->rt_period_timer);
+}
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
@@ -25,6 +110,91 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
return rt_se->rt_rq;
}
+void free_rt_sched_group(struct task_group *tg)
+{
+ int i;
+
+ if (tg->rt_se)
+ destroy_rt_bandwidth(&tg->rt_bandwidth);
+
+ for_each_possible_cpu(i) {
+ if (tg->rt_rq)
+ kfree(tg->rt_rq[i]);
+ if (tg->rt_se)
+ kfree(tg->rt_se[i]);
+ }
+
+ kfree(tg->rt_rq);
+ kfree(tg->rt_se);
+}
+
+void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+ struct sched_rt_entity *rt_se, int cpu,
+ struct sched_rt_entity *parent)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ rt_rq->rt_nr_boosted = 0;
+ rt_rq->rq = rq;
+ rt_rq->tg = tg;
+
+ tg->rt_rq[cpu] = rt_rq;
+ tg->rt_se[cpu] = rt_se;
+
+ if (!rt_se)
+ return;
+
+ if (!parent)
+ rt_se->rt_rq = &rq->rt;
+ else
+ rt_se->rt_rq = parent->my_q;
+
+ rt_se->my_q = rt_rq;
+ rt_se->parent = parent;
+ INIT_LIST_HEAD(&rt_se->run_list);
+}
+
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
+{
+ struct rt_rq *rt_rq;
+ struct sched_rt_entity *rt_se;
+ int i;
+
+ tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
+ if (!tg->rt_rq)
+ goto err;
+ tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
+ if (!tg->rt_se)
+ goto err;
+
+ init_rt_bandwidth(&tg->rt_bandwidth,
+ ktime_to_ns(def_rt_bandwidth.rt_period), 0);
+
+ for_each_possible_cpu(i) {
+ rt_rq = kzalloc_node(sizeof(struct rt_rq),
+ GFP_KERNEL, cpu_to_node(i));
+ if (!rt_rq)
+ goto err;
+
+ rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+ GFP_KERNEL, cpu_to_node(i));
+ if (!rt_se)
+ goto err_free_rq;
+
+ init_rt_rq(rt_rq, cpu_rq(i));
+ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
+ init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
+ }
+
+ return 1;
+
+err_free_rq:
+ kfree(rt_rq);
+err:
+ return 0;
+}
+
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
@@ -47,6 +217,12 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
return &rq->rt;
}
+void free_rt_sched_group(struct task_group *tg) { }
+
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
+{
+ return 1;
+}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
@@ -556,10 +732,35 @@ static void enable_runtime(struct rq *rq)
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
+int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ int cpu = (int)(long)hcpu;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ disable_runtime(cpu_rq(cpu));
+ return NOTIFY_OK;
+
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ enable_runtime(cpu_rq(cpu));
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
static int balance_runtime(struct rt_rq *rt_rq)
{
int more = 0;
+ if (!sched_feat(RT_RUNTIME_SHARE))
+ return more;
+
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
@@ -645,7 +846,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
- if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+ if (runtime >= sched_rt_period(rt_rq))
return 0;
balance_runtime(rt_rq);
@@ -954,8 +1155,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
}
/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
+ * Put task to the head or the end of the run list without the overhead of
+ * dequeue followed by enqueue.
*/
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
@@ -999,6 +1200,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
cpu = task_cpu(p);
+ if (p->rt.nr_cpus_allowed == 1)
+ goto out;
+
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
@@ -1175,8 +1379,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
/* Only try algorithms three times */
#define RT_MAX_TRIES 3
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
-
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -1650,13 +1852,14 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
pull_rt_task(rq);
}
-static inline void init_sched_rt_class(void)
+void init_sched_rt_class(void)
{
unsigned int i;
- for_each_possible_cpu(i)
+ for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
+ }
}
#endif /* CONFIG_SMP */
@@ -1797,7 +2000,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
return 0;
}
-static const struct sched_class rt_sched_class = {
+const struct sched_class rt_sched_class = {
.next = &fair_sched_class,
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
@@ -1832,7 +2035,7 @@ static const struct sched_class rt_sched_class = {
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
-static void print_rt_stats(struct seq_file *m, int cpu)
+void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
new file mode 100644
index 00000000000..98c0c2623db
--- /dev/null
+++ b/kernel/sched/sched.h
@@ -0,0 +1,1166 @@
+
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+
+#include "cpupri.h"
+
+extern __read_mostly int scheduler_running;
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
+#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+/*
+ * Helpers for converting nanosecond timing to jiffy resolution
+ */
+#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
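+/*
+ * For example, on a HZ=1000 build NSEC_PER_SEC / HZ is 1,000,000, so
+ * NS_TO_JIFFIES(5000000) evaluates to 5 jiffies (i.e. 5 msec of ticks).
+ */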
+
+#define NICE_0_LOAD SCHED_LOAD_SCALE
+#define NICE_0_SHIFT SCHED_LOAD_SHIFT
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define DEF_TIMESLICE (100 * HZ / 1000)
+
+/*
+ * single value that denotes runtime == period, i.e. unlimited time.
+ */
+#define RUNTIME_INF ((u64)~0ULL)
+
+static inline int rt_policy(int policy)
+{
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return 1;
+ return 0;
+}
+
+static inline int task_has_rt_policy(struct task_struct *p)
+{
+ return rt_policy(p->policy);
+}
+
+/*
+ * This is the priority-queue data structure of the RT scheduling class:
+ */
+struct rt_prio_array {
+ DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
+ struct list_head queue[MAX_RT_PRIO];
+};
+
+struct rt_bandwidth {
+ /* nests inside the rq lock: */
+ raw_spinlock_t rt_runtime_lock;
+ ktime_t rt_period;
+ u64 rt_runtime;
+ struct hrtimer rt_period_timer;
+};
+
+extern struct mutex sched_domains_mutex;
+
+#ifdef CONFIG_CGROUP_SCHED
+
+#include <linux/cgroup.h>
+
+struct cfs_rq;
+struct rt_rq;
+
+static LIST_HEAD(task_groups);
+
+struct cfs_bandwidth {
+#ifdef CONFIG_CFS_BANDWIDTH
+ raw_spinlock_t lock;
+ ktime_t period;
+ u64 quota, runtime;
+ s64 hierarchal_quota;
+ u64 runtime_expires;
+
+ int idle, timer_active;
+ struct hrtimer period_timer, slack_timer;
+ struct list_head throttled_cfs_rq;
+
+ /* statistics */
+ int nr_periods, nr_throttled;
+ u64 throttled_time;
+#endif
+};
+
+/* task group related information */
+struct task_group {
+ struct cgroup_subsys_state css;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* schedulable entities of this group on each cpu */
+ struct sched_entity **se;
+ /* runqueue "owned" by this group on each cpu */
+ struct cfs_rq **cfs_rq;
+ unsigned long shares;
+
+ atomic_t load_weight;
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity **rt_se;
+ struct rt_rq **rt_rq;
+
+ struct rt_bandwidth rt_bandwidth;
+#endif
+
+ struct rcu_head rcu;
+ struct list_head list;
+
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct autogroup *autogroup;
+#endif
+
+ struct cfs_bandwidth cfs_bandwidth;
+};
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
+
+/*
+ * A weight of 0 or 1 can cause arithmetic problems.
+ * The weight of a cfs_rq is the sum of the weights of the entities
+ * queued on it, so the weight of an entity should not be too large;
+ * the same applies to the shares value of a task group.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
+#define MIN_SHARES (1UL << 1)
+#define MAX_SHARES (1UL << 18)
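+/*
+ * Example: group shares (e.g. set via the cpu.shares cgroup file) are
+ * clamped to [MIN_SHARES, MAX_SHARES] == [2, 262144]; the default of
+ * NICE_0_LOAD (1024) sits well inside that range.
+ */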
+#endif
+
+/* Default task group.
+ * Every task in the system belongs to this group at bootup.
+ */
+extern struct task_group root_task_group;
+
+typedef int (*tg_visitor)(struct task_group *, void *);
+
+extern int walk_tg_tree_from(struct task_group *from,
+ tg_visitor down, tg_visitor up, void *data);
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ *
+ * Caller must hold rcu_lock or sufficient equivalent.
+ */
+static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+ return walk_tg_tree_from(&root_task_group, down, up, data);
+}
+
+extern int tg_nop(struct task_group *tg, void *data);
+
+extern void free_fair_sched_group(struct task_group *tg);
+extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
+extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+ struct sched_entity *se, int cpu,
+ struct sched_entity *parent);
+extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+
+extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
+
+extern void free_rt_sched_group(struct task_group *tg);
+extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
+extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+ struct sched_rt_entity *rt_se, int cpu,
+ struct sched_rt_entity *parent);
+
+#else /* CONFIG_CGROUP_SCHED */
+
+struct cfs_bandwidth { };
+
+#endif /* CONFIG_CGROUP_SCHED */
+
+/* CFS-related fields in a runqueue */
+struct cfs_rq {
+ struct load_weight load;
+ unsigned long nr_running, h_nr_running;
+
+ u64 exec_clock;
+ u64 min_vruntime;
+#ifndef CONFIG_64BIT
+ u64 min_vruntime_copy;
+#endif
+
+ struct rb_root tasks_timeline;
+ struct rb_node *rb_leftmost;
+
+ struct list_head tasks;
+ struct list_head *balance_iterator;
+
+ /*
+ * 'curr' points to currently running entity on this cfs_rq.
+ * It is set to NULL otherwise (i.e. when none are currently running).
+ */
+ struct sched_entity *curr, *next, *last, *skip;
+
+#ifdef CONFIG_SCHED_DEBUG
+ unsigned int nr_spread_over;
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
+
+ /*
+ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+ * (like users, containers etc.)
+ *
+ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+ * list is used during load balance.
+ */
+ int on_list;
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg; /* group that "owns" this runqueue */
+
+#ifdef CONFIG_SMP
+ /*
+ * the part of load.weight contributed by tasks
+ */
+ unsigned long task_weight;
+
+ /*
+ * h_load = weight * f(tg)
+ *
+ * Where f(tg) is the recursive weight fraction assigned to
+ * this group.
+ */
+ unsigned long h_load;
+
+ /*
+ * Maintaining per-cpu shares distribution for group scheduling
+ *
+ * load_stamp is the last time we updated the load average
+ * load_last is the last time we updated the load average and saw load
+ * load_unacc_exec_time is currently unaccounted execution time
+ */
+ u64 load_avg;
+ u64 load_period;
+ u64 load_stamp, load_last, load_unacc_exec_time;
+
+ unsigned long load_contribution;
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_CFS_BANDWIDTH
+ int runtime_enabled;
+ u64 runtime_expires;
+ s64 runtime_remaining;
+
+ u64 throttled_timestamp;
+ int throttled, throttle_count;
+ struct list_head throttled_list;
+#endif /* CONFIG_CFS_BANDWIDTH */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+};
+
+static inline int rt_bandwidth_enabled(void)
+{
+ return sysctl_sched_rt_runtime >= 0;
+}
+
+/* Real-Time classes' related field in a runqueue: */
+struct rt_rq {
+ struct rt_prio_array active;
+ unsigned long rt_nr_running;
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+ struct {
+ int curr; /* highest queued rt task prio */
+#ifdef CONFIG_SMP
+ int next; /* next highest */
+#endif
+ } highest_prio;
+#endif
+#ifdef CONFIG_SMP
+ unsigned long rt_nr_migratory;
+ unsigned long rt_nr_total;
+ int overloaded;
+ struct plist_head pushable_tasks;
+#endif
+ int rt_throttled;
+ u64 rt_time;
+ u64 rt_runtime;
+ /* Nests inside the rq lock: */
+ raw_spinlock_t rt_runtime_lock;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ unsigned long rt_nr_boosted;
+
+ struct rq *rq;
+ struct list_head leaf_rt_rq_list;
+ struct task_group *tg;
+#endif
+};
+
+#ifdef CONFIG_SMP
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+ atomic_t refcount;
+ atomic_t rto_count;
+ struct rcu_head rcu;
+ cpumask_var_t span;
+ cpumask_var_t online;
+
+ /*
+ * The "RT overload" flag: it gets set if a CPU has more than
+ * one runnable RT task.
+ */
+ cpumask_var_t rto_mask;
+ struct cpupri cpupri;
+};
+
+extern struct root_domain def_root_domain;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ *
+ * Locking rule: code paths that need to lock multiple runqueues (such as
+ * the load balancing or the thread migration code) must acquire the locks
+ * in ascending &runqueue order.
+ */
+struct rq {
+ /* runqueue lock: */
+ raw_spinlock_t lock;
+
+ /*
+ * nr_running and cpu_load should be in the same cacheline because
+ * remote CPUs use both these fields when doing load calculation.
+ */
+ unsigned long nr_running;
+ #define CPU_LOAD_IDX_MAX 5
+ unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+ unsigned long last_load_update_tick;
+#ifdef CONFIG_NO_HZ
+ u64 nohz_stamp;
+ unsigned long nohz_flags;
+#endif
+ int skip_clock_update;
+
+ /* capture load from *all* tasks on this cpu: */
+ struct load_weight load;
+ unsigned long nr_load_updates;
+ u64 nr_switches;
+
+ struct cfs_rq cfs;
+ struct rt_rq rt;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ /* list of leaf cfs_rq on this cpu: */
+ struct list_head leaf_cfs_rq_list;
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct list_head leaf_rt_rq_list;
+#endif
+
+ /*
+ * This is part of a global counter where only the total sum
+ * over all CPUs matters. A task can increase this counter on
+ * one CPU and if it got migrated afterwards it may decrease
+ * it on another CPU. Always updated under the runqueue lock:
+ */
+ unsigned long nr_uninterruptible;
+
+ struct task_struct *curr, *idle, *stop;
+ unsigned long next_balance;
+ struct mm_struct *prev_mm;
+
+ u64 clock;
+ u64 clock_task;
+
+ atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+ struct root_domain *rd;
+ struct sched_domain *sd;
+
+ unsigned long cpu_power;
+
+ unsigned char idle_balance;
+ /* For active balancing */
+ int post_schedule;
+ int active_balance;
+ int push_cpu;
+ struct cpu_stop_work active_balance_work;
+ /* cpu of this runqueue: */
+ int cpu;
+ int online;
+
+ u64 rt_avg;
+ u64 age_stamp;
+ u64 idle_stamp;
+ u64 avg_idle;
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+#endif
+#ifdef CONFIG_PARAVIRT
+ u64 prev_steal_time;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ u64 prev_steal_time_rq;
+#endif
+
+ /* calc_load related fields */
+ unsigned long calc_load_update;
+ long calc_load_active;
+
+#ifdef CONFIG_SCHED_HRTICK
+#ifdef CONFIG_SMP
+ int hrtick_csd_pending;
+ struct call_single_data hrtick_csd;
+#endif
+ struct hrtimer hrtick_timer;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+ /* latency stats */
+ struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+ /* sys_sched_yield() stats */
+ unsigned int yld_count;
+
+ /* schedule() stats */
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
+#endif
+
+#ifdef CONFIG_SMP
+ struct llist_head wake_list;
+#endif
+};
+
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+ return rq->cpu;
+#else
+ return 0;
+#endif
+}
+
+DECLARE_PER_CPU(struct rq, runqueues);
+
+#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
+#define this_rq() (&__get_cpu_var(runqueues))
+#define task_rq(p) cpu_rq(task_cpu(p))
+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#define raw_rq() (&__raw_get_cpu_var(runqueues))
+
+#ifdef CONFIG_SMP
+
+#define rcu_dereference_check_sched_domain(p) \
+ rcu_dereference_check((p), \
+ lockdep_is_held(&sched_domains_mutex))
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, __sd) \
+ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
+ __sd; __sd = __sd->parent)
+
+#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
+
+/**
+ * highest_flag_domain - Return highest sched_domain containing flag.
+ * @cpu: The cpu whose highest level of sched domain is to
+ * be returned.
+ * @flag: The flag to check for the highest sched_domain
+ * for the given cpu.
+ *
+ * Returns the highest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
+{
+ struct sched_domain *sd, *hsd = NULL;
+
+ for_each_domain(cpu, sd) {
+ if (!(sd->flags & flag))
+ break;
+ hsd = sd;
+ }
+
+ return hsd;
+}
+
+DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(int, sd_llc_id);
+
+#endif /* CONFIG_SMP */
+
+#include "stats.h"
+#include "auto_group.h"
+
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this task belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+ struct task_group *tg;
+ struct cgroup_subsys_state *css;
+
+ css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+ lockdep_is_held(&p->pi_lock) ||
+ lockdep_is_held(&task_rq(p)->lock));
+ tg = container_of(css, struct task_group, css);
+
+ return autogroup_task_group(p, tg);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
+ struct task_group *tg = task_group(p);
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ p->se.cfs_rq = tg->cfs_rq[cpu];
+ p->se.parent = tg->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ p->rt.rt_rq = tg->rt_rq[cpu];
+ p->rt.parent = tg->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+ set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
+ task_thread_info(p)->cpu = cpu;
+#endif
+}
+
+/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# include <linux/jump_label.h>
+# define const_debug __read_mostly
+#else
+# define const_debug const
+#endif
+
+extern const_debug unsigned int sysctl_sched_features;
+
+#define SCHED_FEAT(name, enabled) \
+ __SCHED_FEAT_##name ,
+
+enum {
+#include "features.h"
+ __SCHED_FEAT_NR,
+};
+
+#undef SCHED_FEAT
+
+#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+static __always_inline bool static_branch__true(struct jump_label_key *key)
+{
+ return likely(static_branch(key)); /* Not out of line branch. */
+}
+
+static __always_inline bool static_branch__false(struct jump_label_key *key)
+{
+ return unlikely(static_branch(key)); /* Out of line branch. */
+}
+
+#define SCHED_FEAT(name, enabled) \
+static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+{ \
+ return static_branch__##enabled(key); \
+}
+
+#include "features.h"
+
+#undef SCHED_FEAT
+
+extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
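+/*
+ * Usage example (see balance_runtime() in rt.c, earlier in this patch):
+ *
+ *   if (!sched_feat(RT_RUNTIME_SHARE))
+ *           return more;
+ *
+ * With SCHED_DEBUG and jump labels this compiles down to a static branch,
+ * otherwise it is a plain bitmask test against sysctl_sched_features.
+ */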
+
+static inline u64 global_rt_period(void)
+{
+ return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_rt_runtime(void)
+{
+ if (sysctl_sched_rt_runtime < 0)
+ return RUNTIME_INF;
+
+ return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+}
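+/*
+ * With the default sysctls (sched_rt_period_us = 1000000,
+ * sched_rt_runtime_us = 950000) these return 1 s and 0.95 s in nanoseconds,
+ * i.e. RT tasks may consume at most 95% of each period; writing -1 to
+ * sched_rt_runtime_us yields RUNTIME_INF and removes the limit.
+ */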
+
+
+
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+ return p->on_cpu;
+#else
+ return task_current(rq, p);
+#endif
+}
+
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next) do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev) do { } while (0)
+#endif
+
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * We can optimise this out completely for !SMP, because the
+ * SMP rebalancing from interrupt is the only thing that cares
+ * here.
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ */
+ smp_wmb();
+ prev->on_cpu = 0;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * We can optimise this out completely for !SMP, because the
+ * SMP rebalancing from interrupt is the only thing that cares
+ * here.
+ */
+ next->on_cpu = 1;
+#endif
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ raw_spin_unlock_irq(&rq->lock);
+#else
+ raw_spin_unlock(&rq->lock);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ */
+ smp_wmb();
+ prev->on_cpu = 0;
+#endif
+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+ local_irq_enable();
+#endif
+}
+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+
+
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+ lw->weight += inc;
+ lw->inv_weight = 0;
+}
+
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+ lw->weight -= dec;
+ lw->inv_weight = 0;
+}
+
+static inline void update_load_set(struct load_weight *lw, unsigned long w)
+{
+ lw->weight = w;
+ lw->inv_weight = 0;
+}
+
+/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs, the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+#define WEIGHT_IDLEPRIO 3
+#define WMULT_IDLEPRIO 1431655765
+
+/*
+ * Nice levels are multiplicative, with a gentle 10% change for every
+ * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
+ * nice 1, it will get ~10% less CPU time than another CPU-bound task
+ * that remained on nice 0.
+ *
+ * The "10% effect" is relative and cumulative: from _any_ nice level,
+ * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
+ * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
+ * If a task goes up by ~10% and another task goes down by ~10% then
+ * the relative distance between them is ~25%.)
+ */
+static const int prio_to_weight[40] = {
+ /* -20 */ 88761, 71755, 56483, 46273, 36291,
+ /* -15 */ 29154, 23254, 18705, 14949, 11916,
+ /* -10 */ 9548, 7620, 6100, 4904, 3906,
+ /* -5 */ 3121, 2501, 1991, 1586, 1277,
+ /* 0 */ 1024, 820, 655, 526, 423,
+ /* 5 */ 335, 272, 215, 172, 137,
+ /* 10 */ 110, 87, 70, 56, 45,
+ /* 15 */ 36, 29, 23, 18, 15,
+};
+
+/*
+ * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
+ *
+ * In cases where the weight does not change often, we can use the
+ * precalculated inverse to speed up arithmetic by turning divisions
+ * into multiplications:
+ */
+static const u32 prio_to_wmult[40] = {
+ /* -20 */ 48388, 59856, 76040, 92818, 118348,
+ /* -15 */ 147320, 184698, 229616, 287308, 360437,
+ /* -10 */ 449829, 563644, 704093, 875809, 1099582,
+ /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
+ /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
+ /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
+ /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
+ /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+};
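+/*
+ * Worked example: going from nice 0 to nice 1 drops the weight from 1024
+ * to 820, a ratio of ~1.25, which is the ~10% CPU step described above.
+ * The inverse table turns "x / 1024" into "(x * 4194304) >> 32", since
+ * prio_to_wmult[20] == 4194304 == 2^32 / 1024.
+ */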
+
+/* Time spent by the tasks of the cpu accounting group executing in ... */
+enum cpuacct_stat_index {
+ CPUACCT_STAT_USER, /* ... user mode */
+ CPUACCT_STAT_SYSTEM, /* ... kernel mode */
+
+ CPUACCT_STAT_NSTATS,
+};
+
+
+#define sched_class_highest (&stop_sched_class)
+#define for_each_class(class) \
+ for (class = sched_class_highest; class; class = class->next)
+
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+
+#ifdef CONFIG_SMP
+
+extern void trigger_load_balance(struct rq *rq, int cpu);
+extern void idle_balance(int this_cpu, struct rq *this_rq);
+
+#else /* CONFIG_SMP */
+
+static inline void idle_balance(int cpu, struct rq *rq)
+{
+}
+
+#endif
+
+extern void sysrq_sched_debug_show(void);
+extern void sched_init_granularity(void);
+extern void update_max_interval(void);
+extern void update_group_power(struct sched_domain *sd, int cpu);
+extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
+extern void init_sched_rt_class(void);
+extern void init_sched_fair_class(void);
+
+extern void resched_task(struct task_struct *p);
+extern void resched_cpu(int cpu);
+
+extern struct rt_bandwidth def_rt_bandwidth;
+extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+extern void update_cpu_load(struct rq *this_rq);
+
+#ifdef CONFIG_CGROUP_CPUACCT
+#include <linux/cgroup.h>
+/* track cpu usage of a group of tasks and its child groups */
+struct cpuacct {
+ struct cgroup_subsys_state css;
+ /* cpuusage holds pointer to a u64-type object on every cpu */
+ u64 __percpu *cpuusage;
+ struct kernel_cpustat __percpu *cpustat;
+};
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+{
+ return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+static inline struct cpuacct *parent_ca(struct cpuacct *ca)
+{
+ if (!ca || !ca->css.cgroup->parent)
+ return NULL;
+ return cgroup_ca(ca->css.cgroup->parent);
+}
+
+extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
+static inline void inc_nr_running(struct rq *rq)
+{
+ rq->nr_running++;
+}
+
+static inline void dec_nr_running(struct rq *rq)
+{
+ rq->nr_running--;
+}
+
+extern void update_rq_clock(struct rq *rq);
+
+extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
+extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
+
+extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
+
+extern const_debug unsigned int sysctl_sched_time_avg;
+extern const_debug unsigned int sysctl_sched_nr_migrate;
+extern const_debug unsigned int sysctl_sched_migration_cost;
+
+static inline u64 sched_avg_period(void)
+{
+ return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
+}
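+/*
+ * Assuming the default sysctl_sched_time_avg of 1000 msec, this is 500 msec
+ * in nanoseconds; it is used by sched_avg_update() as the window for
+ * decaying rq->rt_avg.
+ */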
+
+void calc_load_account_idle(struct rq *this_rq);
+
+#ifdef CONFIG_SCHED_HRTICK
+
+/*
+ * Use hrtick when:
+ * - enabled by features
+ * - hrtimer is actually high res
+ */
+static inline int hrtick_enabled(struct rq *rq)
+{
+ if (!sched_feat(HRTICK))
+ return 0;
+ if (!cpu_active(cpu_of(rq)))
+ return 0;
+ return hrtimer_is_hres_active(&rq->hrtick_timer);
+}
+
+void hrtick_start(struct rq *rq, u64 delay);
+
+#else
+
+static inline int hrtick_enabled(struct rq *rq)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_HRTICK */
+
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+ rq->rt_avg += rt_delta;
+ sched_avg_update(rq);
+}
+#else
+static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
+static inline void sched_avg_update(struct rq *rq) { }
+#endif
+
+extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PREEMPT
+
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+/*
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations. This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below. However, it
+ * also adds more overhead and therefore may reduce throughput.
+ */
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(this_rq->lock)
+ __acquires(busiest->lock)
+ __acquires(this_rq->lock)
+{
+ raw_spin_unlock(&this_rq->lock);
+ double_rq_lock(this_rq, busiest);
+
+ return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry. This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(this_rq->lock)
+ __acquires(busiest->lock)
+ __acquires(this_rq->lock)
+{
+ int ret = 0;
+
+ if (unlikely(!raw_spin_trylock(&busiest->lock))) {
+ if (busiest < this_rq) {
+ raw_spin_unlock(&this_rq->lock);
+ raw_spin_lock(&busiest->lock);
+ raw_spin_lock_nested(&this_rq->lock,
+ SINGLE_DEPTH_NESTING);
+ ret = 1;
+ } else
+ raw_spin_lock_nested(&busiest->lock,
+ SINGLE_DEPTH_NESTING);
+ }
+ return ret;
+}
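+/*
+ * Example of the resulting ordering: if one CPU calls
+ * double_lock_balance(rq1, rq0) while another calls
+ * double_lock_balance(rq0, rq1), both paths end up taking the
+ * lower-addressed rq lock first, so an AB-BA deadlock cannot occur.
+ */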
+
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+ if (unlikely(!irqs_disabled())) {
+ /* printk() doesn't work well under rq->lock */
+ raw_spin_unlock(&this_rq->lock);
+ BUG_ON(1);
+ }
+
+ return _double_lock_balance(this_rq, busiest);
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+ __releases(busiest->lock)
+{
+ raw_spin_unlock(&busiest->lock);
+ lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+ __acquires(rq1->lock)
+ __acquires(rq2->lock)
+{
+ BUG_ON(!irqs_disabled());
+ if (rq1 == rq2) {
+ raw_spin_lock(&rq1->lock);
+ __acquire(rq2->lock); /* Fake it out ;) */
+ } else {
+ if (rq1 < rq2) {
+ raw_spin_lock(&rq1->lock);
+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ raw_spin_lock(&rq2->lock);
+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ }
+ }
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+ __releases(rq1->lock)
+ __releases(rq2->lock)
+{
+ raw_spin_unlock(&rq1->lock);
+ if (rq1 != rq2)
+ raw_spin_unlock(&rq2->lock);
+ else
+ __release(rq2->lock);
+}
+
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+ __acquires(rq1->lock)
+ __acquires(rq2->lock)
+{
+ BUG_ON(!irqs_disabled());
+ BUG_ON(rq1 != rq2);
+ raw_spin_lock(&rq1->lock);
+ __acquire(rq2->lock); /* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+ __releases(rq1->lock)
+ __releases(rq2->lock)
+{
+ BUG_ON(rq1 != rq2);
+ raw_spin_unlock(&rq1->lock);
+ __release(rq2->lock);
+}
+
+#endif
+
+extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
+extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
+extern void print_cfs_stats(struct seq_file *m, int cpu);
+extern void print_rt_stats(struct seq_file *m, int cpu);
+
+extern void init_cfs_rq(struct cfs_rq *cfs_rq);
+extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
+extern void unthrottle_offline_cfs_rqs(struct rq *rq);
+
+extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+
+#ifdef CONFIG_NO_HZ
+enum rq_nohz_flag_bits {
+ NOHZ_TICK_STOPPED,
+ NOHZ_BALANCE_KICK,
+ NOHZ_IDLE,
+};
+
+#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
+#endif
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
new file mode 100644
index 00000000000..2a581ba8e19
--- /dev/null
+++ b/kernel/sched/stats.c
@@ -0,0 +1,111 @@
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#include "sched.h"
+
+/*
+ * bump this up when changing the output format or the meaning of an existing
+ * format, so that tools can adapt (or abort)
+ */
+#define SCHEDSTAT_VERSION 15
+
+static int show_schedstat(struct seq_file *seq, void *v)
+{
+ int cpu;
+ int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
+ char *mask_str = kmalloc(mask_len, GFP_KERNEL);
+
+ if (mask_str == NULL)
+ return -ENOMEM;
+
+ seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+ seq_printf(seq, "timestamp %lu\n", jiffies);
+ for_each_online_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+#ifdef CONFIG_SMP
+ struct sched_domain *sd;
+ int dcount = 0;
+#endif
+
+ /* runqueue-specific stats */
+ seq_printf(seq,
+ "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+ cpu, rq->yld_count,
+ rq->sched_switch, rq->sched_count, rq->sched_goidle,
+ rq->ttwu_count, rq->ttwu_local,
+ rq->rq_cpu_time,
+ rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
+
+ seq_printf(seq, "\n");
+
+#ifdef CONFIG_SMP
+ /* domain-specific stats */
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+ enum cpu_idle_type itype;
+
+ cpumask_scnprintf(mask_str, mask_len,
+ sched_domain_span(sd));
+ seq_printf(seq, "domain%d %s", dcount++, mask_str);
+ for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
+ itype++) {
+ seq_printf(seq, " %u %u %u %u %u %u %u %u",
+ sd->lb_count[itype],
+ sd->lb_balanced[itype],
+ sd->lb_failed[itype],
+ sd->lb_imbalance[itype],
+ sd->lb_gained[itype],
+ sd->lb_hot_gained[itype],
+ sd->lb_nobusyq[itype],
+ sd->lb_nobusyg[itype]);
+ }
+ seq_printf(seq,
+ " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ sd->alb_count, sd->alb_failed, sd->alb_pushed,
+ sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+ sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
+ sd->ttwu_wake_remote, sd->ttwu_move_affine,
+ sd->ttwu_move_balance);
+ }
+ rcu_read_unlock();
+#endif
+ }
+ kfree(mask_str);
+ return 0;
+}
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+ unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
+ char *buf = kmalloc(size, GFP_KERNEL);
+ struct seq_file *m;
+ int res;
+
+ if (!buf)
+ return -ENOMEM;
+ res = single_open(file, show_schedstat, NULL);
+ if (!res) {
+ m = file->private_data;
+ m->buf = buf;
+ m->size = size;
+ } else
+ kfree(buf);
+ return res;
+}
+
+static const struct file_operations proc_schedstat_operations = {
+ .open = schedstat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init proc_schedstat_init(void)
+{
+ proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
+ return 0;
+}
+module_init(proc_schedstat_init);
diff --git a/kernel/sched_stats.h b/kernel/sched/stats.h
index 87f9e36ea56..2ef90a51ec5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched/stats.h
@@ -1,108 +1,5 @@
#ifdef CONFIG_SCHEDSTATS
-/*
- * bump this up when changing the output format or the meaning of an existing
- * format, so that tools can adapt (or abort)
- */
-#define SCHEDSTAT_VERSION 15
-
-static int show_schedstat(struct seq_file *seq, void *v)
-{
- int cpu;
- int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
- char *mask_str = kmalloc(mask_len, GFP_KERNEL);
-
- if (mask_str == NULL)
- return -ENOMEM;
-
- seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
- seq_printf(seq, "timestamp %lu\n", jiffies);
- for_each_online_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
-#ifdef CONFIG_SMP
- struct sched_domain *sd;
- int dcount = 0;
-#endif
-
- /* runqueue-specific stats */
- seq_printf(seq,
- "cpu%d %u %u %u %u %u %u %llu %llu %lu",
- cpu, rq->yld_count,
- rq->sched_switch, rq->sched_count, rq->sched_goidle,
- rq->ttwu_count, rq->ttwu_local,
- rq->rq_cpu_time,
- rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
-
- seq_printf(seq, "\n");
-
-#ifdef CONFIG_SMP
- /* domain-specific stats */
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- enum cpu_idle_type itype;
-
- cpumask_scnprintf(mask_str, mask_len,
- sched_domain_span(sd));
- seq_printf(seq, "domain%d %s", dcount++, mask_str);
- for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
- itype++) {
- seq_printf(seq, " %u %u %u %u %u %u %u %u",
- sd->lb_count[itype],
- sd->lb_balanced[itype],
- sd->lb_failed[itype],
- sd->lb_imbalance[itype],
- sd->lb_gained[itype],
- sd->lb_hot_gained[itype],
- sd->lb_nobusyq[itype],
- sd->lb_nobusyg[itype]);
- }
- seq_printf(seq,
- " %u %u %u %u %u %u %u %u %u %u %u %u\n",
- sd->alb_count, sd->alb_failed, sd->alb_pushed,
- sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
- sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
- sd->ttwu_wake_remote, sd->ttwu_move_affine,
- sd->ttwu_move_balance);
- }
- rcu_read_unlock();
-#endif
- }
- kfree(mask_str);
- return 0;
-}
-
-static int schedstat_open(struct inode *inode, struct file *file)
-{
- unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
- char *buf = kmalloc(size, GFP_KERNEL);
- struct seq_file *m;
- int res;
-
- if (!buf)
- return -ENOMEM;
- res = single_open(file, show_schedstat, NULL);
- if (!res) {
- m = file->private_data;
- m->buf = buf;
- m->size = size;
- } else
- kfree(buf);
- return res;
-}
-
-static const struct file_operations proc_schedstat_operations = {
- .open = schedstat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init proc_schedstat_init(void)
-{
- proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
- return 0;
-}
-module_init(proc_schedstat_init);
/*
* Expects runqueue lock to be held for atomicity of update
@@ -283,8 +180,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
return;
raw_spin_lock(&cputimer->lock);
- cputimer->cputime.utime =
- cputime_add(cputimer->cputime.utime, cputime);
+ cputimer->cputime.utime += cputime;
raw_spin_unlock(&cputimer->lock);
}
@@ -307,8 +203,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
return;
raw_spin_lock(&cputimer->lock);
- cputimer->cputime.stime =
- cputime_add(cputimer->cputime.stime, cputime);
+ cputimer->cputime.stime += cputime;
raw_spin_unlock(&cputimer->lock);
}
diff --git a/kernel/sched_stoptask.c b/kernel/sched/stop_task.c
index 8b44e7fa7fb..7b386e86fd2 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched/stop_task.c
@@ -1,3 +1,5 @@
+#include "sched.h"
+
/*
* stop-task scheduling class.
*
@@ -80,7 +82,7 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
/*
* Simple, special scheduling class for the per-CPU stop tasks:
*/
-static const struct sched_class stop_sched_class = {
+const struct sched_class stop_sched_class = {
.next = &rt_sched_class,
.enqueue_task = enqueue_task_stop,
diff --git a/kernel/signal.c b/kernel/signal.c
index b3f78d09a10..c73c4284160 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -28,6 +28,7 @@
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
+#include <linux/user_namespace.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
@@ -1019,6 +1020,34 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
+/*
+ * map the uid in struct cred into user namespace *ns
+ */
+static inline uid_t map_cred_ns(const struct cred *cred,
+ struct user_namespace *ns)
+{
+ return user_ns_map_uid(ns, cred, cred->uid);
+}
+
+#ifdef CONFIG_USER_NS
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+ if (current_user_ns() == task_cred_xxx(t, user_ns))
+ return;
+
+ if (SI_FROMKERNEL(info))
+ return;
+
+ info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
+ current_cred(), info->si_uid);
+}
+#else
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+ return;
+}
+#endif
+
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
int group, int from_ancestor_ns)
{
@@ -1088,6 +1117,9 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_pid = 0;
break;
}
+
+ userns_fixup_signal_uid(&q->info, t);
+
} else if (!is_si_special(info)) {
if (sig >= SIGRTMIN && info->si_code != SI_USER) {
/*
@@ -1626,13 +1658,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
- info.si_uid = __task_cred(tsk)->uid;
+ info.si_uid = map_cred_ns(__task_cred(tsk),
+ task_cred_xxx(tsk->parent, user_ns));
rcu_read_unlock();
- info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
- tsk->signal->utime));
- info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
- tsk->signal->stime));
+ info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
+ info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
@@ -1711,7 +1742,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
- info.si_uid = __task_cred(tsk)->uid;
+ info.si_uid = map_cred_ns(__task_cred(tsk),
+ task_cred_xxx(parent, user_ns));
rcu_read_unlock();
info.si_utime = cputime_to_clock_t(tsk->utime);
@@ -1994,8 +2026,6 @@ static bool do_signal_stop(int signr)
*/
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
- else
- WARN_ON_ONCE(!current->ptrace);
sig->group_stop_count = 0;
@@ -2129,8 +2159,11 @@ static int ptrace_signal(int signr, siginfo_t *info,
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
+ rcu_read_lock();
info->si_pid = task_pid_vnr(current->parent);
- info->si_uid = task_uid(current->parent);
+ info->si_uid = map_cred_ns(__task_cred(current->parent),
+ current_user_ns());
+ rcu_read_unlock();
}
/* If the (new) signal is now blocked, requeue it. */
@@ -2322,6 +2355,27 @@ relock:
return signr;
}
+/**
+ * block_sigmask - add @ka's signal mask to current->blocked
+ * @ka: action for @signr
+ * @signr: signal that has been successfully delivered
+ *
+ * This function should be called when a signal has successfully been
+ * delivered. It adds the mask of signals for @ka to current->blocked
+ * so that they are blocked during the execution of the signal
+ * handler. In addition, @signr will be blocked unless %SA_NODEFER is
+ * set in @ka->sa.sa_flags.
+ */
+void block_sigmask(struct k_sigaction *ka, int signr)
+{
+ sigset_t blocked;
+
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
+}
+
/*
* It could be that complete_signal() picked us to notify about the
* group-wide signal. Other threads should be notified now to take
@@ -2359,8 +2413,15 @@ void exit_signals(struct task_struct *tsk)
int group_stop = 0;
sigset_t unblocked;
+ /*
+ * @tsk is about to have PF_EXITING set - lock out users which
+ * expect stable threadgroup.
+ */
+ threadgroup_change_begin(tsk);
+
if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
tsk->flags |= PF_EXITING;
+ threadgroup_change_end(tsk);
return;
}
@@ -2370,6 +2431,9 @@ void exit_signals(struct task_struct *tsk)
* see wants_signal(), do_signal_stop().
*/
tsk->flags |= PF_EXITING;
+
+ threadgroup_change_end(tsk);
+
if (!signal_pending(tsk))
goto out;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2c71d91efff..4eb3a0fa351 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -347,12 +347,12 @@ void irq_exit(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
- rcu_irq_exit();
#ifdef CONFIG_NO_HZ
/* Make sure that timer wheel updates are propagated */
if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
- tick_nohz_stop_sched_tick(0);
+ tick_nohz_irq_exit();
#endif
+ rcu_irq_exit();
preempt_enable_no_resched();
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 481611fbd07..ddf8155bf3f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
unsigned long maxrss = 0;
memset((char *) r, 0, sizeof *r);
- utime = stime = cputime_zero;
+ utime = stime = 0;
if (who == RUSAGE_THREAD) {
task_times(current, &utime, &stime);
@@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
case RUSAGE_SELF:
thread_group_times(p, &tgutime, &tgstime);
- utime = cputime_add(utime, tgutime);
- stime = cputime_add(stime, tgstime);
+ utime += tgutime;
+ stime += tgstime;
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6318b511afa..a650694883a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
fput(file);
out_putname:
- putname(pathname);
+ __putname(pathname);
out:
return result;
}
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index b26c2228fe9..2cf9cc7aa10 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -25,7 +25,7 @@ config HIGH_RES_TIMERS
config GENERIC_CLOCKEVENTS_BUILD
bool
default y
- depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+ depends on GENERIC_CLOCKEVENTS
config GENERIC_CLOCKEVENTS_MIN_ADJUST
bool
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21..8a46f5d6450 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
struct alarm *alarm;
ktime_t expired = next->expires;
- if (expired.tv64 >= now.tv64)
+ if (expired.tv64 > now.tv64)
break;
alarm = container_of(next, struct alarm, node);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ecd6ba36d6..9cd928f7a7c 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
-#include <linux/sysdev.h>
#include "tick-internal.h"
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cf52fda2e09..a45ca167ab2 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -23,8 +23,8 @@
* o Allow clocksource drivers to be unregistered
*/
+#include <linux/device.h>
#include <linux/clocksource.h>
-#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
@@ -492,6 +492,22 @@ void clocksource_touch_watchdog(void)
}
/**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+ u64 ret;
+ /*
+ * We won't try to correct for more than 11% adjustment (110,000 ppm).
+ */
+ ret = (u64)cs->mult * 11;
+ do_div(ret, 100);
+ return (u32)ret;
+}
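+/*
+ * For instance, a (hypothetical) clocksource with mult == 1000000 would
+ * get maxadj == 110000, i.e. mult adjustments are capped at 11%
+ * (110,000 ppm).
+ */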
+
+/**
* clocksource_max_deferment - Returns max time the clocksource can be deferred
* @cs: Pointer to clocksource
*
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
/*
* Calculate the maximum number of cycles that we can pass to the
* cyc2ns function without overflowing a 64-bit signed result. The
- * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
- * is equivalent to the below.
- * max_cycles < (2^63)/cs->mult
- * max_cycles < 2^(log2((2^63)/cs->mult))
- * max_cycles < 2^(log2(2^63) - log2(cs->mult))
- * max_cycles < 2^(63 - log2(cs->mult))
- * max_cycles < 1 << (63 - log2(cs->mult))
+ * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+ * which is equivalent to the below.
+ * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+ * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+ * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+ * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+ * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
* Please note that we add 1 to the result of the log2 to account for
* any rounding errors, ensure the above inequality is satisfied and
* no overflow will occur.
*/
- max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+ max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
/*
* The actual maximum number of cycles we can defer the clocksource is
* determined by the minimum of max_cycles and cs->mask.
+ * Note: Here we subtract the maxadj to make sure we don't sleep for
+ * too long if there's a large negative adjustment.
*/
max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
- max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+ max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+ cs->shift);
/*
* To ensure that the clocksource does not wrap whilst we are idle,
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
* note a margin of 12.5% is used because this can be computed with
* a shift, versus say 10% which would require division.
*/
- return max_nsecs - (max_nsecs >> 5);
+ return max_nsecs - (max_nsecs >> 3);
}
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -628,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
/**
* __clocksource_updatefreq_scale - Used update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
u64 sec;
-
/*
* Calc the maximum number of seconds which we can run before
* wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
* ~ 0.06ppm granularity for NTP. We apply the same 12.5%
* margin as we do in clocksource_max_deferment()
*/
- sec = (cs->mask - (cs->mask >> 5));
+ sec = (cs->mask - (cs->mask >> 3));
do_div(sec, freq);
do_div(sec, scale);
if (!sec)
@@ -661,13 +679,27 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
NSEC_PER_SEC / scale, sec * scale);
+
+ /*
+ * Since mult may be adjusted by ntp, add an extra safety margin for
+ * clocksources with large mults, to avoid overflow.
+ */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ while ((cs->mult + cs->maxadj < cs->mult)
+ || (cs->mult - cs->maxadj > cs->mult)) {
+ cs->mult >>= 1;
+ cs->shift--;
+ cs->maxadj = clocksource_max_adjustment(cs);
+ }
+
cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
/**
* __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
@@ -695,12 +727,18 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
/**
* clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
*
* Returns -EBUSY if registration fails, zero otherwise.
*/
int clocksource_register(struct clocksource *cs)
{
+ /* calculate max adjustment for given mult/shift */
+ cs->maxadj = clocksource_max_adjustment(cs);
+ WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+ "Clocksource %s might overflow on 11%% adjustment\n",
+ cs->name);
+
/* calculate max idle time permitted for this clocksource */
cs->max_idle_ns = clocksource_max_deferment(cs);
@@ -723,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
/**
* clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
@@ -734,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
/**
* clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
*/
void clocksource_unregister(struct clocksource *cs)
{
@@ -749,13 +790,14 @@ EXPORT_SYMBOL(clocksource_unregister);
/**
* sysfs_show_current_clocksources - sysfs interface for current clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing current clocksource.
*/
static ssize_t
-sysfs_show_current_clocksources(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+sysfs_show_current_clocksources(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
ssize_t count = 0;
@@ -769,14 +811,15 @@ sysfs_show_current_clocksources(struct sys_device *dev,
/**
* sysfs_override_clocksource - interface for manually overriding clocksource
* @dev: unused
+ * @attr: unused
* @buf: name of override clocksource
* @count: length of buffer
*
* Takes input from sysfs interface for manually overriding the default
* clocksource selection.
*/
-static ssize_t sysfs_override_clocksource(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t sysfs_override_clocksource(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
size_t ret = count;
@@ -804,13 +847,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
/**
* sysfs_show_available_clocksources - sysfs interface for listing clocksource
* @dev: unused
+ * @attr: unused
* @buf: char buffer to be filled with clocksource list
*
* Provides sysfs interface for listing registered clocksources
*/
static ssize_t
-sysfs_show_available_clocksources(struct sys_device *dev,
- struct sysdev_attribute *attr,
+sysfs_show_available_clocksources(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct clocksource *src;
@@ -839,35 +883,36 @@ sysfs_show_available_clocksources(struct sys_device *dev,
/*
* Sysfs setup bits:
*/
-static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
+static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
sysfs_override_clocksource);
-static SYSDEV_ATTR(available_clocksource, 0444,
+static DEVICE_ATTR(available_clocksource, 0444,
sysfs_show_available_clocksources, NULL);
-static struct sysdev_class clocksource_sysclass = {
+static struct bus_type clocksource_subsys = {
.name = "clocksource",
+ .dev_name = "clocksource",
};
-static struct sys_device device_clocksource = {
+static struct device device_clocksource = {
.id = 0,
- .cls = &clocksource_sysclass,
+ .bus = &clocksource_subsys,
};
static int __init init_clocksource_sysfs(void)
{
- int error = sysdev_class_register(&clocksource_sysclass);
+ int error = subsys_system_register(&clocksource_subsys, NULL);
if (!error)
- error = sysdev_register(&device_clocksource);
+ error = device_register(&device_clocksource);
if (!error)
- error = sysdev_create_file(
+ error = device_create_file(
&device_clocksource,
- &attr_current_clocksource);
+ &dev_attr_current_clocksource);
if (!error)
- error = sysdev_create_file(
+ error = device_create_file(
&device_clocksource,
- &attr_available_clocksource);
+ &dev_attr_available_clocksource);
return error;
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f954282d9a8..fd4a7b1625a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
- clockevents_exchange_device(NULL, dev);
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 40420644d0b..7656642e4b8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -275,42 +275,17 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
- unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
- struct tick_sched *ts;
+ unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
int cpu;
- local_irq_save(flags);
-
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
- /*
- * Call to tick_nohz_start_idle stops the last_update_time from being
- * updated. Thus, it must not be called in the event we are called from
- * irq_exit() with the prior state different than idle.
- */
- if (!inidle && !ts->inidle)
- goto end;
-
- /*
- * Set ts->inidle unconditionally. Even if the system did not
- * switch to NOHZ mode the cpu frequency governers rely on the
- * update of the idle time accounting in tick_nohz_start_idle().
- */
- ts->inidle = 1;
-
now = tick_nohz_start_idle(cpu, ts);
/*
@@ -326,10 +301,10 @@ void tick_nohz_stop_sched_tick(int inidle)
}
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
- goto end;
+ return;
if (need_resched())
- goto end;
+ return;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
@@ -339,7 +314,7 @@ void tick_nohz_stop_sched_tick(int inidle)
(unsigned int) local_softirq_pending());
ratelimit++;
}
- goto end;
+ return;
}
ts->idle_calls++;
@@ -434,7 +409,6 @@ void tick_nohz_stop_sched_tick(int inidle)
ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
- rcu_enter_nohz();
}
ts->idle_sleeps++;
@@ -472,8 +446,64 @@ out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
- local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ * to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+ struct tick_sched *ts;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ /*
+ * Update the idle state in the scheduler domain hierarchy
+ * when tick_nohz_stop_sched_tick() is called from the idle loop.
+ * State will be updated to busy during the first busy tick after
+ * exiting idle.
+ */
+ set_cpu_sd_state_idle();
+
+ local_irq_disable();
+
+ ts = &__get_cpu_var(tick_cpu_sched);
+ /*
+ * Set ts->inidle unconditionally. Even if the system did not
+ * switch to NOHZ mode, the cpu frequency governors rely on the
+ * update of the idle time accounting in tick_nohz_start_idle().
+ */
+ ts->inidle = 1;
+ tick_nohz_stop_sched_tick(ts);
+
+ local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+ if (!ts->inidle)
+ return;
+
+ tick_nohz_stop_sched_tick(ts);
}
/**
@@ -515,11 +545,13 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
}
/**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
*
* Restart the idle tick when the CPU is woken up from idle
+ * This also exits the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
*/
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -529,6 +561,7 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;
local_irq_disable();
+
if (ts->idle_active || (ts->inidle && ts->tick_stopped))
now = ktime_get();
@@ -543,8 +576,6 @@ void tick_nohz_restart_sched_tick(void)
ts->inidle = 0;
- rcu_exit_nohz();
-
/* Update jiffies first */
select_nohz_load_balancer(0);
tick_do_update_jiffies64(now);
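
With tick_nohz_stop_sched_tick() no longer entering the RCU extended quiescent state itself, an architecture's idle loop is expected to pair the new hooks roughly as in this sketch; cpu_enter_lowpower() is a hypothetical stand-in for the arch's wait-for-interrupt primitive, and this is an illustration, not any particular arch's code:

#include <linux/tick.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void cpu_enter_lowpower(void)
{
	cpu_relax();		/* stand-in for the arch sleep instruction */
}

static void example_cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();		/* may stop the periodic tick */
		while (!need_resched()) {
			rcu_idle_enter();	/* last RCU use before sleeping */
			cpu_enter_lowpower();
			rcu_idle_exit();	/* before first RCU use on wakeup */
		}
		tick_nohz_idle_exit();		/* restart the tick, account idle time */
		schedule();			/* sketch only: real loops handle preemption explicitly */
	}
}
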
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2b021b0e850..0c635818640 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -131,7 +131,7 @@ static inline s64 timekeeping_get_ns_raw(void)
/* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
- /* return delta convert to nanoseconds using ntp adjusted mult. */
+ /* return delta convert to nanoseconds. */
return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
*ts = xtime;
tomono = wall_to_monotonic;
nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
} while (read_seqretry(&xtime_lock, seq));
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
s64 error, interval = timekeeper.cycle_interval;
int adj;
+ /*
+ * The point of this is to check if the error is greater than half
+ * an interval.
+ *
+ * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+ *
+ * Note we subtract one in the shift, so that error is really error*2.
+ * This "saves" dividing(shifting) interval twice, but keeps the
+ * (error > interval) comparison as still measuring if error is
+ * larger then half an interval.
+ *
+ * Note: It does not "save" on aggravation when reading the code.
+ */
error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
if (error > interval) {
+ /*
+ * We now divide error by 4 (via shift), which checks if
+ * the error is greater than twice the interval.
+ * If it is greater, we need a bigadjust; if it's smaller,
+ * we can adjust by 1.
+ */
error >>= 2;
+ /*
+ * XXX - In update_wall_time, we round up to the next
+ * nanosecond, and store the amount rounded up into
+ * the error. This causes the likely below to be unlikely.
+ *
+ * The proper fix is to avoid rounding up by using
+ * the high precision timekeeper.xtime_nsec instead of
+ * xtime.tv_nsec everywhere. Fixing this will take some
+ * time.
+ */
if (likely(error <= interval))
adj = 1;
else
adj = timekeeping_bigadjust(error, &interval, &offset);
} else if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
error >>= 2;
if (likely(error >= -interval)) {
adj = -1;
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
offset = -offset;
} else
adj = timekeeping_bigadjust(error, &interval, &offset);
- } else
+ } else /* No adjustment needed */
return;
+ WARN_ONCE(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj > timekeeper.clock->mult +
+ timekeeper.clock->maxadj),
+ "Adjusting %s more then 11%% (%ld vs %ld)\n",
+ timekeeper.clock->name, (long)timekeeper.mult + adj,
+ (long)timekeeper.clock->mult +
+ timekeeper.clock->maxadj);
+ /*
+ * So the following can be confusing.
+ *
+ * To keep things simple, let's assume adj == 1 for now.
+ *
+ * When adj != 1, remember that the interval and offset values
+ * have been appropriately scaled so the math is the same.
+ *
+ * The basic idea here is that we're increasing the multiplier
+ * by one, this causes the xtime_interval to be incremented by
+ * one cycle_interval. This is because:
+ * xtime_interval = cycle_interval * mult
+ * So if mult is being incremented by one:
+ * xtime_interval = cycle_interval * (mult + 1)
+ * It's the same as:
+ * xtime_interval = (cycle_interval * mult) + cycle_interval
+ * Which can be shortened to:
+ * xtime_interval += cycle_interval
+ *
+ * So offset stores the non-accumulated cycles. Thus the current
+ * time (in shifted nanoseconds) is:
+ * now = (offset * adj) + xtime_nsec
+ * Now, even though we're adjusting the clock frequency, we have
+ * to keep time consistent. In other words, we can't jump back
+ * in time, and we also want to avoid jumping forward in time.
+ *
+ * So given the same offset value, we need the time to be the same
+ * both before and after the freq adjustment.
+ * now = (offset * adj_1) + xtime_nsec_1
+ * now = (offset * adj_2) + xtime_nsec_2
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_2) + xtime_nsec_2
+ * And we know:
+ * adj_2 = adj_1 + 1
+ * So:
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * (adj_1+1)) + xtime_nsec_2
+ * (offset * adj_1) + xtime_nsec_1 =
+ * (offset * adj_1) + offset + xtime_nsec_2
+ * Canceling the sides:
+ * xtime_nsec_1 = offset + xtime_nsec_2
+ * Which gives us:
+ * xtime_nsec_2 = xtime_nsec_1 - offset
+ * Which simplifies to:
+ * xtime_nsec -= offset
+ *
+ * XXX - TODO: Doc ntp_error calculation.
+ */
timekeeper.mult += adj;
timekeeper.xtime_interval += interval;
timekeeper.xtime_nsec -= offset;
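
The algebra in that comment can be sanity-checked with made-up numbers; a throwaway userspace program (toy values only, nothing taken from the kernel) might look like:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 1000;		/* non-accumulated cycles */
	uint64_t mult = 5;		/* current multiplier */
	uint64_t xtime_nsec = 12345;	/* accumulated shifted nanoseconds */

	uint64_t before = offset * mult + xtime_nsec;

	mult += 1;			/* adj = 1 */
	xtime_nsec -= offset;		/* the compensation applied above */

	uint64_t after = offset * mult + xtime_nsec;

	printf("now before = %llu, now after = %llu\n",
	       (unsigned long long)before, (unsigned long long)after);
	return 0;			/* both values print as 17345 */
}
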
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b1..a297ffcf888 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -427,6 +427,12 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state)
}
}
+/* Stub timer callback for improperly used timers. */
+static void stub_timer(unsigned long data)
+{
+ WARN_ON(1);
+}
+
/*
* fixup_activate is called when:
* - an active object is activated
@@ -450,7 +456,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
debug_object_activate(timer, &timer_debug_descr);
return 0;
} else {
- WARN_ON_ONCE(1);
+ setup_timer(timer, stub_timer, 0);
+ return 1;
}
return 0;
@@ -480,12 +487,40 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
}
}
+/*
+ * fixup_assert_init is called when:
+ * - an untracked/uninit-ed object is found
+ */
+static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+ /*
+ * This is not really a fixup. The timer was
+ * statically initialized. We just make sure that it
+ * is tracked in the object tracker.
+ */
+ debug_object_init(timer, &timer_debug_descr);
+ return 0;
+ } else {
+ setup_timer(timer, stub_timer, 0);
+ return 1;
+ }
+ default:
+ return 0;
+ }
+}
+
static struct debug_obj_descr timer_debug_descr = {
- .name = "timer_list",
- .debug_hint = timer_debug_hint,
- .fixup_init = timer_fixup_init,
- .fixup_activate = timer_fixup_activate,
- .fixup_free = timer_fixup_free,
+ .name = "timer_list",
+ .debug_hint = timer_debug_hint,
+ .fixup_init = timer_fixup_init,
+ .fixup_activate = timer_fixup_activate,
+ .fixup_free = timer_fixup_free,
+ .fixup_assert_init = timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
@@ -508,6 +543,11 @@ static inline void debug_timer_free(struct timer_list *timer)
debug_object_free(timer, &timer_debug_descr);
}
+static inline void debug_timer_assert_init(struct timer_list *timer)
+{
+ debug_object_assert_init(timer, &timer_debug_descr);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key);
@@ -531,6 +571,7 @@ EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
+static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
@@ -552,6 +593,11 @@ static inline void debug_deactivate(struct timer_list *timer)
trace_timer_cancel(timer);
}
+static inline void debug_assert_init(struct timer_list *timer)
+{
+ debug_timer_assert_init(timer);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
@@ -902,6 +948,8 @@ int del_timer(struct timer_list *timer)
unsigned long flags;
int ret = 0;
+ debug_assert_init(timer);
+
timer_stats_timer_clear_start_info(timer);
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
@@ -932,6 +980,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
unsigned long flags;
int ret = -1;
+ debug_assert_init(timer);
+
base = lock_timer_base(timer, &flags);
if (base->running_timer == timer)
@@ -1368,7 +1418,7 @@ SYSCALL_DEFINE0(getppid)
int pid;
rcu_read_lock();
- pid = task_tgid_vnr(current->real_parent);
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
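
To see what the new assert_init path catches, consider a hypothetical module (illustrative sketch, not from this patch) that calls del_timer() on a timer that was never set up; with CONFIG_DEBUG_OBJECTS_TIMERS the object tracker now installs stub_timer() and warns instead of silently accepting the untracked timer:

#include <linux/module.h>
#include <linux/timer.h>

static struct timer_list bogus_timer;	/* never init_timer()/setup_timer()-ed */

static int __init bogus_init(void)
{
	/* debug_assert_init() finds no tracked object and no static
	 * initializer, so timer_fixup_assert_init() installs stub_timer()
	 * and a warning is printed. */
	del_timer(&bogus_timer);
	return 0;
}
module_init(bogus_init);

MODULE_LICENSE("GPL");
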
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 16fc34a0806..cdea7b56b0c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -402,7 +402,7 @@ static int blk_remove_buf_file_callback(struct dentry *dentry)
static struct dentry *blk_create_buf_file_callback(const char *filename,
struct dentry *parent,
- int mode,
+ umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900b409543d..b1e8943fed1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
ftrace_pid_function = ftrace_stub;
}
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
if (!src->count) {
free_ftrace_hash_rcu(*dst);
rcu_assign_pointer(*dst, EMPTY_HASH);
- return 0;
+ /* still need to update the function records */
+ ret = 0;
+ goto out;
}
/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f2bd275bb60..a3f1bc5d2a0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -338,7 +338,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+ TRACE_ITER_IRQ_INFO;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@ -426,6 +427,7 @@ static const char *trace_options[] = {
"record-cmd",
"overwrite",
"disable_on_free",
+ "irq-info",
NULL
};
@@ -1843,6 +1845,33 @@ static void s_stop(struct seq_file *m, void *p)
trace_event_read_unlock();
}
+static void
+get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+{
+ unsigned long count;
+ int cpu;
+
+ *total = 0;
+ *entries = 0;
+
+ for_each_tracing_cpu(cpu) {
+ count = ring_buffer_entries_cpu(tr->buffer, cpu);
+ /*
+ * If this buffer has skipped entries, then we hold all
+ * entries for the trace and we need to ignore the
+ * ones before the time stamp.
+ */
+ if (tr->data[cpu]->skipped_entries) {
+ count -= tr->data[cpu]->skipped_entries;
+ /* total is the same as the entries */
+ *total += count;
+ } else
+ *total += count +
+ ring_buffer_overrun_cpu(tr->buffer, cpu);
+ *entries += count;
+ }
+}
+
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n");
@@ -1855,12 +1884,35 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
-static void print_func_help_header(struct seq_file *m)
+static void print_event_info(struct trace_array *tr, struct seq_file *m)
+{
+ unsigned long total;
+ unsigned long entries;
+
+ get_total_entries(tr, &total, &entries);
+ seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
+ entries, total, num_online_cpus());
+ seq_puts(m, "#\n");
+}
+
+static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
- seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
+ print_event_info(tr, m);
+ seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
+static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+{
+ print_event_info(tr, m);
+ seq_puts(m, "# _-----=> irqs-off\n");
+ seq_puts(m, "# / _----=> need-resched\n");
+ seq_puts(m, "# | / _---=> hardirq/softirq\n");
+ seq_puts(m, "# || / _--=> preempt-depth\n");
+ seq_puts(m, "# ||| / delay\n");
+ seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+ seq_puts(m, "# | | | |||| | |\n");
+}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
@@ -1869,32 +1921,14 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
- unsigned long entries = 0;
- unsigned long total = 0;
- unsigned long count;
+ unsigned long entries;
+ unsigned long total;
const char *name = "preemption";
- int cpu;
if (type)
name = type->name;
-
- for_each_tracing_cpu(cpu) {
- count = ring_buffer_entries_cpu(tr->buffer, cpu);
- /*
- * If this buffer has skipped entries, then we hold all
- * entries for the trace and we need to ignore the
- * ones before the time stamp.
- */
- if (tr->data[cpu]->skipped_entries) {
- count -= tr->data[cpu]->skipped_entries;
- /* total is the same as the entries */
- total += count;
- } else
- total += count +
- ring_buffer_overrun_cpu(tr->buffer, cpu);
- entries += count;
- }
+ get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
@@ -2140,6 +2174,21 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
return print_trace_fmt(iter);
}
+void trace_latency_header(struct seq_file *m)
+{
+ struct trace_iterator *iter = m->private;
+
+ /* print nothing if the buffers are empty */
+ if (trace_empty(iter))
+ return;
+
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+ print_trace_header(m, iter);
+
+ if (!(trace_flags & TRACE_ITER_VERBOSE))
+ print_lat_help_header(m);
+}
+
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
@@ -2155,8 +2204,12 @@ void trace_default_header(struct seq_file *m)
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
} else {
- if (!(trace_flags & TRACE_ITER_VERBOSE))
- print_func_help_header(m);
+ if (!(trace_flags & TRACE_ITER_VERBOSE)) {
+ if (trace_flags & TRACE_ITER_IRQ_INFO)
+ print_func_help_header_irq(iter->tr, m);
+ else
+ print_func_help_header(iter->tr, m);
+ }
}
}
@@ -4385,7 +4438,7 @@ static const struct file_operations trace_options_core_fops = {
};
struct dentry *trace_create_file(const char *name,
- mode_t mode,
+ umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
@@ -4775,6 +4828,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
__ftrace_dump(true, oops_dump_mode);
}
+EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 092e1f8d18d..b93ecbadad6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -312,7 +312,7 @@ void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
- mode_t mode,
+ umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops);
@@ -370,6 +370,7 @@ void trace_graph_function(struct trace_array *tr,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
+void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
@@ -654,6 +655,7 @@ enum trace_iterator_flags {
TRACE_ITER_RECORD_CMD = 0x100000,
TRACE_ITER_OVERWRITE = 0x200000,
TRACE_ITER_STOP_ON_FREE = 0x400000,
+ TRACE_ITER_IRQ_INFO = 0x800000,
};
/*
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 581876f9f38..c212a7f934e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
- __get_system(system);
system->nr_events++;
return system->entry;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 816d3d07497..f04cc3136bd 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,6 +27,12 @@
#include "trace.h"
#include "trace_output.h"
+#define DEFAULT_SYS_FILTER_MESSAGE \
+ "### global filter ###\n" \
+ "# Use this to set filters for multiple events.\n" \
+ "# Only events with the given fields will be affected.\n" \
+ "# If no events are modified, an error message will be displayed here"
+
enum filter_op_ids
{
OP_OR,
@@ -646,7 +652,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
- trace_seq_printf(s, "none\n");
+ trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n");
mutex_unlock(&event_mutex);
}
@@ -1649,7 +1655,9 @@ static int replace_system_preds(struct event_subsystem *system,
*/
err = replace_preds(call, NULL, ps, filter_string, true);
if (err)
- goto fail;
+ call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+ else
+ call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
}
list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1666,9 @@ static int replace_system_preds(struct event_subsystem *system,
if (strcmp(call->class->system, system->name) != 0)
continue;
+ if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+ continue;
+
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
@@ -1686,7 +1697,7 @@ static int replace_system_preds(struct event_subsystem *system,
* replace the filter for the call.
*/
filter = call->filter;
- call->filter = filter_item->filter;
+ rcu_assign_pointer(call->filter, filter_item->filter);
filter_item->filter = filter;
fail = false;
@@ -1741,7 +1752,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
filter = call->filter;
if (!filter)
goto out_unlock;
- call->filter = NULL;
+ RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
@@ -1782,7 +1793,7 @@ out:
* string
*/
tmp = call->filter;
- call->filter = filter;
+ rcu_assign_pointer(call->filter, filter);
if (tmp) {
/* Make sure the call is done with the filter */
synchronize_sched();
@@ -1833,7 +1844,10 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
if (!filter)
goto out;
- replace_filter_string(filter, filter_string);
+ /* System filters just show a default message */
+ kfree(filter->filter_string);
+ filter->filter_string = NULL;
+
/*
* No event actually uses the system filter
* we can free it without synchronize_sched().
@@ -1843,14 +1857,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
parse_init(ps, filter_ops, filter_string);
err = filter_parse(ps);
- if (err) {
- append_filter_err(ps, system->filter);
- goto out;
- }
+ if (err)
+ goto err_filter;
err = replace_system_preds(system, ps, filter_string);
if (err)
- append_filter_err(ps, system->filter);
+ goto err_filter;
out:
filter_opstack_clear(ps);
@@ -1860,6 +1872,11 @@ out_unlock:
mutex_unlock(&event_mutex);
return err;
+
+err_filter:
+ replace_filter_string(filter, filter_string);
+ append_filter_err(ps, system->filter);
+ goto out;
}
#ifdef CONFIG_PERF_EVENTS
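
The rcu_assign_pointer()/RCU_INIT_POINTER() changes follow the usual publish-then-retire pattern; a minimal standalone sketch with illustrative names (not the tracing internals) is:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_filter {
	int n_preds;
};

static struct foo_filter __rcu *active_filter;
static DEFINE_MUTEX(foo_filter_lock);

static void foo_publish_filter(struct foo_filter *new)
{
	struct foo_filter *old;

	mutex_lock(&foo_filter_lock);
	old = rcu_dereference_protected(active_filter,
					lockdep_is_held(&foo_filter_lock));
	rcu_assign_pointer(active_filter, new);	/* readers see a complete object */
	mutex_unlock(&foo_filter_lock);

	synchronize_sched();	/* wait out preempt-disabled readers */
	kfree(old);
}
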
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 20dad0d7a16..99d20e92036 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -280,9 +280,20 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 51999309a6c..0d6ff355594 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -627,11 +627,23 @@ int trace_print_context(struct trace_iterator *iter)
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned long secs = (unsigned long)t;
char comm[TASK_COMM_LEN];
+ int ret;
trace_find_cmdline(entry->pid, comm);
- return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
- comm, entry->pid, iter->cpu, secs, usec_rem);
+ ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
+ comm, entry->pid, iter->cpu);
+ if (!ret)
+ return 0;
+
+ if (trace_flags & TRACE_ITER_IRQ_INFO) {
+ ret = trace_print_lat_fmt(s, entry);
+ if (!ret)
+ return 0;
+ }
+
+ return trace_seq_printf(s, " %5lu.%06lu: ",
+ secs, usec_rem);
}
int trace_print_lat_context(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e4a70c0c71b..ff791ea48b5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -280,9 +280,20 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
-static void wakeup_print_header(struct seq_file *s) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void wakeup_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 5bbfac85866..23b4d784ebd 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk)
local_irq_save(flags);
time = tsk->stime + tsk->utime;
- dtime = cputime_sub(time, tsk->acct_timexpd);
+ dtime = time - tsk->acct_timexpd;
jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
delta = value.tv_sec;
delta = delta * USEC_PER_SEC + value.tv_usec;
diff --git a/kernel/wait.c b/kernel/wait.c
index 26fa7797f90..7fdd9eaca2c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,10 +10,10 @@
#include <linux/wait.h>
#include <linux/hash.h>
-void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
spin_lock_init(&q->lock);
- lockdep_set_class(&q->lock, key);
+ lockdep_set_class_and_name(&q->lock, key, name);
INIT_LIST_HEAD(&q->task_list);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42fa9ad0a81..bec7b5b53e0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -242,10 +242,10 @@ struct workqueue_struct {
int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
- const char *name; /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
+ char name[]; /* I: workqueue name */
};
struct workqueue_struct *system_wq __read_mostly;
@@ -2954,14 +2954,29 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
return clamp_val(max_active, 1, lim);
}
-struct workqueue_struct *__alloc_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
struct lock_class_key *key,
- const char *lock_name)
+ const char *lock_name, ...)
{
+ va_list args, args1;
struct workqueue_struct *wq;
unsigned int cpu;
+ size_t namelen;
+
+ /* determine namelen, allocate wq and format name */
+ va_start(args, lock_name);
+ va_copy(args1, args);
+ namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+
+ wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+ if (!wq)
+ goto err;
+
+ vsnprintf(wq->name, namelen, fmt, args1);
+ va_end(args);
+ va_end(args1);
/*
* Workqueues which may be used during memory reclaim should
@@ -2978,12 +2993,9 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
flags |= WQ_HIGHPRI;
max_active = max_active ?: WQ_DFL_ACTIVE;
- max_active = wq_clamp_max_active(max_active, flags, name);
-
- wq = kzalloc(sizeof(*wq), GFP_KERNEL);
- if (!wq)
- goto err;
+ max_active = wq_clamp_max_active(max_active, flags, wq->name);
+ /* init wq */
wq->flags = flags;
wq->saved_max_active = max_active;
mutex_init(&wq->flush_mutex);
@@ -2991,7 +3003,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
- wq->name = name;
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
@@ -3020,7 +3031,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
if (!rescuer)
goto err;
- rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
+ rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+ wq->name);
if (IS_ERR(rescuer->task))
goto err;
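
With the name stored inside the workqueue itself, callers no longer need to keep the string alive; assuming the matching alloc_workqueue() macro change (not shown here) forwards format arguments, a driver sketch could be:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct mycard {			/* illustrative driver structure */
	int index;
	struct workqueue_struct *wq;
};

static int mycard_setup_wq(struct mycard *card)
{
	/* the formatted name is copied into the workqueue allocation */
	card->wq = alloc_workqueue("mycard/%d", WQ_MEM_RECLAIM, 1,
				   card->index);
	return card->wq ? 0 : -ENOMEM;
}
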
diff --git a/lib/Kconfig b/lib/Kconfig
index 36884b409e3..7f6b8bca8c2 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -251,6 +251,9 @@ config CPU_RMAP
bool
depends on SMP
+config DQL
+ bool
+
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -277,10 +280,9 @@ config AVERAGE
If unsure, say N.
config CORDIC
- tristate "Cordic function"
+ tristate "CORDIC algorithm"
help
- The option provides arithmetic function using cordic algorithm
- so its calculations are in fixed point. Modules can select this
- when they require this function. Module will be called cordic.
+ This option provides an implementation of the CORDIC algorithm;
+ calculations are in fixed point. Module will be called cordic.
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 609b2adc604..884ed376164 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -17,7 +17,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-y += kobject.o kref.o klist.o
+lib-y += kobject.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
@@ -116,6 +116,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
obj-$(CONFIG_CORDIC) += cordic.o
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/btree.c b/lib/btree.c
index 2a34392bcec..e5ec1e9c1aa 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -357,6 +357,7 @@ miss:
}
return NULL;
}
+EXPORT_SYMBOL_GPL(btree_get_prev);
static int getpos(struct btree_geo *geo, unsigned long *node,
unsigned long *key)
diff --git a/lib/cordic.c b/lib/cordic.c
index aa27a88d7e0..6cf477839eb 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -96,6 +96,6 @@ struct cordic_iq cordic_calc_iq(s32 theta)
}
EXPORT_SYMBOL(cordic_calc_iq);
-MODULE_DESCRIPTION("Cordic functions");
+MODULE_DESCRIPTION("CORDIC algorithm");
MODULE_AUTHOR("Broadcom Corporation");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/crc32.c b/lib/crc32.c
index a6e633a48ce..4b35d2b4437 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -51,20 +51,21 @@ static inline u32
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
-# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
- tab[2][(crc >> 8) & 255] ^ \
- tab[1][(crc >> 16) & 255] ^ \
- tab[0][(crc >> 24) & 255]
+# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
+# define DO_CRC4 crc = t3[(crc) & 255] ^ \
+ t2[(crc >> 8) & 255] ^ \
+ t1[(crc >> 16) & 255] ^ \
+ t0[(crc >> 24) & 255]
# else
-# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
- tab[1][(crc >> 8) & 255] ^ \
- tab[2][(crc >> 16) & 255] ^ \
- tab[3][(crc >> 24) & 255]
+# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+# define DO_CRC4 crc = t0[(crc) & 255] ^ \
+ t1[(crc >> 8) & 255] ^ \
+ t2[(crc >> 16) & 255] ^ \
+ t3[(crc >> 24) & 255]
# endif
const u32 *b;
size_t rem_len;
+ const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
/* Align it */
if (unlikely((long)buf & 3 && len)) {
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a78b7c6e042..77cb245f8e7 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -268,12 +268,16 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
* Try to repair the damage, so we have a better chance to get useful
* debug output.
*/
-static void
+static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
void * addr, enum debug_obj_state state)
{
+ int fixed = 0;
+
if (fixup)
- debug_objects_fixups += fixup(addr, state);
+ fixed = fixup(addr, state);
+ debug_objects_fixups += fixed;
+ return fixed;
}
static void debug_object_is_on_stack(void *addr, int onstack)
@@ -386,6 +390,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
if (!debug_objects_enabled)
return;
@@ -425,8 +432,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
* let the type specific code decide whether this is
* true or not.
*/
- debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE);
+ if (debug_object_fixup(descr->fixup_activate, addr,
+ ODEBUG_STATE_NOTAVAILABLE))
+ debug_print_object(&o, "activate");
}
/**
@@ -563,6 +571,44 @@ out_unlock:
}
/**
+ * debug_object_assert_init - debug checks when object should be init-ed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ /*
+ * Maybe the object is static. Let the type specific
+ * code decide what to do.
+ */
+ if (debug_object_fixup(descr->fixup_assert_init, addr,
+ ODEBUG_STATE_NOTAVAILABLE))
+ debug_print_object(&o, "assert_init");
+ return;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
* debug_object_active_state - debug checks object usage state machine
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index a7b80c1d6a0..31c5f7675fb 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -1,4 +1,3 @@
-/* vi: set sw = 4 ts = 4: */
/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
@@ -691,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
outbuf = malloc(BZIP2_IOBUF_SIZE);
if (!outbuf) {
- error("Could not allocate output bufer");
+ error("Could not allocate output buffer");
return RETVAL_OUT_OF_MEMORY;
}
if (buf)
@@ -699,7 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
else
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
- error("Could not allocate input bufer");
+ error("Could not allocate input buffer");
i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 476c65af970..32adb73a903 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -562,7 +562,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
else
inbuf = malloc(LZMA_IOBUF_SIZE);
if (!inbuf) {
- error("Could not allocate input bufer");
+ error("Could not allocate input buffer");
goto exit_0;
}
diff --git a/lib/devres.c b/lib/devres.c
index 7c0e953a748..4fbc09e6e9e 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -85,6 +85,57 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
+/**
+ * devm_request_and_ioremap() - Check, request region, and ioremap resource
+ * @dev: Generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Takes all necessary steps to ioremap a mem resource. Uses the managed device API, so
+ * everything is undone on driver detach. Checks arguments, so you can feed
+ * it the result from e.g. platform_get_resource() directly. Returns the
+ * remapped pointer or NULL on error. Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_request_and_ioremap(&pdev->dev, res);
+ * if (!base)
+ * return -EADDRNOTAVAIL;
+ */
+void __iomem *devm_request_and_ioremap(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return NULL;
+ }
+
+ size = resource_size(res);
+ name = res->name ?: dev_name(dev);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return NULL;
+ }
+
+ if (res->flags & IORESOURCE_CACHEABLE)
+ dest_ptr = devm_ioremap(dev, res->start, size);
+ else
+ dest_ptr = devm_ioremap_nocache(dev, res->start, size);
+
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_request_and_ioremap);
+
#ifdef CONFIG_HAS_IOPORT
/*
* Generic iomap devres
@@ -348,5 +399,5 @@ void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif
-#endif
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_HAS_IOPORT */
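
Expanding the kernel-doc example above, a probe() routine using the helper might look like this (illustrative "foo" driver, assuming a single memory resource):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/errno.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EADDRNOTAVAIL;	/* bad resource, busy region or failed ioremap */

	/* the region and mapping are released automatically on detach */
	writel(0, base);		/* example register access */
	return 0;
}
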
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 74c6c7fce74..fea790a2b17 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
- return ((a->dev_addr == a->dev_addr) &&
+ return ((a->dev_addr == b->dev_addr) &&
(a->dev == b->dev)) ? true : false;
}
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644
index 00000000000..3d1bdcdd7db
--- /dev/null
+++ b/lib/dynamic_queue_limits.c
@@ -0,0 +1,133 @@
+/*
+ * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/dynamic_queue_limits.h>
+
+#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+ unsigned int inprogress, prev_inprogress, limit;
+ unsigned int ovlimit, all_prev_completed, completed;
+
+ /* Can't complete more than what's in queue */
+ BUG_ON(count > dql->num_queued - dql->num_completed);
+
+ completed = dql->num_completed + count;
+ limit = dql->limit;
+ ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+ inprogress = dql->num_queued - completed;
+ prev_inprogress = dql->prev_num_queued - dql->num_completed;
+ all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+
+ if ((ovlimit && !inprogress) ||
+ (dql->prev_ovlimit && all_prev_completed)) {
+ /*
+ * Queue considered starved if:
+ * - The queue was over-limit in the last interval,
+ * and there is no more data in the queue.
+ * OR
+ * - The queue was over-limit in the previous interval and
+ * when enqueuing it was possible that all queued data
+ * had been consumed. This covers the case when the queue
+ * may have become starved between completion processing
+ * running and the next time enqueue was scheduled.
+ *
+ * When queue is starved increase the limit by the amount
+ * of bytes both sent and completed in the last interval,
+ * plus any previous over-limit.
+ */
+ limit += POSDIFF(completed, dql->prev_num_queued) +
+ dql->prev_ovlimit;
+ dql->slack_start_time = jiffies;
+ dql->lowest_slack = UINT_MAX;
+ } else if (inprogress && prev_inprogress && !all_prev_completed) {
+ /*
+ * Queue was not starved, check if the limit can be decreased.
+ * A decrease is only considered if the queue has been busy in
+ * the whole interval (the check above).
+ *
+ * If there is slack, the amount of excess data queued above
+ * the amount needed to prevent starvation, the queue limit
+ * can be decreased. To avoid hysteresis we consider the
+ * minimum amount of slack found over several iterations of the
+ * completion routine.
+ */
+ unsigned int slack, slack_last_objs;
+
+ /*
+ * Slack is the maximum of
+ * - The queue limit plus previous over-limit minus twice
+ * the number of objects completed. Note that two times the
+ * number of completed bytes is a basis for an upper bound
+ * of the limit.
+ * - Portion of objects in the last queuing operation that
+ * was not part of non-zero previous over-limit. That is
+ * "round down" by non-overlimit portion of the last
+ * queueing operation.
+ */
+ slack = POSDIFF(limit + dql->prev_ovlimit,
+ 2 * (completed - dql->num_completed));
+ slack_last_objs = dql->prev_ovlimit ?
+ POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+ slack = max(slack, slack_last_objs);
+
+ if (slack < dql->lowest_slack)
+ dql->lowest_slack = slack;
+
+ if (time_after(jiffies,
+ dql->slack_start_time + dql->slack_hold_time)) {
+ limit = POSDIFF(limit, dql->lowest_slack);
+ dql->slack_start_time = jiffies;
+ dql->lowest_slack = UINT_MAX;
+ }
+ }
+
+ /* Enforce bounds on limit */
+ limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+ if (limit != dql->limit) {
+ dql->limit = limit;
+ ovlimit = 0;
+ }
+
+ dql->adj_limit = limit + completed;
+ dql->prev_ovlimit = ovlimit;
+ dql->prev_last_obj_cnt = dql->last_obj_cnt;
+ dql->num_completed = completed;
+ dql->prev_num_queued = dql->num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
+
+void dql_reset(struct dql *dql)
+{
+ /* Reset all dynamic values */
+ dql->limit = 0;
+ dql->num_queued = 0;
+ dql->num_completed = 0;
+ dql->last_obj_cnt = 0;
+ dql->prev_num_queued = 0;
+ dql->prev_last_obj_cnt = 0;
+ dql->prev_ovlimit = 0;
+ dql->lowest_slack = UINT_MAX;
+ dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+int dql_init(struct dql *dql, unsigned hold_time)
+{
+ dql->max_limit = DQL_MAX_LIMIT;
+ dql->min_limit = 0;
+ dql->slack_hold_time = hold_time;
+ dql_reset(dql);
+ return 0;
+}
+EXPORT_SYMBOL(dql_init);
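
The counters are meant to be driven from a driver's transmit and completion paths; a rough sketch of the intended usage, assuming the dql_queued()/dql_avail() inline helpers from the header named above (locking and the actual queue start/stop plumbing omitted):

#include <linux/kernel.h>
#include <linux/dynamic_queue_limits.h>

struct tx_ring {			/* illustrative per-queue state */
	struct dql dql;
};

static void tx_ring_init(struct tx_ring *ring)
{
	dql_init(&ring->dql, HZ);	/* one-second slack hold time */
}

static void tx_enqueue(struct tx_ring *ring, unsigned int bytes)
{
	dql_queued(&ring->dql, bytes);	/* bytes handed to the hardware */
	if (dql_avail(&ring->dql) < 0) {
		/* over the limit: stop the queue here */
	}
}

static void tx_complete(struct tx_ring *ring, unsigned int bytes)
{
	dql_completed(&ring->dql, bytes);	/* recompute the limit */
	if (dql_avail(&ring->dql) >= 0) {
		/* below the limit again: wake the queue here */
	}
}
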
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 4f7554025e3..b4801f51b60 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -149,7 +149,7 @@ static int debugfs_ul_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
-static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
+static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_ul);
@@ -169,7 +169,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
static struct dentry *debugfs_create_stacktrace_depth(
- const char *name, mode_t mode,
+ const char *name, umode_t mode,
struct dentry *parent, unsigned long *value)
{
return debugfs_create_file(name, mode, parent, value,
@@ -193,7 +193,7 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
debugfs_atomic_t_set, "%lld\n");
-static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
+static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
@@ -202,7 +202,7 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *parent, struct fault_attr *attr)
{
- mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
diff --git a/lib/kobject.c b/lib/kobject.c
index 640bd98a4c8..c33d7a18d63 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,43 +746,11 @@ void kset_unregister(struct kset *k)
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
- return kset_find_obj_hinted(kset, name, NULL);
-}
-
-/**
- * kset_find_obj_hinted - search for object in kset given a predecessor hint.
- * @kset: kset we're looking in.
- * @name: object's name.
- * @hint: hint to possible object's predecessor.
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to the behavior of kset_find_obj(). Either way
- * a reference for the returned object is held and the reference on the
- * hinted object is released.
- */
-struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
- struct kobject *hint)
-{
struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
- if (!hint)
- goto slow_search;
-
- /* end of list detection */
- if (hint->entry.next == kset->list.next)
- goto slow_search;
-
- k = container_of(hint->entry.next, struct kobject, entry);
- if (!kobject_name(k) || strcmp(kobject_name(k), name))
- goto slow_search;
-
- ret = kobject_get(k);
- goto unlock_exit;
-
-slow_search:
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
@@ -790,12 +758,7 @@ slow_search:
}
}
-unlock_exit:
spin_unlock(&kset->list_lock);
-
- if (hint)
- kobject_put(hint);
-
return ret;
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index ad72a03ce5e..e66e9b63261 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -259,6 +259,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
struct sk_buff *skb;
size_t len;
+ if (!netlink_has_listeners(uevent_sock, 1))
+ continue;
+
/* allocate message with the maximum possible size */
len = strlen(action_string) + strlen(devpath) + 2;
skb = alloc_skb(len + env->buflen, GFP_KERNEL);
diff --git a/lib/kref.c b/lib/kref.c
deleted file mode 100644
index 3efb882b11d..00000000000
--- a/lib/kref.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * kref.c - library routines for handling generic reference counted objects
- *
- * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
- * Copyright (C) 2004 IBM Corp.
- *
- * based on lib/kobject.c which was:
- * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
- *
- * This file is released under the GPLv2.
- *
- */
-
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-/**
- * kref_init - initialize object.
- * @kref: object in question.
- */
-void kref_init(struct kref *kref)
-{
- atomic_set(&kref->refcount, 1);
- smp_mb();
-}
-
-/**
- * kref_get - increment refcount for object.
- * @kref: object.
- */
-void kref_get(struct kref *kref)
-{
- WARN_ON(!atomic_read(&kref->refcount));
- atomic_inc(&kref->refcount);
- smp_mb__after_atomic_inc();
-}
-
-/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
- *
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_put(struct kref *kref, void (*release)(struct kref *kref))
-{
- WARN_ON(release == NULL);
- WARN_ON(release == (void (*)(struct kref *))kfree);
-
- if (atomic_dec_and_test(&kref->refcount)) {
- release(kref);
- return 1;
- }
- return 0;
-}
-
-
-/**
- * kref_sub - subtract a number of refcounts for object.
- * @kref: object.
- * @count: Number of recounts to subtract.
- * @release: pointer to the function that will clean up the object when the
- * last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
- *
- * Subtract @count from the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
- */
-int kref_sub(struct kref *kref, unsigned int count,
- void (*release)(struct kref *kref))
-{
- WARN_ON(release == NULL);
- WARN_ON(release == (void (*)(struct kref *))kfree);
-
- if (atomic_sub_and_test((int) count, &kref->refcount)) {
- release(kref);
- return 1;
- }
- return 0;
-}
-
-EXPORT_SYMBOL(kref_init);
-EXPORT_SYMBOL(kref_get);
-EXPORT_SYMBOL(kref_put);
-EXPORT_SYMBOL(kref_sub);
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
index 6a3bd48fa2a..75510e94f7d 100644
--- a/lib/reciprocal_div.c
+++ b/lib/reciprocal_div.c
@@ -1,5 +1,6 @@
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
+#include <linux/export.h>
u32 reciprocal_value(u32 k)
{
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k)
do_div(val, k);
return (u32)val;
}
+EXPORT_SYMBOL(reciprocal_value);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 99093b39614..058935ef397 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
-unsigned long swioltb_nr_tbl(void)
+unsigned long swiotlb_nr_tbl(void)
{
return io_tlb_nslabs;
}
-
+EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
free_bootmem_late(__pa(io_tlb_start),
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
+ io_tlb_nslabs = 0;
}
static int is_swiotlb_buffer(phys_addr_t paddr)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 993599e66e5..8e75003d62f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -777,6 +777,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
+static
+char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec)
+{
+ spec.flags |= SPECIAL | SMALL | ZEROPAD;
+ if (spec.field_width == -1)
+ spec.field_width = 2 + 2 * sizeof(netdev_features_t);
+ spec.base = 16;
+
+ return number(buf, end, *(const netdev_features_t *)addr, spec);
+}
+
int kptr_restrict __read_mostly;
/*
@@ -824,6 +836,7 @@ int kptr_restrict __read_mostly;
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'NF' For a netdev_features_t
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -896,6 +909,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
has_capability_noaudit(current, CAP_SYSLOG))))
ptr = NULL;
break;
+ case 'N':
+ switch (fmt[1]) {
+ case 'F':
+ return netdev_feature_string(buf, end, ptr, spec);
+ }
+ break;
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
diff --git a/mm/Kconfig b/mm/Kconfig
index 011b110365c..e338407f122 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -131,6 +131,12 @@ config SPARSEMEM_VMEMMAP
config HAVE_MEMBLOCK
boolean
+config HAVE_MEMBLOCK_NODE_MAP
+ boolean
+
+config ARCH_DISCARD_MEMBLOCK
+ boolean
+
config NO_BOOTMEM
boolean
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 8b1a477162d..4b2443254de 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
depends on !KMEMCHECK
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS
config PAGE_POISONING
bool
select WANT_PAGE_DEBUG_FLAGS
+
+config PAGE_GUARD
+ bool
+ select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 71034f41a2b..7ba8feae11b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
/*
* Finally, kill the kernel thread. We don't need to be RCU
- * safe anymore, since the bdi is gone from visibility. Force
- * unfreeze of the thread before calling kthread_stop(), otherwise
- * it would never exet if it is currently stuck in the refrigerator.
+ * safe anymore, since the bdi is gone from visibility.
*/
- if (bdi->wb.task) {
- thaw_process(bdi->wb.task);
+ if (bdi->wb.task)
kthread_stop(bdi->wb.task);
- }
}
/*
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1a77012ecdb..668e94df8cf 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -56,7 +56,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
static unsigned long __init bootmap_bytes(unsigned long pages)
{
- unsigned long bytes = (pages + 7) / 8;
+ unsigned long bytes = DIV_ROUND_UP(pages, 8);
return ALIGN(bytes, sizeof(long));
}
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
- int aligned;
struct page *page;
unsigned long start, end, pages, count = 0;
@@ -181,14 +180,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
start = bdata->node_min_pfn;
end = bdata->node_low_pfn;
- /*
- * If the start is aligned to the machines wordsize, we might
- * be able to free pages in bulks of that order.
- */
- aligned = !(start & (BITS_PER_LONG - 1));
-
- bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
- bdata - bootmem_node_data, start, end, aligned);
+ bdebug("nid=%td start=%lx end=%lx\n",
+ bdata - bootmem_node_data, start, end);
while (start < end) {
unsigned long *map, idx, vec;
@@ -196,12 +189,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
vec = ~map[idx / BITS_PER_LONG];
-
- if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
+ /*
+ * If we have a properly aligned and fully unreserved
+ * BITS_PER_LONG block of pages in front of us, free
+ * it in one go.
+ */
+ if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
int order = ilog2(BITS_PER_LONG);
__free_pages_bootmem(pfn_to_page(start), order);
count += BITS_PER_LONG;
+ start += BITS_PER_LONG;
} else {
unsigned long off = 0;
@@ -214,8 +212,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
vec >>= 1;
off++;
}
+ start = ALIGN(start + 1, BITS_PER_LONG);
}
- start += BITS_PER_LONG;
}
page = virt_to_page(bdata->node_bootmem_map);
diff --git a/mm/compaction.c b/mm/compaction.c
index 899d9563858..e6670c34eb4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -365,8 +365,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
nr_isolated++;
/* Avoid isolating too much */
- if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+ ++low_pfn;
break;
+ }
}
acct_isolated(zone, cc);
@@ -721,23 +723,23 @@ int sysctl_extfrag_handler(struct ctl_table *table, int write,
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
-ssize_t sysfs_compact_node(struct sys_device *dev,
- struct sysdev_attribute *attr,
+ssize_t sysfs_compact_node(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
compact_node(dev->id);
return count;
}
-static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
+static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
int compaction_register_node(struct node *node)
{
- return sysdev_create_file(&node->sysdev, &attr_compact);
+ return device_create_file(&node->dev, &dev_attr_compact);
}
void compaction_unregister_node(struct node *node)
{
- return sysdev_remove_file(&node->sysdev, &attr_compact);
+ return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 8d723c9e8b7..469491e0af7 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -117,7 +117,8 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
break;
case POSIX_FADV_DONTNEED:
if (!bdi_write_congested(mapping->backing_dev_info))
- filemap_flush(mapping);
+ __filemap_fdatawrite_range(mapping, offset, endbyte,
+ WB_SYNC_NONE);
/* First and last FULL page! */
start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
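/*
 * [Editorial aside - not part of the patch] With the fadvise change above,
 * POSIX_FADV_DONTNEED starts writeback only for the advised byte range rather
 * than flushing the whole mapping.  From userspace the advice is issued with
 * the standard posix_fadvise() call; a small example (the file path is made
 * up for illustration):
 */
#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);

	if (fd < 0)
		return 1;

	/* tell the kernel the first 1 MiB of cached data is no longer needed */
	int err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_DONTNEED);

	if (err)
		fprintf(stderr, "posix_fadvise: error %d\n", err);
	close(fd);
	return err ? 1 : 0;
}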
diff --git a/mm/failslab.c b/mm/failslab.c
index 0dd7b8fec71..fefaabaab76 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -35,7 +35,7 @@ __setup("failslab=", setup_failslab);
static int __init failslab_debugfs_init(void)
{
struct dentry *dir;
- mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
if (IS_ERR(dir))
diff --git a/mm/filemap.c b/mm/filemap.c
index c0018f2d50e..c4ee2e918be 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1828,7 +1828,7 @@ repeat:
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
- err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+ err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
@@ -1971,7 +1968,7 @@ EXPORT_SYMBOL(read_cache_page);
*/
int should_remove_suid(struct dentry *dentry)
{
- mode_t mode = dentry->d_inode->i_mode;
+ umode_t mode = dentry->d_inode->i_mode;
int kill = 0;
/* suid always must be killed */
@@ -2354,8 +2351,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags)
{
int status;
+ gfp_t gfp_mask;
struct page *page;
gfp_t gfp_notmask = 0;
+
+ gfp_mask = mapping_gfp_mask(mapping) | __GFP_WRITE;
if (flags & AOP_FLAG_NOFS)
gfp_notmask = __GFP_FS;
repeat:
@@ -2363,7 +2363,7 @@ repeat:
if (page)
goto found;
- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+ page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
if (!page)
return NULL;
status = add_to_page_cache_lru(page, mapping, index,
@@ -2407,7 +2407,6 @@ static ssize_t generic_perform_write(struct file *file,
iov_iter_count(i));
again:
-
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2462,10 @@ again:
written += copied;
balance_dirty_pages_ratelimited(mapping);
-
+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
} while (iov_iter_count(i));
return written ? written : status;
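/*
 * [Editorial aside - not part of the patch] The filemap hunk above lets a
 * buffered write bail out with -EINTR once a fatal signal is pending instead
 * of grinding through the remaining iovec.  A rough userspace analogue of the
 * same "check for cancellation inside a long copy loop" shape, using a signal
 * flag rather than the kernel's fatal_signal_pending():
 */
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>

static volatile sig_atomic_t got_signal;

static void on_signal(int sig)
{
	(void)sig;
	got_signal = 1;
}

/* copy in chunks, stopping early once a signal has arrived */
static ssize_t chunked_copy(char *dst, const char *src, size_t len)
{
	size_t done = 0;

	while (done < len) {
		size_t chunk = len - done < 4096 ? len - done : 4096;

		memcpy(dst + done, src + done, chunk);
		done += chunk;

		/* report partial progress if any, otherwise -EINTR */
		if (got_signal)
			return done ? (ssize_t)done : -EINTR;
	}
	return done;
}

int main(void)
{
	static char src[1 << 16], dst[1 << 16];

	signal(SIGINT, on_signal);
	return chunked_copy(dst, src, sizeof(src)) < 0;
}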
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4298abaae15..36b3d988b4e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
static void khugepaged_alloc_sleep(void)
{
- DEFINE_WAIT(wait);
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_alloc_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
- DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_scan_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb28a5f9db8..ea8c3a4cd2a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
@@ -799,7 +800,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
if (page && arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
- return NULL;
+ page = NULL;
}
spin_lock(&hugetlb_lock);
@@ -900,7 +901,6 @@ retry:
h->resv_huge_pages += delta;
ret = 0;
- spin_unlock(&hugetlb_lock);
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
@@ -914,6 +914,7 @@ retry:
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
+ spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
free:
@@ -1591,9 +1592,9 @@ static void __init hugetlb_sysfs_init(void)
/*
* node_hstate/s - associate per node hstate attributes, via their kobjects,
- * with node sysdevs in node_devices[] using a parallel array. The array
- * index of a node sysdev or _hstate == node id.
- * This is here to avoid any static dependency of the node sysdev driver, in
+ * with node devices in node_devices[] using a parallel array. The array
+ * index of a node device or _hstate == node id.
+ * This is here to avoid any static dependency of the node device driver, in
* the base kernel, on the hugetlb module.
*/
struct node_hstate {
@@ -1603,7 +1604,7 @@ struct node_hstate {
struct node_hstate node_hstates[MAX_NUMNODES];
/*
- * A subset of global hstate attributes for node sysdevs
+ * A subset of global hstate attributes for node devices
*/
static struct attribute *per_node_hstate_attrs[] = {
&nr_hugepages_attr.attr,
@@ -1617,7 +1618,7 @@ static struct attribute_group per_node_hstate_attr_group = {
};
/*
- * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
* Returns node id via non-NULL nidp.
*/
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
@@ -1640,13 +1641,13 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
}
/*
- * Unregister hstate attributes from a single node sysdev.
+ * Unregister hstate attributes from a single node device.
* No-op if no hstate attributes attached.
*/
void hugetlb_unregister_node(struct node *node)
{
struct hstate *h;
- struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ struct node_hstate *nhs = &node_hstates[node->dev.id];
if (!nhs->hugepages_kobj)
return; /* no hstate attributes */
@@ -1662,7 +1663,7 @@ void hugetlb_unregister_node(struct node *node)
}
/*
- * hugetlb module exit: unregister hstate attributes from node sysdevs
+ * hugetlb module exit: unregister hstate attributes from node devices
* that have them.
*/
static void hugetlb_unregister_all_nodes(void)
@@ -1670,7 +1671,7 @@ static void hugetlb_unregister_all_nodes(void)
int nid;
/*
- * disable node sysdev registrations.
+ * disable node device registrations.
*/
register_hugetlbfs_with_node(NULL, NULL);
@@ -1682,20 +1683,20 @@ static void hugetlb_unregister_all_nodes(void)
}
/*
- * Register hstate attributes for a single node sysdev.
+ * Register hstate attributes for a single node device.
* No-op if attributes already registered.
*/
void hugetlb_register_node(struct node *node)
{
struct hstate *h;
- struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ struct node_hstate *nhs = &node_hstates[node->dev.id];
int err;
if (nhs->hugepages_kobj)
return; /* already allocated */
nhs->hugepages_kobj = kobject_create_and_add("hugepages",
- &node->sysdev.kobj);
+ &node->dev.kobj);
if (!nhs->hugepages_kobj)
return;
@@ -1706,7 +1707,7 @@ void hugetlb_register_node(struct node *node)
if (err) {
printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
" for node %d\n",
- h->name, node->sysdev.id);
+ h->name, node->dev.id);
hugetlb_unregister_node(node);
break;
}
@@ -1715,8 +1716,8 @@ void hugetlb_register_node(struct node *node)
/*
* hugetlb init time: register hstate attributes for all registered node
- * sysdevs of nodes that have memory. All on-line nodes should have
- * registered their associated sysdev by this time.
+ * devices of nodes that have memory. All on-line nodes should have
+ * registered their associated device by this time.
*/
static void hugetlb_register_all_nodes(void)
{
@@ -1724,12 +1725,12 @@ static void hugetlb_register_all_nodes(void)
for_each_node_state(nid, N_HIGH_MEMORY) {
struct node *node = &node_devices[nid];
- if (node->sysdev.id == nid)
+ if (node->dev.id == nid)
hugetlb_register_node(node);
}
/*
- * Let the node sysdev driver know we're here so it can
+ * Let the node device driver know we're here so it can
* [un]register hstate attributes on node hotplug.
*/
register_hugetlbfs_with_node(hugetlb_register_node,
@@ -2314,8 +2315,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from page cache lookup which is in HPAGE_SIZE units.
*/
address = address & huge_page_mask(h);
- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
- + (vma->vm_pgoff >> PAGE_SHIFT);
+ pgoff = vma_hugecache_offset(h, vma, address);
mapping = (struct address_space *)page_private(page);
/*
@@ -2348,6 +2348,9 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
+ * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+ * cannot race with other handlers or page migration.
+ * Keep the pte_same checks anyway to make transition from the mutex easier.
*/
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
@@ -2407,7 +2410,14 @@ retry_avoidcopy:
BUG_ON(page_count(old_page) != 1);
BUG_ON(huge_pte_none(pte));
spin_lock(&mm->page_table_lock);
- goto retry_avoidcopy;
+ ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+ if (likely(pte_same(huge_ptep_get(ptep), pte)))
+ goto retry_avoidcopy;
+ /*
+ * race occurs while re-acquiring page_table_lock, and
+ * our job is done.
+ */
+ return 0;
}
WARN_ON_ONCE(1);
}
@@ -2629,6 +2639,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
+ address &= huge_page_mask(h);
+
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
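/*
 * [Editorial aside - not part of the patch] hugetlb_fault() now masks the
 * faulting address down to the containing huge page once, up front.  The
 * masking itself is plain power-of-two arithmetic; a tiny sketch (the 2 MiB
 * huge page size is an assumed example value):
 */
#include <assert.h>
#include <stdint.h>

/* round addr down to the start of its huge page (size must be a power of two) */
static uint64_t huge_page_round_down(uint64_t addr, uint64_t huge_page_size)
{
	return addr & ~(huge_page_size - 1);
}

int main(void)
{
	const uint64_t sz = 2ULL << 20;		/* assume 2 MiB huge pages */

	assert(huge_page_round_down(0x200000, sz) == 0x200000);
	assert(huge_page_round_down(0x2ABCDE, sz) == 0x200000);
	return 0;
}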
diff --git a/mm/memblock.c b/mm/memblock.c
index 84bec4969ed..2f55f19b7c8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,12 +20,23 @@
#include <linux/seq_file.h>
#include <linux/memblock.h>
-struct memblock memblock __initdata_memblock;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+
+struct memblock memblock __initdata_memblock = {
+ .memory.regions = memblock_memory_init_regions,
+ .memory.cnt = 1, /* empty dummy entry */
+ .memory.max = INIT_MEMBLOCK_REGIONS,
+
+ .reserved.regions = memblock_reserved_init_regions,
+ .reserved.cnt = 1, /* empty dummy entry */
+ .reserved.max = INIT_MEMBLOCK_REGIONS,
+
+ .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
+};
int memblock_debug __initdata_memblock;
-int memblock_can_resize __initdata_memblock;
-static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
+static int memblock_can_resize __initdata_memblock;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
@@ -38,20 +49,15 @@ static inline const char *memblock_type_name(struct memblock_type *type)
return "unknown";
}
-/*
- * Address comparison utilities
- */
-
-static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
- return addr & ~(size - 1);
-}
-
-static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
+/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
+static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
- return (addr + (size - 1)) & ~(size - 1);
+ return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
+/*
+ * Address comparison utilities
+ */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
phys_addr_t base2, phys_addr_t size2)
{
@@ -73,83 +79,66 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
return (i < type->cnt) ? i : -1;
}
-/*
- * Find, allocate, deallocate or reserve unreserved regions. All allocations
- * are top-down.
+/**
+ * memblock_find_in_range_node - find free area in given range and node
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
*/
-
-static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align)
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align, int nid)
{
- phys_addr_t base, res_base;
- long j;
-
- /* In case, huge size is requested */
- if (end < size)
- return MEMBLOCK_ERROR;
-
- base = memblock_align_down((end - size), align);
+ phys_addr_t this_start, this_end, cand;
+ u64 i;
- /* Prevent allocations returning 0 as it's also used to
- * indicate an allocation failure
- */
- if (start == 0)
- start = PAGE_SIZE;
-
- while (start <= base) {
- j = memblock_overlaps_region(&memblock.reserved, base, size);
- if (j < 0)
- return base;
- res_base = memblock.reserved.regions[j].base;
- if (res_base < size)
- break;
- base = memblock_align_down(res_base - size, align);
- }
+ /* align @size to avoid excessive fragmentation on reserved array */
+ size = round_up(size, align);
- return MEMBLOCK_ERROR;
-}
-
-static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
- phys_addr_t align, phys_addr_t start, phys_addr_t end)
-{
- long i;
-
- BUG_ON(0 == size);
-
- /* Pump up max_addr */
+ /* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
end = memblock.current_limit;
- /* We do a top-down search, this tends to limit memory
- * fragmentation by keeping early boot allocs near the
- * top of memory
- */
- for (i = memblock.memory.cnt - 1; i >= 0; i--) {
- phys_addr_t memblockbase = memblock.memory.regions[i].base;
- phys_addr_t memblocksize = memblock.memory.regions[i].size;
- phys_addr_t bottom, top, found;
+ /* adjust @start to avoid underflow and allocating the first page */
+ start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+ end = max(start, end);
- if (memblocksize < size)
- continue;
- if ((memblockbase + memblocksize) <= start)
- break;
- bottom = max(memblockbase, start);
- top = min(memblockbase + memblocksize, end);
- if (bottom >= top)
- continue;
- found = memblock_find_region(bottom, top, size, align);
- if (found != MEMBLOCK_ERROR)
- return found;
+ for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
+
+ cand = round_down(this_end - size, align);
+ if (cand >= this_start)
+ return cand;
}
- return MEMBLOCK_ERROR;
+ return 0;
}
-/*
- * Find a free area with specified alignment in a specific range.
+/**
+ * memblock_find_in_range - find free area in given range
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ *
+ * Find @size free area aligned to @align in the specified range.
+ *
+ * RETURNS:
+ * Found address on success, %0 on failure.
*/
-u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
+phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align)
{
- return memblock_find_base(size, align, start, end);
+ return memblock_find_in_range_node(start, end, size, align,
+ MAX_NUMNODES);
}
/*
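/*
 * [Editorial aside - not part of the patch] The new memblock_find_in_range_node()
 * above walks free areas from the top down, clamps each to the requested window,
 * and returns the highest aligned candidate, with 0 doubling as "not found".  A
 * simplified userspace sketch of that search over a plain array of free ranges
 * (no node awareness; @align must be a power of two; names are local here):
 */
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* [start, end), sorted ascending */

static uint64_t clamp64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static uint64_t find_in_range(const struct range *free, int nr,
			      uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	for (int i = nr - 1; i >= 0; i--) {
		uint64_t lo = clamp64(free[i].start, start, end);
		uint64_t hi = clamp64(free[i].end, start, end);

		if (hi - lo < size)
			continue;
		/* highest aligned candidate that still fits the range */
		uint64_t cand = (hi - size) & ~(align - 1);

		if (cand >= lo)
			return cand;
	}
	return 0;	/* 0 doubles as the failure value, as in the hunk above */
}

int main(void)
{
	struct range free[] = { { 0x1000, 0x8000 }, { 0x100000, 0x180000 } };

	/* prefers the highest suitable range, i.e. a top-down allocation */
	printf("%#llx\n", (unsigned long long)
	       find_in_range(free, 2, 0x1000, ~0ULL, 0x4000, 0x1000));
	return 0;
}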
@@ -178,25 +167,21 @@ int __init_memblock memblock_reserve_reserved_regions(void)
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
- unsigned long i;
-
- for (i = r; i < type->cnt - 1; i++) {
- type->regions[i].base = type->regions[i + 1].base;
- type->regions[i].size = type->regions[i + 1].size;
- }
+ type->total_size -= type->regions[r].size;
+ memmove(&type->regions[r], &type->regions[r + 1],
+ (type->cnt - (r + 1)) * sizeof(type->regions[r]));
type->cnt--;
/* Special case for empty arrays */
if (type->cnt == 0) {
+ WARN_ON(type->total_size != 0);
type->cnt = 1;
type->regions[0].base = 0;
type->regions[0].size = 0;
+ memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
}
}
-/* Defined below but needed now */
-static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
-
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
struct memblock_region *new_array, *old_array;
@@ -226,10 +211,10 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
*/
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
- addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+ addr = new_array ? __pa(new_array) : 0;
} else
- addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
- if (addr == MEMBLOCK_ERROR) {
+ addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+ if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
memblock_type_name(type), type->max, type->max * 2);
return -1;
@@ -254,7 +239,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
return 0;
/* Add the new reserved region now. Should not fail ! */
- BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
+ BUG_ON(memblock_reserve(addr, new_size));
/* If the array wasn't our static init one, then free it. We only do
* that before SLAB is available as later on, we don't know whether
@@ -268,343 +253,514 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
return 0;
}
-int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
- phys_addr_t addr2, phys_addr_t size2)
-{
- return 1;
-}
-
-static long __init_memblock memblock_add_region(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size)
+/**
+ * memblock_merge_regions - merge neighboring compatible regions
+ * @type: memblock type to scan
+ *
+ * Scan @type and merge neighboring compatible regions.
+ */
+static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
- phys_addr_t end = base + size;
- int i, slot = -1;
-
- /* First try and coalesce this MEMBLOCK with others */
- for (i = 0; i < type->cnt; i++) {
- struct memblock_region *rgn = &type->regions[i];
- phys_addr_t rend = rgn->base + rgn->size;
+ int i = 0;
- /* Exit if there's no possible hits */
- if (rgn->base > end || rgn->size == 0)
- break;
+ /* cnt never goes below 1 */
+ while (i < type->cnt - 1) {
+ struct memblock_region *this = &type->regions[i];
+ struct memblock_region *next = &type->regions[i + 1];
- /* Check if we are fully enclosed within an existing
- * block
- */
- if (rgn->base <= base && rend >= end)
- return 0;
+ if (this->base + this->size != next->base ||
+ memblock_get_region_node(this) !=
+ memblock_get_region_node(next)) {
+ BUG_ON(this->base + this->size > next->base);
+ i++;
+ continue;
+ }
- /* Check if we overlap or are adjacent with the bottom
- * of a block.
- */
- if (base < rgn->base && end >= rgn->base) {
- /* If we can't coalesce, create a new block */
- if (!memblock_memory_can_coalesce(base, size,
- rgn->base,
- rgn->size)) {
- /* Overlap & can't coalesce are mutually
- * exclusive, if you do that, be prepared
- * for trouble
- */
- WARN_ON(end != rgn->base);
- goto new_block;
- }
- /* We extend the bottom of the block down to our
- * base
- */
- rgn->base = base;
- rgn->size = rend - base;
+ this->size += next->size;
+ memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+ type->cnt--;
+ }
+}
- /* Return if we have nothing else to allocate
- * (fully coalesced)
- */
- if (rend >= end)
- return 0;
+/**
+ * memblock_insert_region - insert new memblock region
+ * @type: memblock type to insert into
+ * @idx: index for the insertion point
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * @type must already have extra room to accommodate the new region.
+ */
+static void __init_memblock memblock_insert_region(struct memblock_type *type,
+ int idx, phys_addr_t base,
+ phys_addr_t size, int nid)
+{
+ struct memblock_region *rgn = &type->regions[idx];
- /* We continue processing from the end of the
- * coalesced block.
- */
- base = rend;
- size = end - base;
- }
+ BUG_ON(type->cnt >= type->max);
+ memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
+ rgn->base = base;
+ rgn->size = size;
+ memblock_set_region_node(rgn, nid);
+ type->cnt++;
+ type->total_size += size;
+}
- /* Now check if we overlap or are adjacent with the
- * top of a block
- */
- if (base <= rend && end >= rend) {
- /* If we can't coalesce, create a new block */
- if (!memblock_memory_can_coalesce(rgn->base,
- rgn->size,
- base, size)) {
- /* Overlap & can't coalesce are mutually
- * exclusive, if you do that, be prepared
- * for trouble
- */
- WARN_ON(rend != base);
- goto new_block;
- }
- /* We adjust our base down to enclose the
- * original block and destroy it. It will be
- * part of our new allocation. Since we've
- * freed an entry, we know we won't fail
- * to allocate one later, so we won't risk
- * losing the original block allocation.
- */
- size += (base - rgn->base);
- base = rgn->base;
- memblock_remove_region(type, i--);
- }
- }
+/**
+ * memblock_add_region - add new memblock region
+ * @type: memblock type to add new region into
+ * @base: base address of the new region
+ * @size: size of the new region
+ * @nid: nid of the new region
+ *
+ * Add new memblock region [@base,@base+@size) into @type. The new region
+ * is allowed to overlap with existing ones - overlaps don't affect already
+ * existing regions. @type is guaranteed to be minimal (all neighbouring
+ * compatible regions are merged) after the addition.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_add_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size, int nid)
+{
+ bool insert = false;
+ phys_addr_t obase = base;
+ phys_addr_t end = base + memblock_cap_size(base, &size);
+ int i, nr_new;
- /* If the array is empty, special case, replace the fake
- * filler region and return
- */
- if ((type->cnt == 1) && (type->regions[0].size == 0)) {
+ /* special case for empty array */
+ if (type->regions[0].size == 0) {
+ WARN_ON(type->cnt != 1 || type->total_size);
type->regions[0].base = base;
type->regions[0].size = size;
+ memblock_set_region_node(&type->regions[0], nid);
+ type->total_size = size;
return 0;
}
-
- new_block:
- /* If we are out of space, we fail. It's too late to resize the array
- * but then this shouldn't have happened in the first place.
+repeat:
+ /*
+ * The following is executed twice. Once with %false @insert and
+ * then with %true. The first counts the number of regions needed
+ * to accommodate the new area. The second actually inserts them.
*/
- if (WARN_ON(type->cnt >= type->max))
- return -1;
+ base = obase;
+ nr_new = 0;
- /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
- for (i = type->cnt - 1; i >= 0; i--) {
- if (base < type->regions[i].base) {
- type->regions[i+1].base = type->regions[i].base;
- type->regions[i+1].size = type->regions[i].size;
- } else {
- type->regions[i+1].base = base;
- type->regions[i+1].size = size;
- slot = i + 1;
+ for (i = 0; i < type->cnt; i++) {
+ struct memblock_region *rgn = &type->regions[i];
+ phys_addr_t rbase = rgn->base;
+ phys_addr_t rend = rbase + rgn->size;
+
+ if (rbase >= end)
break;
+ if (rend <= base)
+ continue;
+ /*
+ * @rgn overlaps. If it separates the lower part of new
+ * area, insert that portion.
+ */
+ if (rbase > base) {
+ nr_new++;
+ if (insert)
+ memblock_insert_region(type, i++, base,
+ rbase - base, nid);
}
+ /* area below @rend is dealt with, forget about it */
+ base = min(rend, end);
}
- if (base < type->regions[0].base) {
- type->regions[0].base = base;
- type->regions[0].size = size;
- slot = 0;
+
+ /* insert the remaining portion */
+ if (base < end) {
+ nr_new++;
+ if (insert)
+ memblock_insert_region(type, i, base, end - base, nid);
}
- type->cnt++;
- /* The array is full ? Try to resize it. If that fails, we undo
- * our allocation and return an error
+ /*
+ * If this was the first round, resize array and repeat for actual
+ * insertions; otherwise, merge and return.
*/
- if (type->cnt == type->max && memblock_double_array(type)) {
- BUG_ON(slot < 0);
- memblock_remove_region(type, slot);
- return -1;
+ if (!insert) {
+ while (type->cnt + nr_new > type->max)
+ if (memblock_double_array(type) < 0)
+ return -ENOMEM;
+ insert = true;
+ goto repeat;
+ } else {
+ memblock_merge_regions(type);
+ return 0;
}
-
- return 0;
}
-long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
+ int nid)
{
- return memblock_add_region(&memblock.memory, base, size);
+ return memblock_add_region(&memblock.memory, base, size, nid);
+}
+int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
-static long __init_memblock __memblock_remove(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size)
+/**
+ * memblock_isolate_range - isolate given range into disjoint memblocks
+ * @type: memblock type to isolate range for
+ * @base: base of range to isolate
+ * @size: size of range to isolate
+ * @start_rgn: out parameter for the start of isolated region
+ * @end_rgn: out parameter for the end of isolated region
+ *
+ * Walk @type and ensure that regions don't cross the boundaries defined by
+ * [@base,@base+@size). Crossing regions are split at the boundaries,
+ * which may create at most two more regions. The index of the first
+ * region inside the range is returned in *@start_rgn and end in *@end_rgn.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int __init_memblock memblock_isolate_range(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size,
+ int *start_rgn, int *end_rgn)
{
- phys_addr_t end = base + size;
+ phys_addr_t end = base + memblock_cap_size(base, &size);
int i;
- /* Walk through the array for collisions */
+ *start_rgn = *end_rgn = 0;
+
+ /* we'll create at most two more regions */
+ while (type->cnt + 2 > type->max)
+ if (memblock_double_array(type) < 0)
+ return -ENOMEM;
+
for (i = 0; i < type->cnt; i++) {
struct memblock_region *rgn = &type->regions[i];
- phys_addr_t rend = rgn->base + rgn->size;
+ phys_addr_t rbase = rgn->base;
+ phys_addr_t rend = rbase + rgn->size;
- /* Nothing more to do, exit */
- if (rgn->base > end || rgn->size == 0)
+ if (rbase >= end)
break;
-
- /* If we fully enclose the block, drop it */
- if (base <= rgn->base && end >= rend) {
- memblock_remove_region(type, i--);
+ if (rend <= base)
continue;
- }
- /* If we are fully enclosed within a block
- * then we need to split it and we are done
- */
- if (base > rgn->base && end < rend) {
- rgn->size = base - rgn->base;
- if (!memblock_add_region(type, end, rend - end))
- return 0;
- /* Failure to split is bad, we at least
- * restore the block before erroring
+ if (rbase < base) {
+ /*
+ * @rgn intersects from below. Split and continue
+ * to process the next region - the new top half.
+ */
+ rgn->base = base;
+ rgn->size -= base - rbase;
+ type->total_size -= base - rbase;
+ memblock_insert_region(type, i, rbase, base - rbase,
+ memblock_get_region_node(rgn));
+ } else if (rend > end) {
+ /*
+ * @rgn intersects from above. Split and redo the
+ * current region - the new bottom half.
*/
- rgn->size = rend - rgn->base;
- WARN_ON(1);
- return -1;
- }
-
- /* Check if we need to trim the bottom of a block */
- if (rgn->base < end && rend > end) {
- rgn->size -= end - rgn->base;
rgn->base = end;
- break;
+ rgn->size -= end - rbase;
+ type->total_size -= end - rbase;
+ memblock_insert_region(type, i--, rbase, end - rbase,
+ memblock_get_region_node(rgn));
+ } else {
+ /* @rgn is fully contained, record it */
+ if (!*end_rgn)
+ *start_rgn = i;
+ *end_rgn = i + 1;
}
+ }
- /* And check if we need to trim the top of a block */
- if (base < rend)
- rgn->size -= rend - base;
+ return 0;
+}
- }
+static int __init_memblock __memblock_remove(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size)
+{
+ int start_rgn, end_rgn;
+ int i, ret;
+
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
+
+ for (i = end_rgn - 1; i >= start_rgn; i--)
+ memblock_remove_region(type, i);
return 0;
}
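/*
 * [Editorial aside - not part of the patch] __memblock_remove() is now built
 * on memblock_isolate_range(): regions crossing the requested boundaries are
 * split first, then the fully contained ones are dropped.  A compact userspace
 * sketch of that isolate-then-remove pattern over a small fixed-size array
 * (array overflow is deliberately not handled in this sketch):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_RGNS 16

struct rgn { uint64_t base, size; };
struct rgns { struct rgn r[MAX_RGNS]; int cnt; };

static void insert_at(struct rgns *t, int idx, uint64_t base, uint64_t size)
{
	memmove(&t->r[idx + 1], &t->r[idx], (t->cnt - idx) * sizeof(t->r[0]));
	t->r[idx].base = base;
	t->r[idx].size = size;
	t->cnt++;
}

static void remove_at(struct rgns *t, int idx)
{
	memmove(&t->r[idx], &t->r[idx + 1], (t->cnt - idx - 1) * sizeof(t->r[0]));
	t->cnt--;
}

/* split regions straddling [base, base+size), then drop the contained ones */
static void remove_range(struct rgns *t, uint64_t base, uint64_t size)
{
	uint64_t end = base + size;
	int start_rgn = 0, end_rgn = 0;

	for (int i = 0; i < t->cnt; i++) {
		uint64_t rbase = t->r[i].base;
		uint64_t rend = rbase + t->r[i].size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		if (rbase < base) {
			/* crosses the lower boundary: split, keep processing the top half */
			t->r[i].base = base;
			t->r[i].size = rend - base;
			insert_at(t, i, rbase, base - rbase);
		} else if (rend > end) {
			/* crosses the upper boundary: split, redo the bottom half */
			t->r[i].base = end;
			t->r[i].size = rend - end;
			insert_at(t, i--, rbase, end - rbase);
		} else {
			/* fully contained: remember it for removal */
			if (!end_rgn)
				start_rgn = i;
			end_rgn = i + 1;
		}
	}
	for (int i = end_rgn - 1; i >= start_rgn; i--)
		remove_at(t, i);
}

int main(void)
{
	struct rgns t = { .r = { { 0, 100 }, { 200, 100 } }, .cnt = 2 };

	remove_range(&t, 50, 200);	/* leaves [0,50) and [250,300) */
	for (int i = 0; i < t.cnt; i++)
		printf("[%llu,%llu)\n", (unsigned long long)t.r[i].base,
		       (unsigned long long)(t.r[i].base + t.r[i].size));
	return 0;
}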
-long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
return __memblock_remove(&memblock.memory, base, size);
}
-long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
+ memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
+ (unsigned long long)base,
+ (unsigned long long)base + size,
+ (void *)_RET_IP_);
+
return __memblock_remove(&memblock.reserved, base, size);
}
-long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
+int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
struct memblock_type *_rgn = &memblock.reserved;
+ memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
+ (unsigned long long)base,
+ (unsigned long long)base + size,
+ (void *)_RET_IP_);
BUG_ON(0 == size);
- return memblock_add_region(_rgn, base, size);
+ return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+/**
+ * __next_free_mem_range - next function for for_each_free_mem_range()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Find the first free area from *@idx which matches @nid, fill the out
+ * parameters, and update *@idx for the next iteration. The lower 32bit of
+ * *@idx contains index into memory region and the upper 32bit indexes the
+ * areas before each reserved region. For example, if reserved regions
+ * look like the following,
+ *
+ * 0:[0-16), 1:[32-48), 2:[128-130)
+ *
+ * The upper 32bit indexes the following regions.
+ *
+ * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
+ *
+ * As both region arrays are sorted, the function advances the two indices
+ * in lockstep and returns each intersection.
+ */
+void __init_memblock __next_free_mem_range(u64 *idx, int nid,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid)
{
- phys_addr_t found;
+ struct memblock_type *mem = &memblock.memory;
+ struct memblock_type *rsv = &memblock.reserved;
+ int mi = *idx & 0xffffffff;
+ int ri = *idx >> 32;
- /* We align the size to limit fragmentation. Without this, a lot of
- * small allocs quickly eat up the whole reserve array on sparc
- */
- size = memblock_align_up(size, align);
+ for ( ; mi < mem->cnt; mi++) {
+ struct memblock_region *m = &mem->regions[mi];
+ phys_addr_t m_start = m->base;
+ phys_addr_t m_end = m->base + m->size;
- found = memblock_find_base(size, align, 0, max_addr);
- if (found != MEMBLOCK_ERROR &&
- !memblock_add_region(&memblock.reserved, found, size))
- return found;
+ /* only memory regions are associated with nodes, check it */
+ if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ continue;
- return 0;
+ /* scan areas before each reservation for intersection */
+ for ( ; ri < rsv->cnt + 1; ri++) {
+ struct memblock_region *r = &rsv->regions[ri];
+ phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+ phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+ /* if ri advanced past mi, break out to advance mi */
+ if (r_start >= m_end)
+ break;
+ /* if the two regions intersect, we're done */
+ if (m_start < r_end) {
+ if (out_start)
+ *out_start = max(m_start, r_start);
+ if (out_end)
+ *out_end = min(m_end, r_end);
+ if (out_nid)
+ *out_nid = memblock_get_region_node(m);
+ /*
+ * The region which ends first is advanced
+ * for the next iteration.
+ */
+ if (m_end <= r_end)
+ mi++;
+ else
+ ri++;
+ *idx = (u32)mi | (u64)ri << 32;
+ return;
+ }
+ }
+ }
+
+ /* signal end of iteration */
+ *idx = ULLONG_MAX;
}
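/*
 * [Editorial aside - not part of the patch] __next_free_mem_range() above packs
 * two array indices into one u64 - the low 32 bits walk the memory regions, the
 * high 32 bits walk the gaps between reserved regions - and advances them in
 * lockstep.  A self-contained userspace sketch of the same iterator shape over
 * two sorted, disjoint range arrays (names are local to the example):
 */
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* [start, end) */

/* emit the next intersection of "memory minus reserved"; 0 ends the walk */
static int next_free(uint64_t *idx,
		     const struct range *mem, int nmem,
		     const struct range *rsv, int nrsv,
		     uint64_t *out_start, uint64_t *out_end)
{
	uint32_t mi = (uint32_t)*idx;
	uint32_t ri = (uint32_t)(*idx >> 32);

	for (; mi < (uint32_t)nmem; mi++) {
		uint64_t m_start = mem[mi].start;
		uint64_t m_end = mem[mi].end;

		for (; ri < (uint32_t)nrsv + 1; ri++) {
			/* gap before rsv[ri]; the last gap is open-ended */
			uint64_t r_start = ri ? rsv[ri - 1].end : 0;
			uint64_t r_end = ri < (uint32_t)nrsv ? rsv[ri].start : UINT64_MAX;

			if (r_start >= m_end)
				break;		/* advance the memory index */
			if (m_start < r_end) {
				*out_start = m_start > r_start ? m_start : r_start;
				*out_end = m_end < r_end ? m_end : r_end;
				/* advance whichever range ends first */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (uint64_t)mi | ((uint64_t)ri << 32);
				return 1;
			}
		}
	}
	*idx = UINT64_MAX;
	return 0;
}

int main(void)
{
	struct range mem[] = { { 0, 100 }, { 200, 300 } };
	struct range rsv[] = { { 10, 20 }, { 250, 260 } };
	uint64_t s, e, idx = 0;

	/* prints [0,10) [20,100) [200,250) [260,300) */
	while (next_free(&idx, mem, 2, rsv, 2, &s, &e))
		printf("[%llu,%llu)\n", (unsigned long long)s, (unsigned long long)e);
	return 0;
}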
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+/**
+ * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
+ * @idx: pointer to u64 loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Reverse of __next_free_mem_range().
+ */
+void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid)
{
- phys_addr_t alloc;
+ struct memblock_type *mem = &memblock.memory;
+ struct memblock_type *rsv = &memblock.reserved;
+ int mi = *idx & 0xffffffff;
+ int ri = *idx >> 32;
- alloc = __memblock_alloc_base(size, align, max_addr);
+ if (*idx == (u64)ULLONG_MAX) {
+ mi = mem->cnt - 1;
+ ri = rsv->cnt;
+ }
- if (alloc == 0)
- panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
- (unsigned long long) size, (unsigned long long) max_addr);
+ for ( ; mi >= 0; mi--) {
+ struct memblock_region *m = &mem->regions[mi];
+ phys_addr_t m_start = m->base;
+ phys_addr_t m_end = m->base + m->size;
- return alloc;
-}
+ /* only memory regions are associated with nodes, check it */
+ if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
+ continue;
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
-}
+ /* scan areas before each reservation for intersection */
+ for ( ; ri >= 0; ri--) {
+ struct memblock_region *r = &rsv->regions[ri];
+ phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
+ phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
+
+ /* if ri advanced past mi, break out to advance mi */
+ if (r_end <= m_start)
+ break;
+ /* if the two regions intersect, we're done */
+ if (m_end > r_start) {
+ if (out_start)
+ *out_start = max(m_start, r_start);
+ if (out_end)
+ *out_end = min(m_end, r_end);
+ if (out_nid)
+ *out_nid = memblock_get_region_node(m);
+
+ if (m_start >= r_start)
+ mi--;
+ else
+ ri--;
+ *idx = (u32)mi | (u64)ri << 32;
+ return;
+ }
+ }
+ }
+ *idx = ULLONG_MAX;
+}
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * Additional node-local allocators. Search for node memory is bottom up
- * and walks memblock regions within that node bottom-up as well, but allocation
- * within an memblock region is top-down. XXX I plan to fix that at some stage
- *
- * WARNING: Only available after early_node_map[] has been populated,
- * on some architectures, that is after all the calls to add_active_range()
- * have been done to populate it.
+ * Common iterator interface used to define for_each_mem_range().
*/
-
-phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
+void __init_memblock __next_mem_pfn_range(int *idx, int nid,
+ unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid)
{
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- /*
- * This code originates from sparc which really wants use to walk by addresses
- * and returns the nid. This is not very convenient for early_pfn_map[] users
- * as the map isn't sorted yet, and it really wants to be walked by nid.
- *
- * For now, I implement the inefficient method below which walks the early
- * map multiple times. Eventually we may want to use an ARCH config option
- * to implement a completely different method for both case.
- */
- unsigned long start_pfn, end_pfn;
- int i;
+ struct memblock_type *type = &memblock.memory;
+ struct memblock_region *r;
- for (i = 0; i < MAX_NUMNODES; i++) {
- get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
- if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
+ while (++*idx < type->cnt) {
+ r = &type->regions[*idx];
+
+ if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
continue;
- *nid = i;
- return min(end, PFN_PHYS(end_pfn));
+ if (nid == MAX_NUMNODES || nid == r->nid)
+ break;
+ }
+ if (*idx >= type->cnt) {
+ *idx = -1;
+ return;
}
-#endif
- *nid = 0;
- return end;
+ if (out_start_pfn)
+ *out_start_pfn = PFN_UP(r->base);
+ if (out_end_pfn)
+ *out_end_pfn = PFN_DOWN(r->base + r->size);
+ if (out_nid)
+ *out_nid = r->nid;
}
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
- phys_addr_t size,
- phys_addr_t align, int nid)
+/**
+ * memblock_set_node - set node ID on memblock regions
+ * @base: base of area to set node ID for
+ * @size: size of area to set node ID for
+ * @nid: node ID to set
+ *
+ * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
+ * Regions which cross the area boundaries are split as necessary.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
+ int nid)
{
- phys_addr_t start, end;
+ struct memblock_type *type = &memblock.memory;
+ int start_rgn, end_rgn;
+ int i, ret;
- start = mp->base;
- end = start + mp->size;
+ ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
+ if (ret)
+ return ret;
- start = memblock_align_up(start, align);
- while (start < end) {
- phys_addr_t this_end;
- int this_nid;
+ for (i = start_rgn; i < end_rgn; i++)
+ type->regions[i].nid = nid;
- this_end = memblock_nid_range(start, end, &this_nid);
- if (this_nid == nid) {
- phys_addr_t ret = memblock_find_region(start, this_end, size, align);
- if (ret != MEMBLOCK_ERROR &&
- !memblock_add_region(&memblock.reserved, ret, size))
- return ret;
- }
- start = this_end;
- }
+ memblock_merge_regions(type);
+ return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t max_addr,
+ int nid)
+{
+ phys_addr_t found;
- return MEMBLOCK_ERROR;
+ found = memblock_find_in_range_node(0, max_addr, size, align, nid);
+ if (found && !memblock_reserve(found, size))
+ return found;
+
+ return 0;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
- struct memblock_type *mem = &memblock.memory;
- int i;
+ return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
- BUG_ON(0 == size);
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
+}
- /* We align the size to limit fragmentation. Without this, a lot of
- * small allocs quickly eat up the whole reserve array on sparc
- */
- size = memblock_align_up(size, align);
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ phys_addr_t alloc;
- /* We do a bottom-up search for a region with the right
- * nid since that's easier considering how memblock_nid_range()
- * works
- */
- for (i = 0; i < mem->cnt; i++) {
- phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
- size, align, nid);
- if (ret != MEMBLOCK_ERROR)
- return ret;
- }
+ alloc = __memblock_alloc_base(size, align, max_addr);
- return 0;
+ if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+ (unsigned long long) size, (unsigned long long) max_addr);
+
+ return alloc;
+}
+
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
@@ -613,7 +769,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
if (res)
return res;
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+ return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
@@ -621,10 +777,9 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* Remaining API functions
*/
-/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
- return memblock.memory_size;
+ return memblock.memory.total_size;
}
/* lowest address */
@@ -640,45 +795,28 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void)
return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
-/* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
unsigned long i;
- phys_addr_t limit;
- struct memblock_region *p;
+ phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
- if (!memory_limit)
+ if (!limit)
return;
- /* Truncate the memblock regions to satisfy the memory limit. */
- limit = memory_limit;
+ /* find out max address */
for (i = 0; i < memblock.memory.cnt; i++) {
- if (limit > memblock.memory.regions[i].size) {
- limit -= memblock.memory.regions[i].size;
- continue;
- }
-
- memblock.memory.regions[i].size = limit;
- memblock.memory.cnt = i + 1;
- break;
- }
-
- memory_limit = memblock_end_of_DRAM();
+ struct memblock_region *r = &memblock.memory.regions[i];
- /* And truncate any reserves above the limit also. */
- for (i = 0; i < memblock.reserved.cnt; i++) {
- p = &memblock.reserved.regions[i];
-
- if (p->base > memory_limit)
- p->size = 0;
- else if ((p->base + p->size) > memory_limit)
- p->size = memory_limit - p->base;
-
- if (p->size == 0) {
- memblock_remove_region(&memblock.reserved, i);
- i--;
+ if (limit <= r->size) {
+ max_addr = r->base + limit;
+ break;
}
+ limit -= r->size;
}
+
+ /* truncate both memory and reserved regions */
+ __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
+ __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
@@ -712,16 +850,18 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
int idx = memblock_search(&memblock.memory, base);
+ phys_addr_t end = base + memblock_cap_size(base, &size);
if (idx == -1)
return 0;
return memblock.memory.regions[idx].base <= base &&
(memblock.memory.regions[idx].base +
- memblock.memory.regions[idx].size) >= (base + size);
+ memblock.memory.regions[idx].size) >= end;
}
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
+ memblock_cap_size(base, &size);
return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
@@ -731,86 +871,45 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
memblock.current_limit = limit;
}
-static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
+static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
unsigned long long base, size;
int i;
- pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
+ pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);
- for (i = 0; i < region->cnt; i++) {
- base = region->regions[i].base;
- size = region->regions[i].size;
-
- pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
- name, i, base, base + size - 1, size);
+ for (i = 0; i < type->cnt; i++) {
+ struct memblock_region *rgn = &type->regions[i];
+ char nid_buf[32] = "";
+
+ base = rgn->base;
+ size = rgn->size;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ if (memblock_get_region_node(rgn) != MAX_NUMNODES)
+ snprintf(nid_buf, sizeof(nid_buf), " on node %d",
+ memblock_get_region_node(rgn));
+#endif
+ pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
+ name, i, base, base + size - 1, size, nid_buf);
}
}
-void __init_memblock memblock_dump_all(void)
+void __init_memblock __memblock_dump_all(void)
{
- if (!memblock_debug)
- return;
-
pr_info("MEMBLOCK configuration:\n");
- pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
+ pr_info(" memory size = %#llx reserved size = %#llx\n",
+ (unsigned long long)memblock.memory.total_size,
+ (unsigned long long)memblock.reserved.total_size);
memblock_dump(&memblock.memory, "memory");
memblock_dump(&memblock.reserved, "reserved");
}
-void __init memblock_analyze(void)
+void __init memblock_allow_resize(void)
{
- int i;
-
- /* Check marker in the unused last array entry */
- WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
- != MEMBLOCK_INACTIVE);
- WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
- != MEMBLOCK_INACTIVE);
-
- memblock.memory_size = 0;
-
- for (i = 0; i < memblock.memory.cnt; i++)
- memblock.memory_size += memblock.memory.regions[i].size;
-
- /* We allow resizing from there */
memblock_can_resize = 1;
}
-void __init memblock_init(void)
-{
- static int init_done __initdata = 0;
-
- if (init_done)
- return;
- init_done = 1;
-
- /* Hookup the initial arrays */
- memblock.memory.regions = memblock_memory_init_regions;
- memblock.memory.max = INIT_MEMBLOCK_REGIONS;
- memblock.reserved.regions = memblock_reserved_init_regions;
- memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
-
- /* Write a marker in the unused last array entry */
- memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
- memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
-
- /* Create a dummy zero size MEMBLOCK which will get coalesced away later.
- * This simplifies the memblock_add() code below...
- */
- memblock.memory.regions[0].base = 0;
- memblock.memory.regions[0].size = 0;
- memblock.memory.cnt = 1;
-
- /* Ditto. */
- memblock.reserved.regions[0].base = 0;
- memblock.reserved.regions[0].size = 0;
- memblock.reserved.cnt = 1;
-
- memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
-}
-
static int __init early_memblock(char *p)
{
if (p && strstr(p, "debug"))
@@ -819,7 +918,7 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
-#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
static int memblock_debug_show(struct seq_file *m, void *private)
{
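/*
 * [Editorial aside - not part of the patch] The memblock rework above drops
 * memblock_init() and instead initializes struct memblock statically with
 * designated initializers, so the structure is valid before any code runs.
 * A minimal sketch of that "compile-time init instead of an early init call"
 * pattern (the names below are invented for the example):
 */
#include <stdio.h>

#define INIT_REGIONS 4

struct region { unsigned long base, size; };

struct table {
	struct region *regions;
	int cnt, max;
};

static struct region init_regions[INIT_REGIONS];

/* compile-time setup replaces a runtime table_init() call */
static struct table table = {
	.regions = init_regions,
	.cnt = 1,		/* empty dummy entry, as in the hunk above */
	.max = INIT_REGIONS,
};

int main(void)
{
	printf("cnt=%d max=%d\n", table.cnt, table.max);
	return 0;
}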
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c98ac..d87aa3510c5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -50,6 +50,8 @@
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
#include <asm/uaccess.h>
@@ -286,6 +288,10 @@ struct mem_cgroup {
*/
struct mem_cgroup_stat_cpu nocpu_base;
spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+ struct tcp_memcontrol tcp_mem;
+#endif
};
/* Stuffs for move charges at task migration. */
@@ -365,7 +371,67 @@ enum charge_type {
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+ if (static_branch(&memcg_socket_limit_enabled)) {
+ struct mem_cgroup *memcg;
+
+ BUG_ON(!sk->sk_prot->proto_cgroup);
+
+ /* Socket cloning can throw us here with sk_cgrp already
+ * filled. It won't however, necessarily happen from
+ * process context. So the test for root memcg given
+ * the current task's memcg won't help us in this case.
+ *
+ * Respecting the original socket's memcg is a better
+ * decision in this case.
+ */
+ if (sk->sk_cgrp) {
+ BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
+ mem_cgroup_get(sk->sk_cgrp->memcg);
+ return;
+ }
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(current);
+ if (!mem_cgroup_is_root(memcg)) {
+ mem_cgroup_get(memcg);
+ sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+ }
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+ if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+ struct mem_cgroup *memcg;
+ WARN_ON(!sk->sk_cgrp->memcg);
+ memcg = sk->sk_cgrp->memcg;
+ mem_cgroup_put(memcg);
+ }
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+ if (!memcg || mem_cgroup_is_root(memcg))
+ return NULL;
+
+ return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+
static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
@@ -745,7 +811,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
preempt_enable();
}
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
return container_of(cgroup_subsys_state(cont,
mem_cgroup_subsys_id), struct mem_cgroup,
@@ -4612,6 +4678,36 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
}
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+ /*
+ * Part of this would be better living in a separate allocation
+ * function, leaving us with just the cgroup tree population work.
+ * We, however, depend on state such as network's proto_list that
+ * is only initialized after cgroup creation. I found the less
+ * cumbersome way to deal with it to defer it all to populate time
+ */
+ return mem_cgroup_sockets_init(cont, ss);
+};
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+ return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+}
+#endif
+
static struct cftype mem_cgroup_files[] = {
{
.name = "usage_in_bytes",
@@ -4843,12 +4939,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg)
/*
* Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
*/
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
if (!memcg->res.parent)
return NULL;
return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
+EXPORT_SYMBOL(parent_mem_cgroup);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
@@ -4907,9 +5004,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
int cpu;
enable_swap_cgroup();
parent = NULL;
- root_mem_cgroup = memcg;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock =
&per_cpu(memcg_stock, cpu);
@@ -4948,7 +5045,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return &memcg->css;
free_out:
__mem_cgroup_free(memcg);
- root_mem_cgroup = NULL;
return ERR_PTR(error);
}
@@ -4965,6 +5061,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ kmem_cgroup_destroy(ss, cont);
+
mem_cgroup_put(memcg);
}
@@ -4978,6 +5076,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
if (!ret)
ret = register_memsw_files(cont, ss);
+
+ if (!ret)
+ ret = register_kmem_files(cont, ss);
+
return ret;
}
@@ -5298,8 +5400,9 @@ static void mem_cgroup_clear_mc(void)
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
+ struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
@@ -5337,7 +5440,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
}
@@ -5454,9 +5557,9 @@ retry:
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
+ struct task_struct *p = cgroup_taskset_first(tset);
struct mm_struct *mm = get_task_mm(p);
if (mm) {
@@ -5471,19 +5574,18 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
- struct cgroup *old_cont,
- struct task_struct *p)
+ struct cgroup_taskset *tset)
{
}
#endif
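/*
 * [Editorial aside - not part of the patch] The memcontrol hunk above assigns
 * root_mem_cgroup only after mem_cgroup_soft_limit_tree_init() succeeds, so
 * the error path no longer has to clear a half-published global.  A small
 * sketch of that initialize-then-publish ordering (the cache structure below
 * is purely illustrative):
 */
#include <stdlib.h>

struct cache {
	int *slots;
	int nr;
};

/* published only once fully constructed; readers never see a partial object */
static struct cache *global_cache;

static struct cache *cache_create(int nr)
{
	struct cache *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->slots = calloc(nr, sizeof(*c->slots));
	if (!c->slots) {
		free(c);		/* nothing global to roll back */
		return NULL;
	}
	c->nr = nr;
	global_cache = c;		/* publish last, after init succeeded */
	return c;
}

int main(void)
{
	return cache_create(16) ? 0 : 1;
}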
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index adc39548181..e3d58f08846 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
+ pgoff_t pgoff;
unsigned long vmstart;
unsigned long vmend;
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (!vma || vma->vm_start > start)
return -EFAULT;
+ if (start > vma->vm_start)
+ prev = vma;
+
for (; vma && vma->vm_start < end; prev = vma, vma = next) {
next = vma->vm_next;
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+
+ pgoff = vma->vm_pgoff +
+ ((vmstart - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
- vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+ vma->anon_vma, vma->vm_file, pgoff,
new_pol);
if (prev) {
vma = prev;
@@ -1974,28 +1983,28 @@ struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
}
/* Slow path of a mempolicy comparison */
-int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
if (!a || !b)
- return 0;
+ return false;
if (a->mode != b->mode)
- return 0;
+ return false;
if (a->flags != b->flags)
- return 0;
+ return false;
if (mpol_store_user_nodemask(a))
if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
- return 0;
+ return false;
switch (a->mode) {
case MPOL_BIND:
/* Fall through */
case MPOL_INTERLEAVE:
- return nodes_equal(a->v.nodes, b->v.nodes);
+ return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
return a->v.preferred_node == b->v.preferred_node;
default:
BUG();
- return 0;
+ return false;
}
}
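/*
 * [Editorial aside - not part of the patch] The mbind_range() fix recomputes
 * the file page offset for the sub-range being merged instead of reusing the
 * vma's own vm_pgoff.  The arithmetic is "pages into the mapping plus the
 * mapping's starting offset"; a tiny sketch with an assumed 4 KiB page size:
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12

/* file page offset of the sub-range starting at @vmstart within a mapping */
static uint64_t subrange_pgoff(uint64_t vm_start, uint64_t vm_pgoff,
			       uint64_t vmstart)
{
	return vm_pgoff + ((vmstart - vm_start) >> PAGE_SHIFT);
}

int main(void)
{
	/* mapping starts at file page 10; a sub-range 3 pages in begins at file page 13 */
	assert(subrange_pgoff(0x400000, 10, 0x403000) == 13);
	return 0;
}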
diff --git a/mm/mempool.c b/mm/mempool.c
index e73641b79bb..d9049811f35 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -27,7 +27,15 @@ static void *remove_element(mempool_t *pool)
return pool->elements[--pool->curr_nr];
}
-static void free_pool(mempool_t *pool)
+/**
+ * mempool_destroy - deallocate a memory pool
+ * @pool: pointer to the memory pool which was allocated via
+ * mempool_create().
+ *
+ * Free all reserved elements in @pool and @pool itself. This function
+ * only sleeps if the free_fn() function sleeps.
+ */
+void mempool_destroy(mempool_t *pool)
{
while (pool->curr_nr) {
void *element = remove_element(pool);
@@ -36,6 +44,7 @@ static void free_pool(mempool_t *pool)
kfree(pool->elements);
kfree(pool);
}
+EXPORT_SYMBOL(mempool_destroy);
/**
* mempool_create - create a memory pool
@@ -86,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
element = pool->alloc(GFP_KERNEL, pool->pool_data);
if (unlikely(!element)) {
- free_pool(pool);
+ mempool_destroy(pool);
return NULL;
}
add_element(pool, element);
@@ -172,23 +181,6 @@ out:
EXPORT_SYMBOL(mempool_resize);
/**
- * mempool_destroy - deallocate a memory pool
- * @pool: pointer to the memory pool which was allocated via
- * mempool_create().
- *
- * this function only sleeps if the free_fn() function sleeps. The caller
- * has to guarantee that all elements have been returned to the pool (ie:
- * freed) prior to calling mempool_destroy().
- */
-void mempool_destroy(mempool_t *pool)
-{
- /* Check for outstanding elements */
- BUG_ON(pool->curr_nr != pool->min_nr);
- free_pool(pool);
-}
-EXPORT_SYMBOL(mempool_destroy);
-
-/**
* mempool_alloc - allocate an element from a specific memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
@@ -224,28 +216,40 @@ repeat_alloc:
if (likely(pool->curr_nr)) {
element = remove_element(pool);
spin_unlock_irqrestore(&pool->lock, flags);
+ /* paired with rmb in mempool_free(), read comment there */
+ smp_wmb();
return element;
}
- spin_unlock_irqrestore(&pool->lock, flags);
- /* We must not sleep in the GFP_ATOMIC case */
- if (!(gfp_mask & __GFP_WAIT))
+ /*
+ * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
+ * alloc failed with that and @pool was empty, retry immediately.
+ */
+ if (gfp_temp != gfp_mask) {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ gfp_temp = gfp_mask;
+ goto repeat_alloc;
+ }
+
+ /* We must not sleep if !__GFP_WAIT */
+ if (!(gfp_mask & __GFP_WAIT)) {
+ spin_unlock_irqrestore(&pool->lock, flags);
return NULL;
+ }
- /* Now start performing page reclaim */
- gfp_temp = gfp_mask;
+ /* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
- smp_mb();
- if (!pool->curr_nr) {
- /*
- * FIXME: this should be io_schedule(). The timeout is there
- * as a workaround for some DM problems in 2.6.18.
- */
- io_schedule_timeout(5*HZ);
- }
- finish_wait(&pool->wait, &wait);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ /*
+ * FIXME: this should be io_schedule(). The timeout is there as a
+ * workaround for some DM problems in 2.6.18.
+ */
+ io_schedule_timeout(5*HZ);
+
+ finish_wait(&pool->wait, &wait);
goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
@@ -265,7 +269,39 @@ void mempool_free(void *element, mempool_t *pool)
if (unlikely(element == NULL))
return;
- smp_mb();
+ /*
+ * Paired with the wmb in mempool_alloc(). The preceding read is
+ * for @element and the following @pool->curr_nr. This ensures
+ * that the visible value of @pool->curr_nr is from after the
+ * allocation of @element. This is necessary for fringe cases
+ * where @element was passed to this task without going through
+ * barriers.
+ *
+ * For example, assume @p is %NULL at the beginning and one task
+ * performs "p = mempool_alloc(...);" while another task is doing
+ * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
+ * may end up using curr_nr value which is from before allocation
+ * of @p without the following rmb.
+ */
+ smp_rmb();
+
+ /*
+ * For correctness, we need a test which is guaranteed to trigger
+ * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
+ * without locking achieves that and refilling as soon as possible
+ * is desirable.
+ *
+ * Because curr_nr visible here is always a value after the
+ * allocation of @element, any task which decremented curr_nr below
+ * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
+ * incremented to min_nr afterwards. If curr_nr gets incremented
+ * to min_nr after the allocation of @element, the elements
+ * allocated after that are subject to the same guarantee.
+ *
+ * Waiters happen iff curr_nr is 0 and the above guarantee also
+ * ensures that there will be frees which return elements to the
+ * pool waking up the waiters.
+ */
if (pool->curr_nr < pool->min_nr) {
spin_lock_irqsave(&pool->lock, flags);
if (pool->curr_nr < pool->min_nr) {
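
For orientation, the reworked mempool_alloc() above is a three-step ladder: try the allocator with a stripped-down gfp mask, fall back to the reserve pool, retry once with the caller's full mask, and only then sleep until mempool_free() returns an element. The sketch below is a rough user-space model of that control flow only; the names and the fake backing allocator are invented for illustration, and the smp_wmb()/smp_rmb() pairing shown in the hunks is not modelled.

#include <stdio.h>

enum { GFP_LIGHT, GFP_FULL };            /* stand-ins for gfp masks          */

struct toy_pool {
	int curr_nr;                     /* reserved elements still held     */
	char reserve[4];                 /* pretend reserved element storage */
};

static void *backing_alloc(int mask)     /* pretend only the full mask works */
{
	static char obj;
	return mask == GFP_FULL ? &obj : NULL;
}

static void *toy_mempool_alloc(struct toy_pool *pool, int gfp_mask)
{
	int gfp_temp = GFP_LIGHT;        /* first round: no waiting, no IO   */

	for (;;) {
		void *p = backing_alloc(gfp_temp);

		if (p)
			return p;
		if (pool->curr_nr > 0)   /* fall back to the reserve pool    */
			return &pool->reserve[--pool->curr_nr];
		if (gfp_temp != gfp_mask) {
			gfp_temp = gfp_mask;  /* retry once with full mask */
			continue;
		}
		/* The real code sleeps on pool->wait here; we just give up. */
		return NULL;
	}
}

int main(void)
{
	struct toy_pool pool = { .curr_nr = 0 };

	printf("%s\n", toy_mempool_alloc(&pool, GFP_FULL) ? "got element"
							  : "failed");
	return 0;
}
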
diff --git a/mm/migrate.c b/mm/migrate.c
index 578e29174fa..89ea0854332 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,8 +39,6 @@
#include "internal.h"
-#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
-
/*
* migrate_prep() needs to be called before we start compiling a list of pages
* to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
@@ -181,8 +179,6 @@ static void remove_migration_ptes(struct page *old, struct page *new)
* Something used the pte of a page under migration. We need to
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
- *
- * This function is called from do_swap_page().
*/
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address)
@@ -269,12 +265,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
radix_tree_replace_slot(pslot, newpage);
- page_unfreeze_refs(page, expected_count);
/*
- * Drop cache reference from old page.
+ * Drop cache reference from old page by unfreezing
+ * to one less reference.
* We know this isn't the last reference.
*/
- __put_page(page);
+ page_unfreeze_refs(page, expected_count - 1);
/*
* If moved to a different zone then also account
@@ -334,9 +330,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
radix_tree_replace_slot(pslot, newpage);
- page_unfreeze_refs(page, expected_count);
-
- __put_page(page);
+ page_unfreeze_refs(page, expected_count - 1);
spin_unlock_irq(&mapping->tree_lock);
return 0;
@@ -871,9 +865,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
-out:
unlock_page(hpage);
+out:
if (rc != -EAGAIN) {
list_del(&hpage->lru);
put_page(hpage);
diff --git a/mm/mmap.c b/mm/mmap.c
index eae90af60ea..3f758c7f4c8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1603,39 +1603,19 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ * Note: pprev is set to NULL when return value is NULL.
+ */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
- struct vm_area_struct *vma = NULL, *prev = NULL;
- struct rb_node *rb_node;
- if (!mm)
- goto out;
-
- /* Guard against addr being lower than the first VMA */
- vma = mm->mmap;
-
- /* Go through the RB tree quickly. */
- rb_node = mm->mm_rb.rb_node;
-
- while (rb_node) {
- struct vm_area_struct *vma_tmp;
- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-
- if (addr < vma_tmp->vm_end) {
- rb_node = rb_node->rb_left;
- } else {
- prev = vma_tmp;
- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
- break;
- rb_node = rb_node->rb_right;
- }
- }
+ struct vm_area_struct *vma;
-out:
- *pprev = prev;
- return prev ? prev->vm_next : vma;
+ vma = find_vma(mm, addr);
+ *pprev = vma ? vma->vm_prev : NULL;
+ return vma;
}
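
The rewrite above works because every vma now carries a vm_prev back-pointer, so find_vma_prev() reduces to a find_vma() lookup plus one pointer dereference. A toy user-space analogue on an address-sorted doubly linked list (types invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct node {
	unsigned long start, end;        /* [start, end) address range */
	struct node *next, *prev;
};

/* First node whose end is above addr, i.e. the find_vma() analogue. */
static struct node *find_node(struct node *head, unsigned long addr)
{
	for (struct node *n = head; n; n = n->next)
		if (addr < n->end)
			return n;
	return NULL;
}

/* find_vma_prev() analogue: *pprev is NULL when nothing is found. */
static struct node *find_node_prev(struct node *head, unsigned long addr,
				   struct node **pprev)
{
	struct node *n = find_node(head, addr);

	*pprev = n ? n->prev : NULL;
	return n;
}

int main(void)
{
	struct node a = { 0x1000, 0x2000, NULL, NULL };
	struct node b = { 0x3000, 0x4000, NULL, &a };
	struct node *prev;

	a.next = &b;
	printf("%p %p\n", (void *)find_node_prev(&a, 0x3500, &prev),
	       (void *)prev);
	return 0;
}
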
/*
@@ -2322,13 +2302,16 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct vm_area_struct *new_vma, *prev;
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
+ bool faulted_in_anon_vma = true;
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
*/
- if (!vma->vm_file && !vma->anon_vma)
+ if (unlikely(!vma->vm_file && !vma->anon_vma)) {
pgoff = addr >> PAGE_SHIFT;
+ faulted_in_anon_vma = false;
+ }
find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
@@ -2337,9 +2320,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
/*
* Source vma may have been merged into new_vma
*/
- if (vma_start >= new_vma->vm_start &&
- vma_start < new_vma->vm_end)
+ if (unlikely(vma_start >= new_vma->vm_start &&
+ vma_start < new_vma->vm_end)) {
+ /*
+ * The only way we can get a vma_merge with
+ * self during an mremap is if the vma hasn't
+ * been faulted in yet and we were allowed to
+ * reset the dst vma->vm_pgoff to the
+ * destination address of the mremap to allow
+ * the merge to happen. mremap must change the
+ * vm_pgoff linearity between src and dst vmas
+ * (in turn preventing a vma_merge) to be
+ * safe. It is only safe to keep the vm_pgoff
+ * linear if there are no pages mapped yet.
+ */
+ VM_BUG_ON(faulted_in_anon_vma);
*vmap = new_vma;
+ } else
+ anon_vma_moveto_tail(new_vma);
} else {
new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (new_vma) {
diff --git a/mm/mremap.c b/mm/mremap.c
index d6959cb4df5..87bb8393e7d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -221,6 +221,15 @@ static unsigned long move_vma(struct vm_area_struct *vma,
moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
if (moved_len < old_len) {
/*
+ * Before moving the page tables from the new vma to
+ * the old vma, we need to be sure the old vma is
+ * queued after new vma in the same_anon_vma list to
+ * prevent SMP races with rmap_walk (that could lead
+ * rmap_walk to miss some page table).
+ */
+ anon_vma_moveto_tail(vma);
+
+ /*
* On error, move entries back from new area to old,
* which will succeed since page tables still there,
* and then proceed to unmap new area instead of old.
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7fa41b4a07b..24f0fc1a56d 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,14 +41,13 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
if (limit > memblock.current_limit)
limit = memblock.current_limit;
- addr = find_memory_core_early(nid, size, align, goal, limit);
-
- if (addr == MEMBLOCK_ERROR)
+ addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+ if (!addr)
return NULL;
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+ memblock_reserve(addr, size);
/*
* The min_count is set to 0 so that bootmem allocated blocks
* are never reported as leaks.
@@ -107,23 +106,27 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
__free_pages_bootmem(pfn_to_page(i), 0);
}
-unsigned long __init free_all_memory_core_early(int nodeid)
+unsigned long __init free_low_memory_core_early(int nodeid)
{
- int i;
- u64 start, end;
unsigned long count = 0;
- struct range *range = NULL;
- int nr_range;
-
- nr_range = get_free_all_memory_range(&range, nodeid);
-
- for (i = 0; i < nr_range; i++) {
- start = range[i].start;
- end = range[i].end;
- count += end - start;
- __free_pages_memory(start, end);
+ phys_addr_t start, end;
+ u64 i;
+
+ /* free reserved array temporarily so that it's treated as free area */
+ memblock_free_reserved_regions();
+
+ for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = min_t(unsigned long,
+ PFN_DOWN(end), max_low_pfn);
+ if (start_pfn < end_pfn) {
+ __free_pages_memory(start_pfn, end_pfn);
+ count += end_pfn - start_pfn;
+ }
}
+ /* put region array back? */
+ memblock_reserve_reserved_regions();
return count;
}
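
The new loop above clamps each free memblock range to whole page frames below max_low_pfn: PFN_UP() rounds the start up and PFN_DOWN() rounds the end down, so partially covered pages are never handed to the buddy allocator. A stand-alone sketch of that rounding, with the macros re-derived here under a 4 KiB page assumption rather than taken from the kernel headers:

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x1080;           /* mid-page physical start */
	unsigned long end   = 0x5100;           /* mid-page physical end   */
	unsigned long max_low_pfn = 4;          /* pretend low memory cap  */

	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;

	if (start_pfn < end_pfn)
		printf("free pfns [%lu, %lu)\n", start_pfn, end_pfn);
	return 0;
}
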
@@ -137,7 +140,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
register_page_bootmem_info_node(pgdat);
- /* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+ /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
return 0;
}
@@ -155,7 +158,7 @@ unsigned long __init free_all_bootmem(void)
* Use MAX_NUMNODES will make sure all ranges in early_node_map[]
* will be used instead of only Node0 related
*/
- return free_all_memory_core_early(MAX_NUMNODES);
+ return free_low_memory_core_early(MAX_NUMNODES);
}
/**
@@ -172,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
kmemleak_free_part(__va(physaddr), size);
- memblock_x86_free_range(physaddr, physaddr + size);
+ memblock_free(physaddr, size);
}
/**
@@ -187,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
void __init free_bootmem(unsigned long addr, unsigned long size)
{
kmemleak_free_part(__va(addr), size);
- memblock_x86_free_range(addr, addr + size);
+ memblock_free(addr, size);
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5ae908..7c122faa05c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -33,6 +33,10 @@
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
+#include <linux/ftrace.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
@@ -55,6 +59,7 @@ void compare_swap_oom_score_adj(int old_val, int new_val)
spin_lock_irq(&sighand->siglock);
if (current->signal->oom_score_adj == old_val)
current->signal->oom_score_adj = new_val;
+ trace_oom_score_adj_update(current);
spin_unlock_irq(&sighand->siglock);
}
@@ -74,6 +79,7 @@ int test_set_oom_score_adj(int new_val)
spin_lock_irq(&sighand->siglock);
old_val = current->signal->oom_score_adj;
current->signal->oom_score_adj = new_val;
+ trace_oom_score_adj_update(current);
spin_unlock_irq(&sighand->siglock);
return old_val;
@@ -176,7 +182,7 @@ static bool oom_unkillable_task(struct task_struct *p,
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
const nodemask_t *nodemask, unsigned long totalpages)
{
- int points;
+ long points;
if (oom_unkillable_task(p, mem, nodemask))
return 0;
@@ -328,7 +334,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
if (unlikely(frozen(p)))
- thaw_process(p);
+ __thaw_task(p);
return ERR_PTR(-1UL);
}
if (!p->mm)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 71252486bc6..363ba7082ef 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -32,7 +32,7 @@
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
-#include <linux/buffer_head.h>
+#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <trace/events/writeback.h>
@@ -42,6 +42,12 @@
#define MAX_PAUSE max(HZ/5, 1)
/*
+ * Try to keep balance_dirty_pages() call intervals higher than this many pages
+ * by raising pause time to max_pause when it falls below this threshold.
+ */
+#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
+
+/*
* Estimate write bandwidth at 200ms intervals.
*/
#define BANDWIDTH_INTERVAL max(HZ/5, 1)
@@ -130,6 +136,191 @@ unsigned long global_dirty_limit;
static struct prop_descriptor vm_completions;
/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around. To avoid stressing page reclaim with lots of unreclaimable
+ * pages. It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+
+/*
+ * In a memory zone, there is a certain amount of pages we consider
+ * available for the page cache, which is essentially the number of
+ * free and reclaimable pages, minus some zone reserves to protect
+ * lowmem and the ability to uphold the zone's watermarks without
+ * requiring writeback.
+ *
+ * This number of dirtyable pages is the base value of which the
+ * user-configurable dirty ratio is the effective number of pages that
+ * are allowed to be actually dirtied. Per individual zone, or
+ * globally by using the sum of dirtyable pages over all zones.
+ *
+ * Because the user is allowed to specify the dirty limit globally as
+ * absolute number of bytes, calculating the per-zone dirty limit can
+ * require translating the configured limit into a percentage of
+ * global dirtyable memory first.
+ */
+
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+ int node;
+ unsigned long x = 0;
+
+ for_each_node_state(node, N_HIGH_MEMORY) {
+ struct zone *z =
+ &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+ x += zone_page_state(z, NR_FREE_PAGES) +
+ zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+ }
+ /*
+ * Make sure that the number of highmem pages is never larger
+ * than the number of the total dirtyable memory. This can only
+ * occur in very strange VM situations but we want to make sure
+ * that this does not occur.
+ */
+ return min(x, total);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * global_dirtyable_memory - number of globally dirtyable pages
+ *
+ * Returns the global number of pages potentially available for dirty
+ * page cache. This is the base value for the global dirty limits.
+ */
+unsigned long global_dirtyable_memory(void)
+{
+ unsigned long x;
+
+ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+ dirty_balance_reserve;
+
+ if (!vm_highmem_is_dirtyable)
+ x -= highmem_dirtyable_memory(x);
+
+ return x + 1; /* Ensure that we never return 0 */
+}
+
+/*
+ * global_dirty_limits - background-writeback and dirty-throttling thresholds
+ *
+ * Calculate the dirty thresholds based on sysctl parameters
+ * - vm.dirty_background_ratio or vm.dirty_background_bytes
+ * - vm.dirty_ratio or vm.dirty_bytes
+ * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
+ * real-time tasks.
+ */
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
+{
+ unsigned long background;
+ unsigned long dirty;
+ unsigned long uninitialized_var(available_memory);
+ struct task_struct *tsk;
+
+ if (!vm_dirty_bytes || !dirty_background_bytes)
+ available_memory = global_dirtyable_memory();
+
+ if (vm_dirty_bytes)
+ dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
+ else
+ dirty = (vm_dirty_ratio * available_memory) / 100;
+
+ if (dirty_background_bytes)
+ background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
+ else
+ background = (dirty_background_ratio * available_memory) / 100;
+
+ if (background >= dirty)
+ background = dirty / 2;
+ tsk = current;
+ if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+ background += background / 4;
+ dirty += dirty / 4;
+ }
+ *pbackground = background;
+ *pdirty = dirty;
+ trace_global_dirty_state(background, dirty);
+}
+
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+ /*
+ * The effective global number of dirtyable pages may exclude
+ * highmem as a big-picture measure to keep the ratio between
+ * dirty memory and lowmem reasonable.
+ *
+ * But this function is purely about the individual zone and a
+ * highmem zone can hold its share of dirty pages, so we don't
+ * care about vm_highmem_is_dirtyable here.
+ */
+ return zone_page_state(zone, NR_FREE_PAGES) +
+ zone_reclaimable_pages(zone) -
+ zone->dirty_balance_reserve;
+}
+
+/**
+ * zone_dirty_limit - maximum number of dirty pages allowed in a zone
+ * @zone: the zone
+ *
+ * Returns the maximum number of dirty pages allowed in a zone, based
+ * on the zone's dirtyable memory.
+ */
+static unsigned long zone_dirty_limit(struct zone *zone)
+{
+ unsigned long zone_memory = zone_dirtyable_memory(zone);
+ struct task_struct *tsk = current;
+ unsigned long dirty;
+
+ if (vm_dirty_bytes)
+ dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
+ zone_memory / global_dirtyable_memory();
+ else
+ dirty = vm_dirty_ratio * zone_memory / 100;
+
+ if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+ dirty += dirty / 4;
+
+ return dirty;
+}
+
+/**
+ * zone_dirty_ok - tells whether a zone is within its dirty limits
+ * @zone: the zone to check
+ *
+ * Returns %true when the dirty pages in @zone are within the zone's
+ * dirty limit, %false if the limit is exceeded.
+ */
+bool zone_dirty_ok(struct zone *zone)
+{
+ unsigned long limit = zone_dirty_limit(zone);
+
+ return zone_page_state(zone, NR_FILE_DIRTY) +
+ zone_page_state(zone, NR_UNSTABLE_NFS) +
+ zone_page_state(zone, NR_WRITEBACK) <= limit;
+}
+
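
The relocated global_dirty_limits() above is, at its core, simple integer arithmetic: use the byte-based sysctls when they are set, otherwise take percentages of the dirtyable page count, keep background below dirty, and lift both by a quarter for PF_LESS_THROTTLE or real-time callers. A hedged user-space rendition of just that arithmetic follows; the sysctl values and page size are hard-coded stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void toy_dirty_limits(unsigned long dirtyable_pages,
			     unsigned long dirty_bytes,      /* 0 => ratio */
			     unsigned long background_bytes, /* 0 => ratio */
			     unsigned int dirty_ratio,
			     unsigned int background_ratio,
			     bool less_throttle,
			     unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long dirty, background;

	dirty = dirty_bytes ? DIV_ROUND_UP(dirty_bytes, PAGE_SIZE)
			    : dirty_ratio * dirtyable_pages / 100;
	background = background_bytes ?
			DIV_ROUND_UP(background_bytes, PAGE_SIZE) :
			background_ratio * dirtyable_pages / 100;

	if (background >= dirty)
		background = dirty / 2;
	if (less_throttle) {            /* PF_LESS_THROTTLE or rt_task() */
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}

int main(void)
{
	unsigned long bg, dirty;

	/* 1,000,000 dirtyable pages, default 10%/20% ratios, normal task. */
	toy_dirty_limits(1000000, 0, 0, 20, 10, false, &bg, &dirty);
	printf("background=%lu dirty=%lu\n", bg, dirty);
	return 0;
}
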
+/*
* couple the period to the dirty_ratio:
*
* period/2 ~ roundup_pow_of_two(dirty limit)
@@ -141,7 +332,7 @@ static int calc_period_shift(void)
if (vm_dirty_bytes)
dirty_total = vm_dirty_bytes / PAGE_SIZE;
else
- dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
+ dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
100;
return 2 + ilog2(dirty_total - 1);
}
@@ -196,7 +387,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
return ret;
}
-
int dirty_bytes_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -291,67 +481,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
}
EXPORT_SYMBOL(bdi_set_max_ratio);
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around. To avoid stressing page reclaim with lots of unreclaimable
- * pages. It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
- int node;
- unsigned long x = 0;
-
- for_each_node_state(node, N_HIGH_MEMORY) {
- struct zone *z =
- &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
- x += zone_page_state(z, NR_FREE_PAGES) +
- zone_reclaimable_pages(z);
- }
- /*
- * Make sure that the number of highmem pages is never larger
- * than the number of the total dirtyable memory. This can only
- * occur in very strange VM situations but we want to make sure
- * that this does not occur.
- */
- return min(x, total);
-#else
- return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the numebr of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
- unsigned long x;
-
- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
- if (!vm_highmem_is_dirtyable)
- x -= highmem_dirtyable_memory(x);
-
- return x + 1; /* Ensure that we never return 0 */
-}
-
static unsigned long dirty_freerun_ceiling(unsigned long thresh,
unsigned long bg_thresh)
{
@@ -363,47 +492,6 @@ static unsigned long hard_dirty_limit(unsigned long thresh)
return max(thresh, global_dirty_limit);
}
-/*
- * global_dirty_limits - background-writeback and dirty-throttling thresholds
- *
- * Calculate the dirty thresholds based on sysctl parameters
- * - vm.dirty_background_ratio or vm.dirty_background_bytes
- * - vm.dirty_ratio or vm.dirty_bytes
- * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * real-time tasks.
- */
-void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
-{
- unsigned long background;
- unsigned long dirty;
- unsigned long uninitialized_var(available_memory);
- struct task_struct *tsk;
-
- if (!vm_dirty_bytes || !dirty_background_bytes)
- available_memory = determine_dirtyable_memory();
-
- if (vm_dirty_bytes)
- dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
- else
- dirty = (vm_dirty_ratio * available_memory) / 100;
-
- if (dirty_background_bytes)
- background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
- else
- background = (dirty_background_ratio * available_memory) / 100;
-
- if (background >= dirty)
- background = dirty / 2;
- tsk = current;
- if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
- background += background / 4;
- dirty += dirty / 4;
- }
- *pbackground = background;
- *pdirty = dirty;
- trace_global_dirty_state(background, dirty);
-}
-
/**
* bdi_dirty_limit - @bdi's share of dirty throttling threshold
* @bdi: the backing_dev_info to query
@@ -411,8 +499,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
*
* Returns @bdi's dirty limit in pages. The term "dirty" in the context of
* dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In the other normal situations, it acts more gently by throttling the tasks
+ * more (rather than completely block them) when the bdi dirty pages go high.
*
* It allocates high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
@@ -594,6 +687,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
*/
if (unlikely(bdi_thresh > thresh))
bdi_thresh = thresh;
+ /*
+ * It's very possible that bdi_thresh is close to 0 not because the
+ * device is slow, but that it has remained inactive for a long time.
+ * Honour such devices with a reasonably good (hopefully IO efficient)
+ * threshold, so that the occasional writes won't be blocked and active
+ * writes can ramp up the threshold quickly.
+ */
bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
/*
* scale global setpoint to bdi's:
@@ -804,6 +904,11 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
*/
balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
dirty_rate | 1);
+ /*
+ * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
+ */
+ if (unlikely(balanced_dirty_ratelimit > write_bw))
+ balanced_dirty_ratelimit = write_bw;
/*
* We could safely do this and return immediately:
@@ -950,41 +1055,98 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
return 1;
}
-static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
- unsigned long bdi_dirty)
+static long bdi_max_pause(struct backing_dev_info *bdi,
+ unsigned long bdi_dirty)
{
- unsigned long bw = bdi->avg_write_bandwidth;
- unsigned long hi = ilog2(bw);
- unsigned long lo = ilog2(bdi->dirty_ratelimit);
- unsigned long t;
+ long bw = bdi->avg_write_bandwidth;
+ long t;
- /* target for 20ms max pause on 1-dd case */
- t = HZ / 50;
+ /*
+ * Limit pause time for small memory systems. If we sleep for too
+ * long, a small pool of dirty/writeback pages may go empty and the
+ * disk may go idle.
+ *
+ * 8 serves as the safety ratio.
+ */
+ t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
+ t++;
+
+ return min_t(long, t, MAX_PAUSE);
+}
+
+static long bdi_min_pause(struct backing_dev_info *bdi,
+ long max_pause,
+ unsigned long task_ratelimit,
+ unsigned long dirty_ratelimit,
+ int *nr_dirtied_pause)
+{
+ long hi = ilog2(bdi->avg_write_bandwidth);
+ long lo = ilog2(bdi->dirty_ratelimit);
+ long t; /* target pause */
+ long pause; /* estimated next pause */
+ int pages; /* target nr_dirtied_pause */
+
+ /* target for 10ms pause on 1-dd case */
+ t = max(1, HZ / 100);
/*
* Scale up pause time for concurrent dirtiers in order to reduce CPU
* overheads.
*
- * (N * 20ms) on 2^N concurrent tasks.
+ * (N * 10ms) on 2^N concurrent tasks.
*/
if (hi > lo)
- t += (hi - lo) * (20 * HZ) / 1024;
+ t += (hi - lo) * (10 * HZ) / 1024;
/*
- * Limit pause time for small memory systems. If sleeping for too long
- * time, a small pool of dirty/writeback pages may go empty and disk go
- * idle.
+ * This is a bit convoluted. We try to base the next nr_dirtied_pause
+ * on the much more stable dirty_ratelimit. However the next pause time
+ * will be computed based on task_ratelimit and the two rate limits may
+ * depart considerably at some time. Especially if task_ratelimit goes
+ * below dirty_ratelimit/2 and the target pause is max_pause, the next
+ * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
+ * result task_ratelimit won't be executed faithfully, which could
+ * eventually bring down dirty_ratelimit.
*
- * 8 serves as the safety ratio.
+ * We apply two rules to fix it up:
+ * 1) try to estimate the next pause time and if necessary, use a lower
+ * nr_dirtied_pause so as not to exceed max_pause. When this happens,
+ * nr_dirtied_pause will be "dancing" with task_ratelimit.
+ * 2) limit the target pause time to max_pause/2, so that the normal
+ * small fluctuations of task_ratelimit won't trigger rule (1) and
+ * nr_dirtied_pause will remain as stable as dirty_ratelimit.
*/
- if (bdi_dirty)
- t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+ t = min(t, 1 + max_pause / 2);
+ pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
/*
- * The pause time will be settled within range (max_pause/4, max_pause).
- * Apply a minimal value of 4 to get a non-zero max_pause/4.
+ * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
+ * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
+ * When the 16 consecutive reads are often interrupted by some dirty
+ * throttling pause during the async writes, cfq will go into idles
+ * (deadline is fine). So push nr_dirtied_pause as high as possible
+ * until it reaches DIRTY_POLL_THRESH=32 pages.
*/
- return clamp_val(t, 4, MAX_PAUSE);
+ if (pages < DIRTY_POLL_THRESH) {
+ t = max_pause;
+ pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
+ if (pages > DIRTY_POLL_THRESH) {
+ pages = DIRTY_POLL_THRESH;
+ t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
+ }
+ }
+
+ pause = HZ * pages / (task_ratelimit + 1);
+ if (pause > max_pause) {
+ t = max_pause;
+ pages = task_ratelimit * t / roundup_pow_of_two(HZ);
+ }
+
+ *nr_dirtied_pause = pages;
+ /*
+ * The minimal pause time will normally be half the target pause time.
+ */
+ return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
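
To make the bdi_min_pause() bookkeeping above easier to follow, here is a rough stand-alone rendition of its arithmetic. HZ, MAX_PAUSE and DIRTY_POLL_THRESH are assumed values, roundup_pow_of_two() is reimplemented, the concurrent-dirtier scaling (the ilog2 terms) is omitted, and dirty_ratelimit is assumed to be non-zero.

#include <stdio.h>

#define HZ 100
#define MAX_PAUSE (HZ / 5)              /* max(HZ/5, 1) in the kernel        */
#define DIRTY_POLL_THRESH 32            /* 128 >> (PAGE_SHIFT - 10), 4K pages */

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

/* Returns the minimum pause in jiffies and fills *nr_dirtied_pause. */
static long toy_min_pause(long max_pause, unsigned long task_ratelimit,
			  unsigned long dirty_ratelimit, int *nr_dirtied_pause)
{
	long t = HZ / 100 > 1 ? HZ / 100 : 1;  /* ~10ms target, 1-dd case   */
	long pause;
	int pages;

	if (t > 1 + max_pause / 2)             /* rule (2): keep t stable   */
		t = 1 + max_pause / 2;
	pages = dirty_ratelimit * t / roundup_pow_of_two_ul(HZ);

	if (pages < DIRTY_POLL_THRESH) {       /* avoid tiny poll intervals */
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two_ul(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);  /* rule (1): estimate    */
	if (pause > max_pause) {                    /* next pause, trim back */
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two_ul(HZ);
	}

	*nr_dirtied_pause = pages;
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

int main(void)
{
	int pages;
	long min_pause = toy_min_pause(MAX_PAUSE, 256, 256, &pages);

	printf("min_pause=%ld jiffies, nr_dirtied_pause=%d\n", min_pause, pages);
	return 0;
}
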
/*
@@ -1005,16 +1167,21 @@ static void balance_dirty_pages(struct address_space *mapping,
unsigned long background_thresh;
unsigned long dirty_thresh;
unsigned long bdi_thresh;
- long pause = 0;
- long uninitialized_var(max_pause);
+ long period;
+ long pause;
+ long max_pause;
+ long min_pause;
+ int nr_dirtied_pause;
bool dirty_exceeded = false;
unsigned long task_ratelimit;
- unsigned long uninitialized_var(dirty_ratelimit);
+ unsigned long dirty_ratelimit;
unsigned long pos_ratio;
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long start_time = jiffies;
for (;;) {
+ unsigned long now = jiffies;
+
/*
* Unstable writes are a feature of certain networked
* filesystems (i.e. NFS) in which data may have been
@@ -1034,8 +1201,13 @@ static void balance_dirty_pages(struct address_space *mapping,
*/
freerun = dirty_freerun_ceiling(dirty_thresh,
background_thresh);
- if (nr_dirty <= freerun)
+ if (nr_dirty <= freerun) {
+ current->dirty_paused_when = now;
+ current->nr_dirtied = 0;
+ current->nr_dirtied_pause =
+ dirty_poll_interval(nr_dirty, dirty_thresh);
break;
+ }
if (unlikely(!writeback_in_progress(bdi)))
bdi_start_background_writeback(bdi);
@@ -1075,7 +1247,7 @@ static void balance_dirty_pages(struct address_space *mapping,
bdi_stat(bdi, BDI_WRITEBACK);
}
- dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+ dirty_exceeded = (bdi_dirty > bdi_thresh) &&
(nr_dirty > dirty_thresh);
if (dirty_exceeded && !bdi->dirty_exceeded)
bdi->dirty_exceeded = 1;
@@ -1084,20 +1256,34 @@ static void balance_dirty_pages(struct address_space *mapping,
nr_dirty, bdi_thresh, bdi_dirty,
start_time);
- max_pause = bdi_max_pause(bdi, bdi_dirty);
-
dirty_ratelimit = bdi->dirty_ratelimit;
pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
background_thresh, nr_dirty,
bdi_thresh, bdi_dirty);
task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
RATELIMIT_CALC_SHIFT;
+ max_pause = bdi_max_pause(bdi, bdi_dirty);
+ min_pause = bdi_min_pause(bdi, max_pause,
+ task_ratelimit, dirty_ratelimit,
+ &nr_dirtied_pause);
+
if (unlikely(task_ratelimit == 0)) {
+ period = max_pause;
pause = max_pause;
goto pause;
}
- pause = HZ * pages_dirtied / task_ratelimit;
- if (unlikely(pause <= 0)) {
+ period = HZ * pages_dirtied / task_ratelimit;
+ pause = period;
+ if (current->dirty_paused_when)
+ pause -= now - current->dirty_paused_when;
+ /*
+ * For less than 1s think time (ext3/4 may block the dirtier
+ * for up to 800ms from time to time on 1-HDD; so does xfs,
+ * though much less frequently), try to compensate for it in
+ * future periods by updating the virtual time; otherwise just
+ * do a reset, as it may be a light dirtier.
+ */
+ if (pause < min_pause) {
trace_balance_dirty_pages(bdi,
dirty_thresh,
background_thresh,
@@ -1107,12 +1293,24 @@ static void balance_dirty_pages(struct address_space *mapping,
dirty_ratelimit,
task_ratelimit,
pages_dirtied,
- pause,
+ period,
+ min(pause, 0L),
start_time);
- pause = 1; /* avoid resetting nr_dirtied_pause below */
+ if (pause < -HZ) {
+ current->dirty_paused_when = now;
+ current->nr_dirtied = 0;
+ } else if (period) {
+ current->dirty_paused_when += period;
+ current->nr_dirtied = 0;
+ } else if (current->nr_dirtied_pause <= pages_dirtied)
+ current->nr_dirtied_pause += pages_dirtied;
break;
}
- pause = min(pause, max_pause);
+ if (unlikely(pause > max_pause)) {
+ /* for occasional dropped task_ratelimit */
+ now += min(pause - max_pause, max_pause);
+ pause = max_pause;
+ }
pause:
trace_balance_dirty_pages(bdi,
@@ -1124,11 +1322,16 @@ pause:
dirty_ratelimit,
task_ratelimit,
pages_dirtied,
+ period,
pause,
start_time);
__set_current_state(TASK_KILLABLE);
io_schedule_timeout(pause);
+ current->dirty_paused_when = now + pause;
+ current->nr_dirtied = 0;
+ current->nr_dirtied_pause = nr_dirtied_pause;
+
/*
* This is typically equal to (nr_dirty < dirty_thresh) and can
* also keep "1000+ dd on a slow USB stick" under control.
@@ -1136,6 +1339,19 @@ pause:
if (task_ratelimit)
break;
+ /*
+ * In the case of an unresponsive NFS server whose NFS dirty
+ * pages exceed dirty_thresh, give the other good bdi's a pipe
+ * to go through, so that tasks on them still remain responsive.
+ *
+ * In theory 1 page is enough to keep the consumer-producer
+ * pipe going: the flusher cleans 1 page => the task dirties 1
+ * more page. However bdi_dirty has accounting errors. So use
+ * the larger and more IO friendly bdi_stat_error.
+ */
+ if (bdi_dirty <= bdi_stat_error(bdi))
+ break;
+
if (fatal_signal_pending(current))
break;
}
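
The hunk above charges each pause against the dirtier's own think time: the period owed for pages_dirtied is derived from task_ratelimit, and the time already spent since dirty_paused_when is subtracted before sleeping. A small sketch of that accounting with made-up jiffies values:

#include <stdio.h>

#define HZ 100

int main(void)
{
	unsigned long now = 1000;              /* current jiffies (made up)   */
	unsigned long dirty_paused_when = 990; /* end of the previous pause   */
	unsigned long pages_dirtied = 32;
	unsigned long task_ratelimit = 200;    /* pages/s the task may dirty  */

	long period = HZ * pages_dirtied / task_ratelimit; /* owed: 16 ticks  */
	long pause = period - (long)(now - dirty_paused_when); /* think time  */

	if (pause > 0)
		printf("sleep %ld jiffies, resume accounting at %lu\n",
		       pause, now + pause);
	else
		printf("think time already covered the debt, no sleep\n");
	return 0;
}
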
@@ -1143,23 +1359,6 @@ pause:
if (!dirty_exceeded && bdi->dirty_exceeded)
bdi->dirty_exceeded = 0;
- current->nr_dirtied = 0;
- if (pause == 0) { /* in freerun area */
- current->nr_dirtied_pause =
- dirty_poll_interval(nr_dirty, dirty_thresh);
- } else if (pause <= max_pause / 4 &&
- pages_dirtied >= current->nr_dirtied_pause) {
- current->nr_dirtied_pause = clamp_val(
- dirty_ratelimit * (max_pause / 2) / HZ,
- pages_dirtied + pages_dirtied / 8,
- pages_dirtied * 4);
- } else if (pause >= max_pause) {
- current->nr_dirtied_pause = 1 | clamp_val(
- dirty_ratelimit * (max_pause / 2) / HZ,
- pages_dirtied / 4,
- pages_dirtied - pages_dirtied / 8);
- }
-
if (writeback_in_progress(bdi))
return;
@@ -1190,6 +1389,22 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
static DEFINE_PER_CPU(int, bdp_ratelimits);
+/*
+ * Normal tasks are throttled by
+ * loop {
+ * dirty tsk->nr_dirtied_pause pages;
+ * take a snap in balance_dirty_pages();
+ * }
+ * However there is a worst case. If every task exits immediately after
+ * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
+ * called to throttle the page dirties. The solution is to save the not yet
+ * throttled page dirties in dirty_throttle_leaks on task exit and charge them
+ * randomly into the running tasks. This works well for the above worst case,
+ * as the new task will pick up and accumulate the old task's leaked dirty
+ * count and eventually get throttled.
+ */
+DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
+
/**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied
@@ -1218,8 +1433,6 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
if (bdi->dirty_exceeded)
ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
- current->nr_dirtied += nr_pages_dirtied;
-
preempt_disable();
/*
* This prevents one CPU to accumulate too many dirtied pages without
@@ -1230,12 +1443,20 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
p = &__get_cpu_var(bdp_ratelimits);
if (unlikely(current->nr_dirtied >= ratelimit))
*p = 0;
- else {
- *p += nr_pages_dirtied;
- if (unlikely(*p >= ratelimit_pages)) {
- *p = 0;
- ratelimit = 0;
- }
+ else if (unlikely(*p >= ratelimit_pages)) {
+ *p = 0;
+ ratelimit = 0;
+ }
+ /*
+ * Pick up the dirtied pages left behind by exited tasks. This avoids lots of
+ * short-lived tasks (eg. gcc invocations in a kernel build) escaping
+ * the dirty throttling and livelocking other long-running dirtiers.
+ */
+ p = &__get_cpu_var(dirty_throttle_leaks);
+ if (*p > 0 && current->nr_dirtied < ratelimit) {
+ nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
+ *p -= nr_pages_dirtied;
+ current->nr_dirtied += nr_pages_dirtied;
}
preempt_enable();
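
The dirty_throttle_leaks handling above lets long-lived dirtiers inherit the unthrottled page count left behind by short-lived tasks. A toy model of that handoff, using plain globals in place of the per-CPU counter and the task fields:

#include <stdio.h>

static int dirty_throttle_leaks;        /* pages leaked by exited tasks */

struct toy_task { int nr_dirtied; };

/* Called when a task exits with pages it never got throttled for. */
static void task_exit(struct toy_task *t)
{
	dirty_throttle_leaks += t->nr_dirtied;
	t->nr_dirtied = 0;
}

/* Called on the dirtying path: adopt part of the leaked count. */
static void pick_up_leaks(struct toy_task *t, int ratelimit)
{
	if (dirty_throttle_leaks > 0 && t->nr_dirtied < ratelimit) {
		int n = ratelimit - t->nr_dirtied;

		if (n > dirty_throttle_leaks)
			n = dirty_throttle_leaks;
		dirty_throttle_leaks -= n;
		t->nr_dirtied += n;
	}
}

int main(void)
{
	struct toy_task gcc_like = { .nr_dirtied = 7 };
	struct toy_task dd_like = { .nr_dirtied = 0 };

	task_exit(&gcc_like);               /* 7 pages escape throttling...  */
	pick_up_leaks(&dd_like, 32);        /* ...and are charged to dd_like */
	printf("dd_like now owes %d dirtied pages\n", dd_like.nr_dirtied);
	return 0;
}
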
@@ -1717,6 +1938,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
task_io_account_write(PAGE_CACHE_SIZE);
+ current->nr_dirtied++;
+ this_cpu_inc(bdp_ratelimits);
}
}
EXPORT_SYMBOL(account_page_dirtied);
@@ -1777,6 +2000,24 @@ int __set_page_dirty_nobuffers(struct page *page)
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
+ * Call this whenever redirtying a page, to de-account the dirty counters
+ * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
+ * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
+ * systematic errors in balanced_dirty_ratelimit and the dirty pages position
+ * control.
+ */
+void account_page_redirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ if (mapping && mapping_cap_account_dirty(mapping)) {
+ current->nr_dirtied--;
+ dec_zone_page_state(page, NR_DIRTIED);
+ dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+ }
+}
+EXPORT_SYMBOL(account_page_redirty);
+
+/*
* When a writepage implementation decides that it doesn't want to write this
* page for some reason, it should redirty the locked page via
* redirty_page_for_writepage() and it should then unlock the page and return 0
@@ -1784,6 +2025,7 @@ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
wbc->pages_skipped++;
+ account_page_redirty(page);
return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
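
account_page_redirty() above exists to keep the dirtied counters in step with the written counters: a page redirtied by a writepage implementation was already counted once, so that count is rolled back before it is counted again. A toy model of the bookkeeping, with plain counters standing in for the zone and bdi statistics:

#include <stdio.h>

static long nr_dirtied, nr_written;  /* stand-ins for NR_DIRTIED/NR_WRITTEN */

static void set_page_dirty(void) { nr_dirtied++; } /* account_page_dirtied */
static void writeback_done(void) { nr_written++; } /* page actually written */

static void redirty_for_writepage(void)
{
	nr_dirtied--;        /* account_page_redirty(): undo the earlier count */
	set_page_dirty();    /* ...because setting it dirty again re-counts it */
}

int main(void)
{
	set_page_dirty();          /* application dirties the page             */
	redirty_for_writepage();   /* flusher skips it once and redirties      */
	writeback_done();          /* second attempt completes                 */

	/* Without the de-accounting step the two counters would drift apart. */
	printf("dirtied=%ld written=%ld\n", nr_dirtied, nr_written);
	return 0;
}
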
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d89d8..794e6715c22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/page-debug-flags.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -96,6 +97,14 @@ EXPORT_SYMBOL(node_states);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory. This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -127,6 +136,13 @@ void pm_restrict_gfp_mask(void)
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~GFP_IOFS;
}
+
+bool pm_suspended_storage(void)
+{
+ if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+ return false;
+ return true;
+}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -181,39 +197,17 @@ static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
- /*
- * MAX_ACTIVE_REGIONS determines the maximum number of distinct
- * ranges of memory (RAM) that may be registered with add_active_range().
- * Ranges passed to add_active_range() will be merged if possible
- * so the number of times add_active_range() can be called is
- * related to the number of nodes and the number of holes
- */
- #ifdef CONFIG_MAX_ACTIVE_REGIONS
- /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
- #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
- #else
- #if MAX_NUMNODES >= 32
- /* If there can be many nodes, allow up to 50 holes per node */
- #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
- #else
- /* By default, allow up to 256 distinct regions */
- #define MAX_ACTIVE_REGIONS 256
- #endif
- #endif
-
- static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
- static int __meminitdata nr_nodemap_entries;
- static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
- static unsigned long __initdata required_kernelcore;
- static unsigned long __initdata required_movablecore;
- static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-
- /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
- int movable_zone;
- EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_movablecore;
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+
+/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+int movable_zone;
+EXPORT_SYMBOL(movable_zone);
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -333,8 +327,8 @@ out:
*
* The remaining PAGE_SIZE pages are called "tail pages".
*
- * All pages have PG_compound set. All pages have their ->private pointing at
- * the head page (even the head page has this).
+ * All pages have PG_compound set. All tail pages have their ->first_page
+ * pointing at the head page.
*
* The first tail page's ->lru.next holds the address of the compound page's
* put_page() function. Its ->lru.prev holds the order of allocation.
@@ -356,8 +350,8 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
-
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
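
The prep_compound_page() hunk above makes every tail page carry a zero reference count and a ->first_page pointer back to the head. A toy illustration of that layout with an invented structure:

#include <stddef.h>
#include <stdio.h>

struct toy_page {
	int is_tail;
	int count;                       /* reference count            */
	struct toy_page *first_page;     /* tail pages point at head   */
};

/* Set up a compound (higher-order) page: head + (2^order - 1) tails. */
static void prep_compound(struct toy_page *pages, unsigned int order)
{
	unsigned long nr = 1UL << order;

	pages[0].is_tail = 0;
	for (unsigned long i = 1; i < nr; i++) {
		pages[i].is_tail = 1;
		pages[i].count = 0;          /* tails carry no references  */
		pages[i].first_page = &pages[0];
	}
}

int main(void)
{
	struct toy_page pages[4] = { { 0, 1, NULL } };

	prep_compound(pages, 2);
	printf("tail 3 points at head: %d\n", pages[3].first_page == &pages[0]);
	return 0;
}
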
@@ -403,6 +397,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
clear_highpage(page + i);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+unsigned int _debug_guardpage_minorder;
+
+static int __init debug_guardpage_minorder_setup(char *buf)
+{
+ unsigned long res;
+
+ if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+ printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
+ return 0;
+ }
+ _debug_guardpage_minorder = res;
+ printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
+ return 0;
+}
+__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
+
+static inline void set_page_guard_flag(struct page *page)
+{
+ __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+
+static inline void clear_page_guard_flag(struct page *page)
+{
+ __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline void set_page_guard_flag(struct page *page) { }
+static inline void clear_page_guard_flag(struct page *page) { }
+#endif
+
static inline void set_page_order(struct page *page, int order)
{
set_page_private(page, order);
@@ -460,6 +485,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
if (page_zone_id(page) != page_zone_id(buddy))
return 0;
+ if (page_is_guard(buddy) && page_order(buddy) == order) {
+ VM_BUG_ON(page_count(buddy) != 0);
+ return 1;
+ }
+
if (PageBuddy(buddy) && page_order(buddy) == order) {
VM_BUG_ON(page_count(buddy) != 0);
return 1;
@@ -516,11 +546,19 @@ static inline void __free_one_page(struct page *page,
buddy = page + (buddy_idx - page_idx);
if (!page_is_buddy(page, buddy, order))
break;
-
- /* Our buddy is free, merge with it and move up one order. */
- list_del(&buddy->lru);
- zone->free_area[order].nr_free--;
- rmv_page_order(buddy);
+ /*
+ * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page,
+ * merge with it and move up one order.
+ */
+ if (page_is_guard(buddy)) {
+ clear_page_guard_flag(buddy);
+ set_page_private(page, 0);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+ } else {
+ list_del(&buddy->lru);
+ zone->free_area[order].nr_free--;
+ rmv_page_order(buddy);
+ }
combined_idx = buddy_idx & page_idx;
page = page + (combined_idx - page_idx);
page_idx = combined_idx;
@@ -654,7 +692,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
int i;
int bad = 0;
- trace_mm_page_free_direct(page, order);
+ trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
if (PageAnon(page))
@@ -692,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
- if (order == 0) {
- __ClearPageReserved(page);
- set_page_count(page, 0);
- set_page_refcounted(page);
- __free_page(page);
- } else {
- int loop;
-
- prefetchw(page);
- for (loop = 0; loop < BITS_PER_LONG; loop++) {
- struct page *p = &page[loop];
+ unsigned int nr_pages = 1 << order;
+ unsigned int loop;
- if (loop + 1 < BITS_PER_LONG)
- prefetchw(p + 1);
- __ClearPageReserved(p);
- set_page_count(p, 0);
- }
+ prefetchw(page);
+ for (loop = 0; loop < nr_pages; loop++) {
+ struct page *p = &page[loop];
- set_page_refcounted(page);
- __free_pages(page, order);
+ if (loop + 1 < nr_pages)
+ prefetchw(p + 1);
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
}
+
+ set_page_refcounted(page);
+ __free_pages(page, order);
}
@@ -746,6 +775,23 @@ static inline void expand(struct zone *zone, struct page *page,
high--;
size >>= 1;
VM_BUG_ON(bad_range(zone, &page[size]));
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ if (high < debug_guardpage_minorder()) {
+ /*
+ * Mark as guard pages (or page); this allows them to be
+ * merged back into the allocator when the buddy is freed.
+ * The corresponding page table entries are not touched, so
+ * the pages stay non-present in the virtual address space.
+ */
+ INIT_LIST_HEAD(&page[size].lru);
+ set_page_guard_flag(&page[size]);
+ set_page_private(&page[size], high);
+ /* Guard pages are not available for any usage */
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+ continue;
+ }
+#endif
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
@@ -1211,6 +1257,19 @@ out:
}
/*
+ * Free a list of 0-order pages
+ */
+void free_hot_cold_page_list(struct list_head *list, int cold)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, list, lru) {
+ trace_mm_page_free_batched(page, cold);
+ free_hot_cold_page(page, cold);
+ }
+}
+
+/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n]
* Each sub-page must be freed individually.
@@ -1408,7 +1467,7 @@ static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
static int __init fail_page_alloc_debugfs(void)
{
- mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
@@ -1457,7 +1516,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
long min = mark;
int o;
- free_pages -= (1 << order) + 1;
+ free_pages -= (1 << order) - 1;
if (alloc_flags & ALLOC_HIGH)
min -= min / 2;
if (alloc_flags & ALLOC_HARDER)
@@ -1667,6 +1726,35 @@ zonelist_scan:
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
continue;
+ /*
+ * When allocating a page cache page for writing, we
+ * want to get it from a zone that is within its dirty
+ * limit, such that no single zone holds more than its
+ * proportional share of globally allowed dirty pages.
+ * The dirty limits take into account the zone's
+ * lowmem reserves and high watermark so that kswapd
+ * should be able to balance it without having to
+ * write pages from its LRU list.
+ *
+ * This may look like it could increase pressure on
+ * lower zones by failing allocations in higher zones
+ * before they are full. But the pages that do spill
+ * over are limited as the lower zones are protected
+ * by this very same mechanism. It should not become
+ * a practical burden to them.
+ *
+ * XXX: For now, allow allocations to potentially
+ * exceed the per-zone dirty limit in the slowpath
+ * (ALLOC_WMARK_LOW unset) before going into reclaim,
+ * which is important when on a NUMA setup the allowed
+ * zones are together not big enough to reach the
+ * global limit. The proper fix for these situations
+ * will require awareness of zones in the
+ * dirty-throttling and the flusher threads.
+ */
+ if ((alloc_flags & ALLOC_WMARK_LOW) &&
+ (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+ goto this_zone_full;
BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
@@ -1756,7 +1844,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
unsigned int filter = SHOW_MEM_FILTER_NODES;
- if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
+ if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
+ debug_guardpage_minorder() > 0)
return;
/*
@@ -1795,12 +1884,25 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
+ unsigned long did_some_progress,
unsigned long pages_reclaimed)
{
/* Do not loop if specifically requested */
if (gfp_mask & __GFP_NORETRY)
return 0;
+ /* Always retry if specifically requested */
+ if (gfp_mask & __GFP_NOFAIL)
+ return 1;
+
+ /*
+ * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
+ * making forward progress without invoking OOM. Suspend also disables
+ * storage devices so kswapd will not help. Bail if we are suspending.
+ */
+ if (!did_some_progress && pm_suspended_storage())
+ return 0;
+
/*
* In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
* means __GFP_NOFAIL, but that may not be true in other
@@ -1819,13 +1921,6 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
return 1;
- /*
- * Don't let big-order allocations loop unless the caller
- * explicitly requests that.
- */
- if (gfp_mask & __GFP_NOFAIL)
- return 1;
-
return 0;
}
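
The reshuffled should_alloc_retry() above now answers, in order: did the caller forbid retries, did it demand them, is storage suspended with no reclaim progress, and otherwise is the request small or explicitly repeatable. A hedged user-space restatement of that decision ladder; the flag names and helpers are simplified stand-ins, not the real gfp API.

#include <stdbool.h>
#include <stdio.h>

#define GFP_NORETRY (1u << 0)
#define GFP_NOFAIL  (1u << 1)
#define GFP_REPEAT  (1u << 2)

#define PAGE_ALLOC_COSTLY_ORDER 3

static bool storage_suspended;          /* stand-in for pm_suspended_storage() */

static bool should_retry(unsigned int gfp, unsigned int order,
			 unsigned long progress, unsigned long reclaimed)
{
	if (gfp & GFP_NORETRY)
		return false;            /* caller asked for a single attempt  */
	if (gfp & GFP_NOFAIL)
		return true;             /* caller cannot tolerate failure     */
	if (!progress && storage_suspended)
		return false;            /* reclaim cannot make progress now   */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;             /* small orders keep trying           */
	if ((gfp & GFP_REPEAT) && reclaimed < (1UL << order))
		return true;
	return false;
}

int main(void)
{
	storage_suspended = true;
	printf("%d\n", should_retry(0, 0, 0, 0));   /* 0: bail during suspend */
	return 0;
}
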
@@ -2218,7 +2313,8 @@ rebalance:
/* Check if we should retry the allocation */
pages_reclaimed += did_some_progress;
- if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
+ if (should_alloc_retry(gfp_mask, order, did_some_progress,
+ pages_reclaimed)) {
/* Wait for some write requests to complete then retry */
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
goto rebalance;
@@ -2328,16 +2424,6 @@ unsigned long get_zeroed_page(gfp_t gfp_mask)
}
EXPORT_SYMBOL(get_zeroed_page);
-void __pagevec_free(struct pagevec *pvec)
-{
- int i = pagevec_count(pvec);
-
- while (--i >= 0) {
- trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
- free_hot_cold_page(pvec->pages[i], pvec->cold);
- }
-}
-
void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
@@ -3377,9 +3463,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
unsigned long block_migratetype;
int reserve;
- /* Get the start pfn, end pfn and the number of blocks to reserve */
+ /*
+ * Get the start pfn, end pfn and the number of blocks to reserve
+ * We have to be careful to be aligned to pageblock_nr_pages to
+ * make sure that we always check pfn_valid for the first page in
+ * the block.
+ */
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
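
The added roundup() in the hunk above keeps the scan aligned to pageblock_nr_pages, so pfn_valid() is only ever consulted for the first page of a block. A one-line illustration of that rounding, with an assumed 512-page block size:

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long pageblock_nr_pages = 512;   /* assumed 2 MiB blocks */
	unsigned long zone_start_pfn = 0x100a;    /* not block aligned    */

	/* Align up so the loop only inspects block-aligned first pages. */
	printf("%#lx\n", roundup(zone_start_pfn, pageblock_nr_pages));
	return 0;
}
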
@@ -3401,25 +3493,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
if (page_to_nid(page) != zone_to_nid(zone))
continue;
- /* Blocks with reserved pages will never free, skip them. */
- block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
- if (pageblock_is_reserved(pfn, block_end_pfn))
- continue;
-
block_migratetype = get_pageblock_migratetype(page);
- /* If this block is reserved, account for it */
- if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
- reserve--;
- continue;
- }
+ /* Only test what is necessary when the reserves are not met */
+ if (reserve > 0) {
+ /*
+ * Blocks with reserved pages will never free, skip
+ * them.
+ */
+ block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+ if (pageblock_is_reserved(pfn, block_end_pfn))
+ continue;
- /* Suitable for reserving if this block is movable */
- if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
- set_pageblock_migratetype(page, MIGRATE_RESERVE);
- move_freepages_block(zone, page, MIGRATE_RESERVE);
- reserve--;
- continue;
+ /* If this block is reserved, account for it */
+ if (block_migratetype == MIGRATE_RESERVE) {
+ reserve--;
+ continue;
+ }
+
+ /* Suitable for reserving if this block is movable */
+ if (block_migratetype == MIGRATE_MOVABLE) {
+ set_pageblock_migratetype(page,
+ MIGRATE_RESERVE);
+ move_freepages_block(zone, page,
+ MIGRATE_RESERVE);
+ reserve--;
+ continue;
+ }
}
/*
@@ -3731,35 +3831,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
return 0;
}
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++)
- if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
- return i;
-
- return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
- for (index = index + 1; index < nr_nodemap_entries; index++)
- if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
- return index;
-
- return -1;
-}
-
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/*
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3769,15 +3841,12 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
*/
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
- int i;
-
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long start_pfn = early_node_map[i].start_pfn;
- unsigned long end_pfn = early_node_map[i].end_pfn;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
if (start_pfn <= pfn && pfn < end_pfn)
- return early_node_map[i].nid;
- }
+ return nid;
/* This is a memory hole */
return -1;
}
@@ -3806,11 +3875,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
}
#endif
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
- for (i = first_active_region_index_in_nid(nid); i != -1; \
- i = next_active_region_index_in_nid(i, nid))
-
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3820,122 +3884,34 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 * add_active_ranges() contain no holes and may be freed,
 * this function may be used instead of calling free_bootmem() manually.
*/
-void __init free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn)
-{
- int i;
-
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long size_pages = 0;
- unsigned long end_pfn = early_node_map[i].end_pfn;
-
- if (early_node_map[i].start_pfn >= max_low_pfn)
- continue;
-
- if (end_pfn > max_low_pfn)
- end_pfn = max_low_pfn;
-
- size_pages = end_pfn - early_node_map[i].start_pfn;
- free_bootmem_node(NODE_DATA(early_node_map[i].nid),
- PFN_PHYS(early_node_map[i].start_pfn),
- size_pages << PAGE_SHIFT);
- }
-}
-
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
-{
- int i;
-
- for (i = nr_nodemap_entries - 1; i >= 0; i--)
- if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
- return i;
-
- return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
- for (index = index - 1; index >= 0; index--)
- if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
- return index;
-
- return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
- for (i = last_active_region_index_in_nid(nid); i != -1; \
- i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
- u64 goal, u64 limit)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
- int i;
-
- /* Need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid_reverse(i, nid) {
- u64 addr;
- u64 ei_start, ei_last;
- u64 final_start, final_end;
-
- ei_last = early_node_map[i].end_pfn;
- ei_last <<= PAGE_SHIFT;
- ei_start = early_node_map[i].start_pfn;
- ei_start <<= PAGE_SHIFT;
-
- final_start = max(ei_start, goal);
- final_end = min(ei_last, limit);
-
- if (final_start >= final_end)
- continue;
-
- addr = memblock_find_in_range(final_start, final_end, size, align);
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- if (addr == MEMBLOCK_ERROR)
- continue;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+ start_pfn = min(start_pfn, max_low_pfn);
+ end_pfn = min(end_pfn, max_low_pfn);
- return addr;
+ if (start_pfn < end_pfn)
+ free_bootmem_node(NODE_DATA(this_nid),
+ PFN_PHYS(start_pfn),
+ (end_pfn - start_pfn) << PAGE_SHIFT);
}
-
- return MEMBLOCK_ERROR;
}
-#endif
int __init add_from_early_node_map(struct range *range, int az,
int nr_range, int nid)
{
+ unsigned long start_pfn, end_pfn;
int i;
- u64 start, end;
/* need to go over early_node_map to find out good range for node */
- for_each_active_range_index_in_nid(i, nid) {
- start = early_node_map[i].start_pfn;
- end = early_node_map[i].end_pfn;
- nr_range = add_range(range, az, nr_range, start, end);
- }
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+ nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
return nr_range;
}
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
- int i;
- int ret;
-
- for_each_active_range_index_in_nid(i, nid) {
- ret = work_fn(early_node_map[i].start_pfn,
- early_node_map[i].end_pfn, data);
- if (ret)
- break;
- }
-}
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3946,12 +3922,11 @@ void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
- int i;
+ unsigned long start_pfn, end_pfn;
+ int i, this_nid;
- for_each_active_range_index_in_nid(i, nid)
- memory_present(early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+ memory_present(this_nid, start_pfn, end_pfn);
}
/**
@@ -3968,13 +3943,15 @@ void __init sparse_memory_present_with_active_regions(int nid)
void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
+ unsigned long this_start_pfn, this_end_pfn;
int i;
+
*start_pfn = -1UL;
*end_pfn = 0;
- for_each_active_range_index_in_nid(i, nid) {
- *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
- *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+ for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
}
if (*start_pfn == -1UL)
@@ -4077,46 +4054,16 @@ unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
- int i = 0;
- unsigned long prev_end_pfn = 0, hole_pages = 0;
- unsigned long start_pfn;
-
- /* Find the end_pfn of the first active range of pfns in the node */
- i = first_active_region_index_in_nid(nid);
- if (i == -1)
- return 0;
-
- prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-
- /* Account for ranges before physical memory on this node */
- if (early_node_map[i].start_pfn > range_start_pfn)
- hole_pages = prev_end_pfn - range_start_pfn;
-
- /* Find all holes for the zone within the node */
- for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
-
- /* No need to continue if prev_end_pfn is outside the zone */
- if (prev_end_pfn >= range_end_pfn)
- break;
-
- /* Make sure the end of the zone is not within the hole */
- start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
- prev_end_pfn = max(prev_end_pfn, range_start_pfn);
+ unsigned long nr_absent = range_end_pfn - range_start_pfn;
+ unsigned long start_pfn, end_pfn;
+ int i;
- /* Update the hole size cound and move on */
- if (start_pfn > range_start_pfn) {
- BUG_ON(prev_end_pfn > start_pfn);
- hole_pages += start_pfn - prev_end_pfn;
- }
- prev_end_pfn = early_node_map[i].end_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+ nr_absent -= end_pfn - start_pfn;
}
-
- /* Account for ranges past physical memory on this node */
- if (range_end_pfn > prev_end_pfn)
- hole_pages += range_end_pfn -
- max(range_start_pfn, prev_end_pfn);
-
- return hole_pages;
+ return nr_absent;
}
/**
@@ -4137,14 +4084,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
+ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
+ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long node_start_pfn, node_end_pfn;
unsigned long zone_start_pfn, zone_end_pfn;
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
- zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
- node_start_pfn);
- zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
- node_end_pfn);
+ zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
+ zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
@@ -4152,7 +4099,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
-#else
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *zones_size)
@@ -4170,7 +4117,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
-#endif
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
@@ -4393,10 +4340,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4421,7 +4368,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat, zones_size, zholes_size);
}
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#if MAX_NUMNODES > 1
/*
@@ -4443,170 +4390,6 @@ static inline void setup_nr_node_ids(void)
#endif
/**
- * add_active_range - Register a range of PFNs backed by physical memory
- * @nid: The node ID the range resides on
- * @start_pfn: The start PFN of the available physical memory
- * @end_pfn: The end PFN of the available physical memory
- *
- * These ranges are stored in an early_node_map[] and later used by
- * free_area_init_nodes() to calculate zone sizes and holes. If the
- * range spans a memory hole, it is up to the architecture to ensure
- * the memory is not freed by the bootmem allocator. If possible
- * the range being registered will be merged with existing ranges.
- */
-void __init add_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int i;
-
- mminit_dprintk(MMINIT_TRACE, "memory_register",
- "Entering add_active_range(%d, %#lx, %#lx) "
- "%d entries of %d used\n",
- nid, start_pfn, end_pfn,
- nr_nodemap_entries, MAX_ACTIVE_REGIONS);
-
- mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-
- /* Merge with existing active regions if possible */
- for (i = 0; i < nr_nodemap_entries; i++) {
- if (early_node_map[i].nid != nid)
- continue;
-
- /* Skip if an existing region covers this new one */
- if (start_pfn >= early_node_map[i].start_pfn &&
- end_pfn <= early_node_map[i].end_pfn)
- return;
-
- /* Merge forward if suitable */
- if (start_pfn <= early_node_map[i].end_pfn &&
- end_pfn > early_node_map[i].end_pfn) {
- early_node_map[i].end_pfn = end_pfn;
- return;
- }
-
- /* Merge backward if suitable */
- if (start_pfn < early_node_map[i].start_pfn &&
- end_pfn >= early_node_map[i].start_pfn) {
- early_node_map[i].start_pfn = start_pfn;
- return;
- }
- }
-
- /* Check that early_node_map is large enough */
- if (i >= MAX_ACTIVE_REGIONS) {
- printk(KERN_CRIT "More than %d memory regions, truncating\n",
- MAX_ACTIVE_REGIONS);
- return;
- }
-
- early_node_map[i].nid = nid;
- early_node_map[i].start_pfn = start_pfn;
- early_node_map[i].end_pfn = end_pfn;
- nr_nodemap_entries = i + 1;
-}
-
-/**
- * remove_active_range - Shrink an existing registered range of PFNs
- * @nid: The node id the range is on that should be shrunk
- * @start_pfn: The new PFN of the range
- * @end_pfn: The new PFN of the range
- *
- * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept near the end physical page range that has already been
- * registered. This function allows an arch to shrink an existing registered
- * range.
- */
-void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int i, j;
- int removed = 0;
-
- printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
- nid, start_pfn, end_pfn);
-
- /* Find the old active region end and shrink */
- for_each_active_range_index_in_nid(i, nid) {
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn <= end_pfn) {
- /* clear it */
- early_node_map[i].start_pfn = 0;
- early_node_map[i].end_pfn = 0;
- removed = 1;
- continue;
- }
- if (early_node_map[i].start_pfn < start_pfn &&
- early_node_map[i].end_pfn > start_pfn) {
- unsigned long temp_end_pfn = early_node_map[i].end_pfn;
- early_node_map[i].end_pfn = start_pfn;
- if (temp_end_pfn > end_pfn)
- add_active_range(nid, end_pfn, temp_end_pfn);
- continue;
- }
- if (early_node_map[i].start_pfn >= start_pfn &&
- early_node_map[i].end_pfn > end_pfn &&
- early_node_map[i].start_pfn < end_pfn) {
- early_node_map[i].start_pfn = end_pfn;
- continue;
- }
- }
-
- if (!removed)
- return;
-
- /* remove the blank ones */
- for (i = nr_nodemap_entries - 1; i > 0; i--) {
- if (early_node_map[i].nid != nid)
- continue;
- if (early_node_map[i].end_pfn)
- continue;
- /* we found it, get rid of it */
- for (j = i; j < nr_nodemap_entries - 1; j++)
- memcpy(&early_node_map[j], &early_node_map[j+1],
- sizeof(early_node_map[j]));
- j = nr_nodemap_entries - 1;
- memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
- nr_nodemap_entries--;
- }
-}
-
-/**
- * remove_all_active_ranges - Remove all currently registered regions
- *
- * During discovery, it may be found that a table like SRAT is invalid
- * and an alternative discovery method must be used. This function removes
- * all currently registered regions.
- */
-void __init remove_all_active_ranges(void)
-{
- memset(early_node_map, 0, sizeof(early_node_map));
- nr_nodemap_entries = 0;
-}
-
-/* Compare two active node_active_regions */
-static int __init cmp_node_active_region(const void *a, const void *b)
-{
- struct node_active_region *arange = (struct node_active_region *)a;
- struct node_active_region *brange = (struct node_active_region *)b;
-
- /* Done this way to avoid overflows */
- if (arange->start_pfn > brange->start_pfn)
- return 1;
- if (arange->start_pfn < brange->start_pfn)
- return -1;
-
- return 0;
-}
-
-/* sort the node_map by start_pfn */
-void __init sort_node_map(void)
-{
- sort(early_node_map, (size_t)nr_nodemap_entries,
- sizeof(struct node_active_region),
- cmp_node_active_region, NULL);
-}
-
-/**
* node_map_pfn_alignment - determine the maximum internode alignment
*
* This function should be called after node map is populated and sorted.
@@ -4628,15 +4411,11 @@ void __init sort_node_map(void)
unsigned long __init node_map_pfn_alignment(void)
{
unsigned long accl_mask = 0, last_end = 0;
+ unsigned long start, end, mask;
int last_nid = -1;
- int i;
-
- for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
- int nid = early_node_map[i].nid;
- unsigned long start = early_node_map[i].start_pfn;
- unsigned long end = early_node_map[i].end_pfn;
- unsigned long mask;
+ int i, nid;
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
if (!start || last_nid < 0 || last_nid == nid) {
last_nid = nid;
last_end = end;
@@ -4663,12 +4442,12 @@ unsigned long __init node_map_pfn_alignment(void)
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
- int i;
unsigned long min_pfn = ULONG_MAX;
+ unsigned long start_pfn;
+ int i;
- /* Assuming a sorted map, the first range found has the starting pfn */
- for_each_active_range_index_in_nid(i, nid)
- min_pfn = min(min_pfn, early_node_map[i].start_pfn);
+ for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
+ min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING
@@ -4697,15 +4476,16 @@ unsigned long __init find_min_pfn_with_active_regions(void)
*/
static unsigned long __init early_calculate_totalpages(void)
{
- int i;
unsigned long totalpages = 0;
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ unsigned long pages = end_pfn - start_pfn;
- for (i = 0; i < nr_nodemap_entries; i++) {
- unsigned long pages = early_node_map[i].end_pfn -
- early_node_map[i].start_pfn;
totalpages += pages;
if (pages)
- node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+ node_set_state(nid, N_HIGH_MEMORY);
}
return totalpages;
}
@@ -4760,6 +4540,8 @@ restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
for_each_node_state(nid, N_HIGH_MEMORY) {
+ unsigned long start_pfn, end_pfn;
+
/*
* Recalculate kernelcore_node if the division per node
* now exceeds what is necessary to satisfy the requested
@@ -4776,13 +4558,10 @@ restart:
kernelcore_remaining = kernelcore_node;
/* Go through each range of PFNs within this node */
- for_each_active_range_index_in_nid(i, nid) {
- unsigned long start_pfn, end_pfn;
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
- start_pfn = max(early_node_map[i].start_pfn,
- zone_movable_pfn[nid]);
- end_pfn = early_node_map[i].end_pfn;
+ start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
@@ -4884,11 +4663,8 @@ static void check_for_regular_memory(pg_data_t *pgdat)
*/
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
- unsigned long nid;
- int i;
-
- /* Sort early_node_map as initialisation assumes it is sorted */
- sort_node_map();
+ unsigned long start_pfn, end_pfn;
+ int i, nid;
/* Record where the zone boundaries are */
memset(arch_zone_lowest_possible_pfn, 0,
@@ -4935,11 +4711,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
}
/* Print out the early_node_map[] */
- printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
- for (i = 0; i < nr_nodemap_entries; i++)
- printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
- early_node_map[i].start_pfn,
- early_node_map[i].end_pfn);
+ printk("Early memory PFN ranges\n");
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+ printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
/* Initialise every node */
mminit_verify_pageflags_layout();
@@ -4992,7 +4766,7 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
@@ -5076,8 +4850,19 @@ static void calculate_totalreserve_pages(void)
if (max > zone->present_pages)
max = zone->present_pages;
reserve_pages += max;
+ /*
+ * Lowmem reserves are not available to
+ * GFP_HIGHUSER page cache allocations and
+ * kswapd tries to balance zones to their high
+ * watermark. As a result, neither should be
+ * regarded as dirtyable memory, to prevent a
+ * situation where reclaim has to clean pages
+ * in order to balance the zones.
+ */
+ zone->dirty_balance_reserve = max;
}
}
+ dirty_balance_reserve = reserve_pages;
totalreserve_pages = reserve_pages;
}
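
The comment added above explains why lowmem reserves and the kswapd high
watermark should not be treated as dirtyable memory. As a rough
illustration of that bookkeeping only, the userspace sketch below
subtracts such a reserve when sizing the dirtyable pool; the figures and
helper names are invented and this is not the kernel's dirty-throttling
code.

#include <stdio.h>

static unsigned long total_pages = 1UL << 20;            /* assumed: 4 GiB of 4 KiB pages */
static unsigned long dirty_balance_reserve = 1UL << 15;  /* assumed reserve */

/* Pages the dirty throttling logic may consider for dirtying. */
static unsigned long dirtyable_pages(void)
{
	unsigned long pages = total_pages;	/* simplified: everything else is dirtyable */

	/* Reserved pages are never available to page cache allocations,
	 * so do not let writers dirty against them. */
	if (pages <= dirty_balance_reserve)
		return 0;
	return pages - dirty_balance_reserve;
}

int main(void)
{
	printf("dirtyable pages: %lu\n", dirtyable_pages());
	return 0;
}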
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index ea534960a04..12a48a88c0d 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -50,14 +50,13 @@ static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
if (!pages || !bitmap) {
if (may_alloc && !pages)
- pages = pcpu_mem_alloc(pages_size);
+ pages = pcpu_mem_zalloc(pages_size);
if (may_alloc && !bitmap)
- bitmap = pcpu_mem_alloc(bitmap_size);
+ bitmap = pcpu_mem_zalloc(bitmap_size);
if (!pages || !bitmap)
return NULL;
}
- memset(pages, 0, pages_size);
bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
*bitmapp = bitmap;
@@ -143,8 +142,8 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vunmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
@@ -206,8 +205,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_tlb_kernel_range(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
@@ -284,8 +283,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
int page_start, int page_end)
{
flush_cache_vmap(
- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
+ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
+ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}
/**
diff --git a/mm/percpu.c b/mm/percpu.c
index bf80e55dbed..716eb4acf2f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;
-/* cpus with the lowest and highest unit numbers */
-static unsigned int pcpu_first_unit_cpu __read_mostly;
-static unsigned int pcpu_last_unit_cpu __read_mostly;
+/* cpus with the lowest and highest unit addresses */
+static unsigned int pcpu_low_unit_cpu __read_mostly;
+static unsigned int pcpu_high_unit_cpu __read_mostly;
/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
@@ -273,11 +273,11 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
(rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
/**
- * pcpu_mem_alloc - allocate memory
+ * pcpu_mem_zalloc - allocate memory
* @size: bytes to allocate
*
* Allocate @size bytes. If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * kzalloc() is used; otherwise, vzalloc() is used. The returned
* memory is always zeroed.
*
* CONTEXT:
@@ -286,7 +286,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
-static void *pcpu_mem_alloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size)
{
if (WARN_ON_ONCE(!slab_is_available()))
return NULL;
@@ -302,7 +302,7 @@ static void *pcpu_mem_alloc(size_t size)
* @ptr: memory to free
* @size: size of the area
*
- * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
*/
static void pcpu_mem_free(void *ptr, size_t size)
{
@@ -384,7 +384,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
unsigned long flags;
- new = pcpu_mem_alloc(new_size);
+ new = pcpu_mem_zalloc(new_size);
if (!new)
return -ENOMEM;
@@ -604,11 +604,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
struct pcpu_chunk *chunk;
- chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
+ chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
if (!chunk)
return NULL;
- chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+ chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
+ sizeof(chunk->map[0]));
if (!chunk->map) {
kfree(chunk);
return NULL;
@@ -977,6 +978,17 @@ bool is_kernel_percpu_address(unsigned long addr)
* address. The caller is responsible for ensuring @addr stays valid
* until this function finishes.
*
+ * The percpu allocator has a special setup for the first chunk, which
+ * currently supports either embedding in the linear address space or a
+ * vmalloc mapping; from the second chunk on, the backing allocator
+ * (currently either vm or km) provides the translation.
+ *
+ * The address could be translated simply, without checking whether it
+ * falls into the first chunk. But the current code better reflects how
+ * the percpu allocator actually works, and the verification can discover
+ * bugs both in the percpu allocator itself and in per_cpu_ptr_to_phys()
+ * callers. So we keep the current code.
+ *
* RETURNS:
* The physical address for @addr.
*/
@@ -984,19 +996,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
bool in_first_chunk = false;
- unsigned long first_start, first_end;
+ unsigned long first_low, first_high;
unsigned int cpu;
/*
- * The following test on first_start/end isn't strictly
+ * The following test on unit_low/high isn't strictly
* necessary but will speed up lookups of addresses which
* aren't in the first chunk.
*/
- first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
- first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
- pcpu_unit_pages);
- if ((unsigned long)addr >= first_start &&
- (unsigned long)addr < first_end) {
+ first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
+ first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
+ pcpu_unit_pages);
+ if ((unsigned long)addr >= first_low &&
+ (unsigned long)addr < first_high) {
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(base, cpu);
@@ -1011,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
if (!is_vmalloc_addr(addr))
return __pa(addr);
else
- return page_to_phys(vmalloc_to_page(addr));
+ return page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
} else
- return page_to_phys(pcpu_addr_to_page(addr));
+ return page_to_phys(pcpu_addr_to_page(addr)) +
+ offset_in_page(addr);
}
/**
@@ -1233,7 +1247,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX;
- pcpu_first_unit_cpu = NR_CPUS;
+
+ pcpu_low_unit_cpu = NR_CPUS;
+ pcpu_high_unit_cpu = NR_CPUS;
for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
const struct pcpu_group_info *gi = &ai->groups[group];
@@ -1253,9 +1269,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
unit_map[cpu] = unit + i;
unit_off[cpu] = gi->base_offset + i * ai->unit_size;
- if (pcpu_first_unit_cpu == NR_CPUS)
- pcpu_first_unit_cpu = cpu;
- pcpu_last_unit_cpu = cpu;
+ /* determine low/high unit_cpu */
+ if (pcpu_low_unit_cpu == NR_CPUS ||
+ unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
+ pcpu_low_unit_cpu = cpu;
+ if (pcpu_high_unit_cpu == NR_CPUS ||
+ unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
+ pcpu_high_unit_cpu = cpu;
}
}
pcpu_nr_units = unit;
@@ -1889,7 +1909,7 @@ void __init percpu_init_late(void)
BUILD_BUG_ON(size > PAGE_SIZE);
- map = pcpu_mem_alloc(size);
+ map = pcpu_mem_zalloc(size);
BUG_ON(!map);
spin_lock_irqsave(&pcpu_lock, flags);
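
One hunk above adds offset_in_page(addr) to the page_to_phys() result in
per_cpu_ptr_to_phys(), because translating through the backing page only
yields the page's base physical address. The standalone sketch below
shows the same base-plus-offset arithmetic; page_base_phys() and the
constants are made-up stand-ins, not kernel helpers.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical lookup: physical base of the page backing a virtual address. */
static uintptr_t page_base_phys(uintptr_t vaddr)
{
	return (vaddr & PAGE_MASK) + 0x100000UL;	/* fake linear mapping */
}

static uintptr_t virt_to_phys_sketch(uintptr_t vaddr)
{
	/* Without the second term the low bits of the address are lost,
	 * which is the bug the hunk above fixes. */
	return page_base_phys(vaddr) + (vaddr & ~PAGE_MASK);
}

int main(void)
{
	uintptr_t v = 0x7f0000001234UL;

	printf("phys = %#lx\n", (unsigned long)virt_to_phys_sketch(v));
	return 0;
}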
diff --git a/mm/rmap.c b/mm/rmap.c
index a4fd3680038..a2e5ce1fa08 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -272,6 +272,51 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
}
/*
+ * Some rmap walks (like migrate and split_huge_page) need to find all
+ * ptes/hugepmds without false negatives while running concurrently
+ * with operations that copy or move pagetables (like mremap() and
+ * fork()). They depend on the anon_vma "same_anon_vma"
+ * list to be in a certain order: the dst_vma must be placed after the
+ * src_vma in the list. This is always guaranteed by fork() but
+ * mremap() needs to call this function to enforce it in case the
+ * dst_vma isn't newly allocated and chained with the anon_vma_clone()
+ * function but just an extension of a pre-existing vma through
+ * vma_merge.
+ *
+ * NOTE: the same_anon_vma list can still be changed by other
+ * processes while mremap runs because mremap doesn't hold the
+ * anon_vma mutex to prevent modifications to the list while it
+ * runs. All we need to enforce is that the relative order of this
+ * process vmas isn't changing (we don't care about other vmas
+ * order). Each vma corresponds to an anon_vma_chain structure so
+ * there's no risk that other processes calling anon_vma_moveto_tail()
+ * and changing the same_anon_vma list under mremap() will screw with
+ * the relative order of this process's vmas in the list, because
+ * they can't alter the order of any vma that belongs to this
+ * process. And there can't be another anon_vma_moveto_tail() running
+ * concurrently with mremap() coming from this process because we hold
+ * the mmap_sem for the whole mremap(). fork() ordering dependency
+ * also shouldn't be affected because fork() only cares that the
+ * parent vmas are placed in the list before the child vmas and
+ * anon_vma_moveto_tail() won't reorder vmas from either the fork()
+ * parent or child.
+ */
+void anon_vma_moveto_tail(struct vm_area_struct *dst)
+{
+ struct anon_vma_chain *pavc;
+ struct anon_vma *root = NULL;
+
+ list_for_each_entry_reverse(pavc, &dst->anon_vma_chain, same_vma) {
+ struct anon_vma *anon_vma = pavc->anon_vma;
+ VM_BUG_ON(pavc->vma != dst);
+ root = lock_anon_vma_root(root, anon_vma);
+ list_del(&pavc->same_anon_vma);
+ list_add_tail(&pavc->same_anon_vma, &anon_vma->head);
+ }
+ unlock_anon_vma_root(root);
+}
+
+/*
* Attach vma to its own anon_vma, as well as to the anon_vmas that
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
diff --git a/mm/shmem.c b/mm/shmem.c
index d6722506d2d..feead1943d9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1092,7 +1092,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
- int mode, dev_t dev, unsigned long flags)
+ umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
@@ -1456,7 +1456,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
* File creation. Allocate an inode, and we're done..
*/
static int
-shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOSPC;
@@ -1489,7 +1489,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return error;
}
-static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error;
@@ -1499,7 +1499,7 @@ static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return 0;
}
-static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
+static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
@@ -2118,9 +2118,9 @@ out:
return error;
}
-static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
+static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
- struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
+ struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
if (sbinfo->max_blocks != shmem_default_max_blocks())
seq_printf(seq, ",size=%luk",
@@ -2128,7 +2128,7 @@ static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (sbinfo->max_inodes != shmem_default_max_inodes())
seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
- seq_printf(seq, ",mode=%03o", sbinfo->mode);
+ seq_printf(seq, ",mode=%03ho", sbinfo->mode);
if (sbinfo->uid != 0)
seq_printf(seq, ",uid=%u", sbinfo->uid);
if (sbinfo->gid != 0)
@@ -2234,13 +2234,12 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
static void shmem_destroy_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void shmem_destroy_inode(struct inode *inode)
{
- if ((inode->i_mode & S_IFMT) == S_IFREG)
+ if (S_ISREG(inode->i_mode))
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
call_rcu(&inode->i_rcu, shmem_destroy_callback);
}
diff --git a/mm/slab.c b/mm/slab.c
index 708efe88615..2acfa0d9094 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -121,6 +121,8 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
+#include <trace/events/kmem.h>
+
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
@@ -595,6 +597,7 @@ static enum {
PARTIAL_AC,
PARTIAL_L3,
EARLY,
+ LATE,
FULL
} g_cpucache_up;
@@ -671,7 +674,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up != FULL)
+ if (g_cpucache_up < LATE)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1669,8 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
+ g_cpucache_up = LATE;
+
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
diff --git a/mm/slub.c b/mm/slub.c
index 7d2a996c307..d99acbf14e0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -368,7 +368,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
VM_BUG_ON(!irqs_disabled());
#ifdef CONFIG_CMPXCHG_DOUBLE
if (s->flags & __CMPXCHG_DOUBLE) {
- if (cmpxchg_double(&page->freelist,
+ if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
return 1;
@@ -402,7 +402,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
{
#ifdef CONFIG_CMPXCHG_DOUBLE
if (s->flags & __CMPXCHG_DOUBLE) {
- if (cmpxchg_double(&page->freelist,
+ if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
freelist_new, counters_new))
return 1;
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
{
struct kmem_cache_node *n = NULL;
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
- struct page *page;
+ struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
if (l == M_PARTIAL)
remove_partial(n, page);
else
- add_partial(n, page, 1);
+ add_partial(n, page,
+ DEACTIVATE_TO_TAIL);
l = m;
}
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
"unfreezing slab"));
if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
- stat(s, FREE_SLAB);
+ page->next = discard_page;
+ discard_page = page;
}
}
if (n)
spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+ discard_page = discard_page->next;
+
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
+ }
}
/*
@@ -2295,7 +2304,7 @@ redo:
* Since this is without lock semantics the protection is only against
* code executing on this cpu *not* from access by other cpus.
*/
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
object, tid,
get_freepointer_safe(s, object), next_tid(tid)))) {
@@ -2525,7 +2534,7 @@ redo:
if (likely(page == c->page)) {
set_freepointer(s, object, c->freelist);
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
c->freelist, tid,
object, next_tid(tid)))) {
@@ -3645,6 +3654,9 @@ void __init kmem_cache_init(void)
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
+ if (debug_guardpage_minorder())
+ slub_max_order = 0;
+
kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *);
@@ -4435,30 +4447,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+ int node = ACCESS_ONCE(c->node);
struct page *page;
- if (!c || c->node < 0)
+ if (node < 0)
continue;
-
- if (c->page) {
- if (flags & SO_TOTAL)
- x = c->page->objects;
+ page = ACCESS_ONCE(c->page);
+ if (page) {
+ if (flags & SO_TOTAL)
+ x = page->objects;
else if (flags & SO_OBJECTS)
- x = c->page->inuse;
+ x = page->inuse;
else
x = 1;
total += x;
- nodes[c->node] += x;
+ nodes[node] += x;
}
page = c->partial;
if (page) {
x = page->pobjects;
- total += x;
- nodes[c->node] += x;
+ total += x;
+ nodes[node] += x;
}
- per_cpu[c->node]++;
+ per_cpu[node]++;
}
}
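
The unfreeze_partials() change above stops calling discard_slab() while
the node list_lock is held and instead chains empty slabs onto a private
discard_page list that is drained after the lock is dropped. A generic
version of that defer-the-slow-work-outside-the-lock pattern, sketched
here with pthreads and an invented item type (not slab code), could look
like this:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *shared_list;

static void drain_and_free(void)
{
	struct item *discard = NULL;
	struct item *it;

	pthread_mutex_lock(&list_lock);
	while ((it = shared_list) != NULL) {
		shared_list = it->next;
		/* Defer the actual free: just chain the item locally. */
		it->next = discard;
		discard = it;
	}
	pthread_mutex_unlock(&list_lock);

	/* Do the slow work outside the critical section. */
	while (discard) {
		it = discard;
		discard = discard->next;
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct item *it = malloc(sizeof(*it));

		it->next = shared_list;
		shared_list = it;
	}
	drain_and_free();
	printf("done\n");
	return 0;
}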
diff --git a/mm/swap.c b/mm/swap.c
index a91caf754d9..67a09a633a0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -585,11 +585,10 @@ int lru_add_drain_all(void)
void release_pages(struct page **pages, int nr, int cold)
{
int i;
- struct pagevec pages_to_free;
+ LIST_HEAD(pages_to_free);
struct zone *zone = NULL;
unsigned long uninitialized_var(flags);
- pagevec_init(&pages_to_free, cold);
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
@@ -620,19 +619,12 @@ void release_pages(struct page **pages, int nr, int cold)
del_page_from_lru(zone, page);
}
- if (!pagevec_add(&pages_to_free, page)) {
- if (zone) {
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- zone = NULL;
- }
- __pagevec_free(&pages_to_free);
- pagevec_reinit(&pages_to_free);
- }
+ list_add(&page->lru, &pages_to_free);
}
if (zone)
spin_unlock_irqrestore(&zone->lru_lock, flags);
- pagevec_free(&pages_to_free);
+ free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 78cc4d1f6cc..ea6b32d6187 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -13,7 +13,6 @@
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
-#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b1cd1206072..9520592d423 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -667,10 +667,10 @@ int try_to_free_swap(struct page *page)
* original page might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
- * Hibernation clears bits from gfp_allowed_mask to prevent
- * memory reclaim from writing to disk, so check that here.
+ * Hibernation suspends storage while it is writing the image
+ * to disk, so check that here.
*/
- if (!(gfp_allowed_mask & __GFP_IO))
+ if (pm_suspended_storage())
return 0;
delete_from_swap_cache(page);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3231bf33287..877ca046f43 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,7 +256,7 @@ struct vmap_area {
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
struct list_head purge_list; /* "lazy purge" list */
- void *private;
+ struct vm_struct *vm;
struct rcu_head rcu_head;
};
@@ -1118,6 +1118,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
EXPORT_SYMBOL(vm_map_ram);
/**
+ * vm_area_add_early - add vmap area early during boot
+ * @vm: vm_struct to add
+ *
+ * This function is used to add a fixed kernel vm area to vmlist before
+ * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
+ * should contain proper values and the other fields should be zero.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_add_early(struct vm_struct *vm)
+{
+ struct vm_struct *tmp, **p;
+
+ BUG_ON(vmap_initialized);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr) {
+ BUG_ON(tmp->addr < vm->addr + vm->size);
+ break;
+ } else
+ BUG_ON(tmp->addr + tmp->size > vm->addr);
+ }
+ vm->next = *p;
+ *p = vm;
+}
+
+/**
* vm_area_register_early - register vmap area early during boot
* @vm: vm_struct to register
* @align: requested alignment
@@ -1139,8 +1165,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
vm->addr = (void *)addr;
- vm->next = vmlist;
- vmlist = vm;
+ vm_area_add_early(vm);
}
void __init vmalloc_init(void)
@@ -1260,7 +1285,7 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
vm->addr = (void *)va->va_start;
vm->size = va->va_end - va->va_start;
vm->caller = caller;
- va->private = vm;
+ va->vm = vm;
va->flags |= VM_VM_AREA;
}
@@ -1290,7 +1315,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
- static struct vmap_area *va;
+ struct vmap_area *va;
struct vm_struct *area;
BUG_ON(in_interrupt());
@@ -1383,7 +1408,7 @@ static struct vm_struct *find_vm_area(const void *addr)
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA)
- return va->private;
+ return va->vm;
return NULL;
}
@@ -1402,7 +1427,7 @@ struct vm_struct *remove_vm_area(const void *addr)
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
- struct vm_struct *vm = va->private;
+ struct vm_struct *vm = va->vm;
if (!(vm->flags & VM_UNLIST)) {
struct vm_struct *tmp, **p;
@@ -1633,6 +1658,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
goto fail;
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ if (!addr)
+ return NULL;
/*
* In this function, newly allocated vm_struct is not added
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c05079..26f4a8a4e0c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
@@ -715,7 +715,13 @@ static enum page_references page_check_references(struct page *page,
*/
SetPageReferenced(page);
- if (referenced_page)
+ if (referenced_page || referenced_ptes > 1)
+ return PAGEREF_ACTIVATE;
+
+ /*
+ * Activate file-backed executable pages after first usage.
+ */
+ if (vm_flags & VM_EXEC)
return PAGEREF_ACTIVATE;
return PAGEREF_KEEP;
@@ -728,24 +734,6 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM;
}
-static noinline_for_stack void free_page_list(struct list_head *free_pages)
-{
- struct pagevec freed_pvec;
- struct page *page, *tmp;
-
- pagevec_init(&freed_pvec, 1);
-
- list_for_each_entry_safe(page, tmp, free_pages, lru) {
- list_del(&page->lru);
- if (!pagevec_add(&freed_pvec, page)) {
- __pagevec_free(&freed_pvec);
- pagevec_reinit(&freed_pvec);
- }
- }
-
- pagevec_free(&freed_pvec);
-}
-
/*
* shrink_page_list() returns the number of reclaimed pages
*/
@@ -1009,7 +997,7 @@ keep_lumpy:
if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
zone_set_flag(zone, ZONE_CONGESTED);
- free_page_list(&free_pages);
+ free_hot_cold_page_list(&free_pages, 1);
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
@@ -1178,14 +1166,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* anon page which don't already have a swap slot is
* pointless.
*/
- if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+ if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
!PageSwapCache(cursor_page))
break;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
mem_cgroup_del_lru(cursor_page);
- nr_taken += hpage_nr_pages(page);
+ nr_taken += hpage_nr_pages(cursor_page);
nr_lumpy_taken++;
if (PageDirty(cursor_page))
nr_lumpy_dirty++;
@@ -2012,8 +2000,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
- inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+ inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+ if (nr_swap_pages > 0)
+ inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
return true;
@@ -3448,9 +3437,10 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
static void warn_scan_unevictable_pages(void)
{
printk_once(KERN_WARNING
- "The scan_unevictable_pages sysctl/node-interface has been "
+ "%s: The scan_unevictable_pages sysctl/node-interface has been "
"disabled for lack of a legitimate use case. If you have "
- "one, please send an email to linux-mm@kvack.org.\n");
+ "one, please send an email to linux-mm@kvack.org.\n",
+ current->comm);
}
/*
@@ -3475,16 +3465,16 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
* a specified node's per zone unevictable lists for evictable pages.
*/
-static ssize_t read_scan_unevictable_node(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t read_scan_unevictable_node(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
warn_scan_unevictable_pages();
return sprintf(buf, "0\n"); /* always zero; should fit... */
}
-static ssize_t write_scan_unevictable_node(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t write_scan_unevictable_node(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
warn_scan_unevictable_pages();
@@ -3492,17 +3482,17 @@ static ssize_t write_scan_unevictable_node(struct sys_device *dev,
}
-static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
read_scan_unevictable_node,
write_scan_unevictable_node);
int scan_unevictable_register_node(struct node *node)
{
- return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+ return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
void scan_unevictable_unregister_node(struct node *node)
{
- sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+ device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif
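
The shrink_slab() hunks above replace the open-coded cmpxchg() retry
loops on shrinker->nr with a single atomic_long_xchg() that claims the
pending count and an atomic_long_add_return() that puts back the
unscanned remainder. Loosely translated into C11 atomics (a sketch only,
not the kernel API, with a placeholder scan function), the pattern is:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch;

static long do_scan(long nr)
{
	return nr / 2;			/* pretend we scanned half */
}

static void shrink_once(long newly_requested)
{
	/* Claim the whole pending count atomically; concurrent callers see 0. */
	long nr = atomic_exchange(&nr_in_batch, 0);
	long total = nr + newly_requested;
	long scanned = do_scan(total);
	long leftover = total - scanned;

	/* Return the unscanned remainder in one atomic add. */
	if (leftover > 0)
		atomic_fetch_add(&nr_in_batch, leftover);
}

int main(void)
{
	shrink_once(128);
	printf("pending: %ld\n", (long)atomic_load(&nr_in_batch));
	return 0;
}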
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5471628d3ff..efea35b02e7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,27 +51,6 @@ const char vlan_version[] = DRV_VERSION;
/* End of global variables definitions. */
-static void vlan_group_free(struct vlan_group *grp)
-{
- int i;
-
- for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
- kfree(grp->vlan_devices_arrays[i]);
- kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
- struct vlan_group *grp;
-
- grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
- if (!grp)
- return NULL;
-
- grp->real_dev = real_dev;
- return grp;
-}
-
static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
{
struct net_device **array;
@@ -92,32 +71,29 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
return 0;
}
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
- vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
- const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct vlan_info *vlan_info;
struct vlan_group *grp;
u16 vlan_id = vlan->vlan_id;
ASSERT_RTNL();
- grp = rtnl_dereference(real_dev->vlgrp);
- BUG_ON(!grp);
+ vlan_info = rtnl_dereference(real_dev->vlan_info);
+ BUG_ON(!vlan_info);
+
+ grp = &vlan_info->grp;
/* Take it out of our own structures, but be sure to interlock with
* HW accelerating devices or SW vlan input packet processing if
* VLAN is not 0 (leave it there for 802.1p).
*/
- if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
- ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+ if (vlan_id)
+ vlan_vid_del(real_dev, vlan_id);
- grp->nr_vlans--;
+ grp->nr_vlan_devs--;
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);
@@ -129,16 +105,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
*/
unregister_netdevice_queue(dev, head);
- /* If the group is now empty, kill off the group. */
- if (grp->nr_vlans == 0) {
+ if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
- RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
- /* Free the group, after all cpu's are done. */
- call_rcu(&grp->rcu, vlan_rcu_free);
- }
-
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
}
@@ -167,21 +136,26 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
int register_vlan_dev(struct net_device *dev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
- const struct net_device_ops *ops = real_dev->netdev_ops;
u16 vlan_id = vlan->vlan_id;
- struct vlan_group *grp, *ngrp = NULL;
+ struct vlan_info *vlan_info;
+ struct vlan_group *grp;
int err;
- grp = rtnl_dereference(real_dev->vlgrp);
- if (!grp) {
- ngrp = grp = vlan_group_alloc(real_dev);
- if (!grp)
- return -ENOBUFS;
+ err = vlan_vid_add(real_dev, vlan_id);
+ if (err)
+ return err;
+
+ vlan_info = rtnl_dereference(real_dev->vlan_info);
+ /* vlan_info should be there now. vlan_vid_add took care of it */
+ BUG_ON(!vlan_info);
+
+ grp = &vlan_info->grp;
+ if (grp->nr_vlan_devs == 0) {
err = vlan_gvrp_init_applicant(real_dev);
if (err < 0)
- goto out_free_group;
+ goto out_vid_del;
}
err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -192,7 +166,7 @@ int register_vlan_dev(struct net_device *dev)
if (err < 0)
goto out_uninit_applicant;
- /* Account for reference in struct vlan_dev_info */
+ /* Account for reference in struct vlan_dev_priv */
dev_hold(real_dev);
netif_stacked_transfer_operstate(real_dev, dev);
@@ -202,24 +176,15 @@ int register_vlan_dev(struct net_device *dev)
* it into our local structure.
*/
vlan_group_set_device(grp, vlan_id, dev);
- grp->nr_vlans++;
-
- if (ngrp) {
- rcu_assign_pointer(real_dev->vlgrp, ngrp);
- }
- if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
- ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+ grp->nr_vlan_devs++;
return 0;
out_uninit_applicant:
- if (ngrp)
+ if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
- if (ngrp) {
- /* Free the group, after all cpu's are done. */
- call_rcu(&ngrp->rcu, vlan_rcu_free);
- }
+out_vid_del:
+ vlan_vid_del(real_dev, vlan_id);
return err;
}
@@ -267,7 +232,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
}
- new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+ new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
if (new_dev == NULL)
return -ENOBUFS;
@@ -278,10 +243,10 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
*/
new_dev->mtu = real_dev->mtu;
- vlan_dev_info(new_dev)->vlan_id = vlan_id;
- vlan_dev_info(new_dev)->real_dev = real_dev;
- vlan_dev_info(new_dev)->dent = NULL;
- vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+ vlan_dev_priv(new_dev)->vlan_id = vlan_id;
+ vlan_dev_priv(new_dev)->real_dev = real_dev;
+ vlan_dev_priv(new_dev)->dent = NULL;
+ vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
@@ -298,7 +263,7 @@ out_free_newdev:
static void vlan_sync_address(struct net_device *dev,
struct net_device *vlandev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
/* May be called without an actual change */
if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
@@ -360,25 +325,26 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
{
struct net_device *dev = ptr;
struct vlan_group *grp;
+ struct vlan_info *vlan_info;
int i, flgs;
struct net_device *vlandev;
- struct vlan_dev_info *vlan;
+ struct vlan_dev_priv *vlan;
LIST_HEAD(list);
if (is_vlan_dev(dev))
__vlan_device_event(dev, event);
if ((event == NETDEV_UP) &&
- (dev->features & NETIF_F_HW_VLAN_FILTER) &&
- dev->netdev_ops->ndo_vlan_rx_add_vid) {
+ (dev->features & NETIF_F_HW_VLAN_FILTER)) {
pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name);
- dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+ vlan_vid_add(dev, 0);
}
- grp = rtnl_dereference(dev->vlgrp);
- if (!grp)
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
goto out;
+ grp = &vlan_info->grp;
/* It is OK that we do not hold the group lock right now,
* as we run under the RTNL lock.
@@ -447,7 +413,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (!(flgs & IFF_UP))
continue;
- vlan = vlan_dev_info(vlandev);
+ vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
dev_change_flags(vlandev, flgs & ~IFF_UP);
netif_stacked_transfer_operstate(dev, vlandev);
@@ -465,7 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (flgs & IFF_UP)
continue;
- vlan = vlan_dev_info(vlandev);
+ vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
dev_change_flags(vlandev, flgs | IFF_UP);
netif_stacked_transfer_operstate(dev, vlandev);
@@ -482,9 +448,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (!vlandev)
continue;
- /* unregistration of last vlan destroys group, abort
+ /* removal of last vid destroys vlan_info, abort
* afterwards */
- if (grp->nr_vlans == 1)
+ if (vlan_info->nr_vids == 1)
i = VLAN_N_VID;
unregister_vlan_dev(vlandev, &list);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9fd45f3571f..a4886d94c40 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -3,6 +3,7 @@
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
+#include <linux/list.h>
/**
@@ -40,8 +41,10 @@ struct vlan_pcpu_stats {
u32 tx_dropped;
};
+struct netpoll;
+
/**
- * struct vlan_dev_info - VLAN private device data
+ * struct vlan_dev_priv - VLAN private device data
* @nr_ingress_mappings: number of ingress priority mappings
* @ingress_priority_map: ingress priority mappings
* @nr_egress_mappings: number of egress priority mappings
@@ -53,7 +56,7 @@ struct vlan_pcpu_stats {
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
*/
-struct vlan_dev_info {
+struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
u32 ingress_priority_map[8];
unsigned int nr_egress_mappings;
@@ -67,13 +70,39 @@ struct vlan_dev_info {
struct proc_dir_entry *dent;
struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
};
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
}
+/* if this changes, algorithm will have to be reworked because this
+ * depends on completely exhausting the VLAN identifier space. Thus
+ * it gives constant time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+ unsigned int nr_vlan_devs;
+ struct hlist_node hlist; /* linked list */
+ struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+ struct net_device *real_dev; /* The ethernet(like) device
+ * the vlan is attached to.
+ */
+ struct vlan_group grp;
+ struct list_head vid_list;
+ unsigned int nr_vids;
+ struct rcu_head rcu;
+};
+
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
u16 vlan_id)
{
@@ -97,10 +126,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
u16 vlan_id)
{
- struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+ struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
- if (grp)
- return vlan_group_get_device(grp, vlan_id);
+ if (vlan_info)
+ return vlan_group_get_device(&vlan_info->grp, vlan_id);
return NULL;
}
@@ -121,7 +150,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
{
- struct vlan_dev_info *vip = vlan_dev_info(dev);
+ struct vlan_dev_priv *vip = vlan_dev_priv(dev);
return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
}
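
The split vlan_group arrays declared above trade memory for constant-time lookup: a real device can end up holding a pointer slot for every possible VID. The body of vlan_group_get_device() is elided by the hunk context, but given VLAN_GROUP_ARRAY_PART_LEN it presumably indexes the lazily allocated parts along these lines (sketch only, not taken from the patch):

/* Sketch: pick the array part for this VID; a missing part means no device. */
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
							u16 vlan_id)
{
	struct net_device **array;

	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
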
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f5ffc02729d..4d39d802be2 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -36,7 +36,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
skb->pkt_type = PACKET_HOST;
}
- if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
unsigned int offset = skb->data - skb_mac_header(skb);
/*
@@ -55,7 +55,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
skb->vlan_tci = 0;
- rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+ rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
@@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
u16 vlan_id)
{
- struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+ struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
- if (grp) {
- return vlan_group_get_device(grp, vlan_id);
+ if (vlan_info) {
+ return vlan_group_get_device(&vlan_info->grp, vlan_id);
} else {
/*
* Bonding slaves do not have grp assigned to themselves.
@@ -90,13 +90,13 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return vlan_dev_info(dev)->real_dev;
+ return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- return vlan_dev_info(dev)->vlan_id;
+ return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
@@ -110,39 +110,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
return skb;
}
-static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
-{
- __be16 proto;
- unsigned char *rawp;
-
- /*
- * Was a VLAN packet, grab the encapsulated protocol, which the layer
- * three protocols care about.
- */
-
- proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= 1536) {
- skb->protocol = proto;
- return;
- }
-
- rawp = skb->data;
- if (*(unsigned short *) rawp == 0xFFFF)
- /*
- * This is a magic hack to spot IPX packets. Older Novell
- * breaks the protocol design and runs IPX over 802.3 without
- * an 802.2 LLC layer. We look for FFFF which isn't a used
- * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
- * but does for the rest.
- */
- skb->protocol = htons(ETH_P_802_3);
- else
- /*
- * Real 802.2 LLC
- */
- skb->protocol = htons(ETH_P_802_2);
-}
-
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
struct vlan_hdr *vhdr;
@@ -179,3 +146,226 @@ err_free:
kfree_skb(skb);
return NULL;
}
+
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+ int i;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+ kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+ vlan_group_free(&vlan_info->grp);
+ kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+ vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+ struct vlan_info *vlan_info;
+
+ vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+ if (!vlan_info)
+ return NULL;
+
+ vlan_info->real_dev = dev;
+ INIT_LIST_HEAD(&vlan_info->vid_list);
+ return vlan_info;
+}
+
+struct vlan_vid_info {
+ struct list_head list;
+ unsigned short vid;
+ int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+ unsigned short vid)
+{
+ struct vlan_vid_info *vid_info;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (vid_info->vid == vid)
+ return vid_info;
+ }
+ return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+ struct vlan_vid_info *vid_info;
+
+ vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+ if (!vid_info)
+ return NULL;
+ vid_info->vid = vid;
+
+ return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+ struct vlan_vid_info **pvid_info)
+{
+ struct net_device *dev = vlan_info->real_dev;
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct vlan_vid_info *vid_info;
+ int err;
+
+ vid_info = vlan_vid_info_alloc(vid);
+ if (!vid_info)
+ return -ENOMEM;
+
+ if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ ops->ndo_vlan_rx_add_vid) {
+ err = ops->ndo_vlan_rx_add_vid(dev, vid);
+ if (err) {
+ kfree(vid_info);
+ return err;
+ }
+ }
+ list_add(&vid_info->list, &vlan_info->vid_list);
+ vlan_info->nr_vids++;
+ *pvid_info = vid_info;
+ return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+ struct vlan_info *vlan_info;
+ struct vlan_vid_info *vid_info;
+ bool vlan_info_created = false;
+ int err;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info) {
+ vlan_info = vlan_info_alloc(dev);
+ if (!vlan_info)
+ return -ENOMEM;
+ vlan_info_created = true;
+ }
+ vid_info = vlan_vid_info_get(vlan_info, vid);
+ if (!vid_info) {
+ err = __vlan_vid_add(vlan_info, vid, &vid_info);
+ if (err)
+ goto out_free_vlan_info;
+ }
+ vid_info->refcount++;
+
+ if (vlan_info_created)
+ rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+ return 0;
+
+out_free_vlan_info:
+ if (vlan_info_created)
+ kfree(vlan_info);
+ return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+ struct vlan_vid_info *vid_info)
+{
+ struct net_device *dev = vlan_info->real_dev;
+ const struct net_device_ops *ops = dev->netdev_ops;
+ unsigned short vid = vid_info->vid;
+ int err;
+
+ if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+ ops->ndo_vlan_rx_kill_vid) {
+ err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+ if (err) {
+ pr_warn("failed to kill vid %d for device %s\n",
+ vid, dev->name);
+ }
+ }
+ list_del(&vid_info->list);
+ kfree(vid_info);
+ vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+ struct vlan_info *vlan_info;
+ struct vlan_vid_info *vid_info;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ return;
+
+ vid_info = vlan_vid_info_get(vlan_info, vid);
+ if (!vid_info)
+ return;
+ vid_info->refcount--;
+ if (vid_info->refcount == 0) {
+ __vlan_vid_del(vlan_info, vid_info);
+ if (vlan_info->nr_vids == 0) {
+ RCU_INIT_POINTER(dev->vlan_info, NULL);
+ call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+ }
+ }
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+ int err;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(by_dev->vlan_info);
+ if (!vlan_info)
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ err = vlan_vid_add(dev, vid_info->vid);
+ if (err)
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(vid_info,
+ &vlan_info->vid_list,
+ list) {
+ vlan_vid_del(dev, vid_info->vid);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(by_dev->vlan_info);
+ if (!vlan_info)
+ return;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+ vlan_vid_del(dev, vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
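
The refcounted VID list added above is what lets stacked drivers (bonding and similar) mirror the VLAN filter entries of an upper device onto a lower one. A minimal usage sketch under RTNL, with hypothetical helper names that are not part of this patch:

/* Hypothetical callers; only vlan_vids_add_by_dev()/vlan_vids_del_by_dev()
 * come from the patch. Both must be called with RTNL held.
 */
static int example_attach_lower(struct net_device *lower,
				const struct net_device *upper)
{
	/* take a reference on every VID already configured on the upper dev */
	return vlan_vids_add_by_dev(lower, upper);
}

static void example_detach_lower(struct net_device *lower,
				 const struct net_device *upper)
{
	/* drop those references; the final vlan_vid_del() frees the lower
	 * device's vlan_info via call_rcu() */
	vlan_vids_del_by_dev(lower, upper);
}

On failure, vlan_vids_add_by_dev() already unwinds the VIDs it managed to add, so a caller does not need to roll back partially applied state itself.
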
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index bc252862458..9988d4abb37 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -33,6 +33,7 @@
#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
/*
* Rebuild the Ethernet MAC header. This is called after an ARP
@@ -72,7 +73,7 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
struct vlan_priority_tci_mapping *mp;
- mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+ mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
while (mp) {
if (mp->priority == skb->priority) {
return mp->vlan_qos; /* This should already be shifted
@@ -103,10 +104,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 vlan_tci = 0;
int rc;
- if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
- vlan_tci = vlan_dev_info(dev)->vlan_id;
+ vlan_tci = vlan_dev_priv(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
vhdr->h_vlan_TCI = htons(vlan_tci);
@@ -129,7 +130,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
- dev = vlan_dev_info(dev)->real_dev;
+ dev = vlan_dev_priv(dev)->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
@@ -149,27 +150,29 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
- vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+ vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
u16 vlan_tci;
- vlan_tci = vlan_dev_info(dev)->vlan_id;
+ vlan_tci = vlan_dev_priv(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
- skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+ skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
len = skb->len;
+ if (netpoll_tx_running(dev))
+ return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *stats;
- stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+ stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
} else {
- this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+ this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
}
return ret;
@@ -180,7 +183,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
/* TODO: gotta make sure the underlying layer can handle it,
* maybe an IFF_VLAN_CAPABLE flag for devices?
*/
- if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+ if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
@@ -191,7 +194,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
vlan->nr_ingress_mappings--;
@@ -204,7 +207,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *mp = NULL;
struct vlan_priority_tci_mapping *np;
u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
@@ -241,7 +244,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
u32 old_flags = vlan->flags;
if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
@@ -261,12 +264,12 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
{
- strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+ strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}
static int vlan_dev_open(struct net_device *dev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
int err;
@@ -313,7 +316,7 @@ out:
static int vlan_dev_stop(struct net_device *dev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
dev_mc_unsync(real_dev, dev);
@@ -332,7 +335,7 @@ static int vlan_dev_stop(struct net_device *dev)
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
struct sockaddr *addr = p;
int err;
@@ -358,7 +361,7 @@ out:
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
struct ifreq ifrr;
int err = -EOPNOTSUPP;
@@ -383,7 +386,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int err = 0;
@@ -397,7 +400,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
@@ -409,7 +412,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int len = 0;
@@ -421,7 +424,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
static int vlan_dev_fcoe_enable(struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
@@ -432,7 +435,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev)
static int vlan_dev_fcoe_disable(struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
@@ -443,7 +446,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
@@ -455,7 +458,7 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
@@ -468,7 +471,7 @@ static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
@@ -480,8 +483,8 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
- dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
- dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+ dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
/*
@@ -519,7 +522,7 @@ static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
int subclass = 0;
netif_carrier_off(dev);
@@ -568,8 +571,8 @@ static int vlan_dev_init(struct net_device *dev)
vlan_dev_set_lockdep_class(dev, subclass);
- vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
- if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+ vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
return -ENOMEM;
return 0;
@@ -578,7 +581,7 @@ static int vlan_dev_init(struct net_device *dev)
static void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
free_percpu(vlan->vlan_pcpu_stats);
@@ -591,18 +594,17 @@ static void vlan_dev_uninit(struct net_device *dev)
}
}
-static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
- struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
u32 old_features = features;
- features &= real_dev->features;
features &= real_dev->vlan_features;
+ features |= NETIF_F_RXCSUM;
+ features &= real_dev->features;
features |= old_features & NETIF_F_SOFT_FEATURES;
-
- if (dev_ethtool_get_rx_csum(real_dev))
- features |= NETIF_F_RXCSUM;
features |= NETIF_F_LLTX;
return features;
@@ -611,7 +613,7 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
- const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return __ethtool_get_settings(vlan->real_dev, cmd);
}
@@ -627,7 +629,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+ if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
@@ -636,7 +638,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
- p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+ p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rxpackets = p->rx_packets;
@@ -661,6 +663,57 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
return stats;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+ return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+ struct vlan_dev_priv *info = vlan_dev_priv(dev);
+ struct net_device *real_dev = info->real_dev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!netpoll)
+ goto out;
+
+ netpoll->dev = real_dev;
+ strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
+
+ err = __netpoll_setup(netpoll);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ info->netpoll = netpoll;
+
+out:
+ return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+ struct vlan_dev_priv *info = vlan_dev_priv(dev);
+ struct netpoll *netpoll = info->netpoll;
+
+ if (!netpoll)
+ return;
+
+ info->netpoll = NULL;
+
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
+
+ __netpoll_cleanup(netpoll);
+ kfree(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -689,6 +742,11 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
.ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vlan_dev_poll_controller,
+ .ndo_netpoll_setup = vlan_dev_netpoll_setup,
+ .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
+#endif
.ndo_fix_features = vlan_dev_fix_features,
};
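
The reordering in vlan_dev_fix_features() above replaces the removed dev_ethtool_get_rx_csum() probe: NETIF_F_RXCSUM is offered unconditionally and then masked by real_dev->features, so the VLAN device only ends up advertising receive checksum offload when the real device supports it. Restated as a standalone helper purely for illustration (not part of the patch):

/* Illustration of the effective mask computed by vlan_dev_fix_features(). */
static netdev_features_t sketch_vlan_features(netdev_features_t requested,
					      netdev_features_t real_features,
					      netdev_features_t real_vlan_features)
{
	netdev_features_t features = requested;

	features &= real_vlan_features;	/* what the real dev allows over VLANs */
	features |= NETIF_F_RXCSUM;	/* offer RX checksum offload... */
	features &= real_features;	/* ...but only if the real dev has it */
	features |= requested & NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;	/* VLAN xmit needs no tx lock */
	return features;
}
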
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
index 061ceceeef1..6f975535276 100644
--- a/net/8021q/vlan_gvrp.c
+++ b/net/8021q/vlan_gvrp.c
@@ -29,7 +29,7 @@ static struct garp_application vlan_gvrp_app __read_mostly = {
int vlan_gvrp_request_join(const struct net_device *dev)
{
- const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
@@ -38,7 +38,7 @@ int vlan_gvrp_request_join(const struct net_device *dev)
void vlan_gvrp_request_leave(const struct net_device *dev)
{
- const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 235c2197dbb..50711368ad6 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -105,7 +105,7 @@ static int vlan_changelink(struct net_device *dev,
static int vlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
int err;
@@ -149,7 +149,7 @@ static inline size_t vlan_qos_map_size(unsigned int n)
static size_t vlan_get_size(const struct net_device *dev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return nla_total_size(2) + /* IFLA_VLAN_ID */
sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
@@ -159,14 +159,14 @@ static size_t vlan_get_size(const struct net_device *dev)
static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct vlan_dev_info *vlan = vlan_dev_info(dev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *pm;
struct ifla_vlan_flags f;
struct ifla_vlan_qos_mapping m;
struct nlattr *nest;
unsigned int i;
- NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+ NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
if (vlan->flags) {
f.flags = vlan->flags;
f.mask = ~0;
@@ -218,7 +218,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
- .priv_size = sizeof(struct vlan_dev_info),
+ .priv_size = sizeof(struct vlan_dev_priv),
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d34b6daf893..c718fd3664b 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -168,13 +168,13 @@ err:
int vlan_proc_add_dev(struct net_device *vlandev)
{
- struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
- dev_info->dent =
+ vlan->dent =
proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
vn->proc_vlan_dir, &vlandev_fops, vlandev);
- if (!dev_info->dent)
+ if (!vlan->dent)
return -ENOBUFS;
return 0;
}
@@ -187,10 +187,10 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
/** NOTE: This will consume the memory pointed to by dent, it seems. */
- if (vlan_dev_info(vlandev)->dent) {
- remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+ if (vlan_dev_priv(vlandev)->dent) {
+ remove_proc_entry(vlan_dev_priv(vlandev)->dent->name,
vn->proc_vlan_dir);
- vlan_dev_info(vlandev)->dent = NULL;
+ vlan_dev_priv(vlandev)->dent = NULL;
}
return 0;
}
@@ -268,10 +268,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
nmtype ? nmtype : "UNKNOWN");
} else {
const struct net_device *vlandev = v;
- const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
- dev_info->vlan_id, dev_info->real_dev->name);
+ vlan->vlan_id, vlan->real_dev->name);
}
return 0;
}
@@ -279,7 +279,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
struct net_device *vlandev = (struct net_device *) seq->private;
- const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
static const char fmt64[] = "%30s %12llu\n";
@@ -291,8 +291,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
"%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
- vlandev->name, dev_info->vlan_id,
- (int)(dev_info->flags & 1), vlandev->priv_flags);
+ vlandev->name, vlan->vlan_id,
+ (int)(vlan->flags & 1), vlandev->priv_flags);
seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -300,23 +300,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
seq_puts(seq, "\n");
seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
- seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+ seq_printf(seq, "Device: %s", vlan->real_dev->name);
/* now show all PRIORITY mappings relating to this VLAN */
seq_printf(seq, "\nINGRESS priority mappings: "
"0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
- dev_info->ingress_priority_map[0],
- dev_info->ingress_priority_map[1],
- dev_info->ingress_priority_map[2],
- dev_info->ingress_priority_map[3],
- dev_info->ingress_priority_map[4],
- dev_info->ingress_priority_map[5],
- dev_info->ingress_priority_map[6],
- dev_info->ingress_priority_map[7]);
+ vlan->ingress_priority_map[0],
+ vlan->ingress_priority_map[1],
+ vlan->ingress_priority_map[2],
+ vlan->ingress_priority_map[3],
+ vlan->ingress_priority_map[4],
+ vlan->ingress_priority_map[5],
+ vlan->ingress_priority_map[6],
+ vlan->ingress_priority_map[7]);
seq_printf(seq, " EGRESS priority mappings: ");
for (i = 0; i < 16; i++) {
const struct vlan_priority_tci_mapping *mp
- = dev_info->egress_priority_map[i];
+ = vlan->egress_priority_map[i];
while (mp) {
seq_printf(seq, "%u:%hu ",
mp->priority, ((mp->vlan_qos >> 13) & 0x7));
diff --git a/net/9p/client.c b/net/9p/client.c
index 854ca7a911c..776618cd2be 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -23,6 +23,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -81,15 +83,15 @@ static int get_protocol_version(char *s)
if (!strcmp(s, "9p2000")) {
version = p9_proto_legacy;
- P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
+ p9_debug(P9_DEBUG_9P, "Protocol version: Legacy\n");
} else if (!strcmp(s, "9p2000.u")) {
version = p9_proto_2000u;
- P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
+ p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
} else if (!strcmp(s, "9p2000.L")) {
version = p9_proto_2000L;
- P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
+ p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
} else
- printk(KERN_INFO "9p: Unknown protocol version %s.\n", s);
+ pr_info("Unknown protocol version %s\n", s);
return version;
}
@@ -119,8 +121,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
tmp_options = kstrdup(opts, GFP_KERNEL);
if (!tmp_options) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option string\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
@@ -134,8 +136,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
case Opt_msize:
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
ret = r;
continue;
}
@@ -145,15 +147,14 @@ static int parse_opts(char *opts, struct p9_client *clnt)
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem allocating copy of trans arg\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "problem allocating copy of trans arg\n");
goto free_and_return;
}
clnt->trans_mod = v9fs_get_trans_by_name(s);
if (clnt->trans_mod == NULL) {
- printk(KERN_INFO
- "9p: Could not find "
- "request transport: %s\n", s);
+ pr_info("Could not find request transport: %s\n",
+ s);
ret = -EINVAL;
kfree(s);
goto free_and_return;
@@ -167,8 +168,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem allocating copy of version arg\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "problem allocating copy of version arg\n");
goto free_and_return;
}
ret = get_protocol_version(s);
@@ -225,7 +226,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
sizeof(struct p9_req_t), GFP_ATOMIC);
if (!c->reqs[row]) {
- printk(KERN_ERR "Couldn't grow tag array\n");
+ pr_err("Couldn't grow tag array\n");
spin_unlock_irqrestore(&c->lock, flags);
return ERR_PTR(-ENOMEM);
}
@@ -244,7 +245,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
if (!req->tc) {
req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS);
if (!req->wq) {
- printk(KERN_ERR "Couldn't grow tag array\n");
+ pr_err("Couldn't grow tag array\n");
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
@@ -253,7 +254,7 @@ p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
GFP_NOFS);
if ((!req->tc) || (!req->rc)) {
- printk(KERN_ERR "Couldn't grow tag array\n");
+ pr_err("Couldn't grow tag array\n");
kfree(req->tc);
kfree(req->rc);
kfree(req->wq);
@@ -343,9 +344,9 @@ static void p9_tag_cleanup(struct p9_client *c)
for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
for (col = 0; col < P9_ROW_MAXTAG; col++) {
if (c->reqs[row][col].status != REQ_STATUS_IDLE) {
- P9_DPRINTK(P9_DEBUG_MUX,
- "Attempting to cleanup non-free tag %d,%d\n",
- row, col);
+ p9_debug(P9_DEBUG_MUX,
+ "Attempting to cleanup non-free tag %d,%d\n",
+ row, col);
/* TODO: delay execution of cleanup */
return;
}
@@ -379,7 +380,7 @@ static void p9_tag_cleanup(struct p9_client *c)
static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
{
int tag = r->tc->tag;
- P9_DPRINTK(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
+ p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag);
r->status = REQ_STATUS_IDLE;
if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool))
@@ -394,9 +395,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
*/
void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
{
- P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
+ p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
wake_up(req->wq);
- P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
+ p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
}
EXPORT_SYMBOL(p9_client_cb);
@@ -431,8 +432,8 @@ p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag,
pdu->id = r_type;
pdu->tag = r_tag;
- P9_DPRINTK(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n", pdu->size,
- pdu->id, pdu->tag);
+ p9_debug(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n",
+ pdu->size, pdu->id, pdu->tag);
if (type)
*type = r_type;
@@ -473,7 +474,7 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
*/
trace_9p_protocol_dump(c, req->rc);
if (err) {
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+ p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
if (type != P9_RERROR && type != P9_RLERROR)
@@ -492,21 +493,21 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
- -ecode, ename);
+ p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
}
kfree(ename);
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
return err;
out_err:
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+ p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
return err;
}
@@ -538,7 +539,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
*/
trace_9p_protocol_dump(c, req->rc);
if (err) {
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+ p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
@@ -601,22 +602,22 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
- -ecode, ename);
+ p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
}
kfree(ename);
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
return err;
out_free:
kfree(ename);
out_err:
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+ p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
return err;
}
@@ -645,7 +646,7 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
if (err)
return err;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
+ p9_debug(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag);
if (IS_ERR(req))
@@ -670,7 +671,7 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
int tag, err;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
+ p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type);
/* we allow for any status other than disconnected */
if (c->status == Disconnected)
@@ -744,11 +745,11 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
req->status >= REQ_STATUS_RCVD);
if (req->status == REQ_STATUS_ERROR) {
- P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
if ((err == -ERESTARTSYS) && (c->status == Connected)) {
- P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+ p9_debug(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -827,11 +828,11 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
goto reterr;
}
if (req->status == REQ_STATUS_ERROR) {
- P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
if ((err == -ERESTARTSYS) && (c->status == Connected)) {
- P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+ p9_debug(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
@@ -865,7 +866,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
struct p9_fid *fid;
unsigned long flags;
- P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt);
+ p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
@@ -898,7 +899,7 @@ static void p9_fid_destroy(struct p9_fid *fid)
struct p9_client *clnt;
unsigned long flags;
- P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
clnt = fid->clnt;
p9_idpool_put(fid->fid, clnt->fidpool);
spin_lock_irqsave(&clnt->lock, flags);
@@ -915,8 +916,8 @@ static int p9_client_version(struct p9_client *c)
char *version;
int msize;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
- c->msize, c->proto_version);
+ p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
+ c->msize, c->proto_version);
switch (c->proto_version) {
case p9_proto_2000L:
@@ -941,12 +942,12 @@ static int p9_client_version(struct p9_client *c)
err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
if (err) {
- P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
+ p9_debug(P9_DEBUG_9P, "version error %d\n", err);
trace_9p_protocol_dump(c, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
+ p9_debug(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
if (!strncmp(version, "9P2000.L", 8))
c->proto_version = p9_proto_2000L;
else if (!strncmp(version, "9P2000.u", 8))
@@ -996,8 +997,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (clnt->trans_mod == NULL) {
err = -EPROTONOSUPPORT;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "No transport defined or default transport\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "No transport defined or default transport\n");
goto destroy_tagpool;
}
@@ -1007,8 +1008,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
goto put_trans;
}
- P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
- clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
+ p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
+ clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
err = clnt->trans_mod->create(clnt, dev_name, options);
if (err)
@@ -1041,7 +1042,7 @@ void p9_client_destroy(struct p9_client *clnt)
{
struct p9_fid *fid, *fidptr;
- P9_DPRINTK(P9_DEBUG_MUX, "clnt %p\n", clnt);
+ p9_debug(P9_DEBUG_MUX, "clnt %p\n", clnt);
if (clnt->trans_mod)
clnt->trans_mod->close(clnt);
@@ -1049,7 +1050,7 @@ void p9_client_destroy(struct p9_client *clnt)
v9fs_put_trans(clnt->trans_mod);
list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) {
- printk(KERN_INFO "Found fid %d not clunked\n", fid->fid);
+ pr_info("Found fid %d not clunked\n", fid->fid);
p9_fid_destroy(fid);
}
@@ -1064,14 +1065,14 @@ EXPORT_SYMBOL(p9_client_destroy);
void p9_client_disconnect(struct p9_client *clnt)
{
- P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+ p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
clnt->status = Disconnected;
}
EXPORT_SYMBOL(p9_client_disconnect);
void p9_client_begin_disconnect(struct p9_client *clnt)
{
- P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
+ p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
clnt->status = BeginDisconnect;
}
EXPORT_SYMBOL(p9_client_begin_disconnect);
@@ -1085,8 +1086,8 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
struct p9_qid qid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
- afid ? afid->fid : -1, uname, aname);
+ p9_debug(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+ afid ? afid->fid : -1, uname, aname);
fid = p9_fid_create(clnt);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
@@ -1108,10 +1109,8 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
- qid.type,
- (unsigned long long)qid.path,
- qid.version);
+ p9_debug(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
+ qid.type, (unsigned long long)qid.path, qid.version);
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
@@ -1151,8 +1150,8 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
fid = oldfid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
- oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
+ p9_debug(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
+ oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
nwname, wnames);
@@ -1169,7 +1168,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
}
p9_free_req(clnt, req);
- P9_DPRINTK(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
+ p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
if (nwqids != nwname) {
err = -ENOENT;
@@ -1177,7 +1176,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
}
for (count = 0; count < nwqids; count++)
- P9_DPRINTK(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n",
+ p9_debug(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n",
count, wqids[count].type,
(unsigned long long)wqids[count].path,
wqids[count].version);
@@ -1212,7 +1211,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
int iounit;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode);
err = 0;
@@ -1234,7 +1233,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
+ p9_debug(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type,
(unsigned long long)qid.path, qid.version, iounit);
@@ -1256,7 +1255,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
struct p9_req_t *req;
int iounit;
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P,
">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
ofid->fid, name, flags, mode, gid);
clnt = ofid->clnt;
@@ -1277,7 +1276,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
+ p9_debug(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
qid->type,
(unsigned long long)qid->path,
qid->version, iounit);
@@ -1301,7 +1300,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
struct p9_qid qid;
int iounit;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
fid->fid, name, perm, mode);
err = 0;
clnt = fid->clnt;
@@ -1322,7 +1321,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
+ p9_debug(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
qid.type,
(unsigned long long)qid.path,
qid.version, iounit);
@@ -1344,7 +1343,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
struct p9_client *clnt;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n",
+ p9_debug(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n",
dfid->fid, name, symtgt);
clnt = dfid->clnt;
@@ -1361,7 +1360,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
+ p9_debug(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
qid->type, (unsigned long long)qid->path, qid->version);
free_and_error:
@@ -1376,7 +1375,7 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
struct p9_client *clnt;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
+ p9_debug(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
dfid->fid, oldfid->fid, newname);
clnt = dfid->clnt;
req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid,
@@ -1384,7 +1383,7 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
if (IS_ERR(req))
return PTR_ERR(req);
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLINK\n");
+ p9_debug(P9_DEBUG_9P, "<<< RLINK\n");
p9_free_req(clnt, req);
return 0;
}
@@ -1396,7 +1395,7 @@ int p9_client_fsync(struct p9_fid *fid, int datasync)
struct p9_client *clnt;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
fid->fid, datasync);
err = 0;
clnt = fid->clnt;
@@ -1407,7 +1406,7 @@ int p9_client_fsync(struct p9_fid *fid, int datasync)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
p9_free_req(clnt, req);
@@ -1423,12 +1422,13 @@ int p9_client_clunk(struct p9_fid *fid)
struct p9_req_t *req;
if (!fid) {
- P9_EPRINTK(KERN_WARNING, "Trying to clunk with NULL fid\n");
+ pr_warn("%s (%d): Trying to clunk with NULL fid\n",
+ __func__, task_pid_nr(current));
dump_stack();
return 0;
}
- P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
err = 0;
clnt = fid->clnt;
@@ -1438,7 +1438,7 @@ int p9_client_clunk(struct p9_fid *fid)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
@@ -1456,7 +1456,7 @@ int p9_client_remove(struct p9_fid *fid)
struct p9_client *clnt;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
err = 0;
clnt = fid->clnt;
@@ -1466,7 +1466,7 @@ int p9_client_remove(struct p9_fid *fid)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
@@ -1481,7 +1481,7 @@ int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
struct p9_req_t *req;
struct p9_client *clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
dfid->fid, name, flags);
clnt = dfid->clnt;
@@ -1490,7 +1490,7 @@ int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
err = PTR_ERR(req);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
+ p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
p9_free_req(clnt, req);
error:
@@ -1509,7 +1509,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
int err, rsize, non_zc = 0;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (long long unsigned) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1552,7 +1552,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
if (non_zc) {
if (data) {
@@ -1584,7 +1584,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
struct p9_client *clnt;
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
fid->fid, (long long unsigned) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1626,7 +1626,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
p9_free_req(clnt, req);
return count;
@@ -1646,7 +1646,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
struct p9_req_t *req;
u16 ignored;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -1667,7 +1667,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P,
"<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
"<<< mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
"<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
@@ -1696,7 +1696,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
GFP_KERNEL);
struct p9_req_t *req;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
+ p9_debug(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
fid->fid, request_mask);
if (!ret)
@@ -1718,7 +1718,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P,
"<<< RGETATTR st_result_mask=%lld\n"
"<<< qid=%x.%llx.%x\n"
"<<< st_mode=%8.8x st_nlink=%llu\n"
@@ -1784,8 +1784,8 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
err = 0;
clnt = fid->clnt;
wst->size = p9_client_statsize(wst, clnt->proto_version);
- P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P,
" sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
" mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
" name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
@@ -1802,7 +1802,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
@@ -1818,8 +1818,8 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P,
" valid=%x mode=%x uid=%d gid=%d size=%lld\n"
" atime_sec=%lld atime_nsec=%lld\n"
" mtime_sec=%lld mtime_nsec=%lld\n",
@@ -1833,7 +1833,7 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
err = PTR_ERR(req);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
return err;
@@ -1849,7 +1849,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
if (IS_ERR(req)) {
@@ -1866,7 +1866,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+ p9_debug(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
"blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
"fsid %llu namelen %ld\n",
fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
@@ -1889,7 +1889,7 @@ int p9_client_rename(struct p9_fid *fid,
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+ p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
fid->fid, newdirfid->fid, name);
req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
@@ -1899,7 +1899,7 @@ int p9_client_rename(struct p9_fid *fid,
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
@@ -1917,7 +1917,7 @@ int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
err = 0;
clnt = olddirfid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
+ p9_debug(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
" newdirfid %d new name %s\n", olddirfid->fid, old_name,
newdirfid->fid, new_name);
@@ -1928,7 +1928,7 @@ int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
+ p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
newdirfid->fid, new_name);
p9_free_req(clnt, req);
@@ -1956,7 +1956,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
attr_fid = NULL;
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P,
">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
file_fid->fid, attr_fid->fid, attr_name);
@@ -1973,7 +1973,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
goto clunk_fid;
}
p9_free_req(clnt, req);
- P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n",
+ p9_debug(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n",
attr_fid->fid, *attr_size);
return attr_fid;
clunk_fid:
@@ -1994,7 +1994,7 @@ int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
struct p9_req_t *req;
struct p9_client *clnt;
- P9_DPRINTK(P9_DEBUG_9P,
+ p9_debug(P9_DEBUG_9P,
">>> TXATTRCREATE fid %d name %s size %lld flag %d\n",
fid->fid, name, (long long)attr_size, flags);
err = 0;
@@ -2005,7 +2005,7 @@ int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
err = PTR_ERR(req);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
return err;
@@ -2019,7 +2019,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
struct p9_req_t *req;
char *dataptr;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, (long long unsigned) offset, count);
err = 0;
@@ -2056,7 +2056,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
goto free_and_error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
if (non_zc)
memmove(data, dataptr, count);
@@ -2080,7 +2080,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
+ p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
"minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode,
MAJOR(rdev), MINOR(rdev), gid);
@@ -2092,7 +2092,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
+ p9_debug(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
(unsigned long long)qid->path, qid->version);
error:
@@ -2111,7 +2111,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
fid->fid, name, mode, gid);
req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode,
gid);
@@ -2123,7 +2123,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
+ p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
(unsigned long long)qid->path, qid->version);
error:
@@ -2141,7 +2141,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
+ p9_debug(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
"start %lld length %lld proc_id %d client_id %s\n",
fid->fid, flock->type, flock->flags, flock->start,
flock->length, flock->proc_id, flock->client_id);
@@ -2158,7 +2158,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
+ p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
error:
p9_free_req(clnt, req);
return err;
@@ -2174,7 +2174,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
+ p9_debug(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
"length %lld proc_id %d client_id %s\n", fid->fid, glock->type,
glock->start, glock->length, glock->proc_id, glock->client_id);
@@ -2191,7 +2191,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
+ p9_debug(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
"proc_id %d client_id %s\n", glock->type, glock->start,
glock->length, glock->proc_id, glock->client_id);
error:
@@ -2208,7 +2208,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
err = 0;
clnt = fid->clnt;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
+ p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
if (IS_ERR(req))
@@ -2219,7 +2219,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
- P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
+ p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
error:
p9_free_req(clnt, req);
return err;
diff --git a/net/9p/error.c b/net/9p/error.c
index 52518512a93..2ab2de76010 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -27,6 +27,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
@@ -237,8 +239,8 @@ int p9_errstr2errno(char *errstr, int len)
if (errno == 0) {
/* TODO: if error isn't found, add it dynamically */
errstr[len] = 0;
- printk(KERN_ERR "%s: server reported unknown error %s\n",
- __func__, errstr);
+ pr_err("%s: server reported unknown error %s\n",
+ __func__, errstr);
errno = ESERVERFAULT;
}
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 2664d129229..6ab36aea772 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -24,7 +24,11 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <net/9p/9p.h>
#include <linux/fs.h>
@@ -39,6 +43,29 @@ unsigned int p9_debug_level = 0; /* feature-rific global debug level */
EXPORT_SYMBOL(p9_debug_level);
module_param_named(debug, p9_debug_level, uint, 0);
MODULE_PARM_DESC(debug, "9P debugging level");
+
+void _p9_debug(enum p9_debug_flags level, const char *func,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if ((p9_debug_level & level) != level)
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (level == P9_DEBUG_9P)
+ pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf);
+ else
+ pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(_p9_debug);
#endif
/*
@@ -147,7 +174,7 @@ static int __init init_p9(void)
int ret = 0;
p9_error_init();
- printk(KERN_INFO "Installing 9P2000 support\n");
+ pr_info("Installing 9P2000 support\n");
p9_trans_fd_init();
return ret;
@@ -160,7 +187,7 @@ static int __init init_p9(void)
static void __exit exit_p9(void)
{
- printk(KERN_INFO "Unloading 9P2000 support\n");
+ pr_info("Unloading 9P2000 support\n");
p9_trans_fd_exit();
}
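
The call sites converted in the 9p hunks above rely on a p9_debug() wrapper that comes from the companion header change (include/net/9p/9p.h), which is outside this excerpt. As a sketch only, with the exact header text assumed rather than quoted, the wrapper presumably passes __func__ through to _p9_debug() and compiles down to a type-checked no-op when CONFIG_NET_9P_DEBUG is disabled:

#ifdef CONFIG_NET_9P_DEBUG
extern void _p9_debug(enum p9_debug_flags level, const char *func,
		      const char *fmt, ...);
/* forward the caller's name so the "-- %s (%d):" prefix in _p9_debug() works */
#define p9_debug(level, fmt, ...)			\
	_p9_debug(level, __func__, fmt, ##__VA_ARGS__)
#else
/* keep printf-style format checking even when debugging is compiled out */
#define p9_debug(level, fmt, ...)			\
	no_printk(fmt, ##__VA_ARGS__)
#endif
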
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 55e10a96c90..9ee48cb3017 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -534,7 +534,7 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
- P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
+ p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
}
@@ -558,8 +558,8 @@ int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
pdu->size = size;
trace_9p_protocol_dump(clnt, pdu);
- P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
- pdu->id, pdu->tag);
+ p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n",
+ pdu->size, pdu->id, pdu->tag);
return err;
}
@@ -585,7 +585,7 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
&dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
- P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
+ p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
goto out;
}
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fdfdb5747f6..fccae26fa67 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
@@ -191,7 +193,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
unsigned long flags;
LIST_HEAD(cancel_list);
- P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
+ p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
spin_lock_irqsave(&m->client->lock, flags);
@@ -217,7 +219,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
spin_unlock_irqrestore(&m->client->lock, flags);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
- P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+ p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
list_del(&req->req_list);
p9_client_cb(m->client, req);
}
@@ -275,7 +277,7 @@ static int p9_fd_read(struct p9_client *client, void *v, int len)
return -EREMOTEIO;
if (!(ts->rd->f_flags & O_NONBLOCK))
- P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
+ p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
@@ -299,7 +301,7 @@ static void p9_read_work(struct work_struct *work)
if (m->err < 0)
return;
- P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
+ p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
if (!m->rbuf) {
m->rbuf = m->tmp_buf;
@@ -308,11 +310,11 @@ static void p9_read_work(struct work_struct *work)
}
clear_bit(Rpending, &m->wsched);
- P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
- m->rpos, m->rsize, m->rsize-m->rpos);
+ p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
+ m, m->rpos, m->rsize, m->rsize-m->rpos);
err = p9_fd_read(m->client, m->rbuf + m->rpos,
m->rsize - m->rpos);
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
+ p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
if (err == -EAGAIN) {
clear_bit(Rworksched, &m->wsched);
return;
@@ -325,25 +327,25 @@ static void p9_read_work(struct work_struct *work)
if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
u16 tag;
- P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
+ p9_debug(P9_DEBUG_TRANS, "got new header\n");
n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
if (n >= m->client->msize) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "requested packet size too big: %d\n", n);
+ p9_debug(P9_DEBUG_ERROR,
+ "requested packet size too big: %d\n", n);
err = -EIO;
goto error;
}
tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
- P9_DPRINTK(P9_DEBUG_TRANS,
- "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
+ p9_debug(P9_DEBUG_TRANS,
+ "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
m->req = p9_tag_lookup(m->client, tag);
if (!m->req || (m->req->status != REQ_STATUS_SENT &&
m->req->status != REQ_STATUS_FLSH)) {
- P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
- tag);
+ p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
+ tag);
err = -EIO;
goto error;
}
@@ -364,7 +366,7 @@ static void p9_read_work(struct work_struct *work)
/* not an else because some packets (like clunk) have no payload */
if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
- P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
+ p9_debug(P9_DEBUG_TRANS, "got new packet\n");
spin_lock(&m->client->lock);
if (m->req->status != REQ_STATUS_ERROR)
m->req->status = REQ_STATUS_RCVD;
@@ -384,7 +386,7 @@ static void p9_read_work(struct work_struct *work)
n = p9_fd_poll(m->client, NULL);
if (n & POLLIN) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
+ p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
} else
clear_bit(Rworksched, &m->wsched);
@@ -418,7 +420,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
return -EREMOTEIO;
if (!(ts->wr->f_flags & O_NONBLOCK))
- P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
+ p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
oldfs = get_fs();
set_fs(get_ds());
@@ -460,7 +462,7 @@ static void p9_write_work(struct work_struct *work)
req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req_list);
req->status = REQ_STATUS_SENT;
- P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
+ p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
list_move_tail(&req->req_list, &m->req_list);
m->wbuf = req->tc->sdata;
@@ -469,11 +471,11 @@ static void p9_write_work(struct work_struct *work)
spin_unlock(&m->client->lock);
}
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
- m->wsize);
+ p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
+ m, m->wpos, m->wsize);
clear_bit(Wpending, &m->wsched);
err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
+ p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
if (err == -EAGAIN) {
clear_bit(Wworksched, &m->wsched);
return;
@@ -497,7 +499,7 @@ static void p9_write_work(struct work_struct *work)
n = p9_fd_poll(m->client, NULL);
if (n & POLLOUT) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
+ p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
} else
clear_bit(Wworksched, &m->wsched);
@@ -551,7 +553,7 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
}
if (!pwait) {
- P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
+ p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
return;
}
@@ -573,8 +575,7 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
int n;
struct p9_conn *m;
- P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
- client->msize);
+ p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -591,12 +592,12 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
n = p9_fd_poll(client, &m->pt);
if (n & POLLIN) {
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+ p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
}
if (n & POLLOUT) {
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+ p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
set_bit(Wpending, &m->wsched);
}
@@ -618,7 +619,7 @@ static void p9_poll_mux(struct p9_conn *m)
n = p9_fd_poll(m->client, NULL);
if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
- P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
+ p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
if (n >= 0)
n = -ECONNRESET;
p9_conn_cancel(m, n);
@@ -626,19 +627,19 @@ static void p9_poll_mux(struct p9_conn *m)
if (n & POLLIN) {
set_bit(Rpending, &m->wsched);
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
+ p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
+ p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
}
}
if (n & POLLOUT) {
set_bit(Wpending, &m->wsched);
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
+ p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
- P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
+ p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
}
}
@@ -661,8 +662,8 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = ts->conn;
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
- current, req->tc, req->tc->id);
+ p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
+ m, current, req->tc, req->tc->id);
if (m->err < 0)
return m->err;
@@ -686,7 +687,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
int ret = 1;
- P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+ p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
spin_lock(&client->lock);
@@ -726,8 +727,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
tmp_options = kstrdup(params, GFP_KERNEL);
if (!tmp_options) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option string\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
@@ -741,8 +742,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
if (token != Opt_err) {
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
continue;
}
}
@@ -801,7 +802,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
csocket->sk->sk_allocation = GFP_NOIO;
fd = sock_map_fd(csocket, 0);
if (fd < 0) {
- P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+ pr_err("%s (%d): failed to map fd\n",
+ __func__, task_pid_nr(current));
sock_release(csocket);
kfree(p);
return fd;
@@ -837,8 +839,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
static void p9_conn_destroy(struct p9_conn *m)
{
- P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
- m->mux_list.prev, m->mux_list.next);
+ p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
+ m, m->mux_list.prev, m->mux_list.next);
p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
@@ -919,7 +921,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
- P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
+ pr_err("%s (%d): problem creating socket\n",
+ __func__, task_pid_nr(current));
return err;
}
@@ -927,9 +930,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
(struct sockaddr *)&sin_server,
sizeof(struct sockaddr_in), 0);
if (err < 0) {
- P9_EPRINTK(KERN_ERR,
- "p9_trans_tcp: problem connecting socket to %s\n",
- addr);
+ pr_err("%s (%d): problem connecting socket to %s\n",
+ __func__, task_pid_nr(current), addr);
sock_release(csocket);
return err;
}
@@ -947,8 +949,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
csocket = NULL;
if (strlen(addr) >= UNIX_PATH_MAX) {
- P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
- addr);
+ pr_err("%s (%d): address too long: %s\n",
+ __func__, task_pid_nr(current), addr);
return -ENAMETOOLONG;
}
@@ -957,15 +959,16 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
SOCK_STREAM, 0, &csocket, 1);
if (err < 0) {
- P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+ pr_err("%s (%d): problem creating socket\n",
+ __func__, task_pid_nr(current));
+
return err;
}
err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
sizeof(struct sockaddr_un) - 1, 0);
if (err < 0) {
- P9_EPRINTK(KERN_ERR,
- "p9_trans_unix: problem connecting socket: %s: %d\n",
- addr, err);
+ pr_err("%s (%d): problem connecting socket: %s: %d\n",
+ __func__, task_pid_nr(current), addr, err);
sock_release(csocket);
return err;
}
@@ -983,7 +986,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
parse_opts(args, &opts);
if (opts.rfd == ~0 || opts.wfd == ~0) {
- printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
+ pr_err("Insufficient options for proto=fd\n");
return -ENOPROTOOPT;
}
@@ -1050,7 +1053,7 @@ static void p9_poll_workfn(struct work_struct *work)
{
unsigned long flags;
- P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
+ p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
@@ -1066,7 +1069,7 @@ static void p9_poll_workfn(struct work_struct *work)
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
+ p9_debug(P9_DEBUG_TRANS, "finish\n");
}
int p9_trans_fd_init(void)
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 159c50f1c6b..2c69ddd691a 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
@@ -178,8 +180,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
tmp_options = kstrdup(params, GFP_KERNEL);
if (!tmp_options) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option string\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
@@ -192,8 +194,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
token = match_token(p, tokens, args);
r = match_int(&args[0], &option);
if (r < 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
continue;
}
switch (token) {
@@ -301,8 +303,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
return;
err_out:
- P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
- req, err, status);
+ p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
}
@@ -318,8 +319,8 @@ handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
static void qp_event_handler(struct ib_event *event, void *context)
{
- P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
- context);
+ p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
+ event->event, context);
}
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -345,8 +346,7 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
break;
default:
- printk(KERN_ERR "9prdma: unexpected completion type, "
- "c->wc_op=%d, wc.opcode=%d, status=%d\n",
+ pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
c->wc_op, wc.opcode, wc.status);
break;
}
@@ -356,7 +356,7 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
static void cq_event_handler(struct ib_event *e, void *v)
{
- P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
+ p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
@@ -407,7 +407,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
return ib_post_recv(rdma->qp, &wr, &bad_wr);
error:
- P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+ p9_debug(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
}
@@ -500,7 +500,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
kfree(c);
kfree(rpl_context->rc);
kfree(rpl_context);
- P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
+ p9_debug(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
err_free1:
kfree(rpl_context->rc);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 32aa9834229..330421e5471 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
@@ -145,7 +147,7 @@ static void req_done(struct virtqueue *vq)
struct p9_req_t *req;
unsigned long flags;
- P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
+ p9_debug(P9_DEBUG_TRANS, ": request done\n");
while (1) {
spin_lock_irqsave(&chan->lock, flags);
@@ -158,8 +160,8 @@ static void req_done(struct virtqueue *vq)
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
wake_up(chan->vc_wq);
- P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
- P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
+ p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
+ p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
req->status = REQ_STATUS_RCVD;
p9_client_cb(chan->client, req);
@@ -257,7 +259,7 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
unsigned long flags;
struct virtio_chan *chan = client->trans;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+ p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req->status = REQ_STATUS_SENT;
req_retry:
@@ -280,20 +282,19 @@ req_retry:
if (err == -ERESTARTSYS)
return err;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: "
- "virtio rpc add_buf returned failure");
+ p9_debug(P9_DEBUG_TRANS,
+ "virtio rpc add_buf returned failure\n");
return -EIO;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+ p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
return 0;
}
@@ -354,7 +355,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+ p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
out_nr_pages = p9_nr_pages(uodata, outlen);
@@ -423,20 +424,19 @@ req_retry_pinned:
if (err == -ERESTARTSYS)
goto err_out;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: "
- "virtio rpc add_buf returned failure");
+ p9_debug(P9_DEBUG_TRANS,
+ "virtio rpc add_buf returned failure\n");
err = -EIO;
goto err_out;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+ p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
err = wait_event_interruptible(*req->wq,
req->status >= REQ_STATUS_RCVD);
/*
@@ -491,7 +491,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
if (!chan) {
- printk(KERN_ERR "9p: Failed to allocate virtio 9P channel\n");
+ pr_err("Failed to allocate virtio 9P channel\n");
err = -ENOMEM;
goto fail;
}
@@ -592,7 +592,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
mutex_unlock(&virtio_9p_lock);
if (!found) {
- printk(KERN_ERR "9p: no channels available\n");
+ pr_err("no channels available\n");
return ret;
}
diff --git a/net/9p/util.c b/net/9p/util.c
index 9c1c9348ac3..6ceeeb384de 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -106,7 +106,7 @@ retry:
else if (error)
return -1;
- P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
+ p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
return i;
}
EXPORT_SYMBOL(p9_idpool_get);
@@ -124,7 +124,7 @@ void p9_idpool_put(int id, struct p9_idpool *p)
{
unsigned long flags;
- P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
+ p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
spin_lock_irqsave(&p->lock, flags);
idr_remove(&p->pool, id);
diff --git a/net/Kconfig b/net/Kconfig
index a0731484423..e07272d0bb2 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -215,6 +215,7 @@ source "net/sched/Kconfig"
source "net/dcb/Kconfig"
source "net/dns_resolver/Kconfig"
source "net/batman-adv/Kconfig"
+source "net/openvswitch/Kconfig"
config RPS
boolean
@@ -232,6 +233,19 @@ config XPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config NETPRIO_CGROUP
+ tristate "Network priority cgroup"
+ depends on CGROUPS
+ ---help---
+ Cgroup subsystem for use in assigning processes to network priorities on
+ a per-interface basis
+
+config BQL
+ boolean
+ depends on SYSFS
+ select DQL
+ default y
+
config HAVE_BPF_JIT
bool
diff --git a/net/Makefile b/net/Makefile
index acdde4950de..ad432fa4d93 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
+obj-$(CONFIG_OPENVSWITCH) += openvswitch/
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index f41f02656ff..876fbe83e2e 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -26,7 +26,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
gfp_t gfp_flags)
{
struct sock *sk = sk_atm(vcc);
- int guess = atm_guess_pdu2truesize(pdu_size);
+ int guess = SKB_TRUESIZE(pdu_size);
atm_force_charge(vcc, guess);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d07223c834a..353fccf1cde 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -53,6 +53,7 @@ static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
static const unsigned char llc_oui_pid_pad[] =
{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
+static const unsigned char pad[] = { PAD_BRIDGED };
static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
@@ -202,7 +203,10 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
{
struct br2684_dev *brdev = BRPRIV(dev);
struct atm_vcc *atmvcc;
- int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
+ int minheadroom = (brvcc->encaps == e_llc) ?
+ ((brdev->payload == p_bridged) ?
+ sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
+ ((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
if (skb_headroom(skb) < minheadroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
@@ -450,7 +454,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
} else { /* p_bridged */
/* first 2 chars should be 0 */
- if (*((u16 *) (skb->data)) != 0)
+ if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
goto error;
skb_pull(skb, BR2684_PAD_LEN);
skb->protocol = eth_type_trans(skb, net_dev);
@@ -489,15 +493,11 @@ free_skb:
*/
static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
{
- struct sk_buff_head queue;
- int err;
struct br2684_vcc *brvcc;
- struct sk_buff *skb, *tmp;
- struct sk_buff_head *rq;
struct br2684_dev *brdev;
struct net_device *net_dev;
struct atm_backend_br2684 be;
- unsigned long flags;
+ int err;
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
@@ -550,23 +550,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
atmvcc->push = br2684_push;
atmvcc->pop = br2684_pop;
- __skb_queue_head_init(&queue);
- rq = &sk_atm(atmvcc)->sk_receive_queue;
-
- spin_lock_irqsave(&rq->lock, flags);
- skb_queue_splice_init(rq, &queue);
- spin_unlock_irqrestore(&rq->lock, flags);
-
- skb_queue_walk_safe(&queue, skb, tmp) {
- struct net_device *dev;
-
- br2684_push(atmvcc, skb);
- dev = skb->dev;
-
- dev->stats.rx_bytes -= skb->len;
- dev->stats.rx_packets--;
- }
-
/* initialize netdev carrier state */
if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
@@ -574,6 +557,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
netif_carrier_on(net_dev);
__module_get(THIS_MODULE);
+
+ /* re-process everything received between connection setup and
+ backend setup */
+ vcc_process_recv_queue(atmvcc);
return 0;
error:
@@ -600,6 +587,7 @@ static void br2684_setup(struct net_device *netdev)
struct br2684_dev *brdev = BRPRIV(netdev);
ether_setup(netdev);
+ netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
brdev->net_dev = netdev;
netdev->netdev_ops = &br2684_netdev_ops;
@@ -612,7 +600,7 @@ static void br2684_setup_routed(struct net_device *netdev)
struct br2684_dev *brdev = BRPRIV(netdev);
brdev->net_dev = netdev;
- netdev->hard_header_len = 0;
+ netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
netdev->netdev_ops = &br2684_netdev_ops_routed;
netdev->addr_len = 0;
netdev->mtu = 1500;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 852394072fa..c12c2582457 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */
+#include <net/arp.h>
#include <linux/param.h> /* for HZ */
#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
@@ -119,7 +120,7 @@ out:
/* The neighbour entry n->lock is held. */
static int neigh_check_cb(struct neighbour *n)
{
- struct atmarp_entry *entry = NEIGH2ENTRY(n);
+ struct atmarp_entry *entry = neighbour_priv(n);
struct clip_vcc *cv;
for (cv = entry->vccs; cv; cv = cv->next) {
@@ -189,6 +190,13 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
pr_debug("\n");
+
+ if (!clip_devs) {
+ atm_return(vcc, skb->truesize);
+ kfree_skb(skb);
+ return;
+ }
+
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
@@ -255,8 +263,10 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
+ __be32 *ip = (__be32 *) neigh->primary_key;
+
pr_debug("(neigh %p, skb %p)\n", neigh, skb);
- to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
+ to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
}
static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
@@ -277,72 +287,24 @@ static const struct neigh_ops clip_neigh_ops = {
static int clip_constructor(struct neighbour *neigh)
{
- struct atmarp_entry *entry = NEIGH2ENTRY(neigh);
- struct net_device *dev = neigh->dev;
- struct in_device *in_dev;
- struct neigh_parms *parms;
+ struct atmarp_entry *entry = neighbour_priv(neigh);
- pr_debug("(neigh %p, entry %p)\n", neigh, entry);
- neigh->type = inet_addr_type(&init_net, entry->ip);
- if (neigh->type != RTN_UNICAST)
+ if (neigh->tbl->family != AF_INET)
return -EINVAL;
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev) {
- rcu_read_unlock();
+ if (neigh->type != RTN_UNICAST)
return -EINVAL;
- }
-
- parms = in_dev->arp_parms;
- __neigh_parms_put(neigh->parms);
- neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
+ neigh->nud_state = NUD_NONE;
neigh->ops = &clip_neigh_ops;
- neigh->output = neigh->nud_state & NUD_VALID ?
- neigh->ops->connected_output : neigh->ops->output;
+ neigh->output = neigh->ops->output;
entry->neigh = neigh;
entry->vccs = NULL;
entry->expires = jiffies - 1;
+
return 0;
}
-static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
-{
- return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
-}
-
-static struct neigh_table clip_tbl = {
- .family = AF_INET,
- .entry_size = sizeof(struct neighbour)+sizeof(struct atmarp_entry),
- .key_len = 4,
- .hash = clip_hash,
- .constructor = clip_constructor,
- .id = "clip_arp_cache",
-
- /* parameters are copied from ARP ... */
- .parms = {
- .tbl = &clip_tbl,
- .base_reachable_time = 30 * HZ,
- .retrans_time = 1 * HZ,
- .gc_staletime = 60 * HZ,
- .reachable_time = 30 * HZ,
- .delay_probe_time = 5 * HZ,
- .queue_len = 3,
- .ucast_probes = 3,
- .mcast_probes = 3,
- .anycast_delay = 1 * HZ,
- .proxy_delay = (8 * HZ) / 10,
- .proxy_qlen = 64,
- .locktime = 1 * HZ,
- },
- .gc_interval = 30 * HZ,
- .gc_thresh1 = 128,
- .gc_thresh2 = 512,
- .gc_thresh3 = 1024,
-};
-
/* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
/*
@@ -376,28 +338,19 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (!n) {
-#if 0
- n = clip_find_neighbour(skb_dst(skb), 1);
- if (!n) {
- dev_kfree_skb(skb); /* lost that one */
- dev->stats.tx_dropped++;
- return 0;
- }
- dst_set_neighbour(dst, n);
-#endif
pr_err("NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- entry = NEIGH2ENTRY(n);
+ entry = neighbour_priv(n);
if (!entry->vccs) {
if (time_after(jiffies, entry->expires)) {
/* should be resolved */
entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
- to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
+ to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
}
if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
skb_queue_tail(&entry->neigh->arp_queue, skb);
@@ -448,10 +401,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
static int clip_mkip(struct atm_vcc *vcc, int timeout)
{
- struct sk_buff_head *rq, queue;
struct clip_vcc *clip_vcc;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
if (!vcc->push)
return -EBADFD;
@@ -472,29 +422,9 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
vcc->push = clip_push;
vcc->pop = clip_pop;
- __skb_queue_head_init(&queue);
- rq = &sk_atm(vcc)->sk_receive_queue;
-
- spin_lock_irqsave(&rq->lock, flags);
- skb_queue_splice_init(rq, &queue);
- spin_unlock_irqrestore(&rq->lock, flags);
-
/* re-process everything received between connection setup and MKIP */
- skb_queue_walk_safe(&queue, skb, tmp) {
- if (!clip_devs) {
- atm_return(vcc, skb->truesize);
- kfree_skb(skb);
- } else {
- struct net_device *dev = skb->dev;
- unsigned int len = skb->len;
-
- skb_get(skb);
- clip_push(vcc, skb);
- dev->stats.rx_packets--;
- dev->stats.rx_bytes -= len;
- kfree_skb(skb);
- }
- }
+ vcc_process_recv_queue(vcc);
+
return 0;
}
@@ -523,11 +453,11 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
rt = ip_route_output(&init_net, ip, 0, 1, 0);
if (IS_ERR(rt))
return PTR_ERR(rt);
- neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
+ neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
ip_rt_put(rt);
if (!neigh)
return -ENOMEM;
- entry = NEIGH2ENTRY(neigh);
+ entry = neighbour_priv(neigh);
if (entry != clip_vcc->entry) {
if (!clip_vcc->entry)
pr_debug("add\n");
@@ -544,13 +474,15 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
}
static const struct net_device_ops clip_netdev_ops = {
- .ndo_start_xmit = clip_start_xmit,
+ .ndo_start_xmit = clip_start_xmit,
+ .ndo_neigh_construct = clip_constructor,
};
static void clip_setup(struct net_device *dev)
{
dev->netdev_ops = &clip_netdev_ops;
dev->type = ARPHRD_ATM;
+ dev->neigh_priv_len = sizeof(struct atmarp_entry);
dev->hard_header_len = RFC1483LLC_LEN;
dev->mtu = RFC1626_MTU;
dev->tx_queue_len = 100; /* "normal" queue (packets) */
@@ -604,10 +536,8 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- if (event == NETDEV_UNREGISTER) {
- neigh_ifdown(&clip_tbl, dev);
+ if (event == NETDEV_UNREGISTER)
return NOTIFY_DONE;
- }
/* ignore non-CLIP devices */
if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
@@ -787,9 +717,10 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
/* This means the neighbour entry has no attached VCC objects. */
#define SEQ_NO_VCC_TOKEN ((void *) 2)
-static void atmarp_info(struct seq_file *seq, struct net_device *dev,
+static void atmarp_info(struct seq_file *seq, struct neighbour *n,
struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
{
+ struct net_device *dev = n->dev;
unsigned long exp;
char buf[17];
int svc, llc, off;
@@ -809,8 +740,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
seq_printf(seq, "%-6s%-4s%-4s%5ld ",
dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
- off = scnprintf(buf, sizeof(buf) - 1, "%pI4",
- &entry->ip);
+ off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
while (off < 16)
buf[off++] = ' ';
buf[off] = '\0';
@@ -881,14 +811,17 @@ static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
{
struct clip_seq_state *state = (struct clip_seq_state *)_state;
- return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
+ if (n->dev->type != ARPHRD_ATM)
+ return NULL;
+
+ return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
}
static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
{
struct clip_seq_state *state = seq->private;
state->ns.neigh_sub_iter = clip_seq_sub_iter;
- return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
+ return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
}
static int clip_seq_show(struct seq_file *seq, void *v)
@@ -900,10 +833,10 @@ static int clip_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, atm_arp_banner);
} else {
struct clip_seq_state *state = seq->private;
- struct neighbour *n = v;
struct clip_vcc *vcc = state->vcc;
+ struct neighbour *n = v;
- atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
+ atmarp_info(seq, n, neighbour_priv(n), vcc);
}
return 0;
}
@@ -934,9 +867,6 @@ static void atm_clip_exit_noproc(void);
static int __init atm_clip_init(void)
{
- neigh_table_init_no_netlink(&clip_tbl);
-
- clip_tbl_hook = &clip_tbl;
register_atm_ioctl(&clip_ioctl_ops);
register_netdevice_notifier(&clip_dev_notifier);
register_inetaddr_notifier(&clip_inet_notifier);
@@ -973,12 +903,6 @@ static void atm_clip_exit_noproc(void)
*/
del_timer_sync(&idle_timer);
- /* Next, purge the table, so that the device
- * unregister loop below does not hang due to
- * device references remaining in the table.
- */
- neigh_ifdown(&clip_tbl, NULL);
-
dev = clip_devs;
while (dev) {
next = PRIV(dev)->next;
@@ -986,11 +910,6 @@ static void atm_clip_exit_noproc(void)
free_netdev(dev);
dev = next;
}
-
- /* Now it is safe to fully shutdown whole table. */
- neigh_table_clear(&clip_tbl);
-
- clip_tbl_hook = NULL;
}
static void __exit atm_clip_exit(void)
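
The clip.c rework above deletes the private clip_tbl and instead stores struct atmarp_entry in the per-device private area of ordinary arp_tbl neighbours, sized by dev->neigh_priv_len and reached through neighbour_priv(). That helper lives in the core neighbour code rather than in this patch; a minimal sketch of what it is assumed to do:

/* Assumed shape of the core helper used above (not part of this patch):
 * the private area reserved via dev->neigh_priv_len follows the
 * table-defined neighbour entry. */
static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}
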
diff --git a/net/atm/common.c b/net/atm/common.c
index 14ff9fe3998..b4b44dbed64 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -214,6 +214,26 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
}
EXPORT_SYMBOL(vcc_release_async);
+void vcc_process_recv_queue(struct atm_vcc *vcc)
+{
+ struct sk_buff_head queue, *rq;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+
+ __skb_queue_head_init(&queue);
+ rq = &sk_atm(vcc)->sk_receive_queue;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ skb_queue_splice_init(rq, &queue);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ __skb_unlink(skb, &queue);
+ vcc->push(vcc, skb);
+ }
+}
+EXPORT_SYMBOL(vcc_process_recv_queue);
+
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
@@ -502,8 +522,11 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
- if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
+
+ /* only handle MSG_DONTWAIT and MSG_PEEK */
+ if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
+
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -524,8 +547,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
- pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
- atm_return(vcc, skb->truesize);
+
+ if (!(flags & MSG_PEEK)) {
+ pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
+ skb->truesize);
+ atm_return(vcc, skb->truesize);
+ }
+
skb_free_datagram(sk, skb);
return copied;
}
diff --git a/net/atm/common.h b/net/atm/common.h
index f48a76b6cdf..cc3c2dae4d7 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -24,6 +24,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
+void vcc_process_recv_queue(struct atm_vcc *vcc);
int atmpvc_init(void);
void atmpvc_exit(void);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index db4a11c61d1..df35d9a3b5f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -303,6 +303,10 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
__module_get(THIS_MODULE);
+
+ /* re-process everything received between connection setup and
+ backend setup */
+ vcc_process_recv_queue(atmvcc);
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index e7c69f4619e..3cd0a0dc91c 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -402,14 +402,14 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
break;
case AX25_T1:
- if (ax25_ctl.arg < 1)
+ if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
- if (ax25_ctl.arg < 1)
+ if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
@@ -422,10 +422,15 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
break;
case AX25_T3:
+ if (ax25_ctl.arg > ULONG_MAX / HZ)
+ goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
+ if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
+ goto einval_put;
+
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
@@ -540,15 +545,16 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
ax25_cb *ax25;
struct net_device *dev;
char devname[IFNAMSIZ];
- int opt, res = 0;
+ unsigned long opt;
+ int res = 0;
if (level != SOL_AX25)
return -ENOPROTOOPT;
- if (optlen < sizeof(int))
+ if (optlen < sizeof(unsigned int))
return -EINVAL;
- if (get_user(opt, (int __user *)optval))
+ if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
lock_sock(sk);
@@ -571,7 +577,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T1:
- if (opt < 1) {
+ if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -580,7 +586,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T2:
- if (opt < 1) {
+ if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -596,7 +602,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T3:
- if (opt < 1) {
+ if (opt < 1 || opt > ULONG_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -604,7 +610,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_IDLE:
- if (opt < 0) {
+ if (opt > ULONG_MAX / (60 * HZ)) {
res = -EINVAL;
break;
}
@@ -612,7 +618,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_BACKOFF:
- if (opt < 0 || opt > 2) {
+ if (opt > 2) {
res = -EINVAL;
break;
}
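
The ULONG_MAX / HZ and ULONG_MAX / (60 * HZ) bounds added above reject timer arguments whose scaling would wrap an unsigned long. A small stand-alone C illustration of the same guard, with the function name and user-space framing being assumptions rather than code from this patch:

#include <errno.h>
#include <limits.h>

/* Refuse any value whose multiplication by 'mult' would overflow,
 * mirroring the "arg > ULONG_MAX / mult" checks in the hunks above. */
static int scale_checked(unsigned long arg, unsigned long mult,
			 unsigned long *out)
{
	if (mult != 0 && arg > ULONG_MAX / mult)
		return -EINVAL;
	*out = arg * mult;
	return 0;
}
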
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index b8a7414c357..c25492f7d66 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count,
unsigned long uint_val;
int ret;
- ret = strict_strtoul(buff, 10, &uint_val);
+ ret = kstrtoul(buff, 10, &uint_val);
if (ret) {
bat_info(net_dev,
"%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
unsigned long val;
int ret, vis_mode_tmp = -1;
- ret = strict_strtoul(buff, 10, &val);
+ ret = kstrtoul(buff, 10, &val);
if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
(strncmp(buff, "client", 6) == 0) ||
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 0be9ff346fa..9bc63b209b3 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
/* sequence number is much newer, probably missed a lot of packets */
if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
- || (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+ && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
bat_dbg(DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 619fb73b3b7..24403a7350f 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -25,6 +25,7 @@
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
+#include "translation-table.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -572,108 +573,142 @@ out:
return ret;
}
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
- struct orig_node *old_gw)
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
- struct gw_node *curr_gw;
- struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
- unsigned int header_len = 0;
- int ret = 1;
-
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
- return 0;
/* check for ethernet header */
- if (!pskb_may_pull(skb, header_len + ETH_HLEN))
- return 0;
+ if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+ return false;
ethhdr = (struct ethhdr *)skb->data;
- header_len += ETH_HLEN;
+ *header_len += ETH_HLEN;
/* check for initial vlan header */
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
- if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
- return 0;
+ if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+ return false;
ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
- header_len += VLAN_HLEN;
+ *header_len += VLAN_HLEN;
}
/* check for ip header */
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_IP:
- if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
- return 0;
- iphdr = (struct iphdr *)(skb->data + header_len);
- header_len += iphdr->ihl * 4;
+ if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+ return false;
+ iphdr = (struct iphdr *)(skb->data + *header_len);
+ *header_len += iphdr->ihl * 4;
/* check for udp header */
if (iphdr->protocol != IPPROTO_UDP)
- return 0;
+ return false;
break;
case ETH_P_IPV6:
- if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
- return 0;
- ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
- header_len += sizeof(*ipv6hdr);
+ if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+ return false;
+ ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+ *header_len += sizeof(*ipv6hdr);
/* check for udp header */
if (ipv6hdr->nexthdr != IPPROTO_UDP)
- return 0;
+ return false;
break;
default:
- return 0;
+ return false;
}
- if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
- return 0;
- udphdr = (struct udphdr *)(skb->data + header_len);
- header_len += sizeof(*udphdr);
+ if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+ return false;
+ udphdr = (struct udphdr *)(skb->data + *header_len);
+ *header_len += sizeof(*udphdr);
/* check for bootp port */
if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
(ntohs(udphdr->dest) != 67))
- return 0;
+ return false;
if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
(ntohs(udphdr->dest) != 547))
- return 0;
+ return false;
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
- return -1;
+ return true;
+}
- curr_gw = gw_get_selected_gw_node(bat_priv);
- if (!curr_gw)
- return 0;
-
- /* If old_gw != NULL then this packet is unicast.
- * So, at this point we have to check the message type: if it is a
- * DHCPREQUEST we have to decide whether to drop it or not */
- if (old_gw && curr_gw->orig_node != old_gw) {
- if (is_type_dhcprequest(skb, header_len)) {
- /* If the dhcp packet has been sent to a different gw,
- * we have to evaluate whether the old gw is still
- * reliable enough */
- neigh_curr = find_router(bat_priv, curr_gw->orig_node,
- NULL);
- neigh_old = find_router(bat_priv, old_gw, NULL);
- if (!neigh_curr || !neigh_old)
- goto free_neigh;
- if (neigh_curr->tq_avg - neigh_old->tq_avg <
- GW_THRESHOLD)
- ret = -1;
- }
+bool gw_out_of_range(struct bat_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr)
+{
+ struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+ struct orig_node *orig_dst_node = NULL;
+ struct gw_node *curr_gw = NULL;
+ bool ret, out_of_range = false;
+ unsigned int header_len = 0;
+ uint8_t curr_tq_avg;
+
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (!ret)
+ goto out;
+
+ orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
+ if (!orig_dst_node)
+ goto out;
+
+ if (!orig_dst_node->gw_flags)
+ goto out;
+
+ ret = is_type_dhcprequest(skb, header_len);
+ if (!ret)
+ goto out;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* If we are a GW then we are our best GW. We can artificially
+ * set the tq towards ourself as the maximum value */
+ curr_tq_avg = TQ_MAX_VALUE;
+ break;
+ case GW_MODE_CLIENT:
+ curr_gw = gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ goto out;
+
+ /* packet is going to our gateway */
+ if (curr_gw->orig_node == orig_dst_node)
+ goto out;
+
+ /* If the dhcp packet has been sent to a different gw,
+ * we have to evaluate whether the old gw is still
+ * reliable enough */
+ neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+ if (!neigh_curr)
+ goto out;
+
+ curr_tq_avg = neigh_curr->tq_avg;
+ break;
+ case GW_MODE_OFF:
+ default:
+ goto out;
}
-free_neigh:
+
+ neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+ if (!neigh_old)
+ goto out;
+
+ if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+ out_of_range = true;
+
+out:
+ if (orig_dst_node)
+ orig_node_free_ref(orig_dst_node);
+ if (curr_gw)
+ gw_node_free_ref(curr_gw);
if (neigh_old)
neigh_node_free_ref(neigh_old);
if (neigh_curr)
neigh_node_free_ref(neigh_curr);
- if (curr_gw)
- gw_node_free_ref(curr_gw);
- return ret;
+ return out_of_range;
}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index b9b983c07fe..e1edba08eb1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv,
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
void gw_node_purge(struct bat_priv *bat_priv);
int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
- struct orig_node *old_gw);
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool gw_out_of_range(struct bat_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 18661af0bc3..c4ac7b0a2a6 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = strict_strtol(buff, 10, &ldown);
+ ret = kstrtol(buff, 10, &ldown);
if (ret) {
bat_err(net_dev,
"Download speed of gateway mode invalid: %s\n",
@@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = strict_strtol(slash_ptr + 1, 10, &lup);
+ ret = kstrtol(slash_ptr + 1, 10, &lup);
if (ret) {
bat_err(net_dev,
"Upload speed of gateway mode invalid: "
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 2a172505f51..d1da29da333 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
/* clears the hash */
static void hash_init(struct hashtable_t *hash)
{
- int i;
+ uint32_t i;
for (i = 0 ; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash)
}
/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size)
+struct hashtable_t *hash_new(uint32_t size)
{
struct hashtable_t *hash;
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d20aa71ba1e..4768717f07f 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
/* the hashfunction, should return an index
* based on the key in the data of the first
* argument and the size the second */
-typedef int (*hashdata_choose_cb)(const void *, int);
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
struct hashtable_t {
struct hlist_head *table; /* the hashtable itself with the buckets */
spinlock_t *list_locks; /* spinlock for each hash list entry */
- int size; /* size of hashtable */
+ uint32_t size; /* size of hashtable */
};
/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size);
+struct hashtable_t *hash_new(uint32_t size);
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
@@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash,
struct hlist_head *head;
struct hlist_node *node, *node_tmp;
spinlock_t *list_lock; /* spinlock to protect write access */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash,
hashdata_choose_cb choose,
const void *data, struct hlist_node *data_node)
{
- int index, ret = -1;
+ uint32_t index;
+ int ret = -1;
struct hlist_head *head;
struct hlist_node *node;
spinlock_t *list_lock; /* spinlock to protect write access */
@@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash,
hashdata_compare_cb compare,
hashdata_choose_cb choose, void *data)
{
- size_t index;
+ uint32_t index;
struct hlist_node *node;
struct hlist_head *head;
void *data_save = NULL;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ac3520e057c..d9c1e7bb7fb 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
spin_unlock_bh(&socket_client->lock);
- error = __copy_to_user(buf, &socket_packet->icmp_packet,
- socket_packet->icmp_len);
+ packet_len = min(count, socket_packet->icmp_len);
+ error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
- packet_len = socket_packet->icmp_len;
kfree(socket_packet);
if (error)
@@ -187,12 +186,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
skb_reserve(skb, sizeof(struct ethhdr));
icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
- if (!access_ok(VERIFY_READ, buff, packet_len)) {
- len = -EFAULT;
- goto free_skb;
- }
-
- if (__copy_from_user(icmp_packet, buff, packet_len)) {
+ if (copy_from_user(icmp_packet, buff, packet_len)) {
len = -EFAULT;
goto free_skb;
}
@@ -217,7 +211,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (icmp_packet->version != COMPAT_VERSION) {
icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->ttl = COMPAT_VERSION;
+ icmp_packet->version = COMPAT_VERSION;
bat_socket_add_packet(socket_client, icmp_packet, packet_len);
goto free_skb;
}
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 964ad4d8ba3..86354e06eb4 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0e5b77255d9..0bc2045a2f2 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;
if (!hash)
return;
@@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;
if (!hash)
return;
@@ -413,7 +413,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
@@ -519,7 +520,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
@@ -601,7 +603,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_head *head;
struct hard_iface *hard_iface_tmp;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index cfc1f60a96a..67765ffef73 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
/* hashfunction to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f961cc5eade..773e606f970 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
struct hlist_head *head;
struct orig_node *orig_node;
unsigned long *word;
- int i;
+ uint32_t i;
size_t word_index;
for (i = 0; i < hash->size; i++) {
@@ -578,6 +578,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct tt_query_packet *tt_query;
+ uint16_t tt_len;
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
@@ -616,13 +617,21 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
}
break;
case TT_RESPONSE:
- /* packet needs to be linearized to access the TT changes */
- if (skb_linearize(skb) < 0)
- goto out;
+ if (is_my_mac(tt_query->dst)) {
+ /* packet needs to be linearized to access the TT
+ * changes */
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+ /* Ensure we have all the claimed data */
+ if (unlikely(skb_headlen(skb) <
+ sizeof(struct tt_query_packet) + tt_len))
+ goto out;
- if (is_my_mac(tt_query->dst))
handle_tt_response(bat_priv, tt_query);
- else {
+ } else {
bat_dbg(DBG_TT, bat_priv,
"Routing TT_RESPONSE to %pM [%c]\n",
tt_query->dst,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index f9cc9572898..987c75a775f 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
struct softif_neigh *curr_softif_neigh = NULL;
- struct orig_node *orig_node = NULL;
+ unsigned int header_len = 0;
int data_len = skb->len, ret;
short vid = -1;
- bool do_bcast;
+ bool do_bcast = false;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
@@ -598,17 +598,28 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* Register the client MAC in the transtable */
tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
- orig_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
- do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
- if (do_bcast || (orig_node && orig_node->gw_flags)) {
- ret = gw_is_target(bat_priv, skb, orig_node);
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ do_bcast = true;
- if (ret < 0)
- goto dropped;
-
- if (ret)
- do_bcast = false;
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* gateway servers should not send dhcp
+ * requests into the mesh */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ goto dropped;
+ break;
+ case GW_MODE_CLIENT:
+ /* gateway clients should send dhcp requests
+ * via unicast to their gateway */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ do_bcast = false;
+ break;
+ case GW_MODE_OFF:
+ default:
+ break;
+ }
}
/* ethernet packet should be broadcasted */
@@ -644,6 +655,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* unicast packet */
} else {
+ if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+ ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (ret)
+ goto dropped;
+ }
+
ret = unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
@@ -662,8 +679,6 @@ end:
softif_neigh_free_ref(curr_softif_neigh);
if (primary_if)
hardif_free_ref(primary_if);
- if (orig_node)
- orig_node_free_ref(orig_node);
return NETDEV_TX_OK;
}
@@ -859,7 +874,7 @@ unreg_debugfs:
unreg_sysfs:
sysfs_del_meshif(soft_iface);
unreg_soft_iface:
- unregister_netdev(soft_iface);
+ unregister_netdevice(soft_iface);
return NULL;
free_soft_iface:
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed..ab8dea8b0b2 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -36,18 +36,9 @@ static void _tt_global_del(struct bat_priv *bat_priv,
static void tt_purge(struct work_struct *work);
/* returns 1 if they are the same mac addr */
-static int compare_ltt(const struct hlist_node *node, const void *data2)
+static int compare_tt(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct tt_local_entry,
- hash_entry);
-
- return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* returns 1 if they are the same mac addr */
-static int compare_gtt(const struct hlist_node *node, const void *data2)
-{
- const void *data1 = container_of(node, struct tt_global_entry,
+ const void *data1 = container_of(node, struct tt_common_entry,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
@@ -60,14 +51,13 @@ static void tt_start_timer(struct bat_priv *bat_priv)
msecs_to_jiffies(5000));
}
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
+ const void *data)
{
- struct hashtable_t *hash = bat_priv->tt_local_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
- int index;
+ struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+ uint32_t index;
if (!hash)
return NULL;
@@ -76,51 +66,46 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
head = &hash->table[index];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
- if (!compare_eth(tt_local_entry, data))
+ hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+ if (!compare_eth(tt_common_entry, data))
continue;
- if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+ if (!atomic_inc_not_zero(&tt_common_entry->refcount))
continue;
- tt_local_entry_tmp = tt_local_entry;
+ tt_common_entry_tmp = tt_common_entry;
break;
}
rcu_read_unlock();
- return tt_local_entry_tmp;
+ return tt_common_entry_tmp;
}
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
+ const void *data)
{
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct hlist_head *head;
- struct hlist_node *node;
- struct tt_global_entry *tt_global_entry;
- struct tt_global_entry *tt_global_entry_tmp = NULL;
- int index;
-
- if (!hash)
- return NULL;
-
- index = choose_orig(data, hash->size);
- head = &hash->table[index];
+ struct tt_common_entry *tt_common_entry;
+ struct tt_local_entry *tt_local_entry = NULL;
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
- if (!compare_eth(tt_global_entry, data))
- continue;
+ tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+ if (tt_common_entry)
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry, common);
+ return tt_local_entry;
+}
- if (!atomic_inc_not_zero(&tt_global_entry->refcount))
- continue;
+static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
+ const void *data)
+{
+ struct tt_common_entry *tt_common_entry;
+ struct tt_global_entry *tt_global_entry = NULL;
- tt_global_entry_tmp = tt_global_entry;
- break;
- }
- rcu_read_unlock();
+ tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+ if (tt_common_entry)
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry, common);
+ return tt_global_entry;
- return tt_global_entry_tmp;
}
static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
@@ -133,15 +118,18 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
- if (atomic_dec_and_test(&tt_local_entry->refcount))
- kfree_rcu(tt_local_entry, rcu);
+ if (atomic_dec_and_test(&tt_local_entry->common.refcount))
+ kfree_rcu(tt_local_entry, common.rcu);
}
static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
- tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+ tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
+ tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+ common);
if (tt_global_entry->orig_node)
orig_node_free_ref(tt_global_entry->orig_node);
@@ -151,8 +139,9 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
- if (atomic_dec_and_test(&tt_global_entry->refcount))
- call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
+ if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+ call_rcu(&tt_global_entry->common.rcu,
+ tt_global_entry_free_rcu);
}
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +190,7 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry = NULL;
struct tt_global_entry *tt_global_entry = NULL;
+ int hash_added;
tt_local_entry = tt_local_hash_find(bat_priv, addr);
@@ -217,26 +207,33 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
(uint8_t)atomic_read(&bat_priv->ttvn));
- memcpy(tt_local_entry->addr, addr, ETH_ALEN);
- tt_local_entry->last_seen = jiffies;
- tt_local_entry->flags = NO_FLAGS;
+ memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+ tt_local_entry->common.flags = NO_FLAGS;
if (is_wifi_iface(ifindex))
- tt_local_entry->flags |= TT_CLIENT_WIFI;
- atomic_set(&tt_local_entry->refcount, 2);
+ tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+ atomic_set(&tt_local_entry->common.refcount, 2);
+ tt_local_entry->last_seen = jiffies;
/* the batman interface mac address should never be purged */
if (compare_eth(addr, soft_iface->dev_addr))
- tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+ tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
- tt_local_event(bat_priv, addr, tt_local_entry->flags);
+ hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+ &tt_local_entry->common,
+ &tt_local_entry->common.hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ tt_local_entry_free_ref(tt_local_entry);
+ goto out;
+ }
+
+ tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
/* The local entry has to be marked as NEW to avoid to send it in
* a full table response going out before the next ttvn increment
* (consistency check) */
- tt_local_entry->flags |= TT_CLIENT_NEW;
-
- hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
- tt_local_entry, &tt_local_entry->hash_entry);
+ tt_local_entry->common.flags |= TT_CLIENT_NEW;
/* remove address from global hash if present */
tt_global_entry = tt_global_hash_find(bat_priv, addr);
@@ -245,10 +242,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (tt_global_entry) {
/* This node is probably going to update its tt table */
tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as PENDING and has to be
+ /* The global entry has to be marked as ROAMING and has to be
* kept for consistency purpose */
- tt_global_entry->flags |= TT_CLIENT_PENDING;
- send_roam_adv(bat_priv, tt_global_entry->addr,
+ tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ send_roam_adv(bat_priv, tt_global_entry->common.addr,
tt_global_entry->orig_node);
}
out:
@@ -310,13 +308,12 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -337,51 +334,27 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
"announced via TT (TTVN: %u):\n",
net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 29;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 30, " * %pM "
- "[%c%c%c%c%c]\n",
- tt_local_entry->addr,
- (tt_local_entry->flags &
+ seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+ tt_common_entry->addr,
+ (tt_common_entry->flags &
TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_NOPURGE ? 'P' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_NEW ? 'N' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_PENDING ? 'X' : '.'),
- (tt_local_entry->flags &
+ (tt_common_entry->flags &
TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -392,13 +365,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
struct tt_local_entry *tt_local_entry,
uint16_t flags)
{
- tt_local_event(bat_priv, tt_local_entry->addr,
- tt_local_entry->flags | flags);
+ tt_local_event(bat_priv, tt_local_entry->common.addr,
+ tt_local_entry->common.flags | flags);
/* The local client has to be marked as "pending to be removed" but has
* to be kept in the table in order to send it in a full table
* response issued before the net ttvn increment (consistency check) */
- tt_local_entry->flags |= TT_CLIENT_PENDING;
+ tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}
void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -414,7 +387,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
(roaming ? TT_CLIENT_ROAM : NO_FLAGS));
bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
- "%s\n", tt_local_entry->addr, message);
+ "%s\n", tt_local_entry->common.addr, message);
out:
if (tt_local_entry)
tt_local_entry_free_ref(tt_local_entry);
@@ -424,23 +397,27 @@ static void tt_local_purge(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_local_hash;
struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
continue;
/* entry already marked for deletion */
- if (tt_local_entry->flags & TT_CLIENT_PENDING)
+ if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
continue;
if (!is_out_of_time(tt_local_entry->last_seen,
@@ -451,7 +428,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
TT_CLIENT_DEL);
bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
"pending to be removed: timed out\n",
- tt_local_entry->addr);
+ tt_local_entry->common.addr);
}
spin_unlock_bh(list_lock);
}
@@ -462,10 +439,11 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
{
struct hashtable_t *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct tt_common_entry *tt_common_entry;
struct tt_local_entry *tt_local_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;
if (!bat_priv->tt_local_hash)
return;
@@ -477,9 +455,12 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
tt_local_entry_free_ref(tt_local_entry);
}
spin_unlock_bh(list_lock);
@@ -527,6 +508,7 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
struct tt_global_entry *tt_global_entry;
struct orig_node *orig_node_tmp;
int ret = 0;
+ int hash_added;
tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
@@ -537,18 +519,24 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
if (!tt_global_entry)
goto out;
- memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+ memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+ tt_global_entry->common.flags = NO_FLAGS;
+ atomic_set(&tt_global_entry->common.refcount, 2);
/* Assign the new orig_node */
atomic_inc(&orig_node->refcount);
tt_global_entry->orig_node = orig_node;
tt_global_entry->ttvn = ttvn;
- tt_global_entry->flags = NO_FLAGS;
tt_global_entry->roam_at = 0;
- atomic_set(&tt_global_entry->refcount, 2);
- hash_add(bat_priv->tt_global_hash, compare_gtt,
- choose_orig, tt_global_entry,
- &tt_global_entry->hash_entry);
+ hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+ choose_orig, &tt_global_entry->common,
+ &tt_global_entry->common.hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ tt_global_entry_free_ref(tt_global_entry);
+ goto out_remove;
+ }
atomic_inc(&orig_node->tt_size);
} else {
if (tt_global_entry->orig_node != orig_node) {
@@ -559,20 +547,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
orig_node_free_ref(orig_node_tmp);
atomic_inc(&orig_node->tt_size);
}
+ tt_global_entry->common.flags = NO_FLAGS;
tt_global_entry->ttvn = ttvn;
- tt_global_entry->flags = NO_FLAGS;
tt_global_entry->roam_at = 0;
}
if (wifi)
- tt_global_entry->flags |= TT_CLIENT_WIFI;
+ tt_global_entry->common.flags |= TT_CLIENT_WIFI;
bat_dbg(DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n",
- tt_global_entry->addr, orig_node->orig);
+ tt_global_entry->common.addr, orig_node->orig);
+out_remove:
/* remove address from local hash if present */
- tt_local_remove(bat_priv, tt_global_entry->addr,
+ tt_local_remove(bat_priv, tt_global_entry->common.addr,
"global tt received", roaming);
ret = 1;
out:
@@ -586,13 +575,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -615,53 +604,32 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, " %-13s %s %-15s %s %s\n",
"Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
- * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 67;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tt_global_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 69,
- " * %pM (%3u) via %pM (%3u) "
- "[%c%c%c]\n", tt_global_entry->addr,
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+ seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
+ "[%c%c%c]\n",
+ tt_global_entry->common.addr,
tt_global_entry->ttvn,
tt_global_entry->orig_node->orig,
(uint8_t) atomic_read(
&tt_global_entry->orig_node->
last_ttvn),
- (tt_global_entry->flags &
+ (tt_global_entry->common.flags &
TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_global_entry->flags &
+ (tt_global_entry->common.flags &
TT_CLIENT_PENDING ? 'X' : '.'),
- (tt_global_entry->flags &
+ (tt_global_entry->common.flags &
TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -677,13 +645,13 @@ static void _tt_global_del(struct bat_priv *bat_priv,
bat_dbg(DBG_TT, bat_priv,
"Deleting global tt entry %pM (via %pM): %s\n",
- tt_global_entry->addr, tt_global_entry->orig_node->orig,
+ tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
message);
atomic_dec(&tt_global_entry->orig_node->tt_size);
- hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
- tt_global_entry->addr);
+ hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+ tt_global_entry->common.addr);
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
@@ -694,6 +662,7 @@ void tt_global_del(struct bat_priv *bat_priv,
const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
@@ -701,22 +670,37 @@ void tt_global_del(struct bat_priv *bat_priv,
if (tt_global_entry->orig_node == orig_node) {
if (roaming) {
- tt_global_entry->flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => we mark
+ * it with TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now. */
+ tt_local_entry = tt_local_hash_find(bat_priv,
+ tt_global_entry->common.addr);
+ if (!tt_local_entry) {
+ tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
+ }
}
_tt_global_del(bat_priv, tt_global_entry, message);
}
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
}
void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message)
{
struct tt_global_entry *tt_global_entry;
- int i;
+ struct tt_common_entry *tt_common_entry;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->tt_global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
@@ -730,14 +714,18 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, safe,
+ hlist_for_each_entry_safe(tt_common_entry, node, safe,
head, hash_entry) {
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
if (tt_global_entry->orig_node == orig_node) {
bat_dbg(DBG_TT, bat_priv,
"Deleting global tt entry %pM "
- "(via %pM): originator time out\n",
- tt_global_entry->addr,
- tt_global_entry->orig_node->orig);
+ "(via %pM): %s\n",
+ tt_global_entry->common.addr,
+ tt_global_entry->orig_node->orig,
+ message);
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
}
@@ -750,20 +738,24 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
+ if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
continue;
if (!is_out_of_time(tt_global_entry->roam_at,
TT_CLIENT_ROAM_TIMEOUT * 1000))
@@ -771,7 +763,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
bat_dbg(DBG_TT, bat_priv, "Deleting global "
"tt entry (%pM): Roaming timeout\n",
- tt_global_entry->addr);
+ tt_global_entry->common.addr);
atomic_dec(&tt_global_entry->orig_node->tt_size);
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
@@ -785,10 +777,11 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
{
struct hashtable_t *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;
if (!bat_priv->tt_global_hash)
return;
@@ -800,9 +793,12 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
tt_global_entry_free_ref(tt_global_entry);
}
spin_unlock_bh(list_lock);
@@ -818,8 +814,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
{
bool ret = false;
- if (tt_local_entry->flags & TT_CLIENT_WIFI &&
- tt_global_entry->flags & TT_CLIENT_WIFI)
+ if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+ tt_global_entry->common.flags & TT_CLIENT_WIFI)
ret = true;
return ret;
@@ -852,7 +848,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
/* A global client marked as PENDING has already moved from that
* originator */
- if (tt_global_entry->flags & TT_CLIENT_PENDING)
+ if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
goto out;
orig_node = tt_global_entry->orig_node;
@@ -871,29 +867,34 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_global_entry *tt_global_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_global_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
+ tt_global_entry = container_of(tt_common_entry,
+ struct tt_global_entry,
+ common);
if (compare_eth(tt_global_entry->orig_node,
orig_node)) {
/* Roaming clients are in the global table for
* consistency only. They don't have to be
* taken into account while computing the
* global crc */
- if (tt_global_entry->flags & TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & TT_CLIENT_ROAM)
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_global_entry->addr[j]);
+ tt_common_entry->addr[j]);
total ^= total_one;
}
}
@@ -908,25 +909,26 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
/* not yet committed clients have not to be taken into
* account while computing the CRC */
- if (tt_local_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & TT_CLIENT_NEW)
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_local_entry->addr[j]);
+ tt_common_entry->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -1015,21 +1017,25 @@ unlock:
/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
- const struct tt_local_entry *tt_local_entry = entry_ptr;
+ const struct tt_common_entry *tt_common_entry = entry_ptr;
- if (tt_local_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & TT_CLIENT_NEW)
return 0;
return 1;
}
static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
- const struct tt_global_entry *tt_global_entry = entry_ptr;
+ const struct tt_common_entry *tt_common_entry = entry_ptr;
+ const struct tt_global_entry *tt_global_entry;
const struct orig_node *orig_node = data_ptr;
- if (tt_global_entry->flags & TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & TT_CLIENT_ROAM)
return 0;
+ tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+ common);
+
return (tt_global_entry->orig_node == orig_node);
}
@@ -1040,7 +1046,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
const void *),
void *cb_data)
{
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
struct tt_query_packet *tt_response;
struct tt_change *tt_change;
struct hlist_node *node;
@@ -1048,7 +1054,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
ssize_t tt_query_size = sizeof(struct tt_query_packet);
- int i;
+ uint32_t i;
if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1072,15 +1078,16 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
if (tt_count == tt_tot)
break;
- if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+ if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
continue;
- memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+ memcpy(tt_change->addr, tt_common_entry->addr,
+ ETH_ALEN);
tt_change->flags = NO_FLAGS;
tt_count++;
@@ -1187,11 +1194,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+ req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
if (!req_dst_orig_node)
goto out;
- res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+ res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!res_dst_orig_node)
goto out;
@@ -1317,7 +1324,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
req_ttvn = tt_request->ttvn;
- orig_node = get_orig_node(bat_priv, tt_request->src);
+ orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!orig_node)
goto out;
@@ -1497,7 +1504,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
goto out;
/* Check if the client has been logically deleted (but is kept for
* consistency purpose) */
- if (tt_local_entry->flags & TT_CLIENT_PENDING)
+ if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
goto out;
ret = true;
out:
@@ -1720,45 +1727,53 @@ void tt_free(struct bat_priv *bat_priv)
kfree(bat_priv->tt_buff);
}
-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function will enable or disable the specified flags for all the entries
+ * in the given hash table and return the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+ bool enable)
{
- int i;
- struct hashtable_t *hash = bat_priv->tt_local_hash;
+ uint32_t i;
+ uint16_t changed_num = 0;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
if (!hash)
- return;
+ goto out;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node,
+ hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- if (!(tt_local_entry->flags & flags))
- continue;
- tt_local_entry->flags &= ~flags;
- atomic_inc(&bat_priv->num_local_tt);
+ if (enable) {
+ if ((tt_common_entry->flags & flags) == flags)
+ continue;
+ tt_common_entry->flags |= flags;
+ } else {
+ if (!(tt_common_entry->flags & flags))
+ continue;
+ tt_common_entry->flags &= ~flags;
+ }
+ changed_num++;
}
rcu_read_unlock();
}
-
+out:
+ return changed_num;
}
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
struct hashtable_t *hash = bat_priv->tt_local_hash;
+ struct tt_common_entry *tt_common_entry;
struct tt_local_entry *tt_local_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
if (!hash)
return;
@@ -1768,16 +1783,19 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
- if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+ if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
continue;
bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
- "(%pM): pending\n", tt_local_entry->addr);
+ "(%pM): pending\n", tt_common_entry->addr);
atomic_dec(&bat_priv->num_local_tt);
hlist_del_rcu(node);
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
tt_local_entry_free_ref(tt_local_entry);
}
spin_unlock_bh(list_lock);
@@ -1787,7 +1805,11 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
void tt_commit_changes(struct bat_priv *bat_priv)
{
- tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+ uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+ TT_CLIENT_NEW, false);
+ /* all the reset entries now have to be counted as local
+ * entries */
+ atomic_add(changed_num, &bat_priv->num_local_tt);
tt_local_purge_pending_clients(bat_priv);
/* Increment the TTVN only once per OGM interval */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ab8d0fe6df5..e9eb043719a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -222,24 +222,24 @@ struct socket_packet {
struct icmp_packet_rr icmp_packet;
};
-struct tt_local_entry {
+struct tt_common_entry {
uint8_t addr[ETH_ALEN];
struct hlist_node hash_entry;
- unsigned long last_seen;
uint16_t flags;
atomic_t refcount;
struct rcu_head rcu;
};
+struct tt_local_entry {
+ struct tt_common_entry common;
+ unsigned long last_seen;
+};
+
struct tt_global_entry {
- uint8_t addr[ETH_ALEN];
- struct hlist_node hash_entry; /* entry in the global table */
+ struct tt_common_entry common;
struct orig_node *orig_node;
uint8_t ttvn;
- uint16_t flags; /* only TT_GLOBAL_ROAM is used */
unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
- atomic_t refcount;
- struct rcu_head rcu;
};
struct tt_change_node {
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f81a6b668b0..cc3b9f2f3b5 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2)
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
{
const struct vis_info *vis_info = data;
const struct vis_packet *packet;
@@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
struct hlist_head *head;
struct hlist_node *node;
struct vis_info *vis_info, *vis_info_tmp = NULL;
- int index;
+ uint32_t index;
if (!hash)
return NULL;
@@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
HLIST_HEAD(vis_if_list);
struct if_list_entry *entry;
struct hlist_node *pos, *n;
- int i, j, ret = 0;
+ uint32_t i;
+ int j, ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
@@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
struct hlist_head *head;
struct orig_node *orig_node;
struct vis_packet *packet;
- int best_tq = -1, i;
+ int best_tq = -1;
+ uint32_t i;
packet = (struct vis_packet *)info->skb_packet->data;
@@ -607,8 +609,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
struct vis_info *info = bat_priv->my_vis_info;
struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
struct vis_info_entry *entry;
- struct tt_local_entry *tt_local_entry;
- int best_tq = -1, i;
+ struct tt_common_entry *tt_common_entry;
+ int best_tq = -1;
+ uint32_t i;
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
@@ -669,13 +672,13 @@ next:
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_local_entry, node, head,
+ hlist_for_each_entry_rcu(tt_common_entry, node, head,
hash_entry) {
entry = (struct vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
- memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+ memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
entry->quality = 0; /* 0 means TT */
packet->entries++;
@@ -696,7 +699,7 @@ unlock:
* held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
- int i;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
@@ -733,7 +736,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
struct sk_buff *skb;
struct hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
- int i;
+ uint32_t i;
packet = (struct vis_packet *)info->skb_packet->data;
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index bfb3dc03c9d..9ec85eb8853 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,11 @@ menuconfig BT
tristate "Bluetooth subsystem support"
depends on NET && !S390
depends on RFKILL || !RFKILL
+ select CRC16
select CRYPTO
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ECB
help
Bluetooth is low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
@@ -15,10 +19,12 @@ menuconfig BT
Bluetooth can be found at <http://www.bluetooth.com/>.
Linux Bluetooth subsystem consist of several layers:
- Bluetooth Core (HCI device and connection manager, scheduler)
+ Bluetooth Core
+ HCI device and connection manager, scheduler
+ SCO audio links
+ L2CAP (Logical Link Control and Adaptation Protocol)
+ SMP (Security Manager Protocol) on LE (Low Energy) links
HCI Device drivers (Interface to the hardware)
- SCO Module (SCO audio links)
- L2CAP Module (Logical Link Control and Adaptation Protocol)
RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
@@ -33,31 +39,6 @@ menuconfig BT
to Bluetooth kernel modules are provided in the BlueZ packages. For
more information, see <http://www.bluez.org/>.
-if BT != n
-
-config BT_L2CAP
- bool "L2CAP protocol support"
- select CRC16
- select CRYPTO
- select CRYPTO_BLKCIPHER
- select CRYPTO_AES
- select CRYPTO_ECB
- help
- L2CAP (Logical Link Control and Adaptation Protocol) provides
- connection oriented and connection-less data transport. L2CAP
- support is required for most Bluetooth applications.
-
- Also included is support for SMP (Security Manager Protocol) which
- is the security layer on top of LE (Low Energy) links.
-
-config BT_SCO
- bool "SCO links support"
- help
- SCO link provides voice transport over Bluetooth. SCO support is
- required for voice applications like Headset and Audio.
-
-endif
-
source "net/bluetooth/rfcomm/Kconfig"
source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 9b67f3d08fa..2dc5a5700f5 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -8,6 +8,5 @@ obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
-bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
-bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
-bluetooth-$(CONFIG_BT_SCO) += sco.o
+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
+ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 062124cd89c..cdcfcabb34a 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -199,15 +199,14 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
BT_DBG("parent %p", parent);
- local_bh_disable();
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
- bh_lock_sock(sk);
+ lock_sock(sk);
/* FIXME: Is this check still needed */
if (sk->sk_state == BT_CLOSED) {
- bh_unlock_sock(sk);
+ release_sock(sk);
bt_accept_unlink(sk);
continue;
}
@@ -218,14 +217,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
if (newsock)
sock_graft(sk, newsock);
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return sk;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
- local_bh_enable();
return NULL;
}
diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig
index 35158b036d5..71791fc9f6b 100644
--- a/net/bluetooth/bnep/Kconfig
+++ b/net/bluetooth/bnep/Kconfig
@@ -1,6 +1,6 @@
config BT_BNEP
tristate "BNEP protocol support"
- depends on BT && BT_L2CAP
+ depends on BT
select CRC32
help
BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961e..a779ec70332 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -56,8 +56,8 @@
#define VERSION "1.3"
-static int compress_src = 1;
-static int compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
@@ -65,31 +65,24 @@ static DECLARE_RWSEM(bnep_session_sem);
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &bnep_session_list) {
- s = list_entry(p, struct bnep_session, list);
+ list_for_each_entry(s, &bnep_session_list, list)
if (!compare_ether_addr(dst, s->eh.h_source))
return s;
- }
+
return NULL;
}
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +523,7 @@ static int bnep_session(void *arg)
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
@@ -616,9 +610,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
__bnep_link_session(s);
+ __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
@@ -667,17 +663,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct list_head *p;
+ struct bnep_session *s;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each(p, &bnep_session_list) {
- struct bnep_session *s;
+ list_for_each_entry(s, &bnep_session_list, list) {
struct bnep_conninfo ci;
- s = list_entry(p, struct bnep_session, list);
-
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index d6b0382f6f3..94cbf42ce15 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,6 +1,6 @@
config BT_CMTP
tristate "CMTP protocol support"
- depends on BT && BT_L2CAP && ISDN_CAPI
+ depends on BT && ISDN_CAPI
help
CMTP (CAPI Message Transport Protocol) is a transport layer
for CAPI messages. CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9d..6c9c1fd601c 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,28 +53,24 @@ static LIST_HEAD(cmtp_session_list);
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &cmtp_session_list) {
- session = list_entry(p, struct cmtp_session, list);
+ list_for_each_entry(session, &cmtp_session_list, list)
if (!bacmp(bdaddr, &session->bdaddr))
return session;
- }
+
return NULL;
}
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +323,7 @@ static int cmtp_session(void *arg)
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
@@ -376,9 +373,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
__cmtp_link_session(session);
+ __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
@@ -431,19 +430,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct list_head *p;
+ struct cmtp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each(p, &cmtp_session_list) {
- struct cmtp_session *session;
+ list_for_each_entry(session, &cmtp_session_list, list) {
struct cmtp_conninfo ci;
- session = list_entry(p, struct cmtp_session, list);
-
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd9..3db432473ad 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
BT_DBG("%p", conn);
- if (conn->hdev->hci_ver < 2)
+ if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
bacpy(&cp.bdaddr, &conn->dst);
@@ -275,9 +275,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
}
}
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_timeout(struct work_struct *work)
{
- struct hci_conn *conn = (void *) arg;
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ disc_work.work);
struct hci_dev *hdev = conn->hdev;
__u8 reason;
@@ -311,6 +312,42 @@ static void hci_conn_timeout(unsigned long arg)
hci_dev_unlock(hdev);
}
+/* Enter sniff mode */
+static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p mode %d", conn, conn->mode);
+
+ if (test_bit(HCI_RAW, &hdev->flags))
+ return;
+
+ if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+ return;
+
+ if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+ return;
+
+ if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+ struct hci_cp_sniff_subrate cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_latency = cpu_to_le16(0);
+ cp.min_remote_timeout = cpu_to_le16(0);
+ cp.min_local_timeout = cpu_to_le16(0);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ struct hci_cp_sniff_mode cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+ cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+ cp.attempt = cpu_to_le16(4);
+ cp.timeout = cpu_to_le16(1);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+ }
+}
+
static void hci_conn_idle(unsigned long arg)
{
struct hci_conn *conn = (void *) arg;
@@ -325,12 +362,8 @@ static void hci_conn_auto_accept(unsigned long arg)
struct hci_conn *conn = (void *) arg;
struct hci_dev *hdev = conn->hdev;
- hci_dev_lock(hdev);
-
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
&conn->dst);
-
- hci_dev_unlock(hdev);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -374,7 +407,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
skb_queue_head_init(&conn->data_q);
- setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
+ INIT_LIST_HEAD(&conn->chan_list);
+
+ INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
(unsigned long) conn);
@@ -383,8 +418,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
hci_dev_hold(hdev);
- tasklet_disable(&hdev->tx_task);
-
hci_conn_hash_add(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
@@ -393,8 +426,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
hci_conn_init_sysfs(conn);
- tasklet_enable(&hdev->tx_task);
-
return conn;
}
@@ -406,7 +437,7 @@ int hci_conn_del(struct hci_conn *conn)
del_timer(&conn->idle_timer);
- del_timer(&conn->disc_timer);
+ cancel_delayed_work_sync(&conn->disc_work);
del_timer(&conn->auto_accept_timer);
@@ -430,14 +461,13 @@ int hci_conn_del(struct hci_conn *conn)
}
}
- tasklet_disable(&hdev->tx_task);
+
+ hci_chan_list_flush(conn);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- tasklet_enable(&hdev->tx_task);
-
skb_queue_purge(&conn->data_q);
hci_conn_put_device(conn);
@@ -453,16 +483,13 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
- read_lock_bh(&hci_dev_list_lock);
-
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
continue;
@@ -485,7 +512,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
if (hdev)
hdev = hci_dev_hold(hdev);
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
return hdev;
}
EXPORT_SYMBOL(hci_get_route);
@@ -673,7 +700,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
goto encrypt;
auth:
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return 0;
if (!hci_conn_auth(conn, sec_level, auth_type))
@@ -766,60 +793,18 @@ timer:
jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- if (test_bit(HCI_RAW, &hdev->flags))
- return;
-
- if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
- return;
-
- if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
- return;
-
- if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
- struct hci_cp_sniff_subrate cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
- hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
- }
-
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
- struct hci_cp_sniff_mode cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
- cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
- hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
- }
-}
-
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
+ struct hci_conn *c;
BT_DBG("hdev %s", hdev->name);
- p = h->list.next;
- while (p != &h->list) {
- struct hci_conn *c;
-
- c = list_entry(p, struct hci_conn, list);
- p = p->next;
-
+ list_for_each_entry_rcu(c, &h->list, list) {
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, 0x16);
+ hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
@@ -855,10 +840,10 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
+ register struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
- struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -881,11 +866,8 @@ int hci_get_conn_list(void __user *arg)
ci = cl->conn_info;
- hci_dev_lock_bh(hdev);
- list_for_each(p, &hdev->conn_hash.list) {
- register struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -895,7 +877,7 @@ int hci_get_conn_list(void __user *arg)
if (++n >= req.conn_num)
break;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
@@ -919,7 +901,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
@@ -929,7 +911,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
ci.state = conn->state;
ci.link_mode = conn->link_mode;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
@@ -945,14 +927,60 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
if (conn)
req.type = conn->auth_type;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_chan *chan;
+
+ BT_DBG("%s conn %p", hdev->name, conn);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ chan->conn = conn;
+ skb_queue_head_init(&chan->data_q);
+
+ list_add_rcu(&chan->list, &conn->chan_list);
+
+ return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+ list_del_rcu(&chan->list);
+
+ synchronize_rcu();
+
+ skb_queue_purge(&chan->data_q);
+ kfree(chan);
+
+ return 0;
+}
+
+void hci_chan_list_flush(struct hci_conn *conn)
+{
+ struct hci_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list)
+ hci_chan_del(chan);
+}
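The hci_conn.c hunks above convert the connection and channel lists to RCU: entries are published with list_add_rcu(), removed with list_del_rcu() followed by synchronize_rcu() before kfree(), and walked with list_for_each_entry_rcu() under rcu_read_lock(). The following is only a minimal sketch of that reader/writer pattern with a hypothetical item type and helpers, not the real struct hci_chan code:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int value;
};

static LIST_HEAD(items);			/* writers serialize among themselves elsewhere */

static void item_add(struct item *it)
{
	list_add_rcu(&it->list, &items);	/* publish entry to lockless readers */
}

static void item_del(struct item *it)
{
	list_del_rcu(&it->list);		/* unlink from the list */
	synchronize_rcu();			/* wait for in-flight readers to finish */
	kfree(it);				/* now safe to free */
}

static int item_sum(void)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, list)
		sum += it->value;
	rcu_read_unlock();

	return sum;
}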
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae3..845da3ee56a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -54,11 +55,11 @@
#define AUTO_OFF_TIMEOUT 2000
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
+int enable_hs;
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
@@ -68,10 +69,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
-/* HCI protocols */
-#define HCI_MAX_PROTO 2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
@@ -190,33 +187,20 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void bredr_init(struct hci_dev *hdev)
{
struct hci_cp_delete_stored_link_key cp;
- struct sk_buff *skb;
__le16 param;
__u8 flt_type;
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Driver initialization */
-
- /* Special commands */
- while ((skb = skb_dequeue(&hdev->driver_init))) {
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
-
- skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
- }
- skb_queue_purge(&hdev->driver_init);
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
/* Mandatory initialization */
/* Reset */
if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+ set_bit(HCI_RESET, &hdev->flags);
+ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
/* Read Local Supported Features */
@@ -228,18 +212,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-#if 0
- /* Host buffer size */
- {
- struct hci_cp_host_buffer_size cp;
- cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
- cp.sco_mtu = HCI_MAX_SCO_SIZE;
- cp.acl_max_pkt = cpu_to_le16(0xffff);
- cp.sco_max_pkt = cpu_to_le16(0xffff);
- hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
- }
-#endif
-
/* Read BD Address */
hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
@@ -267,6 +239,51 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
+static void amp_init(struct hci_dev *hdev)
+{
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+
+ /* Reset */
+ hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+
+ /* Read Local Version */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+}
+
+static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("%s %ld", hdev->name, opt);
+
+ /* Driver initialization */
+
+ /* Special commands */
+ while ((skb = skb_dequeue(&hdev->driver_init))) {
+ bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+ skb->dev = (void *) hdev;
+
+ skb_queue_tail(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+ skb_queue_purge(&hdev->driver_init);
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ bredr_init(hdev);
+ break;
+
+ case HCI_AMP:
+ amp_init(hdev);
+ break;
+
+ default:
+ BT_ERR("Unknown device type %d", hdev->dev_type);
+ break;
+ }
+
+}
+
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
BT_DBG("%s", hdev->name);
@@ -319,8 +336,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%d", index);
@@ -328,8 +344,7 @@ struct hci_dev *hci_dev_get(int index)
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -445,14 +460,14 @@ int hci_inquiry(void __user *arg)
if (!hdev)
return -ENODEV;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
@@ -474,9 +489,9 @@ int hci_inquiry(void __user *arg)
goto done;
}
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
@@ -523,8 +538,9 @@ int hci_dev_open(__u16 dev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- /* Treat all non BR/EDR controllers as raw devices for now */
- if (hdev->dev_type != HCI_BREDR)
+ /* Treat all non BR/EDR controllers as raw devices if
+ enable_hs is not set */
+ if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
@@ -551,13 +567,16 @@ int hci_dev_open(__u16 dev)
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags))
- mgmt_powered(hdev->id, 1);
+ if (!test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 1);
+ hci_dev_unlock(hdev);
+ }
} else {
/* Init failed, cleanup */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
- tasklet_kill(&hdev->cmd_task);
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->cmd_work);
+ flush_work(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -593,14 +612,25 @@ static int hci_dev_do_close(struct hci_dev *hdev)
return 0;
}
- /* Kill RX and TX tasks */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
+
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ }
- hci_dev_lock_bh(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ cancel_delayed_work(&hdev->service_cache);
+
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
@@ -613,12 +643,12 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ msecs_to_jiffies(250));
clear_bit(HCI_INIT, &hdev->flags);
}
- /* Kill cmd task */
- tasklet_kill(&hdev->cmd_task);
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -636,7 +666,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
* and no tasks are scheduled. */
hdev->close(hdev);
- mgmt_powered(hdev->id, 0);
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
/* Clear flags */
hdev->flags = 0;
@@ -670,7 +702,6 @@ int hci_dev_reset(__u16 dev)
return -ENODEV;
hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
@@ -679,10 +710,10 @@ int hci_dev_reset(__u16 dev)
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (hdev->flush)
hdev->flush(hdev);
@@ -695,7 +726,6 @@ int hci_dev_reset(__u16 dev)
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
- tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
@@ -794,9 +824,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
int hci_get_dev_list(void __user *arg)
{
+ struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -814,13 +844,10 @@ int hci_get_dev_list(void __user *arg)
dr = dl->dev_req;
- read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *hdev;
-
- hdev = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(hdev);
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -831,7 +858,7 @@ int hci_get_dev_list(void __user *arg)
if (++n >= dev_num)
break;
}
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
@@ -855,7 +882,8 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +940,7 @@ struct hci_dev *hci_alloc_dev(void)
if (!hdev)
return NULL;
+ hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);
return hdev;
@@ -938,39 +967,41 @@ static void hci_power_on(struct work_struct *work)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->flags))
- mod_timer(&hdev->off_timer,
- jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ schedule_delayed_work(&hdev->power_off,
+ msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_added(hdev->id);
+ mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ power_off.work);
BT_DBG("%s", hdev->name);
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
hci_dev_close(hdev->id);
}
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) data;
+ struct hci_dev *hdev;
+ u8 scan = SCAN_PAGE;
+
+ hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ hci_dev_lock(hdev);
- queue_work(hdev->workqueue, &hdev->power_off);
-}
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-void hci_del_off_timer(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
+ hdev->discov_timeout = 0;
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
- del_timer(&hdev->off_timer);
+ hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1038,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
-
- k = list_entry(p, struct link_key, list);
+ struct link_key *k;
+ list_for_each_entry(k, &hdev->link_keys, list)
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
- }
return NULL;
}
@@ -1138,7 +1164,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
persistent = hci_persistent_key(hdev, conn, type, old_key_type);
- mgmt_new_key(hdev->id, key, persistent);
+ mgmt_new_link_key(hdev, key, persistent);
if (!persistent) {
list_del(&key->list);
@@ -1181,7 +1207,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
memcpy(id->rand, rand, sizeof(id->rand));
if (new_key)
- mgmt_new_key(hdev->id, key, old_key_type);
+ mgmt_new_link_key(hdev, key, old_key_type);
return 0;
}
@@ -1209,7 +1235,7 @@ static void hci_cmd_timer(unsigned long arg)
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
@@ -1279,16 +1305,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
+ struct bdaddr_list *b;
+ list_for_each_entry(b, &hdev->blacklist, list)
if (bacmp(bdaddr, &b->bdaddr) == 0)
return b;
- }
return NULL;
}
@@ -1327,31 +1348,30 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
list_add(&entry->list, &hdev->blacklist);
- return mgmt_device_blocked(hdev->id, bdaddr);
+ return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+ if (bacmp(bdaddr, BDADDR_ANY) == 0)
return hci_blacklist_clear(hdev);
- }
entry = hci_blacklist_lookup(hdev, bdaddr);
- if (!entry) {
+ if (!entry)
return -ENOENT;
- }
list_del(&entry->list);
kfree(entry);
- return mgmt_device_unblocked(hdev->id, bdaddr);
+ return mgmt_device_unblocked(hdev, bdaddr);
}
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
{
- struct hci_dev *hdev = (void *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ adv_work.work);
hci_dev_lock(hdev);
@@ -1425,7 +1445,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
- int i, id = 0;
+ int i, id, error;
BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
hdev->bus, hdev->owner);
@@ -1433,7 +1453,12 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
- write_lock_bh(&hci_dev_list_lock);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
+ write_lock(&hci_dev_list_lock);
/* Find first available device id */
list_for_each(p, &hci_dev_list) {
@@ -1444,12 +1469,13 @@ int hci_register_dev(struct hci_dev *hdev)
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
- list_add(&hdev->list, head);
+ list_add_tail(&hdev->list, head);
atomic_set(&hdev->refcnt, 1);
- spin_lock_init(&hdev->lock);
+ mutex_init(&hdev->lock);
hdev->flags = 0;
+ hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1459,9 +1485,10 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
- tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
- tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+ INIT_WORK(&hdev->rx_work, hci_rx_work);
+ INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+ INIT_WORK(&hdev->tx_work, hci_tx_work);
+
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
@@ -1479,6 +1506,8 @@ int hci_register_dev(struct hci_dev *hdev)
hci_conn_hash_init(hdev);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
@@ -1488,24 +1517,29 @@ int hci_register_dev(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
- setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
- (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_WORK(&hdev->power_off, hci_power_off);
- setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
- hdev->workqueue = create_singlethread_workqueue(hdev->name);
- if (!hdev->workqueue)
- goto nomem;
+ hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!hdev->workqueue) {
+ error = -ENOMEM;
+ goto err;
+ }
- hci_register_sysfs(hdev);
+ error = hci_add_sysfs(hdev);
+ if (error < 0)
+ goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1518,31 +1552,33 @@ int hci_register_dev(struct hci_dev *hdev)
set_bit(HCI_AUTO_OFF, &hdev->flags);
set_bit(HCI_SETUP, &hdev->flags);
- queue_work(hdev->workqueue, &hdev->power_on);
+ schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
return id;
-nomem:
- write_lock_bh(&hci_dev_list_lock);
+err_wqueue:
+ destroy_workqueue(hdev->workqueue);
+err:
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
- return -ENOMEM;
+ return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
{
int i;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock_bh(&hci_dev_list_lock);
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
hci_dev_do_close(hdev);
@@ -1550,8 +1586,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_removed(hdev->id);
+ !test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
+ mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
+ }
+
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
@@ -1560,24 +1603,21 @@ int hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- hci_unregister_sysfs(hdev);
+ hci_del_sysfs(hdev);
- hci_del_off_timer(hdev);
- del_timer(&hdev->adv_timer);
+ cancel_delayed_work_sync(&hdev->adv_work);
destroy_workqueue(hdev->workqueue);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
__hci_dev_put(hdev);
-
- return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1613,9 +1653,8 @@ int hci_recv_frame(struct sk_buff *skb)
/* Time stamp */
__net_timestamp(skb);
- /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- tasklet_schedule(&hdev->rx_task);
+ queue_work(hdev->workqueue, &hdev->rx_work);
return 0;
}
@@ -1787,59 +1826,13 @@ EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (!hci_proto[hp->id])
- hci_proto[hp->id] = hp;
- else
- err = -EEXIST;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (hci_proto[hp->id])
- hci_proto[hp->id] = NULL;
- else
- err = -ENOENT;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
@@ -1849,9 +1842,9 @@ int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
@@ -1912,7 +1905,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
hdev->init_last_cmd = opcode;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
@@ -1948,23 +1941,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
- skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
-
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(&conn->data_q, skb);
+ skb_queue_tail(queue, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1960,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&conn->data_q.lock);
+ spin_lock(&queue->lock);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
flags &= ~ACL_START;
flags |= ACL_CONT;
@@ -1987,13 +1975,27 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&conn->data_q.lock);
+ spin_unlock(&queue->lock);
}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+ skb->dev = (void *) hdev;
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
+ hci_queue_acl(conn, &chan->data_q, skb, flags);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
@@ -2016,7 +2018,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
@@ -2026,16 +2028,15 @@ EXPORT_SYMBOL(hci_send_sco);
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
+ struct hci_conn *conn = NULL, *c;
int num = 0, min = ~0;
- struct list_head *p;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -2053,6 +2054,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
break;
}
+ rcu_read_unlock();
+
if (conn) {
int cnt, q;
@@ -2084,27 +2087,159 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
+ struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
+ rcu_read_lock();
+
/* Kill stalled connections */
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
}
+
+ rcu_read_unlock();
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_chan *chan = NULL;
+ int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
+ int cnt, q, conn_num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *tmp;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ conn_num++;
+
+ list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&tmp->data_q))
+ continue;
+
+ skb = skb_peek(&tmp->data_q);
+ if (skb->priority < cur_prio)
+ continue;
+
+ if (skb->priority > cur_prio) {
+ num = 0;
+ min = ~0;
+ cur_prio = skb->priority;
+ }
+
+ num++;
+
+ if (conn->sent < min) {
+ min = conn->sent;
+ chan = tmp;
+ }
+ }
+
+ if (hci_conn_num(hdev, type) == conn_num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ if (!chan)
+ return NULL;
+
+ switch (chan->conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
+ *quote = q ? q : 1;
+ BT_DBG("chan %p quote %d", chan, *quote);
+ return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn;
+ int num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *chan;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (chan->sent) {
+ chan->sent = 0;
+ continue;
+ }
+
+ if (skb_queue_empty(&chan->data_q))
+ continue;
+
+ skb = skb_peek(&chan->data_q);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ continue;
+
+ skb->priority = HCI_PRIO_MAX - 1;
+
+ BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ skb->priority);
+ }
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
struct sk_buff *skb;
int quote;
+ unsigned int cnt;
BT_DBG("%s", hdev->name);
@@ -2118,19 +2253,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
hci_link_tx_to(hdev, ACL_LINK);
}
- while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ cnt = hdev->acl_cnt;
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
@@ -2182,9 +2333,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
static inline void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_conn *conn;
+ struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt;
+ int quote, cnt, tmp;
BT_DBG("%s", hdev->name);
@@ -2200,30 +2351,42 @@ static inline void hci_sched_le(struct hci_dev *hdev)
}
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
}
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
struct sk_buff *skb;
- read_lock(&hci_task_lock);
-
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
@@ -2240,8 +2403,6 @@ static void hci_tx_task(unsigned long arg)
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
-
- read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */
@@ -2268,16 +2429,11 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->recv_acldata) {
- hp->recv_acldata(conn, skb, flags);
- return;
- }
+ l2cap_recv_acldata(conn, skb, flags);
+ return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
@@ -2306,14 +2462,9 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->recv_scodata) {
- hp->recv_scodata(conn, skb);
- return;
- }
+ sco_recv_scodata(conn, skb);
+ return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
@@ -2322,15 +2473,13 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb);
}
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
- read_lock(&hci_task_lock);
-
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
@@ -2355,6 +2504,7 @@ static void hci_rx_task(unsigned long arg)
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
@@ -2373,13 +2523,11 @@ static void hci_rx_task(unsigned long arg)
break;
}
}
-
- read_unlock(&hci_task_lock);
}
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2403,7 +2551,38 @@ static void hci_cmd_task(unsigned long arg)
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
}
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EINPROGRESS;
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = length;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EPERM;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
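The hci_core.c hunks above replace the RX/TX/command tasklets with work items on a per-device workqueue (INIT_WORK() plus alloc_workqueue()/queue_work()), moving HCI processing into process context; that is also what allows hdev->lock to become a mutex in the mutex_init() hunk. A minimal sketch of the same pattern, using a hypothetical my_dev structure rather than struct hci_dev:

#include <linux/workqueue.h>
#include <linux/skbuff.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct rx_work;
	struct sk_buff_head rx_q;
};

static void my_rx_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, rx_work);
	struct sk_buff *skb;

	/* runs in process context, so sleeping locks may be taken here */
	while ((skb = skb_dequeue(&dev->rx_q)))
		kfree_skb(skb);			/* real code would parse the frame */
}

static int my_dev_setup(struct my_dev *dev)
{
	skb_queue_head_init(&dev->rx_q);
	INIT_WORK(&dev->rx_work, my_rx_work);

	dev->wq = alloc_workqueue("my_dev", WQ_HIGHPRI | WQ_UNBOUND |
				  WQ_MEM_RECLAIM, 1);
	if (!dev->wq)
		return -ENOMEM;

	return 0;
}

static void my_dev_rx(struct my_dev *dev, struct sk_buff *skb)
{
	skb_queue_tail(&dev->rx_q, skb);
	queue_work(dev->wq, &dev->rx_work);	/* replaces tasklet_schedule() */
}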
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0..4221bd256bd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,7 +45,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-static int enable_le;
+static bool enable_le;
/* Handle HCI Event packets */
@@ -55,12 +55,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ clear_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,10 +82,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_conn_check_pending(hdev);
}
@@ -192,6 +194,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_RESET, &hdev->flags);
hci_req_complete(hdev, HCI_OP_RESET, status);
+
+ hdev->dev_flags = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -205,13 +209,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_set_local_name_complete(hdev->id, sent, status);
+ mgmt_set_local_name_complete(hdev, sent, status);
- if (status)
- return;
+ if (status == 0)
+ memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
- memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +280,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ __u8 param, status = *((__u8 *) skb->data);
+ int old_pscan, old_iscan;
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +290,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
- if (!status) {
- __u8 param = *((__u8 *) sent);
- int old_pscan, old_iscan;
-
- old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
- old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+ param = *((__u8 *) sent);
- if (param & SCAN_INQUIRY) {
- set_bit(HCI_ISCAN, &hdev->flags);
- if (!old_iscan)
- mgmt_discoverable(hdev->id, 1);
- } else if (old_iscan)
- mgmt_discoverable(hdev->id, 0);
+ hci_dev_lock(hdev);
- if (param & SCAN_PAGE) {
- set_bit(HCI_PSCAN, &hdev->flags);
- if (!old_pscan)
- mgmt_connectable(hdev->id, 1);
- } else if (old_pscan)
- mgmt_connectable(hdev->id, 0);
+ if (status != 0) {
+ mgmt_write_scan_failed(hdev, param, status);
+ hdev->discov_timeout = 0;
+ goto done;
}
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev, 1);
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else if (old_iscan)
+ mgmt_discoverable(hdev, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev, 0);
+
+done:
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
@@ -359,11 +378,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -390,11 +406,8 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -481,7 +494,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
/* CSR 1.1 dongles does not accept any bitfield so don't try to set
* any event mask for pre 1.2 devices */
- if (hdev->lmp_ver <= 1)
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
events[4] |= 0x01; /* Flow Specification Complete */
@@ -543,9 +556,12 @@ static void hci_set_le_support(struct hci_dev *hdev)
static void hci_setup(struct hci_dev *hdev)
{
+ if (hdev->dev_type != HCI_BREDR)
+ return;
+
hci_setup_event_mask(hdev);
- if (hdev->lmp_ver > 1)
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
@@ -700,6 +716,21 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->flow_ctl_mode = rp->mode;
+
+ hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
+}
+
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -739,6 +770,28 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->block_len = __le16_to_cpu(rp->block_len);
+ hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
+ hdev->block_cnt = hdev->num_blocks;
+
+ BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+ hdev->block_cnt, hdev->block_len);
+
+ hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
+}
+
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -748,6 +801,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->amp_status = rp->amp_status;
+ hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+ hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+ hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+ hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+ hdev->amp_type = rp->amp_type;
+ hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+ hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+ hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+ hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -804,19 +881,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+ mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status != 0)
- return;
+ goto unlock;
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
if (!cp)
- return;
+ goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
+
+unlock:
+ hci_dev_unlock(hdev);
}
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +907,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
+
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -855,9 +942,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +958,44 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +1005,17 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
- mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
rp->randomizer, rp->status);
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -898,14 +1033,28 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
- if (cp->enable == 0x01) {
- del_timer(&hdev->adv_timer);
+ switch (cp->enable) {
+ case LE_SCANNING_ENABLED:
+ set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ cancel_delayed_work_sync(&hdev->adv_work);
hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
- } else if (cp->enable == 0x00) {
- mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+ break;
+
+ case LE_SCANNING_DISABLED:
+ clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ cancel_delayed_work_sync(&hdev->adv_work);
+ queue_delayed_work(hdev->workqueue, &hdev->adv_work,
+ jiffies + ADV_CLEAR_TIMEOUT);
+ break;
+
+ default:
+ BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
+ break;
}
}
@@ -955,12 +1104,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
}
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
+ set_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 1);
+ hci_dev_unlock(hdev);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1494,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, status);
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
}
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1519,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1377,8 +1529,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
data.rssi = 0x00;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
- NULL);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, 0, NULL);
}
hci_dev_unlock(hdev);
@@ -1412,7 +1564,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type);
} else
conn->state = BT_CONNECTED;
@@ -1434,7 +1587,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
}
/* Set packet type for incoming connection */
- if (!conn->out && hdev->hci_ver < 3) {
+ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1444,7 +1597,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
@@ -1531,7 +1685,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x0f;
+ cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
@@ -1543,24 +1697,27 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, ev->status);
- if (ev->status) {
- mgmt_disconnect_failed(hdev->id);
- return;
- }
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
- conn->state = BT_CLOSED;
+ if (ev->status == 0)
+ conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK)
- mgmt_disconnected(hdev->id, &conn->dst);
+ if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+ if (ev->status != 0)
+ mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+ else
+ mgmt_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type);
+ }
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
+ if (ev->status == 0) {
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -1588,7 +1745,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->sec_level = conn->pending_sec_level;
}
} else {
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1800,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_lock(hdev);
if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
- mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+ mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
@@ -1894,10 +2051,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_bd_addr(hdev, skb);
break;
+ case HCI_OP_READ_DATA_BLOCK_SIZE:
+ hci_cc_read_data_block_size(hdev, skb);
+ break;
+
case HCI_OP_WRITE_CA_TIMEOUT:
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_READ_FLOW_CONTROL_MODE:
+ hci_cc_read_flow_control_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_INFO:
+ hci_cc_read_local_amp_info(hdev, skb);
+ break;
+
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
@@ -1942,6 +2111,17 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
+ case HCI_OP_USER_PASSKEY_REPLY:
+ hci_cc_user_passkey_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_PASSKEY_NEG_REPLY:
+ hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_SCAN_PARAM:
+ hci_cc_le_set_scan_param(hdev, skb);
+ break;
+
case HCI_OP_LE_SET_SCAN_ENABLE:
hci_cc_le_set_scan_enable(hdev, skb);
break;
@@ -1969,7 +2149,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
@@ -2029,7 +2209,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_OP_DISCONNECT:
if (ev->status != 0)
- mgmt_disconnect_failed(hdev->id);
+ mgmt_disconnect_failed(hdev, NULL, ev->status);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2051,7 +2231,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
@@ -2084,56 +2264,68 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
- __le16 *ptr;
int i;
skb_pull(skb, sizeof(*ev));
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
+
if (skb->len < ev->num_hndl * 4) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
- tasklet_disable(&hdev->tx_task);
-
- for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_pkts_info *info = &ev->handles[i];
struct hci_conn *conn;
__u16 handle, count;
- handle = get_unaligned_le16(ptr++);
- count = get_unaligned_le16(ptr++);
+ handle = __le16_to_cpu(info->handle);
+ count = __le16_to_cpu(info->count);
conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (conn) {
- conn->sent -= count;
-
- if (conn->type == ACL_LINK) {
+ if (!conn)
+ continue;
+
+ conn->sent -= count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ break;
+
+ case LE_LINK:
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
- } else if (conn->type == LE_LINK) {
- if (hdev->le_pkts) {
- hdev->le_cnt += count;
- if (hdev->le_cnt > hdev->le_pkts)
- hdev->le_cnt = hdev->le_pkts;
- } else {
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- }
- } else {
- hdev->sco_cnt += count;
- if (hdev->sco_cnt > hdev->sco_pkts)
- hdev->sco_cnt = hdev->sco_pkts;
}
+ break;
+
+ case SCO_LINK:
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
+ hdev->sco_cnt = hdev->sco_pkts;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
}
}
- tasklet_schedule(&hdev->tx_task);
-
- tasklet_enable(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2194,7 +2386,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
else
secure = 0;
- mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+ mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
}
unlock:
@@ -2363,12 +2555,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
@@ -2383,7 +2569,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
NULL);
}
@@ -2400,7 +2586,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
NULL);
}
@@ -2531,12 +2717,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2729,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x01;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
- info->rssi, info->data);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi, info->data);
}
hci_dev_unlock(hdev);
@@ -2614,7 +2794,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x18; /* Pairing not allowed */
+ cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
@@ -2706,13 +2886,28 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
confirm:
- mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
confirm_hint);
unlock:
hci_dev_unlock(hdev);
}
+static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_passkey_request(hdev, &ev->bdaddr);
+
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@@ -2732,7 +2927,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
hci_conn_put(conn);
@@ -2813,14 +3008,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status) {
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -3051,6 +3247,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_user_confirm_request_evt(hdev, skb);
break;
+ case HCI_EV_USER_PASSKEY_REQUEST:
+ hci_user_passkey_request_evt(hdev, skb);
+ break;
+
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
@@ -3104,5 +3304,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
kfree_skb(skb);
}
-module_param(enable_le, bool, 0444);
+module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");
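Note on the hci_event.c hunks above: the completed-packets handler no longer wraps its work in tasklet_disable()/tasklet_schedule()/tasklet_enable() on hdev->tx_task; it simply queues hdev->tx_work on the driver workqueue, so TX processing moves from softirq to process context. A minimal sketch of that tasklet-to-work-item pattern, with my_dev, my_tx_work and the queue name being placeholders rather than anything from the patch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct	*workqueue;
	struct work_struct	tx_work;
};

static void my_tx_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, tx_work);

	/* drain the device's TX queues; sleeping is allowed in this context */
	(void)dev;
}

static int my_dev_setup(struct my_dev *dev)
{
	dev->workqueue = create_singlethread_workqueue("my_dev");
	if (!dev->workqueue)
		return -ENOMEM;

	INIT_WORK(&dev->tx_work, my_tx_work);
	return 0;
}

static void my_dev_kick_tx(struct my_dev *dev)
{
	/* replaces tasklet_schedule(&dev->tx_task) */
	queue_work(dev->workqueue, &dev->tx_work);
}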
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f6afe3d76a6..6d94616af31 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -49,7 +49,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-static int enable_mgmt;
+static bool enable_mgmt;
/* ----- HCI socket interface ----- */
@@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_blacklist_add(hdev, &bdaddr);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return err;
}
@@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_blacklist_del(hdev, &bdaddr);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return err;
}
@@ -343,8 +343,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
return -EINVAL;
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
- return -EINVAL;
+ if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+ if (!enable_mgmt)
+ return -EINVAL;
+ set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
+ }
lock_sock(sk);
@@ -535,10 +538,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
} else {
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
} else {
if (!capable(CAP_NET_RAW)) {
@@ -547,7 +550,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
err = len;
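Two themes recur in the hci_sock.c hunks: enable_mgmt changes from int to bool (presumably to match its module_param() declaration, as done for enable_le above), and the _bh lock helpers and tasklet scheduling give way to plain locking and queue_work() now that the callers run in process context. A minimal sketch of the boolean module-parameter form, with enable_feature as a hypothetical flag not taken from the patch:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* illustrative flag only */
static bool enable_feature;
module_param(enable_feature, bool, 0644);
MODULE_PARM_DESC(enable_feature, "Enable the optional feature");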
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 661b461cf0b..52109561423 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -89,11 +89,35 @@ static struct device_type bt_link = {
.release = bt_link_release,
};
-static void add_conn(struct work_struct *work)
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support move zombie device,
+ * so we should move the device before conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
+{
+ return !strncmp(dev_name(dev), "rfcomm", 6);
+}
+
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
struct hci_dev *hdev = conn->hdev;
+ BT_DBG("conn %p", conn);
+
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
dev_set_drvdata(&conn->dev, conn);
@@ -106,19 +130,8 @@ static void add_conn(struct work_struct *work)
hci_dev_hold(hdev);
}
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "rfcomm", 6);
-}
-
-static void del_conn(struct work_struct *work)
+void hci_conn_del_sysfs(struct hci_conn *conn)
{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;
if (!device_is_registered(&conn->dev))
@@ -140,36 +153,6 @@ static void del_conn(struct work_struct *work)
hci_dev_put(hdev);
}
-void hci_conn_init_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
- conn->dev.parent = &hdev->dev;
-
- device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work_add, add_conn);
- INIT_WORK(&conn->work_del, del_conn);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- queue_work(conn->hdev->workqueue, &conn->work_add);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- queue_work(conn->hdev->workqueue, &conn->work_del);
-}
-
static inline char *host_bustostr(int bus)
{
switch (bus) {
@@ -403,7 +386,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
for (e = cache->list; e; e = e->next) {
struct inquiry_data *data = &e->data;
@@ -416,7 +399,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
data->rssi, data->ssp_mode, e->timestamp);
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -436,19 +419,14 @@ static const struct file_operations inquiry_cache_fops = {
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bdaddr_list *b;
- hci_dev_lock_bh(hdev);
-
- list_for_each(l, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(l, struct bdaddr_list, list);
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->blacklist, list)
seq_printf(f, "%s\n", batostr(&b->bdaddr));
- }
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -485,19 +463,14 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
-
- hci_dev_lock_bh(hdev);
-
- list_for_each(l, &hdev->uuids) {
- struct bt_uuid *uuid;
+ struct bt_uuid *uuid;
- uuid = list_entry(l, struct bt_uuid, list);
+ hci_dev_lock(hdev);
+ list_for_each_entry(uuid, &hdev->uuids, list)
print_bt_uuid(f, uuid->uuid);
- }
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -518,11 +491,11 @@ static int auto_accept_delay_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->auto_accept_delay = val;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -531,11 +504,11 @@ static int auto_accept_delay_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
*val = hdev->auto_accept_delay;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -543,22 +516,28 @@ static int auto_accept_delay_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+ struct device *dev = &hdev->dev;
+
+ dev->type = &bt_host;
+ dev->class = bt_class;
+
+ dev_set_drvdata(dev, hdev);
+ device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->type = &bt_host;
- dev->class = bt_class;
dev->parent = hdev->parent;
-
dev_set_name(dev, "%s", hdev->name);
- dev_set_drvdata(dev, hdev);
-
- err = device_register(dev);
+ err = device_add(dev);
if (err < 0)
return err;
@@ -582,7 +561,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
return 0;
}
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 86a91543172..4deaca78e91 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
+ depends on BT && INPUT && HID_SUPPORT
select HID
help
HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 075a3e920ca..d478be11d56 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
{
struct hidp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &hidp_session_list) {
- session = list_entry(p, struct hidp_session, list);
+ list_for_each_entry(session, &hidp_session_list, list) {
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
+
return NULL;
}
static void __hidp_link_session(struct hidp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &hidp_session_list);
-
- hci_conn_hold_device(session->conn);
}
static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session)
hci_conn_put_device(session->conn);
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
BT_DBG("session %p data %p size %d", session, data, size);
+ if (atomic_read(&session->terminate))
+ return -EIO;
+
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid,
struct sk_buff *skb;
size_t len;
int numbered_reports = hid->report_enum[report_type].numbered;
+ int ret;
switch (report_type) {
case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
session->waiting_report_number = numbered_reports ? report_number : -1;
set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
data[0] = report_number;
- if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
- goto err_eio;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
+ if (ret)
+ goto err;
/* Wait for the return of the report. The returned report
gets put in session->report_return. */
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid,
5*HZ);
if (res == 0) {
/* timeout */
- goto err_eio;
+ ret = -EIO;
+ goto err;
}
if (res < 0) {
/* signal */
- goto err_restartsys;
+ ret = -ERESTARTSYS;
+ goto err;
}
}
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid,
return len;
-err_restartsys:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return -ERESTARTSYS;
-err_eio:
+err:
clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
mutex_unlock(&session->report_mutex);
- return -EIO;
+ return ret;
}
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
/* Set up our wait, and send the report request to the device. */
set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count)) {
- ret = -ENOMEM;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
+ count);
+ if (ret)
goto err;
- }
/* Wait for the ACK from the device. */
while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session,
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
+
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session,
}
/* Wake up the waiting thread. */
- if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_intr_transmit(struct hidp_session *session)
{
struct sk_buff *skb;
BT_DBG("session %p", session);
- while ((skb = skb_dequeue(&session->ctrl_transmit))) {
- if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->ctrl_transmit, skb);
+ while ((skb = skb_dequeue(&session->intr_transmit))) {
+ if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->intr_transmit, skb);
break;
}
hidp_set_timer(session);
kfree_skb(skb);
}
+}
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
+static void hidp_process_ctrl_transmit(struct hidp_session *session)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("session %p", session);
+
+ while ((skb = skb_dequeue(&session->ctrl_transmit))) {
+ if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->ctrl_transmit, skb);
break;
}
@@ -700,6 +701,7 @@ static int hidp_session(void *arg)
BT_DBG("session %p", session);
+ __module_get(THIS_MODULE);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
@@ -714,23 +716,25 @@ static int hidp_session(void *arg)
intr_sk->sk_state != BT_CONNECTED)
break;
- while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_ctrl_frame(session, skb);
+ hidp_recv_intr_frame(session, skb);
else
kfree_skb(skb);
}
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ hidp_process_intr_transmit(session);
+
+ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_intr_frame(session, skb);
+ hidp_recv_ctrl_frame(session, skb);
else
kfree_skb(skb);
}
- hidp_process_transmit(session);
+ hidp_process_ctrl_transmit(session);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@ static int hidp_session(void *arg)
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+
down_write(&hidp_session_sem);
hidp_del_timer(session);
@@ -772,34 +780,37 @@ static int hidp_session(void *arg)
kfree(session->rd_data);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
-static struct device *hidp_get_device(struct hidp_session *session)
+static struct hci_conn *hidp_get_connection(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct device *device = NULL;
+ struct hci_conn *conn;
struct hci_dev *hdev;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
- session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (session->conn)
- device = &session->conn->dev;
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (conn)
+ hci_conn_hold_device(conn);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
- return device;
+ return conn;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int err, i;
+ int i;
input = input_allocate_device();
if (!input)
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session,
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = hidp_get_device(session);
+ input->dev.parent = &session->conn->dev;
input->event = hidp_input_event;
- err = input_register_device(input);
- if (err < 0) {
- input_free_device(input);
- session->input = NULL;
- return err;
- }
-
return 0;
}
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session,
strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
- hid->dev.parent = hidp_get_device(session);
+ hid->dev.parent = &session->conn->dev;
hid->ll_driver = &hidp_hid_driver;
hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session)
- return -ENOMEM;
-
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
down_write(&hidp_session_sem);
s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
if (s && s->state == BT_CONNECTED) {
- err = -EEXIST;
- goto failed;
+ up_write(&hidp_session_sem);
+ return -EEXIST;
+ }
+
+ session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+ if (!session) {
+ up_write(&hidp_session_sem);
+ return -ENOMEM;
}
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->intr_sock = intr_sock;
session->state = BT_CONNECTED;
+ session->conn = hidp_get_connection(session);
+ if (!session->conn) {
+ err = -ENOTCONN;
+ goto failed;
+ }
+
setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
+ __hidp_link_session(session);
+
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
- if (err && err != -ENODEV)
+ if (err)
goto purge;
}
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
goto purge;
}
- __hidp_link_session(session);
-
hidp_set_timer(session);
if (session->hid) {
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
!session->waiting_for_startup);
}
- err = hid_add_device(session->hid);
+ if (session->hid)
+ err = hid_add_device(session->hid);
+ else
+ err = input_register_device(session->input);
+
if (err < 0) {
atomic_inc(&session->terminate);
wake_up_process(session->task);
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
unlink:
hidp_del_timer(session);
- __hidp_unlink_session(session);
-
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
@@ -1093,6 +1107,8 @@ unlink:
session->rd_data = NULL;
purge:
+ __hidp_unlink_session(session);
+
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req)
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct list_head *p;
+ struct hidp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each(p, &hidp_session_list) {
- struct hidp_session *session;
+ list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
- session = list_entry(p, struct hidp_session, list);
-
__hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
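In the hidp_process_handshake() hunks above, separate test_bit()/clear_bit() pairs become a single test_and_clear_bit() before waking the waiter, so the flag is observed and cleared in one atomic step. A rough sketch of that wake-up pattern; FLAG_WAITING, my_flags and my_wq are placeholders, not names from the patch:

#include <linux/bitops.h>
#include <linux/wait.h>

#define FLAG_WAITING	0

static unsigned long my_flags;
static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void my_report_arrived(void)
{
	/* atomically check-and-clear the flag, then wake the sleeper */
	if (test_and_clear_bit(FLAG_WAITING, &my_flags))
		wake_up_interruptible(&my_wq);
}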
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf..aa78d8c4b93 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -56,10 +57,10 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
-int disable_ertm;
+bool disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
@@ -76,38 +77,38 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP channels ---- */
-static inline void chan_hold(struct l2cap_chan *c)
-{
- atomic_inc(&c->refcnt);
-}
-
-static inline void chan_put(struct l2cap_chan *c)
-{
- if (atomic_dec_and_test(&c->refcnt))
- kfree(c);
-}
-
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->dcid == cid)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->dcid == cid) {
+ r = c;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+ return r;
}
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->scid == cid)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->scid == cid) {
+ r = c;
+ break;
+ }
}
- return NULL;
+
+ rcu_read_unlock();
+ return r;
}
/* Find channel with given SCID.
@@ -116,34 +117,36 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci
{
struct l2cap_chan *c;
- read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
+ lock_sock(c->sk);
return c;
}
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->ident == ident)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->ident == ident) {
+ r = c;
+ break;
+ }
}
- return NULL;
+
+ rcu_read_unlock();
+ return r;
}
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
struct l2cap_chan *c;
- read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
+ lock_sock(c->sk);
return c;
}
@@ -153,12 +156,9 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
list_for_each_entry(c, &chan_list, global_l) {
if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
- goto found;
+ return c;
}
-
- c = NULL;
-found:
- return c;
+ return NULL;
}
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
@@ -217,45 +217,51 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
return 0;
}
-static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
-{
- BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
-
- if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
- chan_hold(chan);
-}
-
-static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+static char *state_to_string(int state)
{
- BT_DBG("chan %p state %d", chan, chan->state);
+ switch(state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
- if (timer_pending(timer) && del_timer(timer))
- chan_put(chan);
+ return "invalid state";
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
+ BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+ state_to_string(state));
+
chan->state = state;
chan->ops->state_change(chan->data, state);
}
-static void l2cap_chan_timeout(unsigned long arg)
+static void l2cap_chan_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ chan_timer.work);
struct sock *sk = chan->sk;
int reason;
BT_DBG("chan %p state %d", chan, chan->state);
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- chan_put(chan);
- return;
- }
+ lock_sock(sk);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
reason = ECONNREFUSED;
@@ -267,10 +273,10 @@ static void l2cap_chan_timeout(unsigned long arg)
l2cap_chan_close(chan, reason);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
- chan_put(chan);
+ l2cap_chan_put(chan);
}
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
@@ -287,12 +293,14 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
list_add(&chan->global_l, &chan_list);
write_unlock_bh(&chan_list_lock);
- setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+ INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
chan->state = BT_OPEN;
atomic_set(&chan->refcnt, 1);
+ BT_DBG("sk %p chan %p", sk, chan);
+
return chan;
}
@@ -302,15 +310,15 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
list_del(&chan->global_l);
write_unlock_bh(&chan_list_lock);
- chan_put(chan);
+ l2cap_chan_put(chan);
}
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
chan->psm, chan->dcid);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
chan->conn = conn;
@@ -337,9 +345,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
chan->omtu = L2CAP_DEFAULT_MTU;
}
- chan_hold(chan);
+ chan->local_id = L2CAP_BESTEFFORT_ID;
+ chan->local_stype = L2CAP_SERV_BESTEFFORT;
+ chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
+ chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
+ chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
+ chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ l2cap_chan_hold(chan);
- list_add(&chan->list, &conn->chan_l);
+ list_add_rcu(&chan->list, &conn->chan_l);
}
/* Delete channel.
@@ -356,10 +371,10 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (conn) {
/* Delete from channel list */
- write_lock_bh(&conn->chan_lock);
- list_del(&chan->list);
- write_unlock_bh(&conn->chan_lock);
- chan_put(chan);
+ list_del_rcu(&chan->list);
+ synchronize_rcu();
+
+ l2cap_chan_put(chan);
chan->conn = NULL;
hci_conn_put(conn->hcon);
@@ -508,7 +523,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
}
/* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
__u8 auth_type;
@@ -556,34 +571,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
flags = ACL_START;
bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ skb->priority = HCI_PRIO_MAX;
- hci_send_acl(conn->hcon, skb, flags);
+ hci_send_acl(conn->hchan, skb, flags);
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct hci_conn *hcon = chan->conn->hcon;
+ u16 flags;
+
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+ if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ hci_send_acl(chan->conn->hchan, skb, flags);
+}
+
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
struct l2cap_conn *conn = chan->conn;
- int count, hlen = L2CAP_HDR_SIZE + 2;
- u8 flags;
+ int count, hlen;
if (chan->state != BT_CONNECTED)
return;
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%2.2x", chan, control);
+ BT_DBG("chan %p, control 0x%8.8x", chan, control);
count = min_t(unsigned int, conn->mtu, hlen);
- control |= L2CAP_CTRL_FRAME_TYPE;
+
+ control |= __set_sframe(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= L2CAP_CTRL_POLL;
+ control |= __set_ctrl_poll(chan);
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
@@ -592,32 +631,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - 2);
- put_unaligned_le16(fcs, skb_put(skb, 2));
+ u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- if (lmp_no_flush_capable(conn->hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
-
- hci_send_acl(chan->conn->hcon, skb, flags);
+ skb->priority = HCI_PRIO_MAX;
+ l2cap_do_send(chan, skb);
}
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
} else
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
l2cap_send_sframe(chan, control);
}
@@ -635,7 +669,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_check_security(chan) &&
+ if (l2cap_chan_check_security(chan) &&
__l2cap_no_conn_pending(chan)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(chan->scid);
@@ -654,7 +688,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
+ schedule_delayed_work(&conn->info_timer,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
@@ -706,13 +740,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan, *tmp;
+ struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -725,7 +759,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (chan->state == BT_CONNECT) {
struct l2cap_conn_req req;
- if (!l2cap_check_security(chan) ||
+ if (!l2cap_chan_check_security(chan) ||
!__l2cap_no_conn_pending(chan)) {
bh_unlock_sock(sk);
continue;
@@ -736,9 +770,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
&chan->conf_state)) {
/* l2cap_chan_close() calls list_del(chan)
* so release the lock */
- read_unlock(&conn->chan_lock);
l2cap_chan_close(chan, ECONNRESET);
- read_lock(&conn->chan_lock);
bh_unlock_sock(sk);
continue;
}
@@ -758,7 +790,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
- if (l2cap_check_security(chan)) {
+ if (l2cap_chan_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -794,7 +826,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
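The channel-list walks in l2cap_core.c trade the conn->chan_lock rwlock for RCU: readers take rcu_read_lock() around list_for_each_entry_rcu(), while writers (as l2cap_chan_add()/l2cap_chan_del() show) use list_add_rcu()/list_del_rcu() followed by synchronize_rcu(). A simplified reader/writer sketch under those assumptions, with my_conn and my_chan as stand-in types:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_chan {
	struct list_head	list;
	u16			cid;
};

struct my_conn {
	struct list_head	chan_l;	/* RCU-protected list of my_chan */
};

static struct my_chan *my_lookup(struct my_conn *conn, u16 cid)
{
	struct my_chan *c, *r = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &conn->chan_l, list) {
		if (c->cid == cid) {
			r = c;
			break;
		}
	}
	rcu_read_unlock();

	/* like the l2cap code, this assumes the caller holds its own
	 * reference (e.g. a refcount) that keeps the object alive */
	return r;
}

static void my_del(struct my_chan *chan)
{
	list_del_rcu(&chan->list);
	synchronize_rcu();	/* wait for readers before freeing */
	kfree(chan);
}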
/* Find socket with cid and source bdaddr.
@@ -845,7 +877,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
parent = pchan->sk;
- bh_lock_sock(parent);
+ lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
@@ -859,8 +891,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
sk = chan->sk;
- write_lock_bh(&conn->chan_lock);
-
hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src);
@@ -868,17 +898,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ l2cap_chan_add(conn, chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_state_change(chan, BT_CONNECTED);
parent->sk_data_ready(parent, 0);
- write_unlock_bh(&conn->chan_lock);
-
clean:
- bh_unlock_sock(parent);
+ release_sock(parent);
}
static void l2cap_chan_ready(struct sock *sk)
@@ -910,9 +938,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (conn->hcon->out && conn->hcon->type == LE_LINK)
smp_conn_security(conn, conn->hcon->pending_sec_level);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -932,7 +960,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
/* Notify sockets that we cannot guaranty reliability anymore */
@@ -942,21 +970,22 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
sk->sk_err = err;
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
-static void l2cap_info_timeout(unsigned long arg)
+static void l2cap_info_timeout(struct work_struct *work)
{
- struct l2cap_conn *conn = (void *) arg;
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ info_timer.work);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -980,17 +1009,19 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
l2cap_chan_del(chan, err);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
}
+ hci_chan_del(conn->hchan);
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- del_timer_sync(&conn->info_timer);
+ __cancel_delayed_work(&conn->info_timer);
if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
- del_timer(&conn->security_timer);
+ __cancel_delayed_work(&conn->security_timer);
smp_chan_destroy(conn);
}
@@ -998,9 +1029,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
kfree(conn);
}
-static void security_timeout(unsigned long arg)
+static void security_timeout(struct work_struct *work)
{
- struct l2cap_conn *conn = (void *) arg;
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ security_timer.work);
l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
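The conn info and security timers here, and further down the per-channel retrans/monitor/ack timers, are converted from struct timer_list to delayed work, so the handlers run in process context and can take lock_sock() where they previously used bh_lock_sock(). A minimal sketch of the conversion; my_conn, my_timeout and the arming helper are illustrative names only:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_conn {
	struct delayed_work	info_timer;
};

static void my_timeout(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn,
							info_timer.work);

	/* handle the timeout in process context */
	(void)conn;
}

static void my_conn_init(struct my_conn *conn)
{
	/* was: setup_timer(&conn->info_timer, ..., (unsigned long) conn); */
	INIT_DELAYED_WORK(&conn->info_timer, my_timeout);
}

static void my_conn_arm(struct my_conn *conn, unsigned int msecs)
{
	/* was: mod_timer(&conn->info_timer, jiffies + msecs_to_jiffies(msecs)); */
	schedule_delayed_work(&conn->info_timer, msecs_to_jiffies(msecs));
}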
@@ -1008,18 +1040,26 @@ static void security_timeout(unsigned long arg)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_chan *hchan;
if (conn || status)
return conn;
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return NULL;
+
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn)
+ if (!conn) {
+ hci_chan_del(hchan);
return NULL;
+ }
hcon->l2cap_data = conn;
conn->hcon = hcon;
+ conn->hchan = hchan;
- BT_DBG("hcon %p conn %p", hcon, conn);
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
conn->mtu = hcon->hdev->le_mtu;
@@ -1032,29 +1072,19 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
- rwlock_init(&conn->chan_lock);
INIT_LIST_HEAD(&conn->chan_l);
if (hcon->type == LE_LINK)
- setup_timer(&conn->security_timer, security_timeout,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
else
- setup_timer(&conn->info_timer, l2cap_info_timeout,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
return conn;
}
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
-{
- write_lock_bh(&conn->chan_lock);
- __l2cap_chan_add(conn, chan);
- write_unlock_bh(&conn->chan_lock);
-}
-
/* ---- Socket interface ---- */
/* Find socket with psm and source bdaddr.
@@ -1090,11 +1120,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan)
+inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -1108,7 +1137,62 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
+
+ lock_sock(sk);
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (sk->sk_state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ err = 0;
+ goto done;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&bt_sk(sk)->dst, src);
+ chan->psm = psm;
+ chan->dcid = cid;
auth_type = l2cap_get_auth_type(chan);
@@ -1142,7 +1226,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
if (hcon->state == BT_CONNECTED) {
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
__clear_chan_timer(chan);
- if (l2cap_check_security(chan))
+ if (l2cap_chan_check_security(chan))
l2cap_state_change(chan, BT_CONNECTED);
} else
l2cap_do_start(chan);
@@ -1151,7 +1235,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
err = 0;
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -1188,17 +1272,18 @@ int __l2cap_wait_ack(struct sock *sk)
return err;
}
-static void l2cap_monitor_timeout(unsigned long arg)
+static void l2cap_monitor_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ monitor_timer.work);
struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- bh_lock_sock(sk);
+ lock_sock(sk);
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- bh_unlock_sock(sk);
+ release_sock(sk);
return;
}
@@ -1206,24 +1291,25 @@ static void l2cap_monitor_timeout(unsigned long arg)
__set_monitor_timer(chan);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ release_sock(sk);
}
-static void l2cap_retrans_timeout(unsigned long arg)
+static void l2cap_retrans_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ retrans_timer.work);
struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- bh_lock_sock(sk);
+ lock_sock(sk);
chan->retry_count = 1;
__set_monitor_timer(chan);
set_bit(CONN_WAIT_F, &chan->conn_state);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ release_sock(sk);
}
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1245,60 +1331,46 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
}
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct hci_conn *hcon = chan->conn->hcon;
- u16 flags;
-
- BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
- if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
- hci_send_acl(hcon, skb, flags);
-}
-
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
struct sk_buff *skb;
- u16 control, fcs;
+ u32 control;
+ u16 fcs;
while ((skb = skb_dequeue(&chan->tx_q))) {
- control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
- control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+ control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
- put_unaligned_le16(fcs, skb->data + skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ skb->data + skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, skb);
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
skb = skb_peek(&chan->tx_q);
if (!skb)
return;
- do {
- if (bt_cb(skb)->tx_seq == tx_seq)
- break;
-
+ while (bt_cb(skb)->tx_seq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
- } while ((skb = skb_queue_next(&chan->tx_q, skb)));
+ skb = skb_queue_next(&chan->tx_q, skb);
+ }
if (chan->remote_max_tx &&
bt_cb(skb)->retries == chan->remote_max_tx) {
@@ -1308,20 +1380,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
tx_skb = skb_clone(skb, GFP_ATOMIC);
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, tx_seq);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)tx_skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1405,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
int nsent = 0;
if (chan->state != BT_CONNECTED)
@@ -1348,20 +1424,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb->data +
+ tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1447,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
bt_cb(skb)->tx_seq = chan->next_tx_seq;
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
if (bt_cb(skb)->retries == 1)
chan->unacked_frames++;
@@ -1401,12 +1480,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
static void l2cap_send_ack(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
l2cap_send_sframe(chan, control);
return;
@@ -1415,20 +1494,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
if (l2cap_ertm_send(chan) > 0)
return;
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
struct srej_list *tail;
- u16 control;
+ u32 control;
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= L2CAP_CTRL_FINAL;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_ctrl_final(chan);
tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, tail->tx_seq);
l2cap_send_sframe(chan, control);
}
@@ -1456,6 +1535,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
+ (*frag)->priority = skb->priority;
+
sent += count;
len -= count;
@@ -1465,15 +1546,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d", sk, (int)len);
+ BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1564,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1580,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1598,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1615,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u16 control, u16 sdulen)
+ u32 control, u16 sdulen)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1628,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (!conn)
return ERR_PTR(-ENOTCONN);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (sdulen)
- hlen += 2;
+ hlen += L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1649,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+
if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, 2));
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1566,7 +1662,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
}
if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, 2));
+ put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
bt_cb(skb)->retries = 0;
return skb;
@@ -1576,11 +1672,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
{
struct sk_buff *skb;
struct sk_buff_head sar_queue;
- u16 control;
+ u32 control;
size_t size = 0;
skb_queue_head_init(&sar_queue);
- control = L2CAP_SDU_START;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_START);
skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1593,10 +1689,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
size_t buflen;
if (len > chan->remote_mps) {
- control = L2CAP_SDU_CONTINUE;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
buflen = chan->remote_mps;
} else {
- control = L2CAP_SDU_END;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_END);
buflen = len;
}
@@ -1617,15 +1713,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
return size;
}
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
int err;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
- skb = l2cap_create_connless_pdu(chan, msg, len);
+ skb = l2cap_create_connless_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1640,7 +1737,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
return -EMSGSIZE;
/* Create a basic PDU */
- skb = l2cap_create_basic_pdu(chan, msg, len);
+ skb = l2cap_create_basic_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1652,7 +1749,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
if (len <= chan->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
skb = l2cap_create_iframe_pdu(chan, msg, len, control,
0);
if (IS_ERR(skb))
@@ -1704,8 +1801,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@@ -1720,7 +1818,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (chan->ops->recv(chan->data, nskb))
kfree_skb(nskb);
}
- read_unlock(&conn->chan_lock);
+
+ rcu_read_unlock();
}
/* ---- L2CAP signalling commands ---- */
@@ -1850,37 +1949,64 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_ack_timeout(unsigned long arg)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_conf_efs efs;
- bh_lock_sock(chan->sk);
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ efs.id = chan->local_id;
+ efs.stype = chan->local_stype;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ efs.id = 1;
+ efs.stype = L2CAP_SERV_BESTEFFORT;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = 0;
+ efs.flush_to = 0;
+ break;
+
+ default:
+ return;
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+}
+
+static void l2cap_ack_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ ack_timer.work);
+
+ BT_DBG("chan %p", chan);
+
+ lock_sock(chan->sk);
l2cap_send_ack(chan);
- bh_unlock_sock(chan->sk);
+ release_sock(chan->sk);
}
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
- struct sock *sk = chan->sk;
-
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
- setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
- (unsigned long) chan);
- setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
- (unsigned long) chan);
- setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
-
-
- sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -1896,11 +2022,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+ if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+ __l2cap_ews_supported(chan)) {
+ /* use extended control field */
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ } else {
+ chan->tx_win = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ }
+}
+
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -1913,6 +2064,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
break;
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
/* fall through */
default:
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
@@ -1942,17 +2096,27 @@ done:
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = chan->tx_win;
rfc.max_transmit = chan->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_txwin_setup(chan);
+
+ rfc.txwin_size = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -1961,6 +2125,10 @@ done:
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
}
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
break;
case L2CAP_MODE_STREAMING:
@@ -1969,13 +2137,19 @@ done:
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -2002,8 +2176,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
+ u8 remote_efs = 0;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -2033,7 +2210,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+ break;
+ case L2CAP_CONF_EFS:
+ remote_efs = 1;
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_EWS:
+ if (!enable_hs)
+ return -ECONNREFUSED;
+
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ set_bit(CONF_EWS_RECV, &chan->conf_state);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ chan->remote_tx_win = val;
break;
default:
@@ -2058,6 +2250,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
break;
}
+ if (remote_efs) {
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+ else
+ return -ECONNREFUSED;
+ }
+
if (chan->mode != rfc.mode)
return -ECONNREFUSED;
@@ -2076,7 +2275,6 @@ done:
sizeof(rfc), (unsigned long) &rfc);
}
-
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
@@ -2089,6 +2287,26 @@ done:
}
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype) {
+
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (chan->num_conf_req >= 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+ set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ }
+ }
+
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2314,20 @@ done:
break;
case L2CAP_MODE_ERTM:
- chan->remote_tx_win = rfc.txwin_size;
- chan->remote_max_tx = rfc.max_transmit;
+ if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+ chan->remote_tx_win = rfc.txwin_size;
+ else
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ chan->remote_max_tx = rfc.max_transmit;
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2339,29 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+ chan->remote_flush_to =
+ le32_to_cpu(efs.flush_to);
+ chan->remote_acc_lat =
+ le32_to_cpu(efs.acc_lat);
+ chan->remote_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ }
break;
case L2CAP_MODE_STREAMING:
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2152,7 +2393,8 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
void *ptr = req->data;
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2188,6 +2430,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
break;
+
+ case L2CAP_CONF_EWS:
+ chan->tx_win = min_t(u16, val,
+ L2CAP_DEFAULT_EXT_WINDOW);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
+ break;
+
+ case L2CAP_CONF_EFS:
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *)val, olen);
+
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ break;
}
}
@@ -2196,13 +2458,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
chan->mode = rfc.mode;
- if (*result == L2CAP_CONF_SUCCESS) {
+ if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->local_msdu = le16_to_cpu(efs.msdu);
+ chan->local_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+ chan->local_flush_to =
+ le32_to_cpu(efs.flush_to);
+ }
break;
+
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
}
@@ -2271,6 +2543,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
}
}
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC option.
+ */
+ rfc.mode = chan->mode;
+ rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+ BT_ERR("Expected RFC option was not found, using defaults");
+
done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
@@ -2292,7 +2574,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
- del_timer(&conn->info_timer);
+ __cancel_delayed_work(&conn->info_timer);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -2325,12 +2607,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
parent = pchan->sk;
- bh_lock_sock(parent);
+ lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = 0x05;
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
goto response;
}
@@ -2349,11 +2631,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
sk = chan->sk;
- write_lock_bh(&conn->chan_lock);
-
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
- write_unlock_bh(&conn->chan_lock);
sock_set_flag(sk, SOCK_ZAPPED);
chan->ops->close(chan->data);
goto response;
@@ -2368,7 +2647,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ l2cap_chan_add(conn, chan);
dcid = chan->scid;
@@ -2377,7 +2656,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_check_security(chan)) {
+ if (l2cap_chan_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
@@ -2399,10 +2678,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
status = L2CAP_CS_NO_INFO;
}
- write_unlock_bh(&conn->chan_lock);
-
response:
- bh_unlock_sock(parent);
+ release_sock(parent);
sendresp:
rsp.scid = cpu_to_le16(scid);
@@ -2418,7 +2695,7 @@ sendresp:
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
+ schedule_delayed_work(&conn->info_timer,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
@@ -2484,19 +2761,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
break;
default:
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- break;
- }
-
l2cap_chan_del(chan, ECONNREFUSED);
break;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2602,8 +2871,23 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
chan->num_conf_req++;
}
+ /* Got Conf Rsp PENDING from remote side and assume we sent
+ Conf Rsp PENDING in the code above */
+ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+ test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ }
+
unlock:
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2631,8 +2915,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
+ clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
break;
+ case L2CAP_CONF_PENDING:
+ set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+ if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ buf, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ goto done;
+ }
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, buf,
+ L2CAP_CONF_SUCCESS, 0x0000), buf);
+ }
+ goto done;
+
case L2CAP_CONF_UNACCEPT:
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
@@ -2685,7 +2994,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
}
done:
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2714,17 +3023,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- return 0;
- }
-
l2cap_chan_del(chan, ECONNRESET);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
return 0;
@@ -2748,17 +3048,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
sk = chan->sk;
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan,BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- return 0;
- }
-
l2cap_chan_del(chan, 0);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
return 0;
@@ -2782,15 +3073,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
+ if (enable_hs)
+ feat_mask |= L2CAP_FEAT_EXT_FLOW
+ | L2CAP_FEAT_EXT_WINDOW;
+
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+ if (enable_hs)
+ l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+ else
+ l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(buf + 4, l2cap_fixed_chan, 8);
+ memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
@@ -2819,7 +3120,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
return 0;
- del_timer(&conn->info_timer);
+ __cancel_delayed_work(&conn->info_timer);
if (result != L2CAP_IR_SUCCESS) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -2857,6 +3158,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
return 0;
}
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ void *data)
+{
+ struct l2cap_create_chan_req *req = data;
+ struct l2cap_create_chan_rsp rsp;
+ u16 psm, scid;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+
+ /* Placeholder: Always reject */
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = L2CAP_CR_NO_MEM;
+ rsp.status = L2CAP_CS_NO_INFO;
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, void *data)
+{
+ BT_DBG("conn %p", conn);
+
+ return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result)
+{
+ struct l2cap_move_chan_rsp rsp;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_chan *chan, u16 icid, u16 result)
+{
+ struct l2cap_move_chan_cfm cfm;
+ u8 ident;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ ident = l2cap_get_ident(conn);
+ if (chan)
+ chan->ident = ident;
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid)
+{
+ struct l2cap_move_chan_cfm_rsp rsp;
+
+ BT_DBG("icid %d", icid);
+
+ rsp.icid = cpu_to_le16(icid);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_req *req = data;
+ u16 icid = 0;
+ u16 result = L2CAP_MR_NOT_ALLOWED;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ icid = le16_to_cpu(req->icid);
+
+ BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ /* Placeholder: Always refuse */
+ l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_rsp *rsp = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+ result = le16_to_cpu(rsp->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ /* Placeholder: Always unconfirmed */
+ l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm *cfm = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*cfm))
+ return -EPROTO;
+
+ icid = le16_to_cpu(cfm->icid);
+ result = le16_to_cpu(cfm->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm_rsp *rsp = data;
+ u16 icid;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+
+ BT_DBG("icid %d", icid);
+
+ return 0;
+}
+
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
u16 to_multiplier)
{
@@ -2969,6 +3429,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
err = l2cap_information_rsp(conn, cmd, data);
break;
+ case L2CAP_CREATE_CHAN_REQ:
+ err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CREATE_CHAN_RSP:
+ err = l2cap_create_channel_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_REQ:
+ err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ break;
+
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -3047,10 +3531,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
u16 our_fcs, rcv_fcs;
- int hdr_size = L2CAP_HDR_SIZE + 2;
+ int hdr_size;
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hdr_size = L2CAP_EXT_HDR_SIZE;
+ else
+ hdr_size = L2CAP_ENH_HDR_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16) {
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
@@ -3062,14 +3551,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
chan->frames_sent = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
}
@@ -3081,12 +3570,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
chan->frames_sent == 0) {
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
}
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
@@ -3095,23 +3584,15 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
bt_cb(skb)->sar = sar;
next_skb = skb_peek(&chan->srej_q);
- if (!next_skb) {
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
- }
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
- do {
+ while (next_skb) {
if (bt_cb(next_skb)->tx_seq == tx_seq)
return -EINVAL;
- next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
- chan->buffer_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ next_tx_seq_offset = __seq_offset(chan,
+ bt_cb(next_skb)->tx_seq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3119,9 +3600,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
}
if (skb_queue_is_last(&chan->srej_q, next_skb))
- break;
-
- } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
+ next_skb = NULL;
+ else
+ next_skb = skb_queue_next(&chan->srej_q, next_skb);
+ }
__skb_queue_tail(&chan->srej_q, skb);
@@ -3147,24 +3629,24 @@ static void append_skb_frag(struct sk_buff *skb,
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
int err = -EINVAL;
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
+ switch (__get_ctrl_sar(chan, control)) {
+ case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
err = chan->ops->recv(chan->data, skb);
break;
- case L2CAP_SDU_START:
+ case L2CAP_SAR_START:
if (chan->sdu)
break;
chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
if (chan->sdu_len > chan->imtu) {
err = -EMSGSIZE;
@@ -3181,7 +3663,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_CONTINUE:
+ case L2CAP_SAR_CONTINUE:
if (!chan->sdu)
break;
@@ -3195,7 +3677,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_END:
+ case L2CAP_SAR_END:
if (!chan->sdu)
break;
@@ -3230,14 +3712,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3247,13 +3729,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
goto done;
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_poll(chan);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
chan->retry_count = 1;
@@ -3279,10 +3762,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
}
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
while ((skb = skb_peek(&chan->srej_q)) &&
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3292,7 +3775,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3300,16 +3783,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
}
- chan->buffer_seq_srej =
- (chan->buffer_seq_srej + 1) % 64;
- tx_seq = (tx_seq + 1) % 64;
+ chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
+ tx_seq = __next_seq(chan, tx_seq);
}
}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *l, *tmp;
- u16 control;
+ u32 control;
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
if (l->tx_seq == tx_seq) {
@@ -3317,45 +3799,53 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
kfree(l);
return;
}
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, l->tx_seq);
l2cap_send_sframe(chan, control);
list_del(&l->list);
list_add_tail(&l->list, &chan->srej_l);
}
}
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *new;
- u16 control;
+ u32 control;
while (tx_seq != chan->expected_tx_seq) {
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
new->tx_seq = chan->expected_tx_seq;
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
list_add_tail(&new->list, &chan->srej_l);
}
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
+ return 0;
}
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- u8 tx_seq = __get_txseq(rx_control);
- u8 req_seq = __get_reqseq(rx_control);
- u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ u16 tx_seq = __get_txseq(chan, rx_control);
+ u16 req_seq = __get_reqseq(chan, rx_control);
+ u8 sar = __get_ctrl_sar(chan, rx_control);
int tx_seq_offset, expected_tx_seq_offset;
int num_to_ack = (chan->tx_win/6) + 1;
int err = 0;
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+ BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
tx_seq, rx_control);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3366,9 +3856,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
chan->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(chan);
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* invalid tx_seq */
if (tx_seq_offset >= chan->tx_win) {
@@ -3413,13 +3901,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
return 0;
}
}
- l2cap_send_srejframe(chan, tx_seq);
+
+ err = l2cap_send_srejframe(chan, tx_seq);
+ if (err < 0) {
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ return err;
+ }
}
} else {
- expected_tx_seq_offset =
- (chan->expected_tx_seq - chan->buffer_seq) % 64;
- if (expected_tx_seq_offset < 0)
- expected_tx_seq_offset += 64;
+ expected_tx_seq_offset = __seq_offset(chan,
+ chan->expected_tx_seq, chan->buffer_seq);
/* duplicated tx_seq */
if (tx_seq_offset < expected_tx_seq_offset)
@@ -3437,14 +3928,18 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
set_bit(CONN_SEND_PBIT, &chan->conn_state);
- l2cap_send_srejframe(chan, tx_seq);
+ err = l2cap_send_srejframe(chan, tx_seq);
+ if (err < 0) {
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ return err;
+ }
__clear_ack_timer(chan);
}
return 0;
expected:
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
bt_cb(skb)->tx_seq = tx_seq;
@@ -3454,22 +3949,24 @@ expected:
}
err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
return err;
}
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
}
- __set_ack_timer(chan);
chan->num_acked = (chan->num_acked + 1) % num_to_ack;
if (chan->num_acked == num_to_ack - 1)
l2cap_send_ack(chan);
+ else
+ __set_ack_timer(chan);
return 0;
@@ -3478,15 +3975,15 @@ drop:
return 0;
}
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
- rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
+ __get_reqseq(chan, rx_control), rx_control);
- chan->expected_ack_seq = __get_reqseq(rx_control);
+ chan->expected_ack_seq = __get_reqseq(chan, rx_control);
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3499,7 +3996,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
l2cap_send_i_or_rr_or_rnr(chan);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3518,18 +4015,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
}
}
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
} else {
@@ -3539,15 +4036,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
set_bit(CONN_REJ_ACT, &chan->conn_state);
}
}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
@@ -3560,7 +4057,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
chan->srej_save_reqseq = tx_seq;
set_bit(CONN_SREJ_ACT, &chan->conn_state);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
chan->srej_save_reqseq == tx_seq)
clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3575,37 +4072,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
}
}
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
__clear_retrans_timer(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
return;
}
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control)) {
l2cap_send_srejtail(chan);
- else
- l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
+ } else {
+ rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
+ l2cap_send_sframe(chan, rx_control);
+ }
}
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
+ BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3613,20 +4112,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
clear_bit(CONN_WAIT_F, &chan->conn_state);
}
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
+ switch (__get_ctrl_super(chan, rx_control)) {
+ case L2CAP_SUPER_RR:
l2cap_data_channel_rrframe(chan, rx_control);
break;
- case L2CAP_SUPER_REJECT:
+ case L2CAP_SUPER_REJ:
l2cap_data_channel_rejframe(chan, rx_control);
break;
- case L2CAP_SUPER_SELECT_REJECT:
+ case L2CAP_SUPER_SREJ:
l2cap_data_channel_srejframe(chan, rx_control);
break;
- case L2CAP_SUPER_RCV_NOT_READY:
+ case L2CAP_SUPER_RNR:
l2cap_data_channel_rnrframe(chan, rx_control);
break;
}
@@ -3638,12 +4137,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- u16 control;
- u8 req_seq;
+ u32 control;
+ u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
/*
@@ -3654,26 +4153,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control) && __is_iframe(control))
- len -= 2;
+ if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
if (len > chan->mps) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
}
- req_seq = __get_reqseq(control);
- req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
- if (req_seq_offset < 0)
- req_seq_offset += 64;
+ req_seq = __get_reqseq(chan, control);
- next_tx_seq_offset =
- (chan->next_tx_seq - chan->expected_ack_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+
+ next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
+ chan->expected_ack_seq);
/* check for invalid req-seq */
if (req_seq_offset > next_tx_seq_offset) {
@@ -3681,7 +4177,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (__is_iframe(control)) {
+ if (!__is_sframe(chan, control)) {
if (len < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
@@ -3709,8 +4205,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
{
struct l2cap_chan *chan;
struct sock *sk = NULL;
- u16 control;
- u8 tx_seq;
+ u32 control;
+ u16 tx_seq;
int len;
chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3741,33 +4237,28 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
break;
case L2CAP_MODE_ERTM:
- if (!sock_owned_by_user(sk)) {
- l2cap_ertm_data_rcv(sk, skb);
- } else {
- if (sk_add_backlog(sk, skb))
- goto drop;
- }
+ l2cap_ertm_data_rcv(sk, skb);
goto done;
case L2CAP_MODE_STREAMING:
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control))
- len -= 2;
+ if (__is_sar_start(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
- if (len > chan->mps || len < 0 || __is_sframe(control))
+ if (len > chan->mps || len < 0 || __is_sframe(chan, control))
goto drop;
- tx_seq = __get_txseq(control);
+ tx_seq = __get_txseq(chan, control);
if (chan->expected_tx_seq != tx_seq) {
/* Frame(s) missing - must discard partial SDU */
@@ -3779,7 +4270,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
/* TODO: Notify userland of missing data */
}
- chan->expected_tx_seq = (tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, tx_seq);
if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3796,7 +4287,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3812,7 +4303,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3830,7 +4321,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3845,7 +4336,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3863,7 +4354,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3913,14 +4404,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
/* ---- L2CAP interface with lower layer (HCI) ---- */
-static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
int exact = 0, lm1 = 0, lm2 = 0;
struct l2cap_chan *c;
- if (type != ACL_LINK)
- return -EINVAL;
-
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
@@ -3933,12 +4421,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
}
}
@@ -3947,15 +4435,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return exact ? lm1 : lm2;
}
-static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
- if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
- return -EINVAL;
-
if (!status) {
conn = l2cap_conn_add(hcon, status);
if (conn)
@@ -3966,27 +4451,22 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
return 0;
}
-static int l2cap_disconn_ind(struct hci_conn *hcon)
+int l2cap_disconn_ind(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p", hcon);
- if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
- return 0x13;
-
+ if (!conn)
+ return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
}
-static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
- return -EINVAL;
-
l2cap_conn_del(hcon, bt_to_errno(reason));
-
return 0;
}
@@ -4007,7 +4487,7 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
}
}
-static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
@@ -4019,12 +4499,12 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (hcon->type == LE_LINK) {
smp_distribute_keys(conn, 0);
- del_timer(&conn->security_timer);
+ __cancel_delayed_work(&conn->security_timer);
}
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -4102,12 +4582,12 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
return 0;
}
-static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
struct l2cap_conn *conn = hcon->l2cap_data;
@@ -4168,11 +4648,11 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
BT_ERR("Frame exceeding recv MTU (len %d, "
"MTU %d)", len,
chan->imtu);
- bh_unlock_sock(sk);
+ release_sock(sk);
l2cap_conn_unreliable(conn, ECOMM);
goto drop;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
/* Allocate skb for the complete frame (with header) */
@@ -4254,17 +4734,6 @@ static const struct file_operations l2cap_debugfs_fops = {
static struct dentry *l2cap_debugfs;
-static struct hci_proto l2cap_hci_proto = {
- .name = "L2CAP",
- .id = HCI_PROTO_L2CAP,
- .connect_ind = l2cap_connect_ind,
- .connect_cfm = l2cap_connect_cfm,
- .disconn_ind = l2cap_disconn_ind,
- .disconn_cfm = l2cap_disconn_cfm,
- .security_cfm = l2cap_security_cfm,
- .recv_acldata = l2cap_recv_acldata
-};
-
int __init l2cap_init(void)
{
int err;
@@ -4273,13 +4742,6 @@ int __init l2cap_init(void)
if (err < 0)
return err;
- err = hci_register_proto(&l2cap_hci_proto);
- if (err < 0) {
- BT_ERR("L2CAP protocol registration failed");
- bt_sock_unregister(BTPROTO_L2CAP);
- goto error;
- }
-
if (bt_debugfs) {
l2cap_debugfs = debugfs_create_file("l2cap", 0444,
bt_debugfs, NULL, &l2cap_debugfs_fops);
@@ -4288,19 +4750,11 @@ int __init l2cap_init(void)
}
return 0;
-
-error:
- l2cap_cleanup_sockets();
- return err;
}
void l2cap_exit(void)
{
debugfs_remove(l2cap_debugfs);
-
- if (hci_unregister_proto(&l2cap_hci_proto) < 0)
- BT_ERR("L2CAP protocol unregistration failed");
-
l2cap_cleanup_sockets();
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c406d3136f..9ca5616166f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -122,70 +123,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
if (la.l2_cid && la.l2_psm)
return -EINVAL;
- lock_sock(sk);
-
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
- && !(la.l2_psm || la.l2_cid)) {
- err = -EINVAL;
- goto done;
- }
-
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
- chan->chan_type != L2CAP_CHAN_RAW) {
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- chan->psm = la.l2_psm;
- chan->dcid = la.l2_cid;
-
- err = l2cap_chan_connect(l2cap_pi(sk)->chan);
+ err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
if (err)
goto done;
-wait:
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
- release_sock(sk);
+ if (sock_owned_by_user(sk))
+ release_sock(sk);
return err;
}
@@ -334,7 +280,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -359,10 +305,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (chan->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
opt |= L2CAP_LM_MASTER;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
opt |= L2CAP_LM_RELIABLE;
if (put_user(opt, (u32 __user *) optval))
@@ -449,7 +395,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
case BT_FLUSHABLE:
- if (put_user(chan->flushable, (u32 __user *) optval))
+ if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -461,7 +408,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- pwr.force_active = chan->force_active;
+ pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -469,6 +416,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (put_user(chan->chan_policy, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -503,7 +460,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -511,7 +468,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
break;
}
@@ -535,7 +492,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
chan->omtu = opts.omtu;
chan->fcs = opts.fcs;
chan->max_tx = opts.max_tx;
- chan->tx_win = (__u8)opts.txwin_size;
+ chan->tx_win = opts.txwin_size;
break;
case L2CAP_LM:
@@ -551,8 +508,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
if (opt & L2CAP_LM_SECURE)
chan->sec_level = BT_SECURITY_HIGH;
- chan->role_switch = (opt & L2CAP_LM_MASTER);
- chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ if (opt & L2CAP_LM_MASTER)
+ set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+ else
+ clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+ if (opt & L2CAP_LM_RELIABLE)
+ set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
break;
default:
@@ -608,8 +572,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
chan->sec_level = sec.level;
+ if (!chan->conn)
+ break;
+
conn = chan->conn;
- if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+
+ /* change security for LE channels */
+ if (chan->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
@@ -617,9 +586,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
if (smp_conn_security(conn, sec.level))
break;
-
- err = 0;
sk->sk_state = BT_CONFIG;
+
+ /* or for an ACL link, during defer_setup time */
+ } else if (sk->sk_state == BT_CONNECT2 &&
+ bt_sk(sk)->defer_setup) {
+ err = l2cap_chan_check_security(chan);
+ } else {
+ err = -EINVAL;
}
break;
@@ -658,7 +632,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
}
}
- chan->flushable = opt;
+ if (opt)
+ set_bit(FLAG_FLUSHABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FLUSHABLE, &chan->flags);
break;
case BT_POWER:
@@ -675,7 +652,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
err = -EFAULT;
break;
}
- chan->force_active = pwr.force_active;
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ chan->chan_policy = (u8) opt;
break;
default:
@@ -709,7 +715,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
return -ENOTCONN;
}
- err = l2cap_chan_send(chan, msg, len);
+ err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
release_sock(sk);
return err;
@@ -931,11 +937,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->fcs = pchan->fcs;
chan->max_tx = pchan->max_tx;
chan->tx_win = pchan->tx_win;
+ chan->tx_win_max = pchan->tx_win_max;
chan->sec_level = pchan->sec_level;
- chan->role_switch = pchan->role_switch;
- chan->force_reliable = pchan->force_reliable;
- chan->flushable = pchan->flushable;
- chan->force_active = pchan->force_active;
+ chan->flags = pchan->flags;
security_sk_clone(parent, sk);
} else {
@@ -964,12 +968,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
- chan->role_switch = 0;
- chan->force_reliable = 0;
- chan->flushable = BT_FLUSHABLE_OFF;
- chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+ chan->flags = 0;
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
/* Default config options */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 2c763429686..2540944d871 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,6 +22,7 @@
/* Bluetooth HCI Management interface */
+#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -29,26 +30,103 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
+#include <net/bluetooth/smp.h>
#define MGMT_VERSION 0
#define MGMT_REVISION 1
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+
+#define SERVICE_CACHE_TIMEOUT (5 * 1000)
+
struct pending_cmd {
struct list_head list;
- __u16 opcode;
+ u16 opcode;
int index;
void *param;
struct sock *sk;
void *user_data;
};
-static LIST_HEAD(cmd_list);
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+ MGMT_STATUS_SUCCESS,
+ MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
+ MGMT_STATUS_NOT_CONNECTED, /* No Connection */
+ MGMT_STATUS_FAILED, /* Hardware Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
+ MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
+ MGMT_STATUS_NO_RESOURCES, /* Memory Full */
+ MGMT_STATUS_TIMEOUT, /* Connection Timeout */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
+ MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
+ MGMT_STATUS_BUSY, /* Command Disallowed */
+ MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
+ MGMT_STATUS_REJECTED, /* Rejected Security */
+ MGMT_STATUS_REJECTED, /* Rejected Personal */
+ MGMT_STATUS_TIMEOUT, /* Host Timeout */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
+ MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
+ MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
+ MGMT_STATUS_DISCONNECTED, /* OE Power Off */
+ MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
+ MGMT_STATUS_BUSY, /* Repeated Attempts */
+ MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
+ MGMT_STATUS_FAILED, /* Unknown LMP PDU */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
+ MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
+ MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
+ MGMT_STATUS_REJECTED, /* Air Mode Rejected */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
+ MGMT_STATUS_FAILED, /* Unspecified Error */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
+ MGMT_STATUS_FAILED, /* Role Change Not Allowed */
+ MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
+ MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
+ MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
+ MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
+ MGMT_STATUS_FAILED, /* Unit Link Key Used */
+ MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
+ MGMT_STATUS_TIMEOUT, /* Instant Passed */
+ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
+ MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
+ MGMT_STATUS_REJECTED, /* QoS Rejected */
+ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
+ MGMT_STATUS_REJECTED, /* Insufficient Security */
+ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Slot Violation */
+ MGMT_STATUS_FAILED, /* Role Switch Failed */
+ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
+ MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
+ MGMT_STATUS_BUSY, /* Host Busy Pairing */
+ MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
+ MGMT_STATUS_BUSY, /* Controller Busy */
+ MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
+ MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
+ MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
+};
+
+static u8 mgmt_status(u8 hci_status)
+{
+ if (hci_status < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[hci_status];
+
+ return MGMT_STATUS_FAILED;
+}
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
+ int err;
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
@@ -66,10 +144,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -78,6 +157,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
+ int err;
BT_DBG("sock %p", sk);
@@ -97,10 +177,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
if (rp)
memcpy(ev->data, rp, rp_len);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int read_version(struct sock *sk)
@@ -120,6 +201,7 @@ static int read_index_list(struct sock *sk)
{
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
+ struct hci_dev *d;
size_t rp_len;
u16 count;
int i, err;
@@ -143,10 +225,9 @@ static int read_index_list(struct sock *sk)
put_unaligned_le16(count, &rp->num_controllers);
i = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(d);
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
+ cancel_delayed_work(&d->power_off);
if (test_bit(HCI_SETUP, &d->flags))
continue;
@@ -165,6 +246,262 @@ static int read_index_list(struct sock *sk)
return err;
}
+static u32 get_supported_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ settings |= MGMT_SETTING_POWERED;
+ settings |= MGMT_SETTING_CONNECTABLE;
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+ settings |= MGMT_SETTING_DISCOVERABLE;
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR)
+ settings |= MGMT_SETTING_SSP;
+
+ if (!(hdev->features[4] & LMP_NO_BREDR)) {
+ settings |= MGMT_SETTING_BREDR;
+ settings |= MGMT_SETTING_LINK_SECURITY;
+ }
+
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
+
+ return settings;
+}
+
+static u32 get_current_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ settings |= MGMT_SETTING_POWERED;
+ else
+ return settings;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags))
+ settings |= MGMT_SETTING_CONNECTABLE;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags))
+ settings |= MGMT_SETTING_DISCOVERABLE;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->flags))
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (!(hdev->features[4] & LMP_NO_BREDR))
+ settings |= MGMT_SETTING_BREDR;
+
+ if (hdev->extfeatures[0] & LMP_HOST_LE)
+ settings |= MGMT_SETTING_LE;
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ settings |= MGMT_SETTING_LINK_SECURITY;
+
+ if (hdev->ssp_mode > 0)
+ settings |= MGMT_SETTING_SSP;
+
+ return settings;
+}
+
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
+#define PNP_INFO_SVCLASS_ID 0x1200
+
+static u8 bluetooth_base_uuid[] = {
+ 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u16 get_uuid16(u8 *uuid128)
+{
+ u32 val;
+ int i;
+
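+ /* A 128-bit UUID shortens to 16 bits only if it is built on the
+  * Bluetooth Base UUID and its value fits in 16 bits. */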
+ for (i = 0; i < 12; i++) {
+ if (bluetooth_base_uuid[i] != uuid128[i])
+ return 0;
+ }
+
+ memcpy(&val, &uuid128[12], 4);
+
+ val = le32_to_cpu(val);
+ if (val > 0xffff)
+ return 0;
+
+ return (u16) val;
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+ u8 *ptr = data;
+ u16 eir_len = 0;
+ u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
+ int i, truncated = 0;
+ struct bt_uuid *uuid;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+
+ if (name_len > 0) {
+ /* EIR Data type */
+ if (name_len > 48) {
+ name_len = 48;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ /* EIR Data length */
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ eir_len += (name_len + 2);
+ ptr += (name_len + 2);
+ }
+
+ memset(uuid16_list, 0, sizeof(uuid16_list));
+
+ /* Group all UUID16 types */
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u16 uuid16;
+
+ uuid16 = get_uuid16(uuid->uuid);
+ if (uuid16 == 0)
+ return;
+
+ if (uuid16 < 0x1100)
+ continue;
+
+ if (uuid16 == PNP_INFO_SVCLASS_ID)
+ continue;
+
+ /* Stop if not enough space to put next UUID */
+ if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
+ truncated = 1;
+ break;
+ }
+
+ /* Check for duplicates */
+ for (i = 0; uuid16_list[i] != 0; i++)
+ if (uuid16_list[i] == uuid16)
+ break;
+
+ if (uuid16_list[i] == 0) {
+ uuid16_list[i] = uuid16;
+ eir_len += sizeof(u16);
+ }
+ }
+
+ if (uuid16_list[0] != 0) {
+ u8 *length = ptr;
+
+ /* EIR Data type */
+ ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
+
+ ptr += 2;
+ eir_len += 2;
+
+ for (i = 0; uuid16_list[i] != 0; i++) {
+ *ptr++ = (uuid16_list[i] & 0x00ff);
+ *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
+ }
+
+ /* EIR Data length */
+ *length = (i * sizeof(u16)) + 1;
+ }
+}
+
+static int update_eir(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
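+ /* Write EIR data only if the controller supports extended inquiry,
+  * SSP is enabled and the service cache is not active. */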
+ if (!(hdev->features[6] & LMP_EXT_INQ))
+ return 0;
+
+ if (hdev->ssp_mode == 0)
+ return 0;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ create_eir(hdev, cp.data);
+
+ if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+ return 0;
+
+ memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+static int update_class(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
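+ /* Class of Device layout: minor class, major class, then the
+  * service class bits collected from the registered UUIDs. */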
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static void service_cache_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ service_cache.work);
+
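+ /* The service cache period has expired: push the accumulated EIR
+  * and class-of-device updates to the controller. */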
+ if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ update_eir(hdev);
+ update_class(hdev);
+
+ hci_dev_unlock(hdev);
+}
+
+static void mgmt_init_hdev(struct hci_dev *hdev)
+{
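+ /* First management access to this controller: mark it as
+  * mgmt-driven and arm the service cache timeout. */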
+ if (!test_and_set_bit(HCI_MGMT, &hdev->flags))
+ INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+
+ if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ schedule_delayed_work(&hdev->service_cache,
+ msecs_to_jiffies(SERVICE_CACHE_TIMEOUT));
+}
+
static int read_controller_info(struct sock *sk, u16 index)
{
struct mgmt_rp_read_info rp;
@@ -174,40 +511,33 @@ static int read_controller_info(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_READ_INFO,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- set_bit(HCI_MGMT, &hdev->flags);
+ if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
+ mgmt_init_hdev(hdev);
memset(&rp, 0, sizeof(rp));
- rp.type = hdev->dev_type;
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
- rp.powered = test_bit(HCI_UP, &hdev->flags);
- rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
- rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+ rp.version = hdev->hci_ver;
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp.sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp.sec_mode = 4;
- else
- rp.sec_mode = 2;
+ put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+
+ rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp.current_settings = cpu_to_le32(get_current_settings(hdev));
- bacpy(&rp.bdaddr, &hdev->bdaddr);
- memcpy(rp.features, hdev->features, 8);
memcpy(rp.dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
- rp.hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -221,7 +551,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
}
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- u16 index, void *data, u16 len)
+ struct hci_dev *hdev,
+ void *data, u16 len)
{
struct pending_cmd *cmd;
@@ -230,7 +561,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
return NULL;
cmd->opcode = opcode;
- cmd->index = index;
+ cmd->index = hdev->id;
cmd->param = kmalloc(len, GFP_ATOMIC);
if (!cmd->param) {
@@ -244,48 +575,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk;
sock_hold(sk);
- list_add(&cmd->list, &cmd_list);
+ list_add(&cmd->list, &hdev->mgmt_pending);
return cmd;
}
-static void mgmt_pending_foreach(u16 opcode, int index,
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
void (*cb)(struct pending_cmd *cmd, void *data),
void *data)
{
struct list_head *p, *n;
- list_for_each_safe(p, n, &cmd_list) {
+ list_for_each_safe(p, n, &hdev->mgmt_pending) {
struct pending_cmd *cmd;
cmd = list_entry(p, struct pending_cmd, list);
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
+ if (opcode > 0 && cmd->opcode != opcode)
continue;
cb(cmd, data);
}
}
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
- continue;
+ struct pending_cmd *cmd;
- return cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
}
return NULL;
@@ -297,6 +616,13 @@ static void mgmt_pending_remove(struct pending_cmd *cmd)
mgmt_pending_free(cmd);
}
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+ __le32 settings = cpu_to_le32(get_current_settings(hdev));
+
+ return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings));
+}
+
static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct mgmt_mode *cp;
@@ -309,40 +635,43 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
up = test_bit(HCI_UP, &hdev->flags);
if ((cp->val && up) || (!cp->val && !up)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
if (cp->val)
- queue_work(hdev->workqueue, &hdev->power_on);
+ schedule_work(&hdev->power_on);
else
- queue_work(hdev->workqueue, &hdev->power_off);
+ schedule_work(&hdev->power_off.work);
err = 0;
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -350,7 +679,7 @@ failed:
static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
- struct mgmt_mode *cp;
+ struct mgmt_cp_set_discoverable *cp;
struct hci_dev *hdev;
struct pending_cmd *cmd;
u8 scan;
@@ -361,32 +690,36 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -396,13 +729,18 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
if (cp->val)
scan |= SCAN_INQUIRY;
+ else
+ cancel_delayed_work(&hdev->discov_off);
err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
if (err < 0)
mgmt_pending_remove(cmd);
+ if (cp->val)
+ hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -422,31 +760,35 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -462,14 +804,14 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
- struct sock *skip_sk)
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 data_len, struct sock *skip_sk)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
@@ -482,7 +824,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
hdr = (void *) skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(event);
- hdr->index = cpu_to_le16(index);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
hdr->len = cpu_to_le16(data_len);
if (data)
@@ -494,20 +839,12 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
return 0;
}
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
- struct mgmt_mode rp;
-
- rp.val = val;
-
- return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
- struct mgmt_mode *cp, ev;
+ struct mgmt_mode *cp;
struct hci_dev *hdev;
+ __le32 ev;
int err;
cp = (void *) data;
@@ -515,211 +852,36 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (cp->val)
set_bit(HCI_PAIRABLE, &hdev->flags);
else
clear_bit(HCI_PAIRABLE, &hdev->flags);
- err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
+ err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
if (err < 0)
goto failed;
- ev.val = cp->val;
+ ev = cpu_to_le32(get_current_settings(hdev));
- err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-#define EIR_FLAGS 0x01 /* flags */
-#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
-#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
-#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
-#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
-#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
-#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
-#define EIR_NAME_SHORT 0x08 /* shortened local name */
-#define EIR_NAME_COMPLETE 0x09 /* complete local name */
-#define EIR_TX_POWER 0x0A /* transmit power level */
-#define EIR_DEVICE_ID 0x10 /* device ID */
-
-#define PNP_INFO_SVCLASS_ID 0x1200
-
-static u8 bluetooth_base_uuid[] = {
- 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static u16 get_uuid16(u8 *uuid128)
-{
- u32 val;
- int i;
-
- for (i = 0; i < 12; i++) {
- if (bluetooth_base_uuid[i] != uuid128[i])
- return 0;
- }
-
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
- if (val > 0xffff)
- return 0;
-
- return (u16) val;
-}
-
-static void create_eir(struct hci_dev *hdev, u8 *data)
-{
- u8 *ptr = data;
- u16 eir_len = 0;
- u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
- int i, truncated = 0;
- struct list_head *p;
- size_t name_len;
-
- name_len = strlen(hdev->dev_name);
-
- if (name_len > 0) {
- /* EIR Data type */
- if (name_len > 48) {
- name_len = 48;
- ptr[1] = EIR_NAME_SHORT;
- } else
- ptr[1] = EIR_NAME_COMPLETE;
-
- /* EIR Data length */
- ptr[0] = name_len + 1;
-
- memcpy(ptr + 2, hdev->dev_name, name_len);
-
- eir_len += (name_len + 2);
- ptr += (name_len + 2);
- }
-
- memset(uuid16_list, 0, sizeof(uuid16_list));
-
- /* Group all UUID16 types */
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
- u16 uuid16;
-
- uuid16 = get_uuid16(uuid->uuid);
- if (uuid16 == 0)
- return;
-
- if (uuid16 < 0x1100)
- continue;
-
- if (uuid16 == PNP_INFO_SVCLASS_ID)
- continue;
-
- /* Stop if not enough space to put next UUID */
- if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
- truncated = 1;
- break;
- }
-
- /* Check for duplicates */
- for (i = 0; uuid16_list[i] != 0; i++)
- if (uuid16_list[i] == uuid16)
- break;
-
- if (uuid16_list[i] == 0) {
- uuid16_list[i] = uuid16;
- eir_len += sizeof(u16);
- }
- }
-
- if (uuid16_list[0] != 0) {
- u8 *length = ptr;
-
- /* EIR Data type */
- ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
-
- ptr += 2;
- eir_len += 2;
-
- for (i = 0; uuid16_list[i] != 0; i++) {
- *ptr++ = (uuid16_list[i] & 0x00ff);
- *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
- }
-
- /* EIR Data length */
- *length = (i * sizeof(u16)) + 1;
- }
-}
-
-static int update_eir(struct hci_dev *hdev)
-{
- struct hci_cp_write_eir cp;
-
- if (!(hdev->features[6] & LMP_EXT_INQ))
- return 0;
-
- if (hdev->ssp_mode == 0)
- return 0;
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
-
- memset(&cp, 0, sizeof(cp));
-
- create_eir(hdev, cp.data);
-
- if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
- return 0;
-
- memcpy(hdev->eir, cp.data, sizeof(cp.data));
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
-static u8 get_service_classes(struct hci_dev *hdev)
-{
- struct list_head *p;
- u8 val = 0;
-
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
- val |= uuid->svc_hint;
- }
-
- return val;
-}
-
-static int update_class(struct hci_dev *hdev)
-{
- u8 cod[3];
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
-
- cod[0] = hdev->minor_class;
- cod[1] = hdev->major_class;
- cod[2] = get_service_classes(hdev);
-
- if (memcmp(cod, hdev->dev_class, 3) == 0)
- return 0;
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
-}
-
static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct mgmt_cp_add_uuid *cp;
@@ -732,13 +894,15 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
if (!uuid) {
@@ -762,7 +926,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -781,13 +945,15 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
@@ -807,7 +973,8 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
}
if (found == 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
@@ -822,7 +989,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -840,97 +1007,71 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->major_class = cp->major;
hdev->minor_class = cp->minor;
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) {
+ hci_dev_unlock(hdev);
+ cancel_delayed_work_sync(&hdev->service_cache);
+ hci_dev_lock(hdev);
+ update_eir(hdev);
+ }
+
err = update_class(hdev);
if (err == 0)
err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_set_service_cache *cp;
- int err;
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- BT_DBG("hci%u enable %d", index, cp->enable);
-
- if (cp->enable) {
- set_bit(HCI_SERVICE_CACHE, &hdev->flags);
- err = 0;
- } else {
- clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
- err = update_class(hdev);
- if (err == 0)
- err = update_eir(hdev);
- }
-
- if (err == 0)
- err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
- 0);
-
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_load_keys *cp;
+ struct mgmt_cp_load_link_keys *cp;
u16 key_count, expected_len;
int i;
cp = (void *) data;
if (len < sizeof(*cp))
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
key_count = get_unaligned_le16(&cp->key_count);
- expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
- BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
len, expected_len);
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
key_count);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -942,58 +1083,84 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
for (i = 0; i < key_count; i++) {
- struct mgmt_key_info *key = &cp->keys[i];
+ struct mgmt_link_key_info *key = &cp->keys[i];
hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
}
- hci_dev_unlock_bh(hdev);
+ cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return 0;
}
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_remove_key *cp;
+ struct mgmt_cp_remove_keys *cp;
+ struct mgmt_rp_remove_keys rp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
struct hci_conn *conn;
int err;
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
+
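+ /* Prepare the reply template; the status is refined as key
+  * removal and the optional disconnect proceed. */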
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+ rp.status = MGMT_STATUS_FAILED;
err = hci_remove_link_key(hdev, &cp->bdaddr);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ rp.status = MGMT_STATUS_NOT_PAIRED;
goto unlock;
}
- err = 0;
-
- if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+ if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
goto unlock;
+ }
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn) {
- struct hci_cp_disconnect dc;
+ if (!conn) {
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+ goto unlock;
+ }
- put_unaligned_le16(conn->handle, &dc.handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
}
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
unlock:
- hci_dev_unlock_bh(hdev);
+ if (err < 0)
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1013,21 +1180,25 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1036,11 +1207,12 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1054,16 +1226,36 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
+static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+{
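+ /* Map HCI link and LE address types to the mgmt address types
+  * exposed to user space; anything else is reported as invalid. */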
+ switch (link_type) {
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+ return MGMT_ADDR_LE_PUBLIC;
+ case ADDR_LE_DEV_RANDOM:
+ return MGMT_ADDR_LE_RANDOM;
+ default:
+ return MGMT_ADDR_INVALID;
+ }
+ case ACL_LINK:
+ return MGMT_ADDR_BREDR;
+ default:
+ return MGMT_ADDR_INVALID;
+ }
+}
+
static int get_connections(struct sock *sk, u16 index)
{
struct mgmt_rp_get_connections *rp;
struct hci_dev *hdev;
+ struct hci_conn *c;
struct list_head *p;
size_t rp_len;
u16 count;
@@ -1073,16 +1265,17 @@ static int get_connections(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
count = 0;
list_for_each(p, &hdev->conn_hash.list) {
count++;
}
- rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+ rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
err = -ENOMEM;
@@ -1092,17 +1285,22 @@ static int get_connections(struct sock *sk, u16 index)
put_unaligned_le16(count, &rp->conn_count);
i = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
- bacpy(&rp->conn[i++], &c->dst);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ bacpy(&rp->addr[i].bdaddr, &c->dst);
+ rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
+ if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ continue;
+ i++;
}
+ /* Recalculate length in case of filtered SCO connections, etc */
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
unlock:
kfree(rp);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -1113,7 +1311,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index,
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
sizeof(*cp));
if (!cmd)
return -ENOMEM;
@@ -1142,22 +1340,26 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
@@ -1169,12 +1371,12 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
if (err >= 0)
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1189,7 +1391,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1208,25 +1410,25 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENETDOWN);
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
err = send_pin_code_neg_reply(sk, index, hdev, cp);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1243,20 +1445,22 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
hdev->io_capability);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1265,19 +1469,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
+ struct pending_cmd *cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
continue;
- if (cmd->index != hdev->id)
- continue;
-
if (cmd->user_data != conn)
continue;
@@ -1292,7 +1489,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
- bacpy(&rp.bdaddr, &conn->dst);
+ bacpy(&rp.addr.bdaddr, &conn->dst);
+ rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
rp.status = status;
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
@@ -1314,20 +1512,18 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd) {
+ if (!cmd)
BT_DBG("Unable to find a pending command");
- return;
- }
-
- pairing_complete(cmd, status);
+ else
+ pairing_complete(cmd, status);
}
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct hci_dev *hdev;
struct mgmt_cp_pair_device *cp;
+ struct mgmt_rp_pair_device rp;
struct pending_cmd *cmd;
- struct adv_entry *entry;
u8 sec_level, auth_type;
struct hci_conn *conn;
int err;
@@ -1337,13 +1533,15 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
sec_level = BT_SECURITY_MEDIUM;
if (cp->io_cap == 0x03)
@@ -1351,26 +1549,33 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- entry = hci_find_adv_entry(hdev, &cp->bdaddr);
- if (entry)
- conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+ if (cp->addr.type == MGMT_ADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
else
- conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
+ rp.status = -PTR_ERR(conn);
+ err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+ &rp, sizeof(rp));
goto unlock;
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
- err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+ rp.status = EBUSY;
+ err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+ &rp, sizeof(rp));
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
@@ -1378,7 +1583,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (!entry)
+ if (cp->addr.type == MGMT_ADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -1393,62 +1598,151 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = 0;
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len, int success)
+static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
+ u16 mgmt_op, u16 hci_op, __le32 passkey)
{
- struct mgmt_cp_user_confirm_reply *cp = (void *) data;
- u16 mgmt_op, hci_op;
struct pending_cmd *cmd;
struct hci_dev *hdev;
+ struct hci_conn *conn;
int err;
- BT_DBG("");
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_INVALID_PARAMS);
- if (success) {
- mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
- hci_op = HCI_OP_USER_CONFIRM_REPLY;
- } else {
- mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
- hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
+ hci_dev_lock(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+ goto done;
}
- if (len != sizeof(*cp))
- return cmd_status(sk, index, mgmt_op, EINVAL);
+ /*
+ * Check for an existing ACL link, if present pair via
+ * HCI commands.
+ *
+ * If no ACL link is present, check for an LE link and if
+ * present, pair via the SMP engine.
+ *
+ * If neither ACL nor LE links are present, fail with error.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+ if (!conn) {
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED);
+ goto done;
+ }
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, mgmt_op, ENODEV);
+ /* Continue with pairing via SMP */
+ err = smp_user_confirm_reply(conn, mgmt_op, passkey);
- hci_dev_lock_bh(hdev);
+ if (!err)
+ err = cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_SUCCESS);
+ else
+ err = cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_FAILED);
- if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, mgmt_op, ENETDOWN);
- goto failed;
+ goto done;
}
- cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+ cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
if (!cmd) {
err = -ENOMEM;
- goto failed;
+ goto done;
}
- err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+ /* Continue with pairing via HCI */
+ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+ struct hci_cp_user_passkey_reply cp;
+
+ bacpy(&cp.bdaddr, bdaddr);
+ cp.passkey = passkey;
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+ } else
+ err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
+
if (err < 0)
mgmt_pending_remove(cmd);
-failed:
- hci_dev_unlock_bh(hdev);
+done:
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
+static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+ struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_confirm_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+ struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
@@ -1461,15 +1755,17 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("");
if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1482,7 +1778,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1499,28 +1795,29 @@ static int read_local_oob_data(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENETDOWN);
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EOPNOTSUPP);
+ MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -1531,7 +1828,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
mgmt_pending_remove(cmd);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1548,24 +1845,25 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
cp->randomizer);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+ err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_FAILED);
else
err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1582,62 +1880,68 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
if (err < 0)
err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- -err);
+ MGMT_STATUS_INVALID_PARAMS);
else
err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
NULL, 0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int start_discovery(struct sock *sk, u16 index)
+static int start_discovery(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
{
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
+ struct mgmt_cp_start_discovery *cp = (void *) data;
struct pending_cmd *cmd;
struct hci_dev *hdev;
int err;
BT_DBG("hci%u", index);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
+
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, 3);
- cp.length = 0x08;
- cp.num_rsp = 0x00;
-
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1653,22 +1957,23 @@ static int stop_discovery(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+ err = hci_cancel_inquiry(hdev);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1678,7 +1983,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_block_device *cp = (void *) data;
int err;
@@ -1686,33 +1990,24 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
+ hci_dev_lock(hdev);
err = hci_blacklist_add(hdev, &cp->bdaddr);
-
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+ err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+ MGMT_STATUS_FAILED);
else
err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1722,7 +2017,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_unblock_device *cp = (void *) data;
int err;
@@ -1730,33 +2024,25 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
+ hci_dev_lock(hdev);
err = hci_blacklist_del(hdev, &cp->bdaddr);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+ err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
else
err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1766,7 +2052,7 @@ static int set_fast_connectable(struct sock *sk, u16 index,
unsigned char *data, u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_set_fast_connectable *cp = (void *) data;
+ struct mgmt_mode *cp = (void *) data;
struct hci_cp_write_page_scan_activity acp;
u8 type;
int err;
@@ -1775,16 +2061,16 @@ static int set_fast_connectable(struct sock *sk, u16 index,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (cp->enable) {
+ if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
acp.interval = 0x0024; /* 22.5 msec page scan interval */
} else {
@@ -1798,14 +2084,14 @@ static int set_fast_connectable(struct sock *sk, u16 index,
sizeof(acp), &acp);
if (err < 0) {
err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- -err);
+ MGMT_STATUS_FAILED);
goto done;
}
err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
if (err < 0) {
err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- -err);
+ MGMT_STATUS_FAILED);
goto done;
}
@@ -1868,6 +2154,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_SET_CONNECTABLE:
err = set_connectable(sk, index, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_SET_FAST_CONNECTABLE:
+ err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
case MGMT_OP_SET_PAIRABLE:
err = set_pairable(sk, index, buf + sizeof(*hdr), len);
break;
@@ -1880,14 +2170,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_SET_DEV_CLASS:
err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_SET_SERVICE_CACHE:
- err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_LOAD_KEYS:
- err = load_keys(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_LOAD_LINK_KEYS:
+ err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_REMOVE_KEY:
- err = remove_key(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_REMOVE_KEYS:
+ err = remove_keys(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_DISCONNECT:
err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1908,10 +2195,18 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
err = pair_device(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_USER_CONFIRM_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+ err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+ err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
+ len);
break;
case MGMT_OP_SET_LOCAL_NAME:
err = set_local_name(sk, index, buf + sizeof(*hdr), len);
@@ -1927,7 +2222,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
len);
break;
case MGMT_OP_START_DISCOVERY:
- err = start_discovery(sk, index);
+ err = start_discovery(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_STOP_DISCOVERY:
err = stop_discovery(sk, index);
@@ -1938,13 +2233,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_UNBLOCK_DEVICE:
err = unblock_device(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_SET_FAST_CONNECTABLE:
- err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
- len);
- break;
default:
BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode, 0x01);
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
break;
}
@@ -1958,30 +2250,39 @@ done:
return err;
}
-int mgmt_index_added(u16 index)
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
- return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+ u8 *status = data;
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_pending_remove(cmd);
}
-int mgmt_index_removed(u16 index)
+int mgmt_index_added(struct hci_dev *hdev)
{
- return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+ return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+}
+
+int mgmt_index_removed(struct hci_dev *hdev)
+{
+ u8 status = ENODEV;
+
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
struct cmd_lookup {
u8 val;
struct sock *sk;
+ struct hci_dev *hdev;
};
-static void mode_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct pending_cmd *cmd, void *data)
{
- struct mgmt_mode *cp = cmd->param;
struct cmd_lookup *match = data;
- if (cp->val != match->val)
- return;
-
- send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+ send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
list_del(&cmd->list);
@@ -1993,17 +2294,23 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_free(cmd);
}
-int mgmt_powered(u16 index, u8 powered)
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { powered, NULL };
+ struct cmd_lookup match = { powered, NULL, hdev };
+ __le32 ev;
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- ev.val = powered;
+ if (!powered) {
+ u8 status = ENETDOWN;
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ }
+
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
+ match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2011,36 +2318,36 @@ int mgmt_powered(u16 index, u8 powered)
return ret;
}
-int mgmt_discoverable(u16 index, u8 discoverable)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { discoverable, NULL };
+ struct cmd_lookup match = { discoverable, NULL, hdev };
+ __le32 ev;
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match);
- ev.val = discoverable;
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
match.sk);
-
if (match.sk)
sock_put(match.sk);
return ret;
}
-int mgmt_connectable(u16 index, u8 connectable)
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { connectable, NULL };
+ __le32 ev;
+ struct cmd_lookup match = { connectable, NULL, hdev };
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
+ &match);
- ev.val = connectable;
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2048,9 +2355,25 @@ int mgmt_connectable(u16 index, u8 connectable)
return ret;
}
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+{
+ u8 mgmt_err = mgmt_status(status);
+
+ if (scan & SCAN_PAGE)
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ if (scan & SCAN_INQUIRY)
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ u8 persistent)
{
- struct mgmt_ev_new_key ev;
+ struct mgmt_ev_new_link_key ev;
memset(&ev, 0, sizeof(ev));
@@ -2060,17 +2383,18 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
- return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type)
{
- struct mgmt_ev_connected ev;
+ struct mgmt_addr_info ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.link_type = link_type;
+ ev.type = link_to_mgmt(link_type, addr_type);
- return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2080,6 +2404,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
struct mgmt_rp_disconnect rp;
bacpy(&rp.bdaddr, &cp->bdaddr);
+ rp.status = 0;
cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
@@ -2089,75 +2414,110 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
+{
+ u8 *status = data;
+ struct mgmt_cp_remove_keys *cp = cmd->param;
+ struct mgmt_rp_remove_keys rp;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+ if (status != NULL)
+ rp.status = *status;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type)
{
- struct mgmt_ev_disconnected ev;
+ struct mgmt_addr_info ev;
struct sock *sk = NULL;
int err;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
+ ev.type = link_to_mgmt(link_type, addr_type);
- err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
if (sk)
sock_put(sk);
+ mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+
return err;
}
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
{
struct pending_cmd *cmd;
+ u8 mgmt_err = mgmt_status(status);
int err;
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+ if (bdaddr) {
+ struct mgmt_rp_disconnect rp;
+
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ &rp, sizeof(rp));
+ } else
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
+ mgmt_err);
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.bdaddr, bdaddr);
ev.secure = secure;
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ rp.status = mgmt_status(status);
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2165,20 +2525,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ rp.status = mgmt_status(status);
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2186,97 +2547,119 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
- u8 confirm_hint)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __le32 value, u8 confirm_hint)
{
struct mgmt_ev_user_confirm_request ev;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
bacpy(&ev.bdaddr, bdaddr);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
- return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_user_passkey_request ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
- u8 opcode)
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status, u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
int err;
- cmd = mgmt_pending_find(opcode, index);
+ cmd = mgmt_pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
- err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+ rp.status = mgmt_status(status);
+ err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return user_pairing_resp_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return user_pairing_resp_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, status,
+ MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
{
struct mgmt_ev_auth_failed ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct pending_cmd *cmd;
- struct hci_dev *hdev;
struct mgmt_cp_set_local_name ev;
int err;
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd)
goto send_event;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ mgmt_status(status));
goto failed;
}
- hdev = hci_dev_get(index);
- if (hdev) {
- hci_dev_lock_bh(hdev);
- update_eir(hdev);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
- }
+ update_eir(hdev);
- err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
sizeof(ev));
if (err < 0)
goto failed;
send_event:
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
failed:
@@ -2285,29 +2668,31 @@ failed:
return err;
}
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
- u8 status)
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u status %u", index, status);
+ BT_DBG("%s status %u", hdev->name, status);
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EIO);
+ err = cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ &rp, sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -2315,14 +2700,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
return err;
}
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
- u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
{
struct mgmt_ev_device_found ev;
memset(&ev, 0, sizeof(ev));
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.rssi = rssi;
if (eir)
@@ -2331,10 +2717,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
if (dev_class)
memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
- return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
{
struct mgmt_ev_remote_name ev;
@@ -2343,37 +2729,79 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
bacpy(&ev.bdaddr, bdaddr);
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
+ mgmt_pending_remove(cmd);
+
+ return err;
}
-int mgmt_discovering(u16 index, u8 discovering)
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
- return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+ struct pending_cmd *cmd;
+
+ if (discovering)
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ else
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+ if (cmd != NULL) {
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+ mgmt_pending_remove(cmd);
+ }
+
+ return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
sizeof(discovering), NULL);
}
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_blocked ev;
- cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_unblocked ev;
- cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig
index 405a0e61e7d..22e718b554e 100644
--- a/net/bluetooth/rfcomm/Kconfig
+++ b/net/bluetooth/rfcomm/Kconfig
@@ -1,6 +1,6 @@
config BT_RFCOMM
tristate "RFCOMM protocol support"
- depends on BT && BT_L2CAP
+ depends on BT
help
RFCOMM provides connection oriented stream transport. RFCOMM
support is required for Dialup Networking, OBEX and other Bluetooth
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f..501649bf559 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -51,8 +51,8 @@
#define VERSION "1.11"
-static int disable_cfc;
-static int l2cap_ertm;
+static bool disable_cfc;
+static bool l2cap_ertm;
static int channel_mtu = -1;
static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
@@ -377,13 +377,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
- struct list_head *p;
- list_for_each(p, &s->dlcs) {
- d = list_entry(p, struct rfcomm_dlc, list);
+ list_for_each_entry(d, &s->dlcs, list)
if (d->dlci == dlci)
return d;
- }
+
return NULL;
}
@@ -751,7 +749,6 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
/* ---- RFCOMM frame sending ---- */
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
- struct socket *sock = s->sock;
struct kvec iv = { data, len };
struct msghdr msg;
@@ -759,7 +756,14 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
memset(&msg, 0, sizeof(msg));
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return kernel_sendmsg(s->sock, &msg, &iv, 1, len);
+}
+
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+ BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+ return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
@@ -773,7 +777,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +791,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +805,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -837,7 +841,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -1146,6 +1150,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
}
break;
@@ -2120,15 +2125,13 @@ static struct hci_cb rfcomm_cb = {
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
- struct list_head *pp, *p;
rfcomm_lock();
- list_for_each(p, &session_list) {
- s = list_entry(p, struct rfcomm_session, list);
- list_for_each(pp, &s->dlcs) {
+ list_for_each_entry(s, &session_list, list) {
+ struct rfcomm_dlc *d;
+ list_for_each_entry(d, &s->dlcs, list) {
struct sock *sk = s->sock->sk;
- struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
seq_printf(f, "%s %s %ld %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
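
Several hunks in this patch (rfcomm core, the tty driver, and the debugfs dump above) replace open-coded list_entry() walks with list_for_each_entry(). A minimal sketch of the conversion, reusing the rfcomm_dlc names from the hunk above:

	/* Before: iterate over list_head nodes and look up the container by hand */
	struct list_head *p;
	list_for_each(p, &s->dlcs) {
		struct rfcomm_dlc *d = list_entry(p, struct rfcomm_dlc, list);
		if (d->dlci == dlci)
			return d;
	}

	/* After: the _entry variant hides the container_of() lookup */
	struct rfcomm_dlc *d;
	list_for_each_entry(d, &s->dlcs, list)
		if (d->dlci == dlci)
			return d;
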
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5417f612732..aea2bdd1510 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
}
+ skb->priority = sk->sk_priority;
+
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796313e..fa8f4de53b9 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@ struct rfcomm_dev {
struct rfcomm_dlc *dlc;
struct tty_struct *tty;
wait_queue_head_t wait;
- struct tasklet_struct wakeup_task;
+ struct work_struct wakeup_task;
struct device *tty_dev;
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(unsigned long arg);
+static void rfcomm_tty_wakeup(struct work_struct *work);
/* ---- Device functions ---- */
static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- struct list_head *p;
- list_for_each(p, &rfcomm_dev_list) {
- dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list)
if (dev->id == id)
return dev;
- }
return NULL;
}
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev;
+ struct rfcomm_dev *dev, *entry;
struct list_head *head = &rfcomm_dev_list, *p;
int err = 0;
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each(p, &rfcomm_dev_list) {
- if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ if (entry->id != dev->id)
break;
dev->id++;
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
} else {
dev->id = req->dev_id;
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
atomic_set(&dev->opened, 0);
init_waitqueue_head(&dev->wait);
- tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+ INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
skb_queue_head_init(&dev->pending);
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
struct rfcomm_dev *dev = (void *) skb->sk;
atomic_sub(skb->truesize, &dev->wmem_alloc);
if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- tasklet_schedule(&dev->wakeup_task);
+ queue_work(system_nrt_wq, &dev->wakeup_task);
rfcomm_dev_put(dev);
}
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg)
static int rfcomm_get_dev_list(void __user *arg)
{
+ struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg)
read_lock_bh(&rfcomm_dev_lock);
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
continue;
(di + n)->id = dev->id;
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
+static void rfcomm_tty_wakeup(struct work_struct *work)
{
- struct rfcomm_dev *dev = (void *) arg;
+ struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
+ wakeup_task);
struct tty_struct *tty = dev->tty;
if (!tty)
return;
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- tasklet_kill(&dev->wakeup_task);
+ cancel_work_sync(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = {
int __init rfcomm_init_ttys(void)
{
+ int error;
+
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -1;
+ return -ENOMEM;
rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- if (tty_register_driver(rfcomm_tty_driver)) {
+ error = tty_register_driver(rfcomm_tty_driver);
+ if (error) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return -1;
+ return error;
}
BT_INFO("RFCOMM TTY layer initialized");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a324b009e34..5dc2f2126fa 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -51,7 +51,7 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
-static int disable_esco;
+static bool disable_esco;
static const struct proto_ops sco_sock_ops;
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
@@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk)
}
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -893,15 +893,12 @@ done:
}
/* ----- SCO interface with lower layer (HCI) ----- */
-static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
register struct sock *sk;
struct hlist_node *node;
int lm = 0;
- if (type != SCO_LINK && type != ESCO_LINK)
- return -EINVAL;
-
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
@@ -921,13 +918,9 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
return lm;
}
-static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
if (!status) {
struct sco_conn *conn;
@@ -940,19 +933,15 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
return 0;
}
-static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
sco_conn_del(hcon, bt_to_errno(reason));
-
return 0;
}
-static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
@@ -1028,15 +1017,6 @@ static const struct net_proto_family sco_sock_family_ops = {
.create = sco_sock_create,
};
-static struct hci_proto sco_hci_proto = {
- .name = "SCO",
- .id = HCI_PROTO_SCO,
- .connect_ind = sco_connect_ind,
- .connect_cfm = sco_connect_cfm,
- .disconn_cfm = sco_disconn_cfm,
- .recv_scodata = sco_recv_scodata
-};
-
int __init sco_init(void)
{
int err;
@@ -1051,13 +1031,6 @@ int __init sco_init(void)
goto error;
}
- err = hci_register_proto(&sco_hci_proto);
- if (err < 0) {
- BT_ERR("SCO protocol registration failed");
- bt_sock_unregister(BTPROTO_SCO);
- goto error;
- }
-
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
@@ -1081,9 +1054,6 @@ void __exit sco_exit(void)
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
- if (hci_unregister_proto(&sco_hci_proto) < 0)
- BT_ERR("SCO protocol unregistration failed");
-
proto_unregister(&sco_proto);
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 759b6357264..32c47de3034 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
@@ -181,30 +182,53 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ skb->priority = HCI_PRIO_MAX;
+ hci_send_acl(conn->hchan, skb, 0);
- mod_timer(&conn->security_timer, jiffies +
+ cancel_delayed_work_sync(&conn->security_timer);
+ schedule_delayed_work(&conn->security_timer,
msecs_to_jiffies(SMP_TIMEOUT));
}
+static __u8 authreq_to_seclevel(__u8 authreq)
+{
+ if (authreq & SMP_AUTH_MITM)
+ return BT_SECURITY_HIGH;
+ else
+ return BT_SECURITY_MEDIUM;
+}
+
+static __u8 seclevel_to_authreq(__u8 sec_level)
+{
+ switch (sec_level) {
+ case BT_SECURITY_HIGH:
+ return SMP_AUTH_MITM | SMP_AUTH_BONDING;
+ case BT_SECURITY_MEDIUM:
+ return SMP_AUTH_BONDING;
+ default:
+ return SMP_AUTH_NONE;
+ }
+}
+
static void build_pairing_cmd(struct l2cap_conn *conn,
struct smp_cmd_pairing *req,
struct smp_cmd_pairing *rsp,
__u8 authreq)
{
- u8 dist_keys;
+ u8 dist_keys = 0;
- dist_keys = 0;
if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
dist_keys = SMP_DIST_ENC_KEY;
authreq |= SMP_AUTH_BONDING;
+ } else {
+ authreq &= ~SMP_AUTH_BONDING;
}
if (rsp == NULL) {
req->io_capability = conn->hcon->io_capability;
req->oob_flag = SMP_OOB_NOT_PRESENT;
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- req->init_key_dist = dist_keys;
+ req->init_key_dist = 0;
req->resp_key_dist = dist_keys;
req->auth_req = authreq;
return;
@@ -213,7 +237,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
rsp->io_capability = conn->hcon->io_capability;
rsp->oob_flag = SMP_OOB_NOT_PRESENT;
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
- rsp->init_key_dist = req->init_key_dist & dist_keys;
+ rsp->init_key_dist = 0;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
rsp->auth_req = authreq;
}
@@ -231,6 +255,107 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
return 0;
}
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+ if (send)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+ mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
+ cancel_delayed_work_sync(&conn->security_timer);
+ smp_chan_destroy(conn);
+}
+
+#define JUST_WORKS 0x00
+#define JUST_CFM 0x01
+#define REQ_PASSKEY 0x02
+#define CFM_PASSKEY 0x03
+#define REQ_OOB 0x04
+#define OVERLAP 0xFF
+
+static const u8 gen_method[5][5] = {
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+ { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
+};
+
+static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ u8 local_io, u8 remote_io)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp = conn->smp_chan;
+ u8 method;
+ u32 passkey = 0;
+ int ret = 0;
+
+ /* Initialize key for JUST WORKS */
+ memset(smp->tk, 0, sizeof(smp->tk));
+ clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+
+ BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
+
+ /* If neither side wants MITM, use JUST WORKS */
+ /* If either side has unknown io_caps, use JUST WORKS */
+ /* Otherwise, look up method from the table */
+ if (!(auth & SMP_AUTH_MITM) ||
+ local_io > SMP_IO_KEYBOARD_DISPLAY ||
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ method = JUST_WORKS;
+ else
+ method = gen_method[local_io][remote_io];
+
+ /* If not bonding, don't ask user to confirm a Zero TK */
+ if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
+ method = JUST_WORKS;
+
+ /* If Just Works, Continue with Zero TK */
+ if (method == JUST_WORKS) {
+ set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+ return 0;
+ }
+
+ /* Not Just Works/Confirm results in MITM Authentication */
+ if (method != JUST_CFM)
+ set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
+
+ /* If both devices have Keyboard-Display I/O, the master
+ * Confirms and the slave Enters the passkey.
+ */
+ if (method == OVERLAP) {
+ if (hcon->link_mode & HCI_LM_MASTER)
+ method = CFM_PASSKEY;
+ else
+ method = REQ_PASSKEY;
+ }
+
+ /* Generate random passkey. Not valid until confirmed. */
+ if (method == CFM_PASSKEY) {
+ u8 key[16];
+
+ memset(key, 0, sizeof(key));
+ get_random_bytes(&passkey, sizeof(passkey));
+ passkey %= 1000000;
+ put_unaligned_le32(passkey, key);
+ swap128(key, smp->tk);
+ BT_DBG("PassKey: %d", passkey);
+ }
+
+ hci_dev_lock(hcon->hdev);
+
+ if (method == REQ_PASSKEY)
+ ret = mgmt_user_passkey_request(hcon->hdev, conn->dst);
+ else
+ ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+ cpu_to_le32(passkey), 0);
+
+ hci_dev_unlock(hcon->hdev);
+
+ return ret;
+}
+
static void confirm_work(struct work_struct *work)
{
struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
@@ -263,14 +388,15 @@ static void confirm_work(struct work_struct *work)
goto error;
}
+ clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+
swap128(res, cp.confirm_val);
smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
return;
error:
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
- smp_chan_destroy(conn);
+ smp_failure(conn, reason, 1);
}
static void random_work(struct work_struct *work)
@@ -353,8 +479,7 @@ static void random_work(struct work_struct *work)
return;
error:
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
- smp_chan_destroy(conn);
+ smp_failure(conn, reason, 1);
}
static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -370,6 +495,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
smp->conn = conn;
conn->smp_chan = smp;
+ conn->hcon->smp_conn = conn;
hci_conn_hold(conn->hcon);
@@ -378,19 +504,73 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
void smp_chan_destroy(struct l2cap_conn *conn)
{
- kfree(conn->smp_chan);
+ struct smp_chan *smp = conn->smp_chan;
+
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+
+ if (smp->tfm)
+ crypto_free_blkcipher(smp->tfm);
+
+ kfree(smp);
+ conn->smp_chan = NULL;
+ conn->hcon->smp_conn = NULL;
hci_conn_put(conn->hcon);
}
+int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
+{
+ struct l2cap_conn *conn = hcon->smp_conn;
+ struct smp_chan *smp;
+ u32 value;
+ u8 key[16];
+
+ BT_DBG("");
+
+ if (!conn)
+ return -ENOTCONN;
+
+ smp = conn->smp_chan;
+
+ switch (mgmt_op) {
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ value = le32_to_cpu(passkey);
+ memset(key, 0, sizeof(key));
+ BT_DBG("PassKey: %d", value);
+ put_unaligned_le32(value, key);
+ swap128(key, smp->tk);
+ /* Fall Through */
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
+ break;
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+ return 0;
+ default:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED, 1);
+ return -EOPNOTSUPP;
+ }
+
+ /* If it is our turn to send Pairing Confirm, do so now */
+ if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags))
+ queue_work(hcon->hdev->workqueue, &smp->confirm);
+
+ return 0;
+}
+
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct smp_chan *smp;
u8 key_size;
+ u8 auth = SMP_AUTH_NONE;
int ret;
BT_DBG("conn %p", conn);
+ if (conn->hcon->link_mode & HCI_LM_MASTER)
+ return SMP_CMD_NOTSUPP;
+
if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
smp = smp_chan_create(conn);
@@ -400,19 +580,16 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
- if (req->oob_flag)
- return SMP_OOB_NOT_AVAIL;
+ /* We didn't start the pairing, so match remote */
+ if (req->auth_req & SMP_AUTH_BONDING)
+ auth = req->auth_req;
- /* We didn't start the pairing, so no requirements */
- build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
+ build_pairing_cmd(conn, req, &rsp, auth);
key_size = min(req->max_key_size, rsp.max_key_size);
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
- /* Just works */
- memset(smp->tk, 0, sizeof(smp->tk));
-
ret = smp_rand(smp->prnd);
if (ret)
return SMP_UNSPECIFIED;
@@ -422,6 +599,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+ /* Request setup of TK */
+ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
return 0;
}
@@ -430,11 +612,14 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
struct smp_chan *smp = conn->smp_chan;
struct hci_dev *hdev = conn->hcon->hdev;
- u8 key_size;
+ u8 key_size, auth = SMP_AUTH_NONE;
int ret;
BT_DBG("conn %p", conn);
+ if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+ return SMP_CMD_NOTSUPP;
+
skb_pull(skb, sizeof(*rsp));
req = (void *) &smp->preq[1];
@@ -443,12 +628,6 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
- if (rsp->oob_flag)
- return SMP_OOB_NOT_AVAIL;
-
- /* Just works */
- memset(smp->tk, 0, sizeof(smp->tk));
-
ret = smp_rand(smp->prnd);
if (ret)
return SMP_UNSPECIFIED;
@@ -456,6 +635,22 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
+ if ((req->auth_req & SMP_AUTH_BONDING) &&
+ (rsp->auth_req & SMP_AUTH_BONDING))
+ auth = SMP_AUTH_BONDING;
+
+ auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
+
+ ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
+
+ /* Can't compose response until we have been confirmed */
+ if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
+ return 0;
+
queue_work(hdev->workqueue, &smp->confirm);
return 0;
@@ -477,8 +672,10 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
swap128(smp->prnd, random);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
random);
- } else {
+ } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
queue_work(hdev->workqueue, &smp->confirm);
+ } else {
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
}
return 0;
@@ -531,7 +728,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
- hcon->pending_sec_level = BT_SECURITY_MEDIUM;
+ hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
if (smp_ltk_encrypt(conn))
return 0;
@@ -558,6 +755,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
{
struct hci_conn *hcon = conn->hcon;
struct smp_chan *smp = conn->smp_chan;
+ __u8 authreq;
BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
@@ -578,18 +776,22 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
return 0;
smp = smp_chan_create(conn);
+ if (!smp)
+ return 1;
+
+ authreq = seclevel_to_authreq(sec_level);
if (hcon->link_mode & HCI_LM_MASTER) {
struct smp_cmd_pairing cp;
- build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
+ build_pairing_cmd(conn, &cp, NULL, authreq);
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
} else {
struct smp_cmd_security_req cp;
- cp.auth_req = SMP_AUTH_NONE;
+ cp.auth_req = authreq;
smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
}
@@ -618,7 +820,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*rp));
- hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
+ hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
rp->ediv, rp->rand, smp->tk);
smp_distribute_keys(conn, 1);
@@ -646,6 +848,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case SMP_CMD_PAIRING_FAIL:
+ smp_failure(conn, skb->data[0], 0);
reason = 0;
err = -EPERM;
break;
@@ -691,8 +894,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
done:
if (reason)
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
+ smp_failure(conn, reason, 1);
kfree_skb(skb);
return err;
@@ -781,7 +983,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
if (conn->hcon->out || force) {
clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
- del_timer(&conn->security_timer);
+ cancel_delayed_work_sync(&conn->security_timer);
smp_chan_destroy(conn);
}
diff --git a/net/bridge/br.c b/net/bridge/br.c
index f20c4fd915a..ba780cc8e51 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -62,7 +62,7 @@ static int __init br_init(void)
brioctl_set(br_ioctl_deviceless_stub);
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = br_fdb_test_addr;
#endif
@@ -93,7 +93,7 @@ static void __exit br_deinit(void)
rcu_barrier(); /* Wait for completion of call_rcu()'s */
br_netfilter_fini();
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = NULL;
#endif
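
The bridge hunks in this patch repeatedly replace the two-macro config test with IS_ENABLED(). The two spellings are equivalent: both compile the guarded block when the option is built in or built as a module (CONFIG_FOO below is a placeholder, not a real option):

	/* Old spelling: */
	#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	/* compiled when FOO is y or m */
	#endif

	/* New spelling, same meaning: */
	#if IS_ENABLED(CONFIG_FOO)
	/* compiled when FOO is y or m */
	#endif
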
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index feb77ea7b58..71773b014e0 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -170,8 +170,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
return -EINVAL;
spin_lock_bh(&br->lock);
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- br_stp_change_bridge_id(br, addr->sa_data);
+ if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ br_fdb_change_mac_address(br, addr->sa_data);
+ br_stp_change_bridge_id(br, addr->sa_data);
+ }
br->flags |= BR_SET_MAC_ADDR;
spin_unlock_bh(&br->lock);
@@ -186,7 +189,8 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->bus_info, "N/A");
}
-static u32 br_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t br_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct net_bridge *br = netdev_priv(dev);
@@ -341,10 +345,10 @@ void br_dev_setup(struct net_device *dev)
dev->priv_flags = IFF_EBRIDGE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX;
br->dev = dev;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c8e7861b88b..f963f6b1884 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,7 +28,8 @@
static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr);
-static void fdb_notify(const struct net_bridge_fdb_entry *, int);
+static void fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *, int);
static u32 fdb_salt __read_mostly;
@@ -80,10 +81,10 @@ static void fdb_rcu_free(struct rcu_head *head)
kmem_cache_free(br_fdb_cache, ent);
}
-static inline void fdb_delete(struct net_bridge_fdb_entry *f)
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
- fdb_notify(f, RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
+ fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
}
@@ -114,7 +115,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
}
/* delete old one */
- fdb_delete(f);
+ fdb_delete(br, f);
goto insert;
}
}
@@ -126,6 +127,18 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
spin_unlock_bh(&br->hash_lock);
}
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+ struct net_bridge_fdb_entry *f;
+
+ /* If old entry was unassociated with any port, then delete it. */
+ f = __br_fdb_get(br, br->dev->dev_addr);
+ if (f && f->is_local && !f->dst)
+ fdb_delete(br, f);
+
+ fdb_insert(br, NULL, newaddr);
+}
+
void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
@@ -144,7 +157,7 @@ void br_fdb_cleanup(unsigned long _data)
continue;
this_timer = f->updated + delay;
if (time_before_eq(this_timer, jiffies))
- fdb_delete(f);
+ fdb_delete(br, f);
else if (time_before(this_timer, next_timer))
next_timer = this_timer;
}
@@ -165,7 +178,7 @@ void br_fdb_flush(struct net_bridge *br)
struct hlist_node *h, *n;
hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
if (!f->is_static)
- fdb_delete(f);
+ fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
@@ -209,7 +222,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
}
}
- fdb_delete(f);
+ fdb_delete(br, f);
skip_delete: ;
}
}
@@ -234,7 +247,7 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
return NULL;
}
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
* if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
@@ -249,7 +262,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
ret = 0;
else {
fdb = __br_fdb_get(port->br, addr);
- ret = fdb && fdb->dst->dev != dev &&
+ ret = fdb && fdb->dst && fdb->dst->dev != dev &&
fdb->dst->state == BR_STATE_FORWARDING;
}
rcu_read_unlock();
@@ -281,6 +294,10 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
if (has_expired(br, f))
continue;
+ /* ignore pseudo entry for local MAC address */
+ if (!f->dst)
+ continue;
+
if (skip) {
--skip;
continue;
@@ -347,7 +364,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb->is_static = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
- fdb_notify(fdb, RTM_NEWNEIGH);
}
return fdb;
}
@@ -371,7 +387,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
br_warn(br, "adding interface %s with same address "
"as a received packet\n",
source->dev->name);
- fdb_delete(fdb);
+ fdb_delete(br, fdb);
}
fdb = fdb_create(head, source, addr);
@@ -379,6 +395,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
return -ENOMEM;
fdb->is_local = fdb->is_static = 1;
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
}
@@ -424,9 +441,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
}
} else {
spin_lock(&br->hash_lock);
- if (likely(!fdb_find(head, addr)))
- fdb_create(head, source, addr);
-
+ if (likely(!fdb_find(head, addr))) {
+ fdb = fdb_create(head, source, addr);
+ if (fdb)
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
+ }
/* else we lose race and someone else inserts
* it first, don't bother updating
*/
@@ -446,7 +465,7 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
return NUD_REACHABLE;
}
-static int fdb_fill_info(struct sk_buff *skb,
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb,
u32 pid, u32 seq, int type, unsigned int flags)
{
@@ -459,14 +478,13 @@ static int fdb_fill_info(struct sk_buff *skb,
if (nlh == NULL)
return -EMSGSIZE;
-
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = 0;
ndm->ndm_type = 0;
- ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+ ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(fdb);
NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
@@ -491,9 +509,10 @@ static inline size_t fdb_nlmsg_size(void)
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
-static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+static void fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type)
{
- struct net *net = dev_net(fdb->dst->dev);
+ struct net *net = dev_net(br->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
@@ -501,7 +520,7 @@ static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
if (skb == NULL)
goto errout;
- err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+ err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -538,7 +557,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (idx < cb->args[0])
goto skip;
- if (fdb_fill_info(skb, f,
+ if (fdb_fill_info(skb, br, f,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
@@ -556,7 +575,7 @@ skip:
return skb->len;
}
-/* Create new static fdb entry */
+/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
__u16 state, __u16 flags)
{
@@ -572,19 +591,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
fdb = fdb_create(head, source, addr);
if (!fdb)
return -ENOMEM;
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
} else {
if (flags & NLM_F_EXCL)
return -EEXIST;
+ }
+
+ if (fdb_to_nud(fdb) != state) {
+ if (state & NUD_PERMANENT)
+ fdb->is_local = fdb->is_static = 1;
+ else if (state & NUD_NOARP) {
+ fdb->is_local = 0;
+ fdb->is_static = 1;
+ } else
+ fdb->is_local = fdb->is_static = 0;
- if (flags & NLM_F_REPLACE)
- fdb->updated = fdb->used = jiffies;
- fdb->is_local = fdb->is_static = 0;
+ fdb->updated = fdb->used = jiffies;
+ fdb_notify(br, fdb, RTM_NEWNEIGH);
}
- if (state & NUD_PERMANENT)
- fdb->is_local = fdb->is_static = 1;
- else if (state & NUD_NOARP)
- fdb->is_static = 1;
return 0;
}
@@ -627,6 +652,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -EINVAL;
}
+ if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+ pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+ return -EINVAL;
+ }
+
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -634,9 +664,15 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -EINVAL;
}
- spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
- spin_unlock_bh(&p->br->hash_lock);
+ if (ndm->ndm_flags & NTF_USE) {
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr);
+ rcu_read_unlock();
+ } else {
+ spin_lock_bh(&p->br->hash_lock);
+ err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+ spin_unlock_bh(&p->br->hash_lock);
+ }
return err;
}
@@ -651,7 +687,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
if (!fdb)
return -ENOENT;
- fdb_delete(fdb);
+ fdb_delete(p->br, fdb);
return 0;
}
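/*
 * Editor's note: illustrative userspace sketch (not kernel code) of the
 * NUD-state-to-flag mapping introduced in fdb_add_entry() above.  The
 * NUD_* values follow the kernel's neighbour.h; struct fdb_flags,
 * map_state() and main() are hypothetical names added for this sketch.
 */
#include <stdio.h>

#define NUD_REACHABLE 0x02
#define NUD_NOARP     0x40
#define NUD_PERMANENT 0x80

struct fdb_flags { int is_local, is_static; };

static struct fdb_flags map_state(unsigned short state)
{
	struct fdb_flags f = {0, 0};

	if (state & NUD_PERMANENT)	/* local + static entry */
		f.is_local = f.is_static = 1;
	else if (state & NUD_NOARP)	/* static only */
		f.is_static = 1;
	/* NUD_REACHABLE: ordinary learned entry, both flags stay 0 */
	return f;
}

int main(void)
{
	struct fdb_flags f = map_state(NUD_PERMANENT);

	printf("permanent: local=%d static=%d\n", f.is_local, f.is_static);
	return 0;
}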
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index ee64287f129..61f65344e71 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -98,7 +98,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
- if (should_deliver(to, skb)) {
+ if (to && should_deliver(to, skb)) {
__br_deliver(to, skb);
return;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f603e5b0b93..0a942fbccc9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -296,10 +296,11 @@ int br_min_mtu(const struct net_bridge *br)
/*
* Recomputes features using slave's features
*/
-u32 br_features_recompute(struct net_bridge *br, u32 features)
+netdev_features_t br_features_recompute(struct net_bridge *br,
+ netdev_features_t features)
{
struct net_bridge_port *p;
- u32 mask;
+ netdev_features_t mask;
if (list_empty(&br->port_list))
return features;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5f4e576980..568d5bf1753 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/addrconf.h>
@@ -36,7 +36,7 @@
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
{
if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
@@ -52,7 +52,7 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
switch (a->proto) {
case htons(ETH_P_IP):
return a->u.ip4 == b->u.ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
@@ -65,7 +65,7 @@ static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
const struct in6_addr *ip)
{
@@ -79,7 +79,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
switch (ip->proto) {
case htons(ETH_P_IP):
return __br_ip4_hash(mdb, ip->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return __br_ip6_hash(mdb, &ip->u.ip6);
#endif
@@ -121,13 +121,13 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
return br_mdb_ip_get(mdb, &br_dst);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
{
struct br_ip br_dst;
- ipv6_addr_copy(&br_dst.u.ip6, dst);
+ br_dst.u.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
return br_mdb_ip_get(mdb, &br_dst);
@@ -152,9 +152,9 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
case htons(ETH_P_IP):
ip.u.ip4 = ip_hdr(skb)->daddr;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+ ip.u.ip6 = ipv6_hdr(skb)->daddr;
break;
#endif
default:
@@ -411,7 +411,7 @@ out:
return skb;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
const struct in6_addr *group)
{
@@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
mldq->mld_cksum = 0;
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
mldq->mld_reserved = 0;
- ipv6_addr_copy(&mldq->mld_mca, group);
+ mldq->mld_mca = *group;
/* checksum */
mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -496,7 +496,7 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
switch (addr->proto) {
case htons(ETH_P_IP):
return br_ip4_multicast_alloc_query(br, addr->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
@@ -773,7 +773,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
return br_multicast_add_group(br, port, &br_group);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group)
@@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
if (!ipv6_is_transient_multicast(group))
return 0;
- ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
return br_multicast_add_group(br, port, &br_group);
@@ -845,7 +845,7 @@ static void br_multicast_send_query(struct net_bridge *br,
br_group.proto = htons(ETH_P_IP);
__br_multicast_send_query(br, port, &br_group);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
br_group.proto = htons(ETH_P_IPV6);
__br_multicast_send_query(br, port, &br_group);
#endif
@@ -989,7 +989,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
return err;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
@@ -1185,7 +1185,7 @@ out:
return err;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
@@ -1334,7 +1334,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
br_multicast_leave_group(br, port, &br_group);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group)
@@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
if (!ipv6_is_transient_multicast(group))
return;
- ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_multicast_leave_group(br, port, &br_group);
@@ -1449,7 +1449,7 @@ err_out:
return err;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb)
@@ -1458,6 +1458,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
const struct ipv6hdr *ip6h;
u8 icmp6_type;
u8 nexthdr;
+ __be16 frag_off;
unsigned len;
int offset;
int err;
@@ -1483,7 +1484,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
return -EINVAL;
nexthdr = ip6h->nexthdr;
- offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+ offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
return 0;
@@ -1595,7 +1596,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
switch (skb->protocol) {
case htons(ETH_P_IP):
return br_multicast_ipv4_rcv(br, port, skb);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_multicast_ipv6_rcv(br, port, skb);
#endif
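/*
 * Editor's note: a minimal, self-contained re-creation of the
 * IS_ENABLED() preprocessor trick used throughout the bridge and CAIF
 * hunks in this patch, so the defined(FOO) || defined(FOO_MODULE)
 * -> IS_ENABLED(FOO) conversion is easier to follow.  This is a
 * simplified sketch, not the kernel's <linux/kconfig.h>; the macro
 * names below are chosen for this demo.
 */
#include <stdio.h>

/* Kconfig defines enabled options to 1 ("y") or OPTION_MODULE to 1 ("m") */
#define CONFIG_IPV6_MODULE 1

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)		___is_defined(x)
#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

/* 1 if the option is built in ("y") or modular ("m"), else 0 */
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

int main(void)
{
	printf("IS_ENABLED(CONFIG_IPV6) = %d\n", IS_ENABLED(CONFIG_IPV6));
	return 0;
}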
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d6ec3720c77..84122472656 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
return NULL;
}
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
+ .mtu = fake_mtu,
};
/*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER;
rt->dst.ops = &fake_dst_ops;
}
@@ -356,7 +362,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
- neigh = dst_get_neighbour(dst);
+ neigh = dst_get_neighbour_noref(dst);
if (neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
@@ -807,7 +813,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
}
-#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
int ret;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e5f9ece3c9a..a1daf8227ed 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
#include <net/sock.h>
#include "br_private.h"
+#include "br_private_stp.h"
static inline size_t br_nlmsg_size(void)
{
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
p->state = new_state;
br_log_state(p);
+
+ spin_lock_bh(&p->br->lock);
+ br_port_state_selection(p->br);
+ spin_unlock_bh(&p->br->lock);
+
br_ifinfo_notify(RTM_NEWLINK, p);
return 0;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d7d6fb05411..0b67a63ad7a 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -56,7 +56,7 @@ struct br_ip
{
union {
__be32 ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
} u;
@@ -348,6 +348,7 @@ extern void br_fdb_fini(void);
extern void br_fdb_flush(struct net_bridge *br);
extern void br_fdb_changeaddr(struct net_bridge_port *p,
const unsigned char *newaddr);
+extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
extern void br_fdb_cleanup(unsigned long arg);
extern void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p, int do_all);
@@ -387,7 +388,8 @@ extern int br_add_if(struct net_bridge *br,
extern int br_del_if(struct net_bridge *br,
struct net_device *dev);
extern int br_min_mtu(const struct net_bridge *br);
-extern u32 br_features_recompute(struct net_bridge *br, u32 features);
+extern netdev_features_t br_features_recompute(struct net_bridge *br,
+ netdev_features_t features);
/* br_input.c */
extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -535,7 +537,7 @@ extern void br_stp_port_timer_init(struct net_bridge_port *p);
extern unsigned long br_timer_value(const struct timer_list *timer);
/* br.c */
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if IS_ENABLED(CONFIG_ATM_LANE)
extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
#endif
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index ad0a3f7cf6c..dd147d78a58 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
struct net_bridge_port *p;
unsigned int liveports = 0;
- /* Don't change port states if userspace is handling STP */
- if (br->stp_enabled == BR_USER_STP)
- return;
-
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
- if (p->port_no == br->root_port) {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_forwarding(p);
- } else if (br_is_designated_port(p)) {
- del_timer(&p->message_age_timer);
- br_make_forwarding(p);
- } else {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_blocking(p);
+ /* Don't change port states if userspace is handling STP */
+ if (br->stp_enabled != BR_USER_STP) {
+ if (p->port_no == br->root_port) {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_forwarding(p);
+ } else if (br_is_designated_port(p)) {
+ del_timer(&p->message_age_timer);
+ br_make_forwarding(p);
+ } else {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_blocking(p);
+ }
}
if (p->state == BR_STATE_FORWARDING)
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 2ed0056a39a..99c85668f55 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -55,9 +55,10 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
+ __be16 frag_off;
int offset_ph;
- offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
+ offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
if (offset_ph == -1)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 6e5a8bb9b94..f88ee537fb2 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -107,12 +107,13 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
goto out;
}
-#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IPV6)) {
const struct ipv6hdr *ih;
struct ipv6hdr _iph;
uint8_t nexthdr;
+ __be16 frag_off;
int offset_ph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
@@ -123,7 +124,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
&ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
nexthdr = ih->nexthdr;
- offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
+ offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
if (offset_ph == -1)
goto out;
print_ports(skb, nexthdr, offset_ph);
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 529750da962..936361e5a2b 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -40,3 +40,14 @@ config CAIF_NETDEV
If you select to build it as a built-in then the main CAIF device must
also be a built-in.
If unsure say Y.
+
+config CAIF_USB
+ tristate "CAIF USB support"
+ depends on CAIF
+ default n
+ ---help---
+ Say Y if you are using CAIF over USB CDC NCM.
+ This can be either built-in or a loadable module.
+ If you select to build it as a built-in then the main CAIF device must
+ also be a built-in.
+ If unsure say N.
diff --git a/net/caif/Makefile b/net/caif/Makefile
index ebcd4e7e6f4..cc2b51154d0 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
obj-$(CONFIG_CAIF) += caif.o
obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
export-y := caif.o
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6e658..61570ee76fe 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -17,6 +17,7 @@
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
MODULE_LICENSE("GPL");
@@ -33,6 +35,10 @@ struct caif_device_entry {
struct list_head list;
struct net_device *netdev;
int __percpu *pcpu_refcnt;
+ spinlock_t flow_lock;
+ struct sk_buff *xoff_skb;
+ void (*xoff_skb_dtor)(struct sk_buff *skb);
+ bool xoff;
};
struct caif_device_entry_list {
@@ -47,13 +53,14 @@ struct caif_net {
};
static int caif_net_id;
+static int q_high = 50; /* Percent */
struct cfcnfg *get_cfcnfg(struct net *net)
{
struct caif_net *caifn;
- BUG_ON(!net);
caifn = net_generic(net, caif_net_id);
- BUG_ON(!caifn);
+ if (!caifn)
+ return NULL;
return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);
@@ -61,20 +68,20 @@ EXPORT_SYMBOL(get_cfcnfg);
static struct caif_device_entry_list *caif_device_list(struct net *net)
{
struct caif_net *caifn;
- BUG_ON(!net);
caifn = net_generic(net, caif_net_id);
- BUG_ON(!caifn);
+ if (!caifn)
+ return NULL;
return &caifn->caifdevs;
}
static void caifd_put(struct caif_device_entry *e)
{
- irqsafe_cpu_dec(*e->pcpu_refcnt);
+ this_cpu_dec(*e->pcpu_refcnt);
}
static void caifd_hold(struct caif_device_entry *e)
{
- irqsafe_cpu_inc(*e->pcpu_refcnt);
+ this_cpu_inc(*e->pcpu_refcnt);
}
static int caifd_refcnt_read(struct caif_device_entry *e)
@@ -92,7 +99,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
struct caif_device_entry *caifd;
caifdevs = caif_device_list(dev_net(dev));
- BUG_ON(!caifdevs);
+ if (!caifdevs)
+ return NULL;
caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
if (!caifd)
@@ -112,7 +120,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
struct caif_device_entry_list *caifdevs =
caif_device_list(dev_net(dev));
struct caif_device_entry *caifd;
- BUG_ON(!caifdevs);
+ if (!caifdevs)
+ return NULL;
+
list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
if (caifd->netdev == dev)
return caifd;
@@ -120,15 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
return NULL;
}
+void caif_flow_cb(struct sk_buff *skb)
+{
+ struct caif_device_entry *caifd;
+ void (*dtor)(struct sk_buff *skb) = NULL;
+ bool send_xoff;
+
+ WARN_ON(skb->dev == NULL);
+
+ rcu_read_lock();
+ caifd = caif_get(skb->dev);
+ caifd_hold(caifd);
+ rcu_read_unlock();
+
+ spin_lock_bh(&caifd->flow_lock);
+ send_xoff = caifd->xoff;
+ caifd->xoff = 0;
+ if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+ WARN_ON(caifd->xoff_skb != skb);
+ dtor = caifd->xoff_skb_dtor;
+ caifd->xoff_skb = NULL;
+ caifd->xoff_skb_dtor = NULL;
+ }
+ spin_unlock_bh(&caifd->flow_lock);
+
+ if (dtor)
+ dtor(skb);
+
+ if (send_xoff)
+ caifd->layer.up->
+ ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+ caifd->layer.id);
+ caifd_put(caifd);
+}
+
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
- int err;
+ int err, high = 0, qlen = 0;
+ struct caif_dev_common *caifdev;
struct caif_device_entry *caifd =
container_of(layer, struct caif_device_entry, layer);
struct sk_buff *skb;
+ struct netdev_queue *txq;
+
+ rcu_read_lock_bh();
skb = cfpkt_tonative(pkt);
skb->dev = caifd->netdev;
+ skb_reset_network_header(skb);
+ skb->protocol = htons(ETH_P_CAIF);
+ caifdev = netdev_priv(caifd->netdev);
+
+ /* Check if we need to handle xoff */
+ if (likely(caifd->netdev->tx_queue_len == 0))
+ goto noxoff;
+
+ if (unlikely(caifd->xoff))
+ goto noxoff;
+
+ if (likely(!netif_queue_stopped(caifd->netdev))) {
+ /* If we run with a TX queue, check if the queue is too long*/
+ txq = netdev_get_tx_queue(skb->dev, 0);
+ qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+ if (likely(qlen == 0))
+ goto noxoff;
+
+ high = (caifd->netdev->tx_queue_len * q_high) / 100;
+ if (likely(qlen < high))
+ goto noxoff;
+ }
+
+ /* Hold lock while accessing xoff */
+ spin_lock_bh(&caifd->flow_lock);
+ if (caifd->xoff) {
+ spin_unlock_bh(&caifd->flow_lock);
+ goto noxoff;
+ }
+
+ /*
+ * Handle flow off: we do this by temporarily hijacking this
+ * skb's destructor function and replacing it with our own
+ * flow-on callback. The callback will set flow-on and call
+ * the original destructor.
+ */
+
+ pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
+ netif_queue_stopped(caifd->netdev),
+ qlen, high);
+ caifd->xoff = 1;
+ caifd->xoff_skb = skb;
+ caifd->xoff_skb_dtor = skb->destructor;
+ skb->destructor = caif_flow_cb;
+ spin_unlock_bh(&caifd->flow_lock);
+
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ caifd->layer.id);
+noxoff:
+ rcu_read_unlock_bh();
err = dev_queue_xmit(skb);
if (err > 0)
@@ -172,7 +273,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
/* Release reference to stack upwards */
caifd_put(caifd);
- return 0;
+
+ if (err != 0)
+ err = NET_RX_DROP;
+ return err;
}
static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@ static void dev_flowctrl(struct net_device *dev, int on)
caifd_put(caifd);
}
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+ struct cflayer *link_support, int head_room,
+ struct cflayer **layer, int (**rcv_func)(
+ struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *))
+{
+ struct caif_device_entry *caifd;
+ enum cfcnfg_phy_preference pref;
+ struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+ struct caif_device_entry_list *caifdevs;
+
+ caifdevs = caif_device_list(dev_net(dev));
+ if (!cfg || !caifdevs)
+ return;
+ caifd = caif_device_alloc(dev);
+ if (!caifd)
+ return;
+ *layer = &caifd->layer;
+ spin_lock_init(&caifd->flow_lock);
+
+ switch (caifdev->link_select) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ }
+ mutex_lock(&caifdevs->lock);
+ list_add_rcu(&caifd->list, &caifdevs->list);
+
+ strncpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name) - 1);
+ caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ caifd->layer.transmit = transmit;
+ cfcnfg_add_phy_layer(cfg,
+ dev,
+ &caifd->layer,
+ pref,
+ link_support,
+ caifdev->use_fcs,
+ head_room);
+ mutex_unlock(&caifdevs->lock);
+ if (rcv_func)
+ *rcv_func = receive;
+}
+EXPORT_SYMBOL(caif_enroll_dev);
+
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
void *arg)
@@ -210,62 +365,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
struct net_device *dev = arg;
struct caif_device_entry *caifd = NULL;
struct caif_dev_common *caifdev;
- enum cfcnfg_phy_preference pref;
- enum cfcnfg_phy_type phy_type;
struct cfcnfg *cfg;
+ struct cflayer *layer, *link_support;
+ int head_room = 0;
struct caif_device_entry_list *caifdevs;
- if (dev->type != ARPHRD_CAIF)
- return 0;
-
cfg = get_cfcnfg(dev_net(dev));
- if (cfg == NULL)
+ caifdevs = caif_device_list(dev_net(dev));
+ if (!cfg || !caifdevs)
return 0;
- caifdevs = caif_device_list(dev_net(dev));
+ caifd = caif_get(dev);
+ if (caifd == NULL && dev->type != ARPHRD_CAIF)
+ return 0;
switch (what) {
case NETDEV_REGISTER:
- caifd = caif_device_alloc(dev);
- if (!caifd)
- return 0;
+ if (caifd != NULL)
+ break;
caifdev = netdev_priv(dev);
- caifdev->flowctrl = dev_flowctrl;
- caifd->layer.transmit = transmit;
-
- if (caifdev->use_frag)
- phy_type = CFPHYTYPE_FRAG;
- else
- phy_type = CFPHYTYPE_CAIF;
-
- switch (caifdev->link_select) {
- case CAIF_LINK_HIGH_BANDW:
- pref = CFPHYPREF_HIGH_BW;
- break;
- case CAIF_LINK_LOW_LATENCY:
- pref = CFPHYPREF_LOW_LAT;
- break;
- default:
- pref = CFPHYPREF_HIGH_BW;
- break;
+ link_support = NULL;
+ if (caifdev->use_frag) {
+ head_room = 1;
+ link_support = cfserl_create(dev->ifindex,
+ caifdev->use_stx);
+ if (!link_support) {
+ pr_warn("Out of memory\n");
+ break;
+ }
}
- strncpy(caifd->layer.name, dev->name,
- sizeof(caifd->layer.name) - 1);
- caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
- mutex_lock(&caifdevs->lock);
- list_add_rcu(&caifd->list, &caifdevs->list);
-
- cfcnfg_add_phy_layer(cfg,
- phy_type,
- dev,
- &caifd->layer,
- pref,
- caifdev->use_fcs,
- caifdev->use_stx);
- mutex_unlock(&caifdevs->lock);
+ caif_enroll_dev(dev, caifdev, link_support, head_room,
+ &layer, NULL);
+ caifdev->flowctrl = dev_flowctrl;
break;
case NETDEV_UP:
@@ -277,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
break;
}
+ caifd->xoff = 0;
cfcnfg_set_phy_state(cfg, &caifd->layer, true);
rcu_read_unlock();
@@ -298,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd->layer.up->ctrlcmd(caifd->layer.up,
_CAIF_CTRLCMD_PHYIF_DOWN_IND,
caifd->layer.id);
+
+ spin_lock_bh(&caifd->flow_lock);
+
+ /*
+ * Replace our xoff-destructor with the original destructor.
+ * We trust that skb->destructor is *always* called before
+ * the skb reference becomes invalid. The hijacked SKB destructor
+ * takes the flow_lock so manipulating the skb->destructor here
+ * should be safe.
+ */
+ if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+ caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+ caifd->xoff = 0;
+ caifd->xoff_skb_dtor = NULL;
+ caifd->xoff_skb = NULL;
+
+ spin_unlock_bh(&caifd->flow_lock);
caifd_put(caifd);
break;
@@ -353,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
static int caif_init_net(struct net *net)
{
struct caif_net *caifn = net_generic(net, caif_net_id);
- BUG_ON(!caifn);
+ if (WARN_ON(!caifn))
+ return -EINVAL;
+
INIT_LIST_HEAD(&caifn->caifdevs.list);
mutex_init(&caifn->caifdevs.lock);
caifn->cfg = cfcnfg_create();
- if (!caifn->cfg) {
- pr_warn("can't create cfcnfg\n");
+ if (!caifn->cfg)
return -ENOMEM;
- }
return 0;
}
@@ -371,17 +523,14 @@ static void caif_exit_net(struct net *net)
struct caif_device_entry *caifd, *tmp;
struct caif_device_entry_list *caifdevs =
caif_device_list(net);
- struct cfcnfg *cfg;
+ struct cfcnfg *cfg = get_cfcnfg(net);
+
+ if (!cfg || !caifdevs)
+ return;
rtnl_lock();
mutex_lock(&caifdevs->lock);
- cfg = get_cfcnfg(net);
- if (cfg == NULL) {
- mutex_unlock(&caifdevs->lock);
- return;
- }
-
list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
int i = 0;
list_del_rcu(&caifd->list);
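/*
 * Editor's note: illustrative userspace sketch of the TX flow-control
 * decision added to transmit() above: CAIF raises "flow off" once the
 * device queue fills beyond q_high percent of tx_queue_len (or the
 * queue is already stopped), and the flow-on signal is deferred to the
 * skb destructor.  struct fake_dev and should_xoff() are hypothetical
 * names for this sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

static int q_high = 50;			/* percent, as in the patch */

struct fake_dev {
	int tx_queue_len;		/* 0 means "no qdisc, never xoff" */
	int qlen;			/* current qdisc backlog */
	bool stopped;			/* netif_queue_stopped() analogue */
};

static bool should_xoff(const struct fake_dev *dev)
{
	int high;

	if (dev->tx_queue_len == 0)	/* no queue to watch */
		return false;
	if (dev->stopped)		/* driver already throttled us */
		return true;
	if (dev->qlen == 0)
		return false;
	high = (dev->tx_queue_len * q_high) / 100;
	return dev->qlen >= high;	/* watermark reached: flow off */
}

int main(void)
{
	struct fake_dev dev = { .tx_queue_len = 1000, .qlen = 600 };

	printf("xoff? %s\n", should_xoff(&dev) ? "yes" : "no");
	return 0;
}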
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644
index 00000000000..5fc9eca8cd4
--- /dev/null
+++ b/net/caif/caif_usb.c
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1 /* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4 /* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc /* USB Vendor ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306 /* Product id for CAIF Modems */
+
+struct cfusbl {
+ struct cflayer layer;
+ u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 hpad;
+
+ /* Remove padding. */
+ cfpkt_extr_head(pkt, &hpad, 1);
+ cfpkt_extr_head(pkt, NULL, hpad);
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct caif_payload_info *info;
+ u8 hpad;
+ u8 zeros[CFUSB_ALIGNMENT];
+ struct sk_buff *skb;
+ struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+ skb = cfpkt_tonative(pkt);
+
+ skb_reset_network_header(skb);
+ skb->protocol = htons(ETH_P_IP);
+
+ info = cfpkt_info(pkt);
+ hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+ if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+ pr_warn("Headroom to small\n");
+ kfree_skb(skb);
+ return -EIO;
+ }
+ memset(zeros, 0, hpad);
+
+ cfpkt_add_head(pkt, zeros, hpad);
+ cfpkt_add_head(pkt, &hpad, 1);
+ cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+ return layr->dn->transmit(layr->dn, pkt);
+}
+
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ if (layr->up && layr->up->ctrlcmd)
+ layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+ u8 braddr[ETH_ALEN])
+{
+ struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+ if (!this) {
+ pr_warn("Out of memory\n");
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+ memset(this, 0, sizeof(struct cflayer));
+ this->layer.receive = cfusbl_receive;
+ this->layer.transmit = cfusbl_transmit;
+ this->layer.ctrlcmd = cfusbl_ctrlcmd;
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+ this->layer.id = phyid;
+
+ /*
+ * Construct TX ethernet header:
+ * 0-5 destination address
+ * 6-11 source address
+ * 12-13 protocol type
+ */
+ memcpy(&this->tx_eth_hdr[0], braddr, ETH_ALEN);
+ memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+ this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
+ this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
+ pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+ this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+ this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+ return (struct cflayer *) this;
+}
+
+static struct packet_type caif_usb_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ void *arg)
+{
+ struct net_device *dev = arg;
+ struct caif_dev_common common;
+ struct cflayer *layer, *link_support;
+ struct usbnet *usbnet = netdev_priv(dev);
+ struct usb_device *usbdev = usbnet->udev;
+ struct ethtool_drvinfo drvinfo;
+
+ /*
+ * Quirks: Hijack ethtool to find out if we have an NCM device,
+ * and find its VID/PID.
+ */
+ if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+ return 0;
+
+ dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+ if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+ return 0;
+
+ pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
+ le16_to_cpu(usbdev->descriptor.idVendor),
+ le16_to_cpu(usbdev->descriptor.idProduct));
+
+ /* Check for VID/PID that supports CAIF */
+ if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+ le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+ return 0;
+
+ if (what == NETDEV_UNREGISTER)
+ module_put(THIS_MODULE);
+
+ if (what != NETDEV_REGISTER)
+ return 0;
+
+ __module_get(THIS_MODULE);
+
+ memset(&common, 0, sizeof(common));
+ common.use_frag = false;
+ common.use_fcs = false;
+ common.use_stx = false;
+ common.link_select = CAIF_LINK_HIGH_BANDW;
+ common.flowctrl = NULL;
+
+ link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+ dev->broadcast);
+
+ if (!link_support)
+ return -ENOMEM;
+
+ if (dev->num_tx_queues > 1)
+ pr_warn("USB device uses more than one tx queue\n");
+
+ caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+ &layer, &caif_usb_type.func);
+ if (!pack_added)
+ dev_add_pack(&caif_usb_type);
+ pack_added = true;
+
+ strncpy(layer->name, dev->name,
+ sizeof(layer->name) - 1);
+ layer->name[sizeof(layer->name) - 1] = 0;
+
+ return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+ .notifier_call = cfusbl_device_notify,
+ .priority = 0,
+};
+
+static int __init cfusbl_init(void)
+{
+ return register_netdevice_notifier(&caif_device_notifier);
+}
+
+static void __exit cfusbl_exit(void)
+{
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
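/*
 * Editor's note: self-contained sketch of the per-packet padding scheme
 * used by the new caif_usb.c layer above: a one-byte descriptor carrying
 * the pad length, followed by that many zero bytes, ahead of the CAIF
 * payload (built in cfusbl_transmit(), stripped in cfusbl_receive()).
 * Buffer handling here is plain userspace C, not cfpkt; add_pad() and
 * strip_pad() are names invented for this sketch.
 */
#include <stdio.h>
#include <string.h>

#define CFUSB_PAD_DESCR_SZ 1
#define CFUSB_ALIGNMENT    4

/* Prepend pad descriptor + padding in front of payload; returns new length */
static size_t add_pad(unsigned char *frame, const unsigned char *payload,
		      size_t len, size_t hdr_len)
{
	unsigned char hpad = (hdr_len + CFUSB_PAD_DESCR_SZ) &
			     (CFUSB_ALIGNMENT - 1);

	frame[0] = hpad;
	memset(frame + 1, 0, hpad);
	memcpy(frame + 1 + hpad, payload, len);
	return 1 + hpad + len;
}

/* Receive side: strip descriptor and padding */
static const unsigned char *strip_pad(const unsigned char *frame, size_t *len)
{
	unsigned char hpad = frame[0];

	*len -= 1 + hpad;
	return frame + 1 + hpad;
}

int main(void)
{
	unsigned char payload[] = { 0xca, 0x1f }, frame[64];
	size_t flen = add_pad(frame, payload, sizeof(payload), 6);
	const unsigned char *p = strip_pad(frame, &flen);

	printf("recovered %zu bytes, first=0x%02x\n", flen, p[0]);
	return 0;
}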
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 00523ecc4ce..598aafb4cb5 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
/* Interface index */
int ifindex;
- /* Use Start of frame extension */
- bool use_stx;
+ /* Protocol head room added for CAIF link layer */
+ int head_room;
/* Use Start of frame checksum */
bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
if (channel_id != 0) {
struct cflayer *servl;
servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+ cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
if (servl != NULL)
layer_set_up(servl, NULL);
} else
pr_debug("nothing to disconnect\n");
- cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
/* Do RCU sync before initiating cleanup */
synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
*ifindex = phy->ifindex;
*proto_tail = 2;
- *proto_head =
-
- protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+ *proto_head = protohead[param.linktype] + phy->head_room;
rcu_read_unlock();
@@ -460,13 +458,13 @@ unlock:
}
void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
struct net_device *dev, struct cflayer *phy_layer,
enum cfcnfg_phy_preference pref,
- bool fcs, bool stx)
+ struct cflayer *link_support,
+ bool fcs, int head_room)
{
struct cflayer *frml;
- struct cflayer *phy_driver = NULL;
struct cfcnfg_phyinfo *phyinfo = NULL;
int i;
u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
- goto out_err;
+ goto out;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
if (!phyinfo)
goto out_err;
- switch (phy_type) {
- case CFPHYTYPE_FRAG:
- phy_driver =
- cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
- if (!phy_driver)
- goto out_err;
- break;
- case CFPHYTYPE_CAIF:
- phy_driver = NULL;
- break;
- default:
- goto out_err;
- }
phy_layer->id = phyid;
phyinfo->pref = pref;
phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
phyinfo->dev_info.dev = dev;
phyinfo->phy_layer = phy_layer;
phyinfo->ifindex = dev->ifindex;
- phyinfo->use_stx = stx;
+ phyinfo->head_room = head_room;
phyinfo->use_fcs = fcs;
frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
- if (phy_driver != NULL) {
- phy_driver->id = phyid;
- layer_set_dn(frml, phy_driver);
- layer_set_up(phy_driver, frml);
- layer_set_dn(phy_driver, phy_layer);
- layer_set_up(phy_layer, phy_driver);
+ if (link_support != NULL) {
+ link_support->id = phyid;
+ layer_set_dn(frml, link_support);
+ layer_set_up(link_support, frml);
+ layer_set_dn(link_support, phy_layer);
+ layer_set_up(phy_layer, link_support);
} else {
layer_set_dn(frml, phy_layer);
layer_set_up(phy_layer, frml);
}
list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
mutex_unlock(&cnfg->lock);
return;
out_err:
- kfree(phy_driver);
kfree(phyinfo);
mutex_unlock(&cnfg->lock);
}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index f39921171d0..0a7df7ef062 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
- int tmp;
u16 chks;
u16 len;
+ __le16 data;
+
struct cffrml *this = container_obj(layr);
if (this->dofcs) {
chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
- tmp = cpu_to_le16(chks);
- cfpkt_add_trail(pkt, &tmp, 2);
+ data = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &data, 2);
} else {
cfpkt_pad_trail(pkt, 2);
}
len = cfpkt_getlen(pkt);
- tmp = cpu_to_le16(len);
- cfpkt_add_head(pkt, &tmp, 2);
+ data = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &data, 2);
cfpkt_info(pkt)->hdr_len += 2;
if (cfpkt_erroneous(pkt)) {
pr_err("Packet is erroneous!\n");
@@ -176,14 +177,14 @@ void cffrml_put(struct cflayer *layr)
{
struct cffrml *this = container_obj(layr);
if (layr != NULL && this->pcpu_refcnt != NULL)
- irqsafe_cpu_dec(*this->pcpu_refcnt);
+ this_cpu_dec(*this->pcpu_refcnt);
}
void cffrml_hold(struct cflayer *layr)
{
struct cffrml *this = container_obj(layr);
if (layr != NULL && this->pcpu_refcnt != NULL)
- irqsafe_cpu_inc(*this->pcpu_refcnt);
+ this_cpu_inc(*this->pcpu_refcnt);
}
int cffrml_refcnt_read(struct cflayer *layr)
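/*
 * Editor's note: userspace sketch of the framing done in
 * cffrml_transmit() above -- a little-endian 16-bit length prepended
 * and a little-endian 16-bit checksum appended -- showing why the
 * temporary must be a 16-bit quantity (__le16) rather than int.
 * toy_fcs() is a stand-in, not the kernel's cffrml_checksum(), and the
 * length-field coverage mirrors the hunk (taken after the trailer is
 * added).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le16(unsigned char *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static uint16_t toy_fcs(const unsigned char *data, size_t len)
{
	uint16_t sum = 0xffff;		/* placeholder, not the real FCS */

	while (len--)
		sum ^= *data++;
	return sum;
}

int main(void)
{
	unsigned char payload[] = "caif", frame[32];
	uint16_t plen = sizeof(payload) - 1;

	memcpy(frame + 2, payload, plen);		/* leave room for length */
	put_le16(frame + 2 + plen, toy_fcs(payload, plen)); /* FCS trailer */
	put_le16(frame, plen + 2);			/* length, incl. trailer */

	printf("frame is %d bytes on the wire\n", 2 + plen + 2);
	return 0;
}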
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index df08c47183d..e335ba859b9 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
return (struct cfpkt *) skb;
}
-
struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
{
struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
kfree_skb(skb);
}
-
inline bool cfpkt_more(struct cfpkt *pkt)
{
struct sk_buff *skb = pkt_to_skb(pkt);
return skb->len > 0;
}
-
int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
{
struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
}
from = skb_pull(skb, len);
from -= len;
- memcpy(data, from, len);
+ if (data)
+ memcpy(data, from, len);
return 0;
}
+EXPORT_SYMBOL(cfpkt_extr_head);
int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
{
@@ -170,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
return 0;
}
-
int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
{
return cfpkt_add_body(pkt, NULL, len);
}
-
int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
{
struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
memcpy(to, data, len);
return 0;
}
-
+EXPORT_SYMBOL(cfpkt_add_head);
inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
{
return cfpkt_add_body(pkt, data, len);
}
-
inline u16 cfpkt_getlen(struct cfpkt *pkt)
{
struct sk_buff *skb = pkt_to_skb(pkt);
return skb->len;
}
-
inline u16 cfpkt_iterate(struct cfpkt *pkt,
u16 (*iter_func)(u16, void *, u16),
u16 data)
@@ -287,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
}
-
int cfpkt_setlen(struct cfpkt *pkt, u16 len)
{
struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
{
return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
}
+EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 81660f80971..6dc75d4f8d9 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -190,7 +190,7 @@ out:
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
- caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+ caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
/* Add info for MUX-layer to route the packet out. */
cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 797c8d16599..8e68b97f13e 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid);
-struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+struct cflayer *cfserl_create(int instance, bool use_stx)
{
struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
if (!this)
@@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
this->layer.receive = cfserl_receive;
this->layer.transmit = cfserl_transmit;
this->layer.ctrlcmd = cfserl_ctrlcmd;
- this->layer.type = type;
this->usestx = use_stx;
spin_lock_init(&this->sync);
snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 42599e31dca..3a94eae7abe 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
int i, j;
int numrep;
int firstn;
- int rc = -1;
BUG_ON(ruleno >= map->max_rules);
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
* that this may or may not correspond to the specific types
* referenced by the crush rule.
*/
- if (force >= 0) {
- if (force >= map->max_devices ||
- map->device_parents[force] == 0) {
- /*dprintk("CRUSH: forcefed device dne\n");*/
- rc = -1; /* force fed device dne */
- goto out;
- }
- if (!is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
+ if (force >= 0 &&
+ force < map->max_devices &&
+ map->device_parents[force] != 0 &&
+ !is_out(map, weight, force, x)) {
+ while (1) {
+ force_context[++force_pos] = force;
+ if (force >= 0)
+ force = map->device_parents[force];
+ else
+ force = map->bucket_parents[-1-force];
+ if (force == 0)
+ break;
}
}
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
BUG_ON(1);
}
}
- rc = result_len;
-
-out:
- return rc;
+ return result_len;
}
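/*
 * Editor's note: minimal sketch of the ancestor walk kept (but
 * restructured) in crush_do_rule() above: starting from a forced leaf,
 * record each node on the way up until the root (id 0) is reached.
 * The single parents[] array below stands in for CRUSH's separate
 * device/bucket parent maps; walk_up() is a name invented here.
 */
#include <stdio.h>

#define MAX_DEPTH 16

static int walk_up(const int *parents, int leaf, int *chain)
{
	int depth = 0;

	while (1) {
		chain[depth++] = leaf;
		leaf = parents[leaf];
		if (leaf == 0)		/* 0 terminates the chain */
			break;
	}
	return depth;
}

int main(void)
{
	/* node 3's parent is 2, node 2's parent is 1, node 1 is under root */
	int parents[] = { 0, 0, 1, 2 };
	int chain[MAX_DEPTH];
	int i, depth = walk_up(parents, 3, chain);

	for (i = 0; i < depth; i++)
		printf("chain[%d] = %d\n", i, chain[i]);
	return 0;
}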
diff --git a/net/core/Makefile b/net/core/Makefile
index 0d357b1c4e5..674641b13ae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,12 +3,13 @@
#
obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
- gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
+ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
- neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+ sock_diag.o
obj-$(CONFIG_XFRM) += flow.o
obj-y += net-sysfs.o
@@ -19,3 +20,4 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404..f494675471a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,10 +133,9 @@
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
-#include <linux/if_tunnel.h>
-#include <linux/if_pppox.h>
-#include <linux/ppp_defs.h>
#include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
+#include <net/flow_keys.h>
#include "net-sysfs.h"
@@ -1320,8 +1319,6 @@ EXPORT_SYMBOL(dev_close);
*/
void dev_disable_lro(struct net_device *dev)
{
- u32 flags;
-
/*
* If we're trying to disable lro on a vlan device
* use the underlying physical device instead
@@ -1329,15 +1326,9 @@ void dev_disable_lro(struct net_device *dev)
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
- if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
- flags = dev->ethtool_ops->get_flags(dev);
- else
- flags = ethtool_op_get_flags(dev);
+ dev->wanted_features &= ~NETIF_F_LRO;
+ netdev_update_features(dev);
- if (!(flags & ETH_FLAG_LRO))
- return;
-
- __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
if (unlikely(dev->features & NETIF_F_LRO))
netdev_WARN(dev, "failed to disable LRO!\n");
}
@@ -1396,7 +1387,7 @@ rollback:
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
- break;
+ goto outroll;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1398,7 @@ rollback:
}
}
+outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -1449,34 +1441,55 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
+#ifdef HAVE_JUMP_LABEL
+/* We are not allowed to call jump_label_dec() from irq context
+ * If net_disable_timestamp() is called from irq context, defer the
+ * jump_label_dec() calls.
+ */
+static atomic_t netstamp_needed_deferred;
+#endif
void net_enable_timestamp(void)
{
- atomic_inc(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+ int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+ if (deferred) {
+ while (--deferred)
+ jump_label_dec(&netstamp_needed);
+ return;
+ }
+#endif
+ WARN_ON(in_interrupt());
+ jump_label_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
- atomic_dec(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+ if (in_interrupt()) {
+ atomic_inc(&netstamp_needed_deferred);
+ return;
+ }
+#endif
+ jump_label_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
- if (atomic_read(&netstamp_needed))
+ skb->tstamp.tv64 = 0;
+ if (static_branch(&netstamp_needed))
__net_timestamp(skb);
- else
- skb->tstamp.tv64 = 0;
}
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
- if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
- __net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB) \
+ if (static_branch(&netstamp_needed)) { \
+ if ((COND) && !(SKB)->tstamp.tv64) \
+ __net_timestamp(SKB); \
+ } \
static int net_hwtstamp_validate(struct ifreq *ifr)
{
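/*
 * Editor's note: userspace sketch of the deferred-decrement pattern the
 * hunk above applies to netstamp_needed: decrements requested from a
 * context that may not touch the jump label are parked in an atomic
 * counter and drained by the next enable call.  The static-key
 * machinery is replaced by a plain counter here; enable()/disable()
 * are hypothetical names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int key;		/* stand-in for the jump label refcount */
static atomic_int deferred;	/* decrements we could not do in place  */

static void enable(void)
{
	int d = atomic_exchange(&deferred, 0);

	if (d) {			/* apply d-1 decrements, skip our +1 */
		while (--d)
			atomic_fetch_sub(&key, 1);
		return;
	}
	atomic_fetch_add(&key, 1);
}

static void disable(bool in_irq)
{
	if (in_irq) {			/* can't touch the key here: defer */
		atomic_fetch_add(&deferred, 1);
		return;
	}
	atomic_fetch_sub(&key, 1);
}

int main(void)
{
	enable();			/* key = 1 */
	disable(true);			/* deferred = 1 */
	disable(true);			/* deferred = 2 */
	enable();			/* drains: net effect +1 - 2 */

	printf("key = %d (expected 0)\n", atomic_load(&key));
	return 0;
}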
@@ -1923,7 +1936,8 @@ EXPORT_SYMBOL(skb_checksum_help);
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
@@ -1953,9 +1967,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
dev->ethtool_ops->get_drvinfo(dev, &info);
- WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
- info.driver, dev ? dev->features : 0L,
- skb->sk ? skb->sk->sk_route_caps : 0L,
+ WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+ info.driver, dev ? &dev->features : NULL,
+ skb->sk ? &skb->sk->sk_route_caps : NULL,
skb->len, skb->data_len, skb->ip_summed);
if (skb_header_cloned(skb) &&
@@ -2064,7 +2078,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
struct sk_buff *segs;
@@ -2103,7 +2117,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
}
}
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
return ((features & NETIF_F_GEN_CSUM) ||
((features & NETIF_F_V4_CSUM) &&
@@ -2114,7 +2128,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
protocol == htons(ETH_P_FCOE)));
}
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+ __be16 protocol, netdev_features_t features)
{
if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
@@ -2126,10 +2141,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
return features;
}
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- u32 features = skb->dev->features;
+ netdev_features_t features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2175,7 +2190,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
unsigned int skb_len;
if (likely(!skb->next)) {
- u32 features;
+ netdev_features_t features;
/*
* If device doesn't need skb->dst, release it right now while
@@ -2256,7 +2271,7 @@ gso:
return rc;
}
txq_trans_update(txq);
- if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+ if (unlikely(netif_xmit_stopped(txq) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
@@ -2456,6 +2471,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+ struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+
+ if ((!skb->priority) && (skb->sk) && map)
+ skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
@@ -2496,6 +2523,8 @@ int dev_queue_xmit(struct sk_buff *skb)
*/
rcu_read_lock_bh();
+ skb_update_prio(skb);
+
txq = dev_pick_tx(dev, skb);
q = rcu_dereference_bh(txq->qdisc);
@@ -2530,7 +2559,7 @@ int dev_queue_xmit(struct sk_buff *skb)
HARD_TX_LOCK(dev, txq, cpu);
- if (!netif_tx_queue_stopped(txq)) {
+ if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
rc = dev_hard_start_xmit(skb, dev, txq);
__this_cpu_dec(xmit_recursion);
@@ -2591,123 +2620,28 @@ static inline void ____napi_schedule(struct softnet_data *sd,
*/
void __skb_get_rxhash(struct sk_buff *skb)
{
- int nhoff, hash = 0, poff;
- const struct ipv6hdr *ip6;
- const struct iphdr *ip;
- const struct vlan_hdr *vlan;
- u8 ip_proto;
- u32 addr1, addr2;
- u16 proto;
- union {
- u32 v32;
- u16 v16[2];
- } ports;
-
- nhoff = skb_network_offset(skb);
- proto = skb->protocol;
-
-again:
- switch (proto) {
- case __constant_htons(ETH_P_IP):
-ip:
- if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
- goto done;
-
- ip = (const struct iphdr *) (skb->data + nhoff);
- if (ip_is_fragment(ip))
- ip_proto = 0;
- else
- ip_proto = ip->protocol;
- addr1 = (__force u32) ip->saddr;
- addr2 = (__force u32) ip->daddr;
- nhoff += ip->ihl * 4;
- break;
- case __constant_htons(ETH_P_IPV6):
-ipv6:
- if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
- goto done;
-
- ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
- ip_proto = ip6->nexthdr;
- addr1 = (__force u32) ip6->saddr.s6_addr32[3];
- addr2 = (__force u32) ip6->daddr.s6_addr32[3];
- nhoff += 40;
- break;
- case __constant_htons(ETH_P_8021Q):
- if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
- goto done;
- vlan = (const struct vlan_hdr *) (skb->data + nhoff);
- proto = vlan->h_vlan_encapsulated_proto;
- nhoff += sizeof(*vlan);
- goto again;
- case __constant_htons(ETH_P_PPP_SES):
- if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
- goto done;
- proto = *((__be16 *) (skb->data + nhoff +
- sizeof(struct pppoe_hdr)));
- nhoff += PPPOE_SES_HLEN;
- switch (proto) {
- case __constant_htons(PPP_IP):
- goto ip;
- case __constant_htons(PPP_IPV6):
- goto ipv6;
- default:
- goto done;
- }
- default:
- goto done;
- }
-
- switch (ip_proto) {
- case IPPROTO_GRE:
- if (pskb_may_pull(skb, nhoff + 16)) {
- u8 *h = skb->data + nhoff;
- __be16 flags = *(__be16 *)h;
+ struct flow_keys keys;
+ u32 hash;
- /*
- * Only look inside GRE if version zero and no
- * routing
- */
- if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
- proto = *(__be16 *)(h + 2);
- nhoff += 4;
- if (flags & GRE_CSUM)
- nhoff += 4;
- if (flags & GRE_KEY)
- nhoff += 4;
- if (flags & GRE_SEQ)
- nhoff += 4;
- goto again;
- }
- }
- break;
- case IPPROTO_IPIP:
- goto again;
- default:
- break;
- }
+ if (!skb_flow_dissect(skb, &keys))
+ return;
- ports.v32 = 0;
- poff = proto_ports_offset(ip_proto);
- if (poff >= 0) {
- nhoff += poff;
- if (pskb_may_pull(skb, nhoff + 4)) {
- ports.v32 = * (__force u32 *) (skb->data + nhoff);
- if (ports.v16[1] < ports.v16[0])
- swap(ports.v16[0], ports.v16[1]);
- skb->l4_rxhash = 1;
- }
+ if (keys.ports) {
+ if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
+ swap(keys.port16[0], keys.port16[1]);
+ skb->l4_rxhash = 1;
}
/* get a consistent hash (same value on both flow directions) */
- if (addr2 < addr1)
- swap(addr1, addr2);
+ if ((__force u32)keys.dst < (__force u32)keys.src)
+ swap(keys.dst, keys.src);
- hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ hash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports, hashrnd);
if (!hash)
hash = 1;
-done:
skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
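/*
 * Editor's note: userspace sketch of the direction-independent flow
 * hashing the rewritten __skb_get_rxhash() performs: canonicalise the
 * (src, dst) addresses and the port pair by swapping, then mix.  The
 * mixer below is a trivial stand-in for the kernel's jhash_3words(),
 * and flow extraction (skb_flow_dissect) is omitted; struct flow and
 * flow_hash() are names invented for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct flow { uint32_t src, dst; uint16_t sport, dport; };

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	/* placeholder mixer, not jhash */
	uint32_t h = a * 2654435761u;

	h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b9u + (h << 6) + (h >> 2);
	return h ? h : 1;	/* never return 0, as in the patch */
}

static uint32_t flow_hash(struct flow f)
{
	uint32_t tmp32;
	uint16_t tmp16;

	if (f.dport < f.sport) {	/* canonical port order */
		tmp16 = f.sport; f.sport = f.dport; f.dport = tmp16;
	}
	if (f.dst < f.src) {		/* canonical address order */
		tmp32 = f.src; f.src = f.dst; f.dst = tmp32;
	}
	return mix3(f.dst, f.src, ((uint32_t)f.sport << 16) | f.dport);
}

int main(void)
{
	struct flow a = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct flow b = { 0x0a000002, 0x0a000001, 80, 1234 };

	printf("same hash both ways: %s\n",
	       flow_hash(a) == flow_hash(b) ? "yes" : "no");
	return 0;
}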
@@ -2718,6 +2652,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+struct jump_label_key rps_needed __read_mostly;
+
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2997,12 +2933,11 @@ int netif_rx(struct sk_buff *skb)
if (netpoll_rx(skb))
return NET_RX_DROP;
- if (netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
- {
+ if (static_branch(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -3017,14 +2952,13 @@ int netif_rx(struct sk_buff *skb)
rcu_read_unlock();
preempt_enable();
- }
-#else
+ } else
+#endif
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
-#endif
return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -3230,8 +3164,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
int ret = NET_RX_DROP;
__be16 type;
- if (!netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
@@ -3362,14 +3295,13 @@ out:
*/
int netif_receive_skb(struct sk_buff *skb)
{
- if (netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(netdev_tstamp_prequeue, skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
#ifdef CONFIG_RPS
- {
+ if (static_branch(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu, ret;
@@ -3380,16 +3312,12 @@ int netif_receive_skb(struct sk_buff *skb)
if (cpu >= 0) {
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- } else {
- rcu_read_unlock();
- ret = __netif_receive_skb(skb);
+ return ret;
}
-
- return ret;
+ rcu_read_unlock();
}
-#else
- return __netif_receive_skb(skb);
#endif
+ return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
@@ -4282,6 +4210,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
sizeof(struct dev_iter_state));
}
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops)
+{
+ return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
@@ -4532,7 +4466,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
- unsigned short old_flags = dev->flags;
+ unsigned int old_flags = dev->flags;
uid_t uid;
gid_t gid;
@@ -4589,7 +4523,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
*/
int dev_set_promiscuity(struct net_device *dev, int inc)
{
- unsigned short old_flags = dev->flags;
+ unsigned int old_flags = dev->flags;
int err;
err = __dev_set_promiscuity(dev, inc);
@@ -4616,7 +4550,7 @@ EXPORT_SYMBOL(dev_set_promiscuity);
int dev_set_allmulti(struct net_device *dev, int inc)
{
- unsigned short old_flags = dev->flags;
+ unsigned int old_flags = dev->flags;
ASSERT_RTNL();
@@ -4719,7 +4653,7 @@ EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
- int old_flags = dev->flags;
+ unsigned int old_flags = dev->flags;
int ret;
ASSERT_RTNL();
@@ -4802,10 +4736,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
* Change settings on device based state flags. The flags are
* in the userspace exported format.
*/
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags)
{
- int ret, changes;
- int old_flags = dev->flags;
+ int ret;
+ unsigned int changes, old_flags = dev->flags;
ret = __dev_change_flags(dev, flags);
if (ret < 0)
@@ -5362,7 +5296,8 @@ static void rollback_registered(struct net_device *dev)
list_del(&single);
}
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
@@ -5371,12 +5306,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
- if ((features & NETIF_F_NO_CSUM) &&
- (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- netdev_warn(dev, "mixed no checksumming and other settings.\n");
- features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
- }
-
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
@@ -5424,7 +5353,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
int __netdev_update_features(struct net_device *dev)
{
- u32 features;
+ netdev_features_t features;
int err = 0;
ASSERT_RTNL();
@@ -5440,16 +5369,16 @@ int __netdev_update_features(struct net_device *dev)
if (dev->features == features)
return 0;
- netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
- dev->features, features);
+ netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+ &dev->features, &features);
if (dev->netdev_ops->ndo_set_features)
err = dev->netdev_ops->ndo_set_features(dev, features);
if (unlikely(err < 0)) {
netdev_err(dev,
- "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
- err, features, dev->features);
+ "set_features() failed (%d); wanted %pNF, left %pNF\n",
+ err, &features, &dev->features);
return -1;
}
@@ -5548,6 +5477,9 @@ static void netdev_init_one_queue(struct net_device *dev,
queue->xmit_lock_owner = -1;
netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue->dev = dev;
+#ifdef CONFIG_BQL
+ dql_init(&queue->dql, HZ);
+#endif
}
static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -5633,11 +5565,12 @@ int register_netdevice(struct net_device *dev)
dev->wanted_features = dev->features & dev->hw_features;
/* Turn on no cache copy if HW is doing checksum */
- dev->hw_features |= NETIF_F_NOCACHE_COPY;
- if ((dev->features & NETIF_F_ALL_CSUM) &&
- !(dev->features & NETIF_F_NO_CSUM)) {
- dev->wanted_features |= NETIF_F_NOCACHE_COPY;
- dev->features |= NETIF_F_NOCACHE_COPY;
+ if (!(dev->flags & IFF_LOOPBACK)) {
+ dev->hw_features |= NETIF_F_NOCACHE_COPY;
+ if (dev->features & NETIF_F_ALL_CSUM) {
+ dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+ dev->features |= NETIF_F_NOCACHE_COPY;
+ }
}
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6373,7 +6306,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+ netdev_features_t one, netdev_features_t mask)
{
if (mask & NETIF_F_GEN_CSUM)
mask |= NETIF_F_ALL_CSUM;
@@ -6382,10 +6316,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
all &= one | ~NETIF_F_ALL_FOR_ALL;
- /* If device needs checksumming, downgrade to it. */
- if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
- all &= ~NETIF_F_NO_CSUM;
-
/* If one device supports hw checksumming, set for all. */
if (all & NETIF_F_GEN_CSUM)
all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
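The dev.c hunks above replace the u32 feature masks with the wider netdev_features_t type (and drop NETIF_F_NO_CSUM) because the old 32-bit mask had run out of bits. A minimal standalone C sketch of why the wider type and the *_BIT-style numbering matter; the flag names and bit numbers below are made-up placeholders, not the kernel's real NETIF_F_* values.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;            /* stand-in for netdev_features_t */

/* hypothetical bit numbers in the NETIF_F_*_BIT style;
 * bit 31 is the last one a 32-bit mask could hold */
enum { F_SG_BIT = 0, F_TSO_BIT = 16, F_LOOPBACK_BIT = 31, F_NEW_FEATURE_BIT = 32 };

#define FEATURE(name)	((features_t)1 << F_##name##_BIT)

int main(void)
{
	features_t features = FEATURE(SG) | FEATURE(TSO) | FEATURE(NEW_FEATURE);

	/* (1u << 32) would be undefined with a 32-bit mask; the 64-bit
	 * typedef keeps bit 32 and above representable */
	printf("bit 32 set: %d\n", (int)!!(features & FEATURE(NEW_FEATURE)));
	printf("mask: %#llx\n", (unsigned long long)features);
	return 0;
}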
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 277faef9148..29c07fef922 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -427,7 +427,7 @@ EXPORT_SYMBOL(dev_uc_del);
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
+ * locked by netif_addr_lock_bh.
*
* This function is intended to be called from the dev->set_rx_mode
* function of layered software devices.
@@ -439,11 +439,11 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_bh(to);
+ netif_addr_lock_nested(to);
err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
+ netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_uc_sync);
@@ -463,7 +463,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
return;
netif_addr_lock_bh(from);
- netif_addr_lock(to);
+ netif_addr_lock_nested(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
@@ -590,7 +590,7 @@ EXPORT_SYMBOL(dev_mc_del_global);
*
* Add newly added addresses to the destination device and release
* addresses that have no users left. The source device must be
- * locked by netif_tx_lock_bh.
+ * locked by netif_addr_lock_bh.
*
* This function is intended to be called from the ndo_set_rx_mode
* function of layered software devices.
@@ -602,11 +602,11 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return -EINVAL;
- netif_addr_lock_bh(to);
+ netif_addr_lock_nested(to);
err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
+ netif_addr_unlock(to);
return err;
}
EXPORT_SYMBOL(dev_mc_sync);
@@ -626,7 +626,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
return;
netif_addr_lock_bh(from);
- netif_addr_lock(to);
+ netif_addr_lock_nested(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
+ return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
}
static const struct file_operations dev_mc_seq_fops = {
diff --git a/net/core/dst.c b/net/core/dst.c
index d5e2c4c0910..43d94cedbf7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -366,7 +366,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
dev_hold(dst->dev);
dev_put(dev);
rcu_read_lock();
- neigh = dst_get_neighbour(dst);
+ neigh = dst_get_neighbour_noref(dst);
if (neigh && neigh->dev == dev) {
neigh->dev = dst->dev;
dev_hold(dst->dev);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f4448170712..921aa2b4b41 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -36,235 +36,44 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-u32 ethtool_op_get_tx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
-
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_IP_CSUM;
- else
- dev->features &= ~NETIF_F_IP_CSUM;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_HW_CSUM;
- else
- dev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- else
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-
-u32 ethtool_op_get_sg(struct net_device *dev)
-{
- return (dev->features & NETIF_F_SG) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_sg);
-
-int ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_SG;
- else
- dev->features &= ~NETIF_F_SG;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_sg);
-
-u32 ethtool_op_get_tso(struct net_device *dev)
-{
- return (dev->features & NETIF_F_TSO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tso);
-
-int ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_TSO;
- else
- dev->features &= ~NETIF_F_TSO;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tso);
-
-u32 ethtool_op_get_ufo(struct net_device *dev)
-{
- return (dev->features & NETIF_F_UFO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-
-int ethtool_op_set_ufo(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_UFO;
- else
- dev->features &= ~NETIF_F_UFO;
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-
-/* the following list of flags are the same as their associated
- * NETIF_F_xxx values in include/linux/netdevice.h
- */
-static const u32 flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
- ETH_FLAG_RXHASH);
-
-u32 ethtool_op_get_flags(struct net_device *dev)
-{
- /* in the future, this function will probably contain additional
- * handling for flags which are not so easily handled
- * by a simple masking operation
- */
-
- return dev->features & flags_dup_features;
-}
-EXPORT_SYMBOL(ethtool_op_get_flags);
-
-/* Check if device can enable (or disable) particular feature coded in "data"
- * argument. Flags "supported" describe features that can be toggled by device.
- * If feature can not be toggled, it state (enabled or disabled) must match
- * hardcoded device features state, otherwise flags are marked as invalid.
- */
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
-{
- u32 features = dev->features & flags_dup_features;
- /* "data" can contain only flags_dup_features bits,
- * see __ethtool_set_flags */
-
- return (features & ~supported) != (data & ~supported);
-}
-EXPORT_SYMBOL(ethtool_invalid_flags);
-
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
- if (ethtool_invalid_flags(dev, data, supported))
- return -EINVAL;
-
- dev->features = ((dev->features & ~flags_dup_features) |
- (data & flags_dup_features));
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_flags);
-
/* Handlers for each ethtool command */
-#define ETHTOOL_DEV_FEATURE_WORDS 1
-
-static void ethtool_get_features_compat(struct net_device *dev,
- struct ethtool_get_features_block *features)
-{
- if (!dev->ethtool_ops)
- return;
-
- /* getting RX checksum */
- if (dev->ethtool_ops->get_rx_csum)
- if (dev->ethtool_ops->get_rx_csum(dev))
- features[0].active |= NETIF_F_RXCSUM;
-
- /* mark legacy-changeable features */
- if (dev->ethtool_ops->set_sg)
- features[0].available |= NETIF_F_SG;
- if (dev->ethtool_ops->set_tx_csum)
- features[0].available |= NETIF_F_ALL_CSUM;
- if (dev->ethtool_ops->set_tso)
- features[0].available |= NETIF_F_ALL_TSO;
- if (dev->ethtool_ops->set_rx_csum)
- features[0].available |= NETIF_F_RXCSUM;
- if (dev->ethtool_ops->set_flags)
- features[0].available |= flags_dup_features;
-}
-
-static int ethtool_set_feature_compat(struct net_device *dev,
- int (*legacy_set)(struct net_device *, u32),
- struct ethtool_set_features_block *features, u32 mask)
-{
- u32 do_set;
-
- if (!legacy_set)
- return 0;
-
- if (!(features[0].valid & mask))
- return 0;
-
- features[0].valid &= ~mask;
-
- do_set = !!(features[0].requested & mask);
-
- if (legacy_set(dev, do_set) < 0)
- netdev_info(dev,
- "Legacy feature change (%s) failed for 0x%08x\n",
- do_set ? "set" : "clear", mask);
-
- return 1;
-}
-
-static int ethtool_set_flags_compat(struct net_device *dev,
- int (*legacy_set)(struct net_device *, u32),
- struct ethtool_set_features_block *features, u32 mask)
-{
- u32 value;
-
- if (!legacy_set)
- return 0;
-
- if (!(features[0].valid & mask))
- return 0;
-
- value = dev->features & ~features[0].valid;
- value |= features[0].requested;
-
- features[0].valid &= ~mask;
-
- if (legacy_set(dev, value & mask) < 0)
- netdev_info(dev, "Legacy flags change failed\n");
-
- return 1;
-}
-
-static int ethtool_set_features_compat(struct net_device *dev,
- struct ethtool_set_features_block *features)
-{
- int compat;
-
- if (!dev->ethtool_ops)
- return 0;
-
- compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
- features, NETIF_F_SG);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
- features, NETIF_F_ALL_CSUM);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
- features, NETIF_F_ALL_TSO);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
- features, NETIF_F_RXCSUM);
- compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
- features, flags_dup_features);
-
- return compat;
-}
+#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
+
+static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
+ [NETIF_F_SG_BIT] = "tx-scatter-gather",
+ [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4",
+ [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic",
+ [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
+ [NETIF_F_HIGHDMA_BIT] = "highdma",
+ [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
+ [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert",
+
+ [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse",
+ [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter",
+ [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
+ [NETIF_F_GSO_BIT] = "tx-generic-segmentation",
+ [NETIF_F_LLTX_BIT] = "tx-lockless",
+ [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
+ [NETIF_F_GRO_BIT] = "rx-gro",
+ [NETIF_F_LRO_BIT] = "rx-lro",
+
+ [NETIF_F_TSO_BIT] = "tx-tcp-segmentation",
+ [NETIF_F_UFO_BIT] = "tx-udp-fragmentation",
+ [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust",
+ [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation",
+ [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
+ [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
+
+ [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
+ [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
+ [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
+ [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
+ [NETIF_F_RXHASH_BIT] = "rx-hashing",
+ [NETIF_F_RXCSUM_BIT] = "rx-checksum",
+ [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
+ [NETIF_F_LOOPBACK_BIT] = "loopback",
+};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
@@ -272,18 +81,21 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
.cmd = ETHTOOL_GFEATURES,
.size = ETHTOOL_DEV_FEATURE_WORDS,
};
- struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
- {
- .available = dev->hw_features,
- .requested = dev->wanted_features,
- .active = dev->features,
- .never_changed = NETIF_F_NEVER_CHANGE,
- },
- };
+ struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
u32 __user *sizeaddr;
u32 copy_size;
+ int i;
- ethtool_get_features_compat(dev, features);
+ /* in case feature bits run out again */
+ BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
+
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+ features[i].available = (u32)(dev->hw_features >> (32 * i));
+ features[i].requested = (u32)(dev->wanted_features >> (32 * i));
+ features[i].active = (u32)(dev->features >> (32 * i));
+ features[i].never_changed =
+ (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
+ }
sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
if (get_user(copy_size, sizeaddr))
@@ -305,7 +117,8 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_sfeatures cmd;
struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
- int ret = 0;
+ netdev_features_t wanted = 0, valid = 0;
+ int i, ret = 0;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
@@ -317,65 +130,29 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
if (copy_from_user(features, useraddr, sizeof(features)))
return -EFAULT;
- if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
- return -EINVAL;
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+ valid |= (netdev_features_t)features[i].valid << (32 * i);
+ wanted |= (netdev_features_t)features[i].requested << (32 * i);
+ }
- if (ethtool_set_features_compat(dev, features))
- ret |= ETHTOOL_F_COMPAT;
+ if (valid & ~NETIF_F_ETHTOOL_BITS)
+ return -EINVAL;
- if (features[0].valid & ~dev->hw_features) {
- features[0].valid &= dev->hw_features;
+ if (valid & ~dev->hw_features) {
+ valid &= dev->hw_features;
ret |= ETHTOOL_F_UNSUPPORTED;
}
- dev->wanted_features &= ~features[0].valid;
- dev->wanted_features |= features[0].valid & features[0].requested;
+ dev->wanted_features &= ~valid;
+ dev->wanted_features |= wanted & valid;
__netdev_update_features(dev);
- if ((dev->wanted_features ^ dev->features) & features[0].valid)
+ if ((dev->wanted_features ^ dev->features) & valid)
ret |= ETHTOOL_F_WISH;
return ret;
}
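ethtool_get_features() and ethtool_set_features() above now marshal the wider feature mask through ETHTOOL_DEV_FEATURE_WORDS u32 blocks. A small userspace sketch of the same split-and-recombine arithmetic; the 48-bit feature count is a placeholder, not the kernel's NETDEV_FEATURE_COUNT.

#include <stdint.h>
#include <stdio.h>

#define FEATURE_COUNT	48                              /* hypothetical */
#define FEATURE_WORDS	((FEATURE_COUNT + 31) / 32)     /* as in ETHTOOL_DEV_FEATURE_WORDS */

int main(void)
{
	uint64_t features = 0x0000a5a500ff00ffULL & ((1ULL << FEATURE_COUNT) - 1);
	uint32_t words[FEATURE_WORDS];
	uint64_t rebuilt = 0;
	int i;

	for (i = 0; i < FEATURE_WORDS; i++)     /* split, as in ethtool_get_features() */
		words[i] = (uint32_t)(features >> (32 * i));

	for (i = 0; i < FEATURE_WORDS; i++)     /* recombine, as in ethtool_set_features() */
		rebuilt |= (uint64_t)words[i] << (32 * i);

	printf("%s\n", rebuilt == features ? "round trip ok" : "mismatch");
	return 0;
}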
-static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
- /* NETIF_F_SG */ "tx-scatter-gather",
- /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
- /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
- /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
- /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
- /* NETIF_F_HIGHDMA */ "highdma",
- /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
- /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
-
- /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse",
- /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter",
- /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
- /* NETIF_F_GSO */ "tx-generic-segmentation",
- /* NETIF_F_LLTX */ "tx-lockless",
- /* NETIF_F_NETNS_LOCAL */ "netns-local",
- /* NETIF_F_GRO */ "rx-gro",
- /* NETIF_F_LRO */ "rx-lro",
-
- /* NETIF_F_TSO */ "tx-tcp-segmentation",
- /* NETIF_F_UFO */ "tx-udp-fragmentation",
- /* NETIF_F_GSO_ROBUST */ "tx-gso-robust",
- /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation",
- /* NETIF_F_TSO6 */ "tx-tcp6-segmentation",
- /* NETIF_F_FSO */ "tx-fcoe-segmentation",
- "",
- "",
-
- /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc",
- /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp",
- /* NETIF_F_FCOE_MTU */ "fcoe-mtu",
- /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
- /* NETIF_F_RXHASH */ "rx-hashing",
- /* NETIF_F_RXCSUM */ "rx-checksum",
- /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy",
- /* NETIF_F_LOOPBACK */ "loopback",
-};
-
static int __ethtool_get_sset_count(struct net_device *dev, int sset)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -402,7 +179,7 @@ static void __ethtool_get_strings(struct net_device *dev,
ops->get_strings(dev, stringset, data);
}
-static u32 ethtool_get_feature_mask(u32 eth_cmd)
+static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
{
/* feature masks of legacy discrete ethtool ops */
@@ -433,136 +210,82 @@ static u32 ethtool_get_feature_mask(u32 eth_cmd)
}
}
-static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
-{
- const struct ethtool_ops *ops = dev->ethtool_ops;
-
- if (!ops)
- return NULL;
-
- switch (ethcmd) {
- case ETHTOOL_GTXCSUM:
- return ops->get_tx_csum;
- case ETHTOOL_GRXCSUM:
- return ops->get_rx_csum;
- case ETHTOOL_SSG:
- return ops->get_sg;
- case ETHTOOL_STSO:
- return ops->get_tso;
- case ETHTOOL_SUFO:
- return ops->get_ufo;
- default:
- return NULL;
- }
-}
-
-static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
-{
- return !!(dev->features & NETIF_F_ALL_CSUM);
-}
-
static int ethtool_get_one_feature(struct net_device *dev,
char __user *useraddr, u32 ethcmd)
{
- u32 mask = ethtool_get_feature_mask(ethcmd);
+ netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
struct ethtool_value edata = {
.cmd = ethcmd,
.data = !!(dev->features & mask),
};
- /* compatibility with discrete get_ ops */
- if (!(dev->hw_features & mask)) {
- u32 (*actor)(struct net_device *);
-
- actor = __ethtool_get_one_feature_actor(dev, ethcmd);
-
- /* bug compatibility with old get_rx_csum */
- if (ethcmd == ETHTOOL_GRXCSUM && !actor)
- actor = __ethtool_get_rx_csum_oldbug;
-
- if (actor)
- edata.data = actor(dev);
- }
-
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
return 0;
}
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_sg(struct net_device *dev, u32 data);
-static int __ethtool_set_tso(struct net_device *dev, u32 data);
-static int __ethtool_set_ufo(struct net_device *dev, u32 data);
-
static int ethtool_set_one_feature(struct net_device *dev,
void __user *useraddr, u32 ethcmd)
{
struct ethtool_value edata;
- u32 mask;
+ netdev_features_t mask;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
mask = ethtool_get_feature_mask(ethcmd);
mask &= dev->hw_features;
- if (mask) {
- if (edata.data)
- dev->wanted_features |= mask;
- else
- dev->wanted_features &= ~mask;
+ if (!mask)
+ return -EOPNOTSUPP;
- __netdev_update_features(dev);
- return 0;
- }
+ if (edata.data)
+ dev->wanted_features |= mask;
+ else
+ dev->wanted_features &= ~mask;
- /* Driver is not converted to ndo_fix_features or does not
- * support changing this offload. In the latter case it won't
- * have corresponding ethtool_ops field set.
- *
- * Following part is to be removed after all drivers advertise
- * their changeable features in netdev->hw_features and stop
- * using discrete offload setting ops.
- */
+ __netdev_update_features(dev);
- switch (ethcmd) {
- case ETHTOOL_STXCSUM:
- return __ethtool_set_tx_csum(dev, edata.data);
- case ETHTOOL_SRXCSUM:
- return __ethtool_set_rx_csum(dev, edata.data);
- case ETHTOOL_SSG:
- return __ethtool_set_sg(dev, edata.data);
- case ETHTOOL_STSO:
- return __ethtool_set_tso(dev, edata.data);
- case ETHTOOL_SUFO:
- return __ethtool_set_ufo(dev, edata.data);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
+}
+
+#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
+ ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
+ NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
+
+static u32 __ethtool_get_flags(struct net_device *dev)
+{
+ u32 flags = 0;
+
+ if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO;
+ if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN;
+ if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN;
+ if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE;
+ if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH;
+
+ return flags;
}
-int __ethtool_set_flags(struct net_device *dev, u32 data)
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
{
- u32 changed;
+ netdev_features_t features = 0, changed;
- if (data & ~flags_dup_features)
+ if (data & ~ETH_ALL_FLAGS)
return -EINVAL;
- /* legacy set_flags() op */
- if (dev->ethtool_ops->set_flags) {
- if (unlikely(dev->hw_features & flags_dup_features))
- netdev_warn(dev,
- "driver BUG: mixed hw_features and set_flags()\n");
- return dev->ethtool_ops->set_flags(dev, data);
- }
+ if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO;
+ if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX;
+ if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX;
+ if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE;
+ if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH;
/* allow changing only bits set in hw_features */
- changed = (data ^ dev->features) & flags_dup_features;
+ changed = (features ^ dev->features) & ETH_ALL_FEATURES;
if (changed & ~dev->hw_features)
return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
dev->wanted_features =
- (dev->wanted_features & ~changed) | (data & dev->hw_features);
+ (dev->wanted_features & ~changed) | (features & changed);
__netdev_update_features(dev);
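__ethtool_set_flags() now maps the legacy ETH_FLAG_* bits onto NETIF_F_* features and refuses to touch anything outside hw_features. A compact sketch of that changed-bits check with invented flag values; the real code routes the result through wanted_features and __netdev_update_features() instead of writing dev->features directly.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for NETIF_F_LRO, NETIF_F_RXHASH, ... */
#define F_LRO		(1u << 0)
#define F_RXHASH	(1u << 1)
#define ALL_FLAGS	(F_LRO | F_RXHASH)

static int set_flags(uint32_t *features, uint32_t hw_features, uint32_t requested)
{
	uint32_t changed = (requested ^ *features) & ALL_FLAGS;

	if (changed & ~hw_features)     /* asked to toggle a bit the device cannot change */
		return (changed & hw_features) ? -1 /* -EINVAL */ : -2 /* -EOPNOTSUPP */;

	*features = (*features & ~changed) | (requested & changed);
	return 0;
}

int main(void)
{
	uint32_t features = F_LRO, hw = F_LRO;  /* RXHASH is not toggleable here */

	printf("%d\n", set_flags(&features, hw, F_LRO | F_RXHASH)); /* rejected */
	printf("%d\n", set_flags(&features, hw, 0));                /* LRO cleared */
	printf("features=%#x\n", (unsigned)features);
	return 0;
}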
@@ -716,6 +439,7 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
{
struct ethtool_rxnfc info;
size_t info_size = sizeof(info);
+ int rc;
if (!dev->ethtool_ops->set_rxnfc)
return -EOPNOTSUPP;
@@ -731,7 +455,15 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
- return dev->ethtool_ops->set_rxnfc(dev, &info);
+ rc = dev->ethtool_ops->set_rxnfc(dev, &info);
+ if (rc)
+ return rc;
+
+ if (cmd == ETHTOOL_SRXCLSRLINS &&
+ copy_to_user(useraddr, &info, info_size))
+ return -EFAULT;
+
+ return 0;
}
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
@@ -792,34 +524,44 @@ err_out:
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_rxfh_indir *indir;
- u32 table_size;
- size_t full_size;
+ u32 user_size, dev_size;
+ u32 *indir;
int ret;
- if (!dev->ethtool_ops->get_rxfh_indir)
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->get_rxfh_indir)
+ return -EOPNOTSUPP;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
return -EOPNOTSUPP;
- if (copy_from_user(&table_size,
+ if (copy_from_user(&user_size,
useraddr + offsetof(struct ethtool_rxfh_indir, size),
- sizeof(table_size)))
+ sizeof(user_size)))
return -EFAULT;
- if (table_size >
- (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
- return -ENOMEM;
- full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
- indir = kzalloc(full_size, GFP_USER);
+ if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
+ &dev_size, sizeof(dev_size)))
+ return -EFAULT;
+
+ /* If the user buffer size is 0, this is just a query for the
+ * device table size. Otherwise, if it's smaller than the
+ * device table size it's an error.
+ */
+ if (user_size < dev_size)
+ return user_size == 0 ? 0 : -EINVAL;
+
+ indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
if (!indir)
return -ENOMEM;
- indir->cmd = ETHTOOL_GRXFHINDIR;
- indir->size = table_size;
ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
if (ret)
goto out;
- if (copy_to_user(useraddr, indir, full_size))
+ if (copy_to_user(useraddr +
+ offsetof(struct ethtool_rxfh_indir, ring_index[0]),
+ indir, dev_size * sizeof(indir[0])))
ret = -EFAULT;
out:
@@ -830,30 +572,56 @@ out:
static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_rxfh_indir *indir;
- u32 table_size;
- size_t full_size;
+ struct ethtool_rxnfc rx_rings;
+ u32 user_size, dev_size, i;
+ u32 *indir;
int ret;
- if (!dev->ethtool_ops->set_rxfh_indir)
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->set_rxfh_indir ||
+ !dev->ethtool_ops->get_rxnfc)
+ return -EOPNOTSUPP;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
return -EOPNOTSUPP;
- if (copy_from_user(&table_size,
+ if (copy_from_user(&user_size,
useraddr + offsetof(struct ethtool_rxfh_indir, size),
- sizeof(table_size)))
+ sizeof(user_size)))
return -EFAULT;
- if (table_size >
- (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
- return -ENOMEM;
- full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
- indir = kmalloc(full_size, GFP_USER);
+ if (user_size != 0 && user_size != dev_size)
+ return -EINVAL;
+
+ indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
if (!indir)
return -ENOMEM;
- if (copy_from_user(indir, useraddr, full_size)) {
- ret = -EFAULT;
+ rx_rings.cmd = ETHTOOL_GRXRINGS;
+ ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL);
+ if (ret)
goto out;
+
+ if (user_size == 0) {
+ for (i = 0; i < dev_size; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+ } else {
+ if (copy_from_user(indir,
+ useraddr +
+ offsetof(struct ethtool_rxfh_indir,
+ ring_index[0]),
+ dev_size * sizeof(indir[0]))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Validate ring indices */
+ for (i = 0; i < dev_size; i++) {
+ if (indir[i] >= rx_rings.data) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
}
ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
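The reworked RX flow hash indirection ioctls negotiate sizes explicitly: a user size of zero means "query the size" on GET and "use the default spread" on SET, a short buffer is rejected, and every entry must point at a real RX ring. A userspace sketch of the default fill and the validation; it assumes ethtool_rxfh_indir_default() is the simple index % n_rings spread, and the sizes are invented.

#include <stdint.h>
#include <stdio.h>

#define DEV_TABLE_SIZE	128   /* hypothetical get_rxfh_indir_size() result */
#define N_RX_RINGS	6     /* hypothetical ETHTOOL_GRXRINGS result */

int main(void)
{
	uint32_t indir[DEV_TABLE_SIZE];
	unsigned int i;

	/* default spread, matching the assumed index % n_rings behaviour */
	for (i = 0; i < DEV_TABLE_SIZE; i++)
		indir[i] = i % N_RX_RINGS;

	/* validation performed before handing the table to the driver */
	for (i = 0; i < DEV_TABLE_SIZE; i++)
		if (indir[i] >= N_RX_RINGS)
			return 1;   /* -EINVAL in the kernel */

	printf("table of %d entries spread over %d rings\n", DEV_TABLE_SIZE, N_RX_RINGS);
	return 0;
}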
@@ -863,58 +631,6 @@ out:
return ret;
}
-/*
- * ethtool does not (or did not) set masks for flow parameters that are
- * not specified, so if both value and mask are 0 then this must be
- * treated as equivalent to a mask with all bits set. Implement that
- * here rather than in drivers.
- */
-static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
-{
- struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
- struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
-
- if (fs->flow_type != TCP_V4_FLOW &&
- fs->flow_type != UDP_V4_FLOW &&
- fs->flow_type != SCTP_V4_FLOW)
- return;
-
- if (!(entry->ip4src | mask->ip4src))
- mask->ip4src = htonl(0xffffffff);
- if (!(entry->ip4dst | mask->ip4dst))
- mask->ip4dst = htonl(0xffffffff);
- if (!(entry->psrc | mask->psrc))
- mask->psrc = htons(0xffff);
- if (!(entry->pdst | mask->pdst))
- mask->pdst = htons(0xffff);
- if (!(entry->tos | mask->tos))
- mask->tos = 0xff;
- if (!(fs->vlan_tag | fs->vlan_tag_mask))
- fs->vlan_tag_mask = 0xffff;
- if (!(fs->data | fs->data_mask))
- fs->data_mask = 0xffffffffffffffffULL;
-}
-
-static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
- void __user *useraddr)
-{
- struct ethtool_rx_ntuple cmd;
- const struct ethtool_ops *ops = dev->ethtool_ops;
-
- if (!ops->set_rx_ntuple)
- return -EOPNOTSUPP;
-
- if (!(dev->features & NETIF_F_NTUPLE))
- return -EINVAL;
-
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
- return -EFAULT;
-
- rx_ntuple_fix_masks(&cmd.fs);
-
- return ops->set_rx_ntuple(dev, &cmd);
-}
-
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
@@ -1231,81 +947,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
}
-static int __ethtool_set_sg(struct net_device *dev, u32 data)
-{
- int err;
-
- if (!dev->ethtool_ops->set_sg)
- return -EOPNOTSUPP;
-
- if (data && !(dev->features & NETIF_F_ALL_CSUM))
- return -EINVAL;
-
- if (!data && dev->ethtool_ops->set_tso) {
- err = dev->ethtool_ops->set_tso(dev, 0);
- if (err)
- return err;
- }
-
- if (!data && dev->ethtool_ops->set_ufo) {
- err = dev->ethtool_ops->set_ufo(dev, 0);
- if (err)
- return err;
- }
- return dev->ethtool_ops->set_sg(dev, data);
-}
-
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
-{
- int err;
-
- if (!dev->ethtool_ops->set_tx_csum)
- return -EOPNOTSUPP;
-
- if (!data && dev->ethtool_ops->set_sg) {
- err = __ethtool_set_sg(dev, 0);
- if (err)
- return err;
- }
-
- return dev->ethtool_ops->set_tx_csum(dev, data);
-}
-
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_rx_csum)
- return -EOPNOTSUPP;
-
- if (!data)
- dev->features &= ~NETIF_F_GRO;
-
- return dev->ethtool_ops->set_rx_csum(dev, data);
-}
-
-static int __ethtool_set_tso(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_tso)
- return -EOPNOTSUPP;
-
- if (data && !(dev->features & NETIF_F_SG))
- return -EINVAL;
-
- return dev->ethtool_ops->set_tso(dev, data);
-}
-
-static int __ethtool_set_ufo(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_ufo)
- return -EOPNOTSUPP;
- if (data && !(dev->features & NETIF_F_SG))
- return -EINVAL;
- if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
- == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
- return -EINVAL;
- return dev->ethtool_ops->set_ufo(dev, data);
-}
-
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
{
struct ethtool_test test;
@@ -1771,9 +1412,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
break;
case ETHTOOL_GFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_flags ?
- dev->ethtool_ops->get_flags :
- ethtool_op_get_flags));
+ __ethtool_get_flags);
break;
case ETHTOOL_SFLAGS:
rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
@@ -1804,9 +1443,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_RESET:
rc = ethtool_reset(dev, useraddr);
break;
- case ETHTOOL_SRXNTUPLE:
- rc = ethtool_set_rx_ntuple(dev, useraddr);
- break;
case ETHTOOL_GSSET_INFO:
rc = ethtool_get_sset_info(dev, useraddr);
break;
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79..e318c7e9804 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
put_online_cpus();
}
+static void flow_cache_flush_task(struct work_struct *work)
+{
+ flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+ schedule_work(&flow_cache_flush_work);
+}
+
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
new file mode 100644
index 00000000000..0985b9b14b8
--- /dev/null
+++ b/net/core/flow_dissector.c
@@ -0,0 +1,143 @@
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+/* copy saddr & daddr, possibly using 64bit load/store
+ * Equivalent to : flow->src = iph->saddr;
+ * flow->dst = iph->daddr;
+ */
+static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
+ offsetof(typeof(*flow), src) + sizeof(flow->src));
+ memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+}
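iph_to_flow_copy_addrs() copies both IPv4 addresses with a single memcpy(), which is only safe because src and dst are adjacent in struct flow_keys; the BUILD_BUG_ON() pins that layout at compile time. A standalone sketch of the same trick using C11 _Static_assert and stand-in structures.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct addrs { uint32_t src; uint32_t dst; };     /* stand-in for flow->src/flow->dst */
struct hdr   { uint32_t saddr; uint32_t daddr; }; /* stand-in for iph->saddr/iph->daddr */

int main(void)
{
	struct hdr h = { 0x0a000001, 0x0a000002 };
	struct addrs a;

	/* same layout guarantee the kernel checks with BUILD_BUG_ON() */
	_Static_assert(offsetof(struct addrs, dst) ==
		       offsetof(struct addrs, src) + sizeof(a.src),
		       "src/dst must be adjacent");

	memcpy(&a.src, &h.saddr, sizeof(a.src) + sizeof(a.dst));  /* one copy for both */
	printf("%x %x\n", (unsigned)a.src, (unsigned)a.dst);
	return 0;
}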
+
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+ int poff, nhoff = skb_network_offset(skb);
+ u8 ip_proto;
+ __be16 proto = skb->protocol;
+
+ memset(flow, 0, sizeof(*flow));
+
+again:
+ switch (proto) {
+ case __constant_htons(ETH_P_IP): {
+ const struct iphdr *iph;
+ struct iphdr _iph;
+ip:
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return false;
+
+ if (ip_is_fragment(iph))
+ ip_proto = 0;
+ else
+ ip_proto = iph->protocol;
+ iph_to_flow_copy_addrs(flow, iph);
+ nhoff += iph->ihl * 4;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6): {
+ const struct ipv6hdr *iph;
+ struct ipv6hdr _iph;
+ipv6:
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return false;
+
+ ip_proto = iph->nexthdr;
+ flow->src = iph->saddr.s6_addr32[3];
+ flow->dst = iph->daddr.s6_addr32[3];
+ nhoff += sizeof(struct ipv6hdr);
+ break;
+ }
+ case __constant_htons(ETH_P_8021Q): {
+ const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+
+ vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+ if (!vlan)
+ return false;
+
+ proto = vlan->h_vlan_encapsulated_proto;
+ nhoff += sizeof(*vlan);
+ goto again;
+ }
+ case __constant_htons(ETH_P_PPP_SES): {
+ struct {
+ struct pppoe_hdr hdr;
+ __be16 proto;
+ } *hdr, _hdr;
+ hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ if (!hdr)
+ return false;
+ proto = hdr->proto;
+ nhoff += PPPOE_SES_HLEN;
+ switch (proto) {
+ case __constant_htons(PPP_IP):
+ goto ip;
+ case __constant_htons(PPP_IPV6):
+ goto ipv6;
+ default:
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ } *hdr, _hdr;
+
+ hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ if (!hdr)
+ return false;
+ /*
+ * Only look inside GRE if version zero and no
+ * routing
+ */
+ if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
+ proto = hdr->proto;
+ nhoff += 4;
+ if (hdr->flags & GRE_CSUM)
+ nhoff += 4;
+ if (hdr->flags & GRE_KEY)
+ nhoff += 4;
+ if (hdr->flags & GRE_SEQ)
+ nhoff += 4;
+ goto again;
+ }
+ break;
+ }
+ case IPPROTO_IPIP:
+ goto again;
+ default:
+ break;
+ }
+
+ flow->ip_proto = ip_proto;
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ __be32 *ports, _ports;
+
+ nhoff += poff;
+ ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+ if (ports)
+ flow->ports = *ports;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(skb_flow_dissect);
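skb_flow_dissect() above works by carrying a running network-header offset (nhoff), pulling each header with skb_header_pointer() and following VLAN, PPPoE and GRE encapsulation until it can record addresses, protocol and ports. A much-reduced userspace sketch of the same offset walk over a raw IPv4+TCP byte buffer; encapsulations and most error handling are left out, and the sample packet is hand-built.

#include <stdint.h>
#include <stdio.h>

struct flow { uint32_t src, dst; uint16_t sport, dport; uint8_t ip_proto; };

static uint16_t get_be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }
static uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 | (uint32_t)p[2] << 8 | p[3];
}

/* dissect a raw IPv4 packet starting at pkt[nhoff]; returns 0 on success */
static int dissect_ipv4(const uint8_t *pkt, size_t len, size_t nhoff, struct flow *f)
{
	size_t ihl;

	if (nhoff + 20 > len || (pkt[nhoff] >> 4) != 4)
		return -1;
	ihl = (size_t)(pkt[nhoff] & 0x0f) * 4;          /* like nhoff += iph->ihl * 4 */
	f->ip_proto = pkt[nhoff + 9];
	f->src = get_be32(pkt + nhoff + 12);
	f->dst = get_be32(pkt + nhoff + 16);
	nhoff += ihl;
	if (nhoff + 4 > len)
		return -1;
	f->sport = get_be16(pkt + nhoff);               /* ports sit right after the IP header */
	f->dport = get_be16(pkt + nhoff + 2);
	return 0;
}

int main(void)
{
	/* minimal IPv4 header (proto 6/TCP, 10.0.0.1 -> 10.0.0.2) + TCP ports 1234 -> 80 */
	uint8_t pkt[24] = {
		0x45, 0, 0, 24, 0, 0, 0, 0, 64, 6, 0, 0,
		10, 0, 0, 1, 10, 0, 0, 2,
		0x04, 0xd2, 0x00, 0x50,
	};
	struct flow f;

	if (!dissect_ipv4(pkt, sizeof(pkt), 0, &f))
		printf("proto %u %#x:%u -> %#x:%u\n", (unsigned)f.ip_proto,
		       (unsigned)f.src, (unsigned)f.sport,
		       (unsigned)f.dst, (unsigned)f.dport);
	return 0;
}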
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 039d51e6c28..e287346e093 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
it to safe state.
*/
skb_queue_purge(&n->arp_queue);
+ n->arp_queue_len_bytes = 0;
n->output = neigh_blackhole;
if (n->nud_state & NUD_VALID)
n->nud_state = NUD_NOARP;
@@ -272,7 +273,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
}
EXPORT_SYMBOL(neigh_ifdown);
-static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
@@ -287,7 +288,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
goto out_entries;
}
- n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+ if (tbl->entry_size)
+ n = kzalloc(tbl->entry_size, GFP_ATOMIC);
+ else {
+ int sz = sizeof(*n) + tbl->key_len;
+
+ sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
+ sz += dev->neigh_priv_len;
+ n = kzalloc(sz, GFP_ATOMIC);
+ }
if (!n)
goto out_entries;
@@ -313,11 +322,18 @@ out_entries:
goto out;
}
+static void neigh_get_hash_rnd(u32 *x)
+{
+ get_random_bytes(x, sizeof(*x));
+ *x |= 1;
+}
+
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
size_t size = (1 << shift) * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
struct neighbour __rcu **buckets;
+ int i;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
@@ -334,8 +350,8 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
}
ret->hash_buckets = buckets;
ret->hash_shift = shift;
- get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
- ret->hash_rnd |= 1;
+ for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
+ neigh_get_hash_rnd(&ret->hash_rnd[i]);
return ret;
}
@@ -462,7 +478,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
u32 hash_val;
int key_len = tbl->key_len;
int error;
- struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+ struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
struct neigh_hash_table *nht;
if (!n) {
@@ -480,6 +496,14 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
goto out_neigh_release;
}
+ if (dev->netdev_ops->ndo_neigh_construct) {
+ error = dev->netdev_ops->ndo_neigh_construct(n);
+ if (error < 0) {
+ rc = ERR_PTR(error);
+ goto out_neigh_release;
+ }
+ }
+
/* Device specific setup. */
if (n->parms->neigh_setup &&
(error = n->parms->neigh_setup(n)) < 0) {
@@ -677,18 +701,14 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
neigh_parms_destroy(parms);
}
-static void neigh_destroy_rcu(struct rcu_head *head)
-{
- struct neighbour *neigh = container_of(head, struct neighbour, rcu);
-
- kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
-}
/*
* neighbour must already be out of the table;
*
*/
void neigh_destroy(struct neighbour *neigh)
{
+ struct net_device *dev = neigh->dev;
+
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
@@ -702,14 +722,18 @@ void neigh_destroy(struct neighbour *neigh)
printk(KERN_WARNING "Impossible event.\n");
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
- dev_put(neigh->dev);
+ if (dev->netdev_ops->ndo_neigh_destroy)
+ dev->netdev_ops->ndo_neigh_destroy(neigh);
+
+ dev_put(dev);
neigh_parms_put(neigh->parms);
NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
atomic_dec(&neigh->tbl->entries);
- call_rcu(&neigh->rcu, neigh_destroy_rcu);
+ kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
@@ -842,6 +866,7 @@ static void neigh_invalidate(struct neighbour *neigh)
write_lock(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
@@ -980,15 +1005,20 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
if (neigh->nud_state == NUD_INCOMPLETE) {
if (skb) {
- if (skb_queue_len(&neigh->arp_queue) >=
- neigh->parms->queue_len) {
+ while (neigh->arp_queue_len_bytes + skb->truesize >
+ neigh->parms->queue_len_bytes) {
struct sk_buff *buff;
+
buff = __skb_dequeue(&neigh->arp_queue);
+ if (!buff)
+ break;
+ neigh->arp_queue_len_bytes -= buff->truesize;
kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
}
skb_dst_force(skb);
__skb_queue_tail(&neigh->arp_queue, skb);
+ neigh->arp_queue_len_bytes += skb->truesize;
}
rc = 1;
}
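__neigh_event_send() now accounts the unresolved-ARP queue in bytes of skb truesize and evicts the oldest entries until a new packet fits the per-neighbour byte budget. A tiny userspace sketch of that eviction policy; the 4096-byte budget and the packet sizes are made up.

#include <stdio.h>

#define QUEUE_LEN_BYTES	4096   /* hypothetical parms->queue_len_bytes */

int main(void)
{
	unsigned int sizes[] = { 1500, 1500, 1500, 600 };  /* truesize of arriving skbs */
	unsigned int queued_bytes = 0, i, head = 0, n = 0;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* drop from the head until the new packet fits the byte budget */
		while (n && queued_bytes + sizes[i] > QUEUE_LEN_BYTES) {
			queued_bytes -= sizes[head++];
			n--;
			printf("dropped oldest, now %u bytes queued\n", queued_bytes);
		}
		queued_bytes += sizes[i];
		n++;
	}
	printf("final: %u packets, %u bytes\n", n, queued_bytes);
	return 0;
}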
@@ -1167,7 +1197,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
rcu_read_lock();
/* On shaper/eql skb->dst->neighbour != neigh :( */
- if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+ if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
n1 = n2;
n1->output(n1, skb);
rcu_read_unlock();
@@ -1175,6 +1205,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
}
out:
if (update_isrouter) {
@@ -1477,11 +1508,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
tbl->parms.reachable_time =
neigh_rand_reach_time(tbl->parms.base_reachable_time);
- if (!tbl->kmem_cachep)
- tbl->kmem_cachep =
- kmem_cache_create(tbl->id, tbl->entry_size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL);
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
panic("cannot create neighbour cache statistics");
@@ -1566,9 +1592,6 @@ int neigh_table_clear(struct neigh_table *tbl)
free_percpu(tbl->stats);
tbl->stats = NULL;
- kmem_cache_destroy(tbl->kmem_cachep);
- tbl->kmem_cachep = NULL;
-
return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
@@ -1747,7 +1770,11 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
- NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
+ /* approximate value for deprecated QUEUE_LEN (in packets) */
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
+ DIV_ROUND_UP(parms->queue_len_bytes,
+ SKB_TRUESIZE(ETH_FRAME_LEN)));
NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
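The neighbour parameters now store queue_len_bytes, and the deprecated per-packet NDTPA_QUEUE_LEN/unres_qlen values are derived from it by dividing by SKB_TRUESIZE(ETH_FRAME_LEN) (and multiplied back on writes, as in neightbl_set() and proc_unres_qlen() below). A small sketch of that approximation; the 2048-byte truesize is a placeholder, not the real SKB_TRUESIZE() result.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define FRAME_TRUESIZE		2048   /* hypothetical SKB_TRUESIZE(ETH_FRAME_LEN) */

int main(void)
{
	int packets = 3;                                    /* legacy unres_qlen setting */
	int bytes   = packets * FRAME_TRUESIZE;             /* stored queue_len_bytes */
	int shown   = DIV_ROUND_UP(bytes, FRAME_TRUESIZE);  /* value reported back */

	printf("%d packets -> %d bytes -> %d packets\n", packets, bytes, shown);
	return 0;
}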
@@ -1808,7 +1835,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
- ndc.ndtc_hash_rnd = nht->hash_rnd;
+ ndc.ndtc_hash_rnd = nht->hash_rnd[0];
ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
rcu_read_unlock_bh();
@@ -1974,7 +2001,11 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
switch (i) {
case NDTPA_QUEUE_LEN:
- p->queue_len = nla_get_u32(tbp[i]);
+ p->queue_len_bytes = nla_get_u32(tbp[i]) *
+ SKB_TRUESIZE(ETH_FRAME_LEN);
+ break;
+ case NDTPA_QUEUE_LENBYTES:
+ p->queue_len_bytes = nla_get_u32(tbp[i]);
break;
case NDTPA_PROXY_QLEN:
p->proxy_qlen = nla_get_u32(tbp[i]);
@@ -2397,7 +2428,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
- pn = pn->next;
+ do {
+ pn = pn->next;
+ } while (pn && !net_eq(pneigh_net(pn), net));
+
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
@@ -2635,117 +2669,158 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
-#define NEIGH_VARS_MAX 19
+static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int size, ret;
+ ctl_table tmp = *ctl;
+
+ tmp.data = &size;
+ size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
+ ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret)
+ *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
+ return ret;
+}
+
+enum {
+ NEIGH_VAR_MCAST_PROBE,
+ NEIGH_VAR_UCAST_PROBE,
+ NEIGH_VAR_APP_PROBE,
+ NEIGH_VAR_RETRANS_TIME,
+ NEIGH_VAR_BASE_REACHABLE_TIME,
+ NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_GC_STALETIME,
+ NEIGH_VAR_QUEUE_LEN,
+ NEIGH_VAR_QUEUE_LEN_BYTES,
+ NEIGH_VAR_PROXY_QLEN,
+ NEIGH_VAR_ANYCAST_DELAY,
+ NEIGH_VAR_PROXY_DELAY,
+ NEIGH_VAR_LOCKTIME,
+ NEIGH_VAR_RETRANS_TIME_MS,
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS,
+ NEIGH_VAR_GC_INTERVAL,
+ NEIGH_VAR_GC_THRESH1,
+ NEIGH_VAR_GC_THRESH2,
+ NEIGH_VAR_GC_THRESH3,
+ NEIGH_VAR_MAX
+};
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table neigh_vars[NEIGH_VARS_MAX];
+ struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
char *dev_name;
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
- {
+ [NEIGH_VAR_MCAST_PROBE] = {
.procname = "mcast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_UCAST_PROBE] = {
.procname = "ucast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_APP_PROBE] = {
.procname = "app_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_RETRANS_TIME] = {
.procname = "retrans_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_BASE_REACHABLE_TIME] = {
.procname = "base_reachable_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_DELAY_PROBE_TIME] = {
.procname = "delay_first_probe_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_GC_STALETIME] = {
.procname = "gc_stale_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_QUEUE_LEN] = {
.procname = "unres_qlen",
.maxlen = sizeof(int),
.mode = 0644,
+ .proc_handler = proc_unres_qlen,
+ },
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = {
+ .procname = "unres_qlen_bytes",
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_PROXY_QLEN] = {
.procname = "proxy_qlen",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_ANYCAST_DELAY] = {
.procname = "anycast_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_PROXY_DELAY] = {
.procname = "proxy_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_LOCKTIME] = {
.procname = "locktime",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_RETRANS_TIME_MS] = {
.procname = "retrans_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
- {
+ [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
.procname = "base_reachable_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
- {
+ [NEIGH_VAR_GC_INTERVAL] = {
.procname = "gc_interval",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_GC_THRESH1] = {
.procname = "gc_thresh1",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_GC_THRESH2] = {
.procname = "gc_thresh2",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_GC_THRESH3] = {
.procname = "gc_thresh3",
.maxlen = sizeof(int),
.mode = 0644,
@@ -2778,47 +2853,49 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
if (!t)
goto err;
- t->neigh_vars[0].data = &p->mcast_probes;
- t->neigh_vars[1].data = &p->ucast_probes;
- t->neigh_vars[2].data = &p->app_probes;
- t->neigh_vars[3].data = &p->retrans_time;
- t->neigh_vars[4].data = &p->base_reachable_time;
- t->neigh_vars[5].data = &p->delay_probe_time;
- t->neigh_vars[6].data = &p->gc_staletime;
- t->neigh_vars[7].data = &p->queue_len;
- t->neigh_vars[8].data = &p->proxy_qlen;
- t->neigh_vars[9].data = &p->anycast_delay;
- t->neigh_vars[10].data = &p->proxy_delay;
- t->neigh_vars[11].data = &p->locktime;
- t->neigh_vars[12].data = &p->retrans_time;
- t->neigh_vars[13].data = &p->base_reachable_time;
+ t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
+ t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
+ t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
+ t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
+ t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
+ t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
+ t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
+ t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
+ t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
+ t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
+ t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
if (dev) {
dev_name_source = dev->name;
/* Terminate the table early */
- memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+ memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
+ sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
- t->neigh_vars[14].data = (int *)(p + 1);
- t->neigh_vars[15].data = (int *)(p + 1) + 1;
- t->neigh_vars[16].data = (int *)(p + 1) + 2;
- t->neigh_vars[17].data = (int *)(p + 1) + 3;
+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
}
if (handler) {
/* RetransTime */
- t->neigh_vars[3].proc_handler = handler;
- t->neigh_vars[3].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
/* ReachableTime */
- t->neigh_vars[4].proc_handler = handler;
- t->neigh_vars[4].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
/* RetransTime (in milliseconds)*/
- t->neigh_vars[12].proc_handler = handler;
- t->neigh_vars[12].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
/* ReachableTime (in milliseconds) */
- t->neigh_vars[13].proc_handler = handler;
- t->neigh_vars[13].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
}
t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c0..abf4393a77b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/jiffies.h>
#include <net/wext.h>
#include "net-sysfs.h"
@@ -606,9 +607,12 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
- if (old_map)
+ if (map)
+ jump_label_inc(&rps_needed);
+ if (old_map) {
kfree_rcu(old_map, rcu);
-
+ jump_label_dec(&rps_needed);
+ }
free_cpumask_var(mask);
return len;
}
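store_rps_map() above takes a reference on the rps_needed jump label whenever a queue gains an RPS cpumask and drops it when a map is removed, so netif_receive_skb() only enters the RPS path while at least one map exists. A plain-counter sketch of those refcount semantics; a real jump label also patches the branch itself, which this userspace stand-in cannot show.

#include <stdio.h>

static int rps_needed;                 /* stand-in for the jump label refcount */

static void set_queue_map(int *queue_has_map, int enable)
{
	if (enable && !*queue_has_map)
		rps_needed++;              /* jump_label_inc(&rps_needed) */
	else if (!enable && *queue_has_map)
		rps_needed--;              /* jump_label_dec(&rps_needed) */
	*queue_has_map = enable;
}

int main(void)
{
	int q0 = 0, q1 = 0;

	set_queue_map(&q0, 1);
	set_queue_map(&q1, 1);
	set_queue_map(&q0, 0);
	printf("rps path enabled: %d\n", rps_needed > 0);  /* still on: q1 has a map */
	return 0;
}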
@@ -618,15 +622,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
char *buf)
{
struct rps_dev_flow_table *flow_table;
- unsigned int val = 0;
+ unsigned long val = 0;
rcu_read_lock();
flow_table = rcu_dereference(queue->rps_flow_table);
if (flow_table)
- val = flow_table->mask + 1;
+ val = (unsigned long)flow_table->mask + 1;
rcu_read_unlock();
- return sprintf(buf, "%u\n", val);
+ return sprintf(buf, "%lu\n", val);
}
static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -650,33 +654,46 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
struct rx_queue_attribute *attr,
const char *buf, size_t len)
{
- unsigned int count;
- char *endp;
+ unsigned long mask, count;
struct rps_dev_flow_table *table, *old_table;
static DEFINE_SPINLOCK(rps_dev_flow_lock);
+ int rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- count = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &count);
+ if (rc < 0)
+ return rc;
if (count) {
- int i;
-
- if (count > 1<<30) {
+ mask = count - 1;
+ /* mask = roundup_pow_of_two(count) - 1;
+ * without overflows...
+ */
+ while ((mask | (mask >> 1)) != mask)
+ mask |= (mask >> 1);
+ /* On 64-bit arches, must check mask fits in table->mask (u32),
+ * and on 32-bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+ * doesn't overflow.
+ */
+#if BITS_PER_LONG > 32
+ if (mask > (unsigned long)(u32)mask)
+ return -EINVAL;
+#else
+ if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
+ / sizeof(struct rps_dev_flow)) {
/* Enforce a limit to prevent overflow */
return -EINVAL;
}
- count = roundup_pow_of_two(count);
- table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+ table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
if (!table)
return -ENOMEM;
- table->mask = count - 1;
- for (i = 0; i < count; i++)
- table->flows[i].cpu = RPS_NO_CPU;
+ table->mask = mask;
+ for (count = 0; count <= mask; count++)
+ table->flows[count].cpu = RPS_NO_CPU;
} else
table = NULL;
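store_rps_dev_flow_table_cnt() rounds the requested flow count up to a power of two by smearing the top bit down into a mask, which yields roundup_pow_of_two(count) - 1 without risking the overflow a plain roundup could hit for very large counts. A standalone sketch of the bit-smearing loop.

#include <stdio.h>

/* round count up to a power of two and return that power minus one,
 * without ever computing a value larger than the starting count's top bit allows */
static unsigned long count_to_mask(unsigned long count)
{
	unsigned long mask = count - 1;

	while ((mask | (mask >> 1)) != mask)
		mask |= mask >> 1;
	return mask;
}

int main(void)
{
	unsigned long c;

	for (c = 1; c <= 100; c += 37)
		printf("count %-3lu -> mask 0x%lx (table size %lu)\n",
		       c, count_to_mask(c), count_to_mask(c) + 1);
	return 0;
}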
@@ -780,7 +797,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
#endif
}
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
/*
* netdev_queue sysfs structures and functions.
*/
@@ -826,6 +843,133 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
.store = netdev_queue_attr_store,
};
+static ssize_t show_trans_timeout(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ char *buf)
+{
+ unsigned long trans_timeout;
+
+ spin_lock_irq(&queue->_xmit_lock);
+ trans_timeout = queue->trans_timeout;
+ spin_unlock_irq(&queue->_xmit_lock);
+
+ return sprintf(buf, "%lu", trans_timeout);
+}
+
+static struct netdev_queue_attribute queue_trans_timeout =
+ __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
+#ifdef CONFIG_BQL
+/*
+ * Byte queue limits sysfs structures and functions.
+ */
+static ssize_t bql_show(char *buf, unsigned int value)
+{
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t bql_set(const char *buf, const size_t count,
+ unsigned int *pvalue)
+{
+ unsigned int value;
+ int err;
+
+ if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
+ value = DQL_MAX_LIMIT;
+ else {
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+ if (value > DQL_MAX_LIMIT)
+ return -EINVAL;
+ }
+
+ *pvalue = value;
+
+ return count;
+}
+
+static ssize_t bql_show_hold_time(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr,
+ char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
+}
+
+static ssize_t bql_set_hold_time(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ const char *buf, size_t len)
+{
+ struct dql *dql = &queue->dql;
+ unsigned value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ dql->slack_hold_time = msecs_to_jiffies(value);
+
+ return len;
+}
+
+static struct netdev_queue_attribute bql_hold_time_attribute =
+ __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
+ bql_set_hold_time);
+
+static ssize_t bql_show_inflight(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attr,
+ char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
+}
+
+static struct netdev_queue_attribute bql_inflight_attribute =
+ __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+
+#define BQL_ATTR(NAME, FIELD) \
+static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
+ struct netdev_queue_attribute *attr, \
+ char *buf) \
+{ \
+ return bql_show(buf, queue->dql.FIELD); \
+} \
+ \
+static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
+ struct netdev_queue_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return bql_set(buf, len, &queue->dql.FIELD); \
+} \
+ \
+static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \
+ __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \
+ bql_set_ ## NAME);
+
+BQL_ATTR(limit, limit)
+BQL_ATTR(limit_max, max_limit)
+BQL_ATTR(limit_min, min_limit)
+
+static struct attribute *dql_attrs[] = {
+ &bql_limit_attribute.attr,
+ &bql_limit_max_attribute.attr,
+ &bql_limit_min_attribute.attr,
+ &bql_hold_time_attribute.attr,
+ &bql_inflight_attribute.attr,
+ NULL
+};
+
+static struct attribute_group dql_group = {
+ .name = "byte_queue_limits",
+ .attrs = dql_attrs,
+};
+#endif /* CONFIG_BQL */
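The new byte_queue_limits sysfs group exposes the per-queue dynamic queue limit state: the limit files accept either a decimal number or the word "max" (meaning DQL_MAX_LIMIT), and inflight is simply bytes queued minus bytes completed. A userspace sketch of the parsing and the accounting; the MAX_LIMIT value and the byte counts below are placeholders, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_LIMIT	0x7fffffffU     /* placeholder for DQL_MAX_LIMIT */

struct dql_state { unsigned int limit, num_queued, num_completed; };

/* parse "max" or a decimal number, as bql_set() does for the limit files */
static int parse_limit(const char *buf, unsigned int *out)
{
	char *end;
	unsigned long v;

	if (!strcmp(buf, "max")) {
		*out = MAX_LIMIT;
		return 0;
	}
	v = strtoul(buf, &end, 10);
	if (*end || v > MAX_LIMIT)
		return -1;
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	struct dql_state q = { 0, 0, 0 };

	parse_limit("max", &q.limit);
	q.num_queued += 3000;          /* driver queued 3000 bytes for transmit */
	q.num_completed += 1800;       /* hardware reported 1800 bytes sent */

	/* same arithmetic as the sysfs "inflight" file */
	printf("limit %u, inflight %u\n", q.limit, q.num_queued - q.num_completed);
	return 0;
}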
+
+#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
struct net_device *dev = queue->dev;
@@ -890,6 +1034,52 @@ static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) \
rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+static void xps_queue_release(struct netdev_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ unsigned long index;
+ int i, pos, nonempty = 0;
+
+ index = get_netdev_queue_index(queue);
+
+ mutex_lock(&xps_map_mutex);
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ if (dev_maps) {
+ for_each_possible_cpu(i) {
+ map = xmap_dereference(dev_maps->cpu_map[i]);
+ if (!map)
+ continue;
+
+ for (pos = 0; pos < map->len; pos++)
+ if (map->queues[pos] == index)
+ break;
+
+ if (pos < map->len) {
+ if (map->len > 1)
+ map->queues[pos] =
+ map->queues[--map->len];
+ else {
+ RCU_INIT_POINTER(dev_maps->cpu_map[i],
+ NULL);
+ kfree_rcu(map, rcu);
+ map = NULL;
+ }
+ }
+ if (map)
+ nonempty = 1;
+ }
+
+ if (!nonempty) {
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ kfree_rcu(dev_maps, rcu);
+ }
+ }
+ mutex_unlock(&xps_map_mutex);
+}
+
static ssize_t store_xps_map(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute,
const char *buf, size_t len)
@@ -901,7 +1091,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
struct xps_map *map, *new_map;
struct xps_dev_maps *dev_maps, *new_dev_maps;
int nonempty = 0;
- int numa_node = -2;
+ int numa_node_id = -2;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -944,10 +1134,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
if (need_set) {
- if (numa_node == -2)
- numa_node = cpu_to_node(cpu);
- else if (numa_node != cpu_to_node(cpu))
- numa_node = -1;
+ if (numa_node_id == -2)
+ numa_node_id = cpu_to_node(cpu);
+ else if (numa_node_id != cpu_to_node(cpu))
+ numa_node_id = -1;
}
#endif
if (need_set && pos >= map_len) {
@@ -997,7 +1187,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
if (dev_maps)
kfree_rcu(dev_maps, rcu);
- netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+ netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
NUMA_NO_NODE);
mutex_unlock(&xps_map_mutex);
@@ -1020,58 +1210,23 @@ error:
static struct netdev_queue_attribute xps_cpus_attribute =
__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+#endif /* CONFIG_XPS */
static struct attribute *netdev_queue_default_attrs[] = {
+ &queue_trans_timeout.attr,
+#ifdef CONFIG_XPS
&xps_cpus_attribute.attr,
+#endif
NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
struct netdev_queue *queue = to_netdev_queue(kobj);
- struct net_device *dev = queue->dev;
- struct xps_dev_maps *dev_maps;
- struct xps_map *map;
- unsigned long index;
- int i, pos, nonempty = 0;
-
- index = get_netdev_queue_index(queue);
-
- mutex_lock(&xps_map_mutex);
- dev_maps = xmap_dereference(dev->xps_maps);
-
- if (dev_maps) {
- for_each_possible_cpu(i) {
- map = xmap_dereference(dev_maps->cpu_map[i]);
- if (!map)
- continue;
- for (pos = 0; pos < map->len; pos++)
- if (map->queues[pos] == index)
- break;
-
- if (pos < map->len) {
- if (map->len > 1)
- map->queues[pos] =
- map->queues[--map->len];
- else {
- RCU_INIT_POINTER(dev_maps->cpu_map[i],
- NULL);
- kfree_rcu(map, rcu);
- map = NULL;
- }
- }
- if (map)
- nonempty = 1;
- }
-
- if (!nonempty) {
- RCU_INIT_POINTER(dev->xps_maps, NULL);
- kfree_rcu(dev_maps, rcu);
- }
- }
-
- mutex_unlock(&xps_map_mutex);
+#ifdef CONFIG_XPS
+ xps_queue_release(queue);
+#endif
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
@@ -1092,22 +1247,29 @@ static int netdev_queue_add_kobject(struct net_device *net, int index)
kobj->kset = net->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
"tx-%u", index);
- if (error) {
- kobject_put(kobj);
- return error;
- }
+ if (error)
+ goto exit;
+
+#ifdef CONFIG_BQL
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error)
+ goto exit;
+#endif
kobject_uevent(kobj, KOBJ_ADD);
dev_hold(queue->dev);
+ return 0;
+exit:
+ kobject_put(kobj);
return error;
}
-#endif /* CONFIG_XPS */
+#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
int i;
int error = 0;
@@ -1119,20 +1281,26 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
}
}
- while (--i >= new_num)
- kobject_put(&net->_tx[i].kobj);
+ while (--i >= new_num) {
+ struct netdev_queue *queue = net->_tx + i;
+
+#ifdef CONFIG_BQL
+ sysfs_remove_group(&queue->kobj, &dql_group);
+#endif
+ kobject_put(&queue->kobj);
+ }
return error;
#else
return 0;
-#endif
+#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *net)
{
int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
net->queues_kset = kset_create_and_add("queues",
NULL, &net->dev.kobj);
if (!net->queues_kset)
@@ -1173,7 +1341,7 @@ static void remove_queue_kobjects(struct net_device *net)
net_rx_queue_update_kobjects(net, real_rx, 0);
netdev_queue_update_kobjects(net, real_tx, 0);
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
kset_unregister(net->queues_kset);
#endif
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index cf64c1ffa4c..0d38808a230 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work)
local_irq_save(flags);
__netif_tx_lock(txq, smp_processor_id());
- if (netif_tx_queue_frozen_or_stopped(txq) ||
+ if (netif_xmit_frozen_or_stopped(txq) ||
ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
__netif_tx_unlock(txq);
@@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
tries > 0; --tries) {
if (__netif_tx_trylock(txq)) {
- if (!netif_tx_queue_stopped(txq)) {
+ if (!netif_xmit_stopped(txq)) {
status = ops->ndo_start_xmit(skb, dev);
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
@@ -422,6 +422,7 @@ static void arp_reply(struct sk_buff *skb)
struct sk_buff *send_skb;
struct netpoll *np, *tmp;
unsigned long flags;
+ int hlen, tlen;
int hits = 0;
if (list_empty(&npinfo->rx_np))
@@ -479,8 +480,9 @@ static void arp_reply(struct sk_buff *skb)
if (tip != np->local_ip)
continue;
- send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
- LL_RESERVED_SPACE(np->dev));
+ hlen = LL_RESERVED_SPACE(np->dev);
+ tlen = np->dev->needed_tailroom;
+ send_skb = find_skb(np, size + hlen + tlen, hlen);
if (!send_skb)
continue;
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
new file mode 100644
index 00000000000..3a9fd4826b7
--- /dev/null
+++ b/net/core/netprio_cgroup.c
@@ -0,0 +1,344 @@
+/*
+ * net/core/netprio_cgroup.c Priority Control Group
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
+#include <linux/atomic.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/netprio_cgroup.h>
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+ struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_prio_subsys = {
+ .name = "net_prio",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+ .populate = cgrp_populate,
+#ifdef CONFIG_NETPRIO_CGROUP
+ .subsys_id = net_prio_subsys_id,
+#endif
+ .module = THIS_MODULE
+};
+
+#define PRIOIDX_SZ 128
+
+static unsigned long prioidx_map[PRIOIDX_SZ];
+static DEFINE_SPINLOCK(prioidx_map_lock);
+static atomic_t max_prioidx = ATOMIC_INIT(0);
+
+static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
+{
+ return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
+ struct cgroup_netprio_state, css);
+}
+
+static int get_prioidx(u32 *prio)
+{
+ unsigned long flags;
+ u32 prioidx;
+
+ spin_lock_irqsave(&prioidx_map_lock, flags);
+ prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+ set_bit(prioidx, prioidx_map);
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+ if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
+ return -ENOSPC;
+
+ atomic_set(&max_prioidx, prioidx);
+ *prio = prioidx;
+ return 0;
+}
+
+static void put_prioidx(u32 idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&prioidx_map_lock, flags);
+ clear_bit(idx, prioidx_map);
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+}
+
+static void extend_netdev_table(struct net_device *dev, u32 new_len)
+{
+ size_t new_size = sizeof(struct netprio_map) +
+ ((sizeof(u32) * new_len));
+ struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
+ struct netprio_map *old_priomap;
+ int i;
+
+ old_priomap = rtnl_dereference(dev->priomap);
+
+ if (!new_priomap) {
+ printk(KERN_WARNING "Unable to alloc new priomap!\n");
+ return;
+ }
+
+ for (i = 0;
+ old_priomap && (i < old_priomap->priomap_len);
+ i++)
+ new_priomap->priomap[i] = old_priomap->priomap[i];
+
+ new_priomap->priomap_len = new_len;
+
+ rcu_assign_pointer(dev->priomap, new_priomap);
+ if (old_priomap)
+ kfree_rcu(old_priomap, rcu);
+}
+
+static void update_netdev_tables(void)
+{
+ struct net_device *dev;
+ u32 max_len = atomic_read(&max_prioidx);
+ struct netprio_map *map;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ map = rtnl_dereference(dev->priomap);
+ if ((!map) ||
+ (map->priomap_len < max_len))
+ extend_netdev_table(dev, max_len);
+ }
+ rtnl_unlock();
+}
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+ struct cgroup *cgrp)
+{
+ struct cgroup_netprio_state *cs;
+ int ret;
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
+ kfree(cs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = get_prioidx(&cs->prioidx);
+ if (ret != 0) {
+ printk(KERN_WARNING "No space in priority index array\n");
+ kfree(cs);
+ return ERR_PTR(ret);
+ }
+
+ return &cs->css;
+}
+
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+ struct cgroup_netprio_state *cs;
+ struct net_device *dev;
+ struct netprio_map *map;
+
+ cs = cgrp_netprio_state(cgrp);
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ map = rtnl_dereference(dev->priomap);
+ if (map)
+ map->priomap[cs->prioidx] = 0;
+ }
+ rtnl_unlock();
+ put_prioidx(cs->prioidx);
+ kfree(cs);
+}
+
+static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+{
+ return (u64)cgrp_netprio_state(cgrp)->prioidx;
+}
+
+static int read_priomap(struct cgroup *cont, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct net_device *dev;
+ u32 prioidx = cgrp_netprio_state(cont)->prioidx;
+ u32 priority;
+ struct netprio_map *map;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ map = rcu_dereference(dev->priomap);
+ priority = map ? map->priomap[prioidx] : 0;
+ cb->fill(cb, dev->name, priority);
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+{
+ char *devname = kstrdup(buffer, GFP_KERNEL);
+ int ret = -EINVAL;
+ u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
+ unsigned long priority;
+ char *priostr;
+ struct net_device *dev;
+ struct netprio_map *map;
+
+ if (!devname)
+ return -ENOMEM;
+
+ /*
+ * Minimally sized valid priomap string
+ */
+ if (strlen(devname) < 3)
+ goto out_free_devname;
+
+ priostr = strstr(devname, " ");
+ if (!priostr)
+ goto out_free_devname;
+
+ /*
+ * Separate the devname from the associated priority
+ * and advance the priostr pointer to the priority value
+ */
+ *priostr = '\0';
+ priostr++;
+
+ /*
+ * If priostr points at the terminating NUL, we're at the end of the
+ * passed-in string and it's not a valid write
+ */
+ if (*priostr == '\0')
+ goto out_free_devname;
+
+ ret = kstrtoul(priostr, 10, &priority);
+ if (ret < 0)
+ goto out_free_devname;
+
+ ret = -ENODEV;
+
+ dev = dev_get_by_name(&init_net, devname);
+ if (!dev)
+ goto out_free_devname;
+
+ update_netdev_tables();
+ ret = 0;
+ rcu_read_lock();
+ map = rcu_dereference(dev->priomap);
+ if (map)
+ map->priomap[prioidx] = priority;
+ rcu_read_unlock();
+ dev_put(dev);
+
+out_free_devname:
+ kfree(devname);
+ return ret;
+}
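The accepted input is therefore a single "<ifname> <priority>" pair per write. A small userspace sketch (the cgroup mount point and group name are assumptions; the file comes from the "ifpriomap" cftype defined below):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed mount point and group; format parsed by write_priomap() */
	const char *path = "/sys/fs/cgroup/net_prio/mygroup/net_prio.ifpriomap";
	const char *entry = "eth0 5";		/* "<ifname> <priority>" */
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, entry, strlen(entry)) != (ssize_t)strlen(entry)) {
		close(fd);
		return 1;
	}
	return close(fd) ? 1 : 0;
}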
+
+static struct cftype ss_files[] = {
+ {
+ .name = "prioidx",
+ .read_u64 = read_prioidx,
+ },
+ {
+ .name = "ifpriomap",
+ .read_map = read_priomap,
+ .write_string = write_priomap,
+ },
+};
+
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+ return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
+}
+
+static int netprio_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct netprio_map *old;
+ u32 max_len = atomic_read(&max_prioidx);
+
+ /*
+ * Note this is called with rtnl_lock held so we have update side
+ * protection on our rcu assignments
+ */
+
+ switch (event) {
+
+ case NETDEV_REGISTER:
+ if (max_len)
+ extend_netdev_table(dev, max_len);
+ break;
+ case NETDEV_UNREGISTER:
+ old = rtnl_dereference(dev->priomap);
+ RCU_INIT_POINTER(dev->priomap, NULL);
+ if (old)
+ kfree_rcu(old, rcu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block netprio_device_notifier = {
+ .notifier_call = netprio_device_event
+};
+
+static int __init init_cgroup_netprio(void)
+{
+ int ret;
+
+ ret = cgroup_load_subsys(&net_prio_subsys);
+ if (ret)
+ goto out;
+#ifndef CONFIG_NETPRIO_CGROUP
+ smp_wmb();
+ net_prio_subsys_id = net_prio_subsys.subsys_id;
+#endif
+
+ register_netdevice_notifier(&netprio_device_notifier);
+
+out:
+ return ret;
+}
+
+static void __exit exit_cgroup_netprio(void)
+{
+ struct netprio_map *old;
+ struct net_device *dev;
+
+ unregister_netdevice_notifier(&netprio_device_notifier);
+
+ cgroup_unload_subsys(&net_prio_subsys);
+
+#ifndef CONFIG_NETPRIO_CGROUP
+ net_prio_subsys_id = -1;
+ synchronize_rcu();
+#endif
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ old = rtnl_dereference(dev->priomap);
+ RCU_INIT_POINTER(dev->priomap, NULL);
+ if (old)
+ kfree_rcu(old, rcu);
+ }
+ rtnl_unlock();
+}
+
+module_init(init_cgroup_netprio);
+module_exit(exit_cgroup_netprio);
+MODULE_LICENSE("GPL v2");
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0001c243b35..65f80c7b165 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1304,7 +1304,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
+ pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
if (debug)
printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
@@ -1327,8 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
- &pkt_dev->min_in6_daddr);
+ pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
if (debug)
printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
@@ -1371,7 +1370,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
+ pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
if (debug)
printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
@@ -2025,13 +2024,13 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
pkt_dev->odevname);
- pkt_dev->queue_map_min = ntxq - 1;
+ pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
pkt_dev->odevname);
- pkt_dev->queue_map_max = ntxq - 1;
+ pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
}
/* Default to the interface's mac if not explicitly set. */
@@ -2079,9 +2078,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
ifp = ifp->if_next) {
if (ifp->scope == IFA_LINK &&
!(ifp->flags & IFA_F_TENTATIVE)) {
- ipv6_addr_copy(&pkt_dev->
- cur_in6_saddr,
- &ifp->addr);
+ pkt_dev->cur_in6_saddr = ifp->addr;
err = 0;
break;
}
@@ -2958,8 +2955,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->payload_len = htons(sizeof(struct udphdr) + datalen);
iph->nexthdr = IPPROTO_UDP;
- ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
- ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
+ iph->daddr = pkt_dev->cur_in6_daddr;
+ iph->saddr = pkt_dev->cur_in6_saddr;
skb->mac_header = (skb->network_header - ETH_HLEN -
pkt_dev->pkt_overhead);
@@ -3345,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+ if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
ret = NETDEV_TX_BUSY;
pkt_dev->last_ok = 0;
goto unlock;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 182236b2510..9b570a6a33c 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -26,10 +26,11 @@
* but then some measure against one socket starving all other sockets
* would be needed.
*
- * It was 128 by default. Experiments with real servers show, that
+ * Its minimum value is 128. Experiments with real servers show that
* it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low-memory machines,
+ * and it will increase in proportion to the machine's memory.
* Note : Don't forget somaxconn that may limit backlog too.
*/
int sysctl_max_syn_backlog = 256;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9083e82bdae..dbf2ddafd52 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -273,6 +273,17 @@ EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+ const struct rtnl_link_ops *ops;
+
+ list_for_each_entry(ops, &link_ops, list) {
+ if (!strcmp(ops->kind, kind))
+ return ops;
+ }
+ return NULL;
+}
+
/**
* __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
* @ops: struct rtnl_link_ops * to register
@@ -285,6 +296,9 @@ static LIST_HEAD(link_ops);
*/
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
+ if (rtnl_link_ops_get(ops->kind))
+ return -EEXIST;
+
if (!ops->dellink)
ops->dellink = unregister_netdevice_queue;
@@ -351,17 +365,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
-{
- const struct rtnl_link_ops *ops;
-
- list_for_each_entry(ops, &link_ops, list) {
- if (!strcmp(ops->kind, kind))
- return ops;
- }
- return NULL;
-}
-
static size_t rtnl_link_get_size(const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 025233de25f..6fd44606fdd 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
}
late_initcall(net_secret_init);
+#ifdef CONFIG_INET
static u32 seq_scale(u32 seq)
{
/*
@@ -33,8 +34,9 @@ static u32 seq_scale(u32 seq)
*/
return seq + (ktime_to_ns(ktime_get_real()) >> 6);
}
+#endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport)
{
@@ -132,7 +134,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
#endif
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+#if IS_ENABLED(CONFIG_IP_DCCP)
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)
{
@@ -154,7 +156,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
}
EXPORT_SYMBOL(secure_dccp_sequence_number);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport)
{
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 18a3cebb753..da0c97f2fab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -245,6 +245,55 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);
/**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc().
+ * The return value is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+ * Before IO, the driver allocates only the data buffer where the NIC
+ * puts the incoming frame. The driver should add room at head (NET_SKB_PAD)
+ * and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
+ * After IO, the driver calls build_skb() to allocate the sk_buff and
+ * populate it before handing the packet to the stack.
+ * RX rings then contain only data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+ unsigned int size;
+
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb->mac_header = ~0U;
+#endif
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+ kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+ return skb;
+}
+EXPORT_SYMBOL(build_skb);
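A hedged sketch of the driver contract spelled out in the comment above (the foo_* names and the frame size are hypothetical):

/* Allocate only the data buffer before IO, as the comment above requires:
 * headroom, frame space, and tailroom for the shared info.
 */
static void *foo_alloc_rx_data(unsigned int frame_size)
{
	return kmalloc(NET_SKB_PAD + frame_size +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       GFP_ATOMIC);
}

/* After IO, turn the filled buffer into an skb for the stack. */
static struct sk_buff *foo_rx_to_skb(void *data, unsigned int frame_len)
{
	struct sk_buff *skb = build_skb(data);

	if (!skb)
		return NULL;		/* caller still owns @data */
	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom added above */
	skb_put(skb, frame_len);	/* frame was written after the headroom */
	return skb;
}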
+
+/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
@@ -403,7 +452,7 @@ static void skb_release_head_state(struct sk_buff *skb)
WARN_ON(in_irq());
skb->destructor(skb);
}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
@@ -553,15 +602,14 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
new->priority = old->priority;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
new->ipvs_property = old->ipvs_property;
#endif
new->protocol = old->protocol;
new->mark = old->mark;
new->skb_iif = old->skb_iif;
__nf_copy(new, old);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
- defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
@@ -791,8 +839,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
EXPORT_SYMBOL(skb_copy);
/**
- * pskb_copy - create copy of an sk_buff with private head.
+ * __pskb_copy - create copy of an sk_buff with private head.
* @skb: buffer to copy
+ * @headroom: headroom of new skb
* @gfp_mask: allocation priority
*
* Make a copy of both an &sk_buff and part of its data, located
@@ -803,16 +852,16 @@ EXPORT_SYMBOL(skb_copy);
* The returned buffer has a reference count of 1.
*/
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
- unsigned int size = skb_end_pointer(skb) - skb->head;
+ unsigned int size = skb_headlen(skb) + headroom;
struct sk_buff *n = alloc_skb(size, gfp_mask);
if (!n)
goto out;
/* Set the data pointer */
- skb_reserve(n, skb_headroom(skb));
+ skb_reserve(n, headroom);
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
@@ -848,7 +897,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
out:
return n;
}
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
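Existing pskb_copy() callers presumably keep working through a thin inline wrapper in skbuff.h along these lines (a sketch; the wrapper itself is not part of this hunk):

static inline struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/* preserve the old behaviour: copy with the original headroom */
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}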
/**
* pskb_expand_head - reallocate header of &sk_buff
@@ -2230,7 +2279,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
* @shiftlen: shift up to this many bytes
*
* Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
* It's up to caller to free skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
@@ -2621,7 +2670,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
@@ -3169,6 +3218,26 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+{
+ struct sock *sk = skb->sk;
+ struct sock_exterr_skb *serr;
+ int err;
+
+ skb->wifi_acked_valid = 1;
+ skb->wifi_acked = acked;
+
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+ err = sock_queue_err_skb(sk, skb);
+ if (err)
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
/**
* skb_partial_csum_set - set up and verify partial csum values for packet
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5..5c5af9988f9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,6 +111,8 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
+#include <linux/jump_label.h>
+#include <linux/memcontrol.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -125,6 +127,7 @@
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
#include <linux/filter.h>
@@ -134,6 +137,46 @@
#include <net/tcp.h>
#endif
+static DEFINE_MUTEX(proto_list_mutex);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+ struct proto *proto;
+ int ret = 0;
+
+ mutex_lock(&proto_list_mutex);
+ list_for_each_entry(proto, &proto_list, node) {
+ if (proto->init_cgroup) {
+ ret = proto->init_cgroup(cgrp, ss);
+ if (ret)
+ goto out;
+ }
+ }
+
+ mutex_unlock(&proto_list_mutex);
+ return ret;
+out:
+ list_for_each_entry_continue_reverse(proto, &proto_list, node)
+ if (proto->destroy_cgroup)
+ proto->destroy_cgroup(cgrp, ss);
+ mutex_unlock(&proto_list_mutex);
+ return ret;
+}
+
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+ struct proto *proto;
+
+ mutex_lock(&proto_list_mutex);
+ list_for_each_entry_reverse(proto, &proto_list, node)
+ if (proto->destroy_cgroup)
+ proto->destroy_cgroup(cgrp, ss);
+ mutex_unlock(&proto_list_mutex);
+}
+#endif
+
/*
* Each address family might have different locking rules, so we have
* one slock key per address family:
@@ -141,6 +184,9 @@
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
+struct jump_label_key memcg_socket_limit_enabled;
+EXPORT_SYMBOL(memcg_socket_limit_enabled);
+
/*
* Make lock validator output more readable. (we pre-construct these
* strings build-time, so that runtime initialization of socket
@@ -221,10 +267,16 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
-#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+#if defined(CONFIG_CGROUPS)
+#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
+#if !defined(CONFIG_NETPRIO_CGROUP)
+int net_prio_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_prio_subsys_id);
+#endif
+#endif
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
@@ -269,14 +321,14 @@ static void sock_warn_obsolete_bsdism(const char *name)
}
}
-static void sock_disable_timestamp(struct sock *sk, int flag)
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
+static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
- if (sock_flag(sk, flag)) {
- sock_reset_flag(sk, flag);
- if (!sock_flag(sk, SOCK_TIMESTAMP) &&
- !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
+ if (sk->sk_flags & flags) {
+ sk->sk_flags &= ~flags;
+ if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
net_disable_timestamp();
- }
}
}
@@ -288,11 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
- /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
- number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf) {
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
@@ -682,7 +730,7 @@ set_rcvbuf:
SOCK_TIMESTAMPING_RX_SOFTWARE);
else
sock_disable_timestamp(sk,
- SOCK_TIMESTAMPING_RX_SOFTWARE);
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
val & SOF_TIMESTAMPING_SOFTWARE);
sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
@@ -740,6 +788,11 @@ set_rcvbuf:
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
+
+ case SO_WIFI_STATUS:
+ sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -961,6 +1014,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
break;
+ case SO_WIFI_STATUS:
+ v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -1111,6 +1168,18 @@ void sock_update_classid(struct sock *sk)
sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
+
+void sock_update_netprioidx(struct sock *sk)
+{
+ struct cgroup_netprio_state *state;
+ if (in_interrupt())
+ return;
+ rcu_read_lock();
+ state = task_netprio_state(current);
+ sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
/**
@@ -1138,6 +1207,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
atomic_set(&sk->sk_wmem_alloc, 1);
sock_update_classid(sk);
+ sock_update_netprioidx(sk);
}
return sk;
@@ -1158,8 +1228,7 @@ static void __sk_free(struct sock *sk)
RCU_INIT_POINTER(sk->sk_filter, NULL);
}
- sock_disable_timestamp(sk, SOCK_TIMESTAMP);
- sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
+ sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
if (atomic_read(&sk->sk_omem_alloc))
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
@@ -1204,7 +1273,20 @@ void sk_release_kernel(struct sock *sk)
}
EXPORT_SYMBOL(sk_release_kernel);
-struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+static void sk_update_clone(const struct sock *sk, struct sock *newsk)
+{
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ sock_update_memcg(newsk);
+}
+
+/**
+ * sk_clone_lock - clone a socket, and lock its clone
+ * @sk: the socket to clone
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
struct sock *newsk;
@@ -1287,17 +1369,18 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sk_set_socket(newsk, NULL);
newsk->sk_wq = NULL;
+ sk_update_clone(sk, newsk);
+
if (newsk->sk_prot->sockets_allocated)
- percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+ sk_sockets_allocated_inc(newsk);
- if (sock_flag(newsk, SOCK_TIMESTAMP) ||
- sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+ if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
net_enable_timestamp();
}
out:
return newsk;
}
-EXPORT_SYMBOL_GPL(sk_clone);
+EXPORT_SYMBOL_GPL(sk_clone_lock);
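A minimal caller sketch of the locking contract noted above (foo_accept_clone() is hypothetical):

static struct sock *foo_accept_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;

	/* ... protocol-specific initialisation of the clone ... */

	bh_unlock_sock(newsk);	/* required on every path, error or not */
	return newsk;
}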
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
@@ -1677,30 +1760,34 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
struct proto *prot = sk->sk_prot;
int amt = sk_mem_pages(size);
long allocated;
+ int parent_status = UNDER_LIMIT;
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
- allocated = atomic_long_add_return(amt, prot->memory_allocated);
+
+ allocated = sk_memory_allocated_add(sk, amt, &parent_status);
/* Under limit. */
- if (allocated <= prot->sysctl_mem[0]) {
- if (prot->memory_pressure && *prot->memory_pressure)
- *prot->memory_pressure = 0;
+ if (parent_status == UNDER_LIMIT &&
+ allocated <= sk_prot_mem_limits(sk, 0)) {
+ sk_leave_memory_pressure(sk);
return 1;
}
- /* Under pressure. */
- if (allocated > prot->sysctl_mem[1])
- if (prot->enter_memory_pressure)
- prot->enter_memory_pressure(sk);
+ /* Under pressure. (we or our parents) */
+ if ((parent_status > SOFT_LIMIT) ||
+ allocated > sk_prot_mem_limits(sk, 1))
+ sk_enter_memory_pressure(sk);
- /* Over hard limit. */
- if (allocated > prot->sysctl_mem[2])
+ /* Over hard limit (we or our parents) */
+ if ((parent_status == OVER_LIMIT) ||
+ (allocated > sk_prot_mem_limits(sk, 2)))
goto suppress_allocation;
/* guarantee minimum buffer size under pressure */
if (kind == SK_MEM_RECV) {
if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
return 1;
+
} else { /* SK_MEM_SEND */
if (sk->sk_type == SOCK_STREAM) {
if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
@@ -1710,13 +1797,13 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
return 1;
}
- if (prot->memory_pressure) {
+ if (sk_has_memory_pressure(sk)) {
int alloc;
- if (!*prot->memory_pressure)
+ if (!sk_under_memory_pressure(sk))
return 1;
- alloc = percpu_counter_read_positive(prot->sockets_allocated);
- if (prot->sysctl_mem[2] > alloc *
+ alloc = sk_sockets_allocated_read_positive(sk);
+ if (sk_prot_mem_limits(sk, 2) > alloc *
sk_mem_pages(sk->sk_wmem_queued +
atomic_read(&sk->sk_rmem_alloc) +
sk->sk_forward_alloc))
@@ -1739,7 +1826,9 @@ suppress_allocation:
/* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
- atomic_long_sub(amt, prot->memory_allocated);
+
+ sk_memory_allocated_sub(sk, amt, parent_status);
+
return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1750,15 +1839,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
*/
void __sk_mem_reclaim(struct sock *sk)
{
- struct proto *prot = sk->sk_prot;
-
- atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
- prot->memory_allocated);
+ sk_memory_allocated_sub(sk,
+ sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
- if (prot->memory_pressure && *prot->memory_pressure &&
- (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
- *prot->memory_pressure = 0;
+ if (sk_under_memory_pressure(sk) &&
+ (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2129,16 +2216,15 @@ EXPORT_SYMBOL(sock_get_timestampns);
void sock_enable_timestamp(struct sock *sk, int flag)
{
if (!sock_flag(sk, flag)) {
+ unsigned long previous_flags = sk->sk_flags;
+
sock_set_flag(sk, flag);
/*
* we just set one of the two flags which require net
* time stamping, but time stamping might have been on
* already because of the other one
*/
- if (!sock_flag(sk,
- flag == SOCK_TIMESTAMP ?
- SOCK_TIMESTAMPING_RX_SOFTWARE :
- SOCK_TIMESTAMP))
+ if (!(previous_flags & SK_FLAGS_TIMESTAMP))
net_enable_timestamp();
}
}
@@ -2250,9 +2336,6 @@ void sk_common_release(struct sock *sk)
}
EXPORT_SYMBOL(sk_common_release);
-static DEFINE_RWLOCK(proto_list_lock);
-static LIST_HEAD(proto_list);
-
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR 64 /* should be enough for the first time */
struct prot_inuse {
@@ -2401,10 +2484,10 @@ int proto_register(struct proto *prot, int alloc_slab)
}
}
- write_lock(&proto_list_lock);
+ mutex_lock(&proto_list_mutex);
list_add(&prot->node, &proto_list);
assign_proto_idx(prot);
- write_unlock(&proto_list_lock);
+ mutex_unlock(&proto_list_mutex);
return 0;
out_free_timewait_sock_slab_name:
@@ -2427,10 +2510,10 @@ EXPORT_SYMBOL(proto_register);
void proto_unregister(struct proto *prot)
{
- write_lock(&proto_list_lock);
+ mutex_lock(&proto_list_mutex);
release_proto_idx(prot);
list_del(&prot->node);
- write_unlock(&proto_list_lock);
+ mutex_unlock(&proto_list_mutex);
if (prot->slab != NULL) {
kmem_cache_destroy(prot->slab);
@@ -2453,9 +2536,9 @@ EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(proto_list_lock)
+ __acquires(proto_list_mutex)
{
- read_lock(&proto_list_lock);
+ mutex_lock(&proto_list_mutex);
return seq_list_start_head(&proto_list, *pos);
}
@@ -2465,25 +2548,36 @@ static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void proto_seq_stop(struct seq_file *seq, void *v)
- __releases(proto_list_lock)
+ __releases(proto_list_mutex)
{
- read_unlock(&proto_list_lock);
+ mutex_unlock(&proto_list_mutex);
}
static char proto_method_implemented(const void *method)
{
return method == NULL ? 'n' : 'y';
}
+static long sock_prot_memory_allocated(struct proto *proto)
+{
+ return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+}
+
+static char *sock_prot_memory_pressure(struct proto *proto)
+{
+ return proto->memory_pressure != NULL ?
+ proto_memory_pressure(proto) ? "yes" : "no" : "NI";
+}
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
+
seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
proto->name,
proto->obj_size,
sock_prot_inuse_get(seq_file_net(seq), proto),
- proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
- proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+ sock_prot_memory_allocated(proto),
+ sock_prot_memory_pressure(proto),
proto->max_header,
proto->slab == NULL ? "no" : "yes",
module_name(proto->owner),
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
new file mode 100644
index 00000000000..b9868e1fd62
--- /dev/null
+++ b/net/core/sock_diag.c
@@ -0,0 +1,192 @@
+#include <linux/mutex.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <net/sock.h>
+
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+static DEFINE_MUTEX(sock_diag_table_mutex);
+
+int sock_diag_check_cookie(void *sk, __u32 *cookie)
+{
+ if ((cookie[0] != INET_DIAG_NOCOOKIE ||
+ cookie[1] != INET_DIAG_NOCOOKIE) &&
+ ((u32)(unsigned long)sk != cookie[0] ||
+ (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
+ return -ESTALE;
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
+
+void sock_diag_save_cookie(void *sk, __u32 *cookie)
+{
+ cookie[0] = (u32)(unsigned long)sk;
+ cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+}
+EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
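A hedged sketch of how a family handler would use the two cookie helpers; foo_diag_fill_one() is hypothetical and the idiag_cookie field name follows inet_diag:

static int foo_diag_fill_one(struct sock *sk, struct inet_diag_msg *r,
			     struct inet_diag_req *req)
{
	int err = sock_diag_check_cookie(sk, req->id.idiag_cookie);

	if (err)
		return err;	/* -ESTALE: the cookie names a socket that is gone */

	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	/* ... fill the rest of the reply ... */
	return 0;
}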
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
+{
+ __u32 *mem;
+
+ mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
+
+ mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+ mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+ mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+ mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+
+ return 0;
+
+rtattr_failure:
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+ mutex_lock(&sock_diag_table_mutex);
+ inet_rcv_compat = fn;
+ mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
+
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+ mutex_lock(&sock_diag_table_mutex);
+ inet_rcv_compat = NULL;
+ mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
+
+int sock_diag_register(struct sock_diag_handler *hndl)
+{
+ int err = 0;
+
+ if (hndl->family >= AF_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sock_diag_table_mutex);
+ if (sock_diag_handlers[hndl->family])
+ err = -EBUSY;
+ else
+ sock_diag_handlers[hndl->family] = hndl;
+ mutex_unlock(&sock_diag_table_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(sock_diag_register);
+
+void sock_diag_unregister(struct sock_diag_handler *hnld)
+{
+ int family = hnld->family;
+
+ if (family >= AF_MAX)
+ return;
+
+ mutex_lock(&sock_diag_table_mutex);
+ BUG_ON(sock_diag_handlers[family] != hnld);
+ sock_diag_handlers[family] = NULL;
+ mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister);
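Registration against the table above looks roughly like this for a per-family module (all foo_* names and AF_FOO are hypothetical):

static int foo_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* parse the sock_diag_req payload and emit netlink replies */
	return 0;
}

static struct sock_diag_handler foo_diag_handler = {
	.family	= AF_FOO,	/* hypothetical address family */
	.dump	= foo_diag_dump,
};

static int __init foo_diag_init(void)
{
	return sock_diag_register(&foo_diag_handler);
}

static void __exit foo_diag_exit(void)
{
	sock_diag_unregister(&foo_diag_handler);
}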
+
+static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+ if (sock_diag_handlers[family] == NULL)
+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, family);
+
+ mutex_lock(&sock_diag_table_mutex);
+ return sock_diag_handlers[family];
+}
+
+static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+{
+ mutex_unlock(&sock_diag_table_mutex);
+}
+
+static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ int err;
+ struct sock_diag_req *req = NLMSG_DATA(nlh);
+ struct sock_diag_handler *hndl;
+
+ if (nlmsg_len(nlh) < sizeof(*req))
+ return -EINVAL;
+
+ hndl = sock_diag_lock_handler(req->sdiag_family);
+ if (hndl == NULL)
+ err = -ENOENT;
+ else
+ err = hndl->dump(skb, nlh);
+ sock_diag_unlock_handler(hndl);
+
+ return err;
+}
+
+static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ int ret;
+
+ switch (nlh->nlmsg_type) {
+ case TCPDIAG_GETSOCK:
+ case DCCPDIAG_GETSOCK:
+ if (inet_rcv_compat == NULL)
+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, AF_INET);
+
+ mutex_lock(&sock_diag_table_mutex);
+ if (inet_rcv_compat != NULL)
+ ret = inet_rcv_compat(skb, nlh);
+ else
+ ret = -EOPNOTSUPP;
+ mutex_unlock(&sock_diag_table_mutex);
+
+ return ret;
+ case SOCK_DIAG_BY_FAMILY:
+ return __sock_diag_rcv_msg(skb, nlh);
+ default:
+ return -EINVAL;
+ }
+}
+
+static DEFINE_MUTEX(sock_diag_mutex);
+
+static void sock_diag_rcv(struct sk_buff *skb)
+{
+ mutex_lock(&sock_diag_mutex);
+ netlink_rcv_skb(skb, &sock_diag_rcv_msg);
+ mutex_unlock(&sock_diag_mutex);
+}
+
+struct sock *sock_diag_nlsk;
+EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+
+static int __init sock_diag_init(void)
+{
+ sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
+ sock_diag_rcv, NULL, THIS_MODULE);
+ return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __exit sock_diag_exit(void)
+{
+ netlink_kernel_release(sock_diag_nlsk);
+}
+
+module_init(sock_diag_init);
+module_exit(sock_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 77a65f03148..d05559d4d9c 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -68,8 +68,13 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
if (sock_table != orig_sock_table) {
rcu_assign_pointer(rps_sock_flow_table, sock_table);
- synchronize_rcu();
- vfree(orig_sock_table);
+ if (sock_table)
+ jump_label_inc(&rps_needed);
+ if (orig_sock_table) {
+ jump_label_dec(&rps_needed);
+ synchronize_rcu();
+ vfree(orig_sock_table);
+ }
}
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 67164bb6ae4..f053198e730 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -29,7 +29,7 @@
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-static int ccid2_debug;
+static bool ccid2_debug;
#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
@@ -174,7 +174,7 @@ out:
/*
* Congestion window validation (RFC 2861).
*/
-static int ccid2_do_cwv = 1;
+static bool ccid2_do_cwv = true;
module_param(ccid2_do_cwv, bool, 0644);
MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3d604e1349c..56062730720 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -38,7 +38,7 @@
#include <asm/unaligned.h>
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
-static int ccid3_debug;
+static bool ccid3_debug;
#define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
#else
#define ccid3_pr_debug(format, a...)
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 1f94b7e01d3..62b5828acde 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-int tfrc_debug;
+bool tfrc_debug;
module_param(tfrc_debug, bool, 0644);
MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index f8ee3f54977..ed698c42a5f 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,7 +21,7 @@
#include "packet_history.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-extern int tfrc_debug;
+extern bool tfrc_debug;
#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
#else
#define tfrc_pr_debug(format, a...)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 583490aaf56..29d6bb629a6 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -39,7 +39,7 @@
"%s: " fmt, __func__, ##a)
#ifdef CONFIG_IP_DCCP_DEBUG
-extern int dccp_debug;
+extern bool dccp_debug;
#define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a)
#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
@@ -357,7 +357,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
struct dccp_skb_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index b21f261da75..8f162575337 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -48,11 +48,23 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
dccp_get_info(sk, _info);
}
+static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+}
+
+static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct inet_diag_req *req)
+{
+ return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+}
+
static const struct inet_diag_handler dccp_diag_handler = {
- .idiag_hashinfo = &dccp_hashinfo,
+ .dump = dccp_diag_dump,
+ .dump_one = dccp_diag_dump_one,
.idiag_get_info = dccp_diag_get_info,
- .idiag_type = DCCPDIAG_GETSOCK,
- .idiag_info_size = sizeof(struct tcp_info),
+ .idiag_type = IPPROTO_DCCP,
};
static int __init dccp_diag_init(void)
@@ -71,4 +83,4 @@ module_exit(dccp_diag_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCP inet_diag handler");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-33 /* AF_INET - IPPROTO_DCCP */);
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 23cea0ee310..78a2ad70e1b 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -490,8 +490,8 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
new->feat_num = feat;
new->is_local = local;
new->state = FEAT_INITIALISING;
- new->needs_confirm = 0;
- new->empty_confirm = 0;
+ new->needs_confirm = false;
+ new->empty_confirm = false;
new->val = *fval;
new->needs_mandatory = mandatory;
@@ -517,12 +517,12 @@ static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
new->feat_num = feat;
new->is_local = local;
new->state = FEAT_STABLE; /* transition in 6.6.2 */
- new->needs_confirm = 1;
+ new->needs_confirm = true;
new->empty_confirm = (fval == NULL);
new->val.nn = 0; /* zeroes the whole structure */
if (!new->empty_confirm)
new->val = *fval;
- new->needs_mandatory = 0;
+ new->needs_mandatory = false;
return 0;
}
@@ -1155,7 +1155,7 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
}
if (dccp_feat_reconcile(&entry->val, val, len, server, true)) {
- entry->empty_confirm = 0;
+ entry->empty_confirm = false;
} else if (is_mandatory) {
return DCCP_RESET_CODE_MANDATORY_ERROR;
} else if (entry->state == FEAT_INITIALISING) {
@@ -1171,10 +1171,10 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
defval = dccp_feat_default_value(feat);
if (!dccp_feat_reconcile(&entry->val, &defval, 1, server, true))
return DCCP_RESET_CODE_OPTION_ERROR;
- entry->empty_confirm = 1;
+ entry->empty_confirm = true;
}
- entry->needs_confirm = 1;
- entry->needs_mandatory = 0;
+ entry->needs_confirm = true;
+ entry->needs_mandatory = false;
entry->state = FEAT_STABLE;
return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90a919afbed..1c67fe8ff90 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
@@ -473,10 +474,11 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *rt;
+ const struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
.flowi4_oif = skb_rtable(skb)->rt_iif,
- .daddr = ip_hdr(skb)->saddr,
- .saddr = ip_hdr(skb)->daddr,
+ .daddr = iph->saddr,
+ .saddr = iph->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
.flowi4_proto = sk->sk_protocol,
.fl4_sport = dccp_hdr(skb)->dccph_dport,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 17ee85ce148..ce903f747e6 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
@@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.daddr = ireq6->rmt_addr;
+ fl6.saddr = ireq6->loc_addr;
fl6.flowlabel = 0;
fl6.flowi6_oif = ireq6->iif;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
dh->dccph_checksum = dccp_v6_csum_finish(skb,
&ireq6->loc_addr,
&ireq6->rmt_addr);
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
&rxip6h->daddr);
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
- ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
+ fl6.daddr = rxip6h->saddr;
+ fl6.saddr = rxip6h->daddr;
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.flowi6_oif = inet6_iif(rxskb);
@@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
ireq6 = inet6_rsk(req);
- ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+ ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq6->loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+ newnp->rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
final_p = fl6_update_dst(&fl6, opt, &final);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
@@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
- ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
- ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
+ newnp->daddr = ireq6->rmt_addr;
+ newnp->saddr = ireq6->loc_addr;
+ newnp->rcv_saddr = ireq6->loc_addr;
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
@@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
}
- ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+ np->daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
@@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- ipv6_addr_copy(&np->rcv_saddr, saddr);
+ np->rcv_saddr = *saddr;
}
/* set the source address */
- ipv6_addr_copy(&np->saddr, saddr);
+ np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index d7041a0963a..5a7f90bbffa 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -53,15 +53,15 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
if (tw != NULL) {
const struct inet_connection_sock *icsk = inet_csk(sk);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
struct inet6_timewait_sock *tw6;
tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
tw6 = inet6_twsk((struct sock *)tw);
- ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+ tw6->tw_v6_daddr = np->daddr;
+ tw6->tw_v6_rcv_saddr = np->rcv_saddr;
tw->tw_ipv6only = np->ipv6only;
}
#endif
@@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
*/
- struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+ struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
struct dccp_request_sock *dreq = dccp_rsk(req);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 4b2ab657ac8..68fa6b7a3e0 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -544,7 +544,7 @@ int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
}
if (unlikely(val == NULL || len == 0))
- len = repeat_first = 0;
+ len = repeat_first = false;
tot_len = 3 + repeat_first + len;
if (DCCP_SKB_CB(skb)->dccpd_opt_len + tot_len > DCCP_MAX_OPT_LEN) {
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 33d0e6297c2..0a8d6ebd9b4 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,6 +152,17 @@ static const struct file_operations dccpprobe_fops = {
.llseek = noop_llseek,
};
+static __init int setup_jprobe(void)
+{
+ int ret = register_jprobe(&dccp_send_probe);
+
+ if (ret) {
+ request_module("dccp");
+ ret = register_jprobe(&dccp_send_probe);
+ }
+ return ret;
+}
+
static __init int dccpprobe_init(void)
{
int ret = -ENOMEM;
@@ -163,8 +174,7 @@ static __init int dccpprobe_init(void)
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
- try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
- "dccp");
+ ret = setup_jprobe();
if (ret)
goto err1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index e742f90a685..7065c0ae1e7 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1099,7 +1099,7 @@ module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
#ifdef CONFIG_IP_DCCP_DEBUG
-int dccp_debug;
+bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7f0eb087dc1..befe426491b 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -88,9 +88,9 @@ static const struct neigh_ops dn_phase3_ops = {
static u32 dn_neigh_hash(const void *pkey,
const struct net_device *dev,
- __u32 hash_rnd)
+ __u32 *hash_rnd)
{
- return jhash_2words(*(__u16 *)pkey, 0, hash_rnd);
+ return jhash_2words(*(__u16 *)pkey, 0, hash_rnd[0]);
}
struct neigh_table dn_neigh_table = {
@@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = {
.gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 0,
.app_probes = 0,
.mcast_probes = 0,
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct dn_route *rt = (struct dn_route *)dst;
- struct neighbour *neigh = dst_get_neighbour(dst);
+ struct neighbour *neigh = dst_get_neighbour_noref(dst);
struct net_device *dev = neigh->dev;
char mac_addr[ETH_ALEN];
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a77d16158eb..f31ce72dca6 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
.gc = dn_dst_gc,
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
- .default_mtu = dn_dst_default_mtu,
+ .mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
@@ -244,7 +244,7 @@ static int dn_dst_gc(struct dst_ops *ops)
*/
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
- struct neighbour *n = dst_get_neighbour(dst);
+ struct neighbour *n = dst_get_neighbour_noref(dst);
u32 min_mtu = 230;
struct dn_dev *dn;
@@ -713,7 +713,7 @@ out:
static int dn_to_neigh_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = dst_get_neighbour(dst);
+ struct neighbour *n = dst_get_neighbour_noref(dst);
return n->output(n, skb);
}
@@ -728,7 +728,7 @@ static int dn_output(struct sk_buff *skb)
int err = -EINVAL;
- if ((neigh = dst_get_neighbour(dst)) == NULL)
+ if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
goto error;
skb->dev = dev;
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
- return dst->dev->mtu;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -850,7 +852,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
}
rt->rt_type = res->type;
- if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
+ if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
if (IS_ERR(n))
return PTR_ERR(n);
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 67f691bd4ac..d9c150cc59a 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
void dn_start_slow_timer(struct sock *sk)
{
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- sk->sk_timer.function = dn_slow_timer;
- sk->sk_timer.data = (unsigned long)sk;
-
- add_timer(&sk->sk_timer);
+ setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
struct sock *sk = (struct sock *)arg;
struct dn_scp *scp = DN_SK(sk);
- sock_hold(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->sk_timer.expires = jiffies + HZ / 10;
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
goto out;
}
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
scp->keepalive_fxn(sk);
}
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
bh_unlock_sock(sk);
sock_put(sk);
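
The dn_timer.c conversion above moves the DECnet slow timer onto the generic socket-timer helpers. A hedged kernel-style sketch of that pattern (the demo_* names are illustrative): sk_reset_timer() takes a reference on the socket when it (re)arms the timer and sk_stop_timer() drops it again, which is why the hand-rolled sock_hold()/add_timer()/del_timer() bookkeeping disappears.

#include <net/sock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_SLOW_INTERVAL	(30 * HZ)

static void demo_slow_timer(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* owner is busy: retry shortly instead of doing the work now */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
		goto out;
	}
	/* ... periodic per-socket housekeeping would go here ... */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + DEMO_SLOW_INTERVAL);
out:
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the hold taken when this expiry was armed */
}

static void demo_start_slow_timer(struct sock *sk)
{
	setup_timer(&sk->sk_timer, demo_slow_timer, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + DEMO_SLOW_INTERVAL);
}

static void demo_stop_slow_timer(struct sock *sk)
{
	/* del_timer() plus dropping the socket reference if it was pending */
	sk_stop_timer(sk, &sk->sk_timer);
}
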
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c53ded2a98d..274791cd7a3 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,5 +1,5 @@
-menuconfig NET_DSA
- bool "Distributed Switch Architecture support"
+config NET_DSA
+ tristate "Distributed Switch Architecture support"
default n
depends on EXPERIMENTAL && NETDEVICES && !S390
select PHYLIB
@@ -23,38 +23,4 @@ config NET_DSA_TAG_TRAILER
bool
default n
-
-# switch drivers
-config NET_DSA_MV88E6XXX
- bool
- default n
-
-config NET_DSA_MV88E6060
- bool "Marvell 88E6060 ethernet switch chip support"
- select NET_DSA_TAG_TRAILER
- ---help---
- This enables support for the Marvell 88E6060 ethernet switch
- chip.
-
-config NET_DSA_MV88E6XXX_NEED_PPU
- bool
- default n
-
-config NET_DSA_MV88E6131
- bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
- select NET_DSA_MV88E6XXX
- select NET_DSA_MV88E6XXX_NEED_PPU
- select NET_DSA_TAG_DSA
- ---help---
- This enables support for the Marvell 88E6085/6095/6095F/6131
- ethernet switch chips.
-
-config NET_DSA_MV88E6123_61_65
- bool "Marvell 88E6123/6161/6165 ethernet switch chip support"
- select NET_DSA_MV88E6XXX
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for the Marvell 88E6123/6161/6165
- ethernet switch chips.
-
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 2374faff4de..7b9fcbbeda5 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,13 +1,8 @@
-# tagging formats
-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-
-# switch drivers
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o
-obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o
-
# the core
-obj-$(CONFIG_NET_DSA) += dsa.o slave.o
+obj-$(CONFIG_NET_DSA) += dsa_core.o
+dsa_core-y += dsa.o slave.o
+
+# tagging formats
+dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0dc1589343c..88e7c2f3fa0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -29,6 +29,7 @@ void register_switch_driver(struct dsa_switch_driver *drv)
list_add_tail(&drv->list, &dsa_switch_drivers);
mutex_unlock(&dsa_switch_drivers_mutex);
}
+EXPORT_SYMBOL_GPL(register_switch_driver);
void unregister_switch_driver(struct dsa_switch_driver *drv)
{
@@ -36,6 +37,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv)
list_del_init(&drv->list);
mutex_unlock(&dsa_switch_drivers_mutex);
}
+EXPORT_SYMBOL_GPL(unregister_switch_driver);
static struct dsa_switch_driver *
dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
@@ -199,29 +201,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
}
-/* hooks for ethertype-less tagging formats *********************************/
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-bool dsa_uses_dsa_tags(void *dsa_ptr)
-{
- struct dsa_switch_tree *dst = dsa_ptr;
-
- return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-bool dsa_uses_trailer_tags(void *dsa_ptr)
-{
- struct dsa_switch_tree *dst = dsa_ptr;
-
- return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
-}
-
-
/* link polling *************************************************************/
static void dsa_link_poll_work(struct work_struct *ugly)
{
@@ -419,12 +398,36 @@ static struct platform_driver dsa_driver = {
static int __init dsa_init_module(void)
{
- return platform_driver_register(&dsa_driver);
+ int rc;
+
+ rc = platform_driver_register(&dsa_driver);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ dev_add_pack(&dsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ dev_add_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ dev_add_pack(&trailer_packet_type);
+#endif
+ return 0;
}
module_init(dsa_init_module);
static void __exit dsa_cleanup_module(void)
{
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ dev_remove_pack(&trailer_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ dev_remove_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ dev_remove_pack(&dsa_packet_type);
+#endif
platform_driver_unregister(&dsa_driver);
}
module_exit(dsa_cleanup_module);
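
With the tagging formats folded into the single dsa_core module above, each tag file now only exports its struct packet_type, and dev_add_pack()/dev_remove_pack() run once at module init/exit. A hedged, stand-alone sketch of that registration pattern (demo_* names are illustrative):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int demo_rcv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *pt, struct net_device *orig_dev)
{
	/* decode the switch tag here and hand the frame to the right slave */
	kfree_skb(skb);
	return 0;
}

static struct packet_type demo_packet_type = {
	.type	= cpu_to_be16(ETH_P_DSA),
	.func	= demo_rcv,
};

static int __init demo_init(void)
{
	dev_add_pack(&demo_packet_type);	/* start receiving ETH_P_DSA frames */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	dev_remove_pack(&demo_packet_type);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
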
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 4b0ea054044..d4cf5cc747e 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -11,97 +11,9 @@
#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H
-#include <linux/list.h>
#include <linux/phy.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
#include <net/dsa.h>
-struct dsa_switch {
- /*
- * Parent switch tree, and switch index.
- */
- struct dsa_switch_tree *dst;
- int index;
-
- /*
- * Configuration data for this switch.
- */
- struct dsa_chip_data *pd;
-
- /*
- * The used switch driver.
- */
- struct dsa_switch_driver *drv;
-
- /*
- * Reference to mii bus to use.
- */
- struct mii_bus *master_mii_bus;
-
- /*
- * Slave mii_bus and devices for the individual ports.
- */
- u32 dsa_port_mask;
- u32 phys_port_mask;
- struct mii_bus *slave_mii_bus;
- struct net_device *ports[DSA_MAX_PORTS];
-};
-
-struct dsa_switch_tree {
- /*
- * Configuration data for the platform device that owns
- * this dsa switch tree instance.
- */
- struct dsa_platform_data *pd;
-
- /*
- * Reference to network device to use, and which tagging
- * protocol to use.
- */
- struct net_device *master_netdev;
- __be16 tag_protocol;
-
- /*
- * The switch and port to which the CPU is attached.
- */
- s8 cpu_switch;
- s8 cpu_port;
-
- /*
- * Link state polling.
- */
- int link_poll_needed;
- struct work_struct link_poll_work;
- struct timer_list link_poll_timer;
-
- /*
- * Data for the individual switch chips.
- */
- struct dsa_switch *ds[DSA_MAX_SWITCHES];
-};
-
-static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
-{
- return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
-}
-
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
-{
- struct dsa_switch_tree *dst = ds->dst;
-
- /*
- * If this is the root switch (i.e. the switch that connects
- * to the CPU), return the cpu port number on this switch.
- * Else return the (DSA) port number that connects to the
- * switch that is one hop closer to the cpu.
- */
- if (dst->cpu_switch == ds->index)
- return dst->cpu_port;
- else
- return ds->pd->rtable[dst->cpu_switch];
-}
-
struct dsa_slave_priv {
/*
* The linux network interface corresponding to this
@@ -123,44 +35,8 @@ struct dsa_slave_priv {
struct phy_device *phy;
};
-struct dsa_switch_driver {
- struct list_head list;
-
- __be16 tag_protocol;
- int priv_size;
-
- /*
- * Probing and setup.
- */
- char *(*probe)(struct mii_bus *bus, int sw_addr);
- int (*setup)(struct dsa_switch *ds);
- int (*set_addr)(struct dsa_switch *ds, u8 *addr);
-
- /*
- * Access to the switch's PHY registers.
- */
- int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
- int (*phy_write)(struct dsa_switch *ds, int port,
- int regnum, u16 val);
-
- /*
- * Link state polling and IRQ handling.
- */
- void (*poll_link)(struct dsa_switch *ds);
-
- /*
- * ethtool hardware statistics.
- */
- void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
- void (*get_ethtool_stats)(struct dsa_switch *ds,
- int port, uint64_t *data);
- int (*get_sset_count)(struct dsa_switch *ds);
-};
-
/* dsa.c */
extern char dsa_driver_version[];
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
/* slave.c */
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -170,12 +46,15 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds,
/* tag_dsa.c */
netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type dsa_packet_type;
/* tag_edsa.c */
netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type edsa_packet_type;
/* tag_trailer.c */
netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type trailer_packet_type;
#endif
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 98dfe80b453..cacce1e22f9 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -186,20 +186,7 @@ out:
return 0;
}
-static struct packet_type dsa_packet_type __read_mostly = {
+struct packet_type dsa_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_DSA),
.func = dsa_rcv,
};
-
-static int __init dsa_init_module(void)
-{
- dev_add_pack(&dsa_packet_type);
- return 0;
-}
-module_init(dsa_init_module);
-
-static void __exit dsa_cleanup_module(void)
-{
- dev_remove_pack(&dsa_packet_type);
-}
-module_exit(dsa_cleanup_module);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 6f383322ad2..e70c43c25e6 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -205,20 +205,7 @@ out:
return 0;
}
-static struct packet_type edsa_packet_type __read_mostly = {
+struct packet_type edsa_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_EDSA),
.func = edsa_rcv,
};
-
-static int __init edsa_init_module(void)
-{
- dev_add_pack(&edsa_packet_type);
- return 0;
-}
-module_init(edsa_init_module);
-
-static void __exit edsa_cleanup_module(void)
-{
- dev_remove_pack(&edsa_packet_type);
-}
-module_exit(edsa_cleanup_module);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d6d7d0add3c..94bc260d015 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -114,20 +114,7 @@ out:
return 0;
}
-static struct packet_type trailer_packet_type __read_mostly = {
+struct packet_type trailer_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_TRAILER),
.func = trailer_rcv,
};
-
-static int __init trailer_init_module(void)
-{
- dev_add_pack(&trailer_packet_type);
- return 0;
-}
-module_init(trailer_init_module);
-
-static void __exit trailer_cleanup_module(void)
-{
- dev_remove_pack(&trailer_packet_type);
-}
-module_exit(trailer_cleanup_module);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 1c1f26c5d67..7e717cb35ad 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -322,6 +322,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
+ int hlen, tlen;
int res;
if (len + 15 > dev->mtu) {
@@ -331,12 +332,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_hold(dev);
- skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, len + hlen + tlen,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
eb = (struct ec_cb *)&skb->cb;
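
The econet hunk above (and the same change repeated in dgram.c, raw.c, arp.c and igmp.c further down) replaces LL_ALLOCATED_SPACE(dev) with separately read headroom and tailroom values. A hedged kernel-style sketch of the allocation pattern (the demo_* name is illustrative): size the buffer for both, but reserve only the headroom so the hardware header can be pushed in front of the payload.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_alloc_ll_skb(struct net_device *dev, int payload)
{
	int hlen = LL_RESERVED_SPACE(dev);	/* link-layer headroom (aligned) */
	int tlen = dev->needed_tailroom;	/* e.g. room for a trailing FCS */
	struct sk_buff *skb;

	skb = alloc_skb(payload + hlen + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);			/* leave space for dev_hard_header() */
	skb_reset_network_header(skb);
	return skb;
}
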
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 19d6aefe97d..e4ecc1eef98 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -50,8 +50,6 @@
* SUCH DAMAGE.
*/
-#define DEBUG
-
#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -113,6 +111,20 @@ struct lowpan_dev_record {
struct list_head list;
};
+struct lowpan_fragment {
+ struct sk_buff *skb; /* skb to be assembled */
+ spinlock_t lock; /* concurrency lock */
+ u16 length; /* length to be assembled */
+ u32 bytes_rcv; /* bytes received */
+ u16 tag; /* current fragment tag */
+ struct timer_list timer; /* assembling timer */
+ struct list_head list; /* fragments list */
+};
+
+static unsigned short fragment_tag;
+static LIST_HEAD(lowpan_fragments);
+spinlock_t flist_lock;
+
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
@@ -234,6 +246,50 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
return 0;
}
+static void
+lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ pr_debug("(%s): UDP header compression\n", __func__);
+
+ if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT) &&
+ ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT)) {
+ pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
+ **(hc06_ptr + 1) = /* subtraction is faster */
+ (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
+ ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
+ *hc06_ptr += 2;
+ } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("(%s): remove 8 bits of dest\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
+ memcpy(*hc06_ptr + 1, &uh->source, 2);
+ **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
+ *hc06_ptr += 4;
+ } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("(%s): remove 8 bits of source\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
+ memcpy(*hc06_ptr + 1, &uh->dest, 2);
+ **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
+ *hc06_ptr += 4;
+ } else {
+ pr_debug("(%s): can't compress header\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
+ memcpy(*hc06_ptr + 1, &uh->source, 2);
+ memcpy(*hc06_ptr + 3, &uh->dest, 2);
+ *hc06_ptr += 5;
+ }
+
+ /* checksum is always inline */
+ memcpy(*hc06_ptr, &uh->check, 2);
+ *hc06_ptr += 2;
+}
+
static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
{
u8 ret;
@@ -244,6 +300,73 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
return ret;
}
+static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+{
+ u16 ret;
+
+ BUG_ON(!pskb_may_pull(skb, 2));
+
+ ret = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+ return ret;
+}
+
+static int
+lowpan_uncompress_udp_header(struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ u8 tmp;
+
+ tmp = lowpan_fetch_skb_u8(skb);
+
+ if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+ pr_debug("(%s): UDP header uncompression\n", __func__);
+ switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ memcpy(&uh->source, &skb->data[0], 2);
+ memcpy(&uh->dest, &skb->data[2], 2);
+ skb_pull(skb, 4);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ memcpy(&uh->source, &skb->data[0], 2);
+ uh->dest =
+ skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
+ skb_pull(skb, 3);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_10:
+ uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
+ memcpy(&uh->dest, &skb->data[1], 2);
+ skb_pull(skb, 3);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ uh->source =
+ LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
+ uh->dest =
+ LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
+ skb_pull(skb, 1);
+ break;
+ default:
+ pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+ goto err;
+ break;
+ }
+
+ pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
+ __func__, uh->source, uh->dest);
+
+ /* copy checksum */
+ memcpy(&uh->check, &skb->data[0], 2);
+ skb_pull(skb, 2);
+ } else {
+ pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
static int lowpan_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type, const void *_daddr,
@@ -342,8 +465,6 @@ static int lowpan_header_create(struct sk_buff *skb,
if (hdr->nexthdr == UIP_PROTO_UDP)
iphc0 |= LOWPAN_IPHC_NH_C;
-/* TODO: next header compression */
-
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
*hc06_ptr = hdr->nexthdr;
hc06_ptr += 1;
@@ -431,8 +552,9 @@ static int lowpan_header_create(struct sk_buff *skb,
}
}
- /* TODO: UDP header compression */
- /* TODO: Next Header compression */
+ /* UDP header compression */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ lowpan_compress_udp_header(&hc06_ptr, skb);
head[0] = iphc0;
head[1] = iphc1;
@@ -467,6 +589,7 @@ static int lowpan_header_create(struct sk_buff *skb,
memcpy(&(sa.hwaddr), saddr, 8);
mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+
return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
type, (void *)&da, (void *)&sa, skb->len);
}
@@ -511,6 +634,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
return stat;
}
+static void lowpan_fragment_timer_expired(unsigned long entry_addr)
+{
+ struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
+
+ pr_debug("%s: timer expired for frame with tag %d\n", __func__,
+ entry->tag);
+
+ spin_lock(&flist_lock);
+ list_del(&entry->list);
+ spin_unlock(&flist_lock);
+
+ dev_kfree_skb(entry->skb);
+ kfree(entry);
+}
+
static int
lowpan_process_data(struct sk_buff *skb)
{
@@ -525,6 +663,107 @@ lowpan_process_data(struct sk_buff *skb)
if (skb->len < 2)
goto drop;
iphc0 = lowpan_fetch_skb_u8(skb);
+
+ /* fragments assembling */
+ switch (iphc0 & LOWPAN_DISPATCH_MASK) {
+ case LOWPAN_DISPATCH_FRAG1:
+ case LOWPAN_DISPATCH_FRAGN:
+ {
+ struct lowpan_fragment *frame;
+ u8 len, offset;
+ u16 tag;
+ bool found = false;
+
+ len = lowpan_fetch_skb_u8(skb); /* frame length */
+ tag = lowpan_fetch_skb_u16(skb);
+
+ /*
+ * check if assembly of a frame with the same tag is
+ * already in progress
+ */
+ spin_lock(&flist_lock);
+
+ list_for_each_entry(frame, &lowpan_fragments, list)
+ if (frame->tag == tag) {
+ found = true;
+ break;
+ }
+
+ /* alloc new frame structure */
+ if (!found) {
+ frame = kzalloc(sizeof(struct lowpan_fragment),
+ GFP_ATOMIC);
+ if (!frame)
+ goto unlock_and_drop;
+
+ INIT_LIST_HEAD(&frame->list);
+
+ frame->length = (iphc0 & 7) | (len << 3);
+ frame->tag = tag;
+
+ /* allocate buffer for frame assembling */
+ frame->skb = alloc_skb(frame->length +
+ sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+ if (!frame->skb) {
+ kfree(frame);
+ goto unlock_and_drop;
+ }
+
+ frame->skb->priority = skb->priority;
+ frame->skb->dev = skb->dev;
+
+ /* reserve headroom for uncompressed ipv6 header */
+ skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+ skb_put(frame->skb, frame->length);
+
+ init_timer(&frame->timer);
+ /* timeout is the same as for IPv6 - 60 sec */
+ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+ frame->timer.data = (unsigned long)frame;
+ frame->timer.function = lowpan_fragment_timer_expired;
+
+ add_timer(&frame->timer);
+
+ list_add_tail(&frame->list, &lowpan_fragments);
+ }
+
+ if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
+ goto unlock_and_drop;
+
+ offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+
+ /* if payload fits buffer, copy it */
+ if (likely((offset * 8 + skb->len) <= frame->length))
+ skb_copy_to_linear_data_offset(frame->skb, offset * 8,
+ skb->data, skb->len);
+ else
+ goto unlock_and_drop;
+
+ frame->bytes_rcv += skb->len;
+
+ /* frame assembling complete */
+ if ((frame->bytes_rcv == frame->length) &&
+ frame->timer.expires > jiffies) {
+ /* if the timer hasn't expired yet, delete it first */
+ del_timer(&frame->timer);
+ list_del(&frame->list);
+ spin_unlock(&flist_lock);
+
+ dev_kfree_skb(skb);
+ skb = frame->skb;
+ kfree(frame);
+ iphc0 = lowpan_fetch_skb_u8(skb);
+ break;
+ }
+ spin_unlock(&flist_lock);
+
+ return kfree_skb(skb), 0;
+ }
+ default:
+ break;
+ }
+
iphc1 = lowpan_fetch_skb_u8(skb);
_saddr = mac_cb(skb)->sa.hwaddr;
@@ -659,7 +898,10 @@ lowpan_process_data(struct sk_buff *skb)
goto drop;
}
- /* TODO: UDP header parse */
+ /* UDP data uncompression */
+ if (iphc0 & LOWPAN_IPHC_NH_C)
+ if (lowpan_uncompress_udp_header(skb))
+ goto drop;
/* Not fragmented package */
hdr.payload_len = htons(skb->len);
@@ -674,6 +916,9 @@ lowpan_process_data(struct sk_buff *skb)
lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
sizeof(hdr));
return lowpan_skb_deliver(skb, &hdr);
+
+unlock_and_drop:
+ spin_unlock(&flist_lock);
drop:
kfree_skb(skb);
return -EINVAL;
@@ -692,18 +937,115 @@ static int lowpan_set_address(struct net_device *dev, void *p)
return 0;
}
+static int lowpan_get_mac_header_length(struct sk_buff *skb)
+{
+ /*
+ * Currently only long addressing mode is supported, so the overall
+ * header size is 21:
+ * FC SeqNum DPAN DA SA Sec
+ * 2 + 1 + 2 + 8 + 8 + 0 = 21
+ */
+ return 21;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+ int mlen, int plen, int offset)
+{
+ struct sk_buff *frag;
+ int hlen, ret;
+
+ /* if the payload length is zero, this is the first fragment */
+ hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);
+
+ lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+ frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+ if (!frag)
+ return -ENOMEM;
+
+ frag->priority = skb->priority;
+ frag->dev = skb->dev;
+
+ /* copy header, MFR and payload */
+ memcpy(skb_put(frag, mlen), skb->data, mlen);
+ memcpy(skb_put(frag, hlen), head, hlen);
+
+ if (plen)
+ skb_copy_from_linear_data_offset(skb, offset + mlen,
+ skb_put(frag, plen), plen);
+
+ lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
+ frag->len);
+
+ ret = dev_queue_xmit(frag);
+
+ return ret;
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb)
+{
+ int err, header_length, payload_length, tag, offset = 0;
+ u8 head[5];
+
+ header_length = lowpan_get_mac_header_length(skb);
+ payload_length = skb->len - header_length;
+ tag = fragment_tag++;
+
+ /* first fragment header */
+ head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
+ head[1] = (payload_length >> 3) & 0xff;
+ head[2] = tag & 0xff;
+ head[3] = tag >> 8;
+
+ err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
+
+ /* next fragment header */
+ head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+ head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+ while ((payload_length - offset > 0) && (err >= 0)) {
+ int len = LOWPAN_FRAG_SIZE;
+
+ head[4] = offset / 8;
+
+ if (payload_length - offset < len)
+ len = payload_length - offset;
+
+ err = lowpan_fragment_xmit(skb, head, header_length,
+ len, offset);
+ offset += len;
+ }
+
+ return err;
+}
+
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int err = 0;
+ int err = -1;
pr_debug("(%s): package xmit\n", __func__);
skb->dev = lowpan_dev_info(dev)->real_dev;
if (skb->dev == NULL) {
pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
- dev_kfree_skb(skb);
- } else
+ goto error;
+ }
+
+ if (skb->len <= IEEE802154_MTU) {
err = dev_queue_xmit(skb);
+ goto out;
+ }
+
+ pr_debug("(%s): frame is too big, fragmentation is needed\n",
+ __func__);
+ err = lowpan_skb_fragmentation(skb);
+error:
+ dev_kfree_skb(skb);
+out:
+ if (err < 0)
+ pr_debug("(%s): ERROR: xmit failed\n", __func__);
return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
@@ -730,13 +1072,12 @@ static void lowpan_setup(struct net_device *dev)
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
dev->type = ARPHRD_IEEE802154;
- dev->features = NETIF_F_NO_CSUM;
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 1281;
dev->tx_queue_len = 0;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
dev->netdev_ops = &lowpan_netdev_ops;
@@ -765,8 +1106,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* check that it's our buffer */
- if ((skb->data[0] & 0xe0) == 0x60)
+ switch (skb->data[0] & 0xe0) {
+ case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
+ case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
lowpan_process_data(skb);
+ break;
+ default:
+ break;
+ }
return NET_RX_SUCCESS;
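
The lowpan_compress_udp_header() addition above selects one of the four LOWPAN_NHC_UDP_CS_P_* port encodings. A small user-space C sketch of that decision tree, using host-order port values for clarity; the constants mirror the 6lowpan.h additions below, and the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

#define NHC_UDP_4BIT_PORT 0xF0B0
#define NHC_UDP_4BIT_MASK 0xFFF0
#define NHC_UDP_8BIT_PORT 0xF000
#define NHC_UDP_8BIT_MASK 0xFF00

/* Mirrors the compressor's choice of LOWPAN_NHC_UDP_CS_P_00..P_11. */
static const char *udp_port_encoding(uint16_t sport, uint16_t dport)
{
	if ((sport & NHC_UDP_4BIT_MASK) == NHC_UDP_4BIT_PORT &&
	    (dport & NHC_UDP_4BIT_MASK) == NHC_UDP_4BIT_PORT)
		return "P_11: both ports as 4-bit offsets from 0xF0B0";
	if ((dport & NHC_UDP_8BIT_MASK) == NHC_UDP_8BIT_PORT)
		return "P_01: dest as 8-bit offset from 0xF000, source inline";
	if ((sport & NHC_UDP_8BIT_MASK) == NHC_UDP_8BIT_PORT)
		return "P_10: source as 8-bit offset from 0xF000, dest inline";
	return "P_00: both ports carried inline";
}

int main(void)
{
	printf("%s\n", udp_port_encoding(0xF0B1, 0xF0B2));
	printf("%s\n", udp_port_encoding(0x1234, 0xF042));
	printf("%s\n", udp_port_encoding(0x1234, 0x5678));
	return 0;
}
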
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 5d8cf80b930..aeff3f31048 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -159,6 +159,24 @@
#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
+#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ * - MTU is 127 octets
+ * - maximum MHR size is 37 octets
+ * - MFR size is 2 octets
+ *
+ * so the minimal payload size that we can guarantee is:
+ * MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE 88
+
/*
* Values of fields within the IPHC encoding first byte
* (C stands for compressed and I for inline)
@@ -201,6 +219,11 @@
#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
+#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
+
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index faecf648123..1b09eaabaac 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -209,6 +209,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
unsigned mtu;
struct sk_buff *skb;
struct dgram_sock *ro = dgram_sk(sk);
+ int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
@@ -229,13 +230,15 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
mtu = dev->mtu;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT,
&err);
if (!skb)
goto out_dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 10970ca8574..f96bae8fd33 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -108,6 +108,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct net_device *dev;
unsigned mtu;
struct sk_buff *skb;
+ int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
@@ -137,12 +138,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out_dev;
}
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto out_dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index cbb505ba932..aa2a2c79776 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -163,8 +163,6 @@ config IP_PNP_RARP
operating on your network. Read
<file:Documentation/filesystems/nfs/nfsroot.txt> for details.
-# not yet ready..
-# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
config NET_IPIP
tristate "IP: tunneling"
select INET_TUNNEL
@@ -409,6 +407,14 @@ config INET_TCP_DIAG
depends on INET_DIAG
def_tristate INET_DIAG
+config INET_UDP_DIAG
+ tristate "UDP: socket monitoring interface"
+ depends on INET_DIAG
+ default n
+ ---help---
+ Support for UDP socket monitoring interface used by the ss tool.
+ If unsure, say Y.
+
menuconfig TCP_CONG_ADVANCED
bool "TCP: advanced congestion control"
---help---
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f2dc69cffb5..ff75d3bbcd6 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/
obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
+obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b5096a9875..f7b5670744f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1250,7 +1250,8 @@ out:
return err;
}
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
@@ -1572,9 +1573,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
sizeof(struct icmp_mib),
__alignof__(struct icmp_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
- sizeof(struct icmpmsg_mib),
- __alignof__(struct icmpmsg_mib)) < 0)
+ net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+ GFP_KERNEL);
+ if (!net->mib.icmpmsg_statistics)
goto err_icmpmsg_mib;
tcp_mib_init(net);
@@ -1598,7 +1599,7 @@ err_tcp_mib:
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
- snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+ kfree(net->mib.icmpmsg_statistics);
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
@@ -1671,6 +1672,8 @@ static int __init inet_init(void)
ip_static_sysctl_init();
#endif
+ tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
/*
* Add all the base protocols.
*/
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96a164aa136..59402be133f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -112,11 +112,6 @@
#include <net/arp.h>
#include <net/ax25.h>
#include <net/netrom.h>
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-#include <net/atmclip.h>
-struct neigh_table *clip_tbl_hook;
-EXPORT_SYMBOL(clip_tbl_hook);
-#endif
#include <asm/system.h>
#include <linux/uaccess.h>
@@ -126,7 +121,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
/*
* Interface to generic neighbour cache.
*/
-static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd);
+static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 *hash_rnd);
static int arp_constructor(struct neighbour *neigh);
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -164,7 +159,6 @@ static const struct neigh_ops arp_broken_ops = {
struct neigh_table arp_tbl = {
.family = AF_INET,
- .entry_size = sizeof(struct neighbour) + 4,
.key_len = 4,
.hash = arp_hash,
.constructor = arp_constructor,
@@ -177,7 +171,7 @@ struct neigh_table arp_tbl = {
.gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 3,
.mcast_probes = 3,
.anycast_delay = 1 * HZ,
@@ -221,9 +215,9 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
static u32 arp_hash(const void *pkey,
const struct net_device *dev,
- __u32 hash_rnd)
+ __u32 *hash_rnd)
{
- return arp_hashfn(*(u32 *)pkey, dev, hash_rnd);
+ return arp_hashfn(*(u32 *)pkey, dev, *hash_rnd);
}
static int arp_constructor(struct neighbour *neigh)
@@ -283,9 +277,9 @@ static int arp_constructor(struct neighbour *neigh)
default:
break;
case ARPHRD_ROSE:
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
case ARPHRD_AX25:
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
case ARPHRD_NETROM:
#endif
neigh->ops = &arp_broken_ops;
@@ -592,16 +586,18 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct sk_buff *skb;
struct arphdr *arp;
unsigned char *arp_ptr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
/*
* Allocate a buffer
*/
- skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
if (skb == NULL)
return NULL;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
skb->dev = dev;
@@ -633,13 +629,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp->ar_pro = htons(ETH_P_IP);
break;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
case ARPHRD_AX25:
arp->ar_hrd = htons(ARPHRD_AX25);
arp->ar_pro = htons(AX25_P_IP);
break;
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
case ARPHRD_NETROM:
arp->ar_hrd = htons(ARPHRD_NETROM);
arp->ar_pro = htons(AX25_P_IP);
@@ -647,13 +643,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
#endif
#endif
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
case ARPHRD_FDDI:
arp->ar_hrd = htons(ARPHRD_ETHER);
arp->ar_pro = htons(ETH_P_IP);
break;
#endif
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#if IS_ENABLED(CONFIG_TR)
case ARPHRD_IEEE802_TR:
arp->ar_hrd = htons(ARPHRD_IEEE802);
arp->ar_pro = htons(ETH_P_IP);
@@ -1040,7 +1036,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
return -EINVAL;
}
switch (dev->type) {
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
case ARPHRD_FDDI:
/*
* According to RFC 1390, FDDI devices should accept ARP
@@ -1286,7 +1282,7 @@ void __init arp_init(void)
}
#ifdef CONFIG_PROC_FS
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
/* ------------------------------------------------------------------------ */
/*
@@ -1334,7 +1330,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
read_lock(&n->lock);
/* Convert hardware address to XX:XX:XX:XX ... form. */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
ax2asc2((ax25_address *)n->ha, hbuffer);
else {
@@ -1347,7 +1343,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
if (k != 0)
--k;
hbuffer[k] = 0;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6b5092f29a..65f01dc4756 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ int new_value = *(int *)ctl->data;
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if ((new_value == 0) && (old_value != 0))
+ rt_cache_flush(net, 0);
}
return ret;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 46339ba7a2d..799fc790b3c 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -67,6 +67,7 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
return err;
}
+EXPORT_SYMBOL_GPL(fib_lookup);
static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
int flags, struct fib_lookup_arg *arg)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37b671185c8..d04b13ae18f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1607,6 +1607,7 @@ found:
rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL_GPL(fib_table_lookup);
/*
* Remove the leaf and return parent.
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c7472eff2d5..5104bc0bbdb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -304,9 +304,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
struct flowi4 fl4;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
while (1) {
- skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+ skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
if (skb)
break;
@@ -327,7 +329,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
@@ -647,6 +649,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
__be32 group = pmc ? pmc->multiaddr : 0;
struct flowi4 fl4;
__be32 dst;
+ int hlen, tlen;
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
@@ -661,7 +664,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
if (IS_ERR(rt))
return -1;
- skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
@@ -669,7 +674,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
skb_dst_set(skb, &rt->dst);
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
@@ -875,6 +880,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
* to be intended in a v3 query.
*/
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
+ if (!max_delay)
+ max_delay = 1; /* can't mod w/ 0 */
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
@@ -1574,7 +1581,7 @@ out_unlock:
* Add multicast single-source filter to the interface list
*/
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
- __be32 *psfsrc, int delta)
+ __be32 *psfsrc)
{
struct ip_sf_list *psf, *psf_prev;
@@ -1709,14 +1716,15 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfcount[sfmode]++;
err = 0;
for (i=0; i<sfcount; i++) {
- err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+ err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
}
if (err) {
int j;
- pmc->sfcount[sfmode]--;
+ if (!delta)
+ pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c14d88ad348..2e4e24476c4 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -418,7 +418,7 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
@@ -588,10 +588,19 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
-struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
- const gfp_t priority)
+/**
+ * inet_csk_clone_lock - clone an inet socket, and lock its clone
+ * @sk: the socket to clone
+ * @req: request_sock
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority)
{
- struct sock *newsk = sk_clone(sk, priority);
+ struct sock *newsk = sk_clone_lock(sk, priority);
if (newsk != NULL) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -615,7 +624,7 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
}
return newsk;
}
-EXPORT_SYMBOL_GPL(inet_csk_clone);
+EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
* At this point, there should be no process reference to this
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 68e8ac51438..2240a8e8c44 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -33,6 +33,7 @@
#include <linux/stddef.h>
#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
static const struct inet_diag_handler **inet_diag_table;
@@ -45,24 +46,22 @@ struct inet_diag_entry {
u16 userlocks;
};
-static struct sock *idiagnl;
-
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
static DEFINE_MUTEX(inet_diag_table_mutex);
-static const struct inet_diag_handler *inet_diag_lock_handler(int type)
+static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
- if (!inet_diag_table[type])
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_INET_DIAG, type);
+ if (!inet_diag_table[proto])
+ request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, AF_INET, proto);
mutex_lock(&inet_diag_table_mutex);
- if (!inet_diag_table[type])
+ if (!inet_diag_table[proto])
return ERR_PTR(-ENOENT);
- return inet_diag_table[type];
+ return inet_diag_table[proto];
}
static inline void inet_diag_unlock_handler(
@@ -71,21 +70,21 @@ static inline void inet_diag_unlock_handler(
mutex_unlock(&inet_diag_table_mutex);
}
-static int inet_csk_diag_fill(struct sock *sk,
- struct sk_buff *skb,
- int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+ struct sk_buff *skb, struct inet_diag_req *req,
+ u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
const struct inet_sock *inet = inet_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
void *info = NULL;
struct inet_diag_meminfo *minfo = NULL;
unsigned char *b = skb_tail_pointer(skb);
const struct inet_diag_handler *handler;
+ int ext = req->idiag_ext;
- handler = inet_diag_table[unlh->nlmsg_type];
+ handler = inet_diag_table[req->sdiag_protocol];
BUG_ON(handler == NULL);
nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
@@ -97,47 +96,55 @@ static int inet_csk_diag_fill(struct sock *sk,
if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
- if (ext & (1 << (INET_DIAG_INFO - 1)))
- info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
- handler->idiag_info_size);
-
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
- const size_t len = strlen(icsk->icsk_ca_ops->name);
-
- strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
- icsk->icsk_ca_ops->name);
- }
-
- if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
- RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
r->id.idiag_if = sk->sk_bound_dev_if;
- r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+ sock_diag_save_cookie(sk, r->id.idiag_cookie);
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+ * hence this needs to be included regardless of socket family.
+ */
+ if (ext & (1 << (INET_DIAG_TOS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
+#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &np->rcv_saddr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &np->daddr);
+ *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = np->daddr;
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
}
#endif
+ r->idiag_uid = sock_i_uid(sk);
+ r->idiag_inode = sock_i_ino(sk);
+
+ if (minfo) {
+ minfo->idiag_rmem = sk_rmem_alloc_get(sk);
+ minfo->idiag_wmem = sk->sk_wmem_queued;
+ minfo->idiag_fmem = sk->sk_forward_alloc;
+ minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+ }
+
+ if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
+ if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
+ goto rtattr_failure;
+
+ if (icsk == NULL) {
+ r->idiag_rqueue = r->idiag_wqueue = 0;
+ goto out;
+ }
+
#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
@@ -158,14 +165,14 @@ static int inet_csk_diag_fill(struct sock *sk,
}
#undef EXPIRES_IN_MS
- r->idiag_uid = sock_i_uid(sk);
- r->idiag_inode = sock_i_ino(sk);
+ if (ext & (1 << (INET_DIAG_INFO - 1)))
+ info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
- if (minfo) {
- minfo->idiag_rmem = sk_rmem_alloc_get(sk);
- minfo->idiag_wmem = sk->sk_wmem_queued;
- minfo->idiag_fmem = sk->sk_forward_alloc;
- minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+ if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+ const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+ strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+ icsk->icsk_ca_ops->name);
}
handler->idiag_get_info(sk, r, info);
@@ -174,6 +181,7 @@ static int inet_csk_diag_fill(struct sock *sk,
icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
icsk->icsk_ca_ops->get_info(sk, ext, skb);
+out:
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
@@ -182,10 +190,20 @@ nlmsg_failure:
nlmsg_trim(skb, b);
return -EMSGSIZE;
}
+EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
+
+static int inet_csk_diag_fill(struct sock *sk,
+ struct sk_buff *skb, struct inet_diag_req *req,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
+{
+ return inet_sk_diag_fill(sk, inet_csk(sk),
+ skb, req, pid, seq, nlmsg_flags, unlh);
+}
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
- struct sk_buff *skb, int ext, u32 pid,
- u32 seq, u16 nlmsg_flags,
+ struct sk_buff *skb, struct inet_diag_req *req,
+ u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
long tmo;
@@ -206,8 +224,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
r->id.idiag_if = tw->tw_bound_dev_if;
- r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+ sock_diag_save_cookie(tw, r->id.idiag_cookie);
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
r->id.idiag_src[0] = tw->tw_rcv_saddr;
@@ -219,15 +236,13 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == AF_INET6) {
const struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &tw6->tw_v6_rcv_saddr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &tw6->tw_v6_daddr);
+ *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
@@ -238,42 +253,31 @@ nlmsg_failure:
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
- int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+ struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
- skb, ext, pid, seq, nlmsg_flags,
+ skb, r, pid, seq, nlmsg_flags,
unlh);
- return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+ return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
}
-static int inet_diag_get_exact(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh)
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh, struct inet_diag_req *req)
{
int err;
struct sock *sk;
- struct inet_diag_req *req = NLMSG_DATA(nlh);
struct sk_buff *rep;
- struct inet_hashinfo *hashinfo;
- const struct inet_diag_handler *handler;
-
- handler = inet_diag_lock_handler(nlh->nlmsg_type);
- if (IS_ERR(handler)) {
- err = PTR_ERR(handler);
- goto unlock;
- }
- hashinfo = handler->idiag_hashinfo;
err = -EINVAL;
-
- if (req->idiag_family == AF_INET) {
+ if (req->sdiag_family == AF_INET) {
sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
}
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- else if (req->idiag_family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (req->sdiag_family == AF_INET6) {
sk = inet6_lookup(&init_net, hashinfo,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
@@ -283,29 +287,26 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
}
#endif
else {
- goto unlock;
+ goto out_nosk;
}
err = -ENOENT;
if (sk == NULL)
- goto unlock;
+ goto out_nosk;
- err = -ESTALE;
- if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
- req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
- ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
- (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+ err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+ if (err)
goto out;
err = -ENOMEM;
rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) +
- handler->idiag_info_size + 64)),
+ sizeof(struct tcp_info) + 64)),
GFP_KERNEL);
if (!rep)
goto out;
- err = sk_diag_fill(sk, rep, req->idiag_ext,
+ err = sk_diag_fill(sk, rep, req,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
@@ -313,7 +314,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -325,8 +326,25 @@ out:
else
sock_put(sk);
}
-unlock:
+out_nosk:
+ return err;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ struct inet_diag_req *req)
+{
+ const struct inet_diag_handler *handler;
+ int err;
+
+ handler = inet_diag_lock_handler(req->sdiag_protocol);
+ if (IS_ERR(handler))
+ err = PTR_ERR(handler);
+ else
+ err = handler->dump_one(in_skb, nlh, req);
inet_diag_unlock_handler(handler);
+
return err;
}
@@ -357,9 +375,12 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
}
-static int inet_diag_bc_run(const void *bc, int len,
- const struct inet_diag_entry *entry)
+static int inet_diag_bc_run(const struct nlattr *_bc,
+ const struct inet_diag_entry *entry)
{
+ const void *bc = nla_data(_bc);
+ int len = nla_len(_bc);
+
while (len > 0) {
int yes = 1;
const struct inet_diag_bc_op *op = bc;
@@ -433,6 +454,35 @@ static int inet_diag_bc_run(const void *bc, int len,
return len == 0;
}
+int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+{
+ struct inet_diag_entry entry;
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (bc == NULL)
+ return 1;
+
+ entry.family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (entry.family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ entry.saddr = np->rcv_saddr.s6_addr32;
+ entry.daddr = np->daddr.s6_addr32;
+ } else
+#endif
+ {
+ entry.saddr = &inet->inet_rcv_saddr;
+ entry.daddr = &inet->inet_daddr;
+ }
+ entry.sport = inet->inet_num;
+ entry.dport = ntohs(inet->inet_dport);
+ entry.userlocks = sk->sk_userlocks;
+
+ return inet_diag_bc_run(bc, &entry);
+}
+EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
+
static int valid_cc(const void *bc, int len, int cc)
{
while (len >= 0) {
@@ -489,57 +539,29 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ struct inet_diag_req *r,
+ const struct nlattr *bc)
{
- struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
- struct inet_diag_entry entry;
- const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
- sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
- struct inet_sock *inet = inet_sk(sk);
-
- entry.family = sk->sk_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- if (entry.family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- entry.saddr = np->rcv_saddr.s6_addr32;
- entry.daddr = np->daddr.s6_addr32;
- } else
-#endif
- {
- entry.saddr = &inet->inet_rcv_saddr;
- entry.daddr = &inet->inet_daddr;
- }
- entry.sport = inet->inet_num;
- entry.dport = ntohs(inet->inet_dport);
- entry.userlocks = sk->sk_userlocks;
+ if (!inet_diag_bc_sk(bc, sk))
+ return 0;
- if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
- return 0;
- }
-
- return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+ return inet_csk_diag_fill(sk, skb, r,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ struct inet_diag_req *r,
+ const struct nlattr *bc)
{
- struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+ if (bc != NULL) {
struct inet_diag_entry entry;
- const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
- sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
entry.family = tw->tw_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == AF_INET6) {
struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
@@ -555,11 +577,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
entry.dport = ntohs(tw->tw_dport);
entry.userlocks = 0;
- if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+ if (!inet_diag_bc_run(bc, &entry))
return 0;
}
- return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+ return inet_twsk_diag_fill(tw, skb, r,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
@@ -585,8 +607,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_retrans = req->retrans;
r->id.idiag_if = sk->sk_bound_dev_if;
- r->id.idiag_cookie[0] = (u32)(unsigned long)req;
- r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
+ sock_diag_save_cookie(req, r->id.idiag_cookie);
tmo = req->expires - jiffies;
if (tmo < 0)
@@ -601,12 +622,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_wqueue = 0;
r->idiag_uid = sock_i_uid(sk);
r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &inet6_rsk(req)->loc_addr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &inet6_rsk(req)->rmt_addr);
+ *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
+ *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -619,13 +638,13 @@ nlmsg_failure:
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ struct inet_diag_req *r,
+ const struct nlattr *bc)
{
struct inet_diag_entry entry;
- struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt;
- const struct nlattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
int j, s_j;
int reqnum, s_reqnum;
@@ -645,9 +664,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
if (!lopt || !lopt->qlen)
goto out;
- if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
- bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
- INET_DIAG_REQ_BYTECODE);
+ if (bc != NULL) {
entry.sport = inet->inet_num;
entry.userlocks = sk->sk_userlocks;
}
@@ -667,21 +684,20 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
if (bc) {
entry.saddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
(entry.family == AF_INET6) ?
inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
&ireq->loc_addr;
entry.daddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
(entry.family == AF_INET6) ?
inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
&ireq->rmt_addr;
entry.dport = ntohs(ireq->rmt_port);
- if (!inet_diag_bc_run(nla_data(bc),
- nla_len(bc), &entry))
+ if (!inet_diag_bc_run(bc, &entry))
continue;
}
@@ -704,19 +720,11 @@ out:
return err;
}
-static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
+ struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
{
int i, num;
int s_i, s_num;
- struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
- const struct inet_diag_handler *handler;
- struct inet_hashinfo *hashinfo;
-
- handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
- if (IS_ERR(handler))
- goto unlock;
-
- hashinfo = handler->idiag_hashinfo;
s_i = cb->args[1];
s_num = num = cb->args[2];
@@ -741,6 +749,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
}
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_listen;
+
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next_listen;
@@ -750,7 +762,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[3] > 0)
goto syn_recv;
- if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -759,7 +771,7 @@ syn_recv:
if (!(r->idiag_states & TCPF_SYN_RECV))
goto next_listen;
- if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
+ if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -781,7 +793,7 @@ skip_listen_ht:
}
if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
- goto unlock;
+ goto out;
for (i = s_i; i <= hashinfo->ehash_mask; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
@@ -806,13 +818,16 @@ skip_listen_ht:
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
goto next_normal;
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_normal;
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next_normal;
if (r->id.idiag_dport != inet->inet_dport &&
r->id.idiag_dport)
goto next_normal;
- if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
spin_unlock_bh(lock);
goto done;
}
@@ -828,13 +843,16 @@ next_normal:
if (num < s_num)
goto next_dying;
+ if (r->sdiag_family != AF_UNSPEC &&
+ tw->tw_family != r->sdiag_family)
+ goto next_dying;
if (r->id.idiag_sport != tw->tw_sport &&
r->id.idiag_sport)
goto next_dying;
if (r->id.idiag_dport != tw->tw_dport &&
r->id.idiag_dport)
goto next_dying;
- if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
+ if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
spin_unlock_bh(lock);
goto done;
}
@@ -848,15 +866,85 @@ next_dying:
done:
cb->args[1] = i;
cb->args[2] = num;
-unlock:
+out:
+ ;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
+
+static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ const struct inet_diag_handler *handler;
+
+ handler = inet_diag_lock_handler(r->sdiag_protocol);
+ if (!IS_ERR(handler))
+ handler->dump(skb, cb, r, bc);
inet_diag_unlock_handler(handler);
+
return skb->len;
}
-static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct nlattr *bc = NULL;
int hdrlen = sizeof(struct inet_diag_req);
+ if (nlmsg_attrlen(cb->nlh, hdrlen))
+ bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+ return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+}
+
+static inline int inet_diag_type2proto(int type)
+{
+ switch (type) {
+ case TCPDIAG_GETSOCK:
+ return IPPROTO_TCP;
+ case DCCPDIAG_GETSOCK:
+ return IPPROTO_DCCP;
+ default:
+ return 0;
+ }
+}
+
+static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
+ struct inet_diag_req req;
+ struct nlattr *bc = NULL;
+ int hdrlen = sizeof(struct inet_diag_req_compat);
+
+ req.sdiag_family = AF_UNSPEC; /* compatibility */
+ req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+ req.idiag_ext = rc->idiag_ext;
+ req.idiag_states = rc->idiag_states;
+ req.id = rc->id;
+
+ if (nlmsg_attrlen(cb->nlh, hdrlen))
+ bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+ return __inet_diag_dump(skb, cb, &req, bc);
+}
+
+static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh)
+{
+ struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
+ struct inet_diag_req req;
+
+ req.sdiag_family = rc->idiag_family;
+ req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+ req.idiag_ext = rc->idiag_ext;
+ req.idiag_states = rc->idiag_states;
+ req.id = rc->id;
+
+ return inet_diag_get_exact(in_skb, nlh, &req);
+}
+
+static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ int hdrlen = sizeof(struct inet_diag_req_compat);
+
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
return -EINVAL;
@@ -873,28 +961,54 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
}
- return netlink_dump_start(idiagnl, skb, nlh,
- inet_diag_dump, NULL, 0);
+ return netlink_dump_start(sock_diag_nlsk, skb, nlh,
+ inet_diag_dump_compat, NULL, 0);
}
- return inet_diag_get_exact(skb, nlh);
+ return inet_diag_get_exact_compat(skb, nlh);
}
-static DEFINE_MUTEX(inet_diag_mutex);
-
-static void inet_diag_rcv(struct sk_buff *skb)
+static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
- mutex_lock(&inet_diag_mutex);
- netlink_rcv_skb(skb, &inet_diag_rcv_msg);
- mutex_unlock(&inet_diag_mutex);
+ int hdrlen = sizeof(struct inet_diag_req);
+
+ if (nlmsg_len(h) < hdrlen)
+ return -EINVAL;
+
+ if (h->nlmsg_flags & NLM_F_DUMP) {
+ if (nlmsg_attrlen(h, hdrlen)) {
+ struct nlattr *attr;
+ attr = nlmsg_find_attr(h, hdrlen,
+ INET_DIAG_REQ_BYTECODE);
+ if (attr == NULL ||
+ nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+ inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+ return -EINVAL;
+ }
+
+ return netlink_dump_start(sock_diag_nlsk, skb, h,
+ inet_diag_dump, NULL, 0);
+ }
+
+ return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
}
+static struct sock_diag_handler inet_diag_handler = {
+ .family = AF_INET,
+ .dump = inet_diag_handler_dump,
+};
+
+static struct sock_diag_handler inet6_diag_handler = {
+ .family = AF_INET6,
+ .dump = inet_diag_handler_dump,
+};
+
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
int err = -EINVAL;
- if (type >= INET_DIAG_GETSOCK_MAX)
+ if (type >= IPPROTO_MAX)
goto out;
mutex_lock(&inet_diag_table_mutex);
@@ -913,7 +1027,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
- if (type >= INET_DIAG_GETSOCK_MAX)
+ if (type >= IPPROTO_MAX)
return;
mutex_lock(&inet_diag_table_mutex);
@@ -924,7 +1038,7 @@ EXPORT_SYMBOL_GPL(inet_diag_unregister);
static int __init inet_diag_init(void)
{
- const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
+ const int inet_diag_table_size = (IPPROTO_MAX *
sizeof(struct inet_diag_handler *));
int err = -ENOMEM;
@@ -932,25 +1046,35 @@ static int __init inet_diag_init(void)
if (!inet_diag_table)
goto out;
- idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
- inet_diag_rcv, NULL, THIS_MODULE);
- if (idiagnl == NULL)
- goto out_free_table;
- err = 0;
+ err = sock_diag_register(&inet_diag_handler);
+ if (err)
+ goto out_free_nl;
+
+ err = sock_diag_register(&inet6_diag_handler);
+ if (err)
+ goto out_free_inet;
+
+ sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
return err;
-out_free_table:
+
+out_free_inet:
+ sock_diag_unregister(&inet_diag_handler);
+out_free_nl:
kfree(inet_diag_table);
goto out;
}
static void __exit inet_diag_exit(void)
{
- netlink_kernel_release(idiagnl);
+ sock_diag_unregister(&inet6_diag_handler);
+ sock_diag_unregister(&inet_diag_handler);
+ sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
kfree(inet_diag_table);
}
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);
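
The open-coded ESTALE test removed above (the comparison against idiag_cookie[]) is delegated to the generic sock_diag_save_cookie()/sock_diag_check_cookie() helpers. Below is a minimal userspace sketch of the semantics that removed check implies, assuming the helpers keep the same split-pointer encoding and all-ones wildcard; the names cookie_save() and cookie_check() are illustrative, not the kernel API.

#include <stdio.h>

#define INET_DIAG_NOCOOKIE (~0U)    /* all-ones wildcard: match any socket */

static void cookie_save(const void *sk, unsigned int cookie[2])
{
    cookie[0] = (unsigned int)(unsigned long)sk;
    cookie[1] = (unsigned int)(((unsigned long)sk >> 31) >> 1);
}

/* returns 0 on match, -1 (the kernel would use -ESTALE) otherwise */
static int cookie_check(const void *sk, const unsigned int cookie[2])
{
    unsigned int cur[2];

    if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
        return 0;
    cookie_save(sk, cur);
    return (cur[0] == cookie[0] && cur[1] == cookie[1]) ? 0 : -1;
}

int main(void)
{
    int obj;                        /* stand-in for a struct sock */
    unsigned int c[2];

    cookie_save(&obj, c);
    printf("check(saved)    = %d\n", cookie_check(&obj, c));  /* 0  */
    c[0] ^= 1;
    printf("check(stale)    = %d\n", cookie_check(&obj, c));  /* -1 */
    c[0] = c[1] = INET_DIAG_NOCOOKIE;
    printf("check(wildcard) = %d\n", cookie_check(&obj, c));  /* 0  */
    return 0;
}
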
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3b34d1c8627..29a07b6c716 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
rt = skb_rtable(skb);
- if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+ if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
goto sr_failed;
if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fdaabf2f2b6..1f23a57aa9e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -392,7 +392,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
/* Is this the final fragment? */
if ((flags & IP_MF) == 0) {
/* If we already have some bits beyond end
- * or have different end, the segment is corrrupted.
+ * or have different end, the segment is corrupted.
*/
if (end < qp->q.len ||
((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d55110e9312..2b53a1f7abf 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -46,7 +46,7 @@
#include <net/rtnetlink.h>
#include <net/gre.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -171,7 +171,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
{
@@ -729,9 +729,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
+ struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
const struct in6_addr *addr6;
int addr_type;
@@ -799,7 +799,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
goto tx_error;
}
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
@@ -835,6 +835,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (max_headroom > dev->needed_headroom)
+ dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
@@ -873,7 +875,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
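
All of the "defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)" tests converted in these hunks collapse into IS_ENABLED(CONFIG_IPV6), which is true when the option is built in (=y) or modular (=m). The listing below is a compile-and-run sketch of that idiom; it is a simplified stand-in, not the kernel's exact include/linux/kconfig.h.

#include <stdio.h>

/* Pretend the option was selected as a module, as CONFIG_IPV6=m would do. */
#define CONFIG_IPV6_MODULE 1

/* Simplified version of the kconfig.h placeholder trick behind IS_ENABLED(). */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

/* True when the symbol is defined to 1, either directly or with _MODULE. */
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

int main(void)
{
    printf("IS_ENABLED(CONFIG_IPV6)    = %d\n", IS_ENABLED(CONFIG_IPV6));    /* 1 */
    printf("IS_ENABLED(CONFIG_NOTHERE) = %d\n", IS_ENABLED(CONFIG_NOTHERE)); /* 0 */
    return 0;
}
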
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 05d20cca9d6..1e60f767907 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
) {
if (srrptr + 3 > srrspace)
break;
- if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+ if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
break;
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+ ip_hdr(skb)->daddr = opt->nexthop;
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
}
if (srrptr <= srrspace) {
opt->srr_is_hit = 1;
- iph->daddr = nexthop;
+ opt->nexthop = nexthop;
opt->is_changed = 1;
}
return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0bc95f3977d..ff302bde889 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -206,7 +206,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
}
rcu_read_lock();
- neigh = dst_get_neighbour(dst);
+ neigh = dst_get_neighbour_noref(dst);
if (neigh) {
int res = neigh_output(neigh, skb);
@@ -319,6 +319,20 @@ int ip_output(struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+/*
+ * copy saddr and daddr, possibly using 64-bit loads/stores
+ * Equivalent to:
+ * iph->saddr = fl4->saddr;
+ * iph->daddr = fl4->daddr;
+ */
+static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
+{
+ BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
+ offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
+ memcpy(&iph->saddr, &fl4->saddr,
+ sizeof(fl4->saddr) + sizeof(fl4->daddr));
+}
+
int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
struct sock *sk = skb->sk;
@@ -381,8 +395,8 @@ packet_routed:
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->protocol = sk->sk_protocol;
- iph->saddr = fl4->saddr;
- iph->daddr = fl4->daddr;
+ ip_copy_addrs(iph, fl4);
+
/* Transport layer set skb->h.foo itself. */
if (inet_opt && inet_opt->opt.optlen) {
@@ -1337,8 +1351,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
ip_select_ident(iph, &rt->dst, sk);
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
- iph->saddr = fl4->saddr;
- iph->daddr = fl4->daddr;
+ ip_copy_addrs(iph, fl4);
if (opt) {
iph->ihl += opt->optlen>>2;
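
The new ip_copy_addrs() above relies on daddr sitting directly after saddr in both struct flowi4 and struct iphdr, so a single memcpy covers both fields and the compiler may emit one wider store; the BUILD_BUG_ON() pins down that layout assumption. Below is a standalone sketch of the same pattern, using illustrative stand-in structs rather than the kernel's.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the relevant parts of struct flowi4 / struct iphdr. */
struct flow_like { uint32_t saddr, daddr; };
struct iph_like  { uint32_t saddr, daddr; };

static void copy_addrs(struct iph_like *iph, const struct flow_like *fl)
{
    /* Compile-time check that daddr really follows saddr, mirroring the
     * kernel's BUILD_BUG_ON(); reordering the struct breaks the build here. */
    _Static_assert(offsetof(struct flow_like, daddr) ==
                   offsetof(struct flow_like, saddr) + sizeof(fl->saddr),
                   "saddr/daddr must stay adjacent");

    /* One copy covers both fields; the compiler is free to merge it into a
     * single 64-bit load/store instead of two 32-bit ones. */
    memcpy(&iph->saddr, &fl->saddr, sizeof(fl->saddr) + sizeof(fl->daddr));
}

int main(void)
{
    struct flow_like fl = { .saddr = 0x0100007f, .daddr = 0x0101a8c0 };
    struct iph_like iph;

    copy_addrs(&iph, &fl);
    printf("saddr=%08x daddr=%08x\n", iph.saddr, iph.daddr);
    assert(iph.saddr == fl.saddr && iph.daddr == fl.daddr);
    return 0;
}
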
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 09ff51bf16a..8aa87c19fa0 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -37,7 +37,7 @@
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
@@ -55,20 +55,13 @@
/*
* SOL_IP control messages.
*/
+#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
- struct in_pktinfo info;
- struct rtable *rt = skb_rtable(skb);
+ struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
- if (rt) {
- info.ipi_ifindex = rt->rt_iif;
- info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
- } else {
- info.ipi_ifindex = 0;
- info.ipi_spec_dst.s_addr = 0;
- }
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
@@ -515,7 +508,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
sock_owned_by_user(sk));
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE)) &&
@@ -526,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
if (opt)
icsk->icsk_ext_hdr_len += opt->opt.optlen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
}
#endif
}
@@ -992,20 +985,28 @@ e_inval:
}
/**
- * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * ipv4_pktinfo_prepare - transfer some info from rtable to skb
* @sk: socket
* @skb: buffer
*
- * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
- * is not set, we drop skb dst entry now, while dst cache line is hot.
+ * To support the IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
+ * in skb->cb[] before the dst is dropped.
+ * This way, the receiver doesn't take cache line misses to read the rtable.
*/
-int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
- if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
- skb_dst_drop(skb);
- return sock_queue_rcv_skb(sk, skb);
+ struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt) {
+ pktinfo->ipi_ifindex = rt->rt_iif;
+ pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+ } else {
+ pktinfo->ipi_ifindex = 0;
+ pktinfo->ipi_spec_dst.s_addr = 0;
+ }
+ skb_dst_drop(skb);
}
-EXPORT_SYMBOL(ip_queue_rcv_skb);
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
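
PKTINFO_SKB_CB() above reuses the skb->cb[] scratch area: ipv4_pktinfo_prepare() copies the bits of the rtable that IP_PKTINFO needs into cb[] while the route's cache lines are still hot, and ip_cmsg_recv_pktinfo() later reads them back without touching the dst. Below is a generic sketch of that control-buffer hand-off; struct buf and struct rx_meta are illustrative stand-ins for sk_buff and in_pktinfo, not kernel types.

#include <stdio.h>
#include <string.h>

/* A buffer descriptor with a small per-layer scratch area, plus the metadata
 * one layer wants to hand to a later one. */
struct buf {
    char cb[48];                    /* like skb->cb[]: owned by the current layer */
};

struct rx_meta {
    int ifindex;
    unsigned int spec_dst;
};

/* Same trick as PKTINFO_SKB_CB(): reinterpret the scratch area as a struct. */
#define RX_META(b) ((struct rx_meta *)((b)->cb))

static void early_layer_prepare(struct buf *b, int ifindex, unsigned int spec_dst)
{
    /* The cast is only safe if the struct fits in the scratch area. */
    _Static_assert(sizeof(struct rx_meta) <= sizeof(((struct buf *)0)->cb),
                   "metadata must fit in cb[]");
    RX_META(b)->ifindex = ifindex;
    RX_META(b)->spec_dst = spec_dst;
    /* ...the expensive per-packet state (the rtable in the patch) can be
     * released here, while its cache lines are still hot... */
}

static void late_layer_consume(const struct buf *b)
{
    const struct rx_meta *m = (const struct rx_meta *)b->cb;

    printf("ifindex=%d spec_dst=%#x\n", m->ifindex, m->spec_dst);
}

int main(void)
{
    struct buf b;

    memset(&b, 0, sizeof(b));
    early_layer_prepare(&b, 2, 0xc0a80101);
    late_layer_consume(&b);
    return 0;
}
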
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc97f3..7e4ec9fc2ce 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
}
}
+ /* no point in waiting if we could not bring up at least one device */
+ if (!ic_first_dev)
+ goto have_carrier;
+
/* wait for a carrier on at least one device */
start = jiffies;
while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
@@ -763,13 +767,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
struct sk_buff *skb;
struct bootp_pkt *b;
struct iphdr *h;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
/* Allocate packet */
- skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+ skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
GFP_KERNEL);
if (!skb)
return;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
memset(b, 0, sizeof(struct bootp_pkt));
@@ -822,8 +828,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
if (dev_hard_header(skb, dev, ntohs(skb->protocol),
- dev->broadcast, dev->dev_addr, skb->len) < 0 ||
- dev_queue_xmit(skb) < 0)
+ dev->broadcast, dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ printk("E");
+ return;
+ }
+
+ if (dev_queue_xmit(skb) < 0)
printk("E");
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349..413ed1ba7a5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -148,7 +148,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipip_get_stats(struct net_device *dev)
{
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip_tunnel_link(ipn, nt);
return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
+ struct ip_tunnel *t;
int err;
ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(ipn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 76a7f07b38b..8e54490ee3f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1520,7 +1520,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
struct mr_table *mrt;
struct vif_device *v;
int ct;
- LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
@@ -1529,10 +1528,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
v = &mrt->vif_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
- vif_delete(mrt, ct, 1, &list);
+ vif_delete(mrt, ct, 1, NULL);
}
}
- unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 9899619ab9b..4f47e064e26 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
- pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+ pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+ 0, GFP_ATOMIC))
return -1;
return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 1dfc18a03fd..74dfc9e5211 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -27,7 +27,7 @@ config NF_CONNTRACK_IPV4
config NF_CONNTRACK_PROC_COMPAT
bool "proc/sysctl compatibility with old connection tracking"
- depends on NF_CONNTRACK_IPV4
+ depends on NF_CONNTRACK_PROCFS && NF_CONNTRACK_IPV4
default y
help
This option enables /proc and sysctl compatibility with the old
@@ -76,11 +76,21 @@ config IP_NF_MATCH_AH
config IP_NF_MATCH_ECN
tristate '"ecn" match support'
depends on NETFILTER_ADVANCED
- help
- This option adds a `ECN' match, which allows you to match against
- the IPv4 and TCP header ECN fields.
+ select NETFILTER_XT_MATCH_ECN
+ ---help---
+ This is a backwards-compat option for the user's convenience
+ (e.g. when running oldconfig). It selects
+ CONFIG_NETFILTER_XT_MATCH_ECN.
+
+config IP_NF_MATCH_RPFILTER
+ tristate '"rpfilter" reverse path filter match support'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option allows you to match packets whose replies would
+ go out via the interface the packet came in.
To compile it as a module, choose M here. If unsure, say N.
+ The module will be called ipt_rpfilter.
config IP_NF_MATCH_TTL
tristate '"ttl" match support'
@@ -325,7 +335,6 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index dca2082ec68..213a462b739 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
# matches
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
-obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
# targets
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e59aabd0eae..a057fe64deb 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -404,6 +404,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
int status, type, pid, flags;
unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
+ bool enable_timestamp = false;
skblen = skb->len;
if (skblen < sizeof(*nlh))
@@ -441,12 +442,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
RCV_SKB_FAIL(-EBUSY);
}
} else {
- net_enable_timestamp();
+ enable_timestamp = true;
peer_pid = pid;
}
spin_unlock_bh(&queue_lock);
-
+ if (enable_timestamp)
+ net_enable_timestamp();
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
if (status < 0)
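
The __ipq_rcv_skb() change above moves net_enable_timestamp() out from under queue_lock: the decision is remembered in a local flag while the lock is held and the call is made only after spin_unlock_bh(). Below is a userspace sketch of that defer-until-unlocked pattern; enable_feature() and register_peer() are hypothetical names, not kernel functions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for net_enable_timestamp(): something we do not want to call while
 * holding the lock, because it may take other locks of its own. */
static void enable_feature(void)
{
    printf("feature enabled outside the lock\n");
}

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered_peer;

static void register_peer(int peer)
{
    bool enable = false;

    pthread_mutex_lock(&state_lock);
    if (registered_peer == 0) {
        registered_peer = peer;
        enable = true;              /* only remember the decision here */
    }
    pthread_mutex_unlock(&state_lock);

    if (enable)                     /* act on it after the lock is dropped */
        enable_feature();
}

int main(void)
{
    register_peer(42);
    register_peer(43);              /* already registered: no second enable */
    return 0;
}
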
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 9931152a78b..2f210c79dc8 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -30,9 +30,9 @@ MODULE_DESCRIPTION("Xtables: automatic-address SNAT");
/* FIXME: Multiple targets. --RR */
static int masquerade_tg_check(const struct xt_tgchk_param *par)
{
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
- if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+ if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
@@ -49,8 +49,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct nf_conn *ct;
struct nf_conn_nat *nat;
enum ip_conntrack_info ctinfo;
- struct nf_nat_range newrange;
- const struct nf_nat_multi_range_compat *mr;
+ struct nf_nat_ipv4_range newrange;
+ const struct nf_nat_ipv4_multi_range_compat *mr;
const struct rtable *rt;
__be32 newsrc;
@@ -79,13 +79,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
nat->masq_index = par->out->ifindex;
/* Transfer from original range. */
- newrange = ((struct nf_nat_range)
- { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+ newrange = ((struct nf_nat_ipv4_range)
+ { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
newsrc, newsrc,
mr->range[0].min, mr->range[0].max });
/* Hand modified range to generic setup. */
- return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_SRC);
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}
static int
@@ -139,7 +139,7 @@ static struct xt_target masquerade_tg_reg __read_mostly = {
.name = "MASQUERADE",
.family = NFPROTO_IPV4,
.target = masquerade_tg,
- .targetsize = sizeof(struct nf_nat_multi_range_compat),
+ .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = 1 << NF_INET_POST_ROUTING,
.checkentry = masquerade_tg_check,
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 6cdb298f103..b5bfbbabf70 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -24,9 +24,9 @@ MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
static int netmap_tg_check(const struct xt_tgchk_param *par)
{
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
- if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) {
+ if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
@@ -43,8 +43,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
__be32 new_ip, netmask;
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
- struct nf_nat_range newrange;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+ struct nf_nat_ipv4_range newrange;
NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
par->hooknum == NF_INET_POST_ROUTING ||
@@ -61,8 +61,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
new_ip = ip_hdr(skb)->saddr & ~netmask;
new_ip |= mr->range[0].min_ip & netmask;
- newrange = ((struct nf_nat_range)
- { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+ newrange = ((struct nf_nat_ipv4_range)
+ { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
new_ip, new_ip,
mr->range[0].min, mr->range[0].max });
@@ -74,7 +74,7 @@ static struct xt_target netmap_tg_reg __read_mostly = {
.name = "NETMAP",
.family = NFPROTO_IPV4,
.target = netmap_tg,
- .targetsize = sizeof(struct nf_nat_multi_range_compat),
+ .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING) |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 18a0656505a..7c0103a5203 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -28,9 +28,9 @@ MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
/* FIXME: Take multiple ranges --RR */
static int redirect_tg_check(const struct xt_tgchk_param *par)
{
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
- if (mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) {
+ if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
pr_debug("bad MAP_IPS.\n");
return -EINVAL;
}
@@ -47,8 +47,8 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
__be32 newdst;
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
- struct nf_nat_range newrange;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+ struct nf_nat_ipv4_range newrange;
NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
par->hooknum == NF_INET_LOCAL_OUT);
@@ -76,20 +76,20 @@ redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
}
/* Transfer from original range. */
- newrange = ((struct nf_nat_range)
- { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS,
+ newrange = ((struct nf_nat_ipv4_range)
+ { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
newdst, newdst,
mr->range[0].min, mr->range[0].max });
/* Hand modified range to generic setup. */
- return nf_nat_setup_info(ct, &newrange, IP_NAT_MANIP_DST);
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
}
static struct xt_target redirect_tg_reg __read_mostly = {
.name = "REDIRECT",
.family = NFPROTO_IPV4,
.target = redirect_tg,
- .targetsize = sizeof(struct nf_nat_multi_range_compat),
+ .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
.checkentry = redirect_tg_check,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index b5508151e54..ba5756d2016 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -65,7 +65,7 @@ static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
-static int nflog = 1;
+static bool nflog = true;
module_param(nflog, bool, 0400);
MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
deleted file mode 100644
index 2b57e52c746..00000000000
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/* IP tables module for matching the value of the IPv4 and TCP ECN bits
- *
- * (C) 2002 by Harald Welte <laforge@gnumonks.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_ecn.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match for IPv4");
-MODULE_LICENSE("GPL");
-
-static inline bool match_ip(const struct sk_buff *skb,
- const struct ipt_ecn_info *einfo)
-{
- return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
- !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
-}
-
-static inline bool match_tcp(const struct sk_buff *skb,
- const struct ipt_ecn_info *einfo,
- bool *hotdrop)
-{
- struct tcphdr _tcph;
- const struct tcphdr *th;
-
- /* In practice, TCP match does this, so can't fail. But let's
- * be good citizens.
- */
- th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
- if (th == NULL) {
- *hotdrop = false;
- return false;
- }
-
- if (einfo->operation & IPT_ECN_OP_MATCH_ECE) {
- if (einfo->invert & IPT_ECN_OP_MATCH_ECE) {
- if (th->ece == 1)
- return false;
- } else {
- if (th->ece == 0)
- return false;
- }
- }
-
- if (einfo->operation & IPT_ECN_OP_MATCH_CWR) {
- if (einfo->invert & IPT_ECN_OP_MATCH_CWR) {
- if (th->cwr == 1)
- return false;
- } else {
- if (th->cwr == 0)
- return false;
- }
- }
-
- return true;
-}
-
-static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
-{
- const struct ipt_ecn_info *info = par->matchinfo;
-
- if (info->operation & IPT_ECN_OP_MATCH_IP)
- if (!match_ip(skb, info))
- return false;
-
- if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
- if (!match_tcp(skb, info, &par->hotdrop))
- return false;
- }
-
- return true;
-}
-
-static int ecn_mt_check(const struct xt_mtchk_param *par)
-{
- const struct ipt_ecn_info *info = par->matchinfo;
- const struct ipt_ip *ip = par->entryinfo;
-
- if (info->operation & IPT_ECN_OP_MATCH_MASK)
- return -EINVAL;
-
- if (info->invert & IPT_ECN_OP_MATCH_MASK)
- return -EINVAL;
-
- if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
- (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
- pr_info("cannot match TCP bits in rule for non-tcp packets\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct xt_match ecn_mt_reg __read_mostly = {
- .name = "ecn",
- .family = NFPROTO_IPV4,
- .match = ecn_mt,
- .matchsize = sizeof(struct ipt_ecn_info),
- .checkentry = ecn_mt_check,
- .me = THIS_MODULE,
-};
-
-static int __init ecn_mt_init(void)
-{
- return xt_register_match(&ecn_mt_reg);
-}
-
-static void __exit ecn_mt_exit(void)
-{
- xt_unregister_match(&ecn_mt_reg);
-}
-
-module_init(ecn_mt_init);
-module_exit(ecn_mt_exit);
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
new file mode 100644
index 00000000000..31371be8174
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match");
+
+/* don't try to find route from mcast/bcast/zeronet */
+static __be32 rpfilter_get_saddr(__be32 addr)
+{
+ if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
+ ipv4_is_zeronet(addr))
+ return 0;
+ return addr;
+}
+
+static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
+ const struct net_device *dev, u8 flags)
+{
+ struct fib_result res;
+ bool dev_match;
+ struct net *net = dev_net(dev);
+ int ret __maybe_unused;
+
+ if (fib_lookup(net, fl4, &res))
+ return false;
+
+ if (res.type != RTN_UNICAST) {
+ if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
+ return false;
+ }
+ dev_match = false;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+ struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+ if (nh->nh_dev == dev) {
+ dev_match = true;
+ break;
+ }
+ }
+#else
+ if (FIB_RES_DEV(res) == dev)
+ dev_match = true;
+#endif
+ if (dev_match || flags & XT_RPFILTER_LOOSE)
+ return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
+ return dev_match;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_rpfilter_info *info;
+ const struct iphdr *iph;
+ struct flowi4 flow;
+ bool invert;
+
+ info = par->matchinfo;
+ invert = info->flags & XT_RPFILTER_INVERT;
+
+ if (par->in->flags & IFF_LOOPBACK)
+ return true ^ invert;
+
+ iph = ip_hdr(skb);
+ if (ipv4_is_multicast(iph->daddr)) {
+ if (ipv4_is_zeronet(iph->saddr))
+ return ipv4_is_local_multicast(iph->daddr) ^ invert;
+ flow.flowi4_iif = 0;
+ } else {
+ flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+ }
+
+ flow.daddr = iph->saddr;
+ flow.saddr = rpfilter_get_saddr(iph->daddr);
+ flow.flowi4_oif = 0;
+ flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+ flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+
+ return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+ const struct xt_rpfilter_info *info = par->matchinfo;
+ unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+ if (info->flags & options) {
+ pr_info("unknown options encountered");
+ return -EINVAL;
+ }
+
+ if (strcmp(par->table, "mangle") != 0 &&
+ strcmp(par->table, "raw") != 0) {
+ pr_info("match only valid in the \'raw\' "
+ "or \'mangle\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+ .name = "rpfilter",
+ .family = NFPROTO_IPV4,
+ .checkentry = rpfilter_check,
+ .match = rpfilter_mt,
+ .matchsize = sizeof(struct xt_rpfilter_info),
+ .hooks = (1 << NF_INET_PRE_ROUTING),
+ .me = THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+ return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+ xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
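
The new rpfilter match follows the usual xtables conventions: rpfilter_check() rejects any flag bit outside XT_RPFILTER_OPTION_MASK, and rpfilter_mt() XORs its verdict with the invert flag. Below is a standalone sketch of those two idioms; the DEMO_RPF_* names and values are illustrative only, the real definitions live in linux/netfilter/xt_rpfilter.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits standing in for the XT_RPFILTER_* UAPI constants. */
#define DEMO_RPF_LOOSE        (1 << 0)
#define DEMO_RPF_VALID_MARK   (1 << 1)
#define DEMO_RPF_ACCEPT_LOCAL (1 << 2)
#define DEMO_RPF_INVERT       (1 << 3)
#define DEMO_RPF_OPTION_MASK  (DEMO_RPF_LOOSE | DEMO_RPF_VALID_MARK | \
                               DEMO_RPF_ACCEPT_LOCAL | DEMO_RPF_INVERT)

struct demo_rpf_info { unsigned int flags; };

/* Mirrors rpfilter_check(): refuse flag bits this version does not know about. */
static int demo_checkentry(const struct demo_rpf_info *info)
{
    if (info->flags & ~DEMO_RPF_OPTION_MASK)
        return -1;                  /* the kernel returns -EINVAL */
    return 0;
}

/* Mirrors the "result ^ invert" idiom at the end of rpfilter_mt(). */
static bool demo_match(const struct demo_rpf_info *info, bool reverse_path_ok)
{
    bool invert = info->flags & DEMO_RPF_INVERT;

    return reverse_path_ok ^ invert;
}

int main(void)
{
    struct demo_rpf_info plain  = { .flags = 0 };
    struct demo_rpf_info invert = { .flags = DEMO_RPF_INVERT };
    struct demo_rpf_info bogus  = { .flags = 1 << 14 };

    printf("checkentry(bogus) = %d\n", demo_checkentry(&bogus));          /* -1 */
    printf("plain:  ok->%d fail->%d\n",
           demo_match(&plain, true), demo_match(&plain, false));          /* 1 0 */
    printf("invert: ok->%d fail->%d\n",
           demo_match(&invert, true), demo_match(&invert, false));        /* 0 1 */
    return 0;
}

In practice the inverted form is the useful one: match packets that fail the reverse-path test and drop them from a raw or mangle PREROUTING rule, the only placement rpfilter_check() and the hooks mask above accept.
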
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c37641e819f..0e58f09e59f 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -52,7 +52,7 @@ iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
module_param(forward, bool, 0000);
static int __net_init iptable_filter_net_init(struct net *net)
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 447bc5cfdc6..a708933dc23 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -30,7 +30,6 @@
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
static DEFINE_SPINLOCK(nf_nat_lock);
@@ -57,7 +56,7 @@ hash_by_src(const struct net *net, u16 zone,
/* Original src, to ensure we map it consistently if poss. */
hash = jhash_3words((__force u32)tuple->src.u3.ip,
(__force u32)tuple->src.u.all ^ zone,
- tuple->dst.protonum, 0);
+ tuple->dst.protonum, nf_conntrack_hash_rnd);
return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
@@ -82,14 +81,14 @@ EXPORT_SYMBOL(nf_nat_used_tuple);
* that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range)
+ const struct nf_nat_ipv4_range *range)
{
const struct nf_nat_protocol *proto;
int ret = 0;
/* If we are supposed to map IPs, then we must be in the
range specified, otherwise let this drag us onto a new src IP. */
- if (range->flags & IP_NAT_RANGE_MAP_IPS) {
+ if (range->flags & NF_NAT_RANGE_MAP_IPS) {
if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
return 0;
@@ -97,8 +96,8 @@ in_range(const struct nf_conntrack_tuple *tuple,
rcu_read_lock();
proto = __nf_nat_proto_find(tuple->dst.protonum);
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
- proto->in_range(tuple, IP_NAT_MANIP_SRC,
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
+ proto->in_range(tuple, NF_NAT_MANIP_SRC,
&range->min, &range->max))
ret = 1;
rcu_read_unlock();
@@ -123,7 +122,7 @@ static int
find_appropriate_src(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
- const struct nf_nat_range *range)
+ const struct nf_nat_ipv4_range *range)
{
unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn_nat *nat;
@@ -157,7 +156,7 @@ find_appropriate_src(struct net *net, u16 zone,
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
@@ -166,10 +165,10 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
u_int32_t minip, maxip, j;
/* No IP mapping? Do nothing. */
- if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
+ if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
return;
- if (maniptype == IP_NAT_MANIP_SRC)
+ if (maniptype == NF_NAT_MANIP_SRC)
var_ipp = &tuple->src.u3.ip;
else
var_ipp = &tuple->dst.u3.ip;
@@ -189,7 +188,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
minip = ntohl(range->min_ip);
maxip = ntohl(range->max_ip);
j = jhash_2words((__force u32)tuple->src.u3.ip,
- range->flags & IP_NAT_RANGE_PERSISTENT ?
+ range->flags & NF_NAT_RANGE_PERSISTENT ?
0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
j = ((u64)j * (maxip - minip + 1)) >> 32;
*var_ipp = htonl(minip + j);
@@ -204,7 +203,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig_tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
@@ -219,8 +218,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
This is only required for source (ie. NAT/masq) mappings.
So far, we don't do local source mappings, so multiple
manips not an issue. */
- if (maniptype == IP_NAT_MANIP_SRC &&
- !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
+ if (maniptype == NF_NAT_MANIP_SRC &&
+ !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
/* try the original tuple first */
if (in_range(orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -247,8 +246,8 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
/* Only bother mapping if it's not already in range and unique */
- if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
- if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) {
+ if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+ if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (proto->in_range(tuple, maniptype, &range->min,
&range->max) &&
(range->min.all == range->max.all ||
@@ -267,7 +266,7 @@ out:
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype)
{
struct net *net = nf_ct_net(ct);
@@ -284,8 +283,8 @@ nf_nat_setup_info(struct nf_conn *ct,
}
}
- NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
- maniptype == IP_NAT_MANIP_DST);
+ NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
+ maniptype == NF_NAT_MANIP_DST);
BUG_ON(nf_nat_initialized(ct, maniptype));
/* What we've got will look like inverse of reply. Normally
@@ -306,19 +305,19 @@ nf_nat_setup_info(struct nf_conn *ct,
nf_conntrack_alter_reply(ct, &reply);
/* Non-atomic: we own this at the moment. */
- if (maniptype == IP_NAT_MANIP_SRC)
+ if (maniptype == NF_NAT_MANIP_SRC)
ct->status |= IPS_SRC_NAT;
else
ct->status |= IPS_DST_NAT;
}
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
unsigned int srchash;
srchash = hash_by_src(net, nf_ct_zone(ct),
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
- /* nf_conntrack_alter_reply might re-allocate exntension aera */
+ /* nf_conntrack_alter_reply might re-allocate extension area */
nat = nfct_nat(ct);
nat->ct = ct;
hlist_add_head_rcu(&nat->bysource,
@@ -327,7 +326,7 @@ nf_nat_setup_info(struct nf_conn *ct,
}
/* It's done. */
- if (maniptype == IP_NAT_MANIP_DST)
+ if (maniptype == NF_NAT_MANIP_DST)
ct->status |= IPS_DST_NAT_DONE;
else
ct->status |= IPS_SRC_NAT_DONE;
@@ -361,7 +360,7 @@ manip_pkt(u_int16_t proto,
iph = (void *)skb->data + iphdroff;
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
iph->saddr = target->src.u3.ip;
} else {
@@ -381,7 +380,7 @@ unsigned int nf_nat_packet(struct nf_conn *ct,
unsigned long statusbit;
enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
- if (mtype == IP_NAT_MANIP_SRC)
+ if (mtype == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
@@ -414,8 +413,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
struct icmphdr icmp;
struct iphdr ip;
} *inside;
- const struct nf_conntrack_l4proto *l4proto;
- struct nf_conntrack_tuple inner, target;
+ struct nf_conntrack_tuple target;
int hdrlen = ip_hdrlen(skb);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned long statusbit;
@@ -447,7 +445,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
return 0;
}
- if (manip == IP_NAT_MANIP_SRC)
+ if (manip == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
@@ -463,16 +461,6 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
"dir %s\n", skb, manip,
dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
- /* rcu_read_lock()ed by nf_hook_slow */
- l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
-
- if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
- (hdrlen +
- sizeof(struct icmphdr) + inside->ip.ihl * 4),
- (u_int16_t)AF_INET, inside->ip.protocol,
- &inner, l3proto, l4proto))
- return 0;
-
/* Change inner back to look like incoming packet. We do the
opposite manip on this hook to normal, because it might not
pass all hooks (locally-generated ICMP). Consider incoming
@@ -575,26 +563,6 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
- const struct nf_nat_protocol *p;
-
- rcu_read_lock();
- p = __nf_nat_proto_find(protonum);
- if (!try_module_get(p->me))
- p = &nf_nat_unknown_protocol;
- rcu_read_unlock();
-
- return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
- module_put(p->me);
-}
-
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
@@ -602,7 +570,7 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
const struct nf_conn *ct,
- struct nf_nat_range *range)
+ struct nf_nat_ipv4_range *range)
{
struct nlattr *tb[CTA_PROTONAT_MAX+1];
const struct nf_nat_protocol *npt;
@@ -612,21 +580,23 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
if (err < 0)
return err;
- npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
+ rcu_read_lock();
+ npt = __nf_nat_proto_find(nf_ct_protonum(ct));
if (npt->nlattr_to_range)
err = npt->nlattr_to_range(tb, range);
- nf_nat_proto_put(npt);
+ rcu_read_unlock();
return err;
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
[CTA_NAT_MINIP] = { .type = NLA_U32 },
[CTA_NAT_MAXIP] = { .type = NLA_U32 },
+ [CTA_NAT_PROTO] = { .type = NLA_NESTED },
};
static int
nfnetlink_parse_nat(const struct nlattr *nat,
- const struct nf_conn *ct, struct nf_nat_range *range)
+ const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
{
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
@@ -646,7 +616,7 @@ nfnetlink_parse_nat(const struct nlattr *nat,
range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);
if (range->min_ip)
- range->flags |= IP_NAT_RANGE_MAP_IPS;
+ range->flags |= NF_NAT_RANGE_MAP_IPS;
if (!tb[CTA_NAT_PROTO])
return 0;
@@ -663,7 +633,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
if (nfnetlink_parse_nat(attr, ct, &range) < 0)
return -EINVAL;
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index b9a1136addb..dc1dd912baf 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -398,7 +398,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
static void ip_nat_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this)
{
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */
nf_nat_follow_master(new, this);
@@ -409,16 +409,16 @@ static void ip_nat_q931_expect(struct nf_conn *new,
BUG_ON(new->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
- nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+ nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
- range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min = range.max = this->saved_proto;
range.min_ip = range.max_ip =
new->master->tuplehash[!this->dir].tuple.src.u3.ip;
- nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+ nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
}
/****************************************************************************/
@@ -496,21 +496,21 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
static void ip_nat_callforwarding_expect(struct nf_conn *new,
struct nf_conntrack_expect *this)
{
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
/* This must be a fresh one. */
BUG_ON(new->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
- nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC);
+ nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
- range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min = range.max = this->saved_proto;
range.min_ip = range.max_ip = this->saved_ip;
- nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST);
+ nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
}
/****************************************************************************/
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index ebc5f8894f9..af65958f630 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -253,12 +253,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
struct udphdr *udph;
int datalen, oldlen;
- /* UDP helpers might accidentally mangle the wrong packet */
- iph = ip_hdr(skb);
- if (skb->len < iph->ihl*4 + sizeof(*udph) +
- match_offset + match_len)
- return 0;
-
if (!skb_make_writable(skb, skb->len))
return 0;
@@ -430,22 +424,22 @@ nf_nat_seq_adjust(struct sk_buff *skb,
void nf_nat_follow_master(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
/* This must be a fresh one. */
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip
= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
- range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min = range.max = exp->saved_proto;
range.min_ip = range.max_ip
= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 3e8284ba46b..c273d58980a 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -47,7 +47,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
struct nf_conntrack_tuple t;
const struct nf_ct_pptp_master *ct_pptp_info;
const struct nf_nat_pptp *nat_pptp_info;
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -88,24 +88,24 @@ static void pptp_nat_expected(struct nf_conn *ct,
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* Change src to where master sends to */
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip
= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
if (exp->dir == IP_CT_DIR_ORIGINAL) {
- range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
range.min = range.max = exp->saved_proto;
}
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
/* For DST manip, map port here to where it's expected. */
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip
= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
if (exp->dir == IP_CT_DIR_REPLY) {
- range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
range.min = range.max = exp->saved_proto;
}
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
/* outbound packets == from PNS to PAC */
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index a3d99761860..9993bc93e10 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -26,7 +26,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
{
__be16 port;
- if (maniptype == IP_NAT_MANIP_SRC)
+ if (maniptype == NF_NAT_MANIP_SRC)
port = tuple->src.u.all;
else
port = tuple->dst.u.all;
@@ -37,7 +37,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover)
@@ -46,15 +46,15 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
__be16 *portptr;
u_int16_t off;
- if (maniptype == IP_NAT_MANIP_SRC)
+ if (maniptype == NF_NAT_MANIP_SRC)
portptr = &tuple->src.u.all;
else
portptr = &tuple->dst.u.all;
/* If no range specified... */
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
- if (maniptype == IP_NAT_MANIP_DST)
+ if (maniptype == NF_NAT_MANIP_DST)
return;
if (ntohs(*portptr) < 1024) {
@@ -75,9 +75,9 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.all) - min + 1;
}
- if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+ if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
- maniptype == IP_NAT_MANIP_SRC
+ maniptype == NF_NAT_MANIP_SRC
? tuple->dst.u.all
: tuple->src.u.all);
else
@@ -87,7 +87,7 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
*portptr = htons(min + off % range_size);
if (++i != range_size && nf_nat_used_tuple(tuple, ct))
continue;
- if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
+ if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
*rover = off;
return;
}
@@ -96,31 +96,19 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
- const struct nf_nat_range *range)
-{
- NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
- NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
- return 0;
-
-nla_put_failure:
- return -1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
-
int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range *range)
+ struct nf_nat_ipv4_range *range)
{
if (tb[CTA_PROTONAT_PORT_MIN]) {
range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
range->max.all = range->min.tcp.port;
- range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[CTA_PROTONAT_PORT_MAX]) {
range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
- range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
return 0;
}
-EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
+EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
#endif
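The renamed nf_nat_proto_unique_tuple() above walks candidate ports as min + off % range_size, where off is either the saved rover or a per-tuple random offset. A small userspace mock of that arithmetic (the port range and offset below are made-up values, not taken from the patch):

	/* Userspace mock of the candidate-port arithmetic used by
	 * nf_nat_proto_unique_tuple() in the hunk above; the concrete
	 * numbers are illustrative only. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int min = 10000;        /* ntohs(range->min.all), assumed */
		unsigned int range_size = 2000;  /* max - min + 1, assumed         */
		unsigned int off = 123456;       /* rover or secure random offset  */
		unsigned int i;

		/* The kernel loop tries min + off % range_size, then off + 1, ... */
		for (i = 0; i < 3; i++)
			printf("candidate %u: %u\n", i, min + (off + i) % range_size);
		return 0;
	}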
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
index 570faf2667b..3f67138d187 100644
--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c
@@ -24,7 +24,7 @@ static u_int16_t dccp_port_rover;
static void
dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -54,7 +54,7 @@ dccp_manip_pkt(struct sk_buff *skb,
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct dccp_hdr *)(skb->data + hdroff);
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.dccp.port;
@@ -80,12 +80,10 @@ dccp_manip_pkt(struct sk_buff *skb,
static const struct nf_nat_protocol nf_nat_protocol_dccp = {
.protonum = IPPROTO_DCCP,
- .me = THIS_MODULE,
.manip_pkt = dccp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = dccp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index bc8d83a31c7..46ba0b9ab98 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -39,7 +39,7 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
/* generate unique tuple ... */
static void
gre_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -52,12 +52,12 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
if (!ct->master)
return;
- if (maniptype == IP_NAT_MANIP_SRC)
+ if (maniptype == NF_NAT_MANIP_SRC)
keyptr = &tuple->src.u.gre.key;
else
keyptr = &tuple->dst.u.gre.key;
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
pr_debug("%p: NATing GRE PPTP\n", ct);
min = 1;
range_size = 0xffff;
@@ -99,7 +99,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
/* we only have destination manip of a packet, since 'source key'
* is not present in the packet itself */
- if (maniptype != IP_NAT_MANIP_DST)
+ if (maniptype != NF_NAT_MANIP_DST)
return true;
switch (greh->version) {
case GRE_VERSION_1701:
@@ -119,12 +119,10 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
static const struct nf_nat_protocol gre = {
.protonum = IPPROTO_GRE,
- .me = THIS_MODULE,
.manip_pkt = gre_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = gre_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 9f4dc1235dc..b35172851ba 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -30,7 +30,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
static void
icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -40,7 +40,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
/* If no range specified... */
- if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
range_size = 0xFFFF;
for (i = 0; ; ++id) {
@@ -74,12 +74,10 @@ icmp_manip_pkt(struct sk_buff *skb,
const struct nf_nat_protocol nf_nat_protocol_icmp = {
.protonum = IPPROTO_ICMP,
- .me = THIS_MODULE,
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index bd5a80a62a5..3cce9b6c1c2 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -19,7 +19,7 @@ static u_int16_t nf_sctp_port_rover;
static void
sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -46,7 +46,7 @@ sctp_manip_pkt(struct sk_buff *skb,
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct sctphdr *)(skb->data + hdroff);
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
@@ -70,12 +70,10 @@ sctp_manip_pkt(struct sk_buff *skb,
static const struct nf_nat_protocol nf_nat_protocol_sctp = {
.protonum = IPPROTO_SCTP,
- .me = THIS_MODULE,
.manip_pkt = sctp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 0d67bb80130..9fb4b4e72bb 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -23,7 +23,7 @@ static u_int16_t tcp_port_rover;
static void
tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -55,7 +55,7 @@ tcp_manip_pkt(struct sk_buff *skb,
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct tcphdr *)(skb->data + hdroff);
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
@@ -82,12 +82,10 @@ tcp_manip_pkt(struct sk_buff *skb,
const struct nf_nat_protocol nf_nat_protocol_tcp = {
.protonum = IPPROTO_TCP,
- .me = THIS_MODULE,
.manip_pkt = tcp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = tcp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 0b1b8601cba..9883336e628 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -22,7 +22,7 @@ static u_int16_t udp_port_rover;
static void
udp_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -47,7 +47,7 @@ udp_manip_pkt(struct sk_buff *skb,
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
@@ -73,12 +73,10 @@ udp_manip_pkt(struct sk_buff *skb,
const struct nf_nat_protocol nf_nat_protocol_udp = {
.protonum = IPPROTO_UDP,
- .me = THIS_MODULE,
.manip_pkt = udp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index f83ef23e2ab..d24d10a7beb 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -21,7 +21,7 @@ static u_int16_t udplite_port_rover;
static void
udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -47,7 +47,7 @@ udplite_manip_pkt(struct sk_buff *skb,
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);
- if (maniptype == IP_NAT_MANIP_SRC) {
+ if (maniptype == NF_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
@@ -72,12 +72,10 @@ udplite_manip_pkt(struct sk_buff *skb,
static const struct nf_nat_protocol nf_nat_protocol_udplite = {
.protonum = IPPROTO_UDPLITE,
- .me = THIS_MODULE,
.manip_pkt = udplite_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udplite_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
- .range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
index a50f2bc1c73..e0afe8112b1 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -27,7 +27,7 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
}
static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range *range,
+ const struct nf_nat_ipv4_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
@@ -46,7 +46,6 @@ unknown_manip_pkt(struct sk_buff *skb,
}
const struct nf_nat_protocol nf_nat_unknown_protocol = {
- /* .me isn't set: getting a ref to this cannot fail. */
.manip_pkt = unknown_manip_pkt,
.in_range = unknown_in_range,
.unique_tuple = unknown_unique_tuple,
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 733c9abc1cb..d2a9dc314e0 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -44,7 +44,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
par->hooknum == NF_INET_LOCAL_IN);
@@ -56,7 +56,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
ctinfo == IP_CT_RELATED_REPLY));
NF_CT_ASSERT(par->out != NULL);
- return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
+ return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
}
static unsigned int
@@ -64,7 +64,7 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
par->hooknum == NF_INET_LOCAL_OUT);
@@ -74,12 +74,12 @@ ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
/* Connection must be valid and new. */
NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
- return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
+ return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
}
static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
{
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
@@ -91,7 +91,7 @@ static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
{
- const struct nf_nat_multi_range_compat *mr = par->targinfo;
+ const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
@@ -105,13 +105,13 @@ static unsigned int
alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
/* Force range to this IP; let proto decide mapping for
- per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+ per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
*/
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
range.flags = 0;
pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
- HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC ?
+ HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
@@ -140,7 +140,7 @@ int nf_nat_rule_find(struct sk_buff *skb,
static struct xt_target ipt_snat_reg __read_mostly = {
.name = "SNAT",
.target = ipt_snat_target,
- .targetsize = sizeof(struct nf_nat_multi_range_compat),
+ .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
.checkentry = ipt_snat_checkentry,
@@ -150,7 +150,7 @@ static struct xt_target ipt_snat_reg __read_mostly = {
static struct xt_target ipt_dnat_reg __read_mostly = {
.name = "DNAT",
.target = ipt_dnat_target,
- .targetsize = sizeof(struct nf_nat_multi_range_compat),
+ .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
.checkentry = ipt_dnat_checkentry,
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 78844d9208f..d0319f96269 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -249,25 +249,25 @@ static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
static void ip_nat_sip_expected(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
- struct nf_nat_range range;
+ struct nf_nat_ipv4_range range;
/* This must be a fresh one. */
BUG_ON(ct->status & IPS_NAT_DONE_MASK);
/* For DST manip, map port here to where it's expected. */
- range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
range.min = range.max = exp->saved_proto;
range.min_ip = range.max_ip = exp->saved_ip;
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
/* Change src to where master sends to, but only if the connection
* actually came from the same source. */
if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
- range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_ip = range.max_ip
= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
- nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
}
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 92900482ede..3828a422982 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -137,7 +137,7 @@ nf_nat_fn(unsigned int hooknum,
return ret;
} else
pr_debug("Already setup manip %s for ct %p\n",
- maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
+ maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
break;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 466ea8bb7a4..3569d8ecaea 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
local_bh_disable();
orphans = percpu_counter_sum_positive(&tcp_orphan_count);
- sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+ sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
local_bh_enable();
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
sock_prot_inuse_get(net, &tcp_prot), orphans,
tcp_death_row.tw_count, sockets,
- atomic_long_read(&tcp_memory_allocated));
+ proto_memory_allocated(&tcp_prot));
seq_printf(seq, "UDP: inuse %d mem %ld\n",
sock_prot_inuse_get(net, &udp_prot),
- atomic_long_read(&udp_memory_allocated));
+ proto_memory_allocated(&udp_prot));
seq_printf(seq, "UDPLITE: inuse %d\n",
sock_prot_inuse_get(net, &udplite_prot));
seq_printf(seq, "RAW: inuse %d\n",
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
- val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+ val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
if (val) {
type[count] = i;
vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
{
int i;
struct net *net = seq->private;
+ atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
seq_puts(seq, "\nIcmp: InMsgs InErrors");
for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
- icmpmibmap[i].index));
+ atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
- icmpmibmap[i].index | 0x100));
+ atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
}
/*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 007e2eb769d..3ccda5ae8a2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -292,7 +292,8 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
- if (ip_queue_rcv_skb(sk, skb) < 0) {
+ ipv4_pktinfo_prepare(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -327,6 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
unsigned int iphlen;
int err;
struct rtable *rt = *rtp;
+ int hlen, tlen;
if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -336,12 +338,14 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
if (flags&MSG_PROBE)
goto out;
+ hlen = LL_RESERVED_SPACE(rt->dst.dev);
+ tlen = rt->dst.dev->needed_tailroom;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+ length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0c74da8a047..bcacf54e541 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
@@ -108,11 +109,10 @@
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
-#include <net/atmclip.h>
#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
- ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define IP_MAX_MTU 0xFFF0
@@ -120,6 +120,7 @@
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -131,6 +132,10 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
+
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
/*
* Interface to generic destination cache.
@@ -138,7 +143,7 @@ static int rt_chain_length_max __read_mostly = 20;
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int ipv4_mtu(const struct dst_entry *dst);
static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +198,7 @@ static struct dst_ops ipv4_dst_ops = {
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
- .default_mtu = ipv4_default_mtu,
+ .mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
@@ -416,9 +421,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
else {
struct rtable *r = v;
struct neighbour *n;
- int len;
+ int len, HHUptod;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(&r->dst);
+ HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+ rcu_read_unlock();
- n = dst_get_neighbour(&r->dst);
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
"%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +441,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
dst_metric(&r->dst, RTAX_RTTVAR)),
r->rt_key_tos,
-1,
- (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+ HHUptod,
r->rt_spec_dst, &len);
seq_printf(seq, "%*s\n", 127 - len, "");
@@ -825,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
return ONE;
}
+static void rt_check_expire(void)
+{
+ static unsigned int rover;
+ unsigned int i = rover, goal;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
+ unsigned long samples = 0;
+ unsigned long sum = 0, sum2 = 0;
+ unsigned long delta;
+ u64 mult;
+
+ delta = jiffies - expires_ljiffies;
+ expires_ljiffies = jiffies;
+ mult = ((u64)delta) << rt_hash_log;
+ if (ip_rt_gc_timeout > 1)
+ do_div(mult, ip_rt_gc_timeout);
+ goal = (unsigned int)mult;
+ if (goal > rt_hash_mask)
+ goal = rt_hash_mask + 1;
+ for (; goal > 0; goal--) {
+ unsigned long tmo = ip_rt_gc_timeout;
+ unsigned long length;
+
+ i = (i + 1) & rt_hash_mask;
+ rthp = &rt_hash_table[i].chain;
+
+ if (need_resched())
+ cond_resched();
+
+ samples++;
+
+ if (rcu_dereference_raw(*rthp) == NULL)
+ continue;
+ length = 0;
+ spin_lock_bh(rt_hash_lock_addr(i));
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+ prefetch(rth->dst.rt_next);
+ if (rt_is_expired(rth)) {
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ continue;
+ }
+ if (rth->dst.expires) {
+ /* Entry is expired even if it is in use */
+ if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+ tmo >>= 1;
+ rthp = &rth->dst.rt_next;
+ /*
+ * We only count entries on
+ * a chain with equal hash inputs once
+ * so that entries for different QOS
+ * levels, and other non-hash input
+ * attributes don't unfairly skew
+ * the length computation
+ */
+ length += has_noalias(rt_hash_table[i].chain, rth);
+ continue;
+ }
+ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+ goto nofree;
+
+ /* Cleanup aged off entries. */
+ *rthp = rth->dst.rt_next;
+ rt_free(rth);
+ }
+ spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
+ }
+ rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ rt_check_expire();
+ schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -837,6 +937,7 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
}
/*
@@ -1013,23 +1114,18 @@ static int slow_chain_length(const struct rtable *head)
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
- struct neigh_table *tbl = &arp_tbl;
static const __be32 inaddr_any = 0;
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
struct neighbour *n;
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
- if (dev->type == ARPHRD_ATM)
- tbl = clip_tbl_hook;
-#endif
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
pkey = &inaddr_any;
- n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
+ n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
if (n)
return n;
- return neigh_create(tbl, pkey, dev);
+ return neigh_create(&arp_tbl, pkey, dev);
}
static int rt_bind_neighbour(struct rtable *rt)
@@ -1265,7 +1361,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
struct rtable *rt = (struct rtable *) dst;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
if (rt->peer == NULL)
rt_bind_peer(rt, rt->rt_dst, 1);
@@ -1276,7 +1372,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
iph->id = htons(inet_getid(rt->peer, more));
return;
}
- } else
+ } else if (!rt)
printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
__builtin_return_address(0));
@@ -1304,7 +1400,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
struct rtable *rt = (struct rtable *) dst;
__be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1411,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
rt->rt_gateway = peer->redirect_learned.a4;
n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ if (IS_ERR(n)) {
+ rt->rt_gateway = orig_gw;
+ return;
+ }
old_n = xchg(&rt->dst._neighbour, n);
if (old_n)
neigh_release(old_n);
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
- rt->rt_gateway = orig_gw;
- return -EAGAIN;
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
} else {
rt->rt_flags |= RTCF_REDIRECTED;
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
- return 0;
}
/* called in rcu_read_lock() section */
@@ -1391,8 +1485,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
+ if (peer->redirect_learned.a4 != new_gw ||
+ peer->redirect_genid != redirect_genid) {
peer->redirect_learned.a4 = new_gw;
+ peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1781,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
}
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt_is_expired(rt))
- return NULL;
if (rt->rt_peer_genid != rt_peer_genid()) {
struct inet_peer *peer;
@@ -1699,17 +1791,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
peer = rt->peer;
if (peer) {
- check_peer_pmtu(dst, peer);
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- if (check_peer_redir(dst, peer))
- return NULL;
- }
+ peer->redirect_learned.a4 != rt->rt_gateway)
+ check_peer_redir(&rt->dst, peer);
}
rt->rt_peer_genid = rt_peer_genid();
}
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
+ return NULL;
+ ipv4_validate_peer(rt);
return dst;
}
@@ -1814,12 +1915,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
return advmss;
}
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = dst->dev->mtu;
+ const struct rtable *rt = (const struct rtable *) dst;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu && rt_is_output_route(rt))
+ return mtu;
+
+ mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
- const struct rtable *rt = (const struct rtable *) dst;
if (rt->rt_gateway != rt->rt_dst && mtu > 576)
mtu = 576;
@@ -1852,6 +1958,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2465,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
if (noref) {
dst_use_noref(&rth->dst, jiffies);
skb_dst_set_noref(skb, &rth->dst);
@@ -2415,11 +2524,11 @@ EXPORT_SYMBOL(ip_route_input_common);
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4,
__be32 orig_daddr, __be32 orig_saddr,
- int orig_oif, struct net_device *dev_out,
+ int orig_oif, __u8 orig_rtos,
+ struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
- u32 tos = RT_FL_TOS(fl4);
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
@@ -2470,7 +2579,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_genid = rt_genid(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
- rth->rt_key_tos = tos;
+ rth->rt_key_tos = orig_rtos;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
rth->rt_route_iif = 0;
@@ -2520,7 +2629,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
- u32 tos = RT_FL_TOS(fl4);
+ __u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
@@ -2696,7 +2805,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
make_route:
rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
- dev_out, flags);
+ tos, dev_out, flags);
if (!IS_ERR(rth)) {
unsigned int hash;
@@ -2732,6 +2841,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
@@ -2755,9 +2865,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
return NULL;
}
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2887,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
- .default_mtu = ipv4_blackhole_default_mtu,
+ .mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
@@ -3157,6 +3269,13 @@ static ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
+ .procname = "gc_interval",
+ .data = &ip_rt_gc_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
@@ -3366,6 +3485,11 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
+ INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+ expires_ljiffies = jiffies;
+ schedule_delayed_work(&expires_work,
+ net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
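The rt_check_expire() worker added above sizes each pass as goal = (delta << rt_hash_log) / ip_rt_gc_timeout, capped at the whole table. A standalone mock of that sizing, with hz, the hash size and the elapsed time chosen purely for illustration (they are not values from the patch):

	/* Userspace mock of the per-run scan "goal" computed by
	 * rt_check_expire() in the hunk above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long hz = 100;			/* assumed jiffy rate      */
		unsigned long delta = 60 * hz;		/* one gc_interval elapsed */
		unsigned int rt_hash_log = 17;		/* 128k-slot hash, assumed */
		unsigned int rt_hash_mask = (1u << rt_hash_log) - 1;
		unsigned long ip_rt_gc_timeout = 300 * hz;
		unsigned long long mult = (unsigned long long)delta << rt_hash_log;
		unsigned int goal;

		if (ip_rt_gc_timeout > 1)
			mult /= ip_rt_gc_timeout;	/* do_div() in the kernel */
		goal = (unsigned int)mult;
		if (goal > rt_hash_mask)
			goal = rt_hash_mask + 1;

		/* With these numbers, 1/5 of the table is scanned per run. */
		printf("scan %u of %u buckets\n", goal, rt_hash_mask + 1);
		return 0;
	}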
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 90f6544c13e..51fdbb49043 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -245,7 +245,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
if (!sysctl_tcp_timestamps)
return false;
- tcp_opt->sack_ok = (options >> 4) & 0x1;
+ tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
*ecn_ok = (options >> 5) & 1;
if (*ecn_ok && !sysctl_tcp_ecn)
return false;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 69fd7201129..4aa7e9dc0cb 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
+#include <linux/swap.h>
#include <net/snmp.h>
#include <net/icmp.h>
#include <net/ip.h>
@@ -23,6 +24,7 @@
#include <net/cipso_ipv4.h>
#include <net/inet_frag.h>
#include <net/ping.h>
+#include <net/tcp_memcontrol.h>
static int zero;
static int tcp_retr1_max = 255;
@@ -73,7 +75,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
}
-void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
{
gid_t *data = table->data;
unsigned seq;
@@ -86,7 +88,7 @@ void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t
}
/* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, int range[2])
+static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
{
gid_t *data = table->data;
write_seqlock(&sysctl_local_ports.lock);
@@ -174,6 +176,49 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
return ret;
}
+static int ipv4_tcp_mem(ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned long vec[3];
+ struct net *net = current->nsproxy->net_ns;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+ struct mem_cgroup *memcg;
+#endif
+
+ ctl_table tmp = {
+ .data = &vec,
+ .maxlen = sizeof(vec),
+ .mode = ctl->mode,
+ };
+
+ if (!write) {
+ ctl->data = &net->ipv4.sysctl_tcp_mem;
+ return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
+ }
+
+ ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(current);
+
+ tcp_prot_mem(memcg, vec[0], 0);
+ tcp_prot_mem(memcg, vec[1], 1);
+ tcp_prot_mem(memcg, vec[2], 2);
+ rcu_read_unlock();
+#endif
+
+ net->ipv4.sysctl_tcp_mem[0] = vec[0];
+ net->ipv4.sysctl_tcp_mem[1] = vec[1];
+ net->ipv4.sysctl_tcp_mem[2] = vec[2];
+
+ return 0;
+}
+
static struct ctl_table ipv4_table[] = {
{
.procname = "tcp_timestamps",
@@ -433,13 +478,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "tcp_mem",
- .data = &sysctl_tcp_mem,
- .maxlen = sizeof(sysctl_tcp_mem),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax
- },
- {
.procname = "tcp_wmem",
.data = &sysctl_tcp_wmem,
.maxlen = sizeof(sysctl_tcp_wmem),
@@ -721,6 +759,12 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = ipv4_ping_group_range,
},
+ {
+ .procname = "tcp_mem",
+ .maxlen = sizeof(init_net.ipv4.sysctl_tcp_mem),
+ .mode = 0644,
+ .proc_handler = ipv4_tcp_mem,
+ },
{ }
};
@@ -734,6 +778,7 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
struct ctl_table *table;
+ unsigned long limit;
table = ipv4_net_table;
if (!net_eq(net, &init_net)) {
@@ -769,6 +814,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
net->ipv4.sysctl_rt_cache_rebuild_count = 4;
+ limit = nr_free_buffer_pages() / 8;
+ limit = max(limit, 128UL);
+ net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+ net->ipv4.sysctl_tcp_mem[1] = limit;
+ net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+
net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
net_ipv4_ctl_path, table);
if (net->ipv4.ipv4_hdr == NULL)
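The ipv4_sysctl_init_net() hunk above moves the tcp_mem defaults into per-net state: limit = max(nr_free_buffer_pages()/8, 128), then {limit*3/4, limit, 2*limit*3/4} pages. A standalone mock of that computation; the page count is an assumed example, not a value from the patch:

	/* Userspace mock of the per-net tcp_mem default computation added
	 * in ipv4_sysctl_init_net() above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long nr_free_buffer_pages = 1000000UL;	/* assumed */
		unsigned long limit = nr_free_buffer_pages / 8;
		unsigned long tcp_mem[3];

		if (limit < 128UL)
			limit = 128UL;
		tcp_mem[0] = limit / 4 * 3;	/* low watermark      */
		tcp_mem[1] = limit;		/* pressure threshold */
		tcp_mem[2] = tcp_mem[0] * 2;	/* hard limit         */

		printf("tcp_mem = %lu %lu %lu (pages)\n",
		       tcp_mem[0], tcp_mem[1], tcp_mem[2]);
		return 0;
	}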
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 34f5db1e1c8..9bcdec3ad77 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,11 +282,9 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
-long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
@@ -888,18 +886,18 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
}
EXPORT_SYMBOL(tcp_sendpage);
-#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
-#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-
-static inline int select_size(const struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, bool sg)
{
const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
- if (sk_can_gso(sk))
- tmp = 0;
- else {
+ if (sk_can_gso(sk)) {
+ /* Small frames wont use a full page:
+ * Payload will immediately follow tcp header.
+ */
+ tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+ } else {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
if (tmp >= pgbreak &&
@@ -917,9 +915,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int iovlen, flags;
+ int iovlen, flags, err, copied;
int mss_now, size_goal;
- int sg, err, copied;
+ bool sg;
long timeo;
lock_sock(sk);
@@ -946,7 +944,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
- sg = sk->sk_route_caps & NETIF_F_SG;
+ sg = !!(sk->sk_route_caps & NETIF_F_SG);
while (--iovlen >= 0) {
size_t seglen = iov->iov_len;
@@ -1005,8 +1003,13 @@ new_segment:
} else {
int merge = 0;
int i = skb_shinfo(skb)->nr_frags;
- struct page *page = TCP_PAGE(sk);
- int off = TCP_OFF(sk);
+ struct page *page = sk->sk_sndmsg_page;
+ int off;
+
+ if (page && page_count(page) == 1)
+ sk->sk_sndmsg_off = 0;
+
+ off = sk->sk_sndmsg_off;
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
@@ -1023,7 +1026,7 @@ new_segment:
} else if (page) {
if (off == PAGE_SIZE) {
put_page(page);
- TCP_PAGE(sk) = page = NULL;
+ sk->sk_sndmsg_page = page = NULL;
off = 0;
}
} else
@@ -1049,9 +1052,9 @@ new_segment:
/* If this page was new, give it to the
* socket so it does not get leaked.
*/
- if (!TCP_PAGE(sk)) {
- TCP_PAGE(sk) = page;
- TCP_OFF(sk) = 0;
+ if (!sk->sk_sndmsg_page) {
+ sk->sk_sndmsg_page = page;
+ sk->sk_sndmsg_off = 0;
}
goto do_error;
}
@@ -1061,15 +1064,15 @@ new_segment:
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
skb_fill_page_desc(skb, i, page, off, copy);
- if (TCP_PAGE(sk)) {
+ if (sk->sk_sndmsg_page) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
get_page(page);
- TCP_PAGE(sk) = page;
+ sk->sk_sndmsg_page = page;
}
}
- TCP_OFF(sk) = off + copy;
+ sk->sk_sndmsg_off = off + copy;
}
if (!copied)
@@ -2653,7 +2656,8 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
@@ -3272,14 +3276,9 @@ void __init tcp_init(void)
sysctl_tcp_max_orphans = cnt / 2;
sysctl_max_syn_backlog = max(128, cnt / 256);
- limit = nr_free_buffer_pages() / 8;
- limit = max(limit, 128UL);
- sysctl_tcp_mem[0] = limit / 4 * 3;
- sysctl_tcp_mem[1] = limit;
- sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
-
/* Set per-socket limits to no more than 1/128 the pressure threshold */
- limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+ limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
+ << (PAGE_SHIFT - 7);
max_share = min(4UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 850c737e08e..fc6d475f488 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
left * tp->mss_cache < sk->sk_gso_max_size)
return 1;
- return left <= tcp_max_burst(tp);
+ return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 939edb3b8e4..8cd357a8be7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -34,11 +34,23 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
tcp_get_info(sk, info);
}
+static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+}
+
+static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct inet_diag_req *req)
+{
+ return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+}
+
static const struct inet_diag_handler tcp_diag_handler = {
- .idiag_hashinfo = &tcp_hashinfo,
+ .dump = tcp_diag_dump,
+ .dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
- .idiag_type = TCPDIAG_GETSOCK,
- .idiag_info_size = sizeof(struct tcp_info),
+ .idiag_type = IPPROTO_TCP,
};
static int __init tcp_diag_init(void)
@@ -54,4 +66,4 @@ static void __exit tcp_diag_exit(void)
module_init(tcp_diag_init);
module_exit(tcp_diag_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 52b5c2d0ecd..2877c3e0958 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
/* Check #1 */
if (tp->rcv_ssthresh < tp->window_clamp &&
(int)tp->rcv_ssthresh < tcp_space(sk) &&
- !tcp_memory_pressure) {
+ !sk_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
- !tcp_memory_pressure &&
- atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+ !sk_under_memory_pressure(sk) &&
+ sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
sysctl_tcp_rmem[2]);
}
@@ -865,13 +865,13 @@ static void tcp_disable_fack(struct tcp_sock *tp)
/* RFC3517 uses different metric in lost marker => reset on change */
if (tcp_is_fack(tp))
tp->lost_skb_hint = NULL;
- tp->rx_opt.sack_ok &= ~2;
+ tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}
/* Take a notice that peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
- tp->rx_opt.sack_ok |= 4;
+ tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}
/* Initialize metrics on socket. */
@@ -2663,7 +2663,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
@@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
int state = TCP_CA_Open;
- if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
+ if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
state = TCP_CA_Disorder;
if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2881,7 +2881,8 @@ static void tcp_try_to_open(struct sock *sk, int flag)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
- tcp_moderate_cwnd(tp);
+ if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+ tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_down(sk, flag);
}
@@ -3009,11 +3010,11 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int newly_acked_sacked, int flag)
+ int newly_acked_sacked, bool is_dupack,
+ int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
int fast_rexmit = 0, mib_idx;
@@ -3066,17 +3067,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
}
break;
- case TCP_CA_Disorder:
- tcp_try_undo_dsack(sk);
- if (!tp->undo_marker ||
- /* For SACK case do not Open to allow to undo
- * catching for all duplicate ACKs. */
- tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
- tp->undo_marker = 0;
- tcp_set_ca_state(sk, TCP_CA_Open);
- }
- break;
-
case TCP_CA_Recovery:
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
@@ -3117,7 +3107,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
}
- if (icsk->icsk_ca_state == TCP_CA_Disorder)
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
if (!tcp_time_to_recover(sk)) {
@@ -3681,10 +3671,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
+ bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets;
int prior_packets;
int prior_sacked = tp->sacked_out;
+ int pkts_acked = 0;
int newly_acked_sacked = 0;
int frto_cwnd = 0;
@@ -3757,6 +3749,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ pkts_acked = prior_packets - tp->packets_out;
newly_acked_sacked = (prior_packets - prior_sacked) -
(tp->packets_out - tp->sacked_out);
@@ -3771,8 +3764,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
- tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
- newly_acked_sacked, flag);
+ is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+ tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3784,6 +3778,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
return 1;
no_queue:
+ /* If data was DSACKed, see if we can undo a cwnd reduction. */
+ if (flag & FLAG_DSACKING_ACK)
+ tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
@@ -3797,10 +3795,14 @@ invalid_ack:
return -1;
old_ack:
+ /* If data was SACKed, tag it and see if we should send more data.
+ * If data was DSACKed, see if we can undo a cwnd reduction.
+ */
if (TCP_SKB_CB(skb)->sacked) {
- tcp_sacktag_write_queue(sk, skb, prior_snd_una);
- if (icsk->icsk_ca_state == TCP_CA_Open)
- tcp_try_keep_open(sk);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ newly_acked_sacked = tp->sacked_out - prior_sacked;
+ tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ is_dupack, flag);
}
SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -3876,7 +3878,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
!estab && sysctl_tcp_sack) {
- opt_rx->sack_ok = 1;
+ opt_rx->sack_ok = TCP_SACK_SEEN;
tcp_sack_reset(opt_rx);
}
break;
@@ -4864,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk);
- else if (tcp_memory_pressure)
+ else if (sk_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
@@ -4930,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
return 0;
/* If we are under global TCP memory pressure, do not expand. */
- if (tcp_memory_pressure)
+ if (sk_under_memory_pressure(sk))
return 0;
/* If we are under soft global TCP memory pressure, do not expand. */
- if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+ if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
return 0;
/* If we filled the congestion window, do not expand. */
@@ -5809,6 +5811,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
if (th->syn) {
+ if (th->fin)
+ goto discard;
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
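The tcp_input.c and syncookies.c hunks above replace the magic numbers in rx_opt.sack_ok with named bits; the replaced constants (1, ~2, |4) let the bit layout be read off directly. A small userspace demo using stand-in macros with those inferred values (the macro definitions here are illustrative, not copied from the kernel headers):

	/* Hypothetical stand-ins for the sack_ok bits used in the hunks above;
	 * values are inferred from the magic numbers they replace. */
	#include <stdio.h>

	#define TCP_SACK_SEEN		(1 << 0)	/* was sack_ok = 1    */
	#define TCP_FACK_ENABLED	(1 << 1)	/* was sack_ok &= ~2  */
	#define TCP_DSACK_SEEN		(1 << 2)	/* was sack_ok |= 4   */

	int main(void)
	{
		int sack_ok = TCP_SACK_SEEN;

		sack_ok |= TCP_DSACK_SEEN;	/* peer sent a D-SACK      */
		sack_ok &= ~TCP_FACK_ENABLED;	/* FACK stays disabled     */

		printf("sack_ok = 0x%x\n", sack_ok);	/* prints 0x5 */
		return 0;
	}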
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a9db4b1a221..1eb4ad57670 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -73,6 +73,7 @@
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
@@ -1511,6 +1512,7 @@ exit:
return NULL;
put_and_exit:
tcp_clear_xmit_timers(newsk);
+ tcp_cleanup_congestion_control(newsk);
bh_unlock_sock(newsk);
sock_put(newsk);
goto exit;
@@ -1916,7 +1918,8 @@ static int tcp_v4_init_sock(struct sock *sk)
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
local_bh_disable();
- percpu_counter_inc(&tcp_sockets_allocated);
+ sock_update_memcg(sk);
+ sk_sockets_allocated_inc(sk);
local_bh_enable();
return 0;
@@ -1972,7 +1975,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
tp->cookie_values = NULL;
}
- percpu_counter_dec(&tcp_sockets_allocated);
+ sk_sockets_allocated_dec(sk);
+ sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -2619,7 +2623,6 @@ struct proto tcp_prot = {
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
- .sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
@@ -2633,10 +2636,14 @@ struct proto tcp_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+ .init_cgroup = tcp_init_cgroup,
+ .destroy_cgroup = tcp_destroy_cgroup,
+ .proto_cgroup = tcp_proto_cgroup,
+#endif
};
EXPORT_SYMBOL(tcp_prot);
-
static int __net_init tcp_sk_init(struct net *net)
{
return inet_ctl_sock_create(&net->ipv4.tcp_sock,
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
new file mode 100644
index 00000000000..7fed04f875c
--- /dev/null
+++ b/net/ipv4/tcp_memcontrol.c
@@ -0,0 +1,272 @@
+#include <net/tcp.h>
+#include <net/tcp_memcontrol.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <linux/nsproxy.h>
+#include <linux/memcontrol.h>
+#include <linux/module.h>
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+ const char *buffer);
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
+
+static struct cftype tcp_files[] = {
+ {
+ .name = "kmem.tcp.limit_in_bytes",
+ .write_string = tcp_cgroup_write,
+ .read_u64 = tcp_cgroup_read,
+ .private = RES_LIMIT,
+ },
+ {
+ .name = "kmem.tcp.usage_in_bytes",
+ .read_u64 = tcp_cgroup_read,
+ .private = RES_USAGE,
+ },
+ {
+ .name = "kmem.tcp.failcnt",
+ .private = RES_FAILCNT,
+ .trigger = tcp_cgroup_reset,
+ .read_u64 = tcp_cgroup_read,
+ },
+ {
+ .name = "kmem.tcp.max_usage_in_bytes",
+ .private = RES_MAX_USAGE,
+ .trigger = tcp_cgroup_reset,
+ .read_u64 = tcp_cgroup_read,
+ },
+};
+
+static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
+{
+ return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
+}
+
+static void memcg_tcp_enter_memory_pressure(struct sock *sk)
+{
+ if (sk->sk_cgrp->memory_pressure)
+ *sk->sk_cgrp->memory_pressure = 1;
+}
+EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
+
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+ /*
+ * The root cgroup does not use res_counters, but rather,
+ * rely on the data already collected by the network
+ * subsystem
+ */
+ struct res_counter *res_parent = NULL;
+ struct cg_proto *cg_proto, *parent_cg;
+ struct tcp_memcontrol *tcp;
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+ struct net *net = current->nsproxy->net_ns;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ goto create_files;
+
+ tcp = tcp_from_cgproto(cg_proto);
+
+ tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
+ tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
+ tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
+ tcp->tcp_memory_pressure = 0;
+
+ parent_cg = tcp_prot.proto_cgroup(parent);
+ if (parent_cg)
+ res_parent = parent_cg->memory_allocated;
+
+ res_counter_init(&tcp->tcp_memory_allocated, res_parent);
+ percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+
+ cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
+ cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
+ cg_proto->sysctl_mem = tcp->tcp_prot_mem;
+ cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
+ cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
+ cg_proto->memcg = memcg;
+
+create_files:
+ return cgroup_add_files(cgrp, ss, tcp_files,
+ ARRAY_SIZE(tcp_files));
+}
+EXPORT_SYMBOL(tcp_init_cgroup);
+
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct cg_proto *cg_proto;
+ struct tcp_memcontrol *tcp;
+ u64 val;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return;
+
+ tcp = tcp_from_cgproto(cg_proto);
+ percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+
+ val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+
+ if (val != RESOURCE_MAX)
+ jump_label_dec(&memcg_socket_limit_enabled);
+}
+EXPORT_SYMBOL(tcp_destroy_cgroup);
+
+static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+ u64 old_lim;
+ int i;
+ int ret;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return -EINVAL;
+
+ if (val > RESOURCE_MAX)
+ val = RESOURCE_MAX;
+
+ tcp = tcp_from_cgproto(cg_proto);
+
+ old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+ ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++)
+ tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
+ net->ipv4.sysctl_tcp_mem[i]);
+
+ if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
+ jump_label_dec(&memcg_socket_limit_enabled);
+ else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+ jump_label_inc(&memcg_socket_limit_enabled);
+
+ return 0;
+}
+
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+ const char *buffer)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ unsigned long long val;
+ int ret = 0;
+
+ switch (cft->private) {
+ case RES_LIMIT:
+ /* see memcontrol.c */
+ ret = res_counter_memparse_write_strategy(buffer, &val);
+ if (ret)
+ break;
+ ret = tcp_update_limit(memcg, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
+{
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return default_val;
+
+ tcp = tcp_from_cgproto(cg_proto);
+ return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+}
+
+static u64 tcp_read_usage(struct mem_cgroup *memcg)
+{
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
+
+ tcp = tcp_from_cgproto(cg_proto);
+ return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+}
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ u64 val;
+
+ switch (cft->private) {
+ case RES_LIMIT:
+ val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+ break;
+ case RES_USAGE:
+ val = tcp_read_usage(memcg);
+ break;
+ case RES_FAILCNT:
+ case RES_MAX_USAGE:
+ val = tcp_read_stat(memcg, cft->private, 0);
+ break;
+ default:
+ BUG();
+ }
+ return val;
+}
+
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+{
+ struct mem_cgroup *memcg;
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+
+ memcg = mem_cgroup_from_cont(cont);
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return 0;
+ tcp = tcp_from_cgproto(cg_proto);
+
+ switch (event) {
+ case RES_MAX_USAGE:
+ res_counter_reset_max(&tcp->tcp_memory_allocated);
+ break;
+ case RES_FAILCNT:
+ res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+ break;
+ }
+
+ return 0;
+}
+
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
+{
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+
+ cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
+ if (!cg_proto)
+ return 0;
+
+ tcp = tcp_from_cgproto(cg_proto);
+ return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+}
+
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
+{
+ struct tcp_memcontrol *tcp;
+ struct cg_proto *cg_proto;
+
+ cg_proto = tcp_prot.proto_cgroup(memcg);
+ if (!cg_proto)
+ return;
+
+ tcp = tcp_from_cgproto(cg_proto);
+
+ tcp->tcp_prot_mem[idx] = val;
+}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 66363b689ad..550e755747e 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -336,15 +336,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet6_timewait_sock *tw6;
tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
tw6 = inet6_twsk((struct sock *)tw);
- ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+ tw6->tw_v6_daddr = np->daddr;
+ tw6->tw_v6_rcv_saddr = np->rcv_saddr;
tw->tw_tclass = np->tclass;
tw->tw_ipv6only = np->ipv6only;
}
@@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
*/
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
- struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+ struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
@@ -495,7 +495,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
- newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+ if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
+ !try_module_get(newicsk->icsk_ca_ops->owner))
+ newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 63170e29754..8c8de2780c7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1093,6 +1093,13 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
{
int i, k, eat;
+ eat = min_t(int, len, skb_headlen(skb));
+ if (eat) {
+ __skb_pull(skb, eat);
+ len -= eat;
+ if (!len)
+ return;
+ }
eat = len;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1124,11 +1131,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM;
- /* If len == headlen, we avoid __skb_pull to preserve alignment. */
- if (unlikely(len < skb_headlen(skb)))
- __skb_pull(skb, len);
- else
- __pskb_trim_head(skb, len - skb_headlen(skb));
+ __pskb_trim_head(skb, len);
TCP_SKB_CB(skb)->seq += len;
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1581,7 +1584,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
* frame, so if we have space for more than 3 frames
* then send now.
*/
- if (limit > tcp_max_burst(tp) * tp->mss_cache)
+ if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
goto send_now;
}
@@ -1919,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
- if (tcp_memory_pressure)
+ if (sk_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
4U * tp->advmss);
@@ -2147,7 +2150,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
*/
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ /* make sure skb->data is aligned on arches that require it */
+ if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+ struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ -ENOBUFS;
+ } else {
+ err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ }
if (err == 0) {
/* Update global TCP statistics. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2e0f0af76c1..a516d1e399d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -171,13 +171,13 @@ static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until;
- bool do_reset, syn_set = 0;
+ bool do_reset, syn_set = false;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
- syn_set = 1;
+ syn_set = true;
} else {
if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
/* Black hole detection */
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
}
out:
- if (tcp_memory_pressure)
+ if (sk_under_memory_pressure(sk))
sk_mem_reclaim(sk);
out_unlock:
bh_unlock_sock(sk);
@@ -340,7 +340,7 @@ void tcp_retransmit_timer(struct sock *sk)
&inet->inet_daddr, ntohs(inet->inet_dport),
inet->inet_num, tp->snd_una, tp->snd_nxt);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index ac3b3ee4b07..01775983b99 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -105,7 +105,7 @@ drop:
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int tunnel64_rcv(struct sk_buff *skb)
{
struct xfrm_tunnel *handler;
@@ -134,7 +134,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
break;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static void tunnel64_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
@@ -152,7 +152,7 @@ static const struct net_protocol tunnel4_protocol = {
.netns_ok = 1,
};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static const struct net_protocol tunnel64_protocol = {
.handler = tunnel64_rcv,
.err_handler = tunnel64_err,
@@ -167,7 +167,7 @@ static int __init tunnel4_init(void)
printk(KERN_ERR "tunnel4 init: can't add protocol\n");
return -EAGAIN;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
printk(KERN_ERR "tunnel64 init: can't add protocol\n");
inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
@@ -179,7 +179,7 @@ static int __init tunnel4_init(void)
static void __exit tunnel4_fini(void)
{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
#endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ab0966df1e2..5d075b5f70f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -445,7 +445,7 @@ exact_match:
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
-static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
@@ -512,6 +512,7 @@ begin:
rcu_read_unlock();
return result;
}
+EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
@@ -1164,7 +1165,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1187,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
@@ -1197,14 +1199,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
@@ -1233,7 +1235,7 @@ try_again:
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
@@ -1357,7 +1359,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (inet_sk(sk)->inet_daddr)
sock_rps_save_rxhash(sk, skb);
- rc = ip_queue_rcv_skb(sk, skb);
+ rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1473,6 +1475,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
rc = 0;
+ ipv4_pktinfo_prepare(skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
@@ -2246,7 +2249,8 @@ int udp4_ufo_send_check(struct sk_buff *skb)
return 0;
}
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
new file mode 100644
index 00000000000..69f8a7ca63d
--- /dev/null
+++ b/net/ipv4/udp_diag.c
@@ -0,0 +1,201 @@
+/*
+ * udp_diag.c Module for monitoring UDP transport protocols sockets.
+ *
+ * Authors: Pavel Emelyanov, <xemul@parallels.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+ struct netlink_callback *cb, struct inet_diag_req *req,
+ struct nlattr *bc)
+{
+ if (!inet_diag_bc_sk(bc, sk))
+ return 0;
+
+ return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh, struct inet_diag_req *req)
+{
+ int err = -EINVAL;
+ struct sock *sk;
+ struct sk_buff *rep;
+
+ if (req->sdiag_family == AF_INET)
+ sk = __udp4_lib_lookup(&init_net,
+ req->id.idiag_src[0], req->id.idiag_sport,
+ req->id.idiag_dst[0], req->id.idiag_dport,
+ req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (req->sdiag_family == AF_INET6)
+ sk = __udp6_lib_lookup(&init_net,
+ (struct in6_addr *)req->id.idiag_src,
+ req->id.idiag_sport,
+ (struct in6_addr *)req->id.idiag_dst,
+ req->id.idiag_dport,
+ req->id.idiag_if, tbl);
+#endif
+ else
+ goto out_nosk;
+
+ err = -ENOENT;
+ if (sk == NULL)
+ goto out_nosk;
+
+ err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+ if (err)
+ goto out;
+
+ err = -ENOMEM;
+ rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+ sizeof(struct inet_diag_meminfo) +
+ 64)), GFP_KERNEL);
+ if (!rep)
+ goto out;
+
+ err = inet_sk_diag_fill(sk, NULL, rep, req,
+ NETLINK_CB(in_skb).pid,
+ nlh->nlmsg_seq, 0, nlh);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(rep);
+ goto out;
+ }
+ err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ MSG_DONTWAIT);
+ if (err > 0)
+ err = 0;
+out:
+ if (sk)
+ sock_put(sk);
+out_nosk:
+ return err;
+}
+
+static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ int num, s_num, slot, s_slot;
+
+ s_slot = cb->args[0];
+ num = s_num = cb->args[1];
+
+ for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+ struct sock *sk;
+ struct hlist_nulls_node *node;
+ struct udp_hslot *hslot = &table->hash[slot];
+
+ if (hlist_nulls_empty(&hslot->head))
+ continue;
+
+ spin_lock_bh(&hslot->lock);
+ sk_nulls_for_each(sk, node, &hslot->head) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (num < s_num)
+ goto next;
+ if (!(r->idiag_states & (1 << sk->sk_state)))
+ goto next;
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next;
+ if (r->id.idiag_sport != inet->inet_sport &&
+ r->id.idiag_sport)
+ goto next;
+ if (r->id.idiag_dport != inet->inet_dport &&
+ r->id.idiag_dport)
+ goto next;
+
+ if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ spin_unlock_bh(&hslot->lock);
+ goto done;
+ }
+next:
+ num++;
+ }
+ spin_unlock_bh(&hslot->lock);
+ }
+done:
+ cb->args[0] = slot;
+ cb->args[1] = num;
+}
+
+static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ udp_dump(&udp_table, skb, cb, r, bc);
+}
+
+static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct inet_diag_req *req)
+{
+ return udp_dump_one(&udp_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udp_diag_handler = {
+ .dump = udp_diag_dump,
+ .dump_one = udp_diag_dump_one,
+ .idiag_type = IPPROTO_UDP,
+};
+
+static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct inet_diag_req *r, struct nlattr *bc)
+{
+ udp_dump(&udplite_table, skb, cb, r, bc);
+}
+
+static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct inet_diag_req *req)
+{
+ return udp_dump_one(&udplite_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udplite_diag_handler = {
+ .dump = udplite_diag_dump,
+ .dump_one = udplite_diag_dump_one,
+ .idiag_type = IPPROTO_UDPLITE,
+};
+
+static int __init udp_diag_init(void)
+{
+ int err;
+
+ err = inet_diag_register(&udp_diag_handler);
+ if (err)
+ goto out;
+ err = inet_diag_register(&udplite_diag_handler);
+ if (err)
+ goto out_lite;
+out:
+ return err;
+out_lite:
+ inet_diag_unregister(&udp_diag_handler);
+ goto out;
+}
+
+static void __exit udp_diag_exit(void)
+{
+ inet_diag_unregister(&udplite_diag_handler);
+ inet_diag_unregister(&udp_diag_handler);
+}
+
+module_init(udp_diag_init);
+module_exit(udp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 82806455e85..9247d9d70e9 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -64,7 +64,7 @@ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
.priority = 2,
};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
.handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
@@ -84,7 +84,7 @@ static int __init ipip_init(void)
xfrm_unregister_type(&ipip_type, AF_INET);
return -EAGAIN;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
@@ -97,7 +97,7 @@ static int __init ipip_init(void)
static void __exit ipip_fini(void)
{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
#endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df82e2c..0ba0866230c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -630,13 +630,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
goto out;
}
- rt = addrconf_dst_alloc(idev, addr, 0);
+ rt = addrconf_dst_alloc(idev, addr, false);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto out;
}
- ipv6_addr_copy(&ifa->addr, addr);
+ ifa->addr = *addr;
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
@@ -650,16 +650,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ifa->rt = rt;
- /*
- * part one of RFC 4429, section 3.3
- * We should not configure an address as
- * optimistic if we do not yet know the link
- * layer address of our nexhop router
- */
-
- if (dst_get_neighbour_raw(&rt->dst) == NULL)
- ifa->flags &= ~IFA_F_OPTIMISTIC;
-
ifa->idev = idev;
in6_dev_hold(idev);
/* For caller */
@@ -807,7 +797,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ip6_del_rt(rt);
rt = NULL;
} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
- rt->rt6i_expires = expires;
+ rt->dst.expires = expires;
rt->rt6i_flags |= RTF_EXPIRES;
}
}
@@ -1228,7 +1218,7 @@ try_nextdev:
if (!hiscore->ifa)
return -EADDRNOTAVAIL;
- ipv6_addr_copy(saddr, &hiscore->ifa->addr);
+ *saddr = hiscore->ifa->addr;
in6_ifa_put(hiscore->ifa);
return 0;
}
@@ -1249,7 +1239,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->scope == IFA_LINK &&
!(ifp->flags & banned_flags)) {
- ipv6_addr_copy(addr, &ifp->addr);
+ *addr = ifp->addr;
err = 0;
break;
}
@@ -1700,7 +1690,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
.fc_protocol = RTPROT_KERNEL,
};
- ipv6_addr_copy(&cfg.fc_dst, pfx);
+ cfg.fc_dst = *pfx;
/* Prevent useless cloning on PtP SIT.
This thing is done here expecting that the whole
@@ -1733,7 +1723,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
if (!fn)
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
- if (rt->rt6i_dev->ifindex != dev->ifindex)
+ if (rt->dst.dev->ifindex != dev->ifindex)
continue;
if ((rt->rt6i_flags & flags) != flags)
continue;
@@ -1805,14 +1795,15 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
return ERR_PTR(-EACCES);
/* Add default multicast route */
- addrconf_add_mroute(dev);
+ if (!(dev->flags & IFF_LOOPBACK))
+ addrconf_add_mroute(dev);
/* Add link local route */
addrconf_add_lroute(dev);
return idev;
}
-void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
+void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
__u32 valid_lft;
@@ -1890,11 +1881,11 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
rt = NULL;
} else if (addrconf_finite_timeout(rt_expires)) {
/* not infinity */
- rt->rt6i_expires = jiffies + rt_expires;
+ rt->dst.expires = jiffies + rt_expires;
rt->rt6i_flags |= RTF_EXPIRES;
} else {
rt->rt6i_flags &= ~RTF_EXPIRES;
- rt->rt6i_expires = 0;
+ rt->dst.expires = 0;
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -1943,7 +1934,7 @@ ok:
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (in6_dev->cnf.optimistic_dad &&
- !net->ipv6.devconf_all->forwarding)
+ !net->ipv6.devconf_all->forwarding && sllao)
addr_flags = IFA_F_OPTIMISTIC;
#endif
@@ -3077,20 +3068,39 @@ static void addrconf_dad_run(struct inet6_dev *idev)
struct if6_iter_state {
struct seq_net_private p;
int bucket;
+ int offset;
};
-static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
+static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
{
struct inet6_ifaddr *ifa = NULL;
struct if6_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
+ int p = 0;
+
+ /* initial bucket if pos is 0 */
+ if (pos == 0) {
+ state->bucket = 0;
+ state->offset = 0;
+ }
- for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
+ for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
struct hlist_node *n;
hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
- addr_lst)
+ addr_lst) {
+ /* sync with offset */
+ if (p < state->offset) {
+ p++;
+ continue;
+ }
+ state->offset++;
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
+ }
+
+ /* prepare for next bucket */
+ state->offset = 0;
+ p = 0;
}
return NULL;
}
@@ -3102,13 +3112,17 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct hlist_node *n = &ifa->addr_lst;
- hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+ state->offset++;
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
+ }
while (++state->bucket < IN6_ADDR_HSIZE) {
+ state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa, n,
&inet6_addr_lst[state->bucket], addr_lst) {
+ state->offset++;
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
}
@@ -3117,21 +3131,11 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
return NULL;
}
-static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct inet6_ifaddr *ifa = if6_get_first(seq);
-
- if (ifa)
- while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
- --pos;
- return pos ? NULL : ifa;
-}
-
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu_bh)
{
rcu_read_lock_bh();
- return if6_get_idx(seq, *pos);
+ return if6_get_first(seq, *pos);
}
static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d27c797f9f0..273f48d1df2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
- if (!inet->transparent &&
+ if (!(inet->freebind || inet->transparent) &&
!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
err = -EADDRNOTAVAIL;
@@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->inet_rcv_saddr = v4addr;
inet->inet_saddr = v4addr;
- ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+ np->rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
- ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+ np->saddr = addr->sin6_addr;
/* Make sure we are allowed to bind here. */
if (sk->sk_prot->get_port(sk, snum)) {
@@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
peer == 1)
return -ENOTCONN;
sin->sin6_port = inet->inet_dport;
- ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+ sin->sin6_addr = np->daddr;
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&np->rcv_saddr))
- ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+ sin->sin6_addr = np->saddr;
else
- ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
+ sin->sin6_addr = np->rcv_saddr;
sin->sin6_port = inet->inet_sport;
}
@@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowlabel = np->flow_label;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
@@ -769,7 +769,8 @@ out:
return err;
}
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
@@ -985,9 +986,9 @@ static int __net_init ipv6_init_mibs(struct net *net)
sizeof(struct icmpv6_mib),
__alignof__(struct icmpv6_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
- sizeof(struct icmpv6msg_mib),
- __alignof__(struct icmpv6msg_mib)) < 0)
+ net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
+ GFP_KERNEL);
+ if (!net->mib.icmpv6msg_statistics)
goto err_icmpmsg_mib;
return 0;
@@ -1008,7 +1009,7 @@ static void ipv6_cleanup_mibs(struct net *net)
snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
- snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
+ kfree(net->mib.icmpv6msg_statistics);
}
static int __net_init inet6_net_init(struct net *net)
@@ -1115,6 +1116,8 @@ static int __init inet6_init(void)
if (err)
goto static_sysctl_fail;
#endif
+ tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
/*
* ipngwg API draft makes clear that the correct semantics
* for TCP and UDP is to consider one TCP and UDP instance
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 4c0f894d084..2ae79dbeec2 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
goto bad;
}
- ipv6_addr_copy(&final_addr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &iph->saddr);
- ipv6_addr_copy(&iph->saddr, &final_addr);
+ final_addr = hao->addr;
+ hao->addr = iph->saddr;
+ iph->saddr = final_addr;
}
break;
}
@@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
segments = rthdr->hdrlen >> 1;
addrs = ((struct rt0_hdr *)rthdr)->addr;
- ipv6_addr_copy(&final_addr, addrs + segments - 1);
+ final_addr = addrs[segments - 1];
addrs += segments - segments_left;
memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
- ipv6_addr_copy(addrs, &iph->daddr);
- ipv6_addr_copy(&iph->daddr, &final_addr);
+ addrs[0] = iph->daddr;
+ iph->daddr = final_addr;
}
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 674255f5e6b..59402b4637f 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (pac == NULL)
return -ENOMEM;
pac->acl_next = NULL;
- ipv6_addr_copy(&pac->acl_addr, addr);
+ pac->acl_addr = *addr;
rcu_read_lock();
if (ifindex == 0) {
@@ -83,7 +83,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
rt = rt6_lookup(net, addr, NULL, 0, 0);
if (rt) {
- dev = rt->rt6i_dev;
+ dev = rt->dst.dev;
dst_release(&rt->dst);
} else if (ishost) {
err = -EADDRNOTAVAIL;
@@ -289,14 +289,14 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
goto out;
}
- rt = addrconf_dst_alloc(idev, addr, 1);
+ rt = addrconf_dst_alloc(idev, addr, true);
if (IS_ERR(rt)) {
kfree(aca);
err = PTR_ERR(rt);
goto out;
}
- ipv6_addr_copy(&aca->aca_addr, addr);
+ aca->aca_addr = *addr;
aca->aca_idev = idev;
aca->aca_rt = rt;
aca->aca_users = 1;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e2480691c22..ae08aee1773 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
}
}
@@ -143,7 +143,7 @@ ipv4_connected:
}
}
- ipv6_addr_copy(&np->daddr, daddr);
+ np->daddr = *daddr;
np->flow_label = fl6.flowlabel;
inet->inet_dport = usin->sin6_port;
@@ -154,8 +154,8 @@ ipv4_connected:
*/
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
@@ -179,10 +179,10 @@ ipv4_connected:
/* source address lookup done in ip6_dst_lookup */
if (ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &fl6.saddr);
+ np->saddr = fl6.saddr;
if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
+ np->rcv_saddr = fl6.saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
@@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
- ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+ iph->daddr = fl6->daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
@@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
- ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+ iph->daddr = fl6->daddr;
mtu_info = IP6CBMTU(skb);
@@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
mtu_info->ip6m_addr.sin6_port = 0;
mtu_info->ip6m_addr.sin6_flowinfo = 0;
mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
- ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+ mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
@@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_port = serr->port;
sin->sin6_scope_id = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
- ipv6_addr_copy(&sin->sin6_addr,
- (struct in6_addr *)(nh + serr->addr_offset));
+ sin->sin6_addr =
+ *(struct in6_addr *)(nh + serr->addr_offset);
if (np->sndflow)
sin->sin6_flowinfo =
(*(__be32 *)(nh + serr->addr_offset - 24) &
@@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
- ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_port = 0;
sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
- ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+ sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
}
put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
@@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = opt->iif;
- ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+ src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
@@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = opt->iif;
- ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+ src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
@@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
*/
sin6.sin6_family = AF_INET6;
- ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr);
+ sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
sin6.sin6_flowinfo = 0;
sin6.sin6_scope_id = 0;
@@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
- if (!inet_sk(sk)->transparent &&
+ if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
!ipv6_chk_addr(net, &src_info->ipi6_addr,
strict ? dev : NULL, 0))
err = -EINVAL;
else
- ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
+ fl6->saddr = src_info->ipi6_addr;
}
rcu_read_unlock();
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index bf22a225f42..3d641b6e9b0 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
- ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
- ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &tmp_addr);
+ tmp_addr = ipv6h->saddr;
+ ipv6h->saddr = hao->addr;
+ hao->addr = tmp_addr;
if (skb->tstamp.tv64 == 0)
__net_timestamp(skb);
@@ -461,9 +461,9 @@ looped_back:
return -1;
}
- ipv6_addr_copy(&daddr, addr);
- ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
- ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
+ daddr = *addr;
+ *addr = ipv6_hdr(skb)->daddr;
+ ipv6_hdr(skb)->daddr = daddr;
skb_dst_drop(skb);
ip6_route_input(skb);
@@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
memcpy(phdr->addr, ihdr->addr + 1,
(hops - 1) * sizeof(struct in6_addr));
- ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
+ phdr->addr[hops - 1] = **addr_p;
*addr_p = ihdr->addr;
phdr->rt_hdr.nexthdr = *proto;
@@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
if (!opt || !opt->srcrt)
return NULL;
- ipv6_addr_copy(orig, &fl6->daddr);
- ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
+ *orig = fl6->daddr;
+ fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
return orig;
}
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 37f548b7f6d..72957f4a7c6 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -57,6 +57,9 @@ int ipv6_ext_hdr(u8 nexthdr)
* it returns NULL.
* - First fragment header is skipped, not-first ones
* are considered as unparsable.
+ * - Reports the offset field of the final fragment header so it is
+ * possible to tell whether this is a first fragment, later fragment,
+ * or not fragmented.
* - ESP is unparsable for now and considered like
* normal payload protocol.
* - Note also special handling of AUTH header. Thanks to IPsec wizards.
@@ -64,10 +67,13 @@ int ipv6_ext_hdr(u8 nexthdr)
* --ANK (980726)
*/
-int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
+int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
+ __be16 *frag_offp)
{
u8 nexthdr = *nexthdrp;
+ *frag_offp = 0;
+
while (ipv6_ext_hdr(nexthdr)) {
struct ipv6_opt_hdr _hdr, *hp;
int hdrlen;
@@ -87,7 +93,8 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
if (fp == NULL)
return -1;
- if (ntohs(*fp) & ~0x7)
+ *frag_offp = *fp;
+ if (ntohs(*frag_offp) & ~0x7)
break;
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH)
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 295571576f8..b6c57315206 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
if (!ipv6_prefix_equal(&saddr, &r->src.addr,
r->src.plen))
goto again;
- ipv6_addr_copy(&flp6->saddr, &saddr);
+ flp6->saddr = saddr;
}
goto out;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 90868fb4275..01d46bff63c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -135,11 +135,12 @@ static int is_ineligible(struct sk_buff *skb)
int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
int len = skb->len - ptr;
__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ __be16 frag_off;
if (len < 0)
return 1;
- ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
+ ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
if (ptr < 0)
return 0;
if (nexthdr == IPPROTO_ICMPV6) {
@@ -290,9 +291,9 @@ static void mip6_addr_swap(struct sk_buff *skb)
if (likely(off >= 0)) {
hao = (struct ipv6_destopt_hao *)
(skb_network_header(skb) + off);
- ipv6_addr_copy(&tmp, &iph->saddr);
- ipv6_addr_copy(&iph->saddr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &tmp);
+ tmp = iph->saddr;
+ iph->saddr = hao->addr;
+ hao->addr = tmp;
}
}
}
@@ -444,9 +445,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
- ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
+ fl6.daddr = hdr->saddr;
if (saddr)
- ipv6_addr_copy(&fl6.saddr, saddr);
+ fl6.saddr = *saddr;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
@@ -538,9 +539,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+ fl6.daddr = ipv6_hdr(skb)->saddr;
if (saddr)
- ipv6_addr_copy(&fl6.saddr, saddr);
+ fl6.saddr = *saddr;
fl6.flowi6_oif = skb->dev->ifindex;
fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -596,6 +597,7 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
int inner_offset;
int hash;
u8 nexthdr;
+ __be16 frag_off;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return;
@@ -603,7 +605,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
if (ipv6_ext_hdr(nexthdr)) {
/* now skip over extension headers */
- inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+ &nexthdr, &frag_off);
if (inner_offset<0)
return;
} else {
@@ -786,8 +789,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
int oif)
{
memset(fl6, 0, sizeof(*fl6));
- ipv6_addr_copy(&fl6->saddr, saddr);
- ipv6_addr_copy(&fl6->daddr, daddr);
+ fl6->saddr = *saddr;
+ fl6->daddr = *daddr;
fl6->flowi6_proto = IPPROTO_ICMPV6;
fl6->fl6_icmp_type = type;
fl6->fl6_icmp_code = 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index fee46d5a2f1..02dd203d9ea 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+ fl6.daddr = treq->rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+ fl6.saddr = treq->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
* request_sock (formerly open request) hash tables.
*/
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
- const u32 rnd, const u16 synq_hsize)
+ const u32 rnd, const u32 synq_hsize)
{
u32 c;
@@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
sin6->sin6_family = AF_INET6;
- ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+ sin6->sin6_addr = np->daddr;
sin6->sin6_port = inet_sk(sk)->inet_dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
@@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6.flowlabel);
fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
+ fl6.daddr = np->daddr;
res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
rcu_read_unlock();
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 93718f3db79..b82bcde53f7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -190,7 +190,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
struct fib6_table *table;
table = kzalloc(sizeof(*table), GFP_ATOMIC);
- if (table != NULL) {
+ if (table) {
table->tb6_id = id;
table->tb6_root.leaf = net->ipv6.ip6_null_entry;
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
@@ -210,7 +210,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id)
return tb;
tb = fib6_alloc_table(net, id);
- if (tb != NULL)
+ if (tb)
fib6_link_table(net, tb);
return tb;
@@ -367,7 +367,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
s_e = cb->args[1];
w = (void *)cb->args[2];
- if (w == NULL) {
+ if (!w) {
/* New dump:
*
* 1. hook callback destructor.
@@ -379,7 +379,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
* 2. allocate and initialize walker.
*/
w = kzalloc(sizeof(*w), GFP_ATOMIC);
- if (w == NULL)
+ if (!w)
return -ENOMEM;
w->func = fib6_dump_node;
cb->args[2] = (long)w;
@@ -425,7 +425,8 @@ out:
static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
int addrlen, int plen,
- int offset)
+ int offset, int allow_create,
+ int replace_required)
{
struct fib6_node *fn, *in, *ln;
struct fib6_node *pn = NULL;
@@ -447,8 +448,18 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
* Prefix match
*/
if (plen < fn->fn_bit ||
- !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
+ !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
+ if (!allow_create) {
+ if (replace_required) {
+ pr_warn("IPv6: Can't replace route, "
+ "no match found\n");
+ return ERR_PTR(-ENOENT);
+ }
+ pr_warn("IPv6: NLM_F_CREATE should be set "
+ "when creating new route\n");
+ }
goto insert_above;
+ }
/*
* Exact match ?
@@ -456,7 +467,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
if (plen == fn->fn_bit) {
/* clean up an intermediate node */
- if ((fn->fn_flags & RTN_RTINFO) == 0) {
+ if (!(fn->fn_flags & RTN_RTINFO)) {
rt6_release(fn->leaf);
fn->leaf = NULL;
}
@@ -477,6 +488,23 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
fn = dir ? fn->right: fn->left;
} while (fn);
+ if (!allow_create) {
+ /* We should not create new node because
+ * NLM_F_REPLACE was specified without NLM_F_CREATE
+ * I assume it is safe to require NLM_F_CREATE when
+ * REPLACE flag is used! Later we may want to remove the
+ * check for replace_required, because according
+ * to netlink specification, NLM_F_CREATE
+ * MUST be specified if new route is created.
+ * That would keep IPv6 consistent with IPv4
+ */
+ if (replace_required) {
+ pr_warn("IPv6: Can't replace route, no match found\n");
+ return ERR_PTR(-ENOENT);
+ }
+ pr_warn("IPv6: NLM_F_CREATE should be set "
+ "when creating new route\n");
+ }
/*
* We walked to the bottom of tree.
* Create new leaf node without children.
@@ -484,7 +512,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
ln = node_alloc();
- if (ln == NULL)
+ if (!ln)
return NULL;
ln->fn_bit = plen;
@@ -527,7 +555,7 @@ insert_above:
in = node_alloc();
ln = node_alloc();
- if (in == NULL || ln == NULL) {
+ if (!in || !ln) {
if (in)
node_free(in);
if (ln)
@@ -581,7 +609,7 @@ insert_above:
ln = node_alloc();
- if (ln == NULL)
+ if (!ln)
return NULL;
ln->fn_bit = plen;
@@ -614,10 +642,15 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
{
struct rt6_info *iter = NULL;
struct rt6_info **ins;
+ int replace = (info->nlh &&
+ (info->nlh->nlmsg_flags & NLM_F_REPLACE));
+ int add = (!info->nlh ||
+ (info->nlh->nlmsg_flags & NLM_F_CREATE));
+ int found = 0;
ins = &fn->leaf;
- for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
+ for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
/*
* Search for duplicates
*/
@@ -626,17 +659,24 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
/*
* Same priority level
*/
+ if (info->nlh &&
+ (info->nlh->nlmsg_flags & NLM_F_EXCL))
+ return -EEXIST;
+ if (replace) {
+ found++;
+ break;
+ }
- if (iter->rt6i_dev == rt->rt6i_dev &&
+ if (iter->dst.dev == rt->dst.dev &&
iter->rt6i_idev == rt->rt6i_idev &&
ipv6_addr_equal(&iter->rt6i_gateway,
&rt->rt6i_gateway)) {
- if (!(iter->rt6i_flags&RTF_EXPIRES))
+ if (!(iter->rt6i_flags & RTF_EXPIRES))
return -EEXIST;
- iter->rt6i_expires = rt->rt6i_expires;
- if (!(rt->rt6i_flags&RTF_EXPIRES)) {
+ iter->dst.expires = rt->dst.expires;
+ if (!(rt->rt6i_flags & RTF_EXPIRES)) {
iter->rt6i_flags &= ~RTF_EXPIRES;
- iter->rt6i_expires = 0;
+ iter->dst.expires = 0;
}
return -EEXIST;
}
@@ -655,17 +695,40 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
/*
* insert node
*/
+ if (!replace) {
+ if (!add)
+ pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+
+add:
+ rt->dst.rt6_next = iter;
+ *ins = rt;
+ rt->rt6i_node = fn;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
+
+ if (!(fn->fn_flags & RTN_RTINFO)) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
- rt->dst.rt6_next = iter;
- *ins = rt;
- rt->rt6i_node = fn;
- atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info);
- info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
-
- if ((fn->fn_flags & RTN_RTINFO) == 0) {
- info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
- fn->fn_flags |= RTN_RTINFO;
+ } else {
+ if (!found) {
+ if (add)
+ goto add;
+ pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+ return -ENOENT;
+ }
+ *ins = rt;
+ rt->rt6i_node = fn;
+ rt->dst.rt6_next = iter->dst.rt6_next;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ rt6_release(iter);
+ if (!(fn->fn_flags & RTN_RTINFO)) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
}
return 0;
@@ -674,7 +737,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
{
if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
- (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
+ (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
mod_timer(&net->ipv6.ip6_fib_timer,
jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}
@@ -696,11 +759,28 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+
+ if (info->nlh) {
+ if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+ allow_create = 0;
+ if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+ replace_required = 1;
+ }
+ if (!allow_create && !replace_required)
+ pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
+ rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
+ allow_create, replace_required);
+
+ if (IS_ERR(fn)) {
+ err = PTR_ERR(fn);
+ fn = NULL;
+ }
- if (fn == NULL)
+ if (!fn)
goto out;
pn = fn;
@@ -709,7 +789,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
if (rt->rt6i_src.plen) {
struct fib6_node *sn;
- if (fn->subtree == NULL) {
+ if (!fn->subtree) {
struct fib6_node *sfn;
/*
@@ -724,7 +804,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
/* Create subtree root node */
sfn = node_alloc();
- if (sfn == NULL)
+ if (!sfn)
goto st_failure;
sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
@@ -736,9 +816,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
sizeof(struct in6_addr), rt->rt6i_src.plen,
- offsetof(struct rt6_info, rt6i_src));
+ offsetof(struct rt6_info, rt6i_src),
+ allow_create, replace_required);
- if (sn == NULL) {
+ if (!sn) {
/* If it is failed, discard just allocated
root, and then (in st_failure) stale node
in main tree.
@@ -753,13 +834,18 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
sizeof(struct in6_addr), rt->rt6i_src.plen,
- offsetof(struct rt6_info, rt6i_src));
+ offsetof(struct rt6_info, rt6i_src),
+ allow_create, replace_required);
- if (sn == NULL)
+ if (IS_ERR(sn)) {
+ err = PTR_ERR(sn);
+ sn = NULL;
+ }
+ if (!sn)
goto st_failure;
}
- if (fn->leaf == NULL) {
+ if (!fn->leaf) {
fn->leaf = rt;
atomic_inc(&rt->rt6i_ref);
}
@@ -768,10 +854,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
#endif
err = fib6_add_rt2node(fn, rt, info);
-
- if (err == 0) {
+ if (!err) {
fib6_start_gc(info->nl_net, rt);
- if (!(rt->rt6i_flags&RTF_CACHE))
+ if (!(rt->rt6i_flags & RTF_CACHE))
fib6_prune_clones(info->nl_net, pn, rt);
}
@@ -819,7 +904,7 @@ st_failure:
*/
struct lookup_args {
- int offset; /* key offset on rt6_info */
+ int offset; /* key offset on rt6_info */
const struct in6_addr *addr; /* search key */
};
@@ -849,11 +934,10 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
fn = next;
continue;
}
-
break;
}
- while(fn) {
+ while (fn) {
if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
struct rt6key *key;
@@ -900,8 +984,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *da
};
fn = fib6_lookup_1(root, daddr ? args : args + 1);
-
- if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
+ if (!fn || fn->fn_flags & RTN_TL_ROOT)
fn = root;
return fn;
@@ -961,7 +1044,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
}
#endif
- if (fn && fn->fn_flags&RTN_RTINFO)
+ if (fn && fn->fn_flags & RTN_RTINFO)
return fn;
return NULL;
@@ -975,14 +1058,13 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
{
- if (fn->fn_flags&RTN_ROOT)
+ if (fn->fn_flags & RTN_ROOT)
return net->ipv6.ip6_null_entry;
- while(fn) {
- if(fn->left)
+ while (fn) {
+ if (fn->left)
return fn->left->leaf;
-
- if(fn->right)
+ if (fn->right)
return fn->right->leaf;
fn = FIB6_SUBTREE(fn);
@@ -1020,12 +1102,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
/* Subtree root (i.e. fn) may have one child */
- || (children && fn->fn_flags&RTN_ROOT)
+ || (children && fn->fn_flags & RTN_ROOT)
#endif
) {
fn->leaf = fib6_find_prefix(net, fn);
#if RT6_DEBUG >= 2
- if (fn->leaf==NULL) {
+ if (!fn->leaf) {
WARN_ON(!fn->leaf);
fn->leaf = net->ipv6.ip6_null_entry;
}
@@ -1058,7 +1140,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
read_lock(&fib6_walker_lock);
FOR_WALKERS(w) {
- if (child == NULL) {
+ if (!child) {
if (w->root == fn) {
w->root = w->node = NULL;
RT6_TRACE("W %p adjusted by delroot 1\n", w);
@@ -1087,7 +1169,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
read_unlock(&fib6_walker_lock);
node_free(fn);
- if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
+ if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
return pn;
rt6_release(pn->leaf);
@@ -1121,7 +1203,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
if (w->state == FWS_C && w->leaf == rt) {
RT6_TRACE("walker %p adjusted by delroute\n", w);
w->leaf = rt->dst.rt6_next;
- if (w->leaf == NULL)
+ if (!w->leaf)
w->state = FWS_U;
}
}
@@ -1130,7 +1212,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
rt->dst.rt6_next = NULL;
/* If it was last route, expunge its radix tree node */
- if (fn->leaf == NULL) {
+ if (!fn->leaf) {
fn->fn_flags &= ~RTN_RTINFO;
net->ipv6.rt6_stats->fib_route_nodes--;
fn = fib6_repair_tree(net, fn);
@@ -1144,7 +1226,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
* to still alive ones.
*/
while (fn) {
- if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
+ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
fn->leaf = fib6_find_prefix(net, fn);
atomic_inc(&fn->leaf->rt6i_ref);
rt6_release(rt);
@@ -1171,17 +1253,17 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
return -ENOENT;
}
#endif
- if (fn == NULL || rt == net->ipv6.ip6_null_entry)
+ if (!fn || rt == net->ipv6.ip6_null_entry)
return -ENOENT;
WARN_ON(!(fn->fn_flags & RTN_RTINFO));
- if (!(rt->rt6i_flags&RTF_CACHE)) {
+ if (!(rt->rt6i_flags & RTF_CACHE)) {
struct fib6_node *pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
/* clones of this route might be in another subtree */
if (rt->rt6i_src.plen) {
- while (!(pn->fn_flags&RTN_ROOT))
+ while (!(pn->fn_flags & RTN_ROOT))
pn = pn->parent;
pn = pn->parent;
}
@@ -1232,11 +1314,11 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
for (;;) {
fn = w->node;
- if (fn == NULL)
+ if (!fn)
return 0;
if (w->prune && fn != w->root &&
- fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
+ fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
w->state = FWS_C;
w->leaf = fn->leaf;
}
@@ -1265,7 +1347,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
w->state = FWS_C;
w->leaf = fn->leaf;
case FWS_C:
- if (w->leaf && fn->fn_flags&RTN_RTINFO) {
+ if (w->leaf && fn->fn_flags & RTN_RTINFO) {
int err;
if (w->count < w->skip) {
@@ -1380,6 +1462,26 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
fib6_walk(&c.w);
}
+void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
+ int prune, void *arg)
+{
+ struct fib6_table *table;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+ read_lock_bh(&table->tb6_lock);
+ fib6_clean_tree(net, &table->tb6_root,
+ func, prune, arg);
+ read_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
int prune, void *arg)
{
@@ -1439,8 +1541,8 @@ static int fib6_age(struct rt6_info *rt, void *arg)
* only if they are not in use now.
*/
- if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
- if (time_after(now, rt->rt6i_expires)) {
+ if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
+ if (time_after(now, rt->dst.expires)) {
RT6_TRACE("expiring %p\n", rt);
return -1;
}
@@ -1451,7 +1553,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
- (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
+ (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
RT6_TRACE("purging route %p via non-router but gateway\n",
rt);
return -1;
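A user-space sketch, not part of the patch: the fib6_add() hunks above move the subtree insertion onto the kernel's error-pointer convention, where fib6_add_1() may return an ERR_PTR()-encoded errno that the caller unpacks with IS_ERR()/PTR_ERR() before falling through to the existing NULL handling. The helpers below are re-created purely for illustration (the real ones live in <linux/err.h>), and the lookup/caller names are invented.

/* Minimal user-space sketch of the error-pointer idiom.
 * ERR_PTR/IS_ERR/PTR_ERR are re-created here for illustration only. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct node { int key; };

/* A lookup that distinguishes a hard failure (encoded errno) from
 * "not found" (NULL), mirroring how the caller above handles
 * IS_ERR(sn) before the plain !sn test. */
static struct node *lookup(int key, int simulate_error)
{
	static struct node n = { .key = 42 };

	if (simulate_error)
		return ERR_PTR(-ENOMEM);
	return key == 42 ? &n : NULL;
}

int main(void)
{
	struct node *sn = lookup(42, 1);

	if (IS_ERR(sn)) {
		printf("lookup failed: %ld\n", PTR_ERR(sn));
		sn = NULL;            /* fall back, as the fib6 caller does */
	}
	if (!sn)
		printf("no node, take the failure path\n");
	return 0;
}

The point of the encoding is that a single pointer return can carry three shapes: a valid object, NULL, or a small negative errno packed into the top page of the address space.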
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 4566dbd916d..b7867a1215b 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -EINVAL;
goto done;
}
- ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+ fl->dst = freq->flr_dst;
atomic_set(&fl->users, 1);
switch (fl->share) {
case IPV6_FL_S_EXCL:
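A user-space sketch, not kernel code: this one-liner is part of the tree-wide conversion visible throughout the rest of the patch, where ipv6_addr_copy(&dst, &src) becomes a plain struct assignment. Assignment copies every member of the address structure, so it is equivalent to the helper's copy; the stand-in type below is invented.

/* Sketch: replacing a copy helper with direct struct assignment. */
#include <stdio.h>
#include <string.h>
#include <assert.h>

struct addr128 { unsigned char s6_addr[16]; };   /* stand-in for struct in6_addr */

static void addr_copy(struct addr128 *dst, const struct addr128 *src)
{
	memcpy(dst, src, sizeof(*dst));              /* old helper style */
}

int main(void)
{
	struct addr128 src = { .s6_addr = { 0x20, 0x01, 0x0d, 0xb8 } };
	struct addr128 a, b;

	addr_copy(&a, &src);  /* helper */
	b = src;              /* plain assignment, as in the diff */

	assert(memcmp(&a, &b, sizeof(a)) == 0);
	puts("assignment and helper produce identical copies");
	return 0;
}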
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a46c64eb0a6..1ca5d45a12e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -280,6 +280,7 @@ int ip6_mc_input(struct sk_buff *skb)
u8 *ptr = skb_network_header(skb) + opt->ra;
struct icmp6hdr *icmp6;
u8 nexthdr = hdr->nexthdr;
+ __be16 frag_off;
int offset;
/* Check if the value of Router Alert
@@ -293,7 +294,7 @@ int ip6_mc_input(struct sk_buff *skb)
goto out;
}
offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
- &nexthdr);
+ &nexthdr, &frag_off);
if (offset < 0)
goto out;
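A user-space sketch with invented names: ipv6_skip_exthdr() grows a frag_off out-parameter in this series, and callers such as ip6_mc_input() above simply declare a local __be16 and pass its address even when they do not use the value. The toy walker below mirrors that calling convention only; real extension-header parsing is far more involved.

/* Sketch of adding an out-parameter to an existing parser. */
#include <stdio.h>
#include <stdint.h>

/* old: int skip_exthdr(const uint8_t *pkt, int len, uint8_t *nexthdr); */
static int skip_exthdr(const uint8_t *pkt, int len,
		       uint8_t *nexthdr, uint16_t *frag_off)
{
	(void)pkt;
	*frag_off = 0;          /* no fragment header in this toy packet */
	*nexthdr = 58;          /* pretend ICMPv6 follows */
	return len > 8 ? 8 : -1;
}

int main(void)
{
	uint8_t pkt[16] = { 0 };
	uint8_t nexthdr;
	uint16_t frag_off;      /* new local, mirrored in the callers above */
	int offset = skip_exthdr(pkt, sizeof(pkt), &nexthdr, &frag_off);

	if (offset < 0)
		return 1;
	printf("upper layer at %d, nexthdr %u, frag_off %u\n",
	       offset, nexthdr, frag_off);
	return 0;
}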
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5cac9..d97e07183ce 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -136,7 +136,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
}
rcu_read_lock();
- neigh = dst_get_neighbour(dst);
+ neigh = dst_get_neighbour_noref(dst);
if (neigh) {
int res = neigh_output(neigh, skb);
@@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hdr->nexthdr = proto;
hdr->hop_limit = hlimit;
- ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
- ipv6_addr_copy(&hdr->daddr, first_hop);
+ hdr->saddr = fl6->saddr;
+ hdr->daddr = *first_hop;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
hdr->nexthdr = proto;
hdr->hop_limit = np->hop_limit;
- ipv6_addr_copy(&hdr->saddr, saddr);
- ipv6_addr_copy(&hdr->daddr, daddr);
+ hdr->saddr = *saddr;
+ hdr->daddr = *daddr;
return 0;
}
@@ -329,10 +329,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
{
struct ipv6hdr *hdr = ipv6_hdr(skb);
u8 nexthdr = hdr->nexthdr;
+ __be16 frag_off;
int offset;
if (ipv6_ext_hdr(nexthdr)) {
- offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
+ offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
if (offset < 0)
return 0;
} else
@@ -462,7 +463,7 @@ int ip6_forward(struct sk_buff *skb)
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct rt6_info *rt;
@@ -603,7 +604,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
static atomic_t ipv6_fragmentation_id;
int old, new;
- if (rt) {
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
if (!rt->rt6i_peer)
@@ -631,6 +632,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
+ int hroom, troom;
__be32 frag_id = 0;
int ptr, offset = 0, err=0;
u8 *prevhdr, nexthdr = 0;
@@ -797,6 +799,8 @@ slow_path:
*/
*prevhdr = NEXTHDR_FRAGMENT;
+ hroom = LL_RESERVED_SPACE(rt->dst.dev);
+ troom = rt->dst.dev->needed_tailroom;
/*
* Keep copying data until we run out.
@@ -815,7 +819,8 @@ slow_path:
* Allocate buffer.
*/
- if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
+ if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+ hroom + troom, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
@@ -828,7 +833,7 @@ slow_path:
*/
ip6_copy_metadata(frag, skb);
- skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(frag, hroom);
skb_put(frag, len + hlen + sizeof(struct frag_hdr));
skb_reset_network_header(frag);
fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -978,7 +983,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
* dst entry of the nexthop router
*/
rcu_read_lock();
- n = dst_get_neighbour(*dst);
+ n = dst_get_neighbour_noref(*dst);
if (n && !(n->nud_state & NUD_VALID)) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
@@ -1059,7 +1064,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
if (err)
return ERR_PTR(err);
if (final_dst)
- ipv6_addr_copy(&fl6->daddr, final_dst);
+ fl6->daddr = *final_dst;
if (can_sleep)
fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
@@ -1095,7 +1100,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
if (err)
return ERR_PTR(err);
if (final_dst)
- ipv6_addr_copy(&fl6->daddr, final_dst);
+ fl6->daddr = *final_dst;
if (can_sleep)
fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
@@ -1588,7 +1593,7 @@ int ip6_push_pending_frames(struct sock *sk)
if (np->pmtudisc < IPV6_PMTUDISC_DO)
skb->local_df = 1;
- ipv6_addr_copy(final_dst, &fl6->daddr);
+ *final_dst = fl6->daddr;
__skb_pull(skb, skb_network_header_len(skb));
if (opt && opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
@@ -1604,8 +1609,8 @@ int ip6_push_pending_frames(struct sock *sk)
hdr->hop_limit = np->cork.hop_limit;
hdr->nexthdr = proto;
- ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
- ipv6_addr_copy(&hdr->daddr, final_dst);
+ hdr->saddr = fl6->saddr;
+ hdr->daddr = *final_dst;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
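A user-space sketch, invented names, no skb: several allocation sites in this file stop using the combined LL_ALLOCATED_SPACE(dev) figure and instead size buffers from two explicit quantities, LL_RESERVED_SPACE(dev) for link-layer headroom and dev->needed_tailroom for trailer space, then reserve only the headroom up front. The arithmetic looks like this:

/* Simplified headroom/tailroom sizing. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fakebuf {
	unsigned char *head;   /* start of allocation */
	unsigned char *data;   /* current payload start */
	size_t size;
};

static struct fakebuf *buf_alloc(size_t payload, size_t hroom, size_t troom)
{
	struct fakebuf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->size = payload + hroom + troom;
	b->head = calloc(1, b->size);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + hroom;      /* like skb_reserve(skb, hroom) */
	return b;
}

int main(void)
{
	size_t hroom = 16, troom = 4, payload = 100;
	struct fakebuf *b = buf_alloc(payload, hroom, troom);

	if (!b)
		return 1;
	memset(b->data, 0xab, payload); /* payload fits with room to spare */
	printf("allocated %zu bytes: %zu headroom + %zu payload + %zu tailroom\n",
	       b->size, hroom, payload, troom);
	free(b->head);
	free(b);
	return 0;
}

Splitting the two values lets the tailroom be accounted for in the allocation without shifting the payload pointer, which is what the skb_reserve(frag, hroom) change above preserves.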
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4e2e9ff67ef..e1f7761815f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -93,7 +93,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
@@ -653,8 +653,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
NULL, 0, 0);
- if (rt && rt->rt6i_dev)
- skb2->dev = rt->rt6i_dev;
+ if (rt && rt->dst.dev)
+ skb2->dev = rt->dst.dev;
icmpv6_send(skb2, rel_type, rel_code, rel_info);
@@ -979,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = proto;
- ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
- ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
+ ipv6h->saddr = fl6->saddr;
+ ipv6h->daddr = fl6->daddr;
nf_reset(skb);
pkt_len = skb->len;
err = ip6_local_out(skb);
@@ -1155,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
/* Set up flowi template */
- ipv6_addr_copy(&fl6->saddr, &p->laddr);
- ipv6_addr_copy(&fl6->daddr, &p->raddr);
+ fl6->saddr = p->laddr;
+ fl6->daddr = p->raddr;
fl6->flowi6_oif = p->link;
fl6->flowlabel = 0;
@@ -1185,11 +1185,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
if (rt == NULL)
return;
- if (rt->rt6i_dev) {
- dev->hard_header_len = rt->rt6i_dev->hard_header_len +
+ if (rt->dst.dev) {
+ dev->hard_header_len = rt->dst.dev->hard_header_len +
sizeof (struct ipv6hdr);
- dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+ dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu-=8;
@@ -1212,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
static int
ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
- ipv6_addr_copy(&t->parms.laddr, &p->laddr);
- ipv6_addr_copy(&t->parms.raddr, &p->raddr);
+ t->parms.laddr = p->laddr;
+ t->parms.raddr = p->raddr;
t->parms.flags = p->flags;
t->parms.hop_limit = p->hop_limit;
t->parms.encap_limit = p->encap_limit;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 449a9185b8f..c7e95c8c579 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
msg->im6_msgtype = MRT6MSG_WHOLEPKT;
msg->im6_mif = mrt->mroute_reg_vif_num;
msg->im6_pad = 0;
- ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
- ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+ msg->im6_src = ipv6_hdr(pkt)->saddr;
+ msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
@@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
msg->im6_msgtype = assert;
msg->im6_mif = mifi;
msg->im6_pad = 0;
- ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
- ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+ msg->im6_src = ipv6_hdr(pkt)->saddr;
+ msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net,
iph->payload_len = 0;
iph->nexthdr = IPPROTO_NONE;
iph->hop_limit = 0;
- ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
- ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
+ iph->saddr = rt->rt6i_src.addr;
+ iph->daddr = rt->rt6i_dst.addr;
err = ip6mr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c99e3ee9781..18a2719003c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -435,7 +435,7 @@ sticky_done:
goto e_inval;
np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
- ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
+ np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
retv = 0;
break;
}
@@ -503,7 +503,7 @@ done:
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
- np->mcast_hops = val;
+ np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
retv = 0;
break;
@@ -980,8 +980,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
- ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+ src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxhlim) {
@@ -992,8 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
- ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+ src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
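A toy sketch, not kernel code: the IPV6_MULTICAST_HOPS hunk stores the resolved default rather than the -1 sentinel, so setting -1 writes IPV6_DEFAULT_MCASTHOPS immediately and later readers never have to special-case the sentinel. DEFAULT_HOPS below stands in for the kernel constant.

/* Normalize the -1 sentinel at set time. */
#include <stdio.h>

#define DEFAULT_HOPS 1   /* stand-in for IPV6_DEFAULT_MCASTHOPS */

static int store_hops(int val)
{
	if (val < -1 || val > 255)
		return -1;                           /* reject, like -EINVAL */
	return val == -1 ? DEFAULT_HOPS : val;       /* resolve the sentinel */
}

int main(void)
{
	printf("hops(-1) -> %d\n", store_hops(-1));  /* default */
	printf("hops(64) -> %d\n", store_hops(64));
	return 0;
}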
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ee7839f4d6e..b853f06cc14 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -155,14 +155,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -ENOMEM;
mc_lst->next = NULL;
- ipv6_addr_copy(&mc_lst->addr, addr);
+ mc_lst->addr = *addr;
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
rt = rt6_lookup(net, addr, NULL, 0, 0);
if (rt) {
- dev = rt->rt6i_dev;
+ dev = rt->dst.dev;
dst_release(&rt->dst);
}
} else
@@ -256,7 +256,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
if (rt) {
- dev = rt->rt6i_dev;
+ dev = rt->dst.dev;
dev_hold(dev);
dst_release(&rt->dst);
}
@@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
- ipv6_addr_copy(&mc->mca_addr, addr);
+ mc->mca_addr = *addr;
mc->idev = idev; /* (reference taken) */
mc->mca_users = 1;
/* mca_stamp should be updated upon changes */
@@ -1343,13 +1343,15 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
struct mld2_report *pmr;
struct in6_addr addr_buf;
const struct in6_addr *saddr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int err;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- size += LL_ALLOCATED_SPACE(dev);
+ size += hlen + tlen;
/* limit our allocations to order-0 page */
size = min_t(int, size, SKB_MAX_ORDER(0, 0));
skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1357,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
if (!skb)
return NULL;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1408,18 +1410,11 @@ static void mld_sendpack(struct sk_buff *skb)
csum_partial(skb_transport_header(skb),
mldlen, 0));
- dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-
- if (!dst) {
- err = -ENOMEM;
- goto err_out;
- }
-
icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb->dev->ifindex);
+ dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
err = 0;
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
@@ -1723,6 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
struct mld_msg *hdr;
const struct in6_addr *snd_addr, *saddr;
struct in6_addr addr_buf;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1744,7 +1741,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
IPSTATS_MIB_OUT, full_len);
rcu_read_unlock();
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
+ skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
if (skb == NULL) {
rcu_read_lock();
@@ -1754,7 +1751,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
return;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1772,7 +1769,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
memset(hdr, 0, sizeof(struct mld_msg));
hdr->mld_type = type;
- ipv6_addr_copy(&hdr->mld_mca, addr);
+ hdr->mld_mca = *addr;
hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
IPPROTO_ICMPV6,
@@ -1781,17 +1778,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
- dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
- if (!dst) {
- err = -ENOMEM;
- goto err_out;
- }
-
icmpv6_flow_init(sk, &fl6, type,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
skb->dev->ifindex);
-
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_out;
@@ -1914,7 +1904,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
* Add multicast single-source filter to the interface list
*/
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
- const struct in6_addr *psfsrc, int delta)
+ const struct in6_addr *psfsrc)
{
struct ip6_sf_list *psf, *psf_prev;
@@ -2045,7 +2035,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
pmc->mca_sfcount[sfmode]++;
err = 0;
for (i=0; i<sfcount; i++) {
- err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+ err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 43242e6e610..7e1e0fbfef2 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -195,8 +195,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
mip6_report_rl.iif = iif;
- ipv6_addr_copy(&mip6_report_rl.src, src);
- ipv6_addr_copy(&mip6_report_rl.dst, dst);
+ mip6_report_rl.src = *src;
+ mip6_report_rl.dst = *dst;
allow = 1;
}
spin_unlock_bh(&mip6_report_rl.lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 44e5b7f2a6c..d8f02ef88e5 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -93,7 +93,7 @@
static u32 ndisc_hash(const void *pkey,
const struct net_device *dev,
- __u32 rnd);
+ __u32 *hash_rnd);
static int ndisc_constructor(struct neighbour *neigh);
static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -126,7 +126,6 @@ static const struct neigh_ops ndisc_direct_ops = {
struct neigh_table nd_tbl = {
.family = AF_INET6,
- .entry_size = sizeof(struct neighbour) + sizeof(struct in6_addr),
.key_len = sizeof(struct in6_addr),
.hash = ndisc_hash,
.constructor = ndisc_constructor,
@@ -141,7 +140,7 @@ struct neigh_table nd_tbl = {
.gc_staletime = 60 * HZ,
.reachable_time = ND_REACHABLE_TIME,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 3,
.mcast_probes = 3,
.anycast_delay = 1 * HZ,
@@ -350,16 +349,9 @@ EXPORT_SYMBOL(ndisc_mc_map);
static u32 ndisc_hash(const void *pkey,
const struct net_device *dev,
- __u32 hash_rnd)
+ __u32 *hash_rnd)
{
- const u32 *p32 = pkey;
- u32 addr_hash, i;
-
- addr_hash = 0;
- for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
- addr_hash ^= *p32++;
-
- return jhash_2words(addr_hash, dev->ifindex, hash_rnd);
+ return ndisc_hashfn(pkey, dev, hash_rnd);
}
static int ndisc_constructor(struct neighbour *neigh)
@@ -446,6 +438,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
struct sock *sk = net->ipv6.ndisc_sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int len;
int err;
u8 *opt;
@@ -459,7 +453,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
skb = sock_alloc_send_skb(sk,
(MAX_HEADER + sizeof(struct ipv6hdr) +
- len + LL_ALLOCATED_SPACE(dev)),
+ len + hlen + tlen),
1, &err);
if (!skb) {
ND_PRINTK0(KERN_ERR
@@ -468,7 +462,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
return NULL;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
skb->transport_header = skb->tail;
@@ -479,7 +473,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
if (target) {
- ipv6_addr_copy((struct in6_addr *)opt, target);
+ *(struct in6_addr *)opt = *target;
opt += sizeof(*target);
}
@@ -515,14 +509,7 @@ void ndisc_send_skb(struct sk_buff *skb,
type = icmp6h->icmp6_type;
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
-
- dst = icmp6_dst_alloc(dev, neigh, daddr);
- if (!dst) {
- kfree_skb(skb);
- return;
- }
-
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ dst = icmp6_dst_alloc(dev, neigh, &fl6);
if (IS_ERR(dst)) {
kfree_skb(skb);
return;
@@ -1237,7 +1224,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
if (rt)
- neigh = dst_get_neighbour(&rt->dst);
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (rt && lifetime == 0) {
neigh_clone(neigh);
@@ -1257,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
- neigh = dst_get_neighbour(&rt->dst);
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (neigh == NULL) {
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1271,7 +1258,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (rt)
- rt->rt6i_expires = jiffies + (HZ * lifetime);
+ rt->dst.expires = jiffies + (HZ * lifetime);
if (ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
@@ -1381,7 +1368,9 @@ skip_routeinfo:
for (p = ndopts.nd_opts_pi;
p;
p = ndisc_next_option(p, ndopts.nd_opts_pi_end)) {
- addrconf_prefix_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3);
+ addrconf_prefix_rcv(skb->dev, (u8 *)p,
+ (p->nd_opt_len) << 3,
+ ndopts.nd_opts_src_lladdr != NULL);
}
}
@@ -1533,6 +1522,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
struct inet6_dev *idev;
struct flowi6 fl6;
u8 *opt;
+ int hlen, tlen;
int rd_len;
int err;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
@@ -1571,7 +1561,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
}
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
- if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
goto release;
if (dev->addr_len) {
@@ -1590,9 +1580,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
rd_len &= ~0x7;
len += rd_len;
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
buff = sock_alloc_send_skb(sk,
(MAX_HEADER + sizeof(struct ipv6hdr) +
- len + LL_ALLOCATED_SPACE(dev)),
+ len + hlen + tlen),
1, &err);
if (buff == NULL) {
ND_PRINTK0(KERN_ERR
@@ -1601,7 +1593,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
goto release;
}
- skb_reserve(buff, LL_RESERVED_SPACE(dev));
+ skb_reserve(buff, hlen);
ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
IPPROTO_ICMPV6, len);
@@ -1617,9 +1609,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
*/
addrp = (struct in6_addr *)(icmph + 1);
- ipv6_addr_copy(addrp, target);
+ *addrp = *target;
addrp++;
- ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
+ *addrp = ipv6_hdr(skb)->daddr;
opt = (u8*) (addrp + 1);
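A user-space sketch with invented types: one ndisc_send_redirect() hunk above is a behavioural fix rather than a cleanup. inet_peer_xrlim_allow() returns nonzero when a transmission is permitted, so the redirect has to be skipped when it returns zero; the old test had the sense inverted. The simplified token bucket below shows the corrected call pattern.

/* Simplified token-bucket rate limit with the "allow" sense. */
#include <stdio.h>
#include <time.h>

struct ratelimit {
	double tokens;
	double rate;        /* tokens added per second */
	double burst;
	time_t last;
};

static int xrlim_allow(struct ratelimit *rl, time_t now)
{
	rl->tokens += (double)(now - rl->last) * rl->rate;
	if (rl->tokens > rl->burst)
		rl->tokens = rl->burst;
	rl->last = now;
	if (rl->tokens >= 1.0) {
		rl->tokens -= 1.0;
		return 1;   /* allowed to send */
	}
	return 0;           /* rate limited */
}

int main(void)
{
	struct ratelimit rl = { .tokens = 1, .rate = 1, .burst = 1, .last = 0 };
	time_t now = 0;

	for (int i = 0; i < 3; i++) {
		if (!xrlim_allow(&rl, now))   /* correct sense: bail when limited */
			printf("tick %d: rate limited, not sending\n", i);
		else
			printf("tick %d: sending redirect\n", i);
	}
	return 0;
}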
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 448464844a2..9a68fb5b9e7 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -125,6 +125,16 @@ config IP6_NF_MATCH_MH
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_MATCH_RPFILTER
+ tristate '"rpfilter" reverse path filter match support'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option allows you to match packets whose replies would
+ go out via the interface the packet came in.
+
+ To compile it as a module, choose M here. If unsure, say N.
+ The module will be called ip6t_rpfilter.
+
config IP6_NF_MATCH_RT
tristate '"rt" Routing header match support'
depends on NETFILTER_ADVANCED
@@ -186,7 +196,6 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index abfee91ce81..2eaed96db02 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
+obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
# targets
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index e63c3972a73..fb80a23c664 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -405,6 +405,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
int status, type, pid, flags;
unsigned int nlmsglen, skblen;
struct nlmsghdr *nlh;
+ bool enable_timestamp = false;
skblen = skb->len;
if (skblen < sizeof(*nlh))
@@ -442,11 +443,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
RCV_SKB_FAIL(-EBUSY);
}
} else {
- net_enable_timestamp();
+ enable_timestamp = true;
peer_pid = pid;
}
spin_unlock_bh(&queue_lock);
+ if (enable_timestamp)
+ net_enable_timestamp();
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
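A user-space sketch (pthread mutex standing in for the spinlock, names invented): the ip6_queue hunk defers net_enable_timestamp() until queue_lock has been released, remembering the decision in a local flag while the lock is held and acting on it afterwards, presumably because the enable path is not safe to call in that locked context. The shape of the pattern:

/* Record a decision under the lock, perform the side effect after unlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int peer_pid;

static void enable_timestamp(void)
{
	/* stand-in for net_enable_timestamp(), which is kept
	 * outside queue_lock in the hunk above */
	puts("timestamping enabled");
}

static void register_peer(int pid)
{
	bool enable = false;

	pthread_mutex_lock(&queue_lock);
	if (peer_pid == 0) {
		enable = true;        /* remember the decision only */
		peer_pid = pid;
	}
	pthread_mutex_unlock(&queue_lock);

	if (enable)                   /* side effect outside the lock */
		enable_timestamp();
}

int main(void)
{
	register_peer(1234);
	register_peer(5678);          /* second caller: flag stays false */
	return 0;
}

Built with cc -pthread, the second call prints nothing extra because the flag is only set the first time a peer registers.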
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5dd539..aad2fa41cf4 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -49,6 +49,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
const __u8 tclass = DEFAULT_TOS_VALUE;
struct dst_entry *dst = NULL;
u8 proto;
+ __be16 frag_off;
struct flowi6 fl6;
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
}
proto = oip6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
+ tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
pr_debug("Cannot get TCP header.\n");
@@ -93,8 +94,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
- ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+ fl6.saddr = oip6h->daddr;
+ fl6.daddr = oip6h->saddr;
fl6.fl6_sport = otcph.dest;
fl6.fl6_dport = otcph.source;
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -129,8 +130,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
*(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
ip6h->hop_limit = ip6_dst_hoplimit(dst);
ip6h->nexthdr = IPPROTO_TCP;
- ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
- ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
+ ip6h->saddr = oip6h->daddr;
+ ip6h->daddr = oip6h->saddr;
tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
new file mode 100644
index 00000000000..5d1d8b04d69
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2011 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+
+#include <linux/netfilter/xt_rpfilter.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match");
+
+static bool rpfilter_addr_unicast(const struct in6_addr *addr)
+{
+ int addr_type = ipv6_addr_type(addr);
+ return addr_type & IPV6_ADDR_UNICAST;
+}
+
+static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
+ const struct net_device *dev, u8 flags)
+{
+ struct rt6_info *rt;
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ bool ret = false;
+ struct flowi6 fl6 = {
+ .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
+ .flowi6_proto = iph->nexthdr,
+ .daddr = iph->saddr,
+ };
+ int lookup_flags;
+
+ if (rpfilter_addr_unicast(&iph->daddr)) {
+ memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr));
+ lookup_flags = RT6_LOOKUP_F_HAS_SADDR;
+ } else {
+ lookup_flags = 0;
+ }
+
+ fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+ if ((flags & XT_RPFILTER_LOOSE) == 0) {
+ fl6.flowi6_oif = dev->ifindex;
+ lookup_flags |= RT6_LOOKUP_F_IFACE;
+ }
+
+ rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags);
+ if (rt->dst.error)
+ goto out;
+
+ if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
+ goto out;
+
+ if (rt->rt6i_flags & RTF_LOCAL) {
+ ret = flags & XT_RPFILTER_ACCEPT_LOCAL;
+ goto out;
+ }
+
+ if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+ ret = true;
+ out:
+ dst_release(&rt->dst);
+ return ret;
+}
+
+static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_rpfilter_info *info = par->matchinfo;
+ int saddrtype;
+ struct ipv6hdr *iph;
+ bool invert = info->flags & XT_RPFILTER_INVERT;
+
+ if (par->in->flags & IFF_LOOPBACK)
+ return true ^ invert;
+
+ iph = ipv6_hdr(skb);
+ saddrtype = ipv6_addr_type(&iph->saddr);
+ if (unlikely(saddrtype == IPV6_ADDR_ANY))
+ return true ^ invert; /* not routable: forward path will drop it */
+
+ return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert;
+}
+
+static int rpfilter_check(const struct xt_mtchk_param *par)
+{
+ const struct xt_rpfilter_info *info = par->matchinfo;
+ unsigned int options = ~XT_RPFILTER_OPTION_MASK;
+
+ if (info->flags & options) {
+ pr_info("unknown options encountered");
+ return -EINVAL;
+ }
+
+ if (strcmp(par->table, "mangle") != 0 &&
+ strcmp(par->table, "raw") != 0) {
+ pr_info("match only valid in the \'raw\' "
+ "or \'mangle\' tables, not \'%s\'.\n", par->table);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_match rpfilter_mt_reg __read_mostly = {
+ .name = "rpfilter",
+ .family = NFPROTO_IPV6,
+ .checkentry = rpfilter_check,
+ .match = rpfilter_mt,
+ .matchsize = sizeof(struct xt_rpfilter_info),
+ .hooks = (1 << NF_INET_PRE_ROUTING),
+ .me = THIS_MODULE
+};
+
+static int __init rpfilter_mt_init(void)
+{
+ return xt_register_match(&rpfilter_mt_reg);
+}
+
+static void __exit rpfilter_mt_exit(void)
+{
+ xt_unregister_match(&rpfilter_mt_reg);
+}
+
+module_init(rpfilter_mt_init);
+module_exit(rpfilter_mt_exit);
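A heavily simplified user-space model, not the module above: ip6t_rpfilter builds a flowi6 with the packet's addresses reversed (daddr is set to iph->saddr) and asks the routing table where a reply would leave; in strict mode the resulting device must be the ingress device, while XT_RPFILTER_LOOSE accepts any usable route. The sketch uses string prefixes instead of a real FIB and invents all names.

/* Toy reverse-path check. */
#include <stdio.h>
#include <string.h>

struct route_entry { const char *prefix; int oif; };

static const struct route_entry table[] = {
	{ "2001:db8:1", 1 },   /* reachable via interface 1 */
	{ "2001:db8:2", 2 },   /* reachable via interface 2 */
};

static int route_oif(const char *addr)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strncmp(addr, table[i].prefix, strlen(table[i].prefix)) == 0)
			return table[i].oif;
	return -1;  /* no route */
}

static int rpfilter_ok(const char *saddr, int in_if, int loose)
{
	int oif = route_oif(saddr);   /* reverse lookup: route to the source */

	if (oif < 0)
		return 0;             /* no return path at all */
	return loose || oif == in_if; /* strict mode ties it to the ingress dev */
}

int main(void)
{
	printf("strict, matching dev: %d\n", rpfilter_ok("2001:db8:1::5", 1, 0));
	printf("strict, wrong dev:    %d\n", rpfilter_ok("2001:db8:1::5", 2, 0));
	printf("loose,  wrong dev:    %d\n", rpfilter_ok("2001:db8:1::5", 2, 1));
	return 0;
}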
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index c9e37c8fd62..a8f6da97e3b 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -44,7 +44,7 @@ ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
-static int forward = NF_ACCEPT;
+static bool forward = NF_ACCEPT;
module_param(forward, bool, 0000);
static int __net_init ip6table_filter_net_init(struct net *net)
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1008ce94bc3..fdeb6d03da8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -142,11 +142,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_SENTINEL
};
-/* can be called either with percpu mib (pcpumib != NULL),
- * or shared one (smib != NULL)
- */
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
- atomic_long_t *smib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
{
char name[32];
int i;
@@ -163,14 +159,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpum
snprintf(name, sizeof(name), "Icmp6%s%s",
i & 0x100 ? "Out" : "In", p);
seq_printf(seq, "%-32s\t%lu\n", name,
- pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
+ atomic_long_read(smib + i));
}
/* print by number (nonzero only) - ICMPMsgStat format */
for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
unsigned long val;
- val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
+ val = atomic_long_read(smib + i);
if (!val)
continue;
snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -215,8 +211,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
NULL, snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq,
- (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
+ snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
NULL, snmp6_udp6_list);
snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
@@ -246,7 +241,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
snmp6_ipstats_list);
snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
+ snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
return 0;
}
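A sketch in C11 atomics, user space, invented names: the proc.c hunks read the ICMPv6 message counters straight out of an atomic_long_t array (atomic_long_read(smib + i)) now that the helper no longer has a per-CPU branch to fold. The same read-nonzero-and-print loop shape:

/* Shared atomic counter array, read directly by the formatter. */
#include <stdatomic.h>
#include <stdio.h>

#define NSTATS 4

static atomic_long icmp6msg_stats[NSTATS];

static void count(int type)
{
	atomic_fetch_add(&icmp6msg_stats[type], 1);   /* hot path */
}

int main(void)
{
	count(1);
	count(1);
	count(3);

	for (int i = 0; i < NSTATS; i++) {
		long val = atomic_load(&icmp6msg_stats[i]);
		if (val)
			printf("type %d: %ld\n", i, val); /* nonzero only, like the seq_show loop */
	}
	return 0;
}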
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 331af3b882a..a4894f4f194 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
- ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+ np->rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
- ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+ np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
@@ -383,7 +383,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
/* Charge it to the socket. */
- if (ip_queue_rcv_skb(sk, skb) < 0) {
+ skb_dst_drop(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -494,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
- ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -610,6 +611,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
+ int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+ int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
@@ -619,11 +622,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto out;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+ length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -843,11 +846,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out;
if (!ipv6_addr_any(daddr))
- ipv6_addr_copy(&fl6.daddr, daddr);
+ fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index dfb164e9051..b69fae76a6f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -153,8 +153,8 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
fq->id = arg->id;
fq->user = arg->user;
- ipv6_addr_copy(&fq->saddr, arg->src);
- ipv6_addr_copy(&fq->daddr, arg->dst);
+ fq->saddr = *arg->src;
+ fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8473016bba4..07361dfa808 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,22 +62,11 @@
#include <linux/sysctl.h>
#endif
-/* Set to 3 to get tracing. */
-#define RT6_DEBUG 2
-
-#if RT6_DEBUG >= 3
-#define RDBG(x) printk x
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
-#else
-#define RDBG(x)
-#define RT6_TRACE(x...) do { ; } while (0)
-#endif
-
static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
@@ -134,7 +123,23 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
- return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev);
+ struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
+ if (n)
+ return n;
+ return neigh_create(&nd_tbl, daddr, dst->dev);
+}
+
+static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
+{
+ struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dev, &rt->rt6i_gateway);
+ if (!n) {
+ n = neigh_create(&nd_tbl, &rt->rt6i_gateway, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ }
+ dst_set_neighbour(&rt->dst, n);
+
+ return 0;
}
static struct dst_ops ip6_dst_ops_template = {
@@ -144,7 +149,7 @@ static struct dst_ops ip6_dst_ops_template = {
.gc_thresh = 1024,
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
- .default_mtu = ip6_default_mtu,
+ .mtu = ip6_mtu,
.cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
@@ -155,9 +160,11 @@ static struct dst_ops ip6_dst_ops_template = {
.neigh_lookup = ip6_neigh_lookup,
};
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +182,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
- .default_mtu = ip6_blackhole_default_mtu,
+ .mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
@@ -245,9 +252,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
{
struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
- if (rt != NULL)
+ if (rt)
memset(&rt->rt6i_table, 0,
- sizeof(*rt) - sizeof(struct dst_entry));
+ sizeof(*rt) - sizeof(struct dst_entry));
return rt;
}
@@ -261,7 +268,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
if (!(rt->dst.flags & DST_HOST))
dst_destroy_metrics_generic(dst);
- if (idev != NULL) {
+ if (idev) {
rt->rt6i_idev = NULL;
in6_dev_put(idev);
}
@@ -297,10 +304,10 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
- if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
+ if (dev != loopback_dev && idev && idev->dev == dev) {
struct inet6_dev *loopback_idev =
in6_dev_get(loopback_dev);
- if (loopback_idev != NULL) {
+ if (loopback_idev) {
rt->rt6i_idev = loopback_idev;
in6_dev_put(idev);
}
@@ -310,7 +317,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
return (rt->rt6i_flags & RTF_EXPIRES) &&
- time_after(jiffies, rt->rt6i_expires);
+ time_after(jiffies, rt->dst.expires);
}
static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -336,13 +343,13 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
goto out;
for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
- struct net_device *dev = sprt->rt6i_dev;
+ struct net_device *dev = sprt->dst.dev;
if (oif) {
if (dev->ifindex == oif)
return sprt;
if (dev->flags & IFF_LOOPBACK) {
- if (sprt->rt6i_idev == NULL ||
+ if (!sprt->rt6i_idev ||
sprt->rt6i_idev->dev->ifindex != oif) {
if (flags & RT6_LOOKUP_F_IFACE && oif)
continue;
@@ -383,7 +390,7 @@ static void rt6_probe(struct rt6_info *rt)
* to no more than one per minute.
*/
rcu_read_lock();
- neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+ neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
goto out;
read_lock_bh(&neigh->lock);
@@ -397,7 +404,7 @@ static void rt6_probe(struct rt6_info *rt)
target = (struct in6_addr *)&neigh->primary_key;
addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
+ ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
} else {
read_unlock_bh(&neigh->lock);
}
@@ -415,7 +422,7 @@ static inline void rt6_probe(struct rt6_info *rt)
*/
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
- struct net_device *dev = rt->rt6i_dev;
+ struct net_device *dev = rt->dst.dev;
if (!oif || dev->ifindex == oif)
return 2;
if ((dev->flags & IFF_LOOPBACK) &&
@@ -430,7 +437,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
int m;
rcu_read_lock();
- neigh = dst_get_neighbour(&rt->dst);
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
@@ -516,9 +523,6 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
struct rt6_info *match, *rt0;
struct net *net;
- RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
- __func__, fn->leaf, oif);
-
rt0 = fn->rr_ptr;
if (!rt0)
fn->rr_ptr = rt0 = fn->leaf;
@@ -537,10 +541,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
fn->rr_ptr = next;
}
- RT6_TRACE("%s() => %p\n",
- __func__, match);
-
- net = dev_net(rt0->rt6i_dev);
+ net = dev_net(rt0->dst.dev);
return match ? match : net->ipv6.ip6_null_entry;
}
@@ -609,7 +610,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
if (!addrconf_finite_timeout(lifetime)) {
rt->rt6i_flags &= ~RTF_EXPIRES;
} else {
- rt->rt6i_expires = jiffies + HZ * lifetime;
+ rt->dst.expires = jiffies + HZ * lifetime;
rt->rt6i_flags |= RTF_EXPIRES;
}
dst_release(&rt->dst);
@@ -634,7 +635,7 @@ do { \
goto restart; \
} \
} \
-} while(0)
+} while (0)
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
struct fib6_table *table,
@@ -656,6 +657,13 @@ out:
}
+struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ int flags)
+{
+ return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
+}
+EXPORT_SYMBOL_GPL(ip6_route_lookup);
+
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
const struct in6_addr *saddr, int oif, int strict)
{
@@ -704,7 +712,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
int ip6_ins_rt(struct rt6_info *rt)
{
struct nl_info info = {
- .nl_net = dev_net(rt->rt6i_dev),
+ .nl_net = dev_net(rt->dst.dev),
};
return __ip6_ins_rt(rt, &info);
}
@@ -722,29 +730,27 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
rt = ip6_rt_copy(ort, daddr);
if (rt) {
- struct neighbour *neigh;
int attempts = !in_softirq();
- if (!(rt->rt6i_flags&RTF_GATEWAY)) {
- if (rt->rt6i_dst.plen != 128 &&
+ if (!(rt->rt6i_flags & RTF_GATEWAY)) {
+ if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
- ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+ rt->rt6i_gateway = *daddr;
}
rt->rt6i_flags |= RTF_CACHE;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
- ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+ rt->rt6i_src.addr = *saddr;
rt->rt6i_src.plen = 128;
}
#endif
retry:
- neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
- if (IS_ERR(neigh)) {
- struct net *net = dev_net(rt->rt6i_dev);
+ if (rt6_bind_neighbour(rt, rt->dst.dev)) {
+ struct net *net = dev_net(rt->dst.dev);
int saved_rt_min_interval =
net->ipv6.sysctl.ip6_rt_gc_min_interval;
int saved_rt_elasticity =
@@ -769,8 +775,6 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
dst_free(&rt->dst);
return NULL;
}
- dst_set_neighbour(&rt->dst, neigh);
-
}
return rt;
@@ -783,7 +787,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
if (rt) {
rt->rt6i_flags |= RTF_CACHE;
- dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
+ dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
}
return rt;
}
@@ -817,7 +821,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -873,7 +877,7 @@ void ip6_route_input(struct sk_buff *skb)
.flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
+ .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
};
@@ -930,9 +934,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
- rt->rt6i_expires = 0;
+ rt->dst.expires = 0;
- ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+ rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_metric = 0;
@@ -995,7 +999,7 @@ static void ip6_link_failure(struct sk_buff *skb)
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
- if (rt->rt6i_flags&RTF_CACHE) {
+ if (rt->rt6i_flags & RTF_CACHE) {
dst_set_expires(&rt->dst, 0);
rt->rt6i_flags |= RTF_EXPIRES;
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
@@ -1041,10 +1045,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
return mtu;
}
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = IPV6_MIN_MTU;
struct inet6_dev *idev;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+ return mtu;
+
+ mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
@@ -1060,34 +1069,38 @@ static DEFINE_SPINLOCK(icmp6_dst_lock);
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
- const struct in6_addr *addr)
+ struct flowi6 *fl6)
{
+ struct dst_entry *dst;
struct rt6_info *rt;
struct inet6_dev *idev = in6_dev_get(dev);
struct net *net = dev_net(dev);
- if (unlikely(idev == NULL))
+ if (unlikely(!idev))
return NULL;
rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
- if (unlikely(rt == NULL)) {
+ if (unlikely(!rt)) {
in6_dev_put(idev);
+ dst = ERR_PTR(-ENOMEM);
goto out;
}
if (neigh)
neigh_hold(neigh);
else {
- neigh = ndisc_get_neigh(dev, addr);
- if (IS_ERR(neigh))
- neigh = NULL;
+ neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
+ if (IS_ERR(neigh)) {
+ dst_free(&rt->dst);
+ return ERR_CAST(neigh);
+ }
}
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
dst_set_neighbour(&rt->dst, neigh);
atomic_set(&rt->dst.__refcnt, 1);
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1099,8 +1112,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
fib6_force_start_gc(net);
+ dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
+
out:
- return &rt->dst;
+ return dst;
}
int icmp6_dst_gc(void)
@@ -1230,21 +1245,30 @@ int ip6_route_add(struct fib6_config *cfg)
if (cfg->fc_metric == 0)
cfg->fc_metric = IP6_RT_PRIO_USER;
- table = fib6_new_table(net, cfg->fc_table);
- if (table == NULL) {
- err = -ENOBUFS;
- goto out;
+ err = -ENOBUFS;
+ if (cfg->fc_nlinfo.nlh &&
+ !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
+ table = fib6_get_table(net, cfg->fc_table);
+ if (!table) {
+ printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+ table = fib6_new_table(net, cfg->fc_table);
+ }
+ } else {
+ table = fib6_new_table(net, cfg->fc_table);
}
+ if (!table)
+ goto out;
+
rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
- if (rt == NULL) {
+ if (!rt) {
err = -ENOMEM;
goto out;
}
rt->dst.obsolete = -1;
- rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
+ rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
jiffies + clock_t_to_jiffies(cfg->fc_expires) :
0;
@@ -1287,8 +1311,9 @@ int ip6_route_add(struct fib6_config *cfg)
they would result in kernel looping; promote them to reject routes
*/
if ((cfg->fc_flags & RTF_REJECT) ||
- (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
- && !(cfg->fc_flags&RTF_LOCAL))) {
+ (dev && (dev->flags & IFF_LOOPBACK) &&
+ !(addr_type & IPV6_ADDR_LOOPBACK) &&
+ !(cfg->fc_flags & RTF_LOCAL))) {
/* hold loopback dev/idev if we haven't done so. */
if (dev != net->loopback_dev) {
if (dev) {
@@ -1315,7 +1340,7 @@ int ip6_route_add(struct fib6_config *cfg)
int gwa_type;
gw_addr = &cfg->fc_gateway;
- ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
+ rt->rt6i_gateway = *gw_addr;
gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -1329,26 +1354,26 @@ int ip6_route_add(struct fib6_config *cfg)
some exceptions. --ANK
*/
err = -EINVAL;
- if (!(gwa_type&IPV6_ADDR_UNICAST))
+ if (!(gwa_type & IPV6_ADDR_UNICAST))
goto out;
grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
err = -EHOSTUNREACH;
- if (grt == NULL)
+ if (!grt)
goto out;
if (dev) {
- if (dev != grt->rt6i_dev) {
+ if (dev != grt->dst.dev) {
dst_release(&grt->dst);
goto out;
}
} else {
- dev = grt->rt6i_dev;
+ dev = grt->dst.dev;
idev = grt->rt6i_idev;
dev_hold(dev);
in6_dev_hold(grt->rt6i_idev);
}
- if (!(grt->rt6i_flags&RTF_GATEWAY))
+ if (!(grt->rt6i_flags & RTF_GATEWAY))
err = 0;
dst_release(&grt->dst);
@@ -1356,12 +1381,12 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
err = -EINVAL;
- if (dev == NULL || (dev->flags&IFF_LOOPBACK))
+ if (!dev || (dev->flags & IFF_LOOPBACK))
goto out;
}
err = -ENODEV;
- if (dev == NULL)
+ if (!dev)
goto out;
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
@@ -1369,18 +1394,15 @@ int ip6_route_add(struct fib6_config *cfg)
err = -EINVAL;
goto out;
}
- ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+ rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
rt->rt6i_prefsrc.plen = 128;
} else
rt->rt6i_prefsrc.plen = 0;
if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
- struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
- if (IS_ERR(n)) {
- err = PTR_ERR(n);
+ err = rt6_bind_neighbour(rt, dev);
+ if (err)
goto out;
- }
- dst_set_neighbour(&rt->dst, n);
}
rt->rt6i_flags = cfg->fc_flags;
@@ -1426,7 +1448,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
int err;
struct fib6_table *table;
- struct net *net = dev_net(rt->rt6i_dev);
+ struct net *net = dev_net(rt->dst.dev);
if (rt == net->ipv6.ip6_null_entry)
return -ENOENT;
@@ -1445,7 +1467,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
int ip6_del_rt(struct rt6_info *rt)
{
struct nl_info info = {
- .nl_net = dev_net(rt->rt6i_dev),
+ .nl_net = dev_net(rt->dst.dev),
};
return __ip6_del_rt(rt, &info);
}
@@ -1458,7 +1480,7 @@ static int ip6_route_del(struct fib6_config *cfg)
int err = -ESRCH;
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
- if (table == NULL)
+ if (!table)
return err;
read_lock_bh(&table->tb6_lock);
@@ -1470,8 +1492,8 @@ static int ip6_route_del(struct fib6_config *cfg)
if (fn) {
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (cfg->fc_ifindex &&
- (rt->rt6i_dev == NULL ||
- rt->rt6i_dev->ifindex != cfg->fc_ifindex))
+ (!rt->dst.dev ||
+ rt->dst.dev->ifindex != cfg->fc_ifindex))
continue;
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
@@ -1533,7 +1555,7 @@ restart:
continue;
if (!(rt->rt6i_flags & RTF_GATEWAY))
continue;
- if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
+ if (fl6->flowi6_oif != rt->dst.dev->ifindex)
continue;
if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
continue;
@@ -1566,7 +1588,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
},
};
- ipv6_addr_copy(&rdfl.gateway, gateway);
+ rdfl.gateway = *gateway;
if (rt6_need_strict(dest))
flags |= RT6_LOOKUP_F_IFACE;
@@ -1611,18 +1633,18 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
dst_confirm(&rt->dst);
/* Duplicate redirect: silently ignore. */
- if (neigh == dst_get_neighbour_raw(&rt->dst))
+ if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
goto out;
nrt = ip6_rt_copy(rt, dest);
- if (nrt == NULL)
+ if (!nrt)
goto out;
nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;
- ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
+ nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
if (ip6_ins_rt(nrt))
@@ -1632,7 +1654,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
netevent.new = &nrt->dst;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
- if (rt->rt6i_flags&RTF_CACHE) {
+ if (rt->rt6i_flags & RTF_CACHE) {
ip6_del_rt(rt);
return;
}
@@ -1653,7 +1675,7 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr
int allfrag = 0;
again:
rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
- if (rt == NULL)
+ if (!rt)
return;
if (rt6_check_expired(rt)) {
@@ -1703,7 +1725,7 @@ again:
1. It is connected route. Action: COW
2. It is gatewayed route or NONEXTHOP route. Action: clone it.
*/
- if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, daddr, saddr);
else
nrt = rt6_alloc_clone(rt, daddr);
@@ -1759,7 +1781,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
const struct in6_addr *dest)
{
- struct net *net = dev_net(ort->rt6i_dev);
+ struct net *net = dev_net(ort->dst.dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
ort->dst.dev, 0);
@@ -1768,7 +1790,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
rt->dst.output = ort->dst.output;
rt->dst.flags |= DST_HOST;
- ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+ rt->rt6i_dst.addr = *dest;
rt->rt6i_dst.plen = 128;
dst_copy_metrics(&rt->dst, &ort->dst);
rt->dst.error = ort->dst.error;
@@ -1776,9 +1798,9 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->dst.lastuse = jiffies;
- rt->rt6i_expires = 0;
+ rt->dst.expires = 0;
- ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+ rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_metric = 0;
@@ -1801,7 +1823,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
struct fib6_table *table;
table = fib6_get_table(net, RT6_TABLE_INFO);
- if (table == NULL)
+ if (!table)
return NULL;
write_lock_bh(&table->tb6_lock);
@@ -1810,7 +1832,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
- if (rt->rt6i_dev->ifindex != ifindex)
+ if (rt->dst.dev->ifindex != ifindex)
continue;
if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
continue;
@@ -1841,8 +1863,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- ipv6_addr_copy(&cfg.fc_dst, prefix);
- ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+ cfg.fc_dst = *prefix;
+ cfg.fc_gateway = *gwaddr;
/* We should treat it as a default route if prefix length is 0. */
if (!prefixlen)
@@ -1860,12 +1882,12 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
- if (table == NULL)
+ if (!table)
return NULL;
write_lock_bh(&table->tb6_lock);
for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
- if (dev == rt->rt6i_dev &&
+ if (dev == rt->dst.dev &&
((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&rt->rt6i_gateway, addr))
break;
@@ -1891,7 +1913,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
.fc_nlinfo.nl_net = dev_net(dev),
};
- ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+ cfg.fc_gateway = *gwaddr;
ip6_route_add(&cfg);
@@ -1905,7 +1927,7 @@ void rt6_purge_dflt_routers(struct net *net)
/* NOTE: Keep consistent with rt6_get_dflt_router */
table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (table == NULL)
+ if (!table)
return;
restart:
@@ -1937,9 +1959,9 @@ static void rtmsg_to_fib6_config(struct net *net,
cfg->fc_nlinfo.nl_net = net;
- ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
- ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
- ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
+ cfg->fc_dst = rtmsg->rtmsg_dst;
+ cfg->fc_src = rtmsg->rtmsg_src;
+ cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -2038,14 +2060,14 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
- int anycast)
+ bool anycast)
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
net->loopback_dev, 0);
- struct neighbour *neigh;
+ int err;
- if (rt == NULL) {
+ if (!rt) {
if (net_ratelimit())
pr_warning("IPv6: Maximum number of routes reached,"
" consider increasing route/max_size.\n");
@@ -2065,15 +2087,13 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->rt6i_flags |= RTF_ANYCAST;
else
rt->rt6i_flags |= RTF_LOCAL;
- neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
- if (IS_ERR(neigh)) {
+ err = rt6_bind_neighbour(rt, rt->dst.dev);
+ if (err) {
dst_free(&rt->dst);
-
- return ERR_CAST(neigh);
+ return ERR_PTR(err);
}
- dst_set_neighbour(&rt->dst, neigh);
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
@@ -2091,7 +2111,7 @@ int ip6_route_get_saddr(struct net *net,
struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
int err = 0;
if (rt->rt6i_prefsrc.plen)
- ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+ *saddr = rt->rt6i_prefsrc.addr;
else
err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
daddr, prefs, saddr);
@@ -2111,7 +2131,7 @@ static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
struct net *net = ((struct arg_dev_net_ip *)arg)->net;
struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
- if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
+ if (((void *)rt->dst.dev == dev || !dev) &&
rt != net->ipv6.ip6_null_entry &&
ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
/* remove prefsrc entry */
@@ -2141,11 +2161,10 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
const struct arg_dev_net *adn = arg;
const struct net_device *dev = adn->dev;
- if ((rt->rt6i_dev == dev || dev == NULL) &&
- rt != adn->net->ipv6.ip6_null_entry) {
- RT6_TRACE("deleted by ifdown %p\n", rt);
+ if ((rt->dst.dev == dev || !dev) &&
+ rt != adn->net->ipv6.ip6_null_entry)
return -1;
- }
+
return 0;
}
@@ -2178,7 +2197,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
*/
idev = __in6_dev_get(arg->dev);
- if (idev == NULL)
+ if (!idev)
return 0;
/* For administrative MTU increase, there is no way to discover
@@ -2195,7 +2214,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
also have the lowest MTU, TOO BIG MESSAGE will be lead to
PMTU discouvery.
*/
- if (rt->rt6i_dev == arg->dev &&
+ if (rt->dst.dev == arg->dev &&
!dst_metric_locked(&rt->dst, RTAX_MTU) &&
(dst_mtu(&rt->dst) >= arg->mtu ||
(dst_mtu(&rt->dst) < arg->mtu &&
@@ -2344,11 +2363,13 @@ static int rt6_fill_node(struct net *net,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
+ const struct inet_peer *peer;
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
u32 table;
struct neighbour *n;
+ u32 ts, tsage;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2358,7 +2379,7 @@ static int rt6_fill_node(struct net *net,
}
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
- if (nlh == NULL)
+ if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
@@ -2372,25 +2393,25 @@ static int rt6_fill_node(struct net *net,
table = RT6_TABLE_UNSPEC;
rtm->rtm_table = table;
NLA_PUT_U32(skb, RTA_TABLE, table);
- if (rt->rt6i_flags&RTF_REJECT)
+ if (rt->rt6i_flags & RTF_REJECT)
rtm->rtm_type = RTN_UNREACHABLE;
- else if (rt->rt6i_flags&RTF_LOCAL)
+ else if (rt->rt6i_flags & RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
- else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
+ else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
- if (rt->rt6i_flags&RTF_DYNAMIC)
+ if (rt->rt6i_flags & RTF_DYNAMIC)
rtm->rtm_protocol = RTPROT_REDIRECT;
else if (rt->rt6i_flags & RTF_ADDRCONF)
rtm->rtm_protocol = RTPROT_KERNEL;
- else if (rt->rt6i_flags&RTF_DEFAULT)
+ else if (rt->rt6i_flags & RTF_DEFAULT)
rtm->rtm_protocol = RTPROT_RA;
- if (rt->rt6i_flags&RTF_CACHE)
+ if (rt->rt6i_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
if (dst) {
@@ -2430,7 +2451,7 @@ static int rt6_fill_node(struct net *net,
if (rt->rt6i_prefsrc.plen) {
struct in6_addr saddr_buf;
- ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+ saddr_buf = rt->rt6i_prefsrc.addr;
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
@@ -2438,24 +2459,31 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
rcu_read_lock();
- n = dst_get_neighbour(&rt->dst);
+ n = dst_get_neighbour_noref(&rt->dst);
if (n)
NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
rcu_read_unlock();
if (rt->dst.dev)
- NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
+ NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
if (!(rt->rt6i_flags & RTF_EXPIRES))
expires = 0;
- else if (rt->rt6i_expires - jiffies < INT_MAX)
- expires = rt->rt6i_expires - jiffies;
+ else if (rt->dst.expires - jiffies < INT_MAX)
+ expires = rt->dst.expires - jiffies;
else
expires = INT_MAX;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+ peer = rt->rt6i_peer;
+ ts = tsage = 0;
+ if (peer && peer->tcp_ts_stamp) {
+ ts = peer->tcp_ts;
+ tsage = get_seconds() - peer->tcp_ts_stamp;
+ }
+
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
expires, rt->dst.error) < 0)
goto nla_put_failure;
@@ -2504,14 +2532,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
goto errout;
- ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
+ fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
}
if (tb[RTA_DST]) {
if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
goto errout;
- ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
+ fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
}
if (tb[RTA_IIF])
@@ -2530,7 +2558,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (skb == NULL) {
+ if (!skb) {
err = -ENOBUFS;
goto errout;
}
@@ -2565,10 +2593,10 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
int err;
err = -ENOBUFS;
- seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
+ seq = info->nlh ? info->nlh->nlmsg_seq : 0;
skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
- if (skb == NULL)
+ if (!skb)
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
@@ -2635,7 +2663,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
rcu_read_lock();
- n = dst_get_neighbour(&rt->dst);
+ n = dst_get_neighbour_noref(&rt->dst);
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
@@ -2645,14 +2673,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
seq_printf(m, " %08x %08x %08x %08x %8s\n",
rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
rt->dst.__use, rt->rt6i_flags,
- rt->rt6i_dev ? rt->rt6i_dev->name : "");
+ rt->dst.dev ? rt->dst.dev->name : "");
return 0;
}
static int ipv6_route_show(struct seq_file *m, void *v)
{
struct net *net = (struct net *)m->private;
- fib6_clean_all(net, rt6_info_route, 0, m);
+ fib6_clean_all_ro(net, rt6_info_route, 0, m);
return 0;
}
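For reference, a minimal standalone sketch (not part of the patch) of why the ipv6_addr_copy() calls above can become plain assignments: struct in6_addr is an ordinary 16-byte struct, so assignment performs the same copy the helper wrapped, and lets the compiler choose how to do it. The addr_copy_old/addr_copy_new names are made up for illustration.
#include <string.h>
#include <netinet/in.h>         /* userspace stand-in for the kernel's struct in6_addr */

void addr_copy_old(struct in6_addr *dst, const struct in6_addr *src)
{
        memcpy(dst, src, sizeof(struct in6_addr));      /* what ipv6_addr_copy() did */
}

void addr_copy_new(struct in6_addr *dst, const struct in6_addr *src)
{
        *dst = *src;                                    /* what the patch switches to */
}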
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a18602a04..3b6dac956bb 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -91,7 +91,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
{
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip6_tunnel_link(sitn, nt);
@@ -680,7 +682,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct neighbour *neigh = NULL;
if (skb_dst(skb))
- neigh = dst_get_neighbour(skb_dst(skb));
+ neigh = dst_get_neighbour_noref(skb_dst(skb));
if (neigh == NULL) {
if (net_ratelimit())
@@ -705,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct neighbour *neigh = NULL;
if (skb_dst(skb))
- neigh = dst_get_neighbour(skb_dst(skb));
+ neigh = dst_get_neighbour_noref(skb_dst(skb));
if (neigh == NULL) {
if (net_ratelimit())
@@ -914,7 +916,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
#ifdef CONFIG_IPV6_SIT_6RD
} else {
- ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
+ ip6rd.prefix = t->ip6rd.prefix;
ip6rd.relay_prefix = t->ip6rd.relay_prefix;
ip6rd.prefixlen = t->ip6rd.prefixlen;
ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
@@ -1082,7 +1084,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if (relay_prefix != ip6rd.relay_prefix)
goto done;
- ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
+ t->ip6rd.prefix = prefix;
t->ip6rd.relay_prefix = relay_prefix;
t->ip6rd.prefixlen = ip6rd.prefixlen;
t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
+ struct ip_tunnel *t;
int err;
sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
if ((err = register_netdev(sitn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(sitn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5a0d6648bbb..8e951d8d3b8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->mss = mss;
ireq->rmt_port = th->source;
ireq->loc_port = th->dest;
- ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+ ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq6->loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 36131d122a6..906c7ca4354 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -62,6 +62,7 @@
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
#include <asm/uaccess.h>
@@ -153,7 +154,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -195,7 +196,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->write_seq = 0;
}
- ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+ np->daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -244,9 +245,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr,
- (saddr ? saddr : &np->saddr));
+ fl6.daddr = np->daddr;
+ fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
@@ -264,11 +264,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- ipv6_addr_copy(&np->rcv_saddr, saddr);
+ np->rcv_saddr = *saddr;
}
/* set the source address */
- ipv6_addr_copy(&np->saddr, saddr);
+ np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
@@ -398,8 +398,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
@@ -489,8 +489,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
- ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+ fl6.daddr = treq->rmt_addr;
+ fl6.saddr = treq->loc_addr;
fl6.flowlabel = 0;
fl6.flowi6_oif = treq->iif;
fl6.flowi6_mark = sk->sk_mark;
@@ -512,7 +512,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+ fl6.daddr = treq->rmt_addr;
err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -617,8 +617,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
tp->md5sig_info->alloced6++;
}
- ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
- peer);
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
@@ -750,8 +749,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
bp = &hp->md5_blk.ip6;
/* 1. TCP pseudo-header (RFC2460) */
- ipv6_addr_copy(&bp->saddr, saddr);
- ipv6_addr_copy(&bp->daddr, daddr);
+ bp->saddr = *saddr;
+ bp->daddr = *daddr;
bp->protocol = cpu_to_be32(IPPROTO_TCP);
bp->len = cpu_to_be32(nbytes);
@@ -1039,8 +1038,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
#endif
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->saddr;
+ fl6.saddr = ipv6_hdr(skb)->daddr;
buff->ip_summed = CHECKSUM_PARTIAL;
buff->csum = 0;
@@ -1250,11 +1249,18 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_openreq_init(req, &tmp_opt, skb);
treq = inet6_rsk(req);
- ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+ treq->rmt_addr = ipv6_hdr(skb)->saddr;
+ treq->loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
+ treq->iif = sk->sk_bound_dev_if;
+
+ /* So that link locals have meaning */
+ if (!sk->sk_bound_dev_if &&
+ ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ treq->iif = inet6_iif(skb);
+
if (!isn) {
struct inet_peer *peer = NULL;
@@ -1264,12 +1270,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
atomic_inc(&skb->users);
treq->pktopts = skb;
}
- treq->iif = sk->sk_bound_dev_if;
-
- /* So that link locals have meaning */
- if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- treq->iif = inet6_iif(skb);
if (want_cookie) {
isn = cookie_v6_init_sequence(sk, skb, &req->mss);
@@ -1380,7 +1380,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+ newnp->rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1444,9 +1444,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
- ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
- ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+ newnp->daddr = treq->rmt_addr;
+ newnp->saddr = treq->loc_addr;
+ newnp->rcv_saddr = treq->loc_addr;
newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
@@ -1995,7 +1995,8 @@ static int tcp_v6_init_sock(struct sock *sk)
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
local_bh_disable();
- percpu_counter_inc(&tcp_sockets_allocated);
+ sock_update_memcg(sk);
+ sk_sockets_allocated_inc(sk);
local_bh_enable();
return 0;
@@ -2214,7 +2215,6 @@ struct proto tcpv6_prot = {
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
.orphan_count = &tcp_orphan_count,
- .sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
@@ -2228,6 +2228,9 @@ struct proto tcpv6_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+ .proto_cgroup = tcp_proto_cgroup,
+#endif
};
static const struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 846f4757eb8..4f96b5c6368 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -238,7 +238,7 @@ exact_match:
return result;
}
-static struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
@@ -305,6 +305,7 @@ begin:
rcu_read_unlock();
return result;
}
+EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
@@ -340,7 +341,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +364,10 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +378,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov,len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
@@ -417,8 +419,7 @@ try_again:
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
else {
- ipv6_addr_copy(&sin6->sin6_addr,
- &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
@@ -432,7 +433,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
@@ -538,7 +539,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
goto drop;
}
- if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
+ skb_dst_drop(skb);
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0) {
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
@@ -1113,11 +1116,11 @@ do_udp_sendmsg:
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
- ipv6_addr_copy(&fl6.daddr, daddr);
+ fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1298,7 +1301,8 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
return 0;
}
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
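As a hedged illustration of the recvmsg() semantics the udpv6_recvmsg() hunks above preserve (a sketch with a hypothetical helper, not code from this patch): the call returns the number of bytes actually copied into the buffer, and MSG_TRUNC in msg_flags signals that the datagram was larger than the buffer.
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

ssize_t read_datagram(int fd, void *buf, size_t buflen, int *truncated)
{
        struct iovec iov = { .iov_base = buf, .iov_len = buflen };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n = recvmsg(fd, &msg, 0);       /* n == "copied" in the kernel code */

        if (truncated)
                *truncated = (n >= 0) && (msg.msg_flags & MSG_TRUNC);
        return n;
}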
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 3437d7d4eed..a81ce945075 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->nexthdr = IPPROTO_BEETPH;
}
- ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+ top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+ top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
@@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h = ipv6_hdr(skb);
ip6h->payload_len = htons(skb->len - size);
- ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
- ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
+ ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
+ ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
err = 0;
out:
return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4d6edff0498..261e6e6f487 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
dsfield &= ~INET_ECN_MASK;
ipv6_change_dsfield(top_iph, 0, dsfield);
top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
- ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
+ top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+ top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index faae41737fc..4eeff89c1aa 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
struct sock *sk = skb->sk;
fl6.flowi6_oif = sk->sk_bound_dev_if;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_rxpmtu(sk, &fl6, mtu);
}
@@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
struct sock *sk = skb->sk;
fl6.fl6_dport = inet_sk(sk)->inet_dport;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d879f7efbd1..8ea65e03273 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
- ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
- ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
+ fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+ fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
while (nh + offset + 1 < skb->data ||
pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f2d72b8a3fa..3f2f7c4ab72 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
/* Initialize temporary selector matching only
* to current session. */
- ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
- ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+ *(struct in6_addr *)&sel->daddr = fl6->daddr;
+ *(struct in6_addr *)&sel->saddr = fl6->saddr;
sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
sel->dport_mask = htons(0xffff);
sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c24f25ab67d..bb14c347768 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2558,8 +2558,8 @@ bed:
self->errno = 0;
setup_timer(&self->watchdog, irda_discovery_timeout,
(unsigned long)self);
- self->watchdog.expires = jiffies + (val * HZ/1000);
- add_timer(&(self->watchdog));
+ mod_timer(&self->watchdog,
+ jiffies + msecs_to_jiffies(val));
/* Wait for IR-LMP to call us back */
__wait_event_interruptible(self->query_wait,
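A small, hypothetical sketch of the timer idiom the af_irda hunk above adopts: mod_timer() both activates an inactive timer and updates a pending one, so the open-coded expires assignment plus add_timer() pair is unnecessary, and msecs_to_jiffies() makes the unit conversion explicit. The demo_* names are invented for illustration; the callback signature matches timers of this kernel era.
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_watchdog;         /* hypothetical timer */

static void demo_timeout(unsigned long data)
{
        /* handle the timeout */
}

static void arm_demo_watchdog(unsigned int msecs)
{
        setup_timer(&demo_watchdog, demo_timeout, 0);
        mod_timer(&demo_watchdog, jiffies + msecs_to_jiffies(msecs));
}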
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 77911763627..579617cca12 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -67,7 +67,7 @@ static void *ckey;
static void *skey;
/* Module parameters */
-static int eth; /* Use "eth" or "irlan" name for devices */
+static bool eth; /* Use "eth" or "irlan" name for devices */
static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
#ifdef CONFIG_PROC_FS
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 32e3bb02611..5c93f2952b0 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1461,14 +1461,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
}
/* Allocate a new instance */
- new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
- /* Dup */
- memcpy(new, orig, sizeof(struct tsap_cb));
spin_lock_init(&new->lock);
/* We don't need the old instance any more */
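For reference, a simplified sketch of what kmemdup() does (the real version lives in mm/util.c), which is why the separate kmalloc() plus memcpy() pair in irttp_dup() collapses into a single call; kmemdup_sketch is a renamed illustration, not the kernel's definition.
#include <linux/slab.h>
#include <linux/string.h>

static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
{
        void *p = kmalloc(len, gfp);

        if (p)
                memcpy(p, src, len);
        return p;
}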
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 274d150320c..d5c5b8fd1d0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -130,6 +130,17 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
memcpy(&dst[8], src, 8);
}
+static void iucv_skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ if (skb->dev)
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
+}
+
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
@@ -164,10 +175,9 @@ static int afiucv_pm_freeze(struct device *dev)
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, node, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
- skb_queue_purge(&iucv->send_skb_q);
+ iucv_skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
switch (sk->sk_state) {
- case IUCV_SEVERED:
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CONNECTED:
@@ -212,7 +222,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
sk->sk_state_change(sk);
break;
case IUCV_DISCONN:
- case IUCV_SEVERED:
case IUCV_CLOSING:
case IUCV_LISTEN:
case IUCV_BOUND:
@@ -366,9 +375,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
- rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
- rcu_read_unlock();
+ skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
if (!skb->dev)
return -ENODEV;
if (!(skb->dev->flags & IFF_UP))
@@ -388,6 +395,7 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
err = dev_queue_xmit(skb);
if (err) {
skb_unlink(nskb, &iucv->send_skb_q);
+ dev_put(nskb->dev);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
@@ -396,25 +404,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
return err;
}
-/* Timers */
-static void iucv_sock_timeout(unsigned long arg)
-{
- struct sock *sk = (struct sock *)arg;
-
- bh_lock_sock(sk);
- sk->sk_err = ETIMEDOUT;
- sk->sk_state_change(sk);
- bh_unlock_sock(sk);
-
- iucv_sock_kill(sk);
- sock_put(sk);
-}
-
-static void iucv_sock_clear_timer(struct sock *sk)
-{
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
@@ -467,7 +456,6 @@ static void iucv_sock_close(struct sock *sk)
int err, blen;
struct sk_buff *skb;
- iucv_sock_clear_timer(sk);
lock_sock(sk);
switch (sk->sk_state) {
@@ -481,16 +469,14 @@ static void iucv_sock_close(struct sock *sk)
blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
skb = sock_alloc_send_skb(sk, blen, 1, &err);
if (skb) {
- skb_reserve(skb,
- sizeof(struct af_iucv_trans_hdr) +
- ETH_HLEN);
+ skb_reserve(skb, blen);
err = afiucv_hs_send(NULL, sk, skb,
AF_IUCV_FLAG_FIN);
}
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- case IUCV_DISCONN:
+ case IUCV_DISCONN: /* fall through */
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
@@ -520,7 +506,7 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
- skb_queue_purge(&iucv->send_skb_q);
+ iucv_skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
break;
@@ -581,8 +567,6 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
sk->sk_protocol = proto;
sk->sk_state = IUCV_OPEN;
- setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
-
iucv_sock_link(&iucv_sk_list, sk);
return sk;
}
@@ -675,16 +659,12 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == IUCV_CONNECTED ||
- sk->sk_state == IUCV_SEVERED ||
- sk->sk_state == IUCV_DISCONN || /* due to PM restore */
+ sk->sk_state == IUCV_DISCONN ||
!newsock) {
iucv_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
- if (sk->sk_state == IUCV_SEVERED)
- sk->sk_state = IUCV_DISCONN;
-
release_sock(sk);
return sk;
}
@@ -739,7 +719,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
if (!memcmp(dev->perm_addr, uid, 8)) {
memcpy(iucv->src_name, sa->siucv_name, 8);
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
- sock->sk->sk_bound_dev_if = dev->ifindex;
+ sk->sk_bound_dev_if = dev->ifindex;
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_HIPER;
if (!iucv->msglimit)
@@ -774,16 +754,13 @@ done:
static int iucv_sock_autobind(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
- char query_buffer[80];
char name[12];
int err = 0;
- /* Set the userid and name */
- cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
- if (unlikely(err))
+ if (unlikely(!pr_iucv))
return -EPROTO;
- memcpy(iucv->src_user_id, query_buffer, 8);
+ memcpy(iucv->src_user_id, iucv_userid, 8);
write_lock_bh(&iucv_sk_list.lock);
@@ -1225,6 +1202,8 @@ release:
return len;
fail:
+ if (skb->dev)
+ dev_put(skb->dev);
kfree_skb(skb);
out:
release_sock(sk);
@@ -1357,7 +1336,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
int blen;
int err = 0;
- if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
skb_queue_empty(&sk->sk_receive_queue) &&
list_empty(&iucv->message_q.list))
@@ -1441,9 +1420,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
ETH_HLEN;
sskb = sock_alloc_send_skb(sk, blen, 1, &err);
if (sskb) {
- skb_reserve(sskb,
- sizeof(struct af_iucv_trans_hdr)
- + ETH_HLEN);
+ skb_reserve(sskb, blen);
err = afiucv_hs_send(NULL, sk, sskb,
AF_IUCV_FLAG_WIN);
}
@@ -1506,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
if (sk->sk_state == IUCV_CLOSED)
mask |= POLLHUP;
- if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+ if (sk->sk_state == IUCV_DISCONN)
mask |= POLLIN;
if (sock_writeable(sk))
@@ -1533,7 +1510,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
- case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
@@ -1888,10 +1864,7 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
struct sock *sk = path->private;
- if (!list_empty(&iucv_sk(sk)->accept_q))
- sk->sk_state = IUCV_SEVERED;
- else
- sk->sk_state = IUCV_DISCONN;
+ sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
@@ -2051,10 +2024,7 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
/* other end of connection closed */
if (iucv) {
bh_lock_sock(sk);
- if (!list_empty(&iucv->accept_q))
- sk->sk_state = IUCV_SEVERED;
- else
- sk->sk_state = IUCV_DISCONN;
+ sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
}
@@ -2209,6 +2179,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
break;
case 0:
/* plain data frame */
+ memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
+ CB_TRGCLS_LEN);
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
@@ -2259,6 +2231,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
case TX_NOTIFY_OK:
__skb_unlink(this, list);
iucv_sock_wake_msglim(sk);
+ dev_put(this->dev);
kfree_skb(this);
break;
case TX_NOTIFY_PENDING:
@@ -2269,6 +2242,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
atomic_dec(&iucv->pendings);
if (atomic_read(&iucv->pendings) <= 0)
iucv_sock_wake_msglim(sk);
+ dev_put(this->dev);
kfree_skb(this);
break;
case TX_NOTIFY_UNREACHABLE:
@@ -2277,11 +2251,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
case TX_NOTIFY_GENERALERROR:
case TX_NOTIFY_DELAYED_GENERALERROR:
__skb_unlink(this, list);
+ dev_put(this->dev);
kfree_skb(this);
- if (!list_empty(&iucv->accept_q))
- sk->sk_state = IUCV_SEVERED;
- else
- sk->sk_state = IUCV_DISCONN;
+ sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
break;
}
@@ -2291,6 +2263,13 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
}
spin_unlock_irqrestore(&list->lock, flags);
+ if (sk->sk_state == IUCV_CLOSING) {
+ if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+ sk->sk_state = IUCV_CLOSED;
+ sk->sk_state_change(sk);
+ }
+ }
+
out_unlock:
bh_unlock_sock(sk);
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1e733e9073d..11dbb2255cc 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -375,7 +375,7 @@ static int verify_address_len(const void *p)
const struct sadb_address *sp = p;
const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
const struct sockaddr_in *sin;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
const struct sockaddr_in6 *sin6;
#endif
int len;
@@ -387,7 +387,7 @@ static int verify_address_len(const void *p)
sp->sadb_address_prefixlen > 32)
return -EINVAL;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
if (sp->sadb_address_len != len ||
@@ -469,7 +469,7 @@ static int present_and_same_family(const struct sadb_address *src,
if (s_addr->sa_family != d_addr->sa_family)
return 0;
if (s_addr->sa_family != AF_INET
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
&& s_addr->sa_family != AF_INET6
#endif
)
@@ -579,7 +579,7 @@ static inline int pfkey_sockaddr_len(sa_family_t family)
switch (family) {
case AF_INET:
return sizeof(struct sockaddr_in);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return sizeof(struct sockaddr_in6);
#endif
@@ -595,7 +595,7 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
xaddr->a4 =
((struct sockaddr_in *)sa)->sin_addr.s_addr;
return AF_INET;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
memcpy(xaddr->a6,
&((struct sockaddr_in6 *)sa)->sin6_addr,
@@ -639,7 +639,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
case AF_INET:
xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
break;
@@ -705,14 +705,14 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
return 32;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = port;
sin6->sin6_flowinfo = 0;
- ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
+ sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
sin6->sin6_scope_id = 0;
return 128;
}
@@ -1311,7 +1311,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
@@ -3146,7 +3146,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
return NULL;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_IPSEC_POLICY) {
*dir = -EOPNOTSUPP;
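A compressed sketch of the preprocessor trick behind IS_ENABLED(), which the af_key.c hunks above switch to (the kernel's definition lives in include/linux/kconfig.h; the macro here is renamed to mark it as an illustration): it evaluates to 1 when the option is built in (CONFIG_FOO defined as 1) or built as a module (CONFIG_FOO_MODULE defined as 1), and to 0 otherwise, in both #if and C expressions.
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED_SKETCH(option) (__is_defined(option) || __is_defined(option##_MODULE))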
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index cf0f308abf5..89ff8c67943 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+ skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
inet = inet_sk(sk);
fl = &inet->cork.fl;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a55..a18e6c3d36e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
copied += used;
len -= used;
+ /* For non-stream protocols we get one packet per recvmsg call */
+ if (sk->sk_type != SOCK_STREAM)
+ goto copy_uaddr;
+
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, 0);
*seq = 0;
}
- /* For non stream protcols we get one packet per recvmsg call */
- if (sk->sk_type != SOCK_STREAM)
- goto copy_uaddr;
-
/* Partial read */
if (used + offset < skb->len)
continue;
@@ -857,6 +857,12 @@ copy_uaddr:
}
if (llc_sk(sk)->cmsg_flags)
llc_cmsg_rcv(msg, skb);
+
+ if (!(flags & MSG_PEEK)) {
+ sk_eat_skb(sk, skb, 0);
+ *seq = 0;
+ }
+
goto out;
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 7d3b438755f..96ddb72760b 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -247,15 +247,3 @@ config MAC80211_DEBUG_COUNTERS
and show them in debugfs.
If unsure, say N.
-
-config MAC80211_DRIVER_API_TRACER
- bool "Driver API tracer"
- depends on MAC80211_DEBUG_MENU
- depends on EVENT_TRACING
- help
- Say Y here to make mac80211 register with the ftrace
- framework for the driver API -- you can then see which
- driver methods it is calling and which API functions
- drivers are calling by looking at the trace.
-
- If unsure, say Y.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index fdb54e61d63..d540c3b160f 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -24,7 +24,8 @@ mac80211-y := \
util.o \
wme.o \
event.o \
- chan.o
+ chan.o \
+ driver-trace.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -41,7 +42,6 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mac80211-$(CONFIG_PM) += pm.o
-mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
CFLAGS_driver-trace.o := -I$(src)
# objects for PID algorithm
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 93b24342265..96debba2c40 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -73,8 +73,11 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
- sta->sta.addr, tid);
+ printk(KERN_DEBUG
+ "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
+ sta->sta.addr, tid,
+ initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
+ (int)reason);
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
@@ -85,7 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
/* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT && tx)
ieee80211_send_delba(sta->sdata, sta->sta.addr,
- tid, 0, reason);
+ tid, WLAN_BACK_RECIPIENT, reason);
del_timer_sync(&tid_rx->session_timer);
del_timer_sync(&tid_rx->reorder_timer);
@@ -109,7 +112,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
int i;
rcu_read_lock();
- sta = sta_info_get(sdata, addr);
+ sta = sta_info_get_bss(sdata, addr);
if (!sta) {
rcu_read_unlock();
return;
@@ -177,10 +180,13 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b3f65520e7a..76be6174419 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -55,6 +55,8 @@
* @ampdu_action function will be called with the action
* %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
* and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
+ * Note that the sta can get destroyed before the BA tear down is
+ * complete.
*/
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -78,10 +80,13 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -102,7 +107,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb(sdata, skb);
+ ieee80211_tx_skb_tid(sdata, skb, tid);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -131,7 +136,7 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
bar->start_seq_num = cpu_to_le16(ssn);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
+ ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);
@@ -161,6 +166,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return -ENOENT;
}
+ /* if we're already stopping ignore any new requests to stop */
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ spin_unlock_bh(&sta->lock);
+ return -EALREADY;
+ }
+
if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -169,6 +180,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return 0;
}
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -176,9 +189,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
del_timer_sync(&tid_tx->addba_resp_timer);
+ del_timer_sync(&tid_tx->session_timer);
/*
* After this packets are no longer handed right through
@@ -187,6 +199,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
+ /*
+ * There might be a few packets being processed right now (on
+ * another CPU) that have already gotten past the aggregation
+ * check when it was still OPERATIONAL and consequently have
+ * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+ * call into the driver at the same time or even before the
+ * TX paths calls into it, which could confuse the driver.
+ *
+ * Wait for all currently running TX paths to finish before
+ * telling the driver. New packets will not go through since
+ * the aggregation session is no longer OPERATIONAL.
+ */
+ synchronize_net();
+
tid_tx->stop_initiator = initiator;
tid_tx->tx_stop = tx;
@@ -283,6 +309,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
__release(agg_queue);
}
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+ unsigned long flags;
+
+ ieee80211_stop_queue_agg(local, tid);
+
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
+ return;
+
+ if (!skb_queue_empty(&tid_tx->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+ ieee80211_wake_queue_agg(local, tid);
+}
+
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
@@ -294,19 +352,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
+ * Start queuing up packets for this aggregation session.
+ * We're going to release them once the driver is OK with
+ * that.
*/
- ieee80211_stop_queue_agg(local, tid);
-
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/*
- * make sure no packets are being processed to get
- * valid starting sequence number
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+ * packets have been flushed out and no packets for this TID
+ * will go into the driver during the ampdu_action call.
*/
synchronize_net();
@@ -320,17 +376,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
" tid %d\n", tid);
#endif
spin_lock_bh(&sta->lock);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
+ ieee80211_agg_splice_finish(local, tid);
spin_unlock_bh(&sta->lock);
- ieee80211_wake_queue_agg(local, tid);
kfree_rcu(tid_tx, rcu_head);
return;
}
- /* we can take packets again now */
- ieee80211_wake_queue_agg(local, tid);
-
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -338,6 +392,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
#endif
spin_lock_bh(&sta->lock);
+ sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
sta->ampdu_mlme.addba_req_num[tid]++;
spin_unlock_bh(&sta->lock);
@@ -348,6 +403,28 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
tid_tx->timeout);
}
+/*
+ * After accepting the AddBA Response we activated a timer,
+ * resetting it after each frame that we send.
+ */
+static void sta_tx_agg_session_timer_expired(unsigned long data)
+{
+ /* not an elegant detour, but there is no choice as the timer passes
+ * only one argument, and both the sta_info and the TID are needed, so
+ * the init flow in sta_info_create gives the TID as data, while the
+ * timer_to_tid array gives the sta through container_of */
+ u8 *ptid = (u8 *)data;
+ u8 *timer_to_id = ptid - *ptid;
+ struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+ timer_to_tid[0]);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
+#endif
+
+ ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+}
+
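A standalone illustration (with a hypothetical struct) of the timer_to_tid trick used by the timer callback just added: every array slot is initialised to its own index, so a callback that only receives a pointer to one slot can recover both the TID and, via container_of(), the enclosing station structure.
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_sta {
        u8 timer_to_tid[16];    /* slot i holds the value i */
};

static void demo_session_timer_expired(unsigned long data)
{
        u8 *ptid = (u8 *)data;          /* points at &sta->timer_to_tid[tid] */
        u8 *base = ptid - *ptid;        /* back to &sta->timer_to_tid[0] */
        struct demo_sta *sta = container_of(base, struct demo_sta,
                                            timer_to_tid[0]);

        /* both sta and the tid (*ptid) are available here */
        (void)sta;
}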
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
u16 timeout)
{
@@ -372,15 +449,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- /*
- * The aggregation code is not prepared to handle
- * anything but STA/AP due to the BSSID handling.
- * IBSS could work in the code but isn't supported
- * by drivers or the standard.
- */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- sdata->vif.type != NL80211_IFTYPE_AP)
+ sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
return -EINVAL;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
@@ -391,6 +464,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
}
+ /*
+ * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
+ * member of an IBSS, and has no other existing Block Ack agreement
+ * with the recipient STA, then the initiating STA shall transmit a
+ * Probe Request frame to the recipient STA and shall not transmit an
+ * ADDBA Request frame unless it receives a Probe Response frame
+ * from the recipient within dot11ADDBAFailureTimeout.
+ *
+ * The probe request mechanism for ADDBA is currently not implemented,
+ * but we only build up Block Ack session with HT STAs. This information
+ * is set when we receive a bss info from a probe response or a beacon.
+ */
+ if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+ !sta->sta.ht_cap.ht_supported) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
+ "does not advertise HT support\n", pubsta->addr);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+ return -EINVAL;
+ }
+
spin_lock_bh(&sta->lock);
/* we have tried too many times, receiver does not want A-MPDU */
@@ -399,6 +493,24 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
goto err_unlock_sta;
}
+ /*
+ * if we have tried more than HT_AGG_BURST_RETRIES times we
+ * will spread our requests in time to avoid stalling connection
+ * for too long
+ */
+ if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
+ time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
+ HT_AGG_RETRIES_PERIOD)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "BA request denied - "
+ "waiting a grace period after %d failed requests "
+ "on tid %u\n",
+ sta->ampdu_mlme.addba_req_num[tid], tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ret = -EBUSY;
+ goto err_unlock_sta;
+ }
+
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/* check if the TID is not in aggregation flow already */
if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
@@ -422,11 +534,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
tid_tx->timeout = timeout;
- /* Tx timer */
+ /* response timer */
tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
init_timer(&tid_tx->addba_resp_timer);
+ /* tx timer */
+ tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
+ tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_tx->session_timer);
+
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
@@ -446,38 +563,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct tid_ampdu_tx *tid_tx, u16 tid)
-{
- int queue = ieee80211_ac_from_tid(tid);
- unsigned long flags;
-
- ieee80211_stop_queue_agg(local, tid);
-
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
- return;
-
- if (!skb_queue_empty(&tid_tx->pending)) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* copy over remaining packets */
- skb_queue_splice_tail_init(&tid_tx->pending,
- &local->pending[queue]);
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
- ieee80211_wake_queue_agg(local, tid);
-}
-
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
@@ -531,7 +616,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
}
mutex_lock(&local->sta_mtx);
- sta = sta_info_get(sdata, ra);
+ sta = sta_info_get_bss(sdata, ra);
if (!sta) {
mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -660,7 +745,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
mutex_lock(&local->sta_mtx);
- sta = sta_info_get(sdata, ra);
+ sta = sta_info_get_bss(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -757,11 +842,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
}
- del_timer(&tid_tx->addba_resp_timer);
+ del_timer_sync(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
+
+ /*
+ * addba_resp_timer may have fired before we got here, and
+ * caused WANT_STOP to be set. If that stop has already been
+ * processed further, STOPPING might be set.
+ */
+ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+ test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG
+ "got addBA resp for tid %d but we already gave up\n",
+ tid);
+#endif
+ goto out;
+ }
+
/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
@@ -782,6 +883,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0;
+
+ if (tid_tx->timeout)
+ mod_timer(&tid_tx->session_timer,
+ TU_TO_EXP_TIME(tid_tx->timeout));
+
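The session timeout is expressed in 802.11 time units (one TU is 1024 microseconds), which TU_TO_EXP_TIME converts to an absolute jiffies value. A standalone conversion sketch, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* One 802.11 Time Unit (TU) is 1024 microseconds. */
static uint64_t tu_to_usec(uint64_t tu)
{
	return tu * 1024;
}

static uint64_t tu_to_msec(uint64_t tu)
{
	return tu_to_usec(tu) / 1000;
}

int main(void)
{
	/* e.g. an ADDBA timeout of 5000 TU is roughly 5.12 seconds */
	printf("%llu ms\n", (unsigned long long)tu_to_msec(5000));
	return 0;
}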
} else {
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
true);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d06c65fa552..850bb96bd68 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -102,6 +102,16 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
return 0;
}
+static int ieee80211_set_noack_map(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 noack_map)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ sdata->noack_map = noack_map;
+ return 0;
+}
+
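The noack_map installed above is a 16-bit bitmap with one bit per TID; a minimal sketch of how such a map could be consulted on the TX path (names are hypothetical, not mac80211's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true if the given TID (0..15) is configured for no-ack policy. */
static bool tid_is_noack(uint16_t noack_map, unsigned int tid)
{
	return noack_map & (1u << tid);
}

int main(void)
{
	uint16_t map = (1u << 1) | (1u << 2); /* background TIDs, for example */

	printf("TID 1: %d\n", tid_is_noack(map, 1)); /* 1 */
	printf("TID 0: %d\n", tid_is_noack(map, 0)); /* 0 */
	return 0;
}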
static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
@@ -345,7 +355,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
STATION_INFO_RX_DROP_MISC |
STATION_INFO_BSS_PARAM |
STATION_INFO_CONNECTED_TIME |
- STATION_INFO_STA_FLAGS;
+ STATION_INFO_STA_FLAGS |
+ STATION_INFO_BEACON_LOSS_COUNT;
do_posix_clock_monotonic_gettime(&uptime);
sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -358,6 +369,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->tx_retries = sta->tx_retry_count;
sinfo->tx_failed = sta->tx_retry_failed;
sinfo->rx_dropped_misc = sta->rx_dropped;
+ sinfo->beacon_loss_count = sta->beacon_loss_count;
if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
(sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
@@ -411,7 +423,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
BIT(NL80211_STA_FLAG_MFP) |
- BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+ BIT(NL80211_STA_FLAG_TDLS_PEER);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
@@ -422,6 +435,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
if (test_sta_flag(sta, WLAN_STA_AUTH))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
}
@@ -488,6 +503,31 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
(params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
}
+static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
+ u8 *resp, size_t resp_len)
+{
+ struct sk_buff *new, *old;
+
+ if (!resp || !resp_len)
+ return -EINVAL;
+
+ old = rtnl_dereference(sdata->u.ap.probe_resp);
+
+ new = dev_alloc_skb(resp_len);
+ if (!new)
+ return -ENOMEM;
+
+ memcpy(skb_put(new, resp_len), resp, resp_len);
+
+ rcu_assign_pointer(sdata->u.ap.probe_resp, new);
+ synchronize_rcu();
+
+ if (old)
+ dev_kfree_skb(old);
+
+ return 0;
+}
+
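The probe response buffer is replaced with the usual publish-then-free ordering: allocate and fill the new buffer, publish the pointer, wait for readers (synchronize_rcu), then free the old one. A simplified single-threaded userspace analogue of that ordering, with no real RCU:

#include <stdlib.h>
#include <string.h>

struct resp {
	size_t len;
	unsigned char data[];
};

static struct resp *current_resp;

/* Build a replacement first, only then retire the old buffer, so a
 * reader never observes a half-initialised object. In the kernel the
 * "wait for readers" step is synchronize_rcu(); here it is a no-op. */
static int set_resp(const unsigned char *buf, size_t len)
{
	struct resp *new, *old;

	if (!buf || !len)
		return -1;

	new = malloc(sizeof(*new) + len);
	if (!new)
		return -1;

	new->len = len;
	memcpy(new->data, buf, len);

	old = current_resp;
	current_resp = new;	/* publish */
	/* ... wait for concurrent readers here ... */
	free(old);
	return 0;
}

int main(void)
{
	unsigned char ie[] = { 0x00, 0x04, 't', 'e', 's', 't' };

	set_resp(ie, sizeof(ie));
	set_resp(ie, sizeof(ie)); /* second call frees the first buffer */
	free(current_resp);
	return 0;
}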
/*
* This handles both adding a beacon and setting new beacon info
*/
@@ -498,6 +538,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
int new_head_len, new_tail_len;
int size;
int err = -EINVAL;
+ u32 changed = 0;
old = rtnl_dereference(sdata->u.ap.beacon);
@@ -581,11 +622,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
kfree(old);
+ err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+ params->probe_resp_len);
+ if (!err)
+ changed |= BSS_CHANGED_AP_PROBE_RESP;
+
ieee80211_config_ap_ssid(sdata, params);
+ changed |= BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_BEACON |
+ BSS_CHANGED_SSID;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON |
- BSS_CHANGED_SSID);
+ ieee80211_bss_info_change_notify(sdata, changed);
return 0;
}
@@ -594,6 +641,8 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata;
struct beacon_data *old;
+ struct ieee80211_sub_if_data *vlan;
+ int ret;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -601,7 +650,24 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
- return ieee80211_config_beacon(sdata, params);
+ ret = ieee80211_config_beacon(sdata, params);
+ if (ret)
+ return ret;
+
+ /*
+ * Apply the control port protocol; this allows us not to
+ * encrypt dynamic WEP control frames.
+ */
+ sdata->control_port_protocol = params->crypto.control_port_ethertype;
+ sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
+ vlan->control_port_protocol =
+ params->crypto.control_port_ethertype;
+ vlan->control_port_no_encrypt =
+ params->crypto.control_port_no_encrypt;
+ }
+
+ return 0;
}
static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -682,10 +748,11 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
netif_rx_ni(skb);
}
-static void sta_apply_parameters(struct ieee80211_local *local,
- struct sta_info *sta,
- struct station_parameters *params)
+static int sta_apply_parameters(struct ieee80211_local *local,
+ struct sta_info *sta,
+ struct station_parameters *params)
{
+ int ret = 0;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
@@ -697,13 +764,59 @@ static void sta_apply_parameters(struct ieee80211_local *local,
mask = params->sta_flags_mask;
set = params->sta_flags_set;
+ /*
+ * In mesh mode, we can clear the AUTHENTICATED flag but must
+ * also make ASSOCIATED follow appropriately for the driver
+ * API. See also below, after AUTHORIZED changes.
+ */
+ if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+ /* cfg80211 should not allow this in non-mesh modes */
+ if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+ return -EINVAL;
+
+ if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED) &&
+ !test_sta_flag(sta, WLAN_STA_AUTH)) {
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_AUTH);
+ if (ret)
+ return ret;
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_ASSOC);
+ if (ret)
+ return ret;
+ }
+ }
+
if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_AUTHORIZED);
else
- clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_ASSOC);
+ if (ret)
+ return ret;
+ }
+
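The checked moves above advance a station through adjacent states only (NONE, AUTH, ASSOC, AUTHORIZED), failing on any attempt to skip a step. A minimal standalone sketch of that idea with a hypothetical enum, not mac80211's actual state machine:

#include <stdio.h>

enum sta_state { STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* Allow only transitions between adjacent states, as the checked
 * move helpers in the patch do. Returns 0 on success, -1 otherwise. */
static int sta_move_state(enum sta_state *cur, enum sta_state next)
{
	if (next != *cur + 1 && next + 1 != *cur)
		return -1;
	*cur = next;
	return 0;
}

int main(void)
{
	enum sta_state s = STA_NONE;

	/* authorizing requires stepping through AUTH and ASSOC first */
	printf("%d\n", sta_move_state(&s, STA_AUTHORIZED)); /* -1 */
	printf("%d\n", sta_move_state(&s, STA_AUTH));        /*  0 */
	printf("%d\n", sta_move_state(&s, STA_ASSOC));       /*  0 */
	printf("%d\n", sta_move_state(&s, STA_AUTHORIZED));  /*  0 */
	return 0;
}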
+ if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+ /* cfg80211 should not allow this in non-mesh modes */
+ if (WARN_ON(!ieee80211_vif_is_mesh(&sdata->vif)))
+ return -EINVAL;
+
+ if (!(set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) &&
+ test_sta_flag(sta, WLAN_STA_AUTH)) {
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_AUTH);
+ if (ret)
+ return ret;
+ ret = sta_info_move_state_checked(sta,
+ IEEE80211_STA_NONE);
+ if (ret)
+ return ret;
+ }
}
+
if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
@@ -728,13 +841,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_MFP);
}
- if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
- if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
- set_sta_flag(sta, WLAN_STA_AUTH);
- else
- clear_sta_flag(sta, WLAN_STA_AUTH);
- }
-
if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
set_sta_flag(sta, WLAN_STA_TDLS_PEER);
@@ -778,7 +884,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
}
if (params->ht_capa)
- ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
params->ht_capa,
&sta->sta.ht_cap);
@@ -806,6 +912,8 @@ static void sta_apply_parameters(struct ieee80211_local *local,
}
#endif
}
+
+ return 0;
}
static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
@@ -832,22 +940,25 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (is_multicast_ether_addr(mac))
return -EINVAL;
- /* Only TDLS-supporting stations can add TDLS peers */
- if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
- !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
- sdata->vif.type == NL80211_IFTYPE_STATION))
- return -ENOTSUPP;
-
sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
if (!sta)
return -ENOMEM;
- set_sta_flag(sta, WLAN_STA_AUTH);
- set_sta_flag(sta, WLAN_STA_ASSOC);
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
- sta_apply_parameters(local, sta, params);
+ err = sta_apply_parameters(local, sta, params);
+ if (err) {
+ sta_info_free(local, sta);
+ return err;
+ }
- rate_control_rate_init(sta);
+ /*
+ * for TDLS, rate control should be initialized only when supported
+ * rates are known.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ rate_control_rate_init(sta);
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
@@ -891,19 +1002,19 @@ static int ieee80211_change_station(struct wiphy *wiphy,
struct sta_info *sta;
struct ieee80211_sub_if_data *vlansdata;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mac);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return -ENOENT;
}
- /* The TDLS bit cannot be toggled after the STA was added */
- if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
- !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
- !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
- rcu_read_unlock();
+ /* in station mode, supported rates are only valid with TDLS */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ params->supported_rates &&
+ !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ mutex_unlock(&local->sta_mtx);
return -EINVAL;
}
@@ -912,13 +1023,13 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
vlansdata->vif.type != NL80211_IFTYPE_AP) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return -EINVAL;
}
if (params->vlan->ieee80211_ptr->use_4addr) {
if (vlansdata->u.vlan.sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return -EBUSY;
}
@@ -931,7 +1042,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
sta_apply_parameters(local, sta, params);
- rcu_read_unlock();
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
+ rate_control_rate_init(sta);
+
+ mutex_unlock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
@@ -1123,6 +1237,8 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
{
u8 *new_ie;
const u8 *old_ie;
+ struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
+ struct ieee80211_sub_if_data, u.mesh);
/* allocate information elements */
new_ie = NULL;
@@ -1149,6 +1265,10 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
if (setup->is_secure)
ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
+ /* mcast rate setting in Mesh Node */
+ memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
+ sizeof(setup->mcast_rate));
+
return 0;
}
@@ -1194,6 +1314,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask))
conf->dot11MeshHWMPpreqMinInterval =
nconf->dot11MeshHWMPpreqMinInterval;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask))
+ conf->dot11MeshHWMPperrMinInterval =
+ nconf->dot11MeshHWMPperrMinInterval;
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
mask))
conf->dot11MeshHWMPnetDiameterTraversalTime =
@@ -1394,7 +1517,7 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
(old_oper_type != local->_oper_channel_type))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+ if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
old_vif_oper_type != sdata->vif.bss_conf.channel_type)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
@@ -1917,7 +2040,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie)
+ bool dont_wait_for_ack, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1925,10 +2048,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
struct ieee80211_work *wk;
const struct ieee80211_mgmt *mgmt = (void *)buf;
- u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
+ u32 flags;
bool is_offchan = false;
+ if (dont_wait_for_ack)
+ flags = IEEE80211_TX_CTL_NO_ACK;
+ else
+ flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
/* Check that we are on the requested channel for transmission */
if (chan != local->tmp_channel &&
chan != local->oper_channel)
@@ -2488,6 +2616,82 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_qos_hdr *nullfunc;
+ struct sk_buff *skb;
+ int size = sizeof(*nullfunc);
+ __le16 fc;
+ bool qos;
+ struct ieee80211_tx_info *info;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ if (sta) {
+ qos = test_sta_flag(sta, WLAN_STA_WME);
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ if (qos) {
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ } else {
+ size -= 2;
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->dev = dev;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (void *) skb_put(skb, size);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+ nullfunc->seq_ctrl = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_INTFL_NL80211_FRAME_TX;
+
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb->priority = 7;
+ if (qos)
+ nullfunc->qos_ctrl = cpu_to_le16(7);
+
+ local_bh_disable();
+ ieee80211_xmit(sdata, skb);
+ local_bh_enable();
+
+ *cookie = (unsigned long) skb;
+ return 0;
+}
+
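The helper above sends a (QoS) null data frame from the DS to the peer and tracks its ACK status via the cookie. For illustration, a standalone sketch of assembling the 16-bit frame control word from the standard's bit layout (type in bits 2-3, subtype in bits 4-7, FromDS in bit 9); the constants are defined locally and are not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* 802.11 frame control layout: type in bits 2-3, subtype in bits 4-7,
 * FromDS flag in bit 9 (values per the standard, defined locally here). */
#define FC_TYPE_DATA          (2u << 2)
#define FC_STYPE_NULLFUNC     (4u << 4)
#define FC_STYPE_QOS_NULLFUNC (12u << 4)
#define FC_FROMDS             (1u << 9)

static uint16_t nullfunc_fc(int qos)
{
	uint16_t fc = FC_TYPE_DATA | FC_FROMDS;

	fc |= qos ? FC_STYPE_QOS_NULLFUNC : FC_STYPE_NULLFUNC;
	return fc;	/* stored little-endian on the air */
}

int main(void)
{
	printf("non-QoS null: 0x%04x\n", nullfunc_fc(0));
	printf("QoS null:     0x%04x\n", nullfunc_fc(1));
	return 0;
}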
+static struct ieee80211_channel *
+ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ return local->oper_channel;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2553,4 +2757,7 @@ struct cfg80211_ops mac80211_config_ops = {
.set_rekey_data = ieee80211_set_rekey_data,
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
+ .probe_client = ieee80211_probe_client,
+ .get_channel = ieee80211_wiphy_get_channel,
+ .set_noack_map = ieee80211_set_noack_map,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 883996b2f99..90baea53e7c 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -97,40 +97,6 @@ static const struct file_operations reset_ops = {
.llseek = noop_llseek,
};
-static ssize_t noack_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
-
- return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
- local->wifi_wme_noack_test);
-}
-
-static ssize_t noack_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- char buf[10];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
-
- return count;
-}
-
-static const struct file_operations noack_ops = {
- .read = noack_read,
- .write = noack_write,
- .open = mac80211_open_file_generic,
- .llseek = default_llseek,
-};
-
static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -190,7 +156,7 @@ static ssize_t uapsd_max_sp_len_write(struct file *file,
return -EFAULT;
buf[len] = '\0';
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret)
return -EINVAL;
@@ -398,7 +364,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
- DEBUGFS_ADD(noack);
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
DEBUGFS_ADD(channel_type);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9352819a986..176c08ffb13 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -321,6 +321,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
__IEEE80211_IF_FILE_W(tkip_mic_test);
/* AP attributes */
+IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -405,6 +406,8 @@ IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
+ u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
@@ -456,6 +459,7 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
+ DEBUGFS_ADD(num_sta_authorized);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
@@ -534,6 +538,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
+ MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c5f341798c1..2406b3e7393 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
int res = scnprintf(buf, sizeof(buf),
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
TEST(PS_DRIVER), TEST(AUTHORIZED),
- TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+ TEST(SHORT_PREAMBLE),
TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
- PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
- "3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 5f165d7eb2d..e8960ae3986 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,11 +5,34 @@
#include "ieee80211_i.h"
#include "driver-trace.h"
+static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+{
+ WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+}
+
+static inline struct ieee80211_sub_if_data *
+get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+{
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+ u.ap);
+
+ return sdata;
+}
+
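get_bss_sdata() maps an AP_VLAN interface back to the AP interface that owns its BSS by using container_of() on the embedded member. A self-contained userspace re-derivation of that macro from offsetof(), for illustration:

#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bss {
	int beacon_interval;
};

struct iface {
	const char *name;
	struct bss ap;	/* embedded member, like u.ap in the sdata union */
};

int main(void)
{
	struct iface wlan0 = { .name = "wlan0", .ap = { .beacon_interval = 100 } };
	struct bss *bss = &wlan0.ap;

	/* recover the enclosing interface from the embedded bss pointer */
	struct iface *owner = my_container_of(bss, struct iface, ap);

	printf("%s %d\n", owner->name, owner->ap.beacon_interval);
	return 0;
}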
static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
{
local->ops->tx(&local->hw, skb);
}
+static inline void drv_tx_frags(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *skbs)
+{
+ local->ops->tx_frags(&local->hw, vif, sta, skbs);
+}
+
static inline int drv_start(struct ieee80211_local *local)
{
int ret;
@@ -69,15 +92,23 @@ static inline int drv_resume(struct ieee80211_local *local)
#endif
static inline int drv_add_interface(struct ieee80211_local *local,
- struct ieee80211_vif *vif)
+ struct ieee80211_sub_if_data *sdata)
{
int ret;
might_sleep();
- trace_drv_add_interface(local, vif_to_sdata(vif));
- ret = local->ops->add_interface(&local->hw, vif);
+ if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR))
+ return -EINVAL;
+
+ trace_drv_add_interface(local, sdata);
+ ret = local->ops->add_interface(&local->hw, &sdata->vif);
trace_drv_return_int(local, ret);
+
+ if (ret == 0)
+ sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
return ret;
}
@@ -89,6 +120,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_change_interface(local, sdata, type, p2p);
ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
trace_drv_return_int(local, ret);
@@ -96,12 +129,15 @@ static inline int drv_change_interface(struct ieee80211_local *local,
}
static inline void drv_remove_interface(struct ieee80211_local *local,
- struct ieee80211_vif *vif)
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_remove_interface(local, vif_to_sdata(vif));
- local->ops->remove_interface(&local->hw, vif);
+ check_sdata_in_driver(sdata);
+
+ trace_drv_remove_interface(local, sdata);
+ local->ops->remove_interface(&local->hw, &sdata->vif);
+ sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
trace_drv_return_void(local);
}
@@ -124,6 +160,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_bss_info_changed(local, sdata, info, changed);
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
@@ -139,6 +177,8 @@ static inline int drv_tx_sync(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_tx_sync(local, sdata, bssid, type);
if (local->ops->tx_sync)
ret = local->ops->tx_sync(&local->hw, &sdata->vif,
@@ -154,6 +194,8 @@ static inline void drv_finish_tx_sync(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_finish_tx_sync(local, sdata, bssid, type);
if (local->ops->finish_tx_sync)
local->ops->finish_tx_sync(&local->hw, &sdata->vif,
@@ -211,6 +253,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
trace_drv_return_int(local, ret);
@@ -228,6 +272,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (sta)
ista = &sta->sta;
+ check_sdata_in_driver(sdata);
+
trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
@@ -243,6 +289,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_hw_scan(local, sdata);
ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
trace_drv_return_int(local, ret);
@@ -254,6 +302,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_cancel_hw_scan(local, sdata);
local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
trace_drv_return_void(local);
@@ -269,6 +319,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sched_scan_start(local, sdata);
ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
req, ies);
@@ -281,6 +333,8 @@ static inline void drv_sched_scan_stop(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sched_scan_stop(local, sdata);
local->ops->sched_scan_stop(&local->hw, &sdata->vif);
trace_drv_return_void(local);
@@ -377,6 +431,9 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_notify(local, sdata, cmd, sta);
if (local->ops->sta_notify)
local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
@@ -391,6 +448,9 @@ static inline int drv_sta_add(struct ieee80211_local *local,
might_sleep();
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_add(local, sdata, sta);
if (local->ops->sta_add)
ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
@@ -406,6 +466,9 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
{
might_sleep();
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_remove(local, sdata, sta);
if (local->ops->sta_remove)
local->ops->sta_remove(&local->hw, &sdata->vif, sta);
@@ -421,6 +484,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_conf_tx(local, sdata, queue, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, &sdata->vif,
@@ -436,6 +501,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw, &sdata->vif);
@@ -449,6 +516,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
@@ -460,6 +529,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw, &sdata->vif);
@@ -489,6 +560,9 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
might_sleep();
+ sdata = get_bss_sdata(sdata);
+ check_sdata_in_driver(sdata);
+
trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
if (local->ops->ampdu_action)
@@ -644,6 +718,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_bitrate_mask(local, sdata, mask);
if (local->ops->set_bitrate_mask)
ret = local->ops->set_bitrate_mask(&local->hw,
@@ -657,6 +733,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_gtk_rekey_data *data)
{
+ check_sdata_in_driver(sdata);
+
trace_drv_set_rekey_data(local, sdata, data);
if (local->ops->set_rekey_data)
local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 2af4fca5533..6e9df8fd8fb 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -5,17 +5,6 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
-#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(...)
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(evt_class, name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f0fb737efa8..f25fff7607d 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,7 +19,84 @@
#include "ieee80211_i.h"
#include "rate.h"
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
+{
+ const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
+ !(sdata->u.mgd.ht_capa.cap_info & flg))
+ return true;
+ return false;
+}
+
+static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ u16 flag)
+{
+ __le16 le_flag = cpu_to_le16(flag);
+ if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
+ if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+ ht_cap->cap &= ~flag;
+ }
+}
+
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
+ u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+ int i;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION) {
+ /* AP interfaces call this code when adding new stations,
+ * so just silently ignore non-station interfaces.
+ */
+ return;
+ }
+
+ /* NOTE: If you add more over-rides here, update register_hw
+ * ht_capa_mod_msk logic in main.c as well.
+ * And, if this method can ever change ht_cap.ht_supported, fix
+ * the check in ieee80211_add_ht_ie.
+ */
+
+ /* check for HT over-rides, MCS rates first. */
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ u8 m = smask[i];
+ ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */
+ /* Add back rates that are supported */
+ ht_cap->mcs.rx_mask[i] |= (m & scaps[i]);
+ }
+
+ /* Force removal of HT-40 capabilities? */
+ __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+
+ /* Allow user to disable the max-AMSDU bit. */
+ __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+
+ /* Allow user to decrease AMPDU factor */
+ if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_FACTOR) {
+ u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
+ & IEEE80211_HT_AMPDU_PARM_FACTOR;
+ if (n < ht_cap->ampdu_factor)
+ ht_cap->ampdu_factor = n;
+ }
+
+ /* Allow the user to increase AMPDU density. */
+ if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_DENSITY) {
+ u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_DENSITY)
+ >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
+ if (n > ht_cap->ampdu_density)
+ ht_cap->ampdu_density = n;
+ }
+}
+
+
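For the MCS rates, the override above first clears every bit selected by the user's mask and then re-adds only those rates the user left enabled. A standalone sketch of that masking step; the array length and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MCS_MASK_LEN 10	/* illustrative stand-in for the HT MCS mask length */

/* For every bit set in mask[], force the corresponding capability bit
 * to the user-supplied value; bits not covered by the mask are kept. */
static void apply_mcs_override(uint8_t rx_mask[MCS_MASK_LEN],
			       const uint8_t user[MCS_MASK_LEN],
			       const uint8_t mask[MCS_MASK_LEN])
{
	for (int i = 0; i < MCS_MASK_LEN; i++) {
		rx_mask[i] &= ~mask[i];          /* drop all masked bits */
		rx_mask[i] |= mask[i] & user[i]; /* re-add allowed ones  */
	}
}

int main(void)
{
	uint8_t rx_mask[MCS_MASK_LEN] = { 0xff, 0xff };    /* MCS 0-15 supported */
	uint8_t user[MCS_MASK_LEN]    = { 0x0f, 0x00 };    /* user wants MCS 0-3 */
	uint8_t mask[MCS_MASK_LEN]    = { 0xff, 0xff };    /* override both bytes */

	apply_mcs_override(rx_mask, user, mask);
	printf("0x%02x 0x%02x\n", rx_mask[0], rx_mask[1]); /* 0x0f 0x00 */
	return 0;
}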
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
struct ieee80211_sta_ht_cap *ht_cap)
{
@@ -103,6 +180,12 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
/* handle MCS rate 32 too */
if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
ht_cap->mcs.rx_mask[32/8] |= 1;
+
+ /*
+ * If the user has specified capability over-rides, take care
+ * of that here.
+ */
+ ieee80211_apply_htcap_overrides(sdata, ht_cap);
}
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx)
@@ -196,10 +279,13 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -214,7 +300,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.delba.params = cpu_to_le16(params);
mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
- ieee80211_tx_skb(sdata, skb);
+ ieee80211_tx_skb_tid(sdata, skb, tid);
}
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ede9a8b341a..f8a32bf9821 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -77,6 +77,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *bss;
u32 bss_change;
u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
+ enum nl80211_channel_type channel_type;
lockdep_assert_held(&ifibss->mtx);
@@ -97,6 +98,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.bss_conf.ibss_joined) {
sdata->vif.bss_conf.ibss_joined = false;
+ netif_carrier_off(sdata->dev);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
}
@@ -104,8 +106,16 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
- local->oper_channel = chan;
- WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
+ channel_type = ifibss->channel_type;
+ if (channel_type > NL80211_CHAN_HT20 &&
+ !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+ channel_type = NL80211_CHAN_HT20;
+ if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+ /* can only fail due to HT40+/- mismatch */
+ channel_type = NL80211_CHAN_HT20;
+ WARN_ON(!ieee80211_set_channel_type(local, sdata,
+ NL80211_CHAN_HT20));
+ }
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
sband = local->hw.wiphy->bands[chan->band];
@@ -171,6 +181,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(skb_put(skb, ifibss->ie_len),
ifibss->ie, ifibss->ie_len);
+ /* add HT capability and information IEs */
+ if (channel_type && sband->ht_cap.ht_supported) {
+ pos = skb_put(skb, 4 +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee80211_ht_info));
+ pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
+ pos = ieee80211_ie_build_ht_info(pos,
+ &sband->ht_cap,
+ chan,
+ channel_type);
+ }
+
if (local->hw.queues >= 4) {
pos = skb_put(skb, 9);
*pos++ = WLAN_EID_VENDOR_SPECIFIC;
@@ -194,6 +217,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss_change |= BSS_CHANGED_BEACON;
bss_change |= BSS_CHANGED_BEACON_ENABLED;
bss_change |= BSS_CHANGED_BASIC_RATES;
+ bss_change |= BSS_CHANGED_HT;
bss_change |= BSS_CHANGED_IBSS;
sdata->vif.bss_conf.ibss_joined = true;
ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -207,6 +231,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
mgmt, skb->len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
+ netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
}
@@ -250,6 +275,80 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
cbss->tsf);
}
+static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
+ __acquires(RCU)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u8 addr[ETH_ALEN];
+
+ memcpy(addr, sta->sta.addr, ETH_ALEN);
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ wiphy_debug(sdata->local->hw.wiphy,
+ "Adding new IBSS station %pM (dev=%s)\n",
+ addr, sdata->name);
+#endif
+
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+ rate_control_rate_init(sta);
+
+ /* If it fails, maybe we raced another insertion? */
+ if (sta_info_insert_rcu(sta))
+ return sta_info_get(sdata, addr);
+ return sta;
+}
+
+static struct sta_info *
+ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *addr,
+ u32 supp_rates)
+ __acquires(RCU)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ int band = local->hw.conf.channel->band;
+
+ /*
+ * XXX: Consider removing the least recently used entry and
+ * allowing a new one to be added.
+ */
+ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
+ sdata->name, addr);
+ rcu_read_lock();
+ return NULL;
+ }
+
+ if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) {
+ rcu_read_lock();
+ return NULL;
+ }
+
+ if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+ rcu_read_lock();
+ return NULL;
+ }
+
+ sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
+ if (!sta) {
+ rcu_read_lock();
+ return NULL;
+ }
+
+ sta->last_rx = jiffies;
+
+ /* make sure mandatory rates are always added */
+ sta->sta.supp_rates[band] = supp_rates |
+ ieee80211_mandatory_rates(local, band);
+
+ return ieee80211_ibss_finish_sta(sta);
+}
+
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len,
@@ -266,6 +365,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
u64 beacon_timestamp, rx_timestamp;
u32 supp_rates = 0;
enum ieee80211_band band = rx_status->band;
+ struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ bool rates_updated = false;
if (elems->ds_params && elems->ds_params_len == 1)
freq = ieee80211_channel_to_frequency(elems->ds_params[0],
@@ -305,17 +406,51 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
prev_rates,
sta->sta.supp_rates[band]);
#endif
- rate_control_rate_init(sta);
+ rates_updated = true;
}
- } else
+ } else {
+ rcu_read_unlock();
sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
- mgmt->sa, supp_rates,
- GFP_ATOMIC);
+ mgmt->sa, supp_rates);
+ }
}
if (sta && elems->wmm_info)
set_sta_flag(sta, WLAN_STA_WME);
+ if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+ sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
+ /* we both use HT */
+ struct ieee80211_sta_ht_cap sta_ht_cap_new;
+ enum nl80211_channel_type channel_type =
+ ieee80211_ht_info_to_channel_type(
+ elems->ht_info_elem);
+
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+ elems->ht_cap_elem,
+ &sta_ht_cap_new);
+
+ /*
+ * fall back to HT20 if we don't use HT40 or if the peer
+ * uses the other HT40 extension channel
+ */
+ if ((channel_type == NL80211_CHAN_HT40MINUS ||
+ channel_type == NL80211_CHAN_HT40PLUS) &&
+ channel_type != sdata->u.ibss.channel_type)
+ sta_ht_cap_new.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new,
+ sizeof(sta_ht_cap_new))) {
+ memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
+ sizeof(sta_ht_cap_new));
+ rates_updated = true;
+ }
+ }
+
+ if (sta && rates_updated)
+ rate_control_rate_init(sta);
+
rcu_read_unlock();
}
@@ -404,21 +539,17 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_join_ibss(sdata, bss);
supp_rates = ieee80211_sta_get_rates(local, elems, band);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
- supp_rates, GFP_KERNEL);
+ supp_rates);
+ rcu_read_unlock();
}
put_bss:
ieee80211_rx_bss_put(local, bss);
}
-/*
- * Add a new IBSS station, will also be called by the RX code when,
- * in IBSS mode, receiving a frame from a yet-unknown station, hence
- * must be callable in atomic context.
- */
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 *addr, u32 supp_rates,
- gfp_t gfp)
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *addr,
+ u32 supp_rates)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
@@ -433,37 +564,29 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
if (net_ratelimit())
printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
sdata->name, addr);
- return NULL;
+ return;
}
if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
- return NULL;
+ return;
if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
- return NULL;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
- addr, sdata->name);
-#endif
+ return;
- sta = sta_info_alloc(sdata, addr, gfp);
+ sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
if (!sta)
- return NULL;
+ return;
sta->last_rx = jiffies;
- set_sta_flag(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
sta->sta.supp_rates[band] = supp_rates |
ieee80211_mandatory_rates(local, band);
- rate_control_rate_init(sta);
-
- /* If it fails, maybe we raced another insertion? */
- if (sta_info_insert(sta))
- return sta_info_get(sdata, addr);
- return sta;
+ spin_lock(&ifibss->incomplete_lock);
+ list_add(&sta->list, &ifibss->incomplete_stations);
+ spin_unlock(&ifibss->incomplete_lock);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,6 +925,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct sta_info *sta;
mutex_lock(&ifibss->mtx);
@@ -813,6 +937,19 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
if (!ifibss->ssid_len)
goto out;
+ spin_lock_bh(&ifibss->incomplete_lock);
+ while (!list_empty(&ifibss->incomplete_stations)) {
+ sta = list_first_entry(&ifibss->incomplete_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
+ ieee80211_ibss_finish_sta(sta);
+ rcu_read_unlock();
+ spin_lock_bh(&ifibss->incomplete_lock);
+ }
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
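The loop above drains incomplete_stations one entry at a time, dropping the spinlock while each station is finished so the heavier completion work never runs under the lock. A small pthread-based userspace analogue of the same drain pattern, with hypothetical types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct node *n)
{
	/* heavier work done outside the lock */
	printf("finishing station %d\n", n->id);
	free(n);
}

static void drain_pending(void)
{
	pthread_mutex_lock(&lock);
	while (pending) {
		struct node *n = pending;

		pending = n->next;		/* detach under the lock   */
		pthread_mutex_unlock(&lock);	/* drop it for the work    */
		process(n);
		pthread_mutex_lock(&lock);	/* retake before next pass */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = pending;
		pending = n;
	}
	drain_pending();
	return 0;
}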
switch (ifibss->state) {
case IEEE80211_IBSS_MLME_SEARCH:
ieee80211_sta_find_ibss(sdata);
@@ -871,6 +1008,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
setup_timer(&ifibss->timer, ieee80211_ibss_timer,
(unsigned long) sdata);
mutex_init(&ifibss->mtx);
+ INIT_LIST_HEAD(&ifibss->incomplete_stations);
+ spin_lock_init(&ifibss->incomplete_lock);
}
/* scan finished notification */
@@ -894,12 +1033,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
struct sk_buff *skb;
+ u32 changed = 0;
skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
- 36 /* bitrates */ +
- 34 /* SSID */ +
- 3 /* DS params */ +
- 4 /* IBSS params */ +
+ sizeof(struct ieee80211_hdr_3addr) +
+ 12 /* struct ieee80211_mgmt.u.beacon */ +
+ 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ +
+ 2 + 8 /* max Supported Rates */ +
+ 3 /* max DS params */ +
+ 4 /* IBSS params */ +
+ 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+ 2 + sizeof(struct ieee80211_ht_cap) +
+ 2 + sizeof(struct ieee80211_ht_info) +
params->ie_len);
if (!skb)
return -ENOMEM;
@@ -920,13 +1065,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
sdata->u.ibss.channel = params->channel;
+ sdata->u.ibss.channel_type = params->channel_type;
sdata->u.ibss.fixed_channel = params->channel_fixed;
/* fix ourselves to that channel now already */
if (params->channel_fixed) {
sdata->local->oper_channel = params->channel;
- WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
- NL80211_CHAN_NO_HT));
+ if (!ieee80211_set_channel_type(sdata->local, sdata,
+ params->channel_type)) {
+ mutex_unlock(&sdata->u.ibss.mtx);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
}
if (params->ie) {
@@ -949,6 +1099,23 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(sdata->local);
mutex_unlock(&sdata->local->mtx);
+ /*
+ * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
+ * reserved, but an HT STA shall protect HT transmissions as though
+ * the HT Protection field were set to non-HT mixed mode.
+ *
+ * In an IBSS, the RIFS Mode field of the HT Operation element is
+ * also reserved, but an HT STA shall operate as though this field
+ * were set to 1.
+ */
+
+ sdata->vif.bss_conf.ht_operation_mode |=
+ IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED
+ | IEEE80211_HT_PARAM_RIFS_MODE;
+
+ changed |= BSS_CHANGED_HT;
+ ieee80211_bss_info_change_notify(sdata, changed);
+
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
return 0;
@@ -962,6 +1129,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
u16 capability;
int active_ibss;
+ struct sta_info *sta;
mutex_lock(&sdata->u.ibss.mtx);
@@ -991,6 +1159,20 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
sta_info_flush(sdata->local, sdata);
+ spin_lock_bh(&ifibss->incomplete_lock);
+ while (!list_empty(&ifibss->incomplete_stations)) {
+ sta = list_first_entry(&ifibss->incomplete_stations,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
+ sta_info_free(local, sta);
+ spin_lock_bh(&ifibss->incomplete_lock);
+ }
+ spin_unlock_bh(&ifibss->incomplete_lock);
+
+ netif_carrier_off(sdata->dev);
+
/* remove beacon */
kfree(sdata->u.ibss.ie);
skb = rcu_dereference_protected(sdata->u.ibss.presp,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ea10a51babd..2f0642d9e15 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/leds.h>
+#include <linux/idr.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -141,6 +142,7 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
struct ieee80211_tx_data {
struct sk_buff *skb;
+ struct sk_buff_head skbs;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
@@ -184,12 +186,15 @@ enum ieee80211_packet_rx_flags {
* enum ieee80211_rx_flags - RX data flags
*
* @IEEE80211_RX_CMNTR: received on cooked monitor already
+ * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
+ * to cfg80211_report_obss_beacon().
*
* These flags are used across handling multiple interfaces
* for a single frame.
*/
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
+ IEEE80211_RX_BEACON_REPORTED = BIT(1),
};
struct ieee80211_rx_data {
@@ -228,6 +233,7 @@ struct beacon_data {
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
+ struct sk_buff __rcu *probe_resp;
struct list_head vlans;
@@ -237,6 +243,7 @@ struct ieee80211_if_ap {
u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
struct sk_buff_head ps_bc_buf;
atomic_t num_sta_ps; /* number of stations in PS mode */
+ atomic_t num_sta_authorized; /* number of authorized stations */
int dtim_count;
bool dtim_bc_mc;
};
@@ -443,6 +450,9 @@ struct ieee80211_if_managed {
*/
int rssi_min_thold, rssi_max_thold;
int last_ave_beacon_signal;
+
+ struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
};
struct ieee80211_if_ibss {
@@ -465,12 +475,16 @@ struct ieee80211_if_ibss {
u8 ssid_len, ie_len;
u8 *ie;
struct ieee80211_channel *channel;
+ enum nl80211_channel_type channel_type;
unsigned long ibss_join_req;
/* probe response/beacon for IBSS */
struct sk_buff __rcu *presp;
struct sk_buff *skb;
+ spinlock_t incomplete_lock;
+ struct list_head incomplete_stations;
+
enum {
IEEE80211_IBSS_MLME_SEARCH,
IEEE80211_IBSS_MLME_JOINED,
@@ -505,7 +519,9 @@ struct ieee80211_if_mesh {
atomic_t mpaths;
/* Timestamp of last SN update */
unsigned long last_sn_update;
- /* Timestamp of last SN sent */
+ /* Time when it's ok to send next PERR */
+ unsigned long next_perr;
+ /* Timestamp of last PREQ sent */
unsigned long last_preq;
struct mesh_rmc *rmc;
spinlock_t mesh_preq_queue_lock;
@@ -543,6 +559,7 @@ struct ieee80211_if_mesh {
* associated stations and deliver multicast frames both
* back to wireless media and to the local net stack.
* @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
+ * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
@@ -550,6 +567,7 @@ enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
+ IEEE80211_SDATA_IN_DRIVER = BIT(5),
};
/**
@@ -600,6 +618,9 @@ struct ieee80211_sub_if_data {
struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
unsigned int fragment_next;
+ /* TID bitmap for NoAck policy */
+ u16 noack_map;
+
struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
struct ieee80211_key __rcu *default_multicast_key;
@@ -722,17 +743,16 @@ enum {
* operating channel
* @SCAN_SET_CHANNEL: Set the next channel to be scanned
* @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
- * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
- * about us leaving the channel and stop all associated STA interfaces
- * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
- * AP about us being back and restart all associated STA interfaces
+ * @SCAN_SUSPEND: Suspend the scan and go back to the operating channel to
+ * send out data
+ * @SCAN_RESUME: Resume the scan and scan the next channel
*/
enum mac80211_scan_state {
SCAN_DECISION,
SCAN_SET_CHANNEL,
SCAN_SEND_PROBE,
- SCAN_LEAVE_OPER_CHANNEL,
- SCAN_ENTER_OPER_CHANNEL,
+ SCAN_SUSPEND,
+ SCAN_RESUME,
};
struct ieee80211_local {
@@ -835,18 +855,15 @@ struct ieee80211_local {
/* Station data */
/*
- * The mutex only protects the list and counter,
- * reads are done in RCU.
- * Additionally, the lock protects the hash table,
- * the pending list and each BSS's TIM bitmap.
+ * The mutex only protects the list, hash table and
+ * counter; reads are done with RCU.
*/
struct mutex sta_mtx;
- spinlock_t sta_lock;
+ spinlock_t tim_lock;
unsigned long num_sta;
- struct list_head sta_list, sta_pending_list;
+ struct list_head sta_list;
struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
- struct work_struct sta_finish_work;
int sta_generation;
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -951,7 +968,6 @@ struct ieee80211_local {
int total_ps_buffered; /* total number of all buffered unicast and
* multicast packets for power saving stations
*/
- int wifi_wme_noack_test;
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
/*
@@ -1012,6 +1028,9 @@ struct ieee80211_local {
u32 hw_roc_cookie;
bool hw_roc_for_tx;
+ struct idr ack_status_frames;
+ spinlock_t ack_status_lock;
+
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1030,6 +1049,69 @@ struct ieee80211_ra_tid {
u16 tid;
};
+/* Parsed Information Elements */
+struct ieee802_11_elems {
+ u8 *ie_start;
+ size_t total_len;
+
+ /* pointers to IEs */
+ u8 *ssid;
+ u8 *supp_rates;
+ u8 *fh_params;
+ u8 *ds_params;
+ u8 *cf_params;
+ struct ieee80211_tim_ie *tim;
+ u8 *ibss_params;
+ u8 *challenge;
+ u8 *wpa;
+ u8 *rsn;
+ u8 *erp_info;
+ u8 *ext_supp_rates;
+ u8 *wmm_info;
+ u8 *wmm_param;
+ struct ieee80211_ht_cap *ht_cap_elem;
+ struct ieee80211_ht_info *ht_info_elem;
+ struct ieee80211_meshconf_ie *mesh_config;
+ u8 *mesh_id;
+ u8 *peering;
+ u8 *preq;
+ u8 *prep;
+ u8 *perr;
+ struct ieee80211_rann_ie *rann;
+ u8 *ch_switch_elem;
+ u8 *country_elem;
+ u8 *pwr_constr_elem;
+ u8 *quiet_elem; /* first quiet element */
+ u8 *timeout_int;
+
+ /* length of them, respectively */
+ u8 ssid_len;
+ u8 supp_rates_len;
+ u8 fh_params_len;
+ u8 ds_params_len;
+ u8 cf_params_len;
+ u8 tim_len;
+ u8 ibss_params_len;
+ u8 challenge_len;
+ u8 wpa_len;
+ u8 rsn_len;
+ u8 erp_info_len;
+ u8 ext_supp_rates_len;
+ u8 wmm_info_len;
+ u8 wmm_param_len;
+ u8 mesh_id_len;
+ u8 peering_len;
+ u8 preq_len;
+ u8 prep_len;
+ u8 perr_len;
+ u8 ch_switch_elem_len;
+ u8 country_elem_len;
+ u8 pwr_constr_elem_len;
+ u8 quiet_elem_len;
+ u8 num_of_quiet_elem; /* can be more than one */
+ u8 timeout_int_len;
+};
+
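The members above point into a frame's information-element area, which is a flat sequence of (element ID, length, payload) triplets. A minimal standalone parser sketch over that encoding, filling in just two of the fields for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EID_SSID       0
#define EID_SUPP_RATES 1

struct parsed_elems {
	const uint8_t *ssid;
	uint8_t ssid_len;
	const uint8_t *supp_rates;
	uint8_t supp_rates_len;
};

/* Walk the IE area: each element is <id><len><len bytes of payload>. */
static void parse_elems(const uint8_t *pos, size_t len, struct parsed_elems *out)
{
	while (len >= 2) {
		uint8_t id = pos[0], elen = pos[1];

		if (elen + 2u > len)
			break;	/* truncated element, stop */

		switch (id) {
		case EID_SSID:
			out->ssid = pos + 2;
			out->ssid_len = elen;
			break;
		case EID_SUPP_RATES:
			out->supp_rates = pos + 2;
			out->supp_rates_len = elen;
			break;
		}
		pos += 2 + elen;
		len -= 2 + elen;
	}
}

int main(void)
{
	const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't', 1, 2, 0x82, 0x84 };
	struct parsed_elems e = { 0 };

	parse_elems(ies, sizeof(ies), &e);
	printf("ssid_len=%u rates_len=%u\n",
	       (unsigned)e.ssid_len, (unsigned)e.supp_rates_len);
	return 0;
}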
static inline struct ieee80211_local *hw_to_local(
struct ieee80211_hw *hw)
{
@@ -1090,9 +1172,8 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 *addr, u32 supp_rates,
- gfp_t gfp);
+void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *addr, u32 supp_rates);
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params);
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -1140,13 +1221,9 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
/* off-channel helpers */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
- bool tell_ap);
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing,
bool offchannel_ps_disable);
void ieee80211_hw_roc_setup(struct ieee80211_local *local);
@@ -1179,7 +1256,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
/* HT */
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
struct ieee80211_sta_ht_cap *ht_cap);
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
@@ -1266,7 +1347,16 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int tid);
+static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+ ieee80211_tx_skb_tid(sdata, skb, 7);
+}
+
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems);
u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
@@ -1334,6 +1424,12 @@ void ieee80211_recalc_smps(struct ieee80211_local *local);
size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids, size_t offset);
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+ u16 cap);
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type);
/* internal work items */
void ieee80211_work_init(struct ieee80211_local *local);
@@ -1362,6 +1458,8 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
bool ieee80211_set_channel_type(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum nl80211_channel_type chantype);
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
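
Note on the new ieee80211_tx_skb()/ieee80211_tx_skb_tid() split above: the inline wrapper pins all internally generated management frames to TID 7, which the 802.1D priority table (ieee802_1d_to_ac, also referenced in the iface.c hunks below) maps to the voice (VO) access category. A minimal user-space sketch of that mapping, not part of the patch; the enum ordering mirrors mac80211's IEEE80211_AC_* values but is an assumption of the sketch.

#include <stdio.h>

/* access categories, assumed ordering (highest priority first) */
enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

/* 802.1D user priority (== TID 0..7) to access category */
static const enum ac tid_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE,	/* TIDs 0-3 */
	AC_VI, AC_VI, AC_VO, AC_VO	/* TIDs 4-7 */
};

int main(void)
{
	printf("TID 7 -> AC %d (VO)\n", tid_to_ac[7]);
	return 0;
}
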
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 30d73552e9a..e47768cb8cb 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -188,11 +188,22 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
return -ENOLINK;
break;
- case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_AP_VLAN: {
+ struct ieee80211_sub_if_data *master;
+
if (!sdata->bss)
return -ENOLINK;
+
list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+
+ master = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ sdata->control_port_protocol =
+ master->control_port_protocol;
+ sdata->control_port_no_encrypt =
+ master->control_port_no_encrypt;
break;
+ }
case NL80211_IFTYPE_AP:
sdata->bss = &sdata->u.ap;
break;
@@ -265,7 +276,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
break;
default:
if (coming_up) {
- res = drv_add_interface(local, &sdata->vif);
+ res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
}
@@ -282,10 +293,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
+
+ /*
+ * set default queue parameters so drivers don't
+ * need to initialise the hardware if the hardware
+ * doesn't start up with sane defaults
+ */
+ ieee80211_set_wmm_default(sdata);
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -299,8 +318,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_del_interface;
}
- /* no atomic bitop required since STA is not live yet */
- set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
res = sta_info_insert(sta);
if (res) {
@@ -329,15 +349,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags) {
+ if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
- /*
- * set default queue parameters so drivers don't
- * need to initialise the hardware if the hardware
- * doesn't start up with sane defaults
- */
- ieee80211_set_wmm_default(sdata);
- }
ieee80211_recalc_ps(local, -1);
@@ -345,7 +358,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
return 0;
err_del_interface:
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
err_stop:
if (!local->open_count)
drv_stop(local);
@@ -450,15 +463,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon =
rtnl_dereference(sdata->u.ap.beacon);
+ struct sk_buff *old_probe_resp =
+ rtnl_dereference(sdata->u.ap.probe_resp);
/* sdata_running will return false, so this will disable */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- /* remove beacon */
+ /* remove beacon and probe response */
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
synchronize_rcu();
kfree(old_beacon);
+ kfree_skb(old_probe_resp);
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -520,7 +537,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_free_keys(sdata);
if (going_down)
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
}
sdata->bss = NULL;
@@ -656,7 +673,6 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_radiotap_header *rtap = (void *)skb->data;
- u8 *p;
if (local->hw.queues < 4)
return 0;
@@ -667,19 +683,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
- if (!ieee80211_is_data(hdr->frame_control)) {
- skb->priority = 7;
- return ieee802_1d_to_ac[skb->priority];
- }
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
- skb->priority = 0;
- return ieee802_1d_to_ac[skb->priority];
- }
-
- p = ieee80211_get_qos_ctl(hdr);
- skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
-
- return ieee80211_downgrade_queue(local, skb);
+ return ieee80211_select_queue_80211(local, skb, hdr);
}
static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -850,6 +854,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
sdata->control_port_no_encrypt = false;
+ sdata->noack_map = 0;
+
/* only monitor differs */
sdata->dev->type = ARPHRD_ETHER;
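
In the AP_VLAN branch of ieee80211_do_open() above, the VLAN now inherits control_port_protocol and control_port_no_encrypt from its master AP interface, recovered from the shared bss pointer with container_of(). A self-contained sketch of that pointer arithmetic, not part of the patch; the cut-down types stand in for the mac80211 structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct if_ap { int beacon_int; };
struct sub_if_data { int control_port_no_encrypt; struct if_ap ap; };

int main(void)
{
	struct sub_if_data master = { .control_port_no_encrypt = 1 };
	struct if_ap *bss = &master.ap;	/* what the VLAN's sdata->bss points at */

	/* recover the enclosing "master" from the embedded member */
	struct sub_if_data *m = container_of(bss, struct sub_if_data, ap);

	printf("%d\n", m->control_port_no_encrypt);	/* prints 1 */
	return 0;
}
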
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index fb02ea52d2c..87a89741432 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -134,9 +134,13 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
+ WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
+
return 0;
}
@@ -179,7 +183,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sdata = key->sdata;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d999bf3b84e..0a0d94ad9b0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
if (atomic_read(&local->iff_allmultis))
new_flags |= FIF_ALLMULTI;
- if (local->monitors || local->scanning)
+ if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning))
new_flags |= FIF_BCN_PRBRESP_PROMISC;
if (local->fif_probe_req || local->probe_req_reg)
@@ -92,50 +92,9 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
ieee80211_configure_filter(local);
}
-/*
- * Returns true if we are logically configured to be on
- * the operating channel AND the hardware-conf is currently
- * configured on the operating channel. Compares channel-type
- * as well.
- */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
-{
- struct ieee80211_channel *chan, *scan_chan;
- enum nl80211_channel_type channel_type;
-
- /* This logic needs to match logic in ieee80211_hw_config */
- if (local->scan_channel) {
- chan = local->scan_channel;
- /* If scanning on oper channel, use whatever channel-type
- * is currently in use.
- */
- if (chan == local->oper_channel)
- channel_type = local->_oper_channel_type;
- else
- channel_type = NL80211_CHAN_NO_HT;
- } else if (local->tmp_channel) {
- chan = scan_chan = local->tmp_channel;
- channel_type = local->tmp_channel_type;
- } else {
- chan = local->oper_channel;
- channel_type = local->_oper_channel_type;
- }
-
- if (chan != local->oper_channel ||
- channel_type != local->_oper_channel_type)
- return false;
-
- /* Check current hardware-config against oper_channel. */
- if ((local->oper_channel != local->hw.conf.channel) ||
- (local->_oper_channel_type != local->hw.conf.channel_type))
- return false;
-
- return true;
-}
-
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
- struct ieee80211_channel *chan, *scan_chan;
+ struct ieee80211_channel *chan;
int ret = 0;
int power;
enum nl80211_channel_type channel_type;
@@ -143,14 +102,12 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- scan_chan = local->scan_channel;
-
/* If this off-channel logic ever changes, ieee80211_on_oper_channel
* may need to change as well.
*/
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (scan_chan) {
- chan = scan_chan;
+ if (local->scan_channel) {
+ chan = local->scan_channel;
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
@@ -159,7 +116,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
else
channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
- chan = scan_chan = local->tmp_channel;
+ chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
@@ -193,8 +150,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- if ((local->scanning & SCAN_SW_SCANNING) ||
- (local->scanning & SCAN_HW_SCANNING))
+ if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_HW_SCANNING, &local->scanning))
power = chan->max_power;
else
power = local->power_constr_level ?
@@ -436,9 +393,6 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
bss_conf = &sdata->vif.bss_conf;
- if (!ieee80211_sdata_running(sdata))
- return NOTIFY_DONE;
-
/* ARP filtering is only supported in managed mode */
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
@@ -467,7 +421,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
}
bss_conf->arp_addr_cnt = c;
- /* Configure driver only if associated */
+ /* Configure driver only if associated (which also implies it is up) */
if (ifmgd->associated) {
bss_conf->arp_filter_enabled = sdata->arp_filter_state;
ieee80211_bss_info_change_notify(sdata,
@@ -560,6 +514,19 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
},
};
+static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
+ .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR |
+ IEEE80211_HT_AMPDU_PARM_DENSITY,
+
+ .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ IEEE80211_HT_CAP_SGI_40),
+ .mcs = {
+ .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, },
+ },
+};
+
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops)
{
@@ -595,7 +562,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
- WIPHY_FLAG_4ADDR_STATION;
+ WIPHY_FLAG_4ADDR_STATION |
+ WIPHY_FLAG_REPORTS_OBSS |
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
+ NL80211_FEATURE_HT_IBSS;
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -608,7 +581,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
- BUG_ON(!ops->tx);
+ BUG_ON(!ops->tx && !ops->tx_frags);
BUG_ON(!ops->start);
BUG_ON(!ops->stop);
BUG_ON(!ops->config);
@@ -628,6 +601,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->user_power_level = -1;
local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
+ wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
INIT_LIST_HEAD(&local->interfaces);
@@ -670,6 +644,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
+ spin_lock_init(&local->ack_status_lock);
+ idr_init(&local->ack_status_frames);
+ /* preallocate at least one entry */
+ idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
+
sta_info_init(local);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -757,6 +736,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->int_scan_req)
return -ENOMEM;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ local->int_scan_req->rates[band] = (u32) -1;
+ }
+
/* if low-level driver supports AP, we also support VLAN */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
@@ -1045,6 +1030,13 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
+static int ieee80211_free_ack_frame(int id, void *p, void *data)
+{
+ WARN_ONCE(1, "Have pending ack frames!\n");
+ kfree_skb(p);
+ return 0;
+}
+
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -1055,6 +1047,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
if (local->wiphy_ciphers_allocated)
kfree(local->hw.wiphy->cipher_suites);
+ idr_for_each(&local->ack_status_frames,
+ ieee80211_free_ack_frame, NULL);
+ idr_destroy(&local->ack_status_frames);
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
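
The scanning checks in ieee80211_configure_filter() and ieee80211_hw_config() above switch from masking local->scanning with the raw SCAN_* values to test_bit(). Those enumerators are used elsewhere in mac80211 with set_bit()/test_bit(), i.e. as bit numbers, so masking with the enumerator itself tests the wrong bit. A small user-space sketch of the difference, not part of the patch; test_bit() is reimplemented here only for illustration.

#include <stdio.h>

enum { SCAN_SW_SCANNING, SCAN_HW_SCANNING };	/* bit numbers 0 and 1 */

static int test_bit(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

int main(void)
{
	unsigned long scanning = 1UL << SCAN_HW_SCANNING; /* only HW scan active */

	/* masking with the enumerator: 2 & 1 == 0, silently misses the bit */
	printf("mask-style: %lu\n", scanning & SCAN_HW_SCANNING);
	/* testing the bit number reports the HW scan correctly */
	printf("test_bit:   %d\n", test_bit(SCAN_HW_SCANNING, &scanning));
	return 0;
}
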
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a7078fdba8c..c707c8bf6d2 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -76,6 +76,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
/*
* As support for each feature is added, check for matching
@@ -87,15 +88,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
* - MDA enabled
* - Power management control on fc
*/
- if (ifmsh->mesh_id_len == ie->mesh_id_len &&
- memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
- (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
- (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
- (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
- (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
- (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
- return true;
-
+ if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
+ memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
+ (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+ (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
+ (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
+ (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
+ (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
+ goto mismatch;
+
+ /* disallow peering with mismatched channel types for now */
+ if (ie->ht_info_elem &&
+ (local->_oper_channel_type !=
+ ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+ goto mismatch;
+
+ return true;
+mismatch:
return false;
}
@@ -341,6 +350,49 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
return 0;
}
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u8 *pos;
+
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
+ if (!sband->ht_cap.ht_supported ||
+ local->_oper_channel_type == NL80211_CHAN_NO_HT)
+ return 0;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap);
+
+ return 0;
+}
+
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_channel *channel = local->oper_channel;
+ enum nl80211_channel_type channel_type = local->_oper_channel_type;
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[channel->band];
+ struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+ u8 *pos;
+
+ if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+ return 0;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
+ ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+
+ return 0;
+}
static void ieee80211_mesh_path_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
@@ -697,6 +749,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
+ ifmsh->next_perr = jiffies;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
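
The new mesh_add_ht_cap_ie() and mesh_add_ht_info_ie() both reserve 2 + sizeof(payload) bytes before calling skb_put(): an 802.11 information element is a one-byte element ID and a one-byte length followed by the body. A stand-alone sketch of that layout, not part of the patch; the payload struct is a stand-in, though 45 is the real HT Capabilities element ID.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* stand-in payload; the real body would be struct ieee80211_ht_cap */
struct ht_cap_body {
	uint16_t cap_info;
	uint8_t ampdu_params;
	uint8_t mcs[16];
} __attribute__((packed));

static size_t build_ie(uint8_t *pos, uint8_t eid, const void *body, uint8_t len)
{
	pos[0] = eid;		/* element ID */
	pos[1] = len;		/* element length (body only) */
	memcpy(pos + 2, body, len);
	return 2 + len;		/* total bytes consumed, matching the tailroom check */
}

int main(void)
{
	uint8_t buf[64];
	struct ht_cap_body body = { .cap_info = 0x01ff };

	printf("wrote %zu bytes\n", build_ie(buf, 45, &body, sizeof(body)));
	return 0;
}
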
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8c00e2d1d63..bd14bd26a2b 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -31,6 +31,8 @@
* @MESH_PATH_FIXED: the mesh path has been manually set and should not be
* modified
* @MESH_PATH_RESOLVED: the mesh path has been resolved
+ * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
+ * already queued up, waiting for the discovery process to start.
*
* MESH_PATH_RESOLVED is used by the mesh path timer to
* decide when to stop or cancel the mesh path discovery.
@@ -41,6 +43,7 @@ enum mesh_path_flags {
MESH_PATH_SN_VALID = BIT(2),
MESH_PATH_FIXED = BIT(3),
MESH_PATH_RESOLVED = BIT(4),
+ MESH_PATH_REQ_QUEUED = BIT(5),
};
/**
@@ -212,6 +215,10 @@ int mesh_add_vendor_ies(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
int mesh_add_ds_params_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
@@ -226,6 +233,8 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
/* Mesh paths */
int mesh_nexthop_lookup(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_nexthop_resolve(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
struct mesh_path *mesh_path_lookup(u8 *dst,
struct ieee80211_sub_if_data *sdata);
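
The MESH_PATH_REQ_QUEUED flag documented above is set under mpath->state_lock in the mesh_hwmp.c hunks below, so each destination has at most one pending entry on the PREQ queue; mesh_path_start_discovery() and the path timer clear it again. A user-space sketch of the queue-once idea, not part of the patch; the real code naturally takes the spinlock around the test-and-set.

#include <stdio.h>

#define PATH_REQ_QUEUED 0x01

struct path { unsigned flags; };

/* returns 1 if a new PREQ was queued, 0 if one is already pending */
static int queue_preq_once(struct path *p)
{
	if (p->flags & PATH_REQ_QUEUED)
		return 0;
	p->flags |= PATH_REQ_QUEUED;	/* cleared when discovery starts */
	return 1;
}

int main(void)
{
	struct path p = { 0 };

	printf("%d %d\n", queue_preq_once(&p), queue_preq_once(&p)); /* 1 0 */
	return 0;
}
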
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 174040a4288..73abb7524b2 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -113,20 +113,20 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
- int ie_len;
+ u8 *pos, ie_len;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+ sizeof(mgmt->u.action.u.mesh_action);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + 37); /* max HWMP IE */
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -240,20 +240,24 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct sk_buff *skb;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
- int ie_len;
+ u8 *pos, ie_len;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+ sizeof(mgmt->u.action.u.mesh_action);
+ if (time_before(jiffies, ifmsh->next_perr))
+ return -EAGAIN;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -290,6 +294,8 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
/* see note in function header */
prepare_frame_for_deferred_tx(sdata, skb);
+ ifmsh->next_perr = TU_TO_EXP_TIME(
+ ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
ieee80211_add_pending_skb(local, skb);
return 0;
}
@@ -393,15 +399,13 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
orig_metric = PREQ_IE_METRIC(hwmp_ie);
break;
case MPATH_PREP:
- /* Originator here refers to the MP that was the destination in
- * the Path Request. The draft refers to that MP as the
- * destination address, even though usually it is the origin of
- * the PREP frame. We divert from the nomenclature in the draft
+ /* Originator here refers to the MP that was the target in the
+ * Path Request. We divert from the nomenclature in the draft
* so that we can easily use a single function to gather path
* information from both PREQ and PREP frames.
*/
- orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
- orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
+ orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
+ orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
break;
@@ -562,9 +566,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
mhwmp_dbg("replying to the PREQ");
- mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
- cpu_to_le32(target_sn), 0, orig_addr,
- cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
+ mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
+ cpu_to_le32(orig_sn), 0, target_addr,
+ cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
} else
@@ -618,14 +622,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
- /* Note that we divert from the draft nomenclature and denominate
- * destination to what the draft refers to as origininator. So in this
- * function destnation refers to the final destination of the PREP,
- * which corresponds with the originator of the PREQ which this PREP
- * replies
- */
- target_addr = PREP_IE_TARGET_ADDR(prep_elem);
- if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
/* destination, no forwarding required */
return;
@@ -636,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
}
rcu_read_lock();
- mpath = mesh_path_lookup(target_addr, sdata);
+ mpath = mesh_path_lookup(orig_addr, sdata);
if (mpath)
spin_lock_bh(&mpath->state_lock);
else
@@ -651,7 +649,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
flags = PREP_IE_FLAGS(prep_elem);
lifetime = PREP_IE_LIFETIME(prep_elem);
hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
- orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+ target_addr = PREP_IE_TARGET_ADDR(prep_elem);
target_sn = PREP_IE_TARGET_SN(prep_elem);
orig_sn = PREP_IE_ORIG_SN(prep_elem);
@@ -867,9 +865,20 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
return;
}
+ spin_lock(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+ spin_unlock(&mpath->state_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ kfree(preq_node);
+ return;
+ }
+
memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
preq_node->flags = flags;
+ mpath->flags |= MESH_PATH_REQ_QUEUED;
+ spin_unlock(&mpath->state_lock);
+
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +930,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
goto enddiscovery;
spin_lock_bh(&mpath->state_lock);
+ mpath->flags &= ~MESH_PATH_REQ_QUEUED;
if (preq_node->flags & PREQ_Q_F_START) {
if (mpath->flags & MESH_PATH_RESOLVING) {
spin_unlock_bh(&mpath->state_lock);
@@ -972,71 +982,97 @@ enddiscovery:
kfree(preq_node);
}
-/**
- * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
+/* mesh_nexthop_resolve - look up the next hop for the given skb and start path
+ * discovery if no forwarding information is found.
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
- * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
- * found, the function will start a path discovery and queue the frame so it is
- * sent when the path is resolved. This means the caller must not free the skb
- * in this case.
+ * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
+ * skb is freed here if no mpath could be allocated.
*/
-int mesh_nexthop_lookup(struct sk_buff *skb,
- struct ieee80211_sub_if_data *sdata)
+int mesh_nexthop_resolve(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
{
- struct sk_buff *skb_to_free = NULL;
- struct mesh_path *mpath;
- struct sta_info *next_hop;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mesh_path *mpath;
+ struct sk_buff *skb_to_free = NULL;
u8 *target_addr = hdr->addr3;
int err = 0;
rcu_read_lock();
- mpath = mesh_path_lookup(target_addr, sdata);
+ err = mesh_nexthop_lookup(skb, sdata);
+ if (!err)
+ goto endlookup;
+ /* no nexthop found, start resolving */
+ mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
mesh_path_add(target_addr, sdata);
mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
- sdata->u.mesh.mshstats.dropped_frames_no_route++;
+ mesh_path_discard_frame(skb, sdata);
err = -ENOSPC;
goto endlookup;
}
}
- if (mpath->flags & MESH_PATH_ACTIVE) {
- if (time_after(jiffies,
- mpath->exp_time -
- msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
- !(mpath->flags & MESH_PATH_RESOLVING) &&
- !(mpath->flags & MESH_PATH_FIXED)) {
- mesh_queue_preq(mpath,
- PREQ_Q_F_START | PREQ_Q_F_REFRESH);
- }
- next_hop = rcu_dereference(mpath->next_hop);
- if (next_hop)
- memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
- else
- err = -ENOENT;
- } else {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (!(mpath->flags & MESH_PATH_RESOLVING)) {
- /* Start discovery only if it is not running yet */
- mesh_queue_preq(mpath, PREQ_Q_F_START);
- }
+ if (!(mpath->flags & MESH_PATH_RESOLVING))
+ mesh_queue_preq(mpath, PREQ_Q_F_START);
+
+ if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
+ skb_to_free = skb_dequeue(&mpath->frame_queue);
- if (skb_queue_len(&mpath->frame_queue) >=
- MESH_FRAME_QUEUE_LEN)
- skb_to_free = skb_dequeue(&mpath->frame_queue);
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ ieee80211_set_qos_hdr(sdata, skb);
+ skb_queue_tail(&mpath->frame_queue, skb);
+ err = -ENOENT;
+ if (skb_to_free)
+ mesh_path_discard_frame(skb_to_free, sdata);
+
+endlookup:
+ rcu_read_unlock();
+ return err;
+}
+/**
+ * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
+ * this function is considered "using" the associated mpath, so a path refresh
+ * is triggered early if this mpath expires soon.
+ *
+ * @skb: 802.11 frame to be sent
+ * @sdata: network subif the frame will be sent through
+ *
+ * Returns: 0 if the next hop was found. Nonzero otherwise.
+ */
+int mesh_nexthop_lookup(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct mesh_path *mpath;
+ struct sta_info *next_hop;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u8 *target_addr = hdr->addr3;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ mpath = mesh_path_lookup(target_addr, sdata);
+
+ if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
+ goto endlookup;
+
+ if (time_after(jiffies,
+ mpath->exp_time -
+ msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+ !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
+ !(mpath->flags & MESH_PATH_RESOLVING) &&
+ !(mpath->flags & MESH_PATH_FIXED))
+ mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- skb_queue_tail(&mpath->frame_queue, skb);
- if (skb_to_free)
- mesh_path_discard_frame(skb_to_free, sdata);
- err = -ENOENT;
+ next_hop = rcu_dereference(mpath->next_hop);
+ if (next_hop) {
+ memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ err = 0;
}
endlookup:
@@ -1061,6 +1097,7 @@ void mesh_path_timer(unsigned long data)
} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
+ mpath->flags &= ~MESH_PATH_REQ_QUEUED;
spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
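
mesh_path_error_tx() above now refuses to send a PERR until dot11MeshHWMPperrMinInterval TUs have passed since the previous one, by comparing jiffies against the new ifmsh->next_perr. A user-space sketch of that throttle pattern, not part of the patch; millisecond timestamps stand in for jiffies/TUs.

#include <stdio.h>

static long next_allowed;	/* timestamp (ms) before which no PERR may go out */

static int send_perr(long now_ms, long min_interval_ms)
{
	if (now_ms < next_allowed)
		return -1;		/* -EAGAIN in the kernel code */
	next_allowed = now_ms + min_interval_ms;
	return 0;			/* the frame would be queued here */
}

int main(void)
{
	/* second call is rate-limited, third is allowed again */
	printf("%d %d %d\n", send_perr(0, 100), send_perr(50, 100),
	       send_perr(120, 100));	/* prints: 0 -1 0 */
	return 0;
}
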
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7f54c504223..edf167e3b8f 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -69,8 +69,6 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
lockdep_is_held(&pathtbl_resize_lock));
}
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
-
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
@@ -213,7 +211,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
- struct ieee80211_sub_if_data *sdata = mpath->sdata;
rcu_assign_pointer(mpath->next_hop, sta);
@@ -224,8 +221,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
- skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
- ieee80211_set_qos_hdr(sdata, skb);
+ memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
__skb_queue_tail(&tmpq, skb);
}
@@ -269,6 +265,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
memcpy(hdr->addr1, next_hop, ETH_ALEN);
rcu_read_unlock();
+ memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
@@ -423,21 +420,18 @@ static void mesh_gate_node_reclaim(struct rcu_head *rp)
}
/**
- * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
- * @mesh_tbl: table which contains known_gates list
- * @mpath: mpath to known mesh gate
- *
- * Returns: 0 on success
- *
+ * mesh_path_add_gate - add the given mpath (a path to a mesh gate) to our path table
+ * @mpath: gate path to add to table
*/
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+int mesh_path_add_gate(struct mesh_path *mpath)
{
+ struct mesh_table *tbl;
struct mpath_node *gate, *new_gate;
struct hlist_node *n;
int err;
rcu_read_lock();
- tbl = rcu_dereference(tbl);
+ tbl = rcu_dereference(mesh_paths);
hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
if (gate->mpath == mpath) {
@@ -481,8 +475,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
struct mpath_node *gate;
struct hlist_node *p, *q;
- tbl = rcu_dereference(tbl);
-
hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
if (gate->mpath == mpath) {
spin_lock_bh(&tbl->gates_lock);
@@ -501,16 +493,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
}
/**
- *
- * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
- * @mpath: gate path to add to table
- */
-int mesh_path_add_gate(struct mesh_path *mpath)
-{
- return mesh_gate_add(mesh_paths, mpath);
-}
-
-/**
* mesh_gate_num - number of gates known to this interface
* @sdata: subif data
*/
@@ -991,38 +973,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
* @skb: frame to discard
* @sdata: network subif the frame was to be sent through
*
- * If the frame was being forwarded from another MP, a PERR frame will be sent
- * to the precursor. The precursor's address (i.e. the previous hop) was saved
- * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
- * the destination is successfully resolved.
- *
* Locking: the function must be called within a rcu_read_lock region
*/
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct mesh_path *mpath;
- u32 sn = 0;
- __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
-
- if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
- u8 *ra, *da;
-
- da = hdr->addr3;
- ra = hdr->addr1;
- rcu_read_lock();
- mpath = mesh_path_lookup(da, sdata);
- if (mpath) {
- spin_lock_bh(&mpath->state_lock);
- sn = ++mpath->sn;
- spin_unlock_bh(&mpath->state_lock);
- }
- rcu_read_unlock();
- mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
- cpu_to_le32(sn), reason, ra, sdata);
- }
-
kfree_skb(skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7e57f5d07f6..41ef1b47644 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -80,11 +80,15 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
* on it in the lifecycle management section!
*/
static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
- u8 *hw_addr, u32 rates)
+ u8 *hw_addr, u32 rates,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
struct sta_info *sta;
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
+
if (local->num_sta >= MESH_MAX_PLINKS)
return NULL;
@@ -92,10 +96,17 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- set_sta_flag(sta, WLAN_STA_AUTH);
- set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
set_sta_flag(sta, WLAN_STA_WME);
+
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ if (elems->ht_cap_elem)
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+ elems->ht_cap_elem,
+ &sta->sta.ht_cap);
rate_control_rate_init(sta);
return sta;
@@ -153,23 +164,31 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
enum ieee80211_self_protected_actioncode action,
u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
- sdata->u.mesh.ie_len);
+ struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
- int ie_len = 4;
u16 peering_proto = 0;
- u8 *pos;
-
+ u8 *pos, ie_len = 4;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
+ sizeof(mgmt->u.action.u.self_prot);
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + /* capability info */
+ 2 + /* AID */
+ 2 + 8 + /* supported rates */
+ 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+ 2 + sdata->u.mesh.mesh_id_len +
+ 2 + sizeof(struct ieee80211_meshconf_ie) +
+ 2 + sizeof(struct ieee80211_ht_cap) +
+ 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + 8 + /* peering IE */
+ sdata->u.mesh.ie_len);
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
@@ -235,6 +254,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(pos, &reason, 2);
pos += 2;
}
+
+ if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+ if (mesh_add_ht_cap_ie(skb, sdata) ||
+ mesh_add_ht_info_ie(skb, sdata))
+ return -1;
+ }
+
if (mesh_add_vendor_ies(skb, sdata))
return -1;
@@ -261,7 +287,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates,
elems->ie_start, elems->total_len,
GFP_KERNEL);
else
- sta = mesh_plink_alloc(sdata, hw_addr, rates);
+ sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
if (!sta)
return;
if (sta_info_insert_rcu(sta)) {
@@ -552,7 +578,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
- sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
+ sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
if (!sta) {
mpl_dbg("Mesh plink error: plink table full\n");
return;
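
The plink and HWMP transmit paths above now compute the management-header length with offsetof() on the union member actually used, instead of the old hard-coded 25, and size the skb from a sum of worst-case IE lengths rather than a flat 400-byte slack. A cut-down sketch of the offsetof() idiom, not part of the patch; the struct only imitates the shape of struct ieee80211_mgmt.

#include <stddef.h>
#include <stdio.h>

struct mgmt {
	unsigned short frame_control, duration;
	unsigned char da[6], sa[6], bssid[6];
	unsigned short seq_ctrl;
	union {
		struct { unsigned char category, action_code; } self_prot;
		unsigned char raw[32];
	} u;
} __attribute__((packed));

int main(void)
{
	/* header length up to and including the fixed self-protected part */
	size_t hdr_len = offsetof(struct mgmt, u.self_prot) +
			 sizeof(((struct mgmt *)0)->u.self_prot);

	printf("hdr_len = %zu\n", hdr_len);	/* 24-byte header + 2 = 26 */
	return 0;
}
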
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b1b1bb368f7..ecb4c84c1bb 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -209,6 +209,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
channel_type = NL80211_CHAN_HT20;
if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
+ !ieee80111_cfg_override_disables_ht40(sdata) &&
(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
(hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
@@ -818,7 +819,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
}
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
- (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
+ !(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
netif_tx_stop_all_queues(sdata->dev);
if (drv_tx_frames_pending(local))
@@ -1120,6 +1121,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* on the next assoc, re-program HT parameters */
sdata->ht_opmode_valid = false;
+ memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
+ memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
local->power_constr_level = 0;
@@ -1359,9 +1362,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@@ -1370,6 +1370,10 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
NULL, true);
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
}
void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1377,6 +1381,16 @@ void ieee80211_beacon_connection_loss_work(struct work_struct *work)
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.beacon_connection_loss_work);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sta_info *sta;
+
+ if (ifmgd->associated) {
+ rcu_read_lock();
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ if (sta)
+ sta->beacon_loss_count++;
+ rcu_read_unlock();
+ }
if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
__ieee80211_connection_loss(sdata);
@@ -1468,6 +1482,47 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_CFG80211_DISASSOC;
}
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+ u8 *supp_rates, unsigned int supp_rates_len,
+ u32 *rates, u32 *basic_rates,
+ bool *have_higher_than_11mbit,
+ int *min_rate, int *min_rate_index)
+{
+ int i, j;
+
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = (supp_rates[i] & 0x7f) * 5;
+ bool is_basic = !!(supp_rates[i] & 0x80);
+
+ if (rate > 110)
+ *have_higher_than_11mbit = true;
+
+ /*
+ * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
+ * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+ *
+ * Note: Even though the membership selector and the basic
+ * rate flag share the same bit, they are not exactly
+ * the same.
+ */
+ if (!!(supp_rates[i] & 0x80) &&
+ (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+ continue;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].bitrate == rate) {
+ *rates |= BIT(j);
+ if (is_basic)
+ *basic_rates |= BIT(j);
+ if (rate < *min_rate) {
+ *min_rate = rate;
+ *min_rate_index = j;
+ }
+ break;
+ }
+ }
+ }
+}
static bool ieee80211_assoc_success(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -1484,7 +1539,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
u32 changed = 0;
- int i, j, err;
+ int err;
bool have_higher_than_11mbit = false;
u16 ap_ht_cap_flags;
int min_rate = INT_MAX, min_rate_index = -1;
@@ -1532,57 +1587,23 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
return false;
}
- set_sta_flag(sta, WLAN_STA_AUTH);
- set_sta_flag(sta, WLAN_STA_ASSOC);
- set_sta_flag(sta, WLAN_STA_ASSOC_AP);
+ sta_info_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
rates = 0;
basic_rates = 0;
sband = local->hw.wiphy->bands[wk->chan->band];
- for (i = 0; i < elems.supp_rates_len; i++) {
- int rate = (elems.supp_rates[i] & 0x7f) * 5;
- bool is_basic = !!(elems.supp_rates[i] & 0x80);
+ ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
+ &rates, &basic_rates, &have_higher_than_11mbit,
+ &min_rate, &min_rate_index);
- if (rate > 110)
- have_higher_than_11mbit = true;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
- rates |= BIT(j);
- if (is_basic)
- basic_rates |= BIT(j);
- if (rate < min_rate) {
- min_rate = rate;
- min_rate_index = j;
- }
- break;
- }
- }
- }
-
- for (i = 0; i < elems.ext_supp_rates_len; i++) {
- int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
- bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
-
- if (rate > 110)
- have_higher_than_11mbit = true;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
- rates |= BIT(j);
- if (is_basic)
- basic_rates |= BIT(j);
- if (rate < min_rate) {
- min_rate = rate;
- min_rate_index = j;
- }
- break;
- }
- }
- }
+ ieee80211_get_rates(sband, elems.ext_supp_rates,
+ elems.ext_supp_rates_len, &rates, &basic_rates,
+ &have_higher_than_11mbit,
+ &min_rate, &min_rate_index);
/*
* some buggy APs don't advertise basic_rates. use the lowest
@@ -1605,7 +1626,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
- ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems.ht_cap_elem, &sta->sta.ht_cap);
ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -1974,7 +1995,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems.ht_cap_elem, &sta->sta.ht_cap);
ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -2128,9 +2149,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@@ -2138,6 +2156,11 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH, reason,
NULL, true);
+
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+
mutex_lock(&ifmgd->mtx);
}
@@ -2358,6 +2381,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
ifmgd->flags = 0;
+ ifmgd->powersave = sdata->wdev.ps;
mutex_init(&ifmgd->mtx);
@@ -2632,6 +2656,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ if (req->flags & ASSOC_REQ_DISABLE_HT)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+
+ memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(ifmgd->ht_capa_mask));
+
if (req->ie && req->ie_len) {
memcpy(wk->ie, req->ie, req->ie_len);
wk->ie_len = req->ie_len;
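
ieee80211_assoc_success() above now funnels both the Supported Rates and Extended Supported Rates elements through the new ieee80211_get_rates() helper. Each rate octet carries the rate in 500 kb/s units in its low 7 bits plus a basic-rate flag in bit 7, and the value 127 with the flag set is the HT PHY membership selector, which is skipped rather than treated as a rate. A stand-alone sketch of that decoding, not part of the patch.

#include <stdio.h>

int main(void)
{
	unsigned char octets[] = { 0x82, 0x84, 0x0b, 0x16, 0xff };

	for (unsigned i = 0; i < sizeof(octets); i++) {
		int rate_100kbps = (octets[i] & 0x7f) * 5;
		int basic = !!(octets[i] & 0x80);

		if (basic && (octets[i] & 0x7f) == 127) {
			printf("membership selector, skipped\n");
			continue;
		}
		printf("%d.%d Mb/s%s\n", rate_100kbps / 10,
		       rate_100kbps % 10, basic ? " (basic)" : "");
	}
	return 0;
}
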
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 3d414411a96..f054e94901a 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -138,31 +138,16 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
mutex_unlock(&local->iflist_mtx);
}
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
- bool tell_ap)
-{
- struct ieee80211_sub_if_data *sdata;
-
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata, tell_ap);
- }
- mutex_unlock(&local->iflist_mtx);
-}
-
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing,
bool offchannel_ps_disable)
{
struct ieee80211_sub_if_data *sdata;
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
if (!ieee80211_sdata_running(sdata))
continue;
@@ -174,7 +159,6 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
}
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
- clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
/*
* This may wake up queues even though the driver
* currently has them stopped. This is not very
@@ -188,11 +172,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
netif_tx_wake_all_queues(sdata->dev);
}
- /* Check to see if we should re-enable beaconing */
- if (enable_beaconing &&
- (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
}
@@ -212,8 +194,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
return;
}
- ieee80211_recalc_idle(local);
-
if (local->hw_roc_skb) {
sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
ieee80211_tx_skb(sdata, local->hw_roc_skb);
@@ -227,6 +207,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
GFP_KERNEL);
}
+ ieee80211_recalc_idle(local);
+
mutex_unlock(&local->mtx);
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 9ee7164b207..596efaf50e0 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
}
/* stop hardware - this must stop RX */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 58a89554b78..b39dda523f3 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
-calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
- struct minstrel_rate *d, struct ieee80211_rate *rate)
+calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+ struct ieee80211_rate *rate)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
@@ -402,8 +402,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(mi, local, mr,
- &sband->bitrates[i]);
+ calc_rate_durations(local, mr, &sband->bitrates[i]);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index cdb28535716..ff5f7b84e82 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -36,8 +36,17 @@
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define GROUP_IDX(_streams, _sgi, _ht40) \
+ MINSTREL_MAX_STREAMS * 2 * _ht40 + \
+ MINSTREL_MAX_STREAMS * _sgi + \
+ _streams - 1
+
/* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) { \
+#define MCS_GROUP(_streams, _sgi, _ht40) \
+ [GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.flags = \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
@@ -58,6 +67,9 @@
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
+ *
+ * The sort order must stay fixed for the GROUP_IDX macro to be applicable:
+ * HT40 -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, 0),
@@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight)
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
- int streams = (rate->idx / MCS_GROUP_RATES) + 1;
- u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
- if (minstrel_mcs_groups[i].streams != streams)
- continue;
- if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
- continue;
-
- return i;
- }
-
- WARN_ON(1);
- return 0;
+ return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
+ !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
+ !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
static inline struct minstrel_rate_stats *
@@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
* Recalculate success probabilities and counters for a rate using EWMA
*/
static void
-minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
{
if (unlikely(mr->attempts > 0)) {
mr->sample_skipped = 0;
@@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr
* the expected number of retransmissions and their expected length
*/
static void
-minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
- int group, int rate)
+minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
{
struct minstrel_rate_stats *mr;
unsigned int usecs;
@@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = &mg->rates[i];
mr->retry_updated = false;
index = MCS_GROUP_RATES * group + i;
- minstrel_calc_rate_ewma(mp, mr);
- minstrel_ht_calc_tp(mp, mi, group, i);
+ minstrel_calc_rate_ewma(mr);
+ minstrel_ht_calc_tp(mi, group, i);
if (!mr->cur_tp)
continue;
@@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
static bool
minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
{
- if (!rate->count)
+ if (rate->idx < 0)
return false;
- if (rate->idx < 0)
+ if (!rate->count)
return false;
return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
}
static void
-minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
minstrel_ht_update_stats(mp, mi);
if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
- minstrel_aggr_check(mp, sta, skb);
+ minstrel_aggr_check(sta, skb);
}
}
@@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate, int index,
- struct ieee80211_tx_rate_control *txrc,
bool sample, bool rtscts)
{
const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@ -628,11 +626,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
if (sample_idx >= 0) {
sample = true;
minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
- txrc, true, false);
+ true, false);
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
- txrc, false, false);
+ false, false);
}
if (mp->hw->max_rates >= 3) {
@@ -643,13 +641,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
*/
if (sample_idx >= 0)
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
- txrc, false, false);
+ false, false);
else
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
- txrc, false, true);
+ false, true);
minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
- txrc, false, !sample);
+ false, !sample);
ar[3].count = 0;
ar[3].idx = -1;
@@ -660,7 +658,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* max_tp_rate -> max_prob_rate by default.
*/
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
- txrc, false, !sample);
+ false, !sample);
ar[2].count = 0;
ar[2].idx = -1;
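
The new GROUP_IDX() macro above lets minstrel_ht_get_group_idx() compute the MCS group index directly from the rate flags instead of scanning minstrel_mcs_groups[], which only works because the table is laid out in the fixed HT40 -> SGI -> streams order. A small sketch of the arithmetic, not part of the patch; MAX_STREAMS = 3 is assumed here to match MINSTREL_MAX_STREAMS.

#include <stdio.h>

#define MAX_STREAMS 3
#define GROUP_IDX(streams, sgi, ht40) \
	(MAX_STREAMS * 2 * (ht40) + MAX_STREAMS * (sgi) + (streams) - 1)

int main(void)
{
	/* 1 stream, no SGI, 20 MHz -> 0; 3 streams, SGI, 40 MHz -> 11 */
	printf("%d %d\n", GROUP_IDX(1, 0, 0), GROUP_IDX(3, 1, 1));
	return 0;
}
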
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index aeda65466f3..502d3ecc4a7 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -318,7 +318,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
rinfo[i].diff = i * pinfo->norm_offset;
}
for (i = 1; i < sband->n_bitrates; i++) {
- s = 0;
+ s = false;
for (j = 0; j < sband->n_bitrates - i; j++)
if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
sband->bitrates[rinfo[j + 1].index].bitrate)) {
@@ -327,7 +327,7 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
rinfo[j + 1].index = tmp;
rinfo[rinfo[j].index].rev_index = j;
rinfo[rinfo[j + 1].index].rev_index = j + 1;
- s = 1;
+ s = true;
}
if (!s)
break;
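The s = 0/1 to s = false/true conversion above sits in a plain bubble sort of the bitrate table. A self-contained sketch of the same early-exit pattern on an ordinary integer array (array name and values are illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>

static void bubble_sort(int *v, int n)
{
        bool swapped;
        int i, j, tmp;

        for (i = 1; i < n; i++) {
                swapped = false;
                for (j = 0; j < n - i; j++) {
                        if (v[j] > v[j + 1]) {
                                tmp = v[j];
                                v[j] = v[j + 1];
                                v[j + 1] = tmp;
                                swapped = true;
                        }
                }
                if (!swapped)   /* nothing moved: already sorted */
                        break;
        }
}

int main(void)
{
        int rates[] = { 540, 10, 110, 60, 240 };
        int i;

        bubble_sort(rates, 5);
        for (i = 0; i < 5; i++)
                printf("%d ", rates[i]);
        printf("\n");
        return 0;
}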
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fb123e2e081..f407427c642 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -28,6 +28,7 @@
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
+#include "rate.h"
/*
* monitor mode reception
@@ -748,10 +749,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct sta_info *sta = rx->sta;
struct tid_ampdu_rx *tid_agg_rx;
u16 sc;
- int tid;
+ u8 tid, ack_policy;
if (!ieee80211_is_data_qos(hdr->frame_control))
goto dont_reorder;
@@ -764,6 +766,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
if (!sta)
goto dont_reorder;
+ ack_policy = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_ACK_POLICY_MASK;
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -774,6 +778,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
goto dont_reorder;
+ /* not part of a BA session */
+ if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ goto dont_reorder;
+
+ /* not actually part of this BA session */
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ goto dont_reorder;
+
/* new, potentially un-ordered, ampdu frame - process it */
/* reset session timer */
@@ -858,6 +871,13 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->sdata->control_port_protocol)
return RX_CONTINUE;
}
+
+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+ cfg80211_rx_spurious_frame(rx->sdata->dev,
+ hdr->addr2,
+ GFP_ATOMIC))
+ return RX_DROP_UNUSABLE;
+
return RX_DROP_MONITOR;
}
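The reorder hunk above starts gating A-MPDU reordering on the ack policy carried in the QoS control field, next to the existing TID extraction. A stand-alone sketch of pulling both fields out of a QoS control value; the mask constants mirror what ieee80211.h is assumed to define and are redefined locally so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* local copies of the relevant masks (assumed values) */
#define QOS_CTL_TID_MASK                0x000f
#define QOS_CTL_ACK_POLICY_MASK         0x0060
#define QOS_CTL_ACK_POLICY_NORMAL       0x0000
#define QOS_CTL_ACK_POLICY_NOACK        0x0020
#define QOS_CTL_ACK_POLICY_BLOCKACK     0x0060

int main(void)
{
        uint16_t qos_ctl = 0x0065;      /* TID 5, block-ack policy */
        unsigned int tid = qos_ctl & QOS_CTL_TID_MASK;
        unsigned int ack = qos_ctl & QOS_CTL_ACK_POLICY_MASK;

        printf("tid=%u\n", tid);
        if (ack == QOS_CTL_ACK_POLICY_BLOCKACK ||
            ack == QOS_CTL_ACK_POLICY_NORMAL)
                printf("may belong to a BA session, reorder\n");
        else
                printf("not part of a BA session, deliver directly\n");
        return 0;
}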
@@ -1327,15 +1347,20 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
/*
* If we receive a 4-addr nullfunc frame from a STA
- * that was not moved to a 4-addr STA vlan yet, drop
- * the frame to the monitor interface, to make sure
- * that hostapd sees it
+ * that was not moved to a 4-addr STA vlan yet, send
+ * the event to userspace and, for older hostapd, drop
+ * the frame to the monitor interface.
*/
if (ieee80211_has_a4(hdr->frame_control) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
(rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)))
+ !rx->sdata->u.vlan.sta))) {
+ if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
+ cfg80211_rx_unexpected_4addr_frame(
+ rx->sdata->dev, sta->sta.addr,
+ GFP_ATOMIC);
return RX_DROP_MONITOR;
+ }
/*
* Update counter and free packet here to avoid
* counting this as a dropped packet.
@@ -1551,25 +1576,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
-{
- u8 *data = rx->skb->data;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return RX_CONTINUE;
-
- /* remove the qos control field, update frame type and meta-data */
- memmove(data + IEEE80211_QOS_CTL_LEN, data,
- ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
- hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
- /* change frame type to non QOS */
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
-
- return RX_CONTINUE;
-}
-
static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
@@ -1802,7 +1808,12 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
}
if (xmit_skb) {
- /* send to wireless media */
+ /*
+ * Send to wireless media and increase priority by 256 to
+ * keep the received priority instead of reclassifying
+ * the frame (see cfg80211_classify8021d).
+ */
+ xmit_skb->priority += 256;
xmit_skb->protocol = htons(ETH_P_802_3);
skb_reset_network_header(xmit_skb);
skb_reset_mac_header(xmit_skb);
@@ -1871,13 +1882,16 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
static ieee80211_rx_result
ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *fwd_hdr, *hdr;
+ struct ieee80211_tx_info *info;
struct ieee80211s_hdr *mesh_hdr;
- unsigned int hdrlen;
struct sk_buff *skb = rx->skb, *fwd_skb;
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
+ u16 q, hdrlen;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1893,14 +1907,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (!mesh_hdr->ttl)
- /* illegal frame */
- return RX_DROP_MONITOR;
-
- if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
- IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
- dropped_frames_congestion);
return RX_DROP_MONITOR;
- }
if (mesh_hdr->flags & MESH_FLAGS_AE) {
struct mesh_path *mppath;
@@ -1933,60 +1940,50 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
return RX_CONTINUE;
- mesh_hdr->ttl--;
+ q = ieee80211_select_queue_80211(local, skb, hdr);
+ if (ieee80211_queue_stopped(&local->hw, q)) {
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+ return RX_DROP_MONITOR;
+ }
+ skb_set_queue_mapping(skb, q);
- if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
- if (!mesh_hdr->ttl)
- IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
- dropped_frames_ttl);
- else {
- struct ieee80211_hdr *fwd_hdr;
- struct ieee80211_tx_info *info;
-
- fwd_skb = skb_copy(skb, GFP_ATOMIC);
-
- if (!fwd_skb && net_ratelimit())
- printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- sdata->name);
- if (!fwd_skb)
- goto out;
-
- fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
- info = IEEE80211_SKB_CB(fwd_skb);
- memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- info->control.vif = &rx->sdata->vif;
- if (is_multicast_ether_addr(fwd_hdr->addr1)) {
- IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
- fwded_mcast);
- skb_set_queue_mapping(fwd_skb,
- ieee80211_select_queue(sdata, fwd_skb));
- ieee80211_set_qos_hdr(sdata, fwd_skb);
- } else {
- int err;
- /*
- * Save TA to addr1 to send TA a path error if a
- * suitable next hop is not found
- */
- memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
- ETH_ALEN);
- err = mesh_nexthop_lookup(fwd_skb, sdata);
- /* Failed to immediately resolve next hop:
- * fwded frame was dropped or will be added
- * later to the pending skb queue. */
- if (err)
- return RX_DROP_MONITOR;
-
- IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
- fwded_unicast);
- }
- IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
- fwded_frames);
- ieee80211_add_pending_skb(local, fwd_skb);
- }
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ goto out;
+
+ if (!--mesh_hdr->ttl) {
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+ return RX_DROP_MONITOR;
+ }
+
+ fwd_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!fwd_skb) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
+ sdata->name);
+ goto out;
}
+ fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
+ info = IEEE80211_SKB_CB(fwd_skb);
+ memset(info, 0, sizeof(*info));
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.vif = &rx->sdata->vif;
+ info->control.jiffies = jiffies;
+ if (is_multicast_ether_addr(fwd_hdr->addr1)) {
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+ } else {
+ /* unable to resolve next hop */
+ mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
+ 0, reason, fwd_hdr->addr2, sdata);
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+ return RX_DROP_MONITOR;
+ }
+
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+ ieee80211_add_pending_skb(local, fwd_skb);
out:
if (is_multicast_ether_addr(hdr->addr1) ||
sdata->dev->flags & IFF_PROMISC)
@@ -2014,12 +2011,17 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
/*
- * Allow the cooked monitor interface of an AP to see 4-addr frames so
- * that a 4-addr station can be detected and moved into a separate VLAN
+ * Send unexpected-4addr-frame event to hostapd. For older versions,
+ * also drop the frame to cooked monitor interfaces.
*/
if (ieee80211_has_a4(hdr->frame_control) &&
- sdata->vif.type == NL80211_IFTYPE_AP)
+ sdata->vif.type == NL80211_IFTYPE_AP) {
+ if (rx->sta &&
+ !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
+ cfg80211_rx_unexpected_4addr_frame(
+ rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
return RX_DROP_MONITOR;
+ }
err = __ieee80211_data_to_8023(rx, &port_control);
if (unlikely(err))
@@ -2174,6 +2176,18 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
if (!ieee80211_is_mgmt(mgmt->frame_control))
return RX_DROP_MONITOR;
+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+ ieee80211_is_beacon(mgmt->frame_control) &&
+ !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(rx->skb);
+ cfg80211_report_obss_beacon(rx->local->hw.wiphy,
+ rx->skb->data, rx->skb->len,
+ status->freq, GFP_ATOMIC);
+ rx->flags |= IEEE80211_RX_BEACON_REPORTED;
+ }
+
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
@@ -2206,16 +2220,69 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
switch (mgmt->u.action.category) {
+ case WLAN_CATEGORY_HT:
+ /* reject HT action frames from stations not supporting HT */
+ if (!rx->sta->sta.ht_cap.ht_supported)
+ goto invalid;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+ sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ break;
+
+ /* verify action & smps_control are present */
+ if (len < IEEE80211_MIN_ACTION_SIZE + 2)
+ goto invalid;
+
+ switch (mgmt->u.action.u.ht_smps.action) {
+ case WLAN_HT_ACTION_SMPS: {
+ struct ieee80211_supported_band *sband;
+ u8 smps;
+
+ /* convert to HT capability */
+ switch (mgmt->u.action.u.ht_smps.smps_control) {
+ case WLAN_HT_SMPS_CONTROL_DISABLED:
+ smps = WLAN_HT_CAP_SM_PS_DISABLED;
+ break;
+ case WLAN_HT_SMPS_CONTROL_STATIC:
+ smps = WLAN_HT_CAP_SM_PS_STATIC;
+ break;
+ case WLAN_HT_SMPS_CONTROL_DYNAMIC:
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ break;
+ default:
+ goto invalid;
+ }
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ /* if no change do nothing */
+ if ((rx->sta->sta.ht_cap.cap &
+ IEEE80211_HT_CAP_SM_PS) == smps)
+ goto handled;
+
+ rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS;
+ rx->sta->sta.ht_cap.cap |= smps;
+
+ sband = rx->local->hw.wiphy->bands[status->band];
+
+ rate_control_rate_update(local, sband, rx->sta,
+ IEEE80211_RC_SMPS_CHANGED,
+ local->_oper_channel_type);
+ goto handled;
+ }
+ default:
+ goto invalid;
+ }
+
+ break;
case WLAN_CATEGORY_BACK:
- /*
- * The aggregation code is not prepared to handle
- * anything but STA/AP due to the BSSID handling;
- * IBSS could work in the code but isn't supported
- * by drivers or the standard.
- */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- sdata->vif.type != NL80211_IFTYPE_AP)
+ sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
break;
/* verify action_code is present */
@@ -2493,6 +2560,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
goto out_free_skb;
rx->flags |= IEEE80211_RX_CMNTR;
+ /* If there are no cooked monitor interfaces, just free the SKB */
+ if (!local->cooked_mntrs)
+ goto out_free_skb;
+
if (skb_headroom(skb) < sizeof(*rthdr) &&
pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
goto out_free_skb;
@@ -2628,7 +2699,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
- CALL_RXH(ieee80211_rx_h_remove_qos_control)
CALL_RXH(ieee80211_rx_h_amsdu)
CALL_RXH(ieee80211_rx_h_data)
CALL_RXH(ieee80211_rx_h_ctrl);
@@ -2748,8 +2818,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
rate_idx = 0; /* TODO: HT rates */
else
rate_idx = status->rate_idx;
- rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
- hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
+ ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
+ BIT(rate_idx));
}
break;
case NL80211_IFTYPE_MESH_POINT:
@@ -2770,10 +2840,17 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
+ /*
+ * Accept public action frames even when the
+ * BSSID doesn't match, this is used for P2P
+ * and location updates. Note that mac80211
+ * itself never looks at these frames.
+ */
+ if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+ ieee80211_is_public_action(hdr, skb->len))
+ return 1;
if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control) &&
- !(ieee80211_is_action(hdr->frame_control) &&
- sdata->vif.p2p))
+ !ieee80211_is_beacon(hdr->frame_control))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
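The rewritten mesh-forwarding path above flattens the old nested logic into: pick a queue, drop on congestion, decrement the TTL and drop when it reaches zero, then copy the frame and either forward it or send a path error. A compact stand-alone sketch of just the TTL-and-copy part of that decision (the struct layout and names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
        unsigned char ttl;
        char payload[32];
};

/* return a forwarded copy, or NULL when the frame must be dropped */
static struct frame *mesh_forward(const struct frame *in, bool queue_stopped)
{
        struct frame *fwd;

        if (queue_stopped)
                return NULL;            /* congestion: drop */
        if (in->ttl <= 1)
                return NULL;            /* TTL exhausted: drop */

        fwd = malloc(sizeof(*fwd));
        if (!fwd)
                return NULL;            /* allocation failure: drop */

        memcpy(fwd, in, sizeof(*fwd));
        fwd->ttl = in->ttl - 1;         /* decrement before forwarding */
        return fwd;
}

int main(void)
{
        struct frame f = { .ttl = 2, .payload = "mesh data" };
        struct frame *fwd = mesh_forward(&f, false);

        if (fwd) {
                printf("forwarding '%s' with ttl=%u\n",
                       fwd->payload, (unsigned)fwd->ttl);
                free(fwd);
        } else {
                printf("dropped\n");
        }
        return 0;
}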
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 105436dbb90..9270771702f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -106,7 +106,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
/* save the ERP value so that it is available at association time */
if (elems->erp_info && elems->erp_info_len >= 1) {
bss->erp_value = elems->erp_info[0];
- bss->has_erp_value = 1;
+ bss->has_erp_value = true;
}
if (elems->tim) {
@@ -213,12 +213,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
- /* If we are on-operating-channel, and this packet is for the
- * current channel, pass the pkt on up the stack so that
- * the rest of the stack can make use of it.
- */
- if (ieee80211_cfg_on_oper_channel(sdata->local)
- && (channel == sdata->local->oper_channel))
+ if (channel == sdata->local->oper_channel)
return RX_CONTINUE;
dev_kfree_skb(skb);
@@ -264,8 +259,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
- bool on_oper_chan;
- bool enable_beacons = false;
lockdep_assert_held(&local->mtx);
@@ -298,25 +291,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
local->scanning = 0;
local->scan_channel = NULL;
- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
- if (was_hw_scan || !on_oper_chan)
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- else
- /* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ /* Set power back to normal operating levels. */
+ ieee80211_hw_config(local, 0);
if (!was_hw_scan) {
- bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
- /* We should always be on-channel at this point. */
- WARN_ON(!on_oper_chan2);
- if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
- enable_beacons = true;
-
- ieee80211_offchannel_return(local, enable_beacons, true);
+ ieee80211_offchannel_return(local, true);
}
ieee80211_recalc_idle(local);
@@ -361,11 +342,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- /* We always want to use off-channel PS, even if we
- * are not really leaving oper-channel. Don't
- * tell the AP though, as long as we are on-channel.
- */
- ieee80211_offchannel_enable_all_ps(local, false);
+ ieee80211_offchannel_stop_vifs(local, true);
ieee80211_configure_filter(local);
@@ -373,8 +350,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_hw_config(local, 0);
ieee80211_queue_delayed_work(&local->hw,
- &local->scan_work,
- IEEE80211_CHANNEL_TIME);
+ &local->scan_work, 0);
return 0;
}
@@ -510,96 +486,39 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
next_chan = local->scan_req->channels[local->scan_channel_idx];
- if (ieee80211_cfg_on_oper_channel(local)) {
- /* We're currently on operating channel. */
- if (next_chan == local->oper_channel)
- /* We don't need to move off of operating channel. */
- local->next_scan_state = SCAN_SET_CHANNEL;
- else
- /*
- * We do need to leave operating channel, as next
- * scan is somewhere else.
- */
- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
- } else {
- /*
- * we're currently scanning a different channel, let's
- * see if we can scan another channel without interfering
- * with the current traffic situation.
- *
- * Since we don't know if the AP has pending frames for us
- * we can only check for our tx queues and use the current
- * pm_qos requirements for rx. Hence, if no tx traffic occurs
- * at all we will scan as many channels in a row as the pm_qos
- * latency allows us to. Additionally we also check for the
- * currently negotiated listen interval to prevent losing
- * frames unnecessarily.
- *
- * Otherwise switch back to the operating channel.
- */
-
- bad_latency = time_after(jiffies +
- ieee80211_scan_get_channel_time(next_chan),
- local->leave_oper_channel_time +
- usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
-
- listen_int_exceeded = time_after(jiffies +
- ieee80211_scan_get_channel_time(next_chan),
- local->leave_oper_channel_time +
- usecs_to_jiffies(min_beacon_int * 1024) *
- local->hw.conf.listen_interval);
-
- if (associated && ( !tx_empty || bad_latency ||
- listen_int_exceeded))
- local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
- else
- local->next_scan_state = SCAN_SET_CHANNEL;
- }
-
- *next_delay = 0;
-}
-
-static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
- unsigned long *next_delay)
-{
- /* PS will already be in off-channel mode,
- * we do that once at the beginning of scanning.
- */
- ieee80211_offchannel_stop_vifs(local, false);
-
/*
- * What if the nullfunc frames didn't arrive?
+ * we're currently scanning a different channel, let's
+ * see if we can scan another channel without interfering
+ * with the current traffic situation.
+ *
+ * Since we don't know if the AP has pending frames for us
+ * we can only check for our tx queues and use the current
+ * pm_qos requirements for rx. Hence, if no tx traffic occurs
+ * at all we will scan as many channels in a row as the pm_qos
+ * latency allows us to. Additionally we also check for the
+ * currently negotiated listen interval to prevent losing
+ * frames unnecessarily.
+ *
+ * Otherwise switch back to the operating channel.
*/
- drv_flush(local, false);
- if (local->ops->flush)
- *next_delay = 0;
- else
- *next_delay = HZ / 10;
- /* remember when we left the operating channel */
- local->leave_oper_channel_time = jiffies;
+ bad_latency = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
- /* advance to the next channel to be scanned */
- local->next_scan_state = SCAN_SET_CHANNEL;
-}
-
-static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
- unsigned long *next_delay)
-{
- /* switch back to the operating channel */
- local->scan_channel = NULL;
- if (!ieee80211_cfg_on_oper_channel(local))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ listen_int_exceeded = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(min_beacon_int * 1024) *
+ local->hw.conf.listen_interval);
- /*
- * Re-enable vifs and beaconing. Leave PS
- * in off-channel state..will put that back
- * on-channel at the end of scanning.
- */
- ieee80211_offchannel_return(local, true, false);
+ if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
+ local->next_scan_state = SCAN_SUSPEND;
+ else
+ local->next_scan_state = SCAN_SET_CHANNEL;
- *next_delay = HZ / 5;
- local->next_scan_state = SCAN_DECISION;
+ *next_delay = 0;
}
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
@@ -613,10 +532,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_channel = chan;
- /* Only call hw-config if we really need to change channels. */
- if (chan != local->hw.conf.channel)
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
+ if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
@@ -673,6 +590,44 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->next_scan_state = SCAN_DECISION;
}
+static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ /* switch back to the operating channel */
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ /*
+ * Re-enable vifs and beaconing. Leave PS
+ * in off-channel state..will put that back
+ * on-channel at the end of scanning.
+ */
+ ieee80211_offchannel_return(local, false);
+
+ *next_delay = HZ / 5;
+ /* afterwards, resume scan & go to next channel */
+ local->next_scan_state = SCAN_RESUME;
+}
+
+static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ /* PS already is in off-channel mode */
+ ieee80211_offchannel_stop_vifs(local, false);
+
+ if (local->ops->flush) {
+ drv_flush(local, false);
+ *next_delay = 0;
+ } else
+ *next_delay = HZ / 10;
+
+ /* remember when we left the operating channel */
+ local->leave_oper_channel_time = jiffies;
+
+ /* advance to the next channel to be scanned */
+ local->next_scan_state = SCAN_SET_CHANNEL;
+}
+
void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -743,11 +698,11 @@ void ieee80211_scan_work(struct work_struct *work)
case SCAN_SEND_PROBE:
ieee80211_scan_state_send_probe(local, &next_delay);
break;
- case SCAN_LEAVE_OPER_CHANNEL:
- ieee80211_scan_state_leave_oper_channel(local, &next_delay);
+ case SCAN_SUSPEND:
+ ieee80211_scan_state_suspend(local, &next_delay);
break;
- case SCAN_ENTER_OPER_CHANNEL:
- ieee80211_scan_state_enter_oper_channel(local, &next_delay);
+ case SCAN_RESUME:
+ ieee80211_scan_state_resume(local, &next_delay);
break;
}
} while (next_delay == 0);
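With the hunks above, the software-scan state machine keeps a single decision step plus two renamed off-channel states: SCAN_SUSPEND goes back to the operating channel for a while and SCAN_RESUME leaves it again before the next channel is scanned. A toy stand-alone version of that loop, with the real channel switching and powersave work replaced by prints:

#include <stdio.h>

enum scan_state {
        SCAN_DECISION,
        SCAN_SET_CHANNEL,
        SCAN_SEND_PROBE,
        SCAN_SUSPEND,
        SCAN_RESUME,
        SCAN_DONE,
};

int main(void)
{
        enum scan_state state = SCAN_DECISION;
        int channels_left = 3;
        int need_break = 1;     /* pretend traffic forces one suspend */

        while (state != SCAN_DONE) {
                switch (state) {
                case SCAN_DECISION:
                        if (!channels_left)
                                state = SCAN_DONE;
                        else if (need_break)
                                state = SCAN_SUSPEND;
                        else
                                state = SCAN_SET_CHANNEL;
                        break;
                case SCAN_SET_CHANNEL:
                        printf("tune to scan channel (%d left)\n", channels_left--);
                        state = SCAN_SEND_PROBE;
                        break;
                case SCAN_SEND_PROBE:
                        printf("send probe requests\n");
                        state = SCAN_DECISION;
                        break;
                case SCAN_SUSPEND:
                        printf("return to operating channel, let traffic flow\n");
                        need_break = 0;
                        state = SCAN_RESUME;
                        break;
                case SCAN_RESUME:
                        printf("leave operating channel again\n");
                        state = SCAN_SET_CHANNEL;
                        break;
                case SCAN_DONE:
                        break;
                }
        }
        printf("scan complete\n");
        return 0;
}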
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8eaa746ec7a..b197136aea2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -62,14 +62,14 @@
* freed before they are done using it.
*/
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
static int sta_info_hash_del(struct ieee80211_local *local,
struct sta_info *sta)
{
struct sta_info *s;
s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
- lockdep_is_held(&local->sta_lock));
+ lockdep_is_held(&local->sta_mtx));
if (!s)
return -ENOENT;
if (s == sta) {
@@ -81,7 +81,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
while (rcu_access_pointer(s->hnext) &&
rcu_access_pointer(s->hnext) != sta)
s = rcu_dereference_protected(s->hnext,
- lockdep_is_held(&local->sta_lock));
+ lockdep_is_held(&local->sta_mtx));
if (rcu_access_pointer(s->hnext)) {
RCU_INIT_POINTER(s->hnext, sta->hnext);
return 0;
@@ -98,14 +98,12 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata && !sta->dummy &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
return sta;
@@ -119,14 +117,12 @@ struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
return sta;
@@ -143,7 +139,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
if ((sta->sdata == sdata ||
@@ -152,7 +147,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
return sta;
@@ -169,7 +163,6 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
if ((sta->sdata == sdata ||
@@ -177,7 +170,6 @@ struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
return sta;
@@ -204,16 +196,17 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
}
/**
- * __sta_info_free - internal STA free helper
+ * sta_info_free - free STA
*
* @local: pointer to the global information
* @sta: STA info to free
*
* This function must undo everything done by sta_info_alloc()
- * that may happen before sta_info_insert().
+ * that may happen before sta_info_insert(). It may only be
+ * called when sta_info_insert() has not been attempted (and
+ * if that fails, the station is freed anyway.)
*/
-static void __sta_info_free(struct ieee80211_local *local,
- struct sta_info *sta)
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
if (sta->rate_ctrl) {
rate_control_free_sta(sta);
@@ -227,10 +220,11 @@ static void __sta_info_free(struct ieee80211_local *local,
kfree(sta);
}
-/* Caller must hold local->sta_lock */
+/* Caller must hold local->sta_mtx */
static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
+ lockdep_assert_held(&local->sta_mtx);
sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
}
@@ -280,7 +274,7 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
}
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
- u8 *addr, gfp_t gfp)
+ const u8 *addr, gfp_t gfp)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
@@ -338,102 +332,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-static int sta_info_finish_insert(struct sta_info *sta,
- bool async, bool dummy_reinsert)
-{
- struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info sinfo;
- unsigned long flags;
- int err = 0;
-
- lockdep_assert_held(&local->sta_mtx);
-
- if (!sta->dummy || dummy_reinsert) {
- /* notify driver */
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- err = drv_sta_add(local, sdata, &sta->sta);
- if (err) {
- if (!async)
- return err;
- printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
- "driver (%d) - keeping it anyway.\n",
- sdata->name, sta->sta.addr, err);
- } else {
- sta->uploaded = true;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (async)
- wiphy_debug(local->hw.wiphy,
- "Finished adding IBSS STA %pM\n",
- sta->sta.addr);
-#endif
- }
-
- sdata = sta->sdata;
- }
-
- if (!dummy_reinsert) {
- if (!async) {
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
-
- /* make the station visible */
- spin_lock_irqsave(&local->sta_lock, flags);
- sta_info_hash_add(local, sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
- }
-
- list_add(&sta->list, &local->sta_list);
- } else {
- sta->dummy = false;
- }
-
- if (!sta->dummy) {
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
-
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
- }
-
- return 0;
-}
-
-static void sta_info_finish_pending(struct ieee80211_local *local)
-{
- struct sta_info *sta;
- unsigned long flags;
-
- spin_lock_irqsave(&local->sta_lock, flags);
- while (!list_empty(&local->sta_pending_list)) {
- sta = list_first_entry(&local->sta_pending_list,
- struct sta_info, list);
- list_del(&sta->list);
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- sta_info_finish_insert(sta, true, false);
-
- spin_lock_irqsave(&local->sta_lock, flags);
- }
- spin_unlock_irqrestore(&local->sta_lock, flags);
-}
-
-static void sta_info_finish_work(struct work_struct *work)
-{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, sta_finish_work);
-
- mutex_lock(&local->sta_mtx);
- sta_info_finish_pending(local);
- mutex_unlock(&local->sta_mtx);
-}
-
static int sta_info_insert_check(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -453,50 +351,15 @@ static int sta_info_insert_check(struct sta_info *sta)
return 0;
}
-static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
-{
- struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- unsigned long flags;
-
- spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- rcu_read_lock();
- return -EEXIST;
- }
-
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
- sta_info_hash_add(local, sta);
-
- list_add_tail(&sta->list, &local->sta_pending_list);
-
- rcu_read_lock();
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
- sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-
- ieee80211_queue_work(&local->hw, &local->sta_finish_work);
-
- return 0;
-}
-
/*
* should be called with sta_mtx locked
* this function replaces the mutex lock
* with a RCU lock
*/
-static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- unsigned long flags;
struct sta_info *exist_sta;
bool dummy_reinsert = false;
int err = 0;
@@ -504,19 +367,8 @@ static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
lockdep_assert_held(&local->sta_mtx);
/*
- * On first glance, this will look racy, because the code
- * in this function, which inserts a station with sleeping,
- * unlocks the sta_lock between checking existence in the
- * hash table and inserting into it.
- *
- * However, it is not racy against itself because it keeps
- * the mutex locked.
- */
-
- spin_lock_irqsave(&local->sta_lock, flags);
- /*
* check if STA exists already.
- * only accept a scenario of a second call to sta_info_insert_non_ibss
+ * only accept a scenario of a second call to sta_info_insert_finish
* with a dummy station entry that was inserted earlier
* in that case - assume that the dummy station flag should
* be removed.
@@ -526,20 +378,47 @@ static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
if (exist_sta == sta && sta->dummy) {
dummy_reinsert = true;
} else {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- mutex_unlock(&local->sta_mtx);
- rcu_read_lock();
- return -EEXIST;
+ err = -EEXIST;
+ goto out_err;
}
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ if (!sta->dummy || dummy_reinsert) {
+ /* notify driver */
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ goto out_err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+ "driver (%d) - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else
+ sta->uploaded = true;
+ }
- err = sta_info_finish_insert(sta, false, dummy_reinsert);
- if (err) {
- mutex_unlock(&local->sta_mtx);
- rcu_read_lock();
- return err;
+ if (!dummy_reinsert) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+
+ /* make the station visible */
+ sta_info_hash_add(local, sta);
+
+ list_add(&sta->list, &local->sta_list);
+ } else {
+ sta->dummy = false;
+ }
+
+ if (!sta->dummy) {
+ struct station_info sinfo;
+
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -555,54 +434,35 @@ static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
mesh_accept_plinks_update(sdata);
return 0;
+ out_err:
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ return err;
}
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
int err = 0;
+ might_sleep();
+
err = sta_info_insert_check(sta);
if (err) {
rcu_read_lock();
goto out_free;
}
- /*
- * In ad-hoc mode, we sometimes need to insert stations
- * from tasklet context from the RX path. To avoid races,
- * always do so in that case -- see the comment below.
- */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- err = sta_info_insert_ibss(sta);
- if (err)
- goto out_free;
-
- return 0;
- }
-
- /*
- * It might seem that the function called below is in race against
- * the function call above that atomically inserts the station... That,
- * however, is not true because the above code can only
- * be invoked for IBSS interfaces, and the below code will
- * not be -- and the two do not race against each other as
- * the hash table also keys off the interface.
- */
-
- might_sleep();
-
mutex_lock(&local->sta_mtx);
- err = sta_info_insert_non_ibss(sta);
+ err = sta_info_insert_finish(sta);
if (err)
goto out_free;
return 0;
out_free:
BUG_ON(!err);
- __sta_info_free(local, sta);
+ sta_info_free(local, sta);
return err;
}
@@ -629,7 +489,7 @@ int sta_info_reinsert(struct sta_info *sta)
might_sleep();
- err = sta_info_insert_non_ibss(sta);
+ err = sta_info_insert_finish(sta);
rcu_read_unlock();
return err;
}
@@ -716,7 +576,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
}
done:
- spin_lock_irqsave(&local->sta_lock, flags);
+ spin_lock_irqsave(&local->tim_lock, flags);
if (indicate_tim)
__bss_tim_set(bss, sta->sta.aid);
@@ -729,7 +589,7 @@ void sta_info_recalc_tim(struct sta_info *sta)
local->tim_in_locked_section = false;
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ spin_unlock_irqrestore(&local->tim_lock, flags);
}
static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
@@ -853,8 +713,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
{
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- unsigned long flags;
int ret, i, ac;
+ struct tid_ampdu_tx *tid_tx;
might_sleep();
@@ -873,15 +733,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
- spin_lock_irqsave(&local->sta_lock, flags);
ret = sta_info_hash_del(local, sta);
- /* this might still be the pending list ... which is fine */
- if (!ret)
- list_del(&sta->list);
- spin_unlock_irqrestore(&local->sta_lock, flags);
if (ret)
return ret;
+ list_del(&sta->list);
+
mutex_lock(&local->key_mtx);
for (i = 0; i < NUM_DEFAULT_KEYS; i++)
__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
@@ -908,6 +765,9 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
+ while (sta->sta_state > IEEE80211_STA_NONE)
+ sta_info_move_state(sta, sta->sta_state - 1);
+
if (sta->uploaded) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -953,7 +813,36 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
}
#endif
- __sta_info_free(local, sta);
+ /* There could be some memory leaks because of ampdu tx pending queue
+ * not being freed before destroying the station info.
+ *
+ * Make sure that such queues are purged before freeing the station
+ * info.
+ * TODO: We have to somehow postpone the full destruction
+ * until the aggregation stop completes. Refer
+ * http://thread.gmane.org/gmane.linux.kernel.wireless.general/81936
+ */
+
+ mutex_lock(&sta->ampdu_mlme.mtx);
+
+ for (i = 0; i < STA_TID_NUM; i++) {
+ tid_tx = rcu_dereference_protected_tid_tx(sta, i);
+ if (!tid_tx)
+ continue;
+ if (skb_queue_len(&tid_tx->pending)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ wiphy_debug(local->hw.wiphy, "TX A-MPDU purging %d "
+ "packets for tid=%d\n",
+ skb_queue_len(&tid_tx->pending), i);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+ __skb_queue_purge(&tid_tx->pending);
+ }
+ kfree_rcu(tid_tx, rcu_head);
+ }
+
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+
+ sta_info_free(local, sta);
return 0;
}
@@ -1009,11 +898,9 @@ static void sta_info_cleanup(unsigned long data)
void sta_info_init(struct ieee80211_local *local)
{
- spin_lock_init(&local->sta_lock);
+ spin_lock_init(&local->tim_lock);
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
- INIT_LIST_HEAD(&local->sta_pending_list);
- INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
@@ -1042,9 +929,6 @@ int sta_info_flush(struct ieee80211_local *local,
might_sleep();
mutex_lock(&local->sta_mtx);
-
- sta_info_finish_pending(local);
-
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
if (!sdata || sdata == sta->sdata)
WARN_ON(__sta_info_destroy(sta));
@@ -1061,7 +945,11 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, *tmp;
mutex_lock(&local->sta_mtx);
- list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
+
+ list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
if (time_after(jiffies, sta->last_rx + exp_time)) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
@@ -1069,6 +957,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
#endif
WARN_ON(__sta_info_destroy(sta));
}
+ }
+
mutex_unlock(&local->sta_mtx);
}
@@ -1517,3 +1407,56 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
sta_info_recalc_tim(sta);
}
EXPORT_SYMBOL(ieee80211_sta_set_buffered);
+
+int sta_info_move_state_checked(struct sta_info *sta,
+ enum ieee80211_sta_state new_state)
+{
+ might_sleep();
+
+ if (sta->sta_state == new_state)
+ return 0;
+
+ switch (new_state) {
+ case IEEE80211_STA_NONE:
+ if (sta->sta_state == IEEE80211_STA_AUTH)
+ clear_bit(WLAN_STA_AUTH, &sta->_flags);
+ else
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_AUTH:
+ if (sta->sta_state == IEEE80211_STA_NONE)
+ set_bit(WLAN_STA_AUTH, &sta->_flags);
+ else if (sta->sta_state == IEEE80211_STA_ASSOC)
+ clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+ else
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_ASSOC:
+ if (sta->sta_state == IEEE80211_STA_AUTH) {
+ set_bit(WLAN_STA_ASSOC, &sta->_flags);
+ } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
+ clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+ } else
+ return -EINVAL;
+ break;
+ case IEEE80211_STA_AUTHORIZED:
+ if (sta->sta_state == IEEE80211_STA_ASSOC) {
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
+ set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+ } else
+ return -EINVAL;
+ break;
+ default:
+ WARN(1, "invalid state %d", new_state);
+ return -EINVAL;
+ }
+
+ printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
+ sta->sdata->name, sta->sta.addr, new_state);
+ sta->sta_state = new_state;
+
+ return 0;
+}
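sta_info_move_state_checked() above only allows moving one step at a time along NONE -> AUTH -> ASSOC -> AUTHORIZED and back again. A stand-alone sketch of that one-step rule; the per-flag bookkeeping and the AP's authorized-station counter are deliberately left out:

#include <stdio.h>

enum sta_state {
        STA_NONE,
        STA_AUTH,
        STA_ASSOC,
        STA_AUTHORIZED,
};

/* allow only transitions between adjacent states, mirroring the checks above */
static int move_state(enum sta_state *cur, enum sta_state new_state)
{
        int step = (int)new_state - (int)*cur;

        if (new_state == *cur)
                return 0;       /* nothing to do */
        if (step != 1 && step != -1)
                return -1;      /* -EINVAL in the kernel */

        *cur = new_state;
        return 0;
}

int main(void)
{
        enum sta_state state = STA_NONE;

        if (!move_state(&state, STA_AUTH) &&
            !move_state(&state, STA_ASSOC) &&
            !move_state(&state, STA_AUTHORIZED))
                printf("station authorized\n");

        if (move_state(&state, STA_NONE))
                printf("skipping states is rejected\n");
        return 0;
}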
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 8c8ce05ad26..6f77f12dc3f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -30,7 +30,6 @@
* when virtual port control is not in use.
* @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
* frames.
- * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
* @WLAN_STA_WME: Station is a QoS-STA.
* @WLAN_STA_WDS: Station is one of our WDS peers.
* @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
@@ -52,6 +51,7 @@
* unblocks the station.
* @WLAN_STA_SP: Station is in a service period, so don't try to
* reply to other uAPSD trigger frames or PS-Poll.
+ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH,
@@ -59,7 +59,6 @@ enum ieee80211_sta_info_flags {
WLAN_STA_PS_STA,
WLAN_STA_AUTHORIZED,
WLAN_STA_SHORT_PREAMBLE,
- WLAN_STA_ASSOC_AP,
WLAN_STA_WME,
WLAN_STA_WDS,
WLAN_STA_CLEAR_PS_FILT,
@@ -71,11 +70,22 @@ enum ieee80211_sta_info_flags {
WLAN_STA_TDLS_PEER_AUTH,
WLAN_STA_UAPSD,
WLAN_STA_SP,
+ WLAN_STA_4ADDR_EVENT,
+};
+
+enum ieee80211_sta_state {
+ /* NOTE: These need to be ordered correctly! */
+ IEEE80211_STA_NONE,
+ IEEE80211_STA_AUTH,
+ IEEE80211_STA_ASSOC,
+ IEEE80211_STA_AUTHORIZED,
};
#define STA_TID_NUM 16
#define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES 0x3
+#define HT_AGG_MAX_RETRIES 15
+#define HT_AGG_BURST_RETRIES 3
+#define HT_AGG_RETRIES_PERIOD (15 * HZ)
#define HT_AGG_STATE_DRV_READY 0
#define HT_AGG_STATE_RESPONSE_RECEIVED 1
@@ -88,6 +98,7 @@ enum ieee80211_sta_info_flags {
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
* @rcu_head: rcu head for freeing structure
+ * @session_timer: check if we keep Tx-ing on the TID (by timeout value)
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
* @dialog_token: dialog token for aggregation session
@@ -110,6 +121,7 @@ enum ieee80211_sta_info_flags {
*/
struct tid_ampdu_tx {
struct rcu_head rcu_head;
+ struct timer_list session_timer;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
unsigned long state;
@@ -169,6 +181,7 @@ struct tid_ampdu_rx {
* @tid_tx: aggregation info for Tx per TID
* @tid_start_tx: sessions where start was requested
* @addba_req_num: number of times addBA request has been sent.
+ * @last_addba_req_time: timestamp of the last addBA request.
* @dialog_token_allocator: dialog token enumerator for each new session;
* @work: work struct for starting/stopping aggregation
* @tid_rx_timer_expired: bitmap indicating on which TIDs the
@@ -188,6 +201,7 @@ struct sta_ampdu_mlme {
struct work_struct work;
struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM];
+ unsigned long last_addba_req_time[STA_TID_NUM];
u8 addba_req_num[STA_TID_NUM];
u8 dialog_token_allocator;
};
@@ -260,6 +274,8 @@ struct sta_ampdu_mlme {
* @dummy: indicate a dummy station created for receiving
* EAP frames before association
* @sta: station information we share with the driver
+ * @sta_state: duplicates information about station state (for debug)
+ * @beacon_loss_count: number of times beacon loss has triggered
*/
struct sta_info {
/* General information, mostly static */
@@ -281,6 +297,8 @@ struct sta_info {
bool uploaded;
+ enum ieee80211_sta_state sta_state;
+
/* use the accessors defined below */
unsigned long _flags;
@@ -350,6 +368,7 @@ struct sta_info {
#endif
unsigned int lost_packets;
+ unsigned int beacon_loss_count;
/* should be right in front of sta to be in the same cache line */
bool dummy;
@@ -369,12 +388,18 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
static inline void set_sta_flag(struct sta_info *sta,
enum ieee80211_sta_info_flags flag)
{
+ WARN_ON(flag == WLAN_STA_AUTH ||
+ flag == WLAN_STA_ASSOC ||
+ flag == WLAN_STA_AUTHORIZED);
set_bit(flag, &sta->_flags);
}
static inline void clear_sta_flag(struct sta_info *sta,
enum ieee80211_sta_info_flags flag)
{
+ WARN_ON(flag == WLAN_STA_AUTH ||
+ flag == WLAN_STA_ASSOC ||
+ flag == WLAN_STA_AUTHORIZED);
clear_bit(flag, &sta->_flags);
}
@@ -387,9 +412,32 @@ static inline int test_sta_flag(struct sta_info *sta,
static inline int test_and_clear_sta_flag(struct sta_info *sta,
enum ieee80211_sta_info_flags flag)
{
+ WARN_ON(flag == WLAN_STA_AUTH ||
+ flag == WLAN_STA_ASSOC ||
+ flag == WLAN_STA_AUTHORIZED);
return test_and_clear_bit(flag, &sta->_flags);
}
+static inline int test_and_set_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
+{
+ WARN_ON(flag == WLAN_STA_AUTH ||
+ flag == WLAN_STA_ASSOC ||
+ flag == WLAN_STA_AUTHORIZED);
+ return test_and_set_bit(flag, &sta->_flags);
+}
+
+int sta_info_move_state_checked(struct sta_info *sta,
+ enum ieee80211_sta_state new_state);
+
+static inline void sta_info_move_state(struct sta_info *sta,
+ enum ieee80211_sta_state new_state)
+{
+ int ret = sta_info_move_state_checked(sta, new_state);
+ WARN_ON_ONCE(ret);
+}
+
+
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx);
@@ -480,7 +528,10 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
* until sta_info_insert().
*/
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
- u8 *addr, gfp_t gfp);
+ const u8 *addr, gfp_t gfp);
+
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta);
+
/*
* Insert STA info into hash table/list, returns zero or a
* -EEXIST (if the same MAC address is already present).
@@ -491,7 +542,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
*/
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
-int sta_info_insert_atomic(struct sta_info *sta);
int sta_info_reinsert(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 80de436eae2..30c265c98f7 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
- __le16 txflags;
+ u16 txflags;
rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
txflags = 0;
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
!is_multicast_ether_addr(hdr->addr1))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+ txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
put_unaligned_le16(txflags, pos);
pos += 2;
@@ -340,7 +340,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u16 frag, type;
__le16 fc;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata;
@@ -476,12 +475,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
* Fragments are passed to low-level drivers as separate skbs, so these
* are actually fragments, not frames. Update frame counters only for
* the first fragment of the frame. */
-
- frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
- type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-
if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (frag == 0) {
+ if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
local->dot11TransmittedFrameCount++;
if (is_multicast_ether_addr(hdr->addr1))
local->dot11MulticastTransmittedFrameCount++;
@@ -496,11 +491,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
* with a multicast address in the address 1 field of type Data
* or Management. */
if (!is_multicast_ether_addr(hdr->addr1) ||
- type == IEEE80211_FTYPE_DATA ||
- type == IEEE80211_FTYPE_MGMT)
+ ieee80211_is_data(fc) ||
+ ieee80211_is_mgmt(fc))
local->dot11TransmittedFragmentCount++;
} else {
- if (frag == 0)
+ if (ieee80211_is_first_frag(hdr->seq_ctrl))
local->dot11FailedCount++;
}
@@ -517,27 +512,54 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
- struct ieee80211_work *wk;
u64 cookie = (unsigned long)skb;
- rcu_read_lock();
- list_for_each_entry_rcu(wk, &local->work_list, list) {
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
- if (wk->offchan_tx.frame != skb)
- continue;
- wk->offchan_tx.status = true;
- break;
- }
- rcu_read_unlock();
- if (local->hw_roc_skb_for_status == skb) {
- cookie = local->hw_roc_cookie ^ 2;
- local->hw_roc_skb_for_status = NULL;
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+ cfg80211_probe_status(skb->dev, hdr->addr1,
+ cookie, acked, GFP_ATOMIC);
+ } else {
+ struct ieee80211_work *wk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(wk, &local->work_list, list) {
+ if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+ continue;
+ if (wk->offchan_tx.frame != skb)
+ continue;
+ wk->offchan_tx.status = true;
+ break;
+ }
+ rcu_read_unlock();
+ if (local->hw_roc_skb_for_status == skb) {
+ cookie = local->hw_roc_cookie ^ 2;
+ local->hw_roc_skb_for_status = NULL;
+ }
+
+ cfg80211_mgmt_tx_status(
+ skb->dev, cookie, skb->data, skb->len,
+ !!(info->flags & IEEE80211_TX_STAT_ACK),
+ GFP_ATOMIC);
}
+ }
- cfg80211_mgmt_tx_status(
- skb->dev, cookie, skb->data, skb->len,
- !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+ if (unlikely(info->ack_frame_id)) {
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ ack_skb = idr_find(&local->ack_status_frames,
+ info->ack_frame_id);
+ if (ack_skb)
+ idr_remove(&local->ack_status_frames,
+ info->ack_frame_id);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ /* consumes ack_skb */
+ if (ack_skb)
+ skb_complete_wifi_ack(ack_skb,
+ info->flags & IEEE80211_TX_STAT_ACK);
}
/* this was a transmitted frame, but now we want to reuse it */
@@ -545,7 +567,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* Need to make a copy before skb->cb gets cleared */
send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
- (type != IEEE80211_FTYPE_DATA);
+ !(ieee80211_is_data(fc));
/*
* This is a bit racy but we can avoid a lot of work
@@ -610,3 +632,29 @@ void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);
+
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (unlikely(info->ack_frame_id)) {
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ ack_skb = idr_find(&local->ack_status_frames,
+ info->ack_frame_id);
+ if (ack_skb)
+ idr_remove(&local->ack_status_frames,
+ info->ack_frame_id);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ /* consumes ack_skb */
+ if (ack_skb)
+ dev_kfree_skb_any(ack_skb);
+ }
+
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ieee80211_free_txskb);
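Both ieee80211_tx_status() and the new ieee80211_free_txskb() above treat the ack-status bookkeeping the same way: look the frame up by its id under the lock, remove the entry, and only then complete or free it, so the two paths cannot both act on one entry. A stand-alone sketch of that remove-then-act pattern with a plain array standing in for the idr (locking omitted):

#include <stdio.h>

#define MAX_PENDING 8

static const char *pending[MAX_PENDING];        /* id -> pending ack record */

/* register a record, return its id; 0 means "not tracked" */
static int track(const char *record)
{
        int id;

        for (id = 1; id < MAX_PENDING; id++) {
                if (!pending[id]) {
                        pending[id] = record;
                        return id;
                }
        }
        return 0;
}

/* remove the entry first, then let the caller act on it */
static const char *take(int id)
{
        const char *record;

        if (id <= 0 || id >= MAX_PENDING)
                return NULL;
        record = pending[id];
        pending[id] = NULL;
        return record;
}

int main(void)
{
        int id = track("probe to 00:11:22:33:44:55");
        const char *record = take(id);

        if (record)
                printf("completing ack status for: %s\n", record);
        if (!take(id))
                printf("second completion is a no-op\n");
        return 0;
}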
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1f8b120146d..edcd1c7ab83 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -36,7 +36,8 @@
/* misc utils */
-static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
+static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
+ struct sk_buff *skb, int group_addr,
int next_frag_len)
{
int rate, mrate, erp, dur, i;
@@ -44,7 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
struct ieee80211_local *local = tx->local;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
/* assume HW handles this */
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
@@ -76,7 +77,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
* at the highest possible rate belonging to the PHY rates in the
* BSSBasicRateSet
*/
- hdr = (struct ieee80211_hdr *)tx->skb->data;
+ hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_ctl(hdr->frame_control)) {
/* TODO: These control frames are not currently sent by
* mac80211, but should they be implemented, this function
@@ -150,11 +151,15 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
rate = mrate;
}
- /* Time needed to transmit ACK
- * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
- * to closest integer */
-
- dur = ieee80211_frame_duration(local, 10, rate, erp,
+ /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
+ if (ieee80211_is_data_qos(hdr->frame_control) &&
+ *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ dur = 0;
+ else
+ /* Time needed to transmit ACK
+ * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
+ * to closest integer */
+ dur = ieee80211_frame_duration(local, 10, rate, erp,
tx->sdata->vif.bss_conf.use_short_preamble);
if (next_frag_len) {
@@ -290,7 +295,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
if (unlikely(!assoc &&
- tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -300,17 +304,14 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
}
- } else {
- if (unlikely(ieee80211_is_data(hdr->frame_control) &&
- tx->local->num_sta == 0 &&
- tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
- /*
- * No associated STAs - no need to send multicast
- * frames.
- */
- return TX_DROP;
- }
- return TX_CONTINUE;
+ } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
+ ieee80211_is_data(hdr->frame_control) &&
+ !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) {
+ /*
+ * No associated STAs - no need to send multicast
+ * frames.
+ */
+ return TX_DROP;
}
return TX_CONTINUE;
@@ -572,8 +573,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
switch (tx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- if (ieee80211_is_auth(hdr->frame_control))
- break;
case WLAN_CIPHER_SUITE_TKIP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
@@ -637,6 +636,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
/* set up RTS protection if desired */
@@ -844,11 +844,13 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
-static int ieee80211_fragment(struct ieee80211_local *local,
+static int ieee80211_fragment(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int hdrlen,
int frag_threshold)
{
- struct sk_buff *tail = skb, *tmp;
+ struct ieee80211_local *local = tx->local;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *tmp;
int per_fragm = frag_threshold - hdrlen - FCS_LEN;
int pos = hdrlen + per_fragm;
int rem = skb->len - hdrlen - per_fragm;
@@ -856,6 +858,8 @@ static int ieee80211_fragment(struct ieee80211_local *local,
if (WARN_ON(rem < 0))
return -EINVAL;
+ /* first fragment was already added to queue by caller */
+
while (rem) {
int fraglen = per_fragm;
@@ -868,12 +872,21 @@ static int ieee80211_fragment(struct ieee80211_local *local,
IEEE80211_ENCRYPT_TAILROOM);
if (!tmp)
return -ENOMEM;
- tail->next = tmp;
- tail = tmp;
+
+ __skb_queue_tail(&tx->skbs, tmp);
+
skb_reserve(tmp, local->tx_headroom +
IEEE80211_ENCRYPT_HEADROOM);
/* copy control information */
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+
+ info = IEEE80211_SKB_CB(tmp);
+ info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+ IEEE80211_TX_CTL_FIRST_FRAGMENT);
+
+ if (rem)
+ info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+
skb_copy_queue_mapping(tmp, skb);
tmp->priority = skb->priority;
tmp->dev = skb->dev;
@@ -885,6 +898,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
pos += fraglen;
}
+ /* adjust first fragment's length */
skb->len = hdrlen + per_fragm;
return 0;
}
@@ -899,6 +913,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
int hdrlen;
int fragnum;
+ /* no matter what happens, tx->skb moves to tx->skbs */
+ __skb_queue_tail(&tx->skbs, skb);
+ tx->skb = NULL;
+
if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
return TX_CONTINUE;
@@ -927,21 +945,21 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
* of the fragments then we will simply pretend to accept the skb
* but store it away as pending.
*/
- if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+ if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
return TX_DROP;
/* update duration/seq/flags of fragments */
fragnum = 0;
- do {
+
+ skb_queue_walk(&tx->skbs, skb) {
int next_len;
const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
- if (skb->next) {
+ if (!skb_queue_is_last(&tx->skbs, skb)) {
hdr->frame_control |= morefrags;
- next_len = skb->next->len;
/*
* No multi-rate retries for fragmented frames, that
* would completely throw off the NAV at other STAs.
@@ -956,10 +974,9 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
hdr->frame_control &= ~morefrags;
next_len = 0;
}
- hdr->duration_id = ieee80211_duration(tx, 0, next_len);
hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
fragnum++;
- } while ((skb = skb->next));
+ }
return TX_CONTINUE;
}
@@ -967,16 +984,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
+ struct sk_buff *skb;
if (!tx->sta)
return TX_CONTINUE;
tx->sta->tx_packets++;
- do {
+ skb_queue_walk(&tx->skbs, skb) {
tx->sta->tx_fragments++;
tx->sta->tx_bytes += skb->len;
- } while ((skb = skb->next));
+ }
return TX_CONTINUE;
}
@@ -1015,21 +1032,25 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
+ struct sk_buff *skb;
struct ieee80211_hdr *hdr;
int next_len;
bool group_addr;
- do {
+ skb_queue_walk(&tx->skbs, skb) {
hdr = (void *) skb->data;
if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
break; /* must not overwrite AID */
- next_len = skb->next ? skb->next->len : 0;
+ if (!skb_queue_is_last(&tx->skbs, skb)) {
+ struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
+ next_len = next->len;
+ } else
+ next_len = 0;
group_addr = is_multicast_ether_addr(hdr->addr1);
hdr->duration_id =
- ieee80211_duration(tx, group_addr, next_len);
- } while ((skb = skb->next));
+ ieee80211_duration(tx, skb, group_addr, next_len);
+ }
return TX_CONTINUE;
}
@@ -1043,9 +1064,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
int tid)
{
bool queued = false;
+ bool reset_agg_timer = false;
if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
+ reset_agg_timer = true;
} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/*
* nothing -- this aggregation session is being started
@@ -1077,6 +1100,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
/* do nothing, let packet pass through */
} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
info->flags |= IEEE80211_TX_CTL_AMPDU;
+ reset_agg_timer = true;
} else {
queued = true;
info->control.vif = &tx->sdata->vif;
@@ -1086,6 +1110,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
spin_unlock(&tx->sta->lock);
}
+ /* reset session timer */
+ if (reset_agg_timer && tid_tx->timeout)
+ mod_timer(&tid_tx->session_timer,
+ TU_TO_EXP_TIME(tid_tx->timeout));
+
return queued;
}
@@ -1108,6 +1137,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
+ __skb_queue_head_init(&tx->skbs);
/*
* If this flag is set to true anywhere, and we get here,
@@ -1152,16 +1182,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (is_multicast_ether_addr(hdr->addr1)) {
tx->flags &= ~IEEE80211_TX_UNICAST;
info->flags |= IEEE80211_TX_CTL_NO_ACK;
- } else {
+ } else
tx->flags |= IEEE80211_TX_UNICAST;
- if (unlikely(local->wifi_wme_noack_test))
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- /*
- * Flags are initialized to 0. Hence, no need to
- * explicitly unset IEEE80211_TX_CTL_NO_ACK since
- * it might already be set for injected frames.
- */
- }
if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
if (!(tx->flags & IEEE80211_TX_UNICAST) ||
@@ -1180,22 +1202,18 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
return TX_CONTINUE;
}
-/*
- * Returns false if the frame couldn't be transmitted but was queued instead.
- */
-static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
- struct sta_info *sta, bool txpending)
+static bool ieee80211_tx_frags(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *skbs,
+ bool txpending)
{
- struct sk_buff *skb = *skbp, *next;
+ struct sk_buff *skb, *tmp;
struct ieee80211_tx_info *info;
- struct ieee80211_sub_if_data *sdata;
unsigned long flags;
- int len;
- bool fragm = false;
- while (skb) {
+ skb_queue_walk_safe(skbs, skb, tmp) {
int q = skb_get_queue_mapping(skb);
- __le16 fc;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->queue_stop_reasons[q] ||
@@ -1205,24 +1223,11 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
* transmission from the tx-pending tasklet when the
* queue is woken again.
*/
-
- do {
- next = skb->next;
- skb->next = NULL;
- /*
- * NB: If txpending is true, next must already
- * be NULL since we must've gone through this
- * loop before already; therefore we can just
- * queue the frame to the head without worrying
- * about reordering of fragments.
- */
- if (unlikely(txpending))
- __skb_queue_head(&local->pending[q],
- skb);
- else
- __skb_queue_tail(&local->pending[q],
- skb);
- } while ((skb = next));
+ if (txpending)
+ skb_queue_splice_init(skbs, &local->pending[q]);
+ else
+ skb_queue_splice_tail_init(skbs,
+ &local->pending[q]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
@@ -1231,47 +1236,72 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->control.sta = sta;
- if (fragm)
- info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
- IEEE80211_TX_CTL_FIRST_FRAGMENT);
-
- next = skb->next;
- len = skb->len;
+ __skb_unlink(skb, skbs);
+ drv_tx(local, skb);
+ }
- if (next)
- info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+ return true;
+}
- sdata = vif_to_sdata(info->control.vif);
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local,
+ struct sk_buff_head *skbs, int led_len,
+ struct sta_info *sta, bool txpending)
+{
+ struct ieee80211_tx_info *info;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *pubsta;
+ struct sk_buff *skb;
+ bool result = true;
+ __le16 fc;
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_MONITOR:
- info->control.vif = NULL;
- break;
- case NL80211_IFTYPE_AP_VLAN:
- info->control.vif = &container_of(sdata->bss,
- struct ieee80211_sub_if_data, u.ap)->vif;
- break;
- default:
- /* keep */
- break;
- }
+ if (WARN_ON(skb_queue_empty(skbs)))
+ return true;
- if (sta && sta->uploaded)
- info->control.sta = &sta->sta;
- else
- info->control.sta = NULL;
+ skb = skb_peek(skbs);
+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+ info = IEEE80211_SKB_CB(skb);
+ sdata = vif_to_sdata(info->control.vif);
+ if (sta && !sta->uploaded)
+ sta = NULL;
- fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
- drv_tx(local, skb);
+ if (sta)
+ pubsta = &sta->sta;
+ else
+ pubsta = NULL;
- ieee80211_tpt_led_trig_tx(local, fc, len);
- *skbp = skb = next;
- ieee80211_led_tx(local, 1);
- fragm = true;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_MONITOR:
+ sdata = NULL;
+ vif = NULL;
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ /* fall through */
+ default:
+ vif = &sdata->vif;
+ break;
}
- return true;
+ if (local->ops->tx_frags)
+ drv_tx_frags(local, vif, pubsta, skbs);
+ else
+ result = ieee80211_tx_frags(local, vif, pubsta, skbs,
+ txpending);
+
+ ieee80211_tpt_led_trig_tx(local, fc, led_len);
+ ieee80211_led_tx(local, 1);
+
+ WARN_ON_ONCE(!skb_queue_empty(skbs));
+
+ return result;
}
/*
@@ -1280,8 +1310,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
*/
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
ieee80211_tx_result res = TX_DROP;
#define CALL_TXH(txh) \
@@ -1299,8 +1328,11 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
- if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
+ __skb_queue_tail(&tx->skbs, tx->skb);
+ tx->skb = NULL;
goto txh_done;
+ }
CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
@@ -1315,13 +1347,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
I802_DEBUG_INC(tx->local->tx_handlers_drop);
- while (skb) {
- struct sk_buff *next;
-
- next = skb->next;
- dev_kfree_skb(skb);
- skb = next;
- }
+ if (tx->skb)
+ dev_kfree_skb(tx->skb);
+ else
+ __skb_queue_purge(&tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1342,6 +1371,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool result = true;
+ int led_len;
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
@@ -1351,6 +1381,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
/* initialises tx */
+ led_len = skb->len;
res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
if (unlikely(res_prepare == TX_DROP)) {
@@ -1364,7 +1395,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
info->band = tx.channel->band;
if (!invoke_tx_handlers(&tx))
- result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
+ result = __ieee80211_tx(local, &tx.skbs, led_len,
+ tx.sta, txpending);
out:
rcu_read_unlock();
return result;
@@ -1431,7 +1463,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (ieee80211_vif_is_mesh(&sdata->vif) &&
ieee80211_is_data(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1))
- if (mesh_nexthop_lookup(skb, sdata)) {
+ if (mesh_nexthop_resolve(skb, sdata)) {
/* skb queued: don't free */
rcu_read_unlock();
return;
@@ -1685,8 +1717,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
int nh_pos, h_pos;
struct sta_info *sta = NULL;
bool wme_sta = false, authorized = false, tdls_auth = false;
- struct sk_buff *tmp_skb;
bool tdls_direct = false;
+ bool multicast;
+ u32 info_flags = 0;
+ u16 info_id = 0;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
@@ -1873,7 +1907,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* if it is a multicast address (which can only happen
* in AP mode)
*/
- if (!is_multicast_ether_addr(hdr.addr1)) {
+ multicast = is_multicast_ether_addr(hdr.addr1);
+ if (!multicast) {
rcu_read_lock();
sta = sta_info_get(sdata, hdr.addr1);
if (sta) {
@@ -1914,11 +1949,54 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
goto fail;
}
+ if (unlikely(!multicast && skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
+ struct sk_buff *orig_skb = skb;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ unsigned long flags;
+ int id, r;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ r = idr_get_new_above(&local->ack_status_frames,
+ orig_skb, 1, &id);
+ if (r == -EAGAIN) {
+ idr_pre_get(&local->ack_status_frames,
+ GFP_ATOMIC);
+ r = idr_get_new_above(&local->ack_status_frames,
+ orig_skb, 1, &id);
+ }
+ if (WARN_ON(!id) || id > 0xffff) {
+ idr_remove(&local->ack_status_frames, id);
+ r = -ERANGE;
+ }
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (!r) {
+ info_id = id;
+ info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ } else if (skb_shared(skb)) {
+ kfree_skb(orig_skb);
+ } else {
+ kfree_skb(skb);
+ skb = orig_skb;
+ }
+ } else {
+ /* couldn't clone -- lose tx status ... */
+ skb = orig_skb;
+ }
+ }
+
/*
* If the skb is shared we need to obtain our own copy.
*/
if (skb_shared(skb)) {
- tmp_skb = skb;
+ struct sk_buff *tmp_skb = skb;
+
+ /* can't happen -- skb is a clone if info_id != 0 */
+ WARN_ON(info_id);
+
skb = skb_clone(skb, GFP_ATOMIC);
kfree_skb(tmp_skb);
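The ack_status_frames bookkeeping above covers only the allocation side; the TX status path lives in another file and is not part of this hunk. A sketch of what the consumer presumably looks like (hypothetical helper name, real idr/spinlock APIs):

/* Sketch: resolve info->ack_frame_id back to the stored socket skb. */
static struct sk_buff *take_ack_skb(struct ieee80211_local *local, u16 id)
{
	struct sk_buff *ack_skb;
	unsigned long flags;

	spin_lock_irqsave(&local->ack_status_lock, flags);
	ack_skb = idr_find(&local->ack_status_frames, id);
	if (ack_skb)
		idr_remove(&local->ack_status_frames, id);
	spin_unlock_irqrestore(&local->ack_status_lock, flags);

	/* caller reports the wifi ack status on ack_skb and frees it */
	return ack_skb;
}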
@@ -2019,6 +2097,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memset(info, 0, sizeof(*info));
dev->trans_start = jiffies;
+
+ info->flags = info_flags;
+ info->ack_frame_id = info_id;
+
ieee80211_xmit(sdata, skb);
return NETDEV_TX_OK;
@@ -2062,10 +2144,15 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
result = ieee80211_tx(sdata, skb, true);
} else {
+ struct sk_buff_head skbs;
+
+ __skb_queue_head_init(&skbs);
+ __skb_queue_tail(&skbs, skb);
+
hdr = (struct ieee80211_hdr *)skb->data;
sta = sta_info_get(sdata, hdr->addr1);
- result = __ieee80211_tx(local, &skb, sta, true);
+ result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
}
return result;
@@ -2178,10 +2265,10 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
/* Bitmap control */
*pos++ = n1 | aid0;
/* Part Virt Bitmap */
+ skb_put(skb, n2 - n1);
memcpy(pos, bss->tim + n1, n2 - n1 + 1);
tim[1] = n2 - n1 + 4;
- skb_put(skb, n2 - n1);
} else {
*pos++ = aid0; /* Bitmap control */
*pos++ = 0; /* Part Virt Bitmap */
@@ -2246,9 +2333,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
} else {
unsigned long flags;
- spin_lock_irqsave(&local->sta_lock, flags);
+ spin_lock_irqsave(&local->tim_lock, flags);
ieee80211_beacon_add_tim(ap, skb, beacon);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ spin_unlock_irqrestore(&local->tim_lock, flags);
}
if (tim_offset)
@@ -2279,22 +2366,31 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_mgmt *mgmt;
u8 *pos;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+ sizeof(mgmt->u.beacon);
#ifdef CONFIG_MAC80211_MESH
if (!sdata->u.mesh.mesh_id_len)
goto out;
#endif
- /* headroom, head length, tail length and maximum TIM length */
- skb = dev_alloc_skb(local->tx_headroom + 400 +
- sdata->u.mesh.ie_len);
+ skb = dev_alloc_skb(local->tx_headroom +
+ hdr_len +
+ 2 + /* NULL SSID */
+ 2 + 8 + /* supported rates */
+ 2 + 3 + /* DS params */
+ 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+ 2 + sizeof(struct ieee80211_ht_cap) +
+ 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + sdata->u.mesh.mesh_id_len +
+ 2 + sizeof(struct ieee80211_meshconf_ie) +
+ sdata->u.mesh.ie_len);
if (!skb)
goto out;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 24 + sizeof(mgmt->u.beacon));
- memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
@@ -2313,6 +2409,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
mesh_add_ds_params_ie(skb, sdata) ||
ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_ht_cap_ie(skb, sdata) ||
+ mesh_add_ht_info_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata) ||
mesh_add_vendor_ies(skb, sdata)) {
@@ -2355,6 +2453,37 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_if_ap *ap = NULL;
+ struct sk_buff *presp = NULL, *skb = NULL;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+
+ ap = &sdata->u.ap;
+ presp = rcu_dereference(ap->probe_resp);
+ if (!presp)
+ goto out;
+
+ skb = skb_copy(presp, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ memset(hdr->addr1, 0, sizeof(hdr->addr1));
+
+out:
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_proberesp_get);
+
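For drivers that offload probe responses to hardware, the intended use of the new export is presumably along the lines of the sketch below (mydrv_update_probe_resp is a hypothetical driver function, not part of this patch). Note that the returned skb is a copy with a zeroed DA, which the caller owns and must free.

/* Hypothetical driver code: fetch and upload the probe response template. */
static void mydrv_update_probe_resp(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_proberesp_get(hw, vif);

	if (!skb)	/* not an AP vif, or no template configured */
		return;

	/* ... hand skb->data / skb->len to the device here ... */

	dev_kfree_skb(skb);
}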
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -2567,15 +2696,15 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int tid)
{
skb_set_mac_header(skb, 0);
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
- /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
- skb_set_queue_mapping(skb, IEEE80211_AC_VO);
- skb->priority = 7;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+ skb->priority = tid;
/*
* The other path calling ieee80211_xmit is from the tasklet,
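The header-side counterpart of this rename is not in the hunk; presumably ieee80211_tx_skb() survives as a thin inline wrapper that keeps the old behaviour of sending internal management frames on VO by passing TID 7, roughly:

/* Assumed wrapper (not shown in this diff); TID 7 maps to AC_VO via
 * ieee802_1d_to_ac[].
 */
static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
				    struct sk_buff *skb)
{
	ieee80211_tx_skb_tid(sdata, skb, 7);
}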
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index eca0fad0970..9919892575f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
+#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
@@ -96,13 +97,13 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
+ struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- do {
+ skb_queue_walk(&tx->skbs, skb) {
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- } while ((skb = skb->next));
+ }
}
int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -564,6 +565,172 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ u64 filter, u32 crc)
+{
+ size_t left = len;
+ u8 *pos = start;
+ bool calc_crc = filter != 0;
+
+ memset(elems, 0, sizeof(*elems));
+ elems->ie_start = start;
+ elems->total_len = len;
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left)
+ break;
+
+ if (calc_crc && id < 64 && (filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ }
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (pos[3] == 1) {
+ /* OUI Type 1 - WPA IE */
+ elems->wpa = pos;
+ elems->wpa_len = elen;
+ } else if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ break;
+ case WLAN_EID_HT_INFORMATION:
+ if (elen >= sizeof(struct ieee80211_ht_info))
+ elems->ht_info_elem = (void *)pos;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ elems->ch_switch_elem = pos;
+ elems->ch_switch_elem_len = elen;
+ break;
+ case WLAN_EID_QUIET:
+ if (!elems->quiet_elem) {
+ elems->quiet_elem = pos;
+ elems->quiet_elem_len = elen;
+ }
+ elems->num_of_quiet_elem++;
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ elems->pwr_constr_elem = pos;
+ elems->pwr_constr_elem_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ default:
+ break;
+ }
+
+ left -= elen;
+ pos += elen;
+ }
+
+ return crc;
+}
+
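How the CRC filter is meant to be used: only elements whose IDs are set in the 64-bit filter (plus the WPA/WMM vendor IEs, which are always folded in when a filter is given) contribute to the returned CRC, so a caller can cheaply detect changes in the elements it cares about. A hypothetical caller, not part of this patch:

/* Hypothetical caller: did any of the "interesting" beacon elements change? */
static bool beacon_elems_changed(u8 *ies, size_t len, u32 *last_crc)
{
	struct ieee802_11_elems elems;
	u64 filter = (1ULL << WLAN_EID_ERP_INFO) |
		     (1ULL << WLAN_EID_HT_INFORMATION);
	u32 crc = ieee802_11_parse_elems_crc(ies, len, &elems, filter, ~0);

	if (crc == *last_crc)
		return false;

	*last_crc = crc;
	return true;
}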
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems)
{
@@ -812,23 +979,9 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
offset = noffset;
}
- if (sband->ht_cap.ht_supported) {
- u16 cap = sband->ht_cap.cap;
- __le16 tmp;
-
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
- tmp = cpu_to_le16(cap);
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density <<
- IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- pos += sizeof(sband->ht_cap.mcs);
- pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
- }
+ if (sband->ht_cap.ht_supported)
+ pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
/*
* If adding more here, adjust code in main.c
@@ -989,16 +1142,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
*/
}
#endif
-
- /* setup fragmentation threshold */
- drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
-
- /* setup RTS threshold */
- drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
-
- /* reset coverage class */
- drv_set_coverage_class(local, hw->wiphy->coverage_class);
-
/* everything else happens only if HW was up & running */
if (!local->open_count)
goto wake_up;
@@ -1017,6 +1160,15 @@ int ieee80211_reconfig(struct ieee80211_local *local)
return res;
}
+ /* setup fragmentation threshold */
+ drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+
+ /* setup RTS threshold */
+ drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+
+ /* reset coverage class */
+ drv_set_coverage_class(local, hw->wiphy->coverage_class);
+
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1026,7 +1178,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
ieee80211_sdata_running(sdata))
- res = drv_add_interface(local, &sdata->vif);
+ res = drv_add_interface(local, sdata);
}
/* add STAs back */
@@ -1039,7 +1191,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data,
u.ap);
- memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
}
@@ -1077,11 +1228,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_BEACON_INT |
BSS_CHANGED_BSSID |
BSS_CHANGED_CQM |
- BSS_CHANGED_QOS;
+ BSS_CHANGED_QOS |
+ BSS_CHANGED_IDLE;
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- changed |= BSS_CHANGED_ASSOC;
+ changed |= BSS_CHANGED_ASSOC |
+ BSS_CHANGED_ARP_FILTER;
mutex_lock(&sdata->u.mgd.mtx);
ieee80211_bss_info_change_notify(sdata, changed);
mutex_unlock(&sdata->u.mgd.mtx);
@@ -1091,6 +1244,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* fall through */
case NL80211_IFTYPE_AP:
changed |= BSS_CHANGED_SSID;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ changed |= BSS_CHANGED_AP_PROBE_RESP;
+
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
@@ -1112,6 +1269,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ ieee80211_recalc_ps(local, -1);
+
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1367,6 +1526,108 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+ u16 cap)
+{
+ __le16 tmp;
+
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+ /* capability flags */
+ tmp = cpu_to_le16(cap);
+ memcpy(pos, &tmp, sizeof(u16));
+ pos += sizeof(u16);
+
+ /* AMPDU parameters */
+ *pos++ = ht_cap->ampdu_factor |
+ (ht_cap->ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+ /* MCS set */
+ memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
+ pos += sizeof(ht_cap->mcs);
+
+ /* extended capabilities */
+ pos += sizeof(__le16);
+
+ /* BF capabilities */
+ pos += sizeof(__le32);
+
+ /* antenna selection */
+ pos += sizeof(u8);
+
+ return pos;
+}
+
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_ht_info *ht_info;
+ /* Build HT Information */
+ *pos++ = WLAN_EID_HT_INFORMATION;
+ *pos++ = sizeof(struct ieee80211_ht_info);
+ ht_info = (struct ieee80211_ht_info *)pos;
+ ht_info->control_chan =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ switch (channel_type) {
+ case NL80211_CHAN_HT40MINUS:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ break;
+ case NL80211_CHAN_HT20:
+ default:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ break;
+ }
+ if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+
+ /*
+ * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
+ * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
+ */
+ ht_info->operation_mode = 0x0000;
+ ht_info->stbc_param = 0x0000;
+
+ /* It seems that Basic MCS set and Supported MCS set
+ are identical for the first 10 bytes */
+ memset(&ht_info->basic_set, 0, 16);
+ memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+
+ return pos + sizeof(struct ieee80211_ht_info);
+}
+
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+{
+ enum nl80211_channel_type channel_type;
+
+ if (!ht_info)
+ return NL80211_CHAN_NO_HT;
+
+ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ channel_type = NL80211_CHAN_HT20;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ channel_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ channel_type = NL80211_CHAN_HT40MINUS;
+ break;
+ default:
+ channel_type = NL80211_CHAN_NO_HT;
+ }
+
+ return channel_type;
+}
+
int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a1c6bfd55f0..68ad351479d 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -330,13 +330,12 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
ieee80211_tx_set_protected(tx);
- skb = tx->skb;
- do {
+ skb_queue_walk(&tx->skbs, skb) {
if (wep_encrypt_skb(tx, skb) < 0) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
return TX_DROP;
}
- } while ((skb = skb->next));
+ }
return TX_CONTINUE;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index fd52e695c07..89511be3111 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,30 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
+/* Indicate which queue to use for this fully formed 802.11 frame */
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ struct ieee80211_hdr *hdr)
+{
+ u8 *p;
+
+ if (local->hw.queues < 4)
+ return 0;
+
+ if (!ieee80211_is_data(hdr->frame_control)) {
+ skb->priority = 7;
+ return ieee802_1d_to_ac[skb->priority];
+ }
+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ skb->priority = 0;
+ return ieee802_1d_to_ac[skb->priority];
+ }
+
+ p = ieee80211_get_qos_ctl(hdr);
+ skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+ return ieee80211_downgrade_queue(local, skb);
+}
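The new helper serves frames that already carry a full 802.11 header, so the priority has to be read from the QoS control field rather than derived from an ethernet header. A hypothetical caller (the function name below is made up):

/* Hypothetical caller: map an already-framed skb onto a hardware queue. */
static void map_80211_frame_queue(struct ieee80211_local *local,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	skb_set_queue_mapping(skb,
			      ieee80211_select_queue_80211(local, skb, hdr));
}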
/* Indicate which queue to use. */
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
@@ -83,7 +107,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- ra = skb->data;
+ qos = true;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -139,16 +163,24 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
/* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
- u8 ack_policy = 0, tid;
+ u8 ack_policy, tid;
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- if (unlikely(sdata->local->wifi_wme_noack_test))
+ /* preserve EOSP bit */
+ ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
+
+ if (is_multicast_ether_addr(hdr->addr1) ||
+ sdata->noack_map & BIT(tid)) {
ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ }
+
/* qos header is 2 bytes */
*p++ = ack_policy | tid;
*p = ieee80211_vif_is_mesh(&sdata->vif) ?
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 34e166fbf4d..94edceb617f 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,6 +15,9 @@
extern const int ieee802_1d_to_ac[8];
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ struct ieee80211_hdr *hdr);
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 6c53b6d1002..c6dd01a0529 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -94,7 +94,8 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
/* frame sending functions */
-static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
+static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, const u8 *ht_info_ie,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps)
@@ -102,8 +103,10 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
struct ieee80211_ht_info *ht_info;
u8 *pos;
u32 flags = channel->flags;
- u16 cap = sband->ht_cap.cap;
- __le16 tmp;
+ u16 cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
if (!sband->ht_cap.ht_supported)
return;
@@ -114,9 +117,13 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
return;
+ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
/* determine capability flags */
+ cap = ht_cap.cap;
switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -154,34 +161,8 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
}
/* reserve and fill IE */
-
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-
- /* capability flags */
- tmp = cpu_to_le16(cap);
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
-
- /* AMPDU parameters */
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density <<
- IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-
- /* MCS set */
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- pos += sizeof(sband->ht_cap.mcs);
-
- /* extended capabilities */
- pos += sizeof(__le16);
-
- /* BF capabilities */
- pos += sizeof(__le32);
-
- /* antenna selection */
- pos += sizeof(u8);
+ ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
}
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
@@ -356,7 +337,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
if (wk->assoc.use_11n && wk->assoc.wmm_used &&
local->hw.queues >= 4)
- ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
+ ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
sband, wk->chan, wk->assoc.smps);
/* if present, add any custom non-vendor IEs that go after HT */
@@ -881,44 +862,6 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
kfree_skb(skb);
}
-static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
- enum nl80211_channel_type oper_ct)
-{
- switch (wk_ct) {
- case NL80211_CHAN_NO_HT:
- return true;
- case NL80211_CHAN_HT20:
- if (oper_ct != NL80211_CHAN_NO_HT)
- return true;
- return false;
- case NL80211_CHAN_HT40MINUS:
- case NL80211_CHAN_HT40PLUS:
- return (wk_ct == oper_ct);
- }
- WARN_ON(1); /* shouldn't get here */
- return false;
-}
-
-static enum nl80211_channel_type
-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
- enum nl80211_channel_type oper_ct)
-{
- switch (wk_ct) {
- case NL80211_CHAN_NO_HT:
- return oper_ct;
- case NL80211_CHAN_HT20:
- if (oper_ct != NL80211_CHAN_NO_HT)
- return oper_ct;
- return wk_ct;
- case NL80211_CHAN_HT40MINUS:
- case NL80211_CHAN_HT40PLUS:
- return wk_ct;
- }
- WARN_ON(1); /* shouldn't get here */
- return wk_ct;
-}
-
-
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@@ -969,51 +912,12 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
- bool on_oper_chan;
- bool tmp_chan_changed = false;
- bool on_oper_chan2;
- enum nl80211_channel_type wk_ct;
- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
- /* Work with existing channel type if possible. */
- wk_ct = wk->chan_type;
- if (wk->chan == local->hw.conf.channel)
- wk_ct = ieee80211_calc_ct(wk->chan_type,
- local->hw.conf.channel_type);
-
- if (local->tmp_channel)
- if ((local->tmp_channel != wk->chan) ||
- (local->tmp_channel_type != wk_ct))
- tmp_chan_changed = true;
+ ieee80211_offchannel_stop_vifs(local, true);
local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk_ct;
- /*
- * Leave the station vifs in awake mode if they
- * happen to be on the same channel as
- * the requested channel.
- */
- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
- if (on_oper_chan != on_oper_chan2) {
- if (on_oper_chan2) {
- /* going off oper channel, PS too */
- ieee80211_offchannel_stop_vifs(local,
- true);
- ieee80211_hw_config(local, 0);
- } else {
- /* going on channel, but leave PS
- * off-channel. */
- ieee80211_hw_config(local, 0);
- ieee80211_offchannel_return(local,
- true,
- false);
- }
- } else if (tmp_chan_changed)
- /* Still off-channel, but on some other
- * channel, so update hardware.
- * PS should already be off-channel.
- */
- ieee80211_hw_config(local, 0);
+ local->tmp_channel_type = wk->chan_type;
+
+ ieee80211_hw_config(local, 0);
started = true;
wk->timeout = jiffies;
@@ -1082,34 +986,17 @@ static void ieee80211_work_work(struct work_struct *work)
list_for_each_entry(wk, &local->work_list, list) {
if (!wk->started)
continue;
- if (wk->chan != local->tmp_channel)
- continue;
- if (!ieee80211_work_ct_coexists(wk->chan_type,
- local->tmp_channel_type))
+ if (wk->chan != local->tmp_channel ||
+ wk->chan_type != local->tmp_channel_type)
continue;
remain_off_channel = true;
}
if (!remain_off_channel && local->tmp_channel) {
local->tmp_channel = NULL;
- /* If tmp_channel wasn't operating channel, then
- * we need to go back on-channel.
- * NOTE: If we can ever be here while scannning,
- * or if the hw_config() channel config logic changes,
- * then we may need to do a more thorough check to see if
- * we still need to do a hardware config. Currently,
- * we cannot be here while scanning, however.
- */
- if (!ieee80211_cfg_on_oper_channel(local))
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_config(local, 0);
- /* At the least, we need to disable offchannel_ps,
- * so just go ahead and run the entire offchannel
- * return logic here. We *could* skip enabling
- * beaconing if we were already on-oper-channel
- * as a future optimization.
- */
- ieee80211_offchannel_return(local, true, true);
+ ieee80211_offchannel_return(local, true);
/* give connection some time to breathe */
run_again(local, jiffies + HZ/2);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f614ce7bb6e..93aab0715e8 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -223,14 +223,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
ieee80211_tx_result
ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
+ struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
- do {
+ skb_queue_walk(&tx->skbs, skb) {
if (tkip_encrypt_skb(tx, skb) < 0)
return TX_DROP;
- } while ((skb = skb->next));
+ }
return TX_CONTINUE;
}
@@ -390,7 +390,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 scratch[6 * AES_BLOCK_SIZE];
if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/*
* hwaccel has no need for preallocated room for CCMP
* header or MIC fields
@@ -412,6 +413,12 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
@@ -442,14 +449,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
ieee80211_tx_result
ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
+ struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
- do {
+ skb_queue_walk(&tx->skbs, skb) {
if (ccmp_encrypt_skb(tx, skb) < 0)
return TX_DROP;
- } while ((skb = skb->next));
+ }
return TX_CONTINUE;
}
@@ -547,15 +554,22 @@ static inline void bip_ipn_swap(u8 *d, const u8 *s)
ieee80211_tx_result
ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
{
- struct sk_buff *skb = tx->skb;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie *mmie;
u8 aad[20];
u64 pn64;
+ if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+ return TX_DROP;
+
+ skb = skb_peek(&tx->skbs);
+
+ info = IEEE80211_SKB_CB(skb);
+
if (info->control.hw_key)
- return 0;
+ return TX_CONTINUE;
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8260b13d93c..f8ac4ef0b79 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -4,6 +4,14 @@ menu "Core Netfilter Configuration"
config NETFILTER_NETLINK
tristate
+config NETFILTER_NETLINK_ACCT
+ tristate "Netfilter NFACCT over NFNETLINK interface"
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK
+ help
+ If this option is enabled, the kernel will include support
+ for extended accounting via NFNETLINK.
+
config NETFILTER_NETLINK_QUEUE
tristate "Netfilter NFQUEUE over NFNETLINK interface"
depends on NETFILTER_ADVANCED
@@ -75,6 +83,16 @@ config NF_CONNTRACK_ZONES
If unsure, say `N'.
+config NF_CONNTRACK_PROCFS
+ bool "Supply CT list in procfs (OBSOLETE)"
+ default y
+ depends on PROC_FS
+ ---help---
+ This option enables the list of known conntrack entries
+ to be shown in procfs under net/netfilter/nf_conntrack. This
+ is considered obsolete in favor of using the conntrack(8)
+ tool which uses Netlink.
+
config NF_CONNTRACK_EVENTS
bool "Connection tracking events"
depends on NETFILTER_ADVANCED
@@ -201,7 +219,6 @@ config NF_CONNTRACK_BROADCAST
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
- depends on NETFILTER_ADVANCED
select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +559,6 @@ config NETFILTER_XT_TARGET_NOTRACK
tristate '"NOTRACK" target support'
depends on IP_NF_RAW || IP6_NF_RAW
depends on NF_CONNTRACK
- depends on NETFILTER_ADVANCED
help
The NOTRACK target allows a select rule to specify
which packets *not* to enter the conntrack/NAT
@@ -772,6 +788,15 @@ config NETFILTER_XT_MATCH_DSCP
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_ECN
+ tristate '"ecn" match support'
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an "ECN" match, which allows you to match against
+ the IPv4 and TCP header ECN fields.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_ESP
tristate '"esp" match support'
depends on NETFILTER_ADVANCED
@@ -881,6 +906,16 @@ config NETFILTER_XT_MATCH_MULTIPORT
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_NFACCT
+ tristate '"nfacct" match support'
+ depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_ACCT
+ help
+ This option allows you to use the extended accounting through
+ nfnetlink_acct.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_OSF
tristate '"osf" Passive OS fingerprint match'
depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 1a02853df86..40f4c3d636c 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -7,6 +7,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
obj-$(CONFIG_NETFILTER) = netfilter.o
obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
+obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
@@ -80,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_NFACCT) += xt_nfacct.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OSF) += xt_osf.o
obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index afca6c78948..b4e8ff05b30 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -54,6 +54,12 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
+
+#if defined(CONFIG_JUMP_LABEL)
+struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+EXPORT_SYMBOL(nf_hooks_needed);
+#endif
+
static DEFINE_MUTEX(nf_hook_mutex);
int nf_register_hook(struct nf_hook_ops *reg)
@@ -70,6 +76,9 @@ int nf_register_hook(struct nf_hook_ops *reg)
}
list_add_rcu(&reg->list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
+#if defined(CONFIG_JUMP_LABEL)
+ jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
return 0;
}
EXPORT_SYMBOL(nf_register_hook);
@@ -79,7 +88,9 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
mutex_unlock(&nf_hook_mutex);
-
+#if defined(CONFIG_JUMP_LABEL)
+ jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
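The counters registered here are presumably consumed by a fast-path check (in the netfilter headers, not in this hunk) that lets hook dispatch bail out without touching the hook lists when nothing is registered. A sketch under that assumption, using the jump-label API of this era (static_branch() on a struct jump_label_key); the helper name is made up:

/* Sketch: fast-path test gated by the jump label bumped above. */
static inline bool nf_hook_needed(u_int8_t pf, unsigned int hook)
{
#if defined(CONFIG_JUMP_LABEL)
	return static_branch(&nf_hooks_needed[pf][hook]);
#else
	return !list_empty(&nf_hooks[pf][hook]);
#endif
}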
@@ -218,7 +229,7 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
}
EXPORT_SYMBOL(skb_make_writable);
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if connection
tracking in use: without this, connection may not be in hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 052579fe389..1f03556666f 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -109,16 +109,18 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
}
EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
bool
ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto)
{
int protoff;
u8 nexthdr;
+ __be16 frag_off;
nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
if (protoff < 0)
return false;
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index f2d576e6b76..4015fcaf87b 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
static inline void
hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
{
- ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->ip.in6 = src->ip.in6;
}
static inline void
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 6ee10f5d59b..37d667e3f6f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fb90e344e90..e69e2718fbe 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index deb3e3dfa5f..64199b4e93c 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 60d016541c5..28988196775 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -267,7 +267,7 @@ static inline void
hash_net6_data_copy(struct hash_net6_elem *dst,
const struct hash_net6_elem *src)
{
- ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
}
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 70bd1d0774c..f9871385a65 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -122,7 +122,6 @@ config IP_VS_RR
config IP_VS_WRR
tristate "weighted round-robin scheduling"
- select GCD
---help---
The weighted round-robin scheduling algorithm directs network
connections to different real servers based on server weights
@@ -232,6 +231,21 @@ config IP_VS_NQ
If you want to compile it in kernel, say Y. To compile it as a
module, choose M here. If unsure, say N.
+comment 'IPVS SH scheduler'
+
+config IP_VS_SH_TAB_BITS
+ int "IPVS source hashing table size (the Nth power of 2)"
+ range 4 20
+ default 8
+ ---help---
+ The source hashing scheduler maps source IPs to destinations
+ stored in a hash table. This table is tiled by each destination
+ until all slots in the table are filled. When using weights to
+ allow destinations to receive more connections, the table is
+ tiled by an amount proportional to the weights specified. The table
+ needs to be large enough to effectively fit all the destinations
+ multiplied by their respective weights.
+
comment 'IPVS application helper'
config IP_VS_FTP
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 12571fb2881..29fa5badde7 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
if ((cp) && (!cp->dest)) {
dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
- cp->protocol, cp->fwmark);
+ cp->protocol, cp->fwmark, cp->flags);
ip_vs_bind_dest(cp, dest);
return dest;
} else
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 093cc327020..611c3359b94 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
if (!cp)
return NF_ACCEPT;
- ipv6_addr_copy(&snet.in6, &iph->saddr);
+ snet.in6 = iph->saddr;
return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
pp, offset, sizeof(struct ipv6hdr));
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 008bf97cc91..b3afe189af6 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -85,7 +85,7 @@ static int __ip_vs_addr_is_local_v6(struct net *net,
};
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
- if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
+ if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
return 1;
return 0;
@@ -619,15 +619,21 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
- __be16 vport, __u16 protocol, __u32 fwmark)
+ __be16 vport, __u16 protocol, __u32 fwmark,
+ __u32 flags)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
+ __be16 port = dport;
svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
- dest = ip_vs_lookup_dest(svc, daddr, dport);
+ if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+ port = 0;
+ dest = ip_vs_lookup_dest(svc, daddr, port);
+ if (!dest)
+ dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
if (dest)
atomic_inc(&dest->refcnt);
ip_vs_service_put(svc);
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 13d607ae9c5..1aa5cac748c 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -108,7 +108,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
struct ip_vs_conn *ct)
{
- bool ret = 0;
+ bool ret = false;
if (ct->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) &&
@@ -121,7 +121,7 @@ static bool ip_vs_sip_ct_match(const struct ip_vs_conn_param *p,
ct->protocol == p->protocol &&
ct->pe_data && ct->pe_data_len == p->pe_data_len &&
!memcmp(ct->pe_data, p->pe_data, p->pe_data_len))
- ret = 1;
+ ret = true;
IP_VS_DBG_BUF(9, "SIP template match %s %s->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 33815f4fb45..069e8d4d5c0 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -30,6 +30,11 @@
* server is dead or overloaded, the load balancer can bypass the cache
* server and send requests to the original server directly.
*
+ * The destination weight attribute can be used to control the
+ * distribution of connections to the destinations in servernode. The
+ * greater the weight, the more connections the destination
+ * will receive.
+ *
*/
#define KMSG_COMPONENT "IPVS"
@@ -99,9 +104,11 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
struct ip_vs_sh_bucket *b;
struct list_head *p;
struct ip_vs_dest *dest;
+ int d_count;
b = tbl;
p = &svc->destinations;
+ d_count = 0;
for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
if (list_empty(p)) {
b->dest = NULL;
@@ -113,7 +120,16 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
atomic_inc(&dest->refcnt);
b->dest = dest;
- p = p->next;
+ IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
+ i, IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ atomic_read(&dest->weight));
+
+ /* Don't move to next dest until filling weight */
+ if (++d_count >= atomic_read(&dest->weight)) {
+ p = p->next;
+ d_count = 0;
+ }
+
}
b++;
}
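As a concrete illustration of the weight-filling loop above: with destinations A (weight 3) and B (weight 1), each destination keeps claiming consecutive hash buckets until d_count reaches its weight, so the IP_VS_SH_TAB_SIZE buckets fill as A, A, A, B, A, A, A, B, ... and A ends up owning roughly three quarters of the source-hash space.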
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 3cdd479f9b5..8a0d6d6889f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -603,9 +603,9 @@ sloop:
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6) {
p += sizeof(struct ip_vs_sync_v6);
- ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
- ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
- ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+ s->v6.caddr = cp->caddr.in6;
+ s->v6.vaddr = cp->vaddr.in6;
+ s->v6.daddr = cp->daddr.in6;
} else
#endif
{
@@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
* but still handled.
*/
dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
- param->vport, protocol, fwmark);
+ param->vport, protocol, fwmark, flags);
/* Set the appropriate activity flag */
if (protocol == IPPROTO_TCP) {
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index aa2d7206ee8..7fd66dec859 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -207,7 +207,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
{
- return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
+ return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
}
static struct dst_entry *
@@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
goto out_err;
}
}
- ipv6_addr_copy(ret_saddr, &fl6.saddr);
+ *ret_saddr = fl6.saddr;
return dst;
out_err:
@@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
atomic_read(&rt->dst.__refcnt));
}
if (ret_saddr)
- ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
+ *ret_saddr = dest->dst_saddr.in6;
spin_unlock(&dest->dst_lock);
} else {
dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -541,7 +541,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -658,7 +658,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
goto tx_error;
- ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
+ ipv6_hdr(skb)->daddr = cp->daddr.in6;
if (!local || !skb->dev) {
/* drop the old route when skb is not shared */
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
iph->priority = old_iph->priority;
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
- ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
- ipv6_addr_copy(&iph->saddr, &saddr);
+ iph->daddr = cp->daddr.in6;
+ iph->saddr = saddr;
iph->hop_limit = old_iph->hop_limit;
/* Another hack: avoid icmp_send in ip_fragment */
@@ -1173,7 +1173,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
@@ -1293,7 +1293,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo);
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 369df3f08d4..f4f8cda0598 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -18,7 +18,7 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
-static int nf_ct_acct __read_mostly;
+static bool nf_ct_acct __read_mostly;
module_param_named(acct, nf_ct_acct, bool, 0644);
MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -46,8 +46,8 @@ seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
return 0;
return seq_printf(s, "packets=%llu bytes=%llu ",
- (unsigned long long)acct[dir].packets,
- (unsigned long long)acct[dir].bytes);
+ (unsigned long long)atomic64_read(&acct[dir].packets),
+ (unsigned long long)atomic64_read(&acct[dir].bytes));
};
EXPORT_SYMBOL_GPL(seq_print_acct);
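The seq_print_acct() change goes with the conversion of the counters themselves from a spinlock-protected pair of u64s to atomic64_t (see the nf_conntrack_core.c hunks below). A userspace analogue of the resulting update/read pattern, using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of nf_conn_counter after the change: per-direction
 * packet and byte counters bumped without taking a spinlock. */
struct conn_counter {
	atomic_uint_fast64_t packets;
	atomic_uint_fast64_t bytes;
};

enum { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

static void account_packet(struct conn_counter acct[DIR_MAX], int dir, uint64_t len)
{
	atomic_fetch_add_explicit(&acct[dir].packets, 1, memory_order_relaxed);
	atomic_fetch_add_explicit(&acct[dir].bytes, len, memory_order_relaxed);
}

int main(void)
{
	struct conn_counter acct[DIR_MAX];

	for (int d = 0; d < DIR_MAX; d++) {
		atomic_init(&acct[d].packets, 0);
		atomic_init(&acct[d].bytes, 0);
	}
	account_packet(acct, DIR_ORIGINAL, 1500);
	account_packet(acct, DIR_ORIGINAL, 40);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)atomic_load(&acct[DIR_ORIGINAL].packets),
	       (unsigned long long)atomic_load(&acct[DIR_ORIGINAL].bytes));
	return 0;
}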
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7202b0631cd..e875f8902db 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -67,6 +67,7 @@ DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
unsigned int nf_conntrack_hash_rnd __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
@@ -1044,10 +1045,8 @@ acct:
acct = nf_conn_acct_find(ct);
if (acct) {
- spin_lock_bh(&ct->lock);
- acct[CTINFO2DIR(ctinfo)].packets++;
- acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
- spin_unlock_bh(&ct->lock);
+ atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+ atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
}
}
}
@@ -1063,11 +1062,9 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
acct = nf_conn_acct_find(ct);
if (acct) {
- spin_lock_bh(&ct->lock);
- acct[CTINFO2DIR(ctinfo)].packets++;
- acct[CTINFO2DIR(ctinfo)].bytes +=
- skb->len - skb_network_offset(skb);
- spin_unlock_bh(&ct->lock);
+ atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+ atomic64_add(skb->len - skb_network_offset(skb),
+ &acct[CTINFO2DIR(ctinfo)].bytes);
}
}
@@ -1087,7 +1084,7 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
};
#endif
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1342,8 +1339,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
get_order(sz));
if (!hash) {
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
- hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL);
+ hash = vzalloc(sz);
}
if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 6b368be937c..b62c4148b92 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -27,22 +27,17 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned long events;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -83,19 +78,20 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
int ret = 0;
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -105,32 +101,34 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
int ret = 0;
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -140,15 +138,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 340c80d968d..4147ba3f653 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -38,8 +38,6 @@ unsigned int nf_ct_expect_max __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
-static HLIST_HEAD(nf_ct_userspace_expect_list);
-
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 pid, int report)
@@ -47,14 +45,14 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
struct nf_conn_help *master_help = nfct_help(exp->master);
struct net *net = nf_ct_exp_net(exp);
+ NF_CT_ASSERT(master_help);
NF_CT_ASSERT(!timer_pending(&exp->timeout));
hlist_del_rcu(&exp->hnode);
net->ct.expect_count--;
hlist_del(&exp->lnode);
- if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
- master_help->expecting[exp->class]--;
+ master_help->expecting[exp->class]--;
nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
nf_ct_expect_put(exp);
@@ -314,37 +312,34 @@ void nf_ct_expect_put(struct nf_conntrack_expect *exp)
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
-static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
+static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
struct nf_conn_help *master_help = nfct_help(exp->master);
+ struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(exp);
- const struct nf_conntrack_expect_policy *p;
unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
/* two references : one for hash insert, one for the timer */
atomic_add(2, &exp->use);
- if (master_help) {
- hlist_add_head(&exp->lnode, &master_help->expectations);
- master_help->expecting[exp->class]++;
- } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
- hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);
+ hlist_add_head(&exp->lnode, &master_help->expectations);
+ master_help->expecting[exp->class]++;
hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
net->ct.expect_count++;
setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
(unsigned long)exp);
- if (master_help) {
- p = &rcu_dereference_protected(
- master_help->helper,
- lockdep_is_held(&nf_conntrack_lock)
- )->expect_policy[exp->class];
- exp->timeout.expires = jiffies + p->timeout * HZ;
+ helper = rcu_dereference_protected(master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock));
+ if (helper) {
+ exp->timeout.expires = jiffies +
+ helper->expect_policy[exp->class].timeout * HZ;
}
add_timer(&exp->timeout);
NF_CT_STAT_INC(net, expect_create);
+ return 0;
}
/* Race with expectations being used means we could have none to find; OK. */
@@ -389,14 +384,13 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
struct nf_conntrack_expect *i;
struct nf_conn *master = expect->master;
struct nf_conn_help *master_help = nfct_help(master);
+ struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(expect);
struct hlist_node *n;
unsigned int h;
int ret = 1;
- /* Don't allow expectations created from kernel-space with no helper */
- if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
- (!master_help || (master_help && !master_help->helper))) {
+ if (!master_help) {
ret = -ESHUTDOWN;
goto out;
}
@@ -414,11 +408,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
}
/* Will be over limit? */
- if (master_help) {
- p = &rcu_dereference_protected(
- master_help->helper,
- lockdep_is_held(&nf_conntrack_lock)
- )->expect_policy[expect->class];
+ helper = rcu_dereference_protected(master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock));
+ if (helper) {
+ p = &helper->expect_policy[expect->class];
if (p->max_expected &&
master_help->expecting[expect->class] >= p->max_expected) {
evict_oldest_expect(master, expect);
@@ -450,8 +443,9 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
if (ret <= 0)
goto out;
- ret = 0;
- nf_ct_expect_insert(expect);
+ ret = nf_ct_expect_insert(expect);
+ if (ret < 0)
+ goto out;
spin_unlock_bh(&nf_conntrack_lock);
nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
return ret;
@@ -461,22 +455,7 @@ out:
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
-void nf_ct_remove_userspace_expectations(void)
-{
- struct nf_conntrack_expect *exp;
- struct hlist_node *n, *next;
-
- hlist_for_each_entry_safe(exp, n, next,
- &nf_ct_userspace_expect_list, lnode) {
- if (del_timer(&exp->timeout)) {
- nf_ct_unlink_expect(exp);
- nf_ct_expect_put(exp);
- }
- }
-}
-EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
-
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
struct seq_net_private p;
unsigned int bucket;
@@ -604,25 +583,25 @@ static const struct file_operations exp_file_ops = {
.llseek = seq_lseek,
.release = seq_release_net,
};
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
static int exp_proc_init(struct net *net)
{
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct proc_dir_entry *proc;
proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
if (!proc)
return -ENOMEM;
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
return 0;
}
static void exp_proc_remove(struct net *net)
{
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
proc_net_remove(net, "nf_conntrack_expect");
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 6f5801eac99..8c5c95c6d34 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -42,7 +42,7 @@ static u_int16_t ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
-static int loose;
+static bool loose;
module_param(loose, bool, 0600);
unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f03c2d4539f..722291f8af7 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -42,7 +42,7 @@ static int gkrouted_only __read_mostly = 1;
module_param(gkrouted_only, int, 0600);
MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
-static int callforward_filter __read_mostly = 1;
+static bool callforward_filter __read_mostly = true;
module_param(callforward_filter, bool, 0600);
MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
"if both endpoints are on different sides "
@@ -743,17 +743,16 @@ static int callforward_do_filter(const union nf_inet_addr *src,
}
break;
}
-#if defined(CONFIG_NF_CONNTRACK_IPV6) || \
- defined(CONFIG_NF_CONNTRACK_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
case AF_INET6: {
struct flowi6 fl1, fl2;
struct rt6_info *rt1, *rt2;
memset(&fl1, 0, sizeof(fl1));
- ipv6_addr_copy(&fl1.daddr, &src->in6);
+ fl1.daddr = src->in6;
memset(&fl2, 0, sizeof(fl2));
- ipv6_addr_copy(&fl2.daddr, &dst->in6);
+ fl2.daddr = dst->in6;
if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
flowi6_to_flowi(&fl1), false)) {
if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 93c4bdbfc1a..c9e0de08aa8 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -121,6 +121,18 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
int ret = 0;
if (tmpl != NULL) {
+ /* we've got a userspace helper. */
+ if (tmpl->status & IPS_USERSPACE_HELPER) {
+ help = nf_ct_helper_ext_add(ct, flags);
+ if (help == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rcu_assign_pointer(help->helper, NULL);
+ __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+ ret = 0;
+ goto out;
+ }
help = nfct_help(tmpl);
if (help != NULL)
helper = help->helper;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index e58aa9b1fe8..e07dc3ae930 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -135,7 +135,7 @@ nla_put_failure:
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
- long timeout = (ct->timeout.expires - jiffies) / HZ;
+ long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
if (timeout < 0)
timeout = 0;
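Casting both operands to long before subtracting matters once jiffies has run past the stored expiry: the unsigned difference wraps to a huge value, while the signed difference goes negative and is caught by the clamp to zero just above. A standalone illustration with 32-bit tick counters (the values are chosen only for the example):

#include <stdio.h>
#include <stdint.h>

#define HZ 100

int main(void)
{
	uint32_t jiffies = 0x00000100u;   /* "now", just after the counter wrapped */
	uint32_t expires = 0xffffff00u;   /* timer expired 0x200 ticks before the wrap */

	long bad  = (long)((expires - jiffies) / HZ);                      /* old arithmetic */
	long good = ((long)(int32_t)expires - (long)(int32_t)jiffies) / HZ; /* patched arithmetic */

	printf("unsigned math: %ld s left (bogus, never clamped)\n", bad);
	printf("signed math:   %ld s left (negative, clamped to 0)\n", good);
	return 0;
}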
@@ -203,25 +203,18 @@ nla_put_failure:
}
static int
-ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
- enum ip_conntrack_dir dir)
+dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
+ enum ip_conntrack_dir dir)
{
enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
struct nlattr *nest_count;
- const struct nf_conn_counter *acct;
-
- acct = nf_conn_acct_find(ct);
- if (!acct)
- return 0;
nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
if (!nest_count)
goto nla_put_failure;
- NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
- cpu_to_be64(acct[dir].packets));
- NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
- cpu_to_be64(acct[dir].bytes));
+ NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
+ NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
nla_nest_end(skb, nest_count);
@@ -232,6 +225,27 @@ nla_put_failure:
}
static int
+ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
+ enum ip_conntrack_dir dir, int type)
+{
+ struct nf_conn_counter *acct;
+ u64 pkts, bytes;
+
+ acct = nf_conn_acct_find(ct);
+ if (!acct)
+ return 0;
+
+ if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
+ pkts = atomic64_xchg(&acct[dir].packets, 0);
+ bytes = atomic64_xchg(&acct[dir].bytes, 0);
+ } else {
+ pkts = atomic64_read(&acct[dir].packets);
+ bytes = atomic64_read(&acct[dir].bytes);
+ }
+ return dump_counters(skb, pkts, bytes, dir);
+}
+
+static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nlattr *nest_count;
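For IPCTNL_MSG_CT_GET_CTRZERO the counters are now read and reset in a single atomic64_xchg(), instead of being dumped and then memset() later in the walk (the block removed further down). A minimal C11 sketch of that read-and-zero step:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	atomic_uint_fast64_t pkts;

	atomic_init(&pkts, 42);
	/* exchange with 0 reads the counter and clears it in one atomic step,
	 * so updates cannot slip in between the read and the reset */
	uint64_t snapshot = atomic_exchange(&pkts, 0);
	printf("snapshot=%llu now=%llu\n",
	       (unsigned long long)snapshot,
	       (unsigned long long)atomic_load(&pkts));
	return 0;
}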
@@ -393,15 +407,15 @@ nla_put_failure:
}
static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
- int event, struct nf_conn *ct)
+ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ struct nf_conn *ct)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
- unsigned int flags = pid ? NLM_F_MULTI : 0;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
- event |= NFNL_SUBSYS_CTNETLINK << 8;
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
if (nlh == NULL)
goto nlmsg_failure;
@@ -430,8 +444,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_timeout(skb, ct) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
+ ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
@@ -612,8 +626,10 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto nla_put_failure;
if (events & (1 << IPCT_DESTROY)) {
- if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ if (ctnetlink_dump_counters(skb, ct,
+ IP_CT_DIR_ORIGINAL, type) < 0 ||
+ ctnetlink_dump_counters(skb, ct,
+ IP_CT_DIR_REPLY, type) < 0 ||
ctnetlink_dump_timestamp(skb, ct) < 0)
goto nla_put_failure;
} else {
@@ -709,20 +725,13 @@ restart:
}
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- IPCTNL_MSG_CT_NEW, ct) < 0) {
+ NFNL_MSG_TYPE(
+ cb->nlh->nlmsg_type),
+ ct) < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
}
-
- if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
- IPCTNL_MSG_CT_GET_CTRZERO) {
- struct nf_conn_counter *acct;
-
- acct = nf_conn_acct_find(ct);
- if (acct)
- memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
- }
}
if (cb->args[1]) {
cb->args[1] = 0;
@@ -1001,7 +1010,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
rcu_read_lock();
err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
- IPCTNL_MSG_CT_NEW, ct);
+ NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
rcu_read_unlock();
nf_ct_put(ct);
if (err <= 0)
@@ -1087,14 +1096,14 @@ ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
if (cda[CTA_NAT_DST]) {
ret = ctnetlink_parse_nat_setup(ct,
- IP_NAT_MANIP_DST,
+ NF_NAT_MANIP_DST,
cda[CTA_NAT_DST]);
if (ret < 0)
return ret;
}
if (cda[CTA_NAT_SRC]) {
ret = ctnetlink_parse_nat_setup(ct,
- IP_NAT_MANIP_SRC,
+ NF_NAT_MANIP_SRC,
cda[CTA_NAT_SRC]);
if (ret < 0)
return ret;
@@ -1358,12 +1367,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
+ spin_unlock_bh(&nf_conntrack_lock);
#ifdef CONFIG_MODULES
if (request_module("nfct-helper-%s", helpname) < 0) {
+ spin_lock_bh(&nf_conntrack_lock);
err = -EOPNOTSUPP;
goto err1;
}
+ spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname,
nf_ct_l3num(ct),
@@ -1638,7 +1650,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
{
struct nf_conn *master = exp->master;
- long timeout = (exp->timeout.expires - jiffies) / HZ;
+ long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
if (timeout < 0)
@@ -1847,7 +1859,9 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- if (cda[CTA_EXPECT_MASTER])
+ if (cda[CTA_EXPECT_TUPLE])
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ else if (cda[CTA_EXPECT_MASTER])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
else
return -EINVAL;
@@ -1869,25 +1883,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
err = -ENOMEM;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb2 == NULL)
+ if (skb2 == NULL) {
+ nf_ct_expect_put(exp);
goto out;
+ }
rcu_read_lock();
err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
rcu_read_unlock();
+ nf_ct_expect_put(exp);
if (err <= 0)
goto free;
- nf_ct_expect_put(exp);
+ err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
- return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ return 0;
free:
kfree_skb(skb2);
out:
- nf_ct_expect_put(exp);
- return err;
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
}
static int
@@ -2023,6 +2042,10 @@ ctnetlink_create_expect(struct net *net, u16 zone,
}
help = nfct_help(ct);
if (!help) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) {
if (!cda[CTA_EXPECT_TIMEOUT]) {
err = -EINVAL;
goto out;
@@ -2163,6 +2186,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int ret;
+
+ ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot register notifier.\n");
+ goto err_out;
+ }
+
+ ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot expect register notifier.\n");
+ goto err_unreg_notifier;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+ return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+ .init = ctnetlink_net_init,
+ .exit_batch = ctnetlink_net_exit_batch,
+};
+
static int __init ctnetlink_init(void)
{
int ret;
@@ -2180,28 +2251,15 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- ret = nf_conntrack_register_notifier(&ctnl_notifier);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot register notifier.\n");
+ if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
- ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot expect register notifier.\n");
- goto err_unreg_notifier;
- }
-#endif
-
return 0;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
- nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
@@ -2212,12 +2270,7 @@ static void __exit ctnetlink_exit(void)
{
pr_info("ctnetlink: unregistering from nfnetlink.\n");
- nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
- nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+ unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 2e664a69d7d..d6dde6dc09e 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -629,7 +629,7 @@ static int dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
return seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
}
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct)
{
@@ -770,7 +770,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = dccp_to_nlattr,
.nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
@@ -792,7 +792,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = dccp_to_nlattr,
.nlattr_size = dccp_nlattr_size,
.from_nlattr = nlattr_to_dccp,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d69facdd9a7..f0338791b82 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -291,7 +291,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.new = gre_new,
.destroy = gre_destroy,
.me = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 6772b115465..afa69136061 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -461,7 +461,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -666,7 +666,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.packet = sctp_packet,
.new = sctp_new,
.me = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = sctp_to_nlattr,
.nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
@@ -696,7 +696,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.packet = sctp_packet,
.new = sctp_new,
.me = THIS_MODULE,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = sctp_to_nlattr,
.nlattr_size = sctp_nlattr_size,
.from_nlattr = nlattr_to_sctp,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 8235b86b4e8..97b9f3ebf28 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1126,7 +1126,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@@ -1447,7 +1447,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
.nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
@@ -1479,7 +1479,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.packet = tcp_packet,
.new = tcp_new,
.error = tcp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
.nlattr_size = tcp_nlattr_size,
.from_nlattr = nlattr_to_tcp,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 8289088b821..5f35757fbff 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -188,7 +188,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
@@ -216,7 +216,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.packet = udp_packet,
.new = udp_new,
.error = udp_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 263b5a72588..f52ca118101 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -174,7 +174,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.packet = udplite_packet,
.new = udplite_new,
.error = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -198,7 +198,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.packet = udplite_packet,
.new = udplite_new,
.error = udplite_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 05e9feb101c..885f5ab9bc2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -34,7 +34,7 @@
MODULE_LICENSE("GPL");
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
@@ -396,7 +396,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NF_CONNTRACK_PROCFS */
/* Sysctl support */
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index af7dd31af0a..e8d27afbbdb 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -15,7 +15,7 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
-static int nf_ct_tstamp __read_mostly;
+static bool nf_ct_tstamp __read_mostly;
module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
new file mode 100644
index 00000000000..11ba013e47f
--- /dev/null
+++ b/net/netfilter/nfnetlink_acct.c
@@ -0,0 +1,361 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
+
+static LIST_HEAD(nfnl_acct_list);
+
+struct nf_acct {
+ atomic64_t pkts;
+ atomic64_t bytes;
+ struct list_head head;
+ atomic_t refcnt;
+ char name[NFACCT_NAME_MAX];
+ struct rcu_head rcu_head;
+};
+
+static int
+nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ struct nf_acct *nfacct, *matching = NULL;
+ char *acct_name;
+
+ if (!tb[NFACCT_NAME])
+ return -EINVAL;
+
+ acct_name = nla_data(tb[NFACCT_NAME]);
+
+ list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+ if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
+ continue;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ matching = nfacct;
+ break;
+ }
+
+ if (matching) {
+ if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ /* reset counters if you request a replacement. */
+ atomic64_set(&matching->pkts, 0);
+ atomic64_set(&matching->bytes, 0);
+ return 0;
+ }
+ return -EBUSY;
+ }
+
+ nfacct = kzalloc(sizeof(struct nf_acct), GFP_KERNEL);
+ if (nfacct == NULL)
+ return -ENOMEM;
+
+ strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+
+ if (tb[NFACCT_BYTES]) {
+ atomic64_set(&nfacct->bytes,
+ be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+ }
+ if (tb[NFACCT_PKTS]) {
+ atomic64_set(&nfacct->pkts,
+ be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+ }
+ atomic_set(&nfacct->refcnt, 1);
+ list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+ return 0;
+}
+
+static int
+nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ int event, struct nf_acct *acct)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0;
+ u64 pkts, bytes;
+
+ event |= NFNL_SUBSYS_ACCT << 8;
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+
+ if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
+ pkts = atomic64_xchg(&acct->pkts, 0);
+ bytes = atomic64_xchg(&acct->bytes, 0);
+ } else {
+ pkts = atomic64_read(&acct->pkts);
+ bytes = atomic64_read(&acct->bytes);
+ }
+ NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
+ NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
+ NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nf_acct *cur, *last;
+
+ if (cb->args[2])
+ return 0;
+
+ last = (struct nf_acct *)cb->args[1];
+ if (cb->args[1])
+ cb->args[1] = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ if (last && cur != last)
+ continue;
+
+ if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ NFNL_MSG_ACCT_NEW, cur) < 0) {
+ cb->args[1] = (unsigned long)cur;
+ break;
+ }
+ }
+ if (!cb->args[1])
+ cb->args[2] = 1;
+ rcu_read_unlock();
+ return skb->len;
+}
+
+static int
+nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ int ret = -ENOENT;
+ struct nf_acct *cur;
+ char *acct_name;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump,
+ NULL, 0);
+ }
+
+ if (!tb[NFACCT_NAME])
+ return -EINVAL;
+ acct_name = nla_data(tb[NFACCT_NAME]);
+
+ list_for_each_entry(cur, &nfnl_acct_list, head) {
+ struct sk_buff *skb2;
+
+ if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+ continue;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_ACCT_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ return ret;
+}
+
+/* try to delete object, fail if it is still in use. */
+static int nfnl_acct_try_del(struct nf_acct *cur)
+{
+ int ret = 0;
+
+ /* we want to avoid races with nfnl_acct_find_get. */
+ if (atomic_dec_and_test(&cur->refcnt)) {
+ /* We are protected by nfnl mutex. */
+ list_del_rcu(&cur->head);
+ kfree_rcu(cur, rcu_head);
+ } else {
+ /* still in use, restore reference counter. */
+ atomic_inc(&cur->refcnt);
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int
+nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ char *acct_name;
+ struct nf_acct *cur;
+ int ret = -ENOENT;
+
+ if (!tb[NFACCT_NAME]) {
+ list_for_each_entry(cur, &nfnl_acct_list, head)
+ nfnl_acct_try_del(cur);
+
+ return 0;
+ }
+ acct_name = nla_data(tb[NFACCT_NAME]);
+
+ list_for_each_entry(cur, &nfnl_acct_list, head) {
+ if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+ continue;
+
+ ret = nfnl_acct_try_del(cur);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ return ret;
+}
+
+static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
+ [NFACCT_NAME] = { .type = NLA_NUL_STRING, .len = NFACCT_NAME_MAX-1 },
+ [NFACCT_BYTES] = { .type = NLA_U64 },
+ [NFACCT_PKTS] = { .type = NLA_U64 },
+};
+
+static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
+ [NFNL_MSG_ACCT_NEW] = { .call = nfnl_acct_new,
+ .attr_count = NFACCT_MAX,
+ .policy = nfnl_acct_policy },
+ [NFNL_MSG_ACCT_GET] = { .call = nfnl_acct_get,
+ .attr_count = NFACCT_MAX,
+ .policy = nfnl_acct_policy },
+ [NFNL_MSG_ACCT_GET_CTRZERO] = { .call = nfnl_acct_get,
+ .attr_count = NFACCT_MAX,
+ .policy = nfnl_acct_policy },
+ [NFNL_MSG_ACCT_DEL] = { .call = nfnl_acct_del,
+ .attr_count = NFACCT_MAX,
+ .policy = nfnl_acct_policy },
+};
+
+static const struct nfnetlink_subsystem nfnl_acct_subsys = {
+ .name = "acct",
+ .subsys_id = NFNL_SUBSYS_ACCT,
+ .cb_count = NFNL_MSG_ACCT_MAX,
+ .cb = nfnl_acct_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
+
+struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+{
+ struct nf_acct *cur, *acct = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
+ continue;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err;
+
+ if (!atomic_inc_not_zero(&cur->refcnt)) {
+ module_put(THIS_MODULE);
+ goto err;
+ }
+
+ acct = cur;
+ break;
+ }
+err:
+ rcu_read_unlock();
+ return acct;
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
+
+void nfnl_acct_put(struct nf_acct *acct)
+{
+ atomic_dec(&acct->refcnt);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_put);
+
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct)
+{
+ atomic64_inc(&nfacct->pkts);
+ atomic64_add(skb->len, &nfacct->bytes);
+}
+EXPORT_SYMBOL_GPL(nfnl_acct_update);
+
+static int __init nfnl_acct_init(void)
+{
+ int ret;
+
+ pr_info("nfnl_acct: registering with nfnetlink.\n");
+ ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
+ if (ret < 0) {
+ pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ return ret;
+}
+
+static void __exit nfnl_acct_exit(void)
+{
+ struct nf_acct *cur, *tmp;
+
+ pr_info("nfnl_acct: unregistering from nfnetlink.\n");
+ nfnetlink_subsys_unregister(&nfnl_acct_subsys);
+
+ list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
+ list_del_rcu(&cur->head);
+ /* We are sure that our objects have no clients at this point,
+ * it's safe to release them all without checking refcnt. */
+ kfree_rcu(cur, rcu_head);
+ }
+}
+
+module_init(nfnl_acct_init);
+module_exit(nfnl_acct_exit);
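nfnl_acct_try_del() above relies on the usual decrement-and-test idiom: dropping the base reference succeeds only if nobody else holds one, otherwise the reference is put back and -EBUSY is returned. A standalone sketch of that pattern with C11 atomics (the object layout and names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;   /* starts at 1: the list's own reference */
};

static bool try_del(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		free(o);                       /* nobody else holds it */
		return true;
	}
	atomic_fetch_add(&o->refcnt, 1);       /* still in use, restore the reference */
	return false;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);
	atomic_fetch_add(&o->refcnt, 1);       /* a second user grabs a reference */
	printf("delete while busy: %s\n", try_del(o) ? "freed" : "EBUSY");
	atomic_fetch_sub(&o->refcnt, 1);       /* that user drops its reference */
	printf("delete when idle:  %s\n", try_del(o) ? "freed" : "EBUSY");
	return 0;
}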
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index 4bca15a0c38..ba92824086f 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -98,6 +98,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
u8 nexthdr;
+ __be16 frag_off;
int offset;
ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
@@ -108,7 +109,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
nexthdr = ih->nexthdr;
offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
- &nexthdr);
+ &nexthdr, &frag_off);
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
&ih->saddr, &ih->daddr, nexthdr);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10de75..8e87123f137 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -62,8 +62,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
int ret = 0;
u8 proto;
- if (info->flags & ~XT_CT_NOTRACK)
- return -EINVAL;
+ if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
+ return -EOPNOTSUPP;
if (info->flags & XT_CT_NOTRACK) {
ct = nf_ct_untracked_get();
@@ -92,7 +92,9 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
GFP_KERNEL))
goto err3;
- if (info->helper[0]) {
+ if (info->flags & XT_CT_USERSPACE_HELPER) {
+ __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
+ } else if (info->helper[0]) {
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index d4f4b5d66b2..95237c89607 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -49,7 +49,7 @@ static u32 hash_v4(const struct sk_buff *skb)
return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
}
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 hash_v6(const struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -74,7 +74,7 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
if (par->family == NFPROTO_IPV4)
queue = (((u64) hash_v4(skb) * info->queues_total) >>
32) + queue;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
queue = (((u64) hash_v6(skb) * info->queues_total) >>
32) + queue;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9e63b43faee..190ad37c5cf 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
struct flowi6 *fl6 = &fl.u.ip6;
memset(fl6, 0, sizeof(*fl6));
- ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+ fl6->daddr = ipv6_hdr(skb)->saddr;
}
rcu_read_lock();
ai = nf_get_afinfo(family);
@@ -198,17 +198,18 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static unsigned int
tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
u8 nexthdr;
+ __be16 frag_off;
int tcphoff;
int ret;
nexthdr = ipv6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+ tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
if (tcphoff < 0)
return NF_DROP;
ret = tcpmss_mangle_packet(skb, par->targinfo,
@@ -259,7 +260,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
return -EINVAL;
}
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
@@ -292,7 +293,7 @@ static struct xt_target tcpmss_tg_reg[] __read_mostly = {
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.family = NFPROTO_IPV6,
.name = "TCPMSS",
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 9dc9ecfdd54..25fd1c4e1ee 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -80,16 +80,17 @@ tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
sizeof(struct iphdr) + sizeof(struct tcphdr));
}
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
static unsigned int
tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
int tcphoff;
u_int8_t nexthdr;
+ __be16 frag_off;
nexthdr = ipv6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+ tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
if (tcphoff < 0)
return NF_DROP;
@@ -108,7 +109,7 @@ static struct xt_target tcpoptstrip_tg_reg[] __read_mostly = {
.targetsize = sizeof(struct xt_tcpoptstrip_target_info),
.me = THIS_MODULE,
},
-#if defined(CONFIG_IP6_NF_MANGLE) || defined(CONFIG_IP6_NF_MANGLE_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_MANGLE)
{
.name = "TCPOPTSTRIP",
.family = NFPROTO_IPV6,
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 5f054a0dbbb..3aae66facf9 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -25,13 +25,10 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_TEE.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
# define WITH_CONNTRACK 1
# include <net/netfilter/nf_conntrack.h>
#endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-# define WITH_IPV6 1
-#endif
struct xt_tee_priv {
struct notifier_block notifier;
@@ -136,7 +133,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
static bool
tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
{
@@ -196,7 +193,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
}
return XT_CONTINUE;
}
-#endif /* WITH_IPV6 */
+#endif
static int tee_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
@@ -276,7 +273,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
{
.name = "TEE",
.revision = 1,
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index dcfd57eb9d0..35a959a096e 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -22,7 +22,7 @@
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index b77d383cec7..49c5ff7f6dd 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -16,7 +16,7 @@
#include <linux/ip.h>
#include <net/route.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Xtables: address type match");
MODULE_ALIAS("ipt_addrtype");
MODULE_ALIAS("ip6t_addrtype");
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
const struct in6_addr *addr)
{
@@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
int route_err;
memset(&flow, 0, sizeof(flow));
- ipv6_addr_copy(&flow.daddr, addr);
+ flow.daddr = *addr;
if (dev)
flow.flowi6_oif = dev->ifindex;
@@ -149,7 +149,7 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
else if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
dev = par->out;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (par->family == NFPROTO_IPV6)
return addrtype_mt6(net, dev, skb, info);
#endif
@@ -190,7 +190,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
return -EINVAL;
}
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
if (par->family == NFPROTO_IPV6) {
if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
pr_err("ipv6 BLACKHOLE matching not supported\n");
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 5b138506690..e595e07a759 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -40,46 +40,46 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
case XT_CONNBYTES_PKTS:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
- what = counters[IP_CT_DIR_ORIGINAL].packets;
+ what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
break;
case XT_CONNBYTES_DIR_REPLY:
- what = counters[IP_CT_DIR_REPLY].packets;
+ what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
case XT_CONNBYTES_DIR_BOTH:
- what = counters[IP_CT_DIR_ORIGINAL].packets;
- what += counters[IP_CT_DIR_REPLY].packets;
+ what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
+ what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
}
break;
case XT_CONNBYTES_BYTES:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
- what = counters[IP_CT_DIR_ORIGINAL].bytes;
+ what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
break;
case XT_CONNBYTES_DIR_REPLY:
- what = counters[IP_CT_DIR_REPLY].bytes;
+ what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
break;
case XT_CONNBYTES_DIR_BOTH:
- what = counters[IP_CT_DIR_ORIGINAL].bytes;
- what += counters[IP_CT_DIR_REPLY].bytes;
+ what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+ what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
break;
}
break;
case XT_CONNBYTES_AVGPKT:
switch (sinfo->direction) {
case XT_CONNBYTES_DIR_ORIGINAL:
- bytes = counters[IP_CT_DIR_ORIGINAL].bytes;
- pkts = counters[IP_CT_DIR_ORIGINAL].packets;
+ bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
+ pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);
break;
case XT_CONNBYTES_DIR_REPLY:
- bytes = counters[IP_CT_DIR_REPLY].bytes;
- pkts = counters[IP_CT_DIR_REPLY].packets;
+ bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+ pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
case XT_CONNBYTES_DIR_BOTH:
- bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
- counters[IP_CT_DIR_REPLY].bytes;
- pkts = counters[IP_CT_DIR_ORIGINAL].packets +
- counters[IP_CT_DIR_REPLY].packets;
+ bytes = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes) +
+ atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
+ pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) +
+ atomic64_read(&counters[IP_CT_DIR_REPLY].packets);
break;
}
if (pkts != 0)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
break;
}
- if (sinfo->count.to)
+ if (sinfo->count.to >= sinfo->count.from)
return what <= sinfo->count.to && what >= sinfo->count.from;
- else
- return what >= sinfo->count.from;
+ else /* inverted */
+ return what < sinfo->count.to || what > sinfo->count.from;
}
static int connbytes_mt_check(const struct xt_mtchk_param *par)
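Besides switching the conntrack counters to atomic64_read(), the hunk above changes what a swapped from/to pair means: it now selects the negated range instead of being treated as "no upper bound". A small standalone check of the new semantics (illustrative helper name, not the module's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_range_match(uint64_t what, uint64_t from, uint64_t to)
{
        if (to >= from)                         /* normal inclusive range */
                return what <= to && what >= from;
        /* inverted: userspace swapped the bounds, match outside [to, from] */
        return what < to || what > from;
}

int main(void)
{
        assert(connbytes_range_match(150, 100, 200));   /* inside 100..200 */
        assert(!connbytes_range_match(50, 100, 200));
        /* negated rule: bounds arrive swapped as from=200, to=100 */
        assert(connbytes_range_match(50, 200, 100));    /* outside 100..200 */
        assert(!connbytes_range_match(150, 200, 100));
        puts("range semantics ok");
        return 0;
}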
diff --git a/net/netfilter/xt_ecn.c b/net/netfilter/xt_ecn.c
new file mode 100644
index 00000000000..3c831a8efeb
--- /dev/null
+++ b/net/netfilter/xt_ecn.c
@@ -0,0 +1,179 @@
+/*
+ * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits
+ *
+ * (C) 2002 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_ecn.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ecn");
+MODULE_ALIAS("ip6t_ecn");
+
+static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_ecn_info *einfo = par->matchinfo;
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ /* In practice, TCP match does this, so can't fail. But let's
+ * be good citizens.
+ */
+ th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return false;
+
+ if (einfo->operation & XT_ECN_OP_MATCH_ECE) {
+ if (einfo->invert & XT_ECN_OP_MATCH_ECE) {
+ if (th->ece == 1)
+ return false;
+ } else {
+ if (th->ece == 0)
+ return false;
+ }
+ }
+
+ if (einfo->operation & XT_ECN_OP_MATCH_CWR) {
+ if (einfo->invert & XT_ECN_OP_MATCH_CWR) {
+ if (th->cwr == 1)
+ return false;
+ } else {
+ if (th->cwr == 0)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline bool match_ip(const struct sk_buff *skb,
+ const struct xt_ecn_info *einfo)
+{
+ return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^
+ !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_ecn_info *info = par->matchinfo;
+
+ if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info))
+ return false;
+
+ if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+ !match_tcp(skb, par))
+ return false;
+
+ return true;
+}
+
+static int ecn_mt_check4(const struct xt_mtchk_param *par)
+{
+ const struct xt_ecn_info *info = par->matchinfo;
+ const struct ipt_ip *ip = par->entryinfo;
+
+ if (info->operation & XT_ECN_OP_MATCH_MASK)
+ return -EINVAL;
+
+ if (info->invert & XT_ECN_OP_MATCH_MASK)
+ return -EINVAL;
+
+ if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+ (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
+ pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline bool match_ipv6(const struct sk_buff *skb,
+ const struct xt_ecn_info *einfo)
+{
+ return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) ==
+ einfo->ip_ect) ^
+ !!(einfo->invert & XT_ECN_OP_MATCH_IP);
+}
+
+static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_ecn_info *info = par->matchinfo;
+
+ if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info))
+ return false;
+
+ if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+ !match_tcp(skb, par))
+ return false;
+
+ return true;
+}
+
+static int ecn_mt_check6(const struct xt_mtchk_param *par)
+{
+ const struct xt_ecn_info *info = par->matchinfo;
+ const struct ip6t_ip6 *ip = par->entryinfo;
+
+ if (info->operation & XT_ECN_OP_MATCH_MASK)
+ return -EINVAL;
+
+ if (info->invert & XT_ECN_OP_MATCH_MASK)
+ return -EINVAL;
+
+ if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
+ (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
+ pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_match ecn_mt_reg[] __read_mostly = {
+ {
+ .name = "ecn",
+ .family = NFPROTO_IPV4,
+ .match = ecn_mt4,
+ .matchsize = sizeof(struct xt_ecn_info),
+ .checkentry = ecn_mt_check4,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "ecn",
+ .family = NFPROTO_IPV6,
+ .match = ecn_mt6,
+ .matchsize = sizeof(struct xt_ecn_info),
+ .checkentry = ecn_mt_check6,
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init ecn_mt_init(void)
+{
+ return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+static void __exit ecn_mt_exit(void)
+{
+ xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg));
+}
+
+module_init(ecn_mt_init);
+module_exit(ecn_mt_exit);
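The new xt_ecn match compares the two ECN bits of the IP header and applies negation by XOR-ing the raw comparison with the invert flag. A standalone sketch of the IPv4 half of that test (illustrative names; the mask is simply the 2-bit ECN field of the TOS byte):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ECN_IP_MASK     0x03    /* Not-ECT=00, ECT(1)=01, ECT(0)=10, CE=11 */

static bool ecn_ip_match(uint8_t tos, uint8_t want_ect, bool invert)
{
        return ((tos & ECN_IP_MASK) == want_ect) ^ invert;
}

int main(void)
{
        uint8_t tos = 0xb8 | 0x03;      /* DSCP EF with the CE codepoint */

        printf("match CE:      %d\n", ecn_ip_match(tos, 0x03, false)); /* 1 */
        printf("match !CE:     %d\n", ecn_ip_match(tos, 0x03, true));  /* 0 */
        printf("match Not-ECT: %d\n", ecn_ip_match(tos, 0x00, false)); /* 0 */
        return 0;
}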
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dfd52bad152..8e499210187 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -21,7 +21,7 @@
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif
@@ -64,7 +64,7 @@ struct dsthash_dst {
__be32 src;
__be32 dst;
} ip;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
struct {
__be32 src[4];
__be32 dst[4];
@@ -413,7 +413,7 @@ static inline __be32 maskl(__be32 a, unsigned int l)
return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
switch (p) {
@@ -445,6 +445,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
{
__be16 _ports[2], *ports;
u8 nexthdr;
+ __be16 frag_off;
int poff;
memset(dst, 0, sizeof(*dst));
@@ -463,7 +464,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
nexthdr = ip_hdr(skb)->protocol;
break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
@@ -480,7 +481,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
(XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
return 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
if ((int)protoff < 0)
return -1;
break;
@@ -615,7 +616,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
.destroy = hashlimit_mt_destroy,
.me = THIS_MODULE,
},
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
{
.name = "hashlimit",
.revision = 1,
@@ -692,7 +693,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
ent->rateinfo.credit, ent->rateinfo.credit_cap,
ent->rateinfo.cost);
break;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
(long)(ent->expires - jiffies)/HZ,
@@ -760,7 +761,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
if (!hashlimit_net->ipt_hashlimit)
return -ENOMEM;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
if (!hashlimit_net->ip6t_hashlimit) {
proc_net_remove(net, "ipt_hashlimit");
@@ -773,7 +774,7 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
proc_net_remove(net, "ipt_hashlimit");
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
proc_net_remove(net, "ip6t_hashlimit");
#endif
}
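Beyond the IS_ENABLED() conversion and the extra frag_off argument to ipv6_skip_exthdr(), the context above shows hashlimit's maskl() helper, which reduces an address to its /l network part so limits can be grouped per subnet. A standalone IPv4 version using the same expression (userspace C):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t maskl(uint32_t a_be, unsigned int l)    /* a_be: network order */
{
        return l ? htonl(ntohl(a_be) & ~0u << (32 - l)) : 0;
}

int main(void)
{
        struct in_addr addr, net;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.123", &addr);
        net.s_addr = maskl(addr.s_addr, 24);

        printf("/24 group key: %s\n",
               inet_ntop(AF_INET, &net, buf, sizeof(buf)));  /* 192.0.2.0 */
        return 0;
}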
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
new file mode 100644
index 00000000000..b3be0ef21f1
--- /dev/null
+++ b/net/netfilter/xt_nfacct.c
@@ -0,0 +1,76 @@
+/*
+ * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (or any
+ * later at your option) as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nfnetlink_acct.h>
+#include <linux/netfilter/xt_nfacct.h>
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: match for the extended accounting infrastructure");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_nfacct");
+MODULE_ALIAS("ip6t_nfacct");
+
+static bool nfacct_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_nfacct_match_info *info = par->targinfo;
+
+ nfnl_acct_update(skb, info->nfacct);
+
+ return true;
+}
+
+static int
+nfacct_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_nfacct_match_info *info = par->matchinfo;
+ struct nf_acct *nfacct;
+
+ nfacct = nfnl_acct_find_get(info->name);
+ if (nfacct == NULL) {
+ pr_info("xt_nfacct: accounting object with name `%s' "
+ "does not exist\n", info->name);
+ return -ENOENT;
+ }
+ info->nfacct = nfacct;
+ return 0;
+}
+
+static void
+nfacct_mt_destroy(const struct xt_mtdtor_param *par)
+{
+ const struct xt_nfacct_match_info *info = par->matchinfo;
+
+ nfnl_acct_put(info->nfacct);
+}
+
+static struct xt_match nfacct_mt_reg __read_mostly = {
+ .name = "nfacct",
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfacct_mt_checkentry,
+ .match = nfacct_mt,
+ .destroy = nfacct_mt_destroy,
+ .matchsize = sizeof(struct xt_nfacct_match_info),
+ .me = THIS_MODULE,
+};
+
+static int __init nfacct_mt_init(void)
+{
+ return xt_register_match(&nfacct_mt_reg);
+}
+
+static void __exit nfacct_mt_exit(void)
+{
+ xt_unregister_match(&nfacct_mt_reg);
+}
+
+module_init(nfacct_mt_init);
+module_exit(nfacct_mt_exit);
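xt_nfacct resolves the named accounting object once in checkentry, keeps the reference in info->nfacct for the per-packet path, and releases it in the destroy hook. A userspace sketch of that get/put lifetime (toy registry and names, not the nfnetlink_acct API):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct acct_obj {
        char name[32];
        atomic_uint refcnt;             /* 1 = owned by the registry itself */
        atomic_ullong packets;
};

static struct acct_obj registry[] = {
        { .name = "http-traffic", .refcnt = 1 },
};

static struct acct_obj *acct_find_get(const char *name)   /* checkentry */
{
        for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
                if (strcmp(registry[i].name, name) == 0) {
                        atomic_fetch_add(&registry[i].refcnt, 1);
                        return &registry[i];
                }
        return NULL;                    /* the module returns -ENOENT here */
}

static void acct_put(struct acct_obj *obj)                 /* destroy */
{
        atomic_fetch_sub(&obj->refcnt, 1);
}

int main(void)
{
        struct acct_obj *obj = acct_find_get("http-traffic");

        if (!obj)
                return 1;
        atomic_fetch_add(&obj->packets, 1);     /* per-packet fast path */
        acct_put(obj);
        printf("packets=%llu refcnt=%u\n",
               (unsigned long long)atomic_load(&obj->packets),
               (unsigned)atomic_load(&obj->refcnt));
        return 0;
}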
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index fe39f7e913d..72bb07f57f9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -22,7 +22,7 @@
#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_SOCKET_HAVE_IPV6 1
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -30,7 +30,7 @@
#include <linux/netfilter/xt_socket.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define XT_SOCKET_HAVE_CONNTRACK 1
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -214,6 +214,7 @@ extract_icmp6_fields(const struct sk_buff *skb,
struct icmp6hdr *icmph, _icmph;
__be16 *ports, _ports[2];
u8 inside_nexthdr;
+ __be16 inside_fragoff;
int inside_hdrlen;
icmph = skb_header_pointer(skb, outside_hdrlen,
@@ -229,7 +230,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 1;
inside_nexthdr = inside_iph->nexthdr;
- inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), &inside_nexthdr);
+ inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+ &inside_nexthdr, &inside_fragoff);
if (inside_hdrlen < 0)
return 1; /* hjm: Packet has no/incomplete transport layer headers. */
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 96b749dacc3..6f1701322fb 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -96,7 +96,7 @@ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_search - Search for a matching IPv6 address entry
* @addr: IPv6 address
@@ -185,7 +185,7 @@ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_add - Add a new IPv6 address entry to a list
* @entry: address entry
@@ -263,7 +263,7 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
return entry;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_remove_entry - Remove an IPv6 address entry
* @entry: address entry
@@ -342,7 +342,7 @@ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
}
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_af6list_audit_addr - Audit an IPv6 address
* @audit_buf: audit buffer
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index fdbc1d2c735..a1287ce1813 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -133,7 +133,7 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
}
#endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list)
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 3f905e5370c..38204112b9f 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -78,7 +78,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
struct netlbl_dom_map *ptr;
struct netlbl_af4list *iter4;
struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
@@ -90,7 +90,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_domhsh_addr4_entry(iter4));
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&ptr->type_def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
@@ -217,7 +217,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
cipsov4 = map4->type_def.cipsov4;
netlbl_af4list_audit_addr(audit_buf, 0, NULL,
addr4->addr, addr4->mask);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
} else if (addr6 != NULL) {
struct netlbl_domaddr6_map *map6;
map6 = netlbl_domhsh_addr6_entry(addr6);
@@ -306,7 +306,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
struct netlbl_dom_map *entry_old;
struct netlbl_af4list *iter4;
struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
@@ -338,7 +338,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
&entry->type_def.addrsel->list4)
netlbl_domhsh_audit_add(entry, iter4, NULL,
ret_val, audit_info);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6)
netlbl_domhsh_audit_add(entry, NULL, iter6,
@@ -365,7 +365,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
ret_val = -EEXIST;
goto add_return;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6)
if (netlbl_af6list_search_exact(&iter6->addr,
@@ -386,7 +386,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
if (ret_val != 0)
goto add_return;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
&entry->type_def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
@@ -510,7 +510,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
struct netlbl_dom_map *entry_map;
struct netlbl_af4list *entry_addr;
struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
#endif /* IPv6 */
struct netlbl_domaddr4_map *entry;
@@ -533,7 +533,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
goto remove_af4_failure;
netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
goto remove_af4_single_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
goto remove_af4_single_addr;
#endif /* IPv6 */
@@ -644,7 +644,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
return netlbl_domhsh_addr4_entry(addr_iter);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
* @domain: the domain name to search for
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index bfcc0f7024c..90872c4ca30 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -104,7 +104,7 @@ int netlbl_domhsh_walk(u32 *skip_bkt,
int (*callback) (struct netlbl_dom_map *entry, void *arg),
void *cb_arg);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr);
#endif /* IPv6 */
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 9c24de10a65..2560e7b441c 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
- const struct in_addr *addr4, *mask4;
- const struct in6_addr *addr6, *mask6;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
- case AF_INET:
- addr4 = addr;
- mask4 = mask;
+ case AF_INET: {
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -148,25 +146,29 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
- case AF_INET6:
- addr6 = addr;
- mask6 = mask;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6: {
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->type = NETLBL_NLTYPE_UNLABELED;
- ipv6_addr_copy(&map6->list.addr, addr6);
+ map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
- ipv6_addr_copy(&map6->list.mask, mask6);
+ map6->list.mask = *mask6;
map6->list.valid = 1;
- ret_val = netlbl_af4list_add(&map4->list,
- &addrmap->list4);
+ ret_val = netlbl_af6list_add(&map6->list,
+ &addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
+ }
+#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -667,7 +673,7 @@ int netlbl_sock_setattr(struct sock *sk,
ret_val = -ENOENT;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
/* since we don't support any IPv6 labeling protocols right
* now we can optimize everything away until we do */
@@ -718,7 +724,7 @@ int netlbl_sock_getattr(struct sock *sk,
case AF_INET:
ret_val = cipso_v4_sock_getattr(sk, secattr);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ret_val = -ENOMSG;
break;
@@ -776,7 +782,7 @@ int netlbl_conn_setattr(struct sock *sk,
ret_val = -ENOENT;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
/* since we don't support any IPv6 labeling protocols right
* now we can optimize everything away until we do */
@@ -847,7 +853,7 @@ int netlbl_req_setattr(struct request_sock *req,
ret_val = -ENOENT;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
/* since we don't support any IPv6 labeling protocols right
* now we can optimize everything away until we do */
@@ -920,7 +926,7 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
ret_val = -ENOENT;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
/* since we don't support any IPv6 labeling protocols right
* now we can optimize everything away until we do */
@@ -959,7 +965,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
cipso_v4_skbuff_getattr(skb, secattr) == 0)
return 0;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
break;
#endif /* IPv6 */
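The ipv6_addr_copy() calls above become plain structure assignments, followed by masking so that only the network part of the address is stored for later comparison. A standalone illustration (byte-wise masking here; the kernel code does the same thing 32 bits at a time through s6_addr32):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        struct in6_addr addr, mask, entry;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8:abcd:12::42", &addr);
        inet_pton(AF_INET6, "ffff:ffff:ffff:ffff::", &mask);   /* /64 */

        entry = addr;                   /* replaces ipv6_addr_copy(&entry, &addr) */
        for (int i = 0; i < 16; i++)
                entry.s6_addr[i] &= mask.s6_addr[i];

        printf("stored network part: %s\n",
               inet_ntop(AF_INET6, &entry, buf, sizeof(buf)));
        /* prints 2001:db8:abcd:12:: */
        return 0;
}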
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index bfa55586977..4809e2e48b0 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -184,7 +184,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
entry->type = NETLBL_NLTYPE_ADDRSELECT;
entry->type_def.addrsel = addrmap;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
} else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
struct in6_addr *addr;
struct in6_addr *mask;
@@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_failure;
}
- ipv6_addr_copy(&map->list.addr, addr);
+ map->list.addr = *addr;
map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
- ipv6_addr_copy(&map->list.mask, mask);
+ map->list.mask = *mask;
map->list.valid = 1;
map->type = entry->type;
@@ -270,7 +270,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
struct nlattr *nla_a;
struct nlattr *nla_b;
struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
#endif
@@ -324,7 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_b);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
&entry->type_def.addrsel->list6) {
struct netlbl_domaddr6_map *map6;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e251c2c8852..4b5fa0fe78f 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -170,7 +170,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
struct netlbl_unlhsh_iface *iface;
struct netlbl_af4list *iter4;
struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_af6list *tmp6;
#endif /* IPv6 */
@@ -184,7 +184,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_unlhsh_addr4_entry(iter4));
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) {
netlbl_af6list_remove_entry(iter6);
kfree(netlbl_unlhsh_addr6_entry(iter6));
@@ -274,7 +274,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
return ret_val;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
* @iface: the associated interface entry
@@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
if (entry == NULL)
return -ENOMEM;
- ipv6_addr_copy(&entry->list.addr, addr);
+ entry->list.addr = *addr;
entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
- ipv6_addr_copy(&entry->list.mask, mask);
+ entry->list.mask = *mask;
entry->list.valid = 1;
entry->secid = secid;
@@ -436,7 +436,7 @@ int netlbl_unlhsh_add(struct net *net,
mask4->s_addr);
break;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case sizeof(struct in6_addr): {
const struct in6_addr *addr6 = addr;
const struct in6_addr *mask6 = mask;
@@ -531,7 +531,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/**
* netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
* @net: network namespace
@@ -606,14 +606,14 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
{
struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
#endif /* IPv6 */
spin_lock(&netlbl_unlhsh_lock);
netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list)
goto unlhsh_condremove_failure;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list)
goto unlhsh_condremove_failure;
#endif /* IPv6 */
@@ -680,7 +680,7 @@ int netlbl_unlhsh_remove(struct net *net,
iface, addr, mask,
audit_info);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case sizeof(struct in6_addr):
ret_val = netlbl_unlhsh_remove_addr6(net,
iface, addr, mask,
@@ -1196,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
struct netlbl_unlhsh_iface *iface;
struct list_head *iter_list;
struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *addr6;
#endif
@@ -1228,7 +1228,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
goto unlabel_staticlist_return;
}
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(addr6,
&iface->addr6_list) {
if (iter_addr6++ < skip_addr6)
@@ -1277,7 +1277,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
u32 skip_addr6 = cb->args[1];
u32 iter_addr4 = 0;
struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
u32 iter_addr6 = 0;
struct netlbl_af6list *addr6;
#endif
@@ -1303,7 +1303,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
goto unlabel_staticlistdef_return;
}
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
if (iter_addr6++ < skip_addr6)
continue;
@@ -1494,7 +1494,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
break;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6: {
struct ipv6hdr *hdr6;
struct netlbl_af6list *addr6;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1201b6d4183..629b06182f3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -139,12 +139,12 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
-static u32 netlink_group_mask(u32 group)
+static inline u32 netlink_group_mask(u32 group)
{
return group ? 1 << (group - 1) : 0;
}
-static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
@@ -226,8 +226,7 @@ netlink_unlock_table(void)
wake_up(&nl_table_wait);
}
-static inline struct sock *netlink_lookup(struct net *net, int protocol,
- u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
{
struct nl_pid_hash *hash = &nl_table[protocol].hash;
struct hlist_head *head;
@@ -248,7 +247,7 @@ found:
return sk;
}
-static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
if (size <= PAGE_SIZE)
return kzalloc(size, GFP_ATOMIC);
@@ -258,7 +257,7 @@ static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
get_order(size));
}
-static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
if (size <= PAGE_SIZE)
kfree(table);
@@ -578,7 +577,7 @@ retry:
return err;
}
-static inline int netlink_capable(struct socket *sock, unsigned int flag)
+static inline int netlink_capable(const struct socket *sock, unsigned int flag)
{
return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
capable(CAP_NET_ADMIN);
@@ -846,8 +845,7 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
sock_put(sk);
}
-static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
- gfp_t allocation)
+static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
int delta;
@@ -871,7 +869,7 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
return skb;
}
-static inline void netlink_rcv_wake(struct sock *sk)
+static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -881,7 +879,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
wake_up_interruptible(&nlk->wait);
}
-static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
int ret;
struct netlink_sock *nlk = nlk_sk(sk);
@@ -952,8 +950,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
-static inline int netlink_broadcast_deliver(struct sock *sk,
- struct sk_buff *skb)
+static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -962,7 +959,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
skb_set_owner_r(skb, sk);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
- return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
+ return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
}
return -1;
}
@@ -982,7 +979,7 @@ struct netlink_broadcast_data {
void *tx_data;
};
-static inline int do_one_broadcast(struct sock *sk,
+static int do_one_broadcast(struct sock *sk,
struct netlink_broadcast_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
@@ -1110,8 +1107,7 @@ struct netlink_set_err_data {
int code;
};
-static inline int do_one_set_err(struct sock *sk,
- struct netlink_set_err_data *p)
+static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
struct netlink_sock *nlk = nlk_sk(sk);
int ret = 0;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 482fa571b4e..a403b618faa 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,6 +33,14 @@ void genl_unlock(void)
}
EXPORT_SYMBOL(genl_unlock);
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_genl_is_held(void)
+{
+ return lockdep_is_held(&genl_mutex);
+}
+EXPORT_SYMBOL(lockdep_genl_is_held);
+#endif
+
#define GENL_FAM_TAB_SIZE 16
#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
@@ -98,7 +106,7 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
/* Of course we are going to have problems once we hit
* 2^16 alive types, but that can only happen by year 2K
*/
-static inline u16 genl_generate_id(void)
+static u16 genl_generate_id(void)
{
static u16 id_gen_idx = GENL_MIN_ID;
int i;
@@ -784,6 +792,15 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
res = genl_family_find_byname(name);
+#ifdef CONFIG_MODULES
+ if (res == NULL) {
+ genl_unlock();
+ request_module("net-pf-%d-proto-%d-type-%s",
+ PF_NETLINK, NETLINK_GENERIC, name);
+ genl_lock();
+ res = genl_family_find_byname(name);
+ }
+#endif
err = -ENOENT;
}
@@ -946,3 +963,16 @@ int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
return genlmsg_mcast(skb, pid, group, flags);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+ struct nlmsghdr *nlh, gfp_t flags)
+{
+ struct sock *sk = net->genl_sock;
+ int report = 0;
+
+ if (nlh)
+ report = nlmsg_report(nlh);
+
+ nlmsg_notify(sk, skb, pid, group, report, flags);
+}
+EXPORT_SYMBOL(genl_notify);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 732152f718e..7dab229bfbc 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -306,26 +306,26 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
- int opt;
+ unsigned long opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
- if (optlen < sizeof(int))
+ if (optlen < sizeof(unsigned int))
return -EINVAL;
- if (get_user(opt, (int __user *)optval))
+ if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETROM_T1:
- if (opt < 1)
+ if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
- if (opt < 1)
+ if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
@@ -337,13 +337,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
return 0;
case NETROM_T4:
- if (opt < 1)
+ if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
- if (opt < 0)
+ if (opt > ULONG_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
@@ -1244,7 +1244,8 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCADDRT:
case SIOCDELRT:
case SIOCNRDECOBS:
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
return nr_rt_ioctl(cmd, argp);
default:
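The nr_setsockopt() changes above read the option as an unsigned value and bound it by ULONG_MAX divided by the multiplier before converting seconds to jiffies, so opt * HZ (or opt * 60 * HZ for NETROM_IDLE) cannot wrap. A standalone sketch of that guard (HZ value assumed, helper name illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define HZ 1000UL                       /* assumption: 1000 ticks per second */

static bool seconds_to_jiffies(unsigned long opt, unsigned long factor,
                               unsigned long *out)
{
        if (opt < 1 || opt > ULONG_MAX / factor)
                return false;           /* would overflow: reject with -EINVAL */
        *out = opt * factor;
        return true;
}

int main(void)
{
        unsigned long t1;

        printf("t1=120s ok? %d\n", seconds_to_jiffies(120, HZ, &t1));
        printf("huge value ok? %d\n",
               seconds_to_jiffies(ULONG_MAX / HZ + 1, HZ, &t1));
        return 0;
}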
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 915a87ba23e..2cf330162d7 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -670,14 +670,17 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
case SIOCADDRT:
if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
return -EFAULT;
- if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
+ if (nr_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
- if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
- dev_put(dev);
+ if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
return -EINVAL;
- }
switch (nr_route.type) {
case NETROM_NODE:
+ if (strnlen(nr_route.mnemonic, 7) == 7) {
+ ret = -EINVAL;
+ break;
+ }
+
ret = nr_add_node(&nr_route.callsign,
nr_route.mnemonic,
&nr_route.neighbour,
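The NETROM_NODE branch now rejects a mnemonic with no NUL terminator inside its 7-byte field, and the ndigis bound is checked before the device reference is taken. A standalone sketch of the termination test:

#include <stdio.h>
#include <string.h>

static int mnemonic_ok(const char mnemonic[7])
{
        return strnlen(mnemonic, 7) < 7;        /* 7 means no NUL inside */
}

int main(void)
{
        char good[7] = "LONDON";                                /* 6 chars + NUL */
        char bad[7] = { 'N', 'O', 'T', 'E', 'R', 'M', '!' };    /* no NUL */

        printf("good: %d bad: %d\n", mnemonic_ok(good), mnemonic_ok(bad));
        return 0;
}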
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 58cddadf8e8..44c865b86d6 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,5 +14,6 @@ menuconfig NFC
be called nfc.
source "net/nfc/nci/Kconfig"
+source "net/nfc/llcp/Kconfig"
source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index fbb550f2377..7b4a6dcfa56 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_NFC) += nfc.o
obj-$(CONFIG_NFC_NCI) += nci/
nfc-objs := core.o netlink.o af_nfc.o rawsock.o
+nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 47e02c1b8c0..3ddf6e698df 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -21,10 +21,13 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nfc.h>
#include "nfc.h"
@@ -33,25 +36,6 @@
int nfc_devlist_generation;
DEFINE_MUTEX(nfc_devlist_mutex);
-int nfc_printk(const char *level, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = printk("%sNFC: %pV\n", level, &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(nfc_printk);
-
/**
* nfc_dev_up - turn on the NFC device
*
@@ -63,7 +47,7 @@ int nfc_dev_up(struct nfc_dev *dev)
{
int rc = 0;
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
device_lock(&dev->dev);
@@ -97,7 +81,7 @@ int nfc_dev_down(struct nfc_dev *dev)
{
int rc = 0;
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
device_lock(&dev->dev);
@@ -139,7 +123,8 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
{
int rc;
- nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);
+ pr_debug("dev_name=%s protocols=0x%x\n",
+ dev_name(&dev->dev), protocols);
if (!protocols)
return -EINVAL;
@@ -174,7 +159,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
{
int rc = 0;
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
device_lock(&dev->dev);
@@ -196,6 +181,86 @@ error:
return rc;
}
+int nfc_dep_link_up(struct nfc_dev *dev, int target_index,
+ u8 comm_mode, u8 rf_mode)
+{
+ int rc = 0;
+
+ pr_debug("dev_name=%s comm:%d rf:%d\n",
+ dev_name(&dev->dev), comm_mode, rf_mode);
+
+ if (!dev->ops->dep_link_up)
+ return -EOPNOTSUPP;
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (dev->dep_link_up == true) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_up(dev, target_index, comm_mode, rf_mode);
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+int nfc_dep_link_down(struct nfc_dev *dev)
+{
+ int rc = 0;
+
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+ if (!dev->ops->dep_link_down)
+ return -EOPNOTSUPP;
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (dev->dep_link_up == false) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ if (dev->dep_rf_mode == NFC_RF_TARGET) {
+ rc = -EOPNOTSUPP;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_down(dev);
+ if (!rc) {
+ dev->dep_link_up = false;
+ nfc_llcp_mac_is_down(dev);
+ nfc_genl_dep_link_down_event(dev);
+ }
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ dev->dep_link_up = true;
+ dev->dep_rf_mode = rf_mode;
+
+ nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
+
+ return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
+}
+EXPORT_SYMBOL(nfc_dep_link_is_up);
+
/**
* nfc_activate_target - prepare the target for data exchange
*
@@ -207,8 +272,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
int rc;
- nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
- target_idx, protocol);
+ pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
+ dev_name(&dev->dev), target_idx, protocol);
device_lock(&dev->dev);
@@ -236,7 +301,8 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
{
int rc = 0;
- nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);
+ pr_debug("dev_name=%s target_idx=%u\n",
+ dev_name(&dev->dev), target_idx);
device_lock(&dev->dev);
@@ -271,8 +337,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
{
int rc;
- nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
- target_idx, skb->len);
+ pr_debug("dev_name=%s target_idx=%u skb->len=%u\n",
+ dev_name(&dev->dev), target_idx, skb->len);
device_lock(&dev->dev);
@@ -289,13 +355,54 @@ error:
return rc;
}
+int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+ pr_debug("dev_name=%s gb_len=%d\n",
+ dev_name(&dev->dev), gb_len);
+
+ if (gb_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ return nfc_llcp_set_remote_gb(dev, gb, gb_len);
+}
+EXPORT_SYMBOL(nfc_set_remote_general_bytes);
+
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len)
+{
+ return nfc_llcp_general_bytes(dev, gt_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
/**
- * nfc_alloc_skb - allocate a skb for data exchange responses
+ * nfc_alloc_send_skb - allocate a skb for data exchange responses
*
* @size: size to allocate
* @gfp: gfp flags
*/
-struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
+struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
+ unsigned int flags, unsigned int size,
+ unsigned int *err)
+{
+ struct sk_buff *skb;
+ unsigned int total_size;
+
+ total_size = size +
+ dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+ skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
+ if (skb)
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+ return skb;
+}
+
+/**
+ * nfc_alloc_recv_skb - allocate a skb for data exchange responses
+ *
+ * @size: size to allocate
+ * @gfp: gfp flags
+ */
+struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp)
{
struct sk_buff *skb;
unsigned int total_size;
@@ -308,7 +415,7 @@ struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
return skb;
}
-EXPORT_SYMBOL(nfc_alloc_skb);
+EXPORT_SYMBOL(nfc_alloc_recv_skb);
/**
* nfc_targets_found - inform that targets were found
@@ -326,7 +433,7 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
{
int i;
- nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);
+ pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
dev->polling = false;
@@ -360,7 +467,7 @@ static void nfc_release(struct device *d)
{
struct nfc_dev *dev = to_nfc_dev(d);
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
nfc_genl_data_exit(&dev->genl_data);
kfree(dev->targets);
@@ -446,7 +553,7 @@ int nfc_register_device(struct nfc_dev *dev)
{
int rc;
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
mutex_lock(&nfc_devlist_mutex);
nfc_devlist_generation++;
@@ -456,11 +563,14 @@ int nfc_register_device(struct nfc_dev *dev)
if (rc < 0)
return rc;
- rc = nfc_genl_device_added(dev);
+ rc = nfc_llcp_register_device(dev);
if (rc)
- nfc_dbg("The userspace won't be notified that the device %s was"
- " added", dev_name(&dev->dev));
+ pr_err("Could not register llcp device\n");
+ rc = nfc_genl_device_added(dev);
+ if (rc)
+ pr_debug("The userspace won't be notified that the device %s was added\n",
+ dev_name(&dev->dev));
return 0;
}
@@ -475,7 +585,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
{
int rc;
- nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
mutex_lock(&nfc_devlist_mutex);
nfc_devlist_generation++;
@@ -488,10 +598,12 @@ void nfc_unregister_device(struct nfc_dev *dev)
mutex_unlock(&nfc_devlist_mutex);
+ nfc_llcp_unregister_device(dev);
+
rc = nfc_genl_device_removed(dev);
if (rc)
- nfc_dbg("The userspace won't be notified that the device %s"
- " was removed", dev_name(&dev->dev));
+ pr_debug("The userspace won't be notified that the device %s was removed\n",
+ dev_name(&dev->dev));
}
EXPORT_SYMBOL(nfc_unregister_device);
@@ -500,7 +612,7 @@ static int __init nfc_init(void)
{
int rc;
- nfc_info("NFC Core ver %s", VERSION);
+ pr_info("NFC Core ver %s\n", VERSION);
rc = class_register(&nfc_class);
if (rc)
@@ -517,6 +629,10 @@ static int __init nfc_init(void)
if (rc)
goto err_rawsock;
+ rc = nfc_llcp_init();
+ if (rc)
+ goto err_llcp_sock;
+
rc = af_nfc_init();
if (rc)
goto err_af_nfc;
@@ -524,6 +640,8 @@ static int __init nfc_init(void)
return 0;
err_af_nfc:
+ nfc_llcp_exit();
+err_llcp_sock:
rawsock_exit();
err_rawsock:
nfc_genl_exit();
@@ -535,6 +653,7 @@ err_genl:
static void __exit nfc_exit(void)
{
af_nfc_exit();
+ nfc_llcp_exit();
rawsock_exit();
nfc_genl_exit();
class_unregister(&nfc_class);
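core.c drops its private nfc_printk()/nfc_dbg() logging in favour of the pr_fmt() convention, where one definition placed before the includes prefixes every pr_*() call in the file. A standalone sketch of the mechanism (printf stands in for printk, and the prefix string is illustrative):

#include <stdio.h>

#define pr_fmt(fmt) "nfc: %s: " fmt, __func__
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

static void nfc_dev_up_demo(const char *name)
{
        pr_debug("dev_name=%s\n", name);  /* -> "nfc: nfc_dev_up_demo: dev_name=nfc0" */
}

int main(void)
{
        nfc_dev_up_demo("nfc0");
        return 0;
}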
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig
new file mode 100644
index 00000000000..fbf5e815090
--- /dev/null
+++ b/net/nfc/llcp/Kconfig
@@ -0,0 +1,7 @@
+config NFC_LLCP
+ depends on NFC && EXPERIMENTAL
+ bool "NFC LLCP support (EXPERIMENTAL)"
+ default n
+ help
+ Say Y here if you want to build support for a kernel NFC LLCP
+ implementation.
\ No newline at end of file
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
new file mode 100644
index 00000000000..151f2ef429c
--- /dev/null
+++ b/net/nfc/llcp/commands.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
+ 0,
+ 1, /* VERSION */
+ 2, /* MIUX */
+ 2, /* WKS */
+ 1, /* LTO */
+ 1, /* RW */
+ 0, /* SN */
+ 1, /* OPT */
+ 0, /* SDREQ */
+ 2, /* SDRES */
+
+};
+
+static u8 llcp_tlv8(u8 *tlv, u8 type)
+{
+ if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+ return 0;
+
+ return tlv[2];
+}
+
+static u8 llcp_tlv16(u8 *tlv, u8 type)
+{
+ if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
+ return 0;
+
+ return be16_to_cpu(*((__be16 *)(tlv + 2)));
+}
+
+
+static u8 llcp_tlv_version(u8 *tlv)
+{
+ return llcp_tlv8(tlv, LLCP_TLV_VERSION);
+}
+
+static u16 llcp_tlv_miux(u8 *tlv)
+{
+ return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f;
+}
+
+static u16 llcp_tlv_wks(u8 *tlv)
+{
+ return llcp_tlv16(tlv, LLCP_TLV_WKS);
+}
+
+static u16 llcp_tlv_lto(u8 *tlv)
+{
+ return llcp_tlv8(tlv, LLCP_TLV_LTO);
+}
+
+static u8 llcp_tlv_opt(u8 *tlv)
+{
+ return llcp_tlv8(tlv, LLCP_TLV_OPT);
+}
+
+static u8 llcp_tlv_rw(u8 *tlv)
+{
+ return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf;
+}
+
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
+{
+ u8 *tlv, length;
+
+ pr_debug("type %d\n", type);
+
+ if (type >= LLCP_TLV_MAX)
+ return NULL;
+
+ length = llcp_tlv_length[type];
+ if (length == 0 && value_length == 0)
+ return NULL;
+ else
+ length = value_length;
+
+ *tlv_length = 2 + length;
+ tlv = kzalloc(2 + length, GFP_KERNEL);
+ if (tlv == NULL)
+ return tlv;
+
+ tlv[0] = type;
+ tlv[1] = length;
+ memcpy(tlv + 2, value, length);
+
+ return tlv;
+}
+
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len)
+{
+ u8 *tlv = tlv_array, type, length, offset = 0;
+
+ pr_debug("TLV array length %d\n", tlv_array_len);
+
+ if (local == NULL)
+ return -ENODEV;
+
+ while (offset < tlv_array_len) {
+ type = tlv[0];
+ length = tlv[1];
+
+ pr_debug("type 0x%x length %d\n", type, length);
+
+ switch (type) {
+ case LLCP_TLV_VERSION:
+ local->remote_version = llcp_tlv_version(tlv);
+ break;
+ case LLCP_TLV_MIUX:
+ local->remote_miu = llcp_tlv_miux(tlv) + 128;
+ break;
+ case LLCP_TLV_WKS:
+ local->remote_wks = llcp_tlv_wks(tlv);
+ break;
+ case LLCP_TLV_LTO:
+ local->remote_lto = llcp_tlv_lto(tlv) * 10;
+ break;
+ case LLCP_TLV_OPT:
+ local->remote_opt = llcp_tlv_opt(tlv);
+ break;
+ case LLCP_TLV_RW:
+ local->remote_rw = llcp_tlv_rw(tlv);
+ break;
+ default:
+ pr_err("Invalid gt tlv value 0x%x\n", type);
+ break;
+ }
+
+ offset += length + 2;
+ tlv += length + 2;
+ }
+
+ pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
+ local->remote_version, local->remote_miu,
+ local->remote_lto, local->remote_opt,
+ local->remote_wks, local->remote_rw);
+
+ return 0;
+}
+
+static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
+ u8 dsap, u8 ssap, u8 ptype)
+{
+ u8 header[2];
+
+ pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+ header[0] = (u8)((dsap << 2) | (ptype >> 2));
+ header[1] = (u8)((ptype << 6) | ssap);
+
+ pr_debug("header 0x%x 0x%x\n", header[0], header[1]);
+
+ memcpy(skb_put(pdu, LLCP_HEADER_SIZE), header, LLCP_HEADER_SIZE);
+
+ return pdu;
+}
+
+static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv, u8 tlv_length)
+{
+ /* XXX Add an skb length check */
+
+ if (tlv == NULL)
+ return NULL;
+
+ memcpy(skb_put(pdu, tlv_length), tlv, tlv_length);
+
+ return pdu;
+}
+
+static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
+ u8 cmd, u16 size)
+{
+ struct sk_buff *skb;
+ int err;
+
+ if (sock->ssap == 0)
+ return NULL;
+
+ skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+ size + LLCP_HEADER_SIZE, &err);
+ if (skb == NULL) {
+ pr_err("Could not allocate PDU\n");
+ return NULL;
+ }
+
+ skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd);
+
+ return skb;
+}
+
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
+{
+ struct sk_buff *skb;
+ struct nfc_dev *dev;
+ struct nfc_llcp_local *local;
+ u16 size = 0;
+
+ pr_debug("Sending DISC\n");
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ dev = sock->dev;
+ if (dev == NULL)
+ return -ENODEV;
+
+ size += LLCP_HEADER_SIZE;
+ size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+ skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC);
+
+ skb_queue_tail(&local->tx_queue, skb);
+
+ return 0;
+}
+
+int nfc_llcp_send_symm(struct nfc_dev *dev)
+{
+ struct sk_buff *skb;
+ struct nfc_llcp_local *local;
+ u16 size = 0;
+
+ pr_debug("Sending SYMM\n");
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+ return -ENODEV;
+
+ size += LLCP_HEADER_SIZE;
+ size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+ skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
+
+ return nfc_data_exchange(dev, local->target_idx, skb,
+ nfc_llcp_recv, local);
+}
+
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+{
+ struct nfc_llcp_local *local;
+ struct sk_buff *skb;
+ u8 *service_name_tlv = NULL, service_name_tlv_length;
+ int err;
+ u16 size = 0;
+
+ pr_debug("Sending CONNECT\n");
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ if (sock->service_name != NULL) {
+ service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
+ sock->service_name,
+ sock->service_name_len,
+ &service_name_tlv_length);
+ size += service_name_tlv_length;
+ }
+
+ pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
+
+ if (service_name_tlv != NULL)
+ skb = llcp_add_tlv(skb, service_name_tlv,
+ service_name_tlv_length);
+
+ skb_queue_tail(&local->tx_queue, skb);
+
+ return 0;
+
+error_tlv:
+ pr_err("error %d\n", err);
+
+ kfree(service_name_tlv);
+
+ return err;
+}
+
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+{
+ struct nfc_llcp_local *local;
+ struct sk_buff *skb;
+
+ pr_debug("Sending CC\n");
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_queue_tail(&local->tx_queue, skb);
+
+ return 0;
+}
+
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
+{
+ struct sk_buff *skb;
+ struct nfc_dev *dev;
+ u16 size = 1; /* Reason code */
+
+ pr_debug("Sending DM reason 0x%x\n", reason);
+
+ if (local == NULL)
+ return -ENODEV;
+
+ dev = local->dev;
+ if (dev == NULL)
+ return -ENODEV;
+
+ size += LLCP_HEADER_SIZE;
+ size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
+ skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM);
+
+ memcpy(skb_put(skb, 1), &reason, 1);
+
+ skb_queue_head(&local->tx_queue, skb);
+
+ return 0;
+}
+
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
+{
+ struct sk_buff *skb;
+ struct nfc_llcp_local *local;
+
+ pr_debug("Send DISC\n");
+
+ local = sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_queue_head(&local->tx_queue, skb);
+
+ return 0;
+}
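llcp_add_header() above packs DSAP (6 bits), PTYPE (4 bits) and SSAP (6 bits) into the two-byte LLCP header. A standalone pack/unpack sketch of that layout (the unpack helper is added purely for illustration; 0x4 is the CONNECT PDU type):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void llcp_pack_header(uint8_t hdr[2], uint8_t dsap, uint8_t ssap,
                             uint8_t ptype)
{
        hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));
        hdr[1] = (uint8_t)((ptype << 6) | ssap);
}

static void llcp_unpack_header(const uint8_t hdr[2], uint8_t *dsap,
                               uint8_t *ssap, uint8_t *ptype)
{
        *dsap = hdr[0] >> 2;
        *ptype = (uint8_t)(((hdr[0] & 0x03) << 2) | (hdr[1] >> 6));
        *ssap = hdr[1] & 0x3f;
}

int main(void)
{
        uint8_t hdr[2], dsap, ssap, ptype;

        llcp_pack_header(hdr, 0x20, 0x11, 0x4 /* CONNECT */);
        llcp_unpack_header(hdr, &dsap, &ssap, &ptype);

        printf("hdr=%02x %02x dsap=0x%x ssap=0x%x ptype=0x%x\n",
               hdr[0], hdr[1], dsap, ssap, ptype);
        assert(dsap == 0x20 && ssap == 0x11 && ptype == 0x4);
        return 0;
}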
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
new file mode 100644
index 00000000000..1d32680807d
--- /dev/null
+++ b/net/nfc/llcp/llcp.c
@@ -0,0 +1,971 @@
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
+
+static struct list_head llcp_devices;
+
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
+{
+ struct nfc_llcp_sock *parent, *s, *n;
+ struct sock *sk, *parent_sk;
+ int i;
+
+
+ mutex_lock(&local->socket_lock);
+
+ for (i = 0; i < LLCP_MAX_SAP; i++) {
+ parent = local->sockets[i];
+ if (parent == NULL)
+ continue;
+
+ /* Release all child sockets */
+ list_for_each_entry_safe(s, n, &parent->list, list) {
+ list_del(&s->list);
+ sk = &s->sk;
+
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CONNECTED)
+ nfc_put_device(s->dev);
+
+ sk->sk_state = LLCP_CLOSED;
+ sock_set_flag(sk, SOCK_DEAD);
+
+ release_sock(sk);
+ }
+
+ parent_sk = &parent->sk;
+
+ lock_sock(parent_sk);
+
+ if (parent_sk->sk_state == LLCP_LISTEN) {
+ struct nfc_llcp_sock *lsk, *n;
+ struct sock *accept_sk;
+
+ list_for_each_entry_safe(lsk, n, &parent->accept_queue,
+ accept_queue) {
+ accept_sk = &lsk->sk;
+ lock_sock(accept_sk);
+
+ nfc_llcp_accept_unlink(accept_sk);
+
+ accept_sk->sk_state = LLCP_CLOSED;
+ sock_set_flag(accept_sk, SOCK_DEAD);
+
+ release_sock(accept_sk);
+
+ sock_orphan(accept_sk);
+ }
+ }
+
+ if (parent_sk->sk_state == LLCP_CONNECTED)
+ nfc_put_device(parent->dev);
+
+ parent_sk->sk_state = LLCP_CLOSED;
+ sock_set_flag(parent_sk, SOCK_DEAD);
+
+ release_sock(parent_sk);
+ }
+
+ mutex_unlock(&local->socket_lock);
+}
+
+static void nfc_llcp_timeout_work(struct work_struct *work)
+{
+ struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+ timeout_work);
+
+ nfc_dep_link_down(local->dev);
+}
+
+static void nfc_llcp_symm_timer(unsigned long data)
+{
+ struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+ pr_err("SYMM timeout\n");
+
+ queue_work(local->timeout_wq, &local->timeout_work);
+}
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
+{
+ struct nfc_llcp_local *local, *n;
+
+ list_for_each_entry_safe(local, n, &llcp_devices, list)
+ if (local->dev == dev)
+ return local;
+
+ pr_debug("No device found\n");
+
+ return NULL;
+}
+
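+/* Well known service names, indexed by their fixed SAP (see LLCP_SAP_*) */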
+static char *wks[] = {
+ NULL,
+ NULL, /* SDP */
+ "urn:nfc:sn:ip",
+ "urn:nfc:sn:obex",
+ "urn:nfc:sn:snep",
+};
+
+static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
+{
+ int sap, num_wks;
+
+ pr_debug("%s\n", service_name);
+
+ if (service_name == NULL)
+ return -EINVAL;
+
+ num_wks = ARRAY_SIZE(wks);
+
+ for (sap = 0; sap < num_wks; sap++) {
+ if (wks[sap] == NULL)
+ continue;
+
+ if (strncmp(wks[sap], service_name, service_name_len) == 0)
+ return sap;
+ }
+
+ return -EINVAL;
+}
+
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ struct nfc_llcp_sock *sock)
+{
+ mutex_lock(&local->sdp_lock);
+
+ if (sock->service_name != NULL && sock->service_name_len > 0) {
+ int ssap = nfc_llcp_wks_sap(sock->service_name,
+ sock->service_name_len);
+
+ if (ssap > 0) {
+ pr_debug("WKS %d\n", ssap);
+
+ /* This is a WKS, let's check if it's free */
+ if (local->local_wks & BIT(ssap)) {
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_SAP_MAX;
+ }
+
+ set_bit(ssap, &local->local_wks);
+ mutex_unlock(&local->sdp_lock);
+
+ return ssap;
+ }
+
+ /*
+ * This is not a well known service,
+ * we should try to find a local SDP free spot
+ */
+ ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
+ if (ssap == LLCP_SDP_NUM_SAP) {
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_SAP_MAX;
+ }
+
+ pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
+
+ set_bit(ssap, &local->local_sdp);
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_WKS_NUM_SAP + ssap;
+
+ } else if (sock->ssap != 0) {
+ if (sock->ssap < LLCP_WKS_NUM_SAP) {
+ if (!(local->local_wks & BIT(sock->ssap))) {
+ set_bit(sock->ssap, &local->local_wks);
+ mutex_unlock(&local->sdp_lock);
+
+ return sock->ssap;
+ }
+
+ } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
+ if (!(local->local_sdp &
+ BIT(sock->ssap - LLCP_WKS_NUM_SAP))) {
+ set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
+ &local->local_sdp);
+ mutex_unlock(&local->sdp_lock);
+
+ return sock->ssap;
+ }
+ }
+ }
+
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_SAP_MAX;
+}
+
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local)
+{
+ u8 local_ssap;
+
+ mutex_lock(&local->sdp_lock);
+
+ local_ssap = find_first_zero_bit(&local->local_sap, LLCP_LOCAL_NUM_SAP);
+ if (local_ssap == LLCP_LOCAL_NUM_SAP) {
+ mutex_unlock(&local->sdp_lock);
+ return LLCP_SAP_MAX;
+ }
+
+ set_bit(local_ssap, &local->local_sap);
+
+ mutex_unlock(&local->sdp_lock);
+
+ return local_ssap + LLCP_LOCAL_SAP_OFFSET;
+}
+
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
+{
+ u8 local_ssap;
+ unsigned long *sdp;
+
+ if (ssap < LLCP_WKS_NUM_SAP) {
+ local_ssap = ssap;
+ sdp = &local->local_wks;
+ } else if (ssap < LLCP_LOCAL_NUM_SAP) {
+ local_ssap = ssap - LLCP_WKS_NUM_SAP;
+ sdp = &local->local_sdp;
+ } else if (ssap < LLCP_MAX_SAP) {
+ local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
+ sdp = &local->local_sap;
+ } else {
+ return;
+ }
+
+ mutex_lock(&local->sdp_lock);
+
+ clear_bit(local_ssap, sdp);
+
+ mutex_unlock(&local->sdp_lock);
+}
+
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL) {
+ *general_bytes_len = 0;
+ return NULL;
+ }
+
+ *general_bytes_len = local->gb_len;
+
+ return local->gb;
+}
+
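+/* Build the local general bytes: LLCP magic followed by VERSION, LTO and WKS TLVs */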
+static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+{
+ u8 *gb_cur, *version_tlv, version, version_length;
+ u8 *lto_tlv, lto, lto_length;
+ u8 *wks_tlv, wks_length;
+ u8 gb_len = 0;
+
+ version = LLCP_VERSION_11;
+ version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+ 1, &version_length);
+ gb_len += version_length;
+
+ /* 1500 ms */
+ lto = 150;
+ lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
+ gb_len += lto_length;
+
+ pr_debug("Local wks 0x%lx\n", local->local_wks);
+ wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
+ &wks_length);
+ gb_len += wks_length;
+
+ gb_len += ARRAY_SIZE(llcp_magic);
+
+ if (gb_len > NFC_MAX_GT_LEN) {
+ kfree(version_tlv);
+ kfree(lto_tlv);
+ kfree(wks_tlv);
+ return -EINVAL;
+ }
+
+ gb_cur = local->gb;
+
+ memcpy(gb_cur, llcp_magic, ARRAY_SIZE(llcp_magic));
+ gb_cur += ARRAY_SIZE(llcp_magic);
+
+ memcpy(gb_cur, version_tlv, version_length);
+ gb_cur += version_length;
+
+ memcpy(gb_cur, lto_tlv, lto_length);
+ gb_cur += lto_length;
+
+ memcpy(gb_cur, wks_tlv, wks_length);
+ gb_cur += wks_length;
+
+ kfree(version_tlv);
+ kfree(lto_tlv);
+ kfree(wks_tlv);
+
+ local->gb_len = gb_len;
+
+ return 0;
+}
+
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+ struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+ if (local == NULL) {
+ pr_err("No LLCP device\n");
+ return -ENODEV;
+ }
+
+ if (gb == NULL || gb_len == 0 || gb_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
+ memcpy(local->remote_gb, gb, gb_len);
+ local->remote_gb_len = gb_len;
+
+ if (memcmp(local->remote_gb, llcp_magic, 3)) {
+ pr_err("MAC does not support LLCP\n");
+ return -EINVAL;
+ }
+
+ return nfc_llcp_parse_tlv(local,
+ &local->remote_gb[3], local->remote_gb_len - 3);
+}
+
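+/* Send one queued PDU, or a SYMM to keep the link alive, then re-arm the link timer */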
+static void nfc_llcp_tx_work(struct work_struct *work)
+{
+ struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+ tx_work);
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&local->tx_queue);
+ if (skb != NULL) {
+ pr_debug("Sending pending skb\n");
+ nfc_data_exchange(local->dev, local->target_idx,
+ skb, nfc_llcp_recv, local);
+ } else {
+ nfc_llcp_send_symm(local->dev);
+ }
+
+ mod_timer(&local->link_timer,
+ jiffies + msecs_to_jiffies(local->remote_lto));
+}
+
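+/* LLCP header: byte 0 is DSAP (6 bits) | PTYPE (2 MSBs), byte 1 is PTYPE (2 LSBs) | SSAP (6 bits) */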
+static u8 nfc_llcp_dsap(struct sk_buff *pdu)
+{
+ return (pdu->data[0] & 0xfc) >> 2;
+}
+
+static u8 nfc_llcp_ptype(struct sk_buff *pdu)
+{
+ return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6);
+}
+
+static u8 nfc_llcp_ssap(struct sk_buff *pdu)
+{
+ return pdu->data[1] & 0x3f;
+}
+
+static u8 nfc_llcp_ns(struct sk_buff *pdu)
+{
+ return pdu->data[2] >> 4;
+}
+
+static u8 nfc_llcp_nr(struct sk_buff *pdu)
+{
+ return pdu->data[2] & 0xf;
+}
+
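+/* Stamp the I PDU sequence byte: N(S) in the upper nibble, N(R) in the lower one */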
+static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
+{
+ pdu->data[2] = (sock->send_n << 4) | ((sock->recv_n - 1) % 16);
+ sock->send_n = (sock->send_n + 1) % 16;
+ sock->recv_ack_n = (sock->recv_n - 1) % 16;
+}
+
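+/* Look up the bound socket for a local/remote SAP pair and take a reference on it */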
+static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+ u8 ssap, u8 dsap)
+{
+ struct nfc_llcp_sock *sock, *llcp_sock, *n;
+
+ if (ssap == 0 && dsap == 0)
+ return NULL;
+
+ mutex_lock(&local->socket_lock);
+ sock = local->sockets[ssap];
+ if (sock == NULL) {
+ mutex_unlock(&local->socket_lock);
+ return NULL;
+ }
+
+ pr_debug("root dsap %d (%d)\n", sock->dsap, dsap);
+
+ if (sock->dsap == dsap) {
+ sock_hold(&sock->sk);
+ mutex_unlock(&local->socket_lock);
+ return sock;
+ }
+
+ list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
+ pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
+ &llcp_sock->sk, llcp_sock->dsap);
+ if (llcp_sock->dsap == dsap) {
+ sock_hold(&llcp_sock->sk);
+ mutex_unlock(&local->socket_lock);
+ return llcp_sock;
+ }
+ }
+
+ pr_err("Could not find socket for %d %d\n", ssap, dsap);
+
+ mutex_unlock(&local->socket_lock);
+
+ return NULL;
+}
+
+static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+{
+ sock_put(&sock->sk);
+}
+
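+/* Walk the CONNECT PDU TLVs and return a pointer to the service name (SN) value */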
+static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
+{
+ u8 *tlv = &skb->data[2], type, length;
+ size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0;
+
+ while (offset < tlv_array_len) {
+ type = tlv[0];
+ length = tlv[1];
+
+ pr_debug("type 0x%x length %d\n", type, length);
+
+ if (type == LLCP_TLV_SN) {
+ *sn_len = length;
+ return &tlv[2];
+ }
+
+ offset += length + 2;
+ tlv += length + 2;
+ }
+
+ return NULL;
+}
+
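+/*
+ * Handle an inbound CONNECT: find a bound or listening socket (by SAP or
+ * service name), create a child socket and reply with CC, or send DM on error.
+ */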
+static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
+ struct sk_buff *skb)
+{
+ struct sock *new_sk, *parent;
+ struct nfc_llcp_sock *sock, *new_sock;
+ u8 dsap, ssap, bound_sap, reason;
+
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+
+ pr_debug("%d %d\n", dsap, ssap);
+
+ nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
+
+ if (dsap != LLCP_SAP_SDP) {
+ bound_sap = dsap;
+
+ mutex_lock(&local->socket_lock);
+ sock = local->sockets[dsap];
+ if (sock == NULL) {
+ mutex_unlock(&local->socket_lock);
+ reason = LLCP_DM_NOBOUND;
+ goto fail;
+ }
+
+ sock_hold(&sock->sk);
+ mutex_unlock(&local->socket_lock);
+
+ lock_sock(&sock->sk);
+
+ if (sock->dsap == LLCP_SAP_SDP &&
+ sock->sk.sk_state == LLCP_LISTEN)
+ goto enqueue;
+ } else {
+ u8 *sn;
+ size_t sn_len;
+
+ sn = nfc_llcp_connect_sn(skb, &sn_len);
+ if (sn == NULL) {
+ reason = LLCP_DM_NOBOUND;
+ goto fail;
+ }
+
+ pr_debug("Service name length %zu\n", sn_len);
+
+ mutex_lock(&local->socket_lock);
+ for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
+ bound_sap++) {
+ sock = local->sockets[bound_sap];
+ if (sock == NULL)
+ continue;
+
+ if (sock->service_name == NULL ||
+ sock->service_name_len == 0)
+ continue;
+
+ if (sock->service_name_len != sn_len)
+ continue;
+
+ if (sock->dsap == LLCP_SAP_SDP &&
+ sock->sk.sk_state == LLCP_LISTEN &&
+ !memcmp(sn, sock->service_name, sn_len)) {
+ pr_debug("Found service name at SAP %d\n",
+ bound_sap);
+ sock_hold(&sock->sk);
+ mutex_unlock(&local->socket_lock);
+
+ lock_sock(&sock->sk);
+
+ goto enqueue;
+ }
+ }
+ mutex_unlock(&local->socket_lock);
+ }
+
+ reason = LLCP_DM_NOBOUND;
+ goto fail;
+
+enqueue:
+ parent = &sock->sk;
+
+ if (sk_acceptq_is_full(parent)) {
+ reason = LLCP_DM_REJ;
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type,
+ GFP_ATOMIC);
+ if (new_sk == NULL) {
+ reason = LLCP_DM_REJ;
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ new_sock = nfc_llcp_sock(new_sk);
+ new_sock->dev = local->dev;
+ new_sock->local = local;
+ new_sock->nfc_protocol = sock->nfc_protocol;
+ new_sock->ssap = bound_sap;
+ new_sock->dsap = ssap;
+ new_sock->parent = parent;
+
+ pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
+
+ list_add_tail(&new_sock->list, &sock->list);
+
+ nfc_llcp_accept_enqueue(&sock->sk, new_sk);
+
+ nfc_get_device(local->dev->idx);
+
+ new_sk->sk_state = LLCP_CONNECTED;
+
+ /* Wake the listening processes */
+ parent->sk_data_ready(parent, 0);
+
+ /* Send CC */
+ nfc_llcp_send_cc(new_sock);
+
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+
+ return;
+
+fail:
+ /* Send DM */
+ nfc_llcp_send_dm(local, dsap, ssap, reason);
+}
+
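+/*
+ * Handle I and RR PDUs: deliver the I payload upstream, drop acknowledged
+ * frames from the pending queue and refill the transmission window.
+ */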
+static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
+ struct sk_buff *skb)
+{
+ struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
+ u8 dsap, ssap, ptype, ns, nr;
+
+ ptype = nfc_llcp_ptype(skb);
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+ ns = nfc_llcp_ns(skb);
+ nr = nfc_llcp_nr(skb);
+
+ pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns);
+
+ llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+ if (llcp_sock == NULL) {
+ nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+ return;
+ }
+
+ sk = &llcp_sock->sk;
+ lock_sock(sk);
+ if (sk->sk_state == LLCP_CLOSED) {
+ release_sock(sk);
+ nfc_llcp_sock_put(llcp_sock);
+ return;
+ }
+
+ if (ns == llcp_sock->recv_n)
+ llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16;
+ else
+ pr_err("Received out of sequence I PDU\n");
+
+ /* Pass the payload upstream */
+ if (ptype == LLCP_PDU_I) {
+ pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
+
+ skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE);
+ if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
+ pr_err("receive queue is full\n");
+ skb_queue_head(&llcp_sock->tx_backlog_queue, skb);
+ }
+ }
+
+ /* Remove skbs from the pending queue */
+ if (llcp_sock->send_ack_n != nr) {
+ struct sk_buff *s, *tmp;
+
+ llcp_sock->send_ack_n = nr;
+
+ skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp)
+ if (nfc_llcp_ns(s) <= nr) {
+ skb_unlink(s, &llcp_sock->tx_pending_queue);
+ kfree_skb(s);
+ }
+ }
+
+ /* Queue some I frames for transmission */
+ while (llcp_sock->remote_ready &&
+ skb_queue_len(&llcp_sock->tx_pending_queue) <= local->remote_rw) {
+ struct sk_buff *pdu, *pending_pdu;
+
+ pdu = skb_dequeue(&llcp_sock->tx_queue);
+ if (pdu == NULL)
+ break;
+
+ /* Update N(S)/N(R) */
+ nfc_llcp_set_nrns(llcp_sock, pdu);
+
+ pending_pdu = skb_clone(pdu, GFP_KERNEL);
+
+ skb_queue_tail(&local->tx_queue, pdu);
+ skb_queue_tail(&llcp_sock->tx_pending_queue, pending_pdu);
+ }
+
+ release_sock(sk);
+ nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
+ struct sk_buff *skb)
+{
+ struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
+ u8 dsap, ssap;
+
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+
+ llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+ if (llcp_sock == NULL) {
+ nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+ return;
+ }
+
+ sk = &llcp_sock->sk;
+ lock_sock(sk);
+ if (sk->sk_state == LLCP_CLOSED) {
+ release_sock(sk);
+ nfc_llcp_sock_put(llcp_sock);
+ return;
+ }
+
+ if (sk->sk_state == LLCP_CONNECTED) {
+ nfc_put_device(local->dev);
+ sk->sk_state = LLCP_CLOSED;
+ sk->sk_state_change(sk);
+ }
+
+ nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_DISC);
+
+ release_sock(sk);
+ nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_recv_cc(struct nfc_llcp_local *local,
+ struct sk_buff *skb)
+{
+ struct nfc_llcp_sock *llcp_sock;
+ u8 dsap, ssap;
+
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+
+ llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+
+ if (llcp_sock == NULL)
+ llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+
+ if (llcp_sock == NULL) {
+ pr_err("Invalid CC\n");
+ nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
+
+ return;
+ }
+
+ llcp_sock->dsap = ssap;
+
+ nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
+
+ nfc_llcp_sock_put(llcp_sock);
+}
+
+static void nfc_llcp_rx_work(struct work_struct *work)
+{
+ struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+ rx_work);
+ u8 dsap, ssap, ptype;
+ struct sk_buff *skb;
+
+ skb = local->rx_pending;
+ if (skb == NULL) {
+ pr_debug("No pending SKB\n");
+ return;
+ }
+
+ ptype = nfc_llcp_ptype(skb);
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+
+ pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
+
+ switch (ptype) {
+ case LLCP_PDU_SYMM:
+ pr_debug("SYMM\n");
+ break;
+
+ case LLCP_PDU_CONNECT:
+ pr_debug("CONNECT\n");
+ nfc_llcp_recv_connect(local, skb);
+ break;
+
+ case LLCP_PDU_DISC:
+ pr_debug("DISC\n");
+ nfc_llcp_recv_disc(local, skb);
+ break;
+
+ case LLCP_PDU_CC:
+ pr_debug("CC\n");
+ nfc_llcp_recv_cc(local, skb);
+ break;
+
+ case LLCP_PDU_I:
+ case LLCP_PDU_RR:
+ pr_debug("I frame\n");
+ nfc_llcp_recv_hdlc(local, skb);
+ break;
+
+ }
+
+ queue_work(local->tx_wq, &local->tx_work);
+ kfree_skb(local->rx_pending);
+ local->rx_pending = NULL;
+}
+
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
+{
+ struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+ pr_debug("Received an LLCP PDU\n");
+ if (err < 0) {
+ pr_err("err %d", err);
+ return;
+ }
+
+ local->rx_pending = skb_get(skb);
+ del_timer(&local->link_timer);
+ queue_work(local->rx_wq, &local->rx_work);
+}
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+ return;
+
+ /* Close and purge all existing sockets */
+ nfc_llcp_socket_release(local);
+}
+
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct nfc_llcp_local *local;
+
+ pr_debug("rf mode %d\n", rf_mode);
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+ return;
+
+ local->target_idx = target_idx;
+ local->comm_mode = comm_mode;
+ local->rf_mode = rf_mode;
+
+ if (rf_mode == NFC_RF_INITIATOR) {
+ pr_debug("Queueing Tx work\n");
+
+ queue_work(local->tx_wq, &local->tx_work);
+ } else {
+ mod_timer(&local->link_timer,
+ jiffies + msecs_to_jiffies(local->remote_lto));
+ }
+}
+
+int nfc_llcp_register_device(struct nfc_dev *ndev)
+{
+ struct device *dev = &ndev->dev;
+ struct nfc_llcp_local *local;
+ char name[32];
+ int err;
+
+ local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
+ if (local == NULL)
+ return -ENOMEM;
+
+ local->dev = ndev;
+ INIT_LIST_HEAD(&local->list);
+ mutex_init(&local->sdp_lock);
+ mutex_init(&local->socket_lock);
+ init_timer(&local->link_timer);
+ local->link_timer.data = (unsigned long) local;
+ local->link_timer.function = nfc_llcp_symm_timer;
+
+ skb_queue_head_init(&local->tx_queue);
+ INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
+ snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
+ local->tx_wq = alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (local->tx_wq == NULL) {
+ err = -ENOMEM;
+ goto err_local;
+ }
+
+ local->rx_pending = NULL;
+ INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
+ snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
+ local->rx_wq = alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (local->rx_wq == NULL) {
+ err = -ENOMEM;
+ goto err_tx_wq;
+ }
+
+ INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
+ snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
+ local->timeout_wq = alloc_workqueue(name,
+ WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (local->timeout_wq == NULL) {
+ err = -ENOMEM;
+ goto err_rx_wq;
+ }
+
+ nfc_llcp_build_gb(local);
+
+ local->remote_miu = LLCP_DEFAULT_MIU;
+ local->remote_lto = LLCP_DEFAULT_LTO;
+ local->remote_rw = LLCP_DEFAULT_RW;
+
+ list_add(&local->list, &llcp_devices);
+
+ return 0;
+
+err_rx_wq:
+ destroy_workqueue(local->rx_wq);
+
+err_tx_wq:
+ destroy_workqueue(local->tx_wq);
+
+err_local:
+ kfree(local);
+
+ return err;
+}
+
+void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+ struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+
+ if (local == NULL) {
+ pr_debug("No such device\n");
+ return;
+ }
+
+ list_del(&local->list);
+ nfc_llcp_socket_release(local);
+ del_timer_sync(&local->link_timer);
+ skb_queue_purge(&local->tx_queue);
+ destroy_workqueue(local->tx_wq);
+ destroy_workqueue(local->rx_wq);
+ kfree_skb(local->rx_pending);
+ kfree(local);
+}
+
+int __init nfc_llcp_init(void)
+{
+ INIT_LIST_HEAD(&llcp_devices);
+
+ return nfc_llcp_sock_init();
+}
+
+void nfc_llcp_exit(void)
+{
+ nfc_llcp_sock_exit();
+}
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
new file mode 100644
index 00000000000..0ad2e336158
--- /dev/null
+++ b/net/nfc/llcp/llcp.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+enum llcp_state {
+ LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+ LLCP_CLOSED,
+ LLCP_BOUND,
+ LLCP_LISTEN,
+};
+
+#define LLCP_DEFAULT_LTO 100
+#define LLCP_DEFAULT_RW 1
+#define LLCP_DEFAULT_MIU 128
+
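+/* SAP address space: 0..15 well known, 16..31 SDP advertised, 32..63 local only */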
+#define LLCP_WKS_NUM_SAP 16
+#define LLCP_SDP_NUM_SAP 16
+#define LLCP_LOCAL_NUM_SAP 32
+#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
+#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
+
+struct nfc_llcp_sock;
+
+struct nfc_llcp_local {
+ struct list_head list;
+ struct nfc_dev *dev;
+
+ struct mutex sdp_lock;
+ struct mutex socket_lock;
+
+ struct timer_list link_timer;
+ struct sk_buff_head tx_queue;
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+ struct sk_buff *rx_pending;
+ struct workqueue_struct *timeout_wq;
+ struct work_struct timeout_work;
+
+ u32 target_idx;
+ u8 rf_mode;
+ u8 comm_mode;
+ unsigned long local_wks; /* Well known services */
+ unsigned long local_sdp; /* Local services */
+ unsigned long local_sap; /* Local SAPs, not available for discovery */
+
+ /* local */
+ u8 gb[NFC_MAX_GT_LEN];
+ u8 gb_len;
+
+ /* remote */
+ u8 remote_gb[NFC_MAX_GT_LEN];
+ u8 remote_gb_len;
+
+ u8 remote_version;
+ u16 remote_miu;
+ u16 remote_lto;
+ u8 remote_opt;
+ u16 remote_wks;
+ u8 remote_rw;
+
+ /* sockets array */
+ struct nfc_llcp_sock *sockets[LLCP_MAX_SAP];
+};
+
+struct nfc_llcp_sock {
+ struct sock sk;
+ struct list_head list;
+ struct nfc_dev *dev;
+ struct nfc_llcp_local *local;
+ u32 target_idx;
+ u32 nfc_protocol;
+
+ u8 ssap;
+ u8 dsap;
+ char *service_name;
+ size_t service_name_len;
+
+ /* Link variables */
+ u8 send_n;
+ u8 send_ack_n;
+ u8 recv_n;
+ u8 recv_ack_n;
+
+ /* Is the remote peer ready to receive */
+ u8 remote_ready;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_pending_queue;
+ struct sk_buff_head tx_backlog_queue;
+
+ struct list_head accept_queue;
+ struct sock *parent;
+};
+
+#define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
+#define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev)
+
+#define LLCP_HEADER_SIZE 2
+#define LLCP_SEQUENCE_SIZE 1
+
+/* LLCP versions: 1.1 is 1.0 plus SDP */
+#define LLCP_VERSION_10 0x10
+#define LLCP_VERSION_11 0x11
+
+/* LLCP PDU types */
+#define LLCP_PDU_SYMM 0x0
+#define LLCP_PDU_PAX 0x1
+#define LLCP_PDU_AGF 0x2
+#define LLCP_PDU_UI 0x3
+#define LLCP_PDU_CONNECT 0x4
+#define LLCP_PDU_DISC 0x5
+#define LLCP_PDU_CC 0x6
+#define LLCP_PDU_DM 0x7
+#define LLCP_PDU_FRMR 0x8
+#define LLCP_PDU_SNL 0x9
+#define LLCP_PDU_I 0xc
+#define LLCP_PDU_RR 0xd
+#define LLCP_PDU_RNR 0xe
+
+/* Parameters TLV types */
+#define LLCP_TLV_VERSION 0x1
+#define LLCP_TLV_MIUX 0x2
+#define LLCP_TLV_WKS 0x3
+#define LLCP_TLV_LTO 0x4
+#define LLCP_TLV_RW 0x5
+#define LLCP_TLV_SN 0x6
+#define LLCP_TLV_OPT 0x7
+#define LLCP_TLV_SDREQ 0x8
+#define LLCP_TLV_SDRES 0x9
+#define LLCP_TLV_MAX 0xa
+
+/* Well known LLCP SAP */
+#define LLCP_SAP_SDP 0x1
+#define LLCP_SAP_IP 0x2
+#define LLCP_SAP_OBEX 0x3
+#define LLCP_SAP_SNEP 0x4
+#define LLCP_SAP_MAX 0xff
+
+/* Disconnection reason code */
+#define LLCP_DM_DISC 0x00
+#define LLCP_DM_NOCONN 0x01
+#define LLCP_DM_NOBOUND 0x02
+#define LLCP_DM_REJ 0x03
+
+struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
+ struct nfc_llcp_sock *sock);
+u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
+void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
+
+/* Sock API */
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
+void nfc_llcp_accept_unlink(struct sock *sk);
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
+struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
+
+/* TLV API */
+int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len);
+
+/* Commands API */
+void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
+u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
+int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_symm(struct nfc_dev *dev);
+int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
+int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
+
+/* Socket API */
+int __init nfc_llcp_sock_init(void);
+void nfc_llcp_sock_exit(void);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
new file mode 100644
index 00000000000..f738ccd535f
--- /dev/null
+++ b/net/nfc/llcp/sock.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include "../nfc.h"
+#include "llcp.h"
+
+static struct proto llcp_sock_proto = {
+ .name = "NFC_LLCP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct nfc_llcp_sock),
+};
+
+static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+ struct sock *sk = sock->sk;
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ struct nfc_llcp_local *local;
+ struct nfc_dev *dev;
+ struct sockaddr_nfc_llcp llcp_addr;
+ int len, ret = 0;
+
+ pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
+ if (!addr || addr->sa_family != AF_NFC)
+ return -EINVAL;
+
+ memset(&llcp_addr, 0, sizeof(llcp_addr));
+ len = min_t(unsigned int, sizeof(llcp_addr), alen);
+ memcpy(&llcp_addr, addr, len);
+
+ /* This is going to be a listening socket, dsap must be 0 */
+ if (llcp_addr.dsap != 0)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != LLCP_CLOSED) {
+ ret = -EBADFD;
+ goto error;
+ }
+
+ dev = nfc_get_device(llcp_addr.dev_idx);
+ if (dev == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL) {
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
+ llcp_sock->dev = dev;
+ llcp_sock->local = local;
+ llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+ llcp_sock->service_name_len = min_t(unsigned int,
+ llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+ llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+ llcp_sock->service_name_len, GFP_KERNEL);
+
+ llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ if (llcp_sock->ssap == LLCP_MAX_SAP) {
+ ret = -EADDRINUSE;
+ goto put_dev;
+ }
+
+ local->sockets[llcp_sock->ssap] = llcp_sock;
+
+ pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
+
+ sk->sk_state = LLCP_BOUND;
+
+put_dev:
+ nfc_put_device(dev);
+
+error:
+ release_sock(sk);
+ return ret;
+}
+
+static int llcp_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int ret = 0;
+
+ pr_debug("sk %p backlog %d\n", sk, backlog);
+
+ lock_sock(sk);
+
+ if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
+ || sk->sk_state != LLCP_BOUND) {
+ ret = -EBADFD;
+ goto error;
+ }
+
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+
+ pr_debug("Socket listening\n");
+ sk->sk_state = LLCP_LISTEN;
+
+error:
+ release_sock(sk);
+
+ return ret;
+}
+
+void nfc_llcp_accept_unlink(struct sock *sk)
+{
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+ pr_debug("state %d\n", sk->sk_state);
+
+ list_del_init(&llcp_sock->accept_queue);
+ sk_acceptq_removed(llcp_sock->parent);
+ llcp_sock->parent = NULL;
+
+ sock_put(sk);
+}
+
+void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
+{
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent);
+
+ /* Lock will be free from unlink */
+ sock_hold(sk);
+
+ list_add_tail(&llcp_sock->accept_queue,
+ &llcp_sock_parent->accept_queue);
+ llcp_sock->parent = parent;
+ sk_acceptq_added(parent);
+}
+
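+/* Pop a connected child from the parent's accept queue, grafting it onto newsock */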
+struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
+ struct socket *newsock)
+{
+ struct nfc_llcp_sock *lsk, *n, *llcp_parent;
+ struct sock *sk;
+
+ llcp_parent = nfc_llcp_sock(parent);
+
+ list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
+ accept_queue) {
+ sk = &lsk->sk;
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CLOSED) {
+ release_sock(sk);
+ nfc_llcp_accept_unlink(sk);
+ continue;
+ }
+
+ if (sk->sk_state == LLCP_CONNECTED || !newsock) {
+ nfc_llcp_accept_unlink(sk);
+ if (newsock)
+ sock_graft(sk, newsock);
+
+ release_sock(sk);
+
+ pr_debug("Returning sk state %d\n", sk->sk_state);
+
+ return sk;
+ }
+
+ release_sock(sk);
+ }
+
+ return NULL;
+}
+
+static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sock *sk = sock->sk, *new_sk;
+ long timeo;
+ int ret = 0;
+
+ pr_debug("parent %p\n", sk);
+
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_state != LLCP_LISTEN) {
+ ret = -EBADFD;
+ goto error;
+ }
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ /* Wait for an incoming connection. */
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (!timeo) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (ret)
+ goto error;
+
+ newsock->state = SS_CONNECTED;
+
+ pr_debug("new socket %p\n", new_sk);
+
+error:
+ release_sock(sk);
+
+ return ret;
+}
+
+static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *len, int peer)
+{
+ struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *) addr;
+ struct sock *sk = sock->sk;
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+ pr_debug("%p\n", sk);
+
+ addr->sa_family = AF_NFC;
+ *len = sizeof(struct sockaddr_nfc_llcp);
+
+ llcp_addr->dev_idx = llcp_sock->dev->idx;
+ llcp_addr->dsap = llcp_sock->dsap;
+ llcp_addr->ssap = llcp_sock->ssap;
+ llcp_addr->service_name_len = llcp_sock->service_name_len;
+ memcpy(llcp_addr->service_name, llcp_sock->service_name,
+ llcp_addr->service_name_len);
+
+ return 0;
+}
+
+static inline unsigned int llcp_accept_poll(struct sock *parent)
+{
+ struct nfc_llcp_sock *llcp_sock, *n, *parent_sock;
+ struct sock *sk;
+
+ parent_sock = nfc_llcp_sock(parent);
+
+ list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
+ accept_queue) {
+ sk = &llcp_sock->sk;
+
+ if (sk->sk_state == LLCP_CONNECTED)
+ return POLLIN | POLLRDNORM;
+ }
+
+ return 0;
+}
+
+static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ unsigned int mask = 0;
+
+ pr_debug("%p\n", sk);
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
+ if (sk->sk_state == LLCP_LISTEN)
+ return llcp_accept_poll(sk);
+
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ mask |= POLLERR;
+
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ mask |= POLLIN;
+
+ if (sk->sk_state == LLCP_CLOSED)
+ mask |= POLLHUP;
+
+ return mask;
+}
+
+static int llcp_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct nfc_llcp_local *local;
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+ if (!sk)
+ return 0;
+
+ pr_debug("%p\n", sk);
+
+ local = llcp_sock->local;
+ if (local == NULL)
+ return -ENODEV;
+
+ mutex_lock(&local->socket_lock);
+
+ if (llcp_sock == local->sockets[llcp_sock->ssap]) {
+ local->sockets[llcp_sock->ssap] = NULL;
+ } else {
+ struct nfc_llcp_sock *parent, *s, *n;
+
+ parent = local->sockets[llcp_sock->ssap];
+
+ list_for_each_entry_safe(s, n, &parent->list, list)
+ if (llcp_sock == s) {
+ list_del(&s->list);
+ break;
+ }
+
+ }
+
+ mutex_unlock(&local->socket_lock);
+
+ lock_sock(sk);
+
+ /* Send a DISC */
+ if (sk->sk_state == LLCP_CONNECTED)
+ nfc_llcp_disconnect(llcp_sock);
+
+ if (sk->sk_state == LLCP_LISTEN) {
+ struct nfc_llcp_sock *lsk, *n;
+ struct sock *accept_sk;
+
+ list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
+ accept_queue) {
+ accept_sk = &lsk->sk;
+ lock_sock(accept_sk);
+
+ nfc_llcp_disconnect(lsk);
+ nfc_llcp_accept_unlink(accept_sk);
+
+ release_sock(accept_sk);
+
+ sock_set_flag(accept_sk, SOCK_DEAD);
+ sock_orphan(accept_sk);
+ sock_put(accept_sk);
+ }
+ }
+
+ /* Freeing the SAP */
+ if ((sk->sk_state == LLCP_CONNECTED
+ && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
+ sk->sk_state == LLCP_BOUND ||
+ sk->sk_state == LLCP_LISTEN)
+ nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
+
+ sock_set_flag(sk, SOCK_DEAD);
+
+ release_sock(sk);
+
+ sock_orphan(sk);
+ sock_put(sk);
+
+ return 0;
+}
+
+static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ int len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr;
+ struct nfc_dev *dev;
+ struct nfc_llcp_local *local;
+ int ret = 0;
+
+ pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
+
+ if (!addr || len < sizeof(struct sockaddr_nfc) ||
+ addr->sa_family != AF_NFC) {
+ pr_err("Invalid socket\n");
+ return -EINVAL;
+ }
+
+ if (addr->service_name_len == 0 && addr->dsap == 0) {
+ pr_err("Missing service name or dsap\n");
+ return -EINVAL;
+ }
+
+ pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
+ addr->target_idx, addr->nfc_protocol);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CONNECTED) {
+ ret = -EISCONN;
+ goto error;
+ }
+
+ dev = nfc_get_device(addr->dev_idx);
+ if (dev == NULL) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL) {
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
+ device_lock(&dev->dev);
+ if (dev->dep_link_up == false) {
+ ret = -ENOLINK;
+ device_unlock(&dev->dev);
+ goto put_dev;
+ }
+ device_unlock(&dev->dev);
+
+ if (local->rf_mode == NFC_RF_INITIATOR &&
+ addr->target_idx != local->target_idx) {
+ ret = -ENOLINK;
+ goto put_dev;
+ }
+
+ llcp_sock->dev = dev;
+ llcp_sock->local = local;
+ llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ if (addr->service_name_len == 0)
+ llcp_sock->dsap = addr->dsap;
+ else
+ llcp_sock->dsap = LLCP_SAP_SDP;
+ llcp_sock->nfc_protocol = addr->nfc_protocol;
+ llcp_sock->service_name_len = min_t(unsigned int,
+ addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME);
+ llcp_sock->service_name = kmemdup(addr->service_name,
+ llcp_sock->service_name_len, GFP_KERNEL);
+
+ local->sockets[llcp_sock->ssap] = llcp_sock;
+
+ ret = nfc_llcp_send_connect(llcp_sock);
+ if (ret)
+ goto put_dev;
+
+ sk->sk_state = LLCP_CONNECTED;
+
+ release_sock(sk);
+ return 0;
+
+put_dev:
+ nfc_put_device(dev);
+
+error:
+ release_sock(sk);
+ return ret;
+}
+
+static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ int noblock = flags & MSG_DONTWAIT;
+ struct sock *sk = sock->sk;
+ unsigned int copied, rlen;
+ struct sk_buff *skb, *cskb;
+ int err = 0;
+
+ pr_debug("%p %zu\n", sk, len);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == LLCP_CLOSED &&
+ skb_queue_empty(&sk->sk_receive_queue)) {
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ pr_err("Recv datagram failed state %d %d %d",
+ sk->sk_state, err, sock_error(sk));
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 0;
+
+ return err;
+ }
+
+ rlen = skb->len; /* real length of skb */
+ copied = min_t(unsigned int, rlen, len);
+
+ cskb = skb;
+ if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ return -EFAULT;
+ }
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+
+ /* SOCK_STREAM: re-queue skb if it contains unreceived data */
+ if (sk->sk_type == SOCK_STREAM) {
+ skb_pull(skb, copied);
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ goto done;
+ }
+ }
+
+ kfree_skb(skb);
+ }
+
+ /* XXX Queue backlogged skbs */
+
+done:
+ /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+ copied = rlen;
+
+ return copied;
+}
+
+static const struct proto_ops llcp_sock_ops = {
+ .family = PF_NFC,
+ .owner = THIS_MODULE,
+ .bind = llcp_sock_bind,
+ .connect = llcp_sock_connect,
+ .release = llcp_sock_release,
+ .socketpair = sock_no_socketpair,
+ .accept = llcp_sock_accept,
+ .getname = llcp_sock_getname,
+ .poll = llcp_sock_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = llcp_sock_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = llcp_sock_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static void llcp_sock_destruct(struct sock *sk)
+{
+ struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+
+ pr_debug("%p\n", sk);
+
+ if (sk->sk_state == LLCP_CONNECTED)
+ nfc_put_device(llcp_sock->dev);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+
+ nfc_llcp_sock_free(llcp_sock);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_err("Freeing alive NFC LLCP socket %p\n", sk);
+ return;
+ }
+}
+
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+{
+ struct sock *sk;
+ struct nfc_llcp_sock *llcp_sock;
+
+ sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+ if (!sk)
+ return NULL;
+
+ llcp_sock = nfc_llcp_sock(sk);
+
+ sock_init_data(sock, sk);
+ sk->sk_state = LLCP_CLOSED;
+ sk->sk_protocol = NFC_SOCKPROTO_LLCP;
+ sk->sk_type = type;
+ sk->sk_destruct = llcp_sock_destruct;
+
+ llcp_sock->ssap = 0;
+ llcp_sock->dsap = LLCP_SAP_SDP;
+ llcp_sock->send_n = llcp_sock->send_ack_n = 0;
+ llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
+ llcp_sock->remote_ready = 1;
+ skb_queue_head_init(&llcp_sock->tx_queue);
+ skb_queue_head_init(&llcp_sock->tx_pending_queue);
+ skb_queue_head_init(&llcp_sock->tx_backlog_queue);
+ INIT_LIST_HEAD(&llcp_sock->list);
+ INIT_LIST_HEAD(&llcp_sock->accept_queue);
+
+ if (sock != NULL)
+ sock->state = SS_UNCONNECTED;
+
+ return sk;
+}
+
+void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
+{
+ kfree(sock->service_name);
+
+ skb_queue_purge(&sock->tx_queue);
+ skb_queue_purge(&sock->tx_pending_queue);
+ skb_queue_purge(&sock->tx_backlog_queue);
+
+ list_del_init(&sock->accept_queue);
+
+ sock->parent = NULL;
+}
+
+static int llcp_sock_create(struct net *net, struct socket *sock,
+ const struct nfc_protocol *nfc_proto)
+{
+ struct sock *sk;
+
+ pr_debug("%p\n", sock);
+
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM)
+ return -ESOCKTNOSUPPORT;
+
+ sock->ops = &llcp_sock_ops;
+
+ sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+ if (sk == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct nfc_protocol llcp_nfc_proto = {
+ .id = NFC_SOCKPROTO_LLCP,
+ .proto = &llcp_sock_proto,
+ .owner = THIS_MODULE,
+ .create = llcp_sock_create
+};
+
+int __init nfc_llcp_sock_init(void)
+{
+ return nfc_proto_register(&llcp_nfc_proto);
+}
+
+void nfc_llcp_sock_exit(void)
+{
+ nfc_proto_unregister(&llcp_nfc_proto);
+}
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c657876..7650139a1a0 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -69,7 +71,7 @@ static int __nci_request(struct nci_dev *ndev,
__u32 timeout)
{
int rc = 0;
- unsigned long completion_rc;
+ long completion_rc;
ndev->req_status = NCI_REQ_PEND;
@@ -79,7 +81,7 @@ static int __nci_request(struct nci_dev *ndev,
&ndev->req_completion,
timeout);
- nfc_dbg("wait_for_completion return %ld", completion_rc);
+ pr_debug("wait_for_completion return %ld\n", completion_rc);
if (completion_rc > 0) {
switch (ndev->req_status) {
@@ -96,8 +98,8 @@ static int __nci_request(struct nci_dev *ndev,
break;
}
} else {
- nfc_err("wait_for_completion_interruptible_timeout failed %ld",
- completion_rc);
+ pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
+ completion_rc);
rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
}
@@ -126,7 +128,10 @@ static inline int nci_request(struct nci_dev *ndev,
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
- nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+ struct nci_core_reset_cmd cmd;
+
+ cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
@@ -136,17 +141,11 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
- struct nci_core_conn_create_cmd conn_cmd;
struct nci_rf_disc_map_cmd cmd;
struct disc_map_config *cfg = cmd.mapping_configs;
__u8 *num = &cmd.num_mapping_configs;
int i;
- /* create static rf connection */
- conn_cmd.target_handle = 0;
- conn_cmd.num_target_specific_params = 0;
- nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
-
/* set rf mapping configurations */
*num = 0;
@@ -155,14 +154,16 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
if (ndev->supported_rf_interfaces[i] ==
NCI_RF_INTERFACE_ISO_DEP) {
cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
- cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
- cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
+ NCI_DISC_MAP_MODE_LISTEN;
+ cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
(*num)++;
} else if (ndev->supported_rf_interfaces[i] ==
NCI_RF_INTERFACE_NFC_DEP) {
cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
- cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
- cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
+ NCI_DISC_MAP_MODE_LISTEN;
+ cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
(*num)++;
}
@@ -187,16 +188,16 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
|| protocols & NFC_PROTO_MIFARE_MASK
|| protocols & NFC_PROTO_ISO14443_MASK
|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
- cmd.disc_configs[cmd.num_disc_configs].type =
- NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_A_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
(protocols & NFC_PROTO_ISO14443_MASK)) {
- cmd.disc_configs[cmd.num_disc_configs].type =
- NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_B_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
@@ -204,8 +205,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
(protocols & NFC_PROTO_FELICA_MASK
|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
- cmd.disc_configs[cmd.num_disc_configs].type =
- NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_F_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
cmd.num_disc_configs++;
}
@@ -326,8 +327,6 @@ static void nci_cmd_timer(unsigned long arg)
{
struct nci_dev *ndev = (void *) arg;
- nfc_dbg("entry");
-
atomic_set(&ndev->cmd_cnt, 1);
queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
@@ -336,8 +335,6 @@ static int nci_dev_up(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry");
-
return nci_open_device(ndev);
}
@@ -345,8 +342,6 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry");
-
return nci_close_device(ndev);
}
@@ -355,20 +350,18 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dbg("entry");
-
if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
- nfc_err("unable to start poll, since poll is already active");
+ pr_err("unable to start poll, since poll is already active\n");
return -EBUSY;
}
if (ndev->target_active_prot) {
- nfc_err("there is an active target");
+ pr_err("there is an active target\n");
return -EBUSY;
}
if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
- nfc_dbg("target is active, implicitly deactivate...");
+ pr_debug("target is active, implicitly deactivate...\n");
rc = nci_request(ndev, nci_rf_deactivate_req, 0,
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
@@ -389,10 +382,8 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry");
-
if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
- nfc_err("unable to stop poll, since poll is not active");
+ pr_err("unable to stop poll, since poll is not active\n");
return;
}
@@ -405,21 +396,21 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+ pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
- nfc_err("there is no available target to activate");
+ pr_err("there is no available target to activate\n");
return -EINVAL;
}
if (ndev->target_active_prot) {
- nfc_err("there is already an active target");
+ pr_err("there is already an active target\n");
return -EBUSY;
}
if (!(ndev->target_available_prots & (1 << protocol))) {
- nfc_err("target does not support the requested protocol 0x%x",
- protocol);
+ pr_err("target does not support the requested protocol 0x%x\n",
+ protocol);
return -EINVAL;
}
@@ -433,10 +424,10 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry, target_idx %d", target_idx);
+ pr_debug("target_idx %d\n", target_idx);
if (!ndev->target_active_prot) {
- nfc_err("unable to deactivate target, no active target");
+ pr_err("unable to deactivate target, no active target\n");
return;
}
@@ -456,10 +447,10 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+ pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
if (!ndev->target_active_prot) {
- nfc_err("unable to exchange data, no active target");
+ pr_err("unable to exchange data, no active target\n");
return -EINVAL;
}
@@ -470,7 +461,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
ndev->data_exchange_cb = cb;
ndev->data_exchange_cb_context = cb_context;
- rc = nci_send_data(ndev, ndev->conn_id, skb);
+ rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
if (rc)
clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
@@ -502,7 +493,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
{
struct nci_dev *ndev;
- nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+ pr_debug("supported_protocols 0x%x\n", supported_protocols);
if (!ops->open || !ops->close || !ops->send)
return NULL;
@@ -542,8 +533,6 @@ EXPORT_SYMBOL(nci_allocate_device);
*/
void nci_free_device(struct nci_dev *ndev)
{
- nfc_dbg("entry");
-
nfc_free_device(ndev->nfc_dev);
kfree(ndev);
}
@@ -560,8 +549,6 @@ int nci_register_device(struct nci_dev *ndev)
struct device *dev = &ndev->nfc_dev->dev;
char name[32];
- nfc_dbg("entry");
-
rc = nfc_register_device(ndev->nfc_dev);
if (rc)
goto exit;
@@ -624,8 +611,6 @@ EXPORT_SYMBOL(nci_register_device);
*/
void nci_unregister_device(struct nci_dev *ndev)
{
- nfc_dbg("entry");
-
nci_close_device(ndev);
destroy_workqueue(ndev->cmd_wq);
@@ -645,7 +630,7 @@ int nci_recv_frame(struct sk_buff *skb)
{
struct nci_dev *ndev = (struct nci_dev *) skb->dev;
- nfc_dbg("entry, len %d", skb->len);
+ pr_debug("len %d\n", skb->len);
if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
&& !test_bit(NCI_INIT, &ndev->flags))) {
@@ -665,7 +650,7 @@ static int nci_send_frame(struct sk_buff *skb)
{
struct nci_dev *ndev = (struct nci_dev *) skb->dev;
- nfc_dbg("entry, len %d", skb->len);
+ pr_debug("len %d\n", skb->len);
if (!ndev) {
kfree_skb(skb);
@@ -684,11 +669,11 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
struct nci_ctrl_hdr *hdr;
struct sk_buff *skb;
- nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+ pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
if (!skb) {
- nfc_err("no memory for command");
+ pr_err("no memory for command\n");
return -ENOMEM;
}
@@ -718,7 +703,7 @@ static void nci_tx_work(struct work_struct *work)
struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
struct sk_buff *skb;
- nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+ pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
/* Send queued tx data */
while (atomic_read(&ndev->credits_cnt)) {
@@ -726,12 +711,15 @@ static void nci_tx_work(struct work_struct *work)
if (!skb)
return;
- atomic_dec(&ndev->credits_cnt);
+ /* Check if data flow control is used */
+ if (atomic_read(&ndev->credits_cnt) !=
+ NCI_DATA_FLOW_CONTROL_NOT_USED)
+ atomic_dec(&ndev->credits_cnt);
- nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
- nci_pbf(skb->data),
- nci_conn_id(skb->data),
- nci_plen(skb->data));
+ pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
nci_send_frame(skb);
}
@@ -760,7 +748,7 @@ static void nci_rx_work(struct work_struct *work)
break;
default:
- nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+ pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
kfree_skb(skb);
break;
}
@@ -774,7 +762,7 @@ static void nci_cmd_work(struct work_struct *work)
struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
struct sk_buff *skb;
- nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+ pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
/* Send queued command */
if (atomic_read(&ndev->cmd_cnt)) {
@@ -784,11 +772,11 @@ static void nci_cmd_work(struct work_struct *work)
atomic_dec(&ndev->cmd_cnt);
- nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
- nci_pbf(skb->data),
- nci_opcode_gid(nci_opcode(skb->data)),
- nci_opcode_oid(nci_opcode(skb->data)),
- nci_plen(skb->data));
+ pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+ nci_pbf(skb->data),
+ nci_opcode_gid(nci_opcode(skb->data)),
+ nci_opcode_oid(nci_opcode(skb->data)),
+ nci_plen(skb->data));
nci_send_frame(skb);
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5ed90fc1a9..e5756b30e60 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
@@ -40,7 +42,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
data_exchange_cb_t cb = ndev->data_exchange_cb;
void *cb_context = ndev->data_exchange_cb_context;
- nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+ pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
if (cb) {
ndev->data_exchange_cb = NULL;
@@ -49,7 +51,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
/* forward skb to nfc core */
cb(cb_context, skb, err);
} else if (skb) {
- nfc_err("no rx callback, dropping rx data...");
+ pr_err("no rx callback, dropping rx data...\n");
/* no waiting callback, free skb */
kfree_skb(skb);
@@ -90,12 +92,13 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
int frag_len;
int rc = 0;
- nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+ pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
__skb_queue_head_init(&frags_q);
while (total_len) {
- frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+ frag_len =
+ min_t(int, total_len, ndev->max_data_pkt_payload_size);
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
@@ -118,8 +121,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
data += frag_len;
total_len -= frag_len;
- nfc_dbg("frag_len %d, remaining total_len %d",
- frag_len, total_len);
+ pr_debug("frag_len %d, remaining total_len %d\n",
+ frag_len, total_len);
}
/* queue all fragments atomically */
@@ -148,10 +151,10 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
{
int rc = 0;
- nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+ pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
/* check if the packet need to be fragmented */
- if (skb->len <= ndev->max_pkt_payload_size) {
+ if (skb->len <= ndev->max_data_pkt_payload_size) {
/* no need to fragment packet */
nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
@@ -160,7 +163,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
/* fragment packet and queue the fragments */
rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
if (rc) {
- nfc_err("failed to fragment tx data packet");
+ pr_err("failed to fragment tx data packet\n");
goto free_exit;
}
}
@@ -190,7 +193,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
/* first, make enough room for the already accumulated data */
if (skb_cow_head(skb, reassembly_len)) {
- nfc_err("error adding room for accumulated rx data");
+ pr_err("error adding room for accumulated rx data\n");
kfree_skb(skb);
skb = 0;
@@ -227,19 +230,19 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u8 pbf = nci_pbf(skb->data);
- nfc_dbg("entry, len %d", skb->len);
+ pr_debug("len %d\n", skb->len);
- nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
- nci_pbf(skb->data),
- nci_conn_id(skb->data),
- nci_plen(skb->data));
+ pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);
if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
/* frame I/F => remove the status byte */
- nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+ pr_debug("NFC_PROTO_MIFARE => remove the status byte\n");
skb_trim(skb, (skb->len - 1));
}
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index b19dc2fa90e..6a63e5eb483 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -42,12 +42,9 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_REJECTED:
return -EBUSY;
- case NCI_STATUS_MESSAGE_CORRUPTED:
+ case NCI_STATUS_RF_FRAME_CORRUPTED:
return -EBADMSG;
- case NCI_STATUS_BUFFER_FULL:
- return -ENOBUFS;
-
case NCI_STATUS_NOT_INITIALIZED:
return -EHOSTDOWN;
@@ -80,12 +77,6 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
return -ETIMEDOUT;
- case NCI_STATUS_RF_LINK_LOSS_ERROR:
- return -ENOLINK;
-
- case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
- return -EDQUOT;
-
case NCI_STATUS_FAILED:
default:
return -ENOSYS;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 96633f5cda4..b16a8dc2afb 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -43,18 +45,21 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
int i;
- nfc_dbg("entry, num_entries %d", ntf->num_entries);
+ pr_debug("num_entries %d\n", ntf->num_entries);
if (ntf->num_entries > NCI_MAX_NUM_CONN)
ntf->num_entries = NCI_MAX_NUM_CONN;
/* update the credits */
for (i = 0; i < ntf->num_entries; i++) {
- nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
- ntf->conn_entries[i].conn_id,
- ntf->conn_entries[i].credits);
+ ntf->conn_entries[i].conn_id =
+ nci_conn_id(&ntf->conn_entries[i].conn_id);
+
+ pr_debug("entry[%d]: conn_id %d, credits %d\n",
+ i, ntf->conn_entries[i].conn_id,
+ ntf->conn_entries[i].credits);
- if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+ if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
/* found static rf connection */
atomic_add(ntf->conn_entries[i].credits,
&ndev->credits_cnt);
@@ -66,31 +71,34 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
queue_work(ndev->tx_wq, &ndev->tx_work);
}
-static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
+ struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
- nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
+ ntf->conn_id = nci_conn_id(&ntf->conn_id);
+
+ pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id);
+
+ /* complete the data exchange transaction, if exists */
+ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ nci_data_exchange_complete(ndev, NULL, -EIO);
}
-static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
- struct nci_rf_activate_ntf *ntf, __u8 *data)
+static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct rf_tech_specific_params_nfca_poll *nfca_poll;
- struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
- nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
nfca_poll->nfcid1_len = *data++;
- nfc_dbg("sens_res 0x%x, nfcid1_len %d",
- nfca_poll->sens_res,
- nfca_poll->nfcid1_len);
+ pr_debug("sens_res 0x%x, nfcid1_len %d\n",
+ nfca_poll->sens_res, nfca_poll->nfcid1_len);
memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
data += nfca_poll->nfcid1_len;
@@ -100,32 +108,32 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
if (nfca_poll->sel_res_len != 0)
nfca_poll->sel_res = *data++;
- ntf->rf_interface_type = *data++;
- ntf->activation_params_len = *data++;
+ pr_debug("sel_res_len %d, sel_res 0x%x\n",
+ nfca_poll->sel_res_len,
+ nfca_poll->sel_res);
- nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
- nfca_poll->sel_res_len,
- nfca_poll->sel_res,
- ntf->rf_interface_type,
- ntf->activation_params_len);
+ return data;
+}
- switch (ntf->rf_interface_type) {
- case NCI_RF_INTERFACE_ISO_DEP:
- nfca_poll_iso_dep->rats_res_len = *data++;
- if (nfca_poll_iso_dep->rats_res_len > 0) {
- memcpy(nfca_poll_iso_dep->rats_res,
+static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+ struct activation_params_nfca_poll_iso_dep *nfca_poll;
+
+ switch (ntf->activation_rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
+ nfca_poll->rats_res_len = *data++;
+ if (nfca_poll->rats_res_len > 0) {
+ memcpy(nfca_poll->rats_res,
data,
- nfca_poll_iso_dep->rats_res_len);
+ nfca_poll->rats_res_len);
}
break;
- case NCI_RF_INTERFACE_FRAME:
- /* no activation params */
- break;
-
default:
- nfc_err("unsupported rf_interface_type 0x%x",
- ntf->rf_interface_type);
+ pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+ ntf->activation_rf_tech_and_mode);
return -EPROTO;
}
@@ -133,7 +141,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
}
static void nci_target_found(struct nci_dev *ndev,
- struct nci_rf_activate_ntf *ntf)
+ struct nci_rf_intf_activated_ntf *ntf)
{
struct nfc_target nfc_tgt;
@@ -141,66 +149,121 @@ static void nci_target_found(struct nci_dev *ndev,
nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+ else
+ nfc_tgt.supported_protocols = 0;
nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
+ nfc_tgt.nfcid1_len = ntf->rf_tech_specific_params.nfca_poll.nfcid1_len;
+ if (nfc_tgt.nfcid1_len > 0) {
+ memcpy(nfc_tgt.nfcid1,
+ ntf->rf_tech_specific_params.nfca_poll.nfcid1,
+ nfc_tgt.nfcid1_len);
+ }
if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
- nfc_dbg("the target found does not have the desired protocol");
+ pr_debug("the target found does not have the desired protocol\n");
return;
}
- nfc_dbg("new target found, supported_protocols 0x%x",
- nfc_tgt.supported_protocols);
+ pr_debug("new target found, supported_protocols 0x%x\n",
+ nfc_tgt.supported_protocols);
ndev->target_available_prots = nfc_tgt.supported_protocols;
+ ndev->max_data_pkt_payload_size = ntf->max_data_pkt_payload_size;
+ ndev->initial_num_credits = ntf->initial_num_credits;
+
+ /* set the available credits to initial value */
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
}
-static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_rf_activate_ntf ntf;
+ struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
- int rc = -1;
+ int err = 0;
clear_bit(NCI_DISCOVERY, &ndev->flags);
set_bit(NCI_POLL_ACTIVE, &ndev->flags);
- ntf.target_handle = *data++;
+ ntf.rf_discovery_id = *data++;
+ ntf.rf_interface = *data++;
ntf.rf_protocol = *data++;
- ntf.rf_tech_and_mode = *data++;
+ ntf.activation_rf_tech_and_mode = *data++;
+ ntf.max_data_pkt_payload_size = *data++;
+ ntf.initial_num_credits = *data++;
ntf.rf_tech_specific_params_len = *data++;
- nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
- ntf.target_handle,
- ntf.rf_protocol,
- ntf.rf_tech_and_mode,
- ntf.rf_tech_specific_params_len);
-
- switch (ntf.rf_tech_and_mode) {
- case NCI_NFC_A_PASSIVE_POLL_MODE:
- rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
- data);
- break;
+ pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
+ pr_debug("rf_interface 0x%x\n", ntf.rf_interface);
+ pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
+ pr_debug("activation_rf_tech_and_mode 0x%x\n",
+ ntf.activation_rf_tech_and_mode);
+ pr_debug("max_data_pkt_payload_size 0x%x\n",
+ ntf.max_data_pkt_payload_size);
+ pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits);
+ pr_debug("rf_tech_specific_params_len %d\n",
+ ntf.rf_tech_specific_params_len);
+
+ if (ntf.rf_tech_specific_params_len > 0) {
+ switch (ntf.activation_rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfca_passive_poll(ndev,
+ &ntf, data);
+ break;
+
+ default:
+ pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+ ntf.activation_rf_tech_and_mode);
+ return;
+ }
+ }
- default:
- nfc_err("unsupported rf_tech_and_mode 0x%x",
- ntf.rf_tech_and_mode);
- return;
+ ntf.data_exch_rf_tech_and_mode = *data++;
+ ntf.data_exch_tx_bit_rate = *data++;
+ ntf.data_exch_rx_bit_rate = *data++;
+ ntf.activation_params_len = *data++;
+
+ pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
+ ntf.data_exch_rf_tech_and_mode);
+ pr_debug("data_exch_tx_bit_rate 0x%x\n",
+ ntf.data_exch_tx_bit_rate);
+ pr_debug("data_exch_rx_bit_rate 0x%x\n",
+ ntf.data_exch_rx_bit_rate);
+ pr_debug("activation_params_len %d\n",
+ ntf.activation_params_len);
+
+ if (ntf.activation_params_len > 0) {
+ switch (ntf.rf_interface) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+ err = nci_extract_activation_params_iso_dep(ndev,
+ &ntf, data);
+ break;
+
+ case NCI_RF_INTERFACE_FRAME:
+ /* no activation params */
+ break;
+
+ default:
+ pr_err("unsupported rf_interface 0x%x\n",
+ ntf.rf_interface);
+ return;
+ }
}
- if (!rc)
+ if (!err)
nci_target_found(ndev, &ntf);
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
- __u8 type = skb->data[0];
+ struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
- nfc_dbg("entry, type 0x%x", type);
+ pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
ndev->target_active_prot = 0;
@@ -223,11 +286,11 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u16 ntf_opcode = nci_opcode(skb->data);
- nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
- nci_pbf(skb->data),
- nci_opcode_gid(ntf_opcode),
- nci_opcode_oid(ntf_opcode),
- nci_plen(skb->data));
+ pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+ nci_pbf(skb->data),
+ nci_opcode_gid(ntf_opcode),
+ nci_opcode_oid(ntf_opcode),
+ nci_plen(skb->data));
/* strip the nci control header */
skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -237,12 +300,12 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_conn_credits_ntf_packet(ndev, skb);
break;
- case NCI_OP_RF_FIELD_INFO_NTF:
- nci_rf_field_info_ntf_packet(ndev, skb);
+ case NCI_OP_CORE_INTF_ERROR_NTF:
+ nci_core_conn_intf_error_ntf_packet(ndev, skb);
break;
- case NCI_OP_RF_ACTIVATE_NTF:
- nci_rf_activate_ntf_packet(ndev, skb);
+ case NCI_OP_RF_INTF_ACTIVATED_NTF:
+ nci_rf_intf_activated_ntf_packet(ndev, skb);
break;
case NCI_OP_RF_DEACTIVATE_NTF:
@@ -250,7 +313,7 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
break;
default:
- nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+ pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
break;
}
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 0403d4cd091..2840ae2f361 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -40,12 +42,13 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
struct nci_core_reset_rsp *rsp = (void *) skb->data;
- nfc_dbg("entry, status 0x%x", rsp->status);
+ pr_debug("status 0x%x\n", rsp->status);
- if (rsp->status == NCI_STATUS_OK)
+ if (rsp->status == NCI_STATUS_OK) {
ndev->nci_ver = rsp->nci_ver;
-
- nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+ pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+ rsp->nci_ver, rsp->config_status);
+ }
nci_req_complete(ndev, rsp->status);
}
@@ -55,16 +58,16 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
struct nci_core_init_rsp_2 *rsp_2;
- nfc_dbg("entry, status 0x%x", rsp_1->status);
+ pr_debug("status 0x%x\n", rsp_1->status);
if (rsp_1->status != NCI_STATUS_OK)
- return;
+ goto exit;
ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
if (ndev->num_supported_rf_interfaces >
- NCI_MAX_SUPPORTED_RF_INTERFACES) {
+ NCI_MAX_SUPPORTED_RF_INTERFACES) {
ndev->num_supported_rf_interfaces =
NCI_MAX_SUPPORTED_RF_INTERFACES;
}
@@ -73,76 +76,56 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
rsp_1->supported_rf_interfaces,
ndev->num_supported_rf_interfaces);
- rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+ rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
ndev->max_logical_connections =
rsp_2->max_logical_connections;
ndev->max_routing_table_size =
__le16_to_cpu(rsp_2->max_routing_table_size);
- ndev->max_control_packet_payload_length =
- rsp_2->max_control_packet_payload_length;
- ndev->rf_sending_buffer_size =
- __le16_to_cpu(rsp_2->rf_sending_buffer_size);
- ndev->rf_receiving_buffer_size =
- __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
- ndev->manufacturer_id =
- __le16_to_cpu(rsp_2->manufacturer_id);
-
- nfc_dbg("nfcc_features 0x%x",
- ndev->nfcc_features);
- nfc_dbg("num_supported_rf_interfaces %d",
- ndev->num_supported_rf_interfaces);
- nfc_dbg("supported_rf_interfaces[0] 0x%x",
- ndev->supported_rf_interfaces[0]);
- nfc_dbg("supported_rf_interfaces[1] 0x%x",
- ndev->supported_rf_interfaces[1]);
- nfc_dbg("supported_rf_interfaces[2] 0x%x",
- ndev->supported_rf_interfaces[2]);
- nfc_dbg("supported_rf_interfaces[3] 0x%x",
- ndev->supported_rf_interfaces[3]);
- nfc_dbg("max_logical_connections %d",
- ndev->max_logical_connections);
- nfc_dbg("max_routing_table_size %d",
- ndev->max_routing_table_size);
- nfc_dbg("max_control_packet_payload_length %d",
- ndev->max_control_packet_payload_length);
- nfc_dbg("rf_sending_buffer_size %d",
- ndev->rf_sending_buffer_size);
- nfc_dbg("rf_receiving_buffer_size %d",
- ndev->rf_receiving_buffer_size);
- nfc_dbg("manufacturer_id 0x%x",
- ndev->manufacturer_id);
-
+ ndev->max_ctrl_pkt_payload_len =
+ rsp_2->max_ctrl_pkt_payload_len;
+ ndev->max_size_for_large_params =
+ __le16_to_cpu(rsp_2->max_size_for_large_params);
+ ndev->manufact_id =
+ rsp_2->manufact_id;
+ ndev->manufact_specific_info =
+ __le32_to_cpu(rsp_2->manufact_specific_info);
+
+ pr_debug("nfcc_features 0x%x\n",
+ ndev->nfcc_features);
+ pr_debug("num_supported_rf_interfaces %d\n",
+ ndev->num_supported_rf_interfaces);
+ pr_debug("supported_rf_interfaces[0] 0x%x\n",
+ ndev->supported_rf_interfaces[0]);
+ pr_debug("supported_rf_interfaces[1] 0x%x\n",
+ ndev->supported_rf_interfaces[1]);
+ pr_debug("supported_rf_interfaces[2] 0x%x\n",
+ ndev->supported_rf_interfaces[2]);
+ pr_debug("supported_rf_interfaces[3] 0x%x\n",
+ ndev->supported_rf_interfaces[3]);
+ pr_debug("max_logical_connections %d\n",
+ ndev->max_logical_connections);
+ pr_debug("max_routing_table_size %d\n",
+ ndev->max_routing_table_size);
+ pr_debug("max_ctrl_pkt_payload_len %d\n",
+ ndev->max_ctrl_pkt_payload_len);
+ pr_debug("max_size_for_large_params %d\n",
+ ndev->max_size_for_large_params);
+ pr_debug("manufact_id 0x%x\n",
+ ndev->manufact_id);
+ pr_debug("manufact_specific_info 0x%x\n",
+ ndev->manufact_specific_info);
+
+exit:
nci_req_complete(ndev, rsp_1->status);
}
-static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
-{
- struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
-
- nfc_dbg("entry, status 0x%x", rsp->status);
-
- if (rsp->status != NCI_STATUS_OK)
- return;
-
- ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
- ndev->initial_num_credits = rsp->initial_num_credits;
- ndev->conn_id = rsp->conn_id;
-
- atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
-
- nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
- nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
- nfc_dbg("conn_id %d", ndev->conn_id);
-}
-
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
__u8 status = skb->data[0];
- nfc_dbg("entry, status 0x%x", status);
+ pr_debug("status 0x%x\n", status);
nci_req_complete(ndev, status);
}
@@ -151,7 +134,7 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
__u8 status = skb->data[0];
- nfc_dbg("entry, status 0x%x", status);
+ pr_debug("status 0x%x\n", status);
if (status == NCI_STATUS_OK)
set_bit(NCI_DISCOVERY, &ndev->flags);
@@ -164,7 +147,7 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
{
__u8 status = skb->data[0];
- nfc_dbg("entry, status 0x%x", status);
+ pr_debug("status 0x%x\n", status);
clear_bit(NCI_DISCOVERY, &ndev->flags);
@@ -178,11 +161,11 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
/* we got a rsp, stop the cmd timer */
del_timer(&ndev->cmd_timer);
- nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
- nci_pbf(skb->data),
- nci_opcode_gid(rsp_opcode),
- nci_opcode_oid(rsp_opcode),
- nci_plen(skb->data));
+ pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+ nci_pbf(skb->data),
+ nci_opcode_gid(rsp_opcode),
+ nci_opcode_oid(rsp_opcode),
+ nci_plen(skb->data));
/* strip the nci control header */
skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -196,10 +179,6 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_init_rsp_packet(ndev, skb);
break;
- case NCI_OP_CORE_CONN_CREATE_RSP:
- nci_core_conn_create_rsp_packet(ndev, skb);
- break;
-
case NCI_OP_RF_DISCOVER_MAP_RSP:
nci_rf_disc_map_rsp_packet(ndev, skb);
break;
@@ -213,7 +192,7 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
break;
default:
- nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+ pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
break;
}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 03f8818e1f1..6989dfa28ee 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -21,6 +21,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <net/genetlink.h>
#include <linux/nfc.h>
#include <linux/slab.h>
@@ -44,6 +46,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
.len = NFC_DEVICE_NAME_MAXSIZE },
[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+ [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
+ [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
};
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -51,8 +55,6 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
{
void *hdr;
- nfc_dbg("entry");
-
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
&nfc_genl_family, flags, NFC_CMD_GET_TARGET);
if (!hdr)
@@ -65,6 +67,9 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
target->supported_protocols);
NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
+ if (target->nfcid1_len > 0)
+ NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+ target->nfcid1);
return genlmsg_end(msg, hdr);
@@ -105,8 +110,6 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
int rc;
- nfc_dbg("entry");
-
if (!dev) {
dev = __get_device_from_cb(cb);
if (IS_ERR(dev))
@@ -139,8 +142,6 @@ static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
{
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
- nfc_dbg("entry");
-
if (dev)
nfc_put_device(dev);
@@ -152,8 +153,6 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
struct sk_buff *msg;
void *hdr;
- nfc_dbg("entry");
-
dev->genl_data.poll_req_pid = 0;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -183,8 +182,6 @@ int nfc_genl_device_added(struct nfc_dev *dev)
struct sk_buff *msg;
void *hdr;
- nfc_dbg("entry");
-
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -216,8 +213,6 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
struct sk_buff *msg;
void *hdr;
- nfc_dbg("entry");
-
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -249,8 +244,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
{
void *hdr;
- nfc_dbg("entry");
-
hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
NFC_CMD_GET_DEVICE);
if (!hdr)
@@ -277,8 +270,6 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
bool first_call = false;
- nfc_dbg("entry");
-
if (!iter) {
first_call = true;
iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
@@ -319,14 +310,81 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_dbg("entry");
-
nfc_device_iter_exit(iter);
kfree(iter);
return 0;
}
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ pr_debug("DEP link is up\n");
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_CMD_DEP_LINK_UP);
+ if (!hdr)
+ goto free_msg;
+
+ NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+ if (rf_mode == NFC_RF_INITIATOR)
+ NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
+ NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
+ NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+
+ genlmsg_end(msg, hdr);
+
+ dev->dep_link_up = true;
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ pr_debug("DEP link is down\n");
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_CMD_DEP_LINK_DOWN);
+ if (!hdr)
+ goto free_msg;
+
+ NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
@@ -334,8 +392,6 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
u32 idx;
int rc = -ENOBUFS;
- nfc_dbg("entry");
-
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
@@ -373,8 +429,6 @@ static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
int rc;
u32 idx;
- nfc_dbg("entry");
-
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
@@ -396,8 +450,6 @@ static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
int rc;
u32 idx;
- nfc_dbg("entry");
-
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
@@ -420,7 +472,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
u32 idx;
u32 protocols;
- nfc_dbg("entry");
+ pr_debug("Poll start\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_PROTOCOLS])
@@ -451,8 +503,6 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
int rc;
u32 idx;
- nfc_dbg("entry");
-
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
@@ -478,6 +528,67 @@ out:
return rc;
}
+static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc, tgt_idx;
+ u32 idx;
+ u8 comm, rf;
+
+ pr_debug("DEP link up\n");
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_COMM_MODE] ||
+ !info->attrs[NFC_ATTR_RF_MODE])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+ if (!info->attrs[NFC_ATTR_TARGET_INDEX])
+ tgt_idx = NFC_TARGET_IDX_ANY;
+ else
+ tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
+
+ comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
+ rf = nla_get_u8(info->attrs[NFC_ATTR_RF_MODE]);
+
+ if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
+ return -EINVAL;
+
+ if (rf != NFC_RF_INITIATOR && rf != NFC_RF_TARGET)
+ return -EINVAL;
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dep_link_up(dev, tgt_idx, comm, rf);
+
+ nfc_put_device(dev);
+
+ return rc;
+}
+
+static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc;
+ u32 idx;
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dep_link_down(dev);
+
+ nfc_put_device(dev);
+ return rc;
+}
+
static struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
@@ -507,6 +618,16 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
+ .cmd = NFC_CMD_DEP_LINK_UP,
+ .doit = nfc_genl_dep_link_up,
+ .policy = nfc_genl_policy,
+ },
+ {
+ .cmd = NFC_CMD_DEP_LINK_DOWN,
+ .doit = nfc_genl_dep_link_down,
+ .policy = nfc_genl_policy,
+ },
+ {
.cmd = NFC_CMD_GET_TARGET,
.dumpit = nfc_genl_dump_targets,
.done = nfc_genl_dump_targets_done,
@@ -524,18 +645,16 @@ static int nfc_genl_rcv_nl_event(struct notifier_block *this,
if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
goto out;
- nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);
+ pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
nfc_device_iter_init(&iter);
dev = nfc_device_iter_next(&iter);
while (dev) {
- mutex_lock(&dev->genl_data.genl_data_mutex);
if (dev->genl_data.poll_req_pid == n->pid) {
nfc_stop_poll(dev);
dev->genl_data.poll_req_pid = 0;
}
- mutex_unlock(&dev->genl_data.genl_data_mutex);
dev = nfc_device_iter_next(&iter);
}
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index d86583f4831..6d28d75995b 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -27,13 +27,6 @@
#include <net/nfc/nfc.h>
#include <net/sock.h>
-__printf(2, 3)
-int nfc_printk(const char *level, const char *fmt, ...);
-
-#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
-#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
-#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)
-
struct nfc_protocol {
int id;
struct proto *proto;
@@ -53,6 +46,60 @@ struct nfc_rawsock {
#define to_rawsock_sk(_tx_work) \
((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
+#ifdef CONFIG_NFC_LLCP
+
+void nfc_llcp_mac_is_down(struct nfc_dev *dev);
+void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode);
+int nfc_llcp_register_device(struct nfc_dev *dev);
+void nfc_llcp_unregister_device(struct nfc_dev *dev);
+int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *general_bytes_len);
+int __init nfc_llcp_init(void);
+void nfc_llcp_exit(void);
+
+#else
+
+static inline void nfc_llcp_mac_is_down(struct nfc_dev *dev)
+{
+}
+
+static inline void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+}
+
+static inline int nfc_llcp_register_device(struct nfc_dev *dev)
+{
+ return 0;
+}
+
+static inline void nfc_llcp_unregister_device(struct nfc_dev *dev)
+{
+}
+
+static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
+{
+ return 0;
+}
+
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+{
+ *gb_len = 0;
+ return NULL;
+}
+
+static inline int nfc_llcp_init(void)
+{
+ return 0;
+}
+
+static inline void nfc_llcp_exit(void)
+{
+}
+
+#endif
+
int __init rawsock_init(void);
void rawsock_exit(void);
@@ -75,6 +122,10 @@ int nfc_genl_targets_found(struct nfc_dev *dev);
int nfc_genl_device_added(struct nfc_dev *dev);
int nfc_genl_device_removed(struct nfc_dev *dev);
+int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
+ u8 comm_mode, u8 rf_mode);
+int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
+
struct nfc_dev *nfc_get_device(unsigned idx);
static inline void nfc_put_device(struct nfc_dev *dev)
@@ -109,6 +160,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
int nfc_stop_poll(struct nfc_dev *dev);
+int nfc_dep_link_up(struct nfc_dev *dev, int target_idx,
+ u8 comm_mode, u8 rf_mode);
+
+int nfc_dep_link_down(struct nfc_dev *dev);
+
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index ee7b2b365ef..2e2f8c6a61f 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -21,6 +21,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
#include <net/tcp_states.h>
#include <linux/nfc.h>
#include <linux/export.h>
@@ -29,7 +31,7 @@
static void rawsock_write_queue_purge(struct sock *sk)
{
- nfc_dbg("sk=%p", sk);
+ pr_debug("sk=%p\n", sk);
spin_lock_bh(&sk->sk_write_queue.lock);
__skb_queue_purge(&sk->sk_write_queue);
@@ -39,7 +41,7 @@ static void rawsock_write_queue_purge(struct sock *sk)
static void rawsock_report_error(struct sock *sk, int err)
{
- nfc_dbg("sk=%p err=%d", sk, err);
+ pr_debug("sk=%p err=%d\n", sk, err);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_err = -err;
@@ -52,7 +54,7 @@ static int rawsock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- nfc_dbg("sock=%p", sock);
+ pr_debug("sock=%p\n", sock);
sock_orphan(sk);
sock_put(sk);
@@ -68,14 +70,14 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
struct nfc_dev *dev;
int rc = 0;
- nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags);
+ pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
if (!addr || len < sizeof(struct sockaddr_nfc) ||
addr->sa_family != AF_NFC)
return -EINVAL;
- nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx,
- addr->target_idx, addr->nfc_protocol);
+ pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
+ addr->dev_idx, addr->target_idx, addr->nfc_protocol);
lock_sock(sk);
@@ -136,7 +138,7 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
BUG_ON(in_irq());
- nfc_dbg("sk=%p err=%d", sk, err);
+ pr_debug("sk=%p err=%d\n", sk, err);
if (err)
goto error;
@@ -172,7 +174,7 @@ static void rawsock_tx_work(struct work_struct *work)
struct sk_buff *skb;
int rc;
- nfc_dbg("sk=%p target_idx=%u", sk, target_idx);
+ pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
rawsock_write_queue_purge(sk);
@@ -198,7 +200,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int rc;
- nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);
+ pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
if (msg->msg_namelen)
return -EOPNOTSUPP;
@@ -206,13 +208,10 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
- skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
- msg->msg_flags & MSG_DONTWAIT, &rc);
- if (!skb)
+ skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
+ if (skb == NULL)
return rc;
- skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
-
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
@@ -239,7 +238,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
int copied;
int rc;
- nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags);
+ pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
skb = skb_recv_datagram(sk, flags, noblock, &rc);
if (!skb)
@@ -283,7 +282,7 @@ static const struct proto_ops rawsock_ops = {
static void rawsock_destruct(struct sock *sk)
{
- nfc_dbg("sk=%p", sk);
+ pr_debug("sk=%p\n", sk);
if (sk->sk_state == TCP_ESTABLISHED) {
nfc_deactivate_target(nfc_rawsock(sk)->dev,
@@ -294,7 +293,7 @@ static void rawsock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
- nfc_err("Freeing alive NFC raw socket %p", sk);
+ pr_err("Freeing alive NFC raw socket %p\n", sk);
return;
}
}
@@ -304,14 +303,14 @@ static int rawsock_create(struct net *net, struct socket *sock,
{
struct sock *sk;
- nfc_dbg("sock=%p", sock);
+ pr_debug("sock=%p\n", sock);
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &rawsock_ops;
- sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto);
+ sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
if (!sk)
return -ENOMEM;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
new file mode 100644
index 00000000000..d9ea33c361b
--- /dev/null
+++ b/net/openvswitch/Kconfig
@@ -0,0 +1,28 @@
+#
+# Open vSwitch
+#
+
+config OPENVSWITCH
+ tristate "Open vSwitch"
+ ---help---
+ Open vSwitch is a multilayer Ethernet switch targeted at virtualized
+ environments. In addition to supporting a variety of features
+ expected in a traditional hardware switch, it enables fine-grained
+ programmatic extension and flow-based control of the network. This
+ control is useful in a wide variety of applications but is
+ particularly important in multi-server virtualization deployments,
+ which are often characterized by highly dynamic endpoints and the
+ need to maintain logical abstractions for multiple tenants.
+
+ The Open vSwitch datapath provides an in-kernel fast path for packet
+ forwarding. It is complemented by a userspace daemon, ovs-vswitchd,
+ which is able to accept configuration from a variety of sources and
+ translate it into packet processing rules.
+
+ See http://openvswitch.org for more information and userspace
+ utilities.
+
+ To compile this code as a module, choose M here: the module will be
+ called openvswitch.
+
+ If unsure, say N.
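
The in-kernel fast path and userspace slow path described in this help text are implemented by ovs_dp_process_received_packet() in net/openvswitch/datapath.c later in this diff. Condensed into a rough, illustrative sketch (the dp_receive_sketch() wrapper is hypothetical; it reuses the helpers declared in datapath.h and flow.h and assumes rcu_read_lock() is held, as on the real receive path), the split looks roughly like this:

/* Hypothetical wrapper, for illustration only. */
static void dp_receive_sketch(struct datapath *dp, struct vport *p,
			      struct sk_buff *skb)
{
	struct sw_flow_key key;
	struct sw_flow *flow;
	int key_len;

	/* Fast path: build a flow key and look it up in the kernel table. */
	if (ovs_flow_extract(skb, p->port_no, &key, &key_len)) {
		kfree_skb(skb);
		return;
	}

	flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
	if (!flow) {
		/* Slow path: no flow installed yet; hand the packet to
		 * ovs-vswitchd as an OVS_PACKET_CMD_MISS upcall so it can
		 * decide and install a flow for subsequent packets. */
		struct dp_upcall_info upcall;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = &key;
		upcall.userdata = NULL;
		upcall.pid = p->upcall_pid;
		ovs_dp_upcall(dp, skb, &upcall);
		consume_skb(skb);
		return;
	}

	/* Flow hit: run its action list entirely in the kernel. */
	OVS_CB(skb)->flow = flow;
	ovs_execute_actions(dp, skb);
}
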
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
new file mode 100644
index 00000000000..15e7384745c
--- /dev/null
+++ b/net/openvswitch/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Open vSwitch.
+#
+
+obj-$(CONFIG_OPENVSWITCH) += openvswitch.o
+
+openvswitch-y := \
+ actions.o \
+ datapath.o \
+ dp_notify.o \
+ flow.o \
+ vport.o \
+ vport-internal_dev.o \
+ vport-netdev.o \
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
new file mode 100644
index 00000000000..2725d1bdf29
--- /dev/null
+++ b/net/openvswitch/actions.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/openvswitch.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in6.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr, int len, bool keep_skb);
+
+static int make_writable(struct sk_buff *skb, int write_len)
+{
+ if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+ return 0;
+
+ return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/* remove VLAN header from packet and update csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+{
+ struct vlan_hdr *vhdr;
+ int err;
+
+ err = make_writable(skb, VLAN_ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+ + ETH_HLEN, VLAN_HLEN, 0));
+
+ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+ *current_tci = vhdr->h_vlan_TCI;
+
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, VLAN_HLEN);
+
+ vlan_set_encap_proto(skb, vhdr);
+ skb->mac_header += VLAN_HLEN;
+ skb_reset_mac_len(skb);
+
+ return 0;
+}
+
+static int pop_vlan(struct sk_buff *skb)
+{
+ __be16 tci;
+ int err;
+
+ if (likely(vlan_tx_tag_present(skb))) {
+ skb->vlan_tci = 0;
+ } else {
+ if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
+
+ err = __pop_vlan_tci(skb, &tci);
+ if (err)
+ return err;
+ }
+ /* move next vlan tag to hw accel tag */
+ if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
+
+ err = __pop_vlan_tci(skb, &tci);
+ if (unlikely(err))
+ return err;
+
+ __vlan_hwaccel_put_tag(skb, ntohs(tci));
+ return 0;
+}
+
+static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+{
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ u16 current_tag;
+
+ /* push down current VLAN tag */
+ current_tag = vlan_tx_tag_get(skb);
+
+ if (!__vlan_put_tag(skb, current_tag))
+ return -ENOMEM;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ + ETH_HLEN, VLAN_HLEN, 0));
+
+ }
+ __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+ return 0;
+}
+
+static int set_eth_addr(struct sk_buff *skb,
+ const struct ovs_key_ethernet *eth_key)
+{
+ int err;
+ err = make_writable(skb, ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+
+ return 0;
+}
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
+{
+ int transport_len = skb->len - skb_transport_offset(skb);
+
+ if (nh->protocol == IPPROTO_TCP) {
+ if (likely(transport_len >= sizeof(struct tcphdr)))
+ inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+ *addr, new_addr, 1);
+ } else if (nh->protocol == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr)))
+ inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
+ *addr, new_addr, 1);
+ }
+
+ csum_replace4(&nh->check, *addr, new_addr);
+ skb->rxhash = 0;
+ *addr = new_addr;
+}
+
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
+{
+ csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+ nh->ttl = new_ttl;
+}
+
+static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+{
+ struct iphdr *nh;
+ int err;
+
+ err = make_writable(skb, skb_network_offset(skb) +
+ sizeof(struct iphdr));
+ if (unlikely(err))
+ return err;
+
+ nh = ip_hdr(skb);
+
+ if (ipv4_key->ipv4_src != nh->saddr)
+ set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+
+ if (ipv4_key->ipv4_dst != nh->daddr)
+ set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+
+ if (ipv4_key->ipv4_tos != nh->tos)
+ ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+
+ if (ipv4_key->ipv4_ttl != nh->ttl)
+ set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+
+ return 0;
+}
+
+/* Must follow make_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+ __be16 new_port, __sum16 *check)
+{
+ inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+ *port = new_port;
+ skb->rxhash = 0;
+}
+
+static int set_udp_port(struct sk_buff *skb,
+ const struct ovs_key_udp *udp_port_key)
+{
+ struct udphdr *uh;
+ int err;
+
+ err = make_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+ if (unlikely(err))
+ return err;
+
+ uh = udp_hdr(skb);
+ if (udp_port_key->udp_src != uh->source)
+ set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+
+ if (udp_port_key->udp_dst != uh->dest)
+ set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+
+ return 0;
+}
+
+static int set_tcp_port(struct sk_buff *skb,
+ const struct ovs_key_tcp *tcp_port_key)
+{
+ struct tcphdr *th;
+ int err;
+
+ err = make_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct tcphdr));
+ if (unlikely(err))
+ return err;
+
+ th = tcp_hdr(skb);
+ if (tcp_port_key->tcp_src != th->source)
+ set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+
+ if (tcp_port_key->tcp_dst != th->dest)
+ set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+
+ return 0;
+}
+
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+ struct vport *vport;
+
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ vport = rcu_dereference(dp->ports[out_port]);
+ if (unlikely(!vport)) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ ovs_vport_send(vport, skb);
+ return 0;
+}
+
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ struct dp_upcall_info upcall;
+ const struct nlattr *a;
+ int rem;
+
+ upcall.cmd = OVS_PACKET_CMD_ACTION;
+ upcall.key = &OVS_CB(skb)->flow->key;
+ upcall.userdata = NULL;
+ upcall.pid = 0;
+
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ switch (nla_type(a)) {
+ case OVS_USERSPACE_ATTR_USERDATA:
+ upcall.userdata = a;
+ break;
+
+ case OVS_USERSPACE_ATTR_PID:
+ upcall.pid = nla_get_u32(a);
+ break;
+ }
+ }
+
+ return ovs_dp_upcall(dp, skb, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ const struct nlattr *acts_list = NULL;
+ const struct nlattr *a;
+ int rem;
+
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ switch (nla_type(a)) {
+ case OVS_SAMPLE_ATTR_PROBABILITY:
+ if (net_random() >= nla_get_u32(a))
+ return 0;
+ break;
+
+ case OVS_SAMPLE_ATTR_ACTIONS:
+ acts_list = a;
+ break;
+ }
+ }
+
+ return do_execute_actions(dp, skb, nla_data(acts_list),
+ nla_len(acts_list), true);
+}
+
+static int execute_set_action(struct sk_buff *skb,
+ const struct nlattr *nested_attr)
+{
+ int err = 0;
+
+ switch (nla_type(nested_attr)) {
+ case OVS_KEY_ATTR_PRIORITY:
+ skb->priority = nla_get_u32(nested_attr);
+ break;
+
+ case OVS_KEY_ATTR_ETHERNET:
+ err = set_eth_addr(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_IPV4:
+ err = set_ipv4(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_TCP:
+ err = set_tcp_port(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_UDP:
+ err = set_udp_port(skb, nla_data(nested_attr));
+ break;
+ }
+
+ return err;
+}
+
+/* Execute a list of actions against 'skb'. */
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr, int len, bool keep_skb)
+{
+ /* Every output action needs a separate clone of 'skb', but the common
+ * case is just a single output action, so that doing a clone and
+ * then freeing the original skbuff is wasteful. So the following code
+ * is slightly obscure just to avoid that. */
+ int prev_port = -1;
+ const struct nlattr *a;
+ int rem;
+
+ for (a = attr, rem = len; rem > 0;
+ a = nla_next(a, &rem)) {
+ int err = 0;
+
+ if (prev_port != -1) {
+ do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+ prev_port = -1;
+ }
+
+ switch (nla_type(a)) {
+ case OVS_ACTION_ATTR_OUTPUT:
+ prev_port = nla_get_u32(a);
+ break;
+
+ case OVS_ACTION_ATTR_USERSPACE:
+ output_userspace(dp, skb, a);
+ break;
+
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ err = push_vlan(skb, nla_data(a));
+ if (unlikely(err)) /* skb already freed. */
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_POP_VLAN:
+ err = pop_vlan(skb);
+ break;
+
+ case OVS_ACTION_ATTR_SET:
+ err = execute_set_action(skb, nla_data(a));
+ break;
+
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = sample(dp, skb, a);
+ break;
+ }
+
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ return err;
+ }
+ }
+
+ if (prev_port != -1) {
+ if (keep_skb)
+ skb = skb_clone(skb, GFP_ATOMIC);
+
+ do_output(dp, skb, prev_port);
+ } else if (!keep_skb)
+ consume_skb(skb);
+
+ return 0;
+}
+
+/* Execute a list of actions against 'skb'. */
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
+{
+ struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+ return do_execute_actions(dp, skb, acts->actions,
+ acts->actions_len, false);
+}
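
Both do_execute_actions() above and validate_actions() in datapath.c walk the action list as a flat stream of netlink attributes using the standard <net/netlink.h> helpers. A minimal sketch of that iteration pattern (the handle_one_action() hook is hypothetical, added only to make the loop self-contained):

#include <linux/kernel.h>
#include <net/netlink.h>

/* Hypothetical per-attribute hook, for illustration only. */
static int handle_one_action(const struct nlattr *a)
{
	pr_debug("action type %d, %d payload bytes\n",
		 nla_type(a), nla_len(a));
	return 0;
}

/* Walk a flat attribute stream the way do_execute_actions() does:
 * 'rem' counts the bytes left and nla_next() advances to the next
 * attribute while decrementing it. */
static int walk_actions_sketch(const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0; a = nla_next(a, &rem)) {
		int err = handle_one_action(a);

		if (err)
			return err;
	}

	return 0;
}
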
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
new file mode 100644
index 00000000000..9a2725114e9
--- /dev/null
+++ b/net/openvswitch/datapath.c
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/etherdevice.h>
+#include <linux/genetlink.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/wait.h>
+#include <asm/system.h>
+#include <asm/div64.h>
+#include <linux/highmem.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/inetdevice.h>
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/rculist.h>
+#include <linux/dmi.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "flow.h"
+#include "vport-internal_dev.h"
+
+/**
+ * DOC: Locking:
+ *
+ * Writes to device state (add/remove datapath, port, set operations on vports,
+ * etc.) are protected by RTNL.
+ *
+ * Writes to other state (flow table modifications, set miscellaneous datapath
+ * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside
+ * genl_mutex.
+ *
+ * Reads are protected by RCU.
+ *
+ * There are a few special cases (mostly stats) that have their own
+ * synchronization but they nest under all of the above and don't interact with
+ * each other.
+ */
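
As a rough illustration of these rules (the ovs_locking_sketch() function is hypothetical; genl_mutex is taken by the generic netlink core before any .doit handler in this file runs), a typical handler body looks like:

/* Hypothetical handler body, for illustration only. */
static void ovs_locking_sketch(struct datapath *dp)
{
	struct vport *vport;

	/* genl_mutex is already held here by the genetlink core. */

	/* Device state writes (ports, datapaths) additionally take RTNL,
	 * which therefore nests inside genl_mutex. */
	rtnl_lock();
	/* ... add or remove a vport, update dp->ports[] ... */
	rtnl_unlock();

	/* Readers only need RCU. */
	rcu_read_lock();
	vport = rcu_dereference(dp->ports[OVSP_LOCAL]);
	if (vport)
		pr_debug("local port %s\n", vport->ops->get_name(vport));
	rcu_read_unlock();
}
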
+
+/* Global list of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+static LIST_HEAD(dps);
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+static struct vport *new_vport(const struct vport_parms *);
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+ const struct dp_upcall_info *);
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+ const struct dp_upcall_info *);
+
+/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+static struct datapath *get_dp(int dp_ifindex)
+{
+ struct datapath *dp = NULL;
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+ if (dev) {
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
+ if (vport)
+ dp = vport->dp;
+ }
+ rcu_read_unlock();
+
+ return dp;
+}
+
+/* Must be called with rcu_read_lock or RTNL lock. */
+const char *ovs_dp_name(const struct datapath *dp)
+{
+ struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+ return vport->ops->get_name(vport);
+}
+
+static int get_dpifindex(struct datapath *dp)
+{
+ struct vport *local;
+ int ifindex;
+
+ rcu_read_lock();
+
+ local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+ if (local)
+ ifindex = local->ops->get_ifindex(local);
+ else
+ ifindex = 0;
+
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
+static void destroy_dp_rcu(struct rcu_head *rcu)
+{
+ struct datapath *dp = container_of(rcu, struct datapath, rcu);
+
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+ free_percpu(dp->stats_percpu);
+ kfree(dp);
+}
+
+/* Called with RTNL lock and genl_lock. */
+static struct vport *new_vport(const struct vport_parms *parms)
+{
+ struct vport *vport;
+
+ vport = ovs_vport_add(parms);
+ if (!IS_ERR(vport)) {
+ struct datapath *dp = parms->dp;
+
+ rcu_assign_pointer(dp->ports[parms->port_no], vport);
+ list_add(&vport->node, &dp->port_list);
+ }
+
+ return vport;
+}
+
+/* Called with RTNL lock. */
+void ovs_dp_detach_port(struct vport *p)
+{
+ ASSERT_RTNL();
+
+ /* First drop references to device. */
+ list_del(&p->node);
+ rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+
+ /* Then destroy it. */
+ ovs_vport_del(p);
+}
+
+/* Must be called with rcu_read_lock. */
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+{
+ struct datapath *dp = p->dp;
+ struct sw_flow *flow;
+ struct dp_stats_percpu *stats;
+ struct sw_flow_key key;
+ u64 *stats_counter;
+ int error;
+ int key_len;
+
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ /* Extract flow from 'skb' into 'key'. */
+ error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Look up flow. */
+ flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ if (unlikely(!flow)) {
+ struct dp_upcall_info upcall;
+
+ upcall.cmd = OVS_PACKET_CMD_MISS;
+ upcall.key = &key;
+ upcall.userdata = NULL;
+ upcall.pid = p->upcall_pid;
+ ovs_dp_upcall(dp, skb, &upcall);
+ consume_skb(skb);
+ stats_counter = &stats->n_missed;
+ goto out;
+ }
+
+ OVS_CB(skb)->flow = flow;
+
+ stats_counter = &stats->n_hit;
+ ovs_flow_used(OVS_CB(skb)->flow, skb);
+ ovs_execute_actions(dp, skb);
+
+out:
+ /* Update datapath statistics. */
+ u64_stats_update_begin(&stats->sync);
+ (*stats_counter)++;
+ u64_stats_update_end(&stats->sync);
+}
+
+static struct genl_family dp_packet_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct ovs_header),
+ .name = OVS_PACKET_FAMILY,
+ .version = OVS_PACKET_VERSION,
+ .maxattr = OVS_PACKET_ATTR_MAX
+};
+
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
+{
+ struct dp_stats_percpu *stats;
+ int dp_ifindex;
+ int err;
+
+ if (upcall_info->pid == 0) {
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ dp_ifindex = get_dpifindex(dp);
+ if (!dp_ifindex) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ if (!skb_is_gso(skb))
+ err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+ else
+ err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ u64_stats_update_begin(&stats->sync);
+ stats->n_lost++;
+ u64_stats_update_end(&stats->sync);
+
+ return err;
+}
+
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
+{
+ struct dp_upcall_info later_info;
+ struct sw_flow_key later_key;
+ struct sk_buff *segs, *nskb;
+ int err;
+
+ segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
+
+ /* Queue all of the segments. */
+ skb = segs;
+ do {
+ err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+ if (err)
+ break;
+
+ if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+ /* The initial flow key extracted by ovs_flow_extract()
+ * in this case is for a first fragment, so we need to
+ * properly mark later fragments.
+ */
+ later_key = *upcall_info->key;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+
+ later_info = *upcall_info;
+ later_info.key = &later_key;
+ upcall_info = &later_info;
+ }
+ } while ((skb = skb->next));
+
+ /* Free all of the segments. */
+ skb = segs;
+ do {
+ nskb = skb->next;
+ if (err)
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ } while ((skb = nskb));
+ return err;
+}
+
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
+{
+ struct ovs_header *upcall;
+ struct sk_buff *nskb = NULL;
+ struct sk_buff *user_skb; /* to be queued to userspace */
+ struct nlattr *nla;
+ unsigned int len;
+ int err;
+
+ if (vlan_tx_tag_present(skb)) {
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+ if (!nskb)
+ return -ENOMEM;
+
+ nskb->vlan_tci = 0;
+ skb = nskb;
+ }
+
+ if (nla_attr_size(skb->len) > USHRT_MAX) {
+ err = -EFBIG;
+ goto out;
+ }
+
+ len = sizeof(struct ovs_header);
+ len += nla_total_size(skb->len);
+ len += nla_total_size(FLOW_BUFSIZE);
+ if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
+ len += nla_total_size(8);
+
+ user_skb = genlmsg_new(len, GFP_ATOMIC);
+ if (!user_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
+ 0, upcall_info->cmd);
+ upcall->dp_ifindex = dp_ifindex;
+
+ nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
+ ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+ nla_nest_end(user_skb, nla);
+
+ if (upcall_info->userdata)
+ nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
+ nla_get_u64(upcall_info->userdata));
+
+ nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+
+ skb_copy_and_csum_dev(skb, nla_data(nla));
+
+ err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+
+out:
+ kfree_skb(nskb);
+ return err;
+}
+
+/* Called with genl_mutex. */
+static int flush_flows(int dp_ifindex)
+{
+ struct flow_table *old_table;
+ struct flow_table *new_table;
+ struct datapath *dp;
+
+ dp = get_dp(dp_ifindex);
+ if (!dp)
+ return -ENODEV;
+
+ old_table = genl_dereference(dp->table);
+ new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
+ if (!new_table)
+ return -ENOMEM;
+
+ rcu_assign_pointer(dp->table, new_table);
+
+ ovs_flow_tbl_deferred_destroy(old_table);
+ return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key, int depth);
+
+static int validate_sample(const struct nlattr *attr,
+ const struct sw_flow_key *key, int depth)
+{
+ const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+ const struct nlattr *probability, *actions;
+ const struct nlattr *a;
+ int rem;
+
+ memset(attrs, 0, sizeof(attrs));
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+ if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+ return -EINVAL;
+ attrs[type] = a;
+ }
+ if (rem)
+ return -EINVAL;
+
+ probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+ if (!probability || nla_len(probability) != sizeof(u32))
+ return -EINVAL;
+
+ actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+ if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+ return -EINVAL;
+ return validate_actions(actions, key, depth + 1);
+}
+
+static int validate_set(const struct nlattr *a,
+ const struct sw_flow_key *flow_key)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ int key_type = nla_type(ovs_key);
+
+ /* There can be only one key in an action */
+ if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+ return -EINVAL;
+
+ if (key_type > OVS_KEY_ATTR_MAX ||
+ nla_len(ovs_key) != ovs_key_lens[key_type])
+ return -EINVAL;
+
+ switch (key_type) {
+ const struct ovs_key_ipv4 *ipv4_key;
+
+ case OVS_KEY_ATTR_PRIORITY:
+ case OVS_KEY_ATTR_ETHERNET:
+ break;
+
+ case OVS_KEY_ATTR_IPV4:
+ if (flow_key->eth.type != htons(ETH_P_IP))
+ return -EINVAL;
+
+ if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+ return -EINVAL;
+
+ ipv4_key = nla_data(ovs_key);
+ if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+ return -EINVAL;
+
+ if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+ return -EINVAL;
+
+ break;
+
+ case OVS_KEY_ATTR_TCP:
+ if (flow_key->ip.proto != IPPROTO_TCP)
+ return -EINVAL;
+
+ if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+ return -EINVAL;
+
+ break;
+
+ case OVS_KEY_ATTR_UDP:
+ if (flow_key->ip.proto != IPPROTO_UDP)
+ return -EINVAL;
+
+ if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+ static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
+ [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+ [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+ };
+ struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+ int error;
+
+ error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+ attr, userspace_policy);
+ if (error)
+ return error;
+
+ if (!a[OVS_USERSPACE_ATTR_PID] ||
+ !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+ const struct sw_flow_key *key, int depth)
+{
+ const struct nlattr *a;
+ int rem, err;
+
+ if (depth >= SAMPLE_ACTION_DEPTH)
+ return -EOVERFLOW;
+
+ nla_for_each_nested(a, attr, rem) {
+ /* Expected argument lengths, (u32)-1 for variable length. */
+ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+ [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+ [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+ [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+ [OVS_ACTION_ATTR_POP_VLAN] = 0,
+ [OVS_ACTION_ATTR_SET] = (u32)-1,
+ [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+ };
+ const struct ovs_action_push_vlan *vlan;
+ int type = nla_type(a);
+
+ if (type > OVS_ACTION_ATTR_MAX ||
+ (action_lens[type] != nla_len(a) &&
+ action_lens[type] != (u32)-1))
+ return -EINVAL;
+
+ switch (type) {
+ case OVS_ACTION_ATTR_UNSPEC:
+ return -EINVAL;
+
+ case OVS_ACTION_ATTR_USERSPACE:
+ err = validate_userspace(a);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_OUTPUT:
+ if (nla_get_u32(a) >= DP_MAX_PORTS)
+ return -EINVAL;
+ break;
+
+ case OVS_ACTION_ATTR_POP_VLAN:
+ break;
+
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ vlan = nla_data(a);
+ if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+ return -EINVAL;
+ if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+ return -EINVAL;
+ break;
+
+ case OVS_ACTION_ATTR_SET:
+ err = validate_set(a, key);
+ if (err)
+ return err;
+ break;
+
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = validate_sample(a, key, depth);
+ if (err)
+ return err;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (rem > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void clear_stats(struct sw_flow *flow)
+{
+ flow->used = 0;
+ flow->tcp_flags = 0;
+ flow->packet_count = 0;
+ flow->byte_count = 0;
+}
+
+static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ovs_header *ovs_header = info->userhdr;
+ struct nlattr **a = info->attrs;
+ struct sw_flow_actions *acts;
+ struct sk_buff *packet;
+ struct sw_flow *flow;
+ struct datapath *dp;
+ struct ethhdr *eth;
+ int len;
+ int err;
+ int key_len;
+
+ err = -EINVAL;
+ if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
+ !a[OVS_PACKET_ATTR_ACTIONS] ||
+ nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
+ goto err;
+
+ len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
+ packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!packet)
+ goto err;
+ skb_reserve(packet, NET_IP_ALIGN);
+
+ memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
+
+ skb_reset_mac_header(packet);
+ eth = eth_hdr(packet);
+
+ /* Normally, setting the skb 'protocol' field would be handled by a
+ * call to eth_type_trans(), but it assumes there's a sending
+ * device, which we may not have. */
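+ /* EtherType values start at 1536 (0x600); anything smaller is an
+ * 802.3 length field, so fall back to ETH_P_802_2 below. */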
+ if (ntohs(eth->h_proto) >= 1536)
+ packet->protocol = eth->h_proto;
+ else
+ packet->protocol = htons(ETH_P_802_2);
+
+ /* Build an sw_flow for sending this packet. */
+ flow = ovs_flow_alloc();
+ err = PTR_ERR(flow);
+ if (IS_ERR(flow))
+ goto err_kfree_skb;
+
+ err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+ if (err)
+ goto err_flow_free;
+
+ err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+ &flow->key.phy.in_port,
+ a[OVS_PACKET_ATTR_KEY]);
+ if (err)
+ goto err_flow_free;
+
+ err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
+ if (err)
+ goto err_flow_free;
+
+ flow->hash = ovs_flow_hash(&flow->key, key_len);
+
+ acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+ err = PTR_ERR(acts);
+ if (IS_ERR(acts))
+ goto err_flow_free;
+ rcu_assign_pointer(flow->sf_acts, acts);
+
+ OVS_CB(packet)->flow = flow;
+ packet->priority = flow->key.phy.priority;
+
+ rcu_read_lock();
+ dp = get_dp(ovs_header->dp_ifindex);
+ err = -ENODEV;
+ if (!dp)
+ goto err_unlock;
+
+ local_bh_disable();
+ err = ovs_execute_actions(dp, packet);
+ local_bh_enable();
+ rcu_read_unlock();
+
+ ovs_flow_free(flow);
+ return err;
+
+err_unlock:
+ rcu_read_unlock();
+err_flow_free:
+ ovs_flow_free(flow);
+err_kfree_skb:
+ kfree_skb(packet);
+err:
+ return err;
+}
+
+static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
+ [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+ [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
+ [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_ops dp_packet_genl_ops[] = {
+ { .cmd = OVS_PACKET_CMD_EXECUTE,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = packet_policy,
+ .doit = ovs_packet_cmd_execute
+ }
+};
+
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+{
+ int i;
+ struct flow_table *table = genl_dereference(dp->table);
+
+ stats->n_flows = ovs_flow_tbl_count(table);
+
+ stats->n_hit = stats->n_missed = stats->n_lost = 0;
+ for_each_possible_cpu(i) {
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned int start;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
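+ /* Loop until we read a consistent snapshot of the 64-bit
+ * counters; the seqcount retry matters on 32-bit SMP. */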
+ do {
+ start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+ local_stats = *percpu_stats;
+ } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+ stats->n_hit += local_stats.n_hit;
+ stats->n_missed += local_stats.n_missed;
+ stats->n_lost += local_stats.n_lost;
+ }
+}
+
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+ [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+ [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+static struct genl_family dp_flow_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct ovs_header),
+ .name = OVS_FLOW_FAMILY,
+ .version = OVS_FLOW_VERSION,
+ .maxattr = OVS_FLOW_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+ .name = OVS_FLOW_MCGROUP
+};
+
+/* Called with genl_lock. */
+static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+ struct sk_buff *skb, u32 pid,
+ u32 seq, u32 flags, u8 cmd)
+{
+ const int skb_orig_len = skb->len;
+ const struct sw_flow_actions *sf_acts;
+ struct ovs_flow_stats stats;
+ struct ovs_header *ovs_header;
+ struct nlattr *nla;
+ unsigned long used;
+ u8 tcp_flags;
+ int err;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+
+ ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+ if (!ovs_header)
+ return -EMSGSIZE;
+
+ ovs_header->dp_ifindex = get_dpifindex(dp);
+
+ nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
+ if (!nla)
+ goto nla_put_failure;
+ err = ovs_flow_to_nlattrs(&flow->key, skb);
+ if (err)
+ goto error;
+ nla_nest_end(skb, nla);
+
+ spin_lock_bh(&flow->lock);
+ used = flow->used;
+ stats.n_packets = flow->packet_count;
+ stats.n_bytes = flow->byte_count;
+ tcp_flags = flow->tcp_flags;
+ spin_unlock_bh(&flow->lock);
+
+ if (used)
+ NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+
+ if (stats.n_packets)
+ NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+ sizeof(struct ovs_flow_stats), &stats);
+
+ if (tcp_flags)
+ NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+ /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
+ * this is the first flow to be dumped into 'skb'. This is unusual for
+ * Netlink but individual action lists can be longer than
+ * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
+ * The userspace caller can always fetch the actions separately if it
+ * really wants them. (Most userspace callers in fact don't care.)
+ *
+ * This can only fail for dump operations because the skb is always
+ * properly sized for single flows.
+ */
+ err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
+ sf_acts->actions);
+ if (err < 0 && skb_orig_len)
+ goto error;
+
+ return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+error:
+ genlmsg_cancel(skb, ovs_header);
+ return err;
+}
+
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+{
+ const struct sw_flow_actions *sf_acts;
+ int len;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+
+ /* OVS_FLOW_ATTR_KEY */
+ len = nla_total_size(FLOW_BUFSIZE);
+ /* OVS_FLOW_ATTR_ACTIONS */
+ len += nla_total_size(sf_acts->actions_len);
+ /* OVS_FLOW_ATTR_STATS */
+ len += nla_total_size(sizeof(struct ovs_flow_stats));
+ /* OVS_FLOW_ATTR_TCP_FLAGS */
+ len += nla_total_size(1);
+ /* OVS_FLOW_ATTR_USED */
+ len += nla_total_size(8);
+
+ len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+ return genlmsg_new(len, GFP_KERNEL);
+}
+
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+ struct datapath *dp,
+ u32 pid, u32 seq, u8 cmd)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = ovs_flow_cmd_alloc_info(flow);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+ BUG_ON(retval < 0);
+ return skb;
+}
+
+static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct ovs_header *ovs_header = info->userhdr;
+ struct sw_flow_key key;
+ struct sw_flow *flow;
+ struct sk_buff *reply;
+ struct datapath *dp;
+ struct flow_table *table;
+ int error;
+ int key_len;
+
+ /* Extract key. */
+ error = -EINVAL;
+ if (!a[OVS_FLOW_ATTR_KEY])
+ goto error;
+ error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ if (error)
+ goto error;
+
+ /* Validate actions. */
+ if (a[OVS_FLOW_ATTR_ACTIONS]) {
+ error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
+ if (error)
+ goto error;
+ } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ error = -ENODEV;
+ if (!dp)
+ goto error;
+
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ if (!flow) {
+ struct sw_flow_actions *acts;
+
+ /* Bail out if we're not allowed to create a new flow. */
+ error = -ENOENT;
+ if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
+ goto error;
+
+ /* Expand table, if necessary, to make room. */
+ if (ovs_flow_tbl_need_to_expand(table)) {
+ struct flow_table *new_table;
+
+ new_table = ovs_flow_tbl_expand(table);
+ if (!IS_ERR(new_table)) {
+ rcu_assign_pointer(dp->table, new_table);
+ ovs_flow_tbl_deferred_destroy(table);
+ table = genl_dereference(dp->table);
+ }
+ }
+
+ /* Allocate flow. */
+ flow = ovs_flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
+ goto error;
+ }
+ flow->key = key;
+ clear_stats(flow);
+
+ /* Obtain actions. */
+ acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ error = PTR_ERR(acts);
+ if (IS_ERR(acts))
+ goto error_free_flow;
+ rcu_assign_pointer(flow->sf_acts, acts);
+
+ /* Put flow in bucket. */
+ flow->hash = ovs_flow_hash(&key, key_len);
+ ovs_flow_tbl_insert(table, flow);
+
+ reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq,
+ OVS_FLOW_CMD_NEW);
+ } else {
+ /* We found a matching flow. */
+ struct sw_flow_actions *old_acts;
+ struct nlattr *acts_attrs;
+
+ /* Bail out if we're not allowed to modify an existing flow.
+ * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+ * because Generic Netlink treats the latter as a dump
+ * request. We also accept NLM_F_EXCL in case that bug ever
+ * gets fixed.
+ */
+ error = -EEXIST;
+ if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
+ info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+ goto error;
+
+ /* Update actions. */
+ old_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+ acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+ if (acts_attrs &&
+ (old_acts->actions_len != nla_len(acts_attrs) ||
+ memcmp(old_acts->actions, nla_data(acts_attrs),
+ old_acts->actions_len))) {
+ struct sw_flow_actions *new_acts;
+
+ new_acts = ovs_flow_actions_alloc(acts_attrs);
+ error = PTR_ERR(new_acts);
+ if (IS_ERR(new_acts))
+ goto error;
+
+ rcu_assign_pointer(flow->sf_acts, new_acts);
+ ovs_flow_deferred_free_acts(old_acts);
+ }
+
+ reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, OVS_FLOW_CMD_NEW);
+
+ /* Clear stats. */
+ if (a[OVS_FLOW_ATTR_CLEAR]) {
+ spin_lock_bh(&flow->lock);
+ clear_stats(flow);
+ spin_unlock_bh(&flow->lock);
+ }
+ }
+
+ if (!IS_ERR(reply))
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_flow_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+ else
+ netlink_set_err(init_net.genl_sock, 0,
+ ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
+ return 0;
+
+error_free_flow:
+ ovs_flow_free(flow);
+error:
+ return error;
+}
+
+static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct ovs_header *ovs_header = info->userhdr;
+ struct sw_flow_key key;
+ struct sk_buff *reply;
+ struct sw_flow *flow;
+ struct datapath *dp;
+ struct flow_table *table;
+ int err;
+ int key_len;
+
+ if (!a[OVS_FLOW_ATTR_KEY])
+ return -EINVAL;
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ if (err)
+ return err;
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ if (!dp)
+ return -ENODEV;
+
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
+ return -ENOENT;
+
+ reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, OVS_FLOW_CMD_NEW);
+ if (IS_ERR(reply))
+ return PTR_ERR(reply);
+
+ return genlmsg_reply(reply, info);
+}
+
+static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct ovs_header *ovs_header = info->userhdr;
+ struct sw_flow_key key;
+ struct sk_buff *reply;
+ struct sw_flow *flow;
+ struct datapath *dp;
+ struct flow_table *table;
+ int err;
+ int key_len;
+
+ if (!a[OVS_FLOW_ATTR_KEY])
+ return flush_flows(ovs_header->dp_ifindex);
+ err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ if (err)
+ return err;
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ if (!dp)
+ return -ENODEV;
+
+ table = genl_dereference(dp->table);
+ flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
+ return -ENOENT;
+
+ reply = ovs_flow_cmd_alloc_info(flow);
+ if (!reply)
+ return -ENOMEM;
+
+ ovs_flow_tbl_remove(table, flow);
+
+ err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+ info->snd_seq, 0, OVS_FLOW_CMD_DEL);
+ BUG_ON(err < 0);
+
+ ovs_flow_deferred_free(flow);
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ return 0;
+}
+
+static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+ struct datapath *dp;
+ struct flow_table *table;
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ if (!dp)
+ return -ENODEV;
+
+ table = genl_dereference(dp->table);
+
+ for (;;) {
+ struct sw_flow *flow;
+ u32 bucket, obj;
+
+ bucket = cb->args[0];
+ obj = cb->args[1];
+ flow = ovs_flow_tbl_next(table, &bucket, &obj);
+ if (!flow)
+ break;
+
+ if (ovs_flow_cmd_fill_info(flow, dp, skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ OVS_FLOW_CMD_NEW) < 0)
+ break;
+
+ cb->args[0] = bucket;
+ cb->args[1] = obj;
+ }
+ return skb->len;
+}
+
+static struct genl_ops dp_flow_genl_ops[] = {
+ { .cmd = OVS_FLOW_CMD_NEW,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = flow_policy,
+ .doit = ovs_flow_cmd_new_or_set
+ },
+ { .cmd = OVS_FLOW_CMD_DEL,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = flow_policy,
+ .doit = ovs_flow_cmd_del
+ },
+ { .cmd = OVS_FLOW_CMD_GET,
+ .flags = 0, /* OK for unprivileged users. */
+ .policy = flow_policy,
+ .doit = ovs_flow_cmd_get,
+ .dumpit = ovs_flow_cmd_dump
+ },
+ { .cmd = OVS_FLOW_CMD_SET,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = flow_policy,
+ .doit = ovs_flow_cmd_new_or_set,
+ },
+};
+
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+ [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+};
+
+static struct genl_family dp_datapath_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct ovs_header),
+ .name = OVS_DATAPATH_FAMILY,
+ .version = OVS_DATAPATH_VERSION,
+ .maxattr = OVS_DP_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+ .name = OVS_DATAPATH_MCGROUP
+};
+
+static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
+ u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+ struct ovs_header *ovs_header;
+ struct ovs_dp_stats dp_stats;
+ int err;
+
+ ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+ flags, cmd);
+ if (!ovs_header)
+ goto error;
+
+ ovs_header->dp_ifindex = get_dpifindex(dp);
+
+ rcu_read_lock();
+ err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
+ rcu_read_unlock();
+ if (err)
+ goto nla_put_failure;
+
+ get_dp_stats(dp, &dp_stats);
+ NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+
+ return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+ genlmsg_cancel(skb, ovs_header);
+error:
+ return -EMSGSIZE;
+}
+
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+ u32 seq, u8 cmd)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+ if (retval < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(retval);
+ }
+ return skb;
+}
+
+/* Called with genl_mutex and optionally with RTNL lock also. */
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+ struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+{
+ struct datapath *dp;
+
+ if (!a[OVS_DP_ATTR_NAME])
+ dp = get_dp(ovs_header->dp_ifindex);
+ else {
+ struct vport *vport;
+
+ rcu_read_lock();
+ vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+ dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
+ rcu_read_unlock();
+ }
+ return dp ? dp : ERR_PTR(-ENODEV);
+}
+
+static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct vport_parms parms;
+ struct sk_buff *reply;
+ struct datapath *dp;
+ struct vport *vport;
+ int err;
+
+ err = -EINVAL;
+ if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
+ goto err;
+
+ rtnl_lock();
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto err_unlock_rtnl;
+
+ err = -ENOMEM;
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (dp == NULL)
+ goto err_put_module;
+ INIT_LIST_HEAD(&dp->port_list);
+
+ /* Allocate table. */
+ err = -ENOMEM;
+ rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
+ if (!dp->table)
+ goto err_free_dp;
+
+ dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
+ goto err_destroy_table;
+ }
+
+ /* Set up our datapath device. */
+ parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
+ parms.type = OVS_VPORT_TYPE_INTERNAL;
+ parms.options = NULL;
+ parms.dp = dp;
+ parms.port_no = OVSP_LOCAL;
+ parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+
+ vport = new_vport(&parms);
+ if (IS_ERR(vport)) {
+ err = PTR_ERR(vport);
+ if (err == -EBUSY)
+ err = -EEXIST;
+
+ goto err_destroy_percpu;
+ }
+
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto err_destroy_local_port;
+
+ list_add_tail(&dp->list_node, &dps);
+ rtnl_unlock();
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+ return 0;
+
+err_destroy_local_port:
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+err_destroy_percpu:
+ free_percpu(dp->stats_percpu);
+err_destroy_table:
+ ovs_flow_tbl_destroy(genl_dereference(dp->table));
+err_free_dp:
+ kfree(dp);
+err_put_module:
+ module_put(THIS_MODULE);
+err_unlock_rtnl:
+ rtnl_unlock();
+err:
+ return err;
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vport *vport, *next_vport;
+ struct sk_buff *reply;
+ struct datapath *dp;
+ int err;
+
+ rtnl_lock();
+ dp = lookup_datapath(info->userhdr, info->attrs);
+ err = PTR_ERR(dp);
+ if (IS_ERR(dp))
+ goto exit_unlock;
+
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_DEL);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto exit_unlock;
+
+ list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
+ if (vport->port_no != OVSP_LOCAL)
+ ovs_dp_detach_port(vport);
+
+ list_del(&dp->list_node);
+ ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+
+ /* rtnl_unlock() will wait until all the references to devices that
+ * are pending unregistration have been dropped. We do it here to
+ * ensure that any internal devices (which contain DP pointers) are
+ * fully destroyed before freeing the datapath.
+ */
+ rtnl_unlock();
+
+ call_rcu(&dp->rcu, destroy_dp_rcu);
+ module_put(THIS_MODULE);
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+
+ return 0;
+
+exit_unlock:
+ rtnl_unlock();
+ return err;
+}
+
+static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *reply;
+ struct datapath *dp;
+ int err;
+
+ dp = lookup_datapath(info->userhdr, info->attrs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
+
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ netlink_set_err(init_net.genl_sock, 0,
+ ovs_dp_datapath_multicast_group.id, err);
+ return 0;
+ }
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_datapath_multicast_group.id, info->nlhdr,
+ GFP_KERNEL);
+
+ return 0;
+}
+
+static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *reply;
+ struct datapath *dp;
+
+ dp = lookup_datapath(info->userhdr, info->attrs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
+
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
+ if (IS_ERR(reply))
+ return PTR_ERR(reply);
+
+ return genlmsg_reply(reply, info);
+}
+
+static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct datapath *dp;
+ int skip = cb->args[0];
+ int i = 0;
+
+ list_for_each_entry(dp, &dps, list_node) {
+ if (i < skip)
+ continue;
+ if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ OVS_DP_CMD_NEW) < 0)
+ break;
+ i++;
+ }
+
+ cb->args[0] = i;
+
+ return skb->len;
+}
+
+static struct genl_ops dp_datapath_genl_ops[] = {
+ { .cmd = OVS_DP_CMD_NEW,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = ovs_dp_cmd_new
+ },
+ { .cmd = OVS_DP_CMD_DEL,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = ovs_dp_cmd_del
+ },
+ { .cmd = OVS_DP_CMD_GET,
+ .flags = 0, /* OK for unprivileged users. */
+ .policy = datapath_policy,
+ .doit = ovs_dp_cmd_get,
+ .dumpit = ovs_dp_cmd_dump
+ },
+ { .cmd = OVS_DP_CMD_SET,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = ovs_dp_cmd_set,
+ },
+};
+
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+ [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+ [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_family dp_vport_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct ovs_header),
+ .name = OVS_VPORT_FAMILY,
+ .version = OVS_VPORT_VERSION,
+ .maxattr = OVS_VPORT_ATTR_MAX
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+ .name = OVS_VPORT_MCGROUP
+};
+
+/* Called with RTNL lock or RCU read lock. */
+static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+ struct ovs_header *ovs_header;
+ struct ovs_vport_stats vport_stats;
+ int err;
+
+ ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+ flags, cmd);
+ if (!ovs_header)
+ return -EMSGSIZE;
+
+ ovs_header->dp_ifindex = get_dpifindex(vport->dp);
+
+ NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
+ NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
+ NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
+ NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+
+ ovs_vport_get_stats(vport, &vport_stats);
+ NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+ &vport_stats);
+
+ err = ovs_vport_get_options(vport, skb);
+ if (err == -EMSGSIZE)
+ goto error;
+
+ return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+error:
+ genlmsg_cancel(skb, ovs_header);
+ return err;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+ u32 seq, u8 cmd)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+ if (retval < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(retval);
+ }
+ return skb;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+static struct vport *lookup_vport(struct ovs_header *ovs_header,
+ struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+{
+ struct datapath *dp;
+ struct vport *vport;
+
+ if (a[OVS_VPORT_ATTR_NAME]) {
+ vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+ if (!vport)
+ return ERR_PTR(-ENODEV);
+ return vport;
+ } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
+ u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+ if (port_no >= DP_MAX_PORTS)
+ return ERR_PTR(-EFBIG);
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ if (!dp)
+ return ERR_PTR(-ENODEV);
+
+ vport = rcu_dereference_rtnl(dp->ports[port_no]);
+ if (!vport)
+ return ERR_PTR(-ENOENT);
+ return vport;
+ } else
+ return ERR_PTR(-EINVAL);
+}
+
+static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct ovs_header *ovs_header = info->userhdr;
+ struct vport_parms parms;
+ struct sk_buff *reply;
+ struct vport *vport;
+ struct datapath *dp;
+ u32 port_no;
+ int err;
+
+ err = -EINVAL;
+ if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
+ !a[OVS_VPORT_ATTR_UPCALL_PID])
+ goto exit;
+
+ rtnl_lock();
+ dp = get_dp(ovs_header->dp_ifindex);
+ err = -ENODEV;
+ if (!dp)
+ goto exit_unlock;
+
+ if (a[OVS_VPORT_ATTR_PORT_NO]) {
+ port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+ err = -EFBIG;
+ if (port_no >= DP_MAX_PORTS)
+ goto exit_unlock;
+
+ vport = rtnl_dereference(dp->ports[port_no]);
+ err = -EBUSY;
+ if (vport)
+ goto exit_unlock;
+ } else {
+ for (port_no = 1; ; port_no++) {
+ if (port_no >= DP_MAX_PORTS) {
+ err = -EFBIG;
+ goto exit_unlock;
+ }
+ vport = rtnl_dereference(dp->ports[port_no]);
+ if (!vport)
+ break;
+ }
+ }
+
+ parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
+ parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
+ parms.options = a[OVS_VPORT_ATTR_OPTIONS];
+ parms.dp = dp;
+ parms.port_no = port_no;
+ parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+ vport = new_vport(&parms);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_unlock;
+
+ reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+ OVS_VPORT_CMD_NEW);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ ovs_dp_detach_port(vport);
+ goto exit_unlock;
+ }
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ err = 0;
+
+exit_unlock:
+ rtnl_unlock();
+exit:
+ return err;
+}
+
+static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct sk_buff *reply;
+ struct vport *vport;
+ int err;
+
+ rtnl_lock();
+ vport = lookup_vport(info->userhdr, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_unlock;
+
+ err = 0;
+ if (a[OVS_VPORT_ATTR_TYPE] &&
+ nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
+ err = -EINVAL;
+
+ if (!err && a[OVS_VPORT_ATTR_OPTIONS])
+ err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+ if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+ vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+ reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+ OVS_VPORT_CMD_NEW);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ netlink_set_err(init_net.genl_sock, 0,
+ ovs_dp_vport_multicast_group.id, err);
+ return 0;
+ }
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+ rtnl_unlock();
+ return err;
+}
+
+static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct sk_buff *reply;
+ struct vport *vport;
+ int err;
+
+ rtnl_lock();
+ vport = lookup_vport(info->userhdr, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_unlock;
+
+ if (vport->port_no == OVSP_LOCAL) {
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+
+ reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+ OVS_VPORT_CMD_DEL);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto exit_unlock;
+
+ err = 0;
+ ovs_dp_detach_port(vport);
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+ rtnl_unlock();
+ return err;
+}
+
+static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **a = info->attrs;
+ struct ovs_header *ovs_header = info->userhdr;
+ struct sk_buff *reply;
+ struct vport *vport;
+ int err;
+
+ rcu_read_lock();
+ vport = lookup_vport(ovs_header, a);
+ err = PTR_ERR(vport);
+ if (IS_ERR(vport))
+ goto exit_unlock;
+
+ reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+ OVS_VPORT_CMD_NEW);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto exit_unlock;
+
+ rcu_read_unlock();
+
+ return genlmsg_reply(reply, info);
+
+exit_unlock:
+ rcu_read_unlock();
+ return err;
+}
+
+static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+ struct datapath *dp;
+ u32 port_no;
+ int retval;
+
+ dp = get_dp(ovs_header->dp_ifindex);
+ if (!dp)
+ return -ENODEV;
+
+ rcu_read_lock();
+ for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+ struct vport *vport;
+
+ vport = rcu_dereference(dp->ports[port_no]);
+ if (!vport)
+ continue;
+
+ if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ OVS_VPORT_CMD_NEW) < 0)
+ break;
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = port_no;
+ retval = skb->len;
+
+ return retval;
+}
+
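+/* Periodic work item: swap in a rehashed copy of each datapath's flow table
+ * (which also picks up a fresh hash seed) and reschedule itself. */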
+static void rehash_flow_table(struct work_struct *work)
+{
+ struct datapath *dp;
+
+ genl_lock();
+
+ list_for_each_entry(dp, &dps, list_node) {
+ struct flow_table *old_table = genl_dereference(dp->table);
+ struct flow_table *new_table;
+
+ new_table = ovs_flow_tbl_rehash(old_table);
+ if (!IS_ERR(new_table)) {
+ rcu_assign_pointer(dp->table, new_table);
+ ovs_flow_tbl_deferred_destroy(old_table);
+ }
+ }
+
+ genl_unlock();
+
+ schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+static struct genl_ops dp_vport_genl_ops[] = {
+ { .cmd = OVS_VPORT_CMD_NEW,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = vport_policy,
+ .doit = ovs_vport_cmd_new
+ },
+ { .cmd = OVS_VPORT_CMD_DEL,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = vport_policy,
+ .doit = ovs_vport_cmd_del
+ },
+ { .cmd = OVS_VPORT_CMD_GET,
+ .flags = 0, /* OK for unprivileged users. */
+ .policy = vport_policy,
+ .doit = ovs_vport_cmd_get,
+ .dumpit = ovs_vport_cmd_dump
+ },
+ { .cmd = OVS_VPORT_CMD_SET,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = vport_policy,
+ .doit = ovs_vport_cmd_set,
+ },
+};
+
+struct genl_family_and_ops {
+ struct genl_family *family;
+ struct genl_ops *ops;
+ int n_ops;
+ struct genl_multicast_group *group;
+};
+
+static const struct genl_family_and_ops dp_genl_families[] = {
+ { &dp_datapath_genl_family,
+ dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
+ &ovs_dp_datapath_multicast_group },
+ { &dp_vport_genl_family,
+ dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
+ &ovs_dp_vport_multicast_group },
+ { &dp_flow_genl_family,
+ dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
+ &ovs_dp_flow_multicast_group },
+ { &dp_packet_genl_family,
+ dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
+ NULL },
+};
+
+static void dp_unregister_genl(int n_families)
+{
+ int i;
+
+ for (i = 0; i < n_families; i++)
+ genl_unregister_family(dp_genl_families[i].family);
+}
+
+static int dp_register_genl(void)
+{
+ int n_registered;
+ int err;
+ int i;
+
+ n_registered = 0;
+ for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
+ const struct genl_family_and_ops *f = &dp_genl_families[i];
+
+ err = genl_register_family_with_ops(f->family, f->ops,
+ f->n_ops);
+ if (err)
+ goto error;
+ n_registered++;
+
+ if (f->group) {
+ err = genl_register_mc_group(f->family, f->group);
+ if (err)
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ dp_unregister_genl(n_registered);
+ return err;
+}
+
+static int __init dp_init(void)
+{
+ struct sk_buff *dummy_skb;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+
+ pr_info("Open vSwitch switching datapath\n");
+
+ err = ovs_flow_init();
+ if (err)
+ goto error;
+
+ err = ovs_vport_init();
+ if (err)
+ goto error_flow_exit;
+
+ err = register_netdevice_notifier(&ovs_dp_device_notifier);
+ if (err)
+ goto error_vport_exit;
+
+ err = dp_register_genl();
+ if (err < 0)
+ goto error_unreg_notifier;
+
+ schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+
+ return 0;
+
+error_unreg_notifier:
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_vport_exit:
+ ovs_vport_exit();
+error_flow_exit:
+ ovs_flow_exit();
+error:
+ return err;
+}
+
+static void dp_cleanup(void)
+{
+ cancel_delayed_work_sync(&rehash_flow_wq);
+ rcu_barrier();
+ dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+ unregister_netdevice_notifier(&ovs_dp_device_notifier);
+ ovs_vport_exit();
+ ovs_flow_exit();
+}
+
+module_init(dp_init);
+module_exit(dp_cleanup);
+
+MODULE_DESCRIPTION("Open vSwitch switching datapath");
+MODULE_LICENSE("GPL");
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
new file mode 100644
index 00000000000..5b9f884b705
--- /dev/null
+++ b/net/openvswitch/datapath.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef DATAPATH_H
+#define DATAPATH_H 1
+
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/version.h>
+
+#include "flow.h"
+
+struct vport;
+
+#define DP_MAX_PORTS 1024
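+/* Bounds the recursion depth when validating nested sample actions. */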
+#define SAMPLE_ACTION_DEPTH 3
+
+/**
+ * struct dp_stats_percpu - per-cpu packet processing statistics for a given
+ * datapath.
+ * @n_hit: Number of received packets for which a matching flow was found in
+ * the flow table.
+ * @n_missed: Number of received packets that had no matching flow in the
+ * flow table. The sum of @n_hit and @n_missed is the number of packets that
+ * have been received by the datapath.
+ * @n_lost: Number of received packets that had no matching flow in the flow
+ * table and could not be sent to userspace (normally due to an overflow in
+ * one of the datapath's queues).
+ */
+struct dp_stats_percpu {
+ u64 n_hit;
+ u64 n_missed;
+ u64 n_lost;
+ struct u64_stats_sync sync;
+};
+
+/**
+ * struct datapath - datapath for flow-based packet switching
+ * @rcu: RCU callback head for deferred destruction.
+ * @list_node: Element in global 'dps' list.
+ * @n_flows: Number of flows currently in flow table.
+ * @table: Current flow table. Protected by genl_lock and RCU.
+ * @ports: Map from port number to &struct vport. %OVSP_LOCAL port
+ * always exists, other ports may be %NULL. Protected by RTNL and RCU.
+ * @port_list: List of all ports in @ports in arbitrary order. RTNL required
+ * to iterate or modify.
+ * @stats_percpu: Per-CPU datapath statistics.
+ *
+ * Context: See the comment on locking at the top of datapath.c for additional
+ * locking information.
+ */
+struct datapath {
+ struct rcu_head rcu;
+ struct list_head list_node;
+
+ /* Flow table. */
+ struct flow_table __rcu *table;
+
+ /* Switch ports. */
+ struct vport __rcu *ports[DP_MAX_PORTS];
+ struct list_head port_list;
+
+ /* Stats. */
+ struct dp_stats_percpu __percpu *stats_percpu;
+};
+
+/**
+ * struct ovs_skb_cb - OVS data in skb CB
+ * @flow: The flow associated with this packet. May be %NULL if no flow.
+ */
+struct ovs_skb_cb {
+ struct sw_flow *flow;
+};
+#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
+
+/**
+ * struct dp_upcall - metadata to include with a packet to send to userspace
+ * @cmd: One of %OVS_PACKET_CMD_*.
+ * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull.
+ * @userdata: If nonnull, its u64 value is extracted and passed to userspace as
+ * %OVS_PACKET_ATTR_USERDATA.
+ * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no
+ * packet is sent and the packet is accounted in the datapath's @n_lost
+ * counter.
+ */
+struct dp_upcall_info {
+ u8 cmd;
+ const struct sw_flow_key *key;
+ const struct nlattr *userdata;
+ u32 pid;
+};
+
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+ const struct dp_upcall_info *);
+
+const char *ovs_dp_name(const struct datapath *dp);
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
+ u8 cmd);
+
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+#endif /* datapath.h */
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
new file mode 100644
index 00000000000..46736518c45
--- /dev/null
+++ b/net/openvswitch/dp_notify.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/netdevice.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+static int dp_device_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct vport *vport;
+
+ if (ovs_is_internal_dev(dev))
+ vport = ovs_internal_dev_get_vport(dev);
+ else
+ vport = ovs_netdev_get_vport(dev);
+
+ if (!vport)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ if (!ovs_is_internal_dev(dev)) {
+ struct sk_buff *notify;
+
+ notify = ovs_vport_cmd_build_info(vport, 0, 0,
+ OVS_VPORT_CMD_DEL);
+ ovs_dp_detach_port(vport);
+ if (IS_ERR(notify)) {
+ netlink_set_err(init_net.genl_sock, 0,
+ ovs_dp_vport_multicast_group.id,
+ PTR_ERR(notify));
+ break;
+ }
+
+ genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
+ GFP_KERNEL);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ovs_dp_device_notifier = {
+ .notifier_call = dp_device_event
+};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
new file mode 100644
index 00000000000..fe7f020a843
--- /dev/null
+++ b/net/openvswitch/flow.c
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+static struct kmem_cache *flow_cache;
+
+static int check_header(struct sk_buff *skb, int len)
+{
+ if (unlikely(skb->len < len))
+ return -EINVAL;
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return -ENOMEM;
+ return 0;
+}
+
+static bool arphdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_network_offset(skb) +
+ sizeof(struct arp_eth_header));
+}
+
+static int check_iphdr(struct sk_buff *skb)
+{
+ unsigned int nh_ofs = skb_network_offset(skb);
+ unsigned int ip_len;
+ int err;
+
+ err = check_header(skb, nh_ofs + sizeof(struct iphdr));
+ if (unlikely(err))
+ return err;
+
+ ip_len = ip_hdrlen(skb);
+ if (unlikely(ip_len < sizeof(struct iphdr) ||
+ skb->len < nh_ofs + ip_len))
+ return -EINVAL;
+
+ skb_set_transport_header(skb, nh_ofs + ip_len);
+ return 0;
+}
+
+static bool tcphdr_ok(struct sk_buff *skb)
+{
+ int th_ofs = skb_transport_offset(skb);
+ int tcp_len;
+
+ if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
+ return false;
+
+ tcp_len = tcp_hdrlen(skb);
+ if (unlikely(tcp_len < sizeof(struct tcphdr) ||
+ skb->len < th_ofs + tcp_len))
+ return false;
+
+ return true;
+}
+
+static bool udphdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+}
+
+static bool icmphdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct icmphdr));
+}
+
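+/* Translate a last-used time in jiffies into milliseconds on the monotonic
+ * clock, as exported to userspace in OVS_FLOW_ATTR_USED. */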
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
+{
+ struct timespec cur_ts;
+ u64 cur_ms, idle_ms;
+
+ ktime_get_ts(&cur_ts);
+ idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+ cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+ cur_ts.tv_nsec / NSEC_PER_MSEC;
+
+ return cur_ms - idle_ms;
+}
+
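+/* Length of a flow key up to and including 'field'; used to limit how much
+ * of the key takes part in hashing and comparison. */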
+#define SW_FLOW_KEY_OFFSET(field) \
+ (offsetof(struct sw_flow_key, field) + \
+ FIELD_SIZEOF(struct sw_flow_key, field))
+
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
+ int *key_lenp)
+{
+ unsigned int nh_ofs = skb_network_offset(skb);
+ unsigned int nh_len;
+ int payload_ofs;
+ struct ipv6hdr *nh;
+ uint8_t nexthdr;
+ __be16 frag_off;
+ int err;
+
+ *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
+
+ err = check_header(skb, nh_ofs + sizeof(*nh));
+ if (unlikely(err))
+ return err;
+
+ nh = ipv6_hdr(skb);
+ nexthdr = nh->nexthdr;
+ payload_ofs = (u8 *)(nh + 1) - skb->data;
+
+ key->ip.proto = NEXTHDR_NONE;
+ key->ip.tos = ipv6_get_dsfield(nh);
+ key->ip.ttl = nh->hop_limit;
+ key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+ key->ipv6.addr.src = nh->saddr;
+ key->ipv6.addr.dst = nh->daddr;
+
+ payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
+ if (unlikely(payload_ofs < 0))
+ return -EINVAL;
+
+ if (frag_off) {
+ if (frag_off & htons(~0x7))
+ key->ip.frag = OVS_FRAG_TYPE_LATER;
+ else
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+ }
+
+ nh_len = payload_ofs - nh_ofs;
+ skb_set_transport_header(skb, nh_ofs + nh_len);
+ key->ip.proto = nexthdr;
+ return nh_len;
+}
+
+static bool icmp6hdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct icmp6hdr));
+}
+
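+/* The TCP flags live in byte 13 of the TCP header; 0x3f keeps the six
+ * classic flag bits (URG|ACK|PSH|RST|SYN|FIN). */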
+#define TCP_FLAGS_OFFSET 13
+#define TCP_FLAG_MASK 0x3f
+
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+{
+ u8 tcp_flags = 0;
+
+ if (flow->key.eth.type == htons(ETH_P_IP) &&
+ flow->key.ip.proto == IPPROTO_TCP) {
+ u8 *tcp = (u8 *)tcp_hdr(skb);
+ tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
+ }
+
+ spin_lock(&flow->lock);
+ flow->used = jiffies;
+ flow->packet_count++;
+ flow->byte_count += skb->len;
+ flow->tcp_flags |= tcp_flags;
+ spin_unlock(&flow->lock);
+}
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+{
+ int actions_len = nla_len(actions);
+ struct sw_flow_actions *sfa;
+
+ /* At least DP_MAX_PORTS actions are required to be able to flood a
+ * packet to every port. Factor of 2 allows for setting VLAN tags,
+ * etc. */
+ if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+ return ERR_PTR(-EINVAL);
+
+ sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+ if (!sfa)
+ return ERR_PTR(-ENOMEM);
+
+ sfa->actions_len = actions_len;
+ memcpy(sfa->actions, nla_data(actions), actions_len);
+ return sfa;
+}
+
+struct sw_flow *ovs_flow_alloc(void)
+{
+ struct sw_flow *flow;
+
+ flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&flow->lock);
+ flow->sf_acts = NULL;
+
+ return flow;
+}
+
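+/* Mix the flow hash with this table's random seed and mask the result into
+ * the bucket array (whose size is always a power of two). */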
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+{
+ hash = jhash_1word(hash, table->hash_seed);
+ return flex_array_get(table->buckets,
+ (hash & (table->n_buckets - 1)));
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+ struct flex_array *buckets;
+ int i, err;
+
+ buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ n_buckets, GFP_KERNEL);
+ if (!buckets)
+ return NULL;
+
+ err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+ if (err) {
+ flex_array_free(buckets);
+ return NULL;
+ }
+
+ for (i = 0; i < n_buckets; i++)
+ INIT_HLIST_HEAD((struct hlist_head *)
+ flex_array_get(buckets, i));
+
+ return buckets;
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+ flex_array_free(buckets);
+}
+
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+ struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ table->buckets = alloc_buckets(new_size);
+
+ if (!table->buckets) {
+ kfree(table);
+ return NULL;
+ }
+ table->n_buckets = new_size;
+ table->count = 0;
+ table->node_ver = 0;
+ table->keep_flows = false;
+ get_random_bytes(&table->hash_seed, sizeof(u32));
+
+ return table;
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+ int i;
+
+ if (!table)
+ return;
+
+ if (table->keep_flows)
+ goto skip_flows;
+
+ for (i = 0; i < table->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(table->buckets, i);
+ struct hlist_node *node, *n;
+ int ver = table->node_ver;
+
+ hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+ hlist_del_rcu(&flow->hash_node[ver]);
+ ovs_flow_free(flow);
+ }
+ }
+
+skip_flows:
+ free_buckets(table->buckets);
+ kfree(table);
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+ struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+ ovs_flow_tbl_destroy(table);
+}
+
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+{
+ if (!table)
+ return;
+
+ call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+}
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+{
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ int ver;
+ int i;
+
+ ver = table->node_ver;
+ while (*bucket < table->n_buckets) {
+ i = 0;
+ head = flex_array_get(table->buckets, *bucket);
+ hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+ if (i < *last) {
+ i++;
+ continue;
+ }
+ *last = i + 1;
+ return flow;
+ }
+ (*bucket)++;
+ *last = 0;
+ }
+
+ return NULL;
+}
+
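+/* Link every flow in 'old' into 'new' via the other hash_node slot, so each
+ * flow can sit in both tables while the switchover is in progress. */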
+static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
+{
+ int old_ver;
+ int i;
+
+ old_ver = old->node_ver;
+ new->node_ver = !old_ver;
+
+ /* Insert in new table. */
+ for (i = 0; i < old->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ struct hlist_node *n;
+
+ head = flex_array_get(old->buckets, i);
+
+ hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+ ovs_flow_tbl_insert(new, flow);
+ }
+ old->keep_flows = true;
+}
+
+static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
+{
+ struct flow_table *new_table;
+
+ new_table = ovs_flow_tbl_alloc(n_buckets);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ flow_table_copy_flows(table, new_table);
+
+ return new_table;
+}
+
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+{
+ return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
+{
+ return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
+void ovs_flow_free(struct sw_flow *flow)
+{
+ if (unlikely(!flow))
+ return;
+
+ kfree((struct sw_flow_actions __force *)flow->sf_acts);
+ kmem_cache_free(flow_cache, flow);
+}
+
+/* RCU callback used by ovs_flow_deferred_free. */
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+ struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+ ovs_flow_free(flow);
+}
+
+/* Schedules 'flow' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free(struct sw_flow *flow)
+{
+ call_rcu(&flow->rcu, rcu_free_flow_callback);
+}
+
+/* RCU callback used by ovs_flow_deferred_free_acts. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+ struct sw_flow_actions *sf_acts = container_of(rcu,
+ struct sw_flow_actions, rcu);
+ kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+{
+ call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
+
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ struct qtag_prefix {
+ __be16 eth_type; /* ETH_P_8021Q */
+ __be16 tci;
+ };
+ struct qtag_prefix *qp;
+
+ if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+ return 0;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
+ sizeof(__be16))))
+ return -ENOMEM;
+
+ qp = (struct qtag_prefix *) skb->data;
+ key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
+ __skb_pull(skb, sizeof(struct qtag_prefix));
+
+ return 0;
+}
+
+static __be16 parse_ethertype(struct sk_buff *skb)
+{
+ struct llc_snap_hdr {
+ u8 dsap; /* Always 0xAA */
+ u8 ssap; /* Always 0xAA */
+ u8 ctrl;
+ u8 oui[3];
+ __be16 ethertype;
+ };
+ struct llc_snap_hdr *llc;
+ __be16 proto;
+
+ proto = *(__be16 *) skb->data;
+ __skb_pull(skb, sizeof(__be16));
+
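+ /* Values of 1536 (0x600) and above are EtherTypes; smaller values are
+ * 802.3 frame lengths, handled as LLC/SNAP below. */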
+ if (ntohs(proto) >= 1536)
+ return proto;
+
+ if (skb->len < sizeof(struct llc_snap_hdr))
+ return htons(ETH_P_802_2);
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
+ return htons(0);
+
+ llc = (struct llc_snap_hdr *) skb->data;
+ if (llc->dsap != LLC_SAP_SNAP ||
+ llc->ssap != LLC_SAP_SNAP ||
+ (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
+ return htons(ETH_P_802_2);
+
+ __skb_pull(skb, sizeof(struct llc_snap_hdr));
+ return llc->ethertype;
+}
+
+static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+ int *key_lenp, int nh_len)
+{
+ struct icmp6hdr *icmp = icmp6_hdr(skb);
+ int error = 0;
+ int key_len;
+
+ /* The ICMPv6 type and code fields use the 16-bit transport port
+ * fields, so we need to store them in 16-bit network byte order.
+ */
+ key->ipv6.tp.src = htons(icmp->icmp6_type);
+ key->ipv6.tp.dst = htons(icmp->icmp6_code);
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+
+ if (icmp->icmp6_code == 0 &&
+ (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+ int icmp_len = skb->len - skb_transport_offset(skb);
+ struct nd_msg *nd;
+ int offset;
+
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+ /* In order to process neighbor discovery options, we need the
+ * entire packet.
+ */
+ if (unlikely(icmp_len < sizeof(*nd)))
+ goto out;
+ if (unlikely(skb_linearize(skb))) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ nd = (struct nd_msg *)skb_transport_header(skb);
+ key->ipv6.nd.target = nd->target;
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+ icmp_len -= sizeof(*nd);
+ offset = 0;
+ while (icmp_len >= 8) {
+ struct nd_opt_hdr *nd_opt =
+ (struct nd_opt_hdr *)(nd->opt + offset);
+ int opt_len = nd_opt->nd_opt_len * 8;
+
+ if (unlikely(!opt_len || opt_len > icmp_len))
+ goto invalid;
+
+ /* Store the link layer address if the appropriate
+ * option is provided. It is considered an error if
+ * the same link layer option is specified twice.
+ */
+ if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
+ && opt_len == 8) {
+ if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
+ goto invalid;
+ memcpy(key->ipv6.nd.sll,
+ &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+ } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
+ && opt_len == 8) {
+ if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
+ goto invalid;
+ memcpy(key->ipv6.nd.tll,
+ &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+ }
+
+ icmp_len -= opt_len;
+ offset += opt_len;
+ }
+ }
+
+ goto out;
+
+invalid:
+ memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
+ memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
+ memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
+
+out:
+ *key_lenp = key_len;
+ return error;
+}
+
+/**
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @in_port: port number on which @skb was received.
+ * @key: output flow key
+ * @key_lenp: length of output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header pointers as follows:
+ *
+ * - skb->mac_header: the Ethernet header.
+ *
+ * - skb->network_header: just past the Ethernet header, or just past the
+ * VLAN header, to the first byte of the Ethernet payload.
+ *
+ * - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
+ * on output, then just past the IP header, if one is present and
+ * of a correct length, otherwise the same as skb->network_header.
+ * For other key->dl_type values it is left untouched.
+ */
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+ int *key_lenp)
+{
+ int error = 0;
+ int key_len = SW_FLOW_KEY_OFFSET(eth);
+ struct ethhdr *eth;
+
+ memset(key, 0, sizeof(*key));
+
+ key->phy.priority = skb->priority;
+ key->phy.in_port = in_port;
+
+ skb_reset_mac_header(skb);
+
+ /* Link layer. We are guaranteed to have at least the 14 byte Ethernet
+ * header in the linear data area.
+ */
+ eth = eth_hdr(skb);
+ memcpy(key->eth.src, eth->h_source, ETH_ALEN);
+ memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+
+ __skb_pull(skb, 2 * ETH_ALEN);
+
+ if (vlan_tx_tag_present(skb))
+ key->eth.tci = htons(skb->vlan_tci);
+ else if (eth->h_proto == htons(ETH_P_8021Q))
+ if (unlikely(parse_vlan(skb, key)))
+ return -ENOMEM;
+
+ key->eth.type = parse_ethertype(skb);
+ if (unlikely(key->eth.type == htons(0)))
+ return -ENOMEM;
+
+ skb_reset_network_header(skb);
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+
+ /* Network layer. */
+ if (key->eth.type == htons(ETH_P_IP)) {
+ struct iphdr *nh;
+ __be16 offset;
+
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+
+ error = check_iphdr(skb);
+ if (unlikely(error)) {
+ if (error == -EINVAL) {
+ skb->transport_header = skb->network_header;
+ error = 0;
+ }
+ goto out;
+ }
+
+ nh = ip_hdr(skb);
+ key->ipv4.addr.src = nh->saddr;
+ key->ipv4.addr.dst = nh->daddr;
+
+ key->ip.proto = nh->protocol;
+ key->ip.tos = nh->tos;
+ key->ip.ttl = nh->ttl;
+
+ offset = nh->frag_off & htons(IP_OFFSET);
+ if (offset) {
+ key->ip.frag = OVS_FRAG_TYPE_LATER;
+ goto out;
+ }
+ if (nh->frag_off & htons(IP_MF) ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+ /* Transport layer. */
+ if (key->ip.proto == IPPROTO_TCP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ if (tcphdr_ok(skb)) {
+ struct tcphdr *tcp = tcp_hdr(skb);
+ key->ipv4.tp.src = tcp->source;
+ key->ipv4.tp.dst = tcp->dest;
+ }
+ } else if (key->ip.proto == IPPROTO_UDP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ if (udphdr_ok(skb)) {
+ struct udphdr *udp = udp_hdr(skb);
+ key->ipv4.tp.src = udp->source;
+ key->ipv4.tp.dst = udp->dest;
+ }
+ } else if (key->ip.proto == IPPROTO_ICMP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ if (icmphdr_ok(skb)) {
+ struct icmphdr *icmp = icmp_hdr(skb);
+ /* The ICMP type and code fields use the 16-bit
+ * transport port fields, so we need to store
+ * them in 16-bit network byte order. */
+ key->ipv4.tp.src = htons(icmp->type);
+ key->ipv4.tp.dst = htons(icmp->code);
+ }
+ }
+
+ } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
+ struct arp_eth_header *arp;
+
+ arp = (struct arp_eth_header *)skb_network_header(skb);
+
+ if (arp->ar_hrd == htons(ARPHRD_ETHER)
+ && arp->ar_pro == htons(ETH_P_IP)
+ && arp->ar_hln == ETH_ALEN
+ && arp->ar_pln == 4) {
+
+ /* We only match on the lower 8 bits of the opcode. */
+ if (ntohs(arp->ar_op) <= 0xff)
+ key->ip.proto = ntohs(arp->ar_op);
+
+ if (key->ip.proto == ARPOP_REQUEST
+ || key->ip.proto == ARPOP_REPLY) {
+ memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+ memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+ memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+ memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+ }
+ }
+ } else if (key->eth.type == htons(ETH_P_IPV6)) {
+ int nh_len; /* IPv6 Header + Extensions */
+
+ nh_len = parse_ipv6hdr(skb, key, &key_len);
+ if (unlikely(nh_len < 0)) {
+ if (nh_len == -EINVAL)
+ skb->transport_header = skb->network_header;
+ else
+ error = nh_len;
+ goto out;
+ }
+
+ if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+ goto out;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+ /* Transport layer. */
+ if (key->ip.proto == NEXTHDR_TCP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ if (tcphdr_ok(skb)) {
+ struct tcphdr *tcp = tcp_hdr(skb);
+ key->ipv6.tp.src = tcp->source;
+ key->ipv6.tp.dst = tcp->dest;
+ }
+ } else if (key->ip.proto == NEXTHDR_UDP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ if (udphdr_ok(skb)) {
+ struct udphdr *udp = udp_hdr(skb);
+ key->ipv6.tp.src = udp->source;
+ key->ipv6.tp.dst = udp->dest;
+ }
+ } else if (key->ip.proto == NEXTHDR_ICMP) {
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ if (icmp6hdr_ok(skb)) {
+ error = parse_icmpv6(skb, key, &key_len, nh_len);
+ if (error < 0)
+ goto out;
+ }
+ }
+ }
+
+out:
+ *key_lenp = key_len;
+ return error;
+}
+
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+{
+ return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int key_len)
+{
+ struct sw_flow *flow;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ u32 hash;
+
+ hash = ovs_flow_hash(key, key_len);
+
+ head = find_bucket(table, hash);
+ hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+
+ if (flow->hash == hash &&
+ !memcmp(&flow->key, key, key_len)) {
+ return flow;
+ }
+ }
+ return NULL;
+}
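+
+/* Typical receive-path usage of ovs_flow_extract() and ovs_flow_tbl_lookup()
+ * (a minimal sketch of what a caller such as the datapath packet handler
+ * might do; "table" stands for the datapath's current flow table and the
+ * other local names are illustrative only):
+ *
+ *	struct sw_flow_key key;
+ *	struct sw_flow *flow;
+ *	int key_len, err;
+ *
+ *	err = ovs_flow_extract(skb, in_port, &key, &key_len);
+ *	if (!err)
+ *		flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ *
+ * A NULL return from the lookup means that no flow matched, in which case the
+ * packet is normally handed to userspace as an upcall.
+ */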
+
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(table, flow->hash);
+ hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+ table->count++;
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+ hlist_del_rcu(&flow->hash_node[table->node_ver]);
+ table->count--;
+ BUG_ON(table->count < 0);
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+ [OVS_KEY_ATTR_ENCAP] = -1,
+ [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+ [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+ [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+ [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+ [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+ [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+ [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+ [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+ [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+ [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+ [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+ [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+ [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+};
+
+static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+ const struct nlattr *a[], u32 *attrs)
+{
+ const struct ovs_key_icmp *icmp_key;
+ const struct ovs_key_tcp *tcp_key;
+ const struct ovs_key_udp *udp_key;
+
+ switch (swkey->ip.proto) {
+ case IPPROTO_TCP:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+ swkey->ipv4.tp.src = tcp_key->tcp_src;
+ swkey->ipv4.tp.dst = tcp_key->tcp_dst;
+ break;
+
+ case IPPROTO_UDP:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+ swkey->ipv4.tp.src = udp_key->udp_src;
+ swkey->ipv4.tp.dst = udp_key->udp_dst;
+ break;
+
+ case IPPROTO_ICMP:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+ icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+ swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
+ swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
+ break;
+ }
+
+ return 0;
+}
+
+static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+ const struct nlattr *a[], u32 *attrs)
+{
+ const struct ovs_key_icmpv6 *icmpv6_key;
+ const struct ovs_key_tcp *tcp_key;
+ const struct ovs_key_udp *udp_key;
+
+ switch (swkey->ip.proto) {
+ case IPPROTO_TCP:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+ swkey->ipv6.tp.src = tcp_key->tcp_src;
+ swkey->ipv6.tp.dst = tcp_key->tcp_dst;
+ break;
+
+ case IPPROTO_UDP:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+ swkey->ipv6.tp.src = udp_key->udp_src;
+ swkey->ipv6.tp.dst = udp_key->udp_dst;
+ break;
+
+ case IPPROTO_ICMPV6:
+ if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+ icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+ swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
+ swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+
+ if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+ swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+ const struct ovs_key_nd *nd_key;
+
+ if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_ND);
+
+ *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+ nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+ memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
+ sizeof(swkey->ipv6.nd.target));
+ memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
+ memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u32 *attrsp)
+{
+ const struct nlattr *nla;
+ u32 attrs;
+ int rem;
+
+ attrs = 0;
+ nla_for_each_nested(nla, attr, rem) {
+ u16 type = nla_type(nla);
+ int expected_len;
+
+ if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+ return -EINVAL;
+
+ expected_len = ovs_key_lens[type];
+ if (nla_len(nla) != expected_len && expected_len != -1)
+ return -EINVAL;
+
+ attrs |= 1 << type;
+ a[type] = nla;
+ }
+ if (rem)
+ return -EINVAL;
+
+ *attrsp = attrs;
+ return 0;
+}
+
+/**
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * @swkey: receives the extracted flow key.
+ * @key_lenp: number of bytes used in @swkey.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ */
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+ const struct nlattr *attr)
+{
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ const struct ovs_key_ethernet *eth_key;
+ int key_len;
+ u32 attrs;
+ int err;
+
+ memset(swkey, 0, sizeof(struct sw_flow_key));
+ key_len = SW_FLOW_KEY_OFFSET(eth);
+
+ err = parse_flow_nlattrs(attr, a, &attrs);
+ if (err)
+ return err;
+
+ /* Metadata attributes. */
+ if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+ swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
+ attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ }
+ if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+ u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+ if (in_port >= DP_MAX_PORTS)
+ return -EINVAL;
+ swkey->phy.in_port = in_port;
+ attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+ } else {
+ swkey->phy.in_port = USHRT_MAX;
+ }
+
+ /* Data attributes. */
+ if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
+ return -EINVAL;
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+
+ eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+ memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
+ memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+
+ if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
+ nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
+ const struct nlattr *encap;
+ __be16 tci;
+
+ if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
+ (1 << OVS_KEY_ATTR_ETHERTYPE) |
+ (1 << OVS_KEY_ATTR_ENCAP)))
+ return -EINVAL;
+
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ if (tci & htons(VLAN_TAG_PRESENT)) {
+ swkey->eth.tci = tci;
+
+ err = parse_flow_nlattrs(encap, a, &attrs);
+ if (err)
+ return err;
+ } else if (!tci) {
+ /* Corner case for truncated 802.1Q header. */
+ if (nla_len(encap))
+ return -EINVAL;
+
+ swkey->eth.type = htons(ETH_P_8021Q);
+ *key_lenp = key_len;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+ swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+ if (ntohs(swkey->eth.type) < 1536)
+ return -EINVAL;
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ } else {
+ swkey->eth.type = htons(ETH_P_802_2);
+ }
+
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ const struct ovs_key_ipv4 *ipv4_key;
+
+ if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
+ return -EINVAL;
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+ ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+ if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+ return -EINVAL;
+ swkey->ip.proto = ipv4_key->ipv4_proto;
+ swkey->ip.tos = ipv4_key->ipv4_tos;
+ swkey->ip.ttl = ipv4_key->ipv4_ttl;
+ swkey->ip.frag = ipv4_key->ipv4_frag;
+ swkey->ipv4.addr.src = ipv4_key->ipv4_src;
+ swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
+
+ if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+ err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+ if (err)
+ return err;
+ }
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ const struct ovs_key_ipv6 *ipv6_key;
+
+ if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
+ return -EINVAL;
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+ key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
+ ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+ if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+ return -EINVAL;
+ swkey->ipv6.label = ipv6_key->ipv6_label;
+ swkey->ip.proto = ipv6_key->ipv6_proto;
+ swkey->ip.tos = ipv6_key->ipv6_tclass;
+ swkey->ip.ttl = ipv6_key->ipv6_hlimit;
+ swkey->ip.frag = ipv6_key->ipv6_frag;
+ memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
+ sizeof(swkey->ipv6.addr.src));
+ memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
+ sizeof(swkey->ipv6.addr.dst));
+
+ if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+ err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+ if (err)
+ return err;
+ }
+ } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+ const struct ovs_key_arp *arp_key;
+
+ if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+ return -EINVAL;
+ attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+
+ key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+ arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+ swkey->ipv4.addr.src = arp_key->arp_sip;
+ swkey->ipv4.addr.dst = arp_key->arp_tip;
+ if (arp_key->arp_op & htons(0xff00))
+ return -EINVAL;
+ swkey->ip.proto = ntohs(arp_key->arp_op);
+ memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
+ memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+ }
+
+ if (attrs)
+ return -EINVAL;
+ *key_lenp = key_len;
+
+ return 0;
+}
+
+/**
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
+ * @priority: receives the extracted packet priority.
+ * @in_port: receives the extracted input port.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of it to
+ * get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+ const struct nlattr *attr)
+{
+ const struct nlattr *nla;
+ int rem;
+
+ *in_port = USHRT_MAX;
+ *priority = 0;
+
+ nla_for_each_nested(nla, attr, rem) {
+ int type = nla_type(nla);
+
+ if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+ if (nla_len(nla) != ovs_key_lens[type])
+ return -EINVAL;
+
+ switch (type) {
+ case OVS_KEY_ATTR_PRIORITY:
+ *priority = nla_get_u32(nla);
+ break;
+
+ case OVS_KEY_ATTR_IN_PORT:
+ if (nla_get_u32(nla) >= DP_MAX_PORTS)
+ return -EINVAL;
+ *in_port = nla_get_u32(nla);
+ break;
+ }
+ }
+ }
+ if (rem)
+ return -EINVAL;
+ return 0;
+}
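+
+/* Sketch of how the metadata parser pairs with ovs_flow_extract() when a
+ * packet is injected from userspace rather than received on a port (the real
+ * call site lives in the datapath code, not in this file; error handling is
+ * elided and the local variable names are illustrative):
+ *
+ *	err = ovs_flow_extract(skb, USHRT_MAX, &key, &key_len);
+ *	...
+ *	err = ovs_flow_metadata_from_nlattrs(&key.phy.priority,
+ *					     &key.phy.in_port, attr);
+ *
+ * The packet itself supplies everything except the priority and input port,
+ * which come from the Netlink attributes.
+ */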
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+{
+ struct ovs_key_ethernet *eth_key;
+ struct nlattr *nla, *encap;
+
+ if (swkey->phy.priority)
+ NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+
+ if (swkey->phy.in_port != USHRT_MAX)
+ NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+ if (!nla)
+ goto nla_put_failure;
+ eth_key = nla_data(nla);
+ memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
+ memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+
+ if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+ NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
+ NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+ encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+ if (!swkey->eth.tci)
+ goto unencap;
+ } else {
+ encap = NULL;
+ }
+
+ if (swkey->eth.type == htons(ETH_P_802_2))
+ goto unencap;
+
+ NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ struct ovs_key_ipv4 *ipv4_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+ if (!nla)
+ goto nla_put_failure;
+ ipv4_key = nla_data(nla);
+ ipv4_key->ipv4_src = swkey->ipv4.addr.src;
+ ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
+ ipv4_key->ipv4_proto = swkey->ip.proto;
+ ipv4_key->ipv4_tos = swkey->ip.tos;
+ ipv4_key->ipv4_ttl = swkey->ip.ttl;
+ ipv4_key->ipv4_frag = swkey->ip.frag;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ struct ovs_key_ipv6 *ipv6_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+ if (!nla)
+ goto nla_put_failure;
+ ipv6_key = nla_data(nla);
+ memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+ sizeof(ipv6_key->ipv6_src));
+ memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+ sizeof(ipv6_key->ipv6_dst));
+ ipv6_key->ipv6_label = swkey->ipv6.label;
+ ipv6_key->ipv6_proto = swkey->ip.proto;
+ ipv6_key->ipv6_tclass = swkey->ip.tos;
+ ipv6_key->ipv6_hlimit = swkey->ip.ttl;
+ ipv6_key->ipv6_frag = swkey->ip.frag;
+ } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+ struct ovs_key_arp *arp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+ if (!nla)
+ goto nla_put_failure;
+ arp_key = nla_data(nla);
+ memset(arp_key, 0, sizeof(struct ovs_key_arp));
+ arp_key->arp_sip = swkey->ipv4.addr.src;
+ arp_key->arp_tip = swkey->ipv4.addr.dst;
+ arp_key->arp_op = htons(swkey->ip.proto);
+ memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
+ memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+ }
+
+ if ((swkey->eth.type == htons(ETH_P_IP) ||
+ swkey->eth.type == htons(ETH_P_IPV6)) &&
+ swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+ if (swkey->ip.proto == IPPROTO_TCP) {
+ struct ovs_key_tcp *tcp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+ if (!nla)
+ goto nla_put_failure;
+ tcp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ tcp_key->tcp_src = swkey->ipv4.tp.src;
+ tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ tcp_key->tcp_src = swkey->ipv6.tp.src;
+ tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+ }
+ } else if (swkey->ip.proto == IPPROTO_UDP) {
+ struct ovs_key_udp *udp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+ if (!nla)
+ goto nla_put_failure;
+ udp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ udp_key->udp_src = swkey->ipv4.tp.src;
+ udp_key->udp_dst = swkey->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ udp_key->udp_src = swkey->ipv6.tp.src;
+ udp_key->udp_dst = swkey->ipv6.tp.dst;
+ }
+ } else if (swkey->eth.type == htons(ETH_P_IP) &&
+ swkey->ip.proto == IPPROTO_ICMP) {
+ struct ovs_key_icmp *icmp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+ if (!nla)
+ goto nla_put_failure;
+ icmp_key = nla_data(nla);
+ icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
+ icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+ } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+ swkey->ip.proto == IPPROTO_ICMPV6) {
+ struct ovs_key_icmpv6 *icmpv6_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+ sizeof(*icmpv6_key));
+ if (!nla)
+ goto nla_put_failure;
+ icmpv6_key = nla_data(nla);
+ icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
+ icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+
+ if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+ struct ovs_key_nd *nd_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+ if (!nla)
+ goto nla_put_failure;
+ nd_key = nla_data(nla);
+ memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+ sizeof(nd_key->nd_target));
+ memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
+ memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+ }
+ }
+ }
+
+unencap:
+ if (encap)
+ nla_nest_end(skb, encap);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
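+
+/* ovs_flow_to_nlattrs() is the inverse of ovs_flow_from_nlattrs() above;
+ * together they give a round trip between a struct sw_flow_key and its
+ * Netlink representation. A sketch of serialising a key into a reply message
+ * (a hypothetical caller; "reply" is not a name used in this file):
+ *
+ *	nla = nla_nest_start(reply, OVS_FLOW_ATTR_KEY);
+ *	if (!nla)
+ *		goto error;
+ *	err = ovs_flow_to_nlattrs(&flow->key, reply);
+ *	nla_nest_end(reply, nla);
+ *
+ * FLOW_BUFSIZE in flow.h gives an upper bound on the space this can consume.
+ */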
+
+/* Initializes the flow module.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+ flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+ 0, NULL);
+ if (flow_cache == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Uninitializes the flow module. */
+void ovs_flow_exit(void)
+{
+ kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
new file mode 100644
index 00000000000..2747dc2c4ac
--- /dev/null
+++ b/net/openvswitch/flow.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_H
+#define FLOW_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+#include <net/inet_ecn.h>
+
+struct sk_buff;
+
+struct sw_flow_actions {
+ struct rcu_head rcu;
+ u32 actions_len;
+ struct nlattr actions[];
+};
+
+struct sw_flow_key {
+ struct {
+ u32 priority; /* Packet QoS priority. */
+ u16 in_port; /* Input switch port (or USHRT_MAX). */
+ } phy;
+ struct {
+ u8 src[ETH_ALEN]; /* Ethernet source address. */
+ u8 dst[ETH_ALEN]; /* Ethernet destination address. */
+ __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+ __be16 type; /* Ethernet frame type. */
+ } eth;
+ struct {
+ u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */
+ u8 tos; /* IP ToS. */
+ u8 ttl; /* IP TTL/hop limit. */
+ u8 frag; /* One of OVS_FRAG_TYPE_*. */
+ } ip;
+ union {
+ struct {
+ struct {
+ __be32 src; /* IP source address. */
+ __be32 dst; /* IP destination address. */
+ } addr;
+ union {
+ struct {
+ __be16 src; /* TCP/UDP source port. */
+ __be16 dst; /* TCP/UDP destination port. */
+ } tp;
+ struct {
+ u8 sha[ETH_ALEN]; /* ARP source hardware address. */
+ u8 tha[ETH_ALEN]; /* ARP target hardware address. */
+ } arp;
+ };
+ } ipv4;
+ struct {
+ struct {
+ struct in6_addr src; /* IPv6 source address. */
+ struct in6_addr dst; /* IPv6 destination address. */
+ } addr;
+ __be32 label; /* IPv6 flow label. */
+ struct {
+ __be16 src; /* TCP/UDP source port. */
+ __be16 dst; /* TCP/UDP destination port. */
+ } tp;
+ struct {
+ struct in6_addr target; /* ND target address. */
+ u8 sll[ETH_ALEN]; /* ND source link layer address. */
+ u8 tll[ETH_ALEN]; /* ND target link layer address. */
+ } nd;
+ } ipv6;
+ };
+};
+
+struct sw_flow {
+ struct rcu_head rcu;
+ struct hlist_node hash_node[2];
+ u32 hash;
+
+ struct sw_flow_key key;
+ struct sw_flow_actions __rcu *sf_acts;
+
+ spinlock_t lock; /* Lock for values below. */
+ unsigned long used; /* Last used time (in jiffies). */
+ u64 packet_count; /* Number of packets matched. */
+ u64 byte_count; /* Number of bytes matched. */
+ u8 tcp_flags; /* Union of seen TCP flags. */
+};
+
+struct arp_eth_header {
+ __be16 ar_hrd; /* format of hardware address */
+ __be16 ar_pro; /* format of protocol address */
+ unsigned char ar_hln; /* length of hardware address */
+ unsigned char ar_pln; /* length of protocol address */
+ __be16 ar_op; /* ARP opcode (command) */
+
+ /* Ethernet+IPv4 specific members. */
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+} __packed;
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
+void ovs_flow_free(struct sw_flow *flow);
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
+
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+ int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
+
+/* Upper bound on the length of a nlattr-formatted flow key. The longest
+ * nlattr-formatted flow key would be:
+ *
+ * struct pad nl hdr total
+ * ------ --- ------ -----
+ * OVS_KEY_ATTR_PRIORITY 4 -- 4 8
+ * OVS_KEY_ATTR_IN_PORT 4 -- 4 8
+ * OVS_KEY_ATTR_ETHERNET 12 -- 4 16
+ * OVS_KEY_ATTR_8021Q 4 -- 4 8
+ * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8
+ * OVS_KEY_ATTR_IPV6 40 -- 4 44
+ * OVS_KEY_ATTR_ICMPV6 2 2 4 8
+ * OVS_KEY_ATTR_ND 28 -- 4 32
+ * -------------------------------------------------
+ * total 132
+ */
+#define FLOW_BUFSIZE 132
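+/* Spelling out the arithmetic from the table above:
+ * 8 + 8 + 16 + 8 + 8 + 44 + 8 + 32 = 132. */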
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+ const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+ const struct nlattr *);
+
+#define TBL_MIN_BUCKETS 1024
+
+struct flow_table {
+ struct flex_array *buckets;
+ unsigned int count, n_buckets;
+ struct rcu_head rcu;
+ int node_ver;
+ u32 hash_seed;
+ bool keep_flows;
+};
+
+static inline int ovs_flow_tbl_count(struct flow_table *table)
+{
+ return table->count;
+}
+
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
+{
+ return (table->count > table->n_buckets);
+}
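+
+/* Sketch of the intended expand-on-insert pattern (the real call site is in
+ * the datapath code; "dp->table" here stands for the datapath's RCU-protected
+ * flow table pointer and is illustrative):
+ *
+ *	if (ovs_flow_tbl_need_to_expand(table)) {
+ *		struct flow_table *new_table = ovs_flow_tbl_expand(table);
+ *
+ *		if (!IS_ERR_OR_NULL(new_table)) {
+ *			rcu_assign_pointer(dp->table, new_table);
+ *			ovs_flow_tbl_deferred_destroy(table);
+ *			table = new_table;
+ *		}
+ *	}
+ *	ovs_flow_tbl_insert(table, flow);
+ */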
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
+
+#endif /* flow.h */
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
new file mode 100644
index 00000000000..8fc28b86f2b
--- /dev/null
+++ b/net/openvswitch/vport-internal_dev.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+struct internal_dev {
+ struct vport *vport;
+};
+
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
+{
+ return netdev_priv(netdev);
+}
+
+/* This function is only called by the kernel network layer. */
+static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct vport *vport = ovs_internal_dev_get_vport(netdev);
+ struct ovs_vport_stats vport_stats;
+
+ ovs_vport_get_stats(vport, &vport_stats);
+
+ /* The tx and rx stats need to be swapped because the
+ * switch and host OS have opposite perspectives. */
+ stats->rx_packets = vport_stats.tx_packets;
+ stats->tx_packets = vport_stats.rx_packets;
+ stats->rx_bytes = vport_stats.tx_bytes;
+ stats->tx_bytes = vport_stats.rx_bytes;
+ stats->rx_errors = vport_stats.tx_errors;
+ stats->tx_errors = vport_stats.rx_errors;
+ stats->rx_dropped = vport_stats.tx_dropped;
+ stats->tx_dropped = vport_stats.rx_dropped;
+
+ return stats;
+}
+
+static int internal_dev_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+/* Called with rcu_read_lock_bh. */
+static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ rcu_read_lock();
+ ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int internal_dev_open(struct net_device *netdev)
+{
+ netif_start_queue(netdev);
+ return 0;
+}
+
+static int internal_dev_stop(struct net_device *netdev)
+{
+ netif_stop_queue(netdev);
+ return 0;
+}
+
+static void internal_dev_getinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "openvswitch");
+}
+
+static const struct ethtool_ops internal_dev_ethtool_ops = {
+ .get_drvinfo = internal_dev_getinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
+{
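+ /* 68 bytes is the minimum IPv4 MTU (RFC 791). */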
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+static void internal_dev_destructor(struct net_device *dev)
+{
+ struct vport *vport = ovs_internal_dev_get_vport(dev);
+
+ ovs_vport_free(vport);
+ free_netdev(dev);
+}
+
+static const struct net_device_ops internal_dev_netdev_ops = {
+ .ndo_open = internal_dev_open,
+ .ndo_stop = internal_dev_stop,
+ .ndo_start_xmit = internal_dev_xmit,
+ .ndo_set_mac_address = internal_dev_mac_addr,
+ .ndo_change_mtu = internal_dev_change_mtu,
+ .ndo_get_stats64 = internal_dev_get_stats,
+};
+
+static void do_setup(struct net_device *netdev)
+{
+ ether_setup(netdev);
+
+ netdev->netdev_ops = &internal_dev_netdev_ops;
+
+ netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ netdev->destructor = internal_dev_destructor;
+ SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+ netdev->tx_queue_len = 0;
+
+ netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_TX;
+ netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+ random_ether_addr(netdev->dev_addr);
+}
+
+static struct vport *internal_dev_create(const struct vport_parms *parms)
+{
+ struct vport *vport;
+ struct netdev_vport *netdev_vport;
+ struct internal_dev *internal_dev;
+ int err;
+
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_internal_vport_ops, parms);
+ if (IS_ERR(vport)) {
+ err = PTR_ERR(vport);
+ goto error;
+ }
+
+ netdev_vport = netdev_vport_priv(vport);
+
+ netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+ parms->name, do_setup);
+ if (!netdev_vport->dev) {
+ err = -ENOMEM;
+ goto error_free_vport;
+ }
+
+ internal_dev = internal_dev_priv(netdev_vport->dev);
+ internal_dev->vport = vport;
+
+ err = register_netdevice(netdev_vport->dev);
+ if (err)
+ goto error_free_netdev;
+
+ dev_set_promiscuity(netdev_vport->dev, 1);
+ netif_start_queue(netdev_vport->dev);
+
+ return vport;
+
+error_free_netdev:
+ free_netdev(netdev_vport->dev);
+error_free_vport:
+ ovs_vport_free(vport);
+error:
+ return ERR_PTR(err);
+}
+
+static void internal_dev_destroy(struct vport *vport)
+{
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+ netif_stop_queue(netdev_vport->dev);
+ dev_set_promiscuity(netdev_vport->dev, -1);
+
+ /* unregister_netdevice() waits for an RCU grace period. */
+ unregister_netdevice(netdev_vport->dev);
+}
+
+static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+{
+ struct net_device *netdev = netdev_vport_priv(vport)->dev;
+ int len;
+
+ len = skb->len;
+ skb->dev = netdev;
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_rx(skb);
+
+ return len;
+}
+
+const struct vport_ops ovs_internal_vport_ops = {
+ .type = OVS_VPORT_TYPE_INTERNAL,
+ .create = internal_dev_create,
+ .destroy = internal_dev_destroy,
+ .get_name = ovs_netdev_get_name,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .send = internal_dev_recv,
+};
+
+int ovs_is_internal_dev(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &internal_dev_netdev_ops;
+}
+
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
+{
+ if (!ovs_is_internal_dev(netdev))
+ return NULL;
+
+ return internal_dev_priv(netdev)->vport;
+}
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
new file mode 100644
index 00000000000..3454447c5f1
--- /dev/null
+++ b/net/openvswitch/vport-internal_dev.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_INTERNAL_DEV_H
+#define VPORT_INTERNAL_DEV_H 1
+
+#include "datapath.h"
+#include "vport.h"
+
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
+
+#endif /* vport-internal_dev.h */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
new file mode 100644
index 00000000000..c1068aed03d
--- /dev/null
+++ b/net/openvswitch/vport-netdev.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if_arp.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/llc.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+#include <net/llc.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* Must be called with rcu_read_lock. */
+static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+{
+ if (unlikely(!vport)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Make our own copy of the packet. Otherwise we will mangle the
+ * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
+ * (No one comes after us, since netdev_frame_hook() returns
+ * RX_HANDLER_CONSUMED to indicate that we took the packet.) */
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return;
+
+ skb_push(skb, ETH_HLEN);
+ ovs_vport_receive(vport, skb);
+}
+
+/* Called with rcu_read_lock and bottom-halves disabled. */
+static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct vport *vport;
+
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
+ vport = ovs_netdev_get_vport(skb->dev);
+
+ netdev_port_receive(vport, skb);
+
+ return RX_HANDLER_CONSUMED;
+}
+
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+ struct vport *vport;
+ struct netdev_vport *netdev_vport;
+ int err;
+
+ vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+ &ovs_netdev_vport_ops, parms);
+ if (IS_ERR(vport)) {
+ err = PTR_ERR(vport);
+ goto error;
+ }
+
+ netdev_vport = netdev_vport_priv(vport);
+
+ netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+ if (!netdev_vport->dev) {
+ err = -ENODEV;
+ goto error_free_vport;
+ }
+
+ if (netdev_vport->dev->flags & IFF_LOOPBACK ||
+ netdev_vport->dev->type != ARPHRD_ETHER ||
+ ovs_is_internal_dev(netdev_vport->dev)) {
+ err = -EINVAL;
+ goto error_put;
+ }
+
+ err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+ vport);
+ if (err)
+ goto error_put;
+
+ dev_set_promiscuity(netdev_vport->dev, 1);
+ netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+
+ return vport;
+
+error_put:
+ dev_put(netdev_vport->dev);
+error_free_vport:
+ ovs_vport_free(vport);
+error:
+ return ERR_PTR(err);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+ netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+ netdev_rx_handler_unregister(netdev_vport->dev);
+ dev_set_promiscuity(netdev_vport->dev, -1);
+
+ synchronize_rcu();
+
+ dev_put(netdev_vport->dev);
+ ovs_vport_free(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *vport)
+{
+ const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ return netdev_vport->dev->name;
+}
+
+int ovs_netdev_get_ifindex(const struct vport *vport)
+{
+ const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ return netdev_vport->dev->ifindex;
+}
+
+static unsigned packet_length(const struct sk_buff *skb)
+{
+ unsigned length = skb->len - ETH_HLEN;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ length -= VLAN_HLEN;
+
+ return length;
+}
+
+static int netdev_send(struct vport *vport, struct sk_buff *skb)
+{
+ struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ int mtu = netdev_vport->dev->mtu;
+ int len;
+
+ if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+ if (net_ratelimit())
+ pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+ ovs_dp_name(vport->dp), packet_length(skb), mtu);
+ goto error;
+ }
+
+ if (unlikely(skb_warn_if_lro(skb)))
+ goto error;
+
+ skb->dev = netdev_vport->dev;
+ len = skb->len;
+ dev_queue_xmit(skb);
+
+ return len;
+
+error:
+ kfree_skb(skb);
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+ return 0;
+}
+
+/* Returns NULL if this device is not attached to a datapath. */
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
+{
+ if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
+ return (struct vport *)
+ rcu_dereference_rtnl(dev->rx_handler_data);
+ else
+ return NULL;
+}
+
+const struct vport_ops ovs_netdev_vport_ops = {
+ .type = OVS_VPORT_TYPE_NETDEV,
+ .create = netdev_create,
+ .destroy = netdev_destroy,
+ .get_name = ovs_netdev_get_name,
+ .get_ifindex = ovs_netdev_get_ifindex,
+ .send = netdev_send,
+};
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
new file mode 100644
index 00000000000..fd9b008a0e6
--- /dev/null
+++ b/net/openvswitch/vport-netdev.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_NETDEV_H
+#define VPORT_NETDEV_H 1
+
+#include <linux/netdevice.h>
+
+#include "vport.h"
+
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
+
+struct netdev_vport {
+ struct net_device *dev;
+};
+
+static inline struct netdev_vport *
+netdev_vport_priv(const struct vport *vport)
+{
+ return vport_priv(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+
+#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
new file mode 100644
index 00000000000..7f0ef3794c5
--- /dev/null
+++ b/net/openvswitch/vport.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/dcache.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include "vport.h"
+#include "vport-internal_dev.h"
+
+/* List of statically compiled vport implementations. Don't forget to also
+ * add yours to the list at the bottom of vport.h. */
+static const struct vport_ops *vport_ops_list[] = {
+ &ovs_netdev_vport_ops,
+ &ovs_internal_vport_ops,
+};
+
+/* Protected by RCU read lock for reading, RTNL lock for writing. */
+static struct hlist_head *dev_table;
+#define VPORT_HASH_BUCKETS 1024
+
+/**
+ * ovs_vport_init - initialize vport subsystem
+ *
+ * Called at module load time to initialize the vport subsystem.
+ */
+int ovs_vport_init(void)
+{
+ dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!dev_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ovs_vport_exit - shutdown vport subsystem
+ *
+ * Called at module exit time to shutdown the vport subsystem.
+ */
+void ovs_vport_exit(void)
+{
+ kfree(dev_table);
+}
+
+static struct hlist_head *hash_bucket(const char *name)
+{
+ unsigned int hash = full_name_hash(name, strlen(name));
+ return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
+}
+
+/**
+ * ovs_vport_locate - find a port that has already been created
+ *
+ * @name: name of port to find
+ *
+ * Must be called with RTNL or RCU read lock.
+ */
+struct vport *ovs_vport_locate(const char *name)
+{
+ struct hlist_head *bucket = hash_bucket(name);
+ struct vport *vport;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+ if (!strcmp(name, vport->ops->get_name(vport)))
+ return vport;
+
+ return NULL;
+}
+
+/**
+ * ovs_vport_alloc - allocate and initialize new vport
+ *
+ * @priv_size: Size of private data area to allocate.
+ * @ops: vport device ops
+ * @parms: information (name, type, datapath, port number, etc.) for the new vport
+ *
+ * Allocate and initialize a new vport defined by @ops. The vport will contain
+ * a private data area of size @priv_size that can be accessed using
+ * vport_priv(). vports that are no longer needed should be released with
+ * ovs_vport_free().
+ */
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+ const struct vport_parms *parms)
+{
+ struct vport *vport;
+ size_t alloc_size;
+
+ alloc_size = sizeof(struct vport);
+ if (priv_size) {
+ alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
+ alloc_size += priv_size;
+ }
+
+ vport = kzalloc(alloc_size, GFP_KERNEL);
+ if (!vport)
+ return ERR_PTR(-ENOMEM);
+
+ vport->dp = parms->dp;
+ vport->port_no = parms->port_no;
+ vport->upcall_pid = parms->upcall_pid;
+ vport->ops = ops;
+
+ vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
+ if (!vport->percpu_stats) {
+ kfree(vport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&vport->stats_lock);
+
+ return vport;
+}
+
+/**
+ * ovs_vport_free - uninitialize and free vport
+ *
+ * @vport: vport to free
+ *
+ * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
+ *
+ * The caller must ensure that an RCU grace period has passed since the last
+ * time @vport was in a datapath.
+ */
+void ovs_vport_free(struct vport *vport)
+{
+ free_percpu(vport->percpu_stats);
+ kfree(vport);
+}
+
+/**
+ * ovs_vport_add - add vport device (for kernel callers)
+ *
+ * @parms: Information about new vport.
+ *
+ * Creates a new vport with the specified configuration (which is dependent on
+ * device type). RTNL lock must be held.
+ */
+struct vport *ovs_vport_add(const struct vport_parms *parms)
+{
+ struct vport *vport;
+ int err = 0;
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
+ if (vport_ops_list[i]->type == parms->type) {
+ vport = vport_ops_list[i]->create(parms);
+ if (IS_ERR(vport)) {
+ err = PTR_ERR(vport);
+ goto out;
+ }
+
+ hlist_add_head_rcu(&vport->hash_node,
+ hash_bucket(vport->ops->get_name(vport)));
+ return vport;
+ }
+ }
+
+ err = -EAFNOSUPPORT;
+
+out:
+ return ERR_PTR(err);
+}
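+
+/* A sketch of creating a vport from kernel code (the real caller builds
+ * struct vport_parms from a Netlink request in the datapath code; the values
+ * shown here are illustrative):
+ *
+ *	struct vport_parms parms = {
+ *		.name = "eth0",
+ *		.type = OVS_VPORT_TYPE_NETDEV,
+ *		.options = NULL,
+ *		.dp = dp,
+ *		.port_no = port_no,
+ *		.upcall_pid = upcall_pid,
+ *	};
+ *
+ *	vport = ovs_vport_add(&parms);
+ *	if (IS_ERR(vport))
+ *		return PTR_ERR(vport);
+ */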
+
+/**
+ * ovs_vport_set_options - modify existing vport device (for kernel callers)
+ *
+ * @vport: vport to modify.
+ * @options: New configuration.
+ *
+ * Modifies an existing device with the specified configuration (which is
+ * dependent on device type). RTNL lock must be held.
+ */
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
+{
+ ASSERT_RTNL();
+
+ if (!vport->ops->set_options)
+ return -EOPNOTSUPP;
+ return vport->ops->set_options(vport, options);
+}
+
+/**
+ * ovs_vport_del - delete existing vport device
+ *
+ * @vport: vport to delete.
+ *
+ * Detaches @vport from its datapath and destroys it. RTNL lock must be held.
+ */
+void ovs_vport_del(struct vport *vport)
+{
+ ASSERT_RTNL();
+
+ hlist_del_rcu(&vport->hash_node);
+
+ vport->ops->destroy(vport);
+}
+
+/**
+ * ovs_vport_get_stats - retrieve device stats
+ *
+ * @vport: vport from which to retrieve the stats
+ * @stats: location to store stats
+ *
+ * Retrieves transmit, receive, and error stats for the given device.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ /* We potentially have two sources of stats that need to be combined:
+ * those we have collected ourselves (split into err_stats and
+ * percpu_stats) and device error stats from netdev->get_stats() (for
+ * errors that happen downstream and therefore aren't reported through
+ * our vport_record_error() function).
+ * Stats from the first source are reported by ovs itself
+ * (OVS_VPORT_ATTR_STATS); netdev stats can be read directly via
+ * netlink or ioctl.
+ */
+
+ spin_lock_bh(&vport->stats_lock);
+
+ stats->rx_errors = vport->err_stats.rx_errors;
+ stats->tx_errors = vport->err_stats.tx_errors;
+ stats->tx_dropped = vport->err_stats.tx_dropped;
+ stats->rx_dropped = vport->err_stats.rx_dropped;
+
+ spin_unlock_bh(&vport->stats_lock);
+
+ for_each_possible_cpu(i) {
+ const struct vport_percpu_stats *percpu_stats;
+ struct vport_percpu_stats local_stats;
+ unsigned int start;
+
+ percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+ local_stats = *percpu_stats;
+ } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+ stats->rx_bytes += local_stats.rx_bytes;
+ stats->rx_packets += local_stats.rx_packets;
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+}
+
+/**
+ * ovs_vport_get_options - retrieve device options
+ *
+ * @vport: vport from which to retrieve the options.
+ * @skb: sk_buff where options should be appended.
+ *
+ * Retrieves the configuration of the given device, appending an
+ * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
+ * vport-specific attributes to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
+ * negative error code if a real error occurred. If an error occurs, @skb is
+ * left unmodified.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+ struct nlattr *nla;
+
+ nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
+ if (!nla)
+ return -EMSGSIZE;
+
+ if (vport->ops->get_options) {
+ int err = vport->ops->get_options(vport, skb);
+ if (err) {
+ nla_nest_cancel(skb, nla);
+ return err;
+ }
+ }
+
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
+/**
+ * ovs_vport_receive - pass up received packet to the datapath for processing
+ *
+ * @vport: vport that received the packet
+ * @skb: skb that was received
+ *
+ * Must be called with rcu_read_lock. The packet cannot be shared and
+ * skb->data should point to the Ethernet header.
+ */
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+{
+ struct vport_percpu_stats *stats;
+
+ stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+ u64_stats_update_begin(&stats->sync);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->sync);
+
+ ovs_dp_process_received_packet(vport, skb);
+}
+
+/**
+ * ovs_vport_send - send a packet on a device
+ *
+ * @vport: vport on which to send the packet
+ * @skb: skb to send
+ *
+ * Sends the given packet and returns the length of data sent. Either RTNL
+ * lock or rcu_read_lock must be held.
+ */
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+ int sent = vport->ops->send(vport, skb);
+
+ if (likely(sent)) {
+ struct vport_percpu_stats *stats;
+
+ stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+ u64_stats_update_begin(&stats->sync);
+ stats->tx_packets++;
+ stats->tx_bytes += sent;
+ u64_stats_update_end(&stats->sync);
+ }
+ return sent;
+}
+
+/**
+ * ovs_vport_record_error - indicate device error to generic stats layer
+ *
+ * @vport: vport that encountered the error
+ * @err_type: one of enum vport_err_type types to indicate the error type
+ *
+ * If using the vport generic stats layer, indicate that an error of the given
+ * type has occurred.
+ */
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+{
+ spin_lock(&vport->stats_lock);
+
+ switch (err_type) {
+ case VPORT_E_RX_DROPPED:
+ vport->err_stats.rx_dropped++;
+ break;
+
+ case VPORT_E_RX_ERROR:
+ vport->err_stats.rx_errors++;
+ break;
+
+ case VPORT_E_TX_DROPPED:
+ vport->err_stats.tx_dropped++;
+ break;
+
+ case VPORT_E_TX_ERROR:
+ vport->err_stats.tx_errors++;
+ break;
+ }
+
+ spin_unlock(&vport->stats_lock);
+}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
new file mode 100644
index 00000000000..19609629dab
--- /dev/null
+++ b/net/openvswitch/vport.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_H
+#define VPORT_H 1
+
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/u64_stats_sync.h>
+
+#include "datapath.h"
+
+struct vport;
+struct vport_parms;
+
+/* The following definitions are for users of the vport subsystem: */
+
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
+
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
+
+struct vport *ovs_vport_locate(const char *name);
+
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
+
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_vport_send(struct vport *, struct sk_buff *);
+
+/* The following definitions are for implementers of vport devices: */
+
+struct vport_percpu_stats {
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 tx_bytes;
+ u64 tx_packets;
+ struct u64_stats_sync sync;
+};
+
+struct vport_err_stats {
+ u64 rx_dropped;
+ u64 rx_errors;
+ u64 tx_dropped;
+ u64 tx_errors;
+};
+
+/**
+ * struct vport - one port within a datapath
+ * @rcu: RCU callback head for deferred destruction.
+ * @port_no: Index into @dp's @ports array.
+ * @dp: Datapath to which this port belongs.
+ * @node: Element in @dp's @port_list.
+ * @upcall_pid: The Netlink port to use for packets received on this port that
+ * miss the flow table.
+ * @hash_node: Element in @dev_table hash table in vport.c.
+ * @ops: Class structure.
+ * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+ * @stats_lock: Protects @err_stats.
+ * @err_stats: Points to error statistics used and maintained by vport
+ */
+struct vport {
+ struct rcu_head rcu;
+ u16 port_no;
+ struct datapath *dp;
+ struct list_head node;
+ u32 upcall_pid;
+
+ struct hlist_node hash_node;
+ const struct vport_ops *ops;
+
+ struct vport_percpu_stats __percpu *percpu_stats;
+
+ spinlock_t stats_lock;
+ struct vport_err_stats err_stats;
+};
+
+/**
+ * struct vport_parms - parameters for creating a new vport
+ *
+ * @name: New vport's name.
+ * @type: New vport's type.
+ * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if
+ * none was supplied.
+ * @dp: New vport's datapath.
+ * @port_no: New vport's port number.
+ */
+struct vport_parms {
+ const char *name;
+ enum ovs_vport_type type;
+ struct nlattr *options;
+
+ /* For ovs_vport_alloc(). */
+ struct datapath *dp;
+ u16 port_no;
+ u32 upcall_pid;
+};
+
+/**
+ * struct vport_ops - definition of a type of virtual port
+ *
+ * @type: %OVS_VPORT_TYPE_* value for this type of virtual port.
+ * @create: Create a new vport configured as specified. On success returns
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
+ * @destroy: Destroys a vport. Must call ovs_vport_free() on the vport but not
+ * before an RCU grace period has elapsed.
+ * @set_options: Modify the configuration of an existing vport. May be %NULL
+ * if modification is not supported.
+ * @get_options: Appends vport-specific attributes for the configuration of an
+ * existing vport to a &struct sk_buff. May be %NULL for a vport that does not
+ * have any configuration.
+ * @get_name: Get the device's name.
+ * @get_config: Get the device's configuration.
+ * @get_ifindex: Get the system interface index associated with the device.
+ * May be %NULL if the device does not have an ifindex.
+ * @send: Send a packet on the device. Returns the length of the packet sent.
+ */
+struct vport_ops {
+ enum ovs_vport_type type;
+
+ /* Called with RTNL lock. */
+ struct vport *(*create)(const struct vport_parms *);
+ void (*destroy)(struct vport *);
+
+ int (*set_options)(struct vport *, struct nlattr *);
+ int (*get_options)(const struct vport *, struct sk_buff *);
+
+ /* Called with rcu_read_lock or RTNL lock. */
+ const char *(*get_name)(const struct vport *);
+ void (*get_config)(const struct vport *, void *);
+ int (*get_ifindex)(const struct vport *);
+
+ int (*send)(struct vport *, struct sk_buff *);
+};
+
+enum vport_err_type {
+ VPORT_E_RX_DROPPED,
+ VPORT_E_RX_ERROR,
+ VPORT_E_TX_DROPPED,
+ VPORT_E_TX_ERROR,
+};
+
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+ const struct vport_parms *);
+void ovs_vport_free(struct vport *);
+
+#define VPORT_ALIGN 8
+
+/**
+ * vport_priv - access private data area of vport
+ *
+ * @vport: vport to access
+ *
+ * If a nonzero size was passed in @priv_size of ovs_vport_alloc(), a private data
+ * area was allocated on creation. This allows that area to be accessed and
+ * used for any purpose needed by the vport implementer.
+ */
+static inline void *vport_priv(const struct vport *vport)
+{
+ return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+}
+
+/**
+ * vport_from_priv - lookup vport from private data pointer
+ *
+ * @priv: Start of private data area.
+ *
+ * It is sometimes useful to translate from a pointer to the private data
+ * area to the vport, such as in the case where the private data pointer is
+ * the result of a hash table lookup. @priv must point to the start of the
+ * private data area.
+ */
+static inline struct vport *vport_from_priv(const void *priv)
+{
+ return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+}
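+
+/* Example of the private-data round trip (a sketch; "struct foo_vport" and
+ * "foo_vport_ops" are hypothetical names, not defined in this patch):
+ *
+ *	vport = ovs_vport_alloc(sizeof(struct foo_vport), &foo_vport_ops, parms);
+ *	foo = vport_priv(vport);
+ *	...
+ *	vport = vport_from_priv(foo);
+ *
+ * netdev_vport_priv() in vport-netdev.h is a concrete wrapper around
+ * vport_priv() used by the netdev and internal vports.
+ */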
+
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
+
+/* List of statically compiled vport implementations. Don't forget to also
+ * add yours to the list at the top of vport.c. */
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+
+#endif /* vport.h */
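
The header added above is self-contained, so a compact sketch of how a backend would plug into it may help. This is illustrative only and not part of the patch; every ex_*/ovs_ex_* name is invented, and only the declarations shown in vport.h (ovs_vport_alloc(), ovs_vport_free(), vport_priv(), struct vport_parms, struct vport_ops) are assumed.

/* Hypothetical vport backend skeleton; not from the patch. */
extern const struct vport_ops ovs_ex_vport_ops;	/* would be declared in vport.h */

struct ex_vport_priv {
	struct net_device *dev;		/* whatever backing state the port needs */
};

static struct vport *ex_create(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_alloc(sizeof(struct ex_vport_priv),
				&ovs_ex_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	/* the private area sits right after struct vport, VPORT_ALIGN aligned */
	((struct ex_vport_priv *)vport_priv(vport))->dev = NULL;
	return vport;
}

static void ex_destroy(struct vport *vport)
{
	/* a real backend defers the free until an RCU grace period has
	 * elapsed, e.g. via call_rcu(); simplified here.
	 */
	ovs_vport_free(vport);
}

static const char *ex_get_name(const struct vport *vport)
{
	return "ex0";
}

static int ex_send(struct vport *vport, struct sk_buff *skb)
{
	int len = skb->len;

	kfree_skb(skb);			/* a real port would transmit here */
	return len;
}

const struct vport_ops ovs_ex_vport_ops = {
	.type     = OVS_VPORT_TYPE_INTERNAL,	/* placeholder type value */
	.create   = ex_create,
	.destroy  = ex_destroy,
	.get_name = ex_get_name,
	.send     = ex_send,
};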
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d..2dbb32b988c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1499,10 +1499,11 @@ retry:
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
- skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+ skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
@@ -1630,8 +1631,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
if (snaplen > res)
snaplen = res;
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto drop_n_acct;
if (skb_shared(skb)) {
@@ -1762,8 +1762,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize
- < (unsigned)sk->sk_rcvbuf) {
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
if (skb_shared(skb)) {
copy_skb = skb_clone(skb, GFP_ATOMIC);
} else {
@@ -1944,7 +1943,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, int size_max,
- __be16 proto, unsigned char *addr)
+ __be16 proto, unsigned char *addr, int hlen)
{
union {
struct tpacket_hdr *h1;
@@ -1978,7 +1977,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return -EMSGSIZE;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2053,6 +2052,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
unsigned char *addr;
int len_sum = 0;
int status = 0;
+ int hlen, tlen;
mutex_lock(&po->pg_vec_lock);
@@ -2101,16 +2101,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
status = TP_STATUS_SEND_REQUEST;
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(&po->sk,
- LL_ALLOCATED_SPACE(dev)
- + sizeof(struct sockaddr_ll),
+ hlen + tlen + sizeof(struct sockaddr_ll),
0, &err);
if (unlikely(skb == NULL))
goto out_status;
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
- addr);
+ addr, hlen);
if (unlikely(tp_len < 0)) {
if (po->tp_loss) {
@@ -2207,6 +2208,7 @@ static int packet_snd(struct socket *sock,
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
+ int hlen, tlen;
/*
* Get and verify the address.
@@ -2291,8 +2293,9 @@ static int packet_snd(struct socket *sock,
goto out_unlock;
err = -ENOBUFS;
- skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
- LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
@@ -2450,8 +2453,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
{
struct packet_sock *po = pkt_sk(sk);
- if (po->fanout)
+ if (po->fanout) {
+ if (dev)
+ dev_put(dev);
+
return -EINVAL;
+ }
lock_sock(sk);
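
The af_packet hunks all follow one pattern: every allocation that previously sized the skb from the headroom alone now also adds dev->needed_tailroom, while still reserving only the headroom. A minimal sketch of that pattern, with dev, sk and len standing in for the values used in the surrounding functions and error handling elided:

/* Sketch of the headroom/tailroom sizing used in the hunks above. */
int hlen = LL_RESERVED_SPACE(dev);	/* aligned headroom incl. link header */
int tlen = dev->needed_tailroom;	/* extra tailroom some drivers need   */
struct sk_buff *skb;

skb = sock_wmalloc(sk, len + hlen + tlen, 0, GFP_KERNEL);
if (skb)
	skb_reserve(skb, hlen);		/* only the headroom is reserved;
					 * the tailroom stays at the end */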
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 2ba6e9fb4cb..9f60008740e 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -534,6 +534,29 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
return pipe_handler_send_created_ind(sk);
}
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+
+ if (hdr->error_code != PN_PIPE_NO_ERROR)
+ return -ECONNREFUSED;
+
+ return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+ NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ if (!pn_flow_safe(pn->tx_fc)) {
+ atomic_set(&pn->tx_credits, 1);
+ sk->sk_write_space(sk);
+ }
+ pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
/* Queue an skb to an actively connected sock.
* Socket lock must be held. */
static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -579,13 +602,25 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_state = TCP_CLOSE_WAIT;
break;
}
+ if (pn->init_enable == PN_PIPE_DISABLE)
+ sk->sk_state = TCP_SYN_RECV;
+ else {
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
+ }
+ break;
- sk->sk_state = TCP_ESTABLISHED;
- if (!pn_flow_safe(pn->tx_fc)) {
- atomic_set(&pn->tx_credits, 1);
- sk->sk_write_space(sk);
+ case PNS_PEP_ENABLE_RESP:
+ if (sk->sk_state != TCP_SYN_SENT)
+ break;
+
+ if (pep_enableresp_rcv(sk, skb)) {
+ sk->sk_state = TCP_CLOSE_WAIT;
+ break;
}
- pipe_grant_credits(sk, GFP_ATOMIC);
+
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
break;
case PNS_PEP_DISCONNECT_RESP:
@@ -864,14 +899,32 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
int err;
u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
- pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+ if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+ pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
- PN_PIPE_ENABLE, data, 4);
+ pn->init_enable, data, 4);
if (err) {
pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
return err;
}
+
sk->sk_state = TCP_SYN_SENT;
+
+ return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+ int err;
+
+ err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+ NULL, 0);
+ if (err)
+ return err;
+
+ sk->sk_state = TCP_SYN_SENT;
+
return 0;
}
@@ -879,11 +932,14 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct pep_sock *pn = pep_sk(sk);
int answ;
+ int ret = -ENOIOCTLCMD;
switch (cmd) {
case SIOCINQ:
- if (sk->sk_state == TCP_LISTEN)
- return -EINVAL;
+ if (sk->sk_state == TCP_LISTEN) {
+ ret = -EINVAL;
+ break;
+ }
lock_sock(sk);
if (sock_flag(sk, SOCK_URGINLINE) &&
@@ -894,10 +950,22 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
else
answ = 0;
release_sock(sk);
- return put_user(answ, (int __user *)arg);
+ ret = put_user(answ, (int __user *)arg);
+ break;
+
+ case SIOCPNENABLEPIPE:
+ lock_sock(sk);
+ if (sk->sk_state == TCP_SYN_SENT)
+ ret = -EBUSY;
+ else if (sk->sk_state == TCP_ESTABLISHED)
+ ret = -EISCONN;
+ else
+ ret = pep_sock_enable(sk, NULL, 0);
+ release_sock(sk);
+ break;
}
- return -ENOIOCTLCMD;
+ return ret;
}
static int pep_init(struct sock *sk)
@@ -960,6 +1028,18 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
}
goto out_norel;
+ case PNPIPE_HANDLE:
+ if ((sk->sk_state == TCP_CLOSE) &&
+ (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+ pn->pipe_handle = val;
+ else
+ err = -EINVAL;
+ break;
+
+ case PNPIPE_INITSTATE:
+ pn->init_enable = !!val;
+ break;
+
default:
err = -ENOPROTOOPT;
}
@@ -995,6 +1075,10 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
return -EINVAL;
break;
+ case PNPIPE_INITSTATE:
+ val = pn->init_enable;
+ break;
+
default:
return -ENOPROTOOPT;
}
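
Taken together, the PNPIPE_HANDLE and PNPIPE_INITSTATE socket options plus the SIOCPNENABLEPIPE ioctl let userspace create a pipe in the disabled state and enable it later. A rough userspace sketch, assuming the usual AF_PHONET/SOL_PNPIPE constants from <linux/phonet.h> and an already created pep socket fd; not part of the patch:

/* Hypothetical usage sketch: pre-set a handle, connect disabled, enable later. */
int handle = 42;	/* any value below PN_PIPE_INVALID_HANDLE */
int enable = 0;		/* 0 -> pipe starts in the disabled state */

setsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, sizeof(handle));
setsockopt(fd, SOL_PNPIPE, PNPIPE_INITSTATE, &enable, sizeof(enable));
/* ... connect() the pep socket as usual ... */
ioctl(fd, SIOCPNENABLEPIPE, 0);		/* sends PNS_PEP_ENABLE_REQ */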
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 5be19575c34..354760ebbbd 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -644,7 +644,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 0, &state);
+ err = kstrtoul(buf, 0, &state);
if (err)
return err;
@@ -688,7 +688,7 @@ static ssize_t rfkill_state_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 0, &state);
+ err = kstrtoul(buf, 0, &state);
if (err)
return err;
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 128677d6905..865adb61685 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -105,7 +105,7 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
ret = pdata->gpio_runtime_setup(pdev);
if (ret) {
pr_warn("%s: can't set up gpio\n", __func__);
- return ret;
+ goto fail_alloc;
}
}
@@ -220,18 +220,7 @@ static struct platform_driver rfkill_gpio_driver = {
},
};
-static int __init rfkill_gpio_init(void)
-{
- return platform_driver_register(&rfkill_gpio_driver);
-}
-
-static void __exit rfkill_gpio_exit(void)
-{
- platform_driver_unregister(&rfkill_gpio_driver);
-}
-
-module_init(rfkill_gpio_init);
-module_exit(rfkill_gpio_exit);
+module_platform_driver(rfkill_gpio_driver);
MODULE_DESCRIPTION("gpio rfkill");
MODULE_AUTHOR("NVIDIA");
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 3ca7277a3c3..11da3018a85 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -36,12 +36,12 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
if (blocked) {
if (rfkill_data->reg_enabled) {
regulator_disable(rfkill_data->vcc);
- rfkill_data->reg_enabled = 0;
+ rfkill_data->reg_enabled = false;
}
} else {
if (!rfkill_data->reg_enabled) {
regulator_enable(rfkill_data->vcc);
- rfkill_data->reg_enabled = 1;
+ rfkill_data->reg_enabled = true;
}
}
@@ -96,7 +96,7 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
if (regulator_is_enabled(vcc)) {
dev_dbg(&pdev->dev, "Regulator already enabled\n");
- rfkill_data->reg_enabled = 1;
+ rfkill_data->reg_enabled = true;
}
rfkill_data->vcc = vcc;
rfkill_data->rf_kill = rf_kill;
@@ -144,17 +144,7 @@ static struct platform_driver rfkill_regulator_driver = {
},
};
-static int __init rfkill_regulator_init(void)
-{
- return platform_driver_register(&rfkill_regulator_driver);
-}
-module_init(rfkill_regulator_init);
-
-static void __exit rfkill_regulator_exit(void)
-{
- platform_driver_unregister(&rfkill_regulator_driver);
-}
-module_exit(rfkill_regulator_exit);
+module_platform_driver(rfkill_regulator_driver);
MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index f99cfce7ca9..c3126e864f3 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -195,7 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
sp = rxrpc_skb(txb);
if (sp->need_resend) {
- sp->need_resend = 0;
+ sp->need_resend = false;
/* each Tx packet has a new serial number */
sp->hdr.serial =
@@ -216,7 +216,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
}
if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = 1;
+ sp->need_resend = true;
resend |= 1;
} else if (resend & 2) {
if (time_before(sp->resend_at, resend_at))
@@ -265,7 +265,7 @@ static void rxrpc_resend_timer(struct rxrpc_call *call)
if (sp->need_resend) {
;
} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = 1;
+ sp->need_resend = true;
resend |= 1;
} else if (resend & 2) {
if (time_before(sp->resend_at, resend_at))
@@ -314,11 +314,11 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
switch (sacks[loop]) {
case RXRPC_ACK_TYPE_ACK:
- sp->need_resend = 0;
+ sp->need_resend = false;
*p_txb |= 1;
break;
case RXRPC_ACK_TYPE_NACK:
- sp->need_resend = 1;
+ sp->need_resend = true;
*p_txb &= ~1;
resend = 1;
break;
@@ -344,13 +344,13 @@ static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
if (*p_txb & 1) {
/* packet must have been discarded */
- sp->need_resend = 1;
+ sp->need_resend = true;
*p_txb &= ~1;
resend |= 1;
} else if (sp->need_resend) {
;
} else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = 1;
+ sp->need_resend = true;
resend |= 1;
} else if (resend & 2) {
if (time_before(sp->resend_at, resend_at))
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 43ea7de2fc8..4cba13e46ff 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -306,10 +306,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
td->data_len = len;
if (len > 0) {
- td->data = kmalloc(len, GFP_KERNEL);
+ td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- memcpy(td->data, xdr, len);
len = (len + 3) & ~3;
toklen -= len;
xdr += len >> 2;
@@ -401,10 +400,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
_debug("ticket len %u", len);
if (len > 0) {
- *_ticket = kmalloc(len, GFP_KERNEL);
+ *_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- memcpy(*_ticket, xdr, len);
len = (len + 3) & ~3;
toklen -= len;
xdr += len >> 2;
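
Both ar-key.c hunks rely on kmemdup() collapsing the allocate-then-copy pair into a single call; it is roughly equivalent to the following sketch (the real helper lives in mm/util.c):

/* Rough equivalent of kmemdup(). */
static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}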
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 338d793c711..16ae88762d0 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -486,7 +486,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
_proto("Tx DATA %%%u { #%u }",
ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
- sp->need_resend = 0;
+ sp->need_resend = false;
sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
_debug("run timer");
@@ -508,7 +508,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
if (ret < 0) {
_debug("need instant resend %d", ret);
- sp->need_resend = 1;
+ sp->need_resend = true;
rxrpc_instant_resend(call);
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 7b582300d05..1d8bd0dbcd1 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -26,6 +26,8 @@
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/flow_keys.h>
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -66,134 +68,37 @@ static inline u32 addr_fold(void *addr)
return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
-static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
- __be32 *data = NULL, hdata;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct iphdr,
- saddr),
- 4, &hdata);
- break;
- case htons(ETH_P_IPV6):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct ipv6hdr,
- saddr.s6_addr32[3]),
- 4, &hdata);
- break;
- }
-
- if (data)
- return ntohl(*data);
+ if (flow->src)
+ return ntohl(flow->src);
return addr_fold(skb->sk);
}
-static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
- __be32 *data = NULL, hdata;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct iphdr,
- daddr),
- 4, &hdata);
- break;
- case htons(ETH_P_IPV6):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct ipv6hdr,
- daddr.s6_addr32[3]),
- 4, &hdata);
- break;
- }
-
- if (data)
- return ntohl(*data);
+ if (flow->dst)
+ return ntohl(flow->dst);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
-static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
- __u8 *data = NULL, hdata;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct iphdr,
- protocol),
- 1, &hdata);
- break;
- case htons(ETH_P_IPV6):
- data = skb_header_pointer(skb,
- nhoff + offsetof(struct ipv6hdr,
- nexthdr),
- 1, &hdata);
- break;
- }
- if (data)
- return *data;
- return 0;
+ return flow->ip_proto;
}
-/* helper function to get either src or dst port */
-static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
- __be16 *_port, int dst)
+static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
- __be16 *port = NULL;
- int poff;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP): {
- struct iphdr *iph, _iph;
-
- iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
- if (!iph)
- break;
- if (ip_is_fragment(iph))
- break;
- poff = proto_ports_offset(iph->protocol);
- if (poff >= 0)
- port = skb_header_pointer(skb,
- nhoff + iph->ihl * 4 + poff + dst,
- sizeof(*_port), _port);
- break;
- }
- case htons(ETH_P_IPV6): {
- struct ipv6hdr *iph, _iph;
-
- iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
- if (!iph)
- break;
- poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0)
- port = skb_header_pointer(skb,
- nhoff + sizeof(*iph) + poff + dst,
- sizeof(*_port), _port);
- break;
- }
- }
-
- return port;
-}
-
-static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
-{
- __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
-
- if (port)
- return ntohs(*port);
+ if (flow->ports)
+ return ntohs(flow->port16[0]);
return addr_fold(skb->sk);
}
-static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
- __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
-
- if (port)
- return ntohs(*port);
+ if (flow->ports)
+ return ntohs(flow->port16[1]);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
@@ -239,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
})
#endif
-static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -248,10 +153,10 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
}
fallback:
- return flow_get_src(skb, nhoff);
+ return flow_get_src(skb, flow);
}
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -260,21 +165,21 @@ static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
}
fallback:
- return flow_get_dst(skb, nhoff);
+ return flow_get_dst(skb, flow);
}
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, src.u.all));
fallback:
- return flow_get_proto_src(skb, nhoff);
+ return flow_get_proto_src(skb, flow);
}
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
- return flow_get_proto_dst(skb, nhoff);
+ return flow_get_proto_dst(skb, flow);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -314,21 +219,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
return skb_get_rxhash(skb);
}
-static u32 flow_key_get(struct sk_buff *skb, int key)
+static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
- int nhoff = skb_network_offset(skb);
-
switch (key) {
case FLOW_KEY_SRC:
- return flow_get_src(skb, nhoff);
+ return flow_get_src(skb, flow);
case FLOW_KEY_DST:
- return flow_get_dst(skb, nhoff);
+ return flow_get_dst(skb, flow);
case FLOW_KEY_PROTO:
- return flow_get_proto(skb, nhoff);
+ return flow_get_proto(skb, flow);
case FLOW_KEY_PROTO_SRC:
- return flow_get_proto_src(skb, nhoff);
+ return flow_get_proto_src(skb, flow);
case FLOW_KEY_PROTO_DST:
- return flow_get_proto_dst(skb, nhoff);
+ return flow_get_proto_dst(skb, flow);
case FLOW_KEY_IIF:
return flow_get_iif(skb);
case FLOW_KEY_PRIORITY:
@@ -338,13 +241,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
case FLOW_KEY_NFCT:
return flow_get_nfct(skb);
case FLOW_KEY_NFCT_SRC:
- return flow_get_nfct_src(skb, nhoff);
+ return flow_get_nfct_src(skb, flow);
case FLOW_KEY_NFCT_DST:
- return flow_get_nfct_dst(skb, nhoff);
+ return flow_get_nfct_dst(skb, flow);
case FLOW_KEY_NFCT_PROTO_SRC:
- return flow_get_nfct_proto_src(skb, nhoff);
+ return flow_get_nfct_proto_src(skb, flow);
case FLOW_KEY_NFCT_PROTO_DST:
- return flow_get_nfct_proto_dst(skb, nhoff);
+ return flow_get_nfct_proto_dst(skb, flow);
case FLOW_KEY_RTCLASSID:
return flow_get_rtclassid(skb);
case FLOW_KEY_SKUID:
@@ -361,6 +264,16 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
}
}
+#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \
+ (1 << FLOW_KEY_DST) | \
+ (1 << FLOW_KEY_PROTO) | \
+ (1 << FLOW_KEY_PROTO_SRC) | \
+ (1 << FLOW_KEY_PROTO_DST) | \
+ (1 << FLOW_KEY_NFCT_SRC) | \
+ (1 << FLOW_KEY_NFCT_DST) | \
+ (1 << FLOW_KEY_NFCT_PROTO_SRC) | \
+ (1 << FLOW_KEY_NFCT_PROTO_DST))
+
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -372,17 +285,20 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
int r;
list_for_each_entry(f, &head->filters, list) {
- u32 keys[f->nkeys];
+ u32 keys[FLOW_KEY_MAX + 1];
+ struct flow_keys flow_keys;
if (!tcf_em_tree_match(skb, &f->ematches, NULL))
continue;
keymask = f->keymask;
+ if (keymask & FLOW_KEYS_NEEDED)
+ skb_flow_dissect(skb, &flow_keys);
for (n = 0; n < f->nkeys; n++) {
key = ffs(keymask) - 1;
keymask &= ~(1 << key);
- keys[n] = flow_key_get(skb, key);
+ keys[n] = flow_key_get(skb, key, &flow_keys);
}
if (f->mode == FLOW_MODE_HASH)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index dca6c1a576f..3d8981fde30 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -618,20 +618,24 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
-/* Allocate an unique handle from space managed by kernel */
-
+/* Allocate a unique handle from space managed by the kernel.
+ * Possible range is [8000-FFFF]:0000 (0x8000 values)
+ */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
- int i = 0x10000;
+ int i = 0x8000;
static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
do {
autohandle += TC_H_MAKE(0x10000U, 0);
if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
autohandle = TC_H_MAKE(0x80000000U, 0);
- } while (qdisc_lookup(dev, autohandle) && --i > 0);
+ if (!qdisc_lookup(dev, autohandle))
+ return autohandle;
+ cond_resched();
+ } while (--i > 0);
- return i > 0 ? autohandle : 0;
+ return 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
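
The constants in the qdisc_alloc_handle() hunk follow from how TC handles are packed; a short sketch of the arithmetic (TC_H_MAKE() keeps the upper 16 bits as the major number):

/* Handle-space arithmetic behind the hunk above (sketch). */
u32 h = TC_H_MAKE(0x80000000U, 0);	/* start of the kernel-managed range */

h += TC_H_MAKE(0x10000U, 0);		/* bumps the major: 0x8001:0000, ... */
/* The automatic majors span [8000-FFFF], i.e. 0x8000 candidates, which is why
 * the retry counter now starts at 0x8000 rather than 0x10000; cond_resched()
 * keeps a nearly full handle space from monopolising the CPU.
 */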
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3422b25df9e..e465064d39a 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -19,10 +19,7 @@
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
+#include <net/flow_keys.h>
/*
CHOKe stateless AQM for fair bandwidth allocation
@@ -60,6 +57,7 @@ struct choke_sched_data {
struct red_parms parms;
/* Variables */
+ struct red_vars vars;
struct tcf_proto *filter_list;
struct {
u32 prob_drop; /* Early probability drops */
@@ -142,85 +140,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
--sch->q.qlen;
}
-/*
- * Compare flow of two packets
- * Returns true only if source and destination address and port match.
- * false for special cases
- */
-static bool choke_match_flow(struct sk_buff *skb1,
- struct sk_buff *skb2)
-{
- int off1, off2, poff;
- const u32 *ports1, *ports2;
- u8 ip_proto;
- __u32 hash1;
-
- if (skb1->protocol != skb2->protocol)
- return false;
-
- /* Use hash value as quick check
- * Assumes that __skb_get_rxhash makes IP header and ports linear
- */
- hash1 = skb_get_rxhash(skb1);
- if (!hash1 || hash1 != skb_get_rxhash(skb2))
- return false;
-
- /* Probably match, but be sure to avoid hash collisions */
- off1 = skb_network_offset(skb1);
- off2 = skb_network_offset(skb2);
-
- switch (skb1->protocol) {
- case __constant_htons(ETH_P_IP): {
- const struct iphdr *ip1, *ip2;
-
- ip1 = (const struct iphdr *) (skb1->data + off1);
- ip2 = (const struct iphdr *) (skb2->data + off2);
-
- ip_proto = ip1->protocol;
- if (ip_proto != ip2->protocol ||
- ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
- return false;
-
- if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
- ip_proto = 0;
- off1 += ip1->ihl * 4;
- off2 += ip2->ihl * 4;
- break;
- }
-
- case __constant_htons(ETH_P_IPV6): {
- const struct ipv6hdr *ip1, *ip2;
-
- ip1 = (const struct ipv6hdr *) (skb1->data + off1);
- ip2 = (const struct ipv6hdr *) (skb2->data + off2);
-
- ip_proto = ip1->nexthdr;
- if (ip_proto != ip2->nexthdr ||
- ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
- ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
- return false;
- off1 += 40;
- off2 += 40;
- }
-
- default: /* Maybe compare MAC header here? */
- return false;
- }
-
- poff = proto_ports_offset(ip_proto);
- if (poff < 0)
- return true;
-
- off1 += poff;
- off2 += poff;
-
- ports1 = (__force u32 *)(skb1->data + off1);
- ports2 = (__force u32 *)(skb2->data + off2);
- return *ports1 == *ports2;
-}
-
struct choke_skb_cb {
- u16 classid;
+ u16 classid;
+ u8 keys_valid;
+ struct flow_keys keys;
};
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -241,6 +164,32 @@ static u16 choke_get_classid(const struct sk_buff *skb)
}
/*
+ * Compare flow of two packets
+ * Returns true only if source and destination addresses and ports match;
+ * false for special cases.
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+ struct sk_buff *skb2)
+{
+ if (skb1->protocol != skb2->protocol)
+ return false;
+
+ if (!choke_skb_cb(skb1)->keys_valid) {
+ choke_skb_cb(skb1)->keys_valid = 1;
+ skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+ }
+
+ if (!choke_skb_cb(skb2)->keys_valid) {
+ choke_skb_cb(skb2)->keys_valid = 1;
+ skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+ }
+
+ return !memcmp(&choke_skb_cb(skb1)->keys,
+ &choke_skb_cb(skb2)->keys,
+ sizeof(struct flow_keys));
+}
+
+/*
* Classify flow using either:
* 1. pre-existing classification result in skb
* 2. fast internal classification
@@ -317,7 +266,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
- struct red_parms *p = &q->parms;
+ const struct red_parms *p = &q->parms;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (q->filter_list) {
@@ -326,14 +275,15 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
goto other_drop; /* Packet was eaten by filter */
}
+ choke_skb_cb(skb)->keys_valid = 0;
/* Compute average queue usage (see RED) */
- p->qavg = red_calc_qavg(p, sch->q.qlen);
- if (red_is_idling(p))
- red_end_of_idle_period(p);
+ q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
+ if (red_is_idling(&q->vars))
+ red_end_of_idle_period(&q->vars);
/* Is queue small? */
- if (p->qavg <= p->qth_min)
- p->qcount = -1;
+ if (q->vars.qavg <= p->qth_min)
+ q->vars.qcount = -1;
else {
unsigned int idx;
@@ -345,8 +295,8 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
/* Queue is large, always mark/drop */
- if (p->qavg > p->qth_max) {
- p->qcount = -1;
+ if (q->vars.qavg > p->qth_max) {
+ q->vars.qcount = -1;
sch->qstats.overlimits++;
if (use_harddrop(q) || !use_ecn(q) ||
@@ -356,10 +306,10 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
q->stats.forced_mark++;
- } else if (++p->qcount) {
- if (red_mark_probability(p, p->qavg)) {
- p->qcount = 0;
- p->qR = red_random(p);
+ } else if (++q->vars.qcount) {
+ if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
+ q->vars.qcount = 0;
+ q->vars.qR = red_random(p);
sch->qstats.overlimits++;
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
@@ -370,7 +320,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->stats.prob_mark++;
}
} else
- p->qR = red_random(p);
+ q->vars.qR = red_random(p);
}
/* Admit new packet */
@@ -404,8 +354,8 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
struct sk_buff *skb;
if (q->head == q->tail) {
- if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
+ if (!red_is_idling(&q->vars))
+ red_start_of_idle_period(&q->vars);
return NULL;
}
@@ -428,8 +378,8 @@ static unsigned int choke_drop(struct Qdisc *sch)
if (len > 0)
q->stats.other++;
else {
- if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
+ if (!red_is_idling(&q->vars))
+ red_start_of_idle_period(&q->vars);
}
return len;
@@ -439,12 +389,13 @@ static void choke_reset(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
- red_restart(&q->parms);
+ red_restart(&q->vars);
}
static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
[TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) },
[TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE },
+ [TCA_CHOKE_MAX_P] = { .type = NLA_U32 },
};
@@ -466,6 +417,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
int err;
struct sk_buff **old = NULL;
unsigned int mask;
+ u32 max_P;
if (opt == NULL)
return -EINVAL;
@@ -478,6 +430,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
tb[TCA_CHOKE_STAB] == NULL)
return -EINVAL;
+ max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -527,10 +481,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_CHOKE_STAB]));
+ nla_data(tb[TCA_CHOKE_STAB]),
+ max_P);
+ red_set_vars(&q->vars);
if (q->head == q->tail)
- red_end_of_idle_period(&q->parms);
+ red_end_of_idle_period(&q->vars);
sch_tree_unlock(sch);
choke_free(old);
@@ -561,6 +517,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+ NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
return nla_nest_end(skb, opts);
nla_put_failure:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69fca279880..67fc573e013 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
/* check the reason of requeuing without tx lock first */
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
- if (!netif_tx_queue_frozen_or_stopped(txq)) {
+ if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
q->q.qlen--;
} else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spin_unlock(root_lock);
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_tx_queue_frozen_or_stopped(txq))
+ if (!netif_xmit_frozen_or_stopped(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
ret = dev_requeue_skb(skb, q);
}
- if (ret && netif_tx_queue_frozen_or_stopped(txq))
+ if (ret && netif_xmit_frozen_or_stopped(txq))
ret = 0;
return ret;
@@ -242,10 +242,11 @@ static void dev_watchdog(unsigned long arg)
* old device drivers set dev->trans_start
*/
trans_start = txq->trans_start ? : dev->trans_start;
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
some_queue_timedout = 1;
+ txq->trans_timeout++;
break;
}
}
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index b9493a09a87..0b15236be7b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -34,13 +34,14 @@ struct gred_sched;
struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
- u32 DP; /* the drop pramaters */
+ u32 DP; /* the drop parameters */
u32 bytesin; /* bytes seen on virtualQ so far*/
u32 packetsin; /* packets seen on virtualQ so far*/
u32 backlog; /* bytes on the virtualQ */
u8 prio; /* the prio of this vq */
struct red_parms parms;
+ struct red_vars vars;
struct red_stats stats;
};
@@ -55,7 +56,7 @@ struct gred_sched {
u32 red_flags;
u32 DPs;
u32 def;
- struct red_parms wred_set;
+ struct red_vars wred_set;
};
static inline int gred_wred_mode(struct gred_sched *table)
@@ -125,17 +126,17 @@ static inline u16 tc_index_to_dp(struct sk_buff *skb)
return skb->tc_index & GRED_VQ_MASK;
}
-static inline void gred_load_wred_set(struct gred_sched *table,
+static inline void gred_load_wred_set(const struct gred_sched *table,
struct gred_sched_data *q)
{
- q->parms.qavg = table->wred_set.qavg;
- q->parms.qidlestart = table->wred_set.qidlestart;
+ q->vars.qavg = table->wred_set.qavg;
+ q->vars.qidlestart = table->wred_set.qidlestart;
}
static inline void gred_store_wred_set(struct gred_sched *table,
struct gred_sched_data *q)
{
- table->wred_set.qavg = q->parms.qavg;
+ table->wred_set.qavg = q->vars.qavg;
}
static inline int gred_use_ecn(struct gred_sched *t)
@@ -170,7 +171,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
goto drop;
}
- /* fix tc_index? --could be controvesial but needed for
+ /* fix tc_index? --could be controversial but needed for
requeueing */
skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
}
@@ -181,8 +182,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
- !red_is_idling(&t->tab[i]->parms))
- qavg += t->tab[i]->parms.qavg;
+ !red_is_idling(&t->tab[i]->vars))
+ qavg += t->tab[i]->vars.qavg;
}
}
@@ -193,15 +194,17 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (gred_wred_mode(t))
gred_load_wred_set(t, q);
- q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));
+ q->vars.qavg = red_calc_qavg(&q->parms,
+ &q->vars,
+ gred_backlog(t, q, sch));
- if (red_is_idling(&q->parms))
- red_end_of_idle_period(&q->parms);
+ if (red_is_idling(&q->vars))
+ red_end_of_idle_period(&q->vars);
if (gred_wred_mode(t))
gred_store_wred_set(t, q);
- switch (red_action(&q->parms, q->parms.qavg + qavg)) {
+ switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
case RED_DONT_MARK:
break;
@@ -260,7 +263,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
q->backlog -= qdisc_pkt_len(skb);
if (!q->backlog && !gred_wred_mode(t))
- red_start_of_idle_period(&q->parms);
+ red_start_of_idle_period(&q->vars);
}
return skb;
@@ -293,7 +296,7 @@ static unsigned int gred_drop(struct Qdisc *sch)
q->stats.other++;
if (!q->backlog && !gred_wred_mode(t))
- red_start_of_idle_period(&q->parms);
+ red_start_of_idle_period(&q->vars);
}
qdisc_drop(skb, sch);
@@ -320,7 +323,7 @@ static void gred_reset(struct Qdisc *sch)
if (!q)
continue;
- red_restart(&q->parms);
+ red_restart(&q->vars);
q->backlog = 0;
}
}
@@ -379,29 +382,31 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
- struct tc_gred_qopt *ctl, int prio, u8 *stab)
+ struct tc_gred_qopt *ctl, int prio,
+ u8 *stab, u32 max_P,
+ struct gred_sched_data **prealloc)
{
struct gred_sched *table = qdisc_priv(sch);
- struct gred_sched_data *q;
+ struct gred_sched_data *q = table->tab[dp];
- if (table->tab[dp] == NULL) {
- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
- if (table->tab[dp] == NULL)
+ if (!q) {
+ table->tab[dp] = q = *prealloc;
+ *prealloc = NULL;
+ if (!q)
return -ENOMEM;
}
- q = table->tab[dp];
q->DP = dp;
q->prio = prio;
q->limit = ctl->limit;
if (q->backlog == 0)
- red_end_of_idle_period(&q->parms);
+ red_end_of_idle_period(&q->vars);
red_set_parms(&q->parms,
ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
- ctl->Scell_log, stab);
-
+ ctl->Scell_log, stab, max_P);
+ red_set_vars(&q->vars);
return 0;
}
@@ -409,6 +414,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
[TCA_GRED_STAB] = { .len = 256 },
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
+ [TCA_GRED_MAX_P] = { .type = NLA_U32 },
};
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -418,6 +424,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
struct nlattr *tb[TCA_GRED_MAX + 1];
int err, prio = GRED_DEF_PRIO;
u8 *stab;
+ u32 max_P;
+ struct gred_sched_data *prealloc;
if (opt == NULL)
return -EINVAL;
@@ -433,6 +441,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
tb[TCA_GRED_STAB] == NULL)
return -EINVAL;
+ max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+
err = -EINVAL;
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
@@ -455,9 +465,10 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
prio = ctl->prio;
}
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
sch_tree_lock(sch);
- err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+ err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
if (err < 0)
goto errout_locked;
@@ -471,6 +482,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
errout_locked:
sch_tree_unlock(sch);
+ kfree(prealloc);
errout:
return err;
}
@@ -498,6 +510,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
struct gred_sched *table = qdisc_priv(sch);
struct nlattr *parms, *opts = NULL;
int i;
+ u32 max_p[MAX_DPs];
struct tc_gred_sopt sopt = {
.DPs = table->DPs,
.def_DP = table->def,
@@ -509,6 +522,14 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+
+ for (i = 0; i < MAX_DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+
+ max_p[i] = q ? q->parms.max_P : 0;
+ }
+ NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+
parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL)
goto nla_put_failure;
@@ -545,12 +566,12 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.bytesin = q->bytesin;
if (gred_wred_mode(table)) {
- q->parms.qidlestart =
- table->tab[table->def]->parms.qidlestart;
- q->parms.qavg = table->tab[table->def]->parms.qavg;
+ q->vars.qidlestart =
+ table->tab[table->def]->vars.qidlestart;
+ q->vars.qavg = table->tab[table->def]->vars.qavg;
}
- opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+ opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
append_opt:
if (nla_append(skb, sizeof(opt), &opt) < 0)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6488e642565..9bdca2e011e 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1368,6 +1368,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct tc_hfsc_stats xstats;
cl->qstats.qlen = cl->qdisc->q.qlen;
+ cl->qstats.backlog = cl->qdisc->qstats.backlog;
xstats.level = cl->level;
xstats.period = cl->cl_vtperiod;
xstats.work = cl->cl_total;
@@ -1561,6 +1562,15 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
+ struct hfsc_class *cl;
+ struct hlist_node *n;
+ unsigned int i;
+
+ sch->qstats.backlog = 0;
+ for (i = 0; i < q->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+ sch->qstats.backlog += cl->qdisc->qstats.backlog;
+ }
qopt.defcls = q->defcls;
NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index f88256cbacb..28de4309233 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
if (!netif_is_multiqueue(dev))
return -EOPNOTSUPP;
- if (nla_len(opt) < sizeof(*qopt))
+ if (!opt || nla_len(opt) < sizeof(*qopt))
return -EINVAL;
qopt = nla_data(opt);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index edc1950e0e7..49131d7a744 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
/* Check that target subqueue is available before
* pulling an skb to avoid head-of-line blocking.
*/
- if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+ if (!netif_xmit_stopped(
+ netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
/* Check that target subqueue is available before
* pulling an skb to avoid head-of-line blocking.
*/
- if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+ if (!netif_xmit_stopped(
+ netdev_get_tx_queue(qdisc_dev(sch), curband))) {
qdisc = q->queues[curband];
skb = qdisc->ops->peek(qdisc);
if (skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eb3b9a86c6e..e7e1d0b57b3 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -22,6 +22,7 @@
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
+#include <linux/reciprocal_div.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -66,7 +67,11 @@
*/
struct netem_sched_data {
+ /* internal t(ime)fifo qdisc uses sch->q and sch->limit */
+
+ /* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
+
struct qdisc_watchdog watchdog;
psched_tdiff_t latency;
@@ -79,6 +84,11 @@ struct netem_sched_data {
u32 duplicate;
u32 reorder;
u32 corrupt;
+ u32 rate;
+ s32 packet_overhead;
+ u32 cell_size;
+ u32 cell_size_reciprocal;
+ s32 cell_overhead;
struct crndstate {
u32 last;
@@ -111,7 +121,9 @@ struct netem_sched_data {
};
-/* Time stamp put into socket buffer control block */
+/* Time stamp put into socket buffer control block
+ * Only valid when skbs are in our internal t(ime)fifo queue.
+ */
struct netem_skb_cb {
psched_time_t time_to_send;
};
@@ -298,6 +310,51 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
+static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+{
+ u64 ticks;
+
+ len += q->packet_overhead;
+
+ if (q->cell_size) {
+ u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
+
+ if (len > cells * q->cell_size) /* extra cell needed for remainder */
+ cells++;
+ len = cells * (q->cell_size + q->cell_overhead);
+ }
+
+ ticks = (u64)len * NSEC_PER_SEC;
+
+ do_div(ticks, q->rate);
+ return PSCHED_NS2TICKS(ticks);
+}
+
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+{
+ struct sk_buff_head *list = &sch->q;
+ psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
+ struct sk_buff *skb;
+
+ if (likely(skb_queue_len(list) < sch->limit)) {
+ skb = skb_peek_tail(list);
+ /* Optimize for add at tail */
+ if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+ return qdisc_enqueue_tail(nskb, sch);
+
+ skb_queue_reverse_walk(list, skb) {
+ if (tnext >= netem_skb_cb(skb)->time_to_send)
+ break;
+ }
+
+ __skb_queue_after(list, skb, nskb);
+ sch->qstats.backlog += qdisc_pkt_len(nskb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ return qdisc_reshape_fail(nskb, sch);
+}
+
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
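
packet_len_2_sched_time() turns a byte count into a transmission delay: the length is padded with packet_overhead, rounded up to whole cells, and divided by the configured rate (in bytes per second, as the nanosecond division by q->rate suggests). A small userspace sketch of the same arithmetic with illustrative numbers, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 125000;			/* 1 Mbit/s expressed in bytes/s */
	uint64_t len = 1500 + 24;		/* packet + packet_overhead      */
	uint32_t cell_size = 48, cell_overhead = 5;

	/* round up to whole cells, as the kernel does via reciprocal_divide() */
	uint64_t cells = (len + cell_size - 1) / cell_size;

	len = cells * (cell_size + cell_overhead);
	printf("tx time: %llu ns\n",
	       (unsigned long long)(len * 1000000000ULL / rate));
	return 0;
}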
@@ -371,9 +428,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
&q->delay_cor, q->delay_dist);
now = psched_get_time();
+
+ if (q->rate) {
+ struct sk_buff_head *list = &sch->q;
+
+ delay += packet_len_2_sched_time(skb->len, q);
+
+ if (!skb_queue_empty(list)) {
+ /*
+ * Last packet in queue is reference point (now).
+ * First packet in queue is already in flight,
+ * calculate this time bonus and subtract it
+ * from the delay.
+ */
+ delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+ now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
+ }
+ }
+
cb->time_to_send = now + delay;
++q->counter;
- ret = qdisc_enqueue(skb, q->qdisc);
+ ret = tfifo_enqueue(skb, sch);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
@@ -382,9 +457,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cb->time_to_send = psched_get_time();
q->counter = 0;
- __skb_queue_head(&q->qdisc->q, skb);
- q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
- q->qdisc->qstats.requeues++;
+ __skb_queue_head(&sch->q, skb);
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ sch->qstats.requeues++;
ret = NET_XMIT_SUCCESS;
}
@@ -395,19 +470,20 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
- sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
- unsigned int len = 0;
+ unsigned int len;
- if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
- sch->q.qlen--;
+ len = qdisc_queue_drop(sch);
+ if (!len && q->qdisc && q->qdisc->ops->drop)
+ len = q->qdisc->ops->drop(q->qdisc);
+ if (len)
sch->qstats.drops++;
- }
+
return len;
}
@@ -419,16 +495,16 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (qdisc_is_throttled(sch))
return NULL;
- skb = q->qdisc->ops->peek(q->qdisc);
+tfifo_dequeue:
+ skb = qdisc_peek_head(sch);
if (skb) {
const struct netem_skb_cb *cb = netem_skb_cb(skb);
- psched_time_t now = psched_get_time();
/* if more time remaining? */
- if (cb->time_to_send <= now) {
- skb = qdisc_dequeue_peeked(q->qdisc);
+ if (cb->time_to_send <= psched_get_time()) {
+ skb = qdisc_dequeue_tail(sch);
if (unlikely(!skb))
- return NULL;
+ goto qdisc_dequeue;
#ifdef CONFIG_NET_CLS_ACT
/*
@@ -439,15 +515,37 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0;
#endif
- sch->q.qlen--;
+ if (q->qdisc) {
+ int err = qdisc_enqueue(skb, q->qdisc);
+
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err)) {
+ sch->qstats.drops++;
+ qdisc_tree_decrease_qlen(sch, 1);
+ }
+ }
+ goto tfifo_dequeue;
+ }
+deliver:
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
}
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+ if (skb)
+ goto deliver;
+ }
qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
}
+qdisc_dequeue:
+ if (q->qdisc) {
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+ if (skb)
+ goto deliver;
+ }
return NULL;
}
@@ -455,8 +553,9 @@ static void netem_reset(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
- qdisc_reset(q->qdisc);
- sch->q.qlen = 0;
+ qdisc_reset_queue(sch);
+ if (q->qdisc)
+ qdisc_reset(q->qdisc);
qdisc_watchdog_cancel(&q->watchdog);
}
@@ -488,7 +587,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return -EINVAL;
s = sizeof(struct disttable) + n * sizeof(s16);
- d = kmalloc(s, GFP_KERNEL);
+ d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
if (!d)
d = vmalloc(s);
if (!d)
@@ -501,9 +600,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- dist_free(q->delay_dist);
- q->delay_dist = d;
+ swap(q->delay_dist, d);
spin_unlock_bh(root_lock);
+
+ dist_free(d);
return 0;
}
@@ -535,6 +635,19 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
init_crandom(&q->corrupt_cor, r->correlation);
}
+static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ const struct tc_netem_rate *r = nla_data(attr);
+
+ q->rate = r->rate;
+ q->packet_overhead = r->packet_overhead;
+ q->cell_size = r->cell_size;
+ if (q->cell_size)
+ q->cell_size_reciprocal = reciprocal_value(q->cell_size);
+ q->cell_overhead = r->cell_overhead;
+}
+
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -548,7 +661,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
case NETEM_LOSS_GI: {
const struct tc_netem_gimodel *gi = nla_data(la);
- if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+ if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
pr_info("netem: incorrect gi model size\n");
return -EINVAL;
}
@@ -567,8 +680,8 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
case NETEM_LOSS_GE: {
const struct tc_netem_gemodel *ge = nla_data(la);
- if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
- pr_info("netem: incorrect gi model size\n");
+ if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
+ pr_info("netem: incorrect ge model size\n");
return -EINVAL;
}
@@ -594,6 +707,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) },
[TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
[TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
+ [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
[TCA_NETEM_LOSS] = { .type = NLA_NESTED },
};
@@ -631,11 +745,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (ret < 0)
return ret;
- ret = fifo_set_limit(q->qdisc, qopt->limit);
- if (ret) {
- pr_info("netem: can't set fifo limit\n");
- return ret;
- }
+ sch->limit = qopt->limit;
q->latency = qopt->latency;
q->jitter = qopt->jitter;
@@ -666,6 +776,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_CORRUPT])
get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
+ if (tb[TCA_NETEM_RATE])
+ get_rate(sch, tb[TCA_NETEM_RATE]);
+
q->loss_model = CLG_RANDOM;
if (tb[TCA_NETEM_LOSS])
ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -673,88 +786,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
return ret;
}
-/*
- * Special case version of FIFO queue for use by netem.
- * It queues in order based on timestamps in skb's
- */
-struct fifo_sched_data {
- u32 limit;
- psched_time_t oldest;
-};
-
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
-{
- struct fifo_sched_data *q = qdisc_priv(sch);
- struct sk_buff_head *list = &sch->q;
- psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
- struct sk_buff *skb;
-
- if (likely(skb_queue_len(list) < q->limit)) {
- /* Optimize for add at tail */
- if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
- q->oldest = tnext;
- return qdisc_enqueue_tail(nskb, sch);
- }
-
- skb_queue_reverse_walk(list, skb) {
- const struct netem_skb_cb *cb = netem_skb_cb(skb);
-
- if (tnext >= cb->time_to_send)
- break;
- }
-
- __skb_queue_after(list, skb, nskb);
-
- sch->qstats.backlog += qdisc_pkt_len(nskb);
-
- return NET_XMIT_SUCCESS;
- }
-
- return qdisc_reshape_fail(nskb, sch);
-}
-
-static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
-{
- struct fifo_sched_data *q = qdisc_priv(sch);
-
- if (opt) {
- struct tc_fifo_qopt *ctl = nla_data(opt);
- if (nla_len(opt) < sizeof(*ctl))
- return -EINVAL;
-
- q->limit = ctl->limit;
- } else
- q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
-
- q->oldest = PSCHED_PASTPERFECT;
- return 0;
-}
-
-static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
-{
- struct fifo_sched_data *q = qdisc_priv(sch);
- struct tc_fifo_qopt opt = { .limit = q->limit };
-
- NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
- return skb->len;
-
-nla_put_failure:
- return -1;
-}
-
-static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
- .id = "tfifo",
- .priv_size = sizeof(struct fifo_sched_data),
- .enqueue = tfifo_enqueue,
- .dequeue = qdisc_dequeue_head,
- .peek = qdisc_peek_head,
- .drop = qdisc_queue_drop,
- .init = tfifo_init,
- .reset = qdisc_reset_queue,
- .change = tfifo_init,
- .dump = tfifo_dump,
-};
-
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -766,18 +797,9 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
q->loss_model = CLG_RANDOM;
- q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, 1));
- if (!q->qdisc) {
- pr_notice("netem: qdisc create tfifo qdisc failed\n");
- return -ENOMEM;
- }
-
ret = netem_change(sch, opt);
- if (ret) {
+ if (ret)
pr_info("netem: change failed\n");
- qdisc_destroy(q->qdisc);
- }
return ret;
}
@@ -786,7 +808,8 @@ static void netem_destroy(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
qdisc_watchdog_cancel(&q->watchdog);
- qdisc_destroy(q->qdisc);
+ if (q->qdisc)
+ qdisc_destroy(q->qdisc);
dist_free(q->delay_dist);
}
@@ -846,6 +869,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
struct tc_netem_corrupt corrupt;
+ struct tc_netem_rate rate;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
@@ -868,6 +892,12 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
corrupt.correlation = q->corrupt_cor.rho;
NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+ rate.rate = q->rate;
+ rate.packet_overhead = q->packet_overhead;
+ rate.cell_size = q->cell_size;
+ rate.cell_overhead = q->cell_overhead;
+ NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+
if (dump_loss_model(q, skb) != 0)
goto nla_put_failure;
@@ -883,7 +913,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
{
struct netem_sched_data *q = qdisc_priv(sch);
- if (cl != 1) /* only one class */
+ if (cl != 1 || !q->qdisc) /* only one class */
return -ENOENT;
tcm->tcm_handle |= TC_H_MIN(1);
@@ -897,14 +927,13 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct netem_sched_data *q = qdisc_priv(sch);
- if (new == NULL)
- new = &noop_qdisc;
-
sch_tree_lock(sch);
*old = q->qdisc;
q->qdisc = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
+ if (*old) {
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ }
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 10334340859..e68cb440756 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -211,6 +211,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr *tb[TCA_QFQ_MAX + 1];
u32 weight, lmax, inv_w;
int i, err;
+ int delta_w;
if (tca[TCA_OPTIONS] == NULL) {
pr_notice("qfq: no options\n");
@@ -232,9 +233,10 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
inv_w = ONE_FP / weight;
weight = ONE_FP / inv_w;
- if (q->wsum + weight > QFQ_MAX_WSUM) {
+ delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0);
+ if (q->wsum + delta_w > QFQ_MAX_WSUM) {
pr_notice("qfq: total weight out of range (%u + %u)\n",
- weight, q->wsum);
+ delta_w, q->wsum);
return -EINVAL;
}
@@ -256,13 +258,12 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
- sch_tree_lock(sch);
- if (tb[TCA_QFQ_WEIGHT]) {
- q->wsum = weight - ONE_FP / cl->inv_w;
+ if (inv_w != cl->inv_w) {
+ sch_tree_lock(sch);
+ q->wsum += delta_w;
cl->inv_w = inv_w;
+ sch_tree_unlock(sch);
}
- sch_tree_unlock(sch);
-
return 0;
}
@@ -277,7 +278,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
i = qfq_calc_index(cl->inv_w, cl->lmax);
cl->grp = &q->groups[i];
- q->wsum += weight;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
@@ -294,6 +294,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
}
+ q->wsum += weight;
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -817,11 +818,11 @@ skip_unblock:
static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
unsigned long mask;
- uint32_t limit, roundedF;
+ u64 limit, roundedF;
int slot_shift = cl->grp->slot_shift;
roundedF = qfq_round_down(cl->F, slot_shift);
- limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+ limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
/* timestamp was stale */
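
The delta_w bookkeeping above matters when an existing class is reconfigured: only the difference between the new and old weight may be added to q->wsum. A tiny worked sketch with made-up numbers:

/* Illustrative only: reconfiguring a class from weight 3 to weight 5. */
u32 old_weight = 3, new_weight = 5;
int delta_w = new_weight - old_weight;	/* = 2 */
/* q->wsum += delta_w;  re-adding the full new_weight (the old behaviour)
 * would inflate wsum on every change and eventually hit QFQ_MAX_WSUM. */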
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 6649463da1b..a5cc3012cf4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -39,7 +39,9 @@
struct red_sched_data {
u32 limit; /* HARD maximal queue length */
unsigned char flags;
+ struct timer_list adapt_timer;
struct red_parms parms;
+ struct red_vars vars;
struct red_stats stats;
struct Qdisc *qdisc;
};
@@ -60,12 +62,14 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct Qdisc *child = q->qdisc;
int ret;
- q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
+ q->vars.qavg = red_calc_qavg(&q->parms,
+ &q->vars,
+ child->qstats.backlog);
- if (red_is_idling(&q->parms))
- red_end_of_idle_period(&q->parms);
+ if (red_is_idling(&q->vars))
+ red_end_of_idle_period(&q->vars);
- switch (red_action(&q->parms, q->parms.qavg)) {
+ switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
case RED_DONT_MARK:
break;
@@ -116,8 +120,8 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
} else {
- if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
+ if (!red_is_idling(&q->vars))
+ red_start_of_idle_period(&q->vars);
}
return skb;
}
@@ -143,8 +147,8 @@ static unsigned int red_drop(struct Qdisc *sch)
return len;
}
- if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
+ if (!red_is_idling(&q->vars))
+ red_start_of_idle_period(&q->vars);
return 0;
}
@@ -155,18 +159,21 @@ static void red_reset(struct Qdisc *sch)
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
- red_restart(&q->parms);
+ red_restart(&q->vars);
}
static void red_destroy(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
+
+ del_timer_sync(&q->adapt_timer);
qdisc_destroy(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
[TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
[TCA_RED_STAB] = { .len = RED_STAB_SIZE },
+ [TCA_RED_MAX_P] = { .type = NLA_U32 },
};
static int red_change(struct Qdisc *sch, struct nlattr *opt)
@@ -176,6 +183,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
struct tc_red_qopt *ctl;
struct Qdisc *child = NULL;
int err;
+ u32 max_P;
if (opt == NULL)
return -EINVAL;
@@ -188,6 +196,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
tb[TCA_RED_STAB] == NULL)
return -EINVAL;
+ max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+
ctl = nla_data(tb[TCA_RED_PARMS]);
if (ctl->limit > 0) {
@@ -205,22 +215,42 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
q->qdisc = child;
}
- red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
- ctl->Plog, ctl->Scell_log,
- nla_data(tb[TCA_RED_STAB]));
+ red_set_parms(&q->parms,
+ ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Plog, ctl->Scell_log,
+ nla_data(tb[TCA_RED_STAB]),
+ max_P);
+ red_set_vars(&q->vars);
- if (skb_queue_empty(&sch->q))
- red_end_of_idle_period(&q->parms);
+ del_timer(&q->adapt_timer);
+ if (ctl->flags & TC_RED_ADAPTATIVE)
+ mod_timer(&q->adapt_timer, jiffies + HZ/2);
+
+ if (!q->qdisc->q.qlen)
+ red_start_of_idle_period(&q->vars);
sch_tree_unlock(sch);
return 0;
}
+static inline void red_adaptative_timer(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc *)arg;
+ struct red_sched_data *q = qdisc_priv(sch);
+ spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+ spin_lock(root_lock);
+ red_adaptative_algo(&q->parms, &q->vars);
+ mod_timer(&q->adapt_timer, jiffies + HZ/2);
+ spin_unlock(root_lock);
+}
+
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
q->qdisc = &noop_qdisc;
+ setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
return red_change(sch, opt);
}
@@ -243,6 +273,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
+ NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
return nla_nest_end(skb, opts);
nla_put_failure:
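
The enqueue path above now passes both the fixed parameters (red_parms) and the moving state (red_vars) into red_calc_qavg(), which maintains an exponentially weighted moving average of the child backlog, and the new adapt_timer periodically re-runs the parameter adaptation under the root qdisc lock (hence the del_timer_sync() in red_destroy()). A rough stand-alone sketch of the averaging idea only (plain C, not the kernel's fixed-point implementation; wlog mirrors the role of ctl->Wlog):

#include <stdio.h>

/* EWMA with weight 2^-wlog: qavg moves a fraction 1/2^wlog of the way
 * towards the instantaneous backlog on every update.  The kernel keeps
 * qavg in fixed point and uses shifts; plain division is used here. */
static long red_ewma(long qavg, long backlog, int wlog)
{
	return qavg + (backlog - qavg) / (1L << wlog);
}

int main(void)
{
	long qavg = 0;
	int i;

	for (i = 0; i < 8; i++) {
		qavg = red_ewma(qavg, 100, 3);	/* wlog = 3 -> weight 1/8 */
		printf("step %d: qavg = %ld\n", i, qavg);
	}
	return 0;
}
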
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e83c272c032..96e42cae4c7 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -26,6 +26,7 @@
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
+#include <net/flow_keys.h>
/*
* SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -286,6 +287,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
u32 minqlen = ~0;
u32 r, slot, salt, sfbhash;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ struct flow_keys keys;
if (unlikely(sch->q.qlen >= q->limit)) {
sch->qstats.overlimits++;
@@ -309,13 +311,19 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* If using external classifiers, get result and record it. */
if (!sfb_classify(skb, q, &ret, &salt))
goto other_drop;
+ keys.src = salt;
+ keys.dst = 0;
+ keys.ports = 0;
} else {
- salt = skb_get_rxhash(skb);
+ skb_flow_dissect(skb, &keys);
}
slot = q->slot;
- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ sfbhash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports,
+ q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -347,7 +355,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(p_min >= SFB_MAX_PROB)) {
/* Inelastic flow */
if (q->double_buffering) {
- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ sfbhash = jhash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports,
+ q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4f5510e2bd6..0a7964009e8 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -17,14 +17,13 @@
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
/* Stochastic Fairness Queuing algorithm.
@@ -67,16 +66,18 @@
SFQ is superior for this purpose.
IMPLEMENTATION:
- This implementation limits maximal queue length to 128;
- max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
- The only goal of this restrictions was that all data
- fit into one 4K page on 32bit arches.
+ This implementation limits :
+ - maximal queue length per flow to 127 packets.
+ - max mtu to 2^18-1;
+ - max 65408 flows,
+ - number of hash buckets to 65536.
It is easy to increase these values, but not in flight. */
-#define SFQ_DEPTH 128 /* max number of packets per flow */
-#define SFQ_SLOTS 128 /* max number of flows */
-#define SFQ_EMPTY_SLOT 255
+#define SFQ_MAX_DEPTH 127 /* max number of packets per flow */
+#define SFQ_DEFAULT_FLOWS 128
+#define SFQ_MAX_FLOWS (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
+#define SFQ_EMPTY_SLOT 0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
/* We use 16 bits to store allot, and want to handle packets up to 64K
@@ -85,13 +86,13 @@
#define SFQ_ALLOT_SHIFT 3
#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
-/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
-typedef unsigned char sfq_index;
+/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
+typedef u16 sfq_index;
/*
* We dont use pointers to save space.
- * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
- * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
+ * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
+ * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
* are 'pointers' to dep[] array
*/
struct sfq_head {
@@ -103,28 +104,38 @@ struct sfq_slot {
struct sk_buff *skblist_next;
struct sk_buff *skblist_prev;
sfq_index qlen; /* number of skbs in skblist */
- sfq_index next; /* next slot in sfq chain */
+ sfq_index next; /* next slot in sfq RR chain */
struct sfq_head dep; /* anchor in dep[] chains */
unsigned short hash; /* hash value (index in ht[]) */
short allot; /* credit for this slot */
};
struct sfq_sched_data {
-/* Parameters */
- int perturb_period;
- unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
- int limit;
+/* frequently used fields */
+ int limit; /* limit of total number of packets in this qdisc */
unsigned int divisor; /* number of slots in hash table */
-/* Variables */
- struct tcf_proto *filter_list;
- struct timer_list perturb_timer;
+ unsigned int maxflows; /* number of flows in flows array */
+ int headdrop;
+ int maxdepth; /* limit of packets per flow */
+
u32 perturbation;
+ struct tcf_proto *filter_list;
sfq_index cur_depth; /* depth of longest slot */
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct sfq_slot *tail; /* current slot in round */
- sfq_index *ht; /* Hash table (divisor slots) */
- struct sfq_slot slots[SFQ_SLOTS];
- struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
+ sfq_index *ht; /* Hash table ('divisor' slots) */
+ struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
+
+ struct sfq_head dep[SFQ_MAX_DEPTH + 1];
+ /* Linked lists of slots, indexed by depth
+ * dep[0] : list of unused flows
+ * dep[1] : list of flows with 1 packet
+ * dep[X] : list of flows with X packets
+ */
+
+ int perturb_period;
+ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
+ struct timer_list perturb_timer;
};
/*
@@ -132,66 +143,36 @@ struct sfq_sched_data {
*/
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
- if (val < SFQ_SLOTS)
+ if (val < SFQ_MAX_FLOWS)
return &q->slots[val].dep;
- return &q->dep[val - SFQ_SLOTS];
+ return &q->dep[val - SFQ_MAX_FLOWS];
}
-static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+/*
+ * In order to be able to quickly rehash our queue when timer changes
+ * q->perturbation, we store flow_keys in skb->cb[]
+ */
+struct sfq_skb_cb {
+ struct flow_keys keys;
+};
+
+static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
- return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
+ BUILD_BUG_ON(sizeof(skb->cb) <
+ sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
+ return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}
-static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(const struct sfq_sched_data *q,
+ const struct sk_buff *skb)
{
- u32 h, h2;
-
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- {
- const struct iphdr *iph;
- int poff;
-
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- goto err;
- iph = ip_hdr(skb);
- h = (__force u32)iph->daddr;
- h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (ip_is_fragment(iph))
- break;
- poff = proto_ports_offset(iph->protocol);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
- iph = ip_hdr(skb);
- h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
- }
- break;
- }
- case htons(ETH_P_IPV6):
- {
- const struct ipv6hdr *iph;
- int poff;
-
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- goto err;
- iph = ipv6_hdr(skb);
- h = (__force u32)iph->daddr.s6_addr32[3];
- h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
- poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
- iph = ipv6_hdr(skb);
- h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
- }
- break;
- }
- default:
-err:
- h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
- h2 = (unsigned long)skb->sk;
- }
+ const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
+ unsigned int hash;
- return sfq_fold_hash(q, h, h2);
+ hash = jhash_3words((__force u32)keys->dst,
+ (__force u32)keys->src ^ keys->ip_proto,
+ (__force u32)keys->ports, q->perturbation);
+ return hash & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -206,8 +187,10 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
TC_H_MIN(skb->priority) <= q->divisor)
return TC_H_MIN(skb->priority);
- if (!q->filter_list)
+ if (!q->filter_list) {
+ skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
return sfq_hash(q, skb) + 1;
+ }
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tc_classify(skb, q->filter_list, &res);
@@ -228,18 +211,19 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
}
/*
- * x : slot number [0 .. SFQ_SLOTS - 1]
+ * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
*/
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
sfq_index p, n;
- int qlen = q->slots[x].qlen;
+ struct sfq_slot *slot = &q->slots[x];
+ int qlen = slot->qlen;
- p = qlen + SFQ_SLOTS;
+ p = qlen + SFQ_MAX_FLOWS;
n = q->dep[qlen].next;
- q->slots[x].dep.next = n;
- q->slots[x].dep.prev = p;
+ slot->dep.next = n;
+ slot->dep.prev = p;
q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
sfq_dep_head(q, n)->prev = x;
@@ -304,6 +288,7 @@ static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
static inline void slot_queue_init(struct sfq_slot *slot)
{
+ memset(slot, 0, sizeof(*slot));
slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}
@@ -334,7 +319,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
x = q->dep[d].next;
slot = &q->slots[x];
drop:
- skb = slot_dequeue_tail(slot);
+ skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
len = qdisc_pkt_len(skb);
sfq_dec(q, x);
kfree_skb(skb);
@@ -378,16 +363,27 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot = &q->slots[x];
if (x == SFQ_EMPTY_SLOT) {
x = q->dep[0].next; /* get a free slot */
+ if (x >= SFQ_MAX_FLOWS)
+ return qdisc_drop(skb, sch);
q->ht[hash] = x;
slot = &q->slots[x];
slot->hash = hash;
}
- /* If selected queue has length q->limit, do simple tail drop,
- * i.e. drop _this_ packet.
- */
- if (slot->qlen >= q->limit)
- return qdisc_drop(skb, sch);
+ if (slot->qlen >= q->maxdepth) {
+ struct sk_buff *head;
+
+ if (!q->headdrop)
+ return qdisc_drop(skb, sch);
+
+ head = slot_dequeue_head(slot);
+ sch->qstats.backlog -= qdisc_pkt_len(head);
+ qdisc_drop(head, sch);
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ slot_queue_add(slot, skb);
+ return NET_XMIT_CN;
+ }
sch->qstats.backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb);
@@ -395,11 +391,11 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (slot->qlen == 1) { /* The flow is new */
if (q->tail == NULL) { /* It is the first flow */
slot->next = x;
+ q->tail = slot;
} else {
slot->next = q->tail->next;
q->tail->next = x;
}
- q->tail = slot;
slot->allot = q->scaled_quantum;
}
if (++sch->q.qlen <= q->limit)
@@ -468,12 +464,83 @@ sfq_reset(struct Qdisc *sch)
kfree_skb(skb);
}
+/*
+ * When q->perturbation is changed, we rehash all queued skbs
+ * to avoid OOO (Out Of Order) effects.
+ * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
+ * counters.
+ */
+static void sfq_rehash(struct Qdisc *sch)
+{
+ struct sfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ int i;
+ struct sfq_slot *slot;
+ struct sk_buff_head list;
+ int dropped = 0;
+
+ __skb_queue_head_init(&list);
+
+ for (i = 0; i < q->maxflows; i++) {
+ slot = &q->slots[i];
+ if (!slot->qlen)
+ continue;
+ while (slot->qlen) {
+ skb = slot_dequeue_head(slot);
+ sfq_dec(q, i);
+ __skb_queue_tail(&list, skb);
+ }
+ q->ht[slot->hash] = SFQ_EMPTY_SLOT;
+ }
+ q->tail = NULL;
+
+ while ((skb = __skb_dequeue(&list)) != NULL) {
+ unsigned int hash = sfq_hash(q, skb);
+ sfq_index x = q->ht[hash];
+
+ slot = &q->slots[x];
+ if (x == SFQ_EMPTY_SLOT) {
+ x = q->dep[0].next; /* get a free slot */
+ if (x >= SFQ_MAX_FLOWS) {
+drop: sch->qstats.backlog -= qdisc_pkt_len(skb);
+ kfree_skb(skb);
+ dropped++;
+ continue;
+ }
+ q->ht[hash] = x;
+ slot = &q->slots[x];
+ slot->hash = hash;
+ }
+ if (slot->qlen >= q->maxdepth)
+ goto drop;
+ slot_queue_add(slot, skb);
+ sfq_inc(q, x);
+ if (slot->qlen == 1) { /* The flow is new */
+ if (q->tail == NULL) { /* It is the first flow */
+ slot->next = x;
+ } else {
+ slot->next = q->tail->next;
+ q->tail->next = x;
+ }
+ q->tail = slot;
+ slot->allot = q->scaled_quantum;
+ }
+ }
+ sch->q.qlen -= dropped;
+ qdisc_tree_decrease_qlen(sch, dropped);
+}
+
static void sfq_perturbation(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc *)arg;
struct sfq_sched_data *q = qdisc_priv(sch);
+ spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spin_lock(root_lock);
q->perturbation = net_random();
+ if (!q->filter_list && q->tail)
+ sfq_rehash(sch);
+ spin_unlock(root_lock);
if (q->perturb_period)
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
@@ -483,23 +550,39 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
+ struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
unsigned int qlen;
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
-
+ if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
+ ctl_v1 = nla_data(opt);
if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
sch_tree_lock(sch);
- q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ if (ctl->quantum) {
+ q->quantum = ctl->quantum;
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ }
q->perturb_period = ctl->perturb_period * HZ;
- if (ctl->limit)
- q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
- if (ctl->divisor)
+ if (ctl->flows)
+ q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+ if (ctl->divisor) {
q->divisor = ctl->divisor;
+ q->maxflows = min_t(u32, q->maxflows, q->divisor);
+ }
+ if (ctl_v1) {
+ if (ctl_v1->depth)
+ q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+ q->headdrop = ctl_v1->headdrop;
+ }
+ if (ctl->limit) {
+ q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
+ q->maxflows = min_t(u32, q->maxflows, q->limit);
+ }
+
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
sfq_drop(sch);
@@ -514,46 +597,77 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
+static void *sfq_alloc(size_t sz)
+{
+ void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+ if (!ptr)
+ ptr = vmalloc(sz);
+ return ptr;
+}
+
+static void sfq_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static void sfq_destroy(struct Qdisc *sch)
+{
+ struct sfq_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ q->perturb_period = 0;
+ del_timer_sync(&q->perturb_timer);
+ sfq_free(q->ht);
+ sfq_free(q->slots);
+}
+
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- size_t sz;
int i;
q->perturb_timer.function = sfq_perturbation;
q->perturb_timer.data = (unsigned long)sch;
init_timer_deferrable(&q->perturb_timer);
- for (i = 0; i < SFQ_DEPTH; i++) {
- q->dep[i].next = i + SFQ_SLOTS;
- q->dep[i].prev = i + SFQ_SLOTS;
+ for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
+ q->dep[i].next = i + SFQ_MAX_FLOWS;
+ q->dep[i].prev = i + SFQ_MAX_FLOWS;
}
- q->limit = SFQ_DEPTH - 1;
+ q->limit = SFQ_MAX_DEPTH;
+ q->maxdepth = SFQ_MAX_DEPTH;
q->cur_depth = 0;
q->tail = NULL;
q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
- if (opt == NULL) {
- q->quantum = psched_mtu(qdisc_dev(sch));
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
- q->perturb_period = 0;
- q->perturbation = net_random();
- } else {
+ q->maxflows = SFQ_DEFAULT_FLOWS;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ q->perturb_period = 0;
+ q->perturbation = net_random();
+
+ if (opt) {
int err = sfq_change(sch, opt);
if (err)
return err;
}
- sz = sizeof(q->ht[0]) * q->divisor;
- q->ht = kmalloc(sz, GFP_KERNEL);
- if (!q->ht && sz > PAGE_SIZE)
- q->ht = vmalloc(sz);
- if (!q->ht)
+ q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
+ q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
+ if (!q->ht || !q->slots) {
+ sfq_destroy(sch);
return -ENOMEM;
+ }
for (i = 0; i < q->divisor; i++)
q->ht[i] = SFQ_EMPTY_SLOT;
- for (i = 0; i < SFQ_SLOTS; i++) {
+ for (i = 0; i < q->maxflows; i++) {
slot_queue_init(&q->slots[i]);
sfq_link(q, i);
}
@@ -564,31 +678,20 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static void sfq_destroy(struct Qdisc *sch)
-{
- struct sfq_sched_data *q = qdisc_priv(sch);
-
- tcf_destroy_chain(&q->filter_list);
- q->perturb_period = 0;
- del_timer_sync(&q->perturb_timer);
- if (is_vmalloc_addr(q->ht))
- vfree(q->ht);
- else
- kfree(q->ht);
-}
-
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct sfq_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
- struct tc_sfq_qopt opt;
-
- opt.quantum = q->quantum;
- opt.perturb_period = q->perturb_period / HZ;
-
- opt.limit = q->limit;
- opt.divisor = q->divisor;
- opt.flows = q->limit;
+ struct tc_sfq_qopt_v1 opt;
+
+ memset(&opt, 0, sizeof(opt));
+ opt.v0.quantum = q->quantum;
+ opt.v0.perturb_period = q->perturb_period / HZ;
+ opt.v0.limit = q->limit;
+ opt.v0.divisor = q->divisor;
+ opt.v0.flows = q->maxflows;
+ opt.depth = q->maxdepth;
+ opt.headdrop = q->headdrop;
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
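
Much of the sch_sfq.c rework above hangs on one idea: the per-flow bucket is a seeded hash of the dissected flow keys, and those keys are cached in skb->cb so that when the perturbation timer changes the seed, sfq_rehash() can re-bucket the queued packets without reparsing headers or reordering a flow. A toy stand-alone sketch of such a seeded flow hash (mix3() is only a stand-in for jhash_3words(), and its constants are arbitrary):

#include <stdint.h>
#include <stdio.h>

struct flow_keys { uint32_t src, dst, ports; };	/* cached per packet, like skb->cb */

/* Toy 3-word mixer standing in for jhash_3words(); only the seeding matters. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ 0x9e3779b9u;

	h ^= a; h *= 0x85ebca6bu;
	h ^= b; h *= 0xc2b2ae35u;
	h ^= c; h ^= h >> 16;
	return h;
}

static unsigned int flow_hash(const struct flow_keys *k, uint32_t perturbation,
			      unsigned int divisor)
{
	/* divisor must be a power of two, as sfq_change() enforces */
	return mix3(k->dst, k->src, k->ports, perturbation) & (divisor - 1);
}

int main(void)
{
	struct flow_keys k = { 0x0a000001, 0x0a000002, (80u << 16) | 12345 };
	unsigned int before = flow_hash(&k, 1, 1024);
	unsigned int after  = flow_hash(&k, 2, 1024);

	/* A new seed typically sends the same flow to a different bucket,
	 * which is exactly why queued packets must be rehashed after the
	 * perturbation timer fires. */
	printf("bucket before %u, after %u\n", before, after);
	return 0;
}
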
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1dcfb5223a8..b8e156319d7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -346,6 +346,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *nest;
struct tc_tbf_qopt opt;
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a3b7120fcc7..45326599fda 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+ struct net_device *dev, struct netdev_queue *txq,
+ struct neighbour *mn)
{
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
- struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
- struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+ struct teql_sched_data *q = qdisc_priv(txq->qdisc);
struct neighbour *n = q->ncache;
if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
}
static inline int teql_resolve(struct sk_buff *skb,
- struct sk_buff *skb_res, struct net_device *dev)
+ struct sk_buff *skb_res,
+ struct net_device *dev,
+ struct netdev_queue *txq)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *mn;
+ int res;
+
if (txq->qdisc == &noop_qdisc)
return -ENODEV;
- if (dev->header_ops == NULL ||
- skb_dst(skb) == NULL ||
- dst_get_neighbour(skb_dst(skb)) == NULL)
+ if (!dev->header_ops || !dst)
return 0;
- return __teql_resolve(skb, skb_res, dev);
+
+ rcu_read_lock();
+ mn = dst_get_neighbour_noref(dst);
+ res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ rcu_read_unlock();
+
+ return res;
}
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -301,18 +310,18 @@ restart:
if (slave_txq->qdisc_sleeping != q)
continue;
- if (__netif_subqueue_stopped(slave, subq) ||
+ if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
!netif_running(slave)) {
busy = 1;
continue;
}
- switch (teql_resolve(skb, skb_res, slave)) {
+ switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
case 0:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
- if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+ if (!netif_xmit_frozen_or_stopped(slave_txq) &&
slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
txq_trans_update(slave_txq);
__netif_tx_unlock(slave_txq);
@@ -324,7 +333,7 @@ restart:
}
__netif_tx_unlock(slave_txq);
}
- if (netif_queue_stopped(dev))
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
busy = 1;
break;
case 1:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3c3ff..acd2edbc073 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- (unsigned long)sp->autoclose * HZ;
+ min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 865e68fef21..bf812048cf6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
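
The reordered bounds check above compares key_len against the headroom left under INT_MAX, so a huge caller-supplied key_len can no longer make the subtraction wrap around and slip past the check. A self-contained illustration of that pattern (names and the overhead value are illustrative):

#include <limits.h>
#include <stdio.h>

/* Check "len + overhead <= INT_MAX" by comparing len against the headroom
 * below INT_MAX, so the arithmetic itself can never wrap around. */
static int fits_in_int(unsigned int len, unsigned int overhead)
{
	return len <= (unsigned int)INT_MAX - overhead;
}

int main(void)
{
	printf("%d\n", fits_in_int(100, 16));		/* 1: accepted */
	printf("%d\n", fits_in_int(UINT_MAX - 4, 16));	/* 0: rejected */
	return 0;
}
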
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index c8cc24e282c..68a385d7c3b 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -415,7 +415,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
sctp_subtype_t subtype;
sctp_state_t state;
int error = 0;
- int first_time = 1; /* is this the first time through the looop */
+ int first_time = 1; /* is this the first time through the loop */
if (ep->base.dead)
return;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b7692aab6e9..80f71af7138 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -105,7 +105,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
struct sctp_input_cb {
union {
struct inet_skb_parm h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct inet6_skb_parm h6;
#endif
} header;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 810427833bc..91f479121c5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
+ addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
@@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
/* Fill in the dest address from the route entry passed with the skb
* and the source address from the transport.
*/
- ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+ fl6.daddr = transport->ipaddr.v6.sin6_addr;
+ fl6.saddr = transport->saddr.v6.sin6_addr;
fl6.flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6.flowlabel);
@@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- ipv6_addr_copy(&fl6.daddr, rt0->addr);
+ fl6.daddr = *rt0->addr;
}
SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
@@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
sctp_scope_t scope;
memset(fl6, 0, sizeof(struct flowi6));
- ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
+ fl6->daddr = daddr->v6.sin6_addr;
fl6->fl6_dport = daddr->v6.sin6_port;
fl6->flowi6_proto = IPPROTO_SCTP;
if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl6->fl6_sport = htons(asoc->base.bind_addr.port);
if (saddr) {
- ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
+ fl6->saddr = saddr->v6.sin6_addr;
fl6->fl6_sport = saddr->v6.sin6_port;
SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
}
@@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
rcu_read_unlock();
if (baddr) {
- ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+ fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
}
@@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
if (t->dst) {
saddr->v6.sin6_family = AF_INET6;
- ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
+ saddr->v6.sin6_addr = fl6->saddr;
}
}
@@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
+ addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -416,7 +416,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
int is_saddr)
{
- void *from;
__be16 *port;
struct sctphdr *sh;
@@ -428,12 +427,11 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
sh = sctp_hdr(skb);
if (is_saddr) {
*port = sh->source;
- from = &ipv6_hdr(skb)->saddr;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
} else {
*port = sh->dest;
- from = &ipv6_hdr(skb)->daddr;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
}
- ipv6_addr_copy(&addr->v6.sin6_addr, from);
}
/* Initialize an sctp_addr from a socket. */
@@ -441,7 +439,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
- ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
+ addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -454,7 +452,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
addr->v4.sin_addr.s_addr;
} else {
- ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
+ inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
}
}
@@ -467,7 +465,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
} else {
- ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
+ inet6_sk(sk)->daddr = addr->v6.sin6_addr;
}
}
@@ -479,7 +477,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr,
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0; /* BUG */
- ipv6_addr_copy(&addr->v6.sin6_addr, &param->v6.addr);
+ addr->v6.sin6_addr = param->v6.addr;
addr->v6.sin6_scope_id = iif;
}
@@ -493,7 +491,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
param->v6.param_hdr.length = htons(length);
- ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
+ param->v6.addr = addr->v6.sin6_addr;
return length;
}
@@ -504,7 +502,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
- ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
+ addr->v6.sin6_addr = *saddr;
}
/* Compare addresses exactly.
@@ -759,7 +757,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
}
sin6from = &asoc->peer.primary_addr.v6;
- ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+ sin6->sin6_addr = sin6from->sin6_addr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6->sin6_scope_id = sin6from->sin6_scope_id;
}
@@ -787,7 +785,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
}
/* Otherwise, just copy the v6 address. */
- ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
struct sctp_ulpevent *ev = sctp_skb2event(skb);
sin6->sin6_scope_id = ev->iif;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead650..817174eb5f4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
/* Keep track of how many bytes are in flight to the receiver. */
asoc->outqueue.outstanding_bytes += datasize;
- /* Update our view of the receiver's rwnd. Include sk_buff overhead
- * while updating peer.rwnd so that it reduces the chances of a
- * receiver running out of receive buffer space even when receive
- * window is still open. This can happen when a sender is sending
- * sending small messages.
- */
- datasize += sizeof(struct sk_buff);
+ /* Update our view of the receiver's rwnd. */
if (datasize < rwnd)
rwnd -= datasize;
else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06028f..cfeb1d4a1ee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
chunk->transport->flight_size -=
sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
}
continue;
}
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
* (Section 7.2.4)), add the data size of those
* chunks to the rwnd.
*/
- q->asoc->peer.rwnd += (sctp_data_size(chunk) +
- sizeof(struct sk_buff));
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
if (chunk->transport)
transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 61b9fca5a17..5942d27b144 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -637,7 +637,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
" for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
addrw);
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/* Now we send an ASCONF for each association */
/* Note. we currently don't handle link local IPv6 addressees */
if (addrw->a.sa.sa_family == AF_INET6) {
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+ /* Initialize maximum autoclose timeout. */
+ sctp_max_autoclose = INT_MAX / HZ;
+
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 0121e0ab035..a85eeeb55dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3400,8 +3400,10 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
asconf_len -= length;
}
- if (no_err && asoc->src_out_of_asoc_ok)
+ if (no_err && asoc->src_out_of_asoc_ok) {
asoc->src_out_of_asoc_ok = 0;
+ sctp_transport_immediate_rtx(asoc->peer.primary_path);
+ }
/* Free the cached last sent asconf chunk. */
list_del_init(&asconf->transmitted_list);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 76388b083f2..1ff51c9d18d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -666,6 +666,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
struct sctp_chunk *chunk)
{
sctp_sender_hb_info_t *hbinfo;
+ int was_unconfirmed = 0;
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
* HEARTBEAT should clear the error counter of the destination
@@ -692,9 +693,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Mark the destination transport address as active if it is not so
* marked.
*/
- if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+ was_unconfirmed = 1;
sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
SCTP_HEARTBEAT_SUCCESS);
+ }
/* The receiver of the HEARTBEAT ACK should also perform an
* RTT measurement for that destination transport address
@@ -712,6 +715,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
sctp_transport_hold(t);
+
+ if (was_unconfirmed && asoc->peer.transport_count == 1)
+ sctp_transport_immediate_rtx(t);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fcdbff..408ebd0e733 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addrs;
- ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+ asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
}
SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
" at %p\n", asoc, asoc->asconf_addr_del_pending,
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
@@ -6841,7 +6839,7 @@ struct proto sctp_prot = {
.sockets_allocated = &sctp_sockets_allocated,
};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct proto sctpv6_prot = {
.name = "SCTPv6",
@@ -6872,4 +6870,4 @@ struct proto sctpv6_prot = {
.memory_allocated = &sctp_memory_allocated,
.sockets_allocated = &sctp_sockets_allocated,
};
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b3952961b8..60ffbd067ff 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
static int sack_timer_max = 500;
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
.extra1 = &one,
.extra2 = &rwnd_scale_max,
},
+ {
+ .procname = "max_autoclose",
+ .data = &sctp_max_autoclose,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .extra1 = &max_autoclose_min,
+ .extra2 = &max_autoclose_max,
+ },
{ /* sentinel */ }
};
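
Taken together, the autoclose changes above (the min_t() clamp in associola.c, the sctp_max_autoclose default of INT_MAX / HZ set in protocol.c, the removal of the old per-socket clamp from sctp_setsockopt_autoclose(), and this sysctl entry with its min/max bounds) keep the timeout bounded before it is converted to jiffies. A minimal stand-alone illustration of the clamp in plain C (the HZ value here is only an example):

#include <limits.h>
#include <stdio.h>

#define HZ 1000UL					/* example tick rate */

static unsigned long max_autoclose = INT_MAX / HZ;	/* mirrors the new default */

/* Convert an autoclose value in seconds to ticks, clamped so the
 * multiplication cannot overflow the timer arithmetic. */
static unsigned long autoclose_ticks(unsigned long autoclose_sec)
{
	if (autoclose_sec > max_autoclose)
		autoclose_sec = max_autoclose;
	return autoclose_sec * HZ;
}

int main(void)
{
	printf("%lu\n", autoclose_ticks(30));		/* 30000 */
	printf("%lu\n", autoclose_ticks(ULONG_MAX));	/* clamped, no overflow */
	return 0;
}
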
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 394c57ca2f5..3889330b7b0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -641,3 +641,19 @@ void sctp_transport_reset(struct sctp_transport *t)
t->cacc.next_tsn_at_change = 0;
t->cacc.cacc_saw_newack = 0;
}
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+ /* Stop pending T3_rtx_timer */
+ if (timer_pending(&t->T3_rtx_timer)) {
+ (void)del_timer(&t->T3_rtx_timer);
+ sctp_transport_put(t);
+ }
+ sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+ if (!timer_pending(&t->T3_rtx_timer)) {
+ if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+ sctp_transport_hold(t);
+ }
+ return;
+}
diff --git a/net/socket.c b/net/socket.c
index 2877647f347..e56162cd65b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -538,6 +538,8 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
*tx_flags |= SKBTX_HW_TSTAMP;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
*tx_flags |= SKBTX_SW_TSTAMP;
+ if (sock_flag(sk, SOCK_WIFI_STATUS))
+ *tx_flags |= SKBTX_WIFI_STATUS;
return 0;
}
EXPORT_SYMBOL(sock_tx_timestamp);
@@ -549,6 +551,8 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
sock_update_classid(sock->sk);
+ sock_update_netprioidx(sock->sk);
+
si->sock = sock;
si->scm = NULL;
si->msg = msg;
@@ -674,6 +678,22 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int ack;
+
+ if (!sock_flag(sk, SOCK_WIFI_STATUS))
+ return;
+ if (!skb->wifi_acked_valid)
+ return;
+
+ ack = skb->wifi_acked;
+
+ put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
+}
+EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
+
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
@@ -2738,10 +2758,10 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
case ETHTOOL_GRXRINGS:
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
+ case ETHTOOL_SRXCLSRLINS:
convert_out = true;
/* fall through */
case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
buf_size += sizeof(struct ethtool_rxnfc);
convert_in = true;
break;
@@ -2883,7 +2903,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, uifr);
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
}
@@ -3210,20 +3230,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
return sock_do_ioctl(net, sock, cmd, arg);
}
- /* Prevent warning from compat_sys_ioctl, these always
- * result in -EINVAL in the native case anyway. */
- switch (cmd) {
- case SIOCRTMSG:
- case SIOCGIFCOUNT:
- case SIOCSRARP:
- case SIOCGRARP:
- case SIOCDRARP:
- case SIOCSIFLINK:
- case SIOCGIFSLAVE:
- case SIOCSIFSLAVE:
- return -EINVAL;
- }
-
return -ENOIOCTLCMD;
}
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 67a655ee82a..ee77742e0ed 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/export.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
char *buf, const int buflen)
@@ -91,7 +91,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
return len;
}
-#else /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#else /* !IS_ENABLED(CONFIG_IPV6) */
static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
char *buf, const int buflen)
@@ -105,7 +105,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
return 0;
}
-#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
static int rpc_ntop4(const struct sockaddr *sap,
char *buf, const size_t buflen)
@@ -155,7 +155,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
return sizeof(struct sockaddr_in);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int rpc_parse_scope_id(const char *buf, const size_t buflen,
const char *delim, struct sockaddr_in6 *sin6)
{
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index e010a015d99..1426ec3d0a5 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -41,15 +41,17 @@ EXPORT_SYMBOL_GPL(rpc_lookup_cred);
/*
* Public call interface for looking up machine creds.
*/
-struct rpc_cred *rpc_lookup_machine_cred(void)
+struct rpc_cred *rpc_lookup_machine_cred(const char *service_name)
{
struct auth_cred acred = {
.uid = RPC_MACHINE_CRED_USERID,
.gid = RPC_MACHINE_CRED_GROUPID,
+ .principal = service_name,
.machine_cred = 1,
};
- dprintk("RPC: looking up machine cred\n");
+ dprintk("RPC: looking up machine cred for service %s\n",
+ service_name);
return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
}
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index afb56553dfe..28d72d29873 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -392,7 +392,8 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
}
static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
- struct rpc_clnt *clnt, int machine_cred)
+ struct rpc_clnt *clnt,
+ const char *service_name)
{
struct gss_api_mech *mech = gss_msg->auth->mech;
char *p = gss_msg->databuf;
@@ -407,12 +408,8 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
p += len;
gss_msg->msg.len += len;
}
- if (machine_cred) {
- len = sprintf(p, "service=* ");
- p += len;
- gss_msg->msg.len += len;
- } else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
- len = sprintf(p, "service=nfs ");
+ if (service_name != NULL) {
+ len = sprintf(p, "service=%s ", service_name);
p += len;
gss_msg->msg.len += len;
}
@@ -429,17 +426,18 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
}
static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
- struct rpc_clnt *clnt, int machine_cred)
+ struct rpc_clnt *clnt,
+ const char *service_name)
{
if (pipe_version == 0)
gss_encode_v0_msg(gss_msg);
else /* pipe_version == 1 */
- gss_encode_v1_msg(gss_msg, clnt, machine_cred);
+ gss_encode_v1_msg(gss_msg, clnt, service_name);
}
-static inline struct gss_upcall_msg *
-gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
- int machine_cred)
+static struct gss_upcall_msg *
+gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
+ uid_t uid, const char *service_name)
{
struct gss_upcall_msg *gss_msg;
int vers;
@@ -459,7 +457,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
atomic_set(&gss_msg->count, 1);
gss_msg->uid = uid;
gss_msg->auth = gss_auth;
- gss_encode_msg(gss_msg, clnt, machine_cred);
+ gss_encode_msg(gss_msg, clnt, service_name);
return gss_msg;
}
@@ -471,7 +469,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
struct gss_upcall_msg *gss_new, *gss_msg;
uid_t uid = cred->cr_uid;
- gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
+ gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
if (IS_ERR(gss_new))
return gss_new;
gss_msg = gss_add_msg(gss_new);
@@ -995,7 +993,9 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
*/
cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
cred->gc_service = gss_auth->service;
- cred->gc_machine_cred = acred->machine_cred;
+ cred->gc_principal = NULL;
+ if (acred->machine_cred)
+ cred->gc_principal = acred->principal;
kref_get(&gss_auth->kref);
return &cred->gc_base;
@@ -1030,7 +1030,12 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
return 0;
out:
- if (acred->machine_cred != gss_cred->gc_machine_cred)
+ if (acred->principal != NULL) {
+ if (gss_cred->gc_principal == NULL)
+ return 0;
+ return strcmp(acred->principal, gss_cred->gc_principal) == 0;
+ }
+ if (gss_cred->gc_principal != NULL)
return 0;
return rc->cr_uid == acred->uid;
}
@@ -1104,7 +1109,8 @@ static int gss_renew_cred(struct rpc_task *task)
struct rpc_auth *auth = oldcred->cr_auth;
struct auth_cred acred = {
.uid = oldcred->cr_uid,
- .machine_cred = gss_cred->gc_machine_cred,
+ .principal = gss_cred->gc_principal,
+ .machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
};
struct rpc_cred *new;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 72ad836e4fe..03b56bc3b65 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1778,7 +1778,7 @@ const struct file_operations cache_flush_operations_pipefs = {
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
- const char *name, mode_t umode,
+ const char *name, umode_t umode,
struct cache_detail *cd)
{
struct qstr q;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index bfddd68b31d..63a7a7add21 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -185,7 +185,6 @@ static void
rpc_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
@@ -954,7 +953,7 @@ static void rpc_cachedir_depopulate(struct dentry *dentry)
}
struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
- mode_t umode, struct cache_detail *cd)
+ umode_t umode, struct cache_detail *cd)
{
return rpc_mkdir_populate(parent, name, umode, NULL,
rpc_cachedir_populate, cd);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa54581..3341d896278 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
- schedule();
+ freezable_schedule();
return 0;
}
@@ -590,6 +591,27 @@ void rpc_prepare_task(struct rpc_task *task)
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+ /* Initialize retry counters */
+ task->tk_garb_retry = 2;
+ task->tk_cred_retry = 2;
+ task->tk_rebind_retry = 2;
+
+ /* starting timestamp */
+ task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+ task->tk_timeouts = 0;
+ task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+ rpc_init_task_statistics(task);
+}
+
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
@@ -602,6 +624,7 @@ void rpc_exit_task(struct rpc_task *task)
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
+ rpc_reset_task_statistics(task);
}
}
}
@@ -804,11 +827,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
- /* Initialize retry counters */
- task->tk_garb_retry = 2;
- task->tk_cred_retry = 2;
- task->tk_rebind_retry = 2;
-
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
@@ -818,8 +836,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
- /* starting timestamp */
- task->tk_start = ktime_get();
+ rpc_init_task_statistics(task);
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6e038884ae0..9d01d46b05f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -826,7 +826,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
return error;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
/*
* Register an "inet6" protocol family netid with the local
* rpcbind daemon via an rpcbind v4 SET request.
@@ -872,7 +872,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
return error;
}
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
* Register a kernel RPC service via rpcbind version 4.
@@ -893,11 +893,11 @@ static int __svc_register(const char *progname,
error = __svc_rpcb_register4(program, version,
protocol, port);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
error = __svc_rpcb_register6(program, version,
protocol, port);
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
}
if (error < 0)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 447cd0eb415..38649cfa4e8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -179,13 +179,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = htons(port),
};
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sin6 = {
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_ANY_INIT,
.sin6_port = htons(port),
};
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
struct sockaddr *sap;
size_t len;
@@ -194,12 +194,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
sap = (struct sockaddr *)&sin;
len = sizeof(sin);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
sap = (struct sockaddr *)&sin6;
len = sizeof(sin6);
break;
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
default:
return ERR_PTR(-EAFNOSUPPORT);
}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index ce136323da8..01153ead1db 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
struct ip_map *item = container_of(citem, struct ip_map, h);
strcpy(new->m_class, item->m_class);
- ipv6_addr_copy(&new->m_addr, &item->m_addr);
+ new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
@@ -220,7 +220,7 @@ static int ip_map_parse(struct cache_detail *cd,
ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
&sin6.sin6_addr);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
memcpy(&sin6, &address.s6, sizeof(sin6));
break;
@@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m,
}
im = container_of(h, struct ip_map, h);
/* class addr domain */
- ipv6_addr_copy(&addr, &im->m_addr);
+ addr = im->m_addr;
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags))
@@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
struct cache_head *ch;
strcpy(ip.m_class, class);
- ipv6_addr_copy(&ip.m_addr, addr);
+ ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(*addr));
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 71bed1c1c77..4653286fcc9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_level = SOL_IPV6;
cmh->cmsg_type = IPV6_PKTINFO;
pki->ipi6_ifindex = daddr->sin6_scope_id;
- ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
+ pki->ipi6_addr = daddr->sin6_addr;
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
@@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
return 0;
daddr->sin6_family = AF_INET6;
- ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+ daddr->sin6_addr = pki->ipi6_addr;
daddr->sin6_scope_id = pki->ipi6_ifindex;
return 1;
}
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 277ebd4bf09..593f4c60530 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
* Copies data into an arbitrary memory location from an array of pages
* The copy is assumed to be non-overlapping.
*/
-static void
+void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
struct page **pgfrom;
@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
} while ((len -= copy) != 0);
}
+EXPORT_SYMBOL_GPL(_copy_from_pages);
/*
* xdr_shrink_bufhead
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f4385e45a5f..c64c0ef519b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -995,13 +995,11 @@ out_init_req:
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (xprt_dynamic_free_slot(xprt, req))
- return;
-
- memset(req, 0, sizeof(*req)); /* mark unused */
-
spin_lock(&xprt->reserve_lock);
- list_add(&req->rq_list, &xprt->free);
+ if (!xprt_dynamic_free_slot(xprt, req)) {
+ memset(req, 0, sizeof(*req)); /* mark unused */
+ list_add(&req->rq_list, &xprt->free);
+ }
rpc_wake_up_next(&xprt->backlog);
spin_unlock(&xprt->reserve_lock);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d78d95955a..55472c48825 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- int ret = 0;
+ int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
- ret = -EAGAIN;
/*
* Notify TCP that we're limited by the application
* window size
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 28908f54459..8eb87b11d10 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -46,7 +46,7 @@
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
/**
- * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
* @secondary: pointer to secondary bearer
*
@@ -54,13 +54,13 @@
* to be paired.
*/
-struct bcbearer_pair {
+struct tipc_bcbearer_pair {
struct tipc_bearer *primary;
struct tipc_bearer *secondary;
};
/**
- * struct bcbearer - bearer used by broadcast link
+ * struct tipc_bcbearer - bearer used by broadcast link
* @bearer: (non-standard) broadcast bearer structure
* @media: (non-standard) broadcast media structure
* @bpairs: array of bearer pairs
@@ -74,38 +74,40 @@ struct bcbearer_pair {
* prevented through use of the spinlock "bc_lock".
*/
-struct bcbearer {
+struct tipc_bcbearer {
struct tipc_bearer bearer;
- struct media media;
- struct bcbearer_pair bpairs[MAX_BEARERS];
- struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+ struct tipc_media media;
+ struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+ struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
struct tipc_node_map remains;
struct tipc_node_map remains_new;
};
/**
- * struct bclink - link used for broadcast messages
+ * struct tipc_bclink - link used for broadcast messages
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
+ * @bcast_nodes: map of broadcast-capable nodes
* @retransmit_to: node that most recently requested a retransmit
*
* Handles sequence numbering, fragmentation, bundling, etc.
*/
-struct bclink {
- struct link link;
+struct tipc_bclink {
+ struct tipc_link link;
struct tipc_node node;
+ struct tipc_node_map bcast_nodes;
struct tipc_node *retransmit_to;
};
+static struct tipc_bcbearer bcast_bearer;
+static struct tipc_bclink bcast_link;
-static struct bcbearer *bcbearer;
-static struct bclink *bclink;
-static struct link *bcl;
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer = &bcast_bearer;
+static struct tipc_bclink *bclink = &bcast_link;
+static struct tipc_link *bcl = &bcast_link.link;
-/* broadcast-capable node map */
-struct tipc_node_map tipc_bcast_nmap;
+static DEFINE_SPINLOCK(bc_lock);
const char tipc_bclink_name[] = "broadcast-link";
@@ -113,11 +115,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
struct tipc_node_map *nm_b,
struct tipc_node_map *nm_diff);
-static u32 buf_seqno(struct sk_buff *buf)
-{
- return msg_seqno(buf_msg(buf));
-}
-
static u32 bcbuf_acks(struct sk_buff *buf)
{
return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -133,6 +130,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
+void tipc_bclink_add_node(u32 addr)
+{
+ spin_lock_bh(&bc_lock);
+ tipc_nmap_add(&bclink->bcast_nodes, addr);
+ spin_unlock_bh(&bc_lock);
+}
+
+void tipc_bclink_remove_node(u32 addr)
+{
+ spin_lock_bh(&bc_lock);
+ tipc_nmap_remove(&bclink->bcast_nodes, addr);
+ spin_unlock_bh(&bc_lock);
+}
static void bclink_set_last_sent(void)
{
@@ -222,14 +232,36 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
struct sk_buff *next;
unsigned int released = 0;
- if (less_eq(acked, n_ptr->bclink.acked))
- return;
-
spin_lock_bh(&bc_lock);
- /* Skip over packets that node has previously acknowledged */
-
+ /* Bail out if tx queue is empty (no clean up is required) */
crs = bcl->first_out;
+ if (!crs)
+ goto exit;
+
+ /* Determine which messages need to be acknowledged */
+ if (acked == INVALID_LINK_SEQ) {
+ /*
+ * Contact with specified node has been lost, so need to
+ * acknowledge sent messages only (if other nodes still exist)
+ * or both sent and unsent messages (otherwise)
+ */
+ if (bclink->bcast_nodes.count)
+ acked = bcl->fsm_msg_cnt;
+ else
+ acked = bcl->next_out_no;
+ } else {
+ /*
+ * Bail out if specified sequence number does not correspond
+ * to a message that has been sent and not yet acknowledged
+ */
+ if (less(acked, buf_seqno(crs)) ||
+ less(bcl->fsm_msg_cnt, acked) ||
+ less_eq(acked, n_ptr->bclink.acked))
+ goto exit;
+ }
+
+ /* Skip over packets that node has previously acknowledged */
while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
crs = crs->next;
@@ -237,7 +269,15 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
while (crs && less_eq(buf_seqno(crs), acked)) {
next = crs->next;
- bcbuf_decr_acks(crs);
+
+ if (crs != bcl->next_out)
+ bcbuf_decr_acks(crs);
+ else {
+ bcbuf_set_acks(crs, 0);
+ bcl->next_out = next;
+ bclink_set_last_sent();
+ }
+
if (bcbuf_acks(crs) == 0) {
bcl->first_out = next;
bcl->out_queue_size--;
@@ -256,6 +296,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
}
if (unlikely(released && !list_empty(&bcl->waiting_ports)))
tipc_link_wakeup_ports(bcl, 0);
+exit:
spin_unlock_bh(&bc_lock);
}
@@ -267,7 +308,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
static void bclink_send_ack(struct tipc_node *n_ptr)
{
- struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+ struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
if (l_ptr != NULL)
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
@@ -402,13 +443,19 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
spin_lock_bh(&bc_lock);
+ if (!bclink->bcast_nodes.count) {
+ res = msg_data_sz(buf_msg(buf));
+ buf_discard(buf);
+ goto exit;
+ }
+
res = tipc_link_send_buf(bcl, buf);
- if (likely(res > 0))
+ if (likely(res >= 0)) {
bclink_set_last_sent();
-
- bcl->stats.queue_sz_counts++;
- bcl->stats.accu_queue_sz += bcl->out_queue_size;
-
+ bcl->stats.queue_sz_counts++;
+ bcl->stats.accu_queue_sz += bcl->out_queue_size;
+ }
+exit:
spin_unlock_bh(&bc_lock);
return res;
}
@@ -572,13 +619,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
- bcbuf_set_acks(buf, tipc_bcast_nmap.count);
+ bcbuf_set_acks(buf, bclink->bcast_nodes.count);
msg = buf_msg(buf);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
bcl->stats.sent_info++;
- if (WARN_ON(!tipc_bcast_nmap.count)) {
+ if (WARN_ON(!bclink->bcast_nodes.count)) {
dump_stack();
return 0;
}
@@ -586,7 +633,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
/* Send buffer over bearers until all targets reached */
- bcbearer->remains = tipc_bcast_nmap;
+ bcbearer->remains = bclink->bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -630,8 +677,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
void tipc_bcbearer_sort(void)
{
- struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
- struct bcbearer_pair *bp_curr;
+ struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+ struct tipc_bcbearer_pair *bp_curr;
int b_index;
int pri;
@@ -752,25 +799,13 @@ int tipc_bclink_set_queue_limits(u32 limit)
return 0;
}
-int tipc_bclink_init(void)
+void tipc_bclink_init(void)
{
- bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
- bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
- if (!bcbearer || !bclink) {
- warn("Broadcast link creation failed, no memory\n");
- kfree(bcbearer);
- bcbearer = NULL;
- kfree(bclink);
- bclink = NULL;
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
bcbearer->bearer.media = &bcbearer->media;
bcbearer->media.send_msg = tipc_bcbearer_send;
sprintf(bcbearer->media.name, "tipc-broadcast");
- bcl = &bclink->link;
INIT_LIST_HEAD(&bcl->waiting_ports);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
@@ -780,22 +815,16 @@ int tipc_bclink_init(void)
bcl->b_ptr = &bcbearer->bearer;
bcl->state = WORKING_WORKING;
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-
- return 0;
}
void tipc_bclink_stop(void)
{
spin_lock_bh(&bc_lock);
- if (bcbearer) {
- tipc_link_stop(bcl);
- bcl = NULL;
- kfree(bclink);
- bclink = NULL;
- kfree(bcbearer);
- bcbearer = NULL;
- }
+ tipc_link_stop(bcl);
spin_unlock_bh(&bc_lock);
+
+ memset(bclink, 0, sizeof(*bclink));
+ memset(bcbearer, 0, sizeof(*bcbearer));
}
@@ -864,9 +893,9 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
* tipc_port_list_add - add a port to a port list, ensuring no duplicates
*/
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
- struct port_list *item = pl_ptr;
+ struct tipc_port_list *item = pl_ptr;
int i;
int item_sz = PLSIZE;
int cnt = pl_ptr->count;
@@ -898,10 +927,10 @@ void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
*
*/
-void tipc_port_list_free(struct port_list *pl_ptr)
+void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
- struct port_list *item;
- struct port_list *next;
+ struct tipc_port_list *item;
+ struct tipc_port_list *next;
for (item = pl_ptr->next; item; item = next) {
next = item->next;
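With tipc_bcast_nmap removed, the map of broadcast-capable nodes now lives inside the broadcast link and is maintained through the two new helpers, which take bc_lock internally. A hedged sketch of how node state handling might call them; the caller names are hypothetical, while the helpers, the bclink.supported/addr fields, and the INVALID_LINK_SEQ convention all appear in the hunks above:

/* Hypothetical callers; only the helpers themselves come from the patch. */
static void example_node_came_up(struct tipc_node *n_ptr)
{
        if (n_ptr->bclink.supported)
                tipc_bclink_add_node(n_ptr->addr);      /* bc_lock taken inside */
}

static void example_node_went_down(struct tipc_node *n_ptr)
{
        if (n_ptr->bclink.supported)
                tipc_bclink_remove_node(n_ptr->addr);
        /* "contact lost" case handled by the rewritten acknowledge routine */
        tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
}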
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 06740da5ae6..b009666c60b 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -51,20 +51,18 @@ struct tipc_node_map {
u32 map[MAX_NODES / WSIZE];
};
-extern struct tipc_node_map tipc_bcast_nmap;
-
#define PLSIZE 32
/**
- * struct port_list - set of node local destination ports
+ * struct tipc_port_list - set of node local destination ports
* @count: # of ports in set (only valid for first entry in list)
* @next: pointer to next entry in list
* @ports: array of port references
*/
-struct port_list {
+struct tipc_port_list {
int count;
- struct port_list *next;
+ struct tipc_port_list *next;
u32 ports[PLSIZE];
};
@@ -85,11 +83,13 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
-void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
-void tipc_port_list_free(struct port_list *pl_ptr);
+void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
+void tipc_port_list_free(struct tipc_port_list *pl_ptr);
-int tipc_bclink_init(void);
+void tipc_bclink_init(void);
void tipc_bclink_stop(void);
+void tipc_bclink_add_node(u32 addr);
+void tipc_bclink_remove_node(u32 addr);
struct tipc_node *tipc_bclink_retransmit_to(void);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
int tipc_bclink_send_msg(struct sk_buff *buf);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e2202de3d93..329fb659fae 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -41,7 +41,7 @@
#define MAX_ADDR_STR 32
-static struct media media_list[MAX_MEDIA];
+static struct tipc_media *media_list[MAX_MEDIA];
static u32 media_count;
struct tipc_bearer tipc_bearers[MAX_BEARERS];
@@ -65,17 +65,31 @@ static int media_name_valid(const char *name)
}
/**
- * media_find - locates specified media object by name
+ * tipc_media_find - locates specified media object by name
*/
-static struct media *media_find(const char *name)
+struct tipc_media *tipc_media_find(const char *name)
{
- struct media *m_ptr;
u32 i;
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- if (!strcmp(m_ptr->name, name))
- return m_ptr;
+ for (i = 0; i < media_count; i++) {
+ if (!strcmp(media_list[i]->name, name))
+ return media_list[i];
+ }
+ return NULL;
+}
+
+/**
+ * media_find_id - locates specified media object by type identifier
+ */
+
+static struct tipc_media *media_find_id(u8 type)
+{
+ u32 i;
+
+ for (i = 0; i < media_count; i++) {
+ if (media_list[i]->type_id == type)
+ return media_list[i];
}
return NULL;
}
@@ -86,87 +100,34 @@ static struct media *media_find(const char *name)
* Bearers for this media type must be activated separately at a later stage.
*/
-int tipc_register_media(u32 media_type,
- char *name,
- int (*enable)(struct tipc_bearer *),
- void (*disable)(struct tipc_bearer *),
- int (*send_msg)(struct sk_buff *,
- struct tipc_bearer *,
- struct tipc_media_addr *),
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size),
- struct tipc_media_addr *bcast_addr,
- const u32 bearer_priority,
- const u32 link_tolerance, /* [ms] */
- const u32 send_window_limit)
+int tipc_register_media(struct tipc_media *m_ptr)
{
- struct media *m_ptr;
- u32 media_id;
- u32 i;
int res = -EINVAL;
write_lock_bh(&tipc_net_lock);
- if (tipc_mode != TIPC_NET_MODE) {
- warn("Media <%s> rejected, not in networked mode yet\n", name);
+ if (!media_name_valid(m_ptr->name))
goto exit;
- }
- if (!media_name_valid(name)) {
- warn("Media <%s> rejected, illegal name\n", name);
+ if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
+ !m_ptr->bcast_addr.broadcast)
goto exit;
- }
- if (!bcast_addr) {
- warn("Media <%s> rejected, no broadcast address\n", name);
+ if (m_ptr->priority > TIPC_MAX_LINK_PRI)
goto exit;
- }
- if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
- (bearer_priority > TIPC_MAX_LINK_PRI)) {
- warn("Media <%s> rejected, illegal priority (%u)\n", name,
- bearer_priority);
+ if ((m_ptr->tolerance < TIPC_MIN_LINK_TOL) ||
+ (m_ptr->tolerance > TIPC_MAX_LINK_TOL))
goto exit;
- }
- if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
- (link_tolerance > TIPC_MAX_LINK_TOL)) {
- warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
- link_tolerance);
+ if (media_count >= MAX_MEDIA)
goto exit;
- }
-
- media_id = media_count++;
- if (media_id >= MAX_MEDIA) {
- warn("Media <%s> rejected, media limit reached (%u)\n", name,
- MAX_MEDIA);
- media_count--;
+ if (tipc_media_find(m_ptr->name) || media_find_id(m_ptr->type_id))
goto exit;
- }
- for (i = 0; i < media_id; i++) {
- if (media_list[i].type_id == media_type) {
- warn("Media <%s> rejected, duplicate type (%u)\n", name,
- media_type);
- media_count--;
- goto exit;
- }
- if (!strcmp(name, media_list[i].name)) {
- warn("Media <%s> rejected, duplicate name\n", name);
- media_count--;
- goto exit;
- }
- }
- m_ptr = &media_list[media_id];
- m_ptr->type_id = media_type;
- m_ptr->send_msg = send_msg;
- m_ptr->enable_bearer = enable;
- m_ptr->disable_bearer = disable;
- m_ptr->addr2str = addr2str;
- memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
- strcpy(m_ptr->name, name);
- m_ptr->priority = bearer_priority;
- m_ptr->tolerance = link_tolerance;
- m_ptr->window = send_window_limit;
+ media_list[media_count] = m_ptr;
+ media_count++;
res = 0;
exit:
write_unlock_bh(&tipc_net_lock);
+ if (res)
+ warn("Media <%s> registration error\n", m_ptr->name);
return res;
}
@@ -176,27 +137,19 @@ exit:
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
{
- struct media *m_ptr;
- u32 media_type;
- u32 i;
+ char addr_str[MAX_ADDR_STR];
+ struct tipc_media *m_ptr;
- media_type = ntohl(a->type);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- if (m_ptr->type_id == media_type)
- break;
- }
-
- if ((i < media_count) && (m_ptr->addr2str != NULL)) {
- char addr_str[MAX_ADDR_STR];
+ m_ptr = media_find_id(a->media_id);
- tipc_printf(pb, "%s(%s)", m_ptr->name,
- m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
- } else {
- unchar *addr = (unchar *)&a->dev_addr;
+ if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
+ tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str);
+ else {
+ u32 i;
- tipc_printf(pb, "UNKNOWN(%u)", media_type);
- for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++)
- tipc_printf(pb, "-%02x", addr[i]);
+ tipc_printf(pb, "UNKNOWN(%u)", a->media_id);
+ for (i = 0; i < sizeof(a->value); i++)
+ tipc_printf(pb, "-%02x", a->value[i]);
}
}
@@ -207,7 +160,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
struct sk_buff *tipc_media_get_names(void)
{
struct sk_buff *buf;
- struct media *m_ptr;
int i;
buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
@@ -215,9 +167,10 @@ struct sk_buff *tipc_media_get_names(void)
return NULL;
read_lock_bh(&tipc_net_lock);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
- tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
- strlen(m_ptr->name) + 1);
+ for (i = 0; i < media_count; i++) {
+ tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
+ media_list[i]->name,
+ strlen(media_list[i]->name) + 1);
}
read_unlock_bh(&tipc_net_lock);
return buf;
@@ -232,7 +185,7 @@ struct sk_buff *tipc_media_get_names(void)
*/
static int bearer_name_validate(const char *name,
- struct bearer_name *name_parts)
+ struct tipc_bearer_names *name_parts)
{
char name_copy[TIPC_MAX_BEARER_NAME];
char *media_name;
@@ -276,10 +229,10 @@ static int bearer_name_validate(const char *name,
}
/**
- * bearer_find - locates bearer object with matching bearer name
+ * tipc_bearer_find - locates bearer object with matching bearer name
*/
-static struct tipc_bearer *bearer_find(const char *name)
+struct tipc_bearer *tipc_bearer_find(const char *name)
{
struct tipc_bearer *b_ptr;
u32 i;
@@ -318,7 +271,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
struct sk_buff *tipc_bearer_get_names(void)
{
struct sk_buff *buf;
- struct media *m_ptr;
struct tipc_bearer *b_ptr;
int i, j;
@@ -327,10 +279,10 @@ struct sk_buff *tipc_bearer_get_names(void)
return NULL;
read_lock_bh(&tipc_net_lock);
- for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ for (i = 0; i < media_count; i++) {
for (j = 0; j < MAX_BEARERS; j++) {
b_ptr = &tipc_bearers[j];
- if (b_ptr->active && (b_ptr->media == m_ptr)) {
+ if (b_ptr->active && (b_ptr->media == media_list[i])) {
tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
b_ptr->name,
strlen(b_ptr->name) + 1);
@@ -366,7 +318,7 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
static int bearer_push(struct tipc_bearer *b_ptr)
{
u32 res = 0;
- struct link *ln, *tln;
+ struct tipc_link *ln, *tln;
if (b_ptr->blocked)
return 0;
@@ -412,7 +364,8 @@ void tipc_continue(struct tipc_bearer *b_ptr)
* bearer.lock is busy
*/
-static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
+ struct tipc_link *l_ptr)
{
list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
}
@@ -425,7 +378,7 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link
* bearer.lock is free
*/
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
spin_lock_bh(&b_ptr->lock);
tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
@@ -438,7 +391,8 @@ void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
* and if there is, try to resolve it before returning.
* 'tipc_net_lock' is read_locked when this function is called
*/
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+ struct tipc_link *l_ptr)
{
int res = 1;
@@ -457,7 +411,7 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr
* tipc_bearer_congested - determines if bearer is currently congested
*/
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
{
if (unlikely(b_ptr->blocked))
return 1;
@@ -473,8 +427,8 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
{
struct tipc_bearer *b_ptr;
- struct media *m_ptr;
- struct bearer_name b_name;
+ struct tipc_media *m_ptr;
+ struct tipc_bearer_names b_names;
char addr_string[16];
u32 bearer_id;
u32 with_this_prio;
@@ -486,7 +440,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
name);
return -ENOPROTOOPT;
}
- if (!bearer_name_validate(name, &b_name)) {
+ if (!bearer_name_validate(name, &b_names)) {
warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
@@ -511,10 +465,10 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
write_lock_bh(&tipc_net_lock);
- m_ptr = media_find(b_name.media_name);
+ m_ptr = tipc_media_find(b_names.media_name);
if (!m_ptr) {
warn("Bearer <%s> rejected, media <%s> not registered\n", name,
- b_name.media_name);
+ b_names.media_name);
goto exit;
}
@@ -561,6 +515,8 @@ restart:
b_ptr->identity = bearer_id;
b_ptr->media = m_ptr;
+ b_ptr->tolerance = m_ptr->tolerance;
+ b_ptr->window = m_ptr->window;
b_ptr->net_plane = bearer_id + 'A';
b_ptr->active = 1;
b_ptr->priority = priority;
@@ -590,11 +546,11 @@ exit:
int tipc_block_bearer(const char *name)
{
struct tipc_bearer *b_ptr = NULL;
- struct link *l_ptr;
- struct link *temp_l_ptr;
+ struct tipc_link *l_ptr;
+ struct tipc_link *temp_l_ptr;
read_lock_bh(&tipc_net_lock);
- b_ptr = bearer_find(name);
+ b_ptr = tipc_bearer_find(name);
if (!b_ptr) {
warn("Attempt to block unknown bearer <%s>\n", name);
read_unlock_bh(&tipc_net_lock);
@@ -625,8 +581,8 @@ int tipc_block_bearer(const char *name)
static void bearer_disable(struct tipc_bearer *b_ptr)
{
- struct link *l_ptr;
- struct link *temp_l_ptr;
+ struct tipc_link *l_ptr;
+ struct tipc_link *temp_l_ptr;
info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
@@ -648,7 +604,7 @@ int tipc_disable_bearer(const char *name)
int res;
write_lock_bh(&tipc_net_lock);
- b_ptr = bearer_find(name);
+ b_ptr = tipc_bearer_find(name);
if (b_ptr == NULL) {
warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d696f9e414e..d3eac56b8c2 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -43,32 +43,45 @@
#define MAX_MEDIA 2
/*
+ * Identifiers associated with TIPC message header media address info
+ *
+ * - address info field is 20 bytes long
+ * - media type identifier located at offset 3
+ * - remaining bytes vary according to media type
+ */
+
+#define TIPC_MEDIA_ADDR_SIZE 20
+#define TIPC_MEDIA_TYPE_OFFSET 3
+
+/*
* Identifiers of supported TIPC media types
*/
#define TIPC_MEDIA_TYPE_ETH 1
/*
- * Destination address structure used by TIPC bearers when sending messages
- *
- * IMPORTANT: The fields of this structure MUST be stored using the specified
- * byte order indicated below, as the structure is exchanged between nodes
- * as part of a link setup process.
+ * struct tipc_media_addr - destination address used by TIPC bearers
+ * @value: address info (format defined by media)
+ * @media_id: TIPC media type identifier
+ * @broadcast: non-zero if address is a broadcast address
*/
+
struct tipc_media_addr {
- __be32 type; /* bearer type (network byte order) */
- union {
- __u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
- } dev_addr;
+ u8 value[TIPC_MEDIA_ADDR_SIZE];
+ u8 media_id;
+ u8 broadcast;
};
struct tipc_bearer;
/**
- * struct media - TIPC media information available to internal users
+ * struct tipc_media - TIPC media information available to internal users
* @send_msg: routine which handles buffer transmission
* @enable_bearer: routine which enables a bearer
* @disable_bearer: routine which disables a bearer
- * @addr2str: routine which converts bearer's address to string form
+ * @addr2str: routine which converts media address to string
+ * @str2addr: routine which converts media address from string
+ * @addr2msg: routine which converts media address to protocol message area
+ * @msg2addr: routine which converts media address from protocol message area
* @bcast_addr: media address used in broadcasting
* @priority: default link (and bearer) priority
* @tolerance: default time (in ms) before declaring link failure
@@ -77,14 +90,16 @@ struct tipc_bearer;
* @name: media name
*/
-struct media {
+struct tipc_media {
int (*send_msg)(struct sk_buff *buf,
struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest);
int (*enable_bearer)(struct tipc_bearer *b_ptr);
void (*disable_bearer)(struct tipc_bearer *b_ptr);
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size);
+ int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
+ int (*str2addr)(struct tipc_media_addr *a, char *str_buf);
+ int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
+ int (*msg2addr)(struct tipc_media_addr *a, char *msg_area);
struct tipc_media_addr bcast_addr;
u32 priority;
u32 tolerance;
@@ -103,6 +118,8 @@ struct media {
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
* @priority: default link priority for bearer
+ * @window: default window size for bearer
+ * @tolerance: default link tolerance for bearer
* @identity: array index of this bearer within TIPC bearer array
* @link_req: ptr to (optional) structure making periodic link setup requests
* @links: list of non-congested links associated with bearer
@@ -122,10 +139,12 @@ struct tipc_bearer {
struct tipc_media_addr addr; /* initialized by media */
char name[TIPC_MAX_BEARER_NAME];
spinlock_t lock;
- struct media *media;
+ struct tipc_media *media;
u32 priority;
+ u32 window;
+ u32 tolerance;
u32 identity;
- struct link_req *link_req;
+ struct tipc_link_req *link_req;
struct list_head links;
struct list_head cong_links;
int active;
@@ -133,28 +152,19 @@ struct tipc_bearer {
struct tipc_node_map nodes;
};
-struct bearer_name {
+struct tipc_bearer_names {
char media_name[TIPC_MAX_MEDIA_NAME];
char if_name[TIPC_MAX_IF_NAME];
};
-struct link;
+struct tipc_link;
extern struct tipc_bearer tipc_bearers[];
/*
* TIPC routines available to supported media types
*/
-int tipc_register_media(u32 media_type,
- char *media_name, int (*enable)(struct tipc_bearer *),
- void (*disable)(struct tipc_bearer *),
- int (*send_msg)(struct sk_buff *,
- struct tipc_bearer *, struct tipc_media_addr *),
- char *(*addr2str)(struct tipc_media_addr *a,
- char *str_buf, int str_size),
- struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
- const u32 link_tolerance, /* [ms] */
- const u32 send_window_limit);
+int tipc_register_media(struct tipc_media *m_ptr);
void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
@@ -170,16 +180,21 @@ int tipc_disable_bearer(const char *name);
int tipc_eth_media_start(void);
void tipc_eth_media_stop(void);
+int tipc_media_set_priority(const char *name, u32 new_value);
+int tipc_media_set_window(const char *name, u32 new_value);
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
struct sk_buff *tipc_bearer_get_names(void);
void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr);
+void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
+struct tipc_bearer *tipc_bearer_find(const char *name);
struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr);
+struct tipc_media *tipc_media_find(const char *name);
+int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
+ struct tipc_link *l_ptr);
+int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
void tipc_bearer_stop(void);
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
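With the table-driven registration above, a media driver fills in a single struct tipc_media and hands it to tipc_register_media() instead of passing a long argument list. A sketch under the structure and constants shown above; the "foo" media, its callbacks, and TIPC_MEDIA_TYPE_FOO are purely illustrative:

/* Hypothetical media registration mirroring the new API. */
static struct tipc_media foo_media_info = {
        .send_msg       = foo_send_msg,         /* hypothetical callbacks */
        .enable_bearer  = foo_enable_bearer,
        .disable_bearer = foo_disable_bearer,
        .addr2str       = foo_addr2str,
        .str2addr       = foo_str2addr,
        .addr2msg       = foo_addr2msg,
        .msg2addr       = foo_msg2addr,
        .bcast_addr     = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
                            TIPC_MEDIA_TYPE_FOO, 1 },   /* media_id must equal type_id */
        .priority       = TIPC_DEF_LINK_PRI,
        .tolerance      = TIPC_DEF_LINK_TOL,
        .window         = TIPC_DEF_LINK_WIN,
        .type_id        = TIPC_MEDIA_TYPE_FOO,          /* hypothetical identifier */
        .name           = "foo"
};

static int foo_media_start(void)
{
        /* tipc_register_media() rejects bad names, out-of-range priority or
         * tolerance, duplicate names/type ids, and a full media table. */
        return tipc_register_media(&foo_media_info);
}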
diff --git a/net/tipc/config.c b/net/tipc/config.c
index b25a396b7e1..4785bf26cdf 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -184,13 +184,12 @@ static struct sk_buff *cfg_set_own_addr(void)
" (cannot change node address once assigned)");
/*
- * Must release all spinlocks before calling start_net() because
- * Linux version of TIPC calls eth_media_start() which calls
- * register_netdevice_notifier() which may block!
- *
- * Temporarily releasing the lock should be harmless for non-Linux TIPC,
- * but Linux version of eth_media_start() should really be reworked
- * so that it can be called with spinlocks held.
+ * Must temporarily release configuration spinlock while switching into
+ * networking mode as it calls tipc_eth_media_start(), which may sleep.
+ * Releasing the lock is harmless as other locally-issued configuration
+ * commands won't occur until this one completes, and remotely-issued
+ * configuration commands can't be received until a local configuration
+ * command to enable the first bearer is received and processed.
*/
spin_unlock_bh(&config_lock);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index c21331d58fd..2691cd57b8a 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -99,8 +99,8 @@ struct sk_buff *tipc_buf_acquire(u32 size)
static void tipc_core_stop_net(void)
{
- tipc_eth_media_stop();
tipc_net_stop();
+ tipc_eth_media_stop();
}
/**
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f2fb96e86ee..a00e5f81156 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -45,7 +45,7 @@
/**
- * struct link_req - information about an ongoing link setup request
+ * struct tipc_link_req - information about an ongoing link setup request
* @bearer: bearer issuing requests
* @dest: destination address for request messages
* @domain: network domain to which links can be established
@@ -54,7 +54,7 @@
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in ms)
*/
-struct link_req {
+struct tipc_link_req {
struct tipc_bearer *bearer;
struct tipc_media_addr dest;
u32 domain;
@@ -84,7 +84,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
msg_set_non_seq(msg, 1);
msg_set_dest_domain(msg, dest_domain);
msg_set_bc_netid(msg, tipc_net_id);
- msg_set_media_addr(msg, &b_ptr->addr);
+ b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
}
return buf;
}
@@ -120,7 +120,7 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct tipc_node *n_ptr;
- struct link *link;
+ struct tipc_link *link;
struct tipc_media_addr media_addr, *addr;
struct sk_buff *rbuf;
struct tipc_msg *msg = buf_msg(buf);
@@ -130,12 +130,15 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
u32 type = msg_type(msg);
int link_fully_up;
- msg_get_media_addr(msg, &media_addr);
+ media_addr.broadcast = 1;
+ b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
buf_discard(buf);
/* Validate discovery message from requesting node */
if (net_id != tipc_net_id)
return;
+ if (media_addr.broadcast)
+ return;
if (!tipc_addr_domain_valid(dest))
return;
if (!tipc_addr_node_valid(orig))
@@ -215,7 +218,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
* and is either not currently searching or is searching at a slow rate
*/
-static void disc_update(struct link_req *req)
+static void disc_update(struct tipc_link_req *req)
{
if (!req->num_nodes) {
if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
@@ -231,7 +234,7 @@ static void disc_update(struct link_req *req)
* @req: ptr to link request structure
*/
-void tipc_disc_add_dest(struct link_req *req)
+void tipc_disc_add_dest(struct tipc_link_req *req)
{
req->num_nodes++;
}
@@ -241,7 +244,7 @@ void tipc_disc_add_dest(struct link_req *req)
* @req: ptr to link request structure
*/
-void tipc_disc_remove_dest(struct link_req *req)
+void tipc_disc_remove_dest(struct tipc_link_req *req)
{
req->num_nodes--;
disc_update(req);
@@ -252,7 +255,7 @@ void tipc_disc_remove_dest(struct link_req *req)
* @req: ptr to link request structure
*/
-static void disc_send_msg(struct link_req *req)
+static void disc_send_msg(struct tipc_link_req *req)
{
if (!req->bearer->blocked)
tipc_bearer_send(req->bearer, req->buf, &req->dest);
@@ -265,7 +268,7 @@ static void disc_send_msg(struct link_req *req)
* Called whenever a link setup request timer associated with a bearer expires.
*/
-static void disc_timeout(struct link_req *req)
+static void disc_timeout(struct tipc_link_req *req)
{
int max_delay;
@@ -313,7 +316,7 @@ exit:
int tipc_disc_create(struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest, u32 dest_domain)
{
- struct link_req *req;
+ struct tipc_link_req *req;
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
@@ -342,7 +345,7 @@ int tipc_disc_create(struct tipc_bearer *b_ptr,
* @req: ptr to link request structure
*/
-void tipc_disc_delete(struct link_req *req)
+void tipc_disc_delete(struct tipc_link_req *req)
{
k_cancel_timer(&req->timer);
k_term_timer(&req->timer);
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index a3af595b86c..75b67c403aa 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,13 +37,13 @@
#ifndef _TIPC_DISCOVER_H
#define _TIPC_DISCOVER_H
-struct link_req;
+struct tipc_link_req;
int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
u32 dest_domain);
-void tipc_disc_delete(struct link_req *req);
-void tipc_disc_add_dest(struct link_req *req);
-void tipc_disc_remove_dest(struct link_req *req);
+void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_add_dest(struct tipc_link_req *req);
+void tipc_disc_remove_dest(struct tipc_link_req *req);
void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index e728d4ce2a1..527e3f0e165 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -38,28 +38,45 @@
#include "bearer.h"
#define MAX_ETH_BEARERS MAX_BEARERS
-#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
-#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
-#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
+
+#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */
/**
* struct eth_bearer - Ethernet bearer data structure
* @bearer: ptr to associated "generic" bearer structure
* @dev: ptr to associated Ethernet network device
* @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @cleanup: work item used when disabling bearer
*/
struct eth_bearer {
struct tipc_bearer *bearer;
struct net_device *dev;
struct packet_type tipc_packet_type;
+ struct work_struct cleanup;
};
+static struct tipc_media eth_media_info;
static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
static int eth_started;
static struct notifier_block notifier;
/**
+ * eth_media_addr_set - initialize Ethernet media address structure
+ *
+ * Media-dependent "value" field stores the MAC address in its first 6 bytes;
+ * the remaining bytes are zeroed out.
+ */
+
+static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
+{
+ memcpy(a->value, mac, ETH_ALEN);
+ memset(a->value + ETH_ALEN, 0, sizeof(a->value) - ETH_ALEN);
+ a->media_id = TIPC_MEDIA_TYPE_ETH;
+ a->broadcast = !memcmp(mac, eth_media_info.bcast_addr.value, ETH_ALEN);
+}
+
+/**
* send_msg - send a TIPC message out over an Ethernet interface
*/
@@ -85,7 +102,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
skb_reset_network_header(clone);
clone->dev = dev;
- dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+ dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
dev->dev_addr, clone->len);
dev_queue_xmit(clone);
return 0;
@@ -172,22 +189,41 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
tb_ptr->usr_handle = (void *)eb_ptr;
tb_ptr->mtu = dev->mtu;
tb_ptr->blocked = 0;
- tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
- memcpy(&tb_ptr->addr.dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_media_addr_set(&tb_ptr->addr, (char *)dev->dev_addr);
return 0;
}
/**
+ * cleanup_bearer - break association between Ethernet bearer and interface
+ *
+ * This routine must be invoked from a work queue because it can sleep.
+ */
+
+static void cleanup_bearer(struct work_struct *work)
+{
+ struct eth_bearer *eb_ptr =
+ container_of(work, struct eth_bearer, cleanup);
+
+ dev_remove_pack(&eb_ptr->tipc_packet_type);
+ dev_put(eb_ptr->dev);
+ eb_ptr->dev = NULL;
+}
+
+/**
* disable_bearer - detach TIPC bearer from an Ethernet interface
*
- * We really should do dev_remove_pack() here, but this function can not be
- * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
- * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit.
+ * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
+ * then get worker thread to complete bearer cleanup. (Can't do cleanup
+ * here because cleanup code needs to sleep and caller holds spinlocks.)
*/
static void disable_bearer(struct tipc_bearer *tb_ptr)
{
- ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
+ struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+
+ eb_ptr->bearer = NULL;
+ INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+ schedule_work(&eb_ptr->cleanup);
}
/**
@@ -246,17 +282,81 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
* eth_addr2str - convert Ethernet address to string
*/
-static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{
+ if (str_size < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
+ return 1;
+
+ sprintf(str_buf, "%pM", a->value);
+ return 0;
+}
+
+/**
+ * eth_str2addr - convert string to Ethernet address
+ */
+
+static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
+{
+ char mac[ETH_ALEN];
+ unsigned int m[ETH_ALEN];
+ int r, i;
+
+ /* Scan into int-sized temporaries; pointing "%x" conversions directly
+  * at single chars would write past them. */
+ r = sscanf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]);
+
+ if (r != ETH_ALEN)
+ return 1;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac[i] = (char)m[i];
+
+ eth_media_addr_set(a, mac);
+ return 0;
+}
+
+/**
+ * eth_addr2msg - convert Ethernet address format to message header format
+ */
+
+static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
{
- unchar *addr = (unchar *)&a->dev_addr;
+ memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
+ msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+ memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
+ return 0;
+}
- if (str_size < 18)
- *str_buf = '\0';
- else
- sprintf(str_buf, "%pM", addr);
- return str_buf;
+/**
+ * eth_msg2addr - convert message header address format to Ethernet format
+ */
+
+static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
+{
+ if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
+ return 1;
+
+ eth_media_addr_set(a, msg_area + ETH_ADDR_OFFSET);
+ return 0;
}
+/*
+ * Ethernet media registration info
+ */
+
+static struct tipc_media eth_media_info = {
+ .send_msg = send_msg,
+ .enable_bearer = enable_bearer,
+ .disable_bearer = disable_bearer,
+ .addr2str = eth_addr2str,
+ .str2addr = eth_str2addr,
+ .addr2msg = eth_addr2msg,
+ .msg2addr = eth_msg2addr,
+ .bcast_addr = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ TIPC_MEDIA_TYPE_ETH, 1 },
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .window = TIPC_DEF_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_ETH,
+ .name = "eth"
+};
+
/**
* tipc_eth_media_start - activate Ethernet bearer support
*
@@ -266,21 +366,12 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
int tipc_eth_media_start(void)
{
- struct tipc_media_addr bcast_addr;
int res;
if (eth_started)
return -EINVAL;
- bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
- memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
-
- memset(eth_bearers, 0, sizeof(eth_bearers));
-
- res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
- enable_bearer, disable_bearer, send_msg,
- eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
- ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
+ res = tipc_register_media(&eth_media_info);
if (res)
return res;
@@ -298,22 +389,10 @@ int tipc_eth_media_start(void)
void tipc_eth_media_stop(void)
{
- int i;
-
if (!eth_started)
return;
+ flush_scheduled_work();
unregister_netdevice_notifier(&notifier);
- for (i = 0; i < MAX_ETH_BEARERS ; i++) {
- if (eth_bearers[i].bearer) {
- eth_bearers[i].bearer->blocked = 1;
- eth_bearers[i].bearer = NULL;
- }
- if (eth_bearers[i].dev) {
- dev_remove_pack(&eth_bearers[i].tipc_packet_type);
- dev_put(eth_bearers[i].dev);
- }
- }
- memset(&eth_bearers, 0, sizeof(eth_bearers));
eth_started = 0;
}
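The fixed 20-byte message-area layout (type identifier at offset 3, MAC at offset 4 for Ethernet) lets discovery pack and recover addresses through the media's addr2msg()/msg2addr() hooks. A small round-trip sketch using the Ethernet handlers defined above; the function name is illustrative and, like the handlers, would live in eth_media.c:

/* Illustrative round trip through the new on-wire address format. */
static int example_eth_addr_roundtrip(struct tipc_media_addr *in,
                                      struct tipc_media_addr *out)
{
        char msg_area[TIPC_MEDIA_ADDR_SIZE];

        if (eth_addr2msg(in, msg_area))         /* writes type id + MAC */
                return 1;
        return eth_msg2addr(out, msg_area);     /* nonzero on a non-Ethernet type id */
}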
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ae98a72da11..ac1832a66f8 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -71,35 +71,36 @@
#define START_CHANGEOVER 100000u
/**
- * struct link_name - deconstructed link name
+ * struct tipc_link_name - deconstructed link name
* @addr_local: network address of node at this end
* @if_local: name of interface at this end
* @addr_peer: network address of node at far end
* @if_peer: name of interface at far end
*/
-struct link_name {
+struct tipc_link_name {
u32 addr_local;
char if_local[TIPC_MAX_IF_NAME];
u32 addr_peer;
char if_peer[TIPC_MAX_IF_NAME];
};
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
-static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int link_recv_changeover_msg(struct tipc_link **l_ptr,
+ struct sk_buff **buf);
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int link_send_sections_long(struct tipc_port *sender,
struct iovec const *msg_sect,
u32 num_sect, unsigned int total_len,
u32 destnode);
-static void link_check_defragm_bufs(struct link *l_ptr);
-static void link_state_event(struct link *l_ptr, u32 event);
-static void link_reset_statistics(struct link *l_ptr);
-static void link_print(struct link *l_ptr, const char *str);
-static void link_start(struct link *l_ptr);
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+static void link_check_defragm_bufs(struct tipc_link *l_ptr);
+static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static void link_reset_statistics(struct tipc_link *l_ptr);
+static void link_print(struct tipc_link *l_ptr, const char *str);
+static void link_start(struct tipc_link *l_ptr);
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
/*
* Simple link routines
@@ -110,7 +111,7 @@ static unsigned int align(unsigned int i)
return (i + 3) & ~3u;
}
-static void link_init_max_pkt(struct link *l_ptr)
+static void link_init_max_pkt(struct tipc_link *l_ptr)
{
u32 max_pkt;
@@ -127,14 +128,14 @@ static void link_init_max_pkt(struct link *l_ptr)
l_ptr->max_pkt_probes = 0;
}
-static u32 link_next_sent(struct link *l_ptr)
+static u32 link_next_sent(struct tipc_link *l_ptr)
{
if (l_ptr->next_out)
- return msg_seqno(buf_msg(l_ptr->next_out));
+ return buf_seqno(l_ptr->next_out);
return mod(l_ptr->next_out_no);
}
-static u32 link_last_sent(struct link *l_ptr)
+static u32 link_last_sent(struct tipc_link *l_ptr)
{
return mod(link_next_sent(l_ptr) - 1);
}
@@ -143,28 +144,29 @@ static u32 link_last_sent(struct link *l_ptr)
* Simple non-static link routines (i.e. referenced outside this file)
*/
-int tipc_link_is_up(struct link *l_ptr)
+int tipc_link_is_up(struct tipc_link *l_ptr)
{
if (!l_ptr)
return 0;
return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
-int tipc_link_is_active(struct link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l_ptr)
{
return (l_ptr->owner->active_links[0] == l_ptr) ||
(l_ptr->owner->active_links[1] == l_ptr);
}
/**
- * link_name_validate - validate & (optionally) deconstruct link name
+ * link_name_validate - validate & (optionally) deconstruct tipc_link name
* @name - ptr to link name string
* @name_parts - ptr to area for link name components (or NULL if not needed)
*
* Returns 1 if link name is valid, otherwise 0.
*/
-static int link_name_validate(const char *name, struct link_name *name_parts)
+static int link_name_validate(const char *name,
+ struct tipc_link_name *name_parts)
{
char name_copy[TIPC_MAX_LINK_NAME];
char *addr_local;
@@ -238,7 +240,7 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
* tipc_node_delete() is called.)
*/
-static void link_timeout(struct link *l_ptr)
+static void link_timeout(struct tipc_link *l_ptr)
{
tipc_node_lock(l_ptr->owner);
@@ -287,7 +289,7 @@ static void link_timeout(struct link *l_ptr)
tipc_node_unlock(l_ptr->owner);
}
-static void link_set_timer(struct link *l_ptr, u32 time)
+static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
k_start_timer(&l_ptr->timer, time);
}
@@ -301,11 +303,11 @@ static void link_set_timer(struct link *l_ptr, u32 time)
* Returns pointer to link.
*/
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_msg *msg;
char *if_name;
char addr_string[16];
@@ -343,7 +345,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->checkpoint = 1;
l_ptr->peer_session = INVALID_SESSION;
l_ptr->b_ptr = b_ptr;
- link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+ link_set_supervision_props(l_ptr, b_ptr->tolerance);
l_ptr->state = RESET_UNKNOWN;
l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
@@ -355,7 +357,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
strcpy((char *)msg_data(msg), if_name);
l_ptr->priority = b_ptr->priority;
- tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
+ tipc_link_set_queue_limits(l_ptr, b_ptr->window);
link_init_max_pkt(l_ptr);
@@ -382,7 +384,7 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
* to avoid a potential deadlock situation.
*/
-void tipc_link_delete(struct link *l_ptr)
+void tipc_link_delete(struct tipc_link *l_ptr)
{
if (!l_ptr) {
err("Attempt to delete non-existent link\n");
@@ -401,7 +403,7 @@ void tipc_link_delete(struct link *l_ptr)
kfree(l_ptr);
}
-static void link_start(struct link *l_ptr)
+static void link_start(struct tipc_link *l_ptr)
{
tipc_node_lock(l_ptr->owner);
link_state_event(l_ptr, STARTING_EVT);
@@ -418,7 +420,7 @@ static void link_start(struct link *l_ptr)
* has abated.
*/
-static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
struct tipc_port *p_ptr;
@@ -440,7 +442,7 @@ exit:
return -ELINKCONG;
}
-void tipc_link_wakeup_ports(struct link *l_ptr, int all)
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
struct tipc_port *p_ptr;
struct tipc_port *temp_p_ptr;
@@ -475,7 +477,7 @@ exit:
* @l_ptr: pointer to link
*/
-static void link_release_outqueue(struct link *l_ptr)
+static void link_release_outqueue(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->first_out;
struct sk_buff *next;
@@ -494,7 +496,7 @@ static void link_release_outqueue(struct link *l_ptr)
* @l_ptr: pointer to link
*/
-void tipc_link_reset_fragments(struct link *l_ptr)
+void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->defragm_buf;
struct sk_buff *next;
@@ -512,7 +514,7 @@ void tipc_link_reset_fragments(struct link *l_ptr)
* @l_ptr: pointer to link
*/
-void tipc_link_stop(struct link *l_ptr)
+void tipc_link_stop(struct tipc_link *l_ptr)
{
struct sk_buff *buf;
struct sk_buff *next;
@@ -537,7 +539,7 @@ void tipc_link_stop(struct link *l_ptr)
l_ptr->proto_msg_queue = NULL;
}
-void tipc_link_reset(struct link *l_ptr)
+void tipc_link_reset(struct tipc_link *l_ptr)
{
struct sk_buff *buf;
u32 prev_state = l_ptr->state;
@@ -597,7 +599,7 @@ void tipc_link_reset(struct link *l_ptr)
}
-static void link_activate(struct link *l_ptr)
+static void link_activate(struct tipc_link *l_ptr)
{
l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
@@ -610,9 +612,9 @@ static void link_activate(struct link *l_ptr)
* @event: state machine event to process
*/
-static void link_state_event(struct link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned event)
{
- struct link *other;
+ struct tipc_link *other;
u32 cont_intv = l_ptr->continuity_interval;
if (!l_ptr->started && (event != STARTING_EVT))
@@ -784,7 +786,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
* the tail of an existing one.
*/
-static int link_bundle_buf(struct link *l_ptr,
+static int link_bundle_buf(struct tipc_link *l_ptr,
struct sk_buff *bundler,
struct sk_buff *buf)
{
@@ -813,7 +815,7 @@ static int link_bundle_buf(struct link *l_ptr,
return 1;
}
-static void link_add_to_outqueue(struct link *l_ptr,
+static void link_add_to_outqueue(struct tipc_link *l_ptr,
struct sk_buff *buf,
struct tipc_msg *msg)
{
@@ -834,7 +836,7 @@ static void link_add_to_outqueue(struct link *l_ptr,
l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}
-static void link_add_chain_to_outqueue(struct link *l_ptr,
+static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
struct sk_buff *buf_chain,
u32 long_msgno)
{
@@ -859,7 +861,7 @@ static void link_add_chain_to_outqueue(struct link *l_ptr,
* has failed, and from link_send()
*/
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
u32 size = msg_size(msg);
@@ -954,7 +956,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
int res = -ELINKCONG;
@@ -988,7 +990,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct sk_buff *buf;
struct sk_buff *temp_buf;
@@ -1027,7 +1029,7 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
* Link is locked. Returns user data length.
*/
-static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
u32 *used_max_pkt)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -1061,7 +1063,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
*/
int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
int res;
u32 selector = msg_origport(buf_msg(buf)) & 1;
@@ -1100,7 +1102,7 @@ int tipc_link_send_sections_fast(struct tipc_port *sender,
u32 destaddr)
{
struct tipc_msg *hdr = &sender->phdr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct sk_buff *buf;
struct tipc_node *node;
int res;
@@ -1195,7 +1197,7 @@ static int link_send_sections_long(struct tipc_port *sender,
unsigned int total_len,
u32 destaddr)
{
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *node;
struct tipc_msg *hdr = &sender->phdr;
u32 dsz = total_len;
@@ -1342,7 +1344,7 @@ reject:
/*
* tipc_link_push_packet: Push one unsent packet to the media
*/
-u32 tipc_link_push_packet(struct link *l_ptr)
+u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
struct sk_buff *buf = l_ptr->first_out;
u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1354,7 +1356,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
if (r_q_size && buf) {
u32 last = lesser(mod(r_q_head + r_q_size),
link_last_sent(l_ptr));
- u32 first = msg_seqno(buf_msg(buf));
+ u32 first = buf_seqno(buf);
while (buf && less(first, r_q_head)) {
first = mod(first + 1);
@@ -1403,7 +1405,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
if (buf) {
struct tipc_msg *msg = buf_msg(buf);
u32 next = msg_seqno(msg);
- u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+ u32 first = buf_seqno(l_ptr->first_out);
if (mod(next - first) < l_ptr->queue_limit[0]) {
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1426,7 +1428,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
* push_queue(): push out the unsent messages of a link where
* congestion has abated. Node is locked
*/
-void tipc_link_push_queue(struct link *l_ptr)
+void tipc_link_push_queue(struct tipc_link *l_ptr)
{
u32 res;
@@ -1470,7 +1472,8 @@ static void link_reset_all(unsigned long addr)
read_unlock_bh(&tipc_net_lock);
}
-static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l_ptr,
+ struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -1514,7 +1517,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
}
}
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
+void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
u32 retransmits)
{
struct tipc_msg *msg;
@@ -1558,7 +1561,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
} else {
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
l_ptr->stats.bearer_congs++;
- l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+ l_ptr->retransm_queue_head = buf_seqno(buf);
l_ptr->retransm_queue_size = retransmits;
return;
}
@@ -1571,7 +1574,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
* link_insert_deferred_queue - insert deferred messages back into receive chain
*/
-static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
+static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
u32 seq_no;
@@ -1579,7 +1582,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
if (l_ptr->oldest_deferred_in == NULL)
return buf;
- seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ seq_no = buf_seqno(l_ptr->oldest_deferred_in);
if (seq_no == mod(l_ptr->next_in_no)) {
l_ptr->newest_deferred_in->next = buf;
buf = l_ptr->oldest_deferred_in;
@@ -1653,7 +1656,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
read_lock_bh(&tipc_net_lock);
while (head) {
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct sk_buff *crs;
struct sk_buff *buf = head;
struct tipc_msg *msg;
@@ -1733,14 +1736,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Release acked messages */
- if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
- if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
- tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
- }
+ if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
+ tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
crs = l_ptr->first_out;
while ((crs != l_ptr->next_out) &&
- less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+ less_eq(buf_seqno(crs), ackd)) {
struct sk_buff *next = crs->next;
buf_discard(crs);
@@ -1863,7 +1864,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
{
struct sk_buff *prev = NULL;
struct sk_buff *crs = *head;
- u32 seq_no = msg_seqno(buf_msg(buf));
+ u32 seq_no = buf_seqno(buf);
buf->next = NULL;
@@ -1874,7 +1875,7 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
}
/* Last ? */
- if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+ if (less(buf_seqno(*tail), seq_no)) {
(*tail)->next = buf;
*tail = buf;
return 1;
@@ -1908,10 +1909,10 @@ u32 tipc_link_defer_pkt(struct sk_buff **head,
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
*/
-static void link_handle_out_of_seq_msg(struct link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
- u32 seq_no = msg_seqno(buf_msg(buf));
+ u32 seq_no = buf_seqno(buf);
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
link_recv_proto_msg(l_ptr, buf);
@@ -1946,8 +1947,9 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
/*
* Send protocol message to the other endpoint.
*/
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
+ int probe_msg, u32 gap, u32 tolerance,
+ u32 priority, u32 ack_mtu)
{
struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
@@ -1973,10 +1975,10 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
if (!tipc_link_is_up(l_ptr))
return;
if (l_ptr->next_out)
- next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+ next_sent = buf_seqno(l_ptr->next_out);
msg_set_next_sent(msg, next_sent);
if (l_ptr->oldest_deferred_in) {
- u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
gap = mod(rec - mod(l_ptr->next_in_no));
}
msg_set_seq_gap(msg, gap);
@@ -2064,7 +2066,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
* change at any time. The node with lowest address rules
*/
-static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
u32 rec_gap = 0;
u32 max_pkt_info;
@@ -2197,12 +2199,12 @@ exit:
* tipc_link_tunnel(): Send one message via a link belonging to
* another bearer. Owner node is locked.
*/
-static void tipc_link_tunnel(struct link *l_ptr,
+static void tipc_link_tunnel(struct tipc_link *l_ptr,
struct tipc_msg *tunnel_hdr,
struct tipc_msg *msg,
u32 selector)
{
- struct link *tunnel;
+ struct tipc_link *tunnel;
struct sk_buff *buf;
u32 length = msg_size(msg);
@@ -2231,11 +2233,11 @@ static void tipc_link_tunnel(struct link *l_ptr,
* Owner node is locked.
*/
-void tipc_link_changeover(struct link *l_ptr)
+void tipc_link_changeover(struct tipc_link *l_ptr)
{
u32 msgcount = l_ptr->out_queue_size;
struct sk_buff *crs = l_ptr->first_out;
- struct link *tunnel = l_ptr->owner->active_links[0];
+ struct tipc_link *tunnel = l_ptr->owner->active_links[0];
struct tipc_msg tunnel_hdr;
int split_bundles;
@@ -2294,7 +2296,7 @@ void tipc_link_changeover(struct link *l_ptr)
}
}
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
{
struct sk_buff *iter;
struct tipc_msg tunnel_hdr;
@@ -2358,11 +2360,11 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
* via other link. Node is locked. Return extracted buffer.
*/
-static int link_recv_changeover_msg(struct link **l_ptr,
+static int link_recv_changeover_msg(struct tipc_link **l_ptr,
struct sk_buff **buf)
{
struct sk_buff *tunnel_buf = *buf;
- struct link *dest_link;
+ struct tipc_link *dest_link;
struct tipc_msg *msg;
struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
u32 msg_typ = msg_type(tunnel_msg);
@@ -2462,7 +2464,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
* The buffer is complete, inclusive total message length.
* Returns user data length.
*/
-static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct sk_buff *buf_chain = NULL;
struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2591,7 +2593,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
/* Is there an incomplete message waiting for this fragment? */
- while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
+ while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
prev = pbuf;
pbuf = pbuf->next;
@@ -2658,7 +2660,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
* @l_ptr: pointer to link
*/
-static void link_check_defragm_bufs(struct link *l_ptr)
+static void link_check_defragm_bufs(struct tipc_link *l_ptr)
{
struct sk_buff *prev = NULL;
struct sk_buff *next = NULL;
@@ -2688,7 +2690,7 @@ static void link_check_defragm_bufs(struct link *l_ptr)
-static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
return;
@@ -2700,7 +2702,7 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
}
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
/* Data messages from this node, inclusive FIRST_FRAGM */
l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
@@ -2730,11 +2732,12 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
* Returns pointer to link (or 0 if invalid link name).
*/
-static struct link *link_find_link(const char *name, struct tipc_node **node)
+static struct tipc_link *link_find_link(const char *name,
+ struct tipc_node **node)
{
- struct link_name link_name_parts;
+ struct tipc_link_name link_name_parts;
struct tipc_bearer *b_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
if (!link_name_validate(name, &link_name_parts))
return NULL;
@@ -2754,13 +2757,113 @@ static struct link *link_find_link(const char *name, struct tipc_node **node)
return l_ptr;
}
+/**
+ * link_value_is_valid -- validate proposed link tolerance/priority/window
+ *
+ * @cmd - value type (TIPC_CMD_SET_LINK_*)
+ * @new_value - the new value
+ *
+ * Returns 1 if value is within range, 0 if not.
+ */
+
+static int link_value_is_valid(u16 cmd, u32 new_value)
+{
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ return (new_value >= TIPC_MIN_LINK_TOL) &&
+ (new_value <= TIPC_MAX_LINK_TOL);
+ case TIPC_CMD_SET_LINK_PRI:
+ return (new_value <= TIPC_MAX_LINK_PRI);
+ case TIPC_CMD_SET_LINK_WINDOW:
+ return (new_value >= TIPC_MIN_LINK_WIN) &&
+ (new_value <= TIPC_MAX_LINK_WIN);
+ }
+ return 0;
+}
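+
+ Editorially, link_value_is_valid() pulls the range checks that used to be repeated per command inside tipc_link_cmd_config() into one place, so a bad value can be rejected up front with its own error string. A standalone sketch of the same pattern; the limit constants below are illustrative placeholders for TIPC_MIN/MAX_LINK_TOL, TIPC_MAX_LINK_PRI and TIPC_MIN/MAX_LINK_WIN, whose real values live in the TIPC headers:
+
+#include <stdint.h>
+
+enum cmd { SET_LINK_TOL, SET_LINK_PRI, SET_LINK_WINDOW };
+
+/* Illustrative stand-ins for TIPC_{MIN,MAX}_LINK_{TOL,PRI,WIN}. */
+#define MIN_TOL 50
+#define MAX_TOL 30000
+#define MAX_PRI 31
+#define MIN_WIN 16
+#define MAX_WIN 150
+
+/* Returns 1 if the proposed value is in range for the command, 0 otherwise. */
+static int value_is_valid(enum cmd cmd, uint32_t v)
+{
+    switch (cmd) {
+    case SET_LINK_TOL:
+        return v >= MIN_TOL && v <= MAX_TOL;
+    case SET_LINK_PRI:
+        return v <= MAX_PRI;
+    case SET_LINK_WINDOW:
+        return v >= MIN_WIN && v <= MAX_WIN;
+    }
+    return 0; /* unknown command: reject */
+}
+
+int main(void)
+{
+    /* priority 16 is acceptable, a window of 8 is not */
+    return !(value_is_valid(SET_LINK_PRI, 16) && !value_is_valid(SET_LINK_WINDOW, 8));
+}
+
+ Validating before the name is even resolved is what lets the caller below report "cannot change, value invalid" instead of the generic "cannot change link setting".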
+
+
+/**
+ * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
+ * @name - ptr to link, bearer, or media name
+ * @new_value - new value of link, bearer, or media setting
+ * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ *
+ * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
+ *
+ * Returns 0 if value updated and negative value on error.
+ */
+
+static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
+{
+ struct tipc_node *node;
+ struct tipc_link *l_ptr;
+ struct tipc_bearer *b_ptr;
+ struct tipc_media *m_ptr;
+
+ l_ptr = link_find_link(name, &node);
+ if (l_ptr) {
+ /*
+ * acquire node lock for tipc_link_send_proto_msg().
+ * see "TIPC locking policy" in net.c.
+ */
+ tipc_node_lock(node);
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ link_set_supervision_props(l_ptr, new_value);
+ tipc_link_send_proto_msg(l_ptr,
+ STATE_MSG, 0, 0, new_value, 0, 0);
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ l_ptr->priority = new_value;
+ tipc_link_send_proto_msg(l_ptr,
+ STATE_MSG, 0, 0, 0, new_value, 0);
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ tipc_link_set_queue_limits(l_ptr, new_value);
+ break;
+ }
+ tipc_node_unlock(node);
+ return 0;
+ }
+
+ b_ptr = tipc_bearer_find(name);
+ if (b_ptr) {
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ b_ptr->tolerance = new_value;
+ return 0;
+ case TIPC_CMD_SET_LINK_PRI:
+ b_ptr->priority = new_value;
+ return 0;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ b_ptr->window = new_value;
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ m_ptr = tipc_media_find(name);
+ if (!m_ptr)
+ return -ENODEV;
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ m_ptr->tolerance = new_value;
+ return 0;
+ case TIPC_CMD_SET_LINK_PRI:
+ m_ptr->priority = new_value;
+ return 0;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ m_ptr->window = new_value;
+ return 0;
+ }
+ return -EINVAL;
+}
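+
+ link_cmd_set_value() resolves the supplied name in a fixed order -- link first, then bearer, then media -- and only reports -ENODEV when none of the three match; an unknown command on a bearer or media yields -EINVAL. A compressed user-space sketch of that fall-through for a single attribute, with the table lookups standing in (hypothetically) for link_find_link(), tipc_bearer_find() and tipc_media_find():
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+struct obj { const char *name; unsigned tolerance; };
+
+static struct obj links[]   = { { "link-a", 0 } };
+static struct obj bearers[] = { { "eth:e0", 0 } };
+static struct obj media[]   = { { "eth", 0 } };
+
+/* Hypothetical lookup helper standing in for link_find_link() and friends. */
+static struct obj *find(struct obj *tab, size_t n, const char *name)
+{
+    for (size_t i = 0; i < n; i++)
+        if (!strcmp(tab[i].name, name))
+            return &tab[i];
+    return NULL;
+}
+
+static int set_tolerance(const char *name, unsigned value)
+{
+    struct obj *o;
+
+    if ((o = find(links, 1, name)) ||   /* most specific: the link itself */
+        (o = find(bearers, 1, name)) || /* then the bearer */
+        (o = find(media, 1, name))) {   /* then the media type */
+        o->tolerance = value;
+        return 0;
+    }
+    return -ENODEV;                     /* nothing by that name */
+}
+
+int main(void)
+{
+    printf("%d %d\n", set_tolerance("eth", 3000), set_tolerance("bogus", 3000));
+    return 0;
+}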
+
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
u16 cmd)
{
struct tipc_link_config *args;
u32 new_value;
- struct link *l_ptr;
- struct tipc_node *node;
int res;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
@@ -2769,6 +2872,10 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
new_value = ntohl(args->value);
+ if (!link_value_is_valid(cmd, new_value))
+ return tipc_cfg_reply_error_string(
+ "cannot change, value invalid");
+
if (!strcmp(args->name, tipc_bclink_name)) {
if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
(tipc_bclink_set_queue_limits(new_value) == 0))
@@ -2778,43 +2885,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
}
read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(args->name, &node);
- if (!l_ptr) {
- read_unlock_bh(&tipc_net_lock);
- return tipc_cfg_reply_error_string("link not found");
- }
-
- tipc_node_lock(node);
- res = -EINVAL;
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- if ((new_value >= TIPC_MIN_LINK_TOL) &&
- (new_value <= TIPC_MAX_LINK_TOL)) {
- link_set_supervision_props(l_ptr, new_value);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, new_value, 0, 0);
- res = 0;
- }
- break;
- case TIPC_CMD_SET_LINK_PRI:
- if ((new_value >= TIPC_MIN_LINK_PRI) &&
- (new_value <= TIPC_MAX_LINK_PRI)) {
- l_ptr->priority = new_value;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, new_value, 0);
- res = 0;
- }
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- if ((new_value >= TIPC_MIN_LINK_WIN) &&
- (new_value <= TIPC_MAX_LINK_WIN)) {
- tipc_link_set_queue_limits(l_ptr, new_value);
- res = 0;
- }
- break;
- }
- tipc_node_unlock(node);
-
+ res = link_cmd_set_value(args->name, new_value, cmd);
read_unlock_bh(&tipc_net_lock);
if (res)
return tipc_cfg_reply_error_string("cannot change link setting");
@@ -2827,7 +2898,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
* @l_ptr: pointer to link
*/
-static void link_reset_statistics(struct link *l_ptr)
+static void link_reset_statistics(struct tipc_link *l_ptr)
{
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
l_ptr->stats.sent_info = l_ptr->next_out_no;
@@ -2837,7 +2908,7 @@ static void link_reset_statistics(struct link *l_ptr)
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
char *link_name;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *node;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
@@ -2885,7 +2956,7 @@ static u32 percent(u32 count, u32 total)
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
struct print_buf pb;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct tipc_node *node;
char *status;
u32 profile_total = 0;
@@ -3007,7 +3078,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
u32 res = MAX_PKT_DEFAULT;
if (dest == tipc_own_addr)
@@ -3026,7 +3097,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
return res;
}
-static void link_print(struct link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l_ptr, const char *str)
{
char print_area[256];
struct print_buf pb;
@@ -3046,13 +3117,12 @@ static void link_print(struct link *l_ptr, const char *str)
tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
tipc_printf(buf, "SQUE");
if (l_ptr->first_out) {
- tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+ tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
if (l_ptr->next_out)
- tipc_printf(buf, "%u..",
- msg_seqno(buf_msg(l_ptr->next_out)));
- tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
- if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
- msg_seqno(buf_msg(l_ptr->first_out)))
+ tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
+ tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
+ if ((mod(buf_seqno(l_ptr->last_out) -
+ buf_seqno(l_ptr->first_out))
!= (l_ptr->out_queue_size - 1)) ||
(l_ptr->last_out->next != NULL)) {
tipc_printf(buf, "\nSend queue inconsistency\n");
@@ -3064,8 +3134,8 @@ static void link_print(struct link *l_ptr, const char *str)
tipc_printf(buf, "[]");
tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
if (l_ptr->oldest_deferred_in) {
- u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
- u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+ u32 o = buf_seqno(l_ptr->oldest_deferred_in);
+ u32 n = buf_seqno(l_ptr->newest_deferred_in);
tipc_printf(buf, ":RQUE[%u..%u]", o, n);
if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
tipc_printf(buf, ":RQSIZ(%u)",
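
The send-queue sanity check in link_print() leans on the same 16-bit arithmetic: for a consistent queue, the seqno span from first_out to last_out must equal out_queue_size - 1 even when the numbers wrap through zero. A small sketch of that invariant with example values:

#include <assert.h>
#include <stdint.h>

static uint32_t mod16(uint32_t x)
{
    return x & 0xffffu;
}

/* Consistent send queue: seqno span == number of queued packets - 1. */
static int queue_consistent(uint32_t first_seq, uint32_t last_seq, uint32_t queue_size)
{
    return mod16(last_seq - first_seq) == queue_size - 1;
}

int main(void)
{
    assert(queue_consistent(100, 104, 5));       /* 100..104, five packets */
    assert(queue_consistent(0xfffe, 0x0002, 5)); /* same span, wrapping through 0 */
    assert(!queue_consistent(100, 104, 6));      /* size disagrees with the span */
    return 0;
}
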
diff --git a/net/tipc/link.h b/net/tipc/link.h
index e56cb532913..73c18c140e1 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -45,6 +45,12 @@
#define PUSH_FINISHED 2
/*
+ * Out-of-range value for link sequence numbers
+ */
+
+#define INVALID_LINK_SEQ 0x10000
+
+/*
* Link states
*/
@@ -61,7 +67,7 @@
#define MAX_PKT_DEFAULT 1500
/**
- * struct link - TIPC link data structure
+ * struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
* @media_addr: media address to use when sending messages over link
@@ -109,7 +115,7 @@
* @stats: collects statistics regarding link activity
*/
-struct link {
+struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct tipc_media_addr media_addr;
@@ -207,24 +213,24 @@ struct link {
struct tipc_port;
-struct link *tipc_link_create(struct tipc_node *n_ptr,
+struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct link *l_ptr);
-void tipc_link_changeover(struct link *l_ptr);
-void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
-void tipc_link_reset_fragments(struct link *l_ptr);
-int tipc_link_is_up(struct link *l_ptr);
-int tipc_link_is_active(struct link *l_ptr);
-u32 tipc_link_push_packet(struct link *l_ptr);
-void tipc_link_stop(struct link *l_ptr);
+void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_changeover(struct tipc_link *l_ptr);
+void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *dest);
+void tipc_link_reset_fragments(struct tipc_link *l_ptr);
+int tipc_link_is_up(struct tipc_link *l_ptr);
+int tipc_link_is_active(struct tipc_link *l_ptr);
+u32 tipc_link_push_packet(struct tipc_link *l_ptr);
+void tipc_link_stop(struct tipc_link *l_ptr);
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
-void tipc_link_reset(struct link *l_ptr);
+void tipc_link_reset(struct tipc_link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
void tipc_link_send_names(struct list_head *message_list, u32 dest);
-int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
int tipc_link_send_sections_fast(struct tipc_port *sender,
struct iovec const *msg_sect,
@@ -235,19 +241,26 @@ void tipc_link_recv_bundle(struct sk_buff *buf);
int tipc_link_recv_fragment(struct sk_buff **pending,
struct sk_buff **fb,
struct tipc_msg **msg);
-void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
- u32 tolerance, u32 priority, u32 acked_mtu);
-void tipc_link_push_queue(struct link *l_ptr);
+void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+ u32 gap, u32 tolerance, u32 priority,
+ u32 acked_mtu);
+void tipc_link_push_queue(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
struct sk_buff *buf);
-void tipc_link_wakeup_ports(struct link *l_ptr, int all);
-void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
-void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
+void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
+void tipc_link_retransmit(struct tipc_link *l_ptr,
+ struct sk_buff *start, u32 retransmits);
/*
* Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
*/
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+ return msg_seqno(buf_msg(buf));
+}
+
static inline u32 mod(u32 x)
{
return x & 0xffffu;
@@ -282,32 +295,32 @@ static inline u32 lesser(u32 left, u32 right)
* Link status checking routines
*/
-static inline int link_working_working(struct link *l_ptr)
+static inline int link_working_working(struct tipc_link *l_ptr)
{
return l_ptr->state == WORKING_WORKING;
}
-static inline int link_working_unknown(struct link *l_ptr)
+static inline int link_working_unknown(struct tipc_link *l_ptr)
{
return l_ptr->state == WORKING_UNKNOWN;
}
-static inline int link_reset_unknown(struct link *l_ptr)
+static inline int link_reset_unknown(struct tipc_link *l_ptr)
{
return l_ptr->state == RESET_UNKNOWN;
}
-static inline int link_reset_reset(struct link *l_ptr)
+static inline int link_reset_reset(struct tipc_link *l_ptr)
{
return l_ptr->state == RESET_RESET;
}
-static inline int link_blocked(struct link *l_ptr)
+static inline int link_blocked(struct tipc_link *l_ptr)
{
return l_ptr->exp_msg_count || l_ptr->blocked;
}
-static inline int link_congested(struct link *l_ptr)
+static inline int link_congested(struct tipc_link *l_ptr)
{
return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 83d50967910..3e4d3e29be6 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -333,11 +333,14 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
}
if (msg_user(msg) == LINK_CONFIG) {
- u32 *raw = (u32 *)msg;
- struct tipc_media_addr *orig = (struct tipc_media_addr *)&raw[5];
+ struct tipc_media_addr orig;
+
tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
- tipc_media_addr_printf(buf, orig);
+ memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
+ orig.media_id = 0;
+ orig.broadcast = 0;
+ tipc_media_addr_printf(buf, &orig);
}
if (msg_user(msg) == BCAST_PROTOCOL) {
tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index d93178f2e85..7b0cda16710 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -78,6 +78,8 @@
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+#define TIPC_MEDIA_ADDR_OFFSET 5
+
struct tipc_msg {
__be32 hdr[15];
@@ -682,6 +684,10 @@ static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
msg_set_bits(m, 5, 12, 0x1, r);
}
+static inline char *msg_media_addr(struct tipc_msg *m)
+{
+ return (char *)&m->hdr[TIPC_MEDIA_ADDR_OFFSET];
+}
/*
* Word 9
@@ -734,14 +740,4 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
u32 num_sect, unsigned int total_len,
int max_size, int usrmem, struct sk_buff **buf);
-static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
- memcpy(&((int *)m)[5], a, sizeof(*a));
-}
-
-static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
- memcpy(a, &((int *)m)[5], sizeof(*a));
-}
-
#endif
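
The new msg_media_addr() accessor replaces the removed msg_set_media_addr()/msg_get_media_addr() pair, which copied an entire struct tipc_media_addr in and out of header word 5 -- including host-only fields such as media_id that have no business on the wire. The caller in msg.c now copies just the raw value bytes and fills the rest in locally. A user-space sketch of the same idea; the struct layout and sizes below are illustrative, not the real TIPC ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEDIA_ADDR_OFFSET 5  /* header word where the address bytes start, as in msg.h */

struct msg {
    uint32_t hdr[15];
};

/* Raw byte view of the media-address area, in the spirit of msg_media_addr(). */
static char *media_addr(struct msg *m)
{
    return (char *)&m->hdr[MEDIA_ADDR_OFFSET];
}

/* Host-side representation; only 'value' ever travels inside the message. */
struct media_addr {
    char value[16];    /* illustrative size */
    uint8_t media_id;  /* host-only */
    uint8_t broadcast; /* host-only */
};

int main(void)
{
    struct msg m = { .hdr = { 0 } };
    struct media_addr orig;

    memcpy(media_addr(&m), "\x00\x11\x22\x33\x44\x55", 6);  /* sender writes raw bytes */

    memcpy(orig.value, media_addr(&m), sizeof(orig.value)); /* receiver reads them back */
    orig.media_id = 0;                                      /* filled in locally */
    orig.broadcast = 0;
    printf("first octet: %02x\n", (unsigned char)orig.value[0]);
    return 0;
}
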
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index b7ca1bd7b15..98ebb37f180 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -176,7 +176,7 @@ void tipc_named_withdraw(struct publication *publ)
void tipc_named_node_up(unsigned long nodearg)
{
struct tipc_node *n_ptr;
- struct link *l_ptr;
+ struct tipc_link *l_ptr;
struct publication *publ;
struct distr_item *item = NULL;
struct sk_buff *buf = NULL;
@@ -322,10 +322,9 @@ void tipc_named_recv(struct sk_buff *buf)
/**
* tipc_named_reinit - re-initialize local publication list
*
- * This routine is called whenever TIPC networking is (re)enabled.
+ * This routine is called whenever TIPC networking is enabled.
* All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's current network address.
- * (If the node's address is unchanged, the update loop terminates immediately.)
+ * are updated to reflect the node's new network address.
*/
void tipc_named_reinit(void)
@@ -333,10 +332,9 @@ void tipc_named_reinit(void)
struct publication *publ;
write_lock_bh(&tipc_nametbl_lock);
- list_for_each_entry(publ, &publ_root, local_list) {
- if (publ->node == tipc_own_addr)
- break;
+
+ list_for_each_entry(publ, &publ_root, local_list)
publ->node = tipc_own_addr;
- }
+
write_unlock_bh(&tipc_nametbl_lock);
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 46e6b6c2ecc..89eb5621ebb 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -251,8 +251,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 port, u32 key)
{
- struct subscription *s;
- struct subscription *st;
+ struct tipc_subscription *s;
+ struct tipc_subscription *st;
struct publication *publ;
struct sub_seq *sseq;
struct name_info *info;
@@ -381,7 +381,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
struct name_info *info;
struct sub_seq *free;
- struct subscription *s, *st;
+ struct tipc_subscription *s, *st;
int removed_subseq = 0;
if (!sseq)
@@ -448,7 +448,8 @@ found:
* sequence overlapping with the requested sequence
*/
-static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+static void tipc_nameseq_subscribe(struct name_seq *nseq,
+ struct tipc_subscription *s)
{
struct sub_seq *sseq = nseq->sseqs;
@@ -625,7 +626,7 @@ not_found:
*/
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct port_list *dports)
+ struct tipc_port_list *dports)
{
struct name_seq *seq;
struct sub_seq *sseq;
@@ -739,7 +740,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
-void tipc_nametbl_subscribe(struct subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
u32 type = s->seq.type;
struct name_seq *seq;
@@ -763,7 +764,7 @@ void tipc_nametbl_subscribe(struct subscription *s)
* tipc_nametbl_unsubscribe - remove a subscription object from name table
*/
-void tipc_nametbl_unsubscribe(struct subscription *s)
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
struct name_seq *seq;
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 62d77e5e902..8086b42f92a 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -39,8 +39,8 @@
#include "node_subscr.h"
-struct subscription;
-struct port_list;
+struct tipc_subscription;
+struct tipc_port_list;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
@@ -90,7 +90,7 @@ extern rwlock_t tipc_nametbl_lock;
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
- struct port_list *dports);
+ struct tipc_port_list *dports);
int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
struct tipc_name_seq const *seq);
struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
@@ -100,8 +100,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
u32 scope, u32 node, u32 ref, u32 key);
struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
u32 node, u32 ref, u32 key);
-void tipc_nametbl_subscribe(struct subscription *s);
-void tipc_nametbl_unsubscribe(struct subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(void);
void tipc_nametbl_stop(void);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index fafef6c3c0f..61afee7e829 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -174,7 +174,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
int tipc_net_start(u32 addr)
{
char addr_string[16];
- int res;
if (tipc_mode != TIPC_NODE_MODE)
return -ENOPROTOOPT;
@@ -187,9 +186,7 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();
- res = tipc_bclink_init();
- if (res)
- return res;
+ tipc_bclink_init();
tipc_k_signal((Handler)tipc_subscr_start, 0);
tipc_k_signal((Handler)tipc_cfg_init, 0);
@@ -207,8 +204,8 @@ void tipc_net_stop(void)
if (tipc_mode != TIPC_NET_MODE)
return;
write_lock_bh(&tipc_net_lock);
- tipc_bearer_stop();
tipc_mode = TIPC_NODE_MODE;
+ tipc_bearer_stop();
tipc_bclink_stop();
list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
tipc_node_delete(node);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 27b4bb0cca6..6b226faad89 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -136,9 +136,9 @@ void tipc_node_delete(struct tipc_node *n_ptr)
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- struct link **active = &n_ptr->active_links[0];
+ struct tipc_link **active = &n_ptr->active_links[0];
n_ptr->working_links++;
@@ -171,14 +171,14 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
static void node_select_active_links(struct tipc_node *n_ptr)
{
- struct link **active = &n_ptr->active_links[0];
+ struct tipc_link **active = &n_ptr->active_links[0];
u32 i;
u32 highest_prio = 0;
active[0] = active[1] = NULL;
for (i = 0; i < MAX_BEARERS; i++) {
- struct link *l_ptr = n_ptr->links[i];
+ struct tipc_link *l_ptr = n_ptr->links[i];
if (!l_ptr || !tipc_link_is_up(l_ptr) ||
(l_ptr->priority < highest_prio))
@@ -197,9 +197,9 @@ static void node_select_active_links(struct tipc_node *n_ptr)
* tipc_node_link_down - handle loss of link
*/
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- struct link **active;
+ struct tipc_link **active;
n_ptr->working_links--;
@@ -239,14 +239,14 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
return tipc_node_active_links(n_ptr);
}
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
atomic_inc(&tipc_num_links);
n_ptr->link_cnt++;
}
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
n_ptr->links[l_ptr->b_ptr->identity] = NULL;
atomic_dec(&tipc_num_links);
@@ -307,7 +307,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
n_ptr->bclink.acked = tipc_bclink_get_last_sent();
if (n_ptr->bclink.supported) {
- tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
+ tipc_bclink_add_node(n_ptr->addr);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag++;
}
@@ -350,9 +350,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
n_ptr->bclink.defragm = NULL;
}
- tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
- tipc_bclink_acknowledge(n_ptr,
- mod(n_ptr->bclink.acked + 10000));
+ tipc_bclink_remove_node(n_ptr->addr);
+ tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag--;
@@ -361,7 +360,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
- struct link *l_ptr = n_ptr->links[i];
+ struct tipc_link *l_ptr = n_ptr->links[i];
if (!l_ptr)
continue;
l_ptr->reset_checkpoint = l_ptr->next_in_no;
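
node_lost_contact() now acknowledges broadcast traffic with INVALID_LINK_SEQ (0x10000, described in link.h above as an out-of-range value for link sequence numbers) rather than the old mod(acked + 10000) arithmetic. Since every on-wire sequence number fits in 16 bits, the sentinel can never collide with a real one -- a trivial check:

#include <assert.h>
#include <stdint.h>

#define INVALID_LINK_SEQ 0x10000  /* one past the largest 16-bit sequence number */

int main(void)
{
    for (uint32_t seq = 0; seq <= 0xffff; seq++)
        assert(seq != INVALID_LINK_SEQ); /* never matches a wire seqno */
    return 0;
}
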
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 4f15cb40aaa..0b1c5f8b699 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -79,8 +79,8 @@ struct tipc_node {
struct hlist_node hash;
struct list_head list;
struct list_head nsub;
- struct link *active_links[2];
- struct link *links[MAX_BEARERS];
+ struct tipc_link *active_links[2];
+ struct tipc_link *links[MAX_BEARERS];
int link_cnt;
int working_links;
int block_setup;
@@ -117,10 +117,10 @@ extern u32 tipc_own_tag;
struct tipc_node *tipc_node_find(u32 addr);
struct tipc_node *tipc_node_create(u32 addr);
void tipc_node_delete(struct tipc_node *n_ptr);
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
+void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
+void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
int tipc_node_active_links(struct tipc_node *n_ptr);
int tipc_node_redundant_links(struct tipc_node *n_ptr);
int tipc_node_is_up(struct tipc_node *n_ptr);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 54d812a5a4d..d91efc69e6f 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -80,7 +80,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
struct tipc_msg *hdr;
struct sk_buff *buf;
struct sk_buff *ibuf = NULL;
- struct port_list dports = {0, NULL, };
+ struct tipc_port_list dports = {0, NULL, };
struct tipc_port *oport = tipc_port_deref(ref);
int ext_targets;
int res;
@@ -142,11 +142,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
* If there is no port list, perform a lookup to create one
*/
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
{
struct tipc_msg *msg;
- struct port_list dports = {0, NULL, };
- struct port_list *item = dp;
+ struct tipc_port_list dports = {0, NULL, };
+ struct tipc_port_list *item = dp;
int cnt = 0;
msg = buf_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index b9aa34195ae..f751807e2a9 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -151,7 +151,7 @@ struct tipc_port {
};
extern spinlock_t tipc_port_list_lock;
-struct port_list;
+struct tipc_port_list;
/*
* TIPC port manipulation routines
@@ -228,7 +228,7 @@ int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
unsigned int total_len, int err);
struct sk_buff *tipc_port_get_ports(void);
void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
void tipc_port_reinit(void);
/**
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 83116892528..9e37b7812c3 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -110,8 +110,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
/* allocate table & mark all entries as uninitialized */
- table = __vmalloc(actual_size * sizeof(struct reference),
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ table = vzalloc(actual_size * sizeof(struct reference));
if (table == NULL)
return -ENOMEM;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 42b8324ff2e..e2f7c5d370b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -185,9 +185,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
/* Validate arguments */
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 198371723b4..8c49566da8f 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -40,14 +40,14 @@
#include "subscr.h"
/**
- * struct subscriber - TIPC network topology subscriber
+ * struct tipc_subscriber - TIPC network topology subscriber
* @port_ref: object reference to server port connecting to subscriber
* @lock: pointer to spinlock controlling access to subscriber's server port
* @subscriber_list: adjacent subscribers in top. server's list of subscribers
* @subscription_list: list of subscription objects for this subscriber
*/
-struct subscriber {
+struct tipc_subscriber {
u32 port_ref;
spinlock_t *lock;
struct list_head subscriber_list;
@@ -92,7 +92,7 @@ static u32 htohl(u32 in, int swap)
* try to take the lock if the message is rejected and returned!
*/
-static void subscr_send_event(struct subscription *sub,
+static void subscr_send_event(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper,
u32 event,
@@ -118,7 +118,7 @@ static void subscr_send_event(struct subscription *sub,
* Returns 1 if there is overlap, otherwise 0.
*/
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper)
@@ -138,7 +138,7 @@ int tipc_subscr_overlap(struct subscription *sub,
* Protected by nameseq.lock in name_table.c
*/
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper,
u32 event,
@@ -158,7 +158,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
* subscr_timeout - subscription timeout has occurred
*/
-static void subscr_timeout(struct subscription *sub)
+static void subscr_timeout(struct tipc_subscription *sub)
{
struct tipc_port *server_port;
@@ -205,7 +205,7 @@ static void subscr_timeout(struct subscription *sub)
* Called with subscriber port locked.
*/
-static void subscr_del(struct subscription *sub)
+static void subscr_del(struct tipc_subscription *sub)
{
tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscription_list);
@@ -224,11 +224,11 @@ static void subscr_del(struct subscription *sub)
* simply wait for it to be released, then claim it.)
*/
-static void subscr_terminate(struct subscriber *subscriber)
+static void subscr_terminate(struct tipc_subscriber *subscriber)
{
u32 port_ref;
- struct subscription *sub;
- struct subscription *sub_temp;
+ struct tipc_subscription *sub;
+ struct tipc_subscription *sub_temp;
/* Invalidate subscriber reference */
@@ -278,10 +278,10 @@ static void subscr_terminate(struct subscriber *subscriber)
*/
static void subscr_cancel(struct tipc_subscr *s,
- struct subscriber *subscriber)
+ struct tipc_subscriber *subscriber)
{
- struct subscription *sub;
- struct subscription *sub_temp;
+ struct tipc_subscription *sub;
+ struct tipc_subscription *sub_temp;
int found = 0;
/* Find first matching subscription, exit if not found */
@@ -314,10 +314,10 @@ static void subscr_cancel(struct tipc_subscr *s,
* Called with subscriber port locked.
*/
-static struct subscription *subscr_subscribe(struct tipc_subscr *s,
- struct subscriber *subscriber)
+static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber)
{
- struct subscription *sub;
+ struct tipc_subscription *sub;
int swap;
/* Determine subscriber's endianness */
@@ -393,7 +393,7 @@ static void subscr_conn_shutdown_event(void *usr_handle,
unsigned int size,
int reason)
{
- struct subscriber *subscriber = usr_handle;
+ struct tipc_subscriber *subscriber = usr_handle;
spinlock_t *subscriber_lock;
if (tipc_port_lock(port_ref) == NULL)
@@ -416,9 +416,9 @@ static void subscr_conn_msg_event(void *usr_handle,
const unchar *data,
u32 size)
{
- struct subscriber *subscriber = usr_handle;
+ struct tipc_subscriber *subscriber = usr_handle;
spinlock_t *subscriber_lock;
- struct subscription *sub;
+ struct tipc_subscription *sub;
/*
* Lock subscriber's server port (& make a local copy of lock pointer,
@@ -471,12 +471,12 @@ static void subscr_named_msg_event(void *usr_handle,
struct tipc_portid const *orig,
struct tipc_name_seq const *dest)
{
- struct subscriber *subscriber;
+ struct tipc_subscriber *subscriber;
u32 server_port_ref;
/* Create subscriber object */
- subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
+ subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
warn("Subscriber rejected, no memory\n");
return;
@@ -568,8 +568,8 @@ failed:
void tipc_subscr_stop(void)
{
- struct subscriber *subscriber;
- struct subscriber *subscriber_temp;
+ struct tipc_subscriber *subscriber;
+ struct tipc_subscriber *subscriber_temp;
spinlock_t *subscriber_lock;
if (topsrv.setup_port) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 4b06ef6f840..ef6529c8456 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -37,10 +37,10 @@
#ifndef _TIPC_SUBSCR_H
#define _TIPC_SUBSCR_H
-struct subscription;
+struct tipc_subscription;
/**
- * struct subscription - TIPC network topology subscription object
+ * struct tipc_subscription - TIPC network topology subscription object
* @seq: name sequence associated with subscription
* @timeout: duration of subscription (in ms)
* @filter: event filtering to be done for subscription
@@ -52,7 +52,7 @@ struct subscription;
* @evt: template for events generated by subscription
*/
-struct subscription {
+struct tipc_subscription {
struct tipc_name_seq seq;
u32 timeout;
u32 filter;
@@ -64,11 +64,11 @@ struct subscription {
struct tipc_event evt;
};
-int tipc_subscr_overlap(struct subscription *sub,
+int tipc_subscr_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper);
-void tipc_subscr_report_overlap(struct subscription *sub,
+void tipc_subscr_report_overlap(struct tipc_subscription *sub,
u32 found_lower,
u32 found_upper,
u32 event,
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 5a69733bcda..8b31ab85d05 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -19,3 +19,10 @@ config UNIX
Say Y unless you know what you are doing.
+config UNIX_DIAG
+ tristate "UNIX: socket monitoring interface"
+ depends on UNIX
+ default n
+ ---help---
+ Support for UNIX socket monitoring interface used by the ss tool.
+ If unsure, say Y.
diff --git a/net/unix/Makefile b/net/unix/Makefile
index b852a2bde9a..b663c607b1c 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -6,3 +6,6 @@ obj-$(CONFIG_UNIX) += unix.o
unix-y := af_unix.o garbage.o
unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o
+
+obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
+unix_diag-y := diag.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 466fbcc5cf7..aad8fb69998 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,8 +115,10 @@
#include <net/checksum.h>
#include <linux/security.h>
-static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-static DEFINE_SPINLOCK(unix_table_lock);
+struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+EXPORT_SYMBOL_GPL(unix_socket_table);
+DEFINE_SPINLOCK(unix_table_lock);
+EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
@@ -172,7 +174,7 @@ static inline int unix_recvq_full(struct sock const *sk)
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
-static struct sock *unix_peer_get(struct sock *s)
+struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
@@ -183,6 +185,7 @@ static struct sock *unix_peer_get(struct sock *s)
unix_state_unlock(s);
return peer;
}
+EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
@@ -847,7 +850,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- unsigned int mode;
+ umode_t mode;
err = 0;
/*
* Get the parent directory, calculate the hash for last
@@ -1957,6 +1960,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if ((UNIXCB(skb).pid != siocb->scm->pid) ||
(UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} else {
@@ -1974,6 +1978,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
if (copied == 0)
copied = -EFAULT;
break;
@@ -1991,6 +1996,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put the skb back if we didn't use it up.. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
@@ -2006,6 +2012,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} while (size);
@@ -2058,6 +2065,36 @@ static int unix_shutdown(struct socket *sock, int mode)
return 0;
}
+long unix_inq_len(struct sock *sk)
+{
+ struct sk_buff *skb;
+ long amount = 0;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return -EINVAL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+ if (sk->sk_type == SOCK_STREAM ||
+ sk->sk_type == SOCK_SEQPACKET) {
+ skb_queue_walk(&sk->sk_receive_queue, skb)
+ amount += skb->len;
+ } else {
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ amount = skb->len;
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ return amount;
+}
+EXPORT_SYMBOL_GPL(unix_inq_len);
+
+long unix_outq_len(struct sock *sk)
+{
+ return sk_wmem_alloc_get(sk);
+}
+EXPORT_SYMBOL_GPL(unix_outq_len);
+
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
@@ -2066,33 +2103,16 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
switch (cmd) {
case SIOCOUTQ:
- amount = sk_wmem_alloc_get(sk);
+ amount = unix_outq_len(sk);
err = put_user(amount, (int __user *)arg);
break;
case SIOCINQ:
- {
- struct sk_buff *skb;
-
- if (sk->sk_state == TCP_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- spin_lock(&sk->sk_receive_queue.lock);
- if (sk->sk_type == SOCK_STREAM ||
- sk->sk_type == SOCK_SEQPACKET) {
- skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
- } else {
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb)
- amount = skb->len;
- }
- spin_unlock(&sk->sk_receive_queue.lock);
+ amount = unix_inq_len(sk);
+ if (amount < 0)
+ err = amount;
+ else
err = put_user(amount, (int __user *)arg);
- break;
- }
-
+ break;
default:
err = -ENOIOCTLCMD;
break;
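
unix_inq_len()/unix_outq_len() factor the SIOCINQ/SIOCOUTQ bookkeeping out of unix_ioctl() so the new diag module can reuse it for UNIX_DIAG_RQLEN. From user space the ioctl path is exercised like this -- a minimal sketch against a socketpair:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h> /* SIOCINQ, SIOCOUTQ */

int main(void)
{
    int sv[2];
    int inq = 0, outq = 0;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
        return 1;

    send(sv[0], "hello", 5, 0);

    ioctl(sv[1], SIOCINQ, &inq);   /* bytes queued for reading on the receiver */
    ioctl(sv[0], SIOCOUTQ, &outq); /* bytes charged to the sender, not yet consumed */

    printf("SIOCINQ=%d SIOCOUTQ=%d\n", inq, outq);
    return 0;
}

The same two numbers are what sk_diag_show_rqlen() reports as udiag_rqueue/udiag_wqueue for non-listening sockets.
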
diff --git a/net/unix/diag.c b/net/unix/diag.c
new file mode 100644
index 00000000000..6b7697fd911
--- /dev/null
+++ b/net/unix/diag.c
@@ -0,0 +1,329 @@
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/af_unix.h>
+#include <net/tcp_states.h>
+
+#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
+ RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
+
+static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct unix_address *addr = unix_sk(sk)->addr;
+ char *s;
+
+ if (addr) {
+ s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
+ memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
+ }
+
+ return 0;
+
+rtattr_failure:
+ return -EMSGSIZE;
+}
+
+static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct dentry *dentry = unix_sk(sk)->dentry;
+ struct unix_diag_vfs *uv;
+
+ if (dentry) {
+ uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
+ uv->udiag_vfs_ino = dentry->d_inode->i_ino;
+ uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+ }
+
+ return 0;
+
+rtattr_failure:
+ return -EMSGSIZE;
+}
+
+static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct sock *peer;
+ int ino;
+
+ peer = unix_peer_get(sk);
+ if (peer) {
+ unix_state_lock(peer);
+ ino = sock_i_ino(peer);
+ unix_state_unlock(peer);
+ sock_put(peer);
+
+ RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+ }
+
+ return 0;
+rtattr_failure:
+ return -EMSGSIZE;
+}
+
+static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct sk_buff *skb;
+ u32 *buf;
+ int i;
+
+ if (sk->sk_state == TCP_LISTEN) {
+ spin_lock(&sk->sk_receive_queue.lock);
+ buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
+ sk->sk_receive_queue.qlen * sizeof(u32));
+ i = 0;
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
+ struct sock *req, *peer;
+
+ req = skb->sk;
+ /*
+ * The state lock is outer for the same sk's
+ * queue lock. With the other's queue locked it's
+ * OK to lock the state.
+ */
+ unix_state_lock_nested(req);
+ peer = unix_sk(req)->peer;
+ buf[i++] = (peer ? sock_i_ino(peer) : 0);
+ unix_state_unlock(req);
+ }
+ spin_unlock(&sk->sk_receive_queue.lock);
+ }
+
+ return 0;
+
+rtattr_failure:
+ spin_unlock(&sk->sk_receive_queue.lock);
+ return -EMSGSIZE;
+}
+
+static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+{
+ struct unix_diag_rqlen *rql;
+
+ rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+
+ if (sk->sk_state == TCP_LISTEN) {
+ rql->udiag_rqueue = sk->sk_receive_queue.qlen;
+ rql->udiag_wqueue = sk->sk_max_ack_backlog;
+ } else {
+ rql->udiag_rqueue = (__u32)unix_inq_len(sk);
+ rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+ }
+
+ return 0;
+
+rtattr_failure:
+ return -EMSGSIZE;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+ u32 pid, u32 seq, u32 flags, int sk_ino)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct nlmsghdr *nlh;
+ struct unix_diag_msg *rep;
+
+ nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
+ nlh->nlmsg_flags = flags;
+
+ rep = NLMSG_DATA(nlh);
+
+ rep->udiag_family = AF_UNIX;
+ rep->udiag_type = sk->sk_type;
+ rep->udiag_state = sk->sk_state;
+ rep->udiag_ino = sk_ino;
+ sock_diag_save_cookie(sk, rep->udiag_cookie);
+
+ if ((req->udiag_show & UDIAG_SHOW_NAME) &&
+ sk_diag_dump_name(sk, skb))
+ goto nlmsg_failure;
+
+ if ((req->udiag_show & UDIAG_SHOW_VFS) &&
+ sk_diag_dump_vfs(sk, skb))
+ goto nlmsg_failure;
+
+ if ((req->udiag_show & UDIAG_SHOW_PEER) &&
+ sk_diag_dump_peer(sk, skb))
+ goto nlmsg_failure;
+
+ if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
+ sk_diag_dump_icons(sk, skb))
+ goto nlmsg_failure;
+
+ if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
+ sk_diag_show_rqlen(sk, skb))
+ goto nlmsg_failure;
+
+ if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
+ sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
+ goto nlmsg_failure;
+
+ nlh->nlmsg_len = skb_tail_pointer(skb) - b;
+ return skb->len;
+
+nlmsg_failure:
+ nlmsg_trim(skb, b);
+ return -EMSGSIZE;
+}
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+ u32 pid, u32 seq, u32 flags)
+{
+ int sk_ino;
+
+ unix_state_lock(sk);
+ sk_ino = sock_i_ino(sk);
+ unix_state_unlock(sk);
+
+ if (!sk_ino)
+ return 0;
+
+ return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+}
+
+static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct unix_diag_req *req;
+ int num, s_num, slot, s_slot;
+
+ req = NLMSG_DATA(cb->nlh);
+
+ s_slot = cb->args[0];
+ num = s_num = cb->args[1];
+
+ spin_lock(&unix_table_lock);
+ for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+ struct sock *sk;
+ struct hlist_node *node;
+
+ num = 0;
+ sk_for_each(sk, node, &unix_socket_table[slot]) {
+ if (num < s_num)
+ goto next;
+ if (!(req->udiag_states & (1 << sk->sk_state)))
+ goto next;
+ if (sk_diag_dump(sk, skb, req,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ goto done;
+next:
+ num++;
+ }
+ }
+done:
+ spin_unlock(&unix_table_lock);
+ cb->args[0] = slot;
+ cb->args[1] = num;
+
+ return skb->len;
+}
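+
+ The dump loop above filters sockets with req->udiag_states & (1 << sk->sk_state), the same bitmask-of-states convention the inet diag interface uses, so a requester selects states by setting bits rather than enumerating sockets. A small sketch of building and testing such a mask (TCP_ESTABLISHED/TCP_LISTEN are the state numbers AF_UNIX sockets reuse):
+
+#include <stdio.h>
+#include <netinet/tcp.h> /* TCP_ESTABLISHED, TCP_LISTEN */
+
+int main(void)
+{
+    unsigned all = ~0u;                                           /* every state */
+    unsigned mask = (1u << TCP_ESTABLISHED) | (1u << TCP_LISTEN); /* a narrower request */
+    int sk_state = TCP_LISTEN;                                    /* example socket state */
+
+    printf("in full dump:   %d\n", !!(all & (1u << sk_state)));
+    printf("in narrow dump: %d\n", !!(mask & (1u << sk_state)));
+    return 0;
+}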
+
+static struct sock *unix_lookup_by_ino(int ino)
+{
+ int i;
+ struct sock *sk;
+
+ spin_lock(&unix_table_lock);
+ for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+ struct hlist_node *node;
+
+ sk_for_each(sk, node, &unix_socket_table[i])
+ if (ino == sock_i_ino(sk)) {
+ sock_hold(sk);
+ spin_unlock(&unix_table_lock);
+
+ return sk;
+ }
+ }
+
+ spin_unlock(&unix_table_lock);
+ return NULL;
+}
+
+static int unix_diag_get_exact(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ struct unix_diag_req *req)
+{
+ int err = -EINVAL;
+ struct sock *sk;
+ struct sk_buff *rep;
+ unsigned int extra_len;
+
+ if (req->udiag_ino == 0)
+ goto out_nosk;
+
+ sk = unix_lookup_by_ino(req->udiag_ino);
+ err = -ENOENT;
+ if (sk == NULL)
+ goto out_nosk;
+
+ err = sock_diag_check_cookie(sk, req->udiag_cookie);
+ if (err)
+ goto out;
+
+ extra_len = 256;
+again:
+ err = -ENOMEM;
+ rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
+ GFP_KERNEL);
+ if (!rep)
+ goto out;
+
+ err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+ nlh->nlmsg_seq, 0, req->udiag_ino);
+ if (err < 0) {
+ kfree_skb(rep);
+ extra_len += 256;
+ if (extra_len >= PAGE_SIZE)
+ goto out;
+
+ goto again;
+ }
+ err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ MSG_DONTWAIT);
+ if (err > 0)
+ err = 0;
+out:
+ if (sk)
+ sock_put(sk);
+out_nosk:
+ return err;
+}
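+
+ unix_diag_get_exact() sizes the reply skb optimistically and, when sk_diag_fill() cannot fit everything, frees it, grows the payload by 256 bytes and tries again until PAGE_SIZE. The same grow-and-retry shape in plain user-space C -- fill_reply() is a hypothetical stand-in for sk_diag_fill() that reports failure when the buffer is too small:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Hypothetical stand-in for sk_diag_fill(): needs 700 bytes, fails otherwise. */
+static int fill_reply(char *buf, size_t len)
+{
+    const size_t needed = 700;
+
+    if (len < needed)
+        return -1;
+    memset(buf, 0xab, needed);
+    return (int)needed;
+}
+
+int main(void)
+{
+    size_t extra = 256, limit = 4096; /* limit plays the role of PAGE_SIZE */
+    char *rep = NULL;
+    int filled = -1;
+
+    while (filled < 0 && extra < limit) {
+        free(rep);
+        rep = malloc(sizeof(int) + extra); /* header + payload, roughly NLMSG_SPACE() */
+        if (!rep)
+            return 1;
+        filled = fill_reply(rep, extra);
+        if (filled < 0)
+            extra += 256;                  /* too small: grow and retry */
+    }
+
+    printf("reply filled with %d bytes after growing to %zu\n", filled, extra);
+    free(rep);
+    return 0;
+}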
+
+static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+ int hdrlen = sizeof(struct unix_diag_req);
+
+ if (nlmsg_len(h) < hdrlen)
+ return -EINVAL;
+
+ if (h->nlmsg_flags & NLM_F_DUMP)
+ return netlink_dump_start(sock_diag_nlsk, skb, h,
+ unix_diag_dump, NULL, 0);
+ else
+ return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+}
+
+static struct sock_diag_handler unix_diag_handler = {
+ .family = AF_UNIX,
+ .dump = unix_diag_handler_dump,
+};
+
+static int __init unix_diag_init(void)
+{
+ return sock_diag_register(&unix_diag_handler);
+}
+
+static void __exit unix_diag_exit(void)
+{
+ sock_diag_unregister(&unix_diag_handler);
+}
+
+module_init(unix_diag_init);
+module_exit(unix_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 1f1ef70f34f..2e4444fedbe 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -121,15 +121,16 @@ config CFG80211_WEXT
config WIRELESS_EXT_SYSFS
bool "Wireless extensions sysfs files"
- default y
depends on WEXT_CORE && SYSFS
help
This option enables the deprecated wireless statistics
files in /sys/class/net/*/wireless/. The same information
is available via the ioctls as well.
- Say Y if you have programs using it, like old versions of
- hal.
+ Say N. If you know you have ancient tools requiring it,
+ like very old versions of hal (prior to 0.5.12 release),
+ say Y and update the tools as soon as possible as this
+ option will be removed soon.
config LIB80211
tristate "Common routines for IEEE802.11 drivers"
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 17cd0c04d13..2fcfe0993ca 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,6 +6,7 @@
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*/
+#include <linux/export.h>
#include <net/cfg80211.h>
#include "core.h"
@@ -44,9 +45,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-static bool can_beacon_sec_chan(struct wiphy *wiphy,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
{
struct ieee80211_channel *sec_chan;
int diff;
@@ -75,6 +76,7 @@ static bool can_beacon_sec_chan(struct wiphy *wiphy,
return true;
}
+EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
@@ -109,8 +111,8 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
switch (channel_type) {
case NL80211_CHAN_HT40PLUS:
case NL80211_CHAN_HT40MINUS:
- if (!can_beacon_sec_chan(&rdev->wiphy, chan,
- channel_type)) {
+ if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
+ channel_type)) {
printk(KERN_DEBUG
"cfg80211: Secondary channel not "
"allowed to initiate communication\n");
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 220f3bd176f..ccdfed89765 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -492,6 +492,10 @@ int wiphy_register(struct wiphy *wiphy)
!(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
return -EINVAL;
+ if (WARN_ON(wiphy->ap_sme_capa &&
+ !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
+ return -EINVAL;
+
if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
return -EINVAL;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9ec3061ed7..43ad9c81efc 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -54,6 +54,8 @@ struct cfg80211_registered_device {
int opencount; /* also protected by devlist_mtx */
wait_queue_head_t dev_wait;
+ u32 ap_beacons_nlpid;
+
/* BSSes/scanning */
spinlock_t bss_lock;
struct list_head bss_list;
@@ -247,12 +249,11 @@ struct cfg80211_event {
u16 status;
} cr;
struct {
- struct ieee80211_channel *channel;
- u8 bssid[ETH_ALEN];
const u8 *req_ie;
const u8 *resp_ie;
size_t req_ie_len;
size_t resp_ie_len;
+ struct cfg80211_bss *bss;
} rm;
struct {
const u8 *ie;
@@ -339,13 +340,17 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
const u8 *bssid, const u8 *prev_bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len, bool use_mfp,
- struct cfg80211_crypto_settings *crypt);
+ struct cfg80211_crypto_settings *crypt,
+ u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev, struct ieee80211_channel *chan,
const u8 *bssid, const u8 *prev_bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len, bool use_mfp,
- struct cfg80211_crypto_settings *crypt);
+ struct cfg80211_crypto_settings *crypt,
+ u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask);
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
const u8 *ie, int ie_len, u16 reason,
@@ -376,7 +381,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie);
+ bool dont_wait_for_ack, u64 *cookie);
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+ const struct ieee80211_ht_cap *ht_capa_mask);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -395,8 +402,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason,
bool wextev);
void __cfg80211_roamed(struct wireless_dev *wdev,
- struct ieee80211_channel *channel,
- const u8 *bssid,
+ struct cfg80211_bss *bss,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len);
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 53c143f5e77..9392f8cbb90 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -7,10 +7,17 @@
#
# Copyright 2009 John W. Linville <linville@tuxdriver.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
BEGIN {
active = 0
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index b7b7868f412..8c550df1303 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -20,6 +20,7 @@
* interface
*/
#define MESH_PREQ_MIN_INT 10
+#define MESH_PERR_MIN_INT 100
#define MESH_DIAM_TRAVERSAL_TIME 50
/*
@@ -47,6 +48,7 @@ const struct mesh_config default_mesh_config = {
.dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
.dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
.dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
+ .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
.dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME,
.dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
.path_refresh_time = MESH_PATH_REFRESH_TIME,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 21fc9702f81..438dfc105b4 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -501,13 +501,32 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
return err;
}
+/* Do a logical ht_capa &= ht_capa_mask. */
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+ const struct ieee80211_ht_cap *ht_capa_mask)
+{
+ int i;
+ u8 *p1, *p2;
+ if (!ht_capa_mask) {
+ memset(ht_capa, 0, sizeof(*ht_capa));
+ return;
+ }
+
+ p1 = (u8*)(ht_capa);
+ p2 = (u8*)(ht_capa_mask);
+ for (i = 0; i<sizeof(*ht_capa); i++)
+ p1[i] &= p2[i];
+}
+
int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
const u8 *bssid, const u8 *prev_bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len, bool use_mfp,
- struct cfg80211_crypto_settings *crypt)
+ struct cfg80211_crypto_settings *crypt,
+ u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_assoc_request req;
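
cfg80211_oper_and_ht_capa() above is a byte-wise "capa &= mask": the association path uses it to clamp the requested HT-capability override mask to the bits the driver says may be touched (wiphy->ht_capa_mod_mask), and clears it entirely when the driver provides no modification mask. The same operation over plain buffers, in user-space form:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-wise "capa &= mask"; with no mask, no override bits survive. */
static void and_capa(uint8_t *capa, const uint8_t *mask, size_t len)
{
    if (!mask) {
        memset(capa, 0, len);
        return;
    }
    for (size_t i = 0; i < len; i++)
        capa[i] &= mask[i];
}

int main(void)
{
    uint8_t capa[4] = { 0xff, 0xff, 0x0f, 0x00 }; /* requested overrides */
    uint8_t mask[4] = { 0x02, 0x00, 0x0c, 0xff }; /* driver-permitted bits */

    and_capa(capa, mask, sizeof(capa));
    printf("effective override: %02x %02x %02x %02x\n",
           capa[0], capa[1], capa[2], capa[3]);   /* 02 00 0c 00 */
    return 0;
}
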
@@ -537,6 +556,15 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
memcpy(&req.crypto, crypt, sizeof(req.crypto));
req.use_mfp = use_mfp;
req.prev_bssid = prev_bssid;
+ req.flags = assoc_flags;
+ if (ht_capa)
+ memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
+ if (ht_capa_mask)
+ memcpy(&req.ht_capa_mask, ht_capa_mask,
+ sizeof(req.ht_capa_mask));
+ cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
+ rdev->wiphy.ht_capa_mod_mask);
+
req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
if (!req.bss) {
@@ -574,14 +602,17 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
const u8 *bssid, const u8 *prev_bssid,
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len, bool use_mfp,
- struct cfg80211_crypto_settings *crypt)
+ struct cfg80211_crypto_settings *crypt,
+ u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
- ssid, ssid_len, ie, ie_len, use_mfp, crypt);
+ ssid, ssid_len, ie, ie_len, use_mfp, crypt,
+ assoc_flags, ht_capa, ht_capa_mask);
wdev_unlock(wdev);
return err;
@@ -879,6 +910,9 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
}
spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ if (nlpid == wdev->ap_unexpected_nlpid)
+ wdev->ap_unexpected_nlpid = 0;
}
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -901,7 +935,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie)
+ bool dont_wait_for_ack, u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct ieee80211_mgmt *mgmt;
@@ -992,7 +1026,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
/* Transmit the Action frame as requested by user space */
return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
channel_type, channel_type_valid,
- wait, buf, len, no_cck, cookie);
+ wait, buf, len, no_cck, dont_wait_for_ack,
+ cookie);
}
bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1107,3 +1142,30 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
}
EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO))
+ return false;
+
+ return nl80211_unexpected_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
+
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+ wdev->iftype != NL80211_IFTYPE_AP_VLAN))
+ return false;
+
+ return nl80211_unexpected_4addr_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b3a476fe827..b3d3cf8931c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -47,22 +47,21 @@ static struct genl_family nl80211_fam = {
};
/* internal helper: get rdev and dev */
-static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
- struct cfg80211_registered_device **rdev,
- struct net_device **dev)
+static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
+ struct cfg80211_registered_device **rdev,
+ struct net_device **dev)
{
- struct nlattr **attrs = info->attrs;
int ifindex;
if (!attrs[NL80211_ATTR_IFINDEX])
return -EINVAL;
ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
- *dev = dev_get_by_index(genl_info_net(info), ifindex);
+ *dev = dev_get_by_index(netns, ifindex);
if (!*dev)
return -ENODEV;
- *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex);
+ *rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
if (IS_ERR(*rdev)) {
dev_put(*dev);
return PTR_ERR(*rdev);
@@ -89,8 +88,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
- [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
- [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -98,7 +97,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
- [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
@@ -196,6 +195,15 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
[NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
+ [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 },
+ [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_HT_CAPABILITY_MASK] = {
+ .len = NL80211_HT_CAPABILITY_LEN
+ },
+ [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
};
/* policy for the key attributes */
@@ -203,7 +211,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
[NL80211_KEY_IDX] = { .type = NLA_U8 },
[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
- [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
[NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@ -758,6 +766,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
dev->wiphy.available_antennas_rx);
+ if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
+ NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+ dev->wiphy.probe_resp_offload);
+
if ((dev->wiphy.available_antennas_tx ||
dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
u32 tx_ant = 0, rx_ant = 0;
@@ -874,7 +886,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(set_pmksa, SET_PMKSA);
CMD(del_pmksa, DEL_PMKSA);
CMD(flush_pmksa, FLUSH_PMKSA);
- CMD(remain_on_channel, REMAIN_ON_CHANNEL);
+ if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+ CMD(remain_on_channel, REMAIN_ON_CHANNEL);
CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
CMD(mgmt_tx, FRAME);
CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
@@ -890,6 +903,16 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
CMD(sched_scan_start, START_SCHED_SCAN);
+ CMD(probe_client, PROBE_CLIENT);
+ CMD(set_noack_map, SET_NOACK_MAP);
+ if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+ i++;
+ NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+ }
+
+#ifdef CONFIG_NL80211_TESTMODE
+ CMD(testmode_cmd, TESTMODE);
+#endif
#undef CMD
@@ -905,11 +928,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_cmds);
- if (dev->ops->remain_on_channel)
+ if (dev->ops->remain_on_channel &&
+ dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
dev->wiphy.max_remain_on_channel_duration);
- if (dev->ops->mgmt_tx_cancel_wait)
+ if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
if (mgmt_stypes) {
@@ -1007,6 +1031,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nl80211_put_iface_combinations(&dev->wiphy, msg))
goto nla_put_failure;
+ if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
+ NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
+ dev->wiphy.ap_sme_capa);
+
+ NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+
+ if (dev->wiphy.ht_capa_mod_mask)
+ NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+ sizeof(*dev->wiphy.ht_capa_mod_mask),
+ dev->wiphy.ht_capa_mod_mask);
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -1725,6 +1760,23 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
}
+static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ u16 noack_map;
+
+ if (!info->attrs[NL80211_ATTR_NOACK_MAP])
+ return -EINVAL;
+
+ if (!rdev->ops->set_noack_map)
+ return -EOPNOTSUPP;
+
+ noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
+
+ return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+}
+
struct get_key_cookie {
struct sk_buff *msg;
int error;
@@ -2155,6 +2207,13 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
}
+ if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
+ params.probe_resp =
+ nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
+ params.probe_resp_len =
+ nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
+ }
+
err = call(&rdev->wiphy, dev, &params);
if (!err && params.interval)
wdev->beacon_interval = params.interval;
@@ -2187,6 +2246,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
[NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
[NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
[NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
+ [NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG },
};
static int parse_station_flags(struct genl_info *info,
@@ -2330,6 +2390,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (sinfo->filled & STATION_INFO_TX_FAILED)
NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
sinfo->tx_failed);
+ if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
+ NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
+ sinfo->beacon_loss_count);
if (sinfo->filled & STATION_INFO_BSS_PARAM) {
bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
if (!bss_param)
@@ -2453,26 +2516,34 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
/*
* Get vlan interface making sure it is running and on the right wiphy.
*/
-static int get_vlan(struct genl_info *info,
- struct cfg80211_registered_device *rdev,
- struct net_device **vlan)
+static struct net_device *get_vlan(struct genl_info *info,
+ struct cfg80211_registered_device *rdev)
{
struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
- *vlan = NULL;
-
- if (vlanattr) {
- *vlan = dev_get_by_index(genl_info_net(info),
- nla_get_u32(vlanattr));
- if (!*vlan)
- return -ENODEV;
- if (!(*vlan)->ieee80211_ptr)
- return -EINVAL;
- if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
- return -EINVAL;
- if (!netif_running(*vlan))
- return -ENETDOWN;
+ struct net_device *v;
+ int ret;
+
+ if (!vlanattr)
+ return NULL;
+
+ v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr));
+ if (!v)
+ return ERR_PTR(-ENODEV);
+
+ if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) {
+ ret = -EINVAL;
+ goto error;
}
- return 0;
+
+ if (!netif_running(v)) {
+ ret = -ENETDOWN;
+ goto error;
+ }
+
+ return v;
+ error:
+ dev_put(v);
+ return ERR_PTR(ret);
}
static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
@@ -2511,6 +2582,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.ht_capa =
nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+ if (!rdev->ops->change_station)
+ return -EOPNOTSUPP;
+
if (parse_station_flags(info, &params))
return -EINVAL;
@@ -2522,73 +2596,84 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.plink_state =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
- err = get_vlan(info, rdev, &params.vlan);
- if (err)
- goto out;
-
- /* validate settings */
- err = 0;
-
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
/* disallow mesh-specific things */
if (params.plink_action)
- err = -EINVAL;
+ return -EINVAL;
+
+ /* TDLS can't be set, ... */
+ if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ return -EINVAL;
+ /*
+ * ... but don't bother the driver with it. This works around
+ * a hostapd/wpa_supplicant issue -- it always includes the
+ * TDLS_PEER flag in the mask even for AP mode.
+ */
+ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+ /* accept only the listed bits */
+ if (params.sta_flags_mask &
+ ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+ BIT(NL80211_STA_FLAG_WME) |
+ BIT(NL80211_STA_FLAG_MFP)))
+ return -EINVAL;
+
+ /* must be last in here for error handling */
+ params.vlan = get_vlan(info, rdev);
+ if (IS_ERR(params.vlan))
+ return PTR_ERR(params.vlan);
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
/* disallow things sta doesn't support */
if (params.plink_action)
- err = -EINVAL;
- if (params.vlan)
- err = -EINVAL;
- if (params.supported_rates &&
- !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
- err = -EINVAL;
+ return -EINVAL;
if (params.ht_capa)
- err = -EINVAL;
+ return -EINVAL;
if (params.listen_interval >= 0)
- err = -EINVAL;
- if (params.sta_flags_mask &
- ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
- BIT(NL80211_STA_FLAG_TDLS_PEER)))
- err = -EINVAL;
- /* can't change the TDLS bit */
- if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
- (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
- err = -EINVAL;
+ return -EINVAL;
+ /*
+ * Don't allow userspace to change the TDLS_PEER flag,
+ * but silently ignore attempts to change it, since we
+ * don't have the state here to verify whether the request
+ * actually tries to change the flag.
+ */
+ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+ /* reject any changes other than AUTHORIZED */
+ if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ return -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
/* disallow things mesh doesn't support */
if (params.vlan)
- err = -EINVAL;
+ return -EINVAL;
if (params.ht_capa)
- err = -EINVAL;
+ return -EINVAL;
if (params.listen_interval >= 0)
- err = -EINVAL;
+ return -EINVAL;
+ /*
+ * No special handling for TDLS here -- the userspace
+ * mesh code doesn't have this bug.
+ */
if (params.sta_flags_mask &
~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
BIT(NL80211_STA_FLAG_MFP) |
BIT(NL80211_STA_FLAG_AUTHORIZED)))
- err = -EINVAL;
+ return -EINVAL;
break;
default:
- err = -EINVAL;
+ return -EOPNOTSUPP;
}
- if (err)
- goto out;
-
- if (!rdev->ops->change_station) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ /* be aware of params.vlan when changing code here */
err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);
- out:
if (params.vlan)
dev_put(params.vlan);
@@ -2643,70 +2728,81 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
+ if (!rdev->ops->add_station)
+ return -EOPNOTSUPP;
+
if (parse_station_flags(info, &params))
return -EINVAL;
- /* parse WME attributes if sta is WME capable */
- if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
- (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
- info->attrs[NL80211_ATTR_STA_WME]) {
- struct nlattr *tb[NL80211_STA_WME_MAX + 1];
- struct nlattr *nla;
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ /* parse WME attributes if sta is WME capable */
+ if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+ (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+ info->attrs[NL80211_ATTR_STA_WME]) {
+ struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+ struct nlattr *nla;
+
+ nla = info->attrs[NL80211_ATTR_STA_WME];
+ err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+ nl80211_sta_wme_policy);
+ if (err)
+ return err;
- nla = info->attrs[NL80211_ATTR_STA_WME];
- err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
- nl80211_sta_wme_policy);
- if (err)
- return err;
+ if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+ params.uapsd_queues =
+ nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+ if (params.uapsd_queues &
+ ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ return -EINVAL;
- if (tb[NL80211_STA_WME_UAPSD_QUEUES])
- params.uapsd_queues =
- nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
- if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
- return -EINVAL;
+ if (tb[NL80211_STA_WME_MAX_SP])
+ params.max_sp =
+ nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
- if (tb[NL80211_STA_WME_MAX_SP])
- params.max_sp =
- nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+ if (params.max_sp &
+ ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ return -EINVAL;
- if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+ }
+ /* TDLS peers cannot be added */
+ if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
return -EINVAL;
+ /* but don't bother the driver with it */
+ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
- params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+ /* must be last in here for error handling */
+ params.vlan = get_vlan(info, rdev);
+ if (IS_ERR(params.vlan))
+ return PTR_ERR(params.vlan);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* TDLS peers cannot be added */
+ if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ return -EINVAL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* Only TDLS peers can be added */
+ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -EINVAL;
+ /* Can only add if TDLS ... */
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -EOPNOTSUPP;
+ /* ... with external setup is supported */
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
- return -EINVAL;
-
- /*
- * Only managed stations can add TDLS peers, and only when the
- * wiphy supports external TDLS setup.
- */
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
- !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
- (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
- (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
- return -EINVAL;
-
- err = get_vlan(info, rdev, &params.vlan);
- if (err)
- goto out;
-
- /* validate settings */
- err = 0;
-
- if (!rdev->ops->add_station) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ /* be aware of params.vlan when changing code here */
err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);
- out:
if (params.vlan)
dev_put(params.vlan);
return err;
@@ -3127,6 +3223,8 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
cur_params.dot11MeshHWMPactivePathTimeout);
NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
cur_params.dot11MeshHWMPpreqMinInterval);
+ NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+ cur_params.dot11MeshHWMPperrMinInterval);
NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
cur_params.dot11MeshHWMPnetDiameterTraversalTime);
NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
@@ -3161,6 +3259,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
@@ -3235,6 +3334,9 @@ do {\
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
+ mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
dot11MeshHWMPnetDiameterTraversalTime,
mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
@@ -3357,6 +3459,9 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
cfg80211_regdomain->alpha2);
+ if (cfg80211_regdomain->dfs_region)
+ NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
+ cfg80211_regdomain->dfs_region);
nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
if (!nl_reg_rules)
@@ -3415,6 +3520,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
char *alpha2 = NULL;
int rem_reg_rules = 0, r = 0;
u32 num_rules = 0, rule_idx = 0, size_of_regd;
+ u8 dfs_region = 0;
struct ieee80211_regdomain *rd = NULL;
if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
@@ -3425,6 +3531,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+ if (info->attrs[NL80211_ATTR_DFS_REGION])
+ dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
+
nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
rem_reg_rules) {
num_rules++;
@@ -3452,6 +3561,13 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
rd->alpha2[0] = alpha2[0];
rd->alpha2[1] = alpha2[1];
+ /*
+ * Disable DFS master mode if the DFS region is
+ * not supported or not known by this kernel.
+ */
+ if (reg_supported_dfs_region(dfs_region))
+ rd->dfs_region = dfs_region;
+
nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
rem_reg_rules) {
nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
@@ -4359,6 +4475,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
int err, ssid_len, ie_len = 0;
bool use_mfp = false;
+ u32 flags = 0;
+ struct ieee80211_ht_cap *ht_capa = NULL;
+ struct ieee80211_ht_cap *ht_capa_mask = NULL;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -4402,11 +4521,25 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_PREV_BSSID])
prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+ if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+ flags |= ASSOC_REQ_DISABLE_HT;
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ ht_capa_mask =
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]);
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+ if (!ht_capa_mask)
+ return -EINVAL;
+ ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+ }
+
err = nl80211_crypto_settings(rdev, info, &crypto, 1);
if (!err)
err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
ssid, ssid_len, ie, ie_len, use_mfp,
- &crypto);
+ &crypto, flags, ht_capa,
+ ht_capa_mask);
return err;
}
@@ -4577,13 +4710,41 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- ibss.channel = ieee80211_get_channel(wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ enum nl80211_channel_type channel_type;
+
+ channel_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40MINUS &&
+ channel_type != NL80211_CHAN_HT40PLUS)
+ return -EINVAL;
+
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ !(wiphy->features & NL80211_FEATURE_HT_IBSS))
+ return -EINVAL;
+
+ ibss.channel_type = channel_type;
+ } else {
+ ibss.channel_type = NL80211_CHAN_NO_HT;
+ }
+
+ ibss.channel = rdev_freq_to_chan(rdev,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+ ibss.channel_type);
if (!ibss.channel ||
ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
ibss.channel->flags & IEEE80211_CHAN_DISABLED)
return -EINVAL;
+ /* Both channels should be able to initiate communication */
+ if ((ibss.channel_type == NL80211_CHAN_HT40PLUS ||
+ ibss.channel_type == NL80211_CHAN_HT40MINUS) &&
+ !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel,
+ ibss.channel_type))
+ return -EINVAL;
+
ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
@@ -4662,7 +4823,7 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
static int nl80211_testmode_dump(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct cfg80211_registered_device *dev;
+ struct cfg80211_registered_device *rdev;
int err;
long phy_idx;
void *data = NULL;
@@ -4680,9 +4841,21 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
nl80211_policy);
if (err)
return err;
- if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY])
- return -EINVAL;
- phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+ if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
+ phy_idx = nla_get_u32(
+ nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+ } else {
+ struct net_device *netdev;
+
+ err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
+ nl80211_fam.attrbuf,
+ &rdev, &netdev);
+ if (err)
+ return err;
+ dev_put(netdev);
+ phy_idx = rdev->wiphy_idx;
+ cfg80211_unlock_rdev(rdev);
+ }
if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
cb->args[1] =
(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -4694,15 +4867,15 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
}
mutex_lock(&cfg80211_mutex);
- dev = cfg80211_rdev_by_wiphy_idx(phy_idx);
- if (!dev) {
+ rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+ if (!rdev) {
mutex_unlock(&cfg80211_mutex);
return -ENOENT;
}
- cfg80211_lock_rdev(dev);
+ cfg80211_lock_rdev(rdev);
mutex_unlock(&cfg80211_mutex);
- if (!dev->ops->testmode_dump) {
+ if (!rdev->ops->testmode_dump) {
err = -EOPNOTSUPP;
goto out_err;
}
@@ -4713,7 +4886,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
- if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) {
+ if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
genlmsg_cancel(skb, hdr);
break;
}
@@ -4723,8 +4896,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
genlmsg_cancel(skb, hdr);
break;
}
- err = dev->ops->testmode_dump(&dev->wiphy, skb, cb,
- data, data_len);
+ err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb,
+ data, data_len);
nla_nest_end(skb, tmdata);
if (err == -ENOBUFS || err == -ENOENT) {
@@ -4742,7 +4915,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
/* see above */
cb->args[0] = phy_idx + 1;
out_err:
- cfg80211_unlock_rdev(dev);
+ cfg80211_unlock_rdev(rdev);
return err;
}
@@ -4896,6 +5069,22 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(connkeys);
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+ connect.flags |= ASSOC_REQ_DISABLE_HT;
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ memcpy(&connect.ht_capa_mask,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+ sizeof(connect.ht_capa_mask));
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+ if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ return -EINVAL;
+ memcpy(&connect.ht_capa,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+ sizeof(connect.ht_capa));
+ }
+
err = cfg80211_connect(rdev, dev, &connect, connkeys);
if (err)
kfree(connkeys);
@@ -5083,7 +5272,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
duration > rdev->wiphy.max_remain_on_channel_duration)
return -EINVAL;
- if (!rdev->ops->remain_on_channel)
+ if (!rdev->ops->remain_on_channel ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5271,12 +5461,13 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
bool channel_type_valid = false;
u32 freq;
int err;
- void *hdr;
+ void *hdr = NULL;
u64 cookie;
- struct sk_buff *msg;
+ struct sk_buff *msg = NULL;
unsigned int wait = 0;
- bool offchan;
- bool no_cck;
+ bool offchan, no_cck, dont_wait_for_ack;
+
+ dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
if (!info->attrs[NL80211_ATTR_FRAME] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5295,7 +5486,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_DURATION]) {
- if (!rdev->ops->mgmt_tx_cancel_wait)
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
return -EINVAL;
wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
}
@@ -5313,6 +5504,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
+ if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
+ return -EINVAL;
+
no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -5320,29 +5514,36 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
if (chan == NULL)
return -EINVAL;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!dont_wait_for_ack) {
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
- NL80211_CMD_FRAME);
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_FRAME);
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
- goto free_msg;
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
}
+
err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
- no_cck, &cookie);
+ no_cck, dont_wait_for_ack, &cookie);
if (err)
goto free_msg;
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (msg) {
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
- genlmsg_end(msg, hdr);
- return genlmsg_reply(msg, info);
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+ }
+
+ return 0;
nla_put_failure:
err = -ENOBUFS;
@@ -5540,6 +5741,11 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
+ if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
+ !nl80211_parse_mcast_rate(rdev, setup.mcast_rate,
+ nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
+ return -EINVAL;
+
if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
/* parse additional setup parameters if given */
err = nl80211_parse_mesh_setup(info, &setup);
@@ -5832,6 +6038,91 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_register_unexpected_frame(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EINVAL;
+
+ if (wdev->ap_unexpected_nlpid)
+ return -EBUSY;
+
+ wdev->ap_unexpected_nlpid = info->snd_pid;
+ return 0;
+}
+
+static int nl80211_probe_client(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct sk_buff *msg;
+ void *hdr;
+ const u8 *addr;
+ u64 cookie;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ if (!rdev->ops->probe_client)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_PROBE_CLIENT);
+
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
+
+ addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
+ if (err)
+ goto free_msg;
+
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+ nla_put_failure:
+ err = -ENOBUFS;
+ free_msg:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
+ return -EOPNOTSUPP;
+
+ if (rdev->ap_beacons_nlpid)
+ return -EBUSY;
+
+ rdev->ap_beacons_nlpid = info->snd_pid;
+
+ return 0;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -5859,7 +6150,8 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
}
info->user_ptr[0] = rdev;
} else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
- err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs,
+ &rdev, &dev);
if (err) {
if (rtnl)
rtnl_unlock();
@@ -6387,6 +6679,39 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_UNEXPECTED_FRAME,
+ .doit = nl80211_register_unexpected_frame,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_PROBE_CLIENT,
+ .doit = nl80211_probe_client,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_REGISTER_BEACONS,
+ .doit = nl80211_register_beacons,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_SET_NOACK_MAP,
+ .doit = nl80211_set_noack_map,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -6639,10 +6964,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
if (wiphy_idx_valid(request->wiphy_idx))
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -6678,10 +7000,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6762,10 +7081,7 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6821,10 +7137,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
if (resp_ie)
NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6862,10 +7175,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
if (resp_ie)
NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6903,10 +7213,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
if (ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, GFP_KERNEL);
@@ -6939,10 +7246,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6977,10 +7281,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
if (ie_len && ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7019,10 +7320,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
if (tsc)
NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7073,10 +7371,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
goto nla_put_failure;
nla_nest_end(msg, nl_freq);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -7119,10 +7414,7 @@ static void nl80211_send_remain_on_chan_event(
if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7193,10 +7485,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7207,13 +7496,68 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+ u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+
+ if (!nlpid)
+ return false;
+
+ msg = nlmsg_new(100, gfp);
+ if (!msg)
+ return true;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return true;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+
+ err = genlmsg_end(msg, hdr);
+ if (err < 0) {
+ nlmsg_free(msg);
+ return true;
+ }
+
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+ return true;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+ return true;
+}
+
+bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp)
+{
+ return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
+ addr, gfp);
+}
+
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ return __nl80211_unexpected_frame(dev,
+ NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+ addr, gfp);
+}
+
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 nlpid,
int freq, const u8 *buf, size_t len, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- int err;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
@@ -7230,16 +7574,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
- err = genlmsg_end(msg, hdr);
- if (err < 0) {
- nlmsg_free(msg);
- return err;
- }
+ genlmsg_end(msg, hdr);
- err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
- if (err < 0)
- return err;
- return 0;
+ return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -7272,10 +7609,7 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
if (ack)
NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
return;
@@ -7317,10 +7651,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, pinfoattr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7362,10 +7693,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, rekey_attr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7408,10 +7736,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, attr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7453,7 +7778,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, pinfoattr);
- if (genlmsg_end(msg, hdr) < 0) {
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+ u64 cookie, bool acked, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (acked)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+
+ err = genlmsg_end(msg, hdr);
+ if (err < 0) {
nlmsg_free(msg);
return;
}
@@ -7466,6 +7829,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
genlmsg_cancel(msg, hdr);
nlmsg_free(msg);
}
+EXPORT_SYMBOL(cfg80211_probe_status);
+
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, gfp_t gfp)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+
+ if (!nlpid)
+ return;
+
+ msg = nlmsg_new(len + 100, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ if (freq)
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+ NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_obss_beacon);
static int nl80211_netlink_notify(struct notifier_block * nb,
unsigned long state,
@@ -7480,9 +7882,12 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
rcu_read_lock();
- list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
+ list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
cfg80211_mlme_unregister_socket(wdev, notify->pid);
+ if (rdev->ap_beacons_nlpid == notify->pid)
+ rdev->ap_beacons_nlpid = 0;
+ }
rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f24a1fbeaf1..12bf4d185ab 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -117,4 +117,9 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, int index,
const u8 *bssid, bool preauth, gfp_t gfp);
+bool nl80211_unexpected_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e71f5a66574..f65feaad155 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2,13 +2,22 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com>
+ * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
/**
* DOC: Wireless regulatory infrastructure
*
@@ -57,8 +66,17 @@
#define REG_DBG_PRINT(args...)
#endif
+static struct regulatory_request core_request_world = {
+ .initiator = NL80211_REGDOM_SET_BY_CORE,
+ .alpha2[0] = '0',
+ .alpha2[1] = '0',
+ .intersect = false,
+ .processed = true,
+ .country_ie_env = ENVIRON_ANY,
+};
+
/* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
@@ -150,7 +168,7 @@ static char user_alpha2[2];
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
{
/* avoid freeing static information or freeing something twice */
if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +183,13 @@ static void reset_regdomains(void)
cfg80211_world_regdom = &world_regdom;
cfg80211_regdomain = NULL;
+
+ if (!full_reset)
+ return;
+
+ if (last_request != &core_request_world)
+ kfree(last_request);
+ last_request = &core_request_world;
}
/*
@@ -175,7 +200,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
BUG_ON(!last_request);
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_world_regdom = rd;
cfg80211_regdomain = rd;
@@ -857,10 +882,22 @@ static void handle_channel(struct wiphy *wiphy,
chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
- if (chan->orig_mpwr)
- chan->max_power = min(chan->orig_mpwr,
- (int) MBM_TO_DBM(power_rule->max_eirp));
- else
+ if (chan->orig_mpwr) {
+ /*
+ * Devices that have their own custom regulatory domain
+ * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+ * passed country IE power settings.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+ wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
+ chan->max_power =
+ MBM_TO_DBM(power_rule->max_eirp);
+ } else {
+ chan->max_power = min(chan->orig_mpwr,
+ (int) MBM_TO_DBM(power_rule->max_eirp));
+ }
+ } else
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
@@ -1123,6 +1160,8 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
if (ignore_reg_update(wiphy, initiator))
return;
+ last_request->dfs_region = cfg80211_regdomain->dfs_region;
+
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
handle_band(wiphy, band, initiator);
@@ -1145,9 +1184,21 @@ void regulatory_update(struct wiphy *wiphy,
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
struct cfg80211_registered_device *rdev;
+ struct wiphy *wiphy;
- list_for_each_entry(rdev, &cfg80211_rdev_list, list)
- wiphy_update_regulatory(&rdev->wiphy, initiator);
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ wiphy = &rdev->wiphy;
+ wiphy_update_regulatory(wiphy, initiator);
+ /*
+ * Regulatory updates set by CORE are ignored for custom
+ * regulatory cards. Notify the driver of the change anyway,
+ * as some drivers use this to restore their orig_* reg domain.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_CORE &&
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+ wiphy->reg_notifier)
+ wiphy->reg_notifier(wiphy, last_request);
+ }
}
static void handle_channel_custom(struct wiphy *wiphy,
@@ -1407,7 +1458,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
}
new_request:
- kfree(last_request);
+ if (last_request != &core_request_world)
+ kfree(last_request);
last_request = pending_request;
last_request->intersect = intersect;
@@ -1437,18 +1489,18 @@ new_request:
}
/* This processes *all* regulatory hints */
-static void reg_process_hint(struct regulatory_request *reg_request)
+static void reg_process_hint(struct regulatory_request *reg_request,
+ enum nl80211_reg_initiator reg_initiator)
{
int r = 0;
struct wiphy *wiphy = NULL;
- enum nl80211_reg_initiator initiator = reg_request->initiator;
BUG_ON(!reg_request->alpha2);
if (wiphy_idx_valid(reg_request->wiphy_idx))
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ if (reg_initiator == NL80211_REGDOM_SET_BY_DRIVER &&
!wiphy) {
kfree(reg_request);
return;
@@ -1458,7 +1510,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
/* This is required so that the orig_* parameters are saved */
if (r == -EALREADY && wiphy &&
wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
- wiphy_update_regulatory(wiphy, initiator);
+ wiphy_update_regulatory(wiphy, reg_initiator);
return;
}
@@ -1467,7 +1519,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
* source of bogus requests.
*/
if (r != -EALREADY &&
- reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
+ reg_initiator == NL80211_REGDOM_SET_BY_USER)
schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
}
@@ -1504,7 +1556,7 @@ static void reg_process_pending_hints(void)
spin_unlock(&reg_requests_lock);
- reg_process_hint(reg_request);
+ reg_process_hint(reg_request, reg_request->initiator);
out:
mutex_unlock(&reg_mutex);
@@ -1577,9 +1629,6 @@ static int regulatory_hint_core(const char *alpha2)
{
struct regulatory_request *request;
- kfree(last_request);
- last_request = NULL;
-
request = kzalloc(sizeof(struct regulatory_request),
GFP_KERNEL);
if (!request)
@@ -1752,6 +1801,26 @@ static void restore_alpha2(char *alpha2, bool reset_user)
REG_DBG_PRINT("Restoring regulatory settings\n");
}
+static void restore_custom_reg_settings(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ struct ieee80211_channel *chan;
+ int i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ chan->flags = chan->orig_flags;
+ chan->max_antenna_gain = chan->orig_mag;
+ chan->max_power = chan->orig_mpwr;
+ }
+ }
+}
+
/*
* Restoring regulatory settings involves ignoring any
* possibly stale country IE information and user regulatory
@@ -1770,14 +1839,16 @@ static void restore_alpha2(char *alpha2, bool reset_user)
static void restore_regulatory_settings(bool reset_user)
{
char alpha2[2];
+ char world_alpha2[2];
struct reg_beacon *reg_beacon, *btmp;
struct regulatory_request *reg_request, *tmp;
LIST_HEAD(tmp_reg_req_list);
+ struct cfg80211_registered_device *rdev;
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
- reset_regdomains();
+ reset_regdomains(true);
restore_alpha2(alpha2, reset_user);
/*
@@ -1820,11 +1891,18 @@ static void restore_regulatory_settings(bool reset_user)
/* First restore to the basic regulatory settings */
cfg80211_regdomain = cfg80211_world_regdom;
+ world_alpha2[0] = cfg80211_regdomain->alpha2[0];
+ world_alpha2[1] = cfg80211_regdomain->alpha2[1];
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (rdev->wiphy.flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+ restore_custom_reg_settings(&rdev->wiphy);
+ }
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
- regulatory_hint_core(cfg80211_regdomain->alpha2);
+ regulatory_hint_core(world_alpha2);
/*
* This restores the ieee80211_regdom module parameter
@@ -1921,7 +1999,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
const struct ieee80211_freq_range *freq_range = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
- pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
+ pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
for (i = 0; i < rd->n_reg_rules; i++) {
reg_rule = &rd->reg_rules[i];
@@ -1933,14 +2011,14 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
* in certain regions
*/
if (power_rule->max_antenna_gain)
- pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
power_rule->max_antenna_gain,
power_rule->max_eirp);
else
- pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
@@ -1948,6 +2026,42 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
}
}
+bool reg_supported_dfs_region(u8 dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ case NL80211_DFS_FCC:
+ case NL80211_DFS_ETSI:
+ case NL80211_DFS_JP:
+ return true;
+ default:
+ REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n",
+ dfs_region);
+ return false;
+ }
+}
+
+static void print_dfs_region(u8 dfs_region)
+{
+ if (!dfs_region)
+ return;
+
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ pr_info(" DFS Master region FCC");
+ break;
+ case NL80211_DFS_ETSI:
+ pr_info(" DFS Master region ETSI");
+ break;
+ case NL80211_DFS_JP:
+ pr_info(" DFS Master region JP");
+ break;
+ default:
+ pr_info(" DFS Master region Uknown");
+ break;
+ }
+}
+
static void print_regdomain(const struct ieee80211_regdomain *rd)
{
@@ -1975,6 +2089,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
pr_info("Regulatory domain changed to country: %c%c\n",
rd->alpha2[0], rd->alpha2[1]);
}
+ print_dfs_region(rd->dfs_region);
print_rd_rules(rd);
}
@@ -2037,12 +2152,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
}
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+ if (!request_wiphy &&
+ (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+ last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+ schedule_delayed_work(&reg_timeout, 0);
+ return -ENODEV;
+ }
if (!last_request->intersect) {
int r;
if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
@@ -2063,7 +2184,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
if (r)
return r;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
@@ -2088,7 +2209,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
@@ -2108,7 +2229,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
kfree(rd);
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
@@ -2261,11 +2382,8 @@ void /* __init_or_exit */ regulatory_exit(void)
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
- reset_regdomains();
-
- kfree(last_request);
+ reset_regdomains(true);
- last_request = NULL;
dev_set_uevent_suppress(&reg_pdev->dev, true);
platform_device_unregister(reg_pdev);
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4a56799d868..e2aaaf525a2 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -1,10 +1,26 @@
#ifndef __NET_WIRELESS_REG_H
#define __NET_WIRELESS_REG_H
+/*
+ * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
extern const struct ieee80211_regdomain *cfg80211_regdomain;
bool is_world_regdom(const char *alpha2);
bool reg_is_valid_request(const char *alpha2);
+bool reg_supported_dfs_region(u8 dfs_region);
int regulatory_hint_user(const char *alpha2);
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
index 818222c9251..3279cfcefb0 100644
--- a/net/wireless/regdb.h
+++ b/net/wireless/regdb.h
@@ -1,6 +1,22 @@
#ifndef __REGDB_H__
#define __REGDB_H__
+/*
+ * Copyright 2009 John W. Linville <linville@tuxdriver.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
extern const struct ieee80211_regdomain *reg_regdb[];
extern int reg_regdb_size;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index dc23b31594e..31119e32e09 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -355,8 +355,8 @@ static bool is_mesh(struct cfg80211_bss *a,
sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
}
-static int cmp_bss(struct cfg80211_bss *a,
- struct cfg80211_bss *b)
+static int cmp_bss_core(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
{
int r;
@@ -378,7 +378,15 @@ static int cmp_bss(struct cfg80211_bss *a,
b->len_information_elements);
}
- r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+ return memcmp(a->bssid, b->bssid, ETH_ALEN);
+}
+
+static int cmp_bss(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
+{
+ int r;
+
+ r = cmp_bss_core(a, b);
if (r)
return r;
@@ -389,6 +397,52 @@ static int cmp_bss(struct cfg80211_bss *a,
b->len_information_elements);
}
+static int cmp_hidden_bss(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
+{
+ const u8 *ie1;
+ const u8 *ie2;
+ int i;
+ int r;
+
+ r = cmp_bss_core(a, b);
+ if (r)
+ return r;
+
+ ie1 = cfg80211_find_ie(WLAN_EID_SSID,
+ a->information_elements,
+ a->len_information_elements);
+ ie2 = cfg80211_find_ie(WLAN_EID_SSID,
+ b->information_elements,
+ b->len_information_elements);
+
+ /* The key comparator must use the same algorithm as any rb-tree
+ * search function (ordering is important); otherwise the ordering
+ * of items in the tree is broken and searches give incorrect
+ * results. This code uses the same order as cmp_ies() does. */
+
+ /* sort missing IE before (left of) present IE */
+ if (!ie1)
+ return -1;
+ if (!ie2)
+ return 1;
+
+ /* zero-size SSID is used as an indication of the hidden bss */
+ if (!ie2[1])
+ return 0;
+
+ /* sort by length first, then by contents */
+ if (ie1[1] != ie2[1])
+ return ie2[1] - ie1[1];
+
+ /* zeroed SSID ie is another indication of a hidden bss */
+ for (i = 0; i < ie2[1]; i++)
+ if (ie2[i + 2])
+ return -1;
+
+ return 0;
+}
+
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
@@ -505,6 +559,48 @@ rb_find_bss(struct cfg80211_registered_device *dev,
}
static struct cfg80211_internal_bss *
+rb_find_hidden_bss(struct cfg80211_registered_device *dev,
+ struct cfg80211_internal_bss *res)
+{
+ struct rb_node *n = dev->bss_tree.rb_node;
+ struct cfg80211_internal_bss *bss;
+ int r;
+
+ while (n) {
+ bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
+ r = cmp_hidden_bss(&res->pub, &bss->pub);
+
+ if (r == 0)
+ return bss;
+ else if (r < 0)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+
+ return NULL;
+}
+
+static void
+copy_hidden_ies(struct cfg80211_internal_bss *res,
+ struct cfg80211_internal_bss *hidden)
+{
+ if (unlikely(res->pub.beacon_ies))
+ return;
+ if (WARN_ON(!hidden->pub.beacon_ies))
+ return;
+
+ res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC);
+ if (unlikely(!res->pub.beacon_ies))
+ return;
+
+ res->beacon_ies_allocated = true;
+ res->pub.len_beacon_ies = hidden->pub.len_beacon_ies;
+ memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies,
+ res->pub.len_beacon_ies);
+}
+
+static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *dev,
struct cfg80211_internal_bss *res)
{
@@ -607,6 +703,21 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
kref_put(&res->ref, bss_release);
} else {
+ struct cfg80211_internal_bss *hidden;
+
+ /* First check whether the beacon is a probe response from
+ * a hidden bss. If so, copy the beacon ies (with nullified
+ * ssid) into the probe response bss entry (with the real ssid).
+ * This is required mainly for the PSM implementation
+ * (probe responses do not contain a tim ie). */
+
+ /* TODO: The code does not try to update existing probe
+ * response bss entries when the beacon ies change. */
+ hidden = rb_find_hidden_bss(dev, res);
+ if (hidden)
+ copy_hidden_ies(res, hidden);
+
/* this "consumes" the reference */
list_add_tail(&res->list, &dev->bss_list);
rb_insert_bss(dev, res);
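
The hidden-BSS handling above hinges on how cmp_hidden_bss() treats the SSID element: a zero-length SSID, or an all-zero SSID of the matching length, is considered equal, so a probe response carrying the real SSID can be paired with the hidden AP's beacon entry. A standalone sketch of just that comparison over raw IE buffers (ie[0] is the element ID, ie[1] the length; the test vectors are invented):

/* Sketch of the hidden-SSID matching rule; test vectors are invented. */
#include <stdio.h>

/* Return 0 when ie2 looks like the hidden counterpart of ie1. */
static int cmp_hidden_ssid(const unsigned char *ie1, const unsigned char *ie2)
{
        int i;

        if (!ie1)
                return -1;              /* missing IE sorts first */
        if (!ie2)
                return 1;

        if (!ie2[1])
                return 0;               /* zero-length SSID: hidden */

        if (ie1[1] != ie2[1])
                return ie2[1] - ie1[1]; /* different lengths: no match */

        for (i = 0; i < ie2[1]; i++)
                if (ie2[i + 2])
                        return -1;      /* non-zero byte: a different, real SSID */

        return 0;                       /* zeroed SSID of same length: hidden */
}

int main(void)
{
        unsigned char real[]    = { 0, 4, 'h', 'o', 'm', 'e' }; /* SSID "home" */
        unsigned char zerolen[] = { 0, 0 };
        unsigned char zeroed[]  = { 0, 4, 0, 0, 0, 0 };

        printf("%d %d\n", cmp_hidden_ssid(real, zerolen),
               cmp_hidden_ssid(real, zeroed)); /* prints: 0 0 */
        return 0;
}
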
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0acfdc9beac..7b9ecaed96b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
prev_bssid,
params->ssid, params->ssid_len,
params->ie, params->ie_len,
- false, &params->crypto);
+ false, &params->crypto,
+ params->flags, &params->ht_capa,
+ &params->ht_capa_mask);
if (err)
__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
@@ -551,45 +553,35 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
EXPORT_SYMBOL(cfg80211_connect_result);
void __cfg80211_roamed(struct wireless_dev *wdev,
- struct ieee80211_channel *channel,
- const u8 *bssid,
+ struct cfg80211_bss *bss,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len)
{
- struct cfg80211_bss *bss;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
-
ASSERT_WDEV_LOCK(wdev);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
- return;
+ goto out;
if (wdev->sme_state != CFG80211_SME_CONNECTED)
- return;
+ goto out;
/* internal error -- how did we get to CONNECTED w/o BSS? */
if (WARN_ON(!wdev->current_bss)) {
- return;
+ goto out;
}
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
- bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
- wdev->ssid, wdev->ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
-
- if (WARN_ON(!bss))
- return;
-
cfg80211_hold_bss(bss_from_pub(bss));
wdev->current_bss = bss_from_pub(bss);
- nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid,
+ nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
req_ie, req_ie_len, resp_ie, resp_ie_len,
GFP_KERNEL);
@@ -610,11 +602,15 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
- memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+ memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN);
wdev->wext.prev_bssid_valid = true;
wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
#endif
+
+ return;
+out:
+ cfg80211_put_bss(bss);
}
void cfg80211_roamed(struct net_device *dev,
@@ -624,32 +620,57 @@ void cfg80211_roamed(struct net_device *dev,
const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_bss *bss;
+
+ CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+
+ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
+ wdev->ssid_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (WARN_ON(!bss))
+ return;
+
+ cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie,
+ resp_ie_len, gfp);
+}
+EXPORT_SYMBOL(cfg80211_roamed);
+
+void cfg80211_roamed_bss(struct net_device *dev,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+ if (WARN_ON(!bss))
+ return;
+
ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
- if (!ev)
+ if (!ev) {
+ cfg80211_put_bss(bss);
return;
+ }
ev->type = EVENT_ROAMED;
- ev->rm.channel = channel;
- memcpy(ev->rm.bssid, bssid, ETH_ALEN);
ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
ev->rm.req_ie_len = req_ie_len;
memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len);
ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
ev->rm.resp_ie_len = resp_ie_len;
memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len);
+ ev->rm.bss = bss;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
}
-EXPORT_SYMBOL(cfg80211_roamed);
+EXPORT_SYMBOL(cfg80211_roamed_bss);
void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap)
@@ -774,6 +795,9 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
wdev->connect_keys = NULL;
}
+ cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
+ rdev->wiphy.ht_capa_mod_mask);
+
if (connkeys && connkeys->def >= 0) {
int idx;
u32 cipher;
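
The sme.c changes split roam reporting in two: cfg80211_roamed() still takes a channel and BSSID and looks the BSS up itself, while the new cfg80211_roamed_bss() lets a driver hand in a struct cfg80211_bss it already holds, and that reference is consumed (note the cfg80211_put_bss() calls on the failure paths). A kernel-context sketch, not standalone code, of how a driver might use it; my_driver_report_roam() and its arguments are hypothetical:

/* Hypothetical driver helper; only cfg80211_roamed_bss() and its signature
 * come from the patch above. The bss reference is handed over to cfg80211. */
#include <net/cfg80211.h>

static void my_driver_report_roam(struct net_device *dev,
                                  struct cfg80211_bss *bss,
                                  const u8 *req_ie, size_t req_ie_len,
                                  const u8 *resp_ie, size_t resp_ie_len)
{
        /* The driver already holds bss, so skip the channel + BSSID lookup
         * that cfg80211_roamed() would otherwise perform. */
        cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len,
                            resp_ie, resp_ie_len, GFP_KERNEL);
        /* Do not touch bss after this point: cfg80211 now owns the reference. */
}
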
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4dde429441d..9aa9db6c814 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -7,9 +7,9 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
-#include <linux/crc32.h>
#include <net/cfg80211.h>
#include <net/ip.h>
+#include <net/dsfield.h>
#include "core.h"
struct ieee80211_rate *
@@ -240,17 +240,6 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
return 0;
}
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-const unsigned char rfc1042_header[] __aligned(2) =
- { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-EXPORT_SYMBOL(rfc1042_header);
-
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-const unsigned char bridge_tunnel_header[] __aligned(2) =
- { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-EXPORT_SYMBOL(bridge_tunnel_header);
-
unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
{
unsigned int hdrlen = 24;
@@ -662,7 +651,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
switch (skb->protocol) {
case htons(ETH_P_IP):
- dscp = ip_hdr(skb)->tos & 0xfc;
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
+ break;
+ case htons(ETH_P_IPV6):
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
break;
default:
return 0;
@@ -752,9 +744,9 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
NULL);
break;
case EVENT_ROAMED:
- __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
- ev->rm.req_ie, ev->rm.req_ie_len,
- ev->rm.resp_ie, ev->rm.resp_ie_len);
+ __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
+ ev->rm.req_ie_len, ev->rm.resp_ie,
+ ev->rm.resp_ie_len);
break;
case EVENT_DISCONNECTED:
__cfg80211_disconnected(wdev->netdev,
@@ -1051,169 +1043,13 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
return 0;
}
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- u64 filter, u32 crc)
-{
- size_t left = len;
- u8 *pos = start;
- bool calc_crc = filter != 0;
-
- memset(elems, 0, sizeof(*elems));
- elems->ie_start = start;
- elems->total_len = len;
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left)
- break;
-
- if (calc_crc && id < 64 && (filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- switch (id) {
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_FH_PARAMS:
- elems->fh_params = pos;
- elems->fh_params_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- elems->ds_params = pos;
- elems->ds_params_len = elen;
- break;
- case WLAN_EID_CF_PARAMS:
- elems->cf_params = pos;
- elems->cf_params_len = elen;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- }
- break;
- case WLAN_EID_IBSS_PARAMS:
- elems->ibss_params = pos;
- elems->ibss_params_len = elen;
- break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (pos[3] == 1) {
- /* OUI Type 1 - WPA IE */
- elems->wpa = pos;
- elems->wpa_len = elen;
- } else if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- elems->erp_info = pos;
- elems->erp_info_len = elen;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- break;
- case WLAN_EID_HT_INFORMATION:
- if (elen >= sizeof(struct ieee80211_ht_info))
- elems->ht_info_elem = (void *)pos;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- break;
- case WLAN_EID_PEER_MGMT:
- elems->peering = pos;
- elems->peering_len = elen;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- elems->ch_switch_elem = pos;
- elems->ch_switch_elem_len = elen;
- break;
- case WLAN_EID_QUIET:
- if (!elems->quiet_elem) {
- elems->quiet_elem = pos;
- elems->quiet_elem_len = elen;
- }
- elems->num_of_quiet_elem++;
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- elems->pwr_constr_elem = pos;
- elems->pwr_constr_elem_len = elen;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- elems->timeout_int = pos;
- elems->timeout_int_len = elen;
- break;
- default:
- break;
- }
-
- left -= elen;
- pos += elen;
- }
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+const unsigned char rfc1042_header[] __aligned(2) =
+ { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+EXPORT_SYMBOL(rfc1042_header);
- return crc;
-}
-EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+const unsigned char bridge_tunnel_header[] __aligned(2) =
+ { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+EXPORT_SYMBOL(bridge_tunnel_header);
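
The cfg80211_classify8021d() hunk stops reading ip_hdr(skb)->tos directly and uses ipv4_get_dsfield()/ipv6_get_dsfield() instead, which also gives IPv6 traffic a DSCP-based priority. Either way the classification only wants the top six bits of the ToS / Traffic Class octet; a tiny standalone illustration of that masking (the sample values are invented):

/* Standalone illustration; the tos values are invented examples. */
#include <stdio.h>

static unsigned int dscp_from_tos(unsigned int tos)
{
        return tos & 0xfc;      /* keep the 6-bit DSCP, drop the 2 ECN bits */
}

int main(void)
{
        /* 0xb8 = DSCP 46 (EF) with ECN clear, 0xb9 = same DSCP with ECN set */
        printf("0x%02x -> 0x%02x\n", 0xb8, dscp_from_tos(0xb8));
        printf("0x%02x -> 0x%02x\n", 0xb9, dscp_from_tos(0xb9));
        return 0;
}
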
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6897436b1d3..3c24eb97e9d 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -819,12 +819,24 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct ieee80211_channel *chan;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+ case NL80211_IFTYPE_MONITOR:
+ if (!rdev->ops->get_channel)
+ return -EINVAL;
+
+ chan = rdev->ops->get_channel(wdev->wiphy);
+ if (!chan)
+ return -EINVAL;
+ freq->m = chan->center_freq;
+ freq->e = 6;
+ return 0;
default:
if (!wdev->channel)
return -EINVAL;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 3e16c6abde4..a306bc66000 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -232,7 +232,7 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
return NOTIFY_DONE;
if (dev->type == ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
|| dev->type == ARPHRD_ETHER
#endif
) {
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index fa2b41888bd..f0ce862d1f4 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -161,7 +161,7 @@ void x25_establish_link(struct x25_neigh *nb)
*ptr = X25_IFACE_CONNECT;
break;
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
case ARPHRD_ETHER:
return;
#endif
@@ -180,7 +180,7 @@ void x25_terminate_link(struct x25_neigh *nb)
struct sk_buff *skb;
unsigned char *ptr;
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
if (nb->dev->type == ARPHRD_ETHER)
return;
#endif
@@ -213,7 +213,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
*dptr = X25_IFACE_DATA;
break;
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
case ARPHRD_ETHER:
kfree_skb(skb);
return;
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 97d77c532d8..cf636627005 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -134,7 +134,7 @@ struct net_device *x25_dev_get(char *devname)
if (dev &&
(!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25
-#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
+#if IS_ENABLED(CONFIG_LLC)
&& dev->type != ARPHRD_ETHER
#endif
))){
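
Most of the x25 and xfrm hunks are the same mechanical conversion: the hand-written defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test becomes IS_ENABLED(CONFIG_FOO), which is true whether the option is built in (=y) or modular (=m). A toy stand-in for those semantics only; this is not the kernel's kconfig.h implementation:

/* Toy demonstration of what IS_ENABLED() reports; the real macro lives in
 * include/linux/kconfig.h and is implemented differently. */
#include <stdio.h>

#define CONFIG_LLC_MODULE 1             /* pretend CONFIG_LLC=m for this demo */

#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
#define LLC_AVAILABLE 1                 /* what IS_ENABLED(CONFIG_LLC) would say */
#else
#define LLC_AVAILABLE 0
#endif

int main(void)
{
        printf("LLC built in or modular: %d\n", LLC_AVAILABLE);
        return 0;
}
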
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 552df27dcf5..7661576b6f4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
- return addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
- addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+ return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+ addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
(fl4->flowi4_proto == sel->proto || !sel->proto) &&
@@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
case AF_INET:
dst_ops = &net->xfrm.xfrm4_dst_ops;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
dst_ops = &net->xfrm.xfrm6_dst_ops;
break;
@@ -1499,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto free_dst;
/* Copy neighbour for reachability confirmation */
- dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
+ dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
xfrm_init_pmtu(dst_prev);
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
{
struct dst_entry *head, *next;
- flow_cache_flush();
-
spin_lock_bh(&xfrm_policy_sk_bundle_lock);
head = xfrm_policy_sk_bundles;
xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
}
}
+static void xfrm_garbage_collect(struct net *net)
+{
+ flow_cache_flush();
+ __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+ flow_cache_flush_deferred();
+ __xfrm_garbage_collect(net);
+}
+
static void xfrm_init_pmtu(struct dst_entry *dst)
{
do {
@@ -2382,9 +2392,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
return dst_metric_advmss(dst->path);
}
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
- return dst_mtu(dst->path);
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst_mtu(dst->path);
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2423,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->check = xfrm_dst_check;
if (likely(dst_ops->default_advmss == NULL))
dst_ops->default_advmss = xfrm_default_advmss;
- if (likely(dst_ops->default_mtu == NULL))
- dst_ops->default_mtu = xfrm_default_mtu;
+ if (likely(dst_ops->mtu == NULL))
+ dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
@@ -2420,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
if (likely(dst_ops->neigh_lookup == NULL))
dst_ops->neigh_lookup = xfrm_neigh_lookup;
if (likely(afinfo->garbage_collect == NULL))
- afinfo->garbage_collect = __xfrm_garbage_collect;
+ afinfo->garbage_collect = xfrm_garbage_collect_deferred;
xfrm_policy_afinfo[afinfo->family] = afinfo;
}
write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2433,7 +2445,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
case AF_INET:
xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
break;
@@ -2483,7 +2495,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
afinfo = xfrm_policy_afinfo[AF_INET];
if (afinfo)
net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
afinfo = xfrm_policy_afinfo[AF_INET6];
if (afinfo)
net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
@@ -2514,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
switch (event) {
case NETDEV_DOWN:
- __xfrm_garbage_collect(dev_net(dev));
+ xfrm_garbage_collect(dev_net(dev));
}
return NOTIFY_DONE;
}
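
xfrm_mtu() now prefers an explicitly set RTAX_MTU metric and falls back to the path MTU only when that metric is zero. The fallback is written with GCC's two-operand ?: extension, where a ? : b evaluates to a if a is non-zero and to b otherwise; a minimal standalone demonstration (GNU C):

/* Demonstrates the GNU "?:" extension used in xfrm_mtu(). */
#include <stdio.h>

static unsigned int pick_mtu(unsigned int metric_mtu, unsigned int path_mtu)
{
        return metric_mtu ? : path_mtu; /* metric wins unless it is 0 */
}

int main(void)
{
        printf("%u\n", pick_mtu(0, 1500));      /* 1500: fall back to path MTU */
        printf("%u\n", pick_mtu(1400, 1500));   /* 1400: explicit metric */
        return 0;
}
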
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9414b9c5b1e..5b228f97d4b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1035,16 +1035,12 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
break;
case AF_INET6:
- ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
- (const struct in6_addr *)daddr);
- ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
- (const struct in6_addr *)saddr);
+ *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
+ *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
x->sel.prefixlen_d = 128;
x->sel.prefixlen_s = 128;
- ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
- (const struct in6_addr *)saddr);
- ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
- (const struct in6_addr *)daddr);
+ *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
+ *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
break;
}
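
The xfrm_state.c hunk drops ipv6_addr_copy() in favour of plain structure assignment, which copies the full 128-bit address in one statement. A standalone sketch with a stand-in address type (the struct here is an assumption for illustration, not the kernel's struct in6_addr):

/* Stand-in type for illustration; not the kernel's struct in6_addr. */
#include <stdio.h>
#include <string.h>

struct addr128 {
        unsigned char s6_addr[16];
};

int main(void)
{
        struct addr128 src, dst;

        memset(&src, 0xab, sizeof(src));
        memset(&dst, 0, sizeof(dst));

        dst = src;      /* whole-struct assignment copies all 16 bytes */

        printf("equal: %d\n", memcmp(&dst, &src, sizeof(src)) == 0);
        return 0;
}
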
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d0a42df5160..e0d747a2e80 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -28,7 +28,7 @@
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif
@@ -150,7 +150,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
break;
case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
break;
#else
err = -EAFNOSUPPORT;
@@ -201,7 +201,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
goto out;
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
if (attrs[XFRMA_ALG_COMP] ||
@@ -1160,7 +1160,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
break;
case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
break;
#else
return -EAFNOSUPPORT;
@@ -1231,7 +1231,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
switch (ut[i].family) {
case AF_INET:
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
break;
#endif
@@ -2604,7 +2604,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
return NULL;
}
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
if (opt != IPV6_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8fda3b3f7be..e3bfcbe8a52 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -227,7 +227,7 @@ our $Inline = qr{inline|__always_inline|noinline};
our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
our $Lval = qr{$Ident(?:$Member)*};
-our $Constant = qr{(?:[0-9]+|0x[0-9a-fA-F]+)[UL]*};
+our $Constant = qr{(?i:(?:[0-9]+|0x[0-9a-f]+)[ul]*)};
our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
our $Compare = qr{<=|>=|==|!=|<|>};
our $Operators = qr{
@@ -315,7 +315,7 @@ sub build_types {
$NonptrType = qr{
(?:$Modifier\s+|const\s+)*
(?:
- (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)|
+ (?:typeof|__typeof__)\s*\([^\)]*\)|
(?:$typeTypedefs\b)|
(?:${all}\b)
)
@@ -334,6 +334,7 @@ our $match_balanced_parentheses = qr/(\((?:[^\(\)]+|(-1))*\))/;
our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
our $LvalOrFunc = qr{($Lval)\s*($match_balanced_parentheses{0,1})\s*};
+our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)};
sub deparenthesize {
my ($string) = @_;
@@ -676,6 +677,10 @@ sub ctx_statement_block {
if ($off >= $len) {
last;
}
+ if ($level == 0 && substr($blk, $off) =~ /^.\s*#\s*define/) {
+ $level++;
+ $type = '#';
+ }
}
$p = $c;
$c = substr($blk, $off, 1);
@@ -738,6 +743,13 @@ sub ctx_statement_block {
last;
}
}
+ # Preprocessor commands end at the newline unless escaped.
+ if ($type eq '#' && $c eq "\n" && $p ne "\\") {
+ $level--;
+ $type = '';
+ $off++;
+ last;
+ }
$off++;
}
# We are truly at the end, so shuffle to the next line.
@@ -1020,7 +1032,7 @@ sub annotate_values {
} elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
print "CAST($1)\n" if ($dbg_values > 1);
push(@av_paren_type, $type);
- $type = 'C';
+ $type = 'c';
} elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
print "DECLARE($1)\n" if ($dbg_values > 1);
@@ -1212,7 +1224,9 @@ sub possible {
case|
else|
asm|__asm__|
- do
+ do|
+ \#|
+ \#\#|
)(?:\s|$)|
^(?:typedef|struct|enum)\b
)}x;
@@ -1359,6 +1373,7 @@ sub process {
my %suppress_ifbraces;
my %suppress_whiletrailers;
my %suppress_export;
+ my $suppress_statement = 0;
# Pre-scan the patch sanitizing the lines.
# Pre-scan the patch looking for any __setup documentation.
@@ -1468,6 +1483,7 @@ sub process {
%suppress_ifbraces = ();
%suppress_whiletrailers = ();
%suppress_export = ();
+ $suppress_statement = 0;
next;
# track the line number as we move through the hunk, note that
@@ -1504,9 +1520,11 @@ sub process {
if ($line =~ /^diff --git.*?(\S+)$/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@;
+ $in_commit_log = 0;
} elsif ($line =~ /^\+\+\+\s+(\S+)/) {
$realfile = $1;
$realfile =~ s@^([^/]*)/@@;
+ $in_commit_log = 0;
$p1_prefix = $1;
if (!$file && $tree && $p1_prefix ne '' &&
@@ -1546,7 +1564,8 @@ sub process {
}
# Check signature styles
- if ($line =~ /^(\s*)($signature_tags)(\s*)(.*)/) {
+ if (!$in_header_lines &&
+ $line =~ /^(\s*)($signature_tags)(\s*)(.*)/) {
my $space_before = $1;
my $sign_off = $2;
my $space_after = $3;
@@ -1623,7 +1642,7 @@ sub process {
# Check if it's the start of a commit log
# (not a header line and we haven't seen the patch filename)
if ($in_header_lines && $realfile =~ /^$/ &&
- $rawline !~ /^(commit\b|from\b|\w+:).+$/i) {
+ $rawline !~ /^(commit\b|from\b|[\w-]+:).+$/i) {
$in_header_lines = 0;
$in_commit_log = 1;
}
@@ -1655,19 +1674,26 @@ sub process {
# Only applies when adding the entry originally, after that we do not have
# sufficient context to determine whether it is indeed long enough.
if ($realfile =~ /Kconfig/ &&
- $line =~ /\+\s*(?:---)?help(?:---)?$/) {
+ $line =~ /.\s*config\s+/) {
my $length = 0;
my $cnt = $realcnt;
my $ln = $linenr + 1;
my $f;
+ my $is_start = 0;
my $is_end = 0;
- while ($cnt > 0 && defined $lines[$ln - 1]) {
+ for (; $cnt > 0 && defined $lines[$ln - 1]; $ln++) {
$f = $lines[$ln - 1];
$cnt-- if ($lines[$ln - 1] !~ /^-/);
$is_end = $lines[$ln - 1] =~ /^\+/;
- $ln++;
next if ($f =~ /^-/);
+
+ if ($lines[$ln - 1] =~ /.\s*(?:bool|tristate)\s*\"/) {
+ $is_start = 1;
+ } elsif ($lines[$ln - 1] =~ /.\s*(?:---)?help(?:---)?$/) {
+ $length = -1;
+ }
+
$f =~ s/^.//;
$f =~ s/#.*//;
$f =~ s/^\s+//;
@@ -1679,8 +1705,8 @@ sub process {
$length++;
}
WARN("CONFIG_DESCRIPTION",
- "please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
- #print "is_end<$is_end> length<$length>\n";
+ "please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_start && $is_end && $length < 4);
+ #print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
if (($realfile =~ /Makefile.*/ || $realfile =~ /Kbuild.*/) &&
@@ -1792,12 +1818,24 @@ sub process {
# Check for potential 'bare' types
my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
$realline_next);
- if ($realcnt && $line =~ /.\s*\S/) {
+#print "LINE<$line>\n";
+ if ($linenr >= $suppress_statement &&
+ $realcnt && $line =~ /.\s*\S/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0);
$stat =~ s/\n./\n /g;
$cond =~ s/\n./\n /g;
+#print "linenr<$linenr> <$stat>\n";
+ # If this statement has no statement boundaries within
+ # it there is no point in retrying a statement scan
+ # until we hit end of it.
+ my $frag = $stat; $frag =~ s/;+\s*$//;
+ if ($frag !~ /(?:{|;)/) {
+#print "skip<$line_nr_next>\n";
+ $suppress_statement = $line_nr_next;
+ }
+
# Find the real next line.
$realline_next = $line_nr_next;
if (defined $realline_next &&
@@ -1923,6 +1961,9 @@ sub process {
# Check relative indent for conditionals and blocks.
if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+ ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+ ctx_statement_block($linenr, $realcnt, 0)
+ if (!defined $stat);
my ($s, $c) = ($stat, $cond);
substr($s, 0, length($c), '');
@@ -2090,7 +2131,7 @@ sub process {
# XXX(foo);
# EXPORT_SYMBOL(something_foo);
my $name = $1;
- if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
+ if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
$name =~ /^${Ident}_$2/) {
#print "FOO C name<$name>\n";
$suppress_export{$realline_next} = 1;
@@ -2168,8 +2209,9 @@ sub process {
# * goes on variable not on type
# (char*[ const])
- if ($line =~ m{\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\)}) {
- my ($from, $to) = ($1, $1);
+ while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) {
+ #print "AA<$1>\n";
+ my ($from, $to) = ($2, $2);
# Should start with a space.
$to =~ s/^(\S)/ $1/;
@@ -2184,8 +2226,10 @@ sub process {
ERROR("POINTER_LOCATION",
"\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr);
}
- } elsif ($line =~ m{\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident)}) {
- my ($from, $to, $ident) = ($1, $1, $2);
+ }
+ while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) {
+ #print "BB<$1>\n";
+ my ($from, $to, $ident) = ($2, $2, $3);
# Should start with a space.
$to =~ s/^(\S)/ $1/;
@@ -2568,7 +2612,7 @@ sub process {
# Flatten any parentheses
$value =~ s/\(/ \(/g;
$value =~ s/\)/\) /g;
- while ($value =~ s/\[[^\{\}]*\]/1/ ||
+ while ($value =~ s/\[[^\[\]]*\]/1/ ||
$value !~ /(?:$Ident|-?$Constant)\s*
$Compare\s*
(?:$Ident|-?$Constant)/x &&
@@ -2593,28 +2637,6 @@ sub process {
}
}
-# typecasts on min/max could be min_t/max_t
- if ($line =~ /^\+(?:.*?)\b(min|max)\s*\($Typecast{0,1}($LvalOrFunc)\s*,\s*$Typecast{0,1}($LvalOrFunc)\s*\)/) {
- if (defined $2 || defined $8) {
- my $call = $1;
- my $cast1 = deparenthesize($2);
- my $arg1 = $3;
- my $cast2 = deparenthesize($8);
- my $arg2 = $9;
- my $cast;
-
- if ($cast1 ne "" && $cast2 ne "") {
- $cast = "$cast1 or $cast2";
- } elsif ($cast1 ne "") {
- $cast = $cast1;
- } else {
- $cast = $cast2;
- }
- WARN("MINMAX",
- "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr);
- }
- }
-
# Need a space before open parenthesis after if, while etc
if ($line=~/\b(if|while|for|switch)\(/) {
ERROR("SPACING", "space required before the open parenthesis '('\n" . $herecurr);
@@ -2623,6 +2645,9 @@ sub process {
# Check for illegal assignment in if conditional -- and check for trailing
# statements after the conditional.
if ($line =~ /do\s*(?!{)/) {
+ ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+ ctx_statement_block($linenr, $realcnt, 0)
+ if (!defined $stat);
my ($stat_next) = ctx_statement_block($line_nr_next,
$remain_next, $off_next);
$stat_next =~ s/\n./\n /g;
@@ -2778,47 +2803,13 @@ sub process {
my $cnt = $realcnt;
my ($off, $dstat, $dcond, $rest);
my $ctx = '';
-
- my $args = defined($1);
-
- # Find the end of the macro and limit our statement
- # search to that.
- while ($cnt > 0 && defined $lines[$ln - 1] &&
- $lines[$ln - 1] =~ /^(?:-|..*\\$)/)
- {
- $ctx .= $rawlines[$ln - 1] . "\n";
- $cnt-- if ($lines[$ln - 1] !~ /^-/);
- $ln++;
- }
- $ctx .= $rawlines[$ln - 1];
-
($dstat, $dcond, $ln, $cnt, $off) =
- ctx_statement_block($linenr, $ln - $linenr + 1, 0);
+ ctx_statement_block($linenr, $realcnt, 0);
+ $ctx = $dstat;
#print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
#print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
- # Extract the remainder of the define (if any) and
- # rip off surrounding spaces, and trailing \'s.
- $rest = '';
- while ($off != 0 || ($cnt > 0 && $rest =~ /\\\s*$/)) {
- #print "ADDING cnt<$cnt> $off <" . substr($lines[$ln - 1], $off) . "> rest<$rest>\n";
- if ($off != 0 || $lines[$ln - 1] !~ /^-/) {
- $rest .= substr($lines[$ln - 1], $off) . "\n";
- $cnt--;
- }
- $ln++;
- $off = 0;
- }
- $rest =~ s/\\\n.//g;
- $rest =~ s/^\s*//s;
- $rest =~ s/\s*$//s;
-
- # Clean up the original statement.
- if ($args) {
- substr($dstat, 0, length($dcond), '');
- } else {
- $dstat =~ s/^.\s*\#\s*define\s+$Ident\s*//;
- }
+ $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
$dstat =~ s/$;//g;
$dstat =~ s/\\\n.//g;
$dstat =~ s/^\s*//s;
@@ -2827,7 +2818,7 @@ sub process {
# Flatten any parentheses and braces
while ($dstat =~ s/\([^\(\)]*\)/1/ ||
$dstat =~ s/\{[^\{\}]*\}/1/ ||
- $dstat =~ s/\[[^\{\}]*\]/1/)
+ $dstat =~ s/\[[^\[\]]*\]/1/)
{
}
@@ -2844,23 +2835,32 @@ sub process {
^\"|\"$
}x;
#print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
- if ($rest ne '' && $rest ne ',') {
- if ($rest !~ /while\s*\(/ &&
- $dstat !~ /$exceptions/)
- {
- ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
- "Macros with multiple statements should be enclosed in a do - while loop\n" . "$here\n$ctx\n");
+ if ($dstat ne '' &&
+ $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(),
+ $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo();
+ $dstat !~ /^(?:$Ident|-?$Constant)$/ && # 10 // foo()
+ $dstat !~ /$exceptions/ &&
+ $dstat !~ /^\.$Ident\s*=/ && # .foo =
+ $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...)
+ $dstat !~ /^for\s*$Constant$/ && # for (...)
+ $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar()
+ $dstat !~ /^do\s*{/ && # do {...
+ $dstat !~ /^\({/) # ({...
+ {
+ $ctx =~ s/\n*$//;
+ my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($ctx);
+
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
}
- } elsif ($ctx !~ /;/) {
- if ($dstat ne '' &&
- $dstat !~ /^(?:$Ident|-?$Constant)$/ &&
- $dstat !~ /$exceptions/ &&
- $dstat !~ /^\.$Ident\s*=/ &&
- $dstat =~ /$Operators/)
- {
+ if ($dstat =~ /;/) {
+ ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
+ "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
+ } else {
ERROR("COMPLEX_MACRO",
- "Macros with complex values should be enclosed in parenthesis\n" . "$here\n$ctx\n");
+ "Macros with complex values should be enclosed in parenthesis\n" . "$herectx");
}
}
}
@@ -3111,6 +3111,12 @@ sub process {
"__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
}
+# Check for __attribute__ format(printf, prefer __printf
+ if ($line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
+ WARN("PREFER_PRINTF",
+ "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr);
+ }
+
# check for sizeof(&)
if ($line =~ /\bsizeof\s*\(\s*\&/) {
WARN("SIZEOF_ADDRESS",
@@ -3123,6 +3129,46 @@ sub process {
"Avoid line continuations in quoted strings\n" . $herecurr);
}
+# Check for misused memsets
+ if (defined $stat &&
+ $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*$FuncArg\s*\)/s) {
+
+ my $ms_addr = $2;
+ my $ms_val = $8;
+ my $ms_size = $14;
+
+ if ($ms_size =~ /^(0x|)0$/i) {
+ ERROR("MEMSET",
+ "memset to 0's uses 0 as the 2nd argument, not the 3rd\n" . "$here\n$stat\n");
+ } elsif ($ms_size =~ /^(0x|)1$/i) {
+ WARN("MEMSET",
+ "single byte memset is suspicious. Swapped 2nd/3rd argument?\n" . "$here\n$stat\n");
+ }
+ }
+
+# typecasts on min/max could be min_t/max_t
+ if (defined $stat &&
+ $stat =~ /^\+(?:.*?)\b(min|max)\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\)/) {
+ if (defined $2 || defined $8) {
+ my $call = $1;
+ my $cast1 = deparenthesize($2);
+ my $arg1 = $3;
+ my $cast2 = deparenthesize($8);
+ my $arg2 = $9;
+ my $cast;
+
+ if ($cast1 ne "" && $cast2 ne "") {
+ $cast = "$cast1 or $cast2";
+ } elsif ($cast1 ne "") {
+ $cast = $cast1;
+ } else {
+ $cast = $cast2;
+ }
+ WARN("MINMAX",
+ "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . "$here\n$stat\n");
+ }
+ }
+
# check for new externs in .c files.
if ($realfile =~ /\.c$/ && defined $stat &&
$stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
@@ -3294,12 +3340,6 @@ sub process {
WARN("EXPORTED_WORLD_WRITABLE",
"Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
}
-
- # Check for memset with swapped arguments
- if ($line =~ /memset.*\,(\ |)(0x|)0(\ |0|)\);/) {
- ERROR("MEMSET",
- "memset size is 3rd argument, not the second.\n" . $herecurr);
- }
}
# If we have no input at all, then there is nothing to report on
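
The MEMSET and MINMAX checks are reworked to match against the accumulated statement ($stat) with the new $FuncArg pattern instead of a single raw line, and the old line-based memset check is dropped. For reference, small C fragments of the kind these checks are aimed at; buf is a local example, not from the patch:

/* Minimal sketch of source patterns the relocated checks flag. */
#include <string.h>
#include <stdio.h>

int main(void)
{
        char buf[16];

        /* Flagged: size and value arguments swapped -- this zeroes 0 bytes. */
        memset(buf, sizeof(buf), 0);

        /* Intended form: fill all of buf with zero bytes. */
        memset(buf, 0, sizeof(buf));

        /* The min/max check suggests min_t()/max_t() instead of casting an
         * argument, e.g. prefer min_t(size_t, a, b) over min((size_t)a, b)
         * in kernel code. */
        printf("buf[0] = %d\n", buf[0]);
        return 0;
}
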
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 4594f334105..f32a04c4c5b 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@ my %VCS_cmds_git = (
"execute_cmd" => \&git_execute_cmd,
"available" => '(which("git") ne "") && (-d ".git")',
"find_signers_cmd" =>
- "git log --no-color --since=\$email_git_since " .
+ "git log --no-color --follow --since=\$email_git_since " .
'--format="GitCommit: %H%n' .
'GitAuthor: %an <%ae>%n' .
'GitDate: %aD%n' .
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ba573fe7c74..914833d99b0 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
--directory=$(srctree) --directory=$(objtree) \
--output $(obj)/config.pot
$(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
- $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
- $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \
+ $(Q)(for i in `ls $(srctree)/arch/*/Kconfig \
+ $(srctree)/arch/*/um/Kconfig`; \
do \
echo " GEN $$i"; \
$(obj)/kxgettext $$i \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
done )
$(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
--output $(obj)/linux.pot
- $(Q)rm -f $(srctree)/arch/um/Kconfig
$(Q)rm -f $(obj)/config.pot
PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index f936d1fa969..363ab4666b1 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -880,6 +880,74 @@ static int do_isapnp_entry(const char *filename,
return 1;
}
+/*
+ * Append a match expression for a single masked hex digit.
+ * outp points to a pointer to the character at which to append.
+ * *outp is updated on return to point just after the appended text,
+ * to facilitate further appending.
+ */
+static void append_nibble_mask(char **outp,
+ unsigned int nibble, unsigned int mask)
+{
+ char *p = *outp;
+ unsigned int i;
+
+ switch (mask) {
+ case 0:
+ *p++ = '?';
+ break;
+
+ case 0xf:
+ p += sprintf(p, "%X", nibble);
+ break;
+
+ default:
+ /*
+ * Dumbly emit a match pattern for all possible matching
+ * digits. This could be improved in some cases using ranges,
+ * but it has the advantage of being trivially correct, and is
+ * often optimal.
+ */
+ *p++ = '[';
+ for (i = 0; i < 0x10; i++)
+ if ((i & mask) == nibble)
+ p += sprintf(p, "%X", i);
+ *p++ = ']';
+ }
+
+ /* Ensure that the string remains NUL-terminated: */
+ *p = '\0';
+
+ /* Advance the caller's end-of-string pointer: */
+ *outp = p;
+}
+
+/*
+ * looks like: "amba:dN"
+ *
+ * N is exactly 8 digits, where each is an upper-case hex digit, or
+ * a ? or [] pattern matching exactly one digit.
+ */
+static int do_amba_entry(const char *filename,
+ struct amba_id *id, char *alias)
+{
+ unsigned int digit;
+ char *p = alias;
+
+ if ((id->id & id->mask) != id->id)
+ fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: "
+ "id=0x%08X, mask=0x%08X. Please fix this driver.\n",
+ filename, id->id, id->mask);
+
+ p += sprintf(alias, "amba:d");
+ for (digit = 0; digit < 8; digit++)
+ append_nibble_mask(&p,
+ (id->id >> (4 * (7 - digit))) & 0xf,
+ (id->mask >> (4 * (7 - digit))) & 0xf);
+
+ return 1;
+}
+
/* Ignore any prefix, eg. some architectures prepend _ */
static inline int sym_is(const char *symbol, const char *name)
{
@@ -1047,6 +1115,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct isapnp_device_id), "isa",
do_isapnp_entry, mod);
+ else if (sym_is(symname, "__mod_amba_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct amba_id), "amba",
+ do_amba_entry, mod);
free(zeros);
}
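
do_amba_entry() expands an (id, mask) pair into a module alias of the form amba:dNNNNNNNN: a fully masked-off nibble becomes '?', a fully significant one becomes its hex digit, and anything in between becomes a bracketed list of every digit that matches. A standalone sketch of the same expansion with invented id/mask values:

/* Standalone sketch of the nibble-mask expansion; id/mask are invented. */
#include <stdio.h>

static void append_nibble(char **outp, unsigned int nibble, unsigned int mask)
{
        char *p = *outp;
        unsigned int i;

        switch (mask) {
        case 0:
                *p++ = '?';                     /* nibble fully masked off */
                break;
        case 0xf:
                p += sprintf(p, "%X", nibble);  /* nibble fully significant */
                break;
        default:
                *p++ = '[';                     /* list every matching digit */
                for (i = 0; i < 0x10; i++)
                        if ((i & mask) == nibble)
                                p += sprintf(p, "%X", i);
                *p++ = ']';
        }
        *p = '\0';
        *outp = p;
}

int main(void)
{
        unsigned int id = 0x00041061, mask = 0x000fffff;        /* invented */
        char alias[256], *p = alias;
        unsigned int digit;

        p += sprintf(alias, "amba:d");
        for (digit = 0; digit < 8; digit++)
                append_nibble(&p,
                              (id >> (4 * (7 - digit))) & 0xf,
                              (mask >> (4 * (7 - digit))) & 0xf);
        printf("%s\n", alias);  /* prints: amba:d???41061 */
        return 0;
}
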
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index bc6aa003860..87bf08076b1 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -141,7 +141,7 @@ perf-%pkg: FORCE
help: FORCE
@echo ' rpm-pkg - Build both source and binary RPM kernel packages'
@echo ' binrpm-pkg - Build only the binary kernel package'
- @echo ' deb-pkg - Build the kernel as an deb package'
+ @echo ' deb-pkg - Build the kernel as a deb package'
@echo ' tar-pkg - Build the kernel as an uncompressed tarball'
@echo ' targz-pkg - Build the kernel as a gzip compressed tarball'
@echo ' tarbz2-pkg - Build the kernel as a bzip2 compressed tarball'
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 69ddb47787b..e39df6d4377 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -165,7 +165,7 @@ static void __init aafs_remove(const char *name)
*
* Used aafs_remove to remove entries created with this fn.
*/
-static int __init aafs_create(const char *name, int mask,
+static int __init aafs_create(const char *name, umode_t mask,
const struct file_operations *fops)
{
struct dentry *dentry;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 37832026e58..2c0a0ff4139 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -262,7 +262,7 @@ static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
}
static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
- int mode)
+ umode_t mode)
{
return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
S_IFDIR);
@@ -274,7 +274,7 @@ static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
}
static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
- int mode, unsigned int dev)
+ umode_t mode, unsigned int dev)
{
return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
}
@@ -344,13 +344,12 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
return error;
}
-static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+static int apparmor_path_chmod(struct path *path, umode_t mode)
{
- if (!mediated_filesystem(dentry->d_inode))
+ if (!mediated_filesystem(path->dentry->d_inode))
return 0;
- return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD);
+ return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
}
static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 36cc0cc39e7..9d070a7c3ff 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -13,7 +13,6 @@
*/
#include <linux/magic.h>
-#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
@@ -57,23 +56,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
static int d_namespace_path(struct path *path, char *buf, int buflen,
char **name, int flags)
{
- struct path root, tmp;
char *res;
- int connected, error = 0;
+ int error = 0;
+ int connected = 1;
+
+ if (path->mnt->mnt_flags & MNT_INTERNAL) {
+ /* it's not mounted anywhere */
+ res = dentry_path(path->dentry, buf, buflen);
+ *name = res;
+ if (IS_ERR(res)) {
+ *name = buf;
+ return PTR_ERR(res);
+ }
+ if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+ strncmp(*name, "/sys/", 5) == 0) {
+ /* TODO: convert over to using a per namespace
+ * control instead of hard coded /proc
+ */
+ return prepend(name, *name - buf, "/proc", 5);
+ }
+ return 0;
+ }
- /* Get the root we want to resolve too, released below */
+ /* resolve paths relative to chroot? */
if (flags & PATH_CHROOT_REL) {
- /* resolve paths relative to chroot */
+ struct path root;
get_fs_root(current->fs, &root);
- } else {
- /* resolve paths relative to namespace */
- root.mnt = current->nsproxy->mnt_ns->root;
- root.dentry = root.mnt->mnt_root;
- path_get(&root);
+ res = __d_path(path, &root, buf, buflen);
+ if (res && !IS_ERR(res)) {
+ /* everything's fine */
+ *name = res;
+ path_put(&root);
+ goto ok;
+ }
+ path_put(&root);
+ connected = 0;
}
- tmp = root;
- res = __d_path(path, &tmp, buf, buflen);
+ res = d_absolute_path(path, buf, buflen);
*name = res;
/* handle error conditions - and still allow a partial path to
@@ -84,7 +104,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
*name = buf;
goto out;
}
+ if (!our_mnt(path->mnt))
+ connected = 0;
+ok:
/* Handle two cases:
* 1. A deleted dentry && profile is not allowing mediation of deleted
* 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +120,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
goto out;
}
- /* Determine if the path is connected to the expected root */
- connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
- /* If the path is not connected,
+ /* If the path is not connected to the expected root,
* check if it is a sysctl and handle specially else remove any
* leading / that __d_path may have returned.
* Unless
@@ -112,17 +132,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
* namespace root.
*/
if (!connected) {
- /* is the disconnect path a sysctl? */
- if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
- strncmp(*name, "/sys/", 5) == 0) {
- /* TODO: convert over to using a per namespace
- * control instead of hard coded /proc
- */
- error = prepend(name, *name - buf, "/proc", 5);
- } else if (!(flags & PATH_CONNECT_PATH) &&
+ if (!(flags & PATH_CONNECT_PATH) &&
!(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
- (tmp.mnt == current->nsproxy->mnt_ns->root &&
- tmp.dentry == tmp.mnt->mnt_root))) {
+ our_mnt(path->mnt))) {
/* disconnected path, don't return pathname starting
* with '/'
*/
@@ -133,8 +145,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
}
out:
- path_put(&root);
-
return error;
}
diff --git a/security/capability.c b/security/capability.c
index 2984ea4f776..3b5883b7179 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -125,7 +125,7 @@ static int cap_inode_init_security(struct inode *inode, struct inode *dir,
}
static int cap_inode_create(struct inode *inode, struct dentry *dentry,
- int mask)
+ umode_t mask)
{
return 0;
}
@@ -148,7 +148,7 @@ static int cap_inode_symlink(struct inode *inode, struct dentry *dentry,
}
static int cap_inode_mkdir(struct inode *inode, struct dentry *dentry,
- int mask)
+ umode_t mask)
{
return 0;
}
@@ -159,7 +159,7 @@ static int cap_inode_rmdir(struct inode *inode, struct dentry *dentry)
}
static int cap_inode_mknod(struct inode *inode, struct dentry *dentry,
- int mode, dev_t dev)
+ umode_t mode, dev_t dev)
{
return 0;
}
@@ -235,13 +235,13 @@ static void cap_inode_getsecid(const struct inode *inode, u32 *secid)
}
#ifdef CONFIG_SECURITY_PATH
-static int cap_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+static int cap_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev)
{
return 0;
}
-static int cap_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
+static int cap_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -279,8 +279,7 @@ static int cap_path_truncate(struct path *path)
return 0;
}
-static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+static int cap_path_chmod(struct path *path, umode_t mode)
{
return 0;
}
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 4450fbeec41..8b5b5d8612c 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -62,11 +62,12 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
struct cgroup_subsys devices_subsys;
static int devcgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgroup, struct task_struct *task)
+ struct cgroup *new_cgrp, struct cgroup_taskset *set)
{
- if (current != task && !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ struct task_struct *task = cgroup_taskset_first(set);
+ if (current != task && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
return 0;
}
diff --git a/security/inode.c b/security/inode.c
index c4df2fbebe6..90a70a67d83 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -56,7 +56,7 @@ static const struct file_operations default_file_ops = {
.llseek = noop_llseek,
};
-static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *get_inode(struct super_block *sb, umode_t mode, dev_t dev)
{
struct inode *inode = new_inode(sb);
@@ -85,7 +85,7 @@ static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
/* SMP-safe */
static int mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev)
+ umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOMEM;
@@ -102,7 +102,7 @@ static int mknod(struct inode *dir, struct dentry *dentry,
return error;
}
-static int mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int res;
@@ -113,7 +113,7 @@ static int mkdir(struct inode *dir, struct dentry *dentry, int mode)
return res;
}
-static int create(struct inode *dir, struct dentry *dentry, int mode)
+static int create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
mode = (mode & S_IALLUGO) | S_IFREG;
return mknod(dir, dentry, mode, 0);
@@ -145,7 +145,7 @@ static struct file_system_type fs_type = {
.kill_sb = kill_litter_super,
};
-static int create_by_name(const char *name, mode_t mode,
+static int create_by_name(const char *name, umode_t mode,
struct dentry *parent,
struct dentry **dentry)
{
@@ -159,12 +159,12 @@ static int create_by_name(const char *name, mode_t mode,
* have around.
*/
if (!parent)
- parent = mount->mnt_sb->s_root;
+ parent = mount->mnt_root;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry)) {
- if ((mode & S_IFMT) == S_IFDIR)
+ if (S_ISDIR(mode))
error = mkdir(parent->d_inode, *dentry, mode);
else
error = create(parent->d_inode, *dentry, mode);
@@ -205,7 +205,7 @@ static int create_by_name(const char *name, mode_t mode,
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
-struct dentry *securityfs_create_file(const char *name, mode_t mode,
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 5dd5b140242..8738deff26f 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
+static DEFINE_MUTEX(mutex);
+
static struct shash_desc *init_desc(void)
{
int rc;
struct shash_desc *desc;
if (hmac_tfm == NULL) {
+ mutex_lock(&mutex);
+ if (hmac_tfm)
+ goto out;
hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_tfm)) {
pr_err("Can not allocate %s (reason: %ld)\n",
evm_hmac, PTR_ERR(hmac_tfm));
rc = PTR_ERR(hmac_tfm);
hmac_tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(hmac_tfm);
+ hmac_tfm = NULL;
+ mutex_unlock(&mutex);
return ERR_PTR(rc);
}
+out:
+ mutex_unlock(&mutex);
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
desc->tfm = hmac_tfm;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
- if (rc)
- goto out;
rc = crypto_shash_init(desc);
-out:
if (rc) {
kfree(desc);
return ERR_PTR(rc);
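
The init_desc() change turns the HMAC tfm setup into a one-time, mutex-protected initialization: the unlocked NULL test is repeated after taking the mutex so two callers cannot both allocate, and crypto_shash_setkey() moves into that section so the key is set exactly once instead of on every call. A standalone pthread sketch of the same check, lock, re-check shape; the resource and its setup are invented:

/* Standalone sketch of the check/lock/re-check initialization pattern;
 * the "resource" is invented, only the shape mirrors init_desc(). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *shared_resource;            /* stands in for hmac_tfm */

static int *get_resource(void)
{
        if (shared_resource == NULL) {  /* fast path, as in the kernel code */
                pthread_mutex_lock(&init_mutex);
                if (shared_resource == NULL) {  /* authoritative re-check */
                        int *r = malloc(sizeof(*r));
                        if (!r) {
                                pthread_mutex_unlock(&init_mutex);
                                return NULL;
                        }
                        *r = 42;        /* "setkey" happens here, exactly once */
                        shared_resource = r;
                }
                pthread_mutex_unlock(&init_mutex);
        }
        return shared_resource;
}

int main(void)
{
        printf("%d\n", *get_resource());
        printf("%d\n", *get_resource());        /* second call reuses it */
        return 0;
}
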
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 893af8a2fa1..7bd6f138236 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -114,19 +114,20 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
int offset, ret = 0;
struct ipv6hdr *ip6;
u8 nexthdr;
+ __be16 frag_off;
ip6 = ipv6_hdr(skb);
if (ip6 == NULL)
return -EINVAL;
- ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
- ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+ ad->u.net.v6info.saddr = ip6->saddr;
+ ad->u.net.v6info.daddr = ip6->daddr;
ret = 0;
/* IPv6 can have several extension header before the Transport header
* skip them */
offset = skb_network_offset(skb);
offset += sizeof(*ip6);
nexthdr = ip6->nexthdr;
- offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
return 0;
if (proto)
diff --git a/security/security.c b/security/security.c
index 0c6cc69c8f8..214502c772a 100644
--- a/security/security.c
+++ b/security/security.c
@@ -381,14 +381,14 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
void **value, size_t *len)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
EXPORT_SYMBOL(security_old_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
@@ -397,7 +397,7 @@ int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
}
EXPORT_SYMBOL(security_path_mknod);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
@@ -454,12 +454,11 @@ int security_path_truncate(struct path *path)
return security_ops->path_truncate(path);
}
-int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+int security_path_chmod(struct path *path, umode_t mode)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
- return security_ops->path_chmod(dentry, mnt, mode);
+ return security_ops->path_chmod(path, mode);
}
int security_path_chown(struct path *path, uid_t uid, gid_t gid)
@@ -475,7 +474,7 @@ int security_path_chroot(struct path *path)
}
#endif
-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
@@ -506,7 +505,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
return security_ops->inode_symlink(dir, dentry, old_name);
}
-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
@@ -521,7 +520,7 @@ int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
return security_ops->inode_rmdir(dir, dentry);
}
-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 1126c10a5e8..7cd4c3affac 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1090,7 +1090,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
return SECCLASS_NETLINK_ROUTE_SOCKET;
case NETLINK_FIREWALL:
return SECCLASS_NETLINK_FIREWALL_SOCKET;
- case NETLINK_INET_DIAG:
+ case NETLINK_SOCK_DIAG:
return SECCLASS_NETLINK_TCPDIAG_SOCKET;
case NETLINK_NFLOG:
return SECCLASS_NETLINK_NFLOG_SOCKET;
@@ -1740,7 +1740,7 @@ static inline u32 file_mask_to_av(int mode, int mask)
{
u32 av = 0;
- if ((mode & S_IFMT) != S_IFDIR) {
+ if (!S_ISDIR(mode)) {
if (mask & MAY_EXEC)
av |= FILE__EXECUTE;
if (mask & MAY_READ)
@@ -2507,7 +2507,7 @@ static int selinux_mount(char *dev_name,
const struct cred *cred = current_cred();
if (flags & MS_REMOUNT)
- return superblock_has_perm(cred, path->mnt->mnt_sb,
+ return superblock_has_perm(cred, path->dentry->d_sb,
FILESYSTEM__REMOUNT, NULL);
else
return path_has_perm(cred, path, FILE__MOUNTON);
@@ -2598,7 +2598,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
return 0;
}
-static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return may_create(dir, dentry, SECCLASS_FILE);
}
@@ -2618,7 +2618,7 @@ static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const
return may_create(dir, dentry, SECCLASS_LNK_FILE);
}
-static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask)
{
return may_create(dir, dentry, SECCLASS_DIR);
}
@@ -2628,7 +2628,7 @@ static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
return may_link(dir, dentry, MAY_RMDIR);
}
-static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
return may_create(dir, dentry, inode_mode_to_security_class(mode));
}
@@ -3561,19 +3561,20 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
u8 nexthdr;
int ret = -EINVAL, offset;
struct ipv6hdr _ipv6h, *ip6;
+ __be16 frag_off;
offset = skb_network_offset(skb);
ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
if (ip6 == NULL)
goto out;
- ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
- ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+ ad->u.net.v6info.saddr = ip6->saddr;
+ ad->u.net.v6info.daddr = ip6->daddr;
ret = 0;
nexthdr = ip6->nexthdr;
offset += sizeof(_ipv6h);
- offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
goto out;
@@ -3871,7 +3872,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (family == PF_INET)
ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
else
- ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
+ ad.u.net.v6info.saddr = addr6->sin6_addr;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass, node_perm, &ad);
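Two of the hooks.c hunks above deserve a note: the selinux_mount() change reaches the superblock through path->dentry->d_sb rather than through the vfsmount, and the file_mask_to_av() change is purely a readability fix, since S_ISDIR(m) is defined as ((m) & S_IFMT) == S_IFDIR. A small userspace check of that equivalence:

	#include <assert.h>
	#include <sys/stat.h>

	int main(void)
	{
		/* exhaustive check over the 16-bit mode space */
		for (unsigned int mode = 0; mode <= 0xffff; mode++)
			assert(((mode & S_IFMT) != S_IFDIR) == !S_ISDIR(mode));
		return 0;
	}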
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 3bf46abaa68..86365857c08 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -220,7 +220,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
case PF_INET6:
ret = security_node_sid(PF_INET6,
addr, sizeof(struct in6_addr), sid);
- ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
+ new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 0b62bd11246..7b9eb1faf68 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
struct sel_netport *tail;
tail = list_entry(
- rcu_dereference(sel_netport_hash[idx].list.prev),
+ rcu_dereference_protected(
+ sel_netport_hash[idx].list.prev,
+ lockdep_is_held(&sel_netport_lock)),
struct sel_netport, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
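rcu_dereference() is meant for read-side critical sections; sel_netport_insert() runs with sel_netport_lock held on the update side, so the plain form would trigger a false RCU-usage warning under CONFIG_PROVE_RCU. rcu_dereference_protected() with lockdep_is_held() documents, and lets lockdep verify, that the update-side lock protects the access. The general shape of that pattern, as a schematic kernel-style sketch with hypothetical names (not a standalone module):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head list;
		struct rcu_head rcu;
	};

	static LIST_HEAD(items);
	static DEFINE_SPINLOCK(items_lock);

	/* caller holds items_lock (the update-side lock) */
	static void evict_oldest(void)
	{
		struct item *tail = list_entry(
			rcu_dereference_protected(items.prev,
						  lockdep_is_held(&items_lock)),
			struct item, list);

		list_del_rcu(&tail->list);
		kfree_rcu(tail, rcu);	/* readers may still be traversing */
	}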
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 7db62b48eb4..e8af5b0ba80 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -406,7 +406,7 @@ static int smack_sb_statfs(struct dentry *dentry)
static int smack_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags, void *data)
{
- struct superblock_smack *sbp = path->mnt->mnt_sb->s_security;
+ struct superblock_smack *sbp = path->dentry->d_sb->s_security;
struct smk_audit_info ad;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
@@ -435,7 +435,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, path);
- sbp = mnt->mnt_sb->s_security;
+ sbp = path.dentry->d_sb->s_security;
return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
}
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 075c3a6d164..5ca47ea3049 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -112,7 +112,7 @@ out:
*
* Returns file type string.
*/
-static inline const char *tomoyo_filetype(const mode_t mode)
+static inline const char *tomoyo_filetype(const umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
@@ -180,7 +180,7 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
struct tomoyo_mini_stat *stat;
unsigned int dev;
- mode_t mode;
+ umode_t mode;
if (!obj->stat_valid[i])
continue;
stat = &obj->stat[i];
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index ed311d7a8ce..deeab7be5b9 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -564,7 +564,7 @@ struct tomoyo_mini_stat {
uid_t uid;
gid_t gid;
ino_t ino;
- mode_t mode;
+ umode_t mode;
dev_t dev;
dev_t rdev;
};
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 738bbdf8d4c..80a09c37cac 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -4,15 +4,8 @@
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
-#include <linux/types.h>
-#include <linux/mount.h>
-#include <linux/mnt_namespace.h>
-#include <linux/fs_struct.h>
-#include <linux/magic.h>
-#include <linux/slab.h>
-#include <net/sock.h>
#include "common.h"
-#include "../../fs/internal.h"
+#include <linux/magic.h>
/**
* tomoyo_encode2 - Encode binary string to ascii string.
@@ -101,9 +94,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
{
char *pos = ERR_PTR(-ENOMEM);
if (buflen >= 256) {
- struct path ns_root = { };
/* go to whatever namespace root we are under */
- pos = __d_path(path, &ns_root, buffer, buflen - 1);
+ pos = d_absolute_path(path, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
struct inode *inode = path->dentry->d_inode;
if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +286,16 @@ char *tomoyo_realpath_from_path(struct path *path)
pos = tomoyo_get_local_path(path->dentry, buf,
buf_len - 1);
/* Get absolute name for the rest. */
- else
+ else {
pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
encode:
if (IS_ERR(pos))
continue;
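d_absolute_path() resolves a path against the absolute namespace root and returns ERR_PTR(-EINVAL) when the object is not reachable from it (for example after a lazy unmount), which is why the hunk above adds a fallback to tomoyo_get_local_path() for exactly that error value. The comparison works because the kernel encodes small negative errno values directly in the pointer; a userspace restatement of those helpers shows why "pos == ERR_PTR(-EINVAL)" is a cheap test for one specific error:

	#include <errno.h>
	#include <stdio.h>

	/* userspace re-statement of the kernel's ERR_PTR()/IS_ERR() helpers */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *pos = ERR_PTR(-EINVAL);

		printf("IS_ERR=%d err=%ld is_einval=%d\n",
		       IS_ERR(pos), PTR_ERR(pos), pos == ERR_PTR(-EINVAL));
		return 0;
	}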
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 2672ac4f3be..482b2a5f48f 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -224,7 +224,7 @@ static const struct file_operations tomoyo_operations = {
*
* Returns nothing.
*/
-static void __init tomoyo_create_entry(const char *name, const mode_t mode,
+static void __init tomoyo_create_entry(const char *name, const umode_t mode,
struct dentry *parent, const u8 key)
{
securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key,
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 4b327b69174..620d37c159a 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -186,7 +186,7 @@ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
- int mode)
+ umode_t mode)
{
struct path path = { parent->mnt, dentry };
return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
@@ -234,7 +234,7 @@ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
- int mode, unsigned int dev)
+ umode_t mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
int type = TOMOYO_TYPE_CREATE;
@@ -353,17 +353,14 @@ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
/**
* tomoyo_path_chmod - Target for security_path_chmod().
*
- * @dentry: Pointer to "struct dentry".
- * @mnt: Pointer to "struct vfsmount".
- * @mode: DAC permission mode.
+ * @path: Pointer to "struct path".
+ * @mode: DAC permission mode.
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+static int tomoyo_path_chmod(struct path *path, umode_t mode)
{
- struct path path = { mnt, dentry };
- return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, &path,
+ return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
mode & S_IALLUGO);
}
diff --git a/sound/aoa/codecs/Kconfig b/sound/aoa/codecs/Kconfig
index 808eb11ebac..0c68e32834c 100644
--- a/sound/aoa/codecs/Kconfig
+++ b/sound/aoa/codecs/Kconfig
@@ -7,14 +7,6 @@ config SND_AOA_ONYX
codec chip found in the latest Apple machines
(most of those with digital audio output).
-#config SND_AOA_TOPAZ
-# tristate "support Topaz chips"
-# ---help---
-# This option enables support for the Topaz (CS84xx)
-# codec chips found in the latest Apple machines,
-# these chips do the digital input and output on
-# some PowerMacs.
-
config SND_AOA_TAS
tristate "support TAS chips"
select I2C
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index e518d38b1c7..b37b702a3a6 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -1097,6 +1097,8 @@ static struct amba_id aaci_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, aaci_ids);
+
static struct amba_driver aaci_driver = {
.drv = {
.name = DRIVER_NAME,
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6e5addeb236..73516f69ac7 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
/* AC97 v2.2 specifications says minimum 1 us. */
udelay(2);
gpio_set_value(chip->reset_pin, 1);
+ } else {
+ ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+ udelay(2);
+ ac97c_writel(chip, MR, AC97C_MR_ENA);
}
}
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 475455c7661..c15682a2f9d 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -5,7 +5,6 @@ config SND_TIMER
config SND_PCM
tristate
select SND_TIMER
- select GCD
config SND_HWDEP
tristate
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 6c9e8e8f45f..5849b129e50 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -521,7 +521,7 @@ config SC6600_CDROMBASE
config SOUND_VIDC
tristate "VIDC 16-bit sound"
- depends on ARM && (ARCH_ACORN || ARCH_CLPS7500)
+ depends on ARM && ARCH_ACORN
help
16-bit support for the VIDC onboard sound hardware found on Acorn
machines.
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index e083122ca55..dbf94b189e7 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -148,7 +148,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
struct cs5535audio_dma_desc *desc =
&((struct cs5535audio_dma_desc *) dma->desc_buf.area)[i];
desc->addr = cpu_to_le32(addr);
- desc->size = cpu_to_le32(period_bytes);
+ desc->size = cpu_to_le16(period_bytes);
desc->ctlreserved = cpu_to_le16(PRD_EOP);
desc_addr += sizeof(struct cs5535audio_dma_desc);
addr += period_bytes;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e44b107fdc7..4562e9de6a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4046,9 +4046,9 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
/* Search for codec ID */
for (q = tbl; q->subvendor; q++) {
- unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
-
- if (vendorid == codec->subsystem_id)
+ unsigned int mask = 0xffff0000 | q->subdevice_mask;
+ unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
+ if ((codec->subsystem_id & mask) == id)
break;
}
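This hda_codec.c hunk switches snd_hda_check_board_codec_sid_config() to masked matching, so that a vendor-only quirk entry (subdevice_mask == 0) matches any subsystem ID from that vendor; the SND_PCI_QUIRK_VENDOR(0x106b, "Apple", ...) entry added to patch_cirrus.c further down relies on exactly this. A small userspace check of the arithmetic (simplified struct and a hypothetical helper name):

	#include <assert.h>
	#include <stdio.h>

	struct quirk {
		unsigned short subvendor, subdevice, subdevice_mask;
	};

	static int quirk_matches(const struct quirk *q, unsigned int subsystem_id)
	{
		unsigned int mask = 0xffff0000u | q->subdevice_mask;
		unsigned int id = ((unsigned int)q->subdevice |
				   ((unsigned int)q->subvendor << 16)) & mask;

		return (subsystem_id & mask) == id;
	}

	int main(void)
	{
		/* vendor-only entry: any Apple (0x106b) SSID matches */
		struct quirk apple = { 0x106b, 0, 0 };

		assert(quirk_matches(&apple, 0x106b1234));
		assert(!quirk_matches(&apple, 0x10de1234));
		puts("ok");
		return 0;
	}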
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 7ae7578bdcc..c1da422e085 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -347,18 +347,28 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
for (i = 0; i < size; i++) {
unsigned int val = hdmi_get_eld_data(codec, nid, i);
+ /*
+ * Graphics driver might be writing to ELD buffer right now.
+ * Just abort. The caller will repoll after a while.
+ */
if (!(val & AC_ELDD_ELD_VALID)) {
- if (!i) {
- snd_printd(KERN_INFO
- "HDMI: invalid ELD data\n");
- ret = -EINVAL;
- goto error;
- }
snd_printd(KERN_INFO
"HDMI: invalid ELD data byte %d\n", i);
- val = 0;
- } else
- val &= AC_ELDD_ELD_DATA;
+ ret = -EINVAL;
+ goto error;
+ }
+ val &= AC_ELDD_ELD_DATA;
+ /*
+ * The first byte cannot be zero. This can happen on some DVI
+ * connections. Some Intel chips may also need some 250ms delay
+ * to return non-zero ELD data, even when the graphics driver
+ * correctly writes ELD content before setting ELD_valid bit.
+ */
+ if (!val && !i) {
+ snd_printdd(KERN_INFO "HDMI: 0 ELD data\n");
+ ret = -EINVAL;
+ goto error;
+ }
buf[i] = val;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 096507d2ca9..c2f79e63124 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+ /* ICH */
{ PCI_DEVICE(0x8086, 0x2668),
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
AZX_DCAPS_BUFSIZE }, /* ICH6 */
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 2fbab8e2957..70a7abda7e2 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,8 @@ struct cs_spec {
unsigned int gpio_mask;
unsigned int gpio_dir;
unsigned int gpio_data;
+ unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
+ unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */
struct hda_pcm pcm_rec[2]; /* PCM information */
@@ -76,6 +78,7 @@ enum {
CS420X_MBP53,
CS420X_MBP55,
CS420X_IMAC27,
+ CS420X_APPLE,
CS420X_AUTO,
CS420X_MODELS
};
@@ -928,10 +931,9 @@ static void cs_automute(struct hda_codec *codec)
spdif_present ? 0 : PIN_OUT);
}
}
- if (spec->board_config == CS420X_MBP53 ||
- spec->board_config == CS420X_MBP55 ||
- spec->board_config == CS420X_IMAC27) {
- unsigned int gpio = hp_present ? 0x02 : 0x08;
+ if (spec->gpio_eapd_hp) {
+ unsigned int gpio = hp_present ?
+ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, gpio);
}
@@ -1276,6 +1278,7 @@ static const char * const cs420x_models[CS420X_MODELS] = {
[CS420X_MBP53] = "mbp53",
[CS420X_MBP55] = "mbp55",
[CS420X_IMAC27] = "imac27",
+ [CS420X_APPLE] = "apple",
[CS420X_AUTO] = "auto",
};
@@ -1285,7 +1288,13 @@ static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
- SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
+ /* this conflicts with too many other models */
+ /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+ {} /* terminator */
+};
+
+static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
+ SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
{} /* terminator */
};
@@ -1367,6 +1376,10 @@ static int patch_cs420x(struct hda_codec *codec)
spec->board_config =
snd_hda_check_board_config(codec, CS420X_MODELS,
cs420x_models, cs420x_cfg_tbl);
+ if (spec->board_config < 0)
+ spec->board_config =
+ snd_hda_check_board_codec_sid_config(codec,
+ CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
if (spec->board_config >= 0)
fix_pincfg(codec, spec->board_config, cs_pincfgs);
@@ -1374,10 +1387,11 @@ static int patch_cs420x(struct hda_codec *codec)
case CS420X_IMAC27:
case CS420X_MBP53:
case CS420X_MBP55:
- /* GPIO1 = headphones */
- /* GPIO3 = speakers */
- spec->gpio_mask = 0x0a;
- spec->gpio_dir = 0x0a;
+ case CS420X_APPLE:
+ spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
+ spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
+ spec->gpio_mask = spec->gpio_dir =
+ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
break;
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 9850c5b481e..c505fd5d338 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -69,6 +69,7 @@ struct hdmi_spec_per_pin {
struct hda_codec *codec;
struct hdmi_eld sink_eld;
struct delayed_work work;
+ int repoll_count;
};
struct hdmi_spec {
@@ -748,7 +749,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
* Unsolicited events
*/
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry);
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
@@ -766,7 +767,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
if (pin_idx < 0)
return;
- hdmi_present_sense(&spec->pins[pin_idx], true);
+ hdmi_present_sense(&spec->pins[pin_idx], 1);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -960,7 +961,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return 0;
}
-static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
+static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
{
struct hda_codec *codec = per_pin->codec;
struct hdmi_eld *eld = &per_pin->sink_eld;
@@ -989,7 +990,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, bool retry)
if (eld_valid) {
if (!snd_hdmi_get_eld(eld, codec, pin_nid))
snd_hdmi_show_eld(eld);
- else if (retry) {
+ else if (repoll) {
queue_delayed_work(codec->bus->workq,
&per_pin->work,
msecs_to_jiffies(300));
@@ -1004,7 +1005,10 @@ static void hdmi_repoll_eld(struct work_struct *work)
struct hdmi_spec_per_pin *per_pin =
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
- hdmi_present_sense(per_pin, false);
+ if (per_pin->repoll_count++ > 6)
+ per_pin->repoll_count = 0;
+
+ hdmi_present_sense(per_pin, per_pin->repoll_count);
}
static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -1235,7 +1239,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
if (err < 0)
return err;
- hdmi_present_sense(per_pin, false);
+ hdmi_present_sense(per_pin, 0);
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 336d14eb72a..1d07e8fa243 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -277,6 +277,12 @@ static bool alc_dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
return false;
}
+static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
+{
+ return spec->capsrc_nids ?
+ spec->capsrc_nids[idx] : spec->adc_nids[idx];
+}
+
/* select the given imux item; either unmute exclusively or select the route */
static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
unsigned int idx, bool force)
@@ -291,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
imux = &spec->input_mux[mux_idx];
if (!imux->num_items && mux_idx > 0)
imux = &spec->input_mux[0];
+ if (!imux->num_items)
+ return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
@@ -303,8 +311,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
adc_idx = spec->dyn_adc_idx[idx];
}
- nid = spec->capsrc_nids ?
- spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+ nid = get_capsrc(spec, adc_idx);
/* no selection? */
num_conns = snd_hda_get_conn_list(codec, nid, NULL);
@@ -1054,8 +1061,19 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
spec->imux_pins[2] = spec->dock_mic_pin;
for (i = 0; i < 3; i++) {
strcpy(imux->items[i].label, texts[i]);
- if (spec->imux_pins[i])
+ if (spec->imux_pins[i]) {
+ hda_nid_t pin = spec->imux_pins[i];
+ int c;
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t cap = get_capsrc(spec, c);
+ int idx = get_connection_index(codec, cap, pin);
+ if (idx >= 0) {
+ imux->items[i].index = idx;
+ break;
+ }
+ }
imux->num_items = i + 1;
+ }
}
spec->num_mux_defs = 1;
spec->input_mux = imux;
@@ -1957,10 +1975,8 @@ static int alc_build_controls(struct hda_codec *codec)
if (!kctl)
kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
for (i = 0; kctl && i < kctl->count; i++) {
- const hda_nid_t *nids = spec->capsrc_nids;
- if (!nids)
- nids = spec->adc_nids;
- err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+ err = snd_hda_add_nid(codec, kctl, i,
+ get_capsrc(spec, i));
if (err < 0)
return err;
}
@@ -2615,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
case AUTO_PIN_SPEAKER_OUT:
if (cfg->line_outs == 1)
return "Speaker";
+ if (cfg->line_outs == 2)
+ return ch ? "Bass Speaker" : "Speaker";
break;
case AUTO_PIN_HP_OUT:
/* for multi-io case, only the primary out */
@@ -2747,8 +2765,7 @@ static int alc_auto_create_input_ctls(struct hda_codec *codec)
}
for (c = 0; c < num_adcs; c++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[c] : spec->adc_nids[c];
+ hda_nid_t cap = get_capsrc(spec, c);
idx = get_connection_index(codec, cap, pin);
if (idx >= 0) {
spec->imux_pins[imux->num_items] = pin;
@@ -2889,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
if (!nid)
continue;
if (found_in_nid_list(nid, spec->multiout.dac_nids,
- spec->multiout.num_dacs))
+ ARRAY_SIZE(spec->private_dac_nids)))
continue;
if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2910,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
return 0;
}
+/* return 0 if no possible DAC is found, 1 if one or more found */
static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
const hda_nid_t *pins, hda_nid_t *dacs)
{
@@ -2927,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
if (!dacs[i])
dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
}
- return 0;
+ return 1;
}
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2937,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
static int alc_auto_fill_dac_nids(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- const struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
bool redone = false;
int i;
@@ -2948,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
spec->multiout.extra_out_nid[0] = 0;
memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
spec->multiout.dac_nids = spec->private_dac_nids;
+ spec->multi_ios = 0;
/* fill hard-wired DACs first */
if (!redone) {
@@ -2981,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
for (i = 0; i < cfg->line_outs; i++) {
if (spec->private_dac_nids[i])
spec->multiout.num_dacs++;
- else
+ else {
memmove(spec->private_dac_nids + i,
spec->private_dac_nids + i + 1,
sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+ spec->private_dac_nids[cfg->line_outs - 1] = 0;
+ }
}
if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3006,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
if (cfg->line_out_type != AUTO_PIN_HP_OUT)
alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
spec->multiout.hp_out_nid);
- if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
- alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
- spec->multiout.extra_out_nid);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid);
+ /* if no speaker volume is assigned, try again as the primary
+ * output
+ */
+ if (!err && cfg->speaker_outs > 0 &&
+ cfg->line_out_type == AUTO_PIN_HP_OUT) {
+ cfg->hp_outs = cfg->line_outs;
+ memcpy(cfg->hp_pins, cfg->line_out_pins,
+ sizeof(cfg->hp_pins));
+ cfg->line_outs = cfg->speaker_outs;
+ memcpy(cfg->line_out_pins, cfg->speaker_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->speaker_outs = 0;
+ memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+ cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+ redone = false;
+ goto again;
+ }
+ }
return 0;
}
@@ -3158,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
}
static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
- hda_nid_t dac, const char *pfx)
+ hda_nid_t dac, const char *pfx,
+ int cidx)
{
struct alc_spec *spec = codec->spec;
hda_nid_t sw, vol;
@@ -3174,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
if (is_ctl_used(spec->sw_ctls, val))
return 0; /* already created */
mark_ctl_usage(spec->sw_ctls, val);
- return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+ return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
}
sw = alc_look_for_out_mute_nid(codec, pin, dac);
vol = alc_look_for_out_vol_nid(codec, pin, dac);
- err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+ err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
if (err < 0)
return err;
- err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+ err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
if (err < 0)
return err;
return 0;
@@ -3223,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
hda_nid_t dac = *dacs;
if (!dac)
dac = spec->multiout.dac_nids[0];
- return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+ return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
}
if (dacs[num_pins - 1]) {
/* OK, we have a multi-output system with individual volumes */
for (i = 0; i < num_pins; i++) {
- snprintf(name, sizeof(name), "%s %s",
- pfx, channel_name[i]);
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- name);
+ if (num_pins >= 3) {
+ snprintf(name, sizeof(name), "%s %s",
+ pfx, channel_name[i]);
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ name, 0);
+ } else {
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ pfx, i);
+ }
if (err < 0)
return err;
}
@@ -3694,8 +3740,7 @@ static int init_capsrc_for_pin(struct hda_codec *codec, hda_nid_t pin)
if (!pin)
return 0;
for (i = 0; i < spec->num_adc_nids; i++) {
- hda_nid_t cap = spec->capsrc_nids ?
- spec->capsrc_nids[i] : spec->adc_nids[i];
+ hda_nid_t cap = get_capsrc(spec, i);
int idx;
idx = get_connection_index(codec, cap, pin);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 470f6f286e8..616678fde48 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -215,6 +215,7 @@ struct sigmatel_spec {
unsigned int gpio_mute;
unsigned int gpio_led;
unsigned int gpio_led_polarity;
+ unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
unsigned int vref_led;
/* stream */
@@ -1641,6 +1642,8 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
"Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
{} /* terminator */
};
@@ -4316,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
spec->eapd_switch = val;
get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
- if (spec->gpio_led <= 8) {
- spec->gpio_mask |= spec->gpio_led;
- spec->gpio_dir |= spec->gpio_led;
- if (spec->gpio_led_polarity)
- spec->gpio_data |= spec->gpio_led;
- }
+ spec->gpio_mask |= spec->gpio_led;
+ spec->gpio_dir |= spec->gpio_led;
+ if (spec->gpio_led_polarity)
+ spec->gpio_data |= spec->gpio_led;
}
}
@@ -4439,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
int pinctl, def_conf;
/* power on when no jack detection is available */
- if (!spec->hp_detect) {
+ /* or when the VREF is used for controlling LED */
+ if (!spec->hp_detect ||
+ spec->vref_mute_led_nid == nid) {
stac_toggle_power_map(codec, nid, 1);
continue;
}
@@ -4911,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
&spec->gpio_led_polarity,
&spec->gpio_led) == 2) {
- if (spec->gpio_led < 4)
+ unsigned int max_gpio;
+ max_gpio = snd_hda_param_read(codec, codec->afg,
+ AC_PAR_GPIO_CAP);
+ max_gpio &= AC_GPIO_IO_COUNT;
+ if (spec->gpio_led < max_gpio)
spec->gpio_led = 1 << spec->gpio_led;
+ else
+ spec->vref_mute_led_nid = spec->gpio_led;
return 1;
}
if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4920,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
set_hp_led_gpio(codec);
return 1;
}
+ /* BIOS bug: unfilled OEM string */
+ if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+ set_hp_led_gpio(codec);
+ spec->gpio_led_polarity = 1;
+ return 1;
+ }
}
/*
@@ -5041,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
struct sigmatel_spec *spec = codec->spec;
/* sync mute LED */
- if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data);
- } else {
- stac_vrefout_set(codec,
- spec->gpio_led, spec->vref_led);
- }
- }
- return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
- struct sigmatel_spec *spec = codec->spec;
- if (spec->gpio_led > 8) {
- /* with vref-out pin used for mute led control
- * codec AFG is prevented from D3 state, but on
- * system suspend it can (and should) be used
- */
- snd_hda_codec_read(codec, codec->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- }
+ if (spec->vref_mute_led_nid)
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
+ else if (spec->gpio_led)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data);
return 0;
}
@@ -5074,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
struct sigmatel_spec *spec = codec->spec;
if (power_state == AC_PWRST_D3) {
- if (spec->gpio_led > 8) {
+ if (spec->vref_mute_led_nid) {
/* with vref-out pin used for mute led control
* codec AFG is prevented from D3 state
*/
@@ -5127,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
}
}
/*polarity defines *not* muted state level*/
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
if (muted)
spec->gpio_data &= ~spec->gpio_led; /* orange */
else
@@ -5145,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
muted_lvl = spec->gpio_led_polarity ?
AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
spec->vref_led = muted ? muted_lvl : notmtd_lvl;
- stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
}
return 0;
}
@@ -5659,15 +5658,13 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
@@ -5974,15 +5971,13 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 431c0d417ee..b5137629f8e 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -208,6 +208,7 @@ struct via_spec {
/* work to check hp jack state */
struct hda_codec *codec;
struct delayed_work vt1708_hp_work;
+ int hp_work_active;
int vt1708_jack_detect;
int vt1708_hp_present;
@@ -305,27 +306,35 @@ enum {
static void analog_low_current_mode(struct hda_codec *codec);
static bool is_aa_path_mute(struct hda_codec *codec);
-static void vt1708_start_hp_work(struct via_spec *spec)
+#define hp_detect_with_aa(codec) \
+ (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
+ !is_aa_path_mute(codec))
+
+static void vt1708_stop_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- if (!delayed_work_pending(&spec->vt1708_hp_work))
- schedule_delayed_work(&spec->vt1708_hp_work,
- msecs_to_jiffies(100));
+ if (spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
+ cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ spec->hp_work_active = 0;
+ }
}
-static void vt1708_stop_hp_work(struct via_spec *spec)
+static void vt1708_update_hp_work(struct via_spec *spec)
{
if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
return;
- if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
- && !is_aa_path_mute(spec->codec))
- return;
- snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
- !spec->vt1708_jack_detect);
- cancel_delayed_work_sync(&spec->vt1708_hp_work);
+ if (spec->vt1708_jack_detect &&
+ (spec->active_streams || hp_detect_with_aa(spec->codec))) {
+ if (!spec->hp_work_active) {
+ snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
+ spec->hp_work_active = 1;
+ }
+ } else if (!hp_detect_with_aa(spec->codec))
+ vt1708_stop_hp_work(spec);
}
static void set_widgets_power_state(struct hda_codec *codec)
@@ -343,12 +352,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
set_widgets_power_state(codec);
analog_low_current_mode(snd_kcontrol_chip(kcontrol));
- if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
- if (is_aa_path_mute(codec))
- vt1708_start_hp_work(codec->spec);
- else
- vt1708_stop_hp_work(codec->spec);
- }
+ vt1708_update_hp_work(codec->spec);
return change;
}
@@ -1154,7 +1158,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_dac_stream_tag = stream_tag;
spec->cur_dac_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1174,7 +1178,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
spec->cur_hp_stream_tag = stream_tag;
spec->cur_hp_format = format;
mutex_unlock(&spec->config_mutex);
- vt1708_start_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1188,7 +1192,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
spec->active_streams &= ~STREAM_MULTI_OUT;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1203,7 +1207,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
spec->active_streams &= ~STREAM_INDEP_HP;
mutex_unlock(&spec->config_mutex);
- vt1708_stop_hp_work(spec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -1645,7 +1649,8 @@ static void via_hp_automute(struct hda_codec *codec)
int nums;
struct via_spec *spec = codec->spec;
- if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
+ if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
+ (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
if (spec->smart51_enabled)
@@ -2612,8 +2617,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect =
- !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
return 0;
}
@@ -2623,18 +2626,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct via_spec *spec = codec->spec;
- int change;
+ int val;
if (spec->codec_type != VT1708)
return 0;
- spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
- change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
- == !spec->vt1708_jack_detect;
- if (spec->vt1708_jack_detect) {
+ val = !!ucontrol->value.integer.value[0];
+ if (spec->vt1708_jack_detect == val)
+ return 0;
+ spec->vt1708_jack_detect = val;
+ if (spec->vt1708_jack_detect &&
+ snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
mute_aa_path(codec, 1);
notify_aa_path_ctls(codec);
}
- return change;
+ via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
+ return 1;
}
static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
@@ -2771,6 +2778,7 @@ static int via_init(struct hda_codec *codec)
via_auto_init_unsol_event(codec);
via_hp_automute(codec);
+ vt1708_update_hp_work(spec);
return 0;
}
@@ -2787,7 +2795,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
spec->vt1708_hp_present ^= 1;
via_hp_automute(spec->codec);
}
- vt1708_start_hp_work(spec);
+ if (spec->vt1708_jack_detect)
+ schedule_delayed_work(&spec->vt1708_hp_work,
+ msecs_to_jiffies(100));
}
static int get_mux_nids(struct hda_codec *codec)
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 5c8717e29ee..8c3e7fcefd9 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -78,10 +78,15 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
return ioread32(address);
}
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
+static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
+ u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_fromio(data, address, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+ /* we cannot use memcpy_fromio */
+ for (i = 0; i != len; ++i)
+ data[i] = ioread32(address + i);
}
@@ -91,11 +96,15 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
iowrite32(data, address);
}
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len)
+static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
+ const u32 *data, u32 len)
{
- void __iomem *address = lx_dsp_register(chip, port);
- memcpy_toio(address, data, len*sizeof(u32));
+ u32 __iomem *address = lx_dsp_register(chip, port);
+ int i;
+
+ /* we cannot use memcpy_toio */
+ for (i = 0; i != len; ++i)
+ iowrite32(data[i], address + i);
}
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 1dd562980b6..4d7ff797a64 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -72,10 +72,7 @@ enum {
};
unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port);
-void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len);
void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data);
-void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
- u32 len);
/* plx register access */
enum {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e760adad952..19ee2203cbb 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6518,7 +6518,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
hdspm->io_type = AES32;
hdspm->card_name = "RME AES32";
hdspm->midiPorts = 2;
- } else if ((hdspm->firmware_rev == 0xd5) ||
+ } else if ((hdspm->firmware_rev == 0xd2) ||
((hdspm->firmware_rev >= 0xc8) &&
(hdspm->firmware_rev <= 0xcf))) {
hdspm->io_type = MADI;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index a391e622a19..28dfafb56dd 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int enable = 1;
+static int codecs = 1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
module_param(enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
dma_addr_t silence_dma_addr;
};
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
#define SIS_PRIMARY_CODEC_PRESENT 0x0001
#define SIS_SECONDARY_CODEC_PRESENT 0x0002
#define SIS_TERTIARY_CODEC_PRESENT 0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
{
unsigned long io = sis->ioport;
void __iomem *ioaddr = sis->ioaddr;
+ unsigned long timeout;
u16 status;
int count;
int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
udelay(1);
+ /* Command complete, we can let go of the semaphore now.
+ */
+ outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+ if (!count)
+ return -EIO;
+
/* Now that we've finished the reset, find out what's attached.
+ * There are some codec/board combinations that take an extremely
+ * long time to come up. 350+ ms has been observed in the field,
+ * so we'll give them up to 500ms.
*/
- status = inl(io + SIS_AC97_STATUS);
- if (status & SIS_AC97_STATUS_CODEC_READY)
- sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC2_READY)
- sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC3_READY)
- sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
- /* All done, let go of the semaphore, and check for errors
+ sis->codecs_present = 0;
+ timeout = msecs_to_jiffies(500) + jiffies;
+ while (time_before_eq(jiffies, timeout)) {
+ status = inl(io + SIS_AC97_STATUS);
+ if (status & SIS_AC97_STATUS_CODEC_READY)
+ sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC2_READY)
+ sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC3_READY)
+ sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+ if (sis->codecs_present == codecs)
+ break;
+
+ msleep(1);
+ }
+
+ /* All done, check for errors.
*/
- outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
- if (!sis->codecs_present || !count)
+ if (!sis->codecs_present) {
+ printk(KERN_ERR "sis7019: could not find any codecs\n");
return -EIO;
+ }
+
+ if (sis->codecs_present != codecs) {
+ printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+ sis->codecs_present, codecs);
+ }
/* Let the hardware know that the audio driver is alive,
* and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
if (!enable)
goto error_out;
+ /* The user can specify which codecs should be present so that we
+ * can wait for them to show up if they are slow to recover from
+ * the AC97 cold reset. We default to a single codec, the primary.
+ *
+ * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+ */
+ codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+ SIS_TERTIARY_CODEC_PRESENT;
+ if (!codecs)
+ codecs = SIS_PRIMARY_CODEC_PRESENT;
+
rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
if (rc < 0)
goto error_out;
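The sis_chip_init() hunk above replaces the single status-register read with a wait of up to 500 ms, because some codec/board combinations take 350 ms or more to report ready after the AC97 cold reset; the new 'codecs' module parameter tells the loop which ready bits to wait for. A userspace sketch of the same wait-with-deadline structure (simulated status register and a hypothetical bit name):

	#include <stdio.h>
	#include <time.h>

	#define PRIMARY_READY 0x1	/* hypothetical codec-ready status bit */

	/* stand-in for the hardware status register: pretend the codec
	 * becomes ready roughly 50 polls after the reset */
	static unsigned int read_status(void)
	{
		static int polls;
		return ++polls > 50 ? PRIMARY_READY : 0;
	}

	static void sleep_ms(long ms)
	{
		struct timespec ts = { .tv_sec = ms / 1000,
				       .tv_nsec = (ms % 1000) * 1000000L };
		nanosleep(&ts, NULL);
	}

	int main(void)
	{
		struct timespec start, now;
		unsigned int present = 0, expected = PRIMARY_READY;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			present |= read_status();
			if (present == expected)
				break;		/* everything we expect is up */
			clock_gettime(CLOCK_MONOTONIC, &now);
			if ((now.tv_sec - start.tv_sec) * 1000L +
			    (now.tv_nsec - start.tv_nsec) / 1000000L >= 500)
				break;		/* give up after 500 ms */
			sleep_ms(1);
		}
		printf("codecs present: %#x\n", present);
		return present ? 0 : 1;
	}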
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index bee3c94f58b..d1fcc816ce9 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -1,6 +1,6 @@
config SND_ATMEL_SOC
tristate "SoC Audio for the Atmel System-on-Chip"
- depends on ARCH_AT91 || AVR32
+ depends on ARCH_AT91
help
Say Y or M if you want to add support for codecs attached to
the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
Say Y if you want to add support for SoC audio on WM8731-based
AT91sam9g20 evaluation board.
-config SND_AT32_SOC_PLAYPAQ
- tristate "SoC Audio support for PlayPaq with WM8510"
- depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
- select SND_ATMEL_SOC_SSC
- select SND_SOC_WM8510
- help
- Say Y or M here if you want to add support for SoC audio
- on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
- bool "Run CODEC on PlayPaq in slave mode"
- depends on SND_AT32_SOC_PLAYPAQ
- default n
- help
- Say Y if you want to run with the AT32 SSC generating the BCLK
- and FRAME signals on the PlayPaq. Unless you want to play
- with the AT32 as the SSC master, you probably want to say N here,
- as this will give you better sound quality.
-
config SND_AT91_SOC_AFEB9260
tristate "SoC Audio support for AFEB9260 board"
depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index e7ea56bd5f8..a5c0bf19da7 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
# AT91 Machine Support
snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644
index 73ae99ad457..00000000000
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- * Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code. Something like:
- * at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN GPIO_PIN_PA(30)
-#define MCLK_PERIPH GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
- /* CMR div */
- unsigned int cmr_div;
-
- /* Frame period (as needed by xCMR.PERIOD) */
- unsigned int period;
-
- /* The SSC clock rate these settings where calculated for */
- unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *cpu_dai)
-{
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- struct ssc_clock_data cd;
- unsigned int rate, width_bits, channels;
- unsigned int bitrate, ssc_div;
- unsigned actual_rate;
-
-
- /*
- * Figure out required bitrate
- */
- rate = params_rate(params);
- channels = params_channels(params);
- width_bits = snd_pcm_format_physical_width(params_format(params));
- bitrate = rate * width_bits * channels;
-
-
- /*
- * Figure out required SSC divider and period for required bitrate
- */
- cd.ssc_rate = clk_get_rate(ssc->clk);
- ssc_div = cd.ssc_rate / bitrate;
- cd.cmr_div = ssc_div / 2;
- if (ssc_div & 1) {
- /* round cmr_div up */
- cd.cmr_div++;
- }
- cd.period = width_bits - 1;
-
-
- /*
- * Find actual rate, compare to requested rate
- */
- actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
- pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
- rate, actual_rate);
-
-
- return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
- int ret;
-
-
- /* Due to difficulties with getting the correct clocks from the AT32's
- * PLL0, we're going to let the CODEC be in charge of all the clocks
- */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
-#else
- struct ssc_clock_data cd;
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
- if (ssc == NULL) {
- pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
- return -EINVAL;
- }
-
-
- /*
- * Figure out PLL and BCLK dividers for WM8510
- */
- switch (params_rate(params)) {
- case 48000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 44100:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 22050:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_4;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 16000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_6;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 11025:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_8;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 8000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_12;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- default:
- pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
- params_rate(params));
- return -EINVAL;
- }
-
-
- /*
- * set CPU and CODEC DAI configuration
- */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CODEC DAI format (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU DAI format (%d)\n",
- ret);
- return ret;
- }
-
-
- /*
- * Set CPU clock configuration
- */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
- pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
- cd.cmr_div, cd.period);
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
- cd.period);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU transmit period (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- /*
- * Set CODEC clock configuration
- */
- pr_debug("playpaq_wm8510: "
- "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
- clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
- if (ret < 0) {
- pr_warning
- ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
- clk_get_rate(CODEC_CLK), pll_out);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
- ret);
- return ret;
- }
-
-
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
- ret);
- return ret;
- }
-
-
- return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
- .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
- /* speaker connected to SPKOUT */
- {"Ext Spk", NULL, "SPKOUTP"},
- {"Ext Spk", NULL, "SPKOUTN"},
-
- {"Mic Bias", NULL, "Int Mic"},
- {"MICN", NULL, "Mic Bias"},
- {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int i;
-
- /*
- * Add DAPM widgets
- */
- for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
- snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
- /*
- * Setup audio path interconnects
- */
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
- /* always connected pins */
- snd_soc_dapm_enable_pin(dapm, "Int Mic");
- snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
- /* Make CSB show PLL rate */
- snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
- WM8510_OPCLKDIV_1 | 4);
-
- return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
- .name = "WM8510",
- .stream_name = "WM8510 PCM",
- .cpu_dai_name= "atmel-ssc-dai.0",
- .platform_name = "atmel-pcm-audio",
- .codec_name = "wm8510-codec.0-0x1a",
- .codec_dai_name = "wm8510-hifi",
- .init = playpaq_wm8510_init,
- .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
- .name = "LRS_PlayPaq_WM8510",
- .dai_link = &playpaq_wm8510_dai,
- .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
- int ret = 0;
-
- /*
- * Configure MCLK for WM8510
- */
- _gclk0 = clk_get(NULL, "gclk0");
- if (IS_ERR(_gclk0)) {
- _gclk0 = NULL;
- ret = PTR_ERR(_gclk0);
- goto err_gclk0;
- }
- _pll0 = clk_get(NULL, "pll0");
- if (IS_ERR(_pll0)) {
- _pll0 = NULL;
- ret = PTR_ERR(_pll0);
- goto err_pll0;
- }
- ret = clk_set_parent(_gclk0, _pll0);
- if (ret) {
- pr_warning("snd-soc-playpaq: "
- "Failed to set PLL0 as parent for DAC clock\n");
- goto err_set_clk;
- }
- clk_set_rate(CODEC_CLK, 12000000);
- clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
- /*
- * Create and register platform device
- */
- playpaq_snd_device = platform_device_alloc("soc-audio", 0);
- if (playpaq_snd_device == NULL) {
- ret = -ENOMEM;
- goto err_device_alloc;
- }
-
- platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
- ret = platform_device_add(playpaq_snd_device);
- if (ret) {
- pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
- ret);
- goto err_device_add;
- }
-
- return 0;
-
-
-err_device_add:
- if (playpaq_snd_device != NULL) {
- platform_device_put(playpaq_snd_device);
- playpaq_snd_device = NULL;
- }
-err_device_alloc:
-err_set_clk:
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-err_pll0:
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_free_pin(MCLK_PIN);
-#endif
-
- platform_device_unregister(playpaq_snd_device);
- playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4584514d93d..fa787d45d74 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
select SND_SOC_DFBMCS320
- select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+ select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX98095 if I2C
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 444747f0db2..dd7be0dbbc5 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -34,7 +34,7 @@
#define AD1836_ADC_CTRL2 13
#define AD1836_ADC_WORD_LEN_MASK 0x30
-#define AD1836_ADC_WORD_OFFSET 5
+#define AD1836_ADC_WORD_OFFSET 4
#define AD1836_ADC_SERFMT_MASK (7 << 6)
#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ccf8dd4757..45c63028b40 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -245,7 +245,7 @@ static const char *adau1373_bass_hpf_cutoff_text[] = {
};
static const unsigned int adau1373_bass_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(3),
0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index f1f237ecec2..73f46eb459f 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
static int cs4270_soc_resume(struct snd_soc_codec *codec)
{
struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
- struct i2c_client *i2c_client = to_i2c_client(codec->dev);
int reg;
regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
ndelay(500);
/* first restore the entire register cache ... */
- for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
- u8 val = snd_soc_read(codec, reg);
-
- if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
- dev_err(codec->dev, "i2c write failed\n");
- return -EIO;
- }
- }
+ snd_soc_cache_sync(codec);
/* ... then disable the power-down bits */
reg = snd_soc_read(codec, CS4270_PWRCTL);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 23d1bd5dadd..69fde1506fe 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -434,7 +434,8 @@ static int cs4271_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
{
int ret;
/* Set power-down bit */
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0, CS4271_MODE2_PDN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN,
+ CS4271_MODE2_PDN);
if (ret < 0)
return ret;
return 0;
@@ -501,8 +502,9 @@ static int cs4271_probe(struct snd_soc_codec *codec)
return ret;
}
- ret = snd_soc_update_bits(codec, CS4271_MODE2, 0,
- CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
+ ret = snd_soc_update_bits(codec, CS4271_MODE2,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN,
+ CS4271_MODE2_PDN | CS4271_MODE2_CPEN);
if (ret < 0)
return ret;
ret = snd_soc_update_bits(codec, CS4271_MODE2, CS4271_MODE2_PDN, 0);
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 8c3c8205d19..1ee66361f61 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
.probe = cs42l51_probe,
- .reg_cache_size = CS42L51_NUMREGS,
+ .reg_cache_size = CS42L51_NUMREGS + 1,
.reg_word_size = sizeof(u8),
};
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index e373f8f0690..3e1f4e172bf 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 9e7e964a5fa..dcf6f2a1600 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
unsigned int mask = mc->max;
unsigned int val = (ucontrol->value.integer.value[0] & mask);
unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
- unsigned int change = 1;
+ unsigned int change = 0;
- if (((max9877_regs[reg] >> shift) & mask) == val)
- change = 0;
+ if (((max9877_regs[reg] >> shift) & mask) != val)
+ change = 1;
- if (((max9877_regs[reg2] >> shift) & mask) == val2)
- change = 0;
+ if (((max9877_regs[reg2] >> shift) & mask) != val2)
+ change = 1;
if (change) {
max9877_regs[reg] &= ~(mask << shift);
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 27a078cbb6e..4646e808b90 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -177,7 +177,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
static unsigned int mic_bst_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(7),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d15695d1c27..f6b6551940a 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -365,7 +365,7 @@ static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
/* tlv for mic gain, 0db 20db 30db 40db */
static const unsigned int mic_gain_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
};
@@ -833,7 +833,7 @@ static int ldo_regulator_register(struct snd_soc_codec *codec,
ldo->voltage = voltage;
ldo->dev = regulator_register(&ldo->desc, codec->dev,
- init_data, ldo);
+ init_data, ldo, NULL);
if (IS_ERR(ldo->dev)) {
int ret = PTR_ERR(ldo->dev);
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index bb82408ab8e..d2f37152f94 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -76,6 +76,8 @@ struct sta32x_priv {
unsigned int mclk;
unsigned int format;
+
+ u32 coef_shadow[STA32X_COEF_COUNT];
};
static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
@@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
int numcoef = kcontrol->private_value >> 16;
int index = kcontrol->private_value & 0xffff;
unsigned int cfud;
@@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
snd_soc_write(codec, STA32X_CFUD, cfud);
snd_soc_write(codec, STA32X_CFADDR2, index);
+ for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
+ sta32x->coef_shadow[index + i] =
+ (ucontrol->value.bytes.data[3 * i] << 16)
+ | (ucontrol->value.bytes.data[3 * i + 1] << 8)
+ | (ucontrol->value.bytes.data[3 * i + 2]);
for (i = 0; i < 3 * numcoef; i++)
snd_soc_write(codec, STA32X_B1CF1 + i,
ucontrol->value.bytes.data[i]);
@@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
return 0;
}
+int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
+{
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
+ unsigned int cfud;
+ int i;
+
+ /* preserve reserved bits in STA32X_CFUD */
+ cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
+
+ for (i = 0; i < STA32X_COEF_COUNT; i++) {
+ snd_soc_write(codec, STA32X_CFADDR2, i);
+ snd_soc_write(codec, STA32X_B1CF1,
+ (sta32x->coef_shadow[i] >> 16) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF2,
+ (sta32x->coef_shadow[i] >> 8) & 0xff);
+ snd_soc_write(codec, STA32X_B1CF3,
+ (sta32x->coef_shadow[i]) & 0xff);
+ /* chip documentation does not say if the bits are
+ * self-clearing, so do it explicitly */
+ snd_soc_write(codec, STA32X_CFUD, cfud);
+ snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
+ }
+ return 0;
+}
+
+int sta32x_cache_sync(struct snd_soc_codec *codec)
+{
+ unsigned int mute;
+ int rc;
+
+ if (!codec->cache_sync)
+ return 0;
+
+ /* mute during register sync */
+ mute = snd_soc_read(codec, STA32X_MMUTE);
+ snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
+ sta32x_sync_coef_shadow(codec);
+ rc = snd_soc_cache_sync(codec);
+ snd_soc_write(codec, STA32X_MMUTE, mute);
+ return rc;
+}
+
#define SINGLE_COEF(xname, index) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = sta32x_coefficient_info, \
@@ -661,7 +711,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
- snd_soc_cache_sync(codec);
+ sta32x_cache_sync(codec);
}
/* Power up to mute */
@@ -790,6 +840,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
STA32X_CxCFG_OM_MASK,
2 << STA32X_CxCFG_OM_SHIFT);
+ /* initialize coefficient shadow RAM with reset values */
+ for (i = 4; i <= 49; i += 5)
+ sta32x->coef_shadow[i] = 0x400000;
+ for (i = 50; i <= 54; i++)
+ sta32x->coef_shadow[i] = 0x7fffff;
+ sta32x->coef_shadow[55] = 0x5a9df7;
+ sta32x->coef_shadow[56] = 0x7fffff;
+ sta32x->coef_shadow[59] = 0x7fffff;
+ sta32x->coef_shadow[60] = 0x400000;
+ sta32x->coef_shadow[61] = 0x400000;
+
sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Bias level configuration will have done an extra enable */
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
index b97ee5a7566..d8e32a6262e 100644
--- a/sound/soc/codecs/sta32x.h
+++ b/sound/soc/codecs/sta32x.h
@@ -19,6 +19,7 @@
/* STA326 register addresses */
#define STA32X_REGISTER_COUNT 0x2d
+#define STA32X_COEF_COUNT 62
#define STA32X_CONFA 0x00
#define STA32X_CONFB 0x01
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index c5ca8cfea60..0441893e270 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
static int __init uda1380_modinit(void)
{
- int ret;
+ int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&uda1380_i2c_driver);
if (ret != 0)
pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
#endif
- return 0;
+ return ret;
}
module_init(uda1380_modinit);
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7e5ec03f6f8..a7c9ae17fc7 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -453,6 +453,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, WM8731_PWR, 0xffff);
regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
wm8731->supplies);
+ codec->cache_sync = 1;
break;
}
codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a9504710bb6..3a629d0d690 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -190,6 +190,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
u16 ioctl;
+ if (wm8753->dai_func == ucontrol->value.integer.value[0])
+ return 0;
+
if (codec->active)
return -EBUSY;
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index bfdc52370ad..d3b0a20744f 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -235,6 +235,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
switch (snd_pcm_format_width(params_format(params))) {
case 16:
iface = 0;
+ break;
case 20:
iface = 0x10;
break;
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0293763debe..5a14d5c0e0e 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
+ memcpy(&data32, fw->data, sizeof(data32));
+ data32 = be32_to_cpu(data32);
dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 91d3c6dbeba..53edd9a8c75 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1973,7 +1973,7 @@ static int wm8962_reset(struct snd_soc_codec *codec)
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
static const unsigned int mixinpga_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(5),
0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
@@ -1988,7 +1988,7 @@ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
static const unsigned int classd_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index eec8e143511..d1a142f48b0 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -512,7 +512,7 @@ static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
static const unsigned int drc_max_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 9c982e47eb9..d0c545b73d7 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
};
static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
};
static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
};
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
lrclk = bclk_rate / params_rate(params);
+ if (!lrclk) {
+ dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+ bclk_rate);
+ return -EINVAL;
+ }
dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
lrclk, bclk_rate / lrclk);
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
switch (wm8994->revision) {
case 0:
case 1:
+ case 2:
+ case 3:
wm8994->hubs.dcs_codes_l = -9;
wm8994->hubs.dcs_codes_r = -5;
break;
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 645c980d6b8..a33b04d1719 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
break;
case 24576000:
ratediv = WM8996_SYSCLK_DIV;
+ wm8996->sysclk /= 2;
case 12288000:
snd_soc_update_bits(codec, WM8996_AIF_RATE,
WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 3cd35a02c28..4a398c3bfe8 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -807,7 +807,6 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
mdelay(100);
/* Normal bias enable & soft start off */
- reg |= WM9081_BIAS_ENA;
reg &= ~WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -818,7 +817,7 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
}
/* VMID 2*240k */
- reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg &= ~WM9081_VMID_SEL_MASK;
reg |= 0x04;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
@@ -830,14 +829,15 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_OFF:
- /* Startup bias source */
+ /* Startup bias source and disable bias */
reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_BIAS_SRC;
+ reg &= ~WM9081_BIAS_ENA;
snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
- /* Disable VMID and biases with soft ramping */
+ /* Disable VMID with soft ramping */
reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
- reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
+ reg &= ~WM9081_VMID_SEL_MASK;
reg |= WM9081_VMID_RAMP;
snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 2b5252c9e37..f94c06057c6 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -177,19 +177,19 @@ static void wait_for_dc_servo(struct snd_soc_codec *codec)
}
static const unsigned int in_tlv[] = {
- TLV_DB_RANGE_HEAD(6),
+ TLV_DB_RANGE_HEAD(3),
0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
};
static const unsigned int mix_tlv[] = {
- TLV_DB_RANGE_HEAD(4),
+ TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 84f33d4ea2c..48e61e91240 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -40,7 +40,7 @@ static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
static const unsigned int spkboost_tlv[] = {
- TLV_DB_RANGE_HEAD(7),
+ TLV_DB_RANGE_HEAD(2),
0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
};
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0268cf98973..83c4bd5b2dd 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -694,6 +694,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
/* Initialize the the device_attribute structure */
dev_attr = &ssi_private->dev_attr;
+ sysfs_attr_init(&dev_attr->attr);
dev_attr->attr.name = "statistics";
dev_attr->attr.mode = S_IRUGO;
dev_attr->show = fsl_sysfs_ssi_show;
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 31af405bda8..ae49f1c78c6 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
}
if (strcasecmp(sprop, "i2s-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
}
machine_data->clk_frequency = be32_to_cpup(iprop);
} else if (strcasecmp(sprop, "i2s-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "lj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "lj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "rj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "rj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "ac97-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "ac97-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else {
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index b133bfcc584..738391757f2 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
config SND_SOC_MX27VIS_AIC32X4
tristate "SoC audio support for Visstrim M10 boards"
- depends on MACH_IMX27_VISSTRIM_M10
+ depends on MACH_IMX27_VISSTRIM_M10 && I2C
select SND_SOC_TLV320AIC32X4
select SND_MXC_SOC_MX2
help
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 8f49e165f4d..c62d715235e 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
config SND_KIRKWOOD_SOC_OPENRD
tristate "SoC Audio support for Kirkwood Openrd Client"
depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+ depends on I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_CS42L51
help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
config SND_KIRKWOOD_SOC_T5325
tristate "SoC Audio support for HP t5325"
- depends on SND_KIRKWOOD_SOC && MACH_T5325
+ depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_ALC5623
help
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index cd33de1c5b7..df12e0993f5 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -94,9 +94,10 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
- unsigned long dma,
- struct mbus_dram_target_info *dram)
+static void
+kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
+ unsigned long dma,
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -106,7 +107,7 @@ static void kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
/* try to find matching cs for current dma address */
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
writel(cs->base & 0xffff0000,
base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
@@ -127,6 +128,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
struct kirkwood_dma_data *priv;
struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
+ const struct mbus_dram_target_info *dram;
unsigned long addr;
priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
@@ -175,15 +177,16 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
}
+ dram = mv_mbus_dram_info();
addr = virt_to_phys(substream->dma_buffer.area);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
prdata->play_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
- KIRKWOOD_PLAYBACK_WIN, addr, priv->dram);
+ KIRKWOOD_PLAYBACK_WIN, addr, dram);
} else {
prdata->rec_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
- KIRKWOOD_RECORD_WIN, addr, priv->dram);
+ KIRKWOOD_RECORD_WIN, addr, dram);
}
return 0;
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index dea5aa4aa64..f39d7dd9fbc 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
platform_driver_unregister(&mxs_pcm_driver);
}
module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 76dc74d24fc..d775b0761e0 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -210,7 +210,7 @@ int mxs_saif_put_mclk(unsigned int saif_id)
return -EBUSY;
}
- clk_disable(saif->clk);
+ clk_disable_unprepare(saif->clk);
/* disable MCLK output */
__raw_writel(BM_SAIF_CTRL_CLKGATE,
@@ -264,7 +264,7 @@ int mxs_saif_get_mclk(unsigned int saif_id, unsigned int mclk,
if (ret)
return ret;
- ret = clk_enable(saif->clk);
+ ret = clk_prepare_enable(saif->clk);
if (ret)
return ret;
@@ -625,13 +625,6 @@ static int mxs_saif_probe(struct platform_device *pdev)
if (pdev->id >= ARRAY_SIZE(mxs_saif))
return -EINVAL;
- pdata = pdev->dev.platform_data;
- if (pdata && pdata->init) {
- ret = pdata->init();
- if (ret)
- return ret;
- }
-
saif = kzalloc(sizeof(*saif), GFP_KERNEL);
if (!saif)
return -ENOMEM;
@@ -639,12 +632,17 @@ static int mxs_saif_probe(struct platform_device *pdev)
mxs_saif[pdev->id] = saif;
saif->id = pdev->id;
- saif->master_id = saif->id;
- if (pdata && pdata->get_master_id) {
- saif->master_id = pdata->get_master_id(saif->id);
+ pdata = pdev->dev.platform_data;
+ if (pdata && !pdata->master_mode) {
+ saif->master_id = pdata->master_id;
if (saif->master_id < 0 ||
- saif->master_id >= ARRAY_SIZE(mxs_saif))
+ saif->master_id >= ARRAY_SIZE(mxs_saif) ||
+ saif->master_id == saif->id) {
+ dev_err(&pdev->dev, "get wrong master id\n");
return -EINVAL;
+ }
+ } else {
+ saif->master_id = saif->id;
}
saif->clk = clk_get(&pdev->dev, NULL);
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 7fbeaec06eb..1c57f6630a4 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 9c0edad90d8..a4e3237956e 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
if (ret)
goto out3;
- mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+ /* enable ac97 multifunction pin */
+ mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
return 0;
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index ffd2242e305..a0f7d3cfa47 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
config SND_SOC_RAUMFELD
tristate "SoC Audio support Raumfeld audio adapter"
depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+ depends on I2C && SPI_MASTER
select SND_PXA_SOC_SSP
select SND_SOC_CS4270
select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
config SND_PXA2XX_SOC_HX4700
tristate "SoC Audio support for HP iPAQ hx4700"
- depends on SND_PXA2XX_SOC && MACH_H4700
+ depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
select SND_PXA2XX_SOC_I2S
select SND_SOC_AK4641
help
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index 65c124831a0..c664e33fb6d 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
snd_soc_card_hx4700.dev = &pdev->dev;
ret = snd_soc_register_card(&snd_soc_card_hx4700);
if (ret)
- return ret;
+ gpio_free_array(hx4700_audio_gpios,
+ ARRAY_SIZE(hx4700_audio_gpios));
- return 0;
+ return ret;
}
static int __devexit hx4700_audio_remove(struct platform_device *pdev)
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
index 1826acf20f7..8e523fd9189 100644
--- a/sound/soc/samsung/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int err;
/* These endpoints are not being used. */
snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
.dai_link = &jive_dai,
.num_links = 1,
- .dapm_widgtets = wm8750_dapm_widgets,
+ .dapm_widgets = wm8750_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
index 3a0dbfc793f..8bd1dc5706b 100644
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/module.h>
#include <sound/soc.h>
static struct snd_soc_card smdk2443;
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index f75e43997d5..ad9ac42522e 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -9,6 +9,7 @@
#include "../codecs/wm8994.h"
#include <sound/pcm_params.h>
+#include <linux/module.h>
/*
* Default CFG switch settings to use this driver:
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index 85bf541a771..4b8e35410eb 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
- snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+ snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a5d3685a5d3..a25fa63ce9a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
struct snd_soc_card *card = dev_get_drvdata(dev);
int i, ac97_control = 0;
+ /* If the initialization of this soc device failed, there is no codec
+ * associated with it. Just bail out in this case.
+ */
+ if (list_empty(&card->codec_dev_list))
+ return 0;
+
/* AC97 devices might have other drivers hanging off them so
* need to resume immediately. Other drivers don't have that
* problem and may take a substantial amount of time to resume
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 0c12b98484b..4220bb0f273 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
}
EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+ .formats = 0xffffffff,
+ .channels_min = 1,
+ .channels_max = UINT_MAX,
+
+ /* Random values to keep userspace happy when checking constraints */
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .buffer_bytes_max = 128*1024,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = PAGE_SIZE*2,
+ .periods_min = 2,
+ .periods_max = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+ return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+ .open = dummy_dma_open,
+ .ioctl = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+ .ops = &dummy_dma_ops,
+};
static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
{
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 6ce277860fd..c6e81fb928e 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -29,7 +29,7 @@ MODULE_DESCRIPTION("Core sound module");
MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
-static char *sound_devnode(struct device *dev, mode_t *mode)
+static char *sound_devnode(struct device *dev, umode_t *mode)
{
if (MAJOR(dev->devt) == SOUND_MAJOR)
return NULL;
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
index c7dca7b0b9f..ac2d5e10f1a 100644
--- a/sound/usb/6fire/chip.c
+++ b/sound/usb/6fire/chip.c
@@ -211,22 +211,11 @@ static struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
-static struct usb_driver driver = {
+static struct usb_driver usb_driver = {
.name = "snd-usb-6fire",
.probe = usb6fire_chip_probe,
.disconnect = usb6fire_chip_disconnect,
.id_table = device_table,
};
-static int __init usb6fire_chip_init(void)
-{
- return usb_register(&driver);
-}
-
-static void __exit usb6fire_chip_cleanup(void)
-{
- usb_deregister(&driver);
-}
-
-module_init(usb6fire_chip_init);
-module_exit(usb6fire_chip_cleanup);
+module_usb_driver(usb_driver);
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 3eb605bd950..457fb274ff9 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -538,16 +538,5 @@ static struct usb_driver snd_usb_driver = {
.id_table = snd_usb_id_table,
};
-static int __init snd_module_init(void)
-{
- return usb_register(&snd_usb_driver);
-}
-
-static void __exit snd_module_exit(void)
-{
- usb_deregister(&snd_usb_driver);
-}
-
-module_init(snd_module_init)
-module_exit(snd_module_exit)
+module_usb_driver(snd_usb_driver);
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index c0609c21030..4c11da911a1 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -1387,16 +1387,4 @@ static struct usb_driver ua101_driver = {
#endif
};
-static int __init alsa_card_ua101_init(void)
-{
- return usb_register(&ua101_driver);
-}
-
-static void __exit alsa_card_ua101_exit(void)
-{
- usb_deregister(&ua101_driver);
- mutex_destroy(&devices_mutex);
-}
-
-module_init(alsa_card_ua101_init);
-module_exit(alsa_card_ua101_exit);
+module_usb_driver(ua101_driver);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index b61945f3af9..32d2a21f2e3 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1633,6 +1633,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ /* Roland GAIA SH-01 */
+ USB_DEVICE(0x0582, 0x0111),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Roland",
+ .product_name = "GAIA",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
USB_DEVICE(0x0582, 0x0113),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
/* .vendor_name = "BOSS", */
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 726c1a7b89b..625f7ca6a89 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -772,16 +772,4 @@ static struct usb_driver snd_us122l_usb_driver = {
.supports_autosuspend = 1
};
-
-static int __init snd_us122l_module_init(void)
-{
- return usb_register(&snd_us122l_usb_driver);
-}
-
-static void __exit snd_us122l_module_exit(void)
-{
- usb_deregister(&snd_us122l_usb_driver);
-}
-
-module_init(snd_us122l_module_init)
-module_exit(snd_us122l_module_exit)
+module_usb_driver(snd_us122l_usb_driver);
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index cbd37f2c76d..0c738ed3ed3 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -459,15 +459,4 @@ static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr)
}
}
-static int __init snd_usX2Y_module_init(void)
-{
- return usb_register(&snd_usX2Y_usb_driver);
-}
-
-static void __exit snd_usX2Y_module_exit(void)
-{
- usb_deregister(&snd_usX2Y_usb_driver);
-}
-
-module_init(snd_usX2Y_module_init)
-module_exit(snd_usX2Y_module_exit)
+module_usb_driver(snd_usX2Y_usb_driver);
diff --git a/tools/perf/Documentation/examples.txt b/tools/perf/Documentation/examples.txt
index 8eb6c489fb1..77f95276242 100644
--- a/tools/perf/Documentation/examples.txt
+++ b/tools/perf/Documentation/examples.txt
@@ -17,8 +17,8 @@ titan:~> perf list
kmem:kmem_cache_alloc_node [Tracepoint event]
kmem:kfree [Tracepoint event]
kmem:kmem_cache_free [Tracepoint event]
- kmem:mm_page_free_direct [Tracepoint event]
- kmem:mm_pagevec_free [Tracepoint event]
+ kmem:mm_page_free [Tracepoint event]
+ kmem:mm_page_free_batched [Tracepoint event]
kmem:mm_page_alloc [Tracepoint event]
kmem:mm_page_alloc_zone_locked [Tracepoint event]
kmem:mm_page_pcpu_drain [Tracepoint event]
@@ -29,15 +29,15 @@ measured. For example the page alloc/free properties of a 'hackbench
run' are:
titan:~> perf stat -e kmem:mm_page_pcpu_drain -e kmem:mm_page_alloc
- -e kmem:mm_pagevec_free -e kmem:mm_page_free_direct ./hackbench 10
+ -e kmem:mm_page_free_batched -e kmem:mm_page_free ./hackbench 10
Time: 0.575
Performance counter stats for './hackbench 10':
13857 kmem:mm_page_pcpu_drain
27576 kmem:mm_page_alloc
- 6025 kmem:mm_pagevec_free
- 20934 kmem:mm_page_free_direct
+ 6025 kmem:mm_page_free_batched
+ 20934 kmem:mm_page_free
0.613972165 seconds time elapsed
@@ -45,8 +45,8 @@ You can observe the statistical properties as well, by using the
'repeat the workload N times' feature of perf stat:
titan:~> perf stat --repeat 5 -e kmem:mm_page_pcpu_drain -e
- kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
- kmem:mm_page_free_direct ./hackbench 10
+ kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+ kmem:mm_page_free ./hackbench 10
Time: 0.627
Time: 0.644
Time: 0.564
@@ -57,8 +57,8 @@ You can observe the statistical properties as well, by using the
12920 kmem:mm_page_pcpu_drain ( +- 3.359% )
25035 kmem:mm_page_alloc ( +- 3.783% )
- 6104 kmem:mm_pagevec_free ( +- 0.934% )
- 18376 kmem:mm_page_free_direct ( +- 4.941% )
+ 6104 kmem:mm_page_free_batched ( +- 0.934% )
+ 18376 kmem:mm_page_free ( +- 4.941% )
0.643954516 seconds time elapsed ( +- 2.363% )
@@ -158,15 +158,15 @@ Or you can observe the whole system's page allocations for 10
seconds:
titan:~/git> perf stat -a -e kmem:mm_page_pcpu_drain -e
-kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
-kmem:mm_page_free_direct sleep 10
+kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+kmem:mm_page_free sleep 10
Performance counter stats for 'sleep 10':
171585 kmem:mm_page_pcpu_drain
322114 kmem:mm_page_alloc
- 73623 kmem:mm_pagevec_free
- 254115 kmem:mm_page_free_direct
+ 73623 kmem:mm_page_free_batched
+ 254115 kmem:mm_page_free
10.000591410 seconds time elapsed
@@ -174,15 +174,15 @@ Or observe how fluctuating the page allocations are, via statistical
analysis done over ten 1-second intervals:
titan:~/git> perf stat --repeat 10 -a -e kmem:mm_page_pcpu_drain -e
- kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
- kmem:mm_page_free_direct sleep 1
+ kmem:mm_page_alloc -e kmem:mm_page_free_batched -e
+ kmem:mm_page_free sleep 1
Performance counter stats for 'sleep 1' (10 runs):
17254 kmem:mm_page_pcpu_drain ( +- 3.709% )
34394 kmem:mm_page_alloc ( +- 4.617% )
- 7509 kmem:mm_pagevec_free ( +- 4.820% )
- 25653 kmem:mm_page_free_direct ( +- 3.672% )
+ 7509 kmem:mm_page_free_batched ( +- 4.820% )
+ 25653 kmem:mm_page_free ( +- 3.672% )
1.058135029 seconds time elapsed ( +- 3.089% )
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index fe6762ed56b..c89f9e1453f 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -22,7 +22,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-d::
--dsos=<dso[,dso...]>::
@@ -66,7 +66,7 @@ OPTIONS
used. This interfaces starts by centering on the line with more
samples, TAB/UNTAB cycles through the lines with more samples.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
index cc22325ffd1..25c52efcc7f 100644
--- a/tools/perf/Documentation/perf-buildid-list.txt
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -26,7 +26,7 @@ OPTIONS
Show only DSOs with hits.
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-f::
--force::
Don't do ownership validation.
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 0cada9e053d..0507ec7bad7 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -18,7 +18,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index a52fcde894c..7c8fbbf3f61 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -23,7 +23,7 @@ OPTIONS
-------
-i <file>::
--input=<file>::
- Select the input file (default: perf.data)
+ Select the input file (default: perf.data unless stdin is a fifo)
--caller::
Show per-callsite statistics
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 4a26a2f3a6a..d6b2a4f2108 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -29,7 +29,7 @@ COMMON OPTIONS
-i::
--input=<file>::
- Input file name.
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a520f82529..2937f7e14bb 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -89,7 +89,7 @@ OPTIONS
-m::
--mmap-pages=::
- Number of mmap data pages.
+ Number of mmap data pages. Must be a power of two.
-g::
--call-graph::
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 212f24d672e..9b430e98712 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -19,7 +19,7 @@ OPTIONS
-------
-i::
--input=::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
@@ -39,7 +39,7 @@ OPTIONS
-T::
--threads::
Show per-thread event counters
--C::
+-c::
--comms=::
Only consider symbols in these comms. CSV that understands
file://filename entries.
@@ -80,9 +80,10 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
--g [type,min,order]::
+-g [type,min[,limit],order]::
--call-graph::
- Display call chains using type, min percent threshold and order.
+ Display call chains using type, min percent threshold, optional print
+ limit and order.
type can be either:
- flat: single column, linear exposure of call chains.
- graph: use a graph tree, displaying absolute overhead rates.
@@ -128,7 +129,7 @@ OPTIONS
--symfs=<directory>::
Look for files with symbols relative to this directory.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 5b212b57f70..8ff4df95695 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -40,7 +40,7 @@ OPTIONS
-------
-i::
--input=<file>::
- Input file name. (default: perf.data)
+ Input file name. (default: perf.data unless stdin is a fifo)
-v::
--verbose::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index dec87ecb530..2f6cef43da2 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -106,7 +106,7 @@ OPTIONS
-i::
--input=::
- Input file name.
+ Input file name. (default: perf.data unless stdin is a fifo)
-d::
--debug-mode::
@@ -182,12 +182,17 @@ OPTIONS
--hide-call-graph::
When printing symbols do not display call chain.
--c::
+-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
CPUs are specified with -: 0-2. Default is to report samples on all
CPUs.
+-c::
+--comms=::
+ Only display events for these comms. CSV that understands
+ file://filename entries.
+
-I::
--show-info::
Display extended information about the perf.data file. This adds
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index 2c3b462f64b..b24ac40fcd5 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -8,13 +8,19 @@ perf-test - Runs sanity tests.
SYNOPSIS
--------
[verse]
-'perf test <options>'
+'perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]'
DESCRIPTION
-----------
This command does assorted sanity tests, initially through linked routines but
also will look for a directory with more tests in the form of scripts.
+To get a list of available tests use 'perf test list'; specifying a test name
+fragment will show all tests that have it.
+
+To run just specific tests, specify test name fragments or the numbers obtained
+from 'perf test list'.
+
OPTIONS
-------
-v::
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index d7b79e2ba2a..1632b0efc75 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -27,7 +27,7 @@ OPTIONS
Select the output file (default: output.svg)
-i::
--input=::
- Select the input file (default: perf.data)
+ Select the input file (default: perf.data unless stdin is a fifo)
-w::
--width=::
Select the width of the SVG file (default: 1000)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b98e3075646..ac86d67b636 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -278,6 +278,7 @@ LIB_H += util/strbuf.h
LIB_H += util/strlist.h
LIB_H += util/strfilter.h
LIB_H += util/svghelper.h
+LIB_H += util/tool.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 48ae0c5e3f7..7cdd61d0e27 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -9,7 +9,10 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <stdlib.h>
+#ifndef __UCLIBC__
#include <libio.h>
+#endif
#include <dwarf-regs.h>
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 46b4c24f338..214ba7f9f57 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,32 +27,32 @@
#include "util/sort.h"
#include "util/hist.h"
#include "util/session.h"
+#include "util/tool.h"
#include <linux/bitmap.h>
-static char const *input_name = "perf.data";
-
-static bool force, use_tui, use_stdio;
-
-static bool full_paths;
-
-static bool print_line;
-
-static const char *sym_hist_filter;
-
-static const char *cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+struct perf_annotate {
+ struct perf_tool tool;
+ char const *input_name;
+ bool force, use_tui, use_stdio;
+ bool full_paths;
+ bool print_line;
+ const char *sym_hist_filter;
+ const char *cpu_list;
+ DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
-static int perf_evlist__add_sample(struct perf_evlist *evlist,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct addr_location *al)
+static int perf_evsel__add_sample(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct addr_location *al,
+ struct perf_annotate *ann)
{
struct hist_entry *he;
int ret;
- if (sym_hist_filter != NULL &&
- (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
+ if (ann->sym_hist_filter != NULL &&
+ (al->sym == NULL ||
+ strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
@@ -69,8 +69,7 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist,
ret = 0;
if (he->ms.sym != NULL) {
struct annotation *notes = symbol__annotation(he->ms.sym);
- if (notes->src == NULL &&
- symbol__alloc_hist(he->ms.sym, evlist->nr_entries) < 0)
+ if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -81,25 +80,26 @@ static int perf_evlist__add_sample(struct perf_evlist *evlist,
return ret;
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol__annotate_init) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
return 0;
- if (!al.filtered &&
- perf_evlist__add_sample(session->evlist, sample, evsel, &al)) {
+ if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
@@ -108,14 +108,15 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static int hist_entry__tty_annotate(struct hist_entry *he, int evidx)
+static int hist_entry__tty_annotate(struct hist_entry *he, int evidx,
+ struct perf_annotate *ann)
{
return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
- print_line, full_paths, 0, 0);
+ ann->print_line, ann->full_paths, 0, 0);
}
static void hists__find_annotations(struct hists *self, int evidx,
- int nr_events)
+ struct perf_annotate *ann)
{
struct rb_node *nd = rb_first(&self->entries), *next;
int key = K_RIGHT;
@@ -138,8 +139,7 @@ find_next:
}
if (use_browser > 0) {
- key = hist_entry__tui_annotate(he, evidx, nr_events,
- NULL, NULL, 0);
+ key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0);
switch (key) {
case K_RIGHT:
next = rb_next(nd);
@@ -154,7 +154,7 @@ find_next:
if (next != NULL)
nd = next;
} else {
- hist_entry__tty_annotate(he, evidx);
+ hist_entry__tty_annotate(he, evidx, ann);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
@@ -167,33 +167,26 @@ find_next:
}
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .fork = perf_event__process_task,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
-};
-
-static int __cmd_annotate(void)
+static int __cmd_annotate(struct perf_annotate *ann)
{
int ret;
struct perf_session *session;
struct perf_evsel *pos;
u64 total_nr_samples;
- session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+ session = perf_session__new(ann->input_name, O_RDONLY,
+ ann->force, false, &ann->tool);
if (session == NULL)
return -ENOMEM;
- if (cpu_list) {
- ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+ if (ann->cpu_list) {
+ ret = perf_session__cpu_bitmap(session, ann->cpu_list,
+ ann->cpu_bitmap);
if (ret)
goto out_delete;
}
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &ann->tool);
if (ret)
goto out_delete;
@@ -217,13 +210,12 @@ static int __cmd_annotate(void)
total_nr_samples += nr_samples;
hists__collapse_resort(hists);
hists__output_resort(hists);
- hists__find_annotations(hists, pos->idx,
- session->evlist->nr_entries);
+ hists__find_annotations(hists, pos->idx, ann);
}
}
if (total_nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", input_name);
+ ui__warning("The %s file has no samples!\n", session->filename);
goto out_delete;
}
out_delete:
@@ -247,29 +239,41 @@ static const char * const annotate_usage[] = {
NULL
};
-static const struct option options[] = {
- OPT_STRING('i', "input", &input_name, "file",
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_annotate annotate = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .fork = perf_event__process_task,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
+ };
+ const struct option options[] = {
+ OPT_STRING('i', "input", &annotate.input_name, "file",
"input file name"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
- OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
+ OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
- OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('l', "print-line", &print_line,
+ OPT_BOOLEAN('l', "print-line", &annotate.print_line,
"print matching source lines (may be slow)"),
- OPT_BOOLEAN('P', "full-paths", &full_paths,
+ OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
"Don't shorten the displayed pathnames"),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
@@ -279,15 +283,13 @@ static const struct option options[] = {
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_END()
-};
+ };
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, options, annotate_usage, 0);
- if (use_stdio)
+ if (annotate.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (annotate.use_tui)
use_browser = 1;
setup_browser(true);
@@ -308,7 +310,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
if (argc > 1)
usage_with_options(annotate_usage, options);
- sym_hist_filter = argv[0];
+ annotate.sym_hist_filter = argv[0];
}
if (field_sep && *field_sep == '.') {
@@ -316,5 +318,5 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
return -1;
}
- return __cmd_annotate();
+ return __cmd_annotate(&annotate);
}
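
Note on the pattern above: the annotate conversion embeds a struct perf_tool inside struct perf_annotate and recovers the outer per-command state in the sample callback with container_of(). Below is a minimal standalone sketch of that idiom, assuming the usual offsetof-based definition used in the kernel; the struct and function names here are illustrative, not the perf ones.

#include <stddef.h>
#include <stdio.h>

/* offsetof-based container_of, as used in the kernel and in perf */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {				/* stands in for struct perf_tool */
	int (*sample)(struct tool *tool, int cpu);
};

struct annotate_state {			/* stands in for struct perf_annotate */
	struct tool tool;		/* must be embedded, not pointed to */
	const char *cpu_list;
};

static int sample_cb(struct tool *tool, int cpu)
{
	/* recover the outer per-command state from the embedded tool */
	struct annotate_state *ann = container_of(tool, struct annotate_state, tool);

	printf("sample on cpu %d, cpu_list=%s\n", cpu, ann->cpu_list);
	return 0;
}

int main(void)
{
	struct annotate_state ann = { .tool = { .sample = sample_cb },
				      .cpu_list = "0-3" };

	/* callers only ever see the inner struct tool pointer */
	return ann.tool.sample(&ann.tool, 1);
}
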
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index cb690a65bf0..52480467e9f 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -18,7 +18,7 @@
#include <libelf.h>
-static char const *input_name = "perf.data";
+static const char *input_name;
static bool force;
static bool show_kernel;
static bool with_hits;
@@ -39,24 +39,6 @@ static const struct option options[] = {
OPT_END()
};
-static int perf_session__list_build_ids(void)
-{
- struct perf_session *session;
-
- session = perf_session__new(input_name, O_RDONLY, force, false,
- &build_id__mark_dso_hit_ops);
- if (session == NULL)
- return -1;
-
- if (with_hits)
- perf_session__process_events(session, &build_id__mark_dso_hit_ops);
-
- perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
-
- perf_session__delete(session);
- return 0;
-}
-
static int sysfs__fprintf_build_id(FILE *fp)
{
u8 kallsyms_build_id[BUILD_ID_SIZE];
@@ -85,17 +67,36 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
return fprintf(fp, "%s\n", sbuild_id);
}
-static int __cmd_buildid_list(void)
+static int perf_session__list_build_ids(void)
{
- if (show_kernel)
- return sysfs__fprintf_build_id(stdout);
+ struct perf_session *session;
elf_version(EV_CURRENT);
+
+ session = perf_session__new(input_name, O_RDONLY, force, false,
+ &build_id__mark_dso_hit_ops);
+ if (session == NULL)
+ return -1;
+
/*
- * See if this is an ELF file first:
- */
- if (filename__fprintf_build_id(input_name, stdout))
- return 0;
+ * See if this is an ELF file first:
+ */
+ if (filename__fprintf_build_id(session->filename, stdout))
+ goto out;
+
+ if (with_hits)
+ perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+
+ perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
+out:
+ perf_session__delete(session);
+ return 0;
+}
+
+static int __cmd_buildid_list(void)
+{
+ if (show_kernel)
+ return sysfs__fprintf_build_id(stdout);
return perf_session__list_build_ids();
}
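
The reordered perf_session__list_build_ids() above now calls elf_version(EV_CURRENT) before it tries to treat the input as an ELF file. A hedged sketch of that libelf handshake follows; only elf_version()/elf_begin()/elf_kind()/elf_end() are assumed from libelf, the file name and error handling are illustrative.

#include <fcntl.h>
#include <libelf.h>
#include <stdio.h>
#include <unistd.h>

/* Return 1 if 'name' looks like an ELF object, 0 otherwise. */
static int file_is_elf(const char *name)
{
	int fd, ret = 0;
	Elf *elf;

	/* libelf refuses to do anything until the version is negotiated */
	if (elf_version(EV_CURRENT) == EV_NONE)
		return 0;

	fd = open(name, O_RDONLY);
	if (fd < 0)
		return 0;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (elf) {
		ret = elf_kind(elf) == ELF_K_ELF;
		elf_end(elf);
	}
	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "perf.data";

	printf("%s: %s\n", name, file_is_elf(name) ? "ELF" : "not ELF");
	return 0;
}
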
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index b39f3a1ee7d..4f19513d7dd 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -9,7 +9,9 @@
#include "util/debug.h"
#include "util/event.h"
#include "util/hist.h"
+#include "util/evsel.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/sort.h"
#include "util/symbol.h"
#include "util/util.h"
@@ -30,14 +32,15 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(union perf_event *event,
+static int diff__process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -46,16 +49,16 @@ static int diff__process_sample_event(union perf_event *event,
if (al.filtered || al.sym == NULL)
return 0;
- if (hists__add_entry(&session->hists, &al, sample->period)) {
+ if (hists__add_entry(&evsel->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_diff = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
@@ -145,13 +148,13 @@ static int __cmd_diff(void)
int ret, i;
struct perf_session *session[2];
- session[0] = perf_session__new(input_old, O_RDONLY, force, false, &event_ops);
- session[1] = perf_session__new(input_new, O_RDONLY, force, false, &event_ops);
+ session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
+ session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
- ret = perf_session__process_events(session[i], &event_ops);
+ ret = perf_session__process_events(session[i], &perf_diff);
if (ret)
goto out_delete;
}
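
builtin-diff shows the smallest form of the new interface: struct perf_tool is just a table of per-event-type callbacks handed to the session layer, which dispatches each event to the right slot. The sketch below models only the shape of that dispatch with invented names (event_kind, run_events); it is not the perf API.

#include <stdio.h>

struct machine;			/* opaque here, as in the tool callbacks */

enum event_kind { EV_SAMPLE, EV_MMAP, EV_COMM };

struct event {
	enum event_kind kind;
	long period;
};

/* a table of callbacks, one per event kind, like struct perf_tool */
struct tool {
	int (*sample)(struct tool *tool, struct event *ev, struct machine *m);
	int (*mmap)(struct tool *tool, struct event *ev, struct machine *m);
	int (*comm)(struct tool *tool, struct event *ev, struct machine *m);
};

static int run_events(struct tool *tool, struct event *evs, int n)
{
	for (int i = 0; i < n; i++) {
		struct event *ev = &evs[i];
		int err = 0;

		switch (ev->kind) {	/* the session layer does this dispatch */
		case EV_SAMPLE: err = tool->sample(tool, ev, NULL); break;
		case EV_MMAP:   err = tool->mmap(tool, ev, NULL);   break;
		case EV_COMM:   err = tool->comm(tool, ev, NULL);   break;
		}
		if (err)
			return err;
	}
	return 0;
}

static int count_period(struct tool *t, struct event *ev, struct machine *m)
{
	(void)t; (void)m;
	printf("sample, period %ld\n", ev->period);
	return 0;
}

static int ignore(struct tool *t, struct event *ev, struct machine *m)
{
	(void)t; (void)ev; (void)m;
	return 0;
}

int main(void)
{
	struct tool diff_tool = { .sample = count_period, .mmap = ignore, .comm = ignore };
	struct event evs[] = { { EV_MMAP, 0 }, { EV_SAMPLE, 42 }, { EV_COMM, 0 } };

	return run_events(&diff_tool, evs, 3);
}
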
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 4c5e9e04a41..26760322c4f 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -15,7 +15,7 @@
#include "util/parse-options.h"
#include "util/session.h"
-static char const *input_name = "perf.data";
+static const char *input_name;
static int __cmd_evlist(void)
{
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 8dfc12bb119..09c106193e6 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -9,6 +9,7 @@
#include "perf.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/debug.h"
#include "util/parse-options.h"
@@ -16,8 +17,9 @@
static char const *input_name = "-";
static bool inject_build_ids;
-static int perf_event__repipe_synth(union perf_event *event,
- struct perf_session *session __used)
+static int perf_event__repipe_synth(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct machine *machine __used)
{
uint32_t size;
void *buf = event;
@@ -36,41 +38,70 @@ static int perf_event__repipe_synth(union perf_event *event,
return 0;
}
-static int perf_event__repipe(union perf_event *event,
+static int perf_event__repipe_op2_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session __used)
+{
+ return perf_event__repipe_synth(tool, event, NULL);
+}
+
+static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
+ union perf_event *event)
+{
+ return perf_event__repipe_synth(tool, event, NULL);
+}
+
+static int perf_event__repipe_tracing_data_synth(union perf_event *event,
+ struct perf_session *session __used)
+{
+ return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe_attr(union perf_event *event,
+ struct perf_evlist **pevlist __used)
+{
+ return perf_event__repipe_synth(NULL, event, NULL);
+}
+
+static int perf_event__repipe(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- return perf_event__repipe_synth(event, session);
+ return perf_event__repipe_synth(tool, event, machine);
}
-static int perf_event__repipe_sample(union perf_event *event,
+static int perf_event__repipe_sample(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
- return perf_event__repipe_synth(event, session);
+ return perf_event__repipe_synth(tool, event, machine);
}
-static int perf_event__repipe_mmap(union perf_event *event,
+static int perf_event__repipe_mmap(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
int err;
- err = perf_event__process_mmap(event, sample, session);
- perf_event__repipe(event, sample, session);
+ err = perf_event__process_mmap(tool, event, sample, machine);
+ perf_event__repipe(tool, event, sample, machine);
return err;
}
-static int perf_event__repipe_task(union perf_event *event,
+static int perf_event__repipe_task(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
int err;
- err = perf_event__process_task(event, sample, session);
- perf_event__repipe(event, sample, session);
+ err = perf_event__process_task(tool, event, sample, machine);
+ perf_event__repipe(tool, event, sample, machine);
return err;
}
@@ -80,7 +111,7 @@ static int perf_event__repipe_tracing_data(union perf_event *event,
{
int err;
- perf_event__repipe_synth(event, session);
+ perf_event__repipe_synth(NULL, event, NULL);
err = perf_event__process_tracing_data(event, session);
return err;
@@ -100,10 +131,10 @@ static int dso__read_build_id(struct dso *self)
return -1;
}
-static int dso__inject_build_id(struct dso *self, struct perf_session *session)
+static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
+ struct machine *machine)
{
u16 misc = PERF_RECORD_MISC_USER;
- struct machine *machine;
int err;
if (dso__read_build_id(self) < 0) {
@@ -111,17 +142,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
return -1;
}
- machine = perf_session__find_host_machine(session);
- if (machine == NULL) {
- pr_err("Can't find machine for session\n");
- return -1;
- }
-
if (self->kernel)
misc = PERF_RECORD_MISC_KERNEL;
- err = perf_event__synthesize_build_id(self, misc, perf_event__repipe,
- machine, session);
+ err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe,
+ machine);
if (err) {
pr_err("Can't synthesize build_id event for %s\n", self->long_name);
return -1;
@@ -130,10 +155,11 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
return 0;
}
-static int perf_event__inject_buildid(union perf_event *event,
+static int perf_event__inject_buildid(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
struct thread *thread;
@@ -141,21 +167,21 @@ static int perf_event__inject_buildid(union perf_event *event,
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- thread = perf_session__findnew(session, event->ip.pid);
+ thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
goto repipe;
}
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
if (al.map != NULL) {
if (!al.map->dso->hit) {
al.map->dso->hit = 1;
if (map__load(al.map, NULL) >= 0) {
- dso__inject_build_id(al.map->dso, session);
+ dso__inject_build_id(al.map->dso, tool, machine);
/*
* If this fails, too bad, let the other side
* account this as unresolved.
@@ -168,24 +194,24 @@ static int perf_event__inject_buildid(union perf_event *event,
}
repipe:
- perf_event__repipe(event, sample, session);
+ perf_event__repipe(tool, event, sample, machine);
return 0;
}
-struct perf_event_ops inject_ops = {
+struct perf_tool perf_inject = {
.sample = perf_event__repipe_sample,
.mmap = perf_event__repipe,
.comm = perf_event__repipe,
.fork = perf_event__repipe,
.exit = perf_event__repipe,
.lost = perf_event__repipe,
- .read = perf_event__repipe,
+ .read = perf_event__repipe_sample,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
- .attr = perf_event__repipe_synth,
- .event_type = perf_event__repipe_synth,
- .tracing_data = perf_event__repipe_synth,
- .build_id = perf_event__repipe_synth,
+ .attr = perf_event__repipe_attr,
+ .event_type = perf_event__repipe_event_type_synth,
+ .tracing_data = perf_event__repipe_tracing_data_synth,
+ .build_id = perf_event__repipe_op2_synth,
};
extern volatile int session_done;
@@ -203,17 +229,17 @@ static int __cmd_inject(void)
signal(SIGINT, sig_handler);
if (inject_build_ids) {
- inject_ops.sample = perf_event__inject_buildid;
- inject_ops.mmap = perf_event__repipe_mmap;
- inject_ops.fork = perf_event__repipe_task;
- inject_ops.tracing_data = perf_event__repipe_tracing_data;
+ perf_inject.sample = perf_event__inject_buildid;
+ perf_inject.mmap = perf_event__repipe_mmap;
+ perf_inject.fork = perf_event__repipe_task;
+ perf_inject.tracing_data = perf_event__repipe_tracing_data;
}
- session = perf_session__new(input_name, O_RDONLY, false, true, &inject_ops);
+ session = perf_session__new(input_name, O_RDONLY, false, true, &perf_inject);
if (session == NULL)
return -ENOMEM;
- ret = perf_session__process_events(session, &inject_ops);
+ ret = perf_session__process_events(session, &perf_inject);
perf_session__delete(session);
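
perf_event__repipe_synth() forwards the raw event bytes to the output descriptor, and the same short-write loop appears in builtin-record's write_output(). A minimal sketch of that loop, assuming a plain POSIX fd (the message and error handling are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Write 'size' bytes of 'buf' to fd, retrying on short writes. */
static int write_all(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -1;	/* perf die()s here; we just report */
		p    += ret;
		size -= ret;
	}
	return 0;
}

int main(void)
{
	const char msg[] = "repiped event bytes\n";

	/* an event would be forwarded as event->header.size raw bytes */
	if (write_all(STDOUT_FILENO, msg, strlen(msg)) < 0) {
		perror("write");
		return EXIT_FAILURE;
	}
	return 0;
}
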
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 225e963df10..fe1ad8f2196 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -7,6 +7,7 @@
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -18,7 +19,7 @@
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
-static char const *input_name = "perf.data";
+static const char *input_name;
static int alloc_flag;
static int caller_flag;
@@ -303,12 +304,13 @@ static void process_raw_event(union perf_event *raw_event __used, void *data,
}
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -324,7 +326,7 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_kmem = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_samples = true,
@@ -483,7 +485,7 @@ static int __cmd_kmem(void)
{
int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_kmem);
if (session == NULL)
return -ENOMEM;
@@ -494,7 +496,7 @@ static int __cmd_kmem(void)
goto out_delete;
setup_pager();
- err = perf_session__process_events(session, &event_ops);
+ err = perf_session__process_events(session, &perf_kmem);
if (err != 0)
goto out_delete;
sort_result();
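
builtin-kmem (and builtin-lock below) switch from perf_session__findnew() to machine__findnew_thread(): thread objects are now looked up, and created on a miss, per machine keyed by pid. The sketch below shows that "find or create" idiom over a plain linked list; the real perf code uses an rbtree, so this is only the shape of the lookup.

#include <stdio.h>
#include <stdlib.h>

struct thread {
	int pid;
	struct thread *next;
};

struct machine {
	struct thread *threads;	/* perf uses an rbtree; a list keeps this short */
};

static struct thread *machine_findnew_thread(struct machine *m, int pid)
{
	struct thread *t;

	for (t = m->threads; t; t = t->next)	/* find... */
		if (t->pid == pid)
			return t;

	t = calloc(1, sizeof(*t));		/* ...or create on miss */
	if (!t)
		return NULL;
	t->pid = pid;
	t->next = m->threads;
	m->threads = t;
	return t;
}

int main(void)
{
	struct machine m = { NULL };
	struct thread *a = machine_findnew_thread(&m, 1234);
	struct thread *b = machine_findnew_thread(&m, 1234);

	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}
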
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 34d1e853829..032324a76b8 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -38,7 +38,7 @@ static const struct option kvm_options[] = {
OPT_BOOLEAN(0, "guest", &perf_guest,
"Collect guest os data"),
OPT_BOOLEAN(0, "host", &perf_host,
- "Collect guest os data"),
+ "Collect host os data"),
OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
"guest mount directory under which every guest os"
" instance has a subdir"),
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 899080ace26..2296c391d0f 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -12,6 +12,7 @@
#include "util/debug.h"
#include "util/session.h"
+#include "util/tool.h"
#include <sys/types.h>
#include <sys/prctl.h>
@@ -325,7 +326,7 @@ alloc_failed:
die("memory allocation failed\n");
}
-static char const *input_name = "perf.data";
+static const char *input_name;
struct raw_event_sample {
u32 size;
@@ -845,12 +846,13 @@ static void dump_info(void)
die("Unknown type of information\n");
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
- struct perf_session *s)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(s, sample->tid);
+ struct thread *thread = machine__findnew_thread(machine, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -863,7 +865,7 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static struct perf_event_ops eops = {
+static struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
.ordered_samples = true,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 710ae3d0a48..59d43abfbfe 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -46,7 +46,6 @@
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
#define DEFAULT_FUNC_FILTER "!_*"
-#define MAX_PATH_LEN 256
/* Session management structure */
static struct {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6ab58cc99d5..0abfb18b911 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -22,6 +22,7 @@
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
@@ -35,55 +36,36 @@ enum write_mode_t {
WRITE_APPEND
};
-static u64 user_interval = ULLONG_MAX;
-static u64 default_interval = 0;
-
-static unsigned int page_size;
-static unsigned int mmap_pages = UINT_MAX;
-static unsigned int user_freq = UINT_MAX;
-static int freq = 1000;
-static int output;
-static int pipe_output = 0;
-static const char *output_name = NULL;
-static bool group = false;
-static int realtime_prio = 0;
-static bool nodelay = false;
-static bool raw_samples = false;
-static bool sample_id_all_avail = true;
-static bool system_wide = false;
-static pid_t target_pid = -1;
-static pid_t target_tid = -1;
-static pid_t child_pid = -1;
-static bool no_inherit = false;
-static enum write_mode_t write_mode = WRITE_FORCE;
-static bool call_graph = false;
-static bool inherit_stat = false;
-static bool no_samples = false;
-static bool sample_address = false;
-static bool sample_time = false;
-static bool no_buildid = false;
-static bool no_buildid_cache = false;
-static struct perf_evlist *evsel_list;
-
-static long samples = 0;
-static u64 bytes_written = 0;
-
-static int file_new = 1;
-static off_t post_processing_offset;
-
-static struct perf_session *session;
-static const char *cpu_list;
-static const char *progname;
-
-static void advance_output(size_t size)
+struct perf_record {
+ struct perf_tool tool;
+ struct perf_record_opts opts;
+ u64 bytes_written;
+ const char *output_name;
+ struct perf_evlist *evlist;
+ struct perf_session *session;
+ const char *progname;
+ int output;
+ unsigned int page_size;
+ int realtime_prio;
+ enum write_mode_t write_mode;
+ bool no_buildid;
+ bool no_buildid_cache;
+ bool force;
+ bool file_new;
+ bool append_file;
+ long samples;
+ off_t post_processing_offset;
+};
+
+static void advance_output(struct perf_record *rec, size_t size)
{
- bytes_written += size;
+ rec->bytes_written += size;
}
-static void write_output(void *buf, size_t size)
+static void write_output(struct perf_record *rec, void *buf, size_t size)
{
while (size) {
- int ret = write(output, buf, size);
+ int ret = write(rec->output, buf, size);
if (ret < 0)
die("failed to write");
@@ -91,30 +73,33 @@ static void write_output(void *buf, size_t size)
size -= ret;
buf += ret;
- bytes_written += ret;
+ rec->bytes_written += ret;
}
}
-static int process_synthesized_event(union perf_event *event,
+static int process_synthesized_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *self __used)
+ struct machine *machine __used)
{
- write_output(event, event->header.size);
+ struct perf_record *rec = container_of(tool, struct perf_record, tool);
+ write_output(rec, event, event->header.size);
return 0;
}
-static void mmap_read(struct perf_mmap *md)
+static void perf_record__mmap_read(struct perf_record *rec,
+ struct perf_mmap *md)
{
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
- unsigned char *data = md->base + page_size;
+ unsigned char *data = md->base + rec->page_size;
unsigned long size;
void *buf;
if (old == head)
return;
- samples++;
+ rec->samples++;
size = head - old;
@@ -123,14 +108,14 @@ static void mmap_read(struct perf_mmap *md)
size = md->mask + 1 - (old & md->mask);
old += size;
- write_output(buf, size);
+ write_output(rec, buf, size);
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- write_output(buf, size);
+ write_output(rec, buf, size);
md->prev = old;
perf_mmap__write_tail(md, old);
@@ -149,17 +134,18 @@ static void sig_handler(int sig)
signr = sig;
}
-static void sig_atexit(void)
+static void perf_record__sig_exit(int exit_status __used, void *arg)
{
+ struct perf_record *rec = arg;
int status;
- if (child_pid > 0) {
+ if (rec->evlist->workload.pid > 0) {
if (!child_finished)
- kill(child_pid, SIGTERM);
+ kill(rec->evlist->workload.pid, SIGTERM);
wait(&status);
if (WIFSIGNALED(status))
- psignal(WTERMSIG(status), progname);
+ psignal(WTERMSIG(status), rec->progname);
}
if (signr == -1 || signr == SIGUSR1)
@@ -169,78 +155,6 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
-static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
-{
- struct perf_event_attr *attr = &evsel->attr;
- int track = !evsel->idx; /* only the first counter needs these */
-
- attr->disabled = 1;
- attr->inherit = !no_inherit;
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING |
- PERF_FORMAT_ID;
-
- attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-
- if (evlist->nr_entries > 1)
- attr->sample_type |= PERF_SAMPLE_ID;
-
- /*
- * We default some events to a 1 default interval. But keep
- * it a weak assumption overridable by the user.
- */
- if (!attr->sample_period || (user_freq != UINT_MAX &&
- user_interval != ULLONG_MAX)) {
- if (freq) {
- attr->sample_type |= PERF_SAMPLE_PERIOD;
- attr->freq = 1;
- attr->sample_freq = freq;
- } else {
- attr->sample_period = default_interval;
- }
- }
-
- if (no_samples)
- attr->sample_freq = 0;
-
- if (inherit_stat)
- attr->inherit_stat = 1;
-
- if (sample_address) {
- attr->sample_type |= PERF_SAMPLE_ADDR;
- attr->mmap_data = track;
- }
-
- if (call_graph)
- attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
-
- if (system_wide)
- attr->sample_type |= PERF_SAMPLE_CPU;
-
- if (sample_id_all_avail &&
- (sample_time || system_wide || !no_inherit || cpu_list))
- attr->sample_type |= PERF_SAMPLE_TIME;
-
- if (raw_samples) {
- attr->sample_type |= PERF_SAMPLE_TIME;
- attr->sample_type |= PERF_SAMPLE_RAW;
- attr->sample_type |= PERF_SAMPLE_CPU;
- }
-
- if (nodelay) {
- attr->watermark = 0;
- attr->wakeup_events = 1;
- }
-
- attr->mmap = track;
- attr->comm = track;
-
- if (target_pid == -1 && target_tid == -1 && !system_wide) {
- attr->disabled = 1;
- attr->enable_on_exec = 1;
- }
-}
-
static bool perf_evlist__equal(struct perf_evlist *evlist,
struct perf_evlist *other)
{
@@ -260,15 +174,17 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
return true;
}
-static void open_counters(struct perf_evlist *evlist)
+static void perf_record__open(struct perf_record *rec)
{
struct perf_evsel *pos, *first;
-
- if (evlist->cpus->map[0] < 0)
- no_inherit = true;
+ struct perf_evlist *evlist = rec->evlist;
+ struct perf_session *session = rec->session;
+ struct perf_record_opts *opts = &rec->opts;
first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ perf_evlist__config_attrs(evlist, opts);
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
struct xyarray *group_fd = NULL;
@@ -286,29 +202,27 @@ static void open_counters(struct perf_evlist *evlist)
*/
bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
- if (group && pos != first)
+ if (opts->group && pos != first)
group_fd = first->fd;
-
- config_attr(pos, evlist);
retry_sample_id:
- attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+ attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
- group_fd) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
+ opts->group, group_fd) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
exit(EXIT_FAILURE);
- } else if (err == ENODEV && cpu_list) {
+ } else if (err == ENODEV && opts->cpu_list) {
die("No such device - did you specify"
" an out-of-range profile CPU?\n");
- } else if (err == EINVAL && sample_id_all_avail) {
+ } else if (err == EINVAL && opts->sample_id_all_avail) {
/*
* Old kernel, no attr->sample_id_type_all field
*/
- sample_id_all_avail = false;
- if (!sample_time && !raw_samples && !time_needed)
+ opts->sample_id_all_avail = false;
+ if (!opts->sample_time && !opts->raw_samples && !time_needed)
attr->sample_type &= ~PERF_SAMPLE_TIME;
goto retry_sample_id;
@@ -358,10 +272,20 @@ try_again:
exit(-1);
}
- if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
+ if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+ if (errno == EPERM)
+ die("Permission error mapping pages.\n"
+ "Consider increasing "
+ "/proc/sys/kernel/perf_event_mlock_kb,\n"
+ "or try again with a smaller value of -m/--mmap_pages.\n"
+ "(current value: %d)\n", opts->mmap_pages);
+ else if (!is_power_of_2(opts->mmap_pages))
+ die("--mmap_pages/-m value must be a power of two.");
+
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ }
- if (file_new)
+ if (rec->file_new)
session->evlist = evlist;
else {
if (!perf_evlist__equal(session->evlist, evlist)) {
@@ -373,29 +297,32 @@ try_again:
perf_session__update_sample_type(session);
}
-static int process_buildids(void)
+static int process_buildids(struct perf_record *rec)
{
- u64 size = lseek(output, 0, SEEK_CUR);
+ u64 size = lseek(rec->output, 0, SEEK_CUR);
if (size == 0)
return 0;
- session->fd = output;
- return __perf_session__process_events(session, post_processing_offset,
- size - post_processing_offset,
+ rec->session->fd = rec->output;
+ return __perf_session__process_events(rec->session, rec->post_processing_offset,
+ size - rec->post_processing_offset,
size, &build_id__mark_dso_hit_ops);
}
-static void atexit_header(void)
+static void perf_record__exit(int status __used, void *arg)
{
- if (!pipe_output) {
- session->header.data_size += bytes_written;
-
- if (!no_buildid)
- process_buildids();
- perf_session__write_header(session, evsel_list, output, true);
- perf_session__delete(session);
- perf_evlist__delete(evsel_list);
+ struct perf_record *rec = arg;
+
+ if (!rec->opts.pipe_output) {
+ rec->session->header.data_size += rec->bytes_written;
+
+ if (!rec->no_buildid)
+ process_buildids(rec);
+ perf_session__write_header(rec->session, rec->evlist,
+ rec->output, true);
+ perf_session__delete(rec->session);
+ perf_evlist__delete(rec->evlist);
symbol__exit();
}
}
@@ -403,7 +330,7 @@ static void atexit_header(void)
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
int err;
- struct perf_session *psession = data;
+ struct perf_tool *tool = data;
if (machine__is_host(machine))
return;
@@ -416,8 +343,8 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
*method is used to avoid symbol missing when the first addr is
*in module instead of in guest kernel.
*/
- err = perf_event__synthesize_modules(process_synthesized_event,
- psession, machine);
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -426,12 +353,11 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
* We use _stext for guest kernel because guest kernel's /proc/kallsyms
* have no _text sometimes.
*/
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_text");
if (err < 0)
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- psession, machine,
- "_stext");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_stext");
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -442,73 +368,71 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void mmap_read_all(void)
+static void perf_record__mmap_read_all(struct perf_record *rec)
{
int i;
- for (i = 0; i < evsel_list->nr_mmaps; i++) {
- if (evsel_list->mmap[i].base)
- mmap_read(&evsel_list->mmap[i]);
+ for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+ if (rec->evlist->mmap[i].base)
+ perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
}
- if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
- write_output(&finished_round_event, sizeof(finished_round_event));
+ if (perf_header__has_feat(&rec->session->header, HEADER_TRACE_INFO))
+ write_output(rec, &finished_round_event, sizeof(finished_round_event));
}
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
{
struct stat st;
int flags;
- int err;
+ int err, output;
unsigned long waking = 0;
- int child_ready_pipe[2], go_pipe[2];
const bool forks = argc > 0;
- char buf;
struct machine *machine;
+ struct perf_tool *tool = &rec->tool;
+ struct perf_record_opts *opts = &rec->opts;
+ struct perf_evlist *evsel_list = rec->evlist;
+ const char *output_name = rec->output_name;
+ struct perf_session *session;
- progname = argv[0];
+ rec->progname = argv[0];
- page_size = sysconf(_SC_PAGE_SIZE);
+ rec->page_size = sysconf(_SC_PAGE_SIZE);
- atexit(sig_atexit);
+ on_exit(perf_record__sig_exit, rec);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGUSR1, sig_handler);
- if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
- perror("failed to create pipes");
- exit(-1);
- }
-
if (!output_name) {
if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
- pipe_output = 1;
+ opts->pipe_output = true;
else
- output_name = "perf.data";
+ rec->output_name = output_name = "perf.data";
}
if (output_name) {
if (!strcmp(output_name, "-"))
- pipe_output = 1;
+ opts->pipe_output = true;
else if (!stat(output_name, &st) && st.st_size) {
- if (write_mode == WRITE_FORCE) {
+ if (rec->write_mode == WRITE_FORCE) {
char oldname[PATH_MAX];
snprintf(oldname, sizeof(oldname), "%s.old",
output_name);
unlink(oldname);
rename(output_name, oldname);
}
- } else if (write_mode == WRITE_APPEND) {
- write_mode = WRITE_FORCE;
+ } else if (rec->write_mode == WRITE_APPEND) {
+ rec->write_mode = WRITE_FORCE;
}
}
flags = O_CREAT|O_RDWR;
- if (write_mode == WRITE_APPEND)
- file_new = 0;
+ if (rec->write_mode == WRITE_APPEND)
+ rec->file_new = 0;
else
flags |= O_TRUNC;
- if (pipe_output)
+ if (opts->pipe_output)
output = STDOUT_FILENO;
else
output = open(output_name, flags, S_IRUSR | S_IWUSR);
@@ -517,17 +441,21 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
+ rec->output = output;
+
session = perf_session__new(output_name, O_WRONLY,
- write_mode == WRITE_FORCE, false, NULL);
+ rec->write_mode == WRITE_FORCE, false, NULL);
if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
- if (!no_buildid)
+ rec->session = session;
+
+ if (!rec->no_buildid)
perf_header__set_feat(&session->header, HEADER_BUILD_ID);
- if (!file_new) {
+ if (!rec->file_new) {
err = perf_session__read_header(session, output);
if (err < 0)
goto out_delete_session;
@@ -549,94 +477,57 @@ static int __cmd_record(int argc, const char **argv)
perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_CPUID);
- /* 512 kiB: default amount of unprivileged mlocked memory */
- if (mmap_pages == UINT_MAX)
- mmap_pages = (512 * 1024) / page_size;
-
if (forks) {
- child_pid = fork();
- if (child_pid < 0) {
- perror("failed to fork");
- exit(-1);
- }
-
- if (!child_pid) {
- if (pipe_output)
- dup2(2, 1);
- close(child_ready_pipe[0]);
- close(go_pipe[1]);
- fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
-
- /*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
- * Tell the parent we're ready to go
- */
- close(child_ready_pipe[1]);
-
- /*
- * Wait until the parent tells us to go.
- */
- if (read(go_pipe[0], &buf, 1) == -1)
- perror("unable to read pipe");
-
- execvp(argv[0], (char **)argv);
-
- perror(argv[0]);
- kill(getppid(), SIGUSR1);
- exit(-1);
- }
-
- if (!system_wide && target_tid == -1 && target_pid == -1)
- evsel_list->threads->map[0] = child_pid;
-
- close(child_ready_pipe[1]);
- close(go_pipe[0]);
- /*
- * wait for child to settle
- */
- if (read(child_ready_pipe[0], &buf, 1) == -1) {
- perror("unable to read pipe");
- exit(-1);
+ err = perf_evlist__prepare_workload(evsel_list, opts, argv);
+ if (err < 0) {
+ pr_err("Couldn't run the workload!\n");
+ goto out_delete_session;
}
- close(child_ready_pipe[0]);
}
- open_counters(evsel_list);
+ perf_record__open(rec);
/*
- * perf_session__delete(session) will be called at atexit_header()
+ * perf_session__delete(session) will be called at perf_record__exit()
*/
- atexit(atexit_header);
+ on_exit(perf_record__exit, rec);
- if (pipe_output) {
+ if (opts->pipe_output) {
err = perf_header__write_pipe(output);
if (err < 0)
return err;
- } else if (file_new) {
+ } else if (rec->file_new) {
err = perf_session__write_header(session, evsel_list,
output, false);
if (err < 0)
return err;
}
- post_processing_offset = lseek(output, 0, SEEK_CUR);
+ if (!!rec->no_buildid
+ && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
+ pr_err("Couldn't generating buildids. "
+ "Use --no-buildid to profile anyway.\n");
+ return -1;
+ }
- if (pipe_output) {
- err = perf_session__synthesize_attrs(session,
- process_synthesized_event);
+ rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
+
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("Couldn't find native kernel information.\n");
+ return -1;
+ }
+
+ if (opts->pipe_output) {
+ err = perf_event__synthesize_attrs(tool, session,
+ process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
return err;
}
- err = perf_event__synthesize_event_types(process_synthesized_event,
- session);
+ err = perf_event__synthesize_event_types(tool, process_synthesized_event,
+ machine);
if (err < 0) {
pr_err("Couldn't synthesize event_types.\n");
return err;
@@ -651,56 +542,49 @@ static int __cmd_record(int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
- err = perf_event__synthesize_tracing_data(output, evsel_list,
- process_synthesized_event,
- session);
+ err = perf_event__synthesize_tracing_data(tool, output, evsel_list,
+ process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
return err;
}
- advance_output(err);
+ advance_output(rec, err);
}
}
- machine = perf_session__find_host_machine(session);
- if (!machine) {
- pr_err("Couldn't find native kernel information.\n");
- return -1;
- }
-
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_text");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_text");
if (err < 0)
- err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
- session, machine, "_stext");
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine, "_stext");
if (err < 0)
pr_err("Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/kallsyms permission or run as root.\n");
- err = perf_event__synthesize_modules(process_synthesized_event,
- session, machine);
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
if (err < 0)
pr_err("Couldn't record kernel module information.\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
"Check /proc/modules permission or run as root.\n");
if (perf_guest)
- perf_session__process_machines(session,
+ perf_session__process_machines(session, tool,
perf_event__synthesize_guest_os);
- if (!system_wide)
- perf_event__synthesize_thread_map(evsel_list->threads,
+ if (!opts->system_wide)
+ perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event,
- session);
+ machine);
else
- perf_event__synthesize_threads(process_synthesized_event,
- session);
+ perf_event__synthesize_threads(tool, process_synthesized_event,
+ machine);
- if (realtime_prio) {
+ if (rec->realtime_prio) {
struct sched_param param;
- param.sched_priority = realtime_prio;
+ param.sched_priority = rec->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
exit(-1);
@@ -713,14 +597,14 @@ static int __cmd_record(int argc, const char **argv)
* Let the child rip
*/
if (forks)
- close(go_pipe[1]);
+ perf_evlist__start_workload(evsel_list);
for (;;) {
- int hits = samples;
+ int hits = rec->samples;
- mmap_read_all();
+ perf_record__mmap_read_all(rec);
- if (hits == samples) {
+ if (hits == rec->samples) {
if (done)
break;
err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
@@ -741,9 +625,9 @@ static int __cmd_record(int argc, const char **argv)
*/
fprintf(stderr,
"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
- (double)bytes_written / 1024.0 / 1024.0,
+ (double)rec->bytes_written / 1024.0 / 1024.0,
output_name,
- bytes_written / 24);
+ rec->bytes_written / 24);
return 0;
@@ -758,58 +642,89 @@ static const char * const record_usage[] = {
NULL
};
-static bool force, append_file;
+/*
+ * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
+ * because we need to have access to it in perf_record__exit, that is called
+ * after cmd_record() exits, but since record_options need to be accessible to
+ * builtin-script, leave it here.
+ *
+ * At least we don't touch it in all the other functions here directly.
+ *
+ * Just say no to tons of global variables, sigh.
+ */
+static struct perf_record record = {
+ .opts = {
+ .target_pid = -1,
+ .target_tid = -1,
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 1000,
+ .sample_id_all_avail = true,
+ },
+ .write_mode = WRITE_FORCE,
+ .file_new = true,
+};
+/*
+ * XXX Will stay a global variable till we fix builtin-script.c to stop messing
+ * with it and switch to use the library functions in perf_evlist that came
+ * from builtin-record.c, i.e. use perf_record_opts,
+ * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
+ * using pipes, etc.
+ */
const struct option record_options[] = {
- OPT_CALLBACK('e', "event", &evsel_list, "event",
+ OPT_CALLBACK('e', "event", &record.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
- OPT_CALLBACK(0, "filter", &evsel_list, "filter",
+ OPT_CALLBACK(0, "filter", &record.evlist, "filter",
"event filter", parse_filter),
- OPT_INTEGER('p', "pid", &target_pid,
+ OPT_INTEGER('p', "pid", &record.opts.target_pid,
"record events on existing process id"),
- OPT_INTEGER('t', "tid", &target_tid,
+ OPT_INTEGER('t', "tid", &record.opts.target_tid,
"record events on existing thread id"),
- OPT_INTEGER('r', "realtime", &realtime_prio,
+ OPT_INTEGER('r', "realtime", &record.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
- OPT_BOOLEAN('D', "no-delay", &nodelay,
+ OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
"collect data without buffering"),
- OPT_BOOLEAN('R', "raw-samples", &raw_samples,
+ OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
"collect raw sample records from all opened counters"),
- OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
"system-wide collection from all CPUs"),
- OPT_BOOLEAN('A', "append", &append_file,
+ OPT_BOOLEAN('A', "append", &record.append_file,
"append to the output file to do incremental profiling"),
- OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
"list of cpus to monitor"),
- OPT_BOOLEAN('f', "force", &force,
+ OPT_BOOLEAN('f', "force", &record.force,
"overwrite existing data file (deprecated)"),
- OPT_U64('c', "count", &user_interval, "event period to sample"),
- OPT_STRING('o', "output", &output_name, "file",
+ OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
+ OPT_STRING('o', "output", &record.output_name, "file",
"output file name"),
- OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+ OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit,
"child tasks do not inherit counters"),
- OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
- OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
- OPT_BOOLEAN(0, "group", &group,
+ OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
+ OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages,
+ "number of mmap data pages"),
+ OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('g', "call-graph", &call_graph,
+ OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
"do call-graph (stack chain/backtrace) recording"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
- OPT_BOOLEAN('s', "stat", &inherit_stat,
+ OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
"per thread counts"),
- OPT_BOOLEAN('d', "data", &sample_address,
+ OPT_BOOLEAN('d', "data", &record.opts.sample_address,
"Sample addresses"),
- OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
- OPT_BOOLEAN('n', "no-samples", &no_samples,
+ OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
+ OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
+ OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
"don't sample"),
- OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
+ OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
"do not update the buildid cache"),
- OPT_BOOLEAN('B', "no-buildid", &no_buildid,
+ OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
"do not collect buildids in perf.data"),
- OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
+ OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
OPT_END()
@@ -819,6 +734,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
{
int err = -ENOMEM;
struct perf_evsel *pos;
+ struct perf_evlist *evsel_list;
+ struct perf_record *rec = &record;
perf_header__set_cmdline(argc, argv);
@@ -826,23 +743,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
if (evsel_list == NULL)
return -ENOMEM;
+ rec->evlist = evsel_list;
+
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && target_pid == -1 && target_tid == -1 &&
- !system_wide && !cpu_list)
+ if (!argc && rec->opts.target_pid == -1 && rec->opts.target_tid == -1 &&
+ !rec->opts.system_wide && !rec->opts.cpu_list)
usage_with_options(record_usage, record_options);
- if (force && append_file) {
+ if (rec->force && rec->append_file) {
fprintf(stderr, "Can't overwrite and append at the same time."
" You need to choose between -f and -A");
usage_with_options(record_usage, record_options);
- } else if (append_file) {
- write_mode = WRITE_APPEND;
+ } else if (rec->append_file) {
+ rec->write_mode = WRITE_APPEND;
} else {
- write_mode = WRITE_FORCE;
+ rec->write_mode = WRITE_FORCE;
}
- if (nr_cgroups && !system_wide) {
+ if (nr_cgroups && !rec->opts.system_wide) {
fprintf(stderr, "cgroup monitoring only available in"
" system-wide mode\n");
usage_with_options(record_usage, record_options);
@@ -860,7 +779,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
- if (no_buildid_cache || no_buildid)
+ if (rec->no_buildid_cache || rec->no_buildid)
disable_buildid_cache();
if (evsel_list->nr_entries == 0 &&
@@ -869,43 +788,37 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
goto out_symbol_exit;
}
- if (target_pid != -1)
- target_tid = target_pid;
+ if (rec->opts.target_pid != -1)
+ rec->opts.target_tid = rec->opts.target_pid;
- if (perf_evlist__create_maps(evsel_list, target_pid,
- target_tid, cpu_list) < 0)
+ if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
+ rec->opts.target_tid, rec->opts.cpu_list) < 0)
usage_with_options(record_usage, record_options);
list_for_each_entry(pos, &evsel_list->entries, node) {
- if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
- evsel_list->threads->nr) < 0)
- goto out_free_fd;
if (perf_header__push_event(pos->attr.config, event_name(pos)))
goto out_free_fd;
}
- if (perf_evlist__alloc_pollfd(evsel_list) < 0)
- goto out_free_fd;
-
- if (user_interval != ULLONG_MAX)
- default_interval = user_interval;
- if (user_freq != UINT_MAX)
- freq = user_freq;
+ if (rec->opts.user_interval != ULLONG_MAX)
+ rec->opts.default_interval = rec->opts.user_interval;
+ if (rec->opts.user_freq != UINT_MAX)
+ rec->opts.freq = rec->opts.user_freq;
/*
* User specified count overrides default frequency.
*/
- if (default_interval)
- freq = 0;
- else if (freq) {
- default_interval = freq;
+ if (rec->opts.default_interval)
+ rec->opts.freq = 0;
+ else if (rec->opts.freq) {
+ rec->opts.default_interval = rec->opts.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
err = -EINVAL;
goto out_free_fd;
}
- err = __cmd_record(argc, argv);
+ err = __cmd_record(&record, argc, argv);
out_free_fd:
perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
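
builtin-record stops using atexit() and registers perf_record__exit()/perf_record__sig_exit() with on_exit(), precisely because on_exit() passes an argument pointer through, so the handlers can receive the struct perf_record instead of reaching for globals. A hedged sketch of that pattern using glibc's on_exit(); the struct and messages are illustrative:

#define _GNU_SOURCE		/* on_exit() is a glibc extension */
#include <stdio.h>
#include <stdlib.h>

struct record_state {
	long bytes_written;
};

/* receives both the exit status and the pointer passed to on_exit() */
static void record_exit(int status, void *arg)
{
	struct record_state *rec = arg;

	fprintf(stderr, "exit %d: wrote %ld bytes\n", status, rec->bytes_written);
}

int main(void)
{
	static struct record_state rec;	/* must outlive main(), hence static */

	on_exit(record_exit, &rec);	/* atexit() could not carry &rec */
	rec.bytes_written = 4096;
	return 0;
}
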
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4d7c8340c32..25d34d483e4 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -25,6 +25,7 @@
#include "util/evsel.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -35,38 +36,35 @@
#include <linux/bitmap.h>
-static char const *input_name = "perf.data";
-
-static bool force, use_tui, use_stdio;
-static bool hide_unresolved;
-static bool dont_use_callchains;
-static bool show_full_info;
-
-static bool show_threads;
-static struct perf_read_values show_threads_values;
-
-static const char default_pretty_printing_style[] = "normal";
-static const char *pretty_printing_style = default_pretty_printing_style;
-
-static char callchain_default_opt[] = "fractal,0.5,callee";
-static bool inverted_callchain;
-static symbol_filter_t annotate_init;
-
-static const char *cpu_list;
-static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+struct perf_report {
+ struct perf_tool tool;
+ struct perf_session *session;
+ char const *input_name;
+ bool force, use_tui, use_stdio;
+ bool hide_unresolved;
+ bool dont_use_callchains;
+ bool show_full_info;
+ bool show_threads;
+ bool inverted_callchain;
+ struct perf_read_values show_threads_values;
+ const char *pretty_printing_style;
+ symbol_filter_t annotate_init;
+ const char *cpu_list;
+ DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+};
-static int perf_session__add_hist_entry(struct perf_session *session,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel)
+static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+ struct addr_location *al,
+ struct perf_sample *sample,
+ struct machine *machine)
{
struct symbol *parent = NULL;
int err = 0;
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = perf_session__resolve_callchain(session, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample->callchain, &parent);
if (err)
return err;
}
@@ -76,7 +74,8 @@ static int perf_session__add_hist_entry(struct perf_session *session,
return -ENOMEM;
if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, &session->callchain_cursor,
+ err = callchain_append(he->callchain,
+ &evsel->hists.callchain_cursor,
sample->period);
if (err)
return err;
@@ -92,8 +91,7 @@ static int perf_session__add_hist_entry(struct perf_session *session,
assert(evsel != NULL);
err = -ENOMEM;
- if (notes->src == NULL &&
- symbol__alloc_hist(he->ms.sym, session->evlist->nr_entries) < 0)
+ if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
goto out;
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
@@ -106,30 +104,32 @@ out:
}
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_report *rep = container_of(tool, struct perf_report, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, session, &al, sample,
- annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
+ rep->annotate_init) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (al.filtered || (hide_unresolved && al.sym == NULL))
+ if (al.filtered || (rep->hide_unresolved && al.sym == NULL))
return 0;
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
if (al.map != NULL)
al.map->dso->hit = 1;
- if (perf_session__add_hist_entry(session, &al, sample, evsel)) {
+ if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
pr_debug("problem incrementing symbol period, skipping event\n");
return -1;
}
@@ -137,15 +137,17 @@ static int process_sample_event(union perf_event *event,
return 0;
}
-static int process_read_event(union perf_event *event,
+static int process_read_event(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct perf_evsel *evsel,
+ struct machine *machine __used)
{
- struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist,
- event->read.id);
- if (show_threads) {
+ struct perf_report *rep = container_of(tool, struct perf_report, tool);
+
+ if (rep->show_threads) {
const char *name = evsel ? event_name(evsel) : "unknown";
- perf_read_values_add_value(&show_threads_values,
+ perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
event->read.id,
name,
@@ -159,8 +161,10 @@ static int process_read_event(union perf_event *event,
return 0;
}
-static int perf_session__setup_sample_type(struct perf_session *self)
+static int perf_report__setup_sample_type(struct perf_report *rep)
{
+ struct perf_session *self = rep->session;
+
if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__warning("Selected --sort parent, but no "
@@ -173,7 +177,8 @@ static int perf_session__setup_sample_type(struct perf_session *self)
"you call 'perf record' without -g?\n");
return -1;
}
- } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
+ } else if (!rep->dont_use_callchains &&
+ callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
@@ -186,22 +191,6 @@ static int perf_session__setup_sample_type(struct perf_session *self)
return 0;
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .lost = perf_event__process_lost,
- .read = process_read_event,
- .attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
-};
-
extern volatile int session_done;
static void sig_handler(int sig __used)
@@ -224,6 +213,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
}
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
+ struct perf_report *rep,
const char *help)
{
struct perf_evsel *pos;
@@ -241,18 +231,18 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
parent_pattern == default_parent_pattern) {
fprintf(stdout, "#\n# (%s)\n#\n", help);
- if (show_threads) {
- bool style = !strcmp(pretty_printing_style, "raw");
- perf_read_values_display(stdout, &show_threads_values,
+ if (rep->show_threads) {
+ bool style = !strcmp(rep->pretty_printing_style, "raw");
+ perf_read_values_display(stdout, &rep->show_threads_values,
style);
- perf_read_values_destroy(&show_threads_values);
+ perf_read_values_destroy(&rep->show_threads_values);
}
}
return 0;
}
-static int __cmd_report(void)
+static int __cmd_report(struct perf_report *rep)
{
int ret = -EINVAL;
u64 nr_samples;
@@ -264,27 +254,31 @@ static int __cmd_report(void)
signal(SIGINT, sig_handler);
- session = perf_session__new(input_name, O_RDONLY, force, false, &event_ops);
+ session = perf_session__new(rep->input_name, O_RDONLY,
+ rep->force, false, &rep->tool);
if (session == NULL)
return -ENOMEM;
- if (cpu_list) {
- ret = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+ rep->session = session;
+
+ if (rep->cpu_list) {
+ ret = perf_session__cpu_bitmap(session, rep->cpu_list,
+ rep->cpu_bitmap);
if (ret)
goto out_delete;
}
if (use_browser <= 0)
- perf_session__fprintf_info(session, stdout, show_full_info);
+ perf_session__fprintf_info(session, stdout, rep->show_full_info);
- if (show_threads)
- perf_read_values_init(&show_threads_values);
+ if (rep->show_threads)
+ perf_read_values_init(&rep->show_threads_values);
- ret = perf_session__setup_sample_type(session);
+ ret = perf_report__setup_sample_type(rep);
if (ret)
goto out_delete;
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &rep->tool);
if (ret)
goto out_delete;
@@ -327,7 +321,7 @@ static int __cmd_report(void)
}
if (nr_samples == 0) {
- ui__warning("The %s file has no samples!\n", input_name);
+ ui__warning("The %s file has no samples!\n", session->filename);
goto out_delete;
}
@@ -335,7 +329,7 @@ static int __cmd_report(void)
perf_evlist__tui_browse_hists(session->evlist, help,
NULL, NULL, 0);
} else
- perf_evlist__tty_browse_hists(session->evlist, help);
+ perf_evlist__tty_browse_hists(session->evlist, rep, help);
out_delete:
/*
@@ -354,9 +348,9 @@ out_delete:
}
static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
+ struct perf_report *rep = (struct perf_report *)opt->value;
char *tok, *tok2;
char *endptr;
@@ -364,7 +358,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
* --no-call-graph
*/
if (unset) {
- dont_use_callchains = true;
+ rep->dont_use_callchains = true;
return 0;
}
@@ -412,7 +406,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
goto setup;
if (tok2[0] != 'c') {
- callchain_param.print_limit = strtod(tok2, &endptr);
+ callchain_param.print_limit = strtoul(tok2, &endptr, 0);
tok2 = strtok(NULL, ",");
if (!tok2)
goto setup;
@@ -433,13 +427,34 @@ setup:
return 0;
}
-static const char * const report_usage[] = {
- "perf report [<options>] <command>",
- NULL
-};
-
-static const struct option options[] = {
- OPT_STRING('i', "input", &input_name, "file",
+int cmd_report(int argc, const char **argv, const char *prefix __used)
+{
+ struct stat st;
+ char callchain_default_opt[] = "fractal,0.5,callee";
+ const char * const report_usage[] = {
+ "perf report [<options>]",
+ NULL
+ };
+ struct perf_report report = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .read = process_read_event,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
+ .pretty_printing_style = "normal",
+ };
+ const struct option options[] = {
+ OPT_STRING('i', "input", &report.input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
@@ -449,17 +464,18 @@ static const struct option options[] = {
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
"file", "kallsyms pathname"),
- OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_BOOLEAN('T', "threads", &show_threads,
+ OPT_BOOLEAN('T', "threads", &report.show_threads,
"Show per-thread event counters"),
- OPT_STRING(0, "pretty", &pretty_printing_style, "key",
+ OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
"pretty printing style key: normal raw"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &report.use_stdio,
+ "Use the stdio interface"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -468,13 +484,14 @@ static const struct option options[] = {
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
- OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent, call_order",
- "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold and callchain order. "
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
+ "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt),
- OPT_BOOLEAN('G', "inverted", &inverted_callchain, "alias for inverted call graph"),
+ OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
+ "alias for inverted call graph"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
- OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
@@ -484,12 +501,13 @@ static const struct option options[] = {
OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
- OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
+ OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
"Only display entries resolved to a symbol"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
- OPT_BOOLEAN('I', "show-info", &show_full_info,
+ OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_BOOLEAN('I', "show-info", &report.show_full_info,
"Display extended information about perf.data file"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
"Interleave source code with assembly code (default)"),
@@ -500,24 +518,30 @@ static const struct option options[] = {
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_END()
-};
+ };
-int cmd_report(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, options, report_usage, 0);
- if (use_stdio)
+ if (report.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (report.use_tui)
use_browser = 1;
- if (inverted_callchain)
+ if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
- if (strcmp(input_name, "-") != 0)
+ if (!report.input_name || !strlen(report.input_name)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ report.input_name = "-";
+ else
+ report.input_name = "perf.data";
+ }
+
+ if (strcmp(report.input_name, "-") != 0)
setup_browser(true);
else
use_browser = 0;
+
/*
* Only in the newt browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
@@ -525,7 +549,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
*/
if (use_browser > 0) {
symbol_conf.priv_size = sizeof(struct annotation);
- annotate_init = symbol__annotate_init;
+ report.annotate_init = symbol__annotate_init;
/*
* For searching by name on the "Browse map details".
* providing it only in verbose mode not to bloat too
@@ -572,5 +596,5 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
- return __cmd_report();
+ return __cmd_report(&report);
}
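
The builtin-report.c hunks above replace the file-scope globals with a struct perf_report that embeds the perf_tool callbacks, and each callback recovers its report with container_of(tool, struct perf_report, tool). Below is a minimal, self-contained sketch of that embed-and-recover pattern; struct widget is a hypothetical stand-in for struct perf_report, and container_of is spelled out with offsetof instead of taken from the kernel headers.

#include <stddef.h>
#include <stdio.h>

/* offsetof-based container_of, written out so the example stands alone */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {                      /* stand-in for struct perf_tool */
        void (*sample)(struct tool *tool);
};

struct widget {                    /* hypothetical, plays the role of struct perf_report */
        struct tool tool;          /* must be embedded, not pointed to */
        int show_threads;
};

static void widget_sample(struct tool *tool)
{
        /* recover the enclosing object from the embedded member */
        struct widget *w = container_of(tool, struct widget, tool);

        printf("show_threads=%d\n", w->show_threads);
}

int main(void)
{
        struct widget w = { .tool = { .sample = widget_sample }, .show_threads = 1 };

        w.tool.sample(&w.tool);    /* the callback only ever sees the tool pointer */
        return 0;
}

The same trick is what lets every callback in the patched report/sched/script tools reach its per-command state without globals.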
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5177964943e..fb8b5f83b4a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2,11 +2,14 @@
#include "perf.h"
#include "util/util.h"
+#include "util/evlist.h"
#include "util/cache.h"
+#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
@@ -19,7 +22,7 @@
#include <pthread.h>
#include <math.h>
-static char const *input_name = "perf.data";
+static const char *input_name;
static char default_sort_order[] = "avg, max, switch, runtime";
static const char *sort_order = default_sort_order;
@@ -723,21 +726,21 @@ struct trace_migrate_task_event {
struct trace_sched_handler {
void (*switch_event)(struct trace_switch_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*runtime_event)(struct trace_runtime_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*wakeup_event)(struct trace_wakeup_event *,
- struct perf_session *,
+ struct machine *,
struct event *,
int cpu,
u64 timestamp,
@@ -750,7 +753,7 @@ struct trace_sched_handler {
struct thread *thread);
void (*migrate_task_event)(struct trace_migrate_task_event *,
- struct perf_session *session,
+ struct machine *machine,
struct event *,
int cpu,
u64 timestamp,
@@ -760,7 +763,7 @@ struct trace_sched_handler {
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct perf_session *session __used,
+ struct machine *machine __used,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -787,7 +790,7 @@ static u64 cpu_last_switched[MAX_CPUS];
static void
replay_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session __used,
+ struct machine *machine __used,
struct event *event,
int cpu,
u64 timestamp,
@@ -1021,7 +1024,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
static void
latency_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int cpu,
u64 timestamp,
@@ -1045,8 +1048,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- sched_out = perf_session__findnew(session, switch_event->prev_pid);
- sched_in = perf_session__findnew(session, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+ sched_in = machine__findnew_thread(machine, switch_event->next_pid);
out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_events) {
@@ -1074,13 +1077,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int cpu,
u64 timestamp,
struct thread *this_thread __used)
{
- struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+ struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1097,7 +1100,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1111,7 +1114,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
if (!wakeup_event->success)
return;
- wakee = perf_session__findnew(session, wakeup_event->pid);
+ wakee = machine__findnew_thread(machine, wakeup_event->pid);
atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) {
thread_atoms_insert(wakee);
@@ -1145,7 +1148,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1161,7 +1164,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
if (profile_cpu == -1)
return;
- migrant = perf_session__findnew(session, migrate_task_event->pid);
+ migrant = machine__findnew_thread(machine, migrate_task_event->pid);
atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
if (!atoms) {
thread_atoms_insert(migrant);
@@ -1356,12 +1359,13 @@ static void sort_lat(void)
static struct trace_sched_handler *trace_handler;
static void
-process_sched_wakeup_event(void *data, struct perf_session *session,
+process_sched_wakeup_event(struct perf_tool *tool __used,
struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_wakeup_event wakeup_event;
FILL_COMMON_FIELDS(wakeup_event, event, data);
@@ -1373,8 +1377,8 @@ process_sched_wakeup_event(void *data, struct perf_session *session,
FILL_FIELD(wakeup_event, cpu, event, data);
if (trace_handler->wakeup_event)
- trace_handler->wakeup_event(&wakeup_event, session, event,
- cpu, timestamp, thread);
+ trace_handler->wakeup_event(&wakeup_event, machine, event,
+ sample->cpu, sample->time, thread);
}
/*
@@ -1392,7 +1396,7 @@ static char next_shortname2 = '0';
static void
map_switch_event(struct trace_switch_event *switch_event,
- struct perf_session *session,
+ struct machine *machine,
struct event *event __used,
int this_cpu,
u64 timestamp,
@@ -1420,8 +1424,8 @@ map_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- sched_out = perf_session__findnew(session, switch_event->prev_pid);
- sched_in = perf_session__findnew(session, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
+ sched_in = machine__findnew_thread(machine, switch_event->next_pid);
curr_thread[this_cpu] = sched_in;
@@ -1469,14 +1473,15 @@ map_switch_event(struct trace_switch_event *switch_event,
}
}
-
static void
-process_sched_switch_event(void *data, struct perf_session *session,
+process_sched_switch_event(struct perf_tool *tool __used,
struct event *event,
- int this_cpu,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ int this_cpu = sample->cpu;
+ void *data = sample->raw_data;
struct trace_switch_event switch_event;
FILL_COMMON_FIELDS(switch_event, event, data);
@@ -1498,19 +1503,20 @@ process_sched_switch_event(void *data, struct perf_session *session,
nr_context_switch_bugs++;
}
if (trace_handler->switch_event)
- trace_handler->switch_event(&switch_event, session, event,
- this_cpu, timestamp, thread);
+ trace_handler->switch_event(&switch_event, machine, event,
+ this_cpu, sample->time, thread);
curr_pid[this_cpu] = switch_event.next_pid;
}
static void
-process_sched_runtime_event(void *data, struct perf_session *session,
- struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+process_sched_runtime_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_runtime_event runtime_event;
FILL_ARRAY(runtime_event, comm, event, data);
@@ -1519,16 +1525,18 @@ process_sched_runtime_event(void *data, struct perf_session *session,
FILL_FIELD(runtime_event, vruntime, event, data);
if (trace_handler->runtime_event)
- trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+ trace_handler->runtime_event(&runtime_event, machine, event,
+ sample->cpu, sample->time, thread);
}
static void
-process_sched_fork_event(void *data,
+process_sched_fork_event(struct perf_tool *tool __used,
struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ struct perf_sample *sample,
+ struct machine *machine __used,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_fork_event fork_event;
FILL_COMMON_FIELDS(fork_event, event, data);
@@ -1540,13 +1548,14 @@ process_sched_fork_event(void *data,
if (trace_handler->fork_event)
trace_handler->fork_event(&fork_event, event,
- cpu, timestamp, thread);
+ sample->cpu, sample->time, thread);
}
static void
-process_sched_exit_event(struct event *event,
- int cpu __used,
- u64 timestamp __used,
+process_sched_exit_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample __used,
+ struct machine *machine __used,
struct thread *thread __used)
{
if (verbose)
@@ -1554,12 +1563,13 @@ process_sched_exit_event(struct event *event,
}
static void
-process_sched_migrate_task_event(void *data, struct perf_session *session,
- struct event *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+process_sched_migrate_task_event(struct perf_tool *tool __used,
+ struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread)
{
+ void *data = sample->raw_data;
struct trace_migrate_task_event migrate_task_event;
FILL_COMMON_FIELDS(migrate_task_event, event, data);
@@ -1570,67 +1580,47 @@ process_sched_migrate_task_event(void *data, struct perf_session *session,
FILL_FIELD(migrate_task_event, cpu, event, data);
if (trace_handler->migrate_task_event)
- trace_handler->migrate_task_event(&migrate_task_event, session,
- event, cpu, timestamp, thread);
+ trace_handler->migrate_task_event(&migrate_task_event, machine,
+ event, sample->cpu,
+ sample->time, thread);
}
-static void process_raw_event(union perf_event *raw_event __used,
- struct perf_session *session, void *data, int cpu,
- u64 timestamp, struct thread *thread)
-{
- struct event *event;
- int type;
-
-
- type = trace_parse_common_type(data);
- event = trace_find_event(type);
-
- if (!strcmp(event->name, "sched_switch"))
- process_sched_switch_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_stat_runtime"))
- process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_wakeup"))
- process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_wakeup_new"))
- process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_process_fork"))
- process_sched_fork_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_process_exit"))
- process_sched_exit_event(event, cpu, timestamp, thread);
- if (!strcmp(event->name, "sched_migrate_task"))
- process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
-}
+typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *thread);
-static int process_sample_event(union perf_event *event,
- struct perf_sample *sample,
- struct perf_evsel *evsel __used,
- struct perf_session *session)
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
+ union perf_event *event __used,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
{
- struct thread *thread;
-
- if (!(session->sample_type & PERF_SAMPLE_RAW))
- return 0;
+ struct thread *thread = machine__findnew_thread(machine, sample->pid);
- thread = perf_session__findnew(session, sample->pid);
if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
- event->header.type);
+ pr_debug("problem processing %s event, skipping it.\n",
+ evsel->name);
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ evsel->hists.stats.total_period += sample->period;
+ hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
- if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
- return 0;
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
- process_raw_event(event, session, sample->raw_data, sample->cpu,
- sample->time, thread);
+ if (evsel->handler.data == NULL)
+ evsel->handler.data = trace_find_event(evsel->attr.config);
+
+ f(tool, evsel->handler.data, sample, machine, thread);
+ }
return 0;
}
-static struct perf_event_ops event_ops = {
- .sample = process_sample_event,
+static struct perf_tool perf_sched = {
+ .sample = perf_sched__process_tracepoint_sample,
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_event__process_task,
@@ -1640,13 +1630,25 @@ static struct perf_event_ops event_ops = {
static void read_events(bool destroy, struct perf_session **psession)
{
int err = -EINVAL;
+ const struct perf_evsel_str_handler handlers[] = {
+ { "sched:sched_switch", process_sched_switch_event, },
+ { "sched:sched_stat_runtime", process_sched_runtime_event, },
+ { "sched:sched_wakeup", process_sched_wakeup_event, },
+ { "sched:sched_wakeup_new", process_sched_wakeup_event, },
+ { "sched:sched_process_fork", process_sched_fork_event, },
+ { "sched:sched_process_exit", process_sched_exit_event, },
+ { "sched:sched_migrate_task", process_sched_migrate_task_event, },
+ };
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_sched);
if (session == NULL)
die("No Memory");
+ err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+ assert(err == 0);
+
if (perf_session__has_traces(session, "record -R")) {
- err = perf_session__process_events(session, &event_ops);
+ err = perf_session__process_events(session, &perf_sched);
if (err)
die("Failed to process events, error %d", err);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 2f62a295226..fd1909afcfd 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -7,6 +7,7 @@
#include "util/header.h"
#include "util/parse-options.h"
#include "util/session.h"
+#include "util/tool.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -23,6 +24,7 @@ static u64 nr_unordered;
extern const struct option record_options[];
static bool no_callchain;
static bool show_full_info;
+static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -315,7 +317,7 @@ static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
static void print_sample_addr(union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread,
struct perf_event_attr *attr)
{
@@ -328,11 +330,11 @@ static void print_sample_addr(union perf_event *event,
if (!sample_addr_correlates_sym(attr))
return;
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, sample->addr, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->addr, &al);
if (!al.map)
- thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE,
- event->ip.pid, sample->addr, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+ sample->addr, &al);
al.cpu = sample->cpu;
al.sym = NULL;
@@ -362,7 +364,7 @@ static void print_sample_addr(union perf_event *event,
static void process_event(union perf_event *event __unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread)
{
struct perf_event_attr *attr = &evsel->attr;
@@ -377,15 +379,15 @@ static void process_event(union perf_event *event __unused,
sample->raw_size);
if (PRINT_FIELD(ADDR))
- print_sample_addr(event, sample, session, thread, attr);
+ print_sample_addr(event, sample, machine, thread, attr);
if (PRINT_FIELD(IP)) {
if (!symbol_conf.use_callchain)
printf(" ");
else
printf("\n");
- perf_session__print_ip(event, sample, session,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO));
+ perf_event__print_ip(event, sample, machine, evsel,
+ PRINT_FIELD(SYM), PRINT_FIELD(DSO));
}
printf("\n");
@@ -432,14 +434,16 @@ static int cleanup_scripting(void)
return scripting_ops->stop_script();
}
-static char const *input_name = "perf.data";
+static const char *input_name;
-static int process_sample_event(union perf_event *event,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct addr_location al;
+ struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -458,16 +462,25 @@ static int process_sample_event(union perf_event *event,
return 0;
}
+ if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (al.filtered)
+ return 0;
+
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, session, thread);
+ scripting_ops->process_event(event, sample, evsel, machine, thread);
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_script = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
@@ -494,7 +507,7 @@ static int __cmd_script(struct perf_session *session)
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &perf_script);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -523,12 +536,6 @@ static struct script_spec *script_spec__new(const char *spec,
return s;
}
-static void script_spec__delete(struct script_spec *s)
-{
- free(s->spec);
- free(s);
-}
-
static void script_spec__add(struct script_spec *s)
{
list_add_tail(&s->node, &script_specs);
@@ -554,16 +561,11 @@ static struct script_spec *script_spec__findnew(const char *spec,
s = script_spec__new(spec, ops);
if (!s)
- goto out_delete_spec;
+ return NULL;
script_spec__add(s);
return s;
-
-out_delete_spec:
- script_spec__delete(s);
-
- return NULL;
}
int script_spec_register(const char *spec, struct scripting_ops *ops)
@@ -681,7 +683,8 @@ static int parse_output_fields(const struct option *opt __used,
type = PERF_TYPE_RAW;
else {
fprintf(stderr, "Invalid event type in field string.\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (output[type].user_set)
@@ -923,6 +926,24 @@ static int read_script_info(struct script_desc *desc, const char *filename)
return 0;
}
+static char *get_script_root(struct dirent *script_dirent, const char *suffix)
+{
+ char *script_root, *str;
+
+ script_root = strdup(script_dirent->d_name);
+ if (!script_root)
+ return NULL;
+
+ str = (char *)ends_with(script_root, suffix);
+ if (!str) {
+ free(script_root);
+ return NULL;
+ }
+
+ *str = '\0';
+ return script_root;
+}
+
static int list_available_scripts(const struct option *opt __used,
const char *s __used, int unset __used)
{
@@ -934,7 +955,6 @@ static int list_available_scripts(const struct option *opt __used,
struct script_desc *desc;
char first_half[BUFSIZ];
char *script_root;
- char *str;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
@@ -950,16 +970,14 @@ static int list_available_scripts(const struct option *opt __used,
continue;
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- script_root = strdup(script_dirent.d_name);
- str = (char *)ends_with(script_root, REPORT_SUFFIX);
- if (str) {
- *str = '\0';
+ script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+ if (script_root) {
desc = script_desc__findnew(script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent.d_name);
read_script_info(desc, script_path);
+ free(script_root);
}
- free(script_root);
}
}
@@ -981,8 +999,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
char script_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char lang_path[MAXPATHLEN];
- char *str, *__script_root;
- char *path = NULL;
+ char *__script_root;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
@@ -998,23 +1015,18 @@ static char *get_script_path(const char *script_root, const char *suffix)
continue;
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- __script_root = strdup(script_dirent.d_name);
- str = (char *)ends_with(__script_root, suffix);
- if (str) {
- *str = '\0';
- if (strcmp(__script_root, script_root))
- continue;
+ __script_root = get_script_root(&script_dirent, suffix);
+ if (__script_root && !strcmp(script_root, __script_root)) {
+ free(__script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
lang_path, script_dirent.d_name);
- path = strdup(script_path);
- free(__script_root);
- break;
+ return strdup(script_path);
}
free(__script_root);
}
}
- return path;
+ return NULL;
}
static bool is_top_script(const char *script_path)
@@ -1083,7 +1095,11 @@ static const struct option options[] = {
OPT_CALLBACK('f', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr",
parse_output_fields),
- OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ "only display events for these comms"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_END()
@@ -1110,7 +1126,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
struct perf_session *session;
char *script_path = NULL;
const char **__argv;
- bool system_wide;
int i, j, err;
setup_scripting();
@@ -1178,15 +1193,17 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
}
if (!pid) {
- system_wide = true;
j = 0;
dup2(live_pipe[1], 1);
close(live_pipe[0]);
- if (!is_top_script(argv[0]))
+ if (is_top_script(argv[0])) {
+ system_wide = true;
+ } else if (!system_wide) {
system_wide = !have_cmd(argc - rep_args,
&argv[rep_args]);
+ }
__argv = malloc((argc + 6) * sizeof(const char *));
if (!__argv)
@@ -1234,10 +1251,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
script_path = rep_script_path;
if (script_path) {
- system_wide = false;
j = 0;
- if (rec_script_path)
+ if (!rec_script_path)
+ system_wide = false;
+ else if (!system_wide)
system_wide = !have_cmd(argc - 1, &argv[1]);
__argv = malloc((argc + 2) * sizeof(const char *));
@@ -1261,7 +1279,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (!script_name)
setup_pager();
- session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops);
+ session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
if (session == NULL)
return -ENOMEM;
@@ -1287,7 +1305,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
return -1;
}
- input = open(input_name, O_RDONLY);
+ input = open(session->filename, O_RDONLY); /* input_name */
if (input < 0) {
perror("failed to open file");
exit(-1);
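
The new get_script_root() helper in builtin-script.c factors out the strdup-then-strip-suffix dance that both list_available_scripts() and get_script_path() used to open-code. A self-contained sketch of that helper, using only libc and open-coding perf's ends_with() (which returns a pointer to the suffix inside the string), with a made-up script name for the demo:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* returns a pointer to the suffix inside s, or NULL -- mirrors perf's ends_with() */
static const char *ends_with(const char *s, const char *suffix)
{
        size_t len = strlen(s), slen = strlen(suffix);

        if (len < slen || strcmp(s + len - slen, suffix))
                return NULL;
        return s + len - slen;
}

static char *get_script_root(const char *d_name, const char *suffix)
{
        char *root = strdup(d_name);
        char *str;

        if (!root)
                return NULL;

        str = (char *)ends_with(root, suffix);
        if (!str) {
                free(root);
                return NULL;
        }

        *str = '\0';            /* truncate in place: "rw-by-pid-report" -> "rw-by-pid" */
        return root;
}

int main(void)
{
        char *root = get_script_root("rw-by-pid-report", "-report");

        printf("%s\n", root ? root : "(no match)");
        free(root);
        return 0;
}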
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7d98676808d..f5d2a63eba6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
- if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+ if (errno == EINVAL || errno == ENOSYS ||
+ errno == ENOENT || errno == EOPNOTSUPP) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
@@ -577,6 +578,33 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
avg / avg_stats(&walltime_nsecs_stats));
}
+/* used for get_ratio_color() */
+enum grc_type {
+ GRC_STALLED_CYCLES_FE,
+ GRC_STALLED_CYCLES_BE,
+ GRC_CACHE_MISSES,
+ GRC_MAX_NR
+};
+
+static const char *get_ratio_color(enum grc_type type, double ratio)
+{
+ static const double grc_table[GRC_MAX_NR][3] = {
+ [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
+ [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
+ [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 },
+ };
+ const char *color = PERF_COLOR_NORMAL;
+
+ if (ratio > grc_table[type][0])
+ color = PERF_COLOR_RED;
+ else if (ratio > grc_table[type][1])
+ color = PERF_COLOR_MAGENTA;
+ else if (ratio > grc_table[type][2])
+ color = PERF_COLOR_YELLOW;
+
+ return color;
+}
+
static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
@@ -587,13 +615,7 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 50.0)
- color = PERF_COLOR_RED;
- else if (ratio > 30.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 10.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -610,13 +632,7 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 75.0)
- color = PERF_COLOR_RED;
- else if (ratio > 50.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 20.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -633,13 +649,7 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -656,13 +666,7 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -679,13 +683,7 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -702,13 +700,7 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -725,13 +717,7 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -748,13 +734,7 @@ static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, doub
if (total)
ratio = avg / total * 100.0;
- color = PERF_COLOR_NORMAL;
- if (ratio > 20.0)
- color = PERF_COLOR_RED;
- else if (ratio > 10.0)
- color = PERF_COLOR_MAGENTA;
- else if (ratio > 5.0)
- color = PERF_COLOR_YELLOW;
+ color = get_ratio_color(GRC_CACHE_MISSES, ratio);
fprintf(output, " # ");
color_fprintf(output, color, "%6.2f%%", ratio);
@@ -1107,22 +1087,13 @@ static const struct option options[] = {
*/
static int add_default_attributes(void)
{
- struct perf_evsel *pos;
- size_t attr_nr = 0;
- size_t c;
-
/* Set attrs if no event is selected and !null_run: */
if (null_run)
return 0;
if (!evsel_list->nr_entries) {
- for (c = 0; c < ARRAY_SIZE(default_attrs); c++) {
- pos = perf_evsel__new(default_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
- attr_nr += c;
+ if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0)
+ return -1;
}
/* Detailed events get appended to the event list: */
@@ -1131,38 +1102,21 @@ static int add_default_attributes(void)
return 0;
/* Append detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) {
- pos = perf_evsel__new(detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
- attr_nr += c;
+ if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0)
+ return -1;
if (detailed_run < 2)
return 0;
/* Append very detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) {
- pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
+ if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0)
+ return -1;
if (detailed_run < 3)
return 0;
/* Append very, very detailed run extra attributes: */
- for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) {
- pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr);
- if (pos == NULL)
- return -1;
- perf_evlist__add(evsel_list, pos);
- }
-
-
- return 0;
+ return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs);
}
int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1266,8 +1220,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
- perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
- perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
+ perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
goto out_free_fd;
}
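
The builtin-stat.c hunks collapse seven copies of the same red/magenta/yellow threshold ladder into one get_ratio_color() lookup indexed by enum grc_type. A self-contained sketch of that table-driven lookup follows; it uses plain color-name strings in place of perf's PERF_COLOR_* constants, but the thresholds are the ones from the patch.

#include <stdio.h>

enum grc_type {
        GRC_STALLED_CYCLES_FE,
        GRC_STALLED_CYCLES_BE,
        GRC_CACHE_MISSES,
        GRC_MAX_NR
};

/* one row per metric: red / magenta / yellow thresholds, in percent */
static const double grc_table[GRC_MAX_NR][3] = {
        [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
        [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
        [GRC_CACHE_MISSES]      = { 20.0, 10.0,  5.0 },
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
        const char *color = "normal";

        if (ratio > grc_table[type][0])
                color = "red";
        else if (ratio > grc_table[type][1])
                color = "magenta";
        else if (ratio > grc_table[type][2])
                color = "yellow";

        return color;
}

int main(void)
{
        printf("%s\n", get_ratio_color(GRC_CACHE_MISSES, 12.5));      /* magenta */
        printf("%s\n", get_ratio_color(GRC_STALLED_CYCLES_BE, 12.5)); /* normal  */
        return 0;
}

Adding a metric now means adding one enum value and one table row instead of another copy of the if/else ladder.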
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 831d1baeac3..2b9a7f497a2 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -7,6 +7,7 @@
#include "util/cache.h"
#include "util/debug.h"
+#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -14,8 +15,6 @@
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"
-static long page_size;
-
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
bool *visited = symbol__priv(sym);
@@ -31,6 +30,7 @@ static int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
+ long page_size = sysconf(_SC_PAGE_SIZE);
struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
/*
@@ -247,7 +247,7 @@ static int trace_event__id(const char *evname)
if (asprintf(&filename,
"%s/syscalls/%s/id",
- debugfs_path, evname) < 0)
+ tracing_events_path, evname) < 0)
return -1;
fd = open(filename, O_RDONLY);
@@ -603,7 +603,7 @@ out_free_threads:
#define TEST_ASSERT_VAL(text, cond) \
do { \
- if (!cond) { \
+ if (!(cond)) { \
pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
return -1; \
} \
@@ -759,6 +759,103 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_tracepoint(evlist);
+}
+
+static int
+test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ }
+
+ return test__checkevent_tracepoint_multi(evlist);
+}
+
+static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_raw(evlist);
+}
+
+static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_numeric(evlist);
+}
+
+static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+ return test__checkevent_symbolic_alias(evlist);
+}
+
+static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+ return test__checkevent_genhw(evlist);
+}
+
static struct test__event_st {
const char *name;
__u32 type;
@@ -808,6 +905,34 @@ static struct test__event_st {
.name = "mem:0:w",
.check = test__checkevent_breakpoint_w,
},
+ {
+ .name = "syscalls:sys_enter_open:k",
+ .check = test__checkevent_tracepoint_modifier,
+ },
+ {
+ .name = "syscalls:*:u",
+ .check = test__checkevent_tracepoint_multi_modifier,
+ },
+ {
+ .name = "r1:kp",
+ .check = test__checkevent_raw_modifier,
+ },
+ {
+ .name = "1:1:hp",
+ .check = test__checkevent_numeric_modifier,
+ },
+ {
+ .name = "instructions:h",
+ .check = test__checkevent_symbolic_name_modifier,
+ },
+ {
+ .name = "faults:u",
+ .check = test__checkevent_symbolic_alias_modifier,
+ },
+ {
+ .name = "L1-dcache-load-miss:kp",
+ .check = test__checkevent_genhw_modifier,
+ },
};
#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -841,6 +966,336 @@ static int test__parse_events(void)
return ret;
}
+
+static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
+ size_t *sizep)
+{
+ cpu_set_t *mask;
+ size_t size;
+ int i, cpu = -1, nrcpus = 1024;
+realloc:
+ mask = CPU_ALLOC(nrcpus);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, mask);
+
+ if (sched_getaffinity(pid, size, mask) == -1) {
+ CPU_FREE(mask);
+ if (errno == EINVAL && nrcpus < (1024 << 8)) {
+ nrcpus = nrcpus << 2;
+ goto realloc;
+ }
+ perror("sched_getaffinity");
+ return -1;
+ }
+
+ for (i = 0; i < nrcpus; i++) {
+ if (CPU_ISSET_S(i, size, mask)) {
+ if (cpu == -1) {
+ cpu = i;
+ *maskp = mask;
+ *sizep = size;
+ } else
+ CPU_CLR_S(i, size, mask);
+ }
+ }
+
+ if (cpu == -1)
+ CPU_FREE(mask);
+
+ return cpu;
+}
+
+static int test__PERF_RECORD(void)
+{
+ struct perf_record_opts opts = {
+ .target_pid = -1,
+ .target_tid = -1,
+ .no_delay = true,
+ .freq = 10,
+ .mmap_pages = 256,
+ .sample_id_all_avail = true,
+ };
+ cpu_set_t *cpu_mask = NULL;
+ size_t cpu_mask_size = 0;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel;
+ struct perf_sample sample;
+ const char *cmd = "sleep";
+ const char *argv[] = { cmd, "1", NULL, };
+ char *bname;
+ u64 sample_type, prev_time = 0;
+ bool found_cmd_mmap = false,
+ found_libc_mmap = false,
+ found_vdso_mmap = false,
+ found_ld_mmap = false;
+ int err = -1, errs = 0, i, wakeups = 0, sample_size;
+ u32 cpu;
+ int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
+
+ if (evlist == NULL || argv == NULL) {
+ pr_debug("Not enough memory to create evlist\n");
+ goto out;
+ }
+
+ /*
+ * We need at least one evsel in the evlist, use the default
+ * one: "cycles".
+ */
+ err = perf_evlist__add_default(evlist);
+ if (err < 0) {
+ pr_debug("Not enough memory to create evsel\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Create maps of threads and cpus to monitor. In this case
+ * we start with all threads and cpus (-1, -1) but then in
+ * perf_evlist__prepare_workload we'll fill in the only thread
+ * we're monitoring, the one forked there.
+ */
+ err = perf_evlist__create_maps(evlist, opts.target_pid,
+ opts.target_tid, opts.cpu_list);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Prepare the workload in argv[] to run, it'll fork it, and then wait
+ * for perf_evlist__start_workload() to exec it. This is done this way
+ * so that we have time to open the evlist (calling sys_perf_event_open
+ * on all the fds) and then mmap them.
+ */
+ err = perf_evlist__prepare_workload(evlist, &opts, argv);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Config the evsels, setting attr->comm on the first one, etc.
+ */
+ evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+ evsel->attr.sample_type |= PERF_SAMPLE_CPU;
+ evsel->attr.sample_type |= PERF_SAMPLE_TID;
+ evsel->attr.sample_type |= PERF_SAMPLE_TIME;
+ perf_evlist__config_attrs(evlist, &opts);
+
+ err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
+ &cpu_mask_size);
+ if (err < 0) {
+ pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ cpu = err;
+
+ /*
+ * So that we can check perf_sample.cpu on all the samples.
+ */
+ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
+ pr_debug("sched_setaffinity: %s\n", strerror(errno));
+ goto out_free_cpu_mask;
+ }
+
+ /*
+ * Call sys_perf_event_open on all the fds on all the evsels,
+ * grouping them if asked to.
+ */
+ err = perf_evlist__open(evlist, opts.group);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * mmap the first fd on a given CPU and ask for events for the other
+ * fds in the same CPU to be injected in the same mmap ring buffer
+ * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
+ */
+ err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ if (err < 0) {
+ pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * We'll need these two to parse the PERF_SAMPLE_* fields in each
+ * event.
+ */
+ sample_type = perf_evlist__sample_type(evlist);
+ sample_size = __perf_evsel__sample_size(sample_type);
+
+ /*
+ * Now that all is properly set up, enable the events, they will
+ * count just on workload.pid, which will start...
+ */
+ perf_evlist__enable(evlist);
+
+ /*
+ * Now!
+ */
+ perf_evlist__start_workload(evlist);
+
+ while (1) {
+ int before = total_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ const char *name = perf_event__name(type);
+
+ ++total_events;
+ if (type < PERF_RECORD_MAX)
+ nr_events[type]++;
+
+ err = perf_event__parse_sample(event, sample_type,
+ sample_size, true,
+ &sample, false);
+ if (err < 0) {
+ if (verbose)
+ perf_event__fprintf(event, stderr);
+ pr_debug("Couldn't parse sample\n");
+ goto out_err;
+ }
+
+ if (verbose) {
+ pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
+ perf_event__fprintf(event, stderr);
+ }
+
+ if (prev_time > sample.time) {
+ pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
+ name, prev_time, sample.time);
+ ++errs;
+ }
+
+ prev_time = sample.time;
+
+ if (sample.cpu != cpu) {
+ pr_debug("%s with unexpected cpu, expected %d, got %d\n",
+ name, cpu, sample.cpu);
+ ++errs;
+ }
+
+ if ((pid_t)sample.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.pid);
+ ++errs;
+ }
+
+ if ((pid_t)sample.tid != evlist->workload.pid) {
+ pr_debug("%s with unexpected tid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.tid);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP ||
+ type == PERF_RECORD_FORK ||
+ type == PERF_RECORD_EXIT) &&
+ (pid_t)event->comm.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid/tid\n", name);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP) &&
+ event->comm.pid != event->comm.tid) {
+ pr_debug("%s with different pid/tid!\n", name);
+ ++errs;
+ }
+
+ switch (type) {
+ case PERF_RECORD_COMM:
+ if (strcmp(event->comm.comm, cmd)) {
+ pr_debug("%s with unexpected comm!\n", name);
+ ++errs;
+ }
+ break;
+ case PERF_RECORD_EXIT:
+ goto found_exit;
+ case PERF_RECORD_MMAP:
+ bname = strrchr(event->mmap.filename, '/');
+ if (bname != NULL) {
+ if (!found_cmd_mmap)
+ found_cmd_mmap = !strcmp(bname + 1, cmd);
+ if (!found_libc_mmap)
+ found_libc_mmap = !strncmp(bname + 1, "libc", 4);
+ if (!found_ld_mmap)
+ found_ld_mmap = !strncmp(bname + 1, "ld", 2);
+ } else if (!found_vdso_mmap)
+ found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
+ break;
+
+ case PERF_RECORD_SAMPLE:
+ /* Just ignore samples for now */
+ break;
+ default:
+ pr_debug("Unexpected perf_event->header.type %d!\n",
+ type);
+ ++errs;
+ }
+ }
+ }
+
+ /*
+ * We don't use poll here because at least at 3.1 times the
+ * PERF_RECORD_{!SAMPLE} events don't honour
+ * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
+ */
+ if (total_events == before && false)
+ poll(evlist->pollfd, evlist->nr_fds, -1);
+
+ sleep(1);
+ if (++wakeups > 5) {
+ pr_debug("No PERF_RECORD_EXIT event!\n");
+ break;
+ }
+ }
+
+found_exit:
+ if (nr_events[PERF_RECORD_COMM] > 1) {
+ pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
+ ++errs;
+ }
+
+ if (nr_events[PERF_RECORD_COMM] == 0) {
+ pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_cmd_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_libc_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
+ ++errs;
+ }
+
+ if (!found_ld_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
+ ++errs;
+ }
+
+ if (!found_vdso_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
+ ++errs;
+ }
+out_err:
+ perf_evlist__munmap(evlist);
+out_free_cpu_mask:
+ CPU_FREE(cpu_mask);
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return (err < 0 || errs > 0) ? -1 : 0;
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -866,45 +1321,89 @@ static struct test {
.func = test__parse_events,
},
{
+ .desc = "Validate PERF_RECORD_* events & perf_sample fields",
+ .func = test__PERF_RECORD,
+ },
+ {
.func = NULL,
},
};
-static int __cmd_test(void)
+static bool perf_test__matches(int curr, int argc, const char *argv[])
{
- int i = 0;
+ int i;
+
+ if (argc == 0)
+ return true;
- page_size = sysconf(_SC_PAGE_SIZE);
+ for (i = 0; i < argc; ++i) {
+ char *end;
+ long nr = strtoul(argv[i], &end, 10);
+
+ if (*end == '\0') {
+ if (nr == curr + 1)
+ return true;
+ continue;
+ }
+
+ if (strstr(tests[curr].desc, argv[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static int __cmd_test(int argc, const char *argv[])
+{
+ int i = 0;
while (tests[i].func) {
- int err;
- pr_info("%2d: %s:", i + 1, tests[i].desc);
+ int curr = i++, err;
+
+ if (!perf_test__matches(curr, argc, argv))
+ continue;
+
+ pr_info("%2d: %s:", i, tests[curr].desc);
pr_debug("\n--- start ---\n");
- err = tests[i].func();
- pr_debug("---- end ----\n%s:", tests[i].desc);
+ err = tests[curr].func();
+ pr_debug("---- end ----\n%s:", tests[curr].desc);
pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
- ++i;
}
return 0;
}
-static const char * const test_usage[] = {
- "perf test [<options>]",
- NULL,
-};
+static int perf_test__list(int argc, const char **argv)
+{
+ int i = 0;
+
+ while (tests[i].func) {
+ int curr = i++;
-static const struct option test_options[] = {
+ if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
+ continue;
+
+ pr_info("%2d: %s\n", i, tests[curr].desc);
+ }
+
+ return 0;
+}
+
+int cmd_test(int argc, const char **argv, const char *prefix __used)
+{
+ const char * const test_usage[] = {
+ "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
+ NULL,
+ };
+ const struct option test_options[] = {
OPT_INTEGER('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_END()
-};
+ };
-int cmd_test(int argc, const char **argv, const char *prefix __used)
-{
argc = parse_options(argc, argv, test_options, test_usage, 0);
- if (argc)
- usage_with_options(test_usage, test_options);
+ if (argc >= 1 && !strcmp(argv[0], "list"))
+ return perf_test__list(argc, argv);
symbol_conf.priv_size = sizeof(int);
symbol_conf.sort_by_name = true;
@@ -915,5 +1414,5 @@ int cmd_test(int argc, const char **argv, const char *prefix __used)
setup_pager();
- return __cmd_test();
+ return __cmd_test(argc, argv);
}
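
The new test__PERF_RECORD test pins its workload to the first CPU it is allowed to run on; sched__get_first_possible_cpu() sizes the affinity mask dynamically, retrying with a larger CPU_ALLOC() when sched_getaffinity() fails with EINVAL (kernel mask wider than the buffer). A stand-alone glibc sketch of just that sizing loop, reporting the first allowed CPU of the calling process, might look like this:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

/* returns the first CPU the given pid may run on, or -1 on error */
static int first_possible_cpu(pid_t pid)
{
        int nrcpus = 1024;

        for (;;) {
                cpu_set_t *mask = CPU_ALLOC(nrcpus);
                size_t size = CPU_ALLOC_SIZE(nrcpus);
                int i;

                if (!mask)
                        return -1;
                CPU_ZERO_S(size, mask);

                if (sched_getaffinity(pid, size, mask) == -1) {
                        CPU_FREE(mask);
                        if (errno == EINVAL && nrcpus < (1024 << 8)) {
                                nrcpus <<= 2;   /* kernel mask bigger than ours: grow and retry */
                                continue;
                        }
                        return -1;
                }

                for (i = 0; i < nrcpus; i++) {
                        if (CPU_ISSET_S(i, size, mask)) {
                                CPU_FREE(mask);
                                return i;
                        }
                }
                CPU_FREE(mask);
                return -1;
        }
}

int main(void)
{
        printf("first allowed cpu: %d\n", first_possible_cpu(0));  /* 0 == calling process */
        return 0;
}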
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index aa26f4d66d1..3b75b2e21ea 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -19,6 +19,7 @@
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
+#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
@@ -31,13 +32,14 @@
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
+#include "util/tool.h"
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
-static char const *input_name = "perf.data";
-static char const *output_name = "output.svg";
+static const char *input_name;
+static const char *output_name = "output.svg";
static unsigned int numcpus;
static u64 min_freq; /* Lowest CPU frequency seen */
@@ -273,25 +275,28 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(union perf_event *event,
+static int process_comm_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_set_comm(event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(union perf_event *event,
+static int process_fork_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(union perf_event *event,
+static int process_exit_event(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
pid_exit(event->fork.pid, event->fork.time);
return 0;
@@ -486,14 +491,15 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int process_sample_event(union perf_event *event __used,
+static int process_sample_event(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct perf_evsel *evsel,
+ struct machine *machine __used)
{
struct trace_entry *te;
- if (session->sample_type & PERF_SAMPLE_TIME) {
+ if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
if (!first_time || first_time > sample->time)
first_time = sample->time;
if (last_time < sample->time)
@@ -501,7 +507,7 @@ static int process_sample_event(union perf_event *event __used,
}
te = (void *)sample->raw_data;
- if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
struct power_entry_old *peo;
@@ -974,7 +980,7 @@ static void write_svg_file(const char *filename)
svg_close();
}
-static struct perf_event_ops event_ops = {
+static struct perf_tool perf_timechart = {
.comm = process_comm_event,
.fork = process_fork_event,
.exit = process_exit_event,
@@ -985,7 +991,7 @@ static struct perf_event_ops event_ops = {
static int __cmd_timechart(void)
{
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &event_ops);
+ 0, false, &perf_timechart);
int ret = -EINVAL;
if (session == NULL)
@@ -994,7 +1000,7 @@ static int __cmd_timechart(void)
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
- ret = perf_session__process_events(session, &event_ops);
+ ret = perf_session__process_events(session, &perf_timechart);
if (ret)
goto out_delete;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c9cdedb5813..4f81eeb9987 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -64,44 +64,6 @@
#include <linux/unistd.h>
#include <linux/types.h>
-static struct perf_top top = {
- .count_filter = 5,
- .delay_secs = 2,
- .target_pid = -1,
- .target_tid = -1,
- .freq = 1000, /* 1 KHz */
-};
-
-static bool system_wide = false;
-
-static bool use_tui, use_stdio;
-
-static bool sort_has_symbols;
-
-static bool dont_use_callchains;
-static char callchain_default_opt[] = "fractal,0.5,callee";
-
-
-static int default_interval = 0;
-
-static bool kptr_restrict_warned;
-static bool vmlinux_warned;
-static bool inherit = false;
-static int realtime_prio = 0;
-static bool group = false;
-static bool sample_id_all_avail = true;
-static unsigned int mmap_pages = 128;
-
-static bool dump_symtab = false;
-
-static struct winsize winsize;
-
-static const char *sym_filter = NULL;
-static int sym_pcnt_filter = 5;
-
-/*
- * Source functions
- */
void get_term_dimensions(struct winsize *ws)
{
@@ -125,21 +87,23 @@ void get_term_dimensions(struct winsize *ws)
ws->ws_col = 80;
}
-static void update_print_entries(struct winsize *ws)
+static void perf_top__update_print_entries(struct perf_top *top)
{
- top.print_entries = ws->ws_row;
+ top->print_entries = top->winsize.ws_row;
- if (top.print_entries > 9)
- top.print_entries -= 9;
+ if (top->print_entries > 9)
+ top->print_entries -= 9;
}
-static void sig_winch_handler(int sig __used)
+static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
{
- get_term_dimensions(&winsize);
- update_print_entries(&winsize);
+ struct perf_top *top = arg;
+
+ get_term_dimensions(&top->winsize);
+ perf_top__update_print_entries(top);
}
-static int parse_source(struct hist_entry *he)
+static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
struct symbol *sym;
struct annotation *notes;
@@ -170,7 +134,7 @@ static int parse_source(struct hist_entry *he)
pthread_mutex_lock(&notes->lock);
- if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ if (symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -181,7 +145,7 @@ static int parse_source(struct hist_entry *he)
err = symbol__annotate(sym, map, 0);
if (err == 0) {
out_assign:
- top.sym_filter_entry = he;
+ top->sym_filter_entry = he;
}
pthread_mutex_unlock(&notes->lock);
@@ -194,14 +158,16 @@ static void __zero_source_counters(struct hist_entry *he)
symbol__annotate_zero_histograms(sym);
}
-static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
+static void perf_top__record_precise_ip(struct perf_top *top,
+ struct hist_entry *he,
+ int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
if (he == NULL || he->ms.sym == NULL ||
- ((top.sym_filter_entry == NULL ||
- top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
+ ((top->sym_filter_entry == NULL ||
+ top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
return;
sym = he->ms.sym;
@@ -210,8 +176,7 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
if (pthread_mutex_trylock(&notes->lock))
return;
- if (notes->src == NULL &&
- symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -225,8 +190,9 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
pthread_mutex_unlock(&notes->lock);
}
-static void show_details(struct hist_entry *he)
+static void perf_top__show_details(struct perf_top *top)
{
+ struct hist_entry *he = top->sym_filter_entry;
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -242,15 +208,15 @@ static void show_details(struct hist_entry *he)
if (notes->src == NULL)
goto out_unlock;
- printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
+ printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+ printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
- more = symbol__annotate_printf(symbol, he->ms.map, top.sym_evsel->idx,
- 0, sym_pcnt_filter, top.print_entries, 4);
- if (top.zero)
- symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
+ more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
+ 0, top->sym_pcnt_filter, top->print_entries, 4);
+ if (top->zero)
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
else
- symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
@@ -259,11 +225,9 @@ out_unlock:
static const char CONSOLE_CLEAR[] = "";
-static struct hist_entry *
- perf_session__add_hist_entry(struct perf_session *session,
- struct addr_location *al,
- struct perf_sample *sample,
- struct perf_evsel *evsel)
+static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
+ struct addr_location *al,
+ struct perf_sample *sample)
{
struct hist_entry *he;
@@ -271,50 +235,51 @@ static struct hist_entry *
if (he == NULL)
return NULL;
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return he;
}
-static void print_sym_table(void)
+static void perf_top__print_sym_table(struct perf_top *top)
{
char bf[160];
int printed = 0;
- const int win_width = winsize.ws_col - 1;
+ const int win_width = top->winsize.ws_col - 1;
puts(CONSOLE_CLEAR);
- perf_top__header_snprintf(&top, bf, sizeof(bf));
+ perf_top__header_snprintf(top, bf, sizeof(bf));
printf("%s\n", bf);
- perf_top__reset_sample_counters(&top);
+ perf_top__reset_sample_counters(top);
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
- if (top.sym_evsel->hists.stats.nr_lost_warned !=
- top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
- top.sym_evsel->hists.stats.nr_lost_warned =
- top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+ if (top->sym_evsel->hists.stats.nr_lost_warned !=
+ top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
+ top->sym_evsel->hists.stats.nr_lost_warned =
+ top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
- top.sym_evsel->hists.stats.nr_lost_warned);
+ top->sym_evsel->hists.stats.nr_lost_warned);
++printed;
}
- if (top.sym_filter_entry) {
- show_details(top.sym_filter_entry);
+ if (top->sym_filter_entry) {
+ perf_top__show_details(top);
return;
}
- hists__collapse_resort_threaded(&top.sym_evsel->hists);
- hists__output_resort_threaded(&top.sym_evsel->hists);
- hists__decay_entries_threaded(&top.sym_evsel->hists,
- top.hide_user_symbols,
- top.hide_kernel_symbols);
- hists__output_recalc_col_len(&top.sym_evsel->hists, winsize.ws_row - 3);
+ hists__collapse_resort_threaded(&top->sym_evsel->hists);
+ hists__output_resort_threaded(&top->sym_evsel->hists);
+ hists__decay_entries_threaded(&top->sym_evsel->hists,
+ top->hide_user_symbols,
+ top->hide_kernel_symbols);
+ hists__output_recalc_col_len(&top->sym_evsel->hists,
+ top->winsize.ws_row - 3);
putchar('\n');
- hists__fprintf(&top.sym_evsel->hists, NULL, false, false,
- winsize.ws_row - 4 - printed, win_width, stdout);
+ hists__fprintf(&top->sym_evsel->hists, NULL, false, false,
+ top->winsize.ws_row - 4 - printed, win_width, stdout);
}
static void prompt_integer(int *target, const char *msg)
@@ -352,17 +317,17 @@ static void prompt_percent(int *target, const char *msg)
*target = tmp;
}
-static void prompt_symbol(struct hist_entry **target, const char *msg)
+static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
char *buf = malloc(0), *p;
- struct hist_entry *syme = *target, *n, *found = NULL;
+ struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
struct rb_node *next;
size_t dummy = 0;
/* zero counters of active symbol */
if (syme) {
__zero_source_counters(syme);
- *target = NULL;
+ top->sym_filter_entry = NULL;
}
fprintf(stdout, "\n%s: ", msg);
@@ -373,7 +338,7 @@ static void prompt_symbol(struct hist_entry **target, const char *msg)
if (p)
*p = 0;
- next = rb_first(&top.sym_evsel->hists.entries);
+ next = rb_first(&top->sym_evsel->hists.entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -386,47 +351,46 @@ static void prompt_symbol(struct hist_entry **target, const char *msg)
if (!found) {
fprintf(stderr, "Sorry, %s is not active.\n", buf);
sleep(1);
- return;
} else
- parse_source(found);
+ perf_top__parse_source(top, found);
out_free:
free(buf);
}
-static void print_mapped_keys(void)
+static void perf_top__print_mapped_keys(struct perf_top *top)
{
char *name = NULL;
- if (top.sym_filter_entry) {
- struct symbol *sym = top.sym_filter_entry->ms.sym;
+ if (top->sym_filter_entry) {
+ struct symbol *sym = top->sym_filter_entry->ms.sym;
name = sym->name;
}
fprintf(stdout, "\nMapped keys:\n");
- fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top.delay_secs);
- fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top.print_entries);
+ fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
+ fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
- if (top.evlist->nr_entries > 1)
- fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top.sym_evsel));
+ if (top->evlist->nr_entries > 1)
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel));
- fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top.count_filter);
+ fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
"\t[K] hide kernel_symbols symbols. \t(%s)\n",
- top.hide_kernel_symbols ? "yes" : "no");
+ top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
- top.hide_user_symbols ? "yes" : "no");
- fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top.zero ? 1 : 0);
+ top->hide_user_symbols ? "yes" : "no");
+ fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
fprintf(stdout, "\t[qQ] quit.\n");
}
-static int key_mapped(int c)
+static int perf_top__key_mapped(struct perf_top *top, int c)
{
switch (c) {
case 'd':
@@ -442,7 +406,7 @@ static int key_mapped(int c)
case 'S':
return 1;
case 'E':
- return top.evlist->nr_entries > 1 ? 1 : 0;
+ return top->evlist->nr_entries > 1 ? 1 : 0;
default:
break;
}
@@ -450,13 +414,13 @@ static int key_mapped(int c)
return 0;
}
-static void handle_keypress(int c)
+static void perf_top__handle_keypress(struct perf_top *top, int c)
{
- if (!key_mapped(c)) {
+ if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
- print_mapped_keys();
+ perf_top__print_mapped_keys(top);
fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
fflush(stdout);
@@ -471,81 +435,86 @@ static void handle_keypress(int c)
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
- if (!key_mapped(c))
+ if (!perf_top__key_mapped(top, c))
return;
}
switch (c) {
case 'd':
- prompt_integer(&top.delay_secs, "Enter display delay");
- if (top.delay_secs < 1)
- top.delay_secs = 1;
+ prompt_integer(&top->delay_secs, "Enter display delay");
+ if (top->delay_secs < 1)
+ top->delay_secs = 1;
break;
case 'e':
- prompt_integer(&top.print_entries, "Enter display entries (lines)");
- if (top.print_entries == 0) {
- sig_winch_handler(SIGWINCH);
- signal(SIGWINCH, sig_winch_handler);
+ prompt_integer(&top->print_entries, "Enter display entries (lines)");
+ if (top->print_entries == 0) {
+ struct sigaction act = {
+ .sa_sigaction = perf_top__sig_winch,
+ .sa_flags = SA_SIGINFO,
+ };
+ perf_top__sig_winch(SIGWINCH, NULL, top);
+ sigaction(SIGWINCH, &act, NULL);
} else
signal(SIGWINCH, SIG_DFL);
break;
case 'E':
- if (top.evlist->nr_entries > 1) {
+ if (top->evlist->nr_entries > 1) {
/* Select 0 as the default event: */
int counter = 0;
fprintf(stderr, "\nAvailable events:");
- list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
- fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));
+ list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
- if (counter >= top.evlist->nr_entries) {
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
- fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
+ if (counter >= top->evlist->nr_entries) {
+ top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
sleep(1);
break;
}
- list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
- if (top.sym_evsel->idx == counter)
+ list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
+ if (top->sym_evsel->idx == counter)
break;
} else
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
break;
case 'f':
- prompt_integer(&top.count_filter, "Enter display event count filter");
+ prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
- prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
+ prompt_percent(&top->sym_pcnt_filter,
+ "Enter details display event filter (percent)");
break;
case 'K':
- top.hide_kernel_symbols = !top.hide_kernel_symbols;
+ top->hide_kernel_symbols = !top->hide_kernel_symbols;
break;
case 'q':
case 'Q':
printf("exiting.\n");
- if (dump_symtab)
- perf_session__fprintf_dsos(top.session, stderr);
+ if (top->dump_symtab)
+ perf_session__fprintf_dsos(top->session, stderr);
exit(0);
case 's':
- prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
+ perf_top__prompt_symbol(top, "Enter details symbol");
break;
case 'S':
- if (!top.sym_filter_entry)
+ if (!top->sym_filter_entry)
break;
else {
- struct hist_entry *syme = top.sym_filter_entry;
+ struct hist_entry *syme = top->sym_filter_entry;
- top.sym_filter_entry = NULL;
+ top->sym_filter_entry = NULL;
__zero_source_counters(syme);
}
break;
case 'U':
- top.hide_user_symbols = !top.hide_user_symbols;
+ top->hide_user_symbols = !top->hide_user_symbols;
break;
case 'z':
- top.zero = !top.zero;
+ top->zero = !top->zero;
break;
default:
break;
@@ -563,28 +532,30 @@ static void perf_top__sort_new_samples(void *arg)
hists__collapse_resort_threaded(&t->sym_evsel->hists);
hists__output_resort_threaded(&t->sym_evsel->hists);
hists__decay_entries_threaded(&t->sym_evsel->hists,
- top.hide_user_symbols,
- top.hide_kernel_symbols);
+ t->hide_user_symbols,
+ t->hide_kernel_symbols);
}
-static void *display_thread_tui(void *arg __used)
+static void *display_thread_tui(void *arg)
{
+ struct perf_top *top = arg;
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
- perf_top__sort_new_samples(&top);
- perf_evlist__tui_browse_hists(top.evlist, help,
+ perf_top__sort_new_samples(top);
+ perf_evlist__tui_browse_hists(top->evlist, help,
perf_top__sort_new_samples,
- &top, top.delay_secs);
+ top, top->delay_secs);
exit_browser(0);
exit(0);
return NULL;
}
-static void *display_thread(void *arg __used)
+static void *display_thread(void *arg)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
+ struct perf_top *top = arg;
int delay_msecs, c;
tcgetattr(0, &save);
@@ -595,13 +566,13 @@ static void *display_thread(void *arg __used)
pthread__unblock_sigwinch();
repeat:
- delay_msecs = top.delay_secs * 1000;
+ delay_msecs = top->delay_secs * 1000;
tcsetattr(0, TCSANOW, &tc);
/* trash return*/
getc(stdin);
while (1) {
- print_sym_table();
+ perf_top__print_sym_table(top);
/*
* Either timeout expired or we got an EINTR due to SIGWINCH,
* refresh screen in both cases.
@@ -621,7 +592,7 @@ process_hotkey:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
- handle_keypress(c);
+ perf_top__handle_keypress(top, c);
goto repeat;
return NULL;
@@ -673,47 +644,17 @@ static int symbol_filter(struct map *map __used, struct symbol *sym)
return 0;
}
-static void perf_event__process_sample(const union perf_event *event,
+static void perf_event__process_sample(struct perf_tool *tool,
+ const union perf_event *event,
struct perf_evsel *evsel,
struct perf_sample *sample,
- struct perf_session *session)
+ struct machine *machine)
{
+ struct perf_top *top = container_of(tool, struct perf_top, tool);
struct symbol *parent = NULL;
u64 ip = event->ip.ip;
struct addr_location al;
- struct machine *machine;
int err;
- u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
- ++top.samples;
-
- switch (origin) {
- case PERF_RECORD_MISC_USER:
- ++top.us_samples;
- if (top.hide_user_symbols)
- return;
- machine = perf_session__find_host_machine(session);
- break;
- case PERF_RECORD_MISC_KERNEL:
- ++top.kernel_samples;
- if (top.hide_kernel_symbols)
- return;
- machine = perf_session__find_host_machine(session);
- break;
- case PERF_RECORD_MISC_GUEST_KERNEL:
- ++top.guest_kernel_samples;
- machine = perf_session__find_machine(session, event->ip.pid);
- break;
- case PERF_RECORD_MISC_GUEST_USER:
- ++top.guest_us_samples;
- /*
- * TODO: we don't process guest user from host side
- * except simple counting.
- */
- return;
- default:
- return;
- }
if (!machine && perf_guest) {
pr_err("Can't find guest [%d]'s kernel information\n",
@@ -722,14 +663,14 @@ static void perf_event__process_sample(const union perf_event *event,
}
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
- top.exact_samples++;
+ top->exact_samples++;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol_filter) < 0 ||
al.filtered)
return;
- if (!kptr_restrict_warned &&
+ if (!top->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
ui__warning(
@@ -740,7 +681,7 @@ static void perf_event__process_sample(const union perf_event *event,
" modules" : "");
if (use_browser <= 0)
sleep(5);
- kptr_restrict_warned = true;
+ top->kptr_restrict_warned = true;
}
if (al.sym == NULL) {
@@ -756,7 +697,7 @@ static void perf_event__process_sample(const union perf_event *event,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (!kptr_restrict_warned && !vmlinux_warned &&
+ if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
if (symbol_conf.vmlinux_name) {
@@ -769,7 +710,7 @@ static void perf_event__process_sample(const union perf_event *event,
if (use_browser <= 0)
sleep(5);
- vmlinux_warned = true;
+ top->vmlinux_warned = true;
}
}
@@ -778,70 +719,109 @@ static void perf_event__process_sample(const union perf_event *event,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
- err = perf_session__resolve_callchain(session, al.thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al.thread,
+ sample->callchain, &parent);
if (err)
return;
}
- he = perf_session__add_hist_entry(session, &al, sample, evsel);
+ he = perf_evsel__add_hist_entry(evsel, &al, sample);
if (he == NULL) {
pr_err("Problem incrementing symbol period, skipping event\n");
return;
}
if (symbol_conf.use_callchain) {
- err = callchain_append(he->callchain, &session->callchain_cursor,
+ err = callchain_append(he->callchain, &evsel->hists.callchain_cursor,
sample->period);
if (err)
return;
}
- if (sort_has_symbols)
- record_precise_ip(he, evsel->idx, ip);
+ if (top->sort_has_symbols)
+ perf_top__record_precise_ip(top, he, evsel->idx, ip);
}
return;
}
-static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
+static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct perf_sample sample;
struct perf_evsel *evsel;
+ struct perf_session *session = top->session;
union perf_event *event;
+ struct machine *machine;
+ u8 origin;
int ret;
- while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
- ret = perf_session__parse_sample(self, event, &sample);
+ while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
+ ret = perf_session__parse_sample(session, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
}
- evsel = perf_evlist__id2evsel(self->evlist, sample.id);
+ evsel = perf_evlist__id2evsel(session->evlist, sample.id);
assert(evsel != NULL);
+ origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
if (event->header.type == PERF_RECORD_SAMPLE)
- perf_event__process_sample(event, evsel, &sample, self);
- else if (event->header.type < PERF_RECORD_MAX) {
+ ++top->samples;
+
+ switch (origin) {
+ case PERF_RECORD_MISC_USER:
+ ++top->us_samples;
+ if (top->hide_user_symbols)
+ continue;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_KERNEL:
+ ++top->kernel_samples;
+ if (top->hide_kernel_symbols)
+ continue;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ ++top->guest_kernel_samples;
+ machine = perf_session__find_machine(session, event->ip.pid);
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ ++top->guest_us_samples;
+ /*
+ * TODO: we don't process guest user from host side
+ * except simple counting.
+ */
+ /* Fall thru */
+ default:
+ continue;
+ }
+
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ perf_event__process_sample(&top->tool, event, evsel,
+ &sample, machine);
+ } else if (event->header.type < PERF_RECORD_MAX) {
hists__inc_nr_events(&evsel->hists, event->header.type);
- perf_event__process(event, &sample, self);
+ perf_event__process(&top->tool, event, &sample, machine);
} else
- ++self->hists.stats.nr_unknown_events;
+ ++session->hists.stats.nr_unknown_events;
}
}
-static void perf_session__mmap_read(struct perf_session *self)
+static void perf_top__mmap_read(struct perf_top *top)
{
int i;
- for (i = 0; i < top.evlist->nr_mmaps; i++)
- perf_session__mmap_read_idx(self, i);
+ for (i = 0; i < top->evlist->nr_mmaps; i++)
+ perf_top__mmap_read_idx(top, i);
}
-static void start_counters(struct perf_evlist *evlist)
+static void perf_top__start_counters(struct perf_top *top)
{
struct perf_evsel *counter, *first;
+ struct perf_evlist *evlist = top->evlist;
first = list_entry(evlist->entries.next, struct perf_evsel, node);
@@ -849,15 +829,15 @@ static void start_counters(struct perf_evlist *evlist)
struct perf_event_attr *attr = &counter->attr;
struct xyarray *group_fd = NULL;
- if (group && counter != first)
+ if (top->group && counter != first)
group_fd = first->fd;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- if (top.freq) {
+ if (top->freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
- attr->sample_freq = top.freq;
+ attr->sample_freq = top->freq;
}
if (evlist->nr_entries > 1) {
@@ -870,23 +850,23 @@ static void start_counters(struct perf_evlist *evlist)
attr->mmap = 1;
attr->comm = 1;
- attr->inherit = inherit;
+ attr->inherit = top->inherit;
retry_sample_id:
- attr->sample_id_all = sample_id_all_avail ? 1 : 0;
+ attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(counter, top.evlist->cpus,
- top.evlist->threads, group,
+ if (perf_evsel__open(counter, top->evlist->cpus,
+ top->evlist->threads, top->group,
group_fd) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
goto out_err;
- } else if (err == EINVAL && sample_id_all_avail) {
+ } else if (err == EINVAL && top->sample_id_all_avail) {
/*
* Old kernel, no attr->sample_id_type_all field
*/
- sample_id_all_avail = false;
+ top->sample_id_all_avail = false;
goto retry_sample_id;
}
/*
@@ -920,7 +900,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
ui__warning("Failed to mmap with %d (%s)\n",
errno, strerror(errno));
goto out_err;
@@ -933,14 +913,14 @@ out_err:
exit(0);
}
-static int setup_sample_type(void)
+static int perf_top__setup_sample_type(struct perf_top *top)
{
- if (!sort_has_symbols) {
+ if (!top->sort_has_symbols) {
if (symbol_conf.use_callchain) {
ui__warning("Selected -g but \"sym\" not present in --sort/-s.");
return -EINVAL;
}
- } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
+ } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
if (callchain_register_param(&callchain_param) < 0) {
ui__warning("Can't register callchain params.\n");
return -EINVAL;
@@ -950,7 +930,7 @@ static int setup_sample_type(void)
return 0;
}
-static int __cmd_top(void)
+static int __cmd_top(struct perf_top *top)
{
pthread_t thread;
int ret;
@@ -958,39 +938,40 @@ static int __cmd_top(void)
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
*/
- top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
- if (top.session == NULL)
+ top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
+ if (top->session == NULL)
return -ENOMEM;
- ret = setup_sample_type();
+ ret = perf_top__setup_sample_type(top);
if (ret)
goto out_delete;
- if (top.target_tid != -1)
- perf_event__synthesize_thread_map(top.evlist->threads,
- perf_event__process, top.session);
+ if (top->target_tid != -1)
+ perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
+ perf_event__process,
+ &top->session->host_machine);
else
- perf_event__synthesize_threads(perf_event__process, top.session);
-
- start_counters(top.evlist);
- top.session->evlist = top.evlist;
- perf_session__update_sample_type(top.session);
+ perf_event__synthesize_threads(&top->tool, perf_event__process,
+ &top->session->host_machine);
+ perf_top__start_counters(top);
+ top->session->evlist = top->evlist;
+ perf_session__update_sample_type(top->session);
/* Wait for a minimal set of events before starting the snapshot */
- poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+ poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
- perf_session__mmap_read(top.session);
+ perf_top__mmap_read(top);
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
- display_thread), NULL)) {
+ display_thread), top)) {
printf("Could not create display thread.\n");
exit(-1);
}
- if (realtime_prio) {
+ if (top->realtime_prio) {
struct sched_param param;
- param.sched_priority = realtime_prio;
+ param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
printf("Could not set realtime priority.\n");
exit(-1);
@@ -998,25 +979,25 @@ static int __cmd_top(void)
}
while (1) {
- u64 hits = top.samples;
+ u64 hits = top->samples;
- perf_session__mmap_read(top.session);
+ perf_top__mmap_read(top);
- if (hits == top.samples)
- ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
+ if (hits == top->samples)
+ ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
}
out_delete:
- perf_session__delete(top.session);
- top.session = NULL;
+ perf_session__delete(top->session);
+ top->session = NULL;
return 0;
}
static int
-parse_callchain_opt(const struct option *opt __used, const char *arg,
- int unset)
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
+ struct perf_top *top = (struct perf_top *)opt->value;
char *tok, *tok2;
char *endptr;
@@ -1024,7 +1005,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
* --no-call-graph
*/
if (unset) {
- dont_use_callchains = true;
+ top->dont_use_callchains = true;
return 0;
}
@@ -1052,9 +1033,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
symbol_conf.use_callchain = false;
return 0;
- }
-
- else
+ } else
return -1;
/* get the min percentage */
@@ -1098,17 +1077,32 @@ static const char * const top_usage[] = {
NULL
};
-static const struct option options[] = {
+int cmd_top(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_evsel *pos;
+ int status = -ENOMEM;
+ struct perf_top top = {
+ .count_filter = 5,
+ .delay_secs = 2,
+ .target_pid = -1,
+ .target_tid = -1,
+ .freq = 1000, /* 1 KHz */
+ .sample_id_all_avail = true,
+ .mmap_pages = 128,
+ .sym_pcnt_filter = 5,
+ };
+ char callchain_default_opt[] = "fractal,0.5,callee";
+ const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
- OPT_INTEGER('c', "count", &default_interval,
+ OPT_INTEGER('c', "count", &top.default_interval,
"event period to sample"),
OPT_INTEGER('p', "pid", &top.target_pid,
"profile events on existing process id"),
OPT_INTEGER('t', "tid", &top.target_tid,
"profile events on existing thread id"),
- OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &top.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
"list of cpus to monitor"),
@@ -1116,20 +1110,20 @@ static const struct option options[] = {
"file", "vmlinux pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
- OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
- OPT_INTEGER('r', "realtime", &realtime_prio,
+ OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
+ OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
"number of seconds to delay between refreshes"),
- OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
+ OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
- OPT_BOOLEAN('g', "group", &group,
+ OPT_BOOLEAN('g', "group", &top.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('i', "inherit", &inherit,
+ OPT_BOOLEAN('i', "inherit", &top.inherit,
"child tasks inherit counters"),
- OPT_STRING(0, "sym-annotate", &sym_filter, "symbol name",
+ OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero,
"zero history across updates"),
@@ -1139,15 +1133,15 @@ static const struct option options[] = {
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
- OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
- OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
+ OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
+ OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
- OPT_CALLBACK_DEFAULT('G', "call-graph", NULL, "output_type,min_percent, call_order",
+ OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
"Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
"Default: fractal,0.5,callee", &parse_callchain_opt,
callchain_default_opt),
@@ -1166,12 +1160,7 @@ static const struct option options[] = {
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_END()
-};
-
-int cmd_top(int argc, const char **argv, const char *prefix __used)
-{
- struct perf_evsel *pos;
- int status = -ENOMEM;
+ };
top.evlist = perf_evlist__new(NULL, NULL);
if (top.evlist == NULL)
@@ -1188,9 +1177,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
setup_sorting(top_usage, options);
- if (use_stdio)
+ if (top.use_stdio)
use_browser = 0;
- else if (use_tui)
+ else if (top.use_tui)
use_browser = 1;
setup_browser(false);
@@ -1215,38 +1204,31 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
return -ENOMEM;
}
+ symbol_conf.nr_events = top.evlist->nr_entries;
+
if (top.delay_secs < 1)
top.delay_secs = 1;
/*
* User specified count overrides default frequency.
*/
- if (default_interval)
+ if (top.default_interval)
top.freq = 0;
else if (top.freq) {
- default_interval = top.freq;
+ top.default_interval = top.freq;
} else {
fprintf(stderr, "frequency and count are zero, aborting\n");
exit(EXIT_FAILURE);
}
list_for_each_entry(pos, &top.evlist->entries, node) {
- if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
- top.evlist->threads->nr) < 0)
- goto out_free_fd;
/*
* Fill in the ones not specifically initialized via -c:
*/
- if (pos->attr.sample_period)
- continue;
-
- pos->attr.sample_period = default_interval;
+ if (!pos->attr.sample_period)
+ pos->attr.sample_period = top.default_interval;
}
- if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
- perf_evlist__alloc_mmap(top.evlist) < 0)
- goto out_free_fd;
-
top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
symbol_conf.priv_size = sizeof(struct annotation);
@@ -1263,16 +1245,20 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
* Avoid annotation data structures overhead when symbols aren't on the
* sort list.
*/
- sort_has_symbols = sort_sym.list.next != NULL;
+ top.sort_has_symbols = sort_sym.list.next != NULL;
- get_term_dimensions(&winsize);
+ get_term_dimensions(&top.winsize);
if (top.print_entries == 0) {
- update_print_entries(&winsize);
- signal(SIGWINCH, sig_winch_handler);
+ struct sigaction act = {
+ .sa_sigaction = perf_top__sig_winch,
+ .sa_flags = SA_SIGINFO,
+ };
+ perf_top__update_print_entries(&top);
+ sigaction(SIGWINCH, &act, NULL);
}
- status = __cmd_top();
-out_free_fd:
+ status = __cmd_top(&top);
+
perf_evlist__delete(top.evlist);
return status;
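The builtin-top.c changes above fold the former file-scope globals into struct perf_top, pass the struct to every helper, and let perf_event__process_sample() recover it from the embedded struct perf_tool with container_of(), which in turn allows cmd_top() to build the whole state on the stack in the final hunk. A reduced, self-contained sketch of that embed-and-recover pattern; the types and field names here are illustrative, not the actual perf structures:

/*
 * Sketch: the state struct embeds the callback table, and the handler
 * gets the state back via container_of().  Not the perf API, just the
 * pattern the hunks above apply.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {
	void (*sample)(struct tool *tool, unsigned long ip);
};

struct top {
	struct tool tool;	/* must be embedded, not pointed to */
	unsigned long samples;
};

static void top_sample(struct tool *tool, unsigned long ip)
{
	struct top *top = container_of(tool, struct top, tool);

	(void)ip;		/* a real handler would resolve this address */
	top->samples++;		/* per-instance state, no globals needed */
}

int main(void)
{
	struct top top = { .tool = { .sample = top_sample } };

	top.tool.sample(&top.tool, 0x1234);
	printf("samples: %lu\n", top.samples);
	return 0;
}

Embedding the tool rather than keeping globals is what makes the state instance-local: it lives and dies with the struct perf_top that cmd_top() now declares on its stack.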
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 73d0cac8b67..2b2e225a4d4 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -29,8 +29,6 @@ struct pager_config {
int val;
};
-static char debugfs_mntpt[MAXPATHLEN];
-
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
@@ -81,15 +79,6 @@ static void commit_pager_choice(void)
}
}
-static void set_debugfs_path(void)
-{
- char *path;
-
- path = getenv(PERF_DEBUGFS_ENVIRONMENT);
- snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
- "tracing/events");
-}
-
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
@@ -161,15 +150,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
- strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
- debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ debugfs_set_path((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
- strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
- debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
+ fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
} else {
@@ -281,7 +269,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
if (use_pager == -1 && p->option & USE_PAGER)
use_pager = 1;
commit_pager_choice();
- set_debugfs_path();
status = p->fn(argc, argv, prefix);
exit_browser(status);
@@ -416,17 +403,6 @@ static int run_argv(int *argcp, const char ***argv)
return done_alias;
}
-/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
-static void get_debugfs_mntpt(void)
-{
- const char *path = debugfs_mount(NULL);
-
- if (path)
- strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
- else
- debugfs_mntpt[0] = '\0';
-}
-
static void pthread__block_sigwinch(void)
{
sigset_t set;
@@ -453,7 +429,7 @@ int main(int argc, const char **argv)
if (!cmd)
cmd = "perf-help";
/* get debugfs mount point from /proc/mounts */
- get_debugfs_mntpt();
+ debugfs_mount(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
@@ -476,7 +452,6 @@ int main(int argc, const char **argv)
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
- set_debugfs_path();
set_buildid_dir();
if (argc > 0) {
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 914c895510f..64f8bee31ce 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -185,4 +185,28 @@ extern const char perf_version_string[];
void pthread__unblock_sigwinch(void);
+struct perf_record_opts {
+ pid_t target_pid;
+ pid_t target_tid;
+ bool call_graph;
+ bool group;
+ bool inherit_stat;
+ bool no_delay;
+ bool no_inherit;
+ bool no_samples;
+ bool pipe_output;
+ bool raw_samples;
+ bool sample_address;
+ bool sample_time;
+ bool sample_id_all_avail;
+ bool system_wide;
+ bool period;
+ unsigned int freq;
+ unsigned int mmap_pages;
+ unsigned int user_freq;
+ u64 default_interval;
+ u64 user_interval;
+ const char *cpu_list;
+};
+
#endif
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 119e996035c..011ed267660 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -25,17 +25,17 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
return 0;
}
-int symbol__alloc_hist(struct symbol *sym, int nevents)
+int symbol__alloc_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
(sym->end - sym->start) * sizeof(u64));
- notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
+ notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
if (notes->src == NULL)
return -1;
notes->src->sizeof_sym_hist = sizeof_sym_hist;
- notes->src->nr_histograms = nevents;
+ notes->src->nr_histograms = symbol_conf.nr_events;
INIT_LIST_HEAD(&notes->src->source);
return 0;
}
@@ -334,7 +334,7 @@ fallback:
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
map__rip_2objdump(map, sym->start),
- map__rip_2objdump(map, sym->end),
+ map__rip_2objdump(map, sym->end+1),
symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
symbol_conf.annotate_src ? "-S" : "",
symfs_filename, filename);
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index d9072523d34..efa5dc82bfa 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -72,7 +72,7 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
int evidx, u64 addr);
-int symbol__alloc_hist(struct symbol *sym, int nevents);
+int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
@@ -99,8 +99,7 @@ static inline int symbol__tui_annotate(struct symbol *sym __used,
}
#else
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
- int nr_events, void(*timer)(void *arg), void *arg,
- int delay_secs);
+ void(*timer)(void *arg), void *arg, int delay_secs);
#endif
extern const char *disassembler_style;
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a91cd99f26e..dff9c7a725f 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -13,15 +13,18 @@
#include "symbol.h"
#include <linux/kernel.h>
#include "debug.h"
+#include "session.h"
+#include "tool.h"
-static int build_id__mark_dso_hit(union perf_event *event,
+static int build_id__mark_dso_hit(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session)
+ struct machine *machine)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
@@ -29,8 +32,8 @@ static int build_id__mark_dso_hit(union perf_event *event,
return -1;
}
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -38,25 +41,26 @@ static int build_id__mark_dso_hit(union perf_event *event,
return 0;
}
-static int perf_event__exit_del_thread(union perf_event *event,
+static int perf_event__exit_del_thread(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->fork.tid);
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
if (thread) {
- rb_erase(&thread->rb_node, &session->threads);
- session->last_match = NULL;
+ rb_erase(&thread->rb_node, &machine->threads);
+ machine->last_match = NULL;
thread__delete(thread);
}
return 0;
}
-struct perf_event_ops build_id__mark_dso_hit_ops = {
+struct perf_tool build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
.mmap = perf_event__process_mmap,
.fork = perf_event__process_task,
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 5dafb00eaa0..a993ba87d99 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -3,7 +3,7 @@
#include "session.h"
-extern struct perf_event_ops build_id__mark_dso_hit_ops;
+extern struct perf_tool build_id__mark_dso_hit_ops;
char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 9b4ff16cac9..7f9c0f1ae3a 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -101,6 +101,9 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
+struct ip_callchain;
+union perf_event;
+
bool ip_callchain__valid(struct ip_callchain *chain,
const union perf_event *event);
/*
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 96bee5c4600..dbe2f16b1a1 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -3,7 +3,6 @@
#include "parse-options.h"
#include "evsel.h"
#include "cgroup.h"
-#include "debugfs.h" /* MAX_PATH, STR() */
#include "evlist.h"
int nr_cgroups;
@@ -12,7 +11,7 @@ static int
cgroupfs_find_mountpoint(char *buf, size_t maxlen)
{
FILE *fp;
- char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
+ char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
char *token, *saved_ptr = NULL;
int found = 0;
@@ -25,8 +24,8 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
* and inspect every cgroupfs mount point to find one that has
* perf_event subsystem
*/
- while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
- STR(MAX_PATH)"s %*d %*d\n",
+ while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+ STR(PATH_MAX)"s %*d %*d\n",
mountpoint, type, tokens) == 3) {
if (!strcmp(type, "cgroup")) {
@@ -57,15 +56,15 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
static int open_cgroup(char *name)
{
- char path[MAX_PATH+1];
- char mnt[MAX_PATH+1];
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
int fd;
- if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
return -1;
- snprintf(path, MAX_PATH, "%s/%s", mnt, name);
+ snprintf(path, PATH_MAX, "%s/%s", mnt, name);
fd = open(path, O_RDONLY);
if (fd == -1)
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 80d9598db31..0deac6a14b6 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -1,5 +1,8 @@
/*
- * GIT - The information manager from hell
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index a88fefc0cc0..ffc35e748e8 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -2,8 +2,12 @@
#include "debugfs.h"
#include "cache.h"
+#include <linux/kernel.h>
+#include <sys/mount.h>
+
static int debugfs_premounted;
-static char debugfs_mountpoint[MAX_PATH+1];
+char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
+char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
static const char *debugfs_known_mountpoints[] = {
"/sys/kernel/debug/",
@@ -62,11 +66,9 @@ const char *debugfs_find_mountpoint(void)
/* give up and parse /proc/mounts */
fp = fopen("/proc/mounts", "r");
if (fp == NULL)
- die("Can't open /proc/mounts for read");
+ return NULL;
- while (fscanf(fp, "%*s %"
- STR(MAX_PATH)
- "s %99s %*s %*d %*d\n",
+ while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
debugfs_mountpoint, type) == 2) {
if (strcmp(type, "debugfs") == 0)
break;
@@ -106,6 +108,12 @@ int debugfs_valid_entry(const char *path)
return 0;
}
+static void debugfs_set_tracing_events_path(const char *mountpoint)
+{
+ snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
+ mountpoint, "tracing/events");
+}
+
/* mount the debugfs somewhere if it's not mounted */
char *debugfs_mount(const char *mountpoint)
@@ -113,7 +121,7 @@ char *debugfs_mount(const char *mountpoint)
/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
- return debugfs_mountpoint;
+ goto out;
}
/* if not mounted and no argument */
@@ -129,12 +137,19 @@ char *debugfs_mount(const char *mountpoint)
return NULL;
/* save the mountpoint */
- strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
debugfs_found = 1;
-
+ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+out:
+ debugfs_set_tracing_events_path(debugfs_mountpoint);
return debugfs_mountpoint;
}
+void debugfs_set_path(const char *mountpoint)
+{
+ snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
+ debugfs_set_tracing_events_path(mountpoint);
+}
+
/* umount the debugfs */
int debugfs_umount(void)
@@ -158,7 +173,7 @@ int debugfs_umount(void)
int debugfs_write(const char *entry, const char *value)
{
- char path[MAX_PATH+1];
+ char path[PATH_MAX + 1];
int ret, count;
int fd;
@@ -203,7 +218,7 @@ int debugfs_write(const char *entry, const char *value)
*/
int debugfs_read(const char *entry, char *buffer, size_t size)
{
- char path[MAX_PATH+1];
+ char path[PATH_MAX + 1];
int ret;
int fd;
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 83a02879745..4a878f735eb 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -1,25 +1,18 @@
#ifndef __DEBUGFS_H__
#define __DEBUGFS_H__
-#include <sys/mount.h>
+const char *debugfs_find_mountpoint(void);
+int debugfs_valid_mountpoint(const char *debugfs);
+int debugfs_valid_entry(const char *path);
+char *debugfs_mount(const char *mountpoint);
+int debugfs_umount(void);
+void debugfs_set_path(const char *mountpoint);
+int debugfs_write(const char *entry, const char *value);
+int debugfs_read(const char *entry, char *buffer, size_t size);
+void debugfs_force_cleanup(void);
+int debugfs_make_path(const char *element, char *buffer, int size);
-#ifndef MAX_PATH
-# define MAX_PATH 256
-#endif
-
-#ifndef STR
-# define _STR(x) #x
-# define STR(x) _STR(x)
-#endif
-
-extern const char *debugfs_find_mountpoint(void);
-extern int debugfs_valid_mountpoint(const char *debugfs);
-extern int debugfs_valid_entry(const char *path);
-extern char *debugfs_mount(const char *mountpoint);
-extern int debugfs_umount(void);
-extern int debugfs_write(const char *entry, const char *value);
-extern int debugfs_read(const char *entry, char *buffer, size_t size);
-extern void debugfs_force_cleanup(void);
-extern int debugfs_make_path(const char *element, char *buffer, int size);
+extern char debugfs_mountpoint[];
+extern char tracing_events_path[];
#endif /* __DEBUGFS_H__ */
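The util/debugfs.{c,h} and perf.c hunks above drop the private debugfs_mntpt buffer and the MAX_PATH macro in favour of two exported strings, debugfs_mountpoint and tracing_events_path, which debugfs_set_path() and debugfs_mount() keep in sync by deriving the tracing/events path from whatever mount point is in effect. A simplified model of that path handling; the buffer names mirror the patch, the rest is an illustrative stand-in:

/*
 * Toy model of the exported-path scheme above; not the perf sources.
 */
#include <limits.h>
#include <stdio.h>

static char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
static char tracing_events_path[PATH_MAX + 1] =
		"/sys/kernel/debug/tracing/events";

static void set_tracing_events_path(const char *mountpoint)
{
	snprintf(tracing_events_path, sizeof(tracing_events_path),
		 "%s/tracing/events", mountpoint);
}

static void set_path(const char *mountpoint)
{
	snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint),
		 "%s", mountpoint);
	set_tracing_events_path(mountpoint);
}

int main(void)
{
	set_path("/mnt/dbg");	/* roughly what --debugfs-dir=/mnt/dbg does */
	printf("%s\n%s\n", debugfs_mountpoint, tracing_events_path);
	return 0;
}

Because both strings have sane defaults and every setter rewrites both of them, callers can simply read tracing_events_path instead of recomputing "tracing/events" from a mount point each time.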
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 437f8ca679a..73ddaf06b8e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,7 +1,6 @@
#include <linux/types.h>
#include "event.h"
#include "debug.h"
-#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
@@ -44,36 +43,27 @@ static struct perf_sample synth_sample = {
.period = 1,
};
-static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
- int full, perf_event__handler_t process,
- struct perf_session *session)
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
char filename[PATH_MAX];
char bf[BUFSIZ];
FILE *fp;
size_t size = 0;
- DIR *tasks;
- struct dirent dirent, *next;
- pid_t tgid = 0;
+ pid_t tgid = -1;
snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
fp = fopen(filename, "r");
if (fp == NULL) {
-out_race:
- /*
- * We raced with a task exiting - just return:
- */
pr_debug("couldn't open %s\n", filename);
return 0;
}
- memset(&event->comm, 0, sizeof(event->comm));
-
- while (!event->comm.comm[0] || !event->comm.pid) {
+ while (!comm[0] || (tgid < 0)) {
if (fgets(bf, sizeof(bf), fp) == NULL) {
- pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
- goto out;
+ pr_warning("couldn't get COMM and pgid, malformed %s\n",
+ filename);
+ break;
}
if (memcmp(bf, "Name:", 5) == 0) {
@@ -81,33 +71,65 @@ out_race:
while (*name && isspace(*name))
++name;
size = strlen(name) - 1;
- memcpy(event->comm.comm, name, size++);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+
} else if (memcmp(bf, "Tgid:", 5) == 0) {
char *tgids = bf + 5;
while (*tgids && isspace(*tgids))
++tgids;
- tgid = event->comm.pid = atoi(tgids);
+ tgid = atoi(tgids);
}
}
+ fclose(fp);
+
+ return tgid;
+}
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ int full,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ char filename[PATH_MAX];
+ size_t size;
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ if (tgid < 0)
+ goto out;
+
+ event->comm.pid = tgid;
event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
size = ALIGN(size, sizeof(u64));
- memset(event->comm.comm + size, 0, session->id_hdr_size);
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
- session->id_hdr_size);
+ machine->id_hdr_size);
if (!full) {
event->comm.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
goto out;
}
snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
tasks = opendir(filename);
- if (tasks == NULL)
- goto out_race;
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
@@ -115,22 +137,32 @@ out_race:
if (*end)
continue;
+ /* already have tgid; just want to update the comm */
+ (void) perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+
+ size = strlen(event->comm.comm) + 1;
+ size = ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+
event->comm.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
closedir(tasks);
out:
- fclose(fp);
-
return tgid;
}
-static int perf_event__synthesize_mmap_events(union perf_event *event,
+static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
char filename[PATH_MAX];
FILE *fp;
@@ -193,12 +225,12 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
event->mmap.len -= event->mmap.start;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, session->id_hdr_size);
- event->mmap.header.size += session->id_hdr_size;
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
event->mmap.pid = tgid;
event->mmap.tid = pid;
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
}
@@ -206,14 +238,14 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
return 0;
}
-int perf_event__synthesize_modules(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine)
{
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
union perf_event *event = zalloc((sizeof(event->mmap) +
- session->id_hdr_size));
+ machine->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -243,15 +275,15 @@ int perf_event__synthesize_modules(perf_event__handler_t process,
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, session->id_hdr_size);
- event->mmap.header.size += session->id_hdr_size;
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
event->mmap.start = pos->start;
event->mmap.len = pos->end - pos->start;
event->mmap.pid = machine->pid;
memcpy(event->mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
- process(event, &synth_sample, session);
+ process(tool, event, &synth_sample, machine);
}
free(event);
@@ -260,40 +292,69 @@ int perf_event__synthesize_modules(perf_event__handler_t process,
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
- pid_t pid, perf_event__handler_t process,
- struct perf_session *session)
+ pid_t pid, int full,
+ perf_event__handler_t process,
+ struct perf_tool *tool,
+ struct machine *machine)
{
- pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
- session);
+ pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+ process, machine);
if (tgid == -1)
return -1;
- return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
- process, session);
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine);
}
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event *comm_event, *mmap_event;
- int err = -1, thread;
+ int err = -1, thread, j;
- comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
- threads->map[thread],
- process, session)) {
+ threads->map[thread], 0,
+ process, tool, machine)) {
err = -1;
break;
}
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != threads->map[thread]) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == threads->map[j]) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event,
+ mmap_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine)) {
+ err = -1;
+ break;
+ }
+ }
}
free(mmap_event);
out_free_comm:
@@ -302,19 +363,20 @@ out:
return err;
}
-int perf_event__synthesize_threads(perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
DIR *proc;
struct dirent dirent, *next;
union perf_event *comm_event, *mmap_event;
int err = -1;
- comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
goto out;
- mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
if (mmap_event == NULL)
goto out_free_comm;
@@ -329,8 +391,8 @@ int perf_event__synthesize_threads(perf_event__handler_t process,
if (*end) /* only interested in proper numerical dirents */
continue;
- __event__synthesize_thread(comm_event, mmap_event, pid,
- process, session);
+ __event__synthesize_thread(comm_event, mmap_event, pid, 1,
+ process, tool, machine);
}
closedir(proc);
@@ -365,8 +427,8 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine,
const char *symbol_name)
{
@@ -383,7 +445,7 @@ int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
*/
struct process_symbol_args args = { .name = symbol_name, };
union perf_event *event = zalloc((sizeof(event->mmap) +
- session->id_hdr_size));
+ machine->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -417,25 +479,32 @@ int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
size = ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size) + session->id_hdr_size);
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
event->mmap.pgoff = args.start;
event->mmap.start = map->start;
event->mmap.len = map->end - event->mmap.start;
event->mmap.pid = machine->pid;
- err = process(event, &synth_sample, session);
+ err = process(tool, event, &synth_sample, machine);
free(event);
return err;
}
-int perf_event__process_comm(union perf_event *event,
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+}
+
+int perf_event__process_comm(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->comm.tid);
+ struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
- dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
+ if (dump_trace)
+ perf_event__fprintf_comm(event, stdout);
if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
@@ -445,13 +514,13 @@ int perf_event__process_comm(union perf_event *event,
return 0;
}
-int perf_event__process_lost(union perf_event *event,
+int perf_event__process_lost(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine __used)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
event->lost.id, event->lost.lost);
- session->hists.stats.total_lost += event->lost.lost;
return 0;
}
@@ -468,21 +537,15 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event,
maps[MAP__FUNCTION]->end = ~0ULL;
}
-static int perf_event__process_kernel_mmap(union perf_event *event,
- struct perf_session *session)
+static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
+ union perf_event *event,
+ struct machine *machine)
{
struct map *map;
char kmmap_prefix[PATH_MAX];
- struct machine *machine;
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
- machine = perf_session__findnew_machine(session, event->mmap.pid);
- if (!machine) {
- pr_err("Can't find id %d's machine\n", event->mmap.pid);
- goto out_problem;
- }
-
machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
if (machine__is_host(machine))
kernel_type = DSO_TYPE_KERNEL;
@@ -549,9 +612,9 @@ static int perf_event__process_kernel_mmap(union perf_event *event,
* time /proc/sys/kernel/kptr_restrict was non zero.
*/
if (event->mmap.pgoff != 0) {
- perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
- symbol_name,
- event->mmap.pgoff);
+ maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
}
if (machine__is_default_guest(machine)) {
@@ -567,32 +630,35 @@ out_problem:
return -1;
}
-int perf_event__process_mmap(union perf_event *event,
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+}
+
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct machine *machine;
struct thread *thread;
struct map *map;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
int ret = 0;
- dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
- event->mmap.pid, event->mmap.tid, event->mmap.start,
- event->mmap.len, event->mmap.pgoff, event->mmap.filename);
+ if (dump_trace)
+ perf_event__fprintf_mmap(event, stdout);
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
cpumode == PERF_RECORD_MISC_KERNEL) {
- ret = perf_event__process_kernel_mmap(event, session);
+ ret = perf_event__process_kernel_mmap(tool, event, machine);
if (ret < 0)
goto out_problem;
return 0;
}
- machine = perf_session__find_host_machine(session);
- if (machine == NULL)
- goto out_problem;
- thread = perf_session__findnew(session, event->mmap.pid);
+ thread = machine__findnew_thread(machine, event->mmap.pid);
if (thread == NULL)
goto out_problem;
map = map__new(&machine->user_dsos, event->mmap.start,
@@ -610,18 +676,26 @@ out_problem:
return 0;
}
-int perf_event__process_task(union perf_event *event,
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, "(%d:%d):(%d:%d)\n",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+}
+
+int perf_event__process_task(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_sample *sample __used,
- struct perf_session *session)
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, event->fork.tid);
- struct thread *parent = perf_session__findnew(session, event->fork.ptid);
+ struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
- dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
- event->fork.ppid, event->fork.ptid);
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
if (event->header.type == PERF_RECORD_EXIT) {
- perf_session__remove_thread(session, thread);
+ machine__remove_thread(machine, thread);
return 0;
}
@@ -634,22 +708,45 @@ int perf_event__process_task(union perf_event *event,
return 0;
}
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session)
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
+{
+ size_t ret = fprintf(fp, "PERF_RECORD_%s",
+ perf_event__name(event->header.type));
+
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ ret += perf_event__fprintf_comm(event, fp);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ ret += perf_event__fprintf_task(event, fp);
+ break;
+ case PERF_RECORD_MMAP:
+ ret += perf_event__fprintf_mmap(event, fp);
+ break;
+ default:
+ ret += fprintf(fp, "\n");
+ }
+
+ return ret;
+}
+
+int perf_event__process(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
- perf_event__process_comm(event, sample, session);
+ perf_event__process_comm(tool, event, sample, machine);
break;
case PERF_RECORD_MMAP:
- perf_event__process_mmap(event, sample, session);
+ perf_event__process_mmap(tool, event, sample, machine);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
- perf_event__process_task(event, sample, session);
+ perf_event__process_task(tool, event, sample, machine);
break;
case PERF_RECORD_LOST:
- perf_event__process_lost(event, sample, session);
+ perf_event__process_lost(tool, event, sample, machine);
default:
break;
}
@@ -658,36 +755,29 @@ int perf_event__process(union perf_event *event, struct perf_sample *sample,
}
void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+ struct machine *machine, u8 cpumode,
+ enum map_type type, u64 addr,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
- struct machine *machine = NULL;
al->thread = self;
al->addr = addr;
al->cpumode = cpumode;
al->filtered = false;
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- machine = perf_session__find_host_machine(session);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
- machine = perf_session__find_host_machine(session);
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
- machine = perf_session__find_machine(session, pid);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
} else {
/*
@@ -733,13 +823,12 @@ try_again:
al->addr = al->map->map_ip(al->map, al->addr);
}
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
{
- thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
+ thread__find_addr_map(thread, machine, cpumode, type, addr, al);
if (al->map != NULL)
al->sym = map__find_symbol(al->map, al->addr, filter);
else
@@ -747,13 +836,13 @@ void thread__find_addr_location(struct thread *self,
}
int perf_event__preprocess_sample(const union perf_event *event,
- struct perf_session *session,
+ struct machine *machine,
struct addr_location *al,
struct perf_sample *sample,
symbol_filter_t filter)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
if (thread == NULL)
return -1;
@@ -764,18 +853,18 @@ int perf_event__preprocess_sample(const union perf_event *event,
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
/*
- * Have we already created the kernel maps for the host machine?
+ * Have we already created the kernel maps for this machine?
*
* This should have happened earlier, when we processed the kernel MMAP
* events, but for older perf.data files there was no such thing, so do
* it now.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL &&
- session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
- machine__create_kernel_maps(&session->host_machine);
+ machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(machine);
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ event->ip.ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -783,13 +872,14 @@ int perf_event__preprocess_sample(const union perf_event *event,
al->cpu = sample->cpu;
if (al->map) {
+ struct dso *dso = al->map->dso;
+
if (symbol_conf.dso_list &&
- (!al->map || !al->map->dso ||
- !(strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->short_name) ||
- (al->map->dso->short_name != al->map->dso->long_name &&
- strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->long_name)))))
+ (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+ dso->short_name) ||
+ (dso->short_name != dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ dso->long_name)))))
goto out_filtered;
al->sym = map__find_symbol(al->map, al->addr, filter);
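The comm and mmap synthesizers above repeatedly size the on-disk record as the fixed part, plus the string padded to a u64 boundary, plus machine->id_hdr_size for the sample_id_all trailer. A standalone sketch of that arithmetic follows; the 8-byte header and 24-byte trailer are illustrative stand-ins, not values taken from the patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define HEADER_BYTES	8	/* stand-in for sizeof(struct perf_event_header) */

int main(void)
{
	const char *comm = "firefox";
	size_t id_hdr_size = 24;	/* e.g. a TID + TIME + ID sample_id_all trailer */
	size_t size = ALIGN(strlen(comm) + 1, sizeof(uint64_t));
	size_t header_size = HEADER_BYTES + 2 * sizeof(uint32_t)	/* pid, tid */
			   + size + id_hdr_size;

	printf("aligned comm: %zu bytes, PERF_RECORD_COMM size: %zu bytes\n",
	       size, header_size);
	return 0;
}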
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 357a85b8524..cbdeaad9c5e 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -2,6 +2,7 @@
#define __PERF_RECORD_H
#include <limits.h>
+#include <stdio.h>
#include "../perf.h"
#include "map.h"
@@ -141,43 +142,54 @@ union perf_event {
void perf_event__print_totals(void);
-struct perf_session;
+struct perf_tool;
struct thread_map;
-typedef int (*perf_event__handler_synth_t)(union perf_event *event,
- struct perf_session *session);
-typedef int (*perf_event__handler_t)(union perf_event *event,
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+ union perf_event *event,
struct perf_sample *sample,
- struct perf_session *session);
+ struct machine *machine);
-int perf_event__synthesize_thread_map(struct thread_map *threads,
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_threads(perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
- struct perf_session *session,
+ struct machine *machine);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine,
const char *symbol_name);
-int perf_event__synthesize_modules(perf_event__handler_t process,
- struct perf_session *session,
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
struct machine *machine);
-int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
-int perf_event__process(union perf_event *event, struct perf_sample *sample,
- struct perf_session *session);
+int perf_event__process_comm(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_task(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
struct addr_location;
int perf_event__preprocess_sample(const union perf_event *self,
- struct perf_session *session,
+ struct machine *machine,
struct addr_location *al,
struct perf_sample *sample,
symbol_filter_t filter);
@@ -187,5 +199,13 @@ const char *perf_event__name(unsigned int id);
int perf_event__parse_sample(const union perf_event *event, u64 type,
int sample_size, bool sample_id_all,
struct perf_sample *sample, bool swapped);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
#endif /* __PERF_RECORD_H */
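The header change above replaces the session-based callback with perf_event__handler_t taking a perf_tool and a machine. A standalone sketch of a handler matching the new signature; the struct definitions here are empty stubs for the demo, not the real perf types.

#include <stdio.h>

struct perf_tool	{ const char *name; };
struct perf_sample	{ unsigned long long time; };
union  perf_event	{ struct { unsigned int type; } header; };
struct machine		{ int pid; };

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);

static int my_process(struct perf_tool *tool, union perf_event *event,
		      struct perf_sample *sample, struct machine *machine)
{
	printf("%s: event type %u on machine pid %d at %llu\n",
	       tool->name, event->header.type, machine->pid, sample->time);
	return 0;
}

int main(void)
{
	struct perf_tool tool = { .name = "sketch" };
	union perf_event event = { .header = { .type = 3 /* PERF_RECORD_COMM */ } };
	struct perf_sample sample = { .time = 1234 };
	struct machine machine = { .pid = -1 };
	perf_event__handler_t process = my_process;

	return process(&tool, &event, &sample, &machine);
}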
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index fbb4b4ab9cc..fa1837088ca 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -6,12 +6,16 @@
*
* Released under the GPL v2. (and only v2, not any later version)
*/
+#include "util.h"
+#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
-#include "util.h"
+#include <unistd.h>
+
+#include "parse-events.h"
#include <sys/mman.h>
@@ -30,6 +34,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
perf_evlist__set_maps(evlist, cpus, threads);
+ evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -43,6 +48,22 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
return evlist;
}
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ perf_evsel__config(evsel, opts);
+
+ if (evlist->nr_entries > 1)
+ evsel->attr.sample_type |= PERF_SAMPLE_ID;
+ }
+}
+
static void perf_evlist__purge(struct perf_evlist *evlist)
{
struct perf_evsel *pos, *n;
@@ -76,6 +97,14 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
++evlist->nr_entries;
}
+static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries)
+{
+ list_splice_tail(list, &evlist->entries);
+ evlist->nr_entries += nr_entries;
+}
+
int perf_evlist__add_default(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
@@ -100,6 +129,126 @@ error:
return -ENOMEM;
}
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(head);
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++) {
+ evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->node, &head);
+ }
+
+ perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+ return 0;
+
+out_delete_partial_list:
+ list_for_each_entry_safe(evsel, n, &head, node)
+ perf_evsel__delete(evsel);
+ return -1;
+}
+
+static int trace_event__id(const char *evname)
+{
+ char *filename, *colon;
+ int err = -1, fd;
+
+ if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
+ return -1;
+
+ colon = strrchr(filename, ':');
+ if (colon != NULL)
+ *colon = '/';
+
+ fd = open(filename, O_RDONLY);
+ if (fd >= 0) {
+ char id[16];
+ if (read(fd, id, sizeof(id)) > 0)
+ err = atoi(id);
+ close(fd);
+ }
+
+ free(filename);
+ return err;
+}
+
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[],
+ size_t nr_tracepoints)
+{
+ int err;
+ size_t i;
+ struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
+
+ if (attrs == NULL)
+ return -1;
+
+ for (i = 0; i < nr_tracepoints; i++) {
+ err = trace_event__id(tracepoints[i]);
+
+ if (err < 0)
+ goto out_free_attrs;
+
+ attrs[i].type = PERF_TYPE_TRACEPOINT;
+ attrs[i].config = err;
+ attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU);
+ attrs[i].sample_period = 1;
+ }
+
+ err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
+out_free_attrs:
+ free(attrs);
+ return err;
+}
+
+static struct perf_evsel *
+ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ (int)evsel->attr.config == id)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs)
+{
+ struct perf_evsel *evsel;
+ int err;
+ size_t i;
+
+ for (i = 0; i < nr_assocs; i++) {
+ err = trace_event__id(assocs[i].name);
+ if (err < 0)
+ goto out;
+
+ evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
+ if (evsel == NULL)
+ continue;
+
+ err = -EEXIST;
+ if (evsel->handler.func != NULL)
+ goto out;
+ evsel->handler.func = assocs[i].handler;
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
void perf_evlist__disable(struct perf_evlist *evlist)
{
int cpu, thread;
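trace_event__id() above resolves a tracepoint to its numeric id by reading the id file under tracing_events_path. A standalone sketch of the same lookup; the /sys/kernel/debug/tracing mount point is an assumption, since perf resolves the path from debugfs at runtime.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	char path[4096], id[16];
	ssize_t n;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <subsystem> <event>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/%s/id", argv[1], argv[2]);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}

	n = read(fd, id, sizeof(id) - 1);
	close(fd);
	if (n <= 0)
		return 1;

	id[n] = '\0';
	printf("tracepoint id: %d\n", atoi(id));
	return 0;
}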
@@ -126,7 +275,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
}
}
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
@@ -282,7 +431,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
evlist->mmap = NULL;
}
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = evlist->cpus->nr;
if (evlist->cpus->map[0] == -1)
@@ -298,8 +447,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
evlist->mmap[idx].mask = mask;
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
- if (evlist->mmap[idx].base == MAP_FAILED)
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ evlist->mmap[idx].base = NULL;
return -1;
+ }
perf_evlist__add_pollfd(evlist, fd);
return 0;
@@ -400,14 +551,22 @@ out_unmap:
*
* Using perf_evlist__read_on_cpu does this automatically.
*/
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite)
{
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
- int mask = pages * page_size - 1;
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return -EINVAL;
+
+ mask = pages * page_size - 1;
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
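The perf_evlist__mmap() change above treats UINT_MAX as "no page count given" and falls back to 512 KiB worth of pages, otherwise requiring a power of two; the resulting mask is what wraps ring-buffer offsets. A standalone sketch of that sizing, assuming a typical 4 KiB page size:

#include <stdio.h>
#include <limits.h>
#include <unistd.h>

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	unsigned int pages = UINT_MAX;		/* i.e. the user passed no page count */

	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;	/* default unprivileged mlock budget */
	else if (!is_power_of_2(pages))
		return 1;				/* would be -EINVAL in perf */

	printf("pages=%u mask=%#x\n", pages, pages * page_size - 1);
	return 0;
}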
@@ -512,6 +671,38 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
return first->attr.sample_type;
}
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
+{
+ struct perf_evsel *first;
+ struct perf_sample *data;
+ u64 sample_type;
+ u16 size = 0;
+
+ first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+ if (!first->attr.sample_id_all)
+ goto out;
+
+ sample_type = first->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(data->tid) * 2;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(data->time);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(data->id);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(data->stream_id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(data->cpu) * 2;
+out:
+ return size;
+}
+
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
struct perf_evsel *pos, *first;
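perf_evlist__id_hdr_size() above computes how many trailer bytes sample_id_all appends to non-sample records. A standalone sketch of the same computation using the ABI bits from <linux/perf_event.h>; the example sample_type is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

static uint16_t id_hdr_size(uint64_t sample_type)
{
	uint16_t size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += 2 * sizeof(uint32_t);	/* pid, tid */
	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(uint64_t);
	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(uint64_t);
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(uint64_t);
	if (sample_type & PERF_SAMPLE_CPU)
		size += 2 * sizeof(uint32_t);	/* cpu, reserved */

	return size;
}

int main(void)
{
	uint64_t type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID;

	printf("id_hdr_size = %u bytes\n", id_hdr_size(type));	/* prints 24 */
	return 0;
}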
@@ -569,3 +760,97 @@ out_err:
return err;
}
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[])
+{
+ int child_ready_pipe[2], go_pipe[2];
+ char bf;
+
+ if (pipe(child_ready_pipe) < 0) {
+ perror("failed to create 'ready' pipe");
+ return -1;
+ }
+
+ if (pipe(go_pipe) < 0) {
+ perror("failed to create 'go' pipe");
+ goto out_close_ready_pipe;
+ }
+
+ evlist->workload.pid = fork();
+ if (evlist->workload.pid < 0) {
+ perror("failed to fork");
+ goto out_close_pipes;
+ }
+
+ if (!evlist->workload.pid) {
+ if (opts->pipe_output)
+ dup2(2, 1);
+
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &bf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ kill(getppid(), SIGUSR1);
+ exit(-1);
+ }
+
+ if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
+ evlist->threads->map[0] = evlist->workload.pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &bf, 1) == -1) {
+ perror("unable to read pipe");
+ goto out_close_pipes;
+ }
+
+ evlist->workload.cork_fd = go_pipe[1];
+ close(child_ready_pipe[0]);
+ return 0;
+
+out_close_pipes:
+ close(go_pipe[0]);
+ close(go_pipe[1]);
+out_close_ready_pipe:
+ close(child_ready_pipe[0]);
+ close(child_ready_pipe[1]);
+ return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+ if (evlist->workload.cork_fd > 0) {
+ /*
+ * Remove the cork, let it rip!
+ */
+ return close(evlist->workload.cork_fd);
+ }
+
+ return 0;
+}
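perf_evlist__prepare_workload() and perf_evlist__start_workload() above synchronize with the forked workload through two pipes: the child signals readiness by closing one, then blocks on the other (the "cork") until the parent releases it. A standalone sketch of that handshake, with echo standing in for the real workload:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int ready[2], go[2];
	char bf;

	if (pipe(ready) < 0 || pipe(go) < 0)
		return 1;

	switch (fork()) {
	case -1:
		return 1;
	case 0:						/* child: the workload */
		close(ready[0]);
		close(go[1]);
		close(ready[1]);			/* tell the parent we're ready */
		if (read(go[0], &bf, 1) < 0)		/* block until the cork is pulled */
			_exit(1);
		execlp("echo", "echo", "workload running", (char *)NULL);
		_exit(127);
	default:					/* parent: the tool */
		close(ready[1]);
		close(go[0]);
		if (read(ready[0], &bf, 1) < 0)		/* wait for the child to settle */
			return 1;
		/* ... counters would be opened and enabled here ... */
		close(go[1]);				/* start_workload: let it rip */
	}
	return 0;
}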
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1779ffef782..8922aeed046 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -2,12 +2,16 @@
#define __PERF_EVLIST_H 1
#include <linux/list.h>
+#include <stdio.h>
#include "../perf.h"
#include "event.h"
+#include "util.h"
+#include <unistd.h>
struct pollfd;
struct thread_map;
struct cpu_map;
+struct perf_record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -19,6 +23,10 @@ struct perf_evlist {
int nr_fds;
int nr_mmaps;
int mmap_len;
+ struct {
+ int cork_fd;
+ pid_t pid;
+ } workload;
bool overwrite;
union perf_event event_copy;
struct perf_mmap *mmap;
@@ -28,6 +36,11 @@ struct perf_evlist {
struct perf_evsel *selected;
};
+struct perf_evsel_str_handler {
+ const char *name;
+ void *handler;
+};
+
struct perf_evsel;
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
@@ -39,11 +52,26 @@ void perf_evlist__delete(struct perf_evlist *evlist);
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
int perf_evlist__add_default(struct perf_evlist *evlist);
+int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs);
+int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
+ const char *tracepoints[], size_t nr_tracepoints);
+int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define perf_evlist__add_attrs_array(evlist, array) \
+ perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__add_tracepoints_array(evlist, array) \
+ perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+
+#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
+ perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
-int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
@@ -52,8 +80,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist, bool group);
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
+void perf_evlist__config_attrs(struct perf_evlist *evlist,
+ struct perf_record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct perf_record_opts *opts,
+ const char *argv[]);
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);
void perf_evlist__disable(struct perf_evlist *evlist);
@@ -77,6 +113,7 @@ int perf_evlist__set_filters(struct perf_evlist *evlist);
u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4262642258..667f3b78bb2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
return size;
}
+static void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
@@ -53,6 +63,79 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
+
+ attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
+ attr->inherit = !opts->no_inherit;
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
+
+ attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+ /*
+ * We default some events to a sample period of 1, but keep
+ * it a weak assumption overridable by the user.
+ */
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+ opts->user_interval != ULLONG_MAX)) {
+ if (opts->freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+ }
+
+ if (opts->no_samples)
+ attr->sample_freq = 0;
+
+ if (opts->inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (opts->sample_address) {
+ attr->sample_type |= PERF_SAMPLE_ADDR;
+ attr->mmap_data = track;
+ }
+
+ if (opts->call_graph)
+ attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+ if (opts->system_wide)
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ if (opts->period)
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+
+ if (opts->sample_id_all_avail &&
+ (opts->sample_time || opts->system_wide ||
+ !opts->no_inherit || opts->cpu_list))
+ attr->sample_type |= PERF_SAMPLE_TIME;
+
+ if (opts->raw_samples) {
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ }
+
+ if (opts->no_delay) {
+ attr->watermark = 0;
+ attr->wakeup_events = 1;
+ }
+
+ attr->mmap = track;
+ attr->comm = track;
+
+ if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+}
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
@@ -377,7 +460,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
u32 val32[2];
} u;
-
+ memset(data, 0, sizeof(*data));
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
@@ -494,3 +577,82 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
return 0;
}
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ const struct perf_sample *sample,
+ bool swapped)
+{
+ u64 *array;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union {
+ u64 val64;
+ u32 val32[2];
+ } u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ event->ip.ip = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_event__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ return 0;
+}
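perf_event__synthesize_sample() above is the inverse of perf_event__parse_sample(): pairs of u32 values share one u64 slot of the sample array. A standalone sketch of that packing and unpacking for the PERF_SAMPLE_TID slot; the pid and tid values are arbitrary.

#include <stdio.h>
#include <stdint.h>

union u64_as_u32s {
	uint64_t val64;
	uint32_t val32[2];
};

int main(void)
{
	union u64_as_u32s u;
	uint64_t array[1];

	u.val32[0] = 1234;		/* pid */
	u.val32[1] = 5678;		/* tid */
	array[0] = u.val64;		/* one PERF_SAMPLE_TID slot in the sample */

	u.val64 = array[0];		/* parse side: unpack the same slot */
	printf("pid=%u tid=%u\n", u.val32[0], u.val32[1]);
	return 0;
}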
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b1d15e6f7ae..326b8e4d503 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -61,12 +61,17 @@ struct perf_evsel {
off_t id_offset;
};
struct cgroup_sel *cgrp;
+ struct {
+ void *func;
+ void *data;
+ } handler;
bool supported;
};
struct cpu_map;
struct thread_map;
struct perf_evlist;
+struct perf_record_opts;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
@@ -74,6 +79,9 @@ void perf_evsel__init(struct perf_evsel *evsel,
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
+void perf_evsel__config(struct perf_evsel *evsel,
+ struct perf_record_opts *opts);
+
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bcd05d05b4f..3e7e0b09c12 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,6 +8,7 @@
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
+#include <linux/bitops.h>
#include <sys/utsname.h>
#include "evlist.h"
@@ -28,9 +29,6 @@ static struct perf_trace_event_type *events;
static u32 header_argc;
static const char **header_argv;
-static int dsos__write_buildid_table(struct perf_header *header, int fd);
-static int perf_session__cache_build_ids(struct perf_session *session);
-
int perf_header__push_event(u64 id, const char *name)
{
if (strlen(name) > MAX_EVENT_NAME)
@@ -187,6 +185,252 @@ perf_header__set_cmdline(int argc, const char **argv)
return 0;
}
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+ u16 misc, int fd)
+{
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ if (!pos->hit)
+ continue;
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+ err = write_padded(fd, pos->long_name,
+ pos->long_name_len + 1, len);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int machine__write_buildid_table(struct machine *machine, int fd)
+{
+ int err;
+ u16 kmisc = PERF_RECORD_MISC_KERNEL,
+ umisc = PERF_RECORD_MISC_USER;
+
+ if (!machine__is_host(machine)) {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
+ kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&machine->user_dsos,
+ machine->pid, umisc, fd);
+ return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = machine__write_buildid_table(&session->host_machine, fd);
+
+ if (err)
+ return err;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ err = machine__write_buildid_table(pos, fd);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms)
+{
+ const size_t size = PATH_MAX;
+ char *realname, *filename = zalloc(size),
+ *linkname = zalloc(size), *targetname;
+ int len, err = -1;
+
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ return 0;
+ }
+ realname = (char *)name;
+ } else
+ realname = realpath(name, NULL);
+
+ if (realname == NULL || filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = snprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", realname);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(realname, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ if (!is_kallsyms)
+ free(realname);
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = zalloc(size),
+ *linkname = zalloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size - 1) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+{
+ bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
+ dso->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+{
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+ return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *session)
+{
+ struct rb_node *nd;
+ int ret;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ ret = machine__cache_build_ids(&session->host_machine, debugdir);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__cache_build_ids(pos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
+{
+ bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+ return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
+{
+ struct rb_node *nd;
+ bool ret = machine__read_build_ids(&session->host_machine, with_hits);
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__read_build_ids(pos, with_hits);
+ }
+
+ return ret;
+}
+
static int write_trace_info(int fd, struct perf_header *h __used,
struct perf_evlist *evlist)
{
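build_id_cache__add_s() above stores the object under <debugdir>/<long_name>/<sbuild_id> and points a relative symlink at it from <debugdir>/.build-id/<first two hex chars>/<rest>. A standalone sketch of just the path construction; the debugdir, DSO name and build-id below are made-up examples.

#include <stdio.h>

int main(void)
{
	const char *debugdir = "/home/user/.debug";	/* illustrative buildid_dir */
	const char *name = "/usr/lib/libc-2.14.so";
	const char *sbuild_id = "c2277f9aabe1448f8e78986a436f2f607e786c58";
	char filename[4096], linkname[4096];

	/* where the object (or kallsyms copy) is cached */
	snprintf(filename, sizeof(filename), "%s%s/%s", debugdir, name, sbuild_id);
	/* the per-build-id symlink that points back at it */
	snprintf(linkname, sizeof(linkname), "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	printf("object: %s\nlink:   %s\n", filename, linkname);
	return 0;
}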
@@ -202,6 +446,9 @@ static int write_build_id(int fd, struct perf_header *h,
session = container_of(h, struct perf_session, header);
+ if (!perf_session__read_build_ids(session, true))
+ return -1;
+
err = dsos__write_buildid_table(h, fd);
if (err < 0) {
pr_debug("failed to write buildid table\n");
@@ -388,7 +635,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, attr->name);
+ ret = do_write_string(fd, event_name(attr));
if (ret < 0)
return ret;
/*
@@ -1065,26 +1312,30 @@ struct feature_ops {
bool full_only;
};
-#define FEAT_OPA(n, w, p) \
- [n] = { .name = #n, .write = w, .print = p }
-#define FEAT_OPF(n, w, p) \
- [n] = { .name = #n, .write = w, .print = p, .full_only = true }
+#define FEAT_OPA(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPF(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true }
+
+/* feature_ops not implemented: */
+#define print_trace_info NULL
+#define print_build_id NULL
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
- FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL),
- FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL),
- FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname),
- FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease),
- FEAT_OPA(HEADER_VERSION, write_version, print_version),
- FEAT_OPA(HEADER_ARCH, write_arch, print_arch),
- FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus),
- FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc),
- FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid),
- FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem),
- FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc),
- FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline),
- FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology),
- FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology),
+ FEAT_OPA(HEADER_TRACE_INFO, trace_info),
+ FEAT_OPA(HEADER_BUILD_ID, build_id),
+ FEAT_OPA(HEADER_HOSTNAME, hostname),
+ FEAT_OPA(HEADER_OSRELEASE, osrelease),
+ FEAT_OPA(HEADER_VERSION, version),
+ FEAT_OPA(HEADER_ARCH, arch),
+ FEAT_OPA(HEADER_NRCPUS, nrcpus),
+ FEAT_OPA(HEADER_CPUDESC, cpudesc),
+ FEAT_OPA(HEADER_CPUID, cpuid),
+ FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPA(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPA(HEADER_CMDLINE, cmdline),
+ FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
+ FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
};
struct header_print_data {
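The new FEAT_OPA()/FEAT_OPF() macros above paste the feature tag onto write_/print_ to pick the callbacks, so each feature occupies a single table line. A standalone sketch of the same token pasting; the feature_ops layout and the hostname stubs are simplified stand-ins.

#include <stdio.h>

struct feature_ops {
	int  (*write)(int fd);
	void (*print)(FILE *fp);
	const char *name;
	int full_only;
};

static int  write_hostname(int fd)   { (void)fd; return 0; }
static void print_hostname(FILE *fp) { fprintf(fp, "# hostname : example\n"); }

#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }

enum { HEADER_HOSTNAME = 0, HEADER_LAST_FEATURE };

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPA(HEADER_HOSTNAME, hostname),
};

int main(void)
{
	feat_ops[HEADER_HOSTNAME].print(stdout);
	printf("feature name: %s\n", feat_ops[HEADER_HOSTNAME].name);
	return 0;
}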
@@ -1103,9 +1354,9 @@ static int perf_file_section__fprintf_info(struct perf_file_section *section,
"%d, continuing...\n", section->offset, feat);
return 0;
}
- if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) {
+ if (feat >= HEADER_LAST_FEATURE) {
pr_warning("unknown feature %d\n", feat);
- return -1;
+ return 0;
}
if (!feat_ops[feat].print)
return 0;
@@ -1132,252 +1383,6 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
return 0;
}
-#define dsos__for_each_with_build_id(pos, head) \
- list_for_each_entry(pos, head, node) \
- if (!pos->has_build_id) \
- continue; \
- else
-
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
- u16 misc, int fd)
-{
- struct dso *pos;
-
- dsos__for_each_with_build_id(pos, head) {
- int err;
- struct build_id_event b;
- size_t len;
-
- if (!pos->hit)
- continue;
- len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
- memset(&b, 0, sizeof(b));
- memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
- b.pid = pid;
- b.header.misc = misc;
- b.header.size = sizeof(b) + len;
- err = do_write(fd, &b, sizeof(b));
- if (err < 0)
- return err;
- err = write_padded(fd, pos->long_name,
- pos->long_name_len + 1, len);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static int machine__write_buildid_table(struct machine *machine, int fd)
-{
- int err;
- u16 kmisc = PERF_RECORD_MISC_KERNEL,
- umisc = PERF_RECORD_MISC_USER;
-
- if (!machine__is_host(machine)) {
- kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
- umisc = PERF_RECORD_MISC_GUEST_USER;
- }
-
- err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
- kmisc, fd);
- if (err == 0)
- err = __dsos__write_buildid_table(&machine->user_dsos,
- machine->pid, umisc, fd);
- return err;
-}
-
-static int dsos__write_buildid_table(struct perf_header *header, int fd)
-{
- struct perf_session *session = container_of(header,
- struct perf_session, header);
- struct rb_node *nd;
- int err = machine__write_buildid_table(&session->host_machine, fd);
-
- if (err)
- return err;
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- err = machine__write_buildid_table(pos, fd);
- if (err)
- break;
- }
- return err;
-}
-
-int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms)
-{
- const size_t size = PATH_MAX;
- char *realname, *filename = zalloc(size),
- *linkname = zalloc(size), *targetname;
- int len, err = -1;
-
- if (is_kallsyms) {
- if (symbol_conf.kptr_restrict) {
- pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
- return 0;
- }
- realname = (char *)name;
- } else
- realname = realpath(name, NULL);
-
- if (realname == NULL || filename == NULL || linkname == NULL)
- goto out_free;
-
- len = snprintf(filename, size, "%s%s%s",
- debugdir, is_kallsyms ? "/" : "", realname);
- if (mkdir_p(filename, 0755))
- goto out_free;
-
- snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
-
- if (access(filename, F_OK)) {
- if (is_kallsyms) {
- if (copyfile("/proc/kallsyms", filename))
- goto out_free;
- } else if (link(realname, filename) && copyfile(name, filename))
- goto out_free;
- }
-
- len = snprintf(linkname, size, "%s/.build-id/%.2s",
- debugdir, sbuild_id);
-
- if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
- goto out_free;
-
- snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
- targetname = filename + strlen(debugdir) - 5;
- memcpy(targetname, "../..", 5);
-
- if (symlink(targetname, linkname) == 0)
- err = 0;
-out_free:
- if (!is_kallsyms)
- free(realname);
- free(filename);
- free(linkname);
- return err;
-}
-
-static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
- const char *name, const char *debugdir,
- bool is_kallsyms)
-{
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(build_id, build_id_size, sbuild_id);
-
- return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
-}
-
-int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
-{
- const size_t size = PATH_MAX;
- char *filename = zalloc(size),
- *linkname = zalloc(size);
- int err = -1;
-
- if (filename == NULL || linkname == NULL)
- goto out_free;
-
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, sbuild_id + 2);
-
- if (access(linkname, F_OK))
- goto out_free;
-
- if (readlink(linkname, filename, size - 1) < 0)
- goto out_free;
-
- if (unlink(linkname))
- goto out_free;
-
- /*
- * Since the link is relative, we must make it absolute:
- */
- snprintf(linkname, size, "%s/.build-id/%.2s/%s",
- debugdir, sbuild_id, filename);
-
- if (unlink(linkname))
- goto out_free;
-
- err = 0;
-out_free:
- free(filename);
- free(linkname);
- return err;
-}
-
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
-{
- bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
-
- return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
- dso->long_name, debugdir, is_kallsyms);
-}
-
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
-{
- struct dso *pos;
- int err = 0;
-
- dsos__for_each_with_build_id(pos, head)
- if (dso__cache_build_id(pos, debugdir))
- err = -1;
-
- return err;
-}
-
-static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
-{
- int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
- ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
- return ret;
-}
-
-static int perf_session__cache_build_ids(struct perf_session *session)
-{
- struct rb_node *nd;
- int ret;
- char debugdir[PATH_MAX];
-
- snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
- if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
- return -1;
-
- ret = machine__cache_build_ids(&session->host_machine, debugdir);
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__cache_build_ids(pos, debugdir);
- }
- return ret ? -1 : 0;
-}
-
-static bool machine__read_build_ids(struct machine *machine, bool with_hits)
-{
- bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
- ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
- return ret;
-}
-
-static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
-{
- struct rb_node *nd;
- bool ret = machine__read_build_ids(&session->host_machine, with_hits);
-
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret |= machine__read_build_ids(pos, with_hits);
- }
-
- return ret;
-}
-
static int do_write_feat(int fd, struct perf_header *h, int type,
struct perf_file_section **p,
struct perf_evlist *evlist)
@@ -1386,6 +1391,8 @@ static int do_write_feat(int fd, struct perf_header *h, int type,
int ret = 0;
if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
(*p)->offset = lseek(fd, 0, SEEK_CUR);
@@ -1408,18 +1415,12 @@ static int perf_header__adds_write(struct perf_header *header,
struct perf_evlist *evlist, int fd)
{
int nr_sections;
- struct perf_session *session;
struct perf_file_section *feat_sec, *p;
int sec_size;
u64 sec_start;
+ int feat;
int err;
- session = container_of(header, struct perf_session, header);
-
- if (perf_header__has_feat(header, HEADER_BUILD_ID &&
- !perf_session__read_build_ids(session, true)))
- perf_header__clear_feat(header, HEADER_BUILD_ID);
-
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
@@ -1433,64 +1434,11 @@ static int perf_header__adds_write(struct perf_header *header,
sec_start = header->data_offset + header->data_size;
lseek(fd, sec_start + sec_size, SEEK_SET);
- err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist);
- if (err)
- goto out_free;
-
- err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist);
- if (err) {
- perf_header__clear_feat(header, HEADER_BUILD_ID);
- goto out_free;
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
}
- err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_HOSTNAME);
-
- err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_OSRELEASE);
-
- err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_VERSION);
-
- err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_ARCH);
-
- err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_NRCPUS);
-
- err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPUDESC);
-
- err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPUID);
-
- err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_TOTAL_MEM);
-
- err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CMDLINE);
-
- err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_EVENT_DESC);
-
- err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY);
-
- err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist);
- if (err)
- perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY);
-
lseek(fd, sec_start, SEEK_SET);
/*
* may write more than needed due to dropped feature, but
@@ -1499,7 +1447,6 @@ static int perf_header__adds_write(struct perf_header *header,
err = do_write(fd, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
-out_free:
free(feat_sec);
return err;
}
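
The hunk above replaces the long chain of hand-written do_write_feat() calls with a single loop over the bits set in header->adds_features, clearing any feature whose writer fails (or has no .write op). A minimal sketch of that control flow, using hypothetical stand-ins for the perf header internals — the real code walks a 256-bit bitmap with for_each_set_bit():

#include <stdio.h>

#define FEAT_BITS 64    /* the real bitmap is HEADER_FEAT_BITS = 256 */

/* hypothetical per-feature writer: returns 0 on success, -1 on failure */
static int write_feat(int feat)
{
        return feat == 3 ? -1 : 0;      /* pretend feature 3 has no writer */
}

int main(void)
{
        unsigned long long features = 0x1eULL;  /* features 1..4 requested */
        int feat;

        for (feat = 0; feat < FEAT_BITS; feat++) {
                if (!(features & (1ULL << feat)))
                        continue;                       /* skip clear bits */
                if (write_feat(feat))
                        features &= ~(1ULL << feat);    /* drop the failed feature */
        }

        printf("features actually written: %#llx\n", features);
        return 0;
}

As in the patch, a failed feature is simply dropped from the bitmap rather than aborting the whole header write.
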
@@ -1637,20 +1584,20 @@ static int perf_header__getbuffer64(struct perf_header *header,
int perf_header__process_sections(struct perf_header *header, int fd,
void *data,
int (*process)(struct perf_file_section *section,
- struct perf_header *ph,
- int feat, int fd, void *data))
+ struct perf_header *ph,
+ int feat, int fd, void *data))
{
- struct perf_file_section *feat_sec;
+ struct perf_file_section *feat_sec, *sec;
int nr_sections;
int sec_size;
- int idx = 0;
- int err = -1, feat = 1;
+ int feat;
+ int err;
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
- feat_sec = calloc(sizeof(*feat_sec), nr_sections);
+ feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
if (!feat_sec)
return -1;
@@ -1658,20 +1605,16 @@ int perf_header__process_sections(struct perf_header *header, int fd,
lseek(fd, header->data_offset + header->data_size, SEEK_SET);
- if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
goto out_free;
- err = 0;
- while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
- if (perf_header__has_feat(header, feat)) {
- struct perf_file_section *sec = &feat_sec[idx++];
-
- err = process(sec, header, feat, fd, data);
- if (err < 0)
- break;
- }
- ++feat;
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
}
+ err = 0;
out_free:
free(feat_sec);
return err;
@@ -1906,32 +1849,21 @@ static int perf_file_section__process(struct perf_file_section *section,
return 0;
}
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ return 0;
+ }
+
switch (feat) {
case HEADER_TRACE_INFO:
trace_report(fd, false);
break;
-
case HEADER_BUILD_ID:
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
pr_debug("Failed to read buildids, continuing...\n");
break;
-
- case HEADER_HOSTNAME:
- case HEADER_OSRELEASE:
- case HEADER_VERSION:
- case HEADER_ARCH:
- case HEADER_NRCPUS:
- case HEADER_CPUDESC:
- case HEADER_CPUID:
- case HEADER_TOTAL_MEM:
- case HEADER_CMDLINE:
- case HEADER_EVENT_DESC:
- case HEADER_CPU_TOPOLOGY:
- case HEADER_NUMA_TOPOLOGY:
- break;
-
default:
- pr_debug("unknown feature %d, continuing...\n", feat);
+ break;
}
return 0;
@@ -2041,6 +1973,8 @@ int perf_session__read_header(struct perf_session *session, int fd)
lseek(fd, tmp, SEEK_SET);
}
+ symbol_conf.nr_events = nr_attrs;
+
if (f_header.event_types.size) {
lseek(fd, f_header.event_types.offset, SEEK_SET);
events = malloc(f_header.event_types.size);
@@ -2068,9 +2002,9 @@ out_delete_evlist:
return -ENOMEM;
}
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process)
{
union perf_event *ev;
size_t size;
@@ -2092,22 +2026,23 @@ int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = size;
- err = process(ev, NULL, session);
+ err = process(tool, ev, NULL, NULL);
free(ev);
return err;
}
-int perf_session__synthesize_attrs(struct perf_session *session,
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
perf_event__handler_t process)
{
struct perf_evsel *attr;
int err = 0;
list_for_each_entry(attr, &session->evlist->entries, node) {
- err = perf_event__synthesize_attr(&attr->attr, attr->ids,
- attr->id, process, session);
+ err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
+ attr->id, process);
if (err) {
pr_debug("failed to create perf header attribute\n");
return err;
@@ -2118,23 +2053,23 @@ int perf_session__synthesize_attrs(struct perf_session *session,
}
int perf_event__process_attr(union perf_event *event,
- struct perf_session *session)
+ struct perf_evlist **pevlist)
{
unsigned int i, ids, n_ids;
struct perf_evsel *evsel;
+ struct perf_evlist *evlist = *pevlist;
- if (session->evlist == NULL) {
- session->evlist = perf_evlist__new(NULL, NULL);
- if (session->evlist == NULL)
+ if (evlist == NULL) {
+ *pevlist = evlist = perf_evlist__new(NULL, NULL);
+ if (evlist == NULL)
return -ENOMEM;
}
- evsel = perf_evsel__new(&event->attr.attr,
- session->evlist->nr_entries);
+ evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
if (evsel == NULL)
return -ENOMEM;
- perf_evlist__add(session->evlist, evsel);
+ perf_evlist__add(evlist, evsel);
ids = event->header.size;
ids -= (void *)&event->attr.id - (void *)event;
@@ -2148,18 +2083,16 @@ int perf_event__process_attr(union perf_event *event,
return -ENOMEM;
for (i = 0; i < n_ids; i++) {
- perf_evlist__id_add(session->evlist, evsel, 0, i,
- event->attr.id[i]);
+ perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
}
- perf_session__update_sample_type(session);
-
return 0;
}
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
perf_event__handler_t process,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event ev;
size_t size = 0;
@@ -2177,13 +2110,14 @@ int perf_event__synthesize_event_type(u64 event_id, char *name,
ev.event_type.header.size = sizeof(ev.event_type) -
(sizeof(ev.event_type.event_type.name) - size);
- err = process(&ev, NULL, session);
+ err = process(tool, &ev, NULL, machine);
return err;
}
-int perf_event__synthesize_event_types(perf_event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
struct perf_trace_event_type *type;
int i, err = 0;
@@ -2191,9 +2125,9 @@ int perf_event__synthesize_event_types(perf_event__handler_t process,
for (i = 0; i < event_count; i++) {
type = &events[i];
- err = perf_event__synthesize_event_type(type->event_id,
+ err = perf_event__synthesize_event_type(tool, type->event_id,
type->name, process,
- session);
+ machine);
if (err) {
pr_debug("failed to create perf header event type\n");
return err;
@@ -2203,8 +2137,8 @@ int perf_event__synthesize_event_types(perf_event__handler_t process,
return err;
}
-int perf_event__process_event_type(union perf_event *event,
- struct perf_session *session __unused)
+int perf_event__process_event_type(struct perf_tool *tool __unused,
+ union perf_event *event)
{
if (perf_header__push_event(event->event_type.event_type.event_id,
event->event_type.event_type.name) < 0)
@@ -2213,9 +2147,9 @@ int perf_event__process_event_type(union perf_event *event,
return 0;
}
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
- perf_event__handler_t process,
- struct perf_session *session __unused)
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
{
union perf_event ev;
struct tracing_data *tdata;
@@ -2246,7 +2180,7 @@ int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
- process(&ev, NULL, session);
+ process(tool, &ev, NULL, NULL);
/*
* The put function will copy all the tracing data
@@ -2288,10 +2222,10 @@ int perf_event__process_tracing_data(union perf_event *event,
return size_read + padding;
}
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
perf_event__handler_t process,
- struct machine *machine,
- struct perf_session *session)
+ struct machine *machine)
{
union perf_event ev;
size_t len;
@@ -2311,12 +2245,13 @@ int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
- err = process(&ev, NULL, session);
+ err = process(tool, &ev, NULL, machine);
return err;
}
-int perf_event__process_build_id(union perf_event *event,
+int perf_event__process_build_id(struct perf_tool *tool __used,
+ union perf_event *event,
struct perf_session *session)
{
__event_process_build_id(&event->build_id,
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 3d5a742f4a2..ac4ec956024 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -10,7 +10,8 @@
#include <linux/bitmap.h>
enum {
- HEADER_TRACE_INFO = 1,
+ HEADER_RESERVED = 0, /* always cleared */
+ HEADER_TRACE_INFO = 1,
HEADER_BUILD_ID,
HEADER_HOSTNAME,
@@ -27,10 +28,9 @@ enum {
HEADER_NUMA_TOPOLOGY,
HEADER_LAST_FEATURE,
+ HEADER_FEAT_BITS = 256,
};
-#define HEADER_FEAT_BITS 256
-
struct perf_file_section {
u64 offset;
u64 size;
@@ -68,6 +68,7 @@ struct perf_header {
};
struct perf_evlist;
+struct perf_session;
int perf_session__read_header(struct perf_session *session, int fd);
int perf_session__write_header(struct perf_session *session,
@@ -96,32 +97,36 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
-int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- perf_event__handler_t process,
- struct perf_session *session);
-int perf_session__synthesize_attrs(struct perf_session *session,
- perf_event__handler_t process);
-int perf_event__process_attr(union perf_event *event, struct perf_session *session);
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u16 ids, u64 *id,
+ perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process);
+int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
-int perf_event__synthesize_event_type(u64 event_id, char *name,
+int perf_event__synthesize_event_type(struct perf_tool *tool,
+ u64 event_id, char *name,
perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__synthesize_event_types(perf_event__handler_t process,
- struct perf_session *session);
-int perf_event__process_event_type(union perf_event *event,
- struct perf_session *session);
-
-int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
- perf_event__handler_t process,
- struct perf_session *session);
+ struct machine *machine);
+int perf_event__synthesize_event_types(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_event_type(struct perf_tool *tool,
+ union perf_event *event);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+ int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process);
int perf_event__process_tracing_data(union perf_event *event,
struct perf_session *session);
-int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
perf_event__handler_t process,
- struct machine *machine,
- struct perf_session *session);
-int perf_event__process_build_id(union perf_event *event,
+ struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session);
/*
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a36a3fa81ff..abef2703cd2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
return ret;
}
-
-void hists__init(struct hists *hists)
-{
- memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
- hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
- pthread_mutex_init(&hists->lock, NULL);
-}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index c86c1d27bd1..ff6f9d56ea4 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -63,8 +63,6 @@ struct hists {
struct callchain_cursor callchain_cursor;
};
-void hists__init(struct hists *hists);
-
struct hist_entry *__hists__add_entry(struct hists *self,
struct addr_location *al,
struct symbol *parent, u64 period);
@@ -119,7 +117,6 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
int evidx __used,
- int nr_events __used,
void(*timer)(void *arg) __used,
void *arg __used,
int delay_secs __used)
@@ -130,7 +127,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
#define K_RIGHT -2
#else
#include "ui/keysyms.h"
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
void(*timer)(void *arg), void *arg, int delay_secs);
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 305c8484f20..62cdee78db7 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -9,6 +9,17 @@
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
static inline void set_bit(int nr, unsigned long *addr)
{
addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
@@ -30,4 +41,111 @@ static inline unsigned long hweight_long(unsigned long w)
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
#endif
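
The helpers added to bitops.h above give the perf tools a userspace find_first_bit()/find_next_bit(), which is what for_each_set_bit() needs to walk multi-word bitmaps such as adds_features. A small self-contained sketch of the same idea — a single-word variant built on the compiler's count-trailing-zeros builtin rather than the open-coded __ffs() above:

#include <stdio.h>

/* visit every set bit of one unsigned long, lowest bit first */
#define for_each_set_bit_in_word(bit, word)                             \
        for (unsigned long _w = (word);                                  \
             _w && ((bit) = __builtin_ctzl(_w), 1);                      \
             _w &= _w - 1)              /* clear the bit just visited */

int main(void)
{
        unsigned long mask = 0x8421UL;          /* bits 0, 5, 10 and 15 */
        int bit;

        for_each_set_bit_in_word(bit, mask)
                printf("bit %d is set\n", bit);
        return 0;
}

This is only an illustration of the iteration pattern; the patch's find_next_bit() additionally handles bitmaps spanning several unsigned longs and arbitrary start offsets.
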
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 78284b13e80..316aa0ab712 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -562,6 +562,10 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid)
INIT_LIST_HEAD(&self->user_dsos);
INIT_LIST_HEAD(&self->kernel_dsos);
+ self->threads = RB_ROOT;
+ INIT_LIST_HEAD(&self->dead_threads);
+ self->last_match = NULL;
+
self->kmaps.machine = self;
self->pid = pid;
self->root_dir = strdup(root_dir);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 890d85545d0..2b8017f8a93 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -18,9 +18,11 @@ enum map_type {
extern const char *map_type__name[MAP__NR_TYPES];
struct dso;
+struct ip_callchain;
struct ref_reloc_sym;
struct map_groups;
struct machine;
+struct perf_evsel;
struct map {
union {
@@ -61,7 +63,11 @@ struct map_groups {
struct machine {
struct rb_node rb_node;
pid_t pid;
+ u16 id_hdr_size;
char *root_dir;
+ struct rb_root threads;
+ struct list_head dead_threads;
+ struct thread *last_match;
struct list_head user_dsos;
struct list_head kernel_dsos;
struct map_groups kmaps;
@@ -148,6 +154,13 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid);
void machine__exit(struct machine *self);
void machine__delete(struct machine *self);
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel, struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+ u64 addr);
+
/*
* Default guest kernel is defined by parameter --guestkallsyms
* and --guestmodules
@@ -190,6 +203,12 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
struct map **mapp,
symbol_filter_t filter);
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+void machine__remove_thread(struct machine *machine, struct thread *th);
+
+size_t machine__fprintf(struct machine *machine, FILE *fp);
+
static inline
struct symbol *machine__find_kernel_symbol(struct machine *self,
enum map_type type, u64 addr,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 928918b796b..531c283fc0c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -25,8 +25,6 @@ enum event_result {
EVT_HANDLED_ALL
};
-char debugfs_path[MAXPATHLEN];
-
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -40,6 +38,7 @@ static struct event_symbol event_symbols[] = {
{ CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
{ CHW(BRANCH_MISSES), "branch-misses", "" },
{ CHW(BUS_CYCLES), "bus-cycles", "" },
+ { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
{ CSW(CPU_CLOCK), "cpu-clock", "" },
{ CSW(TASK_CLOCK), "task-clock", "" },
@@ -70,6 +69,7 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
"bus-cycles",
"stalled-cycles-frontend",
"stalled-cycles-backend",
+ "ref-cycles",
};
static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
@@ -140,7 +140,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
char evt_path[MAXPATHLEN];
int fd;
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
sys_dir->d_name, evt_dir->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
@@ -171,16 +171,16 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return NULL;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return NULL;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
@@ -447,7 +447,7 @@ parse_single_tracepoint_event(char *sys_name,
u64 id;
int fd;
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
sys_name, evt_name);
fd = open(evt_path, O_RDONLY);
@@ -485,7 +485,7 @@ parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
struct dirent *evt_ent;
DIR *evt_dir;
- snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
+ snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
if (!evt_dir) {
@@ -528,7 +528,7 @@ parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
char sys_name[MAX_EVENT_LENGTH];
unsigned int sys_length, evt_length;
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return 0;
evt_name = strchr(*strp, ':');
@@ -920,10 +920,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return;
@@ -932,7 +932,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
!strglobmatch(sys_dirent.d_name, subsys_glob))
continue;
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
@@ -964,16 +964,16 @@ int is_valid_tracepoint(const char *event_string)
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return 0;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return 0;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 2f8e375e038..7e0cbe75d5f 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -39,7 +39,6 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
int print_hwcache_events(const char *event_glob);
extern int is_valid_tracepoint(const char *event_string);
-extern char debugfs_path[];
extern int valid_debugfs_mount(const char *debugfs);
#endif /* __PERF_PARSE_EVENTS_H */
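
parse-events.c above switches every tracepoint lookup from the old global debugfs_path to tracing_events_path, so the <events dir>/<subsystem>/<event>/id layout is resolved through one variable. A sketch of reading a tracepoint id from such a layout; the hard-coded base directory below is an assumption (commonly /sys/kernel/debug/tracing/events or /sys/kernel/tracing/events), whereas the real tools detect the mount point at runtime:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>

/* assumed default; the real tools compute this from the debugfs mount */
static const char *tracing_events_path = "/sys/kernel/debug/tracing/events";

static long long tracepoint_id(const char *sys, const char *event)
{
        char path[PATH_MAX], buf[32];
        ssize_t n;
        int fd;

        snprintf(path, sizeof(path), "%s/%s/%s/id",
                 tracing_events_path, sys, event);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        n = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (n <= 0)
                return -1;
        buf[n] = '\0';
        return strtoll(buf, NULL, 10);
}

int main(void)
{
        long long id = tracepoint_id("sched", "sched_switch");

        if (id < 0)
                fprintf(stderr, "could not read tracepoint id\n");
        else
                printf("sched:sched_switch id = %lld\n", id);
        return 0;
}
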
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 1132c8f0ce8..17e94d0c36f 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -5,7 +5,6 @@
#include "util.h"
#include "probe-event.h"
-#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 74350ffb57f..e30749e38a9 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -27,7 +27,10 @@
#include "../../perf.h"
#include "../util.h"
+#include "../thread.h"
+#include "../event.h"
#include "../trace-event.h"
+#include "../evsel.h"
#include <EXTERN.h>
#include <perl.h>
@@ -245,11 +248,11 @@ static inline struct event *find_cache_event(int type)
return event;
}
-static void perl_process_event(union perf_event *pevent __unused,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct perf_session *session __unused,
- struct thread *thread)
+static void perl_process_tracepoint(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __unused,
+ struct thread *thread)
{
struct format_field *field;
static char handler[256];
@@ -265,6 +268,9 @@ static void perl_process_event(union perf_event *pevent __unused,
dSP;
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return;
+
type = trace_parse_common_type(data);
event = find_cache_event(type);
@@ -332,6 +338,42 @@ static void perl_process_event(union perf_event *pevent __unused,
LEAVE;
}
+static void perl_process_event_generic(union perf_event *pevent __unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __unused,
+ struct machine *machine __unused,
+ struct thread *thread __unused)
+{
+ dSP;
+
+ if (!get_cv("process_event", 0))
+ return;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+ XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size)));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
+ PUTBACK;
+ call_pv("process_event", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void perl_process_event(union perf_event *pevent,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct thread *thread)
+{
+ perl_process_tracepoint(pevent, sample, evsel, machine, thread);
+ perl_process_event_generic(pevent, sample, evsel, machine, thread);
+}
+
static void run_start_sub(void)
{
dSP; /* access to Perl stack */
@@ -553,7 +595,28 @@ static int perl_generate_script(const char *outfile)
fprintf(ofp, "sub print_header\n{\n"
"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
"\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
- "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}");
+ "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
+
+ fprintf(ofp,
+ "\n# Packed byte string args of process_event():\n"
+ "#\n"
+ "# $event:\tunion perf_event\tutil/event.h\n"
+ "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
+ "# $sample:\tstruct perf_sample\tutil/event.h\n"
+ "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
+ "\n"
+ "sub process_event\n"
+ "{\n"
+ "\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
+ "\n"
+ "\tmy @event\t= unpack(\"LSS\", $event);\n"
+ "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
+ "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
+ "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
+ "\n"
+ "\tuse Data::Dumper;\n"
+ "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
+ "}\n");
fclose(ofp);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 6ccf70e8d8f..0b2a4878317 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -29,6 +29,8 @@
#include "../../perf.h"
#include "../util.h"
+#include "../event.h"
+#include "../thread.h"
#include "../trace-event.h"
PyMODINIT_FUNC initperf_trace_context(void);
@@ -207,7 +209,7 @@ static inline struct event *find_cache_event(int type)
static void python_process_event(union perf_event *pevent __unused,
struct perf_sample *sample,
struct perf_evsel *evsel __unused,
- struct perf_session *session __unused,
+ struct machine *machine __unused,
struct thread *thread)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 85c1e6b76f0..b5ca2558c7b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -10,6 +10,7 @@
#include "evlist.h"
#include "evsel.h"
#include "session.h"
+#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
@@ -78,39 +79,13 @@ out_close:
return -1;
}
-static void perf_session__id_header_size(struct perf_session *session)
-{
- struct perf_sample *data;
- u64 sample_type = session->sample_type;
- u16 size = 0;
-
- if (!session->sample_id_all)
- goto out;
-
- if (sample_type & PERF_SAMPLE_TID)
- size += sizeof(data->tid) * 2;
-
- if (sample_type & PERF_SAMPLE_TIME)
- size += sizeof(data->time);
-
- if (sample_type & PERF_SAMPLE_ID)
- size += sizeof(data->id);
-
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- size += sizeof(data->stream_id);
-
- if (sample_type & PERF_SAMPLE_CPU)
- size += sizeof(data->cpu) * 2;
-out:
- session->id_hdr_size = size;
-}
-
void perf_session__update_sample_type(struct perf_session *self)
{
self->sample_type = perf_evlist__sample_type(self->evlist);
self->sample_size = __perf_evsel__sample_size(self->sample_type);
self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
- perf_session__id_header_size(self);
+ self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
+ self->host_machine.id_hdr_size = self->id_hdr_size;
}
int perf_session__create_kernel_maps(struct perf_session *self)
@@ -130,18 +105,26 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self)
struct perf_session *perf_session__new(const char *filename, int mode,
bool force, bool repipe,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
- size_t len = filename ? strlen(filename) + 1 : 0;
- struct perf_session *self = zalloc(sizeof(*self) + len);
+ struct perf_session *self;
+ struct stat st;
+ size_t len;
+
+ if (!filename || !strlen(filename)) {
+ if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
+ filename = "-";
+ else
+ filename = "perf.data";
+ }
+
+ len = strlen(filename);
+ self = zalloc(sizeof(*self) + len);
if (self == NULL)
goto out;
memcpy(self->filename, filename, len);
- self->threads = RB_ROOT;
- INIT_LIST_HEAD(&self->dead_threads);
- self->last_match = NULL;
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
@@ -171,10 +154,10 @@ struct perf_session *perf_session__new(const char *filename, int mode,
goto out_delete;
}
- if (ops && ops->ordering_requires_timestamps &&
- ops->ordered_samples && !self->sample_id_all) {
+ if (tool && tool->ordering_requires_timestamps &&
+ tool->ordered_samples && !self->sample_id_all) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
- ops->ordered_samples = false;
+ tool->ordered_samples = false;
}
out:
@@ -184,17 +167,22 @@ out_delete:
return NULL;
}
-static void perf_session__delete_dead_threads(struct perf_session *self)
+static void machine__delete_dead_threads(struct machine *machine)
{
struct thread *n, *t;
- list_for_each_entry_safe(t, n, &self->dead_threads, node) {
+ list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
list_del(&t->node);
thread__delete(t);
}
}
-static void perf_session__delete_threads(struct perf_session *self)
+static void perf_session__delete_dead_threads(struct perf_session *session)
+{
+ machine__delete_dead_threads(&session->host_machine);
+}
+
+static void machine__delete_threads(struct machine *self)
{
struct rb_node *nd = rb_first(&self->threads);
@@ -207,6 +195,11 @@ static void perf_session__delete_threads(struct perf_session *self)
}
}
+static void perf_session__delete_threads(struct perf_session *session)
+{
+ machine__delete_threads(&session->host_machine);
+}
+
void perf_session__delete(struct perf_session *self)
{
perf_session__destroy_kernel_maps(self);
@@ -217,7 +210,7 @@ void perf_session__delete(struct perf_session *self)
free(self);
}
-void perf_session__remove_thread(struct perf_session *self, struct thread *th)
+void machine__remove_thread(struct machine *self, struct thread *th)
{
self->last_match = NULL;
rb_erase(&th->rb_node, &self->threads);
@@ -236,16 +229,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
return 0;
}
-int perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
int err;
- callchain_cursor_reset(&self->callchain_cursor);
+ callchain_cursor_reset(&evsel->hists.callchain_cursor);
for (i = 0; i < chain->nr; i++) {
u64 ip;
@@ -272,7 +265,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
al.filtered = false;
thread__find_addr_location(thread, self, cpumode,
- MAP__FUNCTION, thread->pid, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
symbol__match_parent_regex(al.sym))
@@ -281,7 +274,7 @@ int perf_session__resolve_callchain(struct perf_session *self,
break;
}
- err = callchain_cursor_append(&self->callchain_cursor,
+ err = callchain_cursor_append(&evsel->hists.callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
@@ -290,75 +283,91 @@ int perf_session__resolve_callchain(struct perf_session *self,
return 0;
}
-static int process_event_synth_stub(union perf_event *event __used,
- struct perf_session *session __used)
+static int process_event_synth_tracing_data_stub(union perf_event *event __used,
+ struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_sample_stub(union perf_event *event __used,
+static int process_event_synth_attr_stub(union perf_event *event __used,
+ struct perf_evlist **pevlist __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_event_sample_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample __used,
struct perf_evsel *evsel __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_stub(union perf_event *event __used,
+static int process_event_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
struct perf_sample *sample __used,
- struct perf_session *session __used)
+ struct machine *machine __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round_stub(struct perf_tool *tool __used,
+ union perf_event *event __used,
+ struct perf_session *perf_session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round_stub(union perf_event *event __used,
- struct perf_session *session __used,
- struct perf_event_ops *ops __used)
+static int process_event_type_stub(struct perf_tool *tool __used,
+ union perf_event *event __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round(union perf_event *event,
- struct perf_session *session,
- struct perf_event_ops *ops);
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+static void perf_tool__fill_defaults(struct perf_tool *tool)
{
- if (handler->sample == NULL)
- handler->sample = process_event_sample_stub;
- if (handler->mmap == NULL)
- handler->mmap = process_event_stub;
- if (handler->comm == NULL)
- handler->comm = process_event_stub;
- if (handler->fork == NULL)
- handler->fork = process_event_stub;
- if (handler->exit == NULL)
- handler->exit = process_event_stub;
- if (handler->lost == NULL)
- handler->lost = perf_event__process_lost;
- if (handler->read == NULL)
- handler->read = process_event_stub;
- if (handler->throttle == NULL)
- handler->throttle = process_event_stub;
- if (handler->unthrottle == NULL)
- handler->unthrottle = process_event_stub;
- if (handler->attr == NULL)
- handler->attr = process_event_synth_stub;
- if (handler->event_type == NULL)
- handler->event_type = process_event_synth_stub;
- if (handler->tracing_data == NULL)
- handler->tracing_data = process_event_synth_stub;
- if (handler->build_id == NULL)
- handler->build_id = process_event_synth_stub;
- if (handler->finished_round == NULL) {
- if (handler->ordered_samples)
- handler->finished_round = process_finished_round;
+ if (tool->sample == NULL)
+ tool->sample = process_event_sample_stub;
+ if (tool->mmap == NULL)
+ tool->mmap = process_event_stub;
+ if (tool->comm == NULL)
+ tool->comm = process_event_stub;
+ if (tool->fork == NULL)
+ tool->fork = process_event_stub;
+ if (tool->exit == NULL)
+ tool->exit = process_event_stub;
+ if (tool->lost == NULL)
+ tool->lost = perf_event__process_lost;
+ if (tool->read == NULL)
+ tool->read = process_event_sample_stub;
+ if (tool->throttle == NULL)
+ tool->throttle = process_event_stub;
+ if (tool->unthrottle == NULL)
+ tool->unthrottle = process_event_stub;
+ if (tool->attr == NULL)
+ tool->attr = process_event_synth_attr_stub;
+ if (tool->event_type == NULL)
+ tool->event_type = process_event_type_stub;
+ if (tool->tracing_data == NULL)
+ tool->tracing_data = process_event_synth_tracing_data_stub;
+ if (tool->build_id == NULL)
+ tool->build_id = process_finished_round_stub;
+ if (tool->finished_round == NULL) {
+ if (tool->ordered_samples)
+ tool->finished_round = process_finished_round;
else
- handler->finished_round = process_finished_round_stub;
+ tool->finished_round = process_finished_round_stub;
}
}
@@ -490,11 +499,11 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset);
static void flush_sample_queue(struct perf_session *s,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
@@ -505,7 +514,7 @@ static void flush_sample_queue(struct perf_session *s,
unsigned idx = 0, progress_next = os->nr_samples / 16;
int ret;
- if (!ops->ordered_samples || !limit)
+ if (!tool->ordered_samples || !limit)
return;
list_for_each_entry_safe(iter, tmp, head, list) {
@@ -516,7 +525,7 @@ static void flush_sample_queue(struct perf_session *s,
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
- perf_session_deliver_event(s, iter->event, &sample, ops,
+ perf_session_deliver_event(s, iter->event, &sample, tool,
iter->file_offset);
os->last_flush = iter->timestamp;
@@ -578,11 +587,11 @@ static void flush_sample_queue(struct perf_session *s,
* Flush every events below timestamp 7
* etc...
*/
-static int process_finished_round(union perf_event *event __used,
- struct perf_session *session,
- struct perf_event_ops *ops)
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event __used,
+ struct perf_session *session)
{
- flush_sample_queue(session, ops);
+ flush_sample_queue(session, tool);
session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
return 0;
@@ -737,13 +746,26 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
callchain__printf(sample);
}
+static struct machine *
+ perf_session__find_machine_for_cpumode(struct perf_session *session,
+ union perf_event *event)
+{
+ const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
+ return perf_session__find_machine(session, event->ip.pid);
+
+ return perf_session__find_host_machine(session);
+}
+
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset)
{
struct perf_evsel *evsel;
+ struct machine *machine;
dump_event(session, event, file_offset, sample);
@@ -765,6 +787,8 @@ static int perf_session_deliver_event(struct perf_session *session,
hists__inc_nr_events(&evsel->hists, event->header.type);
}
+ machine = perf_session__find_machine_for_cpumode(session, event);
+
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
dump_sample(session, event, sample);
@@ -772,23 +796,25 @@ static int perf_session_deliver_event(struct perf_session *session,
++session->hists.stats.nr_unknown_id;
return -1;
}
- return ops->sample(event, sample, evsel, session);
+ return tool->sample(tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
- return ops->mmap(event, sample, session);
+ return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
- return ops->comm(event, sample, session);
+ return tool->comm(tool, event, sample, machine);
case PERF_RECORD_FORK:
- return ops->fork(event, sample, session);
+ return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
- return ops->exit(event, sample, session);
+ return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
- return ops->lost(event, sample, session);
+ if (tool->lost == perf_event__process_lost)
+ session->hists.stats.total_lost += event->lost.lost;
+ return tool->lost(tool, event, sample, machine);
case PERF_RECORD_READ:
- return ops->read(event, sample, session);
+ return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
- return ops->throttle(event, sample, session);
+ return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
- return ops->unthrottle(event, sample, session);
+ return tool->unthrottle(tool, event, sample, machine);
default:
++session->hists.stats.nr_unknown_events;
return -1;
@@ -812,24 +838,29 @@ static int perf_session__preprocess_sample(struct perf_session *session,
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
- struct perf_event_ops *ops, u64 file_offset)
+ struct perf_tool *tool, u64 file_offset)
{
+ int err;
+
dump_event(session, event, file_offset, NULL);
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
- return ops->attr(event, session);
+ err = tool->attr(event, &session->evlist);
+ if (err == 0)
+ perf_session__update_sample_type(session);
+ return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
- return ops->event_type(event, session);
+ return tool->event_type(tool, event);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(session->fd, file_offset, SEEK_SET);
- return ops->tracing_data(event, session);
+ return tool->tracing_data(event, session);
case PERF_RECORD_HEADER_BUILD_ID:
- return ops->build_id(event, session);
+ return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
- return ops->finished_round(event, session, ops);
+ return tool->finished_round(tool, event, session);
default:
return -EINVAL;
}
@@ -837,7 +868,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
static int perf_session__process_event(struct perf_session *session,
union perf_event *event,
- struct perf_event_ops *ops,
+ struct perf_tool *tool,
u64 file_offset)
{
struct perf_sample sample;
@@ -853,7 +884,7 @@ static int perf_session__process_event(struct perf_session *session,
hists__inc_nr_events(&session->hists, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
- return perf_session__process_user_event(session, event, ops, file_offset);
+ return perf_session__process_user_event(session, event, tool, file_offset);
/*
* For all kernel events we get the sample data
@@ -866,14 +897,14 @@ static int perf_session__process_event(struct perf_session *session,
if (perf_session__preprocess_sample(session, event, &sample))
return 0;
- if (ops->ordered_samples) {
+ if (tool->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample,
file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session_deliver_event(session, event, &sample, ops,
+ return perf_session_deliver_event(session, event, &sample, tool,
file_offset);
}
@@ -884,6 +915,11 @@ void perf_event_header__bswap(struct perf_event_header *self)
self->size = bswap_16(self->size);
}
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
+{
+ return machine__findnew_thread(&session->host_machine, pid);
+}
+
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
struct thread *thread = perf_session__findnew(self, 0);
@@ -897,9 +933,9 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
}
static void perf_session__warn_about_errors(const struct perf_session *session,
- const struct perf_event_ops *ops)
+ const struct perf_tool *tool)
{
- if (ops->lost == perf_event__process_lost &&
+ if (tool->lost == perf_event__process_lost &&
session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
@@ -934,7 +970,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
union perf_event event;
uint32_t size;
@@ -943,7 +979,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self,
int err;
void *p;
- perf_event_ops__fill_defaults(ops);
+ perf_tool__fill_defaults(tool);
head = 0;
more:
@@ -979,8 +1015,7 @@ more:
}
}
- if (size == 0 ||
- (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
+ if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
head, event.header.size, event.header.type);
/*
@@ -1003,7 +1038,7 @@ more:
done:
err = 0;
out_err:
- perf_session__warn_about_errors(self, ops);
+ perf_session__warn_about_errors(self, tool);
perf_session_free_sample_buffers(self);
return err;
}
@@ -1034,7 +1069,7 @@ fetch_mmaped_event(struct perf_session *session,
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
- u64 file_size, struct perf_event_ops *ops)
+ u64 file_size, struct perf_tool *tool)
{
u64 head, page_offset, file_offset, file_pos, progress_next;
int err, mmap_prot, mmap_flags, map_idx = 0;
@@ -1043,7 +1078,7 @@ int __perf_session__process_events(struct perf_session *session,
union perf_event *event;
uint32_t size;
- perf_event_ops__fill_defaults(ops);
+ perf_tool__fill_defaults(tool);
page_size = sysconf(_SC_PAGESIZE);
@@ -1098,7 +1133,7 @@ more:
size = event->header.size;
if (size == 0 ||
- perf_session__process_event(session, event, ops, file_pos) < 0) {
+ perf_session__process_event(session, event, tool, file_pos) < 0) {
dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
file_offset + head, event->header.size,
event->header.type);
@@ -1127,15 +1162,15 @@ more:
err = 0;
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
- flush_sample_queue(session, ops);
+ flush_sample_queue(session, tool);
out_err:
- perf_session__warn_about_errors(session, ops);
+ perf_session__warn_about_errors(session, tool);
perf_session_free_sample_buffers(session);
return err;
}
int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *ops)
+ struct perf_tool *tool)
{
int err;
@@ -1146,9 +1181,9 @@ int perf_session__process_events(struct perf_session *self,
err = __perf_session__process_events(self,
self->header.data_offset,
self->header.data_size,
- self->size, ops);
+ self->size, tool);
else
- err = __perf_session__process_pipe_events(self, ops);
+ err = __perf_session__process_pipe_events(self, tool);
return err;
}
@@ -1163,9 +1198,8 @@ bool perf_session__has_traces(struct perf_session *self, const char *msg)
return true;
}
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr)
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name, u64 addr)
{
char *bracket;
enum map_type i;
@@ -1224,6 +1258,27 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
return ret;
}
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
+{
+ /*
+ * FIXME: Here we have to actually print all the machines in this
+ * session, not just the host...
+ */
+ return machine__fprintf(&session->host_machine, fp);
+}
+
+void perf_session__remove_thread(struct perf_session *session,
+ struct thread *th)
+{
+ /*
+ * FIXME: This one makes no sense, we need to remove the thread from
+ * the machine it belongs to, perf_session can have many machines, so
+ * doing it always on ->host_machine is wrong. Fix when auditing all
+ * the 'perf kvm' code.
+ */
+ machine__remove_thread(&session->host_machine, th);
+}
+
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
@@ -1236,17 +1291,16 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
return NULL;
}
-void perf_session__print_ip(union perf_event *event,
- struct perf_sample *sample,
- struct perf_session *session,
- int print_sym, int print_dso)
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso)
{
struct addr_location al;
const char *symname, *dsoname;
- struct callchain_cursor *cursor = &session->callchain_cursor;
+ struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
struct callchain_cursor_node *node;
- if (perf_event__preprocess_sample(event, session, &al, sample,
+ if (perf_event__preprocess_sample(event, machine, &al, sample,
NULL) < 0) {
error("problem processing %d event, skipping it.\n",
event->header.type);
@@ -1255,7 +1309,7 @@ void perf_session__print_ip(union perf_event *event,
if (symbol_conf.use_callchain && sample->callchain) {
- if (perf_session__resolve_callchain(session, al.thread,
+ if (machine__resolve_callchain(machine, evsel, al.thread,
sample->callchain, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
@@ -1333,6 +1387,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
}
map = cpu_map__new(cpu_list);
+ if (map == NULL) {
+ pr_err("Invalid cpu_list\n");
+ return -1;
+ }
for (i = 0; i < map->nr; i++) {
int cpu = map->map[i];
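
Among the session.c changes above, perf_session__find_machine_for_cpumode() centralises the choice between the host machine and a guest machine based on the cpumode bits of the event header. A sketch of that dispatch, with hypothetical types and constants standing in for the PERF_RECORD_MISC_* encoding:

#include <stdio.h>

/* hypothetical stand-ins for the PERF_RECORD_MISC_* cpumode encoding */
#define MISC_CPUMODE_MASK   0x7
#define MISC_GUEST_KERNEL   0x4

struct vmachine { const char *name; };

static struct vmachine host  = { "host"  };
static struct vmachine guest = { "guest" };
static int perf_guest = 1;              /* assume guest profiling is enabled */

static struct vmachine *find_machine_for_cpumode(unsigned short misc)
{
        unsigned short cpumode = misc & MISC_CPUMODE_MASK;

        if (cpumode == MISC_GUEST_KERNEL && perf_guest)
                return &guest;  /* the real code looks the guest machine up by pid */
        return &host;
}

int main(void)
{
        printf("%s\n", find_machine_for_cpumode(MISC_GUEST_KERNEL)->name);
        printf("%s\n", find_machine_for_cpumode(0x1)->name);
        return 0;
}
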
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 6e393c98eb3..37bc38381fb 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -30,9 +30,6 @@ struct perf_session {
struct perf_header header;
unsigned long size;
unsigned long mmap_window;
- struct rb_root threads;
- struct list_head dead_threads;
- struct thread *last_match;
struct machine host_machine;
struct rb_root machines;
struct perf_evlist *evlist;
@@ -53,65 +50,31 @@ struct perf_session {
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
- struct callchain_cursor callchain_cursor;
- char filename[0];
+ char filename[1];
};
-struct perf_evsel;
-struct perf_event_ops;
-
-typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample,
- struct perf_evsel *evsel, struct perf_session *session);
-typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
- struct perf_session *session);
-typedef int (*event_synth_op)(union perf_event *self,
- struct perf_session *session);
-typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
- struct perf_event_ops *ops);
-
-struct perf_event_ops {
- event_sample sample;
- event_op mmap,
- comm,
- fork,
- exit,
- lost,
- read,
- throttle,
- unthrottle;
- event_synth_op attr,
- event_type,
- tracing_data,
- build_id;
- event_op2 finished_round;
- bool ordered_samples;
- bool ordering_requires_timestamps;
-};
+struct perf_tool;
struct perf_session *perf_session__new(const char *filename, int mode,
bool force, bool repipe,
- struct perf_event_ops *ops);
+ struct perf_tool *tool);
void perf_session__delete(struct perf_session *self);
void perf_event_header__bswap(struct perf_event_header *self);
int __perf_session__process_events(struct perf_session *self,
u64 data_offset, u64 data_size, u64 size,
- struct perf_event_ops *ops);
+ struct perf_tool *tool);
int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *event_ops);
+ struct perf_tool *tool);
-int perf_session__resolve_callchain(struct perf_session *self,
+int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent);
bool perf_session__has_traces(struct perf_session *self, const char *msg);
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr);
-
void mem_bswap_64(void *src, int byte_size);
void perf_event__attr_swap(struct perf_event_attr *attr);
@@ -144,12 +107,16 @@ struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t p
static inline
void perf_session__process_machines(struct perf_session *self,
+ struct perf_tool *tool,
machine__process_t process)
{
- process(&self->host_machine, self);
- return machines__process(&self->machines, process, self);
+ process(&self->host_machine, tool);
+ return machines__process(&self->machines, process, tool);
}
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
@@ -167,13 +134,20 @@ static inline int perf_session__parse_sample(struct perf_session *session,
session->header.needs_swap);
}
+static inline int perf_session__synthesize_sample(struct perf_session *session,
+ union perf_event *event,
+ const struct perf_sample *sample)
+{
+ return perf_event__synthesize_sample(event, session->sample_type,
+ sample, session->header.needs_swap);
+}
+
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
-void perf_session__print_ip(union perf_event *event,
- struct perf_sample *sample,
- struct perf_session *session,
- int print_sym, int print_dso);
+void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
+ struct machine *machine, struct perf_evsel *evsel,
+ int print_sym, int print_dso);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
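
session.h above drops the old perf_event_ops callback table; its replacement lives in the new tool.h at the end of this diff, and perf_tool__fill_defaults() in session.c plugs an "unhandled" stub into every callback the caller left NULL. A minimal sketch of that defaulting pattern with a hypothetical two-callback tool (not the real perf_tool layout):

#include <stdio.h>
#include <stddef.h>

struct sample_tool;                     /* hypothetical, not the real perf_tool */

typedef int (*event_cb)(struct sample_tool *tool, int event);

struct sample_tool {
        event_cb sample;
        event_cb comm;
};

static int event_stub(struct sample_tool *tool, int event)
{
        (void)tool;
        printf("event %d: unhandled\n", event);
        return 0;
}

static void tool_fill_defaults(struct sample_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = event_stub;
        if (tool->comm == NULL)
                tool->comm = event_stub;
}

static int my_sample(struct sample_tool *tool, int event)
{
        (void)tool;
        printf("event %d: handled by my_sample\n", event);
        return 0;
}

int main(void)
{
        struct sample_tool tool = { .sample = my_sample };  /* comm left NULL */

        tool_fill_defaults(&tool);
        tool.sample(&tool, 1);
        tool.comm(&tool, 2);            /* falls through to the stub */
        return 0;
}

Filling the gaps up front lets the event dispatcher call every slot unconditionally, which is exactly why perf_session_deliver_event() above no longer checks for missing handlers.
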
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 95d37007492..36d4c561957 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -27,7 +27,8 @@ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
perf = Extension('perf',
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
- 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
+ 'util/util.c', 'util/xyarray.c', 'util/cgroup.c',
+ 'util/debugfs.c'],
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 632b50c7bc2..215d50f2042 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1757,7 +1757,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
struct stat st;
/*sshfs might return bad dent->d_type, so we have to stat*/
- sprintf(path, "%s/%s", dir_name, dent->d_name);
+ snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
if (stat(path, &st))
continue;
@@ -1766,8 +1766,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
!strcmp(dent->d_name, ".."))
continue;
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
ret = map_groups__set_modules_path_dir(mg, path);
if (ret < 0)
goto out;
@@ -1788,9 +1786,6 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
if (map == NULL)
continue;
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
-
long_name = strdup(path);
if (long_name == NULL) {
ret = -1;
@@ -2609,10 +2604,10 @@ int symbol__init(void)
symbol_conf.initialized = true;
return 0;
-out_free_dso_list:
- strlist__delete(symbol_conf.dso_list);
out_free_comm_list:
strlist__delete(symbol_conf.comm_list);
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
return -1;
}
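
The symbol.c hunk above replaces an unbounded sprintf() with snprintf() when building the module path, and drops the two later snprintf() calls that re-formatted the same buffer. A minimal userspace sketch of the bounded-formatting pattern (the directory and file names below are illustrative, not taken from the diff):

    #include <stdio.h>

    /* Build "<dir>/<name>" into a fixed buffer, refusing silent truncation. */
    static int build_path(char *buf, size_t size, const char *dir, const char *name)
    {
            int n = snprintf(buf, size, "%s/%s", dir, name);

            /* snprintf() reports the length it wanted to write; >= size means truncated */
            if (n < 0 || (size_t)n >= size)
                    return -1;
            return 0;
    }

    int main(void)
    {
            char path[64];

            if (build_path(path, sizeof(path), "/lib/modules", "ext4.ko") == 0)
                    printf("%s\n", path);
            return 0;
    }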
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 29f8d742e92..123c2e14353 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -68,6 +68,7 @@ struct strlist;
struct symbol_conf {
unsigned short priv_size;
+ unsigned short nr_events;
bool try_vmlinux_path,
use_modules,
sort_by_name,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index d5d3b22250f..fb4b7ea6752 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -61,7 +61,7 @@ static size_t thread__fprintf(struct thread *self, FILE *fp)
map_groups__fprintf(&self->mg, verbose, fp);
}
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
+struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
struct rb_node **p = &self->threads.rb_node;
struct rb_node *parent = NULL;
@@ -125,12 +125,12 @@ int thread__fork(struct thread *self, struct thread *parent)
return 0;
}
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
+size_t machine__fprintf(struct machine *machine, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
- for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e5f2401c1b5..70c2c13ff67 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,16 +18,14 @@ struct thread {
int comm_len;
};
-struct perf_session;
+struct machine;
void thread__delete(struct thread *self);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
static inline struct map *thread__find_map(struct thread *self,
enum map_type type, u64 addr)
@@ -35,14 +33,12 @@ static inline struct map *thread__find_map(struct thread *self,
return self ? map_groups__find(&self->mg, type, addr) : NULL;
}
-void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_map(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
new file mode 100644
index 00000000000..b0e1aadba8d
--- /dev/null
+++ b/tools/perf/util/tool.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_TOOL_H
+#define __PERF_TOOL_H
+
+#include <stdbool.h>
+
+struct perf_session;
+union perf_event;
+struct perf_evlist;
+struct perf_evsel;
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine);
+
+typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+typedef int (*event_attr_op)(union perf_event *event,
+ struct perf_evlist **pevlist);
+typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
+
+typedef int (*event_synth_op)(union perf_event *event,
+ struct perf_session *session);
+
+typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
+ struct perf_session *session);
+
+struct perf_tool {
+ event_sample sample,
+ read;
+ event_op mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ throttle,
+ unthrottle;
+ event_attr_op attr;
+ event_synth_op tracing_data;
+ event_simple_op event_type;
+ event_op2 finished_round,
+ build_id;
+ bool ordered_samples;
+ bool ordering_requires_timestamps;
+};
+
+#endif /* __PERF_TOOL_H */
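
The new tool.h turns event handling into a table of per-event callbacks (struct perf_tool) that the session layer invokes, rather than fixed perf_session functions. A hedged sketch of the general callback-table shape; the event type, handler names and dispatcher below are invented for illustration and are not the perf API:

    #include <stdio.h>

    struct event;                            /* opaque payload for the sketch */
    typedef int (*event_op)(struct event *ev);

    struct tool {
            event_op mmap;
            event_op comm;
            event_op exit;
    };

    static int ignore_event(struct event *ev) { (void)ev; return 0; }
    static int print_comm(struct event *ev)   { (void)ev; puts("comm event"); return 0; }

    /* Dispatcher: pick one handler out of the table, fall back to a no-op. */
    static int deliver_comm(struct tool *t, struct event *ev)
    {
            return t->comm ? t->comm(ev) : 0;
    }

    int main(void)
    {
            struct tool report = { .mmap = ignore_event, .comm = print_comm };

            return deliver_comm(&report, NULL);
    }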
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 39965096795..a248f3c2c60 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -1,15 +1,17 @@
#ifndef __PERF_TOP_H
#define __PERF_TOP_H 1
+#include "tool.h"
#include "types.h"
-#include "../perf.h"
#include <stddef.h>
+#include <stdbool.h>
struct perf_evlist;
struct perf_evsel;
struct perf_session;
struct perf_top {
+ struct perf_tool tool;
struct perf_evlist *evlist;
/*
* Symbols will be added here in perf_event__process_sample and will
@@ -23,10 +25,26 @@ struct perf_top {
int freq;
pid_t target_pid, target_tid;
bool hide_kernel_symbols, hide_user_symbols, zero;
+ bool system_wide;
+ bool use_tui, use_stdio;
+ bool sort_has_symbols;
+ bool dont_use_callchains;
+ bool kptr_restrict_warned;
+ bool vmlinux_warned;
+ bool inherit;
+ bool group;
+ bool sample_id_all_avail;
+ bool dump_symtab;
const char *cpu_list;
struct hist_entry *sym_filter_entry;
struct perf_evsel *sym_evsel;
struct perf_session *session;
+ struct winsize winsize;
+ unsigned int mmap_pages;
+ int default_interval;
+ int realtime_prio;
+ int sym_pcnt_filter;
+ const char *sym_filter;
};
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d2655f08bcc..ac6830d8292 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -18,7 +18,8 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define _GNU_SOURCE
+#include <ctype.h>
+#include "util.h"
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
@@ -31,7 +32,6 @@
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
-#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <linux/list.h>
@@ -44,10 +44,6 @@
#define VERSION "0.5"
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define MAX_PATH 256
-
#define TRACE_CTRL "tracing_on"
#define TRACE "trace"
#define AVAILABLE "available_tracers"
@@ -73,26 +69,6 @@ struct events {
};
-
-static void die(const char *fmt, ...)
-{
- va_list ap;
- int ret = errno;
-
- if (errno)
- perror("perf");
- else
- ret = -1;
-
- va_start(ap, fmt);
- fprintf(stderr, " ");
- vfprintf(stderr, fmt, ap);
- va_end(ap);
-
- fprintf(stderr, "\n");
- exit(ret);
-}
-
void *malloc_or_die(unsigned int size)
{
void *data;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0a7ed5b5e28..6c164dc9ee9 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
field = malloc_or_die(sizeof(*field));
type = process_arg(event, field, &token);
+ while (type == EVENT_OP)
+ type = process_op(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index c9dcbec7d80..a3fdf55f317 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,7 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __unused,
struct perf_sample *sample __unused,
struct perf_evsel *evsel __unused,
- struct perf_session *session __unused,
+ struct machine *machine __unused,
struct thread *thread __unused)
{
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index a8410081764..58ae14c5baa 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,7 +3,11 @@
#include <stdbool.h>
#include "parse-events.h"
-#include "session.h"
+
+struct machine;
+struct perf_sample;
+union perf_event;
+struct thread;
#define __unused __attribute__((unused))
@@ -292,7 +296,7 @@ struct scripting_ops {
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct perf_session *session,
+ struct machine *machine,
struct thread *thread);
int (*generate_script) (const char *outfile);
};
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 0575905d120..295a9c93f94 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -224,7 +224,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
}
static int annotate_browser__run(struct annotate_browser *self, int evidx,
- int nr_events, void(*timer)(void *arg),
+ void(*timer)(void *arg),
void *arg, int delay_secs)
{
struct rb_node *nd = NULL;
@@ -328,8 +328,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
notes = symbol__annotation(target);
pthread_mutex_lock(&notes->lock);
- if (notes->src == NULL &&
- symbol__alloc_hist(target, nr_events) < 0) {
+ if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
pthread_mutex_unlock(&notes->lock);
ui__warning("Not enough memory for annotating '%s' symbol!\n",
target->name);
@@ -337,7 +336,7 @@ static int annotate_browser__run(struct annotate_browser *self, int evidx,
}
pthread_mutex_unlock(&notes->lock);
- symbol__tui_annotate(target, ms->map, evidx, nr_events,
+ symbol__tui_annotate(target, ms->map, evidx,
timer, arg, delay_secs);
}
continue;
@@ -358,15 +357,15 @@ out:
return key;
}
-int hist_entry__tui_annotate(struct hist_entry *he, int evidx, int nr_events,
+int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
void(*timer)(void *arg), void *arg, int delay_secs)
{
- return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, nr_events,
+ return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
timer, arg, delay_secs);
}
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
- int nr_events, void(*timer)(void *arg), void *arg,
+ void(*timer)(void *arg), void *arg,
int delay_secs)
{
struct objdump_line *pos, *n;
@@ -419,8 +418,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
- ret = annotate_browser__run(&browser, evidx, nr_events,
- timer, arg, delay_secs);
+ ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
objdump_line__free(pos);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index d0c94b45968..1212a386a03 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -1020,7 +1020,7 @@ do_annotate:
* Don't let this be freed, say, by hists__decay_entry.
*/
he->used = true;
- err = hist_entry__tui_annotate(he, evsel->idx, nr_events,
+ err = hist_entry__tui_annotate(he, evsel->idx,
timer, arg, delay_secs);
he->used = false;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
index 295e366b631..13aa64e50e1 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/util/ui/progress.c
@@ -14,6 +14,9 @@ void ui_progress__update(u64 curr, u64 total, const char *title)
if (use_browser <= 0)
return;
+ if (total == 0)
+ return;
+
ui__refresh_dimensions(true);
pthread_mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2;
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index e16bf9a707e..d76d1c0ff98 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -1,5 +1,8 @@
/*
- * GIT - The information manager from hell
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
*/
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0128906bac8..37be34dff79 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -245,4 +245,15 @@ int readn(int fd, void *buf, size_t size);
#define _STR(x) #x
#define STR(x) _STR(x)
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
#endif
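
The is_power_of_2() helper added to util.h relies on the identity that clearing the lowest set bit of a power of two yields zero, while any other non-zero value keeps at least one bit set. A short standalone check of the same expression:

    #include <stdbool.h>
    #include <stdio.h>

    static inline bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            unsigned long tests[] = { 0, 1, 2, 3, 4, 96, 128, 4096 };

            for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                    printf("%lu -> %s\n", tests[i],
                           is_power_of_2(tests[i]) ? "power of two" : "not");
            return 0;
    }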
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index bdd33470b23..697c8b4e59c 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -32,6 +32,7 @@ void perf_read_values_destroy(struct perf_read_values *values)
for (i = 0; i < values->threads; i++)
free(values->value[i]);
+ free(values->value);
free(values->pid);
free(values->tid);
free(values->counterrawid);
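
The values.c hunk frees the top-level values->value array in addition to each per-thread row, closing a leak in perf_read_values_destroy(). A minimal sketch of the matching alloc/free discipline for such a two-level array (the structure and field names here are illustrative):

    #include <stdlib.h>

    struct table {
            int rows, cols;
            long **cell;                     /* cell[row][col] */
    };

    static int table_init(struct table *t, int rows, int cols)
    {
            t->rows = rows;
            t->cols = cols;
            t->cell = calloc(rows, sizeof(*t->cell));
            if (!t->cell)
                    return -1;
            for (int i = 0; i < rows; i++) {
                    t->cell[i] = calloc(cols, sizeof(**t->cell));
                    if (!t->cell[i])
                            return -1;       /* caller still runs table_destroy() */
            }
            return 0;
    }

    static void table_destroy(struct table *t)
    {
            if (!t->cell)
                    return;
            for (int i = 0; i < t->rows; i++)
                    free(t->cell[i]);        /* every row... */
            free(t->cell);                   /* ...and the row-pointer array itself */
            t->cell = NULL;
    }

    int main(void)
    {
            struct table t;

            if (table_init(&t, 4, 8) == 0)
                    t.cell[2][3] = 42;
            table_destroy(&t);
            return 0;
    }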
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index ff75125deed..555c69a5592 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -38,8 +38,8 @@ displays the statistics gathered since it was forked.
.PP
.SH FIELD DESCRIPTIONS
.nf
-\fBpkg\fP processor package number.
-\fBcore\fP processor core number.
+\fBpk\fP processor package number.
+\fBcr\fP processor core number.
\fBCPU\fP Linux CPU (logical processor) number.
\fB%c0\fP percent of the interval that the CPU retired instructions.
\fBGHz\fP average clock rate while the CPU was in c0 state.
@@ -58,7 +58,7 @@ Subsequent rows show per-CPU statistics.
.nf
[root@x980]# ./turbostat
-core CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
+cr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07
0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07
0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07
@@ -102,7 +102,7 @@ until ^C while the other CPUs are mostly idle:
.nf
[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
-^Ccore CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
+^Ccr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00
0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00
0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index dbedfa19672..553c06b7d6f 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -950,7 +950,7 @@
# TEST_START
# TEST_TYPE = config_bisect
# CONFIG_BISECT_TYPE = build
-# CONFIG_BISECT = /home/test/˘onfig-bad
+# CONFIG_BISECT = /home/test/config-bad
# MIN_CONFIG = /home/test/config-min
# BISECT_MANUAL = 1
#
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 3ad0925d23a..758e3b36d4c 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -17,6 +17,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
#include "irq.h"
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ out:
return r;
}
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device. To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs. PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file. We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+ int i;
+ bool bar_found = false;
+
+ for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+ char *kpath, *syspath;
+ struct path path;
+ struct inode *inode;
+ int r;
+
+ if (!pci_resource_len(dev, i))
+ continue;
+
+ kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+ if (!kpath)
+ return -ENOMEM;
+
+ /* Per sysfs-rules, sysfs is always at /sys */
+ syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+ kfree(kpath);
+ if (!syspath)
+ return -ENOMEM;
+
+ r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+ kfree(syspath);
+ if (r)
+ return r;
+
+ inode = path.dentry->d_inode;
+
+ r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+ path_put(&path);
+ if (r)
+ return r;
+
+ bar_found = true;
+ }
+
+ /* If no resources, probably something special */
+ if (!bar_found)
+ return -EPERM;
+
+ return 0;
+#else
+ return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
+ u8 header_type;
+
+ if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+ return -EINVAL;
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
r = -EINVAL;
goto out_free;
}
+
+ /* Don't allow bridges to be assigned */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+ if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+ r = -EPERM;
+ goto out_put;
+ }
+
+ r = probe_sysfs_permissions(dev);
+ if (r)
+ goto out_put;
+
if (pci_enable_device(dev)) {
printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
list_add(&match->list, &kvm->arch.assigned_dev_head);
- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
- if (!kvm->arch.iommu_domain) {
- r = kvm_iommu_map_guest(kvm);
- if (r)
- goto out_list_del;
- }
- r = kvm_assign_device(kvm, match);
+ if (!kvm->arch.iommu_domain) {
+ r = kvm_iommu_map_guest(kvm);
if (r)
goto out_list_del;
}
+ r = kvm_assign_device(kvm, match);
+ if (r)
+ goto out_list_del;
out:
srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
goto out;
}
- if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
- kvm_deassign_device(kvm, match);
+ kvm_deassign_device(kvm, match);
kvm_free_assigned_device(kvm, match);
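
probe_sysfs_permissions() builds each BAR's sysfs resource path and asks the VFS whether the caller may read and write it; assignment is refused otherwise, and now also requires an IOMMU and a non-bridge device. A rough userspace analogue of the path-build-plus-permission-probe idea; asprintf() and access() stand in for kasprintf()/kern_path()/inode_permission() and the device path is hypothetical:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Return 0 only if the caller can read and write every listed resource file. */
    static int probe_resources(const char *sysfs_dev_dir, int nr_bars)
    {
            for (int i = 0; i < nr_bars; i++) {
                    char *path = NULL;
                    int r;

                    if (asprintf(&path, "%s/resource%d", sysfs_dev_dir, i) < 0)
                            return -1;

                    r = access(path, R_OK | W_OK);   /* kernel code checks inode_permission() */
                    free(path);
                    if (r)
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* Hypothetical device directory; point it at a real one to try it. */
            const char *dir = "/sys/bus/pci/devices/0000:00:00.0";

            return probe_resources(dir, 1) ? 1 : 0;
    }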
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index a6ec206f36b..88b2fe3ddf4 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -28,9 +28,15 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
* (addr,len) is fully included in
* (zone->addr, zone->size)
*/
-
- return (dev->zone.addr <= addr &&
- addr + len <= dev->zone.addr + dev->zone.size);
+ if (len < 0)
+ return 0;
+ if (addr + len < addr)
+ return 0;
+ if (addr < dev->zone.addr)
+ return 0;
+ if (addr + len > dev->zone.addr + dev->zone.size)
+ return 0;
+ return 1;
}
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
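
The rewritten range check rejects negative lengths and, crucially, the case where addr + len wraps around, which the old single comparison could miss. A small standalone version of the same checks, with the types and names simplified:

    #include <stdint.h>
    #include <stdio.h>

    /* Is [addr, addr+len) fully inside [zone_addr, zone_addr+zone_size)? */
    static int in_range(uint64_t zone_addr, uint64_t zone_size,
                        uint64_t addr, int len)
    {
            if (len < 0)
                    return 0;
            if (addr + (uint64_t)len < addr)         /* wrap-around */
                    return 0;
            if (addr < zone_addr)
                    return 0;
            if (addr + (uint64_t)len > zone_addr + zone_size)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", in_range(0x1000, 0x100, 0x1000, 8));          /* 1 */
            printf("%d\n", in_range(0x1000, 0x100, UINT64_MAX - 3, 8));  /* 0: wraps */
            return 0;
    }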
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 3eed61eb486..dcaf272c26c 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -185,7 +185,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
irqe.dest_mode = 0; /* Physical mode. */
/* need to read apic_id from apic regiest since
* it can be rewritten */
- irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
+ irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
}
#endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
@@ -332,9 +332,18 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
(void*)addr, len, val);
ASSERT(!(addr & 0xf)); /* check alignment */
- if (len == 4 || len == 8)
+ switch (len) {
+ case 8:
+ case 4:
data = *(u32 *) val;
- else {
+ break;
+ case 2:
+ data = *(u16 *) val;
+ break;
+ case 1:
+ data = *(u8 *) val;
+ break;
+ default:
printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
return 0;
}
@@ -343,7 +352,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
spin_lock(&ioapic->lock);
switch (addr) {
case IOAPIC_REG_SELECT:
- ioapic->ioregsel = data;
+ ioapic->ioregsel = data & 0xFF; /* 8-bit register */
break;
case IOAPIC_REG_WINDOW:
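
The ioapic write path now widens 1-, 2-, 4- and 8-byte accesses from the correct type and masks IOREGSEL down to its architected 8 bits. A compact, standalone sketch of the width switch; the register variable and return values are made up for the example:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ioregsel;

    static int regsel_write(const void *val, int len)
    {
            uint32_t data;

            switch (len) {
            case 8:                          /* only the low 32 bits are defined */
            case 4:
                    data = *(const uint32_t *)val;
                    break;
            case 2:
                    data = *(const uint16_t *)val;
                    break;
            case 1:
                    data = *(const uint8_t *)val;
                    break;
            default:
                    return -1;               /* unsupported access width */
            }

            ioregsel = data & 0xFF;          /* IOREGSEL is an 8-bit register */
            return 0;
    }

    int main(void)
    {
            uint32_t v = 0x1234;

            regsel_write(&v, 4);
            printf("0x%02x\n", (unsigned int)ioregsel);   /* 0x34 */
            return 0;
    }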
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a195c07fa82..0fb448e6a1a 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -113,7 +113,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
/* Map into IO address space */
r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
- get_order(page_size), flags);
+ page_size, flags);
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
@@ -134,14 +134,15 @@ unmap_pages:
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
- int i, idx, r = 0;
+ int idx, r = 0;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+ kvm_for_each_memslot(memslot, slots) {
+ r = kvm_iommu_map_pages(kvm, memslot);
if (r)
break;
}
@@ -292,15 +293,15 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
while (gfn < end_gfn) {
unsigned long unmap_pages;
- int order;
+ size_t size;
/* Get physical address */
phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
pfn = phys >> PAGE_SHIFT;
/* Unmap address from IO address space */
- order = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
- unmap_pages = 1ULL << order;
+ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+ unmap_pages = 1ULL << get_order(size);
/* Unpin all pages we just unmapped to not leak any memory */
kvm_unpin_pages(kvm, pfn, unmap_pages);
@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
- int i, idx;
+ int idx;
struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
- slots->memslots[i].npages);
- }
+ kvm_for_each_memslot(memslot, slots)
+ kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
srcu_read_unlock(&kvm->srcu, idx);
return 0;
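
Both the iommu map and unmap paths drop the raw index loop over slots->memslots in favour of the kvm_for_each_memslot() iterator, so callers no longer depend on how the slot array is laid out. A small illustration of hiding such iteration behind a macro; the structures and macro name here are invented for the sketch:

    #include <stdio.h>

    struct slot {
            unsigned long base_gfn;
            unsigned long npages;
    };

    struct slots {
            int used;
            struct slot slot[8];
    };

    /* Visit every populated slot without exposing the index to the caller. */
    #define for_each_slot(s, container) \
            for ((s) = (container)->slot; (s) < (container)->slot + (container)->used; (s)++)

    int main(void)
    {
            struct slots s = { .used = 2, .slot = { { 0x0, 16 }, { 0x100, 32 } } };
            struct slot *m;

            for_each_slot(m, &s)
                    printf("gfn %#lx, %lu pages\n", m->base_gfn, m->npages);
            return 0;
    }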
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d9cfb782cb8..7287bf5d1c9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+static void kvm_init_memslots_id(struct kvm *kvm)
+{
+ int i;
+ struct kvm_memslots *slots = kvm->memslots;
+
+ for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+ slots->id_to_index[i] = slots->memslots[i].id = i;
+}
+
static struct kvm *kvm_create_vm(void)
{
int r, i;
@@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
goto out_err_nosrcu;
+ kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu))
goto out_err_nosrcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
@@ -547,11 +557,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm)
{
- int i;
struct kvm_memslots *slots = kvm->memslots;
+ struct kvm_memory_slot *memslot;
- for (i = 0; i < slots->nmemslots; ++i)
- kvm_free_physmem_slot(&slots->memslots[i], NULL);
+ kvm_for_each_memslot(memslot, slots)
+ kvm_free_physmem_slot(memslot, NULL);
kfree(kvm->memslots);
}
@@ -625,10 +635,69 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return -ENOMEM;
memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+ memslot->nr_dirty_pages = 0;
return 0;
}
#endif /* !CONFIG_S390 */
+static struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+ struct kvm_memory_slot *memslot;
+
+ kvm_for_each_memslot(memslot, slots)
+ if (gfn >= memslot->base_gfn &&
+ gfn < memslot->base_gfn + memslot->npages)
+ return memslot;
+
+ return NULL;
+}
+
+static int cmp_memslot(const void *slot1, const void *slot2)
+{
+ struct kvm_memory_slot *s1, *s2;
+
+ s1 = (struct kvm_memory_slot *)slot1;
+ s2 = (struct kvm_memory_slot *)slot2;
+
+ if (s1->npages < s2->npages)
+ return 1;
+ if (s1->npages > s2->npages)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Sort the memslots based on size, so that the larger slots
+ * will get a better fit.
+ */
+static void sort_memslots(struct kvm_memslots *slots)
+{
+ int i;
+
+ sort(slots->memslots, KVM_MEM_SLOTS_NUM,
+ sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+
+ for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+ slots->id_to_index[slots->memslots[i].id] = i;
+}
+
+void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
+{
+ if (new) {
+ int id = new->id;
+ struct kvm_memory_slot *old = id_to_memslot(slots, id);
+ unsigned long npages = old->npages;
+
+ *old = *new;
+ if (new->npages != npages)
+ sort_memslots(slots);
+ }
+
+ slots->generation++;
+}
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
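
In the hunk above, cmp_memslot() orders slots by descending npages so the linear scan in search_memslots() hits the larger slots first, and sort_memslots() rebuilds id_to_index after every sort so lookups by slot id stay constant time. A userspace sketch of the same sort-plus-index-rebuild step, using qsort() in place of the kernel's sort():

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_SLOTS 4

    struct slot {
            int id;
            unsigned long npages;
    };

    static struct slot slots[NR_SLOTS] = {
            { 0, 16 }, { 1, 4096 }, { 2, 0 }, { 3, 256 },
    };
    static int id_to_index[NR_SLOTS];

    static int cmp_slot(const void *a, const void *b)
    {
            const struct slot *s1 = a, *s2 = b;

            if (s1->npages < s2->npages)
                    return 1;                /* bigger slots sort first */
            if (s1->npages > s2->npages)
                    return -1;
            return 0;
    }

    static void sort_slots(void)
    {
            qsort(slots, NR_SLOTS, sizeof(slots[0]), cmp_slot);
            for (int i = 0; i < NR_SLOTS; i++)
                    id_to_index[slots[i].id] = i;    /* keep id lookups O(1) */
    }

    int main(void)
    {
            sort_slots();
            for (int i = 0; i < NR_SLOTS; i++)
                    printf("index %d: id %d, %lu pages\n",
                           i, slots[i].id, slots[i].npages);
            return 0;
    }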
@@ -662,12 +731,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
- if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+ if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- memslot = &kvm->memslots->memslots[mem->slot];
+ memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
@@ -774,15 +843,17 @@ skip_lpage:
#endif /* not defined CONFIG_S390 */
if (!npages) {
+ struct kvm_memory_slot *slot;
+
r = -ENOMEM;
- slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
if (!slots)
goto out_free;
- memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
- if (mem->slot >= slots->nmemslots)
- slots->nmemslots = mem->slot + 1;
- slots->generation++;
- slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+ slot = id_to_memslot(slots, mem->slot);
+ slot->flags |= KVM_MEMSLOT_INVALID;
+
+ update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
@@ -810,13 +881,10 @@ skip_lpage:
}
r = -ENOMEM;
- slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
if (!slots)
goto out_free;
- memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
- if (mem->slot >= slots->nmemslots)
- slots->nmemslots = mem->slot + 1;
- slots->generation++;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
@@ -826,7 +894,7 @@ skip_lpage:
new.lpage_info[i] = NULL;
}
- slots->memslots[mem->slot] = new;
+ update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
@@ -888,7 +956,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots->memslots[log->slot];
+ memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -966,16 +1034,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
gfn_t gfn)
{
- int i;
-
- for (i = 0; i < slots->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &slots->memslots[i];
-
- if (gfn >= memslot->base_gfn
- && gfn < memslot->base_gfn + memslot->npages)
- return memslot;
- }
- return NULL;
+ return search_memslots(slots, gfn);
}
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
@@ -986,20 +1045,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
- int i;
- struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &slots->memslots[i];
-
- if (memslot->flags & KVM_MEMSLOT_INVALID)
- continue;
+ if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
+ memslot->flags & KVM_MEMSLOT_INVALID)
+ return 0;
- if (gfn >= memslot->base_gfn
- && gfn < memslot->base_gfn + memslot->npages)
- return 1;
- }
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
@@ -1491,7 +1543,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
- __set_bit_le(rel_gfn, memslot->dirty_bitmap);
+ if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
+ memslot->nr_dirty_pages++;
}
}
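
The dirty-logging hunk switches from __set_bit_le() to __test_and_set_bit_le() so that nr_dirty_pages is bumped only on the 0-to-1 transition, not on every repeated write to the same page. A minimal "count only newly set bits" version on a single-word bitmap; the helper below is a local stand-in, not the kernel bitops:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long bitmap;             /* one word is enough for the sketch */
    static unsigned long nr_dirty;

    /* Set bit nr; report whether it was already set. */
    static bool test_and_set_bit(unsigned int nr)
    {
            bool was_set = bitmap & (1UL << nr);

            bitmap |= 1UL << nr;
            return was_set;
    }

    static void mark_dirty(unsigned int nr)
    {
            if (!test_and_set_bit(nr))
                    nr_dirty++;              /* count each page once */
    }

    int main(void)
    {
            mark_dirty(3);
            mark_dirty(3);                   /* second hit does not bump the counter */
            mark_dirty(7);
            printf("%lu dirty pages\n", nr_dirty);   /* 2 */
            return 0;
    }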
@@ -1690,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
smp_wmb();
atomic_inc(&kvm->online_vcpus);
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- if (kvm->bsp_vcpu_id == id)
- kvm->bsp_vcpu = vcpu;
-#endif
mutex_unlock(&kvm->lock);
return r;
@@ -1768,12 +1817,11 @@ out_free1:
struct kvm_regs *kvm_regs;
r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
- if (!kvm_regs)
+ kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
+ if (IS_ERR(kvm_regs)) {
+ r = PTR_ERR(kvm_regs);
goto out;
- r = -EFAULT;
- if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
- goto out_free2;
+ }
r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
if (r)
goto out_free2;
@@ -1797,13 +1845,11 @@ out_free2:
break;
}
case KVM_SET_SREGS: {
- kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
- r = -ENOMEM;
- if (!kvm_sregs)
- goto out;
- r = -EFAULT;
- if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
+ kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
+ if (IS_ERR(kvm_sregs)) {
+ r = PTR_ERR(kvm_sregs);
goto out;
+ }
r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
if (r)
goto out;
@@ -1899,13 +1945,11 @@ out_free2:
break;
}
case KVM_SET_FPU: {
- fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
- r = -ENOMEM;
- if (!fpu)
- goto out;
- r = -EFAULT;
- if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
+ fpu = memdup_user(argp, sizeof(*fpu));
+ if (IS_ERR(fpu)) {
+ r = PTR_ERR(fpu);
goto out;
+ }
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
if (r)
goto out;
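
The three ioctl hunks above collapse the kmalloc()/copy_from_user() pairs into single memdup_user() calls and propagate the error encoded in the returned pointer. A simplified userspace analogue of that "allocate and copy in one helper" shape; memdup() below is invented for the sketch and returns NULL rather than an encoded error:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a caller-supplied buffer in one step, or fail cleanly. */
    static void *memdup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }

    struct regs { unsigned long ip, sp; };

    static int set_regs(const struct regs *user_regs)
    {
            struct regs *copy = memdup(user_regs, sizeof(*copy));

            if (!copy)
                    return -1;               /* kernel code would return PTR_ERR() here */

            /* ... operate on the private copy ... */
            printf("ip=%#lx sp=%#lx\n", copy->ip, copy->sp);
            free(copy);
            return 0;
    }

    int main(void)
    {
            struct regs r = { 0x1000, 0x7fff0000 };

            return set_regs(&r);
    }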
@@ -2520,10 +2564,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
if (bus->dev_count > NR_IOBUS_DEVS-1)
return -ENOSPC;
- new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
- memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
@@ -2539,13 +2582,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
int i, r;
struct kvm_io_bus *new_bus, *bus;
- new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ bus = kvm->buses[bus_idx];
+
+ new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
- bus = kvm->buses[bus_idx];
- memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
-
r = -ENOENT;
for (i = 0; i < new_bus->dev_count; i++)
if (new_bus->range[i].dev == dev) {
@@ -2612,15 +2654,29 @@ static const struct file_operations *stat_fops[] = {
[KVM_STAT_VM] = &vm_stat_fops,
};
-static void kvm_init_debug(void)
+static int kvm_init_debug(void)
{
+ int r = -EFAULT;
struct kvm_stats_debugfs_item *p;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
- for (p = debugfs_entries; p->name; ++p)
+ if (kvm_debugfs_dir == NULL)
+ goto out;
+
+ for (p = debugfs_entries; p->name; ++p) {
p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
+ if (p->dentry == NULL)
+ goto out_dir;
+ }
+
+ return 0;
+
+out_dir:
+ debugfs_remove_recursive(kvm_debugfs_dir);
+out:
+ return r;
}
static void kvm_exit_debug(void)
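
kvm_init_debug() now reports failure and tears down whatever debugfs entries it managed to create instead of silently continuing with missing files, and kvm_init() unwinds the syscore registration when that happens. The rollback-on-partial-failure shape in standalone form; plain heap allocations stand in here for debugfs dentries:

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_ENTRIES 4

    static void *entries[NR_ENTRIES];

    static int create_all(void)
    {
            for (int i = 0; i < NR_ENTRIES; i++) {
                    entries[i] = malloc(64);         /* stands in for debugfs_create_file() */
                    if (!entries[i])
                            goto out_undo;
            }
            return 0;

    out_undo:
            /* Tear down everything created so far, then report the failure. */
            for (int i = 0; i < NR_ENTRIES; i++) {
                    free(entries[i]);
                    entries[i] = NULL;
            }
            return -1;
    }

    int main(void)
    {
            if (create_all()) {
                    fprintf(stderr, "create debugfs files failed\n");
                    return 1;
            }
            puts("all entries created");
            return 0;
    }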
@@ -2764,10 +2820,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
- kvm_init_debug();
+ r = kvm_init_debug();
+ if (r) {
+ printk(KERN_ERR "kvm: create debugfs files failed\n");
+ goto out_undebugfs;
+ }
return 0;
+out_undebugfs:
+ unregister_syscore_ops(&kvm_syscore_ops);
out_unreg:
kvm_async_pf_deinit();
out_free: